## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ---- message=FALSE-----------------------------------------------------------
library(tidyverse)
library(CKMRpop)
## -----------------------------------------------------------------------------
pars <- list()
pars$`max-age` <- 5
pars$`fem-surv-probs` <- c(1, 0.7, 0.8, 0.8, 0.8)
pars$`male-surv-probs` <- c(1, 0.7, 0.8, 0.8, 0.8)
pars$`fem-prob-repro` <- c(0, 0, 1, 1, 1)
pars$`male-prob-repro` <- c(0, 0, 1, 1, 1)
pars$`fem-asrf` <- c(0, 0, .5, .7, 1)
pars$`male-asrp` <- c(0, 0, .5, .7, 1)
pars$`fem-rep-disp-par` <- 0.25
pars$`male-rep-disp-par` <- 0.25
pars$`sex-ratio` <- 0.5
## -----------------------------------------------------------------------------
pars$`number-of-years` <- 25
# given cohort sizes of 250 the stable age distribution
# can be found and used for the initial number of indivs
L <- leslie_from_spip(pars, 250)
# then we add those to the spip parameters
pars$`initial-males` <- floor(L$stable_age_distro_male)
pars$`initial-females` <- floor(L$stable_age_distro_fem)
pars$`cohort-size` <- "const 250"
## -----------------------------------------------------------------------------
pars$`discard-all` <- 0
pars$`gtyp-ppn-fem-pre` <- "10-25 0.05 0.00 0.00 0.00 0.00"
pars$`gtyp-ppn-male-pre` <- "10-25 0.05 0.00 0.00 0.00 0.00"
pars$`gtyp-ppn-fem-post` <- "10-25 0.00 0.01 0.01 0.01 0.01"
pars$`gtyp-ppn-male-post` <- "10-25 0.00 0.01 0.01 0.01 0.01"
## ---- eval=FALSE--------------------------------------------------------------
# set.seed(15)
# one_pop_dir <- run_spip(pars = pars, num_pops = 1)
## -----------------------------------------------------------------------------
pars_list <- list(
pars,
pars,
pars
)
## ---- echo=FALSE, results='hide', message=FALSE-------------------------------
# NOTE the following if()...else() blocks are here
# to test whether spip has been installed yet.
# If spip is not available (for example, on CRAN's build machines) this
# is noted and stored package data are used for the variable
# "slurped" to build the remainder of the vignette.
if(spip_exists()) {
message("spip is installed and will be used")
set.seed(15)
three_pop_dir <- run_spip(
pars = pars_list,
num_pops = 3
)
slurp3 <- slurp_spip(three_pop_dir, num_generations = 1)
} else {
message("Using stored package data because spip is not installed")
slurp3 <- three_pops_no_mig_slurped_results
}
## ---- eval=FALSE--------------------------------------------------------------
# set.seed(15)
# three_pop_dir <- run_spip(
# pars = pars_list,
# num_pops = 3
# )
# slurp3 <- slurp_spip(three_pop_dir, num_generations = 1)
## ---- fig.width=5, out.width = '100%'-----------------------------------------
ggplot_census_by_year_age_sex(slurp3$census_postkill)
## -----------------------------------------------------------------------------
# out-migration rates pops 0, 1, 2
pars_list[[1]]$`fem-prob-mig-out` <- "5-25 1 .20"
pars_list[[1]]$`male-prob-mig-out` <- "5-25 1 .20"
pars_list[[2]]$`fem-prob-mig-out` <- "5-25 1 .15"
pars_list[[2]]$`male-prob-mig-out` <- "5-25 1 .15"
pars_list[[3]]$`fem-prob-mig-out` <- "5-25 1 .05"
pars_list[[3]]$`male-prob-mig-out` <- "5-25 1 .05"
# in-migration rates
pars_list[[1]]$`fem-prob-mig-in` <- "5-25 1 0.00 0.80 0.20"
pars_list[[1]]$`male-prob-mig-in` <- "5-25 1 0.00 0.80 0.20"
pars_list[[2]]$`fem-prob-mig-in` <- "5-25 1 0.05 0.00 0.95"
pars_list[[2]]$`male-prob-mig-in` <- "5-25 1 0.05 0.00 0.95"
pars_list[[3]]$`fem-prob-mig-in` <- "5-25 1 0.10 0.90 0.00"
pars_list[[3]]$`male-prob-mig-in` <- "5-25 1 0.10 0.90 0.00"
## ---- echo=FALSE, results='hide', message=FALSE-------------------------------
# NOTE the following if()...else() blocks are here
# to test whether spip has been installed yet.
# If spip is not available (for example, on CRAN's build machines) this
# is noted and stored package data are used for the variable
# "slurped" to build the remainder of the vignette.
if(spip_exists()) {
message("spip is installed and will be used")
set.seed(15)
mig_dir <- run_spip(
pars = pars_list,
num_pops = 3
)
slurp_mig <- slurp_spip(mig_dir, num_generations = 1)
} else {
message("Using stored package data because spip is not installed")
slurp_mig <- three_pops_with_mig_slurped_results
}
## ---- eval=FALSE--------------------------------------------------------------
# set.seed(15)
# mig_dir <- run_spip(
# pars = pars_list,
# num_pops = 3
# )
# slurp_mig <- slurp_spip(mig_dir, num_generations = 1)
## -----------------------------------------------------------------------------
slurp_mig$migrants %>%
count(age, from_pop, to_pop)
## -----------------------------------------------------------------------------
# compile relationships
crel <- compile_related_pairs(slurp_mig$samples)
# count number of PO pairs by which populations
# the members were born in
crel %>%
filter(dom_relat == "PO") %>%
mutate(
parent_born_pop = case_when(
upper_member == 1 ~ born_pop_1,
upper_member == 2 ~ born_pop_2,
TRUE ~ NA_integer_
),
child_born_pop = case_when(
upper_member == 1 ~ born_pop_2,
upper_member == 2 ~ born_pop_1,
TRUE ~ NA_integer_
)
) %>%
count(parent_born_pop, child_born_pop)
/scratch/gouwar.j/cran-all/cranData/CKMRpop/inst/doc/simple-example-with-migration.R
---
title: "A Simple Example With Migration"
author: "Eric C. Anderson"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{simple-example-with-migration}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
In this vignette we show how to use CKMRpop/spip in a
multi-population setting with asymmetrical migration rates. It would
be good to read the "about_spip" vignette beforehand to understand how
migration is implemented in spip.
Our goal here is to model three "populations" of a fish species in which
migration occurs during the larval stage and is influenced by
currents that flow down the coast, so that migration occurs primarily in
one direction. Each of the populations will have the same life tables,
etc. We will keep it simple.
First load some packages:
```{r, message=FALSE}
library(tidyverse)
library(CKMRpop)
```
## Basic Demography
The basic demography for each population is given below. We assume no
mortality from newborn to age one; in effect, we are only modeling the
newborns that survive to age 1. Only 3-, 4-, and 5-year-olds mate,
and older fish have somewhat more reproductive success.
```{r}
pars <- list()
pars$`max-age` <- 5
pars$`fem-surv-probs` <- c(1, 0.7, 0.8, 0.8, 0.8)
pars$`male-surv-probs` <- c(1, 0.7, 0.8, 0.8, 0.8)
pars$`fem-prob-repro` <- c(0, 0, 1, 1, 1)
pars$`male-prob-repro` <- c(0, 0, 1, 1, 1)
pars$`fem-asrf` <- c(0, 0, .5, .7, 1)
pars$`male-asrp` <- c(0, 0, .5, .7, 1)
pars$`fem-rep-disp-par` <- 0.25
pars$`male-rep-disp-par` <- 0.25
pars$`sex-ratio` <- 0.5
```
We will run these simulations for 25 years and assume
an annual cohort size of 250 fish in each population. We will
aim for a constant population size and start off with the stable
age distribution.
```{r}
pars$`number-of-years` <- 25
# given cohort sizes of 250 the stable age distribution
# can be found and used for the initial number of indivs
L <- leslie_from_spip(pars, 250)
# then we add those to the spip parameters
pars$`initial-males` <- floor(L$stable_age_distro_male)
pars$`initial-females` <- floor(L$stable_age_distro_fem)
pars$`cohort-size` <- "const 250"
```
## Sampling
We will simulate a sampling program in which juveniles are sampled
when they recruit after their larval phase. We assume that the larval
phase starts at birth, but is over by age 1. So we can sample new
recruits via prekill sampling at age 1. We will pretend the sampling
is non-lethal. We will also non-lethally sample a fraction of the older
age-class fish. In each of the option strings below, the first entry is the
range of years over which sampling occurs, followed by one sampling fraction
for each age from 1 to `max-age`.
```{r}
pars$`discard-all` <- 0
pars$`gtyp-ppn-fem-pre` <- "10-25 0.05 0.00 0.00 0.00 0.00"
pars$`gtyp-ppn-male-pre` <- "10-25 0.05 0.00 0.00 0.00 0.00"
pars$`gtyp-ppn-fem-post` <- "10-25 0.00 0.01 0.01 0.01 0.01"
pars$`gtyp-ppn-male-post` <- "10-25 0.00 0.01 0.01 0.01 0.01"
```
OK! At this point, we have set up the demography for a single population. Let's just
show that we could simulate a single population with that demography and the sampling
like this:
```{r, eval=FALSE}
set.seed(15)
one_pop_dir <- run_spip(pars = pars, num_pops = 1)
```
But, of course, the question is, how do we simulate multiple, interacting populations?
## Simulating Multiple Populations
To simulate multiple populations in spip, from within CKMRpop, we set the
`num_pops` option of `run_spip()` to the number of populations.
And then, for the `pars` option we pass a _list_ of `num_pops`
different lists, each one holding the demography and sampling information
for each of the populations in the simulation. In our case, since
we are simulating three populations with the same demography,
`num_pops = 3` and we can construct our `pars` argument like this:
```{r}
pars_list <- list(
pars,
pars,
pars
)
```
because each population has the same demography.
Note that we have not included any migration between the populations yet,
but we can still run a simulation with all of them like this:
```{r, echo=FALSE, results='hide', message=FALSE}
# NOTE the following if()...else() blocks are here
# to test whether spip has been installed yet.
# If spip is not available (for example, on CRAN's build machines) this
# is noted and stored package data are used for the variable
# "slurped" to build the remainder of the vignette.
if(spip_exists()) {
message("spip is installed and will be used")
set.seed(15)
three_pop_dir <- run_spip(
pars = pars_list,
num_pops = 3
)
slurp3 <- slurp_spip(three_pop_dir, num_generations = 1)
} else {
message("Using stored package data because spip is not installed")
slurp3 <- three_pops_no_mig_slurped_results
}
```
```{r, eval=FALSE}
set.seed(15)
three_pop_dir <- run_spip(
pars = pars_list,
num_pops = 3
)
slurp3 <- slurp_spip(three_pop_dir, num_generations = 1)
```
And you can investigate that output by making the plots and other
summaries shown in the main vignette. Many of these summaries account
for the multiple populations like this:
```{r, fig.width=5, out.width = '100%'}
ggplot_census_by_year_age_sex(slurp3$census_postkill)
```
Those are the postkill census sizes in the three different populations, which get the labels
0, 1, and 2 according to the order in which they were passed to spip (i.e., their order
in `pars_list`, above).
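If you want the same information numerically, you can summarize the census tibble directly.
The following is just a sketch: it assumes `census_postkill` has columns named `pop`, `year`,
`male`, and `female`, so check the column names in your own output before using it.
```{r, eval=FALSE}
# total postkill population size by population and year
slurp3$census_postkill %>%
  group_by(pop, year) %>%
  summarise(total = sum(male + female), .groups = "drop")
```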
## Migration between multiple populations
The above simulated multiple populations, but did not simulate any migration between them.
Here, we will imagine different rates of self-recruitment of the larvae, which means different rates
at which surviving larvae are going to be migrants.
The populations are labeled 0, 1, and 2, and, as stated above, we will simulate
somewhat unidirectional migration between them, like so:
- 20% of pop 0 individuals are migrants; 80% of those end up in pop 1 and 20% in pop 2
- 15% of pop 1 indivs are migrants; 95% of them end up in pop 2 and 5% in pop 0
- 5% of pop 2 are migrants; 90% of those end up in pop 1 and 10% make it to pop 0.
The way we implement this is by adding options to each population's demography parameters
like so:
```{r}
# out-migration rates pops 0, 1, 2
pars_list[[1]]$`fem-prob-mig-out` <- "5-25 1 .20"
pars_list[[1]]$`male-prob-mig-out` <- "5-25 1 .20"
pars_list[[2]]$`fem-prob-mig-out` <- "5-25 1 .15"
pars_list[[2]]$`male-prob-mig-out` <- "5-25 1 .15"
pars_list[[3]]$`fem-prob-mig-out` <- "5-25 1 .05"
pars_list[[3]]$`male-prob-mig-out` <- "5-25 1 .05"
# in-migration rates
pars_list[[1]]$`fem-prob-mig-in` <- "5-25 1 0.00 0.80 0.20"
pars_list[[1]]$`male-prob-mig-in` <- "5-25 1 0.00 0.80 0.20"
pars_list[[2]]$`fem-prob-mig-in` <- "5-25 1 0.05 0.00 0.95"
pars_list[[2]]$`male-prob-mig-in` <- "5-25 1 0.05 0.00 0.95"
pars_list[[3]]$`fem-prob-mig-in` <- "5-25 1 0.10 0.90 0.00"
pars_list[[3]]$`male-prob-mig-in` <- "5-25 1 0.10 0.90 0.00"
```
Now, we can simulate this:
```{r, echo=FALSE, results='hide', message=FALSE}
# NOTE the following if()...else() blocks are here
# to test whether spip has been installed yet.
# If spip is not available (for example, on CRAN's build machines) this
# is noted and stored package data are used for the variable
# "slurped" to build the remainder of the vignette.
if(spip_exists()) {
message("spip is installed and will be used")
set.seed(15)
mig_dir <- run_spip(
pars = pars_list,
num_pops = 3
)
slurp_mig <- slurp_spip(mig_dir, num_generations = 1)
} else {
message("Using stored package data because spip is not installed")
slurp_mig <- three_pops_with_mig_slurped_results
}
```
```{r, eval=FALSE}
set.seed(15)
mig_dir <- run_spip(
pars = pars_list,
num_pops = 3
)
slurp_mig <- slurp_spip(mig_dir, num_generations = 1)
```
Now, the object `slurp_mig` holds information about census sizes of the populations
and the relationships between samples, etc. That can be broken down by population.
First, a record of every migration event is stored in `slurp_mig$migrants`. This can
be easily summarized:
```{r}
slurp_mig$migrants %>%
count(age, from_pop, to_pop)
```
For a further example, to tabulate parent-offspring pairs found between different
populations, we can compile the relationships and summarize:
```{r}
# compile relationships
crel <- compile_related_pairs(slurp_mig$samples)
# count number of PO pairs by which populations
# the members were born in
crel %>%
filter(dom_relat == "PO") %>%
mutate(
parent_born_pop = case_when(
upper_member == 1 ~ born_pop_1,
upper_member == 2 ~ born_pop_2,
TRUE ~ NA_integer_
),
child_born_pop = case_when(
upper_member == 1 ~ born_pop_2,
upper_member == 2 ~ born_pop_1,
TRUE ~ NA_integer_
)
) %>%
count(parent_born_pop, child_born_pop)
```
/scratch/gouwar.j/cran-all/cranData/CKMRpop/inst/doc/simple-example-with-migration.Rmd
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup, message=FALSE-----------------------------------------------------
library(tidyverse)
library(CKMRpop)
## -----------------------------------------------------------------------------
species_1_life_history
## -----------------------------------------------------------------------------
SPD <- species_1_life_history
## -----------------------------------------------------------------------------
# before we tell spip what the cohort sizes are, we need to
# tell it how long we will be running the simulation
SPD$`number-of-years` <- 100 # run the sim forward for 100 years
# this is our cohort size
cohort_size <- 300
# Do some matrix algebra to compute starting values from the
# stable age distribution:
L <- leslie_from_spip(SPD, cohort_size)
# then we add those to the spip parameters
SPD$`initial-males` <- floor(L$stable_age_distro_male)
SPD$`initial-females` <- floor(L$stable_age_distro_fem)
# tell spip to use the cohort size
SPD$`cohort-size` <- paste("const", cohort_size, collapse = " ")
## -----------------------------------------------------------------------------
samp_frac <- 0.03
samp_start_year <- 50
samp_stop_year <- 75
SPD$`discard-all` <- 0
SPD$`gtyp-ppn-fem-post` <- paste(
samp_start_year, "-", samp_stop_year, " ",
samp_frac, " ", samp_frac, " ", samp_frac, " ",
paste(rep(0, SPD$`max-age` - 3), collapse = " "),
sep = ""
)
SPD$`gtyp-ppn-male-post` <- SPD$`gtyp-ppn-fem-post`
## ---- echo=FALSE, results='hide', message=FALSE-------------------------------
# NOTE the following if()...else() blocks are here
# to test whether spip has been installed yet.
# If spip is not available (for example, on CRAN's build machines) this
# is noted and stored package data are used for the variable
# "slurped" to build the remainder of the vignette.
if(spip_exists()) {
message("spip is installed and will be used")
set.seed(5)
spip_dir <- run_spip(pars = SPD)
# now read that in and find relatives within the grandparental range
slurped <- slurp_spip(spip_dir, 2)
} else {
message("Using stored package data because spip is not installed")
slurped <- species_1_slurped_results
}
## ---- eval=FALSE--------------------------------------------------------------
# set.seed(5) # set a seed for reproducibility of results
# spip_dir <- run_spip(pars = SPD) # run spip
# slurped <- slurp_spip(spip_dir, 2) # read the spip output into R
## ---- fig.width = 7, fig.height = 5.5-----------------------------------------
ggplot_census_by_year_age_sex(slurped$census_postkill)
## -----------------------------------------------------------------------------
surv_rates <- summarize_survival_from_census(slurped$census_postkill)
## -----------------------------------------------------------------------------
surv_rates$survival_tibble %>%
slice(1:40)
## ---- fig.width = 9, fig.height=7.5, out.height=600, out.width=700------------
surv_rates$plot_histos_by_age_and_sex
## ---- fig.width = 9, fig.height=7.5, out.height=600, out.width=700------------
surv_rates2 <- summarize_survival_from_census(
census = slurped$census_prekill,
fem_surv_probs = SPD$`fem-surv-probs`,
male_surv_probs = SPD$`male-surv-probs`
)
# print the plot
surv_rates2$plot_histos_by_age_and_sex
## -----------------------------------------------------------------------------
offs_and_mates <- summarize_offspring_and_mate_numbers(
census_postkill = slurped$census_postkill,
pedigree = slurped$pedigree,
deaths = slurped$deaths, lifetime_hexbin_width = c(1, 2)
)
## ---- fig.width = 7, fig.height = 5.5-----------------------------------------
offs_and_mates$plot_age_specific_number_of_offspring
## ---- fig.height = 6, fig.width = 7-------------------------------------------
offs_and_mates$plot_lifetime_output_vs_age_at_death
## ---- fig.width=7, fig.height=7-----------------------------------------------
offs_and_mates$plot_fraction_of_offspring_from_each_age_class
## -----------------------------------------------------------------------------
mates <- count_and_plot_mate_distribution(slurped$pedigree)
## -----------------------------------------------------------------------------
head(mates$mate_counts)
## ---- fig.width=7, fig.height=5-----------------------------------------------
mates$plot_mate_counts
## -----------------------------------------------------------------------------
crel <- compile_related_pairs(slurped$samples)
## -----------------------------------------------------------------------------
crel %>%
slice(1:10)
## -----------------------------------------------------------------------------
relat_counts <- count_and_plot_ancestry_matrices(crel)
## -----------------------------------------------------------------------------
relat_counts$highly_summarised
## -----------------------------------------------------------------------------
relat_counts$dr_counts
## ---- fig.width=7, fig.height=7-----------------------------------------------
relat_counts$dr_plots$FC
## ---- fig.width=7, fig.height=7-----------------------------------------------
relat_counts$dr_plots$Si
## -----------------------------------------------------------------------------
relat_counts$anc_mat_counts
## ---- fig.width=7, fig.height=7-----------------------------------------------
relat_counts$anc_mat_plots[[1]]
## ---- fig.width=7, fig.height=7-----------------------------------------------
relat_counts$anc_mat_plots[[2]]
## -----------------------------------------------------------------------------
nrow(slurped$samples)
## -----------------------------------------------------------------------------
slurped$samples %>%
mutate(ns = map_int(samp_years_list, length)) %>%
summarise(tot_times = sum(ns))
## -----------------------------------------------------------------------------
SS2 <- slurped$samples %>%
filter(map_int(samp_years_list, length) > 1) %>%
select(ID, samp_years_list)
SS2
## -----------------------------------------------------------------------------
# first indiv:
SS2$samp_years_list[[1]]
# second indiv:
SS2$samp_years_list[[2]]
## -----------------------------------------------------------------------------
subsampled_pairs <- downsample_pairs(
S = slurped$samples,
P = crel,
n = 100
)
## -----------------------------------------------------------------------------
# num samples before downsampling
ns_bd <- nrow(slurped$samples)
# num samples after downsampling
ns_ad <- nrow(subsampled_pairs$ds_samples)
# ratio of sample sizes
ssz_rat <- ns_ad / ns_bd
# square of the ratio
sq_rat <- ssz_rat ^ 2
# ratio of number of pairs found amongst samples
num_pairs_before <- nrow(crel)
num_pairs_after_downsampling <- nrow(subsampled_pairs$ds_pairs)
ratio <- num_pairs_after_downsampling / num_pairs_before
# compare these two things
c(sq_rat, ratio)
## -----------------------------------------------------------------------------
# because we jitter some points, we can set a seed to get the same
# result each time
set.seed(22)
spag <- uncooked_spaghetti(
Pairs = crel,
Samples = slurped$samples
)
## ---- fig.width=7.5, fig.height=9.5-------------------------------------------
spag$plot
## -----------------------------------------------------------------------------
crel %>%
slice(1:10)
## ---- echo=FALSE, results='hide', message=FALSE-------------------------------
# NOTE the following if()...else() blocks are here
# to test whether spip has been installed yet.
# If spip is not available (for example, on CRAN's build machines) this
# is noted and stored package data are used for the variable
# "slurped" to build the remainder of the vignette.
if(spip_exists()) {
# read the spip output in and find relatives within the parental range
slurped_1gen <- slurp_spip(spip_dir, num_generations = 1)
} else {
message("Using stored package data for 1gen results because spip is not installed")
slurped_1gen <- species_1_slurped_results_1gen
}
## ---- eval=FALSE--------------------------------------------------------------
# slurped_1gen <- slurp_spip(spip_dir, num_generations = 1)
## -----------------------------------------------------------------------------
crel_1gen <- compile_related_pairs(slurped_1gen$samples)
## -----------------------------------------------------------------------------
nrow(crel_1gen)
## -----------------------------------------------------------------------------
set.seed(10)
ssp_1gen <- downsample_pairs(
S = slurped_1gen$samples,
P = crel_1gen,
n = 150
)
## -----------------------------------------------------------------------------
ssp_1gen$ds_pairs %>%
count(conn_comp) %>%
arrange(desc(n))
## ---- fig.width=6, fig.height=6-----------------------------------------------
# for some reason, the aes() function gets confused unless
# ggraph library is loaded...
library(ggraph)
one_gen_graph <- plot_conn_comps(ssp_1gen$ds_pairs)
one_gen_graph$plot
## ---- fig.width=6, fig.height=6-----------------------------------------------
one_gen_graph$plot +
ggraph::geom_node_text(aes(label = name), repel = TRUE, size = 1.2) +
scale_edge_color_manual(values = c(`PO-1` = "tan2", `Si-1` = "gold", `Si-2` = "blue"))
## ---- fig.width=6, fig.height=6-----------------------------------------------
plot_conn_comps(crel)$plot
## -----------------------------------------------------------------------------
set.seed(10)
freqs <- lapply(1:100, function(x) {
nA = 1 + rpois(1, 3)
f = runif(nA)
f/sum(f)
})
## ---- echo=FALSE, results='hide', message=FALSE-------------------------------
# now we can run spip with those as input
if(spip_exists()) {
message("spip is installed and will be used")
set.seed(5)
spip_dir <- run_spip(
pars = SPD,
allele_freqs = freqs
)
# now read that in and find relatives within the grandparental range
slurped <- slurp_spip(spip_dir, 2)
} else {
message("Using stored package data because spip is not installed")
slurped <- species_1_slurped_results_100_loci
}
## ---- eval=FALSE--------------------------------------------------------------
# set.seed(5)
# spip_dir <- run_spip(
# pars = SPD,
# allele_freqs = freqs
# )
# # now read that in and find relatives within the grandparental range
# slurped <- slurp_spip(spip_dir, 2)
## -----------------------------------------------------------------------------
slurped$genotypes[1:10, 1:5]
/scratch/gouwar.j/cran-all/cranData/CKMRpop/inst/doc/species_1_simulation.R
---
title: "Simulation from species 1 life history"
output:
rmarkdown::html_vignette:
toc: true
vignette: >
%\VignetteIndexEntry{species_1_simulation}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup, message=FALSE}
library(tidyverse)
library(CKMRpop)
```
For this first example, we use the hypothetical life history of species 1.
First we have to set spip up to run with that life history.
## Setting the spip parameters
`spip` has a large number of demographic parameters. Typically `spip` is run as
a command-line program in Unix. In CKMRpop, all that action goes on under the
hood, but you still have to use the `spip` parameters. This vignette is not about
using `spip`. For a short listing of all the `spip` options, do this:
```r
library(CKMRpop)
spip_help()
```
If you want a full, complete, long listing of all the `spip` options, then you
can do:
```r
library(CKMRpop)
spip_help_full()
```
All of the "long-form" options to `spip` are given on the Unix command line starting
with two dashes, like `--fem-surv-probs`. To set parameters within `CKMRpop` to send
to spip, you simply make a named list of input values. The names of the items in the
list are the long-format option names _without the leading two dashes_. For an example,
see the package data object `species_1_life_history`, as described below.
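For instance, a couple of options could be set by hand like this (a minimal sketch of the
naming convention; the values here are arbitrary and are not used below):
```{r, eval=FALSE}
# option names are the long-form spip flags without the leading "--"
pars <- list()
pars$`max-age` <- 5
pars$`fem-surv-probs` <- c(1, 0.7, 0.8, 0.8, 0.8)
pars$`sex-ratio` <- 0.5
```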
### Basic life history parameters
These parameters are included in the package in the variable `species_1_life_history`.
It is a named list of parameters to send to spip. The list names are the names of the
spip options. It looks like this:
```{r}
species_1_life_history
```
We want to add instructions to those, telling spip how long to run the simulation,
and what the initial census sizes should be.
So, first, we copy `species_1_life_history` to a new variable, `SPD`:
```{r}
SPD <- species_1_life_history
```
Now, we can add things to SPD.
### Setting Initial Census, New Fish per Year, and Length of Simulation
The number of new fish added each year is called the "cohort-size". Once we know
that, we can figure out what the stable age distribution would be given the survival
rates, and we can use that as our starting point. There is a function
in the package that helps with that:
```{r}
# before we tell spip what the cohort sizes are, we need to
# tell it how long we will be running the simulation
SPD$`number-of-years` <- 100 # run the sim forward for 100 years
# this is our cohort size
cohort_size <- 300
# Do some matrix algebra to compute starting values from the
# stable age distribution:
L <- leslie_from_spip(SPD, cohort_size)
# then we add those to the spip parameters
SPD$`initial-males` <- floor(L$stable_age_distro_fem)
SPD$`initial-females` <- floor(L$stable_age_distro_male)
# tell spip to use the cohort size
SPD$`cohort-size` <- paste("const", cohort_size, collapse = " ")
```
### Specifying the fraction of fish sampled in different years
spip lets you specify what fraction of fish of different ages should be
sampled in different years. Here we do something simple, and instruct
spip to sample 3% of the fish of ages 1, 2, and 3 (after the episode of
death, see the spip vignette...) every year from year 50 to 75.
```{r}
samp_frac <- 0.03
samp_start_year <- 50
samp_stop_year <- 75
SPD$`discard-all` <- 0
SPD$`gtyp-ppn-fem-post` <- paste(
samp_start_year, "-", samp_stop_year, " ",
samp_frac, " ", samp_frac, " ", samp_frac, " ",
paste(rep(0, SPD$`max-age` - 3), collapse = " "),
sep = ""
)
SPD$`gtyp-ppn-male-post` <- SPD$`gtyp-ppn-fem-post`
```
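It can be worth printing the string that was constructed, to confirm that it consists of a
year range followed by one sampling fraction for each age from 1 to `max-age`:
```{r}
SPD$`gtyp-ppn-fem-post`
```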
## Running spip and slurping up the results
There are two functions that do all this for you. The function `run_spip()` runs spip in
a temporary directory. After running spip, it also processes the output
with a few shell scripts. The function returns the path to the temporary
directory. You pass that temporary directory path into the function `slurp_spip()`
to read the output back into R. It looks like this:
```{r, echo=FALSE, results='hide', message=FALSE}
# NOTE the following if()...else() blocks are here
# to test whether spip has been installed yet.
# If spip is not available (for example, on CRAN's build machines) this
# is noted and stored package data are used for the variable
# "slurped" to build the remainder of the vignette.
if(spip_exists()) {
message("spip is installed and will be used")
set.seed(5)
spip_dir <- run_spip(pars = SPD)
# now read that in and find relatives within the grandparental range
slurped <- slurp_spip(spip_dir, 2)
} else {
message("Using stored package data because spip is not installed")
slurped <- species_1_slurped_results
}
```
```{r, eval=FALSE}
set.seed(5) # set a seed for reproducibility of results
spip_dir <- run_spip(pars = SPD) # run spip
slurped <- slurp_spip(spip_dir, 2) # read the spip output into R
```
Note that setting the seed allows you to get the same results from spip.
If you don't set the seed, that is fine; spip will be seeded by the next
two integers in the current random number sequence.
If you are doing multiple runs and you want them to be different, you should
make sure that you don't inadvertently set the seed to be the same each time.
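One simple way to get runs that are different but still reproducible is to draw the
seeds up front and record them (just a sketch):
```{r, eval=FALSE}
# draw and keep one seed per planned run, then call set.seed(run_seeds[i])
# before the i-th call to run_spip()
run_seeds <- sample.int(1e6, size = 3)
run_seeds
```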
## Some functions to summarize the runs
Although during massive production simulations, you might not go back to every run
and summarize it to see what it looks like, when you are parameterizing demographic
simulations you will want to be able to quickly look at observed demographic rates
and things. There are a few functions in CKMRpop that make this quick and easy to do.
### Plot the age-specific census sizes over time
This is just a convenience function to make a pretty plot so you can check to
see what the population demographics look like:
```{r, fig.width = 7, fig.height = 5.5}
ggplot_census_by_year_age_sex(slurped$census_postkill)
```
This shows that the function `leslie_from_spip()` does a good job of finding the
initial population numbers that accord with the stable age distribution.
### Assess the observed survival rates
We can compute the survival rates like this:
```{r}
surv_rates <- summarize_survival_from_census(slurped$census_postkill)
```
That returns a list. One part of the list is a tibble with observed survival fractions.
The first 40 rows look like this:
```{r}
surv_rates$survival_tibble %>%
slice(1:40)
```
The second part of the list holds a plot with histograms of age-specific,
observed survival rates across all years. The blue line is the mean over
all years.
```{r, fig.width = 9, fig.height=7.5, out.height=600, out.width=700}
surv_rates$plot_histos_by_age_and_sex
```
To compare these values to the parameter values for the simulation, you must
pass those to the function:
```{r, fig.width = 9, fig.height=7.5, out.height=600, out.width=700}
surv_rates2 <- summarize_survival_from_census(
census = slurped$census_prekill,
fem_surv_probs = SPD$`fem-surv-probs`,
male_surv_probs = SPD$`male-surv-probs`
)
# print the plot
surv_rates2$plot_histos_by_age_and_sex
```
Here, the red dashed line is the value chosen as the parameter for
the simulations. The means are particularly different for the older
age classes, which makes sense because there the total number of
individuals in each of those year classes is smaller.
## The distribution of offspring number
It makes sense to check that your simulation is delivering a reasonable distribution
of offspring per year. This is the number of offspring that survive to just before the
first prekill census. Keep in mind that, for super high-fecundity species, we won't model
every single larva; we just don't start "keeping track of them" until they reach a stage
that is recognizable in some way.
We make this summary from the pedigree information.
In order to get the number of adults that were present, but did not produce any offspring,
we also need to pass in the postkill census information. Also, to get lifetime
reproductive output, we need to know how old individuals were when they died, so
we also pass in the information about deaths.
To make all the summaries, we do:
```{r}
offs_and_mates <- summarize_offspring_and_mate_numbers(
census_postkill = slurped$census_postkill,
pedigree = slurped$pedigree,
deaths = slurped$deaths, lifetime_hexbin_width = c(1, 2)
)
```
Note that we are setting the lifetime reproductive output hexbin width to be suitable
for this example.
The function above returns a list of plots, as follows:
### Age and sex specific number of offspring
```{r, fig.width = 7, fig.height = 5.5}
offs_and_mates$plot_age_specific_number_of_offspring
```
Especially when dealing with viviparous species (like sharks and mammals) it is
worth checking this to make sure that there aren't some females having far too many
offspring.
### Lifetime reproductive output as a function of age at death
Especially with long-lived organisms, it can be instructive to see how lifetime
reproductive output varies with age at death.
```{r, fig.height = 6, fig.width = 7}
offs_and_mates$plot_lifetime_output_vs_age_at_death
```
Yep, many individuals have no offspring, and you have more kids if you live longer.
### Fractional contribution of each age class to the year's offspring
Out of all the offspring born each year, we can tabulate the fraction that
were born to males (or females) of each age. This summary shows a histogram
of those values. These represent the distribution of the fractional contribution
of each age group each year.
```{r, fig.width=7, fig.height=7}
offs_and_mates$plot_fraction_of_offspring_from_each_age_class
```
The blue vertical lines show the means over all years.
## The distribution of the number of mates
Some of the parameters in `spip` affect the distribution of the number
of mates that each individual will have. We can have a quick look
at whether the distribution of number of mates (that produced at least
one offspring) appears to be what we might hope it to be.
```{r}
mates <- count_and_plot_mate_distribution(slurped$pedigree)
```
That gives us a tibble with a summary, like this:
```{r}
head(mates$mate_counts)
```
And also a plot:
```{r, fig.width=7, fig.height=5}
mates$plot_mate_counts
```
## Compiling the related pairs from the samples
From the samples that we slurped up from the spip output we
compile all the related pairs that we found in there with a single
function. It is important to note that this finds all the related pairs
that share ancestors back within `num_generations` generations. Recall that
we ran `slurp_spip()` with `num_generations = 2` which means that we
check for matching ancestors up to and including the grandparents of the sample.
```{r}
crel <- compile_related_pairs(slurped$samples)
```
The result that comes back has a single row for each pair. Within each pair,
the individuals appear such that the first name comes before the second name,
alphabetically. There is information in list columns about the year(s) that each
member of the pair was sampled in and also years in which they were born, and also
the indices of the populations they were sampled from. (Knowing the population
will become useful when/if we start simulating multiple populations connected
by gene flow). Here we show the first 10 pairs in the samples:
```{r}
crel %>%
slice(1:10)
```
Because some pairs might be related in multiple ways (i.e., they might be paternal half-sibs,
but, through their mother's lineage, they might also be half-first cousins), things can
get complicated. However, `CKMRpop` has an algorithm to categorize pairs according to
their "most important" relationship.
The column `dom_relat` gives the "dominant" or "closest" relationship between the pair.
The possibilities, when considering up to two generations of ancestors, are:
- `Se`: self
- `PO`: parent-offspring
- `Si`: sibling
- `GP`: grandparental
- `A`: avuncular (aunt-niece)
- `FC`: first cousin
The `max_hit` column can be interpreted as the number of shared ancestors at the
level of the dominant relationship. For example a pair of half-siblings are of
category `Si` and have `max_hit = 1`, because they share one parent. On the other
hand, a pair with category `Si` and `max_hit = 2` would be full siblings.
Likewise, `A` with `max_hit = 1` is a half-aunt-niece or half-uncle-nephew pair,
while `A` with `max_hit = 2` would be a full-aunt-niece or full-uncle-nephew pair.
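For example, using those two columns we can quickly tally how many of the sibling pairs
are half siblings versus full siblings:
```{r}
crel %>%
  filter(dom_relat == "Si") %>%
  count(max_hit)
```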
The column `dr_hits` gives the number of shared ancestors on the upper vs lower
diagonals of the ancestry match matrices (see below). These are meaningful primarily
for understanding the "directionality" of non-symmetrical relationships. Some explanation
is in order: some relationships, like Se, Si, and FC are _symmetrical_ relationships, because,
if, for example Greta is your sibling, then you are also Greta's sibling. Likewise, if you are
Milton's first cousin, then Milton is also your first cousin. Other relationships, like
PO, A, and GP, are not symmetrical: If Chelsea is your mother, then you are not Chelsea's
mother, you are Chelsea's child. In the non-symmetrical relationships there is always one
member who is typically expected to be older than the other. This is a requirement
in a direct-descent relationship (like parent-offspring, or grandparent-grandchild),
but is not actually required in avuncular relationships (i.e. it is possible to have an
aunt that is younger than the nephew...). We refer to the "typically older" member
of non-symmetrical pairs as the "upper member" and the `upper_member` column of the output
above tells us whether `id_1` or `id_2` is the upper member in such relationships,
when `upper_member` is 1 or 2, respectively. `upper_member` is NA for symmetrical
relationships and it can be 0 for weird situations that should rarely arise where,
for example, a pair A and B is related such that A is B's half-uncle, but B is A's half-aunt.
Often the dominant relationship is the only relationship between the pair. However,
if you want to delve deeper into the full relationship out to `num_generations`
generations, you can analyze the ancestry match matrix for the pair, which is stored in
the `anc_match_matrix` column. This matrix holds a TRUE
for each shared ancestor in the two individuals' ancestries (out to `num_generations`).
If this seems obtuse, it should become more understandable when we look at some figures, later.
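For example, the ancestry match matrix of the first pair can be pulled out of that list
column like this:
```{r}
crel$anc_match_matrix[[1]]
```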
### Tallying relationships
Here, we count up the number of pairs that fall into different relationship types:
```{r}
relat_counts <- count_and_plot_ancestry_matrices(crel)
```
The first component of the return list is a tibble of the relationship counts in a highly summarized form
tabulating just the `dom_relat` and `max_hit` over all the pairs.
```{r}
relat_counts$highly_summarised
```
This is telling us there are 194 half-first-cousin pairs, 130 half-avuncular (aunt/uncle with niece/nephew) pairs,
and so forth.
### A closer look within the dominant relationships
As noted before, the table just lists the dominant relationship of
each pair. If you want to quickly assess, within those dominant categories,
how many specific ancestry match matrices underlie them, you can
look at the `dr_counts` component of the output:
```{r}
relat_counts$dr_counts
```
Each of these distinct ancestry match matrices for each dominant relationship can
be visualized in a series of faceted plots, which are also returned. For example,
the ancestry match matrices seen amongst the FC relationships are:
```{r, fig.width=7, fig.height=7}
relat_counts$dr_plots$FC
```
Within each dominant relationship, the distinct ancestry matrices in each
separate panel are named according to their number
(`001`, `002`, `003`, etc.), the relationship and `dr_hits` vector
(e.g., `FC[1,2]`), and the number of times this ancestry match matrix
was observed amongst pairs in the sample (e.g., ` - 4`). So `006-FC[1,1] - 24` was
observed in 24 of the sampled pairs.
It is worth pointing out that the `014-FC[2,2] - 2` plot shows two pairwise relationships
in which individual 2 is inbred, because its father's father (pp) and mother's father
(mp) are the same individual.
Let's look at the distinct ancestry match matrices from the siblings:
```{r, fig.width=7, fig.height=7}
relat_counts$dr_plots$Si
```
Here in `005` we see an interesting case where ind_1 and ind_2 are maternal half sibs,
but individual 2's father is also his/her uncle.
### Tallying all the ancestry match matrices
At times, for example, when looking for the more bizarre relationships,
you might just want to visualize all the ancestry match matrices in order
of the number of times that they occur. The number of different ancestry
match matrices (and the matrices themselves) can be accessed with:
```{r}
relat_counts$anc_mat_counts
```
But more useful for visualizing things is `relat_counts$anc_mat_plots`, which is a list that
holds a series of pages/plots showing all the different
ancestry matrices seen. Here are the first 30:
```{r, fig.width=7, fig.height=7}
relat_counts$anc_mat_plots[[1]]
```
And here are the remaining 14 relationship types:
```{r, fig.width=7, fig.height=7}
relat_counts$anc_mat_plots[[2]]
```
These are worth staring at for a while, and making sure you understand what
they are saying. I spent a lot of time staring at these, which is how I
settled upon a decent algorithm for identifying the dominant relationship
in each.
## A Brief Digression: downsampling the sampled pairs
When using `spip` within `CKMRpop` you have to specify the fraction of
individuals in the population that you want to sample at any particular time.
You must set those fractions so that, given the population size, you end up with
roughly the correct number of samples for the situation you are trying to
simulate. Sometimes, however, you might want to have sampled exactly 5,000
fish. Or some other number. The function `downsample_pairs` lets you randomly
discard specific instances in which an individual was sampled so that the
number of individuals (or sampling instances) that remains is the exact number
you want.
For example, looking closely at `slurped$samples` shows that `r nrow(slurped$samples)` distinct individuals were sampled:
```{r}
nrow(slurped$samples)
```
However, those `r nrow(slurped$samples)` individuals represent multiple distinct sampling instances, because
some individuals may be sampled twice, as, in this simulation scenario,
sampling the individuals does not remove them from the population:
```{r}
slurped$samples %>%
mutate(ns = map_int(samp_years_list, length)) %>%
summarise(tot_times = sum(ns))
```
Here are some individuals that were sampled multiple times:
```{r}
SS2 <- slurped$samples %>%
filter(map_int(samp_years_list, length) > 1) %>%
select(ID, samp_years_list)
SS2
```
And the years that the first two of those individuals were sampled are as follows:
```{r}
# first indiv:
SS2$samp_years_list[[1]]
# second indiv:
SS2$samp_years_list[[2]]
```
Great! Now, imagine that we wanted to see how many kin pairs we found when
our sampling was such that we had only 100 instances of sampling (i.e., it could
have been 98 individuals sampled in total, but two of them were sampled in
two different years). We do that like so:
```{r}
subsampled_pairs <- downsample_pairs(
S = slurped$samples,
P = crel,
n = 100
)
```
Now there are only `r nrow(subsampled_pairs$ds_pairs)` pairs
instead of `r nrow(crel)`.
We can do a little calculation to see if that makes sense: because the number of pairs
varies roughly quadratically with the number of samples, we would expect the number of
pairs to decrease by roughly the square of the ratio of the sample sizes:
```{r}
# num samples before downsampling
ns_bd <- nrow(slurped$samples)
# num samples after downsampling
ns_ad <- nrow(subsampled_pairs$ds_samples)
# ratio of sample sizes
ssz_rat <- ns_ad / ns_bd
# square of the ratio
sq_rat <- ssz_rat ^ 2
# ratio of number of pairs found amongst samples
num_pairs_before <- nrow(crel)
num_pairs_after_downsampling <- nrow(subsampled_pairs$ds_pairs)
ratio <- num_pairs_after_downsampling / num_pairs_before
# compare these two things
c(sq_rat, ratio)
```
That checks out.
## Uncooked Spaghetti Plots
Finally, in order to visually summarize all the kin pairs that were found,
with specific reference to their age, time of sampling, and sex, I find it
helpful to use what I have named the "Uncooked Spaghetti Plot". There are multiple
subpanels on this plot. Here is how to read/view these plots:
- Each row of subpanels is for a different dominant relationship, going from
closer relationships near the top and more distant ones further down. You can
find the abbreviation for the dominant relationship at the right edge of the panels.
- In each row, there are four subpanels: `F->F`, `F->M`, `M->F`, and `M->M`. These
refer to the different possible combinations of sexes of the individuals in the pair.
+ For the non-symmetrical relationships these are naturally defined with the
first letter (`F` for female or `M` for male) denoting the sex of the "upper_member"
of the relationship. That is, if it is PO, then the sex of the parent is the first letter.
The sex of the non-upper-member is the second letter. Thus a `PO` pair that consists of
a father and a daughter would appear in a plot that is in the `PO` row in the `M->F` column.
+ For the symmetrical relationships, there isn't a comparably natural way of
ordering the individuals' sexes for presentation. For these relationships, the
first letter refers to the sex of the individual that was sampled in the earliest
year. If both individuals were sampled in the same year, and they are of different
sexes, then the female is considered the first one, so those all go on the `F->M` subpanel.
- On the subpanels, each straight line (i.e., each piece of uncooked spaghetti) represents
a single kin pair. The two endpoints represent the year/time of sampling (on the x-axis)
and the age of the individual when it was sampled (on the y-axis) of the two members of
the pair.
+ If the relationship is non-symmetrical, then the line is drawn as an arrow pointing
from the upper member to the lower member.
+ The color of the line gives the number of shared ancestors (`max_hit`) at the level
of the dominant relationship. This is how you can distinguish full-sibs from half-sibs, etc.
We crunch out the data and make the plot like this:
```{r}
# because we jitter some points, we can set a seed to get the same
# result each time
set.seed(22)
spag <- uncooked_spaghetti(
Pairs = crel,
Samples = slurped$samples
)
```
Now, the plot can be printed like so:
```{r, fig.width=7.5, fig.height=9.5}
spag$plot
```
## Identifying connected components
One issue that arises frequently in CKMR is the concern (especially in small
populations) that the pairs of related individuals are not independent. The
simplest way in which this occurs is when, for example, A is a half-sib of B,
but B is also a half-sib of C, so that the pairs A-B and B-C share the
individual B. These sorts of dependencies can be captured quickly by thinking
of individuals as vertices and relationships between pairs of individuals as
edges, which defines a _graph_. Finding all the connected components of such a graph
provides a nice summary of all those pairs that share members and hence are certainly
not independent.
The `CKMRpop` package provides the connected component of this graph for every
related pair discovered. This is in column `conn_comp` of the output from
`compile_related_pairs()`. Here we can see it from our example, which shows that
the first 10 pairs all belong to the same connected component, 1.
```{r}
crel %>%
slice(1:10)
```
It should clearly be noted that the size of the connected
components will be affected by the size of the population (with smaller populations,
more of the related pairs will share members) and the number of generations back in time
over which relationships are compiled (if you go back far enough in time, all the pairs
will be related to one another). In our example case, with a small population (so it can
be simulated quickly for building the vignettes) and going back `num_generations = 2`
generations (thus including grandparents and first cousins, etc.) we actually find that
_all_ of the pairs are in the same connected component. Wow!
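You can verify that directly by tabulating the `conn_comp` column, which here yields a
single component containing every pair:
```{r}
crel %>%
  count(conn_comp)
```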
Because this simulated population is quite small, at this juncture we will reduce
the number of generations so as to create more connected components amongst these pairs
for illustration. So, let us compile just the pairs with `num_generations = 1`. To
do this, we must slurp up the spip results a second time:
```{r, echo=FALSE, results='hide', message=FALSE}
# NOTE the following if()...else() blocks are here
# to test whether spip has been installed yet.
# If spip is not available (for example, on CRAN's build machines) this
# is noted and stored package data are used for the variable
# "slurped" to build the remainder of the vignette.
if(spip_exists()) {
# read the spip output in and find relatives within the parental range
slurped_1gen <- slurp_spip(spip_dir, num_generations = 1)
} else {
message("Using stored package data for 1gen results because spip is not installed")
slurped_1gen <- species_1_slurped_results_1gen
}
```
```{r, eval=FALSE}
slurped_1gen <- slurp_spip(spip_dir, num_generations = 1)
```
And after we have done that, we can compile the related pairs:
```{r}
crel_1gen <- compile_related_pairs(slurped_1gen$samples)
```
Look at the number of pairs:
```{r}
nrow(crel_1gen)
```
That is still a lot of pairs, so let us downsample to 150 samples so that our figures
are not overwhelmed by connected components.
```{r}
set.seed(10)
ssp_1gen <- downsample_pairs(
S = slurped_1gen$samples,
P = crel_1gen,
n = 150
)
```
And also tally up the number of pairs in different connected components:
```{r}
ssp_1gen$ds_pairs %>%
count(conn_comp) %>%
arrange(desc(n))
```
There are some rather large connected components there. Let's plot them.
```{r, fig.width=6, fig.height=6}
# for some reason, the aes() function gets confused unless
# ggraph library is loaded...
library(ggraph)
one_gen_graph <- plot_conn_comps(ssp_1gen$ds_pairs)
one_gen_graph$plot
```
Note that if you want to attach labels to those nodes, to see which individuals
we are talking about, you can do this (and also adjust colors...):
```{r, fig.width=6, fig.height=6}
one_gen_graph$plot +
ggraph::geom_node_text(aes(label = name), repel = TRUE, size = 1.2) +
scale_edge_color_manual(values = c(`PO-1` = "tan2", `Si-1` = "gold", `Si-2` = "blue"))
```
And, for fun, look at it with 2 generations and all of the samples:
```{r, fig.width=6, fig.height=6}
plot_conn_comps(crel)$plot
```
What a snarl! With a small population, several generations, and large samples,
in this case...everyone is connected!
## Simulating Genotypes
We can simulate the genotypes of the sampled individuals at unlinked
markers that have allele frequencies (amongst the founders) that we specify.
We provide the desired allele frequencies in a list. Here we simulate
uniformly distributed allele frequencies at 100 markers, each with
a random number of alleles that is 1 + Poisson(3):
```{r}
set.seed(10)
freqs <- lapply(1:100, function(x) {
nA = 1 + rpois(1, 3)
f = runif(nA)
f/sum(f)
})
```
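We can glance at the first element as a quick sanity check; each vector of frequencies
should sum to 1:
```{r}
freqs[[1]]
sum(freqs[[1]])
```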
Then run spip with those allele frequencies:
```{r, echo=FALSE, results='hide', message=FALSE}
# now we can run spip with those as input
if(spip_exists()) {
message("spip is installed and will be used")
set.seed(5)
spip_dir <- run_spip(
pars = SPD,
allele_freqs = freqs
)
# now read that in and find relatives within the grandparental range
slurped <- slurp_spip(spip_dir, 2)
} else {
message("Using stored package data because spip is not installed")
slurped <- species_1_slurped_results_100_loci
}
```
```{r, eval=FALSE}
set.seed(5)
spip_dir <- run_spip(
pars = SPD,
allele_freqs = freqs
)
# now read that in and find relatives within the grandparental range
slurped <- slurp_spip(spip_dir, 2)
```
Now, the variable `slurped$genotypes` has the genotypes we requested.
The first column (`ID`) is the ID of the individual (congruent with the `ID`
column in `slurped$samples`) and the remaining columns are for the markers.
Each locus occupies one column and the alleles are separated by a slash.
Here are the first 10 individuals at the first four loci:
```{r}
slurped$genotypes[1:10, 1:5]
```
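If you would like those genotypes in a long format (one row per individual and locus) for
downstream processing, a sketch like the following should work; it assumes only that the
first column is `ID` and that the two alleles are separated by a slash, as described above,
and it is not a converter provided by the package:
```{r, eval=FALSE}
long_genos <- slurped$genotypes %>%
  pivot_longer(-ID, names_to = "locus", values_to = "genotype") %>%
  separate(genotype, into = c("allele_1", "allele_2"), sep = "/")
long_genos
```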
/scratch/gouwar.j/cran-all/cranData/CKMRpop/inst/doc/species_1_simulation.Rmd
---
title: "About spip"
resource_files:
- ../man/figures/spip-periods.svg
- ../man/figures/spip-periods-with-sampling.svg
- ../man/figures/spip-periods-with-migration.svg
author: "Eric C. Anderson"
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
toc: true
vignette: >
%\VignetteIndexEntry{About spip}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
This provides a short description of a few things that one
should know about how `spip` works.
## Timing of events in a `spip` life cycle
`spip` is a program that records time in discrete periods that can be
thought of as years. When individuals are born at time $t$ they are considered
to be of age 0 and they have a birth year of $t$. The user must input to the program
the maximum possible age of an individual. Here, we will refer to that
maximum age as $\mathrm{MA}$.
Within each year in `spip` the following events occur, in order:
1. The age of each individual is incremented at the beginning of the new year $t$. This
means that newborns from year $t-1$ are turned into 1-year-olds as they enter year $t$,
and so forth.
1. Next there is a chance to sample individuals before the death episode. Only individuals
of ages 1 to $\mathrm{MA}$ can be sampled. These are the individuals who, if they
survive the upcoming death episode, might have the chance to contribute to the
next generation.
1. Next there is an episode of death. Individuals that are $a$ years old in year $t$
survive this episode of death with probability $s_a$. Individuals that are
$\mathrm{MA} + 1$ years old are all killed with probability 1.
1. Then there is a chance to sample individuals after the episode of death.
Only individuals of ages 1 to $\mathrm{MA}$ can be sampled. This is a sample
from the individuals that are around and alive to contribute to the next generation.
1. Next there is an episode of reproduction which produces 0-year-olds born at time
$t$.
- During the reproduction episode there is an opportunity
to draw a sample from the individuals that were chosen to
be amongst the reproducing adults. (This is particularly useful for critters like
salmon that can be sampled explicitly when they are migrating in fresh water to
reproduce).
- After the sampling period during reproduction, there is also the chance for
individuals to die after reproduction according to the probability
set by `--fem-postrep-die` and `--male-postrep-die`. With this value set
to 1, for example, semelparity can be enforced.
1. After reproduction there are individuals of ages 0 to $\mathrm{MA}$ that
sit around and don't do much of anything. Eventually the year gets advanced
to the following one ($t+1$), and the ages of individuals get incremented, so that
at the beginning of year $t+1$ individuals are of ages 1 to $\mathrm{MA}+1$.
It is worth noting that even though there are some $\mathrm{MA}+1$-year-olds around
at the beginning of each time period, they all die
during the episode of death, and, because they cannot be sampled during the
sampling episode before death, it is like they do not exist.
The year in spip can thus be divided into three different periods between
the demographic events/episodes:
1. The period before the episode of death. This is known as the _prekill_
period of the year, and, in the following, we will use a
superscript $\mbox{}^\mathrm{pre}$ to denote census sizes during that period.
2. The period after the episode of death, but before reproduction. This
is known as the _postkill_ period, and will be denoted with a superscript
$\mbox{}^\mathrm{pok}$.
3. The period after reproduction, but before the year gets incremented. This
is the _post-reproduction_ period and will be denoted with a superscript
$\mbox{}^\mathrm{por}$.
We will use $F_{t,a}^\mathrm{pre}$, $F_{t,a}^\mathrm{pok}$, and $F_{t,a}^\mathrm{por}$
to denote the number of $a$-year-old females during the
prekill, postkill, and post-reproduction periods, respectively, of time $t$.
The diagram below, showing these numbers in relation to one another,
along with notations of their expected values
should help users to understand the spip annual cycle.
The numbers of males change across time periods in a similar fashion.
```{r, echo=FALSE, out.width="100%"}
knitr::include_graphics("../man/figures/spip-periods.svg")
```
Annotated in the above are the three distinct periods in spip's annual cycle:
the _prekill_, _postkill_ and _post-reproductive_ periods. When the output
of spip is slurped up by CKMRpop, these numbers become the tibbles `census_prekill`, `census_postkill`,
and `census_postrepro` (elements of the output list of `slurp_spip()`), respectively.
The expected numbers of individuals after each transition are as follows (a quick numeric check follows the list):
- $F_{t,a}^\mathrm{pre} \equiv F_{t-1,a-1}^\mathrm{por}~~,~~a = 1,\ldots,\mathrm{MA}$
- $E[F_{t,a}^\mathrm{pok}] = s_a^F F_{t,a}^\mathrm{pre}~~,~~a = 1,\ldots,\mathrm{MA}$, where $s_a^F$ is the probability that an $a-1$ year old female survives to be an $a$ year old female, as given with the `--fem-surv-probs` option.
- $E[F_{t,a}^\mathrm{por}] = (1 - r_a^Fd_a^F) F_{t,a}^\mathrm{pok}~~,~~a = 1,\ldots,\mathrm{MA}$, where $r_a^F$ is the probability that an $a$-year-old
female reproduces (set with the `--fem-prob-repro` option) and $d_a^F$ is the
probability that an $a$-year-old female will die after engaging in reproduction
(even if no offspring were actually produced!), as given
in the `--fem-postrep-die` option. This is an additional source of death
that is useful for modeling anadromous species whose reproductive journey
incurs a substantial cost.
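These relationships can be checked with simple arithmetic. Here is a toy calculation (the numbers are invented purely for illustration, not taken from any simulation) applying the two expectations above to a single age class:
```r
# made-up numbers for one age class of females
F_pre <- 100    # prekill census size
s_F   <- 0.8    # survival probability (--fem-surv-probs)
r_F   <- 1.0    # probability of engaging in reproduction (--fem-prob-repro)
d_F   <- 0.25   # probability of dying after reproduction (--fem-postrep-die)
E_pok <- s_F * F_pre                 # expected postkill size: 80
E_por <- (1 - r_F * d_F) * E_pok     # expected post-reproduction size: 60
c(E_pok, E_por)
```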
## Sampling episodes in a `spip` annual cycle
The two main sampling schemes available in spip are keyed to these different
time periods within the spip annual cycle as shown by the following figure:
```{r, echo=FALSE, out.width="100%"}
knitr::include_graphics("../man/figures/spip-periods-with-sampling.svg")
```
Thus, `--gtyp-ppn-fem-pre` and `--gtyp-ppn-male-pre` involve sampling
from the simulated population at a different point in the year than
do the `--gtyp-ppn-fem-post` and `--gtyp-ppn-male-post` options.
It is also possible to only sample those individuals that are trying to
reproduce in a certain year using a third sampling scheme requested with the
`--gtyp-ppn-fem-dur` and `--gtyp-ppn-male-dur` options
to spip. The probability that an individual would try to reproduce
in a given year is age specific and is set using the `--fem-prob-repro`
and `--male-prob-repro` options.
It is worth noting that the `pre`, `post`, and `dur` sampling options all
occur relatively independently (so long as sampling is not lethal---see the
somewhat experimental `--lethal-sampling` option). spip reports the different
years when an individual is sampled during the `pre`, `post`, and `dur`
periods in the year. CKMRpop preserves those times in separate lists
when it slurps up the spip output. For example `slurped$samples` has the
list columns: `samp_years_list_pre`, `samp_years_list_post`, and `samp_years_list_dur`.
For all downstream analyses, CKMRpop uses the list column `samp_years_list`, which,
by default, is the same as `samp_years_list_post`. This means that, at the present
time, you should use the options to sample individuals after the episode of death,
using the `--gtyp-ppn-fem-post` and `--gtyp-ppn-male-post` options.
Note that, in most cases when exploring CKMR, the user will want to
use the `--gtyp-ppn-fem-post` and `--gtyp-ppn-male-post` options, anyway, because
those are samples from the adult population that are available for reproduction.
If it is desired to sample all newborns at time $t$, then currently the way to do
that is to sample 1-year-olds at time $t+1$ using the
`--gtyp-ppn-fem-pre` and `--gtyp-ppn-male-pre` options. However, it would take some
extra finagling to get those sampling years into the `samp_years_list` column
referenced above for the downstream analyses.
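If you do want to combine them, a quick sketch of that finagling might look like the following. (This is not a CKMRpop function; it simply merges the list columns described above, using a `slurped` object returned by `slurp_spip()`, and assumes the columns are named as shown.)
```r
library(dplyr)
library(purrr)
# replace samp_years_list with the union of the pre- and postkill sampling
# years for each sampled individual
slurped$samples <- slurped$samples %>%
  mutate(
    samp_years_list = map2(
      samp_years_list_pre, samp_years_list_post,
      ~ sort(unique(c(.x, .y)))
    )
  )
```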
TODO ITEM: combine sampling at all times into the single `samp_years_list` column, perhaps,
or make it easier for users to decide how to combine those different sampling
episodes. For now, though, users should stick to using the
`--gtyp-ppn-fem-post` and `--gtyp-ppn-male-post` options.
## How inter-population migration occurs in `spip`
We can use the same diagrams developed above to describe how migration is implemented
in `spip`. Migration in `spip` is a "two-stage" phenomenon: in the first stage,
individuals leave a population with sex-, year- and age-specific out-migration rates
specified with the population's options `--fem-prob-mig-out` and
`--male-prob-mig-out`.
They leave each population before the prekill census occurs and also before the prekill
sampling occurs. Diagrammatically,
it looks like this:
```{r, echo=FALSE, out.width="100%", fig.cap="Schematic describing the first stage of migration: migration out of a population. Each blue line shows individuals leaving the population and entering a pool of migrants."}
knitr::include_graphics("../man/figures/spip-periods-with-migration.svg")
```
The expected number of individuals in the pool of migrants who have left the population
is determined by the time- and age-specific rates set by the user. We will denote the outmigration
rate for age $a$ individuals at time $t$ from a given population by $m^\mathrm{out}_{t,a}$.
It follows then that, for this given population:
$$
E[F^\mathrm{out}_{t,a}] = m^\mathrm{out}_{t,a}F_{t-1,a-1},~a=1,\ldots, \mathrm{MA}.
$$
In the following, we will want to refer to these outmigration rates for each
population, so we may also adorn the notation, thus:
$$
E[F^{\mathrm{out},i}_{t,a}] = m^{\mathrm{out},i}_{t,a}F^i_{t-1,a-1},~a=1,\ldots, \mathrm{MA}.
$$
to refer to rates and sizes specifically for population $i$.
After the outmigration stage, each population has a pool of migrants that are waiting
to migrate into other populations. The rates by which this happens are specified
with the `--fem-prob-mig-in` and `--male-prob-mig-in`
options. These options set in-migration rates for different years and
for different ages, effectively setting the fraction of the total number
of out-migrated individuals from population $i$ of age $a$ at time $t$,
$F^{\mathrm{out},i}_{t,a}$, that will migrate into the other populations. Thus,
there is one number to set for each population. For example, if
there are $K$ populations, we would have:
$$
m^{\mathrm{in},i}_{t,a} = [m^{\mathrm{in},i}_{t,a,1},\ldots,m^{\mathrm{in},i}_{t,a,K}]~~,~~
\sum_{j=1}^K m^{\mathrm{in},i}_{t,a,j} = 1.
$$
The probability of migrating back to the population from whence one came
is always 0. So, even if the user sets that to some non-zero value,
it will be forced to zero and the values of the remaining in-migration rates will
be re-scaled so as to sum to 1.
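As a toy example of that re-scaling (not package code), suppose there are three populations and we are setting the in-migration fractions for the third one:
```r
m_in_raw <- c(0.10, 0.20, 0.70)   # user-supplied fractions for pops 1, 2, 3
m_in_raw[3] <- 0                  # returning to the home population is forced to 0
m_in <- m_in_raw / sum(m_in_raw)  # re-scaled to sum to 1: 1/3, 2/3, 0
m_in
```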
Given this setup, the expected number of individuals from the
outmigrant pool from population $i$ that will arrive in population $j$,
of age $a$ at time $t$ is
$$
E[F^{\mathrm{in},i}_{t,a,j}] = m^{\mathrm{in},i}_{t,a,j} F^{\mathrm{out},i}_{t,a}
$$
And, so we can also write that entirely in terms of current population sizes
and migration rates:
$$
E[F^{\mathrm{in},i}_{t,a,j}] = m^{\mathrm{in},i}_{t,a,j} m^{\mathrm{out},i}_{t,a}F^i_{t-1,a-1}
$$
So, this whole system of specifying migrants is a little more complex than
a system whereby the user specifies the fraction of individuals in population
$j$ that originated from population $i$. But, it does provide a lot more control
by the user, as well as realism, in that the number of migrants into a population
depends on the size of the donor population.
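A small numerical illustration of these expectations (with made-up numbers) may be helpful:
```r
F_prev <- 500    # age a-1 females in population i at the end of year t-1
m_out  <- 0.20   # out-migration rate for age-a females at time t
m_in_j <- 0.80   # fraction of pop i's migrant pool that enters population j
m_out * F_prev             # expected size of the outmigrant pool: 100
m_in_j * m_out * F_prev    # expected number arriving in population j: 80
```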
|
/scratch/gouwar.j/cran-all/cranData/CKMRpop/vignettes/about_spip.Rmd
|
---
title: "A Simple Example With Migration"
author: "Eric C. Anderson"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{simple-example-with-migration}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
In this vignette we show how to use CKMRpop/spip in a
multi-population version with asymmetrical migration rates. It would
be good to read the "about_spip" vignette beforehand to understand how
migration is implemented in spip.
Our goal here is to model three "populations" of a fish species in which
migration occurs during the larval stage, and it is influenced by
currents that flow down the coast and make migration occur primarily in
one direction. Each of the populations will have the same life tables,
etc. We will make it simple.
First load some packages:
```{r, message=FALSE}
library(tidyverse)
library(CKMRpop)
```
## Basic Demography
The basic demography for each population is given below. We assume no
mortality from newborn to age one. In effect, we are only modeling the
newborns that survive to age 1. Only 3-, 4-, and 5-year-olds mate,
and older fish have somewhat more reproductive success.
```{r}
pars <- list()
pars$`max-age` <- 5
pars$`fem-surv-probs` <- c(1, 0.7, 0.8, 0.8, 0.8)
pars$`male-surv-probs` <- c(1, 0.7, 0.8, 0.8, 0.8)
pars$`fem-prob-repro` <- c(0, 0, 1, 1, 1)
pars$`male-prob-repro` <- c(0, 0, 1, 1, 1)
pars$`fem-asrf` <- c(0, 0, .5, .7, 1)
pars$`male-asrp` <- c(0, 0, .5, .7, 1)
pars$`fem-rep-disp-par` <- 0.25
pars$`male-rep-disp-par` <- 0.25
pars$`sex-ratio` <- 0.5
```
We will run these simulations for 25 years and just assume
an annual cohort size of 250 fish in each population, and we will
go for a constant population size and start off with the stable
age distribution.
```{r}
pars$`number-of-years` <- 25
# given cohort sizes of 250 the stable age distribution
# can be found and used for the initial number of indivs
L <- leslie_from_spip(pars, 250)
# then we add those to the spip parameters
pars$`initial-males` <- floor(L$stable_age_distro_male)
pars$`initial-females` <- floor(L$stable_age_distro_fem)
pars$`cohort-size` <- "const 250"
```
## Sampling
We will simulate a sampling program in which juveniles are sampled
when they recruit after their larval phase. We assume that the larval
phase starts at birth, but is over by age 1. So we can sample new
recruits via prekill sampling at age 1. We will pretend the sampling
is non-lethal. We will also non-lethally sample a fraction of the older
age-class fish.
```{r}
pars$`discard-all` <- 0
pars$`gtyp-ppn-fem-pre` <- "10-25 0.05 0.00 0.00 0.00 0.00"
pars$`gtyp-ppn-male-pre` <- "10-25 0.05 0.00 0.00 0.00 0.00"
pars$`gtyp-ppn-fem-post` <- "10-25 0.00 0.01 0.01 0.01 0.01"
pars$`gtyp-ppn-male-post` <- "10-25 0.00 0.01 0.01 0.01 0.01"
```
OK! At this point, we have set up the demography for a single population. Let's just
show that we could simulate a single population with that demography and the sampling
like this:
```{r, eval=FALSE}
set.seed(15)
one_pop_dir <- run_spip(pars = pars, num_pops = 1)
```
But, of course, the question is, how do we simulate multiple, interacting populations?
## Simulating Multiple Populations
To simulate multiple populations in spip, from within CKMRpop, we set the
`num_pops` option of `run_spip()` to the number of populations.
And then, for the `pars` option we pass a _list_ of `num_pops`
different lists, each one holding the demography and sampling information
for each of the populations in the simulation. In our case, since
we are simulating three populations with the same demography,
`num_pops = 3` and we can construct our `pars` argument like this:
```{r}
pars_list <- list(
pars,
pars,
pars
)
```
because each population has the same demography.
Note that we have not included any migration between the populations yet,
but we can still run a simulation with all of them like this:
```{r, echo=FALSE, results='hide', message=FALSE}
# NOTE the following if()...else() blocks are here
# to test whether spip has been installed yet.
# If spip is not available (for example, on CRAN's build machines) this
# is noted and stored package data are used for the variable
# "slurped" to build the remainder of the vignette.
if(spip_exists()) {
message("spip is installed and will be used")
set.seed(15)
three_pop_dir <- run_spip(
pars = pars_list,
num_pops = 3
)
slurp3 <- slurp_spip(three_pop_dir, num_generations = 1)
} else {
message("Using stored package data because spip is not installed")
slurp3 <- three_pops_no_mig_slurped_results
}
```
```{r, eval=FALSE}
set.seed(15)
three_pop_dir <- run_spip(
pars = pars_list,
num_pops = 3
)
slurp3 <- slurp_spip(three_pop_dir, num_generations = 1)
```
And you can investigate that output by making the plots and other
summaries shown in the main vignette. Many of these summaries account
for the multiple populations like this:
```{r, fig.width=5, out.width = '100%'}
ggplot_census_by_year_age_sex(slurp3$census_postkill)
```
Those are the postkill census sizes in the three different populations, which get the labels
0, 1, and 2, according to the order in which they were passed to spip (i.e., their order
in the `pars_list`, above.)
## Migration between multiple populations
The above simulated multiple populations, but did not simulate any migration between them.
Here, we will imagine different rates of self-recruitment of the larvae, which means different rates
at which surviving larvae are going to be migrants.
The populations are labeled 0, 1, and 2, and, as stated above, we will simulate
somewhat unidirectional migration between them, like so:
- 20% of pop 0 individuals are migrants; 80% of those end up in pop 1 and 20% in pop 2
- 15% of pop 1 indivs are migrants; 95% of them end up in pop 2 and 5% in pop 0
- 5% of pop 2 are migrants; 90% of those end up in pop 1 and 10% make it to pop 0.
The way we implement this is by adding options to each population's demography parameters
like so:
```{r}
# out-migration rates pops 0, 1, 2
pars_list[[1]]$`fem-prob-mig-out` <- "5-25 1 .20"
pars_list[[1]]$`male-prob-mig-out` <- "5-25 1 .20"
pars_list[[2]]$`fem-prob-mig-out` <- "5-25 1 .15"
pars_list[[2]]$`male-prob-mig-out` <- "5-25 1 .15"
pars_list[[3]]$`fem-prob-mig-out` <- "5-25 1 .05"
pars_list[[3]]$`male-prob-mig-out` <- "5-25 1 .05"
# in-migration rates
pars_list[[1]]$`fem-prob-mig-in` <- "5-25 1 0.00 0.80 0.20"
pars_list[[1]]$`male-prob-mig-in` <- "5-25 1 0.00 0.80 0.20"
pars_list[[2]]$`fem-prob-mig-in` <- "5-25 1 0.05 0.00 0.95"
pars_list[[2]]$`male-prob-mig-in` <- "5-25 1 0.05 0.00 0.95"
pars_list[[3]]$`fem-prob-mig-in` <- "5-25 1 0.10 0.90 0.00"
pars_list[[3]]$`male-prob-mig-in` <- "5-25 1 0.10 0.90 0.00"
```
Now, we can simulate this:
```{r, echo=FALSE, results='hide', message=FALSE}
# NOTE the following if()...else() blocks are here
# to test whether spip has been installed yet.
# If spip is not available (for example, on CRAN's build machines) this
# is noted and stored package data are used for the variable
# "slurped" to build the remainder of the vignette.
if(spip_exists()) {
message("spip is installed and will be used")
set.seed(15)
mig_dir <- run_spip(
pars = pars_list,
num_pops = 3
)
slurp_mig <- slurp_spip(mig_dir, num_generations = 1)
} else {
message("Using stored package data because spip is not installed")
slurp_mig <- three_pops_with_mig_slurped_results
}
```
```{r, eval=FALSE}
set.seed(15)
mig_dir <- run_spip(
pars = pars_list,
num_pops = 3
)
slurp_mig <- slurp_spip(mig_dir, num_generations = 1)
```
Now, the object `slurp_mig` holds information about census sizes of the populations
and the relationships between samples, etc. That can be broken down by populations.
First, a record of every migration event is stored in `slurp_mig$migrants`. This can
be easily summarized:
```{r}
slurp_mig$migrants %>%
count(age, from_pop, to_pop)
```
For a further example, to tabulate parent-offspring pairs found between different
populations, we can compile the relationships and summarize:
```{r}
# compile relationships
crel <- compile_related_pairs(slurp_mig$samples)
# count number of PO pairs by which populations
# the members were born in
crel %>%
filter(dom_relat == "PO") %>%
mutate(
parent_born_pop = case_when(
upper_member == 1 ~ born_pop_1,
upper_member == 2 ~ born_pop_2,
TRUE ~ NA_integer_
),
child_born_pop = case_when(
upper_member == 1 ~ born_pop_2,
upper_member == 2 ~ born_pop_1,
TRUE ~ NA_integer_
)
) %>%
count(parent_born_pop, child_born_pop)
```
|
/scratch/gouwar.j/cran-all/cranData/CKMRpop/vignettes/simple-example-with-migration.Rmd
|
---
title: "Simulation from species 1 life history"
output:
rmarkdown::html_vignette:
toc: true
vignette: >
%\VignetteIndexEntry{species_1_simulation}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup, message=FALSE}
library(tidyverse)
library(CKMRpop)
```
For this first example, we use the hypothetical life history of species 1.
First we have to set spip up to run with that life history.
## Setting the spip parameters
`spip` has a large number of demographic parameters. Typically `spip` is run as
a command-line program in Unix. In CKMRpop, all that action goes on under the
hood, but you still have to use the `spip` parameters. This vignette is not about
using `spip`. For a short listing of all the `spip` options, do this:
```r
library(CKMRpop)
spip_help()
```
If you want a full, complete, long listing of all the `spip` options, then you
can do:
```r
library(CKMRpop)
spip_help_full()
```
All of the "long-form" options to `spip` are given on the Unix command line starting
with two dashes, like `--fem-surv-probs`. To set parameters within `CKMRpop` to send
to spip, you simply make a named list of input values. The names of the items in the
list are the long-format option names _without the leading two dashes_. For an example,
see the package data object `species_1_life_history`, as described below.
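For instance, a minimal, made-up fragment of such a list (not the full species 1 life history) would look like this:
```r
# the spip option --max-age becomes the list element `max-age`, and so on
demo_pars <- list(
  `max-age` = 3,
  `fem-surv-probs` = c(1, 0.6, 0.4),
  `male-surv-probs` = c(1, 0.6, 0.4)
)
```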
### Basic life history parameters
These parameters are included in the package in the variable `species_1_life_history`.
It is a named list of parameters to send to spip. The list names are the names of the
spip options. It looks like this:
```{r}
species_1_life_history
```
We want to add instructions to those, telling spip how long to run the simulation,
and what the initial census sizes should be.
So, first, we copy `species_1_life_history` to a new variable, `SPD`:
```{r}
SPD <- species_1_life_history
```
Now, we can add things to SPD.
### Setting Initial Census, New Fish per Year, and Length of Simulation
The number of new fish added each year is called the "cohort-size". Once we know
that, we can figure out what the stable age distribution would be given the survival
rates, and we can use that as our starting point. There is a function
in the package that helps with that:
```{r}
# before we tell spip what the cohort sizes are, we need to
# tell it how long we will be running the simulation
SPD$`number-of-years` <- 100 # run the sim forward for 100 years
# this is our cohort size
cohort_size <- 300
# Do some matrix algebra to compute starting values from the
# stable age distribution:
L <- leslie_from_spip(SPD, cohort_size)
# then we add those to the spip parameters
SPD$`initial-males` <- floor(L$stable_age_distro_male)
SPD$`initial-females` <- floor(L$stable_age_distro_fem)
# tell spip to use the cohort size
SPD$`cohort-size` <- paste("const", cohort_size, collapse = " ")
```
### Specifying the fraction of sampled fish, and in different years
spip lets you specify what fraction of fish of different ages should be
sampled in different years. Here we do something simple, and instruct
spip to sample 3% of the fish of ages 1, 2, and 3 (after the episode of
death, see the spip vignette...) every year from year 50 to 75.
```{r}
samp_frac <- 0.03
samp_start_year <- 50
samp_stop_year <- 75
SPD$`discard-all` <- 0
SPD$`gtyp-ppn-fem-post` <- paste(
samp_start_year, "-", samp_stop_year, " ",
samp_frac, " ", samp_frac, " ", samp_frac, " ",
paste(rep(0, SPD$`max-age` - 3), collapse = " "),
sep = ""
)
SPD$`gtyp-ppn-male-post` <- SPD$`gtyp-ppn-fem-post`
```
## Running spip and slurping up the results
There are two functions that do all this for you. The function `run_spip()` runs spip in
a temporary directory. After running spip, it also processes the output
with a few shell scripts. The function returns the path to the temporary
directory. You pass that temporary directory path into the function `slurp_spip()`
to read the output back into R. It looks like this:
```{r, echo=FALSE, results='hide', message=FALSE}
# NOTE the following if()...else() blocks are here
# to test whether spip has been installed yet.
# If spip is not available (for example, on CRAN's build machines) this
# is noted and stored package data are used for the variable
# "slurped" to build the remainder of the vignette.
if(spip_exists()) {
message("spip is installed and will be used")
set.seed(5)
spip_dir <- run_spip(pars = SPD)
# now read that in and find relatives within the grandparental range
slurped <- slurp_spip(spip_dir, 2)
} else {
message("Using stored package data because spip is not installed")
slurped <- species_1_slurped_results
}
```
```{r, eval=FALSE}
set.seed(5) # set a seed for reproducibility of results
spip_dir <- run_spip(pars = SPD) # run spip
slurped <- slurp_spip(spip_dir, 2) # read the spip output into R
```
Note that setting the seed allows you to get the same results from spip.
If you don't set the seed, that is fine. spip will be seeded by the next
two integers in the current random number sequence.
If you are doing multiple runs and you want them to be different, you should
make sure that you don't inadvertently set the seed to be the same each time.
## Some functions to summarize the runs
Although during massive production simulations, you might not go back to every run
and summarize it to see what it looks like, when you are parameterizing demographic
simulations you will want to be able to quickly look at observed demographic rates
and things. There are a few functions in CKMRpop that make this quick and easy to do.
### Plot the age-specific census sizes over time
This is just a convenience function to make a pretty plot so you can check to
see what the population demographics look like:
```{r, fig.width = 7, fig.height = 5.5}
ggplot_census_by_year_age_sex(slurped$census_postkill)
```
This shows that the function `leslie_from_spip()` does a good job of finding the
initial population numbers that accord with the stable age distribution.
### Assess the observed survival rates
We can compute the survival rates like this:
```{r}
surv_rates <- summarize_survival_from_census(slurped$census_postkill)
```
That returns a list. One part of the list is a tibble with observed survival fractions.
The first 40 rows look like this:
```{r}
surv_rates$survival_tibble %>%
slice(1:40)
```
The second part of the list holds a plot with histograms of age-specific,
observed survival rates across all years. The blue line is the mean over
all years.
```{r, fig.width = 9, fig.height=7.5, out.height=600, out.width=700}
surv_rates$plot_histos_by_age_and_sex
```
To compare these values to the parameter values for the simulation, you must
pass those to the function:
```{r, fig.width = 9, fig.height=7.5, out.height=600, out.width=700}
surv_rates2 <- summarize_survival_from_census(
census = slurped$census_prekill,
fem_surv_probs = SPD$`fem-surv-probs`,
male_surv_probs = SPD$`male-surv-probs`
)
# print the plot
surv_rates2$plot_histos_by_age_and_sex
```
Here, the red dashed line is the value chosen as the parameter for
the simulations. The means are particularly different for the older
age classes, which makes sense because the total number of
individuals in each of those year classes is smaller.
## The distribution of offspring number
It makes sense to check that your simulation is delivering a reasonable distribution
of offspring per year. This is the number of offspring that survive to just before the
first prekill census. Keep in mind that, for super high-fecundity species, we won't model
every single larva, we just don't start "keeping track of them" until they reach a stage
that is recognizable in some way.
We make this summary from the pedigree information.
In order to get the number of adults that were present, but did not produce any offspring,
we also need to pass in the postkill census information. Also, to get lifetime
reproductive output, we need to know how old individuals were when they died, so
we also pass in the information about deaths.
To make all the summaries, we do:
```{r}
offs_and_mates <- summarize_offspring_and_mate_numbers(
census_postkill = slurped$census_postkill,
pedigree = slurped$pedigree,
deaths = slurped$deaths, lifetime_hexbin_width = c(1, 2)
)
```
Note that we are setting the lifetime reproductive output hexbin width to be suitable
for this example.
The function above returns a list of plots, as follows:
### Age and sex specific number of offspring
```{r, fig.width = 7, fig.height = 5.5}
offs_and_mates$plot_age_specific_number_of_offspring
```
Especially when dealing with viviparous species (like sharks and mammals) it is
worth checking this to make sure that there aren't some females having far too many
offspring.
### Lifetime reproductive output as a function of age at death
Especially with long-lived organisms, it can be instructive to see how lifetime
reproductive output varies with age at death.
```{r, fig.height = 6, fig.width = 7}
offs_and_mates$plot_lifetime_output_vs_age_at_death
```
Yep, many individuals have no offspring, and you have more kids if you live longer.
### Fractional contribution of each age class to the year's offspring
Out of all the offspring born each year, we can tabulate the fraction that
were born to males (or females) of each age. This summary shows a histogram
of those values. These represent the distribution of the fractional contribution
of each age group each year.
```{r, fig.width=7, fig.height=7}
offs_and_mates$plot_fraction_of_offspring_from_each_age_class
```
The blue vertical lines show the means over all years.
## The distribution of the number of mates
Some of the parameters in `spip` affect the distribution of the number
of mates that each individual will have. We can have a quick look
at whether the distribution of number of mates (that produced at least
one offspring) appears to be what we might hope it to be.
```{r}
mates <- count_and_plot_mate_distribution(slurped$pedigree)
```
That gives us a tibble with a summary, like this:
```{r}
head(mates$mate_counts)
```
And also a plot:
```{r, fig.width=7, fig.height=5}
mates$plot_mate_counts
```
## Compiling the related pairs from the samples
From the samples that we slurped up from the spip output we
compile all the related pairs that we found in there with a single
function. It is important to note that this finds all the related pairs
that share ancestors back within `num_generations` generations. Recall that
we ran `slurp_spip()` with `num_generations = 2` which means that we
check for matching ancestors up to and including the grandparents of the sample.
```{r}
crel <- compile_related_pairs(slurped$samples)
```
The result that comes back has a single row for each pair. The individuals appear in
each pair such that the first name comes before the second name, alphabetically.
There is information in list columns about the year(s) that each
member of the pair was sampled in and also years in which they were born, and also
the indices of the populations they were sampled from. (Knowing the population
will become useful when/if we start simulating multiple populations connected
by gene flow). Here we show the first 10 pairs in the samples:
```{r}
crel %>%
slice(1:10)
```
Because some pairs might be related in multiple ways (i.e., they might be paternal half-sibs,
but, through their mother's lineage, they might also be half-first cousins), things can
get complicated. However, `CKMRpop` has an algorithm to categorize pairs according to
their "most important" relationship.
The column `dom_relat` gives the "dominant" or "closest" relationship between the pair.
The possibilities, when considering up to two generations of ancestors are:
- `Se`: self
- `PO`: parent-offspring
- `Si`: sibling
- `GP`: grandparental
- `A`: avuncular (aunt-niece)
- `FC`: first cousin
The `max_hit` column can be interpreted as the number of shared ancestors at the
level of the dominant relationship. For example a pair of half-siblings are of
category `Si` and have `max_hit = 1`, because they share one parent. On the other
hand, a pair with category `Si` and `max_hit = 2` would be full siblings.
Likewise, `A` with `max_hit = 1` is a half-aunt-niece or half-uncle-nephew pair,
while `A` with `max_hit = 2` would be a full-aunt-niece or full-uncle-nephew pair.
The column `dr_hits` gives the number of shared ancestors on the upper vs lower
diagonals of the ancestry match matrices (see below). These are meaningful primarily
for understanding the "directionality" of non-symmetrical relationships. Some explanation
is in order: some relationships, like Se, Si, and FC are _symmetrical_ relationships, because,
if, for example Greta is your sibling, then you are also Greta's sibling. Likewise, if you are
Milton's first cousin, then Milton is also your first cousin. Other relationships, like
PO, A, and GP, are not symmetrical: If Chelsea is your mother, then you are not Chelsea's
mother, you are Chelsea's child. In the non-symmetrical relationships there is always one
member who is typically expected to be older than the other. This is a requirement
in a direct-descent relationship (like parent-offspring, or grandparent-grandchild),
but is not actually required in avuncular relationships (i.e. it is possible to have an
aunt that is younger than the nephew...). We refer to the "typically older" member
of non-symmetrical pairs as the "upper member" and the `upper_member` column of the output
above tells us whether `id_1` or `id_2` is the upper member in such relationships,
when `upper_member` is 1 or 2, respectively. `upper_member` is NA for symmetrical
relationships and it can be 0 for weird situations that should rarely arise where,
for example a pair A and B is related such that A is B's half-uncle, but B is A's half-aunt.
Often the dominant relationship is the only relationship between the pair. However,
if you want to delve deeper into the full relationship out to `num_generations`
generations, you can analyze the ancestry match matrix for the pair, which is stored in the
`anc_match_matrix` column. This matrix holds a TRUE
for each shared ancestor in the two individual's ancestry (out to `num_generations`).
If this seems obtuse, it should become more understandable when we look at some figures, later.
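For example, you can pull the ancestry match matrix of the first pair out of `crel` and inspect it directly:
```r
# a small logical matrix with one row/column per ancestor position,
# out to num_generations
crel$anc_match_matrix[[1]]
```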
### Tallying relationships
Here, we count up the number of pairs that fall into different relationship types:
```{r}
relat_counts <- count_and_plot_ancestry_matrices(crel)
```
The first component of the return list is a tibble of the relationship counts in a highly summarized form
tabulating just the `dom_relat` and `max_hit` over all the pairs.
```{r}
relat_counts$highly_summarised
```
This is telling us there are 194 half-first-cousin pairs, 130 half-avuncular (aunt/uncle with niece/nephew) pairs,
and so forth.
### A closer look within the dominant relationships
As noted before, the table just lists the dominant relationship of
each pair. If you want to quickly assess, within those dominant categories,
how many specific ancestry match matrices underlie them, you can
look at the `dr_counts` component of the output:
```{r}
relat_counts$dr_counts
```
Each of these distinct ancestry match matrices for each dominant relationship can
be visualized in a series of faceted plots, which are also returned. For example,
the ancestry match matrices seen amongst the FC relationships are:
```{r, fig.width=7, fig.height=7}
relat_counts$dr_plots$FC
```
Within each dominant relationship, the distinct ancestry matrices in each
separate panel are named according to their number
(`001`, `002`, `003`, etc.), the relationship and `dr_hits` vector (like `FC[1,2]`),
and the number of times this ancestry match matrix
was observed amongst pairs in the sample (like ` - 4`). So `006-FC[1,1] - 24` was
observed in 24 of the sampled pairs.
It is worth pointing out that the `014-FC[2,2] - 2` plot shows two pairwise relationships
in which individual 2 is inbred, because its father's father (pp) and mother's father
(mp) are the same individual.
Let's look at the distinct ancestry match matrices from the siblings:
```{r, fig.width=7, fig.height=7}
relat_counts$dr_plots$Si
```
Here in `005` we see an interesting case where ind_1 and ind_2 are maternal half sibs,
but individual 2's father is also his/her uncle.
### Tallying all the ancestry match matrices
At times, for example, when looking for the more bizarre relationships,
you might just want to visualize all the ancestry match matrices in order
of the number of times that they occur. The number of different ancestry
match matrices (and the matrices themselves) can be accessed with:
```{r}
relat_counts$anc_mat_counts
```
But more useful for visualizing things is `relat_counts$anc_mat_plots` which is a list that
holds a series of pages/plots showing all the different
ancestry matrices seen. Here are the first 30:
```{r, fig.width=7, fig.height=7}
relat_counts$anc_mat_plots[[1]]
```
And here are the remaining 14 relationship types:
```{r, fig.width=7, fig.height=7}
relat_counts$anc_mat_plots[[2]]
```
These are worth staring at for a while, and making sure you understand what
they are saying. I spent a lot of time staring at these, which is how I
settled upon a decent algorithm for identifying the dominant relationship
in each.
## A Brief Digression: downsampling the sampled pairs
When using `spip` within `CKMRpop` you have to specify the fraction of
individuals in the population that you want to sample at any particular time.
You must set those fractions so that, given the population size, you end up with
roughly the correct number of samples for the situation you are trying to
simulate. Sometimes, however, you might want to have sampled exactly 5,000
fish. Or some other number. The function `downsample_pairs` lets you randomly
discard specific instances in which an individual was sampled so that the
number of individuals (or sampling instances) that remains is the exact number
you want.
For example, looking closely at `slurped$samples` shows that `r nrow(slurped$samples)` distinct individuals were sampled:
```{r}
nrow(slurped$samples)
```
However, those `r nrow(slurped$samples)` individuals represent multiple distinct sampling instances, because
some individuals may be sampled twice, since, in this simulation scenario,
sampling the individuals does not remove them from the population:
```{r}
slurped$samples %>%
mutate(ns = map_int(samp_years_list, length)) %>%
summarise(tot_times = sum(ns))
```
Here are some individuals sampled at multiple times
```{r}
SS2 <- slurped$samples %>%
filter(map_int(samp_years_list, length) > 1) %>%
select(ID, samp_years_list)
SS2
```
And the years that the first two of those individuals were sampled are as follows:
```{r}
# first indiv:
SS2$samp_years_list[[1]]
# second indiv:
SS2$samp_years_list[[2]]
```
Great! Now, imagine that we wanted to see how many kin pairs we found when
our sampling was such that we had only 100 instances of sampling (i.e., it could
have been 98 individuals sampled in total, but two of them were sampled in
two different years). We do like so:
```{r}
subsampled_pairs <- downsample_pairs(
S = slurped$samples,
P = crel,
n = 100
)
```
Now there are only `r nrow(subsampled_pairs$ds_pairs)` pairs
instead of `r nrow(crel)`.
We can do a little calculation to see if that makes sense: because the number of pairs
varies roughly quadratically with the number of samples, we would expect the number of pairs
to decrease by roughly the square of the ratio of sample sizes:
```{r}
# num samples before downsampling
ns_bd <- nrow(slurped$samples)
# num samples after downsampling
ns_ad <- nrow(subsampled_pairs$ds_samples)
# ratio of sample sizes
ssz_rat <- ns_ad / ns_bd
# square of the ratio
sq_rat <- ssz_rat ^ 2
# ratio of number of pairs found amongst samples
num_pairs_before <- nrow(crel)
num_pairs_after_downsampling = nrow(subsampled_pairs$ds_pairs)
ratio <- num_pairs_after_downsampling / num_pairs_before
# compare these two things
c(sq_rat, ratio)
```
That checks out.
## Uncooked Spaghetti Plots
Finally, in order to visually summarize all the kin pairs that were found,
with specific reference to their age, time of sampling, and sex, I find it
helpful to use what I have named the "Uncooked Spaghetti Plot". There are multiple
subpanels on this plot. Here is how to read/view these plots:
- Each row of subpanels is for a different dominant relationship, going from
closer relationships near the top and more distant ones further down. You can
find the abbreviation for the dominant relationship at the right edge of the panels.
- In each row, there are four subpanels: `F->F`, `F->M`, `M->F`, and `M->M`. These
refer to the different possible combinations of sexes of the individuals in the pair.
+ For the non-symmetrical relationships these are naturally defined with the
first letter (`F` for female or `M` for male) denoting the sex of the "upper_member"
of the relationship. That is, if it is PO, then the sex of the parent is the first letter.
The sex of the non-upper-member is the second letter. Thus a `PO` pair that consists of
a father and a daughter would appear in a plot that is in the `PO` row in the `M->F` column.
+ For the symmetrical relationships, there isn't a comparably natural way of
ordering the individuals' sexes for presentation. For these relationships, the
first letter refers to the sex of the individual that was sampled in the earliest
year. If both individuals were sampled in the same year, and they are of different
sexes, then the female is considered the first one, so those all go on the `F->M` subpanel.
- On the subpanels, each straight line (i.e., each piece of uncooked spaghetti) represents
a single kin pair. The two endpoints represent the year/time of sampling (on the x-axis)
and the age of the individual when it was sampled (on the y-axis) of the two members of
the pair.
+ If the relationship is non-symmetrical, then the line is drawn as an arrow pointing
from the upper member to the lower member.
+ The color of the line gives the number of shared ancestors (`max_hit`) at the level
of the dominant relationship. This is how you can distinguish full-sibs from half-sibs, etc.
We crunch out the data and make the plot like this:
```{r}
# because we jitter some points, we can set a seed to get the same
# result each time
set.seed(22)
spag <- uncooked_spaghetti(
Pairs = crel,
Samples = slurped$samples
)
```
Now, the plot can be printed like so:
```{r, fig.width=7.5, fig.height=9.5}
spag$plot
```
## Identifying connected components
One issue that arises frequently in CKMR is the concern (especially in small
populations) that the pairs of related individuals are not independent. The
simplest way in which this occurs is when, for example, A is a half-sib of B,
but B is also a half-sib of C, so that the pairs A-B and B-C share the
individual B. These sorts of dependencies can be captured quickly by thinking
of individuals as vertices and relationships between pairs of individuals as
edges, which defines a _graph_. Finding all the connected components of such a graph
provides a nice summary of all those pairs that share members and hence are certainly
not independent.
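As an aside, the same components could be computed directly from the pair list with a graph library. A sketch (assuming the igraph package is installed, and using the `id_1` and `id_2` columns of the compiled pairs) looks like this:
```r
library(igraph)
g <- graph_from_data_frame(crel[, c("id_1", "id_2")], directed = FALSE)
comp <- components(g)
comp$no            # number of connected components
table(comp$csize)  # distribution of component sizes
```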
The `CKMRpop` package provides the connected component of this graph for every
related pair discovered. This is in column `conn_comp` of the output from
`compile_related_pairs()`. Here we can see it from our example, which shows that
the first 10 pairs all belong to the same connected component, 1.
```{r}
crel %>%
slice(1:10)
```
It should clearly be noted that the size of the connected
components will be affected by the size of the population (with smaller populations,
more of the related pairs will share members) and the number of generations back in time
over which relationships are compiled (if you go back far enough in time, all the pairs
will be related to one another). In our example case, with a small population (so it can
be simulated quickly for building the vignettes) and going back `num_generations = 2`
generations (thus including grandparents and first cousins, etc.) we actually find that
_all_ of the pairs are in the same connected component. Wow!
Because this simulated population is quite small, at this juncture we will reduce
the number of generations so as to create more connected components amongst these pairs
for illustration. So, let us compile just the pairs with `num_generations = 1`. To
do this, we must slurp up the spip results a second time:
```{r, echo=FALSE, results='hide', message=FALSE}
# NOTE the following if()...else() blocks are here
# to test whether spip has been installed yet.
# If spip is not available (for example, on CRAN's build machines) this
# is noted and stored package data are used for the variable
# "slurped" to build the remainder of the vignette.
if(spip_exists()) {
# read the spip output in and find relatives within the parental range
slurped_1gen <- slurp_spip(spip_dir, num_generations = 1)
} else {
message("Using stored package data for 1gen results because spip is not installed")
slurped_1gen <- species_1_slurped_results_1gen
}
```
```{r, eval=FALSE}
slurped_1gen <- slurp_spip(spip_dir, num_generations = 1)
```
And after we have done that, we can compile the related pairs:
```{r}
crel_1gen <- compile_related_pairs(slurped_1gen$samples)
```
Look at the number of pairs:
```{r}
nrow(crel_1gen)
```
That is still a lot of pairs, so let us downsample to 150 samples so that our figures
are not overwhelmed by connected components.
```{r}
set.seed(10)
ssp_1gen <- downsample_pairs(
S = slurped_1gen$samples,
P = crel_1gen,
n = 150
)
```
And also tally up the number of pairs in different connected components:
```{r}
ssp_1gen$ds_pairs %>%
count(conn_comp) %>%
arrange(desc(n))
```
There are some rather large connected components there. Let's plot them.
```{r, fig.width=6, fig.height=6}
# for some reason, the aes() function gets confused unless
# ggraph library is loaded...
library(ggraph)
one_gen_graph <- plot_conn_comps(ssp_1gen$ds_pairs)
one_gen_graph$plot
```
Note that if you want to attach labels to those nodes, to see which individuals
we are talking about, you can do this (and also adjust colors...):
```{r, fig.width=6, fig.height=6}
one_gen_graph$plot +
ggraph::geom_node_text(aes(label = name), repel = TRUE, size = 1.2) +
scale_edge_color_manual(values = c(`PO-1` = "tan2", `Si-1` = "gold", `Si-2` = "blue"))
```
And, for fun, look at it with 2 generations and all of the samples:
```{r, fig.width=6, fig.height=6}
plot_conn_comps(crel)$plot
```
What a snarl! With a small population, several generations, and large samples,
in this case...everyone is connected!
## Simulating Genotypes
We can simulate the genotypes of the sampled individuals at unlinked
markers that have allele frequencies (amongst the founders) that we specify.
We provide the desired allele frequencies in a list. Here we simulate
uniformly distributed allele frequencies at 100 markers, each with
a random number of alleles that is 1 + Poisson(3):
```{r}
set.seed(10)
freqs <- lapply(1:100, function(x) {
nA = 1 + rpois(1, 3)
f = runif(nA)
f/sum(f)
})
```
Then run spip with those allele frequencies:
```{r, echo=FALSE, results='hide', message=FALSE}
# now we can run spip with those as input
if(spip_exists()) {
message("spip is installed and will be used")
set.seed(5)
spip_dir <- run_spip(
pars = SPD,
allele_freqs = freqs
)
# now read that in and find relatives within the grandparental range
slurped <- slurp_spip(spip_dir, 2)
} else {
message("Using stored package data because spip is not installed")
slurped <- species_1_slurped_results_100_loci
}
```
```{r, eval=FALSE}
set.seed(5)
spip_dir <- run_spip(
pars = SPD,
allele_freqs = freqs
)
# now read that in and find relatives within the grandparental range
slurped <- slurp_spip(spip_dir, 2)
```
Now, the variable `slurped$genotypes` has the genotypes we requested.
The first column, (`ID`) is the ID of the individual (congruent with the `ID`
column in `slurped$samples`) and the remaining columns are for the markers.
Each locus occupies one column and the alleles are separated by a slash.
Here are the first 10 individuals at the first four loci:
```{r}
slurped$genotypes[1:10, 1:5]
```
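If you need those genotypes in long format (one row per individual and locus, with the two alleles split apart), a simple tidyverse sketch (not a CKMRpop function) is:
```r
long_geno <- slurped$genotypes %>%
  as_tibble() %>%    # in case the genotypes are not already a tibble
  pivot_longer(-ID, names_to = "locus", values_to = "genotype") %>%
  separate(genotype, into = c("allele_1", "allele_2"), sep = "/")
long_geno
```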
|
/scratch/gouwar.j/cran-all/cranData/CKMRpop/vignettes/species_1_simulation.Rmd
|
## In next version of package 'sfsmisc' - for now here (not exported)
funEnv <- function(..., envir = NULL, parent = parent.frame(),
hash = (...length() > 100), size = max(29L, ...length())) {
e <- list2env(list(...), envir=envir, parent=parent, hash=hash, size=size)
for(n in names(e)) ## iff function or formula, set environment to 'e':
if(is.function(e[[n]]) || (is.call(e[[n]]) &&
inherits(e[[n]], "formula")))
environment(e[[n]]) <- e
e
}
if(!is.function(.BaseNamespaceEnv$...length)) # ...length() only in R >= 3.5.0
## kludgy substitute, using parent.env() -- but it works (sometimes) in funEnv()
...length <- function() eval(quote(length(list(...))), envir = parent.frame())
|
/scratch/gouwar.j/cran-all/cranData/CLA/R/Auxiliaries.R
|
#### Started from "Version 8" (Ver8.R)
## Initialize the weight -- Find first free weight
initAlgo <- function(mu, lB, uB ) {
## New-ordered return, lB, uB with decreasing return
w <- c()
index.new <- order(mu,decreasing = TRUE) # new order with decreasing return
lB.new <- lB[index.new]
uB.new <- uB[index.new]
## free weight - starting solution
i.new <- 0
w.new <- lB.new # initially
while(sum(w.new) < 1) {
i.new <- i.new + 1
w.new[i.new] <- uB.new[i.new]
}
w.new[i.new] <- 1 - sum(w.new[-i.new])
w[index.new] <- w.new # back to original order
i <- index.new[i.new]
## return the index of first free asset and vector w :
list(index = i, weights = w)
}
## getMatrices -----------------------------------------------------------------
getMatrices <- function(mu, covar, w, f) {
## Slice covarF,covarFB,covarB, muF,muB, wF,wB
covarF <- covar[f,f , drop=FALSE]
muF <- mu[f]
b <- seq_along(mu)[-f]
covarFB <- covar[f,b , drop=FALSE]
wB <- w[b]
list(covarF = covarF, covarFB = covarFB, muF = muF, wB = wB)
}
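## computeInv -----------------------------------------------------------------
## Solve covarF %*% X = cbind(1, muF, covarFB %*% wB); the three columns of X
## are re-used by computeW() (gamma and the free weights) and computeLambda().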
computeInv <- function(get) {
solve(get$covarF, cbind(1, get$muF, get$covarFB %*% get$wB, deparse.level = 0L))
}
## computeW -----------------------------------------------------------------
computeW <- function(lam, inv, wB) {
## w2 <- inv[,1]; w3 <- inv[,2]; w1 <- inv[,3]
inv.s <- colSums(inv) # g1 <- inv.s[2]; g2 <- inv.s[1]; g4 <- inv.s[3]
## 1) compute gamma
g <- (-lam * inv.s[2] + (1- sum(wB) + inv.s[3]))/inv.s[1]
## 2) compute free weights
list(wF = - inv[,3] + g * inv[,1] + lam * inv[,2],
gamma = g)
}
## computeLambda --------------------------------------------------------------
computeLambda <- function(wB, inv, i, bi.input) {
inv.s <- colSums(inv)
## c1 <- inv.s[1]; l2 <- inv.s[3]; c2i <- inv[i,2];
## c3 <- inv.s[2]; c4i <- inv[i, 1]; l1 <- sum(wB)
c1 <- inv.s[1]
if(length(bi.input) == 1L) { # 1.bound to free
c4i <- inv[i, 1]
Ci <- - c1 * inv[i, 2] + inv.s[2] * c4i
## return lambda :
if(Ci == 0)
0
else
((1- sum(wB) + inv.s[3])* c4i- c1 * (bi.input + inv[i, 3]))/Ci
}
else { # 2.free to bound
c4i <- inv[, 1]
Ci <- - c1 * inv[, 2] + inv.s[2] * c4i
bi <- bi.input[i, 1] # bi.lB
bi[Ci > 0] <- bi.input[i[Ci > 0], 2] # bi.uB
bi[Ci == 0] <- 0
## return lambda and boundary :
list(lambda = ((1- sum(wB) + inv.s[3]) * c4i- c1 *(bi + inv[, 3]))/Ci,
bi = bi)
}
}
MS <- function(weights_set, mu, covar) {
Sig2 <- colSums(weights_set *(covar %*% weights_set) )
cbind(Sig = sqrt(Sig2), Mu = as.vector(t(weights_set) %*% mu))
}
CLA <- function(mu, covar, lB, uB, check.cov = TRUE, check.f = TRUE, tol.lambda = 1e-7,
give.MS = TRUE, keep.names = TRUE, trace = 0) {
## minimal argument checks
cl <- match.call()
n <- length(mu)
if(length(lB) == 1) lB <- rep.int(lB, n)
if(length(uB) == 1) uB <- rep.int(uB, n)
stopifnot(is.numeric(mu), is.matrix(covar), identical(dim(covar), c(n,n)),
is.numeric(lB), length(lB) == n,
is.numeric(uB), length(uB) == n, lB <= uB)# and in [0,1]
if(check.cov) {
## if the covar matrix is *not* positive (semi)definite, CLA()
## may produce a "solution" which does not fulfill desired properties
## 1) Check kappa() or rcond() ... if they are "large", check the eigen values
" __ FIXME __ "
## 2)
ev <- eigen(covar, only.values=TRUE)$values
if(any(ev < 0))
warning("covariance matrix 'covar' has negative eigenvalues")
}
## Compute the turning points, free sets and weights
ans <- initAlgo(mu, lB, uB)
f <- ans$index
w <- ans$weights
## initialize result parts
lambdas <- gammas <- numeric()
weights_set <- array(dim = c(n,0L))
eLambdas <- free_indices <- list()
lam <- 1 # set non-zero lam
while (lam > 0 && (nf <- length(f)) <= length(mu)) {
if(trace) cat(sprintf("while(lam = %g > 0 && |f|=%d <= |mu|=%d)\n", lam, nf, length(mu)))
## 1) case a): Bound one free weight F -> B
l_in <- 0
if(nf > 1L) {
compl <- computeLambda(wB = w[-f], inv = inv, # inv from last step k (k > 1)
i = f, bi.input = cbind(lB, uB))
if(trace >= 2) { cat(" case a) : computeLambda(): "); str(compl) }
lam_in <- compl$lambda
bi <- compl$bi
k <- which.max(lam_in)
i_in <- f[k]
bi_in <- bi[k]
l_in <- lam_in[k]
if(trace >= 2) cat(sprintf(" [a) cont.]: k = which.max(lam_in) = %d; l_in = %g\n",
k, l_in))
}
## 2) case b): Free one bounded weight B -> F
b <- seq_along(mu)[-f]
inv_list <- lapply(b, function(bi) {
get_i <- getMatrices(mu, covar, w, c(f,bi))
computeInv(get_i)
})
if(trace >= 2) { cat(sprintf(" case b) : \"B -> F\": b = (%s); inv_list:\n",
paste(b,collapse=", "))) ; str(inv_list) }
if(nf < length(mu)) { # still have free weights
fi <- nf + 1L
lam_out <- sapply(seq_along(b), function(i)
computeLambda(wB = w[b[-i]], inv = inv_list[[i]],
i = fi, bi.input = w[b[i]]))
if(trace) {
cat(sprintf("|f| < |mu|: computeLambda() => lam_out[1:%d]%s",
length(b), if(trace >= 2) "= " else ""))
if(trace >= 2) print(lam_out)
}
if (length(lambdas) && !all(sml <- lam_out < lam*(1-tol.lambda))) {
tol.l <- tol.lambda
while((!any(sml)) && 2*tol.l >= .Machine$double.neg.eps) # empty
## extreme: new lam_out are *not* smaller than lam(1-eps)
sml <- lam_out < lam*(1 - (tol.l <- tol.l/2))
if(trace) cat(" new 'sml' case: which(sml) = ",
if(any(sml)) paste(which(sml), collapse=", ")
else "{} (i.e. empty)", "\n")
lam_out <- lam_out[sml]
b <- b [sml]
inv_list <- inv_list[sml]
}
if((hasLam <- length(lam_out) > 0)) {
k <- which.max(lam_out)
if(trace) cat(" --> new k = which.max(lam_out): ", k, "\n")
i_out <- b [k] # one only !
l_out <- lam_out[k]
inv_out <- inv_list[[k]]
} else { # 'empty' --- should not happen typically, but see 'mc3' ex.
if(check.f)
warning("Had free weights but could not improve solution")
l_out <- -Inf
}
} else { ## length(f) == length(mu) <==> |b| = 0
hasLam <- FALSE
l_out <- -Inf
}
## 3) decide lambda
lam <- max(l_in, l_out)
if(trace) cat(sprintf(" l_{in,out}=(%g,%g) => new candidate lam=%g\n",
l_in, l_out, lam))
if(lam > 0) { # remove i_in from f; or add i_out into f
if(l_in > l_out) {
w[i_in] <- bi_in # set value at the correct boundary
f <- f[f != i_in]
getM <- getMatrices(mu, covar, w, f)
inv <- computeInv(getM)
}
else {
f <- c(f, i_out)
inv <- inv_out
}
}
else{ # 4) if max(l_in, l_out) <= 0, "stop" when at the min var solution!
lam <- 0
## muF = 0 not necessary, get1 replaced by getM (ie getM from previous step)
}
compW <- computeW(lam, inv = inv, wB = w[-f])
g <- compW$gamma
w[f] <- compW$wF[seq_along(f)]
lambdas <- c(lambdas, lam)
if(!hasLam)
eLambdas <- c(eLambdas, lam)
gammas <- c(gammas, g)
weights_set <- cbind(weights_set, w, deparse.level = 0L) # store solution
free_indices <- c(free_indices, list(sort(f)))
}# end While
if(keep.names) rownames(weights_set) <- names(mu)
structure(class = "CLA",
list(weights_set = weights_set,
free_indices = free_indices,
gammas = gammas, lambdas = lambdas,
emptyLambdas = eLambdas,
MS_weights = if(give.MS)
MS(weights_set = weights_set, mu = mu, covar = covar),
call = cl))
}
## print method
print.CLA <- function(x, ...) {
cat("Call: ", paste(deparse(x$call), sep = "\n", collapse = "\n"),
"\n", sep = "")
wts <- x$weights_set
n <- nrow(wts)
nT <- ncol(wts)
cat(gettextf("Critical Line Algorithm for n = %d assets, ", n),
gettextf("resulting in %d turning points", nT),"\n", sep="")
## For now; we can do better later:
cat("Overview of result parts:\n")
utils::str(x[1:5], max.level = 1, give.attr=FALSE)
## TODO better, e.g., summarizing the number "active assets"
## and those with non-0 weights -- only if lower bounds were (mostly) 0
invisible(x)
}
## TODO: see ../inst/issues/nonPD/many_NA_data__non-PD-covmat.R : cbind(...)
if(FALSE)
summary.CLA <- function(object, ...) {
}
## As basically from .../YanhaoShi/R/Functions/Plot.R :
MS_plot <- function(ms, type = "o",
main = "Efficient Frontier",
xlab = expression(sigma(w)),
ylab = expression(mu(w)),
col = adjustcolor("blue", alpha.f = 0.5),
pch = 16, ...) {
## list of weights_set, legend...
stopifnot(is.matrix(ms), ncol(ms) == 2)
plot(ms[,"Sig"], ms[,"Mu"], type=type, pch=pch, col=col,
xlab = xlab, ylab=ylab, main=main, ...)
}
## FIXME: --> see also in ../man/plot.CLA.Rd
## -----
## 0) Use findMu() and findSig() to draw the lines *between*
## 1) Learn from Tobias Setz to plot the lower part of the feasible region
## 2) Better title, using 'call'
## 3) mark some critical points particularly
## 4) give information about the *number* critical points / weights sets
## 5) consider using a 'add = FALSE' argument and then use 'lines()'
## 6) "label" turning points by #{assets}, or plot these additionally, or use axis 4
plot.CLA <- function(x, type = "o", main = "Efficient Frontier",
xlab = expression(sigma(w)),
ylab = expression(mu(w)),
col = adjustcolor("blue", alpha.f = 0.5),
pch = 16, ...) {
stopifnot(is.matrix(ms <- x$MS_weights))
plot(ms[,"Sig"], ms[,"Mu"], type=type, pch=pch, col=col,
xlab=xlab, ylab=ylab, main=main, ...)
}
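## A small usage sketch (wrapped in if(FALSE) so it is never run when this
## file is sourced); 'mu' and 'covar' below are made-up illustrative inputs,
## not data shipped with the package:
if(FALSE) {
    set.seed(1)
    n <- 5
    mu <- runif(n)
    A <- matrix(rnorm(n * n), n, n)
    covar <- crossprod(A) / n  # a positive semi-definite covariance matrix
    res <- CLA(mu, covar, lB = 0, uB = 1)
    res        # print.CLA(): number of assets and turning points
    plot(res)  # efficient frontier (sigma(w) vs mu(w)) at the turning points
}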
## TODO 2nd plot: --> ../inst/issues/nonPD/many_NA_data__non-PD-covmat.R
|
/scratch/gouwar.j/cran-all/cranData/CLA/R/CLA.R
|
Env7.YS2 <- funEnv(
info = "more cosmetic (computeLambda)",
# Initialize the weight -- Find first free weight
initAlgo = function(mu, lB, uB ){
#New-ordered return, lB, uB with decreasing return
w <- c()
index.new <- order(mu,decreasing = TRUE) # new order with decreasing return
lB.new <- lB[index.new]
uB.new <- uB[index.new]
# free weight - starting solution
i.new <- 0
w.new <- lB.new # initially
while(sum(w.new) < 1) {
i.new <- i.new + 1
w.new[i.new] <- uB.new[i.new]
}
w.new[i.new] <- 1 - sum(w.new[-i.new])
w[index.new] <- w.new #back to original order
i <- index.new[i.new]
## return the index of first free asset and vector w :
list(index = i, weights = w)
},
# getMatrices -----------------------------------------------------------------
getMatrices = function(mu, covar, w, f){
# Slice covarF,covarFB,covarB,muF,muB,wF,wB
covarF <- covar[f,f]
muF <- mu[f]
b <- (seq_along(mu))[-f]
covarFB <- covar[f,b]
wB <- w[b]
list(covarF = covarF, covarFB = covarFB, muF = muF, wB = wB)
},
# compute_g_W -----------------------------------------------------------------
# compute gamma and Weights #(Bailey and Lopez de Prado 2013, 11)
compute_g_W = function(lam, get, muF = get$muF){
covarFB <- get$covarFB
wB <- get$wB
covarF <- get$covarF
#1) compute gamma
inv <- solve(covarF, cbind(1, muF, covarFB %*% wB, deparse.level=0L))
w1 <- inv[,3]
w2 <- inv[,1]
w3 <- inv[,2]
g1 <- sum(w3)
g2 <- sum(w2)
g3 <- sum(wB)
g4 <- sum(w1)
gam <- (-lam*g1+(1-g3+g4))/g2
#2) compute weights
list(gamma = gam, wF = -w1 + gam*w2 + lam*w3)
},
# computeLambda --------------------------------------------------------------
computeLambda = function(get, i, bi.input){
## really _two_ different variants:
## 1. length(bi.input) > 1 <=> case a) "F -> B" (bi.input= (lB, uB)[f,])
## 2. length(bi.input) == 1 <=> case b) "B -> F" (bi.input= w[bi] (in {lB,uB}))
covarFB <- get$covarFB
muF <- get$muF
wB <- get$wB
covarF <- get$covarF
inv <- solve(covarF, cbind(1, muF, covarFB %*% wB, deparse.level=0L))
c1 <- sum(inv[,1])
c4i <- inv[i, 1]
c2i <- inv[i, 2]
c3 <- sum(inv[,2])
Ci <- -c1*c2i + c3*c4i
l1 <- sum(wB)
l3 <- inv[,3]
l2 <- sum(l3)
l3 <- l3[i] # = inv[i,3]
if(length(bi.input)==1){ # "a): B -> F" (bound to free)
if(Ci == 0) 0
## Oops! missing 'else' !!
((1-l1+l2)*c4i-c1*(bi.input+l3))/Ci # return lambda
}
else { # "b): F -> B" (free to bound)
bi.lB <- bi.input[,1]
bi.uB <- bi.input[,2]
bi <- bi.lB
bi[Ci>0] <- bi.uB[Ci>0]
bi[Ci == 0] <- 0
list(lambda = ((1-l1+l2)*c4i-c1*(bi+l3))/Ci, bi = bi)
# return lambda and boundary
}
},
MS = function(weights_set, mu, covar){
Sig2 <- colSums(weights_set *(covar %*% weights_set) )
cbind(Sig = sqrt(Sig2), Mu = as.vector(t(weights_set) %*% mu))
},
cla.solve = function(mu, covar, lB, uB, tol.lambda = 1e-7) {
# Compute the turning points, free sets and weights
ans <- initAlgo(mu, lB, uB)
f <- ans$index
w <- ans$weights
weights_set <- w # store solution
lambdas <- NA # The first step has no lambda or gamma, add NA instead.
gammas <- NA
free_indices <- list(f)
lam <- 1 # set non-zero lam
while ( lam > 0 && length(f) < length(mu)) {
# 1) case a): Bound one free weight F -> B
l_in <- 0
if(length(f) > 1 ){
get1 <- getMatrices(mu, covar, w, f)##
compl <- computeLambda(get = get1, i = seq_along(f),
bi.input = cbind(lB[f], uB[f]))
lam_in <- compl$lambda
bi <- compl$bi
k <- which.max(lam_in)
i_in <- f[k]
bi_in <- bi[k]
l_in <- lam_in[k]
}
# 2) case b): Free one bounded weight B -> F
b <- seq_along(mu)[-f]
lam_out <- sapply(b, function(bi) {
get_i <- getMatrices(mu, covar, w, c(f,bi))
computeLambda(get = get_i, i = length(get_i$muF), bi.input = w[bi])
})
if (length(lambdas) > 1 && any(!(sml <- lam_out < lam*(1-tol.lambda)))) {
lam_out <- lam_out[sml]
b <- b [sml]
}
k <- which.max(lam_out)
i_out <- b [k] # one only !
l_out <- lam_out[k]
# 3) decide lambda
lam <- max(l_in, l_out, 0)
if(lam > 0) { # remove i_in from f; or add i_out into f
if(l_in > l_out ){
w[i_in] <- bi_in # set value at the correct boundary
f <- f[f != i_in]
}
else {
f <- c(f,i_out)
}
getM <- getMatrices(mu, covar, w, f)
compW <- compute_g_W(lam, get = getM)
}
else{ #4) if max(l_in, l_out) < 0, "stop" when at the min var solution!
compW <- compute_g_W(lam = lam, get = get1, muF = 0)
}
wF <- compW$wF
g <- compW$gamma
w[f] <- wF[seq_along(f)]
lambdas <- c(lambdas, lam)
weights_set <- cbind(weights_set, w, deparse.level = 0L) # store solution
gammas <- c(gammas, g)
free_indices <- c(free_indices, list(sort(f)))
} #end While
list(weights_set = weights_set,
free_indices = free_indices,
gammas = gammas, lambdas = lambdas,
MS_weight = MS(weights_set = weights_set, mu = mu, covar = covar))
}
)
|
/scratch/gouwar.j/cran-all/cranData/CLA/R/Fenv.R
|
Env8 <- funEnv(
info = "inv-solve(covF)",
# Initialize the weight -- Find first free weight
initAlgo = function(mu, lB, uB ){
#New-ordered return, lB, uB with decreasing return
w <- c()
index.new <- order(mu,decreasing = TRUE) # new order with decreasing return
lB.new <- lB[index.new]
uB.new <- uB[index.new]
# free weight - starting solution
i.new <- 0
    w.new <- lB.new # initially
while(sum(w.new) < 1) {
i.new <- i.new + 1
w.new[i.new] <- uB.new[i.new]
}
w.new[i.new] <- 1 - sum(w.new[-i.new])
w[index.new] <- w.new #back to original order
i <- index.new[i.new]
## return the index of first free asset and vector w :
list(index = i, weights = w)
},
# getMatrices -----------------------------------------------------------------
getMatrices = function(mu, covar, w, f){
# Slice covarF,covarFB,covarB,muF,muB,wF,wB
covarF <- covar[f,f]
muF <- mu[f]
b <- (seq_along(mu))[-f]
covarFB <- covar[f,b]
wB <- w[b]
list(covarF = covarF, covarFB = covarFB, muF = muF, wB = wB)
},
computeInv = function(get){
solve(get$covarF, cbind(1, get$muF, get$covarFB %*% get$wB, deparse.level = 0L))
},
# computeW -----------------------------------------------------------------
computeW = function(lam, inv, wB){
# w2 <- inv[,1]; w3 <- inv[,2]; w1 <- inv[,3]
inv.s <- colSums(inv) # g1 <- inv.s[2]; g2 <- inv.s[1]; g4 <- inv.s[3]
#1) compute gamma
g <- (-lam * inv.s[2] + (1- sum(wB) + inv.s[3]))/inv.s[1]
#2) compute free weights
list(wF = - inv[,3] + g * inv[,1] + lam * inv[,2], gamma = g)
},
# computeLambda --------------------------------------------------------------
computeLambda = function(wB, inv, i, bi.input){
inv.s <- colSums(inv)
# c1 <- inv.s[1]; l2 <- inv.s[3]; c2i <- inv[i,2];
# c3 <- inv.s[2]; c4i <- inv[i, 1]; l1 <- sum(wB)
c1 <- inv.s[1]
if(length(bi.input)==1){ # 1.bound to free
c4i <- inv[i, 1]
Ci <- - c1 * inv[i, 2] + inv.s[2] * c4i
      if(Ci == 0) 0
      else ((1- sum(wB) + inv.s[3])* c4i- c1 * (bi.input + inv[i, 3]))/Ci # return lambda
} else { # 2.free to bound
c4i <- inv[, 1]
Ci <- - c1 * inv[, 2] + inv.s[2] * c4i
bi <- bi.input[i, 1] # bi.lB
bi[Ci > 0] <- bi.input[i[Ci > 0], 2] # bi.uB
bi[Ci == 0] <- 0
list(lambda = ((1- sum(wB) + inv.s[3]) * c4i- c1 *(bi + inv[, 3]))/Ci,
bi = bi)
# return lambda and boundary
}
},
MS = function(weights_set, mu, covar){
Sig2 <- colSums(weights_set *(covar %*% weights_set) )
cbind(Sig = sqrt(Sig2), Mu = as.vector(t(weights_set) %*% mu))
},
cla.solve = function(mu, covar, lB, uB, tol.lambda = 1e-7) {
# Compute the turning points, free sets and weights
ans <- initAlgo(mu, lB, uB)
f <- ans$index
w <- ans$weights
weights_set <- w # store solution
lambdas <- NA # The first step has no lambda or gamma, add NA instead.
gammas <- NA
free_indices <- list(f)
lam <- 1 # set non-zero lam
while ( lam > 0 && length(f) < length(mu)) {
# 1) case a): Bound one free weight F -> B
l_in <- 0
if(length(f) > 1 ){
compl <- computeLambda(wB = w[-f], inv = inv, # inv from last step k (k > 1)
i = f, bi.input = cbind(lB, uB))
lam_in <- compl$lambda
bi <- compl$bi
k <- which.max(lam_in)
i_in <- f[k]
bi_in <- bi[k]
l_in <- lam_in[k]
}
# 2) case b): Free one bounded weight B -> F
b <- seq_along(mu)[-f]
inv_list <- lapply(b, function(bi){
get_i <- getMatrices(mu, covar, w, c(f,bi))
computeInv(get_i)
})
fi <- length(f) + 1
lam_out <- sapply(seq_along(b), function(i) {
computeLambda(wB = w[b[-i]], inv = inv_list[[i]],
i = fi, bi.input = w[b[i]])
})
if (length(lambdas) > 1 && any(!(sml <- lam_out < lam*(1-tol.lambda)))) {
lam_out <- lam_out[sml]
b <- b [sml]
inv_list <- inv_list[sml]
}
k <- which.max(lam_out)
i_out <- b [k] # one only !
l_out <- lam_out[k]
inv_out <- inv_list[[k]]
# 3) decide lambda
lam <- max(l_in, l_out, 0)
if(lam > 0) { # remove i_in from f; or add i_out into f
if(l_in > l_out ){
w[i_in] <- bi_in # set value at the correct boundary
f <- f[f != i_in]
getM <- getMatrices(mu, covar, w, f)
inv <- computeInv(getM)
}
else {
f <- c(f,i_out)
inv <- inv_out
}
compW <- computeW(lam, inv = inv, wB = w[-f])
}
else{ #4) if max(l_in, l_out) < 0, "stop" when at the min var solution!
compW <- computeW(lam = lam, inv = inv, wB = w[-f])
# muF = 0 not necessary, get1 replaced by getM (ie getM from previous step)
}
wF <- compW$wF
g <- compW$gamma
w[f] <- wF[seq_along(f)]
lambdas <- c(lambdas, lam)
weights_set <- cbind(weights_set, w, deparse.level = 0L) # store solution
gammas <- c(gammas, g)
free_indices <- c(free_indices, list(sort(f)))
} #end While
list(weights_set = weights_set,
free_indices = free_indices,
gammas = gammas, lambdas = lambdas,
MS_weight = MS(weights_set = weights_set, mu = mu, covar = covar))
}
)
|
/scratch/gouwar.j/cran-all/cranData/CLA/R/Ver8.R
|
Env9 <- funEnv(
info = "covF_inv: add/remove i from previous covF_inv (Niederreiter^2)",
# Initialize the weight -- Find first free weight
initAlgo = function(mu, lB, uB ){
#New-ordered return, lB, uB with decreasing return
w <- c()
index.new <- order(mu,decreasing = TRUE) # new order with decreasing return
lB.new <- lB[index.new]
uB.new <- uB[index.new]
# free weight - starting solution
i.new <- 0
    w.new <- lB.new # initially
while(sum(w.new) < 1) {
i.new <- i.new + 1
w.new[i.new] <- uB.new[i.new]
}
w.new[i.new] <- 1 - sum(w.new[-i.new])
w[index.new] <- w.new #back to original order
i <- index.new[i.new]
## return the index of first free asset and vector w :
list(index = i, weights = w)
},
# getMatrices -----------------------------------------------------------------
getMatrices = function(mu, covar, w, f){
# Slice covarF,covarFB,covarB,muF,muB,wF,wB
covarF <- covar[f,f]
muF <- mu[f]
b <- (seq_along(mu))[-f]
covarFB <- covar[f,b]
wB <- w[b]
list(covarF = covarF, covarFB = covarFB, muF = muF, wB = wB)
},
computeMat = function(f, b, covar, covF.inv.pre){
a <- covar[f, b, drop = FALSE]
cc <- covF.inv.pre %*% a
list(a = a, cc = cc)
},
computeInv = function(covF.inv.pre, covar, f, i, ib, add, mu, w, Mat){
if(add){ # B -> F
a <- Mat$a[,ib]
cc <- Mat$cc[, ib]
bb <- covar[i, i] - sum(a * cc)
## compute (bb, cc) for all i != f outside the i-loop
e1 <- tcrossprod(cc)/bb
m <- cc/bb
f1 <- length(f) + 1
covF.inv <- matrix(0, f1, f1)
covF.inv[-f1, ] <- cbind(covF.inv.pre + e1, -m)
covF.inv[f1, ] <- cbind(-t(m), 1/bb)
f.new <- c(f, i)
} else {
ii <- which(f == i)
B <- covF.inv.pre[-ii, -ii]
bv <- covF.inv.pre[, ii]
# b <- bv[-ii], beta <- bv[ii]
covF.inv <- B - 1/bv[ii] * tcrossprod(bv[-ii])
f.new <- f[-ii]
}
muF <- mu[f.new]
covarFB <- covar[f.new,-f.new]
wB <- w[-f.new]
list(inv = covF.inv %*% cbind(1, muF, covarFB %*% wB, deparse.level=0L),
covF.inv = covF.inv)
},
# computeW -----------------------------------------------------------------
computeW = function(lam, inv, wB){
# w2 <- inv[,1]; w3 <- inv[,2]; w1 <- inv[,3]
inv.s <- colSums(inv) # g1 <- inv.s[2]; g2 <- inv.s[1]; g4 <- inv.s[3]
#1) compute gamma
g <- (-lam * inv.s[2] + (1- sum(wB) + inv.s[3]))/inv.s[1]
#2) compute free weights
list(wF = - inv[,3] + g * inv[,1] + lam * inv[,2], gamma = g)
},
# computeLambda --------------------------------------------------------------
computeLambda = function(wB, inv, i, bi.input){
inv.s <- colSums(inv)
# c1 <- inv.s[1]; l2 <- inv.s[3]; c2i <- inv[i,2];
# c3 <- inv.s[2]; c4i <- inv[i, 1]; l1 <- sum(wB)
c1 <- inv.s[1]
if(length(bi.input)==1){ # 1.bound to free
c4i <- inv[i, 1]
Ci <- - c1 * inv[i, 2] + inv.s[2] * c4i
      if(Ci == 0) 0
      else ((1- sum(wB) + inv.s[3])* c4i- c1 * (bi.input + inv[i, 3]))/Ci # return lambda
} else { # 2.free to bound
c4i <- inv[, 1]
Ci <- - c1 * inv[, 2] + inv.s[2] * c4i
bi <- bi.input[i, 1] # bi.lB
bi[Ci > 0] <- bi.input[i[Ci > 0], 2] # bi.uB
bi[Ci == 0] <- 0
list(lambda = ((1- sum(wB) + inv.s[3]) * c4i- c1 *(bi + inv[, 3]))/Ci,
bi = bi)
# return lambda and boundary
}
},
MS = function(weights_set, mu, covar){
Sig2 <- colSums(weights_set *(covar %*% weights_set) )
cbind(Sig = sqrt(Sig2), Mu = as.vector(t(weights_set) %*% mu))
},
cla.solve = function(mu, covar, lB, uB, tol.lambda = 1e-7) {
# Compute the turning points, free sets and weights
ans <- initAlgo(mu, lB, uB)
f <- ans$index
w <- ans$weights
weights_set <- w # store solution
lambdas <- NA # The first step has no lambda or gamma, add NA instead.
gammas <- NA
free_indices <- list(f)
lam <- 1 # set non-zero lam
covFinv <- 1/covar[f, f]
while ( lam > 0 && length(f) < length(mu)) {
# 1) case a): Bound one free weight F -> B
l_in <- 0
if(length(f) > 1 ){
compl <- computeLambda(wB = w[-f], inv = inv, # inv from last step k (k > 1)
i = f, bi.input = cbind(lB, uB))
lam_in <- compl$lambda
bi <- compl$bi
k <- which.max(lam_in)
i_in <- f[k]
bi_in <- bi[k]
l_in <- lam_in[k]
}
# 2) case b): Free one bounded weight B -> F
b <- seq_along(mu)[-f]
Mat <- computeMat(f, b, covar, covFinv)
fi <- length(f) + 1
inv_list <- lapply(seq_along(b), function(i){
ans <- computeInv(covF.inv.pre = covFinv, covar = covar, f = f, i = b[i],
ib = i, add = TRUE, mu = mu, w = w, Mat = Mat)
lam <- computeLambda(wB = w[b[-i]], inv = ans$inv,
i = fi, bi.input = w[b[i]])
list(inv = ans$inv, covF.inv = ans$covF.inv, lam = lam)
})
lam_out <- sapply(inv_list, function(x) x$lam)
if (length(lambdas) > 1 && any(!(sml <- lam_out < lam*(1-tol.lambda)))) {
lam_out <- lam_out[sml]
b <- b [sml]
inv_list <- inv_list[sml]
}
k <- which.max(lam_out)
i_out <- b [k] # one only !
l_out <- lam_out[k]
inv_out <- inv_list[[k]]$inv
covFinv.out <- inv_list[[k]]$covF.inv
# 3) decide lambda
lam <- max(l_in, l_out, 0)
if(lam > 0) { # remove i_in from f; or add i_out into f
if(l_in > l_out ){
w[i_in] <- bi_in # set value at the correct boundary
a <- computeInv(covF.inv.pre = covFinv, covar = covar,
f = f, i = i_in, add = FALSE, mu = mu, w = w)
f <- f[f != i_in]
covFinv <- a$covF.inv
inv <- a$inv
}
else {
f <- c(f,i_out)
inv <- inv_out
covFinv <- covFinv.out
}
compW <- computeW(lam, inv = inv, wB = w[-f])
}
else{ #4) if max(l_in, l_out) < 0, "stop" when at the min var solution!
compW <- computeW(lam = lam, inv = inv, wB = w[-f])
# muF = 0 not necessary, get1 replaced by getM (ie getM from previous step)
}
wF <- compW$wF
g <- compW$gamma
w[f] <- wF[seq_along(f)]
lambdas <- c(lambdas, lam)
weights_set <- cbind(weights_set, w, deparse.level = 0L) # store solution
gammas <- c(gammas, g)
free_indices <- c(free_indices, list(sort(f)))
} #end While
list(weights_set = weights_set,
free_indices = free_indices,
gammas = gammas, lambdas = lambdas,
MS_weight = MS(weights_set = weights_set, mu = mu, covar = covar))
}
)
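## Hypothetical usage sketch (not package code): exercising this variant directly on
## the 10-asset example data `muS.10ex` (see ../inst/xtraR/simple_10_asset-ex.R):
if(FALSE) {
    r9 <- with(muS.10ex, Env9$cla.solve(mu, covar, lB = rep(0, 10), uB = rep(1, 10)))
    round(t(r9$weights_set), 3) # turning-point weights, one row per turning point
    r9$MS_weight                # (Sig, Mu) coordinates along the efficient frontier
}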
|
/scratch/gouwar.j/cran-all/cranData/CLA/R/Ver9.R
|
#### From Yanhao Shi's thesis -- R/Functions/CLA_analysis.R
## Vectorized in first argument by Martin Maechler
### Version 1 -- of the code, simply "vectorize via lapply" -- not so fast
findMu <- function(Sig0, result, covar, tol.unir = 1e-6, equal.tol = 1e-6) {
R <- lapply(Sig0, findMu.1,
result=result, covar=covar, tol.unir=tol.unir, equal.tol=equal.tol)
## each R[[i]] has (Mu, weight)
list(Mu = vapply(R, `[[`, numeric(1), "Mu"),
weight = vapply(R, `[[`, numeric(nrow(covar)), "weight"))
}
findSig <- function(Mu0, result, covar, equal.tol = 1e-6) { # equal.tol > 1e-16
R <- lapply(Mu0, findSig.1,
result=result, covar=covar, equal.tol=equal.tol)
## each R[[i]] has (Sig, weight):
list(Sig = vapply(R, `[[`, numeric(1), "Sig"),
weight = vapply(R, `[[`, numeric(nrow(covar)), "weight"))
}
##' Now simplified and very close to findSig0() -- just returning weights additionally
findSig.1 <- function(Mu0, result, covar, equal.tol) { # equal.tol > 1e-16
stopifnot(length(Mu0) == 1L)
ms.w <- result$MS_weight
nd <- order(ms.w[, "Mu"])
n <- length(nd)
## revert order for all (w, mu, sig):
ms.w <- ms.w[nd, ]
w <- result$weights_set[, nd]
mu.w <- ms.w[, "Mu"]
sig.w <- ms.w[, "Sig"]
if(Mu0 < min(mu.w) || Mu0 > max(mu.w))
stop(sprintf("Mu0 must be in [%g, %g]", min(mu.w), max(mu.w)))
i <- findInterval(Mu0, mu.w) # [..)...[..]
if(i == n || # Mu0 is max(mu.w)
isTRUE(all.equal(mu.w[i], mu.w[i+1], tol = equal.tol))) {
list(Sig = sig.w[i], weight = w[, i])
} else {
a <- (Mu0 - mu.w[i+1])/(mu.w[i] - mu.w[i+1])
# solve for a : Mu0 = a* Mu1 + (1-a)* Mu2 :
wm <- a*w[, i] + (1-a)*w[, i+1]
list(Sig = sqrt(colSums(wm *(covar %*% wm) )),
weight = wm)
}
}
##' This is much cleaner and logical than the original findSig() ==> use a version of it
##' A version of findSig() only used inside findMu():
findSig0 <- function(Mu0, result, covar, equal.tol = 1e-8){ # equal.tol > 1e-16
stopifnot(length(Mu0) == 1L)
ms.w <- result$MS_weight
nd <- order(ms.w[, "Mu"])
n <- length(nd)
## revert order for all (w, mu, sig):
ms.w <- ms.w[nd, ]
w <- result$weights_set[, nd]
mu.w <- ms.w[, "Mu"]
sig.w <- ms.w[, "Sig"]
i <- findInterval(Mu0, mu.w) # [..)...[..]
if(i == n || isTRUE(all.equal(mu.w[i], mu.w[i+1], tol = equal.tol))) {
sig.w[i]
} else {
a <- (Mu0 - mu.w[i+1])/(mu.w[i] - mu.w[i+1])
# solve for a : Mu0 = a* Mu1 + (1-a)* Mu2 :
w0 <- a*w[, i] + (1-a)*w[, i+1]
sqrt(colSums(w0 *(covar %*% w0) )) #return Sig0
}
}
findMu.1 <- function(Sig0, result, covar, tol.unir = 1e-6, equal.tol = 1e-6) {
stopifnot(length(Sig0) == 1L)
ms.w <- result$MS_weight
nd <- order(ms.w[, "Sig"])
n <- length(nd)
mu.w <- ms.w[nd, "Mu"]
sig.w <- ms.w[nd, "Sig"]
w <- result$weights_set[, nd]
if(Sig0 < min(sig.w) || Sig0 > max(sig.w))
stop(sprintf("Sig0 must be in [%g, %g]", min(sig.w), max(sig.w)))
i <- findInterval(Sig0, sig.w)
if(i == n || # Sig0 is max(sig.w)
isTRUE(all.equal(mu.w[i], mu.w[i+1], tol = equal.tol))) {
list(Mu = mu.w[i], weight = w[, i])
} else { ## FIXME: here we are using the default equal.tol = 1e-8 in findSig0() !
r <- uniroot(function(mu) findSig0(mu, result, covar) - Sig0,
interval = mu.w[c(i, i+1)], tol=tol.unir)
Mu0 <- r$root
# solve for a : mu = a* Mu1 + (1-a)* Mu2 :
a <- (Mu0 - mu.w[i+1])/(mu.w[i] - mu.w[i+1])
w0 <- a* w[, i] + (1-a)*w[, i+1]
list(Mu = Mu0, weight = w0)
}
}
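## A minimal, hypothetical usage sketch (not part of the package source); `res` is
## assumed to be a solver result carrying the components read above (`MS_weight`,
## `weights_set`), and `covar` the covariance matrix that produced it:
if(FALSE) {
    sg <- findSig(Mu0 = 0.6, result = res, covar = covar)    # Sig (and weights) at Mu = 0.6
    mu <- findMu (Sig0 = sg$Sig, result = res, covar = covar)
    all.equal(mu$Mu, 0.6, tolerance = 1e-4)                   # round trip, up to tolerances
}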
### Version 2 --- vectorize inside -- only those parts that need it
## not yet !!! === For first part, look at
## =========== findSigMu.R+
## ============
if(FALSE) ## will be .... not yet
findMu <- function(Sig0, result, covar, tol.unir = 1e-6, equal.tol = 1e-6){
ms.w <- result$MS_weight
nd <- order(ms.w[, "Sig"])
n <- length(nd)
mu.w <- ms.w[nd, "Mu"]
sig.w <- ms.w[nd, "Sig"]
w <- result$weights_set[, nd]
if(any(Sig0 < min(sig.w)) || any(Sig0 > max(sig.w)))
stop(sprintf("all Sig0 must be in [%g, %g]", min(sig.w), max(sig.w)))
ini <- findInterval(Sig0, sig.w)
m <- length(Sig0)
mu0 <- numeric(m)
wt0 <- matrix(NA_real_, nrow(w), m) # one column of asset weights per Sig0
iBnd <- vapply(ini, function(i) {
(i == n || # Mu0 is max(mu.w)
isTRUE(all.equal(mu.w[i], mu.w[i+1], tol = equal.tol))) # duplicate turning pt
}, NA)
if(any(iBnd)) {
i <- ini[iBnd]
mu0[ iBnd] <- mu.w[i]
wt0[,iBnd] <- w[, i]
}
if(any(iIn <- !iBnd)) { # regular case
mus <- vapply(which(iIn), function(ii) {
r <- uniroot(function(mu) findSig0(mu, result, covar) - Sig0[ii],
interval = mu.w[c(ini[ii], ini[ii]+1L)], tol = tol.unir)
## TODO check convergence?
r$root
}, numeric(1))
i <- ini[iIn]
## solve for a :  mu = a* Mu1 + (1-a)* Mu2  ==>
a <- (mus - mu.w[i+1])/(mu.w[i] - mu.w[i+1])
mu0[ iIn] <- mus
wt0[,iIn] <- rep(a, each = nrow(w)) * w[, i] + rep(1-a, each = nrow(w)) * w[, i+1]
}
list(Mu = mu0, weight = wt0)
}
|
/scratch/gouwar.j/cran-all/cranData/CLA/R/findSigMu.R
|
#### Yanhao Shi's implementation (in her MSc thesis) to get
#### (mu, Sigma) from timeseries of assets.
#### =========== Basically her Index.ms2() function
##
muSigmaGarch <- function(x,
formula = ~ garch(1,1),
cond.dist = "std", # <- standard t-distrib (w/ df = 5) ?
trace = FALSE, ...)
{
if(length(d <- dim(x)) != 2) stop("'x' must be a numeric matrix or data frame (alike)")
if(!requireNamespace("fGarch"))
stop("muSigmaGarch() needs the 'fGarch' package installed")
n <- d[1] # number of observations
nA <- d[2]# number of assets
if(any(d < 2)) stop("must have at least 2 observations of at least 2 assets")
lr <- log(x[-1, ]/x[-n,]) # log return
loss <- -lr
garchFit <- fGarch :: garchFit
gfit <- if(is.data.frame(loss))
lapply(loss, garchFit, formula=formula, cond.dist=cond.dist, trace=trace)
else if(is.matrix(loss))
apply(loss, 2L, garchFit, formula=formula, cond.dist=cond.dist, trace=trace)
else stop("Invalid 'loss' (log return computation problem?)")
gPRED <- fGarch::predict
gpredict <- t(sapply(gfit, gPRED, n.ahead = 1))
mu <- -unlist(gpredict[, "meanForecast"])
sd <- unlist(gpredict[, "standardDeviation"])
x.std <- sapply(1:nA, function(i) lr[,i]/gfit[[i]]@sigma.t)
## diag.sd <- diag(sd)
V <- cor(x.std, ...) # including names
V[] <- sd * V * rep(sd, each = nA)
## = diag.sd %*% cor(x.std) %*% diag.sd
list(mu = mu, covar = V)
}
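## Hypothetical usage sketch (not package code; needs the 'fGarch' package). `P` is
## assumed to be a numeric matrix of asset *prices* (rows = time, columns = assets):
if(FALSE) {
    set.seed(1)
    P  <- 100 * exp(apply(matrix(rnorm(200 * 3, sd = 0.01), 200, 3), 2, cumsum))
    mS <- muSigmaGarch(P)
    str(mS)   # list with components 'mu' (length 3) and 'covar' (3 x 3)
    CLA(mS$mu, mS$covar, lB = 0, uB = 1)
}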
|
/scratch/gouwar.j/cran-all/cranData/CLA/R/muSigmaGarch.R
|
### ---> ../../R/
if(FALSE) { ## ==> in another file -- TODO
data(SP500, package="FRAPO")
system.time(muS.sp500 <- muSigma(SP500)) # 26 sec. (lynne, 2017)
data(NASDAQ, package="FRAPO")
system.time(muS.nasdaq <- muSigma(NASDAQ)) # 122.3 sec. (lynne, 2017)
## Prove they are the same as Yanhao's version in her thesis:
if(exists("trans.sp")) {# from her (*rds file)
cat("comparing Yanhao Shi's 'trans.sp' with our muSigma() result:\n")
stopifnot(all.equal(as.vector(muS.sp500$mu),
as.vector(trans.sp$mu), tol = 1e-15))
stopifnot(all.equal(muS.sp500$covmat,
trans.sp $covmat, tol = 1e-15))
}
if(exists("trans.nasdaq")) {# from her (*rds file)
cat("comparing Yanhao Shi's 'trans.nasdaq' with our muSigma() result:\n")
stopifnot(all.equal(as.vector( muS.nasdaq$mu),
as.vector(trans.nasdaq$mu), tol = 1e-15))
stopifnot(all.equal( muS.nasdaq $covmat,
trans.nasdaq $covmat, tol = 1e-15))
}
object.size(muS.sp500 ) ## 1843784 bytes : 1.8 MB ; ok for CRAN pkg
object.size(muS.nasdaq) ## 38720584 bytes : 39 Mega too large for CRAN package
##
if(FALSE) { ## as MM: do once
save (muS.sp500, file = "~/R/Pkgs/CLA/data/muSig_sp500.rda", compress = "xz")
saveRDS(muS.sp500, file = "~/R/Pkgs/CLA/data/muSig_sp500.rds", compress = "xz")
}
}
|
/scratch/gouwar.j/cran-all/cranData/CLA/inst/xtraR/fGarch-mu-Sigma.R
|
## <---> sync with ~/R/Pkgs/robustbase/inst/xtraR/platform-sessionInfo.R
## ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
##' return 'x' unless it is NULL where you'd use 'orElse'
`%||%` <- function(x, orElse) if(!is.null(x)) x else orElse
##' not %in% :
`%nin%` <- function (x, table) is.na(match(x, table))
##' Derive more sessionInfo() like information, notably about BLAS, LAPACK, arithmetic, etc
moreSessionInfo <- function(print. = FALSE) {
.M <- .Machine
if(print.) str(.M[grep("^sizeof", names(.M))]) ## differentiate long-double..
b64 <- .M$sizeof.pointer == 8
onWindows <- .Platform$OS.type == "windows"
## Do we have 64bit but no-long-double ?
arch <- Sys.info()[["machine"]]
b64nLD <- (b64 && .M$sizeof.longdouble != 16)
if(b64nLD) arch <- paste0(arch, "--no-long-double")
if(print.)
cat(sprintf("%d bit platform type '%s' ==> onWindows: %s\narch: %s\n",
if(b64) 64 else 32, .Platform$OS.type, onWindows, arch))
sInfo <- sessionInfo()
if(!exists("osVersion")) osVersion <- sInfo$running
if(print.) cat("osVersion (0):", osVersion, "\n")
if(is.null(osVersion)) osVersion <- "Fedora" # very last resort
if(!length(BLAS.is.LAPACK <- sInfo$BLAS == sInfo$LAPACK))
BLAS.is.LAPACK <- NA # R versions <= 3.3.x
## A cheap check (that works on KH's debian-gcc setup, 2019-05):
if(!length(BLAS.is.openBLAS <- grepl("openblas", sInfo$BLAS, ignore.case=TRUE)))
BLAS.is.openBLAS <- NA
if(!length(Lapack.is.openBLAS <- grepl("openblas", sInfo$LAPACK, ignore.case=TRUE)))
Lapack.is.openBLAS <- NA
if(print.)
cat("osVersion:", sQuote(osVersion), "\n"
,'+ BLAS "is" Lapack:', BLAS.is.LAPACK
, '| BLAS=OpenBLAS:', BLAS.is.openBLAS
, '| Lapack=OpenBLAS:', Lapack.is.openBLAS
, "\n")
## NB: sessionInfo() really gets these:
if(getRversion() >= "3.4") local({
is.BLAS.LAPACK <- exists("La_library", mode="function") && ## R 3.4.0 and newer
identical(La_library(), extSoftVersion()[["BLAS"]])
stopifnot(isTRUE(is.BLAS.LAPACK == BLAS.is.LAPACK))
})
## also TRUE for Windows [since both are "" !!]
  ## Find out if we are running Microsoft R Open
is.MS.Ropen <- {
file.exists(Rpr <- file.path(R.home("etc"), "Rprofile.site")) &&
length(lnsRpr <- readLines(Rpr)) &&
## length(grep("[Mm]icrosoft", lnsRpr)) > 3 # MRO 3.5.1 has '20' times "[Mm]icrosoft"
length(grep("Microsoft R Open", lnsRpr, fixed=TRUE, value=TRUE)) > 0 ## MRO 3.5.1 has it twice
}
if(print. && is.MS.Ropen) cat("We are running 'Microsoft R Open'\n")
  ## I'd really want to know which of (OpenBLAS | ATLAS | MKL | R's own BLAS+LAPACK)
##
## Next best, I'd really like
##
## strictR <- we_are_using_Rs_own_BLAS_and_Lapack() [ ==> BLAS != Lapack ]
##
## Actually the following aims to be equivalent to {and *is* for MM on Fedora, 2019-03}
## strictR <- !(using ATLAS || OpenBLAS || MKL )
if(TRUE) {
strictR <-
!BLAS.is.LAPACK && !is.MS.Ropen &&
!BLAS.is.openBLAS && !Lapack.is.openBLAS &&
TRUE
} else { ## workaround:
strictR <- print(Sys.info()[["user"]]) == "maechler"# actually
## but not when testing with /usr/bin/R [OpenBLAS on Fedora!] (as "maechler"):
if(strictR && substr(osVersion, 1,6) == "Fedora" && R.home() == "/usr/lib64/R")
strictR <- FALSE
}
if(print.) cat("strictR:", strictR, "\n")
structure(class = "moreSessionInfo",
list(
arch = arch
, b64 = b64 # 64-bit (:<==> sizeof.pointer == 8 )
, b64nLD = b64nLD # 64-bit, but --no-long-double (sizeof.longdouble != 16)
, BLAS.is.LAPACK = BLAS.is.LAPACK
, BLAS.is.openBLAS = BLAS.is.openBLAS
, Lapack.is.openBLAS = Lapack.is.openBLAS
, is.MS.Ropen = is.MS.Ropen # is R a version of Microsoft R Open (==> MKL-linked BLAS)
, onWindows = onWindows
, osVersion = osVersion
, strictR = strictR # are BLAS & Lapack from R's source, and "otherwise known safe platform"
))
}
if(getRversion() < "3.4.0") withAutoprint <- function(x, ...) x
if(isTRUE(getOption("chk.moreSessionInfo"))) withAutoprint({
ms1 <- moreSessionInfo()
ms. <- moreSessionInfo(print. = TRUE)
stopifnot(is.list(ms1), length(ms1) > 1, identical(ms1, ms.) )
})
|
/scratch/gouwar.j/cran-all/cranData/CLA/inst/xtraR/platform-sessionInfo.R
|
## Bottom of page 16 "5. A Numerical Example"
## of the (2013) paper about the Python implementation
## Edited into R code by Martin Maechler
LBound <- c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
UBound <- c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1)
Mean <- c(1.175, 1.19, 0.396, 1.12, 0.346, 0.679, 0.089, 0.73, 0.481, 1.08)
Cov <- c(0.4075516, 0.0317584, 0.9063047, 0.0518392, 0.0313639, 0.194909, 0.056639, 0.0268726, 0.0440849, 0.1952847, 0.0330226, 0.0191717, 0.0300677, 0.0277735, 0.3405911, 0.0082778, 0.0093438, 0.0132274, 0.0052667, 0.0077706, 0.1598387, 0.0216594, 0.0249504, 0.0352597, 0.0137581, 0.0206784, 0.0210558, 0.6805671, 0.0133242, 0.0076104, 0.0115493, 0.0078088, 0.0073641, 0.0051869, 0.0137788, 0.9552692, 0.0343476, 0.0287487, 0.0427563, 0.0291418, 0.0254266, 0.0172374, 0.0462703, 0.0106553, 0.3168158, 0.022499, 0.0133687, 0.020573, 0.0164038, 0.0128408, 0.0072378, 0.0192609, 0.0076096, 0.0185432, 0.1107929)
(p <- length(Mean))# 10
stopifnot(p == length(LBound), p == length(UBound),
p*(p+1)/2 == length(Cov))
require(Matrix)
options(width = 110)# so printing shows the matrix nicely
C <- matrix(0, p,p); C[ lower.tri(C, diag=TRUE) ] <- Cov
(C <- as(C, "Matrix")) # wrong
## try again
C <- matrix(0, p,p); C[ upper.tri(C, diag=TRUE) ] <- Cov
(C <- t(as(C, "Matrix")))# yes
##====== get data from
if(FALSE) # << maybe offline
str(a10 <- read.csv("http://www.quantresearch.info/CLA_Data.csv.txt"))
## MM: local copy in package source:
str(a10 <- read.csv("CLA_Data.csv.txt")
a10 <- as.matrix(a10)
str(mu.10 <- a10[1,])
stopifnot(a10[2,] == 0)# == Lower_bound
stopifnot(a10[3,] == 1)# == Upper_bound
str(cc <- a10[-(1:3),]) # 10 x 10
isSymmetric(cc, check.attributes=FALSE)
LT <- lower.tri(cc, diag=TRUE)
all.equal(cc[LT], C[LT]) # ==> mean relative difference: 3.59e-7
muS.10ex <- list(mu = mu.10, # including names "X1" .. "X10"
covar = unname(cc))
if(FALSE) ## as maintainer, did once
save(muS.10ex, file = "~/R/Pkgs/CLA/data/muS.10ex.rda", compress = "xz")
CLA.10ex <- with(muS.10ex, CLA(mu, covar, lB=0, uB=1)) # works after 'drop = FALSE' fix in getMatrices() !!
drop0(zapsmall(CLA.10ex$weights_set))
## or to look similar as in Alexander Norrington's M.thesis, p.33
## (but has 1st row doubled, *and* last row [lambda=0] missing (!?)
t(round(CLA.10ex$weights_set, 3))
## X1 X2 X3 X4 X5 X6 X7 X8 X9 X10
## [1,] 0.000 1.000 0.000 0.000 0.000 0.000 0 0.000 0.000 0.000
## [2,] 0.000 1.000 0.000 0.000 0.000 0.000 0 0.000 0.000 0.000
## [3,] 0.649 0.351 0.000 0.000 0.000 0.000 0 0.000 0.000 0.000
## [4,] 0.434 0.231 0.000 0.335 0.000 0.000 0 0.000 0.000 0.000
## [5,] 0.127 0.072 0.000 0.281 0.000 0.000 0 0.000 0.000 0.520
## [6,] 0.123 0.070 0.000 0.279 0.000 0.000 0 0.006 0.000 0.521
## [7,] 0.087 0.050 0.000 0.224 0.000 0.174 0 0.030 0.000 0.435
## [8,] 0.085 0.049 0.000 0.220 0.000 0.180 0 0.031 0.006 0.429
## [9,] 0.074 0.044 0.000 0.199 0.026 0.198 0 0.033 0.028 0.398
## [10,] 0.068 0.041 0.015 0.188 0.034 0.202 0 0.034 0.034 0.383
|
/scratch/gouwar.j/cran-all/cranData/CLA/inst/xtraR/simple_10_asset-ex.R
|
#' @importFrom stats qbeta var uniroot qnorm
#' @importFrom graphics plot points lines title par abline
NULL
|
/scratch/gouwar.j/cran-all/cranData/CLAST/R/CLAST.R
|
#' @export
CP.lower <-
function (x, n, a = 0.05)
{
# FUNCTION COMPUTES EXACT CONFIDENCE INTERVAL FOR
# BINOMIAL PROBABILITY BY INVERSION OF THE EXACT TEST.
#
# REQUIRED ARGUMENTS:
# x - observed value (scalar)
# n - number of trials
#
if (x > 0 & x < n) {lower <- stats::qbeta(a,x,n-x+1)}
if (x == 0) {lower <- 0}
if (x == n) {lower <- a^(1/n)}
lower
}
|
/scratch/gouwar.j/cran-all/cranData/CLAST/R/CP.lower.R
|
#' @export
CP.stats.SM <-
function(obj,alpha=0.05,type="upper"){
# Calculates all possible upper limits for p from an object
# that contains all possible data sets from a group sequential trial.
lims=NULL
for(h in 1:length(obj$S)){
if(type=="upper"){lims=c(lims,CP.upper(obj$S[h],sum(obj$design[,1][1:obj$M[h]]),a=alpha))}
if(type=="lower"){lims=c(lims,CP.lower(obj$S[h],sum(obj$design[,1][1:obj$M[h]]),a=alpha))}
}
out=obj
out$lims=signif(lims,6)
out
}
|
/scratch/gouwar.j/cran-all/cranData/CLAST/R/CP.stats.SM.R
|
#' @export
CP.upper <-
function (x, n, a = 0.05)
{
# FUNCTION COMPUTES EXACT CONFIDENCE INTERVAL FOR
# BINOMIAL PROBABILITY BY INVERSION OF THE EXACT TEST.
#
# REQUIRED ARGUMENTS:
# x - observed value (scalar)
# n - number of trials
#
if (x > 0 & x < n) {upper <- stats::qbeta(1-a,x+1,n-x)}
if (x == 0) {upper <- 1 - a^(1/n)}
if (x == n) {upper <- 1}
upper
}
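## Quick illustrative check (not part of the package): together the two functions give
## the usual Clopper-Pearson interval; e.g. x = 3 successes in n = 10 trials with
## a = 0.025 per tail reproduces binom.test()'s 95% confidence interval.
if(FALSE){
  c(CP.lower(3, 10, a = 0.025), CP.upper(3, 10, a = 0.025))
  stats::binom.test(3, 10)$conf.int
}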
|
/scratch/gouwar.j/cran-all/cranData/CLAST/R/CP.upper.R
|
#' @export
JT.rank.SM <-
function(obj){
# Calculates Jennison & Turnbull (1983) ranking of sample space
# REQUIRED ARGUMENT
# Object returned from function sample.space
#
r.s=RANK(obj$S+obj$decision*(dim(obj$design)[1]+10-obj$M)*100)
out=obj
out$lims=r.s
out
}
|
/scratch/gouwar.j/cran-all/cranData/CLAST/R/JT.rank.SM.R
|
#' @export
LR.lower <-
function(x,n,a=0.05,epsilon=1e-18){
if(x==0){lower = 0}
if(x>0){
loglik=function(p,x,n,offset=0,epsilon=1e-18){
lmax=x*log((x+epsilon)/(n+2*epsilon))+(n-x)*log((n-x+epsilon)/(n+2*epsilon))
2*(lmax-x*log(p+epsilon)-(n-x)*log(1-p+epsilon))-offset
}
lower=stats::uniroot(loglik,c(0,x/n),x=x,n=n,offset=stats::qnorm(1-a)^2,epsilon=epsilon)$root
}
lower
}
|
/scratch/gouwar.j/cran-all/cranData/CLAST/R/LR.lower.R
|
#' @export
LR.stats.SM <-
function(obj,alpha=0.05,type="upper"){
# Calculates all possible upper limits for p from an object
# that contains all possible data sets from a group sequential trial.
lims=NULL
for(h in 1:length(obj$S)){
if(type=="upper"){
lims=c(lims,LR.upper(obj$S[h],
sum(obj$design[,1][1:obj$M[h]]),a=alpha))}
if(type=="lower"){
lims=c(lims,LR.lower(obj$S[h],
sum(obj$design[,1][1:obj$M[h]]),a=alpha))}
}
out=obj
out$lims=signif(lims,6)
out
}
|
/scratch/gouwar.j/cran-all/cranData/CLAST/R/LR.stats.SM.R
|
#' @export
LR.upper <-
function(x,n,a=0.05,epsilon=1e-18){
if(x==n){upper = 1}
if(x<n){
loglik=function(p,x,n,offset=0,epsilon=1e-18){
lmax=x*log((x+epsilon)/(n+2*epsilon))+(n-x)*log((n-x+epsilon)/(n+2*epsilon))
2*(lmax-x*log(p+epsilon)-(n-x)*log(1-p+epsilon))-offset
}
upper=stats::uniroot(loglik,c(x/n,1),x=x,n=n,offset=stats::qnorm(1-a)^2,epsilon=epsilon)$root
}
upper
}
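## Quick illustrative comparison (not part of the package): for the same data and level,
## the likelihood-ratio limits are typically somewhat narrower than Clopper-Pearson.
if(FALSE){
  c(LR.lower(3, 10), LR.upper(3, 10))
  c(CP.lower(3, 10), CP.upper(3, 10))
}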
|
/scratch/gouwar.j/cran-all/cranData/CLAST/R/LR.upper.R
|
#' @export
ML.rank.SM <-
function(obj){
  # Calculates the ML estimator of p for each element of the sample space.
# This is a potential ranking for exact limits.
# REQUIRED ARGUMENT
# Object returned from function sample.space
#
out=obj
out$P=obj$S/cumsum(obj$design[,1])[obj$M]
out$lims=signif(out$P,6)
out
}
|
/scratch/gouwar.j/cran-all/cranData/CLAST/R/ML.rank.SM.R
|
#' @export
RANK <-
function(x){
# Produces ranking of consecutive integers from (1:unique(x))-1
r.x=rank(x)
index=1:length(r.x)
index=index[order(r.x)]
r.x=sort(r.x)
r.x=c(0,cumsum(sign(diff(rank(r.x)))))
r.x[order(index)]+1
}
|
/scratch/gouwar.j/cran-all/cranData/CLAST/R/RANK.R
|
#' @export
cross <-
function(A,v){
out=NULL
for(i in 1:length(v)){out=rbind(out,cbind(as.matrix(A),v[i]))}
out
}
|
/scratch/gouwar.j/cran-all/cranData/CLAST/R/cross.R
|
#' @export
errors.SM <-
function(n,a,b,p0,p1){
data=sample.space.SM(n,a,b)
probs.SM=function(obj,p){obj$count*p^obj$S*(1-p)^(obj$N-obj$S)}
list(type1=sum(probs.SM(data,p0)*data$decision),
type2=sum(probs.SM(data,p1)*(1-data$decision)))
}
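## Hypothetical illustration (design values are made up): operating characteristics of a
## two-stage design with 10 subjects per stage, stopping after stage 1 for <= 2 (accept H0)
## or >= 7 (conclude H1) responses, and concluding H1 at the end for >= 7 responses in total:
if(FALSE){
  errors.SM(n = c(10, 10), a = c(2, 6), b = c(7, 7), p0 = 0.3, p1 = 0.6)
}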
|
/scratch/gouwar.j/cran-all/cranData/CLAST/R/errors.SM.R
|
#' @export
exact.lower.limits.SM <-
function(obj,lims=NULL,alpha=0.05,set=FALSE){
# Calculates Buehler upper limit for all possible data sets.
# There is protection for no solution. Largest value is automatically set to 1.
#
# ARGUMENTS:
# obj - produced from sample.space.SM,must have components data and lims
# set - if true then artificial limits of 0 are modified to min(u:u>0) as described in Kabaila and Lloyd (2003).
#
probs.SM=function(obj,p){obj$count*p^obj$S*(1-p)^(obj$N-obj$S)}
pr.tail.SM=function(p,obj,lims,J,alpha=0.05){sum(probs.SM(obj,p)[lims<=lims[J]])-alpha}
#
if(missing(lims)){lims=obj$lims}
lims=-lims
exact.lims=NULL
for(J in 1:(length(lims))){
if(lims[J]==max(lims)){exact.lims=c(exact.lims,0)}
if(lims[J]<max(lims)){
test=sign(pr.tail.SM(0,obj,lims,J,alpha)*pr.tail.SM(1,obj,lims,J,alpha))
if(test>=0){exact.lims=c(exact.lims,1)}
if(test<0){exact.lims=c(exact.lims,uniroot(pr.tail.SM,c(0,1),obj=obj,lims=lims,J=J,alpha=alpha)$root)}
}
}
if(set){
exact.lims[exact.lims==1]=max(exact.lims[exact.lims<1])
}
for(v in 1:length(lims)){lims[lims==lims[v]]=exact.lims[v]}
signif(exact.lims,6)
}
|
/scratch/gouwar.j/cran-all/cranData/CLAST/R/exact.lower.limits.SM.R
|
#' @export
exact.upper.limits.SM <-
function(obj,lims=NULL,alpha=0.05,set=FALSE){
# Calculates Buehler upper limit for all possible data sets.
# There is protection for no solution. Largest value is automatically set to 1.
#
# ARGUMENTS:
# obj - produced from sample.space.SM,must have components data and lims
# set - if true then artificial limits of 0 are modified to min(u:u>0) as described in Kabaila and Lloyd (2003).
#
probs.SM=function(obj,p){obj$count*p^obj$S*(1-p)^(obj$N-obj$S)}
pr.tail.SM=function(p,obj,lims,J,alpha=0.05){sum(probs.SM(obj,p)[lims<=lims[J]])-alpha}
#
if(missing(lims)){lims=obj$lims}
exact.lims=NULL
for(J in 1:(length(lims))){
if(lims[J]==max(lims)){exact.lims=c(exact.lims,1)}
if(lims[J]<max(lims)){
test=sign(pr.tail.SM(0,obj,lims,J,alpha)*pr.tail.SM(1,obj,lims,J,alpha))
if(test>=0){exact.lims=c(exact.lims,0)}
if(test<0){exact.lims=c(exact.lims,uniroot(pr.tail.SM,c(0,1),obj=obj,lims=lims,J=J,alpha=alpha)$root)}
}
}
if(set){exact.lims[exact.lims==0]=min(exact.lims[exact.lims>0])}
for(v in 1:length(lims)){lims[lims==lims[v]]=exact.lims[v]}
signif(exact.lims,6)
}
|
/scratch/gouwar.j/cran-all/cranData/CLAST/R/exact.upper.limits.SM.R
|
#' @export
inference <-
function(n,a,b,y,alpha=.05,type="LR"){
  # Calculates exact upper and lower limits after a group sequential trial
#
m=length(y)
s=sum(y)
data.SM=sample.space.SM(n,a,b)
i=(data.SM$M==m)&(data.SM$S==s) #Select data set from sample space
if(sum(i)==0){stop("Impossible observation vector for given design")}
#
if(type=="CP"){
upper.lims=exact.upper.limits.SM(CP.stats.SM(data.SM,alpha=alpha,type="upper"),alpha=alpha,set=T)
lower.lims=exact.lower.limits.SM(CP.stats.SM(data.SM,alpha=alpha,type="lower"),alpha=alpha,set=T)
}
#
if(type=="LR"){
upper.lims=exact.upper.limits.SM(LR.stats.SM(data.SM,alpha=alpha,type="upper"),alpha=alpha,set=T)
lower.lims=exact.lower.limits.SM(LR.stats.SM(data.SM,alpha=alpha,type="lower"),alpha=alpha,set=T)
}
#
if(type=="JT"){
upper.lims=exact.upper.limits.SM(JT.rank.SM(data.SM),alpha=alpha,set=T)
lower.lims=exact.lower.limits.SM(JT.rank.SM(data.SM),alpha=alpha,set=T)
}
#
if(type=="ML"){
upper.lims=exact.upper.limits.SM(ML.rank.SM(data.SM),alpha=alpha,set=T)
lower.lims=exact.lower.limits.SM(ML.rank.SM(data.SM),alpha=alpha,set=T)
}
#
list(lower=lower.lims[i],upper=upper.lims[i],est=s/sum(n[1:m]),type=type)
}
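## Hypothetical usage sketch (made-up design and data): exact limits after observing 4
## responses in stage 1 and 5 in stage 2 of a two-stage design with n = c(10, 10),
## a = c(2, 6) and b = c(7, 7):
if(FALSE){
  inference(n = c(10, 10), a = c(2, 6), b = c(7, 7), y = c(4, 5), alpha = 0.05, type = "LR")
}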
|
/scratch/gouwar.j/cran-all/cranData/CLAST/R/inference.R
|
#' @export
mv.SM <-
function(obj,lims=NULL,p=NULL,B=99,offset=TRUE,wgt=TRUE){
# Calculates mean value of upper limits in lims as a function of p
# If offset=T then MLE is subtracted.
# If wgt=T then outcome of all successes is given probability zer0.
#
probs.SM=function(obj,p){obj$count*p^obj$S*(1-p)^(obj$N-obj$S)}
if(missing(lims)){lims=obj$lims}
if(missing(p)){p=(1:B)/(B+1)}
take=(lims==1)
lims=lims-offset*obj$S/obj$N
mv=NULL
for(P in as.vector(p)){
prob.dist=probs.SM(obj,P)
if(wgt){
prob.dist[take]=0
prob.dist=prob.dist/sum(prob.dist)
}
mv=c(mv,sum(prob.dist*lims))
}
list(x=p,y=mv)
}
|
/scratch/gouwar.j/cran-all/cranData/CLAST/R/mv.SM.R
|
#' @export
mv.plots.SM <-
function(n,a,b,type="interval",B=100,offset=TRUE,plt=c(1,1,1),p0=NULL,p1=NULL,focus=FALSE){
# reset "par" using on.exit()
init_par <- graphics::par(no.readonly = TRUE)
on.exit(graphics::par(init_par))
data.SM=sample.space.SM(n,a,b)
#
if(stats::var(c(length(n),length(a),length(b)))>0){stop("Unequal lengths")}
K=length(n)
if(b[K]-a[K]>1){stop("Last boundary is not exclusive")}
if(sum(a<b)<K){stop("Check valid vectors")}
if(sum(a<b)<K){stop("Check valid vectors")}
if(min(a)<(-1)){stop("Check valid vectors")}
if(b[1]>n[1]+1){stop("Check valid vectors")}
#
x=(1:B)/(B+1)
if(focus){x=p0+(p1-p0)*x}
#
if((type=="upper")|(type=="interval")){
exact.lr.upper.lims=exact.upper.limits.SM(LR.stats.SM(data.SM,type="upper"),set=T)
exact.cp.upper.lims=exact.upper.limits.SM(CP.stats.SM(data.SM,type="upper"),set=T)
jt.lims=JT.rank.SM(data.SM)$lims
exact.jt.upper.lims=exact.upper.limits.SM(data.SM,lims=jt.lims,set=T)
ml.lims=ML.rank.SM(data.SM)$lims
exact.ml.upper.lims=exact.upper.limits.SM(data.SM,lims=ml.lims,set=T)
}
#
if((type=="lower")|(type=="interval")){
exact.lr.lower.lims=exact.lower.limits.SM(LR.stats.SM(data.SM,type="lower"),set=T)
exact.cp.lower.lims=exact.lower.limits.SM(CP.stats.SM(data.SM,type="lower"),set=T)
jt.lims=JT.rank.SM(data.SM)$lims
exact.jt.lower.lims=exact.lower.limits.SM(data.SM,lims=jt.lims,set=T)
ml.lims=ML.rank.SM(data.SM)$lims
exact.ml.lower.lims=exact.lower.limits.SM(data.SM,lims=ml.lims,set=T)
}
#
value=NULL
if((type=="upper")|(type=="interval")){
cp.upper.mv=mv.SM(data.SM,exact.cp.upper.lims,B=B,p=x,offset=offset)$y
lr.upper.mv=mv.SM(data.SM,exact.lr.upper.lims,B=B,p=x,offset=offset)$y
jt.upper.mv=mv.SM(data.SM,exact.jt.upper.lims,B=B,p=x,offset=offset)$y
ml.upper.mv=mv.SM(data.SM,exact.ml.upper.lims,B=B,p=x,offset=offset)$y
value$cp.upper.mv=cp.upper.mv
value$lr.upper.mv=lr.upper.mv
value$jt.upper.mv=jt.upper.mv
value$ml.upper.mv=ml.upper.mv
}
if((type=="lower")|(type=="interval")){
cp.lower.mv=mv.SM(data.SM,exact.cp.lower.lims,B=B,p=x,offset=offset)$y
lr.lower.mv=mv.SM(data.SM,exact.lr.lower.lims,B=B,p=x,offset=offset)$y
jt.lower.mv=mv.SM(data.SM,exact.jt.lower.lims,B=B,p=x,offset=offset)$y
ml.lower.mv=mv.SM(data.SM,exact.ml.lower.lims,B=B,p=x,offset=offset)$y
value$cp.lower.mv=cp.lower.mv
value$lr.lower.mv=lr.lower.mv
value$jt.lower.mv=jt.lower.mv
value$ml.lower.mv=ml.lower.mv
}
# Plotting follows
graphics::par(mfrow=c(1,sum(plt)))
#
if(plt[1]==1){
graphics::plot(range(x),range(c(jt.upper.mv-ml.upper.mv,cp.upper.mv-ml.upper.mv,lr.upper.mv-ml.upper.mv)),
type="n",xlab="p",ylab="",las=1)
graphics::lines(x,jt.upper.mv-ml.upper.mv,col="red")
graphics::lines(x,cp.upper.mv-ml.upper.mv,col="green")
graphics::lines(x,lr.upper.mv-ml.upper.mv,col="blue")
graphics::abline(h=0,lty=3)
graphics::title(main="Mean exact upper limits")
graphics::title(sub="JT(red),LR(blue),CP(green)")
if(!missing(p0)){graphics::abline(v=p0,lty=3)}
if(!missing(p1)){graphics::abline(v=p1,lty=3)} }
#
if(plt[2]==1){
graphics::plot(range(x),range(c(jt.lower.mv-ml.lower.mv,cp.lower.mv-ml.lower.mv,lr.lower.mv-ml.lower.mv)),type="n",xlab="p",
ylab="",las=1)
graphics::lines(x,jt.lower.mv-ml.lower.mv,col="red")
graphics::lines(x,cp.lower.mv-ml.lower.mv,col="green")
graphics::lines(x,lr.lower.mv-ml.lower.mv,col="blue")
graphics::abline(h=0,lty=3)
graphics::title(main="Mean exact lower limits")
graphics::title(sub="JT(red),LR(blue),CP(green)")
if(!missing(p0)){graphics::abline(v=p0,lty=3)}
if(!missing(p1)){graphics::abline(v=p1,lty=3)} }
#
ml.int.mv=ml.upper.mv-ml.lower.mv
cp.int.mv=cp.upper.mv-cp.lower.mv
lr.int.mv=lr.upper.mv-lr.lower.mv
jt.int.mv=jt.upper.mv-jt.lower.mv
#
if(plt[3]==1){
y1=jt.int.mv-ml.int.mv
y2=lr.int.mv-ml.int.mv
y3=cp.int.mv-ml.int.mv
graphics::plot(range(x),range(c(y1,y2,y3)),type="n",las=1,xlab="p",ylab="",las=1)
graphics::lines(x,jt.int.mv-ml.int.mv,col="red")
graphics::lines(x,lr.int.mv-ml.int.mv,col="blue")
graphics::lines(x,cp.int.mv-ml.int.mv,col="green")
graphics::abline(h=0,lty=3)
graphics::title(main="Mean interval width")
if(!missing(p0)){graphics::abline(v=p0,lty=3)}
if(!missing(p1)){graphics::abline(v=p1,lty=3)}
graphics::title(sub="JT(red),LR(blue),CP(green)")
}
value
}
|
/scratch/gouwar.j/cran-all/cranData/CLAST/R/mv.plots.SM.R
|
#' @export
plt.sample.space.SM <-
function(n,a,b,p0=NULL,p1=NULL){
# Plots decision function in S-M space with boundary vectors "a" and "b". If p0 and p1 are
# provided then the type 1 and type 2 error are displayed in the graphic title
#
space.SM=sample.space.SM(n,a,b)
K=dim(space.SM$design)[1]
graphics::plot(space.SM$M,space.SM$S,type="n",xlab="stage stopped",ylab="total responses",lab=c(K,6,K))
graphics::points(space.SM$M[space.SM$decision==1],space.SM$S[space.SM$decision==1],pch=20)
graphics::points(space.SM$M[space.SM$decision==0],space.SM$S[space.SM$decision==0])
graphics::lines(1:K,space.SM$design[,2],lty=3)
graphics::lines(1:K,space.SM$design[,3],lty=3)
if(!missing(p0)&!missing(p1)){
err=errors.SM(n,a,b,p0,p1)
err$type1=round(err$type1*1000)/1000
err$type2=round(err$type2*1000)/1000
lab=paste("alpha=",err$type1,", beta=",err$type2,sep="")
graphics::title(main=lab)
}
NULL
}
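## Hypothetical usage sketch (made-up design): plot the stopping/decision regions and
## annotate the title with the error rates at p0 = 0.3 and p1 = 0.6:
if(FALSE){
  plt.sample.space.SM(n = c(10, 10), a = c(2, 6), b = c(7, 7), p0 = 0.3, p1 = 0.6)
}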
|
/scratch/gouwar.j/cran-all/cranData/CLAST/R/plt.sample.space.SM.R
|
#' @export
prob.SM <-
function(data,p,m=NULL,s=NULL){
data.take=data$Y[(data$M==m)&(data$S==s),]
data.take=matrix(data.take,ncol=dim(data$Y)[2])
  count=0
  if(length(data.take)>0){
for(g in 1:dim(data.take)[1]){
count=count+prod(choose(data$design[1:m,1],data.take[g,data.take[g,]>=0]))
}
}
list(prob=count*p^s*(1-p)^(sum(data$design[1:m,1])-s),
count=count,
subcount=dim(data.take)[1],
data=data.take)
}
|
/scratch/gouwar.j/cran-all/cranData/CLAST/R/prob.SM.R
|
#' @export
sample.space.2 <-
function(n,a,b){
# Creates all possible samples from a TWO group sequential trial with possible one-sided stopping at stage 1.
#
# VALUE
# list with components
# Y - matrix with K columns listing all possible data sets. Outcomes where the trial does not proceed are coded -1.
# M - number of executed trials corresponding to each data set in Y
# S - total successes corresponding to each data set in Y
# decision - to conclude H1 or H0
#
# Error checks
if(var(c(length(n),length(a),length(b)))>0){stop("Unequal lengths")}
K=length(n)
if(b[K]-a[K]>1){stop("Last boundary is not exclusive")}
if(sum(a<b)<K){stop("Check valid vectors")}
if(sum(a<b)<K){stop("Check valid vectors")}
if(min(a)<(-1)){stop("Check valid vectors")}
if(b[1]>n[1]+1){stop("Check valid vectors")}
#
if(a[1]<0){Y=as.matrix(c(b[1]:n[1]))}
if(b[1]>n[1]){Y=as.matrix(c(0:a[1]))}
if((a[1]>=0)&(b[1]<=n[1])){Y=as.matrix(c(0:a[1],b[1]:n[1]))}
S=Y
M=0*Y+1
decision=1*(Y>a[1])
#
Y=cbind(Y,0*Y-1)
#
for(s in (a[1]+1):a[2]){
for(y1 in max((a[1]+1),s-n[2]):min(s,(b[1]-1))){
decision=c(decision,0)
S=c(S,s)
M=c(M,2)
Y=rbind(Y,c(y1,s-y1))
}
}
for(s in b[2]:(b[1]-1+n[2])){
for(y1 in max((a[1]+1),s-n[2]):min(s,(b[1]-1))){
decision=c(decision,1)
S=c(S,s)
M=c(M,2)
Y=rbind(Y,c(y1,s-y1))
}
}
#
cbind(Y,S,M,decision)
#
list(Y=Y,M=M,S=S,decision=decision,design=cbind(n,a,b))
}
|
/scratch/gouwar.j/cran-all/cranData/CLAST/R/sample.space.2.R
|
#' @export
sample.space <-
function(n,a,b){
# Creates all possible samples from a multi-stage group sequential trial.
#
# VALUE
# list with components
# Y - matrix with K columns listing all possible data sets. Outcomes where the trial does not proceed are coded -1.
# M - number of executed trials corresponding to each data set in Y
# S - total successes corresponding to each data set in Y
# decision - to conclude H1 or H0
#
# Error checks
if(stats::var(c(length(n),length(a),length(b)))>0){stop("Unequal lengths")}
K=length(a)
if(b[K]-a[K]>1){stop("Last boundary is not exclusive")}
if(sum((a<=cumsum(n))*(b<=cumsum(n)*(a<b)))<K){stop("Check valid vectors")}
#
K=length(a)
if(a[1]<0){Y=as.matrix(c(b[1]:n[1]))}
if(a[1]>=0){Y=as.matrix(c(0:a[1],b[1]:n[1]))}
for(j in 1:(K-1)){Y=cbind(Y,rep(-1,dim(Y)[1]))}
decision=c(rep(0,a[1]+1),rep(1,n[1]-b[1]+1))
#
tmp=as.matrix(0:n[1])
H0=(tmp<=a[1])
H1=(tmp>=b[1])
cont=!(H0|H1)
#
if(K>=2){
for(k in 2:(K-1)){
tmp=cross(tmp[cont,],0:n[k])
S=apply(tmp,1,sum)
H0=(S<=a[k])
H1=(S>=b[k])
cont=!(H0|H1)
out=as.matrix(tmp[H0|H1,])
decision=c(decision,apply(out,1,sum)>=b[k])
for(j in 1:(K-dim(out)[2])){out=cbind(out,rep(-1,dim(out)[1]))}
Y=rbind(Y,out)
}
}
out=cross(tmp[cont,],0:n[K])
decision=c(decision,apply(out,1,sum)>=b[K])
Y=rbind(Y,out)
#
M=apply(Y>=0,1,sum)
S=apply(Y,1,sum)+(K-M)
Y=Y[order(S),]
M=M[order(S)]
decision=decision[order(S)]
S=sort(S)
Y=Y[order(M),]
S=S[order(M)]
decision=decision[order(M)]
M=sort(M)
#
list(Y=Y,M=M,S=S,decision=decision,design=cbind(n,a,b))
}
|
/scratch/gouwar.j/cran-all/cranData/CLAST/R/sample.space.R
|
#' @export
sample.space.SM <-
function(n,a,b){
# Creates a sample space object at the level of individual stage counts (for K=2 or K>2)
#
K=length(n)
if(b[K]-a[K]>1){stop("Last boundary is not exclusive")}
if(sum(a<b)<K){stop("Check valid vectors")}
if(sum(a<b)<K){stop("Check valid vectors")}
if(min(a)<(-1)){stop("Check valid vectors")}
if(b[1]>n[1]+1){stop("Check valid vectors")}
#
if(K==2){obj=sample.space.2(n,a,b)}
if(K>2){obj=sample.space(n,a,b)}
tab=table(obj$S,obj$M)
mvals=as.numeric(colnames(tab))
svals=as.numeric(rownames(tab))
tab.M=matrix(rep(mvals,length(svals)),nrow=length(svals),byrow=T)
tab.S=matrix(rep(svals,length(mvals)),nrow=length(svals))
out=NULL
out$M=tab.M[tab>0]
out$S=tab.S[tab>0]
count=subcount=decision=N=NULL
for(g in 1:length(out$M)){
N=c(N,cumsum(obj$design[,1])[out$M[g]])
tmp=prob.SM(obj,0,m=out$M[g],s=out$S[g])
count=c(count,tmp$count)
subcount=c(subcount,tmp$subcount)
decision=c(decision,mean(obj$decision[obj$M==out$M[g]&obj$S==out$S[g]]))
}
out$N=N
out$count=count
out$subcount=subcount
out$decision=decision
out$design=obj$design
out
}
|
/scratch/gouwar.j/cran-all/cranData/CLAST/R/sample.space.SM.R
|
##
## This is the PACKAGE documentation
##
#' Constrained inference for linear mixed models.
#'
#' @docType package
#' @name CLME-package
#' @rdname CLME-package
#'
#' @description
#' Constrained inference on linear fixed and mixed models using residual bootstrap.
#' Covariates and random effects are permitted but not required.
#'
#' Appropriate credit should be given when publishing results obtained using \pkg{CLME}, or when
#' developing other programs/packages based off of this one. Use \code{citation(package="CLME")}
#' for Bibtex information.
#'
#' The work was produced in part with funding from the Intramural Research Program of the NIH,
#' National Institute of Environmental Health Sciences (Z01 ES101744).
#'
#'
#' @details
#'
#' This package was introduced in Jelsema and Peddada (2016). The primary function is \code{\link{clme}}.
#' The other functions in this package may be run separately, but in general are designed for use by \code{\link{clme}}.
#'
#' The method which is implemented is the constrained linear mixed effects model described in
#' Farnan, Ivanova, and Peddada (2014). See that paper for more details regarding the method.
#' Here we give a brief overview of the assumed model:
#'
#' \deqn{ Y = X_{1}\theta_{1} + X_{2}\theta_{2} + U\xi + \epsilon }{Y = X1*theta1 + X2*theta2 + U*xi + e}
#'
#' where
#'
#' \itemize{
#' \item \eqn{X_1}{X1} is a \eqn{N \times p_1}{N x p1} design matrix.
#' \item \eqn{\theta_1}{theta1} are the coefficients (often treatment effects).
#' \item \eqn{X_2}{X2} is a \eqn{N \times p_2}{N x p2} matrix of fixed covariates.
#' \item \eqn{\theta_2}{theta2} are the coefficients for the covariates.
#' \item \eqn{U}{U} is a \eqn{N \times c}{N x c} matrix of random effects.
#' \item \eqn{\xi}{xi} is a zero-mean random vector with covariance \eqn{T}{T} (see below).
#' \item \eqn{\epsilon}{e} is a zero-mean random vector with covariance \eqn{\Sigma}{Sigma} (see below).
#' }
#'
#' Neither covariates (\eqn{X_2}{X2}) nor random effects (\eqn{U}{U}) are required by the model or \pkg{CLME}. The covariance matrix of \eqn{\xi}{xi} is given by:
#'
#' \deqn{ T = diag\left( \tau^{2}_{1}I_{c_{1}}, \tau^{2}_{2}I_{c_{2}} , \dots , \tau^{2}_{q}I_{c_{q}} \right) }{ T = diag( tau1^2 I_c1, tau2^2 I_c2 , ... , tauq^2 I_cq) }
#'
#' The first \eqn{c_{1}}{c1} random effects will share a common variance, \eqn{\tau^{2}_{1}}{tau1^2}, the next \eqn{c_{2}}{c2} random effects will share a common variance, and so on. Note that \eqn{c = \sum_{i=1}^{q} c_i}{c = SUM(ci), i=1,...q}. Homogeneity of variances in the random effects can be induced by letting \eqn{q=1}{q=1} (hence \eqn{c_{1}=c=ncol(U)}{c1=c=ncol(U)}).
#'
#' Similarly, the covariance matrix of \eqn{\epsilon}{e} is given by:
#'
#' \deqn{ \Sigma = diag\left( \sigma^{2}_{1}I_{n_{1}}, \sigma^{2}_{2}I_{n_{2}} , \dots , \sigma^{2}_{k}I_{n_{k}} \right) }{ Sigma = diag( sigma1^2 I_n1, sigma2^2 I_n2 , ... , sigmak^2 I_nk)}
#'
#' Again, the first \eqn{n_{1}}{n1} observations will share a common variance, \eqn{\sigma^{2}_{1}}{sigma1^2}, the next \eqn{n_{2}}{n2} will share a common variance, and so on. Note that \eqn{N = \sum_{i=1}^{k} n_i}{N = SUM(n_i), i=1,...k}. Homogeneity of variances in the residuals can be induced by letting \eqn{k=1}{k=1}.
#'
#' The order constraints are defined by the matrix \eqn{A}{A}. This is an \eqn{r \times p}{r x p} matrix where \eqn{r}{r} is the number of constraints, and \eqn{p=p_{1}+p_{2}}{p = p1 + p2} is the dimension of \eqn{ \theta = ( \theta_{1}' , \theta_{2}')'}{ theta = ( theta1' , theta2')'}. Formally the hypothesis being tested is:
#'
#' \deqn{ H_{a}: A\theta > 0 }{Ha: A*theta > 0 }
#'
#' For several default orders (simple, umbrella, simple tree) the \eqn{A}{A} matrix can be automatically generated. Alternatively, the user may define a custom \eqn{A}{A} matrix to test other patterns among the elements of \eqn{\theta}{theta}. See \code{\link{create.constraints}} and \code{\link{clme}} for more details.
#'
#' For computational reasons, the implementation is not identical to the model expressed. In particular, the fixed-effects matrix (or matrices) and the random effects matrix are assumed to be columns in a data frame, not passed as matrices. The \eqn{A}{A} matrix is not \eqn{r \times p}{r x p}, but \eqn{r \times 2}{r x 2}, where each row gives the indices of the constrained coefficients. See \code{\link{create.constraints}} for further explanation.
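#'
#' For instance (an illustrative sketch, not an excerpt from the published documentation),
#' a simple increasing order among three constrained coefficients can be requested either
#' by name or built explicitly:
#' \preformatted{
#'   cons  <- list(order = "simple", node = 1, decreasing = FALSE)
#'   const <- create.constraints(P1 = 3, constraints = cons)
#'   const$A      # one row of coefficient indices per pairwise comparison
#' }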
#'
#'
#' The creation of this package \pkg{CLME}, this manual, and the vignette were all supported by the Intramural Research Program of the United States' National Institutes of Health (Z01 ES101744).
#'
#' @references
#' Jelsema, C. M. and Peddada, S. D. (2016).
#' CLME: An R Package for Linear Mixed Effects Models under Inequality Constraints.
#' \emph{Journal of Statistical Software}, 75(1), 1-32. doi:10.18637/jss.v075.i01
#'
#' Farnan, L., Ivanova, A., and Peddada, S. D. (2014).
#' Linear Mixed Effects Models under Inequality Constraints with Applications.
#' \emph{PLOS ONE}, 9(1). e84778. doi: 10.1371/journal.pone.0084778
#'
#'
#' @import methods
#' @import stats
#'
#'
#'
"_PACKAGE"
#' @title
#' Fibroid Growth Study
#'
#' @description
#' This data set contains a subset of the data from the Fibroid Growth Study.
#'
#' \tabular{rll}{
#' [,1] \tab ID \tab ID for subject. \cr
#' [,2] \tab fid \tab ID for fibroid (each women could have multiple fibroids). \cr
#' [,3] \tab lfgr \tab log fibroid growth rate. See details. \cr
#' [,4] \tab age \tab age category Younger, Middle, Older. \cr
#' [,5] \tab loc \tab location of fibroid, corpus, fundus, or lower segment. \cr
#' [,6] \tab bmi \tab body mass index of subject. \cr
#' [,7] \tab preg \tab parity, whether the subject had delivered a child. \cr
#' [,8] \tab race \tab race of subject (Black or White only). \cr
#' [,9] \tab vol \tab initial volume of fibroid. \cr
#' }
#'
#' @details
#' The response variable \code{lfgr} was calculated as the change in log fibroid volume,
#' divided by the length of time between measurements. The growth rates were averaged to produce
#' a single value for each fibroid, which was scaled to represent a 6-month percent change in volume.
#'
#' @references
#' Peddada, Laughlin, Miner, Guyon, Haneke, Vahdat, Semelka, Kowalik, Armao, Davis, and Baird(2008).
#' Growth of Uterine Leiomyomata Among Premenopausal Black and White Women.
#' Proceedings of the National Academy of Sciences of the United States of America, 105(50),
#' 19887-19892. URL \url{http://www.pnas.org/content/105/50/19887.full.pdf}.
#'
#'
#' @docType data
#' @keywords datasets
#' @name fibroid
#' @usage data(fibroid)
#' @format A data frame containing 240 observations on 9 variables.
"fibroid"
#' @title
#' Experiment on rats
#'
#' @description
#' This data set contains the data from an experiment on 24 Sprague-Dawley rats from Cora et al (2012).
#'
#' \tabular{rll}{
#' [,1] \tab id \tab ID for rat (factor). \cr
#' [,2] \tab time \tab time period (in order, 0 , 6, 24, 48, 72, 96 hours). \cr
#' [,3] \tab temp \tab storage temperature reference (\code{''Ref''}) vs. room temperature (\code{''RT''}). \cr
#' [,4] \tab sex \tab sex, male (\code{''Male''}) vs. female (\code{''Female''}). Coded as \code{''Female''=1}. \cr
#' [,5] \tab wbc \tab white blood cell count (\eqn{10^3 / \mu L}{10^3 / mu L}). \cr
#'   [,2] \tab rbc  \tab red blood cell count (\eqn{10^6 / \mu L}{10^6 / mu L}). \cr
#' [,7] \tab hgb \tab hemoglobin concentration (g/dl). \cr
#' [,8] \tab hct \tab hematocrit (\%). \cr
#'   [,9] \tab spun \tab spun hematocrit (HCT \%). \cr
#' [,10] \tab mcv \tab MCV, a measurement of erythrocyte volume (fl). \cr
#'   [,11] \tab mch \tab mean corpuscular hemoglobin (pg). \cr
#' [,12] \tab mchc \tab mean corpuscular hemoglobin concentration (g/dl). \cr
#' [,13] \tab plts \tab platelet count (\eqn{10^3 / \mu L}{10^3 / mu L}). \cr
#' }
#'
#' @details
#' Blood samples from each rat were stored either refrigerated (\code{''Ref''}) or at room
#' temperature (\code{''RT''}), and the hematologic parameters listed above were measured at
#' each storage time from 0 to 96 hours (Cora et al., 2012).
#'
#' @references
#' Cora M, King D, Betz L, Wilson R, and Travlos G (2012).
#' Artifactual changes in Sprague-Dawley rat hematologic parameters after storage of samples at 3 C and 21 C.
#' Journal of the American Association for Laboratory Animal Science, 51(5), 616-621.
#' URL \url{http://www.ncbi.nlm.nih.gov/pmc/articles/PMC3447451/}.
#'
#'
#' @docType data
#' @keywords datasets
#' @name rat.blood
#' @usage data(rat.blood)
#' @format A data frame containing 241 observations on 13 variables.
"rat.blood"
# library("shiny")
# library("lme4")
# library("isotone")
# library("stringr")
# library("prettyR")
# library("openxlsx")
# devtools::check( devtools::as.package("../CLME_2.0-12.tar.gz") )
# devtools::check( devtools::as.package("C:/Users/statman/Documents/00-Packages/CLME_2.0-12.tar.gz") )
|
/scratch/gouwar.j/cran-all/cranData/CLME/R/00Package.r
|
#' @description \code{clme_em_fixed} performs a constrained EM algorithm for linear fixed effects models.
#'
#' @rdname clme_em
#' @export
#'
clme_em_fixed <- function( Y, X1, X2 = NULL, U = NULL, Nks = dim(X1)[1],
Qs = dim(U)[2], constraints, mq.phi = NULL, tsf = lrt.stat,
tsf.ind = w.stat.ind, mySolver="LS", em.iter = 500,
em.eps = 0.0001, all_pair = FALSE, dvar = NULL, verbose = FALSE, ... ){
if( verbose==TRUE ){
message("Starting EM algorithm")
}
N <- sum(Nks)
N1 <- 1 + cumsum(Nks) - Nks
N2 <- cumsum(Nks)
X <- as.matrix(cbind(X1, X2))
theta.names <- NULL
if( is.null(colnames(X))==FALSE ){
theta.names <- colnames(X)
}
K <- length(Nks)
P1 <- dim(X1)[2]
P2 <- dim(X2)[2]
# Unpack the constraints
if( is.null(constraints$A) ){
const <- create.constraints( P1, constraints)
A <- const$A
Anull <- const$Anull
B <- const$B
} else{
A <- constraints$A
Anull <- constraints$Anull
B <- constraints$B
if( is.null(Anull) ){
Anull <- create.constraints( P1, list(order="simple", node=1, decreasing=TRUE) )$Anull
}
}
# Initialize values
theta <- ginv( t(X)%*%X) %*% ( t(X)%*%Y )
R <- Y-X%*%theta
ssq <- apply( as.matrix(1:K, nrow=1), 1 ,
FUN=function(k, N1, N2, R ){ sum( R[N1[k]:N2[k]]^2 ) / Nks[k] } ,
N1, N2, R)
var_fix <- (ssq <= .Machine$double.eps )
if( any(var_fix) ){
ssq[ var_fix==1 ] <- dvar[var_fix==1]
}
ssqvec <- rep( ssq, Nks )
tsq <- NULL
theta1 <- theta
ssq1 <- ssq
tsq1 <- tsq
  # Begin the EM Algorithm convergence loop
CONVERGE <- 0
iteration <- 0
while( CONVERGE==0 ){
iteration <- iteration+1
R <- Y-X%*%theta
if( verbose==TRUE ){
message( "EM iteration " , iteration)
}
# Step 1: Estimate Sigma
trace.vec <- (R/ssqvec)^2 - 1/ssqvec
# trace.vec <- diag( PsiI%*%( R%*%t(R) )%*%PsiI - PsiI )
ssq <- apply( as.matrix(1:K, nrow=1), 1 ,
FUN=function(k, ssq, Nks, N1, N2 , trv){
idx <- N1[k]:N2[k]
ssq[k] + ( (ssq[k]^2)/(Nks[k]) )*sum( trv[idx] )
} ,
ssq , Nks , N1 , N2 , trace.vec )
if( any(var_fix) ){
ssq[ var_fix==1 ] <- dvar[ var_fix==1 ]
}
ssqvec <- rep( ssq, Nks)
# Step 2a: Estimate Thetas
# Update the blocks
SiR <- R / ssqvec
#theta[1:P1] <- theta1[ 1:P1] + ginv(t(X1)%*%SigmaI%*%X1) %*% ((t(X1)%*%PsiI)%*%R )
theta[1:P1] <- theta1[1:P1] + ginv( t(X1)%*%(X1/ssqvec) ) %*% (t(X1) %*% SiR)
if( is.null(X2)==FALSE ){
#theta[(P1+1):(P1+P2)] <- ( theta1[ (P1+1):(P1+P2)] +
# ginv(t(X2)%*%SigmaI%*%X2) %*% ((t(X2)%*%PsiI)%*%R ) )
X2SiR <- t(X2) %*% SiR
theta[(P1+1):(P1+P2)] <- ( theta1[(P1+1):(P1+P2)] +
ginv(t(X2)%*%(X2/ssqvec)) %*% (X2SiR) )
}
cov.theta <- solve( t(X) %*% (X/ssqvec) )
## Apply order constraints / isotonization
if( all_pair==FALSE ){
if( mySolver=="GLS"){
wts <- solve( cov.theta )[1:P1, 1:P1, drop=FALSE]
} else{
wts <- diag( solve(cov.theta) )[1:P1]
}
theta[1:P1] <- activeSet(A, y = theta[1:P1], weights = wts, mySolver=mySolver )$x
}
# Evaluate some convergence criterion
rel.change <- abs(theta - theta1)/theta1
if( mean(rel.change) < em.eps || iteration >= em.iter ){
CONVERGE <- 1
} else{
theta1 <- theta
ssq1 <- ssq
}
} # End converge loop (while)
if( verbose==TRUE ){
message("EM Algorithm ran for " , iteration , " iterations." )
}
wts <- diag( solve(cov.theta) )[1:P1]
theta <- c(theta)
names(theta) <- theta.names
theta.null <- theta
theta.null[1:P1] <- activeSet( Anull, y = theta[1:P1], weights = wts , mySolver=mySolver )$x
# Compute test statistic
ts.glb <- tsf( theta=theta, theta.null=theta.null, cov.theta=cov.theta, B=B, A=A, Y=Y, X1=X1,
X2=X2, U=U, tsq=tsq, ssq=ssq, Nks=Nks, Qs=Qs )
ts.ind <- tsf.ind(theta=theta, theta.null=theta.null, cov.theta=cov.theta, B=B, A=A, Y=Y, X1=X1,
X2=X2, U=U, tsq=tsq, ssq=ssq, Nks=Nks, Qs=Qs )
# Return the results
em.results <- list(theta=theta, theta.null=theta.null, ssq=ssq, tsq=tsq,
cov.theta=cov.theta, ts.glb=ts.glb, ts.ind=ts.ind, mySolver=mySolver )
return( em.results )
}
|
/scratch/gouwar.j/cran-all/cranData/CLME/R/clme.em.fixed.r
|
#' @description \code{clme_em_mixed} performs a constrained EM algorithm for linear mixed effects models.
#'
#' @rdname clme_em
#' @export
#'
clme_em_mixed <- function( Y, X1, X2 = NULL, U = NULL, Nks = dim(X1)[1],
Qs = dim(U)[2], constraints, mq.phi = NULL, tsf = lrt.stat,
tsf.ind = w.stat.ind, mySolver="LS", em.iter = 500,
em.eps = 0.0001, all_pair = FALSE, dvar = NULL, verbose = FALSE, ... ){
if( verbose==TRUE ){
message("Starting EM algorithm")
}
N <- sum(Nks)
N1 <- 1 + cumsum(Nks) - Nks
N2 <- cumsum(Nks)
Q <- length(Qs)
Q1 <- 1 + cumsum(Qs) - Qs
Q2 <- cumsum(Qs)
X <- as.matrix(cbind(X1, X2))
theta.names <- NULL
if( !is.null(colnames(X)) ){
theta.names <- colnames(X)
}
K <- length(Nks)
P1 <- dim(X1)[2]
P2 <- dim(X2)[2]
# Unpack the constraints
if( is.null(constraints$A) ){
const <- create.constraints( P1, constraints)
A <- const$A
Anull <- const$Anull
B <- const$B
} else{
A <- constraints$A
Anull <- constraints$Anull
B <- constraints$B
if( is.null(Anull) ){
Anull <- create.constraints( P1, list(order="simple", node=1, decreasing=TRUE) )$Anull
}
}
# Initialize values
theta <- ginv( t(X)%*%X) %*% ( t(X)%*%Y )
R <- Y-X%*%theta
ssq <- apply( as.matrix(1:K, nrow=1), 1 ,
FUN=function(k, N1, N2, R ){ sum( R[N1[k]:N2[k]]^2 ) / Nks[k] } ,
N1, N2, R)
var_fix <- (ssq <= .Machine$double.eps )
if( any(var_fix) ){
ssq[ var_fix==1 ] <- dvar[var_fix==1]
}
ssqvec <- rep( ssq,Nks)
if( is.null(mq.phi) ){
mq.phi <- minque( Y=Y, X1=X1, X2=X2, U=U, Nks=Nks, Qs=Qs)
}
tsq <- mq.phi[1:Q]
tsqvec <- rep(tsq,Qs)
theta1 <- theta
ssq1 <- ssq
tsq1 <- tsq
# Begin the EM Algorithm convergence loop
CONVERGE <- 0
iteration <- 0
while( CONVERGE==0 ){
iteration <- iteration+1
R <- Y-X%*%theta
if( verbose==TRUE ){
message( "EM iteration " , iteration)
}
# Step 1: Estimate Sigma
SiR <- R / ssqvec
# U' * SigI * U
U1 <- apply( U , 2 , FUN=function(x,sq){x*sq} , 1/sqrt(ssqvec) )
tusu <- t(U1) %*% U1
diag(tusu) <- diag(tusu) + 1/tsqvec
tusui <- solve(tusu)
PiR <- SiR - (U %*% (tusui %*% (t(U)%*%SiR))) * (1/ssqvec)
# PsiI
BU <- tusui %*% t(U)
UBU <- apply( as.matrix(1:sum(Nks)) , 1 , FUN=function(kk,uu,bu){ sum( uu[kk,]*bu[,kk] ) } , U, BU )
SUBUS <- 1/ssqvec - UBU / ssqvec^2
trace.vec <- PiR^2 - SUBUS
# trace.vec <- diag( PsiI%*%( R%*%t(R) )%*%PsiI - PsiI )
ssq <- apply( as.matrix(1:K, nrow=1), 1 ,
FUN=function(k, ssq, Nks, N1, N2 , trv){
idx <- N1[k]:N2[k]
ssq[k] + ( (ssq[k]^2)/(Nks[k]) )*sum( trv[idx] )
} ,
ssq , Nks , N1 , N2 , trace.vec )
var_fix <- (ssq == 0 )
if( any(var_fix) ){
ssq[ var_fix==1 ] <- dvar[var_fix==1]
}
ssqvec <- rep( ssq,Nks)
# Step 2a: Estimate Thetas
# Update the blocks
SiR <- R / ssqvec
X1SiR <- t(X1) %*% SiR
X1SiU <- t(X1) %*% (U/ssqvec) # X1SU
USiR <- t(U) %*% SiR
U1 <- apply( U , 2 , FUN=function(x,sq){x*sq} , 1/sqrt(ssqvec) )
tusu <- t(U1) %*% U1
diag(tusu) <- diag(tusu) + 1/tsqvec
tusui <- solve(tusu)
#theta[1:P1] <- theta1[ 1:P1] + ginv(t(X1)%*%SigmaI%*%X1) %*% ((t(X1)%*%PsiI)%*%R )
theta[1:P1] <- theta1[1:P1] + ginv( t(X1)%*%(X1/ssqvec) ) %*% (X1SiR - X1SiU%*%(tusui%*%USiR))
if( is.null(X2)==FALSE ){
#theta[(P1+1):(P1+P2)] <- ( theta1[ (P1+1):(P1+P2)] +
# ginv(t(X2)%*%SigmaI%*%X2) %*% ((t(X2)%*%PsiI)%*%R ) )
X2SiU <- t(X2) %*% (U/ssqvec)
X2SiR <- t(X2) %*% SiR
theta[(P1+1):(P1+P2)] <- ( theta1[(P1+1):(P1+P2)] +
ginv(t(X2)%*%(X2/ssqvec)) %*% (X2SiR - X2SiU%*%(tusui%*%USiR)) )
}
# Step 2b: Estimate Tau
# USiR <- t(U) %*% SiR # <-- previously calculated
USiU <- t(U) %*% (U/ssqvec)
UPiR <- USiR - USiU%*%(tusui%*%USiR)
trace.vec.tau <- (UPiR^2) - diag(USiU) + diag( USiU%*%tusui%*%USiU )
for( q in 1:Q ){
# tau.idx <- Q1[q]:Q2[q]
#Uq <- as.matrix( U[,tau.idx] )
#cq <- dim(Uq)[2]
#sumdg <- sum(diag( t(Uq)%*%( PsiI%*%R%*%t(R)%*%PsiI - PsiI )%*%Uq ))
#tsq[q] <- tsq1[q] + ((tsq1[q]^2)/cq)*sumdg
tau.idx <- Q1[q]:Q2[q]
cq <- length(tau.idx)
tsq[q] <- tsq1[q] + ((tsq1[q]^2)/cq)*sum(trace.vec.tau[tau.idx])
}
XSiX <- t(X) %*% (X/ssqvec)
tsqvec <- rep(tsq,Qs)
XSiU <- t(X) %*% (U/ssqvec)
cov.theta <- solve( XSiX - XSiU%*%tusui%*%t(XSiU) )
## Apply order constraints / isotonization
if( all_pair==FALSE ){
if( mySolver=="GLS"){
wts <- solve( cov.theta )[1:P1, 1:P1, drop=FALSE]
} else{
wts <- diag( solve(cov.theta) )[1:P1]
}
theta[1:P1] <- activeSet(A, y = theta[1:P1], weights = wts, mySolver=mySolver )$x
}
# Evaluate some convergence criterion
rel.change <- abs(theta - theta1)/theta1
if( mean(rel.change) < em.eps || iteration >= em.iter ){
CONVERGE <- 1
} else{
theta1 <- theta
ssq1 <- ssq
tsq1 <- tsq
}
} # End converge loop (while)
if( verbose==TRUE ){
message("EM Algorithm ran for " , iteration , " iterations." )
}
wts <- diag( solve(cov.theta) )[1:P1]
theta <- c(theta)
names(theta) <- theta.names
theta.null <- theta
theta.null[1:P1] <- activeSet( Anull, y = theta[1:P1], weights = wts , mySolver=mySolver )$x
# Compute test statistic
ts.glb <- tsf( theta=theta, theta.null=theta.null, cov.theta=cov.theta, B=B, A=A, Y=Y, X1=X1,
X2=X2, U=U, tsq=tsq, ssq=ssq, Nks=Nks, Qs=Qs )
ts.ind <- tsf.ind(theta=theta, theta.null=theta.null, cov.theta=cov.theta, B=B, A=A, Y=Y, X1=X1,
X2=X2, U=U, tsq=tsq, ssq=ssq, Nks=Nks, Qs=Qs )
# Return the results
em.results <- list(theta=theta, theta.null=theta.null, ssq=ssq, tsq=tsq,
cov.theta=cov.theta, ts.glb=ts.glb, ts.ind=ts.ind, mySolver=mySolver )
return( em.results )
}
|
/scratch/gouwar.j/cran-all/cranData/CLME/R/clme.em.mixed.r
|
#' @title Constrained EM algorithm for linear fixed or mixed effects models.
#'
#' @description \code{clme_em} is the general function, it will call the others.
#' These Expectation-maximization (EM) algorithms estimate model parameters and
#' compute a test statistic.
#'
#' @rdname clme_em
#'
#' @param Y \eqn{N \times 1}{Nx1} vector of response data.
#' @param X1 \eqn{N \times p_1}{Nxp1} design matrix.
#' @param X2 optional \eqn{N \times p_2}{Nxp2} matrix of covariates.
#' @param U optional \eqn{N \times c}{Nxc} matrix of random effects.
#' @param Nks optional \eqn{K \times 1}{Kx1} vector of group sizes.
#' @param Qs optional \eqn{Q \times 1}{Qx1} vector of group sizes for random effects.
#' @param constraints list containing the constraints. See Details.
#' @param mq.phi optional MINQUE estimates of variance parameters.
#' @param tsf function to calculate the test statistic.
#' @param tsf.ind function to calculate the test statistic for individual contrasts. See Details for further information.
#' @param mySolver solver to use in isotonization (passed to \code{activeSet}).
#' @param em.eps criterion for convergence for the EM algorithm.
#' @param em.iter maximum number of iterations permitted for the EM algorithm.
#' @param all_pair logical, whether all pairwise comparisons should be considered (constraints will be ignored).
#' @param dvar fixed values to replace bootstrap variance of 0.
#' @param verbose if \code{TRUE}, function prints messages on progress of the EM algorithm.
#' @param ... space for additional arguments.
#'
#' @details
#' Argument \code{constraints} is a list including at least the elements \code{A}, \code{B}, and \code{Anull}. This argument can be generated by function \code{\link{create.constraints}}.
#'
#' @return
#' The function returns a list with the elements:
#' \itemize{
#' \item{\code{theta}}{ coefficient estimates.}
#' \item{\code{theta.null}}{ vector of coefficient estimates under the null hypothesis.}
#' \item{\code{ssq}}{ estimate of residual variance term(s).}
#' \item{\code{tsq}}{ estimate of variance components for any random effects.}
#' \item{\code{cov.theta}}{ covariance matrix of the unconstrained coefficients. }
#' \item{\code{ts.glb}}{ test statistic for the global hypothesis.}
#' \item{\code{ts.ind}}{ test statistics for each of the constraints.}
#' \item{\code{mySolver}}{ the solver used for isotonization.}
#' }
#'
#' @note
#' There are few error catches in these functions. If only the EM estimates are desired,
#' users are recommended to run \code{\link{clme}} setting \code{nsim=0}.
#'
#' By default, homogeneous variances are assumed for the residuals and (if included)
#' random effects. Heterogeneity can be induced using the arguments \code{Nks} and \code{Qs},
#' which refer to the vectors \eqn{ (n_{1}, n_{2}, \ldots, n_{k}) }{(n1, n2 ,... , nk)} and
#' \eqn{ (c_{1}, c_{2}, \ldots, c_{q}) }{(c1, c2 ,... , cq)}, respectively. See
#' \code{\link{CLME-package}} for further explanation of the model and these values.
#'
#' See \code{\link{w.stat}} and \code{\link{lrt.stat}} for more details on using custom
#' test statistics.
#'
#' @seealso
#' \code{\link{CLME-package}}
#' \code{\link{clme}}
#' \code{\link{create.constraints}}
#' \code{\link{lrt.stat}}
#' \code{\link{w.stat}}
#'
#' @examples
#' data( rat.blood )
#'
#' model_mats <- model_terms_clme( mcv ~ time + temp + sex + (1|id), data = rat.blood )
#'
#' Y <- model_mats$Y
#' X1 <- model_mats$X1
#' X2 <- model_mats$X2
#' U <- model_mats$U
#'
#' cons <- list(order = "simple", decreasing = FALSE, node = 1 )
#'
#' clme.out <- clme_em(Y = Y, X1 = X1, X2 = X2, U = U, constraints = cons)
#'
#' @importFrom isotone activeSet
#' @importFrom isotone gpava
#' @export
#'
clme_em <- function( Y, X1, X2 = NULL, U = NULL, Nks = nrow(X1),
Qs = ncol(U), constraints, mq.phi = NULL, tsf = lrt.stat,
tsf.ind = w.stat.ind, mySolver="LS", em.iter = 500,
em.eps = 0.0001, all_pair = FALSE, dvar = NULL, verbose = FALSE, ... ){
##
## Development plans:
## - make function inputs more flexible like clme()
## - this may make passing the arguments need to be more detailed, but
## passing arguments TO clme.em (e.g. from clme() ) would be less detailed.
##
em_call <- as.list( environment() )
dots <- as.list(substitute(list(...)))[-1L]
new_call <- append( em_call, dots )
if( is.null(U) ){
em_results <- do.call( "clme_em_fixed" , new_call )
} else{
em_results <- do.call( "clme_em_mixed" , new_call )
}
return( em_results )
}
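## Illustrative sketch (added for exposition, not part of the package source):
## per the Details above, 'constraints' may be either the shorthand list used
## in the @examples or the expanded object returned by create.constraints().
## Assumes Y, X1, X2, and U were built with model_terms_clme() as shown above.
# const <- create.constraints( P1 = ncol(X1),
#                              constraints = list(order = "simple", decreasing = FALSE) )
# em_fit <- clme_em( Y = Y, X1 = X1, X2 = X2, U = U, constraints = const )
# em_fit$theta   # constrained coefficient estimates
# em_fit$ts.glb  # global test statistic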
|
/scratch/gouwar.j/cran-all/cranData/CLME/R/clme.em.r
|
#' Constrained Inference for Linear Mixed Effects Models
#'
#' @description Constrained inference for linear fixed or mixed
#' effects models using distribution-free bootstrap methodology
#'
#' @rdname clme
#'
#' @param formula a formula expression. The constrained effect must come before any unconstrained
#' covariates on the right-hand side of the expression. The constrained effect should
#' be an ordered factor.
#' @param data data frame containing the variables in the model.
#' @param gfix optional vector of group levels for residual variances. Data should be sorted by this value.
#' @param constraints optional list containing the constraints. See Details for further information.
#' @param tsf function to calculate the test statistic.
#' @param tsf.ind function to calculate the test statistic for individual contrasts. See Details for further information.
#' @param mySolver solver to use in isotonization (passed to \code{activeSet}).
#' @param all_pair logical, whether all pairwise comparisons should be considered (constraints will be ignored).
#' @param verbose optional vector of 3 logicals. The first causes printing of the iteration step; the remaining two are passed as the \code{verbose} argument to the functions \code{\link{minque}} and \code{\link{clme_em}}, respectively.
#' @param ... space for additional arguments.
#'
#'
#' @details
#' If any random effects are included, the function computes MINQUE estimates of variance components. Then
#' \code{\link{clme_em}} is run to obtain the observed values. If \code{nsim}>0, a bootstrap test is performed
#' using \code{\link{resid_boot}}.
#' For the argument \code{levels} the first list element should be the column index (in \code{data}) of the
#' constrained effect. The second element should be the true order of the levels.
#'
#' @note
#' The argument \code{constraints} is a list containing the order restrictions. The elements are
#' \code{order}, \code{node}, \code{decreasing}, \code{A}, and \code{B}, though not all are necessary.
#' The function can calculate the last two for default orders (simple, umbrella, or simple tree). For
#' default orders, \code{constraints} should be a list containing any subset of \code{order},
#' \code{node}, and \code{decreasing}. See Figure 1 from Jelsema \& Peddada (2016); the
#' pictured \code{node} of the simple tree orders (middle column) is 1, and the \code{node} for the
#' umbrella orders (right column) is 3. These may be vectors (e.g., \code{order=c('simple','umbrella')}).
#' If any of these three are missing, the function will test for all possible values of the missing
#' element(s), excluding simple tree.
#'
#' For non-default orders, the elements \code{A} and \code{B} should be provided. \code{A} is an
#' \eqn{r \times 2}{r x 2} matrix, where \eqn{r}{r} is the number of linear constraints (\eqn{r > 0}{r > 0}).
#' Each row should contain two indices, the first element is the index of the lesser coefficient, the
#' second element is the index of the greater coefficient. So a row of \eqn{(1,2)}{(1,2)} corresponds
#' to the constraint \eqn{\theta_1 \leq \theta_2}{theta_1 <= theta_2}, and a row \eqn{(4,3)}{(4,3)}
#' corresponds to the constraint \eqn{\theta_4 \leq \theta_3}{theta_4 <= theta_3}, etc. Element \code{B}
#' should hold similar contrasts, specifically those needed for calculating the Williams' type test
#' statistic (\code{B} is only needed if \code{tsf=w.stat}).
#' The argument \code{tsf} is a function to calculate the desired test statistic. The default function
#' calculates a likelihood ratio type test statistic. A Williams type test statistic, which is the maximum
#' of the test statistic over the constraints in \code{constraints\$B}, is also available, and custom
#' functions may be defined. See \code{\link{w.stat}} for details.
#' By default, homogeneity of variances is assumed for residuals (e.g., \code{gfix} does not define groups)
#' and for each random effect.
#' Some values can be passed to \code{clme} that are not used in this function. For instance,
#' \code{seed} and \code{nsim} can each be passed as an argument here, and \code{\link{summary.clme}} will
#' use these values.
#'
#'
#'
#' @return
#' The output of \code{clme} is an object of the class \code{clme}, which is list with elements:
#' \itemize{
#' \item{\code{theta}}{ estimates of \eqn{\theta}{theta} coefficients}
#' \item{\code{theta.null}}{ estimates of \eqn{\theta_0}{theta_0} coefficients under the null hypothesis}
#' \item{\code{ssq}}{ estimate of residual variance(s), \eqn{\sigma^{2}_{i}}{sigma.i^2}.}
#' \item{\code{tsq}}{ estimate of random effects variance component(s), \eqn{\tau^{2}_{i}}{tau.i^2}.}
#' \item{\code{cov.theta}}{ the unconstrained covariance matrix of \eqn{\theta}{theta}}
#' \item{\code{ts.glb}}{ test statistic for the global hypothesis.}
#' \item{\code{ts.ind}}{ test statistics for each of the constraints.}
#' \item{\code{mySolver}}{ the solver used for isotonization.}
#' \item{\code{constraints}}{ list containing the constraints (\code{A}) and the contrast for the global test (\code{B}).}
#' \item{\code{dframe}}{ data frame containing the variables in the model.}
#' \item{\code{residuals}}{ matrix containing residuals. For mixed models three types of residuals are given. }
#' \item{\code{random.effects}}{ estimates of random effects. }
#' \item{\code{gfix}}{ group sample sizes for residual variances. }
#' \item{\code{gran}}{ group sizes for random effect variance components. }
#' \item{\code{gfix_group}}{ group names for residual variances. }
#' \item{\code{formula}}{ the formula used in the model. }
#' \item{\code{call}}{ the function call. }
#' \item{\code{order}}{ list describing the specified or estimated constraints.}
#' \item{\code{P1}}{ the number of constrained parameters.}
#' \item{\code{nsim}}{ the number of bootstrap simulations used for inference.}
#' }
#'
#'
#'
#'
#' @examples
#' data( rat.blood )
#' cons <- list(order="simple", decreasing=FALSE, node=1 )
#'
#' clme.out <- clme(mcv ~ time + temp + sex + (1|id), data=rat.blood ,
#' constraints=cons, seed=42, nsim=10 )
#'
#' @references
#' Jelsema, C. M. and Peddada, S. D. (2016).
#' CLME: An R Package for Linear Mixed Effects Models under Inequality Constraints.
#' \emph{Journal of Statistical Software}, 75(1), 1-32. doi:10.18637/jss.v075.i01
#'
#' @importFrom MASS ginv
#' @export
#'
clme <-
function( formula, data=NULL, gfix=NULL, constraints=list(), tsf=lrt.stat, tsf.ind=w.stat.ind,
mySolver="LS", all_pair=FALSE, verbose=c(FALSE,FALSE,FALSE), ... ){
cc <- match.call( expand.dots=TRUE )
## If provided, sort the data by gfix
if( !is.null(gfix) & !is.null(data) ){
data <- data[ with(data, order(gfix)), ]
}
mmat <- eval( model_terms_clme( formula, data ), parent.frame() )
# mmat <- model_terms_clme( formula, data )
formula2 <- mmat$formula
Y <- mmat$Y
P1 <- mmat$P1
X1 <- mmat$X1
X2 <- mmat$X2
U <- mmat$U
xlev <- mmat$xlev
ncon <- 1
if( is.null(xlev) ){
xlev <- colnames(X1)
} else{
colnames(X1) <- xlev
}
if( is.null(gfix) ){
gfix <- rep("Residual", nrow(X1) )
} else{
data <- with( data, data[order(gfix),])
}
Nks <- table(gfix)
if( !is.null(U) ){
if( !is.null(mmat$REidx) ){
Qs <- table( mmat$REidx )
names(Qs) <- mmat$REnames
} else{
Qs <- table( rep("tsq", ncol(U)) )
}
} else{
Qs <- NULL
}
# If only one element for verbose specified, fill the rest with FALSEs
if( length(verbose)<3 ){
verbose <- c(verbose, rep(FALSE, 3-length(verbose) ) )
}
## Assess the constraints
cust_const <- is.matrix( constraints$A )
prnt_warn <- ""
if( all_pair==TRUE ){
Amat <- t(combn( 1:P1 , m=2 ))
Bmat <- Amat
Anull <- rbind( Amat, Amat[,2:1] )
constraints <- list( A=Amat, B=Bmat, Anull=Anull )
cust_const <- TRUE
}
if( cust_const == TRUE ){
if( !is.numeric(constraints$A) ){
stop( "'constraints$A' must be numeric" )
}
} else {
# Constraints are non-null, but A and B are not provided
# Determine which other elements are missing/needed
if( is.null(constraints$order) ){
prnt_warn <- paste( prnt_warn, "\n-'constraints$order' is NULL, program will run search for ''simple'' and ''umbrella'' orders")
constraints$order <- c("simple" , "umbrella" )
}
if( is.null(constraints$node) ){
prnt_warn <- paste( prnt_warn, "\n'constraints$node' is NULL, program will run search for node")
constraints$node <- 1:P1
} else{
search.node <- FALSE
}
if( is.null(constraints$decreasing) ){
prnt_warn <- paste( prnt_warn, "\n'constraints$decreasing' is NULL, program will run search for TRUE and FALSE")
constraints$decreasing <- c(TRUE,FALSE)
}
}
## Make sure test stat function is okay
if( is.function(tsf)==FALSE ){
stop("'tsf' is not a function")
}
if( is.function(tsf.ind)==FALSE ){
stop("'tsf.ind' is not a function")
}
## Revert to LRT if necessary
if( cust_const==TRUE & identical( tsf , w.stat ) & is.null(constraints$B) ){
prnt_warn <- paste( prnt_warn, "\nWilliams type statistic selected with custom constraints, but
'constraints$B' is NULL. Reverting to LRT statistic")
tsf <- lrt.stat
}
## Set up search grid if using defaults
if( cust_const==FALSE ){
search.grid <- expand.grid( constraints$order ,
constraints$decreasing ,
constraints$node )
search.grid[,1] <- as.character(search.grid[,1])
# Remove duplicates / extraneous
# "simple" doesn't need node
# idx <- 1*(search.grid[,1]=="simple" & search.grid[,3] > 1)
# search.grid <- search.grid[ idx==0 , , drop=FALSE]
idx <- 1*(search.grid[,1]=="simple" )
search.grid[idx==1,3] <- 0
# Detect any umbrella that match simple order
repl_row1 <- data.frame( Var1="simple", Var2=FALSE, Var3=1 ) # simple INCREASING = umbrella INC_1 and DEC_P1
repl_row2 <- data.frame( Var1="simple", Var2=TRUE, Var3=1 ) # simple DECREASING = umbrella INC_P1 and DEC_1
repl_row1$Var1 <- as.character( repl_row1$Var1 )
repl_row2$Var1 <- as.character( repl_row2$Var1 )
idx <- 1*( (search.grid[,1]=="umbrella" & search.grid[,2] == FALSE & search.grid[,3] == 1 ) +
(search.grid[,1]=="umbrella" & search.grid[,2] == TRUE & search.grid[,3] == P1 ) )
if( sum(idx)>1 ){ search.grid[ idx==1, ] <- repl_row1 }
idx <- 1*( (search.grid[,1]=="umbrella" & search.grid[,2] == FALSE & search.grid[,3] == P1 ) +
(search.grid[,1]=="umbrella" & search.grid[,2] == TRUE & search.grid[,3] == 1 ) )
if( sum(idx)>1 ){ search.grid[ idx==1, ] <- repl_row2 }
# Move simple.tree to the bottom
idx <- search.grid[,1]=="simple.tree"
if( sum(idx)>0 ){
search.grid <- rbind( search.grid[idx==0, , drop=FALSE] ,
search.grid[idx==1, , drop=FALSE] )
}
## Remove duplicate rows
search.grid <- unique( search.grid )
MNK <- dim( search.grid )[1]
} else{
MNK <- 1
loop.const <- est_const <- constraints
}
##
## End preparation steps, begin the analysis
##
## Obtain tau if needed
if( is.null(U) ){
mq.phi <- NULL
} else{
mq.phi <- minque( Y=Y , X1=X1 , X2=X2 , U=U , Nks=Nks , Qs=Qs ,
verbose=verbose[2], ... )
}
## EM for the observed data
if( verbose[1]==TRUE ){
print( paste( "Starting EM Algorithm for observed data." , sep=""))
}
## Loop through the search grid
est.order <- NULL
ts.max <- -Inf
for( mnk in 1:MNK ){
if( cust_const==FALSE ){
grid.row <- list( order = search.grid[mnk,1],
node = search.grid[mnk,3],
decreasing= search.grid[mnk,2])
loop.const <- create.constraints( P1=ncol(X1), constraints=grid.row )
}
clme.temp <- clme_em( Y=Y, X1=X1, X2=X2, U=U, Nks=Nks,
Qs=Qs, constraints=loop.const, mq.phi=mq.phi,
tsf=tsf, tsf.ind=tsf.ind, mySolver=mySolver,
verbose=verbose[3], all_pair=all_pair, ... )
# If global test stat is larger, update current estimate of order
if( cust_const==FALSE ){
update.max <- (mnk==1) + (clme.temp$ts.glb > ts.max)
} else{
update.max <- 1
}
if( update.max > 0 ){
ts.max <- clme.temp$ts.glb
clme.out <- clme.temp
est.order <- mnk
}
}
if( cust_const==FALSE ){
grid.row <- list( order = search.grid[est.order,1],
node = search.grid[est.order,3],
decreasing= search.grid[est.order,2])
est_const <- create.constraints( P1=ncol(X1), constraints=grid.row )
} else{
est_const <- constraints
}
#constraints$A <- est_const$A
#constraints$B <- est_const$B
## Calculate the residuals from unconstrained model
mr <- clme_resids( formula=formula, data=mmat$dframe, gfix=gfix )
## Add some values to the output object
class(clme.out) <- "clme"
clme.out$call <- cc
clme.out$formula <- mmat$formula
clme.out$constraints <- est_const
clme.out$dframe <- mmat$dframe
names(clme.out$theta) <- c( colnames(X1), colnames(X2) )
names(clme.out$ssq) <- names(Nks)
names(clme.out$tsq) <- names(Qs)
clme.out$cust_const <- cust_const
clme.out$all_pair <- all_pair
# clme.out$ncon <- ncon
clme.out$tsf <- tsf
clme.out$tsf.ind <- tsf.ind
clme.out$random.effects <- mr$xi
clme.out$gfix <- Nks
clme.out$gfix_group <- gfix
clme.out$gran <- Qs
clme.out$P1 <- P1
clme.out$mq.phi <- mq.phi
clme.out$nsim <- eval(cc$nsim)
clme.out$seed <- eval(cc$seed)
if( is.null(U) ){
clme.out$residuals <- mr$PA
} else{
clme.out$residuals <- cbind( mr$PA, mr$SS, mr$FM )
colnames(clme.out$residuals) <- c("PA", "SS", "FM")
}
## Report the estimated order
clme.out$order <- list()
clme.out$order$est_order <- est.order
if( all_pair==TRUE ){
clme.out$order$order <- "unconstrained"
} else if( cust_const == TRUE ){
clme.out$order$estimated <- FALSE
clme.out$order$order <- "custom"
clme.out$order$node <- NULL
clme.out$order$inc.dec <- NULL
clme.out$search.grid <- NULL
} else{
if( MNK==1 ){
clme.out$order$estimated <- FALSE
} else{
clme.out$order$estimated <- TRUE
}
clme.out$order$order <- est_const$order
clme.out$order$node <- est_const$node
if( est_const$decreasing ){
clme.out$order$inc.dec <- "decreasing"
} else{
clme.out$order$inc.dec <- "increasing"
}
clme.out$search.grid <- search.grid
}
if (verbose[1]==TRUE){
cat( prnt_warn )
}
## Return the output object
return( clme.out )
}
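## Illustrative sketch (added for exposition, not part of the package source):
## supplying a custom order to clme() through the A matrix described in the
## @note above. Each row (i, j) imposes theta_i <= theta_j; the indices here
## are hypothetical and assume the constrained effect has at least four levels.
# Amat <- rbind( c(1, 2), c(2, 3), c(4, 3) )   # theta1 <= theta2 <= theta3 >= theta4
# cust <- list( A = Amat, B = Amat )
# fit  <- clme( mcv ~ time + temp + sex + (1|id), data = rat.blood,
#               constraints = cust, nsim = 10, seed = 42 )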
|
/scratch/gouwar.j/cran-all/cranData/CLME/R/clme.r
|
#' Computes various types of residuals
#'
#' @description Computes several types of residuals for objects of class \code{clme}.
#'
#' @rdname clme_resids
#'
#' @param formula a formula expression. The constrained effect(s) must come before any unconstrained covariates on the right-hand side of the expression. The first \code{ncon} terms will be assumed to be constrained.
#' @param data data frame containing the variables in the model.
#' @param gfix optional vector of group levels for residual variances. Data should be sorted by this value.
#'
#' @details
#' For fixed-effects models \eqn{Y = X\beta + \epsilon}{Y = X*b + e}, residuals are given as \eqn{\hat{e} = Y - X\hat{\beta}}{ ehat = Y - X*betahat}.
#' For mixed-effects models \eqn{Y = X\beta + U\xi + \epsilon}{Y = X*b + U*xi + e}, three types of residuals are available.
#' \eqn{PA = Y - X\hat{\beta}}{ PA = Y - X*betahat}\\
#' \eqn{SS = U\hat{\xi}}{ SS = U*xihat}\\
#' \eqn{FM = Y - X\hat{\beta} - U\hat{\xi}}{ FM = Y - X*betahat - U*xihat}
#'
#' @return
#' List containing the elements \code{PA}, \code{SS}, \code{FM}, \code{cov.theta}, \code{xi}, \code{ssq}, \code{tsq}.
#' \code{PA}, \code{SS}, \code{FM} are defined above (for fixed-effects models, the residuals are only \code{PA}). Then \code{cov.theta} is the unconstrained covariance matrix of the fixed-effects coefficients, \code{xi} is the vector of random effect estimates, and \code{ssq} and \code{tsq} are unconstrained estimates of the variance components.
#'
#' @note
#' There are few error catches in these functions. If only the EM estimates are desired,
#' users are recommended to run \code{\link{clme}} setting \code{nsim=0}.
#'
#' By default, homogeneous variances are assumed for the residuals and (if included)
#' random effects. Heterogeneity can be induced using the arguments \code{Nks} and \code{Qs},
#' which refer to the vectors \eqn{ (n_{1}, n_{2}, \ldots, n_{k}) }{(n1, n2 ,... , nk)} and
#' \eqn{ (c_{1}, c_{2}, \ldots, c_{q}) }{(c1, c2 ,... , cq)}, respectively. See
#' \code{\link{CLME-package}} for further explanation of the model and these values.
#'
#' See \code{\link{w.stat}} and \code{\link{lrt.stat}} for more details on using custom
#' test statistics.
#'
#' @seealso
#' \code{\link{CLME-package}}
#' \code{\link{clme}}
#'
#' @examples
#' \dontrun{
#' data( rat.blood )
#' cons <- list(order = "simple", decreasing = FALSE, node = 1 )
#'
#' clme.out <- clme_resids(mcv ~ time + temp + sex + (1|id), data = rat.blood )
#' }
#'
#' @importFrom MASS ginv
#' @export
#'
#'
clme_resids <-
function( formula, data, gfix=NULL ){
##
## I should consider a way to condense this so it doesn't need to
## be copied between here, clme() and resid_boot().
##
suppressMessages( mmat <- model_terms_clme( formula, data ) )
formula2 <- mmat$formula
Y <- mmat$Y
P1 <- mmat$P1
X1 <- mmat$X1
X2 <- mmat$X2
U <- mmat$U
if( is.null(gfix) ){
gfix <- rep("Residual", nrow(X1))
} else{
data <- with( data, data[order(gfix),])
}
Nks <- table(gfix)
if( !is.null(U) ){
if( !is.null(mmat$REidx) ){
Qs <- table( mmat$REidx )
names(Qs) <- mmat$REnames
} else{
Qs <- table( rep("tsq", ncol(U)) )
}
} else{
Qs <- NULL
}
#############################################
N <- sum(Nks)
N1 <- 1 + cumsum(Nks) - Nks
N2 <- cumsum(Nks)
Q <- length(Qs)
Q1 <- 1 + cumsum(Qs) - Qs
Q2 <- cumsum(Qs)
X <- as.matrix( cbind(X1, X2) )
K <- length(Nks)
# Initial values
theta <- ginv( t(X)%*%X )%*%( t(X)%*%Y )
ssq <- vector()
for( k in 1:K ){
Yk <- Y[ N1[k]:N2[k] ]
Xk <- X[ N1[k]:N2[k],]
ssq[k] <- sum( (Yk - Xk%*%theta)^2 ) / (Nks[k])
}
## Obtain the estimates of epsilon and delta
ssqvec <- rep(ssq,Nks)
XSiX <- t(X) %*% (X/ssqvec)
XSiY <- t(X) %*% (Y/ssqvec)
if( Q > 0 ){
mq.phi <- minque( Y=Y, X1=X1, X2=X2, U=U, Nks=Nks, Qs=Qs )[1:Q]
tsq <- mq.phi
tsqvec <- rep(tsq,Qs)
C <- U * tsqvec
U1 <- apply( U , 2 , FUN=function(x,sq){x*sq} , 1/sqrt(ssqvec) )
tusu <- t(U1) %*% U1
diag(tusu) <- diag(tusu) + 1/tsqvec
tusui <- solve(tusu)
XSiU <- t(X) %*% (U/ssqvec)
USiY <- t(U) %*% (Y/ssqvec)
XPiX <- XSiX - XSiU%*%tusui%*%t(XSiU)
XPiY <- XSiY - XSiU%*%(tusui%*%USiY)
} else{
XPiX <- XSiX
XPiY <- XSiY
}
# H <- X%*%ginv( XPiX )%*%(t(X)%*%PsiI)
#eps <- c( Y - H%*%Y )
Yhat <- X %*% ginv( XPiX )%*%XPiY
PA <- c(Y - Yhat)
if( Q > 0 ){
# xi <- c( t(C) %*% PsiI %*% eps )
USiR <- t(U) %*% (PA/ssqvec)
USiU <- t(U) %*% (U/ssqvec)
xi <- tsqvec * ( USiR - USiU%*%(tusui%*%USiR) )
SS <- U %*% xi
FM <- PA - SS
resid.out <- list( PA=c(PA), SS=c(SS), FM=c(FM), cov.theta=solve(XPiX),
xi=c(xi), ssq=ssq, tsq=tsq )
} else{
resid.out <- list( PA=c(PA), cov.theta=XPiX, ssq=ssq )
}
return( resid.out )
}
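## Illustrative sketch (added for exposition, not part of the package source):
## for mixed models the three residual types defined above satisfy
## FM = PA - SS, which can be verified directly on the rat.blood example.
# res <- clme_resids( mcv ~ time + temp + sex + (1|id), data = rat.blood )
# all.equal( res$FM, res$PA - res$SS )   # TRUE, up to numerical error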
|
/scratch/gouwar.j/cran-all/cranData/CLME/R/clme_resids.r
|
#' Generate common order constraints
#'
#' @description Automatically generates the constraints in the format used by \code{\link{clme}}. Allowed orders are simple, simple tree, and umbrella orders.
#'
#'
#' @param P1 the length of \eqn{\theta_1}{theta_1}, the vector constrained coefficients.
#' @param constraints List with the elements \code{order}, \code{node}, and \code{decreasing}. See Details for further information.
#'
#' @details
#' The elements of \code{constraints} are:
#' \itemize{
#' \item \code{order}: string. Currently \dQuote{simple}, \dQuote{simple.tree} and \dQuote{umbrella} are supported.
#' \item \code{node}: numeric, the node of the coefficients (unnecessary for simple orders).
#' \item \code{decreasing}: logical. For simple orders, is the trend decreasing? For umbrella and simple tree, does the nodal parameter have the greatest value (e.g., the peak, instead of the valley)?
#' }
#'
#' See \code{\link{clme}} for more information and a depiction of these three elements.
#'
#' @return
#' The function returns a list containing the elements of input argument \code{constraints} as well as
#' \itemize{
#' \item{ \code{A} }{matrix of dimension \eqn{r \times 2}{r x 2} containing the order constraints, where r is the number of linear constraints.}
#' \item{ \code{B} }{matrix containing the contrasts necessary for computation of the Williams' type test statistic (may be identical to \code{A}).}
#' \item{ \code{Anull} }{matrix similar to \code{A} which defines all possible constraints. Used to obtain parameter estimates under the null hypothesis.}
#' \item{ \code{order} }{the input argument for \code{constraints\$order}.}
#' \item{ \code{node} }{the input argument for \code{constraints\$node}.}
#' \item{ \code{decreasing} }{ the input argument for \code{constraints\$decreasing}}
#' }
#' See \code{\link{w.stat}} for more information on \code{B}
#'
#'
#' @note
#' The function \code{\link{clme}} also utilizes the argument \code{constraints}. For \code{clme}, this argument may either be identical to the argument of this function, or may be the output of \code{create.constraints} (that is, a list containing appropriate matrices \code{A}, \code{Anull}, and if necessary, \code{B}).
#'
#' An example of the \code{A} matrix might be:
#' \tabular{ccc}{
#'        \tab [,1] \tab [,2] \cr
#'   [1,] \tab 1    \tab 2    \cr
#'   [2,] \tab 2    \tab 3    \cr
#'   [3,] \tab 4    \tab 3    \cr
#'   [4,] \tab 5    \tab 4    \cr
#'   [5,] \tab 6    \tab 5    \cr
#' }
#' This matrix defines what \pkg{CLME} describes as a decreasing umbrella order. The first row defines the constraint that \eqn{\theta_1 \leq \theta_2}{theta_1 <= theta_2}, the second row defines the constraint \eqn{\theta_2 \leq \theta_3}{theta_2 <= theta_3}, the third row defines \eqn{\theta_4 \leq \theta_3}{theta_4 <= theta_3}, and so on. The values are indexes, and the left column is the index of the parameter constrained to be smaller.
#'
#'
#' @seealso
#' \code{\link{clme}},
#' \code{\link{w.stat}}
#'
#' @examples
#' \dontrun{
#' # For simple order, the node does not matter
#' create.constraints( P1 = 5, constraints = list( order='simple' ,
#' decreasing=FALSE ))
#'
#' # Compare constraints against decreasing=TRUE
#' create.constraints( P1 = 5, constraints=list( order='simple' ,
#' decreasing=TRUE ))
#'
#' # Umbrella order
#' create.constraints( P1 = 5, constraints=list( order='umbrella' , node=3
#' , decreasing=FALSE ))
#' }
#'
#'
#' @importFrom utils combn
#' @export
#'
create.constraints <- function( P1, constraints ){
Q1 <- P1-1
order <- tolower(constraints$order)
node <- constraints$node
decreasing <- constraints$decreasing
if( is.null(node) ){
node <- 1
}
if( order =="tree" ){ order <- "simple.tree" }
if( (order %in% c("simple", "simple.tree", "umbrella"))==FALSE ){
stop("'order' must be one or more of: simple, simple.tree, umbrella")
}
## Revert to simple order if umbrella has node at extreme
if( order=="umbrella" & node %in% c(1,P1) ){
order <- "simple"
# Check the original node before resetting it: an umbrella with its node at
# P1 corresponds to a simple order in the opposite direction.
if( node==P1 ){
if( decreasing==TRUE ){
decreasing <- FALSE
} else {
decreasing <- TRUE
}
}
node <- 1
}
A <- matrix( 0, nrow=Q1, ncol=2 )
## Simple order
## e.g. mu_1 <= mu_2 <= ... <= mu_K
if( order=="simple" ){
if( decreasing==TRUE ){
A <- as.matrix(cbind( 1:Q1+1 , 1:Q1 ))
B <- matrix( c(P1,1) , nrow=1 )
} else{
A <- as.matrix(cbind( 1:Q1 , 1:Q1 + 1 ))
B <- matrix( c(1,P1) , nrow=1 )
}
node <- NULL
}
## Simple tree order
## e.g. mu_1 <= mu_i ; i=2,...,P1
if( order=="simple.tree" ){
if( decreasing==TRUE ){
A <- as.matrix( cbind( (1:P1)[-node] , rep(node,Q1) ) )
} else{
A <- as.matrix(cbind( rep(node,Q1) , (1:P1)[-node] ))
}
B <- A
}
## Umbrella order
## e.g. mu_1 <= mu_2 <= ... <= mu_b >= mu_{b+1} >= ... >= mu_K
if( order=="umbrella" ){
if( decreasing==TRUE ){
for( ii in 1:(node-1) ){
A[ii,] <- c(ii,ii+1)
}
for( ii in node:Q1 ){
A[ii,] <- c(ii+1,ii)
}
B <- as.matrix( rbind( c(1,node) , c(P1,node) ) )
} else{
for( ii in 1:(node-1) ){
A[ii,] <- c(ii+1,ii)
}
for( ii in node:Q1 ){
A[ii,] <- c(ii,ii+1)
}
B <- as.matrix( rbind( c(node,1) , c(node,P1) ) )
}
}
## Make the null A-matrix
Anull <- t(combn( 1:P1 , m=2 ))
Anull <- rbind( Anull, Anull[,2:1] )
## If A has only one row, activeSet() from package "isotone" causes error.
## Placing same constraint twice alleviates this problem.
if( nrow(A)==1 ){
A <- rbind( A , A )
Anull <- rbind( Anull, Anull )
}
# Return the constraints object
new_constraints <- list( A = A, B = B, Anull = Anull, order=order, node=node, decreasing=decreasing )
return(new_constraints)
}
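## Illustrative sketch (added for exposition, not part of the package source):
## the decreasing umbrella order tabulated in the @note above (six constrained
## coefficients, node at 3) can be generated directly:
# const <- create.constraints( P1 = 6,
#                              constraints = list(order = "umbrella", node = 3, decreasing = TRUE) )
# const$A
# #      [,1] [,2]
# # [1,]    1    2
# # [2,]    2    3
# # [3,]    4    3
# # [4,]    5    4
# # [5,]    6    5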
|
/scratch/gouwar.j/cran-all/cranData/CLME/R/create.constraints.r
|
#' MINQUE Algorithm
#'
#' @description Algorithm to obtain MINQUE estimates of variance components of a linear mixed effects model.
#'
#' @param Y \eqn{N \times 1}{Nx1} vector of response data.
#' @param X1 \eqn{N \times p_1}{Nxp1} design matrix.
#' @param X2 optional \eqn{N \times p_2}{Nxp2} matrix of covariates.
#' @param U optional \eqn{N \times c}{Nxc} matrix of random effects.
#' @param Nks optional \eqn{K \times 1}{Kx1} vector of group sizes.
#' @param Qs optional \eqn{Q \times 1}{Qx1} vector of group sizes for random effects.
#' @param mq.eps criterion for convergence for the MINQUE algorithm.
#' @param mq.iter maximum number of iterations permitted for the MINQUE algorithm.
#' @param verbose if \code{TRUE}, function prints messages on progress of the MINQUE algorithm.
#' @param ... space for additional arguments.
#'
#' @details
#' By default, the model assumes homogeneity of variances for both the residuals and the random effects
#' (if included). See the Details in \code{\link{clme_em}} for more information on how to use the
#' arguments \code{Nks} and \code{Qs} to permit heterogeneous variances.
#'
#' @return
#' The function returns a vector of the form \eqn{(\tau^{2}_{1}, \tau^{2}_{2}, \ldots, \tau^{2}_{q}, \sigma^{2}_{1},\sigma^{2}_{2},\ldots, \sigma^{2}_{k})'}{(tau1^2, tau2^2, \ldots, tauq^2, sigma1^2,sigma2^2,\ldots, sigmak^2)'}. If there are no random effects, then the output is just \eqn{(\sigma^{2}_{1},\sigma^{2}_{2},\ldots, \sigma^{2}_{k})'}{(sigma1^2,sigma2^2,\ldots, sigmak^2)'}.
#'
#' @note
#' This function is called by several other function in \pkg{CLME} to obtain estimates of the random effect variances. If there are no random effects, they will not call \code{minque}.
#'
#'
#' @examples
#' data( rat.blood )
#'
#' model_mats <- model_terms_clme( mcv ~ time + temp + sex + (1|id) ,
#' data = rat.blood )
#' Y <- model_mats$Y
#' X1 <- model_mats$X1
#' X2 <- model_mats$X2
#' U <- model_mats$U
#'
#' # No covariates or random effects
#' minque(Y = Y, X1 = X1 )
#'
#' # Include covariates and random effects
#' minque(Y = Y, X1 = X1, X2 = X2, U = U )
#'
#' @importFrom MASS ginv
#' @export
#'
#'
minque <- function( Y , X1 , X2=NULL , U=NULL , Nks=dim(X1)[1] , Qs=dim(U)[2] ,
mq.eps=0.0001, mq.iter=500 , verbose=FALSE, ... ){
if( verbose==TRUE ){
message("Running minque to estimate tau-squared")
}
X <- as.matrix( cbind(X1,X2) )
N <- sum(Nks)
N1 <- 1 + cumsum(Nks) - Nks
N2 <- cumsum(Nks)
Q <- length(Qs)
Q1 <- 1 + cumsum(Qs) - Qs
Q2 <- cumsum(Qs)
K <- length(Nks)
# Initial values
theta <- ginv( t(X)%*%X )%*%t(X)%*%Y
tsq <- rep( 1 , length(Qs) )
ssq <- vector()
for( k in 1:K ){
Yk <- Y[ N1[k]:N2[k] ]
Xk <- X[ N1[k]:N2[k],]
ssq[k] <- sum( (Yk-Xk%*%theta)^2 ) / Nks[k]
}
theta1 <- theta
tsq1 <- tsq
ssq1 <- ssq
# Get the F-list
Flist <- list()
nullmat <- matrix( 0 , nrow=N , ncol=N )
if( Q > 0 ){
for( qk in 1:Q ){
ind <- Q1[qk]:Q2[qk]
Flist[[qk]] <- U[,ind] %*% t( U[,ind] )
}
}
for( qk in (Q+1):(Q+K) ){
idx <- N1[qk-Q]:N2[qk-Q]
X.temp <- nullmat
diag(X.temp)[idx] <- rep( 1 ,length(idx) )
Flist[[qk]] <- X.temp
}
CONVERGE <- 0
iteration <- 0
phi.hats <- phi.hats1 <- c( tsq , ssq )
# Begin the minque convergence loop
while( CONVERGE==0 ){
iteration <- iteration + 1
if( verbose==TRUE ){
message("Iteration " , iteration )
}
# Calculate the G-matrix
G.phi <- matrix( 0 , nrow=N , ncol=N )
for( qk in 1:length(Flist) ){
G.phi <- G.phi + phi.hats1[qk]*Flist[[qk]]
}
W.phi <- G.phi + X%*%t(X)
W.phiI <- ginv(W.phi)
WX <- W.phiI%*%X
R.phi <- W.phiI - WX%*%ginv( t(X)%*%WX )%*%t(WX)
YR <- t(Y)%*%R.phi
Z.phi <- as.matrix(sapply( Flist,
FUN=function(x, a){ a%*%x%*%t(a) } , a=YR ))
S.phi <- matrix( 0 , nrow=(Q+K) , ncol=(Q+K) )
RFR <- lapply( Flist , FUN=function(x,a){ a%*%x%*%t(a) }, a=R.phi )
for( qk1 in 1:(Q+K) ){
S.phi[,qk1] <- sapply( RFR ,
FUN=function(x, a){ sum(diag(a%*%x)) } ,
a=Flist[[qk1]] )
}
phi.hats <- ginv(S.phi) %*% Z.phi
idx.neg <- phi.hats < 0
phi.hats[idx.neg,1] <- rep(sqrt(.Machine$double.eps), sum(idx.neg))
phi.hats <- c(phi.hats)
# Assess convergence
rel.change <- abs(phi.hats - phi.hats1)/phi.hats1
if( mean(rel.change) < mq.eps || iteration >= mq.iter ){
CONVERGE <- 1
} else{
phi.hats1 <- phi.hats
}
}
# Return output
if( Q > 0 ){
names(phi.hats) <- c(paste("tsq.", 1:Q, sep=""), paste("ssq.", 1:K, sep=""))
} else{
names(phi.hats) <- c(paste("ssq.", 1:K,sep=""))
}
phi.hats
}
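## Illustrative sketch (added for exposition, not part of the package source):
## the names of the returned vector identify each component, e.g. "tsq.1" for
## the (single) random-effect variance and "ssq.1" for the residual variance
## when homogeneous variances are assumed, as in the @examples above.
# mq <- minque( Y = Y, X1 = X1, X2 = X2, U = U )
# mq["tsq.1"]   # random-effect variance component
# mq["ssq.1"]   # residual variance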
|
/scratch/gouwar.j/cran-all/cranData/CLME/R/minque.r
|
#' S3 method to plot objects of class \code{clme}
#'
#' @description Generates a basic plot of estimated coefficients which are subject to constraints (\eqn{\theta_1}{theta_1} ). Lines indicate individual constraints (not global tests) and significance.
#'
#' @inheritParams plot.summary.clme
#'
#' @note
#' While it is possible to plot the output of a clme fit, this will only plot the fitted means.
#' To indicate significance, plotting must be performed on the summary of a clme fit. This method
#' will change the class so that plot.summary.clme will be called properly.
#'
#' @seealso
#' \code{\link{CLME-package}}
#' \code{\link{clme}}
#' \code{\link{plot.summary.clme}}
#'
#' @examples
#' \dontrun{
#' set.seed( 42 )
#' data( rat.blood )
#' cons <- list(order = "simple", decreasing = FALSE, node = 1 )
#' clme.out <- clme(mcv ~ time + temp + sex + (1|id), data = rat.blood ,
#' constraints = cons, seed = 42, nsim = 10)
#' plot( clme.out )
#' }
#'
#' @import graphics
#' @export
#'
plot.clme <- function(x , ...){
if( is.clme(x) ){
class(x) <- "summary.clme"
} else{
stop("'x' is not of class clme")
}
x$p.value.ind <- rep( 1, length(x$ts.ind) )
x$p.value <- rep( 1, length(x$ts.glb) )
# plot(x)
plt_call <- as.list( environment() )
dots <- as.list(substitute(list(...)))[-1L]
new_call <- append( plt_call, dots )
do.call( "plot" , new_call )
}
#' S3 method to plot objects of class \code{summary.clme}
#'
#' @description Generates a basic plot of estimated coefficients which are subject to constraints (\eqn{\theta_1}{theta_1} ). Lines indicate individual constraints (not global tests) and significance.
#'
#' @param x object of class 'clme' to be plotted.
#' @param alpha significance level of the test.
#' @param legendx character indicating placement of legend. See Details.
#' @param inset inset distance(s) from the margins as a fraction of the plot region when legend is placed by keyword.
#' @param ci plot individual confidence intervals.
#' @param ylim limits of the y axis.
#' @param cex size of plotting symbols.
#' @param pch plotting symbols.
#' @param bg background (fill) color of the plotting symbols.
#' @param xlab label of the x axis.
#' @param ylab label of the y axis.
#' @param tree logical to produce alternate graph for tree ordering.
#' @param ... additional plotting arguments.
#'
#' @details
#' All of the individual contrasts in the \code{constraints\$A} matrix are tested and plotted.
#' The global test is not represented (unless it happens to coincide with an individual contrast).
#' Only the elements of \eqn{\theta}{theta} which appear in any constraints (e.g. the elements of
#' \eqn{\theta_{1}}{theta_1}) are plotted. Coefficients for the covariates are not plotted.
#' Solid lines denote no significant difference, while dashed lines denote statistical significance.
#' Significance is determined by the individual p-value being less than or equal to the supplied
#' \eqn{\alpha}{alpha} threshold. By default a legend denoting the meaning of solid and dashed lines
#' will be placed below the graph. Argument \code{legendx} may be set to a legend keyword (e.g.
#' \code{legend=''bottomright''}) to place it inside the graph at the specified location. Setting
#' \code{legendx} to \code{FALSE} or to a non-supported keyword suppresses the legend.
#' Confidence intervals for the coefficients may be plotted. They are individual confidence intervals,
#' and are computed using the covariance matrix of the unconstrained estimates of
#' \eqn{\theta_{1}}{theta_1}. These confidence intervals have higher coverage probability than the
#' nominal value, and as such may appear to be in conflict with the significance tests. Alternate
#' forms of confidence intervals may be provided in future updates.
#'
#' @seealso
#' \code{\link{CLME-package}}
#' \code{\link{clme}}
#'
#' @examples
#' \dontrun{
#' set.seed( 42 )
#' data( rat.blood )
#' cons <- list(order = "simple", decreasing = FALSE, node = 1 )
#' clme.out <- clme(mcv ~ time + temp + sex + (1|id), data = rat.blood ,
#' constraints = cons, seed = 42, nsim = 10)
#' clme.out2 <- summary( clme.out )
#' plot( clme.out2 )
#' }
#'
#'
#' @import graphics
#'
#' @export
#'
plot.summary.clme <-
function(x , alpha=0.05 , legendx="below" , inset=0.01,
ci=FALSE , ylim=NULL , cex=1.75 , pch=21 , bg="white" ,
xlab = expression( paste( "Component of " , theta[1] ) ),
ylab = expression( paste( "Estimated Value of " , theta[1] ) ) ,
tree=NULL, ...){
object <- x
#if( !is.clme(object) ){ stop("Argument 'object' is not of class clme.") }
class(object) <- "clme"
theta <- fixef(object)
A <- object$constraints$A
r <- nrow(A)
p1 <- max(A)
all_pair <- object$all_pair
if( ci ){
ci.wd <- min( 1/p1 , 1/15 )
CIs <- confint(object, level=(1-alpha))
}
if( legendx=="below" ){
layout( rbind(1,2) , heights=c(7,1) )
}
theta1 <- object$theta[1:p1]
# Pick some reasonable plot limits
if( is.null(ylim) ){
if( ci ){
ylim <- c( min(CIs[1:p1,]) , max(CIs[1:p1,]))
} else{
if( min(theta1) < 0 ){
ymin <- min(theta1)*1.05
} else{
ymin <- min(theta1)/1.05
}
if( max(theta1) < 0 ){
ymax <- max(theta1)/1.05
} else{
ymax <- max(theta1)*1.05
}
ylim <- c( ymin , ymax )
}
}
if( is.null(tree) ){
tree <- object$order$order == "simple.tree"
}
## PLOT FOR SIMPLE / UMBRELLA ORDERS
if( !tree ){
# The initial plot of the points
plot( 1:p1 , theta1 , cex=cex , pch=pch , bg=bg ,
ylim = ylim , xaxt='n' , xlab = xlab , ylab = ylab, ...)
axis(side=1, at=1:p1, labels=names(theta1), ...)
# Connect the contrasts with solid/dashed lines
if( all_pair==FALSE ){
for( ii in 1:r){
idx <- A[ii,]
if( object$p.value.ind[ii] > alpha ){ lty <- 1 }
if( object$p.value.ind[ii] <= alpha ){ lty <- 2 }
points( idx , theta[idx] , lty=lty , lwd=2 , type="l")
}
}
## Add the CIs if necessary
if( ci ){
for( ii in 1:p1){
points( c(ii,ii) , c(CIs[ii,1], CIs[ii,2]) , type="l" )
points( c(ii-ci.wd, ii+ci.wd) , c(CIs[ii,1], CIs[ii,1]) , type="l" )
points( c(ii-ci.wd, ii+ci.wd) , c(CIs[ii,2], CIs[ii,2]) , type="l" )
}
}
# Replot the points so the circles are filled
points( 1:p1 , theta[1:p1] , cex=cex , pch=pch , bg=bg )
}
if( tree ){
## PLOT FOR TREE ORDER
plot(x=1, y=0, col=0, ylim=ylim, xlim=c(0.9,2.1), xlab="", ylab="Estimated Coefficient", xaxt="n")
axis(side=1, at=c(1,1.78), labels=c("Control (Node)" , "Treatment") )
node <- object$constraints$node
legend( 0.86, theta1[node]+0.35, names(theta1)[node] ,cex=.8, bty='n' )
for( ii in (1:p1)[-node] ){
legend( 1.77 , theta1[ii]+0.15, names(theta1)[ii] ,cex=.8, bty='n' )
points( c(1,1.78) , theta1[c(node,ii)] , col=1 , type="l" , lwd=2 , lty=(1 + 1*(object$p.value.ind[ii-1] < alpha)) )
points( c(1,1.78) , theta1[c(node,ii)] , col=1 , cex=1.5 , pch=21 , bg="white" )
}
}
## Put a legend on the plot if requested (if the p-values aren't all =1)
if( !identical( object$p.value.ind, rep(1, length(object$p.value.ind)) ) ){
if( legendx=="below" ){
safe.mar <- par( no.readonly=TRUE )$mar
par(mar=c(0, 0, 0, 0) , ...)
plot.new()
legend('center','groups', c( paste("p >" , alpha, " " ) , paste("p <" , alpha, " " )),
lty = c(1,2), col=1 , ncol=2 , bty ="o" , ...)
par( mar=safe.mar )
} else{
leg.texts <- c("bottom", "bottomleft", "left", "topleft",
"top", "topright", "right", "bottomright", "center")
if( legendx %in% leg.texts){
legend( legendx , legend=c( paste("p >" , alpha, " " ) , paste("p <" , alpha, " " )),
lty = c(1,2), col=1 , ncol=1 , bty ="o" , inset=inset , ...)
}
}
}
}
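## Illustrative sketch (added for exposition, not part of the package source):
## placing the legend inside the plot region and adding the individual
## confidence intervals described in the Details above. Assumes 'clme.out' is
## the fitted object from the @examples.
# plot( summary(clme.out), ci = TRUE, legendx = "bottomright", alpha = 0.05 )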
|
/scratch/gouwar.j/cran-all/cranData/CLME/R/plot.clme.r
|
#' Obtain Residual Bootstrap
#'
#' @description Generates bootstrap samples of the data vector.
#'
#' @param formula a formula expression. The constrained effect(s) must come before any unconstrained covariates on the right-hand side of the expression. The first \code{ncon} terms will be assumed to be constrained.
#' @param data data frame containing the variables in the model.
#' @param gfix optional vector of group levels for residual variances. Data should be sorted by this value.
#' @param eps estimates of residuals.
#' @param xi estimates of random effects.
#' @param null.resids logical indicating if residuals should be computed under the null hypothesis.
#' @param theta estimates of fixed effects coefficients. Estimated if not submitted.
#' @param ssq estimates of residual variance components. Estimated if not submitted.
#' @param tsq estimates of random effects variance components. Estimated if not submitted.
#' @param cov.theta covariance matrix of fixed effects coefficients. Estimated if not submitted.
#' @param seed set the seed for the RNG.
#' @param nsim number of bootstrap samples to use for significance testing.
#' @param mySolver solver to use, passed to \code{activeSet}.
#' @param ... space for additional arguments.
#'
#' @details
#' If any of the parameters \code{theta}, \code{ssq}, \code{tsq}, \code{eps}, or \code{xi} are provided, the function will use those values in generating the bootstrap samples. They will be estimated if not submitted. If \code{null.resids=TRUE}, then \code{theta} will be projected onto the space of the null hypothesis (\eqn{H_{0}: \theta_1 = \theta_2 = ... = \theta_{p_1}}{Ho: theta_1 = theta_2 = ... = theta_p1}) regardless of whether it is provided or estimated. To generate bootstraps with a specific \code{theta}, set \code{null.resids=FALSE}.
#'
#' @return
#' Output is an \eqn{N \times nsim}{N x nsim} matrix, where each column is a bootstrap sample of the response data \code{Y}.
#'
#' @note
#' This function is primarily designed to be called by \code{\link{clme}}.
#'
#' By default, homogeneous variances are assumed for the residuals and (if included) random effects. Heterogeneity can be induced using the arguments \code{Nks} and \code{Qs}, which refer to the vectors \eqn{ (n_{1}, n_{2}, \ldots, n_{k}) }{(n1, n2 ,... , nk)} and \eqn{ (c_{1}, c_{2}, \ldots, c_{q}) }{(c1, c2 ,... , cq)}, respectively. See \code{\link{clme_em}} for further explanation of these values.
#'
#'
#' @seealso
#' \code{\link{clme}}
#'
#' @examples
#' data( rat.blood )
#' boot_sample <- resid_boot(mcv ~ time + temp + sex + (1|id), nsim = 10,
#' data = rat.blood, null.resids = TRUE )
#'
#' @export
#'
resid_boot <-
function(formula, data, gfix=NULL, eps=NULL, xi=NULL, null.resids=TRUE,
theta=NULL, ssq=NULL, tsq=NULL, cov.theta=NULL, seed=NULL,
nsim=1000, mySolver="LS", ... ){
##
## I should consider a way to condense this so it doesn't need to
## be copied between here, clme() and clme_resids().
##
suppressMessages( mmat <- model_terms_clme( formula, data ) )
formula2 <- mmat$formula
Y <- mmat$Y
P1 <- mmat$P1
X1 <- mmat$X1
X2 <- mmat$X2
U <- mmat$U
if( is.null(gfix) ){
gfix <- rep("Residual", nrow(X1))
}
Nks <- table(gfix)
if( !is.null(U) ){
if( !is.null(mmat$REidx) ){
Qs <- table( mmat$REidx )
names(Qs) <- mmat$REnames
} else{
Qs <- table( rep("tsq", ncol(U)) )
}
} else{
Qs <- NULL
}
#############################################
if( is.numeric(seed) ){
set.seed(seed)
}
N <- sum(Nks)
N1 <- 1 + cumsum(Nks) - Nks
N2 <- cumsum(Nks)
Q <- length(Qs)
Q1 <- 1 + cumsum(Qs) - Qs
Q2 <- cumsum(Qs)
X <- as.matrix( cbind( X1,X2 ))
K <- length(Nks)
## Estimate parameters if they are missing
if( is.null(ssq) ){
theta1 <- ginv( t(X)%*%X )%*%(t(X)%*%Y)
ssq <- vector()
for( k in 1:K ){
Yk <- Y[ N1[k]:N2[k] ]
Xk <- X[ N1[k]:N2[k],]
ssq[k] <- sum( (Yk - Xk%*%theta1)^2 ) / (Nks[k])
}
}
if( (Q > 0) & is.null(tsq)){
tsq <- minque( Y=Y, X1=X1, X2=X2, U=U, Nks=Nks, Qs=Qs )[1:Q]
}
if( is.null(theta) ){
theta <- ginv( t(X)%*%X )%*%(t(X)%*%Y)
}
if( null.resids ){
# Only Anull is needed, so the true constraints are irrelevant
Anull <- create.constraints( P1, list(order="simple", node=1, decreasing=TRUE) )$Anull
if( is.null(cov.theta) ){
ssqvec <- rep(ssq,Nks)
XSiX <- t(X) %*% (X/ssqvec)
if( Q > 0 ){
tsqvec <- rep(tsq,Qs)
C <- U * tsqvec
U1 <- apply( U , 2 , FUN=function(x,sq){x*sq} , 1/sqrt(ssqvec) )
tusu <- t(U1) %*% U1
diag(tusu) <- diag(tusu) + 1/tsqvec
tusui <- solve(tusu)
XSiU <- t(X) %*% (U/ssqvec)
USiY <- t(U) %*% (Y/ssqvec)
cov.theta <- solve( XSiX - XSiU%*%tusui%*%t(XSiU) )
} else{
cov.theta <- solve( XSiX )
}
}
if( mySolver=="GLS"){
wts <- solve(cov.theta)[1:P1, 1:P1, drop=FALSE]
} else{
wts <- diag( solve(cov.theta) )[1:P1]
}
theta[1:P1] <- activeSet( Anull, y = theta[1:P1], weights = wts, mySolver=mySolver )$x
}
if( is.null(eps) ){
resids <- clme_resids( formula, data, gfix )
eps <- resids$PA
xi <- resids$xi
}
nu <- eps
for( i in 1:K ){
idx <- N1[i]:N2[i]
nu[idx] <- eps[idx] / sd( eps[idx] )
}
## Obtain the bootstrap samples
Y.boot <- matrix( NA, nrow=N, ncol=nsim )
XT.boot <- X%*%theta
if( Q > 0 ){
delta <- xi
for( i in 1:Q ){
idx <- Q1[i]:Q2[i]
delta[idx] <- xi[idx] / sd( xi[idx] )
}
Qc <- dim(U)[2]
for( m in 1:nsim ){
xi.boot <- sample( delta, replace=TRUE )
eps.boot <- sample( nu , replace=TRUE )
for( i in 1:Q ){
idx <- Q1[i]:Q2[i]
xi.boot[idx] <- sqrt(tsq[i]) * xi.boot[idx]
}
for( i in 1:K ){
idx <- N1[i]:N2[i]
eps.boot[idx] <- sqrt(ssq[i]) * eps.boot[idx]
}
Y.boot[,m] <- XT.boot + U%*%xi.boot + eps.boot
}
} else{
for( m in 1:nsim ){
eps.boot <- sample( nu , replace=TRUE )
for( i in 1:K ){
idx <- N1[i]:N2[i]
eps.boot[idx] <- sqrt(ssq[i]) * eps.boot[idx]
}
Y.boot[,m] <- XT.boot + eps.boot
}
}
## Return the bootstrap samples
return( Y.boot )
}
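## Illustrative sketch (added for exposition, not part of the package source):
## to bootstrap around a specific (non-null) theta, supply it and set
## null.resids = FALSE as described in the Details above. Here theta comes
## from an unconstrained least-squares fit, purely for illustration.
# mmat <- model_terms_clme( mcv ~ time + temp + sex + (1|id), data = rat.blood )
# X <- cbind( mmat$X1, mmat$X2 )
# theta_hat <- MASS::ginv( t(X) %*% X ) %*% ( t(X) %*% mmat$Y )
# Y.boot <- resid_boot( mcv ~ time + temp + sex + (1|id), data = rat.blood,
#                       theta = theta_hat, null.resids = FALSE, nsim = 10 )
# dim( Y.boot )   # N x 10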
|
/scratch/gouwar.j/cran-all/cranData/CLME/R/resid_boot.r
|
#' Shiny GUI for CLME
#'
#' @description Opens a graphical user interface to run \pkg{CLME}, built from the \pkg{shiny} package.
#'
#' @rdname shiny_clme
#'
#' @param input input from GUI.
#' @param output output to GUI.
#'
#' @details
#' Currently the GUI does not allow specification of custom orders for the alternative hypothesis. Future versions may enable this capability.
#' The data should be a CSV or tab-delimited file with the first row being a header. Variables are identified using their column letter or number (e.g., 1 or A). Separate multiple variables with a comma (e.g., 1,2,4 or A,B,D), or select a range of variables with a dash (e.g., 1-4 or A-D). Set to 'None' (default) to indicate no covariates or random effects.
#' If group levels for the constrained effect are character, they may not be read in the proper order. An extra column may contain the ordered group levels (it may therefore have different length than the rest of the dataset).
#'
#' @note
#' This function is primarily designed to call \code{\link{clme}}.
#'
#'
#'
#' @examples
#' \dontrun{ shiny_clme() }
#'
#' @import shiny
#' @export
#'
shiny_clme <- function(){
library("CLME")
runApp(
list(
ui = shinyUI_clme,
server = shinyServer_clme
)
)
}
##############################################################################
##
## The user interface for the shiny app
##
##############################################################################
# shinyUI(bootstrapPage())
#' CLME shiny GUI: UI
#'
#' @description The UI for the shiny app in CLME
#'
#' @rdname shiny_clme
#'
#' @export
#'
shinyUI_clme <- fluidPage(
titlePanel("Constrained Linear Mixed Effects"),
sidebarLayout(
sidebarPanel(
##
## Data input
##
fileInput('file1', 'Data (choose file)',
accept=c('text/csv', 'text/comma-separated-values,text/plain', '.csv', '.xlsx')
),
selectInput(inputId = "dlmt",
label = "Delimiter:",
choices=c("Comma-delimited", "Tab-delimited", "xlsx")
),
##
## Main controls
##
hr(),
selectInput(inputId = "solver",
label = "Type of Solver / fitting norm:",
choices=c("Least Squares (LS)", "Least Absolute Value (L1)",
"General LS (GLS)", "Asymmetrix LS" ,
"L1 approx", "Huber", "SILF", "Chebyshev",
"L-p Power Norm", "Quantile", "Poisson" )
),
selectInput(inputId = "tsfunc",
label = "Test Statistic:",
choices=c("LRT", "Williams")
),
selectInput(inputId = "order",
label = "Order:",
choices=c("Unspecified", "Simple", "Umbrella", "Tree")
),
selectInput(inputId = "decreasing",
label = "Direction:",
choices=c("Unspecified (both)", "Increasing", "Decreasing")
),
textInput(inputId = "node",
label = "Node:",
value = "None"
),
helpText("Identify columns of data"),
helpText("Use column letters or numbers"),
helpText("e.g., 1-3 or A-C or a list: 1,4,6"),
textInput(inputId = "yy",
label = "Column of response:"),
textInput(inputId = "p1",
label = "Column of constrained effect:"),
textInput(inputId = "p2",
label = "Column(s) of Covariates:", value="None"),
textInput(inputId = "q",
label = "Column(s) of random effects:", value="None"),
numericInput(inputId = "nsim",
label = "Number of Bootstraps:",
min=0 , max=50000 , value=1000
),
##
## Action buttons
##
hr(),
helpText("Click to run model or update output."),
actionButton(inputId = "compute1",
label = "Run model"
),
actionButton(inputId = "compute2",
label = "Update plot"
),
actionButton(inputId = "compute3",
label = "Update summary"
),
##
## Output controls
##
hr(),
checkboxInput(inputId = "outcheck",
label = "Format Output:",
value=FALSE
),
conditionalPanel(condition = "input.outcheck",
checkboxInput(inputId = "plotci",
label = "CI on Plot:",
value=FALSE
)
),
conditionalPanel(condition = "input.outcheck",
sliderInput(inputId = "alpha",
label = "Alpha level:",
min=0 , max=0.15 , value=0.05 , step=0.01
)
),
conditionalPanel(condition = "input.outcheck",
checkboxInput(inputId = "makeFactor",
label = "Force constrained effect to be factor",
value=FALSE
)
),
#br(),
#conditionalPanel(condition = "input.outcheck",
# helpText("Number of decimal places for:")
#),
#conditionalPanel(condition = "input.outcheck",
# sliderInput(inputId = "digits",
# label = "p-values:",
# min=0 , max=8 , value=4 , step=1
# )
#),
##
## Extra parameters
##
hr(),
checkboxInput(inputId = "varssq",
label = "Heteroscedasticity:",
value=FALSE
),
conditionalPanel(condition = "input.varssq",
textInput(inputId = "gfix",
label = "Column of variance groups:")
),
checkboxInput(inputId = "xlevel1",
label = "Define order of constrained groups:",
value=FALSE
),
conditionalPanel(condition = "input.xlevel1",
textInput(inputId = "xlevels",
label = "Column of ordered group levels:")
),
##
## Technical controls
##
checkboxInput(inputId = "technical",
label = "Select Control Parameters:",
value=FALSE
),
conditionalPanel(condition = "input.technical",
numericInput(inputId = "emiter",
label = "Max EM Iterations:",
min=10 , max=50000 , value=500
)
),
conditionalPanel(condition = "input.technical",
numericInput(inputId = "mqiter",
label = "Max MINQUE Iterations:",
min=10 , max=50000 , value=500)
),
conditionalPanel(condition = "input.technical",
numericInput(inputId = "emeps",
label = "EM Convergence Criteria:",
min=0 , max=50000 , value=0.0001)
),
conditionalPanel(condition = "input.technical",
numericInput(inputId = "mqeps",
label = "MINQUE Convergence Criteria:",
min=0 , max=50000 , value=0.0001)
),
conditionalPanel(condition = "input.technical",
numericInput(inputId = "ranseed",
label = "Set RNG Seed:",
min=0 , max=Inf , value=42)
)
),
mainPanel(
tabsetPanel(
tabPanel("Data Summary" ,
plotOutput(outputId = "fig0", height = "650px"),
tableOutput(outputId = "sum_table")
),
tabPanel("Model Summary",
verbatimTextOutput(outputId = "summary"),
h5("Code to run model:"),
verbatimTextOutput(outputId = "fullCode")
),
tabPanel("Model Plot" ,
plotOutput(outputId = "fig1", height = "650px")
),
tabPanel("Model Data" ,
dataTableOutput(outputId = "datatbl")
)
)
)
))
##############################################################################
##
## The server for the shiny app
##
##############################################################################
#' CLME shiny GUI: server
#'
#' @description The server for the shiny app in CLME
#'
#' @rdname shiny_clme
#'
#'
#' @import graphics
#' @importFrom openxlsx read.xlsx
#' @importFrom utils read.csv
#'
#' @export
#'
shinyServer_clme <- function(input, output) {
clme_out <- reactive({
compute1 <- input$compute1
## Put all the code to run CLME inside this
if( compute1 > 0 ){
isolate({
file1 <- input$file1[4]
solver <- input$solver
dlmt <- input$dlmt
#data1 <- as.matrix( read.csv( file=paste(file1) ) )
if( dlmt=="Comma-delimited"){
data1 <- read.csv( file=paste(file1) )
file_text <- paste0( "dFrame <- data.frame(read.csv(file='", input$file1[1], "'))" )
}
if( dlmt=="Tab-delimited" ){
data1 <- read.csv( file=paste(file1) , sep="\t")
file_text <- paste0( "dFrame <- data.frame(read.csv(file='", input$file1[1], "', sep='\t'))" )
}
if( dlmt=="xlsx"){
data1 <- read.xlsx( xlsxFile=paste(file1), colNames=TRUE )
file_text <- paste0( "dFrame <- data.frame(read.xlsx(xlsxFile='", input$file1[1], "', colNames=TRUE))" )
}
yy <- input$yy
p1 <- input$p1
p2 <- input$p2
q <- input$q
nsim <- input$nsim
alpha <- 0.05
makeFactor <- FALSE
if( input$outcheck==TRUE ){
makeFactor <- input$makeFactor
alpha <- input$alpha
}
if( p2 == '' | p2==0 ){ p2 <- "None" }
if( q == '' | q==0 ){ q <- "None" }
##
mySolver <- switch( EXPR=solver ,
"Least Squares (LS)" = "LS",
"Least Absolute Value (L1)" = "L1",
"General LS (GLS)" = "GLS",
"Asymmetrix LS" = "asyLS",
"L1 approx" = "L1eps",
"Huber" = "huber",
"SILF" = "SILF",
"Chebyshev" = "chebyshev",
"L-p Power Norm" = "Lp",
"Quantile" = "quantile",
"Poisson" = "poisson")
## Get the indexes for X2 and U
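## parse_idx() turns a column-specification string into numeric column indices.
## Accepted forms (matching the helpText in the UI): a single number or letter
## ("3", "C"), a range ("1-3", "A-C"), or a comma-separated list ("1,4,6").
## "None" (in any case) returns 0.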
parse_idx <- function( arg1 ){
arg1 <- toupper(gsub( " ", "", arg1 ))
if( arg1=="NONE" ){
p4 <- 0
} else{
p2s <- strsplit(arg1, ",")[[1]]
if( length(p2s) > 1 ){
p3 <- vector()
for( ii in 1:length(p2s) ){
next1 <- sapply( p2s[ii], FUN=function(x){ strsplit(x, "-") } )[[1]]
if( length(next1)==1 ){
if( next1 %in% LETTERS ){
p3 <- append( p3 , which(next1 == LETTERS) )
} else{
p3 <- append( p3 , as.numeric(next1) )
}
} else{
next1a <- next1[1]
next1b <- next1[2]
if( next1a%in%LETTERS & next1b%in%LETTERS ){
n1a <- which(next1a == LETTERS)
n1b <- which(next1b == LETTERS)
p3 <- append( p3 , n1a:n1b )
} else{
p3 <- append( p3 , as.numeric(next1a):as.numeric(next1b) )
}
}
}
} else{
next1 <- sapply( p2s, FUN=function(x){ strsplit(x, "-") } )[[1]]
if( length(next1)==1 ){
if( next1 %in% LETTERS ){
p3 <- which(next1 == LETTERS)
} else{
p3 <-as.numeric(next1)
}
} else{
next1a <- next1[1]
next1b <- next1[2]
if( next1a%in%LETTERS & next1b%in%LETTERS ){
n1a <- which(next1a == LETTERS)
n1b <- which(next1b == LETTERS)
p3 <- n1a:n1b
} else{
p3 <- as.numeric(next1a):as.numeric(next1b)
}
}
}
p4 <- as.numeric(p3)
}
p4
}
idx_yy <- parse_idx(yy)
yn <- colnames(data1)[idx_yy]
idx_x1 <- parse_idx(p1)
x1n <- colnames(data1)[idx_x1]
cov <- ran <- FALSE
if( p2 != "None" ){
idx_x2 <- parse_idx(p2)
x2n <- colnames(data1)[idx_x2]
cov <- TRUE
}
if( q != "None" ){
idx_u <- parse_idx(q)
uun <- colnames(data1)[idx_u]
ran <- TRUE
}
## Create the formula
## Reorder the levels of X1 if needed
if( input$xlevel1 ){
xlev <- parse_idx(input$xlevels)
nlev <- length( levels(as.factor( data1[,xlev] )) )
xlevels <- as.character( data1[1:nlev,xlev] )
if( any(xlevels=="") ){
xlevels <- xlevels[ -which(xlevels=="")]
}
data1[,idx_x1] <- factor( data1[,idx_x1], levels=xlevels, ordered=TRUE )
} else if( !is.ordered(data1[,idx_x1]) ){
data1[,idx_x1] <- factor( data1[,idx_x1] , ordered=TRUE )
}
## Build the formula
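## The resulting text formula is, e.g., "y ~ x1", "y ~ x1 + x2",
## "y ~ x1 + (1|u)", or "y ~ x1 + x2 + (1|u)", depending on whether
## covariates and/or random effects were specified.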
x1f <- paste( x1n , collapse=" + ")
if( !cov & !ran ){
## No extra terms
tform <- paste( yn, "~", x1f )
} else if( cov & !ran ){
## Yes covariates, no random effects
x2f <- paste( x2n , collapse=" + ")
tform <- paste( yn, "~", x1f, "+", x2f )
} else if( !cov & ran ){
## No covariates, yes random effects
uf <- paste( "(1|", uun , ")", collapse=" + " )
tform <- paste( yn, "~", x1f, "+", uf )
} else if( cov & ran ){
## Covariates and random effects
x2f <- paste( x2n , collapse=" + ")
uf <- paste( "(1|", uun , ")", collapse=" + " )
tform <- paste( yn, "~", x1f, "+", x2f, "+", uf )
}
frml <- formula( tform )
## Input control arguments
## Select the test statistic
if( input$tsfunc=="Williams" ){
tsf <- w.stat
} else{
tsf <- lrt.stat
}
## Construct the constraints
constraints <- list()
if( input$order=="Unspecified" ){
constraints$order <- c("simple", "umbrella")
} else{
constraints$order <- paste0(tolower(input$order))
constraints$order <- gsub( "tree", "simple.tree", constraints$order )
}
if( input$decreasing=="Increasing" ){
constraints$decreasing <- FALSE
} else if( input$decreasing=="Decreasing" ){
constraints$decreasing <- TRUE
} else{
constraints$decreasing <- c(TRUE,FALSE)
}
if( input$node=="None" ){
constraints$node <- 1:length(levels(data1[,idx_x1]))
} else{
constraints$node <- parse_idx( input$node )
}
## Create the code as text string to run model
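## gen_code mirrors the clme()/summary() call fitted below; it is only a
## character string, displayed to the user under "Code to run model" in the
## Model Summary tab.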
gen_code <- paste0( "clme_out <- summary( clme( ", tform, ", data=dFrame" )
gfix <- NULL
if( input$varssq ){
idx_grp <- parse_idx(input$gfix)
gfix <- data1[,idx_grp]
gen_code <- paste0( gen_code, ", gfix = dFrame[,", idx_grp,"]" )
}
gen_code <- paste0( gen_code, ", constraints=constraints" )
if( input$tsfunc=="Williams" ){
gen_code <- paste0( gen_code, ", tsf=w.stat" )
} else{
gen_code <- paste0( gen_code, ", tsf=lrt.stat" )
}
emiter <- 500
mqiter <- 500
emeps <- 0.00001
mqeps <- 0.00001
seedvl <- NULL
if( input$technical==TRUE ){
emiter <- input$emiter
mqiter <- input$mqiter
emeps <- input$emeps
mqeps <- input$mqeps
seedvl <- input$ranseed
}
gen_code <- paste0( gen_code, ", mySolver='", mySolver , "', ",
                    "em.eps=" , emeps, ", ",
                    "em.iter=", emiter, ", ",
                    "mq.eps=" , mqeps, ", ",
                    "mq.iter=", mqiter, "), ",
                    "alpha=" , alpha, ", ",
                    "seed=" , ifelse( is.null(seedvl), "NULL", seedvl ), ", ",
                    "nsim=" , nsim
)
gen_code <- paste0( gen_code, ")" )
## Build the code for the constraints
if( length(constraints$order)>1 ){
con_text <- paste0( "constraints <- list(order=c('", paste0(constraints$order, collapse="','"), "'), decreasing=" )
} else{
con_text <- paste0( "constraints <- list(order='", constraints$order, "', decreasing=" )
}
if( length(constraints$decreasing)>1 ){
con_text <- paste0( con_text, "c(TRUE,FALSE), node=" )
} else{
con_text <- paste0( con_text, constraints$decreasing, ", node=" )
}
if( length(constraints$node)>1 ){
con_text <- paste0( con_text, "c(", paste0(constraints$node, collapse=","), ") )" )
} else{
con_text <- paste0( con_text, constraints$node, ")" )
}
## Put all of the code together
full_code <- list( file_text = file_text, con_text = con_text, gen_code = gen_code )
## Run the model
data2 <- as.data.frame( data1 )
withProgress(message = 'Status:', value = 1, {
setProgress(1/2, detail = paste("Computing"))
clme.results <- summary( clme(
formula=frml, data=data2, gfix=gfix, constraints=constraints,
verbose = c(FALSE, FALSE, FALSE), tsf=tsf, tsf.ind = w.stat.ind,
mySolver=mySolver, em.eps=emeps, em.iter=emiter, mq.eps=mqeps,
mq.iter=mqiter ), alpha=alpha, seed=seedvl, nsim=nsim )
#if( ncon==1 & input$xlevel1 ){
# clme.results <- summary( clme(
# formula=frml, data=data2, gfix=gfix, constraints=constraints,
# verbose = c(FALSE, FALSE, FALSE), tsf=tsf, tsf.ind = w.stat.ind,
# mySolver=mySolver, ncon=ncon, levels=list(idx_x1, xlevels),
# em.eps=emeps, em.iter=emiter, mq.eps=mqeps, mq.iter=mqiter
# ), alpha=alpha, seed=seedvl, nsim=nsim )
#} else{
# clme.results <- summary( clme(
# formula=frml, data=data2, gfix=gfix, constraints=constraints,
# verbose = c(FALSE, FALSE, FALSE), tsf=tsf, tsf.ind = w.stat.ind,
# mySolver=mySolver, ncon=ncon,
# em.eps=emeps, em.iter=emiter, mq.eps=mqeps, mq.iter=mqiter
# ), alpha=alpha, seed=seedvl, nsim=nsim )
# }
})
clme.results$full_code <- full_code
clme.results
})
}
})
##
## Boxplot of the data
##
output$fig0 <- renderPlot({
clme_out <- clme_out()
if( length(clme_out)>1 ){
dframe <- clme_out$dframe
if( is.factor( dframe[,2] ) ){
if( length(levels(dframe[,2]))==clme_out$P1 ){
boxplot( dframe[,1] ~ dframe[,2],
xlab=colnames(dframe)[2], ylab=colnames(dframe)[1] )
}
#if( (ncol(dframe)-1) >= clme_out$P1 ){
# xx1 <- apply( dframe[,2:(clme_out$P1+1)], 2, FUN=function(x){ (max(x)==1)*(min(x)==0) } )
#
#}
} else{
xx <- yy <- c(0,1)
plot( xx, yy, xlab="", ylab="", xaxt='n', yaxt='n', main="", col=0 )
legend( "top", inset=0.45, box.lwd=0,
legend="Cannot detect appropriate plot type.\n Try making constrained variable a factor.")
}
} else{
plot( 1:5 , 1:5 , col="white", xaxt='n', yaxt='n', xlab="", ylab="", frame.plot=FALSE )
}
})
output$sum_table <- renderTable({
clme_out <- clme_out()
if( length(clme_out)>1 ){
dframe <- clme_out$dframe
funq1 <- function(x) quantile(x, 0.25 )
funq3 <- function(x) quantile(x, 0.75 )
xbar <- aggregate( dframe[,1] , by=list(dframe[,2]), FUN="mean")
ndat <- aggregate( dframe[,1] , by=list(dframe[,2]), FUN="length")
stdv <- aggregate( dframe[,1] , by=list(dframe[,2]), FUN="sd")
minx <- aggregate( dframe[,1] , by=list(dframe[,2]), FUN="min")
maxx <- aggregate( dframe[,1] , by=list(dframe[,2]), FUN="max")
medn <- aggregate( dframe[,1] , by=list(dframe[,2]), FUN="median")
qrt1 <- aggregate( dframe[,1] , by=list(dframe[,2]), FUN=funq1)
qrt3 <- aggregate( dframe[,1] , by=list(dframe[,2]), FUN=funq3)
tbl1 <- cbind( ndat, xbar[,2], stdv[,2], minx[,2], qrt1[,2], medn[,2], qrt3[,2], maxx[,2] )
colnames(tbl1) <- c("Groups", "N", "Mean", "Std", "Min", "Q1", "Med", "Q3", "Max")
format(tbl1, digits=3)
} else{
tbl1 <- matrix( "No results yet" , ncol=1, nrow=1)
format(tbl1, digits=3)
}
})
##
## Summarize the model
##
output$fig1 <- renderPlot({
clme_out <- clme_out()
if( length(clme_out)>1 ){
compute2 <- input$compute2
if( compute2 > 0 ){
isolate({
ciwd <- FALSE
alpha <- 0.05
if( input$outcheck==TRUE ){
ciwd <- input$plotci
alpha <- input$alpha
}
plot( clme_out , ci=ciwd , alpha=alpha)
})
}
} else{
plot( 1:5 , 1:5 , col="white", xaxt='n', yaxt='n', xlab="", ylab="", frame.plot=FALSE )
}
})
output$summary <- renderPrint({
clme_out <- clme_out()
if( length(clme_out)>1 ){
compute3 <- input$compute3
if( compute3 > 0 ){
isolate({
clme_out
})
}
} else{
print( "Model has not yet been run." )
}
})
output$fullCode <- renderPrint({
clme_out <- clme_out()
full_code <- clme_out$full_code
if( length(clme_out)>1 ){
compute3 <- input$compute3
if( compute3 > 0 ){
isolate({
cat( paste0(full_code$file_text), "\n# NOTE: may need to add path to file\n\n",
paste0(full_code$con_text), "\n\n", paste0(full_code$gen_code) )
})
}
} else{
print( "Model has not yet been run." )
}
})
output$datatbl <- renderDataTable({
clme_out <- clme_out()
if( length(clme_out) > 1 ){
clme_out$dframe
}
})
## To check the output of various things when I'm updating
#output$etc <- renderPrint({
# print( clme_out() )
#})
}
|
/scratch/gouwar.j/cran-all/cranData/CLME/R/shiny_clme.r
|
#' Produce summary values for objects of class \code{clme}
#'
#' @description Summarizes the output of objects of class \code{clme}, such as those produced by \code{\link{clme}}.
#'
#' @param object an object of class \code{clme}.
#' @param nsim the number of bootstrap samples to use for inference.
#' @param seed the value for the seed of the random number generator.
#' @param verbose vector of logicals. First element will print progress for bootstrap test,
#' second element is passed to the EM algorithm for every bootstrap sample.
#' @param ... additional arguments passed to other functions.
#'
#'
#' @return
#' The output of \code{summary.clme} is an object of the class \code{summary.clme}. This is a list
#' containing the input object (of class \code{clme}), along with elements:
#' \item{\code{p.value}}{ p-value for the global hypothesis}
#' \item{\code{p.value.ind}}{ p-values for each of the constraints}
#'
#'
#'
#' @seealso
#' \code{\link{CLME-package}}
#' \code{\link{clme}}
#'
#' @examples
#' \dontrun{
#' set.seed( 42 )
#' data( rat.blood )
#' cons <- list(order = "simple", decreasing = FALSE, node = 1 )
#' clme.out <- clme(mcv ~ time + temp + sex + (1|id), data = rat.blood ,
#' constraints = cons, seed = 42, nsim = 10)
#'
#' summary( clme.out )
#' }
#'
#'
#' @method summary clme
#' @export
#'
summary.clme <- function( object, nsim=1000, seed=NULL, verbose=c(FALSE,FALSE), ... ){
if( !is.clme(object) ){ stop("'object' is not of class clme")}
## Extract some values from the fitted object
cust_const <- object$cust_const
all_pair <- object$all_pair
if( cust_const==TRUE ){
loop.const <- object$constraints
MNK <- 1
} else{
search.grid <- object$search.grid
MNK <- dim( search.grid )[1]
}
mmat <- model_terms_clme( formula(object), data=object$dframe )
P1 <- mmat$P1
X1 <- mmat$X1
X2 <- mmat$X2
U <- mmat$U
Qs <- object$gran
tsf <- object$tsf
tsf.ind <- object$tsf.ind
mq.phi <- object$mq.phi
est_const <- object$constraints
est_order <- object$order$est_order
if( length(verbose)<2 ){
verbose <- c(verbose, rep(FALSE, 2-length(verbose) ) )
}
# Pick up some values from the input object if they are there
if( !is.null(object$nsim) ){
nsim2 <- eval( object$nsim )
} else{
nsim2 <- nsim
object$nsim <- nsim
}
if( !is.null(object$seed) ){
seed2 <- eval( object$seed )
} else{
seed2 <- seed
object$seed <- seed
}
mr <- clme_resids( formula=formula(object), data=object$dframe,
gfix=object$gfix_group )
## This is the loop for the bootstrap simulations
# Eventually this should be (optionally) parallelized
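# Each column of Y_boot is a resampled response vector; the constrained EM is
# re-fit to every column (over the full order search grid when the order is
# not fixed), and the resulting test statistics form the reference
# distribution for the p-values.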
if( nsim2 > 0 ){
## Obtain bootstrap samples
Y_boot <- resid_boot( formula=formula(object), data=object$dframe, gfix=object$gfix_group,
eps=mr$PA, xi=mr$xi, ssq=mr$ssq, tsq=mr$tsq,
cov.theta=mr$cov.theta, nsim=nsim2,
theta=object$theta.null, mySolver=object$mySolver,
seed=seed2, null.resids=FALSE )
## EM for the bootstrap samples
TS.boot <- matrix( 0, nrow=nsim2, ncol=length(object$ts.glb) )
TS.boot.ind <- matrix( 0, nrow=nsim2, ncol=nrow(est_const$A) )
p.value <- rep( 0 , length(object$ts.glb) )
pval.ind <- rep( 0 , nrow(est_const$A) )
mprint <- round( seq( 1 , round(nsim2*0.9), length.out=10 ) )
for( m in 1:nsim2 ){
if( verbose[1]==TRUE & (m %in% mprint) ){
print( paste( "Bootstrap Iteration " , m , " of " , nsim2 , sep=""))
}
## Loop through the search grid
ts.boot <- -Inf
for( mnk in 1:MNK ){
if( cust_const==FALSE ){
grid.row <- list( order=search.grid[mnk,1], node=search.grid[mnk,3],
decreasing=search.grid[mnk,2] )
loop.const <- create.constraints( P1=P1, constraints=grid.row )
}
clme.temp <- clme_em( Y=Y_boot[,m], X1=X1, X2=X2, U=U, Nks=object$gfix,
Qs=Qs, constraints=loop.const, mq.phi=mq.phi,
tsf=tsf, tsf.ind=tsf.ind, mySolver=object$mySolver,
verbose=verbose[2], all_pair=all_pair, dvar=object$ssq, ... )
idx <- which(clme.temp$ts.glb > ts.boot)
if( length(idx)>0 ){
ts.boot[idx] <- clme.temp$ts.glb[idx]
}
update.ind <- (MNK==1) + (mnk == est_order)
if( update.ind>0 ){
ts.ind.boot <- clme.temp$ts.ind
}
}
# Compute p-values
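# Count bootstrap statistics at least as extreme as the observed ones
# (both tails when all_pair==TRUE, upper tail only otherwise); the counts
# are divided by nsim2 after the loop to give p-values.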
if( all_pair==TRUE ){
p.value <- p.value + 1*( ts.boot >= abs(object$ts.glb) ) + 1*( ts.boot <= -abs(object$ts.glb) )
pval.ind <- pval.ind + 1*(ts.ind.boot >= abs(object$ts.ind) ) + 1*(ts.ind.boot <= -abs(object$ts.ind) )
} else{
# The default / original: one-sided inference
p.value <- p.value + 1*( ts.boot >= object$ts.glb )
pval.ind <- pval.ind + 1*(ts.ind.boot >= object$ts.ind )
}
# Collect test stats
TS.boot[m,] <- ts.boot
TS.boot.ind[m,] <- ts.ind.boot
}
object$p.value <- p.value/nsim2
object$p.value.ind <- pval.ind/nsim2
## End of the SEQUENTIAL BOOTSTRAP LOOP
} else{
object$p.value <- NA
object$p.value.ind <- rep( NA, nrow(est_const$A) )
}
## Collect the results and return the object
class(object) <- "summary.clme"
return(object)
}
#' S3 method to print a summary for objects of class \code{clme}
#'
#' @description Summarizes the output of objects of class \code{clme}, such as those produced by \code{\link{clme}}. Prints a tabulated display of global and individual tests, as well as parameter estimates.
#'
#' @param x an object of class \code{clme}.
#' @param alpha level of significance.
#' @param digits number of decimal digits to print.
#' @param ... additional arguments passed to other functions.
#'
#' @note
#' The individual tests are performed on the specified order. If no specific order was specified, then the individual tests are performed on the estimated order.
#'
#' @return
#' \code{NULL}, just prints results to the console.
#'
#' @seealso
#' \code{\link{CLME-package}}
#' \code{\link{clme}}
#'
#' @examples
#' \dontrun{
#' set.seed( 42 )
#' data( rat.blood )
#' cons <- list(order = "simple", decreasing = FALSE, node = 1 )
#' clme.out <- clme(mcv ~ time + temp + sex + (1|id), data = rat.blood ,
#' constraints = cons, seed = 42, nsim = 10)
#'
#' summary( clme.out )
#' }
#'
#' @importFrom stringr str_pad
#' @importFrom stringr str_trim
#' @importFrom prettyR decimal.align
#'
#' @method print summary.clme
#' @export
#'
print.summary.clme <- function( x, alpha=0.05, digits=4, ...){
object <- x
all_pair <- object$all_pair
if( class(object)=="summary.clme" ){
class(object) <- "clme"
}
## Title and formula
cat( "Linear mixed model subject to order restrictions\n" )
cat( "Formula: ")
print( object$formula )
if( object$order$order=="unconstrained" ){
cat( paste0("\nNo order restrictions\n (pairwise analysis with two-tailed alternatives)") )
} else{
## THIS CAN PROBABLY BE REARRANGED TO BE MORE STRAIGHTFORWARD
## Order statement
if( object$order$order=="simple" ){
order <- "simple order"
}
if( object$order$order=="umbrella" ){
order <- paste0("umbrella order with node at ", object$order$node )
}
if( object$order$order=="simple.tree" ){
order <- paste0("tree order with node at ", object$order$node )
}
if( object$order$order == "custom" ){
cat( "\nCustom order constraints were provided" )
} else{
if( object$order$estimated ){
## Estimated the order
cat( paste0("\nOrder estimated: " , object$order$inc.dec , " ", order ) )
} else{
cat( paste0("\nOrder specified: " , object$order$inc.dec , " ", order ) )
}
}
}
## Diagnostic criterion
crit <- c(logLik(object),
AIC(object),
BIC(object) )
critc <- format( crit , digits=4)
cat( "\n\nlog-likelihood:", critc[1] )
cat( "\nAIC: " , critc[2] )
cat( "\nBIC: " , critc[3] )
cat( "\n(log-likelihood, AIC, BIC computed under normality)")
## Tests
est <- fixef(object)
tnames <- names(est)
Amat <- object$constraints$A
Bmat <- object$constraints$B
## Global tests
if( is.null(names(object$ts.glb)) ){
glbn <- "Unknown"
} else{
glbn <- names( object$ts.glb )
}
#if( object$order$order != "unconstrained" ){
if( length(object$ts.glb)>1 ){
glbs <- object$ts.glb
grow <- matrix( "NA" , nrow=length(glbs), ncol=3 )
for( ii in 1:length(glbs) ){
grow[ii,] <- c( glbn[ii], round(object$ts.glb[ii],3) , sprintf("%.4f", object$p.value[ii]) )
}
#colnames( grow ) <- c("Contrast", "Estimate", "Stat", "p-value")
#grow <- .align_table.clme( grow )
for( ii in 2:3){
val1 <- str_trim( decimal.align( grow[,ii]), side="right" )
grow[,ii] <- str_pad(val1, width=max(nchar(val1)), side = "right", pad = "0")
}
colnames( grow ) <- c("Contrast", "Statistic", "p-value")
grow1 <- c(colnames(grow)[1], grow[,1])
grow1 <- str_pad( grow1, width=max(nchar(grow1)), side = "right", pad = " ")
grow2 <- .align_table.clme( grow[,2:3,drop=FALSE] )
grow <- cbind( grow1[2:length(grow1)] , grow2)
colnames(grow)[1] <- grow1[1]
cat( "\n\nGlobal tests: ")
cat( "\n", paste(colnames(grow) , collapse=" ") )
for( ii in 1:length(glbs) ){
cat( "\n", paste(grow[ii,] , collapse=" ") )
}
} else{
grow <- cbind( glbn, round(object$ts.glb,3) , sprintf("%.4f", object$p.value) )
colnames( grow ) <- c("Contrast", "Statistic", "p-value")
grow1 <- c(colnames(grow)[1], grow[,1])
grow1 <- str_pad( grow1, width=max(nchar(grow1)), side = "right", pad = " ")
grow2 <- .align_table.clme( grow[,2:3,drop=FALSE] )
grow <- cbind( grow1[2:length(grow1)] , grow2)
colnames(grow)[1] <- grow1[1]
cat( "\n\nGlobal test: ")
cat( "\n", paste0(colnames(grow) , collapse=" ") )
cat( "\n", paste0(grow , collapse=" ") )
}
#}
## Individual tests
glbs <- object$ts.ind
grow <- matrix( "NA" , nrow=length(glbs), ncol=4 )
for( ii in 1:length(glbs) ){
glbn <- paste( tnames[Amat[ii,2]] , "-", tnames[Amat[ii,1]] )
glbe <- round( est[Amat[ii,2]] - est[Amat[ii,1]], digits=3 )
grow[ii,] <- c( glbn, glbe, round(object$ts.ind[ii],3) , sprintf("%.4f", object$p.value.ind[ii]) )
}
for( ii in 2:4){
if( !any(grow[,ii]==rep("NA",nrow(grow))) ){
val1 <- str_trim( decimal.align( grow[,ii]), side="right" )
grow[,ii] <- str_pad(val1, width=max(nchar(val1)), side = "right", pad = "0")
}
}
colnames( grow ) <- c("Contrast", "Estimate", "Statistic", "p-value")
grow1 <- c(colnames(grow)[1], grow[,1])
grow1 <- str_pad( grow1, width=max(nchar(grow1)), side = "right", pad = " ")
grow2 <- .align_table.clme( grow[,2:4,drop=FALSE] )
grow <- cbind( grow1[2:length(grow1)] , grow2)
colnames(grow)[1] <- grow1[1]
cat( "\n\nIndividual Tests (Williams' type tests): ")
cat( "\n", paste(colnames(grow) , collapse=" ") )
for( ii in 1:length(glbs) ){
cat( "\n", paste(grow[ii,] , collapse=" ") )
}
## Random effects
cat( "\n\nVariance components: \n")
print( VarCorr.clme(object) )
## Fixed effects
vars <- diag( vcov.clme(object) )
CIs <- confint.clme( object , level=(1-alpha), ...)
tvals <- cbind(tnames = str_pad(tnames, width=max(nchar(tnames)), side = "right", pad = " "),
cest = format(est , digits=4),
cvars = format(sqrt(vars), digits=4),
clcl = format(CIs[,1] , digits=4),
cucl = format(CIs[,2] , digits=4))
cipct <- round(100*(1-alpha),2)
colnames(tvals) <- c(" ", "Estimate", "Std. Err",
paste0(cipct, "% lower"), paste0(cipct, "% upper"))
tvals <- .align_table.clme( tvals )
cat( "\nFixed effect coefficients (theta): \n")
cat( paste0(colnames(tvals), collapse=" ") )
for( ii in 1:length(est) ){
cat( "\n", paste0( c(tvals[ii,]), collapse=" ") )
}
if( all_pair==FALSE ){
cat( "\nStd. Errors and confidence limits based on unconstrained covariance matrix")
cat( "\n\nParameters are ordered according to the following factor levels:\n" )
cat( paste( names(fixef(object))[1:object$P1], collapse=", ") )
}
cat( "\n\nModel based on", paste0(object$nsim), "bootstrap samples" )
}
|
/scratch/gouwar.j/cran-all/cranData/CLME/R/summary_clme.r
|
#' Williams' Type Test Statistic.
#'
#' @description
#' Calculates a Williams' type test statistic for a constrained linear mixed effects model.
#'
#' @rdname w.stat
#'
#'
#' @param theta estimated coefficients.
#' @param cov.theta covariance matrix of the (unconstrained) coefficients.
#' @param B matrix to obtain the global contrast.
#' @param A matrix of linear constraints.
#' @param ... additional arguments, to enable custom test statistic functions.
#'
#' @details
#' See \code{\link{create.constraints}} for an example of \code{A}. Argument \code{B} is similar, but defines the global contrast for a Williams' type test statistic. This is the largest hypothesized difference in the constrained coefficients. So for an increasing simple order, the test statistic is the difference between the two extreme coefficients, \eqn{\theta_1}{theta_1} and \eqn{\theta_{p_1}}{theta_p1}, divided by the unconstrained standard error. For an umbrella order, two contrasts are considered, \eqn{\theta_1}{theta_1} to \eqn{\theta_{s}}{theta_s}, and \eqn{\theta_{p_1}}{theta_p1} to \eqn{\theta_{s}}{theta_s}, each divided by the appropriate unconstrained standard error. A general way to express this statistic is:
#'
#' \deqn{W = \max_{i} \frac{ \theta_{B[i,2]} - \theta_{B[i,1]} }{ \sqrt{ VAR\left( \theta_{B[i,2]} - \theta_{B[i,1]} \right) } }}{W = max_i ( theta_{B[i,2]} - theta_{B[i,1]} ) / sqrt( VAR( theta_{B[i,2]} - theta_{B[i,1]} ) )}
#'
#' where the numerator is the difference in the constrained estimates, and the standard error in the denominator is based on the covariance matrix of the unconstrained estimates.
#'
#' The function \code{w.stat.ind} does the same, but uses the \code{A} matrix which defines all of the individual constraints, and returns a test statistic for each constraint instead of taking the maximum.
#'
#' @return
#' Output is a numeric value.
#'
#' @note
#' See \code{\link{lrt.stat}} for information on creating custom test statistics.
#'
#' @examples
#' theta <- exp(1:4/4)
#' th.cov <- diag(4)
#' X1 <- matrix( 0 , nrow=1 , ncol=4 )
#' const <- create.constraints( P1=4 , constraints=list(order='simple' ,
#' decreasing=FALSE) )
#'
#' w.stat( theta , th.cov , const$B , const$A )
#'
#' w.stat.ind( theta , th.cov , const$B , const$A )
#'
#' @export
#'
##
## Williams' type statistic (global)
##
w.stat <- function( theta , cov.theta , B , A , ... ){
stats <- vector( "numeric",length=nrow(B) )
ctd <- diag( cov.theta )
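  # Each contrast is standardized by the unconstrained standard error of the
  # difference: sqrt( Var(theta_b2) + Var(theta_b1) - 2*Cov(theta_b1, theta_b2) ).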
stats <- apply( B , 1 ,
FUN=function(b,theta,cov,ctd){
std <- sqrt( ctd[b[1]] + ctd[b[2]] - 2*cov.theta[b[1],b[2]] )
(theta[b[2]]-theta[b[1]])/std
}, theta=theta, cov=cov.theta, ctd=ctd)
test.stat <- max( stats )
tnames <- names(theta)
ii <- min( which( stats == max(stats) ) )
if( !is.null(tnames) ){
names(test.stat) <- paste0( tnames[B[ii,2]] , " - ", tnames[B[ii,1]] )
} else{
names(test.stat) <- paste0( "Theta ", B[ii,2] , " - Theta ", B[ii,1] )
}
return( test.stat )
}
#' Williams' type statistic (individual)
#'
#' @rdname w.stat
#' @export
#'
w.stat.ind <- function( theta , cov.theta , B , A , ... ){
stats <- vector( "numeric",length=nrow(A) )
ctd <- diag( cov.theta )
stats <- apply( A , 1 ,
FUN=function(a,theta,cov,ctd){
std <- sqrt( ctd[a[1]] + ctd[a[2]] - 2*cov.theta[a[1],a[2]] )
(theta[a[2]]-theta[a[1]])/std
}, theta=theta, cov=cov.theta, ctd=ctd)
return( stats )
}
#' Likelihood ratio type statistic (global)
#'
#' @description
#' Calculates the likelihood ratio type test statistic (under the Normality assumption) for a
#' constrained linear mixed effects model. This is the default test statistic for \pkg{CLME}.
#'
#'
#' @param theta estimated coefficients.
#' @param theta.null coefficients estimated under the null hypothesis.
#' @param cov.theta covariance matrix of the (unconstrained) coefficients.
#' @param ... additional arguments, to enable custom test statistic functions.
#'
#' @return
#' Output is a numeric value.
#'
#' @note
#' This is an internal function, unlikely to be useful outside of \link{CLME-package}. To define custom functions, the arguments available are:
#'
#' \code{theta}, \code{theta.null}, \code{cov.theta}, \code{B}, \code{A}, \code{Y}, \code{X1}, \code{X2}, \code{U}, \code{tsq}, \code{ssq}, \code{Nks}, and \code{Qs}.
#'
#' Of the additional arguments, \code{B} and \code{A} are identical to those produced by \code{\link{create.constraints}}. The rest, \code{Y}, \code{X1}, \code{X2}, \code{U}, \code{tsq}, \code{ssq}, \code{Nks}, and \code{Qs}, are equivalent to arguments to \code{\link{clme_em}}.
#'
#' Custom functions must produce numeric output. Output may have length greater than 1, which corresponds to testing multiple global hypotheses.
#'
#' @seealso
#' \code{\link{clme_em}},
#' \code{\link{w.stat}}
#'
#'
#' @examples
#' data( rat.blood )
#' cons <- list(order = "simple", decreasing = FALSE, node = 1 )
#'
#' clme.out <- clme(mcv ~ time + temp + sex + (1|id), data = rat.blood ,
#' constraints = cons, seed = 42, nsim = 0)
#'
#' # Individually compute lrt statistic
#' lrt.stat(clme.out$theta, clme.out$theta.null, clme.out$cov.theta )
#'
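#' # A minimal sketch of a user-defined test statistic function (the name
#' # my.stat is hypothetical). Custom functions receive the arguments listed
#' # in the Note and must return a numeric value; such a function can then
#' # be passed to clme() through its 'tsf' argument.
#' my.stat <- function( theta, theta.null, cov.theta, ... ){
#'   max( abs( theta - theta.null ) )
#' }
#'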
#' @export
#'
#'
lrt.stat <- function( theta, theta.null, cov.theta, ... ){
theta.diff <- theta - theta.null
test.stat <- 2*c( t(theta.diff) %*% cov.theta %*% theta.diff )
names(test.stat) <- "Bootstrap LRT"
# Return test statistic
return(test.stat)
}
|
/scratch/gouwar.j/cran-all/cranData/CLME/R/test.stat.r
|
##
## Various support functions to assist the main functions
##
#' Create model matrices for \code{clme}
#'
#' @description
#' Parses formulas to creates model matrices for \code{clme}.
#'
#' @param formula a formula defining a linear fixed or mixed effects model. The constrained effect(s) must come before any unconstrained covariates on the right-hand side of the expression. The first \code{ncon} terms will be assumed to be constrained.
#' @param data data frame containing the variables in the model.
#'
#'
#' @note
#' The first term on the right-hand side of the formula should be the fixed effect
#' with constrained coefficients. Random effects are represented with a vertical bar,
#' so for example the random effect \code{U} would be included by
#' \code{Y ~ X1 + (1|U)}.
#'
#' The intercept is removed automatically. This is done to ensure that parameter
#' estimates are of the means of interest, rather than being expressed as a mean
#' with offsets.
#'
#' @return
#' A list with the elements:
#' \tabular{rl}{
#' Y \tab response variable \cr
#' X1 \tab design matrix for constrained effect \cr
#' X2 \tab design matrix for covariates \cr
#' P1 \tab number of constrained coefficients \cr
#' U \tab matrix of random effects \cr
#' formula \tab the final formula call (automatically removes intercept) \cr
#' dframe \tab the dataframe containing the variables in the model \cr
#' REidx \tab an element to define random effect variance components \cr
#' REnames \tab an element to define random effect variance components \cr
#' }
#'
#' @seealso
#' \code{\link{CLME-package}}
#' \code{\link{clme}}
#'
#' @examples
#' data( rat.blood )
#' model_terms_clme( mcv ~ time + temp + sex + (1|id) , data = rat.blood )
#'
#' @importFrom lme4 lFormula
#' @export
#'
model_terms_clme <- function( formula, data ){
cc <- match.call()
mf <- match.call( expand.dots = FALSE )
cc$formula <- mf$formula <- update.formula( formula , . ~ . - 1 )
m <- match( c("formula", "data"), names(mf), 0L )
mf <- mf[ c(1L, m) ]
# mf$drop.unused.levels <- TRUE
## Determine if random effects are present
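## lme4::findbars() extracts the '(...|...)' terms from the formula, so a
## length of zero means the model contains fixed effects only.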
any_RE <- length( lme4::findbars(formula) )
if( any_RE==0 ){
## ----- NO RANDOM EFFECTS -----
mf[[1L]] <- quote(stats::model.frame)
mf <- eval( mf, parent.frame() )
mt <- attr(mf, "terms")
if( !is.ordered( mf[,2] ) ){
## If this is stable, can remove the double xlev call and remove the else portion (same as below)
warning( "Constrained effect is not an ordered factor, attempting to force ordering\n Assumed order: ", paste0( levels(mf[,2]), collapse=" < "), "\n See help(ordered) for more information")
mf[,2] <- factor( mf[,2], ordered=TRUE )
xlev <- levels( mf[,2] )
} else{
xlev <- levels( mf[,2] )
}
Y <- model.response(mf, "numeric")
X <- model.matrix(mt, mf )
P1 <- length( unique(mf[,2]) )
X1 <- X[, 1:P1 , drop=FALSE]
if( ncol(X) > P1 ){
X2 <- X[,(P1+1):ncol(X), drop=FALSE]
} else{
X2 <- NULL
}
U <- NULL
dframe <- mf
REnames <- NULL
REidx <- NULL
} else{
## ----- YES RANDOM EFFECTS -----
mf[[1L]] <- quote( lme4::lFormula )
suppressMessages(
# clme_terms <- lme4::lFormula( formula, data=data )
clme_terms <- eval( mf, parent.frame() )
)
if( !is.ordered( clme_terms$fr[,2] ) ){
# stop( "Constrained effect is not an ordered factor")
warning( "Constrained effect is not an ordered factor, attempting to force ordering\n Assumed order: ", paste0( levels(clme_terms$fr[,2]), collapse=" < "), "\n See help(ordered) for more information")
clme_terms$fr[,2] <- factor( clme_terms$fr[,2], ordered=TRUE )
xlev <- levels( clme_terms$fr[,2] )
} else{
xlev <- levels( clme_terms$fr[,2] )
}
Y <- clme_terms$fr[,1]
X <- clme_terms$X
P1 <- length( unique(clme_terms$fr[,2]) )
X1 <- X[, 1:P1 , drop=FALSE]
if( ncol(X) > P1 ){
X2 <- X[,(P1+1):ncol(X), drop=FALSE]
} else{
X2 <- NULL
}
U <- t( as.matrix(clme_terms$reTrms$Zt) )
dframe <- clme_terms$fr
REnames <- names(clme_terms$reTrms$flist)
REidx <- clme_terms$reTrms$Lind
}
##############################
return_obj <- list( Y=Y , X1=X1 , X2=X2 , P1=P1 , U=U , formula=cc$formula,
dframe=dframe , REidx=REidx , REnames=REnames, xlev=xlev )
return( return_obj )
}
##
## Some methods for class CLME
##
#' Constructor method for objects S3 class clme
#'
#' @rdname as.clme
#' @export
#'
is.clme <- function(x) inherits(x, "clme")
#' Constructor method for objects S3 class clme
#'
#' @description
#' Test if an object is of class \code{clme} or coerce an object to be such.
#'
#' @rdname as.clme
#'
#' @param x list with the elements corresponding to the output of \code{\link{clme}}.
#' @param ... space for additional arguments.
#'
#' @return
#' Returns an object of the class \code{clme}.
#'
#'
#' @seealso
#' \code{\link{CLME-package}}
#' \code{\link{clme}}
#'
#' @examples
#' data( rat.blood )
#'
#' cons <- list(order = "simple", decreasing = FALSE, node = 1 )
#' clme.out <- clme(mcv ~ time + temp + sex + (1|id), data = rat.blood ,
#' constraints = cons, seed = 42, nsim = 0)
#'
#' is.clme( clme.out )
#' as.clme( clme.out )
#' @export
#'
as.clme <- function( x , ... ){
if( is.clme(x) ){
return(x)
} else{
err.flag <- 0
flagTheta <- flagSsq <- flagTsq <- flagCov <- flagW1 <- flagW2 <- flagP1 <- flagP2 <- flagConst <- ""
if( !is.numeric(x$theta) ){
err.flag <- 1
flagTheta <- " theta must be numeric \n"
x$theta <- numeric(0)
}
if( !is.numeric(x$ssq) ){
err.flag <- 1
flagSsq <- " ssq must be numeric \n"
x$ssq <- numeric(0)
}
if( !is.null(x$tsq) & !is.numeric(x$tsq) ){
err.flag <- 1
flagTsq <- " if present, tau must be numeric \n"
x$tsq <- NULL
}
if( !is.matrix(x$cov.theta) || !is.numeric(x$cov.theta) ||
nrow(x$cov.theta) != ncol(x$cov.theta) ||
nrow(x$cov.theta) != length(x$theta) ||
sum(sum(abs(x$cov.theta - t(x$cov.theta)))) > sqrt(.Machine$double.eps) ){
err.flag <- 1
flagCov <- " cov.theta must be square, symmetric, numeric matrix with dimensions equal to length of theta\n"
x$cov.theta <- matrix( numeric(0) , nrow=length(x$theta) , ncol=length(x$theta) )
}
if( !is.numeric(x$ts.glb) ){
err.flag <- 1
flagW1 <- " ts.glb must be numeric \n"
x$ts.glb <- numeric(0)
}
if( !is.numeric(x$ts.ind) ){
err.flag <- 1
flagW2 <- " ts.ind must be numeric \n"
x$ts.ind <- numeric(0)
}
if( !is.numeric(x$p.value) || length(x$p.value) != length(x$ts.glb) ){
err.flag <- 1
flagP1 <- " p.value must be numeric and of same length as ts.glb \n"
x$p.value <- numeric(0)
}
if( !is.numeric(x$p.value.ind) || length(x$p.value.ind) != length(x$ts.ind) ){
err.flag <- 1
flagP2 <- " p.value.ind must be numeric and of same length as ts.ind \n"
x$p.value.ind <- numeric(0)
}
if( !is.list(x$constraints) ){
err.flag <- 1
flagConst <- " constraints must be list \n"
x$constraints <- list( A=matrix( numeric(0) ) )
} else{
cnames <- names(x$constraints)
if( sum(cnames=="A") != 1 ){
err.flag <- 1
flagConst <- " constraints must contain element A\n"
x$constraints$A <- matrix( numeric(0) , nrow=length(x$ts.ind ))
}
}
if( err.flag==1 ){
err.mssg <- paste( "coercing 'x' to class 'clme' produced errors: \n",
flagTheta, flagSsq, flagTsq, flagCov, flagW1,
flagW2, flagP1, flagP2, flagConst, "output may not be valid." , sep = "")
# warning(warn, sys.call(-1))
warning( err.mssg )
}
class(x) <- "clme"
return(x)
}
}
################################################################################
#' Akaike information criterion
#'
#' @description
#' Calculates the Akaike and Bayesian information criterion for objects of class \code{clme}.
#'
#' @param object object of class \code{\link{clme}}.
#' @param ... space for additional arguments.
#' @param k value multiplied by number of coefficients
#'
#' @details
#' The log-likelihood is computed under the assumption of Normality. The model uses residual bootstrap methodology, and Normality is neither required nor assumed. Therefore the log-likelihood and these information criteria may not be useful measures for comparing models.
#' For \code{k=2}, the function computes the AIC. To obtain the BIC, set \eqn{k = log( n/(2*pi) )}, which is what the method \code{BIC.clme} does.
#'
#'
#' @return
#' Returns the information criterion (numeric).
#'
#'
#' @seealso
#' \code{\link{CLME-package}}
#' \code{\link{clme}}
#'
#' @examples
#'
#' data( rat.blood )
#'
#' cons <- list(order = "simple", decreasing = FALSE, node = 1 )
#' clme.out <- clme(mcv ~ time + temp + sex + (1|id), data = rat.blood ,
#' constraints = cons, seed = 42, nsim = 0)
#'
#' AIC( clme.out )
#' AIC( clme.out, k=log( nobs(clme.out)/(2*pi) ) )
#'
#'
#' @method AIC clme
#' @export
#'
AIC.clme <- function( object, ..., k=2 ){
## For BIC, set k = ln( n/(2*pi) )
logl <- logLik.clme( object, ...)[1]
kk <- ncol(model.matrix.clme(object)) + length(object$tsq) + length(object$ssq)
# aic <- 2*(kk - logl)
aic <- k*kk - 2*logl
return(aic)
}
#' Akaike information criterion
#'
#' @description
#' Calculates the Akaike and Bayesian information criterion for objects of class \code{clme}.
#'
#' @rdname AIC.clme
#'
#' @seealso
#' \code{\link{CLME-package}}
#' \code{\link{clme}}
#'
#' @method AIC summary.clme
#' @export
#'
AIC.summary.clme <- function( object, ..., k=2 ){
class(object) <- "clme"
AIC( object, ..., k=k )
}
#' Bayesian information criterion
#'
#' @description
#' Calculates the Bayesian information criterion for objects of class \code{clme}.
#'
#' @param object object of class \code{\link{clme}}.
#' @param ... space for additional arguments.
#' @param k value multiplied by number of coefficients
#'
#' @details
#' The log-likelihood is computed under the assumption of Normality. The model uses residual bootstrap methodology, and Normality is neither required nor assumed. Therefore the log-likelihood and these information criteria may not be useful measures for comparing models.
#' For \code{k=2}, the function computes the AIC. To obtain the BIC, set \eqn{k = log( n/(2*pi) )}, which is what the method \code{BIC.clme} does.
#'
#'
#' @return
#' Returns the Bayesian information criterion (numeric).
#'
#'
#' @seealso
#' \code{\link{CLME-package}}
#' \code{\link{clme}}
#'
#' @examples
#'
#' data( rat.blood )
#'
#' cons <- list(order = "simple", decreasing = FALSE, node = 1 )
#' clme.out <- clme(mcv ~ time + temp + sex + (1|id), data = rat.blood ,
#' constraints = cons, seed = 42, nsim = 0)
#'
#' BIC( clme.out )
#' BIC( clme.out, k=log( nobs(clme.out)/(2*pi) ) )
#'
#'
#' @method BIC clme
#' @export
#'
BIC.clme <- function( object, ..., k=log(nobs(object)/(2*pi)) ){
## For BIC, set k = ln( n/(2*pi) )
logl <- logLik( object, ...)[1]
bic <- AIC( object, k=k )
return(bic)
}
#' Bayesian information criterion
#'
#' @description
#' Calculates the Akaike and Bayesian information criterion for objects of class \code{clme}.
#'
#' @rdname BIC.clme
#'
#' @seealso
#' \code{\link{CLME-package}}
#' \code{\link{clme}}
#'
#' @method BIC summary.clme
#' @export
#'
BIC.summary.clme <- function( object, ..., k=log(nobs(object)/(2*pi)) ){
class(object) <- "clme"
BIC( object, ..., k=k )
}
#' Individual confidence intervals
#'
#' @description
#' Calculates confidence intervals for fixed effects parameter estimates in objects of class \code{clme}.
#' @rdname confint
#'
#' @param object object of class \code{\link{clme}}.
#' @param parm parameter for which confidence intervals are computed (not used).
#' @param level nominal confidence level.
#' @param ... space for additional arguments.
#'
#' @details
#' Confidence intervals are computed using Standard Normal critical values.
#' Standard errors are taken from the covariance matrix of the unconstrained parameter estimates.
#'
#'
#' @return
#' Returns a matrix with two columns named lcl and ucl (lower and upper confidence limit).
#'
#'
#' @seealso
#' \code{\link{CLME-package}}
#' \code{\link{clme}}
#'
#' @examples
#'
#' data( rat.blood )
#' cons <- list(order = "simple", decreasing = FALSE, node = 1 )
#' clme.out <- clme(mcv ~ time + temp + sex + (1|id), data = rat.blood ,
#' constraints = cons, seed = 42, nsim = 0)
#'
#' confint( clme.out )
#'
#'
#' @method confint clme
#' @export
#'
confint.clme <- function(object, parm, level=0.95, ...){
## More types of confidence intervals (e.g., bootstrap) may be added in the future.
## If so, default confidence interval will be the current methods
## Confidence intervals based on unconstrained variance-covariance matrix
## Actual covarage >= Nominal coverage
cc <- match.call()
digits <- cc$digits
if( is.null(digits) ){ digits <- 3 }
if( !is.numeric(digits) ){ digits <- 3 }
if( digits < 0 ){ digits <- 3 }
alpha <- 1 - level
theta <- fixef(object)
cv <- qnorm(1-alpha/2)
varco <- vcov( object )
lcl <- as.numeric( format( round(theta - cv*sqrt(diag(varco)) , digits=digits)) )
ucl <- as.numeric( format( round(theta + cv*sqrt(diag(varco)) , digits=digits)) )
ints <- cbind( lcl, ucl)
## Return intervals
return( ints )
}
#' Individual confidence intervals
#'
#' @description
#' Calculates confidence intervals for fixed effects parameter estimates in objects of class \code{clme}.
#' @rdname confint
#'
#' @seealso
#' \code{\link{CLME-package}}
#' \code{\link{clme}}
#'
#' @method confint summary.clme
#' @export
#'
confint.summary.clme <- function(object, parm, level=0.95, ...){
class(object) <- "clme"
confint( object, parm, level, ... )
}
#' Extract fixed effects
#'
#' @importFrom lme4 fixef
#' @method fixef clme
#' @export
#'
fixef.clme <- function( object, ...){ UseMethod("fixef") }
#' Extract fixed effects
#'
#' @rdname fixef.clme
#' @importFrom lme4 fixef
#' @method fixef summary.clme
#' @export
#'
fixef.summary.clme <- function( object, ...){
class(object) <- "clme"
fixef(object, ...)
}
#' Extract fixed effects
#'
#' @description
#' Extracts the fixed effects estimates from objects of class \code{clme}.
#'
#' @rdname fixef.clme
#'
#' @param object object of class \code{\link{clme}}.
#' @param ... space for additional arguments
#'
#' @return
#' Returns a numeric vector.
#'
#' @seealso
#' \code{\link{CLME-package}}
#' \code{\link{clme}}
#'
#' @examples
#'
#' data( rat.blood )
#' cons <- list(order = "simple", decreasing = FALSE, node = 1 )
#' clme.out <- clme(mcv ~ time + temp + sex + (1|id), data = rat.blood ,
#' constraints = cons, seed = 42, nsim = 0)
#'
#' fixef( clme.out )
#'
#' @importFrom lme4 fixef
#' @method fixef clme
#' @export
#'
fixef.clme <- function( object, ... ){
## Print out the fixed effects
if( is.clme(object) ){
return( object$theta )
} else{
stop("'object' is not of class clme")
}
}
#' Extract fixed effects
#'
#' @rdname fixef.clme
#' @importFrom nlme fixed.effects
#' @export
#'
fixed.effects <- function( object, ...){ UseMethod("fixed.effects") }
#' Extract fixed effects
#'
#' @rdname fixef.clme
#' @importFrom nlme fixed.effects
#' @export
#'
fixed.effects.summary.clme <- function( object, ...){
class(object) <- "clme"
fixef(object, ...)
}
#' Extract fixed effects
#'
#' @rdname fixef.clme
#'
#' @importFrom nlme fixed.effects
#' @method fixed.effects clme
#' @export
#'
fixed.effects.clme <- function( object , ... ){
fixef.clme( object, ... )
}
#' Extract fixed effects
#'
#' @rdname fixef.clme
#' @method coefficients clme
#' @export
#'
coefficients.clme <- function( object, ... ){
fixef.clme( object, ... )
}
#' Extract fixed effects
#'
#' @rdname fixef.clme
#' @method coef clme
#' @export
#'
coef.clme <- function( object, ... ){
fixef.clme( object, ... )
}
#' Extract fixed effects
#'
#' @rdname fixef.clme
#' @method coefficients summary.clme
#' @export
#'
coefficients.summary.clme <- function( object, ... ){
class(object) <- "clme"
fixef.clme( object, ... )
}
#' Extract fixed effects
#'
#' @rdname fixef.clme
#' @method coef summary.clme
#' @export
#'
coef.summary.clme <- function( object, ... ){
class(object) <- "clme"
fixef.clme( object, ... )
}
#' Extract formula
#'
#' @description
#' Extracts the formula from objects of class \code{clme}.
#'
#' @param x object of class \code{\link{clme}}.
#' @param ... space for additional arguments
#'
#' @details
#' The package \pkg{CLME} parametrizes the model with no intercept term.
#' If an intercept was included, it will be removed automatically.
#'
#' @return
#' Returns a formula object
#'
#'
#' @seealso
#' \code{\link{CLME-package}}
#' \code{\link{clme}}
#'
#' @examples
#'
#' data( rat.blood )
#' cons <- list(order = "simple", decreasing = FALSE, node = 1 )
#' clme.out <- clme(mcv ~ time + temp + sex + (1|id), data = rat.blood ,
#' constraints = cons, seed = 42, nsim = 0)
#'
#' formula( clme.out )
#'
#' @method formula clme
#' @export
#'
formula.clme <- function(x, ...){
return( x$formula )
}
#' Log-likelihood
#'
#' @description
#' Computes the log-likelihood of the fitted model for objects of class \code{clme}.
#'
#' @param object object of class \code{\link{clme}}.
#' @param ... space for additional arguments
#'
#' @details
#' The log-likelihood is computed using the Normal distribution. The model uses residual bootstrap
#' methodology, and Normality is neither required nor assumed. Therefore the log-likelihood may
#' not be a useful measure in the context of \pkg{CLME}.
#'
#' @return
#' Numeric.
#'
#'
#' @seealso
#' \code{\link{CLME-package}}
#' \code{\link{clme}}
#'
#' @examples
#'
#' data( rat.blood )
#' cons <- list(order = "simple", decreasing = FALSE, node = 1 )
#' clme.out <- clme(mcv ~ time + temp + sex + (1|id), data = rat.blood ,
#' constraints = cons, seed = 42, nsim = 0)
#'
#' logLik( clme.out )
#'
#' @method logLik clme
#' @export
#'
logLik.clme <- function( object, ...){
## Residuals
YY <- object$dframe[,1]
XX <- model.matrix( object )
TT <- fixef( object )
RR <- YY - apply( XX , 1 , FUN=function(xx,tht){ sum(xx*tht) }, tht=TT )
nn <- nobs(object)
## Covariance matrix (piecewise)
ssq <- object$ssq
Nks <- object$gfix
ssqvec <- rep( ssq, Nks )
RSiR <- c( t(RR) %*% (RR/ssqvec) )
detS <- sum( log(ssqvec) )
if( is.null(object$tsq) ){
## Fixed effects only
RPiR <- RSiR
detPhi <- detS
} else{
## Mixed Effects
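## Marginal covariance: Sigma = S + U T U', with S = diag(ssqvec) and
## T = diag(tsqvec). The quadratic form r' Sigma^{-1} r and log|Sigma| are
## obtained via the Woodbury identity and the matrix determinant lemma, so
## only the q x q matrix (U' S^{-1} U + T^{-1}) needs to be inverted.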
UU <- model.matrix( object , type="ranef" )
tsq <- object$tsq
Qs <- object$gran
tsqvec <- rep( tsq, Qs )
RSiU <- matrix( apply( UU, 2, FUN=function(uu,rr){ sum(uu*rr) }, rr=(RR/ssqvec) ), nrow=1 )
U1 <- apply( UU, 2, FUN=function(uu,sq){uu/sq}, sq=sqrt(ssqvec) )
tusu <- t(U1) %*% U1
diag(tusu) <- diag(tusu) + 1/tsqvec
tusui <- solve( tusu )
RPiR <- RSiR - c( RSiU%*%(tusui%*%t(RSiU)) )
detPhi <- log(det( tusu )) + sum( log(tsqvec) ) + detS
}
logL <- -0.5*( nn*log(2*pi) + detPhi + RPiR )
return( logL )
}
#' Log-likelihood
#'
#' @rdname logLik.clme
#'
#' @seealso
#' \code{\link{logLik.clme}}
#'
#' @method logLik summary.clme
#' @export
#'
logLik.summary.clme <- function( object, ...){
class(object) <- "clme"
logLik(object, ...)
}
#' Extract the model design matrix.
#'
#' @description
#' Extracts the fixed-effects design matrix from objects of class \code{clme}.
#'
#' @param object an object of class \code{clme}.
#' @param type specify whether to return the fixed-effects or random-effects matrix.
#' @param ... space for additional arguments
#'
#'
#' @return
#' Returns a matrix.
#'
#'
#' @seealso
#' \code{\link{CLME-package}}
#' \code{\link{clme}}
#'
#' @examples
#' \dontrun{
#' data( rat.blood )
#' cons <- list(order = "simple", decreasing = FALSE, node = 1 )
#' clme.out <- clme(mcv ~ time + temp + sex + (1|id), data = rat.blood ,
#' constraints = cons, seed = 42, nsim = 0)
#'
#' model.matrix( clme.out )
#' }
#' @method model.matrix clme
#' @export
#'
model.matrix.clme <- function( object, type="fixef", ...){
mmat <- model_terms_clme( object$formula, object$dframe )
if( type=="fixef" ){
## Return the fixed-effects matrix
X1 <- mmat$X1
X2 <- mmat$X2
return( cbind(X1, X2) )
} else if( type=="ranef" ){
## Return the random-effects matrix
return(mmat$U)
}
}
#' Extract the model design matrix.
#'
#' @rdname model.matrix.clme
#'
#' @seealso
#' \code{\link{model.matrix.clme}}
#'
#' @method model.matrix summary.clme
#' @export
#'
model.matrix.summary.clme <- function( object, ...){
class(object) <- "clme"
model.matrix(object, ...)
}
#' Number of observations
#'
#' @description
#' Obtains the number of observations used to fit a model for objects of class \code{clme}.
#'
#' @param object an object of class \code{clme}.
#' @param ... space for additional arguments
#'
#'
#' @return
#' Numeric.
#'
#'
#' @seealso
#' \code{\link{CLME-package}}
#' \code{\link{clme}}
#'
#' @examples
#'
#' data( rat.blood )
#' cons <- list(order = "simple", decreasing = FALSE, node = 1 )
#' clme.out <- clme(mcv ~ time + temp + sex + (1|id), data = rat.blood ,
#' constraints = cons, seed = 42, nsim = 0)
#'
#' nobs( clme.out )
#'
#' @method nobs clme
#' @export
#'
nobs.clme <- function(object, ...){
nrow( model.matrix.clme(object) )
}
#' Number of observations
#'
#' @rdname nobs.clme
#'
#' @seealso
#' \code{\link{nobs.clme}}
#'
#' @method nobs summary.clme
#' @export
#'
nobs.summary.clme <- function( object, ...){
class(object) <- "clme"
nobs(object, ...)
}
#' Printout of fitted object.
#'
#' @description
#' Prints basic information on a fitted object of class \code{clme}.
#'
#' @param x an object of class \code{clme}.
#' @param ... space for additional arguments
#'
#' @return
#' Text printed to console.
#'
#' @seealso
#' \code{\link{CLME-package}}
#' \code{\link{clme}}
#'
#' @examples
#' \dontrun{
#' data( rat.blood )
#' set.seed( 42 )
#' cons <- list(order = "simple", decreasing = FALSE, node = 1 )
#' clme.out <- clme(mcv ~ time + temp + sex + (1|id), data = rat.blood ,
#' constraints = cons, seed = 42, nsim = 10)
#'
#' print( clme.out )
#' }
#'
#' @method print clme
#' @export
#'
print.clme <- function(x, ...){
#cc <- match.call()
#digits <- cc$digits
object <- x
## Print out residuals of specified type
if( !is.clme(object) ){
stop("'object' is not of class clme")
}
cat( "Linear mixed model subject to order restrictions\n")
cat( "Formula: ")
print( object$formula )
crit <- c(logLik.clme(object),
AIC.clme(object),
AIC( object, k=log(nobs.clme(object)/(2*pi)) ) )
critc <- format( crit , digits=5)
cat( "\nlog-likelihood:", critc[1] )
cat( "\nAIC: ", critc[2] )
cat( "\nBIC: ", critc[3] )
cat( "\n(log-likelihood, AIC, BIC computed under normality)")
cat( "\n\nFixed effect coefficients (theta): \n")
print( fixef.clme(object) )
cat( "\nVariance components: \n")
print( VarCorr.clme(object) )
#cat( "\n\nModel based on", object$nsim, "bootstrap samples." )
}
#' Extract random effects
#'
#' @param object object of class clme.
#' @param ... space for additional arguments
#'
#' @rdname ranef.clme
#' @importFrom nlme ranef
#' @export
#'
ranef.clme <- function( object, ...){ UseMethod("ranef") }
#' Extract random effects
#'
#' @rdname ranef.clme
#'
#' @importFrom nlme ranef
#' @export
#'
ranef.summary.clme <- function( object, ...){
class(object) <- "clme"
ranef(object, ...)
}
#' Extract random effects
#'
#' @description
#' Extracts the random effects estimates from objects of class \code{clme}.
#'
#' @rdname ranef.clme
#'
#' @return
#' Returns a numeric vector.
#'
#'
#' @seealso
#' \code{\link{CLME-package}}
#' \code{\link{clme}}
#'
#' @examples
#'
#' data( rat.blood )
#' cons <- list(order = "simple", decreasing = FALSE, node = 1 )
#' clme.out <- clme(mcv ~ time + temp + sex + (1|id), data = rat.blood ,
#' constraints = cons, seed = 42, nsim = 0)
#'
#' ranef( clme.out )
#'
#'
#' @importFrom nlme ranef
#' @method ranef clme
#' @export
#'
ranef.clme <- function( object, ... ){
## Print out the random effects
if( is.clme(object) ){
return( object$random.effects )
} else{
stop("'object' is not of class clme")
}
}
#' Extract random effects
#'
#' @param object object of class clme.
#' @param ... space for additional arguments
#'
#' @rdname ranef
#' @importFrom nlme random.effects
#' @export
#'
random.effects <- function( object, ... ){ UseMethod("random.effects") }
#' Extract random effects
#'
#' @rdname ranef
#' @importFrom nlme random.effects
#' @export
#'
random.effects.summary.clme <- function( object, ...){
class(object) <- "clme"
ranef(object, ...)
}
#' Extract random effects
#'
#' @rdname ranef.clme
#' @importFrom nlme random.effects
#' @method random.effects clme
#' @export
#'
random.effects.clme <- function( object , ... ){
ranef.clme( object, ... )
}
#' Various types of residuals
#'
#' @description
#' Computes several types of residuals for objects of class \code{clme}.
#'
#' @param object object of class \code{\link{clme}}.
#' @param type type of residual (for mixed-effects models only).
#' @param ... space for additional arguments
#'
#' @details
#' For fixed-effects models \eqn{Y = X\beta + \epsilon}{Y = X*b + e}, residuals are given as \deqn{\hat{e} = Y - X\hat{\beta}}{ ehat = Y - X*betahat}.
#' For mixed-effects models \eqn{Y = X\beta + U\xi + \epsilon}{Y = X*b + U*xi + e}, three types of residuals are available.
#' \eqn{PA = Y - X\hat{\beta}}{ PA = Y - X*betahat}\\
#' \eqn{SS = U\hat{\xi}}{ SS = U*xihat}\\
#' \eqn{FM = Y - X\hat{\beta} - U\hat{\xi}}{ FM = Y - X*betahat - U*xihat}
#'
#' @return
#' Returns a numeric matrix.
#'
#' @seealso
#' \code{\link{CLME-package}}
#' \code{\link{clme}}
#'
#' @examples
#' \dontrun{
#' data( rat.blood )
#' cons <- list(order = "simple", decreasing = FALSE, node = 1 )
#' clme.out <- clme(mcv ~ time + temp + sex + (1|id), data = rat.blood ,
#' constraints = cons, seed = 42, nsim = 0)
#'
#' residuals( clme.out, type='PA' )
#' }
#' @method residuals clme
#' @export
#'
residuals.clme <- function( object, type="FM", ... ){
## Print out residuals of specified type
if( is.clme(object) ){
ridx <- which( c("PA", "SS", "FM")==type )
if( ncol(object$residuals)<ridx ) ridx <- 1
return( object$residuals[,ridx] )
} else{
stop("'object' is not of class clme")
}
}
#' Various types of residuals
#'
#' @rdname residuals.clme
#'
#' @method residuals summary.clme
#' @export
#'
residuals.summary.clme <- function( object, type="FM", ... ){
class(object) <- "clme"
residuals( object, type, ...)
}
#' Residual variance components
#'
#' @description
#' Extract residual variance components for objects of class \code{clme}.
#'
#' @param object object of class \code{\link{clme}}.
#' @param ... space for additional arguments
#'
#'
#' @return
#' Numeric.
#'
#' @seealso
#' \code{\link{CLME-package}}
#' \code{\link{clme}}
#'
#' @examples
#'
#' data( rat.blood )
#' cons <- list(order = "simple", decreasing = FALSE, node = 1 )
#' clme.out <- clme(mcv ~ time + temp + sex + (1|id), data = rat.blood ,
#' constraints = cons, seed = 42, nsim = 0)
#'
#' sigma( clme.out )
#'
#'
#' @importFrom stats sigma
#' @method sigma clme
#' @export
#'
# WAS IMPORTING sigma FROM lme4
sigma.clme <- function( object, ...){
return( object$ssq )
}
#' Residual variance components
#'
#' @description
#' Extract residual variance components for objects of class \code{clme}.
#'
#' @param object object of class \code{\link{clme}}.
#' @param ... space for additional arguments
#'
#'
#' @return
#' Numeric.
#'
#' @seealso
#' \code{\link{CLME-package}}
#' \code{\link{clme}}
#'
#' @examples
#'
#' data( rat.blood )
#' cons <- list(order = "simple", decreasing = FALSE, node = 1 )
#' clme.out <- clme(mcv ~ time + temp + sex + (1|id), data = rat.blood ,
#' constraints = cons, seed = 42, nsim = 0)
#'
#' sigma( clme.out )
#'
#' @method sigma summary.clme
#' @export
#'
sigma.summary.clme <- function( object, ...){
return( object$ssq )
}
#' Variance components
#'
#' @param x object of class \code{\link{summary.clme}}.
#' @param sigma (unused at present).
#' @param rdig number of digits to round to (unused at present).
#'
#' @rdname VarCorr
#' @export
#'
VarCorr <- function( x, sigma, rdig ){ UseMethod("VarCorr") }
#' Variance components
#'
#' @rdname VarCorr
#' @export
#'
VarCorr.summary.clme <- function( x, sigma, rdig ){
class(x) <- "clme"
VarCorr(x, sigma=1, rdig=4)
}
#' Variance components.
#'
#' @description
#' Extracts variance components for objects of class \code{clme}.
#'
#' @rdname VarCorr
#'
#' @return
#' Numeric.
#'
#' @seealso
#' \code{\link{CLME-package}}
#' \code{\link{clme}}
#'
#' @examples
#'
#' data( rat.blood )
#' cons <- list(order = "simple", decreasing = FALSE, node = 1 )
#' clme.out <- clme(mcv ~ time + temp + sex + (1|id), data = rat.blood ,
#' constraints = cons, seed = 42, nsim = 0)
#'
#' VarCorr( clme.out )
#'
#'
#'
#' @method VarCorr clme
#' @export
#'
VarCorr.clme <- function(x, sigma, rdig ){
# @importFrom lme4 VarCorr
## Print out variances or SDs
## Defines tiny class "varcorr_clme" to handle printing
## using the method: print.varcorr_clme
if( !is.clme(x) ){
stop("'x' is not of class clme")
} else{
varcomps <- matrix( sqrt(c(x$tsq, x$ssq )), ncol=1 )
if( !is.null(x$tsq) & is.null(names(x$tsq)) ){
names(x$tsq) <- paste0( "tau_", 1:length(x$tsq) )
}
rnames <- c( "Source", names(x$tsq), names(x$ssq) )
rownames(varcomps) <- rnames[-1]
colnames(varcomps) <- "Std. Error"
#class(varcomps) <- "varcorr_clme"
return( varcomps )
}
}
## Leave this method out of alphabetical order so that
## it is right next to the VarCorr.clme method
#' Printout for variance components
#'
#' @description
#' Prints the variance components of an object of class \code{clme}.
#'
#' @param object object of class \code{\link{clme}}.
#' @param rdig number of digits to round to.
#' @param ... space for additional arguments.
#'
#' @return
#' Text printed to console.
#'
#' @seealso
#' \code{\link{CLME-package}}
#' \code{\link{clme}}
#'
#' @examples
#' \dontrun{
#' data( rat.blood )
#' cons <- list(order = "simple", decreasing = FALSE, node = 1 )
#' clme.out <- clme(mcv ~ time + temp + sex + (1|id), data = rat.blood ,
#' constraints = cons, seed = 42, nsim = 0)
#'
#' print.varcorr_clme( clme.out )
#' }
#' @importFrom stringr str_pad
#'
#'
print.varcorr_clme <- function(object, rdig=5, ...){
rnames <- c( "Source", rownames( object ) )
rnames <- str_pad(rnames, width=max(nchar(rnames)), side = "right", pad = " ")
vars <- format( object , digits=rdig )
cat( rnames[1], "\t" , "Variance" )
for( ii in 1:length(vars) ){
cat( "\n", rnames[ii+1], "\t" , vars[ii] )
}
return(NULL)
# @exportMethod print varcorr_clme
}
#' Variance-covariance matrix
#'
#' @description
#' Extracts variance-covariance matrix for objects of class \code{clme}.
#'
#' @param object object of class \code{\link{clme}}.
#' @param ... space for additional arguments
#'
#'
#' @return
#' Numeric matrix.
#'
#' @seealso
#' \code{\link{CLME-package}}
#' \code{\link{clme}}
#'
#' @examples
#'
#' data( rat.blood )
#' cons <- list(order = "simple", decreasing = FALSE, node = 1 )
#' clme.out <- clme(mcv ~ time + temp + sex + (1|id), data = rat.blood ,
#' constraints = cons, seed = 42, nsim = 0)
#'
#' vcov( clme.out )
#'
#' @method vcov clme
#' @export
#'
vcov.clme <- function(object, ...){
## Print out covariance matrix of theta
if( is.clme(object) ){
return( object$cov.theta )
} else{
stop("'object' is not of class clme")
}
}
#' Variance-covariance matrix
#'
#' @rdname vcov.clme
#'
#' @method vcov summary.clme
#' @export
#'
vcov.summary.clme <- function(object, ...){
class(object) <- "clme"
vcov(object, ...)
}
##
## Hidden functions to format characters / decimals
##
## Align the length of header with length of values
.align_table.clme <- function( tble, digits=4, ... ){
cnames <- colnames( tble )
for( ii in 1:length(cnames) ){
ntitle <- nchar( cnames[ii] )
maxc <- max( nchar(tble[,ii]) )
if( ntitle > maxc ){
tble[,ii] <- str_pad( tble[,ii], width=ntitle, side = "left", pad = " ")
}
if( ntitle < maxc ){
cnames[ii] <- str_pad( cnames[ii], width=(maxc+1), side = "right", pad = " ")
}
}
colnames(tble) <- cnames
return( tble )
}
|
/scratch/gouwar.j/cran-all/cranData/CLME/R/utilities.r
|
## define imports
#' @import parallel sets ggplot2 ggrepel arules dbscan
#' @export error_table
#' @export seg_tb_toy
#' @export pileup_tumor_toy
#' @export pileup_normal_toy
#' @export snv_reads_toy
#' @export bt_toy
#' @export pl_table_toy
#' @export adm_table_toy
#' @export scna_clonality_table_toy
#' @export allele_specific_cna_table_toy
#' @export snv_clonality_table_toy
NULL
fromListToDF <- function(inputList){
if (is.null(inputList)){return(NULL)}
#check if some is null and remove
nullPositions <- which(sapply(inputList,is.null))
if (length(nullPositions) > 0){
inputList <- inputList[-nullPositions]
}
#inputList <- globalAdm.list
firstEl <- inputList[[1]][1,]
#inputList <- lapply(inputList, function(x){ cat(x$gene,i,"\n"); i<-i+1;matrix(unlist(x), ncol=ncol(x))} )
# for(i in c(1:length(inputList))){
# #cat(inputList[[i]]$Gene.id[1],i,"\n")
# matrix(unlist(inputList[[i]]), ncol=ncol(inputList[[i]]))
# }
#
inputList <- lapply(inputList, function(x){ matrix(unlist(x), ncol=ncol(x))} )
#outDF <- as.data.frame(do.call(rbind, inputList),stringsAsFactors=F)
outDF <- as.data.frame(do.call(rbind, inputList),stringsAsFactors=F)
colnames(outDF) <-names(firstEl)
for(idx in c(1:ncol(outDF))){
if (class(firstEl[[idx]]) == "logical"){
if (is.na(firstEl[[idx]])){
class(outDF[[idx]]) <- "numeric"
}else if (outDF[1,idx] == "TRUE" ||outDF[1,idx] == "FALSE" ){
outDF[,idx] <- as.logical(outDF[,idx]) * 1
}
class(outDF[[idx]]) <- "numeric"
}else if (class(firstEl[[idx]]) == "factor"){
      ## factor columns of the input are returned as character columns in the output
      class(outDF[[idx]]) <- "character"
}else {
class(outDF[[idx]]) <- class(firstEl[[idx]])
}
}
# tt<-lapply(c(1:ncol(outDF)),function(idx){
# class(outDF[[idx]]) <- class(firstEl[[idx]])
# })
#sapply(firstEl, class)
#sapply(outDF, class)
#f <- which(sapply(firstEl, class)=="factor")
# for(i in f) {
# lev <- levels(firstEl[[i]])
# outDF[[i]] <- factor(as.integer(outDF[[i]]), levels=seq_along(lev), labels=lev)
# }
return(outDF)
}
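## Illustrative use of fromListToDF (assumed typical input: the per-segment,
## one-row data.frames produced by the mclapply calls below):
##   rows <- list(data.frame(chr = "chr1", cnA = 2, cnB = 1, stringsAsFactors = FALSE),
##                data.frame(chr = "chr2", cnA = 1, cnB = 0, stringsAsFactors = FALSE))
##   fromListToDF(rows)
##   # returns a 2-row data.frame; chr stays character, cnA and cnB stay numeric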
setBetween0and1 <- function(x){
return(max(0,min(1,x)))
}
## Function to extend beta table with cnA and cnB
## It is required that column log2.plCorr is defined
extendBetaTableWithCopyNumbers <- function(BetaTable, G, ncores = 1){
#i<-1
extendBetaLine <- function(i, BetaTable, G ){
cat(i,"\n");
BetaLine <- BetaTable[i,,drop=F]
cnAB.beta <- from_BetaLogR_to_cnAB(G = G, LogR = BetaLine$log2.plCorr, Beta = BetaLine$beta)
cnAB.beta.min <- from_BetaLogR_to_cnAB(G = G, LogR = BetaLine$log2.plCorr, Beta = BetaLine$beta.min)
cnAB.beta.max <- from_BetaLogR_to_cnAB(G = G, LogR = BetaLine$log2.plCorr, Beta = BetaLine$beta.max)
BetaLine$cnA <- cnAB.beta$cnA
BetaLine$cnB <- cnAB.beta$cnB
BetaLine$cnA.betaMin <- cnAB.beta.min$cnA
BetaLine$cnB.betaMin <- cnAB.beta.min$cnB
BetaLine$cnA.betaMax <- cnAB.beta.max$cnA
BetaLine$cnB.betaMax <- cnAB.beta.max$cnB
return(BetaLine)
}
BetaTableExt.list <- mclapply(seq(1,nrow(BetaTable),1),extendBetaLine, BetaTable=BetaTable, G=G,mc.preschedule = T,mc.cores = ncores)
BetaTableExt <- fromListToDF(BetaTableExt.list)
return(BetaTableExt)
}
# for each line of the beta table compute min and max beta accordingly to error table
addErrorToBetaTable <- function(BetaTable, errorTable,ncores=1){
betaLine <- 1
addErrorToBetaLine <- function(betaLine, BetaTable, errorTable ){
betas <- BetaTable$beta[betaLine]
nsnps <- BetaTable$nsnp[betaLine]
cov <- BetaTable$cov[betaLine]
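    ## pick the most specific error-table entry whose coverage and SNP-count
    ## thresholds are both satisfied; taking max(idxError) assumes errorTable
    ## rows are ordered from least to most informative. Fall back to the first
    ## row when no entry matches.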
idxError <- which(errorTable$n.info.snps <= nsnps & errorTable$mean.cov <= cov)
if ( length(idxError) == 0 ){ idxError <- 1 }
local.errors <- errorTable$adm.estimation.error[max(idxError)]
BetaTable.line <- BetaTable[betaLine,,drop=F]
BetaTable.line$beta.error <- local.errors
BetaTable.line$beta.min <- max(0,betas-local.errors)
BetaTable.line$beta.max <- min(1,betas+local.errors)
return(BetaTable.line)
}
extBetaTable.list <- mclapply(seq(1,nrow(BetaTable),1), addErrorToBetaLine, BetaTable=BetaTable, errorTable=errorTable, mc.preschedule = T, mc.cores = ncores )
extBetaTable <- do.call(rbind,extBetaTable.list)
return(extBetaTable)
# plot(extBetaTable$beta,extBetaTable$log2)
# plot(extBetaTable$beta.min,extBetaTable$log2,pch=20,col="red4")
# points(extBetaTable$beta.max,extBetaTable$log2,pch=20,col="blue4",new=T)
}
getSegmentsPos <- function(chr, initialPos, finalPos, segments,flank = 0){
#select segments that intersect initialPos and finalPs
suppressWarnings(goodSegs <- which(
( as.numeric(gsub("chr","",segments[,1])) == as.numeric(gsub("chr","",chr)) |
gsub("chr","",segments[,1]) == gsub("chr","",chr) ) & (
(as.numeric(segments[,2]) >= (as.numeric(initialPos) - flank) & as.numeric(segments[,2]) <= (as.numeric(finalPos) + flank) ) |
(as.numeric(segments[,3]) >= (as.numeric(initialPos) - flank) & as.numeric(segments[,3]) <= (as.numeric(finalPos) + flank) ) |
(as.numeric(segments[,2]) <= (as.numeric(initialPos) - flank) & as.numeric(segments[,3]) >= (as.numeric(finalPos) + flank) ))))
return(goodSegs)
}
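## Example (illustrative): indices of the rows of `segments` overlapping
## chr1:1,000-2,000, tolerating a "chr" prefix mismatch and an optional flank:
##   getSegmentsPos("chr1", 1000, 2000, segments)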
|
/scratch/gouwar.j/cran-all/cranData/CLONETv2/R/CLONETv2_basic_functions.R
|
## given a max cn value return all the possible allele specific combinations
getPossibleCnComb <- function(maxCN,step=1){
out<-c()
for(i in seq(0,maxCN,step)){
for(j in seq(0,i,step)){
out <- rbind(out,c(j,(i-j)))
}
}
return(out)
}
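## Example (illustrative): getPossibleCnComb(2) enumerates the (cnA, cnB) pairs
##   (0,0), (0,1), (1,0), (0,2), (1,1), (2,0)
## i.e. all allele specific combinations with total copy number 0, 1 or 2.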
#' Function to check ploidy and admixture estimates.
#'
#' This function takes the beta table of a tumor sample together with the
#' associated ploidy and admixture tables and plots the observed data against
#' the expected positions of allele specific copy number combinations.
#'
#' @param beta_table data.frame formatted as the output of function
#' \code{\link[CLONETv2:compute_beta_table]{compute_beta_table}}
#' @param ploidy_table data.frame formatted as the output of function
#' \code{\link[CLONETv2:compute_ploidy]{compute_ploidy}}
#' @param admixture_table data.frame formatted as the output of function
#' \code{\link[CLONETv2:compute_dna_admixture]{compute_dna_admixture}}
#' @return A ggplot2 plot reporting log2 on the x axis and beta on the y axis.
#'   Each dot represents a segment of the input beta_table. Red transparent
#'   circles correspond to the expected log2 vs beta positions of different allele
#'   specific copy number combinations given the ploidy and admixture reported in
#'   tables ploidy_table and admixture_table, respectively. Labels of the form
#'   (cnA, cnB) indicate respectively the major and minor allele copy number
#'   values. Labels above the plot report the sample name and the ploidy/admixture estimates.
#' @examples
#'
#' ## check ploidy and admixture estimates
#' check_plot_toy <- check_ploidy_and_admixture(beta_table = bt_toy, ploidy_table = pl_table_toy,
#' admixture_table = adm_table_toy)
#'
#' @author Davide Prandi
#' @export
#' @md
check_ploidy_and_admixture <- function(beta_table,
ploidy_table,
admixture_table){
sample_id <- unique(beta_table$sample)
if (length(sample_id) != 1){stop("Beta table must contains only one sample")}
# filter good segs
beta_table <- beta_table[which(!is.na(beta_table$beta)),]
# compute expected log2, beta
n_arms <- 4
possibleCNs <- getPossibleCnComb(n_arms)
expected_log2_beta <- as.data.frame(possibleCNs)
expected_log2_beta <- expected_log2_beta[which(expected_log2_beta[,1] >= expected_log2_beta[,2]),]
expected_log2_beta <- expected_log2_beta[which(expected_log2_beta[,1] !=0 | expected_log2_beta[,2] != 0),]
colnames(expected_log2_beta) <- c("cnA","cnB")
expected_log2_beta$admixture <- rep(admixture_table$adm,nrow(expected_log2_beta))
expected_log2_beta$ploidy <- rep(ploidy_table$ploidy,nrow(expected_log2_beta))
expected_log2_beta$plT <- expected_log2_beta$cnA + expected_log2_beta$cnB
expected_log2_beta$plN <- 2
expected_log2_beta$betaVal <- ( expected_log2_beta$admixture * expected_log2_beta$plN ) /
( expected_log2_beta$plT - expected_log2_beta$admixture * ( expected_log2_beta$plT - expected_log2_beta$plN ) )
expected_log2_beta$n.cells.mono.apparent <- pmax(expected_log2_beta$cnA,expected_log2_beta$cnB) - pmin(expected_log2_beta$cnA,expected_log2_beta$cnB) #(cnA + cnB) - min(cnA,cnB)
expected_log2_beta$n.cells.bi.apparent <- 2*pmin(expected_log2_beta$cnA,expected_log2_beta$cnB)
  expected_log2_beta$adm.local.apparent <- ( expected_log2_beta$admixture + (1-expected_log2_beta$admixture)*(expected_log2_beta$n.cells.bi.apparent)) /
    ( expected_log2_beta$admixture + (1-expected_log2_beta$admixture)*(expected_log2_beta$n.cells.bi.apparent) +
        (1-expected_log2_beta$admixture)*(expected_log2_beta$n.cells.mono.apparent) )
expected_log2_beta$betaVal.apparent <- ( expected_log2_beta$adm.local.apparent * 2 ) / ( 1 + expected_log2_beta$adm.local.apparent )
expected_log2_beta$log2Val <-
log2(( ( expected_log2_beta$betaVal * expected_log2_beta$plN + ( 1 - expected_log2_beta$betaVal ) * expected_log2_beta$plT ) / expected_log2_beta$plN) /
(expected_log2_beta$ploidy / expected_log2_beta$plN))
xmin <- min(-1, min(beta_table$log2, na.rm = T))
xmax <- max(1, max(beta_table$log2, na.rm = T))
check_plot <- ggplot(beta_table, aes(x=log2, y=beta)) +
geom_point(col="gray40") +
geom_point(data = data.frame(), aes(x=expected_log2_beta$log2Val, y=expected_log2_beta$betaVal.apparent), col="red", size=10, alpha=0.1) +
ylim(0,1) +
xlim(xmin,xmax ) +
theme_light() +
xlab("LogR") +
ylab("beta") +
labs(title = sample_id, subtitle = paste0("Ploidy ",ploidy_table$ploidy,"\nAdmixture ",admixture_table$adm))
expected_log2_beta$plot_label <- paste0("(",expected_log2_beta$cnA,",",expected_log2_beta$cnB,")")
check_plot <- check_plot + geom_text_repel(data = data.frame(), aes(x=expected_log2_beta$log2Val, y=expected_log2_beta$betaVal.apparent, label = expected_log2_beta$plot_label))
return(check_plot)
}
|
/scratch/gouwar.j/cran-all/cranData/CLONETv2/R/CLONETv2_check_ploidy_and_admixture.R
|
## single sample adm.global estimate in 2D space
## use the cluster defined by localBeta.good to define the clonality
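## Sketch of the approach (as implemented below): for every candidate admixture
## value in G.seq, each segment's (log2.plCorr, beta) pair is converted to
## allele specific copy numbers via from_BetaLogR_to_cnAB; the candidate that
## places the median cnB of the input cluster closest to minVal (0, i.e.
## treating the cluster as hemizygous deletions) is returned, with
## adm.min/adm.max derived from the extreme cnB values.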
getAdmGlobal.2D <- function(betaT, G.seq, minVal = 0, ncores = 1 ){
#betaT <- betaTable.this.ext.good
#admG <- G.seq[1]
localTransform <- function(admG,betaT,minVal){
cnAB.list <- lapply(seq(1,nrow(betaT),1),
function(i){return( from_BetaLogR_to_cnAB(G=admG, LogR=betaT$log2.plCorr[i], Beta=betaT$beta[i] )) })
cnAB <- do.call(rbind,cnAB.list)
dfOut <- data.frame(row.names = admG)
dfOut$adm <- admG
dfOut$dist <- abs(stats::median(cnAB$cnB) - minVal)
dfOut$mindist <- abs(min(cnAB$cnB) - minVal)
dfOut$maxdist <- abs(max(cnAB$cnB) - minVal)
return(dfOut)
}
dists.list <- mclapply(G.seq, localTransform, betaT = betaT, minVal = minVal, mc.preschedule = T, mc.cores = ncores )
adm.dist <- fromListToDF(dists.list)
adm.dist <- adm.dist[which(adm.dist$dist <=1),]
# to add for compiling reports
#plot(adm.dist$adm,adm.dist$dist,pch=20 )
#points(adm.dist$adm,adm.dist$mindist ,pch=20,col="green3" )
#points(adm.dist$adm,adm.dist$maxdist,pch=20,col="blue4" )
globalAdm <- data.frame(row.names=betaT$sample[1])
globalAdm$sample <- betaT$sample[1]
globalAdm$adm <- adm.dist$adm[which.min(adm.dist$dist)]
globalAdm$adm.min <- adm.dist$adm[which.min(adm.dist$mindist)]
globalAdm$adm.max <- adm.dist$adm[which.min(adm.dist$maxdist)]
globalAdm$n.segs <- nrow(betaT)
globalAdm$n.SNPs <- sum(betaT$nsnp)
return(globalAdm)
}
## Function for cnA vs cnB space management ----
from_BetaLogR_to_cnAB <- function(G, LogR, Beta ){
cnB = ( Beta * 2^(LogR) - G) / (1 - G)
cnA = ( (2 - Beta) * ( Beta * 2^LogR - G) + 2 * G * (1-Beta) ) / ( (1-G) * Beta )
dfOut <- data.frame(cnA=cnA,cnB=cnB,stringsAsFactors=F)
return(dfOut)
}
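## Quick sanity check (assuming beta is the fraction of allele-balanced reads
## and LogR is the ploidy-corrected log2 ratio relative to a diploid genome):
## a pure sample (G = 0) with a one-copy gain cnA = 2, cnB = 1 has
## LogR = log2(3/2) ~ 0.585 and beta = 2*cnB/(cnA+cnB) = 2/3, and
##   from_BetaLogR_to_cnAB(G = 0, LogR = log2(1.5), Beta = 2/3)
## recovers cnA = 2, cnB = 1. With Beta = 1 and LogR = 0 the result is
## cnA = cnB = 1 for any admixture G.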
#####
## compute DNA admixture----
#' Function to compute DNA admixture of a tumor sample from the associated beta
#' table and ploidy table
#'
#' This function takes a beta table and the associated ploidy table and computes
#' DNA admixture.
#'
#' @param beta_table data.frame formatted as the output of function
#' \code{\link[CLONETv2:compute_beta_table]{compute_beta_table}}
#' @param ploidy_table data.frame formatted as the output of function
#' \code{\link[CLONETv2:compute_ploidy]{compute_ploidy}}
#' @param min_coverage minimum coverage of a segment valid for computing ploidy
#' (default=20)
#' @param min_required_snps minimum number of informative snps in a segment
#' valid for computing ploidy (default=10)
#' @param error_tb data.frame that reports for each combination of coverage and
#'   number of informative SNPs the expected estimation error around beta. The
#'   data.frame error_tb must contain 3 columns: \describe{
#' \item{mean.cov}{mean coverage} \item{n.info.snps}{number of informative
#' SNPs} \item{adm.estimation.error}{estimated error on computed beta on a
#' segment with coverage mean.cov and n.info.snps informative SNPs} } Package
#'   CLONETv2 has a built-in error_tb named error_table (default=error_table)
#' @param library_type WES, WGS (default=WES)
#' @param n_digits number of digits in the output table (default=3)
#' @param n_cores number of available cores for computation (default=1)
#' @param debug return extra columns for debugging (default=F)
#' @return A data.frame with column sample, that corresponds to column
#'   sample of the input beta_table, and column adm, that reports the fraction of
#'   estimated DNA admixture
#' @examples
#'
#' ## Compute admixture table with default parameters
#' adm_table_toy <- compute_dna_admixture(beta_table = bt_toy, ploidy_table = pl_table_toy)
#'
#' @author Davide Prandi
#' @export
#' @md
compute_dna_admixture <- function(beta_table,
ploidy_table,
min_required_snps=10,
min_coverage=20,
error_tb = error_table,
library_type="WES",
n_digits=3,
n_cores=1,
debug=F){
available_library_types <- c("WES","WGS")
if (!library_type %in% available_library_types){stop("Parameter library_type must be one of ",paste(available_library_types,collapse = ", "))}
sample_id <- unique(beta_table$sample)
if (length(sample_id) != 1){stop(paste("[",Sys.time() ,"] beta_table must contain exactly one sample\n",sep=""))}
## use only chr 1 - 22
beta_table <- beta_table[which(suppressWarnings(as.numeric(gsub("chr|Chr|CHR","",beta_table$chr))) %in% seq(1,22,1 )),]
## add ploidy information
ploidy_table$log2shift <- round(-log2(ploidy_table$ploidy/2),3)
beta_table <- merge(x = beta_table, y = ploidy_table, by.x = "sample", by.y = "sample")
beta_table$log2.plCorr <- beta_table$log2 - beta_table$log2shift
## add error information
beta_table.list <- by(beta_table, INDICES = beta_table$sample, FUN = addErrorToBetaTable, errorTable = error_tb, ncores = n_cores )
beta_table <- fromListToDF(beta_table.list)
rm(beta_table.list)
## prepare output data.frame
adm_tb <- data.frame(row.names = sample_id)
adm_tb$sample <- sample_id
adm_tb$adm <- NA
adm_tb$adm.min <- NA
adm_tb$adm.max <- NA
adm_tb$n.segs <- NA
adm_tb$n.SNPs <- NA
## filter on nsnps and coverage
beta_table <- beta_table[which(beta_table$nsnp >= min_required_snps & beta_table$cov >= min_coverage),]
if (library_type == "WGS"){
    ## keep only segments whose beta on the matched normal exceeds 0.9
beta_table <- beta_table[which(beta_table$n_beta > 0.9),]
}
## check if empty beta_table or not defined ploidy
if (nrow(beta_table) == 0 || is.na(beta_table$ploidy[1])){
return(adm_tb)
}
## compute allele specific copy number supposing a 100% pure sample
beta_table <- extendBetaTableWithCopyNumbers(BetaTable = beta_table, G =0, ncores = n_cores)
## find maximum cnB value for the low cluster
if (library_type %in% c("WES")){
localBeta <- beta_table[which(beta_table$cnB < 1),]
}else if (library_type %in% c("WGS")){
localBeta <- beta_table[which(beta_table$cnB < 1 & beta_table$cnA > 0.75),]
}else{
stop("Parameter library_type ",library_type," not fully supported")
}
if (nrow(localBeta) < 2){
return(adm_tb)
}
## sort by increasing cnB
localBeta <- localBeta[with(localBeta, expr = order(cnB)),]
## extract localBeta starting from position2 and adding an empty row at the end
localBeta.next <- rbind(localBeta[seq(2,nrow(localBeta),1),],rep(NA,ncol(localBeta)))
## report on the same line segments and the next one in the genomic order
colnames(localBeta.next) <- paste0("next_",colnames(localBeta.next))
localBeta <- cbind(localBeta,localBeta.next)
  ## find the first segment whose cnB differs from the next one by more than the error on the beta estimate allows
localBeta$max_cnB_current <- pmax(localBeta$cnB,localBeta$cnB.betaMin,localBeta$cnB.betaMax, na.rm = T)
localBeta$min_cnB_next <- pmin(localBeta$next_cnB,localBeta$next_cnB.betaMin,localBeta$next_cnB.betaMax,na.rm = T)
max.cnB <- suppressWarnings(localBeta$cnB[min(which(localBeta$max_cnB_current < localBeta$min_cnB_next ))])
  ## keep only segments in the cluster with cnB less than or equal to max.cnB
localBeta.good <- localBeta[which(localBeta$cnB <= max.cnB ),]
if (nrow(localBeta.good) > 0){
G.seq <- seq(0,0.99,0.01)
adm_tb <- getAdmGlobal.2D(betaT = localBeta.good, G.seq = G.seq, minVal = 0, ncores = n_cores)
adm_tb$adm <- round(adm_tb$adm, n_digits)
adm_tb$adm.min <- round(adm_tb$adm.min, n_digits)
adm_tb$adm.max <- round(adm_tb$adm.max, n_digits)
if (!debug){
col_to_save <- c("sample","adm", "adm.min", "adm.max")
adm_tb <- adm_tb[,col_to_save]
}
}
return(adm_tb)
}
|
/scratch/gouwar.j/cran-all/cranData/CLONETv2/R/CLONETv2_compute_admixture.R
|
computeAllelicImbalance <- function(betaTable,
cnTh = 0.5, #the max (min) distance from the integer cn value
Ncores = 4
){
#round cn values
line<-1
computeAllelicImbalance.line <- function(line,betaTable,cnTh){
dfOut <- betaTable[line,]
#cat(line,"\n")
dfOut$AllelicImbalance <- NA
dfOut$AllelicImbalance.int <- NA
dfOut$cnA.int <- NA
dfOut$cnB.int <- NA
dfOut$isCNNL <- 0
dfOut$isLOHg <- 0
dfOut$isLOHseg <- 0
dfOut$isUnbalancedGain <- 0
if ( !is.na(dfOut$cnA) & !is.na(dfOut$cnB) ){
if ( dfOut$cnA < floor(dfOut$cnA) + cnTh ){
dfOut$cnA.int <- floor(dfOut$cnA)
}else if (dfOut$cnA >= ceiling(dfOut$cnA) - cnTh){
dfOut$cnA.int <- ceiling(dfOut$cnA)
}
if ( dfOut$cnB < floor(dfOut$cnB) + cnTh ){
dfOut$cnB.int <- floor(dfOut$cnB)
}else if (dfOut$cnB >= ceiling(dfOut$cnB) - cnTh){
dfOut$cnB.int <- ceiling(dfOut$cnB)
}
dfOut$AllelicImbalance <- dfOut$cnA - dfOut$cnB
dfOut$AllelicImbalance.int <- dfOut$cnA.int - dfOut$cnB.int
if (!is.na(dfOut$cnA.int) & !is.na(dfOut$cnB.int)){
if (dfOut$cnA.int == 2 & dfOut$cnB.int == 0){
dfOut$isCNNL <- 1
}
if (dfOut$cnA.int > 2 & dfOut$cnB.int == 0){
dfOut$isLOHg <- 1
}
if (dfOut$cnA.int > 1 & dfOut$cnB.int == 0){
dfOut$isLOHseg <- 1
}
if (dfOut$cnA.int > 2 & dfOut$cnB.int > 0 & ( dfOut$cnA.int - dfOut$cnB.int > 1)){
dfOut$isUnbalancedGain <- 1
}
}
}
return(dfOut)
}
betaTableOut.List <- mclapply(seq(1,nrow(betaTable),1), computeAllelicImbalance.line, betaTable, cnTh, mc.preschedule = T, mc.cores = Ncores )
  betaTableOut <- fromListToDF(betaTableOut.List)
  return(betaTableOut)
}
#'Function to compute allele specific somatic copy number
#'
#'This function takes the beta table of a tumor sample together with the
#'associated ploidy and admixture tables and computes the allele specific copy
#'number of each segment in the beta table.
#'
#'@param beta_table data.frame formatted as the output of function
#' \code{\link[CLONETv2:compute_beta_table]{compute_beta_table}}
#'@param ploidy_table data.frame formatted as the output of function
#' \code{\link[CLONETv2:compute_ploidy]{compute_ploidy}}
#'@param admixture_table data.frame formatted as the output of function
#' \code{\link[CLONETv2:compute_dna_admixture]{compute_dna_admixture}}
#'@param error_tb data.frame that reports for each combination of coverage and
#' number of informative SNPs the expected estimation error around beta. The
#' data.frame error_tb must contain 3 columns: \describe{ \item{mean.cov}{mean
#' coverage} \item{n.info.snps}{number of informative SNPs}
#' \item{adm.estimation.error}{estimated error on computed beta on a segment
#' with coverage mean.cov and n.info.snps informative SNPs} } Package CLONETv2
#' has a built-in error_tb named error_table (default=error_table)
#'@param allelic_imbalance_th maximum distance from the allele specific copy number
#' of a segment used to define the integer allele specific copy number value. Value 0.5
#' corresponds to rounding cnA and cnB (default=0.5)
#'@param n_digits number of digits in the output table (default=3)
#'@param n_cores number of cores (default=1)
#'@param debug return extra columns for debugging (default=F)
#'@return A data.frame that extends input beta_table with columns \describe{
#' \item{log2.corr}{log2 ratio adjusted by ploidy and admixture}
#' \item{cnA}{copy number of the major allele} \item{cnB}{copy number of the
#'  minor allele} \item{cnA.int}{integer copy number of the major allele}
#'  \item{cnB.int}{integer copy number of the minor allele} }
#' @examples
#'
#' ## Compute clonality table with default parameters
#' allele_specific_cna_table_toy <- compute_allele_specific_scna_table(
#' beta_table = bt_toy, ploidy_table = pl_table_toy,
#' admixture_table = adm_table_toy)
#'
#'@author Davide Prandi
#'@export
#'@md
compute_allele_specific_scna_table<-function(beta_table,
ploidy_table,
admixture_table,
error_tb = error_table,
allelic_imbalance_th = 0.5,
n_digits=3,
n_cores=1,
debug=F){
## check consistency of sample names
sample_id <- unique(beta_table$sample)
beta_cols <- colnames(beta_table)
if (length(sample_id) != 1){stop(paste("[",Sys.time() ,"] beta_table must contain exactly one sample\n",sep=""))}
if (nrow(ploidy_table) !=1 || nrow(admixture_table) !=1 || (pls = ploidy_table$sample) != sample_id || (ads = admixture_table$sample) != sample_id){
stop(paste("[",Sys.time() ,"] ploidy_table and admixture_table must contain only sample ",sample_id,"\n",sep=""))
}
## merge ploidy table, admixture table and beta_table and adjust log2 by ploidy and admixture
beta_table <- merge(beta_table, ploidy_table, by="sample")
beta_table$log2shift <- round(-log2(beta_table$ploidy/2),n_digits)
beta_table$log2.plCorr <- beta_table$log2 - beta_table$log2shift
beta_table <- merge(x = beta_table, y = admixture_table, by="sample")
beta_table$log2.corr <- suppressWarnings(log2(pmax(( 2 ^ (beta_table$log2.plCorr ) - admixture_table$adm[1] ) / (1 - admixture_table$adm[1]), 0)))
## add error information
beta_table.list <- by(beta_table, INDICES = beta_table$sample, FUN = addErrorToBetaTable, errorTable = error_tb, ncores = n_cores )
beta_table <- fromListToDF(beta_table.list)
rm(beta_table.list)
beta_table <- extendBetaTableWithCopyNumbers(BetaTable = beta_table, G = admixture_table$adm[1], ncores = n_cores)
beta_table <- computeAllelicImbalance(betaTable = beta_table, cnTh = allelic_imbalance_th, Ncores = n_cores)
if (!debug){
valid_cols <- c(beta_cols,"log2.corr","cnA", "cnB","cnA.int", "cnB.int")
beta_table <- beta_table[,valid_cols]
}
return(beta_table)
}
|
/scratch/gouwar.j/cran-all/cranData/CLONETv2/R/CLONETv2_compute_allele_specific_scna_table.R
|
#source("R/CLONETv2_basic_functions.R")
getValsfromPDF <- function(pdf,n)
{
return(pdf[sample(1:length(pdf),n,replace=T)])
}
# af <- af.control
# cov <- cov.control
# size <- 1000
# subsample_data <- function(af, cov, size){
#
# af_valid_ids <- which(af >= quantile(af)[2] & af <= quantile(af)[4])
#
# af_valid <- af[af_valid_ids]
# cov_valid <- cov[af_valid_ids]
#
# if (length(af_valid) > size){
#
# cov_quant_ids <- which(cov_valid >= quantile(cov_valid)[2] & cov_valid <= quantile(cov_valid)[4])
# cov_valid <- cov_valid[cov_quant_ids]
# af_valid <- af_valid[cov_quant_ids]
#
# if (length(af_valid) > size){
# set.seed(3451)
# random_ids <- sample(x = length(af_valid),size = size, replace = T)
# cov_valid <- cov_valid[random_ids]
# af_valid <- af_valid[random_ids]
# }
#
# }
#
# return(list(cov = cov_valid, af = af_valid))
# }
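## BetaDistr simulates, for each candidate beta in seq(1, 0.01, -0.01), the
## distribution of mirrored allelic fractions expected at heterozygous SNPs:
## a fraction beta of the (control-derived) coverage keeps the control allelic
## fraction, while the remaining 1-beta is assigned entirely to one allele.
## The resulting list of distributions is the reference used by computeBeta.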
BetaDistr <- function(af.control,cov.control)
{
rep = 1000
#sample_data <- subsample_data(af.control, cov.control, 10000)
#tumor <- sample_data$cov
#tumor.af <- sample_data$af
beta.distr = list()
for(beta in seq(1,0.01,-0.01))
{
tumor = getValsfromPDF(cov.control,min(rep, length(cov.control)))
tumor[which(tumor<20)] = 20
tumor.adm = round(tumor*beta)
tumor.del = round(tumor-tumor.adm)
tumor.af = getValsfromPDF(af.control,min(rep, length(af.control)))
tumor.alt = round(tumor.adm*tumor.af)
tumor.ref = tumor.adm-tumor.alt
number = sample(1:length(tumor.ref),1)
dir = sample(1:length(tumor.ref),number)
if(beta<1)
{
tumor.ref[dir] = tumor.ref[dir] + tumor.del[dir]
if(length(dir)<length(tumor.ref))
tumor.alt[-dir] = tumor.alt[-dir] + tumor.del[-dir]
}
tumor.af = tumor.alt/(tumor.alt+tumor.ref)
af.mirror=tumor.alt/(tumor.ref+tumor.alt)
af.mirror[which(af.mirror<0.5)] = 1-af.mirror[which(af.mirror<0.5)]
beta.distr[[length(beta.distr)+1]] = af.mirror
}
return(beta.distr)
}
BetaDistrMonotone <- function(beta.distr)
{
for(i in 2:length(beta.distr))
{
if(stats::median(beta.distr[[i]]) < stats::median(beta.distr[[i-1]]))
{
shift = stats::median(beta.distr[[i-1]]) - stats::median(beta.distr[[i]])
beta.distr[[i]] = beta.distr[[i]]+shift
}
}
return(beta.distr)
}
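## computeBeta brackets the observed mirrored-AF distribution of a segment
## between two simulated distributions: it scans beta.distr (beta = 1 down to
## 0.01) while the observed values are significantly larger (one-sided Wilcoxon,
## p < p.thr), then scans forward until they are significantly smaller, and
## linearly interpolates beta within that bracket using the observed quartiles.
## The returned vector is c(beta, beta.min, beta.max, upper bracket beta,
## lower bracket beta, evidence), where evidence = 0 means no significant
## departure from the beta = 1 distribution (no allelic imbalance detected).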
computeBeta <- function(snps.distr,beta.distr,p.thr=0.01)
{
betas = seq(1,0.01,-0.01)
evidence = 1
if (stats::wilcox.test(snps.distr,beta.distr[[1]],alternative="greater")$p.value>p.thr)
evidence = 0
greater = 1
for(i in 1:length(beta.distr))
{
if(stats::wilcox.test(snps.distr,beta.distr[[i]],alternative="greater")$p.value<p.thr) { greater=i } else { break }
}
less = greater = i
for(j in i:length(beta.distr))
{
if(stats::wilcox.test(snps.distr,beta.distr[[j]],alternative="less")$p.value<p.thr) { less=j;break }
}
less = j
if(less>greater)
less = j-1
q = 1-(stats::quantile(snps.distr)-min(snps.distr))/(max(snps.distr)-min(snps.distr))
beta = betas[less] + (betas[greater]-betas[less])*q[3]
beta.max = betas[less] + (betas[greater]-betas[less])*q[2]
beta.min = betas[less] + (betas[greater]-betas[less])*q[4]
return(c(beta,beta.min,beta.max,betas[greater],betas[less],evidence))
}
#' Function to compute beta table
#'
#' This function takes segmented data and per base pileup of tumor and matched
#' normal of a sample as input and associates a beta value to each genomic segment.
#'
#' @param seg_tb data.frame in [SEG
#'   format](http://software.broadinstitute.org/software/igv/SEG). Each row
#'   reports the log2 ratio of a genomic segment. CLONETv2 interprets the first
#'   column as sample name, columns two to four as genomic coordinates
#'   (chromosome, start location, and end location), column five is not used,
#'   and column six is the log2 ratio returned by the segmentation algorithm.
#' @param pileup_tumor,pileup_normal data.frame reporting pileup of SNPs in
#' tumor and normal samples respectively. First row contains column names and
#' subsequent rows report the pileup of a specific genomic positions. Required
#' information for each genomic position includes
#' chromosome, position, allelic fraction, and coverage. Required column names
#' are chr, pos, af, and cov
#' @param min_coverage minimum number of reads for considering a pileup position
#' valid (default=20)
#' @param min_required_snps minimum number of snps to call beta for a segment
#' (default=10)
#' @param min_af_het_snps minimum allowed allelic fraction of a SNP genomic
#' position (default=0.2)
#' @param max_af_het_snps maximum allowed allelic fraction of a SNP genomic
#' position (default=0.8)
#' @param n_digits number of digits in the output table (default=3)
#' @param n_cores number of available cores for computation (default=1)
#' @param plot_stats plot summary statistics of the computed beta table (default=F)
#' @param debug return extra columns for debugging (default=F)
#' @return A data.frame that extends input seg_tb with columns beta, nsnp, cov,
#'   n_beta. Moreover, CLONETv2 renames the columns of seg_tb as sample, chr,
#'   start, end, XYZ, log2, with XYZ being the original name of column five.
#'   As for seg_tb, each row of the output table represents a genomic
#'   segment. For each row, the value of beta is the proportion of neutral
#'   reads in the segment, while nsnp and cov represent respectively the number
#'   of informative SNPs and the mean coverage of the given segment. The value
#'   n_beta is the proportion of neutral reads in the normal sample. The value
#'   of n_beta should be 1 since in normal samples parental chromosomes are equally
#'   represented. Values of n_beta lower than 1 could indicate the presence of germline CNVs
#' or sequencing errors.
#' @examples
#'
#' ## Compute beta table with default parameters
#' bt_toy <- compute_beta_table(seg_tb_toy, pileup_tumor_toy, pileup_normal_toy)
#' @author Davide Prandi, Alessandro Romanel
#' @export
#' @md
compute_beta_table<-function(seg_tb,
pileup_tumor,
pileup_normal,
min_coverage=20,
min_required_snps=10,
min_af_het_snps=0.2,
max_af_het_snps=0.8,
n_digits=3,
n_cores=1,
plot_stats=F,
debug=F
){
## rename colnames
colnames(seg_tb)[c(1:4,6)] <- c("sample","chr","start","end","log2")
  ## check if sample is unique in seg file
sample_id <- unique(seg_tb$sample)
if(length(sample_id) > 1){stop("Table seg_tb contains more than 1 sample")}
## check pileup columns
if (!all(c("chr","pos","af","cov") %in% colnames(pileup_tumor)) | !all(c("chr","pos","af","cov") %in% colnames(pileup_normal))){
stop("Tables pileup_tumor and pileup_normal must have columns chrm pos, af and cov")
}
##############
### filter pileup
pileup_normal <- pileup_normal[which( pileup_normal$ref %in% c("A","C","G","T") &
pileup_normal$alt %in% c("A","C","G","T")),]
pileup_tumor <- pileup_tumor[which( pileup_tumor$ref %in% c("A","C","G","T") &
pileup_tumor$alt %in% c("A","C","G","T")),]
pileup_normal <- pileup_normal[which( pileup_normal$af >= min_af_het_snps &
pileup_normal$af <= max_af_het_snps &
pileup_normal$cov >= min_coverage ),]
pileup_normal$UID <- paste0(pileup_normal$chr,":",pileup_normal$pos)
pileup_tumor$UID <- paste0(pileup_tumor$chr,":",pileup_tumor$pos)
pileup_tumor <- pileup_tumor[which( pileup_tumor$cov >= min_coverage & pileup_tumor$UID %in% pileup_normal$UID ),]
# use only chromosomes 1-22
pileup_tumor <- pileup_tumor[which(suppressWarnings(as.numeric(gsub("chr|CHR|Chr","",pileup_tumor$chr))) %in% seq(1,22,1)),]
## some checks before computing beta
if (nrow(pileup_tumor) == 0){
stop("No valid heterozygous SNPs identified in tumor_pileup table")
}
if (nrow(seg_tb) == 0){
stop("No segments in seg_tb table")
}
## for reproducibility
RNGkind("L'Ecuyer-CMRG")
set.seed(utf8ToInt(sample_id)[1]*sum(utf8ToInt(sample_id)))
#############
  ### compute the distribution of beta on normal to assess noise
beta.distr.raw = BetaDistr(af.control = pileup_normal$af,cov.control = pileup_normal$cov)
beta.distr = BetaDistrMonotone(beta.distr.raw)
# compute beta for each tumor sample raw
extend_seg_with_beta_value <- function(seg_number, seg_tb, pileup_tumor, pileup_normal,beta.distr){
#cat(seg_number,"\n")
thisSeg <- seg_tb[seg_number,,drop=F]
## init output
thisSeg$beta = NA
thisSeg$nref = NA
thisSeg$nsnp = NA
thisSeg$cov = NA
thisSeg$sd = NA
thisSeg$AFmean = NA
thisSeg$AFsd = NA
thisSeg$pval = NA
thisSeg$delevidence = NA
thisSeg$beta75 = NA
thisSeg$beta25 = NA
thisSeg$betamin = NA
thisSeg$betamax = NA
## add beta on normal
thisSeg$n_beta = NA
thisSeg$n_delevidence = NA
thisSeg$n_beta75 = NA
thisSeg$n_beta25 = NA
thisSeg$n_betamin = NA
thisSeg$n_betamax = NA
representativeSNPs <- pileup_tumor[which(
pileup_tumor$chr == thisSeg$chr &
      pileup_tumor$pos >= thisSeg$start &
pileup_tumor$pos <= thisSeg$end) ,]
if(nrow(representativeSNPs)>=min_required_snps)
{
af.seg = representativeSNPs$af
af.seg[which(af.seg<0.5)] = 1-af.seg[which(af.seg<0.5)]
beta = computeBeta(snps.distr=af.seg,beta.distr=beta.distr,p.thr=0.01)
if(length(beta)>0)
{
thisSeg$beta = round(beta[1],digits=n_digits)
thisSeg$nref = NA
thisSeg$nsnp = nrow(representativeSNPs)
thisSeg$cov = mean(representativeSNPs$cov)
thisSeg$sd = stats::sd(representativeSNPs$cov)
thisSeg$AFmean = mean(representativeSNPs$af)
thisSeg$AFsd = stats::sd(representativeSNPs$af)
thisSeg$pval = 0.01
thisSeg$delevidence = round(beta[6])
thisSeg$beta75 = round(beta[2],digits=n_digits)
thisSeg$beta25 = round(beta[3],digits=n_digits)
thisSeg$betamin = round(beta[5],digits=n_digits)
thisSeg$betamax = round(beta[4],digits=n_digits)
}
## compute on normal
n_representativeSNPs <- pileup_normal[which(pileup_normal$UID %in% representativeSNPs$UID),]
n_af.seg = n_representativeSNPs$af
n_af.seg[which(n_af.seg<0.5)] = 1-n_af.seg[which(n_af.seg<0.5)]
n_beta = computeBeta(snps.distr=n_af.seg,beta.distr=beta.distr,p.thr=0.01)
if(length(n_beta)>0)
{
thisSeg$n_beta = round(n_beta[1],digits=n_digits)
thisSeg$n_delevidence = round(n_beta[6])
thisSeg$n_beta75 = round(n_beta[2],digits=n_digits)
thisSeg$n_beta25 = round(n_beta[3],digits=n_digits)
thisSeg$n_betamin = round(n_beta[5],digits=n_digits)
thisSeg$n_betamax = round(n_beta[4],digits=n_digits)
}
}else{
thisSeg$nsnp = nrow(representativeSNPs)
thisSeg$cov = mean(representativeSNPs$cov)
thisSeg$sd = stats::sd(representativeSNPs$cov)
thisSeg$AFmean = mean(representativeSNPs$af)
thisSeg$AFsd = stats::sd(representativeSNPs$af)
}
return(thisSeg)
}
res = mclapply(seq(1,nrow(seg_tb),1),extend_seg_with_beta_value,seg_tb, pileup_tumor, pileup_normal,beta.distr,
mc.preschedule = T, mc.cores = n_cores, mc.set.seed = T)
outTable <- fromListToDF(res)
if (plot_stats){
n_segments <- nrow(outTable)
n_segments_with_beta <- length(which(!is.na(outTable$beta)))
fraction_analyzed_segments <- n_segments_with_beta / n_segments
seg_lenght_distribution <- stats::quantile(outTable$end-outTable$start + 1)
seg_cov_distribution <- stats::quantile(outTable$cov, na.rm = T)
n_snps_distr <- stats::quantile(outTable$nsnp)
out_text <-
paste(
"Computed beta table of sample \"",sample_id,"\"\n ",
"Number of processed segments: ",n_segments,"\n ",
"Number of segments with valid beta: ",n_segments_with_beta," (",round(fraction_analyzed_segments*100),"%)\n ",
"Quantiles of input segment lenghts:\n ",
paste(format(names(seg_lenght_distribution)), format(seg_lenght_distribution), sep = ":", collapse = "\n "),
"\n ",
"Quantiles of input segment coverage:\n ",
paste(format(names(seg_cov_distribution)), format(seg_cov_distribution), sep = ":", collapse = "\n "),
"\n ",
"Quantiles of number of informative SNPs per input segment:\n ",
paste(format(names(n_snps_distr)), format(n_snps_distr), sep = ":", collapse = "\n "),
sep = ""
)
cat(out_text)
}
if (!debug){
seg_columns <-colnames(outTable)[1:6]
extra_cols <- c("beta", "nsnp", "cov","n_beta")
outTable <- outTable[,c(seg_columns,extra_cols)]
}
return(outTable)
}
|
/scratch/gouwar.j/cran-all/cranData/CLONETv2/R/CLONETv2_compute_beta_table.R
|
findOptimumPloidy <- function(observedLog2Beta,maxCN,minAdm=0.1,maxAdm=0.7,stepCN=0.01,stepAdm = 0.05,library_type="WES",ncores){
confs <- expand.grid(adm=seq(minAdm,maxAdm,stepAdm),cn=seq(1,maxCN,stepCN), stringsAsFactors=F)
computeObservedRMSE.ind <- function(i, confs,observedLog2Beta,maxCN){
Conf <- confs[i,]
Conf$RMSE <- computeObservedRMSE(Conf=c(confs$adm[i],confs$cn[i]), observedLog2Beta,maxCN)
return(Conf )
}
confs.list <-mclapply(seq(1,nrow(confs),1),FUN=computeObservedRMSE.ind,confs=confs,observedLog2Beta=observedLog2Beta,maxCN=maxCN,mc.preschedule = T,mc.cores=ncores)
confs <- fromListToDF(confs.list)
if (library_type == "WES"){
# select confs with minimum RMSE
candidate.confs <- confs[which(confs$RMSE <= stats::quantile(confs$RMSE,probs=0.05)),]
# take only cases with minimum adm
if (nrow(candidate.confs) > 1){
discontinuity_points <- which(candidate.confs$cn[seq(1,nrow(candidate.confs)-1,1)] + 2*stepCN < candidate.confs$cn[seq(2,nrow(candidate.confs),1)])
if (length(discontinuity_points) > 0 ){
candidate.confs <- candidate.confs[seq(1, min(discontinuity_points,na.rm = T),1),]
}
}
valToRet <- candidate.confs$cn[which.min(candidate.confs$RMSE)]
return(valToRet)
}else if (library_type == "WGS"){
    # as WGS data provide many more segments, use the hdbscan algorithm to find clusters of valid points
observedLog2Beta_clusters <- hdbscan(observedLog2Beta, minPts = 5)
#plot(x = observedLog2Beta$log2,y = observedLog2Beta$beta, pch=20, xlim=c(-1.2,1), ylim=c(0,1), col=cl$cluster+1)
## compute median of each cluster
found_clusters <- setdiff(unique(observedLog2Beta_clusters$cluster),0)
cl_stats <- data.frame(row.names = found_clusters)
cl_stats$cluster_id <- found_clusters
cl_stats$log2_median <- NA
cl_stats$log2_mean <- NA
for(cl in found_clusters ){
cl_stats[as.character(cl),"log2_median"] <- round(stats::median(observedLog2Beta$log2[which(observedLog2Beta_clusters$cluster == cl)]),2)
#cl_stats[as.character(cl),"log2_mean"] <- round(mean(observedLog2Beta$log2[which(observedLog2Beta_clusters$cluster == cl)]),2)
}
## use leftmost cluster median
valToRet <- round(2*2^(-1*min(cl_stats$log2_median)),2)
return(valToRet)
}else{ stop("Value ",library_type," for parameter library_type not supported")}
}
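## computeObservedRMSE: for a candidate (admixture, ploidy) pair, build the
## expected (log2, beta) positions of the balanced copy number states 0..maxCN
## via perturbData and return the root mean square of each observed segment's
## distance to the closest expected log2 value (see computeRMSE below).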
computeObservedRMSE <- function(Conf,observedLog2Beta,maxCN ){
nPoints <- maxCN + 1
adm.local <- Conf[1]
ploidy <- Conf[2]
#at("adm=",adm.local," ploid=",ploidy,"\n")
randomData <- data.frame(row.names=seq(1,nPoints,1))
randomData$amd.local <- rep(adm.local,nPoints)
randomData$ploidy <- rep(ploidy,nPoints)
randomData$cnA <- c(0,seq(1,maxCN,1))
randomData$cnB <- c(0,seq(1,maxCN,1))
syntheticSegs.list <- lapply(seq(1,nrow(randomData),1),perturbDataDF,randomData)
syntheticSegs<-cbind(randomData,do.call(rbind,syntheticSegs.list))
#plot(syntheticSegs$log2Val,syntheticSegs$betaVal.apparent,pch=20,ylim=c(0,1))
#points(syntheticSegs$log2Val,syntheticSegs$betaVal.apparent,pch=20,col="orange2")
expectedLog2Beta <- syntheticSegs[,c("log2Val","betaVal.apparent")]
colnames(expectedLog2Beta ) <- c("log2Val","betaVal")
return(computeRMSE(observedLog2Beta=observedLog2Beta,expectedLog2Beta=expectedLog2Beta))
}
perturbData <- function(cnA,cnB,adm.local,MeanPloidy,coeffVariation=0.2){
plT <- cnA+cnB
#pltSd <- plT * coeffVariation
#plT <- max(0,plT + rnorm(n=1,sd=pltSd))
plN<-2
betaVal <- ( adm.local * plN ) / ( plT - adm.local * ( plT - plN ) )
n.cells.mono.apparent <- max(cnA,cnB) - min(cnA,cnB) #(cnA + cnB) - min(cnA,cnB)
n.cells.bi.apparent <- 2*min(cnA,cnB)
adm.local.apparent <- ( adm.local + (1-adm.local)*(n.cells.bi.apparent)) / ( adm.local + (1-adm.local)*(n.cells.bi.apparent) + (1-adm.local)*(n.cells.mono.apparent) )
betaVal.apparent <- ( adm.local.apparent * 2 ) / ( 1 + adm.local.apparent )
#adm.local.apparent <- adm.local + (1-adm.local)*(min(cnA,cnB)/max(cnA,cnB))
#betaVal.apparent <- round(( adm.local.apparent * 2 ) / ( 1 + adm.local.apparent ) ,3 )
#betaVal.apparent <- if (cnA==cnB){ 1 }else{ betaVal + (1-betaVal) * ( 2*min(cnA,cnB)/(cnA+cnB)) }
#adm.local.apparent <- betaVal.apparent / (2-betaVal.apparent)
log2Val <- log2(( ( betaVal * plN + ( 1 - betaVal ) * plT ) / plN) / (MeanPloidy / plN))
log2Val.corr <- log2(plT / plN)
## add noise to betaBav
#betaValSD
#betaVal.apparent <- if (cnA==cnB){ 1 }else{ betaVal + ( 2*min(cnA,cnB)/(cnA+cnB))}
#((MeanPloidy/2)))
outDF <- data.frame(row.names=1)
outDF$cnA <- cnA
outDF$cnB <- cnB
outDF$ploidy <- MeanPloidy
outDF$adm.local <- adm.local
outDF$adm.local.apparent <- adm.local.apparent
outDF$betaVal <- betaVal
outDF$betaVal.apparent <- betaVal.apparent
outDF$log2Val <- log2Val
outDF$log2Val.corr <- log2Val.corr
#outDF$log2Val.Segmentation <- log2(plT/MeanPloidy)
return(outDF)
}
perturbDataDF <- function(i,randomData){
return(perturbData(cnA=randomData$cnA[i],cnB=randomData$cnB[i],adm.local=randomData$amd.local[i],MeanPloidy=randomData$ploidy[i]))
}
computeRMSE <- function(observedLog2Beta,expectedLog2Beta,nCores=1){
#for each observed compute the minimum distance from all expected
distances <- unlist(mclapply(seq(1,nrow(observedLog2Beta),1),
computeMinDistanceFromExpected,
observedLog2Beta=observedLog2Beta,
expectedLog2Beta=expectedLog2Beta,mc.preschedule = T,mc.cores=nCores))
RMSE <- sqrt(sum(distances^2)/length(distances))
return(RMSE)
}
computeMinDistanceFromExpected <- function(i,observedLog2Beta,expectedLog2Beta){
thisLog2 <- observedLog2Beta$log2[i]
return(min(abs(expectedLog2Beta$log2Val - thisLog2),na.rm=T))
}
#for testing
getPossibleCnComb <- function(maxCN,step=1){
out<-c()
for(i in seq(0,maxCN,step)){
for(j in seq(0,i,step)){
out <- rbind(out,c(j,(i-j)))
}
}
return(out)
}
#' Function to compute ploidy from a beta table.
#'
#' This function takes the beta table of a tumor sample and returns its ploidy.
#'
#' @param beta_table data.frame formatted as the output of function
#' \code{\link[CLONETv2:compute_beta_table]{compute_beta_table}}
#' @param max_homo_dels_fraction estimated maximum proportion of genomic
#' segments corresponding to an homozygous deletion (default=0.01)
#' @param beta_limit_for_neutral_reads minimum beta value of a segment valid for
#' computing ploidy (default=0.90)
#' @param min_coverage minimum coverage of a segment valid for computing ploidy
#' (default=20)
#' @param min_required_snps minimum number of informative snps in a segment
#' valid for computing ploidy (default=10)
#' @param library_type WES, WGS (default=WES)
#' @param n_digits number of digits in the output table (default=3)
#' @param n_cores number of available cores for computation (default=1)
#' @return A data.frame with two columns: sample that corresponds to column
#'   sample of the input beta_table, and ploidy that reports the computed ploidy
#' @examples
#' \donttest{
#' ## Compute ploidy table with default parameters
#' pl_table_toy <- compute_ploidy(bt_toy)
#' }
#' @author Davide Prandi
#' @export
#' @md
compute_ploidy <- function(beta_table,
max_homo_dels_fraction = 0.01,
beta_limit_for_neutral_reads = 0.90,
min_coverage=20,
min_required_snps=10,
library_type="WES",
n_digits=3,
n_cores=1){
available_library_types <- c("WES","WGS")
if (!library_type %in% available_library_types){stop("Parameter library_type must be one of ",paste(available_library_types,collapse = ", "))}
sample_id <- unique(beta_table$sample)
if (length(sample_id) != 1){stop("Beta table must contains only one sample")}
## remove potential homo dels
if (library_type == "WES"){
beta_table <- beta_table[which(beta_table$log2 > stats::quantile(beta_table$log2,probs=max_homo_dels_fraction)),]
}else if (library_type == "WGS"){
beta_table <- beta_table[which(beta_table$log2 > stats::quantile(beta_table$log2[which(!is.na(beta_table$beta))],probs=max_homo_dels_fraction)),]
}else{
stop("Value ",library_type," for parameter library_type not supported")
}
## only putative copy number neutral segments
beta_table <- beta_table[which(beta_table$beta >= beta_limit_for_neutral_reads &
beta_table$nsnp >= min_required_snps &
beta_table$cov >= min_coverage ),]
if (nrow(beta_table) > 0 ){
## prepare data
observedLog2Beta <- data.frame(row.names=seq(1,nrow(beta_table),1))
observedLog2Beta$log2 <- beta_table$log2
observedLog2Beta$beta <- beta_table$beta
maxCN <- max(3,ceiling( 2/(2^min(observedLog2Beta$log2)) * 10)/10)
plComputed <- findOptimumPloidy(observedLog2Beta,maxCN=maxCN,library_type = library_type ,ncores=n_cores )
}else{
plComputed <- NA
}
dfOut <- data.frame(row.names=1,stringsAsFactors=F)
dfOut$sample <- sample_id
dfOut$ploidy <- round(plComputed, n_digits)
return(dfOut)
}
|
/scratch/gouwar.j/cran-all/cranData/CLONETv2/R/CLONETv2_compute_ploidy.R
|
getClonalityWithError<-function(integerCN,
beta,
adm.global.thisZone,
adm.global.thisZone.min,
adm.global.thisZone.max,
clonalityThreshold,
local.error,
roundDec=3){
if(integerCN == 0){
## to fix when beta = 0
if (beta==0){
betaCorr <- NA
zone.ctm.local <- NA
clonality <- NA
clonality.int <- interval(NULL,NULL)
clonality.status <- "not.analysed"
}else{
betaCorr <- beta
zone.ctm.local <- beta / (2 - beta)
G <- adm.global.thisZone
L <- zone.ctm.local
clonality <- setBetween0and1(1- ((G - ( L * G)) / ( L * ( 1 - G))))
zone.ctm.local.min <- zone.ctm.local - local.error
zone.ctm.local.max <- zone.ctm.local + local.error
G.int <- interval(adm.global.thisZone.min,adm.global.thisZone.max)
L.int <- interval(zone.ctm.local.min,zone.ctm.local.max )
clonality.int <- 1 - ((G.int - ( L.int * G.int)) / ( L.int * ( 1 - G.int)))
clonality.int <- interval(setBetween0and1(min(clonality.int)),setBetween0and1(max(clonality.int)))
}
}
if (integerCN == 1){
betaCorr <- beta
zone.ctm.local <- beta / (2 - beta)
clonality <- setBetween0and1((1 - zone.ctm.local) / (1 - adm.global.thisZone))
zone.ctm.local.min <- zone.ctm.local - local.error
zone.ctm.local.max <- zone.ctm.local + local.error
zone.ctm.local.int <- interval(zone.ctm.local.min,zone.ctm.local.max)
adm.global.thisZone.int <- interval(adm.global.thisZone.min,adm.global.thisZone.max)
clonality.int <- ( 1 - zone.ctm.local.int) / (1 - adm.global.thisZone.int)
clonality.int <- interval(setBetween0and1(min(clonality.int)),setBetween0and1(max(clonality.int)))
}
if (integerCN == 2){
betaCorr <- beta
zone.ctm.local <- betaCorr
clonality <- zone.ctm.local
zone.ctm.local.min <- zone.ctm.local - local.error
zone.ctm.local.max <- zone.ctm.local + local.error
clonality.int <- interval(zone.ctm.local.min,zone.ctm.local.max)
clonality.int <- interval(setBetween0and1(min(clonality.int)),setBetween0and1(max(clonality.int)))
}
if (integerCN == 3){
betaCorr <- 1 - (3 * (1 - beta) )
if ( betaCorr > 0){
zone.ctm.local <- ( 3 * betaCorr ) / (( 3 - 2 ) * betaCorr + 2)
clonality <- setBetween0and1((1 - zone.ctm.local) / (1 - adm.global.thisZone))
zone.ctm.local.min <- zone.ctm.local - local.error
zone.ctm.local.max <- zone.ctm.local + local.error
zone.ctm.local.int <- interval(zone.ctm.local.min,zone.ctm.local.max)
adm.global.thisZone.int <- interval(adm.global.thisZone.min,adm.global.thisZone.max)
clonality.int <- ( 1 - zone.ctm.local.int) / (1 - adm.global.thisZone.int)
clonality.int <- interval(setBetween0and1(min(clonality.int)),setBetween0and1(max(clonality.int)))
}else{
betaCorr <- NA
zone.ctm.local <- NA
clonality <- NA
clonality.int <- interval(NULL,NULL)
clonality.status <- "not.analysed"
}
}
if (integerCN == 4){
betaCorr <- beta
zone.ctm.local <- ( 4 * betaCorr ) / ((4-2) * betaCorr +2 )
clonality <- setBetween0and1(zone.ctm.local )
zone.ctm.local.min <- zone.ctm.local - local.error
zone.ctm.local.max <- zone.ctm.local + local.error
clonality.int <- interval(zone.ctm.local.min,zone.ctm.local.max)
clonality.int <- interval(setBetween0and1(min(clonality.int)),setBetween0and1(max(clonality.int)))
}
if (integerCN == 5){
betaCorr <- 1 - (5 * (1 - beta) )
if ( betaCorr > 0){
zone.ctm.local <- ( 5 * betaCorr ) / (( 5 - 2 ) * betaCorr + 2)
clonality <- setBetween0and1((1 - zone.ctm.local) / (1 - adm.global.thisZone))
zone.ctm.local.min <- zone.ctm.local - local.error
zone.ctm.local.max <- zone.ctm.local + local.error
zone.ctm.local.int <- interval(zone.ctm.local.min,zone.ctm.local.max)
adm.global.thisZone.int <- interval(adm.global.thisZone.min,adm.global.thisZone.max)
clonality.int <- ( 1 - zone.ctm.local.int) / (1 - adm.global.thisZone.int)
clonality.int <- interval(setBetween0and1(min(clonality.int)),setBetween0and1(max(clonality.int)))
}else{
betaCorr <- NA
zone.ctm.local <- NA
clonality <- NA
clonality.int <- interval(NULL,NULL)
clonality.status <- "not.analysed"
}
}
if (integerCN > 5){
betaCorr <- NA
zone.ctm.local <- NA
clonality <- NA
clonality.int <- interval(NULL,NULL)
clonality.status <- "not.analysed"
}
if (interval_is_empty(clonality.int)){
clonality.min <- NA
clonality.max <- NA
}else{
clonality <- round(clonality,roundDec)
clonality.min <- round(min(clonality.int),roundDec)
clonality.max <- round(max(clonality.int),roundDec)
if ( clonality.min >= clonalityThreshold ){ clonality.status <- "clonal"}
if ( clonality.max <= clonalityThreshold ){ clonality.status <- "subclonal"}
if ( clonality.min <= clonalityThreshold && clonality.max >= clonalityThreshold && clonality < clonalityThreshold ) { clonality.status <- "uncertain.subclonal" }
if ( clonality.min <= clonalityThreshold && clonality.max >= clonalityThreshold && clonality >= clonalityThreshold ){ clonality.status <- "uncertain.clonal"}
}
data<-data.frame(row.names=1,stringsAsFactors=F)
data$betaCorr <- betaCorr
data$zone.ctm.local <- zone.ctm.local
data$clonality <- clonality
data$clonality.min <- clonality.min
data$clonality.max <- clonality.max
data$clonality.status <- clonality.status
return(data)
}
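## Quick numeric illustration of the integerCN == 1 branch above: with
## beta = 0.5 the locus-level apparent admixture is 0.5 / (2 - 0.5) = 1/3, and
## with a global admixture of 0.2 the clonality is (1 - 1/3) / (1 - 0.2) ~ 0.83,
## i.e. roughly 83% of tumor cells are estimated to carry the one-copy deletion.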
compute_clonality <- function(betaTable,errorTable,clonalityThreshold,betaThreshold=NULL,roundDec = 3,n_cores=1){
#cat(betaTable$sample[1],"\n")
## backward compatibility
if (is.null(betaThreshold)){betaThreshold<-clonalityThreshold}
betaTable$integerCN <- NA
betaTable$clonality <- NA
betaTable$clonality.min <- NA
betaTable$clonality.max <- NA
betaTable$clonality.status <- "not.analysed"
#i<-1
getIndexClonality <- function(i){
#cat(i,"\n")
zone <- betaTable[i,,drop=F]
zone$adm.global <- round(zone$adm,roundDec)
zone$adm.global.min <- round(zone$adm.min,roundDec)
zone$adm.global.max <- round(zone$adm.max,roundDec)
#for(colN in c(1:ncol(zone)))
#can be analysed
if (is.na(zone$beta) || is.na(zone$log2.corr) || is.na( zone$adm.global.min) || is.na(zone$adm.global.max)){
return(zone)
}
#determine error from nsnps and cov
nsnps <- zone$nsnp
cov <- zone$cov
idxError <- which(errorTable$n.info.snps <= nsnps & errorTable$mean.cov <= cov)
if (length(idxError) == 0 ){idxError <- 1}
local.error <- errorTable$adm.estimation.error[max(idxError)]
#cn given log2
cn <- 2*2^zone$log2.corr
#valid cn are floor(cn) and ceil(cn)
clonCNlow <- getClonalityWithError(integerCN=floor(cn),
beta=zone$beta,
adm.global.thisZone=zone$adm.global,
adm.global.thisZone.min=zone$adm.global.min,
adm.global.thisZone.max=zone$adm.global.max,
clonalityThreshold,
local.error,
roundDec)
clonCNhigh <- getClonalityWithError(integerCN=ceiling(cn),
beta=zone$beta,
adm.global.thisZone=zone$adm.global,
adm.global.thisZone.min=zone$adm.global.min,
adm.global.thisZone.max=zone$adm.global.max,
clonalityThreshold,
local.error,
roundDec)
if (is.null(clonCNlow) | is.null(clonCNhigh) | is.na(clonCNlow$clonality) | is.na(clonCNhigh$clonality) ){
return(zone)
}
if (cn >= 0 & cn < 1){
#if ( ! clonCNhigh$clonality.status %in% c("clonal","uncertain.clonal")){
#if ( betaThreshold
# zone$integerCN <- floor(cn)
# zone$clonality <- clonCNlow$clonality
# zone$clonality.min <- clonCNlow$clonality.min
# zone$clonality.max <- clonCNlow$clonality.max
# zone$clonality.status <- clonCNlow$clonality.status
#}else{
# zone$integerCN <- ceil(cn)
# zone$clonality <- clonCNhigh$clonality
# zone$clonality.min <- clonCNhigh$clonality.min
# zone$clonality.max <- clonCNhigh$clonality.max
# zone$clonality.status <- clonCNhigh$clonality.status
#}
if ( zone$beta >= betaThreshold ){
zone$integerCN <- 0
zone$clonality <- clonCNlow$clonality
zone$clonality.min <- clonCNlow$clonality.min
zone$clonality.max <- clonCNlow$clonality.max
zone$clonality.status <- "clonal"
}else if (cn <= 0.8) {
zone$integerCN <- floor(cn)
zone$clonality <- clonCNlow$clonality
zone$clonality.min <- clonCNlow$clonality.min
zone$clonality.max <- clonCNlow$clonality.max
zone$clonality.status <- clonCNlow$clonality.status
}else{
zone$integerCN <- ceiling(cn)
zone$clonality <- clonCNhigh$clonality
zone$clonality.min <- clonCNhigh$clonality.min
zone$clonality.max <- clonCNhigh$clonality.max
zone$clonality.status <- clonCNhigh$clonality.status
}
}
if (cn >= 1 & cn < 2){
#if ( cn < 1+clonalityThreshold && ! clonCNhigh$clonality.status %in% c("clonal")){
if (zone$beta <= betaThreshold){
zone$integerCN <- floor(cn)
zone$clonality <- clonCNlow$clonality
zone$clonality.min <- clonCNlow$clonality.min
zone$clonality.max <- clonCNlow$clonality.max
zone$clonality.status <- clonCNlow$clonality.status
}else{
zone$integerCN <- ceiling(cn)
zone$clonality <- clonCNhigh$clonality
zone$clonality.min <- clonCNhigh$clonality.min
zone$clonality.max <- clonCNhigh$clonality.max
zone$clonality.status <- "not.analysed"
}
}
if (cn >= 2 & cn <= 3){
#if ( cn > 2 + ( 1 - clonalityThreshold) && ! clonCNlow$clonality.status %in% c("clonal") ){
if (zone$beta <= betaThreshold){
zone$integerCN <- ceiling(cn)
zone$clonality <- clonCNhigh$clonality
zone$clonality.min <- clonCNhigh$clonality.min
zone$clonality.max <- clonCNhigh$clonality.max
zone$clonality.status <- clonCNhigh$clonality.status
}else{
zone$integerCN <- floor(cn)
zone$clonality <- clonCNlow$clonality
zone$clonality.min <- clonCNlow$clonality.min
zone$clonality.max <- clonCNlow$clonality.max
zone$clonality.status <- "not.analysed"
}
}
#if(zone$nsnp < minSNPs || zone$cov < minCov){
# zone$clonality.status <- "not.analysed"
#}
return(zone)
}
#i<-346
#clonalityTable.list <-lapply(c(1:nrow(betaTable)),getIndexClonality)
clonalityTable.list <- mclapply(seq(1,nrow(betaTable),1), getIndexClonality, mc.preschedule = T, mc.cores = n_cores)
clonalityTable <- fromListToDF(clonalityTable.list)
return(clonalityTable)
#hist(clonalityTable$ )
}
#'Function to compute clonality of somatic copy number data
#'
#'This function takes the beta table of a tumor sample together with the
#'associated ploidy and admixture tables and computes the clonality of each
#'segment in the beta table.
#'
#'@param beta_table data.frame formatted as the output of function
#' \code{\link[CLONETv2:compute_beta_table]{compute_beta_table}}
#'@param ploidy_table data.frame formatted as the output of function
#' \code{\link[CLONETv2:compute_ploidy]{compute_ploidy}}
#'@param admixture_table data.frame formatted as the output of function
#' \code{\link[CLONETv2:compute_dna_admixture]{compute_dna_admixture}}
#'@param error_tb data.frame that reports for each combination of coverage and
#' number of informative SNPs the expected estimation error around beta. The
#' data.frame error_tb must contain 3 columns: \describe{ \item{mean.cov}{mean
#' coverage} \item{n.info.snps}{number of informative SNPs}
#' \item{adm.estimation.error}{estimated error on computed beta on a segment
#' with coverage mean.cov and n.info.snps informative SNPs} } Package CLONETv2
#' has a built-in error_tb named error_table (default=error_table)
#'@param clonality_threshold threshold to discretize continuous clonality value
#' (default=0.85)
#'@param beta_threshold threshold on beta value to determine clonality direction
#' (default=0.90)
#'@param n_digits number of digits in the output table (default=3)
#'@param n_cores number of cores (default=1)
#'@param debug return extra columns for debugging (default=F)
#'@return A data.frame that extends input beta_table with columns \describe{
#' \item{clonality}{estimated fraction of tumor cells with log2 copy number}
#' \item{clonality.min}{minimum estimated fraction of tumor cells with log2 copy
#' number} \item{clonality.max}{maximum estimated fraction of tumor cells with
#' log2 copy number} \item{clonality.status}{discretized clonality status into
#' five values: \emph{clonal}, the large majority of the tumor cells have the same
#' copy number; \emph{subclonal}, not all the tumor cells have the same copy
#' number; \emph{not.analysed}, it is not possible to determine clonality;
#' \emph{uncertain.clonal} and \emph{uncertain.subclonal} correspond
#' respectively to \emph{clonal} and \emph{subclonal} populations but with a less
#' reliable clonality estimate } }
#' @examples
#' \donttest{
#'
#' ## Compute clonality table with default parameters
#' scna_clonality_table_toy <- compute_scna_clonality_table(beta_table = bt_toy,
#' ploidy_table = pl_table_toy, admixture_table = adm_table_toy)
#' }
#'@author Davide Prandi
#'@export
#'@md
compute_scna_clonality_table<-function(beta_table,
ploidy_table,
admixture_table,
error_tb = error_table,
clonality_threshold = 0.85,
beta_threshold = 0.90,
n_digits=3,
n_cores=1,
debug=F){
## check consistency of sample names
sample_id <- unique(beta_table$sample)
if (length(sample_id) != 1){stop(paste("[",Sys.time() ,"] beta_table must contain exactly one sample\n",sep=""))}
if (nrow(ploidy_table) !=1 || nrow(admixture_table) !=1 || (pls = ploidy_table$sample) != sample_id || (ads = admixture_table$sample) != sample_id){
stop(paste("[",Sys.time() ,"] ploidy_table and admixture_table must contain only sample ",sample_id,"\n",sep=""))
}
## merge ploidy table, admixture table and beta_table and adjust log2 by ploidy and admixture
beta_table <- merge(beta_table, ploidy_table, by="sample")
beta_table$log2shift <- round(-log2(beta_table$ploidy/2),n_digits)
beta_table$log2.plCorr <- round(beta_table$log2 - beta_table$log2shift, n_digits)
beta_table <- merge(x = beta_table, y = admixture_table, by="sample")
beta_table$log2.corr <- suppressWarnings(log2(pmax(( 2 ^ (beta_table$log2.plCorr ) - admixture_table$adm[1] ) / (1 - admixture_table$adm[1]), 0)))
## compute clonality
clonality_table <- compute_clonality(betaTable = beta_table,
errorTable = error_tb,
clonalityThreshold = clonality_threshold,
betaThreshold = beta_threshold,
roundDec = n_digits,
n_cores = n_cores )
if (!debug){
cols_to_save <- c("sample","chr","start","end","num.mark","log2","beta","nsnp","cov","n_beta","clonality","clonality.min","clonality.max","clonality.status")
clonality_table <- clonality_table[,intersect(colnames(clonality_table),cols_to_save)]
}
return(clonality_table)
}
|
/scratch/gouwar.j/cran-all/cranData/CLONETv2/R/CLONETv2_compute_scna_clonality_table.R
|
## add to vep annotated snvs columns for clonality computations
preprocess_snvtable_vep <- function (snv_table, normal_sample_name, tumor_sample_name) {
filt_snv_table <- snv_table[snv_table$VARIANT_CLASS == 'SNV', ]
with(filt_snv_table, {
get_el <- function (i) function (ee) ee[i]
split_loc <- strsplit(Location, ':')
pos <- as.numeric(sapply(split_loc, get_el(2)))
data.frame(
filt_snv_table,
Chromosome = sapply(split_loc, get_el(1)),
Start_position = pos,
End_position = pos,
Tumor_Sample_Barcode = tumor_sample_name,
Matched_Norm_Sample_Barcode = normal_sample_name,
stringsAsFactors = F,
check.names = F
)
})
}
## merge a table reporting snvs and a beta table
## snvs table needs columns: sample, chr, start, end
## beta table needs columns: sample, chr, start, end
extend_SNVt_with_bt <- function(SNVtable,
betaTable, # five columns table: chr start end HUGO cyto arm
Ncores = 4){
#snvID<-278
findSNVinBT <- function(snvID, SNVtable, betaTable){
#cat(snvID,"\n")
thisBT <- betaTable[which(betaTable$sample == SNVtable$Tumor_Sample_Barcode[snvID]),]
#betaPoses <- getSegmentsPos(SNVtable$chr[snvID], SNVtable$start[snvID], SNVtable$end[snvID], segments = thisBT[,c("chr","start","end")] )
betaPoses <- getSegmentsPos(SNVtable$Chromosome[snvID], SNVtable$Start_position[snvID], SNVtable$End_position[snvID], segments = thisBT[,c("chr","start","end")] )
if (length(betaPoses) > 1){
stop("Error ",snvID,": many segments intersect one position\n")
}
if (length(betaPoses) == 0){
## cat("Warning ",snvID,": no segments\n")
## create an empty bt
emptyBT <- thisBT[1,,drop=F]
# emptyBT$chr <- SNVtable$chr[snvID]
# emptyBT$start <- SNVtable$start[snvID]
# emptyBT$end <- SNVtable$end[snvID]
emptyBT$chr <- SNVtable$Chromosome[snvID]
emptyBT$start <- SNVtable$Start_position[snvID]
emptyBT$end <- SNVtable$End_position[snvID]
emptyBT[,setdiff(colnames(emptyBT),c("sample","chr","start","end","adm","ploidy"))] <- NA
thisBT <- emptyBT
betaPoses <- 1
}
thisGene <- SNVtable[snvID,,drop=F]
thisGene <- cbind(thisGene,thisBT[betaPoses,])
return(thisGene)
}
geneBT.list <- mclapply(seq(1,nrow(SNVtable),1), findSNVinBT, SNVtable = SNVtable, betaTable = betaTable, mc.preschedule = T, mc.cores = Ncores)
#lapply(seq(20,nrow(SNVtable),1), findSNVinBT, SNVtable = SNVtable, betaTable = betaTable)
geneBT <- fromListToDF(geneBT.list)
return(geneBT)
}
## given an SNV table extended with allele-specific copy number data, compute the clonality of each SNV
## required columns
#SNVid <-12
#SNVtable.ext <- snv_read_count_ext
computeSNVclonality <- function(SNVid, SNVtable.ext){
## select snvs
## cat(SNVid,"\n")
thisSNV <- SNVtable.ext[SNVid,]
thisSNV$t_cov <- NA
thisSNV$t_af <- NA
thisSNV$n_admReads <- NA
thisSNV$t_ref_count_corr <- NA
thisSNV$t_af_corr <- NA
thisSNV$cn.int <- NA
thisSNV$CN_SNVmut <- NA
thisSNV$VAFexp <- NA
thisSNV$SNV.clonality <- NA
thisSNV$SNV.clonality.int <- NA
if (is.na(thisSNV$cnA) || is.na(thisSNV$rc_alt_tumor)){
return(thisSNV)
}
thisSNV$t_cov <- thisSNV$rc_alt_tumor + thisSNV$rc_ref_tumor
if (thisSNV$t_cov == 0){
return(thisSNV)
}
thisSNV$t_af <- thisSNV$rc_alt_tumor / thisSNV$t_cov
#thisSNV$n_admReads <- round((2 * thisSNV$ASEQ_t_cov * thisSNV$adm) / ( 2 * thisSNV$adm + (thisSNV$cnA + thisSNV$cnB) * (1 - thisSNV$adm)))
thisSNV$n_admReads <- round((2 * thisSNV$t_cov * thisSNV$adm) /
( 2 * thisSNV$adm + (thisSNV$cnA + thisSNV$cnB) * (1 - thisSNV$adm)))
thisSNV$t_ref_count_corr <- thisSNV$rc_ref_tumor - thisSNV$n_admReads
thisSNV$t_af_corr <- thisSNV$rc_alt_tumor / (thisSNV$rc_alt_tumor + thisSNV$t_ref_count_corr)
#thisSNV$ASEQ_t_alt/thisSNV$ASEQ_t_cov
#### compute clonality (% of tumor cells with the mutation)
## first find the number of mutated alleles
thisSNV$cn.int <- thisSNV$cnA.int + thisSNV$cnB.int
if (thisSNV$cn.int <= 0){return(thisSNV)}
possibleAF <- seq(1,thisSNV$cn.int,1)/thisSNV$cn.int
#possibleAF <- c(1,2,3)/3
#CN_M <- seq(1,thisSNV$cn.int,1)[which.min(abs(possibleAF - thisSNV$ASEQ_t_AF_corr))]
#seq(1,3,1)[which.min(abs(possibleAF - thisSNV$ASEQ_t_AF_corr))]
thisSNV$CN_SNVmut <- seq(1,thisSNV$cn.int,1)[which.min(abs(possibleAF - thisSNV$t_af_corr))]
thisSNV$VAFexp <- (thisSNV$CN_SNVmut * (1 - thisSNV$adm)) /
( 2 * thisSNV$adm + (thisSNV$cnA + thisSNV$cnB) * (1 - thisSNV$adm))
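## VAFexp is the variant allelic fraction expected if every tumor cell carried
## CN_SNVmut mutated copies, given DNA admixture adm and allele-specific copy
## numbers cnA and cnB; SNV.clonality below rescales the difference between the
## observed and expected AF by the same denominator, so a value of 1 means fully clonal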
thisSNV$SNV.clonality = ((thisSNV$t_af - thisSNV$VAFexp) * ( 2 * thisSNV$adm + (thisSNV$cnA + thisSNV$cnB) * (1 - thisSNV$adm))) / (thisSNV$CN_SNVmut * (1 -thisSNV$adm)) + 1
thisSNV$SNV.clonality.int = ((thisSNV$t_af - thisSNV$VAFexp) * ( 2 * thisSNV$adm + (thisSNV$cnA.int + thisSNV$cnB.int) * (1 - thisSNV$adm))) / (thisSNV$CN_SNVmut * (1 -thisSNV$adm)) + 1
return(thisSNV)
}
#'Function to compute clonality of SNVs
#'
#'This function takes as input the genomic positions of SNVs and computes the
#'percentage of genomically homogeneous cells harboring each mutation.
#'
#'
#'@param sample_id the id of the analyzed sample. It must be the same value
#' reported in column sample of tables beta_table, ploidy_table, and
#' admixture_table
#'@param snv_read_count data.frame reporting in each row the genomic coordinates
#' of an SNV together with number of reference and alternative reads covering
#' the position in columns rc_ref_tumor and rc_alt_tumor, respectively. See
#' parameter annotation_style for details about column names
#'@param beta_table data.frame formatted as the output of function
#' \code{\link[CLONETv2:compute_beta_table]{compute_beta_table}}
#'@param ploidy_table data.frame formatted as the output of function
#' \code{\link[CLONETv2:compute_ploidy]{compute_ploidy}}
#'@param admixture_table data.frame formatted as the output of function
#' \code{\link[CLONETv2:compute_dna_admixture]{compute_dna_admixture}}
#'@param error_tb data.frame that reports for each combination of coverage and
#' number of informative SNPs the expected estimation error around beta. The
#' data.frame error_tb must contain 3 columns: \describe{ \item{mean.cov}{mean
#' coverage} \item{n.info.snps}{number of informative SNPs}
#' \item{adm.estimation.error}{estimated error on computed beta on a segment
#' with coverage mean.cov and n.info.snps informative SNPs} } Package CLONETv2
#' has a built-in error_tb named error_table (default=error_table)
#'@param error_rate expected fraction of SNV positions with outlier variant
#' allelic fraction (default=0.05)
#'@param n_digits number of digits in the output table (default=3)
#'@param n_cores number of cores (default=1)
#'@param annotation_style a string that corresponds to the format of the columns
#' that describe the genomic coordinates of a SNV. Accepted values are VEP and
#' MAF. [VEP
#' annotation](https://www.ensembl.org/info/docs/tools/vep/index.html)
#' describes genomic coordinates with a single column named Location. [MAF
#' format](https://docs.gdc.cancer.gov/Data/File_Formats/MAF_Format/) has
#' columns Chromosome, Start_position, and End_position for each aberrant
#' position
#'@param debug return extra columns for debugging (default=F)
#'@return A data.frame that extends input table snv_read_count with columns
#' sample, cnA, cnB, t_af, t_af_corr, SNV.clonality, and SNV.clonality.status.
#' Columns cnA and cnB report the allele specific copy number of the genomic
#' segment containing the SNV position. Columns t_af and t_af_corr are
#' respectively raw and ploidy/purity adjusted tumor variant allelic fractions.
#' SNV.clonality reports the percentage of tumor cells harboring the SNV and
#' with allele specific copy number cnA and cnB. The SNV.clonality.status column
#' lists discretized SNV.clonality values. Discrete states are clonal,
#' uncertain.clonal, uncertain.subclonal, and subclonal, based on thresholds
#' automatically computed on the SNV.clonality values. Empty
#' SNV.clonality.status of an SNV indicates that clonality cannot be assessed.
#' @examples
#'
#' ## Compute SNVs clonality
#' snv_clonality_table_toy <- compute_snv_clonality("toy_sample",
#' snv_reads_toy, bt_toy, pl_table_toy, adm_table_toy)
#'
#'@author Davide Prandi, Tarcisio Fedrizzi
#'@export
#'@md
compute_snv_clonality<-function(sample_id,
snv_read_count,
beta_table,
ploidy_table,
admixture_table,
error_tb = error_table,
error_rate=0.05,
n_digits=3,
n_cores=1,
annotation_style = "VEP",
debug=F
){
## check consistency of sample names
bt_id <- unique(beta_table$sample)
snv_cols <- colnames(snv_read_count)
if (length(sample_id) != 1){stop(paste("[",Sys.time() ,"] beta_table must contain exactly one sample\n",sep=""))}
if (nrow(ploidy_table) !=1 || nrow(admixture_table) !=1 || (pls = ploidy_table$sample) != bt_id || (ads = admixture_table$sample) != bt_id){
stop(paste("[",Sys.time() ,"] ploidy_table and admixture_table must contain only sample ",sample_id,"\n",sep=""))
}
if(sample_id != bt_id){
stop(paste("[",Sys.time() ,"] sample_id and sample in beta_table do not match",sample_id,"\n",sep=""))
}
beta_table <- compute_allele_specific_scna_table(beta_table, ploidy_table, admixture_table, error_tb = error_tb, n_digits = n_digits, n_cores = n_cores, debug = T)
if (annotation_style == "VEP"){
snv_read_count <- preprocess_snvtable_vep(snv_table = snv_read_count, normal_sample_name = paste0(sample_id,"_normal"), tumor_sample_name = sample_id)
}else if (annotation_style == "MAF"){
}else{
stop(paste("[",Sys.time() ,"] only VEP or MAF annotations are supported",sample_id,"\n",sep=""))
}
###################################
## compute clonality
snv_read_count_ext <- extend_SNVt_with_bt(SNVtable = snv_read_count, betaTable = beta_table, Ncores = n_cores)
SNVdata.Cl.list <- mclapply(seq(1,nrow(snv_read_count_ext),1), computeSNVclonality, snv_read_count_ext, mc.preschedule = T, mc.cores = n_cores)
SNVdata.Cl <- fromListToDF(SNVdata.Cl.list)
## correct clonality > 1
limitVal <- stats::quantile(SNVdata.Cl$SNV.clonality,na.rm = T,probs = 1-error_rate)
SNVdata.Cl$SNV.clonality[which(SNVdata.Cl$SNV.clonality >= limitVal)] <- NA
SNVdata.Cl$SNV.clonality[which(SNVdata.Cl$SNV.clonality > 1)] <- 2-SNVdata.Cl$SNV.clonality[which(SNVdata.Cl$SNV.clonality > 1)]
## discretize calls
## discretize clonality
SNVdata.Cl$SNV.clonality.status <- ""
if (nrow(SNVdata.Cl) > 4){
clonality_intervals <- suppressWarnings(discretize(x = SNVdata.Cl$SNV.clonality, method = "cluster", categories = 4, onlycuts = T, nstart=20, iter.max=500))
SNVdata.Cl$SNV.clonality.status[which(SNVdata.Cl$SNV.clonality >= clonality_intervals[1] & SNVdata.Cl$SNV.clonality < clonality_intervals[2] )] <- "subclonal"
SNVdata.Cl$SNV.clonality.status[which(SNVdata.Cl$SNV.clonality >= clonality_intervals[2] & SNVdata.Cl$SNV.clonality < clonality_intervals[3] )] <- "uncertain.subclonal"
SNVdata.Cl$SNV.clonality.status[which(SNVdata.Cl$SNV.clonality >= clonality_intervals[3] & SNVdata.Cl$SNV.clonality < clonality_intervals[4] )] <- "uncertain.clonal"
SNVdata.Cl$SNV.clonality.status[which(SNVdata.Cl$SNV.clonality > clonality_intervals[4] )] <- "clonal"
}
if (!debug){
save_cols <- c("sample","cnA", "cnB", "t_af", "t_af_corr", "SNV.clonality", "SNV.clonality.status")
SNVdata.Cl <- SNVdata.Cl[,c(snv_cols,save_cols)]
}
return(SNVdata.Cl)
}
|
/scratch/gouwar.j/cran-all/cranData/CLONETv2/R/CLONETv2_compute_snv_clonality.R
|
#' Beta estimation error.
#'
#' A precomputed table reporting for different combinations of coverage and
#' number of informative SNPs the expected error of the beta value computed by
#' function \code{\link[CLONETv2:compute_beta_table]{compute_beta_table}}.
#'
#' @format A data frame with columns mean.cov, n.info.snps, and
#' adm.estimation.error \describe{ \item{mean.cov}{genomic segment coverage}
#' \item{n.info.snps}{number of informative SNPs}
#' \item{adm.estimation.error}{expected error on beta estimate } }
"error_table"
#' Toy example of segmented data.
#'
"seg_tb_toy"
#' Toy example of tumor pileup data.
#'
"pileup_tumor_toy"
#' Toy example of normal pileup data.
#'
"pileup_normal_toy"
#' Toy example of snv data.
#'
"snv_reads_toy"
#' Toy example of beta table.
#'
"bt_toy"
#' Toy example of ploidy table.
#'
"pl_table_toy"
#' Toy example of admixture table.
#'
"adm_table_toy"
#' Toy example of clonality table of somatic copy number.
#'
"scna_clonality_table_toy"
#' Toy example of allele specific table of somatic copy number.
#'
"allele_specific_cna_table_toy"
#' Toy example of snv clonality table.
#'
"snv_clonality_table_toy"
|
/scratch/gouwar.j/cran-all/cranData/CLONETv2/R/CLONETv2_data.R
|
#' Calculate bias validation interval
#'
#' @param TV True value
#' @param m factor
#' @param se_c SE Combined
#'
#' @return named list with the interval
bias_validation_interval <- function(TV, m, se_c){
return(list(lower_limit = TV - m*se_c, higher_limit = TV + m*se_c))
}
#' Calculate bias interval from TV
#'
#' @param scenario Chosen scenario from section 3.3 of EP15-A3
#' @param nrun Number of runs
#' @param nrep number of repetitions per run (n0)
#' @param SWL S within laboratory (obtained from anova)
#' @param SR S repeatability (obtained from anova)
#' @param nsamples total number of samples tested usual 1
#' @param expected_mean Expected mean or TV
#' @param user_mean Mean of all samples (obtained from anova)
#' @param ... additional parameters necessary for processing the chosen scenario
#'
#' @return a named list with the user mean, the TV, the acceptance interval (the user mean should fall inside it for approval), whether the bias is significant, and the total bias (user mean - TV)
#' @export
#'
#' @examples calculate_bias_interval(scenario = 'E',
#'nrun = 7,
#'nrep = 5,
#'SWL = .042,
#'SR = .032,
#'nsamples = 2,
#'expected_mean = 1,
#'user_mean = .94
#')
calculate_bias_interval <- function(scenario, nrun, nrep, SWL, SR, nsamples, expected_mean, user_mean, ...){
if (!missing(...)) {
additional_args <- list(...)
}else{
additional_args <- list()
}
nlab<-NA
for(i in names(additional_args)){
assign(i, additional_args[[i]])
}
se_x <- calculate_se_x(nrun, nrep, SWL, SR)
se_rm <- calculate_se_rm(scenario, additional_args)
se_c <- calculate_se_c(se_x, se_rm)
df_x <- nrun-1
df_c <- calculate_df_combined(scenario, se_x=se_x, se_rm=se_rm, se_c=se_c, df_x=df_x, nlab=nlab, nrun=nrun)
m <- calculate_m(df_c, nsamples = nsamples)
interval <- bias_validation_interval(expected_mean, m, se_c)
is_significant <- ifelse(user_mean < interval$lower_limit | user_mean > interval$higher_limit, TRUE, FALSE)
bias = user_mean - expected_mean
return(
list(mean = user_mean, TV = expected_mean, interval = interval, signif = is_significant, bias = bias)
)
}
|
/scratch/gouwar.j/cran-all/cranData/CLSIEP15/R/bias_calculate_interval.R
|
utils::globalVariables(c("dfc_references", "labs", "runs"))
#' Calculate degrees of freedom of SE C (SE combined) given a selected scenario and additional parameters necessary for the scenario
#' @importFrom dplyr filter
#'
#' @param scenario Scenario (A, B, C, D, E)
#' @param ... additional parameters necessary for the scenario
#'
#' @return DF
calculate_df_combined <- function(scenario, ...){
if (!missing(...)) {
additional_args <- list(...)
}else{
additional_args <- list()
}
if(scenario == 'E'){
if(is.null(additional_args[['df_x']])){
stop("Can't calc df_combined for scenario E: df_x is necessary")
}
return(additional_args[['df_x']])
}else if(scenario %in% c('A', 'D')){
if(
is.null(additional_args[['df_x']]) |
is.null(additional_args[['se_c']]) |
is.null(additional_args[['se_x']])
){
stop("Can't calc df_combined for scenario: df_x, se_c, se_x are necessary")
}
df_x <- additional_args[['df_x']]
se_c <- additional_args[['se_c']]
se_x <- additional_args[['se_x']]
return(df_x*((se_c/se_x)^4))
}else if(scenario %in% c('B', 'C')){
if(
is.null(additional_args[['se_rm']]) |
is.null(additional_args[['se_x']]) |
is.null(additional_args[['nlab']]) |
is.null(additional_args[['nrun']])
){
stop("Can't calc df_combined for scenario: se_rm, se_x, nrun, nlab are necessary")
}
references <- dfc_references
tau = additional_args[['se_rm']]/additional_args[['se_x']]
nlab <- additional_args[['nlab']]
nrun <- additional_args[['nrun']]
tau <- ifelse(tau == Inf, NA, tau)
lab_aprox <- unique(references$labs)[which.min(abs(unique(references$labs)-nlab))]
filtered_reference <-
references |>
dplyr::filter(lab_aprox == `labs` & nrun == `runs`)
return(unique(filtered_reference$df)[which.min(abs(unique(filtered_reference$tau)-tau))] )
}else{
stop('Allowed scenarios must be A, B, C, D or E')
}
}
|
/scratch/gouwar.j/cran-all/cranData/CLSIEP15/R/bias_df_combined.R
|
#' Calculate M
#' @importFrom stats qt
#'
#' @param df degrees of freedom
#' @param conf.level confidence interval
#' @param nsamples number of samples
#'
#' @return m factor
calculate_m <- function(df, conf.level=95, nsamples = 1){
a = 1-(((100-conf.level)/100)/(2*nsamples))
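# Illustration: with the defaults (conf.level = 95, nsamples = 1) and df = 6,
# a = 0.975 and the value returned below is round(qt(0.975, 6), 2) = 2.45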
return(round(qt(a, df), 2))
}
|
/scratch/gouwar.j/cran-all/cranData/CLSIEP15/R/bias_m.R
|
#' Calculate SE combined based on SE X and SE RM
#'
#' @param se_x SE X
#' @param se_rm SE RM
#'
#' @return SE C
calculate_se_c <- function(se_x, se_rm){
return(sqrt(se_x^2 + se_rm^2))
}
|
/scratch/gouwar.j/cran-all/cranData/CLSIEP15/R/bias_se_c.R
|
#' Calculate SE RM for scenario A when a “standard error”, “standard uncertainty” (abbreviated by lowercase “u”), or “combined standard uncertainty” (often denoted by “uC”) is supplied
#'
#' @param u “standard error” or “standard uncertainty” (abbreviated by lowercase “u”) or “combined standard uncertainty” (often denoted by “uC ”)
#'
#' @return SE RM
calculate_se_rm_a_u <- function(u){
return(u)
}
#' Calculate SE RM for scenario A when the manufacturer supplies an “expanded uncertainty” (abbreviated by uppercase “U”) for the TV and a “coverage factor” (abbreviated by “k”)
#'
#' @param U expanded uncertainty
#' @param k coverage factor
#'
#' @return SE RM
calculate_se_rm_a_Uk <- function(U, k){
return(U/k)
}
#' Calculate SE RM for scenario A when the manufacturer supplies an “expanded uncertainty” (abbreviated by uppercase “U”) for the TV and a coverage percentage, e.g. 95 or 99,
#' @importFrom stats qnorm
#'
#' @param U expanded uncertainty
#' @param coverage coverage
#'
#' @return SE RM
calculate_se_rm_a_Ucoverage <- function(U, coverage){
p_dist <- (100 - (100-coverage)/2)/100
q_dist <- round(qnorm(p_dist), 2)
return(U/q_dist)
}
#' Calculate SE RM for scenario A when the manufacturer supplies lower and upper limits and a coverage percentage for the confidence interval (95, 99, ...)
#' @importFrom stats qnorm
#'
#' @param upper upper limit
#' @param lower lower limit
#' @param coverage coverage
#'
#' @return SE RM
calculate_se_rm_a_lowerupper <- function(upper, lower, coverage){
p_dist <- (100 - (100-coverage)/2)/100
q_dist <- round(qnorm(p_dist), 2)
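# e.g. with coverage = 95, q_dist = round(qnorm(0.975), 2) = 1.96, so the value
# returned below is (upper - lower) / 3.92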
return((upper-lower)/(2*q_dist))
}
#' Calculate SE RM for scenario B or C If the reference material has a TV determined by PT or peer group results
#'
#' @param sd_rm SD RM
#' @param nlab number of lab or peer group results
#'
#' @return SE RM
calculate_se_rm_scenario_b_c <- function(sd_rm, nlab){
return(sd_rm/sqrt(nlab))
}
#' Calculate SE RM for scenario D or E If the TV represents a conventional quantity value or When working with a commercial QC material supplied with a TV for which the standard error cannot be estimated
#'
#' @return SE RM
calculate_se_rm_scenario_d_e <- function(){
return(0)
}
#' Calculate SE RM given a scenario and a list of additional args that can change based on the selected scenario or sub scenario
#'
#' @param scenario scenario (A, B, C, D, E)
#' @param additional_args additional arguments list
#'
#' @return SE RM
calculate_se_rm <- function(scenario, additional_args){
if(scenario == 'A'){
if(is.null(additional_args[['subscenario']]) || !additional_args[['subscenario']] %in% c('u', 'Uk', 'Ucoverage', 'lowerupper')){
stop("One of the following subscenarios should be supplied:
'u', 'Uk', 'Ucoverage', 'lowerupper'")
}
if(additional_args[['subscenario']] == 'u'){
if(is.null(additional_args[['u']])){
stop('For the chosen scenario u must be supplied')
}
return(calculate_se_rm_a_u(additional_args[['u']]))
}else if(additional_args[['subscenario']] == 'Uk'){
if(
is.null(additional_args[['U']]) |
is.null(additional_args[['k']])
){
stop('For the chosen scenario U and k must be supplied')
}
return(calculate_se_rm_a_Uk(additional_args[['U']], additional_args[['k']]))
}else if(additional_args[['subscenario']] == 'Ucoverage'){
if(
is.null(additional_args[['U']]) |
is.null(additional_args[['coverage']])
){
stop('For the chosen scenario U and coverage must be supplied')
}
return(calculate_se_rm_a_Ucoverage(additional_args[['U']], additional_args[['coverage']]))
}else if(additional_args[['subscenario']] == 'lowerupper'){
if(
is.null(additional_args[['lower']]) |
is.null(additional_args[['upper']]) |
is.null(additional_args[['coverage']])
){
stop('For the chosen scenario lower, upper and coverage must be supplied')
}
return(calculate_se_rm_a_lowerupper(additional_args[['lower']], additional_args[['upper']], additional_args[['coverage']]))
}
}else if(scenario %in% c('B', 'C')){
if(
is.null(additional_args[['sd_rm']]) |
is.null(additional_args[['nlab']])
){
stop('For the chosen scenario sd_rm and nlab must be supplied')
}
return(calculate_se_rm_scenario_b_c(additional_args[['sd_rm']], additional_args[['nlab']]))
}else if(scenario %in% c('D', 'E')){
return(calculate_se_rm_scenario_d_e())
}else{
stop("Scenario must be one of the following: 'A', 'B', 'C', 'D' or 'E'")
}
}
|
/scratch/gouwar.j/cran-all/cranData/CLSIEP15/R/bias_se_rm.R
|
#' Calculate SE x
#'
#' @param nrun Run number
#' @param nrep Number of repetitions per run n0
#' @param SWL SWL from aov table
#' @param SR SR from aov table
#'
#' @return SE X
calculate_se_x <- function(nrun, nrep, SWL, SR){
f <- 1/nrun
s <- SWL^2 - ((nrep-1)/nrep) * (SR^2)
se_mean <- sqrt(f*s)
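# Illustration with the values used in the package vignette:
# calculate_se_x(nrun = 7, nrep = 5, SWL = 0.042, SR = 0.032) gives
# sqrt((0.042^2 - (4/5) * 0.032^2) / 7) ~ 0.0116, returned rounded to 0.01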
return(round(se_mean, 2))
}
|
/scratch/gouwar.j/cran-all/cranData/CLSIEP15/R/bias_se_x.R
|
utils::globalVariables(c("value"))
#' Create table for precision calculations
#' @importFrom dplyr filter
#' @importFrom tidyr pivot_longer
#' @importFrom utils data
#'
#' @param data a long or a wide data.frame with the same structure as CLSIEP15::ferritin_long or CLSIEP15::ferritin_wider
#' @param data_type c('wider', 'long')
#'
#' @return a data.frame with renamed columns and structure adjustments
#' @export
#'
#' @examples data <- create_table_ep_15(ferritin_long, data_type = "long")
create_table_ep_15 <- function(data, data_type = 'wider'){
if(data_type == 'wider'){
colnames(data) <- c('rep', paste0('Run_', 1:(ncol(data)-1)))
pivoted <-
data |>
pivot_longer(cols = -rep) |>
dplyr::filter(!is.na(`value`))
return(pivoted)
}else if(data_type == 'long'){
colnames(data) <- c('rep', 'name', 'value')
return(data)
}
}
|
/scratch/gouwar.j/cran-all/cranData/CLSIEP15/R/create_table_ep_15.R
|
#' Ferritin data used in CLSI document examples in wide format
#'
#'
#' @format `ferritin_wider`
#' A data frame with 5 rows and 6 columns:
#' \describe{
#' \item{rep}{Repetition of sample}
#' \item{Run_1, Run_2, Run_3, Run_4, Run_5}{Runs from 5 distinct days}
#' ...
#' }
#' @source CLSI EP15-A3
"ferritin_wider"
#' Ferritin data used in CLSI document examples in long format
#'
#'
#' @format `ferritin_long`
#' A data frame with 25 rows and 3 columns:
#' \describe{
#' \item{rep}{Repetition of sample}
#' \item{name}{Run identifier; one of the runs obtained from 5 distinct days}
#' \item{value}{result of the observation}
#' ...
#' }
#' @source CLSI EP15-A3
"ferritin_long"
#' Reference of degrees of freedom based on tau given in the CLSI Manual
#'
#'
#' @format `dfc_references`
#' A data frame with 390 rows and 4 columns:
#' \describe{
#' \item{tau}{tau}
#' \item{df}{degrees of freedom}
#' \item{labs}{number of labs or peers}
#' \item{runs}{number of runs}
#' ...
#' }
#' @source CLSI EP15-A3
"dfc_references"
|
/scratch/gouwar.j/cran-all/cranData/CLSIEP15/R/data.R
|
#' Calculate n0
#'
#' @importFrom dplyr group_by count
#'
#' @param long_result_table table generated by create_table_ep_15 function
#'
#' @return The n0 value, which refers to the average number of results per run
calculate_n0 <- function(long_result_table){
n0_table <-
long_result_table |>
group_by(rep) |>
count()
N <- sum(n0_table$n)
SN2 <- sum(n0_table$n ^ 2)
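# For a balanced design (e.g. 5 runs x 5 replicates per run) every count is 5,
# so N = 25, SN2 = 125 and the value below is (25 - 125/25) / (5 - 1) = 5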
return((N - (SN2/N)) / (length(n0_table$rep) - 1))
}
#' Calculate ANOVA Results and Imprecision Estimates
#' @importFrom stats aov sd
#'
#' @param ep_15_table table generated from create_table_ep_15()
#'
#' @return Named list with ANOVA Results and Imprecision Estimates
#' @export
#'
#' @examples calculate_aov_infos(create_table_ep_15(CLSIEP15::ferritin_long, data_type = 'long'))
calculate_aov_infos <- function(ep_15_table){
return_object <- list()
aov_results <- summary(aov(value ~ name, data = ep_15_table))
aov_table <- data.frame(
source_of_variation = c('between_run', 'within_run'),
ss = aov_results[[1]]$`Sum Sq`,
df = aov_results[[1]]$Df,
ms = aov_results[[1]]$`Mean Sq`
)
return_object[['N']] <- length(ep_15_table$value)
return_object[['k']] <- length(unique(ep_15_table$rep))
return_object[['mean']] <- mean(ep_15_table$value)
return_object[['sd']] <- sd(ep_15_table$value)
return_object[['aov_table']] <- aov_table
vw = aov_table$ms[2]
n0 <- calculate_n0(ep_15_table)
return_object[['n0']] = n0
vb <- abs((aov_table$ms[1] - aov_table$ms[2])/n0)
return_object[['Vbetween']] = vb
return_object[['Vwithin']] = vw
sR <- sqrt(vw)
sB <- sqrt(vb)
sWL <- sqrt(vw + vb)
return_object[['SR']] = sR
return_object[['SWL']] = sWL
cvR <- sR/mean(ep_15_table$value)*100
cvB <- sB/mean(ep_15_table$value)*100
cvWL <- sWL/mean(ep_15_table$value)*100
return_object[['CVR']] = cvR
return_object[['CVWL']] = cvWL
return(return_object)
}
|
/scratch/gouwar.j/cran-all/cranData/CLSIEP15/R/precision_calculate_aov_info.R
|
#' Calculate degrees of freedom within-lab as specified in appendix B
#'
#' @param cvr_manufacture CV repeatability informed by the manufacturer
#' @param cvwl_manufacture CV within-lab informed by the manufacturer
#' @param k the number of runs
#' @param n0 the “average” number of results per run
#' @param N the total number of replicates
#'
#' @return dfwl
calculate_dfWL <- function(cvr_manufacture, cvwl_manufacture, k, n0, N){
p <- cvwl_manufacture/cvr_manufacture
cv_wl_a <- p
vw <- (1/100)^2
vwl <- (cv_wl_a/100)^2
vb <- vwl - vw
MS1 = vw + n0 * vb
MS2 = vw
DF1 = k-1
DF2 = N-k
a1 = 1/n0
a2 = (n0-1)/n0
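# Satterthwaite-style approximation: the effective df of a1*MS1 + a2*MS2 is its
# square divided by the df-weighted squares of the two components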
num = (a1*MS1 + a2*MS2)^2
den1 = ((a1*MS1)^2)/DF1
den2 = ((a2*MS2)^2)/DF2
return(round(num/(den1 + den2)))
}
#' Calculate the UVL factor
#'
#' @importFrom stats qchisq
#'
#' @param nsamp n samples in the study
#' @param df degrees of freedom
#' @param alpha significance level
#'
#' @return Uvl factor
#'
calculate_F_uvl <- function(nsamp = 1, df, alpha = 0.05){
X2 <- qchisq(1-alpha/nsamp, df)
F_uvl <- sqrt(X2/df)
return(F_uvl)
}
#' Calculate upper verification limit
#'
#' Generic function for calculating the UVL. The return is a named list; cv_uvl_r and cv_uvl_wl depend on the type of input (S or CV): if the inputs are SR and SWL, the returned values are S.
#'
#' @param aov_return Return of calculate_aov_info()
#' @param nsamp number of samples in the experiment
#' @param cvr_or_sr Desirable CV or S repeatability
#' @param cvwl_or_swl Desirable CV or S within-lab
#'
#' @return Named list with UVL params
#' @export
#'
#' @examples data <- create_table_ep_15(ferritin_wider)
#' aov_t <- calculate_aov_infos(data)
#' calculate_uvl_info(aov_t, nsamp = 5, cvr_or_sr = .43, cvwl_or_swl = .7)
calculate_uvl_info <- function(aov_return, nsamp = 1, cvr_or_sr, cvwl_or_swl){
N <- aov_return$N
k <- aov_return$k
n0 <-aov_return$n0
dfR <- N - k
dfWL <- calculate_dfWL(cvr_manufacture = cvr_or_sr, cvwl_manufacture = cvwl_or_swl, k = k, n0 = n0, N = N)
f_r <- calculate_F_uvl(df = dfR, nsamp)
f_wl <- calculate_F_uvl(df = dfWL, nsamp)
cv_uvl_r <- f_r*cvr_or_sr
cv_uvl_wl <- f_wl*cvwl_or_swl
return_object <- list()
return_object[['dfR']] = dfR
return_object[['dfWL']] = dfWL
return_object[['f_r']] = f_r
return_object[['f_wl']] = f_wl
return_object[['cv_uvl_r']] = cv_uvl_r
return_object[['cv_uvl_wl']] = cv_uvl_wl
return(return_object)
}
|
/scratch/gouwar.j/cran-all/cranData/CLSIEP15/R/precision_calculate_uvl_info.R
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----eval=FALSE, include=FALSE------------------------------------------------
# devtools::install_github('clauciorank/CLSIEP15')
## ----setup--------------------------------------------------------------------
library(CLSIEP15)
## ----echo=FALSE---------------------------------------------------------------
knitr::kable(ferritin_wider)
## ----echo=FALSE---------------------------------------------------------------
knitr::kable(head(ferritin_long, 12))
## -----------------------------------------------------------------------------
data <- create_table_ep_15(ferritin_long, data_type = 'long')
## -----------------------------------------------------------------------------
data <- create_table_ep_15(ferritin_wider)
## -----------------------------------------------------------------------------
aov_t <- calculate_aov_infos(data)
aov_t
## -----------------------------------------------------------------------------
uvl_info <- calculate_uvl_info(aov_return = aov_t, cvr_or_sr = .43, cvwl_or_swl = .7)
uvl_info
## ----eval=FALSE---------------------------------------------------------------
# calculate_bias_interval(
# scenario,
# nrun,
# nrep,
# SWL,
# SR,
# nsamples,
# expected_mean,
# user_mean,
# ...
# )
## ----eval=FALSE---------------------------------------------------------------
# calculate_bias_interval('A',
# subscenario = 'Uk',
# nrun = 7,
# nrep = 5,
# SWL = .042,
# SR = .032,
# nsamples = 2,
#                          expected_mean = 1,
# user_mean = .94
# )
## -----------------------------------------------------------------------------
calculate_bias_interval('A',
subscenario = 'Uk',
nrun = 7,
nrep = 5,
SWL = .042,
SR = .032,
nsamples = 2,
expected_mean = 1,
user_mean = .94,
U = 140,
k = 1.96
)
## -----------------------------------------------------------------------------
calculate_bias_interval('C', nrun = 7,
nrep = 5,
SWL = .042,
SR = .032,
nsamples = 2,
expected_mean = 1,
user_mean = .94,
sd_rm = .05,
nlab = 43)
## -----------------------------------------------------------------------------
calculate_bias_interval('E',
nrun = 7,
nrep = 5,
SWL = .042,
SR = .032,
nsamples = 2,
expected_mean = 1,
user_mean = .94
)
|
/scratch/gouwar.j/cran-all/cranData/CLSIEP15/inst/doc/CLSIEP15.R
|
---
title: "CLSIEP15"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{CLSIEP15}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
**This package focuses on Clinical and Laboratory Standards Institute (CLSI) EP15-A3 calculations**
CLSI EP15-A3 provides guidance on the user verification of precision and the estimation of bias for laboratory test methods. It outlines the steps and procedures that clinical laboratories should follow to evaluate the performance of a test method they intend to implement.
This package is an R implementation of the calculations used in the document.
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
# Install
```{r eval=FALSE, include=FALSE}
devtools::install_github('clauciorank/CLSIEP15')
```
# Load
```{r setup}
library(CLSIEP15)
```
# Usage
**Create a table in the specified format**
**Wider Format**
```{r echo=FALSE}
knitr::kable(ferritin_wider)
```
**Long Format**
```{r echo=FALSE}
knitr::kable(head(ferritin_long, 12))
```
`ferritin_long` and `ferritin_wider` are provided as data in the package and can be used as example
**For Long Format**
```{r}
data <- create_table_ep_15(ferritin_long, data_type = 'long')
```
**For Wide Format**
```{r}
data <- create_table_ep_15(ferritin_wider)
```
# Precision
***Calculate Anova parameters and Imprecision Estimates***
```{r}
aov_t <- calculate_aov_infos(data)
aov_t
```
If the user repeatability (SR or CVR) < the repeatability claim and the within-lab imprecision (SWL or CVWL) < the within-lab claim, the
user has verified the manufacturer's precision claims; if not, the upper verification limit (UVL) should be checked.
```{r}
uvl_info <- calculate_uvl_info(aov_return = aov_t, cvr_or_sr = .43, cvwl_or_swl = .7)
uvl_info
```
The arguments are as follows:
- aov_return: Return of calculate_aov_info()
- nsamp: number of samples in the experiment. Default is 1
- cvr_or_sr: Desirable CV or S repeatability
- cvwl_or_swl: Desirable CV or S within-lab
Recheck whether the user repeatability (SR or CVR) < the UVL repeatability claim and the within-lab imprecision (SWL or CVWL) < the UVL within-lab claim.
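A minimal sketch of this verification (not taken from the EP15-A3 document itself), assuming `aov_t` and `uvl_info` from the chunks above and purely illustrative claim values:
```{r eval=FALSE}
cvr_claim <- 0.43   # hypothetical repeatability claim (CV)
cvwl_claim <- 0.7   # hypothetical within-lab claim (CV)
# first compare the user's imprecision estimates against the claims
aov_t$CVR <= cvr_claim
aov_t$CVWL <= cvwl_claim
# if either check fails, compare against the upper verification limits instead
aov_t$CVR <= uvl_info$cv_uvl_r
aov_t$CVWL <= uvl_info$cv_uvl_wl
```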
# Bias
For calculating a range for acceptable bias different scenarios and subscenarios are provided by the document
`calculate_bias_interval` is the function used:
```{r eval=FALSE}
calculate_bias_interval(
scenario,
nrun,
nrep,
SWL,
SR,
nsamples,
expected_mean,
user_mean,
...
)
```
These are the mandatory parameters:
- scenario: Chosen scenario from section 3.3 of EP15-A3
- nrun: Number of runs
- nrep: number of repetitions per run (n0)
- SWL: S within laboratory (obtained from anova)
- SR: S repeatability (obtained from anova)
- nsamples: total number of samples tested, usually 1
- expected_mean: Expected mean or TV
- user_mean: Mean of all samples (obtained from anova)
- ... : additional parameters necessary for processing the chosen scenario
**Scenario A**
Bona fide reference materials; the calculation varies depending on the information provided by the manufacturer.
- Sub scenario "u":
- manufacturer supplies a "standard error," "standard uncertainty" (u), or "combined standard uncertainty" (often denoted as uC ) for the TV
- Sub scenario "Uk":
- manufacturer provides an "expanded uncertainty" (U) for the TV and a "coverage factor" (k)
- Sub scenario "Ucoverage":
- manufacturer provides an "expanded uncertainty" (U) for the TV and a "coverage percentage"
- Sub scenario "lowerupper":
- manufacturer provides lower and upper limits and a "coverage percentage" (CI)
**Example**
```{r eval=FALSE}
calculate_bias_interval('A',
subscenario = 'Uk',
nrun = 7,
nrep = 5,
SWL = .042,
SR = .032,
nsamples = 2,
expected_mean = 1,
user_mean = .94
)
```
Will return
`Error in calculate_se_rm(scenario, additional_args) : For the chosen scenario U and k must be supplied`
So we need to pass the requested parameters:
```{r}
calculate_bias_interval('A',
subscenario = 'Uk',
nrun = 7,
nrep = 5,
SWL = .042,
SR = .032,
nsamples = 2,
expected_mean = 1,
user_mean = .94,
U = 140,
k = 1.96
)
```
**Scenario B and C**
When a reference material's TV is determined based on Proficiency Testing (PT) (B) or peer group results from an interlaboratory QC program (C)
Additional parameters necessary are sd_rm and nlab
**Example**
```{r}
calculate_bias_interval('C', nrun = 7,
nrep = 5,
SWL = .042,
SR = .032,
nsamples = 2,
expected_mean = 1,
user_mean = .94,
sd_rm = .05,
nlab = 43)
```
**Scenario D and E**
If the TV represents a conventional quantity value (D) or when working with a commercial QC material supplied
with a TV for which the standard error cannot be estimated (E)
```{r}
calculate_bias_interval('E',
nrun = 7,
nrep = 5,
SWL = .042,
SR = .032,
nsamples = 2,
expected_mean = 1,
user_mean = .94
)
```
**Bias conclusion**
If the user mean falls inside the `interval` object returned by `calculate_bias_interval()`, the result is not significant and the observed bias is within the manufacturer's claims.
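A minimal sketch of that check, reusing the scenario E call from above (purely illustrative):
```{r eval=FALSE}
interval_e <- calculate_bias_interval('E', nrun = 7, nrep = 5, SWL = .042,
                                      SR = .032, nsamples = 2,
                                      expected_mean = 1, user_mean = .94)
interval_e$interval  # acceptance range around the TV
interval_e$signif    # FALSE means the observed bias is not significant
```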
-------------------------------------------------------
Package repository: https://github.com/clauciorank/CLSIEP15
|
/scratch/gouwar.j/cran-all/cranData/CLSIEP15/inst/doc/CLSIEP15.Rmd
|
---
title: "CLSIEP15"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{CLSIEP15}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
**This package focuses on Clinical and Laboratory Standards Institute (CLSI) EP15-A3 calculations**
CLSI EP15-A3 provides guidance on the user verification of precision and the estimation of bias for laboratory test methods. It outlines the steps and procedures that clinical laboratories should follow to evaluate the performance of a test method they intend to implement.
This package is an R implementation of the calculations used in the document.
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
# Install
```{r eval=FALSE, include=FALSE}
devtools::install_github('clauciorank/CLSIEP15')
```
# Load
```{r setup}
library(CLSIEP15)
```
# Usage
**Create a table in the specified format**
**Wider Format**
```{r echo=FALSE}
knitr::kable(ferritin_wider)
```
**Long Format**
```{r echo=FALSE}
knitr::kable(head(ferritin_long, 12))
```
`ferritin_long` and `ferritin_wider` are provided as data in the package and can be used as example
**For Long Format**
```{r}
data <- create_table_ep_15(ferritin_long, data_type = 'long')
```
**For Wide Format**
```{r}
data <- create_table_ep_15(ferritin_wider)
```
# Precision
***Calculate Anova parameters and Imprecision Estimates***
```{r}
aov_t <- calculate_aov_infos(data)
aov_t
```
If the user repeatability (SR or CVR) < the repeatability claim and the within-lab imprecision (SWL or CVWL) < the within-lab claim, the
user has verified the manufacturer's precision claims; if not, the upper verification limit (UVL) should be checked.
```{r}
uvl_info <- calculate_uvl_info(aov_return = aov_t, cvr_or_sr = .43, cvwl_or_swl = .7)
uvl_info
```
The arguments are as follows:
- aov_return: Return of calculate_aov_info()
- nsamp: number of samples in the experiment. Default is 1
- cvr_or_sr: Desirable CV or S repeatability
- cvwl_or_swl: Desirable CV or S within-lab
Recheck whether the user repeatability (SR or CVR) < the UVL repeatability claim and the within-lab imprecision (SWL or CVWL) < the UVL within-lab claim.
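A minimal sketch of this verification (not taken from the EP15-A3 document itself), assuming `aov_t` and `uvl_info` from the chunks above and purely illustrative claim values:
```{r eval=FALSE}
cvr_claim <- 0.43   # hypothetical repeatability claim (CV)
cvwl_claim <- 0.7   # hypothetical within-lab claim (CV)
# first compare the user's imprecision estimates against the claims
aov_t$CVR <= cvr_claim
aov_t$CVWL <= cvwl_claim
# if either check fails, compare against the upper verification limits instead
aov_t$CVR <= uvl_info$cv_uvl_r
aov_t$CVWL <= uvl_info$cv_uvl_wl
```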
# Bias
For calculating a range for acceptable bias different scenarios and subscenarios are provided by the document
`calculate_bias_interval` is the function used:
```{r eval=FALSE}
calculate_bias_interval(
scenario,
nrun,
nrep,
SWL,
SR,
nsamples,
expected_mean,
user_mean,
...
)
```
These are the mandatory parameters:
- scenario: Chosen scenario from section 3.3 of EP15-A3
- nrun: Number of runs
- nrep: number of repetitions per run (n0)
- SWL: S within laboratory (obtained from anova)
- SR: S repeatability (obtained from anova)
- nsamples: total number of samples tested, usually 1
- expected_mean: Expected mean or TV
- user_mean: Mean of all samples (obtained from anova)
- ... : additional parameters necessary for processing the chosen scenario
**Scenario A**
Bona fide reference materials; the calculation varies depending on the information provided by the manufacturer.
- Sub scenario "u":
- manufacturer supplies a "standard error," "standard uncertainty" (u), or "combined standard uncertainty" (often denoted as uC ) for the TV
- Sub scenario "Uk":
- manufacturer provides an "expanded uncertainty" (U) for the TV and a "coverage factor" (k)
- Sub scenario "Ucoverage":
- manufacturer provides an "expanded uncertainty" (U) for the TV and a "coverage percentage"
- Sub scenario "lowerupper":
- manufacturer provides lower and upper limits and a "coverage percentage" (CI)
**Example**
```{r eval=FALSE}
calculate_bias_interval('A',
subscenario = 'Uk',
nrun = 7,
nrep = 5,
SWL = .042,
SR = .032,
nsamples = 2,
expected_mean = 1,
user_mean = .94
)
```
Will return
`Error in calculate_se_rm(scenario, additional_args) : For the chosen scenario U and k must be supplied`
So we need to pass the requested parameters:
```{r}
calculate_bias_interval('A',
subscenario = 'Uk',
nrun = 7,
nrep = 5,
SWL = .042,
SR = .032,
nsamples = 2,
expected_mean = 1,
user_mean = .94,
U = 140,
k = 1.96
)
```
**Scenario B and C**
When a reference material's TV is determined based on Proficiency Testing (PT) (B) or peer group results from an interlaboratory QC program (C)
Additional parameters necessary are sd_rm and nlab
**Example**
```{r}
calculate_bias_interval('C', nrun = 7,
nrep = 5,
SWL = .042,
SR = .032,
nsamples = 2,
expected_mean = 1,
user_mean = .94,
sd_rm = .05,
nlab = 43)
```
**Scenario D and E**
If the TV represents a conventional quantity value (D) or when working with a commercial QC material supplied
with a TV for which the standard error cannot be estimated (E)
```{r}
calculate_bias_interval('E',
nrun = 7,
nrep = 5,
SWL = .042,
SR = .032,
nsamples = 2,
expected_mean = 1,
user_mean = .94
)
```
**Bias conclusion**
If the user mean falls inside the `interval` object returned by `calculate_bias_interval()`, the result is not significant and the observed bias is within the manufacturer's claims.
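A minimal sketch of that check, reusing the scenario E call from above (purely illustrative):
```{r eval=FALSE}
interval_e <- calculate_bias_interval('E', nrun = 7, nrep = 5, SWL = .042,
                                      SR = .032, nsamples = 2,
                                      expected_mean = 1, user_mean = .94)
interval_e$interval  # acceptance range around the TV
interval_e$signif    # FALSE means the observed bias is not significant
```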
-------------------------------------------------------
Package repository: https://github.com/clauciorank/CLSIEP15
|
/scratch/gouwar.j/cran-all/cranData/CLSIEP15/vignettes/CLSIEP15.Rmd
|
#' Start CLUSTShiny
#' @title Launch 'CLUSTShiny' Interface
#' @return Nothing
#' @description CLUSTShiny() loads interactive user interface built using R 'shiny'.
#' @details The interactive user interface is to provide an easy way for cluster analysis and downloading relevant plots.
#' @keywords CLUSTShiny
#' @examples
#' if(interactive()){
#' library(rmarkdown)
#' CLUSTShiny()
#' }
CLUSTShiny <- function() {
rmarkdown::run(system.file("img", "CLUSTShiny.Rmd", package = "CLUSTShiny"))
Sys.setenv("R_TESTS" = "")
}
|
/scratch/gouwar.j/cran-all/cranData/CLUSTShiny/R/CLUSTShiny.R
|
---
title: "Cluster Analysis"
output: html_document
runtime: shiny
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
library(dplyr)
library(datasets)
library(psycho)
```
## Hierarchical and K-means Clustering
The distance measure used is Euclidean in case of both Hierarchical and K-means(Non-hierarchical) Clustering.
The method(algorithm) used is Ward's method for hierarchical clustering
and k-means algorithm for non-hierarchical clustering.
Hierarchical clustering is for exploration, as is the silhouette plot for detecting outliers.
K-means clustering is meant for confirmation and is usually done after hierarchical clustering.
There may be a difference between the cluster sizes from the two algorithms.
The final clusters and memberships are usually taken from k-means clustering.
```{r,echo=FALSE}
sidebarPanel(
checkboxInput("ex","Uncheck for using your own file",value = TRUE),
fileInput("file", "Upload the *.csv file with headers"),
numericInput("ncluster","Enter the number of clusters",value = 2),
checkboxInput("std","STANDARDIZE",value = FALSE),
downloadButton("downloaddata", "Download Dataset"),
downloadButton("downloadPlot", "Download Plot"),
downloadButton("downloadPlot2", "Download Dendrogram"),
downloadButton("downloadPlot3", "Download Silhouette"),
uiOutput("vx")
)
mainPanel(
tabsetPanel(type = "tab",
tabPanel("Hierarchical Clustering Visualization",plotOutput("HV")),
tabPanel("Detecting outliers : Silhouette Plot(hierarchical)",plotOutput("SP")),
tabPanel("K- means Clustering", verbatimTextOutput("AD") ),
tabPanel("Cluster Visualization(k-means)", plotOutput("MV") )
),
h6("", tags$img(src ="K.JPG", height= 400, width=400))
)
output$AD<-renderPrint({
if(input$ex == TRUE)
{data("iris")
data = iris}
else{
file1 = input$file
if(is.null(file1)){return()}
data = read.table(file = file1$datapath,sep =",",header = TRUE)
if(is.null(data())){return()}
}
ds = select(data,input$variablex)
if(input$std == TRUE)
{
ds = standardize(ds)
}
set.seed(1)
fitkmeans = kmeans(ds,centers = input$ncluster )
cat(sprintf("\nThe Cluster centres are as follows:\n"))
print(fitkmeans$centers)
cat(sprintf("\nThe Cluster sizes are as follows:\n"))
cat(sprintf("%d",fitkmeans$size))
})
output$MV<-renderPlot({
if(input$ex == TRUE)
{data("iris")
data = iris}
else{
file1 = input$file
if(is.null(file1)){return()}
data = read.table(file = file1$datapath,sep =",",header = TRUE)
if(is.null(data())){return()}
}
ds = data
ds = select(ds,input$variablex)
if(input$std == TRUE)
{
ds = standardize(ds)
}
set.seed(1)
fitkmeans = kmeans(ds,centers = input$ncluster )
ds$cluster = as.factor(fitkmeans$cluster)
if(ncol(ds)>2 && input$ncluster >1)
{ klaR:: partimat(formula = cluster~.,data = ds,method="lda")}
else
{
if(input$ncluster>=1 && ncol(ds)==2)
{ attach(ds)
boxplot(get(input$variablex)~cluster,col ="red",ylab = input$variablex,xlab = "cluster")
}
}
# }
})
output$vx <- renderUI({
if(input$ex == TRUE)
{data("iris")
data = iris}
else
{
file1 = input$file
if(is.null(file1)){return()}
data = read.table(file = file1$datapath,sep =",",header = TRUE)
if(is.null(data())){return()}
}
ds = select_if(data,is.numeric)
checkboxGroupInput("variablex","Select the set of quantitative variables",choices = colnames(ds),selected = colnames(ds) )
})
datasetInput1 <- reactive({
if(input$ex == TRUE)
{data("iris")
data = iris}
else
{
file1 = input$file
if(is.null(file1)){return()}
data = read.table(file = file1$datapath,sep =",",header = TRUE)
if(is.null(data())){return()}
}
ds = data
ds = select(ds,input$variablex)
if(input$std == TRUE)
{
ds = standardize(ds)
}
set.seed(1)
fitkmeans = kmeans(ds,centers = input$ncluster )
data$cluster = as.factor(fitkmeans$cluster)
data = data
})
output$downloaddata <- downloadHandler(
filename = function() {
filetitle = paste("dataset")
paste(filetitle, ".csv", sep = "")
},
content = function(file) {
write.csv(datasetInput1(), file, row.names = FALSE)
}
)
output$downloadPlot<- downloadHandler(
filename = function() {
paste("Discriminantplot", ".png", sep = "")
},
content = function(file) {
png(file)
if(input$ex == TRUE)
{data("iris")
data = iris}
else{
file1 = input$file
if(is.null(file1)){return()}
data = read.table(file = file1$datapath,sep =",",header = TRUE)
if(is.null(data())){return()}
}
ds = data
ds = select(ds,input$variablex)
if(input$std == TRUE)
{
ds = standardize(ds)
}
set.seed(1)
fitkmeans = kmeans(ds,centers = input$ncluster )
ds$cluster = as.factor(fitkmeans$cluster)
if(ncol(ds)>2 && input$ncluster >1)
{ klaR:: partimat(formula = cluster~.,data = ds,method="lda")}
else
{
if(input$ncluster>=1 && ncol(ds)==2)
{ attach(ds)
boxplot(get(input$variablex)~cluster,col ="red",ylab = input$variablex,xlab = "cluster")
}
}
# }
dev.off()
})
output$HV<-renderPlot({
if(input$ex == TRUE)
{data("iris")
data = iris}
else{
file1 = input$file
if(is.null(file1)){return()}
data = read.table(file = file1$datapath,sep =",",header = TRUE)
if(is.null(data())){return()}
}
ds = select(data,input$variablex)
if(input$std == TRUE)
{
ds = standardize(ds)
}
d = dist(ds,method = "euclidean") # Measure of similarity
fit = hclust(d,method = "ward.D") # Method of using similarity measure
plot(fit)
rect.hclust(fit,k=input$ncluster,border = "red")
#groups = cutree(fit,k=input$ncluster)
#plot(cluster::silhouette(groups,d))
})
output$downloadPlot2<- downloadHandler(
filename = function() {
paste("Dendogram", ".png", sep = "")
},
content = function(file) {
png(file)
if(input$ex == TRUE)
{data("iris")
data = iris}
else{
file1 = input$file
if(is.null(file1)){return()}
data = read.table(file = file1$datapath,sep =",",header = TRUE)
if(is.null(data())){return()}
}
ds = select(data,input$variablex)
if(input$std == TRUE)
{
ds = standardize(ds)
}
d = dist(ds,method = "euclidean") # Measure of similarity
fit = hclust(d,method = "ward.D") # Method of using similarity measure
plot(fit)
rect.hclust(fit,k=input$ncluster,border = "red")
dev.off()
})
output$SP<-renderPlot({
if(input$ex == TRUE)
{data("iris")
data = iris}
else{
file1 = input$file
if(is.null(file1)){return()}
data = read.table(file = file1$datapath,sep =",",header = TRUE)
if(is.null(data())){return()}
}
ds = select(data,input$variablex)
if(input$std == TRUE)
{
ds = standardize(ds)
}
d = dist(ds,method = "euclidean") # Measure of similarity
fit = hclust(d,method = "ward.D") # Method of using similarity measure
#plot(fit)
#rect.hclust(fit,k=input$ncluster,border = "red")
groups = cutree(fit,k=input$ncluster)
plot(cluster::silhouette(groups,d))
})
output$downloadPlot3<- downloadHandler(
filename = function() {
paste("Silhouette", ".png", sep = "")
},
content = function(file) {
png(file)
if(input$ex == TRUE)
{data("iris")
data = iris}
else{
file1 = input$file
if(is.null(file1)){return()}
data = read.table(file = file1$datapath,sep =",",header = TRUE)
if(is.null(data())){return()}
}
ds = select(data,input$variablex)
if(input$std == TRUE)
{
ds = standardize(ds)
}
d = dist(ds,method = "euclidean") # Measure of similarity
fit = hclust(d,method = "ward.D") # Method of using similarity measure
#plot(fit)
#rect.hclust(fit,k=input$ncluster,border = "red")
groups = cutree(fit,k=input$ncluster)
plot(cluster::silhouette(groups,d))
dev.off()
})
```
|
/scratch/gouwar.j/cran-all/cranData/CLUSTShiny/inst/CLUSTShiny.Rmd
|
---
title: "Cluster Analysis"
output: html_document
runtime: shiny
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
library(dplyr)
library(datasets)
library(psycho)
```
## Hierarchical and K-means Clustering
The distance measure used is Euclidean in case of both Hierarchical and K-means(Non-hierarchical) Clustering.
The method(algorithm) used is Ward's method for hierarchical clustering
and k-means algorithm for non-hierarchical clustering.
Hierarchical clustering is for exploration, as is the silhouette plot for detecting outliers.
K-means clustering is meant for confirmation and is usually done after hierarchical clustering.
There may be a difference between the cluster sizes from the two algorithms.
The final clusters and memberships are usually taken from k-means clustering.
```{r,echo=FALSE}
sidebarPanel(
checkboxInput("ex","Uncheck for using your own file",value = TRUE),
fileInput("file", "Upload the *.csv file with headers"),
numericInput("ncluster","Enter the number of clusters",value = 2),
checkboxInput("std","STANDARDIZE",value = FALSE),
downloadButton("downloaddata", "Download Dataset"),
downloadButton("downloadPlot", "Download Plot"),
downloadButton("downloadPlot2", "Download Dendrogram"),
downloadButton("downloadPlot3", "Download Silhouette"),
uiOutput("vx")
)
mainPanel(
tabsetPanel(type = "tab",
tabPanel("Hierarchical Clustering Visualization",plotOutput("HV")),
tabPanel("Detecting outliers : Silhouette Plot(hierarchical)",plotOutput("SP")),
tabPanel("K- means Clustering", verbatimTextOutput("AD") ),
tabPanel("Cluster Visualization(k-means)", plotOutput("MV") )
),
h6("", tags$img(src ="K.JPG", height= 400, width=400))
)
output$AD<-renderPrint({
if(input$ex == TRUE)
{data("iris")
data = iris}
else{
file1 = input$file
if(is.null(file1)){return()}
data = read.table(file = file1$datapath,sep =",",header = TRUE)
if(is.null(data())){return()}
}
ds = select(data,input$variablex)
if(input$std == TRUE)
{
ds = standardize(ds)
}
set.seed(1)
fitkmeans = kmeans(ds,centers = input$ncluster )
cat(sprintf("\nThe Cluster centres are as follows:\n"))
print(fitkmeans$centers)
cat(sprintf("\nThe Cluster sizes are as follows:\n"))
cat(sprintf("%d",fitkmeans$size))
})
output$MV<-renderPlot({
if(input$ex == TRUE)
{data("iris")
data = iris}
else{
file1 = input$file
if(is.null(file1)){return()}
data = read.table(file = file1$datapath,sep =",",header = TRUE)
if(is.null(data())){return()}
}
ds = data
ds = select(ds,input$variablex)
if(input$std == TRUE)
{
ds = standardize(ds)
}
set.seed(1)
fitkmeans = kmeans(ds,centers = input$ncluster )
ds$cluster = as.factor(fitkmeans$cluster)
if(ncol(ds)>2 && input$ncluster >1)
{ klaR:: partimat(formula = cluster~.,data = ds,method="lda")}
else
{
if(input$ncluster>=1 && ncol(ds)==2)
{ attach(ds)
boxplot(get(input$variablex)~cluster,col ="red",ylab = input$variablex,xlab = "cluster")
}
}
# }
})
output$vx <- renderUI({
if(input$ex == TRUE)
{data("iris")
data = iris}
else
{
file1 = input$file
if(is.null(file1)){return()}
data = read.table(file = file1$datapath,sep =",",header = TRUE)
if(is.null(data())){return()}
}
ds = select_if(data,is.numeric)
checkboxGroupInput("variablex","Select the set of quantitative variables",choices = colnames(ds),selected = colnames(ds) )
})
datasetInput1 <- reactive({
if(input$ex == TRUE)
{data("iris")
data = iris}
else
{
file1 = input$file
if(is.null(file1)){return()}
data = read.table(file = file1$datapath,sep =",",header = TRUE)
if(is.null(data())){return()}
}
ds = data
ds = select(ds,input$variablex)
if(input$std == TRUE)
{
ds = standardize(ds)
}
set.seed(1)
fitkmeans = kmeans(ds,centers = input$ncluster )
data$cluster = as.factor(fitkmeans$cluster)
data = data
})
output$downloaddata <- downloadHandler(
filename = function() {
filetitle = paste("dataset")
paste(filetitle, ".csv", sep = "")
},
content = function(file) {
write.csv(datasetInput1(), file, row.names = FALSE)
}
)
output$downloadPlot<- downloadHandler(
filename = function() {
paste("Discriminantplot", ".png", sep = "")
},
content = function(file) {
png(file)
if(input$ex == TRUE)
{data("iris")
data = iris}
else{
file1 = input$file
if(is.null(file1)){return()}
data = read.table(file = file1$datapath,sep =",",header = TRUE)
if(is.null(data())){return()}
}
ds = data
ds = select(ds,input$variablex)
if(input$std == TRUE)
{
ds = standardize(ds)
}
set.seed(1)
fitkmeans = kmeans(ds,centers = input$ncluster )
ds$cluster = as.factor(fitkmeans$cluster)
if(ncol(ds)>2 && input$ncluster >1)
{ klaR:: partimat(formula = cluster~.,data = ds,method="lda")}
else
{
if(input$ncluster>=1 && ncol(ds)==2)
{ attach(ds)
boxplot(get(input$variablex)~cluster,col ="red",ylab = input$variablex,xlab = "cluster")
}
}
# }
dev.off()
})
output$HV<-renderPlot({
if(input$ex == TRUE)
{data("iris")
data = iris}
else{
file1 = input$file
if(is.null(file1)){return()}
data = read.table(file = file1$datapath,sep =",",header = TRUE)
if(is.null(data())){return()}
}
ds = select(data,input$variablex)
if(input$std == TRUE)
{
ds = standardize(ds)
}
d = dist(ds,method = "euclidean") # Measure of similarity
fit = hclust(d,method = "ward.D") # Method of using similarity measure
plot(fit)
rect.hclust(fit,k=input$ncluster,border = "red")
#groups = cutree(fit,k=input$ncluster)
#plot(cluster::silhouette(groups,d))
})
output$downloadPlot2<- downloadHandler(
filename = function() {
paste("Dendogram", ".png", sep = "")
},
content = function(file) {
png(file)
if(input$ex == TRUE)
{data("iris")
data = iris}
else{
file1 = input$file
if(is.null(file1)){return()}
data = read.table(file = file1$datapath,sep =",",header = TRUE)
if(is.null(data())){return()}
}
ds = select(data,input$variablex)
if(input$std == TRUE)
{
ds = standardize(ds)
}
d = dist(ds, method = "euclidean") # Pairwise Euclidean distance (dissimilarity) matrix
fit = hclust(d, method = "ward.D") # Agglomerative clustering with Ward's linkage
plot(fit)
rect.hclust(fit,k=input$ncluster,border = "red")
dev.off()
})
output$SP<-renderPlot({
if(input$ex == TRUE)
{data("iris")
data = iris}
else{
file1 = input$file
if(is.null(file1)){return()}
data = read.table(file = file1$datapath,sep =",",header = TRUE)
if(is.null(data)){return()}
}
ds = select(data,input$variablex)
if(input$std == TRUE)
{
ds = standardize(ds)
}
d = dist(ds, method = "euclidean") # Pairwise Euclidean distance (dissimilarity) matrix
fit = hclust(d, method = "ward.D") # Agglomerative clustering with Ward's linkage
#plot(fit)
#rect.hclust(fit,k=input$ncluster,border = "red")
groups = cutree(fit,k=input$ncluster)
plot(cluster::silhouette(groups,d))
})
output$downloadPlot3<- downloadHandler(
filename = function() {
paste("Silhouette", ".png", sep = "")
},
content = function(file) {
png(file)
if(input$ex == TRUE)
{data("iris")
data = iris}
else{
file1 = input$file
if(is.null(file1)){return()}
data = read.table(file = file1$datapath,sep =",",header = TRUE)
if(is.null(data)){return()}
}
ds = select(data,input$variablex)
if(input$std == TRUE)
{
ds = standardize(ds)
}
d = dist(ds, method = "euclidean") # Pairwise Euclidean distance (dissimilarity) matrix
fit = hclust(d, method = "ward.D") # Agglomerative clustering with Ward's linkage
#plot(fit)
#rect.hclust(fit,k=input$ncluster,border = "red")
groups = cutree(fit,k=input$ncluster)
plot(cluster::silhouette(groups,d))
dev.off()
})
```
|
/scratch/gouwar.j/cran-all/cranData/CLUSTShiny/inst/img/CLUSTShiny.Rmd
|
#' @title Customer Lifetime Value Tools
#' @description
#' CLVTools is a toolbox of probabilistic customer attrition models
#' for non-contractual settings. It provides a framework that unifies
#' different probabilistic customer attrition models. This package
#' provides tools to estimate the number of future transactions of individual
#' customers as well as the probability of customers being alive in future
#' periods. Further, the average spending by customers can be estimated.
#' Multiplying the future transactions conditional on being alive and the
#' predicted individual spending per transaction results in an individual CLV value.
#'
#' The implemented models require transactional data from non-contractual
#' businesses (i.e. customers' purchase history).
#'
#' @seealso
#' Development for CLVTools can be followed via the GitHub repository
#' at \url{https://github.com/bachmannpatrick/CLVTools}.
#'
#' @examples
#'
#' \donttest{
#'
#' data("cdnow")
#'
#' # Create a CLV data object, split data in estimation and holdout sample
#' clv.data.cdnow <- clvdata(data.transactions = cdnow, date.format = "ymd",
#' time.unit = "week", estimation.split = 39, name.id = "Id")
#'
#' # summary of data
#' summary(clv.data.cdnow)
#'
#' # Fit a PNBD model without covariates on the first 39 periods
#' pnbd.cdnow <- pnbd(clv.data.cdnow,
#' start.params.model = c(r=0.5, alpha=8, s=0.5, beta=10))
#' # inspect fit
#' summary(pnbd.cdnow)
#'
#' # Predict 10 periods (weeks) ahead from estimation end
#' # and compare to actuals in this period
#' pred.out <- predict(pnbd.cdnow, prediction.end = 10)
#'
#' # Plot the fitted model to the actual repeat transactions
#' plot(pnbd.cdnow)
#'
#' }
#'
#' @docType package
#' @useDynLib CLVTools, .registration=TRUE
#' @importFrom Rcpp evalCpp
#' @import data.table
#' @importFrom methods setClass
"_PACKAGE"
|
/scratch/gouwar.j/cran-all/cranData/CLVTools/R/CLVTools.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' @name bgnbd_CET
#'
#' @templateVar name_model_full BG/NBD
#' @templateVar name_model_short bgnbd
#' @template template_titledescriptionreturn_CET
#'
#' @template template_params_bgnbd
#' @template template_params_rcppperiods
#' @template template_params_rcppxtxtcal
#' @template template_params_rcppcovmatrix
#' @template template_params_rcppvcovparams
#'
#' @templateVar name_params_cov_life vCovParams_life
#' @templateVar name_params_cov_trans vCovParams_trans
#' @template template_details_rcppcovmatrix
#'
#' @template template_references_bgnbd
#'
NULL
#' @name bgnbd_expectation
#' @title BG/NBD: Unconditional Expectation
#'
#' @template template_expectation_description
#'
#' @template template_params_bgnbd
#' @template template_expectation_params
#' @param vAlpha_i Vector of individual parameters alpha
#' @param vA_i Vector of individual parameters a
#' @param vB_i Vector of individual parameters b
#'
#' @template template_references_bgnbd
#'
#' @template template_expectation_return
#'
NULL
#' @name bgnbd_PAlive
#'
#' @templateVar name_model_full BG/NBD
#' @templateVar name_model_short bgnbd
#' @template template_titledescriptionreturn_palive
#'
#' @template template_params_bgnbd
#' @template template_params_rcppxtxtcal
#' @template template_params_rcppcovmatrix
#' @template template_params_rcppvcovparams
#'
#' @templateVar name_params_cov_life vCovParams_life
#' @templateVar name_params_cov_trans vCovParams_trans
#' @template template_details_rcppcovmatrix
#'
#' @template template_references_bgnbd
#'
NULL
#' @name bgnbd_LL
#'
#' @templateVar name_model_full BG/NBD
#' @templateVar name_model_short bgnbd
#' @templateVar model_params_ordered r, alpha_0, a, b
#' @template template_titleparamsdescriptionreturndetails_LL
#'
#' @template template_params_rcppxtxtcal
#' @template template_params_rcppcovmatrix
#'
#' @templateVar name_params_cov_life vLogparams
#' @templateVar name_params_cov_trans vLogparams
#' @template template_details_rcppcovmatrix
#'
#' @template template_references_bgnbd
#'
NULL
#' @name bgnbd_pmf
#' @templateVar name_model_full BG/NBD
#' @template template_pmf_titledescreturnpmfparams
#' @template template_params_bgnbd
#' @param vAlpha_i Vector of individual parameters alpha
#' @param vA_i Vector of individual parameters a
#' @param vB_i Vector of individual parameters b
#' @template template_references_bgnbd
#'
NULL
#' @rdname bgnbd_CET
bgnbd_nocov_CET <- function(r, alpha, a, b, dPeriods, vX, vT_x, vT_cal) {
.Call(`_CLVTools_bgnbd_nocov_CET`, r, alpha, a, b, dPeriods, vX, vT_x, vT_cal)
}
#' @rdname bgnbd_CET
bgnbd_staticcov_CET <- function(r, alpha, a, b, dPeriods, vX, vT_x, vT_cal, vCovParams_trans, vCovParams_life, mCov_trans, mCov_life) {
.Call(`_CLVTools_bgnbd_staticcov_CET`, r, alpha, a, b, dPeriods, vX, vT_x, vT_cal, vCovParams_trans, vCovParams_life, mCov_trans, mCov_life)
}
#' @rdname bgnbd_expectation
bgnbd_nocov_expectation <- function(r, alpha, a, b, vT_i) {
.Call(`_CLVTools_bgnbd_nocov_expectation`, r, alpha, a, b, vT_i)
}
#' @rdname bgnbd_expectation
bgnbd_staticcov_expectation <- function(r, vAlpha_i, vA_i, vB_i, vT_i) {
.Call(`_CLVTools_bgnbd_staticcov_expectation`, r, vAlpha_i, vA_i, vB_i, vT_i)
}
#' @rdname bgnbd_PAlive
bgnbd_nocov_PAlive <- function(r, alpha, a, b, vX, vT_x, vT_cal) {
.Call(`_CLVTools_bgnbd_nocov_PAlive`, r, alpha, a, b, vX, vT_x, vT_cal)
}
#' @rdname bgnbd_PAlive
bgnbd_staticcov_PAlive <- function(r, alpha, a, b, vX, vT_x, vT_cal, vCovParams_trans, vCovParams_life, mCov_trans, mCov_life) {
.Call(`_CLVTools_bgnbd_staticcov_PAlive`, r, alpha, a, b, vX, vT_x, vT_cal, vCovParams_trans, vCovParams_life, mCov_trans, mCov_life)
}
#' @rdname bgnbd_LL
bgnbd_nocov_LL_ind <- function(vLogparams, vX, vT_x, vT_cal) {
.Call(`_CLVTools_bgnbd_nocov_LL_ind`, vLogparams, vX, vT_x, vT_cal)
}
#' @rdname bgnbd_LL
bgnbd_nocov_LL_sum <- function(vLogparams, vX, vT_x, vT_cal) {
.Call(`_CLVTools_bgnbd_nocov_LL_sum`, vLogparams, vX, vT_x, vT_cal)
}
#' @rdname bgnbd_LL
bgnbd_staticcov_LL_ind <- function(vParams, vX, vT_x, vT_cal, mCov_life, mCov_trans) {
.Call(`_CLVTools_bgnbd_staticcov_LL_ind`, vParams, vX, vT_x, vT_cal, mCov_life, mCov_trans)
}
#' @rdname bgnbd_LL
bgnbd_staticcov_LL_sum <- function(vParams, vX, vT_x, vT_cal, mCov_life, mCov_trans) {
.Call(`_CLVTools_bgnbd_staticcov_LL_sum`, vParams, vX, vT_x, vT_cal, mCov_life, mCov_trans)
}
#' @rdname bgnbd_pmf
bgnbd_nocov_PMF <- function(r, alpha, a, b, x, vT_i) {
.Call(`_CLVTools_bgnbd_nocov_PMF`, r, alpha, a, b, x, vT_i)
}
#' @rdname bgnbd_pmf
bgnbd_staticcov_PMF <- function(r, x, vAlpha_i, vA_i, vB_i, vT_i) {
.Call(`_CLVTools_bgnbd_staticcov_PMF`, r, x, vAlpha_i, vA_i, vB_i, vT_i)
}
bgnbd_staticcov_alpha_i <- function(alpha_0, vCovParams_trans, mCov_trans) {
.Call(`_CLVTools_bgnbd_staticcov_alpha_i`, alpha_0, vCovParams_trans, mCov_trans)
}
bgnbd_staticcov_a_i <- function(a_0, vCovParams_life, mCov_life) {
.Call(`_CLVTools_bgnbd_staticcov_a_i`, a_0, vCovParams_life, mCov_life)
}
bgnbd_staticcov_b_i <- function(b_0, vCovParams_life, mCov_life) {
.Call(`_CLVTools_bgnbd_staticcov_b_i`, b_0, vCovParams_life, mCov_life)
}
#' @title GSL Hypergeometric 2F0 for equal length vectors
#'
#' @param vA Vector of values for parameter a
#' @param vB Vector of values for parameter b
#' @param vZ Vector of values for parameter z
#'
#' @description Calculate the hypergeometric 2F0 using the GSL library (gsl_sf_hyperg_2F0_e)
#' @return List with vector of values and vector of gsl status codes
#' @keywords internal
vec_gsl_hyp2f0_e <- function(vA, vB, vZ) {
.Call(`_CLVTools_vec_gsl_hyp2f0_e`, vA, vB, vZ)
}
#' @title GSL Hypergeometric 2F1 for equal length vectors
#'
#' @param vA Vector of values for parameter a
#' @param vB Vector of values for parameter b
#' @param vC Vector of values for parameter c
#' @param vZ Vector of values for parameter z
#'
#' @description Calculate the hypergeometric 2F1 using the GSL library (gsl_sf_hyperg_2F1_e)
#' @return List with vector of values and vector of gsl status codes
#' @keywords internal
vec_gsl_hyp2f1_e <- function(vA, vB, vC, vZ) {
.Call(`_CLVTools_vec_gsl_hyp2f1_e`, vA, vB, vC, vZ)
}
#' @title Gamma-Gamma: Log-Likelihood Function
#'
#' @description
#' Calculates the Log-Likelihood value for the Gamma-Gamma model.
#'
#' @param vLogparams a vector containing the log of the parameters p, q, gamma
#' @param vX frequency vector of length n counting the numbers of purchases
#' @param vM_x the observed average spending for every customer during the calibration time.
#'
#' @details
#' \code{vLogparams} is a vector with the parameters for the Gamma-Gamma model.
#' It has three parameters (p, q, gamma). The scale parameter for each transaction
#' is distributed across customers according to a gamma distribution with
#' parameters q (shape) and gamma (scale).
#'
#'@return
#' Returns the Log-Likelihood value for the Gamma-Gamma model.
#'
#' @template template_references_gg
#'
#'
gg_LL <- function(vLogparams, vX, vM_x) {
.Call(`_CLVTools_gg_LL`, vLogparams, vX, vM_x)
}
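# Illustrative sketch (hypothetical values, kept inert): gg_LL() expects p, q and gamma
# on the log scale, together with the per-customer number of purchases vX and the
# per-customer mean spending vM_x.
if (FALSE) {
  vLogparams <- log(c(p = 1.5, q = 3, gamma = 30))   # hypothetical parameter values
  vX <- c(2, 5, 1)                                   # number of purchases per customer
  vM_x <- c(35.2, 50.1, 20)                          # observed mean spending per customer
  gg_LL(vLogparams = vLogparams, vX = vX, vM_x = vM_x)
}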
#' @name ggomnbd_CET
#'
#' @templateVar name_model_full GGompertz/NBD
#' @templateVar name_model_short ggomnbd
#' @template template_titledescriptionreturn_CET
#'
#' @template template_params_ggomnbd
#' @template template_params_rcppperiods
#' @template template_params_rcppxtxtcal
#' @template template_params_rcppcovmatrix
#' @template template_params_rcppvcovparams
#'
#' @templateVar name_params_cov_life vCovParams_life
#' @templateVar name_params_cov_trans vCovParams_trans
#' @template template_details_rcppcovmatrix
#'
#' @template template_references_ggomnbd
#'
NULL
#' @name ggomnbd_expectation
#' @title GGompertz/NBD: Unconditional Expectation
#'
#' @template template_expectation_description
#'
#' @template template_params_ggomnbd
#' @template template_expectation_params
#' @param vAlpha_i Vector of individual parameters alpha
#' @param vBeta_i Vector of individual parameters beta
#'
#' @template template_references_ggomnbd
#'
#' @template template_expectation_return
#'
NULL
#' @name ggomnbd_LL
#'
#' @templateVar name_model_full GGompertz/NBD
#' @templateVar name_model_short ggomnbd
#' @templateVar model_params_ordered r, alpha_0, b, s, beta_0
#' @template template_titleparamsdescriptionreturndetails_LL
#'
#' @template template_params_rcppxtxtcal
#' @template template_params_rcppcovmatrix
#'
#' @templateVar name_params_cov_life vParams
#' @templateVar name_params_cov_trans vParams
#' @template template_details_rcppcovmatrix
#'
#' @template template_references_ggomnbd
#'
NULL
#' @name ggomnbd_PAlive
#'
#' @templateVar name_model_full GGompertz/NBD
#' @templateVar name_model_short ggomnbd
#' @template template_titledescriptionreturn_palive
#'
#' @template template_params_ggomnbd
#' @template template_params_rcppxtxtcal
#' @template template_params_rcppcovmatrix
#' @template template_params_rcppvcovparams
#'
#' @templateVar name_params_cov_life vCovParams_life
#' @templateVar name_params_cov_trans vCovParams_trans
#' @template template_details_rcppcovmatrix
#'
#' @template template_references_ggomnbd
#'
NULL
#' @rdname ggomnbd_CET
ggomnbd_nocov_CET <- function(r, alpha_0, b, s, beta_0, dPeriods, vX, vT_x, vT_cal) {
.Call(`_CLVTools_ggomnbd_nocov_CET`, r, alpha_0, b, s, beta_0, dPeriods, vX, vT_x, vT_cal)
}
#' @rdname ggomnbd_CET
ggomnbd_staticcov_CET <- function(r, alpha_0, b, s, beta_0, dPeriods, vX, vT_x, vT_cal, vCovParams_trans, vCovParams_life, mCov_life, mCov_trans) {
.Call(`_CLVTools_ggomnbd_staticcov_CET`, r, alpha_0, b, s, beta_0, dPeriods, vX, vT_x, vT_cal, vCovParams_trans, vCovParams_life, mCov_life, mCov_trans)
}
#' @rdname ggomnbd_expectation
ggomnbd_nocov_expectation <- function(r, alpha_0, b, s, beta_0, vT_i) {
.Call(`_CLVTools_ggomnbd_nocov_expectation`, r, alpha_0, b, s, beta_0, vT_i)
}
#' @rdname ggomnbd_expectation
ggomnbd_staticcov_expectation <- function(r, b, s, vAlpha_i, vBeta_i, vT_i) {
.Call(`_CLVTools_ggomnbd_staticcov_expectation`, r, b, s, vAlpha_i, vBeta_i, vT_i)
}
#' @rdname ggomnbd_LL
ggomnbd_nocov_LL_ind <- function(vLogparams, vX, vT_x, vT_cal) {
.Call(`_CLVTools_ggomnbd_nocov_LL_ind`, vLogparams, vX, vT_x, vT_cal)
}
#' @rdname ggomnbd_LL
ggomnbd_nocov_LL_sum <- function(vLogparams, vX, vT_x, vT_cal) {
.Call(`_CLVTools_ggomnbd_nocov_LL_sum`, vLogparams, vX, vT_x, vT_cal)
}
#' @rdname ggomnbd_LL
ggomnbd_staticcov_LL_ind <- function(vParams, vX, vT_x, vT_cal, mCov_life, mCov_trans) {
.Call(`_CLVTools_ggomnbd_staticcov_LL_ind`, vParams, vX, vT_x, vT_cal, mCov_life, mCov_trans)
}
#' @rdname ggomnbd_LL
ggomnbd_staticcov_LL_sum <- function(vParams, vX, vT_x, vT_cal, mCov_life, mCov_trans) {
.Call(`_CLVTools_ggomnbd_staticcov_LL_sum`, vParams, vX, vT_x, vT_cal, mCov_life, mCov_trans)
}
ggomnbd_staticcov_alpha_i <- function(alpha_0, vCovParams_trans, mCov_trans) {
.Call(`_CLVTools_ggomnbd_staticcov_alpha_i`, alpha_0, vCovParams_trans, mCov_trans)
}
ggomnbd_staticcov_beta_i <- function(beta_0, vCovParams_life, mCov_life) {
.Call(`_CLVTools_ggomnbd_staticcov_beta_i`, beta_0, vCovParams_life, mCov_life)
}
#' @rdname ggomnbd_PAlive
ggomnbd_staticcov_PAlive <- function(r, alpha_0, b, s, beta_0, vX, vT_x, vT_cal, vCovParams_trans, vCovParams_life, mCov_life, mCov_trans) {
.Call(`_CLVTools_ggomnbd_staticcov_PAlive`, r, alpha_0, b, s, beta_0, vX, vT_x, vT_cal, vCovParams_trans, vCovParams_life, mCov_life, mCov_trans)
}
#' @rdname ggomnbd_PAlive
ggomnbd_nocov_PAlive <- function(r, alpha_0, b, s, beta_0, vX, vT_x, vT_cal) {
.Call(`_CLVTools_ggomnbd_nocov_PAlive`, r, alpha_0, b, s, beta_0, vX, vT_x, vT_cal)
}
#' @name pnbd_CET
#'
#' @templateVar name_model_full Pareto/NBD
#' @templateVar name_model_short pnbd
#' @template template_titledescriptionreturn_CET
#'
#' @template template_params_pnbd
#' @template template_params_rcppperiods
#' @template template_params_rcppxtxtcal
#' @template template_params_rcppcovmatrix
#' @template template_params_rcppvcovparams
#'
#' @templateVar name_params_cov_life vCovParams_life
#' @templateVar name_params_cov_trans vCovParams_trans
#' @template template_details_rcppcovmatrix
#'
#' @template template_references_pnbd
#'
NULL
#' @name pnbd_DERT
#'
#' @title Pareto/NBD: Discounted Expected Residual Transactions
#'
#' @description
#' Calculates the discounted expected residual transactions.
#'
#' \describe{
#' \item{\code{pnbd_nocov_DERT}}{ Discounted expected residual transactions for the Pareto/NBD model without covariates}
#' \item{\code{pnbd_staticcov_DERT}}{ Discounted expected residual transactions for the Pareto/NBD model with static covariates}
#' }
#'
#' @template template_params_pnbd
#' @template template_params_rcppxtxtcal
#' @template template_params_rcppcovmatrix
#' @template template_params_rcppvcovparams
#' @param continuous_discount_factor continuous discount factor to use
#'
#'
#' @templateVar name_params_cov_life vCovParams_life
#' @templateVar name_params_cov_trans vCovParams_trans
#' @template template_details_rcppcovmatrix
#'
#' @return
#' Returns a vector with the DERT for each customer.
#'
#' @template template_references_pnbd
#'
#'
NULL
#' @name pnbd_expectation
#' @title Pareto/NBD: Unconditional Expectation
#'
#' @template template_expectation_description
#'
#' @template template_params_pnbd
#' @template template_expectation_params
#' @param vAlpha_i Vector of individual parameters alpha
#' @param vBeta_i Vector of individual parameters beta
#'
#'
#' @template template_references_pnbd
#'
#' @template template_expectation_return
#'
NULL
#' @name pnbd_LL
#'
#' @templateVar name_model_full Pareto/NBD
#' @templateVar name_model_short pnbd
#' @templateVar model_params_ordered r, alpha_0, s, beta_0
#' @template template_titleparamsdescriptionreturndetails_LL
#'
#' @template template_params_rcppxtxtcal
#' @template template_params_rcppcovmatrix
#'
#' @templateVar name_params_cov_life vParams
#' @templateVar name_params_cov_trans vParams
#' @template template_details_rcppcovmatrix
#'
#' @template template_references_pnbd
#'
NULL
#' @name pnbd_PAlive
#'
#' @templateVar name_model_full Pareto/NBD
#' @templateVar name_model_short pnbd
#' @template template_titledescriptionreturn_palive
#'
#' @template template_params_pnbd
#' @template template_params_rcppxtxtcal
#' @template template_params_rcppcovmatrix
#' @template template_params_rcppvcovparams
#'
#' @templateVar name_params_cov_life vCovParams_life
#' @templateVar name_params_cov_trans vCovParams_trans
#' @template template_details_rcppcovmatrix
#'
#' @template template_references_pnbd
#'
NULL
#' @name pnbd_pmf
#' @templateVar name_model_full Pareto/NBD
#' @template template_pmf_titledescreturnpmfparams
#' @template template_params_pnbd
#' @param vAlpha_i Vector of individual parameters alpha.
#' @param vBeta_i Vector of individual parameters beta.
#' @template template_references_pnbd
#'
NULL
#' @rdname pnbd_CET
pnbd_nocov_CET <- function(r, alpha_0, s, beta_0, dPeriods, vX, vT_x, vT_cal) {
.Call(`_CLVTools_pnbd_nocov_CET`, r, alpha_0, s, beta_0, dPeriods, vX, vT_x, vT_cal)
}
#' @rdname pnbd_CET
pnbd_staticcov_CET <- function(r, alpha_0, s, beta_0, dPeriods, vX, vT_x, vT_cal, vCovParams_trans, vCovParams_life, mCov_trans, mCov_life) {
.Call(`_CLVTools_pnbd_staticcov_CET`, r, alpha_0, s, beta_0, dPeriods, vX, vT_x, vT_cal, vCovParams_trans, vCovParams_life, mCov_trans, mCov_life)
}
#' @rdname pnbd_DERT
pnbd_nocov_DERT <- function(r, alpha_0, s, beta_0, continuous_discount_factor, vX, vT_x, vT_cal) {
.Call(`_CLVTools_pnbd_nocov_DERT`, r, alpha_0, s, beta_0, continuous_discount_factor, vX, vT_x, vT_cal)
}
#' @rdname pnbd_DERT
pnbd_staticcov_DERT <- function(r, alpha_0, s, beta_0, continuous_discount_factor, vX, vT_x, vT_cal, mCov_life, mCov_trans, vCovParams_life, vCovParams_trans) {
.Call(`_CLVTools_pnbd_staticcov_DERT`, r, alpha_0, s, beta_0, continuous_discount_factor, vX, vT_x, vT_cal, mCov_life, mCov_trans, vCovParams_life, vCovParams_trans)
}
#' @rdname pnbd_expectation
pnbd_nocov_expectation <- function(r, s, alpha_0, beta_0, vT_i) {
.Call(`_CLVTools_pnbd_nocov_expectation`, r, s, alpha_0, beta_0, vT_i)
}
#' @rdname pnbd_expectation
pnbd_staticcov_expectation <- function(r, s, vAlpha_i, vBeta_i, vT_i) {
.Call(`_CLVTools_pnbd_staticcov_expectation`, r, s, vAlpha_i, vBeta_i, vT_i)
}
#' @rdname pnbd_LL
pnbd_nocov_LL_ind <- function(vLogparams, vX, vT_x, vT_cal) {
.Call(`_CLVTools_pnbd_nocov_LL_ind`, vLogparams, vX, vT_x, vT_cal)
}
#' @rdname pnbd_LL
pnbd_nocov_LL_sum <- function(vLogparams, vX, vT_x, vT_cal) {
.Call(`_CLVTools_pnbd_nocov_LL_sum`, vLogparams, vX, vT_x, vT_cal)
}
#' @rdname pnbd_LL
pnbd_staticcov_LL_ind <- function(vParams, vX, vT_x, vT_cal, mCov_life, mCov_trans) {
.Call(`_CLVTools_pnbd_staticcov_LL_ind`, vParams, vX, vT_x, vT_cal, mCov_life, mCov_trans)
}
#' @rdname pnbd_LL
pnbd_staticcov_LL_sum <- function(vParams, vX, vT_x, vT_cal, mCov_life, mCov_trans) {
.Call(`_CLVTools_pnbd_staticcov_LL_sum`, vParams, vX, vT_x, vT_cal, mCov_life, mCov_trans)
}
pnbd_staticcov_alpha_i <- function(alpha_0, vCovParams_trans, mCov_trans) {
.Call(`_CLVTools_pnbd_staticcov_alpha_i`, alpha_0, vCovParams_trans, mCov_trans)
}
pnbd_staticcov_beta_i <- function(beta_0, vCovParams_life, mCov_life) {
.Call(`_CLVTools_pnbd_staticcov_beta_i`, beta_0, vCovParams_life, mCov_life)
}
#' @rdname pnbd_PAlive
pnbd_nocov_PAlive <- function(r, alpha_0, s, beta_0, vX, vT_x, vT_cal) {
.Call(`_CLVTools_pnbd_nocov_PAlive`, r, alpha_0, s, beta_0, vX, vT_x, vT_cal)
}
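# Illustrative sketch (hypothetical values, kept inert): P(alive) for three customers from
# their CBS summaries x (repeat transactions), t.x (time of last repeat transaction) and
# T.cal (length of the calibration period), all expressed in the time units used for fitting.
if (FALSE) {
  pnbd_nocov_PAlive(r = 0.55, alpha_0 = 10.6, s = 0.61, beta_0 = 11.7,
                    vX = c(0, 3, 12), vT_x = c(0, 20.5, 38.1), vT_cal = c(39, 39, 39))
}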
#' @rdname pnbd_PAlive
pnbd_staticcov_PAlive <- function(r, alpha_0, s, beta_0, vX, vT_x, vT_cal, vCovParams_trans, vCovParams_life, mCov_trans, mCov_life) {
.Call(`_CLVTools_pnbd_staticcov_PAlive`, r, alpha_0, s, beta_0, vX, vT_x, vT_cal, vCovParams_trans, vCovParams_life, mCov_trans, mCov_life)
}
#' @rdname pnbd_pmf
pnbd_nocov_PMF <- function(r, alpha_0, s, beta_0, x, vT_i) {
.Call(`_CLVTools_pnbd_nocov_PMF`, r, alpha_0, s, beta_0, x, vT_i)
}
#' @rdname pnbd_pmf
pnbd_staticcov_PMF <- function(r, s, x, vAlpha_i, vBeta_i, vT_i) {
.Call(`_CLVTools_pnbd_staticcov_PMF`, r, s, x, vAlpha_i, vBeta_i, vT_i)
}
pnbd_dyncov_LL_negsum <- function(params, X, t_x, T_cal, d_omega, walkinfo_aux_life, walkinfo_real_life, walkinfo_aux_trans, walkinfo_real_trans, walkinfo_trans_real_from, walkinfo_trans_real_to, covdata_aux_life, covdata_real_life, covdata_aux_trans, covdata_real_trans) {
.Call(`_CLVTools_pnbd_dyncov_LL_negsum`, params, X, t_x, T_cal, d_omega, walkinfo_aux_life, walkinfo_real_life, walkinfo_aux_trans, walkinfo_real_trans, walkinfo_trans_real_from, walkinfo_trans_real_to, covdata_aux_life, covdata_real_life, covdata_aux_trans, covdata_real_trans)
}
pnbd_dyncov_LL_ind <- function(params, X, t_x, T_cal, d_omega, walkinfo_aux_life, walkinfo_real_life, walkinfo_aux_trans, walkinfo_real_trans, walkinfo_trans_real_from, walkinfo_trans_real_to, covdata_aux_life, covdata_real_life, covdata_aux_trans, covdata_real_trans, return_intermediate_results = FALSE) {
.Call(`_CLVTools_pnbd_dyncov_LL_ind`, params, X, t_x, T_cal, d_omega, walkinfo_aux_life, walkinfo_real_life, walkinfo_aux_trans, walkinfo_real_trans, walkinfo_trans_real_from, walkinfo_trans_real_to, covdata_aux_life, covdata_real_life, covdata_aux_trans, covdata_real_trans, return_intermediate_results)
}
|
/scratch/gouwar.j/cran-all/cranData/CLVTools/R/RcppExports.R
|
# Predict Generics -------------------------------------------------------
# The S4 generic is defined explicitly for clarity, instead of relying on it as a side-effect of
# only defining the method with setMethod
# As explained in ?Methods_for_S3 and per Martin Morgan's answer (https://stackoverflow.com/questions/32512785/properly-specify-s4-generics)
# Needs:
# setClass
# S3 implementation fun.class
# S4 method setMethod that dispatches to S3 implementation
# S3method(fun, class)
# exportMethods(fun)
setGeneric(name = "predict")
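# Illustrative sketch of this pattern for a hypothetical generic fun() and class "myclass"
# (hypothetical names, kept inert; the actual methods live in the model-specific files):
if (FALSE) {
  setClass("myclass", slots = c(x = "numeric"))
  fun.myclass <- function(object, ...) { object@x }             # S3 implementation
  setGeneric("fun", def = function(object, ...) standardGeneric("fun"))
  setMethod("fun", signature(object = "myclass"),                # S4 method dispatching to the S3 implementation
            definition = function(object, ...) fun.myclass(object, ...))
  # In NAMESPACE (via roxygen tags): S3method(fun, myclass) and exportMethods(fun)
}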
# Controlflows -------------------------------------------------------------------------------------------------
# Steps performed by all models but different between base (no cov) and covariate models
# . Estimate ---------------------------------------------------------------------------------------------------
setGeneric("clv.controlflow.estimate.check.inputs", def=function(clv.fitted, start.params.model, optimx.args, verbose,...)
standardGeneric("clv.controlflow.estimate.check.inputs"))
setGeneric("clv.controlflow.estimate.put.inputs", def=function(clv.fitted, verbose, ...)
standardGeneric("clv.controlflow.estimate.put.inputs"))
setGeneric("clv.controlflow.estimate.generate.start.params", def=function(clv.fitted, start.params.model, verbose,...)
standardGeneric("clv.controlflow.estimate.generate.start.params"))
setGeneric("clv.controlflow.estimate.prepare.optimx.args", def=function(clv.fitted, start.params.all)
standardGeneric("clv.controlflow.estimate.prepare.optimx.args"))
setGeneric("clv.controlflow.estimate.process.post.estimation", def=function(clv.fitted, res.optimx)
standardGeneric("clv.controlflow.estimate.process.post.estimation"))
# . Predict -----------------------------------------------------------------------------------------------
setGeneric("clv.controlflow.predict.check.inputs", def = function(clv.fitted, verbose, ...)
standardGeneric("clv.controlflow.predict.check.inputs"))
setGeneric("clv.controlflow.predict.set.prediction.params", def = function(clv.fitted)
standardGeneric("clv.controlflow.predict.set.prediction.params"))
setGeneric("clv.controlflow.predict.build.result.table", def = function(clv.fitted, verbose, ...)
standardGeneric("clv.controlflow.predict.build.result.table"))
setGeneric(name = "clv.controlflow.predict.get.has.actuals", def = function(clv.fitted, dt.predictions)
standardGeneric("clv.controlflow.predict.get.has.actuals"))
setGeneric(name = "clv.controlflow.predict.add.actuals", def = function(clv.fitted, dt.predictions, has.actuals, verbose, ...)
standardGeneric("clv.controlflow.predict.add.actuals"))
setGeneric(name = "clv.controlflow.predict.post.process.prediction.table", def = function(clv.fitted, dt.predictions, has.actuals, verbose, ...)
standardGeneric("clv.controlflow.predict.post.process.prediction.table"))
# .. Newdata: replace data in existing model -----------------------------------------------------------------
# For plot and predict
setGeneric("clv.controlflow.check.newdata", def = function(clv.fitted, user.newdata, ...)
standardGeneric("clv.controlflow.check.newdata"))
# .. Prediction params -------------------------------------------------------------------------------------
# Check whether prediction params are ok to predict/plot
setGeneric("clv.controlflow.check.prediction.params", def = function(clv.fitted)
standardGeneric("clv.controlflow.check.prediction.params"))
# . Plot ----------------------------------------------------------------------------------------------------
# clv.controlflow.plot.check.inputs is needed for fitted.dyncov models only to check dyncov length
setGeneric("clv.controlflow.plot.check.inputs", def = function(obj, prediction.end, cumulative, plot, label.line, verbose)
standardGeneric("clv.controlflow.plot.check.inputs"))
# Model specific steps ------------------------------------------------------------------------------------------------------------
# . For all (base) models -----------------------------------------------------------------------------------------------------------
# .. Estimate ----------------------------------------------------------------------------------------------------------------------
# Perform model specific checks on user inputs to estimate
setGeneric("clv.model.check.input.args", def = function(clv.model, clv.fitted, start.params.model, optimx.args, verbose, ...)
standardGeneric("clv.model.check.input.args"))
# Store additional arguments potentially given in estimate
setGeneric("clv.model.put.estimation.input", def = function(clv.model, ...)
standardGeneric("clv.model.put.estimation.input"))
setGeneric(name="clv.model.generate.start.param.cor", def = function(clv.model, start.param.cor, transformed.start.params.model)
standardGeneric("clv.model.generate.start.param.cor"))
# Finish the arguments to optimx with model specific arguments (mostly LL)
setGeneric(name="clv.model.prepare.optimx.args", def=function(clv.model, clv.fitted, prepared.optimx.args)
standardGeneric("clv.model.prepare.optimx.args"))
# Transform standard or user given start params to optimizer (prefixed) scale
setGeneric(name="clv.model.transform.start.params.model", def=function(clv.model, original.start.params.model)
standardGeneric("clv.model.transform.start.params.model"))
# Transform prefixed params to original scale
setGeneric(name="clv.model.backtransform.estimated.params.model", def=function(clv.model, prefixed.params.model)
standardGeneric("clv.model.backtransform.estimated.params.model"))
# ie post.estimation.steps
setGeneric(name="clv.model.process.post.estimation", def=function(clv.model, clv.fitted, res.optimx)
standardGeneric("clv.model.process.post.estimation"))
# . Density for spending models
setGeneric(name="clv.model.probability.density", def=function(clv.model, x, clv.fitted)
standardGeneric("clv.model.probability.density"))
# .. Correlation ---------------------------------------------------------------------------------
# Whether the model in general supports life/trans correlation
setGeneric(name="clv.model.supports.correlation", def = function(clv.model)
standardGeneric("clv.model.supports.correlation"))
# Whether this fit used correlation
setGeneric(name="clv.model.estimation.used.correlation", def = function(clv.model)
standardGeneric("clv.model.estimation.used.correlation"))
# Adds the correlation parameter to a given vector of params after reading it from the optimx results
setGeneric(name="clv.model.coef.add.correlation", def = function(clv.model, last.row.optimx.coef, original.scale.params)
standardGeneric("clv.model.coef.add.correlation"))
setGeneric(name="clv.model.m.to.cor", def = function(clv.model, prefixed.params.model, param.m)
standardGeneric("clv.model.m.to.cor"))
setGeneric(name="clv.model.cor.to.m", def = function(clv.model, prefixed.params.model, param.cor)
standardGeneric("clv.model.cor.to.m"))
# .. Predict ----------------------------------------------------------------------------------------------------------------------
# Predict clv per model
setGeneric(name = "clv.model.predict", def = function(clv.model, clv.fitted, dt.predictions, verbose, ...)
standardGeneric("clv.model.predict"))
setGeneric(name = "clv.model.expectation", def = function(clv.model, clv.fitted, dt.expectation.seq, verbose)
standardGeneric("clv.model.expectation"))
# .. Generics --------------------------------------------------------------------------------------------------------------------
# return diag matrix to correct for transformations because inv(hessian) != vcov for transformed params
setGeneric(name="clv.model.vcov.jacobi.diag", def=function(clv.model, clv.fitted, prefixed.params)
standardGeneric("clv.model.vcov.jacobi.diag"))
# .. Newdata ---------------------------------------------------------------------------------------------------------------
# Do the steps necessary to integrate user newdata in the fitted model (ie do cbs etc)
setGeneric(name="clv.model.process.newdata", def=function(clv.model, clv.fitted, user.newdata, verbose)
standardGeneric("clv.model.process.newdata"))
setGeneric(name="clv.model.pmf", def=function(clv.model, clv.fitted, x)
standardGeneric("clv.model.pmf"))
# . For covariate models -----------------------------------------------------------------------------------------------------------
# .. Estimate -----------------------------------------------------------------------------------------------------------
# for a single process, because could be that start params for one process were different
setGeneric("clv.model.transform.start.params.cov", def = function(clv.model, start.params.cov)
standardGeneric("clv.model.transform.start.params.cov"))
# Transform prefixed params to original scale
setGeneric(name="clv.model.backtransform.estimated.params.cov", def=function(clv.model, prefixed.params.cov)
standardGeneric("clv.model.backtransform.estimated.params.cov"))
# clv.time ----------------------------------------------------------------------------------------------------
setGeneric("clv.time.epsilon", function(clv.time)
standardGeneric("clv.time.epsilon"))
# convert user given date/datetimes
setGeneric("clv.time.convert.user.input.to.timepoint", function(clv.time, user.timepoint)
standardGeneric("clv.time.convert.user.input.to.timepoint"))
setGeneric("clv.time.interval.in.number.tu", def = function(clv.time, interv)
standardGeneric("clv.time.interval.in.number.tu"))
setGeneric("clv.time.number.timeunits.to.timeperiod", function(clv.time, user.number.periods)
standardGeneric("clv.time.number.timeunits.to.timeperiod"))
setGeneric("clv.time.tu.to.ly", function(clv.time)
standardGeneric("clv.time.tu.to.ly"))
setGeneric("clv.time.floor.date", function(clv.time, timepoint)
standardGeneric("clv.time.floor.date"))
# only for pnbd dyncov createwalks
setGeneric("clv.time.ceiling.date", function(clv.time, timepoint)
standardGeneric("clv.time.ceiling.date"))
setGeneric("clv.time.format.timepoint", function(clv.time, timepoint)
standardGeneric("clv.time.format.timepoint"))
# S3 Generics ---------------------------------------------------------------
#' Coerce to clv.data object
#'
#' Functions to coerce transaction data to a \code{clv.data} object.
#'
#' @param x Transaction data.
#' @templateVar name_param_trans x
#' @template template_params_clvdata
#' @template template_param_dots
#'
#' @details
#' See section "Details" of \link{clvdata} for more details on parameters and usage.
#'
#' @examples
#' \donttest{ # don't test because ncpu=2 limit on cran (too fast)
#' data(cdnow)
#'
#' # Turn data.table of transaction data into a clv.data object,
#' # using default date format and column names but no holdout period
#' clv.cdnow <- as.clv.data(cdnow)
#' }
#'
#' @export
as.clv.data <- function(x,
date.format="ymd", time.unit="weeks",
estimation.split = NULL,
name.id="Id", name.date="Date", name.price="Price",
...){
UseMethod("as.clv.data", x)
}
|
/scratch/gouwar.j/cran-all/cranData/CLVTools/R/all_generics.R
|
# This dummy function definition is included with the package to ensure that
# 'tools::package_native_routine_registration_skeleton()' generates the required
# registration info for the 'run_testthat_tests' symbol.
(function() {
.Call("run_testthat_tests", FALSE, PACKAGE = "CLVTools")
})
|
/scratch/gouwar.j/cran-all/cranData/CLVTools/R/catch-routine-registration.R
|
#' @templateVar name_model_full BG/NBD
#' @templateVar name_class_clvmodel clv.model.bgnbd.no.cov
#' @template template_class_clvfittedtransactionmodels
#'
#' @template template_slot_bgnbdcbs
#'
#' @seealso \linkS4class{clv.fitted}, \linkS4class{clv.fitted.transactions}, \linkS4class{clv.model.bgnbd.no.cov}, \linkS4class{clv.bgnbd.static.cov}
#'
#' @keywords internal
#' @importFrom methods setClass
#' @include class_clv_model_bgnbd.R class_clv_data.R class_clv_fitted_transactions.R
setClass(Class = "clv.bgnbd", contains = "clv.fitted.transactions",
slots = c(
cbs = "data.table"),
# Prototype is labeled not useful anymore, but still recommended by Hadley / Bioc
prototype = list(
cbs = data.table()))
clv.bgnbd <- function(cl, clv.data){
dt.cbs.bgnbd <- bgnbd_cbs(clv.data = clv.data)
clv.model <- clv.model.bgnbd.no.cov()
return(new("clv.bgnbd",
clv.fitted.transactions(cl=cl, clv.model=clv.model, clv.data=clv.data),
cbs = dt.cbs.bgnbd))
}
bgnbd_cbs <- function(clv.data){
Date <- Price <- x <- date.first.actual.trans <- date.last.transaction <- NULL
# Customer-By-Sufficiency (CBS) Matrix
# Only for transactions in calibration period
# Only repeat transactions are relevant
#
# For every customer:
# x: Number of repeat transactions := Number of actual transactions - 1
# t.x: Time between first actual and last transaction
# T.cal: Time between first actual transaction and end of calibration period
#
# All time is expressed in time units
trans.dt <- clv.data.get.transactions.in.estimation.period(clv.data = clv.data)
#Initial cbs, for every Id a row
cbs <- trans.dt[ , list(x =.N,
date.first.actual.trans = min(Date),
date.last.transaction = max(Date)),
by="Id"]
# Only repeat transactions -> Number of transactions - 1
cbs[, x := x - 1]
# t.x, T.cal
cbs[, ':='(t.x = clv.time.interval.in.number.tu([email protected], interv=interval(start = date.first.actual.trans, end = date.last.transaction)),
T.cal = clv.time.interval.in.number.tu([email protected], interv=interval(start = date.first.actual.trans, end = [email protected]@timepoint.estimation.end)))]
cbs[, date.last.transaction := NULL]
setkeyv(cbs, c("Id", "date.first.actual.trans"))
setcolorder(cbs, c("Id","x","t.x","T.cal", "date.first.actual.trans"))
return(cbs)
}
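# Illustrative sketch of the x / t.x / T.cal definitions on a toy transaction table
# (hypothetical data, kept inert; time expressed in weeks for simplicity):
if (FALSE) {
  dt.toy <- data.table(Id = c("A", "A", "A", "B"),
                       Date = as.Date(c("2023-01-02", "2023-01-16", "2023-02-27", "2023-01-09")))
  estimation.end <- as.Date("2023-03-27")
  dt.toy[, list(x = .N - 1,                                          # repeat transactions only
                t.x = as.numeric(max(Date) - min(Date)) / 7,         # first to last transaction
                T.cal = as.numeric(estimation.end - min(Date)) / 7), # first transaction to estimation end
         by = "Id"]
  # Id A: x = 2, t.x = 8, T.cal = 12; Id B (zero-repeater): x = 0, t.x = 0, T.cal = 11
}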
|
/scratch/gouwar.j/cran-all/cranData/CLVTools/R/class_clv_bgnbd.R
|
#' @templateVar name_model_full BG/NBD
#' @templateVar name_class_clvmodel clv.model.bgnbd.static.cov
#' @template template_class_clvfittedtransactionmodels_staticcov
#'
#' @template template_slot_bgnbdcbs
#'
#' @seealso \linkS4class{clv.fitted.transactions.static.cov}, \linkS4class{clv.model.bgnbd.static.cov}, \linkS4class{clv.bgnbd}
#'
#' @keywords internal
#' @importFrom methods setClass
#' @include class_clv_model_bgnbd_staticcov.R class_clv_data_staticcovariates.R class_clv_fitted_transactions_staticcov.R
setClass(Class = "clv.bgnbd.static.cov", contains = "clv.fitted.transactions.static.cov",
slots = c(
cbs = "data.table"),
# Prototype is labeled not useful anymore, but still recommended by Hadley / Bioc
prototype = list(
cbs = data.table()))
#' @importFrom methods new
clv.bgnbd.static.cov <- function(cl, clv.data){
dt.cbs.bgnbd <- bgnbd_cbs(clv.data = clv.data)
clv.model <- clv.model.bgnbd.static.cov()
return(new("clv.bgnbd.static.cov",
clv.fitted.transactions.static.cov(cl=cl, clv.model=clv.model, clv.data=clv.data),
cbs = dt.cbs.bgnbd))
}
|
/scratch/gouwar.j/cran-all/cranData/CLVTools/R/class_clv_bgnbd_staticcov.R
|
#' Transactional data to fit CLV models
#'
#' @description
#' Stores the processed transactional data and holds an object of class \linkS4class{clv.time}
#' which stores further information about the split in an estimation and holdout sample.
#'
#' A \code{clv.data} object serves as input into the various model fitting functions.
#'
#' @slot call Single language of the call used to create the object
#' @slot name Human-readable name of the type of transactional data
#' @slot clv.time clv.time object that stores and is used for processing all timepoint related information
#' @slot data.transactions Single \code{data.table} containing the original transaction data, with columns renamed to 'Id', 'Date', 'Price'
#' @slot data.repeat.trans Single \code{data.table} containing only the repeat transactions
#' @slot has.spending Single logical whether the data contains information about the amount spent per transaction
#' @slot has.holdout Single logical whether the data is split in a holdout and estimation period
#'
#' @seealso \linkS4class{clv.time}
#'
#' @keywords internal
#' @include all_generics.R class_clv_time.R
setClass(Class = "clv.data",
slots = c(
call = "language",
name = "character",
clv.time = "clv.time",
data.transactions = "data.table",
data.repeat.trans = "data.table",
has.spending = "logical",
has.holdout = "logical"),
# Prototype is labeled not useful anymore, but still recommended by Hadley / Bioc
prototype = list(
name = character(0),
data.transactions = data.table(),
data.repeat.trans = data.table(),
has.spending = logical(0),
has.holdout = logical(0)))
#' @importFrom methods new
clv.data <- function(call, data.transactions, data.repeat.trans, has.spending, clv.time){
has.holdout <- clv.time.has.holdout(clv.time)
setkeyv(data.transactions, c("Id", "Date"))
setkeyv(data.repeat.trans, c("Id", "Date"))
return(new("clv.data",
name = "CLV Transaction Data",
call = call,
clv.time = clv.time,
data.transactions = copy(data.transactions),
data.repeat.trans = copy(data.repeat.trans),
has.spending = has.spending,
has.holdout = has.holdout))
}
clv.data.has.holdout <- function(clv.data){
return([email protected])
}
clv.data.has.spending <- function(clv.data){
return([email protected])
}
clv.data.has.negative.spending <- function(clv.data){
Price <- NULL
if(clv.data.has.spending(clv.data = clv.data) == FALSE)
return(FALSE)
return([email protected][Price < 0, .N] > 0)
}
clv.data.get.transactions.in.estimation.period <- function(clv.data){
Date <- NULL
return([email protected][Date <= [email protected]@timepoint.estimation.end])
}
clv.data.get.transactions.in.holdout.period <- function(clv.data){
Date <- NULL
stopifnot(clv.data.has.holdout(clv.data))
return([email protected][Date >= [email protected]@timepoint.holdout.start])
}
clv.data.make.repeat.transactions <- function(dt.transactions){
Date <- previous <- NULL
# Copy because alters table
dt.repeat.transactions <- copy(dt.transactions)
dt.repeat.transactions[order(Date), previous := shift(x=Date, n = 1L, type = "lag"), by="Id"]
# Remove first transaction: Have no previous (ie is NA)
dt.repeat.transactions <- dt.repeat.transactions[!is.na(previous)]
dt.repeat.transactions[, previous := NULL]
# Alternative:
# Works only because all transactions on the same Date were aggregated. Otherwise, more than one per customer could be removed
# dt.repeat.transactions[, is.first.trans := (Date == min(Date)), by="Id"]
# dt.repeat.transactions <- dt.repeat.transactions[is.first.trans == FALSE]
return(dt.repeat.transactions)
}
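# Illustrative sketch (hypothetical data, kept inert): the first transaction of every customer
# has no lagged `previous` Date and is therefore dropped.
if (FALSE) {
  dt.toy <- data.table(Id = c("A", "A", "B"),
                       Date = as.Date(c("2023-01-02", "2023-01-16", "2023-01-09")))
  clv.data.make.repeat.transactions(dt.toy)
  # Keeps only A's transaction on 2023-01-16; zero-repeater B drops out entirely
}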
# Aggregate what is on same smallest scale representable by time
# Spending is summed, if present
# aggregating what is in the same time.unit does not make sense
# Date: on same day
# posix: on same second
clv.data.aggregate.transactions <- function(dt.transactions, has.spending){
Price <- NULL
if(has.spending){
dt.aggregated.transactions <- dt.transactions[, list("Price" = sum(Price)), by=c("Id", "Date")]
}else{
# Only keep one observation, does not matter which
# head(.SD) does not work because Id and Date both in by=
# unique() has the same effect because there are only 2 columns
dt.aggregated.transactions <- unique(dt.transactions, by=c("Id", "Date"))
}
return(dt.aggregated.transactions)
}
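# Illustrative sketch (hypothetical data, kept inert): two purchases of customer A on the same
# Date are collapsed into a single transaction with the summed Price.
if (FALSE) {
  dt.toy <- data.table(Id = c("A", "A", "B"),
                       Date = as.Date(c("2023-01-02", "2023-01-02", "2023-01-09")),
                       Price = c(10, 5, 20))
  clv.data.aggregate.transactions(dt.toy, has.spending = TRUE)
  # A / 2023-01-02 now has Price = 15; B / 2023-01-09 keeps Price = 20
}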
# Interpurchase time, for repeaters only
# Time between consecutive purchases of each customer - convert to intervals then time units
# If zero-repeaters (only 1 trans) set NA to ignore it in mean / sd calculations
#' @importFrom lubridate int_diff
clv.data.mean.interpurchase.times <- function(clv.data, dt.transactions){
Id <- num.trans <- Date <- NULL
num.transactions <- dt.transactions[, list(num.trans = .N), by="Id"]
return(rbindlist(list(
# 1 Transaction = NA
dt.transactions[Id %in% num.transactions[num.trans == 1,Id], list(interp.time = NA_real_, Id)],
dt.transactions[Id %in% num.transactions[num.trans > 1,Id],
list(interp.time = mean(clv.time.interval.in.number.tu(clv.time = [email protected],
interv = int_diff(Date)))),
by="Id"]
), use.names = TRUE))
}
#' @importFrom stats sd
#' @importFrom lubridate time_length
clv.data.make.descriptives <- function(clv.data, Ids){
Id <- Date <- .N <- N <- Price <- interp.time<- Name <- Holdout <- NULL
# readability
clv.time <- [email protected]
Ids <- unique(Ids)
# Make descriptives ------------------------------------------------------------------------------
# Do not simply overwrite all NA/NaN with "-", only where these are expected (num obs = 1).
# Let propagate otherwise to help find errors
fct.make.descriptives <- function(dt.data, sample.name){
# Subset transaction data to relevant Ids
if(!is.null(Ids)){
dt.data <- dt.data[Id %in% Ids]
# print warning only once
if(sample.name == "Total" & dt.data[, uniqueN(Id)] != length(unique(Ids))){
warning("Not all given Ids were found in the transaction data.", call. = FALSE)
}
}
dt.interp <- clv.data.mean.interpurchase.times(clv.data=clv.data, dt.transactions = dt.data)
dt.num.trans.by.cust <- dt.data[, .N, by="Id"]
l.desc <- list(
"Number of customers" = if(sample.name=="Total"){nrow(dt.num.trans.by.cust)}else{"-"},
"First Transaction in period" = clv.time.format.timepoint(clv.time=clv.time, timepoint=dt.data[, min(Date)]),
"Last Transaction in period" = clv.time.format.timepoint(clv.time=clv.time, timepoint=dt.data[, max(Date)]),
"Total # Transactions" = nrow(dt.data),
"Mean # Transactions per cust" = dt.num.trans.by.cust[, mean(N)],
"(SD)" = if(nrow(dt.num.trans.by.cust) > 1){dt.num.trans.by.cust[, sd(N)]}else{"-"})
if(clv.data.has.spending(clv.data)){
l.desc <- c(l.desc, list(
"Mean Spending per Transaction" = dt.data[, mean(Price)],
# SD is calculated not across customers but across transactions
"(SD) " = if(dt.data[, .N] > 1){dt.data[, sd(Price)]}else{"-"},
"Total Spending" = dt.data[, sum(Price)]))
}
num.interp.obs <- dt.interp[!is.na(interp.time), .N]
l.desc <- c(l.desc, list(
# Zero-repeaters can only be in the Estimation sample
"Total # zero repeaters" = if(sample.name == "Estimation"){dt.num.trans.by.cust[, sum(N==1)]}else{"-"},
"Percentage of zero repeaters" = if(sample.name == "Estimation"){dt.num.trans.by.cust[, mean(N==1)*100]}else{"-"},
# Inter-purchase time
# Remove NAs resulting from zero-repeaters
"Mean Interpurchase time" = if(num.interp.obs > 0){dt.interp[, mean(interp.time, na.rm=TRUE)]}else{"-"},
# Need 2 obs to calculate SD
"(SD) " = if(num.interp.obs > 1){dt.interp[, sd(interp.time, na.rm=TRUE)]}else{"-"}))
# Format numbers
l.desc <- format(l.desc, digits=3, nsmall=3)
return(l.desc)
}
l.desc.estimation <- fct.make.descriptives(dt.data = clv.data.get.transactions.in.estimation.period(clv.data),
sample.name="Estimation")
l.desc.total <- fct.make.descriptives(dt.data = [email protected],
sample.name="Total")
dt.summary <- cbind(
data.table(Estimation=l.desc.estimation),
data.table(Total=l.desc.total))
# Add holdout descriptives, if
# - has holdout sample period
# - has transactions in holdout sample (might not have any when making descriptives for a single customer)
dt.summary[, Holdout := "-"]
if(clv.data.has.holdout(clv.data)){
dt.trans.holdout <- clv.data.get.transactions.in.holdout.period(clv.data = clv.data)
# Need to subset to Ids here already to check if there actually are transactions in holdout period
if(!is.null(Ids)){
dt.trans.holdout <- dt.trans.holdout[Id %in% Ids]
}
if(nrow(dt.trans.holdout) > 0){
dt.summary[, Holdout := fct.make.descriptives(dt.data = dt.trans.holdout, sample.name="Holdout")]
}
}
dt.summary[, Name := names(l.desc.estimation)]
setcolorder(dt.summary, c("Name", "Estimation", "Holdout", "Total"))
return(dt.summary)
}
# default.choices might differ in order
clv.data.select.sample.data <- function(clv.data, sample, choices){
# check if sample is valid
check_err_msg(.check_userinput_matcharg(char=sample, choices=choices, var.name="sample"))
sample <- match.arg(arg = tolower(sample), choices = choices)
if(sample == "holdout" & !clv.data.has.holdout(clv.data)){
check_err_msg("The given clv.data object has no holdout data!")
}
return(switch(sample,
"full" = copy([email protected]),
"estimation" = clv.data.get.transactions.in.estimation.period(clv.data),
"holdout" = clv.data.get.transactions.in.holdout.period(clv.data)))
}
# Add the number of repeat transactions to the given dt.date.seq
clv.data.add.repeat.transactions.to.periods <- function(clv.data, dt.date.seq, cumulative){
num.repeat.trans <- i.num.repeat.trans <- Date <- period.until <- NULL
# Add period at every repeat transaction (and therefore copy)
dt.repeat.trans <- copy([email protected])
# join (roll: -Inf=NOCF) period number onto all repeat transaction by dates
# ie assign each repeat transaction the next period number to which it belongs
dt.repeat.trans <- dt.date.seq[dt.repeat.trans, on = c("period.until"="Date"), roll=-Inf, rollends=c(FALSE, FALSE)]
# !period.until now is misleading, as it stands for the repeat transaction date!
# Count num rep trans in every time unit
dt.repeat.trans <- dt.repeat.trans[, list(num.repeat.trans = .N), by="period.num"]
setorderv(dt.repeat.trans, order = 1L, cols = "period.num") # sort in ascending order
# make double to avoid coercion warning in melt
dt.date.seq[dt.repeat.trans, num.repeat.trans := as.numeric(i.num.repeat.trans), on = "period.num"]
# set 0 where there are no transactions
# for when there are transactions again later on
dt.date.seq[is.na(num.repeat.trans), num.repeat.trans := 0]
# After last transaction, there are no more transactions.
# dt.expectation can however be longer. Set these intentionally to NA so that
# nothing is plotted (setting 0 plots a line at the bottom)
date.last.repeat.transaction <- [email protected][, max(Date)]
dt.date.seq[period.until > date.last.repeat.transaction, num.repeat.trans := NA_real_]
if(cumulative)
dt.date.seq[, num.repeat.trans := cumsum(num.repeat.trans)]
return(dt.date.seq)
}
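# Illustrative sketch of the roll = -Inf (next observation carried backward) join used above
# (hypothetical data, kept inert): every repeat transaction Date is assigned the period.num of
# the next period boundary it falls into.
if (FALSE) {
  dt.periods <- data.table(period.num = 1:3,
                           period.until = as.Date(c("2023-01-08", "2023-01-15", "2023-01-22")))
  dt.trans <- data.table(Id = c("A", "B"), Date = as.Date(c("2023-01-03", "2023-01-20")))
  dt.periods[dt.trans, on = c("period.until" = "Date"), roll = -Inf]
  # A (2023-01-03) is assigned period.num 1, B (2023-01-20) is assigned period.num 3
}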
|
/scratch/gouwar.j/cran-all/cranData/CLVTools/R/class_clv_data.R
|
#' Transactional and dynamic covariates data to fit CLV models
#'
#' @description
#' Extends the class \linkS4class{clv.data.static.covariates},
#' but adds no additional slots to it. The purpose of this class rather is to define different behaviors
#' suitable for dynamic covariates.
#'
#' The \code{data.table}s stored in the slots \code{data.cov.life} and \code{data.cov.trans}
#' each contain an additional column \code{Cov.Date} for the timepoint of the covariate.
#'
#' An object of this class serves as input to fit models with dynamic covariates.
#'
#'
#' @slot data.cov.life Single \code{data.table} with all static covariate data for the lifetime process
#' @slot data.cov.trans Single \code{data.table} with all static covariate data for the transaction process
#' @slot names.cov.data.life Character vector with names of the dynamic lifetime covariates.
#' @slot names.cov.data.trans Character vector with names of the dynamic transaction covariates.
#'
#' @seealso Definition of the parent class \linkS4class{clv.data.static.covariates}.
#' @seealso For fitting dynamic covariate models: \code{\link[CLVTools:pnbd]{pnbd}}
#'
# Corresponds to the column names of the \code{data.table} in slot data.cov.trans
#'
#' @keywords internal
#' @importFrom methods setClass
#' @include all_generics.R class_clv_data.R class_clv_data_staticcovariates.R class_clv_time.R
setClass(Class = "clv.data.dynamic.covariates", contains = "clv.data.static.covariates")
#' @importFrom methods new
clv.data.dynamic.covariates <- function(no.cov.obj,
data.cov.life,
data.cov.trans,
names.cov.data.life,
names.cov.data.trans){
return(new("clv.data.dynamic.covariates",
# no need for deep copy as done in static cov constructor
clv.data.static.covariates(no.cov.obj = no.cov.obj,
names.cov.data.life = names.cov.data.life,
names.cov.data.trans = names.cov.data.trans,
data.cov.life = data.cov.life,
data.cov.trans = data.cov.trans),
name = "CLV Transaction Data with Dynamic Covariates",
names.cov.data.life = names.cov.data.life,
names.cov.data.trans = names.cov.data.trans))
}
|
/scratch/gouwar.j/cran-all/cranData/CLVTools/R/class_clv_data_dynamiccovariates.R
|
#' Transactional and static covariates data to fit CLV models
#'
#'
#' Extends the class \linkS4class{clv.data} and adds slots to store data and names of
#' static covariates for both processes.
#' An object of this class then serves as input to fit models with static covariates.
#'
#'
#' @slot data.cov.life Single \code{data.table} with all static covariate data for the lifetime process
#' @slot data.cov.trans Single \code{data.table} with all static covariate data for the transaction process
#' @slot names.cov.data.life Character vector with names of the static lifetime covariates.
#' @slot names.cov.data.trans Character vector with names of the static transaction covariates.
# Corresponds to the column names of the \code{data.table} in slot data.cov.life
#'
#' @seealso Definition of the parent class \linkS4class{clv.data}.
#' @seealso For fitting covariate models: \code{\link[CLVTools:pnbd]{pnbd}}
#'
#'
#' @keywords internal
#' @importFrom methods setClass
#' @include all_generics.R class_clv_data.R class_clv_time.R
setClass(Class = "clv.data.static.covariates", contains = "clv.data",
slots = c(
data.cov.life = "data.table",
data.cov.trans = "data.table",
names.cov.data.life = "character",
names.cov.data.trans = "character"),
# Prototype is labeled not useful anymore, but still recommended by Hadley / Bioc
prototype = list(
data.cov.life = data.table(),
data.cov.trans = data.table(),
names.cov.data.life = character(0),
names.cov.data.trans = character(0)))
#' @importFrom methods new
clv.data.static.covariates <- function(no.cov.obj, data.cov.life, data.cov.trans, names.cov.data.life,names.cov.data.trans){
# Cannot set keys here because only setting "Id" would remove the keys set for dyncov
# all the data in the no covariate clv.data object need to be deep copied.
# This is only relevant for the data.tables in it (data.transactions)
# Do not call the clv.data constructor function because it would require taking the clv.data object apart to pass
# it as single arguments
return(new("clv.data.static.covariates",
copy(no.cov.obj), # copy construct on deep copy of no cov data
name = "CLV Transaction Data with Static Covariates",
names.cov.data.life = names.cov.data.life,
names.cov.data.trans = names.cov.data.trans,
data.cov.life = data.cov.life,
data.cov.trans = data.cov.trans))
}
clv.data.get.matrix.data.cov.life <- function(clv.data, correct.col.names, correct.row.names){
# .SD returns copy, can use setDF without modifying the original data
m.cov.data.life <- data.matrix(setDF([email protected][, .SD, .SDcols = [email protected]],
rownames = [email protected]$Id))
if(!all(rownames(m.cov.data.life) == correct.row.names))
stop("Covariate data (life) rows are not sorted correctly. Please file a bug!")
if(!all(colnames(m.cov.data.life) == correct.col.names))
stop("Covariate data (life) cols are not sorted correctly. Please file a bug!")
return(m.cov.data.life)
}
# Returns matrix of transaction cov data
# with cols sorted same as in vector names.cov.data.trans
clv.data.get.matrix.data.cov.trans <- function(clv.data, correct.col.names, correct.row.names){
# .SD returns copy, can use setDF without modifying the original data
m.cov.data.trans <- data.matrix(setDF([email protected][, .SD, .SDcols = [email protected]],
rownames = [email protected]$Id))
if(!all(rownames(m.cov.data.trans) == correct.row.names))
stop("Covariate data (trans) rows are not sorted correctly. Please file a bug!")
if(!all(colnames(m.cov.data.trans) == correct.col.names))
stop("Covariate data (trans) columns are not sorted correctly. Please file a bug!")
return(m.cov.data.trans)
}
clv.data.get.names.cov.life <- function(clv.data){
return([email protected])
}
clv.data.get.names.cov.trans <- function(clv.data){
return([email protected])
}
clv.data.reduce.covariates <- function(clv.data, names.cov.life, names.cov.trans){
# Reduce covariate data to Id + cov names if told by user
if(length(names.cov.life) != 0 & !identical(names.cov.life, [email protected])){
[email protected] <- names.cov.life
[email protected] <- [email protected][, .SD, .SDcols=c("Id", [email protected])]
}
if(length(names.cov.trans) != 0 & !identical(names.cov.trans, [email protected])){
[email protected] <- names.cov.trans
[email protected] <- [email protected][, .SD, .SDcols=c("Id", [email protected])]
}
}
return(clv.data)
}
#' @importFrom stats model.frame model.matrix reformulate
convert_userinput_covariatedata <- function(dt.cov.data, names.cov){
# Make syntactically valid names
# Rename data in order to be able to use model.frame() which requires legal names
original.cov.names <- names.cov
legal.cov.names <- make.names(names.cov)
setnames(dt.cov.data, old = original.cov.names, new = legal.cov.names)
# Use model.frame/model.matrix to convert cov data
# numeric stays numeric, char/factors to k-1 dummies
# Always need intercept!
# to always get k-1 dummies, as no intercept implies k dummies in the
# case of only a single categorical covariate
f.covs <- reformulate(termlabels = legal.cov.names,
response = NULL,
intercept = TRUE)
mf <- model.frame(f.covs, data = dt.cov.data)
mm <- model.matrix(object = f.covs, data = dt.cov.data)
# Combine everything else (Id, maybe Cov.Date for dyncov) and raw converted numeric covariate data
dt.cov <- cbind(
# Id and Cov.Date from original data, everything except actual cov data
dt.cov.data[, .SD, .SDcols=setdiff(colnames(dt.cov.data), legal.cov.names)],
# everything except the Intercept: numeric, dummies, etc
mm[, setdiff(colnames(mm), "(Intercept)"), drop=FALSE])
# Read final names which in the case of dummies are completely different from
# original.cov.names (or legal.cov.names)
final.names.cov <- setdiff(colnames(dt.cov), c("Id", "Cov.Date"))
return(list(data.cov = dt.cov, final.names.cov=final.names.cov))
}
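# Illustrative sketch (hypothetical data, kept inert): with an intercept, a categorical covariate
# with k levels is converted to k-1 dummy columns while numeric covariates pass through unchanged.
if (FALSE) {
  dt.cov <- data.table(Id = c("A", "B", "C"),
                       Gender = c("m", "f", "f"),
                       Income = c(40, 55, 32))
  convert_userinput_covariatedata(dt.cov.data = copy(dt.cov), names.cov = c("Gender", "Income"))
  # $data.cov contains Id, Genderm (0/1 dummy, "f" is the reference level) and Income
  # $final.names.cov is c("Genderm", "Income")
}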
|
/scratch/gouwar.j/cran-all/cranData/CLVTools/R/class_clv_data_staticcovariates.R
|
# Register an S4 class for optimx to be able to save the estimation output in the clv S4 class' slot
#' @import optimx
setOldClass("optimx")
#' Fitted model without covariates
#'
#' @description
#' The class stores the transaction data and various optimization outputs and options.
#' It is created when model fitting is initiated and is then used to perform the steps of the
#' estimation process that are specific to models without covariates.
#' It serves as the parent class of fitted transaction and spending models.
#'
#' Created with an existing clv.data and clv.model object (or subclasses thereof).
#'
#' @slot call Single language of the call used to create the object
#' @slot clv.model Single object of (sub-) class \code{clv.model} that determines model-specific behavior.
#' @slot clv.data Single object of (sub-) class \code{clv.data} that contains the data and temporal information to fit the model to.
#' @slot prediction.params.model Numeric vector of the model parameters, set and used solely when predicting. Named after model parameters in original scale and derived from \code{coef()}.
#' @slot optimx.estimation.output A single object of class \code{optimx} as returned from method \code{optimx::optimx} after optimizing the log-likelihood fitting the model.
#' @slot optimx.hessian Single matrix that is the hessian extracted from the last row of the optimization output stored in the slot \code{optimx.estimation.output}.
#'
#' @seealso \linkS4class{clv.fitted.spending}, \linkS4class{clv.fitted.transactions}, \linkS4class{clv.fitted.transactions.static.cov}, \linkS4class{clv.fitted.transactions.dynamic.cov}
#'
#' @importFrom methods setClass
#' @keywords internal
#' @include class_clv_model.R class_clv_data.R
setClass(Class = "clv.fitted", contains = "VIRTUAL",
slots = c(
call = "language",
clv.model = "clv.model",
clv.data = "clv.data",
prediction.params.model = "numeric",
# Can save optimx result as optimx class because setOldClass (optimx) is
# done before
optimx.estimation.output = "optimx",
optimx.hessian = "matrix"),
# Prototype is labeled not useful anymore, but still recommended by Hadley / Bioc
prototype = list(
prediction.params.model = numeric(0),
optimx.estimation.output = structure(data.frame(), class="optimx"),
optimx.hessian = matrix(data = numeric(0))))
|
/scratch/gouwar.j/cran-all/cranData/CLVTools/R/class_clv_fitted.R
|
#' Fitted Spending Model
#'
#' Extends the class \code{clv.fitted} with slots for spending models and performs steps during the
#' estimation, prediction and plotting process that are specific to all spending models.
#'
#' @slot estimation.removed.first.transaction Single boolean whether every customer's first transaction was removed when counting transactions.
#'
#' @seealso Definition of the parent class \linkS4class{clv.fitted}
#' @seealso For transaction models \linkS4class{clv.fitted.transactions}
#'
#' @include class_clv_fitted.R
#' @keywords internal
setClass(Class = "clv.fitted.spending", contains = "clv.fitted",
slots = list(estimation.removed.first.transaction = "logical"),
prototype = list(estimation.removed.first.transaction = logical(0)))
#' @importFrom methods new
clv.fitted.spending <- function(cl, clv.model, clv.data){
# Deep copy of clv.data if ever modified by reference later on
return(new("clv.fitted.spending",
call = cl,
clv.model = clv.model,
clv.data = copy(clv.data)))
}
setMethod(f = "clv.controlflow.estimate.check.inputs", signature = signature(clv.fitted="clv.fitted.spending"), definition = function(clv.fitted, start.params.model, optimx.args, verbose, remove.first.transaction, ...){
# clv.fitted inputchecks
callNextMethod()
err.msg <- c()
if(!clv.data.has.spending(clv.fitted@clv.data))
err.msg <- c(err.msg, "Spending models can only be fit on clv.data objects with spending data!")
# Have to check in interface because already required when building cbs
# err.msg <- c(err.msg, .check_user_data_single_boolean(b = remove.first.transaction, var.name = "remove.first.transaction"))
check_err_msg(err.msg)
})
|
/scratch/gouwar.j/cran-all/cranData/CLVTools/R/class_clv_fitted_spending.R
|
#' Fitted Transaction Model without covariates
#'
#' Extends the class \code{clv.fitted} to perform steps during the
#' estimation, prediction and plotting process that are specific to all transaction models.
#'
#' @seealso Definition of the parent class \linkS4class{clv.fitted}
#' @seealso For spending models \linkS4class{clv.fitted.spending}
#'
#' @include class_clv_fitted.R
#' @keywords internal
setClass(Class = "clv.fitted.transactions", contains = "clv.fitted")
#' @importFrom methods new
clv.fitted.transactions <- function(cl, clv.model, clv.data){
# Deep copy of clv.data if ever modified by reference later on
return(new("clv.fitted.transactions",
call = cl,
clv.model = clv.model,
clv.data = copy(clv.data)))
}
clv.fitted.transactions.add.expectation.data <- function(clv.fitted.transactions, dt.expectation.seq, cumulative, verbose){
expectation <- i.expectation <- NULL
# Pass a copy of the expectation table because it will be modified and will contain a column named expectation
dt.model.expectation <- clv.model.expectation(clv.model=clv.fitted.transactions@clv.model, clv.fitted=clv.fitted.transactions,
dt.expectation.seq=copy(dt.expectation.seq), verbose = verbose)
# Only the expectation data
dt.model.expectation <- dt.model.expectation[, c("period.until", "expectation")]
if(cumulative)
dt.model.expectation[, expectation := cumsum(expectation)]
# add expectation to plot data
# name columns by model
dt.expectation.seq[dt.model.expectation, expectation := i.expectation, on = "period.until"]
return(dt.expectation.seq)
}
|
/scratch/gouwar.j/cran-all/cranData/CLVTools/R/class_clv_fitted_transactions.R
|
#' Fitted CLV Model with Dynamic covariates
#'
#' @description
#'
#' Extends the class \linkS4class{clv.fitted.transactions.static.cov} but adds no
#' additional slots to it. Rather, the purpose of this class is to perform steps during the fitting
#' process that are specific to dynamic covariate models.
#'
#' @seealso \linkS4class{clv.fitted}, \linkS4class{clv.fitted.transactions.static.cov}
#'
#' @importFrom methods setClass
#' @include all_generics.R class_clv_data_dynamiccovariates.R class_clv_fitted_transactions_staticcov.R class_clv_fitted_transactions.R
#' @keywords internal
setClass(Class = "clv.fitted.transactions.dynamic.cov", contains = "clv.fitted.transactions.static.cov")
#' @importFrom methods new
clv.fitted.transactions.dynamic.cov <- function(cl, clv.model, clv.data){
return(new("clv.fitted.transactions.dynamic.cov",
clv.fitted.transactions.static.cov(cl=cl, clv.model=clv.model, clv.data=clv.data)))
}
|
/scratch/gouwar.j/cran-all/cranData/CLVTools/R/class_clv_fitted_transactions_dynamiccov.R
|
#' Fitted Transaction Model with Static covariates
#'
#' Extends the class \code{clv.fitted.transactions} with slots to accommodate the various additional
#' optimization options that can be used for covariate models.
#' Also used to perform steps during the estimation process that are specific to static covariate models.
#'
#' @slot estimation.used.constraints Single boolean whether the estimation constrained any covariate parameters to be the same for both processes.
#' @slot names.original.params.constr Character vector with the original names of the constrained covariate parameters. Length zero if none are constrained.
#' @slot names.original.params.free.life Character vector with the original names of the unconstrained lifetime covariate parameters. Length zero if none are free.
#' @slot names.original.params.free.trans Character vector with the original names of the unconstrained transaction covariate parameters. Length zero if none are free.
#' @slot names.prefixed.params.constr Character vector with the prefixed names of the constrained covariate parameters during optimization. Length zero if none are constrained.
#' @slot names.prefixed.params.free.life Character vector with the prefixed names of the unconstrained lifetime covariate parameters during optimization. Length zero if none are free.
#' @slot names.prefixed.params.free.trans Character vector with the prefixed names of the unconstrained transaction covariate parameters during optimization. Length zero if none are free.
#' @slot names.prefixed.params.after.constr.life Character vector containing the names of all constrained and free lifetime covariate parameters with lifetime prefixes only. Needed after duplicating the constrained parameters.
#' @slot names.prefixed.params.after.constr.trans Character vector containing the names of all constrained and free transaction covariate parameters with transaction prefixes only. Needed after duplicating the constrained parameters.
#' @slot estimation.used.regularization Single boolean whether the estimation used regularization.
#' @slot reg.lambda.life Single numeric with the lambda used for regularizing the lifetime covariate parameters. Length zero if regularization is not used.
#' @slot reg.lambda.trans Single numeric with the lambda used for regularizing the transaction covariate parameters. Length zero if regularization is not used.
#' @slot prediction.params.life Numeric vector of the lifetime covariate parameters, set and used solely when predicting. Named after lifetime covariates and derived from \code{coef()}.
#' @slot prediction.params.trans Numeric vector of the transaction covariate parameters, set and used solely when predicting. Named after transaction covariates and derived from \code{coef()}.
#'
#' @seealso \linkS4class{clv.fitted}, \linkS4class{clv.fitted.transactions}, \linkS4class{clv.fitted.transactions.dynamic.cov}
#'
#' @importFrom methods setClass
#' @keywords internal
#' @include class_clv_fitted_transactions.R
setClass(Class = "clv.fitted.transactions.static.cov", contains = "clv.fitted.transactions",
slots = c(
estimation.used.constraints = "logical",
# Needs original names per type to map back prefixed names
# to original names (creating prediction.params)
names.original.params.constr = "character",
names.original.params.free.life = "character",
names.original.params.free.trans = "character",
names.prefixed.params.constr = "character",
names.prefixed.params.free.life = "character",
names.prefixed.params.free.trans = "character",
# cannot use prefixed in interlayers after constraint interlayer (before LL & in reg),
# it needs all life and trans params, not split in free/constraint
names.prefixed.params.after.constr.life = "character",
names.prefixed.params.after.constr.trans = "character",
# Regularization parameters
estimation.used.regularization = "logical",
reg.lambda.life = "numeric",
reg.lambda.trans = "numeric",
# Params from constrained and unconstrained coefs
prediction.params.life = "numeric",
prediction.params.trans = "numeric"),
# Prototype is labeled not useful anymore, but still recommended by Hadley / Bioc
prototype = list(
estimation.used.constraints = logical(0),
names.original.params.free.life = character(0),
names.original.params.free.trans = character(0),
names.original.params.constr = character(0),
names.prefixed.params.free.life = character(0),
names.prefixed.params.free.trans = character(0),
names.prefixed.params.constr = character(0),
names.prefixed.params.after.constr.trans = character(0),
names.prefixed.params.after.constr.life = character(0),
estimation.used.regularization = logical(0),
reg.lambda.life = numeric(0),
reg.lambda.trans = numeric(0),
prediction.params.life = numeric(0),
prediction.params.trans = numeric(0)))
#' @importFrom methods new
clv.fitted.transactions.static.cov <- function(cl, clv.model, clv.data){
return(new("clv.fitted.transactions.static.cov",
clv.fitted.transactions(cl = cl, clv.model = clv.model, clv.data = clv.data)))
}
|
/scratch/gouwar.j/cran-all/cranData/CLVTools/R/class_clv_fitted_transactions_staticcov.R
|
#' @templateVar name_model_full Gamma-Gamma
#' @templateVar name_class_clvmodel clv.model.gg
#' @template template_class_clvfittedspendingmodels
#'
#' @slot cbs Single \code{data.table} that contains information about
#' each customer's mean spending per transaction (Spending) and the number of purchases (x),
#' both of which depend on whether the first transaction was removed or not.
#'
#' @seealso \linkS4class{clv.fitted}, \linkS4class{clv.fitted.spending}, \linkS4class{clv.model.gg}
#'
#'
#' @keywords internal
#' @importFrom methods setClass
#' @include class_clv_model_gg.R class_clv_fitted_spending.R
setClass(Class = "clv.gg", contains = "clv.fitted.spending",
slots = c(
cbs = "data.table"),
# Prototype is labeled not useful anymore, but still recommended by Hadley / Bioc
prototype = list(
cbs = data.table()))
#' @importFrom methods new
clv.gg <- function(cl, clv.data, remove.first.transaction){
dt.cbs.gg <- gg_cbs(clv.data = clv.data, remove.first.transaction = remove.first.transaction)
clv.model <- clv.model.gg()
return(new("clv.gg",
clv.fitted.spending(cl=cl, clv.model=clv.model, clv.data=clv.data),
cbs = dt.cbs.gg))
}
gg_cbs <- function(clv.data, remove.first.transaction){
Date <- Price <- x <- i.x <- Spending <- i.Spending <- NULL
# Customer-By-Sufficiency (CBS) Matrix
# Only for transactions in calibration period
# After first transaction was removed (if required)
# For every customer:
# x: Number of transactions
# Spending: Average (mean) spending per transaction
dt.transactions <- clv.data.get.transactions.in.estimation.period(clv.data = clv.data)
# Removing the first transaction and then counting transactions and spending on the result would
# lose customers (zero-repeaters). Therefore do it in separate steps: collect all Ids, then match their data
if(!remove.first.transaction){
# Ordinary approach is ok because will not lose Ids
cbs <- dt.transactions[ , list(x = .N,
Spending = mean(Price)),
by="Id"]
}else{
# Ensure all Ids are kept in cbs
cbs <- unique(dt.transactions[, "Id"])
# Add statistics based on repeat transactions only
# Cannot use the repeat transactions stored in the clv.data object because these also include holdout
dt.transactions <- clv.data.get.transactions.in.estimation.period(clv.data)
dt.repeat.transactions <- clv.data.make.repeat.transactions(dt.transactions)
dt.stats.repeat.trans <- dt.repeat.transactions[ , list(x = .N,
Spending = mean(Price)),
keyby="Id"]
cbs[dt.stats.repeat.trans, x := i.x, on = "Id"]
cbs[dt.stats.repeat.trans, Spending := i.Spending, on = "Id"]
# Zero-repeaters have no repeat transactions and hence no spending
cbs[is.na(x), x := 0]
cbs[is.na(Spending), Spending := 0]
}
setcolorder(cbs, c("Id", "x", "Spending"))
setkeyv(cbs, "Id")
return(cbs)
}
|
/scratch/gouwar.j/cran-all/cranData/CLVTools/R/class_clv_gg.R
|
#' @templateVar name_model_full GGompertz/NBD
#' @templateVar name_class_clvmodel clv.model.ggomnbd.no.cov
#' @template template_class_clvfittedtransactionmodels
#'
#' @template template_slot_ggomnbdcbs
#'
#' @seealso \linkS4class{clv.fitted}, \linkS4class{clv.fitted.transactions}, \linkS4class{clv.model.ggomnbd.no.cov}, \linkS4class{clv.ggomnbd.static.cov}
#'
#' @keywords internal
#' @importFrom methods setClass
#' @include class_clv_model_ggomnbd_nocov.R class_clv_data.R class_clv_fitted_transactions.R
setClass(Class = "clv.ggomnbd", contains = "clv.fitted.transactions",
slots = c(
cbs = "data.table"),
# Prototype is labeled not useful anymore, but still recommended by Hadley / Bioc
prototype = list(
cbs = data.table()))
# Convenience constructor to encapsulate all steps for object creation
clv.ggomnbd <- function(cl, clv.data){
dt.cbs.ggomnbd <- ggomnbd_cbs(clv.data = clv.data)
clv.model <- clv.model.ggomnbd.no.cov()
# Reuse clv.fitted constructor to ensure proper object creation
# a recommended pattern by Martin Morgan on SO
return(new("clv.ggomnbd",
clv.fitted.transactions(cl=cl, clv.model=clv.model, clv.data=clv.data),
cbs = dt.cbs.ggomnbd))
}
ggomnbd_cbs <- function(clv.data){
Date <- Price <- x <- date.first.actual.trans <- date.last.transaction <- NULL
# Customer-By-Sufficiency (CBS) Matrix
# Only for transactions in calibration period
# Only repeat transactions are relevant
#
# For every customer:
# x: Number of repeat transactions := Number of actual transactions - 1
# t.x: Time between first actual and last transaction
# T.cal: Time between first actual transaction and end of calibration period
#
# All time is expressed in time units
trans.dt <- clv.data.get.transactions.in.estimation.period(clv.data = clv.data)
# Initial cbs: one row for every Id
cbs <- trans.dt[ , list(x =.N,
date.first.actual.trans = min(Date),
date.last.transaction = max(Date)),
by="Id"]
# Only repeat transactions -> Number of transactions - 1
cbs[, x := x - 1]
# t.x, T.cal
cbs[, ':='(t.x = clv.time.interval.in.number.tu(clv.time=clv.data@clv.time, interv=interval(start = date.first.actual.trans, end = date.last.transaction)),
T.cal = clv.time.interval.in.number.tu(clv.time=clv.data@clv.time, interv=interval(start = date.first.actual.trans, end = clv.data@clv.time@timepoint.estimation.end)))]
cbs[, date.last.transaction := NULL]
setkeyv(cbs, c("Id", "date.first.actual.trans"))
setcolorder(cbs, c("Id","x","t.x","T.cal", "date.first.actual.trans"))
return(cbs)
}
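# Illustrative sketch (not part of the package source): what x, t.x and T.cal mean for a single
# hypothetical customer when time units are weeks. First purchase in week 0, further purchases in
# weeks 3 and 7, calibration period ending in week 10:
#   x     = 2   (repeat transactions, i.e. 3 actual transactions - 1)
#   t.x   = 7   (time units between first and last transaction)
#   T.cal = 10  (time units between first transaction and end of calibration period)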
|
/scratch/gouwar.j/cran-all/cranData/CLVTools/R/class_clv_ggomnbd.R
|
#' @templateVar name_model_full GGompertz/NBD
#' @templateVar name_class_clvmodel clv.model.ggomnbd.static.cov
#' @template template_class_clvfittedtransactionmodels_staticcov
#'
#' @template template_slot_ggomnbdcbs
#'
#' @seealso \linkS4class{clv.fitted.transactions.static.cov}, \linkS4class{clv.model.ggomnbd.static.cov}, \linkS4class{clv.ggomnbd}
#'
#' @keywords internal
#' @importFrom methods setClass
#' @include class_clv_model_ggomnbd_staticcov.R class_clv_data_staticcovariates.R class_clv_fitted_transactions_staticcov.R
setClass(Class = "clv.ggomnbd.static.cov", contains = "clv.fitted.transactions.static.cov",
slots = c(
cbs = "data.table"),
# Prototype is labeled not useful anymore, but still recommended by Hadley / Bioc
prototype = list(
cbs = data.table()))
#' @importFrom methods new
clv.ggomnbd.static <- function(cl, clv.data){
dt.cbs.ggomnbd <- ggomnbd_cbs(clv.data = clv.data)
clv.model <- clv.model.ggomnbd.static.cov()
# Reuse clv.fitted constructor to ensure proper object creation
# a recommended pattern by Martin Morgan on SO
return(new("clv.ggomnbd.static.cov",
clv.fitted.transactions.static.cov(cl=cl, clv.model=clv.model, clv.data=clv.data),
cbs = dt.cbs.ggomnbd))
}
|
/scratch/gouwar.j/cran-all/cranData/CLVTools/R/class_clv_ggomnbd_staticcov.R
|
#' CLV Model providing model related functionalities
#'
#' @description
#' Objects of class \code{clv.fitted} contain an instance of class clv.model that is called
#' whenever a model-related step is performed (Strategy pattern).
#'
#' This class is the abstract parent class from which new clv models inherit. It implements
#' only stubs (with stop()) for all methods that need to be implemented in order
#' to obtain a functional \code{clv.fitted} object.
#'
#' @slot name.model Name of the model as it should be displayed
#' @slot names.original.params.model character vector that defines the names of the model parameters as they should be reported
#' @slot names.prefixed.params.model character vector that defines the names of the model parameters as they are named during LL optimization
#' @slot start.params.model numeric vector of default values at original scale that should be used for the LL optimization if the user does not provide start parameters. Named with \code{names.original.params.model}.
#' @slot optimx.defaults list of default arguments for calling \code{\link[optimx]{optimx}} with \code{do.call}. Named after the respective arguments in optimx.
#'
#' @seealso CLV model subclasses with and without support for fitting with correlation \linkS4class{clv.model.with.correlation}, \linkS4class{clv.model.no.correlation}
#' @seealso Class using its instances: \linkS4class{clv.fitted}
#'
#' @keywords internal
#' @include all_generics.R
#' @importFrom methods setClass
setClass(Class = "clv.model", contains = "VIRTUAL",
slots = list(
# Anything that will be used from main execution code.
# Enforce it through slots instead of relying on setting it in model generics
name.model = "character",
names.original.params.model = "character",
names.prefixed.params.model = "character",
start.params.model = "numeric",
# The standard method is overwritten with Nelder-Mead if correlation is used because
# if param.m is out of bounds, Inf is returned.
optimx.defaults = "list"),
# Prototype is labeled not useful anymore, but still recommended by Hadley / Bioc
prototype = list(
name.model = character(0),
names.original.params.model = character(0),
names.prefixed.params.model = character(0),
start.params.model = numeric(0),
optimx.defaults = list()))
# No constructor as should not be created
# Default / fallback methods for all models --------------------------------------------------------
# . clv.model.check.input.args -----------------------------------------------------------------------------
setMethod(f = "clv.model.check.input.args", signature = signature(clv.model="clv.model"), definition = function(clv.model, clv.fitted, start.params.model, optimx.args, verbose, ...){
# Example:
# if(length(list(...)) > 0)
# stop("Any further parameters passed in ... are not needed for this model.", call. = FALSE)
stop("The method clv.model.check.input.args has not been implemented by this model!")
})
# . clv.model.put.estimation.input -----------------------------------------------------------------------------
setMethod(f = "clv.model.put.estimation.input", signature = signature(clv.model="clv.model"), definition = function(clv.model, ...){
# Example: do nothing
# return(clv.model)
stop("The method clv.model.put.estimation.input has not been implemented by this model!")
})
# . clv.model.transform.start.params.model -----------------------------------------------------------------------------
setMethod("clv.model.transform.start.params.model", signature = signature(clv.model="clv.model"), definition = function(clv.model, original.start.params.model){
# Example: return start params as given
# return(original.start.params.model)
stop("The method clv.model.transform.start.params.model has not been implemented by this model!")
})
# . clv.model.backtransform.estimated.params.model ---------------------------------------------------------------------
setMethod("clv.model.backtransform.estimated.params.model", signature = signature(clv.model="clv.model"), definition = function(clv.model, prefixed.params.model){
# Example: return as optimized
# return(prefixed.params.model)
stop("The method clv.model.backtransform.estimated.params.model has not been implemented by this model!")
})
# . clv.model.prepare.optimx.args -----------------------------------------------------------------------------
setMethod(f = "clv.model.prepare.optimx.args", signature = signature(clv.model="clv.model"), definition = function(clv.model, clv.fitted, prepared.optimx.args){
stop("The method clv.model.prepare.optimx.args has not been implemented by this model!")
})
# . clv.model.process.post.estimation -----------------------------------------------------------------------------------------
setMethod("clv.model.process.post.estimation", signature = signature(clv.model="clv.model"), definition = function(clv.model, clv.fitted, res.optimx){
# Example: do nothing
# No additional step needed (ie store model specific stuff, extra process)
# return(clv.fitted)
stop("The method clv.model.process.post.estimation has not been implemented by this model!")
})
# . clv.model.cor.to.m ----------------------------------------------------------------------------------------
setMethod(f="clv.model.cor.to.m", signature = signature(clv.model="clv.model"), definition = function(clv.model, prefixed.params.model, param.cor){
# Example:
# res.m <- param.cor / .XXX
# return unnamed as otherwise still called "cor"
# return(unname(res.m))
stop("The method clv.model.cor.to.m has not been implemented by this model!")
})
# . clv.model.m.to.cor ----------------------------------------------------------------------------------------
setMethod(f="clv.model.m.to.cor", signature = signature(clv.model="clv.model"), definition = function(clv.model, prefixed.params.model, param.m){
# Example:
# res.cor <- param.m * XXX
# return unnamed as otherwise still called "m"
# return(unname(res.cor))
stop("The method clv.model.m.to.cor has not been implemented by this model!")
})
# . clv.model.vcov.jacobi.diag ------------------------------------------------------------------------------------------
setMethod(f = "clv.model.vcov.jacobi.diag", signature = signature(clv.model="clv.model"), definition = function(clv.model, clv.fitted, prefixed.params){
# Example: No transformation needed (because also untransformed), only 1s in diag
# m.diag <- diag(x = 1, nrow = length(prefixed.params))
# rownames(m.diag) <- colnames(m.diag) <- names(prefixed.params)
# return(m.diag)
stop("The method clv.model.vcov.jacobi.diag has not been implemented by this model!")
})
# . clv.model.predict ------------------------------------------------------------------------------------------
setMethod("clv.model.predict", signature(clv.model="clv.model"), definition = function(clv.model, clv.fitted, dt.predictions, verbose, continuous.discount.factor, ...){
stop("The method clv.model.predict has not been implemented by this model!")
})
# . clv.model.expectation ------------------------------------------------------------------------------------------
setMethod("clv.model.expectation", signature(clv.model="clv.model"), definition = function(clv.model, clv.fitted, dt.expectation.seq, verbose){
stop("The method clv.model.expectation has not been implemented by this model!")
})
# . clv.model.process.newdata ------------------------------------------------------------------------------------------
setMethod("clv.model.process.newdata", signature(clv.model="clv.model"), definition = function(clv.model, clv.fitted, user.newdata, verbose){
stop("The method clv.model.process.newdata has not been implemented by this model!")
})
# Default covariate model steps ----------------------------------------------------------------------------------------------------
# . clv.model.transform.start.params.cov -------------------------------------------------------------------------------------------
setMethod(f = "clv.model.transform.start.params.cov", signature = signature(clv.model="clv.model"), definition = function(clv.model, start.params.cov){
# Example: no transformation
# return(start.params.cov)
stop("The method clv.model.transform.start.params.cov has not been implemented by this model!")
})
# . clv.model.backtransform.estimated.params.cov ----------------------------------------------------------------------------------------
setMethod(f = "clv.model.backtransform.estimated.params.cov", signature = signature(clv.model="clv.model"), definition = function(clv.model, prefixed.params.cov){
# Example: no back transformation
# return(prefixed.params.cov)
stop("The method clv.model.backtransform.estimated.params.cov has not been implemented by this model!")
})
# .clv.model.probability.density -------------------------------------------------------------------------------------------------------
setMethod(f = "clv.model.probability.density", signature = signature(clv.model="clv.model"), definition = function(clv.model, x, clv.fitted){
stop("The method clv.model.probability.density has not been implemented for this model!")
})
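# Illustrative sketch (not part of the package source): a concrete model class overrides these
# stubs with setMethod(). The class name "clv.model.mymodel" is hypothetical; the body mirrors
# the pattern used by the models below (log-transforming start parameters for optimization).
if(FALSE){
  setMethod("clv.model.transform.start.params.model",
            signature = signature(clv.model="clv.model.mymodel"),
            definition = function(clv.model, original.start.params.model){
              # log() all user-given or default start params for unconstrained optimization
              return(setNames(log(original.start.params.model[clv.model@names.original.params.model]),
                              clv.model@names.prefixed.params.model))
            })
}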
|
/scratch/gouwar.j/cran-all/cranData/CLVTools/R/class_clv_model.R
|
#' @templateVar name_model_full BG/NBD
#' @template template_class_clvmodelnocov
#'
#' @importFrom methods setClass
#' @seealso Other clv model classes \linkS4class{clv.model}, \linkS4class{clv.model.bgnbd.static.cov}
#' @seealso Classes using its instance: \linkS4class{clv.fitted}
#'
#' @include all_generics.R class_clv_model_nocorrelation.R
setClass(Class = "clv.model.bgnbd.no.cov", contains = "clv.model.no.correlation")
#' @importFrom methods new
clv.model.bgnbd.no.cov <- function(){
return(new("clv.model.bgnbd.no.cov",
name.model = "BG/NBD Standard",
names.original.params.model = c(r="r", alpha="alpha", a="a", b="b"),
names.prefixed.params.model = c("log.r", "log.alpha", "log.a", "log.b"),
start.params.model = c(r=1, alpha = 3, a = 1, b = 3)))
}
# Methods --------------------------------------------------------------------------------------------------------------------------------
#' @include all_generics.R
setMethod(f = "clv.model.check.input.args", signature = signature(clv.model="clv.model.bgnbd.no.cov"), definition = function(clv.model, clv.fitted, start.params.model, optimx.args, verbose, ...){
err.msg <- c()
# Have to be > 0 as will be logged
if(any(start.params.model <= 0)){
err.msg <- c(err.msg, "Please provide only model start parameters greater than 0 as they will be log()-ed for the optimization!")
}
check_err_msg(err.msg)
})
# .clv.model.put.estimation.input --------------------------------------------------------------------------------------------------------
# Nothing required, use clv.model.no.correlation
# .clv.model.transform.start.params.model --------------------------------------------------------------------------------------------------------
#' @importFrom stats setNames
setMethod("clv.model.transform.start.params.model", signature = signature(clv.model="clv.model.bgnbd.no.cov"), definition = function(clv.model, original.start.params.model){
# Log all user given or default start params
return(setNames(log(original.start.params.model[clv.model@names.original.params.model]),
clv.model@names.prefixed.params.model))
})
# .clv.model.backtransform.estimated.params.model --------------------------------------------------------------------------------------------------------
setMethod("clv.model.backtransform.estimated.params.model", signature = signature(clv.model="clv.model.bgnbd.no.cov"), definition = function(clv.model, prefixed.params.model){
# exp all prefixed params
return(exp(prefixed.params.model[clv.model@names.prefixed.params.model]))
})
# .clv.model.prepare.optimx.args --------------------------------------------------------------------------------------------------------
setMethod(f = "clv.model.prepare.optimx.args", signature = signature(clv.model="clv.model.bgnbd.no.cov"), definition = function(clv.model, clv.fitted, prepared.optimx.args){
# Only add LL function args, everything else is prepared already, incl. start parameters
optimx.args <- modifyList(prepared.optimx.args,
list(LL.function.sum = bgnbd_nocov_LL_sum,
LL.function.ind = bgnbd_nocov_LL_ind, # if doing correlation
obj = clv.fitted,
vX = clv.fitted@cbs$x,
vT_x = clv.fitted@cbs$t.x,
vT_cal = clv.fitted@cbs$T.cal,
# parameter ordering for the callLL interlayer
LL.params.names.ordered = c(log.r = "log.r",log.alpha = "log.alpha", log.a = "log.a", log.b = "log.b")),
keep.null = TRUE)
return(optimx.args)
})
# . clv.model.process.post.estimation -----------------------------------------------------------------------------------------
setMethod("clv.model.process.post.estimation", signature = signature(clv.model="clv.model.bgnbd.no.cov"), definition = function(clv.model, clv.fitted, res.optimx){
# No additional step needed (ie store model specific stuff, extra process)
return(clv.fitted)
})
# clv.model.process.newdata --------------------------------------------------------------------------------------------------------
setMethod(f = "clv.model.process.newdata", signature = signature(clv.model = "clv.model.bgnbd.no.cov"), definition = function(clv.model, clv.fitted, verbose){
# clv.data in clv.fitted is already replaced with newdata here
# Need to only redo cbs if given new data
clv.fitted@cbs <- bgnbd_cbs(clv.data = clv.fitted@clv.data)
return(clv.fitted)
})
# . clv.model.expectation --------------------------------------------------------------------------------------------------------
#' @include all_generics.R
setMethod("clv.model.expectation", signature(clv.model="clv.model.bgnbd.no.cov"), function(clv.model, clv.fitted, dt.expectation.seq, verbose){
r <- alpha <- a <- b <- date.first.repeat.trans<- date.first.actual.trans <- T.cal <- t_i<- period.first.trans<-NULL
params_i <- clv.fitted@cbs[, c("Id", "T.cal", "date.first.actual.trans")]
fct.bgnbd.expectation <- function(params_i.t){return(bgnbd_nocov_expectation(r = clv.fitted@prediction.params.model[["r"]],
alpha = clv.fitted@prediction.params.model[["alpha"]],
a = clv.fitted@prediction.params.model[["a"]],
b = clv.fitted@prediction.params.model[["b"]],
vT_i = params_i.t$t_i))}
return(DoExpectation(dt.expectation.seq = dt.expectation.seq, params_i = params_i,
fct.expectation = fct.bgnbd.expectation, clv.time = clv.fitted@clv.data@clv.time))
})
# . clv.model.pmf --------------------------------------------------------------------------------------------------------
#' @include all_generics.R
setMethod("clv.model.pmf", signature=(clv.model="clv.model.bgnbd.no.cov"), function(clv.model, clv.fitted, x){
Id <- T.cal <- pmf.x <- NULL
dt.res <- clv.fitted@cbs[, list(Id, T.cal)]
dt.res[, pmf.x := bgnbd_nocov_PMF(r = clv.fitted@prediction.params.model[["r"]],
alpha = clv.fitted@prediction.params.model[["alpha"]],
a = clv.fitted@prediction.params.model[["a"]],
b = clv.fitted@prediction.params.model[["b"]],
vT_i = T.cal,
x = x)]
dt.res <- dt.res[, list(Id, pmf.x)]
setnames(dt.res, "pmf.x", paste0("pmf.x.", x))
return(dt.res)
})
# clv.model.predict --------------------------------------------------------------------------------------------------------
#' @include all_generics.R
setMethod("clv.model.predict", signature(clv.model="clv.model.bgnbd.no.cov"), function(clv.model, clv.fitted, dt.predictions, verbose, continuous.discount.factor, ...){
r <- alpha <- a <- b <- period.length <- CET <- PAlive <- i.CET <- i.PAlive <- x <- t.x <- T.cal <- NULL
predict.number.of.periods <- dt.predictions[1, period.length]
# To ensure sorting, do everything in a single table
dt.result <- copy(clv.fitted@cbs[, c("Id", "x", "t.x", "T.cal")])
# Add CET
dt.result[, CET := bgnbd_nocov_CET(r = clv.fitted@prediction.params.model[["r"]],
alpha = clv.fitted@prediction.params.model[["alpha"]],
a = clv.fitted@prediction.params.model[["a"]],
b = clv.fitted@prediction.params.model[["b"]],
dPeriods = predict.number.of.periods,
vX = x,
vT_x = t.x,
vT_cal = T.cal)]
# Add PAlive
dt.result[, PAlive := bgnbd_nocov_PAlive(r = clv.fitted@prediction.params.model[["r"]],
alpha = clv.fitted@prediction.params.model[["alpha"]],
a = clv.fitted@prediction.params.model[["a"]],
b = clv.fitted@prediction.params.model[["b"]],
vX = x,
vT_x = t.x,
vT_cal = T.cal)]
# Add results to prediction table, by matching Id
dt.predictions[dt.result, CET := i.CET, on = "Id"]
dt.predictions[dt.result, PAlive := i.PAlive, on = "Id"]
return(dt.predictions)
})
# .clv.model.vcov.jacobi.diag --------------------------------------------------------------------------------------------------------
setMethod(f = "clv.model.vcov.jacobi.diag", signature = signature(clv.model="clv.model.bgnbd.no.cov"), definition = function(clv.model, clv.fitted, prefixed.params){
# Create matrix with the full required size
m.diag <- diag(x = 0, ncol = length(prefixed.params), nrow=length(prefixed.params))
rownames(m.diag) <- colnames(m.diag) <- names(prefixed.params)
# Add the transformations for the model to the matrix
# All model params need to be exp()
m.diag[clv.model@names.prefixed.params.model,
clv.model@names.prefixed.params.model] <- diag(x = exp(prefixed.params[clv.model@names.prefixed.params.model]),
nrow = length(clv.model@names.prefixed.params.model),
ncol = length(clv.model@names.prefixed.params.model))
return(m.diag)
})
|
/scratch/gouwar.j/cran-all/cranData/CLVTools/R/class_clv_model_bgnbd.R
|
#' @templateVar name_model_full BG/NBD
#' @template template_class_clvmodelstaticcov
#'
#' @seealso Other clv model classes \linkS4class{clv.model}, \linkS4class{clv.model.bgnbd.no.cov}
#' @seealso Classes using its instance: \linkS4class{clv.fitted.transactions.static.cov}
#'
#' @include all_generics.R class_clv_model_bgnbd.R
setClass(Class = "clv.model.bgnbd.static.cov", contains = "clv.model.bgnbd.no.cov",
slots = list(start.param.cov = "numeric"),
prototype = list(
start.param.cov = numeric(0)
))
#' @importFrom methods new
clv.model.bgnbd.static.cov <- function(){
return(new("clv.model.bgnbd.static.cov",
clv.model.bgnbd.no.cov(),
name.model = "BG/NBD with Static Covariates",
start.param.cov = 0.1))
}
clv.model.bgnbd.static.cov.get.alpha_i <- function(clv.fitted){
alpha_i <- NULL
dt.alpha_i <- clv.fitted@cbs[, "Id"]
m.cov.data.trans <- clv.data.get.matrix.data.cov.trans(clv.fitted@clv.data, correct.row.names=dt.alpha_i$Id,
correct.col.names=names(clv.fitted@prediction.params.trans))
dt.alpha_i[, alpha_i := bgnbd_staticcov_alpha_i(alpha_0 = clv.fitted@prediction.params.model[["alpha"]],
vCovParams_trans = clv.fitted@prediction.params.trans,
mCov_trans = m.cov.data.trans)]
return(dt.alpha_i)
}
clv.model.bgnbd.static.cov.get.a_i <- function(clv.fitted){
a_i <- NULL
dt.a_i <- clv.fitted@cbs[, "Id"]
m.cov.data.life <- clv.data.get.matrix.data.cov.life(clv.fitted@clv.data, correct.row.names=dt.a_i$Id,
correct.col.names=names(clv.fitted@prediction.params.life))
dt.a_i[, a_i := bgnbd_staticcov_a_i(a_0 = clv.fitted@prediction.params.model[["a"]],
vCovParams_life = clv.fitted@prediction.params.life,
mCov_life = m.cov.data.life)]
return(dt.a_i)
}
clv.model.bgnbd.static.cov.get.b_i <- function(clv.fitted){
b_i <- NULL
dt.b_i <- clv.fitted@cbs[, "Id"]
m.cov.data.life <- clv.data.get.matrix.data.cov.life(clv.fitted@clv.data, correct.row.names=dt.b_i$Id,
correct.col.names=names(clv.fitted@prediction.params.life))
dt.b_i[, b_i := bgnbd_staticcov_b_i(b_0 = clv.fitted@prediction.params.model[["b"]],
vCovParams_life = clv.fitted@prediction.params.life,
mCov_life = m.cov.data.life)]
return(dt.b_i)
}
# Methods --------------------------------------------------------------------------------------------------------------------------------
# . clv.model.check.input.args ----------------------------------------------------------------------------------------------------------
# Use nocov
# . clv.model.put.estimation.input ------------------------------------------------------------------------------------------------------------
# Nothing specific required, use nocov
# . clv.model.transform.start.params.cov ------------------------------------------------------------------------------------------------------------
setMethod(f = "clv.model.transform.start.params.cov", signature = signature(clv.model="clv.model.bgnbd.static.cov"), definition = function(clv.model, start.params.cov){
# no transformation needed
return(start.params.cov)
})
# . clv.model.backtransform.estimated.params.cov -----------------------------------------------------------------------------------------------------
setMethod(f = "clv.model.backtransform.estimated.params.cov", signature = signature(clv.model="clv.model.bgnbd.static.cov"), definition = function(clv.model, prefixed.params.cov){
# no transformation needed
return(prefixed.params.cov)
})
# . clv.model.prepare.optimx.args -----------------------------------------------------------------------------------------------------
setMethod(f = "clv.model.prepare.optimx.args", signature = signature(clv.model="clv.model.bgnbd.static.cov"), definition = function(clv.model, clv.fitted, prepared.optimx.args){
# Do not call the no.cov function as the LL is different
# Everything to call the LL function
optimx.args <- modifyList(prepared.optimx.args,
list(LL.function.sum = bgnbd_staticcov_LL_sum,
LL.function.ind = bgnbd_staticcov_LL_ind, # if doing correlation
obj = clv.fitted,
vX = clv.fitted@cbs$x,
vT_x = clv.fitted@cbs$t.x,
vT_cal = clv.fitted@cbs$T.cal,
mCov_life = clv.data.get.matrix.data.cov.life(clv.data = clv.fitted@clv.data, correct.row.names=clv.fitted@cbs$Id,
correct.col.names=clv.data.get.names.cov.life(clv.fitted@clv.data)),
mCov_trans = clv.data.get.matrix.data.cov.trans(clv.data = clv.fitted@clv.data, correct.row.names=clv.fitted@cbs$Id,
correct.col.names=clv.data.get.names.cov.trans(clv.fitted@clv.data)),
# parameter ordering for the callLL interlayer
LL.params.names.ordered = c(clv.model@names.prefixed.params.model,
clv.fitted@names.prefixed.params.after.constr.life,
clv.fitted@names.prefixed.params.after.constr.trans),
keep.null = TRUE))
return(optimx.args)
})
# . clv.model.expectation -----------------------------------------------------------------------------------------------------
setMethod("clv.model.expectation", signature(clv.model="clv.model.bgnbd.static.cov"), function(clv.model, clv.fitted, dt.expectation.seq, verbose){
r <- alpha_i <- i.alpha_i <- a_i <- i.a_i <- b_i <- i.b_i <- date.first.repeat.trans<- date.first.actual.trans <- T.cal <- t_i<- period.first.trans<-NULL
params_i <- clv.fitted@cbs[, c("Id", "T.cal", "date.first.actual.trans")]
dt.alpha_i <- clv.model.bgnbd.static.cov.get.alpha_i(clv.fitted)
dt.a_i <- clv.model.bgnbd.static.cov.get.a_i(clv.fitted)
dt.b_i <- clv.model.bgnbd.static.cov.get.b_i(clv.fitted)
params_i[dt.alpha_i, alpha_i := i.alpha_i, on="Id"]
params_i[dt.a_i, a_i := i.a_i, on="Id"]
params_i[dt.b_i, b_i := i.b_i, on="Id"]
# Alpha is for the transaction process, a and b for the lifetime process!
fct.bgnbd.expectation <- function(params_i.t){
return(drop(bgnbd_staticcov_expectation(r = clv.fitted@prediction.params.model[["r"]],
vAlpha_i = params_i.t$alpha_i,
vA_i = params_i.t$a_i,
vB_i = params_i.t$b_i,
vT_i = params_i.t$t_i)))}
return(DoExpectation(dt.expectation.seq = dt.expectation.seq, params_i = params_i,
fct.expectation = fct.bgnbd.expectation, clv.time = clv.fitted@clv.data@clv.time))
})
# . clv.model.pmf --------------------------------------------------------------------------------------------------------
#' @include all_generics.R
setMethod("clv.model.pmf", signature=(clv.model="clv.model.bgnbd.static.cov"), function(clv.model, clv.fitted, x){
Id <- T.cal <- pmf.x <- alpha_i <-i.alpha_i <- a_i <- i.a_i <- b_i <- i.b_i <- NULL
dt.res <- clv.fitted@cbs[, c("Id", "T.cal")]
dt.alpha_i <- clv.model.bgnbd.static.cov.get.alpha_i(clv.fitted)
dt.a_i <- clv.model.bgnbd.static.cov.get.a_i(clv.fitted)
dt.b_i <- clv.model.bgnbd.static.cov.get.b_i(clv.fitted)
dt.res[dt.alpha_i, alpha_i := i.alpha_i, on="Id"]
dt.res[dt.a_i, a_i := i.a_i, on="Id"]
dt.res[dt.b_i, b_i := i.b_i, on="Id"]
dt.res[, pmf.x := bgnbd_staticcov_PMF(x = x, r = clv.fitted@prediction.params.model[["r"]],
vAlpha_i = alpha_i, vA_i = a_i, vB_i = b_i,
vT_i = T.cal)]
dt.res <- dt.res[, list(Id, pmf.x)]
setnames(dt.res, "pmf.x", paste0("pmf.x.", x))
return(dt.res)
})
# . clv.model.predict -----------------------------------------------------------------------------------------------------
setMethod("clv.model.predict", signature(clv.model="clv.model.bgnbd.static.cov"), function(clv.model, clv.fitted, dt.predictions, verbose, continuous.discount.factor, ...){
r <- alpha <- a <- b <- period.length <- CET <- PAlive <- i.CET <- i.PAlive <- x <- t.x <- T.cal <- NULL
predict.number.of.periods <- dt.predictions[1, period.length]
# To ensure sorting, do everything in a single table
dt.result <- copy(clv.fitted@cbs[, c("Id", "x", "t.x", "T.cal")])
data.cov.mat.life <- clv.data.get.matrix.data.cov.life(clv.data = clv.fitted@clv.data, correct.row.names=dt.result$Id,
correct.col.names=names(clv.fitted@prediction.params.life))
data.cov.mat.trans <- clv.data.get.matrix.data.cov.trans(clv.data = clv.fitted@clv.data, correct.row.names=dt.result$Id,
correct.col.names=names(clv.fitted@prediction.params.trans))
# Add CET
dt.result[, CET := bgnbd_staticcov_CET(r = clv.fitted@prediction.params.model[["r"]],
alpha = clv.fitted@prediction.params.model[["alpha"]],
a = clv.fitted@prediction.params.model[["a"]],
b = clv.fitted@prediction.params.model[["b"]],
dPeriods = predict.number.of.periods,
vX = x,
vT_x = t.x,
vT_cal = T.cal,
vCovParams_trans = clv.fitted@prediction.params.trans,
vCovParams_life = clv.fitted@prediction.params.life,
mCov_trans = data.cov.mat.trans,
mCov_life = data.cov.mat.life)]
# Add PAlive
dt.result[, PAlive := bgnbd_staticcov_PAlive(r = clv.fitted@prediction.params.model[["r"]],
alpha = clv.fitted@prediction.params.model[["alpha"]],
a = clv.fitted@prediction.params.model[["a"]],
b = clv.fitted@prediction.params.model[["b"]],
vX = x,
vT_x = t.x,
vT_cal = T.cal,
vCovParams_trans = clv.fitted@prediction.params.trans,
vCovParams_life = clv.fitted@prediction.params.life,
mCov_trans = data.cov.mat.trans,
mCov_life = data.cov.mat.life)]
# Add results to prediction table, by matching Id
dt.predictions[dt.result, CET := i.CET, on = "Id"]
dt.predictions[dt.result, PAlive := i.PAlive, on = "Id"]
return(dt.predictions)
})
# .clv.model.vcov.jacobi.diag --------------------------------------------------------------------------------------------------------
setMethod(f = "clv.model.vcov.jacobi.diag", signature = signature(clv.model="clv.model.bgnbd.static.cov"), definition = function(clv.model, clv.fitted, prefixed.params){
# Get corrections from nocov model
m.diag.model <- callNextMethod()
# No transformations for static covs: Set diag to 1 for all static cov params
# Gather names of cov param
names.cov.prefixed.params <- c(clv.fitted@names.prefixed.params.free.life,
clv.fitted@names.prefixed.params.free.trans)
if(clv.fitted@estimation.used.constraints)
names.cov.prefixed.params <- c(names.cov.prefixed.params, clv.fitted@names.prefixed.params.constr)
# Set to 1
m.diag.model[names.cov.prefixed.params,
names.cov.prefixed.params] <- diag(x = 1,
nrow = length(names.cov.prefixed.params),
ncol = length(names.cov.prefixed.params))
return(m.diag.model)
})
|
/scratch/gouwar.j/cran-all/cranData/CLVTools/R/class_clv_model_bgnbd_staticcov.R
|
#' CLV Model functionality for the Gamma-Gamma spending model
#'
#' This class implements the functionalities and model-specific steps which are required
#' to fit the Gamma-Gamma spending model.
#'
#' @importFrom methods setClass
#' @keywords internal
#' @include all_generics.R class_clv_model_nocorrelation.R
setClass(Class = "clv.model.gg", contains = "clv.model.no.correlation")
#' @importFrom methods new
clv.model.gg <- function(){
return(new("clv.model.gg",
name.model = "Gamma-Gamma",
names.original.params.model = c(p="p", q="q", gamma="gamma"),
names.prefixed.params.model = c(log.p="log.p", log.q="log.q", log.gamma="log.gamma"),
start.params.model = c(p=1, q=1, gamma=1),
optimx.defaults = list(method = "L-BFGS-B",
itnmax = 3000,
# upper = c(log(10000),log(10000),log(10000)),
# lower = c(log(0),log(0),log(0)),
control = list(
kkt = TRUE,
save.failures = TRUE,
# Do not perform starttests because it checks the scales with max(logpar)-min(logpar)
# but all standard start parameters are <= 0, hence there are no logpars, which
# produces a warning
starttests = FALSE))))
}
# Methods --------------------------------------------------------------------------------------------------------------------------------
# .clv.model.check.input.args -----------------------------------------------------------------------------------------------------------
setMethod(f = "clv.model.check.input.args", signature = signature(clv.model="clv.model.gg"), definition = function(clv.model, clv.fitted, start.params.model, optimx.args, verbose, ...){
err.msg <- c()
# Have to be > 0 as will be logged
if(any(start.params.model <= 0))
err.msg <- c(err.msg, "Please provide only model start parameters greater than 0 as they will be log()-ed for the optimization!")
check_err_msg(err.msg)
})
#' @importFrom stats setNames
setMethod("clv.model.transform.start.params.model", signature = signature(clv.model="clv.model.gg"), definition = function(clv.model, original.start.params.model){
# Log all user given or default start params
return(setNames(log(original.start.params.model[clv.model@names.original.params.model]),
clv.model@names.prefixed.params.model))
})
# .clv.model.backtransform.estimated.params.model --------------------------------------------------------------------------------------------------------
setMethod("clv.model.backtransform.estimated.params.model", signature = signature(clv.model="clv.model.gg"), definition = function(clv.model, prefixed.params.model){
# exp all prefixed params
return(exp(prefixed.params.model[clv.model@names.prefixed.params.model]))
})
# .clv.model.prepare.optimx.args --------------------------------------------------------------------------------------------------------
#' @importFrom utils modifyList
setMethod(f = "clv.model.prepare.optimx.args", signature = signature(clv.model="clv.model.gg"), definition = function(clv.model, clv.fitted, prepared.optimx.args){
optimx.args <- modifyList(prepared.optimx.args,
list(LL.function.sum = gg_LL,
LL.function.ind = NULL,
vX = clv.fitted@cbs$x,
vM_x = clv.fitted@cbs$Spending,
# parameter ordering for the callLL interlayer
LL.params.names.ordered = c(log.p="log.p", log.q="log.q",
log.gamma="log.gamma")),
keep.null = TRUE)
return(optimx.args)
})
# . clv.model.process.post.estimation -----------------------------------------------------------------------------------------
setMethod("clv.model.process.post.estimation", signature = signature(clv.model="clv.model.gg"), definition = function(clv.model, clv.fitted, res.optimx){
# No additional step needed (ie store model specific stuff, extra process)
return(clv.fitted)
})
# clv.model.process.newdata --------------------------------------------------------------------------------------------------------
setMethod(f = "clv.model.process.newdata", signature = signature(clv.model = "clv.model.gg"), definition = function(clv.model, clv.fitted, verbose){
# clv.data in clv.fitted is already replaced with newdata here
# Only need to redo cbs if new data is given
clv.fitted@cbs <- gg_cbs(clv.data = clv.fitted@clv.data, remove.first.transaction = clv.fitted@estimation.removed.first.transaction)
return(clv.fitted)
})
# .clv.model.predict -------------------------------------------------------------------------------------------------------------------
setMethod("clv.model.predict", signature(clv.model="clv.model.gg"), function(clv.model, clv.fitted, dt.predictions, verbose, ...){
cbs.x <- cbs.Spending <- i.Spending <- i.x <- predicted.mean.spending <- NULL
p <- clv.fitted@prediction.params.model[["p"]]
q <- clv.fitted@prediction.params.model[["q"]]
gamma <- clv.fitted@prediction.params.model[["gamma"]]
# Predict spending
# add data from cbs by Id to ensure matching
dt.predictions[clv.fitted@cbs, cbs.x := i.x, on="Id"]
dt.predictions[clv.fitted@cbs, cbs.Spending := i.Spending, on="Id"]
dt.predictions[, predicted.mean.spending := (gamma + cbs.Spending * cbs.x) * p/(p * cbs.x + q - 1)]
dt.predictions[, cbs.x := NULL]
dt.predictions[, cbs.Spending := NULL]
return(dt.predictions)
})
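# Illustrative sketch (not part of the package source): the Gamma-Gamma conditional expected
# spending used above, E[M | x, Spending] = (gamma + Spending*x) * p / (p*x + q - 1),
# evaluated for a single hypothetical customer with hypothetical parameter values.
if(FALSE){
  p <- 2; q <- 3; gamma <- 10        # hypothetical parameter estimates
  x <- 5; mean.spending <- 40        # 5 transactions with an average value of 40
  (gamma + mean.spending * x) * p / (p * x + q - 1)  # = 35
}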
# .clv.model.vcov.jacobi.diag --------------------------------------------------------------------------------------------------------
setMethod(f = "clv.model.vcov.jacobi.diag", signature = signature(clv.model="clv.model.gg"), definition = function(clv.model, clv.fitted, prefixed.params){
# Jeff:
# Delta method:
# h=(log(t),log(t),log(t),log(t),t,t,t)
# g=h^-1=(exp(t),exp(t),exp(t),exp(t),t,t,t)
# Deltaexp = g' = (exp(t),exp(t),exp(t),exp(t),1,1,1)
# Create matrix with the full required size
m.diag <- diag(x = 0, ncol = length(prefixed.params), nrow=length(prefixed.params))
rownames(m.diag) <- colnames(m.diag) <- names(prefixed.params)
# Add the transformations for the model to the matrix
# All model params need to be exp()
m.diag[clv.model@names.prefixed.params.model,
clv.model@names.prefixed.params.model] <- diag(x = exp(prefixed.params[clv.model@names.prefixed.params.model]),
nrow = length(clv.model@names.prefixed.params.model),
ncol = length(clv.model@names.prefixed.params.model))
return(m.diag)
})
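# Illustrative sketch (not part of the package source): the delta-method correction built above.
# If the optimizer works on log-scale parameters, the vcov of the back-transformed (exp()-ed)
# parameters is J %*% vcov(log-scale) %*% t(J) with J = diag(exp(log-scale estimates)),
# which is exactly the diagonal assembled in clv.model.vcov.jacobi.diag. All numbers are hypothetical.
if(FALSE){
  prefixed.params <- c(log.p=0.5, log.q=0.2, log.gamma=1)   # hypothetical log-scale estimates
  vcov.prefixed   <- diag(c(0.04, 0.09, 0.01))              # hypothetical vcov on the log scale
  J <- diag(exp(prefixed.params))
  vcov.original <- J %*% vcov.prefixed %*% t(J)
}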
# .clv.model.probability.density -------------------------------------------------------------------------------------------------------
setMethod(f = "clv.model.probability.density", signature = signature(clv.model="clv.model.gg"), definition = function(clv.model, x, clv.fitted){
a1 <- a2 <- a3 <- a4 <- a5 <- g1 <- x1 <- NULL
cbs <- copy(clv.fitted@cbs[x>0,])
setnames(cbs, "x", "x1")
p <- coef(clv.fitted)["p"]
q <- coef(clv.fitted)["q"]
gamma <- coef(clv.fitted)["gamma"]
results <- sapply(x, function(zbar){
cbs[,a1 := lgamma(p*x1+q)-lgamma(p*x1)-lgamma(q)]
cbs[,a2 := q*log(gamma)]
cbs[,a3 := (p*x1-1)*log(zbar)]
cbs[,a4 := (p*x1)*log(x1)]
cbs[,a5 := (p*x1+q)*log(gamma+x1*zbar)]
cbs[,g1 := exp(a1+a2+a3+a4-a5)]
return(cbs[,mean(g1)])
})
return(results)
})
|
/scratch/gouwar.j/cran-all/cranData/CLVTools/R/class_clv_model_gg.R
|
#' @templateVar name_model_full GGompertz/NBD
#' @template template_class_clvmodelnocov
#'
#' @seealso Other clv model classes \linkS4class{clv.model}, \linkS4class{clv.model.ggomnbd.static.cov}
#' @seealso Classes using its instance: \linkS4class{clv.fitted}
#'
#' @include all_generics.R class_clv_model_nocorrelation.R
#' @importFrom methods setClass
setClass(Class = "clv.model.ggomnbd.no.cov", contains = "clv.model.no.correlation")
clv.model.ggomnbd.no.cov <- function(){
return(new("clv.model.ggomnbd.no.cov",
name.model = "GGompertz/NBD Standard",
names.original.params.model = c(r="r", alpha="alpha", b="b", s="s", beta="beta"),
names.prefixed.params.model = c("log.r","log.alpha", "log.b", "log.s", "log.beta"),
start.params.model = c(r=1, alpha=1, b=1, s=1, beta=1),
optimx.defaults = list(method = "L-BFGS-B",
itnmax = 5000,
control = list(
kkt = TRUE,
all.methods = FALSE,
save.failures = TRUE,
# Do not perform starttests because it checks the scales with max(logpar)-min(logpar)
# but all standard start parameters are <= 0, hence there are no logpars, which
# produces a warning
starttests = FALSE))))
}
# Methods --------------------------------------------------------------------------------------------------------------------------------
#' @include all_generics.R
setMethod(f = "clv.model.check.input.args", signature = signature(clv.model="clv.model.ggomnbd.no.cov"), definition = function(clv.model, clv.fitted, start.params.model, optimx.args, verbose, ...){
err.msg <- c()
# Have to be > 0 as will be logged
if(any(start.params.model <= 0)){
err.msg <- c(err.msg, "Please provide only model start parameters greater than 0 as they will be log()-ed for the optimization!")
}
check_err_msg(err.msg)
})
# .clv.model.put.estimation.input --------------------------------------------------------------------------------------------------------
# Nothing required, use clv.model.no.correlation
#' @importFrom stats setNames
setMethod("clv.model.transform.start.params.model", signature = signature(clv.model="clv.model.ggomnbd.no.cov"), definition = function(clv.model, original.start.params.model){
# Log all user given or default start params
return(setNames(log(original.start.params.model[clv.model@names.original.params.model]),
clv.model@names.prefixed.params.model))
})
setMethod("clv.model.backtransform.estimated.params.model", signature = signature(clv.model="clv.model.ggomnbd.no.cov"), definition = function(clv.model, prefixed.params.model){
# exp all prefixed params
return(exp(prefixed.params.model[clv.model@names.prefixed.params.model]))
})
# . clv.model.process.post.estimation -----------------------------------------------------------------------------------------
setMethod("clv.model.process.post.estimation", signature = signature(clv.model="clv.model.ggomnbd.no.cov"), definition = function(clv.model, clv.fitted, res.optimx){
# No additional step needed (ie store model specific stuff, extra process)
return(clv.fitted)
})
setMethod(f = "clv.model.process.newdata", signature = signature(clv.model = "clv.model.ggomnbd.no.cov"), definition = function(clv.model, clv.fitted, verbose){
# clv.data in clv.fitted is already replaced with newdata here
# Need to only redo cbs if given new data
clv.fitted@cbs <- ggomnbd_cbs(clv.data = clv.fitted@clv.data)
return(clv.fitted)
})
setMethod(f = "clv.model.prepare.optimx.args", signature = signature(clv.model="clv.model.ggomnbd.no.cov"), definition = function(clv.model, clv.fitted, prepared.optimx.args){
# Also model optimization settings should go here
# Only add LL function args, everything else is prepared already, incl. start parameters
optimx.args <- modifyList(prepared.optimx.args,
list(LL.function.sum = ggomnbd_nocov_LL_sum,
LL.function.ind = ggomnbd_nocov_LL_ind, # if doing correlation
obj = clv.fitted,
vX = clv.fitted@cbs$x,
vT_x = clv.fitted@cbs$t.x,
vT_cal = clv.fitted@cbs$T.cal,
# parameter ordering for the callLL interlayer
LL.params.names.ordered = c(log.r = "log.r",log.alpha = "log.alpha", log.b = "log.b", log.s = "log.s", log.beta = "log.beta")),
keep.null = TRUE)
return(optimx.args)
})
#' @include all_generics.R
#' @importFrom stats integrate
setMethod("clv.model.expectation", signature(clv.model="clv.model.ggomnbd.no.cov"), function(clv.model, clv.fitted, dt.expectation.seq, verbose){
r <- alpha <- beta <- b <- s <- t_i <- tau <- NULL
params_i <- clv.fitted@cbs[, c("Id", "T.cal", "date.first.actual.trans")]
fct.expectation <- function(params_i.t){
return(drop(ggomnbd_nocov_expectation(r = clv.fitted@prediction.params.model[["r"]],
alpha_0 = clv.fitted@prediction.params.model[["alpha"]],
beta_0 = clv.fitted@prediction.params.model[["beta"]],
b = clv.fitted@prediction.params.model[["b"]],
s = clv.fitted@prediction.params.model[["s"]],
vT_i = params_i.t$t_i)))
}
return(DoExpectation(dt.expectation.seq = dt.expectation.seq, params_i = params_i,
fct.expectation = fct.expectation, clv.time = clv.fitted@clv.data@clv.time))
})
# . clv.model.pmf --------------------------------------------------------------------------------------------------------
setMethod("clv.model.pmf", signature=(clv.model="clv.model.ggomnbd.no.cov"), function(clv.model, clv.fitted, x){
stop("PMF is not available for ggomnbd!", call.=FALSE)
})
# . clv.model.predict --------------------------------------------------------------------------------------------------------
#' @include all_generics.R
setMethod("clv.model.predict", signature(clv.model="clv.model.ggomnbd.no.cov"), function(clv.model, clv.fitted, dt.predictions, verbose, continuous.discount.factor, ...){
r <- alpha <- b <- s <- beta <- x <- t.x <- T.cal <- PAlive <- i.PAlive <- CET <- i.CET <- period.length <- NULL
predict.number.of.periods <- dt.predictions[1, period.length]
# To ensure sorting, do everything in a single table
dt.result <- copy(clv.fitted@cbs[, c("Id", "x", "t.x", "T.cal")])
# Add CET
dt.result[, CET := ggomnbd_nocov_CET(r = clv.fitted@prediction.params.model[["r"]],
alpha_0 = clv.fitted@prediction.params.model[["alpha"]],
b = clv.fitted@prediction.params.model[["b"]],
s = clv.fitted@prediction.params.model[["s"]],
beta_0 = clv.fitted@prediction.params.model[["beta"]],
dPeriods = predict.number.of.periods,
vX = x,
vT_x = t.x,
vT_cal = T.cal)]
# Add PAlive
dt.result[, PAlive := ggomnbd_nocov_PAlive(r = clv.fitted@prediction.params.model[["r"]],
alpha_0 = clv.fitted@prediction.params.model[["alpha"]],
b = clv.fitted@prediction.params.model[["b"]],
s = clv.fitted@prediction.params.model[["s"]],
beta_0 = clv.fitted@prediction.params.model[["beta"]],
vX = x,
vT_x = t.x,
vT_cal = T.cal)]
# Add results to prediction table, by matching Id
dt.predictions[dt.result, CET := i.CET, on = "Id"]
dt.predictions[dt.result, PAlive := i.PAlive, on = "Id"]
return(dt.predictions)
})
# .clv.model.vcov.jacobi.diag --------------------------------------------------------------------------------------------------------
setMethod(f = "clv.model.vcov.jacobi.diag", signature = signature(clv.model="clv.model.ggomnbd.no.cov"), definition = function(clv.model, clv.fitted, prefixed.params){
# Create matrix with the full required size
m.diag <- diag(x = 0, ncol = length(prefixed.params), nrow=length(prefixed.params))
rownames(m.diag) <- colnames(m.diag) <- names(prefixed.params)
# Add the transformations for the model to the matrix
# All model params need to be exp()
m.diag[clv.model@names.prefixed.params.model,
clv.model@names.prefixed.params.model] <- diag(x = exp(prefixed.params[clv.model@names.prefixed.params.model]),
nrow = length(clv.model@names.prefixed.params.model),
ncol = length(clv.model@names.prefixed.params.model))
return(m.diag)
})
|
/scratch/gouwar.j/cran-all/cranData/CLVTools/R/class_clv_model_ggomnbd_nocov.R
|