# Function that ensures '...' is empty (arguments must be matched by name):
check_dots <- function(...) {
dots <- list(...)
if (length(dots) > 0) {
stop(
"'...' is not used. Arguments must be matched by name!",
call. = FALSE
)
}
}
# Function that checks whether values can be parsed as dates of the given format:
check_dates <- function(x,
date.format = "%Y-%m-%d"
){
tryCatch(
!is.na(as.Date(x, date.format)),
error = function(e) {FALSE}
)
}
# Infix helper: fall back to y if x is NULL:
`%||%` <- function(x, y) if (!is.null(x)) x else y
# Infix helper: fall back to y if x is NA:
`%NA%` <- function(x, y) if (!is.na(x)) x else y
/scratch/gouwar.j/cran-all/cranData/weibulltools/R/utils.R
# Function that checks if distribution has a threshold parameter:
has_thres <- function(distribution) {
distribution %in% c("weibull3", "lognormal3", "loglogistic3", "exponential2")
}
# Function that removes the trailing number. Used for threshold distributions:
std_parametric <- function(distribution) {
sub("[[:digit:]]$", "", distribution)
}
# Function that checks for standard variant of distribution (without threshold):
is_std_parametric <- function(distribution, dist_params) {
n_par <- length(dist_params)
if (distribution == "exponential") {
n_par == 1L
} else {
n_par == 2L
}
}
# Function that checks the correctness between input distribution and parameters:
check_dist_params <- function(dist_params, distribution) {
three_parametric <- distribution %in%
c("weibull3", "lognormal3", "loglogistic3")
one_parametric <- distribution == "exponential"
name_arg <- as.character(substitute(dist_params))
if (three_parametric && length(dist_params) != 3) {
stop(
"A three-parametric distribution needs three parameters but ",
sQuote(name_arg), " has length ", length(dist_params), ".",
call. = FALSE
)
}
if (!three_parametric && !one_parametric && length(dist_params) != 2) {
stop(
"A two-parametric distribution needs two parameters but ",
sQuote(name_arg), " has length ", length(dist_params), ".",
call. = FALSE
)
}
if (one_parametric && length(dist_params) != 1) {
stop(
"A one-parametric distribution needs one parameter but ",
sQuote(name_arg), " has length ", length(dist_params), ".",
call. = FALSE
)
}
}
# Function that checks compatibility of plotting grid and model
check_compatible_distributions <- function(p_obj_dist, model_dist) {
if (p_obj_dist != std_parametric(model_dist)) {
msg <- paste0(
"Incompatible distributions! Probability plot has distribution '",
p_obj_dist,
"' whereas model has distribution '",
model_dist,
"'."
)
stop(
errorCondition(
message = msg,
class = "incompatible_distributions"
)
)
}
}
# Function that converts Weibull loc-sc parameters to shape and scale:
to_shape_scale_params <- function(loc_sc_params) {
# Coefficients:
wb_params <- c(
eta = exp(loc_sc_params[[1]]),
beta = 1 / loc_sc_params[[2]],
gamma = if (length(loc_sc_params) == 3L) loc_sc_params[[3]] else NULL
)
wb_params
}
# Function that converts Weibull location-scale confint to shape-scale confint:
to_shape_scale_confint <- function(loc_sc_confint) {
# Confidence intervals:
wb_confint <- loc_sc_confint
wb_confint[1, ] <- exp(wb_confint[1, ])
wb_confint[2, ] <- rev(1 / wb_confint[2, ])
# Names:
rownames(wb_confint)[1:2] <- c("eta", "beta")
wb_confint
}
# Function that converts Weibull shape-scale parameters to location and scale:
to_location_scale_params <- function(shape_scale_params) {
# Coefficients:
wb_params <- c(
mu = log(shape_scale_params[[1]]),
sigma = 1 / shape_scale_params[[2]],
gamma = if (length(shape_scale_params) == 3L) shape_scale_params[[3]] else NULL
)
wb_params
}
# Function that converts Weibull shape-scale confint to location and scale confint:
to_location_scale_confint <- function(shape_scale_confint) {
# Confidence intervals:
wb_confint <- shape_scale_confint
wb_confint[1, ] <- log(wb_confint[1, ])
wb_confint[2, ] <- rev(1 / wb_confint[2, ])
# Names:
rownames(wb_confint)[1:2] <- c("mu", "sigma")
wb_confint
}
# Function that standardizes lifetime characteristic depending on distribution:
standardize <- function(x, dist_params, distribution) {
# Length dist_params:
n <- length(dist_params)
# Threshold model:
if (has_thres(distribution)) {
x <- x - dist_params[[n]]
n <- n - 1
}
# Standard-parametric model with q or q - threshold:
distribution <- std_parametric(distribution)
# (log-)location-scale:
if (distribution %in% c("weibull", "lognormal", "loglogistic")) {
x <- log(x)
}
# Standardize:
z <- if (distribution != "exponential") {
## Location-scale:
(x - dist_params[[1]]) / dist_params[[n]]
} else {
## Scale:
x / dist_params[[1]]
}
z
}
# Quantile, probability and density functions of the smallest extreme value dist:
## Quantile function:
qsev <- function(p) {
p <- ifelse(p >= 0.9999999999999999, 0.9999999999999999, p)
p <- ifelse(p <= 1 - 0.9999999999999999, 1 - 0.9999999999999999, p)
log(-log(1 - p))
}
## Probability function (cdf):
psev <- function(q) {
1 - exp(-exp(q))
}
## Density function (pdf):
dsev <- function(x) {
exp(x - exp(x))
}
# Function that simulates a sample of a 'Dirichlet' distribution:
rdirichlet <- function(n, par) {
k <- length(par)
z <- matrix(0, nrow = n, ncol = k)
s <- matrix(0, nrow = n)
for (i in 1:k) {
z[, i] <- stats::rgamma(n, shape = par[i])
s <- s + z[, i]
}
for (i in 1:k) {
z[, i] <- z[, i]/s
}
return(z)
}
# Standard quantile function:
q_std <- function(p, distribution) {
switch(
distribution,
"weibull" =,
"sev" = qsev(p),
"lognormal" =,
"normal" = stats::qnorm(p),
"loglogistic" =,
"logistic" = stats::qlogis(p),
"exponential" = stats::qexp(p)
)
}
# Standard probability function:
p_std <- function(q, distribution) {
switch(
distribution,
"weibull" =,
"sev" = psev(q),
"lognormal" =,
"normal" = stats::pnorm(q),
"loglogistic" =,
"logistic" = stats::plogis(q),
"exponential" = stats::pexp(q)
)
}
/scratch/gouwar.j/cran-all/cranData/weibulltools/R/utils_parametric_models.R
#' @name weibulltools-package
#' @aliases weibulltools
#' @title weibulltools
#'
#' @description
#' Provides statistical methods and visualizations that are often used in
#' reliability engineering. Comprises a compact and easily accessible set of
#' methods and visualization tools that make the examination and adjustment as
#' well as the analysis and interpretation of field data (and bench tests) as
#' simple as possible.
#'
#' Besides the well-known Weibull analysis, the package supports multiple
#' lifetime distributions and also contains Monte Carlo methods for the
#' correction and completion of imprecisely recorded or unknown lifetime
#' characteristics.
#'
#' Plots are created statically
#' (\code{\link[ggplot2:ggplot2-package]{ggplot2}}) or
#' interactively (\code{\link[plotly:plot_ly]{plotly}}) and can be
#' customized with functions of the respective visualization package.
#'
#' @docType package
#' @useDynLib weibulltools, .registration = TRUE
#' @import Rcpp
#' @importFrom Rcpp sourceCpp
#' @importFrom magrittr "%>%"
#' @importFrom utils hasName
#' @importFrom dplyr .data
## usethis namespace: start
#' @importFrom lifecycle deprecated
#' @importFrom lifecycle deprecate_soft
#' @importFrom lifecycle deprecate_warn
## usethis namespace: end
NULL
/scratch/gouwar.j/cran-all/cranData/weibulltools/R/weibulltools-package.R
## ----setup, echo=FALSE, message=FALSE-----------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
screenshot.force = FALSE,
comment = "#>"
)
library(weibulltools)
## ----rank_densities, fig.cap = "Figure 1: Densities for different ranks i in samples of size n = 10.", message = FALSE, warning = FALSE----
library(dplyr) # data manipulation
library(ggplot2) # visualization
x <- seq(0, 1, length.out = 100) # CDF
n <- 10 # sample size
i <- c(1, 3, 5, 7, 9) # ranks
r <- n - i + 1 # inverse ranking
df_dens <- expand.grid(cdf = x, i = i) %>%
mutate(n = n, r = n - i + 1, pdf = dbeta(x = x, shape1 = i, shape2 = r))
densplot <- ggplot(data = df_dens, aes(x = cdf, y = pdf, colour = as.factor(i))) +
geom_line() +
scale_colour_discrete(guide = guide_legend(title = "i")) +
theme_bw() +
labs(x = "Failure Probability", y = "Density")
densplot
## ----dataset_shock, message = FALSE-------------------------------------------
shock_tbl <- reliability_data(data = shock, x = distance, status = status)
shock_tbl
## ----failure_probabilities----------------------------------------------------
# Estimate CDF with both methods:
cdf_tbl <- estimate_cdf(shock_tbl, methods = c("mr", "johnson"))
# First case where only failed units are taken into account:
cdf_tbl_mr <- cdf_tbl %>% filter(cdf_estimation_method == "mr")
cdf_tbl_mr
# Second case where both survived and failed units are considered:
cdf_tbl_john <- cdf_tbl %>% filter(cdf_estimation_method == "johnson")
cdf_tbl_john
## ----probability_plot_weibull, fig.cap = "Figure 3: Plotting positions in Weibull grid.", message = FALSE----
# Weibull grid for estimated probabilities:
weibull_grid <- plot_prob(
cdf_tbl,
distribution = "weibull",
title_main = "Weibull Probability Plot",
title_x = "Mileage in km",
title_y = "Probability of Failure in %",
title_trace = "Method",
plot_method = "ggplot2"
)
weibull_grid
## ----probability_plot_log-normal, fig.cap = "Figure 4: Plotting positions in log-normal grid.", message = FALSE----
# Log-normal grid for estimated probabilities:
lognorm_grid <- plot_prob(
cdf_tbl,
distribution = "lognormal",
title_main = "Log-normal Probability Plot",
title_x = "Mileage in km",
title_y = "Probability of Failure in %",
title_trace = "Method",
plot_method = "ggplot2"
)
lognorm_grid
/scratch/gouwar.j/cran-all/cranData/weibulltools/inst/doc/Life_Data_Analysis_Part_I.R
---
title: "Life Data Analysis Part I - Estimation of Failure Probabilities"
subtitle: "A Non-parametric Approach"
author:
- "Tim-Gunnar Hensel"
- "David Barkemeyer"
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
fig_height: 6
fig_width: 7
fig_caption: yes
vignette: >
%\VignetteEngine{knitr::rmarkdown}
%\VignetteIndexEntry{Life Data Analysis Part I - Estimation of Failure Probabilities}
%\VignetteEncoding{UTF-8}
---
```{r setup, echo=FALSE, message=FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
screenshot.force = FALSE,
comment = "#>"
)
library(weibulltools)
```
This document presents non-parametric estimation methods for the computation of failure probabilities of complete data (failures) taking (multiple) right-censored units into account. A unit can be either a single component, an assembly or an entire system.
Furthermore, the estimation results are presented in distribution-specific probability plots.
## Introduction to Life Data Analysis
If the lifetime (or any other damage-equivalent quantity such as distance or load cycles) of a unit is considered to be a continuous random variable _T_, then the probability that a unit has failed by _t_ is defined by its _CDF (cumulative distribution function)_ _F(t)_.
$$ P(T\leq t) = F(t) $$
In order to obtain an estimate of the _CDF_ for each observation $t_1, t_2, ..., t_n$, two approaches are possible. Using a parametric lifetime distribution requires that the underlying assumptions for the sample data are valid. If the distribution-specific assumptions are correct, the model parameters can be estimated and the _CDF_ is computable. But if the assumptions do not hold, interpretations and derived conclusions are not reliable.
A more general approach for the calculation of cumulative failure probabilities is to use non-parametric statistical estimators $\hat{F}(t_1), \hat{F}(t_2), ..., \hat{F}(t_n)$. In comparison to a parametric distribution, no general distributional assumptions have to be made. For non-parametric estimators, an ordered sample of size $n$ is needed. Starting at $1$, the ranks $i \in \{1, 2, ..., n \}$ are assigned to the sample values sorted in ascending order. Since there is a known relationship between the ranks and the corresponding ranking probabilities, a _CDF_ can be determined.
However, rank distributions are systematically skewed, and thus the median value rather than the expected value $E\left[F\left(t_i\right)\right] = \frac{i}{n + 1}$ is used for the estimation [^note1]. This skewness is visualized in _Figure 1_.
[^note1]: Kapur, K. C.; Lamberson, L. R.: _Reliability in Engineering Design_,
_New York: Wiley_, 1977, pp. 297-301
```{r rank_densities, fig.cap = "Figure 1: Densities for different ranks i in samples of size n = 10.", message = FALSE, warning = FALSE}
library(dplyr) # data manipulation
library(ggplot2) # visualization
x <- seq(0, 1, length.out = 100) # CDF
n <- 10 # sample size
i <- c(1, 3, 5, 7, 9) # ranks
r <- n - i + 1 # inverse ranking
df_dens <- expand.grid(cdf = x, i = i) %>%
mutate(n = n, r = n - i + 1, pdf = dbeta(x = x, shape1 = i, shape2 = r))
densplot <- ggplot(data = df_dens, aes(x = cdf, y = pdf, colour = as.factor(i))) +
geom_line() +
scale_colour_discrete(guide = guide_legend(title = "i")) +
theme_bw() +
labs(x = "Failure Probability", y = "Density")
densplot
```
### Failure Probability Estimation
In practice, a simplification for the calculation of the median value, also called median rank, is made. The formula of _Benard's_ approximation is given by
$$\hat{F}(t_i) \approx \frac{i - 0.3}{n + 0.4} $$
and is described in _The Plotting of Observations on Probability Paper_ [^note2].
[^note2]: Benard, A.; Bos-Levenbach, E. C.: _The Plotting of Observations on Probability Paper_,
_Statistica Neerlandica 7 (3)_, 1953, pp. 163-173
However, this equation only provides valid estimates for failure probabilities if all units in the sample are defectives (`estimate_cdf(methods = "mr", ...)`).
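As a quick, self-contained check (not part of the package API), Benard's approximation can be compared with the exact median ranks, which are the $50\,\%$ quantiles of a $Beta(i, n - i + 1)$ distribution:
```r
# Minimal sketch: Benard's approximation versus the exact median ranks.
n <- 10
i <- seq_len(n)

benard <- (i - 0.3) / (n + 0.4)
exact  <- qbeta(0.5, shape1 = i, shape2 = n - i + 1)

round(cbind(i, benard, exact), 4)
```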
In field data analysis, however, the sample mainly consists of intact units and only a small fraction of units has failed. Units that show no damage at the point of analysis and have not yet reached the operating time or mileage of units that have already failed are potential candidates for future failures. Since these units may still fail within a specific time span, such as the warranty period, the _CDF_ must be adjusted upwards to account for these potential candidates.
A commonly used method for correcting probabilities of (multiple) right-censored data is _Johnson's_ method (`estimate_cdf(methods = "johnson", ...)`). With this method, all units included in the analysis period are sorted in ascending order of their operating time (or any other damage-equivalent quantity). If there are units that have not failed before the _i_-th failure, an adjusted rank for the _i_-th failure is formed. This correction takes the potential candidates into account and increases the rank number. In consequence, a higher rank leads to a higher failure probability. This can be seen in _Figure 1_.
The rank adjustment is determined with:
$$j_i = j_{i-1} + x_i \cdot I_i, \;\; \text{with} \;\; j_0 = 0$$
Here, $j_{i-1}$ is the adjusted rank of the previous failure, $x_i$ is the number of defectives at $t_i$ and $I_i$ is the increment that corrects the considered rank by the potential candidates.
$$I_i=\frac{(n+1)-j_{i-1}}{1+(n-n_i)}$$
The sample size is $n$ and $n_i$ is the number of units that have a lower $t$ than the _i_-th unit. Once the adjusted ranks are calculated, the _CDF_ can be estimated according to _Benard's_ approximation.
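The following sketch applies these formulas to a small, made-up censored sample; all names and values are illustrative, and in practice `estimate_cdf()` performs these steps for you:
```r
# Minimal sketch of Johnson's rank adjustment for a hand-made censored sample,
# sorted by the lifetime characteristic.
t      <- c(10, 15, 20, 25, 30, 35)
status <- c(1, 0, 1, 1, 0, 1)     # 1 = failure, 0 = censored
n      <- length(t)

j_prev   <- 0                     # j_0 = 0
adj_rank <- rep(NA_real_, n)

for (i in seq_len(n)) {
  if (status[i] == 1) {
    n_i    <- i - 1               # units with a lower t than the i-th unit
    I_i    <- ((n + 1) - j_prev) / (1 + (n - n_i))
    j_prev <- j_prev + 1 * I_i    # x_i = 1 failure at t_i
    adj_rank[i] <- j_prev
  }
}

# Benard's approximation applied to the adjusted ranks (NA for censored units):
(adj_rank - 0.3) / (n + 0.4)
```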
Other methods in {weibulltools} that can also handle (multiple) right-censored data are the _Kaplan-Meier_ estimator (`estimate_cdf(methods = "kaplan", ...)`) and the _Nelson-Aalen_ estimator (`estimate_cdf(methods = "nelson", ...)`).
### Probability Plotting
After computing failure probabilities a method called _probability plotting_ is applicable. It is a graphical _goodness of fit_ technique that is used in assessing whether an assumed distribution is appropriate to model the sample data.
The axes of a probability plot are transformed in such a way that the _CDF_ of a specified model is represented through a straight line. If the plotted points (`plot_prob()`) lie on an approximately straight line it can be said that the chosen distribution is adequate.
The two-parameter Weibull distribution can be parameterized with parameters $\mu$ and $\sigma$ such that the _CDF_ is characterized by the following equation:
$$F(t)=\Phi_{SEV}\left(\frac{\log(t) - \mu}{\sigma}\right)$$
The advantage of this representation is that the Weibull is part of the (log-)location-scale family. A linearized representation of this _CDF_ is:
$$\Phi^{-1}_{SEV}\left[F(t)\right]=\frac{1}{\sigma} \cdot \log(t) - \frac{\mu}{\sigma}$$
This leads to the following transformations regarding the axes:
* Abscissa: $x = \log(t)$
* Ordinate: $y = \Phi^{-1}_{SEV}\left[F(t)\right]$, which is the quantile function
of the SEV (_smallest extreme value_) distribution and can be written out with
$\log\left\{-\log\left[1-F(t)\right]\right\}$.
Another parameterization of the Weibull _CDF_, using the parameters $\eta$ and $\beta$, is defined by the following equation:
$$F(t)=1-\exp\left[ -\left(\frac{t}{\eta}\right)^{\beta}\right]$$
Then a linearized version of the CDF is:
$$ \log\left\{-\log\left[1-F(t)\right]\right\} = \beta \cdot \log(t) - \beta \cdot \log(\eta)$$
Transformations regarding the axes are:
* Abscissa: $x = \log(t)$
* Ordinate: $y = \log\left\{-\log\left[1-F(t)\right]\right\}$.
It can be easily seen that the parameters can be converted into each other. The corresponding equations are:
$$\beta = \frac{1}{\sigma}$$
and
$$\eta = \exp\left(\mu\right).$$
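The following base-R sketch, with made-up lifetimes and parameter values, illustrates both the axis transformation and the parameter conversion:
```r
# Minimal sketch: Weibull axis transformation and conversion between the
# location-scale and the shape-scale parameterization (illustrative values).
t     <- c(35, 38, 42, 56, 58, 61, 63)
n     <- length(t)
F_hat <- (seq_len(n) - 0.3) / (n + 0.4)   # Benard's approximation

x <- log(t)                 # abscissa
y <- log(-log(1 - F_hat))   # ordinate: SEV quantile of F_hat
plot(x, y)                  # roughly linear if the Weibull is adequate

# Conversion between (mu, sigma) and (eta, beta):
mu <- 4.1; sigma <- 0.5
c(eta = exp(mu), beta = 1 / sigma)
```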
## Data: Shock Absorber
To apply the introduced methods of non-parametric failure probability estimation and probability plotting, the `shock` data is used. In this dataset, kilometer-dependent problems that have occurred on shock absorbers are reported. In addition to failed items the dataset also contains non-defective (*censored*) observations. The data can be found in _Statistical Methods for Reliability Data_ [^note3].
[^note3]: Meeker, W. Q.; Escobar, L. A.: _Statistical Methods for Reliability Data_,
_New York, Wiley series in probability and statistics_, 1998, p. 630
For consistent handling of the data, {weibulltools} introduces the function `reliability_data()` that converts the original dataset into a `wt_reliability_data` object. This formatted object makes it easy to apply the presented methods.
```{r dataset_shock, message = FALSE}
shock_tbl <- reliability_data(data = shock, x = distance, status = status)
shock_tbl
```
## Estimation of Failure Probabilities with {weibulltools}
First, we are interested in how censored observations influence the estimation of failure probabilities in comparison to the case where only failed units are considered. To deal with survived and failed units we will use `estimate_cdf()` with `methods = "johnson"`, whereas `methods = "mr"` only considers failures.
```{r failure_probabilities}
# Estimate CDF with both methods:
cdf_tbl <- estimate_cdf(shock_tbl, methods = c("mr", "johnson"))
# First case where only failed units are taken into account:
cdf_tbl_mr <- cdf_tbl %>% filter(cdf_estimation_method == "mr")
cdf_tbl_mr
# Second case where both survived and failed units are considered:
cdf_tbl_john <- cdf_tbl %>% filter(cdf_estimation_method == "johnson")
cdf_tbl_john
```
<br>
If we compare both outputs, we can see that the survivors reduce the probabilities. This is just what was expected, since undamaged units with a longer or equal lifetime characteristic _x_ increase our confidence in the product.
## Probability Plotting with {weibulltools}
The estimated probabilities should now be presented in a probability plot. With `plot_prob()` probability plots for several lifetime distributions can be constructed and estimates of multiple methods can be displayed at once.
### Weibull Probability Plot
```{r probability_plot_weibull, fig.cap = "Figure 3: Plotting positions in Weibull grid.", message = FALSE}
# Weibull grid for estimated probabilities:
weibull_grid <- plot_prob(
cdf_tbl,
distribution = "weibull",
title_main = "Weibull Probability Plot",
title_x = "Mileage in km",
title_y = "Probability of Failure in %",
title_trace = "Method",
plot_method = "ggplot2"
)
weibull_grid
```
<br>
_Figure 3_ shows that the consideration of survivors (orange points, _Method: johnson_) decreases the failure probability in comparison to the sole evaluation of failed items (green points, _Method: mr_).
### Log-normal Probability Plot
Finally, we want to use a log-normal probability plot to visualize the estimated failure probabilities.
```{r probability_plot_log-normal, fig.cap = "Figure 4: Plotting positions in log-normal grid.", message = FALSE}
# Log-normal grid for estimated probabilities:
lognorm_grid <- plot_prob(
cdf_tbl,
distribution = "lognormal",
title_main = "Log-normal Probability Plot",
title_x = "Mileage in km",
title_y = "Probability of Failure in %",
title_trace = "Method",
plot_method = "ggplot2"
)
lognorm_grid
```
<br>
On the basis of _Figure 3_ and _Figure 4_ we can subjectively assess the goodness of fit of Weibull and log-normal. It can be seen that in both grids, the plotted points roughly fall on a straight line. Hence one can say that the Weibull as well as the log-normal are good model candidates for the `shock` data.
/scratch/gouwar.j/cran-all/cranData/weibulltools/inst/doc/Life_Data_Analysis_Part_I.Rmd
## ----setup, echo=FALSE, message=FALSE-----------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
screenshot.force = FALSE,
comment = "#>"
)
library(weibulltools)
## ----dataset_shock, message = FALSE-------------------------------------------
shock_tbl <- reliability_data(data = shock, x = distance, status = status)
shock_tbl
## ---- data_alloy--------------------------------------------------------------
# Data:
alloy_tbl <- reliability_data(data = alloy, x = cycles, status = status)
alloy_tbl
## ----RR_weibull, fig.cap = "Figure 1: RR for a two-parametric Weibull distribution.", message = FALSE----
# rank_regression needs estimated failure probabilities:
shock_cdf <- estimate_cdf(shock_tbl, methods = "johnson")
# Estimating two-parameter Weibull:
rr_weibull <- rank_regression(shock_cdf, distribution = "weibull")
rr_weibull
# Probability plot:
weibull_grid <- plot_prob(
shock_cdf,
distribution = "weibull",
title_main = "Weibull Probability Plot",
title_x = "Mileage in km",
title_y = "Probability of Failure in %",
title_trace = "Defectives",
plot_method = "ggplot2"
)
# Add regression line:
weibull_plot <- plot_mod(
weibull_grid,
x = rr_weibull,
title_trace = "Rank Regression"
)
weibull_plot
## ----ML_weibull, fig.cap = "Figure 2: ML for a two-parametric Weibull distribution.", message = FALSE----
# Again estimating Weibull:
ml_weibull <- ml_estimation(
shock_tbl,
distribution = "weibull"
)
ml_weibull
# Add ML estimation to weibull_grid:
weibull_plot2 <- plot_mod(
weibull_grid,
x = ml_weibull,
title_trace = "Maximum Likelihood"
)
weibull_plot2
## ----ML_estimation_log-normal, message = FALSE--------------------------------
# Two-parameter log-normal:
ml_lognormal <- ml_estimation(
alloy_tbl,
distribution = "lognormal"
)
ml_lognormal
# Three-parameter Log-normal:
ml_lognormal3 <- ml_estimation(
alloy_tbl,
distribution = "lognormal3"
)
ml_lognormal3
## ----ML_visualization_I, fig.cap = "Figure 3: ML for a two-parametric log-normal distribution.", message = FALSE----
# Constructing probability plot:
tbl_cdf_john <- estimate_cdf(alloy_tbl, "johnson")
lognormal_grid <- plot_prob(
tbl_cdf_john,
distribution = "lognormal",
title_main = "Log-normal Probability Plot",
title_x = "Cycles",
title_y = "Probability of Failure in %",
title_trace = "Failed units",
plot_method = "ggplot2"
)
# Add two-parametric model to grid:
lognormal_plot <- plot_mod(
lognormal_grid,
x = ml_lognormal,
title_trace = "Two-parametric log-normal"
)
lognormal_plot
## ----ML_visualization_II, fig.cap = "Figure 4: ML for a three-parametric log-normal distribution.", message = FALSE----
# Add three-parametric model to lognormal_plot:
lognormal3_plot <- plot_mod(
lognormal_grid,
x = ml_lognormal3,
title_trace = "Three-parametric log-normal"
)
lognormal3_plot
/scratch/gouwar.j/cran-all/cranData/weibulltools/inst/doc/Life_Data_Analysis_Part_II.R
---
title: "Life Data Analysis Part II - Estimation Methods for Parametric Lifetime Models"
subtitle: "Rank Regression and Maximum Likelihood"
author:
- "Tim-Gunnar Hensel"
- "David Barkemeyer"
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
fig_height: 6
fig_width: 7
fig_caption: yes
vignette: >
%\VignetteEngine{knitr::rmarkdown}
%\VignetteIndexEntry{Life Data Analysis Part II - Estimation Methods for Parametric Lifetime Models}
%\VignetteEncoding{UTF-8}
---
```{r setup, echo=FALSE, message=FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
screenshot.force = FALSE,
comment = "#>"
)
library(weibulltools)
```
This document introduces two methods for the parameter estimation of lifetime distributions. Whereas _Rank Regression (RR)_ fits a straight line through transformed plotting positions (the transformation is described in detail in `vignette(topic = "Life_Data_Analysis_Part_I", package = "weibulltools")`), _Maximum Likelihood (ML)_ maximizes a function of the parameters given the sample data. Once the parameters are obtained, a cumulative distribution function _(CDF)_ can be computed and added to a probability plot.
In the theoretical part of this vignette the focus is on the two-parameter Weibull distribution. The second part is about the application of the provided estimation methods in {weibulltools}. All implemented models can be found in the help pages of `rank_regression()` and `ml_estimation()`.
## The Weibull Distribution
The Weibull distribution is a continuous probability distribution, which is specified by the location parameter $\mu$ and the scale parameter $\sigma$. Its _CDF_ and _PDF (probability density function)_ are given by the following formulas:
$$F(t)=\Phi_{SEV}\left(\frac{\log(t) - \mu}{\sigma}\right)$$
$$f(t)=\frac{1}{\sigma t}\;\phi_{SEV}\left(\frac{\log(t) - \mu}{\sigma}\right)$$
The practical benefit of the Weibull in the field of lifetime analysis is that the common profiles of failure rates, which are observed over the lifetime of a large number of technical products, can be described using this statistical distribution.
In the following, the estimation of the specific parameters $\mu$ and $\sigma$ is explained.
## Rank Regression (RR)
In _RR_ the _CDF_ is linearized such that the true, unknown population is estimated by a straight line which is analytically placed through the plotting positions. The lifetime characteristic, entered on the x-axis, is displayed on a logarithmic scale. A double-logarithmic representation of the estimated failure probabilities is used for the y-axis. Ordinary Least Squares _(OLS)_ determines a best-fit line such that the sum of squared deviations between this fitted regression line and the plotted points is minimized.
In reliability analysis it has become common practice to place the line in the probability plot such that the horizontal distances between the best-fit line and the points are minimized [^note1]. This procedure is called __x on y__ rank regression.
[^note1]: Berkson, J.: _Are There Two Regressions?_,
_Journal of the American Statistical Association 45 (250)_,
DOI: 10.2307/2280676, 1950, pp. 164-180
The formulas for estimating the slope and the intercept of the regression line according to the described method are given below.
Slope:
$$\hat{b}=\frac{\sum_{i=1}^{n}(x_i-\bar{x})\cdot(y_i-\bar{y})}{\sum_{i=1}^{n}(y_i-\bar{y})^2}$$
Intercept:
$$\hat{a}=\bar{x}-\hat{b}\cdot\bar{y}$$
With
$$x_i=\log(t_i)\;;\; \bar{x}=\frac{1}{n}\cdot\sum_{i=1}^{n}\log(t_i)\;;$$
as well as
$$y_i=\Phi^{-1}_{SEV}\left[F(t_i)\right]=\log\left\{-\log\left[1-F(t_i)\right]\right\} \;\; \text{and} \;\; \bar{y}=\frac{1}{n}\cdot\sum_{i=1}^{n}\log\left\{-\log\left[1-F(t_i)\right]\right\}.$$
The estimates of the intercept and slope are equal to the Weibull parameters $\mu$ and $\sigma$, i.e.
$$\hat{\mu}=\hat{a}$$
and
$$\hat{\sigma}=\hat{b}.$$
In order to obtain the parameters of the shape-scale parameterization the intercept and the slope need to be transformed [^note2].
[^note2]: ReliaSoft Corporation: _Life Data Analysis Reference Book_,
online: [ReliaSoft](http://reliawiki.org/index.php/The_Weibull_Distribution), accessed 19 December 2020
$$\hat{\eta}=\exp(\hat{a})=\exp(\hat{\mu})$$
and
$$\hat{\beta}=\frac{1}{\hat{b}}=\frac{1}{\hat{\sigma}}.$$
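For a complete toy sample, the estimation steps above can be reproduced in a few lines of base R; this is only a sketch with made-up lifetimes, and `rank_regression()` is the function to use in practice:
```r
# Minimal sketch of "x on y" rank regression for a complete, made-up sample.
t     <- c(35, 38, 42, 56, 58, 61, 63, 76, 81, 83)
n     <- length(t)
F_hat <- (seq_len(n) - 0.3) / (n + 0.4)   # Benard's approximation

x <- log(t)
y <- log(-log(1 - F_hat))                 # SEV quantiles

b_hat <- sum((x - mean(x)) * (y - mean(y))) / sum((y - mean(y))^2)
a_hat <- mean(x) - b_hat * mean(y)

c(mu = a_hat, sigma = b_hat, eta = exp(a_hat), beta = 1 / b_hat)
```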
## Maximum Likelihood (ML)
The _ML_ method of Ronald A. Fisher estimates the parameters by maximizing the likelihood function. Assuming a theoretical distribution, the idea of _ML_ is that the specific parameters are chosen in such a way that the plausibility of obtaining the present sample is maximized. The likelihood and log-likelihood are given by the following equations:
$$L = \prod_{i=1}^n\left\{\frac{1}{\sigma t_i}\;\phi_{SEV}\left(\frac{\log(t_i) - \mu}{\sigma}\right)\right\}$$
and
$$\log L = \sum_{i=1}^n\log\left\{\frac{1}{\sigma t_i}\;\phi_{SEV}\left(\frac{\log(t_i) - \mu}{\sigma}\right)\right\}$$
Taking the partial derivatives of the log-likelihood with respect to the parameters and setting them equal to zero yields two equations that have to be solved numerically in order to obtain the estimates.
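As a sketch of this numerical maximization for complete data only (the made-up sample and starting values are illustrative; `ml_estimation()` also handles censoring), the negative log-likelihood can be passed to `optim()`:
```r
# Minimal sketch: maximizing the Weibull (log-location-scale) log-likelihood
# for a complete, made-up sample with stats::optim().
dsev <- function(x) exp(x - exp(x))       # standard SEV density

neg_loglik <- function(par, t) {
  mu <- par[1]; sigma <- par[2]
  if (sigma <= 0) return(Inf)
  -sum(log(1 / (sigma * t)) + log(dsev((log(t) - mu) / sigma)))
}

t   <- c(35, 38, 42, 56, 58, 61, 63, 76, 81, 83)
fit <- optim(par = c(mu = log(mean(t)), sigma = 0.5), fn = neg_loglik, t = t)
fit$par                                   # estimates of mu and sigma
```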
In large samples, ML estimators have optimality properties. In addition, the simulation studies by _Genschel and Meeker_ [^note3] have shown that even in small samples it is difficult to find an estimator that regularly has better properties than ML estimators.
[^note3]: Genschel, U.; Meeker, W. Q.: _A Comparison of Maximum Likelihood and Median-Rank Regression for Weibull Estimation_,
in: _Quality Engineering 22 (4)_, DOI: 10.1080/08982112.2010.503447, 2010, pp. 236-255
## Data
To apply the introduced parameter estimation methods the `shock` and `alloy` datasets are used.
### Shock Absorber
In this dataset kilometer-dependent problems that have occurred on shock absorbers are reported. In addition to failed items the dataset also contains non-defective (*censored*) observations. The data can be found in _Statistical Methods for Reliability Data_ [^note4].
[^note4]: Meeker, W. Q.; Escobar, L. A.: _Statistical Methods for Reliability Data_,
_New York, Wiley series in probability and statistics_, 1998, p. 630
For consistent handling of the data, {weibulltools} introduces the function `reliability_data()` that converts the original dataset into a `wt_reliability_data` object. This formatted object makes it easy to apply the presented methods.
```{r dataset_shock, message = FALSE}
shock_tbl <- reliability_data(data = shock, x = distance, status = status)
shock_tbl
```
### Alloy T7989
The dataset `alloy` contains the cycles until a fatigue failure of a special alloy occurs. The data is also taken from Meeker and Escobar [^note5].
[^note5]: Meeker, W. Q.; Escobar, L. A.: _Statistical Methods for Reliability Data_,
_New York, Wiley series in probability and statistics_, 1998, p. 131
Again, the data have to be formatted as a `wt_reliability_data` object:
```{r, data_alloy}
# Data:
alloy_tbl <- reliability_data(data = alloy, x = cycles, status = status)
alloy_tbl
```
## RR and ML with {weibulltools}
`rank_regression()` and `ml_estimation()` can be applied to complete data as well as failure and (multiple) right-censored data. Both methods can also deal with models that have a threshold parameter $\gamma$.
In the following both methods are applied to the dataset `shock`.
### RR for two-parameter Weibull distribution
```{r RR_weibull, fig.cap = "Figure 1: RR for a two-parametric Weibull distribution.", message = FALSE}
# rank_regression needs estimated failure probabilities:
shock_cdf <- estimate_cdf(shock_tbl, methods = "johnson")
# Estimating two-parameter Weibull:
rr_weibull <- rank_regression(shock_cdf, distribution = "weibull")
rr_weibull
# Probability plot:
weibull_grid <- plot_prob(
shock_cdf,
distribution = "weibull",
title_main = "Weibull Probability Plot",
title_x = "Mileage in km",
title_y = "Probability of Failure in %",
title_trace = "Defectives",
plot_method = "ggplot2"
)
# Add regression line:
weibull_plot <- plot_mod(
weibull_grid,
x = rr_weibull,
title_trace = "Rank Regression"
)
weibull_plot
```
### ML for two-parameter Weibull distribution
```{r ML_weibull, fig.cap = "Figure 2: ML for a two-parametric Weibull distribution.", message = FALSE}
# Again estimating Weibull:
ml_weibull <- ml_estimation(
shock_tbl,
distribution = "weibull"
)
ml_weibull
# Add ML estimation to weibull_grid:
weibull_plot2 <- plot_mod(
weibull_grid,
x = ml_weibull,
title_trace = "Maximum Likelihood"
)
weibull_plot2
```
### ML for two- and three-parameter log-normal distribution
Finally, two- and three-parametric log-normal distributions are fitted to the `alloy` data using maximum likelihood.
```{r ML_estimation_log-normal, message = FALSE}
# Two-parameter log-normal:
ml_lognormal <- ml_estimation(
alloy_tbl,
distribution = "lognormal"
)
ml_lognormal
# Three-parameter Log-normal:
ml_lognormal3 <- ml_estimation(
alloy_tbl,
distribution = "lognormal3"
)
ml_lognormal3
```
<br>
```{r ML_visualization_I, fig.cap = "Figure 3: ML for a two-parametric log-normal distribution.", message = FALSE}
# Constructing probability plot:
tbl_cdf_john <- estimate_cdf(alloy_tbl, "johnson")
lognormal_grid <- plot_prob(
tbl_cdf_john,
distribution = "lognormal",
title_main = "Log-normal Probability Plot",
title_x = "Cycles",
title_y = "Probability of Failure in %",
title_trace = "Failed units",
plot_method = "ggplot2"
)
# Add two-parametric model to grid:
lognormal_plot <- plot_mod(
lognormal_grid,
x = ml_lognormal,
title_trace = "Two-parametric log-normal"
)
lognormal_plot
```
<br>
```{r ML_visualization_II, fig.cap = "Figure 4: ML for a three-parametric log-normal distribution.", message = FALSE}
# Add three-parametric model to lognormal_plot:
lognormal3_plot <- plot_mod(
lognormal_grid,
x = ml_lognormal3,
title_trace = "Three-parametric log-normal"
)
lognormal3_plot
```
/scratch/gouwar.j/cran-all/cranData/weibulltools/inst/doc/Life_Data_Analysis_Part_II.Rmd
## ----setup, echo=FALSE, message=FALSE-----------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
screenshot.force = FALSE,
comment = "#>"
)
library(weibulltools)
## ----dataset_shock, message = FALSE-------------------------------------------
# Data:
shock_tbl <- reliability_data(data = shock, x = distance, status = status)
shock_tbl
## ---- Parameter estimation procedures-----------------------------------------
# Estimation of failure probabilities:
shock_cdf <- estimate_cdf(shock_tbl, methods = "johnson")
# Rank Regression:
rr_weibull <- rank_regression(shock_cdf, distribution = "weibull")
# Maximum Likelihood Estimation:
ml_weibull <- ml_estimation(shock_tbl, distribution = "weibull")
## ---- Confidence intervals for model parameters-------------------------------
# Confidence intervals based on Rank Regression:
rr_weibull$confint
# Confidence intervals based on Maximum Likelihood Estimation:
ml_weibull$confint
## ---- Confidence level--------------------------------------------------------
# Confidence intervals based on another confidence level:
ml_weibull_99 <- ml_estimation(shock_tbl, distribution = "weibull", conf_level = 0.99)
ml_weibull_99$confint
## ---- Confidence intervals for probabilities----------------------------------
# Beta-Binomial confidence bounds:
conf_bb <- confint_betabinom(
x = rr_weibull,
b_lives = c(0.01, 0.1, 0.5),
bounds = "two_sided",
conf_level = 0.95,
direction = "y"
)
conf_bb
# Fisher's normal approximation confidence intervals:
conf_fisher <- confint_fisher(x = ml_weibull)
conf_fisher
## ---- Preparation for visualization-------------------------------------------
# Probability plot
weibull_grid <- plot_prob(
shock_cdf,
distribution = "weibull",
title_main = "Weibull Probability Plot",
title_x = "Mileage in km",
title_y = "Probability of Failure in %",
title_trace = "Defectives",
plot_method = "ggplot2"
)
## ---- BBB on failure probabilities, fig.cap = "Figure 1: Beta-Binomial confidence bounds for failure probabilities.", message = FALSE----
# Beta-Binomial confidence intervals:
weibull_conf_bb <- plot_conf(
weibull_grid,
conf_bb,
title_trace_mod = "Rank Regression",
title_trace_conf = "Beta-Binomial Bounds"
)
weibull_conf_bb
## ---- FI on failure probabilities, fig.cap = "Figure 2: Fisher's normal approximation confidence intervals for failure probabilities.", message = FALSE----
# Fisher's normal approximation confidence intervals:
weibull_conf_fisher <- plot_conf(
weibull_grid,
conf_fisher,
title_trace_mod = "Maximum Likelihood",
title_trace_conf = "Fisher's Confidence Intervals"
)
weibull_conf_fisher
## ---- Confidence intervals for quantiles--------------------------------------
# Computation of confidence intervals for quantiles:
## Beta-Binomial confidence intervals:
conf_bb_x <- confint_betabinom(
x = rr_weibull,
bounds = "upper",
conf_level = 0.95,
direction = "x"
)
conf_bb_x
## Fisher's normal approximation confidence intervals:
conf_fisher_x <- confint_fisher(x = ml_weibull, bounds = "lower", direction = "x")
conf_fisher_x
## ---- BBB on quantiles, fig.cap = "Figure 3: One-sided (upper) Beta-Binomial confidence bound for quantiles.", message = FALSE----
# Visualization:
## Beta-Binomial confidence intervals:
weibull_conf_bb_x <- plot_conf(
weibull_grid,
conf_bb_x,
title_trace_mod = "Rank Regression",
title_trace_conf = "Beta-Binomial Bounds"
)
weibull_conf_bb_x
## ---- FI on quantiles, fig.cap = "Figure 4: One-sided (lower) normal approximation confidence interval for quantiles.", message = FALSE----
## Fisher's normal approximation confidence intervals:
weibull_conf_fisher_x <- plot_conf(
weibull_grid,
conf_fisher_x,
title_trace_mod = "Maximum Likelihood",
title_trace_conf = "Fisher's Confidence Intervals"
)
weibull_conf_fisher_x
/scratch/gouwar.j/cran-all/cranData/weibulltools/inst/doc/Life_Data_Analysis_Part_III.R
---
title: "Life Data Analysis Part III - Confidence Intervals"
subtitle: "For Model Parameters, Probabilities and Quantiles"
author:
- "Tim-Gunnar Hensel"
- "David Barkemeyer"
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
fig_height: 6
fig_width: 7
fig_caption: yes
vignette: >
%\VignetteEngine{knitr::rmarkdown}
%\VignetteIndexEntry{Life Data Analysis Part III - Confidence Intervals}
%\VignetteEncoding{UTF-8}
---
```{r setup, echo=FALSE, message=FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
screenshot.force = FALSE,
comment = "#>"
)
library(weibulltools)
```
In contrast to point estimation procedures, interval estimation methods, e.g. the computation of confidence intervals, express the uncertainty which is associated with the use of a statistical estimator.
In reliability analysis it is common practice to provide confidence regions for the parameters of a lifetime distribution as well as for quantities that depend on these model parameters.
In this vignette, the determination of confidence intervals for model parameters, quantiles (lifetime characteristic) and failure probabilities (*CDF*) is presented.
## Confidence Intervals for Model Parameters
Confidence intervals can be calculated for every model parameter. For this, the (approximate) sampling distribution as well as an estimate of its standard deviation must be given. In the following, the formulas, which strongly depend on the estimation method, are provided for *Rank Regression* and *Maximum Likelihood Estimation*.
### Rank Regression (RR)
In *Rank Regression* a linear relationship between the lifetime characteristic and the failure probability is determined. The parameters of a simple linear regression model are the intercept and the slope, which are the location parameter $\mu$ and the scale parameter $\sigma$ for the majority of lifetime distributions.
An approximated two-sided interval for the true parameters on a $1 - \alpha$ confidence level can be obtained with the formulas
$$\bigg[\hat{\mu}_{\text{lower}} \, ; \, \hat{\mu}_{\text{upper}} \bigg] = \bigg[\hat{\mu}_{\text{RR}} \pm t_{1 - \frac{\alpha}{2}} \cdot \hat{se}^{\text{HC}}_{\hat{\mu}}\bigg] \qquad \text{and} \qquad \bigg[\hat{\sigma}_{\text{lower}} \, ; \, \hat{\sigma}_{\text{upper}} \bigg] = \bigg[\hat{\sigma}_{\text{RR}} \pm t_{1 - \frac{\alpha}{2}} \cdot \hat{se}^{\text{HC}}_{\hat{\sigma}}\bigg].$$
For a given sample, $\hat{\mu}_{\text{RR}}$ and $\hat{\sigma}_{\text{RR}}$ are the least squares estimates and $\hat{se}^{\text{HC}}_{\hat{\mu}}$ and $\hat{se}^{\text{HC}}_{\hat{\sigma}}$ are the respective estimates of the standard deviations. The uncertainty that arises due to the unknown standard deviations must be taken into account by using the quantiles of Student's t-distribution.
When using *RR* in the context of reliability analysis, the assumption of homoscedastic error terms is often violated. Therefore, the computation of the standard errors is based on a heteroscedasticity-consistent (**HC**) variance-covariance matrix. Other assumptions of the classical linear regression model, such as the absence of serial correlation, are questionable as well and have already been discussed in the literature [^note1].
Hence, the *Maximum Likelihood Estimation* procedure is recommended, which is described in the next section.
[^note1]: Genschel, U.; Meeker, W. Q.: _A Comparison of Maximum Likelihood and Median-Rank Regression for Weibull Estimation_,
in: _Quality Engineering 22 (4)_, DOI: 10.1080/08982112.2010.503447, 2010, pp. 236-255
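A rough sketch of such a t-based interval (not the package's internal routine) could use a heteroscedasticity-consistent covariance estimate from the {sandwich} package, assuming it is installed; the sample and all names are illustrative:
```r
# Minimal sketch: t-based interval for the intercept (i.e. mu) of an
# "x on y" fit, using an HC covariance estimate from {sandwich}.
library(sandwich)

t_obs <- c(35, 38, 42, 56, 58, 61, 63, 76, 81, 83)
n     <- length(t_obs)
F_hat <- (seq_len(n) - 0.3) / (n + 0.4)

x <- log(t_obs)
y <- log(-log(1 - F_hat))

fit   <- lm(x ~ y)                    # "x on y" regression
se_hc <- sqrt(diag(vcovHC(fit)))      # HC standard errors
alpha <- 0.05
tq    <- qt(1 - alpha / 2, df = df.residual(fit))

coef(fit)[["(Intercept)"]] + c(-1, 1) * tq * se_hc[["(Intercept)"]]
```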
### Maximum Likelihood Estimation (MLE)
*ML* estimators are subject to a variety of restrictions but in return have many useful properties in contrast to other estimation techniques. One is that *ML* estimators are asymptotically normally distributed, and for this reason normal approximation confidence intervals for the model parameters can be calculated.
Using the parameterization introduced above, a two-sided normal approximation confidence interval for the location parameter $\mu$ can be computed with the equation
$$\bigg[\hat{\mu}_{\text{lower}} \, ; \, \hat{\mu}_{\text{upper}} \bigg] = \bigg[\hat{\mu}_{\text{MLE}} \pm z_{1 - \frac{\alpha}{2}} \cdot \hat{se}_{\hat{\mu}}\bigg].$$
By definition, the scale parameter $\sigma$ is always positive and thus an alternative confidence interval is used [^note2]:
$$\bigg[\hat{\sigma}_{\text{lower}} \, ; \, \hat{\sigma}_{\text{upper}} \bigg] = \bigg[\frac{\hat{\sigma}_{\text{MLE}}}{w} \, ; \hat{\sigma}_{\text{MLE}} \cdot w \bigg]$$
with
$$w = \exp\left[z_{1 - \frac{\alpha}{2}} \cdot \frac{\hat{se}_{\hat{\sigma}}}{\hat{\sigma}_{\text{MLE}}}\right].$$
[^note2]: Meeker, W. Q.; Escobar, L. A.: _Statistical Methods for Reliability Data_,
_New York, Wiley series in probability and statistics_, 1998, p. 188
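With ML estimates and their standard errors at hand, these intervals are straightforward to compute; the numbers below are purely illustrative, and in practice the intervals are part of the `ml_estimation()` output:
```r
# Minimal sketch of the normal approximation intervals above
# (all numbers are illustrative).
mu_hat    <- 10.2; se_mu    <- 0.15
sigma_hat <- 0.32; se_sigma <- 0.08
alpha     <- 0.05
z         <- qnorm(1 - alpha / 2)

mu_hat + c(-1, 1) * z * se_mu              # interval for mu

w <- exp(z * se_sigma / sigma_hat)
c(sigma_hat / w, sigma_hat * w)            # interval for sigma
```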
## Confidence Intervals for Probabilities and Quantiles
In addition to the confidence regions for the distribution-specific parameters, intervals for the regression line are provided as well. These can be aligned according to the probability $F(t)$ or to the lifetime characteristic $t$.
Whereas the Beta-Binomial confidence bounds are often used in combination with *RR*, Fisher's normal approximation confidence intervals are only applicable in the case of *MLE*.
### Beta-Binomial Confidence Intervals for $F(t)$
To obtain a two-sided non-parametric confidence interval for the failure probabilities at a given $1-\alpha$ level, a procedure similar to *Median Ranks (MR)* is used.
Instead of finding the probability $p_{\text{MR}}$ for the *j-th* rank at the $50\%$ level
$$0.5 = \sum^n_{k = j} \binom{n}{k} \cdot p_{\text{MR}}^k \cdot \left(1-p_{\text{MR}}\right)^{n-k}, $$
the probability $p_{\text{lower}}$ must be determined for equation
$$\frac{\alpha}{2} = \sum^n_{k = j} \binom{n}{k} \cdot p_{\text{lower}}^k \cdot \left(1-p_{\text{lower}}\right)^{n-k}$$
and $p_{\text{upper}}$ for the expression
$$1 - \frac{\alpha}{2} = \sum^n_{k = j} \binom{n}{k} \cdot p_{\text{upper}}^k \cdot \left(1-p_{\text{upper}}\right)^{n-k}.$$
The resulting interval $\left[\hat{F}_{j, \, {\text{lower}}} \, ; \, \hat{F}_{j, \, {\text{upper}}}\right] = \left[\hat{p}_{{\text{lower}}} \, ; \, \hat{p}_{{\text{upper}}}\right]$ is the estimated confidence region for the true failure probability with respect to the *j-th* rank.
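These binomial equations do not have to be solved by brute force; they are equivalent to quantiles of a Beta distribution, as the following sketch with illustrative numbers shows:
```r
# Minimal sketch: solving the equations above via qbeta(), using the identity
# P(X >= j) = pbeta(p, j, n - j + 1) for X ~ Binomial(n, p).
n <- 10; j <- 3; alpha <- 0.05

c(
  lower  = qbeta(alpha / 2,     j, n - j + 1),
  median = qbeta(0.5,           j, n - j + 1),
  upper  = qbeta(1 - alpha / 2, j, n - j + 1)
)
```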
### Beta-Binomial Confidence Intervals for $t$
Once the intervals of the failure probabilities are calculated, a two-sided confidence interval for the lifetime characteristic can be found with the quantile function of the underlying lifetime distribution.
For the Weibull, the quantile function is given by the formula
$$t_{p} = F^{-1}(p) = \exp\left[\mu + \Phi^{-1}_{\text{SEV}}(p) \cdot \sigma\right],$$
where $\Phi^{-1}_{\text{SEV}}$ is the quantile function of the standard smallest extreme value distribution.
The confidence interval for $t$ with respect to the estimated *RR* parameters as well as the lower and upper probability of the *j-th* rank is then computed by
$$\hat{t}_{j \, ; \, \text{lower}} = \exp\left[\hat{\mu}_{\text{RR}} + \Phi^{-1}_{\text{SEV}}(\hat{F}_{j, \, {\text{lower}}}) \cdot \hat{\sigma}_{\text{RR}}\right]$$
and
$$\hat{t}_{j \, ; \, \text{upper}} = \exp\left[\hat{\mu}_{\text{RR}} + \Phi^{-1}_{\text{SEV}}(\hat{F}_{j, \, {\text{upper}}}) \cdot \hat{\sigma}_{\text{RR}}\right].$$
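A sketch of this back-transformation, with purely illustrative RR estimates and rank probability bounds:
```r
# Minimal sketch: translating the probability bounds of the j-th rank into
# bounds for the lifetime characteristic (illustrative values).
mu_rr   <- 10.2; sigma_rr <- 0.32
F_lower <- 0.07; F_upper  <- 0.35

qsev <- function(p) log(-log(1 - p))       # SEV quantile function

c(
  t_lower = exp(mu_rr + qsev(F_lower) * sigma_rr),
  t_upper = exp(mu_rr + qsev(F_upper) * sigma_rr)
)
```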
### Fisher's Confidence Intervals for $F(t)$
For a particular quantile $t$ and the vector of parameters $\hat{\theta}_{MLE}$, a normal approximation confidence interval for the failure probability $F(t)$ can be obtained by
$$\bigg[\hat{F}_{\text{lower}}(t) \, ; \, \hat{F}_{\text{upper}}(t)\bigg] = \bigg[\hat{F}_{\text{MLE}}(t) \pm z_{1 - \frac{\alpha}{2}} \cdot \hat{se}_{\hat{F}(t)}\bigg].$$
In order to guarantee that the realized confidence interval of $F(t)$ always lies between 0 and 1, the so-called *z-procedure* can be applied [^note3]. Using this technique, statistical inference is first carried out for the standardized quantile $z$, and the result is then transformed back to the scale of $F(t)$ to obtain the desired interval.
[^note3]: Hong, Y.; Meeker, W. Q.; Escobar, L. A.: _The Relationship Between Confidence Intervals for Failure Probabilities and Life Time Quantiles_,
in: _IEEE Transactions on Reliability 57_, 2008, pp. 260-266
For the Weibull, the *ML* estimator of the standardized quantile function $z$ is
$$\hat{z}_{\text{MLE}} = \frac{\log(t) - \hat{\mu}_{\text{MLE}}}{\hat{\sigma}_{\text{MLE}}}. $$
First, an approximate confidence interval for $z$ is determined with the following formula:
$$\bigg[\hat{z}_{\text{lower}} \, ; \, \hat{z}_{\text{upper}}\bigg] = \bigg[\hat{z}_{\text{MLE}} \pm z_{1 - \frac{\alpha}{2}} \cdot \hat{se}_{\hat{z}}\bigg].$$
An approximate formula for the standard error of the estimator $\hat{z}$ can be derived with the *delta method*:
$$\hat{se}_{\hat{z}} = \sqrt{\hat{Var}_{\hat{z}}} = \sqrt{\bigg(\frac{\partial{\hat{z}}}{\partial{\hat{\theta}_{\text{MLE}}}}\bigg)^{T}\; \hat{Var}(\hat{\theta}_{\text{MLE}})\; \frac{\partial{\hat{z}}}{\partial{\hat{\theta}_{\text{MLE}}}}} \; .$$
Finally, the estimated bounds of $z$ are then plugged into the distribution-specific standard *CDF* to obtain the interval for $F(t)$, which is
$$\bigg[\hat{F}_{\text{lower}}(t) \, ; \, \hat{F}_{\text{upper}}(t)\bigg] = \bigg[\Phi_{\text{SEV}}(\hat{z}_{\text{lower}}) \, ; \, \Phi_{\text{SEV}}(\hat{z}_{\text{upper}}) \bigg].$$
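The sketch below walks through the z-procedure for a fixed $t$, assuming the ML estimates and their variance-covariance matrix are available; all numbers are illustrative, and `confint_fisher()` performs these steps internally:
```r
# Minimal sketch of the z-procedure for the Weibull (illustrative values).
mu_hat    <- 10.2
sigma_hat <- 0.32
vcov_hat  <- matrix(c(0.0225, -0.0010, -0.0010, 0.0064), nrow = 2)
t0        <- 20000
alpha     <- 0.05

z_hat <- (log(t0) - mu_hat) / sigma_hat
grad  <- c(-1 / sigma_hat, -z_hat / sigma_hat)   # dz/dmu, dz/dsigma
se_z  <- sqrt(drop(t(grad) %*% vcov_hat %*% grad))

z_lims <- z_hat + c(-1, 1) * qnorm(1 - alpha / 2) * se_z

psev <- function(q) 1 - exp(-exp(q))             # standard SEV CDF
psev(z_lims)                                     # interval for F(t0)
```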
### Fisher's Confidence Intervals for $t$
In reliability analysis the lifetime characteristic is often defined as a strictly positive quantity. Hence, a normal approximation confidence interval for the quantile $t$ with respect to a particular probability $p$ and the vector of parameters $\hat{\theta}_{\text{MLE}}$ can be calculated by
$$\bigg[\hat{t}_{\text{lower}}(p) \, ; \, \hat{t}_{\text{upper}}(p)\bigg] = \bigg[\frac{\hat{t}_{\text{MLE}}(p)}{w} \, ; \hat{t}_{\text{MLE}}(p) \cdot w \bigg], $$
where $w$ is
$$w = \exp\left[z_{1 - \frac{\alpha}{2}} \cdot \frac{\hat{se}_{\hat{t}(p)}}{\hat{t}_{\text{MLE}}(p)}\right].$$
For the Weibull, the *ML* equation for the quantile $t(p)$ is
$$\hat{t}_{\text{MLE}}(p) = \exp\left[\hat{\mu}_{\text{MLE}} + \Phi^{-1}_{\text{SEV}}(p) \cdot \hat{\sigma}_{\text{MLE}}\right]$$
and again, through the use of the *delta method*, a formula for the standard error of $\hat{t}_p$ can be provided, which is
$$\hat{se}_{\hat{t}(p)} = \sqrt{\hat{Var}_{\hat{t}(p)}} = \sqrt{\bigg(\frac{\partial{\hat{t}(p)}}{\partial{\hat{\theta}_{\text{MLE}}}}\bigg)^{T}\; \hat{Var}(\hat{\theta}_{\text{MLE}})\; \frac{\partial{\hat{t}(p)}}{\partial{\hat{\theta}_{\text{MLE}}}}} \; .$$
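Analogously, a sketch of the quantile interval with the same illustrative estimates and variance-covariance matrix:
```r
# Minimal sketch of Fisher's interval for the quantile t(p) (illustrative values).
mu_hat    <- 10.2
sigma_hat <- 0.32
vcov_hat  <- matrix(c(0.0225, -0.0010, -0.0010, 0.0064), nrow = 2)
p         <- 0.1
alpha     <- 0.05

qsev  <- function(prob) log(-log(1 - prob))      # SEV quantile function
t_hat <- exp(mu_hat + qsev(p) * sigma_hat)

grad <- t_hat * c(1, qsev(p))                    # dt(p)/dmu, dt(p)/dsigma
se_t <- sqrt(drop(t(grad) %*% vcov_hat %*% grad))

w <- exp(qnorm(1 - alpha / 2) * se_t / t_hat)
c(lower = t_hat / w, upper = t_hat * w)
```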
## Data
For the computation of the presented confidence intervals the `shock` dataset is used. In this dataset kilometer-dependent problems that have occurred on shock absorbers are reported. In addition to failed items the dataset also contains non-defective (*censored*) observations. The data can be found in _Statistical Methods for Reliability Data_ [^note4].
[^note4]: Meeker, W. Q.; Escobar, L. A.: _Statistical Methods for Reliability Data_,
_New York, Wiley series in probability and statistics_, 1998, p. 630
For consistent handling of the data, {weibulltools} introduces the function `reliability_data()` that converts the original dataset into a `wt_reliability_data` object. This formatted object makes it easy to apply the presented methods.
```{r dataset_shock, message = FALSE}
# Data:
shock_tbl <- reliability_data(data = shock, x = distance, status = status)
shock_tbl
```
## Confidence Intervals with {weibulltools}
Before calculating confidence intervals with {weibulltools} one has to conduct the basic steps of the Weibull analysis which are described in the previous vignettes.
```{r, Parameter estimation procedures}
# Estimation of failure probabilities:
shock_cdf <- estimate_cdf(shock_tbl, methods = "johnson")
# Rank Regression:
rr_weibull <- rank_regression(shock_cdf, distribution = "weibull")
# Maximum Likelihood Estimation:
ml_weibull <- ml_estimation(shock_tbl, distribution = "weibull")
```
### Confidence Intervals for Model Parameters
The confidence intervals for the distribution parameters are included in the model output of `rank_regression()` and `ml_estimation()`, respectively.
```{r, Confidence intervals for model parameters}
# Confidence intervals based on Rank Regression:
rr_weibull$confint
# Confidence intervals based on Maximum Likelihood Estimation:
ml_weibull$confint
```
The `confint` element of the model output is a matrix with the parameter names as row names and the confidence level as column names. Different levels can be specified using the argument `conf_level`.
```{r, Confidence level}
# Confidence intervals based on another confidence level:
ml_weibull_99 <- ml_estimation(shock_tbl, distribution = "weibull", conf_level = 0.99)
ml_weibull_99$confint
```
### Confidence Intervals for Probabilities
Confidence bounds for failure probabilities can be either determined with `confint_betabinom()` or `confint_fisher()`. As explained in the theoretical part of this vignette the Beta-Binomial confidence bounds should be applied to the output of `rank_regression()` whereas Fisher's normal approximation confidence intervals are only applicable if the parameters and the variance-covariance matrix were estimated with `ml_estimation()`.
```{r, Confidence intervals for probabilities}
# Beta-Binomial confidence bounds:
conf_bb <- confint_betabinom(
x = rr_weibull,
b_lives = c(0.01, 0.1, 0.5),
bounds = "two_sided",
conf_level = 0.95,
direction = "y"
)
conf_bb
# Fisher's normal approximation confidence intervals:
conf_fisher <- confint_fisher(x = ml_weibull)
conf_fisher
```
The outputs of both functions contain the calculated bounds for the failure probabilities, ranging from the minimum to the maximum observed failure. Within the observed range of failures, quantiles are interpolated and the intervals of their probabilities are provided as well (supporting points).
In the function call of `confint_betabinom()` the default arguments of both functions are listed. With the argument `b_lives`, confidence regions for selected probabilities are included, but only if they are in the range of the estimated failure probabilities.
The argument `bounds` is used for the specification of the bound(s) to be computed. It could be one of `c("two_sided", "lower", "upper")`.
If `direction = "y"`, confidence intervals for the probabilities are provided.
The visualization of the computed intervals is done with `plot_conf()`.
```{r, Preparation for visualization}
# Probability plot
weibull_grid <- plot_prob(
shock_cdf,
distribution = "weibull",
title_main = "Weibull Probability Plot",
title_x = "Mileage in km",
title_y = "Probability of Failure in %",
title_trace = "Defectives",
plot_method = "ggplot2"
)
```
```{r, BBB on failure probabilities, fig.cap = "Figure 1: Beta-Binomial confidence bounds for failure probabilities.", message = FALSE}
# Beta-Binomial confidence intervals:
weibull_conf_bb <- plot_conf(
weibull_grid,
conf_bb,
title_trace_mod = "Rank Regression",
title_trace_conf = "Beta-Binomial Bounds"
)
weibull_conf_bb
```
```{r, FI on failure probabilities, fig.cap = "Figure 2: Fisher's normal approximation confidence intervals for failure probabilities.", message = FALSE}
# Fisher's normal approximation confidence intervals:
weibull_conf_fisher <- plot_conf(
weibull_grid,
conf_fisher,
title_trace_mod = "Maximum Likelihood",
title_trace_conf = "Fisher's Confidence Intervals"
)
weibull_conf_fisher
```
As one can see, `plot_conf()` not only adds the confidence limits to an existing probability plot, but also includes the estimated linearized CDF. There is no need for an additional call of `plot_mod()`. In fact, the same routines used by `plot_mod()` are called under the hood, which ensures that confidence bounds are not drawn without the regression line.
### Confidence Intervals for Quantiles
The computation and visualization of confidence intervals for the lifetime characteristic are very similar to the procedure presented for the probabilities.
The only difference is that one has to change the value of the argument `direction` to `"x"`.
```{r, Confidence intervals for quantiles}
# Computation of confidence intervals for quantiles:
## Beta-Binomial confidence intervals:
conf_bb_x <- confint_betabinom(
x = rr_weibull,
bounds = "upper",
conf_level = 0.95,
direction = "x"
)
conf_bb_x
## Fisher's normal approximation confidence intervals:
conf_fisher_x <- confint_fisher(x = ml_weibull, bounds = "lower", direction = "x")
conf_fisher_x
```
```{r, BBB on quantiles, fig.cap = "Figure 3: One-sided (upper) Beta-Binomial confidence bound for quantiles.", message = FALSE}
# Visualization:
## Beta-Binomial confidence intervals:
weibull_conf_bb_x <- plot_conf(
weibull_grid,
conf_bb_x,
title_trace_mod = "Rank Regression",
title_trace_conf = "Beta-Binomial Bounds"
)
weibull_conf_bb_x
```
```{r, FI on quantiles, fig.cap = "Figure 4: One-sided (lower) normal approximation confidence interval for quantiles.", message = FALSE}
## Fisher's normal approximation confidence intervals:
weibull_conf_fisher_x <- plot_conf(
weibull_grid,
conf_fisher_x,
title_trace_mod = "Maximum Likelihood",
title_trace_conf = "Fisher's Confidence Intervals"
)
weibull_conf_fisher_x
```
/scratch/gouwar.j/cran-all/cranData/weibulltools/inst/doc/Life_Data_Analysis_Part_III.Rmd
## ----setup, echo=FALSE, message=FALSE-----------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
screenshot.force = FALSE,
comment = "#>"
)
library(weibulltools)
## ----dataset_voltage, message = FALSE-----------------------------------------
voltage_tbl <- reliability_data(data = voltage, x = hours, status = status)
voltage_tbl
## ----probability_plot_weibull, fig.cap = "Figure 1: Plotting positions in Weibull grid.", message = FALSE----
# Estimating failure probabilities:
voltage_cdf <- estimate_cdf(voltage_tbl, "johnson")
# Probability plot:
weibull_plot <- plot_prob(
voltage_cdf,
distribution = "weibull",
title_main = "Weibull Probability Plot",
title_x = "Time in Hours",
title_y = "Probability of Failure in %",
title_trace = "Defectives",
plot_method = "ggplot2"
)
weibull_plot
## ----segmented_weibull_I, fig.cap = "Figure 2: Subgroup-specific plotting positions using segmented regression.", message = FALSE----
# Applying mixmod_regression():
mixreg_weib <- mixmod_regression(
x = voltage_cdf,
distribution = "weibull",
k = 2
)
mixreg_weib
# Using plot_prob_mix().
mix_reg_plot <- plot_prob(
x = mixreg_weib,
title_main = "Weibull Mixture Regression",
title_x = "Time in Hours",
title_y = "Probability of Failure",
title_trace = "Subgroup",
plot_method = "ggplot2"
)
mix_reg_plot
## ----segmented_weibull_II, fig.cap = "Figure 3: Subgroup-specific regression lines using segmented regression.", message = FALSE----
# Using plot_mod() to visualize regression lines of subgroups:
mix_reg_lines <- plot_mod(
mix_reg_plot,
x = mixreg_weib,
title_trace = "Fitted Line"
)
mix_reg_lines
## ----em_weibull_I, fig.cap = "Figure 4: Subgroup-specific plotting positions using EM algorithm.", message = FALSE----
# Applying mixmod_regression():
mix_em_weib <- mixmod_em(
x = voltage_tbl,
distribution = "weibull",
k = 2
)
mix_em_weib
# Using plot_prob():
mix_em_plot <- plot_prob(
x = mix_em_weib,
title_main = "Weibull Mixture EM",
title_x = "Time in Hours",
title_y = "Probability of Failure",
title_trace = "Subgroup",
plot_method = "ggplot2"
)
mix_em_plot
## ----em_weibull_II, fig.cap = "Figure 5: Subgroup-specific regression lines using EM algorithm.", message = FALSE----
# Using plot_mod() to visualize regression lines of subgroups:
mix_em_lines <- plot_mod(
mix_em_plot,
x = mix_em_weib,
title_trace = "Fitted Line"
)
mix_em_lines
|
/scratch/gouwar.j/cran-all/cranData/weibulltools/inst/doc/Life_Data_Analysis_Part_IV.R
|
---
title: "Life Data Analysis Part IV - Mixture Models"
subtitle: "Segmented Regression and EM Algorithm"
author:
- "Tim-Gunnar Hensel"
- "David Barkemeyer"
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
fig_height: 6
fig_width: 7
fig_caption: yes
vignette: >
%\VignetteEngine{knitr::rmarkdown}
%\VignetteIndexEntry{Life Data Analysis Part IV - Mixture Models}
%\VignetteEncoding{UTF-8}
---
```{r setup, echo=FALSE, message=FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
screenshot.force = FALSE,
comment = "#>"
)
library(weibulltools)
```
In this vignette two methods for the separation of mixture models are presented. A mixture model can be assumed, if the points in a probability plot show one or more changes in slope, depict one or several saddle points or follow an S-shape. A mixed distribution often represents the combination of multiple failure modes and thus must be split in its components to get reasonable results in further analyses.
Segmented regression aims to detect breakpoints in the sample data from which a split in subgroups can be made. The expectation-maximization (EM) algorithm is a computation-intensive method that iteratively tries to maximize a likelihood function, which is weighted by posterior probabilities. These are conditional probabilities that an observation belongs to subgroup _k_.
In the following, the focus is on the application of these methods and their visualizations using the functions `mixmod_regression()`, `mixmod_em()`, `plot_prob()` and `plot_mod()`.
## Data: Voltage Stress Test
To apply the introduced methods the dataset `voltage` is used. The dataset contains observations for units that were subjected to a high voltage stress test. _hours_ indicates the number of hours until a failure occurred or until a unit was taken out of the test without having failed. _status_ is a flag variable that describes the condition of a unit: the flag is 1 if a unit has failed and 0 otherwise. The dataset is taken from _Reliability Analysis by Failure Mode_ [^note1].
[^note1]: Doganaksoy, N.; Hahn, G.; Meeker, W. Q.: _Reliability Analysis by Failure Mode_,
Quality Progress, 35(6), 47-52, 2002
For consistent handling of the data, {weibulltools} introduces the function `reliability_data()` that converts the original dataset into a `wt_reliability_data` object. This formatted object allows to easily apply the presented methods.
```{r dataset_voltage, message = FALSE}
voltage_tbl <- reliability_data(data = voltage, x = hours, status = status)
voltage_tbl
```
## Probability Plot for Voltage Stress Test Data
To get an intuition whether one can assume the presence of a mixture model, a Weibull probability plot is constructed.
```{r probability_plot_weibull, fig.cap = "Figure 1: Plotting positions in Weibull grid.", message = FALSE}
# Estimating failure probabilities:
voltage_cdf <- estimate_cdf(voltage_tbl, "johnson")
# Probability plot:
weibull_plot <- plot_prob(
voltage_cdf,
distribution = "weibull",
title_main = "Weibull Probability Plot",
title_x = "Time in Hours",
title_y = "Probability of Failure in %",
title_trace = "Defectives",
plot_method = "ggplot2"
)
weibull_plot
```
<br>
Since there is one obvious change in slope in the Weibull probability plot of _Figure 1_, the assumption of a mixture model consisting of two subgroups is supported.
## Segmented Regression with {weibulltools}
The method of segmented regression is implemented in the function `mixmod_regression()`. If a breakpoint was detected, the failure data is separated by that point. After breakpoint detection the function `rank_regression()` is called inside `mixmod_regression()` and is used to estimate the distribution parameters of the subgroups. The visualization of the obtained results is done by functions `plot_prob()` and `plot_mod()`.
```{r segmented_weibull_I, fig.cap = "Figure 2: Subgroup-specific plotting positions using segmented regression.", message = FALSE}
# Applying mixmod_regression():
mixreg_weib <- mixmod_regression(
x = voltage_cdf,
distribution = "weibull",
k = 2
)
mixreg_weib
# Using plot_prob():
mix_reg_plot <- plot_prob(
x = mixreg_weib,
title_main = "Weibull Mixture Regression",
title_x = "Time in Hours",
title_y = "Probability of Failure",
title_trace = "Subgroup",
plot_method = "ggplot2"
)
mix_reg_plot
```
<br>
```{r segmented_weibull_II, fig.cap = "Figure 3: Subgroup-specific regression lines using segmented regression.", message = FALSE}
# Using plot_mod() to visualize regression lines of subgroups:
mix_reg_lines <- plot_mod(
mix_reg_plot,
x = mixreg_weib,
title_trace = "Fitted Line"
)
mix_reg_lines
```
<br>
The method has separated the data into $k = 2$ subgroups. This can be seen in _Figure 2_ and _Figure 3_. An upside of this function is that the segmentation is done in a comprehensible manner.
Furthermore, the segmentation process can be run automatically by setting `k = NULL`. The danger here, however, is an overestimation of the number of breakpoints.
To sum up, this function should give an indication of the existence of a mixture model. An in-depth analysis should be done afterwards.
## EM Algorithm with {weibulltools}
The EM algorithm can be applied through the usage of the function `mixmod_em()`. In contrast to `mixmod_regression()`, this method does not support an automatic separation routine and therefore _k_, the number of subgroups, must always be specified.
The obtained results can be also visualized by the functions `plot_prob()` and `plot_mod()`.
```{r em_weibull_I, fig.cap = "Figure 4: Subgroup-specific plotting positions using EM algorithm.", message = FALSE}
# Applying mixmod_em():
mix_em_weib <- mixmod_em(
x = voltage_tbl,
distribution = "weibull",
k = 2
)
mix_em_weib
# Using plot_prob():
mix_em_plot <- plot_prob(
x = mix_em_weib,
title_main = "Weibull Mixture EM",
title_x = "Time in Hours",
title_y = "Probability of Failure",
title_trace = "Subgroup",
plot_method = "ggplot2"
)
mix_em_plot
```
```{r em_weibull_II, fig.cap = "Figure 5: Subgroup-specific regression lines using EM algorithm.", message = FALSE}
# Using plot_mod() to visualize regression lines of subgroups:
mix_em_lines <- plot_mod(
mix_em_plot,
x = mix_em_weib,
title_trace = "Fitted Line"
)
mix_em_lines
```
<br>
One advantage over `mixmod_regression()` is that the EM algorithm can also assign censored items to a specific subgroup. Hence, an individual analysis of the mixture components, depicted in _Figure 4_ and _Figure 5_, is possible. In conclusion, an analysis of a mixture model using `mixmod_em()` is statistically well founded.
|
/scratch/gouwar.j/cran-all/cranData/weibulltools/inst/doc/Life_Data_Analysis_Part_IV.Rmd
|
---
title: "Life Data Analysis Part I - Estimation of Failure Probabilities"
subtitle: "A Non-parametric Approach"
author:
- "Tim-Gunnar Hensel"
- "David Barkemeyer"
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
fig_height: 6
fig_width: 7
fig_caption: yes
vignette: >
%\VignetteEngine{knitr::rmarkdown}
%\VignetteIndexEntry{Life Data Analysis Part I - Estimation of Failure Probabilities}
%\VignetteEncoding{UTF-8}
---
```{r setup, echo=FALSE, message=FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
screenshot.force = FALSE,
comment = "#>"
)
library(weibulltools)
```
This document presents non-parametric estimation methods for the computation of failure probabilities of complete data (failures) taking (multiple) right-censored units into account. A unit can be either a single component, an assembly or an entire system.
Furthermore, the estimation results are presented in distribution-specific probability plots.
## Introduction to Life Data Analysis
If the lifetime (or any other damage-equivalent quantity such as distance or load cycles) of a unit is considered to be a continuous random variable _T_, then the probability that a unit has failed at _t_ is defined by its _CDF (cumulative distribution function)_ _F(t)_.
$$ P(T\leq t) = F(t) $$
In order to obtain an estimate of the _CDF_ for each observation $t_1, t_2, ..., t_n$ two approaches are possible. Using a parametric lifetime distribution requires that the underlying assumptions for the sample data are valid. If the distribution-specific assumptions are correct, the model parameters can be estimated and the _CDF_ is computable. But if the assumptions do not hold, interpretations and derived conclusions are not reliable.
A more general approach for the calculation of cumulative failure probabilities is to use non-parametric statistical estimators $\hat{F}(t_1), \hat{F}(t_2), ..., \hat{F}(t_n)$. In comparison to a parametric distribution, no general distributional assumptions have to be made. For non-parametric estimators, an ordered sample of size $n$ is needed. Starting at $1$, the ranks $i \in \{1, 2, ..., n \}$ are assigned to the sample values sorted in ascending order. Since there is a known relationship between ranks and their corresponding ranking probabilities, a _CDF_ can be determined.
But rank distributions are systematically skewed distributions and thus the median value instead of the expected value $E\left[F\left(t_i\right)\right] = \frac{i}{n + 1}$ is used for the estimation [^note1]. This skewness is visualized in _Figure 1_.
[^note1]: Kapur, K. C.; Lamberson, L. R.: _Reliability in Engineering Design_,
_New York: Wiley_, 1977, pp. 297-301
```{r rank_densities, fig.cap = "Figure 1: Densities for different ranks i in samples of size n = 10.", message = FALSE, warning = FALSE}
library(dplyr) # data manipulation
library(ggplot2) # visualization
x <- seq(0, 1, length.out = 100) # CDF
n <- 10 # sample size
i <- c(1, 3, 5, 7, 9) # ranks
r <- n - i + 1 # inverse ranking
df_dens <- expand.grid(cdf = x, i = i) %>%
mutate(n = n, r = n - i + 1, pdf = dbeta(x = x, shape1 = i, shape2 = r))
densplot <- ggplot(data = df_dens, aes(x = cdf, y = pdf, colour = as.factor(i))) +
geom_line() +
scale_colour_discrete(guide = guide_legend(title = "i")) +
theme_bw() +
labs(x = "Failure Probability", y = "Density")
densplot
```
### Failure Probability Estimation
In practice, a simplification for the calculation of the median value, also called median rank, is made. The formula of _Benard's_ approximation is given by
$$\hat{F}(t_i) \approx \frac{i - 0.3}{n + 0.4}$$
and is described in _The Plotting of Observations on Probability Paper_ [^note2].
[^note2]: Benard, A.; Bos-Levenbach, E. C.: _The Plotting of Observations on Probability Paper_,
_Statistica Neerlandica 7 (3)_, 1953, pp. 163-173
However, this equation only provides valid estimates for failure probabilities if all units in the sample are defectives (`estimate_cdf(methods = "mr", ...)`).
In field data analysis, however, the sample mainly consists of intact units and only a small fraction of units has failed. Units that show no damage at the point of analysis and have not yet reached the operating time or mileage of units that have already failed are potential candidates for future failures. Since these units may still fail during a specific time span, such as the guarantee period, the _CDF_ must be adjusted upwards by these potential candidates.
A commonly used method for correcting probabilities of (multiple) right-censored data is _Johnson's_ method (`estimate_cdf(methods = "johnson", ...)`). By this method, all units that are included in the period looked at are sorted in an ascending order of their operating time (or any other damage-equivalent quantity). If there are units that have not failed before the _i_-th failure, an adjusted rank for the _i_-th failure is formed. This correction takes the potential candidates into account and increases the rank number. In consequence, a higher rank leads to a higher failure probability. This can be seen in _Figure 1_.
The rank adjustment is determined with:
$$j_i = j_{i-1} + x_i \cdot I_i, \;\; with \;\; j_0 = 0$$
Here, $j_ {i-1}$ is the adjusted rank of the previous failure, $x_i$ is the number of defectives at $t_i$ and $I_i$ is the increment that corrects the considered rank by the potential candidates.
$$I_i=\frac{(n+1)-j_{i-1}}{1+(n-n_i)}$$
The sample size is $n$ and $n_i$ is the number of units that have a lower $t$ than the _i_-th unit. Once the adjusted ranks are calculated, the _CDF_ can be estimated according to _Benard's_ approximation.
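The following minimal sketch reproduces the rank adjustment and _Benard's_ approximation for a small artificial sample. It is only meant to illustrate the formulas above and is not the implementation behind `estimate_cdf()`.
```{r johnson_sketch}
# Toy sample, sorted by lifetime: status 1 = failure, 0 = censored.
t_toy      <- c(10, 25, 30, 45, 50, 70, 80, 95)
status_toy <- c( 1,  0,  1,  1,  0,  1,  0,  1)

n <- length(t_toy)
j_prev <- 0  # adjusted rank of the previous failure

res <- data.frame(t = t_toy[status_toy == 1], rank_adj = NA_real_, prob = NA_real_)
k <- 1

for (i in seq_len(n)) {
  if (status_toy[i] == 1) {
    # Increment I_i corrects the rank for the potential candidates:
    I_i <- ((n + 1) - j_prev) / (1 + (n - (i - 1)))
    # One failure at t_i, i.e. x_i = 1:
    j_i <- j_prev + 1 * I_i
    res$rank_adj[k] <- j_i
    # Benard's approximation with the adjusted rank:
    res$prob[k] <- (j_i - 0.3) / (n + 0.4)
    j_prev <- j_i
    k <- k + 1
  }
}
res
```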
Other methods in {weibulltools} that can also handle (multiple) right-censored data are the _Kaplan-Meier_ estimator (`estimate_cdf(methods = "kaplan", ...)`) and the _Nelson-Aalen_ estimator (`estimate_cdf(methods = "nelson", ...)`).
### Probability Plotting
After computing failure probabilities a method called _probability plotting_ is applicable. It is a graphical _goodness of fit_ technique that is used in assessing whether an assumed distribution is appropriate to model the sample data.
The axes of a probability plot are transformed in such a way that the _CDF_ of a specified model is represented through a straight line. If the plotted points (`plot_prob()`) lie on an approximately straight line it can be said that the chosen distribution is adequate.
The two-parameter Weibull distribution can be parameterized with parameters $\mu$ and $\sigma$ such that the _CDF_ is characterized by the following equation:
$$F(t)=\Phi_{SEV}\left(\frac{\log(t) - \mu}{\sigma}\right)$$
The advantage of this representation is that the Weibull is part of the (log-)location-scale family. A linearized representation of this _CDF_ is:
$$\Phi^{-1}_{SEV}\left[F(t)\right]=\frac{1}{\sigma} \cdot \log(t) - \frac{\mu}{\sigma}$$
This leads to the following transformations regarding the axes:
* Abscissa: $x = \log(t)$
* Ordinate: $y = \Phi^{-1}_{SEV}\left[F(t)\right]$, which is the quantile function
of the SEV (_smallest extreme value_) distribution and can be written out with
$\log\left\{-\log\left[1-F(t)\right]\right\}$.
Another version of the Weibull _CDF_ with parameters $\eta$ and $\beta$ results in a _CDF_ that is defined by the following equation:
$$F(t)=1-\exp\left[ -\left(\frac{t}{\eta}\right)^{\beta}\right]$$
Then a linearized version of the CDF is:
$$ \log\left\{-\log\left[1-F(t)\right]\right\} = \beta \cdot \log(t) - \beta \cdot \log(\eta)$$
Transformations regarding the axes are:
* Abscissa: $x = \log(t)$
* Ordinate: $y = \log\left\{-\log\left[1-F(t)\right]\right\}$.
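The effect of these transformations can be illustrated in a few lines of code. For an arbitrary Weibull _CDF_ with assumed parameters $\eta = 100$ and $\beta = 2$, the transformed points lie exactly on a straight line with slope $\beta$ and intercept $-\beta \cdot \log(\eta)$:
```{r linearization_sketch}
t_seq <- seq(10, 300, length.out = 50)
F_t   <- 1 - exp(-(t_seq / 100)^2)  # Weibull CDF with eta = 100 and beta = 2

x <- log(t_seq)          # abscissa
y <- log(-log(1 - F_t))  # ordinate

# A linear fit recovers the slope beta and the intercept -beta * log(eta):
coef(lm(y ~ x))
```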
It can be easily seen that the parameters can be converted into each other. The corresponding equations are:
$$\beta = \frac{1}{\sigma}$$
and
$$\eta = \exp\left(\mu\right).$$
## Data: Shock Absorber
To apply the introduced methods of non-parametric failure probability estimation and probability plotting the `shock` data is used. In this dataset kilometer-dependent problems that have occurred on shock absorbers are reported. In addition to failed items the dataset also contains non-defective (*censored*) observations. The data can be found in _Statistical Methods for Reliability Data_ [^note3].
[^note3]: Meeker, W. Q.; Escobar, L. A.: _Statistical Methods for Reliability Data_,
_New York, Wiley series in probability and statistics_, 1998, p. 630
For consistent handling of the data, {weibulltools} introduces the function `reliability_data()` that converts the original dataset into a `wt_reliability_data` object. This formatted object allows to easily apply the presented methods.
```{r dataset_shock, message = FALSE}
shock_tbl <- reliability_data(data = shock, x = distance, status = status)
shock_tbl
```
## Estimation of Failure Probabilities with {weibulltools}
First, we are interested in how censored observations influence the estimation of failure probabilities in comparison to the case where only failed units are considered. To deal with survived and failed units we will use `estimate_cdf()` with `methods = "johnson"`, whereas `methods = "mr"` only considers failures.
```{r failure_probabilities}
# Estimate CDF with both methods:
cdf_tbl <- estimate_cdf(shock_tbl, methods = c("mr", "johnson"))
# First case where only failed units are taken into account:
cdf_tbl_mr <- cdf_tbl %>% filter(cdf_estimation_method == "mr")
cdf_tbl_mr
# Second case where both, survived and failed units are considered:
cdf_tbl_john <- cdf_tbl %>% filter(cdf_estimation_method == "johnson")
cdf_tbl_john
```
<br>
If we compare both outputs we can see that survivors reduce the probabilities. But this is just what was expected, since undamaged units with a longer or equal lifetime characteristic _x_ increase our confidence in the product.
## Probability Plotting with {weibulltools}
The estimated probabilities should now be presented in a probability plot. With `plot_prob()` probability plots for several lifetime distributions can be constructed and estimates of multiple methods can be displayed at once.
### Weibull Probability Plot
```{r probability_plot_weibull, fig.cap = "Figure 3: Plotting positions in Weibull grid.", message = FALSE}
# Weibull grid for estimated probabilities:
weibull_grid <- plot_prob(
cdf_tbl,
distribution = "weibull",
title_main = "Weibull Probability Plot",
title_x = "Mileage in km",
title_y = "Probability of Failure in %",
title_trace = "Method",
plot_method = "ggplot2"
)
weibull_grid
```
<br>
_Figure 3_ shows that the consideration of survivors (orange points, _Method: johnson_) decreases the failure probability in comparison to the sole evaluation of failed items (green points, _Method: mr_).
### Log-normal Probability Plot
Finally, we want to use a log-normal probability plot to visualize the estimated failure probabilities.
```{r probability_plot_log-normal, fig.cap = "Figure 4: Plotting positions in log-normal grid.", message = FALSE}
# Log-normal grid for estimated probabilities:
lognorm_grid <- plot_prob(
cdf_tbl,
distribution = "lognormal",
title_main = "Log-normal Probability Plot",
title_x = "Mileage in km",
title_y = "Probability of Failure in %",
title_trace = "Method",
plot_method = "ggplot2"
)
lognorm_grid
```
<br>
On the basis of _Figure 3_ and _Figure 4_ we can subjectively assess the goodness of fit of Weibull and log-normal. It can be seen that in both grids, the plotted points roughly fall on a straight line. Hence one can say that the Weibull as well as the log-normal are good model candidates for the `shock` data.
|
/scratch/gouwar.j/cran-all/cranData/weibulltools/vignettes/Life_Data_Analysis_Part_I.Rmd
|
---
title: "Life Data Analysis Part II - Estimation Methods for Parametric Lifetime Models"
subtitle: "Rank Regression and Maximum Likelihood"
author:
- "Tim-Gunnar Hensel"
- "David Barkemeyer"
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
fig_height: 6
fig_width: 7
fig_caption: yes
vignette: >
%\VignetteEngine{knitr::rmarkdown}
%\VignetteIndexEntry{Life Data Analysis Part II - Estimation Methods for Parametric Lifetime Models}
%\VignetteEncoding{UTF-8}
---
```{r setup, echo=FALSE, message=FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
screenshot.force = FALSE,
comment = "#>"
)
library(weibulltools)
```
This document introduces two methods for the parameter estimation of lifetime distributions. Whereas _Rank Regression (RR)_ fits a straight line through transformed plotting positions (transformation is described precisely in `vignette(topic = "Life_Data_Analysis_Part_I", package = "weibulltools")`), _Maximum likelihood (ML)_ strives to maximize a function of the parameters given the sample data. If the parameters are obtained, a cumulative distribution function _(CDF)_ can be computed and added to a probability plot.
In the theoretical part of this vignette the focus is on the two-parameter Weibull distribution. The second part is about the application of the provided estimation methods in {weibulltools}. All implemented models can be found in the help pages of `rank_regression()` and `ml_estimation()`.
## The Weibull Distribution
The Weibull distribution is a continuous probability distribution, which is specified by the location parameter $\mu$ and the scale parameter $\sigma$. Its _CDF_ and _PDF (probability density function)_ are given by the following formulas:
$$F(t)=\Phi_{SEV}\left(\frac{\log(t) - \mu}{\sigma}\right)$$
$$f(t)=\frac{1}{\sigma t}\;\phi_{SEV}\left(\frac{\log(t) - \mu}{\sigma}\right)$$
The practical benefit of the Weibull in the field of lifetime analysis is that the common profiles of failure rates, which are observed over the lifetime of a large number of technical products, can be described using this statistical distribution.
In the following, the estimation of the specific parameters $\mu$ and $\sigma$ is explained.
## Rank Regression (RR)
In _RR_ the _CDF_ is linearized such that the true, unknown population is estimated by a straight line which is analytically placed among the plotting pairs. The lifetime characteristic, entered on the x-axis, is displayed on a logarithmic scale. A double-logarithmic representation of the estimated failure probabilities is used for the y-axis. Ordinary Least Squares _(OLS)_ determines a best-fit line in order that the sum of squared deviations between this fitted regression line and the plotted points is minimized.
In reliability analysis, it has become common practice to place the line in the probability plot such that the horizontal distances between the best-fit line and the points are minimized [^note1]. This procedure is called __x on y__ rank regression.
[^note1]: Berkson, J.: _Are There Two Regressions?_,
_Journal of the American Statistical Association 45 (250)_,
DOI: 10.2307/2280676, 1950, pp. 164-180
The formulas for estimating the slope and the intercept of the regression line according to the described method are given below.
Slope:
$$\hat{b}=\frac{\sum_{i=1}^{n}(x_i-\bar{x})\cdot(y_i-\bar{y})}{\sum_{i=1}^{n}(y_i-\bar{y})^2}$$
Intercept:
$$\hat{a}=\bar{x}-\hat{b}\cdot\bar{y}$$
With
$$x_i=\log(t_i)\;;\; \bar{x}=\frac{1}{n}\cdot\sum_{i=1}^{n}\log(t_i)\;;$$
as well as
$$y_i=\Phi^{-1}_{SEV}\left[F(t_i)\right]=\log\left\{-\log\left[1-F(t_i)\right]\right\}\; \text{and} \; \bar{y}=\frac{1}{n}\cdot\sum_{i=1}^{n}\log\left\{-\log\left[1-F(t_i)\right]\right\}.$$
The estimates of the intercept and slope are equal to the Weibull parameters $\mu$ and $\sigma$, i.e.
$$\hat{\mu}=\hat{a}$$
and
$$\hat{\sigma}=\hat{b}.$$
In order to obtain the parameters of the shape-scale parameterization the intercept and the slope need to be transformed [^note2].
[^note2]: ReliaSoft Corporation: _Life Data Analysis Reference Book_,
online: [ReliaSoft](http://reliawiki.org/index.php/The_Weibull_Distribution), accessed 19 December 2020
$$\hat{\eta}=\exp(\hat{a})=\exp(\hat{\mu})$$
and
$$\hat{\beta}=\frac{1}{\hat{b}}=\frac{1}{\hat{\sigma}}.$$
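The following sketch applies these formulas to a small, complete (uncensored) toy sample and converts the estimates into the shape-scale parameterization afterwards. It only illustrates the computation and is not the code used inside `rank_regression()`.
```{r rr_by_hand}
# Toy sample of failure times (complete data) and Benard plotting positions:
t_i <- sort(c(42, 61, 75, 93, 110, 130, 151, 185))
n   <- length(t_i)
F_i <- (seq_len(n) - 0.3) / (n + 0.4)

x <- log(t_i)
y <- log(-log(1 - F_i))

# "x on y" least squares:
b_hat <- sum((x - mean(x)) * (y - mean(y))) / sum((y - mean(y))^2)
a_hat <- mean(x) - b_hat * mean(y)

c(mu = a_hat, sigma = b_hat, eta = exp(a_hat), beta = 1 / b_hat)
```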
## Maximum Likelihood (ML)
The _ML_ method of Ronald A. Fisher estimates the parameters by maximizing the likelihood function. Assuming a theoretical distribution, the idea of _ML_ is that the specific parameters are chosen in such a way that the plausibility of obtaining the present sample is maximized. The likelihood and log-likelihood are given by the following equations:
$$L = \prod_{i=1}^n\left\{\frac{1}{\sigma t_i}\;\phi_{SEV}\left(\frac{\log(t_i) - \mu}{\sigma}\right)\right\}$$
and
$$\log L = \sum_{i=1}^n\log\left\{\frac{1}{\sigma t_i}\;\phi_{SEV}\left(\frac{\log(t_i) - \mu}{\sigma}\right)\right\}$$
Setting the partial derivatives of the log-likelihood function with respect to the parameters to zero results in two equations that have to be solved numerically in order to obtain the estimates.
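For a complete sample, this numerical maximization can be sketched with `optim()` as shown below. The example is deliberately simplified: it ignores censoring and is not the routine used by `ml_estimation()`.
```{r ml_by_hand}
# Negative log-likelihood of the (log-)location-scale Weibull for complete data:
neg_loglik <- function(par, t) {
  mu    <- par[1]
  sigma <- par[2]
  z     <- (log(t) - mu) / sigma
  # log f(t) = -log(sigma * t) + z - exp(z), since phi_SEV(z) = exp(z - exp(z)):
  -sum(-log(sigma * t) + z - exp(z))
}

t_i <- c(42, 61, 75, 93, 110, 130, 151, 185)

fit <- optim(
  par    = c(mu = log(mean(t_i)), sigma = 0.5),
  fn     = neg_loglik,
  t      = t_i,
  method = "L-BFGS-B",
  lower  = c(-Inf, 1e-6)  # sigma must stay positive
)
fit$par
```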
In large samples, ML estimators have optimality properties. In addition, the simulation studies by _Genschel and Meeker_ [^note3] have shown that even in small samples it is difficult to find an estimator that regularly has better properties than ML estimators.
[^note3]: Genschel, U.; Meeker, W. Q.: _A Comparison of Maximum Likelihood and Median-Rank Regression for Weibull Estimation_,
in: _Quality Engineering 22 (4)_, DOI: 10.1080/08982112.2010.503447, 2010, pp. 236-255
## Data
To apply the introduced parameter estimation methods the `shock` and `alloy` datasets are used.
### Shock Absorber
In this dataset kilometer-dependent problems that have occurred on shock absorbers are reported. In addition to failed items the dataset also contains non-defective (*censored*) observations. The data can be found in _Statistical Methods for Reliability Data_ [^note4].
[^note4]: Meeker, W. Q.; Escobar, L. A.: _Statistical Methods for Reliability Data_,
_New York, Wiley series in probability and statistics_, 1998, p. 630
For consistent handling of the data, {weibulltools} introduces the function `reliability_data()` that converts the original dataset into a `wt_reliability_data` object. This formatted object allows to easily apply the presented methods.
```{r dataset_shock, message = FALSE}
shock_tbl <- reliability_data(data = shock, x = distance, status = status)
shock_tbl
```
### Alloy T7989
The dataset `alloy` in which the cycles until a fatigue failure of a special alloy occurs are inspected. The data is also taken from Meeker and Escobar [^note5].
[^note5]: Meeker, W. Q.; Escobar, L. A.: _Statistical Methods for Reliability Data_,
_New York, Wiley series in probability and statistics_, 1998, p. 131
Again, the data have to be formatted as a `wt_reliability_data` object:
```{r, data_alloy}
# Data:
alloy_tbl <- reliability_data(data = alloy, x = cycles, status = status)
alloy_tbl
```
## RR and ML with {weibulltools}
`rank_regression()` and `ml_estimation()` can be applied to complete data as well as failure and (multiple) right-censored data. Both methods can also deal with models that have a threshold parameter $\gamma$.
In the following both methods are applied to the dataset `shock`.
### RR for two-parameter Weibull distribution
```{r RR_weibull, fig.cap = "Figure 1: RR for a two-parametric Weibull distribution.", message = FALSE}
# rank_regression needs estimated failure probabilities:
shock_cdf <- estimate_cdf(shock_tbl, methods = "johnson")
# Estimating two-parameter Weibull:
rr_weibull <- rank_regression(shock_cdf, distribution = "weibull")
rr_weibull
# Probability plot:
weibull_grid <- plot_prob(
shock_cdf,
distribution = "weibull",
title_main = "Weibull Probability Plot",
title_x = "Mileage in km",
title_y = "Probability of Failure in %",
title_trace = "Defectives",
plot_method = "ggplot2"
)
# Add regression line:
weibull_plot <- plot_mod(
weibull_grid,
x = rr_weibull,
title_trace = "Rank Regression"
)
weibull_plot
```
### ML for two-parameter Weibull distribution
```{r ML_weibull, fig.cap = "Figure 2: ML for a two-parametric Weibull distribution.", message = FALSE}
# Again estimating Weibull:
ml_weibull <- ml_estimation(
shock_tbl,
distribution = "weibull"
)
ml_weibull
# Add ML estimation to weibull_grid:
weibull_plot2 <- plot_mod(
weibull_grid,
x = ml_weibull,
title_trace = "Maximum Likelihood"
)
weibull_plot2
```
### ML for two- and three-parameter log-normal distribution
Finally, two- and three-parametric log-normal distributions are fitted to the `alloy` data using maximum likelihood.
```{r ML_estimation_log-normal, message = FALSE}
# Two-parameter log-normal:
ml_lognormal <- ml_estimation(
alloy_tbl,
distribution = "lognormal"
)
ml_lognormal
# Three-parameter Log-normal:
ml_lognormal3 <- ml_estimation(
alloy_tbl,
distribution = "lognormal3"
)
ml_lognormal3
```
<br>
```{r ML_visualization_I, fig.cap = "Figure 3: ML for a two-parametric log-normal distribution.", message = FALSE}
# Constructing probability plot:
tbl_cdf_john <- estimate_cdf(alloy_tbl, "johnson")
lognormal_grid <- plot_prob(
tbl_cdf_john,
distribution = "lognormal",
title_main = "Log-normal Probability Plot",
title_x = "Cycles",
title_y = "Probability of Failure in %",
title_trace = "Failed units",
plot_method = "ggplot2"
)
# Add two-parametric model to grid:
lognormal_plot <- plot_mod(
lognormal_grid,
x = ml_lognormal,
title_trace = "Two-parametric log-normal"
)
lognormal_plot
```
<br>
```{r ML_visualization_II, fig.cap = "Figure 4: ML for a three-parametric log-normal distribution.", message = FALSE}
# Add three-parametric model to lognormal_plot:
lognormal3_plot <- plot_mod(
lognormal_grid,
x = ml_lognormal3,
title_trace = "Three-parametric log-normal"
)
lognormal3_plot
```
|
/scratch/gouwar.j/cran-all/cranData/weibulltools/vignettes/Life_Data_Analysis_Part_II.Rmd
|
---
title: "Life Data Analysis Part III - Confidence Intervals"
subtitle: "For Model Parameters, Probabilities and Quantiles"
author:
- "Tim-Gunnar Hensel"
- "David Barkemeyer"
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
fig_height: 6
fig_width: 7
fig_caption: yes
vignette: >
%\VignetteEngine{knitr::rmarkdown}
%\VignetteIndexEntry{Life Data Analysis Part III - Confidence Intervals}
%\VignetteEncoding{UTF-8}
---
```{r setup, echo=FALSE, message=FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
screenshot.force = FALSE,
comment = "#>"
)
library(weibulltools)
```
In contrast to point estimation procedures, interval estimation methods, e.g. the computation of confidence intervals, express the uncertainty which is associated with the use of a statistical estimator.
In reliability analysis it is common practice to provide confidence regions for the parameters of a lifetime distribution as well as for quantities that depend on these model parameters.
In this vignette, the determination of confidence intervals for model parameters, quantiles (lifetime characteristic) and failure probabilities (*CDF*) is presented.
## Confidence Intervals for Model Parameters
Confidence intervals can be calculated for every model parameter. For this, the (approximate) sampling distribution as well as an estimate of its standard deviation must be known. In the following, the formulas, which strongly depend on the estimation method, are provided for *Rank Regression* and *Maximum Likelihood Estimation*.
### Rank Regression (RR)
In *Rank Regression* a linear relationship between the lifetime characteristic and the failure probability is determined. The parameters of a simple linear regression model are the intercept and the slope, which are the location parameter $\mu$ and the scale parameter $\sigma$ for the majority of lifetime distributions.
An approximated two-sided interval for the true parameters on a $1 - \alpha$ confidence level can be obtained with the formulas
$$\bigg[\hat{\mu}_{\text{lower}} \, ; \hat{\mu}_{\text{upper}} \bigg] = \bigg[\hat{\mu}_{\text{RR}} \pm t_{1 - \frac{\alpha}{2}} \cdot \hat{se}^{\text{HC}}_{\hat{\mu}}\bigg] \qquad and \qquad \bigg[\hat{\sigma}_{\text{lower}} \, ;\hat{\sigma}_{\text{upper}} \bigg] = \bigg[\hat{\sigma}_{\text{RR}} \pm t_{1 - \frac{\alpha}{2}} \cdot \hat{se}^{\text{HC}}_{\hat{\sigma}}\bigg]. $$
For a given sample, $\hat{\mu}_{\text{RR}}$ and $\hat{\sigma}_{\text{RR}}$ are the least squares estimates and $\hat{se}^{\text{HC}}_{\hat{\mu}}$ and $\hat{se}^{\text{HC}}_{\hat{\sigma}}$ are the respective estimates of the standard deviations. The uncertainty that arises due to the unknown standard deviations must be taken into account by using the quantiles of Student's t-distribution.
When using *RR* in the context of reliability analysis, the assumption of homoscedastic error terms is often violated. Therefore, the computation of the standard errors is based on a heteroscedasticity-consistent (**HC**) variance-covariance matrix. Other assumptions of the classical linear regression model, such as the absence of serial correlation, are questionable as well and have already been discussed in the literature [^note1].
Hence, the *Maximum Likelihood Estimation* procedure is recommended, which is described in the next section.
[^note1]: Genschel, U.; Meeker, W. Q.: _A Comparison of Maximum Likelihood and Median-Rank Regression for Weibull Estimation_,
in: _Quality Engineering 22 (4)_, DOI: 10.1080/08982112.2010.503447, 2010, pp. 236-255
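To illustrate the idea, the sketch below fits a plain `lm()` of the transformed lifetimes on the transformed probabilities for a toy sample and combines heteroscedasticity-consistent standard errors from the {sandwich} package with quantiles of the t-distribution. This is only a simplified stand-in; the actual computation inside `rank_regression()` may differ in its details.
```{r, hc_sketch}
library(sandwich)

# Toy plotting positions (complete sample, Benard's approximation):
t_i <- sort(c(42, 61, 75, 93, 110, 130, 151, 185))
F_i <- (seq_along(t_i) - 0.3) / (length(t_i) + 0.4)

y <- log(-log(1 - F_i))  # transformed probabilities
x <- log(t_i)            # transformed lifetimes

# "x on y" regression: the intercept estimates mu, the slope estimates sigma:
fit <- lm(x ~ y)

est   <- coef(fit)
se_hc <- sqrt(diag(vcovHC(fit)))  # heteroscedasticity-consistent standard errors
t_q   <- qt(0.975, df = df.residual(fit))

cbind(estimate = est, lower = est - t_q * se_hc, upper = est + t_q * se_hc)
```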
### Maximum Likelihood Estimation (MLE)
*ML* estimators are subject to a variety of regularity conditions but in return have many useful properties compared to other estimation techniques. One is that *ML* estimators are asymptotically normally distributed, and for this reason normal approximation confidence intervals for the model parameters can be derived from theory.
Using the parameterization introduced above, a two-sided normal approximation confidence interval for the location parameter $\mu$ can be computed with the equation
$$\bigg[\hat{\mu}_{\text{lower}} \, ; \, \hat{\mu}_{\text{upper}} \bigg] = \bigg[\hat{\mu}_{\text{MLE}} \pm z_{1 - \frac{\alpha}{2}} \cdot \hat{se}_{\hat{\mu}}\bigg].$$
By definition, the scale parameter $\sigma$ is always positive and thus an alternative confidence interval is used [^note2]:
$$\bigg[\hat{\sigma}_{\text{lower}} \, ; \, \hat{\sigma}_{\text{upper}} \bigg] = \bigg[\frac{\hat{\sigma}_{\text{MLE}}}{w} \, ; \hat{\sigma}_{\text{MLE}} \cdot w \bigg]$$
with
$$w = \exp\left[z_{1 - \frac{\alpha}{2}} \cdot \frac{\hat{se}_{\hat{\sigma}}}{\hat{\sigma}_{\text{MLE}}}\right].$$
[^note2]: Meeker, W. Q.; Escobar, L. A.: _Statistical Methods for Reliability Data_,
_New York, Wiley series in probability and statistics_, 1998, p. 188
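Given point estimates and standard errors, both intervals can be computed in a few lines. The numbers below are purely illustrative and are not taken from a fitted model.
```{r, normal_approx_ci_sketch}
# Illustrative estimates and standard errors:
mu_hat     <- 10.2
sigma_hat  <- 0.33
se_mu      <- 0.08
se_sigma   <- 0.06
conf_level <- 0.95

z <- qnorm(1 - (1 - conf_level) / 2)

# Symmetric interval for mu:
ci_mu <- mu_hat + c(-1, 1) * z * se_mu

# Multiplicative interval for sigma (keeps the bounds positive):
w <- exp(z * se_sigma / sigma_hat)
ci_sigma <- c(sigma_hat / w, sigma_hat * w)

list(mu = ci_mu, sigma = ci_sigma)
```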
## Confidence Intervals for Probabilities and Quantiles
In addition to the confidence regions for the distribution-specific parameters, intervals for the regression line are provided as well. These can be aligned according to the probability $F(t)$ or to the lifetime characteristic $t$.
Whereas the Beta-Binomial confidence bounds are often used in combination with *RR*, Fisher's normal approximation confidence intervals are only applicable in the case of *MLE*.
### Beta-Binomial Confidence Intervals for $F(t)$
To obtain a two-sided non-parametric confidence interval for the failure probabilities at a given $1-\alpha$ level, a procedure similar to *Median Ranks (MR)* is used.
Instead of finding the probability $p_{\text{MR}}$ for the *j-th* rank at the $50\%$ level
$$0.5 = \sum^n_{k = j} \binom{n}{k} \cdot p_{\text{MR}}^k \cdot \left(1-p_{\text{MR}}\right)^{n-k}, $$
the probability $p_{\text{lower}}$ must be determined for equation
$$\frac{\alpha}{2} = \sum^n_{k = j} \binom{n}{k} \cdot p_{\text{lower}}^k \cdot \left(1-p_{\text{lower}}\right)^{n-k}$$
and $p_{\text{upper}}$ for the expression
$$1 - \frac{\alpha}{2} = \sum^n_{k = j} \binom{n}{k} \cdot p_{\text{upper}}^k \cdot \left(1-p_{\text{upper}}\right)^{n-k}.$$
The resulting interval $\left[\hat{F}_{j, \, {\text{lower}}} \, ; \, \hat{F}_{j, \, {\text{upper}}}\right] = \left[\hat{p}_{{\text{lower}}} \, ; \, \hat{p}_{{\text{upper}}}\right]$ is the estimated confidence region for the true failure probability with respect to the *j-th* rank.
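Since these sums of binomial probabilities coincide with the _CDF_ of a Beta distribution, the bounds can be obtained directly with `qbeta()`. The following sketch only illustrates this relation and is not the code behind `confint_betabinom()`.
```{r, betabinomial_sketch}
n     <- 20   # sample size
j     <- 5    # (adjusted) rank of interest
alpha <- 0.05

# Median rank and two-sided 95 % Beta-Binomial bounds for the j-th rank:
p_mr    <- qbeta(0.5, j, n - j + 1)
p_lower <- qbeta(alpha / 2, j, n - j + 1)
p_upper <- qbeta(1 - alpha / 2, j, n - j + 1)

c(median_rank = p_mr, lower = p_lower, upper = p_upper)
```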
### Beta-Binomial Confidence Intervals for $t$
Once the intervals of the failure probabilities are calculated, a two-sided confidence interval for the lifetime characteristic can be found with the quantile function of the underlying lifetime distribution.
For the Weibull, the quantile function is given by the formula
$$t_{p} = F^{-1}(p) = \exp\left[\mu + \Phi^{-1}_{\text{SEV}}(p) \cdot \sigma\right],$$
where $\Phi^{-1}_{\text{SEV}}$ is the quantile function of the standard smallest extreme value distribution.
The confidence interval for $t$ with respect to the estimated *RR* parameters as well as the lower and upper probability of the *j-th* rank is then computed by
$$\hat{t}_{j \, ; \, \text{lower}} = \exp\left[\hat{\mu}_{\text{RR}} + \Phi^{-1}_{\text{SEV}}(\hat{F}_{j, \, {\text{lower}}}) \cdot \hat{\sigma}_{\text{RR}}\right]$$
and
$$\hat{t}_{j \, ; \, \text{upper}} = \exp\left[\hat{\mu}_{\text{RR}} + \Phi^{-1}_{\text{SEV}}(\hat{F}_{j, \, {\text{upper}}}) \cdot \hat{\sigma}_{\text{RR}}\right].$$
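Continuing the sketch above, the probability bounds of the *j-th* rank can be translated into lifetime bounds with the Weibull quantile function. The *RR* estimates used here are again purely illustrative.
```{r, betabinomial_quantile_sketch}
# Illustrative RR estimates (location-scale parameterization):
mu_rr    <- 10.2
sigma_rr <- 0.33

# Quantile function of the standard SEV distribution:
q_sev <- function(p) log(-log(1 - p))

# Lifetime bounds belonging to the probability bounds of the j-th rank:
c(
  lower = exp(mu_rr + q_sev(p_lower) * sigma_rr),
  upper = exp(mu_rr + q_sev(p_upper) * sigma_rr)
)
```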
### Fisher's Confidence Intervals for $F(t)$
For a particular quantile $t$ and the vector of parameters $\hat{\theta}_{MLE}$, a normal approximation confidence interval for the failure probability $F(t)$ can be obtained by
$$\bigg[\hat{F}_{\text{lower}}(t) \, ; \, \hat{F}_{\text{upper}}(t)\bigg] = \bigg[\hat{F}_{\text{MLE}}(t) \pm z_{1 - \frac{\alpha}{2}} \cdot \hat{se}_{\hat{F}(t)}\bigg].$$
In order to guarantee that the realized confidence interval of $F(t)$ always lies between 0 and 1, the so-called *z-procedure* can be applied [^note3]. Using this technique, statistical inference is first done for the standardized quantile $z$ and the resulting bounds are then plugged into $F(t)$ to obtain the desired interval.
[^note3]: Hoang, Y.; Meeker, W. Q.; Escobar, L. A.: _The Relationship Between Confidence Intervals for Failure Probabilities and Life Time Quantiles_,
in: _IEEE Transactions on Reliability 57_, 2008, pp. 260-266
For the Weibull, the *ML* estimator of the standardized quantile function $z$ is
$$\hat{z}_{\text{MLE}} = \frac{\log(t) - \hat{\mu}_{\text{MLE}}}{\hat{\sigma}_{\text{MLE}}}.$$
First, an approximate confidence interval for $z$ is determined with the following formula:
$$\bigg[\hat{z}_{\text{lower}} \, ; \, \hat{z}_{\text{upper}}\bigg] = \bigg[\hat{z}_{\text{MLE}} \pm z_{1 - \frac{\alpha}{2}} \cdot \hat{se}_{\hat{z}}\bigg].$$
An approximate formula for the standard error of the estimator $\hat{z}$ can be derived with the *delta method*:
$$\hat{se}_{\hat{z}} = \sqrt{\hat{Var}_{\hat{z}}} = \sqrt{\bigg(\frac{\partial{\hat{z}}}{\partial{\hat{\theta}_{\text{MLE}}}}\bigg)^{T}\; \hat{Var}(\hat{\theta}_{\text{MLE}})\; \frac{\partial{\hat{z}}}{\partial{\hat{\theta}_{\text{MLE}}}}} \; .$$
Finally, the estimated bounds of $z$ are then plugged into the distribution-specific standard *CDF* to obtain the interval for $F(t)$, which is
$$\bigg[\hat{F}_{\text{lower}}(t) \, ; \, \hat{F}_{\text{upper}}(t)\bigg] = \bigg[\Phi_{\text{SEV}}(\hat{z}_{\text{lower}}) \, ; \, \Phi_{\text{SEV}}(\hat{z}_{\text{upper}}) \bigg].$$
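A compact sketch of this *z-procedure* for a single point in time is given below. The parameter vector and its variance-covariance matrix are purely illustrative and do not stem from a real fit.
```{r, z_procedure_sketch}
# Illustrative ML estimates and variance-covariance matrix:
theta      <- c(mu = 10.2, sigma = 0.33)
vcov_theta <- matrix(c(0.0064, 0.0008,
                       0.0008, 0.0036), nrow = 2)

t0    <- 15000  # time point of interest
alpha <- 0.05
z_q   <- qnorm(1 - alpha / 2)

# Standardized quantile and its gradient with respect to (mu, sigma):
z_hat <- (log(t0) - theta[["mu"]]) / theta[["sigma"]]
grad  <- c(-1 / theta[["sigma"]], -z_hat / theta[["sigma"]])

# Delta method standard error and normal approximation interval for z:
se_z    <- sqrt(drop(t(grad) %*% vcov_theta %*% grad))
z_bound <- z_hat + c(-1, 1) * z_q * se_z

# Plugging the bounds into the standard SEV CDF yields the interval for F(t):
p_sev <- function(z) 1 - exp(-exp(z))
p_sev(z_bound)
```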
### Fisher's Confidence Intervals for $t$
In reliability analysis the lifetime characteristic often is defined as a strictly positive quantity and hence, a normal approximation confidence interval for the quantile $t$ with respect to a particular probability $p$ and the vector of parameters $\hat{\theta}_{MLE}$, can be calculated by
$$\bigg[\hat{t}_{\text{lower}}(p) \, ; \, \hat{t}_{\text{upper}}(p)\bigg] = \bigg[\frac{\hat{t}_{\text{MLE}}(p)}{w} \, ; \hat{t}_{\text{MLE}}(p) \cdot w \bigg], $$
where $w$ is
$$w = \exp\left[z_{1 - \frac{\alpha}{2}} \cdot \frac{\hat{se}_{\hat{t}(p)}}{\hat{t}_{\text{MLE}}(p)}\right].$$
For the Weibull, the *ML* equation for the quantile $t(p)$ is
$$\hat{t}_{\text{MLE}}(p) = \exp\left[\hat{\mu}_{\text{MLE}} + \Phi^{-1}_{\text{SEV}}(p) \cdot \hat{\sigma}_{\text{MLE}}\right]$$
and again, through the use of the *delta method*, a formula for the standard error of $\hat{t}(p)$ can be provided, which is
$$\hat{se}_{\hat{t}(p)} = \sqrt{\hat{Var}_{\hat{t}(p)}} = \sqrt{\bigg(\frac{\partial{\hat{t}(p)}}{\partial{\hat{\theta}_{\text{MLE}}}}\bigg)^{T}\; \hat{Var}(\hat{\theta}_{\text{MLE}})\; \frac{\partial{\hat{t}(p)}}{\partial{\hat{\theta}_{\text{MLE}}}}} \; .$$
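Analogously, a normal approximation interval for the quantile can be sketched as follows, reusing the illustrative estimates from the previous chunk.
```{r, fisher_quantile_sketch}
p <- 0.1  # probability of interest (B10 life)

# ML quantile estimate:
q_p   <- log(-log(1 - p))
t_hat <- exp(theta[["mu"]] + q_p * theta[["sigma"]])

# Delta method: gradient of t(p) with respect to (mu, sigma):
grad_t <- c(t_hat, q_p * t_hat)
se_t   <- sqrt(drop(t(grad_t) %*% vcov_theta %*% grad_t))

# Multiplicative interval keeps the bounds positive:
w <- exp(z_q * se_t / t_hat)
c(lower = t_hat / w, estimate = t_hat, upper = t_hat * w)
```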
## Data
For the computation of the presented confidence intervals the `shock` dataset is used. In this dataset kilometer-dependent problems that have occurred on shock absorbers are reported. In addition to failed items the dataset also contains non-defective (*censored*) observations. The data can be found in _Statistical Methods for Reliability Data_ [^note4].
[^note4]: Meeker, W. Q.; Escobar, L. A.: _Statistical Methods for Reliability Data_,
_New York, Wiley series in probability and statistics_, 1998, p. 630
For consistent handling of the data, {weibulltools} introduces the function `reliability_data()` that converts the original dataset into a `wt_reliability_data` object. This formatted object allows to easily apply the presented methods.
```{r dataset_shock, message = FALSE}
# Data:
shock_tbl <- reliability_data(data = shock, x = distance, status = status)
shock_tbl
```
## Confidence Intervals with {weibulltools}
Before calculating confidence intervals with {weibulltools} one has to conduct the basic steps of the Weibull analysis which are described in the previous vignettes.
```{r, Parameter estimation procedures}
# Estimation of failure probabilities:
shock_cdf <- estimate_cdf(shock_tbl, methods = "johnson")
# Rank Regression:
rr_weibull <- rank_regression(shock_cdf, distribution = "weibull")
# Maximum Likelihood Estimation:
ml_weibull <- ml_estimation(shock_tbl, distribution = "weibull")
```
### Confidence Intervals for Model Parameters
The confidence intervals for the distribution parameters are included in the model output of `rank_regression()` and `ml_estimation()`, respectively.
```{r, Confidence intervals for model parameters}
# Confidence intervals based on Rank Regression:
rr_weibull$confint
# Confidence intervals based on Maximum Likelihood Estimation:
ml_weibull$confint
```
The `confint` element of the model output is a matrix with the parameter names as row names and the confidence level as column names. Different levels can be specified using the argument `conf_level`.
```{r, Confidence level}
# Confidence intervals based on another confidence level:
ml_weibull_99 <- ml_estimation(shock_tbl, distribution = "weibull", conf_level = 0.99)
ml_weibull_99$confint
```
### Confidence Intervals for Probabilities
Confidence bounds for failure probabilities can be either determined with `confint_betabinom()` or `confint_fisher()`. As explained in the theoretical part of this vignette the Beta-Binomial confidence bounds should be applied to the output of `rank_regression()` whereas Fisher's normal approximation confidence intervals are only applicable if the parameters and the variance-covariance matrix were estimated with `ml_estimation()`.
```{r, Confidence intervals for probabilities}
# Beta-Binomial confidence bounds:
conf_bb <- confint_betabinom(
x = rr_weibull,
b_lives = c(0.01, 0.1, 0.5),
bounds = "two_sided",
conf_level = 0.95,
direction = "y"
)
conf_bb
# Fisher's normal approximation confidence intervals:
conf_fisher <- confint_fisher(x = ml_weibull)
conf_fisher
```
The outputs of both functions contain the calculated bounds for the failure probabilities, ranging from the minimum to the maximum observed failure. Within the observed range of failures, an interpolation of quantiles is made, for which the intervals of the probabilities are provided as well (supporting points).
In the function call of `confint_betabinom()` the default arguments of both functions are listed. With the argument `b_lives`, confidence regions for selected probabilities are included, but only if they are in the range of the estimated failure probabilities.
The argument `bounds` specifies the bound(s) to be computed. It can be one of `c("two_sided", "lower", "upper")`.
If `direction = "y"`, confidence intervals for the probabilities are provided.
The visualization of the computed intervals is done with `plot_conf()`.
```{r, Preparation for visualization}
# Probability plot
weibull_grid <- plot_prob(
shock_cdf,
distribution = "weibull",
title_main = "Weibull Probability Plot",
title_x = "Mileage in km",
title_y = "Probability of Failure in %",
title_trace = "Defectives",
plot_method = "ggplot2"
)
```
```{r, BBB on failure probabilities, fig.cap = "Figure 1: Beta-Binomial confidence bounds for failure probabilities.", message = FALSE}
# Beta-Binomial confidence intervals:
weibull_conf_bb <- plot_conf(
weibull_grid,
conf_bb,
title_trace_mod = "Rank Regression",
title_trace_conf = "Beta-Binomial Bounds"
)
weibull_conf_bb
```
```{r, FI on failure probabilities, fig.cap = "Figure 2: Fisher's normal approximation confidence intervals for failure probabilities.", message = FALSE}
# Fisher's normal approximation confidence intervals:
weibull_conf_fisher <- plot_conf(
weibull_grid,
conf_fisher,
title_trace_mod = "Maximum Likelihood",
title_trace_conf = "Fisher's Confidence Intervals"
)
weibull_conf_fisher
```
As one can see, `plot_conf()` not only adds the confidence limits to an existing probability plot, but also includes the estimated linearized CDF. There is no need for an additional call of `plot_mod()`. In fact, the same routines used by `plot_mod()` are called under the hood, which ensures that confidence bounds are not drawn without the regression line.
### Confidence Intervals for Quantiles
The computation and visualization of confidence intervals for the lifetime characteristic is very similar to the procedure presented for the probabilities.
The only difference is that one has to change the value of the argument `direction` to `"x"`.
```{r, Confidence intervals for quantiles}
# Computation of confidence intervals for quantiles:
## Beta-Binomial confidence intervals:
conf_bb_x <- confint_betabinom(
x = rr_weibull,
bounds = "upper",
conf_level = 0.95,
direction = "x"
)
conf_bb_x
## Fisher's normal approximation confidence intervals:
conf_fisher_x <- confint_fisher(x = ml_weibull, bounds = "lower", direction = "x")
conf_fisher_x
```
```{r, BBB on quantiles, fig.cap = "Figure 3: One-sided (upper) Beta-Binomial confidence bound for quantiles.", message = FALSE}
# Visualization:
## Beta-Binomial confidence intervals:
weibull_conf_bb_x <- plot_conf(
weibull_grid,
conf_bb_x,
title_trace_mod = "Rank Regression",
title_trace_conf = "Beta-Binomial Bounds"
)
weibull_conf_bb_x
```
```{r, FI on quantiles, fig.cap = "Figure 4: One-sided (lower) normal approximation confidence interval for quantiles.", message = FALSE}
## Fisher's normal approximation confidence intervals:
weibull_conf_fisher_x <- plot_conf(
weibull_grid,
conf_fisher_x,
title_trace_mod = "Maximum Likelihood",
title_trace_conf = "Fisher's Confidence Intervals"
)
weibull_conf_fisher_x
```
|
/scratch/gouwar.j/cran-all/cranData/weibulltools/vignettes/Life_Data_Analysis_Part_III.Rmd
|
---
title: "Life Data Analysis Part IV - Mixture Models"
subtitle: "Segmented Regression and EM Algorithm"
author:
- "Tim-Gunnar Hensel"
- "David Barkemeyer"
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
fig_height: 6
fig_width: 7
fig_caption: yes
vignette: >
%\VignetteEngine{knitr::rmarkdown}
%\VignetteIndexEntry{Life Data Analysis Part IV - Mixture Models}
%\VignetteEncoding{UTF-8}
---
```{r setup, echo=FALSE, message=FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
screenshot.force = FALSE,
comment = "#>"
)
library(weibulltools)
```
In this vignette two methods for the separation of mixture models are presented. A mixture model can be assumed, if the points in a probability plot show one or more changes in slope, depict one or several saddle points or follow an S-shape. A mixed distribution often represents the combination of multiple failure modes and thus must be split in its components to get reasonable results in further analyses.
Segmented regression aims to detect breakpoints in the sample data from which a split in subgroups can be made. The expectation-maximization (EM) algorithm is a computation-intensive method that iteratively tries to maximize a likelihood function, which is weighted by posterior probabilities. These are conditional probabilities that an observation belongs to subgroup _k_.
In the following, the focus is on the application of these methods and their visualizations using the functions `mixmod_regression()`, `mixmod_em()`, `plot_prob()` and `plot_mod()`.
## Data: Voltage Stress Test
To apply the introduced methods the dataset `voltage` is used. The dataset contains observations for units that were subjected to a high voltage stress test. _hours_ indicates the number of hours until a failure occurred or until a unit was taken out of the test without having failed. _status_ is a flag variable that describes the condition of a unit: the flag is 1 if a unit has failed and 0 otherwise. The dataset is taken from _Reliability Analysis by Failure Mode_ [^note1].
[^note1]: Doganaksoy, N.; Hahn, G.; Meeker, W. Q.: _Reliability Analysis by Failure Mode_,
Quality Progress, 35(6), 47-52, 2002
For consistent handling of the data, {weibulltools} introduces the function `reliability_data()` that converts the original dataset into a `wt_reliability_data` object. This formatted object allows to easily apply the presented methods.
```{r dataset_voltage, message = FALSE}
voltage_tbl <- reliability_data(data = voltage, x = hours, status = status)
voltage_tbl
```
## Probability Plot for Voltage Stress Test Data
To get an intuition whether one can assume the presence of a mixture model, a Weibull probability plot is constructed.
```{r probability_plot_weibull, fig.cap = "Figure 1: Plotting positions in Weibull grid.", message = FALSE}
# Estimating failure probabilities:
voltage_cdf <- estimate_cdf(voltage_tbl, "johnson")
# Probability plot:
weibull_plot <- plot_prob(
voltage_cdf,
distribution = "weibull",
title_main = "Weibull Probability Plot",
title_x = "Time in Hours",
title_y = "Probability of Failure in %",
title_trace = "Defectives",
plot_method = "ggplot2"
)
weibull_plot
```
<br>
Since there is one obvious change in slope in the Weibull probability plot of _Figure 1_, the assumption of a mixture model consisting of two subgroups is supported.
## Segmented Regression with {weibulltools}
The method of segmented regression is implemented in the function `mixmod_regression()`. If a breakpoint was detected, the failure data is separated by that point. After breakpoint detection the function `rank_regression()` is called inside `mixmod_regression()` and is used to estimate the distribution parameters of the subgroups. The visualization of the obtained results is done by functions `plot_prob()` and `plot_mod()`.
```{r segmented_weibull_I, fig.cap = "Figure 2: Subgroup-specific plotting positions using segmented regression.", message = FALSE}
# Applying mixmod_regression():
mixreg_weib <- mixmod_regression(
x = voltage_cdf,
distribution = "weibull",
k = 2
)
mixreg_weib
# Using plot_prob():
mix_reg_plot <- plot_prob(
x = mixreg_weib,
title_main = "Weibull Mixture Regression",
title_x = "Time in Hours",
title_y = "Probability of Failure",
title_trace = "Subgroup",
plot_method = "ggplot2"
)
mix_reg_plot
```
<br>
```{r segmented_weibull_II, fig.cap = "Figure 3: Subgroup-specific regression lines using segmented regression.", message = FALSE}
# Using plot_mod() to visualize regression lines of subgroups:
mix_reg_lines <- plot_mod(
mix_reg_plot,
x = mixreg_weib,
title_trace = "Fitted Line"
)
mix_reg_lines
```
<br>
The method has separated the data into $k = 2$ subgroups. This can be seen in _Figure 2_ and _Figure 3_. An upside of this function is that the segmentation is done in a comprehensible manner.
Furthermore, the segmentation process can be run automatically by setting `k = NULL`. The danger here, however, is an overestimation of the number of breakpoints.
To sum up, this function should give an indication of the existence of a mixture model. An in-depth analysis should be done afterwards.
## EM Algorithm with {weibulltools}
The EM algorithm can be applied through the usage of the function `mixmod_em()`. In contrast to `mixmod_regression()`, this method does not support an automatic separation routine and therefore _k_, the number of subgroups, must always be specified.
The obtained results can be also visualized by the functions `plot_prob()` and `plot_mod()`.
```{r em_weibull_I, fig.cap = "Figure 4: Subgroup-specific plotting positions using EM algorithm.", message = FALSE}
# Applying mixmod_em():
mix_em_weib <- mixmod_em(
x = voltage_tbl,
distribution = "weibull",
k = 2
)
mix_em_weib
# Using plot_prob():
mix_em_plot <- plot_prob(
x = mix_em_weib,
title_main = "Weibull Mixture EM",
title_x = "Time in Hours",
title_y = "Probability of Failure",
title_trace = "Subgroup",
plot_method = "ggplot2"
)
mix_em_plot
```
```{r em_weibull_II, fig.cap = "Figure 5: Subgroup-specific regression lines using EM algorithm.", message = FALSE}
# Using plot_mod() to visualize regression lines of subgroups:
mix_em_lines <- plot_mod(
mix_em_plot,
x = mix_em_weib,
title_trace = "Fitted Line"
)
mix_em_lines
```
<br>
One advantage over `mixmod_regression()` is that the EM algorithm can also assign censored items to a specific subgroup. Hence, an individual analysis of the mixture components, depicted in _Figure 4_ and _Figure 5_, is possible. In conclusion, an analysis of a mixture model using `mixmod_em()` is statistically well founded.
|
/scratch/gouwar.j/cran-all/cranData/weibulltools/vignettes/Life_Data_Analysis_Part_IV.Rmd
|
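## Bootstrap estimation of weighted quantile regression models.
## For each of the B bootstrap samples, .onewrq() re-estimates the quantile
## regression given by 'form' at the quantiles 'tau', either reusing the
## weights already present in the data (wcompute = 0), recomputing them on
## the bootstrap sample (wcompute = 1), or doing both (wcompute = 2).
## Computations can be run in parallel (nproc > 1, optionally via MPI) and
## the resulting coefficient matrix can be written to 'file'.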
bootwrq <-
function(B, form, tau, data, Y, X1=NULL, X2=NULL, subject, death, time, interval.death=NULL, impute=NULL, weight=NULL, wcompute=2, seed=NULL, intermittent, file=NULL, nproc=1, MPI=FALSE)
{
## check arguments
if(missing(B)) stop("Please specify the number of bootstrap sample in argument B")
if(missing(form)) stop("Please specify the model formula in argument form")
if(missing(tau)) stop("Please specify quantiles in argument tau")
if(missing(data)) stop("Please specify the dataset in argument data")
if(missing(Y)) stop("Please specify the outcome in argument Y")
if(missing(subject)) stop("Please specify the group variable in argument subject")
if(missing(death)) stop("Please specify death time in argument death")
if(missing(time)) stop("Please specify time variable in argument time")
if(missing(intermittent)) stop("Please specify if there are intermittent missing data in argument intermittent")
if(!is.numeric(B)) stop("B should be numeric")
if(!inherits(form, "formula")) stop("form should be a formula")
if(!all(is.numeric(tau))) stop("tau should contain numeric values")
if(!(all((tau>0) & (tau<1)))) stop("tau should contain values between 0 and 1")
if(!is.data.frame(data)) stop("data should be a data frame")
if(!is.character(Y)) stop("Y should be a character")
if(!(Y %in% colnames(data))) stop("data should contain Y")
if(!is.null(X1))
{
if(!all(is.character(X1))) stop("X1 should only contain characters")
if(!all((X1 %in% colnames(data)))) stop("data should contain X1")
}
if(!is.null(X2))
{
if(!all(is.character(X2))) stop("X2 should only contain characters")
if(!all((X2 %in% colnames(data)))) stop("data should contain X2")
}
if(!is.character(subject)) stop("subject should be a character")
if(!(subject %in% colnames(data))) stop("data should contain subject")
if(!is.character(death)) stop("death should be a character")
if(!(death %in% colnames(data))) stop("data should contain death")
if(!is.character(time)) stop("time should be a character")
if(!(time %in% colnames(data))) stop("data should contain time")
if(is.null(interval.death)) interval.death <- 0
if(!is.null(interval.death)){ if(!all(is.numeric(interval.death))) stop("interval.death should only contain numeric values")}
if(!is.null(impute)){ if(!is.numeric(impute)) stop("impute should be numeric")}
if(!is.null(weight))
{
if(!is.character(weight)) stop("weight should be a character")
if(!(weight %in% colnames(data))) stop("data should contain weight")
}
if(!(wcompute %in% c(0,1,2))) stop("wcompute should be 0, 1 or 2")
if(!is.null(seed)){ if(!is.numeric(seed)) stop("seed should be numeric")}
if(!(intermittent %in% c(TRUE,FALSE))) stop("intermittent should be TRUE or FALSE")
if(!is.numeric(nproc)) stop("nproc should be numeric")
if(!is.null(file)) {if(!is.character(file)) stop("file should be a character")}
## parallel computation
if(nproc>1)
{
if(MPI==TRUE)
{
cl <- makeCluster(nproc, type = "MPI")
}
else
{
cl <- makeCluster(nproc, type = "SOCK")
}
registerDoParallel(cl)
}
## seeds
if(missing(seed))
{
seed <- floor(runif(B)*1000000)
}
## compute the weights on data if not already done
if(wcompute!=1 & missing(weight))
{
if(intermittent==FALSE)
{
data <- weightsMMD(data=data,Y=Y,X1=X1,
X2=X2,subject=subject,death=death,
time=time,
interval.death=interval.death)$data
weight <- "weight"
}
if(intermittent==TRUE)
{
data <- weightsIMD(data=data,Y=Y,X1=X1,
X2=X2,subject=subject,death=death,
time=time,impute=impute)$data
weight <- "weight"
}
}
## B bootstrap resamples
b <- NULL
if(nproc>1)
{
res <- foreach(b=1:B, .combine=cbind, .errorhandling="remove") %dopar%
{
.onewrq(form=form,tau=tau,data=data,Y=Y,X1=X1,
X2=X2,subject=subject,death=death,time=time,
interval.death=interval.death,impute=impute,
weight=weight,wcompute=wcompute,seed=seed[b],
intermittent=intermittent)
}
}
else
{
res <- foreach(b=1:B, .combine=cbind, .errorhandling="remove") %do%
{
.onewrq(form=form,tau=tau,data=data,Y=Y,X1=X1,
X2=X2,subject=subject,death=death,time=time,
interval.death=interval.death,impute=impute,
weight=weight,wcompute=wcompute,seed=seed[b],
intermittent=intermittent)
}
}
if(nproc>1)
{
stopCluster(cl)
}
if(!is.null(file))
{
write.table(res,file=file,sep="\t")
}
class(res) <- "bootwrq"
return(invisible(res))
}
|
/scratch/gouwar.j/cran-all/cranData/weightQuant/R/bootwrq.R
|
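## Single bootstrap replicate: draws subjects with replacement, refits the
## weighted quantile regression (reusing and/or recomputing the weights,
## depending on 'wcompute') and returns the estimated coefficients together
## with the seed used for the resampling.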
.onewrq <-
function(form, tau, data, Y, X1, X2, subject, death, time, interval.death, impute, weight, wcompute, seed, intermittent)
{
    ## seed
set.seed(seed)
    ## subjects in data
numeros <- unique(data[,subject])
n <- length(numeros)
    ## weights in the original sample
poidsechdepart <- data[,weight]
if(wcompute!=1)
{
data$poidsechdepart <- data[,weight]
data <- data[,-which(colnames(data)==weight)]
}
    ## bootstrap sample
num_b <- sample(numeros, size=n, replace=TRUE)
j_b <- sapply(num_b,function(i) which(data[,subject]==i,useNames=FALSE))
j_b <- unlist(j_b,use.names=FALSE)
nbmes_b <- sapply(num_b,function(i) length(which(data[,subject]==i)),USE.NAMES=FALSE)
ech_b <- data[j_b,]
ech_b[,subject] <- rep(1:n,nbmes_b)
    ## model estimation
    if(wcompute==0) ## weights are not recomputed
        {
            ## models without recomputing the weights
mold <- rq(formula=form,tau=tau,data=ech_b,weights=poidsechdepart)
}
else
{
            if(wcompute==1) ## weights are recomputed
                {
                    ## add the recomputed weights
if(intermittent==FALSE)
{
dataw <- weightsMMD(data=ech_b,Y=Y,X1=X1,X2=X2,subject=subject,death=death,time=time,interval.death=interval.death)$data
}
if(intermittent==TRUE)
{
dataw <- weightsIMD(data=ech_b,Y=Y,X1=X1,X2=X2,subject=subject,death=death,time=time,impute=impute)$data
}
                    ## models
mnew <- rq(formula=form,tau=tau,data=dataw,weights=weight)
}
            else ## do both versions
{
                    ## models without recomputing the weights
mold <- rq(formula=form,tau=tau,data=ech_b,weights=poidsechdepart)
                    ## add the recomputed weights
if(intermittent==FALSE)
{
dataw <- weightsMMD(data=ech_b,Y=Y,X1=X1,X2=X2,subject=subject,death=death,time=time,interval.death=interval.death)$data
}
if(intermittent==TRUE)
{
dataw <- weightsIMD(data=ech_b,Y=Y,X1=X1,X2=X2,subject=subject,death=death,time=time,impute=impute)$data
}
                    ## models
mnew <- rq(formula=form,tau=tau,data=dataw,weights=weight)
}
}
    ## keep the coefficients
coef_b0 <- NULL
nbcoef0 <- 0
nomcoef0 <- NULL
if(exists("mold"))
{
coef_b0 <- mold$coefficients
if(length(tau)==1)
{
nbcoef0 <- length(coef_b0)
nomcoef0 <- paste("calc0",rep(tau,each=nbcoef0),names(coef_b0),sep="_")
}
else
{
nbcoef0 <- nrow(coef_b0)
nomcoef0 <- paste("calc0",rep(tau,each=nbcoef0),rownames(coef_b0),sep="_")
}
}
coef_b1 <- NULL
nbcoef1 <- 0
nomcoef1 <- NULL
if(exists("mnew"))
{
coef_b1 <- mnew$coefficients
if(length(tau)==1)
{
nbcoef1 <- length(coef_b1)
nomcoef1 <- paste("calc1",rep(tau,each=nbcoef1),names(coef_b1),sep="_")
}
else
{
nbcoef1 <- nrow(coef_b1)
nomcoef1 <- paste("calc1",rep(tau,each=nbcoef1),rownames(coef_b1),sep="_")
}
}
coef_b <- c(coef_b0,coef_b1)
nomcoef <- c(nomcoef0,nomcoef1)
res <- c(as.vector(coef_b),seed)
names(res) <- c(nomcoef,"seed")
return(res)
}
|
/scratch/gouwar.j/cran-all/cranData/weightQuant/R/onewrq.R
|
summary.bootwrq <-
function(object,...)
{
noms <- rownames(object)[-nrow(object)]
tabnoms <- matrix(unlist(str_split(rownames(object)[-nrow(object)], pattern="_", n=3)),ncol=3,byrow=TRUE)
tau <- unique(as.numeric(tabnoms[,2]))
dots <- list(...)
    isrq <- sapply(dots,function(x) inherits(x, c("rq","rqs")))
m <- NULL
if(any(isrq))
{
j <- which(isrq==TRUE)[1]
m <- dots[[j]]
}
if(!is.null(m))
{
if(any(tau != m$tau)) stop("Quantiles should be the same in model m as in the bootstrap results")
            if(length(tau)==1)
                {
                    if(any(tabnoms[,3] != names(m$coef))) stop("Coefficients should be in the same order in the model m as in the bootstrap results")
}
else
{
                    if(any(tabnoms[,3] != rep(rownames(m$coef),length(tau)))) stop("Coefficients should be in the same order in the model m as in the bootstrap results")
}
}
res0 <- NULL
if(length(grep("calc0",noms))) #poids non recalcules
{
for(j in 1:length(tau))
{
x0tau <- object[grep(paste("calc0",tau[j],sep="_"),noms),,drop=FALSE]
if(is.null(m))
{
m0tau <- apply(x0tau,1,mean)
}
else
{
if(length(tau)>1)
{
m0tau <- coef(m)[,j]
}
else
{
m0tau <- coef(m)
}
}
s0tau <- apply(x0tau,1,sd)
p0tau <- 2*pnorm(abs(m0tau/s0tau),lower.tail=FALSE)
res0 <- rbind(res0,cbind(m0tau,s0tau,p0tau))
}
colnames(res0) <- c("coef","se","p-value")
rownames(res0) <- tabnoms[1:nrow(res0),3]
cat(" Without computation of the weights in each bootstrap sample :\n")
cat("\n")
k <- 0
for(j in 1:length(tau))
{
cat("Quantile regression estimates for tau =",tau[j]," :\n")
print(res0[k+1:(nrow(res0)/length(tau)),])
k <- k+nrow(res0)/length(tau)
cat("\n")
}
}
res1 <- NULL
if(length(grep("calc1",noms))) #poids recalcules
{
for(j in 1:length(tau))
{
x1tau <- object[grep(paste("calc1",tau[j],sep="_"),noms),,drop=FALSE]
if(is.null(m))
{
m1tau <- apply(x1tau,1,mean)
}
else
{
if(length(tau)>1)
{
m1tau <- coef(m)[,j]
}
else
{
m1tau <- coef(m)
}
}
s1tau <- apply(x1tau,1,sd)
p1tau <- 2*pnorm(abs(m1tau/s1tau),lower.tail=FALSE)
res1 <- rbind(res1,cbind(m1tau,s1tau,p1tau))
}
colnames(res1) <- c("coef","se","p-value")
rownames(res1) <- tabnoms[length(tabnoms[,3])-(nrow(res1)-1):0,3]
cat(" With computation of the weights in each bootstrap sample :\n")
cat("\n")
k <- 0
for(j in 1:length(tau))
{
cat("Quantile regression estimates for tau =",tau[j]," :\n")
print(res1[k+1:(nrow(res1)/length(tau)),])
k <- k+nrow(res1)/length(tau)
cat("\n")
}
}
return(invisible(list(results0=res0,results1=res1)))
}
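## Illustrative use (a sketch, not run): 'res' is the matrix returned by
## bootwrq() and 'm0' a weighted quantile regression fitted on the original
## data with quantreg::rq() using the same formula, taus and weights.
# summary(res)      # bootstrap means used as point estimates
# summary(res, m0)  # estimates taken from m0, bootstrap SDs as standard errors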
|
/scratch/gouwar.j/cran-all/cranData/weightQuant/R/summary.bootwrq.R
|
test.bootwrq <-
function(x,m)
{
if(missing(m)) stop("Please specify the model on initial data in argument m")
if(length(m$tau)<2) stop("At least 2 quantile regressions should be estimated in model m")
noms <- rownames(x)[-nrow(x)]
tabnoms <- matrix(unlist(str_split(noms, pattern="_", n=3)),ncol=3,byrow=TRUE)
tau <- unique(as.numeric(tabnoms[,2]))
if(any(tau != m$tau)) stop("Quantiles should be the same in model m as in the bootstrap results")
    if(length(tau)==1)
        {
            if(any(tabnoms[,3] != names(m$coef))) stop("Coefficients should be in the same order in the model m as in the bootstrap results")
}
else
{
            if(any(tabnoms[,3] != rep(rownames(m$coef),length(tau)))) stop("Coefficients should be in the same order in the model m as in the bootstrap results")
}
res0 <- NULL
if(length(grep("calc0",noms)))
{
cat("\n")
cat(" Without computation of the weights in each bootstrap sample : \n \n")
for(j in 1:(length(tau)-1))
{
m0tau1 <- coef(m)[,j]
m0tau2 <- coef(m)[,j+1]
x0tau1 <- x[grep(paste("calc0",tau[j],sep="_"),noms),,drop=FALSE]
x0tau2 <- x[grep(paste("calc0",tau[j+1],sep="_"),noms),,drop=FALSE]
s0diff <- apply(x0tau1-x0tau2,1,sd)
p0diff <- 2*pnorm(abs((m0tau1-m0tau2)/s0diff),lower.tail=FALSE)
cat(paste("tau =",tau[j],"versus tau =",tau[j+1],": \n"))
print(cbind(coef=m0tau1-m0tau2,se=s0diff,pvalue=p0diff))
cat("\n")
res0 <- cbind(res0,m0tau1-m0tau2,s0diff,p0diff)
}
}
res1 <- NULL
if(length(grep("calc1",noms)))
{
cat("\n")
cat(" With computation of the weights in each bootstrap sample : \n \n")
for(j in 1:(length(tau)-1))
{
m1tau1 <- coef(m)[,j]
m1tau2 <- coef(m)[,j+1]
x1tau1 <- x[grep(paste("calc1",tau[j],sep="_"),noms),,drop=FALSE]
x1tau2 <- x[grep(paste("calc1",tau[j+1],sep="_"),noms),,drop=FALSE]
s1diff <- apply(x1tau1-x1tau2,1,sd)
p1diff <- 2*pnorm(abs((m1tau1-m1tau2)/s1diff),lower.tail=FALSE)
cat(paste("tau =",tau[j],"versus tau =",tau[j+1],": \n"))
print(cbind(coef=m1tau1-m1tau2,se=s1diff,pvalue=p1diff))
cat("\n")
res1 <- cbind(res1,m1tau1-m1tau2,s1diff,p1diff)
}
}
return(invisible(list(results0=res0,results1=res1)))
}
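## Illustrative use (a sketch, not run): 'res' and 'm0' as above; m0 must
## contain at least two quantile regressions so that consecutive values of
## tau can be compared.
# test.bootwrq(res, m0)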
|
/scratch/gouwar.j/cran-all/cranData/weightQuant/R/test.bootwrq.R
|
weightsIMD <-
function(data,Y,X1,X2,subject,death,time,impute=0,name="weight")
{
    ## check arguments
if(missing(data)) stop("Please specify the dataset in argument data")
if(missing(Y)) stop("Please specify the outcome in argument Y")
if(missing(subject)) stop("Please specify the group variable in argument subject")
if(missing(death)) stop("Please specify death time in argument death")
if(missing(time)) stop("Please specify time variable in argument time")
if(!is.data.frame(data)) stop("data should be a data frame")
if(!(Y %in% colnames(data))) stop("data should contain Y")
if(!is.null(X1))
{
if(!all(is.character(X1))) stop("X1 should only contain characters")
if(!all((X1 %in% colnames(data)))) stop("data should contain X1")
}
if(!is.null(X2))
{
if(!all(is.character(X2))) stop("X2 should only contain characters")
if(!all((X2 %in% colnames(data)))) stop("data should contain X2")
}
if(!is.character(subject)) stop("subject should be a character")
if(!(subject %in% colnames(data))) stop("data should contain subject")
if(!is.character(death)) stop("death should be a character")
if(!(death %in% colnames(data))) stop("data should contain death")
if(!is.character(time)) stop("time should be a character")
if(!(time %in% colnames(data))) stop("data should contain time")
if(!is.numeric(impute)) stop("impute should be numeric")
if(!is.character(name)) stop("name should be a character")
    ## one column per visit, one subject per row:
data2 <- data[which(!is.na(data[,Y])),c(subject,time,X1,X2,Y,death)]
wide <- reshape(data2, v.names=Y, idvar=subject, timevar=time, direction = "wide")
y.t <- paste(Y,sort(unique(data[,time])),sep=".")
nt <- length(y.t)
    ## observation indicator among subjects still alive
matobs <- matrix(-1,nrow(wide),nt)
colnames(matobs) <- paste("obs_t",1:nt,sep="")
for(j in 1:nt)
{
matobs[,j] <- ifelse(!is.na(wide[,y.t[j]]),1,ifelse((is.na(wide[,death])) | (wide[,death]>j),0,NA))
}
wide_avecobs <- cbind(wide,matobs)
    ## select the subjects alive at each visit
sample <- vector("list",nt-1)
for(j in 2:nt)
{
sample[[j-1]] <- subset(wide_avecobs,!is.na(matobs[,j]))
}
    ## pooled sample with all the variables
reponse.var <- paste("obs_t",1:nt,sep="")
nmes <- rep(NA,nt-1)
poole <- NULL
for(j in 1:(nt-1))
{
dat <- sample[[j]][,c(subject,reponse.var[j],reponse.var[j+1],X1,X2,y.t[j])]
dat[which(is.na(dat[,y.t[j]])),y.t[j]] <- impute
dat$suivi <- j+1
nmes[j] <- nrow(dat)
colnames(dat) <- c(subject,"Ravt","R",X1,X2,"Yavt","suivi")
poole <- rbind(poole,dat)
}
colnames(poole) <- c(subject,"Ravt","R",X1,X2,"Yavt","suivi")
    ## logistic regression for the numerator
covar1 <- c(X1,X2)
form1 <- formula(paste("R~-1+factor(suivi)+",paste(covar1,collapse="+")))
reg1 <- glm(form1,family="binomial",data=poole)
if(reg1$converged==TRUE)
{
coefnum <- reg1$coefficients
senum <- sqrt(diag(vcov(reg1)))
}
else
{
coefnum <- NA
senum <- NA
}
    ## logistic regression for the denominator
form2 <- "R~-1+factor(suivi)"
if(length(X1))
{
form2 <- paste(form2,"+(",paste(X1,collapse="+"),")*Yavt",sep="")
}
if(length(X2))
{
form2 <- paste(form2,"+",paste(X2,collapse="+"),sep="")
}
form2 <- formula(paste(form2,"+I(1-Ravt)"))
reg2 <- glm(form2,family="binomial",data=poole)
if(reg2$converged==TRUE)
{
coefden <- reg2$coefficients
seden <- sqrt(diag(vcov(reg2)))
}
else
{
coefden <- NA
seden <- NA
}
    ## compute the denominator
dtmp <- poole[,c(subject,"R",X1,X2,"Yavt","suivi")]
dtmp <- dtmp[order(dtmp$suivi,dtmp[,subject]),]
dpred0 <- cbind(dtmp,Ravt=0)
dpred1 <- cbind(dtmp,Ravt=1)
dpred <- rbind(dpred0,dpred1)
pred <- predict(reg2,newdata=dpred)
np <- length(pred)/2
d_avecpred <- cbind(dtmp,pred0=1/(1+exp(-pred[1:np])),pred1=1/(1+exp(-pred[np+1:np])))
d_avecpred <- d_avecpred[order(d_avecpred[,subject],d_avecpred$suivi),]
d_avecpred$pden <- NA
for(i in 1:nrow(d_avecpred))
{
if(d_avecpred[i,"suivi"]==2)
{
d_avecpred$pden[i] <- d_avecpred$pred1[i]
}
else
{
d_avecpred$pden[i] <- d_avecpred$pred0[i]*(1-d_avecpred$pden[i-1])+
d_avecpred$pred1[i]*d_avecpred$pden[i-1]
}
}
    ## compute the numerator
pred <- predict(reg1,newdata=d_avecpred)
d_avecpred$pnum <- 1/(1+exp(-pred))
    ## weights
d_avecpred$poids <- d_avecpred$pnum/d_avecpred$pden
d_poids <- d_avecpred[,c(subject,"suivi","poids")]
colnames(d_poids) <- c(subject,time,name)
    ## merge the weights into the initial data
data_poids <- merge(data,d_poids,all.x=TRUE,sort=FALSE)
data_poids[which(data_poids[,time]==1),name] <- 1
return(list(data=data_poids,coef=list(coefnum,coefden),se=list(senum,seden)))
}
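## Illustrative call (a sketch, not run): 'longdat' is the same hypothetical
## long-format data set as above; impute = 0 is the value substituted for the
## unobserved previous outcome of intermittent missing data.
# w <- weightsIMD(data = longdat, Y = "score", X1 = "age", X2 = NULL,
#                 subject = "id", death = "death", time = "time", impute = 0)
# head(w$data)  # the input data with an added column "weight"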
|
/scratch/gouwar.j/cran-all/cranData/weightQuant/R/weightsIMD.R
|
weightsMMD <-
function(data,Y,X1,X2,subject,death,time,interval.death=0,name="weight")
{
    ## check arguments
if(missing(data)) stop("Please specify the dataset in argument data")
if(missing(Y)) stop("Please specify the outcome in argument Y")
if(missing(subject)) stop("Please specify the group variable in argument subject")
if(missing(death)) stop("Please specify death time in argument death")
if(missing(time)) stop("Please specify time variable in argument time")
if(!is.data.frame(data)) stop("data should be a data frame")
if(!is.character(Y)) stop("Y should be a character")
if(!(Y %in% colnames(data))) stop("data should contain Y")
if(!is.null(X1))
{
if(!all(is.character(X1))) stop("X1 should only contain characters")
if(!all((X1 %in% colnames(data)))) stop("data should contain X1")
}
if(!is.null(X2))
{
if(!all(is.character(X2))) stop("X2 should only contain characters")
if(!all((X2 %in% colnames(data)))) stop("data should contain X2")
}
if(!is.character(subject)) stop("subject should be a character")
if(!(subject %in% colnames(data))) stop("data should contain subject")
if(!is.character(death)) stop("death should be a character")
if(!(death %in% colnames(data))) stop("data should contain death")
if(!is.character(time)) stop("time should be a character")
if(!(time %in% colnames(data))) stop("data should contain time")
if(!all(is.numeric(interval.death))) stop("interval.death should only contain numeric values")
if(!is.character(name)) stop("name should be a character")
    ## one column per visit, one subject per row:
data2 <- data[which(!is.na(data[,Y])),c(subject,time,X1,X2,Y,death)]
wide <- reshape(data2, v.names=Y, idvar=subject, timevar=time, direction = "wide")
y.t <- paste(Y,unique(data[,time]),sep=".")
nt <- length(y.t)
    ## observation indicator among subjects still alive
matobs <- matrix(-1,nrow(wide),nt)
colnames(matobs) <- paste("obs_t",1:nt,sep="")
for(j in 1:nt)
{
matobs[,j] <- ifelse(!is.na(wide[,y.t[j]]),1,ifelse((is.na(wide[,death])) | (wide[,death]>j),0,NA))
}
wide_avecobs <- data.frame(wide,matobs)
    ## function for a single value of delta
prob <- function(wide,Y,X1,X2,subject,death,suivi,delta)
{
matobs <- wide[,(ncol(wide)-nt+1):ncol(wide)]
        ## select the subjects alive at each visit
sample <- vector("list",nt-1)
for(j in 2:nt)
{
if((j+delta)<=nt)
{
sample[[j-1]] <- subset(wide,!is.na(matobs[,j]) & !is.na(matobs[,j+delta]))
}
}
        ## pooled sample with all the variables
reponse.var <- paste("obs_t",1:nt,sep="")
nmes <- rep(NA,nt-1)
poole <- NULL
for(j in 1:(nt-1-delta))
{
dat <- sample[[j]][,c(subject,reponse.var[j+1],X1,X2,y.t[j])]
dat$suivi <- j+1
nmes[j] <- nrow(dat)
colnames(dat) <- c(subject,"R",X1,X2,"Yavt","suivi")
poole <- rbind(poole,dat)
}
colnames(poole) <- c(subject,"R",X1,X2,"Yavt","suivi")
        ## remove subjects not observed at the previous visit
poole <- poole[which(!is.na(poole$Yavt)),]
        ## logistic regression for the numerator
if(delta==0)
{
covar1 <- c(X1,X2)
form1 <- formula(paste("R~-1+factor(suivi)+",paste(covar1,collapse="+")))
reg1 <- glm(form1,family="binomial",data=poole)
if(reg1$converged==TRUE)
{
coefnum <- reg1$coefficients
senum <- sqrt(diag(vcov(reg1)))
}
else
{
coefnum <- NA
senum <- NA
}
}
        ## logistic regression for the denominator
form2 <- "R~-1+factor(suivi)"
if(length(X1))
{
form2 <- paste(form2,"+(",paste(X1,collapse="+"),")*Yavt",sep="")
}
if(length(X2))
{
form2 <- paste(form2,"+",paste(X2,collapse="+"),sep="")
}
reg2 <- glm(form2,family="binomial",data=poole)
if(reg2$converged==TRUE)
{
coefden <- reg2$coefficients
seden <- sqrt(diag(vcov(reg2)))
}
else
{
coefden <- NA
seden <- NA
}
        ## compute the denominator
dtmp <- poole[,c(subject,"R",X1,X2,"Yavt","suivi")]
dpred <- dtmp[order(dtmp$suivi,dtmp[,subject]),]
pred <- predict(reg2,newdata=dpred)
d_avecpred <- data.frame(dpred,pden=1/(1+exp(-pred)))
## d_avecpred <- d_avecpred[order(d_avecpred[,subject],d_avecpred$suivi),]
## d_avecpred$pden <- NA
## for(i in 1:nrow(d_avecpred))
## {
## if(d_avecpred[i,"suivi"]==2)
## {
## d_avecpred$pden[i] <- d_avecpred$pred[i]
## }
## else
## {
## d_avecpred$pden[i] <- d_avecpred$pred[i]*d_avecpred$pden[i-1]
## }
## }
        ## compute the numerator
if(delta==0)
{
d_avecpred <- d_avecpred[order(d_avecpred[,subject],d_avecpred[,"suivi"]),]
pred <- predict(reg1,newdata=d_avecpred)
d_avecpred$pnum <- 1/(1+exp(-pred))
d_avecpred$pnum <- unlist(tapply(d_avecpred$pnum,d_avecpred[,subject],cumprod))
}
colnames(d_avecpred)[which(colnames(d_avecpred)=="pden")] <- paste("pden",delta,sep="")
if(delta==0)
{
res <- list(d_avecpred,coefden,seden,coefnum,senum)
}
else
{
res <- list(d_avecpred,coefden,seden)
}
return(res)
}
    ## compute the probabilities for all values of delta
for(delta in interval.death)
{
res <- prob(wide=wide_avecobs,Y=Y,X1=X1,
X2=X2,subject=subject,death=death,
suivi=time,delta=delta)
if(delta==0)
{
data3 <- merge(data2,res[[1]][,c(subject,"suivi","Yavt","pnum","pden0")],by.x=c(subject,time),by.y=c(subject,"suivi"),all.x=TRUE)
coef <- list(res[[4]],res[[2]])
se <- list(res[[5]],res[[3]])
}
else
{
data3 <- merge(data3,res[[1]][,c(subject,"suivi",paste("pden",delta,sep=""))],by.x=c(subject,time),by.y=c(subject,"suivi"),all.x=TRUE)
coef <- c(coef,list(res[[2]]))
se <- c(se,list(res[[3]]))
}
}
    ## compute the weights
data3 <- data3[order(data3[,subject],data3[,time]),]
data3[which(data3[,time]==1),paste("pden",interval.death,sep="")] <- 1
for(l in 1:nrow(data3))
{
if(data3[l,time]==1)
{
data3$pden[l] <- 1
data3$pnum[l] <- 1
}
else
{
j <- data3[l,time]
kk <- cut(0:(j-2),breaks=c(interval.death,nt+1),labels=interval.death,right=FALSE)
kk <- as.numeric(as.character(kk))
ll <- l-0:(j-2)
p <- apply(cbind(ll,kk),1,function(x,d) d[x[1],paste("pden",x[2],sep="")],d=data3)
data3$pden[l] <- prod(p)
# pden[l] = pden0[l] * pden1[l-1] * .. *pdendelta[l-delta]
}
}
data3$poids <- data3$pnum/data3$pden
    ## merge the weights into the initial data
ajout <- data3[,c(subject,time,"poids")]
colnames(ajout) <- c(subject,time,name)
data_poids <- merge(data,ajout,all.x=TRUE,sort=FALSE)
return(list(data=data_poids,coef=coef,se=se))
}
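## Illustrative call (a sketch, not run): same hypothetical data as above;
## interval.death = 0 matches the value bootwrq() falls back to when its
## interval.death argument is NULL.
# w <- weightsMMD(data = longdat, Y = "score", X1 = "age", X2 = NULL,
#                 subject = "id", death = "death", time = "time",
#                 interval.death = 0)
# head(w$data)  # the input data with an added column "weight"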
|
/scratch/gouwar.j/cran-all/cranData/weightQuant/R/weightsMMD.R
|
# ML for ordinal probit model, using modified Newton-Raphson
iee.ord<- function(x,y,link,iprint=0,maxiter=20,toler=1.e-6)
{
if(!is.vector(x))
{ if(nrow(x)!=length(y)) stop("x, y not same length") }
else if(length(x)!=length(y)) { stop("x, y not same length") }
if(is.vector(x)) x=as.matrix(x)
n=length(y)
# assume y in 1,...,norc
norc=length(unique(y))
npred=ncol(x)
np=norc-1+npred
# centering of x so that can use start of 0 for beta
xmn=apply(x,2,"mean")
xc=scale(x,center=xmn,scale=F)
# starting point for NR
cum=(1:(norc-1))
cutp=rep(0,norc-1)
for(k in cum)
{ pr=sum(y<=k)
if (pr==0) { pr=1 }
cutp[k]=qlogis(pr/n)
}
b=rep(0,npred)
if(link=="probit") { dlatent=dnorm; platent=pnorm; der.dlatent=der.dnorm } else {
dlatent=dlogis; platent=plogis; der.dlatent=der.dlogis }
# loop
mxdif=1
iter=0
while(iter<maxiter & mxdif>toler)
{ tem=xc%*%b
cutb=c(-10,cutp,10)
ub=tem+cutb[y+1]
lb=tem+cutb[y]
ucdf=platent(ub)
lcdf=platent(lb)
updf=dlatent(ub)
lpdf=dlatent(lb)
# score vector
dbeta=rep(0,npred)
dcut=rep(0,norc+1)
# Hessian matrix
d2beta=matrix(0,npred,npred)
d2bcut=matrix(0,npred,norc+1)
d2cut=matrix(0,norc+1,norc+1)
for(i in 1:n)
{ uderi=der.dlatent(ub[i])
lderi=der.dlatent(lb[i])
xx=xc[i,]
pri=ucdf[i]-lcdf[i]
prderi=updf[i]-lpdf[i]
dbeta=dbeta+xx*prderi/pri
k=y[i]
dcut[k+1]=dcut[k+1]+updf[i]/pri
dcut[k]=dcut[k]-lpdf[i]/pri
pr2=pri^2
d2beta=d2beta+ outer(xx,xx)*((uderi-lderi)*pri-prderi^2)/pr2
d2bcut[,k+1]=d2bcut[,k+1]+xx*(uderi*pri-updf[i]*prderi)/pr2
d2bcut[,k]=d2bcut[,k]+xx*(-lderi*pri+lpdf[i]*prderi)/pr2
d2cut[k+1,k+1]=d2cut[k+1,k+1]+ (uderi*pri-updf[i]^2)/pr2
d2cut[k,k]=d2cut[k,k]+ (-lderi*pri-lpdf[i]^2)/pr2
tem2=updf[i]*lpdf[i]/pr2
d2cut[k,k+1]=d2cut[k,k+1]+tem2
d2cut[k+1,k]=d2cut[k+1,k]+tem2
}
sc=c(dcut[2:norc],dbeta)
if(npred==1) d2bcut=matrix(c(d2bcut[,2:norc]),npred,norc-1)
else d2bcut=d2bcut[,2:norc]
d2cut=d2cut[2:norc,2:norc]
h=cbind(d2cut,t(d2bcut))
h=rbind(h,cbind(d2bcut,d2beta))
dif=solve(h,sc)
mxdif=max(abs(dif))
cutp=cutp-dif[1:(norc-1)]
b=b-dif[norc:np]
# modification for cutp out of order
chk=cutp[-1]-cutp[1:(norc-2)]
ibad=sum(chk<=0)
while(ibad>0)
{ dif=dif/2
mxdif=mxdif/2
cutp=cutp+dif[1:(norc-1)]
b=b+dif[norc:np]
chk=cutp[-1]-cutp[1:(norc-2)]
ibad=sum(chk<=0)
}
iter=iter+1
if(iprint==1)
{ cat("iter=",iter,", (with centered x's) cutp=", cutp, ", b=",b,"\n")
cat(" scorevec=", sc,"\n\n")
}
}
if(iter>=maxiter) cat("*** did not converge, check with iprint=1\n")
# cutpoints with original x
for(j in 1:npred)
{ cutp=cutp-b[j]*xmn[j] }
if(iprint==1) cat("(with original x's) cutp=", cutp,"\n")
# Hessian with original x's, repeat of previous code with x instead of xc
tem=x%*%b
cutb=c(-10,cutp,10)
ub=tem+cutb[y+1]
lb=tem+cutb[y]
ucdf=platent(ub)
lcdf=platent(lb)
updf=dlatent(ub)
lpdf=dlatent(lb)
nllk=0
dbeta=rep(0,npred)
dcut=rep(0,norc+1)
d2beta=matrix(0,npred,npred)
d2bcut=matrix(0,npred,norc+1)
d2cut=matrix(0,norc+1,norc+1)
for(i in 1:n)
{ uderi=-updf[i]*ub[i]
lderi=-lpdf[i]*lb[i]
xx=x[i,]
pri=ucdf[i]-lcdf[i]
nllk=nllk-log(pri)
prderi=updf[i]-lpdf[i]
dbeta=dbeta+xx*prderi/pri
k=y[i]
dcut[k+1]=dcut[k+1]+updf[i]/pri
dcut[k]=dcut[k]-lpdf[i]/pri
pr2=pri^2
d2beta=d2beta+ outer(xx,xx)*((uderi-lderi)*pri-prderi^2)/pr2
d2bcut[,k+1]=d2bcut[,k+1]+xx*(uderi*pri-updf[i]*prderi)/pr2
d2bcut[,k]=d2bcut[,k]+xx*(-lderi*pri+lpdf[i]*prderi)/pr2
d2cut[k+1,k+1]=d2cut[k+1,k+1]+ (uderi*pri-updf[i]^2)/pr2
d2cut[k,k]=d2cut[k,k]+ (-lderi*pri-lpdf[i]^2)/pr2
tem2=updf[i]*lpdf[i]/pr2
d2cut[k,k+1]=d2cut[k,k+1]+tem2
d2cut[k+1,k]=d2cut[k+1,k]+tem2
}
sc=c(dcut[2:norc],dbeta)
# print(sc)
if(npred==1) d2bcut=matrix(c(d2bcut[,2:norc]),npred,norc-1)
if(npred>1) d2bcut=d2bcut[,2:norc]
d2cut=d2cut[2:norc,2:norc]
h=cbind(d2cut,t(d2bcut))
h=rbind(h,cbind(d2bcut,d2beta))
h=-h
#print(h)
covm=solve(h)
#print(covm)
if(iprint==1)
{ cat("nllk= ", nllk,"\n")
cat("cutpts= ", cutp,"\n")
cat("beta= ", b,"\n")
cat("SEs : ",sqrt(diag(covm)),"\n\n")
}
list(negloglik=nllk, gam=cutp, reg=b, cov=covm)
}
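# Minimal simulated check (a sketch; assumes der.dnorm/der.dlogis from
# wcl-ord.R are available, as they are once the package is loaded). Note the
# sign convention P(Y <= k | x) = F(cutpoint_k + x %*% b) used throughout:
# set.seed(1)
# n <- 500
# x <- rnorm(n)
# y <- cut(0.8 * x + rnorm(n), breaks = c(-Inf, -0.5, 0.5, Inf), labels = FALSE)
# fit <- iee.ord(x, y, link = "probit")
# fit$gam  # estimated cutpoints, roughly c(-0.5, 0.5)
# fit$reg  # estimated slope, roughly -0.8 under this parametrization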
|
/scratch/gouwar.j/cran-all/cranData/weightedCL/R/ord.reg.univar.R
|
# cdf for bivariate t, vectorized inputs
# R interface for bivariate t cdf, nu=df is integer-valued only
# link to fortran source code in the mvtnorm R package source
# If z1,z2,rho, nu are compatible, the function will probably work,
# z1 = (vector of) variable 1
# z2 = (vector of) variable 2
# rho = correlation in (-1,1)
# nu = degree of freedom
# Output
# bivariate cdfs at (z1,z2,rho,nu)
# -1 for any illegal or NA/NaN inputs
# default output is a vector
pbvt=function(z1,z2,param,icheck=FALSE)
{
#if(!is.loaded("pbvt")) dyn.load("./pbvt.so")
if(is.matrix(param)) { rho=param[,1]; nu=param[,2] }
else { rho=param[1]; nu=param[2] }
n1=length(z1)
n2=length(z2)
nr=length(rho)
ndf=length(nu)
# check for matrices etc
imat=is.matrix(z1)
if(imat) { ir=nrow(z1); ic=ncol(z1) }
nn=max(n1,n2,nr,ndf)
#if(imat) print(c(ir,ic))
if(n1<nn & n1>1) return(NA)
if(n2<nn & n2>1) return(NA)
if(nr<nn & nr>1) return(NA)
if(ndf<nn & ndf>1) return(NA)
if(nn>1)
{ if(n1==1) z1=rep(z1,nn)
if(n2==1) z2=rep(z2,nn)
if(nr==1) rho=rep(rho,nn)
if(ndf==1) nu=rep(floor(nu),nn)
}
if(icheck)
{ # handling infinites and NaN
z1[is.infinite(z1) & z1>0]=20
z2[is.infinite(z2) & z2>0]=20
z1[is.infinite(z1) & z1<0]=-20
z2[is.infinite(z2) & z2<0]=-20
rho[is.nan(z1)]=-2; z1[is.nan(z1)]=0
rho[is.nan(z2)]=-2; z2[is.nan(z2)]=0
rho[is.nan(rho)]=-2
nu[is.nan(nu)]=-2
nu[is.nan(z1)]=-2; nu[is.nan(z2)]=-2;
rho[is.na(z1)]=-2; z1[is.na(z1)]=0
rho[is.na(z2)]=-2; z2[is.na(z2)]=0
rho[is.na(rho)]=-2
nu[is.na(nu)]=-2
nu[is.na(z1)]=-2; nu[is.na(z2)]=-2;
#print(z1)
#print(z2)
#print(rho)
}
out= .Fortran("pbvt", as.integer(nn), as.double(z1), as.double(z2),
as.double(rho), as.integer(nu), bcdf=as.double(rep(0,nn)) )
#print(out$bcdf)
if(imat) { bcdf=matrix(out$bcdf,ir,ic) } else bcdf=out$bcdf
bcdf
}
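# Quick sanity checks (a sketch; they need the compiled Fortran routine "pbvt"
# shipped with the package, so they only run once the package is loaded):
# pbvt(0, 0, c(0, 1000))  # ~0.25: independent margins, df large enough to mimic the normal
# pbvt(0, 0, c(0.5, 10))  # larger: positive dependence increases the joint cdf at (0, 0)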
|
/scratch/gouwar.j/cran-all/cranData/weightedCL/R/pbvt.R
|
# Density of the univariate marginal distribution
# input:
# y the vector of (non-negative integer) quantiles.
# mu the mean parameter of the univariate distribution.
# gam the vector of cutpoints of the ordinal model.
# link the link function ('probit' or 'logit').
# output:
# the density of the univariate marginal distribution
dmargmodel.ord<-function(y,mu,gam,link)
{ cuts<-c(-10,gam,10)
lb=cuts[y]+mu
ub=cuts[y+1]+mu
if(link=="probit") res<-pnorm(ub)-pnorm(lb) else res<-plogis(ub)-plogis(lb)
res[y<1]<-0
res
}
# CDF of the univariate marginal distribution
# input:
# y the vector of (non-negative integer) quantiles.
# mu the mean parameter of the univariate distribution.
# gam the vector of cutpoints of the ordinal model.
# link the link function ('probit' or 'logit').
# output:
# the cdf of the univariate marginal distribution
pmargmodel.ord<-function(y,mu,gam,link)
{ cuts<-c(-10,gam,10)
ub=cuts[y+1]+mu # for mprobit
#ub=cuts[y+1]-mu # for polr
if(link=="probit") res<-pnorm(ub) else res<-plogis(ub)
res[y<1]<-0
res
}
# Bivariate composite likelihood for multivariate normal copula with ordinal regression.
# input:
# r the vector of normal copula parameters
# b the regression coefficients
# gam the gamma parameter
# xdat the matrix of covariates (use the constant 1 for the first covariate)
# ydat the vector with the response
# link is the link function. Choices are 'logit' for the logit link function,
# and 'probit' for the probit link function.
# output:
# negative bivariate composite likelihood for multivariate normal copula
# with ordinal regression.
bcl.ord<-function(rh,p,q,b,gam,xdat,ydat,link)
{ phi=rh[1:p]
if(q>0) { th=rh[(p+1):(p+q)] } else {th=NULL}
c1=any(Mod(polyroot(c(1, -phi))) < 1.01)
c2=any(Mod(polyroot(c(1, th))) < 1.01)
if(c1 || c2) return(1e10)
d=length(ydat)
rmat<-toeplitz(ARMAacf(ar=phi, ma=th, lag.max=d-1))
mu<-ordreg.mu(xdat,b)
vlow<-pmargmodel.ord(ydat-1,mu,gam,link)
tem<-dmargmodel.ord(ydat,mu,gam,link)
vupp<-vlow+tem
zlow=qnorm(vlow)
zupp=qnorm(vupp)
bivpairs=combn(1:d, 2)
d2=ncol(bivpairs)
zmat=matrix(NA,d2,5)
for(j in 1:d2)
{ k1=bivpairs[,j][1]
k2=bivpairs[,j][2]
zmat[j,]=c(zlow[c(k1,k2)],zupp[c(k1,k2)],rmat[k1,k2])
}
zmat[zmat==-Inf]=-10
zmat[zmat==Inf]=10
prob=pbvt(zmat[,3],zmat[,4],cbind(zmat[,5],100))+
pbvt(zmat[,1],zmat[,2],cbind(zmat[,5],100))-
pbvt(zmat[,3],zmat[,2],cbind(zmat[,5],100))-
pbvt(zmat[,1],zmat[,4],cbind(zmat[,5],100))
-sum(log(prob))
}
# optimization routine for composite likelihood for MVN copula
# b the regression coefficients
# gam the gamma parameter
# xdat the matrix of covariates (use the constant 1 for the first covariate)
# ydat the vector with the response
# link is the link function. Choices are
# 'logit' for the logit link function, and 'probit' for the probit link function.
# output: A list containing the following components:
# minimum the value of the estimated minimum of CL1 for MVN copula
# estimate the CL1 estimates
# gradient the gradient at the estimated minimum of CL1
# code an integer indicating why the optimization process terminated, see nlm.
cl.ord<-function(p,q,b,gam,xdat,ydat,link)
{ if(link=="logit") link="logistic"
t1=polr(as.factor(ydat)~xdat,method=link)
#t2 <- arima(residuals.polr(t1)[,1] , order = c(p,0,q))
t2 <- arima(resids(t1), order = c(p,0,q))
start<- t2$coef[1:(p+q)]
nlm(bcl.ord,start,p,q,b,gam,xdat,ydat,link,
print.level=2)
}
# derivative of the ordinal loglikelihood with respect to gamma
# input:
# gam the gamma parameter
# mu the mean parameter
# output:
# the vector with the derivatives of the ordinal loglikelihood with respect to gamma
derlik.gam.ord<-function(mu,gam,u,link)
{ K<-length(gam)+1
k<-1:K
cuts<-c(-10,gam,10)
lb=cuts[k]+mu
ub=cuts[k+1]+mu
if(link=="probit") { dlatent=dnorm; platent=pnorm } else { dlatent=dlogis; platent=plogis }
dlatentub<-dlatent(ub)
dlatentlb<-dlatent(lb)
den<-platent(ub)-platent(lb)
res<-rep(NA,K)
for(i in 1:K)
{ if(u==i)
{ res[i]=dlatentub[i]/den[i] }
else
{ if(u==i-1)
{ res[i]=-dlatentlb[i]/den[i] }
else {res[i]=0}}
}
res
}
# derivative of the ordinal loglikelihood with respect to gamma
# input:
# gam the gamma parameter
# mu the mean parameter
# y the value of a non-negative integer quantile
# output:
# the derivative of the ordinal loglikelihood with respect to gamma
iderlik.gam.ord<-function(mu,gam,y,u,link)
{ cuts<-c(-10,gam,10)
lb=cuts[y]+mu
ub=cuts[y+1]+mu
if(link=="probit") { dlatent=dnorm; platent=pnorm } else { dlatent=dlogis; platent=plogis }
den<-platent(ub)-platent(lb)
if(u==y) dlatent(ub)/den
else if(u==y-1) -dlatent(lb)/den else 0
}
der.dnorm<-function(x)
{ -x*dnorm(x) }
der.dlogis<-function(x)
{ expx=exp(x)
expx*(1-expx)/(1+expx)^3
}
# minus expectation of the second derivative of the marginal ordinal loglikelihood
# with respect to gamma
# input:
# mu the mean parameter
# gam the gamma parameter
# u the univariate cdfs
# output:
# the vector with the minus expectations of the margmodel loglikelihood
# with respect to gamma
fisher.gam.ord<-function(mu,gam,u,v,link)
{ cuts<-c(-10,gam,10)
K<-length(gam)+1
k<-1:K
lb=cuts[k]+mu
ub=cuts[k+1]+mu
if(link=="probit") { dlatent=dnorm; platent=pnorm; der.dlatent=der.dnorm } else {
dlatent=dlogis; platent=plogis; der.dlatent=der.dlogis }
den<-platent(ub)-platent(lb)
dlatentub<-dlatent(ub)
dlatentlb<-dlatent(lb)
der.dlatentub<-der.dlatent(ub)
der.dlatentlb<-der.dlatent(lb)
h<-rep(NA,K)
for(k in 1:K)
{ if(u==k & v==k)
{ num1<-der.dlatentub[k]
num2<-dlatentub[k]
tem<-num2/den[k]
h[k]<--num1/den[k]+tem*tem
}
else
{ if((u==k & v==k-1) | (u==k-1 & v==k))
{ h[k]<--dlatentub[k]*dlatentlb[k]/den[k]/den[k]
}
else
{ if(u==k-1 & v==k-1)
{ num1<-der.dlatentlb[k]
num2<-dlatentlb[k]
tem<-num2/den[k]
h[k]<-num1/den[k]+tem*tem
} else h[k]<-0}}
}
sum(h*den)
}
# the mean values of the univariate marginal distribution
# corresponding to the used link function
# input:
# x the matrix of the covariates
# b the vector with the regression coefficients
# output:
# the mean values of the univariate marginal distribution
ordreg.mu<-function(x,b)
{ if(length(b)!=1) mu<-x %*% b else mu<-x*b }
# Calculating the number of categories
# input:
# gam the cutpoints
# output:
# the number of categories
noCategories<-function(gam)
{ length(gam)+1 }
# covariance matrix of the scores Omega_i
# input:
# scgam the array of the score functions with respect to gam
# index the bivariate pair
# pmf the matrix of rectangle probabilities
scoreCov.ord<-function(scgam,pmf,index)
{ j1<-index[1]
j2<-index[2]
q<-dim(scgam)[2]
cov22<-matrix(NA,q,q)
for(i1 in 1:q)
{ for(i2 in 1:q)
{ cov22[i1,i2]<-t(scgam[,i1,j1])%*%pmf%*%scgam[,i2,j2] }
}
cov22
}
# weight matrix fixed at values from the CL1 estimator
# input:
# b the regression coefficients
# gam the gamma parameter
# xdat the matrix of covariates (use the constant 1 for the first covariate)
# ydat the vector with the response
# rh the vector with CL1 estimates
# link is the link function. Choices are
# 'logit' for the logit link function, and 'probit' for the probit link function.
# output: A list containing the following components:
# omega the array with the Omega matrices
# delta the array with the Delta matrices
# X the array with the X matrices
weightMat.ord<-function(b,gam,rh,p,q,xdat,link)
{ phi=rh[1:p]
if(q>0){ th=rh[(p+1):(p+q)] } else { th=NULL }
d=nrow(xdat)
rmat=toeplitz(ARMAacf(ar=phi, ma=th, lag.max=d-1))
qq<-length(gam)
if(is.matrix(xdat))
{ dim<-dim(xdat)
pp<-dim[2]
} else {pp<-1}
bivpairs=t(combn(1:d, 2))
d2=nrow(bivpairs)
omega<-matrix(NA,d*qq,d*qq)
X<-matrix(NA,pp+qq,d*qq)
delta<-matrix(NA,d*qq,d*qq)
dom<-d*qq
if(qq>1) pos<-seq(1,dom-1,by=qq) else pos<-seq(1,dom)
mu<-ordreg.mu(xdat,b)
ub<-noCategories(gam)
du<-matrix(NA,ub,d)
scgam<-array(NA,c(ub,ub-1,d))
for(j in 1:d)
{ du[,j]<-dmargmodel.ord(1:ub,mu[j],gam,link)
for(k in 1:(ub-1))
{ scgam[,k,j]<-derlik.gam.ord(mu[j],gam,k,link) }
}
u<-apply(du,2,cumsum)
z<-qnorm(u)
z[is.nan(z)]<-7
z[z>10]<-7
z<-rbind(-7,z)
x<-NULL
diagonal<-array(NA,c(qq,qq,d))
for(j in 1:d)
{ if(is.matrix(xdat))
{ temp1<-matrix(rep(xdat[j,],each=qq),qq) } else {
temp1<-matrix(rep(xdat[j],each=qq),qq) }
x<-cbind(x,t(cbind(temp1,diag(qq))))
fisher<-matrix(NA,ub-1,ub-1)
for(k1 in 1:(ub-1))
{ for(k2 in 1:(ub-1))
{ fisher[k1,k2]<-fisher.gam.ord(mu[j],gam,k1,k2,link)
}
}
diagonal[,,j]<-fisher
}
delta<-matrix(0,dom,dom)
minus<-0
for(j in pos)
{ delta[j:(j+qq-1),j:(j+qq-1)]<-diagonal[,,j-minus]
if(qq>1) minus<-minus+qq-1 else minus<-0
}
off<-array(NA,c(qq,qq,d2))
for(k in 1:d2)
{ print(k)
k1<-bivpairs[k,][1]
k2<-bivpairs[k,][2]
x1=z[,k1]; x2=z[,k2]
xmat=meshgrid(x1,x2)
cdf=t(pbvt(xmat$x,xmat$y,c(rmat[k1,k2],1000)))
cdf1=apply(cdf,2,diff)
pmf=apply(t(cdf1),2,diff)
pmf=t(pmf)
off[,,k]<-scoreCov.ord(scgam,pmf,bivpairs[k,])
}
omega<-delta
ch1<-0
ch2<-0
ch3<-0
for(j in 1:(d-1))
{ for(r in pos[-(1:j)])
{ #print(c(j,r))
omega[(1+(j-1)*qq):(j*qq),r:(r+qq-1)]<-off[,,(j+ch2-ch1)]
omega[r:(r+qq-1),(1+(j-1)*qq):(j*qq)]<-t(off[,,(j+ch2-ch1)])
ch2<-ch2+1
}
ch1<-ch1+1
}
list(omega=omega,X=x,delta=delta)
}
# the weighted scores equations
# input:
# param the vector of regression and not regression parameters
# xdat the matrix of covariates
# ydat the vector with the response
# link is the link function. Choices are
# 'logit' for the logit link function, and 'probit' for the probit link function.
# WtScMat is a list containing the following components:
# omega the array with the Omega matrices
# delta the array with the Delta matrices
# X the array with the X matrices
# output
# the wcl estimating equations
bwcl.ord<-function(param,WtScMat,xdat,ydat,link)
{ d<-length(ydat)
if(is.matrix(xdat))
{ p<-ncol(xdat)
} else {p<-1}
b<-param[1:p]
q<-length(unique(ydat))-1
gam<-param[(p+1):(p+q)]
mu<-ordreg.mu(xdat,b)
ub<-noCategories(gam)
scgam<-array(NA,c(ub,ub-1,d))
for(j in 1:d)
{ for(k in 1:(ub-1))
{ scgam[,k,j]<-derlik.gam.ord(mu[j],gam,k,link) }
}
sc<-NULL
for(j in 1:d)
{ scgami<-NULL
for(k in 1:(ub-1))
{ scgami<-c(scgami,scgam[ydat[j],k,j])}
sc<-c(sc,scgami)
}
X<-WtScMat$X
delta<-WtScMat$delta
omega<-WtScMat$omega
g<-X%*%t(delta)%*%solve(omega,sc)
g
}
# solving the wcl estimating equations
# input:
# start the starting values (IEE estimates) for the vector of
# regression and not regression parameters
# xdat the matrix of covariates (use the constant 1 for the first covariate)
# ydat the vector with the response
# link is the link function. Choices are
# 'logit' for the logit link function, and 'probit' for the probit link function.
# WtScMat is a list containing the following components:
# omega the array with the Omega matrices
# delta the array with the Delta matrices
# X the array with the X matrices
# output:
# the wcl estimates
wcl.ord<-function(start,WtScMat,xdat,ydat,link)
{ multiroot(f=bwcl.ord,start,atol=1e-4,rtol=1e-4,ctol=1e-4,
WtScMat=WtScMat,xdat=xdat,ydat=ydat,link=link)
}
godambe.ord=function(b,gam,rh,p,q,xdat,link)
{ WtScMat<-weightMat.ord(b,gam,rh,p,q,xdat,link)
omega= WtScMat$omega
delta= WtScMat$delta
X= WtScMat$X
psi<-delta%*%t(X)
hess<-t(psi)%*%solve(omega)%*%psi
solve(hess)
}
|
/scratch/gouwar.j/cran-all/cranData/weightedCL/R/wcl-ord.R
|
# the mean values of the univariate marginal distribution
# corresponding to the used link function
# input:
# x the matrix of the covariates
# b the vector with the regression coefficients
# link has three options: 1. "log", 2. "logit". 3. "probit"
# output:
# the mean values of the univariate marginal distribution
linked.mu<-function(x,b,link)
{ if(link=="log")
{ mu<-exp(x %*% b)
}
else
{ if(link=="logit")
{ expnu<-exp(x %*% b)
mu<-expnu/(1+expnu)
}
else
{ # link=probit
mu<-pnorm(x %*% b) }
}
mu
}
# Density of the univariate marginal distribution
# input:
# y the vector of (non-negative integer) quantiles.
# mu the mean parameter of the univariate distribution.
# gam the parameter gamma of the negative binomial distribution.
# invgam the inverse of parameter gamma of negative binomial distribution.
# margmodel indicates the marginal model. Choices are 'poisson' for Poisson, 'bernoulli' for
# Bernoulli, and 'nb1', 'nb2' for the NB1 and NB2 parametrization of negative
# binomial in Cameron and Trivedi (1998).
# output:
# the density of the univariate marginal distribution
dmargmodel<-function(y,mu,gam,invgam,margmodel)
{ if(margmodel=="poisson")
{ dpois(y,mu)
}
else
{ if(margmodel=="bernoulli")
{ dbinom(y,size=1,prob=mu) }
else
{ if(margmodel=="nb1")
{ dnbinom(y,prob=1/(1+gam),size=mu*invgam) }
else
{ # margmodel=="nb2"
dnbinom(y,size=invgam,mu=mu) }}}
}
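# Small numeric check (a sketch): both negative binomial parametrizations
# define proper pmfs, e.g. for mu = 2 and gam = 0.5 the probabilities over
# 0:200 sum to approximately 1.
# sum(dmargmodel(0:200, mu = 2, gam = 0.5, invgam = 2, margmodel = "nb1"))
# sum(dmargmodel(0:200, mu = 2, gam = 0.5, invgam = 2, margmodel = "nb2"))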
# CDF of the univariate marginal distribution
# input:
# y the vector of (non-negative integer) quantiles.
# mu the mean parameter of the univariate distribution.
# gam the parameter gamma of the negative binomial distribution.
# invgam the inverse of parameter gamma of negative binomial distribution.
# margmodel indicates the marginal model. Choices are 'poisson' for Poisson, 'bernoulli' for
# Bernoulli, and 'nb1', 'nb2' for the NB1 and NB2 parametrization of negative
# binomial in Cameron and Trivedi (1998).
# output:
# the cdf of the univariate marginal distribution
pmargmodel<-function(y,mu,gam,invgam,margmodel)
{ if(margmodel=="poisson")
{ ppois(y,mu)
}
else
{ if(margmodel=="bernoulli")
{ pbinom(y,size=1,prob=mu) }
else
{ if(margmodel=="nb1")
{ pnbinom(y,prob=1/(1+gam),size=mu*invgam) }
else
{ # margmodel=="nb2"
pnbinom(y,size=invgam,mu=mu) }}}
}
# quantile of the univariate marginal distribution
# input:
# y the vector of probabilities
# mu the mean parameter of the univariate distribution.
# gam the parameter gamma of the negative binomial distribution.
# invgam the inverse of parameter gamma of negative binomial distribution.
# margmodel indicates the marginal model. Choices are 'poisson' for Poisson, 'bernoulli' for
# Bernoulli, and 'nb1', 'nb2' for the NB1 and NB2 parametrization of negative
# binomial in Cameron and Trivedi (1998).
# output:
# the quantile of the univariate marginal distribution
qmargmodel<-function(y,mu,gam,invgam,margmodel)
{ if(margmodel=="poisson")
{ qpois(y,mu)
}
else
{ if(margmodel=="bernoulli")
{ qbinom(y,size=1,prob=mu) }
else
{ if(margmodel=="nb1")
{ qnbinom(y,prob=1/(1+gam),size=mu*invgam) }
else
{ # margmodel=="nb2"
qnbinom(y,size=invgam,mu=mu) }}}
}
# negative univariate loglikelihood assuming independence within clusters
# input:
# param the vector of regression and not regression parameters
# xdat the matrix of covariates (use the constant 1 for the first covariate)
# ydat the vector with the response
# margmodel indicates the marginal model. Choices are 'poisson' for Poisson,
# 'bernoulli' for Bernoulli, and 'nb1', 'nb2' for the NB1 and NB2
# parametrization of negative binomial in Cameron and Trivedi (1998)
# link is the link function. Choices are 'log' for the log link function,
# 'logit' for the logit link function, and 'probit' for the probit link function.
# output:
# negative univariate loglikelihood assuming independence within clusters
marglik<-function(param,xdat,ydat,margmodel,link)
{ p<-dim(xdat)[2]
b<-param[1:p]
if(margmodel=="nb1" | margmodel=="nb2")
{ gam<-param[p+1]
invgam<-1/gam
}
#else gam<-invgam<-0
mu<-linked.mu(as.matrix(xdat),b,link)
-sum(log(dmargmodel(ydat,mu,gam,invgam,margmodel)))
}
# Independent estimating equations for binary, Poisson or
# negative binomial regression.
# xdat the matrix of covariates (use the constant 1 for the first covariate)
# ydat the vector with the response
# margmodel indicates the marginal model. Choices are 'poisson' for Poisson,
# 'bernoulli' for Bernoulli, and 'nb1', 'nb2' for the NB1 and NB2
# parametrization of negative binomial in Cameron and Trivedi (1998)
# link is the link function. Choices are 'log' for the log link function,
# 'logit' for the logit link function, and 'probit' for the probit link function.
# output: A list containing the following components:
# reg the vector with the ML estimated regression parameters
# gam the ML estimate of gamma parameter
iee<-function(xdat,ydat,margmodel,link="log")
{ #if(margmodel=="bernoulli") family=binomial else family=poisson
if(margmodel=="nb1" | margmodel=="nb2" | margmodel=="poisson") link="log"
if(margmodel=="bernoulli" & link!="probit") link="logit"
if(margmodel=="poisson")
{ uni<-glm(ydat ~ xdat[,-1],family =poisson(link="log"))
res<-as.vector(uni$coef)
list(reg=res)
} else {
if(margmodel=="bernoulli")
{ if(link=="probit")
{ uni<-glm(ydat ~ xdat[,-1],family =binomial(link="probit"))
} else {
uni<-glm(ydat ~ xdat[,-1],family =binomial(link="logit")) }
res<-as.vector(uni$coef)
list(reg=res)
} else
{ p<-dim(xdat)[2]
uni<-nlm(marglik,c(rep(0,p),1),margmodel=margmodel,
link=link,xdat=xdat,ydat=ydat,iterlim=1000)
res1<-uni$e[1:p]
res2<-uni$e[p+1]
list(reg=res1,gam=res2) }}
}
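# Minimal simulated example (a sketch): Poisson margins with a log link.
# set.seed(1)
# xdat <- cbind(1, rnorm(200))
# ydat <- rpois(200, exp(0.5 + 0.3 * xdat[, 2]))
# iee(xdat, ydat, margmodel = "poisson")$reg  # roughly c(0.5, 0.3)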
# Bivariate composite likelihood for multivariate normal copula with Poisson,
# binary, or negative binomial regression.
# input:
# r the vector of normal copula parameters
# b the regression coefficients
# gam the gamma parameter
# xdat the matrix of covariates (use the constant 1 for the first covariate)
# ydat the vector with the response
# margmodel indicates the marginal model. Choices are 'poisson' for Poisson,
# 'bernoulli' for Bernoulli, and 'nb1', 'nb2' for the NB1 and NB2
# parametrization of negative binomial in Cameron and Trivedi (1998)
# link is the link function. Choices are 'log' for the log link function,
# 'logit' for the logit link function, and 'probit' for the probit link function.
# output:
# negative bivariate composite likelihood for multivariate normal copula
# with Poisson, binary, or negative binomial regression.
bcl<-function(rh,p,q,b,gam,xdat,ydat,margmodel,link="log")
{ phi=rh[1:p]
if(q>0) { th=rh[(p+1):(p+q)] } else {th=NULL}
c1=any(Mod(polyroot(c(1, -phi))) < 1.01)
c2=any(Mod(polyroot(c(1, th))) < 1.01)
if(c1 || c2) return(1e10)
s<-0
if(margmodel=="nb1" | margmodel=="nb2" | margmodel=="poisson") link="log"
if(margmodel=="bernoulli" & link!="probit") link="logit"
if(margmodel=="nb1" | margmodel=="nb2") invgam<-1/gam else invgam<-NULL
d<-length(ydat)
rmat<-toeplitz(ARMAacf(ar=phi, ma=th, lag.max=d-1))
mu<-linked.mu(xdat,b,link)
vlow<-pmargmodel(ydat-1,mu,gam,invgam,margmodel)
tem<-dmargmodel(ydat,mu,gam,invgam,margmodel)
vupp<-vlow+tem
zlow=qnorm(vlow)
zupp=qnorm(vupp)
bivpairs=combn(1:d, 2)
d2=ncol(bivpairs)
zmat=matrix(NA,d2,5)
for(j in 1:d2)
{ k1=bivpairs[,j][1]
k2=bivpairs[,j][2]
zmat[j,]=c(zlow[c(k1,k2)],zupp[c(k1,k2)],rmat[k1,k2])
}
zmat[zmat==-Inf]=-10
#prob=pbivnorm(zmat[,3],zmat[,4],zmat[,5])+
# pbivnorm(zmat[,1],zmat[,2],zmat[,5])-
# pbivnorm(zmat[,3],zmat[,2],zmat[,5])-
# pbivnorm(zmat[,1],zmat[,4],zmat[,5])
prob=pbvt(zmat[,3],zmat[,4],cbind(zmat[,5],100))+
pbvt(zmat[,1],zmat[,2],cbind(zmat[,5],100))-
pbvt(zmat[,3],zmat[,2],cbind(zmat[,5],100))-
pbvt(zmat[,1],zmat[,4],cbind(zmat[,5],100))
-sum(log(prob))
}
# optimization routine for composite likelihood for MVN copula
# b the regression coefficients
# gam the gamma parameter
# xdat the matrix of covariates (use the constant 1 for the first covariate)
# ydat the vector with the response
# margmodel indicates the marginal model. Choices are 'poisson' for Poisson,
# 'bernoulli' for Bernoulli, and 'nb1', 'nb2' for the NB1 and NB2
# parametrization of negative binomial in Cameron and Trivedi (1998)
# link is the link function. Choices are 'log' for the log link function,
# 'logit' for the logit link function, and 'probit' for the probit link function.
# output: A list containing the following components:
# minimum the value of the estimated minimum of CL1 for MVN copula
# estimate the CL1 estimates
# gradient the gradient at the estimated minimum of CL1
# code an integer indicating why the optimization process terminated, see nlm.
cl<-function(p,q,b,gam,xdat,ydat,margmodel,link="log")
{ if(margmodel=="nb1" | margmodel=="nb2" | margmodel=="poisson") link="log"
if(margmodel=="bernoulli" & link!="probit") link="logit"
mod <- glm(ydat~xdat[,-1], family="poisson")
temp <- arima(resid(mod) , order = c(p,0,q))
start<- temp$coef[1:(p+q)]
nlm(bcl,start,p,q,b,gam,xdat,ydat,margmodel,link,print.level = 2)
}
# derivative of the marginal loglikelihood with respect to nu
# input:
# mu the mean parameter
# gam the gamma parameter
# invgam the inverse of gamma parameter
# ub the truncation value
# margmodel indicates the marginal model. Choices are 'poisson' for Poisson,
# 'bernoulli' for Bernoulli, and 'nb1', 'nb2' for the NB1 and NB2
# parametrization of negative binomial in Cameron and Trivedi (1998)
# link is the link function. Choices are 'log' for the log link function,
# 'logit' for the logit link function, and 'probit' for the probit link function.
# output:
# the vector with the derivatives of the margmodel loglikelihood with respect to nu
derlik.nu<-function(mu,gam,invgam,ub,margmodel,link)
{ if(link=="probit")
{ if(mu==1 & is.finite(mu)){mu<-0.9999}
nu<-qnorm(mu)
(0:1-mu)/mu/(1-mu)*dnorm(nu)
}
else
{ if(margmodel=="nb1")
{ j<-0:(ub-1)
s<-c(0,cumsum(1/(mu+gam*j)))
(s-invgam*log(1+gam))*mu
}
else
{ if(margmodel=="nb2")
{ pr<-1/(mu*gam+1)
(0:ub-mu)*pr
}
else { 0:ub-mu }}}
}
# derivative of the marginal loglikelihood with respect to nu
# input:
# mu the mean parameter
# gam the gamma parameter
# invgam the inverse of gamma parameter
# y the value of a non-negative integer quantile
# margmodel indicates the marginal model. Choices are 'poisson' for Poisson,
# 'bernoulli' for Bernoulli, and 'nb1', 'nb2' for the NB1 and NB2
# parametrization of negative binomial in Cameron and Trivedi (1998)
# link is the link function. Choices are 'log' for the log link function,
# 'logit' for the logit link function, and 'probit' for the probit link function.
# output:
# the derivative of the margmodel loglikelihood with respect to nu
iderlik.nu<-function(mu,gam,invgam,y,margmodel,link)
{ if(link=="probit")
{ if(mu==1 & is.finite(mu)){mu<-0.9999}
nu<-qnorm(mu)
(y-mu)/mu/(1-mu)*dnorm(nu)
}
else
{ if(margmodel=="nb1")
{ s<-0
if(y>0)
{ j<-0:(y-1)
s<-sum(1/(mu+gam*j))
}
(s-invgam*log(1+gam))*mu
}
else
{ if(margmodel=="nb2")
{ pr<-1/(mu*gam+1)
(y-mu)*pr
}
else { y-mu }}}
}
# derivative of the NB loglikelihood with respect to gamma
# input:
# gam the gamma parameter
# invgam the inverse of gamma parameter
# mu the mean parameter
# ub the truncation value
# margmodel indicates the marginal model. Choices are 'nb1', 'nb2' for
# the NB1 and NB2 parametrization of negative binomial in
# Cameron and Trivedi (1998)
# output:
# the vector with the derivatives of the NB loglikelihood with respect to gamma
derlik.gam<-function(mu,gam,invgam,ub,margmodel)
{ j<-0:(ub-1)
if(margmodel=="nb1")
{ s<-c(0,cumsum(j/(mu+gam*j)))
s+invgam*invgam*mu*log(1+gam)-(0:ub+invgam*mu)/(1+gam)
}
else
{ #if(margmodel=="nb2")
pr<-1/(mu*gam+1)
s<-c(0,cumsum(j/(1+j*gam)))
s-log(pr)/(gam*gam)-(0:ub+invgam)*mu*pr
}
}
# derivative of the NB loglikelihood with respect to gamma
# input:
# gam the gamma parameter
# invgam the inverse of gamma parameter
# mu the mean parameter
# y the value of a non-negative integer quantile
# margmodel indicates the marginal model. Choices are 'nb1', 'nb2' for
# the NB1 and NB2 parametrization of negative binomial in
# Cameron and Trivedi (1998)
# output:
# the derivative of the NB loglikelihood with respect to gamma
iderlik.gam<-function(mu,gam,invgam,y,margmodel)
{ s<-0
if(margmodel=="nb1")
{ if(y>0)
{ j<-0:(y-1)
s<-sum(j/(mu+gam*j))
}
s+invgam*invgam*mu*log(1+gam)-(y+invgam*mu)/(1+gam)
}
else
{ #if(margmodel=="nb2")
if(y>0)
{ j<-0:(y-1)
s<-sum(j/(1+gam*j))
}
pr<-1/(mu*gam+1)
s-log(pr)/(gam*gam)-(y+invgam)*mu*pr
}
}
# minus expectation of the second derivative of the marginal loglikelihood
# with respect to nu
# input:
# mu the mean parameter
# gam the gamma parameter
# invgam the inverse of gamma parameter
# u the univariate cdfs
# ub the truncation value
# margmodel indicates the marginal model. Choices are 'nb1', 'nb2' for
# the NB1 and NB2 parametrization of negative binomial in
# Cameron and Trivedi (1998)
# output:
# the vector with the minus expectations of the margmodel loglikelihood
# with respect to nu
fisher.nu<-function(mu,gam,invgam,u,ub,margmodel,link)
{ if(link=="log" & margmodel=="poisson")
{ mu }
else
{ if(link=="logit")
{ mu*(1-mu) }
else
{ if(margmodel=="nb1")
{ j<-0:ub
s1<-sum(1/(mu+j*gam)/(mu+j*gam)*(1-u))
s2<-sum(1/(mu+j*gam)*(1-u))
(mu*s1-s2+invgam*log(1+gam))*mu
}
else
{ if(margmodel=="nb2")
{ pr<-1/(mu*gam+1)
mu*pr
}
else
{ # link=="probit"
if(mu==1 & is.finite(mu)){mu<-0.9999}
nu<-qnorm(mu)
1/mu/(1-mu)*dnorm(nu)^2}
}}}
}
# minus expectation of the second derivative of the marginal NB loglikelihood
# with respect to gamma
# input:
# mu the mean parameter
# gam the gamma parameter
# invgam the inverse of gamma parameter
# u the univariate cdfs
# ub the truncation value
# margmodel indicates the marginal model. Choices are 'nb1', 'nb2' for
# the NB1 and NB2 parametrization of negative binomial in
# Cameron and Trivedi (1998)
# output:
# the vector with the minus expectations of the margmodel loglikelihood
# with respect to gamma
fisher.gam<-function(mu,gam,invgam,u,ub,margmodel)
{ j<-0:ub
if(margmodel=="nb1")
{ pr<-1/(gam+1)
s<-sum((j/(mu+j*gam))^2*(1-u))
s+2*invgam*invgam*invgam*log(1+gam)*mu-2*invgam*invgam*mu*pr-mu/gam*pr
}
else
{ #if(margmodel=="nb2")
s<-sum((invgam+j)^(-2)*(1-u))
invgam^4*(s-gam*mu/(mu+invgam))
}
}
# minus expectation of the second derivative of the marginal loglikelihood
# with respect to nu and gamma
# input:
# mu the mean parameter
# gam the gamma parameter
# invgam the inverse of gamma parameter
# u the univariate cdfs
# ub the truncation value
# margmodel indicates the marginal model. Choices are 'nb1', 'nb2' for
# the NB1 and NB2 parametrization of negative binomial in
# Cameron and Trivedi (1998)
# output:
# the vector with the minus expectations of the NB loglikelihood
# with respect to nu and gamma
fisher.nu.gam<-function(mu,gam,invgam,u,ub,margmodel)
{ if(margmodel=="nb1")
{ pr<-1/(gam+1)
j<-0:ub
s<-sum(j/(mu+j*gam)/(mu+j*gam)*(1-u))
(s-invgam*invgam*log(1+gam)+invgam*pr)*mu
}
else {0}
}
# Calculating the truncation value for the univariate distribution
# input:
# mu the mean parameter of the univariate distribution.
# gam the parameter gamma of the negative binomial distribution.
# margmodel indicates the marginal model. Choices are 'poisson' for Poisson, 'bernoulli' for
# Bernoulli, and 'nb1', 'nb2' for the NB1 and NB2 parametrization of negative
# binomial in Cameron and Trivedi (1998).
# output:
# the truncation value--upper bound
truncation<-function(mu,gam,margmodel)
{ if(margmodel=="poisson")
{ ub<-round(max(10,mu+7*sqrt(mu),na.rm=T))
}
else
{ if(margmodel=="bernoulli") ub<-1
else
{ if(margmodel=="nb1")
{ pr<-1/(gam+1)
v<-mu/pr
ub<-round(max(10,mu+10*sqrt(v),na.rm=T))
}
else
{ pr<-1/(mu*gam+1)
v<-mu/pr
ub<-round(max(10,mu+7*sqrt(v),na.rm=T))
}}}
ub
}
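# Example (a sketch): for a Poisson margin with mu = 4 the bound is
# round(max(10, 4 + 7 * sqrt(4))) = 18.
# truncation(mu = 4, gam = NULL, margmodel = "poisson")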
# covariance matrix of the scores Omega_i
# input:
# scnu the matrix of the score functions with respect to nu
# scgam the matrix of the score functions with respect to gam
# index the bivariate pair
# pmf the matrix of rectangle probabilities
# margmodel indicates the marginal model. Choices are 'poisson' for Poisson, 'bernoulli' for
# Bernoulli, and 'nb1', 'nb2' for the NB1 and NB2 parametrization of negative
# binomial in Cameron and Trivedi (1998).
scoreCov<-function(scnu,scgam,pmf,index,margmodel)
{ j1<-index[1]
j2<-index[2]
cov11<-t(scnu[,j1])%*%pmf%*%scnu[,j2]
if(margmodel=="bernoulli" | margmodel=="poisson")
{ cov11 }
else
{ cov12<-t(scnu[,j1])%*%pmf%*%scgam[,j2]
cov21<-t(scgam[,j1])%*%pmf%*%scnu[,j2]
cov22<-t(scgam[,j1])%*%pmf%*%scgam[,j2]
matrix(c(cov11,cov12,cov21,cov22),2,2)
}
}
# weight matrix fixed at values from the CL1 estimator
# input:
# b the regression coefficients
# gam the gamma parameter
# xdat the matrix of covariates (use the constant 1 for the first covariate)
# ydat the vector with the response
# rh the vector with CL1 estimates
# margmodel indicates the marginal model. Choices are 'poisson' for Poisson,
# 'bernoulli' for Bernoulli, and 'nb1', 'nb2' for the NB1 and NB2
# parametrization of negative binomial in Cameron and Trivedi (1998)
# link is the link function. Choices are 'log' for the log link function,
# 'logit' for the logit link function, and 'probit' for the probit link function.
# output: A list containing the following components:
# omega the array with the Omega matrices
# delta the array with the Delta matrices
# X the array with the X matrices
weightMat<-function(b,gam,rh,p,q,xdat,margmodel,link="log")
{ phi=rh[1:p]
if(q>0){ th=rh[(p+1):(p+q)] } else { th=NULL }
if(margmodel=="nb1" | margmodel=="nb2" | margmodel=="poisson") link="log"
if(margmodel=="bernoulli" & link!="probit") link="logit"
if(margmodel=="nb1" | margmodel=="nb2") invgam<-1/gam else invgam<-NULL
d<-nrow(xdat)
bivpairs=t(combn(1:d, 2))
d2=nrow(bivpairs)
rmat=toeplitz(ARMAacf(ar=phi, ma=th, lag.max=d-1))
qq<-length(gam)
pp<-ncol(xdat)
omega<-matrix(NA,d*(1+qq),d*(1+qq))
X<-matrix(NA,pp+qq,d*(1+qq))
delta<-matrix(NA,d*(1+qq),d*(1+qq))
dom<-d*(1+qq)
pos<-seq(1,dom-1,by=2) #not used for binary
mu<-linked.mu(xdat,b,link)
ub<-truncation(mu,gam,margmodel)
du<-scnu<-scgam<-matrix(NA,1+ub,d)
for(j in 1:d)
{ du[,j]<-dmargmodel(0:ub,mu[j],gam,invgam,margmodel)
scnu[,j]<-derlik.nu(mu[j],gam,invgam,ub,margmodel,link)
if(margmodel=="nb1" | margmodel=="nb2")
{ scgam[,j]<-derlik.gam(mu[j],gam,invgam,ub,margmodel) }
}
u<-apply(du,2,cumsum)
z<-qnorm(u)
z[is.nan(z)]<-10
z[z==Inf]=10
z[z>4&margmodel=="bernoulli"]<-7
z<-rbind(-10,z)
x<-NULL
if(margmodel=="bernoulli" | margmodel=="poisson")
{ diagonal<-rep(NA,d)
} else {
diagonal<-array(NA,c(2,2,d)) }
for(j in 1:d)
{ f1<-fisher.nu(mu[j],gam,invgam,u[,j],ub,margmodel,link)
if(margmodel=="nb1" | margmodel=="nb2")
{ temp<-cbind(xdat[j,],0)
x<-cbind(x,rbind(temp,c(0,1)))
f2<-fisher.gam(mu[j],gam,invgam,u[,j],ub,margmodel)
f3<-fisher.nu.gam(mu[j],gam,invgam,u[,j],ub,margmodel)
diagonal[,,j]<-matrix(c(f1,f3,f3,f2),2,2)
}
else
{ temp<-xdat[j,]
x<-cbind(x,temp)
diagonal[j]<-f1
}
}
if(margmodel=="bernoulli" | margmodel=="poisson")
{ delta<-diag(diagonal)
off<-rep(NA,d2)
} else {
delta<-matrix(0,dom,dom)
minus<-0
for(j in pos)
{ delta[j:(j+1),j:(j+1)]<-diagonal[,,j-minus]
minus<-minus+1
}
off<-array(NA,c(2,2,d2))
}
for(k in 1:d2)
{ print(k)
k1<-bivpairs[k,][1]
k2<-bivpairs[k,][2]
x1=z[,k1]; x2=z[,k2]
xmat=meshgrid(x1,x2)
cdf=t(pbvt(xmat$x,xmat$y,c(rmat[k1,k2],1000)))
cdf1=apply(cdf,2,diff)
pmf=apply(t(cdf1),2,diff)
pmf=t(pmf)
if(margmodel=="bernoulli" | margmodel=="poisson")
{off[k]<-scoreCov(scnu,scgam,pmf,bivpairs[k,],margmodel)}
else {off[,,k]<-scoreCov(scnu,scgam,pmf,bivpairs[k,],margmodel)}
}
omega<-delta
if(margmodel=="bernoulli" | margmodel=="poisson")
{ for(j in 1:d2)
{ omega[bivpairs[j,1],bivpairs[j,2]]<-off[j]
omega[bivpairs[j,2],bivpairs[j,1]]<-off[j]
}}
else
{ ch1<-0
ch2<-0
for(j in 1:(d-1))
{ for(r in pos[-(1:j)])
{ omega[(j+ch1):(j+1+ch1),r:(r+1)]<-off[,,(j+ch2-ch1)]
omega[r:(r+1),(j+ch1):(j+1+ch1)]<-t(off[,,(j+ch2-ch1)])
ch2<-ch2+1
}
ch1<-ch1+1
}}
list(omega=omega,X=x,delta=delta)
}
# the wcl estimating equations
# input:
# param the vector of regression and not regression parameters
# xdat the matrix of covariates (use the constant 1 for the first covariate)
# ydat the vector with the response
# margmodel indicates the marginal model. Choices are 'poisson' for Poisson,
# 'bernoulli' for Bernoulli, and 'nb1', 'nb2' for the NB1 and NB2
# parametrization of negative binomial in Cameron and Trivedi (1998)
# link is the link function. Choices are 'log' for the log link function,
# 'logit' for the logit link function, and 'probit' for the probit link function.
# WtScMat is a list containing the following components:
# omega the array with the Omega matrices
# delta the array with the Delta matrices
# X the array with the X matrices
# output
# the weighted scores equations
bwcl<-function(param,WtScMat,xdat,ydat,margmodel,link="log")
{ if(margmodel=="nb1" | margmodel=="nb2" | margmodel=="poisson") link="log"
if(margmodel=="bernoulli" & link!="probit") link="logit"
d<-length(ydat)
p<-ncol(xdat)
b<-param[1:p]
if(p<length(param)) {gam<-param[p+1]; invgam<-1/gam }
mu<-linked.mu(xdat,b,link)
ub<-truncation(mu,gam,margmodel)
scnu<-scgam<-matrix(NA,ub+1,d)
for(j in 1:d)
{ scnu[,j]<-derlik.nu(mu[j],gam,invgam,ub,margmodel,link)
if(margmodel=="nb1" | margmodel=="nb2")
{ scgam[,j]<-derlik.gam(mu[j],gam,invgam,ub,margmodel) }
}
sc<-NULL
for(j in 1:d)
{ if(ydat[j]>ub)
{ scnui<-iderlik.nu(mu[j],gam,invgam,ydat[j],margmodel,link)
if(margmodel=="nb1" | margmodel=="nb2")
{ scgami<-iderlik.gam(mu[j],gam,invgam,ydat[j],margmodel)
} else {
scgami<-NULL}
}
else {
scnui<-scnu[ydat[j]+1,j]
if(margmodel=="nb1" | margmodel=="nb2")
{ scgami<-scgam[ydat[j]+1,j]
} else {
scgami<-NULL}}
sc<-c(sc,c(scnui,scgami))
}
X<-WtScMat$X
delta<-WtScMat$delta
omega<-WtScMat$omega
g<-X%*%t(delta)%*%solve(omega,sc)
g
}
# solving the wcl estimating equations
# input:
# start the starting values (IEE estimates) for the vector of
# regression and not regression parameters
# xdat the matrix of covariates (use the constant 1 for the first covariate)
# ydat the vector with the response
# margmodel indicates the marginal model. Choices are 'poisson' for Poisson,
# 'bernoulli' for Bernoulli, and 'nb1', 'nb2' for the NB1 and NB2
# parametrization of negative binomial in Cameron and Trivedi (1998)
# link is the link function. Choices are 'log' for the log link function,
# 'logit' for the logit link function, and 'probit' for the probit link function.
# WtScMat is a list containing the following components:
# omega the array with the Omega matrices
# delta the array with the Delta matrices
# X the array with the X matrices
# output:
# the wcl estimates
wcl<-function(start,WtScMat,xdat,ydat,margmodel,link="log")
{ multiroot(f=bwcl,start,atol=1e-4,rtol=1e-4,ctol=1e-4,
WtScMat=WtScMat,xdat=xdat,ydat=ydat,margmodel=margmodel,link=link)
}
godambe=function(b,gam,rh,p,q,xdat,margmodel,link="log")
{ WtScMat<-weightMat(b,gam,rh,p,q,xdat,margmodel,link)
omega= WtScMat$omega
delta= WtScMat$delta
X= WtScMat$X
psi<-delta%*%t(X)
hess<-t(psi)%*%solve(omega)%*%psi
solve(hess)
}
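# A minimal numerical sketch (same illustrative toy matrices as in the sketch
# above; clearly not real output of weightMat) of the inverse Godambe
# computation in godambe(): psi = Delta %*% t(X), and the asymptotic
# covariance is (t(psi) %*% Omega^{-1} %*% psi)^{-1}.
if (FALSE) {
  X     <- matrix(c(1, 1, 1, 0.2, 0.5, 0.9), 2, 3, byrow = TRUE)
  delta <- diag(c(0.8, 1.1, 0.9))
  omega <- diag(3) + 0.2
  psi  <- delta %*% t(X)                   # d x (p+q)
  hess <- t(psi) %*% solve(omega) %*% psi  # Godambe information
  solve(hess)                              # asymptotic covariance of the estimates
}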
|
/scratch/gouwar.j/cran-all/cranData/weightedCL/R/wcl.R
|
#' Weighted Generalised Covariance Measure (WGCM) With Fixed Weight Functions
#' Conditional Independence Test
#'
#' The Weighted Generalised Covariance Measure (WGCM) with Fixed Weight Functions
#' is a test for conditional independence. It is a generalisation of the
#' Generalised Covariance Measure implemented in the R package
#' GeneralisedCovarianceMeasure.
#'
#' @param X A (n x d_X) numeric matrix with n observations of d_X variables.
#' @param Y A (n x d_Y) numeric matrix with n observations of d_Y variables.
#' @param Z A (n x d_Z) numeric matrix with n observations of d_Z variables.
#' @param regr.meth One of "gam" and "xgboost" indicating the regression method
#' used to estimate the conditional expectations E[X|Z] and E[Y|Z].
#' @param regr.pars Optional additional regression parameters according to
#' GeneralisedCovarianceMeasure::comp.resids().
#' @param weight.num Number k_0 of weight functions per dimension of Z to be
#' used additionally to the constant weight function w(z) = 1. The total number
#' of weight functions will be 1 + k_0 * d_Z. In case of max(d_X, d_Y) > 1, the
#' same 1 + k_0 * d_Z weight functions are used for every combination of the
#' components of X and Y.
#' @param weight.meth String indicating the method to choose the weight functions.
#' Currently, only "sign" is implemented.
#' @param nsim Number of samples used to calculate the p-value using simulation.
#'
#' @return A p-value for the null hypothesis of conditional independence of X
#' and Y given Z.
#'
#' @references Please cite the following papers.
#' Cyrill Scheidegger, Julia Hoerrmann, Peter Buehlmann:
#' "The Weighted Generalised Covariance Measure"
#' \url{https://arxiv.org/abs/2111.04361}
#'
#' Rajen D. Shah, Jonas Peters:
#' "The Hardness of Conditional Independence Testing and the Generalised
#' Covariance Measure"
#' \url{https://arxiv.org/abs/1804.07203}
#'
#' @examples
#' set.seed(1)
#' n <- 200
#' Z <- rnorm(n)
#' X <- Z + 0.3*rnorm(n)
#' Y1 <- Z + 0.3*rnorm(n)
#' Y2 <- Z + 0.3*rnorm(n) + 0.3*X
#' Y3 <- Z + 0.3*rnorm(n) + 0.15*X^2
#' wgcm.fix(X, Y1, Z, regr.meth = "gam", weight.num = 7, weight.meth = "sign")
#' wgcm.fix(X, Y2, Z, regr.meth = "gam", weight.num = 7, weight.meth = "sign")
#' wgcm.fix(X, Y3, Z, regr.meth = "gam", weight.num = 7, weight.meth = "sign")
#'
#' @export
#'
#' @importFrom stats pnorm quantile rnorm
wgcm.fix <- function(X, Y, Z, regr.meth, regr.pars = list(),
weight.num, weight.meth = "sign", nsim = 499) {
if ((NCOL(X) != 1) || (NCOL(Y) != 1)) {
p.value <- wgcm.fix.mult(X, Y, Z, regr.meth, regr.pars,
weight.num, weight.meth, nsim)
} else{
n <- NROW(X)
if (is.null(Z)) {
warning("No Z specified. No weight functions can be calculated. Function simply tests for vanishing correlation between X and Y.")
weight.num <- 0
eps <- X - mean(X)
xi <- Y - mean(Y)
} else {
Z <- as.matrix(Z)
eps <- as.numeric(GeneralisedCovarianceMeasure::comp.resids(X, Z,
regr.pars = regr.pars, regr.method = regr.meth))
xi <- as.numeric(GeneralisedCovarianceMeasure::comp.resids(Y, Z,
regr.pars = regr.pars, regr.method = regr.meth))
}
if (weight.num == 0) {
R <- eps * xi
T.stat <- sqrt(n) * mean(R) / sqrt(mean(R^2) - mean(R)^2)
p.value <- 2 * pnorm(-abs(T.stat))
} else {
W <- weight_matrix(Z, weight.num, weight.meth)
R <- eps * xi * W
R <- t(R)
R.norm <- R / sqrt(rowMeans(R^2) - rowMeans(R)^2)
T.stat <- sqrt(n) * max(abs(rowMeans(R.norm)))
T.stat.sim <- apply(abs(R.norm %*% matrix(rnorm(n * nsim), n, nsim)),
2, max) / sqrt(n)
p.value <- (sum(T.stat.sim >= T.stat) + 1) / (nsim + 1)
}
}
return(p.value)
}
#' Weighted Generalised Covariance Measure (WGCM) With Estimated Weight Function
#' Conditional Independence Test
#'
#' The Weighted Generalised Covariance Measure (WGCM) with Estimated Weight Function
#' is a test for conditional independence. It is a generalisation of the
#' Generalised Covariance Measure implemented in the R package
#' GeneralisedCovarianceMeasure.
#'
#' @param X A (n x d_X) numeric matrix with n observations of d_X variables.
#' @param Y A (n x d_Y) numeric matrix with n observations of d_Y variables.
#' @param Z A (n x d_Z) numeric matrix with n observations of d_Z variables.
#' @param beta A real number between 0 and 1 indicating the fraction of the sample
#' used to estimate the weight function.
#' @param regr.meth One of "gam" and "xgboost" indicating the regression method
#' used to estimate the conditional expectations E[X|Z] and E[Y|Z] and the
#' weight function sign(E[(X-E[X|Z])(Y-E[Y|Z])|Z]).
#' @param regr.pars Optional additional regression parameters according to
#' GeneralisedCovarianceMeasure::comp.resids()
#' @param nsim Number of samples used to calculate the p-value using simulation.
#' Only used if max(d_X, d_Y) > 1.
#'
#' @return A p-value for the null hypothesis of conditional independence of X
#' and Y given Z.
#'
#' @references Please cite the following papers.
#' Cyrill Scheidegger, Julia Hoerrmann, Peter Buehlmann:
#' "The Weighted Generalised Covariance Measure"
#' \url{https://arxiv.org/abs/2111.04361}
#'
#' Rajen D. Shah, Jonas Peters:
#' "The Hardness of Conditional Independence Testing and the Generalised
#' Covariance Measure"
#' \url{https://arxiv.org/abs/1804.07203}
#'
#' @examples
#' set.seed(1)
#' n <- 200
#' Z <- rnorm(n)
#' X <- Z + 0.3*rnorm(n)
#' Y1 <- Z + 0.3*rnorm(n)
#' Y2 <- Z + 0.3*rnorm(n) + 0.3*X
#' Y3 <- Z + 0.3*rnorm(n) + 0.15*X^2
#' wgcm.est(X, Y1, Z, beta = 0.3, regr.meth = "gam")
#' wgcm.est(X, Y2, Z, beta = 0.3, regr.meth = "gam")
#' wgcm.est(X, Y3, Z, beta = 0.3, regr.meth = "gam")
#'
#' @export
#'
#' @import xgboost
#' @import mgcv
#' @importFrom methods show
#' @importFrom stats formula
#' @importFrom stats predict
wgcm.est <- function(X, Y, Z, beta = 0.3, regr.meth, regr.pars = list(), nsim=499) {
if ((NCOL(X) != 1) || (NCOL(Y) != 1)) {
p.value <- wgcm.est.mult(X, Y, Z, beta, regr.meth, regr.pars, nsim)
} else {
n <- NROW(X)
if (is.null(Z)){
warning("No Z specified. No weight function can be estimated. Function simply tests for vanishing correlation between X and Y.")
eps <- X - mean(X)
xi <- Y - mean(Y)
R <- eps * xi
T.stat <- sqrt(n) * mean(R) / sqrt(mean(R^2) - mean(R)^2)
p.value <- 2 * pnorm(-abs(T.stat))
} else {
Z <- as.matrix(Z)
ind.train <- sample(1:n, ceiling(beta * n))
Ztrain <- Z[ind.train, ]
Ztest <- Z[-ind.train, ]
W <- predict_weight(X[ind.train], Y[ind.train], Ztrain, Ztest,
regr.meth, regr.pars)
p.value <- wgcm.1d.1sided(X[-ind.train],Y[-ind.train], Ztest, W,
regr.meth, regr.pars)
}
}
return(p.value)
}
|
/scratch/gouwar.j/cran-all/cranData/weightedGCM/R/WGCM.R
|
#The code in trainFunctions.R is copied (with two small modifications in the
#second function) from the R package 'GeneralisedCovarianceMeasure' by
#Jonas Peters and Rajen D. Shah released on CRAN
#<https://cran.r-project.org/package=GeneralisedCovarianceMeasure>
#under the GPL-2 license. Jonas Peters and Rajen D. Shah are the authors and
#copyright holders of the following two functions.
train.gam <- function (X, y, pars = list()) {
if (!exists("numBasisFcts", pars)) {
pars$numBasisFcts <- 100
}
if (!exists("staysilent", pars)) {
pars$staysilent <- TRUE
}
if (!exists("CV.folds", pars)) {
pars$CV.folds <- NA
}
if (is.null(X) || dim(as.matrix(X))[2] == 0) {
result <- list()
result$Yfit <- as.matrix(rep(mean(y), length(y)))
result$residuals <- as.matrix(y - result$Yfit)
result$model <- NA
result$df <- NA
result$edf <- NA
result$edf1 <- NA
result$p.values <- NA
}
else {
p <- dim(as.matrix(X))
if (!is.na(pars$CV.folds)) {
num.folds <- pars$CV.folds
rmse <- Inf
whichfold <- sample(rep(1:num.folds, length.out = p[1]))
for (j in 1:length(pars$numBasisFcts)) {
mod <- train.gam(as.matrix(X)[whichfold == j,
], y[whichfold == j], pars = list(numBasisFcts = pars$numBasisFcts,
CV.folds = NA))
datframe <- data.frame(as.matrix(X)[whichfold !=
j, ])
names(datframe) <- paste("var", 2:p[2],
sep = "")
rmse.tmp <- sum((predict(mod$model, datframe) -
y[whichfold != j])^2)
if (rmse.tmp < rmse) {
rmse <- rmse.tmp
final.numBasisFcts <- pars$numBasisFcts[j]
}
}
}
else {
final.numBasisFcts <- pars$numBasisFcts
}
if (p[1]/p[2] < 3 * final.numBasisFcts) {
final.numBasisFcts <- ceiling(p[1]/(3 * p[2]))
if (pars$staysilent == FALSE) {
cat("changed number of basis functions to ",
final.numBasisFcts, " in order to have enough samples per basis function\n")
}
}
dat <- data.frame(as.matrix(y), as.matrix(X))
coln <- rep("null", p[2] + 1)
for (i in 1:(p[2] + 1)) {
coln[i] <- paste("var", i, sep = "")
}
colnames(dat) <- coln
labs <- "var1 ~ "
if (p[2] > 1) {
for (i in 2:p[2]) {
labs <- paste(labs, "s(var", i, ",k = ",
final.numBasisFcts, ") + ", sep = "")
}
}
labs <- paste(labs, "s(var", p[2] + 1, ",k = ",
final.numBasisFcts, ")", sep = "")
mod_gam <- FALSE
try(mod_gam <- gam(formula = formula(labs), data = dat),
silent = TRUE)
if (typeof(mod_gam) == "logical") {
cat("There was some error with gam. The smoothing parameter is set to zero.\n")
labs <- "var1 ~ "
if (p[2] > 1) {
for (i in 2:p[2]) {
labs <- paste(labs, "s(var", i, ",k = ",
final.numBasisFcts, ",sp=0) + ", sep = "")
}
}
labs <- paste(labs, "s(var", p[2] + 1, ",k = ",
final.numBasisFcts, ",sp=0)", sep = "")
mod_gam <- gam(formula = formula(labs), data = dat)
}
result <- list()
result$Yfit <- as.matrix(mod_gam$fitted.values)
result$residuals <- as.matrix(mod_gam$residuals)
result$model <- mod_gam
result$df <- mod_gam$df.residual
result$edf <- mod_gam$edf
result$edf1 <- mod_gam$edf1
result$p.values <- summary.gam(mod_gam)$s.pv
}
return(result)
}
train.xgboost1 <- function (X, y, pars = list()) {
n <- length(y)
if (!exists("nrounds", pars)) {
pars$nrounds <- 50
}
if (!exists("max_depth", pars)) {
pars$max_depth <- c(1, 3, 4, 5, 6)
}
if (!exists("CV.folds", pars)) {
pars$CV.folds <- 10
}
if (!exists("ncores", pars)) {
pars$ncores <- 1
}
if (!exists("early_stopping", pars)) {
pars$early_stopping <- 10
}
if (!exists("silent", pars)) {
pars$silent <- TRUE
}
if (is.null(X) || dim(as.matrix(X))[2] == 0) {
result <- list()
result$Yfit <- as.matrix(rep(mean(y), length(y)))
result$residuals <- as.matrix(y - result$Yfit)
result$model <- NA
result$df <- NA
result$edf <- NA
result$edf1 <- NA
result$p.values <- NA
}
else {
X <- as.matrix(X)
if (!is.na(pars$CV.folds)) {
num.folds <- pars$CV.folds
rmse <- matrix(0, pars$nrounds, length(pars$max_depth))
set.seed(1)
whichfold <- sample(rep(1:num.folds, length.out = n))
for (j in 1:length(pars$max_depth)) {
max_depth <- pars$max_depth[j]
for (i in 1:10) {
dtrain <- xgb.DMatrix(data = data.matrix(X[whichfold !=
i, ]), label = y[whichfold != i])
dtest <- xgb.DMatrix(data = data.matrix(X[whichfold ==
i, ]), label = y[whichfold == i])
watchlist <- list(train = dtrain, test = dtest)
if (pars$ncores > 1) {
bst <- xgb.train(data = dtrain, nthread = pars$ncores,
watchlist = watchlist, nrounds = pars$nrounds,
max_depth = max_depth, verbose = FALSE,
early_stopping_rounds = pars$early_stopping,
callbacks = list(cb.evaluation.log()))
}
else {
bst <- xgb.train(data = dtrain, nthread = 1,
watchlist = watchlist, nrounds = pars$nrounds,
max_depth = max_depth, verbose = FALSE,
early_stopping_rounds = pars$early_stopping,
callbacks = list(cb.evaluation.log()))
}
## MODIFICATION BY CYRILL SCHEIDEGGER
########################################
#newscore <- (bst$evaluation_log[,3])^1
newscore <- bst$evaluation_log$test_rmse
########################################
## END OF MODIFICATION BY CYRILL SCHEIDEGGER
if (length(newscore) < pars$nrounds) {
newscore <- c(newscore, rep(Inf, pars$nrounds -
length(newscore)))
}
rmse[, j] <- rmse[, j] + newscore
}
}
mins <- arrayInd(which.min(rmse), .dim = dim(rmse))
if (!pars$silent) {
show(rmse)
show(mins)
if ((mins[1] == 1) | (mins[1] == pars$nrounds) |
(mins[2] == 1) | (mins[2] == length(pars$max_depth))) {
show("There have been parameters selected that were the most extreme of the CV values")
show(mins)
}
}
final.nrounds <- mins[1]
final.max_depth <- pars$max_depth[mins[2]]
}
else {
if (length(pars$max_depth) > 1) {
stop("providing a vector of parameters must be used with CV")
}
final.max_depth <- pars$max_depth
final.nrounds <- pars$nrounds
}
dtrain <- xgb.DMatrix(data = data.matrix(X), label = y)
bstY <- xgb.train(data = dtrain, nrounds = final.nrounds,
max_depth = final.max_depth, verbose = !pars$silent)
result <- list()
result$Yfit <- predict(bstY, data.matrix(X))
result$residuals <- as.matrix(y - result$Yfit)
## MODIFICATION BY CYRILL SCHEIDEGGER:
####################
# result$model <- NA
result$model <- bstY
####################
## END OF MODIFICATION BY CYRILL SCHEIDEGGER
result$df <- NA
result$edf <- NA
result$edf1 <- NA
result$p.values <- NA
}
return(result)
}
|
/scratch/gouwar.j/cran-all/cranData/weightedGCM/R/trainFunctions.R
|
## function for multivariate wgcm.fix
wgcm.fix.mult <- function(X, Y, Z, regr.meth, regr.pars, weight.num,
weight.meth, nsim){
X <- as.matrix(X)
Y <- as.matrix(Y)
n <- NROW(X)
dx <- NCOL(X)
dy <- NCOL(Y)
if(is.null(Z)){
warning("No Z specified. No weight functions can be calculated. Function simply tests for vanishing correlation between components of X and Y.")
eps.mat <- t(t(X)-colMeans(X))
xi.mat <- t(t(Y)-colMeans(Y))
W <- rep(1,n)
} else {
Z <- as.matrix(Z)
calc.res.Z <- function(V){
return(as.numeric(GeneralisedCovarianceMeasure::comp.resids(V, Z,
regr.pars = regr.pars,
regr.method = regr.meth)))
}
eps.mat <- apply(X,2, calc.res.Z)
xi.mat <- apply(Y,2,calc.res.Z)
W <- weight_matrix(Z, weight.num, weight.meth)
}
R <- NULL
for (j in 1:dx){
for (l in 1:dy){
R.jl <- eps.mat[,j]*xi.mat[,l]
R <- cbind(R, R.jl*W)
}
}
R <- t(R)
R.norm <- R / sqrt(rowMeans(R^2) - rowMeans(R)^2)
T.stat <- sqrt(n) * max(abs(rowMeans(R.norm)))
T.stat.sim <- apply(abs(R.norm %*% matrix(rnorm(n * nsim), n, nsim)),
2, max) / sqrt(n)
p.value <- (sum(T.stat.sim >= T.stat) + 1) / (nsim + 1)
return(p.value)
}
## function for multivariate wgcm.est
wgcm.est.mult <- function(X, Y, Z, beta, regr.meth, regr.pars, nsim){
X <- as.matrix(X)
Y <- as.matrix(Y)
n <- NROW(X)
dx <- NCOL(X)
dy <- NCOL(Y)
if(is.null(Z)){
warning("No Z specified. No weight functions can be estimated. Function simply tests for vanishing correlation between components of X and Y.")
eps.mat <- t(t(X)-colMeans(X))
xi.mat <- t(t(Y)-colMeans(Y))
R <- NULL
for (j in 1:dx){
for (l in 1:dy){
R.jl <- eps.mat[,j]*xi.mat[,l]
R <- cbind(R, R.jl)
}
}
R <- t(R)
R.norm <- R / sqrt(rowMeans(R^2) - rowMeans(R)^2)
T.stat <- sqrt(n) * max(abs(rowMeans(R.norm)))
T.stat.sim <- apply(abs(R.norm %*% matrix(rnorm(n * nsim), n, nsim)),
2, max) / sqrt(n)
p.value <- (sum(T.stat.sim >= T.stat) + 1) / (nsim + 1)
} else {
Z <- as.matrix(Z)
ind.train <- sample(1:n, ceiling(beta*n))
Xtrain <- X[ind.train,]
Xtest <- X[-ind.train,]
Ytrain <- Y[ind.train,]
Ytest <- Y[-ind.train,]
Ztrain <- Z[ind.train,]
Ztest <- Z[-ind.train,]
calc.res.Z <- function(V){
return(as.numeric(GeneralisedCovarianceMeasure::comp.resids(V, Ztest,
regr.pars = regr.pars,
regr.method = regr.meth)))
}
eps.mat <- apply(Xtest, 2, calc.res.Z)
xi.mat <- apply(Ytest, 2, calc.res.Z)
R <- NULL
for (j in 1:dx){
for (l in 1:dy){
W <- predict_weight(Xtrain[,j], Ytrain[,l], Ztrain, Ztest, regr.meth, regr.pars)
R.jl <- eps.mat[,j]*xi.mat[,l]
R <- cbind(R, R.jl*W)
}
}
R <- t(R)
R.norm <- R/sqrt(rowMeans(R^2)-rowMeans(R)^2)
ntest <- n-length(ind.train)
#The estimated w-functions aim at making a positive test statistic. Hence
#a one-sided test.
T.stat <- sqrt(ntest) * max(rowMeans(R.norm))
T.stat.sim <- apply(R.norm %*% matrix(rnorm(ntest * nsim), ntest, nsim),
2, max) / sqrt(ntest)
p.value <- (sum(T.stat.sim >= T.stat) + 1)/(nsim + 1)
}
return(p.value)
}
## function to calculate weight matrix
weight_matrix <- function(Z, weight.num, weight.meth) {
if (weight.meth == "sign") {
n <- NROW(Z)
dz <- NCOL(Z)
W <- rep(1,n)
if (weight.num >= 1) {
d.probs <- (1:weight.num) / (weight.num + 1)
for (i in 1:dz) {
Zi <- Z[,i]
a.vec <- quantile(Zi, d.probs, names=F)
Wi <- outer(Zi, a.vec, signa)
W <- cbind(W,Wi)
}
}
} else {
stop("Only method \"sign\" implemented yet to calculate weight function")
}
return(W)
}
## translated sign weight function
signa <- function(x,a){return(sign(x-a))}
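# A small illustrative sketch (toy data; run interactively with the functions
# in this file in scope) of the sign weight matrix: with a one-dimensional Z
# and weight.num = 2, W has 1 + 2*1 = 3 columns, the constant weight plus
# sign(Z - a) at the empirical 1/3 and 2/3 quantiles a.
if (FALSE) {
  set.seed(1)
  Z <- matrix(rnorm(10), ncol = 1)
  W <- weight_matrix(Z, weight.num = 2, weight.meth = "sign")
  dim(W)   # 10 x 3
  head(W)
}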
## function to calculate a 1-sided p-value for wgcm.est, since we expect
## the test statistic to be positive under the alternative
wgcm.1d.1sided <- function(Xtest, Ytest, Ztest, W, regr.meth, regr.pars) {
n <- NROW(Ztest)
eps <- as.numeric(GeneralisedCovarianceMeasure::comp.resids(Xtest, Ztest,
regr.pars = regr.pars, regr.method = regr.meth))
xi <- as.numeric(GeneralisedCovarianceMeasure::comp.resids(Ytest, Ztest,
regr.pars = regr.pars, regr.method = regr.meth))
R <- eps*xi*W
T.stat <- sqrt(n)*mean(R)/sqrt(mean(R^2)-mean(R)^2)
p.value <- 1-pnorm(T.stat)
return(p.value)
}
## function to estimate weight function for wgcm.est
predict_weight <- function(Xtrain, Ytrain, Ztrain, Ztest,
regr.meth, regr.pars) {
eps <- as.numeric(GeneralisedCovarianceMeasure::comp.resids(Xtrain, Ztrain,
regr.pars = regr.pars, regr.method = regr.meth))
xi <- as.numeric(GeneralisedCovarianceMeasure::comp.resids(Ytrain, Ztrain,
regr.pars = regr.pars, regr.method = regr.meth))
eps.xi.train <- eps * xi
switch(regr.meth, "gam"={
W <- sign(predict_weight_gam(eps.xi.train, Ztrain, Ztest, regr.pars))
}, "xgboost"={
W <- sign(predict_weight_xgboost(eps.xi.train, Ztrain, Ztest, regr.pars))
}
)
return(W)
}
## function to estimate weight function for wgcm.est using gam
predict_weight_gam <- function(V, Z, Ztest, pars = list()) {
mod <- train.gam(Z, V, pars = list())
dz <- NCOL(Z)
dpred <- data.frame(Ztest)
names(dpred) <- paste("var", 2:(dz+1), sep = "")
Vpred <- predict(mod$model, dpred)
return(Vpred)
}
## function to estimate weight function for wgcm.est using xgboost
predict_weight_xgboost <- function(V, Z, Ztest, pars = list()) {
mod <- train.xgboost1(Z, V, pars = list())
dz <- NCOL(Z)
dpred <- xgb.DMatrix(data = data.matrix(Ztest))
Vpred <- predict(mod$model, dpred)
return(Vpred)
}
|
/scratch/gouwar.j/cran-all/cranData/weightedGCM/R/utils.R
|
amplify <-
function (gamma, lambda)
{
stopifnot(length(gamma) == 1)
stopifnot(gamma > 1)
stopifnot(min(lambda) > gamma)
delta <- (gamma * lambda - 1)/(lambda - gamma)
names(delta) <- lambda
delta
}
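# A short worked example (values chosen purely for illustration): an overall
# sensitivity parameter gamma = 2 is amplified into pairs (lambda, delta) with
# delta = (gamma*lambda - 1)/(lambda - gamma); e.g. lambda = 3 gives delta = 5.
if (FALSE) {
  amplify(2, c(3, 4, 5, 6, 7))
  # returns delta = 5, 3.5, 3, 2.75, 2.6, named by lambda
}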
|
/scratch/gouwar.j/cran-all/cranData/weightedRank/R/amplify.R
|
dwgtRank <-
function (y, gamma=1, m=2, m1=2, m2=2, phifunc = NULL,
alternative="greater", scores=NULL, range=TRUE)
{
  # Adjusted from wgtRank in the weightedRank package, version 0.1.6
  # The four named phi functions are removed and replaced by the m arguments
  # Within-block rank scores are now an option via the scores argument
  # A lower-tailed test is now an explicit option
  # The within-block range may now optionally be replaced by the gap (range = FALSE)
  # Check input
stopifnot(is.matrix(y) | is.data.frame(y))
if (!is.null(scores)) stopifnot(length(scores)==(dim(y)[2]))
stopifnot(0 == sum(is.na(as.vector(y))))
stopifnot(min(dim(y)) >= 2)
stopifnot(is.vector(gamma) & (length(gamma) == 1)
& (gamma >= 1))
stopifnot((alternative=="greater")|(alternative=="less"))
# Define several functions that will be used in computations
# This is the function used to score the ranks of the blocks.
# It is given by the right side of expression (9) in
# Rosenbaum (2011) Biometrics, 67, 1017–1027
multrnksU <- function(pk, m1 = 2, m2 = 2, m = 2) {
n <- length(pk)
    q <- rep(0, n)
for (l in m1:m2) {
q <- q + (l * choose(m, l) * (pk^(l - 1)) * ((1 -
pk)^(m - l)))
}
q/max(q)
}
# If the user did not specify phifunc, then use multrnksU instead
if (is.null(phifunc)) {
stopifnot((m1>=1)&(m2>=m1)&(m>=m2))
phifunc<-function(pk){multrnksU(pk,m1=m1,m2=m2,m=m)}
}
# This function computes the asymptotic separable approximation
# to the upper bound on the P-value, as discussed in
# Gastwirth, Krieger and Rosenbaum (2000) JRSS-B
separable1kA <- function(ymat, gamma = 1) {
n <- dim(ymat)[1]
m <- dim(ymat)[2]
o <- t(apply(ymat, 1, sort))
allmu <- matrix(NA, n, m - 1)
allsigma2 <- matrix(NA, n, m - 1)
maxmu <- rep(-Inf, n)
maxsig2 <- rep(-Inf, n)
for (j in 1:(m - 1)) {
pr <- c(rep(1, j), rep(gamma, m - j))/(j + ((m -
j) * gamma))
mu <- as.vector(o %*% pr)
sigma2 <- as.vector((o * o) %*% pr) - (mu * mu)
chgmu <- (mu > maxmu)
samemu <- (mu == maxmu)
if (sum(chgmu) > 0) {
maxmu[chgmu] <- mu[chgmu]
maxsig2[chgmu] <- sigma2[chgmu]
}
if (sum(samemu) > 0) {
maxsig2[samemu] <- pmax(sigma2[samemu], maxsig2[samemu])
}
}
list(maxmu = maxmu, maxsig2 = maxsig2)
}
# Begin computations
J <- dim(y)[2]
nset <- dim(y)[1]
# Use the range or the gap?
if (range) rg <- apply(y, 1, max) - apply(y, 1, min)
else {
mx <- apply(y, 1, max)
sm <- apply(y, 1, sum) - mx
rg <- mx - (sm/(J-1))
}
# Score the block ranks
rkrg <- phifunc(rank(rg)/nset)
# Determine the within-block ranks
if (is.null(scores)){
rky <- t(apply(y, 1, rank)) # uses average ranks
}
if (!is.null(scores)){
rky<-t(apply(y, 1, rank, ties.method = "min")) # uses min ranks
orky<-rky
for (i in 1:J) rky[orky==i] <- scores[i]
}
if (alternative=="less") rky<-(-rky)
mv <- separable1kA(rky, gamma = gamma)
ts <- mean(rky[, 1] * rkrg)
ex <- sum(mv$maxmu * rkrg)/nset
va <- (sum(mv$maxsig2 * rkrg * rkrg)/nset)/nset
dev <- (ts - ex)/sqrt(va)
pval <- 1 - stats::pnorm(dev)
if (alternative=="less"){
ts<-(-ts)
ex<-(-ex)
dev<-(-dev)
}
detail <- c(dev, ts, ex, va, gamma)
names(detail) <- c("Deviate", "Statistic", "Expectation",
"Variance", "Gamma")
list(pval = pval, detail = detail)
}
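# A hedged usage sketch (simulated data, not taken from the package
# documentation): nset matched blocks of size J = 3 with the treated outcome
# in column 1, tested at sensitivity parameter gamma = 1.5 with the
# (7,8,8) rank scores.
if (FALSE) {
  set.seed(1)
  nset <- 100; J <- 3
  y <- matrix(rnorm(nset * J), nset, J)
  y[, 1] <- y[, 1] + 0.5          # constant shift for the treated column
  dwgtRank(y, gamma = 1.5, m = 8, m1 = 7, m2 = 8)
}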
|
/scratch/gouwar.j/cran-all/cranData/weightedRank/R/dwgtRank.R
|
ef2C <-
function (y, gamma=1, upsilon=1, alternative="greater", trunc=0.2)
{
stopifnot((alternative=="greater")|(alternative=="less"))
stopifnot(is.matrix(y) | is.data.frame(y))
stopifnot(0 == sum(is.na(as.vector(y))))
stopifnot((dim(y)[2]) == 3)
stopifnot(is.vector(gamma) & (length(gamma) == 1)
& (gamma >= 1))
stopifnot(is.vector(upsilon) & (length(upsilon) == 1)
& (upsilon >= 1))
if (alternative=="less") y<- (-y)
f1<-dwgtRank(y[,1:2],gamma=gamma,m=8,m1=7,m2=8, phifunc = NULL,
alternative="greater", scores=NULL, range=TRUE)
f2<-dwgtRank(y[,3:1],gamma=upsilon,m=8,m1=8,m2=8, phifunc = NULL,
alternative="less", scores=c(1,2,5), range=FALSE)
TreatedVSControl1<-f1$detail
Control2vsOthers<-f2$detail
pvals<-c(f1$pval,f2$pval,sensitivitymv::truncatedP(c(f1$pval,f2$pval),trunc=trunc))
names(pvals)<-c("TreatedVSControl1","Control2vsOthers","Combined")
detail<-rbind(TreatedVSControl1,Control2vsOthers)
colnames(detail)[5]<-"Gamma/Upsilon"
names(dimnames(detail))<-c("Evidence Factor","Test Details")
list(pvals=pvals,detail=detail)
}
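# A hedged usage sketch (simulated data; the sensitivitymv package is needed
# for the truncated product combination): column 1 is treated and columns 2-3
# are two control groups, so ef2C() returns the two evidence-factor P-values
# together with their combination.
if (FALSE) {
  set.seed(1)
  y <- matrix(rnorm(300), 100, 3)
  y[, 1] <- y[, 1] + 0.5
  ef2C(y, gamma = 1.5, upsilon = 1.25)
}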
|
/scratch/gouwar.j/cran-all/cranData/weightedRank/R/ef2C.R
|
wgtRank <-
function(y,phi="u868",phifunc=NULL,gamma=1){
############## #
# check input #
################
stopifnot(is.matrix(y)|is.data.frame(y))
stopifnot(0 == sum(is.na(as.vector(y))))
stopifnot(min(dim(y))>=2)
stopifnot(is.vector(gamma)&(length(gamma)==1)
&(gamma>=1))
if (is.null(phifunc))
{stopifnot(is.element(phi,c("u868","u878",
"quade","wilc")))}
############## #
# subfunctions #
################
multrnksU <- function(pk, m1 = 2, m2 = 2, m = 2) {
# This is the right side of expression (9)
# in Rosenbaum (2011) Biometrics page 1022
# Note that 0 <= pk <= 1
n <- length(pk)
    q <- rep(0, n)
for (l in m1:m2) {
q <- q + (l * choose(m, l) * (pk^(l - 1)) * ((1 - pk)^(m - l)))
}
q/max(q)
}
u868<-function(pk){multrnksU(pk,m1=6,m2=8,m=8)}
u878<-function(pk){multrnksU(pk,m1=7,m2=8,m=8)}
quade<-function(pk){pk}
wilc<-function(pk){rep(1,length(pk))}
if (is.null(phifunc)){
if (phi=="u868") phifunc<-u868
else if (phi=="u878") phifunc<-u878
else if (phi=="quade") phifunc<-quade
else if (phi=="wilc") phifunc<-wilc
}
separable1kA <- function (ymat, gamma = 1)
{
# Modified from separable1k in sensitivitymw package
# Instead of producing the final inference,
# this version computes the max expectation and var
n <- dim(ymat)[1]
m <- dim(ymat)[2]
o <- t(apply(ymat, 1, sort))
allmu <- matrix(NA, n, m - 1)
allsigma2 <- matrix(NA, n, m - 1)
maxmu <- rep(-Inf, n)
maxsig2 <- rep(-Inf, n)
for (j in 1:(m - 1)) {
pr <- c(rep(1, j), rep(gamma, m - j))/(j + ((m - j) *
gamma))
mu <- as.vector(o %*% pr)
sigma2 <- as.vector((o * o) %*% pr) - (mu * mu)
chgmu <- (mu > maxmu)
samemu <- (mu == maxmu)
if (sum(chgmu) > 0) {
maxmu[chgmu] <- mu[chgmu]
maxsig2[chgmu] <- sigma2[chgmu]
}
if (sum(samemu) > 0) {
maxsig2[samemu] <- pmax(sigma2[samemu], maxsig2[samemu])
}
}
list(maxmu=maxmu,maxsig2=maxsig2)
}
#######################
# Begin main function #
#######################
J<-dim(y)[2]
nset<-dim(y)[1]
rg<-apply(y,1,max)-apply(y,1,min) # ranges within blocks
rkrg<-phifunc(rank(rg)/nset) # scored ranks of ranges
rky<-t(apply(y,1,rank)) # ranks within blocks
mv<-separable1kA(rky,gamma=gamma) # separable calculation
ts<-mean(rky[,1]*rkrg) # test statistic
ex<-sum(mv$maxmu*rkrg)/nset
va<-sum(mv$maxsig2*rkrg*rkrg)/(nset*nset)
dev<-(ts-ex)/sqrt(va)
pval<-1-stats::pnorm(dev)
detail<-c(dev,ts,ex,va,gamma)
names(detail)<-c("Deviate","Statistic","Expectation","Variance","Gamma")
list(pval=pval,detail=detail)
}
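# A hedged usage sketch (simulated blocks of size 3 with the treated outcome in
# column 1), comparing two of the built-in rank-score choices at gamma = 1.5.
if (FALSE) {
  set.seed(1)
  y <- matrix(rnorm(300), 100, 3)
  y[, 1] <- y[, 1] + 0.5
  wgtRank(y, phi = "u868", gamma = 1.5)
  wgtRank(y, phi = "wilc", gamma = 1.5)
}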
|
/scratch/gouwar.j/cran-all/cranData/weightedRank/R/wgtRank.R
|
wgtRanktt <-
function(y,phi1="u868",phi2="u878",phifunc1=NULL,phifunc2=NULL,gamma=1){
############## #
# check input #
################
stopifnot(is.matrix(y)|is.data.frame(y))
stopifnot(0 == sum(is.na(as.vector(y))))
stopifnot(min(dim(y))>=2)
stopifnot(is.vector(gamma)&(length(gamma)==1)
&(gamma>=1))
if (is.null(phifunc1)) {
stopifnot(is.element(phi1,c("u868","u878",
"quade","wilc")))}
if (is.null(phifunc2)) {
stopifnot(is.element(phi2,c("u868","u878",
"quade","wilc")))}
############## #
# subfunctions #
################
multrnksU <- function(pk, m1 = 2, m2 = 2, m = 2) {
# This is the right side of expression (9)
# in Rosenbaum (2011) Biometrics page 1022
# Note that 0 <= pk <= 1
n <- length(pk)
    q <- rep(0, n)
for (l in m1:m2) {
q <- q + (l * choose(m, l) * (pk^(l - 1)) * ((1 - pk)^(m - l)))
}
q/max(q)
}
u868<-function(pk){multrnksU(pk,m1=6,m2=8,m=8)}
u878<-function(pk){multrnksU(pk,m1=7,m2=8,m=8)}
quade<-function(pk){pk}
wilc<-function(pk){rep(1,length(pk))}
if (is.null(phifunc1)){
if (phi1=="u868") phifunc1<-u868
else if (phi1=="u878") phifunc1<-u878
else if (phi1=="quade") phifunc1<-quade
else if (phi1=="wilc") phifunc1<-wilc
}
if (is.null(phifunc2)){
if (phi2=="u868") phifunc2<-u868
else if (phi2=="u878") phifunc2<-u878
else if (phi2=="quade") phifunc2<-quade
else if (phi2=="wilc") phifunc2<-wilc
}
separable1kA <- function (ymat, gamma = 1)
{
# Modified from separable1k in sensitivitymw package
# Instead of producing the final inference,
# this version computes the max expectation and var
n <- dim(ymat)[1]
m <- dim(ymat)[2]
o <- t(apply(ymat, 1, sort))
allmu <- matrix(NA, n, m - 1)
allsigma2 <- matrix(NA, n, m - 1)
maxmu <- rep(-Inf, n)
maxsig2 <- rep(-Inf, n)
for (j in 1:(m - 1)) {
pr <- c(rep(1, j), rep(gamma, m - j))/(j + ((m - j) *
gamma))
mu <- as.vector(o %*% pr)
sigma2 <- as.vector((o * o) %*% pr) - (mu * mu)
chgmu <- (mu > maxmu)
samemu <- (mu == maxmu)
if (sum(chgmu) > 0) {
maxmu[chgmu] <- mu[chgmu]
maxsig2[chgmu] <- sigma2[chgmu]
}
if (sum(samemu) > 0) {
maxsig2[samemu] <- pmax(sigma2[samemu], maxsig2[samemu])
}
}
list(maxmu=maxmu,maxsig2=maxsig2)
}
#######################
# Begin main function #
#######################
J<-dim(y)[2]
nset<-dim(y)[1]
rg<-apply(y,1,max)-apply(y,1,min) # ranges within blocks
rkrg1<-phifunc1(rank(rg)/nset) # scored ranks of ranges
rkrg2<-phifunc2(rank(rg)/nset) # scored ranks of ranges
rky<-t(apply(y,1,rank)) # ranks within blocks
mv<-separable1kA(rky,gamma=gamma) # separable calculation
ts1<-mean(rky[,1]*rkrg1) # test statistic
ts2<-mean(rky[,1]*rkrg2) # test statistic
ex1<-sum(mv$maxmu*rkrg1)/nset
va1<-sum(mv$maxsig2*rkrg1*rkrg1)/(nset*nset)
ex2<-sum(mv$maxmu*rkrg2)/nset
va2<-sum(mv$maxsig2*rkrg2*rkrg2)/(nset*nset)
cov12<-sum(rkrg1*rkrg2*mv$maxsig2)/(nset*nset)
cor12<-cov12/sqrt(va1*va2)
dev1<-(ts1-ex1)/sqrt(va1)
dev2<-(ts2-ex2)/sqrt(va2)
devmx<-max(dev1,dev2)
cmat<-matrix(c(1,cor12,cor12,1),2,2)
jointP<-1-mvtnorm::pmvnorm(lower=c(-Inf,-Inf),mean=c(0,0),
keepAttr=FALSE,sigma=cmat,upper=c(devmx,devmx))
pval1<-1-pnorm(dev1)
pval2<-1-pnorm(dev2)
phi1<-c(pval1,dev1,ts1,ex1,va1,gamma)
phi2<-c(pval2,dev2,ts2,ex2,va2,gamma)
detail<-rbind(phi1,phi2)
colnames(detail)<-c("Pval","Deviate","Statistic","Expectation","Variance","Gamma")
list(jointP=jointP,cor12=cor12,detail=detail)
}
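# A hedged usage sketch (simulated data; mvtnorm is required for the joint
# correction): two candidate rank scores are tried and jointP adjusts for
# taking the larger of the two deviates.
if (FALSE) {
  set.seed(1)
  y <- matrix(rnorm(300), 100, 3)
  y[, 1] <- y[, 1] + 0.5
  wgtRanktt(y, phi1 = "u868", phi2 = "u878", gamma = 1.5)
}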
|
/scratch/gouwar.j/cran-all/cranData/weightedRank/R/wgtRanktt.R
|
#######################################################################
# this code is for calculating the asymptotic covariance
# matrix of CL1 estimates
###########################################################################
# below I omit any exchmvn calls
bvn<-function(lb, ub, rh)
{ rhmat=matrix(c(1,rh,rh,1),2,2)
pmvnorm(lb,ub,c(0,0),rhmat)[1]
}
bvn.deriv.margin<-function(lb, ub, rh, k, ksign)
{ rhmat=matrix(c(1,rh,rh,1),2,2)
mvn.deriv.margin(lb,ub,c(0,0),rhmat,k,ksign)$deriv
}
# TODO: verify that this derivative formula is correct (see the numerical check below)
bvn.deriv.rho<-function(lb, ub, rh)
{ rhmat=matrix(c(1,rh,rh,1),2,2)
mu<-c(0,0)
dmvnorm(ub,mu,rhmat)-dmvnorm(c(ub[1],lb[2]),mu,
rhmat)-dmvnorm(c(lb[1],ub[2]),mu,rhmat)+dmvnorm(lb,mu,rhmat)
}
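# A quick numerical check (assumes mvtnorm is loaded and bvn() above is in
# scope): the analytic derivative should agree with a central finite
# difference of the rectangle probability with respect to rho.
if (FALSE) {
  lb <- c(-0.5, -1); ub <- c(1, 0.8); rh <- 0.4; h <- 1e-5
  analytic <- bvn.deriv.rho(lb, ub, rh)
  numeric  <- (bvn(lb, ub, rh + h) - bvn(lb, ub, rh - h)) / (2 * h)
  c(analytic, numeric)   # the two values should be close
}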
# derivative of the univariate ordinal cdf wrt the u-th cutpoint gam_u
# input:
# y: the response value
# mu: the latent mean (regression) parameter
# gam: the vector of cutpoints
# u: the index of the cutpoint for which the derivative is taken
# link: see weightedScores
# output: the derivative
der.cdf.gam<-function(y,mu,gam,u,link)
{ if(y<1) return(0)
k<-1:y
cuts<-c(-10,gam,10)
lb=cuts[k]+mu
ub=cuts[k+1]+mu
if(link=="probit") { dlatent=dnorm } else { dlatent=dlogis }
dlatentub<-dlatent(ub)
dlatentlb<-dlatent(lb)
res<-rep(NA,y)
for(i in 1:y)
{ if(u==i)
{ res[i]=dlatentub[i] }
else
{ if(u==i-1)
{ res[i]=-dlatentlb[i] }
else {res[i]=0}}
}
sum(res)
}
# derivative with respect to gam_1,gam_2,...,gam_d of the BCL likelihood
# input:
# y: the d-variate response vector
# ...: see weightedScores
# output: a d-variate vector with the derivatives
cl1.der.gam<-function(zlow,zupp,dzlow,dzupp,y,mu,gam,rho,
bivpairs,corstr,d,d2,u,link)
{ s<-rep(0,d)
for(i in 1:d)
{ for(j in 1:d2)
{ k1<-bivpairs[j,][1]
k2<-bivpairs[j,][2]
if(corstr=="exch") rh<-rho else rh<-rho^(k2-k1)
if(i==k1)
{ temp1<-bvn.deriv.margin(zlow[c(k1,k2)],zupp[c(k1,k2)],
rh,1,1)*der.cdf.gam(y[k1],mu[k1],gam,u,link)/dzupp[k1]
temp2<-bvn.deriv.margin(zlow[c(k1,k2)],zupp[c(k1,k2)],
rh,1,-1)*der.cdf.gam(y[k1]-1,mu[k1],gam,u,link)/dzlow[k1]
prob<-bvn(zlow[c(k1,k2)],zupp[c(k1,k2)],rh)
derprob<-(temp1+temp2)/prob
} else {
if(i==k2)
{ temp1<-bvn.deriv.margin(zlow[c(k1,k2)],zupp[c(k1,k2)],
rh,2,1)*der.cdf.gam(y[k2],mu[k2],gam,u,link)/dzupp[k2]
temp2<-bvn.deriv.margin(zlow[c(k1,k2)],zupp[c(k1,k2)],
rh,2,-1)*der.cdf.gam(y[k2]-1,mu[k2],gam,u,link)/dzlow[k2]
prob<-bvn(zlow[c(k1,k2)],zupp[c(k1,k2)],rh)
derprob<-(temp1+temp2)/prob
} else {derprob<-0}}
s[i]<-s[i]+derprob
}}
s
}
# derivative with respect to rho of the BCL
# y: the d-variate response vector
# ...: see weightedScores
# output: the derivative
cl1.der.rho<-function(zlow,zupp,rho,bivpairs,corstr,d2)
{ s<-0
for(j in 1:d2)
{ k1<-bivpairs[j,][1]
k2<-bivpairs[j,][2]
if(corstr=="exch")
{ der<-bvn.deriv.rho(zlow[bivpairs[j,]],zupp[bivpairs[j,]],rho)
prob<-bvn(zlow[bivpairs[j,]],zupp[bivpairs[j,]],rho)
} else {
if(corstr=="ar")
{ t1<-k2-k1
rhar<-rho^(k2-k1)
der<-bvn.deriv.rho(zlow[bivpairs[j,]],zupp[bivpairs[j,]],
rhar)*t1*rhar/rho
prob<-bvn(zlow[bivpairs[j,]],zupp[bivpairs[j,]],rhar)
}
}
s<-s+ der/prob
}
s
}
# d=dimension, K=#categories, ii = decimal index in 1..K^d
# return vector of size d, each element in 0..(K-1); call sites add 1 to map to categories 1..K
d2v=function(d, K, ii)
{ t=ii-1
jj=rep(0,d)
for(i in seq(d,1,-1))
{ jj[i]=t%%K; t=floor(t/K); }
jj
}
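# A small illustrative example: d2v() returns the base-K digits of ii - 1, so
# looping ii over 1:K^d (and adding 1, as done at the call sites) enumerates
# all K^d response vectors with entries in 1..K.
if (FALSE) {
  t(sapply(1:4, function(ii) d2v(d = 2, K = 2, ii)))
  # rows: (0,0), (0,1), (1,0), (1,1)
}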
# d-variate rectangle probability
# y: the d-variate response vector
# ...: see weightedScores
# output: the d-variate rectangle probability
mrect.prob<-function(zlow,zupp,rhomat,bivpairs,corstr,mvncmp)
{ d<-length(zlow)
if(mvncmp==1)
{ prob<-mvnapp(zlow,zupp,rep(0,d),sigma=rhomat)$pr } else {
set.seed(12345)
prob<-pmvnorm(lower=zlow,upper=zupp,mean=rep(0,d),corr=rhomat)[1]
}
prob
}
# derivative with respect to rho of the bivariate rectangle probability
rect.der.rho<-function(zlow,zupp,rho,corstr,j1,j2)
{ if(corstr=="ar")
{ tem1=j2-j1
tem2=bvn.deriv.rho(zlow,zupp,rho)
tem3=(rho)^(1/tem1)
derprob=tem1*tem2*tem3^(tem1-1)
} else { derprob=bvn.deriv.rho(zlow,zupp,rho) }
derprob
}
# derivative with respect to gam_j of the bivariate rectangle probability
rect.der.gam<-function(zlow,zupp,dzlow,dzupp,y,mu,gam,rho,j,link)
{ ub<-noCategories(gam)
derprob<-rep(NA,ub-1)
der1=bvn.deriv.margin(zlow,zupp,rho,j,1)
der2=bvn.deriv.margin(zlow,zupp,rho,j,-1)
for(u in 1:(ub-1))
{ temp1<-der1*der.cdf.gam(y[j],mu[j],gam,u,link)/dzupp[j]
temp2<-der2*der.cdf.gam(y[j]-1,mu[j],gam,u,link)/dzlow[j]
derprob[u]<-temp1+temp2
}
derprob
}
fisher.gam.rho<-function(mu,gam,rho,j,ub,corstr,j1,j2,link)
{ s<-rep(0,ub-1)
nvect=ub^2
for(ii in 1:nvect)
{ y=d2v(2,ub,ii)+1
vlow<-pmargmodel.ord(y-1,mu,gam,link)
tem<-dmargmodel.ord(y,mu,gam,link)
vupp<-vlow+tem
zlow=qnorm(vlow)
zupp=qnorm(vupp)
zlow[zlow < -10]<--10
zupp[zupp > 10]<-10
dzlow=dnorm(zlow)
dzupp=dnorm(zupp)
der1<-rect.der.gam(zlow,zupp,dzlow,dzupp,y,mu,gam,rho,j,link)
der2<-rect.der.rho(zlow,zupp,rho,corstr,j1,j2)
prob<-bvn(zlow,zupp,rho)
s<-s+der1*der2/prob
}
s
}
# composite likelihood information criteria (CLIC AIC/BIC) for CL1 estimates,
# computed from the Godambe (sandwich) information matrix
# input:
# ...: see weightedScores
clic1dePar<-function(nbcl,r,b,gam,xdat,id,tvec,corstr,WtScMat,link,mvncmp)
{ if(is.matrix(xdat))
{ dim<-dim(xdat)
n<-1:dim[1]
p<-dim[2]
} else {n<-1:length(xdat); p<-1}
uid<-unique(id)
d<-id.size(id)
maxd<-max(d)
q<-length(gam)
tvec<-id.time(tvec,d)
bivpairs<-maxpairs(d)
d2<-nrow(bivpairs)
rmat<-cormat(maxd,r,bivpairs,corstr)
t=p+q+1
Dmat<-M<-matrix(0,t,t)
m<-0
for(i in uid)
{ m<-m+1
cases<-id==i
irow=n[cases]
ti<-tvec[irow]
if(length(ti)>1)
{
newmui<-rep(NA,maxd)
if(is.matrix(xdat))
{ x<-xdat[cases,]
newx<-matrix(NA,maxd,p)
newx[ti,]<-x} else {
x<-xdat[cases]
newx<-rep(NA,maxd)
newx[ti]<-x
}
mui<-ordreg.mu(x,b)
newmui[ti]<-mui
ub<-q+1
nvect=ub^d[m] # change here nvect=ub^maxd
nvect2=ub^2
y=vlow=tem=matrix(NA,nvect,maxd)
for(ii in 1:nvect)
{ y[ii,]=d2v(maxd,ub,ii)+1
vlow[ii,]<-pmargmodel.ord(y[ii,]-1,newmui,gam,link)
tem[ii,]<-dmargmodel.ord(y[ii,],newmui,gam,link)
}
vupp<-vlow+tem
zlow=qnorm(vlow)
zupp=qnorm(vupp)
zlow[zlow < -10]<--10
zupp[zupp > 10]<-10
dzlow=dnorm(zlow)
dzupp=dnorm(zupp)
D22i<-0
Delta21i<-matrix(NA,d2,maxd*q)
for(i1 in 1:d2)
{ s<-rep(0,q)
k1<-bivpairs[i1,1]
k2<-bivpairs[i1,2]
if((sum(k1==ti)==1) & (sum(k2==ti)==1))
{
for(ii in 1:nvect2)
{ y2=d2v(2,ub,ii)+1
vlow2<-pmargmodel.ord(y2-1,newmui[c(k1,k2)],gam,link)
tem2<-dmargmodel.ord(y2,newmui[c(k1,k2)],gam,link)
vupp2<-vlow2+tem2
zlow2=qnorm(vlow2)
zupp2=qnorm(vupp2)
zlow2[zlow2 < -10]<--10
zupp2[zupp2 > 10]<-10
dzlow2=dnorm(zlow2)
dzupp2=dnorm(zupp2)
der2<-rect.der.rho(zlow2,zupp2,rmat[k1,k2],corstr,k1,k2)
prob<-bvn(zlow2,zupp2,rmat[k1,k2])
D22i<-D22i+der2*der2/prob
der1<-NULL
for(i2 in 1:maxd)
{ if(i2==k1)
{ der1<-c(der1,rect.der.gam(zlow2,
zupp2,dzlow2,dzupp2,
y2,newmui[c(k1,k2)],gam,rmat[k1,k2],1,link))
} else {
if(i2==k2)
{ der1<-c(der1,rect.der.gam(zlow2,
zupp2,dzlow2,dzupp2,
y2,newmui[c(k1,k2)],gam,rmat[k1,k2],2,link))
} else {
der1<-c(der1,rep(0,q))}} # new change instead of c(der1,0)
}
s<-s+der1*der2/prob
}
}
Delta21i[i1,]<-s
}
Delta21i<-apply(Delta21i,2,sum)
if(d[m]<maxd)
{ # new code
und<-d[m]
und2<-choose(und,2)
unpairs<-maxpairs(und)
unrmat<-rmat[ti,ti]
Omega22i<-0
Omega12i<-rep(0,q*und)
unnvect=ub^und
for(ii in 1:unnvect)
{ y=d2v(und,ub,ii)+1
vlow<-pmargmodel.ord(y-1,mui,gam,link)
tem<-dmargmodel.ord(y,mui,gam,link)
vupp<-vlow+tem
zlow=as.vector(qnorm(vlow))
zupp=as.vector(qnorm(vupp))
zlow[zlow < -10]<--10
zupp[zupp > 10]<-10
dzlow=dnorm(zlow)
dzupp=dnorm(zupp)
der2<-cl1.der.rho(zlow,zupp,r,unpairs,corstr,und2)
prob<-mrect.prob(zlow,zupp,unrmat,unpairs,corstr,mvncmp)
Omega22i<-Omega22i+der2*der2*prob
mder<-rep(NA,und)
dergam<-NULL
for(jj in 1:und)
{ for(k in 1:q) #q=ub-1
{ dergam<-c(dergam,iderlik.gam.ord(mui[jj],gam,y[jj],k,link))}
}
Omega12i<-Omega12i+dergam*der2*prob
}
} else {
Omega22i<-0
Omega12i<-rep(0,q*maxd)
for(ii in 1:nvect)
{ der2<-cl1.der.rho(zlow[ii,],zupp[ii,],r,bivpairs,corstr,d2)
prob<-mrect.prob(zlow[ii,],zupp[ii,],rmat,bivpairs,corstr,mvncmp)
Omega22i<-Omega22i+der2*der2*prob
mder<-rep(NA,maxd)
dergam<-NULL
for(jj in 1:maxd)
{ for(k in 1:q) #q=ub-1
{ dergam<-c(dergam,iderlik.gam.ord(newmui[jj],gam,y[ii,jj],k,link))}
}
Omega12i<-Omega12i+dergam*der2*prob
}
}
seli<-subselect.ord(ti,q)
Xi<-WtScMat$X[,,m]
Xi<-Xi[,seli]
tXi<-t(Xi)
Delta21i=Delta21i[seli]
D21i<-Delta21i%*%tXi
Delta11<-WtScMat$delta[,,m] # from the wtsc code: weightMat()
Delta11<-Delta11[seli,seli]
D11i<-Xi%*%Delta11%*%tXi
d0<-dim(t(D21i))
D12i<-matrix(0,d0[1],d0[2])
Di<-rbind(cbind(D11i,D12i),cbind(D21i,D22i))
Omega12i<-as.matrix(Omega12i)
Omega11i<-WtScMat$omega[,,m] # from the wtsc code: weightMat() function
Omega11i<-Omega11i[seli,seli]
M11i<-Xi%*%Omega11i%*%tXi
M12i<-Xi%*%Omega12i
M21i<-t(M12i)
M22i<-Omega22i
Mi<-rbind(cbind(M11i,M12i),cbind(M21i,M22i))
# summations
Dmat<-Dmat+Di
M<-M+Mi
}}
#inDmat<-solve(Dmat)
#tr<-sum(diag(M%*%inDmat))
tr<-sum(diag(solve(Dmat,M)))
list(AIC=2*(nbcl+tr),BIC=2*nbcl+log(m)*tr)
}
cov3<-function(j1,j2,j3,mu,gam,rhomat,bivpairs,corstr,ub,link,mvncmp)
{ s<-0
q=ub-1
if(j1==j2)
{ nvect=ub^2
for(ii in 1:nvect)
{ y=d2v(2,ub,ii)+1
vlow<-pmargmodel.ord(y-1,mu[c(j2,j3)],gam,link)
tem<-dmargmodel.ord(y,mu[c(j2,j3)],gam,link)
vupp<-vlow+tem
zlow=qnorm(vlow)
zupp=qnorm(vupp)
zlow[zlow < -10]<--10
zupp[zupp > 10]<-10
#scj1<-iderlik.nu(mu[j1],gam,invgam,y[1],margmodel,link)
scj1<-NULL
for(k in 1:q) #q=ub-1
{ scj1<-c(scj1,iderlik.gam.ord(mu[j1],gam,y[1],k,link)) }
scj2j3<-rect.der.rho(zlow,zupp,rhomat[j2,j3],corstr,j2,j3)
#prob<-rect.prob(y,x[c(j2,j3),],b,gam,invgam,rhomat[j2,j3],marmodel,link)
s<-s+scj1*scj2j3#/prob
}
} else {
if(j1==j3)
{ nvect=ub^2
for(ii in 1:nvect)
{ y=d2v(2,ub,ii)+1
vlow<-pmargmodel.ord(y-1,mu[c(j3,j2)],gam,link)
tem<-dmargmodel.ord(y,mu[c(j3,j2)],gam,link)
vupp<-vlow+tem
zlow=qnorm(vlow)
zupp=qnorm(vupp)
zlow[zlow < -10]<--10
zupp[zupp > 10]<-10
#scj1<-iderlik.nu(mu[j1],gam,invgam,y[1],margmodel,link)
scj1<-NULL
for(k in 1:q) #q=ub-1
{ scj1<-c(scj1,iderlik.gam.ord(mu[j1],gam,y[1],k,link)) }
scj2j3<-rect.der.rho(zlow[c(2,1)],zupp[c(2,1)],rhomat[j3,j2],corstr,j3,j2)
#prob<-rect.prob(y[c(2,1)],x[c(j2,j3),],b,gam,invgam,rhomat[j2,j3],marmodel,link)
s<-s+scj1*scj2j3#/prob
}
}
else
{ nvect=ub^3
for(ii in 1:nvect)
{ y=d2v(3,ub,ii)+1
vlow<-pmargmodel.ord(y-1,mu[c(j1,j2,j3)],gam,link)
tem<-dmargmodel.ord(y,mu[c(j1,j2,j3)],gam,link)
vupp<-vlow+tem
zlow=qnorm(vlow)
zupp=qnorm(vupp)
zlow[zlow < -10]<--10
zupp[zupp > 10]<-10
#scj1<-iderlik.nu(mu[j1],gam,invgam,y[1],margmodel,link) #not sure about y[1]
scj1<-NULL
for(k in 1:q) #q=ub-1
{ scj1<-c(scj1,iderlik.gam.ord(mu[j1],gam,y[1],k,link)) }
scj2j3<-rect.der.rho(zlow[-1],zupp[-1],rhomat[j2,j3],corstr,j2,j3)
prob2<-bvn(zlow[-1],zupp[-1],rhomat[j2,j3])
prob3<-mrect.prob(zlow,zupp,rhomat[c(j1,j2,j3),c(j1,j2,j3)],bivpairs,corstr,mvncmp)
s<-s+scj1*scj2j3/prob2*prob3
}
}}
s
}
cov4<-function(j1,j2,j3,j4,mu,gam,rhomat,bivpairs,corstr,ub,link,mvncmp)
{ s<-0
if(j1==j3 & j2==j4)
{ nvect=ub^2
for(ii in 1:nvect)
{ y=d2v(2,ub,ii)+1
vlow<-pmargmodel.ord(y-1,mu[c(j1,j2)],gam,link)
tem<-dmargmodel.ord(y,mu[c(j1,j2)],gam,link)
vupp<-vlow+tem
zlow=qnorm(vlow)
zupp=qnorm(vupp)
zlow[zlow < -10]<--10
zupp[zupp > 10]<-10
scj1j2<-rect.der.rho(zlow,zupp,rhomat[j1,j2],corstr,j1,j2)
prob<-bvn(zlow,zupp,rhomat[j1,j2])
s<-s+scj1j2*scj1j2/prob
#print(c(scj1j2,prob))
}
} else {
if(j1==j3 & j2!=j4)
{ nvect=ub^3
for(ii in 1:nvect)
{ y=d2v(3,ub,ii)+1
vlow<-pmargmodel.ord(y-1,mu[c(j1,j2,j4)],gam,link)
tem<-dmargmodel.ord(y,mu[c(j1,j2,j4)],gam,link)
vupp<-vlow+tem
zlow=qnorm(vlow)
zupp=qnorm(vupp)
zlow[zlow < -10]<--10
zupp[zupp > 10]<-10
scj1j2<-rect.der.rho(zlow[-3],zupp[-3],rhomat[j1,j2],corstr,j1,j2)
prob21<-bvn(zlow[-3],zupp[-3],rhomat[j1,j2])
scj3j4<-rect.der.rho(zlow[-2],zupp[-2],rhomat[j3,j4],corstr,j3,j4)
prob22<-bvn(zlow[-2],zupp[-2],rhomat[j3,j4])
prob3<-mrect.prob(zlow,zupp,rhomat[c(j1,j2,j4),c(j1,j2,j4)],
bivpairs,corstr,mvncmp)
s<-s+scj1j2*scj3j4/prob21/prob22*prob3
}
} else {
if(j1==j4 & j2!=j3)
{ nvect=ub^3
for(ii in 1:nvect)
{ y=d2v(3,ub,ii)+1
vlow<-pmargmodel.ord(y-1,mu[c(j1,j2,j3)],gam,link)
tem<-dmargmodel.ord(y,mu[c(j1,j2,j3)],gam,link)
vupp<-vlow+tem
zlow=qnorm(vlow)
zupp=qnorm(vupp)
zlow[zlow < -10]<--10
zupp[zupp > 10]<-10
scj1j2<-rect.der.rho(zlow[-3],zupp[-3],rhomat[j1,j2],corstr,j1,j2)
prob21<-bvn(zlow[-3],zupp[-3],rhomat[j1,j2])
scj3j4<-rect.der.rho(zlow[c(3,1)],zupp[c(3,1)],rhomat[j3,j4],
corstr,j3,j4)
prob22<-bvn(zlow[c(3,1)],zupp[c(3,1)],rhomat[j3,j4])
prob3<-mrect.prob(zlow,zupp,rhomat[c(j1,j2,j3),c(j1,j2,j3)],
bivpairs,corstr,mvncmp)
s<-s+scj1j2*scj3j4/prob21/prob22*prob3
}
} else {
if(j2==j3 & j1!=j4)
{ nvect=ub^3
for(ii in 1:nvect)
{ y=d2v(3,ub,ii)+1
vlow<-pmargmodel.ord(y-1,mu[c(j1,j2,j4)],gam,link)
tem<-dmargmodel.ord(y,mu[c(j1,j2,j4)],gam,link)
vupp<-vlow+tem
zlow=qnorm(vlow)
zupp=qnorm(vupp)
zlow[zlow < -10]<--10
zupp[zupp > 10]<-10
scj1j2<-rect.der.rho(zlow[-3],zupp[-3],rhomat[j1,j2],corstr,j1,j2)
prob21<-bvn(zlow[-3],zupp[-3],rhomat[j1,j2])
scj3j4<-rect.der.rho(zlow[-1],zupp[-1],rhomat[j3,j4],corstr,j3,j4)
prob22<-bvn(zlow[-1],zupp[-1],rhomat[j3,j4])
prob3<-mrect.prob(zlow,zupp,rhomat[c(j1,j2,j4),c(j1,j2,j4)],
bivpairs,corstr,mvncmp)
s<-s+scj1j2*scj3j4/prob21/prob22*prob3
}
} else {
if(j2==j4 & j1!=j3)
{ nvect=ub^3
for(ii in 1:nvect)
{ y=d2v(3,ub,ii)+1
vlow<-pmargmodel.ord(y-1,mu[c(j1,j2,j3)],gam,link)
tem<-dmargmodel.ord(y,mu[c(j1,j2,j3)],gam,link)
vupp<-vlow+tem
zlow=qnorm(vlow)
zupp=qnorm(vupp)
zlow[zlow < -10]<--10
zupp[zupp > 10]<-10
scj1j2<-rect.der.rho(zlow[-3],zupp[-3],rhomat[j1,j2],corstr,j1,j2)
prob21<-bvn(zlow[-3],zupp[-3],rhomat[j1,j2])
scj3j4<-rect.der.rho(zlow[c(3,2)],zupp[c(3,2)],rhomat[j3,j4]
,corstr,j3,j4)
prob22<-bvn(zlow[c(3,2)],zupp[c(3,2)],rhomat[j3,j4])
prob3<-mrect.prob(zlow,zupp,rhomat[c(j1,j2,j3),c(j1,j2,j3)],
bivpairs,corstr,mvncmp)
s<-s+scj1j2*scj3j4/prob21/prob22*prob3
}
} else
{ nvect=ub^4
for(ii in 1:nvect)
{ y=d2v(4,ub,ii)+1
vlow<-pmargmodel.ord(y-1,mu[c(j1,j2,j3,j4)],gam,link)
tem<-dmargmodel.ord(y,mu[c(j1,j2,j3,j4)],gam,link)
vupp<-vlow+tem
zlow=qnorm(vlow)
zupp=qnorm(vupp)
zlow[zlow < -10]<--10
zupp[zupp > 10]<-10
scj1j2<-rect.der.rho(zlow[1:2],zupp[1:2],rhomat[j1,j2],corstr,j1,j2)
prob21<-bvn(zlow[1:2],zupp[1:2],rhomat[j1,j2])
scj3j4<-rect.der.rho(zlow[3:4],zupp[3:4],rhomat[j3,j4],corstr,j3,j4)
prob22<-bvn(zlow[3:4],zupp[3:4],rhomat[j3,j4])
prob4<-mrect.prob(zlow,zupp,rhomat[c(j1,j2,j3,j4),c(j1,j2,j3,j4)],
bivpairs,corstr,mvncmp)
s<-s+scj1j2*scj3j4/prob21/prob22*prob4
}
}}}}}
s
}
subselect2<-function(pairs,t)
{
lr<-nrow(pairs)
res=NULL
for(j in 1:lr)
{ if(sum(pairs[j,1]==t)==1 & sum(pairs[j,2]==t)==1) res=c(res,j)
}
res
}
clic<-function(nbcl,r,b,gam,xdat,id,tvec,corstr,WtScMat,link,mvncmp)
{ if(is.matrix(xdat))
{ dim<-dim(xdat)
n<-1:dim[1]
p<-dim[2]
} else {n<-1:length(xdat); p<-1}
uid<-unique(id)
d<-id.size(id)
maxd<-max(d)
q<-length(gam)
tvec<-id.time(tvec,d)
bivpairs<-maxpairs(d)
d2<-nrow(bivpairs)
rmat<-cormat(maxd,r,bivpairs,corstr)
t=p+q+d2
Dmat<-M<-matrix(0,t,t)
m<-0
for(i in uid)
{ m<-m+1
cases<-id==i
irow=n[cases]
ti<-tvec[irow]
if(length(ti)>1)
{
newmui<-rep(NA,maxd)
if(is.matrix(xdat))
{ x<-xdat[cases,]
newx<-matrix(NA,maxd,p)
newx[ti,]<-x} else {
x<-xdat[cases]
newx<-rep(NA,maxd)
newx[ti]<-x
}
mui<-ordreg.mu(x,b)
newmui[ti]<-mui
ub<-q+1
nvect=ub^maxd
nvect2=ub^2
y=vlow=tem=matrix(NA,nvect,maxd)
for(ii in 1:nvect)
{ y[ii,]=d2v(maxd,ub,ii)+1
vlow[ii,]<-pmargmodel.ord(y[ii,]-1,newmui,gam,link)
tem[ii,]<-dmargmodel.ord(y[ii,],newmui,gam,link)
}
vupp<-vlow+tem
zlow=qnorm(vlow)
zupp=qnorm(vupp)
zlow[zlow < -10]<--10
zupp[zupp > 10]<-10
dzlow=dnorm(zlow)
dzupp=dnorm(zupp)
if(d[m]<maxd)
{ # new code
und<-d[m]
und2<-choose(und,2)
unpairs<-maxpairs(und)
unrmat<-rmat[ti,ti]
D22i<-rep(0,und2)
Delta21i<-matrix(NA,und2,und*q)
for(i1 in 1:und2)
{ k1<-unpairs[i1,1]
k2<-unpairs[i1,2]
s<-rep(0,q)
s2=0
for(ii in 1:nvect2)
{ y2=d2v(2,ub,ii)+1
vlow2<-pmargmodel.ord(y2-1,mui[c(k1,k2)],gam,link)
tem2<-dmargmodel.ord(y2,mui[c(k1,k2)],gam,link)
vupp2<-vlow2+tem2
zlow2=qnorm(vlow2)
zupp2=qnorm(vupp2)
zlow2[zlow2 < -10]<--10
zupp2[zupp2 > 10]<-10
dzlow2=dnorm(zlow2)
dzupp2=dnorm(zupp2)
der2<-rect.der.rho(zlow2,zupp2,unrmat[k1,k2],corstr,k1,k2)
prob<-bvn(zlow2,zupp2,unrmat[k1,k2])
s2<-s2+der2*der2/prob
der1<-NULL
for(i2 in 1:und)
{ if(i2==k1)
{ der1<-c(der1,rect.der.gam(zlow2,
zupp2,dzlow2,dzupp2,
y2,mui[c(k1,k2)],gam,unrmat[k1,k2],1,link))
} else {
if(i2==k2)
{ der1<-c(der1,rect.der.gam(zlow2,
zupp2,dzlow2,dzupp2,
y2,mui[c(k1,k2)],gam,unrmat[k1,k2],2,link))
} else {
der1<-c(der1,rep(0,q))}} # new change instead of c(der1,0)
}
s<-s+der1*der2/prob
}
D22i[i1]=s2
Delta21i[i1,]<-s
}
Omega12i<-NULL #Omega12i<-matrix(NA,q*maxd,d2)
for(j1 in 1:und)
{ temp=NULL
for(j2 in 1:und2)
{ temp=c(temp,cov3(j1,unpairs[j2,1],unpairs[j2,2],
mui,gam,unrmat,unpairs,corstr,ub,link,mvncmp))
}
subOmega12i=matrix(temp,ncol=und2)
Omega12i<-rbind(Omega12i,subOmega12i)
}
Omega22i<-matrix(NA,und2,und2)
if(und2==1){ Omega22i=D22i } else {
for(j1 in 1:(und2-1))
{ for(j2 in (j1+1):und2)
{ temp<-cov4(unpairs[j1,1],unpairs[j1,2],unpairs[j2,1],unpairs[j2,2],
mui,gam,unrmat,unpairs,corstr,ub,link,mvncmp)
Omega22i[j1,j2]<-temp
Omega22i[j2,j1]<-temp
}
}}
} else {
D22i<-rep(0,d2)
Delta21i<-matrix(NA,d2,maxd*q)
for(i1 in 1:d2)
{ k1<-bivpairs[i1,1]
k2<-bivpairs[i1,2]
if((sum(k1==ti)==1) & (sum(k2==ti)==1))
{
s<-rep(0,q)
s2=0
for(ii in 1:nvect2)
{ y2=d2v(2,ub,ii)+1
vlow2<-pmargmodel.ord(y2-1,newmui[c(k1,k2)],gam,link)
tem2<-dmargmodel.ord(y2,newmui[c(k1,k2)],gam,link)
vupp2<-vlow2+tem2
zlow2=qnorm(vlow2)
zupp2=qnorm(vupp2)
zlow2[zlow2 < -10]<--10
zupp2[zupp2 > 10]<-10
dzlow2=dnorm(zlow2)
dzupp2=dnorm(zupp2)
der2<-rect.der.rho(zlow2,zupp2,rmat[k1,k2],corstr,k1,k2)
prob<-bvn(zlow2,zupp2,rmat[k1,k2])
s2<-s2+der2*der2/prob
der1<-NULL
for(i2 in 1:maxd)
{ if(i2==k1)
{ der1<-c(der1,rect.der.gam(zlow2,
zupp2,dzlow2,dzupp2,
y2,newmui[c(k1,k2)],gam,rmat[k1,k2],1,link))
} else {
if(i2==k2)
{ der1<-c(der1,rect.der.gam(zlow2,
zupp2,dzlow2,dzupp2,
y2,newmui[c(k1,k2)],gam,rmat[k1,k2],2,link))
} else {
der1<-c(der1,rep(0,q))}} # new change instead of c(der1,0)
}
s<-s+der1*der2/prob
}
D22i[i1]=s2
Delta21i[i1,]<-s
}
}
Omega12i<-NULL #Omega12i<-matrix(NA,q*maxd,d2)
for(j1 in 1:maxd)
{ temp=NULL
for(j2 in 1:d2)
{ temp=c(temp,cov3(j1,bivpairs[j2,1],bivpairs[j2,2],
newmui,gam,rmat,bivpairs,corstr,ub,link,mvncmp))
}
subOmega12i=matrix(temp,ncol=d2)
Omega12i<-rbind(Omega12i,subOmega12i)
}
Omega22i<-matrix(NA,d2,d2)
for(j1 in 1:(d2-1))
{ for(j2 in (j1+1):d2)
{ temp<-cov4(bivpairs[j1,1],bivpairs[j1,2],bivpairs[j2,1],bivpairs[j2,2],
newmui,gam,rmat,bivpairs,corstr,ub,link,mvncmp)
Omega22i[j1,j2]<-temp
Omega22i[j2,j1]<-temp
}
}
}
seli<-subselect.ord(ti,q)
if(d[m]>2) diag(Omega22i)=D22i
#D22i<-diag(diag(Omega22i)) for j1=j2
#D22i=diag(D22i)
Omega11i<-WtScMat$omega[,,m]
Omega11i<-Omega11i[seli,seli]
sel2i=subselect2(bivpairs,ti)
Xi<-WtScMat$X[,,m]
Xi<-Xi[,seli]
tXi<-t(Xi)
D21i<-Delta21i%*%tXi
temp1=matrix(0,d2,p+q)
temp1[sel2i,]=D21i
D21i=temp1
temp2=rep(0,d2)
temp2[sel2i]=D22i
D22i=diag(temp2)
Delta11<-WtScMat$delta[,,m] # from the wtsc code: weightMat()
Delta11<-Delta11[seli,seli]
D11i<-Xi%*%Delta11%*%tXi
d0<-dim(t(D21i))
D12i<-matrix(0,d0[1],d0[2])
Di<-rbind(cbind(D11i,D12i),cbind(D21i,D22i))
M11i<-Xi%*%Omega11i%*%tXi
M12i<-Xi%*%Omega12i
temp1=matrix(0,p+q,d2)
temp1[,sel2i]=M12i
M12i=temp1
M21i<-t(M12i)
M22i<-Omega22i
temp2=matrix(0,d2,d2)
temp2[sel2i,sel2i]=M22i
M22i=temp2
Mi<-rbind(cbind(M11i,M12i),cbind(M21i,M22i))
##########################
Dmat<-Dmat+Di
M<-M+Mi
}}
#inDmat<-solve(Dmat)
#tr<-sum(diag(M%*%inDmat))
tr<-sum(diag(solve(Dmat,M)))
list(AIC=2*(nbcl+tr),BIC=2*nbcl+log(m)*tr)
}
|
/scratch/gouwar.j/cran-all/cranData/weightedScores/R/CLIC-ord.R
|
# approximation to derivative of MVN rectangle probabilities using methods
# in Joe (1995), JASA
# lb=lower limit, ub=upper limit,
# mu = mean vector
# sigma= covariance matrix (this is assumed to be positive definite)
# matrix functions in R/Splus can be used to check positive definiteness
# k=argument of lb/ub for deriv, ksign=1 for upper, -1 for lower
# eps is tolerance for integration in mvn approx
# nsim is used for m>=7 if random permutations are used in the
# approximation method
mvn.deriv.margin<- function(lb, ub, mu, sigma, k, ksign, type=1, eps = 1.e-05, nsim=0)
{
#if(type==1)
#{ if(!is.loaded(symbol.C("mvndu"))) dyn.load("./mvn.so") }
#else
#{ if(!is.loaded(symbol.C("mvndu2"))) dyn.load("./mvn.so") }
m <- length(ub)
if(m>=8 && nsim==0) nsim<-10000
  if(m!=length(lb)) stop("lengths of lb and ub must be the same")
if(k<1 || k>m) stop("k must be between 1 and dim(lb)")
if(abs(ksign)!=1) stop("ksign is -1 or 1 for lower/upper limit")
tem <- sqrt(diag(sigma))
w <- (lb - mu)/tem
x <- (ub - mu)/tem
tem <- diag(1/tem)
corr <- tem %*% sigma %*% tem
corr<-c(corr)
if(eps<1.e-06) eps<- 1.e-06
#print(w)
#print(x)
#print(corr)
ks=k*ksign
#print(ks)
if(type==1)
{ out <- .C("r_mvndu",
as.integer(m), as.double(w), as.double(x), as.double(corr),
as.integer(ks), as.integer(nsim),
as.double(eps), ifail = as.integer(0), deriv=as.double(0))
}
else
{ out <- .C("r_mvndu2",
as.integer(m), as.double(w), as.double(x), as.double(corr),
as.integer(ks), as.integer(nsim),
as.double(eps), ifail = as.integer(0), deriv=as.double(0))
}
# need to modify derivative when sigma is not correlation
#print(out$deriv)
der=out$deriv
der=der/sqrt(sigma[k,k])
out <- list(deriv = der, ifail = out$ifail)
out
}
# lb=lower limit, ub=upper limit
# mu = mean vector
# sigma= covariance matrix (this is assumed to be positive definite)
# matrix functions in R/Splus can be used to check positive definiteness
# (j1, k1) index of correlation matrix for derivative
# this is not derivative wrt a covariance!!
# eps is tolerance for integration in mvn approx
# nsim is used for m>=7 if random permutations are used in the
# approximation method
mvn.deriv.rho<- function(lb, ub, mu, sigma, j1, k1, type=1, eps = 1.e-05, nsim=0)
{
#if(type==1)
#{ if(!is.loaded(symbol.C("mvndrh"))) dyn.load("./mvn.so") }
#else
#{ if(!is.loaded(symbol.C("mvndrh2"))) dyn.load("./mvn.so") }
m <- length(ub)
if(m>=8 && nsim==0) nsim<-10000
  if(m!=length(lb)) stop("lengths of lb and ub must be the same")
  if(j1<1 || j1>m) stop("j1 must be between 1 and dim(lb)")
if(k1<1 || k1>m) stop("k1 must be between 1 and dim(lb)")
if(j1==k1) stop("j1 and k1 should be different indices")
tem <- sqrt(diag(sigma))
w <- (lb - mu)/tem
x <- (ub - mu)/tem
tem <- diag(1/tem)
corr <- tem %*% sigma %*% tem
corr<-c(corr)
if(eps<1.e-06) eps<- 1.e-06
#print(w)
#print(x)
#print(corr)
if(type==1)
{ out <- .C("r_mvndrh",
as.integer(m), as.double(w), as.double(x), as.double(corr),
as.integer(j1), as.integer(k1), as.integer(nsim),
as.double(eps), ifail = as.integer(0), deriv=as.double(0))
}
else
{ out <- .C("r_mvndrh2",
as.integer(m), as.double(w), as.double(x), as.double(corr),
as.integer(j1), as.integer(k1), as.integer(nsim),
as.double(eps), ifail = as.integer(0), deriv=as.double(0))
}
# need to modify derivative when sigma is not correlation
out <- list(deriv = out$deriv, ifail = out$ifail)
out
}
|
/scratch/gouwar.j/cran-all/cranData/weightedScores/R/mvn.deriv.R
|
# approximation to MVN rectangle probabilities using methods
# in Joe (1995), JASA
mvnapp<- function(lb, ub, mu, sigma, type=1, eps = 1.e-05, nsim=0)
{
#if(!is.loaded(symbol.C("mvnapp"))) dyn.load("./mvn.so")
m <- length(ub)
if(m>=8 && nsim==0) nsim<-10000
  if(m!=length(lb)) stop("lengths of lb and ub must be the same")
tem <- sqrt(diag(sigma))
w <- (lb - mu)/tem
x <- (ub - mu)/tem
tem <- diag(1/tem)
corr <- tem %*% sigma %*% tem
corr<-c(corr)
if(eps<1.e-06) eps<- 1.e-06
# print(w)
# print(x)
# print(corr)
# print(inf)
perr <- 0.
# nsim<-0
ifail<-0
out <- .C("mvnapp",
as.integer(type), as.integer(m), as.double(w),
as.double(x), as.double(corr), as.integer(nsim),
as.double(eps), prob = as.double(perr), perr = as.double(perr),
ifail = as.integer(ifail))
out <- list(pr = out$prob, esterr=out$perr, ifail = out$ifail)
out
}
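# A hedged usage sketch (assumes the compiled routine from the weightedScores
# package is available and that mvtnorm is installed for comparison): the
# approximation should be close to the trivariate rectangle probability from
# pmvnorm().
if (FALSE) {
  library(mvtnorm)
  sig <- matrix(0.5, 3, 3); diag(sig) <- 1
  lb <- rep(-1, 3); ub <- rep(1, 3)
  mvnapp(lb, ub, mu = rep(0, 3), sigma = sig)$pr
  pmvnorm(lower = lb, upper = ub, mean = rep(0, 3), corr = sig)[1]
}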
|
/scratch/gouwar.j/cran-all/cranData/weightedScores/R/mvnapp.R
|
# ML for the ordinal probit/logit regression model, using modified Newton-Raphson
iee.ord<- function(x,y,link,iprint=0,maxiter=20,toler=1.e-6)
{
if(!is.vector(x))
{ if(nrow(x)!=length(y)) stop("x, y not same length") }
else if(length(x)!=length(y)) { stop("x, y not same length") }
if(is.vector(x)) x=as.matrix(x)
n=length(y)
# assume y in 1,...,norc
norc=length(unique(y))
npred=ncol(x)
np=norc-1+npred
# centering of x so that can use start of 0 for beta
xmn=apply(x,2,"mean")
xc=scale(x,center=xmn,scale=F)
# starting point for NR
cum=(1:(norc-1))
cutp=rep(0,norc-1)
for(k in cum)
{ pr=sum(y<=k)
if (pr==0) { pr=1 }
cutp[k]=qlogis(pr/n)
}
b=rep(0,npred)
if(link=="probit") { dlatent=dnorm; platent=pnorm; der.dlatent=der.dnorm } else {
dlatent=dlogis; platent=plogis; der.dlatent=der.dlogis }
# loop
mxdif=1
iter=0
while(iter<maxiter & mxdif>toler)
{ tem=xc%*%b
cutb=c(-10,cutp,10)
ub=tem+cutb[y+1]
lb=tem+cutb[y]
ucdf=platent(ub)
lcdf=platent(lb)
updf=dlatent(ub)
lpdf=dlatent(lb)
# score vector
dbeta=rep(0,npred)
dcut=rep(0,norc+1)
# Hessian matrix
d2beta=matrix(0,npred,npred)
d2bcut=matrix(0,npred,norc+1)
d2cut=matrix(0,norc+1,norc+1)
for(i in 1:n)
{ uderi=der.dlatent(ub[i])
lderi=der.dlatent(lb[i])
xx=xc[i,]
pri=ucdf[i]-lcdf[i]
prderi=updf[i]-lpdf[i]
dbeta=dbeta+xx*prderi/pri
k=y[i]
dcut[k+1]=dcut[k+1]+updf[i]/pri
dcut[k]=dcut[k]-lpdf[i]/pri
pr2=pri^2
d2beta=d2beta+ outer(xx,xx)*((uderi-lderi)*pri-prderi^2)/pr2
d2bcut[,k+1]=d2bcut[,k+1]+xx*(uderi*pri-updf[i]*prderi)/pr2
d2bcut[,k]=d2bcut[,k]+xx*(-lderi*pri+lpdf[i]*prderi)/pr2
d2cut[k+1,k+1]=d2cut[k+1,k+1]+ (uderi*pri-updf[i]^2)/pr2
d2cut[k,k]=d2cut[k,k]+ (-lderi*pri-lpdf[i]^2)/pr2
tem2=updf[i]*lpdf[i]/pr2
d2cut[k,k+1]=d2cut[k,k+1]+tem2
d2cut[k+1,k]=d2cut[k+1,k]+tem2
}
sc=c(dcut[2:norc],dbeta)
if(npred==1) d2bcut=matrix(c(d2bcut[,2:norc]),npred,norc-1)
else d2bcut=d2bcut[,2:norc]
d2cut=d2cut[2:norc,2:norc]
h=cbind(d2cut,t(d2bcut))
h=rbind(h,cbind(d2bcut,d2beta))
dif=solve(h,sc)
mxdif=max(abs(dif))
cutp=cutp-dif[1:(norc-1)]
b=b-dif[norc:np]
# modification for cutp out of order
chk=cutp[-1]-cutp[1:(norc-2)]
ibad=sum(chk<=0)
while(ibad>0)
{ dif=dif/2
mxdif=mxdif/2
cutp=cutp+dif[1:(norc-1)]
b=b+dif[norc:np]
chk=cutp[-1]-cutp[1:(norc-2)]
ibad=sum(chk<=0)
}
iter=iter+1
if(iprint==1)
{ cat("iter=",iter,", (with centered x's) cutp=", cutp, ", b=",b,"\n")
cat(" scorevec=", sc,"\n\n")
}
}
if(iter>=maxiter) cat("*** did not converge, check with iprint=1\n")
# cutpoints with original x
for(j in 1:npred)
{ cutp=cutp-b[j]*xmn[j] }
if(iprint==1) cat("(with original x's) cutp=", cutp,"\n")
# Hessian with original x's, repeat of previous code with x instead of xc
tem=x%*%b
cutb=c(-10,cutp,10)
ub=tem+cutb[y+1]
lb=tem+cutb[y]
ucdf=platent(ub)
lcdf=platent(lb)
updf=dlatent(ub)
lpdf=dlatent(lb)
nllk=0
dbeta=rep(0,npred)
dcut=rep(0,norc+1)
d2beta=matrix(0,npred,npred)
d2bcut=matrix(0,npred,norc+1)
d2cut=matrix(0,norc+1,norc+1)
for(i in 1:n)
  { uderi=der.dlatent(ub[i])
    lderi=der.dlatent(lb[i])
xx=x[i,]
pri=ucdf[i]-lcdf[i]
nllk=nllk-log(pri)
prderi=updf[i]-lpdf[i]
dbeta=dbeta+xx*prderi/pri
k=y[i]
dcut[k+1]=dcut[k+1]+updf[i]/pri
dcut[k]=dcut[k]-lpdf[i]/pri
pr2=pri^2
d2beta=d2beta+ outer(xx,xx)*((uderi-lderi)*pri-prderi^2)/pr2
d2bcut[,k+1]=d2bcut[,k+1]+xx*(uderi*pri-updf[i]*prderi)/pr2
d2bcut[,k]=d2bcut[,k]+xx*(-lderi*pri+lpdf[i]*prderi)/pr2
d2cut[k+1,k+1]=d2cut[k+1,k+1]+ (uderi*pri-updf[i]^2)/pr2
d2cut[k,k]=d2cut[k,k]+ (-lderi*pri-lpdf[i]^2)/pr2
tem2=updf[i]*lpdf[i]/pr2
d2cut[k,k+1]=d2cut[k,k+1]+tem2
d2cut[k+1,k]=d2cut[k+1,k]+tem2
}
sc=c(dcut[2:norc],dbeta)
# print(sc)
if(npred==1) d2bcut=matrix(c(d2bcut[,2:norc]),npred,norc-1)
if(npred>1) d2bcut=d2bcut[,2:norc]
d2cut=d2cut[2:norc,2:norc]
h=cbind(d2cut,t(d2bcut))
h=rbind(h,cbind(d2bcut,d2beta))
h=-h
#print(h)
covm=solve(h)
#print(covm)
if(iprint==1)
{ cat("nllk= ", nllk,"\n")
cat("cutpts= ", cutp,"\n")
cat("beta= ", b,"\n")
cat("SEs : ",sqrt(diag(covm)),"\n\n")
}
list(negloglik=nllk, gam=cutp, reg=b, cov=covm)
}
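# A hedged usage sketch (assumes the package's internal helpers der.dnorm and
# der.dlogis are in scope, as they are when the package is loaded): an ordinal
# probit regression with 3 categories and one covariate on simulated data.
if (FALSE) {
  set.seed(1)
  n <- 200
  x <- rnorm(n)
  ystar <- 0.8 * x + rnorm(n)
  y <- cut(ystar, breaks = c(-Inf, -0.5, 0.5, Inf), labels = FALSE)  # y in 1..3
  fit <- iee.ord(x, y, link = "probit")
  fit$gam   # estimated cutpoints
  fit$reg   # estimated regression coefficient
}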
|
/scratch/gouwar.j/cran-all/cranData/weightedScores/R/ord.reg.univar.R
|
# bivariate standard normal log-likelihood for one observation
# input:
# low the vector of lower limits of length n.
# upp the vector of upper limits of length n.
# r the correlation parameter
# output:
# bivariate standard normal log-likelihood for one observation
bivlik<-function(low,upp,r)
{ rmat<-matrix(c(1,r,r,1),2,2)
prob<-pmvnorm(lower=low,upper=upp,mean=rep(0,2),corr=rmat)[1]
log(prob)
}
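# Example for bivlik (sketch; assumes the mvtnorm package providing pmvnorm is loaded):
# bivlik(low=c(-1,-1), upp=c(1,1), r=0.5)
# gives log P(-1 < Z1 < 1, -1 < Z2 < 1) for a standard bivariate normal with correlation 0.5.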
# the mean values of the univariate marginal distribution
# corresponding to the used link function
# input:
# x the matrix of the covariates
# b the vector with the regression coefficients
# link has three options: 1. "log", 2. "logit". 3. "probit"
# output:
# the mean values of the univariate marginal distribution
linked.mu<-function(x,b,link)
{ if(link=="log")
{ mu<-exp(x %*% b)
}
else
{ if(link=="logit")
{ expnu<-exp(x %*% b)
mu<-expnu/(1+expnu)
}
else
{ # link=probit
mu<-pnorm(x %*% b) }
}
mu
}
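# Example for linked.mu (sketch with made-up numbers):
# x <- cbind(1, c(0.2, 1.5))                 # intercept plus one covariate
# linked.mu(x, b=c(0.1, 0.3), link="logit")  # inverse logit of x %*% b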
# Density of the univariate marginal distribution
# input:
# y the vector of (non-negative integer) quantiles.
# mu the mean parameter of the univariate distribution.
# gam the parameter gamma of the negative binomial distribution.
# invgam the inverse of parameter gamma of negative binomial distribution.
# margmodel indicates the marginal model. Choices are "poisson" for Poisson, "bernoulli" for
# Bernoulli, and "nb1", "nb2" for the NB1 and NB2 parametrizations of negative
# binomial in Cameron and Trivedi (1998).
# output:
# the density of the univariate marginal distribution
dmargmodel<-function(y,mu,gam,invgam,margmodel)
{ if(margmodel=="poisson")
{ dpois(y,mu)
}
else
{ if(margmodel=="bernoulli")
{ dbinom(y,size=1,prob=mu) }
else
{ if(margmodel=="nb1")
{ dnbinom(y,prob=1/(1+gam),size=mu*invgam) }
else
{ # margmodel=="nb2"
dnbinom(y,size=invgam,mu=mu) }}}
}
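# Example for dmargmodel (sketch): NB2 probabilities at y = 0, 1, 2 with mu=2 and gam=0.5:
# dmargmodel(y=0:2, mu=2, gam=0.5, invgam=2, margmodel="nb2")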
# CDF of the univariate marginal distribution
# input:
# y the vector of (non-negative integer) quantiles.
# mu the mean parameter of the univariate distribution.
# gam the parameter gamma of the negative binomial distribution.
# invgam the inverse of parameter gamma of negative binomial distribution.
# margmodel indicates the marginal model. Choices are "poisson" for Poisson, "bernoulli" for
# Bernoulli, and "nb1", "nb2" for the NB1 and NB2 parametrizations of negative
# binomial in Cameron and Trivedi (1998).
# output:
# the cdf of the univariate marginal distribution
pmargmodel<-function(y,mu,gam,invgam,margmodel)
{ if(margmodel=="poisson")
{ ppois(y,mu)
}
else
{ if(margmodel=="bernoulli")
{ pbinom(y,size=1,prob=mu) }
else
{ if(margmodel=="nb1")
{ pnbinom(y,prob=1/(1+gam),size=mu*invgam) }
else
{ # margmodel=="nb2"
pnbinom(y,size=invgam,mu=mu) }}}
}
# quantile of the univariate marginal distribution
# input:
# y the vector of probabilities
# mu the mean parameter of the univariate distribution.
# gam the parameter gamma of the negative binomial distribution.
# invgam the inverse of parameter gamma of negative binomial distribution.
# margmodel indicates the marginal model. Choices are "poisson" for Poisson, "bernoulli" for
# Bernoulli, and "nb1", "nb2" for the NB1 and NB2 parametrizations of negative
# binomial in Cameron and Trivedi (1998).
# output:
# the quantile of the univariate marginal distribution
qmargmodel<-function(y,mu,gam,invgam,margmodel)
{ if(margmodel=="poisson")
{ qpois(y,mu)
}
else
{ if(margmodel=="bernoulli")
{ qbinom(y,size=1,prob=mu) }
else
{ if(margmodel=="nb1")
{ qnbinom(y,prob=1/(1+gam),size=mu*invgam) }
else
{ # margmodel=="nb2"
qnbinom(y,size=invgam,mu=mu) }}}
}
# negative univariate loglikelihood assuming independence within clusters
# input:
# param the vector of regression and non-regression parameters
# xdat the matrix of covariates (use the constant 1 for the first covariate)
# ydat the vector with the response
# margmodel indicates the marginal model. Choices are "poisson" for Poisson,
# "bernoulli" for Bernoulli, and "nb1", "nb2" for the NB1 and NB2
# parametrizations of negative binomial in Cameron and Trivedi (1998)
# link is the link function. Choices are "log" for the log link function,
# "logit" for the logit link function, and "probit" for the probit link function.
# output:
# negative univariate loglikelihood assuming independence within clusters
marglik<-function(param,xdat,ydat,margmodel,link)
{ p<-dim(xdat)[2]
b<-param[1:p]
if(margmodel=="nb1" | margmodel=="nb2")
{ gam<-param[p+1]
invgam<-1/gam
}
#else gam<-invgam<-0
mu<-linked.mu(as.matrix(xdat),b,link)
-sum(log(dmargmodel(ydat,mu,gam,invgam,margmodel)))
}
# Independent estimating equations for binary, Poisson or
# negative binomial regression.
# xdat the matrix of covariates (use the constant 1 for the first covariate)
# ydat the vector with the response
# margmodel indicates the marginal model. Choices are "poisson" for Poisson,
# "bernoulli" for Bernoulli, and "nb1", "nb2" for the NB1 and NB2
# parametrizations of negative binomial in Cameron and Trivedi (1998)
# link is the link function. Choices are "log" for the log link function,
# "logit" for the logit link function, and "probit" for the probit link function.
# output: A list containing the following components:
# coef the vector with the ML estimated regression parameters
# gam the ML estimate of gamma parameter
iee<-function(xdat,ydat,margmodel,link="log")
{ #if(margmodel=="bernoulli") family=binomial else family=poisson
if(margmodel=="nb1" | margmodel=="nb2" | margmodel=="poisson") link="log"
if(margmodel=="bernoulli" & link!="probit") link="logit"
if(margmodel=="poisson")
{ uni<-glm(ydat ~ xdat[,-1],family =poisson(link="log"))
res<-as.vector(uni$coef)
list(reg=res)
} else {
if(margmodel=="bernoulli")
{ if(link=="probit")
{ uni<-glm(ydat ~ xdat[,-1],family =binomial(link="probit"))
} else {
uni<-glm(ydat ~ xdat[,-1],family =binomial(link="logit")) }
res<-as.vector(uni$coef)
list(reg=res)
} else
{ p<-dim(xdat)[2]
uni<-nlm(marglik,c(rep(0,p),1),margmodel=margmodel,
link=link,xdat=xdat,ydat=ydat,iterlim=1000)
res1<-uni$e[1:p]
res2<-uni$e[p+1]
list(reg=res1,gam=res2) }}
}
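# Example for iee (sketch; 'xdat' and 'ydat' are hypothetical objects, with a
# constant first column of 1's in 'xdat'):
# fit <- iee(xdat, ydat, margmodel="nb2")
# fit$reg   # ML regression estimates
# fit$gam   # ML estimate of gamma (NB1/NB2 only)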
# correlation matrix
# input:
# d the dimension
# r a vector with correlation parameters
# pairs the 2-column matrix of bivariate pair indices (as returned by maxpairs)
# corstr indicates the latent correlation structure of normal copula.
# Choices are "exch", "ar", and "unstr" for exchangeable, ar(1) and
# unstructured correlation structure, respectively.
# output:
# the correlation matrix
cormat<-function(d,r,pairs,corstr)
{ rmat<-matrix(1,d,d)
lr<-nrow(pairs)
if(length(r)==1) r<-rep(r,d^2)
if(corstr=="exch" | corstr=="unstr")
{
for(j in 1:lr)
{ rmat[pairs[j,1],pairs[j,2]]<-r[j]
rmat[pairs[j,2],pairs[j,1]]<-r[j]
}
} else {
for(j in 1:lr)
{ temp<-pairs[j,2]-pairs[j,1]
rmat[pairs[j,1],pairs[j,2]]<-r[j]^temp
rmat[pairs[j,2],pairs[j,1]]<-r[j]^temp
}
}
rmat
}
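# Example for cormat (sketch): 3-dimensional exchangeable matrix with rho = 0.4:
# prs <- maxpairs(3)          # the pairs (1,2), (1,3), (2,3)
# cormat(3, 0.4, prs, "exch")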
# Bivariate composite likelihood for multivariate normal copula with Poisson,
# binary, or negative binomial regression.
# input:
# r the vector of normal copula parameters
# b the regression coefficients
# gam the gamma parameter
# xdat the matrix of covariates (use the constant 1 for the first covariate)
# ydat the vector with the response
# id the vector with the id
# tvec the time related vector
# margmodel indicates the marginal model. Choices are "poisson" for Poisson,
# "bernoulli" for Bernoulli, and "nb1", "nb2" for the NB1 and NB2
# parametrizations of negative binomial in Cameron and Trivedi (1998)
# link is the link function. Choices are "log" for the log link function,
# "logit" for the logit link function, and "probit" for the probit link function.
# corstr indicates the latent correlation structure of normal copula.
# Choices are "exch", "ar", and "unstr" for exchangeable, ar(1) and
# unstructured correlation structure, respectively.
# output:
# negative bivariate composite likelihood for multivariate normal copula
# with Poisson, binary, or negative binomial regression.
bcl<-function(r,b,gam,xdat,ydat,id,tvec,margmodel,corstr,link="log")
{ s<-0
if(margmodel=="nb1" | margmodel=="nb2" | margmodel=="poisson") link="log"
if(margmodel=="bernoulli" & link!="probit") link="logit"
if(margmodel=="nb1" | margmodel=="nb2") invgam<-1/gam else invgam<-NULL
uid<-unique(id)
d<-id.size(id)
maxd<-max(d)
tvec<-id.time(tvec,d)
n<-1:dim(xdat)[1]
pairs<-maxpairs(d)
rmat<-cormat(maxd,r,pairs,corstr)
c1<-(r[1] > 1 | r[1] < (-1/(maxd-1)))
c2<-(r[1] > 1 | r[1] < -1)
c3<-(det(rmat)<0 | min(rmat)< -1 | max(rmat)>1)
if((corstr=="exch" & c1) | (corstr=="ar" & c2 ) | (corstr=="unstr" & c3)) {s<-1e10}
else {
for(i in uid)
{ cases<-id==i
irow=n[cases]
yi<-ydat[irow]
ti<-tvec[irow]
newyi<-rep(NA,maxd)
newmui<-rep(NA,maxd)
newyi[ti]<-yi
x<-xdat[cases,]
mui<-linked.mu(x,b,link)
newmui[ti]<-mui
vlow<-pmargmodel(newyi-1,newmui,gam,invgam,margmodel)
tem<-dmargmodel(newyi,newmui,gam,invgam,margmodel)
vupp<-vlow+tem
zlow=qnorm(vlow)
zupp=qnorm(vupp)
for(j in 1:dim(pairs)[1])
{ k1<-pairs[j,][1]
k2<-pairs[j,][2]
if((sum(k1==ti)==1) & (sum(k2==ti)==1))
{ if(corstr=="exch")
{ s<-s +bivlik(zlow[c(k1,k2)],zupp[c(k1,k2)],r) }
else
{ if(corstr=="ar")
{ s<-s +bivlik(zlow[c(k1,k2)],zupp[c(k1,k2)],r^(k2-k1)) }
else
{ # corstr=="unstr"
s<-s +bivlik(zlow[c(k1,k2)],zupp[c(k1,k2)],r[j])
}}}
else { s<-s }
}
}
-s
}
}
# optimization routine for composite likelihood for MVN copula
# b the regression coefficients
# gam the gamma parameter
# xdat the matrix of covariates (use the constant 1 for the first covariate)
# ydat the vector with the response
# id the vector with the id
# tvec the time related vector
# margmodel indicates the marginal model. Choices are "poisson" for Poisson,
# "bernoulli" for Bernoulli, and "nb1", "nb2" for the NB1 and NB2
# parametrizations of negative binomial in Cameron and Trivedi (1998)
# link is the link function. Choices are "log" for the log link function,
# "logit" for the logit link function, and "probit" for the probit link function.
# corstr indicates the latent correlation structure of normal copula.
# Choices are "exch", "ar", and "unstr" for exchangeable, ar(1) and
# unstructured correlation structure, respectively.
# output: A list containing the following components:
# minimum the value of the estimated minimum of CL1 for MVN copula
# estimate the CL1 estimates
# gradient the gradient at the estimated minimum of CL1
# code an integer indicating why the optimization process terminated, see nlm.
cl1<-function(b,gam,xdat,ydat,id,tvec,margmodel,corstr,link="log")
{ if(margmodel=="nb1" | margmodel=="nb2" | margmodel=="poisson") link="log"
if(margmodel=="bernoulli" & link!="probit") link="logit"
d<-id.size(id)
pairs<-maxpairs(d)
if(corstr=="unstr")
{ nom<-dim(pairs)[1]
nlm(bcl,rep(0.1,nom),b,gam,xdat,ydat,id,tvec,margmodel,corstr,link)
}
else
{nlm(bcl,0.1,b,gam,xdat,ydat,id,tvec,margmodel,corstr,link)}
}
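# Example for cl1 (sketch; 'xdat', 'ydat', 'id', 'tvec' are hypothetical
# longitudinal data objects and 'ifit' the output of iee):
# cl <- cl1(b=ifit$reg, gam=ifit$gam, xdat, ydat, id, tvec,
#           margmodel="poisson", corstr="exch")
# cl$estimate   # CL1 estimate of the latent exchangeable correlation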
# the dimension of each id
# input:
# id the vector with the id
# output:
# a vector with the dimension of each id
id.size<-function(id)
{ d<-NULL
uid<-unique(id)
for(i in uid)
{ di<-sum(id==i)
d<-c(d,di) }
d
}
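# Example: id.size(c(1,1,1,2,2)) returns c(3,2)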
# the transformed time points, i.e., 1,2,3,... for each subject
# input:
# tvec the untransformed time points
# d the dimension for all subjects
# output:
# the transformed time points, i.e., 1,2,3,... for each subject
id.time<-function(tvec,d)
{ maxd<-max(d)
ut<-sort(unique(tvec))
newtvec<-rep(NA,length(tvec))
for(j in 1:maxd)
{ newtvec[tvec==ut[j]]<-j }
newtvec
}
# the maximum number of bivariate pairs
# input:
# d the dimension for all subjects
# output:
# the maximum number of bivariate pairs
maxpairs<-function(d)
{ pairs<-NULL
maxd<-max(d)
for(id1 in 1:(maxd-1))
{ for(id2 in (id1+1):maxd)
{ pairs<-rbind(pairs,c(id1,id2)) } }
pairs
}
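# Example: maxpairs(c(3,2)) returns the rows (1,2), (1,3), (2,3)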
# derivative of the marginal loglikelihood with respect to nu
# input:
# mu the mean parameter
# gam the gamma parameter
# invgam the inverse of gamma parameter
# ub the truncation value
# margmodel indicates the marginal model. Choices are "poisson" for Poisson,
# "bernoulli" for Bernoulli, and "nb1", "nb2" for the NB1 and NB2
# parametrizations of negative binomial in Cameron and Trivedi (1998)
# link is the link function. Choices are "log" for the log link function,
# "logit" for the logit link function, and "probit" for the probit link function.
# output:
# the vector with the derivatives of the margmodel loglikelihood with respect to nu
derlik.nu<-function(mu,gam,invgam,ub,margmodel,link)
{ if(link=="probit")
{ if(mu==1 & is.finite(mu)){mu<-0.9999}
nu<-qnorm(mu)
(0:1-mu)/mu/(1-mu)*dnorm(nu)
}
else
{ if(margmodel=="nb1")
{ j<-0:(ub-1)
s<-c(0,cumsum(1/(mu+gam*j)))
(s-invgam*log(1+gam))*mu
}
else
{ if(margmodel=="nb2")
{ pr<-1/(mu*gam+1)
(0:ub-mu)*pr
}
else { 0:ub-mu }}}
}
# derivative of the marginal loglikelihood with respect to nu
# input:
# mu the mean parameter
# gam the gamma parameter
# invgam the inverse of gamma parameter
# y the value of a non-negative integer quantile
# margmodel indicates the marginal model. Choices are "poisson" for Poisson,
# "bernoulli" for Bernoulli, and "nb1", "nb2" for the NB1 and NB2
# parametrizations of negative binomial in Cameron and Trivedi (1998)
# link is the link function. Choices are "log" for the log link function,
# "logit" for the logit link function, and "probit" for the probit link function.
# output:
# the derivative of the margmodel loglikelihood with respect to nu
iderlik.nu<-function(mu,gam,invgam,y,margmodel,link)
{ if(link=="probit")
{ if(mu==1 & is.finite(mu)){mu<-0.9999}
nu<-qnorm(mu)
(y-mu)/mu/(1-mu)*dnorm(nu)
}
else
{ if(margmodel=="nb1")
{ s<-0
if(y>0)
{ j<-0:(y-1)
s<-sum(1/(mu+gam*j))
}
(s-invgam*log(1+gam))*mu
}
else
{ if(margmodel=="nb2")
{ pr<-1/(mu*gam+1)
(y-mu)*pr
}
else { y-mu }}}
}
# derivative of the NB loglikelihood with respect to gamma
# input:
# gam the gamma parameter
# invgam the inverse of gamma parameter
# mu the mean parameter
# ub the truncation value
# margmodel indicates the marginal model. Choices are "nb1", "nb2" for
# the NB1 and NB2 parametrizations of negative binomial in
# Cameron and Trivedi (1998)
# output:
# the vector with the derivatives of the NB loglikelihood with respect to gamma
derlik.gam<-function(mu,gam,invgam,ub,margmodel)
{ j<-0:(ub-1)
if(margmodel=="nb1")
{ s<-c(0,cumsum(j/(mu+gam*j)))
s+invgam*invgam*mu*log(1+gam)-(0:ub+invgam*mu)/(1+gam)
}
else
{ #if(margmodel=="nb2")
pr<-1/(mu*gam+1)
s<-c(0,cumsum(j/(1+j*gam)))
s-log(pr)/(gam*gam)-(0:ub+invgam)*mu*pr
}
}
# derivative of the NB loglikelihood with respect to gamma
# input:
# gam the gamma parameter
# invgam the inverse of gamma parameter
# mu the mean parameter
# y the value of a non-negative integer quantile
# margmodel indicates the marginal model. Choices are "nb1", "nb2" for
# the NB1 and NB2 parametrizations of negative binomial in
# Cameron and Trivedi (1998)
# output:
# the derivative of the NB loglikelihood with respect to gamma
iderlik.gam<-function(mu,gam,invgam,y,margmodel)
{ s<-0
if(margmodel=="nb1")
{ if(y>0)
{ j<-0:(y-1)
s<-sum(j/(mu+gam*j))
}
s+invgam*invgam*mu*log(1+gam)-(y+invgam*mu)/(1+gam)
}
else
{ #if(margmodel=="nb2")
if(y>0)
{ j<-0:(y-1)
s<-sum(j/(1+gam*j))
}
pr<-1/(mu*gam+1)
s-log(pr)/(gam*gam)-(y+invgam)*mu*pr
}
}
# minus expectation of the second derivative of the marginal loglikelihood
# with respect to nu
# input:
# mu the mean parameter
# gam the gamma parameter
# invgam the inverse of gamma parameter
# u the univariate cdfs
# ub the truncation value
# margmodel indicates the marginal model. Choices are "nb1", "nb2" for
# the NB1 and NB2 parametrizations of negative binomial in
# Cameron and Trivedi (1998)
# output:
# the vector with the minus expectations of the margmodel loglikelihood
# with respect to nu
fisher.nu<-function(mu,gam,invgam,u,ub,margmodel,link)
{ if(link=="log" & margmodel=="poisson")
{ mu }
else
{ if(link=="logit")
{ mu*(1-mu) }
else
{ if(margmodel=="nb1")
{ j<-0:ub
s1<-sum(1/(mu+j*gam)/(mu+j*gam)*(1-u))
s2<-sum(1/(mu+j*gam)*(1-u))
(mu*s1-s2+invgam*log(1+gam))*mu
}
else
{ if(margmodel=="nb2")
{ pr<-1/(mu*gam+1)
mu*pr
}
else
{ # link=="probit"
if(mu==1 & is.finite(mu)){mu<-0.9999}
nu<-qnorm(mu)
1/mu/(1-mu)*dnorm(nu)^2}
}}}
}
# minus expectation of the second derivative of the marginal NB loglikelihood
# with respect to gamma
# input:
# mu the mean parameter
# gam the gamma parameter
# invgam the inverse of gamma parameter
# u the univariate cdfs
# ub the truncation value
# margmodel indicates the marginal model. Choices are "nb1", "nb2" for
# the NB1 and NB2 parametrizations of negative binomial in
# Cameron and Trivedi (1998)
# output:
# the vector with the minus expectations of the margmodel loglikelihood
# with respect to gamma
fisher.gam<-function(mu,gam,invgam,u,ub,margmodel)
{ j<-0:ub
if(margmodel=="nb1")
{ pr<-1/(gam+1)
s<-sum((j/(mu+j*gam))^2*(1-u))
s+2*invgam*invgam*invgam*log(1+gam)*mu-2*invgam*invgam*mu*pr-mu/gam*pr
}
else
{ #if(margmodel=="nb2")
s<-sum((invgam+j)^(-2)*(1-u))
invgam^4*(s-gam*mu/(mu+invgam))
}
}
# minus expectation of the second derivative of the marginal loglikelihood
# with respect to nu and gamma
# input:
# mu the mean parameter
# gam the gamma parameter
# invgam the inverse of gamma parameter
# u the univariate cdfs
# ub the truncation value
# margmodel indicates the marginal model. Choices are "nb1", "nb2" for
# the NB1 and NB2 parametrizations of negative binomial in
# Cameron and Trivedi (1998)
# output:
# the vector with the minus expectations of the NB loglikelihood
# with respect to nu and gamma
fisher.nu.gam<-function(mu,gam,invgam,u,ub,margmodel)
{ if(margmodel=="nb1")
{ pr<-1/(gam+1)
j<-0:ub
s<-sum(j/(mu+j*gam)/(mu+j*gam)*(1-u))
(s-invgam*invgam*log(1+gam)+invgam*pr)*mu
}
else {0}
}
# Calculating the truncation value for the univariate distribution
# input:
# mu the mean parameter of the univariate distribution.
# gam the parameter gamma of the negative binomial distribution.
# margmodel indicates the marginal model. Choices are "poisson" for Poisson, "bernoulli" for
# Bernoulli, and "nb1", "nb2" for the NB1 and NB2 parametrizations of negative
# binomial in Cameron and Trivedi (1998).
# output:
# the truncation value--upper bound
truncation<-function(mu,gam,margmodel)
{ if(margmodel=="poisson")
{ ub<-round(max(10,mu+7*sqrt(mu),na.rm=T))
}
else
{ if(margmodel=="bernoulli") ub<-1
else
{ if(margmodel=="nb1")
{ pr<-1/(gam+1)
v<-mu/pr
ub<-round(max(10,mu+10*sqrt(v),na.rm=T))
}
else
{ pr<-1/(mu*gam+1)
v<-mu/pr
ub<-round(max(10,mu+7*sqrt(v),na.rm=T))
}}}
ub
}
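# Example for truncation (sketch): truncation(2, gam=NULL, margmodel="poisson")
# gives round(max(10, 2+7*sqrt(2))) = 12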
# approximation of bivariate normal cdf (Johnson&Kotz, 1972)
# For rho<=0.4 the series is truncated at the rho^3 term.
# For larger rho it is truncated at the rho^5 term.
# input:
# r the normal copula parameter
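# x1,x2 the vectors of standard normal scores (cutpoints) for the two margins
# x1s,x2s,x1c,x2c,x1f,x2f their element-wise squares, cubes and fourth powers
# t1 the outer product of the univariate normal cdfs at x1 and x2
# t2 the outer product of the univariate normal densities at x1 and x2
# (argument descriptions inferred from the calls in weightMat and weightMat.ord)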
approxbvncdf<-function(r,x1,x2,x1s,x2s,x1c,x2c,x1f,x2f,t1,t2)
{ r2=r*r; r3=r*r2; r4=r2*r2; r5=r4*r
tem3=r+r2*outer(x1,x2)/2+r3*outer(x1s-1,x2s-1)/6
if(r>0.4)
{ tem4=tem3+r4*outer(x1c-3*x1, x2c-3*x2)/24
tem5=tem4+r5*outer(x1f-6*x1s+3, x2f-6*x2s+3)/120
pr.a5=t1+t2*tem5
pr.a5
} else { pr.a3=t1+t2*tem3; pr.a3}
}
# covariance matrix of the scores Omega_i
# input:
# scnu the matrix of the score functions with respect to nu
# scgam the matrix of the score functions with respect to gam
# index the bivariate pair
# pmf the matrix of rectangle probabilities
# margmodel indicates the marginal model. Choices are "poisson" for Poisson, "bernoulli" for
# Bernoulli, and "nb1", "nb2" for the NB1 and NB2 parametrizations of negative
# binomial in Cameron and Trivedi (1998).
scoreCov<-function(scnu,scgam,pmf,index,margmodel)
{ j1<-index[1]
j2<-index[2]
cov11<-t(scnu[,j1])%*%pmf%*%scnu[,j2]
if(margmodel=="bernoulli" | margmodel=="poisson")
{ cov11 }
else
{ cov12<-t(scnu[,j1])%*%pmf%*%scgam[,j2]
cov21<-t(scgam[,j1])%*%pmf%*%scnu[,j2]
cov22<-t(scgam[,j1])%*%pmf%*%scgam[,j2]
matrix(c(cov11,cov12,cov21,cov22),2,2)
}
}
# select the present column and lines in Omega, Delta and X matrices
# for unbalanced data
# input:
# tvec a vector of the time for an individual
# margmodel indicates the marginal model. Choices are "poisson" for Poisson, "bernoulli" for
# Bernoulli, and "nb1", "nb2" for the NB1 and NB2 parametrizations of negative
# binomial in Cameron and Trivedi (1998).
subselect<-function(tvec,margmodel)
{ if(margmodel=="bernoulli" | margmodel=="poisson") sel<-tvec
else
{ sel<-NULL
for(i in 1:length(tvec))
{ tm<-2*tvec[i]
k<-c(tm-1,tm)
sel<-c(sel,k)
}}
sel
}
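# Example: subselect(c(1,3), "nb2") returns c(1,2,5,6)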
# weight matrix fixed at values from the CL1 estimator
# input:
# b the regression coefficients
# gam the gamma parameter
# xdat the matrix of covariates (use the constant 1 for the first covariate)
# ydat the vector with the response
# id the vector with the id
# tvec the time related vector
# rh the vector with CL1 estimates
# margmodel indicates the marginal model. Choices are "poisson" for Poisson,
# "bernoulli" for Bernoulli, and "nb1", "nb2" for the NB1 and NB2
# parametrizations of negative binomial in Cameron and Trivedi (1998)
# link is the link function. Choices are "log" for the log link function,
# "logit" for the logit link function, and "probit" for the probit link function.
# corstr indicates the latent correlation structure of normal copula.
# Choices are "exch", "ar", and "unstr" for exchangeable, ar(1) and
# unstructured correlation structure, respectively.
# output: A list containing the following components:
# omega the array with the Omega matrices
# delta the array with the Delta matrices
# X the array with the X matrices
weightMat<-function(b,gam,rh,xdat,ydat,id,tvec,margmodel,corstr,link="log")
{ if(margmodel=="nb1" | margmodel=="nb2" | margmodel=="poisson") link="log"
if(margmodel=="bernoulli" & link!="probit") link="logit"
if(margmodel=="nb1" | margmodel=="nb2") invgam<-1/gam else invgam<-NULL
uid<-unique(id)
d<-id.size(id)
maxd<-max(d)
q<-length(gam)
lid<-length(uid)
dim<-dim(xdat)
n<-1:dim[1]
p<-dim[2]
pairs<-maxpairs(d)
tvec<-id.time(tvec,d)
omega<-array(NA,c(maxd*(1+q),maxd*(1+q),lid))
X<-array(NA,c(p+q,maxd*(1+q),lid))
delta<-array(NA,c(maxd*(1+q),maxd*(1+q),lid))
dom<-maxd*(1+q)
pos<-seq(1,dom-1,by=2) #not used for binary
m<-0
for(i in uid)
{ m<-m+1
cases<-id==i
irow=n[cases]
newx<-matrix(NA,maxd,p)
newyi<-rep(NA,maxd)
newmui<-rep(NA,maxd)
ti<-tvec[irow]
x<-xdat[cases,]
mui<-linked.mu(x,b,link)
newx[ti,]<-x
newmui[ti]<-mui
ub<-truncation(newmui,gam,margmodel)
du<-scnu<-scgam<-matrix(NA,1+ub,maxd)
for(j in ti)
{ du[,j]<-dmargmodel(0:ub,newmui[j],gam,invgam,margmodel)
scnu[,j]<-derlik.nu(newmui[j],gam,invgam,ub,margmodel,link)
if(margmodel=="nb1" | margmodel=="nb2")
{ scgam[,j]<-derlik.gam(newmui[j],gam,invgam,ub,margmodel) }
}
u<-apply(du,2,cumsum)
z<-qnorm(u)
z[is.nan(z)]<-7
z[z>4&margmodel=="bernoulli"]<-7
z<-rbind(-7,z)
pz<-pnorm(z)
dz<-dnorm(z)
zs<-z*z
zc<-z*zs
zf<-zs*zs
xi<-NULL
if(margmodel=="bernoulli" | margmodel=="poisson")
{ diagonali<-rep(NA,maxd)
} else {
diagonali<-array(NA,c(2,2,maxd)) }
for(j in 1:maxd)
{ f1<-fisher.nu(newmui[j],gam,invgam,u[,j],ub,margmodel,link)
if(margmodel=="nb1" | margmodel=="nb2")
{ temp<-cbind(newx[j,],0)
xi<-cbind(xi,rbind(temp,c(0,1)))
f2<-fisher.gam(newmui[j],gam,invgam,u[,j],ub,margmodel)
f3<-fisher.nu.gam(newmui[j],gam,invgam,u[,j],ub,margmodel)
diagonali[,,j]<-matrix(c(f1,f3,f3,f2),2,2)
}
else
{ temp<-newx[j,]
xi<-cbind(xi,temp)
diagonali[j]<-f1
}
}
if(margmodel=="bernoulli" | margmodel=="poisson")
{ deltai<-diag(diagonali)
offi<-rep(NA,dim(pairs)[1])
} else {
deltai<-matrix(0,dom,dom)
minus<-0
for(j in pos)
{ deltai[j:(j+1),j:(j+1)]<-diagonali[,,j-minus]
minus<-minus+1
}
offi<-array(NA,c(2,2,dim(pairs)[1]))
}
for(k in 1:dim(pairs)[1])
{ k1<-pairs[k,][1]
k2<-pairs[k,][2]
if((sum(k1==ti)==1) & (sum(k2==ti)==1))
{ x1=z[,k1]; x2=z[,k2]
x1s=zs[,k1]; x2s=zs[,k2]
x1c=zc[,k1]; x1f=zf[,k1]
x2c=zc[,k2]; x2f=zf[,k2]
t1=outer(pz[,k1],pz[,k2])
t2=outer(dz[,k1],dz[,k2])
if(corstr=="exch")
{ cdf<-approxbvncdf(rh,x1,x2,x1s,x2s,x1c,x2c,x1f,x2f,t1,t2) }
else
{ if(corstr=="ar")
{ cdf<-approxbvncdf(rh^(k2-k1),x1,x2,x1s,x2s,x1c,x2c,x1f,x2f,t1,t2) }
else
{ # corstr=="unstr"
cdf<-approxbvncdf(rh[k],x1,x2,x1s,x2s,x1c,x2c,x1f,x2f,t1,t2)
}}
cdf1=apply(cdf,2,diff)
pmf=apply(t(cdf1),2,diff)
pmf=t(pmf)
if(margmodel=="bernoulli" | margmodel=="poisson")
{offi[k]<-scoreCov(scnu,scgam,pmf,pairs[k,],margmodel)}
else {offi[,,k]<-scoreCov(scnu,scgam,pmf,pairs[k,],margmodel)}
}
}
omegai<-deltai
if(margmodel=="bernoulli" | margmodel=="poisson")
{ for(j in 1:dim(pairs)[1])
{ omegai[pairs[j,1],pairs[j,2]]<-offi[j]
omegai[pairs[j,2],pairs[j,1]]<-offi[j]
}}
else
{ ch1<-0
ch2<-0
for(j in 1:(maxd-1))
{ for(r in pos[-(1:j)])
{ omegai[(j+ch1):(j+1+ch1),r:(r+1)]<-offi[,,(j+ch2-ch1)]
omegai[r:(r+1),(j+ch1):(j+1+ch1)]<-t(offi[,,(j+ch2-ch1)])
ch2<-ch2+1
}
ch1<-ch1+1
}}
X[,,m]<-xi
delta[,,m]<-deltai
omega[,,m]<-omegai
}
list(omega=omega,X=X,delta=delta)
}
# the weighted scores equations
# input:
# param the vector of regression and not regression parameters
# xdat the matrix of covariates (use the constant 1 for the first covariate)
# ydat the vector with the response
# id the vector with the id
# tvec the time related vector
# margmodel indicates the marginal model. Choices are "poisson" for Poisson,
# "bernoulli" for Bernoulli, and "nb1", "nb2" for the NB1 and NB2
# parametrizations of negative binomial in Cameron and Trivedi (1998)
# link is the link function. Choices are "log" for the log link function,
# "logit" for the logit link function, and "probit" for the probit link function.
# corstr indicates the latent correlation structure of normal copula.
# Choices are "exch", "ar", and "unstr" for exchangeable, ar(1) and
# unstructured correlation structure, respectively.
# WtScMat is a list containing the following components:
# omega the array with the Omega matrices
# delta the array with the Delta matrices
# X the array with the X matrices
# output
# the weighted scores equations
wtsc<-function(param,WtScMat,xdat,ydat,id,tvec,margmodel,link="log")
{ if(margmodel=="nb1" | margmodel=="nb2" | margmodel=="poisson") link="log"
if(margmodel=="bernoulli" & link!="probit") link="logit"
uid<-unique(id)
d<-id.size(id)
maxd<-max(d)
dim<-dim(xdat)
n<-1:dim[1]
p<-dim[2]
tvec<-id.time(tvec,d)
b<-param[1:p]
if(p<length(param)) {gam<-param[p+1]; invgam<-1/gam }
g<-0
m<-0
for(i in uid)
{ cases<-id==i
irow=n[cases]
m<-m+1
x<-xdat[cases,]
mu<-linked.mu(x,b,link)
ub<-truncation(mu,gam,margmodel)
scnu<-scgam<-matrix(NA,ub+1,d[m])
for(j in 1:d[m])
{ scnu[,j]<-derlik.nu(mu[j],gam,invgam,ub,margmodel,link)
if(margmodel=="nb1" | margmodel=="nb2")
{ scgam[,j]<-derlik.gam(mu[j],gam,invgam,ub,margmodel) }
}
y<-ydat[irow]
sci<-NULL
for(j in 1:d[m])
{ if(y[j]>ub)
{ scnui<-iderlik.nu(mu[j],gam,invgam,y[j],margmodel,link)
if(margmodel=="nb1" | margmodel=="nb2")
{ scgami<-iderlik.gam(mu[j],gam,invgam,y[j],margmodel)
} else {
scgami<-NULL}
}
else {
scnui<-scnu[y[j]+1,j]
if(margmodel=="nb1" | margmodel=="nb2")
{ scgami<-scgam[y[j]+1,j]
} else {
scgami<-NULL}}
sci<-c(sci,c(scnui,scgami))
}
ti<-tvec[irow]
seli<-subselect(ti,margmodel)
Xi<-WtScMat$X[,,m]
Xi<-Xi[,seli]
deltai<-WtScMat$delta[,,m]
deltai<-deltai[seli,seli]
omegai<-WtScMat$omega[,,m]
omegai<-omegai[seli,seli]
gi<-Xi%*%t(deltai)%*%solve(omegai,sci)
g<-g+gi
}
g
}
# solving the weighted scores equations
# input:
# start the starting values (IEE estimates) for the vector of
# regression and not regression parameters
# xdat the matrix of covariates (use the constant 1 for the first covariate)
# ydat the vector with the response
# id the vector with the id
# tvec the time related vector
# margmodel indicates the marginal model. Choices are "poisson" for Poisson,
# "bernoulli" for Bernoulli, and "nb1", "nb2" for the NB1 and NB2
# parametrizations of negative binomial in Cameron and Trivedi (1998)
# link is the link function. Choices are "log" for the log link function,
# "logit" for the logit link function, and "probit" for the probit link function.
# corstr indicates the latent correlation structure of normal copula.
# Choices are "exch", "ar", and "unstr" for exchangeable, ar(1) and
# unstructured correlation structure, respectively.
# WtScMat is a list containing the following components:
# omega the array with the Omega matrices
# delta the array with the Delta matrices
# X the array with the X matrices
# output:
# the weighted scores estimates
solvewtsc<-function(start,WtScMat,xdat,ydat,id,tvec,margmodel,link="log")
{ multiroot(f=wtsc,start,atol=1e-4,rtol=1e-4,ctol=1e-4,
WtScMat=WtScMat,xdat=xdat,ydat=ydat,id=id,tvec=tvec,margmodel=margmodel,link=link) }
# inverse Godambe matrix with Delta and Omega evaluated at IEE estimator
# input:
# b the regression coefficients
# gam the gamma parameter
# xdat the matrix of covariates (use the constant 1 for the first covariate)
# ydat the vector with the response
# id the vector with the id
# tvec the time related vector
# rh the vector with CL1 estimates
# WtScMat a list containing the following components:
# omega the array with the Omega matrices
# delta the array with the Delta matrices
# X the array with the X matrices
# margmodel indicates the marginal model. Choices are "poisson" for Poisson,
# "bernoulli" for Bernoulli, and "nb1", "nb2" for the NB1 and NB2
# parametrizations of negative binomial in Cameron and Trivedi (1998)
# link is the link function. Choices are "log" for the log link function,
# "logit" for the logit link function, and "probit" for the probit link function.
# output:
# the inverse Godambe matrix
godambe<-function(param,WtScMat,xdat,ydat,id,tvec,margmodel,link="log")
{ if(margmodel=="nb1" | margmodel=="nb2" | margmodel=="poisson") link="log"
if(margmodel=="bernoulli" & link!="probit") link="logit"
uid<-unique(id)
d<-id.size(id)
maxd<-max(d)
dim<-dim(xdat)
n<-1:dim[1]
p<-dim[2]
tvec<-id.time(tvec,d)
b<-param[1:p]
if(margmodel=="nb1" | margmodel=="nb2")
{ gam<-param[p+1]
invgam<-1/gam
} else {
gam<-invgam<-NULL}
v<-v1<-v2<-v3<-0
fv<-fv1<-fv2<-fv3<-0
m<-0
for(i in uid)
{ m<-m+1
cases<-id==i
irow=n[cases]
newx<-matrix(NA,max(d),length(b))
newy<-rep(NA,maxd)
newmui<-rep(NA,maxd)
ti<-tvec[irow]
x<-xdat[cases,]
mui<-linked.mu(x,b,link)
newx[ti,]<-x
newmui[ti]<-mui
ub<-truncation(newmui,gam,margmodel)
scnu<-scgam<-matrix(NA,1+ub,maxd)
for(j in ti)
{ scnu[,j]<-derlik.nu(newmui[j],gam,invgam,ub,margmodel,link)
if(margmodel=="nb1" | margmodel=="nb2")
{ scgam[,j]<-derlik.gam(newmui[j],gam,invgam,ub,margmodel) }
}
y<-ydat[irow]
newy[ti]<-y
sci<-NULL
for(j in 1:maxd)
{ if(sum(newy[j]>ub,na.rm=T)==1)
{ scnui<-iderlik.nu(newmui[j],gam,invgam,newy[j],margmodel,link)
if(margmodel=="nb1" | margmodel=="nb2")
{ scgami<-iderlik.gam(newmui[j],gam,invgam,newy[j],margmodel)
} else {
scgami<-NULL}
}
else {
scnui<-scnu[newy[j]+1,j]
if(margmodel=="nb1" | margmodel=="nb2")
{ scgami<-scgam[newy[j]+1,j]
} else {
scgami<-NULL}}
sci<-c(sci,c(scnui,scgami))
}
sci<-sci[!is.na(sci)]
seli<-subselect(ti,margmodel)
Xi<-WtScMat$X[,,m]
Xi<-Xi[,seli]
fdeltai<-WtScMat$delta[,,m]
fdeltai<-fdeltai[seli,seli]
fomegai<-WtScMat$omega[,,m]
fomegai<-fomegai[seli,seli]
fci<-fdeltai%*%t(Xi)
nrhs1=ncol(fdeltai)
nrhs2=ncol(fci)
tem.lin=solve(fomegai,cbind(fdeltai,fci))
finvai=t(tem.lin[,1:nrhs1])
xi.invai=Xi %*% finvai
fai<-solve(finvai)
tem=solve(t(fai),t(Xi))
fv1i<-xi.invai %*% fci
fv2i<-xi.invai %*% (sci%*% t(sci)) %*% tem
fv3i<-t(fci) %*% tem
fv1<-fv1+fv1i
fv2<-fv2+fv2i
fv3<-fv3+fv3i
}
solve(fv1,fv2)%*%solve(fv3)
}
# the weighted scores wrapper function: handles all the steps in the weighted scores
# xdat the matrix of covariates (use the constant 1 for the first covariate)
# ydat the vector with the response
# id the vector with the id
# tvec the time related vector
# rh the vector with CL1 estimates
# WtScMat a list containing the following components:
# omega the array with the Omega matrices
# delta the array with the Delta matrices
# X the array with the X matrices
# margmodel indicates the marginal model. Choices are "poisson" for Poisson,
# "bernoulli" for Bernoulli, and "nb1", "nb2" for the NB1 and NB2
# parametrizations of negative binomial in Cameron and Trivedi (1998)
# corstr indicates the latent correlation structure of normal copula.
# Choices are "exch", "ar", and "unstr" for exchangeable, ar(1) and
# unstructured correlation structure, respectively.
# link is the link function. Choices are "log" for the log link function,
# "logit" for the logit link function, and "probit" for the probit link function.
# iprint indicates printing of some intermediate results, default FALSE
wtsc.wrapper<-function(xdat,ydat,id,tvec,margmodel,corstr,link="log",iprint=FALSE)
{ if(margmodel=="nb1" | margmodel=="nb2" | margmodel=="poisson") link="log"
if(margmodel=="bernoulli" & link!="probit") link="logit"
i.est<-iee(xdat,ydat,margmodel,link)
if(iprint)
{ cat("\niest: IEE estimates\n")
print(c(i.est$reg,i.est$gam))
}
est.rho<-cl1(b=i.est$reg,gam=i.est$gam,xdat,ydat,id,tvec,margmodel,corstr,link)
if(iprint)
{ cat("\nest.rho: CL1 estimates\n")
print(est.rho$e)
cat("\nest.rho: CL1 likelihood\n")
print(-est.rho$m)
}
WtScMat<-weightMat(b=i.est$reg,gam=i.est$gam,rh=est.rho$e,
xdat,ydat,id,tvec,margmodel,corstr,link)
ws<-solvewtsc(start=c(i.est$reg,i.est$gam),WtScMat,xdat,ydat,id,
tvec,margmodel,link)
if(iprint)
{ cat("ws=parameter estimates\n")
print(ws$r)
}
acov<-godambe(ws$r,WtScMat,xdat,ydat,id,tvec,margmodel,link)
se<-sqrt(diag(acov))
if(iprint)
{ cat("\nacov: inverse Godambe matrix with W based on first-stage wt matrices\n")
print(acov)
cat("\nse: robust standard errors\n")
print(se)
res<-round(cbind(ws$r,se),3)
cat("\nres: Weighted scores estimates and standard errors\n")
print(res)
}
list(IEEest=c(i.est$reg,i.est$gam),CL1est=est.rho$e,CL1lik=-est.rho$m,WSest=ws$r, asympcov=acov)
}
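# Example for wtsc.wrapper (sketch; 'xdat', 'ydat', 'id', 'tvec' are hypothetical
# longitudinal data objects):
# out <- wtsc.wrapper(xdat, ydat, id, tvec, margmodel="nb2", corstr="exch", iprint=TRUE)
# out$WSest                  # weighted scores estimates
# sqrt(diag(out$asympcov))   # robust standard errors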
|
/scratch/gouwar.j/cran-all/cranData/weightedScores/R/wtsc-all.r
|
# Density of the univariate marginal distribution for ordinal regression
# input:
# y the vector of ordinal response categories (1,...,K).
# mu the linear predictor (x'b) of the univariate ordinal model.
# gam the vector of cutpoints.
# link the link function ("probit" or "logit").
# output:
# the density (category probabilities) of the univariate marginal distribution
dmargmodel.ord<-function(y,mu,gam,link)
{ cuts<-c(-10,gam,10)
lb=cuts[y]+mu
ub=cuts[y+1]+mu
if(link=="probit") res<-pnorm(ub)-pnorm(lb) else res<-plogis(ub)-plogis(lb)
res[y<1]<-0
res
}
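# Example for dmargmodel.ord (sketch): category probabilities for K=3 categories
# with cutpoints (-1, 1), linear predictor 0.5 and probit link:
# dmargmodel.ord(y=1:3, mu=0.5, gam=c(-1,1), link="probit")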
# CDF of the univariate marginal distribution for ordinal regression
# input:
# y the vector of ordinal response categories (1,...,K).
# mu the linear predictor (x'b) of the univariate ordinal model.
# gam the vector of cutpoints.
# link the link function ("probit" or "logit").
# output:
# the cdf of the univariate marginal distribution
pmargmodel.ord<-function(y,mu,gam,link)
{ cuts<-c(-10,gam,10)
ub=cuts[y+1]+mu # for mprobit
#ub=cuts[y+1]-mu # for polr
if(link=="probit") res<-pnorm(ub) else res<-plogis(ub)
res[y<1]<-0
res
}
# Bivariate composite likelihood for multivariate normal copula with ordinal regression.
# input:
# r the vector of normal copula parameters
# b the regression coefficients
# gam the vector of cutpoints
# xdat the matrix of covariates (use the constant 1 for the first covariate)
# ydat the vector with the response
# id the vector with the id
# tvec the time related vector
# link is the link function. Choices are "logit" for the logit link function,
# and "probit" for the probit link function.
# corstr indicates the latent correlation structure of normal copula.
# Choices are "exch", "ar", and "unstr" for exchangeable, ar(1) and
# unstructured correlation structure, respectively.
# output:
# negative bivariate composite likelihood for multivariate normal copula
# with ordinal regression.
bcl.ord<-function(r,b,gam,xdat,ydat,id,tvec,corstr,link)
{ s<-0
uid<-unique(id)
d<-id.size(id)
maxd<-max(d)
tvec<-id.time(tvec,d)
n<-1:length(ydat) #### change here
pairs<-maxpairs(d)
rmat<-cormat(maxd,r,pairs,corstr)
c1<-(r[1] > 1 | r[1] < (-1/(maxd-1)))
c2<-(r[1] > 1 | r[1] < -1)
c3<-(det(rmat)<0 | min(rmat)< -1 | max(rmat)>1)
if((corstr=="exch" & c1) | (corstr=="ar" & c2 ) | (corstr=="unstr" & c3)) {s<-1e10}
else {
for(i in uid)
{ cases<-id==i
irow=n[cases]
yi<-ydat[irow]
ti<-tvec[irow]
newyi<-rep(NA,maxd)
newmui<-rep(NA,maxd)
newyi[ti]<-yi
if(is.vector(xdat)) x<-xdat[cases] else x<-xdat[cases,] ###change here
mui<-ordreg.mu(x,b)
newmui[ti]<-mui
vlow<-pmargmodel.ord(newyi-1,newmui,gam,link)
tem<-dmargmodel.ord(newyi,newmui,gam,link)
vupp<-vlow+tem
zlow=qnorm(vlow)
zupp=qnorm(vupp)
for(j in 1:dim(pairs)[1])
{ k1<-pairs[j,][1]
k2<-pairs[j,][2]
if((sum(k1==ti)==1) & (sum(k2==ti)==1))
{ if(corstr=="exch")
{ s<-s +bivlik(zlow[c(k1,k2)],zupp[c(k1,k2)],r) }
else
{ if(corstr=="ar")
{ s<-s +bivlik(zlow[c(k1,k2)],zupp[c(k1,k2)],r^(k2-k1)) }
else
{ # corstr=="unstr"
s<-s +bivlik(zlow[c(k1,k2)],zupp[c(k1,k2)],r[j])
}}}
else { s<-s }
}
}
-s
}
}
# optimization routine for composite likelihood for MVN copula
# b the regression coefficients
# gam the gamma parameter
# xdat the matrix of covariates (use the constant 1 for the first covariate)
# ydat the vector with the response
# id the vector with the id
# tvec the time related vector
# link is the link function. Choices are
# "logit" for the logit link function, and "probit" for the probit link function.
# corstr indicates the latent correlation structure of normal copula.
# Choices are "exch", "ar", and "unstr" for exchangeable, ar(1) and
# unstructured correlation structure, respectively.
# output: A list containing the following components:
# minimum the value of the estimated minimum of CL1 for MVN copula
# estimate the CL1 estimates
# gradient the gradient at the estimated minimum of CL1
# code an integer indicating why the optimization process terminated, see nlm.
cl1.ord<-function(b,gam,xdat,ydat,id,tvec,corstr,link)
{ d<-id.size(id)
pairs<-maxpairs(d)
if(corstr=="unstr")
{ nom<-dim(pairs)[1]
nlm(bcl.ord,rep(0.1,nom),b,gam,xdat,ydat,id,tvec,corstr,link)
}
else
{ nlm(bcl.ord,0.1,b,gam,xdat,ydat,id,tvec,corstr,link) }
}
# derivative of the ordinal loglikelihood with respect to gamma
# input:
# gam the vector of cutpoints
# mu the linear predictor (x'b)
# u the index of the cutpoint for which the derivative is taken
# link the link function ("probit" or "logit")
# output:
# the vector (over categories 1,...,K) of derivatives of the ordinal
# loglikelihood with respect to the u-th cutpoint
derlik.gam.ord<-function(mu,gam,u,link)
{ K<-length(gam)+1
k<-1:K
cuts<-c(-10,gam,10)
lb=cuts[k]+mu
ub=cuts[k+1]+mu
if(link=="probit") { dlatent=dnorm; platent=pnorm } else { dlatent=dlogis; platent=plogis }
dlatentub<-dlatent(ub)
dlatentlb<-dlatent(lb)
den<-platent(ub)-platent(lb)
res<-rep(NA,K)
for(i in 1:K)
{ if(u==i)
{ res[i]=dlatentub[i]/den[i] }
else
{ if(u==i-1)
{ res[i]=-dlatentlb[i]/den[i] }
else {res[i]=0}}
}
res
}
# derivative of the ordinal loglikelihood with respect to gamma
# input:
# gam the vector of cutpoints
# mu the linear predictor (x'b)
# y the observed ordinal response category
# u the index of the cutpoint for which the derivative is taken
# link the link function ("probit" or "logit")
# output:
# the derivative of the ordinal loglikelihood with respect to the u-th cutpoint
iderlik.gam.ord<-function(mu,gam,y,u,link)
{ cuts<-c(-10,gam,10)
lb=cuts[y]+mu
ub=cuts[y+1]+mu
if(link=="probit") { dlatent=dnorm; platent=pnorm } else { dlatent=dlogis; platent=plogis }
den<-platent(ub)-platent(lb)
if(u==y) dlatent(ub)/den
else if(u==y-1) -dlatent(lb)/den else 0
}
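# derivatives of the standard normal and logistic densities,
# used in the Fisher information calculations below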
der.dnorm<-function(x)
{ -x*dnorm(x) }
der.dlogis<-function(x)
{ expx=exp(x)
expx*(1-expx)/(1+expx)^3
}
# minus expectation of the second derivative of the marginal ordinal loglikelihood
# with respect to gamma
# input:
# mu the linear predictor (x'b)
# gam the vector of cutpoints
# u,v the indices of the two cutpoints
# link the link function ("probit" or "logit")
# output:
# the minus expectation of the second derivative of the ordinal loglikelihood
# with respect to the u-th and v-th cutpoints
fisher.gam.ord<-function(mu,gam,u,v,link)
{ cuts<-c(-10,gam,10)
K<-length(gam)+1
k<-1:K
lb=cuts[k]+mu
ub=cuts[k+1]+mu
if(link=="probit") { dlatent=dnorm; platent=pnorm; der.dlatent=der.dnorm } else {
dlatent=dlogis; platent=plogis; der.dlatent=der.dlogis }
den<-platent(ub)-platent(lb)
dlatentub<-dlatent(ub)
dlatentlb<-dlatent(lb)
der.dlatentub<-der.dlatent(ub)
der.dlatentlb<-der.dlatent(lb)
h<-rep(NA,K)
for(k in 1:K)
{ if(u==k & v==k)
{ num1<-der.dlatentub[k]
num2<-dlatentub[k]
tem<-num2/den[k]
h[k]<--num1/den[k]+tem*tem
}
else
{ if((u==k & v==k-1) | (u==k-1 & v==k))
{ h[k]<--dlatentub[k]*dlatentlb[k]/den[k]/den[k]
}
else
{ if(u==k-1 & v==k-1)
{ num1<-der.dlatentlb[k]
num2<-dlatentlb[k]
tem<-num2/den[k]
h[k]<-num1/den[k]+tem*tem
} else h[k]<-0}}
}
sum(h*den)
}
# the mean values of the univariate marginal distribution
# corresponding to the used link function
# input:
# x the matrix of the covariates
# b the vector with the regression coefficients
# output:
# the mean values of the univariate marginal distribution
ordreg.mu<-function(x,b)
{ if(length(b)!=1) mu<-x %*% b else mu<-x*b }
# select the present column and lines in Omega, Delta and X matrices
# for unbalanced data
# input:
# tvec a vector of the time for an individual
subselect.ord<-function(tvec,q)
{ sel<-NULL
for(i in 1:length(tvec))
{ tm<-tvec[i]
k<-((tm-1)*q+1):((tm-1)*q+q)
sel<-c(sel,k)
}
sel
}
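# Example: subselect.ord(tvec=c(1,3), q=2) returns c(1,2,5,6)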
# Calculating the number of categories
# input:
# gam the cutpoints
# output:
# the number of categories
noCategories<-function(gam)
{ length(gam)+1 }
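# Example: noCategories(c(-1, 0.5, 2)) returns 4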
# covariance matrix of the scores Omega_i
# input:
# scgam the array of the score functions with respect to gam
# index the bivariate pair
# pmf the matrix of rectangle probabilities
scoreCov.ord<-function(scgam,pmf,index)
{ j1<-index[1]
j2<-index[2]
q<-dim(scgam)[2]
cov22<-matrix(NA,q,q)
for(i1 in 1:q)
{ for(i2 in 1:q)
{ cov22[i1,i2]<-t(scgam[,i1,j1])%*%pmf%*%scgam[,i2,j2] }
}
cov22
}
# weight matrix fixed at values from the CL1 estimator
# input:
# b the regression coefficients
# gam the vector of cutpoints
# xdat the matrix of covariates (use the constant 1 for the first covariate)
# ydat the vector with the response
# id the vector with the id
# tvec the time related vector
# rh the vector with CL1 estimates
# link is the link function. Choices are
# "logit" for the logit link function, and "probit" for the probit link function.
# corstr indicates the latent correlation structure of normal copula.
# Choices are "exch", "ar", and "unstr" for exchangeable, ar(1) and
# unstructured correlation structure, respectively.
# output: A list containing the following components:
# omega the array with the Omega matrices
# delta the array with the Delta matrices
# X the array with the X matrices
weightMat.ord<-function(b,gam,rh,xdat,ydat,id,tvec,corstr,link)
{ uid<-unique(id)
d<-id.size(id)
maxd<-max(d)
q<-length(gam)
lid<-length(uid)
if(is.matrix(xdat))
{ dim<-dim(xdat)
n<-1:dim[1]
p<-dim[2]
} else {n<-1:length(xdat); p<-1}
pairs<-maxpairs(d)
tvec<-id.time(tvec,d)
omega<-array(NA,c(maxd*q,maxd*q,lid))
X<-array(NA,c(p+q,maxd*q,lid))
delta<-array(NA,c(maxd*q,maxd*q,lid))
dom<-maxd*q
if(q>1) pos<-seq(1,dom-1,by=q) else pos<-seq(1,dom) #for binary
m<-0
for(i in uid)
{ m<-m+1
cases<-id==i
irow=n[cases]
newyi<-rep(NA,maxd)
newmui<-rep(NA,maxd)
ti<-tvec[irow]
if(is.matrix(xdat))
{ x<-xdat[cases,]
newx<-matrix(NA,maxd,p)
newx[ti,]<-x} else {
x<-xdat[cases]
newx<-rep(NA,maxd)
newx[ti]<-x }
mui<-ordreg.mu(x,b)
newmui[ti]<-mui
ub<-noCategories(gam)
du<-matrix(NA,ub,maxd)
scgam<-array(NA,c(ub,ub-1,maxd))
for(j in ti)
{ du[,j]<-dmargmodel.ord(1:ub,newmui[j],gam,link)
for(k in 1:(ub-1))
{ scgam[,k,j]<-derlik.gam.ord(newmui[j],gam,k,link) }
}
u<-apply(du,2,cumsum)
z<-qnorm(u)
z[is.nan(z)]<-7
z[z>10]<-7
z<-rbind(-7,z)
pz<-pnorm(z)
dz<-dnorm(z)
zs<-z*z
zc<-z*zs
zf<-zs*zs
xi<-NULL
diagonali<-array(NA,c(q,q,maxd))
for(j in 1:maxd)
{ if(is.matrix(newx))
{ temp1<-matrix(rep(newx[j,],each=q),q) } else {
temp1<-matrix(rep(newx[j],each=q),q) }
xi<-cbind(xi,t(cbind(temp1,diag(q))))
fisher<-matrix(NA,ub-1,ub-1)
for(k1 in 1:(ub-1))
{ for(k2 in 1:(ub-1))
{ fisher[k1,k2]<-fisher.gam.ord(newmui[j],gam,k1,k2,link)
}
}
diagonali[,,j]<-fisher
}
deltai<-matrix(0,dom,dom)
minus<-0
for(j in pos)
{ deltai[j:(j+q-1),j:(j+q-1)]<-diagonali[,,j-minus]
if(q>1) minus<-minus+q-1 else minus<-0
}
offi<-array(NA,c(q,q,dim(pairs)[1]))
for(k in 1:dim(pairs)[1])
{ k1<-pairs[k,][1]
k2<-pairs[k,][2]
if((sum(k1==ti)==1) & (sum(k2==ti)==1))
{ x1=z[,k1]; x2=z[,k2]
x1s=zs[,k1]; x2s=zs[,k2]
x1c=zc[,k1]; x1f=zf[,k1]
x2c=zc[,k2]; x2f=zf[,k2]
t1=outer(pz[,k1],pz[,k2])
t2=outer(dz[,k1],dz[,k2])
if(corstr=="exch")
{ cdf<-approxbvncdf(rh,x1,x2,x1s,x2s,x1c,x2c,x1f,x2f,t1,t2) } else {
if(corstr=="ar")
{ cdf<-approxbvncdf(rh^(k2-k1),x1,x2,x1s,x2s,x1c,x2c,x1f,x2f,t1,t2) } else { # corstr=="unstr"
cdf<-approxbvncdf(rh[k],x1,x2,x1s,x2s,x1c,x2c,x1f,x2f,t1,t2)
}
}
cdf1=apply(cdf,2,diff)
pmf=apply(t(cdf1),2,diff)
pmf=t(pmf)
offi[,,k]<-scoreCov.ord(scgam,pmf,pairs[k,])
}
}
omegai<-deltai
ch1<-0
ch2<-0
ch3<-0
if(d[m]>1)
{
for(j in 1:(d[m]-1))
{ for(r in pos[-(1:j)])
{ #print(c(j,r))
omegai[(1+(j-1)*q):(j*q),r:(r+q-1)]<-offi[,,(j+ch2-ch1)]
omegai[r:(r+q-1),(1+(j-1)*q):(j*q)]<-t(offi[,,(j+ch2-ch1)])
ch2<-ch2+1
}
ch1<-ch1+1
}
}
X[,,m]<-xi
delta[,,m]<-deltai
omega[,,m]<-omegai
}
list(omega=omega,X=X,delta=delta)
}
# the weighted scores equations
# input:
# param the vector of regression and not regression parameters
# xdat the matrix of covariates
# ydat the vector with the response
# id the vector with the id
# tvec the time related vector
# link is the link function. Choices are
# "logit" for the logit link function, and "probit" for the probit link function.
# corstr indicates the latent correlation structure of normal copula.
# Choices are "exch", "ar", and "unstr" for exchangeable, ar(1) and
# unstructured correlation structure, respectively.
# WtScMat is a list containing the following components:
# omega the array with the Omega matrices
# delta the array with the Delta matrices
# X the array with the X matrices
# output
# the weighted scores equations
wtsc.ord<-function(param,WtScMat,xdat,ydat,id,tvec,link)
{ uid<-unique(id)
d<-id.size(id)
maxd<-max(d)
if(is.matrix(xdat))
{ dim<-dim(xdat)
n<-1:dim[1]
p<-dim[2]
} else {n<-1:length(xdat); p<-1}
tvec<-id.time(tvec,d)
b<-param[1:p]
q<-length(unique(ydat))-1
gam<-param[(p+1):(p+q)]
g<-0
m<-0
for(i in uid)
{ #print(i)
cases<-id==i
irow=n[cases]
m<-m+1
if(is.matrix(xdat)) x<-xdat[cases,] else x<-xdat[cases]
mu<-ordreg.mu(x,b)
ub<-noCategories(gam)
scgam<-array(NA,c(ub,ub-1,d[m]))
for(j in 1:d[m])
{ for(k in 1:(ub-1))
{ scgam[,k,j]<-derlik.gam.ord(mu[j],gam,k,link) }
}
y<-ydat[irow]
sci<-NULL
for(j in 1:d[m])
{ scgami<-NULL
for(k in 1:(ub-1))
{ scgami<-c(scgami,scgam[y[j],k,j])}
sci<-c(sci,scgami)
}
ti<-tvec[irow]
seli<-subselect.ord(ti,q)
Xi<-WtScMat$X[,,m]
Xi<-Xi[,seli]
deltai<-WtScMat$delta[,,m]
deltai<-deltai[seli,seli]
omegai<-WtScMat$omega[,,m]
omegai<-omegai[seli,seli]
gi<-Xi%*%t(deltai)%*%solve(omegai,sci)
g<-g+gi
}
g
}
# solving the weighted scores equations
# input:
# start the starting values (IEE estimates) for the vector of
# regression and not regression parameters
# xdat the matrix of covariates (use the constant 1 for the first covariate)
# ydat the vector with the response
# id the vector with the id
# tvec the time related vector
# link is the link function. Choices are
# "logit" for the logit link function, and "probit" for the probit link function.
# corstr indicates the latent correlation structure of normal copula.
# Choices are "exch", "ar", and "unstr" for exchangeable, ar(1) and
# unstructured correlation structure, respectively.
# WtScMat is a list containing the following components:
# omega the array with the Omega matrices
# delta the array with the Delta matrices
# X the array with the X matrices
# output:
# the weighted scores estimates
solvewtsc.ord<-function(start,WtScMat,xdat,ydat,id,tvec,link)
{ multiroot(f=wtsc.ord,start,atol=1e-4,rtol=1e-4,ctol=1e-4,
WtScMat=WtScMat,xdat=xdat,ydat=ydat,id=id,tvec=tvec,link=link)
}
# inverse Godambe matrix with Delta and Omega evaluated at IEE estimator
# input:
# b the regression coefficients
# gam the gamma parameter
# xdat the matrix of covariates (use the constant 1 for the first covariate)
# ydat the vector with the response
# id the vector with the id
# tvec the time related vector
# rh the vector with CL1 estimates
# WtScMat a list containing the following components:
# omega the array with the Omega matrices
# delta the array with the Delta matrices
# X the array with the X matrices
# link is the link function. Choices are
# "logit" for the logit link function, and "probit" for the probit link function.
# output:
# the inverse Godambe matrix
godambe.ord<-function(param,WtScMat,xdat,ydat,id,tvec,link)
{ uid<-unique(id)
d<-id.size(id)
maxd<-max(d)
if(is.matrix(xdat))
{ dim<-dim(xdat)
n<-1:dim[1]
p<-dim[2]
} else {n<-1:length(xdat); p<-1}
tvec<-id.time(tvec,d)
b<-param[1:p]
q<-length(unique(ydat))-1
gam<-param[(p+1):(p+q)]
v<-v1<-v2<-v3<-0
fv<-fv1<-fv2<-fv3<-0
m<-0
for(i in uid)
{ m<-m+1
cases<-id==i
irow=n[cases]
ti<-tvec[irow]
if(is.matrix(xdat))
{ x<-xdat[cases,]
newx<-matrix(NA,maxd,p)
newx[ti,]<-x} else {
x<-xdat[cases]
newx<-rep(NA,maxd)
newx[ti]<-x }
newy<-rep(NA,maxd)
newmui<-rep(NA,maxd)
mui<-ordreg.mu(x,b)
newmui[ti]<-mui
ub<-noCategories(gam)
scgam<-array(NA,c(ub,ub-1,maxd))
for(j in ti)
{ for(k in 1:(ub-1))
{ scgam[,k,j]<-derlik.gam.ord(newmui[j],gam,k,link) }
}
y<-ydat[irow]
newy[ti]<-y
sci<-NULL
for(j in 1:maxd)
{ scgami<-NULL
for(k in 1:(ub-1))
{ scgami<-c(scgami,scgam[newy[j],k,j])}
sci<-c(sci,scgami)
}
sci<-sci[!is.na(sci)]
seli<-subselect.ord(ti,q)
Xi<-WtScMat$X[,,m]
Xi<-Xi[,seli]
fdeltai<-WtScMat$delta[,,m]
fdeltai<-fdeltai[seli,seli]
fomegai<-WtScMat$omega[,,m]
fomegai<-fomegai[seli,seli]
fci<-fdeltai%*%t(Xi)
nrhs1=ncol(fdeltai)
nrhs2=ncol(fci)
tem.lin=solve(fomegai,cbind(fdeltai,fci))
finvai=t(tem.lin[,1:nrhs1])
xi.invai=Xi %*% finvai
fai<-solve(finvai)
tem=solve(t(fai),t(Xi))
fv1i<-xi.invai %*% fci
fv2i<-xi.invai %*% (sci%*% t(sci)) %*% tem
fv3i<-t(fci) %*% tem
fv1<-fv1+fv1i
fv2<-fv2+fv2i
fv3<-fv3+fv3i
}
solve(fv1,fv2)%*%solve(fv3)
}
# the weighted scores wrapper function: handles all the steps in the weighted scores
# xdat the matrix of covariates (use the constant 1 for the first covariate)
# ydat the vector with the response
# id the vector with the id
# tvec the time related vector
# rh the vector with CL1 estimates
# WtScMat a list containing the following components:
# omega the array with the Omega matrices
# delta the array with the Delta matrices
# X the array with the X matrices
# corstr indicates the latent correlation structure of normal copula.
# Choices are "exch", "ar", and "unstr" for exchangeable, ar(1) and
# unstructured correlation structure, respectively.
# link is the link function. Choices are
# "logit" for the logit link function, and "probit" for the probit link function.
# iprint indicates printing of some intermediate results, default FALSE
wtsc.ord.wrapper<-function(xdat,ydat,id,tvec,corstr,link,iprint=FALSE)
{
i.est<-iee.ord(xdat,ydat,link)
if(iprint)
{ cat("\niest: IEE estimates\n")
print(c(i.est$reg,i.est$gam))
}
est.rho<-cl1.ord(b=i.est$reg,gam=i.est$gam,xdat,ydat,id,tvec,corstr,link)
if(iprint)
{ cat("\nest.rho: CL1 estimates\n")
print(est.rho$e)
cat("\nest.rho: CL1 likelihood\n")
print(-est.rho$m)
}
WtScMat<-weightMat.ord(b=i.est$reg,gam=i.est$gam,rh=est.rho$e,
xdat,ydat,id,tvec,corstr,link)
ws<-solvewtsc.ord(start=c(i.est$reg,i.est$gam),WtScMat,xdat,ydat,id,
tvec,link)
if(iprint)
{ cat("ws=parameter estimates\n")
print(ws$r)
}
acov<-godambe.ord(ws$r,WtScMat,xdat,ydat,id,tvec,link)
se<-sqrt(diag(acov))
if(iprint)
{ cat("\nacov: inverse Godambe matrix with W based on first-stage wt matrices\n")
print(acov)
cat("\nse: robust standard errors\n")
print(se)
res<-round(cbind(ws$r,se),3)
cat("\nres: Weighted scores estimates and standard errors\n")
print(res)
}
list(IEEest=c(i.est$reg,i.est$gam),CL1est=est.rho$e,CL1lik=-est.rho$m,WSest=ws$r, asympcov=acov)
}
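# Example for wtsc.ord.wrapper (sketch; 'xdat', 'ydat', 'id', 'tvec' are hypothetical
# objects with an ordinal response coded 1,...,K):
# out <- wtsc.ord.wrapper(xdat, ydat, id, tvec, corstr="ar", link="probit", iprint=TRUE)
# out$WSest   # weighted scores estimates (regression coefficients and cutpoints)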
|
/scratch/gouwar.j/cran-all/cranData/weightedScores/R/wtsc-ord.R
|
#' Medical data of 5735 patients.
#'
#' This dataset contains medical data of 5735 patients.
#'
#' @format A data frame with 5735 rows and 12 variables:
#' \describe{
#' \item{age}{patient's age}
#' \item{ARF}{Acute Renal Failure}
#' \item{female}{is patient female (1) or not (0)}
#' \item{sepsis}{Sepsis Diagnosis}
#' \item{CHF}{Congestive Heart Failure}
#' \item{Cirr}{Cirrhosis}
#' \item{colcan}{Colon Cancer}
#' \item{Coma}{Coma}
#' \item{lungcan}{Lung cancer}
#' \item{MOSF}{Malignancy}
#' \item{treatment}{RHC (Swan-Ganz catheter)}
#' \item{meanbp1}{Mean blood pressure}
#' ...
#' }
#' @source \url{http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/rhc.html}
"testdata"
|
/scratch/gouwar.j/cran-all/cranData/weightedZdiff/R/testdata.R
|
zdifference<-function(dataset,ref,weights=NULL,standard_weights=FALSE,na.rm=TRUE,binary_variable=NULL,ordinal_variable=NULL
,continuous_variable=NULL,nominal_variable=NULL,r=2,var.est=FALSE,coefvar.est=FALSE,grad=1){
if(is.null(weights)){
w<-as.matrix(rep(1,dim(dataset)[1]))
indw<-1
}
else{
if(any(weights%in%names(dataset))){
if(any(!weights%in%names(dataset))){
warning(paste("weight names '",weights[which(!weights%in%names(dataset))],"'not found in the names of the dataset.",sep=""))
}
indw<-which(weights%in%names(dataset))
w<-as.matrix(dataset[,weights[which(weights %in% names(dataset))]])
}
else{
stop(paste("the name ",w," of the reference variable does not exist in the names of the dataset"))
}
}
if(standard_weights==TRUE & !is.null(weights)){
w<-cbind(w,rep(1,length(w[,1])))
}
if(length(unique(w))!=1 & coefvar.est==TRUE){
coefvar.est=FALSE
warning("weighted zdifference for coefficient of variation is not available for non unique weights")
}
if(length(unique(na.omit(dataset[,ref])))!= 2){
stop("reference variable is not binary")
}
if(any(w<0)){
stop("negative weights are not allowed")
}
reference<-dataset[,ref]
dataset<-dataset[,-c(which(colnames(dataset)==ref),which(colnames(dataset)%in%weights==TRUE))]
res<-rep(NA,ncol(dataset))
if(!is.null(continuous_variable)){
if(any(!continuous_variable %in% colnames(dataset))){
stop("At least one of the variables in continuous_variable does not specify a column of data.")
}
res[which(colnames(dataset) %in% continuous_variable)]<-"con"
}
if(!is.null(binary_variable)){
if(any(!binary_variable %in% colnames(dataset))){
stop("At least one of the variables in binary_variable does not specify a column of data.")
}
res[which(colnames(dataset) %in% binary_variable)]<-"bin"
}
if(!is.null(ordinal_variable)){
if(any(!ordinal_variable %in% colnames(dataset))){
stop("At least one of the variables in ordinal_variable does not specify a column of data.")
}
res[which(colnames(dataset) %in% ordinal_variable)]<-"ordi"
}
if(!is.null(nominal_variable)){
if(any(!nominal_variable %in% colnames(dataset))){
stop("At least one of the variables in nominal_variable does not specify a column of data.")
}
res[which(colnames(dataset) %in% nominal_variable)]<-"nom"
}
if(any(is.na(res))){
for(i in 1:ncol(dataset)){
if(is.na(res[i])){
if(length(unique(dataset[,i]))==2){
res[i]<-"bin"
}
else if(length(unique(dataset[,i]))< 10 | class(dataset[,i])=="factor"){
res[i]<-"nom"
}
else if(length(unique(dataset[,i])) >= 10 ){
res[i]<-"con"
}
}
}
print("User did not specify all variable types. Please check if the variables are correctly assigned and if not change the second column.
'con' means continuous, 'bin' binary, 'ordi' ordinal and 'nom' nominal variables.")
zdiffoverview<-data.frame("names"=colnames(dataset),"type"=res)
res<-edit(zdiffoverview)[,2]
while(any(!res%in%c("nom","con","bin","ordi"))){
print("User did not specify all variable types. Please check if the variables are correctly assigned and if not changethe second column.
'con' means continuous, 'bin' binary, 'ordi' ordinal and 'nom' nominal variables.")
res<-edit(zdiffoverview)[,2]
}
}
zdiff<-matrix(NA,dim(dataset)[2],dim(w)[2])
for(i in 1:dim(w)[2]){
if(length(unique(w[,i]))==1){
zdiff[,i]<-unlist(lapply(1:dim(dataset)[2],function(z){
if(res[z]=="con"){
return(zdifference_continuous(dataset[,z],reference,w=w[,i],r=r))
}
else if(res[z]=="bin"){
return(zdifference_binary(dataset[,z],reference,w=w[,i],r=r))
}
else if(res[z]=="ordi"){
return(zdifference_ordinal(dataset[,z],reference,r=r))
}
else if(res[z]=="nom"){
return(zdifference_nominal(dataset[,z],reference,w=w[,i],r=r))
}
}))
}
else{
zdiff[,i]<-unlist(lapply(1:dim(dataset)[2],function(z){
if(res[z]=="con"){
return(zdifference_continuous(dataset[,z],reference,w[,i],r=r))
}
else if(res[z]=="bin"){
return(zdifference_binary(dataset[,z],reference,w[,i],r=r))
}
else if(res[z]=="ordi"){
return(zdifference_ordinal(dataset[,z],reference,w[,i],r=r))
}
else if(res[z]=="nom"){
if(any(table(dataset[,z])/length(dataset[,z])<0.06)){
summe<-0
# use a separate loop index so the weight-column index 'i' of the outer loop is not shadowed
for(kk in 1:length(na.omit(unique(dataset[,z])))){
newvar<-ifelse(dataset[,z]==unique(dataset[,z])[kk],1,0)
summe<-summe+zdifference_binary(newvar,reference,w[,i],r=r)
}
return(summe)
}
else{
return(zdifference_nominal(dataset[,z],reference,w[,i],r=r))
}
}
}))
}
}
if(grad!=1){
zdiffgrad<-list()
for(i in 1:dim(w)[2]){
zdiffgrad[[i]]<-matrix(NA,length(which(res=="con")),grad-1)
for(g in 2:grad){
if(length(unique(w[,i]))==1){
zdiffgrad[[i]][,g-1]<-unlist(lapply(1:dim(dataset)[2],function(z){
if(res[z]=="con"){
return(zdifference_continuous(dataset[,z]^g,reference,w=w[,i],r=r))
}
}))
}
else{
zdiffgrad[[i]][,g-1]<-unlist(lapply(1:dim(dataset)[2],function(z){
if(res[z]=="con"){
return(zdifference_continuous(dataset[,z]^g,reference,w=w[,i],r=r))
}
}))
}
}
}
}
if(var.est){
zdiffvar<-matrix(NA,length(which(res=="con")),dim(w)[2])
for(i in 1:dim(w)[2]){
zdiffvar[,i]<-unlist(lapply(1:dim(dataset)[2],function(z){
if(res[z]=="con"){
return(zdifference_var(dataset[,z],reference,w[,i],r=r))
}
}))
}
}
if(coefvar.est){
zdiffcoefvar<-matrix(NA,length(which(res=="con")),dim(w)[2])
for(i in 1:dim(w)[2]){
zdiffcoefvar[,i]<-unlist(lapply(1:dim(dataset)[2],function(z){
if(res[z]=="con"){
return(zdifference_coefvar(dataset[,z],reference,w[,i],r=r))
}
}))
}
}
if(grad!=1){
gradsumme<-unlist(lapply(1:dim(dataset)[2],function(z){
if(res[z]=="con"){
summe<-0
for(i in 2:grad){
summe<-summe+abs(zdifference_continuous(dataset[,z]^i,reference,w,r=r))
}
return(summe)
}
}))
}
else{
gradsumme<-0
}
if(grad==1){
moment=c(rep(1,length(res)))
moment[which(res!="con")]<-""
zdiffmatrix<-matrix(NA,length(c(zdiff[,1])),dim(w)[2])
for(i in 1:dim(w)[2]){
zdiffmatrix[,i]<-zdiff[,i]
}
}
if(grad!=1){
moment=c(rep(1,length(res)),rep(2:grad,each=length(which(res=="con"))))
moment[which(res!="con")]<-""
    zdiffmatrix<-matrix(NA,length(zdiffgrad[[1]])+length(zdiff)/dim(w)[2],dim(w)[2])
for(i in 1:dim(w)[2]){
zdiffmatrix[,i]<-cbind(c(zdiff[,i],zdiffgrad[[i]]))
}
}
res<-as.character(res)
variable<-data.frame("variable"=c(rep(colnames(dataset),1),rep(colnames(dataset)[which(res=="con")],grad-1)),"zdifference"=zdiffmatrix,"moment"=moment)
if(standard_weights==TRUE & !is.null(weights)){
colnames(variable)<-c("variable",c(weights[indw],"standard weights"),"moment")
}
else if(is.null(weights)){
colnames(variable)<-c("variable","unweighted","moment")
}
else{
colnames(variable)<-c("variable",weights[indw],"moment")
}
if(var.est==TRUE & coefvar.est==TRUE){
var.variable<-data.frame("variable"=colnames(dataset)[which(res=="con")],
"zdifference"=zdiffvar,"zdiffcoefvar"=zdiffcoefvar,"treated"=rep("con",length(which(res=="con"))))
return(list("result"= variable,"variance_result"=var.variable, "squaredzdiff"=colSums(abs(zdiffmatrix)^2)))
}
else if(var.est==TRUE & coefvar.est==FALSE){
var.variable<-data.frame("variable"=colnames(dataset)[which(res=="con")],
"zdifference"=zdiffvar,"treated"=rep("con",length(which(res=="con"))))
return(list("result"= variable,"variance_result"=var.variable, "squaredzdiff"=colSums(abs(zdiffmatrix)^2)))
}
else if(var.est==FALSE & coefvar.est==TRUE){
var.variable<-data.frame("variable"=colnames(dataset)[which(res=="con")],
"zdiffcoefvar"=zdiffcoefvar,"treated"=rep("con",length(which(res=="con"))))
return(list("result"= variable,"variance_result"=var.variable, "squaredzdiff"=colSums(abs(zdiffmatrix)^2)))
}
else{
return(list("result"= variable, "squaredzdiff"=colSums(abs(zdiffmatrix)^2)))
}
}
|
/scratch/gouwar.j/cran-all/cranData/weightedZdiff/R/zdifference.R
|
zdifference_binary<-function(x,ref,w=NULL,na.rm=TRUE,r=2){
if(is.null(w)){
w<-rep(1,length(x))
}
if(na.rm==TRUE){
exclna<-which(is.na(x)==TRUE |is.na(w)==TRUE | is.na(ref)==TRUE)
if(length(exclna)!=0){
x<-x[-exclna]
w<-w[-exclna]
ref<-ref[-exclna]
}
}
else{
if(any(is.na(x)==TRUE |is.na(w)==TRUE | is.na(ref)==TRUE))
warning("NA in the data and NAs not removed")
}
if(any(w<0)){
stop("negative weights are not allowed")
}
if(length(unique(na.omit(ref)))!= 2){
stop("reference variable is not binomial")
}
if(length(unique(na.omit(x)))!=2){
stop("variable is not binomial")
}
if(class(x)=="integer"){
x<-as.numeric(as.character(x))
}
if(class(x)=="factor"){
x<-as.numeric(x)
}
lx<-unique(sort(x))
l<-sort(unique(ref))
x0<-x[which(ref==l[1])]
x1<-x[which(ref==l[2])]
w0<-w[which(ref==l[1])]/sum(w[which(ref==l[1])])
w1<-w[which(ref==l[2])]/sum(w[which(ref==l[2])])
n0<-length(na.omit(x0))
n1<-length(na.omit(x1))
p0<-length(which(x0==lx[1]))/n0
p1<-length(which(x1==lx[1]))/n1
res<-(sum(w0*x0,na.rm=na.rm)/sum(w0,na.rm=na.rm)-sum(w1*x1,na.rm=na.rm)/sum(w1,na.rm=na.rm))/sqrt(p0*(1-p0)*sum(w0^2,na.rm=na.rm)/sum(w0,na.rm=na.rm)^2+p1*(1-p1)*sum(w1^2,na.rm=na.rm)/sum(w1,na.rm=na.rm)^2)
return(round(res,r))
}
|
/scratch/gouwar.j/cran-all/cranData/weightedZdiff/R/zdifference_binomial.R
|
zdifference_coefvar<-function(x,ref,na.rm=TRUE,r=2){
if(length(unique(na.omit(ref)))!= 2){
stop("reference variable is not binomial")
}
if(class(x)=="integer"){
x<-as.numeric(as.character(x))
}
if(class(x)=="factor"){
x<-as.numeric(x)
}
if(na.rm){
exclna<-which(is.na(x)| is.na(ref))
if(length(exclna)!=0){
x<-x[-exclna]
ref<-ref[-exclna]
}
}
else{
if(any(is.na(x)| is.na(ref)))
warning("NA in the data and NAs not removed")
}
x0<-x[which(ref==sort(unique(ref))[1])]
x1<-x[-which(ref==sort(unique(ref))[1])]
n0<-length(x0)
n1<-length(x1)
mean0<-mean(x0)
mean1<-mean(x1)
var0<-var(x0)
var1<-var(x1)
c0<-sqrt(var0)/mean0
c1<-sqrt(var1)/mean1
cv<-((n0-1)*c0+(n1-1)*c1)/(n0+n1-2)
res<-(c0-c1)/sqrt((1/(n0-1)+1/(n1-1))*cv^2*(0.5+cv^2))
return(round(res,r))
}
|
/scratch/gouwar.j/cran-all/cranData/weightedZdiff/R/zdifference_coefvar.R
|
zdifference_continuous<-function(x,ref,w=NULL,na.rm=TRUE,r=2){
if(is.null(w)){
w<-rep(1,length(x))
}
if(na.rm==TRUE){
exclna<-which(is.na(x)==TRUE |is.na(w)==TRUE | is.na(ref)==TRUE)
if(length(exclna)!=0){
x<-x[-exclna]
w<-w[-exclna]
ref<-ref[-exclna]
}
}
else{
if(any(is.na(x)==TRUE |is.na(w)==TRUE | is.na(ref)==TRUE))
warning("NA in the data and NAs not removed")
}
if(any(w<0)){
stop("negative weights are not allowed")
}
if(length(unique(na.omit(ref)))!= 2){
stop("reference variable is not binomial")
}
if(class(x)=="integer"){
x<-as.numeric(as.character(x))
}
if(class(x)=="factor"){
x<-as.numeric(x)
}
x0<-x[which(ref==sort(unique(ref))[1])]
x1<-x[-which(ref==sort(unique(ref))[1])]
w0<-w[which(ref==sort(unique(ref))[1])]/sum(w[which(ref==sort(unique(ref))[1])])
w1<-w[which(ref==sort(unique(ref))[2])]/sum(w[which(ref==sort(unique(ref))[2])])
xw0<-sum(w0*x0,na.rm=na.rm)/sum(w0,na.rm=na.rm)
xw1<-sum(w1*x1,na.rm=na.rm)/sum(w1,na.rm=na.rm)
var0<-sum(w0^2,na.rm=na.rm)*var(x0,na.rm=na.rm)
var1<-sum(w1^2,na.rm=na.rm)*var(x1,na.rm=na.rm)
erg<-(xw0 - xw1)/sqrt(var0+var1)
return(round(erg,r))
}
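## --- Illustrative sketch (not part of the original package) ---------------
## A minimal usage example with simulated data; all object names below
## ('group', 'age', 'ipw') are hypothetical. It shows how
## zdifference_continuous() compares a covariate between the two groups
## defined by a binary reference variable, with and without weights; with
## equal weights the statistic reduces to the ordinary two-sample
## z-difference of means.
if (FALSE) {
  set.seed(1)
  group <- rep(c(0, 1), each = 50)             # binary reference variable
  age   <- rnorm(100, mean = 50 + 2 * group)   # continuous covariate
  ipw   <- runif(100, 0.5, 1.5)                # e.g. inverse-probability weights
  zdifference_continuous(age, ref = group)           # unweighted
  zdifference_continuous(age, ref = group, w = ipw)  # weighted
}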
|
/scratch/gouwar.j/cran-all/cranData/weightedZdiff/R/zdifference_continuous.R
|
zdifference_nominal<-function(x,ref,w=NULL,na.rm=TRUE,norma=TRUE,r=2){
if(is.null(w)){
w<-rep(1,length(x))
}
if(na.rm==TRUE){
exclna<-which(is.na(x)==TRUE |is.na(w)==TRUE | is.na(ref)==TRUE)
if(length(exclna)!=0){
x<-x[-exclna]
w<-w[-exclna]
ref<-ref[-exclna]
}
}
else{
if(any(is.na(x)==TRUE |is.na(w)==TRUE | is.na(ref)==TRUE))
warning("NA in the data and NAs not removed")
}
if(any(w<0)){
stop("negative weights are not allowed")
}
if(any(w==0)){
x<-x[-which(w==0)]
ref<-ref[-which(w==0)]
w<-w[-which(w==0)]
}
if(length(unique(na.omit(ref)))!= 2){
stop("reference variable is not binomial")
}
x0<-na.omit(x[which(ref==sort(unique(ref))[1])])
x1<-na.omit(x[-which(ref==sort(unique(ref))[1])])
w0<-w[which(ref==sort(unique(ref))[1]&is.na(x)==FALSE)]
w1<-w[which(ref==sort(unique(ref))[2]&is.na(x)==FALSE)]
l0<-sum(w0)
l1<-sum(w1)
x00<-unlist(lapply(1:length(unique(na.omit(x))),function(z){
sum(w0[which(x0==sort(unique(na.omit(x)))[z])])
}))
s00<-unlist(lapply(1:length(unique(na.omit(x))),function(z){
sum(w0[which(x0==sort(unique(na.omit(x)))[z])]^2)
}))
x11<-unlist(lapply(1:length(unique(na.omit(x))),function(z){
sum(w1[which(x1==sort(unique(na.omit(x)))[z])])
}))
s11<-unlist(lapply(1:length(unique(na.omit(x))),function(z){
sum(w1[which(x1==sort(unique(na.omit(x)))[z])]^2)
}))
chi<-sum((l0*x11-l1*x00)^2/(l0^2*s11 + l1^2*s00))
if(norma==TRUE){
return(round(sample(c(-1,1),1)*qnorm(0.5+(1-pchisq(chi,length(unique(x))-1,lower.tail=FALSE))/2),r))
}
else{
return(round(chi,r))
}
}
|
/scratch/gouwar.j/cran-all/cranData/weightedZdiff/R/zdifference_nominal.R
|
zdifference_ordinal<-function(x,ref,w=NULL,na.rm=TRUE,r=10){
if(is.null(w)){
w<-rep(1,length(x))
}
if(na.rm==TRUE){
exclna<-which(is.na(x)==TRUE |is.na(w)==TRUE | is.na(ref)==TRUE)
if(length(exclna)!=0){
x<-x[-exclna]
w<-w[-exclna]
ref<-ref[-exclna]
}
}
else{
if(any(is.na(x)==TRUE |is.na(w)==TRUE | is.na(ref)==TRUE))
warning("NA in the data and NAs not removed")
}
if(any(w<0)){
stop("negative weights are not allowed")
}
if(length(unique(na.omit(ref)))!= 2){
stop("reference variable is not binomial")
}
if(class(x)=="integer"){
x<-as.numeric(as.character(x))
}
if(class(x)=="factor"){
x<-as.numeric(x)
}
x0<-x[which(ref==(sort(unique(ref)))[1])]
x1<-x[-which(ref==(sort(unique(ref)))[1])]
w0<-w[which(ref==(sort(unique(ref)))[1])]/sum(w[which(ref==sort(unique(ref))[1])],na.rm=na.rm)
w1<-w[which(ref==sort((unique(ref)))[2])]/sum(w[which(ref==sort(unique(ref))[2])],na.rm=na.rm)
x<-c(x0,x1)
w<-c(w0,w1)
d<-length(which(ref==sort(unique(ref))[1]))
ran<-rank(x,ties.method="average")
t<-unlist(lapply(1:length(unique(ran)),function(i) length(which(ran==unique(ran)[i]))))
if(length(unique(ran))==length(x)){
nenner<-sum(w0^2+w1^2)*((length(x)^2-1)/12+(length(x)+1)/12)
}
else{
rs<-unique(ran)
N<-length(ran)
cvv<-sum(rs^2*t*(t-1))/(N*(N-1))+(sum(rs*t*sum(rs*t)-rs^2*t^2))/(N*(N-1))-((N+1)/2)^2
v<-c(-sum(t^3-t)/12/length(ran)+(length(ran)^2-1)/12)
nenner<-sum(w0^2)*v+(-sum(w0^2))*cvv+sum(w1^2)*v +(-sum(w1^2))*cvv
}
return(round((sum(w0*ran[1:d])-sum(w1*ran[-c(1:d)]))/sqrt(nenner),r))
}
|
/scratch/gouwar.j/cran-all/cranData/weightedZdiff/R/zdifference_ordinal.R
|
zdifference_var<-function(x,ref,w=NULL,na.rm=TRUE,r=2){
if(is.null(w)){
w<-rep(1,length(x))
}
if(na.rm){
exclna<-which(is.na(x)==TRUE |is.na(w)==TRUE | is.na(ref)==TRUE)
if(length(exclna)!=0){
x<-x[-exclna]
w<-w[-exclna]
ref<-ref[-exclna]
}
}
else{
if(any(is.na(x)==TRUE |is.na(w)==TRUE | is.na(ref)==TRUE))
warning("NA in the data and NAs not removed")
}
if(any(w<0)){
stop("negative weights are not allowed")
}
if(length(unique(na.omit(ref)))!= 2){
stop("reference variable is not binomial")
}
if(class(x)=="integer"){
x<-as.numeric(as.character(x))
}
if(class(x)=="factor"){
x<-as.numeric(x)
}
x0<-scale(x[which(ref== c(sort(unique(ref))[1]))],scale=FALSE)
x1<-scale(x[which(ref==c(sort(unique(ref))[2]))],scale=FALSE)
w0<-w[which(ref==sort(unique(ref))[1] & is.na(x)==FALSE)]/sum(w[which(ref==sort(unique(ref))[1])])
w1<-w[which(ref==sort(unique(ref))[2]& is.na(x)==FALSE)]/sum(w[which(ref==sort(unique(ref))[2])])
resultat<-function(w,x){
return(-3*((sum(w^4*sum(w))/(sum(w)^4)*mean(x^4))+(4*mean(x^3)*mean(x)*(sum(w^3-w^4)))+
(3*(var(x)+mean(x)^2)^2*(sum(w^2*sum(w^2))-sum(w^4)))+(6*(var(x)+mean(x)^2)*mean(x)^2*(sum(w^4-w^3)+sum(w^2*sum(-w^2+w))+sum(w^4-w^3)))+
(mean(x)^4*(sum(w*(1-w)^2*(1-w))+sum(w*sum(w^3-2*w^2))-sum(w^4-2*w^3)+sum(2*w^2*sum(w^2))-sum(2*w^4)+sum(w*sum(w*sum(-w^2)))-sum(-w^4)-sum(w^2*sum(-w^2))+2*sum(-w^4)-sum(w*sum(-w^3))-sum(-w^3*(1-w)))))+2*(
(sum(w^3*sum(w))*mean(x^4)/sum(w)^2)+(2*mean(x^3)*mean(x)*sum(w^2-w^3))+
((var(x)+mean(x)^2)^2*(sum(w*sum(w^2))-sum(w^3)))+((var(x)+mean(x)^2)*mean(x)^2*(sum(w*sum(w-w^2))+sum(w^3-w^2)+sum(w^3 -w^2))))-4*(
(sum(w^3)/sum(w)*mean(x^4))+2*(-sum(w^3)/sum(w)*mean(x^3)*mean(x)+sum(w^2*sum(w))*mean(x^3)*mean(x))+
(-sum(w^3)/sum(w)*(mean(x)^2+var(x))^2+sum(w^2*sum(w))*(mean(x)^2+var(x))^2)+(var(x)+mean(x)^2)*mean(x)^2*(sum(w*sum(w-w^2))+sum(-w^2+w^3)+sum(w^3-w^2)))+
(sum(w*sum(w))*(var(x)+mean(x)^2)^2-sum(w^2)*(var(x)+mean(x)^2)^2+sum(w^2)*mean(x^4))+
4*((sum(w^4)/sum(w)^2*mean(x^4))+(6/sum(w)^2*(sum(w^2*sum(w^2))*(var(x)+mean(x)^2)^2-sum(w^4)*(var(x)+mean(x)^2)^2))+
(4/sum(w)^2*(sum(w^3*sum(w))*mean(x)^3*mean(x)-sum(w^4)*mean(x)^3*mean(x))))-
((sum(w)*(mean(x)^2+var(x)))+1/2*(-2*sum(w*(1-w))*mean(x)^2-2*sum(w^2)*(mean(x)^2+var(x))))^2)
}
round((sum(w0*(x0-sum(w0*x0))^2)/(1-sum(w0^2))-sum(w1*(x1-sum(w1*x1))^2)/(1-sum(w1^2)))/(sqrt(resultat(w0,x0)/(1-sum(w0^2))^2+resultat(w1,x1)/(1-sum(w1^2))^2)),r)
}
|
/scratch/gouwar.j/cran-all/cranData/weightedZdiff/R/zdifference_var.R
|
#' Studies on the Effectiveness of Writing-to-Learn Interventions
#'
#' Results from 48 studies on the effectiveness of school-based writing-to-learn
#' interventions on academic achievement.
#'
#' @docType data
#'
#' @usage dat.bangertdrowns2004
#'
#' @format A data frame; for documentation, see \code{dat.bangertdrowns2004}
#' in Wolfgang Viechtbauer's R package \code{metafor}.
#'
#'
#' @keywords datasets
#'
#' @details This reproduced dataset and its documentation are credited to Wolfgang
#' Viechtbauer and his \code{metafor} package (2010). Please see his package for
#' details.
#'
#' @references Bangert-Drowns, R. L., Hurley, M. M., & Wilkinson, B. (2004). The
#' effects of school-based writing-to-learn interventions on academic achievement:
#' A meta-analysis. Review of Educational Research, 74, 29-58.
#'
#' Viechtbauer, W. (2010). Conducting meta-analysis in R with the metafor package.
#' Journal of Statistical Software, 36(3), 1-48.
#'
#' @source Bangert-Drowns, R. L., Hurley, M. M., & Wilkinson, B. (2004). The
#' effects of school-based writing-to-learn interventions on academic achievement:
#' A meta-analysis. Review of Educational Research, 74, 29-58.
#'
#' @examples
#' \dontrun{
#' dat.bangertdrowns2004
#'
#' # Extracting the effect sizes and sampling variances:
#' effect <- dat.bangertdrowns2004$yi
#' v <- dat.bangertdrowns2004$vi
#'
#' # The weight-function model with no mean model:
#' weightfunct(effect, v)
#'
#' # The weight-function model with a mean model:
#' weightfunct(effect, v, mods=~dat.bangertdrowns2004$info)
#' }
"dat.bangertdrowns2004"
|
/scratch/gouwar.j/cran-all/cranData/weightr/R/bangertdrowns-data.R
|
#' Create a Density Plot
#'
#' This function allows you to create a plot displaying the unadjusted and adjusted densities of the specified model. Note that you must first specify a model using \code{weightfunct}.
#' @param x an object of class weightfunct
#' @param ... other arguments
#' @importFrom ggplot2 ggplot aes geom_line theme_bw geom_hline labs xlab ylab theme element_text element_blank scale_x_continuous scale_y_continuous
#' @importFrom stats median
#' @importFrom graphics box plot
#' @importFrom scales pretty_breaks
#' @keywords weightr
#' @details This function produces an approximate graphical illustration of the estimated unweighted and weighted densities. The unweighted density is represented by a dashed line and the weighted density by a solid line. For the unweighted density, the effect sizes are assumed to be normally distributed, with a mean equal to their unadjusted mean and a variance equal to their unadjusted variance component plus their individual sampling variances. This plot is an approximation because it is necessary to use a fixed sampling variance; here, we fix the sampling variance to the median of the distribution of sampling variances.
#'
#' For the adjusted density, the expected density for effect sizes within each specified p-value interval is multiplied by the estimated weight for the corresponding interval. Greater density in an interval then represents a greater likelihood of effect-size survival. (Remember, of course, that the weight for the first interval is fixed to one, and other intervals should be interpreted relative to it.) Each discontinuity in the solid line, therefore, represents a p-value cutpoint.
#'
#' Users may wonder why the adjusted density, or the solid line, sometimes falls outside of the unadjusted density, or the dashed line. In answer, recall that the mean and variance of the adjusted density also differ. Based on the severity of this difference, the adjusted density may fall outside of its unadjusted counterpart.
#' @export
#' @examples
#' \dontrun{
#' test <- weightfunct(effect, v, steps)
#' density(test)
#' }
density <- function(x, ...){
if (!inherits(x, "weightfunct")){
stop("Argument 'x' must be an object of class \"weightfunct\".")}
unadj_est <- cbind(c(x[[1]]$par[1:2]))
adj_est <- cbind(c(x[[2]]$par[1:2]))
weights <- cbind(c(x[[2]]$par[(x$npred+3):(length(x$steps)+(x$npred+1))]))
vc1 <- unadj_est[1]
mu1 <- unadj_est[2]
vc2 <- adj_est[1]
mu2 <- adj_est[2]
cuts <- x$steps
x_low_lim <- min(x$effect) - 2
x_up_lim <- max(x$effect) + 2
xfull <- seq(x_low_lim,x_up_lim,.01)
vi <- median(x$v)
fx <- ( 1/(sqrt(2*pi*(vi + vc1))) ) * exp( -1/2*( (xfull - mu1)^2 / (vi + vc1) ) )
yfull <- fx
A0 <- sum(rep(.01,length(xfull))*yfull)
fx2 <- ( 1/(sqrt(2*pi*(vi + vc1))) ) * exp( -1/2*( (xfull - mu1)^2 / (vi + vc1) ) )
testlist <- -1 * qnorm(x$steps, 0, sqrt(vi + vc2))
testxfull <- findInterval(xfull,sort(testlist))
xlist <- split(xfull, testxfull)
ylist <- split(fx2, testxfull)
weights2 <- rev(c(1, weights))
testyfull <- mapply("*", ylist, weights2)
A1 <- sum(rep(.01,length(unlist(xlist)))*unlist(testyfull))
test <- data.frame(rep(xfull, 2), c((yfull/A0), (as.numeric(unlist(testyfull))/A1)),
c(rep("Unadjusted", length((yfull/A0))), rep("Adjusted",
length(as.numeric(unlist(testyfull))))))
test2 <- subset(test, (test[,2] > 0.001))
colnames(test2) <- c("xval", "density", "model")
  ggplot(data = test2, aes(x = xval, y = density, group = model)) +
    geom_line(aes(linetype = model)) +
theme_bw() +
geom_hline(yintercept = 0) +
labs(title = "Adjusted and Unadjusted Densities", linetype = "Model") +
xlab("Sample Effect Size") + ylab("Density") +
theme(text = element_text(size=17), panel.grid.major = element_blank(),
panel.grid.minor = element_blank()) +
scale_x_continuous(breaks = scales::pretty_breaks(n = 10)) +
scale_y_continuous(breaks = scales::pretty_breaks(n = 10))
}
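## --- Illustrative sketch (not part of the original package) ---------------
## A self-contained usage example with simulated data; 'k', 'v', 'effect'
## and 'fit' are hypothetical names. It fits the default weight-function
## model and draws the approximate densities described above: the dashed
## line is the unadjusted density, the solid line is rescaled by the
## estimated weight of each p-value interval.
if (FALSE) {
  set.seed(42)
  k      <- 60
  v      <- runif(k, 0.01, 0.1)                  # sampling variances
  effect <- rnorm(k, mean = 0.3, sd = sqrt(v))   # observed effect sizes
  fit    <- weightfunct(effect, v, steps = c(0.025, 1))
  density(fit)
}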
|
/scratch/gouwar.j/cran-all/cranData/weightr/R/density.R
|
#' Create a Funnel Plot
#'
#' This function allows you to create a funnel plot using a vector of effect sizes and a vector of their corresponding sampling variances.
#' @param effect a vector of meta-analytic effect sizes
#' @param v a vector of sampling variances
#' @param type \code{v} for variance or \code{se} for standard error; defaults to standard error
#' @param flip \code{FALSE} (default) to plot effect sizes on the vertical axis (for a horizontal funnel); \code{TRUE} to plot them on the horizontal axis (for a vertical funnel)
#' @keywords weightr
#' @details This funnel plot, by default, plots the effect sizes on the y-axis and the measure of study size (either variance or standard error) on the x-axis. If no asymmetry is present, the plot should resemble a horizontal funnel.
#'
#' Users can choose either standard error (default) or sampling variance as a measure of study size. The choice is mostly arbitrary. In both cases, however, \code{v} must be a vector of variances, the same as that required by \code{weightfunct}. The conversion to standard error is automatic.
#' @export
#' @examples
#' \dontrun{
#' # Funnel plot using standard error (default):
#' funnel(effect, v)
#' # Funnel plot using sampling variance:
#' funnel(effect, v, type='v')
#' # Vertical funnel plot using standard error:
#' funnel(effect, v, flip=TRUE)
#' }
funnel <- function(effect, v, type='se', flip=FALSE){
x <- sqrt(v)
if(type=='v'){
x <- v
}
xinc <- 0.05*(max(x) - min(x))
yinc <- 0.05*(max(effect) - min(effect))
xmin <- min(x) - xinc
xmax <- max(x) + xinc
ymin <- min(effect) - yinc
ymax <- max(effect) + yinc
if(type=='se'){
if(flip == FALSE){
plot(x, effect,
xlab="Standard Error", ylab="Effect Size",
xlim=c(xmin,xmax),
ylim=c(ymin,ymax))
}
if(flip == TRUE){
plot(effect, x,
xlab="Effect Size", ylab="Standard Error",
xlim=c(ymin,ymax),
ylim=c(xmin,xmax))
}
}
if(type=='v'){
if(flip == FALSE){
plot(x, effect,
xlab="Sampling Variance", ylab="Effect Size",
xlim=c(xmin,xmax),
ylim=c(ymin,ymax))
}
if(flip == TRUE){
plot(effect, x,
xlab="Effect Size", ylab="Sampling Variance",
xlim=c(ymin,ymax),
ylim=c(xmin,xmax))
}
}
box(lwd=1.2)
}
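## --- Illustrative sketch (not part of the original package) ---------------
## A runnable variant of the examples above using simulated data
## (hypothetical names), contrasting the three display options.
if (FALSE) {
  set.seed(7)
  v      <- runif(40, 0.01, 0.2)
  effect <- rnorm(40, mean = 0.2, sd = sqrt(v))
  funnel(effect, v)               # standard error on the x-axis (default)
  funnel(effect, v, type = 'v')   # sampling variance on the x-axis
  funnel(effect, v, flip = TRUE)  # effect sizes on the x-axis instead
}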
|
/scratch/gouwar.j/cran-all/cranData/weightr/R/funnel.R
|
#' Studies of the Predictive Validity of the General Ability Subscale of the
#' General Aptitude Test Battery (GATB)
#'
#' Results from 755 studies on the General Aptitude Test Battery's predictive validity
#' of job performance (General Ability subscale).
#'
#' @docType data
#'
#' @usage dat.gatb
#'
#' @format A data frame containing the following columns:
#' \describe{
#' \item{\code{z}}{Fisher's z-transformed correlation coefficients}
#' \item{\code{v}}{corresponding sampling variance}
#' }
#'
#' @keywords datasets
#'
#' @details The General Aptitude Test Battery (GATB) is designed to measure nine
#' cognitive, perceptual, and psychomotor skills thought relevant to the prediction
#' of job performance. From 1947 to 1993, a total of 755 studies were completed in
#' order to assess the validity of the GATB and its nine scales, and the GATB has
#' been found to be a moderately valid predictor of job performance. This dataset
#' consists of validity coefficients for the General Ability scale of the GATB.
#'
#' @references Vevea, J. L., Clements, N. C., & Hedges, L. V. (1993). Assessing the
#' effects of selection bias on validity data for the General Aptitude Test Battery.
#' Journal of Applied Psychology, 78(6), 981-987.
#'
#' U.S. Department of Labor, Division of Counseling and Test Development, Employment
#' and Training Administration. (1983a). The dimensionality of the General Aptitude
#' Test Battery (GATB) and the dominance of general factors over specific factors in
#' the prediction of job performance for the U.S. Employment Service (U.S. Employment
#' Service Test Research Rep. No. 44). Washington, DC.
#'
#' U.S. Department of Labor, Division of Counseling and Test Development, Employment
#' and Training Administration. (1983b). Test validity for 12,000 jobs: An application
#' of job classification and validity generalization analysis to the General Aptitude
#' Test Battery (U.S. Employment Service Test Research Rep. No. 45). Washington, DC.
#'
#' @source U.S. Department of Labor, Division of Counseling and Test Development,
#' Employment and Training Administration. (1983a). The dimensionality of the
#' General Aptitude Test Battery (GATB) and the dominance of general factors over
#' specific factors in the prediction of job performance for the U.S. Employment
#' Service (U.S. Employment Service Test Research Rep. No. 44). Washington, DC.
#'
#' U.S. Department of Labor, Division of Counseling and Test Development, Employment
#' and Training Administration. (1983b). Test validity for 12,000 jobs: An application
#' of job classification and validity generalization analysis to the General Aptitude
#' Test Battery (U.S. Employment Service Test Research Rep. No. 45). Washington, DC.
#'
#' @examples
#' \dontrun{
#' dat.gatb
#' effect <- dat.gatb$z
#' v <- dat.gatb$v
#' weightfunct(effect, v)
#' }
"dat.gatb"
|
/scratch/gouwar.j/cran-all/cranData/weightr/R/gatb-data.R
|
## Note to User:
## This file contains code for functions that are useful in calculating
## or formatting model output. They operate "behind the scenes."
## Function to tally the number of effect sizes per p-value interval ##
intervaltally <- function(p, steps) {
p1 <- cut(p, breaks=c(-Inf,steps), labels=steps)
return(p1) }
## Function to format the above tally into a table and add labels ##
sampletable <- function(p, pvalues, steps){
nsteps <- length(steps)
results <- matrix(nrow=length(pvalues),ncol=1)
results[,1] <- pvalues
rowlabels <- c(0, length(results[,1]))
rowlabels[1] <- paste(c("p-values <", steps[1]), collapse="")
for(i in 2:nsteps){
rowlabels[i] <- paste(c(steps[i - 1], "< p-values <", steps[i]), collapse=" ")
}
resultsb <- data.frame(results, row.names=c(rowlabels))
colnames(resultsb) <- c("Frequency")
return(resultsb)
}
##
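## --- Illustrative sketch (not part of the original package) ---------------
## A quick demonstration with made-up numbers of how the two helpers above
## are combined inside the package: intervaltally() bins one-tailed p-values
## at the cutpoints via cut(), and sampletable() labels the frequencies.
if (FALSE) {
  p       <- c(0.01, 0.03, 0.20, 0.70, 0.99)
  steps   <- c(0.025, 1.00)
  tallies <- as.numeric(table(intervaltally(p, steps)))
  sampletable(p, tallies, steps)   # frequencies per p-value interval
}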
|
/scratch/gouwar.j/cran-all/cranData/weightr/R/misc.func.hidden.R
|
#' Predicted Values for 'weightfunct' Objects
#'
#' This function calculates predicted conditional means and their corresponding standard errors for objects of class weightfunct.
#' @param object an object of class weightfunct
#' @param values a vector or matrix specifying the values of the moderator variables for which predicted values should be calculated; defaults to \code{NULL}
#' @param ... other arguments
#' @keywords weightr
#' @details \code{predict(object)} requires that the user specify a vector or matrix of predictor values. Without specifying values, the function will not work.
#'
#' For models including \code{y} moderator variables, users should set \code{values} equal to a \code{k} x \code{y} matrix, where \code{k} is the number of rows of new data (i.e., "new" studies). In the example code, there are 3 moderator variables and one row of data, so \code{values} is a 1 x 3 matrix. The intercept is included by default.
#'
#' Note that \code{weightfunct} handles categorical moderators automatically. To include them here, the appropriate contrast (dummy) variables must be explicitly specified. The \code{contrasts} function can help to understand the contrast matrix for a given factor.
#' @export
#' @return The function returns a list containing the following components: \code{unadjusted}, \code{adjusted}, and \code{values}. The \code{values} section simply prints the \code{values} matrix for verification. The \code{unadjusted} and \code{adjusted} sections print the conditional means for each row of new data, unadjusted and adjusted for publication bias (respectively), and their standard errors.
#' @examples
#' \dontrun{
#' test <- weightfunct(effect, v, mods=~x1 + x2 + x3, steps)
#' values <- matrix(c(0,1,0),ncol=3) # An arbitrary set of 3 dummy-coded moderators
#' predict(test, values)
#' }
predict.weightfunct <- function(object, values = NULL, ...){
if (!inherits(object, "weightfunct"))
stop("Argument 'object' must be an object of class \"weightfunct\".")
if(is.null(object$mods)){
stop("Error: You cannot specify values for moderators that do not exist.")
}
if(!(is.vector(values) || inherits(values, "matrix"))){
stop(paste0("Argument 'values' should be a vector or matrix, but is of class '", class(values), "'."))
}
if(object$npred == 1L){
k.new <- length(values)
X.new <- cbind(c(values))
}else{
if(is.vector(values) || nrow(values) == 1L){ # user gives one vector or one row
k.new <- 1
X.new <- rbind(values)
}else{ # user gives multiple rows
k.new <- nrow(values)
X.new <- cbind(values)
}
}
if(inherits(X.new[1,1], "character")){
stop("Argument 'values' should only contain numeric variables.")
}
if(ncol(X.new) != object$npred){
stop("Number of values does not match number of moderator variables.")
}
if(object$fe){
X.new <- cbind(int=rep(1,k.new), X.new)
}else{
X.new <- cbind(vc=rep(0,k.new), int=rep(1,k.new), X.new)
}
params <- matrix(object[1][[1]]$par, ncol=1, byrow=TRUE)
pred <- apply(X.new, 1, function(x) {x %*% params}) # conditional means, unadjusted
hess <- object[1][[1]]$hessian
inv.hess <- solve(hess)
se <- sqrt(apply(X.new, 1, function(x) {matrix(x,nrow=1) %*%
tcrossprod(inv.hess, matrix(x,nrow=1))})) # standard errors for conditional means, unadj
unadj <- data.frame(cbind(pred),cbind(se)) # make it look pretty
params_adj <- matrix(object[2][[1]]$par, ncol=1, byrow=TRUE)
weights.new <- matrix(rep(rep(0,(object$nsteps-1)),k.new),ncol=(object$nsteps-1),nrow=k.new) # adding placeholders for weights
X.new_adj <- cbind(X.new,weights.new)
pred_adj <- apply(X.new_adj, 1, function(x) {x %*% params_adj}) # conditional means, adjusted
if(is.null(object$weights)){ # if this is V & H, NOT V & W, calculate SEs
hess_adj <- object[2][[1]]$hessian
inv.hess_adj <- solve(hess_adj)
se_adj <- sqrt(apply(X.new_adj, 1, function(x) {matrix(x,nrow=1) %*%
tcrossprod(inv.hess_adj, matrix(x,nrow=1))})) # standard errors for conditional means, adjusted
adj <- data.frame(cbind(pred_adj),cbind(se_adj)) # make it look pretty
colnames(adj) <- c("pred", "se")
}else{ # otherwise, don't
se_adj <- NULL
adj <- data.frame(pred=cbind(pred_adj))
colnames(adj) <- c("pred")
}
return(list(unadjusted = unadj, adjusted = adj,
values = values)) # print this stuff out as a list
}
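## --- Illustrative sketch (not part of the original package) ---------------
## A hedged example (hypothetical names 'fit', 'age', 'diagnosis') of how a
## 'values' matrix for several new studies might be built, including dummy
## codes for a factor via model.matrix(), before calling predict(). The
## factor levels must match those used when the model was fitted.
if (FALSE) {
  # suppose the model was fitted as: fit <- weightfunct(effect, v, mods = ~ age + diagnosis)
  new_mods <- data.frame(age = c(25, 40),
                         diagnosis = factor(c("simple", "other"),
                                            levels = c("complex", "simple", "other")))
  values <- model.matrix(~ age + diagnosis, new_mods)[, -1]  # drop the intercept column
  predict(fit, values)
}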
|
/scratch/gouwar.j/cran-all/cranData/weightr/R/predict.weightfunct.R
|
#' Print Model Results
#'
#' This function allows you to print the model results.
#' @param x an object of class weightfunct
#' @param ... other arguments
#' @keywords weightr
#' @export
#' @importFrom stats model.matrix optim pchisq pnorm qnorm
#' @examples
#' \dontrun{
#' print(weightfunct(d,v))
#' }
print.weightfunct <- function(x, ...){
if (!inherits(x, "weightfunct"))
stop("Argument 'x' must be an object of class \"weightfunct\".")
####### Unadjusted model ########
cat("\n")
cat("Unadjusted Model (k = ", x$k, "):", sep="")
cat("\n\n")
# Heterogeneity estimates
if(x$fe == FALSE){
cat("tau^2 (estimated amount of total heterogeneity): ", formatC(round(x$unadj_est[1], digits = 4), digits = 4, format = "f"), " (SE = ", formatC(round(x$unadj_se[1], digits = 4),digits = 4, format = "f"), ")", sep="")
cat("\n")
cat("tau (square root of estimated tau^2 value): ", formatC(round(sqrt(x$unadj_est[1]), digits = 4),digits = 4, format = "f"))
cat("\n\n")
if(x$npred==0){
cat("Test for Heterogeneity:")
}
if(x$npred > 0){
cat("Test for Residual Heterogeneity:")
}
cat("\n")
cat("Q(df = ", (x$k-x$npred-1), ") = ", formatC(round(x$QE, digits=4), digits=4, format = "f"), ", p-val = ", x$QEp, sep="")
cat("\n\n")
}else{
cat("Test for Residual Heterogeneity:")
cat("\n")
cat("Q(df = ", (x$k-x$npred-1), ") = ", formatC(round(x$QE, digits=4), digits=4, format = "f"), ", p-val = ", x$QEp, sep="")
cat("\n\n")
}
if(x$npred > 0){
cat("Test of Moderators (coefficients 2:", (x$npred + 1), "):", sep="")
cat("\n")
cat("QM(df = ", x$npred, ") = ", formatC(round(x$QM, digits=4), digits=4, format = "f"), ", p-val = ", x$QMp, sep="")
cat("\n\n")
}
cat("Model Results:")
cat("\n\n")
if(x$fe == FALSE){
unadj_est <- cbind(x[[1]]$par[2:(x$npred+2)])
unadj_se <- cbind(sqrt(diag(solve(x[[1]]$hessian)))[2:(x$npred+2)])
}
if(x$fe == TRUE){
unadj_est <- cbind(x[[1]]$par[1:(x$npred+1)])
unadj_se <- cbind(sqrt(diag(solve(x[[1]]$hessian)))[1:(x$npred+1)])
}
z_stat <- unadj_est/unadj_se
p_val <- (2*pnorm(-abs(z_stat)))
ci.lb <- unadj_est - qnorm(0.975) * unadj_se
ci.ub <- unadj_est + qnorm(0.975) * unadj_se
res.table <- data.frame(matrix(c(unadj_est, unadj_se, z_stat, p_val, ci.lb, ci.ub), nrow=(x$npred+1), byrow=F),stringsAsFactors=FALSE)
rowlabels <- rep(0, (x$npred+1))
rowlabels[1] <- "Intercept"
if(x$npred > 0){
for(i in 2:(x$npred+1)){
rowlabels[i] <- paste(c(colnames(x$XX)[i]))
}
}
row.names(res.table) <- c(rowlabels)
colnames(res.table) <- c("estimate","std.error","z-stat","p-val","ci.lb","ci.ub")
res.table[,4] <- format.pval(res.table[,4])
res.table[,c(1,2,3,5,6)] <- format(res.table[,c(1,2,3,5,6)], digits=4)
print.data.frame(res.table)
####### Adjusted model ########
cat("\n")
cat("Adjusted Model (k = ", x$k, "):", sep="")
cat("\n\n")
if(x$fe == FALSE){
if(is.null(x$weights)){
if(is.nan(suppressWarnings(sqrt(diag(solve(x[[2]]$hessian)))[1]))){
warning('The adjusted variance component is so close to zero that a border condition prevents a meaningful iterative solution. As long as other model estimates are still \nreasonable, the results are identical to those from a fixed-effect analysis.')
}
suppressWarnings(
cat("tau^2 (estimated amount of total heterogeneity): ", formatC(round(x$adj_est[1], digits = 4),digits = 4, format = "f"), " (SE = ", formatC(round(x$adj_se[1], digits = 4),digits = 4, format = "f"), ")", sep="")
)
}
if(is.null(x$weights) == FALSE){
cat("tau^2 (estimated amount of total heterogeneity): ", formatC(round(x$adj_est[1], digits = 4),digits = 4, format = "f"), " (SE = ", "---", ")", sep="")
}
cat("\n")
cat("tau (square root of estimated tau^2 value): ", formatC(round(sqrt(x$adj_est[1]), digits = 4),digits = 4, format = "f"))
cat("\n\n")
if(x$npred==0){
cat("Test for Heterogeneity:")
}
if(x$npred > 0){
cat("Test for Residual Heterogeneity:")
}
cat("\n")
cat("Q(df = ", (x$k-x$npred-1), ") = ", formatC(round(x$QE, digits=4), digits=4, format = "f"), ", p-val = ", x$QEp, sep="")
cat("\n\n")
}else{
cat("Test for Residual Heterogeneity:")
cat("\n")
cat("Q(df = ", (x$k-x$npred-1), ") = ", formatC(round(x$QE, digits=4), digits=4, format = "f"), ", p-val = ", x$QEp, sep="")
cat("\n\n")
}
if(x$npred > 0){
if(is.null(x$weights)){
cat("Test of Moderators (coefficients 2:", (x$npred + 1), "):", sep="")
cat("\n")
cat("QM(df = ", x$npred, ") = ", formatC(round(x$QM2, digits=4), digits=4, format = "f"), ", p-val = ", x$QMp2, sep="")
cat("\n\n")
}else{
}
}
cat("Model Results:")
cat("\n\n")
if(x$fe == FALSE){
if(is.null(x$weights)){
adj_int_est <- cbind(x$adj_est[2:( (x$nsteps - 1) + (x$npred+2) )])
adj_int_se <- cbind(x$adj_se[2:( (x$nsteps - 1) + (x$npred+2) )])
}
else{
adj_int_est <- cbind(c(
round(x$adj_est[2:( (x$npred+2) )], digits=4),
sprintf('%.4f', x$weights[2:length(x$weights)])
))
adj_int_se <- cbind(rep("---", length(x[[2]]$par[2:length(x[[2]]$par)])))
}
}
if(x$fe == TRUE){
if(is.null(x$weights)){
adj_int_est <- cbind(x$adj_est[1:( (x$nsteps - 1) + (x$npred+1) )])
adj_int_se <- cbind(x$adj_se[1:( (x$nsteps - 1) + (x$npred+1) )])
}
else{
adj_int_est <- cbind(c(
round(x$adj_est[1:( (x$npred+1) )], digits=4),
sprintf('%.4f', x$weights[2:length(x$weights)])
))
adj_int_se <- cbind(rep("---", length(x[[2]]$par[1:length(x[[2]]$par)])))
}
}
if(is.null(x$weights)){
z_stat_int <- adj_int_est/adj_int_se
p_val_int <- (2*pnorm(-abs(z_stat_int)))
ci.lb_int <- adj_int_est - qnorm(0.975) * adj_int_se
ci.ub_int <- adj_int_est + qnorm(0.975) * adj_int_se
}else{
if(x$fe == FALSE){
length_a <- length(x[[2]]$par[2:length(x[[2]]$par)])
z_stat_int <- rep("---", length_a)
p_val_int <- rep("---", length_a)
ci.lb_int <- rep("---", length_a)
ci.ub_int <- rep("---", length_a)
}else{
length_aF <- length(x[[2]]$par[1:length(x[[2]]$par)])
z_stat_int <- rep("---", length_aF)
p_val_int <- rep("---", length_aF)
ci.lb_int <- rep("---", length_aF)
ci.ub_int <- rep("---", length_aF)
}
}
res.table <- data.frame(matrix(c(adj_int_est, adj_int_se, z_stat_int, p_val_int, ci.lb_int, ci.ub_int), nrow=(x$npred+1+(x$nsteps-1)), byrow=F),stringsAsFactors=FALSE)
rowlabels1 <- rep(0, (x$npred+1))
rowlabels1[1] <- "Intercept"
if(x$npred > 0){
for(i in 2:length(rowlabels1)){
rowlabels1[i] <- paste(c(colnames(x$XX)[i]))
}
}
rowlabels2 <- rep(0, (x$nsteps-1))
for(i in 1:(length(rowlabels2))){
rowlabels2[i] <- paste(c(x$steps[i], "< p <", x$steps[i + 1]), collapse=" ")
}
row.names(res.table) <- c(rowlabels1,rowlabels2)
colnames(res.table) <- c("estimate","std.error","z-stat","p-val","ci.lb","ci.ub")
if(is.null(x$weights)){
res.table[,"p-val"] <- format.pval(res.table[,"p-val"])
}
res.table[,c(1,2,3,5,6)] <- format(res.table[,c(1,2,3,5,6)], digits=4)
print.data.frame(res.table)
####### LRT ########
if(is.null(x$weights)){
cat("\n")
cat("Likelihood Ratio Test:")
cat("\n")
df <- length(x[[2]]$par) - length(x[[1]]$par)
lrchisq <- 2*(abs(x[[1]]$value - x[[2]]$value))
pvalue <- 1-pchisq(lrchisq,df)
cat("X^2(df = ", df, ") = ", lrchisq, ", p-val = ", format.pval(pvalue), sep="")
}else{
cat("\n")
cat("Note: The symbol --- appears because the user has specified weights,\nchoosing to use the Vevea and Woods model, which does not estimate \nweights for p-value intervals and therefore cannot produce meaningful \nstandard errors. The likelihood ratio test is also not interpretable.")
}
####### Interval table ########
if(x$table == TRUE){
pvalues <- as.numeric(table(intervaltally(x$p, x$steps)))
cat("\n")
cat("\n")
cat("Number of Effect Sizes per Interval:")
cat("\n")
cat("\n")
format(print.data.frame(sampletable(x$p, pvalues, x$steps)))
}
if(is.null(x$removed)==FALSE){
cat("\n")
cat("There were ", length(x$removed), "cases removed from your dataset due to the presence of missing data. To view the row numbers of these cases, use the attribute '$removed'.")
}
invisible()
}
|
/scratch/gouwar.j/cran-all/cranData/weightr/R/print.weightfunct.R
|
#' Start weightr in Shiny
#'
#' This function allows you to launch the Shiny application locally.
#' @keywords weightr
#' @export
#' @importFrom stats model.matrix optim pchisq pnorm qnorm
#' @examples
#' \dontrun{
#' library(shiny)
#' library(shinyBS)
#' shiny_weightr()
#' }
shiny_weightr <- function() {
if (!requireNamespace("shiny", quietly = TRUE)) {
stop("The R package 'shiny' is needed for this function to work. Please install it.",
call. = FALSE)
}
shiny::runApp(appDir = system.file("shiny", package="weightr"))
}
|
/scratch/gouwar.j/cran-all/cranData/weightr/R/shiny_weightr.R
|
#' Studies From Smith, Glass, and Miller's (1980) Meta-Analysis of Psychotherapy Outcomes
#'
#' An arbitrary subset of 74 studies from a meta-analysis assessing the effectiveness of psychotherapy. Contains two moderator variables.
#'
#' @docType data
#'
#' @usage dat.smith
#'
#' @format A data frame containing the following columns:
#' \describe{
#' \item{\code{es}}{standardized mean difference effect sizes}
#' \item{\code{v}}{corresponding sampling variances}
#' \item{\code{age}}{continuous moderator representing clients' average age in years}
#' \item{\code{diagnosis}}{categorical moderator representing disorder for which clients were treated; 1 = complex phobia, 2 = simple phobia, 3 = other}
#' }
#'
#' @keywords datasets
#'
#' @details This dataset consists of an arbitrarily selected subset of 74 studies assessing the effectiveness of psychotherapy. Smith, Glass, and Miller (1980) published a meta-analysis designed to explore the current state of knowledge about psychotherapy effectiveness. Their original meta-analysis contains more than 1,700 effect sizes from 475 studies with multiple moderators and outcome measures. This subset is vastly simplified and intended solely for the purpose of demonstration.
#'
#' @references Smith, M. L., Glass, G. V., & Miller, T. I. (1980). Meta-analysis of psychotherapy. American Psychologist, 41, 165-180.
#'
#' @source Smith, M. L., Glass, G. V., & Miller, T. I. (1980). Meta-analysis of psychotherapy. American Psychologist, 41, 165-180.
#'
#' @examples
#' \dontrun{
#' dat.smith
#' effect <- dat.smith$es
#' v <- dat.smith$v
#' weightfunct(effect, v)
#' }
"dat.smith"
|
/scratch/gouwar.j/cran-all/cranData/weightr/R/smith-data.R
|
#' Estimate the Vevea and Hedges (1995) Weight-Function Model
#'
#' This function allows the user to estimate the Vevea and Hedges (1995) weight-function model for publication bias.
#' @param effect a vector of meta-analytic effect sizes.
#' @param v a vector of meta-analytic sampling variances; needs to match up with the vector of effects, such that the first element in the vector of effect sizes goes with the first element in the vector of sampling variances, and so on.
#' @param pval defaults to \code{NULL}. A vector containing observed p-values for the corresponding effect sizes. If not provided, p-values are calculated.
#' @param steps a vector of p-value cutpoints. The default only distinguishes between significant and non-significant effects (p < 0.05).
#' @param mods defaults to \code{NULL}. A formula specifying the linear model.
#' @param weights defaults to \code{NULL}. A vector of prespecified weights for p-value cutpoints to estimate the Vevea and Woods (2005) model.
#' @param fe defaults to \code{FALSE}. Indicates whether to estimate a fixed-effect model.
#' @param table defaults to \code{FALSE}. Indicates whether to print a table of the p-value intervals specified and the number of effect sizes per interval.
#' @importFrom stats model.matrix optim pchisq pnorm qnorm model.frame na.action na.omit
#' @keywords weightr
#' @details This function allows meta-analysts to estimate both the
#' weight-function model for publication bias that was originally published in
#' Vevea and Hedges (1995) and the modified version presented in Vevea and Woods
#' (2005). Users can estimate both of these models with and without predictors and
#' in random-effects or fixed-effect situations. The function does not currently
#' accommodate models without an intercept.
#'
#' The Vevea and Hedges (1995) weight-function model is a tool for modeling publication
#' bias using weighted distribution theory. The model first estimates an unadjusted
#' fixed-, random-, or mixed-effects model, where the observed effect sizes are
#' assumed to be normally distributed as a function of predictors. This unadjusted
#' model is no different from the traditional meta-analytic model. Next, the Vevea
#' and Hedges (1995) weight-function model estimates an adjusted model that includes
#' not only the original mean model, fixed-, random-, or mixed-effects, but a series
#' of weights for any pre-specified p-value intervals of interest. This produces mean,
#' variance component, and covariate estimates adjusted for publication bias, as well
#' as weights that reflect the likelihood of observing effect sizes in each specified
#' interval.
#'
#' It is important to remember that the weight for each
#' estimated p-value interval must be interpreted relative to the first interval,
#' the weight for which is fixed to 1 so that the model is identified. In other
#' words, a weight of 2 for an interval indicates that effect sizes in that p-value
#' interval are about twice as likely to be observed as those in the first interval.
#' Finally, it is also important to remember that the model uses p-value cutpoints
#' corresponding to one-tailed p-values. This allows flexibility in the selection
#' function, which does not have to be symmetric for effects in the opposite direction;
#' a two-tailed p-value of 0.05 can therefore be represented as p < .025 or p > .975.
#'
#' After both the unadjusted and adjusted meta-analytic models are estimated, a
#' likelihood-ratio test compares the two. The degrees of freedom for this test are
#' equal to the number of weights being estimated. If the likelihood-ratio test is
#' significant, this indicates that the adjusted model is a better fit for the data,
#' and that publication bias may be a concern.
#'
#' To estimate a large number of weights for p-value intervals, the Vevea and Hedges
#' (1995) model works best with large meta-analytic datasets. It may have trouble
#' converging and yield unreliable parameter estimates if researchers, for instance,
#' specify a p-value interval that contains no observed effect sizes. However,
#' meta-analysts with small datasets are still likely to be interested in assessing
#' publication bias, and need tools for doing so. Vevea and Woods (2005)
#' attempted to solve this problem by adapting the Vevea and Hedges (1995) model to
#' estimate fewer parameters. The meta-analyst can specify p-value cutpoints,
#' as before, and specify corresponding fixed weights for those cutpoints. Then the
#' model is estimated. For the adjusted model, only the variance component and mean
#' model parameters are estimated, and they are adjusted relative to the fixed weights.
#' For example, weights of 1 for each p-value interval specified describes a situation
#' where there is absolutely no publication bias, in which the adjusted estimates are
#' identical to the unadjusted estimates. By specifying weights that depart from 1 over various p-value intervals, meta-analysts can
#' examine how various one-tailed or two-tailed selection patterns would alter their
#' effect size estimates. If changing the pattern of weights drastically changes
#' the estimated mean, this is evidence that the data may be vulnerable to
#' publication bias.
#'
#' For more information, consult the papers listed in the References section here.
#' Also, feel free to email the maintainer of \code{weightr} at [email protected].
#'
#' @export
#' @return The function returns a list of three lists. The first contains the following components: \code{output_unadj}, \code{output_adj}, \code{steps}, \code{mods}, \code{weights}, \code{fe}, \code{table}, \code{effect}, \code{v}, \code{npred}, \code{nsteps}, \code{k}, \code{p}, \code{removed}, and \code{XX}. \code{output_unadj} and \code{output_adj} return the results of the unadjusted and adjusted models, respectively, including Hessian matrices and model parameters. The other elements of this list are the arguments from \code{weightfunct}, as described above.
#'
#' The second list contains the following: \code{unadj_est}, \code{unadj_se}, \code{adj_est}, \code{adj_se}, \code{z_unadj}, \code{z_adj}, \code{p_unadj}, \code{p_adj}, \code{ci.lb_unadj}, \code{ci.ub_unadj}, \code{ci.lb_adj}, and \code{ci.ub_adj}. These are vectors of, respectively, the unadjusted and adjusted parameter estimates, standard errors, z-statistics, p-values, and 95% confidence interval boundaries.
#'
#' The third list contains information pertaining to heterogeneity tests: \code{QE}, \code{QEp}, \code{QM}, \code{QMp} (for the unadjusted model), and \code{QM2} and \code{QMp2} (for the adjusted model). These are the Q-values for tests of overall or excess heterogeneity and tests of moderators, along with their p-values. If no moderators are specified, the QM values will be \code{NA}.
#'
#' @references Coburn, K. M. & Vevea, J. L. (2015). Publication bias as a function
#' of study characteristics. Psychological Methods, 20(3), 310.
#'
#' Vevea, J. L. & Hedges, L. V. (1995). A general linear model for
#' estimating effect size in the presence of publication bias. Psychometrika, 60(3),
#' 419-435.
#'
#' Vevea, J. L. & Woods, C. M. (2005). Publication bias in research synthesis:
#' Sensitivity analysis using a priori weight functions. Psychological Methods, 10(4),
#' 428-443.
#' @examples
#' \dontrun{
#' # Uses the default p-value cutpoints of 0.05 and 1:
#'
#' weightfunct(effect, v)
#'
#' # Estimating a fixed-effect model, again with the default cutpoints:
#'
#' weightfunct(effect, v, fe=TRUE)
#'
#' # Specifying cutpoints:
#'
#' weightfunct(effect, v, steps=c(0.01, 0.025, 0.05, 0.10, 0.20, 0.30, 0.50, 1.00))
#'
#' # Including a linear model, where moderators are denoted as 'mod1' and mod2':
#'
#' weightfunct(effect, v, mods=~mod1+mod2)
#'
#' # Specifying cutpoints and weights to estimate Vevea and Woods (2005):
#'
#' weightfunct(effect, v, steps=c(0.01, 0.05, 0.50, 1.00), weights=c(1, .9, .7, .5))
#'
#' # Specifying cutpoints and weights while including a linear model:
#'
#' weightfunct(effect, v, mods=~mod1+mod2, steps=c(0.05, 0.10, 0.50, 1.00), weights=c(1, .9, .8, .5))
#' }
weightfunct <- function(effect, v, steps=c(0.025,1.00), mods=NULL,
weights=NULL, fe=FALSE, table=FALSE, pval=NULL){
## Function calculating the negative log-likelihood of the unadjusted
## meta-analytic model ##
neglike_unadj <- function(pars) {
if(fe == FALSE){
vc = pars[1]
beta = pars[2:(npred+2)]
mn = XX%*%beta
eta = sqrt(v + vc)
b = 1/2 * sum(((effect-mn)/eta)^2)
c = sum(log(eta))
}
else{
beta = pars[1:(npred+1)]
mn = XX%*%beta
eta = sqrt(v)
b = 1/2 * sum(((effect-mn)/eta)^2)
c = sum(log(eta))
}
return(b+c)
}
## Function calculating the negative log-likelihood of the adjusted
## meta-analytic model ##
neglike_adj <- function(pars) {
if(fe == FALSE){
vc = pars[1]
beta = pars[2:(npred+2)]
if(is.null(weights) == FALSE){
w = weights
}
else{
w = c(1,pars[(npred+3):( (nsteps - 2) + (npred+3) )])
}
contrib = log(w[wt])
mn = XX%*%beta
a = sum(contrib)
eta = sqrt(v + vc)
}
else{
beta = pars[1:(npred+1)]
if(is.null(weights) == FALSE){
w = weights
}
else{
w = c(1,pars[(npred+2):( (nsteps - 2) + (npred+2) )])
}
contrib = log(w[wt])
mn = XX%*%beta
a = sum(contrib)
eta = sqrt(v)
}
b = 1/2 * sum(((effect-mn)/eta)^2)
c = sum(log(eta))
Bij <- matrix(rep(0,number*nsteps),nrow=number,ncol=nsteps)
bi = -si * qnorm(steps[1])
Bij[,1] = 1-pnorm((bi-mn)/eta)
if(nsteps > 2){
for(j in 2:(length(steps)-1)) {
bi = -si * qnorm(steps[j])
bilast = -si * qnorm(steps[j-1])
Bij[,j] = pnorm((bilast-mn)/eta) - pnorm((bi-mn)/eta)
}
}
bilast = -si * qnorm(steps[length(steps)-1])
Bij[,length(steps)] = pnorm((bilast-mn)/eta)
swbij = 0
for(j in 1:length(steps)) swbij = swbij + w[j]*Bij[,j]
d = sum(log(swbij))
return(-a + b + c + d)
}
if(is.null(mods)){
npred <- 0
data <- data.frame(effect, v)
}
else{
if(typeof(mods)=="language"){
XX <- model.matrix(mods, model.frame(mods, na.action='na.pass'))
npred <- dim(XX)[2]-1
data <- data.frame(effect, v, XX)
}else{
stop('Moderators must be entered as "mods= ~ x1 + x2"')
}
}
if(any(is.na(data))){
data <- na.omit(data)
removed <- as.numeric(na.action(data))
}
else{
removed <- NULL
}
effect <- data[,1]
v <- data[,2]
if(npred == 0){
XX <- cbind(rep(1,length(effect)))
}
else{
XX <- as.matrix(data[,(3:(npred+3))])
}
if(length(effect)!=length(v)){
stop('Your vector of effect sizes and your vector of sampling variances are not the same length. Please check your data.')
}
if(identical(effect,v)){
stop('Your vector of effect sizes is exactly the same as your vector of sampling variances. Please check your data.')
}
if(min(v) < 0){
stop('Sampling variances cannot be negative. Please check your data.')
}
si <- sqrt(v)
################# To add: If users specify p-values, trap any missing p-values and replace them
### with manually calculated ones.
if(is.null(pval)){
p <- 1-pnorm(effect/sqrt(v))
}
else{
p <- pval
}
if(max(steps)!=1){
steps <- c(steps,1)
}
if(max(steps) > 1 | min(steps) < 0){
stop('p-value steps must be bounded by 0 and 1.')
}
if(length(unique(steps)) != length(steps)){
stop('Two or more p-value cutpoints are identical.')
}
if(is.null(weights)){
steps <- sort(steps)
}
if(is.null(weights) == FALSE){
if(min(weights) < 0){
stop('Weights for p-value intervals cannot be negative.')
}
if(length(weights)!=length(steps)){
stop('The number of weights does not match the number of p-value intervals created.')
}
    # sort the cutpoints and reorder the weights with the same permutation
    ord <- order(steps)
    steps <- steps[ord]
    weights <- weights[ord]
}
number <- length(effect)
nsteps <- length(steps)
wt <- rep(1,number)
for(i in 1:number) {
if(p[i] <= steps[1]) wt[i] = 1
for(j in 2:nsteps) {
if (steps[j-1] < p[i] && p[i] <= steps[j]) wt[i] = j
}
if( p[i] > steps[nsteps-1] ) wt[i] = nsteps
}
pvalues <- as.numeric(table(intervaltally(p, steps)))
if(sum(table(intervaltally(p,steps)) == 0) >= 1){
warning('At least one of the p-value intervals contains no effect sizes, leading to estimation problems. Consider re-specifying the cutpoints.')
}
if(sum( table(intervaltally(p,steps)) > 0 & table(intervaltally(p, steps)) <= 3) >= 1){
warning('At least one of the p-value intervals contains three or fewer effect sizes, which may lead to estimation problems. Consider re-specifying the cutpoints.')
}
if(is.null(mods)){
pars <- c(mean(v)/4, mean(effect), rep(0,(nsteps-1)))
if(fe == FALSE){
output_unadj <- optim(par=pars[1:2],fn=neglike_unadj,lower=c(0,-Inf),method="L-BFGS-B",hessian=TRUE)
output_adj <- optim(par=pars,fn=neglike_adj,lower=c(0,-Inf, rep(0.01,(nsteps-1))),method="L-BFGS-B",hessian=TRUE)
}
if(fe == TRUE){
output_unadj <- optim(par=pars[2],fn=neglike_unadj,lower=c(-Inf),method="L-BFGS-B",hessian=TRUE)
output_adj <- optim(par=pars[2:length(pars)],fn=neglike_adj,lower=c(-Inf, rep(0.01,(nsteps-1))),method="L-BFGS-B",hessian=TRUE)
}
}
else{
if(typeof(mods)=="language"){
pars <- c(mean(v)/4, mean(effect), rep(0, npred), rep(1, (nsteps - 1)))
if(fe == FALSE){
output_unadj <- optim(par=pars[1:(npred+2)],fn=neglike_unadj,lower=c(0,rep(-Inf, (npred+1))),method="L-BFGS-B",hessian=TRUE)
output_adj <- optim(par=pars,fn=neglike_adj,lower=c(0,rep(-Inf, (npred+1)),rep(0.01,(nsteps-1))),method="L-BFGS-B",hessian=TRUE)
}
if(fe == TRUE){
output_unadj <- optim(par=pars[2:(npred+2)],fn=neglike_unadj,lower=c(rep(-Inf, (npred+1))),method="L-BFGS-B",hessian=TRUE)
output_adj <- optim(par=pars[2:length(pars)],fn=neglike_adj,lower=c(rep(-Inf, (npred+1)),rep(0.01,(nsteps-1))),method="L-BFGS-B",hessian=TRUE)
}
}
}
unadj_est <- c(output_unadj$par)
unadj_se <- c(sqrt(diag(solve(output_unadj$hessian))))
adj_est <- c(output_adj$par)
if(is.null(weights)){
adj_se <- c(sqrt(diag(solve(output_adj$hessian))))
}else{
adj_se <- c(NULL)
}
## This is a test for heterogeneity when there are no moderators specified
## and a test for residual heterogeneity when there ARE. The difference
## can be indicated conditionally in the print statement. It's the
## same for both the unadjusted and adjusted models as it doesn't involve
## any actual parameter estimates.
w.FE <- diag((1/v), nrow=number, ncol=number)
stXWX <- tcrossprod(qr.solve( (sqrt(w.FE) %*% XX), diag(number) ))
P <- w.FE - w.FE %*% XX %*% stXWX %*% crossprod(XX, w.FE)
QE <- max(0, c(crossprod(as.matrix(effect), P) %*% as.matrix(effect)))
  ## degrees of freedom match the Q(df = k - npred - 1) reported by print.weightfunct
  QEp <- pchisq(QE, df=(number - npred - 1), lower.tail=FALSE)
## This is the test for heterogeneity in the unadjusted model when
## there are moderators present.
## There's one of these tests for each of the adjusted and unadjusted models
## because the conditional mean estimates are adjusted.
if(npred > 0){
if(fe==FALSE){
beta_vect <- c(output_unadj$par[2:(npred+2)])
w.M <- 1/(v+output_unadj$par[1])
beta_vect2 <- c(output_adj$par[2:(npred+2)])
w.M2 <- 1/(v+output_adj$par[1])
}else{
beta_vect <- c(output_unadj$par[1:(npred+1)])
w.M <- 1/v
beta_vect2 <- c(output_adj$par[1:(npred+1)])
w.M2 <- 1/v
}
w.M <- diag(w.M, nrow=number, ncol=number)
w.M2 <- diag(w.M2, nrow=number, ncol=number)
swx <- sqrt(w.M) %*% XX
swx2 <- sqrt(w.M2) %*% XX
res.qrs <- qr.solve(swx, diag(number))
res.qrs2 <- qr.solve(swx2, diag(number))
vb <- tcrossprod(res.qrs)
vb2 <- tcrossprod(res.qrs2)
QM <- as.vector(t(beta_vect[2:(npred+1)]) %*% chol2inv(chol(vb[(2:(npred+1)),(2:(npred+1))])) %*% beta_vect[2:(npred+1)])
QMp <- pchisq(QM, df=(length(beta_vect)-1), lower.tail=FALSE)
QM2 <- as.vector(t(beta_vect2[2:(npred+1)]) %*% chol2inv(chol(vb2[(2:(npred+1)),(2:(npred+1))])) %*% beta_vect2[2:(npred+1)])
QMp2 <- pchisq(QM2, df=(length(beta_vect2)-1), lower.tail=FALSE)
}else{
## If there's no moderators, QM doesn't even get printed, so
## it's replaced with NA.
QM <- NA
QMp <- NA
QM2 <- NA
QMp2 <- NA
}
results <- c(list(output_unadj=output_unadj,
output_adj=output_adj,
steps=steps,
mods=mods,
weights=weights,
fe=fe,
table=table,
effect=effect,
v=v,
npred=npred,
nsteps=nsteps,
k=number,
p=p,
removed=removed,
XX=XX),
list(unadj_est=cbind(unadj_est),
unadj_se=cbind(unadj_se),
adj_est=cbind(adj_est),
adj_se=cbind(adj_se),
z_unadj=cbind(unadj_est/unadj_se),
z_adj=cbind(adj_est/adj_se),
p_unadj=cbind(2*pnorm(-abs(unadj_est/unadj_se))),
p_adj=cbind(2*pnorm(-abs(adj_est/adj_se))),
ci.lb_unadj=cbind(unadj_est - qnorm(0.975) * unadj_se),
ci.ub_unadj=cbind(unadj_est + qnorm(0.975) * unadj_se),
ci.lb_adj=cbind(adj_est - qnorm(0.975) * adj_se),
                        ci.ub_adj=cbind(adj_est + qnorm(0.975) * adj_se)
),
list(
QE = QE,
QEp = QEp,
QM = QM,
QMp = QMp,
QM2 = QM2,
QMp2 = QMp2
)
)
class(results) <- c("weightfunct")
return(results)
}
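## --- Illustrative sketch (not part of the original package) ---------------
## A hedged example (hypothetical object name 'res') showing how the
## likelihood-ratio test described in the details section can be reproduced
## from the returned lists: both optim() fits store the negative
## log-likelihood in $value, and the degrees of freedom equal the number of
## estimated weights.
if (FALSE) {
  res     <- weightfunct(effect, v)   # 'effect' and 'v' as in the examples above
  df      <- length(res[[2]]$par) - length(res[[1]]$par)
  lrchisq <- 2 * abs(res[[1]]$value - res[[2]]$value)
  pchisq(lrchisq, df, lower.tail = FALSE)   # p-value of the LRT
}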
|
/scratch/gouwar.j/cran-all/cranData/weightr/R/weightfunct.R
|
library("shiny")
library("foreign")
library("gridExtra")
library("ggplot2")
library("plotly")
source("weightfunction.R")
shinyServer(function(input, output, session) {
filedata <- reactive({
inFile <- input$file1
if (is.null(inFile))
return(NULL)
return(read.table(inFile$datapath, header=input$header, sep=input$sep, quote=input$quote,fill=TRUE))
})
output$contents <- renderTable({
validate(
need(input$file1 !="NULL", "Please upload a data file."))
filedata()
})
output$selectdigits <- renderUI({
selectInput(inputId = "digits", label="Select the number of significant digits to report.", choices=c(1,2,3,4,5,6,7,8,9,10),selected=c(4),multiple=FALSE)
})
output$selecteffects <- renderUI({
#if(!is.null(filedata())){
colNames <- colnames(filedata())
selectInput(inputId = "effects", label="Select the variable containing your effect sizes.", choices=colNames,multiple=FALSE)#}
#else{
# return()
#}
})
output$selectvariances <- renderUI({
#if(!is.null(filedata())){
colNames <- colnames(filedata())
colNames2 <- colNames[colNames != input$effects] #WORKS
selectInput(inputId = "variances", label="Select the variable containing your sampling variances.", choices=colNames2,multiple=FALSE)#}
# else{
# return()
# }
})
output$thesearemyp <- renderUI({
colNames <- colnames(filedata())
colNames2 <- colNames[colNames != input$effects]
selectInput(inputId = "thesearemyp", label="Select that column.", choices=colNames2,selected=0,multiple=FALSE)
})
output$selectmods <- renderUI({
#if(!is.null(filedata())){
colNames <- colnames(filedata())
colNames2 <- colNames[colNames != input$effects]
colNames3 <- colNames2[colNames2 != input$variances]
selectInput(inputId = "moderators", label="Select any moderator variables to include.", choices=colNames3,multiple=TRUE)#}
#else{
# return()
#}
})
output$selectsteps <- renderUI({
#if(!is.null(filedata())){
selectizeInput(inputId = "steps", label="Select at least one p-value cutpoint to include in your model. To include a cutpoint not provided, type it in and press enter.", choices=c(0.001,
0.005,
0.010,
0.020,
0.025,
0.050,
0.100,
0.200,
0.250,
0.300,
0.350,
0.500,
0.600,
0.650,
0.700,
0.750,
0.800,
0.900,
0.950,
0.975,
0.980,
0.990,
0.995,
0.999),
multiple=TRUE,
selected=c(0.025), options=list(create=TRUE,openOnFocus=TRUE))#}
# else{
# return()
#}
})
output$presetweights <- renderUI({
steps <- c(sort(input$steps),1.00)
    lapply(1:length(steps), function(i) {
      if(i == 1){
        numericInput(paste("weight", i), paste('<', steps[1]), value = 1, width = '25%')
      } else {
        numericInput(paste("weight", i), paste(steps[i - 1], "<", steps[i]), value = 1)
      }
    })
})
unadjustweightnomods <- reactive({
validate(need(input$file1 !="NULL", "Please upload a data file."),
need(input$effects != 0, "Please enter the column numbers of the data file containing your effect sizes and variances."))
if(length(input$moderators) == 0){
effect <- filedata()[,input$effects]
v <- filedata()[,input$variances]
if(input$selectp==TRUE){
p <- filedata()[,input$thesearemyp]
}
if(input$selectp==FALSE){
p <- 1-pnorm(effect/sqrt(v))
}
unadnomods <- weightfunction(effect=effect, v=v,npred=0, 600, 600, p=p)
unadnomods
}
})
unadjustweightmods <- reactive({
if(length(input$moderators) > 0){
npred <- length(input$moderators)
effect <- filedata()[,input$effects]
v <- filedata()[,input$variances]
number <- length(effect)
# XX <- matrix(nrow=number,ncol=(npred+1))
# XX[,1] <- rep(1,number)
# for(i in 2:(npred+1)){ XX[,i] <- filedata()[,input$moderators[i - 1]] }
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Xx <- matrix(nrow=number,ncol=npred)
# modnames <- rep(0, npred)
# for(i in 1:npred)
# {
# Xx[,i] <- filedata()[,input$moderators[i]]
# modnames[i] <- noquote(paste(c("Xx[,",i,"]","+","Xx[,",i + 1,"]"),collapse=" "))
# }
# XX <- model.matrix(~modnames)
Xx <- matrix(nrow=number,ncol=npred)
Xx <- as.data.frame(Xx)
for(i in 1:npred)
{
Xx[,i] <- filedata()[,input$moderators[i]]
colnames(Xx)[i] <- input$moderators[i]
}
XX <- model.matrix(~., Xx)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
npred <- dim(XX)[2] - 1
prednames <- colnames(XX)
if(input$selectp==TRUE){
p <- filedata()[,input$thesearemyp]
}
if(input$selectp==FALSE){
p <- 1-pnorm(effect/sqrt(v))
}
unadmods <- weightfunction(effect=effect,
v=v,
npred, steps=600, XX=XX, prednames=prednames, p=p)
unadmods
}
})
adjustweightnomods <- reactive({
if(length(input$moderators) == 0){
effect <- filedata()[,input$effects]
v <- filedata()[,input$variances]
steps <- c(as.numeric(sort(input$steps)),1.00)
if(input$selectp==TRUE){
p <- filedata()[,input$thesearemyp]
}
if(input$selectp==FALSE){
p <- 1-pnorm(effect/sqrt(v))
}
if(input$woods){
weights <- rep(0, length(steps))
for(i in 1:length(steps)){
weights[i] <- input[[paste("weight", i)]]
}
adnomods <- weightfunction(effect=effect, v=v, npred=0, steps=steps, 600, weights=weights, p=p)
}
else{
adnomods <- weightfunction(effect=effect, v=v, npred=0, steps=steps, 600, p=p)
}
adnomods
# format(adnomods, digits=input$digits)
}
})
adjustweightmods <- reactive({
if(length(input$moderators) > 0){
effect <- filedata()[,input$effects]
v <- filedata()[,input$variances]
npred <- length(input$moderators)
steps <- c(as.numeric(sort(input$steps)),1.00)
number <- length(effect)
if(input$selectp==TRUE){
p <- filedata()[,input$thesearemyp]
}
if(input$selectp==FALSE){
p <- 1-pnorm(effect/sqrt(v))
}
# XX <- matrix(nrow=number,ncol=(npred+1))
# XX[,1] <- rep(1,number)
# for(i in 2:(npred+1)){ XX[,i] <- filedata()[,input$moderators[i - 1]] }
Xx <- matrix(nrow=number,ncol=npred)
Xx <- as.data.frame(Xx)
for(i in 1:npred)
{
Xx[,i] <- filedata()[,input$moderators[i]]
colnames(Xx)[i] <- input$moderators[i]
}
XX <- model.matrix(~.,Xx)
npred <- dim(XX)[2] - 1
prednames <- colnames(XX)
if(input$woods){
weights <- rep(0, length(steps))
for(i in 1:length(steps)){
weights[i] <- input[[paste("weight", i)]]
}
admods <- weightfunction(effect=effect, v=v, npred, steps=steps, XX=XX, prednames, weights=weights, p=p)
}
else{
admods <- weightfunction(effect=effect, v=v, npred, steps=steps, XX=XX, prednames, p=p)
}
# format(admods, digits=input$digits)
admods
}
})
output$effects <- renderTable({
validate(need(input$file1 !="NULL", "Please upload a data file."))
})
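# Funnel-plot helpers: makedefaultPlot() draws standard error (x) against effect
# size (y) with 5% padding; makeotherPlot() is the flipped layout used when
# input$flip is TRUE, with 2.5% padding.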
makedefaultPlot <- function(effect, v){
range_effect <- max(effect) - min(effect)
range_v <- max(sqrt(v)) - min(sqrt(v))
lowbound_effect <- min(effect) - 0.05*range_effect
upbound_effect <- max(effect) + 0.05*range_effect
lowbound_v <- min(sqrt(v)) - 0.05*range_v
upbound_v <- max(sqrt(v)) + 0.05*range_v
plot(sqrt(v),effect, xlim=c(lowbound_v,upbound_v), ylim=c(lowbound_effect, upbound_effect), xlab="Standard Error", ylab="Effect Size")
}
makeotherPlot <- function(effect, v){
range_effect <- max(effect) - min(effect)
range_v <- max(sqrt(v)) - min(sqrt(v))
lowbound_effect <- min(effect) - 0.025*range_effect
upbound_effect <- max(effect) + 0.025*range_effect
lowbound_v <- min(sqrt(v)) - 0.025*range_v
upbound_v <- max(sqrt(v)) + 0.025*range_v
plot(effect,sqrt(v), xlim=c(lowbound_effect,upbound_effect), ylim=c(lowbound_v, upbound_v), xlab="Effect Size", ylab="Standard Error")
}
output$funnelplot <- renderPlot({
validate(need(input$file1 !="NULL", "Please upload a data file."),
need(input$effects !=0, "Please enter the column numbers of the data file containing your effect sizes and variances."))
effect <- filedata()[,input$effects]
v <- filedata()[,input$variances]
steps <- 1 - as.numeric(sort(input$steps))
range_v <- max(sqrt(v)) - min(sqrt(v))
lowbound_v <- min(sqrt(v)) - 0.05*range_v
upbound_v <- max(sqrt(v)) + 0.05*range_v
if(input$flip == FALSE){
if(input$interact == FALSE){
print(makedefaultPlot(effect, v))
if(input$contour == FALSE){
if(input$estimates == TRUE){
abline(h=unadjustweightnomods()[2,2], col="red")
}
if(input$estimates == TRUE && length(input$moderators) > 0){
abline(h=unadjustweightmods()[2,2], col="red")
}
if(input$estimates2 == TRUE){
abline(h=adjustweightnomods()[2,2], col="blue")
}
if(input$estimates2 == TRUE && length(input$moderators) > 0){
abline(h=adjustweightmods()[2,2], col="blue")
}
}
else{
testv <- seq(lowbound_v, upbound_v, 0.01)
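# Contour lines: 'steps' was converted to 1 - cutpoint above, so each pair of
# lines marks effect = 0 +/- qnorm(1 - cutpoint) * SE, i.e. the one-tailed
# significance boundaries. The flipped and interactive variants below repeat
# the same construction.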
for(i in 1:length(steps)){
lines(testv, 0 + -qnorm(steps[i])*testv)
lines(testv, 0 - -qnorm(steps[i])*testv)
# lines(testv, qnorm(steps[i], 0, testv))
}
if(input$estimates == TRUE){
abline(h=unadjustweightnomods()[2,2], col="red")
}
if(input$estimates == TRUE && length(input$moderators) > 0){
abline(h=unadjustweightmods()[2,2], col="red")
}
if(input$estimates2 == TRUE){
abline(h=adjustweightnomods()[2,2], col="blue")
}
if(input$estimates2 == TRUE && length(input$moderators) > 0){
abline(h=adjustweightmods()[2,2], col="blue")
}
}
}
}
else {
print(makeotherPlot(effect, v))
if(input$contour == FALSE){
if(input$estimates == TRUE){
abline(v=unadjustweightnomods()[2,2], col="red")
}
if(input$estimates == TRUE && length(input$moderators) > 0){
abline(v=unadjustweightmods()[2,2], col="red")
}
if(input$estimates2 == TRUE){
abline(v=adjustweightnomods()[2,2], col="blue")
}
if(input$estimates2 == TRUE && length(input$moderators) > 0){
abline(v=adjustweightmods()[2,2], col="blue")
}
}
else{
testv <- seq(lowbound_v, upbound_v, 0.01)
for(i in 1:length(steps)){
lines((0 + -qnorm(steps[i])*testv), testv)
lines((0 - -qnorm(steps[i])*testv), testv)
# lines(qnorm(steps[i], 0, testv), testv)
}
##### NOTE to self -- I never added lines at moderators.
### Possibly should do?
if(input$estimates == TRUE){
abline(v=unadjustweightnomods()[2,2], col="red")
}
if(input$estimates == TRUE && length(input$moderators) > 0){
abline(v=unadjustweightmods()[2,2], col="red")
}
if(input$estimates2 == TRUE){
abline(v=adjustweightnomods()[2,2], col="blue")
}
if(input$estimates2 == TRUE && length(input$moderators) > 0){
abline(v=adjustweightmods()[2,2], col="blue")
}
}
}
}
)
output$plotly <- renderPlot({
validate(need(input$file1 !="NULL", "Please upload a data file."),
need(input$effects !=0, "Please enter the column numbers of the data file containing your effect sizes and variances."))
effect <- filedata()[,input$effects]
v <- filedata()[,input$variances]
steps <- 1 - as.numeric(sort(input$steps))
if(input$flip==FALSE){
range_effect <- max(effect) - min(effect)
range_v <- max(sqrt(v)) - min(sqrt(v))
lowbound_effect <- min(effect) - 0.05*range_effect
upbound_effect <- max(effect) + 0.05*range_effect
lowbound_v <- min(sqrt(v)) - 0.05*range_v
upbound_v <- max(sqrt(v)) + 0.05*range_v
plot(sqrt(v),effect, xlim=c(lowbound_v,upbound_v), ylim=c(lowbound_effect, upbound_effect), xlab="Standard Error", ylab="Effect Size")
if(input$contour==TRUE){
testv <- seq(lowbound_v, upbound_v, 0.01)
for(i in 1:length(steps)){
lines(testv, 0 + -qnorm(steps[i])*testv)
lines(testv, 0 - -qnorm(steps[i])*testv)
}
}
if(input$estimates==TRUE){
abline(h=unadjustweightnomods()[2,2], col="red")
}
if(input$estimates2==TRUE){
abline(h=adjustweightnomods()[2,2], col="blue")
}
}
else{
range_effect <- max(effect) - min(effect)
range_v <- max(sqrt(v)) - min(sqrt(v))
lowbound_effect <- min(effect) - 0.025*range_effect
upbound_effect <- max(effect) + 0.025*range_effect
lowbound_v <- min(sqrt(v)) - 0.025*range_v
upbound_v <- max(sqrt(v)) + 0.025*range_v
plot(effect,sqrt(v), xlim=c(lowbound_effect,upbound_effect), ylim=c(lowbound_v, upbound_v), xlab="Effect Size", ylab="Standard Error")
if(input$contour==TRUE){
testv <- seq(lowbound_v, upbound_v, 0.01)
for(i in 1:length(steps)){
lines((0 + -qnorm(steps[i])*testv), testv)
lines((0 - -qnorm(steps[i])*testv), testv)
}
}
if(input$estimates==TRUE){
abline(v=unadjustweightnomods()[2,2], col="red")
}
if(input$estimates2==TRUE){
abline(v=adjustweightnomods()[2,2], col="blue")
}
}
############################ ADD OPTIONS HERE FOR OTHER CHECKBOXES
######### LINES AT ESTIMATES, CONTOUR LINES
})
output$info <- renderText({
xy_str <- function(e) {
if(is.null(e)) return("NULL\n")
paste0("x=", round(e$x, 1), " y=", round(e$y, 1), "\n")
}
xy_range_str <- function(e) {
if(is.null(e)) return("NULL\n")
paste0("xmin=", round(e$xmin, 1), " xmax=", round(e$xmax, 1),
" ymin=", round(e$ymin, 1), " ymax=", round(e$ymax, 1))
}
pval_str <- function(e) {
if(is.null(e)) return("NULL\n")
# Which coordinate holds the effect size depends on whether the axes are flipped
z <- if(input$flip) e$x/e$y else e$y/e$x
paste0("pval=", round(1 - pnorm(z), digits=2),"\n")
}
paste0(
"click: ", xy_str(input$plot_click),
"dblclick: ", xy_str(input$plot_dblclick),
"hover: ", pval_str(input$plot_hover),
"brush: ", xy_range_str(input$plot_brush)
)
})
output$density <- renderPlot({
validate(need(input$file1 !="NULL", "Please upload a data file."),
need(input$effects !=0, "Please enter the column numbers of the data file containing your effect sizes and variances."),
need(length(input$moderators) == 0, "Please remove the moderators from your model to view this plot. The plot does not incorporate moderators."))
effect <- filedata()[,input$effects]
v <- filedata()[,input$variances]
steps <- c(as.numeric(sort(input$steps)),1.00)
print(makeDensityPlot(effect, v, steps))
})
######### DENSITY PLOTS HERE TO NEXT LINE #############
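# makeDensityPlot() contrasts two densities for the observed effect sizes, using
# the median sampling variance vi as a typical value:
#   dashed line: expected density, Normal(mu1, vi + vc1) from the unadjusted fit;
#   solid line:  the same curve reweighted piecewise over the p-value intervals
#                by the estimated selection weights.
# A0 and A1 are numeric integrals (step 0.01) used to rescale each curve to unit area.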
makeDensityPlot <- function(effect, v, steps){
######## I THINK this works .... triple and quadruple check the damn thing!
#Identifying appropriate values
vc1 <- unadjustweightnomods()[1,2]
mu1 <- unadjustweightnomods()[2,2]
vc2 <- adjustweightnomods()[1,2]
mu2 <- adjustweightnomods()[2,2]
weights <- adjustweightnomods()[3:(length(steps)+1),2]
cuts <- steps
x_low_lim <- min(effect) - 2
x_up_lim <- max(effect) + 2
# print(c(vc1, mu1, vc2, mu2, weights, cuts, x_low_lim, x_up_lim))
xfull <- seq(x_low_lim,x_up_lim,.01)
########### Trying the harmonic mean from Hoaglin as the average
## conditional variance, rather than the median. The harmonic mean
## actually appears to do a worse job. Weird.
vi <- median(v)
# w <- 1/v
# s_squared <- (length(effect)-1)/( sum( w ) - sum( w^2 / sum( w ) ) )
# vi <- s_squared
###########
fx <- ( 1/(sqrt(2*pi*(vi + vc1))) ) * exp( -1/2*( (xfull - mu1)^2 / (vi + vc1) ) )
yfull <- fx
A0 <- sum(rep(.01,length(xfull))*yfull)
# fx2 <- ( 1/(sqrt(2*pi*(vi + vc2))) ) * exp( -1/2*( (xfull - mu2)^2 / (vi + vc2) ) )
fx2 <- ( 1/(sqrt(2*pi*(vi + vc1))) ) * exp( -1/2*( (xfull - mu1)^2 / (vi + vc1) ) )
testlist <- -1 * qnorm(steps, 0, sqrt(vi + vc2))
testxfull <- findInterval(xfull,sort(testlist))
xlist <- split(xfull, testxfull)
ylist <- split(fx2, testxfull)
weights2 <- rev(c(1, weights))
testyfull <- mapply("*", ylist, weights2)
A1 <- sum(rep(.01,length(unlist(xlist)))*unlist(testyfull))
#Creating the plot
plot(c(x_low_lim,x_up_lim), c(0,(max(as.numeric(unlist(testyfull))/A1)+0.10)), type='n', xlab='Sample Effect Size',
ylab='Density',axes=FALSE,lwd=2,font.lab=2,main='Expected and Adjusted Densities')
box(lwd=2)
axis(side=2,font=2)
axis(side=1,font=2)
abline(c(0,0),lwd=2)
#Drawing unadjusted density
# lines(xfull,yfull,lty=2,lwd=2)
lines(xfull,yfull/A0,lty=2,lwd=2)
# lines(as.numeric(unlist(xlist)), as.numeric(unlist(testyfull)))
lines(as.numeric(unlist(xlist)), as.numeric(unlist(testyfull))/A1)
print("TEST")
}
######################################################
output$funnelplot2 <- renderUI({
plotOutput("funnelplot", width=paste0(input$width, "%"), height=input$height)
})
output$downloadfunnelplot <- downloadHandler(
filename = function(){
paste('funnelplot', Sys.Date(), '.pdf', sep='')
},
content = function(FILE=NULL) {
effect <- filedata()[,input$effects]
v <- filedata()[,input$variances]
pdf(file=FILE)
if(input$flip == FALSE){
print(makedefaultPlot(effect,v))}
else{
print(makeotherPlot(effect,v))
}
dev.off()
}
)
output$unadjustedweightfunction <- renderTable({
if(length(input$moderators) == 0){
format(unadjustweightnomods(), digits=input$digits)
}
else{
format(unadjustweightmods(), digits=input$digits)
}
})
output$questionmark <- renderImage({
list(src = './www/questionmark.png',width=17,height=17, alt = "Question_Mark")
}, deleteFile=FALSE)
output$questionmark2 <- renderImage({
list(src = './www/questionmark.png',width=17,height=17, alt = "Question_Mark")
}, deleteFile=FALSE)
output$adjustedweightfunction <- renderTable({
if(length(input$moderators) == 0){
validate(need(input$file1 !="NULL", "Please upload a data file."),
need(input$effects !=0, "Please enter the column numbers of the data file containing your effect sizes and variances."),
need(input$steps !=0, "Please select at least one p-value cutpoint to include in your model."))
intervaltally <- function(p, steps) {
p1 <- cut(p, breaks=c(-Inf,steps), labels=steps)
return(p1) }
effect <- filedata()[,input$effects]
v <- filedata()[,input$variances]
steps <- c(as.numeric(sort(input$steps)),1.00)
if(input$effects != "NULL") {
if(input$selectp==TRUE){
p <- filedata()[,input$thesearemyp]
}
if(input$selectp==FALSE){
p <- 1-pnorm(effect/sqrt(v))
}
pvalues <- as.numeric(table(intervaltally(p, steps)))
}
format(adjustweightnomods(), digits=input$digits)
# adjustweightnomods()
}
else{
intervaltally <- function(p, steps) {
p1 <- cut(p, breaks=c(-Inf,steps), labels=steps)
return(p1) }
effect <- filedata()[,input$effects]
v <- filedata()[,input$variances]
steps <- c(as.numeric(sort(input$steps)),1.00)
if(input$effects != "NULL") {
if(input$selectp==TRUE){
p <- filedata()[,input$thesearemyp]
}
if(input$selectp==FALSE){
p <- 1-pnorm(effect/sqrt(v))
}
pvalues <- as.numeric(table(intervaltally(p, steps)))
}
format(adjustweightmods(), digits=input$digits)
}
})
output$likelihoodratio <- renderTable({
validate(need(input$file1 !="NULL", "Please upload a data file."),
need(input$effects !=0, "Please enter the column numbers of the data file containing your effect sizes and variances."),
need(input$steps !=0, "Please select at least one p-value cutpoint to include in your model."),
need(input$woods==FALSE, "This is not valid under the Vevea and Woods (2005) model."))
if(length(input$moderators) == 0){
effect <- filedata()[,input$effects]
v <- filedata()[,input$variances]
steps <- c(as.numeric(sort(input$steps)),1.00)
if(input$selectp==TRUE){
p <- filedata()[,input$thesearemyp]
}
if(input$selectp==FALSE){
p <- 1-pnorm(effect/sqrt(v))
}
format(likelihoodfunct(effect=effect, v=v, npred=0, steps=steps, 600,p=p),digits=input$digits)
}
else{
effect <- filedata()[,input$effects]
v <- filedata()[,input$variances]
steps <- c(as.numeric(sort(input$steps)),1.00)
npred <- length(input$moderators)
if(input$selectp==TRUE){
p <- filedata()[,input$thesearemyp]
}
if(input$selectp==FALSE){
p <- 1-pnorm(effect/sqrt(v))
}
number <- length(effect)
XX <- matrix(nrow=number,ncol=(npred+1))
XX[,1] <- rep(1,number)
for(i in 2:(npred+1)){ XX[,i] <- filedata()[,input$moderators[i - 1]] }
format(likelihoodfunct(effect=effect, v=v, npred=npred, steps=steps, XX,p=p),digits=input$digits) }
})
output$samplesizes <- renderTable({
validate(need(input$file1 !="NULL", "Please upload a data file."),
need(input$effects !=0, "Please enter the column numbers of the data file containing your effect sizes and variances."),
need(input$steps !=0,"Please select at least one p-value cutpoint to include in your model."))
effect <- filedata()[,input$effects]
v <- filedata()[,input$variances]
intervaltally <- function(p, steps) {
p1 <- cut(p, breaks=c(-Inf,steps), labels=steps)
return(p1) }
steps <- c(as.numeric(sort(input$steps)),1.00)
if(input$selectp){
p <- filedata()[,input$thesearemyp]
}
else{
p <- 1-pnorm(effect/sqrt(v))
}
pvalues <- as.numeric(table(intervaltally(p, steps)))
format(sampletable(p=p, pvalues=pvalues, steps=steps), digits=input$digits)
})
#toggleModal(session, "samplesizes", toggle="toggle")
output$numberofeffects <- renderTable({
validate(need(input$file1 !="NULL", "Please upload a data file."),
need(input$effects !=0, "Please enter the column numbers of the data file containing your effect sizes and variances."),
need(input$steps !=0,"Please select at least one p-value cutpoint to include in your model."))
effect <- filedata()[,input$effects]
results <- matrix(nrow=1,ncol=1)
results[1,1] <- length(effect)
resultsb <- data.frame(results, row.names=c("k"))
colnames(resultsb) <- c("Total Number of Effects")
format(resultsb, digits=input$digits)
})
})
|
/scratch/gouwar.j/cran-all/cranData/weightr/inst/shiny/server.R
|
library("shiny")
shinyUI(fluidPage(theme = "bootstrap.css",
titlePanel("The Vevea and Hedges Weight-Function Model for Publication Bias"),
sidebarLayout(
sidebarPanel(
"Choose a .csv or .txt file:", br(),
fileInput('file1', ' ',
accept=c('text/csv',
'text/comma-separated-values,text/plain',
'.csv','.txt')),
#tags$hr(),
"Does your data file contain a header?",
checkboxInput('header','Yes', TRUE),
"Are your data separated by commas, semicolons, tabs, or spaces?",
radioButtons('sep', ' ',
c(Commas=',',
Semicolons=';',
Tabs='\t',
Spaces=' '),
','),
"For columns of your data file including text, should quotes be included?",
radioButtons('quote', ' ',
c('No'='',
'Double Quotes'='"',
'Single Quotes'="'"),
''),p(),
#conditionalPanel(
# condition = "!is.null(filedata())",
# #bootstrapPage(div(
# #class="container-fluid",
# #div(class="row-fluid",
# #div(class="span6",
# h2("Source Code"),
# aceEditor("code", mode="r", value="x <- c(1, 2, 3); x"),
# actionButton("eval", "Evaluate"),
# #),
# #div(class="span6",
# #h2("Output"),
# #verbatimTextOutput("output"),
# #),
# #)
# #)),
# strong("Please enter the column numbers of the variables containing your effect sizes and sampling variances, in that order, separated by commas. Example: '1,2'"),p(),
# textInput('effects', ' ', value="0"),p(),
uiOutput("selecteffects"),p(),
uiOutput("selectvariances"),p(),
strong("Does your dataset contain p-values calculated from test statistics?"),p(),
checkboxInput("selectp",
label = "Yes", value = FALSE),p(),
conditionalPanel(
condition = "input.selectp",
uiOutput("thesearemyp")
),p(),
#strong("How many moderators would you like to include?"),p(),
uiOutput("selectmods"),p(),
#numericInput('npred', label = " ", value=0,
# min = 0, max = Inf),p(),
#strong("Please enter the column numbers of these moderators, separated by commas. Example: '3,4'"),p(),
#textInput('moderators', ' ', value="0"),p(),
#strong("Select at least one p-value cutpoint to include in your model. To include an interval not provided, type it in and press enter."),
#p(),
uiOutput("selectsteps"),p(),
strong("Estimate the Vevea and Woods (2005) model?"),p(),
checkboxInput("woods",
label = "Yes", value = FALSE),p(),
conditionalPanel(
strong("Enter a pre-specified weight for each of the p-value intervals."),p(),
condition = "input.woods",
uiOutput("presetweights")
),p(),
uiOutput("selectdigits")
# selectizeInput(
# 'steps', ' ', choices=c(0.001,
# 0.005,
# 0.010,
# 0.020,
# 0.025,
# 0.050,
# 0.100,
# 0.200,
# 0.250,
# 0.300,
# 0.350,
# 0.500,
# 0.600,
# 0.650,
# 0.700,
# 0.750,
# 0.800,
# 0.900,
# 0.950,
# 0.975,
# 0.980,
# 0.990,
# 0.995,
# 0.999),
# multiple=TRUE,
# selected=c(0.050), options=list(create=TRUE,openOnFocus=TRUE))
),
mainPanel(
tabsetPanel(type = "tabs",
tabPanel("About", icon=icon("book", lib="font-awesome"),
#strong('What is the Vevea and Hedges Weight-Function Model?'), p("This model was introduced by Jack L. Vevea and Larry V. Hedges in 1995. It was ..."), strong('How to Use the Model'), p("Use the panel on the left to read in your data file. While doing so, you can click on the tab labeled 'Data File' to verify that it has been uploaded correctly. You can read in your data as either a .txt file or a .csv file. You can also specify whether your data file is comma-, tab-, or space-delimited."), p("Once you are satisfied that your data was read correctly, scroll down and enter the numbers of the columns holding your effect sizes and sampling variances, in that order. The columns are numbered from left to right. For instance, if your effects are in the first column and your variances in the second, you should enter '1, 2' (without quotes.) Make sure that the first number represents the column of effects and the second the column of variances."), p("Next, you can specify the model. If you would like to include moderators, enter the number of moderators you'd like to include (i.e., if one moderator, enter '1' and so on). Then enter the column numbers of these moderators; if they are in columns three and four, enter '3, 4' (again, without quotes)."), p("Lastly, check the boxes corresponding to the p-value intervals you want to estimate. If you make no changes, the default model is estimated with two intervals (p-values less than 0.05, and p-values greater than 0.05 but less than 1.00 -- in other words, a distinction between significant vs. non-significant p-values). Note that you can assess one-tailed or two-tailed publication bias by checking the appropriate boxes; for more information, see Vevea & Hedges (1995) or Vevea & Woods (2005)."), p("Now that you've entered the data and specified your model, you can click on the 'Unadjusted Random-Effects Model' and 'Adjusted Random-Effects Model' tabs to view your results. There are several components of interest. The unadjusted model will present you with a variance component, intercept, and any moderator estimates from a random- or mixed-effects model that has not been adjusted for publication bias. The adjusted model will present you with adjusted estimates, in addition to weights that correspond to your specified p-value intervals. For instance, if you specified the default intervals of 0.05 and 1.00, the first interval will correspond to p-values less than 0.05, and the second to p-values between 0.05 and 1.00. The adjusted weight-function model always fixes the first weight to one for estimation purposes. This means your output will only contain one weight, which can be interpreted in relation to the first weight. In other words, if you specified the above intervals and obtained a weight of 0.50, your results indicate that effects with p-values greater than 0.05 are half as likely to survive the selection process as significant effects."), p("Keep in mind that all the weights in your output should be interpreted relative to the first weight, which is fixed to one -- i.e., if a weight is estimated at 1.76, p-values in that interval are 1.76 times as likely to survive the selection process as p-values in your first interval. If there are no observed effects or less than three observed effects in one of your p-value intervals, the program will print a warning. This is because the weights may be biased if there are not enough effect sizes in each interval. 
If this is the case, try re-specifying the p-value intervals."), p("The last part of the 'Adjusted Model' output that is of interest is the likelihood ratio test. This test compares the adjusted model to the unadjusted model, with degrees of freedom corresponding to the number of intervals estimated, and indicates whether the adjusted model fits your data significantly better than the unadjusted model -- that is, whether significant information is gained by estimating the selection model and adjusting the mean and moderator estimates."), strong('Concluding Thoughts'), p("Keep in mind that, as with all other assessments of publication bias, the results this model produces should be considered primarily as a sensitivity analysis. It may be a good idea to run this model multiple times on your data, specifying different p-value intervals each time, and to assess the magnitude of the adjusted mean estimate each time. For instance, if the unadjusted model produces a mean effect of d = 0.50 and, using the adjusted model, you obtain means ranging from d = 0.01 to d = 0.85, this is a sign that your results may not be robust to publication bias. Again, this, like all other assessments, is a sensitivity analysis; it will not allow you to firmly say that publication bias is or is not present, but it will allow you to say, 'If publication bias were present, my data would likely be robust to its effects.'"),
p("To use this model, follow the steps in the panel on the left to read in your data file. You can click the tab labeled 'Data File' to ensure your data were read in correctly. Use the drop-down menus to select the columns containing your effect sizes and sampling variances. Note that your data file must contain a column of effect sizes and their corresponding sampling variances for estimation to work. Lastly, select any moderator variables and p-value cutpoints to include in your model. The tabs labeled 'Funnel Plot' and 'Model Results' will then display a funnel plot of your effect sizes and the model output, respectively. For more information on use of the model, we refer you to Vevea and Hedges (1995) and Hedges and Vevea (1996)."),p("We advise users to keep in mind that, as with all other assessments of publication bias, the use of this model should be primarily considered a sensitivity analysis. It is a good idea to run this model multiple times on your data, specifying different p-value cutpoints each time, and to assess the magnitude of the adjusted mean estimate. For instance, if the unadjusted model produces a mean effect of d = 0.50 and, using the adjusted model, you obtain means ranging from d = -0.01 to d = 0.85, this is a sign that publication bias may be present in your data."), p("If you have questions or comments about using or interpreting the model, or if you have a bug or issue to report about this Shiny app, please feel free to send an email to Dr. Jack Vevea or Kathleen Coburn."), br(), strong('Authors:'), p(a("Dr. Jack L. Vevea", href="http://faculty.ucmerced.edu/jvevea/", target="_blank"), "(model and application)"),p(a("Kathleen Coburn", href="http://www.katiecoburn.weebly.com", target="_blank"),"(application)"),br(), strong('References:'), p(a("Hedges, L. V. & Vevea, J. L. (1996). Estimating effect size under publication bias: Small sample properties and robustness of a random effects selection model. Journal of Educational and Behavioral Statistics, 21, 299-333.", href="faculty.ucmerced.edu/jvevea/other/Hedges_Vevea_1996.pdf", target="_blank")), p(a("Vevea, J. L. & Hedges, L. V. (1995). A general linear model for estimating effect size in the presence of publication bias. Psychometrika, 60(3), 419-435.", href="faculty.ucmerced.edu/jvevea/other/Vevea_and_Hedges_1995.pdf", target="_blank" )),p(a("Vevea, J. L. & Woods, C. M. (2005). Publication bias in research synthesis: Sensitivity analysis using a priori weight functions. Psychological Methods, 10, 428.", href="faculty.ucmerced.edu/jvevea/other/Vevea_and_Woods_psy_meth.pdf", target="_blank"))),
tabPanel("Data File",icon=icon("list", lib="font-awesome"), tableOutput("contents")),
#tabPanel("Sampling Variance Computation", radioButtons("ES", label="What is your effect size metric?", choices=list("Odds Ratio" = 1, "Correlation" = 2, "Standardized Mean Difference" = 3, "Risk Ratio" = 4), selected=2),p(),c("Please enter the column number of the variable containing your effect sizes:"),p(), textInput("effectsb", ' ', value="0"),tableOutput("effectsc")),
tabPanel("Funnel Plot",icon=icon("desktop", lib="font-awesome"),checkboxInput('interact','Make funnel plot interactive', FALSE),p(),checkboxInput('flip','Plot effect sizes on x-axis', FALSE),p(),checkboxInput('estimates','Show unadjusted mean estimate (in red)',FALSE),p(),checkboxInput('estimates2','Show adjusted mean estimate (in blue)',FALSE),p(),checkboxInput('contour','Add contour lines to funnel plot at p-value cutpoints', FALSE), p(), conditionalPanel(condition="input.interact == false", uiOutput("funnelplot2"),p(),p("If you add contour lines to this plot, they will be drawn at your specified p-value cutpoints -- that is, a cutpoint at 0.05 will draw a 95% confidence interval, one at 0.10 will draw a 90%, and so on. If you have specified a lot of cutpoints, this may be confusing; you can always modify the cutpoints, but keep in mind that your model results will be affected as well."),sliderInput(inputId = "height", label = "Plot Height (px):", min = 0, max = 400, step = 1, value = 400),p(), sliderInput(inputId = "width", label = "Plot Width (%):", min = 0, max = 100, step = 1, value = 100),p(),downloadButton('downloadfunnelplot','Download the plot as a .pdf')), conditionalPanel(condition="input.interact == true", plotOutput("plotly", click="plot_click",dblclick="plot_dblclick",hover="plot_hover",brush="plot_brush"),p(),p("If you add contour lines to this plot, they will be drawn at your specified p-value cutpoints -- that is, a cutpoint at 0.05 will draw a 95% confidence interval, one at 0.10 will draw a 90%, and so on. If you have specified a lot of cutpoints, this may be confusing; you can always modify the cutpoints, but keep in mind that your model results will be affected as well."),p(),p("The following panel gives you information about the funnel plot. If you click on a point, double-click on it, hover over it, or highlight a range of points (brush), that information will appear below."), verbatimTextOutput("info"))),tabPanel("Density Plot",icon=icon("desktop", lib="font-awesome"),plotOutput("density"),strong('Interpretation:'), p(), p("If no publication bias is present, your effect sizes are assumed to be normally distributed with a mean equal to their unadjusted mean and a variance equal to their unadjusted variance component plus their typical sampling variance. This expected density, unaffected by publication bias, is depicted by the dashed line."), p("Once the user specifies some p-value intervals using the toolbar on the left, the model estimates weights for those intervals. The solid line depicts the adjusted density, where the expected density for effect sizes within your given p-value intervals is multiplied by the estimated weight for the interval. Greater density in an area therefore represents a greater likelihood of observing effect sizes. Remember that the weight for the first interval is fixed to one and other intervals should be interpreted relative to it.")),
#tabPanel("Unadjusted Random-Effects Model", tableOutput("unadjustedweightfunct")),
# tabPanel("Adjusted Random-Effects Model", textOutput("errormessage"),tableOutput("adjustedweightfunct"), strong("Likelihood ratio test comparing this model to its unadjusted version:"), p(),tableOutput("likelihoodratio")),
tabPanel("Model Results", icon=icon("desktop", lib="font-awesome"), strong("Unadjusted Model:"), p(),tableOutput("unadjustedweightfunction"),p(),strong("Adjusted Model:"),imageOutput("questionmark",width=17,height=17,inline=TRUE), p(),tableOutput("adjustedweightfunction"),p(), strong("Likelihood Ratio Test:"),imageOutput("questionmark2",width=17,height=17,inline=TRUE),p(),tableOutput("likelihoodratio"),p(),strong("Effect Sizes Per Interval:"),p(),tableOutput("samplesizes"),p(),tableOutput("numberofeffects"))
)
))))
|
/scratch/gouwar.j/cran-all/cranData/weightr/inst/shiny/ui.R
|
weightfunction <- function(effect, v, npred, steps, XX, prednames, weights=NULL, p) {
si <- sqrt(v)
effect <- effect
number <- length(effect)
v <- v
p <- p
neglike1 <- function(pars) {
vc = pars[1]
beta = pars[2:(npred+2)]
mn = XX%*%beta
eta = sqrt(v + vc)
b = 1/2 * sum(((effect-mn)/eta)^2)
c = sum(log(eta))
return(b+c)
}
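# neglike2: negative log-likelihood of the selection (weight-function) model.
# 'a' sums the log selection weights of the intervals the observed p-values fall
# into (wt is assigned before optim() is called), 'b' and 'c' are the normal
# density terms, and 'd' is the log normalising constant sum_j w_j * B_ij, where
# B_ij is the probability that study i falls in p-value interval j.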
neglike2 <- function(pars) {
vc = pars[1]
beta = pars[2:(npred+2)]
if(is.null(weights)==FALSE){
w <- weights
}
else{
w = c(1,pars[(npred+3):length(pars)])
}
mn = XX%*%beta
a = sum(log(w[wt]))
eta = sqrt(v + vc)
b = 1/2 * sum(((effect-mn)/eta)^2)
c = sum(log(eta))
Bij <- matrix(rep(0,number*nsteps),nrow=number,ncol=nsteps)
bi = -si * qnorm(steps[1])
Bij[,1] = 1-pnorm((bi-mn)/eta)
if(nsteps > 2){
for(j in 2:(length(steps)-1)) {
bi = -si * qnorm(steps[j])
bilast = -si * qnorm(steps[j-1])
Bij[,j] = pnorm((bilast-mn)/eta) - pnorm((bi-mn)/eta)
}
}
bilast = -si * qnorm(steps[length(steps)-1])
Bij[,length(steps)] = pnorm((bilast-mn)/eta)
swbij = 0
for(j in 1:length(steps)) swbij = swbij + w[j]*Bij[,j]
d = sum(log(swbij))
# (Uncomment if needed to see what's going wrong.) print(pars)
return(-a + b + c + d)
}
# }
gradient1 <- function(pars) {
vc = pars[1]
beta = pars[2:(npred+2)]
mn = XX%*%beta
eta2 = v + vc
a = 0.5*sum(1/eta2 - (effect-mn)^2/eta2^2)
b = rep(0,(1+npred))
for(i in 1:length(b)) b[i] = sum( (effect-mn)*-XX[,i]/eta2 )
return(matrix(c(a,b),nrow=1+length(b),ncol=1))
}
intervaltally <- function(p, steps) {
p1 <- cut(p, breaks=c(-Inf,steps), labels=steps)
return(p1)
}
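# Branch selection below relies on sentinel values: 600 for 'steps' means no
# cutpoints were supplied (unadjusted model) and 600 for 'XX' means no moderator
# design matrix was supplied (intercept only).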
#UNADJUSTED WITHOUT MODERATORS
if(identical(steps, 600) && npred == 0 && identical(XX, 600)){
nsteps <- 0
XX <- cbind(rep(1,number))
pars <- c(mean(v)/4,mean(effect))
output1 <- optim(par=pars,fn=neglike1, lower=c(0,rep(-Inf,(npred+1))),method="L-BFGS-B",hessian=TRUE)
if(output1$convergence == 1){
print("Maximum iterations reached without convergence. Consider re-examining your model.")
}
if(output1$convergence >= 51){
print("The model has failed to converge and produced an error. Consider re-examining your model.")
}
Parameters <- output1$par
Standard_Errors <- sqrt(diag(solve(output1$hessian)))
results <- cbind(c(output1$par[1], output1$par[2]),Standard_Errors)
#results <- cbind(c("Parameters",output1$par[1], output1$par[2]),c("Standard Errors", sqrt(diag(solve(output1$hessian)))))
#return(grid.table(results, gp=gpar(fontsize=20), rows=c("Variance Component", "Intercept"), cols=c("Estimate", "Standard Error")))
# resultsb <- data.frame(results, row.names=c("Variance Component", "Intercept"))
# colnames(resultsb) <- c("Parameters", "Standard Errors")
rowlabels <- c("Variance Component", "Intercept")
resultsb <- data.frame(rowlabels,results)
colnames(resultsb) <- c("", "Parameters", "Standard Errors")
return(resultsb)
}
#ADJUSTED WITHOUT MODERATORS
if(!identical(steps, 600) && npred == 0 && identical(XX, 600)){
XX <- cbind(rep(1,number))
nsteps <- length(steps)
pars <- c(mean(v)/4, mean(effect), rep(1,nsteps-1))
# wt <- rep(1,number)
# for(i in 1:number) {
# for(j in 2:nsteps) {
# if (-si[i]*qnorm(steps[j]) <= effect[i] && effect[i] <= -si[i]*qnorm(steps[j-1])) wt[i] = j
# }
# if( effect[i] <= -si[i]*qnorm(steps[nsteps-1])) wt[i] = nsteps
# }
wt <- rep(1,number)
for(i in 1:number) {
if(p[i] <= steps[1]) wt[i] = 1
for(j in 2:nsteps) {
if (steps[j-1] < p[i] && p[i] <= steps[j]) wt[i] = j
}
if( p[i] > steps[nsteps-1] ) wt[i] = nsteps
}
#### Remember you changed this! #######
output2 <- optim(par=pars,fn=neglike2,
lower=c(0,rep(-Inf,1),rep(0.01,(nsteps-1))),
method="L-BFGS-B",hessian=TRUE)
# output2 <- nlminb(start=pars, objective=neglike2, lower=c(0,rep(-Inf,1),rep(0.01,(nsteps-1))), hessian=TRUE)
# print(output2)
if(output2$convergence == 1){
print("Maximum iterations reached without convergence. Consider re-examining your model.")
}
if(output2$convergence >= 51){
print("The model has failed to converge and produced an error. Consider re-examining your model.")
}
Parameters <- output2$par
results <- matrix(nrow=(length(output2$par)),ncol=2)
if(is.null(weights)){
results[,1] <- output2$par
results[,2] <- sqrt(diag(solve(output2$hessian)))
}
else{
results[,1] <- c(output2$par[1:2],weights[2:length(weights)])
results[,2] <- rep(NA, length(output2$par))
}
rowlabels <- character(length(output2$par))
rowlabels[1] <- "Variance Component"
rowlabels[2] <- "Intercept"
for(i in 3:(nsteps + 1)){
rowlabels[i] <- paste(c(steps[i - 2], "< p-values <", steps[i - 1], "weight"), collapse=" ")
}
# resultsb <- data.frame(results, row.names=c(rowlabels))
resultsb <- data.frame(rowlabels,results)
colnames(resultsb) <- c("", "Parameters", "Standard Errors")
# return(grid.table(results, gp=gpar(fontsize=20), rows=c(rowlabels), cols=c("Estimate", "Standard Error")))
# return(grid.table(results, rows=c(rowlabels), cols=c("Estimate", "Standard Error")))
# return(format(resultsb))
# print(resultsb)
# return(grid.table(resultsb, rows=c(rowlabels), cols=c("Estimate", "Standard Error")))
# return(gvisTable(resultsb))
return(resultsb)
}
#UNADJUSTED WITH MODERATORS
if(identical(steps, 600) && npred >= 0 && !identical(XX, 600)){
nsteps <- 0
pars <- c(mean(v)/4,mean(effect),rep(0,npred))
output1 <- optim(par=pars,fn=neglike1, lower=c(0,rep(-Inf,(npred+1))),method="L-BFGS-B",hessian=TRUE)
if(output1$convergence == 1){
print("Maximum iterations reached without convergence. Consider re-examining your model.")
}
if(output1$convergence >= 51){
print("The model has failed to converge and produced an error. Consider re-examining your model.")
}
results <- matrix(nrow=(length(output1$par)),ncol=2)
results[,1] <- output1$par
results[,2] <- sqrt(diag(solve(output1$hessian)))
predictornumber <- seq(1,npred,by=1)
rowlabels <- character(length(output1$par))
rowlabels[1] <- "Variance Component"
rowlabels[2] <- "Intercept"
for(i in 3:(npred + 2)){
rowlabels[i] <- prednames[i - 1]
}
# resultsb <- data.frame(results, row.names=c(rowlabels))
# colnames(resultsb) <- c("Parameters", "Standard Errors")
#return(grid.table(results, gp=gpar(fontsize=20), rows=c(rowlabels), cols=c("Estimate", "Standard Error")))
resultsb <- data.frame(rowlabels,results)
colnames(resultsb) <- c("", "Parameters", "Standard Errors")
return(resultsb)
}
#ADJUSTED WITH MODERATORS
if(!identical(steps, 600) && npred >= 0 && !identical(XX, 600)){
nsteps <- length(steps)
pars <- c(mean(v)/4,mean(effect),rep(0,npred), rep(1,nsteps-1))
# wt <- rep(1,number)
# for(i in 1:number) {
# for(j in 2:nsteps) {
# if (-si[i]*qnorm(steps[j]) <= effect[i] && effect[i] <= -si[i]*qnorm(steps[j-1])) wt[i] = j
# }
# if( effect[i] <= -si[i]*qnorm(steps[nsteps-1])) wt[i] = nsteps
# }
wt <- rep(1,number)
for(i in 1:number) {
if(p[i] <= steps[1]) wt[i] = 1
for(j in 2:nsteps) {
if (steps[j-1] < p[i] && p[i] <= steps[j]) wt[i] = j
}
if( p[i] > steps[nsteps-1] ) wt[i] = nsteps
}
output2 <- optim(par=pars,fn=neglike2,
lower=c(0,rep(-Inf,(npred+1)),rep(0.01,(nsteps-1))),
method="L-BFGS-B",hessian=TRUE)
if(output2$convergence == 1){
print("Maximum iterations reached without convergence. Consider re-examining your model.")
}
if(output2$convergence >= 51){
print("The model has failed to converge and produced an error. Consider re-examining your model.")
}
results <- matrix(nrow=(length(output2$par)),ncol=2)
if(is.null(weights)){
results[,1] <- output2$par
results[,2] <- sqrt(diag(solve(output2$hessian)))
}
else{
results[,1] <- c(output2$par[1:(npred+2)],weights[2:length(weights)])
results[,2] <- rep(NA, length(output2$par))
}
predictornumber <- seq(1,npred,by=1)
rowlabels <- character(length(output2$par))
rowlabels[1] <- "Variance Component"
rowlabels[2] <- "Intercept"
for(i in 3:(npred + 2)){
rowlabels[i] <- prednames[i - 1]
}
# for(i in 3:(npred + 2)){
# rowlabels[i] <- paste(c("Slope #", predictornumber[i - 2]), collapse=" ")
# }
for(i in (3+npred):(length(output2$par))){
rowlabels[i] <- paste(c(steps[i - (2 + npred)], "< p-values <", steps[i - (1 + npred)], "weight"), collapse=" ")
}
# resultsb <- data.frame(results, row.names=c(rowlabels))
# colnames(resultsb) <- c("Parameters", "Standard Errors")
resultsb <- data.frame(rowlabels,results)
colnames(resultsb) <- c("", "Parameters", "Standard Errors")
# return(grid.table(results, gp=gpar(fontsize=20), rows=c(rowlabels), cols=c("Estimate", "Standard Error")))
return(resultsb)
}
}
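# Illustrative standalone call (a sketch; 'd' and 'v' are hypothetical vectors of
# effect sizes and sampling variances, not objects defined in this app):
#   p <- 1 - pnorm(d / sqrt(v))
#   weightfunction(effect = d, v = v, npred = 0,
#                  steps = c(0.025, 1), XX = 600, p = p)   # adjusted model, no moderators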
likelihoodfunct <- function(effect, v, npred, steps, XX, p) {
si <- sqrt(v)
effect <- effect
number <- length(effect)
v <- v
p <- p
neglike1 <- function(pars) {
vc = pars[1]
beta = pars[2:(npred+2)]
mn = XX%*%beta
eta = sqrt(v + vc)
b = 1/2 * sum(((effect-mn)/eta)^2)
c = sum(log(eta))
return(b+c)
}
neglike2 <- function(pars) {
vc = pars[1]
beta = pars[2:(npred+2)]
w = c(1,pars[(npred+3):length(pars)])
mn = XX%*%beta
a = sum(log(w[wt]))
eta = sqrt(v + vc)
b = 1/2 * sum(((effect-mn)/eta)^2)
c = sum(log(eta))
Bij <- matrix(rep(0,number*nsteps),nrow=number,ncol=nsteps)
bi = -si * qnorm(steps[1])
Bij[,1] = 1-pnorm((bi-mn)/eta)
if(nsteps > 2){
for(j in 2:(length(steps)-1)) {
bi = -si * qnorm(steps[j])
bilast = -si * qnorm(steps[j-1])
Bij[,j] = pnorm((bilast-mn)/eta) - pnorm((bi-mn)/eta)
}
}
bilast = -si * qnorm(steps[length(steps)-1])
Bij[,length(steps)] = pnorm((bilast-mn)/eta)
swbij = 0
for(j in 1:length(steps)) swbij = swbij + w[j]*Bij[,j]
d = sum(log(swbij))
# (Uncomment if needed to see what's going wrong.) print(pars)
return(-a + b + c + d)
}
gradient1 <- function(pars) {
vc = pars[1]
beta = pars[2:(npred+2)]
mn = XX%*%beta
eta2 = v + vc
a = 0.5*sum(1/eta2 - (effect-mn)^2/eta2^2)
b = rep(0,(1+npred))
for(i in 1:length(b)) b[i] = sum( (effect-mn)*-XX[,i]/eta2 )
return(matrix(c(a,b),nrow=1+length(b),ncol=1))
}
intervaltally <- function(p, steps) {
p1 <- cut(p, breaks=c(-Inf,steps), labels=steps)
return(p1)
}
#WITHOUT MODERATORS
if(npred == 0 && identical(XX, 600)){
nsteps <- 0
XX <- cbind(rep(1,number))
pars <- c(mean(v)/4,mean(effect))
output1 <- optim(par=pars,fn=neglike1, gr=gradient1, lower=c(0,rep(-Inf,(npred+1))),method="L-BFGS-B",hessian=TRUE)
nsteps <- length(steps)
pars <- c(mean(v)/4, mean(effect), rep(1,nsteps-1))
# wt <- rep(1,number)
# for(i in 1:number) {
# for(j in 2:nsteps) {
# if (-si[i]*qnorm(steps[j]) <= effect[i] && effect[i] <= -si[i]*qnorm(steps[j-1])) wt[i] = j
# }
# if( effect[i] <= -si[i]*qnorm(steps[nsteps-1])) wt[i] = nsteps
# }
wt <- rep(1,number)
for(i in 1:number) {
if(p[i] <= steps[1]) wt[i] = 1
for(j in 2:nsteps) {
if (steps[j-1] < p[i] && p[i] <= steps[j]) wt[i] = j
}
if( p[i] > steps[nsteps-1] ) wt[i] = nsteps
}
output2 <- optim(par=pars,fn=neglike2,
lower=c(0,rep(-Inf,1),rep(0.01,(nsteps-1))),
method="L-BFGS-B",hessian=TRUE)
lrchisq <- 2*(output1$value - output2$value)
results <- cbind(output1$value, output2$value, lrchisq, (nsteps-1), (1-pchisq(lrchisq,(nsteps-1))) )
likelihood <- data.frame(results, row.names=c("Estimate"))
colnames(likelihood) <- c("Unadjusted Likelihood","Adjusted Likelihood", "2*Difference","df", "p-value")
return(likelihood)
}
#UNADJUSTED WITH MODERATORS
if(npred >= 0 && !identical(XX, 600)){
nsteps <- 0
pars <- c(mean(v)/4,mean(effect),rep(0,npred))
output1 <- optim(par=pars,fn=neglike1, gr=gradient1, lower=c(0,rep(-Inf,(npred+1))),method="L-BFGS-B",hessian=TRUE)
nsteps <- length(steps)
pars <- c(mean(v)/4,mean(effect),rep(0,npred), rep(1,nsteps-1))
# wt <- rep(1,number)
# for(i in 1:number) {
# for(j in 2:nsteps) {
# if (-si[i]*qnorm(steps[j]) <= effect[i] && effect[i] <= -si[i]*qnorm(steps[j-1])) wt[i] = j
# }
# if( effect[i] <= -si[i]*qnorm(steps[nsteps-1])) wt[i] = nsteps
# }
wt <- rep(1,number)
for(i in 1:number) {
if(p[i] <= steps[1]) wt[i] = 1
for(j in 2:nsteps) {
if (steps[j-1] < p[i] && p[i] <= steps[j]) wt[i] = j
}
if( p[i] > steps[nsteps-1] ) wt[i] = nsteps
}
output2 <- optim(par=pars,fn=neglike2,
lower=c(0,rep(-Inf,(npred+1)),rep(0.01,(nsteps-1))),
method="L-BFGS-B",hessian=TRUE)
lrchisq <- 2*(output1$value - output2$value)
results <- cbind(output1$value, output2$value, lrchisq, (nsteps-1), (1-pchisq(lrchisq,(nsteps-1))) )
likelihood <- data.frame(results, row.names=c("Estimate"))
colnames(likelihood) <- c("Unadjusted Likelihood","Adjusted Likelihood", "2*Difference","df", "p-value")
return(likelihood)
}
}
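# likelihoodfunct() refits both models and returns the likelihood-ratio test:
# 2 * (unadjusted minus adjusted negative log-likelihood), referred to a
# chi-square with degrees of freedom equal to the number of freely estimated
# weights (nsteps - 1).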
sampletable <- function(p, pvalues, steps){
nsteps <- length(steps)
results <- matrix(nrow=length(pvalues),ncol=1)
results[,1] <- pvalues
rowlabels <- character(length(results[,1]))
rowlabels[1] <- paste(c("p-values <", steps[1]), collapse="")
for(i in 2:nsteps){
rowlabels[i] <- paste(c(steps[i - 1], "< p-values <", steps[i]), collapse=" ")
}
# resultsb <- data.frame(results, row.names=c(rowlabels))
# colnames(resultsb) <- c("Number of Effects")
resultsb <- data.frame(rowlabels,results)
colnames(resultsb) <- c("", "Number of Effects")
return(resultsb)
}
|
/scratch/gouwar.j/cran-all/cranData/weightr/inst/shiny/weightfunction.R
|
dummify <- function(x, show.na=FALSE, keep.na=FALSE){
if(!is.factor(x)){
stop("variable needs to be a factor")
}
levels(x) <- c(levels(x), "NAFACTOR")
x[is.na(x)] <- "NAFACTOR"
levs <- levels(x)
out <- model.matrix(~x-1)
colnames(out) <- levs
attributes(out)$assign <- NULL
attributes(out)$contrasts <- NULL
if(show.na==FALSE)
out <- out[,(colnames(out)=="NAFACTOR")==FALSE]
if(keep.na==TRUE)
out[x=="NAFACTOR",] <- matrix(NA, sum(x=="NAFACTOR"), dim(out)[2])
out
}
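# Illustrative use (a sketch):
#   x <- factor(c("a", "b", NA, "a"))
#   dummify(x)                  # indicator columns "a" and "b"; the NA row is all zeros
#   dummify(x, keep.na = TRUE)  # the NA row becomes NA in every kept column instead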
|
/scratch/gouwar.j/cran-all/cranData/weights/R/dummify.r
|
findwtdinteraction <- function(x, across, by=NULL, at=NULL, acrosslevs=NULL, bylevs=NULL,
atlevs=NULL, weight=NULL, dvname=NULL, acclevnames=NULL, bylevnames=NULL,
atlevnames=NULL, stdzacross=FALSE, stdzby=FALSE, stdzat=FALSE, limitlevs=20,
type="response", approach="prototypical", data=NULL, nsim=100){
UseMethod("findwtdinteraction")
}
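# S3 generic: dispatches on the class of the fitted model (default method, list
# of imputed-data fits, lmerMod, ...). Illustrative call (a sketch; 'lm1' is a
# hypothetical weighted regression containing an x1:x2 interaction):
#   preds <- findwtdinteraction(lm1, across = "x1", by = "x2")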
|
/scratch/gouwar.j/cran-all/cranData/weights/R/findwtdinteraction.R
|
findwtdinteraction.default <- function(x, across, by=NULL, at=NULL, acrosslevs=NULL, bylevs=NULL, atlevs=NULL, weight=NULL, dvname=NULL, acclevnames=NULL, bylevnames=NULL, atlevnames=NULL, stdzacross=FALSE, stdzby=FALSE, stdzat=FALSE, limitlevs=20, type="response", approach="prototypical", data=NULL, nsim=100){
reg <- x
if(!is.null(data))
df <- data
if(is.null(data))
df <- reg$model
if(is.null(weight)){
if(!is.null(reg$prior.weight))
weight <- reg$prior.weight
if(is.null(reg$prior.weight) & !is.null(reg$weights))
weight <- reg$weights
if(is.null(weight))
weight <- rep(1, nrow(df))
}
if(length(weight)!=(nrow(df)))
stop("Weight vector length must match the number of complete cases in the regression.")
clsset <- sapply(lapply(df, class), function(x) x[1])
acclass <- class(df[,across])[1]
if(stdzacross==TRUE)
df[,across] <- stdz(df[,across], weight)
if(stdzby==TRUE & !is.null(by))
df[,by] <- stdz(df[,by], weight)
if(stdzat==TRUE & !is.null(at))
df[,at] <- stdz(df[,at], weight)
if(is.null(acrosslevs)){
if(stdzacross==TRUE)
acrosslevs <- c(-1,1)
if(stdzacross==FALSE)
acrosslevs <- sort(unique(df[,across]))
if(length(acrosslevs)>limitlevs & is.numeric(acrosslevs))
acrosslevs <- seq(min(df[,across], na.rm=TRUE), max(df[,across], na.rm=TRUE), (max(df[,across], na.rm=TRUE)-min(df[,across], na.rm=TRUE))/(limitlevs-1))
}
if(is.null(dvname))
dvname <- names(df)[1]
if(!is.null(by)){
byclass <- class(df[,by])[1]
if(is.null(bylevs)){
if(stdzby==TRUE)
bylevs <- c(-1,1)
if(stdzby==FALSE)
bylevs <- sort(unique(df[,by]))
if(length(bylevs)>limitlevs & is.numeric(bylevs))
bylevs <- seq(min(df[,by], na.rm=TRUE), max(df[,by], na.rm=TRUE), (max(df[,by], na.rm=TRUE)-min(df[,by], na.rm=TRUE))/(limitlevs-1))
}
}
if(is.null(by)){
by <- "All"
df[,by] <- "All"
bylevs <- "All"
hasby <- FALSE
stdzby <- FALSE
}
if(!is.null(at)){
atclass <- class(df[,at])[1]
if(is.null(atlevs)){
if(stdzat==TRUE)
atlevs <- c(-1,1)
if(stdzat==FALSE)
atlevs <- sort(unique(df[,at]))
if(length(atlevs)>limitlevs & is.numeric(atlevs))
atlevs <- seq(min(df[,at], na.rm=TRUE), max(df[,at], na.rm=TRUE), (max(df[,at], na.rm=TRUE)-min(df[,at], na.rm=TRUE))/(limitlevs-1))
}
hasat <- TRUE
if(is.null(atlevnames)){
if(stdzat==TRUE)
atlevnames <- paste(atlevs, "SD", sep="")
if(stdzat==FALSE)
atlevnames <- paste(atlevs)
}
}
if(is.null(at)){
at <- "All"
df[,at] <- "All"
atlevs <- "All"
atlevnames <- "All"
hasat <- FALSE
stdzat <- FALSE
}
if(is.null(bylevnames)){
if(stdzby==TRUE)
bylevnames <- paste(bylevs, "SD", sep="")
if(stdzby==FALSE)
bylevnames <- paste(bylevs)
}
if(is.null(acclevnames)){
if(stdzacross==TRUE)
acclevnames <- paste(acrosslevs, "SD", sep="")
if(stdzacross==FALSE)
acclevnames <- paste(acrosslevs)
}
levs <- acrosslevs
ol <- acrosslevs
lng <- length(acrosslevs)
if(sum(clsset=="matrix")>0)
stop(paste("Interactions Cannot Currently Be Resolved With Matrix Predictors, Please Insert Each Variable in", names(clsset)[clsset=="matrix"], "Separately in Regression Before Using This Tool")) # TRY TO MAKE THIS WORK EVENTUALLY
out <- NULL
out$Meta <- list(dvname=dvname, across=across, by=by, at=at)
out$Means <- as.list(1:length(atlevs))
names(out$Means) <- atlevnames
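# 'approach' controls how predicted values are generated:
#   "prototypical": predict for synthetic cases holding all other predictors at
#                   typical (weighted mean/median/modal) values;
#   "population":   set across/by/at for every observed case, predict, and take
#                   the weighted mean;
#   "by"/"at"/"atby": like "population", but restricted to the cases actually
#                   observed at each by and/or at level.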
if(approach=="prototypical"){
pd <- data.frame(na.omit(df)[1:lng,])
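# Hold every other predictor at a typical value: weighted mean (numeric),
# weighted median (ordered factors), weighted mode (factors and logicals).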
for(i in 1:dim(pd)[2]){
if(class(pd[,i])[1]=="numeric")
pd[,i] <- rep(wtd.mean(df[,i], weight, na.rm=TRUE), lng)
if(class(pd[,i])[1]=="ordered")
pd[,i] <- ordered(rep(wtd.table(df[,i], weight)$x[cumsum(wtd.table(df[,i], weight)$sum.of.weights)/sum(wtd.table(df[,i], weight)$sum.of.weights)>=.5][1], lng), levels=levels(df[,i]))
if(class(pd[,i])[1]=="factor")
pd[,i] <- factor(rep(wtd.table(df[,i], weight)$x[wtd.table(df[,i], weight)$sum.of.weights==max(wtd.table(df[,i], weight)$sum.of.weights)][1], lng), levels=levels(df[,i])) # FIX MINOR BUG HERE WHERE TRAILING " " IN ORIGINAL FACTOR LEVEL CAN GET DROPPED
if(class(pd[,i])[1]=="logical")
pd[,i] <- as.logical(rep(wtd.table(df[,i], weight)$x[wtd.table(df[,i], weight)$sum.of.weights==max(wtd.table(df[,i], weight)$sum.of.weights)][1], lng))
}
out$Resp <- pd[1,!(colnames(pd) %in% c(dvname, at, across, by))]
out$SEs <- as.list(1:length(atlevs))
names(out$SEs) <- atlevnames
names(pd) <- names(df)
rownames(pd) <- 1:dim(pd)[1]
pd[,across] <- acrosslevs
for(a in 1:length(atlevs)){
pd[,at] <- atlevs[a]
if(!is.null(df[,at]))
class(pd[,at]) <- class(df[,at])
bylist <- as.list(bylevs)
out$Means[[a]] <- out$SEs[[a]] <- matrix(NA, length(bylevs), lng)
for(i in 1:length(bylevs)){
bylist[[i]] <- pd
bylist[[i]][,by] <- factor(rep(bylevs[i], length(bylist[[i]][,by])), levels=bylevs)
if(is.numeric(bylist[[i]][,by]))
bylist[[i]] <- pd
bylist[[i]][,by] <- rep(bylevs[i], length(bylist[[i]][,by]))
}
eachpred <- lapply(bylist, function(x) predict(reg, newdata=x, se.fit=TRUE, type=type))
means <- t(sapply(eachpred, function(x) x$fit))
out$Means[[a]] <- as.matrix(means)
ses <- t(sapply(eachpred, function(x) x$se.fit))
out$SEs[[a]] <- as.matrix(ses)
try(rownames(out$Means[[a]]) <- rownames(out$SEs[[a]]) <- bylevnames)
try(colnames(out$Means[[a]]) <- colnames(out$SEs[[a]]) <- acclevnames)
}
}
if(approach=="population"){
pd <- df
for(a in 1:length(atlevs)){
out$Means[[a]] <- matrix(NA, length(bylevs), length(acrosslevs))
rownames(out$Means[[a]]) <- bylevs
colnames(out$Means[[a]]) <- acrosslevs
for(b in 1:length(bylevs)){
for(c in 1:length(acrosslevs)){
pdn <- pd
pdn[,across] <- acrosslevs[c]
pdn[,by] <- bylevs[b]
pdn[,at] <- atlevs[a]
out$Means[[a]][b,c] <- wtd.mean(predict(reg, newdata=pdn, type=type), weights = weight)
}
}
try(rownames(out$Means[[a]]) <- bylevnames)
try(colnames(out$Means[[a]]) <- acclevnames)
}
}
if(approach=="by"){
pd <- df
for(a in 1:length(atlevs)){
out$Means[[a]] <- matrix(NA, length(bylevs), length(acrosslevs))
rownames(out$Means[[a]]) <- bylevs
colnames(out$Means[[a]]) <- acrosslevs
for(b in 1:length(bylevs)){
for(c in 1:length(acrosslevs)){
pdn <- pd[pd[,by]==bylevs[b],]
pdn[,across] <- acrosslevs[c]
pdn[,at] <- atlevs[a]
out$Means[[a]][b,c] <- wtd.mean(predict(reg, newdata=pdn, type=type), weights = weight[pd[,by]==bylevs[b]])
}
}
try(rownames(out$Means[[a]]) <- bylevnames)
try(colnames(out$Means[[a]]) <- acclevnames)
}
}
if(approach=="at"){
pd <- df
for(a in 1:length(atlevs)){
out$Means[[a]] <- matrix(NA, length(bylevs), length(acrosslevs))
rownames(out$Means[[a]]) <- bylevs
colnames(out$Means[[a]]) <- acrosslevs
for(b in 1:length(bylevs)){
for(c in 1:length(acrosslevs)){
pdn <- pd[pd[,at]==atlevs[a],]
pdn[,across] <- acrosslevs[c]
pdn[,by] <- bylevs[b]
out$Means[[a]][b,c] <- wtd.mean(predict(reg, newdata=pdn, type=type), weights = weight[pd[,at]==atlevs[a]])
}
}
try(rownames(out$Means[[a]]) <- bylevnames)
try(colnames(out$Means[[a]]) <- acclevnames)
}
}
if(approach=="atby"){
pd <- df
for(a in 1:length(atlevs)){
out$Means[[a]] <- matrix(NA, length(bylevs), length(acrosslevs))
for(b in 1:length(bylevs)){
for(c in 1:length(acrosslevs)){
pdn <- pd[pd[,at]==atlevs[a] & pd[,by]==bylevs[b],]
pdn[,across] <- acrosslevs[c]
out$Means[[a]][b,c] <- wtd.mean(predict(reg, newdata=pdn, type=type), weights = weight[pd[,by]==bylevs[b] & pd[,at]==atlevs[a]])
}
}
try(rownames(out$Means[[a]]) <- bylevnames)
try(colnames(out$Means[[a]]) <- acclevnames)
}
}
class(out) <- "interactpreds"
out
}
|
/scratch/gouwar.j/cran-all/cranData/weights/R/findwtdinteraction.default.R
|
findwtdinteraction.list <- function(x, across, by=NULL, at=NULL, acrosslevs=NULL, bylevs=NULL, atlevs=NULL, weight=NULL, dvname=NULL, acclevnames=NULL, bylevnames=NULL, atlevnames=NULL, stdzacross=FALSE, stdzby=FALSE, stdzat=FALSE, limitlevs=20, type="response", approach="prototypical", data=NULL, nsim=100){
predset <- lapply(x, function(g) findwtdinteraction(g, across, by, at, acrosslevs=acrosslevs, bylevs=bylevs, atlevs=atlevs, weight=weight, dvname=dvname, bylevnames=bylevnames, atlevnames=atlevnames, acclevnames=acclevnames, stdzacross, stdzby, stdzat, limitlevs=limitlevs, approach=approach, nsim=100))
allmns <- lapply(predset, function(m) m$Means)
if(length(unique(sapply(allmns, length))) != 1)
stop("at variable values are inconsistent across imputations, please set atlevs before running")
if(length(unique(unlist(sapply(allmns, function(x) sapply(x, function(y) dim(y)[1]))))) != 1)
stop("by variable values are inconsistent across imputations, please set bylevs before running")
if(length(unique(unlist(sapply(allmns, function(x) sapply(x, function(y) dim(y)[2]))))) != 1)
stop("across variable values are inconsistent across imputations, please set acrosslevs before running")
allses <- lapply(predset, function(m) m$SEs)
allresp <- sapply(predset, function(m) m$Resp)
imputations <- length(allmns)
nlat <- length(allmns[[1]])
nlby <- dim(allmns[[1]][[1]])[1]
nlacross <- dim(allmns[[1]][[1]])[2]
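# Pool results across imputations cell by cell: point estimates are simple means
# of the per-imputation predictions, while the uncertainty is combined with
# pool.scalar() (assumed here to come from the mice package), taking its "t"
# component.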
impmns <- lapply(1:nlat, function(a) sapply(1:nlacross, function(c) sapply(1:nlby, function(b) mean(sapply(1:imputations, function(i) allmns[[i]][[a]][b,c])))))
impses <- lapply(1:nlat, function(a) sapply(1:nlacross, function(c) as.numeric(sapply(1:nlby, function(b) pool.scalar(sapply(1:imputations, function(i) allmns[[i]][[a]][b,c]), sapply(1:imputations, function(i) allses[[i]][[a]][b,c]))["t"]))))
for(i in 1:length(impmns)){
if(!is.vector(impmns[[i]])){
colnames(impmns[[i]]) <- colnames(impses[[i]]) <- colnames(allmns[[1]][[1]])
rownames(impmns[[i]]) <- rownames(impses[[i]]) <- rownames(allmns[[1]][[1]])
}
else
names(impmns[[i]]) <- names(impses[[i]]) <- colnames(allmns[[1]][[1]])
}
names(impses) <- names(impmns) <- names(allmns[[1]])
out <- NULL
out$RespMns <- sapply(as.data.frame(t(allresp)), function(x) try(mean(as.numeric(x), na.rm=TRUE)))
out$Resp <- allresp
out$Meta <- predset[[1]]$Meta
out$Means <- impmns
out$SEs <- impses
class(out) <- "interactpreds"
out
}
|
/scratch/gouwar.j/cran-all/cranData/weights/R/findwtdinteraction.list.R
|
findwtdinteraction.lmerMod <- function(x, across, by=NULL, at=NULL, acrosslevs=NULL, bylevs=NULL, atlevs=NULL, weight=NULL, dvname=NULL, acclevnames=NULL, bylevnames=NULL, atlevnames=NULL, stdzacross=FALSE, stdzby=FALSE, stdzat=FALSE, limitlevs=20, type="response", approach="prototypical", data=NULL, nsim=100){
reg <- x
if(!is.null(data))
df <- data
if(is.null(data))
df <- attributes(x)$frame
if(is.null(weight)){
if(!is.null(attributes(x)$frame[,"(weights)"]))
weight <- attributes(x)$frame[,"(weights)"]
if(is.null(weight))
weight <- rep(1, dim(df)[1])
}
if(length(weight)!=(dim(df)[1]))
stop("Weight vector length must match the number of complete cases in the regression.")
clsset <- sapply(lapply(df, class), function(x) x[1])
acclass <- class(df[,across])[1]
if(stdzacross==TRUE)
df[,across] <- stdz(df[,across], weight)
if(stdzby==TRUE & !is.null(by))
df[,by] <- stdz(df[,by], weight)
if(stdzat==TRUE & !is.null(at))
df[,at] <- stdz(df[,at], weight)
if(is.null(acrosslevs)){
if(stdzacross==TRUE)
acrosslevs <- c(-1,1)
if(stdzacross==FALSE)
acrosslevs <- sort(unique(df[,across]))
if(length(acrosslevs)>limitlevs & is.numeric(acrosslevs))
acrosslevs <- seq(min(df[,across], na.rm=TRUE), max(df[,across], na.rm=TRUE), (max(df[,across], na.rm=TRUE)-min(df[,across], na.rm=TRUE))/(limitlevs-1))
}
if(is.null(dvname))
dvname <- names(df)[1]
if(!is.null(by)){
byclass <- class(df[,by])[1]
if(is.null(bylevs)){
if(stdzby==TRUE)
bylevs <- c(-1,1)
if(stdzby==FALSE)
bylevs <- sort(unique(df[,by]))
if(length(bylevs)>limitlevs & is.numeric(bylevs))
bylevs <- seq(min(df[,by], na.rm=TRUE), max(df[,by], na.rm=TRUE), (max(df[,by], na.rm=TRUE)-min(df[,by], na.rm=TRUE))/(limitlevs-1))
}
}
if(is.null(by)){
by <- "All"
df[,by] <- "All"
bylevs <- "All"
hasby <- FALSE
stdzby <- FALSE
}
if(!is.null(at)){
atclass <- class(df[,at])[1]
if(is.null(atlevs)){
if(stdzat==TRUE)
atlevs <- c(-1,1)
if(stdzat==FALSE)
atlevs <- sort(unique(df[,at]))
if(length(atlevs)>limitlevs & is.numeric(atlevs))
atlevs <- seq(min(df[,at], na.rm=TRUE), max(df[,at], na.rm=TRUE), (max(df[,at], na.rm=TRUE)-min(df[,at], na.rm=TRUE))/(limitlevs-1))
}
hasat <- TRUE
if(is.null(atlevnames)){
if(stdzat==TRUE)
atlevnames <- paste(atlevs, "SD", sep="")
if(stdzat==FALSE)
atlevnames <- paste(atlevs)
}
}
if(is.null(at)){
at <- "All"
df[,at] <- "All"
atlevs <- "All"
atlevnames <- "All"
hasat <- FALSE
stdzat <- FALSE
}
if(is.null(bylevnames)){
if(stdzby==TRUE)
bylevnames <- paste(bylevs, "SD", sep="")
if(stdzby==FALSE)
bylevnames <- paste(bylevs)
}
if(is.null(acclevnames)){
if(stdzacross==TRUE)
acclevnames <- paste(acrosslevs, "SD", sep="")
if(stdzacross==FALSE)
acclevnames <- paste(acrosslevs)
}
levs <- acrosslevs
ol <- acrosslevs
lng <- length(acrosslevs)
if(sum(clsset=="matrix")>0)
stop(paste("Interactions Cannot Currently Be Resolved With Matrix Predictors, Please Insert Each Variable in", names(clsset)[clsset=="matrix"], "Separately in Regression Before Using This Tool")) # TRY TO MAKE THIS WORK EVENTUALLY
out <- NULL
out$Meta <- list(dvname=dvname, across=across, by=by, at=at)
out$Means <- as.list(1:length(atlevs))
names(out$Means) <- atlevnames
if(approach=="prototypical"){
pd <- data.frame(na.omit(df)[1:lng,])
for(i in 1:dim(pd)[2]){
if(class(pd[,i])[1]=="numeric")
pd[,i] <- rep(wtd.mean(df[,i], weight, na.rm=TRUE), lng)
if(class(pd[,i])[1]=="ordered")
pd[,i] <- ordered(rep(wtd.table(df[,i], weight)$x[cumsum(wtd.table(df[,i], weight)$sum.of.weights)/sum(wtd.table(df[,i], weight)$sum.of.weights)>=.5][1], lng), levels=levels(df[,i]))
if(class(pd[,i])[1]=="factor")
pd[,i] <- factor(rep(wtd.table(df[,i], weight)$x[wtd.table(df[,i], weight)$sum.of.weights==max(wtd.table(df[,i], weight)$sum.of.weights)][1], lng), levels=levels(df[,i])) # FIX MINOR BUG HERE WHERE TRAILING " " IN ORIGINAL FACTOR LEVEL CAN GET DROPPED
if(class(pd[,i])[1]=="logical")
pd[,i] <- as.logical(rep(wtd.table(df[,i], weight)$x[wtd.table(df[,i], weight)$sum.of.weights==max(wtd.table(df[,i], weight)$sum.of.weights)][1], lng))
}
out$Resp <- pd[1,!(colnames(pd) %in% c(dvname, at, across, by))]
out$SEs <- as.list(1:length(atlevs))
names(out$SEs) <- atlevnames
names(pd) <- names(df)
rownames(pd) <- 1:dim(pd)[1]
pd[,across] <- acrosslevs
for(a in 1:length(atlevs)){
pd[,at] <- atlevs[a]
if(!is.null(df[,at]))
class(pd[,at]) <- class(df[,at])
bylist <- as.list(bylevs)
out$Means[[a]] <- out$SEs[[a]] <- matrix(NA, length(bylevs), lng)
for(i in 1:length(bylevs)){
bylist[[i]] <- pd
bylist[[i]][,by] <- factor(rep(bylevs[i], length(bylist[[i]][,by])), levels=bylevs)
if(is.numeric(df[,by])){ # only overwrite the factor coding when the by variable is numeric
bylist[[i]] <- pd
bylist[[i]][,by] <- rep(bylevs[i], length(bylist[[i]][,by]))
}
}
eachpred <- lapply(bylist, function(x) bootMer(reg, function(r) predict(r, newdata=x, type=type, re.form=NA), nsim=nsim))
means <- t(sapply(eachpred, function(x) x$t0))
out$Means[[a]] <- as.matrix(means)
ses <- t(sapply(eachpred, function(x) apply(x$t, 2, function(g) sd(g))))
out$SEs[[a]] <- as.matrix(ses)
try(rownames(out$Means[[a]]) <- rownames(out$SEs[[a]]) <- bylevnames)
try(colnames(out$Means[[a]]) <- colnames(out$SEs[[a]]) <- acclevnames)
}
}
if(approach=="population"){
pd <- df
for(a in 1:length(atlevs)){
out$Means[[a]] <- matrix(NA, length(bylevs), length(acrosslevs))
rownames(out$Means[[a]]) <- bylevs
colnames(out$Means[[a]]) <- acrosslevs
for(b in 1:length(bylevs)){
for(c in 1:length(acrosslevs)){
pdn <- pd
pdn[,across] <- acrosslevs[c]
pdn[,by] <- bylevs[b]
pdn[,at] <- atlevs[a]
out$Means[[a]][b,c] <- wtd.mean(predict(reg, newdata=pdn, type=type), weights = weight)
}
}
try(rownames(out$Means[[a]]) <- bylevnames)
try(colnames(out$Means[[a]]) <- acclevnames)
}
}
if(approach=="by"){
pd <- df
for(a in atlevs){
out$Means[[a]] <- matrix(NA, length(bylevs), length(acrosslevs))
rownames(out$Means[[a]]) <- bylevs
colnames(out$Means[[a]]) <- acrosslevs
for(b in 1:length(bylevs)){
for(c in 1:length(acrosslevs)){
pdn <- pd[pd[,by]==bylevs[b],]
pdn[,across] <- acrosslevs[c]
pdn[,at] <- atlevs[a]
out$Means[[a]][b,c] <- wtd.mean(predict(reg, newdata=pdn, type=type), weights = weight[pd[,by]==bylevs[b]])
}
}
try(rownames(out$Means[[a]]) <- bylevnames)
try(colnames(out$Means[[a]]) <- acclevnames)
}
}
if(approach=="at"){
pd <- df
for(a in atlevs){
out$Means[[a]] <- matrix(NA, length(bylevs), length(acrosslevs))
rownames(out$Means[[a]]) <- bylevs
colnames(out$Means[[a]]) <- acrosslevs
for(b in bylevs){
for(c in acrosslevs){
pdn <- pd[pd[,at]==atlevs[a],]
pdn[,across] <- acrosslevs[c]
pdn[,by] <- bylevs[b]
out$Means[[a]][b,c] <- wtd.mean(predict(reg, newdata=pdn, type=type), weights = weight[pd[,at]==bylevs[a]])
}
}
try(rownames(out$Means[[a]]) <- bylevnames)
try(colnames(out$Means[[a]]) <- acclevnames)
}
}
if(approach=="atby"){
pd <- df
for(a in atlevs){
out$Means[[a]] <- matrix(NA, length(bylevs), length(acrosslevs))
for(b in bylevs){
for(c in acrosslevs){
pdn <- pd[pd[,at]==a & pd[,by]==b]
pdn[,across] <- c
out$Means[[a]][b,c] <- wtd.mean(predict(reg, newdata=x, type=type), weights = weight[pd[,by]==bylevs[b] & pd[,at]==bylevs[a]])
}
}
try(rownames(out$Means[[a]]) <- bylevnames)
try(colnames(out$Means[[a]]) <- acclevnames)
}
}
class(out) <- "interactpreds"
out
}
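# Illustrative usage sketch (not part of the original package source). It assumes
# lme4 is installed and uses its bundled sleepstudy data; the model and argument
# values are chosen only for demonstration.
if (FALSE) {
library(lme4)
m <- lmer(Reaction ~ Days + (1 | Subject), data = sleepstudy)
p <- findwtdinteraction(m, across = "Days", nsim = 50)
plotinteractpreds(p)
}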
|
/scratch/gouwar.j/cran-all/cranData/weights/R/findwtdinteraction.lmerMod.R
|
findwtdinteraction.mira <- function(x, across, by=NULL, at=NULL, acrosslevs=NULL, bylevs=NULL, atlevs=NULL, weight=NULL, dvname=NULL, acclevnames=NULL, bylevnames=NULL, atlevnames=NULL, stdzacross=FALSE, stdzby=FALSE, stdzat=FALSE, limitlevs=20, type="response", approach="prototypical", data=NULL, nsim=100){
reg <- x
predset <- lapply(reg$analyses, function(g) findwtdinteraction(g, across, by, at, acrosslevs=acrosslevs, bylevs=bylevs, atlevs=atlevs, weight=weight, dvname=dvname, bylevnames=bylevnames, atlevnames=atlevnames, acclevnames=acclevnames, stdzacross, stdzby, stdzat, limitlevs=limitlevs, approach=approach, nsim=nsim))
allmns <- lapply(predset, function(m) m$Means)
if(table(table(sapply(allmns, length)))!=1)
stop("at variable values are inconsistent across imputations, please set atlevs before running")
if(table(table(unlist(sapply(allmns, function(x) sapply(x, function(y) dim(y)[1])))))!=1)
stop("by variable values are inconsistent across imputations, please set bylevs before running")
if(table(table(unlist(sapply(allmns, function(x) sapply(x, function(y) dim(y)[2])))))!=1)
stop("across variable values are inconsistent across imputations, please set acrosslevs before running")
allses <- lapply(predset, function(m) m$SEs)
allresp <- sapply(predset, function(m) m$Resp)
imputations <- length(allmns)
nlat <- length(allmns[[1]])
nlby <- dim(allmns[[1]][[1]])[1]
nlacross <- dim(allmns[[1]][[1]])[2]
impmns <- lapply(1:nlat, function(a) sapply(1:nlacross, function(c) sapply(1:nlby, function(b) mean(sapply(1:imputations, function(i) allmns[[i]][[a]][b,c])))))
impses <- lapply(1:nlat, function(a) sapply(1:nlacross, function(c) as.numeric(sapply(1:nlby, function(b) pool.scalar(sapply(1:imputations, function(i) allmns[[i]][[a]][b,c]), sapply(1:imputations, function(i) allses[[i]][[a]][b,c]))["t"]))))
for(i in 1:length(impmns)){
if(!is.vector(impmns[[i]])){
colnames(impmns[[i]]) <- colnames(impses[[i]]) <- colnames(allmns[[1]][[1]])
rownames(impmns[[i]]) <- rownames(impses[[i]]) <- rownames(allmns[[1]][[1]])
}
else
names(impmns[[i]]) <- names(impses[[i]]) <- colnames(allmns[[1]][[1]])
}
names(impses) <- names(impmns) <- names(allmns[[1]])
out <- NULL
out$RespMns <- sapply(as.data.frame(t(allresp)), function(x) try(mean(as.numeric(x), na.rm=TRUE)))
out$Resp <- allresp
out$Meta <- predset[[1]]$Meta
out$Means <- impmns
out$SEs <- impses
class(out) <- "interactpreds"
out
}
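# Illustrative usage sketch (not part of the original package source). It assumes
# the mice package (which supplies pool.scalar and the nhanes data) and that a
# findwtdinteraction method exists for the fitted models inside the mira object.
if (FALSE) {
library(mice)
imp <- mice(nhanes, m = 3, printFlag = FALSE)
fits <- with(imp, lm(chl ~ bmi * age))
p <- findwtdinteraction(fits, across = "bmi", by = "age")
plotinteractpreds(p)
}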
|
/scratch/gouwar.j/cran-all/cranData/weights/R/findwtdinteraction.mira.R
|
findwtdinteraction.multinom <- function(x, across, by=NULL, at=NULL, acrosslevs=NULL, bylevs=NULL, atlevs=NULL, weight=NULL, dvname=NULL, acclevnames=NULL, bylevnames=NULL, atlevnames=NULL, stdzacross=FALSE, stdzby=FALSE, stdzat=FALSE, limitlevs=20, type="probs", approach="prototypical", data=NULL, nsim=100){
reg <- x
df <- data
clsset <- sapply(lapply(df, class), function(x) x[1])
acclass <- class(df[,across])[1]
if(stdzacross==TRUE)
df[,across] <- stdz(df[,across], weight)
if(stdzby==TRUE & !is.null(by))
df[,by] <- stdz(df[,by], weight)
if(stdzat==TRUE & !is.null(at))
df[,at] <- stdz(df[,at], weight)
if(is.null(acrosslevs)){
if(stdzacross==TRUE)
acrosslevs <- c(-1,1)
if(stdzacross==FALSE)
acrosslevs <- sort(unique(df[,across]))
if(length(acrosslevs)>limitlevs & is.numeric(acrosslevs))
acrosslevs <- seq(min(df[,across], na.rm=TRUE), max(df[,across], na.rm=TRUE), (max(df[,across], na.rm=TRUE)-min(df[,across], na.rm=TRUE))/(limitlevs-1))
}
if(is.null(weight)){
if(!is.null(reg$prior.weight))
weight <- reg$prior.weight
if(is.null(reg$prior.weight) & !is.null(reg$weights))
weight <- reg$weights
if(is.null(weight))
weight <- rep(1, dim(df)[1])
}
if(length(weight)==(dim(df)[1]-length(reg$na.action)))
df <- df[!(rownames(df) %in% reg$na.action),]
if(length(weight)!=(dim(df)[1]))
stop("Weight vector length must match the number of complete cases in the regression.")
if(is.null(dvname))
dvname <- names(attributes(reg$terms)$dataClasses)[1]
if(!is.null(by)){
byclass <- class(df[,by])[1]
if(is.null(bylevs)){
if(stdzby==TRUE)
bylevs <- c(-1,1)
if(stdzby==FALSE)
bylevs <- sort(unique(df[,by]))
if(length(bylevs)>limitlevs & is.numeric(bylevs))
bylevs <- seq(min(df[,by], na.rm=TRUE), max(df[,by], na.rm=TRUE), (max(df[,by], na.rm=TRUE)-min(df[,by], na.rm=TRUE))/(limitlevs-1))
}
}
if(is.null(by)){
by <- "All"
df[,by] <- "All"
bylevs <- "All"
hasby <- FALSE
stdzby <- FALSE
}
if(!is.null(at)){
atclass <- class(df[,at])[1]
if(is.null(atlevs)){
if(stdzat==TRUE)
atlevs <- c(-1,1)
if(stdzat==FALSE)
atlevs <- sort(unique(df[,at]))
if(length(atlevs)>limitlevs & is.numeric(atlevs))
atlevs <- seq(min(df[,at], na.rm=TRUE), max(df[,at], na.rm=TRUE), (max(df[,at], na.rm=TRUE)-min(df[,at], na.rm=TRUE))/(limitlevs-1))
}
hasat <- TRUE
if(is.null(atlevnames)){
if(stdzat==TRUE)
atlevnames <- paste(atlevs, "SD", sep="")
if(stdzat==FALSE)
atlevnames <- paste(atlevs)
}
}
if(is.null(at)){
at <- "All"
df[,at] <- "All"
atlevs <- "All"
atlevnames <- "All"
hasat <- FALSE
stdzat <- FALSE
}
if(is.null(bylevnames)){
if(stdzby==TRUE)
bylevnames <- paste(bylevs, "SD", sep="")
if(stdzby==FALSE)
bylevnames <- paste(bylevs)
}
if(is.null(acclevnames)){
if(stdzacross==TRUE)
acclevnames <- paste(acrosslevs, "SD", sep="")
if(stdzacross==FALSE)
acclevnames <- paste(acrosslevs)
}
levs <- acrosslevs
ol <- acrosslevs
lng <- length(acrosslevs)
pd <- data.frame(na.omit(df)[1:lng,]) # drop incomplete cases before taking the first lng rows, as in the other methods
for(i in 1:dim(pd)[2]){
if(class(pd[,i])[1]=="numeric")
pd[,i] <- rep(wtd.mean(df[,i], weight, na.rm=TRUE), lng)
if(class(pd[,i])[1]=="ordered")
pd[,i] <- ordered(rep(wtd.table(df[,i], weight)$x[cumsum(wtd.table(df[,i], weight)$sum.of.weights)/sum(wtd.table(df[,i], weight)$sum.of.weights)>=.5][1], lng), levels=levels(df[,i]))
if(class(pd[,i])[1]=="factor")
pd[,i] <- factor(rep(wtd.table(df[,i], weight)$x[wtd.table(df[,i], weight)$sum.of.weights==max(wtd.table(df[,i], weight)$sum.of.weights)][1], lng), levels=levels(df[,i]))
if(class(pd[,i])[1]=="logical")
pd[,i] <- as.logical(rep(wtd.table(df[,i], weight)$x[wtd.table(df[,i], weight)$sum.of.weights==max(wtd.table(df[,i], weight)$sum.of.weights)][1], lng))
}
if(sum(clsset=="matrix")>0)
stop(paste("Interactions Cannot Currently Be Resolved With Matrix Predictors, Please Insert Each Variable in", names(clsset)[clsset=="matrix"], "Separately in Regression Before Using This Tool")) # TRY TO MAKE THIS WORK EVENTUALLY
rownames(pd) <- 1:dim(pd)[1]
pd[,across] <- acrosslevs
out <- NULL
out$Resp <- pd[1,!(colnames(pd) %in% c(dvname, at, across, by))]
out$Meta <- list(dvname=dvname, across=across, by=by, at=at)
out$Means <- as.list(1:length(atlevs))
#out$SEs <- as.list(1:length(atlevs))
names(out$Means) <- atlevnames #names(out$SEs) <-
for(a in 1:length(atlevs)){
pd[,at] <- atlevs[a]
if(!is.null(df[,at]))
class(pd[,at]) <- class(df[,at])
bylist <- as.list(bylevs)
out$Means[[a]] <- rep(NA, length(bylevs))#, lng) # <- out$SEs[[a]]
for(i in 1:length(bylevs)){
bylist[[i]] <- pd
bylist[[i]][,by] <- factor(rep(bylevs[i], length(bylist[[i]][,by])), levels=bylevs)
if(is.numeric(df[,by])){ # only overwrite the factor coding when the by variable is numeric
bylist[[i]] <- pd
bylist[[i]][,by] <- rep(bylevs[i], length(bylist[[i]][,by]))
}
}
eachpred <- lapply(bylist, function(x) predict(reg, newdata=x, type="prob"))
names(eachpred) <- bylevnames
for(i in 1:length(eachpred))
rownames(eachpred[[i]]) <- acclevnames
out$Means[[a]] <- eachpred
}
class(out) <- "interactpredsmnl"
out
}
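# Illustrative usage sketch (not part of the original package source). It assumes
# nnet is installed; the data frame below is simulated, and `data` must be supplied
# because this method reads the data from that argument rather than from the fit.
if (FALSE) {
library(nnet)
d <- data.frame(out = factor(sample(letters[1:3], 300, replace = TRUE)),
x1 = rnorm(300), x2 = rnorm(300))
m <- multinom(out ~ x1 * x2, data = d, trace = FALSE)
p <- findwtdinteraction(m, across = "x1", by = "x2", data = d)
plotinteractpreds(p)
}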
|
/scratch/gouwar.j/cran-all/cranData/weights/R/findwtdinteraction.multinom.R
|
na.levs <- function(x, naset=NULL, setmid=NULL, set1=NULL, set0=NULL, setmean=NULL, setmedian=NULL, setmode=NULL, weight=NULL)
UseMethod("na.levs")
na.levs.factor <- function(x, naset=NULL, setmedian=NULL, setmode=NULL, weight=NULL){
x[x %in% naset] <- NA
q <- (x %in% setmedian)
r <- (x %in% setmode)
x[q] <- NA
x[r] <- NA
x <- droplevels(x)
if(!is.null(weight))
x[q] <- names(table(x))[eval(sapply(1:length(table(x)), function(g) sum(wtd.table(x, weight)$sum.of.weights[1:g])/sum(wtd.table(x, weight)$sum.of.weights))>.5)][1]
if(is.null(weight))
x[q] <- names(table(x))[eval(sapply(1:length(table(x)), function(g) sum(table(x)[1:g])/sum(table(x)))>.5)][1]
x[r] <- names(sort(table(x), decreasing=TRUE))[1]
x
}
na.levs.numeric <- function(x, naset=NULL, setmid=NULL, set1=NULL, set0=NULL, setmean=NULL, weight=NULL){
x[x %in% naset] <- NA
q <- (x %in% setmid)
r <- (x %in% set1)
s <- (x %in% set0)
t <- (x %in% setmean)
x[q] <- NA
x[r] <- NA
x[s] <- NA
x[t] <- NA
x <- as.numeric(x)
x <- (x-range(x, na.rm=TRUE)[1])/range((x-range(x, na.rm=TRUE)[1]), na.rm=TRUE)[2]
x[q] <- .5
x[r] <- 1
x[s] <- 0
if(!is.null(weight))
x[t] <- wtd.mean(x, weight, na.rm=TRUE)
else
x[t] <- mean(x, na.rm=TRUE)
x
}
na.levs.default <- function(x, naset=NULL, setmid=NULL, set1=NULL, set0=NULL, setmean=NULL, weight=NULL){
x <- as.numeric(x)
x[x %in% naset] <- NA
q <- (x %in% setmid)
r <- (x %in% set1)
s <- (x %in% set0)
t <- (x %in% setmean)
x[q] <- NA
x[r] <- NA
x[s] <- NA
x[t] <- NA
x <- as.numeric(x)
x <- (x-range(x, na.rm=TRUE)[1])/range((x-range(x, na.rm=TRUE)[1]), na.rm=TRUE)[2]
x[q] <- .5
x[r] <- 1
x[s] <- 0
if(!is.null(weight))
x[t] <- wtd.mean(x, weight, na.rm=TRUE)
else
x[t] <- mean(x, na.rm=TRUE)
x
}
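# Illustrative usage sketch (not part of the original package source), showing the
# factor method: "refused" becomes NA and "neutral" is recoded to the most common
# remaining level. Values are made up.
if (FALSE) {
f <- factor(c("agree", "disagree", "neutral", "refused", "agree"))
na.levs(f, naset = "refused", setmode = "neutral")
}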
|
/scratch/gouwar.j/cran-all/cranData/weights/R/na.levs.r
|
nalevs <- function(x, naset=NULL, setmid=NULL, set1=NULL, set0=NULL, setmean=NULL, weight=NULL){
x[x %in% naset] <- NA
q <- (x %in% setmid)
r <- (x %in% set1)
s <- (x %in% set0)
t <- (x %in% setmean)
x[q] <- NA
x[r] <- NA
x[s] <- NA
x[t] <- NA
x <- as.numeric(x)
x <- (x-range(x, na.rm=TRUE)[1])/range((x-range(x, na.rm=TRUE)[1]), na.rm=TRUE)[2]
x[q] <- .5
x[r] <- 1
x[s] <- 0
if(!is.null(weight))
x[t] <- wtd.mean(x, weight, na.rm=TRUE)
else
x[t] <- mean(x, na.rm=TRUE)
x
}
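# Illustrative usage sketch (not part of the original package source). A "don't know"
# code of 9 is dropped to NA, a 5 is recoded to the scale midpoint, and the remaining
# values are rescaled to run from 0 to 1. Data are made up.
if (FALSE) {
nalevs(c(1, 2, 3, 4, 9, 5), naset = 9, setmid = 5)
# 0.000 0.333 0.667 1.000 NA 0.500 (approximately)
}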
|
/scratch/gouwar.j/cran-all/cranData/weights/R/nalevs.r
|
onecor.wtd <- function(x, y, weight=NULL){
if(sum((!is.na(x))*(!is.na(y)))>0){
if(is.null(weight)){
weight <- rep(1, length(x))
}
use <- !is.na(y) & !is.na(x)
x <- x[use]
y <- y[use]
weight <- weight[use]
#r1 <- lm(stdz(y, weight=weight)~stdz(x, weight=weight), weight=weight)
#corcoef <- coef(summary(r1))[2,]
corcoef <- coef(summary(lm(stdz(y, weight=weight)~stdz(x, weight=weight), weights=weight)))[2,]
}
else
corcoef <- rep(NA, 4)
names(corcoef) <- rep("", 4)
corcoef
}
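# Illustrative usage sketch (not part of the original package source). The returned
# vector is the second coefficient row of the weighted regression of standardized y
# on standardized x: estimate (the weighted correlation), SE, t value, and p value.
if (FALSE) {
set.seed(1)
x <- rnorm(30); y <- x + rnorm(30); w <- runif(30)
onecor.wtd(x, y, w)
}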
|
/scratch/gouwar.j/cran-all/cranData/weights/R/onecor.wtd.r
|
plotinteractpreds <- function(out, seplot=TRUE, ylim=NULL, main=NULL, xlab=NULL, ylab=NULL, legend=TRUE, placement="bottomright", lwd=3, add=FALSE, addby=TRUE, addat=FALSE, mfrow=NULL, linecol=NULL, secol=NULL, showbynamelegend=FALSE, showatnamelegend=FALSE, showoutnamelegend=FALSE, lty=NULL, density=30, startangle=45, ...){
UseMethod("plotinteractpreds")
}
|
/scratch/gouwar.j/cran-all/cranData/weights/R/plotinteractpreds.R
|
plotinteractpreds.default <- function(out, seplot=TRUE, ylim=NULL, main=NULL, xlab=NULL, ylab=NULL, legend=TRUE, placement="bottomright", lwd=3, add=FALSE, addby=TRUE, addat=FALSE, mfrow=NULL, linecol=NULL, secol=NULL, showbynamelegend=FALSE, showatnamelegend=FALSE, showoutnamelegend=FALSE, lty=NULL, density=30, startangle=45, ...){
oldmfrow <- par()$mfrow
if(class(out)!="interactpreds")
warning("This function may not work with data that is not generated using the findwtdinteraction function")
if(!is.null(mfrow))
par(mfrow=mfrow)
dvname <- out$Meta$dvname
across <- out$Meta$across
by <- out$Meta$by
at <- out$Meta$at
mins <- lapply(1:length(out$Means), function(x) out$Means[[x]]-1.96*out$SEs[[x]])
maxs <- lapply(1:length(out$Means), function(x) out$Means[[x]]+1.96*out$SEs[[x]])
if(is.null(ylim))
ylim <- c(min(unlist(mins), na.rm=TRUE), max(unlist(maxs), na.rm=TRUE))
atlevs <- try(names(out$Means))
bylevs <- try(rownames(out$Means[[1]]))
acrosslevs <- try(names(out$Means[[1]]))
if(!is.null(bylevs)){
acrosslevs <- try(colnames(out$Means[[1]]))
hasby=TRUE
bylevnames <- bylevs
}
if(is.null(bylevs)){
hasby <- FALSE
bylevs <- "All"
}
accnumeric <- suppressWarnings(sum(as.character(as.numeric(acrosslevs))==acrosslevs, na.rm=TRUE)==length(acrosslevs))
if(accnumeric==TRUE){
acrosslevs <- as.numeric(acrosslevs)
acrossvals <- as.numeric(acrosslevs)
}
if(accnumeric==FALSE)
acrossvals <- 1:length(acrosslevs)
xlim <- c(min(acrossvals, na.rm=TRUE), max(acrossvals, na.rm=TRUE)) # xlim is not an argument here, so compute it directly
hasat <- !(is.null(atlevs) || length(atlevs)==1)
if(is.null(linecol) & addat==TRUE)
linecol <- gray(seq(0, .5, length.out=length(atlevs)))
if(is.null(linecol))
linecol <- "black"
if(is.null(secol) & addat==TRUE)
secol <- gray(seq(.3, .8, length.out=length(atlevs)))
if(is.null(secol))
secol <- "light gray"
atlegend <- atlevs
bylegend <- bylevs
if(showatnamelegend==TRUE)
atlegend <- paste(atlevs, at)
if(showbynamelegend==TRUE)
bylegend <- paste(bylevs, by)
premain <- main
for(a in 1:length(atlevs)){
if(is.null(premain)){
mainp <- paste("Interaction Plot of", dvname, "Across Levels of\n", across)
if(!is.null(bylevs) & length(bylevs)>1)
mainp <- paste(mainp, "By", by)
if(hasat==TRUE & addat==FALSE)
main <- paste(mainp, "At", at, "=", atlevs[a])
if(hasat==TRUE & addat==TRUE)
main <- paste(mainp, "At Each Level Of", at)
if(hasat==FALSE)
main <- mainp
}
if(is.null(ylab))
ylab <- dvname
if(is.null(xlab))
xlab <- across
if(add==FALSE){
plot(acrossvals, acrossvals, type="n", ylim=ylim, main=main, ylab=ylab, xlab=xlab, axes=FALSE, ...)
axis(1, at=c(-999,999))
axis(1, at=acrossvals, labels=acrosslevs)
axis(2, at=c(-999,999))
axis(2)
axis(3, at=c(-999,999))
axis(4, at=c(-999,999))
}
if(length(density)==1)
dense <- rep(density, length(bylevs))
if(length(linecol)==1 & hasat==TRUE & addat==TRUE)
linecol <- rep(linecol, length(atlevs))
if(length(linecol)==1)
linecol <- rep(linecol, length(bylevs))
if(length(secol)==1 & hasat==TRUE & addat==TRUE)
secol <- rep(secol, length(atlevs))
if(length(secol)==1)
secol <- rep(secol, length(bylevs))
if(seplot==TRUE){
if(hasby==TRUE){
for(i in 1:length(bylevs)){
if(seplot==TRUE & hasat==FALSE)
polygon(c(acrossvals, rev(acrossvals)), c(maxs[[a]][i,], rev(mins[[a]][i,])), density=dense[i], angle=startangle+10*(i-1), col=secol[i])# just added the [i] here, check that it works
if(seplot==TRUE & hasat==TRUE & addat==FALSE)
polygon(c(acrossvals, rev(acrossvals)), c(maxs[[a]][i,], rev(mins[[a]][i,])), density=dense[i], angle=startangle+10*(i-1), col=secol[i])# just added the [i] here, check that it works
if(seplot==TRUE & hasat==TRUE & addat==TRUE)
polygon(c(acrossvals, rev(acrossvals)), c(maxs[[a]][i,], rev(mins[[a]][i,])), density=dense[i], angle=startangle*a+10*(i-1), col=secol[a])
}
}
if(hasby==FALSE)
polygon(c(acrossvals, rev(acrossvals)), c(maxs[[a]], rev(mins[[a]])), density=density, angle=startangle*a, col=secol[a])
}
if(addat==TRUE)
add <- TRUE
if(!is.null(lty))
ltyset <- lty
if(is.null(lty))
ltyset <- 1:length(bylevs)
if(length(ltyset)!=length(bylevs)){
ltyset <- 1:length(bylevs)
warning("lty must have a length equal to the number of levels for the by variable (or 1 if there is no by variable), it has been reset to defaults")
}
if(hasby==TRUE){
for(i in 1:length(bylevs)){
if(hasat==FALSE || (hasat==TRUE & addat==FALSE))
lines(acrossvals, out$Means[[a]][i,], lty=ltyset[i], lwd=lwd, col=linecol[i])
if(hasat==TRUE & addat==TRUE)
lines(acrossvals, out$Means[[a]][i,], lty=ltyset[i], lwd=lwd, col=linecol[a])
if(legend==TRUE){
if(hasat==FALSE)
legend(x=placement, legend=bylegend, lty=ltyset, lwd=lwd, col=linecol)
if(hasat==TRUE & addat==FALSE)
legend(x=placement, legend=bylegend, lty=ltyset, lwd=lwd, col=linecol)
}
}
}
if(hasby==FALSE){
if(hasat==FALSE || (hasat==TRUE & addat==FALSE))
lines(acrossvals, out$Means[[a]], lty=ltyset, lwd=lwd, col=linecol)
if(hasat==TRUE & addat==TRUE)
lines(acrossvals, out$Means[[a]], lty=ltyset, lwd=lwd, col=linecol[a])
}
}
if(legend==TRUE & hasat==TRUE & addat==TRUE & hasby==TRUE)
legend(x=placement, legend=c(bylegend, atlegend), lty=c(1:length(bylevs), rep(1, length(atlevs))), lwd=lwd, col=c(rep("black", length(bylevnames)), linecol))
if(legend==TRUE & hasat==TRUE & addat==TRUE & hasby==FALSE)
legend(x=placement, legend=c(atlegend), lty=1:length(atlevs), lwd=lwd, col=linecol)
if(!is.null(mfrow))
par(mfrow=oldmfrow)
}
|
/scratch/gouwar.j/cran-all/cranData/weights/R/plotinteractpreds.default.R
|
plotinteractpreds.interactpreds <- function(out, seplot=TRUE, ylim=NULL, main=NULL, xlab=NULL, ylab=NULL, legend=TRUE, placement="bottomright", lwd=3, add=FALSE, addby=TRUE, addat=FALSE, mfrow=NULL, linecol=NULL, secol=NULL, showbynamelegend=FALSE, showatnamelegend=FALSE, showoutnamelegend=FALSE, lty=NULL, density=30, startangle=45, ...){
#oldmfrow <- par()$mfrow
if(class(out)!="interactpreds")
warning("This function may not work with data that is not generated using the findwtdinteraction function")
#if(!is.null(mfrow))
# par(mfrow=mfrow)
dvname <- out$Meta$dvname
across <- out$Meta$across
by <- out$Meta$by
at <- out$Meta$at
mins <- lapply(1:length(out$Means), function(x) out$Means[[x]]-1.96*out$SEs[[x]])
maxs <- lapply(1:length(out$Means), function(x) out$Means[[x]]+1.96*out$SEs[[x]])
if(is.null(ylim) | length(ylim)!=2)
ylim <- c(min(unlist(mins), na.rm=TRUE), max(unlist(maxs), na.rm=TRUE))
atlevs <- try(names(out$Means))
bylevs <- try(rownames(out$Means[[1]]))
acrosslevs <- try(names(out$Means[[1]]))
if(!is.null(bylevs)){
acrosslevs <- try(colnames(out$Means[[1]]))
hasby=TRUE
bylevnames <- bylevs
}
if(is.null(bylevs)){
acrosslevs <- try(colnames(out$Means[[1]]))
hasby <- FALSE
bylevs <- "All"
}
accnumeric <- suppressWarnings(sum(as.character(as.numeric(as.character(acrosslevs)))==as.character(acrosslevs), na.rm=TRUE)==length(acrosslevs))
if(isTRUE(accnumeric)){
acrosslevs <- as.numeric(as.character(acrosslevs))
acrossvals <- as.numeric(acrosslevs)
}
if(!isTRUE(accnumeric))
acrossvals <- 1:length(acrosslevs)
xlim <- range(acrossvals, na.rm=TRUE) # xlim is not an argument here, so compute it directly
hasat <- !(is.null(atlevs) || length(atlevs)==1)
if(is.null(linecol) & isTRUE(addat))
linecol <- gray(seq(0, .5, length.out=length(atlevs)))
if(is.null(linecol))
linecol <- "black"
if(is.null(secol) & isTRUE(addat))
secol <- gray(seq(.3, .8, length.out=length(atlevs)))
if(is.null(secol))
secol <- "light gray"
atlegend <- atlevs
bylegend <- bylevs
if(showatnamelegend==TRUE)
atlegend <- paste(atlevs, at)
if(showbynamelegend==TRUE)
bylegend <- paste(bylevs, by)
premain <- main
for(a in 1:length(atlevs)){
if(is.null(premain)){
mainp <- paste("Interaction Plot of", dvname, "Across Levels of\n", across)
if(!is.null(bylevs) & length(bylevs)>1)
mainp <- paste(mainp, "By", by)
if(isTRUE(hasat) & !isTRUE(addat))
main <- paste(mainp, "At", at, "=", atlevs[a])
if(isTRUE(hasat) & isTRUE(addat))
main <- paste(mainp, "At Each Level Of", at)
if(!isTRUE(hasat))
main <- mainp
}
if(is.null(ylab))
ylab <- dvname
if(is.null(xlab))
xlab <- across
if(add==FALSE){
plot(acrossvals, acrossvals, type="n", ylim=ylim, main=main, ylab=ylab, xlab=xlab, axes=FALSE, ...)
axis(1, at=c(-999,999))
axis(1, at=acrossvals, labels=acrosslevs)
axis(2, at=c(-999,999))
axis(2)
axis(3, at=c(-999,999))
axis(4, at=c(-999,999))
}
if(length(density)==1)
dense <- rep(density, length(bylevs))
if(length(linecol)==1 & isTRUE(hasat) & isTRUE(addat))
linecol <- rep(linecol, length(atlevs))
if(length(linecol)==1)
linecol <- rep(linecol, length(bylevs))
if(length(secol)==1 & isTRUE(hasat) & isTRUE(addat))
secol <- rep(secol, length(atlevs))
if(length(secol)==1)
secol <- rep(secol, length(bylevs))
if(seplot==TRUE){
if(hasby==TRUE){
for(i in 1:length(bylevs)){
if(seplot==TRUE & !isTRUE(hasat))
polygon(c(acrossvals, rev(acrossvals)), c(maxs[[a]][i,], rev(mins[[a]][i,])), density=dense[i], angle=startangle+10*(i-1), col=secol[i])# just added the [i] here, check that it works
if(isTRUE(seplot) & isTRUE(hasat) & !isTRUE(addat))
polygon(c(acrossvals, rev(acrossvals)), c(maxs[[a]][i,], rev(mins[[a]][i,])), density=dense[i], angle=startangle+10*(i-1), col=secol[i])# just added the [i] here, check that it works
if(isTRUE(seplot) & isTRUE(hasat) & isTRUE(addat))
polygon(c(acrossvals, rev(acrossvals)), c(maxs[[a]][i,], rev(mins[[a]][i,])), density=dense[i], angle=startangle*a+10*(i-1), col=secol[a])
}
}
if(hasby==FALSE)
polygon(c(acrossvals, rev(acrossvals)), c(maxs[[a]], rev(mins[[a]])), density=density, angle=startangle*a, col=secol[a])
}
if(isTRUE(addat))
add <- TRUE
if(!is.null(lty))
ltyset <- lty
if(is.null(lty))
ltyset <- 1:length(bylevs)
if(length(ltyset)!=length(bylevs)){
ltyset <- 1:length(bylevs)
warning("lty must have a length equal to the number of levels for the by variable (or 1 if there is no by variable), it has been reset to defaults")
}
if(isTRUE(hasby)){
for(i in 1:length(bylevs)){
if(!isTRUE(hasat) || (isTRUE(hasat) & !isTRUE(addat)))
lines(acrossvals, out$Means[[a]][i,], lty=ltyset[i], lwd=lwd, col=linecol[i])
if(isTRUE(hasat) & isTRUE(addat))
lines(acrossvals, out$Means[[a]][i,], lty=ltyset[i], lwd=lwd, col=linecol[a])
if(legend==TRUE){
if(!isTRUE(hasat))
legend(x=placement, legend=bylegend, lty=ltyset, lwd=lwd, col=linecol)
if(isTRUE(hasat) & !isTRUE(addat))
legend(x=placement, legend=bylegend, lty=ltyset, lwd=lwd, col=linecol)
}
}
}
if(!isTRUE(hasby)){
if(!isTRUE(hasat) || (isTRUE(hasat) & !isTRUE(addat)))
lines(acrossvals, out$Means[[a]], lty=ltyset, lwd=lwd, col=linecol)
if(isTRUE(hasat) & isTRUE(addat))
lines(acrossvals, out$Means[[a]], lty=ltyset, lwd=lwd, col=linecol[a])
}
}
if(isTRUE(legend) & isTRUE(hasat) & isTRUE(addat) & isTRUE(hasby))
legend(x=placement, legend=c(bylegend, atlegend), lty=c(1:length(bylevs), rep(1, length(atlevs))), lwd=lwd, col=c(rep("black", length(bylevnames)), linecol))
if(isTRUE(legend) & isTRUE(hasat) & isTRUE(addat) & !isTRUE(hasby))
legend(x=placement, legend=c(atlegend), lty=1:length(atlevs), lwd=lwd, col=linecol)
#if(!is.null(mfrow))
# par(mfrow=oldmfrow)
}
|
/scratch/gouwar.j/cran-all/cranData/weights/R/plotinteractpreds.interactpreds.R
|
plotinteractpreds.interactpredsmnl <- function(out, seplot=FALSE, ylim=NULL, main=NULL, xlab=NULL, ylab=NULL, legend=TRUE, placement="bottomright", lwd=3, add=FALSE, addby=TRUE, addat=FALSE, mfrow=NULL, linecol=NULL, secol=NULL, showbynamelegend=FALSE, showatnamelegend=FALSE, showoutnamelegend=FALSE, lty=NULL, density=30, startangle=45, ...){
if(addat==TRUE){
warning("addat is not available for multinomial logit")
}
if(seplot==TRUE){
warning("standard errors are not yet available for multinomial logit, they will hopefully be added soon")
}
seplot <- FALSE
oldmfrow <- par()$mfrow
if(!is.null(mfrow))
par(mfrow=mfrow)
dvname <- out$Meta$dvname
across <- out$Meta$across
by <- out$Meta$by
at <- out$Meta$at
mins <- lapply(1:length(out$Means), function(x) out$Means[[x]])#-1.96*out$SEs[[x]])
maxs <- lapply(1:length(out$Means), function(x) out$Means[[x]])#+1.96*out$SEs[[x]])
if(is.null(ylim))
ylim <- c(min(unlist(mins), na.rm=TRUE), max(unlist(maxs), na.rm=TRUE))
atlevs <- try(names(out$Means))
bylevs <- try(names(out$Means[[1]]))
acrosslevs <- try(rownames(out$Means[[1]]))
outcats <- try(colnames(out$Means[[1]]))
if(!is.null(bylevs)){
acrosslevs <- try(rownames(out$Means[[1]][[1]]))
outcats <- try(colnames(out$Means[[1]][[1]]))
hasby=TRUE
bylevnames <- bylevs
}
if(is.null(bylevs)){
hasby <- FALSE
bylevs <- "All"
}
accnumeric <- suppressWarnings(sum(as.character(as.numeric(acrosslevs))==acrosslevs, na.rm=TRUE)==length(acrosslevs))
if(accnumeric==TRUE){
acrosslevs <- as.numeric(acrosslevs)
acrossvals <- as.numeric(acrosslevs)
}
if(accnumeric==FALSE)
acrossvals <- 1:length(acrosslevs)
xlim <- c(min(acrossvals, na.rm=TRUE), max(acrossvals, na.rm=TRUE)) # xlim is not an argument here, so compute it directly
hasat <- !(is.null(atlevs) || length(atlevs)==1)
if(is.null(linecol) & addat==TRUE)
linecol <- gray(seq(0, .5, length.out=length(atlevs)))
if(is.null(linecol))
linecol <- "black"
atlegend <- atlevs
bylegend <- bylevs
outlegend <- outcats
if(showatnamelegend==TRUE)
atlegend <- paste(atlevs, at)
if(showbynamelegend==TRUE)
bylegend <- paste(bylevs, by)
if(showoutnamelegend==TRUE)
outlegend <- paste(outcats, dvname)
premain <- main
for(a in 1:length(atlevs)){
for(b in 1:length(bylevs)){
if(is.null(premain)){
mainp <- paste("Interaction Plot of", dvname, "Across Levels of\n", across)
if(!is.null(bylevs) & length(bylevs)>1 & addby==TRUE)
mainp <- paste(mainp, "By", by)
if(!is.null(bylevs) & length(bylevs)>1 & addby==FALSE)
mainp <- paste(mainp, "At", by, "=", bylevs[b])
if(hasat==TRUE & addby==FALSE)
mainp <- paste(mainp, "and")
if(hasat==TRUE & addat==FALSE)
main <- paste(mainp, "At", at, "=", atlevs[a])
if(hasat==TRUE & addat==TRUE)
main <- paste(mainp, "At Each Level Of", at)
if(hasat==FALSE)
main <- mainp
}
if(is.null(ylab))
ylab <- dvname
if(is.null(xlab))
xlab <- across
if(add==FALSE){
plot(acrossvals, acrossvals, type="n", ylim=ylim, main=main, ylab=ylab, xlab=xlab, axes=FALSE, ...)
axis(1, at=c(-999,999))
axis(1, at=acrossvals, labels=acrosslevs)
axis(2, at=c(-999,999))
axis(2)
axis(3, at=c(-999,999))
axis(4, at=c(-999,999))
}
if(length(density)==1)
dense <- rep(density, length(outcats))
if(length(linecol)==1 & hasby==TRUE & addby==TRUE)
linecol <- rep(linecol, length(bylevs))
if(length(linecol)==1)
linecol <- rep(linecol, length(outcats))
if(addby==TRUE)
add <- TRUE
if(!is.null(lty))
ltyset <- lty
if(is.null(lty))
ltyset <- 1:length(outcats)
if(length(ltyset)!=length(outcats)){
ltyset <- 1:length(outcats)
warning("lty must have a length equal to the number of outcome categories, it has been reset to defaults")
}
for(i in 1:length(outcats)){
if(hasby==FALSE || (hasby==TRUE & addby==FALSE))
lines(acrossvals, out$Means[[a]][[b]][,i], lty=ltyset[i], lwd=lwd, col=linecol[i])
if(hasby==TRUE & addby==TRUE)
lines(acrossvals, out$Means[[a]][[b]][,i], lty=ltyset[i], lwd=lwd, col=linecol[b])
if(legend==TRUE){
if(hasby==FALSE)
legend(x=placement, legend=outlegend, lty=ltyset, lwd=lwd, col=linecol)
if(hasby==TRUE & addby==FALSE)
legend(x=placement, legend=outlegend, lty=ltyset, lwd=lwd, col=linecol)
}
}
#if(hasby==FALSE || (hasby==TRUE & addby==FALSE))
# lines(acrossvals, out$Means[[a]][[b]], lty=ltyset, lwd=lwd, col=linecol)
#if(hasby==TRUE & addby==TRUE)
# lines(acrossvals, out$Means[[a]][[b]], lty=ltyset, lwd=lwd, col=linecol[b])
}
if(legend==TRUE & hasby==TRUE & addby==TRUE)
legend(x=placement, legend=c(outlegend, bylegend), lty=c(1:length(outcats), rep(1, length(bylevs))), lwd=lwd, col=c(rep("black", length(outcats)), linecol))
}
if(!is.null(mfrow))
par(mfrow=oldmfrow)
}
|
/scratch/gouwar.j/cran-all/cranData/weights/R/plotinteractpreds.interactpredsmnl.R
|
plotwtdinteraction <- function(x, across, by=NULL, at=NULL, acrosslevs=NULL, bylevs=NULL, atlevs=NULL, weight=NULL, dvname=NULL, acclevnames=NULL, bylevnames=NULL, atlevnames=NULL, stdzacross=FALSE, stdzby=FALSE, stdzat=FALSE, limitlevs=20, type="response", seplot=TRUE, ylim=NULL, main=NULL, xlab=NULL, ylab=NULL, legend=TRUE, placement="bottomright", lwd=3, add=FALSE, addby=TRUE, addat=FALSE, mfrow=NULL, linecol=NULL, secol=NULL, showbynamelegend=FALSE, showatnamelegend=FALSE, showoutnamelegend=FALSE, lty=NULL, density=30, startangle=45, approach="prototypical", data=NULL, nsim=100, ...){
out <- findwtdinteraction(x, across=across, by=by, at=at, acrosslevs=acrosslevs, bylevs=bylevs, atlevs=atlevs, weight=weight, dvname=dvname, acclevnames=acclevnames, bylevnames=bylevnames, atlevnames=atlevnames, stdzacross=stdzacross, stdzby=stdzby, stdzat=stdzat, limitlevs=limitlevs, type=type, approach=approach, data=data, nsim=nsim, ...)
plotinteractpreds(out, seplot=seplot, ylim=ylim, main=main, xlab=xlab, ylab=ylab, legend=legend, placement=placement, lwd=lwd, add=add, addat=addat, mfrow=mfrow, linecol=linecol, secol=secol, showbynamelegend=showbynamelegend, showatnamelegend=showatnamelegend, showoutnamelegend=showoutnamelegend, lty=lty, density=density, startangle=startangle, ...)
}
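# Illustrative usage sketch (not part of the original package source). It assumes a
# findwtdinteraction method exists for the fitted model class (here a plain lm);
# the data are simulated and the variable names are hypothetical.
if (FALSE) {
d <- data.frame(y = rnorm(200), x1 = rnorm(200), x2 = rnorm(200))
m <- lm(y ~ x1 * x2, data = d)
plotwtdinteraction(m, across = "x1", by = "x2", stdzby = TRUE)
}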
|
/scratch/gouwar.j/cran-all/cranData/weights/R/plotwtdinteraction.R
|
rd <- function(x, digits=2, add=TRUE, max=(digits+3)){
y <- round(x, digits=digits)
yk <- format(y, nsmall=digits)
nzero <- sum(unlist(y)==0)
if(add==TRUE){
while(nzero>0){
zeros <- y==0
digits <- digits+1
y[zeros] <- round(x, digits=digits)[zeros]
yk[zeros] <- format(y[zeros], nsmall=digits)
nzero <- sum(y==0)
if(digits>(max-1))
nzero <- 0
}
}
z <- sub("^([-]?)0[.]","\\1.", gsub(" +", "", yk))
z
}
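# Illustrative usage sketch (not part of the original package source). rd() rounds,
# drops the leading zero, and (with add=TRUE) keeps adding digits to values that
# would otherwise round to zero.
if (FALSE) {
rd(c(0.1234, -0.056))   # ".12" "-.06"
rd(0.0004, digits = 2)  # extra digits are added until the value is non-zero (up to max)
}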
|
/scratch/gouwar.j/cran-all/cranData/weights/R/rd.r
|
starmaker <- function(x, p.levels=c(.001, .01, .05, .1), symbols=c("***", "**", "*", "+")){
if(length(p.levels)!=length(symbols))
stop("p.levels and symbols must have the same number of items")
symbols <- c(symbols, "")
as.character(cut(abs(x), c(-99999, p.levels, 99999), labels=symbols))
}
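# Illustrative usage sketch (not part of the original package source), mapping
# p-values to the conventional significance symbols.
if (FALSE) {
starmaker(c(0.0005, 0.03, 0.2))  # "***" "*" ""
}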
|
/scratch/gouwar.j/cran-all/cranData/weights/R/starmaker.r
|
stdz <- function(x, weight=NULL){
if(is.null(weight)){
weight <- rep(1, length(x))
}
x <- x-wtd.mean(x, weight, na.rm=TRUE)
x <- x/sqrt(wtd.var(x, weight, na.rm=TRUE))
x
}
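# Illustrative usage sketch (not part of the original package source). After
# standardizing, the weighted mean is (approximately) 0 and the weighted variance 1.
# Data and weights are made up; wtd.mean() and wtd.var() come from Hmisc.
if (FALSE) {
x <- c(2, 4, 6, 8)
w <- c(1, 1, 2, 2)
z <- stdz(x, weight = w)
c(wtd.mean(z, w), wtd.var(z, w))  # ~0 and ~1
}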
|
/scratch/gouwar.j/cran-all/cranData/weights/R/stdz.r
|
wpct <- function(x, weight=NULL, na.rm=TRUE, ...){
if(is.null(weight)){
weight <- rep(1, length(x))
}
y <- wtd.table(x, weight, na.rm=na.rm, ...)$sum.of.weights/sum(wtd.table(x, weight, na.rm=na.rm, ...)$sum.of.weights)
names(y) <- wtd.table(x, weight, na.rm=na.rm, ...)$x
z <- as.vector(y)
names(z) <- names(y)
if(is.logical(x))
z <- rev(z)
z
}
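# Illustrative usage sketch (not part of the original package source): weighted
# proportions for a small made-up categorical vector.
if (FALSE) {
wpct(c("a", "a", "b", "c"), weight = c(1, 1, 2, 4))  # a = 0.25, b = 0.25, c = 0.50
}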
|
/scratch/gouwar.j/cran-all/cranData/weights/R/wpct.r
|
wtd.chi.sq <- function(var1, var2, var3=NULL, weight=NULL, na.rm=TRUE, drop.missing.levels=TRUE, mean1=TRUE){
if(is.null(weight)){
weight <- rep(1, length(var1))
}
if(mean1==TRUE)
weight <- weight/mean(weight, na.rm=TRUE)
if(na.rm==TRUE){
filt <- (!is.na(var1) & !is.na(var2))
if(!is.null(var3))
filt <- (!is.na(var1) & !is.na(var2) & !is.na(var3))
var1 <- var1[filt]
var2 <- var2[filt]
if(!is.null(var3))
var3 <- var3[filt]
weight <- weight[filt]
}
if(drop.missing.levels==TRUE){
if(is.factor(var1))
var1 <- drop.levels(var1)
if(is.factor(var2))
var2 <- drop.levels(var2)
if(!is.null(var3))
if(is.factor(var3))
var3 <- drop.levels(var3)
}
var12set <- unlist(summary(xtabs(weight~var1+var2))[c("statistic", "parameter", "p.value")])
names(var12set) <- c("Chisq", "df", "p.value")
out <- var12set
if(!is.null(var3)){
as.numeric(var3)
var123set <- unlist(summary(xtabs(weight~var1+var2+var3))[c("statistic", "parameter", "p.value")])
var13set <- unlist(summary(xtabs(weight~var1+var3))[c("statistic", "parameter", "p.value")])
var23set <- unlist(summary(xtabs(weight~var2+var3))[c("statistic", "parameter", "p.value")])
v12sep <- unlist(sapply(unique(var3), function(x) unlist(summary(xtabs(weight[var3==x]~var1[var3==x]+var2[var3==x]))[c("statistic", "parameter")])))
acvar3 <- c(sum(v12sep[1,])-var12set[1], sum(v12sep[2,])-var12set[2], 1-pchisq(sum(v12sep[1,])-var12set[1], sum(v12sep[2,])-var12set[2]))
v13sep <- unlist(sapply(unique(var2), function(x) unlist(summary(xtabs(weight[var2==x]~var1[var2==x]+var3[var2==x]))[c("statistic", "parameter")])))
acvar2 <- c(sum(v13sep[1,])-var13set[1], sum(v13sep[2,])-var13set[2], 1-pchisq(sum(v13sep[1,])-var13set[1], sum(v13sep[2,])-var13set[2]))
v23sep <- unlist(sapply(unique(var1), function(x) unlist(summary(xtabs(weight[var1==x]~var2[var1==x]+var3[var1==x]))[c("statistic", "parameter")])))
acvar1 <- c(sum(v23sep[1,])-var23set[1], sum(v23sep[2,])-var23set[2], 1-pchisq(sum(v23sep[1,])-var23set[1], sum(v23sep[2,])-var23set[2]))
out <- rbind(var123set, var12set, var13set, var23set, acvar1, acvar2, acvar3)
colnames(out) <- c("Chisq", "df", "p.value")
rownames(out) <- c("Using All Three Variables", "Using Var1 and Var2", "Using Var1 and Var3", "Using Var2 and Var3", "Difference Across Levels of Var1", "Difference Across Levels of Var2", "Difference Across Levels of Var3")
}
out
}
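# Illustrative usage sketch (not part of the original package source). With two
# variables it returns the weighted Chisq, df, and p.value; supplying var3 returns
# the full table of two- and three-way tests. Data are simulated.
if (FALSE) {
v1 <- sample(c("a", "b"), 200, replace = TRUE)
v2 <- sample(c("x", "y"), 200, replace = TRUE)
w <- runif(200)
wtd.chi.sq(v1, v2, weight = w)
}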
|
/scratch/gouwar.j/cran-all/cranData/weights/R/wtd.chi.sq.r
|
wtd.cor <- function(x, y=NULL, weight=NULL, mean1=TRUE, collapse=TRUE, bootse=FALSE, bootp=FALSE, bootn=1000){
x <- as.matrix(x)
xnm <- colnames(x)
if(is.null(weight)){
weight <- rep(1, dim(x)[1])
}
if(bootse==FALSE & bootp==TRUE)
warning("bootp can only be used with bootstrapped standard errors")
if(mean1==TRUE)
weight <- weight/mean(weight, na.rm=TRUE)
if(is.null(y)){
y <- x
}
y <- as.matrix(y)
ynm <- colnames(y)
if(is.null(xnm))
xnm <- "X"
if(is.null(ynm))
ynm <- "Y"
if(dim(x)[1]!=dim(y)[1])
stop("Cannot Correlate Variables of Different Lengths")
if(bootse==FALSE){
materset <- lapply(as.data.frame(x), function(x) lapply(as.data.frame(y), function(y) onecor.wtd(x, y, weight)))
est <- sapply(materset, function(q) sapply(q, function(g) g[1]))
se <- sapply(materset, function(q) sapply(q, function(g) g[2]))
tval <- sapply(materset, function(q) sapply(q, function(g) g[3]))
pval <- sapply(materset, function(q) sapply(q, function(g) g[4]))
out <- list(correlation=est, std.err=se, t.value=tval, p.value=pval)
}
if(bootse==TRUE){
est <- as.matrix(wtd.cors(x, y, weight))
samps <- lapply(1:bootn, function(g) sample(1:dim(x)[1], round(sum(weight, na.rm=TRUE), 0), replace=TRUE, prob=weight))
corset2 <- lapply(samps, function(q) as.matrix(cor(x[q,], y[q,], use="pairwise.complete.obs")))
eachcor <- lapply(1:dim(est)[1], function(a) sapply(1:dim(est)[2], function(b) unlist(sapply(corset2, function(g) g[a,b]))))
est2 <- sapply(eachcor, function(a) colMeans(a))
se <- sapply(eachcor, function(a) sqrt(apply(a, 2, var)))
tval <- est2/se
pval <- pchisq(tval^2, 1, lower.tail=FALSE)
if(bootp==TRUE)
pval <- sapply(eachcor, function(a) apply(a, 2, function(x) 2*min(c(sum(x>0 & !is.na(x))/sum(!is.na(x)), sum(x<0 & !is.na(x))/sum(!is.na(x))))))
if(length(ynm)>1 & length(xnm)>1){
colnames(est2) <- colnames(se) <- colnames(tval) <- colnames(pval) <- xnm
rownames(est2) <- rownames(se) <- rownames(tval) <- rownames(pval) <- ynm
}
out <- list(correlation=t(est), bootcor=est2, std.err=se, t.value=tval, p.value=pval)
}
if(is.vector(est) & collapse==TRUE || (1 %in% dim(est)) & collapse==TRUE){ #Fix Section
outpre <- out
if(bootse==FALSE)
out <- matrix(unlist(out), ncol=4, byrow=FALSE)
if(bootse==TRUE)
out <- matrix(unlist(out), ncol=5, byrow=FALSE)
nom <- xnm
if(length(xnm)==1)
nom <- ynm
rownames(out) <- nom
colnames(out) <- names(outpre)
}
out
}
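# Illustrative usage sketch (not part of the original package source), comparing the
# regression-based standard errors with the bootstrap option. Data are simulated.
if (FALSE) {
set.seed(1)
x <- rnorm(100); y <- x + rnorm(100); w <- runif(100)
wtd.cor(x, y, weight = w)
wtd.cor(x, y, weight = w, bootse = TRUE, bootn = 200)
}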
|
/scratch/gouwar.j/cran-all/cranData/weights/R/wtd.cor.r
|
wtd.cors <- function(x, y=NULL, weight=NULL){
if(is.null(y)){
y <- x
}
q <- as.matrix(x)
r <- as.matrix(y)
if(is.null(weight)){
weight <- rep(1, dim(q)[1])
}
x <- q[!is.na(weight),]
y <- r[!is.na(weight),]
weight <- weight[!is.na(weight)]
out <- .Call("wcorr", as.matrix(x), as.matrix(y), as.double(weight), NAOK=TRUE, PACKAGE="weights")
## C code for this package was contributed by Marcus Schwemmle
if(!is.null(colnames(x)))
rownames(out) <- colnames(x)
if(!is.null(colnames(y)))
colnames(out) <- colnames(y)
out
}
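# Illustrative usage sketch (not part of the original package source). wtd.cors()
# returns only the weighted correlation matrix (no standard errors), computed in C.
if (FALSE) {
m <- cbind(a = rnorm(50), b = rnorm(50), c = rnorm(50))
wtd.cors(m, weight = runif(50))
}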
|
/scratch/gouwar.j/cran-all/cranData/weights/R/wtd.cors.r
|
wtd.hist <- function (x, breaks = "Sturges", freq = NULL, probability = !freq,
include.lowest = TRUE, right = TRUE, density = NULL, angle = 45,
col = NULL, border = NULL, main = paste("Histogram of", xname),
xlim = range(breaks), ylim = NULL, xlab = xname, ylab, axes = TRUE,
plot = TRUE, labels = FALSE, nclass = NULL, weight=NULL, ...){ #weight command was added
## PLEASE NOTE THAT THIS SOFTWARE IS A COPIED AND MODIFIED VERSION OF THE hist.default FUNCTION PROVIDED IN THE R GRAPHICS PACKAGE AS OF THE TIME THIS SOFTWARE WAS MODIFIED. THAT SOFTWARE WAS COPYRIGHT OF R-CORE (2010) AND WAS MODIFIED UNDER THE TERMS OF GNU LICENSE 2. ALL CHANGES TO THE ORIGINAL SOFTWARE ARE NOTATED IN CODE.
# require(Hmisc) # Requirement for Hmisc was added
if (!is.numeric(x))
stop("'x' must be numeric")
if(is.null(weight)) # added
weight <- rep(1, length(x)) # added
xname <- paste(deparse(substitute(x), 500), collapse = "\n")
n <- sum(weight[is.finite(x)]) # modified
weight <- weight[is.finite(x)] # added
x <- x[is.finite(x)]
use.br <- !missing(breaks)
if (use.br) {
if (!missing(nclass))
warning("'nclass' not used when 'breaks' is specified")
}
else if (!is.null(nclass) && length(nclass) == 1)
breaks <- nclass
use.br <- use.br && (nB <- length(breaks)) > 1
if (use.br)
breaks <- sort(breaks)
else {
if (!include.lowest) {
include.lowest <- TRUE
warning("'include.lowest' ignored as 'breaks' is not a vector")
}
if (is.character(breaks)) {
breaks <- match.arg(tolower(breaks), c("sturges",
"fd", "freedman-diaconis", "scott"))
breaks <- switch(breaks, sturges = nclass.Sturges(x),
`freedman-diaconis` = , fd = nclass.FD(x), scott = nclass.scott(x),
stop("unknown 'breaks' algorithm"))
}
else if (is.function(breaks)) {
breaks <- breaks(x)
}
if (!is.numeric(breaks) || !is.finite(breaks) || breaks <
1)
stop("invalid number of 'breaks'")
breaks <- pretty(range(x), n = breaks, min.n = 1)
nB <- length(breaks)
if (nB <= 1)
stop("hist.default: pretty() error, breaks=", format(breaks))
}
h <- diff(breaks)
equidist <- !use.br || diff(range(h)) < 1e-07 * mean(h)
if (!use.br && any(h <= 0))
stop("'breaks' are not strictly increasing")
freq1 <- freq
if (is.null(freq)) {
freq1 <- if (!missing(probability))
!as.logical(probability)
else equidist
}
else if (!missing(probability) && any(probability == freq))
stop("'probability' is an alias for '!freq', however they differ.")
diddle <- 1e-07 * stats::median(diff(breaks))
fuzz <- if (right)
c(if (include.lowest) -diddle else diddle, rep.int(diddle,
length(breaks) - 1))
else c(rep.int(-diddle, length(breaks) - 1), if (include.lowest) diddle else -diddle)
fuzzybreaks <- breaks + fuzz
h <- diff(fuzzybreaks)
storage.mode(x) <- "numeric"
storage.mode(fuzzybreaks) <- "numeric"
counts <- as.numeric(xtabs(weight~cut(x, fuzzybreaks))) # modified
if (any(counts < 0))
stop("negative 'counts'. Internal Error in C-code for \"bincount\"")
if (sum(counts) < n-.01)
stop("some 'x' not counted; maybe 'breaks' do not span range of 'x'")
dens <- counts/(n * diff(breaks))
mids <- 0.5 * (breaks[-1L] + breaks[-nB])
r <- structure(list(breaks = breaks, counts = counts, intensities = dens,
density = dens, mids = mids, xname = xname, equidist = equidist),
class = "histogram")
if (plot) {
plot(r, freq = freq1, col = col, border = border, angle = angle,
density = density, main = main, xlim = xlim, ylim = ylim,
xlab = xlab, ylab = ylab, axes = axes, labels = labels,
...)
invisible(r)
}
else {
nf <- names(formals())
nf <- nf[is.na(match(nf, c("x", "breaks", "nclass", "plot",
"include.lowest", "weight", "right")))]
missE <- lapply(nf, function(n) substitute(missing(.),
list(. = as.name(n))))
not.miss <- !sapply(missE, eval, envir = environment())
if (any(not.miss))
warning(sprintf(ngettext(sum(not.miss), "argument %s is not made use of",
"arguments %s are not made use of"), paste(sQuote(nf[not.miss]),
collapse = ", ")), domain = NA)
r
}
}
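# Illustrative usage sketch (not part of the original package source): a histogram
# whose bin counts are sums of weights rather than numbers of observations.
if (FALSE) {
wtd.hist(rnorm(500), weight = runif(500), main = "Weighted histogram")
}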
|
/scratch/gouwar.j/cran-all/cranData/weights/R/wtd.hist.R
|
wtd.t.test <- function(x, y=0, weight=NULL, weighty=NULL, samedata=TRUE, alternative="two.tailed", mean1=TRUE, bootse=FALSE, bootp=FALSE, bootn=1000, drops="pairwise"){
if(is.null(weight)){
weight <- rep(1, length(x))
}
if(bootse==FALSE & bootp==TRUE)
warning("bootp can only be used with bootstrapped standard errors")
if(length(y)!=length(x) & length(y)>1){
if(samedata==TRUE)
warning("Treating data for x and y separately because they are of different lengths")
samedata <- FALSE
}
if(length(y)==1)
samedata <- FALSE
if(samedata==TRUE & drops=="pairwise"){
use <- !is.na(x) & !is.na(y) & !is.na(weight)
x <- x[use]
if(length(y)>1)
y <- y[use]
weight <- weight[use]
}
if(is.null(weighty) & samedata==TRUE){
weighty <- weight
}
if(is.null(weighty) & samedata==FALSE & length(y)>1){
warning("y has no weights, weights for y are assumed to be 1")
weighty <- rep(1, length(y))
}
if(mean1==TRUE){
weight <- weight/mean(weight, na.rm=TRUE)
if(length(y)>1)
weighty <- weighty/mean(weighty, na.rm=TRUE)
}
#require(Hmisc)
n <- sum(weight[!is.na(x)], na.rm=TRUE)
mx <- wtd.mean(x, weight, na.rm=TRUE)
vx <- wtd.var(x, weight, na.rm=TRUE)
if(length(y)==1){
dif <- mx-y
sx <- sqrt(vx)
se <- sx/sqrt(n)
if(bootse==TRUE){
samps <- lapply(1:bootn, function(g) sample(1:length(x), round(sum(weight, na.rm=TRUE), 0), replace=TRUE, prob=weight))
sepests <- sapply(samps, function(q) mean(x[q], na.rm=TRUE))-y
se <- sqrt(var(sepests))
}
t <- (mx-y)/se
df <- n-1
p.value <- (1-pt(abs(t), df))*2
if (alternative=="greater")
p.value <- pt(t, df, lower.tail=FALSE) ## one sided p-value (greater)
if (alternative=="less")
p.value <- pt(t, df, lower.tail=TRUE) ## one sided p-value (less)
if(bootp==TRUE & bootse==TRUE)
p.value <- 2*min(c(sum(sepests>y & !is.na(sepests))/sum(!is.na(sepests)), sum(sepests<y & !is.na(sepests))/sum(!is.na(sepests))))
if(bootp==TRUE & bootse==TRUE & alternative=="greater")
p.value <- sum(sepests>y & !is.na(sepests))/sum(!is.na(sepests))
if(bootp==TRUE & bootse==TRUE & alternative=="less")
p.value <- sum(sepests<y & !is.na(sepests))/sum(!is.na(sepests))
coef <- c(t, df, p.value)
out2 <- c(dif, mx, y, se)
names(coef) <- c("t.value", "df", "p.value")
names(out2) <- c("Difference", "Mean", "Alternative", "Std. Err")
out <- list("One Sample Weighted T-Test", coef, out2)
names(out) <- c("test", "coefficients", "additional")
}
if(length(y)>1){
n2 <- sum(weighty[!is.na(y)], na.rm=TRUE)
my <- wtd.mean(y, weighty, na.rm=TRUE)
vy <- wtd.var(y, weighty, na.rm=TRUE)
dif <- mx-my
sxy <- sqrt((vx/n)+(vy/n2))
if(bootse==TRUE){
samps1 <- lapply(1:bootn, function(g) sample(1:length(x), round(sum(weight, na.rm=TRUE), 0), replace=TRUE, prob=weight))
samps2 <- lapply(1:bootn, function(g) sample(1:length(y), round(sum(weighty, na.rm=TRUE), 0), replace=TRUE, prob=weighty))
sepests1 <- sapply(samps1, function(q) mean(x[q], na.rm=TRUE))
sepests2 <- sapply(samps2, function(q) mean(y[q], na.rm=TRUE))
sxy <- sqrt(var(sepests1-sepests2, na.rm=TRUE))
}
df <- (((vx/n)+(vy/n2))^2)/((((vx/n)^2)/(n-1))+((vy/n2)^2/(n2-1)))
t <- (mx-my)/sxy
p.value <- (1-pt(abs(t), df))*2
if (alternative=="greater")
p.value <- pt(t, df, lower.tail=FALSE) ## one sided p-value (greater)
if (alternative=="less")
p.value <- pt(t, df, lower.tail=TRUE) ## one sided p-value (less)
if(bootp==TRUE & bootse==TRUE)
p.value <- 2*min(c(sum(sepests1>sepests2 & !is.na(sepests1))/sum(!is.na(sepests1)), sum(sepests1<sepests2 & !is.na(sepests1))/sum(!is.na(sepests1))))
if(bootp==TRUE & bootse==TRUE & alternative=="greater")
p.value <- sum(sepests1>sepests2 & !is.na(sepests1))/sum(!is.na(sepests1))
if(bootp==TRUE & bootse==TRUE & alternative=="less")
p.value <- sum(sepests1<sepests2 & !is.na(sepests1))/sum(!is.na(sepests1))
coef <- c(t, df, p.value)
out2 <- c(dif, mx, my, sxy)
names(coef) <- c("t.value", "df", "p.value")
names(out2) <- c("Difference", "Mean.x", "Mean.y", "Std. Err")
out <- list("Two Sample Weighted T-Test (Welch)", coef, out2)
names(out) <- c("test", "coefficients", "additional")
}
out
}
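# Illustrative usage sketch (not part of the original package source): a one-sample
# test against 0 and a two-sample (Welch-style) test on separately weighted samples.
# Data are simulated.
if (FALSE) {
x <- rnorm(100, mean = 0.2); wx <- runif(100)
y <- rnorm(120); wy <- runif(120)
wtd.t.test(x, weight = wx)
wtd.t.test(x, y, weight = wx, weighty = wy, samedata = FALSE)
}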
|
/scratch/gouwar.j/cran-all/cranData/weights/R/wtd.t.test.r
|
# Based on attach.R from the tidyverse package
core <- c(
"dplyr",
"ggplot2",
"ks"
)
core_unloaded <- function() {
search <- paste0("package:", core)
core[!search %in% search()]
}
# Attach the package from the same package library it was loaded from before.
same_library <- function(pkg) {
loc <- if (pkg %in% loadedNamespaces()) dirname(getNamespaceInfo(pkg, "path"))
do.call(
"library",
list(pkg, lib.loc = loc, character.only = TRUE, warn.conflicts = FALSE)
)
}
weird_attach <- function() {
to_load <- core_unloaded()
if (length(to_load) == 0) {
return(invisible())
}
msg(
cli::rule(
left = crayon::bold("Attaching packages"),
right = paste0("weird ", package_version("weird"))
),
startup = TRUE
)
versions <- vapply(to_load, package_version, character(1))
packages <- paste0(
crayon::green(cli::symbol$tick), " ", crayon::blue(format(to_load)), " ",
crayon::col_align(versions, max(crayon::col_nchar(versions)))
)
if (length(packages) %% 2 == 1) {
packages <- append(packages, "")
}
col1 <- seq_len(length(packages) / 2)
info <- paste0(packages[col1], " ", packages[-col1])
msg(paste(info, collapse = "\n"), startup = TRUE)
suppressPackageStartupMessages(
lapply(to_load, same_library)
)
invisible()
}
package_version <- function(x) {
version <- as.character(unclass(utils::packageVersion(x))[[1]])
if (length(version) > 3) {
version[4:length(version)] <- crayon::red(as.character(version[4:length(version)]))
}
paste0(version, collapse = ".")
}
|
/scratch/gouwar.j/cran-all/cranData/weird/R/attach.R
|
#' @title Bagplot
#' @description Produces a bivariate bagplot. A bagplot is analogous to a
#' univariate boxplot, except it is in two dimensions. Like a boxplot, it
#' shows the median, a region containing 50% of the observations, a region
#' showing the remaining observations other than outliers, and any outliers.
#'
#' @param data A data frame or matrix containing the data.
#' @param var1 The name of the first variable to plot (a bare expression).
#' @param var2 The name of the second variable to plot (a bare expression).
#' @param scatterplot A logical argument indicating if a regular bagplot is required
#' (\code{FALSE}), or if a scatterplot in the same colors is required (\code{TRUE}).
#' @param col The colors to use in the order: median, bag, loop and outliers.
#' @param ... Other arguments are passed to the \code{\link[aplpack]{compute.bagplot}} function.
#' @return A ggplot object showing a bagplot or scatterplot of the data.
#' @author Rob J Hyndman
#' @references Rousseeuw, P. J., Ruts, I., & Tukey, J. W. (1999).
#' The bagplot: A bivariate boxplot. \emph{The American Statistician}, \bold{53}(4), 382–387.
#' @examples
#' gg_bagplot(n01, v1, v2)
#' gg_bagplot(n01, v1, v2, scatterplot = TRUE)
#' @rdname bagplot
#' @seealso
#' \code{\link[aplpack]{bagplot}}
#' @importFrom aplpack compute.bagplot
#' @importFrom ggplot2 geom_polygon geom_point ggplot aes
#' @importFrom dplyr select filter
#' @export
gg_bagplot <- function(data, var1, var2,
col = c(hdr_palette(color = "#00659e", prob = c(0.5, 0.99)), "#000000"),
scatterplot = FALSE, ...) {
data <- data |> select({{ var1 }}, {{ var2 }})
bp <- aplpack::compute.bagplot(as.matrix(data), na.rm = TRUE, approx.limit = 1000, ...)
cn <- colnames(data)
p <- data |>
ggplot(aes(x = {{ var1 }}, y = {{ var2 }}))
if (scatterplot) {
# Bag points
if (!is.null(bp$pxy.bag)) {
p <- p + geom_point(aes(x = {{ var1 }}, y = {{ var2 }}),
data = as.data.frame(bp$pxy.bag), color = col[2]
)
}
# Loop points
if (!is.null(bp$pxy.outer)) {
p <- p + geom_point(aes(x = {{ var1 }}, y = {{ var2 }}),
data = as.data.frame(bp$pxy.outer), color = col[3]
)
}
# Deepest point
colnames(bp$xy) <- cn
deep <- bp$xy |>
as.data.frame() |>
dplyr::filter(bp$hdepths == max(bp$hdepths))
p <- p + geom_point(aes(x = {{ var1 }}, y = {{ var2 }}),
data = deep, color = col[1]
)
} else {
loop <- as.data.frame(bp$hull.loop)
bag <- as.data.frame(bp$hull.bag)
# Show loop polygon
if (!is.null(loop)) {
colnames(loop) <- cn
p <- p + geom_polygon(aes(x = {{ var1 }}, y = {{ var2 }}), data = loop, fill = col[3])
}
# Show bag polygon
if (!is.null(bag)) {
colnames(bag) <- cn
p <- p + geom_polygon(aes(x = {{ var1 }}, y = {{ var2 }}), data = bag, fill = col[2])
}
}
if (!is.null(bp$pxy.outlier)) {
outliers <- as.data.frame(as.matrix(bp$pxy.outlier))
colnames(outliers) <- cn
p <- p + geom_point(aes(x = {{ var1 }}, y = {{ var2 }}), data = outliers, col = col[4])
}
if (!scatterplot) {
# Show median
p <- p + geom_point(aes(x = bp$center[1], y = bp$center[2]), col = col[1], size = 2)
}
return(p)
}
|
/scratch/gouwar.j/cran-all/cranData/weird/R/bagplot.R
|
# Based on conflicts.R from the tidyverse package
#' Conflicts between weird packages and other packages
#'
#' This function lists all the conflicts between packages in the weird collection
#' and other packages that you have loaded.
#'
#' Some conflicts are deliberately ignored: \code{intersect}, \code{union},
#' \code{setequal}, and \code{setdiff} from dplyr; and \code{intersect},
#' \code{union}, \code{setdiff}, and \code{as.difftime} from lubridate.
#' These functions make the base equivalents generic, so shouldn't negatively affect any
#' existing code.
#'
#' @return A list object of class \code{weird_conflicts}.
#' @export
#' @examples
#' weird_conflicts()
weird_conflicts <- function() {
envs <- grep("^package:", search(), value = TRUE)
envs <- purrr::set_names(envs)
objs <- invert(lapply(envs, ls_env))
conflicts <- purrr::keep(objs, ~ length(.x) > 1)
tidy_names <- paste0("package:", weird_packages())
conflicts <- purrr::keep(conflicts, ~ any(.x %in% tidy_names))
conflict_funs <- purrr::imap(conflicts, confirm_conflict)
conflict_funs <- purrr::compact(conflict_funs)
structure(conflict_funs, class = "weird_conflicts")
}
weird_conflict_message <- function(x) {
if (length(x) == 0) {
return("")
}
header <- cli::rule(
left = crayon::bold("Conflicts"),
right = "weird_conflicts"
)
pkgs <- x |> purrr::map(~ gsub("^package:", "", .))
others <- pkgs |> purrr::map(`[`, -1)
other_calls <- purrr::map2_chr(
others, names(others),
~ paste0(crayon::blue(.x), "::", .y, "()", collapse = ", ")
)
winner <- pkgs |> purrr::map_chr(1)
funs <- format(paste0(crayon::blue(winner), "::", crayon::green(paste0(names(x), "()"))))
bullets <- paste0(
crayon::red(cli::symbol$cross), " ", funs,
" masks ", other_calls,
collapse = "\n"
)
paste0(header, "\n", bullets)
}
#' @export
print.weird_conflicts <- function(x, ..., startup = FALSE) {
cli::cat_line(weird_conflict_message(x))
}
confirm_conflict <- function(packages, name) {
# Only look at functions
objs <- packages |>
purrr::map(~ get(name, pos = .)) |>
purrr::keep(is.function)
if (length(objs) <= 1) {
return()
}
# Remove identical functions
objs <- objs[!duplicated(objs)]
packages <- packages[!duplicated(packages)]
if (length(objs) == 1) {
return()
}
packages
}
ls_env <- function(env) {
x <- ls(pos = env)
if (identical(env, "package:dplyr")) {
x <- setdiff(x, c("intersect", "setdiff", "setequal", "union"))
}
if (identical(env, "package:lubridate")) {
x <- setdiff(x, c("intersect", "setdiff", "union", "as.difftime"))
}
x
}
|
/scratch/gouwar.j/cran-all/cranData/weird/R/conflicts.R
|
#' Cricket batting data for international test players
#'
#' A dataset containing career batting statistics for all international test
#' players (men and women) up to 6 October 2021.
#'
#' @format A data frame with 3754 rows and 15 variables:
#' \describe{
#' \item{Player}{Player name in form of "initials surname"}
#' \item{Country}{Country played for}
#' \item{Start}{First year of test playing career}
#' \item{End}{Last year of test playing career}
#' \item{Matches}{Number of matches played}
#' \item{Innings}{Number of innings batted}
#' \item{NotOuts}{Number of times not out}
#' \item{Runs}{Total runs scored}
#' \item{HighScore}{Highest score in an innings}
#' \item{HighScoreNotOut}{Was highest score not out?}
#' \item{Average}{Batting average at end of career}
#' \item{Hundreds}{Total number of 100s scored}
#' \item{Fifties}{Total number of 50s scored}
#' \item{Ducks}{Total number of 0s scored}
#' \item{Gender}{"Men" or "Women"}
#' }
#' @return Data frame
#' @examples
#' cricket_batting |>
#' filter(Innings > 20) |>
#' select(Player, Country, Matches, Runs, Average, Hundreds, Fifties, Ducks) |>
#' arrange(desc(Average))
#' @source \url{https://www.espncricinfo.com}
"cricket_batting"
#' Old faithful eruption data
#'
#' A data set containing data on recorded eruptions of the Old Faithful Geyser
#' in Yellowstone National Park, Wyoming, USA, from
#' 1 January 2015 to 1 October 2021.
#' Recordings are incomplete, especially during the winter months when observers
#' may not be present.
#'
#' @format A data frame with 2261 rows and 3 columns:
#' \describe{
#' \item{time}{Time eruption started}
#' \item{duration}{Duration of eruption in seconds}
#' \item{waiting}{Time to the following eruption}
#' }
#' @return Data frame
#' @examples
#' oldfaithful |>
#' filter(duration < 7000, waiting < 7000) |>
#' ggplot(aes(x = duration, y = waiting)) +
#' geom_point()
#' @source \url{https://geysertimes.org}
"oldfaithful"
#' Multivariate standard normal data
#'
#' A synthetic data set containing 1000 observations on 10 variables generated
#' from independent standard normal distributions.
#'
#' @format A data frame with 1000 rows and 10 columns.
#' @return Data frame
#' @examples
#' n01
"n01"
|
/scratch/gouwar.j/cran-all/cranData/weird/R/datasets.R
|
#' @export
print.kde <- function(x, ...) {
kde <- !(is.null(x$h) & is.null(x$H))
if (inherits(x$eval.points, "list")) {
d <- length(x$eval.points)
} else {
d <- 1L
}
if(!kde) {
cat("Density of: [",
paste0(x$names, collapse = ", "), "]\n", sep = "")
} else {
cat("Kernel density estimate of: [",
paste0(x$names, collapse = ", "), "]\n", sep = "")
}
if(d == 1L){
ngrid <- length(x$eval.points)
} else {
ngrid <- lapply(x$eval.points, length)
}
cat("Computed on a grid of size", paste(ngrid, collapse = " x "), "\n")
if(kde) {
cat("Bandwidth: ")
    if (d == 1L) {
      cat("h = ", format(x$h, digits = 4), "\n")
    } else {
      cat("H = \n")
      print(format(x$H, digits = 4), quote = FALSE)
    }
}
invisible(x)
}
#' Convert data frame or matrix object to kde class
#'
#' A density specified as a data frame or matrix can be converted to a kde object.
#' This is useful for plotting the density using \code{\link{autoplot.kde}}.
#' As kde objects are defined on a grid, the supplied density values are
#' interpolated onto a regular grid covering the range of the points.
#'
#' @param object Data frame or matrix with numerical columns, where one column
#' (specified by `density_column`) contains the density values, and the
#' remaining columns define the points at which the density is evaluated.
#' @param density_column Name of the column containing the density values, specified
#' as a bare expression. If missing, the last column is used.
#' @param ngrid Number of points to use for the grid in each dimension. Default is
#' 10001 for univariate densities and 101 for multivariate densities.
#' @param ... Additional arguments are ignored.
#' @return An object of class "kde"
#' @author Rob J Hyndman
#' @examples
#' tibble(y = seq(-4, 4, by = 0.01), density = dnorm(y)) |>
#' as_kde()
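#' # Bivariate sketch: a density supplied on a grid of (y1, y2) points,
#' # mirroring the bivariate example used for hdr_table()
#' grid <- seq(-3, 3, by = 0.1)
#' expand.grid(y1 = grid, y2 = grid) |>
#'   mutate(density = dnorm(y1) * dnorm(y2)) |>
#'   as_kde()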
#' @export
as_kde <- function(object, density_column, ngrid, ...) {
# Check columns of object are all numerical
object <- as.data.frame(object)
if(!all(sapply(object, is.numeric))) {
stop("All columns of object must be numeric")
}
# Check density_column is in object
if(missing(density_column)) {
density_column <- tail(colnames(object), 1)
} else {
density_column <- dplyr::as_label(dplyr::enquo(density_column))
}
if(!(density_column %in% colnames(object))) {
stop(paste(density_column, "not found"))
}
# Separate points from density values
den <- object[[density_column]]
object[[density_column]] <- NULL
# Find the dimension
d <- NCOL(object)
if (d == 1L) {
if(missing(ngrid)) {
ngrid <- 10001
}
# Interpolate density on finer grid
density <- list(eval.points = seq(min(object), max(object), length = ngrid))
density$estimate <- approx(object[[1]], den, xout = density$eval.points)$y
} else if (d == 2L) {
if(missing(ngrid)) {
ngrid <- 101
}
density <- density_on_grid(as.matrix(object), den, ngrid)
} else {
stop("Only univariate and bivariate densities are supported")
}
# Find falpha using quantile method
missing <- is.na(density$estimate)
samplex <- sample(density$estimate[!missing],
size = 50000, replace = TRUE,
prob = density$estimate[!missing]
)
density$cont <- quantile(samplex, prob = (99:1) / 100, type = 8)
# Set missing values to 0
density$estimate[is.na(density$estimate)] <- 0
# Add names
density$names <- colnames(object)
if(is.null(density$names)) {
if(d == 1) {
density$names <- "y"
} else {
density$names <- paste0("y", seq_len(d))
}
}
structure(density, class = "kde")
}
density_on_grid <- function(y, fy, ngrid) {
y <- as.matrix(y)
if (NCOL(y) != 2L) {
stop("y must be a matrix with 2 columns")
}
# Create grid of points
density <- list(eval.points = list(
seq(min(y[, 1]), max(y[, 1]), length = ngrid),
seq(min(y[, 2]), max(y[, 2]), length = ngrid)
))
# Bivariate interpolation
grid <- expand.grid(density$eval.points[[1]], density$eval.points[[2]])
ifun <- interpolation::interpfun(x = y[, 1], y = y[, 2], z = fy)
density$estimate <- ifun(grid[,1], grid[,2]) |>
matrix(nrow = ngrid)
return(density)
}
#' Robust bandwidth estimation for kernel density estimation
#'
#' @param data A numeric matrix or data frame.
#' @param method Method to use for selecting the bandwidth.
#' `robust_normal` uses a robust version of the normal reference rule.
#' `double` uses twice the bandwidth given by the robust normal reference rule.
#' `lookout` uses the topological data analysis approach that is part of the lookout algorithm.
#' @param max.iter How many times should the `lookout` method be iterated. That is, outliers
#' (probability < 0.05) are removed and the bandwidth is re-computed from the
#' remaining observations.
#' @return A matrix of bandwidths (or scalar in the case of univariate data).
#' @author Rob J Hyndman
#' @examples
#' # Univariate bandwidth calculation
#' kde_bandwidth(oldfaithful$duration)
#' # Bivariate bandwidth calculation
#' kde_bandwidth(oldfaithful[,2:3])
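#' # The bandwidths can be passed to ks::kde(); a sketch using the filtered
#' # data that appears in other examples in this package
#' of <- oldfaithful |> filter(duration < 7000, waiting < 7000)
#' ks::kde(of$duration, h = kde_bandwidth(of$duration))
#' ks::kde(of[, 2:3], H = kde_bandwidth(of[, 2:3]))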
#' @export
kde_bandwidth <- function(data, method = c("robust_normal", "double", "lookout"),
max.iter = 2) {
method <- match.arg(method)
d <- NCOL(data)
n <- NROW(data)
if(d > 1) {
# Find robust covariance matrix of data
S <- robustbase::covOGK(data, sigmamu = robustbase::s_IQR)$cov
}
if(method != "lookout") {
k <- ifelse(method == "double", 2, 1)
if(d == 1L)
return(k * 1.06 * robustbase::s_IQR(data) * n^(-0.2))
else {
return((4/(n * (d + 2)))^(2/(d + 4)) * k^2 * S)
}
} else {
stop("Not yet implemented")
# # Initial estimate
# if(d == 1L) {
# S <- 1
# } else {
# # Normalize data
# U <- chol(solve(S))
# data <- as.matrix(data) %*% t(U)
# }
# h <- lookout::find_tda_bw(data, fast = (n > 1000)) |>
# suppressWarnings()
# iter <- 1
# oldh <- 0
# while(iter < max.iter & h != oldh) {
# iter <- iter + 1
# oldh <- h
# scores <- calc_kde_scores(data, h=h, H=h*diag(d))
# p <- lookout(density_scores = scores$scores, loo_scores = scores$loo) |>
# suppressWarnings()
# data <- as.matrix(data)[p > 0.05,]
# # Refined estimate
# h <- lookout::find_tda_bw(data, fast = (n > 1000))
# }
# return(h * S)
}
}
|
/scratch/gouwar.j/cran-all/cranData/weird/R/density.R
|
#' Produce ggplot of densities in 1 or 2 dimensions
#'
#' @details
#' This function produces a ggplot of the density estimate produced by `ks::kde()`.
#' For univariate densities, it produces a line plot of the density function, with
#' an optional ribbon showing some highest density regions (HDRs) and/or the observations.
#' For bivariate densities, it produces a contour plot of the density function, with
#' the observations optionally shown as points.
#' The mode can also be drawn as a point with the HDRs.
#' For bivariate densities, the combination of `fill = TRUE`, `show_points = TRUE`,
#' `show_mode = TRUE`, and `prob = c(0.5, 0.99)` is equivalent to an HDR boxplot.
#' For univariate densities, the combination of `show_hdr = TRUE`, `show_points = TRUE`,
#' `show_mode = TRUE`, and `prob = c(0.5, 0.99)` is equivalent to an HDR boxplot.
#'
#' @param object Probability density function as estimated by `ks::kde()`.
#' @param prob Probability of the HDR contours to be drawn (for a bivariate plot only).
#' @param fill If `TRUE`, and the density is bivariate, the bivariate contours
#' are shown as filled regions rather than lines.
#' @param show_hdr If `TRUE`, and the density is univariate, then the HDR regions
#' specified by `prob` are shown as a ribbon below the density.
#' @param show_points If `TRUE`, then individual points are plotted.
#' @param show_mode If `TRUE`, then the mode of the distribution is shown.
#' @param show_lookout If `TRUE`, then the observations with lookout probabilities less than 0.05 are shown in red.
#' @param color Color used for mode and HDR contours. If `palette = hdr_palette`,
#' it is also used as the basis for HDR regions.
#' @param palette Color palette function to use for HDR filled regions
#' (if `fill` is `TRUE` or `show_hdr` is `TRUE`).
#' @param alpha Transparency of points. When `fill` is `FALSE`, defaults to
#' min(1, 1000/n), where n is the number of observations. Otherwise, set to 1.
#' @param ... Additional arguments are currently ignored.
#' @return A ggplot object.
#' @author Rob J Hyndman
#' @examples
#' # Univariate density
#' c(rnorm(500), rnorm(500, 4, 1.5)) |>
#' kde() |>
#' autoplot(show_hdr = TRUE, prob= c(0.5, 0.95), color = "#c14b14")
#' ymat <- tibble(y1 = rnorm(5000), y2 = y1 + rnorm(5000))
#' ymat |>
#' kde(H = kde_bandwidth(ymat)) |>
#' autoplot(show_points = TRUE, alpha = 0.1, fill = TRUE)
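#' # As noted in Details, this combination is equivalent to a bivariate HDR boxplot
#' ymat |>
#'   kde(H = kde_bandwidth(ymat)) |>
#'   autoplot(fill = TRUE, show_points = TRUE, show_mode = TRUE, prob = c(0.5, 0.99))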
#' @export
autoplot.kde <- function(object, prob = seq(9)/10, fill = FALSE,
show_hdr = FALSE, show_points = FALSE, show_mode = FALSE, show_lookout = FALSE,
color = "#00659e", palette = hdr_palette, alpha = ifelse(fill, 1, min(1, 1000/NROW(object$x))),
...) {
if (min(prob) <= 0 | max(prob) >= 1) {
stop("prob must be between 0 and 1")
}
if(identical(palette, hdr_palette)) {
colors <- hdr_palette(color = color, prob=prob)
} else {
colors <- palette(n = length(prob)+1)
}
if(inherits(object$eval.points, "list")) {
d <- length(object$eval.points)
} else {
d <- 1L
}
if(d > 2) {
stop("Only univariate and bivariate densities are supported")
}
if(show_points) {
if(is.null(object$x)) {
warning("No observations found")
}
}
if(d == 1L) {
density <- tibble(
y = object$eval.points,
density = object$estimate
)
if(show_hdr) {
hdr <- hdr_table(density = object, prob = prob)
}
} else {
hdr <- hdr_table(density = object, prob = prob)
density <- expand.grid(
y1 = object$eval.points[[1]],
y2 = object$eval.points[[2]]
)
density$density <- c(object$estimate)
}
if(d == 1L) {
# Plot univariate density
p <- density |>
ggplot() +
geom_line(aes(x=y, y=density)) +
labs(x = object$names[1])
maxden <- max(density$density)
if(show_points) {
# Only show points outside largest HDR
if(show_hdr) {
kscores <- calc_kde_scores(object$x, h = object$h,...)
fi <- exp(-kscores$scores)
threshold <- quantile(fi, prob = 1 - max(hdr$prob), type = 8)
show_x <- tibble::tibble(x = object$x[fi < threshold])
} else {
show_x <- tibble::tibble(x = object$x)
}
p <- p + ggplot2::geom_point(
data = show_x,
mapping = aes(x = x, y = -maxden/40),
alpha = alpha
)
if(show_lookout) {
if(!show_hdr) {
kscores <- calc_kde_scores(object$x, h = object$h,...)
}
lookout_highlight <- lookout(density_scores = kscores$scores, loo_scores = kscores$loo) < 0.05
lookout <- tibble(x = object$x[lookout_highlight])
p <- p + ggplot2::geom_point(
data = lookout, mapping = aes(x = x, y = -maxden/40),
color = "#ff0000"
)
}
}
if(show_hdr) {
prob <- unique(hdr$prob)
nhdr <- length(prob)
p <- p +
ggplot2::geom_rect(data = hdr,
aes(xmin = lower, xmax=upper, ymin = -maxden/20, ymax=0,
fill = factor(prob))) +
scale_fill_manual(
breaks = rev(prob),
values = colors[-1],
labels = paste0(100*rev(prob), "%")
) +
ggplot2::guides(fill = ggplot2::guide_legend(title = "HDR coverage"))
}
if(show_mode) {
p <- p +
ggplot2::geom_line(
data = expand.grid(mode = unique(hdr$mode), ends = c(0, -maxden/20)),
mapping = aes(x = mode, y = ends, group = mode),
color = color,
size = 1
)
}
} else {
# Plot the contours
p <- density |>
ggplot() +
labs(x = object$names[1], y = object$names[2])
if(show_points) {
# If fill, only show points outside largest HDR
if(fill) {
kscores <- calc_kde_scores(object$x, H = object$H,...)
fi <- exp(-kscores$scores)
threshold <- quantile(fi, prob = 1 - max(hdr$prob), type = 8)
show_x <- as.data.frame(x = object$x[fi < threshold,])
colnames(show_x)[1:2] <- c("x","y")
} else {
show_x <- as.data.frame(x = object$x)
colnames(show_x)[1:2] <- c("x","y")
}
p <- p + ggplot2::geom_point(
data = show_x,
mapping = aes(x = x, y = y),
alpha = alpha
)
if(show_lookout) {
if(!fill) {
kscores <- calc_kde_scores(object$x, H = object$H,...)
}
lookout_highlight <- lookout(density_scores = kscores$scores, loo_scores = kscores$loo) < 0.05
lookout <- as.data.frame(x = object$x[lookout_highlight,])
colnames(lookout)[1:2] <- c("x","y")
p <- p + ggplot2::geom_point(
data = lookout, mapping = aes(x = x, y = y),
color = "#ff0000"
)
}
}
if(fill) {
p <- p +
geom_contour_filled(aes(x = y1, y = y2, z = density),
breaks = rev(c(hdr$density, 100))) +
scale_fill_manual(
values = colors[-1],
labels = rev(paste0(100 * hdr$prob, "%"))
)
} else {
p <- p + geom_contour(aes(x = y1, y = y2, z = density),
breaks = hdr$density, color = color)
}
if(show_mode) {
p <- p +
ggplot2::geom_point(
data = density |> filter(density == max(density)),
mapping = aes(x = y1, y = y2),
color = color
)
}
p <- p + ggplot2::guides(fill = ggplot2::guide_legend(title = "HDR coverage"))
}
return(p)
}
#' Color palette designed for plotting Highest Density Regions
#'
#' A sequential color palette is returned, with the first color being `color`,
#' and the rest of the colors being a mix of `color` with increasing amounts of white.
#' If `prob` is provided, then the mixing proportions are determined by `prob` (and
#' n is ignored). Otherwise the mixing proportions are equally spaced between 0 and 1.
#'
#' @param n Number of colors in palette.
#' @param color First color of vector.
#' @param prob Vector of probabilities between 0 and 1.
#' @return A character vector of colors of length `length(prob) + 1` (or `n` when `prob` is not supplied).
#' @examples
#' hdr_palette(prob = c(0.5, 0.99))
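#' # Five colors with equally spaced mixing proportions (prob not supplied)
#' hdr_palette(n = 5)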
#' @export
hdr_palette <- function(n, color = "#00659e", prob = NULL) {
if(missing(prob)) {
prob <- seq(n-1)/n
} else if (min(prob) <= 0 | max(prob) >= 1) {
stop("prob must be between 0 and 1")
}
pc_colors <- grDevices::colorRampPalette(c(color, "white"))(150)[2:100]
idx <- approx(seq(99)/100, seq(99), prob, rule=2)$y
c(color, pc_colors[idx])
}
utils::globalVariables(c("x","y","y1","y2"))
|
/scratch/gouwar.j/cran-all/cranData/weird/R/ggplot.R
|
#' Statistical tests for anomalies using Grubbs' test and Dixon's test
#'
#' Grubbs' test (proposed in 1950) identifies possible anomalies in univariate
#' data using z-scores assuming the data come from a normal distribution.
#' Dixon's test (also from 1950) compares the difference in the largest two values
#' to the range of the data. Critical values for Dixon's test have been
#' computed by simulation, with interpolation via a quadratic model in
#' logit(alpha) and log(log(n)).
#'
#' @details Grubbs' test is based on z-scores, and a point is identified as an
#' anomaly when the associated absolute z-score is greater than a threshold value.
#' A vector of logical values is returned, where \code{TRUE} indicates an anomaly.
#' This version of Grubbs' test looks for outliers anywhere in the sample.
#' Grubbs' original test came in several variations which looked for one outlier,
#' or two outliers in one tail, or two outliers on opposite tails. These variations
#' are implemented in the \code{\link[outliers]{grubbs.test}} function.
#' Dixon's test only considers the maximum (and possibly the minimum) as potential outliers.
#' @references Grubbs, F. E. (1950). Sample criteria for testing outlying observations.
#' *Annals of Mathematical Statistics*, 21(1), 27–58.
#' Dixon, W. J. (1950). Analysis of extreme values.
#' *Annals of Mathematical Statistics*, 21(4), 488–506.
#' @return A logical vector
#' @author Rob J Hyndman
#' @param y numerical vector of observations
#' @param alpha size of the test.
#' @seealso \code{\link[outliers]{grubbs.test}}, \code{\link[outliers]{dixon.test}}
#' @examples
#' x <- c(rnorm(1000), 5:10)
#' tibble(x = x) |> filter(grubbs_anomalies(x))
#' tibble(x = x) |> filter(dixon_anomalies(x))
#' y <- c(rnorm(1000), 5)
#' tibble(y = y) |> filter(grubbs_anomalies(y))
#' tibble(y = y) |> filter(dixon_anomalies(y))
#' @export
grubbs_anomalies <- function(y, alpha = 0.05) {
z <- (y - mean(y, na.rm = TRUE)) / stats::sd(y, na.rm = TRUE)
n <- length(y)
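  # Grubbs' critical value for |z|: ((n - 1) / sqrt(n)) * sqrt(t^2 / (n - 2 + t^2)),
  # where t is the 1 - alpha/(2n) quantile of a t distribution with n - 2 df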
t2 <- stats::qt(1 - alpha / (2 * n), n - 2)
threshold <- (n - 1) / sqrt(n) * sqrt(t2^2 / (n - 2 + t2^2))
return(abs(z) > threshold)
}
# grubbs_test <- function(y, ...) {
# z <- (y - mean(y, na.rm = TRUE)) / stats::sd(y, na.rm = TRUE)
# n <- length(y)
# maxz <- max(abs(z))
# alpha <- c(10^(-(7:2)), seq(0.02,0.50, by=0.01))
# t2 <- qt(1 - alpha / (2 * n), n - 2)
# threshold <- (n - 1) / sqrt(n) * sqrt(t2^2 / (n - 2 + t2^2))
# p <-
#
# pval <- pt(maxz, n-2)
# output <- list(statistic = c(maxz = max(abs(z))), alternative = alt, p.value = pval,
# method = "Dixon test for outliers", data.name = DNAME)
# class(RVAL) <- "htest"
# return(RVAL)
#
# }
#' @rdname grubbs_anomalies
#' @param two_sided If \code{TRUE}, both the minimum and the maximum are considered. Otherwise
#' only the maximum is used. (Negate the data to consider only the minimum with
#' \code{two_sided = FALSE}.)
#' @export
dixon_anomalies <- function(y, alpha = 0.05, two_sided = TRUE) {
if (two_sided) {
miny <- which.min(y)
}
maxy <- which.max(y)
sorty <- sort(y)
n <- length(y)
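  # Dixon's Q statistic: the gap between the most extreme observation and its
  # nearest neighbour, relative to the sample range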
if (two_sided) {
Q <- max(sorty[2] - sorty[1], sorty[n] - sorty[n-1]) / (sorty[n] - sorty[1])
} else {
Q <- (sorty[n] - sorty[n-1]) / (sorty[n] - sorty[1])
alpha <- 2 * alpha
}
# Find critical value using linear model fitted to simulated critical values
# Subset data to nearest alpha and n values
logit <- function(u) { log(u/(1-u)) }
loglog <- function(u) { log(log(u)) }
# Find four nearest alpha values
alpha_grid <- sort(unique(dixon_cv$alpha))
nearest_alpha <- (alpha_grid[order(abs(logit(alpha_grid) - logit(alpha)))])[1:4]
# Fit model using only alpha values?
alpha_only_model <- (n %in% 3:50)
# Find nearest n values
if (alpha_only_model) {
nearest_n <- n
} else {
# Find four nearest n values
n_grid <- sort(unique(dixon_cv$n))
nearest_n <- (n_grid[order(abs(loglog(n_grid) - loglog(n)))])[1:4]
}
cv_subset <- dixon_cv[dixon_cv$alpha %in% nearest_alpha & dixon_cv$n %in% nearest_n, ]
cv_subset$loglogn <- loglog(cv_subset$n)
cv_subset$logitalpha <- logit(cv_subset$alpha)
if (alpha_only_model) {
# Cubic interpolation to 4 points. 4 df
dixonfit <- stats::lm(log(cv) ~ poly(logitalpha, 3), data = cv_subset)
} else {
# Quadratic bivariate model to 16 points. 6 df
dixonfit <- stats::lm(log(cv) ~ poly(loglogn, 2) + poly(logitalpha, 2) + I(logitalpha * loglogn),
data = cv_subset
)
}
threshold <- exp(stats::predict(dixonfit,
newdata = data.frame(logitalpha = logit(alpha), loglogn = loglog(n))
))
# Return logical vector showing where outliers are
output <- rep(FALSE, n)
if (Q > threshold) {
if (two_sided) {
output[miny] <- (sorty[2] - sorty[1]) / (sorty[n] - sorty[1]) > threshold
}
output[maxy] <- (sorty[n] - sorty[n - 1]) / (sorty[n] - sorty[1]) > threshold
}
return(output)
}
#' @importFrom stats lm predict qt
#' @importFrom tibble tibble
|
/scratch/gouwar.j/cran-all/cranData/weird/R/grubbs.R
|
#' @title Table of Highest Density Regions
#' @description
#' Compute the highest density regions (HDR) for a kernel density estimate. The HDRs
#' are returned as a tibble with one row per interval and columns:
#' `prob` (giving the probability coverage), and
#' `density` (the value of the density at the boundary of the HDR).
#' For one-dimensional density functions, the tibble also has columns
#' `lower` (the lower ends of the intervals),
#' `upper` (the upper ends of the intervals), and
#' `mode` (the point at which the density is maximized within each interval).
#' @param y Numerical vector or matrix of data
#' @param density Probability density function, either estimated by `ks::kde()` or
#' a data frame or matrix with numerical columns that can be passed to `as_kde()`.
#' @param prob Probability of the HDR
#' @param h Bandwidth for univariate kernel density estimate. Default is \code{\link{kde_bandwidth}}.
#' @param H Bandwidth for multivariate kernel density estimate. Default is \code{\link{kde_bandwidth}}.
#' @param ... If `y` is supplied, other arguments are passed to \code{\link[ks]{kde}}.
#' Otherwise, additional arguments are passed to \code{\link{as_kde}}.
#' @return A tibble
#' @references Hyndman, R J. (1996) Computing and Graphing Highest Density Regions,
#' \emph{The American Statistician}, \bold{50}(2), 120–126.
#' @author Rob J Hyndman
#' @examples
#' # Univariate HDRs
#' y <- c(rnorm(100), rnorm(100, 3, 1))
#' hdr_table(y = y)
#' hdr_table(density = ks::kde(y))
#' x <- seq(-4, 4, by = 0.01)
#' hdr_table(density = data.frame(y = x, density = dnorm(x)), prob = 0.95)
#' # Bivariate HDRs
#' y <- cbind(rnorm(100), rnorm(100))
#' hdr_table(y = y)
#' grid <- seq(-4, 4, by=0.1)
#' density <- expand.grid(grid, grid) |>
#' mutate(density = dnorm(Var1) * dnorm(Var2))
#' hdr_table(density = density)
#' @export
hdr_table <- function(y = NULL, density = NULL,
prob = c(0.50, 0.99), h = kde_bandwidth(y, method = "double"),
H = kde_bandwidth(y, method = "double"), ...) {
if (min(prob) < 0 | max(prob) > 1) {
stop("prob must be between 0 and 1")
}
alpha <- sort(1 - prob)
if (!is.null(y)) {
# Data supplied
if (!is.null(density)) {
warning("Ignoring density")
}
n <- NROW(y)
if(NCOL(y) == 1L) {
density <- ks::kde(y, h = h,
gridsize = 10001, binned = n > 2000,
approx.cont = FALSE, ...)
} else {
density <- ks::kde(y, H = H,
gridsize = 101, binned = n > 2000,
approx.cont = FALSE, ...)
}
} else if (!inherits(density, "kde")) {
# Density given as list(y, density)
density <- as_kde(density, ...)
}
falpha <- approx(seq(99)/100, density$cont, xout = 1 - alpha)$y
falpha[is.na(falpha) & alpha < 0.01] <- min(density$estimate)
if(inherits(density$eval.points, "list")) {
d <- length(density$eval.points)
} else {
d <- 1L
}
if(d == 1L) {
# Find endpoints of each interval
hdr.store <- tibble(
prob = numeric(0),
lower = numeric(0),
upper = numeric(0),
mode = numeric(0),
density = numeric(0),
)
for (i in seq_along(alpha)) {
junk <- hdr.ends(density, falpha[i])$hdr
n <- length(junk) / 2
for (j in seq(n)) {
within <- density$eval.points >= junk[2 * j - 1] & density$eval.points <= junk[2 * j]
subden <- list(x = density$eval.points[within], y = density$estimate[within])
hdr.store <- dplyr::bind_rows(
hdr.store,
tibble(
prob = 1 - alpha[i], lower = junk[2 * j - 1], upper = junk[2 * j],
mode = subden$x[which.max(subden$y)],
density = falpha[i]
)
)
}
}
} else {
# Just return the density on the relevant contours
hdr.store <- tibble(
prob = 1-alpha,
density = falpha
)
}
return(hdr.store)
}
#' @title HDR plot
#' @description Produces a 1d or 2d box plot of HDR regions. The darker regions
#' contain observations with higher probability, while the lighter regions contain
#' points with lower probability. Points outside the largest HDR are shown as
#' individual points. Points with lookout probabilities
#' less than 0.05 are optionally shown in red.
#' @details The original HDR boxplot proposed by Hyndman (1996) can be produced with
#' all arguments set to their defaults, other than `show_lookout = FALSE`.
#' @param data A data frame or matrix containing the data.
#' @param var1 The name of the first variable to plot (a bare expression).
#' @param var2 Optionally, the name of the second variable to plot (a bare expression).
#' @param prob A numeric vector specifying the coverage probabilities for the HDRs.
#' @param scatterplot A logical argument indicating if a regular HDR plot is required
#' (\code{FALSE}), or if a scatterplot in the same colors is required (\code{TRUE}).
#' @param color The base color to use for the mode. Colors for the HDRs are generated
#' by whitening this color.
#' @param show_lookout A logical argument indicating if the plot should highlight observations with "lookout"
#' probabilities less than 0.05.
#' @param ... Other arguments passed to \code{\link[ks]{kde}}.
#' @return A ggplot object showing an HDR plot or scatterplot of the data.
#' @author Rob J Hyndman
#' @references Hyndman, R J (1996) Computing and Graphing Highest Density Regions,
#' *The American Statistician*, **50**(2), 120–126. \url{https://robjhyndman.com/publications/hdr/}
#' Kandanaarachchi, S & Hyndman, R J (2022) "Leave-one-out kernel density estimates for outlier detection",
#' *J Computational & Graphical Statistics*, **31**(2), 586-599. \url{https://robjhyndman.com/publications/lookout/}
#' @examples
#' df <- data.frame(x = c(rnorm(1000), rnorm(1000, 5, 1)))
#' df$y <- df$x + rnorm(200, sd=2)
#' gg_hdrboxplot(df, x)
#' gg_hdrboxplot(df, x, y, scatterplot = TRUE)
#' oldfaithful |>
#' filter(duration < 7000, waiting < 7000) |>
#' gg_hdrboxplot(duration, waiting, scatterplot = TRUE)
#' cricket_batting |>
#' filter(Innings > 20) |>
#' gg_hdrboxplot(Average)
#'
#' @rdname hdrplot
#' @export
gg_hdrboxplot <- function(data, var1, var2 = NULL, prob = c(0.5, 0.99),
color = "#00659e", scatterplot = FALSE, show_lookout = TRUE, ...) {
if(missing(var1)) {
# Grab first variable
data <- as.data.frame(data)
var1 <- rlang::sym(names(data)[1])
if(NCOL(data) > 1L) {
message("No variable selected. Using ", rlang::as_name(var1))
}
}
v2 <- dplyr::as_label(dplyr::enquo(var2))
if(v2 == "NULL") {
d <- 1L
data <- data |> select({{ var1 }})
} else {
d <- 2L
data <- data |> select({{ var1 }}, {{ var2 }})
}
# Use autoplot if possible
if(d == 2L & !scatterplot) {
fit <- ks::kde(data[,1:2], H = kde_bandwidth(data[,1:2], method = "double"), binned = NROW(data) > 2000, ...)
return(autoplot(fit, prob = prob,
color = color, fill = TRUE, show_points = TRUE, show_mode = TRUE, show_lookout = show_lookout) +
ggplot2::guides(fill = "none", color = "none"))
}
# Otherwise build the plot
# Find colors for each region
kscores <- calc_kde_scores(as.matrix(data), ...)
fi <- exp(-kscores$scores)
if(show_lookout) {
lookout_highlight <- lookout(density_scores = kscores$scores, loo_scores = kscores$loo) < 0.05
} else {
lookout_highlight <- rep(FALSE, length(fi))
}
thresholds <- sort(quantile(fi, prob = 1 - prob, type = 8))
data <- data |>
dplyr::mutate(
density = fi,
group = cut(fi, breaks = c(0, thresholds, Inf), labels = FALSE),
group = factor(group, levels = rev(sort(unique(group))),
labels = c(paste0(sort(prob)*100, "%"),"Outside"))
)
colors <- c(hdr_palette(color = color, prob = prob), "#000000")
if(d == 1L) {
p <- ggplot()
if(!scatterplot) {
hdr <- hdr_table(data[[1]], prob = prob, ...)
p <- p +
# Just show points outside largest HDR (but not lookout) in black
ggplot2::geom_jitter(
data = data |> filter(density < min(thresholds), !lookout_highlight),
mapping = aes(x = {{ var1 }}, y = 0), width = 0, height = 0.8) +
# add HDRs as shaded regions
ggplot2::geom_rect(data = hdr,
aes(xmin = lower, xmax = upper, ymin=-1, ymax=1, fill = paste0(prob*100,"%"))) +
ggplot2::scale_fill_manual(values = colors[-1]) +
ggplot2::guides(fill = "none") +
# add modes
geom_line(
data = expand.grid(
mode = unique(hdr$mode[which.max(hdr$density)]),
ends = c(-1, 1)
),
mapping = aes(x = mode, y = ends, group = mode),
color = color,
size = 1
)
} else {
p <- p +
# Show all points in colors
ggplot2::geom_jitter(data = data |> filter(!lookout_highlight),
mapping = aes(x = {{ var1 }}, y = 0, col = group),
width = 0, height = 0.8) +
ggplot2::scale_color_manual(values = colors[-1]) +
ggplot2::guides(col = "none")
}
if(show_lookout) {
p <- p +
# add lookout points
ggplot2::geom_jitter(data = data |> filter(lookout_highlight),
mapping = aes(x = {{ var1 }}, y = 0),
width = 0, height = 0.8, color = "red")
}
# Remove y-axis and guide
p <- p + ggplot2::scale_y_discrete() + labs(y = "")
} else {
# Show all points in colors
mode <- data |> dplyr::filter(density == max(density))
p <- ggplot() +
ggplot2::geom_point(data = data |> filter(!lookout_highlight),
mapping = aes(x = {{ var1 }}, y = {{ var2 }}, col = group)) +
ggplot2::scale_color_manual(values = colors[-1]) +
ggplot2::geom_point(data = mode,
mapping= aes(x = {{ var1 }}, y = {{ var2 }}),
col = colors[1], size = 2) +
ggplot2::guides(col = "none")
if(show_lookout) {
p <- p +
# add lookout points
ggplot2::geom_point(data = data |> filter(lookout_highlight),
mapping = aes(x = {{ var1 }}, y = {{ var2 }}),
color = "red")
}
}
return(p)
}
# Remaining functions adapted from hdrcde package
hdr.ends <- function(den, falpha) {
# falpha is above the density, so the HDR does not exist
if (falpha > max(den$estimate)) {
return(list(falpha = falpha, hdr = NA))
}
# Return density at specific x values
f <- function(x, den, falpha) {
approx(den$eval.points, den$estimate - falpha, xout = x)$y
}
# Find all end points of HDR
intercept <- all_roots(f, interval = range(den$eval.points), den = den, falpha = falpha)
ni <- length(intercept)
if (ni == 0L) {
# No roots
    if(falpha > min(den$estimate))
stop("Unable to find HDR")
else {
intercept <- range(den$eval.points)
}
} else {
n <- length(den$eval.points)
# Check behaviour outside the smallest and largest intercepts
if (f(0.5 * (intercept[1] + den$eval.points[1]), den, falpha) > 0) {
intercept <- c(den$eval.points[1], intercept)
}
if (f(0.5 * (utils::tail(intercept, 1) + den$eval.points[n]), den, falpha) > 0) {
intercept <- c(intercept, den$eval.points[n])
}
}
# Check behaviour -- not sure if we need this now
if (length(intercept) %% 2) {
warning("Some HDRs are incomplete")
}
# intercept <- sort(unique(intercept))
return(list(falpha = falpha, hdr = intercept))
}
all_roots <- function(
f, interval,
lower = min(interval), upper = max(interval), n = 100L, ...) {
x <- seq(lower, upper, len = n + 1L)
fx <- f(x, ...)
roots <- x[which(fx == 0)]
fx2 <- fx[seq(n)] * fx[seq(2L, n + 1L, by = 1L)]
index <- which(fx2 < 0)
for (i in index) {
roots <- c(roots, stats::uniroot(f, lower = x[i], upper = x[i + 1L], ...)$root)
}
return(sort(roots))
}
#' @importFrom utils head tail
#' @importFrom tibble tibble
utils::globalVariables(c("ends", "type", "lower", "upper", "group"))
|
/scratch/gouwar.j/cran-all/cranData/weird/R/hdr.R
|
#' @title Local outlier factors
#' @description Compute local outlier factors using k nearest neighbours. A local
#' outlier factor is a measure of how anomalous each observation is based on
#' the density of neighbouring points.
#' The function uses \code{dbscan::\link[dbscan]{lof}} to do the calculation.
#' @param y Numerical matrix or vector of data
#' @param k Number of neighbours to include. Default: 10.
#' @param ... Additional arguments passed to \code{dbscan::\link[dbscan]{lof}}
#' @return Numerical vector containing LOF values
#' @author Rob J Hyndman
#' @examples
#' y <- c(rnorm(49), 5)
#' lof_scores(y)
#' @export
#' @seealso
#' \code{dbscan::\link[dbscan]{lof}}
#' @importFrom dbscan lof
lof_scores <- function(y, k = 10, ...) {
y <- na.omit(y)
lof <- dbscan::lof(as.matrix(y), minPts = k, ...)
lof[lof == Inf] <- 0
return(lof)
}
#' @title GLOSH scores
#' @description Compute Global-Local Outlier Score from Hierarchies. This is based
#' on hierarchical clustering where the minimum cluster size is k. The resulting
#' outlier score is a measure of how anomalous each observation is.
#' The function uses \code{dbscan::\link[dbscan]{hdbscan}} to do the calculation.
#' @param y Numerical matrix or vector of data
#' @param k Minimum cluster size. Default: 10.
#' @param ... Additional arguments passed to \code{dbscan::\link[dbscan]{hdbscan}}
#' @return Numerical vector containing GLOSH values
#' @author Rob J Hyndman
#' @examples
#' y <- c(rnorm(49), 5)
#' glosh_scores(y)
#' @export
#' @seealso
#' \code{dbscan::\link[dbscan]{glosh}}
#' @importFrom dbscan hdbscan
glosh_scores <- function(y, k = 10, ...) {
dbscan::hdbscan(as.matrix(y), minPts = k, ...)$outlier_scores
}
|
/scratch/gouwar.j/cran-all/cranData/weird/R/lof_scores.R
|
#' @title Density scores
#' @description Compute density scores or leave-one-out density scores from a
#' model or a kernel density estimate of a data set.
#' The density scores are defined as minus the log of the conditional density,
#' or kernel density estimate, at each observation.
#' The leave-one-out density scores (or LOO density scores) are obtained by
#' estimating the conditional density or kernel density estimate using all
#' other observations.
#' @details If the first argument is a numerical vector or matrix, then
#' a kernel density estimate is computed, using a Gaussian kernel,
#' with default bandwidth equal to twice that given by a robust normal reference rule.
#' Otherwise the model is used to compute the conditional
#' density function at each observation, from which the density scores (or
#' possibly the LOO density scores) are obtained.
#' @param object A model object or a numerical data set.
#' @param loo Should leave-one-out density scores be computed?
#' @author Rob J Hyndman
#' @return A numerical vector containing either the density scores, or the LOO
#' density scores.
#' @seealso
#' \code{\link{kde_bandwidth}}
#' \code{\link[ks]{kde}}
#' @export
density_scores <- function(object, loo = FALSE, ...) {
UseMethod("density_scores")
}
#' @rdname density_scores
#' @param h Bandwidth for univariate kernel density estimate. Default is \code{\link{kde_bandwidth}}.
#' @param H Bandwidth for multivariate kernel density estimate. Default is \code{\link{kde_bandwidth}}.
#' @param ... Other arguments are passed to \code{\link[ks]{kde}}.
#' @examples
#' # Density scores computed from bivariate data set
#' of <- oldfaithful |>
#' filter(duration < 7000, waiting < 7000) |>
#' mutate(
#' fscores = density_scores(cbind(duration, waiting)),
#' loo_fscores = density_scores(cbind(duration, waiting), loo = TRUE),
#' lookout_prob = lookout(density_scores = fscores, loo_scores = loo_fscores)
#' )
#' of |>
#' ggplot(aes(x = duration, y = waiting, color = lookout_prob < 0.01)) +
#' geom_point()
#' @export
density_scores.default <- function(
object, loo = FALSE,
h = kde_bandwidth(object, method = "double"),
H = kde_bandwidth(object, method = "double"), ...) {
object <- as.matrix(object)
tmp <- calc_kde_scores(object, h, H, ...)
if (loo) {
return(tmp$loo_scores)
} else {
return(tmp$scores)
}
}
#' @rdname density_scores
#' @param ... Other arguments are ignored.
#' @examples
#' # Density scores computed from bivariate KDE
#' f_kde <- kde(of[, 2:3], H = kde_bandwidth(of[, 2:3]))
#' of |>
#' mutate(
#' fscores = density_scores(f_kde),
#' loo_fscores = density_scores(f_kde, loo = TRUE)
#' )
#' @export
density_scores.kde <- function(object, loo = FALSE, ...) {
n <- NROW(object$x)
d <- NCOL(object$x)
# kde on a grid, but we need it at observations, so we will re-estimate
# interpolation is probably quicker, but less accurate and
# this works ok.
output <- calc_kde_scores(object$x, object$h, object$H, ...)
if (loo) {
return(output$loo_scores)
} else {
return(output$scores)
}
}
#' @rdname density_scores
#' @examples
#' # Density scores computed from linear model
#' of <- oldfaithful |>
#' filter(duration < 7200, waiting < 7200)
#' lm_of <- lm(waiting ~ duration, data = of)
#' of |>
#' mutate(
#' fscore = density_scores(lm_of),
#' loo_fscore = density_scores(lm_of, loo = TRUE),
#' lookout_prob = lookout(density_scores = fscore, loo_scores = loo_fscore)
#' ) |>
#' ggplot(aes(x = duration, y = waiting, color = lookout_prob < 0.02)) +
#' geom_point()
#' @export
density_scores.lm <- function(object, loo = FALSE, ...) {
e <- stats::residuals(object, type = "response")
h <- stats::hatvalues(object)
sigma2 <- sum(e^2, na.rm = TRUE) / object$df.residual
if (loo) {
resdf <- object$df.residual
sigma2 <- (sigma2 * resdf - e^2 / (1 - h)) / (resdf - 1)
}
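  # Squared studentized residual; the density score is the negative log of the
  # standard normal density evaluated at the studentized residual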
r2 <- e^2 / ((1 - h) * sigma2)
return(0.5 * (log(2 * pi) + r2))
}
#' @rdname density_scores
#' @examples
#' # Density scores computed from GAM
#' of <- oldfaithful |>
#' filter(duration > 1, duration < 7200, waiting < 7200)
#' gam_of <- mgcv::gam(waiting ~ s(duration), data = of)
#' of |>
#' mutate(
#' fscore = density_scores(gam_of),
#' lookout_prob = lookout(density_scores = fscore)
#' ) |>
#' filter(lookout_prob < 0.02)
#' @importFrom stats approx dbinom density dnorm dpois na.omit
#' @export
density_scores.gam <- function(object, loo = FALSE, ...) {
if (loo) {
warning("Leave-one-out log scores unavailable for GAM models. Returning log scores.")
}
fit_aug <- broom::augment(object, type.predict = "response")
if (object$family$family == "gaussian") {
std.resid <- c(scale(fit_aug$.resid / fit_aug$.se.fit))
density_scores <- -dnorm(std.resid, log = TRUE)
} else if (object$family$family == "binomial") {
density_scores <- -dbinom(
x = object$y * object$prior.weights,
size = object$prior.weights, prob = fit_aug$.fitted, log = TRUE
)
} else if (object$family$family == "poisson") {
density_scores <- -dpois(object$y, lambda = fit_aug$.fitted, log = TRUE)
} else {
stop("Unsupported family")
}
return(density_scores)
}
# Compute value of density at each observation using kde
calc_kde_scores <- function(
y,
h = kde_bandwidth(y, method = "double"),
H = kde_bandwidth(y, method = "double"), ...) {
n <- NROW(y)
d <- NCOL(y)
# Estimate density at each observation
if (d == 1L) {
gridsize <- 10001
K0 <- 1 / (h * sqrt(2 * pi))
fi <- ks::kde(y,
h = h, gridsize = gridsize, binned = n > 2000,
eval.points = y, compute.cont = FALSE, ...
)$estimate
} else {
gridsize <- 101
K0 <- det(H)^(-1 / 2) * (2 * pi)^(-d / 2)
fi <- ks::kde(y,
H = H, gridsize = gridsize, binned = n > 2000,
eval.points = y, compute.cont = FALSE, ...
)$estimate
}
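  # K0 is the kernel evaluated at 0, i.e. each observation's contribution to its
  # own density estimate. The leave-one-out estimate removes that contribution:
  # f_{-i}(y_i) = (n * f(y_i) - K0) / (n - 1)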
loo_scores <- -log(pmax(0, (n * fi - K0) / (n - 1)))
scores <- -log(pmax(0, fi))
return(list(scores = scores, loo_scores = loo_scores))
}
utils::globalVariables(c(
".resid", ".se.fit", ".std.resid", ".resid", ".sigma", ".hat",
"studentized_residuals"
))
|
/scratch/gouwar.j/cran-all/cranData/weird/R/log_scores.R
|
#' @title Lookout probabilities
#' @description Compute leave-one-out log score probabilities using a
#' Generalized Pareto distribution. These give the probability of each observation
#' being an anomaly.
#' @details This function can work with several object types.
#' If `object` is not `NULL`, then the object is passed to \code{\link{density_scores}}
#' to compute density scores (and possibly LOO density scores). Otherwise,
#' the density scores are taken from the `density_scores` argument, and the
#' LOO density scores are taken from the `loo_scores` argument. Then the Generalized
#' Pareto distribution is fitted to the scores, to obtain the probability of each observation.
#' @param object A model object or a numerical data set.
#' @param density_scores Numerical vector of log scores
#' @param loo_scores Optional numerical vector of leave-one-out log scores
#' @param threshold_probability Probability threshold when computing the POT model for the log scores.
#' @references Sevvandi Kandanaarachchi & Rob J Hyndman (2022) "Leave-one-out
#' kernel density estimates for outlier detection", *J Computational & Graphical
#' Statistics*, **31**(2), 586-599. \url{https://robjhyndman.com/publications/lookout/}
#' @return A numerical vector containing the lookout probabilities
#' @author Rob J Hyndman
#' @examples
#' # Univariate data
#' tibble(
#' y = c(5, rnorm(49)),
#' lookout = lookout(y)
#' )
#' # Bivariate data with score calculation done outside the function
#' tibble(
#' x = rnorm(50),
#' y = c(5, rnorm(49)),
#' fscores = density_scores(y),
#' loo_fscores = density_scores(y, loo = TRUE),
#' lookout = lookout(density_scores = fscores, loo_scores = loo_fscores)
#' )
#' # Using a regression model
#' of <- oldfaithful |> filter(duration < 7200, waiting < 7200)
#' fit_of <- lm(waiting ~ duration, data = of)
#' of |>
#' mutate(lookout_prob = lookout(fit_of)) |>
#' arrange(lookout_prob)
#' @importFrom stats quantile
#' @importFrom evd fpot pgpd
#' @export
lookout <- function(
object = NULL,
density_scores = NULL, loo_scores = density_scores,
threshold_probability = 0.95) {
if (!is.null(object)) {
if (!is.null(density_scores) | !is.null(loo_scores)) {
warning("Ignoring density_scores and loo_scores arguments and using object.")
}
if (is.data.frame(object) | inherits(object, "matrix") | inherits(object, "numeric")) {
tmp <- calc_kde_scores(as.matrix(object))
density_scores <- tmp$scores
loo_scores <- tmp$loo_scores
} else {
density_scores <- density_scores(object)
loo_scores <- density_scores(object, loo = TRUE) |> suppressWarnings()
}
}
threshold <- stats::quantile(density_scores, prob = threshold_probability, type = 8)
if (sum(density_scores > threshold) == 0L) {
warning("No scores above threshold.")
return(rep(1, length(density_scores)))
}
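  # Peaks-over-threshold: fit a generalized Pareto distribution to the density
  # scores above the threshold, then return the probability of exceeding each
  # LOO density score under the fitted tail model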
gpd <- evd::fpot(density_scores, threshold = threshold, std.err = FALSE)$estimate
evd::pgpd(
loo_scores,
loc = threshold,
scale = gpd["scale"], shape = gpd["shape"], lower.tail = FALSE
)
}
|
/scratch/gouwar.j/cran-all/cranData/weird/R/lookout.R
|
#' Compute robust multivariate scaled data
#'
#' @description A multivariate version of `base::scale()` that takes account
#' of the covariance matrix of the data, and uses robust estimates
#' of center, scale and covariance by default. The centers are removed using medians, the
#' scale function is the IQR, and the covariance matrix is estimated using a
#' robust OGK estimate. The data are scaled using the Cholesky decomposition of
#' the inverse covariance. Then the scaled data are returned. This is useful for
#' computing pairwise Mahalanobis distances.
#'
#' @details Optionally, the centering and scaling can be done for each variable
#' separately, so there is no rotation of the data, by setting `cov = NULL`.
#' Also optionally, non-robust methods can be used by specifying `center = mean`,
#' scale = `stats::sd`, and `cov = stats::cov`. Any non-numeric columns are retained
#' with a warning.
#'
#' @param object A vector, matrix, or data frame containing some numerical data.
#' @param center A function to compute the center of each numerical variable. Set
#' to NULL if no centering is required.
#' @param scale A function to scale each numerical variable. When
#' `cov = robustbase::covOGK`, it is passed as the `sigmamu` argument.
#' @param cov A function to compute the covariance matrix. Set to NULL if no rotation required.
#' @param warning Should a warning be issued if non-numeric columns are ignored?
#' @return A vector, matrix or data frame of the same size and class as `object`,
#' but with numerical variables replaced by scaled versions.
#' @author Rob J Hyndman
#' @examples
#' # Univariate z-scores (no rotation)
#' mvscale(oldfaithful, center = mean, scale = sd, cov = NULL, warning = FALSE)
#' # Non-robust scaling with rotation
#' mvscale(oldfaithful, center = mean, cov = stats::cov, warning = FALSE)
#' # Robust scaling
#' mvscale(oldfaithful, warning = FALSE)
#' # Robust Mahalanobis distances
#' oldfaithful |>
#' select(-time) |>
#' mvscale() |>
#' head(5) |>
#' dist()
#' @export
mvscale <- function(object, center = stats::median, scale = robustbase::s_IQR,
cov = robustbase::covOGK, warning = TRUE) {
d <- NCOL(object)
vec <- FALSE # Indicator if object is a vector
# We find the numerical columns and convert to a matrix
# First deal with vector inputs
if (d == 1L & !inherits(object, "matrix") & !inherits(object, "data.frame")) {
numeric_col <- is.numeric(object)
if (!numeric_col) {
stop("Input must be numeric")
}
vec <- TRUE
mat <- as.matrix(object)
} else if (inherits(object, "matrix")) {
# It is already a matrix
if (!is.numeric(object)) {
stop("Input must be numeric")
}
numeric_col <- rep(TRUE, NCOL(object))
mat <- object
} else { # It must be a data frame. So let's find the numeric columns
numeric_col <- unlist(lapply(object, is.numeric))
if (any(!numeric_col) & warning) {
warning(
"Ignoring non-numeric columns: ",
paste(names(object)[!numeric_col], collapse = ", ")
)
}
mat <- as.matrix(object[, numeric_col])
}
# Remove centers
if (!is.null(center)) {
med <- apply(mat, 2, center)
mat <- sweep(mat, 2L, med)
}
# Scale
if (d == 1L) {
if(!is.null(scale)) {
z <- mat / scale(mat)
} else {
z <- mat
}
if (vec) {
return(c(z))
}
} else if(!is.null(cov)) {
if (identical(cov, robustbase::covOGK)) {
S <- cov(mat, sigmamu = scale)$cov
} else {
S <- cov(mat)
}
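    # Rotate using the Cholesky factor of the inverse covariance so that the
    # scaled data have (approximately) identity covariance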
U <- chol(solve(S))
z <- mat %*% t(U)
} else {
s <- apply(mat, 2, scale)
z <- sweep(mat, 2L, s, "/")
}
# Convert back to matrix, data frame or tibble if necessary
idx <- which(numeric_col)
for (i in seq_along(idx)) {
object[, idx[i]] <- z[, i]
}
# Rename columns if there has been rotation
if(!is.null(cov)) {
names(object)[numeric_col] <- paste0("z", seq(sum(numeric_col)))
}
return(object)
}
|
/scratch/gouwar.j/cran-all/cranData/weird/R/mvscale.R
|
#' Anomalies according to Peirce's and Chauvenet's criteria
#'
#' Peirce's criterion and Chauvenet's criterion were both proposed in the 1800s
#' as a way of determining what observations should be rejected in a univariate sample.
#'
#' @details These functions take a univariate sample `y` and return a logical
#' vector indicating which observations should be considered anomalies according
#' to either Peirce's criterion or Chauvenet's criterion.
#' @references Peirce, B. (1852). Criterion for the rejection of doubtful observations.
#' *The Astronomical Journal*, 2(21), 161–163.
#' @references Chauvenet, W. (1863). 'Method of least squares'. Appendix to
#' *Manual of Spherical and Practical Astronomy*, Vol.2, Lippincott, Philadelphia, pp.469-566.
#' @return A logical vector
#' @author Rob J Hyndman
#' @param y numerical vector of observations
#' @examples
#' y <- rnorm(1000)
#' tibble(y = y) |> filter(peirce_anomalies(y))
#' tibble(y = y) |> filter(chauvenet_anomalies(y))
#' @export
peirce_anomalies <- function(y) {
z <- (y - mean(y, na.rm = TRUE)) / stats::sd(y, na.rm = TRUE)
return(abs(z) > peirce_threshold(length(y)))
}
# Threshold based on Gould's paper when there are m=1 unknown quantities
# and n=1 suspicious observations
peirce_threshold <- function(n) {
# Check we have enough observations
if ((n - 2) <= 0) {
return(NaN)
}
# Initialize
x <- 1
oldx <- Inf
root2pie <- sqrt(2 / pi / exp(1))
# Eq (B) after taking logs
LnQN <- (n - 1) * log(n - 1) - n * log(n)
# Loop until convergence
while (abs(x - oldx) >= n * .Machine$double.eps) {
# Eq (D)
R1 <- 2 * exp(0.5 * (x^2 - 1)) * stats::pnorm(x, lower.tail = FALSE)
# Eq (A') after taking logs and solving for R (plug in lambda from top of page)
R2 <- exp(LnQN - (n - 1) * 0.5 * log((n - 1 - x^2) / (n - 2)))
# Find derivatives wrt x
R1d <- x * R1 - root2pie
R2d <- x * (n - 1) / (n - 1 - x^2) * R2
# Update x accordingly
oldx <- x
x <- oldx - (R1 - R2) / (R1d - R2d)
}
return(x)
}
#' @rdname peirce_anomalies
#' @export
chauvenet_anomalies <- function(y) {
z <- (y - mean(y, na.rm = TRUE)) / stats::sd(y, na.rm = TRUE)
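  # Chauvenet's criterion: reject when the expected number of observations this
  # extreme is less than 1/2, i.e. when |z| > qnorm(1 - 1/(4n))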
return(abs(z) > stats::qnorm(1 - 0.25 / length(y)))
}
|
/scratch/gouwar.j/cran-all/cranData/weird/R/peirce.R
|
#' @title Stray scores
#' @description Compute stray scores indicating how anomalous each observation is.
#' @param y A vector, matrix, or data frame consisting of numerical variables.
#' @param ... Other arguments are passed to \code{\link[stray]{find_HDoutliers}}.
#' @return Numerical vector containing stray scores.
#' @author Rob J Hyndman
#' @examples
#' # Univariate data
#' y <- c(6, rnorm(49))
#' scores <- stray_scores(y)
#' threshold <- stray::find_threshold(scores, alpha = 0.01, outtail = "max", p = 0.5, tn = 50)
#' which(scores > threshold)
#' @export
#' @rdname stray_scores
stray_scores <- function(y, ...) {
stray::find_HDoutliers(data = y, ...)$out_scores
}
#' @title Stray anomalies
#' @description Test if observations are anomalies according to the stray algorithm.
#' @param y A vector, matrix, or data frame consisting of numerical variables.
#' @param ... Other arguments are passed to \code{\link[stray]{find_HDoutliers}}.
#' @author Rob J Hyndman
#' @examples
#' # Univariate data
#' y <- c(6, rnorm(49))
#' stray_anomalies(y)
#' # Bivariate data
#' y <- cbind(rnorm(50), c(5, rnorm(49)))
#' stray_anomalies(y)
#' @return A logical vector indicating whether each
#' observation is identified as an anomaly using the stray algorithm.
#' @export
stray_anomalies <- function(y, ...) {
stray::find_HDoutliers(data = y, ...)$type == "outlier"
}
|
/scratch/gouwar.j/cran-all/cranData/weird/R/stray.R
|
# Based on utils.R from the tidyverse package
msg <- function(..., startup = FALSE) {
if (startup) {
if (!isTRUE(getOption("weird.quiet"))) {
packageStartupMessage(text_col(...))
}
} else {
message(text_col(...))
}
}
text_col <- function(x) {
# If RStudio not available, messages already printed in black
if (!rstudioapi::isAvailable()) {
return(x)
}
if (!rstudioapi::hasFun("getThemeInfo")) {
return(x)
}
theme <- rstudioapi::getThemeInfo()
if (isTRUE(theme$dark)) crayon::white(x) else crayon::black(x)
}
#' List all packages loaded by weird
#'
#' @param include_self Include weird in the list?
#' @return A character vector of package names.
#' @export
#' @examples
#' weird_packages()
weird_packages <- function(include_self = FALSE) {
raw <- utils::packageDescription("weird")$Imports
imports <- strsplit(raw, ",")[[1]]
parsed <- gsub("^\\s+|\\s+$", "", imports)
names <- vapply(strsplit(parsed, "\\s+"), "[[", 1, FUN.VALUE = character(1))
if (include_self) {
names <- c(names, "weird")
}
names
}
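# Invert a named list: for each distinct value, return the names under which it appears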
invert <- function(x) {
if (length(x) == 0) {
return()
}
stacked <- utils::stack(x)
tapply(as.character(stacked$ind), stacked$values, list)
}
style_grey <- function(level, ...) {
crayon::style(
paste0(...),
crayon::make_style(grDevices::grey(level), grey = TRUE)
)
}
|
/scratch/gouwar.j/cran-all/cranData/weird/R/utils.R
|
# This package is based on the tidyverse package, hence the copyright
# attribution to RStudio.
#' @keywords internal
"_PACKAGE"
#' @importFrom ggplot2 autoplot labs ggplot geom_line
#' @importFrom ggplot2 geom_contour_filled geom_contour scale_fill_manual
#' @export
ggplot2::autoplot
|
/scratch/gouwar.j/cran-all/cranData/weird/R/weird.R
|