## File: xxIRT/R/module1_model_grm.R

#' Graded Response Model
#' @description Routine functions for the GRM
#' @name model_grm
NULL

#' @rdname model_grm
#' @param t ability parameters, 1d vector
#' @param a discrimination parameters, 1d vector
#' @param b item location parameters, 2d matrix
#' @param D the scaling constant, 1.702 by default
#' @param raw TRUE to return P*
#' @examples
#' with(model_grm_gendata(10, 5, 3), model_grm_prob(t, a, b))
#' @export
model_grm_prob <- function(t, a, b, D=1.702, raw=FALSE){
  n_p <- length(t)
  n_i <- nrow(b)
  n_c <- ncol(b) + 1
  p <- 1 / (1 + exp(D * a * outer(b, t, '-')))
  if(raw) {
    p <- apply(p, 1, function(x) rbind(1, x, 0))
    p <- aperm(array(p, dim=c(n_c+1, n_p, n_i)), c(2, 3, 1))
  } else {
    p <- apply(p, 1, function(x) rbind(1, x) - rbind(x, 0))
    p <- aperm(array(p, dim=c(n_c, n_p, n_i)), c(2, 3, 1))
  }
  p
}

#' @rdname model_grm
#' @examples
#' with(model_grm_gendata(10, 5, 3), model_grm_info(t, a, b))
#' @export
model_grm_info <- function(t, a, b, D=1.702){
  p <- model_grm_prob(t, a, b, D)
  p_ <- aperm(apply(p, c(1, 2), function(x) rev(cumsum(c(0, rev(x))))), c(2, 3, 1))
  num_cats <- dim(p)[3]
  dv1_p_ <- aperm(p_ * (1 - p_), c(2, 3, 1)) * D * a
  dv2_p_ <- aperm((1 - 2 * p_) * p_ * (1 - p_), c(2, 3, 1)) * (D * a)^2
  dv1_p <- dv1_p_[,1:num_cats,] - dv1_p_[,-1,]
  dv1_p <- aperm(dv1_p, c(3, 1, 2))
  dv2_p <- dv2_p_[,1:num_cats,] - dv2_p_[,-1,]
  dv2_p <- aperm(dv2_p, c(3, 1, 2))
  1 / p * dv1_p^2 - dv2_p
}

#' @rdname model_grm
#' @param u the observed scores (starting from 0), 2d matrix
#' @param log TRUE to return log-likelihood
#' @examples
#' with(model_grm_gendata(10, 5, 3), model_grm_lh(u, t, a, b))
#' @export
model_grm_lh <- function(u, t, a, b, D=1.702, log=FALSE){
  p <- model_grm_prob(t, a, b, D)
  ix <- model_polytomous_3dindex(u)
  lh <- array(p[ix], dim=dim(u))
  if(log) lh <- log(lh)
  lh
}

#' @rdname model_grm
#' @param n_p the number of people to be generated
#' @param n_i the number of items to be generated
#' @param n_c the number of score categories
#' @param t_dist parameters of the normal distribution used to generate t-parameters
#' @param a_dist parameters of the lognormal distribution used to generate a-parameters
#' @param b_dist parameters of the normal distribution used to generate b-parameters
#' @param missing the proportion or number of missing responses
#' @examples
#' model_grm_gendata(10, 5, 3)
#' model_grm_gendata(10, 5, 3, missing=.1)
#' @importFrom stats rnorm rlnorm runif
#' @export
model_grm_gendata <- function(n_p, n_i, n_c, t=NULL, a=NULL, b=NULL, D=1.702,
                              t_dist=c(0, 1), a_dist=c(-.1, .2), b_dist=c(0, .8), missing=NULL){
  if(is.null(t)) t <- rnorm(n_p, mean=t_dist[1], sd=t_dist[2])
  if(is.null(a)) a <- rlnorm(n_i, meanlog=a_dist[1], sdlog=a_dist[2])
  if(is.null(b)) {
    b <- matrix(rnorm(n_i * (n_c - 1), mean=b_dist[1], sd=b_dist[2]), nrow=n_i)
    b <- t(apply(b, 1, sort))
    b <- matrix(b, nrow=n_i, ncol=n_c-1)
  }
  if(length(t) == 1) t <- rep(t, n_p)
  if(length(a) == 1) a <- rep(a, n_i)
  if(length(t) != n_p) stop('wrong dimensions for t')
  if(length(a) != n_i) stop('wrong dimensions for a')
  if(nrow(b) != n_i || ncol(b) != n_c - 1) stop('wrong dimensions for b')
  p <- model_grm_prob(t, a, b, D)
  u <- apply(p, 2, function(x) rowSums(runif(n_p) >= t(apply(x, 1, cumsum))))
  if(!is.null(missing)){
    missing <- floor(ifelse(missing < 1, missing * n_p * n_i, missing))
    idx <- sample(length(u), missing)
    u[cbind(ceiling(idx/n_i), (idx-1) %% n_i + 1)] <- NA
  }
  list(u=u, t=t, a=a, b=b)
}

#' @rdname model_grm
#' @param param the parameter of the new scale: 't' or 'b'
#' @param mean the mean of the new scale
#' @param sd the standard deviation of the new scale
#' @importFrom stats sd
#' @export
model_grm_rescale <- function(t, a, b, param=c("t", "b"), mean=0, sd=1){
  scale <- switch(match.arg(param), "t"=t, "b"=b)
  slope <- sd / sd(scale)
  intercept <- mean - slope * mean(scale)
  t <- slope * t + intercept
  b <- slope * b + intercept
  a <- a / slope
  list(t=t, a=a, b=b)
}

#' @rdname model_grm
#' @param type the type of plot: 'prob' for ICCs and 'info' for IIFs
#' @param total TRUE to sum values over items
#' @param by_item TRUE to combine categories
#' @param xaxis the values of x-axis
#' @examples
#' with(model_grm_gendata(10, 5, 3), model_grm_plot(a, b, type='prob'))
#' with(model_grm_gendata(10, 5, 3), model_grm_plot(a, b, type='info', by_item=TRUE))
#' @import ggplot2
#' @importFrom stats aggregate
#' @export
model_grm_plot <- function(a, b, D=1.702, type=c('prob', 'info'), by_item=FALSE, total=FALSE,
                           xaxis=seq(-6, 6, .1), raw=FALSE){
  rs <- switch(match.arg(type),
               "prob"=model_grm_prob(xaxis, a, b, D, raw),
               "info"=model_grm_info(xaxis, a, b, D))
  n_p <- dim(rs)[1]
  n_i <- dim(rs)[2]
  n_c <- dim(rs)[3]
  y <- NULL
  for(i in 1:n_i)
    y <- rbind(y, data.frame(theta=rep(xaxis, n_c), item=paste('Item', i),
                             category=paste('Category', rep(1:n_c, each=n_p)),
                             x=as.vector(rs[,i,])))
  if(by_item)
    y <- rbind(y, cbind(aggregate(y$x, by=list(theta=y$theta, item=y$item), sum), category='Total'))
  if(total)
    y <- cbind(aggregate(y$x, by=list(theta=y$theta, category=y$category), sum), item='Total')
  y <- y[!is.na(y$x),]
  ggplot(y, aes_string(x="theta", y="x", color="category")) +
    geom_line() + facet_wrap(~item, scales='free') +
    xlab(expression(theta)) + ylab(type) +
    guides(color=FALSE) + theme_bw() + theme(legend.key=element_blank())
}

#' @rdname model_grm
#' @param show_mle TRUE to print maximum likelihood values
#' @examples
#' with(model_grm_gendata(5, 50, 3), model_grm_plot_loglh(u, a, b))
#' @import ggplot2
#' @export
model_grm_plot_loglh <- function(u, a, b, D=1.702, xaxis=seq(-6, 6, .1), show_mle=FALSE){
  n_p <- dim(u)[1]
  n_i <- dim(u)[2]
  n_t <- length(xaxis)
  rs <- array(NA, dim=c(n_p, n_t))
  for(i in 1:n_t)
    rs[, i] <- rowSums(model_grm_lh(u, rep(xaxis[i], n_p), a, b, D, log=TRUE))
  if(show_mle)
    print(apply(rs, 1, function(x){xaxis[which.max(x)]}))
  rs <- data.frame(theta=rep(xaxis, each=n_p), people=rep(1:n_p, n_t), value=as.vector(rs))
  rs$people <- factor(rs$people)
  ggplot(rs, aes_string(x="theta", y="value", color="people")) +
    geom_line() + xlab(expression(theta)) + ylab("Log-likelihood") +
    guides(color=FALSE) + theme_bw()
}
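## A minimal usage sketch (not part of the package source; it assumes the
## functions above are loaded, e.g. via library(xxIRT)). Because
## model_grm_rescale() applies a linear transformation to t and b and divides
## a by the same slope, response probabilities should be unchanged after
## rescaling; this checks that invariance numerically.
x  <- model_grm_gendata(10, 5, 3)
p0 <- with(x, model_grm_prob(t, a, b))
y  <- with(x, model_grm_rescale(t, a, b, param="t", mean=50, sd=10))
p1 <- with(y, model_grm_prob(t, a, b))
all.equal(p0, p1)  # expected TRUE up to floating-point error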
## File: xxIRT/R/module2_estimate_3pl.R

#' Estimate 3-parameter-logistic model
#' @description Estimate the 3PL model using the maximum likelihood estimation
#' @name estimate_3pl
NULL

#' @rdname estimate_3pl
#' @description \code{model_3pl_eap_scoring} scores response vectors using the EAP method
#' @param prior the prior distribution
#' @examples
#' with(model_3pl_gendata(10, 40), cbind(true=t, est=model_3pl_eap_scoring(u, a, b, c)$t))
#' @importFrom stats dnorm
#' @export
model_3pl_eap_scoring <- function(u, a, b, c, D=1.702, prior=c(0, 1), bound=c(-3, 3)){
  if(is.null(prior)) prior <- c(0, 1)
  quad <- hermite_gauss('11')
  quad$w <- quad$w * exp(quad$t^2) * dnorm(quad$t, prior[1], prior[2])
  p <- model_3pl_prob(quad$t, a, b, c, D)
  lh <- exp(ifelse(is.na(u), 0, u) %*% t(log(p)) +
              ifelse(is.na(1 - u), 0, 1 - u) %*% t(log(1 - p)))
  t <- colSums(t(lh) * quad$w * quad$t) / colSums(t(lh) * quad$w)
  t[t < bound[1]] <- bound[1]
  t[t > bound[2]] <- bound[2]
  t_sd <- colSums(t(lh) * quad$w * outer(quad$t, t, '-')^2) / colSums(t(lh) * quad$w)
  t_sd <- sqrt(t_sd)
  list(t=t, sd=t_sd)
}

#' @rdname estimate_3pl
#' @description \code{model_3pl_map_scoring} scores response vectors using the MAP method
#' @examples
#' with(model_3pl_gendata(10, 40), cbind(true=t, est=model_3pl_map_scoring(u, a, b, c)$t))
#' @export
model_3pl_map_scoring <- function(u, a, b, c, D=1.702, prior=c(0, 1), bound=c(-3, 3),
                                  nr_iter=30, nr_conv=1e-3){
  t <- rnorm(dim(u)[1], 0, .01)
  t_free <- rep(T, length(t))
  for(m in 1:nr_iter){
    dv_t <- model_3pl_dv_jmle(model_3pl_dv_Pt(t, a, b, c, D), u)
    dv_t$dv1 <- colSums(dv_t$dv1, na.rm=T)
    dv_t$dv2 <- colSums(dv_t$dv2, na.rm=T)
    if(!is.null(prior)){
      dv_t$dv1 <- dv_t$dv1 - (t - prior[1]) / prior[2]^2
      dv_t$dv2 <- dv_t$dv2 - 1 / prior[2]^2
    }
    nr_t <- estimate_nr_iteration(t, t_free, dv_t, 1.0, 1.0, bound)
    t <- nr_t$param
    if(max(abs(nr_t$h)) < nr_conv) break
  }
  list(t=t)
}

#' @rdname estimate_3pl
#' @keywords internal
model_3pl_dv_Pt <- function(t, a, b, c, D){
  p <- t(model_3pl_prob(t, a, b, c, D))
  dv1 <- D * a * (p - c) * (1 - p) / (1 - c)
  dv2 <- (D * a / (1 - c))^2 * (p - c) * (1 - p) * (1 + c - 2*p)
  list(dv1=dv1, dv2=dv2, p=p)
}

#' @rdname estimate_3pl
#' @keywords internal
model_3pl_dv_Pa <- function(t, a, b, c, D){
  p <- t(model_3pl_prob(t, a, b, c, D))
  dv1 <- D * t(outer(t, b, '-')) * (p - c) * (1 - p) / (1 - c)
  dv2 <- (D * t(outer(t, b, '-')) / (1 - c))^2 * (p - c) * (1 - p) * (1 + c - 2*p)
  list(dv1=dv1, dv2=dv2, p=p)
}

#' @rdname estimate_3pl
#' @keywords internal
model_3pl_dv_Pb <- function(t, a, b, c, D){
  p <- t(model_3pl_prob(t, a, b, c, D))
  dv1 <- -D * a * (p - c) * (1 - p) / (1 - c)
  dv2 <- (D * a / (1 - c))^2 * (p - c) * (1 - p) * (1 + c - 2*p)
  list(dv1=dv1, dv2=dv2, p=p)
}

#' @rdname estimate_3pl
#' @keywords internal
model_3pl_dv_Pc <- function(t, a, b, c, D){
  p <- t(model_3pl_prob(t, a, b, c, D))
  dv1 <- (1 - p) / (1 - c)
  dv2 <- array(0, dim=dim(dv1))
  list(dv1=dv1, dv2=dv2, p=p)
}

#' @rdname estimate_3pl
#' @description \code{model_3pl_dv_jmle} calculates the first and second derivatives for
#' the joint maximum likelihood estimation
#' @keywords internal
model_3pl_dv_jmle <- function(dv, u){
  dv1 <- (t(u) - dv$p) / dv$p / (1 - dv$p) * dv$dv1
  dv2 <- (t(u) - dv$p) / dv$p / (1 - dv$p) * dv$dv2 -
    ((t(u) - dv$p) / dv$p / (1 - dv$p) * dv$dv1)^2
  list(dv1=dv1, dv2=dv2)
}

#' @rdname estimate_3pl
#' @description \code{model_3pl_estimate_jmle} estimates the parameters using the
#' joint maximum likelihood estimation (JMLE) method
#' @param u observed response matrix, 2d matrix
#' @param t ability parameters, 1d vector (fixed value) or NA (freely estimate)
#' @param a discrimination parameters, 1d vector (fixed value) or NA (freely estimate)
#' @param b difficulty parameters, 1d vector (fixed value) or NA (freely estimate)
#' @param c pseudo-guessing parameters, 1d vector (fixed value) or NA (freely estimate)
#' @param D the scaling constant, 1.702 by default
#' @param iter the maximum iterations
#' @param conv the convergence criterion of the -2 log-likelihood
#' @param nr_iter the maximum iterations of newton-raphson
#' @param nr_conv the convergence criterion for newton-raphson
#' @param scale the mean and SD of the theta scale, N(0, 1) for JMLE by default
#' @param bounds_t bounds of ability parameters
#' @param bounds_a bounds of discrimination parameters
#' @param bounds_b bounds of difficulty parameters
#' @param bounds_c bounds of guessing parameters
#' @param priors a list of prior distributions
#' @param decay decay rate
#' @param debug TRUE to print debugging information
#' @param true_params a list of true parameters for evaluating the estimation accuracy
#' @examples
#' \dontrun{
#' # generate data
#' x <- model_3pl_gendata(2000, 40)
#' # free estimation
#' y <- model_3pl_estimate_jmle(x$u, true_params=x)
#' # fix c-parameters
#' y <- model_3pl_estimate_jmle(x$u, c=0, true_params=x)
#' # no priors
#' y <- model_3pl_estimate_jmle(x$u, priors=NULL, iter=30, debug=T)
#' }
#' @importFrom stats cor
#' @importFrom reshape2 melt
#' @import ggplot2
#' @export
model_3pl_estimate_jmle <- function(u, t=NA, a=NA, b=NA, c=NA, D=1.702, iter=100, conv=1e-0,
                                    nr_iter=10, nr_conv=1e-3, scale=c(0, 1), bounds_t=c(-3, 3),
                                    bounds_a=c(.01, 2), bounds_b=c(-3, 3), bounds_c=c(0, .25),
                                    priors=list(t=c(0, 1), a=c(-.1, .2), b=c(0, 1), c=c(4, 20)),
                                    decay=1, debug=FALSE, true_params=NULL){
  # internal config
  h_max <- 1
  tracking <- list(fit=rep(NA, iter), t=rep(NA, iter), a=rep(NA, iter), b=rep(NA, iter), c=rep(NA, iter))
  # initial values
  n_p <- nrow(u)
  n_i <- ncol(u)
  if(length(t) == 1) t <- rep(t, n_p)
  t[t_free <- is.na(t)] <- rnorm(sum(is.na(t)), 0, .01)
  if(length(a) == 1) a <- rep(a, n_i)
  a[a_free <- is.na(a)] <- rlnorm(sum(is.na(a)), -.1, .01)
  if(length(b) == 1) b <- rep(b, n_i)
  b[b_free <- is.na(b)] <- rnorm(sum(is.na(b)), 0, .01)
  if(length(c) == 1) c <- rep(c, n_i)
  c[c_free <- is.na(c)] <- rbeta(sum(is.na(c)), 4, 20)
  for(k in 1:iter){
    # t parameters
    if(any(t_free)){
      for(m in 1:nr_iter){
        dv_t <- model_3pl_dv_jmle(model_3pl_dv_Pt(t, a, b, c, D), u)
        dv_t$dv1 <- colSums(dv_t$dv1, na.rm=T)
        dv_t$dv2 <- colSums(dv_t$dv2, na.rm=T)
        if(!is.null(priors$t)){
          dv_t$dv1 <- dv_t$dv1 - (t - priors$t[1]) / priors$t[2]^2
          dv_t$dv2 <- dv_t$dv2 - 1 / priors$t[2]^2
        }
        nr_t <- estimate_nr_iteration(t, t_free, dv_t, h_max, decay, bounds_t)
        t <- nr_t$param
        if(max(abs(nr_t$h)) < nr_conv) break
      }
      # rescale thetas
      if(!is.null(scale)) t <- (t - mean(t)) / sd(t) * scale[2] + scale[1]
    }
    # b parameters
    if(any(b_free)){
      for(m in 1:nr_iter){
        dv_b <- model_3pl_dv_jmle(model_3pl_dv_Pb(t, a, b, c, D), u)
        dv_b$dv1 <- rowSums(dv_b$dv1, na.rm=T)
        dv_b$dv2 <- rowSums(dv_b$dv2, na.rm=T)
        if(!is.null(priors$b)){
          dv_b$dv1 <- dv_b$dv1 - (b - priors$b[1]) / priors$b[2]^2
          dv_b$dv2 <- dv_b$dv2 - 1 / priors$b[2]^2
        }
        nr_b <- estimate_nr_iteration(b, b_free, dv_b, h_max, decay, bounds_b)
        b <- nr_b$param
        if(max(abs(nr_b$h)) < nr_conv) break
      }
    }
    # a parameters
    if(any(a_free)){
      for(m in 1:nr_iter){
        dv_a <- model_3pl_dv_jmle(model_3pl_dv_Pa(t, a, b, c, D), u)
        dv_a$dv1 <- rowSums(dv_a$dv1, na.rm=T)
        dv_a$dv2 <- rowSums(dv_a$dv2, na.rm=T)
        if(!is.null(priors$a)){
          dv_a$dv1 <- dv_a$dv1 - 1/a * (1 + (log(a)-priors$a[1])/priors$a[2]^2)
          dv_a$dv2 <- dv_a$dv2 - 1/a^2 * (1/priors$a[2]^2 - (1 + (log(a)-priors$a[1])/priors$a[2]^2))
        }
        nr_a <- estimate_nr_iteration(a, a_free, dv_a, h_max, decay, bounds_a)
        a <- nr_a$param
        if(max(abs(nr_a$h)) < nr_conv) break
      }
    }
    # estimate c parameters
    if(any(c_free)){
      for(m in 1:nr_iter){
        dv_c <- model_3pl_dv_jmle(model_3pl_dv_Pc(t, a, b, c, D), u)
        dv_c$dv1 <- rowSums(dv_c$dv1, na.rm=T)
        dv_c$dv2 <- rowSums(dv_c$dv2, na.rm=T)
        if(!is.null(priors$c)){
          dv_c$dv1 <- dv_c$dv1 - ((priors$c[2]-1)/(1-c) - (priors$c[1]-1)/c)
          dv_c$dv2 <- dv_c$dv2 - ((priors$c[1]-1)/c^2 + (priors$c[2]-1)/(1-c)^2)
        }
        nr_c <- estimate_nr_iteration(c, c_free, dv_c, h_max, decay, bounds_c)
        c <- nr_c$param
        if(max(abs(nr_c$h)) < nr_conv) break
      }
    }
    decay <- decay * decay
    # model fit
    loglh <- -2 * sum(model_3pl_lh(u, t, a, b, c, D, log=TRUE), na.rm=T)
    if(debug) cat('iter #', k, ': -2 log-likelihood = ', round(loglh, 2), '\n', sep='')
    if(k > 1 && tracking$fit[k-1] - loglh < conv) break
    tracking$fit[k] <- loglh
    if(any(t_free)) tracking$t[k] <- mean(abs(nr_t$h[t_free]))
    if(any(a_free)) tracking$a[k] <- mean(abs(nr_a$h[a_free]))
    if(any(b_free)) tracking$b[k] <- mean(abs(nr_b$h[b_free]))
    if(any(c_free)) tracking$c[k] <- mean(abs(nr_c$h[c_free]))
  }
  # debugging
  if(debug){
    xx <- with(tracking, data.frame(iteration=1:iter, fit=fit, t=t, a=a, b=b, c=c))[1:k, ]
    xx <- melt(xx, id.vars='iteration')
    xx <- xx[!is.na(xx$value), ]
    g <- ggplot(xx, aes_string(x="iteration", y="value", color="variable")) +
      geom_line() + facet_wrap(~variable, scales="free") + guides(color=F) +
      xlab('Iterations') + ylab('') + theme_bw()
    print(g)
  }
  # compare with true parameters
  if(!is.null(true_params)){
    xx <- rbind(data.frame(true=true_params$t, est=t, params='t'),
                data.frame(true=true_params$a, est=a, params='a'),
                data.frame(true=true_params$b, est=b, params='b'),
                data.frame(true=true_params$c, est=c, params='c'))
    g <- ggplot(xx, aes_string(x="true", y="est", color="params")) +
      geom_point(alpha=.3) + geom_smooth(method='gam', se=F) +
      facet_wrap(~params, nrow=1, scales='free') + guides(color=F) +
      xlab('True Parameters') + ylab('Est. Parameters') + theme_bw()
    print(g)
    if(any(t_free)) cat('t: corr = ', round(cor(t, true_params$t), 3), ', rmse = ', round(rmse(t, true_params$t), 3), '\n', sep='')
    if(any(a_free)) cat('a: corr = ', round(cor(a, true_params$a), 3), ', rmse = ', round(rmse(a, true_params$a), 3), '\n', sep='')
    if(any(b_free)) cat('b: corr = ', round(cor(b, true_params$b), 3), ', rmse = ', round(rmse(b, true_params$b), 3), '\n', sep='')
    if(any(c_free)) cat('c: corr = ', round(cor(c, true_params$c), 3), ', rmse = ', round(rmse(c, true_params$c), 3), '\n', sep='')
  }
  list(t=t, a=a, b=b, c=c)
}

#' @rdname estimate_3pl
#' @description \code{model_3pl_dv_mmle} calculates the first and second derivatives for
#' the marginal maximum likelihood estimation
#' @param pdv_fn the function to compute derivatives of P w.r.t the estimating parameters
#' @keywords internal
model_3pl_dv_mmle <- function(pdv_fn, u, quad, a, b, c, D){
  n_p <- dim(u)[1]
  n_i <- dim(u)[2]
  n_q <- length(quad$t)
  p <- model_3pl_prob(quad$t, a, b, c, D)
  p_u1 <- t(ifelse(is.na(u), 0, u))
  p_u0 <- t(ifelse(is.na(u), 0, 1-u))
  ln_p <- log(p)
  ln_q <- log(1-p)
  p0 <- array(NA, c(n_i, n_p, n_q))
  p1 <- array(NA, c(n_p, n_q))
  for(q in 1:n_q){
    p0[,,q] <- p_u1*ln_p[q,] + p_u0*ln_q[q,]
    p1[,q] <- colSums(p0[,,q], na.rm=T)
  }
  p0 <- aperm(exp(p0), c(2,1,3))
  p1 <- exp(p1)
  p2 <- (p1 %*% quad$w)[,1]
  pdv <- pdv_fn(quad$t, a, b, c, D)
  dv_common <- t(quad$w * t(p1 / p2))
  dv_u0 <- t((-1)^(u+1))
  dv1 <- dv2 <- array(0, c(n_p, n_i))
  for(q in 1:n_q) dv1 <- dv1 + dv_common[,q] / p0[,,q] * t(dv_u0*pdv$dv1[,q])
  for(q in 1:n_q) dv2 <- dv2 + dv_common[,q] / p0[,,q] * t(dv_u0*pdv$dv2[,q])
  dv2 <- dv2 - dv1^2
  dv1 <- colSums(dv1, na.rm=T)
  dv2 <- colSums(dv2, na.rm=T)
  list(dv1=dv1, dv2=dv2)
}

#' @rdname estimate_3pl
#' @description \code{model_3pl_estimate_mmle} estimates the parameters using the
#' marginal maximum likelihood estimation (MMLE) method
#' @param quad_degree the number of quadrature points
#' @param scoring the scoring method: 'eap' or 'map'
#' @examples
#' \dontrun{
#' # generate data
#' x <- model_3pl_gendata(2000, 40)
#' # free estimation
#' y <- model_3pl_estimate_mmle(x$u, true_params=x)
#' # fix c-parameters
#' y <- model_3pl_estimate_mmle(x$u, c=0, true_params=x)
#' # no priors
#' y <- model_3pl_estimate_mmle(x$u, priors=NULL, iter=30, debug=T)
#' }
#' @importFrom stats cor
#' @importFrom reshape2 melt
#' @import ggplot2
#' @export
model_3pl_estimate_mmle <- function(u, t=NA, a=NA, b=NA, c=NA, D=1.702, iter=100, conv=1e-0,
                                    nr_iter=10, nr_conv=1e-3, bounds_t=c(-3, 3), bounds_a=c(.01, 2),
                                    bounds_b=c(-3, 3), bounds_c=c(0, .25),
                                    priors=list(t=c(0, 1), a=c(-.1, .2), b=c(0, 1), c=c(4, 20)),
                                    decay=1, quad_degree='11', scoring=c('eap', 'map'),
                                    debug=FALSE, true_params=NULL){
  # internal config
  h_max <- 1
  if(is.null(priors$t)) priors$t <- c(0, 1)
  quad <- hermite_gauss(quad_degree)
  quad$w <- quad$w * exp(quad$t^2) * dnorm(quad$t, priors$t[1], priors$t[2])
  tracking <- list(fit=rep(NA, iter), t=rep(NA, iter), a=rep(NA, iter), b=rep(NA, iter), c=rep(NA, iter))
  # initial values
  n_p <- nrow(u)
  n_i <- ncol(u)
  if(length(t) == 1) t <- rep(t, n_p)
  t[t_free <- is.na(t)] <- rnorm(sum(is.na(t)), 0, .01)
  if(length(a) == 1) a <- rep(a, n_i)
  a[a_free <- is.na(a)] <- rlnorm(sum(is.na(a)), -.1, .01)
  if(length(b) == 1) b <- rep(b, n_i)
  b[b_free <- is.na(b)] <- rnorm(sum(is.na(b)), 0, .01)
  if(length(c) == 1) c <- rep(c, n_i)
  c[c_free <- is.na(c)] <- rbeta(sum(is.na(c)), 4, 20)
  for(k in 1:iter){
    # b parameters
    if(any(b_free)){
      for(m in 1:nr_iter){
        dv_b <- model_3pl_dv_mmle(model_3pl_dv_Pb, u, quad, a, b, c, D)
        if(!is.null(priors$b)){
          dv_b$dv1 <- dv_b$dv1 - (b - priors$b[1]) / priors$b[2]^2
          dv_b$dv2 <- dv_b$dv2 - 1 / priors$b[2]^2
        }
        nr_b <- estimate_nr_iteration(b, b_free, dv_b, h_max, decay, bounds_b)
        b <- nr_b$param
        if(max(abs(nr_b$h)) < nr_conv) break
      }
    }
    # a parameters
    if(any(a_free)){
      for(m in 1:nr_iter){
        dv_a <- model_3pl_dv_mmle(model_3pl_dv_Pa, u, quad, a, b, c, D)
        if(!is.null(priors$a)){
          dv_a$dv1 <- dv_a$dv1 - 1/a * (1 + (log(a)-priors$a[1])/priors$a[2]^2)
          dv_a$dv2 <- dv_a$dv2 - 1/a^2 * (1/priors$a[2]^2 - (1 + (log(a)-priors$a[1])/priors$a[2]^2))
        }
        nr_a <- estimate_nr_iteration(a, a_free, dv_a, h_max, decay * .2, bounds_a)
        a <- nr_a$param
        if(max(abs(nr_a$h)) < nr_conv) break
      }
    }
    # estimate c parameters
    if(any(c_free)){
      for(m in 1:nr_iter){
        dv_c <- model_3pl_dv_mmle(model_3pl_dv_Pc, u, quad, a, b, c, D)
        if(!is.null(priors$c)){
          dv_c$dv1 <- dv_c$dv1 - ((priors$c[2]-1)/(1-c) - (priors$c[1]-1)/c)
          dv_c$dv2 <- dv_c$dv2 - ((priors$c[1]-1)/c^2 + (priors$c[2]-1)/(1-c)^2)
        }
        nr_c <- estimate_nr_iteration(c, c_free, dv_c, h_max, decay, bounds_c)
        c <- nr_c$param
        if(max(abs(nr_c$h)) < nr_conv) break
      }
    }
    decay <- decay * decay
    # scoring
    if(any(t_free))
      t[t_free] <- switch(match.arg(scoring, scoring),
                          'eap'=model_3pl_eap_scoring,
                          'map'=model_3pl_map_scoring)(u, a, b, c, D, prior=priors$t, bound=bounds_t)$t[t_free]
    # model fit
    loglik <- -2 * sum(model_3pl_lh(u, t, a, b, c, D, log=TRUE), na.rm=T)
    if(debug) cat('iter #', k, ': -2 log-likelihood = ', round(loglik, 2), '\n', sep='')
    if(k > 1 && tracking$fit[k-1] - loglik < conv) break
    tracking$fit[k] <- loglik
    if(any(a_free)) tracking$a[k] <- mean(abs(nr_a$h[a_free]))
    if(any(b_free)) tracking$b[k] <- mean(abs(nr_b$h[b_free]))
    if(any(c_free)) tracking$c[k] <- mean(abs(nr_c$h[c_free]))
  }
  # debugging
  if(debug){
    xx <- with(tracking, data.frame(iteration=1:iter, fit=fit, a=a, b=b, c=c))[1:k, ]
    xx <- melt(xx, id.vars='iteration')
    xx <- xx[!is.na(xx$value), ]
    g <- ggplot(xx, aes_string(x="iteration", y="value", color="variable")) +
      geom_line() + facet_wrap(~variable, scales="free") + guides(color=F) +
      xlab('Iterations') + ylab('') + theme_bw()
    print(g)
  }
  # compare with true parameters
  if(!is.null(true_params)){
    xx <- rbind(data.frame(true=true_params$t, est=t, params='t'),
                data.frame(true=true_params$a, est=a, params='a'),
                data.frame(true=true_params$b, est=b, params='b'),
                data.frame(true=true_params$c, est=c, params='c'))
    g <- ggplot(xx, aes_string(x="true", y="est", color="params")) +
      geom_point(alpha=.3) + geom_smooth(method='gam', se=F) +
      facet_wrap(~params, nrow=1, scales='free') + guides(color=F) +
      xlab('True Parameters') + ylab('Est. Parameters') + theme_bw()
    print(g)
    if(any(t_free)) cat('t: corr = ', round(cor(t, true_params$t), 3), ', rmse = ', round(rmse(t, true_params$t), 3), '\n', sep='')
    if(any(a_free)) cat('a: corr = ', round(cor(a, true_params$a), 3), ', rmse = ', round(rmse(a, true_params$a), 3), '\n', sep='')
    if(any(b_free)) cat('b: corr = ', round(cor(b, true_params$b), 3), ', rmse = ', round(rmse(b, true_params$b), 3), '\n', sep='')
    if(any(c_free)) cat('c: corr = ', round(cor(c, true_params$c), 3), ', rmse = ', round(rmse(c, true_params$c), 3), '\n', sep='')
  }
  list(t=t, a=a, b=b, c=c)
}

#' @rdname estimate_3pl
#' @param index the indices of items being plotted
#' @param intervals intervals on the x-axis
#' @param show_points TRUE to show points
#' @examples
#' with(model_3pl_gendata(1000, 20), model_3pl_fitplot(u, t, a, b, c, index=c(1, 3, 5)))
#' @importFrom reshape2 melt
#' @import ggplot2
#' @export
model_3pl_fitplot <- function(u, t, a, b, c, D=1.702, index=NULL, intervals=seq(-3, 3, .5), show_points=TRUE){
  if(is.null(index)) index <- seq(b)
  groups <- cut(t, intervals, labels=(intervals[-length(intervals)] + intervals[-1]) / 2)
  obs <- aggregate(u, by=list(intervals=groups), mean, na.rm=TRUE)[, c(1, index+1)]
  obs <- melt(obs, id.vars='intervals', variable.name='items')
  obs[, 'type'] <- 'Observed'
  p <- model_3pl_prob(t, a, b, c, D)
  exp <- aggregate(p, by=list(intervals=groups), mean, na.rm=TRUE)[, c(1, index+1)]
  exp <- melt(exp, id.vars='intervals', variable.name='items')
  exp[, 'type'] <- 'Expected'
  data <- rbind(obs, exp)
  data$intervals <- as.numeric(levels(data$intervals)[data$intervals])
  levels(data$items) <- gsub('V', 'Item ', levels(data$items))
  g <- ggplot(data, aes_string('intervals', 'value', color='type', group='type')) +
    geom_line() + facet_wrap(~items) + xlab(expression(theta)) + ylab('Probability') +
    scale_color_discrete(guide=guide_legend("")) + theme_bw()
  if(show_points) g <- g + geom_point(fill='white', pch=1)
  g
}
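## A minimal usage sketch (not part of the package source; assumes the package
## is loaded). It calibrates one simulated data set with both estimators above
## and compares the recovered b-parameters against the generating values;
## both correlations should be near 1, with MMLE usually the more stable of
## the two on smaller samples.
x <- model_3pl_gendata(1000, 30)
fit_jmle <- model_3pl_estimate_jmle(x$u)
fit_mmle <- model_3pl_estimate_mmle(x$u)
round(cor(cbind(true=x$b, jmle=fit_jmle$b, mmle=fit_mmle$b)), 3)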
## File: xxIRT/R/module2_estimate_gpcm.R

#' Estimate Generalized Partial Credit Model
#' @description Estimate the GPCM using the maximum likelihood estimation
#' @name estimate_gpcm
NULL

#' @rdname estimate_gpcm
#' @description \code{model_gpcm_eap_scoring} scores response vectors using the EAP method
#' @param prior the prior distribution
#' @examples
#' with(model_gpcm_gendata(10, 40, 3), cbind(true=t, est=model_gpcm_eap_scoring(u, a, b, d)$t))
#' @importFrom stats dnorm
#' @export
model_gpcm_eap_scoring <- function(u, a, b, d, D=1.702, prior=c(0, 1), bound=c(-3, 3)){
  quad <- hermite_gauss('11')
  quad$w <- quad$w * exp(quad$t^2) * dnorm(quad$t, prior[1], prior[2])
  n_p <- dim(u)[1]
  n_i <- dim(u)[2]
  n_q <- length(quad$t)
  p <- model_gpcm_prob(quad$t, a, b, d, D)
  ix <- model_polytomous_3dindex(u)
  lh <- array(NA, c(n_p, n_i, n_q))
  for(q in 1:n_q) lh[,,q] <- array(p[q,,][ix[,-1]], c(n_p, n_i))
  lh <- apply(lh, c(1, 3), prod, na.rm=T)
  t <- ((lh / (lh %*% quad$w)[,1]) %*% (quad$w * quad$t))[,1]
  t_sd <- ((lh / (lh %*% quad$w)[,1] * outer(t, quad$t, '-')^2) %*% quad$w)[,1]
  t_sd <- sqrt(t_sd)
  list(t=t, sd=t_sd)
}

#' @rdname estimate_gpcm
#' @description \code{model_gpcm_map_scoring} scores response vectors using maximum a posteriori
#' @examples
#' with(model_gpcm_gendata(10, 40, 3), cbind(true=t, est=model_gpcm_map_scoring(u, a, b, d)$t))
#' @export
model_gpcm_map_scoring <- function(u, a, b, d, D=1.702, prior=NULL, bound=c(-3, 3),
                                   nr_iter=30, nr_conv=1e-3){
  ix <- model_polytomous_3dindex(u)
  t <- rnorm(dim(u)[1], 0, .01)
  t_free <- rep(T, length(t))
  for(i in 1:nr_iter){
    dv <- model_gpcm_dv_jmle(ix, model_gpcm_dv_Pt(t, a, b, d, D))
    dv$dv1 <- rowSums(dv$dv1, na.rm=T)
    dv$dv2 <- rowSums(dv$dv2, na.rm=T)
    if(!is.null(prior)){
      dv$dv1 <- dv$dv1 - (t - prior[1]) / prior[2]^2
      dv$dv2 <- dv$dv2 - 1 / prior[2]^2
    }
    nr <- estimate_nr_iteration(t, t_free, dv, 1.0, 1.0, bound)
    t <- nr$param
    if(max(abs(nr$h)) < nr_conv) break
  }
  list(t=t)
}

#' @rdname estimate_gpcm
#' @keywords internal
model_gpcm_dv_Pt <- function(t, a, b, d, D){
  p <- model_gpcm_prob(t, a, b, d, D)
  p <- ifelse(is.na(p), 0, p)
  n_p <- dim(p)[1]
  n_i <- dim(p)[2]
  n_c <- dim(p)[3]
  dv1 <- apply(p, 2, function(x) x %*% 1:n_c)
  dv1 <- -1 * outer(dv1, 1:n_c, '-') * D * p
  dv1 <- aperm(aperm(dv1, c(2,1,3)) * a, c(2,1,3))
  dv2 <- apply(dv1, 2, function(x) rep(x %*% 1:n_c, n_c))
  dv2 <- aperm(array(dv2, c(n_p, n_c, n_i)), c(1,3,2)) * D * p
  dv2 <- aperm(aperm(dv2, c(2,1,3)) * a, c(2,1,3))
  dv2 <- dv1^2 / p - dv2
  list(p=p, dv1=dv1, dv2=dv2)
}

#' @rdname estimate_gpcm
#' @keywords internal
model_gpcm_dv_Pa <- function(t, a, b, d, D){
  p <- model_gpcm_prob(t, a, b, d, D)
  n_p <- dim(p)[1]
  n_i <- dim(p)[2]
  n_c <- dim(p)[3]
  term0 <- outer(t, b - d, '-')
  term0 <- aperm(apply(term0, 1:2, cumsum), c(2,3,1))
  term1 <- apply(p * term0, 1:2, function(x) rep(sum(x, na.rm=T), n_c))
  term1 <- aperm(term1, c(2,3,1))
  dv1 <- D * p * (term0 - term1)
  dv2 <- apply(dv1 * term0, 1:2, function(x) rep(sum(x, na.rm=T), n_c))
  dv2 <- aperm(dv2, c(2,3,1)) * D * p
  dv2 <- dv1^2 / p - dv2
  list(p=p, dv1=dv1, dv2=dv2)
}

#' @rdname estimate_gpcm
#' @keywords internal
model_gpcm_dv_Pb <- function(t, a, b, d, D){
  p <- model_gpcm_prob(t, a, b, d, D)
  p <- ifelse(is.na(p), 0, p)
  n_p <- dim(p)[1]
  n_i <- dim(p)[2]
  n_c <- dim(p)[3]
  dv1 <- apply(p, 2, function(x) x %*% 1:n_c)
  dv1 <- outer(dv1, 1:n_c, '-') * D * p
  dv1 <- aperm(aperm(dv1, c(2,1,3)) * a, c(2,1,3))
  dv2 <- apply(dv1, 2, function(x) rep(x %*% 1:n_c, n_c))
  dv2 <- aperm(array(dv2, c(n_p, n_c, n_i)), c(1,3,2)) * D * p
  dv2 <- aperm(aperm(dv2, c(2,1,3)) * a, c(2,1,3))
  dv2 <- dv1^2 / p + dv2
  list(p=p, dv1=dv1, dv2=dv2)
}

#' @rdname estimate_gpcm
#' @keywords internal
model_gpcm_dv_Pd <- function(t, a, b, d, D){
  p <- model_gpcm_prob(t, a, b, d, D)
  p <- ifelse(is.na(p), 0, p)
  n_p <- dim(p)[1]
  n_i <- dim(p)[2]
  n_c <- dim(p)[3]
  pdv1 <- pdv2 <- array(NA, c(n_p, n_i, n_c, n_c))
  for(k in 1:n_c){
    dv1 <- apply(p, 1:2, function(x) (k <= 1:n_c) * 1L - sum(x[k:n_c]))
    dv1 <- aperm(dv1, c(2,3,1))
    dv1 <- D * p * dv1
    dv1 <- aperm(aperm(dv1, c(2,1,3)) * a, c(2,1,3))
    pdv1[,,,k] <- dv1
    dv2 <- apply(dv1, 1:2, function(x) rep(sum(x[k:n_c]), n_c))
    dv2 <- aperm(dv2, c(2,3,1))
    dv2 <- D * p * dv2
    dv2 <- aperm(aperm(dv2, c(2,1,3)) * a, c(2,1,3))
    dv2 <- dv1^2 / p - dv2
    pdv2[,,,k] <- dv2
  }
  list(p=p, dv1=pdv1, dv2=pdv2)
}

#' @rdname estimate_gpcm
#' @param ix the 3d indices
#' @param dvp the derivatives of P
#' @keywords internal
model_gpcm_dv_jmle <- function(ix, dvp){
  n_p <- max(ix[,1])
  n_i <- max(ix[,2])
  dv1 <- array(with(dvp, dv1[ix]/p[ix]), c(n_p, n_i))
  dv2 <- array(with(dvp, dv2[ix]/p[ix]), c(n_p, n_i)) - dv1^2
  list(dv1=dv1, dv2=dv2)
}

#' @rdname estimate_gpcm
#' @description \code{model_gpcm_estimate_jmle} estimates the parameters using the
#' joint maximum likelihood estimation (JMLE) method
#' @param u the observed response matrix, 2d matrix
#' @param t ability parameters, 1d vector (fixed value) or NA (freely estimate)
#' @param a discrimination parameters, 1d vector (fixed value) or NA (freely estimate)
#' @param b difficulty parameters, 1d vector (fixed value) or NA (freely estimate)
#' @param d category parameters, 2d matrix (fixed value) or NA (freely estimate)
#' @param D the scaling constant, 1.702 by default
#' @param iter the maximum iterations
#' @param conv the convergence criterion of the -2 log-likelihood
#' @param nr_iter the maximum iterations of newton-raphson
#' @param nr_conv the convergence criterion for newton-raphson
#' @param scale the scale of theta parameters
#' @param bounds_t bounds of ability parameters
#' @param bounds_a bounds of discrimination parameters
#' @param bounds_b bounds of location parameters
#' @param bounds_d bounds of category parameters
#' @param priors a list of prior distributions
#' @param decay decay rate
#' @param debug TRUE to print debugging information
#' @param true_params a list of true parameters for evaluating the estimation accuracy
#' @examples
#' \dontrun{
#' # generate data
#' x <- model_gpcm_gendata(1000, 40, 3)
#' # free calibration
#' y <- model_gpcm_estimate_jmle(x$u, true_params=x)
#' # no priors
#' y <- model_gpcm_estimate_jmle(x$u, priors=NULL, true_params=x)
#' }
#' @importFrom stats cor
#' @importFrom reshape2 melt
#' @import ggplot2
#' @export
model_gpcm_estimate_jmle <- function(u, t=NA, a=NA, b=NA, d=NA, D=1.702, iter=100, nr_iter=10,
                                     conv=1e-0, nr_conv=1e-3, scale=c(0, 1), bounds_t=c(-4, 4),
                                     bounds_a=c(.01, 2), bounds_b=c(-4, 4), bounds_d=c(-4, 4),
                                     priors=list(t=c(0, 1), a=c(-.1, .2), b=c(0, 1), d=c(0, 1)),
                                     decay=1, debug=FALSE, true_params=NULL){
  # configuration
  h_max <- 1.0
  tracking <- list(fit=rep(NA, iter), t=rep(NA, iter), a=rep(NA, iter), b=rep(NA, iter), d=rep(NA, iter))
  # initial values
  n_p <- dim(u)[1]
  n_i <- dim(u)[2]
  n_c <- length(unique(as.vector(u)))
  u_ix <- model_polytomous_3dindex(u)
  if(length(t) == 1) t <- rep(t, n_p)
  t[t_free <- is.na(t)] <- rnorm(sum(is.na(t)), 0, .01)
  if(length(a) == 1) a <- rep(a, n_i)
  a[a_free <- is.na(a)] <- rlnorm(sum(is.na(a)), -.1, .01)
  if(length(b) == 1) b <- rep(b, n_i)
  b[b_free <- is.na(b)] <- rnorm(sum(is.na(b)), 0, .01)
  if(length(d) == 1) d <- array(d, dim=c(n_i, n_c))
  d[d_free <- is.na(d)] <- rnorm(sum(is.na(d)), 0, .01)
  d_free[, 1] <- FALSE
  d[, 1] <- 0
  d[,-1] <- d[,-1] - rowMeans(d[,-1])
  # estimate parameters
  for (k in 1:iter){
    # t parameters
    if(any(t_free)){
      for(j in 1:nr_iter){
        dv_t <- model_gpcm_dv_jmle(u_ix, model_gpcm_dv_Pt(t, a, b, d, D))
        dv_t$dv1 <- rowSums(dv_t$dv1, na.rm=T)
        dv_t$dv2 <- rowSums(dv_t$dv2, na.rm=T)
        if(!is.null(priors$t)){
          dv_t$dv1 <- dv_t$dv1 - (t - priors$t[1]) / priors$t[2]^2
          dv_t$dv2 <- dv_t$dv2 - 1 / priors$t[2]^2
        }
        nr_t <- estimate_nr_iteration(t, t_free, dv_t, h_max, decay, bounds_t)
        t <- nr_t$param
        if(max(abs(nr_t$h)) < nr_conv) break
      }
      # rescale theta
      if(!is.null(scale)) t <- (t - mean(t)) / sd(t) * scale[2] + scale[1]
    }
    # b parameters
    if(any(b_free)){
      for(j in 1:nr_iter){
        dv_b <- model_gpcm_dv_jmle(u_ix, model_gpcm_dv_Pb(t, a, b, d, D))
        dv_b$dv1 <- colSums(dv_b$dv1, na.rm=T)
        dv_b$dv2 <- colSums(dv_b$dv2, na.rm=T)
        if(!is.null(priors$b)){
          dv_b$dv1 <- dv_b$dv1 - (b - priors$b[1]) / priors$b[2]^2
          dv_b$dv2 <- dv_b$dv2 - 1 / priors$b[2]^2
        }
        nr_b <- estimate_nr_iteration(b, b_free, dv_b, h_max, decay, bounds_b)
        b <- nr_b$param
        if(max(abs(nr_b$h)) < nr_conv) break
      }
    }
    # d parameters
    if(any(d_free)){
      for(j in 1:nr_iter){
        dv <- model_gpcm_dv_Pd(t, a, b, d, D)
        dv_dh <- array(0, c(n_i, n_c))
        for(m in 2:n_c){
          dv_d <- model_gpcm_dv_jmle(u_ix, with(dv, list(p=p, dv1=dv1[,,,m], dv2=dv2[,,,m])))
          dv_d$dv1 <- colSums(dv_d$dv1, na.rm=T)
          dv_d$dv2 <- colSums(dv_d$dv2, na.rm=T)
          if(!is.null(priors$d)){
            dv_d$dv1 <- dv_d$dv1 - (d[,m] - priors$d[1]) / priors$d[2]^2
            dv_d$dv2 <- dv_d$dv2 - 1 / priors$d[2]^2
          }
          nr_d <- estimate_nr_iteration(d[,m], d_free[,m], dv_d, h_max, decay, bounds_d)
          d[,m] <- nr_d$param
          dv_dh[,m] <- nr_d$h
        }
        d[,-1] <- d[,-1] - rowMeans(d[,-1])
        if(max(abs(dv_dh[,-1])) < nr_conv) break
      }
    }
    # a parameters
    if(any(a_free)){
      for(j in 1:nr_iter){
        dv_a <- model_gpcm_dv_jmle(u_ix, model_gpcm_dv_Pa(t, a, b, d, D))
        dv_a$dv1 <- colSums(dv_a$dv1, na.rm=T)
        dv_a$dv2 <- colSums(dv_a$dv2, na.rm=T)
        if(!is.null(priors$a)){
          dv_a$dv1 <- dv_a$dv1 - 1/a * (1 + (log(a)-priors$a[1])/priors$a[2]^2)
          dv_a$dv2 <- dv_a$dv2 - 1/a^2 * (1/priors$a[2]^2 - (1 + (log(a)-priors$a[1])/priors$a[2]^2))
        }
        nr_a <- estimate_nr_iteration(a, a_free, dv_a, h_max, decay, bounds_a)
        a <- nr_a$param
        if(max(abs(nr_a$h)) < nr_conv) break
      }
    }
    decay <- decay * decay
    # model fit
    loglh <- -2 * sum(model_gpcm_lh(u, t, a, b, d, D, log=T), na.rm=T)
    if(debug) cat('iter #', k, ': -2 log-likelihood = ', round(loglh, 2), '\n', sep='')
    if(k > 1 && tracking$fit[k-1] - loglh < conv) break
    tracking$fit[k] <- loglh
    if(any(t_free)) tracking$t[k] <- mean(abs(nr_t$h[t_free]))
    if(any(a_free)) tracking$a[k] <- mean(abs(nr_a$h[a_free]))
    if(any(b_free)) tracking$b[k] <- mean(abs(nr_b$h[b_free]))
    if(any(d_free)) tracking$d[k] <- mean(abs(dv_dh[d_free]))
  }
  # debugging
  if(debug){
    xx <- with(tracking, data.frame(iteration=1:iter, fit=fit, t=t, a=a, b=b, d=d))[1:k, ]
    xx <- melt(xx, id.vars='iteration')
    xx <- xx[!is.na(xx$value),]
    g <- ggplot(xx, aes_string(x="iteration", y="value", color="variable")) +
      geom_line() + facet_wrap(~variable, scales="free") + guides(color=F) +
      xlab('Iterations') + ylab('') + theme_bw()
    print(g)
  }
  # compare with true parameters
  if(!is.null(true_params)){
    xx <- rbind(data.frame(true=true_params$t, est=t, params='t'),
                data.frame(true=true_params$a, est=a, params='a'),
                data.frame(true=true_params$b, est=b, params='b'))
    for(i in 2:n_c)
      xx <- rbind(xx, data.frame(true=true_params$d[,i], est=d[,i], params=paste('d',i,sep='')))
    g <- ggplot(xx, aes_string(x="true", y="est", color="params")) +
      geom_point(alpha=.3) + geom_smooth(method='gam', se=F) +
      facet_wrap(~params, nrow=1, scales='free') + guides(color=F) +
      xlab('True Parameters') + ylab('Est. Parameters') + theme_bw()
    print(g)
    if(any(t_free)) cat('t: corr = ', round(cor(t, true_params$t), 3), ', rmse = ', round(rmse(t, true_params$t), 3), '\n', sep='')
    if(any(a_free)) cat('a: corr = ', round(cor(a, true_params$a), 3), ', rmse = ', round(rmse(a, true_params$a), 3), '\n', sep='')
    if(any(b_free)) cat('b: corr = ', round(cor(b, true_params$b), 3), ', rmse = ', round(rmse(b, true_params$b), 3), '\n', sep='')
    for(i in 2:n_c)
      if(any(d_free[,i])) cat('d_', i, ': corr = ', round(cor(d[,i], true_params$d[,i]), 3), ', rmse = ', round(rmse(d[,i], true_params$d[,i]), 3), '\n', sep='')
  }
  list(t=t, a=a, b=b, d=d)
}

#' @rdname estimate_gpcm
#' @keywords internal
model_gpcm_dv_mmle <- function(u_ix, quad, pdv){
  n_p <- max(u_ix[,1])
  n_i <- max(u_ix[,2])
  n_q <- length(quad$t)
  p0 <- array(NA, c(n_p, n_i, n_q))
  for(q in 1:n_q) p0[,,q] <- array(pdv$p[q,,][u_ix[,-1]], c(n_p, n_i))
  p1 <- apply(p0, c(1, 3), prod, na.rm=T)
  p2 <- (p1 %*% quad$w)[,1]
  dv1 <- dv2 <- array(0, c(n_p, n_i))
  dv_common <- t(t(p1 / p2) * quad$w)
  for(q in 1:n_q)
    dv1 <- dv1 + dv_common[,q] / p0[,,q] * array(pdv$dv1[q,,][u_ix[,-1]], c(n_p, n_i))
  for(q in 1:n_q)
    dv2 <- dv2 + dv_common[,q] / p0[,,q] *
      (array(pdv$dv2[q,,][u_ix[,-1]], c(n_p, n_i)) - array(pdv$dv1[q,,][u_ix[,-1]], c(n_p, n_i)) * dv1)
  list(dv1=dv1, dv2=dv2)
}

#' @rdname estimate_gpcm
#' @description \code{model_gpcm_estimate_mmle} estimates the parameters using the
#' marginal maximum likelihood estimation (MMLE) method
#' @param quad_degree the number of quadrature points
#' @param scoring the scoring method: 'eap' or 'map'
#' @examples
#' \dontrun{
#' # generate data
#' x <- model_gpcm_gendata(1000, 40, 3)
#' # free estimation
#' y <- model_gpcm_estimate_mmle(x$u, true_params=x)
#' # no priors
#' y <- model_gpcm_estimate_mmle(x$u, priors=NULL, true_params=x)
#' }
#' @importFrom stats cor
#' @importFrom reshape2 melt
#' @import ggplot2
#' @export
model_gpcm_estimate_mmle <- function(u, t=NA, a=NA, b=NA, d=NA, D=1.702, iter=100, nr_iter=10,
                                     conv=1e-0, nr_conv=1e-3, bounds_t=c(-4, 4), bounds_a=c(.01, 2),
                                     bounds_b=c(-4, 4), bounds_d=c(-4, 4),
                                     priors=list(t=c(0, 1), a=c(-.1, .2), b=c(0, 1), d=c(0, 1)),
                                     decay=1, quad_degree='11', scoring=c('eap', 'map'),
                                     debug=FALSE, true_params=NULL){
  # configuration
  h_max <- 1.0
  if(is.null(priors$t)) priors$t <- c(0, 1)
  quad <- hermite_gauss(quad_degree)
  quad$w <- quad$w * exp(quad$t^2) * dnorm(quad$t, priors$t[1], priors$t[2])
  tracking <- list(fit=rep(NA, iter), t=rep(NA, iter), a=rep(NA, iter), b=rep(NA, iter), d=rep(NA, iter))
  # initial values
  n_p <- dim(u)[1]
  n_i <- dim(u)[2]
  n_c <- max(u) + 1
  u_ix <- model_polytomous_3dindex(u)
  if(length(t) == 1) t <- rep(t, n_p)
  t[t_free <- is.na(t)] <- rnorm(sum(is.na(t)), 0, .01)
  if(length(a) == 1) a <- rep(a, n_i)
  a[a_free <- is.na(a)] <- rlnorm(sum(is.na(a)), -.1, .01)
  if(length(b) == 1) b <- rep(b, n_i)
  b[b_free <- is.na(b)] <- rnorm(sum(is.na(b)), 0, .01)
  if(length(d) == 1) d <- array(d, dim=c(n_i, n_c))
  d[d_free <- is.na(d)] <- rnorm(sum(is.na(d)), 0, .01)
  d_free[, 1] <- FALSE
  d[, 1] <- 0
  d[,-1] <- d[,-1] - rowMeans(d[,-1])
  # estimate parameters
  for (k in 1:iter){
    # b parameters
    if(any(b_free)){
      for(n in 1:nr_iter){
        dv_b <- model_gpcm_dv_mmle(u_ix, quad, model_gpcm_dv_Pb(quad$t, a, b, d, D))
        dv_b$dv1 <- colSums(dv_b$dv1, na.rm=T)
        dv_b$dv2 <- colSums(dv_b$dv2, na.rm=T)
        if(!is.null(priors$b)){
          dv_b$dv1 <- dv_b$dv1 - (b - priors$b[1]) / priors$b[2]^2
          dv_b$dv2 <- dv_b$dv2 - 1 / priors$b[2]^2
        }
        nr_b <- estimate_nr_iteration(b, b_free, dv_b, h_max, decay, bounds_b)
        b <- nr_b$param
        if(max(abs(nr_b$h)) < nr_conv) break
      }
    }
    # d parameters
    if(any(d_free)){
      for(j in 1:nr_iter){
        # derivatives must be evaluated at the quadrature points for MMLE
        dv <- model_gpcm_dv_Pd(quad$t, a, b, d, D)
        dv_h <- array(0, c(n_i, n_c))
        for(m in 2:n_c){
          dv_d <- model_gpcm_dv_mmle(u_ix, quad, with(dv, list(p=p, dv1=dv1[,,,m], dv2=dv2[,,,m])))
          dv_d$dv1 <- colSums(dv_d$dv1, na.rm=T)
          dv_d$dv2 <- colSums(dv_d$dv2, na.rm=T)
          if(!is.null(priors$d)){
            dv_d$dv1 <- dv_d$dv1 - (d[,m] - priors$d[1]) / priors$d[2]^2
            dv_d$dv2 <- dv_d$dv2 - 1 / priors$d[2]^2
          }
          nr_d <- estimate_nr_iteration(d[,m], d_free[,m], dv_d, h_max, decay, bounds_d)
          d[,m] <- nr_d$param
          dv_h[,m] <- nr_d$h
        }
        d[,-1] <- d[,-1] - rowMeans(d[,-1])
        if(max(abs(dv_h[,-1])) < nr_conv) break
      }
    }
    # a parameters
    if(any(a_free)){
      for(j in 1:nr_iter){
        dv_a <- model_gpcm_dv_mmle(u_ix, quad, model_gpcm_dv_Pa(quad$t, a, b, d, D))
        dv_a$dv1 <- colSums(dv_a$dv1, na.rm=T)
        dv_a$dv2 <- colSums(dv_a$dv2, na.rm=T)
        if(!is.null(priors$a)){
          dv_a$dv1 <- dv_a$dv1 - 1/a * (1 + (log(a)-priors$a[1])/priors$a[2]^2)
          dv_a$dv2 <- dv_a$dv2 - 1/a^2 * (1/priors$a[2]^2 - (1 + (log(a)-priors$a[1])/priors$a[2]^2))
        }
        nr_a <- estimate_nr_iteration(a, a_free, dv_a, h_max, decay, bounds_a)
        a <- nr_a$param
        if(max(abs(nr_a$h)) < nr_conv) break
      }
    }
    # scoring
    if(any(t_free))
      t[t_free] <- switch(match.arg(scoring, scoring),
                          'eap'=model_gpcm_eap_scoring,
                          'map'=model_gpcm_map_scoring)(u, a, b, d, D, prior=priors$t, bound=bounds_t)$t[t_free]
    decay <- decay * decay
    # model fit
    loglh <- -2 * sum(model_gpcm_lh(u, t, a, b, d, D, log=T), na.rm=T)
    if(debug) cat('iter #', k, ': -2 log-likelihood = ', round(loglh, 2), '\n', sep='')
    if(k > 1 && tracking$fit[k-1] - loglh < conv) break
    tracking$fit[k] <- loglh
    if(any(a_free)) tracking$a[k] <- mean(abs(nr_a$h[a_free]))
    if(any(b_free)) tracking$b[k] <- mean(abs(nr_b$h[b_free]))
    if(any(d_free)) tracking$d[k] <- mean(abs(dv_h[d_free]))
  }
  # debugging
  if(debug){
    xx <- with(tracking, data.frame(iteration=1:iter, fit=fit, t=t, a=a, b=b, d=d))[1:k, ]
    xx <- melt(xx, id.vars='iteration')
    xx <- xx[!is.na(xx$value),]
    g <- ggplot(xx, aes_string(x="iteration", y="value", color="variable")) +
      geom_line() + facet_wrap(~variable, scales="free") + guides(color=F) +
      xlab('Iterations') + ylab('') + theme_bw()
    print(g)
  }
  # compare with true parameters
  if(!is.null(true_params)){
    xx <- rbind(data.frame(true=true_params$t, est=t, params='t'),
                data.frame(true=true_params$a, est=a, params='a'),
                data.frame(true=true_params$b, est=b, params='b'))
    for(i in 2:n_c)
      xx <- rbind(xx, data.frame(true=true_params$d[,i], est=d[,i], params=paste('d',i,sep='')))
    g <- ggplot(xx, aes_string(x="true", y="est", color="params")) +
      geom_point(alpha=.3) + geom_smooth(method='gam', se=F) +
      facet_wrap(~params, nrow=1, scales='free') + guides(color=F) +
      xlab('True Parameters') + ylab('Est. Parameters') + theme_bw()
    print(g)
    if(any(t_free)) cat('t: corr = ', round(cor(t, true_params$t), 3), ', rmse = ', round(rmse(t, true_params$t), 3), '\n', sep='')
    if(any(a_free)) cat('a: corr = ', round(cor(a, true_params$a), 3), ', rmse = ', round(rmse(a, true_params$a), 3), '\n', sep='')
    if(any(b_free)) cat('b: corr = ', round(cor(b, true_params$b), 3), ', rmse = ', round(rmse(b, true_params$b), 3), '\n', sep='')
    for(i in 2:n_c)
      if(any(d_free[,i])) cat('d_', i, ': corr = ', round(cor(d[,i], true_params$d[,i]), 3), ', rmse = ', round(rmse(d[,i], true_params$d[,i]), 3), '\n', sep='')
  }
  list(t=t, a=a, b=b, d=d)
}

#' @rdname estimate_gpcm
#' @param insert_d0 insert an initial category value
#' @param index the indices of items being plotted
#' @param intervals intervals on the x-axis
#' @param show_points TRUE to show points
#' @examples
#' with(model_gpcm_gendata(1000, 20, 3), model_gpcm_fitplot(u, t, a, b, d, index=c(1, 3, 5)))
#' @importFrom reshape2 melt
#' @import ggplot2
#' @export
model_gpcm_fitplot <- function(u, t, a, b, d, D=1.702, insert_d0=NULL, index=NULL,
                               intervals=seq(-3, 3, .5), show_points=TRUE){
  if(is.null(index)) index <- seq(b)
  groups <- cut(t, intervals, labels=(intervals[-length(intervals)] + intervals[-1]) / 2)
  obs <- aggregate(u, by=list(intervals=groups), mean, na.rm=TRUE)[, c(1, index+1)]
  obs <- melt(obs, id.vars='intervals', variable.name='items')
  obs[, 'type'] <- 'Observed'
  p <- model_gpcm_prob(t, a, b, d, D, insert_d0)
  p <- apply(p, 1:2, function(x) sum(x * (seq(x)-1), na.rm=T))
  exp <- aggregate(p, by=list(intervals=groups), mean, na.rm=TRUE)[, c(1, index+1)]
  exp <- melt(exp, id.vars='intervals', variable.name='items')
  exp[, 'type'] <- 'Expected'
  data <- rbind(obs, exp)
  data$intervals <- as.numeric(levels(data$intervals)[data$intervals])
  levels(data$items) <- gsub('V', 'Item ', levels(data$items))
  g <- ggplot(data, aes_string('intervals', 'value', color='type', group='type')) +
    geom_line() + facet_wrap(~items) + xlab(expression(theta)) + ylab('Probability') +
    scale_color_discrete(guide=guide_legend("")) + theme_bw()
  if(show_points) g <- g + geom_point(fill='white', pch=1)
  g
}
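## A minimal usage sketch (not part of the package source; assumes the package
## is loaded). It scores the same response matrix with EAP and MAP using the
## true item parameters and compares both against the generating thetas; both
## should track t closely, and EAP additionally returns a posterior SD per person.
x <- model_gpcm_gendata(500, 20, 3)
eap <- with(x, model_gpcm_eap_scoring(u, a, b, d, prior=c(0, 1)))
map <- with(x, model_gpcm_map_scoring(u, a, b, d, prior=c(0, 1)))
round(c(cor_eap=cor(x$t, eap$t), cor_map=cor(x$t, map$t)), 3)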
#' Estimate Graded Response Model #' @description Estimate the GRM using the maximum likelihood estimation #' @name estimate_grm NULL #' @rdname estimate_grm #' @description \code{model_grm_eap_scoring} scores response vectors using the EAP method #' @param prior the prior distribution #' @examples #' with(model_grm_gendata(10, 50, 3), cbind(true=t, est=model_grm_eap_scoring(u, a, b)$t)) #' @importFrom stats dnorm #' @export model_grm_eap_scoring <- function(u, a, b, D=1.702, prior=c(0, 1), bound=c(-3, 3)){ quad <- hermite_gauss('11') quad$w <- quad$w * exp(quad$t^2) * dnorm(quad$t, prior[1], prior[2]) n_p <- dim(u)[1] n_i <- dim(u)[2] n_q <- length(quad$t) p <- model_grm_prob(quad$t, a, b, D) ix <- model_polytomous_3dindex(u) lh <- array(NA, c(n_p, n_i, n_q)) for(q in 1:n_q) lh[,,q] <- array(p[q,,][ix[,-1]], c(n_p, n_i)) lh <- apply(lh, c(1, 3), prod, na.rm=T) t <- ((lh / (lh %*% quad$w)[,1]) %*% (quad$w * quad$t))[,1] t_sd <- ((lh / (lh %*% quad$w)[,1] * outer(t, quad$t, '-')^2) %*% quad$w)[,1] t_sd <- sqrt(t_sd) list(t=t, sd=t_sd) } #' @rdname estimate_grm #' @description \code{model_grm_map_scoring} scores response vectors using the MAP method #' @examples #' with(model_grm_gendata(10, 50, 3), cbind(true=t, est=model_grm_map_scoring(u, a, b)$t)) #' @export model_grm_map_scoring <- function(u, a, b, D=1.702, prior=NULL, bound=c(-3, 3), nr_iter=30, nr_conv=1e-3){ ix <- model_polytomous_3dindex(u) t <- rnorm(dim(u)[1], 0, .01) t_free <- rep(T, length(t)) for(m in 1:nr_iter){ dv <- model_grm_dv_jmle(ix, model_grm_dv_Pt(t, a, b, D)) dv$dv1 <- rowSums(dv$dv1, na.rm=T) dv$dv2 <- rowSums(dv$dv2, na.rm=T) if(!is.null(prior)){ dv$dv1 <- dv$dv1 - (t - prior[1]) / prior[2]^2 dv$dv2 <- dv$dv2 - 1 / prior[2]^2 } nr <- estimate_nr_iteration(t, t_free, dv, 1.0, 1.0, bound) t <- nr$param if(max(abs(nr$h)) < nr_conv) break } list(t=t) } #' @rdname estimate_grm #' @keywords internal model_grm_dv_Pt <- function(t, a, b, D){ n_c <- ncol(b) + 1 p <- model_grm_prob(t, a, b, D, raw=T) dv1 <- aperm(aperm(p*(1-p), c(2,1,3)) * D * a, c(2,1,3)) dv1 <- dv1[,,1:n_c] - dv1[,,-1] dv2 <- aperm(aperm(p*(1-p)*(1-2*p), c(2,1,3)) * (D * a)^2, c(2,1,3)) dv2 <- dv2[,,1:n_c] - dv2[,,-1] p <- p[,,1:n_c] - p[,,-1] list(p=p, dv1=dv1, dv2=dv2) } #' @rdname estimate_grm #' @keywords internal model_grm_dv_Pa <- function(t, a, b, D){ n_c <- ncol(b) + 1 p <- model_grm_prob(t, a, b, D, raw=T) term0 <- D * outer(t, cbind(0, b, 0), '-') dv1 <- p * (1-p) * term0 dv1 <- dv1[,,1:n_c] - dv1[,,-1] dv2 <- p * (1-p) * (1-2*p) * term0^2 dv2 <- dv2[,,1:n_c] - dv2[,,-1] p <- p[,,1:n_c] - p[,,-1] list(p=p, dv1=dv1, dv2=dv2) } #' @rdname estimate_grm #' @keywords internal model_grm_dv_Pb <- function(t, a, b, D){ n_p <- length(t) n_i <- nrow(b) n_c <- ncol(b) + 1 p <- model_grm_prob(t, a, b, D, raw=T) dv1 <- dv2 <- array(0, c(n_p, n_i, n_c, n_c-1)) for(k in 1:(n_c-1)){ term0<- t(t(p[,,k+1]*(1-p[,,k+1])) * (-D * a)) dv1[,,k,k] <- -1 * term0 dv1[,,k+1,k] <- term0 term1<- t(t(p[,,k+1]*(1-p[,,k+1])*(1-2*p[,,k+1])) * (D*a)^2) dv2[,,k,k] <- -1 * term1 dv2[,,k+1,k] <- term1 } p <- p[,,1:n_c] - p[,,-1] list(p=p, dv1=dv1, dv2=dv2) } #' @rdname estimate_grm #' @param ix the 3d indices #' @param dvp the derivatives of P #' @keywords internal model_grm_dv_jmle <- function(ix, dvp){ n_p <- max(ix[,1]) n_i <- max(ix[,2]) dv1 <- array(with(dvp, dv1[ix]/p[ix]), c(n_p, n_i)) dv2 <- array(with(dvp, dv2[ix]/p[ix]), c(n_p, n_i)) - dv1^2 list(dv1=dv1, dv2=dv2) } #' @rdname estimate_grm #' @description \code{model_grm_estimate_jmle} estimates the parameters using the #' 
joint maximum likelihood estimation (JMLE) method #' @param u the observed response matrix, 2d matrix #' @param t ability parameters, 1d vector (fixed value) or NA (freely estimate) #' @param a discrimination parameters, 1d vector (fixed value) or NA (freely estimate) #' @param b difficulty parameters, 2d matrix (fixed value) or NA (freely estimate) #' @param D the scaling constant, 1.702 by default #' @param iter the maximum iterations #' @param conv the convergence criterion for the -2 log-likelihood #' @param nr_iter the maximum iterations of newton-raphson #' @param nr_conv the convegence criterion of newton-raphson #' @param scale the scale of theta parameters #' @param bounds_t bounds of ability parameters #' @param bounds_a bounds of discrimination parameters #' @param bounds_b bounds of location parameters #' @param priors a list of prior distributions #' @param decay decay rate #' @param debug TRUE to print debuggin information #' @param true_params a list of true parameters for evaluating the estimation accuracy #' @examples #' \dontrun{ #' # generate data #' x <- model_grm_gendata(1000, 40, 3) #' # free calibration #' y <- model_grm_estimate_jmle(x$u, true_params=x) #' # no priors #' y <- model_grm_estimate_jmle(x$u, priors=NULL, true_params=x) #' } #' @importFrom stats cor #' @importFrom reshape2 melt #' @import ggplot2 #' @export model_grm_estimate_jmle <- function(u, t=NA, a=NA, b=NA, D=1.702, iter=100, nr_iter=10, conv=1e-0, nr_conv=1e-3, scale=c(0, 1), bounds_t=c(-4, 4), bounds_a=c(.01, 2), bounds_b=c(-4, 4), priors=list(t=c(0, 1), a=c(-.1, .2), b=c(0, 1)), decay=1, debug=FALSE, true_params=NULL){ # configuration h_max <- 1.0 tracking <- list(fit=rep(NA, iter), t=rep(NA, iter), a=rep(NA, iter), b=rep(NA, iter)) # initial values n_p <- dim(u)[1] n_i <- dim(u)[2] n_c <- max(u) + 1 u_ix <- model_polytomous_3dindex(u) if(length(t) == 1) t <- rep(t, n_p) t[t_free <- is.na(t)] <- rnorm(sum(is.na(t)), 0, .01) if(length(a) == 1) a <- rep(a, n_i) a[a_free <- is.na(a)] <- rlnorm(sum(is.na(a)), -.1, .01) if(length(b) == 1) b <- array(b, c(n_i, n_c-1)) b[b_free <- is.na(b)] <- rnorm(sum(is.na(b)), 0, .1) b <- t(apply(b, 1, sort)) # estimate parameters for (k in 1:iter){ # t parameters if(any(t_free)){ for(j in 1:nr_iter){ dv_t <- model_grm_dv_jmle(u_ix, model_grm_dv_Pt(t, a, b, D)) dv_t$dv1 <- rowSums(dv_t$dv1, na.rm=T) dv_t$dv2 <- rowSums(dv_t$dv2, na.rm=T) if(!is.null(priors$t)){ dv_t$dv1 <- dv_t$dv1 - (t - priors$t[1]) / priors$t[2]^2 dv_t$dv2 <- dv_t$dv2 - 1 / priors$t[2]^2 } nr_t <- estimate_nr_iteration(t, t_free, dv_t, h_max, decay, bounds_t) t <- nr_t$param if(max(abs(nr_t$h)) < nr_conv) break } # rescale theta if(!is.null(scale)) t <- (t - mean(t)) / sd(t) * scale[2] + scale[1] } # b parameters if(any(b_free)){ for(j in 1:nr_iter){ dv_b <- model_grm_dv_Pb(t, a, b, D) dv_bh <- array(0, c(n_i, n_c-1)) for(m in 1:(n_c-1)){ dv <- model_grm_dv_jmle(u_ix, with(dv_b, list(p=p, dv1=dv1[,,,m], dv2=dv2[,,,m]))) dv$dv1 <- colSums(dv$dv1, na.rm=T) dv$dv2 <- colSums(dv$dv2, na.rm=T) if(!is.null(priors$b)){ dv$dv1 <- dv$dv1 - (b[,m] - priors$b[1]) / priors$b[2]^2 dv$dv2 <- dv$dv2 - 1 / priors$b[2]^2 } nr <- estimate_nr_iteration(b[,m], b_free[,m], dv, h_max, decay, bounds_b) b[,m] <- nr$param dv_bh[,m] <- nr$h } b <- t(apply(b, 1, sort)) if(max(abs(dv_bh)) < nr_conv) break } } # a parameters if(any(a_free)){ for(j in 1:nr_iter){ dv_a <- model_grm_dv_jmle(u_ix, model_grm_dv_Pa(t, a, b, D)) dv_a$dv1 <- colSums(dv_a$dv1, na.rm=T) dv_a$dv2 <- colSums(dv_a$dv2, na.rm=T) if(!is.null(priors$a)){ 
dv_a$dv1 <- dv_a$dv1 - 1/a * (1 + (log(a)-priors$a[1])/priors$a[2]^2) dv_a$dv2 <- dv_a$dv2 - 1/a^2 * (1/priors$a[2]^2 - (1 + (log(a)-priors$a[1])/priors$a[2]^2)) } nr_a <- estimate_nr_iteration(a, a_free, dv_a, h_max, decay, bounds_a) a <- nr_a$param if(max(abs(nr_a$h)) < nr_conv) break } } decay <- decay * decay # model fit loglh <- -2 * sum(model_grm_lh(u, t, a, b, D, log=T), na.rm=T) if(debug) cat('iter #', k, ': -2 log-likelihood = ', round(loglh, 2), '\n', sep='') if(k > 1 && tracking$fit[k-1] - loglh < conv) break tracking$fit[k] <- loglh if(any(t_free)) tracking$t[k] <- mean(abs(nr_t$h[t_free])) if(any(a_free)) tracking$a[k] <- mean(abs(nr_a$h[a_free])) if(any(b_free)) tracking$b[k] <- mean(abs(dv_bh[b_free])) } # debugging if(debug){ xx <- with(tracking, data.frame(iteration=1:iter, fit=fit, t=t, a=a, b=b))[1:k,] xx <- melt(xx, id.vars='iteration') xx <- xx[!is.na(xx$value), ] g <- ggplot(xx, aes_string(x="iteration", y="value", color="variable")) + geom_line() + facet_wrap(~variable, scales="free") + guides(color=F) + xlab('Iterations') + ylab('') + theme_bw() print(g) } # compare with true parameters if(!is.null(true_params)){ xx <- rbind(data.frame(true=true_params$t, est=t, params='t'), data.frame(true=true_params$a, est=a, params='a')) for(i in 1:(n_c-1)) xx <- rbind(xx, data.frame(true=true_params$b[,i], est=b[,i], params=paste('b',i,sep=''))) g <- ggplot(xx, aes_string(x="true", y="est", color="params")) + geom_point(alpha=.3) + geom_smooth(method='gam', se=F) + facet_wrap(~params, nrow=1, scales='free') + guides(color=F) + xlab('True Parameters') + ylab('Est. Parameters') + theme_bw() print(g) if(any(t_free)) cat('t: corr = ', round(cor(t, true_params$t), 3), ', rmse = ', round(rmse(t, true_params$t), 3),'\n', sep='') if(any(a_free)) cat('a: corr = ', round(cor(a, true_params$a), 3), ', rmse = ', round(rmse(a, true_params$a), 3),'\n', sep='') for(i in 1:(n_c-1)) if(any(b_free[,i])) cat('b_', i, ': corr = ', round(cor(b[,i], true_params$b[,i]), 3), ', rmse = ', round(rmse(b[,i], true_params$b[,i]), 3),'\n', sep='') } list(t=t, a=a, b=b) } #' @rdname estimate_grm #' @keywords internal model_grm_dv_mmle <- function(u_ix, quad, pdv){ n_p <- max(u_ix[,1]) n_i <- max(u_ix[,2]) n_q <- length(quad$t) p0 <- array(NA, c(n_p, n_i, n_q)) for(q in 1:n_q) p0[,,q] <- array(pdv$p[q,,][u_ix[,-1]], c(n_p, n_i)) p1 <- apply(p0, c(1, 3), prod, na.rm=T) p2 <- (p1 %*% quad$w)[,1] dv1 <- dv2 <- array(0, c(n_p, n_i)) dv_common <- t(t(p1 / p2) * quad$w) for(q in 1:n_q) dv1 <- dv1 + dv_common[,q] / p0[,,q] * array(pdv$dv1[q,,][u_ix[,-1]], c(n_p, n_i)) for(q in 1:n_q) dv2 <- dv2 + dv_common[,q] / p0[,,q] * (array(pdv$dv2[q,,][u_ix[,-1]], c(n_p, n_i)) - array(pdv$dv1[q,,][u_ix[,-1]], c(n_p, n_i)) * dv1) list(dv1=dv1, dv2=dv2) } #' @rdname estimate_grm #' @description \code{model_grm_estimate_mmle} estimates the parameters using the #' marginal maximum likelihood estimation (MMLE) method #' @param quad_degree the number of quadrature points #' @param scoring the scoring method: 'eap' or 'map' #' @examples #' \dontrun{ #' # generate data #' x <- model_grm_gendata(1000, 40, 3) #' # free estimation #' y <- model_grm_estimate_mmle(x$u, true_params=x) #' # no priors #' y <- model_grm_estimate_mmle(x$u, priors=NULL, true_params=x) #' } #' @importFrom stats cor #' @importFrom reshape2 melt #' @import ggplot2 #' @export model_grm_estimate_mmle <- function(u, t=NA, a=NA, b=NA, d=NA, D=1.702, iter=100, nr_iter=10, conv=1e-0, nr_conv=1e-3, bounds_t=c(-4, 4), bounds_a=c(.01, 2), bounds_b=c(-4, 4), bounds_d=c(-4, 
4), priors=list(t=c(0, 1), a=c(-.1, .2), b=c(0, 1)), decay=1, quad_degree='11', scoring=c('eap', 'map'), debug=FALSE, true_params=NULL){ # configuration h_max <- 1.0 if(is.null(priors$t)) priors$t <- c(0, 1) quad <- hermite_gauss(quad_degree) quad$w <- quad$w * exp(quad$t^2) * dnorm(quad$t, priors$t[1], priors$t[2]) tracking <- list(fit=rep(NA, iter), t=rep(NA, iter), a=rep(NA, iter), b=rep(NA, iter), d=rep(NA, iter)) # initial values n_p <- dim(u)[1] n_i <- dim(u)[2] n_c <- max(u) + 1 u_ix <- model_polytomous_3dindex(u) if(length(t) == 1) t <- rep(t, n_p) t[t_free <- is.na(t)] <- rnorm(sum(is.na(t)), 0, .01) if(length(a) == 1) a <- rep(a, n_i) a[a_free <- is.na(a)] <- rlnorm(sum(is.na(a)), -.1, .01) if(length(b) == 1) b <- array(b, c(n_i, n_c-1)) b[b_free <- is.na(b)] <- rnorm(sum(is.na(b)), 0, .1) b <- t(apply(b, 1, sort)) # estimate parameters for (k in 1:iter){ # b parameters if(any(b_free)){ for(j in 1:nr_iter){ dv_b <- model_grm_dv_Pb(t, a, b, D) dv_bh <- array(0, c(n_i, n_c-1)) for(m in 2:n_c-1){ dv <- model_grm_dv_mmle(u_ix, quad, with(dv_b, list(p=p, dv1=dv1[,,,m], dv2=dv2[,,,m]))) dv$dv1 <- colSums(dv$dv1, na.rm=T) dv$dv2 <- colSums(dv$dv2, na.rm=T) if(!is.null(priors$b)){ dv$dv1 <- dv$dv1 - (b[,m] - priors$b[1]) / priors$b[2]^2 dv$dv2 <- dv$dv2 - 1 / priors$b[2]^2 } nr <- estimate_nr_iteration(b[,m], b_free[,m], dv, h_max, decay, bounds_b) b[,m] <- nr$param dv_bh[,m] <- nr$h } b <- t(apply(b, 1, sort)) if(max(abs(dv_bh)) < nr_conv) break } } # a parameters if(any(a_free)){ for(j in 1:nr_iter){ dv_a <- model_grm_dv_mmle(u_ix, quad, model_grm_dv_Pa(quad$t, a, b, D)) dv_a$dv1 <- colSums(dv_a$dv1, na.rm=T) dv_a$dv2 <- colSums(dv_a$dv2, na.rm=T) if(!is.null(priors$a)){ dv_a$dv1 <- dv_a$dv1 - 1/a * (1 + (log(a)-priors$a[1])/priors$a[2]^2) dv_a$dv2 <- dv_a$dv2 - 1/a^2 * (1/priors$a[2]^2 - (1 + (log(a)-priors$a[1])/priors$a[2]^2)) } nr_a <- estimate_nr_iteration(a, a_free, dv_a, h_max, decay, bounds_a) a <- nr_a$param if(max(abs(nr_a$h)) < nr_conv) break } } # scoring if(any(t_free)) t[t_free] <- switch(match.arg(scoring, scoring), 'eap'=model_grm_eap_scoring, 'map'=model_grm_map_scoring)(u, a, b, D, prior=priors$t, bound=bounds_t)$t[t_free] decay <- decay * decay # model fit loglh <- -2 * sum(model_grm_lh(u, t, a, b, D, log=T), na.rm=T) if(debug) cat('iter #', k, ': -2 log-likelihood = ', round(loglh, 2), '\n', sep='') if(k > 1 && tracking$fit[k-1] - loglh < conv) break tracking$fit[k] <- loglh if(any(a_free)) tracking$a[k] <- mean(abs(nr_a$h[a_free])) if(any(b_free)) tracking$d[k] <- mean(abs(dv_bh[b_free])) } # debugging if(debug){ xx <- with(tracking, data.frame(iteration=1:iter, fit=fit, t=t, a=a, b=b))[1:k, ] xx <- melt(xx, id.vars='iteration') xx <- xx[!is.na(xx$value), ] g <- ggplot(xx, aes_string(x="iteration", y="value", color="variable")) + geom_line() + facet_wrap(~variable, scales="free") + guides(color=F) + xlab('Iterations') + ylab('') + theme_bw() print(g) } # compare with true parameters if(!is.null(true_params)){ xx <- rbind(data.frame(true=true_params$t, est=t, params='t'), data.frame(true=true_params$a, est=a, params='a')) for(i in 2:n_c-1) xx <- rbind(xx, data.frame(true=true_params$b[,i], est=b[,i], params=paste('b',i,sep=''))) g <- ggplot(xx, aes_string(x="true", y="est", color="params")) + geom_point(alpha=.3) + geom_smooth(method='gam', se=F) + facet_wrap(~params, nrow=1, scales='free') + guides(color=F) + xlab('True Parameters') + ylab('Est. 
Parameters') + theme_bw() print(g) if(any(t_free)) cat('t: corr = ', round(cor(t, true_params$t), 3), ', rmse = ', round(rmse(t, true_params$t), 3),'\n', sep='') if(any(a_free)) cat('a: corr = ', round(cor(a, true_params$a), 3), ', rmse = ', round(rmse(a, true_params$a), 3),'\n', sep='') for(i in 2:n_c-1) if(any(b_free[,i])) cat('b_', i, ': corr = ', round(cor(b[,i], true_params$b[,i]), 3), ', rmse = ', round(rmse(b[,i], true_params$b[,i]), 3),'\n', sep='') } list(t=t, a=a, b=b) } #' @rdname estimate_grm #' @param index the indices of items being plotted #' @param intervals intervals on the x-axis #' @param show_points TRUE to show points #' @examples #' with(model_grm_gendata(1000, 20, 3), model_grm_fitplot(u, t, a, b, index=c(1, 3, 5))) #' @importFrom reshape2 melt #' @import ggplot2 #' @export model_grm_fitplot <- function(u, t, a, b, D=1.702, index=NULL, intervals=seq(-3, 3, .5), show_points=TRUE){ if(is.null(index)) index <- 1:nrow(b) groups <- cut(t, intervals, labels=(intervals[-length(intervals)] + intervals[-1]) / 2) obs <- aggregate(u, by=list(intervals=groups), mean, na.rm=TRUE)[, c(1, index+1)] obs <- melt(obs, id.vars='intervals', variable.name='items') obs[, 'type'] <- 'Observed' p <- model_grm_prob(t, a, b, D) p <- apply(p, 1:2, function(x) sum(x * (seq(x)-1), na.rm=T)) exp <- aggregate(p, by=list(intervals=groups), mean, na.rm=TRUE)[, c(1, index+1)] exp <- melt(exp, id.vars='intervals', variable.name='items') exp[, 'type'] <- 'Expected' data <- rbind(obs, exp) data$intervals <- as.numeric(levels(data$intervals)[data$intervals]) levels(data$items) <- gsub('V', 'Item ', levels(data$items)) g <- ggplot(data, aes_string('intervals', 'value', color='type', group='type')) + geom_line() + facet_wrap(~items) + xlab(expression(theta)) + ylab('Average Score') + scale_color_discrete(guide=guide_legend("")) + theme_bw() if(show_points) g <- g + geom_point(fill='white', pch=1) g }
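## A minimal usage sketch (added for illustration; not in the original source):
## the 'scoring' argument of model_grm_estimate_mmle switches between EAP and
## MAP scoring of the theta parameters between EM cycles. The data come from
## model_grm_gendata() defined earlier in this file.
if(FALSE){
  x <- model_grm_gendata(500, 20, 3)
  y_eap <- model_grm_estimate_mmle(x$u, scoring='eap', true_params=x)
  y_map <- model_grm_estimate_mmle(x$u, scoring='map', true_params=x)
  # the two scoring methods should yield highly correlated theta estimates
  cor(y_eap$t, y_map$t)
}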
## file: R/module2_estimate_grm.R
#' Automated Test Assembly (ATA) #' @name ata #' @examples #' \dontrun{ #' ## generate a pool of 100 items #' n_items <- 100 #' pool <- with(model_3pl_gendata(1, n_items), data.frame(id=1:n_items, a=a, b=b, c=c)) #' pool$content <- sample(1:3, n_items, replace=TRUE) #' pool$time <- round(rlnorm(n_items, log(60), .2)) #' pool$group <- sort(sample(1:round(n_items/3), n_items, replace=TRUE)) #' #' ## ex. 1: four 10-item forms, maximize b parameter #' x <- ata(pool, 4, len=10, max_use=1) #' x <- ata_obj_relative(x, "b", "max") #' x <- ata_solve(x, timeout=5) #' data.frame(form=1:4, b=sapply(x$items, function(x) mean(x$b))) #' #' ## ex. 2: four 10-item forms, minimize b parameter #' x <- ata(pool, 4, len=10, max_use=1) #' x <- ata_obj_relative(x, "b", "min", negative=TRUE) #' x <- ata_solve(x, as.list=FALSE, timeout=5) #' with(x$items, aggregate(b, by=list(form=form), mean)) #' #' ## ex. 3: two 10-item forms, mean(b)=0, sd(b)=1 #' ## content = (3, 3, 4), avg. time = 58--62 seconds #' constr <- data.frame(name='content',level=1:3, min=c(3,3,4), max=c(3,3,4), stringsAsFactors=F) #' constr <- rbind(constr, c('time', NA, 58*10, 62*10)) #' x <- ata(pool, 2, len=10, max_use=1) #' x <- ata_obj_absolute(x, pool$b, 0*10) #' x <- ata_obj_absolute(x, (pool$b-0)^2, 1*10) #' for(i in 1:nrow(constr)) #' x <- with(constr, ata_constraint(x, name[i], min[i], max[i], level=level[i])) #' x <- ata_solve(x, timeout=5) #' sapply(x$items, function(x) c(mean=mean(x$b), sd=sd(x$b))) #' #' ## ex. 4: two 10-item forms, max TIF over (-1, 1), consider item sets #' x <- ata(pool, 2, len=10, max_use=1, group="group") #' x <- ata_obj_relative(x, seq(-1, 1, .5), 'max') #' x <- ata_solve(x, timeout=5) #' plot(x) #' } NULL #' @rdname ata #' @description \code{ata} initiates an ATA model #' @param pool item pool, a data.frame #' @param num_form number of forms to be assembled #' @param len test length of each form #' @param max_use maximum use of each item #' @param ... options, e.g. group, common_items, overlap_items #' @details #' The ATA model stores the definition of a MIP model. \code{ata_solve} #' converts the model definition to a real MIP object and attempts to solve it. #' @export ata <- function(pool, num_form=1, len=NULL, max_use=NULL, ...){ if(!is.data.frame(pool)) pool <- as.data.frame(pool, stringsAsFactors=FALSE) if(!'b' %in% colnames(pool)) warning('b parameters are not found in the pool') if(!'a' %in% colnames(pool)) warning('a parameters are not found in the pool') if(!'c' %in% colnames(pool)) warning('c parameters are not found in the pool') opts <- list(...)
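# options recognized via '...': D (the scaling constant, defaults to 1.702
# below), group (the item-set grouping variable), and common_items /
# overlap_items / form_map (alternative ways to define the form structure)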
if(is.null(opts$D)) opts$D <- 1.702 # break down forms for common items and overlap items if(!is.null(opts$common_items)){ form_map <- cbind(1:num_form, num_form + 1) } else if(!is.null(opts$overlap_items)){ form_map <- cbind(1:num_form, 1:num_form+num_form, c(num_form*2, 1:(num_form-1)+num_form)) } else if(!is.null(opts$form_map)) { form_map <- opts$form_map } else { form_map <- matrix(1:num_form, ncol=1) } num_form <- max(form_map) # group item sets if(is.null(opts$group)) { group <- 1:nrow(pool) } else if(is.character(opts$group)) { if(opts$group %in% colnames(pool)) group <- pool[, opts$group] else stop('the item-set grouping variable is not found in the pool') } else if(length(opts$group) != nrow(pool)) { stop('the item-set grouping variable has a different length from the number of items') } else { group <- opts$group } group <- as.numeric(factor(group)) num_item <- length(unique(group)) # the number of item groups num_lpvar <- num_item * num_form + 2 # LP: x's (binary) + y1 (continuous) + y2 (continuous) obj <- c(rep(0, num_lpvar - 2), 1, 1) names(obj) <- c(paste("f", rep(1:num_form, each=num_item), "v", rep(1:num_item, num_form), sep=""), "y1", "y2") # x's are binary and y is continuous types <- c(rep("B", num_lpvar - 2), "C", "C") # placeholders for fixing values bounds <- list(idx=c(), lb=c(), ub=c()) # TRUE to maximize, FALSE to minimize max <- TRUE # TRUE if the y is expected to be negative negative <- FALSE # constraints: coefficient matrix, directions, and right-hand-side values mat <- matrix(nrow=0, ncol=num_lpvar, dimnames=list(NULL, names(obj))) dir <- rhs <- NULL x <- list(num_item=num_item, num_form=num_form, num_lpvar=num_lpvar, pool=pool, group=group, form_map=form_map, obj=obj, mat=mat, dir=dir, rhs=rhs, types=types, bounds=bounds, max=max, negative=negative, opts=opts) class(x) <- "ata" # add constraint: test length if(!is.null(len) && length(len) == 1) x <- ata_constraint(x, 1, min=len, max=len) if(!is.null(len) && length(len) == 2) x <- ata_constraint(x, 1, min=len[1], max=len[2]) if(!is.null(len) && length(len) > 2) stop("invalid length.") if(!is.null(opts$common_items)) x <- ata_constraint(x, 1, min=opts$common_items, max=opts$common_items, forms=num_form, internal_index=TRUE) if(!is.null(opts$overlap_items)) x <- ata_constraint(x, 1, min=opts$overlap_items, max=opts$overlap_items, forms=unique(as.vector(form_map[,-1])), internal_index=TRUE) # add constraint: max_use if(!is.null(max_use)) x <- ata_item_use(x, max=max_use) x } #' @rdname ata #' @param x an ATA object #' @export print.ata <- function(x, ...){ cat("Assemble", x$num_form, "forms from", x$num_item, "items/sets.\n") if(is.null(x$items)) { cat("The LP hasn't been solved yet.\n") } else { cat("The LP has been solved:\n") items <- x$items if(!is.data.frame(items)) items <- Reduce(rbind, items, NULL) if(nrow(items) <= 10) { print(items) } else { print(items[1:5,]) cat("...\n") print(items[-4:0 + nrow(items),]) } cat("See more results in 'x$result' or 'x$items' (x is the ata object).") } invisible(x) } #' @rdname ata #' @import ggplot2 #' @export plot.ata <- function(x, ...){ if(class(x) != "ata") stop("not an 'ata' object") if(is.null(x$items)) stop("lp hasn't been solved yet") opts <- list(...)
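# options recognized via '...': theta (the grid of theta points over which
# the TIFs are computed; defaults to seq(-3, 3, .1) below)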
if(is.null(opts$theta)) opts$theta <- round(seq(-3, 3, .1), 1) num_theta <- length(opts$theta) items <- x$items if(is.data.frame(items)) items <- split(items, f=items$form) data <- lapply(items, function(item) { info <- rowSums(model_3pl_info(opts$theta, item$a, item$b, item$c, D=x$opts$D)) cbind(t=opts$theta, info=info, form=item$form[1]) }) data <- Reduce(rbind, data, NULL) data <- as.data.frame(data) data$form <- factor(paste("Form", data$form)) ggplot(data, aes_string(x="t", y="info", color="form")) + geom_line() + xlab(expression(theta)) + ylab("Information") + theme_bw() + theme(legend.key=element_blank()) + guides(color=guide_legend("Forms")) } #' @rdname ata #' @description \code{ata_obj_relative} adds a relative objective to the model #' @param coef coefficients of the objective function #' @param mode optimization mode: 'max' for maximization and 'min' for minimization #' @param tol the tolerance parameter #' @param negative \code{TRUE} when the objective function is expected to be negative #' @param forms forms where objectives are added. \code{NULL} for all forms #' @param collapse \code{TRUE} to collapse into one objective function #' @param internal_index \code{TRUE} to use internal form indices #' @details #' \code{ata_obj_relative}: #' when mode='max', maximize (y-tol), subject to y <= sum(x) <= y+tol; #' when mode='min', minimize (y+tol), subject to y-tol <= sum(x) <= y. #' When \code{negative} is \code{TRUE}, y < 0, tol > 0. #' \code{coef} can be a numeric vector that has the same length as the pool or forms, #' or a variable name in the pool, or a numeric vector of theta points. #' When \code{tol} is \code{NULL}, it is optimized; when \code{FALSE}, ignored; #' when a number, fixed; when a range, constrained with lower and upper bounds. #' @export ata_obj_relative <- function(x, coef, mode=c('max', 'min'), tol=NULL, negative=FALSE, forms=NULL, collapse=FALSE, internal_index=FALSE, ...){ if(class(x) != "ata") stop("not an 'ata' object") opts <- list(...)
forms <- ata_form_index(x, forms, collapse, internal_index) coef <- ata_obj_coef(x, coef, compensate=FALSE) x$negative <- negative # optimization direction x$max <- switch(match.arg(mode), "max"=TRUE, "min"=FALSE) if(x$max){ x$obj[(x$num_lpvar-1):x$num_lpvar] <- c(1, -1) } else { x$obj[(x$num_lpvar-1):x$num_lpvar] <- c(1, 1) } # tolerance parameter if(!is.null(tol)) if(length(tol) == 2) { x$bounds$idx <- c(x$bounds$idx, x$num_lpvar) x$bounds$lb <- c(x$bounds$lb, tol[1]) x$bounds$ub <- c(x$bounds$ub, tol[2]) } else if(is.numeric(tol)){ x$bounds$idx <- c(x$bounds$idx, x$num_lpvar) x$bounds$lb <- c(x$bounds$lb, tol) x$bounds$ub <- c(x$bounds$ub, tol) } else if(!tol) { x$obj[x$num_lpvar] <- 0 } # objective for each form mat <- matrix(0, nrow=nrow(forms) * nrow(coef) * 2, ncol=x$num_lpvar) dir <- rhs <- rep(NA, nrow(forms) * nrow(coef) * 2) for(i in 1:nrow(forms)) { f <- forms[i, ] ind <- outer(1:x$num_item, (f - 1) * x$num_item, "+") ind <- as.vector(ind) for(j in 1:nrow(coef)) { row <- (j - 1) * 2 + (i - 1) * nrow(coef) * 2 mat[row + 1, ind] <- rep(coef[j, ], length(f)) mat[row + 2, ind] <- rep(coef[j, ], length(f)) if(x$max){ mat[row + 1, (x$num_lpvar-1):x$num_lpvar] <- c(-1, 0) mat[row + 2, (x$num_lpvar-1):x$num_lpvar] <- c(-1, -1) } else { mat[row + 1, (x$num_lpvar-1):x$num_lpvar] <- c(-1, 1) mat[row + 2, (x$num_lpvar-1):x$num_lpvar] <- c(-1, 0) } dir[row + 1:2] <- c(">=", "<=") rhs[row + 1:2] <- 0 } } ata_append_constraints(x, mat, dir, rhs) } #' @rdname ata #' @description \code{ata_obj_absolute} adds an absolute objective to the model #' @param target the target values of the objective function #' @param equal_tol \code{TRUE} to force upward and downward tolerance to be equal #' @param tol_up the range of upward tolerance #' @param tol_down the range of downward tolerance #' @details #' \code{ata_obj_absolute} minimizes y0+y1 subject to t-y0 <= sum(x) <= t+y1. #' @export ata_obj_absolute <- function(x, coef, target, equal_tol=FALSE, tol_up=NULL, tol_down=NULL, forms=NULL, collapse=FALSE, internal_index=FALSE, ...){ if(class(x) != "ata") stop("not an 'ata' object") opts <- list(...) forms <- ata_form_index(x, forms, collapse, internal_index) coef <- ata_obj_coef(x, coef, compensate=FALSE) if(length(target) == 1) target <- rep(target, nrow(coef)) if(length(target) != nrow(coef)) stop("invalid target length.") # optimization direction x$max <- FALSE x$obj[x$num_lpvar+(-1:0)] <- 1 # objective for each form mat <- matrix(0, nrow=nrow(forms) * nrow(coef) * 2, ncol=x$num_lpvar) dir <- rhs <- rep(NA, nrow(forms) * nrow(coef) * 2) for(i in 1:nrow(forms)){ f <- forms[i,] ind <- outer(1:x$num_item, (f - 1) * x$num_item, "+") ind <- as.vector(ind) for(j in 1:nrow(coef)){ row <- (j - 1) * 2 + (i - 1) * nrow(coef) * 2 mat[row + 1, ind] <- rep(coef[j, ], length(f)) mat[row + 1, x$num_lpvar - 1] <- 1 mat[row + 2, ind] <- rep(coef[j, ], length(f)) mat[row + 2, x$num_lpvar - 0] <- -1 dir[row + 1:2] <- c(">=", "<=") rhs[row + 1:2] <- target[j] } } # constraints on the tolerance parameters if(equal_tol){ mat <- rbind(mat, c(rep(0, x$num_lpvar - 2), 1, -1)) dir <- c(dir, '=') rhs <- c(rhs, 0) } if(!is.null(tol_down)){ if(length(tol_down) != 2) stop('invalid tol_down input. expect a vector of 2 elements.') x$bounds$idx <- c(x$bounds$idx, x$num_lpvar-1) x$bounds$lb <- c(x$bounds$lb, tol_down[1]) x$bounds$ub <- c(x$bounds$ub, tol_down[2]) } if(!is.null(tol_up)){ if(length(tol_up) != 2) stop('invalid tol_up input.
expect a vector of 2 elements.') x$bounds$idx <- c(x$bounds$idx, x$num_lpvar) x$bounds$lb <- c(x$bounds$lb, tol_up[1]) x$bounds$ub <- c(x$bounds$ub, tol_up[2]) } ata_append_constraints(x, mat, dir, rhs) } #' @rdname ata #' @description \code{ata_constraint} adds a constraint to the model #' @param level the level of a categorical variable to be constrained #' @param min the lower bound of the constraint #' @param max the upper bound of the constraint #' @details #' When \code{level} is \code{NA}, it is assumed that the constraint is on #' a quantitative item property; otherwise, a categorical item property. #' \code{coef} can be a variable name, a constant, or a numeric vector that has #' the same size as the pool. #' @importFrom stats aggregate #' @export ata_constraint <- function(x, coef, min=NA, max=NA, level=NULL, forms=NULL, collapse=FALSE, internal_index=FALSE){ if(class(x) != "ata") stop("not an 'ata' object") if(is.na(min) && is.na(max)) return(x) if(!is.na(min) && !is.na(max) && min > max) stop("min is greater than max.") forms <- ata_form_index(x, forms, collapse, internal_index) if(length(coef) == 1 && is.character(coef) && coef %in% colnames(x$pool)) { # if a variable name, then retrieve and aggregate coef <- x$pool[, coef] if(!is.null(level) && !is.na(level)) coef <- as.integer(coef == level) coef <- aggregate(coef, by=list(group=x$group), sum)[,-1] } else if(length(coef) == 1 && is.numeric(coef)) { # if a numeric value, then replicate values and aggregate coef <- aggregate(rep(coef, nrow(x$pool)), by=list(group=x$group), sum)[,-1] } else if(length(coef) == x$num_item * ncol(forms)) { # if a numeric vector (item-group-level), then do nothing coef <- coef } else if(length(coef) == nrow(x$pool)) { # if a numeric vector (item-level), then aggregate coef <- aggregate(coef, by=list(group=x$group), sum)[,-1] } else { stop("invalid coef") } n <- ifelse(!is.na(min) && !is.na(max) && min != max, nrow(forms) * 2, nrow(forms)) mat <- matrix(0, nrow=n, ncol=x$num_lpvar) dir <- rhs <- rep(NA, n) for(i in 1:nrow(forms)) { f <- forms[i,] ind <- outer(1:x$num_item, (f - 1) * x$num_item, "+") ind <- as.vector(ind) if(!is.na(min) && is.na(max)) { mat[i, ind] <- coef dir[i] <- ">=" rhs[i] <- min } else if(is.na(min) && !is.na(max)) { mat[i, ind] <- coef dir[i] <- "<=" rhs[i] <- max } else if(min == max) { mat[i, ind] <- coef dir[i] <- "=" rhs[i] <- min } else { mat[(i - 1) * 2 + 1, ind] <- coef mat[(i - 1) * 2 + 2, ind] <- coef dir[(i - 1) * 2 + 1:2] <- c(">=", "<=") rhs[(i - 1) * 2 + 1:2] <- c(min, max) } } ata_append_constraints(x, mat, dir, rhs) } #' @rdname ata #' @description \code{ata_item_use} limits the minimum and maximum usage for items #' @param items a vector of item indices, \code{NULL} for all items #' @export ata_item_use <- function(x, min=NA, max=NA, items=NULL){ if(class(x) != "ata") stop("not an 'ata' object") if(is.na(min) && is.na(max)) stop('min and max are both NA') if(is.null(items)) items <- 1:x$num_item if(any(!items %in% 1:x$num_item)) stop("invalid items input.") nitems <- length(items) n <- sum(!is.na(min), !is.na(max)) mat <- matrix(0, nrow=nitems * n, ncol=x$num_lpvar) for(i in 1:length(items)) { ind <- items[i] + (1:x$num_form - 1) * x$num_item mat[(i - 1) * n + 1:n, ind] <- 1 } if(!is.na(min) && is.na(max)){ dir <- rep(">=", nitems) rhs <- rep(min, nitems) } else if(is.na(min) && !is.na(max)){ dir <- rep("<=", nitems) rhs <- rep(max, nitems) } else { dir <- rep(c(">=", "<="), nitems) rhs <- rep(c(min, max), nitems) } ata_append_constraints(x, mat, dir, rhs) } #' 
@rdname ata #' @description \code{ata_item_enemy} adds an enemy-item constraint to the model #' @export ata_item_enemy <- function(x, items){ if(class(x) != "ata") stop("not an 'ata' object") if(any(!items %in% 1:x$num_item)) stop("invalid item index") mat <- matrix(0, nrow=nrow(x$form_map), ncol=x$num_lpvar) for(i in 1:nrow(x$form_map)){ f <- x$form_map[i, ] ind <- items + (f - 1) * x$num_item mat[i, ind] <- 1 } dir <- rep("<=", x$num_form) rhs <- rep(1, x$num_form) ata_append_constraints(x, mat, dir, rhs) } #' @rdname ata #' @description \code{ata_item_fixedvalue} forces an item to be selected or not selected #' @export ata_item_fixedvalue <- function(x, items, min=NA, max=NA, forms){ if(class(x) != "ata") stop("not an 'ata' object") if(any(!items %in% 1:x$num_item)) stop("invalid items input.") if(length(forms) > 1) stop('fix values in one form at a time.') n <- length(items) if(length(min) == 1) min <- rep(min, n) if(length(max) == 1) max <- rep(max, n) if(length(min) != n || length(max) != n) stop("invalid min or max length.") x$bounds$idx <- c(x$bounds$idx, items+(forms-1)*x$num_item) x$bounds$lb <- c(x$bounds$lb, min) x$bounds$ub <- c(x$bounds$ub, max) x } #' @rdname ata #' @description \code{ata_solve} solves the MIP model #' @param solver use 'lpsolve' for lp_solve 5.5 or 'glpk' for GLPK #' @param as.list \code{TRUE} to return results in a list; otherwise, a data frame #' @param details \code{TRUE} to print detailed information #' @param time_limit the time limit in seconds passed along to solvers #' @param message \code{TRUE} to print messages from solvers #' @details #' \code{ata_solve} takes control options in \code{...}. #' For lpsolve, see \code{lpSolveAPI::lp.control.options}. #' For glpk, see \code{glpkAPI::glpkConstants}\cr #' Once the model is solved, additional data are added to the model. #' \code{status} shows the status of the solution, \code{optimum} #' the optimal value of the objective function found in the solution, #' \code{obj_vars} the values of two critical variables in the objective #' function, \code{result} the assembly results in a binary matrix, and #' \code{items} the assembled items #' @export ata_solve <- function(x, solver=c('lpsolve', 'glpk'), as.list=TRUE, details=TRUE, time_limit=10, message=FALSE, ...) { if(class(x) != "ata") stop("not an 'ata' object") rs <- switch(match.arg(solver, solver), 'lpsolve'=ata_solve_lpsolve(x, time_limit, message, ...), 'glpk'=ata_solve_glpk(x, time_limit, message, ...)) x$code <- rs$code x$status <- rs$status x$optimum <- rs$optimum x$obj_vars <- rs$obj_vars if(all(rs$result == 0)) { x$result <- x$items <- NULL if(details) warning("No solution for the LP model.\n") } else { if(details) cat(rs$status, ', optimum: ', round(rs$optimum, 3), ' (', paste(round(rs$obj_vars, 3), collapse=', '), ')\n', sep='') x$result <- rs$result items <- list() for(i in 1:nrow(x$form_map)){ f <- x$form_map[i, ] f <- f[!is.na(f)] selection <- apply(x$result[, f, drop=FALSE] == 1, 1, any) selection <- seq(x$num_item)[selection] items[[i]] <- cbind(x$pool[x$group %in% selection, ], form=i) } if(!as.list) items <- Reduce(rbind, items, NULL) x$items <- items } x }
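## A small sketch (added for illustration; not in the original source) of how
## the LP decision variables are laid out, following the construction in ata():
## the binary variable for item group i on form f is column (f-1)*num_item+i,
## and the last two columns are the continuous helper variables y1 and y2.
if(FALSE){
  num_item <- 100; num_form <- 4   # hypothetical pool and form counts
  f <- 3; i <- 25
  (f - 1) * num_item + i           # column of the x variable for group 25 on form 3
  num_item * num_form + 1:2        # columns of y1 and y2
}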
## file: R/module3_ata.R
#' Helper functions of ATA #' @description miscellaneous helper functions of ATA #' @name ata_helpers NULL #' @rdname ata_helpers #' @description \code{ata_append_constraints} appends constraint definitions to the model #' @param mat coefficient matrix #' @param dir direction #' @param rhs right-hand-side value #' @keywords internal ata_append_constraints <- function(x, mat, dir, rhs) { x$mat <- rbind(x$mat, mat) x$dir <- c(x$dir, dir) x$rhs <- c(x$rhs, rhs) x } #' @rdname ata_helpers #' @description \code{ata_form_index} converts input forms into actual form indices in the model #' @param forms indices of forms #' @param collapse \code{TRUE} to collapse forms into one form #' @param internal_index \code{TRUE} to use internal form indices #' @keywords internal ata_form_index <- function(x, forms, collapse, internal_index){ if(internal_index){ if(is.null(forms)) forms <- 1:x$num_form if(any(!forms %in% 1:x$num_form)) stop('invalid form indices') forms <- as.matrix(forms) } else { if(is.null(forms)) forms <- 1:nrow(x$form_map) if(any(!forms %in% 1:nrow(x$form_map))) stop('invalid form indices') forms <- x$form_map[forms, , drop=FALSE] } if(collapse) forms <- matrix(unique(as.vector(forms)), nrow=1) forms } #' @rdname ata_helpers #' @description \code{ata_obj_coef} processes input coefficients of the objective functions #' @param coef coefficients #' @param compensate \code{TRUE} to combine coefficients #' @importFrom stats aggregate #' @keywords internal ata_obj_coef <- function(x, coef, compensate){ if(length(coef) == x$num_item){ # if a vector of given values (item-group-level), then convert to matrix coef <- matrix(coef, nrow=1) } else if(length(coef) == nrow(x$pool)) { # if a vector of given values (item-level), then aggregate and convert to matrix coef <- aggregate(coef, by=list(x$group), sum)[,-1] coef <- matrix(coef, nrow=1) } else if(is.numeric(coef)) { # if a vector of theta's, then compute information and aggregate coef <- with(x$pool, model_3pl_info(coef, a, b, c, D=x$opts$D)) coef <- aggregate(t(coef), by=list(group=x$group), sum)[,-1] coef <- t(as.matrix(coef)) } else if(is.character(coef) && all(coef %in% colnames(x$pool))) { # if a variable name, then retrieve value and aggregate coef <- aggregate(x$pool[,coef], by=list(group=x$group), sum)[,-1] coef <- t(as.matrix(coef)) } else { stop("invalid coefficients") } if(compensate) coef <- matrix(colSums(coef), nrow=1) round(coef, 2) } #' @rdname ata_helpers #' @description \code{ata_solve_lpsolve} solves the MIP model using lp_solve #' @param time_limit the time limit in seconds passed along to solvers #' @param message \code{TRUE} to print messages from solvers #' @param ... additional control parameters for solvers #' @import lpSolveAPI #' @keywords internal ata_solve_lpsolve <- function(x, time_limit, message, ...)
{ if(class(x) != "ata") stop("not an 'ata' object") lp <- make.lp(0, x$num_lpvar) # (max): direction lp.control(lp, sense=ifelse(x$max, "max", "min")) # set bound for y: positive = (lb=0); negative = (ub = 0) if(x$negative) set.bounds(lp, lower=-Inf, upper=0, x$num_lpvar-1) # (obj): objective function set.objfn(lp, x$obj, seq_along(x$obj)) # (type): x's = binary, y = continuous types <- sapply(x$types, function(x) switch(x, "B"="binary", "I"="integer", "C"="real")) for(i in seq_along(types)) set.type(lp, i, types[i]) # (bounds): column bounds if(!is.null(x$bounds$idx)) with(x$bounds, for(i in 1:length(idx)) { if(!is.na(lb[i])) set.bounds(lp, lower=lb[i], columns=idx[i]) if(!is.na(ub[i])) set.bounds(lp, upper=ub[i], columns=idx[i]) }) # (mat): constraints for(i in 1:nrow(x$mat)) add.constraint(lp, x$mat[i,], x$dir[i], x$rhs[i]) # solve lp.control(lp, mip.gap=c(.01, .01), epsint=.10, presolve="lindep", timeout=time_limit) lp.control(lp, verbose=ifelse(message, 'normal', 'neutral')) lp.control(lp, ...) code <- solve(lp) status <- switch(as.character(code), '0'="optimal solution found", '1'="the model is sub-optimal", '2'="the model is infeasible", '3'="the model is unbounded", '4'="the model is degenerate", '5'="numerical failure encountered", '6'="process aborted", '7'="timeout", '9'="the model was solved by presolve", '10'="the branch and bound routine failed", '11'="the branch and bound was stopped because of a break-at-first or break-at-value", '12'="a feasible branch and bound solution was found", '13'="no feasible branch and bound solution was found") optimum <- get.objective(lp) result <- matrix(get.variables(lp)[1:(x$num_lpvar-2)], ncol=x$num_form, byrow=FALSE) obj_vars <- get.variables(lp)[(x$num_lpvar-1):x$num_lpvar] if(!code %in% c(0, 1, 9)) result <- matrix(0, nrow=nrow(result), ncol=ncol(result)) list(code=code, status=status, optimum=optimum, result=result, obj_vars=obj_vars) } #' @rdname ata_helpers #' @description \code{ata_solve_glpk} solves the MIP model using GLPK #' @import glpkAPI #' @keywords internal ata_solve_glpk <- function(x, time_limit, message, ...) { if(class(x) != "ata") stop("not an 'ata' object") opts <- list(...)
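# extra arguments in '...' are treated as GLPK MIP control parameters and are
# passed to setMIPParmGLPK() near the end of this function; names are matched
# to glpkAPI constants via get(), e.g., ata_solve(x, solver='glpk', MIP_GAP=.05)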
# set up the problem lp <- initProbGLPK() addRowsGLPK(lp, nrow(x$mat)) addColsGLPK(lp, ncol(x$mat)) # (max): optimization direction setObjDirGLPK(lp, ifelse(x$max, GLP_MAX, GLP_MIN)) # (obj): obj functions setObjCoefsGLPK(lp, seq(x$num_lpvar), x$obj) # (types): x's = binary, y's = continuous for(j in seq(x$num_lpvar)[x$types == 'B']) setColKindGLPK(lp, j, GLP_BV) for(j in seq(x$num_lpvar)[x$types == 'C']) setColBndGLPK(lp, j, GLP_LO, 0, 0) if(x$negative) setColBndGLPK(lp, x$num_lpvar-1, GLP_UP, 0, 0) ## fixed values if(!is.null(x$bounds$idx)) for(j in 1:length(x$bounds$idx)) if(is.na(x$bounds$lb[j])){ setColBndGLPK(lp, x$bounds$idx[j], GLP_UP, 0, x$bounds$ub[j]) } else if(is.na(x$bounds$ub[j])){ setColBndGLPK(lp, x$bounds$idx[j], GLP_LO, x$bounds$lb[j], 0) } else { setColBndGLPK(lp, x$bounds$idx[j], GLP_DB, x$bounds$lb[j], x$bounds$ub[j]) } # # check column bounds # cbind(getColsLowBndsGLPK(lp, 1:x$num_lpvar), getColsUppBndsGLPK(lp, 1:x$num_lpvar)) # (mat) ind <- x$mat != 0 ia <- rep(1:nrow(x$mat), ncol(x$mat))[ind] ja <- rep(1:ncol(x$mat), each=nrow(x$mat))[ind] ar <- x$mat[ind] loadMatrixGLPK(lp, length(ar), ia, ja, ar) # (dir & rhs): row bounds dir <- sapply(x$dir, function(x) switch(x, '>='=GLP_LO, '<='=GLP_UP, '='=GLP_FX)) setRowsBndsGLPK(lp, 1:nrow(x$mat), x$rhs, x$rhs, dir) # # check row bounds # cbind(getRowsLowBndsGLPK(lp, 1:nrow(x$mat)), getRowsUppBndsGLPK(lp, 1:nrow(x$mat))) # solve setMIPParmGLPK(PRESOLVE, GLP_ON) setMIPParmGLPK(MIP_GAP, 0.01) setMIPParmGLPK(TM_LIM, 1000 * time_limit) setMIPParmGLPK(MSG_LEV, ifelse(message, GLP_MSG_ON, GLP_MSG_OFF)) for(i in seq_along(opts)) setMIPParmGLPK(get(names(opts)[i]), opts[[i]]) code <- solveMIPGLPK(lp) status <- switch(as.character(code), '0'="optimal solution found", '1'='invalid basis', '2'='singular matrix', '3'='ill-conditioned matrix', '4'='invalid bounds', '5'='solver failed', '6'='objective lower limit reached', '7'='objective upper limit reached', '8'='iteration limit exceeded', '9'='time limit exceeded', '10'='no primal feasible solution', '11'='no dual feasible solution', '12'='root LP optimum not provided', '13'='search terminated by application', '14'='relative mip gap tolerance reached', '15'='no primal/dual feasible solution', '16'='no convergence', '17'='numerical instability', '18'='invalid data', '19'='result out of range') optimum <- mipObjValGLPK(lp) result <- matrix(mipColsValGLPK(lp)[1:(x$num_lpvar-2)], ncol=x$num_form, byrow=FALSE) obj_vars <- mipColsValGLPK(lp)[(x$num_lpvar-1):x$num_lpvar] list(code=code, status=status, optimum=optimum, result=result, obj_vars=obj_vars) }
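## A small sketch (added for illustration; not in the original source) of the
## dense-to-triplet conversion used above before loadMatrixGLPK(): only the
## nonzero coefficients are loaded, as row indices (ia), column indices (ja),
## and values (ar), collected in column-major order.
if(FALSE){
  mat <- matrix(c(1, 0, 0, 2, 0, 3), nrow=2)
  ind <- mat != 0
  ia <- rep(1:nrow(mat), ncol(mat))[ind]       # row index of each nonzero
  ja <- rep(1:ncol(mat), each=nrow(mat))[ind]  # column index of each nonzero
  ar <- mat[ind]                               # the nonzero values
  cbind(ia, ja, ar)
}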
## file: R/module3_ata_helpers.R
#' Simulation of Computerized Adaptive Testing (CAT) #' @name cat_sim #' @examples #' \dontrun{ #' ## generate a 100-item pool #' num_items <- 100 #' pool <- with(model_3pl_gendata(1, num_items), data.frame(a=a, b=b, c=c)) #' pool$set_id <- sample(1:30, num_items, replace=TRUE) #' pool$content <- sample(1:3, num_items, replace=TRUE) #' pool$time <- round(rlnorm(num_items, mean=4.1, sd=.2)) #' #' ## MLE, EAP, and hybrid estimation rule #' cat_sim(1.0, pool, min=10, max=20, estimate_rule=cat_estimate_mle) #' cat_sim(1.0, pool, min=10, max=20, estimate_rule=cat_estimate_eap) #' cat_sim(1.0, pool, min=10, max=20, estimate_rule=cat_estimate_hybrid) #' #' ## SE, MI, and CI stopping rule #' cat_sim(1.0, pool, min=10, max=20, stop_se=.3) #' cat_sim(1.0, pool, min=10, max=20, stop_mi=.6) #' cat_sim(1.0, pool, min=10, max=20, stop_cut=0) #' cat_sim(1.0, pool, min=10, max=20, stop_cut=0, ci_width=2.58) #' #' ## maximum information selection with item sets #' cat_sim(1.0, pool, min=10, max=20, group="set_id")$admin #' #' ## maximum information with item exposure control #' cat_sim(1.0, pool, min=10, max=20, info_random=5)$admin #' #' ## Constrained-CAT selection rule with and without initial randomness #' cat_sim(1.0, pool, min=10, max=20, select_rule=cat_select_ccat, #' ccat_var="content", ccat_perc=c("1"=.2, "2"=.3, "3"=.5)) #' cat_sim(1.0, pool, min=10, max=20, select_rule=cat_select_ccat, ccat_random=5, #' ccat_var="content", ccat_perc=c("1"=.2, "2"=.3, "3"=.5)) #' #' ## Shadow-test selection rule #' cons <- data.frame(var='content', level=1:3, min=c(3,3,4), max=c(3,3,4)) #' cons <- rbind(cons, data.frame(var='time', level=NA, min=55*10, max=65*10)) #' cat_sim(1.0, pool, min=10, max=10, select_rule=cat_select_shadow, constraints=cons) #' #' ## Projection-based stopping rule #' cons <- data.frame(var='content', level=1:3, min=5, max=15) #' cons <- rbind(cons, data.frame(var='time', level=NA, min=60*20, max=60*40)) #' cat_sim(1.0, pool, min=20, max=40, select_rule=cat_select_shadow, stop_rule=cat_stop_projection, #' projection_method="diff", stop_cut=0, constraints=cons) #' } NULL #' @rdname cat_sim #' @description \code{cat_sim} runs a simulation of CAT. Use \code{theta} in options to set the starting #' value of the theta estimate. #' @param true the true theta #' @param pool the item pool (data.frame) #' @param ... option/control parameters #' @return \code{cat_sim} returns a \code{cat} object #' @details #' \code{...} takes a variety of option/control parameters for the simulations from users. #' \code{min} and \code{max} are mandatory for setting limits on the test length. User-defined #' selection, estimation, and stopping rules are also passed to the simulator via options.\cr #' To write a new rule, the function signature must be: \code{function(len, theta, stats, admin, pool, opts)}. #' See built-in rules for examples, and the illustrative sketch at the end of this file. #' @importFrom stats runif #' @export cat_sim <- function(true, pool, ...){ if(!is.data.frame(pool)) pool <- as.data.frame(pool, stringsAsFactors=FALSE) if(!all(c("a", "b", "c") %in% colnames(pool))) stop("cannot find a-, b-, or c-parameters in item pool") opts <- list(...)
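# options recognized via '...': min/max (required test-length limits), theta
# (the starting theta estimate), D (the scaling constant), and select_rule /
# estimate_rule / stop_rule (plug-in rules); rule-specific settings (e.g.,
# stop_se, info_random, ccat_perc) are read by the rules themselves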
if(is.null(opts$min)) stop("minimum length is missing") if(is.null(opts$max)) stop("maximum length is missing") if(opts$min < 0 || opts$min > opts$max) stop("invalid min/max length values: ", opts$min, " -- ", opts$max) if(nrow(pool) < opts$max) stop("insufficient items in item pool: ", nrow(pool)) theta <- ifelse(is.null(opts$theta), 0, opts$theta) if(is.null(opts$D)) opts$D <- 1.702 if(is.null(opts$select_rule)) select_rule <- cat_select_maxinfo else select_rule <- opts$select_rule if(is.null(opts$estimate_rule)) estimate_rule <- cat_estimate_mle else estimate_rule <- opts$estimate_rule if(is.null(opts$stop_rule)) stop_rule <- cat_stop_default else stop_rule <- opts$stop_rule len <- 0 stats <- matrix(nrow=opts$max, ncol=4, dimnames=list(NULL, c("u", "t", "se", "info"))) admin <- NULL while(len < opts$max){ # select items and update pool selection <- select_rule(len, theta, stats, admin, pool, opts) item <- selection$item item <- item[0:min(nrow(item), opts$max - len), ] pool <- selection$pool n <- nrow(item) len <- len + n admin <- rbind(admin, item) # generate responses p <- model_3pl_prob(true, item$a, item$b, item$c, opts$D)[1, ] u <- as.integer(p > runif(n)) stats[1:n + (len - n), "u"] <- u # estimate theta <- estimate_rule(len, theta, stats, admin, pool, opts) info <- sum(model_3pl_info(theta, admin$a, admin$b, admin$c, opts$D)) se <- 1 / sqrt(info) stats[1:n + (len - n), "t"] <- theta stats[1:n + (len - n), "se"] <- se stats[1:n + (len - n), "info"] <- info # stop? if(stop_rule(len, theta, stats, admin, pool, opts)) break } admin <- cbind(stats[1:len, ], admin) rs <- list(pool=pool, admin=admin, true=true, theta=theta) class(rs) <- "cat" rs } #' @rdname cat_sim #' @description \code{cat_estimate_mle} is the maximum likelihood estimation rule. Use #' \code{map_len} to apply MAP to the first K items and use \code{map_prior} to set the #' prior for MAP. 
#' @param len the current test length #' @param theta the current theta estimate #' @param stats a matrix of responses, theta estimate, information and std error #' @param admin a data frame of administered items #' @param opts a list of option/control parameters #' @return an estimation rule should return a theta estimate #' @export cat_estimate_mle <- function(len, theta, stats, admin, pool, opts){ u <- stats[1:len, "u"] u <- matrix(rep(u, each=2), nrow=2) if(is.null(opts$map_len)) opts$map_len <- 10 if(is.null(opts$map_prior)) opts$map_prior <- c(0, 1) if (len < opts$map_len) priors <- list(t=opts$map_prior) else priors <- NULL with(admin, model_3pl_estimate_jmle(u=u, a=a[1:len], b=b[1:len], c=c[1:len], D=opts$D, scale=NULL, priors=priors))$t[1] } #' @rdname cat_sim #' @description \code{cat_estimate_eap} is the expected a posteriori estimation rule, #' using \code{eap_mean} and \code{eap_sd} option parameters as the prior #' @export cat_estimate_eap <- function(len, theta, stats, admin, pool, opts){ eap_mean <- ifelse(is.null(opts$eap_mean), 0, opts$eap_mean) eap_sd <- ifelse(is.null(opts$eap_sd), 1, opts$eap_sd) u <- stats[1:len, "u"] u <- matrix(rep(u, each=2), nrow=2) with(admin, model_3pl_eap_scoring(u=u, a=a[1:len], b=b[1:len], c=c[1:len], D=opts$D, prior=c(eap_mean, eap_sd)))$t[1] } #' @rdname cat_sim #' @description \code{cat_estimate_hybrid} is a hybrid estimation rule, which uses MLE for #' mixed responses and EAP for all 1's or 0's responses #' @export cat_estimate_hybrid <- function(len, theta, stats, admin, pool, opts){ u <- stats[1:len, "u"] if(all(u==0) || all(u==1)){ theta <- cat_estimate_eap(len, theta, stats, admin, pool, opts) } else { theta <- cat_estimate_mle(len, theta, stats, admin, pool, opts) } theta } #' @rdname cat_sim #' @description \code{cat_stop_default} is a three-way stopping rule. When \code{stop_se} #' is set in the options, it uses the standard error stopping rule. When #' \code{stop_mi} is set in the options, it uses the minimum information stopping rule. When #' \code{stop_cut} is set in the options, it uses the confidence interval (set by \code{ci_width}) #' stopping rule. #' @return a stopping rule should return a boolean: \code{TRUE} to stop the CAT, \code{FALSE} to continue #' @importFrom stats qnorm #' @export cat_stop_default <- function(len, theta, stats, admin, pool, opts){ if(len < opts$min) return(FALSE) if(len >= opts$max) return(TRUE) if(!is.null(opts$stop_se)){ se <- stats[len, "se"] return(se <= opts$stop_se) } else if(!is.null(opts$stop_mi)){ info <- model_3pl_info(theta, pool$a, pool$b, pool$c, opts$D)[1, ] return(max(info) <= opts$stop_mi) } else if(!is.null(opts$stop_cut)){ se <- stats[len, "se"] ci_width <- ifelse(is.null(opts$ci_width), qnorm(.975), opts$ci_width) lb <- theta - ci_width * se ub <- theta + ci_width * se return(lb > opts$stop_cut || ub < opts$stop_cut) } FALSE } #' @rdname cat_sim #' @description \code{cat_select_maxinfo} is the maximum information selection rule. Use \code{group} #' (a numeric vector) to group items belonging to the same set. Use \code{info_random} to implement #' the random-esque item exposure control method.
#' @return a selection rule should return a list of (a) the selected item and (b) the updated pool #' @export cat_select_maxinfo <- function(len, theta, stats, admin, pool, opts){ if(is.null(opts$group)) group <- 1:nrow(pool) else group <- pool[, opts$group] info <- model_3pl_info(theta, pool$a, pool$b, pool$c, opts$D)[1, ] info <- aggregate(info, by=list(group), mean) colnames(info) <- c("group", "info") random <- min(ifelse(is.null(opts$info_random), 1, opts$info_random), nrow(info)) index <- info$group[order(-info$info)[1:random]] if(length(index) > 1) index <- sample(index, 1) index <- group %in% index list(item=pool[index,], pool=pool[!index,]) } #' @rdname cat_sim #' @description \code{cat_select_ccat} is the constrained CAT selection rule. Use #' \code{ccat_var} to set the content variable in the pool. Use \code{ccat_perc} to set #' the desired content distribution, with the name of each element being the content code #' and the value of each element being the percentage. Use \code{ccat_random} to add randomness #' to initial item selections. #' @export cat_select_ccat <- function(len, theta, stats, admin, pool, opts){ if(is.null(opts$ccat_var)) stop("ccat_var is missing") if(is.null(opts$ccat_perc)) stop("ccat_perc is missing") initial_random <- ifelse(is.null(opts$ccat_random), 0, opts$ccat_random) info <- data.frame(id=1:nrow(pool), domain=pool[,opts$ccat_var]) info$info <- with(pool, model_3pl_info(theta, a, b, c, opts$D))[1, ] if(len == 0) curr_perc <- rep(0, length(opts$ccat_perc)) else curr_perc <- freq(admin[1:len, opts$ccat_var], names(opts$ccat_perc))$perc if(len < initial_random) domain <- sample(names(opts$ccat_perc), 1) else domain <- names(opts$ccat_perc)[which.max(opts$ccat_perc - curr_perc)] info <- info[info$domain == domain, ] random <- min(nrow(info), ifelse(is.null(opts$info_random), 1, opts$info_random)) index <- info$id[order(-info$info)[1:random]] if(length(index) > 1) index <- sample(index, 1) list(item=pool[index, ], pool=pool[-index, ]) } #' @rdname cat_sim #' @description \code{cat_select_shadow} is the shadow-test selection rule. Use \code{shadow_id} #' to group item sets. Use \code{constraints} to set constraints. Constraints should be in a data.frame #' with four columns: var (variable name), level (variable level, \code{NA} for quantitative variable), #' min (lower bound), and max (upper bound).
#' @export cat_select_shadow <- function(len, theta, stats, admin, pool, opts){ if(!"shadow_id" %in% colnames(pool)) pool$shadow_id <- 1:nrow(pool) if(is.null(opts$constraints)) stop("constraints is missing in the options") if(!all(colnames(opts$constraints) %in% c("var", "level", "min", "max"))) stop("constraints should be a data.frame with 4 columns: var, level, min, and max") if(is.factor(opts$constraints$var)) opts$constraints$var <- levels(opts$constraints$var)[opts$constraints$var] if(is.factor(opts$constraints$level)) opts$constraints$level <- levels(opts$constraints$level)[opts$constraints$level] x <- ata(pool, 1, len=c(opts$min, opts$max), 1) x <- ata_obj_relative(x, theta, "max") for(i in 1:nrow(opts$constraints)) x <- with(opts$constraints[i,], ata_constraint(x, var, min=min, max=max, level=level)) if(!is.null(admin)) x <- ata_item_fixedvalue(x, match(admin$shadow_id, pool$shadow_id), min=1, forms=1) x <- ata_solve(x, as.list=FALSE, details=F) if(is.null(x$items)) stop("Failed to assemble a shadow test") x$items <- x$items[!x$items$shadow_id %in% admin$shadow_id, ] info <- data.frame(id=x$items$shadow_id, info=with(x$items, model_3pl_info(theta, a, b, c, opts$D))[1,]) random <- min(nrow(info), ifelse(is.null(opts$info_random), 1, opts$info_random)) index <- info$id[order(-info$info)[1:random]] if(length(index) > 1) index <- sample(index, 1) list(item=pool[index, ], pool=pool) } #' @rdname cat_sim #' @param x a \code{cat} object #' @export print.cat <- function(x, ...){ if(class(x) != "cat") stop("Not a 'cat' object.") len <- nrow(x$admin) cat("true=", round(x$true, 2), ", est.=", round(x$theta, 2), ", se=", round(x$admin$se[len], 2), ", p=", round(mean(x$admin$u), 2), ", used ", len, " items (", sum(x$admin$u)," correct).\n", sep="") cat("Below is the history of the CAT:\n") if(len <= 10) { print(x$admin) } else { print(x$admin[1:5, ]) cat("...\n") print(x$admin[1:5 + len - 5, ]) } invisible(x) } #' @rdname cat_sim #' @import ggplot2 #' @export plot.cat <- function(x, ...){ if(class(x) != "cat") stop("Not a 'cat' object.") opts <- list(...) if(is.null(opts$ylim)) opts$ylim <- c(-3, 3) len <- nrow(x$admin) x$admin$lb <- x$admin$t - 1.96 * x$admin$se x$admin$ub <- x$admin$t + 1.96 * x$admin$se x$admin$pos <- 1:len x$admin$Responses <- factor(x$admin$u, levels=c(0, 1), labels=c("Wrong", "Right")) ggplot(data=x$admin, aes_string(x="pos", y="t", color="Responses")) + geom_point(aes_string(size="se")) + geom_linerange(aes_string(ymin="lb", ymax="ub"), linetype=3) + geom_point(aes(x=len, y=x$true), color="coral", pch=4, size=3) + coord_cartesian(ylim=opts$ylim) + scale_size_continuous(range=c(1, 3)) + xlab("Position") + ylab(expression(paste("Est. ", theta))) + guides(size=F, alpha=F) + theme_bw() + theme(legend.key=element_blank()) } #' @rdname cat_sim #' @description \code{cat_stop_projection} is the projection-based stopping rule. Use #' \code{projection_method} to choose the projection method ('info' or 'diff'). Use #' \code{stop_cut} to set the cut score. Use \code{constraints} to set the constraints.
#' Constraints should be a data.frame with columns: var (variable name), #' level (variable level, \code{NA} for quantitative variable), min (lower bound), max (upper bound) #' @export cat_stop_projection <- function(len, theta, stats, admin, pool, opts){ if(len < opts$min) return(FALSE) if(len >= opts$max) return(TRUE) method <- match.arg(opts$projection_method, c('info', 'diff')) if(is.null(opts$stop_cut)) stop('stop_cut is missing in the options') if(is.null(opts$constraints)) stop("constraints is missing in the options") if(!all(colnames(opts$constraints) %in% c("var", "level", "min", "max"))) stop("constraints should be a data.frame with 4 columns: var, level, min, and max") if(is.factor(opts$constraints$var)) opts$constraints$var <- levels(opts$constraints$var)[opts$constraints$var] if(is.factor(opts$constraints$level)) opts$constraints$level <- levels(opts$constraints$level)[opts$constraints$level] pool <- unique(rbind(pool, admin)) if(method == 'info'){ x <- ata(pool, 1, len=opts$max, 1) x <- ata_obj_relative(x, theta, "max") for(i in 1:nrow(opts$constraints)) x <- with(opts$constraints, ata_constraint(x, var[i], min=min[i], max=max[i], level=level[i])) x <- ata_item_fixedvalue(x, admin$shadow_id, min=1, forms=1) x <- ata_solve(x, as.list=FALSE, details=F) if(is.null(x$items)) stop("Failed to assemble a projection test") u <- c(stats[1:len, "u"], rep(1, opts$max - len)) u <- matrix(rep(u, each=2), nrow=2) theta_ub <- with(x$items, model_3pl_estimate_jmle(u, a=a, b=b, c=c, D=opts$D, scale=NULL, priors=NULL))$t[1] u <- c(stats[1:len, "u"], rep(0, opts$max - len)) u <- matrix(rep(u, each=2), nrow=2) theta_lb <- with(x$items, model_3pl_estimate_jmle(u, a=a, b=b, c=c, D=opts$D, scale=NULL, priors=NULL))$t[1] } else if(method == 'diff'){ if(is.null(opts$proj_width)) opts$proj_width <- 1.96 x <- ata(pool, 1, len=opts$max, 1) x <- ata_obj_absolute(x, "b", (theta + opts$proj_width * stats[len, "se"]) * opts$max) for(i in 1:nrow(opts$constraints)) x <- with(opts$constraints, ata_constraint(x, var[i], min=min[i], max=max[i], level=level[i])) x <- ata_item_fixedvalue(x, admin$shadow_id, min=1, forms=1) x <- ata_solve(x, as.list=FALSE, details=F) if(is.null(x$items)) stop("Failed to assemble a projection test") u <- c(stats[1:len, "u"], rep(1, opts$max - len)) u <- matrix(rep(u, each=2), nrow=2) theta_ub <- with(x$items, model_3pl_estimate_jmle(u, a=a, b=b, c=c, D=opts$D, scale=NULL, priors=NULL))$t[1] x <- ata(pool, 1, len=opts$max, 1) x <- ata_obj_absolute(x, "b", (theta - opts$proj_width * stats[len, "se"]) * opts$max) for(i in 1:nrow(opts$constraints)) x <- with(opts$constraints, ata_constraint(x, var[i], min=min[i], max=max[i], level=level[i])) x <- ata_item_fixedvalue(x, admin$shadow_id, min=1, forms=1) x <- ata_solve(x, as.list=FALSE, details=F) if(is.null(x$items)) stop("Failed to assemble a projection test") u <- c(stats[1:len, "u"], rep(0, opts$max - len)) u <- matrix(rep(u, each=2), nrow=2) theta_lb <- with(x$items, model_3pl_estimate_jmle(u, a=a, b=b, c=c, D=opts$D, scale=NULL, priors=NULL))$t[1] } (theta_lb > opts$stop_cut || theta_ub < opts$stop_cut) }
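## A user-defined rule sketch (added for illustration; not in the original
## source). Per the cat_sim() documentation, a custom rule must use the
## signature function(len, theta, stats, admin, pool, opts). This toy
## selection rule draws one random item, mainly to show the required return
## value: a list with the selected item(s) and the updated pool.
if(FALSE){
  cat_select_random <- function(len, theta, stats, admin, pool, opts){
    index <- sample(1:nrow(pool), 1)
    list(item=pool[index, , drop=FALSE], pool=pool[-index, , drop=FALSE])
  }
  cat_sim(1.0, pool, min=10, max=20, select_rule=cat_select_random)
}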
## file: R/module4_cat.R
#' Computerized Multistage Testing (MST) #' @name mst #' @examples #' \dontrun{ #' ## generate item pool #' num_item <- 300 #' pool <- with(model_3pl_gendata(1, num_item), data.frame(a=a, b=b, c=c)) #' pool$id <- 1:num_item #' pool$content <- sample(1:3, num_item, replace=TRUE) #' pool$time <- round(rlnorm(num_item, 4, .3)) #' pool$group <- sort(sample(1:round(num_item/3), num_item, replace=TRUE)) #' #' ## ex. 1: 1-2-2 MST, 2 panels, topdown #' ## 20 items in total and 10 items in content area 1 in each route #' ## maximize info. at -1 and 1 for easy and hard routes #' x <- mst(pool, "1-2-2", 2, 'topdown', len=20, max_use=1) #' x <- mst_obj(x, theta=-1, indices=1:2) #' x <- mst_obj(x, theta=1, indices=3:4) #' x <- mst_constraint(x, "content", 10, 10, level=1) #' x <- mst_assemble(x, timeout=5) #' plot(x, byroute=TRUE) #' for(p in 1:x$num_panel) #' for(r in 1:x$num_route) { #' route <- paste(x$route[r, 1:x$num_stage], collapse='-') #' count <- sum(mst_get_items(x, panel_ix=p, route_ix=r)$content==1) #' cat('panel=', p, ', route=', route, ': ', count, ' items in content area #1\n', sep='') #' } #' #' ## ex. 2: 1-2-3 MST, 2 panels, bottomup, #' ## remove two routes with large theta change: 1-2-6, 1-3-4 #' ## 10 items in total and 4 items in content area 2 in each module #' ## maximize info. at -1, 0 and 1 for easy, medium, and hard modules #' x <- mst(pool, "1-2-3", 2, 'bottomup', len=10, max_use=1) #' x <- mst_route(x, c(1, 2, 6), "-") #' x <- mst_route(x, c(1, 3, 4), "-") #' x <- mst_obj(x, theta= 0, indices=c(1, 5)) #' x <- mst_obj(x, theta=-1, indices=c(2, 4)) #' x <- mst_obj(x, theta= 1, indices=c(3, 6)) #' x <- mst_constraint(x, "content", 4, 4, level=2) #' x <- mst_assemble(x, timeout=10) #' plot(x, byroute=FALSE) #' for(p in 1:x$num_panel) #' for(m in 1:x$num_module){ #' count <- sum(mst_get_items(x, panel_ix=p, module_ix=m)$content==2) #' cat('panel=', p, ', module=', m, ': ', count, ' items in content area #2\n', sep='') #' } #' #' ## ex.3: same as ex.2 (w/o content constraints), but group-based #' x <- mst(pool, "1-2-3", 2, 'bottomup', len=12, max_use=1, group="group") #' x <- mst_route(x, c(1, 2, 6), "-") #' x <- mst_route(x, c(1, 3, 4), "-") #' x <- mst_obj(x, theta= 0, indices=c(1, 5)) #' x <- mst_obj(x, theta=-1, indices=c(2, 4)) #' x <- mst_obj(x, theta= 1, indices=c(3, 6)) #' x <- mst_assemble(x, timeout=10) #' plot(x, byroute=FALSE) #' for(p in 1:x$num_panel) #' for(m in 1:x$num_module){ #' items <- mst_get_items(x, panel_ix=p, module_ix=m) #' cat('panel=', p, ', module=', m, ': ', length(unique(items$id)), ' items from ', #' length(unique(items$group)), ' groups\n', sep='') #' } #' #' ## ex.4: 2 panels of 1-2-3 top-down design #' ## 20 total items and 10 items in content area 3 #' ## 6+ items in stage 1 & 2 #' x <- mst(pool, "1-2-3", 2, "topdown", len=20, max_use=1) #' x <- mst_route(x, c(1, 2, 6), "-") #' x <- mst_route(x, c(1, 3, 4), "-") #' x <- mst_obj(x, theta=-1, indices=1) #' x <- mst_obj(x, theta=0, indices=2:3) #' x <- mst_obj(x, theta=1, indices=4) #' x <- mst_constraint(x, "content", 10, 10, level=3) #' x <- mst_stage_length(x, 1:2, min=6) #' x <- mst_assemble(x, timeout=15) #' head(x$items) #' plot(x, byroute=FALSE) #' for(p in 1:x$num_panel) #' for(s in 1:x$num_stage){ #' items <- mst_get_items(x, panel_ix=p, stage_ix=s) #' cat('panel=', p, ', stage=', s, ': ', length(unique(items$id)), ' items\n', sep='') #' } #' #' ## ex.5: same as ex.4, but use RDP instead of stage length to control routing errors #' x <- mst(pool, "1-2-3", 2, "topdown", len=20,
max_use=1) #' x <- mst_route(x, c(1, 2, 6), "-") #' x <- mst_route(x, c(1, 3, 4), "-") #' x <- mst_obj(x, theta=-1, indices=1) #' x <- mst_obj(x, theta=0, indices=2:3) #' x <- mst_obj(x, theta=1, indices=4) #' x <- mst_constraint(x, "content", 10, 10, level=3) #' x <- mst_rdp(x, 0, 2:3, .1) #' x <- mst_module_info(x, 0, min=5, max=NA, indices=2:3) #' x <- mst_assemble(x, timeout=15) #' plot(x, byroute=FALSE) #' } NULL #' @rdname mst #' @description \code{mst} creates a multistage (MST) object for assembly #' @param pool the item pool (data.frame) #' @param design the MST design (string): e.g., "1-3", "1-2-2", "1-2-3" #' @param num_panel the number of panels (integer) #' @param method the design method (string): 'topdown' or 'bottomup' #' @param len the module/route length (integer) #' @param max_use the maximum selection of items (integer) #' @param group the grouping variable (string or vector) #' @details #' There are two methods for designing an MST. The bottom-up approach adds objectives #' and constraints on individual modules, whereas the topdown approach adds objectives #' and constraints directly on routes. #' @export mst <- function(pool, design, num_panel, method=c('topdown', 'bottomup'), len=NULL, max_use=NULL, group=NULL, ...){ method <- match.arg(method) design <- as.integer(unlist(strsplit(design, split="-"))) num_stage <- length(design) num_module <- sum(design) opts <- list(...) if(is.null(opts$D)) opts$D <- 1.702 # module-index map module <- NULL for(s in 1:num_stage) for(m in 1:design[s]) module <- rbind(module, c(stage=s, module=m)) module <- data.frame(module, index=1:nrow(module)) # route-index map route <- list() for(i in 1:num_stage) route[[i]] <- module[module$stage == i, "index"] route <- expand.grid(route) colnames(route) <- paste("stage", 1:num_stage, sep="") route$index <- 1:nrow(route) num_route <- nrow(route) # ata x <- list(pool=pool, design=design, method=method, num_item=nrow(pool), num_panel=num_panel, num_stage=num_stage, num_module=num_module, num_route=num_route, module=module, route=route, ata=ata(pool, num_form=num_panel*num_module, group=group), opts=opts) class(x) <- "mst" # constraint: test length if(!is.null(len) && length(len) == 1) x <- mst_constraint(x, 1, len, len) if(!is.null(len) && length(len) == 2) x <- mst_constraint(x, 1, len[1], len[2]) if(!is.null(len) && length(len) > 2) stop("the length argument is too long.") # constraint: max_use if(!is.null(max_use)) x$ata <- ata_item_use(x$ata, max=max_use) # constraint: minimum stage length x <- mst_stage_length(x, 1:num_stage, min=1) x } #' @rdname mst #' @description \code{mst_route} adds/removes a route to/from the MST #' @param x the MST object #' @param route a MST route represented by a vector of module indices #' @param op "+" to add a route and "-" to remove a route #' @export mst_route <- function(x, route, op=c("+", "-")){ if(class(x) != "mst") stop("not a 'mst' object: ", class(x)) op <- match.arg(op) index <- apply(x$route[, 1:x$num_stage], 1, function(r) all(r == route)) if(op == "+") { if(any(index)) stop("the route already exists") if(!all(route %in% 1:x$num_module)) stop("invalid route: module index is out of bound.") x$route <- rbind(x$route, c(route, NA)) } else if(op == "-") { if(!any(index)) stop("the route hasn't been added yet") x$route <- x$route[!index, ] } # reindex routes by stages index <- apply(x$route[, 1:x$num_stage], 1, function(r) sum(r * 10^(x$num_stage - 1:x$num_stage))) x$route <- x$route[order(index), ] x$route$index <- 1:nrow(x$route) x$num_route <- nrow(x$route) x } #'
@rdname mst #' @description \code{mst_get_indices} maps the input indices to the actual indices #' @keywords internal mst_get_indices <- function(x, indices){ if(x$method == 'topdown'){ if(is.null(indices)) indices <- x$route[, 1:x$num_stage] else indices <- subset(x$route, x$route$index %in% indices)[, 1:x$num_stage] } else if(x$method == 'bottomup') { if(is.null(indices)) indices <- data.frame(module=1:x$num_module) else indices <- data.frame(module=indices) } indices } #' @rdname mst #' @description \code{mst_obj} adds objective functions to the MST #' @param theta a theta point or interval over which the TIF is optimized #' @param indices the indices of the route (topdown) or the module (bottomup) where objectives are added #' @param target the target values of the TIF objectives. \code{NULL} for maximization #' @export mst_obj <- function(x, theta, indices=NULL, target=NULL, ...) { if(class(x) != "mst") stop("not a 'mst' object: ", class(x)) indices <- mst_get_indices(x, indices) theta <- round(theta, 2) for(i in 1:x$num_panel) { for(j in 1:nrow(indices)) { f <- unlist(indices[j, ]) + (i - 1) * x$num_module if(is.null(target) || is.na(target)) { x$ata <- ata_obj_relative(x$ata, theta, mode="max", forms=f, collapse=TRUE, ...) } else { x$ata <- ata_obj_absolute(x$ata, theta, target=target, forms=f, collapse=TRUE, ...) } } } x } #' @rdname mst #' @description \code{mst_constraint} adds constraints to the MST #' @param coef the coefficients of the constraint #' @param level the constrained level, \code{NA} for quantitative variable #' @param min the lower bound of the constraint #' @param max the upper bound of the constraint #' @export mst_constraint <- function(x, coef, min=NA, max=NA, level=NULL, indices=NULL){ if(class(x) != "mst") stop("not a 'mst' object: ", class(x)) indices <- mst_get_indices(x, indices) for(i in 1:x$num_panel){ for(j in 1:nrow(indices)){ f <- unlist(indices[j,] + (i - 1) * x$num_module) x$ata <- ata_constraint(x$ata, coef, min, max, level, forms=f, collapse=TRUE) } } x } #' @rdname mst #' @description \code{mst_stage_length} sets length limits on stages #' @param stages the stage indices #' @export mst_stage_length <- function(x, stages, min=NA, max=NA){ if(class(x) != "mst") stop("not a 'mst' object: ", class(x)) if(length(min) == 1) min <- rep(min, length(stages)) if(length(max) == 1) max <- rep(max, length(stages)) if(length(stages) != length(min) || length(stages) != length(max)) stop("different lengths in stage, min and max") for(i in 1:length(stages)){ if(!stages[i] %in% 1:x$num_stage) stop("invalid stage input") f <- subset(x$module, x$module$stage == stages[i])$index f <- as.vector(outer(f, (1:x$num_panel - 1) * x$num_module, "+")) x$ata <- ata_constraint(x$ata, 1, min[i], max[i], forms=f, collapse=FALSE) } x } #' @rdname mst #' @description \code{mst_rdp} anchors the routing decision point (rdp) between adjacent modules #' @param tol tolerance parameter (numeric) #' @importFrom stats aggregate #' @export mst_rdp <- function(x, theta, indices, tol=0) { if(class(x) != "mst") stop("not a 'mst' object: ", class(x)) if(length(theta) != 1) stop("rdp is not a single theta point") if(length(indices) != 2 || abs(indices[1] - indices[2]) != 1) stop("modules are not adjacent") info <- round(aggregate(model_3pl_info(theta, x$pool$a, x$pool$b, x$pool$c, D=x$opts$D)[1, ], by=list(group=x$ata$group), sum)[, 2], 2) coef <- c(info, -1 * info) for(i in 1:x$num_panel) x$ata <- ata_constraint(x$ata, coef, -tol, tol, forms=indices + (i - 1) * x$num_module, collapse=TRUE) x } 
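## A sketch (added for illustration; not in the original source) of what
## mst_rdp() adds to the model: with coefficients c(info, -info) over two
## adjacent modules and bounds (-tol, tol), the constraint reads
## |TIF_module1(theta) - TIF_module2(theta)| <= tol, i.e., the information
## curves of the two modules are forced to cross near the routing point.
if(FALSE){
  x <- mst(pool, "1-2-2", 1, 'topdown', len=20, max_use=1)
  x <- mst_rdp(x, theta=0, indices=2:3, tol=.5)  # anchor the stage-2 RDP at 0
}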
#' @rdname mst #' @description \code{mst_module_info} sets lower and upper bounds on module information #' @param thetas theta points, a vector #' @importFrom stats aggregate #' @export mst_module_info <- function(x, thetas, min, max, indices) { if(class(x) != "mst") stop("not a 'mst' object: ", class(x)) if(any(indices < 1 | indices > x$num_module)) stop("invalid module index") if(length(min) == 1) min <- rep(min, length(thetas)) if(length(max) == 1) max <- rep(max, length(thetas)) if(length(min) != length(thetas) || length(max) != length(thetas)) stop('min/max has a different length from thetas') for(i in 1:length(thetas)){ info <- with(x$pool, model_3pl_info(thetas[i], a, b, c, D=x$opts$D))[1, ] coef <- aggregate(info, by=list(group=x$ata$group), sum)[, 2] coef <- round(coef, 2) for(j in 1:x$num_panel) x$ata <- ata_constraint(x$ata, coef, min=min[i], max=max[i], forms=indices+(j-1)*x$num_module) } x } #' @rdname mst #' @description \code{mst_assemble} assembles the MST #' @export mst_assemble <- function(x, ...){ if(class(x) != "mst") stop("not a 'mst' object: ", class(x)) opts <- list(...) solver <- ifelse(is.null(opts$solver), 'lpsolve', opts$solver) x$ata <- ata_solve(x$ata, as.list=FALSE, ...) if(!is.null(x$ata$items)) { items <- x$ata$items items$module <- (items$form - 1) %% x$num_module + 1 items$panel <- ceiling(items$form / x$num_module) items$stage <- x$module$stage[match(items$module, x$module$index)] items$form <- NULL x$items <- items } x } #' @rdname mst #' @param ... further arguments #' @export print.mst <- function(x, ...){ cat("The MST design has", x$num_stage, "stages,", x$num_module, "modules, and", x$num_route, "routes:\n") cat("route map:\n") print(x$route) if(!is.null(x$items)){ cat("\nAssembled forms:\n") items <- x$items if(!is.data.frame(x$items)) items <- Reduce(rbind, items, NULL) if(nrow(items) > 10){ print(items[1:5, ]) cat("...\n") print(items[-4:0 + nrow(items),]) } else { print(items) } cat("See more results in 'items'.") } else { cat("MST hasn't been assembled yet.") } invisible(x) } #' @rdname mst #' @details #' \code{plot.mst} draws module information functions when \code{byroute=FALSE} #' and route information functions when \code{byroute=TRUE} #' @import ggplot2 #' @export plot.mst <- function(x, ...){ if(class(x) != "mst") stop("not a 'mst' object: ", class(x)) if(is.null(x$items)) stop('the mst has not been assembled yet.') opts <- list(...) if(is.null(opts$byroute)) opts$byroute <- FALSE if(is.null(opts$theta)) opts$theta <- round(seq(-3, 3, .1), 1) data <- NULL if(opts$byroute) { for(i in 1:x$num_route){ for(j in 1:x$num_panel){ items <- mst_get_items(x, panel_ix=j, route_ix=i) info <- with(items, rowSums(model_3pl_info(opts$theta, a, b, c, D=x$opts$D))) data <- rbind(data, data.frame(t=opts$theta, info=info, panel=j, route=i)) } } data$panel <- factor(paste("Panel", data$panel)) data$route <- factor(data$route, levels=1:x$num_route, labels=apply(x$route[, 1:x$num_stage], 1, paste, collapse="-")) g <- ggplot(data, aes_string(x="t", y="info", color="route")) + geom_line() + xlab(expression(theta)) + ylab("Information") + theme_bw() + theme(legend.key=element_blank()) + guides(color=guide_legend("Routes")) + facet_grid(.
~ panel) } else { for(i in 1:x$num_panel){ for(j in 1:x$num_module){ items <- mst_get_items(x, panel_ix=i, module_ix=j) info <- with(items, rowSums(model_3pl_info(opts$theta, a, b, c, D=x$opts$D))) data <- rbind(data, data.frame(t=opts$theta, info=info, panel=items$panel[1], stage=items$stage[1], module=items$module[1])) } } data$panel <- factor(paste("Panel", data$panel)) data$stage <- factor(paste("Stage", data$stage)) data$module <- factor(paste("Module", data$module)) g <- ggplot(data, aes_string(x="t", y="info", color="module")) + geom_line() + xlab(expression(theta)) + ylab("Information") + theme_bw() + theme(legend.key=element_blank()) + guides(color=guide_legend("Modules")) + facet_grid(panel ~ stage) } g } #' @rdname mst #' @description \code{mst_get_items} extracts items from the assembly results #' @param panel_ix the panel index, an int vector #' @param stage_ix the stage index, an int vector #' @param module_ix the module index, an int vector #' @param route_ix the route index, an integer #' @export mst_get_items <- function(x, panel_ix=NULL, stage_ix=NULL, module_ix=NULL, route_ix=NULL){ if(class(x) != "mst") stop("not a 'mst' object: ", class(x)) if(is.null(x$items)) stop('the mst has not been assembled yet.') items <- x$items if(!is.null(panel_ix)) items <- subset(items, items$panel %in% panel_ix) if(!is.null(stage_ix)) items <- subset(items, items$stage %in% stage_ix) if(!is.null(module_ix)) items <- subset(items, items$module %in% module_ix) if(!is.null(route_ix)) items <- subset(items, items$module %in% unlist(x$route[x$route$index == route_ix, 1:x$num_stage])) items }
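
# An illustrative sketch, not part of the package examples: assuming 'x' is an
# assembled design (e.g. from the mst_sim examples), mst_get_items() pulls item
# sets by panel, stage, module, or route. Wrapped in `if (FALSE)` so it never
# runs at package load.
if (FALSE) {
  stage2 <- mst_get_items(x, panel_ix = 1, stage_ix = 2)  # stage-2 items of panel 1
  route1 <- mst_get_items(x, panel_ix = 1, route_ix = 1)  # all items along route 1
}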
/scratch/gouwar.j/cran-all/cranData/xxIRT/R/module5_mst.R
#' Simulation of Multistage Testing #' @name mst_sim #' @examples #' \dontrun{ #' ## assemble a MST #' nitems <- 200 #' pool <- with(model_3pl_gendata(1, nitems), data.frame(a=a, b=b, c=c)) #' pool$content <- sample(1:3, nrow(pool), replace=TRUE) #' x <- mst(pool, "1-2-2", 2, 'topdown', len=20, max_use=1) #' x <- mst_obj(x, theta=-1, indices=1) #' x <- mst_obj(x, theta=0, indices=2:3) #' x <- mst_obj(x, theta=1, indices=4) #' x <- mst_constraint(x, "content", 6, 6, level=1) #' x <- mst_constraint(x, "content", 6, 6, level=2) #' x <- mst_constraint(x, "content", 8, 8, level=3) #' x <- mst_stage_length(x, 1:2, min=5) #' x <- mst_assemble(x) #' #' ## ex. 1: administer the MST using fixed RDP for routing #' x_sim <- mst_sim(x, .5, list(stage1=0, stage2=0)) #' plot(x_sim) #' #' ## ex. 2: administer the MST using the max. info. for routing #' x_sim <- mst_sim(x, .5) #' plot(x_sim, ylim=c(-5, 5)) #' } NULL #' @description \code{mst_sim} simulates a MST administration #' @param x the assembled MST #' @param true the true theta parameter (numeric) #' @param rdp routing decision points (list) #' @param ... additional option/control parameters #' @importFrom stats runif #' @export mst_sim <- function(x, true, rdp=NULL, ...){ if(class(x) != "mst") stop("not a 'mst' object: ", class(x)) if(is.null(x$items)) stop("the mst has not been assembled yet") opts <- list(...) if(is.null(opts$t_prior)) prior <- NULL else prior <- list(t=opts$t_prior) # inits if(is.null(opts$panel)) opts$panel <- sample(1:x$num_panel, 1) panel_items <- mst_get_items(x, panel_ix=opts$panel) theta <- ifelse(is.null(opts$theta), 0, opts$theta) admin <- NULL stats <- matrix(nrow=x$num_stage, ncol=4, dimnames=list(NULL, c("route", "t", "info", "se"))) # routing decision points if(!is.null(rdp)) { if(length(rdp) != x$num_stage - 1) stop("invalid routing decision points.") rdp <- Reduce(rbind, lapply(rdp, function(x) data.frame(lower=c(-Inf, x), upper=c(x, Inf)))) rdp$index <- 2:x$num_module } # MST administration for(i in 1:x$num_stage){ # select module if(i == 1) { next_module <- unique(x$route[, i]) next_module <- sample(next_module, 1) } else { next_module <- x$route[x$route[, i-1] == stats[i-1, "route"], i] next_module <- sort(unique(next_module)) if(is.null(rdp)) { info <- model_3pl_info(theta, panel_items$a, panel_items$b, panel_items$c)[1, ] info <- aggregate(info, by=list(module=panel_items$module), sum) info <- info[info$module %in% next_module, ] next_module <- info$module[which.max(info$x)] } else { module_rdp <- subset(rdp, rdp$index %in% next_module) module_rdp$lower[1] <- -Inf module_rdp$upper[nrow(module_rdp)] <- Inf next_module <- min(subset(module_rdp, theta < module_rdp$upper)$index) } } # generate responses items <- subset(panel_items, panel_items$stage == i & panel_items$module == next_module) rsp <- as.integer(model_3pl_prob(true, items$a, items$b, items$c)[1, ] >= runif(nrow(items))) admin <- rbind(admin, cbind(items, rsp=rsp)) # estimate ability theta <- model_3pl_estimate_jmle(matrix(rep(admin$rsp, each=2), nrow=2), a=admin$a, b=admin$b, c=admin$c, scale=NULL, priors=prior)$t[1] info <- sum(model_3pl_info(theta, admin$a, admin$b, admin$c)) se <- 1 / sqrt(info) stats[i, c('route', 't', 'info', 'se')] <- c(next_module, theta, info, se) } stats <- as.data.frame(stats) stats$nitems <- sapply(stats$route, function(xx) sum(admin$module == xx)) rs <- list(panel=panel_items, admin=admin, stats=stats, true=true, theta=theta) class(rs) <- "mst_sim" rs } #' @rdname mst_sim #' @export print.mst_sim <- function(x, ...){ 
cat("mst simulation: true=", round(x$true, 2), ", est.=", round(x$theta, 2), ":\n", sep="") print(round(x$stats, 2)) cat("Call x$admin to see administered items ('x' is the mst_sim object).\n") } #' @rdname mst_sim #' @importFrom stats qnorm #' @import ggplot2 #' @export plot.mst_sim <- function(x, ...) { opts <- list(...) if(is.null(opts$ci_width)) opts$ci_width <- qnorm(.975) if(is.null(opts$ylim)) opts$ylim <- c(-3, 3) x$admin$Position <- seq(nrow(x$admin)) x$admin$Responses <- factor(x$admin$rsp, levels=c(0, 1), labels=c('Wrong', 'Right')) x$stats$lb <- x$stats$t - opts$ci_width * x$stats$se x$stats$ub <- x$stats$t + opts$ci_width * x$stats$se x$stats$position <- cumsum(x$stats$nitems) ggplot(x$admin, aes_string(x="Position", y="b")) + geom_point(aes_string(size="a", color="Responses")) + geom_pointrange(data=x$stats, aes_string(x="position", y="t", ymin="lb", ymax="ub"), lty=2, pch=4, col="coral") + xlab("Position") + ylab("Item Difficulty") + guides(size=F, fill=F) + coord_cartesian(ylim=opts$ylim) + scale_size_continuous(range=c(1, 3)) + theme_bw() }
/scratch/gouwar.j/cran-all/cranData/xxIRT/R/module5_mst_sim.R
#' @useDynLib xxhashlite, .registration=TRUE
NULL
/scratch/gouwar.j/cran-all/cranData/xxhashlite/R/aaa.R
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #' Calculate the hash of an arbitrary R object. #' #' This function will calculate the hash of any object understood by #' \code{base::serialize()}. #' #' @param robj Any R object #' @param algo Select the specific xxhash algorithm. Default: 'xxh128'. #' (the latest algorithm in the xxhash family) #' Valid values: 'xxh32', 'xxh64', 'xxh128', 'xxh3' #' @param as_raw Return the hash as a raw vector of bytes instead of string? #' Default: FALSE. If TRUE, then the raw bytes are returned in big-endian #' order - which is what \code{xxHash} considers the \emph{canonical} form. #' #' @return String representation of hash. If \code{as_raw = TRUE} then a #' raw vector is returned instead. #' #' @export #' #' @examples #' xxhash(mtcars) #' xxhash(mtcars, algo = 'xxh3', as_raw = TRUE) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ xxhash <- function(robj, algo = 'xxh128', as_raw = FALSE) { .Call(xxhash_, robj, algo, as_raw) } #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #' Calculate the hash of a raw vector or string #' #' This performs a hash of the raw bytes - not of the serialized representation. #' #' @inheritParams xxhash #' @param vec raw vector or single character string #' #' @return String representation of hash. If \code{as_raw = TRUE} then a #' raw vector is returned instead. #' #' @export #' #' @examples #' vec <- "hello" #' xxhash_raw(vec) #' vec <- as.raw(c(0x01, 0x02, 0x99)) #' xxhash_raw(vec) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ xxhash_raw <- function(vec, algo = 'xxh128', as_raw = FALSE) { .Call(xxhash_raw_, vec, algo, as_raw) } #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #' Calculate the hash of a file #' #' @inheritParams xxhash_raw #' @param file filename #' #' @return String representation of hash. If \code{as_raw = TRUE} then a #' raw vector is returned instead. #' #' @export #' #' @examples #' filename <- system.file('DESCRIPTION', package = 'base', mustWork = TRUE) #' xxhash_file(filename) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ xxhash_file <- function(file, algo = 'xxh128', as_raw = FALSE) { .Call(xxhash_file_, normalizePath(file), algo, as_raw) }
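
# A small illustrative sketch (not exported examples): the same bytes yield a
# different digest under each supported algorithm.
if (FALSE) {
  sapply(c('xxh32', 'xxh64', 'xxh128', 'xxh3'),
         function(a) xxhash_raw("hello", algo = a))
}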
/scratch/gouwar.j/cran-all/cranData/xxhashlite/R/xxhash.R
cal_entropy <- function(x) {
  # Shannon entropy of the label frequencies in x, normalized by log(length(x))
  freqs <- table(x)/length(x)
  freqs <- freqs[freqs > 0]
  return(-sum(freqs * log(freqs))/log(length(x)))
}
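
# A minimal sketch of the helper: a single repeated label has zero normalized
# entropy; mixed labels score higher.
if (FALSE) {
  cal_entropy(rep("a", 10))                               # 0
  cal_entropy(sample(letters[1:4], 100, replace = TRUE))  # > 0
}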
/scratch/gouwar.j/cran-all/cranData/yaConsensus/R/cal_entropy.R
ccolors <- function (k) {
  # k distinct colors: the standard palette for small k; for k > 7, evenly
  # spaced entries from colors() after dropping grays, greys, and white
  if (k > 7) {
    tmp <- grep("gray", colors())
    tmp <- c(tmp, grep("grey", colors()))
    tmp <- c(tmp, grep("white", colors()))
    tmp <- colors()[-tmp]
    tmp <- tmp[(1:k) * floor(length(tmp)/k)]
  } else tmp <- palette()[1:k]
  return(tmp)
}
/scratch/gouwar.j/cran-all/cranData/yaConsensus/R/ccolors.R
consensus.byFeature <- function(ddata, runs = 1000, epsilon = 0.1, # ddata is a sample X gene matrix distMethod = "euclidean", hcMethod = "ward.D2", prefix = NULL) { # if(class(ddata) != "matrix") stop("The input data is not a data matrix.") # suppressPackageStartupMessages(require(foreach)) mm <- floor(epsilon * ncol(ddata)) llabels <- rownames(ddata) ttime <- system.time({ hhclust <- foreach(k = 1:runs) %dopar% { these_features <- sample.int(ncol(ddata), mm, replace = FALSE) ddist <- dist(ddata[, these_features], method = distMethod) return(hclust(ddist, method = hcMethod))} })[3] ans <- list() ans$distMethod <- distMethod ans$hcMethod <- hcMethod ans$labels <- llabels ans$bySample <- FALSE ans$epsilon <- epsilon ans$subsetDimension <- mm ans$runs <- runs ans$hclust <- hhclust names(ttime) <- NULL ans$elapsed_time <- ttime ans$ncores <- getDoParWorkers() class(ans) <- "yaConsensus" if(!is.null(prefix)) { ans$fname <- paste0(prefix, "_yaConsensus_eps", ans$epsilon * 100, "pct_runs", ans$runs, "_byFeatures.RData") aConsensus <- ans save(aConsensus, file = ans$fname) message("yaConsensus data structure saved in ", ans$fname) } invisible(ans) }
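
# A minimal sketch, assuming 'ddata' is a samples x features numeric matrix
# and that a %dopar% backend has been registered (e.g. with doParallel), which
# the foreach loop above requires.
if (FALSE) {
  library(doParallel)
  registerDoParallel(2)
  cons <- consensus.byFeature(ddata, runs = 200, epsilon = 0.1)
}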
/scratch/gouwar.j/cran-all/cranData/yaConsensus/R/consensus.byFeature.R
consensus.bySample <- function (ddata, runs = 1000, epsilon = .65, distMethod = "euclidean", hcMethod = "ward.D2", prefix = NULL) { # if (class(ddata) != "dist") stop("A 'distance' object is required.") # require(foreach) if ("dist" %in% class(ddata)) { n <- attr(ddata, "Size") mm <- floor(epsilon * n) distMethod <- attr(ddata, "method") llabels <- attr(ddata, "Labels") ttime <- system.time({ hhclust <- foreach(k = 1:runs) %dopar% { these_samples <- sample.int(n, mm, replace = FALSE) tmp <- hclust(as.dist(as.matrix(ddata)[these_samples, these_samples]), method = hcMethod) return(tmp)} })[3] } else { n <- nrow(ddata) mm <- floor(epsilon * n) llabels <- rownames(ddata) ttime <- system.time({ hhclust <- foreach(k = 1:runs) %dopar% { these_samples <- sample.int(nrow(ddata), mm, replace = FALSE) ddist <- dist(ddata[these_samples,], method = distMethod) return(hclust(ddist, method = hcMethod))} })[3] } ans <- list() ans$distMethod <- distMethod ans$hcMethod <- hcMethod ans$labels <- llabels ans$bySample <- TRUE ans$epsilon <- epsilon ans$subsetDimension <- mm ans$runs <- runs ans$samples <- NULL ans$hclust <- hhclust names(ttime) <- NULL ans$elapsed_time <- ttime ans$ncores <- getDoParWorkers() class(ans) <- "yaConsensus" if(!is.null(prefix)) { ans$fname <- paste0(prefix, "_yaConsensus_eps", ans$epsilon * 100, "pct_runs", ans$runs, "_bySamples.RData") aConsensus <- ans save(aConsensus, file = ans$fname) message("yaConsensus data structure saved in ", ans$fname) } #attr(ans,"package") invisible(ans) }
/scratch/gouwar.j/cran-all/cranData/yaConsensus/R/consensus.bySample.R
consensus.diss <- function (cclusters, similarity = FALSE) {
  # cclusters: an H x n integer matrix of cluster labels, one clustering per
  # row and one sample per column; values < 1 mark samples not drawn in a run
  n <- ncol(cclusters)
  H <- nrow(cclusters)
  m <- n * (n - 1)/2
  tmp <- rep(0, m)
  storage.mode(cclusters) <- "integer"
  storage.mode(n) <- "integer"
  storage.mode(H) <- "integer"
  storage.mode(m) <- "integer"
  storage.mode(tmp) <- "integer"
  # count, for each pair of samples, the runs that placed them in the same cluster
  ans <- .Fortran("cconsensus", cclusters, n, H, tmp, m)[[4]]
  ans <- as.double(ans)
  if(sum(cclusters < 1) > 0) {
    # with subsampling, normalize by the number of runs in which both
    # members of the pair were actually clustered
    cclusters <- 0 + (cclusters > 0)
    tmp <- rep(0, m)
    storage.mode(cclusters) <- "integer"
    storage.mode(tmp) <- "integer"
    tmp <- .Fortran("cconsensus", cclusters, n, H, tmp, m)[[4]]
    tmp <- as.double(tmp)
  } else tmp <- H
  ans <- ans/tmp
  if(!similarity) ans <- 1 - ans
  attr(ans, "Size") <- n
  attr(ans, "Labels") <- colnames(cclusters)
  attr(ans, "Diag") <- FALSE
  attr(ans, "method") <- "consensus"
  class(ans) <- "dist"
  return(ans)
}
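
# A minimal sketch: given such an H x n label matrix 'cclusters', the result
# is a 'dist' object usable directly by hclust().
if (FALSE) {
  diss <- consensus.diss(cclusters)
  hc <- hclust(diss, method = "complete")
}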
/scratch/gouwar.j/cran-all/cranData/yaConsensus/R/consensus.diss.R
get_cluster_accuracy <- function(theoretical, estimated) {
  # 1 minus the mean normalized entropy of the theoretical labels
  # within each estimated cluster
  return(1 - mean(unlist(lapply(unique(estimated), function(x) {
    cal_entropy(theoretical[estimated == x])
  })), na.rm = TRUE))
}
/scratch/gouwar.j/cran-all/cranData/yaConsensus/R/get_cluster_accuracy.R
get_cluster_purity <- function(theoretical, estimated) {
  # 1 minus the mean normalized entropy of the estimated labels
  # within each theoretical cluster
  return(1 - mean(unlist(lapply(unique(theoretical), function(x) {
    cal_entropy(estimated[theoretical == x])
  })), na.rm = TRUE))
}
/scratch/gouwar.j/cran-all/cranData/yaConsensus/R/get_cluster_purity.R
get_consensus_dissimilarity <- function (obj, G = 2) {
  # cut every stored clustering at G groups (samples missing from a run keep
  # label 0) and replace the list of hclust objects with the consensus
  # dissimilarity they imply
  ans_template <- rep(0, length(obj$labels))
  names(ans_template) <- obj$labels
  clusters <- sapply(obj$hclust, function(x) {
    ans_tmp <- ans_template
    tmp <- cutree(x, k = G)
    ans_tmp[names(tmp)] <- tmp
    return(ans_tmp)
  })
  obj$consensus.diss <- consensus.diss(t(clusters))
  obj$G <- G
  obj$hclust <- NULL
  invisible(obj)
}
/scratch/gouwar.j/cran-all/cranData/yaConsensus/R/get_consensus_dissimilarity.R
get_metrics <- function(theoretical, estimated, verbose = TRUE) {
  eca <- get_cluster_accuracy(theoretical, estimated)
  ecp <- get_cluster_purity(theoretical, estimated)
  avg <- sqrt(eca * ecp)
  thNofC <- length(unique(theoretical))
  estNofC <- length(unique(estimated))
  ans <- c(round(c(eca, ecp, avg), 4), thNofC, estNofC)
  names(ans) <- c("eca", "ecp", "average", "thNofC", "estNofC")
  if(verbose) {
    message("cluster accuracy (eca): ", ans[1])
    message("cluster purity (ecp): ", ans[2])
    message("G index (geometric average of eca and ecp): ", ans[3])
    message("no. of clusters in theoretical partition: ", ans[4])
    message("no. of clusters in estimated partition: ", ans[5])
  }
  return(ans)
}
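
# A minimal sketch on toy partitions: two estimated clusters recovering two
# theoretical groups, with a couple of misplaced samples.
if (FALSE) {
  theo <- rep(c("A", "B"), each = 10)
  est  <- c(rep(1, 10), rep(2, 8), rep(1, 2))
  get_metrics(theo, est)
}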
/scratch/gouwar.j/cran-all/cranData/yaConsensus/R/get_metrics.R
match_colors <- function(refClust, consClust) {
  # map each consensus cluster to the reference cluster it overlaps most
  ttable <- table(consClust, refClust)
  ttable <- ttable/rowSums(ttable)
  whichMaxTable <- apply(ttable, 1, which.max)
  ans <- colnames(ttable)[whichMaxTable]
  names(ans) <- rownames(ttable)
  return(ans)
}
/scratch/gouwar.j/cran-all/cranData/yaConsensus/R/match_colors.R
plot.yaConsensus <- function (x, G = 2, annotation = NULL, annotation.colorCode = NULL, matching_clustering = NULL, consensus_colors = NULL, ...) { if(!is.null(annotation) & is.null(annotation.colorCode)) stop("annotation.colorCode required.") x <- get_consensus_dissimilarity(x, G = G) # suppressPackageStartupMessages(require(pheatmap)) hhc <- hclust(x$consensus.diss, method = "complete") clust <- factor(cutree(hhc, k = G)) levels(clust) <- paste0("cc", levels(clust)) annotation_col <- annotation_row <- data.frame(consensus = clust, row.names = names(clust)) if(!is.null(consensus_colors)) clust.col <- consensus_colors else if(is.null(matching_clustering)) clust.col <- ccolors(G) else { if(matching_clustering %in% colnames(annotation)) { wwhich <- which(colnames(annotation) == matching_clustering) clust.col <- match_colors(annotation[, wwhich], clust) clust.col <- annotation.colorCode[clust.col] } else stop("The matching_color provided does not match any of the column in the annotation." ) } names(clust.col) <- levels(clust) ann_colors <- list(consensus = clust.col) if(!is.null(annotation)) for(k in 1:ncol(annotation)) { annotation_row$tmp <- factor(annotation[,k]) ann_colors$tmp <- annotation.colorCode[levels(annotation_row$tmp)] colnames(annotation_row)[ncol(annotation_row)] <- colnames(annotation)[k] names(ann_colors)[length(ann_colors)] <- colnames(annotation)[k] } this_color <- ifelse(x$bySample, "red2", "royalblue") tmp <- 1- as.matrix(x$consensus.diss)[hhc$order,] pheatmap(tmp, cluster_rows = FALSE, show_rownames = FALSE, cluster_cols = hhc, cutree_cols = G, show_colnames = FALSE, annotation_col=annotation_col, annotation_row=annotation_row, annotation_colors = ann_colors, color = colorRampPalette(c("white", this_color))(50), xlab = "ste") annotation_row$consensus.col <- clust levels(annotation_row$consensus.col) <- clust.col[annotation_row$consensus.col] ans <- list(annotation = annotation_row, ann_colors = ann_colors, hclust = hhc, statistics = print.yaConsensus(x, verbose = FALSE) ) class(ans) <- "yaConsensus_plot" invisible(ans) }
/scratch/gouwar.j/cran-all/cranData/yaConsensus/R/plot.yaConsensus.R
print.yaConsensus <- function(x, verbose = TRUE, ...) {
  results <- list('consensus type:' = ifelse(x$bySample, " sampling of samples", " sampling of features"),
                  'sampling rate: ' = paste0(round(100*x$epsilon, 2), "%"),
                  'number of samplings: ' = x$runs,
                  'inner distance: ' = x$distMethod,
                  'inner hc method: ' = x$hcMethod,
                  'outer distance: ' = "consensus",
                  'outer hc method: ' = "complete",
                  'running time (seconds): ' = round(x$elapsed_time, 4),
                  'number of cores: ' = x$ncores)
  if(verbose) {
    message("yaConsensus statistics")
    for(k in 1:length(results)) message("\t", names(results)[k], results[[k]])
  }
  invisible(results)
}
/scratch/gouwar.j/cran-all/cranData/yaConsensus/R/print.yaConsensus.R
print.yaConsensus_plot <- function(x, verbose = TRUE, ...) {
  results <- x$statistics
  if(verbose) {
    message("yaConsensus statistics")
    for(k in 1:length(results)) message("\t", names(results)[k], results[[k]])
  }
  invisible(results)
}
/scratch/gouwar.j/cran-all/cranData/yaConsensus/R/print.yaConsensus_plot.R
summary.yaConsensus <- function(object, verbose = TRUE, ...) {
  print.yaConsensus(object, verbose = verbose)
}
/scratch/gouwar.j/cran-all/cranData/yaConsensus/R/summary.yaConsensus.R
summary.yaConsensus_plot <- function(object, given = NULL, ...) {
  if(is.null(given)) results <- print.yaConsensus_plot(object)
  else {
    results <- print.yaConsensus_plot(object, verbose = FALSE)
    j <- 1
    if(!(given[j] %in% colnames(object$annotation)))
      stop(given[j], " not found as a-priori clustering.")
    wwhich <- which(colnames(object$annotation) == given[j])
    tmp <- get_metrics(object$annotation[, wwhich], object$annotation$consensus, verbose = FALSE)
    results$'entropy consensus accuracy' <- paste0(round(100 * tmp[1], 2), "%")
    results$'entropy consensus precision' <- paste0(round(100 * tmp[2], 2), "%")
    results$'entropy consensus (average)' <- paste0(round(100 * tmp[3], 2), "%")
    names(results)[length(results)-2] <- paste0(names(results)[length(results)-2], ", given ", given[j], ": ")
    names(results)[length(results)-1] <- paste0(names(results)[length(results)-1], ", given ", given[j], ": ")
    names(results)[length(results)]   <- paste0(names(results)[length(results)], ", given ", given[j], ": ")
    message("yaConsensus statistics")
    for(k in 1:length(results)) message("\t", names(results)[k], results[[k]])
  }
  # return the (possibly augmented) statistics invisibly
  invisible(results)
}
/scratch/gouwar.j/cran-all/cranData/yaConsensus/R/summary.yaConsensus_plot.R
yaConsensus <- function(ddata, runs = 1000, epsilon = .65, is_by_sample = TRUE,
                        distMethod = "euclidean", hcMethod = "ward.D2", prefix = NULL) {
  if(is_by_sample)
    ans <- consensus.bySample(ddata, runs = runs, epsilon = epsilon,
                              hcMethod = hcMethod, distMethod = distMethod, prefix = prefix)
  else
    ans <- consensus.byFeature(ddata, runs = runs, epsilon = epsilon,
                               distMethod = distMethod, hcMethod = hcMethod, prefix = prefix)
  invisible(ans)
}
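
# A minimal end-to-end sketch, assuming a samples x features matrix 'ddata'
# and a registered %dopar% backend; plot() cuts the consensus tree at G groups.
if (FALSE) {
  library(doParallel)
  registerDoParallel(2)
  cons <- yaConsensus(dist(ddata), runs = 200, epsilon = 0.65)
  summary(cons)
  gg <- plot(cons, G = 3)
}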
/scratch/gouwar.j/cran-all/cranData/yaConsensus/R/yaConsensus.R
"ann" <- function(ref, target, k=1, eps=0.0, tree.type="kd", search.type="standard", bucket.size=1, split.rule="sl_midpt", shrink.rule="simple", verbose=TRUE, ...){ formal.args <- names(formals(sys.function(sys.parent()))) elip.args <- names(list(...)) for(i in elip.args){ if(! i %in% formal.args) warning("'",i, "' is not an argument") } if(missing(ref)){stop("error: ref must be specified")} if(missing(target)){stop("error: target must be specified")} if(! is.matrix(ref)){stop("error: ref must be matrix")} if(! is.matrix(target)){stop("error: target must be matrix")} if(k <= 0){stop("error: k must be int > 0")} if(eps < 0.0){stop("error: eps must be int >= 0.0")} if(!tree.type %in% c("kd", "bd", "brute")){stop("error: tree.type must be 'kd', 'bd', or 'brute'")} if(!search.type %in% c("standard", "priority")){stop("error: tree.type must be 'standard' or 'priority'")} pri <- FALSE if(search.type == "priority") pri <- TRUE if(nrow(ref) == 0 || nrow(target) == 0){stop("error: nrow(ref) and nrow(target) must be > 0")} if(ncol(ref) != ncol(target)){stop("error: ncol(ref) must equal ncol(target)")} if(k > nrow(ref)){stop("error: k must be <= nrow(ref)")} if(bucket.size <=0){stop("error: bucket.size must be > 0")} split <- list("standard"=0, "midpt"=1, "fair"=2, "sl_midpt"=3, "sl_fair"=4, "suggest"=5) if(!split.rule %in% names(split)){stop("error: ",split.rule ," is not a valid split rule, choices are 'standard', 'midpt', 'fair', 'sl_midptm', 'sl_fair', and 'suggest'")} split.rule <- split[[split.rule]] shrink <- list("none"=0, "simple"=1, "centroid"=2, "suggest"=3) if(!shrink.rule %in% names(shrink)){stop("error: ",shrink.rule ," is not a valid shrink rule for the bd-tree, choices are 'none', 'simple', 'centroid', and 'suggest'")} shrink.rule <- shrink[[shrink.rule]] storage.mode(ref) <- storage.mode(target) <- "double" if(tree.type == "bd" && any(duplicated(ref))){stop("error: duplicate pattern found in the 'ref' matrix; therefore, bd-tree cannot be used")} args <- list("ref"=ref, "target"=target, "k"=as.integer(k), "eps"=as.double(eps), "tree.type"=tree.type, "priority"=as.integer(pri), "bucket.size"=as.integer(bucket.size), "split.rule"=as.integer(split.rule), "shrink.rule"=as.integer(shrink.rule), "verbose"=as.integer(verbose)) out <- .Call("annf", args) out$k <- k out$tree.type <- tree.type if(tree.type %in% c("kd", "bd")){ out$eps <- eps out$search.type <- search.type out$bucket.size <- bucket.size out$split.rule <- split.rule if(tree.type == "bd") out$shrink.rule <- shrink.rule } class(out) <- "ann" out }
/scratch/gouwar.j/cran-all/cranData/yaImpute/R/ann.R
applyMask <- function (object, refGroups=NULL, trgGroups=NULL, method="removeWhenCommon", k=1) { if(!inherits(object, "yai")) stop("object must be of class yai") valid <- c("removeWhenCommon","keepWhenCommon") if (is.na(match(method,valid))) stop (paste("method must be one of",paste(valid,collapse<-", "))) if (is.null(refGroups) | is.null(trgGroups)) stop("refGroups and trgGroups must be defined") if (k >= object$k) stop("new value of k (",k,") must be less than old value (",object$k,")") object$call <- match.call() refGrp <- refGroups[match(object$neiIdsTrgs,rownames(object$xRefs))] lrefGrp <- if (method == "removeWhenCommon") refGrp != trgGroups else refGrp == trgGroups dim(lrefGrp) <- dim(object$neiIdsTrgs) # tvec is an offset in the storage of neiIdsTrgs and neiDstTrgs. At this point # The kth member is the offset of the first row of the kth column tvec <- 0:(ncol(lrefGrp)-1) * nrow(lrefGrp) # ans is the value of tvec corresponding the the columns to keep for each row. ans <- apply(lrefGrp,1,function(x,tvec,k) tvec[x][1:k],tvec,k) # if k>1, we need to reorganize ans and delete the dimensions so it is a vector. if (k>1) { ans <- t(ans) dim(ans) <- NULL } # now add the row numbers to ans...to get the final offsets. ans <- rep(1:nrow(lrefGrp),k) + ans rnB <- rownames(object$neiIdsTrgs) cnI <- colnames(object$neiIdsTrgs)[1:k] cnD <- colnames(object$neiDstTrgs)[1:k] object$neiIdsTrgs <- object$neiIdsTrg[ans] object$neiDstTrgs <- object$neiDstTrg[ans] dim (object$neiIdsTrgs) <- c(nrow(lrefGrp),k) dim (object$neiDstTrgs) <- c(nrow(lrefGrp),k) rownames(object$neiIdsTrgs) <- rnB rownames(object$neiDstTrgs) <- rnB colnames(object$neiIdsTrgs) <- cnI colnames(object$neiDstTrgs) <- cnD object$k <- k object$noRefs <- TRUE object$neiIdsRefs <- NULL object$neiDstRefs <- NULL object }
/scratch/gouwar.j/cran-all/cranData/yaImpute/R/applyMask.R
AsciiGridPredict <- function(object,xfiles,outfiles,xtypes=NULL,lon=NULL, lat=NULL,rows=NULL,cols=NULL, nodata=NULL,myPredFunc=NULL,...) { if (missing(xfiles) || is.null(xfiles)) stop ("xfiles required") if (missing(outfiles) || is.null(outfiles)) stop ("outfiles required") if (!inherits(outfiles,"list")) outfiles=as.list(outfiles) if (is.null(names(outfiles)) && length(outfiles) == 1) names(outfiles) = "predict" if (is.null(names(xfiles))) stop ("xfiles elements must be named") if (is.null(names(outfiles))) stop ("outfiles elements must be named") return ( AsciiGridImpute(object,xfiles,outfiles,xtypes=xtypes,lon=lon, lat=lat,rows=rows,cols=cols, nodata=nodata,myPredFunc=myPredFunc,...) ) } AsciiGridImpute <- function(object,xfiles,outfiles,xtypes=NULL,ancillaryData=NULL, ann=NULL,lon=NULL,lat=NULL,rows=NULL,cols=NULL,nodata=NULL, myPredFunc=NULL,...) { if (missing(xfiles) || is.null(xfiles)) stop ("xfiles required") if (missing(outfiles) || is.null(outfiles)) stop ("outfiles required") if (is.null(names(xfiles))) stop ("xfiles elements must be named") if (is.null(names(outfiles))) stop ("outfiles elements must be named") # outLegend is a list of factor levels and their index values for output # inLegend is a the same idea but for input. outLegend=NULL inLegend=NULL # nasum is a matrix of the number of NAs generated for each row nasum=NULL # make sure there is a type for every xfile if (is.null(xtypes)) { xtypes=xfiles xtypes[]="numeric" } else { tmp=xtypes xtypes=xfiles xtypes[]=NA xtypes[names(tmp)]=tmp xtypes[is.na(xtypes)]="numeric" } # there needs to be an input file for every xvar in the imputation. if (is.null(object) || !inherits(object,"yai")) { have=names(xfiles) } else { have=intersect(xvars(object),names(xfiles)) if (length(have) != length(xvars(object))) { lout = if (length(have)==0) xvars(object) else setdiff(xvars(object),have) stop(paste("required maps are missing for variables:", paste(lout ,collapse=", "))) } # trim the list of xfiles to those needed. 
xfiles = xfiles[match(xvars(object),names(xfiles))] } # deal with ancillaryData and build allY allY = 0 if (!is.null(ancillaryData) && inherits(object,"yai")) { if (length(intersect(class(ancillaryData),c("matrix","data.frame"))) > 0 && nrow(ancillaryData[rownames(object$yRefs),,FALSE]) == nrow(object$yRefs) && length(intersect(rownames(ancillaryData),rownames(object$yRefs))) == nrow(object$yRefs) ) { toKeep = intersect(union(colnames(ancillaryData),colnames(object$yRefs)),names(outfiles) ) if (length(toKeep) == 0) allY = NULL else { fromAn=intersect(toKeep,colnames(ancillaryData)) fromRe=setdiff(intersect(toKeep,colnames(object$yRefs)),fromAn) if (length(fromAn)>0 && length(fromRe)>0) allY = data.frame(cbind(ancillaryData[rownames(object$yRefs),],object$yRefs)[,toKeep], row.names=rownames(object$yRefs)) else if (length(fromAn)>0) allY = data.frame(ancillaryData[rownames(object$yRefs),toKeep], row.names=rownames(object$yRefs)) else if (length(fromRe)>0) allY = data.frame(object$yRefs[,toKeep], row.names=rownames(object$yRefs)) colnames(allY)=toKeep } } if (is.null(names(allY))) stop ("ancillaryData can not be used because no variables match the names in outfiles") } if (is.null(colnames(allY)) && inherits(object,"yai")) { toKeep = intersect(colnames(object$yRefs),names(outfiles)) if (length(toKeep) == 0) allY = NULL else { allY = data.frame(object$yRefs[,toKeep],row.names=rownames(object$yRefs)) colnames(allY) = toKeep } } # if using yai, deal with ann if (inherits(object,"yai") && is.null(ann)) ann=object$ann # set some flags used below predYes = length(intersect(names(outfiles),c("predict" ))) == 1 distYes = length(intersect(names(outfiles),c("distance"))) == 1 && inherits(object,"yai") useidYes= length(intersect(names(outfiles),c("useid" ))) == 1 && inherits(object,"yai") sumIlls=NULL # make a list of input file handles and open the files. infh = vector("list",length=length(xfiles)) names(infh)=names(xfiles) for (i in 1:length(xfiles)) infh[[i]]=file(xfiles[[i]],open="rt") on.exit(lapply(infh,close)) # get and check headers from each input file header=NULL postWarn=TRUE for (i in 1:length(infh)) { newhead = readLines(infh[[i]],n=6) if (is.null(header)) header=newhead else { if (!identical(newhead,header)) { cat ("Map headers are not equal\nHeader from file: ",xfiles[[i-1]],"\n") print (header) cat ("\nHeader from file: ",xfiles[[i]],"\n") print (newhead) flush.console() if (postWarn) warning ("map headers don't match.") postWarn=FALSE } header=newhead } } # write the "common" header to all the output files. 
getVal=function(header,tok) { for (lin in header) { l = scan (text=lin,what="character",quiet=TRUE) if (toupper(l[1]) == tok) return (as.numeric(l[2])) } return (NA) } nc = getVal(header,"NCOLS") nr = getVal(header,"NROWS") xllc= getVal(header,"XLLCORNER") yllc= getVal(header,"YLLCORNER") csz = getVal(header,"CELLSIZE") nodv= getVal(header,"NODATA_VALUE") if (any(unlist(lapply(c(nc,nr,xllc,yllc,csz,nodv),is.na)))) stop ("header error in input maps") if (!is.null(lon)) { cols=c(0,0) lon=sort(lon) cols[1]=round((lon[1]-xllc)/csz)+1 cols[2]=round((lon[2]-xllc)/csz) } if (!is.null(lat)) { rows=c(0,0) lat=sort(lat) rows[2]=nr-round((lat[1]-yllc)/csz) rows[1]=nr-round((lat[2]-yllc)/csz)+1 } if (is.null(rows) && is.null(cols) && is.null(nodata)) #header does not change { for (i in 1:length(outfiles)) cat (header,file=outfiles[[i]],sep="\n") newnr = nr newnc = nc nodata= nodv rows=c(1,nr) cols=c(1,nc) } else #header changes { if (is.null(nodata)) nodata=nodv if (is.null(rows)) rows=c(1,nr) if (rows[1]<1) rows[1]=1 if (rows[2]>nr) rows[2]=nr if (rows[1]>rows[2]) rows[1]=rows[2] if (is.null(cols)) cols=c(1,nc) if (cols[1]<1) cols[1]=1 if (cols[2]>nc) cols[2]=nc if (cols[1]>cols[2]) cols[1]=cols[2] newnr = rows[2]-rows[1]+1 newnc = cols[2]-cols[1]+1 if (rows[2] != nr) yllc = yllc+(csz*(nr-rows[2])) if (cols[1] != 1) xllc = xllc+(csz*(cols[1]-1)) for (i in 1:length(outfiles)) { cat("NCOLS ",as.character(newnc), "\n",file=outfiles[[i]],sep="") cat("NROWS ",as.character(newnr), "\n",file=outfiles[[i]],sep="",append=TRUE) cat("XLLCORNER ",as.character(xllc), "\n",file=outfiles[[i]],sep="",append=TRUE) cat("YLLCORNER ",as.character(yllc), "\n",file=outfiles[[i]],sep="",append=TRUE) cat("CELLSIZE ",as.character(csz ), "\n",file=outfiles[[i]],sep="",append=TRUE) cat("NODATA_VALUE ",as.character(nodata),"\n",file=outfiles[[i]],sep="",append=TRUE) } } # set up the xlevels. In randomForest version >= 4.5-20, the xlevels # are stored in the forest. xlevels = object$xlevels if (is.null(xlevels) && inherits(object,"randomForest")) xlevels=object$forest$xlevels if (!is.null(xlevels)) { # we need just the variable names... 
names(xlevels) = lapply(names(xlevels), function (x) unlist(all.vars(formula(paste("~",x)))[1])) if (length(xlevels)>0) for (i in names(xlevels)) if (is.numeric(xlevels[[i]]) && length(xlevels[[i]]) == 1) xlevels[[i]] = NULL if (length(xlevels) == 0) xlevels=NULL else { inLegend=lapply(xlevels,data.frame) inLegend=unionDataJoin(inLegend) names(inLegend)=names(xlevels) } } nskip=rows[1]-1 dpr = max(newnr %/% 100,1) if (nskip > 0) cat("Rows to skip: ",nskip," ") cat("Rows per dot: ",dpr," Rows to do:",newnr,"\nToDo: ") for (ir in 1:floor(newnr/dpr)) cat (".") cat ("\nDone: ");flush.console() nodout=suppressWarnings(as.numeric(nodata)) ircur=0 for (ir in rows[1]:rows[2]) { indata = vector("list",length=length(xfiles)) names(indata)=names(infh) for (i in 1:length(infh)) { indata[[i]]=scan(infh[[i]],nlines=1,what=vector(mode=xtypes[[i]],length=0), skip=nskip,quiet=TRUE) nod = indata[[i]] == nodv if (any(nod)) indata[[i]][nod] = NA } nskip=0 rowLens = unlist(lapply(indata,length)) if (any(rowLens[1] != rowLens)) { cat ("Row lengths for row ",ir,"\n") print(rowLens) flush.console() stop ("Unequal row lengths.") } if (newnc == nc) newdata=data.frame(indata) else newdata=data.frame(indata)[cols[1]:cols[2],,FALSE] width=floor(log(nrow(newdata),10))+1 rownames(newdata)=paste(ir,formatC(1:nrow(newdata),width=width,flag="0"),sep="x") origRowNames=rownames(newdata) newdata=na.omit(newdata) omitted=origRowNames[as.vector(attr(newdata,"na.action"))] moreOrigRowNames=NULL if (!is.null(xlevels) && length(omitted)<length(origRowNames)) { moreOrigRowNames=rownames(newdata) factorMatch = get("factorMatch",asNamespace("yaImpute")) newdata=factorMatch(newdata,xlevels) ills = attr(newdata,"illegalLevelCounts") if(inherits(ills, "list")) #if( class(ills)=="list" ) { if (is.null(sumIlls)) { sumIlls = ills warning ("NA's generated due to illegal level(s).") } else { addIllegalLevels = get("addIllegalLevels",asNamespace("yaImpute")) sumIlls = addIllegalLevels(sumIlls,ills) } omitted=c(omitted,moreOrigRowNames[as.vector(attr(newdata,"na.action"))]) } } # tag the vector so newtargets() will not duplicate # the creation of this attribute data else attr(newdata,"illegalLevelCounts")=0 if (length(omitted)==length(origRowNames)) # all missing. { outdata=data.frame(matrix(nodout,length(omitted),length(outfiles)), row.names=omitted) names(outdata)=names(outfiles) } else { if (!is.null(myPredFunc)) { outdata=myPredFunc(object,newdata,...) if (!inherits(outdata,"data.frame")) { cns = colnames(outdata) outdata=data.frame(predict=outdata,row.names=rownames(newdata)) if (!is.null(cns)) colnames(outdata) = cns } else rownames(outdata)=rownames(newdata) } else if (is.null(object)) { outdata=newdata } else if (inherits(object,"yai")) { outdata = NULL saveNames=rownames(newdata) rownames(newdata)=paste("m",as.character(1:nrow(newdata)),sep="!") new = newtargets(object,newdata,k=NULL,ann=ann) if (!is.null(allY)) outdata = impute(new,ancillaryData=allY,observed=FALSE,...) rownames(outdata)=saveNames if (distYes) dists = data.frame(distance=new$neiDstTrgs[,1], row.names=rownames(newdata)) else dists = NULL if (useidYes) useIds= data.frame(useid=match(new$neiIdsTrgs[,1], rownames(object$xRefs)),row.names=rownames(newdata)) else useIds= NULL if (!is.null(outdata) && !is.null(dists) ) outdata=cbind(outdata,dists) else if (is.null(outdata)) outdata=dists if (!is.null(outdata) && !is.null(useIds)) outdata=cbind(outdata,useIds) else if (is.null(outdata)) outdata=useIds } else { outdata=predict(object,newdata,...) 
if (!inherits(outdata,"data.frame")) { cns = colnames(outdata) outdata=data.frame(predict=outdata,row.names=rownames(newdata)) if (!is.null(cns)) colnames(outdata) = cns } else rownames(outdata)=rownames(newdata) } if (is.null(outLegend)) { outLegend=vector("list",ncol(outdata)) names(outLegend)=names(outdata) for (n in names(outLegend)) outLegend[[n]]=if (is.factor(outdata[,n])) levels(outdata[,n]) else NULL } else { for (n in names(outLegend)) { if (is.factor(outdata[,n])) { for (lev in levels(outdata[,n])) { if (length(grep(lev,outLegend[[n]],fixed=TRUE)) == 0) outLegend[[n]] = c(outLegend[[n]],lev) } } } } #convert factors to numbers that match the outLegend for (n in colnames(outdata)) if (is.factor(outdata[,n])) outdata[,n] <- match(levels(outdata[,n])[outdata[,n]],outLegend[[n]]) if (nrow(outdata) != nrow(newdata)) { cat ("First six lines non-missing predictions for row ",ir,"\n") print(head(outdata)) cat ("First six lines of non-missing xfiles data for row ",ir,"\n") head(head(newdata)) flush.console() stop ("Unexpected results for row = ",ir) } outrs = nrow(outdata) # the predict might send NA's, change them to no data outtmp = na.omit(outdata) if (outrs > nrow(outtmp)) { nasum = if (is.null(nasum)) c(ir,outrs-nrow(outtmp)) else rbind(nasum,c(ir,outrs-nrow(outtmp))) for (icol in 1:ncol(outdata)) outdata[is.na(outdata[,icol]),icol]=nodout } if (length(omitted)>0) { # add omitted observations back into data frame in the proper location more = data.frame(matrix(nodout,length(omitted),length(names(outdata))), row.names=omitted) names(more)=names(outdata) for (i in 1:ncol(more)) if (is.factor(outdata[,i])) more[,i]=as.factor(more[,i]) outdata = rbind(outdata,more) outdata = outdata[sort(rownames(outdata),index.return = TRUE)$ix,,FALSE] } } for (i in 1:length(outfiles)) { vname=names(outfiles)[i] if (length(intersect(names(outdata),vname))==0) { cat ("\nFirst six lines of predicted data for map row: ",ir,"\n") print(head(outdata)) flush.console() stop (vname," is not present in the predicted data") } cat (outdata[,vname],"\n",file=outfiles[[i]],append=TRUE) } ircur=ircur+1 if (ircur>=dpr) { ircur=0 cat (".");flush.console() } } cat ("\n");flush.console() for (n in names(outLegend)) { outLegend[[n]]=as.data.frame(outLegend[[n]],stringsAsFactors=FALSE) names(outLegend[[n]])=n } if (! is.null(nasum)) { nasum=data.frame(maprow=nasum[,1],count=nasum[,2]) cat ("Summary of unexpected NA values generated from predict:") print (nasum) warning("Unexpected NA values generated") } if (length(outLegend)>0) { outLegend=unionDataJoin(outLegend) cat ("Legend of levels in output grids:\n") print (outLegend) } else outLegend=NULL if (! is.null(inLegend)) { cat ("Legend of levels in input grids (assumed):\n") print (inLegend) } if (!is.null(sumIlls)) { cat ("Factors with levels found in input maps but not present in data used to fit the model\n") cat ("(number of map cells; counts do not include cells coded NODATA on other maps):\n") if (inherits(sumIlls[[1]], "table")) print (sumIlls) #if (class(sumIlls[[1]]) == "table") print (sumIlls) else { for (a in names(sumIlls)) { sumIlls[[a]]=as.data.frame(sumIlls[[a]]) colnames(sumIlls[[a]])=a } sumIlls = unionDataJoin(sumIlls) print (sumIlls) cat ("\n") } } invisible(list(unexpectedNAs=nasum,illegalLevels=sumIlls,outputLegend=outLegend,inputLegend=inLegend)) }
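
# An illustrative sketch with hypothetical file names: 'xfiles' needs one
# ASCII grid per X variable of the fitted object (here a yai object 'mal'
# built from Sepal measurements), and 'outfiles' names the grids to write;
# the reserved name "distance" asks for the first-neighbor distance grid.
if (FALSE) {
  xfiles   <- list(Sepal.Length = "sl.asc", Sepal.Width = "sw.asc")
  outfiles <- list(Petal.Length = "pl.asc", distance = "dst.asc")
  AsciiGridImpute(mal, xfiles, outfiles)
}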
/scratch/gouwar.j/cran-all/cranData/yaImpute/R/asciigridimpute.R
buildConsensus <- function (reps, noTrgs=FALSE, noRefs=FALSE, k=NULL) { if (!all(unlist(lapply(reps,function (x) class(x) == "yai")))) stop("class must be yai for all reps") cl=match.call() if (length(reps) == 1) { warning ("only one rep, nothing to do.") return (reps[[1]]) } mink = min(unlist(lapply(reps,function (x) x$k))) if (!is.null(k)) { if (k > mink) { warning ("k=",k," ignored, replaced by k=", mink) k = mink } } else { k = mink } rowNT = NULL rowNR = NULL if (!noTrgs) { rowNT = lapply(reps,function (x) if (is.null(x$neiIdsTrgs)) NULL else rownames(x$neiIdsTrgs)) if (all(is.null(unlist(rowNT)))) noTrgs = TRUE else { if (all(unlist(lapply(rowNT,function(x,y) identical(x,y), rowNT[[1]])))) rowNT = rowNT[[1]] else { rids = NULL for (r in rowNT) rids = union(rids,r) rowNT = rids } } } if (!noRefs) { rowNR = lapply(reps,function (x) if (is.null(x$neiIdsRefs)) NULL else rownames(x$neiIdsRefs)) if (all(is.null(unlist(rowNR)))) noRefs = TRUE else { if (all(unlist(lapply(rowNR,function(x,y) identical(x,y), rowNR[[1]])))) rowNR = rowNR[[1]] else { rids = NULL for (r in rowNR) rids = union(rids,r) rowNR = rids } } } if (!noTrgs) { rowNT = setdiff(rowNT,rowNR) if (length(rowNT) == 0) { rowNT = NULL noTrgs = TRUE } } else rowNT = NULL if (noTrgs & noRefs) stop("Can't find neighbors in any objects") #build bootstrap sample weights cnts = table(unlist(lapply(reps,function (x) unique(x$bootstrap)))) if (length(cnts) == 1) wts = NULL else { wts = length(reps)/cnts names(wts) = names(cnts) } # define an internal function to do the mergers mkMerge <- function (kIds,kDst,rown,nreps,wts) { if (is.null(rown)) # assume all rows line up which is much faster { kIds = matrix(unlist(kIds),ncol=nreps) kDst = matrix(unlist(kDst),ncol=nreps) } else { # need to merge the rows to create the matrix. 
kid = matrix("",ncol=length(reps),nrow=length(rown)) kds = matrix(NA,ncol=length(reps),nrow=length(rown)) rownames(kid) = rown rownames(kds) = rown for (i in 1:nreps) { idx = match(rown,names(kIds[[i]])) kid[,i] = kIds[[i]][idx] kds[,i] = kDst[[i]][idx] } kid[kid == "" ] = NA kIds = kid kDst = kds } newIds = apply(kIds,1,function (x,wts) { cnts = table(x) if (is.null(wts)) names(which.max(cnts)) else names(which.max(cnts*wts[names(cnts)])) }, wts) newDst = vector("numeric",length(newIds)) for (i in 1:length(newIds)) { inc = kIds[i,] == newIds[i] inc[is.na(inc)] = FALSE newDst[i] = mean(kDst[i,inc]) } list(ids=newIds,dst=newDst) } ############## end of function definition idsT = if (noTrgs) NULL else list() dstT = if (noTrgs) NULL else list() idsR = if (noRefs) NULL else list() dstR = if (noRefs) NULL else list() mxk = k for (k in 1:mxk) { if (!noTrgs) { kIds = lapply(reps,function (x,k) x$neiIdsTrgs[,k], k) kDst = lapply(reps,function (x,k) x$neiDstTrgs[,k], k) tmp = mkMerge(kIds,kDst,rowNT,length(reps),wts) idsT[[k]] = tmp$ids dstT[[k]] = tmp$dst } if (!noRefs) { kIds = lapply(reps,function (x,k) x$neiIdsRefs[,k], k) kDst = lapply(reps,function (x,k) x$neiDstRefs[,k], k) tmp = mkMerge(kIds,kDst,rowNR,length(reps),wts) idsR[[k]] = tmp$ids dstR[[k]] = tmp$dst } } if (!noTrgs) { idsT = do.call(cbind,idsT) rownames(idsT) = rowNT colnames(idsT) = paste0("Id.k",1:mxk) dstT = do.call(cbind,dstT) rownames(dstT) = rowNT colnames(dstT) = paste0("Dst.k",1:mxk) } if (!noRefs) { idsR = do.call(cbind,idsR) rownames(idsR) = rowNR colnames(idsR) = paste0("Id.k",1:mxk) dstR = do.call(cbind,dstR) rownames(dstR) = rowNR colnames(dstR) = paste0("Dst.k",1:mxk) } # build a merged list of yRefs, and xRefs # find out if all the column names are the same. clsI = TRUE for (i in 1:(length(reps)-1)) { if (!identical(colnames(reps[[i]]$yRefs),colnames(reps[[i+1]]$yRefs)) || !identical(colnames(reps[[i]]$xRefs),colnames(reps[[i+1]]$xRefs))) { clsI = FALSE break } } if (clsI) { idx = if (is.null(rowNR)) NULL else na.omit(match(rowNR,rownames(reps[[1]]$xRefs))) yRefs = if (is.null(idx)) reps[[1]]$yRefs else reps[[1]]$yRefs[idx,] xRefs = if (is.null(idx)) reps[[1]]$xRefs else reps[[1]]$xRefs[idx,] for (i in 2:length(reps)) { rowNR = setdiff(rowNR,rownames(xRefs)) if (length(rowNR) == 0) break idx = na.omit(match(rowNR,rownames(reps[[i]]$xRefs))) if (length(idx) > 0) { yRefs = rbind(yRefs,reps[[i]]$yRefs[idx,,drop=FALSE]) xRefs = rbind(xRefs,reps[[i]]$xRefs[idx,,drop=FALSE]) } } } else { yRefs = reps[[1]]$yRefs xRefs = reps[[1]]$xRefs for (i in 2:length(reps)) { if (!identical(yRefs,reps[[i]]$yRefs)) yRefs = unionDataJoin(yRefs,reps[[i]]$yRefs,warn=FALSE) if (!identical(xRefs,reps[[i]]$xRefs)) xRefs = unionDataJoin(xRefs,reps[[i]]$xRefs,warn=FALSE) } } # build a merged list of xall xall = reps[[1]]$xall for (i in 2:length(reps)) { if (!identical(xall,reps[[i]]$xall)) xall = unionDataJoin(xall,reps[[i]]$xall,warn=FALSE) } mymean = function(x) { if (is.null(ncol(x))) { ans = if (is.factor(x)) NA else mean(x) } else { ans=as.numeric(rep(NA,ncol(x))) names(ans)=colnames(x) for (i in 1:ncol(x)) if (!is.factor(x[,i])) ans[i]=mean(x[,i]) } ans } mysd = function(x) { if (is.null(ncol(x))) { ans = if (is.factor(x)) NA else sd(x) } else { ans=as.numeric(rep(NA,ncol(x))) names(ans)=colnames(x) for (i in 1:ncol(x)) if (!is.factor(x[,i])) ans[i]=sd(x[,i]) } ans } xScale=list(center=mymean(xRefs),scale=mysd(xRefs)) yScale=list(center=mymean(yRefs),scale=mysd(yRefs)) 
out=list(call=cl,yRefs=yRefs,xRefs=xRefs,obsDropped=NULL,yDrop=NULL,bootstrap=FALSE, xDrop=NULL,trgRows=rowNT,xall=xall,cancor=NULL,theFormula=NULL, ftest=NULL,yScale=yScale,xScale=xScale,ccaVegan=NULL,ranForest=NULL, ICA=NULL,k=mxk,projector=NULL,nVec=NULL,pVal=NULL,method="consensus",ann=FALSE, neiDstTrgs=dstT,neiIdsTrgs=idsT, neiDstRefs=dstR,neiIdsRefs=idsR) class(out)="yai" out }
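
# A minimal sketch, assuming x/y reference data as in the usual yaImpute
# examples and that yai() exposes the bootstrap argument this function reads
# via x$bootstrap: several bootstrap runs merged into one consensus.
if (FALSE) {
  reps <- lapply(1:5, function(i)
    yai(x = x, y = y, method = "randomForest", k = 3, bootstrap = TRUE))
  cons <- buildConsensus(reps)
}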
/scratch/gouwar.j/cran-all/cranData/yaImpute/R/buildConsensus.R
compare.yai = function (...,ancillaryData=NULL,vars=NULL,method="rmsd",scale=TRUE) { if (missing(...)) stop ("... required") okClasses <- c("yai","impute.yai") for (object in list(...)) if (length(intersect(class(object),okClasses))==0) stop("object classes must be one of ",paste(okClasses,collapse=", ")) args <- list(...) if (length(intersect(method,c("rmsd","cor")))==0) stop("method must be rmsd or cor") names(args) <- as.list(substitute(list(...)))[-1] #who would of guessed that this is the way! ans <- list() scales <- list() tag <- if (method=="rmsd") "rmsdS" else "cor" meth <- match(method,c("rmsd","cor")) i <- 0 for (object in list(...)) { i <- i+1 if (inherits(object,"yai")) object <- impute.yai(object,ancillaryData=ancillaryData,vars=vars,observed=TRUE) one <- switch(meth, rmsd.yai(object,vars=vars,scale=scale), cor.yai(object,vars=vars), NULL) names(one) <- paste(names(args)[i],tag,sep=".") ans[[i]] <- one if (meth == 1) scales[[i]] <- attributes(one)$scale } names(ans) <- names(args) ans <- unionDataJoin(ans) class(ans) <- c("compare.yai",class(ans)) if (length(scales) > 0) { names(scales) <- names(args) ident = TRUE if (length(scales) > 1) { s1 <- scales[[1]] for (i in 2:length(scales)) { if (!identical (s1,scales[[i]])) { warning ("not all scale factors are the same.") ident = FALSE break } } } attr(ans,"scales") <- if (ident) scales[[1]] else scales } ans }
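
# A minimal sketch, assuming x/y reference data as in the yaImpute examples:
# scaled RMSD columns for two imputations, side by side.
if (FALSE) {
  mal <- yai(x = x, y = y, method = "mahalanobis")
  rf  <- yai(x = x, y = y, method = "randomForest")
  compare.yai(mal, rf)
}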
/scratch/gouwar.j/cran-all/cranData/yaImpute/R/compare.yai.R
# Computes the correlation between observed and imputed observations # Arguments: # object is of class yai or yai.impute If the object is yai, impute is called # with observed=TRUE. # vars is a list of variables, if NULL, all those with imputed values are processed. # ... passed to the impute funciton when it is called cor.yai = function (object,vars=NULL,...) { if (missing(object)) stop ("object required.") if (class(object)[1] == "yai") object = impute.yai(object,vars=vars,observed=TRUE,...) if (is.null(object)) stop ("no imputations found using this object") object=na.omit(object) if (is.null(vars)) vars=names(object) vi=paste(unique(strsplit(vars,".o",fixed=TRUE))) vi=intersect(vi,names(object)) notFound=setdiff(vars,names(object)) if (length(notFound)>0) warning ("variables not found: ",paste(notFound,collapse=", ")) if (length(vi) == 0) stop("nothing to compute") vo=paste(vi,"o",sep=".") notFound=setdiff(vo,names(object)) if (length(notFound)>0) warning ("variables not found: ",paste(notFound,collapse=", ")) vo=intersect(vo,names(object)) both=intersect(paste(unique(strsplit(vo,".o",fixed=TRUE))),vi) if (length(both) == 0) stop("nothing to compute") vo=paste(both,"o",sep=".") cors=data.frame(rep(NA,length(vo)),row.names=both) names(cors)="r" for (i in 1:length(both)) if (!is.factor(object[,both[i]])) cors[i,1]=cor(object[,both[i]],object[,vo[i]]) cors }
/scratch/gouwar.j/cran-all/cranData/yaImpute/R/cor.yai.R
correctBias <- function (object,trgVal,trgValCI=NULL,nStdev=1.5,excludeRefIds=NULL,trace=FALSE) { if(!inherits(object, "yai")) stop("object must be of class yai") if (missing(trgVal)) stop("trgVal must be defined.") trgValExpression = if (is.expression(trgVal)) trgVal else #if (class(trgVal) == "character") parse(text=trgVal) else NULL if(inherits(trgVal, "character")) parse(text=trgVal) else NULL if (!is.expression(trgValExpression)) stop ("trgVal can not be coerced into an expression.") trgVals <- NULL trgVal <- NULL trgValsd <- NULL trgValCIc <- NULL trgValData <- as.data.frame(cbind(object$yRefs,object$xRefs)) if (is.null(excludeRefIds)) trgValDataCopy <- trgValData else { trgValDataCopy = NULL if (inherits(excludeRefIds, "character")) { if (excludeRefIds[1] != "all") excludeRefIds <- na.omit(match(excludeRefIds,rownames(trgValData))) } #if (class(excludeRefIds) == "numeric" || class(excludeRefIds) == "integer" ) if(inherits(excludeRefIds, "numeric") || inherits(excludeRefIds, "integer")) { excludeRefIds <- unique(sort(excludeRefIds)) if (length(excludeRefIds) > 0 && nrow(trgValData)-length(excludeRefIds) > 1) trgValDataCopy <- trgValData[-excludeRefIds,] } } if (!is.null(trgValDataCopy)) { trgVals <- eval(trgValExpression,trgValDataCopy) trgVal <- mean(trgVals) trgValsd <- sd(trgVals)/sqrt(nrow(trgValDataCopy)) trgValCIc <- c(trgVal-(trgValsd*nStdev),trgVal+(trgValsd*nStdev)) } # if the user specified the CI, use it instead. if (is.null(trgValCI)) trgValCI <- trgValCIc if (is.null(trgValCI)) stop ("trgValCI was not supplied and can not be computed.") if (trace) {cat ("Target CI=", trgValCI,"\n");flush.console()} newobject <- object pass = 0 if (object$k-1 > 0) { for (pass in 1:(object$k-1)) { curVals <- eval(trgValExpression, trgValData[newobject$neiIdsTrgs[,1],,drop=FALSE]) curVal <- if (is.null(trgVals)) mean(curVals) else mean(c(curVals,trgVals)) if (curVal >= trgValCI[1] & curVal <= trgValCI[2]) { if (trace) cat ("trgValCI=",trgValCI," curVal=",curVal) if (pass == 1) { pass = 0 if (trace) {cat (" -- no bias to correct.\n"); flush.console()} } else { pass=pass-1 if (trace) { cat (" -- target CI reached, passes made=",pass,"\n") flush.console() } } break } toHigh <- curVal > trgValCI[2] curBias <- if (toHigh) curVal - trgValCI[2] else curVal - trgValCI[1] # dV is the contribution to the bias of each observation. dV <- (eval(trgValExpression, trgValData[newobject$neiIdsTrgs[,pass+1],,drop=FALSE]) - curVals) / length(curVals) dDst <- newobject$neiDstTrgs[,pass+1] - newobject$neiDstTrgs[,1] if (toHigh) { ntoTry <- sum(dV < 0) dDst[dV >= 0] <- .Machine$double.xmax } else { ntoTry <- sum(dV > 0) dDst[dV <= 0] <- .Machine$double.xmax } ord <- sort(dDst,index.return=TRUE)$ix bsum <- cumsum(dV[ord[1:ntoTry]]) toSwitch <- if (toHigh) which(bsum+curBias <= 0)[1] else which(bsum+curBias >= 0)[1] if (is.na(toSwitch)) toSwitch <- length(bsum) if (trace) { cat ("trgValCI=",trgValCI," pass=",pass," curVal=",curVal, " curBias=",curBias," ntoTry=",ntoTry,"toSwitch=", toSwitch,"\n") flush.console() } newobject$neiDstTrgs[ord[1:toSwitch],1] <- newobject$neiDstTrgs[ord[1:toSwitch],pass+1] newobject$neiIdsTrgs[ord[1:toSwitch],1] <- newobject$neiIdsTrgs[ord[1:toSwitch],pass+1] } } else { pass = 0 curVals <- eval(trgValExpression, trgValData[newobject$neiIdsTrgs[,1],,drop=FALSE]) curVal <- if (is.null(trgVals)) mean(curVals) else mean(c(curVals,trgVals)) if (trace) { cat ("trgValCI=",trgValCI," curVal=",curVal," k is 1 -- nothing to do.\n") flush.console() } } # get rid of the k>1 columns... 
newobject$neiDstTrgs <- newobject$neiDstTrgs[,1,drop=FALSE] newobject$neiIdsTrgs <- newobject$neiIdsTrgs[,1,drop=FALSE] newobject$neiDstRefs <- newobject$neiDstRefs[,1,drop=FALSE] newobject$neiIdsRefs <- newobject$neiIdsRefs[,1,drop=FALSE] newobject$k <- 1 newobject$call <- list(yai=newobject$call,correctBias=match.call()) newobject$biasParameters <- list(trgValCI = trgValCI, curVal = curVal, npasses = pass, oldk=object$k) newobject }
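
# A minimal sketch, assuming a yai object fit with k > 1 so there are
# alternate neighbors to switch to; trgVal may be an expression or a character
# string over the reference X/Y columns (here simply a variable name, assuming
# Petal.Length is one of the fitted variables).
if (FALSE) {
  mal   <- yai(x = x, y = y, method = "mahalanobis", k = 10)
  malUn <- correctBias(mal, trgVal = "Petal.Length", nStdev = 1.5, trace = TRUE)
}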
/scratch/gouwar.j/cran-all/cranData/yaImpute/R/correctbias.R
ensembleImpute <- function (imputes,method="mean",...) { cl = match.call() posM = c("mean","median") if (!(method %in% posM)) stop ('method="',method,'" must be one of ', paste0('"',posM,'"',collapse=", ")) for (i in 1:length(imputes)) if (("yai" %in% class(imputes[[i]]))) imputes[[i]] = impute.yai(imputes[[i]],...) colns = unique(unlist(lapply(imputes,function(x) colnames(x)))) ctf = if (method!="mean") median else mean rowns = sort(unique(unlist(lapply(imputes,function (x) rownames(x))))) ave = list() sd = list() N = list() methods = list() for (cl in colns) { one = matrix(unlist(lapply(imputes,function (x,rowns,cl) x[rowns,cl],rowns,cl)), nrow=length(rowns)) n = apply(one,1,function (x) sum(!is.na(x))) if (any(n != length(imputes))) N[[cl]] = n if (mode(one) == "character") { ave[[cl]] = apply(one,1,function(x) { x = na.omit(x) if (length(x) == 0) return(NA) x = table(x) x = x+(runif(length(x))*.01) names(x)[which.max(x)] }) ave[[cl]] = as.factor(ave[[cl]]) methods[[cl]] = "mode" } else { ave[[cl]] = apply(one,1,ctf,na.rm=TRUE) ave[[cl]][is.nan(ave[[cl]])] = NA sd [[cl]] = apply(one,1,function (x) { x = na.omit(x) if (length(x) > 1) sd(x) else 0 }) methods[[cl]] = method } } ans = as.data.frame(ave) rownames(ans) = rowns class(ans) = c("impute.yai","data.frame") if (length(sd)>0) { sumsgtz = unlist(lapply(sd,sum)) > 0 if (any(sumsgtz)) { sd = as.data.frame(sd[sumsgtz]) rownames(sd) = rowns attr(ans,"sd") = sd } } attr(ans,"N") = if (length(N)>0) as.data.frame(N) else length(imputes) attr(ans,"methods") = unlist(methods) ans }
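
# A minimal sketch: combine the imputations from several yai runs by mean or
# median; factor variables are resolved by a randomly tie-broken mode, as
# coded above.
if (FALSE) {
  reps <- lapply(1:5, function(i)
    yai(x = x, y = y, method = "randomForest", k = 2, bootstrap = TRUE))
  imp <- ensembleImpute(reps, method = "median")
}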
/scratch/gouwar.j/cran-all/cranData/yaImpute/R/ensembleImpute.R
errorStats <- function(mahal,...,scale=FALSE,pzero=0.1,plg=0.5,seeMethod="lm") { obsMinusImp = function (object,...,vars=NULL) { if (missing(object)) stop ("object required.") if (class(object)[1] == "yai") object = impute(object,vars=vars,observed=TRUE,...) if (is.null(object)) stop ("no imputations found using this object") object=na.omit(object) if (is.null(vars)) vars=names(object) vi=paste(unique(strsplit(vars,".o",fixed=TRUE))) vi=intersect(vi,names(object)) notFound=setdiff(vars,names(object)) if (length(notFound)>0) warning ("variables not found: ",paste(notFound,collapse=", ")) if (length(vi) == 0) stop("nothing to compute") vo=paste(vi,"o",sep=".") notFound=setdiff(vo,names(object)) if (length(notFound)>0) warning ("variables not found: ",paste(notFound,collapse=", ")) vo=intersect(vo,names(object)) both=intersect(paste(unique(strsplit(vo,".o",fixed=TRUE))),vi) if (length(both) == 0) stop("nothing to compute") vo=paste(both,"o",sep=".") obsMinusImp=matrix(NA,nrow(object),length(both)) colnames(obsMinusImp)=both rownames(obsMinusImp)=rownames(object) for (i in 1:length(both)) { if (!is.factor(object[,both[i]])) obsMinusImp[,i]=object[,both[i]]-object[,vo[i]] } obsMinusImp } frmmsd0 = function(x,label,pzero) { if (missing(x)) stop ("x required.") if(!inherits(x, "yai")) stop ("class must be yai") if (x$method != "mahalanobis") stop ("method must be mahalanobis") if (is.null(x$neiDstRef)) stop ("reference neighbors must be present") xr=obsMinusImp(x)^2 if (ncol(xr) == 0) stop ("nothing to compute") xdstt=notablyDistant(x,p=1-pzero)$threshold obs=x$neiDstRef[,1]<xdstt if (sum(obs)<20) warning ("number of observations used is ",sum(obs)," -- consider increasing pzero") rmmsd0=matrix(NA,ncol(xr),1) rownames(rmmsd0)=colnames(xr) colnames(rmmsd0)=paste(label,"rmmsd0",sep=".") for (var in rownames(rmmsd0)) rmmsd0[var,1]=mean(xr[obs,var]) rmmsd0[rmmsd0<=0] = NA sqrt(rmmsd0) } frmsdlg = function(x,label,plg) { if (missing(x)) stop ("x required.") if(!inherits(x, "yai")) stop ("class must be yai") if (is.null(x$neiDstRef)) stop ("reference neighbors must be present") xr=obsMinusImp(x)^2 if (ncol(xr) == 0) stop ("nothing to compute") xdstt=notablyDistant(x,p=plg)$threshold obs=x$neiDstRef[,1]>xdstt if (sum(obs)<20) warning ("number of observations used is ",sum(obs)," -- consider increasing plg") rmsdlg=matrix(NA,ncol(xr),1) rownames(rmsdlg)=colnames(xr) colnames(rmsdlg)=paste(label,"rmsdlg",sep=".") for (var in rownames(rmsdlg)) rmsdlg[var,1]=mean(xr[obs,var]) rmsdlg[rmsdlg<0] = NA sqrt(rmsdlg) } frmsd2 = function(x,label) { rmsd=rmsd.yai(x,scale=FALSE) colnames(rmsd)=paste(label,"rmsd",sep=".") rmsd } fsee = function(x,label,method) { if (missing(x)) stop ("x required.") if(!inherits(x, "yai")) stop ("class must be yai") mfg = paste(xvars(x),collapse=",5)+s(") mfl = paste(xvars(x),collapse="+") data=cbind(x$yRefs,x$xRefs) see=matrix(NA,ncol(x$yRefs),1) rownames(see)=yvars(x) colnames(see)=paste(label,"see",sep=".") for (var in rownames(see)) { if (method=="gam") { if (!requireNamespace(gam)) { stop ("install package gam and try again") # the purpose of this line of code is to suppress CRAN check notes gam <- function (...) 
NULL } g = try(gam(as.formula(paste(var,"~s(",mfg,")",sep="")),data=data)) } else g = try( lm(as.formula(paste(var,"~",mfl,sep="")),data=data)) if (is.element("lm",class(g))) see[var,1]=mean(resid(g)^2) } see[see<0] = NA sqrt(see) } fsei = function(rmsd,rmmsd0) { sei = rmsd^2 - (rmmsd0^2/2) sei[sei<0] = NA colnames(sei)=paste(strsplit(colnames(rmsd),".",fixed=TRUE)[[1]][1],"sei",sep=".") sqrt(sei) } fdstc = function(rmsd,rmmsd0) { dstc =rmsd^2 - rmmsd0^2 dstc[dstc<0] = NA colnames(dstc)=paste(strsplit(colnames(rmsd),".",fixed=TRUE)[[1]][1],"dstc",sep=".") sqrt(dstc) } fmlf = function(see,rmmsd0) { mlf = see^2 - (rmmsd0^2/2) mlf[mlf<0] = NA colnames(mlf)=paste(strsplit(colnames(see),".",fixed=TRUE)[[1]][1],"mlf",sep=".") sqrt(mlf) } ############## # compute basic error statistics for the mahalanobis method if (mahal$method != "mahalanobis") stop ("method for first argument must be mahalanobis") label = deparse(substitute(mahal)) rmmsd0= frmmsd0(mahal,label,pzero) rmsdlg= frmsdlg(mahal,label,plg) rmsd = frmsd2(mahal,label) see = fsee(mahal,label,seeMethod) sei = fsei(rmsd,rmmsd0) dstc = fdstc(rmsd,rmmsd0) mlf = fmlf(see,rmmsd0) out = cbind(see,rmmsd0,mlf,rmsd,rmsdlg,sei,dstc) if (scale) { scl = 1/mahal$yScale$scale[rownames(rmsd)] out = out*scl } # if there are other objects, compute error statistics for them... args = list(...) if (length(args)==0) { ans = vector("list",2) names(ans) = c("common",label) ans[[1]] = out[,1:3] ans[[2]] = out[,4:7] } else { names(args) = as.list(substitute(list(...)))[-1] ans = vector("list",length(args)+2) names(ans) = c("common",label,names(args)) ans[[1]] = out[,1:3] ans[[2]] = out[,4:7] i = 2 for (object in args) { i = i+1 label = names(ans)[i] rmsd = frmsd2(object,label)[rownames(rmmsd0),,drop=FALSE] rmsdlg= frmsdlg(object,label,plg=plg)[rownames(rmmsd0),,drop=FALSE] sei = fsei(rmsd,rmmsd0) dstc = fdstc(rmsd,rmmsd0) out = cbind(rmsd,rmsdlg,sei,dstc) if (scale) { scl = 1/object$yScale$scale[rownames(rmsd)] out = out*scl } ans[[i]] <- out } } ans }
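
# A minimal sketch: the first argument must be a mahalanobis yai object (it
# supplies rmmsd0 and see); each further object gets comparable
# rmsd/rmsdlg/sei/dstc columns.
if (FALSE) {
  mal <- yai(x = x, y = y, method = "mahalanobis")
  rf  <- yai(x = x, y = y, method = "randomForest")
  errorStats(mal, rf)
}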
# yaImpute/R/errorstats.R
findFactors = function(x)
{
  if (is.null(x)) return(NULL)
  if (is.data.frame(x))
  {
    factors=rep(FALSE,ncol(x))
    for (i in 1:ncol(x)) factors[i]=is.factor(x[,i])
  }
  else if (is.matrix(x)) factors=rep(FALSE,ncol(x))
  else factors=is.factor(x)
  factors
}

factorMatch = function (x,xlevels)
{
  if (is.matrix(x) || !is.data.frame(x)) return(x = x)
  if (!is.null(attr(x,"illegalLevelCounts"))) return(x = x)
  facts=intersect(names(x),names(xlevels))
  if (length(facts)==0)
  {
    attr(x,"illegalLevelCounts")= 0
    return(x = x)
  }
  nas=NULL
  miss=NULL
  for (varName in facts)
  {
    fx=as.integer(x[,varName])
    fx[fx>length(xlevels[[varName]])] = NA
    fx[fx<1]=NA
    attr(fx,"levels")=xlevels[[varName]]
    attr(fx,"class")="factor"
    if (any(is.na(fx)))
    {
      lx = table(x[,varName])
      mtb = lx[setdiff(names(lx),as.character(1:length(xlevels[[varName]])))]
      if (is.null(miss))
      {
        miss=list(mtb)
        names(miss)=varName
      }
      else
      {
        miss=c(miss,list(mtb))
        names(miss)[length(miss)]=varName
      }
    }
    x[,varName]=fx
  }
  if (is.null(miss))
  {
    attr(x,"illegalLevelCounts")= 0
  }
  else
  {
    x=na.omit(x)
    attr(x,"illegalLevelCounts")= miss
  }
  x
}

addIllegalLevels=function(a1,a2)
{
  if (inherits(a1, "data.frame")) a1=attr(a1,"illegalLevelCounts")
  if (inherits(a2, "data.frame")) a2=attr(a2,"illegalLevelCounts")
  vars=union(names(a1),names(a2))
  if (length(vars)==0) return(NULL)
  out =vector(mode = "list", length = length(vars))
  names(out)=vars
  for (var in vars)
  {
    m1 = a1[[var]]
    if (!is.null(m1))
    {
      m1 = as.matrix(m1)
      colnames(m1)="m1"
    }
    m2 = a2[[var]]
    if (!is.null(m2))
    {
      m2 = as.matrix(m2)
      colnames(m2)="m2"
    }
    if (is.null(m1) && is.null(m2)) return (NULL)
    else if (is.null(m1)) both = m2
    else if (is.null(m2)) both = m1
    else both = unionDataJoin(m1,m2)
    both[is.na(both)]=0
    both = as.matrix(apply (both,1,sum))
    colnames(both)=var
    out[[var]]=both
  }
  out
}
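# A minimal sketch of findFactors (inside `if (FALSE)` so it never runs at
# load time); iris is used purely for illustration.
if (FALSE)
{
  findFactors(iris)         # FALSE for the four measurements, TRUE for Species
  findFactors(iris$Species) # TRUE for a bare factor
}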
# yaImpute/R/findFactors.R
# Takes a "yai" object and returns a "for-use" table.
#
# When method=="kth"
#   By default (kth=NULL), only the best pick for an observation is listed in
#   the use column. A reference observation is always selected to represent
#   itself. However, when kth is not NULL, the kth neighbor is reported.
#
# When method=="random" or "randomWeighted"
#   For each target, kth is selected at random from the k neighbors. When
#   "randomWeighted" is used, 1/(1+d) is used as a probability weight factor
#   in selecting the kth neighbor, where d is the distance.
#   In this case kth is normally NULL, and set to k for the object. When kth is
#   not null, it is used as the upper limit on the number of neighbors to consider.
#
# When targetsOnly is true, reporting of references is not done.
#
# The rowname is a target id, the first column is the reference id that is
# selected to represent the target, and the second is the corresponding
# distance. Every reference is included as a target as well. In those
# cases, the rowname and the use value are the same and the distance
# is zero.

foruse = function (object,kth=NULL,method="kth",targetsOnly=FALSE)
{
  if (!inherits(object, "yai")) stop ("class must be yai")
  valid=c("kth","random","randomWeighted")
  if (is.na(match(method,valid)))
    stop (paste("method must be one of",paste(valid,collapse=", ")))
  if (method != "kth" && is.null(kth)) kth=object$k
  if (!is.null(kth))
  {
    if (kth>object$k) kth=object$k
    if (kth<1) kth=NULL
  }
  if (!is.null(object$neiIdsRefs) && !targetsOnly)
  {
    if (is.null(kth))
      fu1=data.frame(use=rownames(object$neiIdsRefs),
                     dist=rep(0,nrow(object$neiIdsRefs)),
                     stringsAsFactors = FALSE)
    else
    {
      if (method != "kth")
      {
        ans = lapply(1:nrow(object$neiIdsRefs),
          function (x,kth,Ids,Dst,method)
          {
            k = sample.int(kth,1,prob=if (method=="random") NULL else 1/(Dst[x,]+1))
            list(Ids[x,k], Dst[x,k])
          }, kth, object$neiIdsRefs, object$neiDstRefs, method)
        fu1=data.frame(use=unlist(lapply(ans,function (x) x[[1]])),
                       dist=unlist(lapply(ans,function (x) x[[2]])),
                       stringsAsFactors = FALSE)
      }
      else
      {
        if (is.null(kth)) kth=1
        fu1=data.frame(use=object$neiIdsRefs[,kth],
                       dist=object$neiDstRefs[,kth],
                       stringsAsFactors = FALSE)
      }
    }
    rownames(fu1)=rownames(object$neiIdsRefs)
  }
  else fu1=NULL
  if (!is.null(object$neiIdsTrgs))
  {
    if (method != "kth")
    {
      ans = lapply(1:nrow(object$neiIdsTrgs),
        function (x,kth,Ids,Dst,method)
        {
          k = sample(kth,1,prob=if (method=="random") NULL else 1/(Dst[x,]+1))
          list(Ids[x,k], Dst[x,k])
        }, kth, object$neiIdsTrgs, object$neiDstTrgs, method)
      fu2=data.frame(use=unlist(lapply(ans,function (x) x[[1]])),
                     dist=unlist(lapply(ans,function (x) x[[2]])),
                     stringsAsFactors = FALSE)
    }
    else
    {
      if (is.null(kth)) kth=1
      fu2=data.frame(use=object$neiIdsTrgs[,kth],
                     dist=object$neiDstTrgs[,kth],
                     stringsAsFactors = FALSE)
    }
    rownames(fu2)=rownames(object$neiIdsTrgs)
  }
  else fu2=NULL
  if (is.null(fu1) & is.null(fu2)) return (NULL)
  if (is.null(fu1)) ans = fu2
  else if (is.null(fu2)) ans = fu1
  else ans = rbind(fu1,fu2)
  class(ans)=c("data.frame","foruse.yaImpute")
  ans
}
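# A minimal usage sketch (inside `if (FALSE)` so it never runs at load time);
# the iris-based yai object is an illustrative assumption.
if (FALSE)
{
  m <- yai(x=iris[,1:2], y=iris[1:75,3:4], method="msn", k=3)
  head(foruse(m))                   # best reference for each observation
  head(foruse(m, method="random"))  # pick one of the k=3 neighbors at random
}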
# yaImpute/R/foruse.R
grmsd <- function (...,ancillaryData=NULL,vars=NULL,wts=NULL,rtnVectors=FALSE,
                   imputeMethod="closest")
{
  if (missing(...)) stop ("... required")
  args <- list(...)
  argLabs <- as.list(substitute(list(...)))[-1]
  names(args) <- if (is.null(names(argLabs))) unlist(argLabs) else
  {
    fixNames <- names(argLabs) == ""
    names(argLabs)[fixNames] <- argLabs[fixNames]
    names(argLabs)
  }
  okClasses <- c("yai","impute.yai","data.frame","matrix","lm")
  if (!is.null(wts))
  {
    if (any(wts < 0) || sum(wts) <= 0) stop("wts must be positive and sum > 0")
  }
  mgd <- list()
  for (objName in names(args))
  {
    object <- args[[objName]]
    if (!inherits(object,okClasses))
    {
      warning("object ",objName," class is not one of ",paste(okClasses,collapse=", "))
      next
    }
    if (inherits(object,"yai"))
      object <- impute.yai(object,ancillaryData=ancillaryData,vars=vars,observed=TRUE,
                           method=imputeMethod)
    # try to allow "lm" objects. This code may fail as there are many
    # methods in R that inherit from "lm".
    if (inherits(object,"lm"))
    {
      pr <- predict(object)
      ob <- pr + resid(object)
      # only one column?
      if (is.null(dim(pr)))
      {
        object <- cbind(pr,ob)
        colnames(object) = c(objName,paste0(objName,".o"))
      }
      else
      {
        colnames(ob) = paste0(colnames(ob),".o")
        object <- cbind(pr,ob)
      }
    }
    object <- na.omit(object)
    if (nrow(object) == 0)
    {
      warning("argument ",objName," has no rows.")
      next
    }
    if (inherits(object,"matrix") & mode(object) != "numeric")
    {
      warning("argument ",objName," must be numeric.")
      next
    }
    facts = if (inherits(object,"matrix")) FALSE else unlist(lapply(object,is.factor))
    if (any(facts))
    {
      if (all(facts))
      {
        warning("all variables are factors in ",objName)
        next
      }
      else
      {
        nams <- names(facts)[facts]
        nams <- nams[-grep("[.]o$",nams)]
        warning("factor(s) have been removed from ",objName,": ",paste0(nams,collapse=", "))
        object <- object[,!facts,drop=FALSE]
      }
    }
    useVars <- if (is.null(vars)) colnames(object) else
    {
      ov <- grep ("[.]o$",vars)
      ov <- if (length(ov) == 0) unique(c(vars,paste0(vars,".o"))) else vars
      intersect(ov,colnames(object))
    }
    if (length(useVars) == 0)
    {
      warning ("needed variables not found in ",objName)
      next
    }
    ov = useVars[grep ("[.]o$",useVars)]
    if (length(ov) == 0)
    {
      warning ("no observed variables found in ",objName)
      next
    }
    pv <- unique(sub("[.]o$","",ov))
    pv <- intersect(pv,useVars)
    if (length(pv) == 0)
    {
      warning("nothing to compute in ",objName)
      next
    }
    ob <- as.matrix(object[,ov,drop=FALSE])
    pr <- as.matrix(object[,pv,drop=FALSE])
    qr <- qr(ob)
    uvars <- qr$pivot[1:qr$rank]
    if (length(uvars) < length(ov))
      warning("rank deficiency in ",objName," was addressed by removing: ",
              paste0(c(colnames(ob)[qr$pivot[(qr$rank+1):length(qr$pivot)]],
                       colnames(pr)[qr$pivot[(qr$rank+1):length(qr$pivot)]]),collapse=", "))
    p <- solve(chol(cov(ob[,uvars,drop=FALSE])))
    ob <- as.matrix(ob[,uvars]) %*% p
    pr <- as.matrix(pr[,uvars]) %*% p
    wt <- wts
    wt <- if (is.null(wt)) rep(1,ncol(pr)) else
    {
      if (length(names(wt)) > 0)
      {
        names(wt) <- sub("[.]o$","",names(wt))
        wt <- na.omit(wt[names(pr)])
      }
      if (length(wt) != ncol(pr))
      {
        warning ("weights do not match variables in ",objName," and were ignored.")
        wt <- rep(1,ncol(pr))
      }
      wt
    }
    wt <- wt/sum(wt)
    md <- apply((pr-ob),1,function (x,wt) sum((x^2)*wt), wt)
    mgd[[objName]] <- if (rtnVectors) sqrt(md) else sqrt(mean(md))
  }
  if (rtnVectors)
  {
    idx <- sort(unlist(lapply(mgd,function (x) sqrt(mean(x)))),index.return=TRUE)$ix
    mgd[idx]
  }
  else sort(unlist(mgd))
}
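# A minimal comparison sketch (inside `if (FALSE)`; the iris setup is an
# illustrative assumption). grmsd is a generalized root mean square distance
# between observed and imputed values; smaller is better.
if (FALSE)
{
  m1 <- yai(x=iris[,1:2], y=iris[1:75,3:4], method="msn")
  m2 <- yai(x=iris[,1:2], y=iris[1:75,3:4], method="mahalanobis")
  grmsd(msn=m1, mahalanobis=m2)
}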
# yaImpute/R/grmsd.R
# Imputes the observation for a variable from a reference observation to a
# target observation. Also, imputes a value for a reference from other
# references. This practice is useful for validation.
#
# Arguments:
#   object is of class yai (built using function yai)
#   ancillaryData is a data frame of reference, target, or both observations
#     with variables that may or may not be in the original problem.
#     Imputations are made for these variables using the neighbor
#     relationships found in object.
#   method defines how continuous variables are imputed, where:
#     closest = the value from the nearest neighbor is imputed for
#       target observations. For reference observations, the value
#       from the observation itself is excluded.
#       When k==1, closest is always used.
#     mean = the mean of the k neighbors is taken
#     median = the median of the k neighbors is taken
#     dstWeighted = a weighted average is taken over the k
#       neighbors where the weights are (1/(1+d))/sum(1/(1+d))
#   method.factor defines how factors are imputed, default is to use the
#     "method", where:
#     closest = same as continuous (always used with k==1).
#     mean|median = actually the mode: the factor level that is the most
#       frequent
#     dstWeighted = the factor level with the largest weight,
#       where weights are (1/(1+d))/sum(1/(1+d))
#   k is the number of neighbors to use. When NULL, all those available
#     in the yai object are used.
#   vars a character vector of variable names desired. If NULL (the default),
#     then the list is formed by taking all variables in the input object.
#   observed is a flag; when TRUE, the returned data frame includes
#     the observed values (see value, below)
# Value:
#   A data frame of class impute.yai with all variables
#   in the problem or just those listed in "vars" if it is not
#   null. When observed is TRUE, the set of variables
#   is duplicated with .o added to the names and each is
#   the observed value when one exists for the observation.

impute <- function(object,...) UseMethod("impute")

impute.yai <- function (object,ancillaryData=NULL,method="closest",
                        method.factor=method,k=NULL,
                        vars=NULL,observed=TRUE,...)
{
  # ======== predict functions used internally
  findFactors = get("findFactors",asNamespace("yaImpute"))
  pred.c <- function (refs,ids,w=NULL,method="closest",k=1,vars,observed)
  {
    if (is.null(vars)) vars <- colnames(refs)
    else vars <- intersect(vars,colnames(refs))
    if (is.null(vars) | length(vars)==0) return (NULL)
    if (method=="closest" || k==1)
    {
      ans <- refs[ids[,1],vars,FALSE]
    }
    else if (method=="mean" || method=="median")
    {
      rws <- match(ids[,1:k],rownames(refs))
      ans <- lapply(vars, function (v,rws,refs,nr,func)
        {
          rfs <- refs[rws,v]
          dim(rfs) <- c(nr,length(rfs)/nr)
          apply(rfs,1,func)
        }, rws, refs, nrow(ids), if (method=="mean") mean else median)
      names(ans) <- vars
      ans <- as.data.frame(ans)
    }
    else if (method=="dstWeighted")
    {
      if (is.null(w))
      {
        warning ("w is required when method is dstWeighted")
        return(NULL)
      }
      wei <- t(apply(w[,1:k,drop=FALSE],1, function (x) {x <- 1/(1+x); x/sum(x)}))
      rws <- match(ids[,1:k],rownames(refs))
      ans <- lapply(vars, function (v,rws,refs,wei)
        {
          rfs <- refs[rws,v]
          dim(rfs) <- dim(wei)
          apply(rfs*wei,1,sum)
        }, rws, refs, wei)
      names(ans) <- vars
      ans <- as.data.frame(ans)
    }
    rownames(ans) <- rownames(ids)
    if (observed)
    {
      obs <- matrix(data=NA, nrow = nrow(ans), ncol = ncol(ans))
      rownames(obs) <- rownames(ans)
      colnames(obs) <- vars
      obs <- as.data.frame(obs)
      commonRows <- intersect(rownames(ans),rownames(refs))
      if (length(commonRows)>0) obs[commonRows,vars] <- refs[commonRows,vars]
      colnames(obs) <- paste(vars,"o",sep=".")
      ans <- cbind(ans,obs)
    }
    ans
  }
  pred.f <- function (refs,ids,w=NULL,method="closest",k=1,vars,observed)
  {
    if (is.null(vars)) vars <- colnames(refs)
    else vars <- intersect(vars,colnames(refs))
    if (is.null(vars) | length(vars)==0) return (NULL)
    if (method=="closest" || k==1)
    {
      ans <- data.frame(refs[ids[,1],vars,FALSE])
    }
    else
    {
      wei <- if (method != "dstWeighted" || is.null(w)) NULL else
        t(apply(w[,1:k,drop=FALSE],1, function (x) {x <- 1/(1+x); x/sum(x)}))
      rws <- match(ids[,1:k],rownames(refs))
      ans <- lapply(vars, function (v,rws,refs,wei,nr)
        {
          rfs <- as.character(refs[rws,v])
          dim(rfs) <- c(nr,length(rfs)/nr)
          if (is.null(wei))
          {
            apply(rfs,1,function (x)
              {
                t <- table(x)
                t <- t + (runif(length(t)) * 0.01)
                names(which.max(t))
              })
          }
          else
          {
            a <- vector("character",nrow(wei))
            for (i in 1:nrow(wei))
            {
              t <- tapply(wei[i,],rfs[i,],sum)
              t <- t + (runif(length(t)) * 0.01 * min(t))
              a[i] <- names(which.max(t))
            }
            a
          }
        }, rws, refs, wei, nrow(ids))
      names(ans) <- vars
      ans <- as.data.frame(ans,rownames=rownames(ids))
    }
    rownames(ans) <- rownames(ids)
    if (observed)
    {
      obs <- matrix(data=NA, nrow = nrow(ans), ncol = ncol(ans))
      rownames(obs) <- rownames(ans)
      colnames(obs) <- vars
      obs <- as.data.frame(obs)
      commonRows <- intersect(rownames(ans),rownames(refs))
      if (length(commonRows)>0)
      {
        for (var in vars)
        {
          obs[commonRows,var] <- levels(refs[,var])[refs[commonRows,var]]
          obs[,var] <- factor(obs[,var])
        }
      }
      names(obs) <- paste(vars,"o",sep=".")
      ans <- cbind(ans,obs)
    }
    ans
  }
  pred <- function (refs,ids,w=NULL,method="closest",
                    method.factor="closest",k=1,vars,observed)
  {
    factors <- findFactors(refs)
    nfactors <- sum(factors)
    if (nfactors>0 && method.factor != "closest" && k==1)
    {
      warning ("method.factor was set to closest because k=1")
      method.factor <- "closest"
    }
    if (nfactors == 0) out <- pred.c(refs=refs,ids=ids,w=w,method=method,
                                     k=k,vars=vars,observed=observed)
    else if (nfactors == ncol(refs)) out <- pred.f(refs=refs,ids=ids,w=w,
                                     method=method.factor,
                                     k=k,vars=vars,observed=observed)
    else
    {
      tmp <- data.frame(refs[,!factors],row.names=rownames(refs))
      colnames(tmp) <- colnames(refs)[!factors]
      p1 <- pred.c(refs=tmp,ids=ids,w=w,method=method,
                   k=k,vars=vars,observed=observed)
      tmp <- data.frame(refs[,factors],row.names=rownames(refs))
      colnames(tmp) <- colnames(refs)[factors]
      p2 <- pred.f(refs=tmp,ids=ids,w=w,method=method.factor,
                   k=k,vars=vars,observed=observed)
      if (is.null(p1) && is.null(p2)) out <- NULL
      else if (is.null(p1)) out <- p2
      else if (is.null(p2)) out <- p1
      else out <- cbind(p1,p2)
    }
    out
  }
  # ===========================
  if (missing(object)) stop ("object required.")
  if (!inherits(object, "yai")) stop ("class must be yai")
  if (is.null(vars))
  {
    if (is.null(ancillaryData))
    {
      if (object$method != "randomForest") vars <- yvars(object)
      else if (names(object$ranForest)[[1]] == "unsupervised") vars <- xvars(object)
    }
    else
    {
      if (! is.data.frame(ancillaryData)) ancillaryData <- as.data.frame(ancillaryData)
      vars <- colnames(ancillaryData)
    }
  }
  posMethods <- c("closest","mean","median","dstWeighted")
  if (length(intersect(method,posMethods))==0)
    stop (paste("method=",method," must be one of: {",
                paste(posMethods,collapse=", "),"}",sep=""))
  if (is.null(k)) k <- object$k
  if (k>object$k || k==0)
  {
    warning ("k out of range, set to ",object$k)
    k <- object$k
  }
  if (method != "closest" && k==1)
  {
    warning ("method was set to closest because k==1")
    method <- "closest"
  }
  if (is.null(ancillaryData))
  {
    r <- NULL
    if (length(object$neiIdsRefs)>0)
    {
      if (!(ncol(object$yRefs) == 1 && names(object$yRefs)[1]=="ydummy"))
        yPredRefs <- pred(refs=object$yRefs,ids=object$neiIdsRefs,
                          w=object$neiDstRefs,method=method,method.factor=method.factor,
                          k=k,vars=vars,observed=observed)
      else yPredRefs <- NULL
      xPredRefs <- pred(refs=object$xall,ids=object$neiIdsRefs,
                        w=object$neiDstRefs,method=method,method.factor=method.factor,
                        k=k,vars=vars,observed=observed)
      if (is.null(yPredRefs) && is.null(xPredRefs)) r <- NULL
      else if (is.null(yPredRefs)) r <- xPredRefs
      else if (is.null(xPredRefs)) r <- yPredRefs
      else r <- cbind(yPredRefs,xPredRefs)
    }
    t <- NULL
    if (length(object$neiIdsTrgs)>0)
    {
      if (!(ncol(object$yRefs) == 1 && names(object$yRefs)[1]=="ydummy"))
        yPredTrgs <- pred(refs=object$yRefs,ids=object$neiIdsTrgs,
                          w=object$neiDstTrgs,method=method,method.factor=method.factor,
                          k=k,vars=vars,observed=observed)
      else yPredTrgs <- NULL
      xPredTrgs <- pred(refs=object$xall,ids=object$neiIdsTrgs,
                        w=object$neiDstTrgs,method=method,method.factor=method.factor,
                        k=k,vars=vars,observed=observed)
      if (is.null(yPredTrgs) && is.null(xPredTrgs)) t <- NULL
      else if (is.null(yPredTrgs)) t <- xPredTrgs
      else if (is.null(xPredTrgs)) t <- yPredTrgs
      else t <- cbind(yPredTrgs,xPredTrgs)
    }
    if (is.null(r) && is.null(t)) out <- NULL
    else if (is.null(r)) out <- t
    else if (is.null(t)) out <- r
    else out <- rbind(r,t)
    scale <- data.frame(center=c(object$xScale$center,object$yScale$center),
                        scale =c(object$xScale$scale, object$yScale$scale))
  }
  else
  {
    out <- NULL
    if (!is.null(vars))
    {
      ancillaryData <- ancillaryData[,vars,FALSE]
      if (is.null(ncol(ancillaryData))) stop ("requested variables not present in ancillaryData")
    }
    rownames(ancillaryData) <- as.character(rownames(ancillaryData))
    ids <- as.character(rownames(object$xRefs))
    common <- intersect(ids,rownames(ancillaryData))
    missing <- setdiff(ids,common)
    if (length(missing) != 0) warning (paste("no data for",length(missing),
      "observations:",paste(missing[1:min(15,length(missing))],collapse=",")))
    w <- rbind(object$neiDstRefs,object$neiDstTrgs)
    ids <- rbind(object$neiIdsRefs,object$neiIdsTrgs)
    out <- pred(refs=ancillaryData,ids=ids,w=w,method=method,
                method.factor=method.factor,k=k,vars=vars,observed=observed)
    # find the sd/mean of the data and attach these as an attribute
    notFactors <- !findFactors(ancillaryData)
    if (sum(notFactors) > 0)
    {
      scale <- matrix(data=NA,nrow=ncol(ancillaryData),ncol=2)
      rownames(scale) <- colnames(ancillaryData)
      colnames(scale) <- c("center","scale")
      scale[notFactors,"center"] <- apply(ancillaryData[,
        rownames(scale)[notFactors],FALSE],2,mean,na.rm=TRUE)
      scale[notFactors,"scale" ] <- apply(ancillaryData[,
        rownames(scale)[notFactors],FALSE],2,sd, na.rm=TRUE)
    }
    else scale=NULL
  }
  if (!is.null(out))
  {
    class(out) <- c("impute.yai","data.frame")
    if (!is.null(scale)) attr(out,"scale") <- scale
  }
  out
}
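# A minimal usage sketch (inside `if (FALSE)`; the iris-based setup is an
# illustrative assumption, not part of the package).
if (FALSE)
{
  m <- yai(x=iris[,1:2], y=iris[1:75,3:4], method="msn", k=3)
  head(impute(m))                                # nearest-neighbor imputations
  head(impute(m, method="dstWeighted", k=3))     # distance-weighted average
  head(impute(m, ancillaryData=iris[1:75,3:5]))  # extra variables, incl. a factor
}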
# yaImpute/R/impute.R
# Takes a "yai" or "foruse.yaImpute" object and returns an array
# of the n reference observations that are most often used as
# data sources.

mostused = function (object,n=20,kth=NULL)
{
  if (is.null(object)) stop ("object required.")
  if (inherits(object, "yai")) object = foruse(object,kth=kth)
  if (is.null(object)) stop ("no neighbors found using this object")
  if (class(object)[2] != "foruse.yaImpute") stop("class must be yai or foruse.yaImpute")
  tab=table(object[,1])
  class(tab)="vector"
  tab=data.frame(tab,row.names=names(tab))
  n=min(n,nrow(tab))
  sort(tab[,1],decreasing = TRUE)[1:n]
}
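# A minimal usage sketch (inside `if (FALSE)`; iris setup is illustrative).
if (FALSE)
{
  m <- yai(x=iris[,1:2], y=iris[1:75,3:4], method="msn")
  mostused(m, n=5)  # the five references chosen most often
}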
# yaImpute/R/mostused.R
# Arguments:
#   object is of class "yai" created by function yai.
#   newdata is a matrix or dataframe that contains data for x variables
#   k is the number of neighbors desired (see function yai).
#
# Value: A list of class "yai", that is a copy of the input object with
# the following members replaced:
#
#   call: the call
#
#   obsDropped: a list of the rownames for observations dropped for various
#     reasons (missing data).
#
#   trgRows: a list of the rownames for target observations as a subset
#     of observations in xall (normally, all rows).
#
#   xall: the x variables for all observations.
#
#   neiDstTrgs: A data frame of distances between a target (identified by its
#     rowname) and the k references. There are k columns.
#
#   neiIdsTrgs: A data frame of reference identifications that correspond to
#     neiDstTrgs.
#
#   k: new value of k; if null, the value is taken from object.
#
#   ann: use ann or not... if null, the value is taken from object.

newtargets=function(object,newdata,k=NULL,ann=NULL)
{
  if (!inherits(object, "yai")) stop ("object must be class yai")
  if (object$method == "ensemble")
    stop ("newtargets can not be found for objects with method 'ensemble'.")
  if (is.null(newdata) | nrow(newdata)==0) stop ("newdata is required")
  if (object$method == "gnn")
    if (!requireNamespace ("vegan")) stop("install vegan and try again")
  if (object$method == "randomForest")
    if (!requireNamespace ("randomForest")) stop("install randomForest and try again")
  if (object$method == "gower")
  {
    if (!requireNamespace ("gower"))
    {
      stop("install gower and try again")
      # the purpose of this line of code is to suppress CRAN check notes
      gower_topn <- function (...) NULL
    }
    else gower_topn <- gower::gower_topn
  }
  sumSqDiff=function(x,y) { d=x-y; sum(d*d) }
  factorMatch = get("factorMatch",asNamespace("yaImpute"))
  if (is.null(ann)) ann=object$ann
  if (!is.null(k)) object$k=k
  object$call=match.call()
  obsDropped=NULL
  # don't redo the factor matching for objects that come already done.
  if (is.null(attr(newdata,"illegalLevelCounts")) &&
      length(intersect(xvars(object),names(object$xlevels))) > 0)
  {
    newdata = factorMatch(newdata,object$xlevels)
    if (is.list(attr(newdata,"illegalLevelCounts")))
    {
      warning ("NA's generated due to illegal level(s).")
      cat ("Illegal levels\n")
      print(attr(newdata,"illegalLevelCounts"))
    }
  }
  if (is.null(object$theFormula))
  {
    have=intersect(colnames(object$xRefs),colnames(newdata))
    if (length(have) != length(colnames(object$xRefs)))
    {
      missing = setdiff(colnames(object$xRefs),colnames(newdata))
      stop(paste("required column(s) missing:", paste (missing, collapse=", ")))
    }
    xall=na.omit(as.data.frame(newdata[,have]))
    colnames(xall) = have
    obsDropped=names(attributes(na.omit(xall))$na.action)
    if (length(obsDropped)>0) warning (nrow(newdata)-nrow(xall)," observation(s) removed")
  }
  else
  {
    xall=model.frame(object$theFormula$x,newdata)
    if (!is.null(object$xDrop)) xall=xall[,!object$xDrop,drop=FALSE]
    obsDropped=setdiff(rownames(newdata),rownames(xall))
    if (length(obsDropped)) warning (length(obsDropped)," observation(s) removed")
  }
  if (nrow(xall) == 0) stop ("no observations")
  trgs=setdiff(rownames(xall),rownames(object$xRefs))
  if (nrow(xall) != length(trgs))
  {
    obsDropped=union(obsDropped,intersect(rownames(object$xRefs),rownames(xall)))
    warning (nrow(xall)-length(trgs)," row(s) in newdata are original references and ignored")
  }
  theCols = colnames(object$xRefs)
  # may be changed for reduced rank, depending on method.
  if (object$method %in% c("msn","msn2","msnPP","mahalanobis","ica"))
  {
    theCols = rownames(object$projector)
    xcvRefs=scale(object$xRefs,center=object$xScale$center,scale=object$xScale$scale)
    if (length(theCols)<ncol(xcvRefs)) xcvRefs=xcvRefs[,theCols,drop=FALSE]
  }
  xTrgs=as.data.frame(xall[trgs,theCols,drop=FALSE]) # needed by randomForest and gnn unscaled.
  if (nrow(xTrgs)==0) stop("no observations")
  if (object$method == "gnn") # gnn
  {
    # create a projected space for the reference observations
    xcvRefs=predict(object$ccaVegan,type="lc",rank="full")
    xcvRefs=xcvRefs %*% diag(sqrt(object$ccaVegan$CCA$eig/sum(object$ccaVegan$CCA$eig)))
    # create a projected space for the unknowns (target observations)
    xcvTrgs=scale(xTrgs,center=object$xScale$center,scale=object$xScale$scale)
    xcvTrgs=predict(object$ccaVegan,newdata=as.data.frame(xcvTrgs),type="lc",rank="full")
    xcvTrgs=xcvTrgs %*% diag(sqrt(object$ccaVegan$CCA$eig/sum(object$ccaVegan$CCA$eig)))
    nVec = ncol(xcvRefs)
  }
  else if (object$method == "randomForest") # randomForest
  {
    nodes=NULL
    predObs = if (is.null(attr(object$ranForest,"rfRefNodeSort"))) rbind(object$xRefs,xTrgs)
              else xTrgs
    for (i in 1:length(object$ranForest))
    {
      nodeset=attr(predict(object$ranForest[[i]],predObs,
                           proximity=FALSE,nodes=TRUE),"nodes")
      if (is.null(nodeset)) stop("randomForest did not return nodes")
      colnames(nodeset)=paste(colnames(nodeset),i,sep=".")
      nodes=if (is.null(nodes)) nodeset else cbind(nodes,nodeset)
    }
    if (is.null(attr(object$ranForest,"rfRefNodeSort")))
    {
      INTrefNodes=as.integer(nodes[rownames(object$xRefs),])
      INTnrow=as.integer(nrow(object$xRefs))
      INTncol=as.integer(ncol(nodes))
      INTsort = INTrefNodes
      dim(INTsort) = c(INTnrow,INTncol)
      INTsort=apply(INTsort,2,function (x) sort(x,index.return = TRUE,
                                                decreasing = FALSE)$ix-1)
      attributes(INTsort)=NULL
      INTsort = as.integer(INTsort)
      nodes = nodes[rownames(xTrgs),]
    }
    else
    {
      INTrefNodes = attr(object$ranForest,"rfRefNodeSort")[["INTrefNodes"]]
      INTnrow     = attr(object$ranForest,"rfRefNodeSort")[["INTnrow"]]
      INTncol     = attr(object$ranForest,"rfRefNodeSort")[["INTncol"]]
      INTsort     = attr(object$ranForest,"rfRefNodeSort")[["INTsort"]]
    }
  }
  else if (object$method == "random")
  {
    xcvRefs=data.frame(random=runif(nrow(object$xRefs)),row.names=rownames(object$xRefs))
    xcvTrgs=data.frame(random=runif(length(trgs)),row.names=trgs)
  }
  else if (object$method %in% c("msn","msn2","msnPP","mahalanobis","ica"))
  {
    xcvRefs=as.matrix(xcvRefs[,theCols,drop=FALSE]) %*% object$projector
    xcvTrgs=scale(xTrgs,center=object$xScale$center,scale=object$xScale$scale)
    xcvTrgs=as.matrix(xcvTrgs[,theCols,drop=FALSE]) %*% object$projector
  }
  else if (object$method == "euclidean")
  {
    xcvRefs=scale(object$xRefs,center=object$xScale$center,scale=object$xScale$scale)
    xcvRefs=as.matrix(xcvRefs[,theCols,drop=FALSE])
    xcvTrgs=scale(xTrgs,center=object$xScale$center,scale=object$xScale$scale)
    xcvTrgs=as.matrix(xcvTrgs[,theCols,drop=FALSE])
  }
  else if (object$method == "gower")
  {
    xcvRefs=object$xRefs[,theCols,drop=FALSE]
    xcvTrgs=xTrgs[,theCols,drop=FALSE]
  }
  else # method is raw
  {
    xcvRefs=as.matrix(object$xRefs[,theCols,drop=FALSE])
    xcvTrgs=as.matrix(xTrgs[,theCols,drop=FALSE])
  }
  neiDstTrgs=matrix(data=NA,nrow=length(trgs),ncol=object$k)
  rownames(neiDstTrgs)=trgs
  colnames(neiDstTrgs)=paste("Dst.k",1:object$k,sep="")
  neiIdsTrgs=neiDstTrgs
  colnames(neiIdsTrgs)=paste("Id.k",1:object$k,sep="")
  if (object$method %in% c("msn","msn2","msnPP","mahalanobis","ica",
                           "euclidean","gnn","raw"))
  {
    if (ann & nrow(xcvTrgs)>0)
    {
      k=object$k
      ann.out=ann(xcvRefs, xcvTrgs, k, verbose=FALSE)$knnIndexDist
      neiDstTrgs[TRUE]=sqrt(ann.out[,(k+1):ncol(ann.out)])
      for (i in 1:k) neiIdsTrgs[,i]=rownames(xcvRefs)[ann.out[,i]]
      rownames(neiDstTrgs)=rownames(neiIdsTrgs)
    }
    else
    {
      for (row in rownames(xcvTrgs))
      {
        d=sqrt(sort(apply(xcvRefs,MARGIN=1,sumSqDiff,xcvTrgs[row,])))[1:object$k]
        neiDstTrgs[row,]=d
        neiIdsTrgs[row,]=names(d)
      }
    }
  }
  else if (object$method == "gower")
  {
    gow = gower_topn(x=xcvTrgs,y=xcvRefs,n=object$k)
    for (i in 1:object$k)
    {
      neiDstTrgs[,i]=gow$distance[i,]
      neiIdsTrgs[,i]=rownames(xcvRefs)[gow$index[i,]]
    }
  }
  else if (object$method == "randomForest")
  {
    prox=lapply(apply(nodes,1,as.list),function (x)
    {
      prx=.Call("rfoneprox", INTrefNodes, INTsort, INTnrow, INTncol,
                as.integer(x), vector("integer",INTnrow))
      if (object$k > 1) px=sort(prx,index.return = TRUE, decreasing = TRUE)$ix[1:object$k]
      else px=which.max(prx)
      c(prx[px],px) # counts followed by pointers to references
    })
    for (i in 1:object$k)
    {
      neiDstTrgs[,i]=unlist(lapply(prox,function (x,i) (INTncol-x[i])/INTncol,i))
      neiIdsTrgs[,i]=unlist(lapply(prox,function (x,i,k,Rnames)
        Rnames[x[k+i]],i,object$k,rownames(object$xRefs)))
    }
  }
  else if (object$method == "random")
  {
    l=k+1
    d = matrix(unlist(lapply(xcvTrgs[[1]],function (x, xcv, l)
        { sort((xcv-x)^2,index.return=TRUE)$ix[2:l] },xcvRefs[[1]],l)),
        nrow=nrow(xcvTrgs),ncol=k,byrow=TRUE)
    for (ic in 1:ncol(d))
    {
      neiDstTrgs[,ic]=abs(xcvTrgs[,1]-xcvRefs[d[,ic],1])
      neiIdsTrgs[,ic]=rownames(xcvRefs)[d[,ic]]
    }
  }
  else # default
  {
    stop("no code for specified method")
  }
  # if bootstrap, then modify the reference ID's in the result ID tables.
  if (length(object$bootstrap) > 1) neiIdsTrgs[] = sub("\\.[0-9]$","",neiIdsTrgs[])
  object$obsDropped=obsDropped
  object$trgRows=trgs
  addX = setdiff (rownames(object$xRefs),rownames(xall))
  if (length(addX) > 0) xall = rbind(xall,object$xRefs[addX,])
  object$xall=xall
  object$neiDstTrgs=neiDstTrgs
  object$neiIdsTrgs=neiIdsTrgs
  noRefs=TRUE
  object$neiDstRefs=NULL
  object$neiIdsRefs=NULL
  object$ann=ann
  object
}
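# A minimal usage sketch (inside `if (FALSE)`; the iris split is an
# illustrative assumption).
if (FALSE)
{
  m  <- yai(x=iris[1:100,1:2], y=iris[1:50,3:4], method="msn")
  nt <- newtargets(m, newdata=iris[101:150,])
  head(nt$neiIdsTrgs)  # reference chosen for each new target
}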
# yaImpute/R/newtargets.R
notablyDifferent <- function (object,vars=NULL,threshold=NULL,p=.05,...)
{
  if (missing(object)) stop ("object required.")
  if (!inherits(object, "yai")) stop ("object must be class yai")
  refIds <- rownames(object$neiDstRefs)
  if (length(refIds) == 0) stop ("references are required")
  cl <- match.call()
  trgIds <- rownames(object$neiDstTrgs)
  if (is.null(vars)) vars=xvars(object)
  impObj <- impute.yai(object,vars=vars,observed=TRUE,...)
  if (is.null(impObj)) stop ("no imputations found using this object")
  nuke <- unlist(lapply(impObj,function (x) all(is.na(x))))
  nuke=nuke[nuke]
  if (length(nuke) > 0)
    impObj <- impObj[,-match(names(nuke),names(impObj)),drop=FALSE]
  nuke <- unlist(lapply(impObj,function (x) is.factor(x)))
  nuke <- nuke[nuke]
  if (length(nuke) > 0)
    impObj <- impObj[,-match(names(nuke),names(impObj)),drop=FALSE]
  impObj <- na.omit(impObj)
  if (is.null(vars)) vars <- names(impObj)
  vi <- paste(unique(strsplit(vars,".o",fixed=TRUE)))
  vi <- intersect(vi,names(impObj))
  notFound <- setdiff(vars,names(impObj))
  if (length(notFound)>0) warning ("variables not found or had missing values: ",
                                   paste(notFound,collapse=", "))
  if (length(vi) == 0) stop("nothing to compute")
  vo <- paste(vi,"o",sep=".")
  notFound <- setdiff(vo,names(impObj))
  if (length(notFound)>0) warning ("variables not found or had missing values: ",
                                   paste(notFound,collapse=", "))
  vo <- intersect(vo,names(impObj))
  both <- intersect(paste(unique(strsplit(vo,".o",fixed=TRUE))),vi)
  if (length(both) == 0) stop("no variables with observed and imputed values")
  vo <- paste(both,"o",sep=".")
  var <- unlist(lapply(impObj[,vo,drop=FALSE],var))
  names(var) = sub(".o","",names(var),fixed = TRUE)
  diff <- impObj[,both,drop=FALSE]-impObj[,vo,drop=FALSE]
  for (nam in both) diff[,nam] = (diff[,nam]*diff[,nam])/var[nam]
  rmsd <- sqrt(apply(diff,1,mean))
  if (is.null(threshold)) threshold <- quantile(rmsd[refIds],1-p)
  ans <- list(call=cl,vars=both,threshold=threshold,
              notablyDifferent.refs=sort(rmsd[refIds][rmsd[refIds]>threshold]),
              notablyDifferent.trgs=sort(rmsd[trgIds][rmsd[trgIds]>threshold]),
              rmsdS.refs=sort(rmsd[refIds]),
              rmsdS.trgs=sort(rmsd[trgIds]))
  class(ans) <- "notablyDifferent"
  ans
}

plot.notablyDifferent <- function (x,add=FALSE,...)
{
  if (missing(x)) stop ("x required")
  if (inherits(x, "list"))
  {
    if (!all(unlist(lapply(x,function (xx) class(xx)=="notablyDifferent"))))
      stop ("all members in the x list must be class notablyDifferent")
    ans <- matrix(unlist(lapply(x,function (xx)
      {
        all <- c(xx$rmsdS.refs,xx$rmsdS.trgs)
        c(max (all), length(all))
      })),length(x),2,byrow=TRUE)
    xlim <- c(1,max(ans[,2]))
    ylim <- c(0,max(ans[,1]))
    for (i in 1:length(x))
    {
      plot.notablyDifferent(x[[i]],xlim=xlim,ylim=ylim,add=i>1,col=i,...)
      myusr = par()$usr
      par(usr=c(0,1,0,1))
      cy <- par()$cxy[2]
      cy <- cy*1.1
      if (i == 1) text(x=.05,y=.95,pos=4,"Legend")
      text(x=.05,y=.95-(i+2)*cy,pos=4,
           if (is.null(names(x)[i])) paste("Case",i) else names(x)[i], col=i)
      par(usr=myusr)
    }
  }
  else
  {
    if (!inherits(x, "notablyDifferent")) stop ("x must be class notablyDifferent")
    all <- c(x$rmsdS.refs,x$rmsdS.trgs)
    pch <- c(rep(1,length(x$rmsdS.refs)),rep(2,length(x$rmsdS.trgs)))
    names(pch) <- names(all)
    all <- sort(all)
    pch[names(x$notablyDifferent.refs)] <- 19
    pch[names(x$notablyDifferent.trgs)] <- 17
    xx <- 1:length(all)
    if (add) points(x=xx,y=all,pch=pch[names(all)],...)
    else plot(x=xx,y=all,pch=pch[names(all)],
              main="Imputation Error Profile",
              ylab="Scaled RMSD",xlab="Observation",...)
    abline(h=x$threshold,...)
    if (!add)
    {
      myusr = par()$usr
      par(usr=c(0,1,0,1))
      cxy = par()$cxy*1.1
      bxy = c(.05,.95)
      points(x=cxy[1]  +bxy[1], y=bxy[2]-  cxy[2], pch=1)
      points(x=cxy[1]*2+bxy[1], y=bxy[2]-  cxy[2], pch=19)
      text  (x=cxy[1]*3+bxy[1], y=bxy[2]-  cxy[2], pos=4,"References")
      points(x=cxy[1]  +bxy[1], y=bxy[2]-2*cxy[2], pch=2)
      points(x=cxy[1]*2+bxy[1], y=bxy[2]-2*cxy[2], pch=17)
      text  (x=cxy[1]*3+bxy[1], y=bxy[2]-2*cxy[2], pos=4,"Targets")
      par(usr=myusr)
    }
  }
}
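# A minimal usage sketch (inside `if (FALSE)`; iris setup is illustrative).
if (FALSE)
{
  m  <- yai(x=iris[,1:2], y=iris[1:75,3:4], method="msn")
  nd <- notablyDifferent(m, p=0.05)
  plot(nd)  # error profile; filled symbols exceed the threshold
}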
# yaImpute/R/notablydifferent.R
# Provides a list of notably distant targets.
#
# Arguments:
#   object is of class yai (built using function yai); it
#     must contain reference distances if threshold is null.
#   threshold is a threshold value used in the calculations; if
#     NULL it is computed using the p value.
#   p is the percentile point in the log normal distribution used
#     to compute the threshold when it is null.
#   method="quantile": the threshold is computed using function "quantile"
#     to pick the (1-p)th percentile point in the set of distances.
#   method="distribution": the threshold is based on a percentile point
#     of an assumed distribution. Any other value is treated as
#     "distribution".
# Value:
#   A list of two data frames that contain 1) the references that are notably
#   distant from other references, and 2) the targets that are notably distant
#   from the references, plus the threshold used and lastly the method used.

notablyDistant <- function (object,kth=1,threshold=NULL,p=0.01, method="distribution")
{
  if (missing(object)) stop ("object required.")
  if (!inherits(object, "yai")) stop ("class must be yai")
  if (kth>object$k) kth <- object$k
  if (kth<1) kth <- 1
  if (is.null(threshold))
  {
    threshold <- NA
    if (is.null(object$neiDstRefs))
      stop ("distances among references are required when threshold is NULL")
    if (method=="distribution")
    {
      # use the beta distribution, distances are 0<=d<=1
      if (object$method %in% c("randomForest","random"))
      {
        m <- mean(object$neiDstRefs[,kth])
        ss <- var(object$neiDstRefs[,kth])
        if (!is.nan(ss) & !is.nan(m))
        {
          v <- m*((m*(1-m)/ss)-1)
          w <- (1-m)*((m*(1-m)/ss)-1)
          threshold <- qbeta(p,v,w,lower.tail=FALSE)
        }
      }
      else # use the lognormal distribution, distances are 0<=d
      {
        zeros <- object$neiDstRefs[,kth]<=0
        if (sum(zeros)==0) obs <- log(object$neiDstRefs[,kth])
        else
        {
          smz <- min(object$neiDstRefs[!zeros,kth])
          obs <- object$neiDstRefs[,kth]
          obs[zeros] <- smz*.5
          obs <- log(obs)
          warning ("when computing threshold, ",sum(zeros)," zero distances of ",
                   length(obs)," references were set to ",format(smz*.5))
        }
        m <- mean(obs)
        s <- sd(obs)
        if (!is.nan(s) & !is.nan(m))
          threshold <- exp(s*qnorm(p, mean=0, sd=1, lower.tail=FALSE, log.p=FALSE)+m)
      }
      if (is.nan(threshold))
      {
        threshold <- Inf
        warning ("threshold can not be computed, set to Inf")
      }
    }
    else
    {
      if (method != "quantile")
      {
        method="quantile"
        warning("method set to quantile")
      }
      threshold <- quantile(object$neiDstRefs[,kth],probs=1-p)
    }
  }
  findNDist <- function (ids,dst,names,threshold)
  {
    out <- data.frame(use=ids,dist=dst,row.names=names)
    out <- out[out[,2]>threshold,]
    if (nrow(out)>1)
    {
      ix <- sort(out[,2],decreasing=TRUE, index.return=TRUE)$ix
      out <- out[ix,]
    }
    out
  }
  if (!is.null(object$neiDstRefs))
    distRefs <- findNDist(object$neiIdsRefs[,kth],object$neiDstRefs[,kth],
                          rownames(object$neiIdsRefs),threshold)
  else distRefs=NULL
  if (!is.null(object$neiIdsTrgs))
    distTrgs <- findNDist(object$neiIdsTrgs[,kth],object$neiDstTrgs[,kth],
                          rownames(object$neiIdsTrgs),threshold)
  else distTrgs=NULL
  list(notablyDistantRefs=distRefs, notablyDistantTrgs=distTrgs,
       threshold=threshold, method=method)
}
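# A minimal usage sketch (inside `if (FALSE)`; iris setup is illustrative).
# With the default method, the threshold is the upper p-th point of a
# lognormal fitted to the logged reference distances: exp(m + s*qnorm(1-p)).
if (FALSE)
{
  m <- yai(x=iris[,1:2], y=iris[1:75,3:4], method="msn")
  notablyDistant(m, p=0.01)$notablyDistantTrgs
}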
# yaImpute/R/notablydistant.R
# Builds observed over imputed plots for an imputed data frame.
# Arguments:
#   x is of class yai or impute.yai. If the object is yai, impute is called
#     with observed=TRUE.
#   vars is a list of variables for the plots; if NULL, all those with imputed
#     and observed values are plotted.
#   pointColor is the color value (or vector) for the xy plots
#   lineColor is the color of the 1:1 line.
#   spineColor is the color vector of the spineplot
#   residual, plot residuals when TRUE, imputed over observed when FALSE.
#   ... passed to the impute function when it is called and passed to the plot function.

plot.yai = function (x,vars=NULL,pointColor=1,lineColor=2,spineColor=NULL,residual=FALSE,...)
{
  if (missing(x)) stop ("x required.")
  sub=deparse(substitute(x))
  if (class(x)[1] == "yai") x = impute.yai(x,vars=vars,observed=TRUE,...)
  if (is.null(x)) stop ("no imputations found using this x")
  if (is.null(vars)) vars=names(x)
  vi=paste(unique(strsplit(vars,".o",fixed=TRUE)))
  vi=intersect(vi,names(x))
  notFound=setdiff(vars,names(x))
  if (length(notFound)>0) warning ("variables not found: ",paste(notFound,collapse=", "))
  if (length(vi) == 0) stop("nothing to plot")
  vo=paste(vi,"o",sep=".")
  notFound=setdiff(vo,names(x))
  if (length(notFound)>0) warning ("variables not found: ",paste(notFound,collapse=", "))
  vo=intersect(vo,names(x))
  both=intersect(paste(unique(strsplit(vo,".o",fixed=TRUE))),vi)
  if (length(both) == 0) stop("nothing to plot")
  n=length(both)
  rows=floor(sqrt(n))
  if (rows==0) rows=1
  if (rows*rows == n) cols=rows else cols=rows+1
  if (rows*cols < n) rows=rows+1
  oldpar=par(mfcol=c(rows,cols))
  on.exit(par(oldpar))
  if (is.null(pointColor)) pointColor=1
  for (imp in both)
  {
    obs=paste(imp,"o",sep=".")
    if ((is.factor(x[,imp])|is.factor(x[,obs])))
    {
      p=try(spineplot(x=x[,imp],y=x[,obs],xlab="Imputed",ylab="Observed",
                      col=spineColor))
      if (inherits(p, "try-error")) warning ("no plot could be created for ",imp)
    }
    else
    {
      if (residual)
      {
        # TODO: figure out how to avoid using suppressWarnings and still
        # pass ... when it contains non-graphical arguments
        suppressWarnings(plot(x=x[,imp],y=x[,obs]-x[,imp],ylab="Residual",
                              xlab="Imputed",col=pointColor,...))
        abline(0,0,col=lineColor)
      }
      else
      {
        suppressWarnings(plot(x=x[,imp],y=x[,obs],xlab="Imputed",ylab="Observed",
                              col=pointColor,...))
        abline(0,1,col=lineColor)
      }
    }
    mtext(imp,font=par("font.main"),line=.7)
  }
  mtext(sub,outer = TRUE,line=-1.6, cex=1.5)
}

plot.impute.yai <- plot.yai

# function to plot the results of running compare
plot.compare.yai=function(x,pointColor=1,lineColor=2,...)
{
  if (!("compare.yai" %in% class(x))) stop("class must include compare.yai")
  addpoints=function(x,y,...)
  {
    points(x,y,col=pointColor,...)
    abline(0,1,col=lineColor)
  }
  pairs(x,lower.panel=addpoints,upper.panel=addpoints,
        xlim=c(0,max(max(x,na.rm=TRUE),1)),ylim=c(0,max(max(x,na.rm=TRUE),1)),...)
}
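# A minimal usage sketch (inside `if (FALSE)`; iris setup is illustrative).
if (FALSE)
{
  m <- yai(x=iris[,1:2], y=iris[1:75,3:4], method="msn")
  plot(m)                 # observed over imputed, one panel per y variable
  plot(m, residual=TRUE)  # residual version of the same panels
}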
# yaImpute/R/plot.yai.R
predict.yai <- function(object,newdata,...)
{
  if (missing(newdata)) impute.yai(object,...)
  else
  {
    al <- list(...)
    impute.yai(newtargets(object,newdata,al$k,al$ann),...)
  }
}
# yaImpute/R/predict.yai.R
print.yai = function(x,...)
{
  if (missing(x)) stop ("x required.")
  if (class(x)[1] != "yai") stop("arg class must be yai")
  cat ("\nCall:\n")
  print (x$call)
  if (length(x$obsDropped)== 0) cat ("0 observations dropped\n")
  else cat (length(x$obsDropped),"observations dropped: ",
            x$obsDropped[1:min(15,length(x$obsDropped))],"...\n")
  cat ("method used: ",x$method,"\n")
  if (is.null(x$cancor))
  {
    cat ("Cancor not run\n")
  }
  else
  {
    cat ("Cancor report:\n")
    print(format(data.frame(cor=x$cancor$cor, F=x$ftest$F, Pr.F=x$ftest$pgF,
                            Sig=c(" ","NS")[(x$ftest$pgF>x$pVal)+1]),digits=4))
    cat (x$nVec,"vectors used, pVal=",x$pVal,"\n")
    cat ("cancor$xcoef:\n")
    print(x$cancor$xcoef[,1:x$nVec])
  }
  if (!is.null(x$projector))
  {
    cat ("Projector:\n")
    print (x$projector)
  }
  if (is.null(x$ccaVegan))
  {
    cat ("CCA not run\n")
  }
  else
  {
    cat ("CCA analysis:\n")
    if (!requireNamespace ("vegan")) stop("install vegan and try again")
    print (x$ccaVegan)
  }
  if (is.null(x$ranForest))
  {
    cat ("randomForest not run\n")
  }
  else
  {
    cat ("randomForest analysis:\n")
    if (!requireNamespace ("randomForest")) stop("install randomForest and try again")
    print(yaiRFsummary(x))
  }
  cat (sum(x$yDrop),"y variables dropped ", paste(names(x$yDrop[x$yDrop]),collapse=","),"\n")
  cat (sum(x$xDrop),"x variables dropped ", paste(names(x$xDrop[x$xDrop]),collapse=","),"\n")
  if (x$ann & x$method!="randomForest") cat ("Note: ann used\n")
  else cat ("ann not used\n")
  if (length(x$neiDstTrgs)==0) cat ("No target neighbors computed.\n")
  else
  {
    nPr=min(10,nrow(x$neiDstTrgs))
    part=data.frame(x$neiDstTrgs[1:nPr,,drop=FALSE],x$neiIdsTrgs[1:nPr,,drop=FALSE],
                    stringsAsFactors = FALSE)
    names(part)=c(colnames(x$neiDstTrgs),colnames(x$neiIdsTrgs))
    cat ("First",nPr,"targets:\n")
    print (part)
  }
  if (length(x$neiDstRefs)==0) cat ("No reference neighbors computed.\n")
  else
  {
    nPr=min(10,nrow(x$neiDstRefs))
    part=data.frame(x$neiDstRefs[1:nPr,,drop=FALSE],x$neiIdsRefs[1:nPr,,drop=FALSE],
                    stringsAsFactors = FALSE)
    names(part)=c(colnames(x$neiDstRefs),colnames(x$neiIdsRefs))
    cat ("First",nPr,"references:\n")
    print (part)
  }
  if (!is.null(x$biasParameters))
  {
    cat ("Bias correction parameters:\n")
    cat ("trgVal CI =",x$biasParameters$trgValCI,
         " curVal =",x$biasParameters$curVal,
         "\nNumber of passes used =",x$biasParameters$npasses,
         " of ",x$biasParameters$oldk-1,"possible\n")
  }
}

summary.yai = function (object,...) print.yai(object,...)
# yaImpute/R/print.yai.R
# Computes the RMSD (root mean square difference) between observed
# and imputed observations. RMSD is like RMSE.
# Arguments:
#   object is of class yai or impute.yai. If the object is yai, impute is
#     called with observed=TRUE.
#   vars is a list of variables; if NULL, all those with imputed values are processed.
#   ... passed to the impute function when it is called
#   scale if TRUE, scale the rmsd by the std dev of the observed.
# Value:
#   A data frame with the rownames as vars and the column as RMSD

rmsd.yai <- function (object,vars=NULL,scale=FALSE,...)
{
  if (missing(object)) stop ("object required.")
  if (class(object)[1] == "yai") object = impute.yai(object,vars=vars,observed=TRUE,...)
  if (is.null(object)) stop ("no imputations found using this object")
  nuke = unlist(lapply(object,function (x) all(is.na(x))))
  nuke=nuke[nuke]
  if (length(nuke) > 0) object = object[,-match(names(nuke),names(object)),drop=FALSE]
  object = na.omit(object)
  if (is.null(vars)) vars=names(object)
  vi=paste(unique(strsplit(vars,".o",fixed=TRUE)))
  vi=intersect(vi,names(object))
  notFound=setdiff(vars,names(object))
  if (length(notFound)>0) warning ("variables not found or had missing values: ",
                                   paste(notFound,collapse=", "))
  if (length(vi) == 0) stop("nothing to compute")
  vo=paste(vi,"o",sep=".")
  notFound=setdiff(vo,names(object))
  if (length(notFound)>0) warning ("variables not found or had missing values: ",
                                   paste(notFound,collapse=", "))
  vo=intersect(vo,names(object))
  both=intersect(paste(unique(strsplit(vo,".o",fixed=TRUE))),vi)
  if (length(both) == 0) stop("nothing to compute")
  vo=paste(both,"o",sep=".")
  rmsd=data.frame(rep(NA,length(vo)),row.names=both)
  names(rmsd)=if (scale || length(scale)>1) "rmsdS" else "rmsd"
  usedScale = list()
  for (i in 1:length(both))
  {
    if (!is.factor(object[,both[i]]))
    {
      rmsd[i,1]=sqrt(mean(((object[,both[i]]-object[,vo[i]])^2)))
      if (scale || length(scale)>1)
      {
        div = NULL
        if (length(scale) > 1) div = scale[[both[i]]]
        if (is.null(div) || is.na(div)) div=attr(object,"scale")[both[i],"scale"] # in data
        if (is.null(div) || is.na(div)) div = sd(object[,vo[i]]) # use observed when needed.
        usedScale[[both[i]]] = div
        rmsd[i,1] = if (!is.na(div) && div > 0.01) rmsd[i,1]/div else NA
      }
    }
  }
  if (length(usedScale) > 0) attr(rmsd,"scale") = unlist(usedScale)
  rmsd
}

rmsd <- rmsd.yai
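# A minimal usage sketch (inside `if (FALSE)`; iris setup is illustrative).
if (FALSE)
{
  m <- yai(x=iris[,1:2], y=iris[1:75,3:4], method="msn")
  rmsd.yai(m)              # RMSD per y variable
  rmsd.yai(m, scale=TRUE)  # scaled by the std dev of the observed values
}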
# yaImpute/R/rmsd.yai.R
unionDataJoin=function(...,warn=TRUE)
{
  # creates a data frame that has the rows defined by a union of all rownames in the
  # arguments and columns defined by a union of all colnames in the arguments.
  # a single argument can be a list of data frames or matrices
  #
  # when warn is TRUE, columns that occur in more than one source are listed as a warning.
  args=list(...)
  if (length(args) == 1 && inherits(args[[1]], "list")) args=args[[1]]
  for (d in args)
  {
    if (!is.data.frame(d) && !is.matrix(d))
      stop ("arguments or list members must be matrices or data frames")
    if (is.matrix(d))
    {
      if (is.null(colnames(d))) stop ("column names are required within all input matrices")
      if (is.null(rownames(d))) stop ("row names are required within all input matrices")
      if (length(unique(colnames(d))) != length(colnames(d)))
        stop("column names must be unique within all input matrices")
    }
  }
  rows=NULL
  cols=NULL
  haveCol=NULL
  for (d in args)
  {
    rows=union(rows,rownames(d))
    haveCol=union(intersect(cols,colnames(d)),haveCol)
    cols=union(cols,colnames(d))
  }
  if (warn & length(haveCol)>0)
    warning ("Columns: \"",paste(haveCol,collapse=", "),"\" were defined more than once")
  all=matrix(data=NA,nrow=length(rows),ncol=length(cols))
  all=data.frame(all)
  rownames(all)=rows
  colnames(all)=cols
  factors=rep(FALSE,length(cols))
  names(factors)=cols
  for (d in args)
  {
    theCols=colnames(d)
    if (is.data.frame(d))
    {
      for (var in theCols)
      {
        if (is.factor(d[,var]))
        {
          factors[var] = TRUE
          all[rownames(d),var]=levels(d[,var])[d[,var]]
        }
        else all[rownames(d),var]=d[,var,drop=FALSE]
      }
    }
    else all[rownames(d),theCols]=d
  }
  for (var in colnames(all)) if (factors[var]) all[,var]=as.factor(all[,var])
  all
}
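# A minimal sketch (inside `if (FALSE)`); the two small data frames are
# illustrative assumptions. When a column occurs in several sources, values
# from later arguments win and a warning lists the shared columns.
if (FALSE)
{
  a <- data.frame(x=1:3, y=3:1, row.names=c("r1","r2","r3"))
  b <- data.frame(y=4:5, z=factor(c("u","v")), row.names=c("r3","r4"))
  unionDataJoin(a, b)  # 4 rows; columns x, y, z; NA where a source had no row
}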
# yaImpute/R/uniondatajoin.R
varSelection <- function (x,y,method="addVars",yaiMethod="msn",imputeMethod="closest",
                          wts=NULL,nboot=20,trace=FALSE,
                          useParallel=if (.Platform$OS.type == "windows") FALSE else TRUE,...)
{
  if (missing(x)) stop ("x must be specified.")
  if (missing(y)) stop ("y must be specified.")
  okMethods <- c("addVars","delVars")
  if (!(method %in% okMethods)) stop("method=\"",method,"\" must be one of: \"",
                                     paste0(okMethods,collapse="\", \""),"\"")
  if (is.null(wts)) wts <- rep(1,ncol(y))
  if (useParallel && .Platform$OS.type != "windows" && requireNamespace ("parallel"))
  {
    myapply <- parallel::mclapply
  }
  else
  {
    if (useParallel) warning("package parallel was not loaded and is not being used")
    myapply <- lapply
  }
  cl <- match.call()
  bootstrap <- nboot > 0
  # load required packages...this is done here so that forked
  # processes will have the required packages...different logic is needed
  # to support parallel on windows.
  if (yaiMethod == "gnn") # (GNN), make sure we have package vegan loaded
  {
    if (!requireNamespace ("vegan")) stop("install vegan and try again")
  }
  if (yaiMethod == "ica") # (ica), make sure we have package fastICA loaded
  {
    if (!requireNamespace ("fastICA")) stop("install fastICA and try again")
  }
  if (yaiMethod == "randomForest") # make sure we have package randomForest loaded
  {
    if (!requireNamespace ("randomForest")) stop("install randomForest and try again")
  }
  if (yaiMethod == "msnPP") # make sure we have package ccaPP loaded
  {
    if (!requireNamespace ("ccaPP")) stop("install ccaPP and try again")
  }
  if (yaiMethod == "gower") # make sure we have package gower loaded
  {
    if (!requireNamespace ("gower")) stop("install gower and try again")
  }
  # single variable elimination logic:
  if (method=="delVars")
  {
    allErr <- unlist( myapply(1:max(1,nboot),
      function (i,x,y,wts,yaiMethod,...)
        suppressWarnings(grmsd(one=suppressWarnings(yai(x=x,y=y,
          method=yaiMethod,bootstrap=bootstrap,...)),
          ancillaryData=y,wts=wts))
      ,x,y,wts,yaiMethod,bootstrap,...) )
    if (trace) cat ("With all vars, mean grmsd (over bootstraps) = ",mean(allErr),
                    "; stddev=",sd(allErr),"; Num cols = ",ncol(x),"\n",sep="")
    xa <- x
    selvars <- list(None=allErr)
    while (ncol(xa) > 1)
    {
      err <- list()
      for (var in 1:ncol(xa))
        err[[var]] <- unlist(myapply(1:max(1,nboot),
          function (i,xa,y,wts,var,yaiMethod,bootstrap,...)
            suppressWarnings(grmsd(one=suppressWarnings(yai(x=xa[,-var,
              drop=FALSE],y=y, method=yaiMethod,
              bootstrap=bootstrap,...)),
              ancillaryData=y,wts=wts)),
          xa,y,wts,var,yaiMethod,bootstrap,...) )
      names(err) <- names(xa)
      # drop the variable that creates the least error by not including it.
      del <- which.min(unlist(lapply(err,mean)))
      selvars[[names(del)]] <- as.vector(unlist(err[del]))
      xa <- xa[,-del,drop=FALSE]
      remVars <- colnames(xa)
      if (trace) cat ("Delete var= ",names(del),
        "; mean grmsd (over bootstraps) = ",mean(err[[del]]),
        "; stddev=",sd(err[[del]]),"; Num cols remaining= ",ncol(xa),"\n",sep="")
    }
  }
  else if (method=="addVars")
  {
    remVars <- NULL
    selvars <- list()
    keep <- NULL
    while (length(keep) < ncol(x))
    {
      err <- list()
      for (var in setdiff(names(x),keep))
      {
        xa <- x[,c(keep,var),drop=FALSE]
        err[[var]] <- unlist(myapply(1:max(1,nboot),
          function (i,xa,y,wts,yaiMethod,bootstrap,...)
            suppressWarnings(grmsd(one=suppressWarnings(yai(x=xa,y=y,
              method=yaiMethod,bootstrap=bootstrap,...)),
              ancillaryData=y,wts=wts)),
          xa,y,wts,yaiMethod,bootstrap,...)
      }
      # keep the variable that reduces the error the most when it is included
      add <- names(which.min(unlist(lapply(err,mean))))
      selvars[[add]] <- as.vector(unlist(err[add]))
      keep <- c(keep,add)
      if (trace) cat ("Added var= ",add,
        "; mean grmsd (over bootstraps) = ",mean(err[[add]]),
        "; stddev=",sd(err[[add]]),"; Num cols being used= ", ncol(xa),"\n",sep="")
    }
  }
  err <- lapply(selvars,function (x) c (mean(x),sd(x)))
  rn <- names(err)
  err <- matrix(unlist(err),ncol=2,byrow=TRUE)
  rownames(err) <- rn
  colnames(err) <- c("mean","sd")
  rtn <- list(call=cl,grmsd=err,allgrmsd=selvars,method=method)
  if (!is.null(remVars)) rtn$remVars <- remVars
  class(rtn) <- c("varSel","list")
  rtn
}

plot.varSel <- function (x,main=NULL,nbest=NULL,arrows=TRUE,...)
{
  if (missing(x)) stop ("x must be present")
  x = x
  if (!inherits(x,"varSel")) stop ("class of x must be varSel")
  if (is.null(main)) main <- switch(x$method,
    addVars="Mean distance as variables are added",
    delVars="Mean distance as variables are removed",
    stop("method '",x$method,"' not found in x"))
  if (is.null(nbest)) nbest <- length(bestVars(x))
  bcc <- rep(gray(.35),length(x$allgrmsd))
  if (is.null(x$remVars)) bcc[1:nbest] <- "black"
  else bcc[(length(bcc)-nbest+2):length(bcc)] <- "black"
  orgmar <- par()$mar
  par(mar=c(6,3,2,1))
  boxplot(x$allgrmsd,border=bcc,horizontal= FALSE, las=2,main=main,...)
  lines(x$grmsd[,1]            ,x=1:nrow(x$grmsd),col=2)
  lines(x$grmsd[,1]+x$grmsd[,2],x=1:nrow(x$grmsd),col=3,lty=2)
  lines(x$grmsd[,1]-x$grmsd[,2],x=1:nrow(x$grmsd),col=3,lty=2)
  if (is.null(x$remVars))
  {
    if (arrows & nbest>2)
    {
      ytop <- par()$usr[4]-par()$cxy[2]
      arrows(x0=nbest,y0=ytop,x1=1+par()$cxy[1]*.5,y1=ytop,length=par()$cin[2]*.5)
      text (paste0("Best ",nbest),x=nbest,y=ytop-par()$cxy[2],pos=2)
    }
  }
  else
  {
    ytop <- par()$usr[4]-par()$cxy[2]
    txt <- paste0("Remaining variable", if (length(x$remVars) > 1) "s" else "",": ",
                  paste0(x$remVars,collapse=", "))
    text (txt,x=par()$usr[2],y=par()$usr[3]+(par()$cxy[2]*.5),pos=2)
    x0 <- length(x$allgrmsd)-nbest+2
    if (arrows & nbest>2)
    {
      arrows(x0=x0,y0=ytop,x1=length(x$allgrmsd)-par()$cxy[1]*.5,y1=ytop,
             length=par()$cin[2]*.5)
      text (paste0("Best ",nbest),x=x0,y=ytop-par()$cxy[2],pos=4)
    }
  }
  par(mar=orgmar)
  invisible(NULL)
}

bestVars <- function (obj,nbest=NULL)
{
  if (missing(obj)) stop ("obj must be present")
  if (!inherits(obj,"varSel")) stop ("class of obj must be varSel")
  grmsd <- switch(obj$method,
    addVars=obj$grmsd[,1],
    delVars=rev(obj$grmsd[2:nrow(obj$grmsd),1]),
    stop("method '",obj$method,"' not found in obj"))
  if (is.null(nbest))
  {
    le <- length(grmsd)
    nbest <- if (le > 2)
    {
      s <- (grmsd[le]-grmsd[1])/le
      ss <- unlist(lapply(2:(le-1), function (i,ss) (ss[i-1]+ss[i])/2,diff(grmsd)))
      sb <- abs(s) > abs(ss)
      if (any(sb)) min(le,which.max(sb)+1) else le
    } else le
  }
  vars <- if (!is.null(obj$remVars)) c(obj$remVars,names(grmsd)) else names(grmsd)
  vars[1:min(nbest,length(vars))]
}
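# A minimal usage sketch (inside `if (FALSE)`; the iris setup and the small
# nboot are illustrative assumptions; real runs usually use more bootstraps).
if (FALSE)
{
  vs <- varSelection(x=iris[,1:3], y=iris[,4,drop=FALSE],
                     yaiMethod="msn", nboot=5, trace=TRUE)
  bestVars(vs)  # names of the x variables worth keeping
  plot(vs)      # grmsd as variables are added
}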
# yaImpute/R/varSelection.R
# vars returns a list of variables used in the problem;
# xvars and yvars return only the x's and y's.

xvars = function (object) colnames(object$xRefs)
yvars = function (object) colnames(object$yRefs)
vars  = function (object) list(xvars=colnames(object$xRefs),yvars=colnames(object$yRefs))
# yaImpute/R/vars.yai.R
# For each row, finds the column that has the maximum value. Returns
# a data frame with two columns: the first is the column name corresponding
# to the column of maximum value and the second is the corresponding maximum.
# The first column is converted to a factor.
#
# If the max is zero, the maxCol is identified as "zero".
# If there are too many "factors" (too many meaning over "nbig") in column 1,
# the lowest maxes are all termed "other".
#
# Intended use is to transform community ecology data for use in yai
# where method is randomForest.

whatsMax <- function (x,nbig=30)
{
  num <- vector("logical",ncol(x))
  for (i in 1:ncol(x)) num[i] <- is.numeric(x[,i])
  if (sum(as.numeric(num))==0) stop ("no numeric columns")
  ag <- deparse(substitute(x))
  orgRows <- nrow(x)
  x <- na.omit(x)
  if (nrow(x) != orgRows) warning (orgRows-nrow(x)," rows had missing data and were deleted")
  n <- colnames(x)
  ans <- apply(x,1,function (x1) which.max(x1))
  nm <- lapply(ans,function (i,n) n[i], colnames(x))
  ans <- apply(x,1,function (x1) x1[which.max(x1)])
  ans <- as.data.frame(list(a=unlist(nm),unlist(ans)),stringsAsFactors=FALSE)
  rownames(ans) <- rownames(x)
  colnames(ans) <- paste(ag,c("maxCol","maxVal"),sep=".")
  ans[ans[,2]==0,1] <- "zero"
  nf <- length(levels(as.factor(ans[,1])))
  if (nf > nbig)
  {
    tops <- names(sort(tapply(ans[,2],as.factor(ans[,1]),sum),decreasing = TRUE))
    ans[ans[,1] %in% tops[nbig:length(tops)],1] <- "other"
  }
  ans[,1] <- as.factor(ans[,1])
  ans[,2] <- as.numeric(ans[,2])
  ans
}
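# A minimal sketch (inside `if (FALSE)`); the tiny species matrix is an
# illustrative assumption.
if (FALSE)
{
  spp <- data.frame(abies=c(10,0,0), pinus=c(3,8,0), picea=c(0,1,0),
                    row.names=c("p1","p2","p3"))
  whatsMax(spp)  # per row: the dominant column (a factor) and its value;
                 # rows whose maximum is 0 are labeled "zero"
}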
# yaImpute/R/whatsMax.R
yai <- function(x=NULL,y=NULL,data=NULL,k=1,noTrgs=FALSE,noRefs=FALSE,
                nVec=NULL,pVal=.05,method="msn",ann=TRUE,mtry=NULL,ntree=500,
                rfMode="buildClasses",bootstrap=FALSE,ppControl=NULL,
                sampleVars=NULL,rfXsubsets=NULL)
{
  # define functions used internally.
  sumSqDiff=function(x,y) { d=x-y; sum(d*d) }
  findFactors = get("findFactors",asNamespace("yaImpute"))
  ftest.cor = function (p,q,N,cor)
  {
    s=min(p,q)
    if (s==0) stop ("p and q must be > 0")
    if (length(cor) < s) stop ("cor too short")
    lamda=array(dim=s)
    k=1:s
    for (i in k) lamda[i]=prod(1-cor[i:s]^2)
    r=(N-s-1)-((abs(p-q)+1)/2)
    Ndf=(p-k+1)*(q-k+1)
    u=(Ndf-2)/4
    xx=((p-k+1)^2+(q-k+1)^2)-5
    t=vector(mode="numeric",length=s)
    for (i in k) if (xx[i]>0) t[i]=sqrt(((p-k[i]+1)^2*(q-k[i]+1)^2-4)/xx[i])
    lamda.invt=lamda^(1/t)
    Ddf=(r*t)-(2*u)
    setNA = Ddf < 1 | Ndf < 1
    firstNA = which(setNA == TRUE)
    if (length(firstNA) == 0) firstNA=0
    if (length(firstNA) > 1) firstNA=firstNA[1]
    if (firstNA > 0) setNA[firstNA:length(setNA)] = TRUE
    F=((1-lamda.invt)/lamda.invt)*(Ddf/Ndf)
    F[setNA] = NA
    pgF = F
    firstNA = if (firstNA > 1) firstNA else length(F)
    {
      pgF[1:firstNA]=pf(F[1:firstNA],Ndf[1:firstNA],Ddf[1:firstNA], lower.tail=FALSE)
      pgF[setNA] = NA
    }
    list(F=F,pgF=pgF)
  }
  mymean = function(x)
  {
    if (is.null(ncol(x)))
    {
      ans = if (is.factor(x)) NA else mean(x)
    }
    else
    {
      ans=as.numeric(rep(NA,ncol(x)))
      names(ans)=colnames(x)
      for (i in 1:ncol(x)) if (!is.factor(x[,i])) ans[i]=mean(x[,i])
    }
    ans
  }
  mysd = function(x)
  {
    if (is.null(ncol(x)))
    {
      ans = if (is.factor(x)) NA else sd(x)
    }
    else
    {
      ans=as.numeric(rep(NA,ncol(x)))
      names(ans)=colnames(x)
      for (i in 1:ncol(x)) if (!is.factor(x[,i])) ans[i]=sd(x[,i])
    }
    ans
  }
  #===============================================
  # ARGUMENT and DATA screening

  methodSet=c("msn","msn2","msnPP","mahalanobis","ica","euclidean","gnn",
              "randomForest","raw","random","gower")
  if (!(method %in% methodSet))
    stop (paste("method not one of:",paste(methodSet,collapse=", ")))
  if (method == "gnn") # (GNN), make sure we have package vegan loaded
  {
    if (!requireNamespace ("vegan"))
    {
      stop("install vegan and try again")
      # the purpose of this line of code is to suppress CRAN check notes
      cca <- rda <- function (...) NULL
    }
    else
    {
      cca <- vegan::cca
      rda <- vegan::rda
    }
  }
  if (method == "ica") # (ica), make sure we have package fastICA loaded
  {
    if (!requireNamespace ("fastICA"))
    {
      stop("install fastICA and try again")
      # the purpose of this line of code is to suppress CRAN check notes
      fastICA <- function (...) NULL
    }
    else
    {
      fastICA <- fastICA::fastICA
    }
  }
  if (method == "randomForest") # make sure we have package randomForest loaded
  {
    if (!requireNamespace ("randomForest"))
    {
      stop("install randomForest and try again")
      # the purpose of this line of code is to suppress CRAN check notes
      randomForest <- function (...) NULL
    }
    else
    {
      randomForest <- randomForest::randomForest
    }
  }
  if (method == "msnPP") # make sure we have package ccaPP loaded
  {
    if (!requireNamespace ("ccaPP"))
    {
      stop("install ccaPP and try again")
      # the purpose of this line of code is to suppress CRAN check notes
      fastMAD <- ccaGrid <- ccaProj <- function (...) NULL
    }
    else
    {
      fastMAD <- ccaPP::fastMAD
      ccaGrid <- ccaPP::ccaGrid
      ccaProj <- ccaPP::ccaProj
    }
  }
  if (method == "gower") # make sure we have package gower loaded
  {
    if (!requireNamespace ("gower"))
    {
      stop("install gower and try again")
      # the purpose of this line of code is to suppress CRAN check notes
      gower_topn <- function (...) NULL
    }
    else
    {
      gower_topn <- gower::gower_topn
    }
  }
  cl=match.call()
  obsDropped=NULL
  theFormula=NULL
  yall=NULL
  if (is.data.frame(x) | is.matrix(x))
  {
    if (mode(rownames(x)) != "character") rownames(x)=as.character(rownames(x))
    xall=na.omit (as.data.frame(x))
    if (nrow(xall) != nrow(x))
    {
      warning (nrow(x)-nrow(xall)," x observation(s) removed")
      obsDropped=names(attributes(na.omit(x))$na.action)
    }
    if (!is.null(y))
    {
      if (is.null(dim(y)))
      {
        if (length(y) == nrow (x)) y=data.frame(y,row.names=rownames(x),
                                                stringsAsFactors = TRUE)
        else stop(paste0("when formulas are not used,",
                         " y must be a matrix, dataframe,",
                         " or a vector the same length of rows in x"))
      }
      if (is.matrix(y) | is.data.frame(y))
      {
        if (mode(rownames(y)) != "character") rownames(y)=as.character(rownames(y))
        yall=na.omit(as.data.frame(y))
        if (nrow(yall) != nrow(as.data.frame(y)))
        {
          warning (nrow(y)-nrow(yall)," y observation(s) removed")
          obsDropped=union(obsDropped,names(attributes(na.omit(y))$na.action))
        }
      }
      theFormula=NULL
    }
  }
  else if (inherits(x,"formula"))
  {
    if (inherits(y,"formula")) yall=model.frame(y,data=data)
    xall=model.frame(x,data=data)
    obsDropped=setdiff(rownames(data),rownames(xall))
    if (length(obsDropped)) warning (length(obsDropped)," observation(s) removed")
    theFormula=list(x=x,y=y)
  }
  else stop ("x is missing or not a matrix or a dataframe")
  if (is.null(yall) & (method %in% c("mahalanobis","ica",
      "euclidean","randomForest","raw","gower")))
  {
    ydum=TRUE
    yall=data.frame(ydummy=rep(1,nrow(xall)),row.names=rownames(xall))
  }
  else ydum=FALSE
  if (is.null(yall)) stop("y is missing")
  if (nrow(xall) == 0) stop ("no observations in x")
  if (! (method %in% c("random","randomForest","gower")))
  {
    fy=0
    if (!(method %in% c("mahalanobis","ica","euclidean","raw")))
      fy=sum(findFactors(yall))
    if (fy+sum(findFactors(xall)>0)>0)
      stop("factors allowed only for methods randomForest, random, or gower")
  }
  refs=intersect(rownames(yall),rownames(xall))
  if (length(refs) == 0) stop ("no reference observations.")
  # if X variable subsets are used, make sure we are using method="randomForest"
  if (!is.null(rfXsubsets) && method != "randomForest")
  {
    warning ("specification of rfXsubsets is ignored when method is not randomForest.")
    rfXsubsets = NULL
  }
  if (!is.null(rfXsubsets))
  {
    vtok = match(unique(unlist(rfXsubsets)),names(xall))
    if (any(is.na(vtok))) stop("one or more variables in rfXsubsets are not present in x.")
    xall = xall[,vtok]
  }
  # if sampling variables, set up xRefs and yRefs accordingly
  if (!is.null(sampleVars))
  {
    if (length(sampleVars) == 1 && is.null(names(sampleVars))) sampleVars=rep(sampleVars,2)
    names(sampleVars) = if (is.null(names(sampleVars))) c("X","Y")
                        else toupper(names(sampleVars))
    nx = match("X",names(sampleVars))
    ny = match("Y",names(sampleVars))
    nx = if (!is.na(nx)) sampleVars[nx] else 0
    ny = if (!is.na(ny)) sampleVars[ny] else 0
    if (nx > 0)
    {
      nx = if (nx < 1.) max(1, ncol(xall)*nx) else min(nx, ncol(xall))
      nxn = sample(1:ncol(xall),nx)
      xall = xall[,nxn,drop=FALSE]
    }
    if (ny > 0)
    {
      ny = if (ny < 1.) max(1, ncol(yall)*ny) else min(ny, ncol(yall))
      nyn = sample(1:ncol(yall),ny)
      yall = yall[,nyn,drop=FALSE]
    }
  }
  # if this is a bootstrap run, draw the sample.
  if (bootstrap)
  {
    if (length (grep ("\\.[0-9]$",rownames(xall))) > 0)
      stop ("rownames must not end with .[0-9] when bootstrap is true.")
    bootsamp <- sort(sample(x=refs, size=length(refs), replace=TRUE))
    yRefs=yall[bootsamp,,drop=FALSE]
    xRefs=xall[bootsamp,,drop=FALSE]
    refs = bootsamp
  }
  else
  {
    yRefs=yall[refs,,drop=FALSE]
    xRefs=xall[refs,,drop=FALSE]
  }
  trgs=setdiff(rownames(xall),refs)
  if (method == "gnn") # remove rows with zero sums or vegan will error off...
  {
    zero = apply(yRefs,1,sum) <= 0
    ndrop=sum(zero)
    if (ndrop>0)
    {
      warning (ndrop,paste0(" rows have y-variable row sums <= 0 and were ",
                            "converted to target observations for method gnn"))
      if (ndrop==nrow(yRefs)) stop ("all references were deleted")
      obsDropped=union(obsDropped,refs[zero])
      refs=refs[!zero]
      yRefs=yall[refs,,drop=FALSE]
      xRefs=xall[refs,,drop=FALSE]
      trgs=setdiff(rownames(xall),refs)
    }
    # now remove columns with zero sums.
    yDrop=apply(yRefs,2,sum) <= 0
    if (sum(yDrop) > 0) warning ("y variables with zero sums: ",
                                 paste(colnames(yRefs)[yDrop],collapse=","))
    if (sum(yDrop) == ncol(yRefs)) stop("no y variables")
    if (sum(yDrop) > 0) yRefs=yRefs[,!yDrop,drop=FALSE]
  }
  # initial scale values (maybe reduced by some methods).
  if (method != "msnPP")
  {
    xScale=list(center=mymean(xRefs),scale=mysd(xRefs))
    yScale=list(center=mymean(yRefs),scale=mysd(yRefs))
  }
  else
  {
    msn3cntr = function (x)
    {
      if (is.factor(x)) return (list(NULL,NULL))
      cM = fastMAD(x) # uses fastMAD for scaling.
      if (cM$MAD == 0)
      {
        cM$MAD = sd(x)
        cM$center = mean(x)
      }
      cM
    }
    ce=matrix(unlist(apply(xRefs,2,msn3cntr)),ncol=2,byrow=TRUE)
    rownames(ce) = colnames(xRefs)
    xScale=list(center=ce[,1],scale=ce[,2])
    ce=matrix(unlist(apply(yRefs,2,msn3cntr)),ncol=2,byrow=TRUE)
    rownames(ce) = colnames(yRefs)
    yScale=list(center=ce[,1],scale=ce[,2])
  }
  # for all methods except randomForest, random, raw, and gower,
  # variables with zero variance are dropped.
  if (!(method %in% c("randomForest","random","raw","gower")))
  {
    xDrop=xScale$scale < 1e-10
    if (sum(xDrop) > 0) warning ("x variables with zero variance: ",
                                 paste(colnames(xRefs)[xDrop],collapse=","))
    if (sum(xDrop) == ncol(xRefs)) stop("no x variables")
    if (sum(xDrop) > 0)
    {
      xRefs=xRefs[,!xDrop,drop=FALSE]
      xScale$scale=xScale$scale[!xDrop]
      xScale$center=xScale$center[!xDrop]
    }
  }
  else xDrop=NULL
  # for most methods, xRefs must be a matrix.
  if (! (method %in% c("randomForest","gower")) && !is.matrix(xRefs)) xRefs=as.matrix(xRefs)
  # define these elements as NULL, some will be redefined below.
  cancor=NULL
  ftest=NULL
  yDrop=NULL
  projector=NULL
  ccaVegan=NULL
  ranForest=NULL
  xTrgs=NULL
  xcvRefs=NULL
  xcvTrgs=NULL
  ICA=NULL
  #======= Define projector (if used), scale the variables, and project the
  # reference space. Also project the target space if it is being used.
  if (method %in% c("msn","msn2","msnPP")) # msn (all kinds)
  {
    yDrop=yScale$scale < 1e-10
    if (sum(yDrop) > 0) warning ("y variables with zero variance: ",
                                 paste(colnames(yRefs)[yDrop],collapse=","))
    if (sum(yDrop) == ncol(yRefs)) stop("no y variables")
    if (sum(yDrop) > 0)
    {
      yRefs=yRefs[,!yDrop,drop=FALSE]
      yScale$scale=yScale$scale[!yDrop]
      yScale$center=yScale$center[!yDrop]
    }
    xcvRefs=scale(xRefs,center=xScale$center,scale=xScale$scale)
    ycvRefs=scale(yRefs,center=yScale$center,scale=yScale$scale)
    if (method %in% c("msn","msn2")) # msn and msn2
    {
      cancor=cancor(xcvRefs,ycvRefs,xcenter = FALSE, ycenter = FALSE)
      theCols = rownames(cancor$xcoef)
      # scale the coefficients so that the canonical vectors will have unit variance.
      cscal = 1/apply(xcvRefs[,theCols,drop=FALSE] %*% cancor$xcoef[,1,drop=FALSE],2,sd)
      cancor$ycoef = cancor$ycoef * cscal
      cancor$xcoef = cancor$xcoef * cscal
    }
    else # msnPP
    {
      meth="spearman"
      ppfunc=ccaGrid
      if (!is.null(ppControl))
      {
        if (is.null(names(ppControl))) stop ("ppControl must have named strings.")
        for (ppn in names(ppControl))
        {
          if (ppn == "method") meth = ppControl[[ppn]]
          else if (ppn == "search")
            ppfunc = if (ppControl[[ppn]] == "data" || ppControl[[ppn]] == "proj")
                       ccaProj else ccaGrid
          else stop ("ppControl named element ",ppn," is invalid")
        }
      }
      # solve the canonical correlation analysis via projection pursuit
      cancor=ppfunc(xcvRefs,ycvRefs,method=meth,fallback=TRUE,
                    k=min(ncol(xcvRefs),ncol(ycvRefs),nVec))
      # save the results using names and attributes that correspond
      # to the cancor results
      cancor$ycoef = cancor$B
      rownames(cancor$ycoef) = colnames(ycvRefs)
      cancor$xcoef = cancor$A
      rownames(cancor$xcoef) = colnames(xcvRefs)
      theCols = rownames(cancor$xcoef)
      cancor$A = NULL
      cancor$B = NULL
      class(cancor) = "list"
    }
    ftest=ftest.cor(p=nrow(cancor$ycoef),q=nrow(cancor$xcoef),
                    N=nrow(yRefs),cancor$cor)
    if (is.null(nVec))
    {
      fcheck = ftest$pgF[!is.na(ftest$pgF)]
      if (length(fcheck)> 0) nVec=max(1,length(fcheck)-sum(fcheck>pVal))
    }
    if (is.null(nVec)) nVec=1
    nVec=min(nVec,length(cancor$cor))
    nVec=max(nVec,1)
    if (method %in% c("msn","msnPP"))
      projector = cancor$xcoef[,1:nVec,drop=FALSE] %*%
                  diag(cancor$cor[1:nVec,drop=FALSE],nVec,nVec)
    if (method == "msn2")
    {
      if (any (1/(1-cancor$cor[1:nVec,drop=FALSE]^2) < .Machine$double.eps*10000)) nVec=1
      if (any (1/(1-cancor$cor[1:nVec,drop=FALSE]^2) < .Machine$double.eps*10000))
        stop("msn2 can not be run, likely because there are too few observations.")
      projector = cancor$xcoef[,1:nVec,drop=FALSE] %*%
                  diag(cancor$cor[1:nVec,drop=FALSE],nVec,nVec) %*%
                  diag(sqrt(1/(1-cancor$cor[1:nVec,drop=FALSE]^2)),nVec,nVec)
      if (any (projector == -Inf | projector == Inf | is.na(projector) | is.nan(projector)))
        stop ("msn2 can not be run.")
    }
    if (length(theCols)<ncol(xRefs))
    {
      # just get the names and create a logical
      if (is.null(xDrop)) xDrop=xScale$center==0
      remove=setdiff(colnames(xRefs),theCols)
      xDrop[remove]=TRUE
      warning ("x variables with collinearity: ",paste(remove,collapse=","))
      xRefs=xRefs[,theCols,drop=FALSE]
      xScale$center=xScale$center[theCols]
      xScale$scale=xScale$scale[theCols]
      xcvRefs=scale(xRefs,center=xScale$center,scale=xScale$scale)
    }
    xcvRefs=xcvRefs %*% projector
    if (!noTrgs && length(trgs) > 0)
    {
      xTrgs=xall[trgs,theCols,drop=FALSE]
      xcvTrgs=scale(xTrgs,center=xScale$center,scale=xScale$scale)
      xcvTrgs=xcvTrgs %*% projector
    }
  }
  else if (method == "mahalanobis")
  {
    xcvRefs=scale(xRefs,center=xScale$center,scale=xScale$scale)
    qr = qr(xcvRefs) # maybe we are not at full rank
    xcvRefs=xcvRefs[,qr$pivot[1:qr$rank],drop=FALSE]
    projector = solve(chol(cov(xcvRefs)))
    theCols = colnames(projector)
    if (length(theCols)<ncol(xRefs))
    {
      # just get the names and create a logical
      if (is.null(xDrop)) xDrop=xScale$center==0
      remove=setdiff(colnames(xRefs),theCols)
      xDrop[remove]=TRUE
      warning ("x variables with collinearity: ",paste(remove,collapse=","))
      xRefs=xRefs[,theCols,drop=FALSE]
      xScale$center=xScale$center[theCols]
      xScale$scale=xScale$scale[theCols]
    }
    nVec = ncol(projector) # same as qr$rank
    xcvRefs=xcvRefs %*% projector
    if (!noTrgs && length(trgs) > 0)
    {
      xTrgs=xall[trgs,theCols,drop=FALSE]
      xcvTrgs=scale(xTrgs,center=xScale$center,scale=xScale$scale)
      xcvTrgs=xcvTrgs %*% projector
    }
  }
  else if (method == "ica")
  {
    xcvRefs=scale(xRefs,center=xScale$center,scale=xScale$scale)
    qr =
qr(xcvRefs) # maybe we are not at full rank xcvRefs=xcvRefs[,qr$pivot[1:qr$rank],drop=FALSE] a=fastICA(xcvRefs,ncol(xcvRefs),method="C",) ICA=list(S=a$S,K=a$K,A=a$A,W=a$W) projector = a$K %*% a$W colnames(projector)=colnames(xcvRefs) rownames(projector)=colnames(xcvRefs) theCols = colnames(xcvRefs) if (length(theCols)<ncol(xRefs)) { if (is.null(xDrop)) xDrop=xScale$center==0 remove=setdiff(colnames(xRefs),theCols) xDrop[remove]=TRUE warning ("x variables with colinearity: ",paste(remove,collapse=",")) xRefs=xRefs[,theCols,drop=FALSE] xScale$center=xScale$center[theCols] xScale$scale=xScale$scale[theCols] } nVec = ncol(projector) # same as qr$rank xcvRefs=xcvRefs %*% projector if (!noTrgs && length(trgs) > 0) { xTrgs=xall[trgs,theCols,drop=FALSE] xcvTrgs=scale(xTrgs,center=xScale$center,scale=xScale$scale) xcvTrgs=xcvTrgs %*% projector } } else if (method == "euclidean") { xcvRefs=scale(xRefs,center=xScale$center,scale=xScale$scale) nVec = ncol(xRefs) if (!noTrgs && length(trgs) > 0) { xTrgs=xall[trgs,!xDrop,drop=FALSE] xcvTrgs=scale(xTrgs,center=xScale$center,scale=xScale$scale) } } else if (method %in% c("raw")) { xcvRefs=xRefs nVec = ncol(xRefs) if (!noTrgs && length(trgs) > 0) { xTrgs=xall[trgs,,drop=FALSE] xcvTrgs=as.matrix(xTrgs) } } else if (method %in% c("gower")) { xcvRefs=xRefs nVec = ncol(xRefs) if (!noTrgs && length(trgs) > 0) { xTrgs=xall[trgs,,drop=FALSE] xcvTrgs=xTrgs } ann=FALSE } else if (method == "gnn") # GNN { xcvRefs=scale(xRefs,center=xScale$center,scale=xScale$scale) ccaVegan = cca(X=yRefs, Y=xcvRefs) if (is.null(ccaVegan$CCA) | ccaVegan$CCA$rank == 0) { warning (paste("cca() in package vegan failed, likely cause is", "too few X or Y variables.\nAttemping rda(),", "which is not well tested in the yaImpute package.")) ccaVegan = rda(X=yRefs, Y=xcvRefs) } # create a projected space for the reference observations xcvRefs=predict(ccaVegan,type="lc",rank="full") xcvRefs=xcvRefs %*% diag(sqrt(ccaVegan$CCA$eig/sum(ccaVegan$CCA$eig))) # create a projected space for the unknowns (target observations) if (!noTrgs && length(trgs) > 0) { xTrgs=xall[trgs,,drop=FALSE] xcvTrgs=scale(xTrgs,center=xScale$center,scale=xScale$scale) xcvTrgs=predict(ccaVegan, newdata=as.data.frame(xcvTrgs),type="lc",rank="full") xcvTrgs=xcvTrgs %*% diag(sqrt(ccaVegan$CCA$eig/sum(ccaVegan$CCA$eig))) } nVec = ncol(xcvRefs) } else if (method == "randomForest") { rfBuildClasses=NULL xTrgs=xall[trgs,1,drop=FALSE] rfVersion=packageDescription("randomForest")[["Version"]] if (compareVersion(rfVersion,"4.5-22") < 0) stop("Update your version of randomForest.") if (is.null(ntree)) ntree=500 if (ydum) { if (!is.null(rfXsubsets)) warning(paste0("Specification of rfXsubsets", " ignored when unsupervised randomForest is run.")) yone=NULL mt = if (is.null(mtry)) max(floor(sqrt(ncol(xRefs))),1) else min(mtry, ncol(xRefs)) ranForest=randomForest(x=xRefs,y=yone,proximity=FALSE,importance=TRUE, keep.forest=TRUE,mtry=mt,ntree=ntree) ranForest$type="yaImputeUnsupervised" ranForest=list(unsupervised=ranForest) } else { ranForest=vector("list",ncol(yRefs)) if (length(ntree) < ncol(yRefs)) ntree=rep(max(50, floor(ntree/ncol(yRefs))),ncol(yRefs)) for (i in 1:ncol(yRefs)) { xN = names(xRefs) if (!is.null(rfXsubsets)) { yV = names(yRefs)[i] if (!is.null(rfXsubsets[[yV]])) xN = intersect(rfXsubsets[[yV]],xN) if (length(xN)==0) stop ("rfXsubsets is empty for Y-variable ",yV) } yone=yRefs[,i] if (!is.factor(yone)) { if (is.null(rfBuildClasses) && rfMode=="buildClasses") rfBuildClasses=TRUE if (is.null(rfBuildClasses)) { # if the 
version is prior to 19 if (compareVersion(rfVersion,"4.5-19") < 0) { warning(paste0("yaImpute directly supports regression for ", "continuous y's for randomForest version 4.5-19 and later.")) rfBuildClasses=TRUE } else rfBuildClasses=FALSE } if (rfBuildClasses) { yone=as.numeric(yone) breaks <- pretty(yone, n = min(20,nclass.Sturges(yone)),min.n = 1) div <- diff(breaks)[1] yone=as.factor(floor(yone/div)) } } mt = if (is.null(mtry)) max(floor(sqrt(length(xN))), 1) else min(mtry, length(xN)) ranForest[[i]]=randomForest(x=xRefs[,xN,FALSE], y=yone,proximity=FALSE,importance=TRUE,keep.forest=TRUE, mtry=mt,ntree=ntree[i]) } names(ranForest)=colnames(yRefs) } nodes=NULL for (i in 1:length(ranForest)) { nodeset=attr(predict(ranForest[[i]],xall, proximity=FALSE,nodes=TRUE),"nodes") if (is.null(nodeset)) stop("randomForest did not return nodes") colnames(nodeset)=paste(colnames(nodeset),i,sep=".") nodes=if (is.null(nodes)) nodeset else cbind(nodes,nodeset) } if (bootstrap) { rn = sub("\\.[0-9]$","",rownames(xRefs)) refNodes = nodes[rn,] rownames(refNodes) = rownames(xRefs) } else refNodes = nodes[rownames(xRefs),] INTrefNodes=as.integer(refNodes) INTnrow=as.integer(nrow(xRefs)) INTncol=as.integer(ncol(nodes)) INTsort = INTrefNodes dim(INTsort) = c(INTnrow,INTncol) INTsort=apply(INTsort,2,function (x) sort(x,index.return = TRUE, decreasing = FALSE)$ix-1) attributes(INTsort)=NULL INTsort = as.integer(INTsort) attr(ranForest,"rfRefNodeSort") = list(INTrefNodes=INTrefNodes, INTnrow=INTnrow, INTncol=INTncol, INTsort=INTsort) } else if (method == "random") { nVec = 1 ann=FALSE xcvRefs=data.frame(random=runif(nrow(xRefs)),row.names=rownames(xRefs)) if (!noTrgs && length(trgs) > 0) xcvTrgs= data.frame(random=runif(length(trgs)),row.names=trgs) } else # default { stop("no code for specified method") } # if bootstrap, then modify the reference list essentually removing the # duplicate samples. For randomForest, correct processing is done above. 
if (bootstrap && method != "randomForest") { unq = unique(bootsamp) if (!is.null(xcvRefs)) xcvRefs = xcvRefs[unq,,drop=FALSE] xRefs = xRefs[unq,,drop=FALSE] } k=min(k,nrow(xRefs)) # ======= find neighbors for TARGETS if (noTrgs || length(trgs) == 0) { neiDstTrgs=NULL neiIdsTrgs=NULL } else { neiDstTrgs=matrix(data=NA,nrow=length(trgs),ncol=k) rownames(neiDstTrgs)=trgs colnames(neiDstTrgs)=paste("Dst.k",1:k,sep="") neiIdsTrgs=matrix(data="",nrow=length(trgs),ncol=k) rownames(neiIdsTrgs)=trgs colnames(neiIdsTrgs)=paste("Id.k",1:k,sep="") if (method %in% c("msn","msn2","msnPP","mahalanobis", "ica","euclidean","gnn","raw")) { if (ann) { ann.out=ann(xcvRefs, xcvTrgs, k, verbose=FALSE)$knnIndexDist neiDstTrgs[TRUE]=sqrt(ann.out[,(k+1):ncol(ann.out)]) for (i in 1:k) neiIdsTrgs[,i]=rownames(xcvRefs)[ann.out[,i]] rownames(neiDstTrgs)=rownames(neiIdsTrgs) } else { for (row in rownames(xcvTrgs)) { d=sqrt(sort(apply(xcvRefs,MARGIN=1,sumSqDiff,xcvTrgs[row,]))[1:k]) neiDstTrgs[row,]=d neiIdsTrgs[row,]=names(d) } } } else if (method == "random") { l=k+1 d = matrix(unlist(lapply(xcvTrgs[[1]],function (x, xcv, l) { sort((xcv-x)^2,index.return=TRUE)$ix[2:l] },xcvRefs[[1]],l)),nrow=nrow(xcvTrgs),ncol=k,byrow=TRUE) for (ic in 1:ncol(d)) { neiDstTrgs[,ic]=abs(xcvTrgs[,1]-xcvRefs[d[,ic],1]) neiIdsTrgs[,ic]=rownames(xcvRefs)[d[,ic]] } } else if (method == "gower") { gow = gower_topn(x=xcvRefs,y=xcvTrgs,n=k) for (i in 1:k) { neiDstTrgs[,i]=gow$distance[i,] neiIdsTrgs[,i]=rownames(xcvTrgs)[gow$index[i,]] } } else if (method == "randomForest") { prox=lapply(apply(nodes[rownames(xTrgs),,drop=FALSE],1,as.list),function (x) { prx=.Call("rfoneprox", INTrefNodes, INTsort, INTnrow, INTncol, as.integer(x), vector("integer",INTnrow)) if (k > 1) px=sort(prx,index.return = TRUE, decreasing = TRUE)$ix[1:k] else px=which.max(prx) c(prx[px],px) # counts followed by pointers to references }) for (i in 1:k) { neiDstTrgs[,i]=unlist(lapply(prox,function (x,i) (INTncol-x[i])/INTncol,i)) neiIdsTrgs[,i]=unlist(lapply(prox,function (x,i,k,Rnames) Rnames[x[k+i]],i,k,rownames(xRefs))) } } else # default { stop("no code for specified method") } } # ======= find neighbors for REFERENCES if (noRefs) { neiDstRefs=NULL neiIdsRefs=NULL } else { neiDstRefs=matrix(data=NA,nrow=nrow(xRefs),ncol=k) rownames(neiDstRefs)=rownames(xRefs) colnames(neiDstRefs)=paste("Dst.k",1:k,sep="") neiIdsRefs=matrix(data="",nrow=nrow(xRefs),ncol=k) rownames(neiIdsRefs)=rownames(xRefs) colnames(neiIdsRefs)=paste("Id.k",1:k,sep="") l=k+1 if (method %in% c("msn","msn2","msnPP","mahalanobis","ica","euclidean", "gnn","raw")) { if (ann & nrow(xcvRefs)> 0) { ann.out=ann(xcvRefs, xcvRefs, l, verbose=FALSE)$knnIndexDist neiDstRefs[TRUE]=sqrt(ann.out[,(l+2):ncol(ann.out)]) # check for a second neighbor being the reference itself (can happen # if the first and second neighbors both have distance of zero). 
fix = ann.out[,1] != 1:nrow(ann.out) if (any(fix)) ann.out[fix,2] = ann.out[fix,1] for (i in 2:l) { neiIdsRefs[,(i-1)]=rownames(xcvRefs)[ann.out[,i]] } rownames(neiDstRefs)=rownames(neiIdsRefs) } else { for (row in 1:nrow(xcvRefs)) { d=sort(apply(xcvRefs,MARGIN=1,sumSqDiff,xcvRefs[row,])[-row])[1:k] neiDstRefs[row,]=d neiIdsRefs[row,]=names(d) } } } else if (method == "gower") { gow = gower_topn(x=xcvRefs,y=xcvRefs,n=l) for (i in 2:l) { neiDstRefs[,(i-1)]=gow$distance[i,] neiIdsRefs[,(i-1)]=rownames(xcvRefs)[gow$index[i,]] } } else if (method == "randomForest") { prox=lapply(apply(refNodes,1,as.list),function (x) { prx=.Call("rfoneprox", INTrefNodes, INTsort, INTnrow, INTncol, as.integer(x), vector("integer",INTnrow)) if (k > 1) px=sort(prx,index.return = TRUE, decreasing = TRUE)$ix[2:l] else { px=which.max(prx) prx[px]=-1 px=which.max(prx) } c(prx[px],px) # counts followed by pointers to references }) for (i in 1:k) { neiDstRefs[,i]=unlist(lapply(prox,function (x,i) (INTncol-x[i])/INTncol,i)) neiIdsRefs[,i]=unlist(lapply(prox,function (x,i,k,Rnames) Rnames[x[k+i]],i,k,rownames(xRefs))) } } else if (method == "random") { l=k+1 d = matrix(unlist(lapply(xcvRefs[[1]],function (x, xcv, l) { sort((xcv-x)^2,index.return=TRUE)$ix[2:l] },xcvRefs[[1]],l)),nrow=nrow(xcvRefs),ncol=k,byrow=TRUE) for (ic in 1:ncol(d)) { neiDstRefs[,ic]=abs(xcvRefs[,1]-xcvRefs[d[,ic],1]) neiIdsRefs[,ic]=rownames(xcvRefs)[d[,ic]] } } else # default { stop("no code for specified method") } } xlevels=NULL fa=findFactors(xRefs) if (sum(fa)>0) { xlevels=vector(mode="list",length=sum(fa)) kk=0 for (i in 1:length(fa)) { if (fa[i]) { kk=kk+1 xlevels[[kk]]=levels(xRefs[,i]) names(xlevels)[[kk]]=names(xRefs)[i] } } } out=list(call=cl,yRefs=yRefs,xRefs=xRefs,obsDropped=obsDropped,yDrop=yDrop, bootstrap= if (bootstrap) bootsamp else bootstrap, xDrop=xDrop,trgRows=trgs,xall=xall,cancor=cancor,theFormula=theFormula, ftest=ftest,yScale=yScale,xScale=xScale,ccaVegan=ccaVegan,ranForest=ranForest, ICA=ICA,k=k,projector=projector,nVec=nVec,pVal=pVal,method=method,ann=ann, xlevels=xlevels,neiDstTrgs=neiDstTrgs,neiIdsTrgs=neiIdsTrgs, neiDstRefs=neiDstRefs,neiIdsRefs=neiIdsRefs,rfXsubsets=rfXsubsets) class(out)="yai" out }
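# A minimal usage sketch of yai(); the toy data and names below are
# illustrative only, not part of the package. Rows of x whose rownames do
# not appear in y become imputation targets.
#
#   set.seed(12)
#   xdat <- data.frame(x1 = rnorm(50), x2 = rnorm(50),
#                      row.names = paste0("obs", 1:50))
#   ydat <- data.frame(y1 = xdat$x1[1:30] + rnorm(30, sd = .2),
#                      row.names = rownames(xdat)[1:30])
#   fit <- yai(x = xdat, y = ydat, method = "msn", k = 2)
#   fit$neiIdsTrgs   # the 2 nearest reference ids for each of the 20 targets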
## ---- yaImpute/R/yai.R ----
# Creates a summary of various aspects of several randomForests (one for each
# variable) when method randomForest is used.

yaiRFsummary = function(object, nTop=0)
{
  if(!inherits(object, "yai")) stop ("arg must be of class yai")
  if (object$method != "randomForest") stop ("method must be randomForest")
  if (!requireNamespace("randomForest")) stop("install randomForest and try again")
  scaledImportance = yaiVarImp(object, nTop, plot=FALSE)
  error  = vector(mode="numeric",  length=length(names(object$ranForest)))
  errtag = vector(mode="character",length=length(names(object$ranForest)))
  levels = vector(mode="integer",  length=length(names(object$ranForest)))
  ntree  = vector(mode="integer",  length=length(names(object$ranForest)))
  type   = vector(mode="character",length=length(names(object$ranForest)))
  i = 0
  for (Rf in object$ranForest)
  {
    i = i+1
    type[i] = Rf$type
    if(Rf$type == "regression")
    {
      error [i] = round(100*Rf$rsq[length(Rf$rsq)], digits=2)
      errtag[i] = "%var explained"
      levels[i] = NA
    }
    else if(Rf$type == "classification")
    {
      error [i] = Rf$err.rate[Rf$ntree,"OOB"]
      errtag[i] = "OOB error rate"
      levels[i] = nrow(Rf$confusion)
    }
    else
    {
      error [i] = NA
      errtag[i] = "N/A"
      levels[i] = NA
    }
    ntree [i] = Rf$ntree
  }
  forestAttributes=data.frame(ntree,error,errtag,levels,type)
  rownames(forestAttributes)=names(object$ranForest)
  list(forestAttributes=forestAttributes,scaledImportance=scaledImportance)
}
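# A minimal usage sketch (toy data, illustrative names only); it assumes a
# yai object fit with method = "randomForest":
#
#   set.seed(12)
#   xdat <- data.frame(x1 = rnorm(60), x2 = rnorm(60),
#                      row.names = paste0("r", 1:60))
#   ydat <- data.frame(y1 = xdat$x1 + rnorm(60, sd = .3),
#                      row.names = rownames(xdat))
#   rf <- yai(x = xdat, y = ydat, method = "randomForest", ntree = 100)
#   yaiRFsummary(rf)$forestAttributes   # per-forest error, ntree, and type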
## ---- yaImpute/R/yaiRFsummary.R ----
# Creates a summary of the variable importance scores over several
# randomForests (one for each variable) when method randomForest is used.
# These values are then scaled.

yaiVarImp = function(object, nTop=20, plot=TRUE, ...)
{
  if(!inherits(object, "yai")) stop ("arg must be of class yai")
  if (object$method != "randomForest") stop ("method must be randomForest")
  if (!requireNamespace ("randomForest"))
  {
    stop("install randomForest and try again")
    # the purpose of this line of code is to suppress CRAN check notes
    importance <- function (...) NULL
  }
  else importance <- randomForest::importance
  scaledImportance = matrix(NA, nrow = length(names(object$ranForest)),
                            ncol=length(xvars(object)))
  colnames(scaledImportance) = xvars(object)
  rownames(scaledImportance) = names(object$ranForest)
  i = 0
  for (Rf in object$ranForest)
  {
    i = i+1
    one = importance(Rf)
    scale = FALSE
    attr = if (Rf$type == "regression") "%IncMSE" else "MeanDecreaseAccuracy"
    if (nrow(one)>1) scale = sd(one[,attr])>0
    imports = scale(one[,attr],center=TRUE,scale=scale)
    scaledImportance[i,rownames(imports)] = imports
  }
  if (is.na(nTop) | nTop == 0) nTop=ncol(scaledImportance)
  scaledImportance = data.frame(scaledImportance)
  nTop = min(ncol(scaledImportance), nTop)
  best = sort(apply(scaledImportance, 2, median, na.rm=TRUE),
              decreasing = TRUE, index.return = TRUE)$ix[1:nTop]
  if (plot)
  {
    plt = par()$plt
    oldplt = plt
    plt[1] = .2
    boxplot(as.data.frame(scaledImportance[,best,drop=FALSE]), horizontal=TRUE,
            par(plt=plt), las=1, main=deparse(substitute(object)),
            xlab="Scaled Importance",...)
    par(plt=oldplt)
    invisible(scaledImportance[,best,FALSE])
  }
  else return(scaledImportance[,best,FALSE])
}
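# A minimal usage sketch, reusing the randomForest-based yai object `rf`
# from the sketch in yaiRFsummary.R above:
#
#   yaiVarImp(rf, nTop = 2)        # boxplot of scaled importances
#   yaiVarImp(rf, plot = FALSE)    # or return the scaled scores instead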
## ---- yaImpute/R/yaiVarImp.R ----
`F.test.cca` <- function(x, ...){
  #Extract basic object information
  if(!("cca"%in%class(x)))
    stop("Object of class cca required.\n")
  s <- length(x$corr)
  p <- length(x$xlab)
  q <- length(x$ylab)
  N <- NROW(x$canvarx)
  k <- 1:s
  #Compute statistic and df
  lambda <- sapply(k,function(i){prod(1-x$corr[i:s]^2)})
  r <- (N-s-1)-((abs(p-q)+1)/2)
  Ndf <- (p-k+1)*(q-k+1)
  u <- (Ndf-2)/4
  xx <- ((p-k+1)^2+(q-k+1)^2)-5
  t <- sqrt(((p-k+1)^2*(q-k+1)^2-4)/xx)
  ilambda <- lambda^(1/t)
  Ddf <- (r*t)-(2*u)
  Fstat <- ((1-ilambda)/ilambda)*(Ddf/Ndf)
  pgF <- pf(Fstat,Ndf,Ddf,lower.tail=FALSE)
  #Assemble and return the results
  out <- list()
  out$corr <- x$corr
  out$statistic <- Fstat
  out$parameter <- cbind(Ndf,Ddf)
  colnames(out$parameter) <- c("num df", "denom df")
  out$p.value <- pgF
  out$method <- "F test for significance of canonical correlations"
  out$data.name <- names(x$corr)
  class(out)<-c("F.test.cca","htest")
  out
}
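#A minimal usage sketch (toy data, illustrative names only): F.test.cca()
#takes the "cca" object returned by cca().
#
#  set.seed(1)
#  x <- matrix(rnorm(200), ncol = 2)
#  y <- cbind(x[, 1] + rnorm(100, sd = .5), rnorm(100))
#  F.test.cca(cca(x, y))   #Rao's F approximation, one row per variate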
## ---- yacca/R/F.test.cca.R ----
`cca` <- function(x, y, xlab=colnames(x), ylab=colnames(y), xcenter=TRUE,
    ycenter=TRUE, xscale=FALSE, yscale=FALSE, standardize.scores=TRUE,
    use="complete.obs", na.rm=TRUE, use.eigs=FALSE, max.dim=Inf,
    reg.param=NULL){
  #Perform the preliminaries
  if(is.data.frame(x))   #Some routines require matrices...
    x<-as.matrix(x)
  if(is.data.frame(y))
    y<-as.matrix(y)
  #Center the data matrices, if needed
  if(xcenter||xscale)
    x<-scale(x,scale=xscale,center=xcenter)
  if(ycenter||yscale)
    y<-scale(y,scale=yscale,center=ycenter)
  if(is.null(dim(x)))   #Make sure we have matrices
    x<-matrix(x,ncol=1)
  if(is.null(dim(y)))
    y<-matrix(y,ncol=1)
  #Find out how large these data matrices are
  nx<-dim(x)[2]
  ny<-dim(y)[2]
  ncv<-min(nx,ny,max.dim)
  cvlab<-paste("CV",1:ncv)
  o<-list()
  #Get covariance matrices
  cxx<-cov(x,use=use)
  cyy<-cov(y,use=use)
  cxy<-cov(x,y,use=use)
  cyx<-t(cxy)
  #Regularize, if desired
  if(!is.null(reg.param)){
    reg.param<-rep(reg.param,length=2)
    cxx<-cxx+diag(nx)*reg.param[1]
    cyy<-cyy+diag(ny)*reg.param[2]
  }
  #Find the projections
  if(!use.eigs){   #More stable, but slower
    ey<-eigen(solve(cyy,cyx)%*%solve(cxx,cxy))
  }else{           #Less stable, but possibly faster
    ey<-RSpectra::eigs(solve(cyy,cyx)%*%solve(cxx,cxy),k=ncv,which="LM")
  }
  #Note: we use Re to filter out tiny complex values that can arise
  #due to numerical noise
  ey$values<-Re(ey$values)
  ey$vectors<-Re(ey$vectors)
  #Debug info - uncomment to trace the solution
  #print(solve(cyy,cyx)%*%solve(cxx,cxy))
  #print(ey$val)
  #print(eigen(solve(cyy,cyx)%*%solve(cxx,cxy))$val)
  ex<-list(values=Re(ey$values),vectors=Re(solve(cxx,cxy)%*%(ey$vec)))
  o$corr<-(ex$val[1:ncv])^0.5
  names(o$corr)<-cvlab
  o$corrsq<-o$corr^2   #Get the variance accounted for by each canonical variate
  names(o$corrsq)<-cvlab
  o$xcoef<-ex$vec[,1:ncv,drop=FALSE]
  rownames(o$xcoef)<-xlab
  colnames(o$xcoef)<-cvlab
  o$ycoef<-ey$vec[,1:ncv,drop=FALSE]
  rownames(o$ycoef)<-ylab
  colnames(o$ycoef)<-cvlab
  #Find the canonical variates (using the coefficients) and compute
  #structural correlation information
  o$canvarx<-x%*%o$xcoef   #Construct the canonical variates
  rownames(o$canvarx)<-rownames(x)
  colnames(o$canvarx)<-cvlab
  o$canvary<-y%*%o$ycoef
  rownames(o$canvary)<-rownames(y)
  colnames(o$canvary)<-cvlab
  if(standardize.scores){   #If needed, standardize the scores/coefs
    sdx<-apply(o$canvarx,2,sd)
    sdy<-apply(o$canvary,2,sd)
    o$canvarx<-sweep(o$canvarx,2,sdx,"/")
    o$canvary<-sweep(o$canvary,2,sdy,"/")
    o$xcoef<-sweep(o$xcoef,2,sdx,"/")
    o$ycoef<-sweep(o$ycoef,2,sdy,"/")
  }
  o$xstructcorr<-cor(x,o$canvarx,use=use)   #Find structural correlations
  rownames(o$xstructcorr)<-xlab
  colnames(o$xstructcorr)<-cvlab
  o$ystructcorr<-cor(y,o$canvary,use=use)
  rownames(o$ystructcorr)<-ylab
  colnames(o$ystructcorr)<-cvlab
  o$xstructcorrsq<-o$xstructcorr^2   #Find variance explained by structural correlations
  rownames(o$xstructcorrsq)<-xlab
  colnames(o$xstructcorrsq)<-cvlab
  o$ystructcorrsq<-o$ystructcorr^2
  rownames(o$ystructcorrsq)<-ylab
  colnames(o$ystructcorrsq)<-cvlab
  o$xcrosscorr<-cor(x,o$canvary,use=use)   #Find cross-correlations
  rownames(o$xcrosscorr)<-xlab
  colnames(o$xcrosscorr)<-cvlab
  o$ycrosscorr<-cor(y,o$canvarx,use=use)
  rownames(o$ycrosscorr)<-ylab
  colnames(o$ycrosscorr)<-cvlab
  o$xcrosscorrsq<-o$xcrosscorr^2   #Find variance explained by cross-correlations
  rownames(o$xcrosscorrsq)<-xlab
  colnames(o$xcrosscorrsq)<-cvlab
  o$ycrosscorrsq<-o$ycrosscorr^2
  rownames(o$ycrosscorrsq)<-ylab
  colnames(o$ycrosscorrsq)<-cvlab
  #Find the canonical communalities (total var explained)
  o$xcancom<-apply(o$xstructcorrsq,1,sum,na.rm=na.rm)
  names(o$xcancom)<-xlab
  o$ycancom<-apply(o$ystructcorrsq,1,sum,na.rm=na.rm)
  names(o$ycancom)<-ylab
  #Find the canonical variate adequacies
  o$xcanvad<-apply(o$xstructcorrsq,2,mean,na.rm=na.rm)
  names(o$xcanvad)<-cvlab
  o$ycanvad<-apply(o$ystructcorrsq,2,mean,na.rm=na.rm)
  names(o$ycanvad)<-cvlab
  #Find the redundancy indices (Rd) for X|Y and Y|X
  o$xvrd<-o$xcanvad*o$corrsq
  names(o$xvrd)<-cvlab
  o$yvrd<-o$ycanvad*o$corrsq
  names(o$yvrd)<-cvlab
  o$xrd<-sum(o$xvrd,na.rm=na.rm)
  o$yrd<-sum(o$yvrd,na.rm=na.rm)
  bartvbase<--(NROW(x)-1-(nx+ny+1)/2)   #Bartlett's chisq stats
  o$chisq<-bartvbase*(sum(log(1-o$corr^2))-c(0,cumsum(log(1-o$corr^2))[-ncv]))
  o$df<-(nx+1-(1:ncv))*(ny+1-(1:ncv))
  names(o$chisq)<-cvlab
  names(o$df)<-cvlab
  o$xlab<-xlab   #Save labels, just in case
  o$ylab<-ylab
  o$reg.param<-reg.param
  #print(eigen(solve(cxx)%*%cxy%*%solve(cyy)%*%cyx))
  #Return the results
  class(o)<-"cca"
  o
}
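#A minimal usage sketch (toy data, illustrative names only):
#
#  set.seed(2)
#  X <- data.frame(a = rnorm(100), b = rnorm(100))
#  Y <- data.frame(u = X$a + rnorm(100, sd = .4), v = rnorm(100))
#  fit <- cca(X, Y)
#  fit$corr          #canonical correlations
#  fit$xstructcorr   #loadings of the X variables on each variate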
## ---- yacca/R/cca.R ----
`helio.plot` <- function(c, cv=1, xvlab=c$xlab, yvlab=c$ylab,
    x.name="X Variables", y.name="Y Variables", lab.cex=1, wid.fact=0.75,
    main="Helio Plot", sub=paste("Canonical Variate",cv,sep=""),
    zero.rad=30, range.rad=20, name.padding=5, name.cex=1.5,
    axis.circ=c(-1,1), x.group=rep(0,dim(c$xstructcorr)[1]),
    y.group=rep(0,dim(c$ystructcorr)[1]), type="correlation"){
  #First, open up a new window
  plot.new()
  plot.window(c(-100,100),c(-100,100))   #Assume, for convenience, a +-100 world
  #Set the appropriate data, depending on whether this is a correlation or
  #a variance plot
  if(type=="correlation"){
    xdat<-c$xstructcorr
    ydat<-c$ystructcorr
  }else if(type=="variance"){
    xdat<-c$xstructcorrsq
    ydat<-c$ystructcorrsq
  }else
    stop(paste("Plot type ",type," not supported.\n",sep=""))
  #Set radii for inner, middle, outer, and name circles
  ir<-zero.rad-range.rad
  mr<-zero.rad
  or<-zero.rad+range.rad
  nr<-zero.rad+range.rad+name.padding
  #Next, put the dividing line and any axis circles in place
  lines(c(0,0),c(-90,90))
  lines(mr*sin(2*pi*((0:100)/100)),mr*cos(2*pi*((0:100)/100)),lty=1)
  if(!is.null(axis.circ))
    for(i in 1:length(axis.circ))
      lines((mr+range.rad*axis.circ[i])*sin(2*pi*((0:100)/100)),
            (mr+range.rad*axis.circ[i])*cos(2*pi*((0:100)/100)),lty=3)
  #Label the two halves of the circle
  text(-50,95,label=x.name,cex=name.cex)
  text(50,95,label=y.name,cex=name.cex)
  #Label the ranges
  #text(rep(0,6),c(-45,-25,-5,5,25,45),label=c(1,0,-1,-1,0,1),
  #     pos=c(rep(2,3),rep(4,3)),cex=0.85,offset=0.1)
  #Get the number of variables in each set
  nx<-dim(xdat)[1]
  ny<-dim(ydat)[1]
  #Place rectangles and names for the x (left) variables
  for(i in 1:nx){
    #First, place rectangles
    if(xdat[i,cv]>0)   #Set fill color - black if positive, unfilled if negative
      bcol<-1
    else
      bcol<-NA
    bang<-(-pi/(nx+1))*i   #Determine the angle
    binc<-pi/(max(nx,ny)+1)*wid.fact/2
    bwinc<-ir*sin(binc)   #Determine the box width increment (based on binc at inner circle)
    bx<-vector()
    bx[1]<-mr*sin(bang)-bwinc*cos(-bang)
    bx[2]<-(mr+range.rad*xdat[i,cv])*sin(bang)-bwinc*cos(-bang)
    bx[3]<-(mr+range.rad*xdat[i,cv])*sin(bang)+bwinc*cos(-bang)
    bx[4]<-mr*sin(bang)+bwinc*cos(-bang)
    by<-vector()
    by[1]<-mr*cos(bang)-bwinc*sin(-bang)
    by[2]<-(mr+range.rad*xdat[i,cv])*cos(bang)-bwinc*sin(-bang)
    by[3]<-(mr+range.rad*xdat[i,cv])*cos(bang)+bwinc*sin(-bang)
    by[4]<-mr*cos(bang)+bwinc*sin(-bang)
    polygon(bx,by,col=bcol,lty=1)   #Draw the box
    #Next, place names
    text(nr*sin(bang),nr*cos(bang),label=xvlab[i],
         srt=(3*pi/2-bang)*(360/(2*pi)),pos=2,cex=lab.cex)
  }
  #Place rectangles and names for the y (right) variables
  for(i in 1:ny){
    #First, place rectangles
    if(ydat[i,cv]>0)   #Set fill color - black if positive, unfilled if negative
      bcol<-1
    else
      bcol<-NA
    bang<-(pi/(ny+1))*i   #Determine the angle
    binc<-pi/(max(nx,ny)+1)*wid.fact/2
    bwinc<-ir*sin(binc)   #Determine the box width increment (based on binc at inner circle)
    bx<-vector()
    bx[1]<-mr*sin(bang)-bwinc*cos(-bang)
    bx[2]<-(mr+range.rad*ydat[i,cv])*sin(bang)-bwinc*cos(-bang)
    bx[3]<-(mr+range.rad*ydat[i,cv])*sin(bang)+bwinc*cos(-bang)
    bx[4]<-mr*sin(bang)+bwinc*cos(-bang)
    by<-vector()
    by[1]<-mr*cos(bang)-bwinc*sin(-bang)
    by[2]<-(mr+range.rad*ydat[i,cv])*cos(bang)-bwinc*sin(-bang)
    by[3]<-(mr+range.rad*ydat[i,cv])*cos(bang)+bwinc*sin(-bang)
    by[4]<-mr*cos(bang)+bwinc*sin(-bang)
    polygon(bx,by,col=bcol,lty=1)   #Draw the box
    #Next, place names
    text(nr*sin(bang),nr*cos(bang),label=yvlab[i],
         srt=(pi/2-bang)*(360/(2*pi)),pos=4,cex=lab.cex)
  }
  #Perform grouping for the X variables, if needed. 0 means ungrouped,
  #numbers above it are grouped.
  if((!is.null(x.group))&(max(x.group)>0)){
    for(i in unique(x.group))
      if(i>0){
        #Find first and last occurrence (they'd damn well better be sorted!)
        gvect<-(x.group%in%i)*(1:length(x.group))
        gvect<-gvect[gvect>0]
        minang<-min(gvect)*(-pi/(nx+1))
        maxang<-max(gvect)*(-pi/(nx+1))
        #Add a nice looking grouping thingee
        lines(((or+nr)/2)*sin((((0:100)/100)*(maxang-minang)+minang)),
              ((or+nr)/2)*cos((((0:100)/100)*(maxang-minang)+minang)),lty=1)
        lines(c(((or+nr)/2)*sin(minang),nr*sin(minang)),
              c(((or+nr)/2)*cos(minang),nr*cos(minang)),lty=1)
        lines(c(((or+nr)/2)*sin(maxang),nr*sin(maxang)),
              c(((or+nr)/2)*cos(maxang),nr*cos(maxang)),lty=1)
      }
  }
  #Perform grouping for the Y variables, if needed. 0 means ungrouped,
  #numbers above it are grouped.
  if((!is.null(y.group))&(max(y.group)>0)){
    for(i in unique(y.group))
      if(i>0){
        #Find first and last occurrence (they'd damn well better be sorted!)
        gvect<-(y.group%in%i)*(1:length(y.group))
        gvect<-gvect[gvect>0]
        minang<-min(gvect)*(pi/(ny+1))
        maxang<-max(gvect)*(pi/(ny+1))
        #Add a nice looking grouping thingee
        lines(((or+nr)/2)*sin((((0:100)/100)*(maxang-minang)+minang)),
              ((or+nr)/2)*cos((((0:100)/100)*(maxang-minang)+minang)),lty=1)
        lines(c(((or+nr)/2)*sin(minang),nr*sin(minang)),
              c(((or+nr)/2)*cos(minang),nr*cos(minang)),lty=1)
        lines(c(((or+nr)/2)*sin(maxang),nr*sin(maxang)),
              c(((or+nr)/2)*cos(maxang),nr*cos(maxang)),lty=1)
      }
  }
  #Add title, if one is listed
  title(main=main,sub=sub)
}
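#A minimal usage sketch, reusing the fitted `fit` from the cca() sketch above:
#
#  helio.plot(fit, cv = 1)                      #loadings on variate 1
#  helio.plot(fit, cv = 1, type = "variance")   #squared loadings instead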
## ---- yacca/R/helio.plot.R ----
`plot.cca` <- function(x,...){
  #Set the plotting parameters
  ncv<-length(x$corr)
  oldpar<-par(no.readonly=TRUE)
  on.exit(par(oldpar))
  par(mfrow=c(ceiling(sqrt(ncv)),ceiling(sqrt(ncv))))
  #Plot the data on each canonical variate
  for(i in 1:ncv){
    plot(x$canvarx[,i],x$canvary[,i],xlab="X",ylab="Y",
         main=paste("Canonical Variate Plot - Variate",i,sep=" "),...)
    abline(mean(x$canvary[,i],na.rm=TRUE)-
           x$corr[i]*mean(x$canvarx[,i],na.rm=TRUE),x$corr[i])
    text(mean(x$canvarx[,i],na.rm=TRUE),mean(x$canvary[,i],na.rm=TRUE),
         label=paste("r=",round(x$corr[i],digits=2),sep=""),pos=1,
         srt=180/pi*atan(x$corr[i]))
  }
  #Redundancy plots
  par(mfrow=c(1,1),ask=TRUE)
  h<-cbind(x$xvrd,x$yvrd)   #Get the redundancy information
  h<-rbind(c(x$xrd,x$yrd),h)
  colnames(h)<-c("X Given Y","Y Given X")
  rownames(h)<-c("Total",paste("CV",1:ncv))
  barplot(h,beside=TRUE,main="Canonical Variate Redundancy Plot",
          ylim=c(0,1),ylab="Fraction of Variance Explained",
          legend.text=rownames(h),col=rainbow(ncv+1))
  #Plot the loadings on each canonical variate using the helio plot
  par(mfrow=c(ceiling(sqrt(ncv)),ceiling(sqrt(ncv))))
  for(i in 1:ncv){
    helio.plot(x,cv=i,main=paste("Structural Correlations for CV",i))
  }
  #Now repeat for variances deposited
  par(mfrow=c(ceiling(sqrt(ncv)),ceiling(sqrt(ncv))))
  for(i in 1:ncv){
    helio.plot(x,cv=i,main=paste("Explained Variance for CV",i),
               type="variance",axis.circ=c(0.5,1),range.rad=25)
  }
}
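#A minimal usage sketch, reusing the fitted `fit` from the cca() sketch
#above; plot() pages through the variate scatterplots, a redundancy barplot,
#and the helio plots (par(ask=TRUE) prompts between pages):
#
#  plot(fit)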
## ---- yacca/R/plot.cca.R ----
`print.F.test.cca` <- function(x, ...){
  cat("\n\tF Test for Canonical Correlations (Rao's F Approximation)\n\n")
  ctab <- cbind(x$corr, x$statistic, x$parameter, x$p.value)
  colnames(ctab) <- c("Corr","F","Num df","Den df","Pr(>F)")
  rownames(ctab) <- names(x$corr)
  printCoefmat(ctab)
  cat("\n")
}
## ---- yacca/R/print.F.test.cca.R ----
`print.cca` <- function(x, ...){
  cat("\nCanonical Correlation Analysis\n\n")
  #Show the correlations
  cat("Canonical Correlations:\n")
  print(x$corr)
  #Show the coefficients
  cat("\nX Coefficients:\n")
  print(x$xcoef)
  cat("\nY Coefficients:\n")
  print(x$ycoef)
  #Structural correlations
  cat("\nStructural Correlations (Loadings) - X Vars:\n")
  print(x$xstructcorr)
  cat("\nStructural Correlations (Loadings) - Y Vars:\n")
  print(x$ystructcorr)
  cat("\nAggregate Redundancy Coefficients (Total Variance Explained):\n")
  cat("\tX | Y:",x$xrd,"\n")
  cat("\tY | X:",x$yrd,"\n")
  cat("\n")
}
## ---- yacca/R/print.cca.R ----
`print.summary.cca` <- function(x, ...){
  cat("\nCanonical Correlation Analysis - Summary\n\n")
  #Show the correlations
  cat("\nCanonical Correlations:\n\n")
  print(x$corr)
  cat("\nShared Variance on Each Canonical Variate:\n\n")
  print(x$corrsq)
  #Bartlett's chisq test
  tab<-cbind(x$corrsq,x$chisq,x$df,1-pchisq(x$chisq,df=x$df))
  colnames(tab)<-c("rho^2","Chisq","df","Pr(>X)")
  rownames(tab)<-paste("CV",1:length(x$corr))
  cat("\nBartlett's Chi-Squared Test:\n\n")
  printCoefmat(tab)
  #Show the coefficients
  cat("\n\nCanonical Variate Coefficients:\n")
  cat("\n\tX Vars:\n")
  print(x$xcoef)
  cat("\n\tY Vars:\n")
  print(x$ycoef)
  #Structural correlations
  cat("\n\nStructural Correlations (Loadings):\n")
  cat("\n\tX Vars:\n")
  print(x$xstructcorr)
  cat("\n\tY Vars:\n")
  print(x$ystructcorr)
  cat("\n\nFractional Variance Deposition on Canonical Variates:\n")
  cat("\n\tX Vars:\n")
  print(x$xstructcorrsq)
  cat("\n\tY Vars:\n")
  print(x$ystructcorrsq)
  #Redundancy measures
  cat("\n\nCanonical Communalities (Fraction of Total Variance\nExplained for Each Variable, Within Sets):\n")
  cat("\n\tX Vars:\n")
  print(x$xcancom)
  cat("\n\tY Vars:\n")
  print(x$ycancom)
  cat("\n\nCanonical Variate Adequacies (Fraction of Total Variance\nExplained by Each CV, Within Sets):\n",fill=TRUE)
  cat("\n\tX Vars:\n")
  print(x$xcanvad)
  cat("\n\tY Vars:\n")
  print(x$ycanvad)
  cat("\n\nRedundancy Coefficients (Fraction of Total Variance\nExplained by Each CV, Across Sets):\n",fill=TRUE)
  cat("\n\tX | Y:\n")
  print(x$xvrd)
  cat("\n\tY | X:\n")
  print(x$yvrd)
  cat("\n\nAggregate Redundancy Coefficients (Total Variance\nExplained by All CVs, Across Sets):\n",fill=TRUE)
  cat("\tX | Y:",x$xrd,"\n")
  cat("\tY | X:",x$yrd,"\n")
  cat("\n")
}
## ---- yacca/R/print.summary.cca.R ----
`summary.cca` <- function(object, ...){
  s<-object
  class(s)<-"summary.cca"
  s
}
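#A minimal usage sketch: summary() only reclasses the cca object, so the
#detailed report is produced by print.summary.cca() when the result prints.
#
#  summary(fit)   #with `fit` from the cca() sketch above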
## ---- yacca/R/summary.cca.R ----
######################################################################
#
# zzz.R
#
# copyright (c) 2018, Carter T. Butts <[email protected]>
# Last Modified 2/25/18
# Licensed under the GNU General Public License version 3
#
# Part of the R/yacca package; based on the zzz.R files from sna
# and other statnet packages (all hail).
#
# .onAttach is run when the package is loaded with library(yacca)
#
######################################################################

.onAttach <- function(libname, pkgname){
  temp<-packageDescription("yacca")
  msg<-paste(temp$Package,": ",temp$Title,"\n",
             "Version ",temp$Version," created on ",temp$Date,".\n",sep="")
  msg<-paste(msg,"copyright (c) 2008, Carter T. Butts, University of California-Irvine\n",sep="")
  msg<-paste(msg,'For citation information, type citation("yacca").\n')
  msg<-paste(msg,'Type help("yacca-package") to get started.\n')
  packageStartupMessage(msg)
}
## ---- yacca/R/zzz.R ----
#' Generate a list of indices for the n-fold cross-validation
#'
#' The function \code{folds} generates a list of indices for the n-fold cross-validation
#'
#' @param idx A vector of indices
#' @param n The number of folds
#' @param seed The seed value to generate random n-fold indices
#'
#' @return A list of n-fold indices
#'
#' @examples
#' folds(seq(10), n = 3, seed = 2020)

folds <- function(idx, n, seed = 1) {
  g <- with(set.seed(seed), sample(idx, length(idx))) %% n + 1
  r <- split(idx, g)
  names(r) <- paste('Fold', seq(n), sep = '')
  return(r)
}
## ---- yager/R/folds.R ----
#' Generate random numbers by latin hypercube sampling
#'
#' The function \code{gen_latin} generates a vector of random numbers by latin hypercube sampling
#'
#' @param min The minimum value of random numbers
#' @param max The maximum value of random numbers
#' @param n The number of random numbers to generate
#' @param seed The seed value of random number generation
#'
#' @return A vector of random numbers bounded by the min and max
#'
#' @examples
#' gen_latin(0, 1, 10, 2020)

gen_latin <- function(min = 0, max = 1, n, seed = 1) {
  set.seed(seed)
  return(round(min + (max - min) * c(lhs::randomLHS(n, k = 1)), 8))
}
## ---- yager/R/gen_latin.R ----
#' Generate a sobol sequence
#'
#' The function \code{gen_sobol} generates a vector of scrambled sobol sequence values
#'
#' @param min The minimum value of random numbers
#' @param max The maximum value of random numbers
#' @param n The number of random numbers to generate
#' @param seed The seed value of random number generation
#'
#' @return A vector of sobol sequence values bounded by the min and max
#'
#' @examples
#' gen_sobol(0, 1, 10, 2020)

gen_sobol <- function(min = 0, max = 1, n, seed = 1) {
  return(round(min + (max - min) * randtoolbox::sobol(n, dim = 1, scrambling = 3, seed = seed), 8))
}
## ---- yager/R/gen_sobol.R ----
#' Generate uniform random numbers
#'
#' The function \code{gen_unifm} generates a vector of uniform random numbers
#'
#' @param min The minimum value of random numbers
#' @param max The maximum value of random numbers
#' @param n The number of random numbers to generate
#' @param seed The seed value of random number generation
#'
#' @return A vector of uniform random numbers bounded by the min and max
#'
#' @examples
#' gen_unifm(0, 1, 10, 2020)

gen_unifm <- function(min = 0, max = 1, n, seed = 1) {
  set.seed(seed)
  return(round(min + (max - min) * runif(n), 8))
}
## ---- yager/R/gen_unifm.R ----
#' Create a general regression neural network
#'
#' The function \code{grnn.fit} creates a general regression neural network (GRNN)
#'
#' @param x The matrix of predictors
#' @param y The vector of response variable
#' @param w The vector of weights with default = 1 for each record
#' @param sigma The scalar of smoothing parameter
#'
#' @return A general regression neural network object
#'
#' @references
#' Donald Specht. (1991). A General Regression Neural Network.
#'
#' @examples
#' data(iris, package = "datasets")
#' Y <- ifelse(iris[, 5] == "setosa", 1, 0)
#' X <- scale(iris[, 1:4])
#' gnet <- grnn.fit(x = X, y = Y)

grnn.fit <- function(x, y, sigma = 1, w = rep(1, length(y))) {
  ### CHECK X MATRIX ###
  if (is.matrix(x) == F) stop("x needs to be a matrix.", call. = F)
  if (anyNA(x) == T) stop("NA found in x.", call. = F)
  ### CHECK Y VECTOR ###
  if (is.vector(y) == F) stop("y needs to be a vector.", call. = F)
  if (anyNA(y) == T) stop("NA found in y.", call. = F)
  if (length(y) != nrow(x)) stop("x and y need to share the same length.", call. = F)
  ### CHECK W VECTOR ###
  if (is.vector(w) == F) stop("w needs to be a vector.", call. = F)
  if (anyNA(w) == T) stop("NA found in w.", call. = F)
  if (length(w) != nrow(x)) stop("x and w need to share the same length.", call. = F)
  ### CHECK SIGMA ###
  if (sigma <= 0) stop("sigma needs to be positive", call. = F)

  gn <- structure(list(), class = "General Regression Neural Net")
  gn$x <- x
  gn$y <- y
  gn$w <- w
  gn$sigma <- sigma
  return(gn)
}
## ---- yager/R/grnn.fit.R ----
#' Derive the importance rank of all predictors used in the GRNN
#'
#' The function \code{grnn.imp} derives the importance rank of all predictors used in the GRNN.
#' It essentially is a wrapper around the function \code{grnn.x_imp}.
#'
#' @param net The GRNN object generated by grnn.fit()
#' @param class TRUE or FALSE, whether it is for the classification or not
#'
#' @return A dataframe with importance values of all predictors in the GRNN
#'
#' @seealso \code{\link{grnn.x_imp}}
#'
#' @examples
#' data(iris, package = "datasets")
#' Y <- ifelse(iris[, 5] == "setosa", 1, 0)
#' X <- scale(iris[, 1:3])
#' gnet <- grnn.fit(x = X, y = Y)
#' \dontrun{
#' grnn.imp(net = gnet, class = TRUE)
#' }

grnn.imp <- function(net, class = FALSE) {
  if (class(net) != "General Regression Neural Net") stop("net needs to be a GRNN.", call. = F)
  if (!(class %in% c(TRUE, FALSE))) stop("the class input is not correct.", call. = F)

  cls <- parallel::makeCluster(min(ncol(net$x), parallel::detectCores() - 1), type = "PSOCK")
  obj <- c("net", "class", "grnn.fit", "grnn.predone", "grnn.predict", "grnn.x_imp")
  parallel::clusterExport(cls, obj, envir = environment())
  rst1 <- data.frame(idx = seq(ncol(net$x)),
                     Reduce(rbind, parallel::parLapply(cls, seq(ncol(net$x)),
                            function(i) grnn.x_imp(net, i, class = class))))
  parallel::stopCluster(cls)
  rst2 <- rst1[with(rst1, order(-imp1, -imp2)), ]
  row.names(rst2) <- NULL
  return(rst2)
}
## ---- yager/R/grnn.imp.R ----
#' Derive the marginal effect of a predictor used in a GRNN
#'
#' The function \code{grnn.margin} derives the marginal effect of a predictor used in a GRNN
#' by assuming mean values for the rest of the predictors
#'
#' @param net The GRNN object generated by grnn.fit()
#' @param i The ith predictor in the GRNN
#' @param plot TRUE or FALSE to plot the marginal effect
#'
#' @return A plot of the marginal effect or a dataframe of the marginal effect
#'
#' @seealso \code{\link{grnn.partial}}
#'
#' @examples
#' data(iris, package = "datasets")
#' Y <- ifelse(iris[, 5] == "setosa", 1, 0)
#' X <- scale(iris[, 1:4])
#' gnet <- grnn.fit(x = X, y = Y)
#' grnn.margin(gnet, 1, plot = FALSE)

grnn.margin <- function(net, i, plot = TRUE) {
  if (class(net) != "General Regression Neural Net") stop("net needs to be a GRNN.", call. = F)
  if (i > ncol(net$x)) stop("the selected variable is out of bound.", call. = F)
  if (!(plot %in% c(T, F))) stop("the plot input is not correct.", call. = F)

  xname <- colnames(net$x)[i]
  n <- length(unique(net$x[, i]))
  x <- matrix(rep(colMeans(net$x), n), nrow = n, byrow = T)
  x[, i] <- sort(unique(net$x[, i]))
  rst <- data.frame(x = x[, i], p = grnn.predict(net, x))
  if (plot == TRUE) {
    plot(rst[, 1], rst[, 2], type = "b", lty = 4, lwd = 3, ylab = '', xlab = xname,
         main = "Marginal Effect", pch = 16, cex = 1.5, col = "red",
         cex.main = 1, cex.lab = 1, yaxt = 'n')
    rug(rst[, 1], col = 'green4', ticksize = 0.03, lwd = 3)
  } else {
    return(rst)
  }
}
## ---- yager/R/grnn.margin.R ----
#' Optimize the GRNN smoothing parameter based on AUC
#'
#' The function \code{grnn.optmiz_auc} finds the optimal value of the GRNN smoothing parameter by cross-validation.
#' It is applicable to the classification.
#'
#' @param net A GRNN object generated by grnn.fit()
#' @param lower A scalar for the lower bound of the smoothing parameter
#' @param upper A scalar for the upper bound of the smoothing parameter
#' @param nfolds A scalar for the number of n-fold, 4 by default
#' @param seed The seed value for the n-fold cross-validation, 1 by default
#' @param method A scalar referring to the optimization method, 1 for golden section search and 2 for Brent's method
#'
#' @return The best outcome
#'
#' @seealso \code{\link{grnn.search_auc}}
#'
#' @examples
#' data(iris, package = "datasets")
#' Y <- ifelse(iris[, 5] == "setosa", 1, 0)
#' X <- scale(iris[, 1:4])
#' gnet <- grnn.fit(x = X, y = Y)
#' \dontrun{
#' grnn.optmiz_auc(net = gnet, lower = 3, upper = 7, nfolds = 2)
#' }

grnn.optmiz_auc <- function(net, lower = 0, upper, nfolds = 4, seed = 1, method = 1) {
  if (class(net) != "General Regression Neural Net") stop("net needs to be a GRNN.", call. = F)
  if (!(method %in% c(1, 2))) stop("the method is not supported.", call. = F)

  fd <- folds(seq(nrow(net$x)), n = nfolds, seed = seed)
  cv <- function(s) {
    cls <- parallel::makeCluster(min(nfolds, parallel::detectCores() - 1), type = "PSOCK")
    obj <- c("fd", "net", "grnn.fit", "grnn.predone", "grnn.predict")
    parallel::clusterExport(cls, obj, envir = environment())
    rs <- Reduce(rbind,
                 parallel::parLapply(cls, fd,
                   function(f) data.frame(ya = net$y[f],
                     yp = grnn.predict(grnn.fit(net$x[-f, ], net$y[-f], sigma = s),
                                       net$x[f, ]))))
    parallel::stopCluster(cls)
    return(MLmetrics::AUC(y_pred = rs$yp, y_true = rs$ya))
  }
  if (method == 1) {
    rst <- optimize(f = cv, interval = c(lower, upper), maximum = T)
  } else if (method == 2) {
    rst <- optim(par = mean(c(lower, upper)), fn = cv, lower = lower, upper = upper,
                 method = "Brent", control = list(fnscale = -1))
  }
  return(data.frame(sigma = rst[[1]], auc = rst[[2]]))
}
## ---- yager/R/grnn.optmiz_auc.R ----
#' Calculate predicted values of GRNN by using parallelism
#'
#' The function \code{grnn.parpred} calculates a vector of GRNN predicted values based on an input matrix
#'
#' @param net The GRNN object generated by grnn.fit()
#' @param x The matrix of input predictors
#'
#' @return A vector of predicted values
#'
#' @seealso \code{\link{grnn.predict}}
#'
#' @examples
#' data(iris, package = "datasets")
#' Y <- ifelse(iris[, 5] == "setosa", 1, 0)
#' X <- scale(iris[, 1:4])
#' gnet <- grnn.fit(x = X, y = Y)
#' grnn.parpred(gnet, X[seq(5), ])

grnn.parpred <- function(net, x) {
  if (class(net) != "General Regression Neural Net") stop("net needs to be a GRNN.", call. = F)
  if (is.matrix(x) == F) stop("x needs to be a matrix.", call. = F)
  if (anyNA(x) == T) stop("NA found in x.", call. = F)
  if (ncol(x) != ncol(net$x)) stop("x dimension is not consistent with grnn.", call. = F)

  cls <- parallel::makeCluster(min(floor(nrow(x) / 3), parallel::detectCores() - 1),
                               type = "PSOCK")
  obj <- c("net", "x", "grnn.predone", "grnn.predict")
  parallel::clusterExport(cls, obj, envir = environment())
  spx <- parallel::parLapplyLB(cls, parallel::clusterSplit(cls, seq(nrow(x))),
                               function(c_) x[c_, ])
  rst <- parallel::parLapplyLB(cls, spx, function(x_) grnn.predict(net, x_))
  parallel::stopCluster(cls)
  return(Reduce(c, rst))
}
## ---- yager/R/grnn.parpred.R ----
#' Derive the partial effect of a predictor used in a GRNN
#'
#' The function \code{grnn.partial} derives the partial effect of a predictor used in a GRNN
#' by averaging out the values of the rest of the predictors.
#'
#' @param net The GRNN object generated by grnn.fit()
#' @param i The ith predictor in the GRNN
#' @param plot TRUE or FALSE to plot the partial effect
#'
#' @return A plot of the partial effect or a dataframe of the partial effect
#'
#' @seealso \code{\link{grnn.margin}}
#'
#' @examples
#' data(iris, package = "datasets")
#' Y <- ifelse(iris[, 5] == "setosa", 1, 0)
#' X <- scale(iris[, 1:4])
#' gnet <- grnn.fit(x = X, y = Y)
#' \dontrun{
#' grnn.partial(gnet, 1, plot = FALSE)
#' }

grnn.partial <- function(net, i, plot = TRUE) {
  if (class(net) != "General Regression Neural Net") stop("net needs to be a GRNN.", call. = F)
  if (i > ncol(net$x)) stop("the selected variable is out of bound.", call. = F)
  if (!(plot %in% c(T, F))) stop("the plot input is not correct.", call. = F)

  xname <- colnames(net$x)[i]
  xi <- sort(unique(net$x[, i]))
  partial <- function(x_i) {
    x <- net$x
    x[, i] <- rep(x_i, length(net$y))
    return(data.frame(x = x_i, p = mean(grnn.predict(net, x))))
  }
  cls <- parallel::makeCluster(min(length(xi), parallel::detectCores() - 1), type = "PSOCK")
  obj <- c("net", "grnn.fit", "grnn.predone", "grnn.predict")
  parallel::clusterExport(cls, obj, envir = environment())
  rst <- Reduce(rbind, parallel::parLapply(cls, xi, partial))
  parallel::stopCluster(cls)
  if (plot == T) {
    plot(rst[, 1], rst[, 2], type = "b", lty = 4, lwd = 3, ylab = '', xlab = xname,
         main = "Partial Dependence", pch = 16, cex = 1.5, col = "royalblue",
         cex.main = 1, cex.lab = 1, yaxt = 'n')
    rug(rst[, 1], col = 'green4', ticksize = 0.03, lwd = 3)
  } else {
    return(rst)
  }
}
## ---- yager/R/grnn.partial.R ----
#' Derive the PFI rank of all predictors used in the GRNN
#'
#' The function \code{grnn.pfi} derives the PFI rank of all predictors used in the GRNN.
#' It essentially is a wrapper around the function \code{grnn.x_pfi}.
#'
#' @param net The GRNN object generated by grnn.fit()
#' @param class TRUE or FALSE, whether it is for the classification or not
#' @param ntry The number of random permutations to try, 1e3 times by default
#' @param seed The seed value for the random permutation
#'
#' @return A dataframe with PFI values of all predictors in the GRNN
#'
#' @seealso \code{\link{grnn.x_pfi}}
#'
#' @examples
#' data(iris, package = "datasets")
#' Y <- ifelse(iris[, 5] == "setosa", 1, 0)
#' X <- scale(iris[, 1:3])
#' gnet <- grnn.fit(x = X, y = Y)
#' \dontrun{
#' grnn.pfi(net = gnet, class = TRUE)
#' }

grnn.pfi <- function(net, class = FALSE, ntry = 1e3, seed = 1) {
  if (class(net) != "General Regression Neural Net") stop("net needs to be a GRNN.", call. = F)
  if (!(class %in% c(TRUE, FALSE))) stop("the class input is not correct.", call. = F)

  cls <- parallel::makeCluster(min(ncol(net$x), parallel::detectCores() - 1), type = "PSOCK")
  obj <- c("net", "class", "grnn.fit", "grnn.predone", "grnn.predict", "grnn.x_pfi",
           "ntry", "seed")
  parallel::clusterExport(cls, obj, envir = environment())
  rst1 <- data.frame(idx = seq(ncol(net$x)),
                     Reduce(rbind, parallel::parLapply(cls, seq(ncol(net$x)),
                            function(i) grnn.x_pfi(net, i, class = class,
                                                   ntry = ntry, seed = seed))))
  parallel::stopCluster(cls)
  rst2 <- rst1[with(rst1, order(-pfi)), ]
  row.names(rst2) <- NULL
  return(rst2)
}
## ---- yager/R/grnn.pfi.R ----
#' Calculate predicted values of GRNN
#'
#' The function \code{grnn.predict} calculates a vector of GRNN predicted values based on an input matrix
#'
#' @param net The GRNN object generated by grnn.fit()
#' @param x The matrix of input predictors
#'
#' @return A vector of predicted values
#'
#' @seealso \code{\link{grnn.predone}}
#'
#' @examples
#' data(iris, package = "datasets")
#' Y <- ifelse(iris[, 5] == "setosa", 1, 0)
#' X <- scale(iris[, 1:4])
#' gnet <- grnn.fit(x = X, y = Y)
#' grnn.predict(gnet, X[seq(5), ])

grnn.predict <- function(net, x) {
  if (class(net) != "General Regression Neural Net") stop("net needs to be a GRNN object.", call. = F)
  if (is.matrix(x) == F) stop("x needs to be a matrix.", call. = F)
  if (anyNA(x) == T) stop("NA found in x.", call. = F)
  if (ncol(x) != ncol(net$x)) stop("x dimension is not consistent with grnn.", call. = F)

  return(Reduce(c, lapply(split(x, seq(nrow(x))), function(x_) grnn.predone(net, x_))))
}
## ---- yager/R/grnn.predict.R ----
#' Calculate a predicted value of GRNN
#'
#' The function \code{grnn.predone} calculates a predicted value of GRNN based on an input vector
#'
#' @param net The GRNN object generated by grnn.fit()
#' @param x The vector of input predictors
#' @param type A scalar, 1 for euclidean distance and 2 for manhattan distance
#'
#' @return A scalar of the predicted value
#'
#' @references
#' Donald Specht. (1991). A General Regression Neural Network.
#'
#' @seealso \code{\link{grnn.fit}}
#'
#' @examples
#' data(iris, package = "datasets")
#' Y <- ifelse(iris[, 5] == "setosa", 1, 0)
#' X <- scale(iris[, 1:4])
#' gnet <- grnn.fit(x = X, y = Y)
#' for (i in seq(5)) print(grnn.predone(gnet, X[i, ]))

grnn.predone <- function(net, x, type = 1) {
  if (class(net) != "General Regression Neural Net") stop("net needs to be a GRNN object.", call. = F)
  ### CHECK INPUT X VECTOR ###
  if (is.vector(x) == F) stop("x needs to be a vector.", call. = F)
  if (anyNA(x) == T) stop("NA found in x.", call. = F)
  if (length(x) != ncol(net$x)) stop("x dimension is not consistent with grnn.", call. = F)
  ### CHECK INPUT TYPE (CURRENTLY SUPPORTING 1 / 2) ###
  if (!(type %in% c(1, 2))) stop("the type is not supported.", call. = F)

  # xl <- split(net$x, seq(nrow(net$x)))
  xl <- matrix(rep(x, length(net$y)), nrow = length(net$y), byrow = TRUE)
  if (type == 1) {
    ### EUCLIDEAN DISTANCE BY DEFAULT ###
    num <- sum(net$w * net$y * exp(-(rowSums((xl - net$x) ^ 2)) / (2 * (net$sigma ^ 2))))
    den <- sum(net$w * exp(-(rowSums((xl - net$x) ^ 2)) / (2 * (net$sigma ^ 2))))
    # num <- sum(net$w * net$y * exp(-Reduce(c, lapply(xl, function(xi) sum((x - xi) ^ 2))) / (2 * (net$sigma ^ 2))))
    # den <- sum(net$w * exp(-Reduce(c, lapply(xl, function(xi) sum((x - xi) ^ 2))) / (2 * (net$sigma ^ 2))))
  } else if (type == 2) {
    ### MANHATTAN DISTANCE ###
    num <- sum(net$w * net$y * exp(-(rowSums(abs(xl - net$x))) / net$sigma))
    den <- sum(net$w * exp(-(rowSums(abs(xl - net$x))) / net$sigma))
    # num <- sum(net$w * net$y * exp(-Reduce(c, lapply(xl, function(xi) sum(abs(x - xi)))) / net$sigma))
    # den <- sum(net$w * exp(-Reduce(c, lapply(xl, function(xi) sum(abs(x - xi)))) / net$sigma))
  }
  return(num / den)
}
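# For reference, the kernel-weighted average computed above is Specht's GRNN
# estimator, with the optional case weights w added by this package:
#
#   yhat(x) = sum_i w_i * y_i * exp(-d(x, x_i)) / sum_i w_i * exp(-d(x, x_i))
#
# where d(x, x_i) = ||x - x_i||^2 / (2 * sigma^2) for type = 1 (euclidean)
# and   d(x, x_i) = |x - x_i|_1 / sigma           for type = 2 (manhattan).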
## ---- yager/R/grnn.predone.R ----
# NOTE: this file defines a second copy of grnn.predone -- the original
# split()/lapply() implementation. The vectorized version lives in
# grnn.predone.R; whichever file is sourced later masks the other.

#' Calculate a predicted value of GRNN
#'
#' The function \code{grnn.predone} calculates a predicted value of GRNN based on an input vector
#'
#' @param net The GRNN object generated by grnn.fit()
#' @param x The vector of input predictors
#' @param type A scalar, 1 for euclidean distance and 2 for manhattan distance
#'
#' @return A scalar of the predicted value
#'
#' @references
#' Donald Specht. (1991). A General Regression Neural Network.
#'
#' @seealso \code{\link{grnn.fit}}
#'
#' @examples
#' data(iris, package = "datasets")
#' Y <- ifelse(iris[, 5] == "setosa", 1, 0)
#' X <- scale(iris[, 1:4])
#' gnet <- grnn.fit(x = X, y = Y)
#' for (i in seq(5)) print(grnn.predone(gnet, X[i, ]))

grnn.predone <- function(net, x, type = 1) {
  if (class(net) != "General Regression Neural Net") stop("net needs to be a GRNN object.", call. = F)
  ### CHECK INPUT X VECTOR ###
  if (is.vector(x) == F) stop("x needs to be a vector.", call. = F)
  if (anyNA(x) == T) stop("NA found in x.", call. = F)
  if (length(x) != ncol(net$x)) stop("x dimension is not consistent with grnn.", call. = F)
  ### CHECK INPUT TYPE (CURRENTLY SUPPORTING 1 / 2) ###
  if (!(type %in% c(1, 2))) stop("the type is not supported.", call. = F)

  xl <- split(net$x, seq(nrow(net$x)))
  if (type == 1) {
    ### EUCLIDEAN DISTANCE BY DEFAULT ###
    num <- sum(net$w * net$y * exp(-Reduce(c, lapply(xl, function(xi) sum((x - xi) ^ 2))) / (2 * (net$sigma ^ 2))))
    den <- sum(net$w * exp(-Reduce(c, lapply(xl, function(xi) sum((x - xi) ^ 2))) / (2 * (net$sigma ^ 2))))
  } else if (type == 2) {
    ### MANHATTAN DISTANCE ###
    num <- sum(net$w * net$y * exp(-Reduce(c, lapply(xl, function(xi) sum(abs(x - xi)))) / net$sigma))
    den <- sum(net$w * exp(-Reduce(c, lapply(xl, function(xi) sum(abs(x - xi)))) / net$sigma))
  }
  return(num / den)
}
## ---- yager/R/grnn.predonev1.R ----
#' Search for the optimal value of GRNN smoothing parameter based on AUC
#'
#' The function \code{grnn.search_auc} searches for the optimal value of GRNN smoothing parameter by cross-validation.
#' It is applicable to the classification.
#'
#' @param net A GRNN object generated by grnn.fit()
#' @param sigmas A numeric vector to search for the best smoothing parameter
#' @param nfolds A scalar for the number of n-fold, 4 by default
#' @param seed The seed value for the n-fold cross-validation, 1 by default
#'
#' @return The list of all searching outcomes and the best outcome
#'
#' @examples
#' data(iris, package = "datasets")
#' Y <- ifelse(iris[, 5] == "setosa", 1, 0)
#' X <- scale(iris[, 1:4])
#' gnet <- grnn.fit(x = X, y = Y)
#' grnn.search_auc(net = gnet, sigmas = c(3, 5, 7), nfolds = 2)

grnn.search_auc <- function(net, sigmas, nfolds = 4, seed = 1) {
  if (class(net) != "General Regression Neural Net") stop("net needs to be a GRNN.", call. = F)
  if (is.vector(sigmas) != T) stop("sigmas needs to be a vector.", call. = F)

  fd <- folds(seq(nrow(net$x)), n = nfolds, seed = seed)
  cv <- function(s) {
    rs <- Reduce(rbind,
                 lapply(fd, function(f) data.frame(ya = net$y[f],
                   yp = grnn.predict(grnn.fit(net$x[-f, ], net$y[-f], sigma = s),
                                     net$x[f, ]))))
    return(data.frame(sigma = s, auc = MLmetrics::AUC(y_pred = rs$yp, y_true = rs$ya)))
  }
  cls <- parallel::makeCluster(min(nfolds, parallel::detectCores() - 1), type = "PSOCK")
  obj <- c("fd", "net", "grnn.fit", "grnn.predone", "grnn.predict")
  parallel::clusterExport(cls, obj, envir = environment())
  rst <- Reduce(rbind, parallel::parLapply(cls, sigmas, cv))
  parallel::stopCluster(cls)
  return(list(test = rst, best = rst[rst$auc == max(rst$auc), ]))
}
/scratch/gouwar.j/cran-all/cranData/yager/R/grnn.search_auc.R
#' Search for the optimal value of GRNN smoothing parameter based on r-square
#'
#' The function \code{grnn.search_rsq} searches for the optimal value of the GRNN smoothing parameter by cross-validation.
#' It is applicable to function approximation.
#'
#' @param net A GRNN object generated by grnn.fit()
#' @param sigmas A numeric vector of candidate smoothing parameters
#' @param nfolds A scalar for the number of folds, 4 by default
#' @param seed The seed value for the n-fold cross-validation, 1 by default
#'
#' @return The list of all searching outcomes and the best outcome
#'
#' @examples
#' data(iris, package = "datasets")
#' Y <- ifelse(iris[, 5] == "setosa", 1, 0)
#' X <- scale(iris[, 1:4])
#' gnet <- grnn.fit(x = X, y = Y)
#' grnn.search_rsq(net = gnet, sigmas = seq(3), nfolds = 2)

grnn.search_rsq <- function(net, sigmas, nfolds = 4, seed = 1) {
  if (!inherits(net, "General Regression Neural Net")) stop("net needs to be a GRNN.", call. = FALSE)
  if (!is.vector(sigmas)) stop("sigmas needs to be a vector.", call. = FALSE)

  fd <- folds(seq(nrow(net$x)), n = nfolds, seed = seed)

  cv <- function(s) {
    rs <- Reduce(rbind,
                 lapply(fd,
                        function(f) data.frame(ya = net$y[f],
                                               yp = grnn.predict(grnn.fit(net$x[-f, ], net$y[-f], sigma = s),
                                                                 net$x[f, ]))))
    return(data.frame(sigma = s, r2 = MLmetrics::R2_Score(y_pred = rs$yp, y_true = rs$ya)))
  }

  cls <- parallel::makeCluster(min(nfolds, parallel::detectCores() - 1), type = "PSOCK")
  obj <- c("fd", "net", "grnn.fit", "grnn.predone", "grnn.predict")
  parallel::clusterExport(cls, obj, envir = environment())
  rst <- Reduce(rbind, parallel::parLapply(cls, sigmas, cv))
  parallel::stopCluster(cls)
  return(list(test = rst, best = rst[rst$r2 == max(rst$r2), ]))
}
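# Both search functions follow the same pattern: refit on each training
# fold, score the held-out fold, aggregate by sigma. A serial sketch of
# that loop, assuming the package-internal `folds` helper plus grnn.fit
# and grnn.predict; useful where a PSOCK cluster is unavailable.
grid_search_serial <- function(net, sigmas, nfolds = 4, seed = 1) {
  fd <- folds(seq(nrow(net$x)), n = nfolds, seed = seed)
  Reduce(rbind, lapply(sigmas, function(s) {
    rs <- Reduce(rbind, lapply(fd, function(f) {
      fit <- grnn.fit(net$x[-f, ], net$y[-f], sigma = s)
      data.frame(ya = net$y[f], yp = grnn.predict(fit, net$x[f, ]))
    }))
    data.frame(sigma = s, r2 = MLmetrics::R2_Score(y_pred = rs$yp, y_true = rs$ya))
  }))
}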
/scratch/gouwar.j/cran-all/cranData/yager/R/grnn.search_rsq.R
#' Derive the importance of a predictor used in the GRNN
#'
#' The function \code{grnn.x_imp} derives the importance of a predictor used in the GRNN
#' by using the loss of predictability after eliminating the impact of the predictor in interest.
#'
#' @param net The GRNN object generated by grnn.fit()
#' @param i The ith predictor in the GRNN
#' @param class TRUE or FALSE, whether it is for the classification or not
#'
#' @return A data.frame with the variable name and two importance measures, namely "imp1" and "imp2".
#' The "imp1" measures the loss of predictability after replacing all values of the predictor with its mean.
#' The "imp2" measures the loss of predictability after dropping the predictor from the GRNN.
#'
#' @seealso \code{\link{grnn.x_pfi}}
#'
#' @examples
#' data(iris, package = "datasets")
#' Y <- ifelse(iris[, 5] == "setosa", 1, 0)
#' X <- scale(iris[, 1:4])
#' gnet <- grnn.fit(x = X, y = Y)
#' grnn.x_imp(net = gnet, 1)

grnn.x_imp <- function(net, i, class = FALSE) {
  if (!inherits(net, "General Regression Neural Net")) stop("net needs to be a GRNN.", call. = FALSE)
  if (i > ncol(net$x)) stop("the selected variable is out of bound.", call. = FALSE)
  if (!(class %in% c(TRUE, FALSE))) stop("the class input is not correct.", call. = FALSE)

  xname <- colnames(net$x)[i]
  x <- net$x
  ### NEUTRALIZE THE PREDICTOR BY REPLACING IT WITH ITS MEAN ###
  x[, i] <- rep(mean(net$x[, i]), length(net$y))

  if (class) {
    auc0 <- MLmetrics::AUC(grnn.predict(net, net$x), net$y)
    auc1 <- MLmetrics::AUC(grnn.predict(net, x), net$y)
    auc2 <- MLmetrics::AUC(grnn.predict(grnn.fit(x = x[, -i], y = net$y, sigma = net$sigma), x[, -i]), net$y)
    imp1 <- round(max(0, 1 - auc1 / auc0), 8)
    imp2 <- round(max(0, 1 - auc2 / auc0), 8)
  } else {
    rsq0 <- MLmetrics::R2_Score(grnn.predict(net, net$x), net$y)
    rsq1 <- MLmetrics::R2_Score(grnn.predict(net, x), net$y)
    rsq2 <- MLmetrics::R2_Score(grnn.predict(grnn.fit(x = x[, -i], y = net$y, sigma = net$sigma), x[, -i]), net$y)
    imp1 <- round(max(0, 1 - rsq1 / rsq0), 8)
    imp2 <- round(max(0, 1 - rsq2 / rsq0), 8)
  }
  return(data.frame(var = xname, imp1 = imp1, imp2 = imp2))
}
/scratch/gouwar.j/cran-all/cranData/yager/R/grnn.x_imp.R
#' Derive the permutation feature importance of a predictor used in the GRNN
#'
#' The function \code{grnn.x_pfi} derives the permutation feature importance (PFI) of a predictor used in the GRNN
#'
#' @param net The GRNN object generated by grnn.fit()
#' @param i The ith predictor in the GRNN
#' @param class TRUE or FALSE, whether it is for the classification or not
#' @param ntry The number of random permutations to try, 1e3 by default
#' @param seed The seed value for the random permutation
#'
#' @return A data.frame with the variable name and the PFI value.
#'
#' @seealso \code{\link{grnn.x_imp}}
#'
#' @examples
#' data(iris, package = "datasets")
#' Y <- ifelse(iris[, 5] == "setosa", 1, 0)
#' X <- scale(iris[, 1:4])
#' gnet <- grnn.fit(x = X, y = Y)
#' grnn.x_pfi(net = gnet, 1)

grnn.x_pfi <- function(net, i, class = FALSE, ntry = 1e3, seed = 1) {
  if (!inherits(net, "General Regression Neural Net")) stop("net needs to be a GRNN.", call. = FALSE)
  if (i > ncol(net$x)) stop("the selected variable is out of bound.", call. = FALSE)
  if (!(class %in% c(TRUE, FALSE))) stop("the class input is not correct.", call. = FALSE)

  xname <- colnames(net$x)[i]
  set.seed(seed)
  seeds <- floor(runif(ntry) * 1e8)
  ### GENERATE ntry RANDOM PERMUTATIONS AND KEEP THE ONE LEAST CORRELATED WITH THE ORIGINAL ORDER ###
  ol <- lapply(seeds, function(s) with(set.seed(s), sample(seq(nrow(net$x)), nrow(net$x), replace = FALSE)))
  cl <- Reduce(c, lapply(ol, function(o) abs(cor(seq(nrow(net$x)), o))))
  x <- net$x
  x[, i] <- net$x[ol[[which.min(cl)]], i]

  if (class) {
    auc0 <- MLmetrics::AUC(grnn.predict(net, net$x), net$y)
    auc1 <- MLmetrics::AUC(grnn.predict(net, x), net$y)
    pfi <- round(max(0, 1 - auc1 / auc0), 8)
  } else {
    rsq0 <- MLmetrics::R2_Score(grnn.predict(net, net$x), net$y)
    rsq1 <- MLmetrics::R2_Score(grnn.predict(net, x), net$y)
    pfi <- round(max(0, 1 - rsq1 / rsq0), 8)
  }
  return(data.frame(var = xname, pfi = pfi))
}
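# Permutation importance in its simplest form: break the association
# between one column and the response, then measure the loss of fit.
# A hedged sketch independent of GRNN; `predict_fun` is a placeholder
# for any row-wise predictor, not a yager function.
pfi_sketch <- function(x, y, i, predict_fun, seed = 1) {
  set.seed(seed)
  base <- MLmetrics::R2_Score(y_pred = predict_fun(x), y_true = y)
  xp <- x
  xp[, i] <- sample(xp[, i])  # permute column i only
  perm <- MLmetrics::R2_Score(y_pred = predict_fun(xp), y_true = y)
  max(0, 1 - perm / base)     # same scaling used by grnn.x_pfi
}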
/scratch/gouwar.j/cran-all/cranData/yager/R/grnn.x_pfi.R
#' R6 Class Representing an Index
#'
#' @description
#' Base class for getting all data related to indices from Yahoo Finance API.
#'
#' @param index Index for which data has to be retrieved.
#'
#' @docType class
#' @format An R6 class object
#' @name Index-class
#'
#' @export
Index <- R6::R6Class(
  "Index",
  public = list(
    #' @field index Index for which data is retrieved
    index = NULL,

    #' @description
    #' Create a new Index object
    #' @param index Index
    #' @examples
    #' nifty_50 <- Index$new('^NSEI')
    #' @return A new `Index` object
    initialize = function(index = NA) {
      if (validate(index)) {
        self$index <- index
      } else {
        message("Not a valid index.")
        return(invisible(NULL))
      }
    },

    #' @description
    #' Set a new index.
    #' @param index New index
    #' @examples
    #' indice <- Index$new('^NSEI')
    #' indice$set_index('^NDX')
    set_index = function(index) {
      if (validate(index)) {
        self$index <- index
      } else {
        message("Not a valid index.")
        return(invisible(NULL))
      }
    },

    #' @description
    #' Retrieves historical data
    #' @param period Length of time. Defaults to \code{'ytd'}. Valid values are:
    #' \itemize{
    #' \item \code{'1d'}
    #' \item \code{'5d'}
    #' \item \code{'1mo'}
    #' \item \code{'3mo'}
    #' \item \code{'6mo'}
    #' \item \code{'1y'}
    #' \item \code{'2y'}
    #' \item \code{'5y'}
    #' \item \code{'10y'}
    #' \item \code{'ytd'}
    #' \item \code{'max'}
    #' }
    #' @param interval Time between data points. Defaults to \code{'1d'}. Valid values are:
    #' \itemize{
    #' \item \code{'1m'}
    #' \item \code{'2m'}
    #' \item \code{'5m'}
    #' \item \code{'15m'}
    #' \item \code{'30m'}
    #' \item \code{'60m'}
    #' \item \code{'90m'}
    #' \item \code{'1h'}
    #' \item \code{'1d'}
    #' \item \code{'5d'}
    #' \item \code{'1wk'}
    #' \item \code{'1mo'}
    #' \item \code{'3mo'}
    #' }
    #' @param start Specific starting date. \code{String} or \code{date} object in \code{yyyy-mm-dd} format.
    #' @param end Specific ending date. \code{String} or \code{date} object in \code{yyyy-mm-dd} format.
    #' @return A \code{data.frame}.
    #' @examples
    #' \donttest{
    #' nifty <- Index$new('^NSEI')
    #' nifty$get_history(start = '2022-07-01', interval = '1d')
    #' nifty$get_history(start = '2022-07-01', end = '2022-07-14', interval = '1d')
    #' nifty$get_history(period = '1mo', interval = '1d')
    #' }
    get_history = function(period = 'ytd', interval = '1d', start = NULL, end = NULL) {
      if (!is.null(start)) {
        start_date <- as.numeric(as.POSIXct(ymd(start, tz = "UTC"), tz = "UTC"))
      }
      if (!is.null(end)) {
        end_date <- as.numeric(as.POSIXct(ymd(end, tz = "UTC"), tz = "UTC"))
      }

      path <- 'v8/finance/chart/'
      end_point <- paste0(path, self$index)
      url <- modify_url(url = private$base_url, path = end_point)

      if (!is.null(start) && !is.null(end)) {
        qlist <- list(period1 = start_date, period2 = end_date, interval = interval)
      } else if (!is.null(start) && is.null(end)) {
        qlist <- list(period1 = start_date, period2 = round(as.numeric(as.POSIXct(now("UTC")))), interval = interval)
      } else {
        qlist <- list(range = period, interval = interval)
      }

      if (!curl::has_internet()) {
        message("No internet connection.")
        return(invisible(NULL))
      }

      resp <- GET(url, query = qlist)
      parsed <- fromJSON(content(resp, "text", encoding = "UTF-8"), simplifyVector = FALSE)

      if (http_error(resp)) {
        message(
          cat(
            "Yahoo Finance API request failed", '\n',
            paste('Status:', status_code(resp)), '\n',
            paste('Type:', http_status(resp)$category), '\n',
            paste('Message:', parsed$quoteSummary$error$code), '\n',
            paste('Description:', parsed$quoteSummary$error$description, '\n'),
            sep = ''
          )
        )
        return(invisible(NULL))
      } else {
        data <- parsed %>%
          use_series(chart) %>%
          use_series(result) %>%
          extract2(1)

        indicators <- data %>%
          use_series(indicators) %>%
          use_series(quote) %>%
          extract2(1)

        result <- data.frame(
          date = as_datetime(unlist(data$timestamp)),
          volume = flatten_list(indicators$volume),
          high = flatten_list(indicators$high),
          low = flatten_list(indicators$low),
          open = flatten_list(indicators$open),
          close = flatten_list(indicators$close)
        )

        intervals <- c('1d', '5d', '1wk', '1mo', '3mo')

        if (interval %in% intervals) {
          adj_close <- data %>%
            use_series(indicators) %>%
            use_series(adjclose) %>%
            extract2(1) %>%
            use_series(adjclose)

          result$adj_close <- adj_close
        }

        return(result)
      }
    }
  ),
  private = list(
    base_url = 'https://query2.finance.yahoo.com'
  )
)
/scratch/gouwar.j/cran-all/cranData/yahoofinancer/R/index.R
#' Currencies
#'
#' List of currencies Yahoo Finance supports.
#'
#' @examples
#' \donttest{
#' get_currencies()
#' }
#'
#' @return Symbol, short and long name of the currencies.
#'
#' @export
#'
get_currencies <- function() {
  base_url <- 'https://query1.finance.yahoo.com'
  path <- 'v1/finance/currencies'
  url <- modify_url(url = base_url, path = path)

  if (!curl::has_internet()) {
    message("No internet connection.")
    return(invisible(NULL))
  }

  resp <- GET(url)
  parsed <- fromJSON(content(resp, "text", encoding = "UTF-8"), simplifyVector = FALSE)

  if (http_error(resp)) {
    message(
      cat(
        "Yahoo Finance API request failed", '\n',
        paste('Status:', status_code(resp)), '\n',
        paste('Type:', http_status(resp)$category), '\n',
        paste('Message:', parsed$quoteSummary$error$code), '\n',
        paste('Description:', parsed$quoteSummary$error$description, '\n'),
        sep = ''
      )
    )
    return(invisible(NULL))
  } else {
    data <- parsed %>%
      use_series(currencies) %>%
      use_series(result)

    data.frame(
      short_name = map_chr(data, 'shortName'),
      long_name = map_chr(data, 'longName'),
      symbol = map_chr(data, 'symbol'),
      local_long_name = map_chr(data, 'localLongName')
    )
  }
}

#' Market Summary
#'
#' Summary info of relevant exchanges for specific country.
#'
#' @param country Name of the country.
#'
#' @return A \code{data.frame}.
#'
#' @examples
#' \donttest{
#' get_market_summary(country = 'US')
#' }
#'
#' @export
#'
get_market_summary <- function(country = 'US') {
  base_url <- 'https://query1.finance.yahoo.com'
  path <- 'v6/finance/quote/marketSummary'
  url <- modify_url(url = base_url, path = path)
  qlist <- list(region = country)

  if (!curl::has_internet()) {
    message("No internet connection.")
    return(invisible(NULL))
  }

  resp <- GET(url, query = qlist)
  parsed <- fromJSON(content(resp, "text", encoding = "UTF-8"), simplifyVector = FALSE)

  if (http_error(resp)) {
    message(
      cat(
        "Yahoo Finance API request failed", '\n',
        paste('Status:', status_code(resp)), '\n',
        paste('Type:', http_status(resp)$category), '\n',
        paste('Message:', parsed$quoteSummary$error$code), '\n',
        paste('Description:', parsed$quoteSummary$error$description, '\n'),
        sep = ''
      )
    )
    return(invisible(NULL))
  } else {
    parsed %>%
      use_series(marketSummaryResponse) %>%
      use_series(result)
  }
}

#' Trending securities
#'
#' List of trending securities for specific country.
#'
#' @param country Name of the country.
#' @param count Number of securities.
#'
#' @return Securities trending in the country.
#'
#' @examples
#' \donttest{
#' get_trending()
#' }
#'
#' @export
#'
get_trending <- function(country = 'US', count = 10) {
  base_url <- 'https://query1.finance.yahoo.com'
  path <- 'v1/finance/trending/'
  end_point <- paste0(path, country)
  url <- modify_url(url = base_url, path = end_point)
  qlist <- list(count = count)

  if (!curl::has_internet()) {
    message("No internet connection.")
    return(invisible(NULL))
  }

  resp <- GET(url, query = qlist)
  parsed <- fromJSON(content(resp, "text", encoding = "UTF-8"), simplifyVector = FALSE)

  if (http_error(resp)) {
    message(
      cat(
        "Yahoo Finance API request failed", '\n',
        paste('Status:', status_code(resp)), '\n',
        paste('Type:', http_status(resp)$category), '\n',
        paste('Message:', parsed$quoteSummary$error$code), '\n',
        paste('Description:', parsed$quoteSummary$error$description, '\n'),
        sep = ''
      )
    )
    return(invisible(NULL))
  } else {
    data <- parsed %>%
      use_series(finance) %>%
      use_series(result)

    if (length(data) > 0) {
      data %>%
        extract2(1) %>%
        use_series(quote) %>%
        map_chr('symbol')
    } else {
      message('No trending securities.')
    }
  }
}

#' Currency converter
#'
#' Retrieve current conversion rate between two currencies as well as historical rates.
#'
#' @param from Currency to convert from.
#' @param to Currency to convert to.
#' @param start Specific starting date. \code{String} or \code{date} object in \code{yyyy-mm-dd} format.
#' @param end Specific ending date. \code{String} or \code{date} object in \code{yyyy-mm-dd} format.
#' @param period Length of time. Defaults to \code{'ytd'}. Valid values are:
#' \itemize{
#' \item \code{'1d'}
#' \item \code{'5d'}
#' \item \code{'1mo'}
#' \item \code{'3mo'}
#' \item \code{'6mo'}
#' \item \code{'1y'}
#' \item \code{'2y'}
#' \item \code{'5y'}
#' \item \code{'10y'}
#' \item \code{'ytd'}
#' \item \code{'max'}
#' }
#' @param interval Time between data points. Defaults to \code{'1d'}. Valid values are:
#' \itemize{
#' \item \code{'1h'}
#' \item \code{'1d'}
#' \item \code{'5d'}
#' \item \code{'1wk'}
#' \item \code{'1mo'}
#' \item \code{'3mo'}
#' }
#'
#' @return A \code{data.frame}.
#'
#' @examples
#' \donttest{
#' currency_converter('GBP', 'USD', '2022-07-01', '2022-07-10')
#' currency_converter('GBP', 'USD', period = '1mo', interval = '1d')
#' }
#'
#' @export
#'
currency_converter <- function(from = 'EUR', to = 'USD', start = NULL, end = NULL, period = 'ytd', interval = '1d') {
  if (!is.null(start)) {
    start_date <- as.numeric(as.POSIXct(ymd(start, tz = "UTC"), tz = "UTC"))
  }
  if (!is.null(end)) {
    end_date <- as.numeric(as.POSIXct(ymd(end, tz = "UTC"), tz = "UTC"))
  }

  base_url <- 'https://query1.finance.yahoo.com'
  path <- 'v8/finance/chart/'
  cors_domain <- 'finance.yahoo.com'
  end_point <- paste0(path, from, to, '=X')
  url <- modify_url(url = base_url, path = end_point)

  if (!is.null(start) && !is.null(end)) {
    qlist <- list(period1 = start_date, period2 = end_date, interval = interval, corsDomain = cors_domain)
  } else if (!is.null(start) && is.null(end)) {
    qlist <- list(period1 = start_date, period2 = round(as.numeric(as.POSIXct(now("UTC")))), interval = interval, corsDomain = cors_domain)
  } else {
    qlist <- list(range = period, interval = interval, corsDomain = cors_domain)
  }

  if (!curl::has_internet()) {
    message("No internet connection.")
    return(invisible(NULL))
  }

  resp <- GET(url, query = qlist)
  parsed <- fromJSON(content(resp, "text", encoding = "UTF-8"), simplifyVector = FALSE)

  if (http_error(resp)) {
    message(
      cat(
        "Yahoo Finance API request failed", '\n',
        paste('Status:', status_code(resp)), '\n',
        paste('Type:', http_status(resp)$category), '\n',
        paste('Message:', parsed$quoteSummary$error$code), '\n',
        paste('Description:', parsed$quoteSummary$error$description, '\n'),
        sep = ''
      )
    )
    return(invisible(NULL))
  } else {
    data <- parsed %>%
      use_series(chart) %>%
      use_series(result) %>%
      extract2(1)

    indicators <- data %>%
      use_series(indicators) %>%
      use_series(quote) %>%
      extract2(1)

    result <- data.frame(
      date = as_datetime(unlist(data$timestamp)),
      high = flatten_list(indicators$high),
      low = flatten_list(indicators$low),
      open = flatten_list(indicators$open),
      close = flatten_list(indicators$close),
      volume = flatten_list(indicators$volume)
    )

    intervals <- c('1d', '5d', '1wk', '1mo', '3mo')

    if (interval %in% intervals) {
      adj_close <- data %>%
        use_series(indicators) %>%
        use_series(adjclose) %>%
        extract2(1) %>%
        use_series(adjclose) %>%
        unlist()

      result$adj_close <- adj_close
    }

    return(subset(result, !is.na(volume)))
  }
}
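# The chart endpoints above express period1/period2 as Unix epoch seconds
# in UTC. A quick sketch of the conversion used throughout; `epoch_utc`
# is illustrative, not an exported helper.
epoch_utc <- function(d) as.numeric(as.POSIXct(lubridate::ymd(d, tz = "UTC"), tz = "UTC"))
# epoch_utc("2022-07-01")  #> 1656633600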
/scratch/gouwar.j/cran-all/cranData/yahoofinancer/R/others.R
#' R6 Class Representing a Ticker
#'
#' @description
#' Base class for getting all data related to ticker from Yahoo Finance API.
#'
#' @param symbol Symbol for which data has to be retrieved.
#'
#' @importFrom magrittr %>% use_series extract2 extract
#' @importFrom jsonlite fromJSON
#' @import R6 httr purrr lubridate stringr
#' @docType class
#' @format An R6 class object
#' @name Ticker-class
#'
#' @export
Ticker <- R6::R6Class(
  "Ticker",
  public = list(
    #' @field symbol Symbol for which data is retrieved.
    symbol = NULL,

    #' @description
    #' Create a new Ticker object.
    #' @param symbol Symbol.
    #' @examples
    #' aapl <- Ticker$new('aapl')
    #' @return A new `Ticker` object
    initialize = function(symbol = NA) {
      if (validate(symbol)) {
        self$symbol <- symbol
      } else {
        message("Not a valid symbol.")
        return(invisible(NULL))
      }
    },

    #' @description
    #' Set a new symbol.
    #' @param symbol New symbol
    #' @examples
    #' aapl <- Ticker$new('aapl')
    #' aapl$set_symbol('msft')
    set_symbol = function(symbol) {
      if (validate(symbol)) {
        self$symbol <- symbol
      } else {
        message("Not a valid symbol.")
        return(invisible(NULL))
      }
    },

    #' @description
    #' Retrieves historical pricing data.
    #' @param period Length of time. Defaults to \code{'ytd'}. Valid values are:
    #' \itemize{
    #' \item \code{'1d'}
    #' \item \code{'5d'}
    #' \item \code{'1mo'}
    #' \item \code{'3mo'}
    #' \item \code{'6mo'}
    #' \item \code{'1y'}
    #' \item \code{'2y'}
    #' \item \code{'5y'}
    #' \item \code{'10y'}
    #' \item \code{'ytd'}
    #' \item \code{'max'}
    #' }
    #' @param interval Time between data points. Defaults to \code{'1d'}. Valid values are:
    #' \itemize{
    #' \item \code{'1m'}
    #' \item \code{'2m'}
    #' \item \code{'5m'}
    #' \item \code{'15m'}
    #' \item \code{'30m'}
    #' \item \code{'60m'}
    #' \item \code{'90m'}
    #' \item \code{'1h'}
    #' \item \code{'1d'}
    #' \item \code{'5d'}
    #' \item \code{'1wk'}
    #' \item \code{'1mo'}
    #' \item \code{'3mo'}
    #' }
    #' @param start Specific starting date. \code{String} or \code{date} object in \code{yyyy-mm-dd} format.
    #' @param end Specific ending date. \code{String} or \code{date} object in \code{yyyy-mm-dd} format.
    #' @return A \code{data.frame}.
    #' @examples
    #' \donttest{
    #' aapl <- Ticker$new('aapl')
    #' aapl$get_history(start = '2022-07-01', interval = '1d')
    #' aapl$get_history(start = '2022-07-01', end = '2022-07-14', interval = '1d')
    #' aapl$get_history(period = '1mo', interval = '1d')
    #' }
    get_history = function(period = 'ytd', interval = '1d', start = NULL, end = NULL) {
      if (!is.null(start)) {
        start_date <- as.numeric(as.POSIXct(ymd(start, tz = "UTC"), tz = "UTC"))
      }
      if (!is.null(end)) {
        end_date <- as.numeric(as.POSIXct(ymd(end, tz = "UTC"), tz = "UTC"))
      }

      path <- 'v8/finance/chart/'
      end_point <- paste0(path, self$symbol)
      url <- modify_url(url = private$base_url, path = end_point)

      if (!is.null(start) && !is.null(end)) {
        qlist <- list(period1 = start_date, period2 = end_date, interval = interval)
      } else if (!is.null(start) && is.null(end)) {
        qlist <- list(period1 = start_date, period2 = round(as.numeric(as.POSIXct(now("UTC")))), interval = interval)
      } else {
        qlist <- list(range = period, interval = interval)
      }

      if (!curl::has_internet()) {
        message("No internet connection.")
        return(invisible(NULL))
      }

      resp <- GET(url, query = qlist)
      parsed <- fromJSON(content(resp, "text", encoding = "UTF-8"), simplifyVector = FALSE)

      if (http_error(resp)) {
        message(
          cat(
            "Yahoo Finance API request failed", '\n',
            paste('Status:', status_code(resp)), '\n',
            paste('Type:', http_status(resp)$category), '\n',
            paste('Message:', parsed$quoteSummary$error$code), '\n',
            paste('Description:', parsed$quoteSummary$error$description, '\n'),
            sep = ''
          )
        )
        return(invisible(NULL))
      } else {
        data <- parsed %>%
          use_series(chart) %>%
          use_series(result) %>%
          extract2(1)

        indicators <- data %>%
          use_series(indicators) %>%
          use_series(quote) %>%
          extract2(1)

        result <- data.frame(
          date = as_datetime(unlist(data$timestamp)),
          volume = get_metric(indicators, 'volume'),
          high = get_metric(indicators, 'high'),
          low = get_metric(indicators, 'low'),
          open = get_metric(indicators, 'open'),
          close = get_metric(indicators, 'close')
        )

        intervals <- c('1d', '5d', '1wk', '1mo', '3mo')

        if (interval %in% intervals) {
          adj_close <- data %>%
            use_series(indicators) %>%
            use_series(adjclose) %>%
            extract2(1) %>%
            use_series(adjclose)

          result$adj_close <- adj_close
        }

        return(result)
      }
    }
  ),
  active = list(
    #' @field valuation_measures Retrieves valuation measures for most recent four quarters
    valuation_measures = function() {
      path <- 'ws/fundamentals-timeseries/v1/finance/timeseries/'
      end_point <- paste0(path, self$symbol)
      url <- modify_url(url = private$base_url, path = end_point)
      measure <- paste0('quarterly',
                        c('MarketCap', 'EnterpriseValue', 'PeRatio', 'ForwardPeRatio', 'PegRatio',
                          'PsRatio', 'PbRatio', 'EnterprisesValueRevenueRatio', 'EnterprisesValueEBITDARatio'),
                        collapse = ',')
      qlist <- list(type = measure, period1 = 493590046, period2 = round(as.numeric(now())), corsDomain = private$cors_domain)

      if (!curl::has_internet()) {
        message("No internet connection.")
        return(invisible(NULL))
      }

      resp <- GET(url, query = qlist)
      parsed <- fromJSON(content(resp, "text", encoding = "UTF-8"), simplifyVector = FALSE)

      if (http_error(resp)) {
        message(
          cat(
            "Yahoo Finance API request failed", '\n',
            paste('Status:', status_code(resp)), '\n',
            paste('Type:', http_status(resp)$category), '\n',
            paste('Message:', parsed$quoteSummary$error$code), '\n',
            paste('Description:', parsed$quoteSummary$error$description, '\n'),
            sep = ''
          )
        )
        return(invisible(NULL))
      } else {
        data <- parsed %>%
          use_series(timeseries) %>%
          use_series(result)

        data.frame(
          date = date(as_datetime(unlist(data[[1]]$timestamp))),
          enterprise_value = private$extract_valuation(data, 'quarterlyEnterpriseValue'),
          enterprise_value_ebitda_ratio = private$extract_valuation(data, 'quarterlyEnterprisesValueEBITDARatio'),
          enterprise_value_revenue_ratio = private$extract_valuation(data, 'quarterlyEnterprisesValueRevenueRatio'),
          forward_pe_ratio = private$extract_valuation(data, 'quarterlyForwardPeRatio'),
          market_cap = private$extract_valuation(data, 'quarterlyMarketCap'),
          pb_ratio = private$extract_valuation(data, 'quarterlyPbRatio'),
          pe_ratio = private$extract_valuation(data, 'quarterlyPeRatio'),
          peg_ratio = private$extract_valuation(data, 'quarterlyPegRatio'),
          ps_ratio = private$extract_valuation(data, 'quarterlyPsRatio')
        )
      }
    },

    #' @field option_chain Option chain data for all expiration dates for a given symbol
    option_chain = function() {
      path <- 'v7/finance/options/'
      end_point <- paste0(path, self$symbol)
      url <- modify_url(url = private$base_url, path = end_point)
      qlist <- list(getAllData = 'True', corsDomain = private$cors_domain)

      if (!curl::has_internet()) {
        message("No internet connection.")
        return(invisible(NULL))
      }

      resp <- GET(url, query = qlist)
      parsed <- fromJSON(content(resp, "text", encoding = "UTF-8"), simplifyVector = FALSE)

      if (http_error(resp)) {
        message(
          cat(
            "Yahoo Finance API request failed", '\n',
            paste('Status:', status_code(resp)), '\n',
            paste('Type:', http_status(resp)$category), '\n',
            paste('Message:', parsed$quoteSummary$error$code), '\n',
            paste('Description:', parsed$quoteSummary$error$description, '\n'),
            sep = ''
          )
        )
        return(invisible(NULL))
      } else {
        data <- parsed %>%
          use_series(optionChain) %>%
          use_series(result) %>%
          extract2(1) %>%
          use_series(options)

        calls <- data %>% map_dfr('calls')
        calls$option_type <- 'call'
        puts <- data %>% map_dfr('puts')
        puts$option_type <- 'put'

        result <- rbind(calls, puts)
        names(result) <- str_replace_all(names(result), '[A-Z]', private$snake_case)
        result$expiration <- as_datetime(result$expiration)
        result$last_trade_date <- as_datetime(result$last_trade_date)

        col_order <- c('expiration', 'option_type', 'contract_symbol', 'strike', 'currency',
                       'last_price', 'change', 'percent_change', 'open_interest', 'bid',
                       'ask', 'contract_size', 'last_trade_date', 'implied_volatility',
                       'in_the_money', 'volume')

        option_chain <- result[, col_order]
        return(option_chain)
      }
    },

    #' @field option_expiration_dates Option expiration dates
    option_expiration_dates = function() {
      path <- 'v7/finance/options/'
      end_point <- paste0(path, self$symbol)
      url <- modify_url(url = private$base_url, path = end_point)
      qlist <- list(getAllData = 'True', corsDomain = private$cors_domain)

      if (!curl::has_internet()) {
        message("No internet connection.")
        return(invisible(NULL))
      }

      resp <- GET(url, query = qlist)
      parsed <- fromJSON(content(resp, "text", encoding = "UTF-8"), simplifyVector = FALSE)

      if (http_error(resp)) {
        message(
          cat(
            "Yahoo Finance API request failed", '\n',
            paste('Status:', status_code(resp)), '\n',
            paste('Type:', http_status(resp)$category), '\n',
            paste('Message:', parsed$quoteSummary$error$code), '\n',
            paste('Description:', parsed$quoteSummary$error$description, '\n'),
            sep = ''
          )
        )
        return(invisible(NULL))
      } else {
        parsed %>%
          use_series(optionChain) %>%
          use_series(result) %>%
          extract2(1) %>%
          use_series(expirationDates) %>%
          map_dbl(extract) %>%
          as_datetime() %>%
          date()
      }
    },

    #' @field option_strikes Option strikes
    option_strikes = function() {
      path <- 'v7/finance/options/'
      end_point <- paste0(path, self$symbol)
      url <- modify_url(url = private$base_url, path = end_point)
      qlist <- list(getAllData = 'True', corsDomain = private$cors_domain)

      if (!curl::has_internet()) {
        message("No internet connection.")
        return(invisible(NULL))
      }

      resp <- GET(url, query = qlist)
      parsed <- fromJSON(content(resp, "text", encoding = "UTF-8"), simplifyVector = FALSE)

      if (http_error(resp)) {
        message(
          cat(
            "Yahoo Finance API request failed", '\n',
            paste('Status:', status_code(resp)), '\n',
            paste('Type:', http_status(resp)$category), '\n',
            paste('Message:', parsed$quoteSummary$error$code), '\n',
            paste('Description:', parsed$quoteSummary$error$description, '\n'),
            sep = ''
          )
        )
        return(invisible(NULL))
      } else {
        parsed %>%
          use_series(optionChain) %>%
          use_series(result) %>%
          extract2(1) %>%
          use_series(strikes) %>%
          map_dbl(extract)
      }
    },

    #' @field quote Get real-time quote information for given symbol
    quote = function() {
      path <- 'v7/finance/options/'
      end_point <- paste0(path, self$symbol)
      url <- modify_url(url = private$base_url, path = end_point)
      qlist <- list(getAllData = 'True', corsDomain = private$cors_domain)

      if (!curl::has_internet()) {
        message("No internet connection.")
        return(invisible(NULL))
      }

      resp <- GET(url, query = qlist)
      parsed <- fromJSON(content(resp, "text", encoding = "UTF-8"), simplifyVector = FALSE)

      if (http_error(resp)) {
        message(
          cat(
            "Yahoo Finance API request failed", '\n',
            paste('Status:', status_code(resp)), '\n',
            paste('Type:', http_status(resp)$category), '\n',
            paste('Message:', parsed$quoteSummary$error$code), '\n',
            paste('Description:', parsed$quoteSummary$error$description, '\n'),
            sep = ''
          )
        )
        return(invisible(NULL))
      } else {
        parsed %>%
          use_series(optionChain) %>%
          use_series(result) %>%
          extract2(1) %>%
          use_series(quote)
      }
    },

    #' @field recommendations Recommended symbols
    recommendations = function() {
      path <- 'v6/finance/recommendationsbysymbol/'
      end_point <- paste0(path, self$symbol)
      url <- modify_url(url = private$base_url, path = end_point)
      qlist <- list(corsDomain = private$cors_domain)

      if (!curl::has_internet()) {
        message("No internet connection.")
        return(invisible(NULL))
      }

      resp <- GET(url, query = qlist)
      parsed <- fromJSON(content(resp, "text", encoding = "UTF-8"), simplifyVector = FALSE)

      if (http_error(resp)) {
        message(
          cat(
            "Yahoo Finance API request failed", '\n',
            paste('Status:', status_code(resp)), '\n',
            paste('Type:', http_status(resp)$category), '\n',
            paste('Message:', parsed$quoteSummary$error$code), '\n',
            paste('Description:', parsed$quoteSummary$error$description, '\n'),
            sep = ''
          )
        )
        return(invisible(NULL))
      } else {
        data <- parsed %>%
          use_series(finance) %>%
          use_series(result) %>%
          extract2(1) %>%
          use_series(recommendedSymbols)

        data.frame(
          symbol = map_chr(data, 'symbol'),
          score = map_dbl(data, 'score')
        )
      }
    },

    #' @field technical_insights Technical indicators for given symbol
    technical_insights = function() {
      path <- 'ws/insights/v2/finance/insights'
      url <- modify_url(url = private$base_url, path = path)
      qlist <- list(symbol = self$symbol, corsDomain = private$cors_domain)

      if (!curl::has_internet()) {
        message("No internet connection.")
        return(invisible(NULL))
      }

      resp <- GET(url, query = qlist)
      parsed <- fromJSON(content(resp, "text", encoding = "UTF-8"), simplifyVector = FALSE)

      if (http_error(resp)) {
        message(
          cat(
            "Yahoo Finance API request failed", '\n',
            paste('Status:', status_code(resp)), '\n',
            paste('Type:', http_status(resp)$category), '\n',
            paste('Message:', parsed$quoteSummary$error$code), '\n',
            paste('Description:', parsed$quoteSummary$error$description, '\n'),
            sep = ''
          )
        )
        return(invisible(NULL))
      } else {
        parsed %>%
          use_series(finance) %>%
          use_series(result)
      }
    }
  ),
  private = list(
    base_url = 'https://query2.finance.yahoo.com',
    path = 'v10/finance/quoteSummary/',
    cors_domain = 'finance.yahoo.com',

    # resp_data = function(symbol, module) {
    #   end_point <- paste0(private$path, symbol)
    #   url <- modify_url(url = private$base_url, path = end_point)
    #   qlist <- list(modules = module, corsDomain = private$cors_domain)
    #   resp <- GET(url, query = qlist)
    #   parsed <- fromJSON(content(resp, "text", encoding = "UTF-8"), simplifyVector = FALSE)
    #   list(resp = resp, parsed = parsed)
    # },

    # parse_data = function(parsed) {
    #   parsed %>%
    #     use_series(quoteSummary) %>%
    #     use_series(result) %>%
    #     extract2(1)
    # },

    # display_error = function(resp, parsed) {
    #   cat(
    #     "Yahoo Finance API request failed", '\n',
    #     paste('Status:', status_code(resp)), '\n',
    #     paste('Type:', http_status(resp)$category), '\n',
    #     paste('Message:', parsed$quoteSummary$error$code), '\n',
    #     paste('Description:', parsed$quoteSummary$error$description, '\n'),
    #     sep = ''
    #   )
    # },

    # display_data = function(req) {
    #   private$parse_data(req$parsed)
    # },

    snake_case = function(x) {
      paste0('_', tolower(x))
    },

    extract_valuation = function(data, measure) {
      # use the requested measure; previously hard-coded to
      # 'quarterlyEnterpriseValue', which silently ignored the argument
      data %>%
        map_if(function(x) measure %in% names(x), measure) %>%
        map_depth(2, 'reportedValue') %>%
        map_depth(2, 'raw') %>%
        unlist()
    }
  )
)
/scratch/gouwar.j/cran-all/cranData/yahoofinancer/R/ticker.R
#' Symbol validation
#'
#' Validate symbols before retrieving data.
#'
#' @param symbol Ticker, index or fund name.
#'
#' @examples
#' validate("aapl")
#' validate("aapls")
#'
#' @export
#'
validate <- function(symbol = NULL) {
  base_url <- 'https://query2.finance.yahoo.com'
  path <- 'v6/finance/quote/validate'
  url <- modify_url(url = base_url, path = path)
  qlist <- list(symbols = symbol)

  if (!curl::has_internet()) {
    message("No internet connection.")
    return(invisible(NULL))
  }

  resp <- GET(url, query = qlist)
  parsed <- fromJSON(content(resp, "text", encoding = "UTF-8"), simplifyVector = FALSE)

  if (http_error(resp)) {
    message(
      cat(
        "Yahoo Finance API request failed", '\n',
        paste('Status:', status_code(resp)), '\n',
        paste('Type:', http_status(resp)$category), '\n',
        paste('Message:', parsed$quoteSummary$error$code), '\n',
        paste('Description:', parsed$quoteSummary$error$description, '\n'),
        sep = ''
      )
    )
    return(invisible(NULL))
  } else {
    parsed %>%
      use_series(symbolsValidation) %>%
      use_series(result) %>%
      extract2(1) %>%
      extract2(1)
  }
}

flatten_list <- function(x) {
  # replace NULL elements with NA so the unlisted vector keeps its length
  unlist(lapply(x, function(m) ifelse(is.null(m), NA, m)))
}

get_metric <- function(data, metric) {
  data[[metric]] %>%
    map(~ ifelse(is.null(.x), NA, .x)) %>%
    unlist()
}
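# Yahoo's JSON uses null for missing data points; once parsed with
# simplifyVector = FALSE those become NULL list elements, which must map
# to NA so column lengths stay aligned. A quick illustration:
# flatten_list(list(100, NULL, 102))                        #> c(100, NA, 102)
# get_metric(list(volume = list(1L, NULL, 3L)), 'volume')   #> c(1, NA, 3)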
/scratch/gouwar.j/cran-all/cranData/yahoofinancer/R/utils.R
#' \code{yahoofinancer} package
#'
#' Fetch Data from Yahoo Finance API
#'
#' See the README on
#' \href{https://github.com/rsquaredacademy/yahoofinancer}{GitHub}
#'
#' @docType package
#' @keywords internal
#' @name yahoofinancer
#' @aliases yahoofinancer-package
"_PACKAGE"

if (getRversion() >= "2.15.1") {
  utils::globalVariables(
    c(".", "adjclose", "chart", "currencies", "finance",
      "marketSummaryResponse", "quoteResponse", "result", "volume",
      "symbolsValidation")
  )
}
/scratch/gouwar.j/cran-all/cranData/yahoofinancer/R/yahoofinancer.R
`as.yaml` <- function(x, line.sep = c('\n', '\r\n', '\r'), indent = 2, omap = FALSE,
                      column.major = TRUE, unicode = TRUE, precision = getOption('digits'),
                      indent.mapping.sequence = FALSE, handlers = NULL) {
  line.sep <- match.arg(line.sep)
  res <- .Call(C_serialize_to_yaml, x, line.sep, indent, omap, column.major,
               unicode, precision, indent.mapping.sequence, handlers,
               PACKAGE = "yaml")
  Encoding(res) <- "UTF-8"
  res
}
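# A small usage sketch: serialize a named list to a YAML string.
# Integer vectors become block sequences.
# cat(as.yaml(list(name = "quinidine", doses = c(100L, 200L))))
# name: quinidine
# doses:
# - 100
# - 200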
/scratch/gouwar.j/cran-all/cranData/yaml/R/as.yaml.R
`read_yaml` <- function(file, fileEncoding = "UTF-8", text, error.label, readLines.warn = TRUE, ...) {
  if (missing(file) && !missing(text)) {
    if (missing(error.label)) {
      error.label <- NULL
    }
    file <- textConnection(text, encoding = "UTF-8")
    on.exit(close(file))
  } else if (is.character(file)) {
    if (missing(error.label)) {
      error.label <- file
    }
    file <- if (nzchar(fileEncoding)) file(file, "rt", encoding = fileEncoding) else file(file, "rt")
    on.exit(close(file))
  } else if (inherits(file, "connection")) {
    if (missing(error.label)) {
      # try to guess filename
      s <- try(summary(file), silent = TRUE)
      if (!inherits(s, "try-error") && is.list(s) && "description" %in% names(s)) {
        error.label <- s$description
      }
    }
    if (!isOpen(file, "rt")) {
      open(file, "rt")
      on.exit(close(file))
    }
  } else {
    stop("'file' must be a character string or connection")
  }

  string <- paste(readLines(file, warn = readLines.warn), collapse = "\n")
  yaml.load(string, error.label = error.label, ...)
}
/scratch/gouwar.j/cran-all/cranData/yaml/R/read_yaml.R
verbatim_logical <- function(x) {
  # lower-case 'true'/'false' text that the emitter will write as-is
  result <- tolower(as.logical(x))
  class(result) <- "verbatim"
  return(result)
}
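# By default the emitter writes R logicals as yes/no; the 'verbatim'
# class above forces the literal lower-case words instead. Expected
# behavior (a sketch, not verified output):
# as.yaml(TRUE)                    #> "yes\n"
# as.yaml(verbatim_logical(TRUE))  #> "true\n"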
/scratch/gouwar.j/cran-all/cranData/yaml/R/verbatim_logical.R
`write_yaml` <- function(x, file, fileEncoding = "UTF-8", ...) {
  result <- as.yaml(x, ...)

  if (is.character(file)) {
    file <- if (nzchar(fileEncoding)) {
      file(file, "w", encoding = fileEncoding)
    } else {
      file(file, "w")
    }
    on.exit(close(file))
  } else if (!inherits(file, "connection")) {
    # check the class before probing the connection state
    stop("'file' must be a character string or connection")
  } else if (!isOpen(file, "w")) {
    open(file, "w")
    on.exit(close(file))
  }

  cat(result, file = file, sep = "")
}
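# Round trip sketch: write a list to a temporary file and read it back.
# tmp <- tempfile(fileext = ".yml")
# write_yaml(list(a = 1L, b = "two"), tmp)
# read_yaml(tmp)$b  #> "two"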
/scratch/gouwar.j/cran-all/cranData/yaml/R/write_yaml.R
`yaml.load` <- function(string, as.named.list = TRUE, handlers = NULL, error.label = NULL,
                        eval.expr = getOption("yaml.eval.expr", FALSE),
                        merge.precedence = c("order", "override"), merge.warning = FALSE) {
  string <- enc2utf8(paste(string, collapse = "\n"))
  eval.warning <- missing(eval.expr) && is.null(getOption("yaml.eval.expr"))
  merge.precedence <- match.arg(merge.precedence)
  .Call(C_unserialize_from_yaml, string, as.named.list, handlers, error.label,
        eval.expr, eval.warning, merge.precedence, merge.warning, PACKAGE = "yaml")
}
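# Handlers let callers intercept scalars by YAML type tag before they are
# returned; a sketch of the interface from the package documentation
# pattern (the handler receives the parsed value):
# yaml.load("count: 123", handlers = list(int = function(x) x + 1L))
# #> $count
# #> [1] 124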
/scratch/gouwar.j/cran-all/cranData/yaml/R/yaml.load.R
`yaml.load_file` <- function(input, error.label, readLines.warn = TRUE, ...) {
  if (missing(error.label)) {
    if (inherits(input, "connection")) {
      # try to guess filename
      s <- try(summary(input), silent = TRUE)
      if (!inherits(s, "try-error") && is.list(s) && "description" %in% names(s)) {
        error.label <- s$description
      }
    } else if (is.character(input) && nzchar(input[1])) {
      error.label <- input[1]
    } else {
      error.label <- NULL
    }
  }

  if (is.character(input)) {
    con <- file(input, encoding = 'UTF-8')
    on.exit(close(con), add = TRUE)
  } else {
    con <- input
  }
  yaml.load(readLines(con, warn = readLines.warn), error.label = error.label, ...)
}
/scratch/gouwar.j/cran-all/cranData/yaml/R/yaml.load_file.R
.onUnload <- function(libpath) {
  library.dynam.unload("yaml", libpath)
}
/scratch/gouwar.j/cran-all/cranData/yaml/R/zzz.R
#' Alias a Data Frame
#'
#' Aliases a data.frame.
#' Replaces column names with labels, where present.
#' Stores column name as 'alias' attribute.
#'
#' @param object data.frame
#' @param ... optional unquoted names of target columns
#' @export
#' @keywords internal
#' @method alias data.frame
#' @importFrom stats alias
#' @family labels
#' @family deprecated
#' @return aliased data.frame
#' @examples
#' library(magrittr)
#' d <- data.frame(x = 1:10, y = 1:10, z = 1:10)
#' d %<>% modify(x, label = 'Independent Value')
#' d %<>% modify(y, label = 'Dependent Value')
#' d
#' alias(d)
#' alias(d, y)

alias.data.frame <- function(object, ...){
  x <- object
  y <- selected(x, ...)
  for(col in y){
    lab <- attr(x[[col]], 'label')
    attr(x[[col]], 'alias') <- col
    if(length(lab) == 1) names(x)[match(col, names(x))] <- lab
  }
  class(x) <- union('aliased', class(x))
  x
}
/scratch/gouwar.j/cran-all/cranData/yamlet/R/alias.R
#' Append Units
#'
#' Appends units attribute to label attribute.
#' Generic, with methods
#' \code{\link{append_units.default}} and
#' \code{\link{append_units.data.frame}}.
#' For a more general strategy see \code{\link{modify}}.
#'
#' @param x object
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @family deprecated
#' @family labels
#' @return see methods
#' @examples
#' # see methods
append_units <- function(x, ...)UseMethod('append_units')

#' Append Units By Default
#'
#' Units attribute is wrapped in \code{open} and
#' \code{close}, and appended to label.
#' Result is assigned to \code{target} attribute
#' (default: 'label').
#' If style is 'latex' or 'plotmath',
#' all elements are treated as spork
#' (\code{\link{as_spork}}) and coerced
#' to canonical form before concatenation.
#'
#' @param x object
#' @param ... passed to \code{\link{as_latex}}, \code{\link{as_plotmath}}
#' @param open character to precede units
#' @param close character to follow units
#' @param style one of 'plain', 'latex', or 'plotmath'
#' @param target attribute name for appended result
#' @export
#' @importFrom spork as_spork
#' @importFrom spork as_plotmath
#' @importFrom spork as_latex
#' @importFrom spork plotmathToken
#' @importFrom spork latexToken
#' @keywords internal
#' @family labels
#' @return same class as x, with sub-class 'latex' or 'plotmath' depending on \code{style}
#' @examples
#' library(units)
#' library(magrittr)
#' x <- 1:10
#' attr(x, 'label') <- 'acceleration'
#' units(x) <- 'm/s^2'
#' y <- as_units('kg')
#' x %>% attr('label')
#' x %>% append_units %>% attr('label')
#' y %>% attr('label')
#' y %>% append_units %>% attr('label')
#' x %>% append_units(style = 'plain')
#' x %>% append_units(style = 'plotmath')
#' x %>% append_units(style = 'latex')
#'
append_units.default <- function(
  x,
  ...,
  open = getOption( 'yamlet_append_units_open' , ' (' ),
  close = getOption('yamlet_append_units_close', ')' ),
  style = getOption('yamlet_append_units_style','plain'),
  target = getOption('yamlet_append_units_target', 'label')
){
  stopifnot(style %in% c('plain', 'latex','plotmath'))
  lab <- attr(x, 'label')
  unit <- attr(x, 'units')
  # coerces symbolic_units nicely, without damaging count of non-singular units
  # but drops names
  unit <- as.character(unit)
  if(!inherits(attr(x, 'units'),'symbolic_units')){
    names(unit) <- names(attr(x, 'units'))
  }
  if(is.null(lab)) lab <- ''
  nms <- names(lab)
  lab <- as.list(lab)
  if(
    length(unit) != length(lab) &
    length(unit) > 1 # zero or 1 is fine
  )warning('length of units does not match length of labels')
  if(!identical(names(lab), names(unit)))warning('names of units do not match names of labels')
  unit <- rep(unit, length(lab))
  open <- rep(open, length(lab))
  close <- rep(close, length(lab))
  if(length(unit)){
    # can't test for NULL because as.character(NULL) above is not NULL
    # unit <- as.character(unit)
    for(i in seq_along(lab)){
      lab[[i]] <- c(
        label = lab[[i]],
        open = open[[i]],
        units = unit[[i]],
        close = close[[i]]
      )
    }
  }
  if(style == 'plain'){
    for(i in seq_along(lab)){
      lab[[i]] <- paste(lab[[i]], collapse = '')
    }
  }
  if(style == 'latex'){
    for(i in seq_along(lab)){
      lab[[i]] <- as_spork(as_spork(lab[[i]]))
      lab[[i]] <- paste(lab[[i]], collapse = '')
      lab[[i]] <- as_latex(as_spork(lab[[i]]), ...)
    }
  }
  if(style == 'plotmath'){
    for(i in seq_along(lab)){
      lab[[i]] <- as_spork(as_spork(lab[[i]]))
      lab[[i]] <- paste(lab[[i]], collapse = '')
      lab[[i]] <- as.expression(as_plotmath(as_spork(lab[[i]]), ...))
    }
  }
  names(lab) <- nms
  if(length(lab) == 1) lab <- lab[[1]]
  attr(x, target) <- lab
  x
}

#' Append Units for Data Frame
#'
#' Appends units for data.frame.
#' For finer control, consider applying
#' \code{\link{append_units.default}}
#' to individual columns.
#'
#' @param x data.frame
#' @param ... named arguments passed to default method, un-named are columns to alter scope
#' @export
#' @keywords internal
#' @family labels
#' @return data.frame
#' @examples
#' library(magrittr)
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
#' file %>% decorate %>% explicit_guide %>% append_units %>% decorations(Age, glyco)
#' file %>% decorate %>% explicit_guide %>% append_units(glyco) %>% decorations(Age, glyco)
append_units.data.frame <- function(x, ...){
  vars <- selected(x, ...)
  mods <- named(...)
  for(var in vars){
    # pass only named arguments
    x[[var]] <- do.call(append_units, c(list(x[[var]]), mods))
  }
  #x[] <- lapply(x, append_units, ...)
  x
}

#' @export
spork::latexToken
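# Since the appended text lands in the 'label' attribute, it is easy to
# forward to an axis label. A hedged sketch assuming ggplot2 (not required
# by yamlet for this purpose); the data and attribute values are invented:
# library(ggplot2); library(magrittr)
# x <- data.frame(time = 1:10, conc = rnorm(10))
# attr(x$conc, 'label') <- 'concentration'
# attr(x$conc, 'units') <- 'ng/mL'
# x$conc %<>% append_units
# ggplot(x, aes(time, conc)) + geom_point() + ylab(attr(x$conc, 'label'))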
/scratch/gouwar.j/cran-all/cranData/yamlet/R/append_units.R
#' Enforce Canonical Order
#'
#' Enforce canonical order. Generic, with
#' method: \code{\link{canonical.decorated}}.
#' @param x object
#' @param ... passed arguments
#' @return list
#' @family canonical
#' @export
#' @keywords internal
canonical <- function(x, ...)UseMethod('canonical')

#' Sort Decorations
#'
#' Enforces canonical attribute order for class 'decorated'.
#' Set of default_keys will be augmented with all observed attribute names
#' and will be expanded or reduced as necessary for each
#' data item.
#'
#' @param x decorated
#' @param default_keys attribute names in preferred order
#' @param ... ignored
#' @export
#' @family canonical
#' @family interface
#' @return decorated
#' @examples
#' # make some decorated data
#' library(magrittr)
#' x <- data.frame(x = 1, y = 1, z = factor('a'))
#' x %<>% decorate('
#' x: [ guide: mm, desc: this, label: foo ]
#' "y": [ guide: bar, desc: other ]
#' ')
#'
#' # retrieve decorations: label not first!
#' decorations(x)
#'
#' # sort label first by default
#' decorations(canonical(x))
#'
#' # equivalent invocation
#' canonical(decorations(x))
#'
canonical.decorated <- function(
  x,
  default_keys = getOption('yamlet_default_keys',list('label','guide')),
  ...
){
  for(i in seq_len(ncol(x))){
    default_keys <- union(default_keys, names(attributes(x[[i]])))
  }
  for(i in seq_len(ncol(x))){
    at <- attributes(x[[i]])
    nms <- names(at)
    use <- unlist(intersect(default_keys, nms))
    at <- at[use]
    if(length(nms)) attributes(x[[i]]) <- at
  }
  x
}

#' Sort Yamlet
#'
#' Enforces canonical attribute order for class 'yamlet'.
#' Set of default_keys will be augmented with all observed attribute names
#' and will be expanded or reduced as necessary for each
#' data item.
#'
#' @param x yamlet
#' @param default_keys attribute names in preferred order
#' @param ... ignored
#' @export
#' @keywords internal
#' @family canonical
#' @return decorated
#' @examples
#' library(magrittr)
#' x <- data.frame(x = 1, y = 1, z = factor('a'))
#' x %<>% decorate('
#' x: [ guide: mm, desc: this, label: foo ]
#' "y": [ guide: bar, desc: other ]
#' ')
#'
#' decorations(x)
#' decorations(canonical(x))
#' canonical(decorations(x))
#' write_yamlet(x)
#'
canonical.yamlet <- function(
  x,
  default_keys = getOption('yamlet_default_keys',list('label','guide')),
  ...
){
  for(i in seq_along(x)){
    default_keys <- union(default_keys, names(x[[i]]))
  }
  for(i in seq_along(x)){
    nms <- names(x[[i]])
    use <- unlist(intersect(default_keys, nms))
    if(length(nms)) x[[i]] <- x[[i]][use]
  }
  x
}
/scratch/gouwar.j/cran-all/cranData/yamlet/R/canonical.R
#' Classify Something #' #' Classifies something. #' Generic, with method \code{\link{classified.default}} #' @param x object of dispatch #' @param ... passed arguments #' @export #' @return see methods #' @keywords internal #' @family classified #' @examples #' example(classified.default) classified <- function(x, ...)UseMethod('classified') #' Create Classified from Factor #' #' Creates classified from factor. Uses \code{\link{classified.default}}, #' but supplies existing levels by default. #' #' @export #' @return 'classified' 'factor' #' @param x see \code{\link{factor}} #' @param levels passed to \code{\link{classified.default}}; defaults to \code{levels(x)} #' @param labels passed to \code{\link{classified.default}}; must be same length as levels(after removing values in \code{exclude}) and must not contain duplicates #' @param exclude see \code{\link{factor}} #' @param ordered see \code{\link{factor}} #' @param nmax see \code{\link{factor}} #' @param token informative label for messages #' @param ... ignored #' @importFrom dplyr distinct #' @family classified #' @examples #' a <- factor(c('c','b','a')) #' levels(classified(a)) #' attr(classified(a), 'codelist') classified.factor <- function( x = character(), levels, labels, exclude = NA, ordered = is.ordered(x), nmax = NA, token = character(0), ... ){ stopifnot(is.character(token), length(token) <= 1) if(missing(levels)) levels <- match.fun('levels')(x) levels <- setdiff(levels, exclude) if(missing(labels)) labels <- levels stopifnot(identical(length(levels), length(labels))) if(any(duplicated(labels)))(stop(paste( collapse = ': ', c(token, 'duplicated labels not supported in this context')))) y <- classified.default( x, levels = levels, labels = labels, exclude = exclude, ordered = ordered, nmax = NA, ... ) y } #' Create Classified by Default #' #' Creates a factor of subclass 'classified', #' for which there are attribute-preserving methods. #' In particular, classified has a codelist attribute #' indicating the origin of its levels: it is #' constructed from the codelist attribute of x #' if available, or from 'levels' and 'labels' #' by default. Unlike the case for \code{\link{factor}}, #' length of labels cannot be one (i.e., different from #' length of levels). #' #' @export #' @return 'classified' 'factor' #' @param x see \code{\link{factor}} #' @param levels see \code{\link{factor}} #' @param labels see \code{\link{factor}}, must have same length as levels #' @param exclude see \code{\link{factor}} #' @param ordered see \code{\link{factor}} #' @param nmax see \code{\link{factor}} #' @param token informative label for messages #' @param ... ignored #' @importFrom dplyr distinct #' @family classified #' @examples #' #' # classified creates a factor with a corresponding codelist attribute #' classified(c('a','b','c')) #' #' # codelist 'remembers' the origins of levels #' classified(c('a','b','c'), labels = c('A','B','C')) #' #' # classified is 'reversible' #' library(magrittr) #' c('a','b','c') %>% #' classified(labels = c('A','B','C')) %>% #' unclassified classified.default <- function( x = character(), levels, labels, exclude = NA, ordered = is.ordered(x), nmax = NA, token = character(0), ... ){ cl <- attr(x,'codelist') # could be NULL # if we have a codelist, use it if(!is.null(cl)){ attr(x,'codelist') <- NULL # before working with codelist, honor the exclude request bad <- sapply(cl, function(val)val %in% exclude) cl <- cl[!bad] # mimic non-NA exclude behavior: # @ 0.10.12, commenting next (nonsensical?) 
# if(length(exclude) == 0) cl <- c(cl, NA) # default levels and labels if(missing(levels)){ levels <- unlist(cl) } if(missing(labels)){ labels <- names(cl) if(is.null(labels))labels <- rep('', length(levels)) labels[labels == ''] <- levels[labels == ''] } } # if no codelist, set up default labels and levels if (missing(levels)) { y <- unique(x, nmax = nmax) ind <- order(y) levels <- unique(as.character(y)[ind]) levels <- setdiff(levels, exclude) } if(missing(labels)){ labels <- as.character(levels) } # at this point, levels and labels should have matching length # should be true using defaults if(length(levels) != length(labels))stop( paste( collapse = ': ', c( token, 'classified requires labels and levels of the same length' ) ) ) # under some circumstances, levels has names, which may be NA # then data.frame inherits NA rownames which is an error. names(levels) <- NULL names(labels) <- NULL codes <- data.frame(levels = levels, labels = labels) if(any(duplicated(codes))){ duplicated <- anyDuplicated(codes) msg <- paste0( 'dropping duplicated levels, e.g.: ', codes$levels[[duplicated]], ' (', codes$labels[[duplicated]], ')' ) msg <- paste(collapse = ': ', c(token, msg)) warning(msg) codes <- unique(codes) } if(any(duplicated(codes$levels))){ duplicated <- anyDuplicated(codes$levels) msg <- paste0( 'level(s) cross-labelled, e.g.: ', codes$levels[[duplicated]], ': ', paste( collapse = ', ', codes$labels[codes$levels == codes$levels[[duplicated]]] ) ) msg <- paste(collapse = ': ', token, msg) warning(msg) } if(any(duplicated(codes$labels))){ duplicated <- anyDuplicated(codes$labels) msg <- paste0( 'levels like-labelled, e.g.: ', paste(collapse = ', ', codes$levels[codes$labels == codes$labels[[duplicated]]]), ': ', codes$labels[[duplicated]] ) msg <- paste(collapse = ': ', token, msg) warning(msg) } # having dropped any duplicates, we unpack codes labels <- codes$labels levels <- codes$levels # in every case, make a good codelist codelist <- as.list(labels) names(codelist) <- levels # simplify codelist if possible if(identical(paste(names(codelist)), paste(unlist(codelist)))) { names(codelist) <- NULL # codelist <- unlist(codelist) # @v0.8.9 for consistency with other methods } # call factor() z <- factor( x = x, levels = levels, labels = labels, exclude = exclude, # but exclusions will have already occurred ordered = ordered, nmax = nmax ) # enforce attributes nms <- names(attributes(x)) nms <- setdiff(nms, c('class','levels')) for(nm in nms){ attr(z, nm) <- attr(x, nm) } attr(z, 'codelist') <- codelist # enforce class class(z) <- union('classified', class(z)) # return z } # Coerce to Classified # # Coerce something to classified. # Generic, with method for factor. # Deprecated. Prefer classified(). # # @param x object # @param ... passed arguments # @export # @keywords internal # @family classified # @return see methods # @examples # example(as_classified.factor) # as_classified <- function(x, ...)UseMethod('as_classified') # Coerce Factor to Classified # # Coerce factor to classified. # Creates a factor that retains attributes during subsetting. # Deprecated. Prefer classified(). # # @param x factor # @param ... 
ignored arguments # @export # @keywords internal # @family classified # @return class 'classified' 'factor' # @examples # class(as_classified(factor(letters))) # as_classified.factor <- function(x, ...){ # class(x) <- union('classified', class(x)) # x # } # http://adv-r.had.co.nz/S3.html # When implementing a vector class, you should implement these methods: #length, [, [<-, [[, [[<-, c. #' Subset Classified #' #' Subsets classified factor, retaining attributes. #' @param x classified factor #' @param ... passed to next method #' @export #' @keywords internal #' @family classified #' @return class 'classified' 'factor' #' @examples #' a <- classified(letters[1:3]) #' attr(a, 'label') <- 'foo' #' a <- a[1:3] #' attributes(a) `[.classified` <- function(x, ...){ y <- NextMethod() # contrasts and levels will have been handled nms <- names(attributes(x)) nms <- setdiff(nms, c('contrasts','levels')) for(nm in nms){ attr(y, nm) <- attr(x, nm) } y } #' Element-select Classified #' #' Selects element of classified factor, retaining attributes. #' @param x classified factor #' @param ... passed to next method #' @export #' @keywords internal #' @family classified #' @return class 'classified' 'factor' #' @examples #' a <- classified(letters[1:3]) #' attr(a, 'label') <- 'foo' #' a <- a[[2]] #' attributes(a) `[[.classified` <- function(x, ...){ y <- NextMethod() # contrasts and levels will have been handled nms <- names(attributes(x)) nms <- setdiff(nms, c('contrasts','levels')) for(nm in nms){ attr(y, nm) <- attr(x, nm) } y } #' Assign Subset of Classified #' #' Assigns subset of classified factor, retaining attributes. #' @param x classified factor #' @param ... passed to next method #' @export #' @keywords internal #' @family classified #' @return class 'classified' 'factor' #' @examples #' a <- classified(letters[1:3]) #' a[2:3] <- 'a' #' str(a) #' class(a) `[<-.classified` <- function(x, ..., value){ y <- NextMethod() # class and levels will have been handled nms <- names(attributes(x)) nms <- setdiff(nms, c('levels')) # implicitly restore class for(nm in nms){ attr(y, nm) <- attr(x, nm) } y } #' Assign Element of Classified #' #' Assigns element of classified factor, retaining attributes. #' @param x classified factor #' @param ... passed to next method #' @export #' @keywords internal #' @family classified #' @return class 'classified' 'factor' #' @examples #' a <- classified(letters[1:3]) #' a[[3]] <- 'a' #' str(a) #' class(a) `[[<-.classified` <- function(x, ..., value){ y <- NextMethod() # class and levels will have been handled nms <- names(attributes(x)) nms <- setdiff(nms, c('levels')) # implicitly restore class for(nm in nms){ attr(y, nm) <- attr(x, nm) } y } #' Combine Classified #' #' Combines classified factor, retaining attributes. #' Attributes other than levels and codelist are taken #' from the first argument. Attribute 'levels' is #' supplied by next method. Attribute 'codelist' #' is the combined codelists in sequence of #' all (dots) arguments, after silently removing #' exact duplicates, and then removing #' duplicated names with warning. #' #' @param ... passed to next method #' @param recursive passed to unlist() internally #' @export #' @keywords internal #' @family classified #' @return class 'classified' 'factor' #' @examples #' a <- classified(letters[1:3]) #' b <- classified(letters[3:5]) #' c <- c(a,b) #' c #' class(c) #' `c.classified` <- function( ..., recursive = TRUE ){ c_factor <- function (..., recursive = TRUE) { # i.e. c.factor() from R 4.1.0 x <- list(...) 
y <- unlist(x, recursive = recursive) if ( inherits(y, "factor") && all(vapply(x, inherits,NA, "ordered")) && (length(unique(lapply(x, levels))) == 1L) ) class(y) <- c("ordered", "factor") y } # y <- NextMethod() # not back-compatible before R 4.1.0 y <- c_factor(..., recursive = recursive) # class and levels will have been handled all <- list(...) x <- all[[1]] nms <- names(attributes(x)) nms <- setdiff(nms, c('levels')) # implicitly restore class for(nm in nms){ attr(y, nm) <- attr(x, nm) } # combine levels codelist <- list() for(i in 1:length(all)){ codelist <- c(codelist, attr(all[[i]], 'codelist')) } # explicit names if(is.null(names(codelist)))names(codelist) <- unlist(codelist) # codelist names can be be NA but not blank names(codelist)[which(names(codelist) == '')] <- unlist(codelist)[which(names(codelist) == '')] codelist <- codelist[!duplicated(codelist)] # silently remove exact dups if(any(duplicated(names(codelist))))warning('conflicting codelist specifications') codelist <- codelist[!duplicated(names(codelist))] #if(all(names(codelist) == unlist(codelist))){ if(identical(names(codelist), as.character(unlist(codelist)))){ names(codelist) <- NULL codelist <- unlist(codelist) } attr(y,'codelist') <- codelist y } #' Classify Data Frame #' #' Coerces items in data.frame with codelist attribute to 'classified': #' a factor with a codelist attribute. #' #' @param x data.frame #' @param ... passed to \code{\link[dplyr]{select}} to limit column scope #; also passed to \code{\link{classified.default}} to modify behavior #' @param exclude see \code{\link{factor}} #' @param ordered see \code{\link{factor}} #' @param nmax see \code{\link{factor}} #' @export #' @keywords internal #' @return data.frame #' @family classified #' @family interface #' @examples #' library(magrittr) #' file <- system.file(package = 'yamlet', 'extdata','quinidine.csv') #' x <- decorate(file) #' x %>% explicit_guide %>% decorations(Age, Race, Heart:glyco) #' x %>% explicit_guide %>% classified %>% decorations(Age, Race, Heart:glyco) #' x %>% explicit_guide %>% classified(Heart:glyco) %>% decorations(Age, Race, Heart:glyco) classified.data.frame <- function( x, ..., exclude = NA, ordered = is.ordered(x), nmax = NA ){ my_class <- class(x) for(nm in selected(x,...)){ if('codelist' %in% names(attributes(x[[nm]]))){ # grouped_df can drop subclass! x[[nm]] <- classified( x[[nm]], exclude = exclude, ordered = ordered, nmax = nmax, token = nm ) } } class(x) <- my_class x } #' Classify Decorated Vector #' #' Coerces dvec to 'classified': #' a factor with a codelist attribute. #' Results may differ if explicit_guide() #' is called first. #' #' @param x dvec #' @param ... un-named arguments ignored. Named arguments passed to \code{\link{classified.default}} to modify behavior #' @param exclude see \code{\link{factor}} #' @param ordered see \code{\link{factor}} #' @param nmax see \code{\link{factor}} #' @param token informative label for messages #' @export #' @keywords internal #' @return classified #' @family classified #' @family dvec #' @examples #' library(magrittr) #' x <- as_dvec(1:3) #' attr(x, 'guide') <- list(a = 1, b = 2, c = 3) #' x %>% str #' x %>% classified %>% str #' x %>% explicit_guide %>% classified %>% str classified.dvec <- function( x, ..., exclude = NA, ordered = is.ordered(x), nmax = NA, token = character(0) ){ y <- unclass(x) y <- classified( y, exclude = exclude, ordered = ordered, nmax = nmax, token = token, ... ) y } #' Coerce Classified to Integer #' #' Coerces classified to integer. 
#' Result is like \code{as.integer(as.numeric(x)) + offset}
#' but has a guide attribute: a list of integers
#' whose names are the original levels of x.
#' If you need a simple integer, consider coercing first to numeric.
#'
#' @param x classified, see \code{\link{classified}}
#' @param offset an integer value to add to intermediate result
#' @param ... passed to \code{\link{desolve}}
#' @param persistence whether to return 'dvec' (is.integer(): TRUE) or just integer
# @param exclude_attr discard these when preserving attributes of x in result
#' @export
#' @family classified
#' @return integer (possibly of class dvec)
#' @examples
#' library(magrittr)
#'
#' # create factor with codelist attribute
#' classified(c('knife','fork','spoon'))
#'
#' # give back a simple numeric
#' classified(c('knife','fork','spoon')) %>% as.numeric
#'
#' # intentionally preserve levels as 'guide' attribute
#' classified(c('knife','fork','spoon')) %>% as.integer
#'
#' # implement offset
#' classified(c('knife','fork','spoon')) %>% as.integer(-1)
#'
#' # globally defeat the 'persistence' paradigm
#' options(yamlet_persistence = FALSE)
#' c('knife','fork','spoon') %>%
#' classified %>%
#' as.integer %>%
#' class # integer
#'
#' # remove option to restore default persistence paradigm
#' options(yamlet_persistence = NULL)
#' c('knife','fork','spoon') %>%
#' classified %>%
#' as.integer %>%
#' class # dvec
#'
#' # locally defeat persistence paradigm
#' c('knife','fork','spoon') %>%
#' classified %>%
#' as.integer(persistence = FALSE) %>%
#' class # integer
#'
as.integer.classified <- function(
  x,
  offset = 0L,
  ...,
  persistence = getOption('yamlet_persistence', TRUE)
  #,
  #exclude_attr = getOption("yamlet_as.integer_exclude_attr", c("class", "levels", "codelist"))
){
  stopifnot(
    length(offset) == 1,
    !is.na(offset),
    as.integer(offset) == offset
  )
  offset <- as.integer(offset)
  # note: levels(x) should be same as unlist(attr(x, 'codelist'))
  # y <- as.numeric(x, ...)
  # y <- as.integer(y, ...) # explicitly casting to int as of 0.9.0
  # y <- y + offset
  # z <- mimic(x, y, ...) # drops levels!
  # x has a codelist and seq gives integer
  vals <- seq_along(attr(x, 'codelist'))
  vals <- vals + offset
  names(attr(x, 'codelist')) <- vals
  r <- desolve(x, persistence = TRUE, ...) # gives guide instead of codelist at 0.9.0
  # at this point, r should be dvec
  # passing persistence to desolve fails because there is no
  # vector method for implicit_guide (only a data.frame method)
  if(!persistence) {
    r <- unclass(r)
  }
  r
}

#' Create Classified from Classified
#'
#' See \code{\link{classified.default}}.
#' Formerly (version 0.10.10), calling classified() on a
#' classified object was a non-operation.
#' Currently we call factor(x, ...) and then
#' try to reconcile the codelist attribute with resulting
#' levels.
#'
#' By default classified is idempotent, such that classified(classified(x)) is
#' the same as classified(x). In contrast, factor(factor(x)) will drop unused
#' levels (not shown). To drop unused levels, use classified(classified(x), drop = TRUE).
#'
#' @export
#' @return 'classified' 'factor'
#' @param x classified
#' @param levels passed to \code{\link{factor}}; defaults to \code{levels(x)}
#' @param labels passed to \code{\link{factor}}; must be same length as levels
#' (after removing values in \code{exclude} and unused levels if \code{drop} is TRUE)
#' and must not contain duplicates
#' @param exclude passed to \code{\link{factor}}
#' @param ordered passed to \code{\link{factor}}
#' @param nmax passed to \code{\link{factor}}
#' @param drop whether to drop unused levels
#' @param ... ignored
#' @keywords internal
#' @family classified
#' @examples
#'
#' a <- 4:6
#' attr(a, 'codelist') <- list(d = 4, e = 5, f = 6, g = 7)
#' b <- classified(a)
#' a
#' b
#' class(b)
#' classified(b)
#' identical(b, classified(b))
classified.classified <- function(
  x,
  levels,
  labels,
  exclude = NULL,
  ordered = is.ordered(x),
  nmax = NA,
  drop = FALSE,
  ...
){
  if(missing(levels)) levels <- match.fun('levels')(x)
  levels <- setdiff(levels, exclude)
  if(drop) levels <- levels[levels %in% x]
  if(missing(labels)) labels <- levels
  stopifnot(identical(length(levels), length(labels)))
  if(any(duplicated(labels))) stop('duplicated labels not supported in this context')
  codelist <- attr(x, 'codelist')
  nms <- names(codelist) # from (character)
  vals <- as.character(unlist(codelist)) # to (coerced to character)
  stopifnot(identical(levels(x), vals)) # continuity check: should always be true
  y <- factor(
    x,
    levels = levels,
    labels = labels,
    exclude = exclude,
    ordered = ordered,
    nmax = nmax
  )
  # now we rebuild the codelist
  # nms is the original form and order
  # levels(y) is the current form and order
  # we need a codelist with levels(y) but names from nms
  # i.e., we need to (re) map names to the current levels
  # the current levels though derive from the provided labels
  # current level order should prevail,
  # labels should be traced to provided levels,
  # and thence to provided (codelist) vals,
  # and thence to provided (codelist) nms
  codelist <- as.list(type.convert(levels(y), as.is = TRUE))
  # what provided values of 'levels' match existing values of 'levels',
  # which are taken from provided 'labels'?
  was <- levels[match(levels(y), labels)]
  # now we have each former level for existing levels(y)
  # in an order corresponding to levels(y)
  # Those former levels were necessarily among the vals of former codelist.
  # we recover the meanings from nms
  meant <- nms[match(was, vals)]
  # now we know what these levels meant originally. Possibly nothing. Possibly NA.
  names(codelist) <- meant
  # all this manipulation could introduce multiple NA as codelist names.
  # in fact, codelist names should never be duplicated.
  if(any(duplicated(meant))){
    example <- meant[duplicated(meant)][[1]]
    warning('codelist names should not contain duplicates, e.g. ', example)
  }
  # enforce attributes
  nms <- names(attributes(x))
  nms <- setdiff(nms, c('class', 'levels', 'codelist', 'guide'))
  for(nm in nms){
    attr(y, nm) <- attr(x, nm)
  }
  attr(y, 'codelist') <- codelist
  class(y) <- union('classified', class(y))
  y
}

# Abbreviate Classified
#
# Abbreviated class name for 'classified'.
#
# @export
# @importFrom vctrs vec_ptype_abbr
# @method vec_ptype_abbr classified
# @return character
# @keywords internal
# @param x classified
# @param ... ignored
# @examples
# cat(vec_ptype_abbr(classified(0)))
# vec_ptype_abbr.classified <- function(x, ...) {
#   "clsfd"
# }

#' @importFrom pillar type_sum
#' @export
pillar::type_sum

#' Summarize Type of Classified
#'
#' Summarizes type of classified.
#'
#' @param x classified
#' @importFrom pillar type_sum
#' @export
#' @keywords internal
#' @method type_sum classified
#' @return character
#' @examples
#' type_sum(classified(0))
type_sum.classified <- function(x){
  'clfac'
}
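
# A minimal usage sketch (illustrative, not part of the package API): it
# exercises the methods above to show that non-structural attributes such as
# 'label' and 'codelist' survive subsetting, assignment, and combination.
# Assumes the exported classified() constructor; run interactively, e.g.:
#
# a <- classified(letters[1:3])
# attr(a, 'label') <- 'foo'
# attributes(a[2:3])         # `[.classified` keeps 'label' and 'codelist'
# a[[2]] <- 'c'              # `[[<-.classified` restores attributes after assignment
# b <- classified(letters[3:5])
# attr(c(a, b), 'codelist')  # c.classified merges codelists, dropping exact duplicates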
/scratch/gouwar.j/cran-all/cranData/yamlet/R/classified.R
#' Test Object is Conditional
#'
#' Tests whether object is conditional.
#' @param x character
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @family conditional
#' @return logical
isConditional <- function(x, ...)UseMethod('isConditional')

#' Test Object is Conditional by Default
#'
#' Tests whether object is conditional by default. Coerces to list.
#' @param x default
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @family conditional
#' @return logical
#'
isConditional.default <- function(x, ...)isConditional(as.list(x), ...)

#' Test List is Conditional
#'
#' Tests whether a list is conditional.
#' Evaluates names of x on data and looks for meaningful result.
#' Returns TRUE if list has names and
#' all evaluate to logicals with length equal
#' to number of rows in data.
#' @param x list
#' @param data environment for variable lookup
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @family conditional
#' @return length-one logical
isConditional.list <- function(x, data, ...){
  nms <- names(x)
  if(!length(nms)) return(FALSE)
  vals <- lapply(
    nms,
    function(i) try(
      silent = TRUE,
      eval(
        parse(text = i),
        envir = data,
        enclos = NULL
      )
    )
  )
  logi <- sapply(vals, inherits, 'logical')
  len <- sapply(vals, length)
  return(all(logi & len == nrow(data)))
}

#' Test Value is Levels
#'
#' Tests whether value is levels.
#' @param x character
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @family levels
#' @return logical
isLevels <- function(x, ...)UseMethod('isLevels')

#' Test Value is Levels by Default
#'
#' Tests whether value is levels by default. Coerces to character.
#' @param x default
#' @param table lookup vector
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @family levels
#' @return logical
isLevels.default <- function(x, table, ...)isLevels(as.character(x), table, ...)

#' Test Character Value is Levels
#'
#' Tests whether character value is levels.
#' Looks for any matches to vector.
#' Uses \code{\link{intersect}}, which is fairly flexible
#' respecting underlying data types (character '0' can match integer 0, etc.).
#' @param x default
#' @param table lookup vector
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @family levels
#' @return logical
isLevels.character <- function(x, table, ...){
  as.logical(length(intersect(x, table)) >= 1)
}
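
# A minimal sketch (illustrative, not part of the package API) of how these
# predicates behave. isConditional() treats the *names* of a list as
# expressions to be evaluated against data; isLevels() just tests for overlap:
#
# x <- data.frame(event = c('dose', 'conc'))
# isConditional(list("event == 'dose'" = 'a label'), x) # TRUE: one logical per row
# isConditional(list(foo = 'a label'), x)               # FALSE: evaluation fails
# isLevels('dose', c('dose', 'conc'))                   # TRUE: at least one match
# isLevels('bolus', c('dose', 'conc'))                  # FALSE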
/scratch/gouwar.j/cran-all/cranData/yamlet/R/conditional.R
globalVariables('token')

#' Conditionalize Attributes
#'
#' Conditionalizes attributes of something.
#' Generic, with method for data.frame.
#' @param x object
#' @param ... passed arguments
#' @export
#' @keywords internal
#' @family conditionalize
#' @return see methods
#' @examples
#' example(conditionalize.data.frame)
conditionalize <- function(x, ...)UseMethod('conditionalize')

#' Conditionalize Attributes of Data Frame
#'
#' Conditionalizes attributes of data.frame.
#' Creates a conditional \code{attribute} definition
#' for \code{column} by mapping \code{value} to
#' \code{test}. Only considers records where
#' both \code{test} and \code{value} are defined,
#' and gives an error if there is not a one-to-one mapping.
#' Can be used with write methods as an alternative
#' to hand-coding conditional metadata.
#'
#' If the test column is character, individual
#' elements should not contain both single and
#' double quotes. For the conditional expressions,
#' these values will be single-quoted by default,
#' or double-quoted if they contain single quotes.
#'
#' @param x data.frame
#' @param column unquoted name of column to conditionalize
#' @param attribute unquoted name of attribute to create for column
#' @param test unquoted name of column to test
#' @param value unquoted name of column supplying attribute value
#' @param ... ignored arguments
#' @importFrom rlang ensym
#' @importFrom rlang :=
#' @importFrom dplyr mutate
#' @importFrom dplyr distinct
#' @importFrom dplyr filter
#' @export
#' @keywords internal
#' @family conditionalize
#' @return class 'decorated' 'data.frame'
#' @examples
#' library(magrittr)
#' library(dplyr)
#' library(csv)
#' file <- system.file(package = 'yamlet', 'extdata','phenobarb.csv')
#' x <- as.csv(file)
#' head(x,3)
#'
#' # suppose we have an event label stored as a column:
#'
#' x %<>% mutate(evid = ifelse(
#'  event == 'dose',
#'  'dose of drug administered',
#'  'serum phenobarbital concentration'
#'  )
#' )
#'
#' # We can define a conditional label for 'value'
#' # by mapping evid to event:
#'
#' x %<>% conditionalize(value, label, event, evid)
#'
#' x %>% as_yamlet
#' x %>% write_yamlet
#'
conditionalize.data.frame <- function(x, column, attribute, test, value, ...){
  col <- as.character(ensym(column))
  atr <- as.character(ensym(attribute))
  tst <- ensym(test)
  val <- ensym(value)
  y <- filter(x, !is.na(!!tst), !is.na(!!val))
  map <- distinct(y, !!tst, !!val)
  if(nrow(map) != nrow(distinct(y, !!tst))) stop(
    "'", as.character(val),
    "' not cleanly mapped to defined '",
    as.character(tst), "'"
  )
  # map <- mutate(map, !!tst := as.character(!!tst))
  if(is.factor(map[[as.character(tst)]])){
    map[[as.character(tst)]] <- as.character(map[[as.character(tst)]])
  }
  m <- map[[as.character(tst)]]
  if(is.character(m)){
    if(any(grepl("'", m) & grepl('"', m))){
      stop(as.character(tst), ' has mixed single and double quotes')
    }
    map <- mutate(map, token = ifelse(grepl("'", !!tst), '"', "'"))
    map <- mutate(map, !!tst := paste0(token, !!tst, token))
  }
  map <- mutate(map, !!tst := paste(tst, '==', !!tst))
  out <- as.list(map[[as.character(val)]])
  names(out) <- map[[as.character(tst)]]
  attr(x[[col]], atr) <- out
  x
}
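
# A minimal sketch (illustrative) of the attribute conditionalize() builds.
# Continuing the roxygen example above, and assuming the only other event
# value in phenobarb.csv is 'conc', the 'label' attribute of x$value becomes
# a named list of condition/value pairs:
#
# attr(x$value, 'label')
# # $`event == 'dose'`
# # [1] "dose of drug administered"
# # $`event == 'conc'`
# # [1] "serum phenobarbital concentration"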
/scratch/gouwar.j/cran-all/cranData/yamlet/R/conditionalize.R