## File Name: gdm_calc_distributionmoments.R
## File Version: 0.09

# moments of distribution
gdm_calc_distributionmoments <- function( D, G, pi.k, theta.k )
{
    mean.trait <- sd.trait <- skewness.trait <- matrix( 0, nrow=D, ncol=G )
    for (dd in 1:D){
        for (gg in 1:G){
            mean.trait[dd,gg] <- sum( theta.k[,dd] * pi.k[, gg ] )
            sd.trait[dd,gg] <- sqrt( sum( theta.k[,dd]^2 * pi.k[, gg ] ) - mean.trait[dd,gg]^2 )
            skewness.trait[dd,gg] <- sum( ( theta.k[,dd] - mean.trait[dd,gg] )^3 * pi.k[, gg ] ) /
                                            sd.trait[dd,gg]^3
        }
    }
    rownames(skewness.trait) <- rownames(sd.trait) <- rownames(mean.trait) <- colnames(theta.k)
    colnames(skewness.trait) <- colnames(sd.trait) <- colnames(mean.trait) <- paste0("Group",1:G)

    #***** correlation matrices
    correlation.trait <- as.list(1:G)
    names(correlation.trait) <- colnames(mean.trait)
    for (gg in 1:G){
        mean.gg <- rep(0,D)
        Sigma.gg <- diag(0,D)
        for (dd in 1:D){
            mean.gg[dd] <- sum( pi.k[,gg] * theta.k[,dd] )
        }
        for (dd1 in 1:D){
            for (dd2 in dd1:D){
                Sigma.gg[dd1,dd2] <- sum( pi.k[,gg] * ( theta.k[,dd1] - mean.gg[dd1] ) *
                                                ( theta.k[,dd2] - mean.gg[dd2] ) )
                Sigma.gg[dd2,dd1] <- Sigma.gg[dd1,dd2]
            }
        }
        rownames(Sigma.gg) <- colnames(Sigma.gg) <- rownames(mean.trait)
        correlation.trait[[gg]] <- stats::cov2cor( Sigma.gg + diag(10^(-5),D) )
    }
    #--- OUTPUT
    res <- list( mean.trait=mean.trait, sd.trait=sd.trait, skewness.trait=skewness.trait,
                    correlation.trait=correlation.trait)
    return(res)
}

.gdm.calc.distributionmoments <- gdm_calc_distributionmoments
/scratch/gouwar.j/cran-all/cranData/CDM/R/gdm_calc_distributionmoments.R
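## Illustrative sketch (not part of the CDM package source): the quantities above are
## ordinary weighted moments of a discrete latent trait distribution. A toy check for
## one dimension and one group, with a made-up grid theta and weights pi1 summing to one:
theta <- seq(-3, 3, len=15)                 # plays the role of theta.k[,1]
pi1 <- dnorm(theta)
pi1 <- pi1 / sum(pi1)                       # plays the role of pi.k[,1]
m  <- sum(theta * pi1)                      # mean.trait
s  <- sqrt(sum(theta^2 * pi1) - m^2)        # sd.trait
sk <- sum((theta - m)^3 * pi1) / s^3        # skewness.trait
c(mean=m, sd=s, skew=sk)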
## File Name: gdm_calc_ic.R ## File Version: 0.11 ############################################################# # calculation of information criteria and number of parameters gdm_calc_ic <- function( dev, dat, G, skillspace, irtmodel, K,D,TD,I,b.constraint,a.constraint, mean.constraint, Sigma.constraint, delta.designmatrix, standardized.latent, data0, centerslopes, TP, centerintercepts, centered.latent ) { ic <- list( "deviance"=dev, "n"=nrow(data0) ) ic$traitpars <- 0 ic$itempars <- 0 #****** # Until now this works in one dimension # trait parameters: normal skillspace if ( skillspace=="normal" ){ if (irtmodel=="1PL" & ( D==1 )){ ic$traitpars <- 2*(G-1) + 1 } if ( ( irtmodel %in% c("2PL","2PLcat") ) & (D==1) ){ ic$traitpars <- 2*(G-1) if (!standardized.latent){ ic$traitpars <- ic$traitpars + 2 } } if (D > 1 ){ ic$traitpars <- 2 * D*G + D*(D-1)/2*G if ( ! is.null(mean.constraint) ){ ic$traitpars <- ic$traitpars - nrow(mean.constraint) } if ( ! is.null(Sigma.constraint) ){ ic$traitpars <- ic$traitpars - nrow(Sigma.constraint) } } } # end normal #****** # trait parameters: loglinear skillspace if ( skillspace=="loglinear" ){ ic$traitpars <- G*(ncol(delta.designmatrix) - 1) } if ( skillspace=="full" ){ ic$traitpars <- G*(TP-1) } if ( skillspace=="est" ){ ic$traitpars <- G*(TP-1) + TP*TD } #************************************************ # item parameters b ic$itempars.b <- I*K if ( ! is.null(b.constraint)){ ic$itempars.b <- ic$itempars.b - nrow(b.constraint) } #************************************************ # item parameters a ic$itempars.a <- 0 if ( irtmodel=="2PL"){ ic$itempars.a <- I*TD if ( ! is.null(a.constraint)){ a.constraint2 <- a.constraint[ a.constraint[,3]==1,, drop=FALSE] ic$itempars.a <- ic$itempars.a - nrow(a.constraint2) } } ic$centeredintercepts <- (centerintercepts)*D ic$centeredslopes <- (centerslopes)*D if ( irtmodel=="2PLcat"){ ic$itempars.a <- I*TD*K if ( ! is.null(a.constraint)){ ic$itempars.a <- ic$itempars.a - nrow(a.constraint) } } #*********************************************** # information criteria ic$itempars <- ic$itempars.a + ic$itempars.b - ic$centeredintercepts - ic$centeredslopes ic$np <- ic$itempars + ic$traitpars # AIC ic$AIC <- dev + 2*ic$np # BIC ic$BIC <- dev + ( log(ic$n) )*ic$np # CAIC (conistent AIC) ic$CAIC <- dev + ( log(ic$n) + 1 )*ic$np # corrected AIC ic$AICc <- ic$AIC + 2*ic$np * ( ic$np + 1 ) / ( ic$n - ic$np - 1 ) return(ic) } ################################################################### .gdm.calc.ic <- gdm_calc_ic
/scratch/gouwar.j/cran-all/cranData/CDM/R/gdm_calc_ic.R
## File Name: gdm_calc_loglikelihood.R ## File Version: 0.09 gdm_calc_loglikelihood <- function(irtmodel, skillspace, b, a, centerintercepts, centerslopes, TD, Qmatrix, Ngroup, pi.k, delta.designmatrix, delta, G, theta.k, D, mean.constraint, Sigma.constraint, standardized.latent, p.aj.xi, group, ind.group, weights, thetaDes, I, K, gwt0, dat, resp.ind.list, use.freqpatt, p.xi.aj, TP ) { #--------------------------------- # constraints on parameters b <- gdm_est_b_centerintercepts( b=b, centerintercepts=centerintercepts, TD=TD, Qmatrix=Qmatrix ) if (irtmodel=="2PL"){ a <- gdm_est_a_centerslopes( a=a, centerslopes=centerslopes, Qmatrix=Qmatrix, TD=TD ) } #---------------------------------- #--- constraints on skill space if ( skillspace=="loglinear" ){ res <- gdm_est_skillspace( Ngroup=Ngroup, pi.k=pi.k, Z=delta.designmatrix, G=G, delta=delta, estimate=FALSE ) pi.k <- res$pi.k delta <- res$delta } if ( skillspace=="normal" ){ res <- gdm_est_normalskills( pi.k=pi.k, theta.k=theta.k, irtmodel=irtmodel, G=G, D=D, mean.constraint=mean.constraint, Sigma.constraint=Sigma.constraint, standardized.latent=standardized.latent, p.aj.xi=p.aj.xi, group=group, ind.group=ind.group, weights=weights, b=b, a=a ) pi.k <- res$pi.k b <- res$b a <- res$a } if ( skillspace=="est" ){ thetaDes <- theta.k } #--- probabilities probs <- gdm_calc_prob( a=a, b=b, thetaDes=thetaDes, Qmatrix=Qmatrix, I=I, K=K, TP=TP, TD=TD ) #--- posterior res.hwt <- gdm_calc_posterior( probs=probs, gwt0=gwt0, dat=dat, I=I, resp.ind.list=resp.ind.list ) p.xi.aj <- res.hwt$hwt res <- gdm_calc_deviance( G=G, use.freqpatt=use.freqpatt, ind.group=ind.group, p.xi.aj=p.xi.aj, pi.k=pi.k, weights=weights ) ll <- res$ll #--- output res <- list(ll=ll, pi.k=pi.k, theta.k=theta.k, thetaDes=thetaDes, a=a, b=b, delta=delta) return(res) }
/scratch/gouwar.j/cran-all/cranData/CDM/R/gdm_calc_loglikelihood.R
## File Name: gdm_calc_post.R
## File Version: 0.09

# calculation of posterior probabilities
gdm_calc_post <- function(pi.k, group, p.xi.aj, weights, G, ind.group, use.freqpatt )
{
    # posterior probabilities P( \alpha_l | X_i )
    sel <- 1
    if ( use.freqpatt & (G>1) ){
        sel <- 2
    }
    #*****************
    if ( sel==1 ){
        prior <- ( t( pi.k ) )[ group, ]
        p.aj.xi <- prior * p.xi.aj
        p.aj.xi <- p.aj.xi / rowSums( p.aj.xi )
        # calculate pi.k
        for (gg in 1:G){
            ind.gg <- ind.group[[gg]]
            wgg <- weights[ind.gg]
            if (G==1){
                pi.k[,gg] <- colSums( p.aj.xi * wgg ) / sum( wgg )
            }
            if (G>1){
                pi.k[,gg] <- colSums( p.aj.xi[ ind.gg, ] * wgg ) / sum( wgg )
            }
        }
    }
    #*********************** if use.freqpatt==TRUE for multiple groups
    if ( sel==2 ){
        # calculate pi.k
        p.aj.xi <- list(1:G)
        for (gg in 1:G){
            wgg <- weights[,gg]
            ind.gg <- which( wgg > 0 )
            NP <- length(ind.gg)
            wgg <- wgg[ind.gg]
            prior <- ( t( pi.k[,gg] ) )[ rep(1,NP), ]
            p.aj.xi.gg <- prior * p.xi.aj[ind.gg,]
            p.aj.xi.gg <- p.aj.xi.gg / rowSums( p.aj.xi.gg )
            p.aj.xi[[gg]] <- p.aj.xi.gg
            pi.k[,gg] <- colSums( p.aj.xi.gg * wgg ) / sum( wgg )
        }
    }
    #----- OUTPUT
    res <- list("pi.k"=pi.k, "p.aj.xi"=p.aj.xi )
    return(res)
}

.gdm.calc.post <- gdm_calc_post
/scratch/gouwar.j/cran-all/cranData/CDM/R/gdm_calc_post.R
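## Illustrative sketch (not part of the CDM package source): for a single group the
## posterior used above is Bayes' rule over the theta grid, with pi.k as prior and
## p.xi.aj as the likelihood of each response pattern at each grid point. Toy numbers:
pi_k <- c(0.2, 0.5, 0.3)                     # prior over 3 grid points
p_xi_aj <- rbind(c(0.10, 0.40, 0.20),        # likelihoods for 2 persons
                 c(0.30, 0.10, 0.05))
post <- p_xi_aj * matrix(pi_k, nrow=2, ncol=3, byrow=TRUE)
post <- post / rowSums(post)                 # plays the role of p.aj.xi
colSums(post) / 2                            # updated pi.k with unit weights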
## File Name: gdm_calc_posterior.R
## File Version: 0.02

gdm_calc_posterior <- function(probs, gwt0, dat, I, resp.ind.list)
{
    res <- cdm_calc_posterior( rprobs=probs, gwt=gwt0, resp=dat, nitems=I,
                resp.ind.list=resp.ind.list, normalization=FALSE,
                thetasamp.density=NULL, snodes=0 )
    return(res)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/gdm_calc_posterior.R
## File Name: gdm_calc_prob.R
## File Version: 0.04

# calculate probability in the GDM
gdm_calc_prob <- function( a, b, thetaDes, Qmatrix, I, K, TP, TD)
{
    probs <- array( 0, dim=c(I,K+1,TP) )    # categories 0, ..., K
    for (kk in 1:K){
        l0 <- matrix( b[,kk], nrow=I, ncol=TP)
        for (td in 1:TD){
            l0 <- l0 + a[, td, kk ] * Qmatrix[, td, kk] *
                        matrix( thetaDes[, td ], nrow=I, ncol=TP, byrow=TRUE)
        }
        probs[,kk+1,] <- l0
    }
    probs <- exp( probs )
    probs1 <- probs[,1,]
    for (kk in 2:(K+1)){
        probs1 <- probs1 + probs[,kk,]
    }
    for (kk in 1:(K+1)){
        probs[,kk,] <- probs[,kk,] / probs1
    }
    return(probs)
}

.gdm.calc.prob <- gdm_calc_prob
/scratch/gouwar.j/cran-all/cranData/CDM/R/gdm_calc_prob.R
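## Illustrative sketch (not part of the CDM package source): for a single item the
## category probabilities computed above follow
##   P(X=k | theta) propto exp( b_k + sum_d a_dk * q_dk * theta_d ),  k=1,...,K,
## with the linear predictor of category 0 fixed at zero. Toy numbers for one dimension:
b <- c(-0.5, -1.5)                 # intercepts for categories 1 and 2
a <- c(1.0, 2.0)                   # effective slopes (a_dk * q_dk) for categories 1 and 2
theta <- 0.8
num <- c(1, exp(b + a * theta))    # category 0 has predictor 0
num / sum(num)                     # probabilities for categories 0, 1, 2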
## File Name: gdm_collect_itempars.R
## File Version: 0.07

# collect item parameters
gdm_collect_itempars <- function( data, K, D, b, a, TD, thetaDes, irtmodel, se.b, se.a, data0)
{
    # collect item parameters
    item <- data.frame( "item"=colnames(data0), "N"=colSums(1-is.na(data0) ) )
    item$M <- colMeans(data0, na.rm=TRUE)
    # b parameters
    se.b[ b < -9999 ] <- NA
    b[ b < -9999 ] <- NA
    se.a[ a < -9999 ] <- NA
    a[ a < -9999 ] <- NA
    for (kk in 1:K){
        item[, paste0( "b.Cat", kk) ] <- b[,kk]
    }
    for (dd in 1:TD){
        if ( irtmodel %in% c("1PL", "2PL") ){
            item[, paste0( "a.", colnames(thetaDes)[dd] ) ] <- a[,dd,1]
        }
        if ( irtmodel %in% c("2PLcat") ){
            for (kk in 1:K){
                item[, paste0( "a.", colnames(thetaDes)[dd], ".Cat", kk ) ] <- a[,dd,kk]
            }
            se.a[ a==-99999 ] <- NA
            a[ a==-99999 ] <- NA
        }
    }
    #--- OUTPUT
    res <- list(item=item, b=b, se.b=se.b, a=a )
    return(res)
}

.gdm.collect.itempars <- gdm_collect_itempars
/scratch/gouwar.j/cran-all/cranData/CDM/R/gdm_collect_itempars.R
## File Name: gdm_constraints_itempars.R
## File Version: 0.09

# constraints for item parameters
gdm_constraints_itempars <- function( b.constraint, a.constraint, K, TD, Qmatrix, a )
{
    for (kk in 1:K){
        for (td in 1:TD){
            ind.kk <- which( Qmatrix[,td, kk]==0 )
            a[ ind.kk, td, kk ] <- 0
            if ( length( ind.kk) > 0 ){
                a1 <- cbind( ind.kk, td, kk, 0 )
                a.constraint <- rbind( a.constraint, a1 )
            }
        }
    }
    if ( ! is.null( a.constraint) ){
        a.constraint <- as.matrix( a.constraint )
    }
    res <- list( a.constraint=a.constraint, b.constraint=b.constraint, a=a)
    return(res)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/gdm_constraints_itempars.R
## File Name: gdm_constraints_itempars2.R
## File Version: 0.07

# constraints on item parameters
gdm_constraints_itempars2 <- function( b.constraint, a.constraint, K, TD, I, dat )
{
    K.item <- apply( dat, 2, max )
    for (ii in 1:I){
        K.ii <- K.item[ii]
        if ( K.ii < K ){
            for ( kk in (K.ii+1):K){
                b.constraint <- rbind( b.constraint, cbind( ii, kk, -99999 ) )
                for (td in 1:TD){
                    a.constraint <- rbind( a.constraint, cbind( ii, td, kk, 0 ) )
                }
            }
        }
    }
    res <- list(K.item=K.item, a.constraint=a.constraint, b.constraint=b.constraint )
    return(res)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/gdm_constraints_itempars2.R
## File Name: gdm_create_delta_designmatrix.R
## File Version: 0.09

# create delta design matrix
gdm_create_delta_designmatrix <- function( delta.designmatrix, TP, D, theta.k, skill.levels, G)
{
    if ( is.null(delta.designmatrix) ){
        delta.designmatrix <- rep(1,TP)
        for (dd in 1:D){
            for ( pp in 1:(min( skill.levels[dd]-1, 3) ) ){
                delta.designmatrix <- cbind( delta.designmatrix, theta.k[,dd]^pp )
            }
        }
        if (D>1){
            for (dd1 in 1:(D-1) ){
                for (dd2 in (dd1+1):D) {
                    delta.designmatrix <- cbind( delta.designmatrix, theta.k[,dd1]*theta.k[,dd2] )
                }
            }
        }
    }
    delta <- matrix(0, ncol(delta.designmatrix), G)
    covdelta <- NULL
    res <- list( delta=delta, covdelta=covdelta, delta.designmatrix=delta.designmatrix )
    return(res)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/gdm_create_delta_designmatrix.R
## File Name: gdm_data_prep.R ## File Version: 0.10 ############################################ # gdm data preparation gdm_data_prep <- function( dat, data, weights, group ) { I <- ncol(dat) N <- nrow(dat) data1 <- data data1[ is.na(data1) ] <- 9 cat("************************************************************\n") cat("Data preparation\n") cat("Number of rows in data=", nrow(data1), "\n") ; utils::flush.console() item.patt.subj <- data1[,1] for ( ii in 2:I){ item.patt.subj <- paste( item.patt.subj, data1[,ii], sep="") } #**** # arrange groups if ( is.null(group)){ G <- 1 group <- rep(1,N) } else { gr2 <- unique( sort(paste( group ) )) G <- length(gr2) group <- match( group, gr2 ) } # calculate frequency of each item response pattern in case of one group if (G==1){ if ( is.null(weights) ){ weights <- rep(1,N) } a2 <- rowsum( weights, item.patt.subj) item.patt <- a2[,1] # define data frame 'item.patt' with item response pattern and its frequency (weight) item.patt <- cbind( "pattern"=names(item.patt), "freq"=as.numeric(as.vector( item.patt ) ) ) weights <- as.numeric(paste(item.patt[,"freq"])) } #*** # multiple group case if ( is.null(weights) ){ weights <- rep(1,N) } if (G>1){ for (gg in 1:G){ ind.gg <- which( group==gg ) a2 <- rowsum( weights[ind.gg], item.patt.subj[ind.gg] ) a2 <- data.frame( "pattern"=rownames(a2), a2[,1] ) colnames(a2)[2] <- paste0("freq.Gr", gg) rownames(a2) <- NULL if (gg==1){ item.patt <- a2 } if (gg > 1){ item.patt <- merge( item.patt, a2, by="pattern", all=TRUE ) } item.patt[ is.na(item.patt) ] <- 0 } weights <- item.patt[,-1] } #*** # reconstruct data N <- nrow(item.patt) dat <- matrix(NA, N, I ) for (ii in 1:I){ dat[,ii ] <- as.numeric( substring( item.patt[,"pattern"], ii,ii) ) } dat.resp <- 1-(dat==9) data <- dat dat[ dat.resp==0] <- 0 cat("Number of response patterns=", nrow(dat), "\n") utils::flush.console() res <- list( weights=weights, dat=dat, dat.resp=dat.resp, data=data, item.patt=item.patt ) return(res) } #################################################### .gdm.data.prep <- gdm_data_prep
/scratch/gouwar.j/cran-all/cranData/CDM/R/gdm_data_prep.R
## File Name: gdm_est_a.R ## File Version: 0.14 ########################################### # estimation of a # Q matrix [1:I, 1:TD, 1:K] # thetaDes [TP,TD] # n.ik [ TP, I, K+1, G ] # N.ik [ TP, I, G ] # probs [I, K+1, TP ] gdm_est_a <- function(probs, n.ik, N.ik, I, K, G,a,a.constraint,TD, Qmatrix,thetaDes,TP, max.increment, b, msteps, convM, centerslopes, decrease.increments=TRUE ) { iter <- 1 parchange <- 1 a00 <- a eps <- 1E-10 maxa <- max.increment + 0 * a while( ( iter <=msteps ) & ( parchange > convM ) ){ a0 <- a probs <- gdm_calc_prob( a=a, b=b, thetaDes=thetaDes, Qmatrix=Qmatrix, I=I, K=K, TP=TP, TD=TD ) # 1st derivative d2.b <- d1.b <- array( 0, dim=c(I, TD ) ) for (td in 1:TD){ for (gg in 1:G){ for (kk in 2:(K+1)){ QM <- matrix( Qmatrix[, td, kk-1 ], nrow=TP, ncol=I, byrow=TRUE ) v1 <- colSums( n.ik[,,kk,gg] * QM * thetaDes[, td ] ) v2 <- N.ik[,,gg] * QM * thetaDes[,td] * t( probs[,kk,] ) v2 <- colSums(v2) d1.b[, td] <- d1.b[,td] + v1 - v2 } } } # 2nd derivative for (td in 1:TD){ for (ii in 1:I){ v1 <- l0 <- 0 for (gg in 1:G){ for (kk in 2:(K+1) ){# kk <- 2 v1 <- v1 + N.ik[,ii,gg] * as.vector( ( Qmatrix[ii,td,kk-1] * thetaDes[, td ] )^2 * t( probs[ii,kk,] ) ) l0 <- l0 + as.vector ( Qmatrix[ii,td,kk-1] * thetaDes[, td ] * t( probs[ii,kk,] ) ) } } d2.b[ii,td] <- sum(v1) - sum( l0^2 * N.ik[,ii,gg] ) } } #--- calc increments res <- cdm_calc_increment( d1=d1.b, d2=d2.b, max.increment=max.increment ) increment <- res$increment max.increment <- res$max.increment a[,,1] <- a[,,1] + increment se.a <- sqrt( 1 / abs( d2.b + eps ) ) if (K>1){ for (kk in 2:K){ a[,,kk] <- a[,,1] } } #--- constraints res <- gdm_est_a_constraints( a=a, se.a=se.a, a.constraint=a.constraint, increment=increment ) a <- res$a se.a <- res$se.a increment <- res$increment #--- centerslopes a <- gdm_est_a_centerslopes( a=a, centerslopes=centerslopes, Qmatrix=Qmatrix, TD=TD ) parchange <- max( abs(a-a0) ) iter <- iter + 1 } # end iter #------------ max.increment.a <- max(abs(a-a00)) / 1.005 if (decrease.increments){ max.increment.a <- max.increment.a / 1.01 } #-- OUTPUT res <- list( a=a, se.a=se.a, max.increment.a=max.increment.a) return(res) } .gdm.est.a <- gdm_est_a
/scratch/gouwar.j/cran-all/cranData/CDM/R/gdm_est_a.R
## File Name: gdm_est_a_cat.R ## File Version: 0.13 ########################################### # estimation of a gdm_est_a_cat <- function(probs, n.ik, N.ik, I, K, G,a,a.constraint,TD, Qmatrix,thetaDes,TP, max.increment, b, msteps, convM, decrease.increments=TRUE ){ iter <- 1 parchange <- 1 a00 <- a eps <- 1E-10 max.increment0 <- max.increment while( ( iter <=msteps ) & ( parchange > convM ) ){ a0 <- a probs <- gdm_calc_prob( a=a, b=b, thetaDes=thetaDes, Qmatrix=Qmatrix, I=I, K=K, TP=TP, TD=TD ) # 1st derivative d2.b <- d1.b <- array( 0, dim=c(I, TD, K ) ) for (td in 1:TD){ for (kk in 2:(K+1)){ for (gg in 1:G){ QM <- matrix( Qmatrix[,td,kk-1], nrow=TP, ncol=I, byrow=TRUE) v1 <- colSums( n.ik[,,kk,gg] * QM * thetaDes[, td ] ) v2 <- N.ik[,,gg] * QM * thetaDes[,td] * t( probs[,kk,] ) v2 <- colSums(v2) d1.b[, td, kk-1] <- d1.b[, td, kk-1] + v1 - v2 } } } # 2nd derivative for (td in 1:TD){ for (ii in 1:I){ v1 <- l0 <- 0 for (kk in 2:(K+1) ){ # kk <- 2 v1 <- l0 <- 0 for (gg in 1:G){ v1 <- N.ik[,ii,gg] * as.vector( ( Qmatrix[ii,td,kk-1] * thetaDes[, td ] )^2 * t( probs[ii,kk,] ) ) l0 <- as.vector ( Qmatrix[ii,td,kk-1] * thetaDes[, td ] * t( probs[ii,kk,] ) ) d2.b[ii,td,kk-1] <- d2.b[ii,td,kk-1] + sum(v1) - sum( l0^2 * N.ik[,ii,gg] ) } } } } #--- calc increments res <- cdm_calc_increment( d1=d1.b, d2=d2.b, max.increment=max.increment ) increment <- res$increment max.increment <- res$max.increment a <- a + increment se.a <- sqrt( 1 / abs( d2.b + eps ) ) if ( ! is.null( a.constraint) ){ a[ a.constraint[,1:3,drop=FALSE] ] <- a.constraint[,4,drop=FALSE] se.a[ a.constraint[,1:3,drop=FALSE] ] <- 0 increment[ a.constraint[,1:3,drop=FALSE] ] <- 0 } iter <- iter + 1 parchange <- max( abs( a - a0 )) } # iter #------------- #-- final trimming of the increment res <- cdm_increment_trimming_after_mstep( parm=a, parm0=a00, max.increment0=max.increment0, type=2 ) a <- res$parm max.increment.a <- res$max.increment0 if (decrease.increments){ max.increment.a <- max.increment.a / 1.01 } #---- OUTPUT res <- list( a=a, se.a=se.a, max.increment.a=max.increment.a) return(res) } .gdm.est.a.cat <- gdm_est_a_cat
/scratch/gouwar.j/cran-all/cranData/CDM/R/gdm_est_a_cat.R
## File Name: gdm_est_a_centerslopes.R
## File Version: 0.03

gdm_est_a_centerslopes <- function(a, centerslopes, Qmatrix, TD )
{
    if (centerslopes){
        I <- dim(a)[1]    # number of items, taken from the slope array
        if (TD>1){
            m11 <- t( colSums( a[,,1] ) / colSums( Qmatrix ) )
            a[,,1] <- a[,,1] / m11[ rep(1,I), ]
        }
        if (TD==1){
            m11 <- t( colSums( a ) / colSums( Qmatrix ) )
            a <- a / m11[ rep(1,I), ]
        }
    }
    return(a)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/gdm_est_a_centerslopes.R
## File Name: gdm_est_a_constraints.R
## File Version: 0.02

gdm_est_a_constraints <- function(a, se.a, a.constraint, increment )
{
    if ( ! is.null( a.constraint) ){
        a[ a.constraint[,1:3,drop=FALSE] ] <- a.constraint[,4,drop=FALSE]
        se.a[ a.constraint[,1:3,drop=FALSE] ] <- 0
        increment[ a.constraint[,1:2,drop=FALSE] ] <- 0
    }
    #--- output
    res <- list( a=a, se.a=se.a, increment=increment )
    return(res)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/gdm_est_a_constraints.R
## File Name: gdm_est_b.R ## File Version: 0.41 ########################################################################### # estimation of b parameters gdm_est_b <- function(probs, n.ik, N.ik, I, K, G, b, b.constraint, max.increment, a, thetaDes, Qmatrix, TP, TD, msteps, convM, centerintercepts, decrease.increments=TRUE ) { max.increment0 <- max.increment iter <- 1 parchange <- 1 eps <- 1E-10 b00 <- b while( ( iter <=msteps ) & ( parchange > convM) ){ b0 <- b probs <- gdm_calc_prob( a=a, b=b, thetaDes=thetaDes, Qmatrix=Qmatrix, I=I, K=K, TP=TP, TD=TD ) d2.b <- d1.b <- matrix( 0, nrow=I,ncol=K) for (kk in 2:(K+1)){ probs_kk <- probs[,kk,] for (gg in 1:G){ t_Nik_gg <- t(N.ik[,,gg]) d1.b[,kk-1] <- d1.b[,kk-1] - rowSums( t(n.ik[,,kk,gg]) - t_Nik_gg * probs_kk ) d2.b[,kk-1] <- d2.b[,kk-1] + rowSums( t_Nik_gg * ( 1 - probs_kk ) * probs_kk ) } } #--- calc increments res <- cdm_calc_increment( d1=-d1.b, d2=d2.b, max.increment=max.increment ) increment <- res$increment max.increment <- res$max.increment b <- b + increment se.b <- sqrt( 1 / abs( d2.b + eps ) ) #-- parameter fixings res <- cdm_include_fixed_parameters( parm=b, se_parm=se.b, parm_fixed=b.constraint ) b <- res$parm se.b <- res$se_parm #-- centerintercepts b <- gdm_est_b_centerintercepts( b=b, centerintercepts=centerintercepts, TD=TD, Qmatrix=Qmatrix ) iter <- iter + 1 parchange <- max( abs(b0-b)) } max.increment.b <- max( abs( b - b00 )) #-- final trimming of the increment res <- cdm_increment_trimming_after_mstep( parm=b, parm0=b00, max.increment0=max.increment0, type=2 ) b <- res$parm max.increment.b <- res$max.increment0 #--- decrease increments if (decrease.increments){ max.increment.b <- max.increment.b/1.01 } #--- OUTPUT res <- list( b=b, se.b=se.b, max.increment.b=max.increment.b) return(res) } .gdm.est.b <- gdm_est_b
/scratch/gouwar.j/cran-all/cranData/CDM/R/gdm_est_b.R
## File Name: gdm_est_b_centerintercepts.R
## File Version: 0.02

gdm_est_b_centerintercepts <- function(b, centerintercepts, TD, Qmatrix )
{
    if ( centerintercepts ){
        if (TD==1){
            b <- b - mean(b)
        }
        if (TD > 1){
            for (dd in 1:TD){
                ind.dd <- which( Qmatrix[,dd,1] > 0 )
                m1 <- sum( b[ind.dd,] ) / ( ncol(b) * length(ind.dd) )
                b[ind.dd,] <- b[ind.dd,] - m1
            }
        }
    }
    return(b)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/gdm_est_b_centerintercepts.R
## File Name: gdm_est_normalskills.R ## File Version: 0.15 ############################################################## # estimation of skill distribution under normality gdm_est_normalskills <- function( pi.k, theta.k, irtmodel, G, D, mean.constraint, Sigma.constraint, standardized.latent, p.aj.xi, group, ind.group, weights, b, a ) { # mean.constraint [ dimension, group, value ] # Sigma.constraint [ dimension1, dimension2, group, value ] #----------------------------------------- #-------- unidimensional model ----------- #----------------------------------------- if (D==1){ for (gg in 1:G){ res <- cdm_fit_normal(x=theta.k, w=pi.k[,gg]) mg <- res$Mu sdg <- sqrt(res$Sigma) #--------- mean constraint if ( ( ! is.null ( mean.constraint )) ){ i1 <- mean.constraint[ mean.constraint[,2]==gg,, drop=FALSE] if ( ( nrow(i1)==1 ) & (G>1) ){ if ( ( gg==1 ) & (i1[,1]==1) & (i1[,2]==1) ){ b <- b + ( mg - i1[3] ) } mg <- i1[3] } if ( nrow(i1) > 0 ){ mg <- i1[,3] } } # end mean constraint #--------- sigma constraint if ( ( ! is.null ( Sigma.constraint ) ) ){ i1 <- Sigma.constraint[ Sigma.constraint[,3]==gg,, drop=FALSE] if ( ( nrow(i1)==1 ) & (G>1) ){ if ( ( gg==1 ) & (i1[,1]==1) & (i1[,2]==1) ){ a <- a * sdg / sqrt(i1[4]) } sdg <- sqrt(i1[4]) } } # end sigma constraint pi.k[,gg] <- cdm_sumnorm( stats::dnorm( theta.k[,1],mean=mg, sd=sdg) ) } } #----------------------------------------- #-------- multidimensional model --------- #----------------------------------------- if (D>1){ for (gg in 1:G){ res <- cdm_fit_normal(x=theta.k, w=pi.k[,gg] ) mean.gg <- res$Mu Sigma.gg <- res$Sigma Sigma.gg <- cdm_add_ridge_diagonal(x=Sigma.gg, eps=1E-10 ) #----- mu constraint m.gg <- mean.constraint[ mean.constraint[,2]==1, ] if ( ! is.null(mean.constraint)){ if( dim(m.gg)[1] > 0 ){ mean.gg[ m.gg[,1] ] <- m.gg[,3] } } s.gg <- Sigma.constraint[ Sigma.constraint[,3]==1, ] #----- sigma constraint if ( ! is.null(Sigma.constraint)){ if( dim(s.gg)[1] > 0 ){ c1 <- stats::cov2cor( Sigma.gg ) d1 <- diag(Sigma.gg) s.gg1 <- s.gg[ s.gg[,1]==s.gg[,2], ] if ( nrow(s.gg1) > 0 ){ d1[ s.gg1[,1:2] ] <- s.gg[,4] } d1 <- outer( sqrt(d1), sqrt(d1) )*c1 s.gg2 <- s.gg[ s.gg[,1] !=s.gg[,2], ] if ( nrow(s.gg1) > 0 ){ d1[ s.gg1[,1:2] ] <- s.gg[,4] d1[ s.gg1[,c(2,1)] ] <- s.gg[,4] } Sigma.gg <- d1 } } pi.k[,gg] <- cdm_sumnorm( mvtnorm::dmvnorm( theta.k, mean=mean.gg, sigma=Sigma.gg ) ) } # end gg } # end multidimensional model #--- OUTPUT res <- list(pi.k=pi.k, b=b, a=a ) return(res) } #************************************************************* .gdm.est.normalskills <- gdm_est_normalskills
/scratch/gouwar.j/cran-all/cranData/CDM/R/gdm_est_normalskills.R
## File Name: gdm_est_skillspace.R
## File Version: 0.09

# reduced skillspace estimation
gdm_est_skillspace <- function(Ngroup, pi.k, Z, G, delta, eps=1E-10, estimate=TRUE )
{
    covdelta <- as.list(1:G)
    covbeta <- NULL
    for (gg in 1:G){
        if (estimate){
            ntheta <- cdm_sumnorm( Ngroup[gg] * pi.k[,gg] )
            lntheta <- log(ntheta+eps)
            mod <- stats::lm( lntheta ~ 0 + Z, weights=ntheta )
            covbeta <- vcov(mod)
            beta <- coef(mod)
            delta[,gg] <- beta
        }
        pi.k[,gg] <- cdm_sumnorm( exp( Z %*% delta[,gg] ) / Ngroup[gg] )
        covdelta[[gg]] <- covbeta
    }
    #--- OUTPUT
    res <- list( pi.k=pi.k, delta=delta, covdelta=covdelta )
    return(res)
}

.gdm.est.skillspace <- gdm_est_skillspace
/scratch/gouwar.j/cran-all/cranData/CDM/R/gdm_est_skillspace.R
## File Name: gdm_est_skillspace_traits.R ## File Version: 0.13 ##################################################### # estimation of skill space gdm_est_skillspace_traits <- function( n.ik, a, b, theta.k, Qmatrix, I, K, TP, TD, numdiff.parm, max.increment, msteps, convM ) { n.ik0 <- apply( n.ik, c(1,2,3), sum ) h <- numdiff.parm parchange <- 1000 iter <- 1 se.theta.k <- 0 * theta.k Q1 <- matrix( 0, nrow=TP, ncol=TD) #-- define likelihood function and list of arguments prob_fct <- gdm_calc_prob prob_args <- list( a=a, b=b, thetaDes=theta.k, Qmatrix=Qmatrix, I=I, K=K, TP=TP, TD=TD ) parm_args_varname <- "thetaDes" #--------- begin M-steps while( ( iter <=msteps ) & (parchange > convM ) ){ theta.k0 <- theta.k for ( dd in 1:TD){ Q0 <- Q1 Q0[,dd] <- 1 # calculate log-likelihood prob_args[[ parm_args_varname ]] <- theta.k0 pjk <- do.call( what=prob_fct, args=prob_args) prob_args[[ parm_args_varname ]] <- theta.k0 + h*Q0 pjk1 <- do.call( what=prob_fct, args=prob_args) prob_args[[ parm_args_varname ]] <- theta.k0 - h*Q0 pjk2 <- do.call( what=prob_fct, args=prob_args) #-- compute increments res <- gdm_numdiff_index( pjk=pjk, pjk1=pjk1, pjk2=pjk2, n.ik=n.ik0, max.increment=max.increment, numdiff.parm=numdiff.parm ) increment <- res$increment d2 <- res$d2 theta.k[,dd] <- theta.k[,dd] + increment se.theta.k[,dd] <- 1 / sqrt( abs(d2) ) } iter <- iter + 1 parchange <- max( abs( theta.k - theta.k0 )) } #--- OUTPUT res <- list( theta.k=theta.k, se.theta.k=se.theta.k ) return(res) } ########################################################## .gdm.est.skillspace.traits <- gdm_est_skillspace_traits
/scratch/gouwar.j/cran-all/cranData/CDM/R/gdm_est_skillspace_traits.R
## File Name: gdm_inits_b.R
## File Version: 0.04

gdm_inits_b <- function( dat0, dat.resp0, I, K )
{
    b <- matrix( 0, nrow=I, ncol=K )
    for (kk in 1:K){
        cm1 <- colMeans( ( dat0 >=kk )*dat.resp0 )
        b[,kk] <- stats::qlogis( ( cm1 + .01 ) / 1.02 )
    }
    #--- output
    return(b)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/gdm_inits_b.R
## File Name: gdm_numdiff_index.R
## File Version: 0.09

# general function for numerical differentiation
# diffindex aggregates across super items
gdm_numdiff_index <- function( pjk, pjk1, pjk2, n.ik, max.increment,
            numdiff.parm, eps=1E-10 )
{
    h <- numdiff.parm
    an.ik <- aperm( n.ik, c(2,3,1) )
    #--- log-likelihood
    ll0 <- cdm_calc_ll_with_counts( an.ik=an.ik, pjk=pjk )
    ll1 <- cdm_calc_ll_with_counts( an.ik=an.ik, pjk=pjk1 )
    ll2 <- cdm_calc_ll_with_counts( an.ik=an.ik, pjk=pjk2 )
    #--- derivatives
    res <- cdm_ll_numerical_differentiation( ll0=ll0, ll1=ll1, ll2=ll2, h=h )
    d1 <- res$d1
    d2 <- res$d2
    #--- calculate increment
    res <- cdm_calc_increment( d1=d1, d2=d2, max.increment=max.increment, type=2 )
    increment <- res$increment
    #--- output
    res <- list(increment=increment, d1=d1, d2=d2, ll0=ll0)
    return(res)
}

.gdm.numdiff.index <- gdm_numdiff_index
/scratch/gouwar.j/cran-all/cranData/CDM/R/gdm_numdiff_index.R
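## Illustrative sketch (not part of the CDM package source): the derivatives used above
## can be obtained with standard central differences of the log-likelihood; the formulas
## below are assumed to match the internal helper cdm_ll_numerical_differentiation.
f <- function(x) -(x - 1)^2                       # toy log-likelihood
x <- 0.3
h <- 1e-4
d1 <- (f(x + h) - f(x - h)) / (2 * h)             # first derivative  (exact: 1.4)
d2 <- (f(x + h) - 2 * f(x) + f(x - h)) / h^2      # second derivative (exact: -2)
c(d1=d1, d2=d2)
## a Newton-type increment would then be -d1/d2, trimmed to max.increment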
## File Name: gdm_person_parameters.R ## File Version: 0.08 ########################################### # person parameter estimates gdm_person_parameters <- function( data, D, theta.k, p.xi.aj, p.aj.xi, weights ) { #************************** person <- data.frame("case"=1:(nrow(data)), "M"=rowMeans( data, na.rm=T) ) EAP.rel <- rep(0,D) names(EAP.rel) <- colnames(theta.k) nstudl <- rep(1,nrow(data)) doeap <- TRUE if ( is.list( p.aj.xi)){ p.aj.xi <- p.aj.xi[[1]] nstudl <- rep(1,nrow(p.aj.xi ) ) weights <- weights[,1] weights <- weights[ weights > 0 ] doeap <- FALSE } if (doeap ){ for (dd in 1:D){ #dd <- 1 dd1 <- colnames(theta.k)[dd] person$EAP <- rowSums( p.aj.xi * outer( nstudl, theta.k[,dd] ) ) person$SE.EAP <- sqrt(rowSums( p.aj.xi * outer( nstudl, theta.k[,dd]^2 ) ) - person$EAP^2) EAP.variance <- stats::weighted.mean( person$EAP^2, weights ) - ( stats::weighted.mean( person$EAP, weights ) )^2 EAP.error <- stats::weighted.mean( person$SE.EAP^2, weights ) EAP.rel[dd] <- EAP.variance / ( EAP.variance + EAP.error ) colnames(person)[ which( colnames(person)=="EAP" ) ] <- paste("EAP.", dd1, sep="") colnames(person)[ which( colnames(person)=="SE.EAP" ) ] <- paste("SE.EAP.", dd1, sep="") } # MLE mle.est <- theta.k[ max.col( p.xi.aj ),, drop=FALSE] colnames(mle.est) <- paste0( "MLE.", names(EAP.rel)) person <- cbind( person, mle.est ) # MAP mle.est <- theta.k[ max.col( p.aj.xi ),, drop=FALSE] colnames(mle.est) <- paste0( "MAP.", names(EAP.rel)) person <- cbind( person, mle.est ) } #--- OUTPUT res <- list( person=person, EAP.rel=EAP.rel ) return(res) } ########################################################################### .gdm.person.parameters <- gdm_person_parameters
/scratch/gouwar.j/cran-all/cranData/CDM/R/gdm_person_parameters.R
## File Name: gdm_prep_calc_counts.R
## File Version: 0.06

gdm_prep_calc_counts <- function(K, G, group, weights, dat.resp, dat.ind, use.freqpatt)
{
    dat.ind2 <- as.list( 1:(K+1) )
    ind.group <- as.list( 1:G )
    for (kk in 1:(K+1)){
        l1 <- as.list(1:G)
        for (gg in 1:G){
            if ( ! use.freqpatt ){
                ind.gg <- which( group==gg )
                ind.group[[gg]] <- ind.gg
                dkk <- (dat.ind[[kk]])[ ind.gg, ]
                l1[[gg]] <- dkk * dat.resp[ind.gg,] * weights[ind.gg]
            }
            if ( use.freqpatt ){
                dkk <- dat.ind[[kk]]
                if (G>1){
                    wgg <- weights[,gg]
                }
                if (G==1){
                    wgg <- weights
                    ind.group[[gg]] <- which( group==gg)
                }
                l1[[gg]] <- dkk * dat.resp * wgg
            }
        } # end gg
        dat.ind2[[kk]] <- l1
    }
    #-------- output
    res <- list(ind.group=ind.group, dat.ind2=dat.ind2)
    return(res)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/gdm_prep_calc_counts.R
## File Name: gdm_proc_response_indicators.R
## File Version: 0.05

gdm_proc_response_indicators <- function(dat.resp)
{
    I <- ncol(dat.resp)
    resp.ind.list <- list( 1:I )
    for (ii in 1:I){
        resp.ind.list[[ii]] <- which( dat.resp[,ii]==1)
    }
    return(resp.ind.list)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/gdm_proc_response_indicators.R
## File Name: gdm_progress_em_algorithm.R ## File Version: 0.15 gdm_progress_em_algorithm <- function( progress, disp, iter, dev, dev0, b_change, a_change, deltadiff, dev_digits=4, parm_digits=6 ) { if (progress){ cat(disp) cat("Iteration", iter, " ", paste( Sys.time() ), "\n" ) cat( paste( " Deviance ", "=", " ", round( dev, dev_digits ), if (iter > 1 ){ paste0(" | Deviance change ", "=", " ") } else {""}, if( iter>1){round( - dev + dev0, parm_digits )} else { ""},sep="") ) if ( dev > dev0 & (iter>1 ) ){ cat( " Deviance increases!") } cat("\n") cat( paste( " Maximum item intercept parameter change", "=", round( b_change, parm_digits ), " \n" ) ) cat( paste( " Maximum item slope parameter change", "=", round( a_change, parm_digits ), " \n" ) ) cat( paste( " Maximum distribution parameter change", "=", round( deltadiff, parm_digits ), " \n" ) ) utils::flush.console() } }
/scratch/gouwar.j/cran-all/cranData/CDM/R/gdm_progress_em_algorithm.R
## File Name: gdm_thetadesign.R
## File Version: 0.03

# Theta design matrix
gdm_thetadesign <- function( theta.k, thetaDes, Qmatrix )
{
    D <- 1    # default dimension 1
    #--- definition of theta.k
    if ( ! is.null(Qmatrix) ){
        D <- ncol(Qmatrix)
        if ( length( dim(Qmatrix))==2 ){
            Q1 <- array( 0, dim=c(dim(Qmatrix),1) )
            Q1[,,1] <- Qmatrix
            Qmatrix <- Q1
        }
    }
    w1 <- ( is.vector( theta.k) ) & ( ! is.list( theta.k) )
    if ( w1 ){
        theta.k <- matrix( theta.k, ncol=1 )
        if (D>1){
            th1 <- as.list(1:D)
            for (dd in 1:D){
                th1[[dd]] <- theta.k
            }
            theta.k <- th1
        }
    }
    if ( is.list(theta.k) ){
        tk <- theta.k
        theta.k <- expand.grid( theta.k )
        colnames(theta.k) <- names(tk)
    }
    theta.k <- as.matrix(theta.k)
    D <- ncol(theta.k)
    if ( is.null( colnames(theta.k) ) ){
        colnames(theta.k) <- paste0("F",1:D)
    }
    if ( is.null(thetaDes) ){
        # thetaDes [TP,TD]
        TD <- D
        thetaDes <- matrix( theta.k, ncol=TD )
        colnames(thetaDes) <- colnames(theta.k)
    }
    TP <- nrow(thetaDes)
    TD <- ncol(thetaDes)
    res <- list(D=D, TD=TD, TP=TP, theta.k=theta.k, thetaDes=thetaDes, Qmatrix=Qmatrix )
    return(res)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/gdm_thetadesign.R
## File Name: ideal.response.pattern.R
## File Version: 0.137

#-- computation of ideal response pattern
ideal.response.pattern <- function( q.matrix, skillspace=NULL, rule="DINA" )
{
    K <- ncol(q.matrix)
    q.matrix0 <- q.matrix
    if ( is.null(skillspace) ){
        skillspace <- data.frame( rbind( rep(0,K), rep(1,K) ) )
        skillspace <- as.matrix( expand.grid( as.list( skillspace ) ) )
        if ( ! is.null( colnames(q.matrix) ) ){
            colnames(skillspace) <- colnames(q.matrix)
        }
    }
    skillspace0 <- skillspace
    if (rule=="DINO"){
        skillspace <- 1-skillspace
    }
    # compute ideal response pattern
    skillspace <- as.matrix(skillspace)
    q.matrix <- as.matrix(q.matrix)
    idealresp <- cdm_rcpp_ideal_resp_pattern( qmatrix=q.matrix, skillspace=skillspace )
    #* DINO rule
    if (rule=="DINO"){
        idealresp <- 1-idealresp
    }
    #-- output
    res <- list( idealresp=idealresp, skillspace=skillspace0, rule=rule, q.matrix=q.matrix0 )
    return(res)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/ideal.response.pattern.R
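## Illustrative sketch (not part of the CDM package source): under the DINA rule the
## ideal response for item j and skill pattern alpha is prod_k alpha_k^(q_jk), i.e.
## 1 only if all required skills are mastered. A small standalone check in base R:
q.matrix <- rbind(c(1,0), c(1,1), c(0,1))        # 3 items, 2 skills
skillspace <- as.matrix(expand.grid(0:1, 0:1))   # 4 skill patterns
idealresp <- apply(skillspace, 1, function(alpha){
                apply(q.matrix, 1, function(q) prod(alpha^q)) })
idealresp    # items in rows, skill patterns in columns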
## File Name: item_by_group.R ## File Version: 0.19 ########################################################## # creates an extended dataset with item responses in which # items are defined as combinations of original items and group item_by_group <- function( dat, group, invariant=NULL, rm.empty=TRUE ) { vars <- colnames(dat) some_invariant_items <- ( ! is.null(invariant) ) if ( some_invariant_items ){ vars <- setdiff(vars,invariant) } I <- length(vars) group_unique <- sort( unique(group) ) G <- length(group_unique) #*** create extended dataset dat2 <- matrix( NA, nrow=nrow(dat), ncol=I*G ) cn <- sapply( vars, FUN=function(vv){ paste0( vv, "_group", group_unique ) }, simplify=FALSE) colnames(dat2) <- unlist(cn) for (gg in 1:G){ # gg <- 1 ind_gg <- which( group==group_unique[gg] ) for (ii in 1:I){ # ii <- 1 dat2[ ind_gg, G*(ii-1) + gg ] <- dat[ ind_gg, vars[ii] ] } } #--- include invariant items if ( some_invariant_items ){ dat2a <- dat[, invariant] dat2 <- cbind( dat2a, dat2 ) } #--- remove empty columns if (rm.empty){ ind <- which( colMeans( is.na(dat2) )==1 ) if ( length(ind) > 0 ){ dat2 <- dat2[, - ind ] } } #--- include some attributes: variables and variable indices attr(dat2,"noninvariant") <- vars attr(dat2,"invariant") <- invariant attr(dat2,"noninvariant_index") <- match( vars, colnames(dat)) attr(dat2,"noninvariant_index_extended") <- rep( attr(dat2,"noninvariant_index"), each=G ) attr(dat2,"invariant_index") <- match( invariant, colnames(dat)) attr(dat2,"all_index") <- c( attr(dat2,"invariant_index"), attr(dat2,"noninvariant_index_extended") ) cn <- colnames(dat2) names(cn) <- NULL colnames(dat2) <- cn #--- output return(dat2) } ############################################################
/scratch/gouwar.j/cran-all/cranData/CDM/R/item_by_group.R
## File Name: itemfit.rmsea.R
## File Version: 0.34

# RMSEA item fit
itemfit.rmsea <- function( n.ik, pi.k, probs, itemnames=NULL)
{
    # probs ... [ classes, items, categories ]
    # n.ik  ... [ classes, items, categories, groups ]
    if (is.vector(pi.k)){
        pi.k <- matrix(pi.k, ncol=1)
    }
    # RMSEA (RMSD statistic) for all groups
    itemfit.rmsea <- itemfit_rmsea_helper( n.ik=n.ik, pi.k=pi.k, probs=probs )
    if ( ! is.null(itemnames) ){
        names(itemfit.rmsea) <- itemnames
    }
    # groupwise RMSEA
    G <- dim(n.ik)[4]
    I <- dim(n.ik)[2]
    rmsea.groups <- matrix( NA, I, G )
    if ( ! is.null(itemnames) ){
        rownames(rmsea.groups) <- itemnames
    }
    for (gg in 1:G){
        rmsea.groups[,gg] <- itemfit_rmsea_helper( n.ik=n.ik[,,,gg,drop=FALSE],
                                    pi.k=pi.k, probs=probs )
    }
    res <- list( "rmsea"=itemfit.rmsea, "rmsea.groups"=rmsea.groups )
    return(res)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/itemfit.rmsea.R
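## Illustrative sketch (not part of the CDM package source): for one dichotomous item
## the RMSEA/RMSD statistic computed above is
##   sqrt( sum_theta pi(theta) * sum_k ( p_obs,k(theta) - p_exp,k(theta) )^2 ),
## i.e. a trait-weighted distance between pseudo-observed and model-implied category
## probabilities. Toy numbers over 3 latent classes:
pi_theta <- c(0.25, 0.50, 0.25)                          # trait distribution
p_exp <- c(0.2, 0.5, 0.8)                                # model-implied P(X=1 | theta)
p_obs <- c(0.25, 0.45, 0.85)                             # pseudo-observed P(X=1 | theta)
sq <- (p_obs - p_exp)^2 + ((1-p_obs) - (1-p_exp))^2      # both categories
sqrt(sum(pi_theta * sq))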
## File Name: itemfit.sx2.R ## File Version: 3.251 #*** Item fit according to the S-X^2 statistic #*** Orlando & Thissen (2000, 2003) itemfit.sx2 <- function( object, Eik_min=1, progress=TRUE ) { mod <- object #***************************** # object of class gdm if ( inherits(mod,"gdm") ){ if ( dim( mod$n.ik)[4] > 1){ stop("Only applicable in One-group case!") } pi.k <- mod$pi.k[,1] pjk <- mod$pjk data <- mod$data I <- ncol(data) if ( mod$irtmodel=="1PL" ){ npars <- rep( 1, I ) } if ( mod$irtmodel=="2PL" ){ npars <- rep( 2, I ) } } #--- object of class smirt (sirt) if ( inherits(mod,"smirt") ){ pi.k <- mod$pi.k[,1] pjk <- aperm( mod$probs, c(3,1,2) ) D <- ncol( object$Qmatrix ) dimQ <- dim(object$Qmatrix) Qmatrix <- matrix( object$Qmatrix, nrow=dimQ[1], ncol=dimQ[2] ) irtmodel <- object$irtmodel if ( is.null( mod$se.a ) ){ if (irtmodel=="noncomp"){ npars <- rowSums(Qmatrix) } if (irtmodel !="noncomp"){ npars <- 1+0*rowSums(Qmatrix) } } if ( ! is.null( mod$se.a ) ){ if (irtmodel=="noncomp"){ npars <- 2*rowSums(Qmatrix) } if (irtmodel !="noncomp"){ npars <- 1+rowSums(Qmatrix) } } data <- mod$dat } #--- rasch.mml (in sirt) if ( inherits(object,"rasch.mml") ){ pi.k <- object$trait.distr[,2] pjk0 <- object$pjk pjk <- array( 0, dim=c( dim(pjk0), 2 ) ) pjk[,,2] <- pjk0 pjk[,,1] <- 1 - pjk0 data <- object$dat npars <- 1*( object$item$est.a > 0 ) + 1*( object$item$est.b > 0 ) + 1*( object$item$est.c > 0 ) + 1*( object$item$est.d > 0 ) } #--- din (in CDM) if (inherits(object,"din") ){ data <- object$data pi.k <- object$attribute.patt$class.prob pjk <- aperm( object$pjk, c(3,1,2) ) npars <- rep(2,ncol(data)) } #--- gdina (in CDM) if (inherits(object,"gdina") ){ data <- object$data pi.k <- object$attribute.patt$class.prob pjk <- aperm( object$pjk, c(3,1,2) ) npars <- unlist( lapply( object$delta, FUN=function(ll){ length(ll) } ) ) } #--- tam.mml (in TAM) if (inherits(object,"tam.mml")){ data <- object$resp I <- ncol(data) pi.k <- object$pi.k if ( is.matrix(pi.k) ){ if (ncol(pi.k) > 1){ cat("Used first group for assessment of fit.\n") } pi.k <- pi.k[,1] } pjk <- aperm( object$rprobs, c(3,1,2) ) npars <- rep(1,I) if ( object$irtmodel=="2PL"){ npars <- rep(2,I) } } #--- data preparation I <- ncol(data) sumscore <- rowSums( data ) N <- nrow(data) P1 <- pjk[,,2] Q1 <- pjk[,,1] pi.kI <- matrix( pi.k, nrow=dim(pjk)[1], ncol=dim(pjk)[2]+1 ) # check input data if ( sum( is.na(data) ) > 0 ){ stop("No missing responses are allowed!") } if ( max(data) > 1 ){ stop("Only dichotomous responses are allowed!") } # distribution sum scores sumscore.distribution <- sapply( 0:I, FUN=function(ss){ sum( sumscore==ss) } ) # score distribution scoredistribution <- itemfit_sx2_calc_scoredistribution_R( pjk=pjk ) itemtable <- NULL itemfit.stat <- data.frame( "item"=colnames(data), "itemindex"=1:I ) if (progress){ i3 <- c(1,diff( floor( 10 * ( 1:I )/ (I+1) )+1 )) cat( paste0( "|", paste0( rep("*", 10), collapse=""), "|\n|") ) } # calculate fit for item ii eps <- 1E-10 for (ii in 1:I){ res <- itemfit_sx2_calc_itemfit_oneitem( ii=ii, pjk=pjk, pi.k=pi.k, P1=P1, I=I, Eik_min=Eik_min, sumscore.distribution=sumscore.distribution, scoredistribution=scoredistribution, data=data, sumscore=sumscore ) itemtable <- rbind( itemtable, res$table2.ii ) r1 <- res$table2.ii itemfit.stat[ ii, "S-X2" ] <- sum( r1$Nik * ( r1$oik - r1$eik )^2 / ( r1$eik * ( 1 - r1$eik) + eps ) ) itemfit.stat[ ii, "df" ] <- nrow(r1) - npars[ii] itemfit.stat[ii,"p"] <- 1 - stats::pchisq( itemfit.stat[ ii, "S-X2" ], df=itemfit.stat[ ii, "df" ] ) itemfit.stat[ ii, 
"S-X2_df" ] <- itemfit.stat[ ii, "S-X2" ] / itemfit.stat[ ii, "df" ] xg <- itemfit.stat[ ii, "S-X2" ] - itemfit.stat[ ii, "df" ] itemfit.stat[ ii, "RMSEA" ] <- sqrt( ( ifelse( xg > 0, xg, 0 ) ) / ( N - 1) / itemfit.stat[ ii, "df" ] ) itemfit.stat[ii,"Nscgr"] <- nrow(r1) itemfit.stat[ii,"Npars"] <- npars[ii] if (progress){ if (i3[ii]==1 ){ cat("-") ; utils::flush.console() } } } if (progress){ cat("|\n") } itemfit.stat[,"p.holm"] <- stats::p.adjust( itemfit.stat[,"p"], method="holm") res <- list( "itemfit.stat"=itemfit.stat, "itemtable"=itemtable, "I"=I ) class(res) <- "itemfit.sx2" return(res) } ############################################################################# # summary of item fit summary.itemfit.sx2 <- function(object,...) { itemfit.stat <- object$itemfit.stat i1 <- itemfit.stat i1[,-1] <- round( itemfit.stat[,-1], 3 ) cat("Please check degrees of freedom (number of estimated paramters) carefully!\n") cat("They are maybe not correctly calculated.\n") cat("****** df=Nscgr - Npars ******* \n\n") print(i1) cat("\n-- Average Item Fit Statistics --\n") cat( paste0( "S-X2=", round( mean( itemfit.stat[,"S-X2"] ), 3 ) ) ) cat( paste0( " | S-X2_df=", round( mean( itemfit.stat[,"S-X2_df"] ), 3 ) ), "\n") } ############################################################################# ################################################################################### plot.itemfit.sx2 <- function(x,ask=TRUE,...) { object <- x itemtable <- object$itemtable itemfit.stat <- object$itemfit.stat I <- object$I # loop over all items for (ii in 1:I){ descii <- itemfit.stat[ ii, ] title.ii <- paste0( "Item ", descii$item, " | S-X2(df=", descii$df, ")=", round( descii[, "S-X2"], 3), ", p=", round( descii$p,3), "\n S-X2/df=", round( descii[,"S-X2_df"], 3 ), " | RMSEA=", round( descii[,"RMSEA"], 3 ) ) itemtable.ii <- itemtable[ itemtable$itemindex==ii, ] graphics::plot( itemtable.ii$score, itemtable.ii$oik, xlim=c(1,I-1), ylim=c(0,1), type="o", pch=16, xlab="Score group", ylab="Observed and expected frequency", main=title.ii) graphics::lines( itemtable.ii$score, itemtable.ii$eik, lty=2, lwd=2) graphics::legend( 1, 1, c("Observed frequency", "Expected frequency" ), pch=c(16,NA), lty=1:2, lwd=c(1,2) ) graphics::par( ask=ask) } } ###################################################################################
/scratch/gouwar.j/cran-all/cranData/CDM/R/itemfit.sx2.R
## File Name: itemfit_rmsea_helper.R
## File Version: 0.05

#-- auxiliary function itemfit.rmsea
itemfit_rmsea_helper <- function( n.ik, pi.k, probs )
{
    # probs ... [ classes, items, categories ]
    # n.ik  ... [ classes, items, categories, groups ]
    # N.ik  ... [ classes, items, categories ]
    N.ik <- n.ik[,,,1]
    G <- dim(n.ik)[4]
    pitot <- pi.k[,1]
    eps <- 1E-10
    if (G>1){
        for (gg in 2:G ){
            N.ik <- N.ik + n.ik[,,,gg]
            pitot <- pitot + pi.k[,gg]
        }
    }
    # calculate summed counts
    N.ik_tot <- array( 0, dim=dim(N.ik) )
    N.ik_tot[,,1] <- N.ik[,,1,drop=FALSE]
    K <- dim(N.ik)[3]
    for (kk in 2:K){
        N.ik_tot[,,1] <- N.ik_tot[,,1,drop=FALSE] + N.ik[,,kk,drop=FALSE]
    }
    for (kk in 2:K){
        N.ik_tot[,,kk] <- N.ik_tot[,,1]
    }
    # calculate itemwise statistics
    p.ik_observed <- N.ik / ( N.ik_tot + eps )
    p.ik_observed[ is.na( p.ik_observed ) ] <- 0
    # define class weights
    pi.k_tot <- array( 0, dim=dim(p.ik_observed) )
    for (kk in 1:K){
        pi.k_tot[,,kk] <- matrix( pitot, nrow=dim(pi.k_tot)[1], ncol=dim(pi.k_tot)[2],
                                byrow=FALSE )
    }
    # calculate statistics
    dist.item <- pi.k_tot * ( p.ik_observed - probs )^2
    h1 <- dist.item[,,1]
    for (kk in 2:K){
        h1 <- h1 + dist.item[,,kk]
    }
    itemfit.rmsea <- sqrt( colSums( abs(h1 + eps ) ) )
    return(itemfit.rmsea)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/itemfit_rmsea_helper.R
## File Name: itemfit_sx2_calc_itemfit_oneitem.R ## File Version: 0.09 ################################################# # calculation of item fit for one item itemfit_sx2_calc_itemfit_oneitem <- function( ii, pjk, pi.k, P1, I, Eik_min, sumscore.distribution, scoredistribution, data, sumscore ) { eps <- 1E-10 pjk.ii <- pjk[,-ii,] P1ii <- pjk.ii[,,2] Q1ii <- pjk.ii[,,1] scored.ii <- cdm_rcpp_itemfit_sx2_calc_scoredistribution( P1=P1ii, Q1=Q1ii ) eik_t2 <- colSums( scoredistribution * pi.k ) eik_t1 <- c(0,colSums( P1[,ii] * scored.ii * pi.k ) ) # P1 is the probability of passing the item eik <- eik_t1 / eik_t2 N <- nrow(data) oik <- sapply( 0:I, FUN=function(ss){ mean( data[ sumscore==ss, ii ] ) } ) dfr.ii <- data.frame( item=colnames(data)[ii], itemindex=ii, score=0:I, Nik=sumscore.distribution, oik=oik, eik=eik ) dfr.ii$eik_t1 <- eik_t1 dfr.ii$eik_t2 <- eik_t2 dfr.ii$Eik <- N * eik_t2 dfr.ii[ 1, "oik" ] <- 0 dfr.ii[ is.na(dfr.ii) ] <- 0 #**** merging of categories x1 <- floor( I/2 ) dfr2.ii <- NULL mm1 <- 2 #** from below while( mm1 <=x1 ){ t1 <- 0 ss <- mm1-1 while( t1 < Eik_min){ ss <- ss + 1 t1 <- t1 + dfr.ii[ ss, "Eik" ] } mm2 <- ss dfr2vv <- dfr.ii[ mm1:mm2, ] dfr2.iivv <- data.frame( item=colnames(data)[ii], itemindex=ii ) dfr2.iivv$score.min <- min( dfr2vv$score ) dfr2.iivv$score.max <- max( dfr2vv$score ) dfr2.iivv$score <- mean( dfr2vv$score ) dfr2.iivv$Nik <- sum( dfr2vv$Nik ) dfr2.iivv$oik <- sum( dfr2vv$Nik*dfr2vv$oik ) / sum( dfr2vv$Nik + eps) dfr2.iivv$eik <- sum( dfr2vv$eik_t1 ) / sum( dfr2vv$eik_t2 + eps ) dfr2.iivv$Eik <- sum( dfr2vv$Eik ) dfr2.ii <- rbind( dfr2.ii, dfr2.iivv ) mm1 <- mm2 + 1 } dfr2a.ii <- dfr2.ii #*** from above dfr2.ii <- NULL mm1 <- I while( mm1 > x1 ){ t1 <- 0 ss <- mm1+1 while( t1 < Eik_min){ ss <- ss - 1 t1 <- t1 + dfr.ii[ ss, "Eik" ] } mm2 <- ss dfr2vv <- dfr.ii[ mm2:mm1, ] dfr2.iivv <- data.frame( "item"=colnames(data)[ii], "itemindex"=ii ) dfr2.iivv$score.min <- min( dfr2vv$score ) dfr2.iivv$score.max <- max( dfr2vv$score ) dfr2.iivv$score <- mean( dfr2vv$score ) dfr2.iivv$Nik <- sum( dfr2vv$Nik ) dfr2.iivv$oik <- sum( dfr2vv$Nik*dfr2vv$oik ) / sum( dfr2vv$Nik + eps) dfr2.iivv$eik <- sum( dfr2vv$eik_t1 ) / sum( dfr2vv$eik_t2 + eps ) dfr2.iivv$Eik <- sum( dfr2vv$Eik ) dfr2.ii <- rbind( dfr2.ii, dfr2.iivv ) mm1 <- mm2 - 1 } dfr2.ii <- rbind( dfr2a.ii, dfr2.ii ) dfr2.ii <- dfr2.ii[ order( dfr2.ii$score), ] res <- list( table1.ii=dfr.ii, table2.ii=dfr2.ii ) return(res) } ########################################################################## .calc.itemfit.oneitem <- itemfit_sx2_calc_itemfit_oneitem
/scratch/gouwar.j/cran-all/cranData/CDM/R/itemfit_sx2_calc_itemfit_oneitem.R
## File Name: itemfit_sx2_calc_scoredistribution_R.R
## File Version: 0.04

# calculate distribution of sum score
itemfit_sx2_calc_scoredistribution_R <- function( pjk )
{
    # pjk ... [ TP, I, 2 ] ... [ theta points, items, 2 categories ]
    P1 <- pjk[,,2]
    Q1 <- pjk[,,1]
    TP <- nrow(P1)
    I <- ncol(P1)
    score <- seq( 0, I, 1 )
    scoredistribution <- matrix(NA, TP, I+1 )
    scoredistribution[,1] <- Q1[,1]
    scoredistribution[,2] <- P1[,1]
    for (ii in 2:I){
        scoredistribution0 <- scoredistribution
        scoredistribution[,ii+1] <- P1[,ii] * scoredistribution0[,ii]
        for (kk in seq( 0, ii - 2, 1 ) ){
            scoredistribution[,ii-kk] <- Q1[,ii] * scoredistribution0[,ii-kk] +
                                            P1[,ii] * scoredistribution0[,ii-kk-1]
        }
        scoredistribution[,1] <- Q1[,ii] * scoredistribution0[,1]
    }
    return(scoredistribution)
}

.calc.scoredistribution.cdm <- itemfit_sx2_calc_scoredistribution_R
/scratch/gouwar.j/cran-all/cranData/CDM/R/itemfit_sx2_calc_scoredistribution_R.R
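## Illustrative sketch (not part of the CDM package source): the recursion above is the
## Lord-Wingersky algorithm for the sum-score distribution given theta. Assuming the
## function defined above has been sourced, each row of the result sums to one:
pjk <- array(NA, dim=c(2, 3, 2))             # 2 theta points, 3 items, 2 categories
pjk[,,2] <- rbind(c(0.3, 0.5, 0.7),          # P(X=1 | theta)
                  c(0.6, 0.8, 0.9))
pjk[,,1] <- 1 - pjk[,,2]                     # P(X=0 | theta)
sd1 <- itemfit_sx2_calc_scoredistribution_R(pjk=pjk)
rowSums(sd1)                                 # both rows equal 1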
## File Name: jkestimates.R
## File Version: 0.11

# jackknife estimates
jkestimates <- function( est, parsM, fayfac )
{
    RR <- ncol(parsM)
    M_pars <- rowMeans( parsM )
    parsMres <- parsM - M_pars
    # variance covariance matrix of estimators
    vcov_pars <- tcrossprod( parsMres )
    dfr <- data.frame( "est"=est, "jkest"=est - fayfac * (RR-1) * ( M_pars - est) )
    dfr$jkse <- sqrt( diag( vcov_pars ))
    res <- list("dfr"=dfr, "vcov_pars"=vcov_pars )
    return(res)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/jkestimates.R
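## Illustrative sketch (not part of the CDM package source): jkestimates() combines a
## point estimate with a matrix of replicate estimates (one column per jackknife
## replicate). Assuming the function defined above has been sourced, with toy replicates:
set.seed(1)
est <- c(0.50, 1.20)                                    # original estimates
parsM <- est + matrix(rnorm(2*20, sd=0.05), nrow=2)     # 20 replicate estimates
res <- jkestimates(est=est, parsM=parsM, fayfac=1)
res$dfr      # original estimate, bias-adjusted jackknife estimate, jackknife SE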
## File Name: label_significance_level.R
## File Version: 0.04

# helper function significance level
label_significance_level <- function( values, levels, labels )
{
    ix <- sort( levels, index.return=TRUE)$ix
    levels <- levels[ix]
    labels <- labels[ix]
    NL <- length(levels)
    l1 <- ""
    values[ is.na(values) ] <- 1.2
    for (ll in 1:NL){
        l1 <- ifelse( values < levels[NL-ll+1], labels[NL-ll+1], l1)
    }
    return(l1)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/label_significance_level.R
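## Illustrative sketch (not part of the CDM package source): a typical use of the helper
## above is mapping p values to significance symbols. Assuming the function defined
## above has been sourced:
p <- c(0.0004, 0.03, 0.08, 0.40, NA)
label_significance_level(values=p, levels=c(0.001, 0.01, 0.05, 0.10),
        labels=c("***", "**", "*", "."))
## -> "***" "*" "." "" ""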
## File Name: logLik_CDM.R
## File Version: 0.12

# log-likelihood functions

# din class
logLik.din <- function (object, ...)
{
    # extract log-likelihood
    out <- object$loglike
    # number of parameters
    attr(out, "df") <- sum( object$Npars )
    # extract number of observations
    attr(out, "nobs") <- object$I
    class(out) <- "logLik"
    return(out)
}

# gdina class
logLik.gdina <- function (object, ...)
{
    out <- object$loglike
    attr(out, "df") <- sum( object$Npars )
    attr(out, "nobs") <- sum(object$N)
    class(out) <- "logLik"
    return(out)
}

# gdm class
logLik.gdm <- function (object, ...)
{
    out <- object$loglike
    attr(out, "df") <- sum( object$Npars )
    attr(out, "nobs") <- sum(object$N)
    class(out) <- "logLik"
    return(out)
}

# mcdina class
logLik.mcdina <- function (object, ...)
{
    out <- object$loglike
    attr(out, "df") <- sum( object$Npars )
    attr(out, "nobs") <- sum(object$I)
    class(out) <- "logLik"
    return(out)
}

# slca class
logLik.slca <- function (object, ...)
{
    out <- object$loglike
    attr(out, "df") <- sum( object$Npars )
    attr(out, "nobs") <- sum(object$N)
    class(out) <- "logLik"
    return(out)
}

# reglca class
logLik.reglca <- function (object, ...)
{
    out <- object$loglike
    attr(out, "df") <- sum(object$Npars)
    attr(out, "nobs") <- sum(object$N)
    class(out) <- "logLik"
    return(out)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/logLik_CDM.R
## File Name: log_dgnorm.R
## File Version: 0.06

## kernel of the generalized normal distribution
log_dgnorm <- function( x, loc, scale, power )
{
    if ( power < 1E3 ){
        y <- - ( abs( x - loc ) / scale )^power / power
    } else {
        y <- rep(0, length(x) )
    }
    return(y)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/log_dgnorm.R
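## Illustrative sketch (not part of the CDM package source): for power=2 the kernel
## above equals the log of a normal density with SD equal to scale, up to the
## normalizing constant. Assuming the function defined above has been sourced:
x <- seq(-2, 2, by=0.5)
k1 <- log_dgnorm(x=x, loc=0, scale=1.5, power=2)
k2 <- dnorm(x, mean=0, sd=1.5, log=TRUE) - dnorm(0, mean=0, sd=1.5, log=TRUE)
max(abs(k1 - k2))    # numerically zero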
## File Name: logpars2rrumpars.R
## File Version: 0.05

# log parametrization to RRUM parametrization
#   pi * r1^(1-a1) * r2^(1-a2)
#     = pi * r1 * r2 * (1/r1)^a1 * (1/r2)^a2
logpars2rrumpars <- function(delta_jj)
{
    v1 <- delta_jj
    N <- length(v1)
    v1 <- 1 / exp( delta_jj )
    v1[1] <- exp(sum( delta_jj))
    return(v1)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/logpars2rrumpars.R
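## Illustrative sketch (not part of the CDM package source): the conversion above follows
## from writing the RRUM as
##   P(X=1 | alpha) = pi * r1^(1-alpha1) * r2^(1-alpha2)
##                  = exp( delta0 + delta1*alpha1 + delta2*alpha2 ),
## with delta0 = log(pi) + log(r1) + log(r2) and delta_k = -log(r_k). Assuming the
## function defined above has been sourced, the RRUM parameters are recovered:
pi0 <- 0.85; r1 <- 0.30; r2 <- 0.50
delta <- c(log(pi0) + log(r1) + log(r2), -log(r1), -log(r2))
logpars2rrumpars(delta_jj=delta)    # returns c(pi0, r1, r2)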
## File Name: mcdina.R ## File Version: 0.936 #- Multiple Choice DINA Model #- mcdina model (de la Torre, 2009) mcdina <- function( dat, q.matrix, group=NULL, itempars="gr", weights=NULL, skillclasses=NULL, zeroprob.skillclasses=NULL, reduced.skillspace=TRUE, conv.crit=0.0001, dev.crit=.1, maxit=1000, progress=TRUE ) { # prepare data s1 <- Sys.time() cl <- match.call() dat <- as.matrix(dat) # zero/one entries, q.matrix from ordinary DINA model res0 <- mcdina_proc_qmatrix( dat=dat, q.matrix=q.matrix ) dat <- res0$dat q.matrix0 <- q.matrix <- res0$q.matrix # handle polytomous attributes res1 <- mcdina_proc_modify_qmatrix( q.matrix=q.matrix, skillclasses=skillclasses ) q.matrix <- res1$q.matrix q.matrix0 <- res1$q.matrix0 skillclasses <- res1$skillclasses skillclasses0 <- res1$skillclasses0 maxmaxattr <- res1$maxmaxattr #- data check res <- mcdina_check_data(dat=dat, q.matrix=q.matrix) dat0 <- dat dat_na <- is.na(dat) dat.resp <- 1* ( 1 - dat_na ) dat_resp_bool <- ! dat_na dat[ dat.resp==0 ] <- 1 dat_ <- dat - 1 eps <- 1e-10 I <- ncol(dat) # number of items CC <- max( q.matrix[,2] ) # maximal number of categories K <- ncol(q.matrix)-2 # number of skills if (K<=3 ){ reduced.skillspace <- FALSE } # group identifier if ( is.null(group) ){ group <- rep(1,nrow(dat))} group0 <- group group0_unique <- sort( unique( group ) ) group <- match( group, group0_unique ) group <- group - 1 G <- length( unique(group) ) # weights if ( is.null(weights) ){ weights <- rep(1,nrow(dat))} # define skill classes if ( is.null(skillclasses) ){ skillclasses <- as.matrix( expand.grid( as.data.frame( rbind( rep(0,K), rep(1,K) ) ) ) ) } classes <- cdm_matrixstring( matr=skillclasses, string="P" ) rownames(skillclasses) <- classes TP <- nrow(skillclasses) # define specification of estimation of item parameters if ( mean( itempars=="gr" )==1 ){ itempars <- rep( "gr", I ) } if ( ( mean( itempars=="gr" ) < 1 ) & ( length(itempars) !=I ) ){ itempars <- rep( "jo", I ) } # prepare latent responses res <- mcdina_proc_test_latent_response( q.matrix=q.matrix, K=K, TP=TP, skillclasses=skillclasses, classes=classes ) lc <- res$lc lr <- res$lr itemstat <- res$itemstat itemstat$G <- G itemstat$partype <- itempars itemstat$N.pars <- itemstat$N.lr * (itemstat$N.cat - 1 ) itemstat$N.pars <- ifelse( itemstat$partype=="gr", itemstat$N.pars*itemstat$G, itemstat$N.pars ) # list of lr lc_list <- lr_list <- list(1:I) for (ii in 1:I){ lr_list[[ii]] <- lr[ lr$item==ii, ] lc_list[[ii]] <- lc[ lc$item==ii, ] } # delta parameter inits res <- mcdina_init_delta( lc=lc, lr=lr ) delta_ideal <- res$delta_ideal delta0 <- res$delta # delta parameters delta <- array( 0, dim=c(I,CC,CC,G) ) for (gg in 1:G){ delta[,,,gg] <- delta0 } # init probabilities probs <- array( 0, dim=c(I,CC,TP,G ) ) # init latent class distribution pi.k <- rep( 1 / TP, TP ) pi.k <- matrix( pi.k, nrow=TP, ncol=G ) # counts latent responses lr_counts <- array(0, dim=c(I,CC,G) ) #***************************** # define reduced skillspace Z <- Z.skillspace <- NULL if ( reduced.skillspace ){ A <- skillclasses attr.patt <- skillclasses maxAttr <- 1 # combinations kombis <- utils::combn( K, 2 ) KK <- ncol(kombis) B <- NULL for (kk in 1:KK){ B <- cbind( B, attr.patt[, kombis[1,kk] ] * attr.patt[, kombis[2,kk] ] ) } Z <- cbind( 1, A, B ) ncolZ <- ncol(Z) v1 <- c("Int", paste("A",1:K, sep="") ) v1 <- c(v1,apply( kombis, 2, FUN=function(ll){ paste( paste( "A", ll, sep=""), collapse="_" ) } )) colnames(Z) <- v1 m1 <- which( maxAttr > 1 ) if ( max(maxAttr) > 1 ){ Z1 <- Z[, m1, drop=FALSE ]^2 
colnames(Z1) <- paste0( colnames(q.matrix)[m1], "*2") Z <- cbind( Z, Z1 ) } if ( ! is.null(Z.skillspace) ){ Z <- Z.skillspace } # check for equal columns Z <- Z[, ! duplicated( t(Z) ) ] ncolZ <- ncol(Z) } iter <- dev <- 0 max.par.change <- 1000 devchange <- 100 # display for progress disp <- "...........................................................\n" #**************************************** #************ begin algorithm *********** while ( ( iter < maxit ) & ( ( max.par.change > conv.crit ) | ( devchange > dev.crit ) ) ) { #z0 <- Sys.time() #--- (0) collect old parameters dev0 <- dev delta0 <- delta #--- (1) calculate probabilities for (gg in 1:G){ # gg <- 1 for (ii in 1:I){ # ii <- 1 lr.ii <- lr_list[[ii]] probs[ii,,,gg] <- delta[ ii,, lr.ii$lr_index, gg] } } # cat("calc probs ") ; z1 <- Sys.time(); print(z1-z0) ; z0 <- z1 #--- (2) calculate likelihood probs_ <- as.matrix( array( probs, dim=c(I, CC*TP*G) ) ) res <- cdm_rcpp_mcdina_probs_pcm_groups( dat=dat_, dat_resp_bool=dat_resp_bool, group=group, probs=probs_, CC=CC, TP=TP ) f.yi.qk <- res$fyiqk # cat("calc like ") ; z1 <- Sys.time(); print(z1-z0) ; z0 <- z1 #--- (3) calculate posterior and expected counts res1 <- cdm_rcpp_mcdina_calccounts_pcm_groups( dat=dat_, dat_resp_bool=dat_resp_bool, group=group, fyiqk=f.yi.qk, pik=pi.k, CC=CC, weights=weights ) n.ik <- array( res1$nik, dim=c( I, CC, TP, G ) ) count_pik <- res1$count_pik for (gg in 1:G){ pi.k[,gg] <- count_pik[,gg] / sum( count_pik[,gg] ) } # set some probabilities of skill classes to zero if ( ! is.null(zeroprob.skillclasses ) ){ pi.k[ zeroprob.skillclasses, ] <- 0 } LL <- res1$LL dev <- -2*LL f.qk.yi <- res1$fqkyi # cat("calc post ") ; z1 <- Sys.time(); print(z1-z0) ; z0 <- z1 #--- (4) log-linear smoothing of skill class distribution if (reduced.skillspace){ pi.k <- mcdina_est_reduced_skillspace(pi.k=pi.k, Z=Z) } # cat("calc smoothing distribution") ; z1 <- Sys.time(); print(z1-z0) ; z0 <- z1 #--- (5) calculate updated item parameters res1 <- mcdina_est_item( n.ik=n.ik, lr_list=lr_list, lc_list=lc_list, delta=delta, I=I, G=G, eps=eps, itemstat=itemstat, itempars=itempars, lr_counts=lr_counts ) delta <- res1$delta lr_counts <- res1$lr_counts # cat("calc item parameters") ; z1 <- Sys.time(); print(z1-z0) ; z0 <- z1 #--- (11) convergence max.par.change <- max( abs( delta - delta0 ) ) devchange <- abs( dev- dev0) iter <- iter + 1 #--- (99) display progress if (progress) { cat(disp) cat("Iteration", iter, " ", paste( Sys.time() ), "\n" ) cat( "Deviance=", round( dev, 5 ) ) g11 <- - ( dev - dev0 ) if (iter >1){ cat(" | Deviance change=", round( -(dev-dev0), 7) ) if (g11 < 0 ){ cat( "\n**** Deviances decreases! Check for nonconvergence. 
****\n") } } cat("\n" ) cat("Maximum parameter change:", round( max.par.change, 6), "\n") utils::flush.console() } } #*************** end algorithm *********** #***************************************** # include information criteria ic <- mcdina_calc_ic( dev=dev, weights=weights, itemstat=itemstat, pi.k=pi.k, G=G, I=I, zeroprob.skillclasses=zeroprob.skillclasses, reduced.skillspace=reduced.skillspace, Z=Z ) # include standard error se.delta <- mcdina_calc_se_delta( delta=delta, n.ik=n.ik, probs=probs, lr_list=lr_list, lc_list=lc_list, itemstat=itemstat, I=I, G=G, itempars=itempars, lr_counts=lr_counts, CC=CC ) # labeling rownames(pi.k) <- classes colnames(pi.k) <- paste0("Group.", group0_unique ) # rename skill classes in case of polytomous attributes if (maxmaxattr > 1 ){ skillclasses <- skillclasses0 lc$Q <- cdm_matrixstring(q.matrix0[,-c(1:2) ], "Q" ) q.matrix <- q.matrix0 } # item summary table item <- mcdina_collect_itempars( I=I, lc=lc, itempars=itempars, itemstat=itemstat, dat=dat, G=G, CC=CC, delta=delta, se.delta=se.delta, group0_unique=group0_unique ) # skill pattern skill.patt <- mcdina_skill_patt( q.matrix=q.matrix, skillclasses=skillclasses, G=G, pi.k=pi.k, group0_unique=group0_unique ) # person classification mle.class <- skillclasses[ max.col( f.yi.qk ), ] map.class <- skillclasses[ max.col( f.qk.yi ), ] N11 <- nrow(mle.class) K11 <- ncol(mle.class) K12 <- nrow(skillclasses) eap.class <- matrix( 0, nrow=N11, ncol=K11 ) colnames(eap.class) <- colnames(mle.class) for (kk in 1:K11){ sckk <- matrix( skillclasses[,kk], nrow=N11, ncol=K12, byrow=TRUE ) eap.class[,kk] <- rowSums( sckk * f.qk.yi ) } #---- OUTPUT res <- list( item=item, posterior=f.qk.yi, like=f.yi.qk, ic=ic, q.matrix=q.matrix, pik=probs, delta=delta, se.delta=se.delta, itemstat=itemstat, n.ik=n.ik, deviance=dev, attribute.patt=pi.k, attribute.patt.splitted=skillclasses, skill.patt=skill.patt, MLE.class=mle.class, MAP.class=map.class, EAP.class=eap.class, dat=dat0, skillclasses=skillclasses, group=group0, lc=lc, lr=lr, iter=iter, itempars=itempars, weights=weights, I=nrow(dat), G=G, CC=CC, loglike=-dev/2, AIC=ic$AIC, BIC=ic$BIC, Npars=ic$np ) res$converged <- iter < maxit res$control$weights <- weights res$control$group <- group s2 <- Sys.time() res$time <- list( s1=s1, s2=s2, timediff=s2-s1) cat("----------------------------------- \n") cat("Start:", paste( s1), "\n") cat("End:", paste(s2), "\n") cat("Difference:", print(s2 -s1), "\n") cat("----------------------------------- \n") class(res) <- "mcdina" res$call <- cl return(res) }
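## Usage sketch (illustrative, not executed when sourcing the package): the call
## below assumes that 'dat' is a polytomous item response data set and 'q.matrix'
## a category-level Q-matrix of the form processed above; all argument names are
## taken from the function definition.
##   mod <- mcdina( dat=dat, q.matrix=q.matrix, group=NULL, itempars="gr" )
##   mod$item          # item parameter table (delta estimates and standard errors)
##   mod$skill.patt    # marginal skill mastery probabilities
##   mod$MAP.class     # MAP skill classifications of the persons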
/scratch/gouwar.j/cran-all/cranData/CDM/R/mcdina.R
## File Name: mcdina.simul.R ## File Version: 0.194 simul.mcdina <- function( alpha, pars_lc, pars_lr, skillcl ) { # requireNamespace("sirt") skills <- alpha N <- length(alpha) I <- max( pars_lc$item ) CC <- max( pars_lc$cats ) dat <- matrix( NA, nrow=N, ncol=I ) colnames(dat) <- paste0("I",1:I) # calculate probabilities and simulate for (ii in 1:I){ lc.ii <- pars_lc[ pars_lc$item==ii, ] lc.ii <- lc.ii[ lc.ii$sum==1, ] lr.ii <- pars_lr[ pars_lr$item==ii, ] lr.unique <- paste( unique( lr.ii$lr ) ) # compute latent response pattern for item ii lr.ii <- paste(lr.ii[ match( skillcl[ skills ], lr.ii$skillclass ), "lr" ]) probs <- lc.ii[ match( lr.ii, paste(lc.ii$lr) ), grep( "Cat", colnames(pars_lc ) ) ] Nc <- ncol(probs) rn <- stats::runif(N) # probs1 <- sirt::rowCumsums.sirt(matr=as.matrix(probs)) eval(parse(text=paste("probs1 <- sirt","::", "rowCumsums.sirt(matr=as.matrix(probs))"))) # dat[,ii] <- sirt::rowIntervalIndex.sirt(matr=probs1,rn=rn) eval(parse(text=paste("dat[,ii] <- sirt","::", "rowIntervalIndex.sirt(matr=probs1,rn=rn)"))) print(paste0( "Item ",ii )) utils::flush.console() } return(dat) } # Examples: ## > pars_lc ## item cats lr max.cat lr_index Q Cat0 Cat1 Cat2 Cat3 sum cat ## 1 1 1 LR0 0 1 Q000 0.80 0.10 0.05 0.05 1 1 ## 2 1 2 LR1 0 2 Q100 0.10 0.60 0.10 0.20 1 2 ## 3 1 3 LR2 0 3 Q010 0.25 0.10 0.60 0.05 1 3 ## 4 1 4 LR3 1 4 Q110 0.02 0.02 0.16 0.80 1 4 ## 5 2 1 LR0 0 1 Q000 0.70 0.10 0.15 0.05 1 1 ## 6 2 2 LR12 0 2 Q100 0.10 0.35 0.50 0.05 1 2 ## 7 2 3 LR12 0 2 Q100 0.00 0.00 0.00 0.00 0 3 ## 8 2 4 LR3 1 3 Q110 0.05 0.10 0.20 0.65 1 4 ## 9 3 1 LR01 0 1 Q000 0.25 0.55 0.10 0.10 1 1 ## 10 3 2 LR01 0 1 Q000 0.00 0.00 0.00 0.00 0 2 ## 11 3 3 LR2 1 2 Q110 0.04 0.01 0.85 0.10 1 3 ## 12 3 4 LR3 0 3 Q100 0.10 0.10 0.20 0.60 1 4 ## [...] ## > pars_lr ## item skillclass skillclass_index lr lr_index ## 1 1 P000 1 LR0 1 ## 2 1 P100 2 LR1 2 ## 3 1 P010 3 LR2 3 ## 4 1 P110 4 LR3 4 ## 5 1 P001 5 LR0 1 ## 6 1 P101 6 LR1 2 ## 7 1 P011 7 LR2 3 ## 8 1 P111 8 LR3 4 ## 9 2 P000 1 LR0 1 ## 10 2 P100 2 LR12 2 ## 11 2 P010 3 LR0 1 ## 12 2 P110 4 LR3 3 ## 13 2 P001 5 LR0 1 ## 14 2 P101 6 LR12 2 ## 15 2 P011 7 LR0 1 ## 16 2 P111 8 LR3 3 ## 17 3 P000 1 LR01 1 ## 18 3 P100 2 LR3 3 ## 19 3 P010 3 LR01 1 ## 20 3 P110 4 LR2 2 ## 21 3 P001 5 LR01 1 ## 22 3 P101 6 LR3 3 ## 23 3 P011 7 LR01 1 ## [...] ## pars_lr is created by .mcdina.prep.test.latent.response. ## The core of pars_lc is also created by this function, only ## probabilities must be specified by the user.
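## Usage sketch (illustrative; requires the 'sirt' package because of the
## rowCumsums.sirt / rowIntervalIndex.sirt calls above). 'pars_lc' and 'pars_lr'
## are tables of the form shown above, 'skillcl' is a vector of skill class labels
## and 'alpha' an integer vector giving the skill class index of each person:
##   skillcl <- unique( paste( pars_lr$skillclass ) )   # e.g. "P000", ..., "P111"
##   alpha <- sample( seq_along(skillcl), size=500, replace=TRUE )
##   dat <- simul.mcdina( alpha=alpha, pars_lc=pars_lc, pars_lr=pars_lr, skillcl=skillcl )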
/scratch/gouwar.j/cran-all/cranData/CDM/R/mcdina.simul.R
## File Name: mcdina_calc_ic.R ## File Version: 0.01 ################################################# # mcdina information criteria mcdina_calc_ic <- function( dev, weights, itemstat, pi.k, G, I, zeroprob.skillclasses, reduced.skillspace, Z ) { ic <- list( "deviance"=dev, "n"=sum(weights), "loglik"=-dev/2 ) ic$G <- G ic$itempars <- sum( itemstat$N.pars) ic$traitpars <- G*(nrow(pi.k)-1 - length( zeroprob.skillclasses ) ) if ( reduced.skillspace ){ ic$traitpars <- G * ncol(Z) } ic$np <- ic$itempars + ic$traitpars ic$Nskillclasses <- nrow(pi.k) - length( zeroprob.skillclasses ) # AIC ic$AIC <- dev + 2*ic$np # BIC ic$BIC <- dev + ( log(ic$n) )*ic$np # CAIC (consistent AIC) ic$CAIC <- dev + ( log(ic$n) + 1 )*ic$np # corrected AIC ic$AICc <- ic$AIC + 2*ic$np * ( ic$np + 1 ) / ( ic$n - ic$np - 1 ) return(ic) }
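## Worked example (illustrative numbers only): with deviance=12000, n=1000 and
## np=40 estimated parameters, the criteria computed above evaluate to
##   AIC  =12000 + 2*40                  =12080
##   BIC  =12000 + log(1000)*40          =12276.31
##   CAIC =12000 + ( log(1000) + 1 )*40  =12316.31
##   AICc =12080 + 2*40*41 / (1000-40-1) =12083.42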
/scratch/gouwar.j/cran-all/cranData/CDM/R/mcdina_calc_ic.R
## File Name: mcdina_calc_se_delta.R ## File Version: 0.01 # standard errors mcdina mcdina_calc_se_delta <- function( delta, n.ik, probs, lr_list, lc_list, itemstat, I, G, itempars, lr_counts, CC ) { se.delta <- delta for (ii in 1:I){ for (gg in 1:G){ lc.ii <- lc_list[[ii]] lr.ii <- lr_list[[ii]] se.delta[ii,,,gg] <- sqrt( delta[ii,,,gg] * ( 1 - delta[ii,,,gg] ) / matrix( lr_counts[ii,,gg], nrow=CC, ncol=CC, byrow=TRUE) ) } # end gg } # end ii return(se.delta) }
/scratch/gouwar.j/cran-all/cranData/CDM/R/mcdina_calc_se_delta.R
## File Name: mcdina_check_data.R ## File Version: 0.07 mcdina_check_data <- function(dat, q.matrix) { I <- ncol(dat) print_warning <- FALSE for (ii in 1L:I){ values_ii <- sort(unique( stats::na.omit(dat[,ii]) )) q_ii <- q.matrix[ q.matrix$item==ii, "categ"] non_def <- setdiff(values_ii, q_ii) if (length(non_def)>0){ v1 <- paste0("Non-defined category for item ", colnames(dat)[ii], ": ") v2 <- paste0(non_def, collapse=" ") cat(paste0(v1,v2,"\n")) print_warning <- TRUE } } if (print_warning){ stop("Modify data or Q-matrix input!\n") } }
/scratch/gouwar.j/cran-all/cranData/CDM/R/mcdina_check_data.R
## File Name: mcdina_collect_itempars.R ## File Version: 0.04 # collect item parameters mcdina_collect_itempars <- function( I, lc, itempars, itemstat, dat, G, CC, delta, se.delta, group0_unique ) { item <- NULL for (ii in 1:I){ lc.ii <- lc[ lc$item==ii, ] ip.ii <- itempars[ii] itemstat.ii <- itemstat[ii,] if ( ip.ii=="gr" ){ G1 <- G } else { G1 <- 1 } for (gg in 1:G1){ # gg <- 1 delta.ii <- delta[ ii,,,gg ] se.delta.ii <- se.delta[ ii,,,gg ] for (cc in 1:itemstat.ii$N.lr ){ lc.ii.cc <- lc.ii[ lc.ii$lr_index==cc, ] lc.ii.cc <- lc.ii.cc[1,] item.cc <- data.frame( "item"=colnames( dat )[ii], "itemnr"=lc.ii.cc$item ) item.cc$lr <- lc.ii.cc$lr item.cc$lr_level <- lc.ii.cc$lr_level item.cc$Q <- lc.ii.cc$Q item.cc$lr_index <- lc.ii.cc$lr_index item.cc$max.cat <- lc.ii.cc$max.cat item.cc$partype <- itemstat.ii$partype item.cc$group <- group0_unique[gg] if ( item.cc$partype !="gr" ){ item.cc$group <- NA } d1 <- t( delta.ii[,cc] ) colnames(d1) <- paste0( "Cat", 1:CC ) if ( itemstat.ii$N.cat < CC ){ d1[ 1, seq(itemstat.ii$N.cat + 1, CC ) ] <- NA } item.cc <- cbind( item.cc, d1 ) d1 <- t( se.delta.ii[,cc] ) if ( itemstat.ii$N.cat < CC ){ d1[ 1, seq(itemstat.ii$N.cat + 1, CC ) ] <- NA } colnames(d1) <- paste0( "se.Cat", 1:CC ) item.cc <- cbind( item.cc, d1 ) item <- rbind( item, item.cc ) } } } item <- item[ ! is.na(item$itemnr), ] return(item) }
/scratch/gouwar.j/cran-all/cranData/CDM/R/mcdina_collect_itempars.R
## File Name: mcdina_est_item.R
## File Version: 0.19
#######################################################
# mcdina estimate item parameters
mcdina_est_item <- function( n.ik, lr_list, lc_list, delta, I, G, eps,
        itemstat, itempars, lr_counts )
{
    for (ii in 1:I){
        lr.ii <- lr_list[[ii]]
        lr_index.ii <- lr.ii[, "lr_index"]
        lc.ii <- lc_list[[ii]]
        #******************
        # group specific item parameters
        if ( itempars[ii]=="gr"){
            for (gg in 1:G){    # gg <- 1
                n.ik.ii.gg <- t( n.ik[ii,,,gg] )
                n.ik.ii.gg_aggr <- as.matrix( rowsum( n.ik.ii.gg, lr_index.ii ) )
                n.ik.ii.gg_aggr1 <- n.ik.ii.gg_aggr+eps
                cn <- rowSums( n.ik.ii.gg_aggr1 )
                lr_counts[ii, 1:itemstat$N.lr[ii], gg ] <- cn
                delta.new <- t( n.ik.ii.gg_aggr / matrix( cn, nrow=nrow(n.ik.ii.gg_aggr),
                                    ncol=ncol(n.ik.ii.gg_aggr), byrow=FALSE ) )
                delta[ii,,1:itemstat[ii,"N.lr"],gg] <- delta.new
            }
        }
        #******************
        # item parameters estimated jointly across groups:
        # pool the expected counts of all groups before updating delta
        if ( itempars[ii] !="gr"){
            n.ik.ii.gg <- t( n.ik[ii,,,1] )
            if (G > 1){
                for (gg in 2:G){
                    n.ik.ii.gg <- n.ik.ii.gg + t( n.ik[ii,,,gg] )
                }
            }
            n.ik.ii.gg_aggr <- as.matrix( rowsum( n.ik.ii.gg, lr_index.ii ) )
            n.ik.ii.gg_aggr1 <- n.ik.ii.gg_aggr+eps
            cn <- rowSums( n.ik.ii.gg_aggr1 )
            lr_counts[ii, 1:itemstat$N.lr[ii], 1:G ] <- cn
            delta.new <- t( n.ik.ii.gg_aggr / matrix( cn, nrow=nrow(n.ik.ii.gg_aggr),
                                ncol=ncol(n.ik.ii.gg_aggr), byrow=FALSE ) )
            for (gg in 1:G){
                delta[ii,,1:itemstat[ii,"N.lr"],gg] <- delta.new
            }
        }  # end itempars[ii] !="gr"
    } # end ii
    res <- list("delta"=delta, "lr_counts"=lr_counts )
    return(res)
}
############################################################
/scratch/gouwar.j/cran-all/cranData/CDM/R/mcdina_est_item.R
## File Name: mcdina_est_reduced_skillspace.R ## File Version: 0.03 mcdina_est_reduced_skillspace <- function(pi.k, Z) { G <- ncol(pi.k) for (gg in 1:G){ ntheta <- pi.k[,gg] res <- gdina_reduced_skillspace( ntheta=ntheta, Z=Z, reduced.skillspace.method=2 ) pi.k[,gg] <- res$attr.prob } return(pi.k) }
/scratch/gouwar.j/cran-all/cranData/CDM/R/mcdina_est_reduced_skillspace.R
## File Name: mcdina_init_delta.R ## File Version: 0.01 ################################################### # initial estimate of item parameters delta mcdina_init_delta <- function( lc, lr ) { I <- max( lc$item ) lc$cats <- lc$cats CC <- max(lc$cats) delta_ideal <- delta <- array( 0, dim=c(I, CC, CC ) ) delta_ideal[ as.matrix( lc[, c("item", "cats", "lr_index" ) ] ) ] <- 1 eps <- 1E-10 # define initial delta estimate for (ii in 1:I){ dii <- delta_ideal[ii,,] lcii <- lc[ lc$item==ii, ] Ncii <- nrow(lcii) for (cc in 1:CC){ dii.cc <- dii[,cc] delta[ii,,cc] <- dii.cc * ( 0.8 / sum( dii.cc + eps) ) + (1-dii.cc) * ( .2 / sum( ( 1-dii.cc) + eps ) ) if (Ncii < CC ){ delta[ii,seq(Ncii+1,CC),cc] <- 0 delta[ii,,cc] <- delta[ii,,cc] / sum( delta[ii,,cc] ) } if ( sum( dii.cc )==0 ){ delta[ii,,cc] <- 0 } } } res <- list( "delta"=delta, "delta_ideal"=delta_ideal ) return(res) } ############################################################
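## Worked example (illustrative): for an item with CC=4 categories and a latent
## response group whose ideal response is category 2 (delta_ideal column (0,1,0,0)),
## the initial column of delta is
##   category 2:              0.8
##   categories 1, 3 and 4:   0.2/3=0.0667 each
## so that the column sums to 1; categories that do not occur for the item receive
## probability 0 and the column is renormalized.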
/scratch/gouwar.j/cran-all/cranData/CDM/R/mcdina_init_delta.R
## File Name: mcdina_proc_item_latent_response.R ## File Version: 0.08 ############################################## # compute preparation table for one item mcdina_proc_item_latent_response <- function( ii, q.matrix, K, TP, skillclasses, classes ) { q.ii <- q.matrix[ q.matrix[,1]==ii, ] # categories cats.ii <- q.ii[,2] CC <- length(cats.ii) # calculate relevant attributes qsum <- rowSums( q.ii[, 1:K + 2 ] ) index.max <- which( qsum==max(qsum) ) # necessary attributes for item ii attr.ii <- which( q.ii[ index.max[1], 1:K + 2] > 0 ) if ( length(attr.ii)==0 ){ attr.ii <- 1:K } q.ii.red <- q.ii[, attr.ii + 2, drop=FALSE] # calculate matrix with skill classes sk.ii1 <- sk.ii2 <- matrix( 0, nrow=TP, ncol=CC) colnames(sk.ii1) <- colnames(sk.ii2) <- paste0("Cat", cats.ii ) rownames(sk.ii1) <- rownames(sk.ii2) <- rownames(skillclasses) for (cc in 1:CC){ sk.ii2[, cc] <- 1 * ( rowSums( skillclasses[, attr.ii, drop=FALSE] !=q.ii.red[rep(cc,TP),] )==0 ) tmp1 <- skillclasses[, attr.ii, drop=FALSE] %*% t( q.ii.red[cc,] ) sk.ii1[, cc] <- 1 * ( tmp1 >=sum( q.ii.red[cc,] ) ) sk.ii1[, cc] <- tmp1*sk.ii1[, cc] } sk.ii1 <- 1 * ( sk.ii1 > 0 ) v1.ii <- which( rowSums( sk.ii1 )==0 ) i5 <- which( rowSums( q.ii.red )==0 ) sk.ii1[ v1.ii, i5 ] <- 1 ind.ii <- which( rowSums( sk.ii2 )==0 ) sk.ii2[ind.ii, ] <- sk.ii1[ ind.ii, ] # define latent response groups lg <- "LR" for (cc in 1:CC){ lg <- paste0( lg, ifelse( sk.ii2[,cc]==1, cats.ii[cc], "") ) } groups <- sort( unique(lg) ) lr <- data.frame("item"=ii, "skillclass"=classes, "skillclass_index"=1:TP, "lr"=lg ) lr$lr_index <- match( lr$lr, groups ) # unique latent groups lg1 <- sapply( cats.ii, FUN=function(cc){ grep( cc, groups) } ) lc <- data.frame("item"=ii, "cats"=cats.ii, "lr"=groups[ lg1 ] ) lc$max.cat <- 0 lc$max.cat[ index.max ] <- 1 lc$lr_index <- match( lc$lr, groups ) lc$Q <- cdm_matrixstring( q.ii[, 1:K + 2 ], "Q" ) lc$lr_level <- rowSums( q.ii[, 1:K + 2 ]) lc <- lc[ order( paste( lc$lr_level, lc$cats) ), ] lc$lr_level <- paste0( lc$lr_level, LETTERS[ match( lc$lr, unique(lc$lr) ) ] ) lc <- lc[ order( paste( lc$cats) ), ] # item statistics itemstat <- data.frame("item"=ii, "N.cat"=CC, "N.lr"=length(groups) ) itemstat$N.attr <- length(attr.ii) res <- list("lr"=lr, "lc"=lc, "itemstat"=itemstat) return(res) }
/scratch/gouwar.j/cran-all/cranData/CDM/R/mcdina_proc_item_latent_response.R
## File Name: mcdina_proc_modify_qmatrix.R ## File Version: 0.02 # modify q-matrix mcdina_proc_modify_qmatrix <- function( q.matrix, skillclasses) { # create new q.matrix K <- ncol(q.matrix) - 2 maxattr <- apply( q.matrix[,-c(1:2) ], 2, max ) qmatrix_mod <- NULL q.matrix1 <- q.matrix[,1:2] K1 <- max(maxattr) res <- list( "q.matrix"=q.matrix, "q.matrix0"=NULL, "maxmaxattr"=K1, "skillclasses"=skillclasses, "skillclasses0"=skillclasses, "qmatrix_mod"=NULL ) if (K1 > 1 ){ m1 <- matrix( 0:K1, nrow=K1+1, ncol=K ) skillclasses <- as.matrix( expand.grid( as.data.frame( m1) ) ) colnames(skillclasses) <- colnames(q.matrix)[ -c(1:2) ] # create modified q-matrix for (kk in 1:K){ # kk <- 1 qmatrix_mod.kk <- data.frame( "attr_index"=kk, "maxattr"=maxattr[kk] ) skillclasses <- skillclasses[ skillclasses[,kk] <=maxattr[kk], ] for (zz in 1:(maxattr[kk] ) ){ # zz <- 1 name <- paste0( colnames(q.matrix)[kk+2], ".L", zz ) q.matrix1[, name ] <- 1 * ( q.matrix[, kk + 2] >=zz ) } qmatrix_mod <- rbind( qmatrix_mod, qmatrix_mod.kk ) } qmatrix_mod$start <- c(1,cumsum( qmatrix_mod$maxattr)[ - K ] + 1 ) qmatrix_mod$end <- cumsum( qmatrix_mod$maxattr) skillclasses0 <- skillclasses rownames(skillclasses0) <- cdm_matrixstring( skillclasses0, "P" ) skillclasses <- as.data.frame(skillclasses) # create modified skillclasses for (kk in 1:K){ # kk <- 1 for (zz in 1:(maxattr[kk] ) ){ # zz <- 1 name <- paste0( colnames(q.matrix)[kk+2], ".L", zz ) skillclasses[, name ] <- 1 * ( skillclasses[, kk ] >=zz ) } } skillclasses <- skillclasses[, - c(1:K) ] rownames(skillclasses) <- cdm_matrixstring( skillclasses, "P" ) res$q.matrix <- q.matrix1 res$skillclasses <- as.matrix(skillclasses) res$skillclasses0 <- skillclasses0 res$q.matrix0 <- q.matrix res$qmatrix_mod <- qmatrix_mod } return(res) }
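## Worked example (illustrative): a polytomous attribute A1 with maximum level 2
## is recoded into the two dichotomous columns
##   A1.L1=1*( A1 >=1 )   and   A1.L2=1*( A1 >=2 )
## so that a Q-matrix entry A1=2 becomes (A1.L1, A1.L2)=(1,1) and A1=1 becomes
## (1,0); the skill class space is expanded in the same way.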
/scratch/gouwar.j/cran-all/cranData/CDM/R/mcdina_proc_modify_qmatrix.R
## File Name: mcdina_proc_qmatrix.R ## File Version: 0.01 mcdina_proc_qmatrix <- function( dat, q.matrix ) { if ( min( dat, na.rm=TRUE )==0 ){ dat <- dat + 1 I <- ncol(dat) if ( nrow(q.matrix)==I ){ q1 <- data.frame( "item"=1:I, "categ"=2, q.matrix ) q0 <- data.frame( "item"=1:I, "categ"=1, 0+0*q.matrix ) q.matrix <- rbind( q0, q1 ) q.matrix <- q.matrix[ order( 100 * q.matrix$item + q.matrix$categ ), ] } } res <- list("dat"=dat, "q.matrix"=q.matrix ) return(res) }
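## Worked example (illustrative): for dichotomous responses coded 0/1, the data
## are shifted to the categories 1/2 and a binary I x K Q-matrix is expanded to a
## category-level Q-matrix with two rows per item, e.g. the row (1,0,1) of item 3
## becomes
##   item=3, categ=1, skills (0,0,0)
##   item=3, categ=2, skills (1,0,1)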
/scratch/gouwar.j/cran-all/cranData/CDM/R/mcdina_proc_qmatrix.R
## File Name: mcdina_proc_test_latent_response.R ## File Version: 0.05 ########################################## # preparation function for whole test mcdina_proc_test_latent_response <- function( q.matrix, K, TP, skillclasses, classes ) { I <- length( unique(q.matrix[,1])) lr <- NULL lc <- NULL itemstat <- NULL for (ii in 1:I){ res <- mcdina_proc_item_latent_response( ii=ii, q.matrix=q.matrix, K=K, TP=TP, skillclasses=skillclasses, classes=classes ) lr <- rbind( lr, res$lr ) lc <- rbind( lc, res$lc ) itemstat <- rbind( itemstat, res$itemstat ) } res <- list("lr"=lr, "lc"=lc, "itemstat"=itemstat) return(res) } ###############################################
/scratch/gouwar.j/cran-all/cranData/CDM/R/mcdina_proc_test_latent_response.R
## File Name: mcdina_skill_patt.R ## File Version: 0.01 # skill probabilities mcdina_skill_patt <- function( q.matrix, skillclasses, G, pi.k, group0_unique) { maxK <- max( q.matrix[, -c(1:2) ] ) K <- ncol(skillclasses) skill.patt <- matrix( NA, nrow=K, ncol=(maxK+1)*G ) skill.patt <- as.data.frame( skill.patt) zz <- 1 for (kk in 0:maxK){ # kk <- 0 for (gg in 1:G){ # gg <- 1 for (ss in 1:K){ #ss <- 1 skill.patt[ ss, zz ] <- sum( pi.k[ skillclasses[,ss]==kk,gg] ) ind <- which( skillclasses[,ss]==kk ) if ( length(ind)==0 ){ skill.patt[ss,zz] <- NA } } colnames(skill.patt)[zz] <- paste0("skill.prob", kk, ".group", group0_unique[gg] ) zz <- zz+1 } } rownames(skill.patt) <- colnames(q.matrix)[ - c(1:2) ] return(skill.patt) }
/scratch/gouwar.j/cran-all/cranData/CDM/R/mcdina_skill_patt.R
## File Name: modelfit.cor.R ## File Version: 1.27 ############################################################################# modelfit.cor <- function( data, posterior, probs ){ K <- max( apply( data, 2, max, na.rm=TRUE ) ) if ( K>1 ){ stop("modelfit.cor only allows for dichotomous data\n") } data.resp <- 1 - is.na(data) data[ is.na(data) ] <- 9 data1 <- data*data.resp I <- ncol(data) # calculate counts (ignore weights here!!) n11 <- t( ( data==1) * data.resp ) %*% ( ( data==1) * data.resp ) n10 <- t( ( data==1) * data.resp ) %*% ( ( data==0) * data.resp ) n01 <- t( ( data==0) * data.resp ) %*% ( ( data==1) * data.resp ) n00 <- t( ( data==0) * data.resp ) %*% ( ( data==0) * data.resp ) # p1 <- colMeans( ( data==1) * data.resp ) # p0 <- colMeans( ( data==0) * data.resp ) # expected counts # exp1 <- rep(NA, I ) # for (ii in 1:I){ # ii <- 1 # pr.ii1 <- matrix( probs[ii,2,], nrow=nrow(data), ncol=dim(probs)[3], byrow=T ) # p3ii <- pr.ii1 * posterior # exp1[ii] <- sum( rowSums( p3ii ) * data.resp[,ii ] ) / sum( data.resp[,ii] ) # exp1[ii] <- sum( colSums( posterior * data.resp[,ii] ) * probs[ii,2,] ) / sum( data.resp[,ii] ) # } #******************************** # covariances ip <- itempairs <- t( combn(I,2 ) ) colnames(itempairs) <- c("item1", "item2" ) itempairs <- as.data.frame( itempairs ) itempairs$n11 <- n11[ ip ] itempairs$n10 <- n10[ ip ] itempairs$n01 <- n01[ ip ] itempairs$n00 <- n00[ ip ] itempairs$n <- rowSums( itempairs[, c("n11","n10", "n01","n00") ] ) itempairs$Exp00 <- itempairs$Exp01 <- itempairs$Exp10 <- itempairs$Exp11 <- NA itempairs$corExp <- itempairs$corObs <- NA m1 <- matrix( c(1,1,1,0,0,1,0,0), 4, 2, byrow=T ) # define further quantities itempairs$X2 <- NA # itempairs$G2 <- NA itempairs$RESIDCOV <- NA itempairs$Q3 <- NA #*** # calculate expected score for every person and every item exp.ii.jj <- posterior %*% t( probs[,2,] ) #*** for (ii in 1:(I-1) ){ for (jj in (ii+1):I){ # ii <- 1 # jj <- 2 diijj <- data.resp[,ii ]*data.resp[,jj ] ii1 <- which ( itempairs$item1==ii & itempairs$item2==jj ) ps.iijj <- colSums( posterior[ data.resp[,ii]*data.resp[,jj]>0, ] ) # pr.ii1 <- matrix( probs[ii,2,], nrow=nrow(data), ncol=dim(probs)[3], byrow=T ) # pr.jj1 <- matrix( probs[jj,2,], nrow=nrow(data), ncol=dim(probs)[3], byrow=T ) # p3ii <- pr.ii1 * pr.jj1 * posterior # itempairs[ii1,"Exp11"] <- sum( rowSums( p3ii ) * diijj ) itempairs[ii1,"Exp11"] <- sum( probs[ii,2,]*probs[jj,2,] * ps.iijj ) # pr.ii1 <- matrix( probs[ii,2,], nrow=nrow(data), ncol=dim(probs)[3], byrow=T ) # pr.jj1 <- matrix( probs[jj,1,], nrow=nrow(data), ncol=dim(probs)[3], byrow=T ) # p3ii <- pr.ii1 * pr.jj1 * posterior # itempairs[ii1,"Exp10"] <- sum( rowSums( p3ii ) * diijj ) itempairs[ii1,"Exp10"] <- sum( probs[ii,2,]*probs[jj,1,] * ps.iijj ) # pr.ii1 <- matrix( probs[ii,1,], nrow=nrow(data), ncol=dim(probs)[3], byrow=T ) # pr.jj1 <- matrix( probs[jj,2,], nrow=nrow(data), ncol=dim(probs)[3], byrow=T ) # p3ii <- pr.ii1 * pr.jj1 * posterior # itempairs[ii1,"Exp01"] <- sum( rowSums( p3ii ) * diijj ) itempairs[ii1,"Exp01"] <- sum( probs[ii,1,]*probs[jj,2,] * ps.iijj ) # pr.ii1 <- matrix( probs[ii,1,], nrow=nrow(data), ncol=dim(probs)[3], byrow=T ) # pr.jj1 <- matrix( probs[jj,1,], nrow=nrow(data), ncol=dim(probs)[3], byrow=T ) # p3ii <- pr.ii1 * pr.jj1 * posterior # itempairs[ii1,"Exp00"] <- sum( rowSums( p3ii ) * diijj ) itempairs[ii1,"Exp00"] <- sum( probs[ii,1,]*probs[jj,1,] * ps.iijj ) itempairs[ii1, "corObs"] <- .corr.wt( x=m1[,1,drop=FALSE], y=m1[,2,drop=FALSE], w=as.numeric( 
itempairs[ii1,c("n11","n10","n01","n00") ] ) ) itempairs[ii1, "corExp"] <- .corr.wt( x=m1[,1,drop=FALSE], y=m1[,2,drop=FALSE], w=as.numeric( itempairs[ii1,c("Exp11","Exp10","Exp01","Exp00") ] ) ) #*** # Q3 statistic # pr.ii1 <- matrix( probs[ii,2,], nrow=nrow(data), ncol=dim(probs)[3], byrow=T ) # pr.jj1 <- matrix( probs[jj,2,], nrow=nrow(data), ncol=dim(probs)[3], byrow=T ) # p3ii <- pr.ii1 * posterior # expii <- rowSums( p3ii ) # p3jj <- pr.jj1 * posterior # expjj <- rowSums( p3jj ) # calculate residuals data.res <- data[, c(ii,jj) ] - exp.ii.jj[, c(ii,jj) ] data.res <- data.res[ diijj==1, ] itempairs[ii1,"Q3"] <- stats::cor(data.res)[1,2] } } ############################## itempairs$X2 <- ( itempairs$n00 - itempairs$Exp00 )^2 / itempairs$Exp00 + ( itempairs$n10 - itempairs$Exp10 )^2 / itempairs$Exp10 + ( itempairs$n01 - itempairs$Exp01 )^2 / itempairs$Exp01 + ( itempairs$n11 - itempairs$Exp11 )^2 / itempairs$Exp11 # G2 # itempairs$G2 <- itempairs$n00 * log( itempairs$Exp00 / max( itempairs$n00, .01 ) ) + # itempairs$n01 * log( itempairs$Exp01 / max( itempairs$n01, .01 ) ) + # itempairs$n10 * log( itempairs$Exp10 / max( itempairs$n10, .01 ) ) + # itempairs$n11 * log( itempairs$Exp11 / max( itempairs$n11, .01 ) ) # itempairs$G2 <- -2*itempairs$G2 itempairs$RESIDCOV <- ( itempairs$n11 * itempairs$n00 - itempairs$n10 * itempairs$n01 ) / itempairs$n^2 - ( itempairs$Exp11 * itempairs$Exp00 - itempairs$Exp10 * itempairs$Exp01 ) / itempairs$n^2 ############################## # labels itempairs$item1 <- colnames(data)[ itempairs$item1 ] itempairs$item2 <- colnames(data)[ itempairs$item2 ] # residual of correlation itempairs$fcor <- cdm_fisherz( itempairs$corObs ) - cdm_fisherz( itempairs$corExp ) #---- # p values and p value adjustments adjustments # X2 statistic itempairs$X2_df <- 1 itempairs$X2_p <- 1 - stats::pchisq(itempairs$X2, df=1 ) itempairs$X2_p.holm <- stats::p.adjust( itempairs$X2_p, method="holm") itempairs$X2_sig.holm <- 1 * ( itempairs$X2_p.holm < .05 ) itempairs$X2_p.fdr <- stats::p.adjust( itempairs$X2_p, method="fdr") # fcor statistic itempairs$fcor_se <- ( itempairs$n - 3 )^(-1/2) itempairs$fcor_z <- itempairs$fcor / itempairs$fcor_se itempairs$fcor_p <- 1 - stats::pnorm( abs(itempairs$fcor_z ) ) itempairs$fcor_p.holm <- stats::p.adjust( itempairs$fcor_p, method="holm") itempairs$fcor_p.fdr <- stats::p.adjust( itempairs$fcor_p, method="fdr") #********************** # model fit modelfit <- data.frame( "est"=c( mean( abs( itempairs$corObs - itempairs$corExp ), na.rm=TRUE), sqrt( mean( ( itempairs$corObs - itempairs$corExp )^2, na.rm=TRUE ) ), mean( itempairs$X2, na.rm=TRUE ), # mean( itempairs$G2), mean( 100*abs(itempairs$RESIDCOV ), na.rm=TRUE ), mean( abs( itempairs$Q3 ), na.rm=TRUE) ) ) rownames(modelfit) <- c("MADcor", "SRMSR", "MX2", # "MG2", "100*MADRESIDCOV", "MADQ3" ) # "pfit" <- data.frame( "item"=colnames(data), "pObs"=p1, "pExp"=exp1 ) #***** # summary statistics modelfit.test <- data.frame("type"=c("max(X2)","abs(fcor)"), "value"=c( max( itempairs$X2), max( abs(itempairs$fcor) ) ), "p"=c( min( itempairs$X2_p.holm), min( itempairs$fcor_p.holm) ) ) #**** # print results # print( round(modelfit,5), digits=3 ) # cat("MAD Correlation (Observed minus Expected)", round( MADcor, 4 ), "\n" ) res <- list( "modelfit.stat"=modelfit, "itempairs"=itempairs, "modelfit.test"=modelfit.test ) return(res) } ####################################################################### .corr.wt <- function( x, y, w=rep(1,length(x))) { # stopifnot(length(x)==dim(y)[2] ) w <- w / sum(w) # Center 
# x and y, using the weighted means
    x <- x - sum(x * w)
    ty <- y - sum( y * w)
    # Compute the variance
    vx <- sum(w * x * x)
    vy <- sum(w * ty * ty)
    # Compute the covariance
    vxy <- sum(ty * x * w)
    # Compute the correlation
    vxy / sqrt(vx * vy)
}
##  00   n00
##  10   n10
##  01   n01
##  11   n11
## w <- w / sum(w)
## w <- nij / ( n00 + n01 + n10 + n11 )
## xm=sum( x * w )=( 0*n00 + 1*n10 + 0*n01 + 1*n11 ) / N=( n10 + n11 ) / N
## ym=sum( y*w)=(n01+n11) / N
## ---
## variance: Because it is a binary variable, it is p(1-p)
## if p denotes proportion
## Cov( X, Y )=E(X*Y) - E(X)*E(Y)
## E(X*Y)=n11 / N
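## Worked example (illustrative): with cell counts n11=40, n10=10, n01=10, n00=40
## (N=100), the weighted correlation of the binary pattern matrix m1 equals the
## phi coefficient:
##   E(X)=( n10 + n11 ) / N=0.5,  E(Y)=( n01 + n11 ) / N=0.5,  E(X*Y)=n11 / N=0.4
##   Cov(X,Y)=0.4 - 0.5*0.5=0.15,   Var(X)=Var(Y)=0.5*0.5=0.25
##   cor=0.15 / sqrt( 0.25*0.25 )=0.6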
/scratch/gouwar.j/cran-all/cranData/CDM/R/modelfit.cor.R
## File Name: modelfit.cor.din.R ## File Version: 2.242 ###--- Model fit for din object modelfit.cor.din <- function( dinobj, jkunits=0 ) { mod <- dinobj # classes din and gdm if ( inherits(dinobj, c("din","gdina")) ){ data <- as.matrix( mod$data ) posterior <- mod$posterior probs <- mod$pjk } # class gdm if (inherits(mod,"gdm") ){ jkunits <- 0 probs <- aperm( mod$pjk, c(2,3,1) ) posterior <- mod$posterior data <- as.matrix(mod$data) } res <- modelfit.cor2( data=data, posterior=posterior, probs=probs ) #**** Jackknife HJJ <- sum( abs( jkunits) ) if ( HJJ > 0 ){ data <- dinobj$data weights <- dinobj$control$weights q.matrix <- dinobj$q.matrix guess.init <- dinobj$guess$est slip.init <- dinobj$slip$est c1 <- dinobj$control N <- nrow(data) if ( length(jkunits)==1){ jkunits <- min( N, jkunits ) jkunits <- floor( jkunits * ( 1:N ) / (N+1) ) + 1 } JJ <- length( unique( jkunits ) ) jkunits <- match( jkunits, unique(jkunits ) ) ms <- res$modelfit.stat ms.jack <- matrix( NA, nrow=nrow(ms), ncol=JJ ) rownames(ms.jack) <- rownames(ms) cat( paste0("|", paste( rep("*",20), collapse="" ), "|\n|") ) progressbar_init <- 1:JJ progressbar_init <- diff( floor( 20 * ( 1:JJ ) / (JJ+1) ) + 1 ) progressbar_init <- c(20-sum(progressbar_init), progressbar_init ) for (jj in 1:JJ){ data.jj <- data[ jkunits !=jj, ] weights.jj <- weights[ jkunits !=jj ] #--- DINA/DINO model if (inherits(dinobj,"din") ){ mod.jj <- din( data=data.jj, q.matrix=q.matrix, skillclasses=c1$skillclasses, conv.crit=c1$conv.crit, dev.crit=c1$dev.crit, maxit=c1$maxit, constraint.guess=c1$constraint.guess, constraint.slip=c1$constraint.slip, guess.init=guess.init, slip.init=slip.init, guess.equal=c1$guess.equal, slip.equal=c1$slip.equal, zeroprob.skillclasses=c1$zeroprob.skillclasses, weights=weights.jj, rule=c1$rule, wgt.overrelax=c1$wgt.overrelax, wgtest.overrelax=c1$wgtest.overrelax, param.history=FALSE, progress=FALSE ) } #--- GDINA model if (inherits(dinobj,"gdina") ){ mod.jj<- gdina( data=data.jj, q.matrix, skillclasses=c1$skillclasses, conv.crit=c1$conv.crit, dev.crit=c1$dev.crit, maxit=c1$maxit, linkfct=c1$linkfct, Mj=c1$Mj, group=c1$group[ jkunits !=jj ], method=c1$method, delta.designmatrix=c1$delta.designmatrix, delta.basispar.lower=c1$delta.basispar.lower, delta.basispar.upper=c1$delta.basispar.upper, zeroprob.skillclasses=c1$zeroprob.skillclasses, reduced.skillspace=c1$reduced.skillspace, HOGDINA=c1$HOGDINA, Z.skillspace=c1$Z.skillspace, weights=weights.jj, rule=c1$rule, progress=FALSE, progress.item=FALSE ) } #*** evaluate model fit f1jj <- modelfit.cor2( data=mod.jj$data, posterior=mod.jj$posterior, probs=mod.jj$pjk ) ms.jack[,jj] <- f1jj$modelfit.stat[,1] if ( progressbar_init[jj]==1 ){ cat("-") ; utils::flush.console() } } cat("|\n") res$modelfit.stat.jack <- ms.jack # pseudo values ms1 <- ms[,1] psx <- ms1 + ( JJ-1 )* ( ms1 - ms.jack ) # jackknife estimate ms$jkunits <- JJ ms$jk_est <- rowMeans( psx ) ms$jk_se <- sqrt( rowSums( ( psx - ms$jk_est )^2 ) / (JJ-1 ) / JJ ) ms$jk_se <- sqrt( apply( ms.jack, 1, FUN=function(ll){ sum( ( ll - mean(ll) )^2 ) } ) * (JJ-1) / JJ ) ms$est_low <- ms$jk_est - 1.96 * ms$jk_se ms$est_upp <- ms$jk_est + 1.96 * ms$jk_se res$modelfit.stat <- ms } ###**** output class(res) <- "modelfit.cor.din" return(res) } #*** summary summary.modelfit.cor.din <- function( object, ... 
) { cat("Test of Global Model Fit\n") obji <- object$modelfit.test for (vv in seq(2,ncol(obji))){ obji[,vv] <- round( obji[,vv], 3 ) } print(obji) cat("\nFit Statistics\n") obji <- object$modelfit.stat for (vv in seq(1,ncol(obji))){ obji[,vv] <- round( obji[,vv], 3 ) } print(obji) }
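## Usage sketch (illustrative; 'mod' is assumed to be a fitted din or gdina object):
##   fmod <- modelfit.cor.din( mod )               # fit statistics without jackknife
##   fmod <- modelfit.cor.din( mod, jkunits=20 )   # with 20 jackknife units
##   summary(fmod)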
/scratch/gouwar.j/cran-all/cranData/CDM/R/modelfit.cor.din.R
## File Name: modelfit.cor2.R ## File Version: 3.821 modelfit.cor2 <- function( data, posterior, probs ) { # z0 <- Sys.time() K <- max( apply( data, 2, max, na.rm=TRUE ) ) if ( K>1 ){ stop("modelfit.cor only allows for dichotomous data\n") } data <- as.matrix(data) I <- ncol(data) posterior <- as.matrix(posterior) # cat("start ") ; z1 <- Sys.time(); print(z1-z0) ; z0 <- z1 data_na <- is.na(data) data.resp <- 1 - data_na data[ data_na ] <- 9 data1 <- data data1[ data_na ] <- 0 data_resp_bool <- ! data_na res <- cdm_rcpp_modelfit_cor_counts( data=data, data_resp_bool=data_resp_bool) n11 <- res$n11 n01 <- res$n01 n10 <- res$n10 n00 <- res$n00 #----- covariances ip <- itempairs <- as.matrix( t( utils::combn(I,2 ) ) ) colnames(itempairs) <- c("item1", "item2" ) itempairs <- as.data.frame( itempairs ) itempairs$n11 <- n11[ ip ] itempairs$n10 <- n10[ ip ] itempairs$n01 <- n01[ ip ] itempairs$n00 <- n00[ ip ] itempairs$n <- rowSums( itempairs[, c("n11","n10", "n01","n00") ] ) #---- calculate expected score for every person and every item exp.ii.jj <- posterior %*% t( probs[,2,] ) probs1 <- as.matrix(probs[, 2, ]) probs0 <- as.matrix(probs[, 1, ]) ip1 <- as.matrix(ip-1) res <- cdm_rcpp_modelfit_cor2( posterior=posterior, data=data, data_resp_bool=data_resp_bool, probs1=probs1, probs0=probs0, ip=ip1, expiijj=exp.ii.jj ) r1 <- res$itempair_stat itempairs$Exp11 <- r1[,1] itempairs$Exp10 <- r1[,2] itempairs$Exp01 <- r1[,3] itempairs$Exp00 <- r1[,4] eps <- 1e-10 itempairs[, -c(1,2)] <- itempairs[, -c(1,2)] + eps # observed correlation n <- itempairs$n m1 <- ( itempairs$n10 + itempairs$n11 ) / n m2 <- ( itempairs$n01 + itempairs$n11 ) / n t1 <- itempairs$n11 / n - m1 * m2 itempairs$corObs <- t1 / sqrt( m1 * ( 1 - m1 ) * m2 * ( 1-m2 ) ) # observed correlation m1 <- ( itempairs$Exp10 + itempairs$Exp11 ) / n m2 <- ( itempairs$Exp01 + itempairs$Exp11 ) / n t1 <- itempairs$Exp11 / n - m1 * m2 itempairs$corExp <- t1 / sqrt( m1 * ( 1 - m1 ) * m2 * ( 1-m2 ) ) # define further quantities itempairs$X2 <- NA itempairs$RESIDCOV <- NA itempairs$Q3 <- res$Q3 itempairs$X2 <- ( itempairs$n00 - itempairs$Exp00 )^2 / itempairs$Exp00 + ( itempairs$n10 - itempairs$Exp10 )^2 / itempairs$Exp10 + ( itempairs$n01 - itempairs$Exp01 )^2 / itempairs$Exp01 + ( itempairs$n11 - itempairs$Exp11 )^2 / itempairs$Exp11 itempairs$RESIDCOV <- ( itempairs$n11 * itempairs$n00 - itempairs$n10 * itempairs$n01 ) / itempairs$n^2 - ( itempairs$Exp11 * itempairs$Exp00 - itempairs$Exp10 * itempairs$Exp01 ) / itempairs$n^2 #**** labels itempairs$item1 <- colnames(data)[ itempairs$item1 ] itempairs$item2 <- colnames(data)[ itempairs$item2 ] #-- absolute difference in correlations itempairs$fcor <- cdm_fisherz( itempairs$corObs ) - cdm_fisherz( itempairs$corExp ) itempairs <- itempairs[ itempairs$n > 0, ] #--- p values and p value adjustments adjustments # X2 statistic itempairs$X2_df <- 1 itempairs$X2_p <- 1 - stats::pchisq(itempairs$X2, df=1 ) itempairs$X2_p.holm <- stats::p.adjust( itempairs$X2_p, method="holm") itempairs$X2_sig.holm <- 1 * ( itempairs$X2_p.holm < .05 ) itempairs$X2_p.fdr <- stats::p.adjust( itempairs$X2_p, method="fdr") # fcor statistic itempairs$fcor_se <- ( itempairs$n - 3 )^(-1/2) itempairs$fcor_z <- itempairs$fcor / itempairs$fcor_se itempairs$fcor_p <- 1 - stats::pnorm( abs(itempairs$fcor_z ) ) itempairs$fcor_p.holm <- stats::p.adjust( itempairs$fcor_p, method="holm") itempairs$fcor_p.fdr <- stats::p.adjust( itempairs$fcor_p, method="fdr") #***** model fit modelfit <- data.frame( "est"=c( mean( abs( itempairs$corObs - 
itempairs$corExp ), na.rm=TRUE), sqrt( mean( ( itempairs$corObs - itempairs$corExp )^2, na.rm=TRUE ) ), mean( itempairs$X2 ), # mean( itempairs$G2), mean( 100*abs(itempairs$RESIDCOV ), na.rm=TRUE ), mean( abs( itempairs$Q3 ), na.rm=TRUE), mean( abs( itempairs$Q3 - mean(itempairs$Q3,na.rm=TRUE) ), na.rm=TRUE ) ) ) rownames(modelfit) <- c("MADcor", "SRMSR", "MX2", # "MG2", "100*MADRESIDCOV", "MADQ3", "MADaQ3" ) modelfit <- modelfit[ ! ( rownames(modelfit) %in% c("MX2") ),, drop=FALSE ] #***** # summary statistics modelfit.test <- data.frame("type"=c("max(X2)","abs(fcor)"), "value"=c( max( itempairs$X2), max( abs(itempairs$fcor) ) ), "p"=c( min( itempairs$X2_p.holm), min( itempairs$fcor_p.holm) ) ) #**** statistics for use in IRT.compareModels statlist <- data.frame( "maxX2"=modelfit.test[1,"value"], "p_maxX2"=modelfit.test[1,"p"] ) h1 <- modelfit$est statlist <- cbind( statlist, t(h1 ) ) names(statlist)[-c(1:2) ] <- rownames(modelfit) #--- output print results res <- list( "modelfit.stat"=modelfit, "itempairs"=itempairs, "modelfit.test"=modelfit.test, "statlist"=statlist ) return(res) }
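## Summary of the reported global fit statistics (as computed above, averaging
## over all item pairs):
##   MADcor          =mean | r_obs - r_exp |
##   SRMSR           =sqrt( mean ( r_obs - r_exp )^2 )
##   100*MADRESIDCOV =100 * mean | residual covariance |
##   MADQ3           =mean | Q3 |
##   MADaQ3          =mean | Q3 - mean(Q3) |    (Q3 centered at its average)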
/scratch/gouwar.j/cran-all/cranData/CDM/R/modelfit.cor2.R
## File Name: numerical_Hessian.R
## File Version: 0.38

##############################################################################
# numerical computation of the Hessian matrix
numerical_Hessian <- function(par, FUN, h=1E-5, gradient=FALSE,
        hessian=TRUE, diag_only=FALSE, ... )
{
    NP <- length(par)
    f0 <- FUN( x=par, ... )
    fm <- fh <- rep(NA,NP)    # f(x+h)
    f2h <- rep(NA,NP)         # f(x+2*h)
    hess <- matrix(NA,nrow=NP, ncol=NP)
    fhh <- hess
    #** select h parameters according to size of parameters
    abs_par <- abs(par)
    hvec <- h * ifelse( abs_par > 1, abs_par, 1 )
    #--- loop for computing f(x+h)
    for (ii in 1:NP){
        # f(x+h)
        par1 <- par
        par1[ii] <- par[ii] + hvec[ii]
        fh[ii] <- FUN( x=par1, ...)
        # f(x-h)
        par1 <- par
        par1[ii] <- par[ii] - hvec[ii]
        fm[ii] <- FUN( x=par1, ...)
    }
    #--- computation of the gradient
    if (gradient){
        grad1 <- res <- ( fh - fm ) / (2*hvec)
    }
    #------
    # second partial derivatives
    # d F / dx dy
    # dF/dx=g(x,y)=( F(x+h,y) - F(x,y) ) / h
    # (dF/dx)/dy=( g(x,y+h) - g(x,y) ) / h
    #           =( F(x+h,y+h) - F(x,y+h) - F(x+h,y) + F(x,y) ) / h^2
    #---- hessian
    if ( hessian ){
        fh1 <- matrix( fh, nrow=NP, ncol=NP, byrow=TRUE)
        fh2 <- matrix( fh, nrow=NP, ncol=NP, byrow=FALSE)
        #--- computation f(x+2*h)
        for (ii in 1:NP){
            par1 <- par
            par1[ii] <- par[ii] + 2*hvec[ii]
            f2h[ii] <- FUN( x=par1, ... )
        }
        #--- computation f(x+h,y+h)
        if ( ! diag_only ){
            for (ii in 1:NP){
                for (jj in 1:NP){
                    if (ii < jj){
                        par1 <- par
                        par1[ii] <- par[ii] + hvec[ii]
                        par1[jj] <- par[jj] + hvec[jj]
                        fhh[ii,jj] <- fhh[jj,ii] <- FUN( x=par1, ... )
                    }
                }
            }
        }
        h_squared <- outer( hvec, hvec )
        hess <- ( fhh - fh1 - fh2 + f0 ) / h_squared
        diag(hess) <- ( f2h - 2*fh + f0)/ hvec^2
        res <- hess
    }
    if ( gradient & hessian ){
        res <- list( "grad"=grad1, "hessian"=hess, "value"=f0)
    }
    return(res)
}
##############################################################################
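## Usage sketch (illustrative; 'f' is a user-defined objective whose first
## argument must be named x; the numerical results agree with the analytic ones
## only up to numerical error):
##   f <- function(x){ x[1]^2 + x[2]^2 + x[1]*x[2] }
##   numerical_Hessian( par=c(1,2), FUN=f )
##       ## analytic Hessian: matrix( c(2,1,1,2), 2, 2 )
##   numerical_Hessian( par=c(1,2), FUN=f, gradient=TRUE, hessian=FALSE )
##       ## analytic gradient: c(4, 5)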
/scratch/gouwar.j/cran-all/cranData/CDM/R/numerical_Hessian.R
## File Name: numerical_Hessian_partial.R ## File Version: 0.15 numerical_Hessian_partial <- function(par, FUN, h=1E-5, coordinate=1, ... ) { ii <- coordinate f0 <- FUN( x=par, ... ) #** select h parameters according to size of parameters abs_par <- abs(par) hvec <- h * ifelse( abs_par > 1, abs_par, 1 ) #--- for computing f(x+h) par1 <- par par1[ii] <- par[ii] + hvec[ii] f1 <- FUN( x=par1, ...) #--- computation x-h par1[ii] <- par[ii] - hvec[ii] f2 <- FUN( x=par1, ...) #--- gradient and Hessian grad <- (f1 - f2 ) / (2*hvec[ii]) hessian <- (f1 + f2 - 2*f0) / hvec[ii]^2 #--- output res <- list( f0=f0, f1=f1, f2=f2, grad=grad, hessian=hessian) return(res) }
/scratch/gouwar.j/cran-all/cranData/CDM/R/numerical_Hessian_partial.R
## File Name: numerical_gradient.R ## File Version: 0.12 numerical_gradient <- function(par, FUN, h=1E-5, ...) { val <- FUN(par, ...) NV <- length(val) NP <- length(par) mat <- matrix(NA, nrow=NV, ncol=NP) for (coord in 1:NP){ res0 <- numerical_Hessian_partial(par=par, FUN=FUN, h=h, coordinate=coord, ...) mat[,coord] <- res0$grad } return(mat) }
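## Usage sketch (illustrative): for a vector-valued function the result is the
## Jacobian with one row per component of FUN(par) and one column per parameter.
##   f <- function(x){ c( x[1]*x[2], x[1] + x[2] ) }
##   numerical_gradient( par=c(1,2), FUN=f )
##       ## analytic Jacobian: matrix( c(2,1,1,1), 2, 2 )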
/scratch/gouwar.j/cran-all/cranData/CDM/R/numerical_gradient.R
## File Name: osink.R ## File Version: 1.09 osink <- function( file, suffix, append=FALSE) { if ( ! is.null( file ) ){ sink( paste0( file, suffix), split=TRUE, append=append ) } }
/scratch/gouwar.j/cran-all/cranData/CDM/R/osink.R
## File Name: personfit.appropriateness.R ## File Version: 1.12 ######################################################## # personfit.appropriateness statistics ######################################################## ######################################################################################## # function personfit appropriateness statistic personfit.appropriateness <- function( data, probs, skillclassprobs, h=.001, eps=1E-10, maxiter=30, conv=1E-5, max.increment=.1, progress=TRUE ){ # data input data.resp <- 1 - is.na( data ) data[ is.na(data) ] <- 0 N <- nrow(data) L <- dim(probs)[3] skillclassprobsM <- matrix( skillclassprobs, nrow=N, ncol=L, byrow=TRUE ) I <- ncol(data) # algorithm appr=1 rho <- rep(.6, N) appr.type <- 1 if (progress){ cat("***************************************\n") cat("Appropriateness Type ", appr.type ) } res1 <- .calc.personfit.appr.algorithm( probs, data, data.resp, N, L, I, appr.type, rho, skillclassprobsM, eps=eps, maxiter=maxiter, conv=conv, max.increment=max.increment, h=h, progress=progress ) # algorithm appr=0 appr.type <- 0 if (progress){ cat("***************************************\n") cat("Appropriateness Type ", appr.type ) } rho <- rep(.5,N) res0 <- .calc.personfit.appr.algorithm( probs, data, data.resp, N, L, I, appr.type, rho, skillclassprobsM, eps=eps, maxiter=maxiter, conv=conv, max.increment=max.increment, h=h, progress=progress ) # summaries dfr <- data.frame( "appr.type"=c(1,0), "M.rho"=c( mean(res1$rho), mean(res0$rho)), "SD.rho"=c( stats::sd( res1$rho), stats::sd(res0$rho) ) ) dfr$median.SE.rho <- c( stats::median( res1$se.rho), stats::median(res0$se.rho) ) dfr$prop.sign.T2 <- c( mean(res1$p<.05), mean(res0$p <.05) ) rownames(dfr) <- c("Spuriously High Scorers", "Spuriously Low Scorers" ) res2 <- list( "summary"=dfr, "personfit.appr.type1"=res1, "personfit.appr.type0"=res0 ) class(res2) <- "personfit.appropriateness" return(res2) } #################################################################################### # S3 methods # summary summary.personfit.appropriateness <- function( object, digits=3, ... ){ print( round( object$summary, digits=3) ) } #*********************************** # plot method plot.personfit.appropriateness <- function( x, cexpch=.65, ... 
){ graphics::par(mfrow=c(2,2)) # type=1 x1 <- x$personfit.appr.type1 N1 <- nrow(x1) graphics::hist( x1$rho, main="Appropriateness Type 1", freq=TRUE, breaks=seq(0,1, length=20), xlab=expression(rho), ylim=c(0,N1) ) graphics::plot( c(0,1), c(0,.5), type="n", xlab=expression(rho), ylab=expression( p( T[2] ) ), main="Spuriously High Scorer" ) x1a <- x1[ x1$p >=.05, ] graphics::points( x1a$rho, x1a$p, pch=1, cex=cexpch ) x1a <- x1[ x1$p < .05, ] graphics::points( x1a$rho, x1a$p, pch=17, cex=cexpch, col=2) # type=0 x1 <- x$personfit.appr.type0 graphics::hist( x1$rho, main="Appropriateness Type 0", freq=TRUE, breaks=seq(0,1, length=20), xlab=expression(rho), ylim=c(0,N1) ) graphics::plot( c(0,1), c(0,.5), type="n", xlab=expression(rho), ylab=expression( p( T[2] ) ), main="Spuriously Low Scorer") x1a <- x1[ x1$p >=.05, ] graphics::points( x1a$rho, x1a$p, pch=1, cex=cexpch ) x1a <- x1[ x1$p < .05, ] graphics::points( x1a$rho, x1a$p, pch=17, cex=cexpch, col=2) graphics::par( mfrow=c(1,1)) } #################################################################################### ######################################################################################### # algorithm calculation appropriateness statistics .calc.personfit.appr.algorithm <- function( probs, data, data.resp, N, L, I, appr.type, rho, skillclassprobsM, eps=1E-15, maxiter=30, conv=1E-8, max.increment=.1, h=.0001, progress=TRUE ){ rho <- rep(.4, N ) abs.incr <- 1 iter <- 0 while( ( abs.incr > conv) & ( iter < maxiter ) ){ rho0 <- rho rho[ rho > 1 - h ] <- 1 - 2*h rho[ rho < 2*h ] <- 2*h ll0 <- .calc.ll.personfit.appropriateness( probs, data, data.resp, N, L, I, appr.type=appr.type, rho=rho, skillclassprobsM, eps=eps ) rho1 <- rho + h ll1 <- .calc.ll.personfit.appropriateness( probs, data, data.resp, N, L, I, appr.type=appr.type, rho=rho1, skillclassprobsM, eps=eps ) rho2 <- rho - h ll2 <- .calc.ll.personfit.appropriateness( probs, data, data.resp, N, L, I, appr.type=appr.type, rho=rho2, skillclassprobsM, eps=eps ) # first derivative deriv1 <- ( ll1 - ll0 ) / h # second derivative deriv2 <- ( ll1 - 2*ll0 + ll2 ) / h^2 # update rho increment <- deriv1 / abs( deriv2 ) increment <- ifelse( abs(increment) > max.increment, max.increment*sign(increment), increment ) rho <- rho + increment abs.incr <- max( abs( rho - rho0) ) max.increment <- abs.incr * .9^(iter-1 ) iter <- iter+1 if (progress){ cat("\nIteration", iter, "| Max. 
rho parameter change=", round( abs.incr, 8 ) ) ; utils::flush.console() } } cat("\n") #******************* end iterations rho[ rho > 1 - h ] <- 1 - 2*h rho[ rho < 2*h ] <- 2*h # log-likelihood evaluated at rho=0 ll.rho0 <- .calc.ll.personfit.appropriateness( probs, data, data.resp, N, L, I, appr.type=appr.type, rho=0*rho+h/2, skillclassprobsM, eps=eps ) res <- data.frame( "rho"=rho, "se.rho"=sqrt( 1 / abs( deriv2 ) ), "ll.rho"=ll0, "ll.0"=ll.rho0, "T2"=2*(ll0-ll.rho0) ) res[ res$T2 < 0, "T2" ] <- 0 res$p <- ( 1 - stats::pchisq( abs( res$T2 ), df=1) ) / 2 return(res) } ######################################################################################### ######################################################################### # calculation of individual likelihood .calc.ll.personfit.appropriateness <- function( probs, data, data.resp, N, L, I, appr.type, rho, skillclassprobsM, eps=1E-10 ){ ll <- matrix( 1, nrow=N, ncol=L) for (ii in 1:I){ # ii <- 1 prob.ii <- probs[ ii, 2, ] prob.ii[ prob.ii < eps ] <- eps prob.iiM <- matrix( prob.ii, nrow=N, ncol=L, byrow=TRUE ) prob.iiM <- (1-rho)*prob.iiM + rho * appr.type ll.ii <- data[,ii] * prob.iiM + (1-data[,ii]) * (1-prob.iiM) ll.ii <- data.resp[,ii] * ll.ii + ( 1 - data.resp[,ii] ) ll <- ll * ll.ii } # calculate individual total log-likelihood ll0 <- log( rowSums( ll*skillclassprobsM ) ) return(ll0) } #########################################################################
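## Usage sketch (assumptions: 'data' is a dichotomous data set, 'probs' an
## I x 2 x L array of item response probabilities and 'skillclassprobs' a vector
## with the L skill class probabilities, all taken from a fitted diagnostic model):
##   res <- personfit.appropriateness( data=data, probs=probs,
##                   skillclassprobs=skillclassprobs )
##   summary(res)
##   plot(res)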
/scratch/gouwar.j/cran-all/cranData/CDM/R/personfit.appropriateness.R
## File Name: plot.din.R ## File Version: 2.20 ################################################################################ # summary method for objects of class "din" # ################################################################################ plot.din <- function(x, items=c(1:ncol(x$data)), pattern="", uncertainty=0.1, top.n.skill.classes=6, pdf.file="", hide.obs=FALSE, display.nr=1:4, ask=TRUE, ...){ # Call: generic # Input: # x: object of class din # items: an index vector giving the items to be visualized in the first plot # pattern: an optional character specifying a response pattern of an examinee, # whose attributes are then analyzed in a seperate grafic. # uncertainty: a numeric between 0 and 0.5 giving the uncertanity bounds for # deriving the observed skill occurrence probabilities in plot 2 # and the simplified deterministic attribute profiles in plot 5. # top.n.skill.classes: a numeric, specifying the number of skill classes, # starting with the most frequent, to be plotted in plot 3. Default value is 6. # pdf.file: an optional character string. If specified the graphics obtained # from the function plot.din are provided in a pdf file. # hide.idi: an optional logical value. If set to \code{TRUE}, the IDI curve in # first graphic is not displayed. # hide.obs: an optional logical value. If set to \code{TRUE}, the polygonal # chain for observed frequencies of skill class probabilities in the # second graphic is not displayed. # display.nr: an optional numeric or numeric vector. If specified, only the # plots in display.nr are shown. # ask: request a user input before the next figure is drawn; cf. ask option in ?par manual page # Output: none ################################################################################ # check consistency of input # ################################################################################ hide.idi <- FALSE # define pattern in case of vector inputz if ( ( pattern[1] !="" ) & ( length( pattern) > 1 ) ){ pattern <- paste( pattern, collapse="") } suc <- which(unique(x$pattern[,"pattern"])==pattern) #subject under control if(pdf.file!="") try(pdf(file=pdf.file, ...)) try(if(uncertainty<0||uncertainty>.5|| #uncertainty >=0, <=.5 top.n.skill.classes<0||top.n.skill.classes>2^length(x$skill.patt) ) #top.n.skill.classes >=0, <=2^K warning("check your plot parameter specifications. 
See Help-files.")) old.par <- graphics::par(no.readonly=TRUE) on.exit( graphics::par(old.par)) ################################################################################ # Plot 1 # ################################################################################ if(1 %in% display.nr){ # extract information errors <- rbind(x$guess[,1],x$slip[,1])[,items] colnames(errors) <- items errors[2,] <- 1 - errors[2,] # if(!is.null(colnames(x$data)[items])){ # colnames(errors) <- colnames(x$data)[items] # }else{ # colnames(errors) <- items # } # generate plot graphics::barplot(errors, ylim=c(0,1.19), beside=TRUE, col=c("gray","darkred"), xlab="Item index", ylab="Probability", cex.lab=1.3) if(!hide.idi){ # if(any(apply(errors, 2, function(x) 1-x[1] - x[2] < 0 ))){ if( FALSE ){ # warning(paste("Item discrimination index < 0 for item", # which(apply(errors, 2, function(x) 1-x[1]-x[2] < 0 )),"\n")) }else{ graphics::legend("topright",c("Guessing probability","Non-Slipping probability"), lty=c(1,1), pch=c(NA,NA), lwd=c(2,2), col=c("gray","darkred"), bg="gray97") # lines(seq(2,2+3*(ncol(errors)-1),3),apply(errors, 2, function(x) 1-x[1]-x[2] ), lty=1) # points(seq(2,2+3*(ncol(errors)-1),3),apply(errors, 2, function(x) 1-x[1]-x[2] ), pch=19, cex=1.5) # legend("topright",c("guessing parameter","slipping parameter", "item discrimination index"), # lty=c(1,1,1), pch=c(NA,NA,19), lwd=c(2,2,2), col=c("gray","darkred", "black"), bg="gray97") } }else{ graphics::legend("topright",c("guessing parameter","slipping parameter"), lty=c(1,1), lwd=c(2,2), col=c("gray","darkred"), bg="gray97") } if(pdf.file=="" & ask) graphics::par(ask=TRUE) if(1==display.nr[length(display.nr)]) graphics::par(ask=FALSE) } ################################################################################ # Plot 2 # ################################################################################ if(2 %in% display.nr){ # extract information skill.patterns <- x$skill.patt[length(x$skill.patt):1,] ind <- match( apply(x$item.patt.split, 1, paste, collapse=""), unique(x$pattern)[,"pattern"] ) EAP <- ifelse( unique(x$pattern)[ ind, grep("post.attr", colnames(x$pattern) ) ] > 0.5 + uncertainty, 1, NA ) master <- colSums( apply( EAP, 2, function(y) y*x$item.patt.freq), na.rm=TRUE ) master <- ( master/ nrow(x$data) )[length(x$skill.patt):1] # generate plot graphics::par(yaxt="n") graphics::barplot(skill.patterns, horiz=TRUE, # ylim=c(0,length(skill.patterns)*1.2+0.9), ylim=c(0,length(skill.patterns)*1.2), xlim=c(0,1), xlab="Skill mastery probability", axes=F, cex.lab=1.3, col="gray") graphics::axis(1,at=seq(0,1,0.2)) graphics::axis(3,at=seq(0,1,0.2)) if ( is.null( attributes(x$q.matrix)$skill.labels ) ){ attr( x$q.matrix, "skill.labels") <- colnames(x$q.matrix ) } graphics::text(attributes(x$q.matrix)$skill.labels[length(x$skill.patt):1], x=c(rep(0.01,length(skill.patterns))), y=seq(0.7, 0.7+1.2*(length(skill.patterns)-1),1.2), col="black", pos=4, cex=1.3) if(!hide.obs){ # legend("topright",c("Marginal skill probability", "Percentage of masters (EAP)"), # lty=c(1,1), pch=c(NA,19), lwd=c(2,2), col=c("gray", "black"), bg="gray97") # points(x=master, y=seq(0.7,0.7+1.2*(length(skill.patterns)-1),1.2),pch=19, cex=1.3) # lines(x=master, y=seq(0.7,0.7+1.2*(length(skill.patterns)-1),1.2),lty=1) } if(pdf.file=="" & ask) graphics::par(ask=TRUE) if(2==display.nr[length(display.nr)]) graphics::par(ask=FALSE) graphics::par(yaxt="s") } ################################################################################ # Plot 3 # 
################################################################################ if(3 %in% display.nr){ # extract information patt.fq <- x$attribute.patt[,1] main.effects <- which(rownames(x$attribute.patt)%in% rownames(x$attribute.patt[order(x$attribute.patt[,1], decreasing=TRUE),][ 1:min(top.n.skill.classes, 2^length(x$skill.patt)), ]) ) # generate plot graphics::par(xaxt="n"); graphics::par(mar=c(6,4,4,2)+0.1) graphics::plot(c(0:(length(patt.fq)+1)),c(0,t(patt.fq),0),type="h", ylab="Skill class probability", xlab="", ylim=c(0,max(patt.fq)+.02),cex.lab=1.3, col=c(NA,rep("black",length(patt.fq)), NA)) graphics::par(xaxt="s") graphics::axis(1, at=main.effects, las=2, labels=rownames(x$attribute.patt)[main.effects], cex.axis=.8) # print(patt.fq) eps <- .2 PP <- length(patt.fq) if (PP<65){ for (pp in 1:PP){ # pp <- 1 graphics::rect( xleft=pp-eps, ybottom=0, xright=pp+eps, ytop=patt.fq[pp], col="black" ) } } graphics::par(mar=c(5,4,4,2)+0.1) if(pdf.file=="" & ask) graphics::par(ask=TRUE) if(3==display.nr[length(display.nr)]) graphics::par(ask=FALSE) } ################################################################################ # Plot 4 # ################################################################################ if(4 %in% display.nr){ if(pattern!=""){ if(length(suc)==0) warning("The specified pattern was not achieved.") # if a pattern is specified extract information post.skill <- as.matrix(unique(x$pattern)[suc,grep("post.attr", colnames(x$pattern))])[nrow(x$skill.patt):1] names(post.skill) <- colnames(x$q.matrix)[nrow(x$skill.patt):1] # generate plot graphics::par(mar=c(5,4,4,2)+0.1) graphics::par(mgp=c(3.5,1,0)) graphics::par(yaxt="n") graphics::barplot(post.skill, horiz=TRUE, xlab=paste("Skill probabilities conditional on response pattern\n", pattern,sep=""), xlim=c(0,1), axes=FALSE, cex.lab=1.3, col="gray") graphics::axis(1,at=seq(0,1,0.2)) graphics::abline(v=c(.5-uncertainty,.5+uncertainty),lty=1,col="darkred", lwd=2) graphics::axis(3, at=c( (.5-uncertainty)/2, .5, .5+uncertainty+(1-(.5+uncertainty))/2 ), tick=F, labels=c("not mastered", "unclassified", "mastered"), cex.axis=1.3, mgp=c(3,0,0)) if ( is.null( attributes(x$q.matrix)$skill.labels ) ){ attr( x$q.matrix, "skill.labels") <- colnames(x$q.matrix ) } graphics::text(attributes(x$q.matrix)$skill.labels[length(x$skill.patt):1], x=c(rep(0.01,length(row.names(x$skill.patt)))), y=seq(0.7,0.7+1.2*(length(row.names(x$skill.patt))-1),1.2),col="black", pos=4, cex=1.3) graphics::par(yaxt="s") graphics::par(mar=c(5,4,4,2)+0.1) graphics::par(mgp=c(3,1,0)) if(4==display.nr[length(display.nr)]) graphics::par(ask=FALSE) } } # reset open plot parameter if(pdf.file!="") try(dev.off()) graphics::par(old.par) invisible() }
/scratch/gouwar.j/cran-all/cranData/CDM/R/plot.din.R
## File Name: plot.gdina.R ## File Version: 0.11 plot.gdina <- function( x, ask=FALSE, ... ) { probitem <- x$probitem I <- max( probitem$itemno ) for (ii in 1:I){ pii <- probitem[ probitem$itemno==ii, ] graphics::barplot( pii$prob, ylim=c(0,1), xlab="Skill Pattern", names.arg=pii$skillcomb, main=paste0( "Item ", pii[1,"item" ], " (Rule ", x$rule[ii], ")\n", "Attributes ", pii[1,"partype.attr"] ), ask=ask ) } }
/scratch/gouwar.j/cran-all/cranData/CDM/R/plot.gdina.R
## File Name: plot.gdm.R ## File Version: 0.16 #** S3 plot method for gdm function plot.gdm <- function( x, perstype="EAP", group=1, barwidth=.1, histcol=1, cexcor=3, pchpers=16, cexpers=.7, ... ) { object <- x theta.k <- object$theta.k pi.k <- object$pi.k[,group] Ndim <- ncol(theta.k) mean.trait <- object$mean.trait[,group] sd.trait <- object$sd.trait[,group] cor.trait <- object$correlation.trait[[group]] # extract person parameters person <- object$person[ object$group==group, ] person <- person[, grep( paste0(perstype,"."), colnames(person) ) ] # define plot grid plotgrid <- as.data.frame( expand.grid( 1:Ndim, 1:Ndim )[,c(2,1) ] ) plotgrid$type <- "" plotgrid[ plotgrid[,1]==plotgrid[,2], "type" ] <- "hist" plotgrid[ plotgrid[,1] < plotgrid[,2], "type" ] <- "cornumber" plotgrid[ plotgrid[,1] > plotgrid[,2], "type" ] <- "scatterEAP" PG <- nrow(plotgrid) graphics::par( mfrow=c(Ndim,Ndim) ) for (pp in 1:PG){ if ( paste(plotgrid$type)[pp]=="cornumber"){ plot_gdm_cor_numbers( cor.trait=cor.trait, dim1=plotgrid[pp,1], dim2=plotgrid[pp,2], cexcor=cexcor) } if ( paste(plotgrid$type)[pp]=="scatterEAP"){ plot_gdm_pers( person=person, dim1=plotgrid[pp,1], dim2=plotgrid[pp,2], pchpers=pchpers, cexpers=cexpers, perstype=perstype ) } if ( paste(plotgrid$type)[pp]=="hist"){ plot_gdm_hist( theta.k=theta.k, pi.k=pi.k, object=object, dim=plotgrid[pp,1], group=group, barwidth=barwidth, histcol=histcol, mean.trait=mean.trait, sd.trait=sd.trait ) } } graphics::par( mfrow=c(1,1)) }
/scratch/gouwar.j/cran-all/cranData/CDM/R/plot.gdm.R
## File Name: plot.slca.R ## File Version: 0.09 ################################################### # plot slca plot.slca <- function( x, group=1, ... ) { pi.k <- x$pi.k TP <- nrow(pi.k) xlabels <- seq(1, TP) graphics::barplot( pi.k[,group], xlab="Class", ylab="Probability", names.arg=xlabels, main=paste0("Class Distribution | Group ", group ), ... ) }
/scratch/gouwar.j/cran-all/cranData/CDM/R/plot.slca.R
## File Name: plot_gdm_cor_numbers.R ## File Version: 0.02 #- correlations between dimensions plot_gdm_cor_numbers <- function( cor.trait, dim1, dim2, cexcor) { graphics::plot( c(0,1), c(0,1), type="n", axes=FALSE, xlab="", ylab="") graphics::text( .5, .50, paste0( round( cor.trait[dim1,dim2],3)), cex=cexcor) }
/scratch/gouwar.j/cran-all/cranData/CDM/R/plot_gdm_cor_numbers.R
## File Name: plot_gdm_hist.R ## File Version: 0.01 #-- histogram plot plot_gdm_hist <- function( theta.k, pi.k, object, dim, group, barwidth, histcol, mean.trait, sd.trait ) { dd <- dim a1 <- stats::aggregate( pi.k, list( theta.k[, dd] ), sum ) mainpl <- paste0("Dim", dd, " | M=", round( mean.trait[dd], 3 ), " | SD=",round( sd.trait[dd], 3 ) ) graphics::plot( a1[,1], a1[,2], type="n", xlab=paste0("theta (Dim", dd, ")" ), ylab="Probability", main=mainpl) AA <- nrow(a1) for ( aa in 1:AA){ graphics::rect(xleft=a1[aa,1] - barwidth/2, ybottom=0, xright=a1[aa,1] + barwidth/2, ytop=a1[aa,2], col=histcol) } }
/scratch/gouwar.j/cran-all/cranData/CDM/R/plot_gdm_hist.R
## File Name: plot_gdm_pers.R ## File Version: 0.01 # plot person parameters plot_gdm_pers <- function( person, dim1, dim2, pchpers, cexpers, perstype ) { dd1 <- dim1 dd2 <- dim2 graphics::plot( person[,dd1], person[,dd2], xlab=paste0(perstype, " Dim",dd1), ylab=paste0(perstype, " Dim",dd2), pch=pchpers, cex=cexpers) }
/scratch/gouwar.j/cran-all/cranData/CDM/R/plot_gdm_pers.R
## File Name: plot_item_mastery.R ## File Version: 0.05 plot_item_mastery <- function(object, pch=c(16,17), lty=c(1,2), ...) { UseMethod("plot_item_mastery") }
/scratch/gouwar.j/cran-all/cranData/CDM/R/plot_item_mastery.R
## File Name: plot_item_mastery_main.R ## File Version: 0.25 plot_item_mastery_main <- function(object, pch=c(16,17), lty=c(1,2), ...) { probs <- IRT.irfprob(object) irf_dim <- dim(probs) I <- irf_dim[1] K <- irf_dim[2] TP <- irf_dim[3] if (K>2){ stop("Plot function can only be used for dichotomous data.\n") } graphics::plot(c(1,I), c(0,1.1), type="n", xlab="Item index", ylab="Probability", axes=FALSE, ...) if (I <=10){ item_by <- 1} if (I > 10){ item_by <- 2} if (I > 24){ item_by <- 3} if (I >=50){ item_by <- 5} item_labels <- seq(1,I,by=item_by) graphics::axis(1, at=item_labels) graphics::axis(2, at=seq(0,1,.2) ) graphics::abline(h=1, col="gray") probs_ii <- probs[,2,] for (ll in 1:2){ if (ll==1){ ind_ll <- 1 } else { ind_ll <- TP } prob_plot <- probs_ii[,ind_ll] graphics::lines(1:I, prob_plot, lty=lty[ll]) graphics::points(1:I, prob_plot, pch=pch[ll]) } graphics::legend(x="top", c("Non-masters", "Masters"), lty=lty, pch=pch, horiz=TRUE) } plot_item_mastery.din <- plot_item_mastery_main plot_item_mastery.gdina <- plot_item_mastery_main
/scratch/gouwar.j/cran-all/cranData/CDM/R/plot_item_mastery_main.R
## File Name: predict.CDM.R
## File Version: 0.07


################################################
# predict method in CDM package
predict.din <- function( object, group=1, ... )
{
    dat <- as.matrix( object$dat )
    res <- IRT.predict( object, dat=dat, group=group )
    return(res)
}
#################################################
predict.gdm <- predict.din
predict.gdina <- predict.din
predict.mcdina <- predict.din
predict.slca <- predict.din
##################################################
/scratch/gouwar.j/cran-all/cranData/CDM/R/predict.CDM.R
## File Name: prep_data_long_format.R
## File Version: 0.03


prep_data_long_format <- function(data)
{
    data_long <- cdm_rcpp_data_prep_long_format(data=data)
    class(data_long) <- c("matrix", "data_long_format")
    return(data_long)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/prep_data_long_format.R
## File Name: print.din.R
## File Version: 2.10


################################################################################
# print method for objects of class "din"                                     #
################################################################################
print.din <- function(x, ... )
{
    cat("Estimation of Mixed DINA/DINO Model\n\n")
    d1 <- utils::packageDescription("CDM")
    cat( paste( d1$Package, " ", d1$Version, " (", d1$Date, ")", sep=""), "\n" )
    cat("\nCall:\n", paste(deparse(x$call), sep="\n", collapse="\n"), "\n\n", sep="")
    #*** parameters
    cat(paste0("Number of cases=", x$I, "\n") )
    cat(paste0("Number of items=", ncol(x$data), "\n") )
    cat(paste0("Number of skill dimensions=", ncol(x$q.matrix), "\n") )
    cat(paste0("Number of skill classes=", nrow(x$attribute.patt), "\n") )
    cat(paste0("Number of parameters=", sum(x$Npars), "\n") )
    cat(paste0("  # item parameters=", x$Npars$itempars, "\n") )
    cat(paste0("  # skill distribution parameters=", x$Npars$skillpars, "\n") )
    #*** likelihood
    cat( paste0( "\nLog-Likelihood=", round( x$loglike,2 ), "\n") )
    #*** information criteria
    cat( paste0( "AIC=", round( x$AIC,0 ), "\n") )
    cat( paste0( "BIC=", round( x$BIC,0 ), "\n") )
    invisible(x)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/print.din.R
## File Name: print.gdina.R
## File Version: 0.10


################################################################################
# print method class gdina                                                     #
################################################################################
print.gdina <- function(x, ... )
{
    cat("Estimation of GDINA Model\n\n")
    d1 <- utils::packageDescription("CDM")
    cat( paste( d1$Package, " ", d1$Version, " (", d1$Date, ")", sep=""), "\n" )
    cat("\nCall:\n", paste(deparse(x$call), sep="\n", collapse="\n"), "\n\n", sep="")
    #*** parameters
    cat(paste0("Number of cases=", x$N, "\n") )
    cat(paste0("Number of groups=", x$G, "\n") )
    cat(paste0("Number of items=", ncol(x$data), "\n") )
    cat(paste0("Number of skill dimensions=", ncol(x$q.matrix), "\n") )
    cat(paste0("Number of skill classes=", nrow(x$attribute.patt), "\n") )
    cat(paste0("Number of parameters=", x$Npars, "\n") )
    cat(paste0("  # item parameters=", x$Nipar, "\n") )
    cat(paste0("  # skill distribution parameters=", x$Nskillpar, "\n") )
    #*** likelihood
    cat( paste0( "\nLog-Likelihood=", round( x$loglike,2 ), "\n") )
    #*** information criteria
    cat( paste0( "AIC=", round( x$AIC,0 ), "\n") )
    cat( paste0( "BIC=", round( x$BIC,0 ), "\n") )
    invisible(x)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/print.gdina.R
## File Name: print.gdm.R
## File Version: 0.09


################################################################################
# print method class gdm                                                       #
################################################################################
print.gdm <- function(x, ... )
{
    cat("Estimation of general diagnostic model\n\n")
    d1 <- utils::packageDescription("CDM")
    cat( paste( d1$Package, " ", d1$Version, " (", d1$Date, ")", sep=""), "\n" )
    cat("\nCall:\n", paste(deparse(x$call), sep="\n", collapse="\n"), "\n\n", sep="")
    #*** parameters
    cat(paste0("Number of cases=", x$N, "\n") )
    cat(paste0("Number of groups=", x$G, "\n") )
    cat(paste0("Number of items=", ncol(x$data), "\n") )
    cat(paste0("Number of skill dimensions=", dim(x$Qmatrix)[3], "\n") )
    cat(paste0("Number of skill classes=", nrow(x$pi.k), "\n") )
    cat(paste0("Number of estimated parameters=", x$Npars, "\n") )
    cat(paste0("  # item parameters=", x$ic$itempars, "\n") )
    cat(paste0("  # skill distribution parameters=", x$ic$traitpars, "\n") )
    #*** likelihood
    cat( paste0( "\nLog-Likelihood=", round( x$loglike,2 ), "\n") )
    #*** information criteria
    cat( paste0( "AIC=", round( x$AIC,0 ), "\n") )
    cat( paste0( "BIC=", round( x$BIC,0 ), "\n") )
    invisible(x)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/print.gdm.R
## File Name: print.mcdina.R
## File Version: 0.10


################################################################################
# print method class mcdina                                                    #
################################################################################
print.mcdina <- function(x, ... )
{
    cat("Estimation of multiple choice DINA model\n\n")
    d1 <- utils::packageDescription("CDM")
    cat( paste( d1$Package, " ", d1$Version, " (", d1$Date, ")", sep=""), "\n" )
    cat("\nCall:\n", paste(deparse(x$call), sep="\n", collapse="\n"), "\n\n", sep="")
    #*** parameters
    cat(paste0("Number of cases=", x$I, "\n") )
    cat(paste0("Number of groups=", x$G, "\n") )
    cat(paste0("Number of items=", ncol(x$dat), "\n") )
    cat(paste0("Number of skill dimensions=", ncol(x$q.matrix) - 2, "\n") )
    cat(paste0("Number of skill classes=", nrow(x$attribute.patt), "\n") )
    cat(paste0("Number of parameters=", x$Npars, "\n") )
    cat(paste0("  # item parameters=", x$ic$itempars, "\n") )
    cat(paste0("  # skill distribution parameters=", x$ic$traitpars, "\n") )
    #*** likelihood
    cat( paste0( "\nLog-Likelihood=", round( x$loglike,2 ), "\n") )
    #*** information criteria
    cat( paste0( "AIC=", round( x$AIC,0 ), "\n") )
    cat( paste0( "BIC=", round( x$BIC,0 ), "\n") )
    invisible(x)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/print.mcdina.R
## File Name: print.slca.R
## File Version: 0.10


################################################################################
# print method class slca                                                      #
################################################################################
print.slca <- function(x, ... )
{
    cat("Estimation of structured latent class analysis\n\n")
    d1 <- utils::packageDescription("CDM")
    cat( paste( d1$Package, " ", d1$Version, " (", d1$Date, ")", sep=""), "\n" )
    cat("\nCall:\n", paste(deparse(x$call), sep="\n", collapse="\n"), "\n\n", sep="")
    #*** parameters
    cat(paste0("Number of cases=", x$N, "\n") )
    cat(paste0("Number of groups=", x$G, "\n") )
    cat(paste0("Number of items=", ncol(x$data), "\n") )
    cat(paste0("Number of skill classes=", nrow(x$pi.k), "\n") )
    cat(paste0("Number of parameters=", x$Npars, "\n") )
    cat(paste0("  # item parameters=", x$ic$itempars, "\n") )
    cat(paste0("  # skill distribution parameters=", x$ic$traitpars, "\n") )
    #*** likelihood
    cat( paste0( "\nLog-Likelihood=", round( x$loglike,2 ), "\n") )
    #*** information criteria
    cat( paste0( "AIC=", round( x$AIC,0 ), "\n") )
    cat( paste0( "BIC=", round( x$BIC,0 ), "\n") )
    invisible(x)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/print.slca.R
## File Name: print.summary.din.R
## File Version: 1.29


################################################################################
# print method for objects of class "summary.din"                             #
################################################################################
print.summary.din <- function(x, ...){

    # Call: generic
    # Input: object of class summary.din
    # Print: prints the named list, of an object of class summary.din

################################################################################
# console output                                                               #
################################################################################

#    if(is.null(x$log.file)){
    d <- utils::packageDescription("CDM")
    packageStartupMessage(paste(d$Package," ", d$Version," (Built ",d$Date,")",sep=""))
    # cat("Call:\n", x$CALL, "\n")
    cat("Call:\n", x$call, "\n\n")
    cat( "Date of Analysis:", paste( x$end.analysis ), "\n" )
    cat("Computation Time:", print( x$end.analysis - x$start.analysis),"\n\n")
    #    "\nItem discrimination index:\n")
    #    print(as.table(x$IDI))
    #    cat("\nSummary of skill pattern distribution:\n")
    #    print(x$SKILL.CLASS.PROB)
    cat("\nDeviance","=", x$deviance, " | Log-Likelihood=",
            round( x$din.object$loglike,3), "\n")
    #*** iterations
    cat( "\nNumber of iterations:", x$din.object$iter, "\n")
    if ( ! x$din.object$converged ){
        cat("Maximum number of iterations was reached.\n")
    }
    #***
    cat( "\nNumber of item parameters:", x$Npars[,1], "\n")
    cat( "Number of skill class parameters:", x$Npars[,2], "\n")
    cat("\nInformation criteria:",
        "\n  AIC","=", x$AIC,
        "\n  BIC","=", x$BIC, "\n")
    cat("\nMean of RMSEA item fit:", round( x$din.object$mean.rmsea,3 ), "\n")
    cat("\nItem parameters\n")
    obji <- x$item
    rownames(obji) <- NULL
    print( obji, digits=3 )
    cat("\nMarginal skill probabilities:\n")
    print(x$din.object$skill.patt, digits=4)
    # tetrachoric skill correlations
    if( ncol(x$din.object$q.matrix ) > 1 ){
        obji <- skill.cor(x$din.object)$cor.skills
        cat("\nTetrachoric correlations among skill dimensions\n")
        print( obji, digits=4 )
    }
    cat("\nSkill Pattern Probabilities \n\n")
    xt <- round( x$din.object$attribute.patt[,1], digits=5 )
    names(xt) <- rownames( x$din.object$attribute.patt )
    print(xt)
#    }else{

################################################################################
# logfile output                                                               #
################################################################################

#    tr <- try({sink(file=x$log.file)})
    if (FALSE){
        tr <- try({sink(file=paste0( x$log.file, "__SUMMARY.Rout") )})
        if(is.null(tr)){
            gowidth <- getOption("width")
            options(width=10000)
            cat("#.......................................................\n")
            d <- utils::packageDescription("CDM")
            # c1 <- citation("CDM")
            cat(paste( "This is CDM package version ", d$Version,
                    " (", d$Date, ")\n",sep=""))
            # print(c1)
            cat("\n")
            cat("#-------------------------\n")
            cat("# SUMMARY OF ANALYSIS\n")
            cat( "Start:", paste(x$start.analysis ))
            cat( "\nEnd  :", paste(x$end.analysis ))
            cat("\n#-------------------------\n\n")
            cat("Model rule:",x$display, "\n")
            cat("Number of observations:",nrow(x$data), "\n")
            cat("Number of items:",nrow(x$q.matrix), "\n")
            cat("Labels of items:", paste(rownames(x$q.matrix), collapse=", "), "\n")
            cat("Number of skills:",ncol(x$q.matrix), "\n")
            cat("Labels of skills:",
                    paste(attributes(x$q.matrix)$skill.labels, collapse=", "), "\n")
            cat("Q-Matrix:\n\n")
            print(data.frame(x$q.matrix))
            cat("\n#-------------------------\n")
            cat("# SUMMARY OF MODEL FIT\n")
            cat("#-------------------------\n\n")
            cat("Loglikelihood:", x$loglike, "\n")
            cat("AIC:", x$AIC, "\n")
            cat("BIC:", x$BIC, "\n\n")
            cat("Item discrimination index:\n\n")
            print(as.table(x$IDI))
            cat("\nSummary of skill pattern distribution:\n\n")
            print(x$SKILL.CLASS.PROB)
            cat("\n#-------------------------\n")
            cat("SUMMARY OF MODEL RESULTS\n")
            cat("#-------------------------\n\n")
            cat("Item parameter estimates:\n\n")
            print(x$coef)
            cat("\nSkill probability:\n\n")
            print(x$skill.patt)
            cat("\nSkill pattern occurrence probability:\n\n")
            print(x$attribute.patt)
            cat("\nSkill class assignment and skill assignment probabilities for respective response pattern:\n\n")
            print(cbind("freq"=as.vector(table(x$pattern[,1])), unique(x$pattern)))
            sink()
            options(width=gowidth)
            cat("\nExtensive summary written to log file:\n", x$log.file,"\n")
        }else cat("\nError while trying to write summary to log file:\n", tr[1])
    }
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/print.summary.din.R
## File Name: reduced_skillspace_beta_2_probs.R
## File Version: 0.01


reduced_skillspace_beta_2_probs <- function( Z, beta )
{
    res <- cdm_sumnorm( exp( Z %*% beta )[,1] )
    return(res)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/reduced_skillspace_beta_2_probs.R
## File Name: reglca.R
## File Version: 0.8296


reglca <- function( dat, nclasses, weights=NULL, group=NULL, regular_type="scad",
        regular_lam=0, sd_noise_init=1, item_probs_init=NULL, class_probs_init=NULL,
        random_starts=1, random_iter=20, conv=1E-5, h=1E-4, mstep_iter=10,
        maxit=1000, verbose=TRUE, prob_min=.0001)
{
    CALL <- match.call()
    s1 <- Sys.time()
    est_type <- NULL

    #--- data processing
    TP <- nclasses
    res <- reglca_proc_data( dat=dat, weights=weights, group=group )
    dat <- res$dat
    dat_resp <- res$dat_resp
    resp.ind.list <- res$resp.ind.list
    dat0 <- res$dat0
    I <- res$I
    weights <- res$weights
    N <- res$N
    W <- res$W
    K <- res$K
    dat.ind2 <- res$dat.ind2
    G <- res$G
    group <- res$group
    groups_unique <- res$groups_unique
    ind_groups <- res$ind_groups
    N_groups <- res$N_groups

    #--- initial probabilities
    res <- reglca_init_parameters( nclasses=nclasses, dat0=dat0,
                sd_noise_init=sd_noise_init, item_probs_init=item_probs_init,
                class_probs_init=class_probs_init, random_starts=random_starts,
                G=G, est_type=est_type)
    class_probs_random_starts <- class_probs <- res$class_probs
    item_probs_random_starts <- item_probs <- res$item_probs
    est_type <- res$est_type
    random_starts <- res$random_starts
    use_random_starts <- res$use_random_starts
    xsi <- res$xsi
    alpha <- alpha_init <- 1
    use_dpm <- FALSE

    #--- object random starts
    control_random_starts <- list( random_starts=random_starts,
            opt_fct=rep(NA, random_starts), item_probs=list(), class_probs=list(),
            random_iter=random_iter, random_start_temp=1,
            use_random_starts=use_random_starts )
    if ( use_random_starts ){
        item_probs <- item_probs_random_starts[[1]]
        class_probs <- class_probs_random_starts[[1]]
    }

    #--- settings
    cd_steps <- mstep_iter
    max_increment <- max_increment0 <- .2
    devchange <- 1E10
    opt_fct <- like.new <- loglike <- -1E300
    iter <- 1
    iterate <- TRUE

    #*************** EM algorithm ************************
    while (iterate){

        # z0 <- Sys.time()
        item_probs0 <- item_probs
        class_probs0 <- class_probs
        loglikeold <- like.new

        #--- arrange probabilities
        pjM <- array( 0, dim=c(I,2,nclasses) )
        pjM[,2,] <- item_probs
        pjM[,1,] <- 1 - item_probs

        #--- calculate individual likelihood
        p.xi.aj <- reglca_calc_individual_likelihood( N=N, nclasses=nclasses,
                        pjM=pjM, dat=dat, I=I, resp.ind.list=resp.ind.list )

        #--- calculate posterior
        res <- reglca_calc_individual_posterior( class_probs=class_probs,
                    p.xi.aj=p.xi.aj, N=N, nclasses=nclasses, weights=weights,
                    W=W, G=G, ind_groups=ind_groups, N_groups=N_groups )
        p.aj.xi <- res$p.aj.xi
        class_probs <- res$class_probs

        #--- smoothing with Dirichlet process mixture
        if (use_dpm){
            res <- reglca_dpm_smoothing( p.aj.xi=p.aj.xi, weights=weights,
                        nclasses=nclasses, alpha=alpha )
            alpha <- res$alpha
            class_probs <- res$class_probs
        }

        #--- calculate expected counts
        res <- reglca_calc_counts( weights=weights, dat=dat, dat.resp=dat_resp,
                    p.aj.xi=p.aj.xi, K=K, n.ik=n.ik, TP=TP, I=I,
                    dat.ind2=dat.ind2, ind_groups=ind_groups, G=G )
        n.ik <- res$n.ik
        N.ik <- res$N.ik

        #--- item parameter estimation
        res <- reglca_mstep_item_parameters( I=I, n.ik=n.ik, N.ik=N.ik, h=h,
                    mstep_iter=mstep_iter, conv=conv, regular_lam=regular_lam,
                    regular_type=regular_type, cd_steps=cd_steps,
                    item_probs=item_probs, max_increment=max_increment, iter=iter,
                    G=G, prob_min=prob_min, est_type=est_type, xsi=xsi)
        item_probs <- res$item_probs
        penalty <- res$penalty
        n_par <- res$n_par
        n_reg <- res$n_reg
        max_increment <- res$max_increment
        n_reg_item <- res$n_reg_item
        opt_fct_item_sum <- res$opt_fct_item_sum
        max.par.change <- max( max( abs(item_probs - item_probs0) ),
                            max( abs( class_probs - class_probs0) ) )

        #--- calculate deviance
        res <- reglca_calc_deviance( p.xi.aj=p.xi.aj, class_probs=class_probs,
                    weights=weights, loglike=loglike, penalty=penalty,
                    opt_fct=opt_fct, ind_groups=ind_groups, G=G, N_groups=N_groups)
        like.new <- res$like.new
        likediff <- res$likediff
        opt_fct <- res$opt_fct
        opt_fct_change <- res$opt_fct_change
        loglike <- like.new

        #--- display progress
        res <- reglca_progress_em_algorithm( like.new=like.new,
                    loglikeold=loglikeold, max.par.change=max.par.change,
                    iter=iter, progress=verbose, penalty=penalty, opt_fct=opt_fct,
                    opt_fct_change=opt_fct_change, n_reg=n_reg,
                    control_random_starts=control_random_starts,
                    opt_fct_item_sum=opt_fct_item_sum )
        utils::flush.console()
        iter <- iter + 1  # new iteration number
        devchange <- abs( 2*(like.new-loglikeold) )
        if ( max.par.change < conv ){ iterate <- FALSE }
        if ( devchange < conv ){ iterate <- FALSE }
        if ( iter > maxit ){ iterate <- FALSE }

        #--- handle random starts
        res <- reglca_monitor_random_starts(
                    control_random_starts=control_random_starts, iter=iter,
                    opt_fct=opt_fct, item_probs=item_probs,
                    class_probs=class_probs, max_increment0=max_increment0,
                    max_increment=max_increment,
                    item_probs_random_starts=item_probs_random_starts,
                    class_probs_random_starts=class_probs_random_starts )
        control_random_starts <- res$control_random_starts
        max_increment <- res$max_increment
        iter <- res$iter
        item_probs <- res$item_probs
        class_probs <- res$class_probs
        if (control_random_starts$use_random_starts){
            iterate <- TRUE
        }
    }
    #******* end EM algorithm

    #--- information algorithm
    iter <- iter - 1
    converged <- ( iter < maxit )

    #--- process results
    rownames(item_probs) <- colnames(dat)
    colnames(item_probs) <- paste0("Class",1:nclasses)
    if (G==1){
        names(class_probs) <- colnames(item_probs)
    } else {
        rownames(class_probs) <- colnames(item_probs)
        colnames(class_probs) <- paste0("Group_", groups_unique)
    }
    item <- data.frame("item"=colnames(dat), "n_reg"=n_reg_item, item_probs )
    nd <- dim(n.ik)
    if (G==1){
        n.ik <- array(n.ik, dim=c(nd,1) )
    }

    #--- information criteria
    res <- reglca_calc_ic( loglike=loglike, nclasses=nclasses, I=I, N=N,
                n_reg=n_reg, G=G )
    Npars <- res$Npars
    AIC <- res$AIC
    BIC <- res$BIC
    CAIC <- res$CAIC
    Nskillpar <- res$Nskillpar
    Nipar <- res$Nipar
    deviance <- res$deviance
    ic <- res$ic

    #--- output
    time <- list(s1=s1, s2=Sys.time() )
    res <- list( item_probs=item_probs, class_probs=class_probs, p.aj.xi=p.aj.xi,
                p.xi.aj=p.xi.aj, loglike=-deviance/2, deviance=deviance, AIC=AIC,
                BIC=BIC, CAIC=CAIC, Npars=Npars, Nskillpar=Nskillpar, G=G,
                group=group, groups_unique=groups_unique, N_groups=N_groups,
                n.ik=n.ik, Nipar=Nipar, n_reg=n_reg, n_reg_item=n_reg_item,
                item=item, pjk=pjM, regular_type=regular_type,
                regular_lam=regular_lam, alpha=alpha, penalty=-penalty,
                opt_fct=opt_fct, dat0=dat0, dat=dat, dat.resp=dat_resp,
                weights=weights, N=N, W=W, I=I, nclasses=nclasses, iter=iter,
                maxit=maxit, converged=converged, time=time, call=CALL, ic=ic )
    class(res) <- "reglca"
    return(res)
}

# cat("calc_like ") ; z1 <- Sys.time(); print(z1-z0) ; z0 <- z1
/scratch/gouwar.j/cran-all/cranData/CDM/R/reglca.R
## File Name: reglca_bound_classprobs.R
## File Version: 0.04


#-- bound skill class probabilities
reglca_bound_classprobs <- function(class_probs, min_class_probs=1e-4)
{
    if (any(is.na(class_probs))){
        TP <- length(class_probs)
        class_probs <- rep(1/TP, TP)
    }
    ind <- class_probs < min_class_probs
    if (sum(ind)>0){
        class_probs[ind] <- min_class_probs
        class_probs <- cdm_sumnorm(class_probs)
    }
    return(class_probs)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/reglca_bound_classprobs.R
## File Name: reglca_calc_counts.R
## File Version: 0.09


#----- calculation of expected counts
reglca_calc_counts <- function(weights, dat, dat.resp, p.aj.xi, K, n.ik, TP, I,
        dat.ind2, ind_groups, G )
{
    #--- single group
    if (G==1){
        N.ik <- matrix( 0, nrow=TP, ncol=I)
        n.ik <- array( 0, dim=c(TP,I,K+1) )
        for (kk in 1:(K+1)){
            dkk2 <- dat.ind2[[kk]]
            n.ik[,,kk] <- crossprod( p.aj.xi, dkk2 )
            N.ik <- N.ik + n.ik[,,kk]
        }
    }
    #--- multiple groups
    if (G>1){
        N.ik <- array( 0, dim=c(TP, I, G) )
        n.ik <- array( 0, dim=c(TP,I,K+1, G) )
        for (gg in 1:G){
            ind_gg <- ind_groups[[gg]]
            for (kk in 1:(K+1)){
                dkk2 <- dat.ind2[[kk]]
                n.ik[,,kk,gg] <- crossprod( p.aj.xi[ind_gg,], dkk2[ ind_gg, ] )
                N.ik[,,gg] <- N.ik[,,gg] + n.ik[,,kk,gg]
            }
        }
    }
    #----- output
    res <- list(n.ik=n.ik, N.ik=N.ik)
    return(res)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/reglca_calc_counts.R
## File Name: reglca_calc_deviance.R
## File Version: 0.11


reglca_calc_deviance <- function( p.xi.aj, class_probs, weights, loglike,
        penalty=0, opt_fct=0, ind_groups, G, N_groups )
{
    eps <- 1E-30
    p.xi.aj[ p.xi.aj > 1 ] <- 1 - eps
    p.xi.aj[ p.xi.aj < 0 ] <- eps
    N <- nrow(p.xi.aj)
    if (G==1){
        class_probs_mat <- cdm_matrix2( class_probs, nrow=N )
    } else {
        class_probs_mat <- matrix(NA, nrow=N, ncol=nrow(class_probs) )
        for (gg in 1:G){
            ind_gg <- ind_groups[[gg]]
            class_probs_mat[ ind_gg, ] <- cdm_matrix2( class_probs[,gg],
                                                nrow=N_groups[gg] )
        }
    }
    l1 <- rowSums( p.xi.aj * class_probs_mat ) + eps
    l1[ l1 < 0 ] <- eps
    like.new <- sum( log( l1 ) * weights)
    likediff <- abs( loglike - like.new )
    #--- regularization
    opt_fct_old <- opt_fct
    opt_fct <- -2*like.new + 2*penalty
    opt_fct_change <- - opt_fct + opt_fct_old
    #--- OUTPUT
    res <- list( like.new=like.new, likediff=likediff, opt_fct=opt_fct,
                opt_fct_change=opt_fct_change)
    return(res)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/reglca_calc_deviance.R
## File Name: reglca_calc_ic.R
## File Version: 0.07


reglca_calc_ic <- function( loglike, nclasses, I, N, n_reg, G)
{
    Nskillpar <- G*(nclasses - 1)
    Nipar <- I*nclasses - n_reg
    Npars <- Nipar + Nskillpar
    aic <- -2*loglike + 2 * Npars
    bic <- -2*loglike + Npars*log(N)
    caic <- -2*loglike + ( log(N) + 1 ) * Npars
    deviance <- -2*loglike
    #* create object ic
    ic <- list(deviance=deviance, AIC=aic, BIC=bic, CAIC=caic)
    #---- OUTPUT
    res <- list(Npars=Npars, AIC=aic, BIC=bic, CAIC=caic, Nskillpar=Nskillpar,
                Nipar=Nipar, deviance=deviance, n_reg=n_reg, ic=ic )
    return(res)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/reglca_calc_ic.R
## File Name: reglca_calc_individual_likelihood.R
## File Version: 0.03


reglca_calc_individual_likelihood <- function(N, nclasses, pjM, dat, I,
        resp.ind.list)
{
    h1 <- matrix( 1, nrow=N, ncol=nclasses )
    p.xi.aj <- cdm_calc_posterior( rprobs=pjM, gwt=h1, resp=dat, nitems=I,
                    resp.ind.list=resp.ind.list, normalization=FALSE,
                    thetasamp.density=NULL, snodes=0 )$hwt
    #--- OUTPUT
    return(p.xi.aj)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/reglca_calc_individual_likelihood.R
## File Name: reglca_calc_individual_posterior.R
## File Version: 0.116


reglca_calc_individual_posterior <- function(class_probs, p.xi.aj, N, nclasses,
        weights, W, G, ind_groups, N_groups, min_class_probs=1e-3)
{
    #--- single group
    if (G==1){
        p.aj.xi <- cdm_matrix2( class_probs, nrow=N ) * p.xi.aj
        p.aj.xi <- p.aj.xi / rowSums( p.aj.xi )
        class_probs <- colSums( p.aj.xi * weights / W )
        class_probs <- reglca_bound_classprobs(class_probs=class_probs,
                            min_class_probs=min_class_probs)
    }
    #--- multiple groups
    if (G>1){
        p.aj.xi <- matrix(1, nrow=N, ncol=nclasses)
        for (gg in 1:G){
            p.aj.xi[ ind_groups[[gg]], ] <- cdm_matrix2( class_probs[,gg],
                                                nrow=N_groups[gg] )
        }
        p.aj.xi <- p.aj.xi * p.xi.aj
        p.aj.xi <- p.aj.xi / rowSums( p.aj.xi )
        for (gg in 1:G){
            ind_gg <- ind_groups[[gg]]
            weights_gg <- weights[ind_gg]
            class_probs[,gg] <- colSums( p.aj.xi[ind_gg,] * weights_gg / W[gg] )
            class_probs[,gg] <- reglca_bound_classprobs(
                                    class_probs=class_probs[,gg],
                                    min_class_probs=min_class_probs)
        }
    }
    #---- OUTPUT
    res <- list( p.aj.xi=p.aj.xi, class_probs=class_probs)
    return(res)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/reglca_calc_individual_posterior.R
## File Name: reglca_calc_probs.R
## File Version: 0.17


reglca_calc_probs <- function(parm, eps=1E-10)
{
    probs <- cumsum(parm)
    M1 <- max(probs)
    M0 <- min(probs)
    if ( ( M1>=1-eps ) | ( M0<=eps ) ){
        M1a <- max(M1, 1)
        M0a <- min(M0, 0)
        probs <- ( probs - M0a + eps / 2 ) / ( M1a - M0a + eps )
    }
    return(probs)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/reglca_calc_probs.R
## File Name: reglca_dpm_smoothing.R
## File Version: 0.06


reglca_dpm_smoothing <- function( p.aj.xi, weights, nclasses, alpha,
        dpm_maxit=10, dpm_conv=1E-4 )
{
    freq_classes <- colSums( p.aj.xi * weights )
    # update vh
    iterate <- TRUE
    eps <- 1E-20
    iter <- 0
    vh <- rep(1,nclasses)
    while( iterate ){
        alpha0 <- alpha
        vh0 <- vh
        for (tt in 1:nclasses){
            vh[tt] <- freq_classes[tt] /
                        ( sum( freq_classes[ seq(tt,nclasses) ] ) + alpha - 1 )
        }
        ind <- which( vh > 1 - eps )
        if ( length(ind)>0){
            vh[ seq( ind, nclasses) ] <- 1 - eps
        }
        # update alpha
        alpha <- ( 1 - nclasses ) / sum( log( 1 - vh[ seq(1, nclasses - 1 ) ] ) )
        iter <- iter + 1
        parm_change <- max( c( abs( alpha - alpha0 ), abs( vh - vh0 ) ))
        if ( iter==dpm_maxit ){ iterate <- FALSE }
        if ( parm_change < dpm_conv ){ iterate <- FALSE }
    }
    class_probs <- dpm_calc_probs( vh=vh )
    #--- output
    res <- list( alpha=alpha, class_probs=class_probs)
    return(res)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/reglca_dpm_smoothing.R
## File Name: reglca_fit_probabilities.R
## File Version: 0.4416


reglca_fit_probabilities <- function( freq, pi_class, lambda, parm_init=NULL,
        regular_type="scad", h=1E-4, maxit=100, conv=1E-5, cd_steps=5,
        max_increment=1, verbose=TRUE, prob_min=0, ii=NULL, iter=NULL, xsi=NULL,
        est_type="CD" )
{
    #--- order frequencies
    NP <- length(freq)
    freq_index <- data.frame(index=1:NP, freq=freq, pi_class=pi_class )
    freq_index <- freq_index[ order(freq_index$freq), ]
    freq <- freq_index$freq
    pi_class <- freq_index$pi_class
    C <- pi_class*freq
    W <- pi_class*(1-freq)
    #--- init parameters
    if ( is.null(parm_init) ){
        parm_init <- c( freq[1], diff(freq) )
    }
    parm <- parm_init
    iter <- 0
    iterate <- TRUE
    #---- begin iterations
    while(iterate){
        res <- reglca_fit_probabilities_fit_function( parm=parm, lambda=lambda,
                    C=C, W=W, regular_type=regular_type )
        ll <- res$ll
        pen <- res$pen
        fit_fct <- res$fit_fct
        parm_old <- parm
        for (pp in 1:NP){
            parm <- reglca_update_parameter( parm=parm, pp=pp, C=C, W=W, h=h,
                        lambda=lambda, regular_type=regular_type,
                        cd_steps=cd_steps, conv=conv, max_increment=max_increment,
                        vt=NULL, prob_min=prob_min, ii=ii, iter=iter)
        }
        #-- normalize probabilities
        parm <- reglca_normalize_probabilities(parm=parm)
        parchange <- max( abs( parm - parm_old ))
        if (iter > maxit){ iterate <- FALSE }
        if (parchange < conv){ iterate <- FALSE }
        iter <- iter + 1
        if (verbose){
            cat( paste0("Iteration ", iter, " | Max. parm. change=",
                    round( parchange, 6), " | Fit function=",
                    round( fit_fct, 6), "\n") )
            utils::flush.console()
        }
    }
    freq_index$fitted <- cumsum(parm)
    probs <- rep(0,NP)
    probs[ freq_index$index ] <- freq_index$fitted
    n_par <- sum( abs(parm) > 1E-10 )
    #--- output
    res <- list( parm=parm, n_par=n_par, probs=probs, ll=ll, pen=pen,
                fit_fct=fit_fct)
    return(res)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/reglca_fit_probabilities.R
## File Name: reglca_fit_probabilities_fit_function.R
## File Version: 0.07


reglca_fit_probabilities_fit_function <- function( parm, lambda, C, W,
        regular_type="scad")
{
    probs0 <- reglca_calc_probs(parm=parm)
    ll0 <- reglca_freq_ll( x=probs0, C=C, W=W )
    pen0 <- - sum( cdm_penalty_values(x=parm[-1], regular_type=regular_type,
                        regular_lam=lambda) )
    opt0 <- ll0 + pen0
    #--- output
    res <- list( ll=ll0, pen=pen0, fit_fct=opt0)
    return(res)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/reglca_fit_probabilities_fit_function.R
## File Name: reglca_freq_ll.R
## File Version: 0.161


reglca_freq_ll <- function(x, C, W, eps=1e-20)
{
    res <- sum( C * cdm_log(x, eps) + W * cdm_log( 1-x, eps) )
    return(res)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/reglca_freq_ll.R
## File Name: reglca_init_parameters.R
## File Version: 0.168


reglca_init_parameters <- function( nclasses, dat0, sd_noise_init,
        item_probs_init, class_probs_init, random_starts, G, est_type=NULL)
{
    if (is.null(est_type)){
        est_type <- "CD"
    }
    use_random_starts <- FALSE
    means <- colMeans(dat0, na.rm=TRUE )
    I <- ncol(dat0)
    #--- initial class probabilities
    if ( is.null(class_probs_init) ){
        class_probs <- reglca_init_parameters_class_probs( nclasses=nclasses,
                            sd_noise_init=sd_noise_init, G=G)
    } else {
        class_probs <- class_probs_init
        random_starts <- 1
    }
    #--- item probabilities
    qmeans <- stats::qnorm(means)
    if ( is.null(item_probs_init) ){
        item_probs <- reglca_init_parameters_item_probs( qmeans=qmeans, I=I,
                            nclasses=nclasses, sd_noise_init=sd_noise_init,
                            parm_range=1 )
    } else {
        item_probs <- item_probs_init
        random_starts <- 1
    }
    #- transformation into logit parameters
    xsi <- NULL
    if (est_type=="DIFF"){
        xsi <- stats::qlogis(p=item_probs)
    }
    #--- item probabilities in case of random starts
    if (random_starts > 1){
        item_probs <- list()
        sd_noise_init <- max( sd_noise_init, .01 )
        for (rr in 1:random_starts){
            item_probs[[rr]] <- reglca_init_parameters_item_probs( qmeans=qmeans,
                                    I=I, nclasses=nclasses,
                                    sd_noise_init=sd_noise_init, parm_range=1 )
        }
    }
    #--- class probabilities in case of random starts
    if (random_starts > 1){
        class_probs <- list()
        for (rr in 1:random_starts){
            class_probs[[rr]] <- reglca_init_parameters_class_probs(
                                    nclasses=nclasses,
                                    sd_noise_init=sd_noise_init, G=G)
        }
        use_random_starts <- TRUE
    }
    #--- output
    res <- list( class_probs=class_probs, item_probs=item_probs,
                random_starts=random_starts, use_random_starts=use_random_starts,
                est_type=est_type, xsi=xsi )
    return(res)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/reglca_init_parameters.R
## File Name: reglca_init_parameters_class_probs.R
## File Version: 0.07


reglca_init_parameters_class_probs <- function( nclasses, sd_noise_init, G)
{
    parm <- stats::qnorm( rep( 1/nclasses, nclasses ) )
    parm <- parm + stats::rnorm( nclasses, sd=sd_noise_init )
    class_probs <- cdm_sumnorm( stats::pnorm( parm ) )
    if (G > 1){
        class_probs <- matrix( class_probs, nrow=nclasses, ncol=G, byrow=FALSE)
    }
    return(class_probs)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/reglca_init_parameters_class_probs.R
## File Name: reglca_init_parameters_item_probs.R
## File Version: 0.02


reglca_init_parameters_item_probs <- function( qmeans, I, nclasses,
        sd_noise_init, parm_range=1 )
{
    I <- length(qmeans)
    item_probs <- matrix( NA, nrow=I, ncol=nclasses)
    for (ii in 1:I){
        item_probs[ii,] <- qmeans[ii] + parm_range*seq(-1,1, length=nclasses) +
                                stats::rnorm( nclasses, mean=0, sd=sd_noise_init )
    }
    item_probs <- stats::pnorm( item_probs )
    return(item_probs)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/reglca_init_parameters_item_probs.R
## File Name: reglca_monitor_random_starts.R
## File Version: 0.06


reglca_monitor_random_starts <- function( control_random_starts, iter, opt_fct,
        item_probs, class_probs, max_increment0, max_increment,
        item_probs_random_starts, class_probs_random_starts )
{
    if ( control_random_starts$use_random_starts ){
        random_starts <- control_random_starts$random_starts
        if (iter > control_random_starts$random_iter ){
            rr0 <- control_random_starts$random_start_temp
            control_random_starts$opt_fct[rr0] <- opt_fct
            control_random_starts$item_probs[[rr0]] <- item_probs
            control_random_starts$class_probs[[rr0]] <- class_probs
            control_random_starts$max_increment[[rr0]] <- max_increment
            rr <- rr0 + 1
            if (rr > random_starts){
                control_random_starts$use_random_starts <- FALSE
                ind <- which.min(control_random_starts$opt_fct)[1]
                item_probs <- control_random_starts$item_probs[[ind]]
                class_probs <- control_random_starts$class_probs[[ind]]
                max_increment <- control_random_starts$max_increment[[ind]]
                iter <- control_random_starts$random_iter + 1
            } else {
                control_random_starts$random_start_temp <- rr
                item_probs <- item_probs_random_starts[[rr]]
                class_probs <- class_probs_random_starts[[rr]]
                max_increment <- max_increment0
                iter <- 1
            }
        }
    }
    #---- output
    res <- list(control_random_starts=control_random_starts,
                max_increment=max_increment, iter=iter, item_probs=item_probs,
                class_probs=class_probs)
    return(res)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/reglca_monitor_random_starts.R
## File Name: reglca_mstep_item_parameters.R
## File Version: 0.501


reglca_mstep_item_parameters <- function(I, n.ik, N.ik, h, mstep_iter, conv,
        regular_lam, regular_type, cd_steps, item_probs, max_increment, iter, G,
        fac=1.02, prob_min=0, est_type="CD", xsi=NULL)
{
    penalty <- rep(0,I)
    n_par <- rep(0,I)
    n_reg <- 0
    n_reg_item <- rep(0,I)
    nclasses <- ncol(item_probs)
    expected_loglike <- rep(0,I)
    opt_fct_item_sum <- rep(0,I)
    bounds <- c( prob_min, 1-prob_min)
    #-- sum counts in case of multiple groups
    if (G>1){
        ND <- dim(n.ik)
        n.ik0 <- n.ik
        N.ik0 <- N.ik
        n.ik <- array( 0, dim=ND[1:3] )
        N.ik <- matrix( 0, nrow=ND[1], ncol=ND[2] )
        for (gg in 1:G){
            n.ik <- n.ik + n.ik0[,,,gg]
            N.ik <- N.ik + N.ik0[,,gg]
        }
    }
    item_probs0 <- item_probs
    for (ii in 1:I){
        freq <- n.ik[,ii,2] / N.ik[,ii]
        pi_class <- cdm_sumnorm(N.ik[,ii])
        res <- reglca_fit_probabilities( freq=freq, pi_class=pi_class, h=h,
                    maxit=mstep_iter, conv=conv, verbose=FALSE, parm_init=NULL,
                    lambda=regular_lam, regular_type=regular_type,
                    cd_steps=cd_steps, max_increment=max_increment,
                    prob_min=prob_min, ii=ii, iter=iter, xsi=xsi,
                    est_type=est_type)
        expected_loglike[ii] <- res$ll
        opt_fct_item_sum[ii] <- res$ll + res$pen
        incr <- res$probs - item_probs[ii,]
        incr <- cdm_trim_increment( increment=incr,
                        max.increment=max_increment, type=1 )
        item_probs[ii,] <- item_probs[ii,] + incr
        item_probs[ii,] <- cdm_squeeze( x=item_probs[ii,], bounds=bounds)
        penalty[ii] <- sum(N.ik[,ii]) * res$pen
        n_par[ii] <- res$n_par
        n_reg_item[ii] <- nclasses - res$n_par
        n_reg <- n_reg + nclasses - res$n_par
    }
    penalty <- sum(penalty)
    n_par <- sum(n_par)
    opt_fct_item_sum <- sum(opt_fct_item_sum)
    max_increment <- min( max_increment, max( abs( item_probs - item_probs0 ) ) ) / fac
    #--- output
    res <- list( item_probs=item_probs, penalty=penalty, n_par=n_par, n_reg=n_reg,
                max_increment=max_increment, n_reg_item=n_reg_item,
                opt_fct_item_sum=opt_fct_item_sum)
    return(res)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/reglca_mstep_item_parameters.R
## File Name: reglca_normalize_probabilities.R
## File Version: 0.04


reglca_normalize_probabilities <- function(parm)
{
    eps <- 1E-10
    probs <- cumsum(parm)
    M <- max(probs)
    if (M > 1){
        probs <- probs / ( M + eps )
        parm <- c( probs[1], diff(probs) )
    }
    return(parm)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/reglca_normalize_probabilities.R
## File Name: reglca_proc_data.R
## File Version: 0.12


reglca_proc_data <- function( dat, weights, group )
{
    dat0 <- dat
    N <- nrow(dat)
    I <- ncol(dat)
    dat_resp <- 1 - is.na(dat)
    dat[ ! dat_resp ] <- 0
    dat <- as.matrix(dat)
    resp.ind.list <- list( 1:I )
    for (ii in 1:I){
        resp.ind.list[[ii]] <- which( dat_resp[,ii]==1)
    }
    if ( is.null(weights) ){
        weights <- rep(1,N)
    }
    W <- sum(weights)
    #--- groups
    if (is.null(group)){
        G <- 1
        groups_unique <- NULL
        ind_groups <- NULL
        N_groups <- NULL
    } else {
        groups_unique <- sort( unique(group))
        group <- match( group, groups_unique)
        G <- length(groups_unique)
        N_groups <- rep(NA, G)
        ind_groups <- list()
        W <- rep(0,G)
        for (gg in 1:G){
            ind_gg <- which( group==groups_unique[gg] )
            ind_groups[[gg]] <- ind_gg
            N_groups[gg] <- length(ind_gg)
            W[gg] <- sum( weights[ind_gg] )
        }
    }
    #--- indicator datasets
    K <- max(dat)
    dat.ind2 <- list()
    for (kk in 1:(K+1)){
        r1 <- ( dat==kk-1 ) * ( dat_resp ) * weights
        dat.ind2[[kk]] <- r1
    }
    #---- output
    res <- list( dat=dat, dat_resp=dat_resp, resp.ind.list=resp.ind.list,
                dat0=dat0, I=I, weights=weights, N=N, W=W, K=K,
                dat.ind2=dat.ind2, G=G, group=group, groups_unique=groups_unique,
                ind_groups=ind_groups, N_groups=N_groups )
    return(res)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/reglca_proc_data.R
## File Name: reglca_progress_em_algorithm.R
## File Version: 0.19


reglca_progress_em_algorithm <- function( like.new, loglikeold, max.par.change,
        iter, progress, penalty, opt_fct, opt_fct_change, n_reg,
        control_random_starts, opt_fct_item_sum )
{
    digits_par_change <- 6
    digits_opt_fct <- 5
    digits_opt_fct_change <- 7
    #---- no random starts
    if (progress & ( ! control_random_starts$use_random_starts) ) {
        print_fct_change <- FALSE
        cat("---------------------------------------------------------------------------------\n")
        cat("Iteration", iter, " ", paste( Sys.time() ), "\n" )
        cat( "Deviance","=", round( -2*like.new, digits_opt_fct ) )
        devchange <- 2*(like.new-loglikeold)
        if ((iter >1) & print_fct_change ){
            cat(" | Deviance change","=",
                    round( 2*(like.new-loglikeold), digits_opt_fct_change) )
        }
        cat("\n" )
        cat( "Penalty","=", round( - penalty, digits_opt_fct ),
                " | number of regularized parameters","=", n_reg, "\n")
        cat( "Optimization function","=", round( opt_fct, digits_opt_fct ) )
        if ((iter>1) & print_fct_change){
            cat(" | Function change","=",
                    round( opt_fct_change, digits_opt_fct_change) )
        }
        cat("\n")
        cat( "Sum of item-wise optimization function values","=",
                round( opt_fct_item_sum, digits_opt_fct ) )
        cat("\n")
        cat("Maximum parameter change:", round( max.par.change, digits_par_change), "\n")
    }
    #--- random starts
    if (progress & ( control_random_starts$use_random_starts) ){
        cat("Random start", control_random_starts$random_start_temp,
            " Iteration", iter,
            " | Deviance","=", round( -2*like.new, digits_opt_fct ),
            " | Penalty","=", round( - penalty, digits_opt_fct ) )
        cat("\n")
    }
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/reglca_progress_em_algorithm.R
## File Name: reglca_scad_threshold.R
## File Version: 0.04


reglca_scad_threshold <- function(beta, lambda, a=3.7)
{
    sign_beta <- sign(beta)
    #-- 2*lambda < abs(beta) < a*lambda
    y <- ( ( a - 1 ) * beta - sign_beta * a * lambda ) / ( a - 2 )
    #-- abs(beta) > a*lambda
    y <- ifelse( abs(beta) > a*lambda, beta, y )
    #-- abs(beta) < 2*lambda
    z <- ( abs(beta) - lambda )
    z <- ifelse( z < 0, 0, z )
    y <- ifelse( abs(beta) < 2*lambda, z*sign_beta, y )
    return(y)
}
/scratch/gouwar.j/cran-all/cranData/CDM/R/reglca_scad_threshold.R