ExpandDH = function(DH, Fo = 1)
{
  DH[is.na(DH)] = 0
  DH[DH$AMT > 0 & DH$CMT == 1, "AMT"] = Fo*DH[DH$AMT > 0 & DH$CMT == 1, "AMT"]
  DH[, "BOLUS"] = 0
  DH[DH$AMT > 0 & DH$RATE == 0, "BOLUS"] = DH[DH$AMT > 0 & DH$RATE == 0, "AMT"]
  DH[, "RATE2"] = 0
  BlankRow = DH[1, ]
  BlankRow[1, ] = rep(0, ncol(DH))
  RateDat = DH[DH$RATE > 0, ]
  nRate = nrow(RateDat)
  for (i in seq_len(nRate)) { # seq_len() skips the loop when there is no infusion record
    cRATE = RateDat[i, "RATE"]
    cCMT = RateDat[i, "CMT"]
    cTIME = RateDat[i, "TIME"]
    cDUR = RateDat[i, "AMT"] / RateDat[i, "RATE"]
    cEND = cTIME + cDUR
    if (!(cEND %in% DH[, "TIME"])) {
      BlankRow[1, "TIME"] = cEND
      DH = rbind(DH, BlankRow)
      DH = DH[order(DH$TIME), ]
    }
    DH[DH$TIME >= cTIME & DH$TIME < (cTIME + cDUR), "RATE2"] = DH[DH$TIME >= cTIME & DH$TIME < (cTIME + cDUR), "RATE2"] + cRATE
    DH[DH$TIME >= cTIME & DH$TIME < (cTIME + cDUR), "CMT"] = cCMT
  }
  return(DH)
}
/scratch/gouwar.j/cran-all/cranData/wnl/R/ExpandDH.R
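# A minimal usage sketch (column names inferred from the code above, values hypothetical):
# ExpandDH() takes a dosing/sampling history with TIME, AMT, RATE and CMT columns, adds BOLUS
# and RATE2 columns, inserts an end-of-infusion row when needed, and appears to scale CMT 1
# amounts by Fo (e.g. oral bioavailability).
DH = data.frame(TIME = c(0, 1, 24),
                AMT  = c(100, 200, 0),   # 100 bolus at t = 0; 200 infused from t = 1
                RATE = c(0, 100, 0),     # rate 100 per h, so a 2 h infusion
                CMT  = c(1, 2, 0))
ExpandDH(DH, Fo = 1)                     # a row at TIME = 3 (end of infusion) is added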
#ObjEst = function(vPara)
#{
#  b0 = exp(vPara - e$alpha)
#  vPara = vector(length=e$nPara0)
#  vPara[e$toEst] = b0/(b0 + 1)*(e$UB - e$LB) + e$LB
#  if (e$fix[1] != 0) vPara[e$fix] = e$IE0[e$fix]
#  return(e$Obj(vPara))
#}

ObjEst = function(vPara)
{
  b0 = exp(vPara - e$alpha)
  vPara = b0/(b0 + 1)*(e$UB - e$LB) + e$LB
  return(e$Obj(vPara))
}

ObjDef = function(vPara0) # Default Obj for Covariance Step (original obj)
{
  vPara = vector(length=e$nPara0)
  vPara[e$toEst] = vPara0
  if (e$fix[1] != 0) vPara[e$fix] = e$IE0[e$fix]
  Fi = e$Fx(vPara[1:e$nTheta0])
  Ri = e$Y - Fi
  e$Fi = Fi
  if (e$Error == "A") {
    Ci = rep(vPara[e$SGindex0], e$nRec)
  } else if (e$Error == "POIS") {
    Ci = vPara[e$SGindex0]*Fi    # Caution when Fi is zero
    Ci[Fi == 0] = 1              # Weight cannot be calculated with zero values
    Ri[Fi == 0] = 0
  } else if (e$Error == "P") {
    Ci = vPara[e$SGindex0]*Fi*Fi # Caution when Fi is zero
    Ci[Fi == 0] = 1              # Weight cannot be calculated with zero values
    Ri[Fi == 0] = 0
  } else if (e$Error == "C") {
    Ci = rep(vPara[e$SGindex0[1]], e$nRec) + vPara[e$SGindex0[2]]*Fi*Fi
# } else if (e$Error == "CFA") {  # Additive fixed proportional
#   Ci = (e$FASD + sqrt(vPara[e$SGindex[1]])*Fi)^2
  } else if (e$Error == "S") {
    Si = e$Sx(vPara[1:e$nTheta0])
    Ci = vPara[e$SGindex0]*Si
    Ci[Si == 0] = 1              # Weight cannot be calculated with zero values
    Ri[Si == 0] = 0
  }
  return(sum(log(Ci) + Ri*Ri/Ci))
}

ObjLS = function(vPara) # Least-squares objective (default ObjFx for wnl5)
{
  Fi = e$Fx(vPara)
  Ri = e$Y - Fi
  if (e$Error == "POIS") {
    Ri[Fi != 0] = Ri[Fi != 0] / sqrt(Fi[Fi != 0]) # Fi should not contain zero.
    Ri[Fi == 0] = 0
  } else if (e$Error == "P") {
    Ri[Fi != 0] = Ri[Fi != 0] / Fi[Fi != 0]       # Fi should not contain zero.
    Ri[Fi == 0] = 0
  }
  return(sum(Ri*Ri))
}

#ObjNo = function(vPara)
#{
#  Ri = e$Y - e$Fx(vPara[1:e$nTheta])
#  Ri[e$Y == -1] = 0
#  return(e$SumLogCi + sum(Ri*Ri/e$Ci))
#}
/scratch/gouwar.j/cran-all/cranData/wnl/R/Objs.R
Secondary = function(Formula, PE, COV)
{
  nArg = length(PE)
  gr0 = deriv(Formula, names(PE), function.arg=names(PE), func=TRUE)
  gr1 = do.call("gr0", as.list(PE))
  PE2 = gr1[1]
  gr2 = attr(gr1, "gradient")
  SE2 = sqrt(gr2 %*% COV %*% t(gr2))
  CV2 = SE2/PE2*100
  Result = c(PE2, SE2, CV2)
  names(Result) = c("PE", "SE", "RSE")
  return(Result)
}
/scratch/gouwar.j/cran-all/cranData/wnl/R/Secondary.R
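# Hedged sketch of Secondary(): given a named primary-parameter estimate and its covariance
# matrix (e.g. taken from an nlr() result's $Est and $Cov), it propagates the standard error of
# a derived quantity by the delta method. The values here are hypothetical.
PE  = c(k = 0.1)                                     # elimination rate constant
COV = matrix(1e-4, 1, 1, dimnames = list("k", "k"))  # its variance
Secondary(~ log(2)/k, PE, COV)                       # half-life with PE, SE, RSE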
SolComp2 = function(K10, K12, K21)
{
# Get Lambdas
  Sum = K10 + K12 + K21
  Disc = sqrt(Sum*Sum - 4*K10*K21)
  L1 = (Sum + Disc)/2
  L2 = (Sum - Disc)/2
  L = c(L1, L2)

# Get Coefficients
  Div = c(L2 - L1, L1 - L2)  # Divisor, denominator
  if (prod(Div) == 0) stop("Roots should be distinct real values.")
  Co = array(dim=c(2, 2, 2)) # Dividend, numerator, NCOMP=2
  Co[1, , 1] = K21 - L
  Co[1, , 2] = K21
  Co[2, , 1] = K12
  Co[2, , 2] = K10 + K12 - L
  for (i in 1:2) Co[, i, ] = Co[, i, ]/Div[i]
  return(list(L=L, Co=Co))
}
/scratch/gouwar.j/cran-all/cranData/wnl/R/SolComp2.R
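# Hedged illustration (hypothetical micro rate constants): SolComp2() returns the two
# disposition rate constants L and the coefficient array Co consumed by nComp() below.
Sol = SolComp2(K10 = 0.1, K12 = 3, K21 = 1.5)
Sol$L    # lambda1 (fast) and lambda2 (slow)
Sol$Co   # 2 x 2 x 2 coefficient array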
SolComp3 = function(K10, K12, K21, K13, K31)
{
# Get Lambdas
  A1 = K10 + K12 + K13 + K21 + K31
  A2 = K10*K21 + K10*K31 + K12*K31 + K13*K21 + K21*K31
  A3 = K21*K31*K10
  Q = (A1*A1 - 3*A2)/9
  RQ = 2*sqrt(Q)
  R = (2*A1*A1*A1 - 9*A1*A2 + 27*A3)/54
  M = Q*Q*Q - R*R
  if (M < 0) stop("Error: Not real roots.")
  Th = acos(8*R/(RQ*RQ*RQ))
  L1 = RQ*cos(Th/3) + A1/3
  L2 = RQ*cos((Th + 2*pi)/3) + A1/3
  L3 = RQ*cos((Th + 4*pi)/3) + A1/3
  L = c(L1, L2, L3)

# Get Coefficients
  D1 = (L[2] - L[1])*(L[3] - L[1])
  D2 = (L[1] - L[2])*(L[3] - L[2])
  D3 = (L[1] - L[3])*(L[2] - L[3])
  Div = c(D1, D2, D3)        # Divisor, denominator
  if (prod(Div) == 0) stop("Roots should be distinct real values.")
  Co = array(dim=c(3, 3, 3)) # Dividend, numerator, NCOMP=3
  Co[1,,1] = (K21 - L)*(K31 - L)
  Co[1,,2] = K21*(K31 - L)
  Co[1,,3] = K31*(K21 - L)
  Co[2,,1] = K12*(K31 - L)
  Co[2,,2] = (K10 + K12 + K13 - L)*(K31 - L) - K31*K13
  Co[2,,3] = K12*K31
  Co[3,,1] = K13*(K21 - L)
  Co[3,,2] = K21*K13
  Co[3,,3] = (K10 + K12 + K13 - L)*(K21 - L) - K21*K12
  for (i in 1:3) Co[,i,] = Co[,i,]/Div[i]
  return(list(L=L, Co=Co))
}
/scratch/gouwar.j/cran-all/cranData/wnl/R/SolComp3.R
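# Same idea for three compartments (hypothetical micro rate constants); the cubic in the
# disposition rate constants is solved trigonometrically, so all roots must be real and distinct.
Sol3 = SolComp3(K10 = 0.1, K12 = 3, K21 = 1.5, K13 = 0.5, K31 = 0.05)
Sol3$L   # the three disposition rate constants (lambdas)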
cmpChi = function(r1, r2)
{
  npar1 = dim(r1$Cov)[1]
  npar2 = dim(r2$Cov)[1]
  ofv1 = r1$'Objective Function Value'
  ofv2 = r2$'Objective Function Value'
  if ((npar2 == npar1) & abs(ofv2 - ofv1) > .Machine$double.eps) {
    p.val = 0
  } else if (abs(ofv2 - ofv1) <= .Machine$double.eps & npar1 != npar2) {
    p.val = 1
  } else if ((npar2 - npar1)*(ofv2 - ofv1) < 0) {
    p.val = 1 - pchisq(abs(ofv2 - ofv1), abs(npar2 - npar1))
  } else {
    p.val = 0
  }
  return(p.val)
}
/scratch/gouwar.j/cran-all/cranData/wnl/R/cmpChi.R
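# Hedged illustration of cmpChi(): it only uses the dimension of $Cov (parameter count) and
# $'Objective Function Value' from two nlr() results, so minimal stand-in lists (hypothetical
# values) are enough to show the likelihood-ratio comparison.
rA = list(Cov = diag(2), 'Objective Function Value' = 130.2)  # smaller, 2-parameter model
rB = list(Cov = diag(3), 'Objective Function Value' = 121.5)  # larger, 3-parameter model
cmpChi(rA, rB)  # p-value for the extra parameter; small p favors the larger model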
dx = function(r)
{
  nRec = length(r$Prediction)
  EstNames = colnames(r$Est)
  wt = rep(0, nRec)
  if ("AddErrVar" %in% EstNames) wt = wt + r$Est["PE","AddErrVar"]
  if ("PoisErrVar" %in% EstNames) wt = wt + r$Prediction*r$Est["PE","PoisErrVar"]
  if ("PropErrVar" %in% EstNames) wt = wt + r$Prediction^2*r$Est["PE","PropErrVar"]
  if ("ScaleErrVar" %in% EstNames) wt = r$Scale*r$Est["PE","ScaleErrVar"]
  if (attr(dev.cur(), "names") == "null device") {
    dev.new(width=12, height=6)
    DefPar = par(mfrow=c(1, 2))
  }
  plot(r$Prediction, r$Prediction + r$Residual, xlab="Prediction", ylab="Observation", pch=16)
  abline(a=0, b=1, lty=3)
  if (min(wt) > 0) {
    plot(r$Prediction, r$Residual/sqrt(wt), xlab="Prediction", ylab="Normalized Residual", pch=16)
    abline(h=(-10:10), lty=3)
    abline(h=0)
  } else {
    plot(r$Prediction, r$Residual, xlab="Prediction", ylab="Residual", pch=16)
    abline(h=0)
  }
  if ("DefPar" %in% ls()) par(DefPar)
}
/scratch/gouwar.j/cran-all/cranData/wnl/R/dx.R
hSkew = function(rx)
{ # rx: result of nls
  bx = coef(rx)
  pName = names(bx)
  z = length(bx)
  ssq = sigma(rx)^2
  m = length(resid(rx))
  d1 = eval(rx$data)
  fm1 = formula(rx)
  tm1 = intersect(attr(terms(fm1), "term.labels"), colnames(d1))
  if (length(tm1) > 0) {
    for (i in 1:length(tm1)) assign(tm1[i], d1[, tm1[i]], envir=as.environment("Autoloads"))
  }
  f1 = deriv(fm1, pName, function.arg=pName, func=TRUE, hessian=TRUE)
  rf1 = do.call(f1, as.list(bx))
  return(Hougaard(attr(rf1, "gradient"), attr(rf1, "hessian"), ssq))
}
/scratch/gouwar.j/cran-all/cranData/wnl/R/hSkew.R
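# Hedged sketch for hSkew() with simulated data; it expects an nls() fit created with a 'data'
# argument and relies on the package's Hougaard() helper. Keeping x in the workspace helps the
# deriv()-generated function find the predictor.
set.seed(1)
x = 1:12
y = 10*exp(-0.3*x) + rnorm(12, 0, 0.1)
d1 = data.frame(x = x, y = y)
fit = nls(y ~ A*exp(-k*x), data = d1, start = list(A = 8, k = 0.2))
hSkew(fit)  # Hougaard's skewness measure for A and k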
nComp = function(Sol, Ka=0, DH)
{
  L = Sol$L
  Co = Sol$Co
  NCOMP = length(L)
  if (NCOMP < 2) stop("Compartment count should be at least 2.")
  if (NCOMP != dim(Co)[1]) stop("Lengths of lambda and coefficients mismatch.")
  NTIME = nrow(DH)
  if (NTIME < 2) stop("Dosing history table should have at least two rows.")
  X = matrix(rep(0, (NCOMP + 1)*NTIME), ncol=(NCOMP + 1), nrow=NTIME)
  for (i in 2:NTIME) {
    pX = X[i - 1, ]
    for (j in 1:(NCOMP + 1)) {
      if (DH[i - 1, "CMT"] == j & DH[i - 1, "BOLUS"] > 0) {
        pX[j] = pX[j] + DH[i - 1, "BOLUS"]
      }
    }
    dT = DH[i, "TIME"] - DH[i - 1, "TIME"]  # delta T
    cR = DH[i - 1, "RATE2"]                 # Infusion Rate
    E = exp(-L*dT)                          # Exponentials
    Xo = rep(0, NCOMP)
    for (j in 1:NCOMP) Xo = Xo + pX[1 + j] * Co[, , j] %*% E               # Bolus
    if (cR > 0) Xo = Xo + ((cR*Co[, , 1]) %*% ((1 - E)/L))                 # Infusion
    Ea = exp(-Ka*dT)                                                       # Oral
    if (pX[1] > 0) Xo = Xo + Ka*pX[1]*(Co[, , 1] %*% ((E - Ea)/(Ka - L)))
    X[i, ] = c(pX[1]*Ea, Xo)
  }
  return(X)
}
/scratch/gouwar.j/cran-all/cranData/wnl/R/nComp.R
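# Hedged end-to-end sketch (hypothetical doses and rate constants): build the dosing/sampling
# grid with ExpandDH(), solve a two-compartment model with SolComp2(), and let nComp() return
# the amount in each compartment at every TIME row; dividing by a volume gives concentrations.
DH  = data.frame(TIME = c(0, 1, 2, 4, 8, 12, 24),
                 AMT  = c(100, 0, 0, 0, 0, 0, 0),   # 100 mg infused over 1 h
                 RATE = c(100, 0, 0, 0, 0, 0, 0),
                 CMT  = c(2, 0, 0, 0, 0, 0, 0))     # 2 = central compartment here
DH  = ExpandDH(DH)                                  # adds BOLUS and RATE2 columns
Sol = SolComp2(K10 = 0.1, K12 = 3, K21 = 1.5)
X   = nComp(Sol, Ka = 0, DH)                        # columns: depot, central, peripheral amounts
Cp  = X[, 2] / 20                                   # concentration with a hypothetical V = 20 L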
nlr = function(Fx, Data, pNames, IE, LB, UB, Error="A", ObjFx=ObjDef, SecNames, SecForms, Method="L-BFGS-B", Sx, conf.level=0.95, k, fix=0) { # e = new.env(parent=globalenv()) # environment should exist before call this function t1 = Sys.time() e$Fx = Fx # function of structural model. Fx should return a vector of the same length to Y if (toupper(Error) == "S") e$Sx = Sx # Scale (inverse weight) function. Sx should return a matrix of the same length rows to Y e$DATA = Data # Fx may use this if ("DV2" %in% colnames(Data)) { e$Y = Data[, c("DV1", "DV2")] e$nRec = nrow(e$Y) } else { if (!("DV" %in% colnames(Data))) stop("Data should have 'DV' column.") if (mean(abs(Data[, "DV"])) < 1e-6 | mean(abs(Data[, "DV"])) > 1e6) warning("DV is too large or too small. Rescale it.") e$Y = Data[, "DV"] # Observation values, Data should have "DV" column. e$nRec = length(e$Y) if (sum(is.na(Data[, "DV"])) > 0) stop("DV column should not have NAs.") } if (length(pNames) != length(IE)) stop("pNames and IE should match.") e$pNames = pNames # parameter names in the order of Fx arguments e$IE = IE # initial estimate of Fx arguments e$nTheta = length(IE) e$Error = toupper(trimws(Error)) e$AddErrVar = min(1e5, (min(e$Y[e$Y > 0])/4)^2) # initial estimate of addtive error variance e$PoisErrVar = 1 # initial estimate of proportional error variance e$PropErrVar = 0.1 # initial estimate of proportional error variance # e$PowErrPow = 0.5 # initial estimate of power error power # e$PowErrVar = 0.1 # initial estimate of power error variance e$ScaleErrVar = 1 # initial estimate of scale (inverse weight) vector of error variance e$Obj = ObjFx # e$FASD = rep(FASD, e$nRec) # Fixed Additive Error Variance Vector if (e$Error == "A") { e$nEps = 1 e$IE = c(e$IE, e$AddErrVar) e$pNames = c(e$pNames, "AddErrVar") } else if (e$Error == "POIS") { e$nEps = 1 e$IE = c(e$IE, e$PoisErrVar) e$pNames = c(e$pNames, "PoisErrVar") } else if (e$Error == "P") { e$nEps = 1 e$IE = c(e$IE, e$PropErrVar) e$pNames = c(e$pNames, "PropErrVar") } else if (e$Error == "C") { e$nEps = 2 e$IE = c(e$IE, e$AddErrVar, e$PropErrVar) e$pNames = c(e$pNames, "AddErrVar", "PropErrVar") # } else if (e$Error == "CFA") { # Additive Fixed Proportional # e$nEps = 1 # e$IE = c(e$IE, e$PropErrVar) # e$pNames = c(e$pNames, "PropErrVar") } else if (e$Error == "S") { e$nEps = 1 e$IE = c(e$IE, e$ScaleErrVar) e$pNames = c(e$pNames, "ScaleErrVar") } else { e$nEps = 0 } e$nPara = e$nTheta + e$nEps # number of parameters if (missing(LB)) { e$LB = rep(0, e$nPara) # lower bound } else { e$LB = c(LB, rep(0, e$nEps)) } if (missing(UB)) { e$UB = rep(1e+6, e$nPara) # upper bound } else { e$UB = c(UB, rep(1e+6, e$nEps)) } # if (e$Error == "POW") { # e$LB[e$nTheta+1] = 0 # e$UB[e$nTheta+1] = 1 # } e$alpha = 0.1 - log((e$IE - e$LB)/(e$UB - e$LB)/(1 - (e$IE - e$LB)/(e$UB - e$LB))) # scaling constant, NOT maximal allowable probabiliy of type 1 error if (e$nEps > 0) { e$SGindex = (e$nTheta + 1):(e$nTheta + e$nEps) # index of error variance(s) } else { e$SGindex = 0 # e$SG1 = SG1 # e$SG2 = SG2 # e$Ci = cbind(rep(SG1*SG1, e$nRec), rep(SG2*SG2, e$nRec)) # e$SumLogCi = sum(log(e$Ci)) } e$IE0 = e$IE e$nPara0 = e$nPara e$nTheta0 = e$nTheta e$SGindex0 = e$SGindex e$fix0 = fix e$fix = e$fix0 e$toEst = which(!(1:e$nPara %in% e$fix)) e$pNames= e$pNames[e$toEst] e$IE = e$IE[e$toEst] e$LB = e$LB[e$toEst] e$UB = e$UB[e$toEst] e$nTheta = sum(1:e$nTheta %in% e$toEst) e$nPara = length(e$toEst) e$nEps = sum((1 + e$nTheta0):e$nPara0 %in% e$toEst) if (e$nEps > 0) { e$SGindex = (e$nTheta + 1):(e$nTheta + e$nEps) 
# index of error variance(s) } else { e$SGindex = 0 } e$alpha = e$alpha[e$toEst] e$r = optim(rep(0.1, e$nPara), ObjEst, method=Method) e$PE = exp(e$r$par - e$alpha)/(exp(e$r$par - e$alpha) + 1)*(e$UB - e$LB) + e$LB # e$PE0 = vector(length=e$nPara0) # e$PE0[e$toEst] = e$PE # e$PE0[e$fix] = e$IE0[e$fix] # e$InvCov = hessian(e$Obj, e$PE0)/2 # FinalEst from EstStep() e$InvCov = hessian(e$Obj, e$PE)/2 # FinalEst from EstStep() e$InvCov = e$InvCov e$Cov = try(solve(e$InvCov), silent=T) if (!is.matrix(e$Cov)) { e$Cov = g2inv(e$InvCov) warning("Hessian is singular") } colnames(e$Cov) = e$pNames rownames(e$Cov) = e$pNames for (i in 1:e$nPara) { if (e$Cov[i, i] < 0) { e$Cov[i, i] = -e$Cov[i, i] # e$Cov[i, i] = 0 warning("Negative variance encountered. Variance estimation is unreliable!") } } e$SE = sqrt(diag(e$Cov)) e$Correl = cov2cor(e$Cov) e$EigenVal = eigen(e$Correl)$values ## Hougaard Skewness if (e$Error == "A") { e$PE0 = vector(length=e$nPara0) e$PE0[e$toEst] = e$PE e$PE0[e$fix] = e$IE0[e$fix] e$J = nGradient(e$Fx, e$PE0[1:e$nTheta0]) e$H = nHessian(e$Fx, e$PE0[1:e$nTheta0]) e$HouSkew = Hougaard(e$J, e$H, e$PE0[e$SGindex0]) names(e$HouSkew) = pNames } ## SE if (e$SGindex[1] > 0) { e$PE = c(e$PE, sqrt(e$PE[e$SGindex])) e$SE = c(e$SE, e$SE[e$SGindex]/2/sqrt(e$PE[e$SGindex])) # Delta method, See Wackerly p484, gr = (x^0.5)' = 0.5x^(-0.5), gr^2 = 1/(4*x) } e$RSE = e$SE/e$PE*100 e$Est = rbind(e$PE, e$SE, e$RSE) # colnames(e$Est) = c(e$pNames) if(e$Error == "A") { colnames(e$Est) = c(e$pNames, "AddErrSD") } else if (e$Error == "POIS") { colnames(e$Est) = c(e$pNames, "PoisErrSD") } else if (e$Error == "P") { colnames(e$Est) = c(e$pNames, "PropErrSD") } else if (e$Error == "C") { colnames(e$Est) = c(e$pNames, "AddErrSD", "PropErrSD") # } else if (e$Error == "POW") { # colnames(e$Est) = c(e$pNames, "PowErrSD") # } else if (e$Error == "CFA") { # colnames(e$Est) = c(e$pNames, "PropErrSD") } else if (e$Error == "S") { colnames(e$Est) = c(e$pNames, "ScaleErrSD") } rownames(e$Est) = c("PE", "SE", "RSE") if (!missing(SecNames)) { nSec = length(SecNames) tRes = matrix(nrow=3, ncol=nSec) colnames(tRes) = SecNames rownames(tRes) = c("PE", "SE", "RSE") for (i in 1:nSec) { tRes[, i] = t(Secondary(SecForms[[i]], e$Est["PE", 1:e$nPara], e$Cov)) } e$Est = cbind(e$Est, tRes) } tCut = qt(0.5 + conf.level/2, max(e$nRec - e$nPara, 1)) e$Est = rbind(e$Est, LL=e$Est["PE",] - tCut*e$Est["SE",], UL=e$Est["PE",] + tCut*e$Est["SE",]) zCut = qnorm(0.5 + conf.level/2) # 1.959964 e$zCI = rbind(e$Est["PE",] - zCut*e$Est["SE",], e$Est["PE",] + zCut*e$Est["SE",]) rownames(e$zCI) = c("LL", "UL") e$Residual = e$Y - e$Fi if (Error == "NOSG") { e$Residual[e$Y == -1] = 0 e$nRec = sum(e$Y != -1) } ## Likelihood Profile cAdd = e$nRec*log(2*pi) nRes = 51 if (!missing(k)) { logk = log(k) } else if (e$nRec == 1) { logk = log(2/(1 - conf.level)) } else { logk = e$nRec/2*log(1 + qf(conf.level, 1, e$nRec - 1)/(e$nRec - 1)) logk = min(logk, log(2/(1 - conf.level))) # Pawitan p240 k = 20 -> p < 0.05 } # logk = ifelse(missing(k), q, log(k)) # If nRec > 60, this is OK. # logk = ifelse(missing(k), qt(0.5 + conf.level/2, e$nRec), log(k)) # logk = ifelse(missing(k), qt(0.5 + conf.level/2, max(e$nRec - 1, 1))^2/2, log(k)) # logk = ifelse(missing(k), qf(conf.level, 1, max(e$nRec - 1, 1))/2, log(k)) # logk = min(logk, log(2/(1 - conf.level))) # Pawitan p240 k = 20 -> p < 0.05 e$fCut = 2*logk # used in pProf(), do not remove fx = function(x, j, ylevel) { tPar = e$PE tPar[j] = x e$Obj(tPar) - e$r$value - ylevel # Obj is 2LL not LL !!! 
} options(warn=-1) # before calling uniroot e$mParB = matrix(nrow=2, ncol=e$nPara) # matrix parameter bound colnames(e$mParB) = e$pNames[1:e$nPara] eps = 1e-10 for (j in 1:e$nPara) { if (j > e$nTheta) { stepSize = e$PE[j]/9 # variance terms } else { stepSize = ifelse(e$SE[j] > abs(e$PE[j])/100 & e$SE[j] < 2*abs(e$PE[j]), e$SE[j], abs(e$PE[j]/9)) } tLL = e$PE[j] - stepSize minL = ifelse(e$LB[j] < 0, -1e6, 0) tObj = fx(tLL, j, ylevel=2*e$fCut) # while (tObj >= 0) { # if infinite, reduce to 0.3 # tLL = 0.3*tLL - 0.7*e$PE[j] # tObj = fx(tLL, j, 2*e$fCut) # } while (tObj <= 0 & tLL > minL) { # if negative, increase by 2 tLL = 3*tLL - 2*e$PE[j] tObj = fx(tLL, j, 2*e$fCut) } if (e$LB[j] >= 0 & tLL < 0) tLL = eps # e$mParB[1, j] = tLL rTemp = try(uniroot(fx, c(tLL, e$PE[j]), j=j, ylevel=1.9*e$fCut), silent=TRUE) if (!inherits(rTemp, "try-error")) { e$mParB[1, j] = rTemp$root } else { e$mParB[1, j] = ifelse(e$LB[j] < 0, e$LB[j], max(eps, e$PE[j]/100)) } tUL = e$PE[j] + stepSize tObj = fx(tUL, j, ylevel=2*e$fCut) # while (tObj >= 0) { # if infinite, reduce to 0.3 # tUL = 0.3*tUL - 0.7*e$PE[j] # tObj = fx(tUL, j, 2*e$fCut) # } while (tObj <= 0 & tUL < 1e6) { # if negative, increase by 2 tUL = 3*tUL - 2*e$PE[j] tObj = fx(tUL, j, 2*e$fCut) } # e$mParB[2, j] = tUL rTemp = try(uniroot(fx, c(e$PE[j], tUL), j=j, ylevel=1.9*e$fCut), silent=TRUE) if (!inherits(rTemp, "try-error")) { e$mParB[2, j] = rTemp$root } else { e$mParB[2, j] = ifelse(e$UB[j] < 0, e$UB[j], 100*e$PE[j]) } } e$LI = matrix(nrow=2, ncol=e$nPara) # Likelihood interval colnames(e$LI) = e$pNames[1:e$nPara] rownames(e$LI) = c("LL", "UL") attr(e$LI, "k") = exp(logk) for (j in 1:e$nPara) { rTemp = try(uniroot(fx, c(e$mParB[1, j], e$PE[j]), j=j, ylevel=e$fCut), silent=TRUE) if (!inherits(rTemp, "try-error")) { e$LI[1, j] = rTemp$root # if (e$PE[j] - e$mParB[1, j] > 2*(e$PE[j] - e$LI[1, j])) e$mParB[1, j] = 1.5*e$LI[1, j] - 0.5*e$PE[j] # PE - 1.5(PE -LL) if (e$mParB[1, j] < 2*e$LI[1, j] - e$PE[j]) e$mParB[1, j] = 1.5*e$LI[1, j] - 0.5*e$PE[j] # PE - 1.5(PE -LL) } else { e$LI[1, j] = ifelse(j > e$nTheta | e$LB[j] >= 0, 0, -Inf) } rTemp = try(uniroot(fx, c(e$PE[j], e$mParB[2, j]), j=j, ylevel=e$fCut), silent=TRUE) if (!inherits(rTemp, "try-error")) { e$LI[2, j] = rTemp$root # if (e$mParB[2, j] - e$PE[j] > 2*(e$LI[2, j] - e$PE[j])) e$mParB[2, j] = 1.5*e$LI[2, j] - 0.5*e$PE[j] # PE + 1.5(UL - PE) if (e$mParB[2, j] > 2*e$LI[2, j] - e$PE[j]) e$mParB[2, j] = 1.5*e$LI[2, j] - 0.5*e$PE[j] # PE + 1.5(UL - PE) } else { e$LI[2, j] = +Inf } } options(warn=0) # end of calling uniroot if (e$SGindex[1] > 0) { e$LI = cbind(e$LI, sqrt(e$LI[, e$SGindex])) colnames(e$LI) = colnames(e$Est)[1:(e$nPara + length(e$SGindex))] } if (!missing(SecNames)) { nSec = length(SecNames) tRes = matrix(nrow=2, ncol=nSec) colnames(tRes) = SecNames rownames(tRes) = c("LL", "UL") for (i in 1:nSec) { fx2 = deriv(SecForms[[i]], e$pNames, function.arg=e$pNames, func=TRUE) tv1 = do.call("fx2", as.list(e$LI[1, e$pNames])) tv2 = do.call("fx2", as.list(e$LI[2, e$pNames])) tRes[, i] = sort(c(tv1, tv2)) } e$LI = cbind(e$LI, tRes) } colnames(e$LI) = colnames(e$Est) attr(e$LI, "k") = exp(logk) e$mOFV = matrix(nrow=nRes, ncol=e$nPara) e$mPar = matrix(nrow=nRes, ncol=e$nPara) colnames(e$mOFV) = e$pNames[1:e$nPara] colnames(e$mPar) = e$pNames[1:e$nPara] for (j in 1:e$nPara) { e$mPar[, j] = seq(e$mParB[1, j], e$mParB[2, j], length.out=nRes) for (i in 1:nRes) { tPar = e$PE[1:e$nPara0] tPar[j] = e$mPar[i, j] e$mOFV[i, j] = cAdd + e$Obj(tPar) # -2LL } } ## e$run = run.test(e$Residual) e$'-2LL' = 
e$nRec*log(2*pi) + e$r$value e$AIC = e$'-2LL' + 2*e$nPara e$AICc = e$AIC + 2*e$nPara*(e$nPara + 1)/(e$nRec - e$nPara - 1) e$BIC = e$'-2LL' + e$nPara*log(e$nRec) e$Elapsed = difftime(Sys.time(), t1) if (e$Error == "A") { Result = list(e$Est, e$LI, e$HouSkew, e$Cov, e$run, e$r$value, e$'-2LL', e$AIC, e$AICc, e$BIC, e$r$covergence, e$r$message, e$Fi, e$Residual) names(Result) = c("Est", "LI", "Skewness", "Cov", "run", "Objective Function Value", "-2LL", "AIC", "AICc", "BIC", "Convergence", "Message", "Prediction", "Residual") } else { Result = list(e$Est, e$LI, e$Cov, e$run, e$r$value, e$'-2LL', e$AIC, e$AICc, e$BIC, e$r$covergence, e$r$message, e$Fi, e$Residual) names(Result) = c("Est", "LI", "Cov", "run", "Objective Function Value", "-2LL", "AIC", "AICc", "BIC", "Convergence", "Message", "Prediction", "Residual") } Len0 = length(Result) Name0 = names(Result) if (toupper(Error) != "S") { Result[[Len0 + 1]] = e$Elapsed names(Result) = c(Name0, "Elapsed Time") } else { Scale = Sx(e$Est["PE", 1:e$nTheta]) Result[[Len0 + 1]] = Scale Result[[Len0 + 2]] = e$Elapsed names(Result) = c(Name0, "Scale", "Elapsed Time") } return(Result) }
/scratch/gouwar.j/cran-all/cranData/wnl/R/nlr.R
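# A minimal usage sketch of nlr() (the fPK model and all values are hypothetical, not from the
# package documentation): Fx receives the structural-parameter vector and must return one
# prediction per DV record; here it simply closes over the data frame dPK.
dPK = data.frame(TIME = c(0.5, 1, 2, 4, 8, 12, 24),
                 DV   = c(7.9, 7.6, 6.9, 5.5, 3.8, 2.5, 0.8))
fPK = function(th) 100/th[2]*exp(-th[1]/th[2]*dPK$TIME)   # 100 mg IV bolus, th = c(CL, V)
r1 = nlr(fPK, dPK, pNames = c("CL", "V"), IE = c(1, 10))  # additive error by default
r1$Est
dx(r1)                                                    # diagnostic plots from dx.R above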
pComp = function(dComp, dRate, Shape="rect", Col=NA, Bx=0.3, By=0.2, Cex=1.0, Lwd=3, Radius=0.3, thIn=pi/2, thOut=pi/2, ...) { if (!(is.data.frame(dComp) & is.data.frame(dRate))) stop("Two input data.frames are needed!") Shape = substr(toupper(trimws(Shape)), 1, 4) if (Shape == "CIRC") { Bx = Radius By = Radius } th0 = atan2(By, Bx) InOutL = 2*By d1 = dComp nComp = NROW(d1) if (nComp == 0) stop("There should be at least one compartment!") xmax = max(d1$xPos) + Bx xmin = min(d1$xPos) - Bx xRange = xmax - xmin tx = xRange/100 # tiny delta x d1$yPos = max(d1$Level) - d1$Level ymax = max(d1$yPos) + 2.5*By ymin = min(d1$yPos) - 2.5*By yRange = ymax - ymin ty = yRange/100 # tiny delta y ## Draw boxes & texts plot(0, 0, type="n", xlim=c(xmin, xmax), ylim=c(ymin, ymax), xlab="", ylab="", bty="n", axes=F, ...) if (Shape == "CIRC") { for (i in 1:nComp) { x0 = d1[i, "xPos"] y0 = d1[i, "yPos"] th = c(seq(0, 2*pi, length.out=200), 0) x = x0 + Radius*cos(th) y = y0 + Radius*sin(th) polygon(x, y, lwd=Lwd, col=Col) } } else { for (i in 1:nComp) { x0 = d1[i, "xPos"] y0 = d1[i, "yPos"] x = c(x0 - Bx, x0 + Bx, x0 + Bx, x0 - Bx, x0 - Bx) y = c(y0 + By, y0 + By, y0 - By, y0 - By, y0 + By) polygon(x, y, lwd=Lwd, col=Col) } } text(d1$xPos, d1$yPos, d1$Name, cex=Cex) ## Draw Rates d2 = dRate nRate = NROW(d2) if (nRate == 0) invisible() if (nrow(unique(dRate[,c("From", "To")])) != nrow(dRate)) stop("Rate data.frame should have unique rates!") d2$Both = FALSE for (i in 1:nrow(d2)) { cFrom = d2[i, "From"] cTo = d2[i, "To"] if (nrow(d2[d2$From == cTo & d2$To == cFrom,]) > 0) d2[i, "Both"] = TRUE } d2$s0 = NA #side d2$s1 = NA d2$q01 = NA # quadrant d2$q10 = NA d2$x0 = NA d2$y0 = NA d2$x1 = NA d2$y1 = NA for (i in 1:nRate) { n1 = d2[i, "From"] # From compartment number n2 = d2[i, "To"] # To compartment number ## determine ceter of From point if (n1 == 0) { # input side cx0 = d1[d1$No == n2, "xPos"] - InOutL*cos(thIn) cy0 = d1[d1$No == n2, "yPos"] + InOutL*sin(thIn) + 1.5*By } else { # input side cx0 = d1[d1$No == n1, "xPos"] cy0 = d1[d1$No == n1, "yPos"] } ## determine ceter of To point if (n2 > nComp) { # normal compartment cx1 = d1[d1$No == n1, "xPos"] + InOutL*cos(thOut) cy1 = d1[d1$No == n1, "yPos"] - InOutL*sin(thOut) - 1.5*By } else { # output side cx1 = d1[d1$No == n2, "xPos"] cy1 = d1[d1$No == n2, "yPos"] } mxa = mean(c(cx0, cx1)) mya = mean(c(cy0, cy1)) th1 = atan2(cy1 - cy0, cx1 - cx0) th1 = ifelse(th1 >= 0, th1, th1 + 2*pi) q01 = floor(th1/ (pi/2)) + 1 # quadrant if (th1 >= th0 & th1 <= (pi - th0)) { s0 = 3 x0 = cx0 y0 = cy0 + By } else if (th1 > (pi - th0) & th1 < pi + th0) { s0 = 2 x0 = cx0 - Bx y0 = cy0 } else if (th1 >= pi + th0 & th1 <= 2*pi - th0) { s0 = 1 x0 = cx0 y0 = cy0 - By } else { s0 = 4 x0 = cx0 + Bx y0 = cy0 } th2 = atan2(cy0 - cy1, cx0 - cx1) th2 = ifelse(th2 >= 0, th2, th2 + 2*pi) q10 = floor(th2/ (pi/2)) + 1 if (th2 >= th0 & th2 <= (pi - th0)) { s1 = 3 x1 = cx1 y1 = cy1 + By } else if (th2 > (pi - th0) & th2 < pi + th0) { s1 = 2 x1 = cx1 - Bx y1 = cy1 } else if (th2 >= pi + th0 & th2 <= 2*pi - th0) { s1 = 1 x1 = cx1 y1 = cy1 - By } else { s1 = 4 x1 = cx1 + Bx y1 = cy1 } if (d2[i, "Both"]) { if (y0 == y1) { y0 = y0 + ty*sign(x1 - x0) y1 = y1 + ty*sign(x1 - x0) } else if (x0 == x1) { x0 = x0 + 0.5*tx*sign(y1 - y0) x1 = x1 + 0.5*tx*sign(y1 - y0) } else if ((s0 == 3 & q01 == 1) | (s0 == 1 & q01 == 4)) { x0 = cx0 + 3*tx x1 = cx1 - tx } else if ((s0 == 4 & q01 == 1) | (s0 == 2 & q01 == 2) ) { y0 = cy0 + ty y1 = cy1 - 3*ty } else if ((s0 == 3 & q01 == 2) | (s0 == 1 & q01 == 3)) { x0 = cx0 - 3*tx 
x1 = cx1 + tx } else if ((s0 == 4 & q01 == 4) | (s0 == 2 & q01 == 3)) { y0 = cy0 - ty y1 = cy1 + 3*ty } } arrows(x0, y0, x1, y1, length=0.12, lwd=Lwd, angle=20) # d2[i, c("s0", "s1", "q01", "q10", "x0", "y0", "x1", "y1")] = c(s0, s1, q01, q10, x0, y0, x1, y1) mx = mean(c(x0, x1)) my = mean(c(y0, y1)) if (y0 == y1) { # same level text(mx, my + sign(x1 - x0)*2.5*ty, d2[i, "Name"], cex=Cex) } else if (y0 > y1) { # from high to low text(mx - tx, my, d2[i, "Name"], cex=Cex, adj=1) } else { # from low to high text(mx + tx, my, d2[i, "Name"], cex=Cex, adj=0) } } }
/scratch/gouwar.j/cran-all/cranData/wnl/R/pComp.R
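# Hedged sketch of pComp() (column names inferred from the code above, values hypothetical):
# dComp describes the boxes (No, Name, Level, xPos) and dRate the arrows (From, To, Name);
# From = 0 is drawn as an external input and a To beyond the compartment count as elimination.
dComp = data.frame(No = 1:2, Name = c("Central", "Peripheral"), Level = 1, xPos = c(1, 2))
dRate = data.frame(From = c(0, 1, 2, 1), To = c(1, 2, 1, 3),
                   Name = c("Dose", "k12", "k21", "k10"))
pComp(dComp, dRate)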
pProf = function(Bag = e, Title = "", ...)
{
  if (Bag$nPara <= 2) {
    mfRow = 1
    mfCol = 2
  } else {
    mfRow = ceiling(sqrt(Bag$nPara))
    mfCol = ceiling(Bag$nPara/mfRow)
  }
  oPar = par(mfrow=c(mfRow, mfCol))
  Args = list(...)
  if (is.null(Args$ylab)) Args$ylab = "-2LL"
  if (is.null(Args$type)) Args$type = "l"
  for (j in 1:Bag$nPara) {
    x0 = Bag$mPar[, j]
    y0 = Bag$mOFV[, j]
    x = x0[y0 < Bag$`-2LL` + 5*Bag$fCut]
    y = y0[y0 < Bag$`-2LL` + 5*Bag$fCut]
    if (is.finite(min(x)) & is.finite(max(x)) & is.finite(min(y, na.rm=T)) & is.finite(max(y, na.rm=T))) {
      Args$x = x
      Args$y = y
      RdUdL = format((Bag$LI[2, j] - Bag$PE[j])/(Bag$PE[j] - Bag$LI[1, j]), digits=3)
      Args$xlab = paste0(Bag$pNames[j], " = ", format(Bag$PE[j], digits=2), ", dU/dL = ", RdUdL)
      do.call(plot, Args)
      abline(h = Bag$'-2LL' + Bag$fCut, lty=2)
      abline(v = Bag$PE[j], lty=3)
      text(Bag$LI[, j], Bag$'-2LL', labels = format(Bag$LI[, j], digits=2))
    }
  }
  if (trimws(Title) != "") title(Title, outer=TRUE)
  par(oPar)
}
/scratch/gouwar.j/cran-all/cranData/wnl/R/pProf.R
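# Hedged note: pProf() reads the likelihood-profile grids (mPar, mOFV, LI, fCut) that nlr()
# leaves in its working environment, so it is normally called with no data arguments right
# after a fit such as the r1 sketch above.
pProf(Title = "Likelihood profiles")  # one -2LL profile panel per estimated parameter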
wnl5 = function(Fx, Data, pNames, IE, LB, UB, Error="A", ObjFx=ObjLS)
{
# e = new.env(parent=globalenv()) # environment should exist
  t1 = Sys.time()
  e$Fx = Fx          # function of structural model. Fx should return a vector of the same length as Y
  e$DATA = Data      # Fx may use this
  e$Y = Data[, "DV"] # Observation values, Data should have "DV" column.
  e$nRec = length(e$Y)
  e$IE = IE          # initial estimates of Fx arguments
  e$nPara = length(IE)
  e$Error = toupper(trimws(Error))
  e$Obj = ObjFx
  if (missing(LB)) {
    e$LB = rep(0, e$nPara)    # lower bound
  } else {
    e$LB = LB
  }
  if (missing(UB)) {
    e$UB = rep(1e+6, e$nPara) # upper bound
  } else {
    e$UB = UB
  }
  e$alpha = 0.1 - log((e$IE - e$LB)/(e$UB - e$LB)/(1 - (e$IE - e$LB)/(e$UB - e$LB)))
  e$r = optim(rep(0.1, e$nPara), ObjEst, method="L-BFGS-B")
  e$PE = exp(e$r$par - e$alpha)/(exp(e$r$par - e$alpha) + 1)*(e$UB - e$LB) + e$LB
  e$Hess = hessian(e$Obj, e$PE)
  e$Eigen = eigen(e$Hess)
  e$Cond = sqrt(max(e$Eigen$values)/min(e$Eigen$values))
  e$Pred = e$Fx(e$PE)
  e$Residual = e$Y - e$Pred
  e$run.test = run.test(e$Residual)
  e$WRSS = e$r$value
  e$AIC = e$nRec*log(e$WRSS) + 2*e$nPara
  e$SBC = e$nRec*log(e$WRSS) + e$nPara*log(e$nRec)
  e$Elapsed = difftime(Sys.time(), t1)
  names(e$PE) = pNames
  Result = list(e$PE, e$WRSS, e$run.test, e$AIC, e$SBC, e$Cond, e$r$convergence, e$r$message, e$Pred, e$Residual, e$Elapsed)
  names(Result) = c("PE", "WRSS", "run", "AIC", "SBC", "Condition Number", "Convergence", "Message", "Prediction", "Residual", "Elapsed Time")
  return(Result)
}
/scratch/gouwar.j/cran-all/cranData/wnl/R/wnl5.R
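# Hedged sketch: wnl5() is the least-squares (WinNonlin-style) counterpart of nlr() and uses
# the same Fx/Data conventions; fPK and dPK are the hypothetical objects from the nlr() sketch above.
r5 = wnl5(fPK, dPK, pNames = c("CL", "V"), IE = c(1, 10))
r5$PE
r5$AIC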
.onAttach <- function(...) {
    mydate <- date()
    x <- regexpr("[0-9]{4}", mydate)
    this.year <- substr(mydate, x[1], x[1] + attr(x, "match.length") - 1)
    packageStartupMessage("\n## W-NOMINATE Ideal Point Package")
    packageStartupMessage("## Copyright 2006 -", this.year)
    packageStartupMessage("## Keith Poole, Jeffrey Lewis, James Lo, and Royce Carroll")
    packageStartupMessage("## Support provided by the U.S. National Science Foundation")
    packageStartupMessage("## NSF Grant SES-0611974\n")
    #require("pscl", quietly=TRUE)
}

.onUnload <- function(libpath) {
    library.dynam.unload("wnominate", libpath)
}
/scratch/gouwar.j/cran-all/cranData/wnominate/R/header.R
# Function: add.cutline # Reads in output from W-NOMINATE and adds a cutting line to existing plot # INPUTS: a numeric vector of length 4, cutData # midpoint1d<-cutData[1] # spread1d<-cutData[2] # midpoint2d<-cutData[3] # spread2d<-cutData[4] add.cutline <- function(cutData,weight,lwd=2) { slope <- -cutData[2]/(cutData[4]*weight) if (is.na(slope)) { x <- c(cutData[1],cutData[1]) y <- c(sqrt(1-cutData[1]^2),-sqrt(1-cutData[1]^2)) slope <- NA intercept <- NA } else { intercept <- -slope*cutData[1]+cutData[3] x <- c( (-slope*intercept + sqrt( (slope*intercept)^2 - (1+slope*slope)*(intercept*intercept-1)))/(1+slope*slope), (-slope*intercept - sqrt( (slope*intercept)^2 - (1+slope*slope)*(intercept*intercept-1)))/(1+slope*slope) ) if (is.na(x[1])) { warning("Couldn't solve for points on the unit circle!\n") x<-NA y<-NA slope<-NA intercept<-NA } else { y <- intercept + slope*x y[y < -1] <- -sqrt(1-x[y<1]^2) y[y > 1] <- sqrt(1-x[y>1]^2) } } lines(x,y,lwd=lwd) } plot.angles <- function(x, main.title="Cutting Line Angles", x.title="Angle in Degrees", y.title="Count",dims=c(1,2),...) { if(!is(x, "nomObject")) stop("Input is not of class 'nomObject'.") if(x$dimensions==1) stop("All angles in 1D NOMINATE are 90 degrees.") if(length(dims)!=2) stop("'dims' must be an integer vector of length 2.") weight<-x$weight[dims[2]]/x$weight[dims[1]] contrained <- ((abs(x$rollcalls[,paste("spread",dims[1],"D",sep="")]) > 0.0 | abs(x$rollcalls[,paste("spread",dims[2],"D",sep="")]) > 0.0) & (x$rollcalls[,paste("midpoint",dims[1],"D",sep="")]**2 + x$rollcalls[,paste("midpoint",dims[2],"D",sep="")]**2) < .95) cutvector1 <- na.omit(x$rollcalls[contrained,paste("spread",dims[2],"D",sep="")]*weight/ sqrt(x$rollcalls[contrained,paste("spread",dims[1],"D",sep="")]^2 + weight^2*x$rollcalls[contrained,paste("spread",dims[2],"D",sep="")]^2)) cutvector2 <- -1*na.omit(x$rollcalls[contrained,paste("spread",dims[1],"D",sep="")]/ sqrt(x$rollcalls[contrained,paste("spread",dims[1],"D",sep="")]^2 + weight^2*x$rollcalls[contrained,paste("spread",dims[2],"D",sep="")]^2)) cutvector1[cutvector2<0] <- -cutvector1[cutvector2<0] cutvector2[cutvector2<0] <- -cutvector2[cutvector2<0] angles <- atan2(cutvector2,cutvector1)*180/pi suppressWarnings(hist(angles, breaks=seq(0,180,10), main=main.title, xlab=x.title, ylab=y.title, cex.main=1.2, cex.lab=1.2, font.main=2, axes=FALSE, ,...)) axis(2) axis(1, at=seq(0,180,10)) } plot.cutlines <- function(x, main.title="Cutting Lines", d1.title="First Dimension", d2.title="Second Dimension", lines=50,dims=c(1,2),lwd=2,...) 
{ if(!is(x, "nomObject")) stop("Input is not of class 'nomObject'.") if(x$dimensions==1) stop("All angles in 1D NOMINATE are 90 degrees.") if(length(dims)!=2) stop("'dims' must be an integer vector of length 2.") if(lines<1) stop("'Lines' must be less than 1.") constrained <- ((abs(x$rollcalls[,"spread1D"]) > 0.0 | abs(x$rollcalls[,"spread2D"]) > 0.0) & (x$rollcalls[,"midpoint1D"]**2 + x$rollcalls[,"midpoint2D"]**2) < .95) cutlineData <- cbind(x$rollcalls[constrained,paste("midpoint",dims[1],"D",sep="")], x$rollcalls[constrained,paste("spread",dims[1],"D",sep="")], x$rollcalls[constrained,paste("midpoint",dims[2],"D",sep="")], x$rollcalls[constrained,paste("spread",dims[2],"D",sep="")]) cutlineData <- na.omit(cutlineData) suppressWarnings(symbols(x=0, y=0, circles=1, inches=FALSE, asp=1, main=main.title, xlab=d1.title, ylab=d2.title, xlim=c(-1.0,1.0), ylim=c(-1.0,1.0), cex.main=1.2, cex.lab=1.2, font.main=2, lwd=2, fg="grey", frame.plot=FALSE,...)) if(lines<dim(cutlineData)[1]) cutlineData <- cutlineData[sample(1:dim(cutlineData)[1],lines),] suppressWarnings(apply(cutlineData, 1, add.cutline, weight=x$weights[dims[2]]/x$weights[dims[1]],lwd=lwd)) } plot.coords <- function (x, main.title="W-NOMINATE Coordinates", d1.title="First Dimension", d2.title="Second Dimension", dims=c(1,2), plotBy="party", color=TRUE, shape=TRUE, cutline=NULL, Legend=TRUE, legend.x=0.8,legend.y=1,...) { if(!is(x, "nomObject")) stop("Input is not of class 'nomObject'.") if(!any(colnames(x$legislators)==plotBy)){ warning("Variable '", plotBy ,"' does not exist in your W-NOMINATE object.") types <- rep("Leg",dim(x$legislators)[1]) } else { types <- x$legislators[,plotBy] } if(length(dims)!=2 & x$dimensions!=1) stop("'dims' must be an integer vector of length 2.") # determine number of parties nparties <- length(unique(types)) # set default colors and shapes colorlist <- c("darkblue", "firebrick", "darkcyan", "darkgreen", "darkmagenta", "darkolivegreen", "darkorange", "darkorchid", "darkred", "darksalmon", "darkseagreen", "darkslateblue", "darkslategray", "darkturquoise", "darkviolet", "deeppink", "deepskyblue", "dodgerblue") shapes <- rep(c(16,15,17,18,19,3,4,8),3) # color and shape options if (color==FALSE) colorlist <- sample(colors()[160:220],50) if (shape==FALSE) shapes <- rep(16,50) if(x$dimensions==1){ coord1D <- x$legislators[,"coord1D"] ranking <- rank(x$legislators[,"coord1D"]) plot(seq(-1,1,length=length(coord1D)), 1:length(coord1D), type="n", cex.main=1.2, cex.lab=1.2, font.main=2, xlab="First Dimension Nominate", ylab="Rank", main="1D W-NOMINATE Plot") if(Legend) legend(0.67, 0.7*length(coord1D), unique(types), pch=shapes[1:nparties], col=colorlist[1:nparties], cex=0.7) for(i in 1:nparties) suppressWarnings(points(coord1D[types==unique(types)[i]], ranking[types==unique(types)[i]],pch=shapes[i],col=colorlist[i],cex=1.1,lwd=2)) } else { #2 Dimensional Case begins here coord1D <- x$legislators[,paste("coord",dims[1],"D",sep="")] coord2D <- x$legislators[,paste("coord",dims[2],"D",sep="")] # Plotting suppressWarnings(symbols(x = 0, y = 0, circles = 1, inches = FALSE, asp = 1, main=main.title, xlab=d1.title, ylab=d2.title, xlim=c(-1.0,1.0), ylim=c(-1.0,1.0), cex.main=1.2, cex.lab=1.2, font.main=2, lwd=2, fg="grey", frame.plot=FALSE,...)) if(!is.null(cutline)) { for(i in 1:length(cutline)){ if(all(is.na(x$rollcalls[cutline[i],]))) stop("Roll call for cutline did not meet minimum lopsidedness requirements.") add.cutline(c(x$rollcalls[cutline[i],paste("midpoint",dims[1],"D",sep="")], 
x$rollcalls[cutline[i],paste("spread",dims[1],"D",sep="")], x$rollcalls[cutline[i],paste("midpoint",dims[2],"D",sep="")], x$rollcalls[cutline[i],paste("spread",dims[2],"D",sep="")]), weight=x$weights[dims[2]]/x$weights[dims[1]], lwd=2) } } if(Legend) legend(legend.x, legend.y, unique(types), pch=shapes[1:nparties], bty="n",col=colorlist[1:nparties], cex=0.7) for(i in 1:nparties) suppressWarnings(points(coord1D[types==unique(types)[i]], coord2D[types==unique(types)[i]],pch=shapes[i],col=colorlist[i],cex=1.1,lwd=2)) } } plot.scree <- function(x, main.title="Scree Plot", x.title="Dimension", y.title="Eigenvalue",...) { if(!is(x, "nomObject")) stop("Input is not of class 'nomObject'.") if(is.null(x$eigenvalues)) stop("No eigenvalues exist in this W-NOMINATE object.") suppressWarnings(plot(1:20, x$eigenvalues[1:20], type='o', main=main.title, xlab=x.title, ylab=y.title, cex.main=1.2, cex.lab=1.2, font.main=2, lwd=1, pch=16, axes=FALSE, ...)) axis(2) axis(1, at=1:20) } plot.nomObject <- function(x,dims=c(1,2),...) { if(!is(x, "nomObject")) stop("Input is not of class 'nomObject'.") if(length(dims)!=2 & x$dimensions!=1) stop("'dims' must be an integer vector of length 2.") if(x$dimensions==1) { par(mfrow=c(1,2)) suppressWarnings(plot.coords(x,dims=dims)) suppressWarnings(plot.scree(x,dims=dims)) } else { par(mfrow=c(2,2)) suppressWarnings(plot.coords(x,dims=dims)) suppressWarnings(plot.angles(x,dims=dims)) suppressWarnings(plot.scree(x,dims=dims)) suppressWarnings(plot.cutlines(x,dims=dims,lwd=1)) } } summary.nomObject<-function(object,verbose=FALSE,...){ if(!is(object, "nomObject")) stop("Input is not of class 'nomObject'.") cat("\n\nSUMMARY OF W-NOMINATE OBJECT") cat("\n----------------------------\n") cat("\nNumber of Legislators:\t ", dim(na.omit(object$legislators))[1], " (", dim(object$legislators)[1]-dim(na.omit(object$legislators))[1], " legislators deleted)", sep="") cat("\nNumber of Votes:\t ", dim(na.omit(object$rollcalls))[1], " (", dim(object$rollcalls)[1]-dim(na.omit(object$rollcalls))[1], " votes deleted)", sep="") cat("\nNumber of Dimensions:\t ", object$dimensions) correctYea<-sum(as.numeric(object$legislators[,"correctYea"]),na.rm=TRUE) allYea<-correctYea+sum(as.numeric(object$legislators[,"wrongNay"]),na.rm=TRUE) correctNay<-sum(as.numeric(object$legislators[,"correctNay"]),na.rm=TRUE) allNay<-correctNay+sum(as.numeric(object$legislators[,"wrongYea"]),na.rm=TRUE) cat("\nPredicted Yeas:\t\t ", correctYea, " of ", allYea, " (", round(100*correctYea/allYea,1), "%) predictions correct", sep="") cat("\nPredicted Nays:\t\t ", correctNay, " of ", allNay, " (", round(100*correctNay/allNay,1), "%) predictions correct", sep="") cat("\nCorrect Classifiction:\t ", paste(round(object$fits[1:object$dimensions],2),"%",sep=""), sep=" ") cat("\nAPRE:\t\t\t ", round(object$fits[(object$dimensions+1):(2*object$dimensions)],3), sep=" ") cat("\nGMP:\t\t\t ", round(object$fits[(2*object$dimensions+1):(3*object$dimensions)],3), "\n\n\n", sep=" ") if(!verbose) { cat("The first 10 legislator estimates are:\n") if(object$dimensions!=1) { round(object$legislators[1:10,paste("coord",1:object$dimensions,"D",sep="")],3) } else{ round(object$legislators[1:10,c("coord1D","se1D")],3) } } else { if(object$dimensions!=1) { round(object$legislators[,paste("coord",1:object$dimensions,"D",sep="")],3) } else{ round(object$legislators[,c("coord1D","se1D")],3) } } }
/scratch/gouwar.j/cran-all/cranData/wnominate/R/methods.R
#.First.lib <- function(lib,pkg) { # library.dynam("wnominate",pkg,lib) #} # function: qnprob # Note: Beta and dimweight are unused and only passed # to match nomprob function qnprob <- function(yea,nay,ideal,Beta,dimweight,normal=1) { if (normal==1) { plink = pnorm } else { plink = plogis } cons <- matrix(1,nrow(ideal),1) %*% t( apply(nay*nay,1,sum) - apply(yea*yea,1,sum) ) discrim <- 2*(yea - nay) ystar <- cons + ideal %*% t(discrim) plink(ystar) } generateTestData <- function(legislators=20, rcVotes=100, yea=matrix(runif(rcVotes,min=-0.2,max=0.7),nrow=rcVotes), nay=matrix(runif(rcVotes,min=-0.7,max=0.2),nrow=rcVotes), ideal=matrix(rnorm(legislators),nrow=legislators), Beta=15, dimweight=0.5,normal=1, seed = NULL, utility='nominate') { if (!is.null(seed)) { set.seed(seed) } if (utility == 'nominate') { probfunc <- nomprob } else{ if (utility == 'QN') { probfunc <- qnprob } else { stop("'utility' argument must be 'nominate' or 'QN'") } } fakeData<-probfunc(yea,nay,ideal,Beta,dimweight,normal) fakeData <- (fakeData > matrix(runif(legislators*rcVotes),legislators,rcVotes))*1 fakeData[fakeData==0]<-6 lopsided<-rep(FALSE,rcVotes) legis.names<-paste("Legislator", 1:legislators, sep="") icpsrState <- sample(c(10,56,63,94), size=legislators,replace=TRUE) state<-icpsrState state[state==10]<-"Alaska" state[state==56]<-"California" state[state==63]<-"Ohio" state[state==94]<-"Texas" cd<-sample(1:435, size=legislators, replace=TRUE) icpsrLegis <- sample(1:100, size=legislators, replace=TRUE) partyCode <- sample(c(200,100), size=legislators,replace=TRUE) partyName <- partyCode partyName[partyName==200]<-"Republican" partyName[partyName==100]<-"Democrat" codes=list(yea=1:3,nay=4:6,notInLegis=0,missing=7:9) legdata<-data.frame(state=state, icpsrState=icpsrState, cd=cd, icpsrLegis=icpsrLegis, party=partyName, party=partyCode) rownames(legdata) <- legis.names rownames(fakeData) <- legis.names colnames(fakeData) <- paste("V",1:dim(fakeData)[2],sep="") rcObject<-list(votes=fakeData, codes=codes, n=legislators, m=rcVotes, lopsided=lopsided, legis.data=legdata, vote.data=NULL, desc=NULL,source=NULL) class(rcObject) <- c("rollcall") rcObject } nomprob <- function(yea, nay, ideal, Beta, dimweight, normal=1) { res <- .C("nomprob", as.double(t(yea)), as.double(t(nay)), as.double(t(ideal)), as.double(Beta), as.double(dimweight), as.integer(dim(yea)[1]), as.integer(dim(ideal)[1]), as.integer(dim(ideal)[2]), yeaProb = double( dim(ideal)[1]*dim(yea)[1]), as.integer(normal)) matrix(res$yeaProb,nrow=dim(ideal)[1],byrow=FALSE) } ################################################################################### #checkMember(): Takes vote matrix and returns total number of retained # votes for each member. Used as wnominate() helper. ################################################################################### checkMember <- function(votes) { apply(votes!=9,1,sum) } ################################################################################## #checkVote(): Takes vote matrix and returns total number of retained votes # for each member. Used as wnominate() helper. 
################################################################################## checkVote <- function(votes) { minorityPercent <- pmin(apply(votes==1,2,sum),apply(votes==6,2,sum)) minorityPercent <- minorityPercent/apply(votes!=9,2,sum) minorityPercent[is.nan(minorityPercent)]<-0 return(minorityPercent) } wnominate <- function(rcObject, ubeta=15, uweights=0.5, dims=2, minvotes=20, lop=0.025, trials=3, polarity, verbose=FALSE) { cat("\nPreparing to run W-NOMINATE...\n\n\tChecking data...\n\n") start <- proc.time() # The following modifies an arugment, but I think we have to live with this # one... If we don't, then we have to copy rcObjects$votes which is not a good # thing memorywise. if(!is.null(rcObject$codes)) { rcObject$votes[rcObject$votes %in% rcObject$codes$yea] <- 1 rcObject$votes[rcObject$votes %in% rcObject$codes$nay] <- 6 rcObject$votes[rcObject$votes %in% c(rcObject$codes$missing, rcObject$codes$notInLegis)] <- 9 } #Core Error Checking if(!is(rcObject, "rollcall")) stop("Input is not of class 'rollcall'.") if(dims>10) stop("'dims' cannot exceed 10\n") if(dims<1) stop("'dims' cannot be negative\n") if(trials<1) stop("'trials' cannot be negative\n") polarityToUse <- polarity if(is.character(polarityToUse)) { if(length(setdiff(polarityToUse,rownames(rcObject$votes)))!=0) { cat("\tThe following legislators do not exist:\n\t\t", setdiff(polarityToUse,rownames(rcObject$votes)),"\n") stop("'polarity' is incorrectly specified\n") } duplicates<-rownames(rcObject$votes)[duplicated(rownames(rcObject$votes))] if(length(intersect(duplicates,polarityToUse))!=0){ cat("\tThe following legislators have duplicate names:\n\t\t", intersect(duplicates,polarityToUse),"\n") stop("'polarity' is incorrectly specified\n") } polarityToUse<-match(polarityToUse,rownames(rcObject$votes)) } if(is.list(polarityToUse)) { column<-which(colnames(rcObject$legis.data)==polarityToUse[[1]]) if(length(column)==0) stop("Variable '", polarityToUse[[1]] ,"' does not exist in 'legis.data'") if(length(setdiff(polarityToUse[[2]],rcObject$legis.data[,column])==0)) { cat("\tThe following items from your variable do not exist:\n\t\t", setdiff(polarityToUse[[2]],rcObject$legis.data[,column]),"\n") stop("'polarity' is incorrectly specified\n") } duplicates<-rcObject$legis.data[duplicated(rcObject$legis.data[,column]),column] if(length(intersect(duplicates,polarityToUse[[2]]))!=0) { cat("\tThe following legislators have duplicate items from your list:\n\t\t", intersect(duplicates,polarityToUse[[2]]),"\n") stop("'polarity' is incorrectly specified\n") } polarityToUse <- match(polarityToUse[[2]], rcObject$legis.data[,column]) } if(dims!=length(polarityToUse)) stop("'polarity' must be a vector of length 'dims'\n") if(max(polarityToUse)>rcObject$n | min(polarityToUse)<1) { cat("\tThe following legislators do not exist:\n\t\t", polarityToUse[which(polarityToUse>rcObject$n | polarityToUse<1)],"\n") stop("'polarity' is incorrectly specified\n") } if((lop<0) | (lop>1)) stop("'lop' does not fall between 0 to 1.\n") entry<-as.numeric(sort(unique(as.integer(rcObject$votes)), na.last=TRUE)) #as.numeric is required because there is an R bug at work here if(!(identical(entry,c(1,6,9)) | identical(entry,c(1,6)) )) { cat("\tData contains the following values: ", entry, "\n") stop("Data contains values other than 1 or 6 or 9.\n") } #Check legislators and members for minimum requirements until there is no further change memberVotes <- checkMember(rcObject$votes) minorityVoteShare <- checkVote(rcObject$votes) tempvotes<-rcObject$votes 
tempvotes[memberVotes<minvotes,]<-9 tempvotes[,minorityVoteShare<=lop]<-9 memberVotes2 <- checkMember(tempvotes) minorityVoteShare2 <- checkVote(tempvotes) while(!identical(memberVotes,memberVotes2)|!identical(minorityVoteShare,minorityVoteShare2)){ memberVotes <- memberVotes2 minorityVoteShare <- minorityVoteShare2 tempvotes[memberVotes2<minvotes,] <- 9 tempvotes[,minorityVoteShare2<=lop] <- 9 memberVotes2 <- checkMember(tempvotes) minorityVoteShare2 <- checkVote(tempvotes) } members2dump <- memberVotes < minvotes votes2dump <- minorityVoteShare <= lop N <- sum(!members2dump) M <- sum(!votes2dump) #Check that legislators used to specify polarities are not deleted, then #adjust them for deleted legislators if(any(members2dump[polarityToUse])) { cat("\t\tThe following legislators fail minimum vote requirements:\n\t\t", rownames(rcObject$votes)[intersect(which(members2dump),polarityToUse)], "\n") stop("\t'polarity' is incorrectly specified\n") } for(i in 1:dims) polarityToUse[i] <- polarityToUse[i]-sum(members2dump[1:polarityToUse[i]]) if(length(polarityToUse)==1) polarityToUse <- c(polarityToUse,1) #Print dumped legislators and dumped votes, then calculate percentages remaining. if(all(!members2dump)) { cat("\t\tAll members meet minimum vote requirements.\n\n") } else { tempvotes <- rcObject$votes[,!votes2dump] if (verbose) { cat("\t\tMembers dropped:\n") for(i in 1:sum(members2dump)) cat("\t\t", rownames(rcObject$votes)[which(members2dump)[i]], "(voted on only", checkMember(tempvotes)[which(members2dump)[i]], "of", dim(tempvotes)[2], "retained votes).\n") } cat("\t\t...", sum(members2dump) ,"of", dim(tempvotes)[1], "total members dropped.\n\n") } if(all(!votes2dump)) { cat("\t\tAll votes meet minimum lopsidedness requirement.\n\n") } else { tempvotes <- rcObject$votes[!members2dump,] cat("\t\tVotes dropped:\n") if (verbose) { for(i in 1:sum(votes2dump)) cat(sprintf("\t\tNumber %.0f (minority size = %2.1f", which(votes2dump)[i], round(100*checkVote(tempvotes)[which(votes2dump)[i]],1)), "%)\n",sep="") } cat("\t\t...", sum(votes2dump) ,"of", dim(tempvotes)[2], "total votes dropped.\n\n") } cat("\tRunning W-NOMINATE...\n\n") flush.console() fakedims<-max(dims,2) res <- .Fortran("wnom", #inputs as.integer(rcObject$votes[!members2dump,!votes2dump]), as.integer(N), as.integer(M), as.integer(fakedims), as.integer(trials), as.integer(polarityToUse), as.integer(dims),# #input/output ubeta=as.single(ubeta), uweights=as.single(rep(uweights,dims)), #outputs classify=single((N+M)*4), fits=single(3*dims), gmp=single(N+M), idealpoints=single(dims*N), covariances=single(((dims*(dims+1))/2)*N), midpoints=single(dims*M), spreads=single(dims*M), eigenvalues=single(N), exitstatus=integer(1), #wnom() call continued, with dynamic memory allocation in R integer(N*M), #LDATA2 single(N+M), #GMPGMP(NUMMEMBERS+NUMVOTES) single(3*fakedims), #XFITS(3*DIMS) single(N*N), #ZMAT2(NUMMEMBERS,NUMMEMBERS) single(N), #WVEC2(NUMMEMBERS) single(N*N), #DSTAR(NUMMEMBERS,NUMMEMBERS) single(fakedims*N), #XDATA(NUMMEMBERS,DIMS) single(N), #XXX(NUMMEMBERS) single(fakedims*N), #XDATA3(NUMMEMBERS,DIMS) single(fakedims*M), #ZMID(NUMVOTES,DIMS) single(fakedims*M), #DYN(NUMVOTES,DIMS) single(4*N), #XSAVE(NUMMEMBERS,2,2) single(4*M), #ZSAVE(NUMVOTES,2,2) single(2*M), #CSAVE(NUMVOTES,2) integer(M), #KAV(NUMVOTES) integer(M), #KAY(NUMVOTES) integer(M), #KAN(NUMVOTES) single(N), #XD(NUMMEMBERS) integer(N), #ISENS(NUMMEMBERS) integer(M), #LERIC(NUMVOTES) single(M*N*2), #PSI(NUMMEMBERS,NUMVOTES,2) single(N*2*fakedims), #XMEANX(2*NUMMEMBERS,DIMS) 
single(N*3*fakedims), #STDDEV(3*NUMMEMBERS,DIMS) single(N*2*fakedims*fakedims), #COVX(2*NUMMEMBERS,DIMS,DIMS) single(N*2*fakedims*fakedims), #COVX2(2*NUMMEMBERS,DIMS,DIMS) single((M+N+111)*4), #KPJP(NUMMEMBERS+NUMVOTES+111,4) single(M+N+111), #YBIGL(NUMMEMBERS+NUMVOTES+111) single(M+N+111), #YYBIGL(NUMMEMBERS+NUMVOTES+111) single(M*2*fakedims), #STDDVZ(NUMVOTES,2,DIMS) single(N*fakedims), #STDDVX(NUMMEMBERS,DIMS) single(M+N+111), #LMO(NUMMEMBERS+NUMVOTES+111) single(M*N), #POOLE(NUMMEMBERS,NUMVOTES) single(N*fakedims), #TRUEX(NUMMEMBERS,DIMS) single(N*fakedims), #TRUEX2(NUMMEMBERS,DIMS) single(M*fakedims), #TRUEZMID(NUMVOTES,DIMS) single(M*fakedims), #TRUEDYN(NUMVOTES,DIMS) single(M*N), #PROBMAT(NUMMEMBERS,NUMVOTES) single(N*fakedims), #XTARGET(NUMMEMBERS,DIMS) single(N*fakedims), #XMAT0(NUMMEMBERS,DIMS) single(N*fakedims), #XSAVE2(NUMMEMBERS,DIMS) single(N*fakedims), #XSAVE3(NUMMEMBERS,DIMS) single(N*fakedims)) #XMAT(NUMMEMBERS,DIMS) if(res$exitstatus!=1) stop("\n\n====== wnominate9707.f did not execute properly ======\n\n") #Roll wnom() output into a nomObject #classify is a rowwise matrix that begins with legislators first legClassify <- matrix(res$classify[1:(4*res[[2]])], nrow=res[[2]], ncol=4, byrow=TRUE) rcClassify <- matrix(res$classify[(4*res[[2]]+1):length(res$classify)], nrow=res[[3]], ncol=4, byrow=TRUE) ## These equations preceeded wnomiante 0.96 and are incorrect, according to May 29, 2011 email #legPRE <- apply(cbind(legClassify[,1]+legClassify[,3],legClassify[,2]+legClassify[,4]),1, min) #legPRE <- (legPRE-legClassify[,2]-legClassify[,3])/legPRE rcPRE <- apply(cbind(rcClassify[,1]+rcClassify[,3],rcClassify[,2]+rcClassify[,4]),1, min) rcPRE <- (rcPRE-rcClassify[,2]-rcClassify[,3])/rcPRE ## Legislator Correct Classification replaces PRE from May 29, 2011 ## CC = (correctYea+correctNay)/(correctYea+wrongYea+wrongNay+correctNay) legCC <- (legClassify[,1] + legClassify[,4])/apply(legClassify,1,sum) tempRC<-cbind(rcClassify, res$gmp[(res[[2]]+1):length(res$gmp)], rcPRE, matrix(res$spread,nrow=res[[3]],ncol=dims), matrix(res$midpoint,nrow=res[[3]],ncol=dims)) tempLegis<-cbind(legClassify, res$gmp[1:res[[2]]], legCC, matrix(res$idealpoints, nrow=res[[2]], ncol=dims), matrix(res$covariance,nrow=res[[2]],ncol=dims+choose(dims,2))) legislators<-matrix(NA,rcObject$n,6+2*dims+choose(dims,2)) rollcalls<-matrix(NA,rcObject$m,6+2*dims) legislators[!members2dump,]<-tempLegis rollcalls[!votes2dump,]<-tempRC if(choose(dims,2)==0){ colnames(legislators)<-c("correctYea", "wrongYea", "wrongNay", "correctNay", "GMP", "CC", paste("coord",1:dims,"D",sep=""), paste("se",1:dims,"D",sep="")) } else { colnames(legislators)<-c("correctYea", "wrongYea", "wrongNay", "correctNay", "GMP", "CC", paste("coord",1:dims,"D",sep=""), paste("se",1:dims, "D",sep=""), paste("corr.",1:choose(dims,2),sep="")) } rownames(legislators)<-rownames(rcObject$votes) colnames(rollcalls)<-c("correctYea", "wrongYea", "wrongNay", "correctNay", "GMP", "PRE", paste("spread",1:dims,"D",sep=""), paste("midpoint",1:dims,"D",sep="")) if(!is.null(rcObject$legis.data)) legislators <- cbind(as.data.frame(rcObject$legis.data),legislators) fits <- as.numeric(res$fits) names(fits) <- c(paste("correctclass",1:dims,"D",sep=""), paste("apre",1:dims,"D",sep=""), paste("gmp",1:dims,"D",sep="")) nomObject<-list(legislators=as.data.frame(legislators), rollcalls=as.data.frame(rollcalls), dimensions=dims, eigenvalues=res$eigenvalues,beta=res$ubeta,weights=res$uweights,fits=fits) class(nomObject) <- c("nomObject") cat("W-NOMINATE estimation completed 
successfully.") cat("\nW-NOMINATE took", (proc.time()-start)[3],"seconds to execute.\n\n") nomObject } #end wnominate()
/scratch/gouwar.j/cran-all/cranData/wnominate/R/wnominate.R
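# Hedged usage note: a synthetic rollcall object from generateTestData() can be scaled directly
# with wnominate(); the same calls appear in the package vignette code below.
dat <- generateTestData(legislators = 100, rcVotes = 1000)
result <- wnominate(dat, polarity = 1, dims = 1)
summary(result)
plot(result)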
### R code from vignette source 'wnominate.Rnw' ################################################### ### code chunk number 1: one ################################################### library(wnominate) #sen90 <- readKH("ftp://voteview.com/sen90kh.ord") data(sen90) #Does same thing as above sen90 ################################################### ### code chunk number 2: oneandhalf ################################################### selector <- c(21,22,44,45,46,47,48,49,50,53,54,55,56,58,59,60,61,62,65,66,67,68,69,70,71,72,73,74,75,77,78,80,81,82,83,84,87,99,100,101,105,118,119,120,128,129,130,131,132,133,134,135,141,142,143,144,145,147,149,151,204,209,211,218,219,220,221,222,223,224,225,226,227,228,229,237,238,239,252,253,257,260,261,265,266,268,269,270,276,281,290,292,293,294,295,296,302,309,319,321,322,323,324,325,327,330,331,332,333,335,336,337,339,340,346,347,357,359,367,375,377,378,379,381,384,386,392,393,394,405,406,410,418,427,437,442,443,444,448,449,450,454,455,456,459,460,461,464,465,467,481,487,489,490,491,492,493,495,497,501,502,503,504,505,506,507,514,515,522,523,529,539,540,541,542,543,544,546,548,549,550,551,552,553,554,555,556,557,558,559,560,561,562,565,566,567,568,569,571,584,585,586,589,590,592,593,594,595) sen90$m <- length(selector) sen90$votes <- sen90$votes[,selector] ################################################### ### code chunk number 3: two ################################################### rownames(sen90$votes)[1:12] result <- wnominate(sen90, polarity=c(2,5)) ################################################### ### code chunk number 4: three ################################################### summary(result) ################################################### ### code chunk number 5: four ################################################### plot(result) ################################################### ### code chunk number 6: five ################################################### par(mfrow=c(1,1)) plot.coords(result,cutline=14) ################################################### ### code chunk number 7: UN1 ################################################### rm(list=ls(all=TRUE)) data(UN) UN<-as.matrix(UN) UN[1:5,1:6] ################################################### ### code chunk number 8: UN2 ################################################### UNnames<-UN[,1] legData<-matrix(UN[,2],length(UN[,2]),1) colnames(legData)<-"party" UN<-UN[,-c(1,2)] ################################################### ### code chunk number 9: UN3 ################################################### rc <- rollcall(UN, yea=c(1,2,3), nay=c(4,5,6), missing=c(7,8,9),notInLegis=0, legis.names=UNnames, legis.data=legData, desc="UN Votes", source="www.voteview.com") result<-wnominate(rc,polarity=c(1,1)) ################################################### ### code chunk number 10: UN5 ################################################### summary(result) ################################################### ### code chunk number 11: UN4 ################################################### plot(result) ################################################### ### code chunk number 12: random1 ################################################### yp <- matrix(rep(0.3,6),nrow=6) np <- matrix(rep(-0.2,6),nrow=6) ideal <- matrix(c(rep(-0.2,5),rep(0.2,5)),nrow=10) nomprob(yp,np,ideal,15,0.5) ################################################### ### code chunk number 13: random2 ################################################### dat <- generateTestData(legislators=100, rcVotes=1000) result <- 
wnominate(dat,polarity=1,dims=1) summary(result) ################################################### ### code chunk number 14: random3 ################################################### plot(result)
/scratch/gouwar.j/cran-all/cranData/wnominate/inst/doc/wnominate.R
#' wodd_format #' #' @description wodd_format a private function #' #' @param wodd_name string. "S0", "S1", "M". etc #' #' @return A string wodd_format <- function(wodd_name) { if(is.character(wodd_name)){ wodd_name <- gsub("S0", "", wodd_name) wodd_name <- gsub("S1", "S", wodd_name) wodd_name }else{ stop("wodd_name must be a string") } } #' raw_wodd #' #' @description raw_wodd a private function #' #' @param index int #' #' @return A vector #' #' @import glue #' @importFrom purrr map_chr #' #' @export raw_wodd <- function(index) { if(is.integer(index)){ int <- 2^index power <- 0 counted <- FALSE if (int >= 16) { while (counted == FALSE) { if (int >= 16^power) { power <- power + 1 } else { counted <- TRUE power <- power - 1 } } } if ((16^(power) * 1) == int) { return(wodd_format(glue::glue("S{power}"))) } else if ((16^(power) * 2) == int) { return(wodd_format(glue::glue("S{power}M"))) } else if ((16^(power) * 4) == int) { return(wodd_format(glue::glue("S{power}F"))) } else if ((16^(power) * 8) == int) { return(wodd_format(glue::glue("S{power}E"))) } } else{ stop("index must be an integer") } } #' make_wodd_name #' #' @description make_wodd_name a private function #' #' @param index int #' #' @return A vector #' #' @importFrom purrr map_chr #' #' @export make_wodd_name <- function(index) { if(is.integer(index)){ wodd_depth <- seq(1:index) wodd_name <- map_chr(.x = wodd_depth, .f = raw_wodd) wodd_name }else{ stop("index must be an integer") } } #' select_wodd_name_from_table #' #' @description select_wodd_name_from_table a private function #' #' @param index int #' #' @return A vector #' #' @importFrom dplyr filter #' @import tibble #' @importFrom magrittr %>% #' #' @export #' #' @examples #' select_wodd_name_from_table(1L) select_wodd_name_from_table <- function(index) { if(is.integer(index)){ if (index > 100) { stop("depth > 100 which is larger than precalculated table of wodd names") } else { first_100_wodds <- c( "M", "F", "E", "S", "SM", "SF", "SE", "S2", "S2M", "S2F", "S2E", "S3", "S3M", "S3F", "S3E", "S4", "S4M", "S4F", "S4E", "S5", "S5M", "S5F", "S5E", "S6", "S6M", "S6F", "S6E", "S7", "S7M", "S7F", "S7E", "S8", "S8M", "S8F", "S8E", "S9", "S9M", "S9F", "S9E", "S0", "S0M", "S0F", "S0E", "S1", "S1M", "S1F", "S1E", "S2", "S2M", "S2F", "S2E", "S3", "S3M", "S3F", "S3E", "S4", "S4M", "S4F", "S4E", "S5", "S5M", "S5F", "S5E", "S6", "S6M", "S6F", "S6E", "S7", "S7M", "S7F", "S7E", "S8", "S8M", "S8F", "S8E", "S9", "S9M", "S9F", "S9E", "S20", "S20M", "S20F", "S20E", "S21", "S21M", "S21F", "S21E", "S22", "S22M", "S22F", "S22E", "S23", "S23M", "S23F", "S23E", "S24", "S24M", "S24F", "S24E", "S25" ) first_100_wodds[1:index] } }else{ stop("index must be an integer") } } #' Get sample size from depth #' #' @description Calculates the sample size needed given an alpha level and depth #' #' @param d an integer depth #' @param alpha alpha level such as 0.1, 0.05, 0.01. An alpha of 0.05 would be associated with a 95 percent confidence interval #' @param conservative a bool. default is FALSE. If TRUE then a conservative (larger) sample size is returned. 
#' #' @return a float sample size #' #' @export #' #' @examples #' get_n_from_depth(7L, 0.01) get_n_from_depth <- function(d, alpha = 0.05, conservative = TRUE){ if(!is.integer(d)){ stop("d (depth) must be an integer") }else{ if (!is.null(alpha)){ if (conservative == FALSE){ sample_size <- 2^((d-1) + log2(2 * (qnorm(1 - (alpha / 2))^2))) floor(sample_size) # a simple round down of the decimals # not necessary just convenience }else{ sample_size <- 2^((d) + log2(2 * (qnorm(1 - (alpha / 2))^2))) floor(sample_size) # a simple round down of the decimals # not necessary just convenience } } } } #' Get depth from sample size #' #' @description Calculates the depth given a sample size and alpha level #' #' @param n an integer scalar sample size #' @param alpha alpha level such as 0.1, 0.05, 0.01. An alpha of 0.05 would be associated with a 95 percent confidence interval #' #' @return an integer depth #' #' @export #' #' @examples #' get_depth_from_n(1e4L, 0.05) get_depth_from_n <- function(n, alpha = 0.05){ if(is.integer(n)){ if(!is.null(alpha)){ stopifnot(is.numeric(alpha) && length(alpha) == 1) stopifnot(alpha > 0 && alpha < 1) k <- as.integer(floor(log2(n) - log2(2 * (qnorm(1 - (alpha / 2))^2))) + 1L) k } }else{ stop("n must be an integer. Here is an example get_depth_from_n(n=1e4L, 0.05). suffix the number with L to force value to be an integer") } } #' Calculate whisker odds #' #' @description makes whisker odds #' #' @param y A vector of values #' @param alpha the alpha level, such as 0.05 which is the compliment of the confidence interval, such as 0.95 #' @param include_tail_area a binary. #' If true then include a column of tail area 2^(i) #' @param include_outliers a binary. #' If true include a column of outliers beyond the last wodd depth #' @param include_depth a binary. 
#' If true include a column indicating the depth of the letter value #' #' @return A dataframe of wodds #' \item{lower_value}{lower value} #' \item{wodd_name}{Name of wodd} #' \item{upper_value}{upper value} #' #' @importFrom dplyr select filter bind_rows mutate #' @importFrom tibble tibble #' @importFrom magrittr %>% #' @importFrom purrr map_chr #' @importFrom stats qnorm median quantile #' #' @export #' #' @examples #' set.seed(42) #' wodds(rnorm(1e4, 0, 1)) wodds <- function(y, alpha = 0.05, include_tail_area = FALSE, include_outliers = FALSE, include_depth = FALSE) { lower_value <- upper_value <- wodd_name <- NULL data <- sort(y) n <- length(data) alpha <- alpha # rule 3 k <- get_depth_from_n(as.integer(n), 0.05) lvl <- (k - 1) * 2 nq <- lvl - 1 qs <- rep(0, nq) # initialize array of quantiles f <- function(n) { (1 + floor(n)) / 2 } for (i in 1:k) { # median calculation if (i == 1) { d <- f(n) qs[i - 1] <- 0.5 d } else { d <- f(d) d } if (ceiling(d) != floor(d)) { l_idx1 <- as.integer(floor(d)) l_idx2 <- as.integer(ceiling(d)) u_idx1 <- as.integer(floor(n - d + 1)) u_idx2 <- as.integer(ceiling(n - d + 1)) ql <- mean(c(l_idx1 / n, l_idx2 / n)) qu <- mean(c(u_idx1 / n, u_idx2 / n)) } else { ql <- as.integer(d) / n qu <- as.integer(floor(n - d + 1)) / n } qs[((i - 1) * 2) - 1] <- ql qs[(i - 1) * 2] <- qu vf <- quantile(data, qs) } vf <- c(median(data), median(data), vf) lower <- vf[seq(1, length(vf), 2)] upper <- vf[seq(2, length(vf), 2)] depth <- seq(1, k) tail_area <- 2^depth wodd_depth_name <- wodds::select_wodd_name_from_table(k) o_upper <- sort(y[y > max(upper)]) o_lower <- sort(y[y < min(lower)], decreasing = TRUE) o_max_len <- max(length(o_upper), length(o_lower)) length(o_upper) <- o_max_len length(o_lower) <- o_max_len length(depth) <- o_max_len + length(depth) length(tail_area) <- o_max_len + length(tail_area) o_name <- paste0("O", seq(1, o_max_len)) if (o_max_len > 0){ df_o <- tibble::tibble(lower_value = o_lower, wodd_name = o_name, upper_value = o_upper) df_base <- tibble::tibble(lower_value = lower, wodd_name = wodd_depth_name, upper_value = upper) df_with_outliers <- dplyr::bind_rows(df_base, df_o) }else{ df_o <- tibble::tibble(lower_value = NULL, wodd_name = NULL, upper_value = NULL) df_base <- tibble::tibble(lower_value = lower, wodd_name = wodd_depth_name, upper_value = upper) df_with_outliers <- dplyr::bind_rows(df_base) } df_with_outliers_depth <- df_with_outliers %>% dplyr::mutate(depth = depth) %>% dplyr::select(depth, lower_value, wodd_name, upper_value) df_with_outliers_depth_tail_area <- df_with_outliers %>% dplyr::mutate(depth = depth, tail_area = tail_area) %>% dplyr::select(depth, tail_area, lower_value, wodd_name, upper_value) if (include_outliers == TRUE) { if (include_tail_area == FALSE & include_depth == FALSE) { df_with_outliers_depth_tail_area %>% dplyr::select(-tail_area, -depth) } else if (include_tail_area == TRUE & include_depth == FALSE) { df_with_outliers_depth_tail_area %>% dplyr::select(-depth) } else if (include_tail_area == TRUE & include_depth == TRUE) { df_with_outliers_depth_tail_area }else if (include_tail_area == FALSE & include_depth == TRUE){ df_with_outliers_depth_tail_area %>% dplyr::select(-tail_area) } } else { df_with_depth_tail_area <- df_with_outliers_depth_tail_area %>% dplyr::filter(!is.na(depth)) if (include_tail_area == FALSE & include_depth == FALSE) { df_with_depth_tail_area %>% dplyr::select(-tail_area, -depth) } else if (include_tail_area == TRUE & include_depth == FALSE) { df_with_depth_tail_area %>% dplyr::select(-depth) 
} else if (include_tail_area == TRUE & include_depth == TRUE) { df_with_depth_tail_area }else if (include_tail_area == FALSE & include_depth == TRUE){ df_with_depth_tail_area %>% dplyr::select(-tail_area) } } }
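## Illustrative sketch (not part of the original wodds source): the depth rule in
## get_depth_from_n() follows the letter-value stopping rule
##   k = floor(log2(n) - log2(2 * z^2)) + 1,  with z = qnorm(1 - alpha/2),
## and get_n_from_depth() goes the other way. The block below only assumes the
## exported functions defined above; it is wrapped in `if (FALSE)` so sourcing
## this file never executes it.
if (FALSE) {
  n <- 1e4L
  alpha <- 0.05
  z <- qnorm(1 - alpha / 2)
  k_manual <- as.integer(floor(log2(n) - log2(2 * z^2)) + 1L)
  k_manual                              # same value as get_depth_from_n(n, alpha)
  get_depth_from_n(n, alpha)
  get_n_from_depth(k_manual, alpha)     # conservative sample size for that depth

  set.seed(42)
  wodds(rnorm(n), alpha = alpha, include_depth = TRUE)
}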
## End of file: wodds/R/wodds.R
#' WOE #' #' @author #' Sudarson Mothilal Thoppay #' @title #' Weigth of Evidence #' @description #' Computes the Weight of Evidence and Information Value between Dependent and Independent variable. #' #' @name woe #' @param Data : Name of Data Set #' @param Independent : Name of the Independent Variable #' @param Continuous : True if the variable is continuous, False if variable is Ordinal or Nominal #' @param Dependent : Name of the Targer Variable #' @param C_Bin : Count of Bins to be computed #' @param Good : Which categorical variable do you want to be good #' @param Bad : Which categorical variable do you want to be bad #' @return Returns a DataSet with computed WoE and IV values on success or 0 on Failure #' @note #' "woe" shows the log-odds ratio between between Goods and Bads. #' In the Bivalued Dependenet variable, one value represents Goods and others are bads. #' In Detail with an Example: #' Let Dependent varaible be ATTRITED (0,1) and Independent variable be TENURE #' where, 1-Attrited, 0-Non Attrited. #' If I wish to check WOE and IV of Tenure with ATTRITED to know if Tenure has an effect in getting attrited, #' Then good would be 1 and bad=0. #' If I wish to check WOE and IV of Tenure with ATTRITED to know if Tenure has an effect in not getting attrited, #' Then good would be 0 and bad=1. #' @examples #' woe(Data=mtcars,"cyl",FALSE,"am",10,Bad=0,Good=1) #' woe(Data=mtcars,"mpg",TRUE,"am",10,Bad=0,Good=1) #' #' @export woe<-function(Data,Independent,Continuous,Dependent,C_Bin,Bad,Good) { continuous=Continuous C_Bin=C_Bin-1 success<-0 success2<-0 j=0 for(i in 1:ncol(Data)) { if(colnames(Data[i])==Dependent) { j=i success<-1 break } } for(Ind in 1:ncol(Data)) { if(colnames(Data[Ind])==Independent) { j=Ind success2<-1 break } } Ind<-j if(success==1 & success2==1) { CNO_Target=i Data[which(Data[,CNO_Target]==Bad),CNO_Target]=0 Data[which(Data[,CNO_Target]==Good),CNO_Target]=1 if(Continuous==TRUE) { BIN<-.sub_woe(Data,Ind,CNO_Target,C_Bin) colnames(BIN)<-c("BIN","MIN","MAX","GOOD","BAD","TOTAL","BAD%","GOOD%","TOTAL%","WOE","IV","BAD_SPLIT","GOOD_SPLIT") BIN<-BIN[,c(1,2,3,5,4,6,7,8,9,10,11,12,13)] } else { BIN<-.sub_woe_ON(Data,Ind,CNO_Target,C_Bin) colnames(BIN)<-c("BIN","BAD","GOOD","TOTAL","BAD%","GOOD%","TOTAL%","WOE","IV","BAD_SPLIT","GOOD_SPLIT") } return(BIN) } else { if(success==0) { print(paste("Variable",Dependent,"is missing in Data set ")) } if(success2==0) { print(paste("Variable",Independent,"is missing in Data set ")) } if(success==0 & success2==0) { print(paste("Variables",Independent,",",Dependent,"are missing in Data set ")) } return(0) } } .sub_woe<-function(Data,CNO_Continuous,CNO_Target,C_Bin){ DataSet<-data.frame(matrix(ncol=3,nrow=nrow(Data))) DataSet$X1<-Data[,CNO_Continuous] DataSet$X2<-Data[,CNO_Target] #DS<-arrange(DataSet,DataSet$X1) DS<-DataSet[order(DataSet$X1),] rowno<-nrow(DS) BIN<-1 SHIFT<-round(rowno/C_Bin) for(i in 1:rowno){ if(i<=SHIFT) { DS[i,3]<-BIN } else { SHIFT<-SHIFT+round(rowno/C_Bin) BIN<-BIN+1 DS[i,3]<-BIN } } BINDATA<-data.frame(matrix(ncol=1,nrow=length(table(DS$X3)))) colnames(BINDATA)[]<-c("Bin") for(i in 1:nrow(BINDATA)) { BINDATA$Bin[i]<-i } BINDATA["mini"]<-NA BINDATA["maxi"]<-NA BINDATA["total_continuous"]<-NA BINDATA["Attrited"]<-NA BINDATA["Existed"]<-NA BINDATA["Total"]<-NA for(i in 1:nrow(BINDATA)) { x<-which(DS$X3==i) DS[x,]->temp BINDATA$mini[i]<-temp$X1[which.min(temp$X1)] BINDATA$maxi[i]<-temp$X1[which.max(temp$X1)] BINDATA$total_continuous[i]<-sum(temp$X1) BINDATA$Existed[i]<-length(which(temp$X2==0)) 
BINDATA$Attrited[i]<-length(which(temp$X2==1)) BINDATA$Total[i]<-length(x) } BIN<-BINDATA BIN["AVGI"]<-NA BIN["P_Existed"]<-NA BIN["P_Attrited"]<-NA BIN["P_Total"]<-NA BIN["woe"]<-NA BIN["iv"]<-NA data=BIN sum_b_existed=sum(BIN$Existed) sum_b_attrited=sum(BIN$Attrited) sum_b_total=sum(BIN$Total) BIN[,8]<-round((BIN[,4]/BIN[,7]),3) BIN[,9]<-round((BIN[,6]/sum_b_existed),3) BIN[,10]<-round((BIN[,5]/sum_b_attrited),3) BIN[,11]<-round((BIN[,7]/sum_b_total),3) BIN[,12]<-round(log(BIN[,10]/BIN[,9],base = exp(1)),3)*100 BIN[,13]<-round(log(BIN[,10]/BIN[,9],base = exp(1))*(BIN[,10]-BIN[,9]),3) BIN$EXISTED_PERCENT<-NA BIN$ATTR_PERCENT<-NA for(i in 1:nrow(BIN)) { BIN$EXISTED_PERCENT[i]<-round(BIN$Existed[i]/BIN$Total[i],3) BIN$ATTR_PERCENT[i]<-round(BIN$Attrited[i]/BIN$Total[i],3) } BIN<-BIN[,c(1,2,3,5,6,7,9,10,11,12,13,14,15)] return(BIN) } .sub_woe_ON<-function(Data,CNO_Continuous,CNO_Target,C_Bin){ DataSet<-data.frame(matrix(ncol=2,nrow=nrow(Data))) DataSet$X1<-Data[,CNO_Continuous] DataSet$X2<-Data[,CNO_Target] #DS<-arrange(DataSet,DataSet$X1) DS<-DataSet[order(DataSet$X1),] rowno<-nrow(DS) x<-table(DS[,1],DS[,2]) DataSet<-data.frame(matrix(ncol=(ncol(x)+1),nrow=nrow(x))) for(i in 1:ncol(x)) { for(j in 1:nrow(x)) { DataSet[j,i]<-x[j,i] } } for(i in 1:nrow(x)) { DataSet[i,(ncol(x)+1)]<-rownames(x)[i] } DataSet$Total<-NA DataSet<-DataSet[,c(3,1,2,4)] colnames(DataSet)<-c("BUCKET","Existed","Attrited","Total") BIN<-DataSet BIN[,4]<-BIN[,2]+BIN[,3] BIN["P_Existed"]<-NA BIN["P_Attrited"]<-NA BIN["P_Total"]<-NA BIN["woe"]<-NA BIN["iv"]<-NA sum_b_existed=sum(BIN$Existed) sum_b_attrited=sum(BIN$Attrited) sum_b_total=sum(BIN$Total) for(i in 1:nrow(BIN)) { BIN[i,5]<-round((BIN[i,2]/sum_b_existed),3) #P_Existed<-Existed/sum_E BIN[i,6]<-round((BIN[i,3]/sum_b_attrited),3) BIN[i,7]<-round((BIN[i,4]/sum_b_total),3) } for(i in 1:nrow(BIN)) { BIN$woe[i]=round(log(BIN$P_Attrited[i]/BIN$P_Existed[i],base=exp(1)),3)*100 BIN$iv[i] =round(log(BIN$P_Attrited[i]/BIN$P_Existed[i],base=exp(1))*(BIN$P_Attrited[i]-BIN$P_Existed[i]),3) } BIN$EXISTED_PERCENT<-NA BIN$ATTR_PERCENT<-NA for(i in 1:nrow(BIN)) { BIN$EXISTED_PERCENT[i]<-round(BIN$Existed[i]/BIN$Total[i],3) BIN$ATTR_PERCENT[i]<-round(BIN$Attrited[i]/BIN$Total[i],3) } return(BIN) }
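## Illustrative sketch (not part of the original woe source): the per-bin WOE/IV
## arithmetic used in .sub_woe() and .sub_woe_ON(). The counts below are made up
## for illustration; the block is wrapped in `if (FALSE)` so it never runs when
## this file is sourced.
if (FALSE) {
  # Suppose a bin contains 30 of the 100 cases coded as Good (Attrited = 1)
  # and 10 of the 100 cases coded as Bad (Existed = 0).
  P_Attrited <- 30 / 100                                 # bin share of the Good class
  P_Existed  <- 10 / 100                                 # bin share of the Bad class
  woe <- round(log(P_Attrited / P_Existed), 3) * 100     # ~109.9, as in BIN$woe
  iv  <- round(log(P_Attrited / P_Existed) * (P_Attrited - P_Existed), 3)
  c(woe = woe, iv = iv)
}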
## End of file: woe/R/woe.R
#' German Credit Data #' #' Credit data that classifies debtors described by #' a set of attributes as good or bad credit risks. #' See source link below for detailed information. #' #' @docType data #' @keywords data #' @name germancredit #' @usage data(germancredit) #' @format A data frame with 21 variables #' (numeric and factors) and 1000 observations. #' @source \url{https://archive.ics.uci.edu/ml/datasets/Statlog+(German+Credit+Data)} #' @examples #' # Load German credit data and create subset #' data(germancredit) #' df <- germancredit[, c('creditability', 'credit.amount', 'duration.in.month', #' 'savings.account.and.bonds', 'purpose')] #' # Display structure of the subset (data frame) #' str(df) NULL
## End of file: woeBinning/R/germancredit.R
##### This is the actual binning function for numeric variables and factors. ##### woe.binning.2 <- function(df, target.var, pred.var, min.perc.total, min.perc.class, stop.limit, abbrev.fact.levels, bad, good) { #### Build subsets with target and predictor variable df <- df[, c(target.var, pred.var)] # used for final binning dfrm <- df[, c(target.var, pred.var)] # used for iterative merging of bins colnames(dfrm)[1] <- paste("target.var") colnames(dfrm)[2] <- paste("predictor.var") #### Check if numerical variable or factor was provided as predictor and apply appropriate binning technique ### Binning in case a numerical variable was selected if ( length(unique(dfrm[,1]))==2 && is.numeric(dfrm[,2]) ) { ## Derive number of initial bins from min.perc.total parameter max.bins <- trunc(1/min.perc.total) ## Derive cutpoints for bins (with similar frequency) cutpoints <- quantile(dfrm$predictor.var,(0:max.bins)/max.bins, na.rm=TRUE) innercutpoints <- cutpoints[2:(length(cutpoints)-1)] # remove outer (observed) boudaries cutpoints <- c(-Inf, innercutpoints, +Inf) # add -Inf, +Inf to cutpoints cutpoints <- unique(cutpoints) # remove multiple cutpoints with same value ## Calculate initial crosstab from binned variable and target variable ## to identify and merge sparse bins # Compute binned variable from cutpoints and add it to the subset data frame dfrm$predictor.var.binned <- cut(dfrm$predictor.var, cutpoints, labels = NULL, include.lowest = FALSE, right = TRUE, dig.lab = 10, ordered_result = TRUE) # Compute crosstab from binned variable and target variable and covert it to a data frame freq.table <- table(dfrm$predictor.var.binned, dfrm$target.var, useNA="always") row.names(freq.table)[is.na(row.names(freq.table))] <- 'Missing' # Replace NA in row.names with string 'Missing' woe.dfrm <- as.data.frame.matrix(freq.table) # Convert frequency table to data frame woe.dfrm <- woe.dfrm[, c(good, bad)] # Select columns with raw frequencies only # Compute columns percents for target classes from crosstab frequencies woe.dfrm$col.perc.a <- woe.dfrm[,1]/sum(woe.dfrm[,1]) woe.dfrm$col.perc.b <- woe.dfrm[,2]/sum(woe.dfrm[,2]) # Correct column percents in case of 0 frequencies (in case of no NA skip last row) if ( !anyNA(df[,2]) ) { if ( min(woe.dfrm[-nrow(woe.dfrm),1],na.rm=TRUE)==0 | min(woe.dfrm[-nrow(woe.dfrm),2],na.rm=TRUE)==0 ) { woe.dfrm$col.perc.a[-nrow(woe.dfrm)] <- (woe.dfrm$col.perc.a[-nrow(woe.dfrm)]+0.0001)/sum(woe.dfrm$col.perc.a[-nrow(woe.dfrm)]+0.0001) woe.dfrm$col.perc.b[-nrow(woe.dfrm)] <- (woe.dfrm$col.perc.b[-nrow(woe.dfrm)]+0.0001)/sum(woe.dfrm$col.perc.b[-nrow(woe.dfrm)]+0.0001) } } else { if ( min(woe.dfrm[,1],na.rm=TRUE)==0 | min(woe.dfrm[,2],na.rm=TRUE)==0 ) { woe.dfrm$col.perc.a <- (woe.dfrm$col.perc.a+0.0001)/sum(woe.dfrm$col.perc.a+0.0001) woe.dfrm$col.perc.b <- (woe.dfrm$col.perc.b+0.0001)/sum(woe.dfrm$col.perc.b+0.0001) } } # Check for bins (without last regular and without NA bin) if frequencies < percentage limit specified above # (in reverse order to remain correct reference to cutpoints) for (i in (nrow(woe.dfrm)-2):1) { if (woe.dfrm$col.perc.a[i]<min.perc.class | woe.dfrm$col.perc.b[i]<min.perc.class | ((woe.dfrm[i,1]+woe.dfrm[i,2])/(sum(woe.dfrm[,1],na.rm=TRUE)+sum(woe.dfrm[,2],na.rm=TRUE)))<min.perc.total) { # Remove cutpoint cutpoints <- cutpoints[-c((i+1))] # Compute binned variable from cutpoints and add it to the subset data frame dfrm$predictor.var.binned <- cut(dfrm$predictor.var, cutpoints, labels = NULL, include.lowest = FALSE, right = TRUE, dig.lab = 10, 
ordered_result = TRUE) # Compute crosstab from binned variable and target variable and covert it to a data frame freq.table <- table(dfrm$predictor.var.binned, dfrm$target.var, useNA="always") row.names(freq.table)[is.na(row.names(freq.table))] <- 'Missing' # Replace NA in row.names with string 'Missing' woe.dfrm <- as.data.frame.matrix(freq.table) # Convert frequency table to data frame woe.dfrm <- woe.dfrm[, c(good, bad)] # Select columns with raw frequencies only # Compute columns percents for target classes from crosstab frequencies woe.dfrm$col.perc.a <- woe.dfrm[,1]/sum(woe.dfrm[,1]) woe.dfrm$col.perc.b <- woe.dfrm[,2]/sum(woe.dfrm[,2]) # Correct column percents in case of 0 frequencies (in case of no NA skip last row) if ( !anyNA(df[,2]) ) { if ( min(woe.dfrm[-nrow(woe.dfrm),1],na.rm=TRUE)==0 | min(woe.dfrm[-nrow(woe.dfrm),2],na.rm=TRUE)==0 ) { woe.dfrm$col.perc.a[-nrow(woe.dfrm)] <- (woe.dfrm$col.perc.a[-nrow(woe.dfrm)]+0.0001)/sum(woe.dfrm$col.perc.a[-nrow(woe.dfrm)]+0.0001) woe.dfrm$col.perc.b[-nrow(woe.dfrm)] <- (woe.dfrm$col.perc.b[-nrow(woe.dfrm)]+0.0001)/sum(woe.dfrm$col.perc.b[-nrow(woe.dfrm)]+0.0001) } } else { if ( min(woe.dfrm[,1],na.rm=TRUE)==0 | min(woe.dfrm[,2],na.rm=TRUE)==0 ) { woe.dfrm$col.perc.a <- (woe.dfrm$col.perc.a+0.0001)/sum(woe.dfrm$col.perc.a+0.0001) woe.dfrm$col.perc.b <- (woe.dfrm$col.perc.b+0.0001)/sum(woe.dfrm$col.perc.b+0.0001) } } } # Stop in case 3 cutpoints (-Inf, x, +Inf) are reached if ( length(cutpoints)==3 ) { break } } # Check for last regular bin if frequencies < percentage limit specified above (only in case number of cutpoints > 3 if ( length(cutpoints)>3 ) { if (woe.dfrm$col.perc.a[(nrow(woe.dfrm)-1)]<min.perc.class | woe.dfrm$col.perc.b[(nrow(woe.dfrm)-1)]<min.perc.class | ((woe.dfrm[nrow(woe.dfrm)-1,1]+woe.dfrm[nrow(woe.dfrm)-1,2])/(sum(woe.dfrm[,1],na.rm=TRUE)+sum(woe.dfrm[,2],na.rm=TRUE)))<min.perc.total) { # Remove cutpoint cutpoints <- cutpoints[-c(nrow(woe.dfrm)-1)] # Compute binned variable from cutpoints and add it to the subset data frame dfrm$predictor.var.binned <- cut(dfrm$predictor.var, cutpoints, labels = NULL, include.lowest = FALSE, right = TRUE, dig.lab = 10, ordered_result = TRUE) # Compute crosstab from binned variable and target variable and covert it to a data frame freq.table <- table(dfrm$predictor.var.binned, dfrm$target.var, useNA="always") row.names(freq.table)[is.na(row.names(freq.table))] <- 'Missing' # Replace NA in row.names with string 'Missing' woe.dfrm <- as.data.frame.matrix(freq.table) # Convert frequency table to data frame woe.dfrm <- woe.dfrm[, c(good, bad)] # Select columns with raw frequencies only # Compute columns percents for target classes from crosstab frequencies woe.dfrm$col.perc.a <- woe.dfrm[,1]/sum(woe.dfrm[,1]) woe.dfrm$col.perc.b <- woe.dfrm[,2]/sum(woe.dfrm[,2]) # Correct column percents in case of 0 frequencies (in case of no NA skip last row) if ( !anyNA(df[,2]) ) { if ( min(woe.dfrm[-nrow(woe.dfrm),1],na.rm=TRUE)==0 | min(woe.dfrm[-nrow(woe.dfrm),2],na.rm=TRUE)==0 ) { woe.dfrm$col.perc.a[-nrow(woe.dfrm)] <- (woe.dfrm$col.perc.a[-nrow(woe.dfrm)]+0.0001)/sum(woe.dfrm$col.perc.a[-nrow(woe.dfrm)]+0.0001) woe.dfrm$col.perc.b[-nrow(woe.dfrm)] <- (woe.dfrm$col.perc.b[-nrow(woe.dfrm)]+0.0001)/sum(woe.dfrm$col.perc.b[-nrow(woe.dfrm)]+0.0001) } } else { if ( min(woe.dfrm[,1],na.rm=TRUE)==0 | min(woe.dfrm[,2],na.rm=TRUE)==0 ) { woe.dfrm$col.perc.a <- (woe.dfrm$col.perc.a+0.0001)/sum(woe.dfrm$col.perc.a+0.0001) woe.dfrm$col.perc.b <- (woe.dfrm$col.perc.b+0.0001)/sum(woe.dfrm$col.perc.b+0.0001) } 
} } } ## After sparse bins are merged: ## Merge bins with similar WOE values and calculate corresponding WOE table and IV step by step ## until 2 bins are left (i.e. 3 cutpoints: -Inf, middle cutpoint, +Inf) while ( length(cutpoints)>2 ) { # Compute binned variable from cutpoints and add it to the subset data frame dfrm$predictor.var.binned <- cut(dfrm$predictor.var, cutpoints, labels = NULL, include.lowest = FALSE, right = TRUE, dig.lab = 10, ordered_result = TRUE) # Compute crosstab from binned variable and target variable and covert it to a data frame freq.table <- table(dfrm$predictor.var.binned, dfrm$target.var, useNA="always") row.names(freq.table)[is.na(row.names(freq.table))] <- 'Missing' # Replace NA in row.names with string 'Missing' woe.dfrm <- as.data.frame.matrix(freq.table) # Convert frequency table to data frame woe.dfrm <- woe.dfrm[, c(good, bad)] # Select columns with raw frequencies only # Compute WOE and information value (IV) from crosstab frequencies woe.dfrm$col.perc.a <- woe.dfrm[,1]/sum(woe.dfrm[,1]) woe.dfrm$col.perc.b <- woe.dfrm[,2]/sum(woe.dfrm[,2]) # Correct column percents in case of 0 frequencies (in case of no NA skip last row) if ( !anyNA(df[,2]) ) { if ( min(woe.dfrm[-nrow(woe.dfrm),1],na.rm=TRUE)==0 | min(woe.dfrm[-nrow(woe.dfrm),2],na.rm=TRUE)==0 ) { woe.dfrm$col.perc.a[-nrow(woe.dfrm)] <- (woe.dfrm$col.perc.a[-nrow(woe.dfrm)]+0.0001)/sum(woe.dfrm$col.perc.a[-nrow(woe.dfrm)]+0.0001) woe.dfrm$col.perc.b[-nrow(woe.dfrm)] <- (woe.dfrm$col.perc.b[-nrow(woe.dfrm)]+0.0001)/sum(woe.dfrm$col.perc.b[-nrow(woe.dfrm)]+0.0001) } } else { if ( min(woe.dfrm[,1],na.rm=TRUE)==0 | min(woe.dfrm[,2],na.rm=TRUE)==0 ) { woe.dfrm$col.perc.a <- (woe.dfrm$col.perc.a+0.0001)/sum(woe.dfrm$col.perc.a+0.0001) woe.dfrm$col.perc.b <- (woe.dfrm$col.perc.b+0.0001)/sum(woe.dfrm$col.perc.b+0.0001) } } woe.dfrm$woe <- 100*log(woe.dfrm$col.perc.a/woe.dfrm$col.perc.b) woe.dfrm$woe[is.finite(woe.dfrm$woe)==FALSE] <- NA # convert Inf, -Inf and NaN to NA woe.dfrm$woe.lag <- c(NA, embed(woe.dfrm$woe,2)[,2]) woe.dfrm$woe.diff <- abs(woe.dfrm$woe-woe.dfrm$woe.lag) woe.dfrm$iv.bins <- (woe.dfrm$col.perc.a-woe.dfrm$col.perc.b)*woe.dfrm$woe/100 # Calculate total IV for current binning iv.total <- sum(woe.dfrm$iv.bins, na.rm=TRUE) # Collect total IVs for different binning solutions ifelse (exists('iv.total.collect', inherits=FALSE), iv.total.collect <- cbind(iv.total.collect, iv.total), iv.total.collect <- iv.total) # In case IV decreases by more than percentage specified by stop.limit parameter above # restore former binning solution (cutpoints) and leave loop if ( length(iv.total.collect)>1 ) { actual.iv.decrease <- ((iv.total.collect[length(iv.total.collect)-1]-iv.total.collect[length(iv.total.collect)])/(iv.total.collect[length(iv.total.collect)-1])) if ( (actual.iv.decrease>stop.limit) && (exists('stop.limit.exceeded', inherits=FALSE)==FALSE) ) { cutpoints.final <- cutpoints.backup woe.dfrm.final <- woe.dfrm.backup stop.limit.exceeded <- TRUE # indicates that stop limit is exceeded to prevent overriding the final solution } } # Save first cutpoint solution and corresponding WOE values as final solution (is used in case no WOE merging will be applied) if ( exists('cutpoints.backup', inherits=FALSE)==FALSE ) { cutpoints.final <- cutpoints woe.dfrm.final <- woe.dfrm } # Saves binning solution after last merging step in case the IV stop limit was not exceeded if ( (exists('stop.limit.exceeded', inherits=FALSE)==FALSE) && (length(cutpoints)==3) ) { cutpoints.final <- cutpoints woe.dfrm.final <- 
woe.dfrm } # Save backups of current cutpoints and corresponding WOE values before merging to be able to retrieve solution in case IV decrease is too strong cutpoints.backup <- cutpoints woe.dfrm.backup <- woe.dfrm # Determine the index of the minimum WOE difference between adjacent bins and # merge bins with minimum WOE difference (apart from the last 'Missing' bin) min.woe.diff <- which(woe.dfrm$woe.diff[-nrow(woe.dfrm)]==min(woe.dfrm$woe.diff[-nrow(woe.dfrm)], na.rm=TRUE)) cutpoints <- cutpoints[-c(min.woe.diff)] } ## Compute final IV iv.total.final <- sum(woe.dfrm.final$iv.bins, na.rm=TRUE) ## Save final binning solution via look-up-table for deployment lower.cutpoints.final.dfrm <- as.data.frame(cutpoints.final) upper.cutpoints.final.dfrm <- rbind(as.data.frame(cutpoints.final[-1]),'Missing') look.up.table <- cbind(woe.dfrm.final[, 5,drop=FALSE], lower.cutpoints.final.dfrm, upper.cutpoints.final.dfrm) # if ( look.up.table[nrow(look.up.table),1]==0 ) { look.up.table[nrow(look.up.table),1] <- NA } # replace WOE=0 in Missing row with NA (because this only occurs in case Missing Data does not occur during binning) look.up.table <- cbind.data.frame(look.up.table, iv.total.final, woe.dfrm.final[, c(1,2,3,4,8),drop=FALSE]) # add column with final total Information Value } ### Binning in case a factor was selected if ( length(unique(dfrm[,1]))==2 && is.factor(dfrm[,2]) ) { ## Copy predictor variable to prepare binning/recoding dfrm$predictor.var.binned <- dfrm$predictor.var ## Handling of NAs if ( anyNA(dfrm$predictor.var.binned)==TRUE ) { levels(dfrm$predictor.var.binned) <- c(levels(dfrm$predictor.var.binned), "Missing") # add factor level 'Missing' dfrm$predictor.var.binned[is.na(dfrm$predictor.var.binned)] <- "Missing" # replace NA with string 'Missing' } ## Prepare binned factor in INPUT data (levels may be merged in subsequent steps) df[,ncol(df)+1] <- df[, c(pred.var)] colnames(df)[ncol(df)] <- paste(pred.var,".binned",sep="") # Handling of NAs if ( anyNA(df[,ncol(df)])==TRUE ) { levels(df[,ncol(df)]) <- c(levels(df[,ncol(df)]), "Missing") # add factor level 'Missing' df[,ncol(df)][is.na(df[,ncol(df)])] <- "Missing" # replace NA with string 'Missing' } ## Calculate initial crosstab from binned variable and target variable ## to identify and merge sparse bins # Compute crosstab from binned variable and target variable and covert it to a data frame freq.table <- table(dfrm$predictor.var.binned, dfrm$target.var) woe.dfrm <- as.data.frame.matrix(freq.table) # Convert frequency table to data frame woe.dfrm <- woe.dfrm[, c(good, bad)] # Select columns with raw frequencies only # Compute WOE and information value (IV) from crosstab frequencies woe.dfrm$col.perc.a <- woe.dfrm[,1]/sum(woe.dfrm[,1]) woe.dfrm$col.perc.b <- woe.dfrm[,2]/sum(woe.dfrm[,2]) # Correct column percents in case of 0 frequencies if ( min(woe.dfrm[,1],na.rm=TRUE)==0 | min(woe.dfrm[,2],na.rm=TRUE)==0 ) { woe.dfrm$col.perc.a <- (woe.dfrm$col.perc.a+0.0001)/sum(woe.dfrm$col.perc.a+0.0001) woe.dfrm$col.perc.b <- (woe.dfrm$col.perc.b+0.0001)/sum(woe.dfrm$col.perc.b+0.0001) } # Merge factor levels with frequencies < percentage limit specified above to "misc. level" (associated with pos. and neg. 
WOE values) woe.dfrm$sparse.merge[woe.dfrm$col.perc.a<min.perc.class | woe.dfrm$col.perc.b<min.perc.class | ((woe.dfrm[,1]+woe.dfrm[,2])/(sum(woe.dfrm[,1],na.rm=TRUE)+sum(woe.dfrm[,2],na.rm=TRUE)))<min.perc.total] <- 1 woe.dfrm.sparse.subset <- na.omit(woe.dfrm) woe.dfrm.sparse.subset$sparse.merge[woe.dfrm.sparse.subset$col.perc.a <= woe.dfrm.sparse.subset$col.perc.b] <- -1 woe.dfrm.sparse.subset.pos <- woe.dfrm.sparse.subset[woe.dfrm.sparse.subset$sparse.merge==1, ] woe.dfrm.sparse.subset.neg <- woe.dfrm.sparse.subset[woe.dfrm.sparse.subset$sparse.merge==-1, ] levels(dfrm$predictor.var.binned)[levels(dfrm$predictor.var.binned)%in%(row.names(woe.dfrm.sparse.subset.pos))] <- "misc. level pos." levels(dfrm$predictor.var.binned)[levels(dfrm$predictor.var.binned)%in%(row.names(woe.dfrm.sparse.subset.neg))] <- "misc. level neg." ## After sparse levels are merged: ## Merge levels with similar WOE values and calculate corresponding WOE table and IV step by step until ## 2 regular bins (+ Missing or 'misc. level') are left while ( length(levels(dfrm$predictor.var.binned))>3 ) { # Compute crosstab from binned variable and target variable and covert it to a data frame freq.table <- table(dfrm$predictor.var.binned, dfrm$target.var) #row.names(freq.table)[is.na(row.names(freq.table))] <- 'Missing' # Replace NA in row.names with string 'Missing' woe.dfrm <- as.data.frame.matrix(freq.table) # Convert frequency table to data frame woe.dfrm <- woe.dfrm[, c(good, bad)] # Select columns with raw frequencies only # Compute WOE and information value (IV) from crosstab frequencies woe.dfrm$col.perc.a <- woe.dfrm[,1]/sum(woe.dfrm[,1]) woe.dfrm$col.perc.b <- woe.dfrm[,2]/sum(woe.dfrm[,2]) # Correct column percents in case of 0 frequencies if ( min(woe.dfrm[,1],na.rm=TRUE)==0 | min(woe.dfrm[,2],na.rm=TRUE)==0 ) { woe.dfrm$col.perc.a <- (woe.dfrm$col.perc.a+0.0001)/sum(woe.dfrm$col.perc.a+0.0001) woe.dfrm$col.perc.b <- (woe.dfrm$col.perc.b+0.0001)/sum(woe.dfrm$col.perc.b+0.0001) } woe.dfrm$woe <- 100*log(woe.dfrm$col.perc.a/woe.dfrm$col.perc.b) woe.dfrm$woe[is.finite(woe.dfrm$woe)==FALSE] <- NA # convert Inf, -Inf and NaN to NA woe.dfrm <- woe.dfrm[order(woe.dfrm$woe),] # sort data via WOE values woe.dfrm$woe.lag <- c(NA, embed(woe.dfrm$woe,2)[,2]) woe.dfrm$woe.diff <- abs(woe.dfrm$woe-woe.dfrm$woe.lag) woe.dfrm$iv.bins <- (woe.dfrm$col.perc.a-woe.dfrm$col.perc.b)*woe.dfrm$woe/100 # Calculate total IV for current binning iv.total <- sum(woe.dfrm$iv.bins, na.rm=TRUE) # Collect total IVs for different binning solutions ifelse (exists('iv.total.collect', inherits=FALSE), iv.total.collect <- cbind(iv.total.collect, iv.total), iv.total.collect <- iv.total) # In case IV decreases by more than percentage specified by stop.limit parameter above # restore former binning solution (cutpoints) and leave loop if ( length(iv.total.collect)>1 ) { actual.iv.decrease <- ((iv.total.collect[length(iv.total.collect)-1]-iv.total.collect[length(iv.total.collect)])/(iv.total.collect[length(iv.total.collect)-1])) if ( (actual.iv.decrease>stop.limit) && (exists('stop.limit.exceeded', inherits=FALSE)==FALSE) ) { stop.limit.exceeded <- TRUE # indicates that stop limit is exceeded to prevent overriding the final solution } } # Merge until 2 regular bins remain if ( length(levels(dfrm$predictor.var.binned))>3 ) { # Merge levels with most similar WOE values min.woe.diff <- which(woe.dfrm$woe.diff==min(woe.dfrm$woe.diff, na.rm=TRUE)) 
levels(dfrm$predictor.var.binned)[levels(dfrm$predictor.var.binned)%in%c(row.names(woe.dfrm)[min.woe.diff][[1]][1],row.names(woe.dfrm)[min.woe.diff-1][[1]][1])] <- paste(row.names(woe.dfrm)[min.woe.diff][[1]][1], "+", row.names(woe.dfrm)[min.woe.diff-1][[1]][1]) # Save names of the factor levels that are merged list.level.a <- as.list(row.names(woe.dfrm)[min.woe.diff][[1]][1]) list.level.b <- as.list(row.names(woe.dfrm)[min.woe.diff-1][[1]][1]) # Collect names of the factor levels that are merged in lists (until stop criteria is reached) if ( exists('list.level.a.collected', inherits=FALSE)==FALSE ) { list.level.a.collected <- list.level.a list.level.b.collected <- list.level.b } else { if ( exists('stop.limit.exceeded', inherits=FALSE)==FALSE ) { list.level.a.collected <- c(list.level.a.collected, list.level.a) list.level.b.collected <- c(list.level.b.collected, list.level.b) } else { list.level.a.collected <- list.level.a.collected[1:length(list.level.a.collected)-1] list.level.b.collected <- list.level.b.collected[1:length(list.level.b.collected)-1] } } } } ### Apply FINAL binning to INPUT data ## Merge factor levels # Merge sparse levels levels(df[,ncol(df)])[levels(df[,ncol(df)])%in%(row.names(woe.dfrm.sparse.subset.pos))] <- "misc. level pos." levels(df[,ncol(df)])[levels(df[,ncol(df)])%in%(row.names(woe.dfrm.sparse.subset.neg))] <- "misc. level neg." # Merge levels with similar WOE values if ( exists('list.level.a.collected', inherits=FALSE)==TRUE ) { for ( i in 1:length(list.level.a.collected) ) { levels(df[,ncol(df)])[levels(df[,ncol(df)])%in%c(list.level.a.collected[i],list.level.b.collected[i])] <- paste(list.level.a.collected[i], "+", list.level.b.collected[i]) } } ## Repeat generating WOE table for selected binning solution # Compute crosstab from binned variable and target variable and covert it to a data frame freq.table.final <- table(df[,ncol(df)], dfrm$target.var) #row.names.final(freq.table.final)[is.na(row.names(freq.tabl.finale))] <- 'Missing' # replace NA in row.names with string 'Missing' woe.dfrm.final <- as.data.frame.matrix(freq.table.final) # convert frequency table to data frame woe.dfrm.final <- woe.dfrm.final[, c(good, bad)] # Select columns with raw frequencies only # Compute WOE and information value (IV) from crosstab frequencies woe.dfrm.final$col.perc.a <- woe.dfrm.final[,1]/sum(woe.dfrm.final[,1]) woe.dfrm.final$col.perc.b <- woe.dfrm.final[,2]/sum(woe.dfrm.final[,2]) # Correct column percents in case of 0 frequencies if ( min(woe.dfrm.final[,1],na.rm=TRUE)==0 | min(woe.dfrm.final[,2],na.rm=TRUE)==0 ) { woe.dfrm.final$col.perc.a <- (woe.dfrm.final$col.perc.a+0.0001)/sum(woe.dfrm.final$col.perc.a+0.0001) woe.dfrm.final$col.perc.b <- (woe.dfrm.final$col.perc.b+0.0001)/sum(woe.dfrm.final$col.perc.b+0.0001) } woe.dfrm.final$woe <- 100*log(woe.dfrm.final$col.perc.a/woe.dfrm.final$col.perc.b) woe.dfrm.final$woe[is.finite(woe.dfrm.final$woe)==FALSE] <- NA # convert Inf, -Inf and NaN to NA woe.dfrm.final <- woe.dfrm.final[order(woe.dfrm.final$woe),] # sort data via WOE values woe.dfrm.final$iv.bins <- (woe.dfrm.final$col.perc.a-woe.dfrm.final$col.perc.b)*woe.dfrm.final$woe/100 iv.total.final <- sum(woe.dfrm.final$iv.bins, na.rm=TRUE) ## Add variable with corresponding WOE values for final binning # Save row order of input data as ID variable df$initial.order.id <- 1:nrow(df) # Add final binned (numerical) variable with WOE values (via left join with WOE table) df <- merge(df, woe.dfrm.final[,5,drop=FALSE], by.x=colnames(df)[ncol(df)-1], by.y=0, all.x=TRUE) 
colnames(df)[ncol(df)] <- paste(pred.var,".binned.woe",sep="") # Restore initial column and row order and get rid of initial.order.id and row names df <- df[order(df$initial.order.id), ] df <- subset(df, select=c(2:(ncol(df)-2),1,ncol(df))) row.names(df) <- NULL ## Save final binning solution via look-up-table for deployment levels(df[,pred.var]) <- c(levels(df[,pred.var]), "Missing") # add factor level 'Missing' df[,pred.var][is.na(df[,pred.var])] <- "Missing" # replace NA with string 'Missing' look.up.table <- aggregate(df[,ncol(df)], list(df[,pred.var], df[,ncol(df)-1]), mean, na.rm=TRUE) look.up.table <- cbind.data.frame(look.up.table, iv.total.final) # add column with final total Information Value colnames(look.up.table)[3] <- "woe" look.up.table <- merge(look.up.table, woe.dfrm.final[ , -which(names(woe.dfrm.final) %in% c("woe"))], by.x=2, by.y=0) look.up.table <- look.up.table[order(look.up.table$woe, look.up.table$Group.2),] # sort by woe value and merged bin name # In case the misc. level consists only of only NA rename it 'Missing' if ( length(which(look.up.table[,2]=='Missing'))==1 && length(which(look.up.table[,1]=="misc. level neg."))==1 ) { if ( (which(look.up.table[,2]=='Missing') == which(look.up.table[,1]=='misc. level neg.')) ) { levels(look.up.table[,1]) <- c(levels(look.up.table[,2]), 'Missing') # add factor level 'Missing' look.up.table[,1][look.up.table[,2]=='Missing'] <- 'Missing' } } if ( length(which(look.up.table[,2]=='Missing'))==1 && length(which(look.up.table[,1]=="misc. level pos."))==1 ) { if ( (which(look.up.table[,2]=='Missing') == which(look.up.table[,1]=='misc. level pos.')) ) { levels(look.up.table[,1]) <- c(levels(look.up.table[,2]), 'Missing') # add factor level 'Missing' look.up.table[,1][look.up.table[,2]=='Missing'] <- 'Missing' } } # Abbreviate long factor levels (in case they are longer than specified or longer than 1000 characters) if ( abbrev.fact.levels==0 && 1000<max(nchar(as.character(look.up.table$Group.2))) ) { abbrev.fact.levels <- 1000 } if ( abbrev.fact.levels>0 && abbrev.fact.levels<max(nchar(as.character(look.up.table$Group.2))) ) { look.up.table$Group.2 <- as.factor(abbreviate(look.up.table$Group.2, abbrev.fact.levels)) # actual abbrevation look.up.table$Group.2 <- as.factor(gsub("[*+*]", " ", look.up.table$Group.2)) # remove + signs look.up.table$Group.2 <- as.factor(gsub(" +", " ", look.up.table$Group.2)) # remove double blanks } } #### Check for correct variable specification and #### generate requested output, in case specification is correct ### Display warning message in case of incorrect predictor variable specification if ( (is.numeric(dfrm[,2])==FALSE) && (is.factor(dfrm[,2])==FALSE) ) { warning("Incorrect variable specification.\nPredictor variable needs to be a numeric variable or a factor.") } ### Generate requested output, in case specification is correct else { ## Function passes the final binning solution as look-up table look.up.table } } #' @title Binning via Fine and Coarse Classing #' #' @description #' \code{woe.binning} generates a supervised fine and coarse classing of numeric #' variables and factors with respect to a dichotomous target variable. Its parameters #' provide flexibility in finding a binning that fits specific data characteristics #' and practical needs. #' #' @section Binning of Numeric Variables: #' Numeric variables (continuous and ordinal) are binned by merging initial classes with #' similar frequencies. 
The number of initial bins results from the \emph{min.perc.total} #' parameter: min.perc.total will result in trunc(1/min.perc.total) initial bins, #' whereby \emph{trunc} is needed to guarantee bins with similar frequencies. #' For example \emph{min.perc.total=0.07} will cause trunc(14.3)=14 initial classes. #' Next, if \emph{min.perc.class}>0, bins with sparse target classes will be merged with #' the next upper bin, and in case of the last bin with the next lower one. NAs have #' their own bin and will not be merged with others. Finally nearby bins with most similar #' weight of evidence (WOE) values are joined step by step until the information value #' (IV) decreases more than specified by a percentage value (\emph{stop.limit} parameter) #' or until two bins are reached. #' @section Binning of Factors: #' Factors (categorical variables) are binned by merging factor levels. As a start sparse #' levels (defined via the \emph{min.perc.total} and \emph{min.perc.class} parameters) #' are merged to a \sQuote{miscellaneous} level: if possible, respective levels (including #' sparse NAs) are bundled as \sQuote{misc. level pos.} (associated with positive WOE #' values), respectively as \sQuote{misc. level neg.} (associated with negative WOE #' values). In case a misc. level contains only NAs it will be named \sQuote{Missing}. #' Afterwards levels with similar WOE values are joined step by step until the information #' value (IV) decreases more than specified by a percentage value (\emph{stop.limit} parameter) #' or until two bins are reached. #' @section Adjustment of 0 Frequencies: #' In case the crosstab of the bins with the target classes contains frequencies = 0 #' the column percentages are adjusted to be able to compute the WOE and IV values: #' the offset 0.0001 (=0.01\%) is added to each column percentage cell and the column #' percentages are recomputed then. This allows considering bins associated with one target #' class only, but may cause extreme WOE values for these bins. If a correction is not #' appropriate choose \emph{min.perc.class}>0; bins with sparse target classes will be #' merged then before computing any WOE or IV value. #' @section Handling of Missing Data: #' Cases with NAs in the target variable will be ignored. For predictor variables the following #' applies: in case NAs already occurred when generating the binning solution #' the code \sQuote{Missing} is displayed and a corresponding WOE value can be computed. #' (Note that factor NAs may be joined with other sparse levels to a \sQuote{miscellaneous} #' level - see above; only this \sQuote{miscellaneous} level will be displayed then.) #' In case NAs occur in the deployment scenario only \sQuote{Missing} is #' displayed for numeric variables and \sQuote{unknown} for factors; and #' the corresponding WOE values will be NA then, as well. #' #' @usage #' woe.binning(df, target.var, pred.var, min.perc.total, #' min.perc.class, stop.limit, abbrev.fact.levels, event.class) #' #' @return #' \code{woe.binning} generates an object containing the information necessary #' for studying and applying the realized binning solution. When saved #' it can be used with the functions \code{\link{woe.binning.plot}}, \code{\link{woe.binning.table}} #' and \code{\link{woe.binning.deploy}}. #' #' @param df #' Name of data frame with input data. #' @param target.var #' Name of dichotomous target variable in quotes. Only target variables with #' two distinct values (e.g. 
0, 1 or \dQuote{Y}, \dQuote{N}) are accepted; #' cases with NAs in the target variable will be ignored. #' @param pred.var #' Name of predictor variable(s) to be binned in quotes. #' A single variable name can be provided, e.g. \dQuote{varname1}, or a list of #' variable names, e.g. c(\dQuote{varname1}, \dQuote{varname2}). Alternatively one #' can repeat the name of the input data frame; the function will be applied #' to all its variables apart from the target variable then. #' Numeric variables and factors are supported and may contain NAs. #' @param min.perc.total #' For numeric variables this parameter defines the number of initial #' classes before any merging is applied. For example \emph{min.perc.total=0.05} #' (5\%) will result in 20 initial classes. For factors the original #' levels with a percentage below this limit are collected in a \sQuote{miscellaneous} #' level before the merging based on the \emph{min.perc.class} and on the #' WOE starts. Increasing the \emph{min.perc.total} parameter will avoid #' sparse bins. Accepted range: 0.0001-0.2; default: 0.05. #' @param min.perc.class #' If a column percentage of one of the target classes within a bin is #' below this limit (e.g. below 0.01=1\%) then the respective bin will be #' joined with others. In case of numeric variables adjacent predictor classes #' are merged. For factors respective levels (including sparse NAs) are #' assigned to a \sQuote{miscellaneous} level. Setting \emph{min.perc.class}>0 #' may provide more reliable WOE values. Accepted range: 0-0.2; #' default: 0, i.e. no merging with respect to sparse target classes #' is applied. #' @param stop.limit #' Stops WOE based merging of the predictor's classes/levels in case the #' resulting information value (IV) decreases more than \emph{x}\% (e.g. 0.05 = 5\%) #' compared to the preceding binning step. \emph{stop.limit=0} will skip any #' WOE based merging. Increasing the \emph{stop.limit} will simplify the binning #' solution and may avoid overfitting. Accepted range: 0-0.5; default: 0.1. #' @param abbrev.fact.levels #' Abbreviates the names of new (merged) factor levels via the base R #' \code{\link{abbreviate}} function in case the specified number of #' characters is exceeded. Accepted range: 0-1000; default: 200. #' 0 will prevent applying any abbreviation, i.e. only factor levels with #' more than 1000 characters will be truncated then. #' This option is particularly relevant in case one wants to generate dummy #' variables via the \code{\link{woe.binning.deploy}} function, because the #' factor levels will be part of the dummy variable names then. #' @param event.class #' Optional parameter for specifying the class of the target event. This #' class typically indicates a negative event like a loan default or a #' disease. Use integers (e.g. 1) or characters in quotes (e.g. \dQuote{bad}). #' This class will be represented by negative WOE values then. 
#' #' @family binning functions #' #' @examples #' # Load German credit data and create subset #' data(germancredit) #' df <- germancredit[, c('creditability', 'credit.amount', 'duration.in.month', #' 'savings.account.and.bonds', 'purpose')] #' #' # Bin a single numeric variable #' binning <- woe.binning(df, 'creditability', 'duration.in.month', #' min.perc.total=0.05, min.perc.class=0.01, #' stop.limit=0.1, event.class='bad') #' #' # Bin a single factor #' binning <- woe.binning(df, 'creditability', 'purpose', #' min.perc.total=0.05, min.perc.class=0, stop.limit=0.1, #' abbrev.fact.levels=50, event.class='bad') #' #' # Bin two variables (one numeric and one factor) #' # with default parameter settings #' binning <- woe.binning(df, 'creditability', c('credit.amount','purpose')) #' #' # Bin all variables of the data frame (apart from the target variable) #' # with default parameter settings #' binning <- woe.binning(df, 'creditability', df) #' #' @importFrom stats aggregate #' @importFrom stats embed #' @importFrom stats na.omit #' @importFrom stats quantile #' #' @export ##### This function calls the actual binning function above for every specified predictor variable that needs to be binned. ##### woe.binning <- function(df, target.var, pred.var, min.perc.total, min.perc.class, stop.limit, abbrev.fact.levels, event.class) { #### Warning message and defaults in case parameters are not specified if ( missing(df)==TRUE || missing(target.var)==TRUE || missing(pred.var)==TRUE ) { warning("Incorrect specification of data frame and/or variables.") } if ( missing(min.perc.total)==TRUE ) { min.perc.total=0.05 } if ( min.perc.total<0.0001 || min.perc.total>0.2 || !is.numeric(min.perc.total) ) { warning("Incorrect parameter specification; accepted min.perc.total parameter range is 0.0001-0.2. Parameter was set to default (0.05).") min.perc.total=0.05 } if ( missing(min.perc.class)==TRUE ) { min.perc.class=0 } if ( min.perc.class<0 || min.perc.class>0.2 || !is.numeric(min.perc.class) ) { warning("Incorrect parameter specification; accepted min.perc.class parameter range is 0-0.2. Parameter was set to default (0).") min.perc.class=0 } if ( missing(stop.limit)==TRUE ) { stop.limit=0.1 } if ( stop.limit<0 || stop.limit>0.5 || !is.numeric(stop.limit) ) { warning("Incorrect parameter specification; accepted stop.limit parameter range is 0-0.05. Parameter was set to default (0.1).") stop.limit=0.1 } if ( missing(abbrev.fact.levels)==TRUE ) { abbrev.fact.levels=200 } if ( abbrev.fact.levels<0 || abbrev.fact.levels>1000 ) { warning("Incorrect parameter specification; accepted abbrev.fact.levels parameter range is 0-10000. 
Parameter was set to default (200).") abbrev.fact.levels=200 } #### Display warning message in case of incorrect target variable specification if ( !(length(unique(df[,target.var][!is.na(df[,target.var])]))==2) ) { warning("Incorrect variable specification.\nTarget variable must have two distinct values (NAs are accepted).") } #### Display warning message in case none of the target classes matches the specified event.class parameter if ( !missing(event.class) ) { if ( (unique(df[,target.var])[1]==event.class || unique(df[,target.var])[2]==event.class)==FALSE ) { warning("None of the target classes matches the specified event.class parameter.") } } #### In case bad class was specified assign 'good' and 'bad' codes (the latter will be associated with negative WOE values then) if ( !missing(event.class) ) { if ( unique(df[,target.var])[1]==event.class ) { bad <- unique(df[,target.var])[1] good <- unique(df[,target.var])[2] } else { bad <- unique(df[,target.var])[2] good <- unique(df[,target.var])[1] } } else { bad <- unique(df[,target.var])[1] good <- unique(df[,target.var])[2] } bad <- toString(bad) good <- toString(good) #### Gather names and look-up tables (with binned classes and WOE values) for each predictor variable in a list if ( is.data.frame(pred.var)==TRUE ) { pred.var <- as.list(colnames(subset(df, select=-c(which( colnames(df)==target.var ))))) # convert variable names of data frame into a list (without target variable) } else { as.list(pred.var) # provide variable name(s) as a list } #### Subset: consider only cases without NA in target variable df <- df[!is.na(df[,target.var]),] #### Call actual binning function and put binning solutions together with respective variable names into a list binning <- lapply(pred.var, function(x) woe.binning.2(df, target.var, x, min.perc.total, min.perc.class, stop.limit, abbrev.fact.levels, bad, good)) #### Read names and IV total values in the list and put them together with the binning tables names.of.pred.var <- lapply(pred.var, function(x) x) iv.total.list <- lapply(binning, function(x) colMeans(x[4])) binning <- matrix(c(names.of.pred.var, binning, iv.total.list),ncol=3) #### Sort via IV total binning <- binning[rev(sort.list(as.numeric(binning[,3]))),] binning }
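## Illustrative sketch (not part of the original woeBinning source): the two
## quantities the documentation above refers to, computed by hand. Values are
## invented for illustration; wrapped in `if (FALSE)` so sourcing never runs it.
if (FALSE) {
  # Number of initial bins implied by min.perc.total (see "Binning of Numeric
  # Variables" above): min.perc.total = 0.07 gives trunc(1/0.07) = 14 bins.
  min.perc.total <- 0.07
  trunc(1 / min.perc.total)

  # Per-bin WOE and IV contribution as computed inside woe.binning.2():
  col.perc.a <- 0.25        # column percentage of the "good" class in this bin
  col.perc.b <- 0.10        # column percentage of the "bad" class in this bin
  woe    <- 100 * log(col.perc.a / col.perc.b)        # ~91.6
  iv.bin <- (col.perc.a - col.perc.b) * woe / 100     # summed over bins -> total IV
  c(woe = woe, iv.bin = iv.bin)
}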
## End of file: woeBinning/R/woe.binning.R
##### This is the actual binning deployment function for numeric variables and factors. ##### woe.binning.deploy.2 <- function(df, pred.var, look.up.table, add.woe.or.dum.var) { ### Binning in case a numerical variable was selected if ( is.numeric(df[, c(pred.var)]) ) { # Add variable with binned intervals dfrm.binned <- cut(df[, c(pred.var)], look.up.table[,2], labels = NULL, include.lowest = FALSE, right = TRUE, dig.lab = 10, ordered_result = FALSE) dfrm.binned <- as.data.frame(dfrm.binned) colnames(dfrm.binned)[1] <- paste(pred.var,"binned",sep=".") levels(dfrm.binned[,1]) <- c(levels(dfrm.binned[,1]), "Missing") # add factor level 'Missing' dfrm.binned[,1][is.na(dfrm.binned[,1])] <- 'Missing' # replace NA with string 'Missing' # Add variable with corresponding WOE values if ( add.woe.or.dum.var=="woe" ) { dfrm.binned[,2] <- look.up.table[,1][match(dfrm.binned[,1] , row.names(look.up.table))] colnames(dfrm.binned)[2] <- paste("woe",pred.var,"binned",sep=".") dfrm.binned[,2][is.na(dfrm.binned[,2])] <- look.up.table[length(look.up.table[,1]),1] # replace NA in original numeric variable with corresponding WOE value } # Add dummy variables for binned classes if ( add.woe.or.dum.var=="dum" ) { binned.var <- dfrm.binned[,1] for ( level in unique(binned.var) ){ # Remove special characters from binned intervals level <- gsub("(","",level, fixed = TRUE) level <- gsub("]","",level, fixed = TRUE) level <- gsub(",",".",level, fixed = TRUE) dfrm.binned[paste("dum",pred.var,gsub(" ","",level),"binned",sep=".")] <- ifelse(binned.var==level,1,0) } } } ### Binning in case a factor was selected if ( is.factor(df[, c(pred.var)]) ) { # Add variable with binned levels dfrm.binned <- df[, c(pred.var)] # Copy original predictor variable dfrm.binned <- as.data.frame(dfrm.binned) levels(dfrm.binned[,1]) <- c(levels(dfrm.binned[,1]), "Missing") # add factor level 'Missing' dfrm.binned[,1][is.na(dfrm.binned[,1])] <- 'Missing' # replace NA with string 'Missing' dfrm.binned[,1] <- look.up.table[,1][match(dfrm.binned[,1], look.up.table[,2])] # replace original factor level with aggregated level from look-up table colnames(dfrm.binned)[1] <- paste(pred.var,"binned",sep=".") levels(dfrm.binned[,1]) <- c(levels(dfrm.binned[,1]), "unknown") # add factor level 'unknown' dfrm.binned[,1][is.na(dfrm.binned[,1])] <- "unknown" # in case original factor level is unknown replace with "unknown" # Add variable with corresponding WOE values if ( add.woe.or.dum.var=="woe" ) { dfrm.binned[,2] <- look.up.table[,3][match(dfrm.binned[,1], look.up.table[,1])] colnames(dfrm.binned)[2] <- paste("woe",pred.var,"binned",sep=".") } # Add dummy variables for binned levels if ( add.woe.or.dum.var=="dum" ) { for ( level in unique(dfrm.binned[,1]) ){ dfrm.binned[paste("dum",pred.var,gsub("[^[:alnum:]]","",level),"binned",sep=".")] <- ifelse(dfrm.binned[,1]==level,1,0) # only alphanumeric characters are allowed } } } ### Pass dataframe with binned variables dfrm.binned } #' @title Deployment of Binning #' #' @description #' \code{woe.binning.deploy} applies the binning solution generated and saved via the \code{\link{woe.binning}} #' or \code{\link{woe.tree.binning}} function to (new) data. #' #' @section General Procedure: #' \code{woe.binning.deploy} applies the binning information that was generated from the \code{woe.binning} #' or \code{woe.tree.binning} function to a data frame. 
In this data frame the names of the variables #' to be binned need to be identical to the ones used with the \code{woe.binning} #' or \code{woe.tree.binning} function. For each variable a binned version will be added. #' Optionally a variable with associated weight of evidence (WOE) values or corresponding #' dummy variables (one dummy variable for each final bin) are provided. #' @section Handling of Missing Data: #' In case NAs already occurred during the \code{woe.binning} or \code{woe.tree.binning} binning process the code #' \sQuote{Missing} is displayed and a corresponding WOE value can be computed. #' In case NAs only occur in the deployment scenario \sQuote{Missing} is #' displayed for numeric variables and \sQuote{unknown} for factors; and #' the corresponding WOE values will be NAs then, as well. #' @section Handling of Unknown Factor Levels: #' For factor levels that have not been provided in generating the #' binning solution via the \code{woe.binning} or \code{woe.tree.binning} function a new factor #' level \sQuote{unknown} is displayed and the corresponding WOE value will be NA. #' #' @usage #' woe.binning.deploy(df, binning, min.iv.total, add.woe.or.dum.var) #' #' @param df #' Name of the data frame the binning solution - that was generated via the function \code{woe.binning} #' or \code{woe.tree.binning} - should be applied to. The variable names and types (numerical or factor) #' need to be identical to the ones used during the generation of the binning solution. #' @param binning #' Binning information generated from the \code{woe.binning} or \code{woe.tree.binning} function. #' Contains names of the input predictor variables and the #' corresponding binning, WOE and IV information, which is used to #' add a binned variable to a copy of the input data. #' @param min.iv.total #' If the IV total value of a binned variable falls below this limit (e.g. 0.1) #' it will not be added to the data. Just omit this parameter in case you would #' like to add all binned variables (default). #' @param add.woe.or.dum.var #' \emph{add.woe.or.dum.var=\dQuote{woe}} adds an additional variable with WOE scores #' and \emph{=\dQuote{dum}} additional dummy variables for each (aggregated) level #' of the binned variable. In case of dummy variables make sure that you have set #' an appropriate \emph{abbrev.fact.levels} parameter in the \code{woe.binning} or \code{woe.tree.binning} function #' to avoid too long variable names. In principle, only alphanumeric characters #' and dots (.) will be used for variable names. Just omit this parameter in case you #' don't need additional variables. 
#' #' @examples #' # Load German credit data and create a subset #' data(germancredit) #' df <- germancredit[, c('creditability', 'credit.amount', 'duration.in.month', #' 'savings.account.and.bonds', 'purpose')] #' #' # Bin all variables of the data frame (apart from the target variable) #' # with default parameter settings #' binning <- woe.binning(df, 'creditability', df) #' #' # Deploy the binning solution to the data frame #' # (add all binned variables and corresponding WOE variables) #' df.with.binned.vars.added <- woe.binning.deploy(df, binning, #' add.woe.or.dum.var='woe') #' #' # Deploy the binning solution to the data frame #' # (add binned variables with IV>=0.1 and corresponding dummy variables) #' df.with.binned.vars.added <- woe.binning.deploy(df, binning, #' min.iv.total=0.1, #' add.woe.or.dum.var='dum') #' #' @export ##### This function calls the actual binning deployment function above for every specified predictor variable that needs to be binned. ##### woe.binning.deploy <- function(df, binning, min.iv.total, add.woe.or.dum.var) { #### Warning message and default in case iv.limits parameter is not (correctly) specified if ( !missing(min.iv.total)==TRUE ) { if ( min.iv.total<=0 || !is.numeric(min.iv.total) ) { warning("Incorrect parameter specification; accepted min.iv.total parameter needs to be > 0.") } if ( min.iv.total>max(unlist(binning[,3])) ) { warning("Incorrect parameter specification; min.iv.total parameter is > all observed IVs. Try smaller parameter value or omit parameter.") } } else { min.iv.total=0 } #### Default in case add.woe.or.dum.var parameter is not specified if ( missing(add.woe.or.dum.var)==TRUE ) { add.woe.or.dum.var="none" } #### Subset of binning solution with binned variables with IV total > min.iv.total if ( min.iv.total>0 ) { binning <- binning[binning[,3]>=min.iv.total,] } if ( (length(binning)/3) == 1 ) { dfrm.binned <- woe.binning.deploy.2(df, binning[1][[1]], binning[2][[1]], add.woe.or.dum.var) df <- cbind(df, dfrm.binned) # add binned variables to input data frame } else { for (i in 1:(length(binning)/3)) { dfrm.binned <- woe.binning.deploy.2(df, binning[i,1][[1]], binning[i,2][[1]], add.woe.or.dum.var) df <- cbind(df, dfrm.binned) # add binned variables to input data frame } } #### Pass dataframe with binned variables df }
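## Illustrative sketch (not part of the original woeBinning source): a typical
## follow-up to woe.binning.deploy(), feeding the added WOE columns into a
## logistic regression. Variable names mirror the @examples above; the helper
## objects (woe.cols, fit) are hypothetical. Wrapped in `if (FALSE)` so sourcing
## this file never executes it.
if (FALSE) {
  data(germancredit)
  df <- germancredit[, c('creditability', 'credit.amount', 'duration.in.month',
                         'savings.account.and.bonds', 'purpose')]
  binning <- woe.binning(df, 'creditability', df)
  df.woe  <- woe.binning.deploy(df, binning, add.woe.or.dum.var = 'woe')

  woe.cols <- grep('^woe\\.', names(df.woe), value = TRUE)
  fit <- glm(reformulate(woe.cols, response = 'creditability'),
             data = df.woe, family = binomial())
  summary(fit)
}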
## End of file: woeBinning/R/woe.binning.deploy.R
##### This is the actual ploting function for binned numeric variables and factors. ##### woe.binning.plot.2 <- function(pred.var, look.up.table, multiple.plots) { # In case of a look-up table for a factor remove duplicates and save binned categories as row names # In case of a look-up table for a numeric variable make bin names more readable if ( colnames(look.up.table)[1]=='Group.2' ) { look.up.table <- subset(look.up.table, !duplicated(look.up.table$Group.2)) row.names(look.up.table) <- look.up.table$Group.2 } else { row.names(look.up.table) <- gsub("]", "", row.names(look.up.table), fixed = TRUE) row.names(look.up.table) <- gsub(" ", "", row.names(look.up.table), fixed = TRUE) row.names(look.up.table) <- gsub("^.*\\,","<= ",row.names(look.up.table)) } # Remove rows with NAs (for WOE values) look.up.table <- na.omit(look.up.table) # Format plot: wrapping of too long (x axis) labels wrap.it <- function(x, len) { sapply(x, function(y) paste(strwrap(y, len), collapse = "\n"), USE.NAMES = FALSE) } wrap.labels <- function(x, len) { if (is.list(x)) { lapply(x, wrap.it, len) } else { wrap.it(x, len) } } wr.lap <- wrap.labels(row.names(look.up.table), 25) # wrapping after 30 characters # Display plot barplot(look.up.table$woe, main=pred.var, ylab=paste("WOE"), names.arg=wr.lap, ylim=c(-100*ceiling(max(abs(look.up.table$woe),na.rm=TRUE)/100),100*ceiling(max(abs(look.up.table$woe),na.rm=TRUE)/100)), las=2, cex.main=1, cex.names=0.9, cex.axis=0.8, cex.lab=0.8, col=gray.colors(nrow(look.up.table))) mtext(paste("IV =", format(round(look.up.table$iv.total.final[1], 3), nsmall=3, scientific=FALSE)), line=-0.5, cex=0.8) } #' @title Visualization of Binning #' #' @description #' \code{woe.binning.plot} visualizes the binning solution generated and saved via the \code{\link{woe.binning}} #' or \code{\link{woe.tree.binning}} function. #' #' @details #' For each predictor variable \code{woe.binning.plot} generates a weight of evidence #' (WOE) plot. In case of multiple predictors an additional plot with variables ranked #' via the information value (IV) will be displayed. #' #' @usage #' woe.binning.plot(binning, multiple.plots, plot.range) #' #' @param binning #' Binning information generated from the \code{woe.binning} or \code{woe.tree.binning} function. #' Contains names of the input predictor variables and the #' corresponding binning, WOE and IV information, which is used to #' generate the WOE and IV plots. #' @param multiple.plots #' In case the binning solution contains several predictor variables they will #' be visualized via multiple plots (max. four WOE plots per graph window). #' Use \emph{multiple.plots=FALSE} to avoid this and to display single plots in #' separate windows. #' #' @param plot.range #' Range of variables that should be plotted in quotes. For example \dQuote{1:10} #' will generate WOE plots and one IV plot for the ten variables with the #' highest IV values, \dQuote{11:20} for the next ten variables and so on. #' Just omit this parameter to visualize all binned variables (default). 
#' #' @examples #' # Load German credit data #' data(germancredit) #' df <- germancredit #' #' # Bin all variables of the data frame (apart from the target variable) #' # with default parameter settings #' binning <- woe.binning(df, 'creditability', df) #' #' # Plot all binned variables as multiple plots #' woe.binning.plot(binning) #' #' # Plot only the first four binned variables with the highest IV value #' # as multiple plots #' woe.binning.plot(binning, plot.range='1:4') #' #' # Plot the binned variables in single plots #' woe.binning.plot(binning, multiple.plots=FALSE) #' #' @importFrom stats na.omit #' @importFrom grDevices dev.new #' @importFrom grDevices gray.colors #' @importFrom graphics barplot #' @importFrom graphics par #' @importFrom graphics mtext #' #' @export ##### This function calls the actual plotting function above for every specified predictor variable that has been binned. ##### woe.binning.plot <- function(binning, multiple.plots, plot.range) { # Set default in case multiple plots parameter is not specified if ( missing(multiple.plots)==TRUE ) { multiple.plots<-TRUE } # Specify list of variables that should be plotted if ( missing(plot.range)==FALSE ) { binning <- binning[as.numeric(strsplit(plot.range, ":")[[1]][1]):as.numeric(strsplit(plot.range, ":")[[1]][2]),] } for (i in 1:(length(binning)/3)) { # Check if multiple plots (max. 4 per window) should be used if ( (multiple.plots==TRUE) && ((length(binning)/3)>1) ) { if ( (i==1) && (i<=4) ) { if ( (length(binning)/3)==2 ) { par(mfrow=c(1,2), mai=c(2.4,1,0.8,0.8)) } else { par(mfrow=c(2,2), mai=c(1.6,0.8,0.4,0.4)) } } } # Open a new window for each plot and set margins (in case of one variable only or multiple plot option is disabled) if ( (multiple.plots==FALSE) || ((length(binning)/3)==1) ) { dev.new() par(oma = c(6, 0, 0, 0)) } # Generate WOE plot for each predictor variable if ( (length(binning)/3)==1 ) { df.input <- woe.binning.plot.2(binning[1][[1]], binning[2][[1]], multiple.plots) } else { df.input <- woe.binning.plot.2(binning[i,1][[1]], binning[i,2][[1]], multiple.plots) } # Check if further multiple plots (max. 
4 per window) are needed if ( (multiple.plots==TRUE) && ((i/4)%%1==0) && (i<(length(binning)/3)) ) { # Open new window for the plot and set margins dev.new() par(mfrow=c(2,2), mai=c(1.6,0.8,0.4,0.4)) } } # In case of more than one predictor variable display an additional plot with variables ranked via IV if ( (length(binning)/3)>1 ) { predictor.names <- as.data.frame(unlist(binning[,1])) predictor.names[,1] <- strtrim(predictor.names[,1], 40) # truncate predictor names in plot iv.totals.with.duplicates <- lapply(binning[,2], `[[`, 'iv.total.final') iv.totals <- unlist(lapply(iv.totals.with.duplicates, `[[`, 1)) iv.table <- cbind(predictor.names,iv.totals) iv.table <- iv.table[order(iv.totals),] iv.table$combined <- paste(iv.table[,1], " IV=", format(round(iv.table$iv.totals, 3), nsmall=3, scientific=FALSE), sep="") # Open new window for the plot dev.new() # Format plot: wrapping of too long axis labels wrap.it <- function(x, len) { sapply(x, function(y) paste(strwrap(y, len), collapse = "\n"), USE.NAMES = FALSE) } wrap.labels <- function(x, len) { if (is.list(x)) { lapply(x, wrap.it, len) } else { wrap.it(x, len) } } wr.lap <- wrap.labels(iv.table$combined, 1) # wrapping in case of blanks par(mar=c(0, 15, 3, 1), xpd=TRUE) # defining margins barplot(iv.table$iv.totals, horiz=TRUE, main="Variables Ranked by Information Value", names.arg=wr.lap, xlim=c(0,ceiling(10*max(iv.table$iv.totals,na.rm=TRUE))/10), las=2, cex.main=1, cex.names=0.8, cex.axis=0.8, col=gray.colors(nrow(iv.table), start=0.9, end=0.3)) } }
/scratch/gouwar.j/cran-all/cranData/woeBinning/R/woe.binning.plot.R
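A minimal sketch (not part of the package source) of the label clean-up that woe.binning.plot.2() applies to the cut() interval names stored as row names of a numeric look-up table, turning them into the short "<= upper bound" labels drawn on the x axis; the label values below are hypothetical.

# Illustrative sketch only; hypothetical interval labels.
labels <- c("(-Inf,18]", "(18,24]", "(24, Inf]", "Missing")
labels <- gsub("]", "", labels, fixed = TRUE)   # drop closing bracket
labels <- gsub(" ", "", labels, fixed = TRUE)   # drop blanks
labels <- gsub("^.*\\,", "<= ", labels)         # keep only the upper bound
labels
# "<= 18"  "<= 24"  "<= Inf"  "Missing"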
##### This is the actual function for tabulation of the binned numeric variables and factors. ##### woe.binning.table.2 <- function(pred.var, look.up.table) { # In case of a look-up table for a factor remove duplicates and save binned categories as row names if ( colnames(look.up.table)[1]=='Group.2' ) { look.up.table <- subset(look.up.table, !duplicated(look.up.table$Group.2)) # remove duplicates (i.e. keep only rows with merged levels, not with original levels) woe.table <- as.data.frame(look.up.table$Group.2) colnames(woe.table)[1] <- paste("Final.Bin") woe.table$Total.Count <- look.up.table[,5] + look.up.table[,6] woe.table$Total.Distr. <- woe.table$Total.Count/sum(woe.table$Total.Count) woe.table[,4] <- look.up.table[,5] colnames(woe.table)[4] <- paste(gsub("[^[:alnum:]]","",colnames(look.up.table)[5]),"Count",sep=".") woe.table[,5] <- look.up.table[,6] colnames(woe.table)[5] <- paste(gsub("[^[:alnum:]]","",colnames(look.up.table)[6]),"Count",sep=".") woe.table[,6] <- look.up.table[,7] colnames(woe.table)[6] <- paste(gsub("[^[:alnum:]]","",colnames(look.up.table)[5]),"Distr.",sep=".") woe.table[,7] <- look.up.table[,8] colnames(woe.table)[7] <- paste(gsub("[^[:alnum:]]","",colnames(look.up.table)[6]),"Distr.",sep=".") woe.table[nrow(woe.table)+1,c(2:7)] <- colSums(woe.table[c(2:7)], na.rm=TRUE) levels(woe.table$Final.Bin) <- c(levels(woe.table$Final.Bin), "Total") woe.table$Final.Bin[nrow(woe.table)] <- "Total" woe.table[,3] <- paste(format(round(100*woe.table[,3], 1), nsmall = 1),"%",sep="") woe.table[,6] <- paste(format(round(100*woe.table[,6], 1), nsmall = 1),"%",sep="") woe.table[,7] <- paste(format(round(100*woe.table[,7], 1), nsmall = 1),"%",sep="") woe.table[,8] <- woe.table[,5]/woe.table[,2] woe.table[,8] <- paste(format(round(100*woe.table[,8], 1), nsmall = 1),"%",sep="") colnames(woe.table)[8] <- paste(gsub("[^[:alnum:]]","",colnames(look.up.table)[6]),"Rate",sep=".") WOE <- as.data.frame(c(look.up.table[,3],NA)) woe.table[,9] <- format(round(WOE, 1), nsmall = 1) colnames(woe.table)[9] <- "WOE" IV <- as.data.frame(c(look.up.table[,9], look.up.table[1,4])) woe.table[,10] <- format(round(IV, 3), nsmall = 3) colnames(woe.table)[10] <- "IV" woe.table } else { woe.table <- as.data.frame(row.names(look.up.table)) colnames(woe.table)[1] <- paste("Final.Bin") # Make bin names more readable woe.table[,1] <- gsub("]", "", woe.table[,1], fixed = TRUE) woe.table[,1] <- gsub(" ", "", woe.table[,1], fixed = TRUE) woe.table[,1] <- gsub("^.*\\,","<= ",woe.table[,1]) woe.table$Total.Count <- look.up.table[,5] + look.up.table[,6] woe.table$Total.Distr. 
<- woe.table$Total.Count/sum(woe.table$Total.Count) woe.table[,4] <- look.up.table[,5] colnames(woe.table)[4] <- paste(gsub("[^[:alnum:]]","",colnames(look.up.table)[5]),"Count",sep=".") woe.table[,5] <- look.up.table[,6] colnames(woe.table)[5] <- paste(gsub("[^[:alnum:]]","",colnames(look.up.table)[6]),"Count",sep=".") woe.table[,6] <- look.up.table[,7] colnames(woe.table)[6] <- paste(gsub("[^[:alnum:]]","",colnames(look.up.table)[5]),"Distr.",sep=".") woe.table[,7] <- look.up.table[,8] colnames(woe.table)[7] <- paste(gsub("[^[:alnum:]]","",colnames(look.up.table)[6]),"Distr.",sep=".") woe.table[nrow(woe.table)+1,c(2:7)] <- colSums(woe.table[c(2:7)], na.rm=TRUE) levels(woe.table$Final.Bin) <- c(levels(woe.table$Final.Bin), "Total") woe.table$Final.Bin[nrow(woe.table)] <- "Total" woe.table[,3] <- paste(format(round(100*woe.table[,3], 1), nsmall = 1),"%",sep="") woe.table[,6] <- paste(format(round(100*woe.table[,6], 1), nsmall = 1),"%",sep="") woe.table[,7] <- paste(format(round(100*woe.table[,7], 1), nsmall = 1),"%",sep="") woe.table[,8] <- woe.table[,5]/woe.table[,2] woe.table[,8] <- paste(format(round(100*woe.table[,8], 1), nsmall = 1),"%",sep="") colnames(woe.table)[8] <- paste(gsub("[^[:alnum:]]","",colnames(look.up.table)[6]),"Rate",sep=".") WOE <- as.data.frame(c(look.up.table[,1],NA)) woe.table[,9] <- format(round(WOE, 1), nsmall = 1) colnames(woe.table)[9] <- "WOE" IV <- as.data.frame(c(look.up.table[,9], look.up.table[1,4])) woe.table[,10] <- format(round(IV, 3), nsmall = 3) colnames(woe.table)[10] <- "IV" # Remove Missing Data row in case of no NAs if ( woe.table[nrow(woe.table)-1,2]==0 ) { woe.table <- woe.table[-c(nrow(woe.table)-1),] } woe.table } } #' @title Tabulation of Binning #' #' @description #' \code{woe.binning.table} tabulates the binning solution generated and saved via the \code{\link{woe.binning}} #' or \code{\link{woe.tree.binning}} function. #' #' @details #' For each predictor variable \code{woe.binning.table} generates a table (data frame). #' This table contains the final bin labels, total counts, total distribution (column percentages), #' counts for the first and the second target class, distribution of the first and the second target #' class (column percentages), rate (row percentages) of the target event specified via the #' \emph{event.class} parameter in the \code{woe.binning} or \code{woe.tree.binning} function, as well as weight of evidence #' (WOE) and information values (IV). #' #' @usage #' woe.binning.table(binning) #' #' @param binning #' Binning information generated from the \code{woe.binning} or \code{woe.tree.binning} function. #' Contains names of the input predictor variables and the #' corresponding binning, counts, WOE and IV information, which is used to #' generate the tables. 
#' #' @examples #' # Load German credit data and create a subset #' data(germancredit) #' df <- germancredit[, c('creditability', 'credit.amount', 'duration.in.month', #' 'savings.account.and.bonds', 'purpose')] #' #' # Bin all variables of the data frame (apart from the target variable) #' # with default parameter settings #' binning <- woe.binning(df, 'creditability', df) #' #' # Tabulate the binned variables #' tabulate.binning <- woe.binning.table(binning) #' tabulate.binning #' #' \dontrun{ #' #' # Plot a layouted table (using the gridExtra library) for a specific #' # variable (in this example for the first binned variable #' # with the highest IV value) #' library(gridExtra) #' grid.table(tabulate.binning[[1]], #' theme = ttheme_default(core=list(bg_params= #' list(fill=c(rep(c('grey95','grey90'), #' length.out=nrow(tabulate.binning[[1]])-1), #' '#BCC7BD')),fg_params=list(cex=0.8)), #' colhead=list(fg_params=list(cex=0.8))), #' rows=NULL) #' } #' #' @export ##### This function calls the actual tabulation function above for every specified predictor variable that has been binned. ##### woe.binning.table <- function(binning) { # Declare list woe.tables <- list() # Add WOE tables to list and use name of predictor variable as name of the respective list element if ( (length(binning)/3)==1 ) { woe.tables[[paste("WOE Table for",binning[1][[1]])]] <- woe.binning.table.2(binning[1][[1]], binning[2][[1]]) } else { for (i in 1:(length(binning)/3)) { woe.tables[[paste("WOE Table for",binning[i,1][[1]])]] <- woe.binning.table.2(binning[i,1][[1]], binning[i,2][[1]]) } } # Pass WOE tables woe.tables }
/scratch/gouwar.j/cran-all/cranData/woeBinning/R/woe.binning.table.R
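A minimal sketch (not part of the package source) of the WOE and IV arithmetic that feeds the tables above: WOE is reported in percent (multiplied by 100) and the total IV is the sum of the per-bin contributions. The counts below are hypothetical.

# Illustrative sketch only; hypothetical counts for three bins.
good <- c(150, 300, 150)            # 'good' class counts per bin
bad  <- c(100,  50,  50)            # 'bad' class counts per bin
col.perc.a <- good / sum(good)      # column percentages, good class
col.perc.b <- bad  / sum(bad)       # column percentages, bad class
woe      <- 100 * log(col.perc.a / col.perc.b)
iv.bins  <- (col.perc.a - col.perc.b) * woe / 100
iv.total <- sum(iv.bins)            # total information value of the predictor
round(woe, 1); round(iv.total, 3)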
##### This is the actual tree-like binning function for numeric variables and factors. ##### woe.tree.binning.2 <- function(df, target.var, pred.var, min.perc.total, min.perc.class, stop.limit, abbrev.fact.levels, bad, good) { #### Build subsets with target and predictor variable df <- df[, c(target.var, pred.var)] # used for final binning dfrm <- df[, c(target.var, pred.var)] # used for iterative merging and splitting of bins colnames(dfrm)[1] <- paste("target.var") colnames(dfrm)[2] <- paste("predictor.var") #### Check if numerical variable or factor was provided as predictor and apply appropriate binning technique ### Binning in case a numerical variable was selected if ( length(unique(dfrm[,1]))==2 && is.numeric(dfrm[,2]) ) { ## Derive number of initial bins from min.perc.total parameter max.bins <- trunc(1/min.perc.total) ## Derive cutpoints for bins (with similar frequency) cutpoints <- quantile(dfrm$predictor.var,(0:max.bins)/max.bins, na.rm=TRUE) innercutpoints <- cutpoints[2:(length(cutpoints)-1)] # remove outer (observed) boudaries cutpoints <- c(-Inf, innercutpoints, +Inf) # add -Inf, +Inf to cutpoints cutpoints <- unique(cutpoints) # remove multiple cutpoints with same value ## Calculate initial crosstab from binned variable and target variable ## to identify and merge sparse bins # Compute binned variable from cutpoints and add it to the subset data frame dfrm$predictor.var.binned <- cut(dfrm$predictor.var, cutpoints, labels = NULL, include.lowest = FALSE, right = TRUE, dig.lab = 10, ordered_result = TRUE) # Compute crosstab from binned variable and target variable and covert it to a data frame freq.table <- table(dfrm$predictor.var.binned, dfrm$target.var, useNA="always") row.names(freq.table)[is.na(row.names(freq.table))] <- 'Missing' # Replace NA in row.names with string 'Missing' woe.dfrm <- as.data.frame.matrix(freq.table) # Convert frequency table to data frame woe.dfrm <- woe.dfrm[, c(good, bad)] # Select columns with raw frequencies only # Compute columns percents for target classes from crosstab frequencies woe.dfrm$col.perc.a <- woe.dfrm[,1]/sum(woe.dfrm[,1]) woe.dfrm$col.perc.b <- woe.dfrm[,2]/sum(woe.dfrm[,2]) # Correct column percents in case of 0 frequencies (in case of no NA skip last row) if ( !anyNA(df[,2]) ) { if ( min(woe.dfrm[-nrow(woe.dfrm),1],na.rm=TRUE)==0 | min(woe.dfrm[-nrow(woe.dfrm),2],na.rm=TRUE)==0 ) { woe.dfrm$col.perc.a[-nrow(woe.dfrm)] <- (woe.dfrm$col.perc.a[-nrow(woe.dfrm)]+0.0001)/sum(woe.dfrm$col.perc.a[-nrow(woe.dfrm)]+0.0001) woe.dfrm$col.perc.b[-nrow(woe.dfrm)] <- (woe.dfrm$col.perc.b[-nrow(woe.dfrm)]+0.0001)/sum(woe.dfrm$col.perc.b[-nrow(woe.dfrm)]+0.0001) } } else { if ( min(woe.dfrm[,1],na.rm=TRUE)==0 | min(woe.dfrm[,2],na.rm=TRUE)==0 ) { woe.dfrm$col.perc.a <- (woe.dfrm$col.perc.a+0.0001)/sum(woe.dfrm$col.perc.a+0.0001) woe.dfrm$col.perc.b <- (woe.dfrm$col.perc.b+0.0001)/sum(woe.dfrm$col.perc.b+0.0001) } } # Check for bins (without last regular and without NA bin) if frequencies < percentage limit specified above # (in reverse order to remain correct reference to cutpoints) for (i in (nrow(woe.dfrm)-2):1) { if (woe.dfrm$col.perc.a[i]<min.perc.class | woe.dfrm$col.perc.b[i]<min.perc.class | ((woe.dfrm[i,1]+woe.dfrm[i,2])/(sum(woe.dfrm[,1],na.rm=TRUE)+sum(woe.dfrm[,2],na.rm=TRUE)))<min.perc.total) { # Remove cutpoint cutpoints <- cutpoints[-c((i+1))] # Compute binned variable from cutpoints and add it to the subset data frame dfrm$predictor.var.binned <- cut(dfrm$predictor.var, cutpoints, labels = NULL, include.lowest = FALSE, right = 
TRUE, dig.lab = 10, ordered_result = TRUE) # Compute crosstab from binned variable and target variable and covert it to a data frame freq.table <- table(dfrm$predictor.var.binned, dfrm$target.var, useNA="always") row.names(freq.table)[is.na(row.names(freq.table))] <- 'Missing' # Replace NA in row.names with string 'Missing' woe.dfrm <- as.data.frame.matrix(freq.table) # Convert frequency table to data frame woe.dfrm <- woe.dfrm[, c(good, bad)] # Select columns with raw frequencies only # Compute columns percents for target classes from crosstab frequencies woe.dfrm$col.perc.a <- woe.dfrm[,1]/sum(woe.dfrm[,1]) woe.dfrm$col.perc.b <- woe.dfrm[,2]/sum(woe.dfrm[,2]) # Correct column percents in case of 0 frequencies (in case of no NA skip last row) if ( !anyNA(df[,2]) ) { if ( min(woe.dfrm[-nrow(woe.dfrm),1],na.rm=TRUE)==0 | min(woe.dfrm[-nrow(woe.dfrm),2],na.rm=TRUE)==0 ) { woe.dfrm$col.perc.a[-nrow(woe.dfrm)] <- (woe.dfrm$col.perc.a[-nrow(woe.dfrm)]+0.0001)/sum(woe.dfrm$col.perc.a[-nrow(woe.dfrm)]+0.0001) woe.dfrm$col.perc.b[-nrow(woe.dfrm)] <- (woe.dfrm$col.perc.b[-nrow(woe.dfrm)]+0.0001)/sum(woe.dfrm$col.perc.b[-nrow(woe.dfrm)]+0.0001) } } else { if ( min(woe.dfrm[,1],na.rm=TRUE)==0 | min(woe.dfrm[,2],na.rm=TRUE)==0 ) { woe.dfrm$col.perc.a <- (woe.dfrm$col.perc.a+0.0001)/sum(woe.dfrm$col.perc.a+0.0001) woe.dfrm$col.perc.b <- (woe.dfrm$col.perc.b+0.0001)/sum(woe.dfrm$col.perc.b+0.0001) } } } # Stop in case 3 cutpoints (-Inf, x, +Inf) are reached if ( length(cutpoints)==3 ) { break } } # Check for last regular bin if frequencies < percentage limit specified above (only in case number of cutpoints > 3 if ( length(cutpoints)>3 ) { if (woe.dfrm$col.perc.a[(nrow(woe.dfrm)-1)]<min.perc.class | woe.dfrm$col.perc.b[(nrow(woe.dfrm)-1)]<min.perc.class | ((woe.dfrm[nrow(woe.dfrm)-1,1]+woe.dfrm[nrow(woe.dfrm)-1,2])/(sum(woe.dfrm[,1],na.rm=TRUE)+sum(woe.dfrm[,2],na.rm=TRUE)))<min.perc.total) { # Remove cutpoint cutpoints <- cutpoints[-c(nrow(woe.dfrm)-1)] # Compute binned variable from cutpoints and add it to the subset data frame dfrm$predictor.var.binned <- cut(dfrm$predictor.var, cutpoints, labels = NULL, include.lowest = FALSE, right = TRUE, dig.lab = 10, ordered_result = TRUE) # Compute crosstab from binned variable and target variable and covert it to a data frame freq.table <- table(dfrm$predictor.var.binned, dfrm$target.var, useNA="always") row.names(freq.table)[is.na(row.names(freq.table))] <- 'Missing' # Replace NA in row.names with string 'Missing' woe.dfrm <- as.data.frame.matrix(freq.table) # Convert frequency table to data frame woe.dfrm <- woe.dfrm[, c(good, bad)] # Select columns with raw frequencies only # Compute columns percents for target classes from crosstab frequencies woe.dfrm$col.perc.a <- woe.dfrm[,1]/sum(woe.dfrm[,1]) woe.dfrm$col.perc.b <- woe.dfrm[,2]/sum(woe.dfrm[,2]) # Correct column percents in case of 0 frequencies (in case of no NA skip last row) if ( !anyNA(df[,2]) ) { if ( min(woe.dfrm[-nrow(woe.dfrm),1],na.rm=TRUE)==0 | min(woe.dfrm[-nrow(woe.dfrm),2],na.rm=TRUE)==0 ) { woe.dfrm$col.perc.a[-nrow(woe.dfrm)] <- (woe.dfrm$col.perc.a[-nrow(woe.dfrm)]+0.0001)/sum(woe.dfrm$col.perc.a[-nrow(woe.dfrm)]+0.0001) woe.dfrm$col.perc.b[-nrow(woe.dfrm)] <- (woe.dfrm$col.perc.b[-nrow(woe.dfrm)]+0.0001)/sum(woe.dfrm$col.perc.b[-nrow(woe.dfrm)]+0.0001) } } else { if ( min(woe.dfrm[,1],na.rm=TRUE)==0 | min(woe.dfrm[,2],na.rm=TRUE)==0 ) { woe.dfrm$col.perc.a <- (woe.dfrm$col.perc.a+0.0001)/sum(woe.dfrm$col.perc.a+0.0001) woe.dfrm$col.perc.b <- 
(woe.dfrm$col.perc.b+0.0001)/sum(woe.dfrm$col.perc.b+0.0001) } } } } ## After sparse levels are merged: ## Tree-based iterative partitioning of bins until IV based stop criteria is reached ## or 2 aggregated bins are left (i.e. 3 cutpoints: -Inf, middle cutpoint, +Inf). innercutpoints <- cutpoints[2:(length(cutpoints)-1)] if ( length(cutpoints)>2 ) { for (i in 1:(length(innercutpoints)-1)) { for (i in 1:length(innercutpoints)) { if ( exists('selected.cuts', inherits=FALSE) ) { pred.var.cut <- cut(dfrm$predictor.var, c(-Inf, selected.cuts, innercutpoints[i], Inf), labels=NULL, include.lowest=FALSE, right=TRUE, dig.lab=10, ordered_result=TRUE) } else { pred.var.cut <- cut(dfrm$predictor.var, c(-Inf, innercutpoints[i], Inf), labels=NULL, include.lowest=FALSE, right=TRUE, dig.lab=10, ordered_result=TRUE) } freq.table <- table(pred.var.cut, dfrm$target.var, useNA="always") row.names(freq.table)[is.na(row.names(freq.table))] <- 'Missing' # Replace NA in row.names with string 'Missing' woe.dfrm <- as.data.frame.matrix(freq.table) # Convert frequency table to data frame woe.dfrm <- woe.dfrm[, c(good, bad)] # Select columns with raw frequencies only woe.dfrm$col.perc.a <- woe.dfrm[,1]/sum(woe.dfrm[,1]) woe.dfrm$col.perc.b <- woe.dfrm[,2]/sum(woe.dfrm[,2]) # Correct column percents in case of 0 frequencies (in case of no NA skip last row) if ( !anyNA(df[,2]) ) { if ( min(woe.dfrm[-nrow(woe.dfrm),1],na.rm=TRUE)==0 | min(woe.dfrm[-nrow(woe.dfrm),2],na.rm=TRUE)==0 ) { woe.dfrm$col.perc.a[-nrow(woe.dfrm)] <- (woe.dfrm$col.perc.a[-nrow(woe.dfrm)]+0.0001)/sum(woe.dfrm$col.perc.a[-nrow(woe.dfrm)]+0.0001) woe.dfrm$col.perc.b[-nrow(woe.dfrm)] <- (woe.dfrm$col.perc.b[-nrow(woe.dfrm)]+0.0001)/sum(woe.dfrm$col.perc.b[-nrow(woe.dfrm)]+0.0001) } } else { if ( min(woe.dfrm[,1],na.rm=TRUE)==0 | min(woe.dfrm[,2],na.rm=TRUE)==0 ) { woe.dfrm$col.perc.a <- (woe.dfrm$col.perc.a+0.0001)/sum(woe.dfrm$col.perc.a+0.0001) woe.dfrm$col.perc.b <- (woe.dfrm$col.perc.b+0.0001)/sum(woe.dfrm$col.perc.b+0.0001) } } woe.dfrm$woe <- 100*log(woe.dfrm$col.perc.a/woe.dfrm$col.perc.b) woe.dfrm$woe[is.finite(woe.dfrm$woe)==FALSE] <- NA # convert Inf, -Inf and NaN to NA woe.dfrm$iv.bins <- (woe.dfrm$col.perc.a-woe.dfrm$col.perc.b)*woe.dfrm$woe/100 iv.total <- sum(woe.dfrm$iv.bins, na.rm=TRUE) ifelse (exists('iv.total.collect', inherits=FALSE), iv.total.collect <- cbind(iv.total.collect, iv.total), iv.total.collect <- iv.total) } # Restore former solution in case stop criteria is reached and exit loop if ( exists('max.iv.total.collect.backup', inherits=FALSE) ) { if ( (max.iv.total.collect.backup+max.iv.total.collect.backup*stop.limit)>max(iv.total.collect) ) { innercutpoints <- innercutpoints.backup break } } # Backups to be able to restore former solution in case stop criteria is reached max.iv.total.collect.backup <- max(iv.total.collect) innercutpoints.backup <- innercutpoints # Get index of cutpoint with highest IV and reset iv.total.collect index.optimal.cut <- which(iv.total.collect==max(iv.total.collect))[1] iv.total.collect <- NULL # collect and sort selected cuts ifelse (exists('selected.cuts', inherits=FALSE), selected.cuts <- cbind(selected.cuts, innercutpoints[index.optimal.cut[sort.list(index.optimal.cut)]]), selected.cuts <- innercutpoints[index.optimal.cut[sort.list(index.optimal.cut)]]) selected.cuts <- sort(selected.cuts) selected.cuts <- unique(selected.cuts) # Remove selected cutpoint from cutpoint list innercutpoints <- innercutpoints[-index.optimal.cut] } #print(selected.cuts) pred.var.cut <- cut(dfrm$predictor.var, 
c(-Inf, selected.cuts, Inf), labels = NULL, include.lowest = FALSE, right = TRUE, dig.lab = 10, ordered_result=TRUE) freq.table <- table(pred.var.cut, dfrm$target.var, useNA="always") row.names(freq.table)[is.na(row.names(freq.table))] <- 'Missing' # Replace NA in row.names with string 'Missing' woe.dfrm.final <- as.data.frame.matrix(freq.table) # Convert frequency table to data frame woe.dfrm.final <- woe.dfrm.final[, c(good, bad)] # Select columns with raw frequencies only woe.dfrm.final$col.perc.a <- woe.dfrm.final[,1]/sum(woe.dfrm.final[,1]) woe.dfrm.final$col.perc.b <- woe.dfrm.final[,2]/sum(woe.dfrm.final[,2]) # Correct column percents in case of 0 frequencies (in case of no NA skip last row) if ( !anyNA(df[,2]) ) { if ( min(woe.dfrm.final[-nrow(woe.dfrm.final),1],na.rm=TRUE)==0 | min(woe.dfrm.final[-nrow(woe.dfrm.final),2],na.rm=TRUE)==0 ) { woe.dfrm.final$col.perc.a[-nrow(woe.dfrm.final)] <- (woe.dfrm.final$col.perc.a[-nrow(woe.dfrm.final)]+0.0001)/sum(woe.dfrm.final$col.perc.a[-nrow(woe.dfrm.final)]+0.0001) woe.dfrm.final$col.perc.b[-nrow(woe.dfrm.final)] <- (woe.dfrm.final$col.perc.b[-nrow(woe.dfrm.final)]+0.0001)/sum(woe.dfrm.final$col.perc.b[-nrow(woe.dfrm.final)]+0.0001) } } else { if ( min(woe.dfrm.final[,1],na.rm=TRUE)==0 | min(woe.dfrm.final[,2],na.rm=TRUE)==0 ) { woe.dfrm.final$col.perc.a <- (woe.dfrm.final$col.perc.a+0.0001)/sum(woe.dfrm.final$col.perc.a+0.0001) woe.dfrm.final$col.perc.b <- (woe.dfrm.final$col.perc.b+0.0001)/sum(woe.dfrm.final$col.perc.b+0.0001) } } } woe.dfrm.final$woe <- 100*log(woe.dfrm.final$col.perc.a/woe.dfrm.final$col.perc.b) woe.dfrm.final$woe[is.finite(woe.dfrm.final$woe)==FALSE] <- NA # convert Inf, -Inf and NaN to NA woe.dfrm.final$iv.bins <- (woe.dfrm.final$col.perc.a-woe.dfrm.final$col.perc.b)*woe.dfrm.final$woe/100 # Add cutpoints needed for deployment cutpoints.final <- c(-Inf, selected.cuts, Inf) woe.dfrm.final$cutpoints.final <- cutpoints.final upper.cutpoints.final.dfrm <- rbind(as.data.frame(cutpoints.final[-1]),'Missing') woe.dfrm.final <- cbind(woe.dfrm.final, upper.cutpoints.final.dfrm) # Compute final IV iv.total.final <- sum(woe.dfrm.final$iv.bins, na.rm=TRUE) woe.dfrm.final$iv.total.final <- iv.total.final ## Save final binning solution via look-up-table for deployment look.up.table <- woe.dfrm.final[,c(5,7:9,1:4,6)] } ### Binning in case a factor was selected if ( length(unique(dfrm[,1]))==2 && is.factor(dfrm[,2]) ) { ## Copy predictor variable to prepare binning/recoding dfrm$predictor.var.binned <- dfrm$predictor.var ## Handling of NAs if ( anyNA(dfrm$predictor.var.binned)==TRUE ) { levels(dfrm$predictor.var.binned) <- c(levels(dfrm$predictor.var.binned), "Missing") # add factor level 'Missing' dfrm$predictor.var.binned[is.na(dfrm$predictor.var.binned)] <- "Missing" # replace NA with string 'Missing' } ## Prepare binned factor in INPUT data (levels may be merged in subsequent steps) df[,ncol(df)+1] <- df[, c(pred.var)] colnames(df)[ncol(df)] <- paste(pred.var,".binned",sep="") # Handling of NAs if ( anyNA(df[,ncol(df)])==TRUE ) { levels(df[,ncol(df)]) <- c(levels(df[,ncol(df)]), "Missing") # add factor level 'Missing' df[,ncol(df)][is.na(df[,ncol(df)])] <- "Missing" # replace NA with string 'Missing' } ## Calculate initial crosstab from binned variable and target variable ## to identify and merge sparse bins # Compute crosstab from binned variable and target variable and covert it to a data frame freq.table <- table(dfrm$predictor.var.binned, dfrm$target.var) woe.dfrm <- as.data.frame.matrix(freq.table) # Convert frequency 
table to data frame woe.dfrm <- woe.dfrm[, c(good, bad)] # Select columns with raw frequencies only # Compute WOE and information value (IV) from crosstab frequencies woe.dfrm$col.perc.a <- woe.dfrm[,1]/sum(woe.dfrm[,1]) woe.dfrm$col.perc.b <- woe.dfrm[,2]/sum(woe.dfrm[,2]) # Correct column percents in case of 0 frequencies if ( min(woe.dfrm[,1],na.rm=TRUE)==0 | min(woe.dfrm[,2],na.rm=TRUE)==0 ) { woe.dfrm$col.perc.a <- (woe.dfrm$col.perc.a+0.0001)/sum(woe.dfrm$col.perc.a+0.0001) woe.dfrm$col.perc.b <- (woe.dfrm$col.perc.b+0.0001)/sum(woe.dfrm$col.perc.b+0.0001) } # Merge factor levels with frequencies < percentage limit specified above to "misc. level" (associated with pos. and neg. WOE values) woe.dfrm$sparse.merge[woe.dfrm$col.perc.a<min.perc.class | woe.dfrm$col.perc.b<min.perc.class | ((woe.dfrm[,1]+woe.dfrm[,2])/(sum(woe.dfrm[,1],na.rm=TRUE)+sum(woe.dfrm[,2],na.rm=TRUE)))<min.perc.total] <- 1 woe.dfrm.sparse.subset <- na.omit(woe.dfrm) woe.dfrm.sparse.subset$sparse.merge[woe.dfrm.sparse.subset$col.perc.a <= woe.dfrm.sparse.subset$col.perc.b] <- -1 woe.dfrm.sparse.subset.pos <- woe.dfrm.sparse.subset[woe.dfrm.sparse.subset$sparse.merge==1, ] woe.dfrm.sparse.subset.neg <- woe.dfrm.sparse.subset[woe.dfrm.sparse.subset$sparse.merge==-1, ] levels(dfrm$predictor.var.binned)[levels(dfrm$predictor.var.binned)%in%(row.names(woe.dfrm.sparse.subset.pos))] <- "misc. level pos." levels(dfrm$predictor.var.binned)[levels(dfrm$predictor.var.binned)%in%(row.names(woe.dfrm.sparse.subset.neg))] <- "misc. level neg." ## After sparse levels are merged: ## Tree-based partitioning of bins sorted by WOE vales # Compute crosstab from binned variable and target variable and covert it to a data frame freq.table <- table(dfrm$predictor.var.binned, dfrm$target.var) #row.names(freq.table)[is.na(row.names(freq.table))] <- 'Missing' # Replace NA in row.names with string 'Missing' woe.dfrm <- as.data.frame.matrix(freq.table) # Convert frequency table to data frame woe.dfrm <- woe.dfrm[, c(good, bad)] # Select columns with raw frequencies only # Compute WOE and information value (IV) from crosstab frequencies woe.dfrm$col.perc.a <- woe.dfrm[,1]/sum(woe.dfrm[,1]) woe.dfrm$col.perc.b <- woe.dfrm[,2]/sum(woe.dfrm[,2]) # Correct column percents in case of 0 frequencies if ( min(woe.dfrm[,1],na.rm=TRUE)==0 | min(woe.dfrm[,2],na.rm=TRUE)==0 ) { woe.dfrm$col.perc.a <- (woe.dfrm$col.perc.a+0.0001)/sum(woe.dfrm$col.perc.a+0.0001) woe.dfrm$col.perc.b <- (woe.dfrm$col.perc.b+0.0001)/sum(woe.dfrm$col.perc.b+0.0001) } woe.dfrm$woe <- 100*log(woe.dfrm$col.perc.a/woe.dfrm$col.perc.b) woe.dfrm$woe[is.finite(woe.dfrm$woe)==FALSE] <- NA # convert Inf, -Inf and NaN to NA woe.dfrm <- woe.dfrm[order(woe.dfrm$woe),] # sort data via WOE values woe.dfrm$iv.bins <- (woe.dfrm$col.perc.a-woe.dfrm$col.perc.b)*woe.dfrm$woe/100 # In case there are more than 2 regulare bins (+ Missing bin) left: # iterative split bins into binary subsets (tree-like, i.e. 1. split # -> 2 aggregated bins, 2. split -> 3 aggregated bins, etc.) and realize # solution with total IV value that fullfills the stop crieria if ( (anyNA(df[,2]) && nrow(woe.dfrm)>3) || (!anyNA(df[,2]) && nrow(woe.dfrm)>2) ) { for ( i in 1:1:(nrow(woe.dfrm-2)) ) { for ( i in 1:(nrow(woe.dfrm)-1) ) { woe.dfrm$trycut[1:i] <- 'a' # 1. node woe.dfrm$trycut[(i+1):nrow(woe.dfrm)] <- 'b' # 2. 
node if ( !'cut' %in% names(woe.dfrm) ) { woe.dfrm.try <- aggregate(woe.dfrm[,3:4], by=list(woe.dfrm$trycut), 'sum') } else { woe.dfrm.try <- aggregate(woe.dfrm[,3:4], by=list(woe.dfrm$trycut, woe.dfrm$cut), 'sum') } woe.dfrm.try$woe <- 100*log(woe.dfrm.try$col.perc.a/woe.dfrm.try$col.perc.b) woe.dfrm.try$woe[is.finite(woe.dfrm.try$woe)==FALSE] <- NA # convert Inf, -Inf and NaN to NA woe.dfrm.try$iv.bins <- (woe.dfrm.try$col.perc.a-woe.dfrm.try$col.perc.b)*woe.dfrm.try$woe/100 iv.total <- sum(woe.dfrm.try$iv.bins, na.rm=TRUE) ifelse (exists('iv.total.collect', inherits=FALSE), iv.total.collect <- cbind(iv.total.collect, iv.total), iv.total.collect <- iv.total) } index.optimal.cut <- which(iv.total.collect==max(iv.total.collect))[1] # Restore former solution in case stop criteria is reached and exit loop if ( exists('max.iv.total.collect.backup', inherits=FALSE) ) { if ( (max.iv.total.collect.backup+max.iv.total.collect.backup*stop.limit)>max(iv.total.collect) ) { break } } # Backup to be able to restore former solution in case stop criteria is reached max.iv.total.collect.backup <- max(iv.total.collect) iv.total.collect <- NULL woe.dfrm$cuttemp <- 'm' # all incl. Missing woe.dfrm$cuttemp[1:index.optimal.cut] <- 'a' # 1. node woe.dfrm$cuttemp[(index.optimal.cut+1):nrow(woe.dfrm)] <- 'b' # 2. node if ( !'cut' %in% names(woe.dfrm) ) { woe.dfrm$cut <- woe.dfrm$cuttemp } else { woe.dfrm$cut <- paste(woe.dfrm$cut, woe.dfrm$cuttemp, sep="") } } } woe.dfrm$Group.1 <- row.names(woe.dfrm) woe.dfrm$Group.2 <- row.names(woe.dfrm) # Merge names of factor levels to be joined in a new variable if ( (anyNA(df[,2]) && nrow(woe.dfrm)>3) || (!anyNA(df[,2]) && nrow(woe.dfrm)>2) ) { for ( i in (nrow(woe.dfrm)-1):1 ) { if ( woe.dfrm$cut[i]==woe.dfrm$cut[i+1] ) { woe.dfrm$Group.2[i] <- paste(row.names(woe.dfrm)[i], "+", woe.dfrm$Group.2[i+1]) } } for ( i in 2:nrow(woe.dfrm) ) { if ( woe.dfrm$cut[i]==woe.dfrm$cut[i-1] ) { woe.dfrm$Group.2[i] <- woe.dfrm$Group.2[i-1] } } } else { # In case of only 2 regular bins (+ 1 missing data bin) build the data frame structure that is expected by the final procedure woe.dfrm$trycut <- NA woe.dfrm$cuttemp <- NA woe.dfrm$cut <- "a" woe.dfrm$cut[2] <- "b" if ( nrow(woe.dfrm)>2 ) { woe.dfrm$cut[3] <- "c" } woe.dfrm <- woe.dfrm[,c(1:6,9:11,7,8)] } # Restore original factor level names and original counts via outer join (because they may have be lost by former aggregating to misc. levels) woe.dfrm.sparse.subset$misc[woe.dfrm.sparse.subset$sparse.merge==1] <- "misc. level pos." woe.dfrm.sparse.subset$misc[woe.dfrm.sparse.subset$sparse.merge==-1] <- "misc. level neg." woe.dfrm.sparse.subset$original.names <- row.names(woe.dfrm.sparse.subset) # Rename variables with aggregated count vor misc. 
bins to avoid name conflicts in merging colnames(woe.dfrm)[1] <- paste(colnames(woe.dfrm)[1], "aggr", sep=".") colnames(woe.dfrm)[2] <- paste(colnames(woe.dfrm)[2], "aggr", sep=".") # Merge woe.dfrm <- merge( woe.dfrm.sparse.subset[,c(6:7,1:2)], woe.dfrm, by.x=1, by.y=10, all=TRUE) # Restore original factor level names woe.dfrm$Group.1 <- woe.dfrm$misc woe.dfrm$Group.1[!is.na(woe.dfrm$original.names)] <- woe.dfrm$original.names[!is.na(woe.dfrm$original.names)] # Restore original counts woe.dfrm[,3][is.na(woe.dfrm[,3])] <- woe.dfrm[,5][is.na(woe.dfrm[,3])] woe.dfrm[,4][is.na(woe.dfrm[,4])] <- woe.dfrm[,6][is.na(woe.dfrm[,4])] # Remove unnecessary count variables woe.dfrm <- woe.dfrm[, -c(5,6)] # Realize final bin aggregation resulting from the tree-like procedure above # and compute corresponding WOE and IV values woe.dfrm.aggr <- aggregate(woe.dfrm[,3:4], by=list(woe.dfrm$cut), 'sum') colnames(woe.dfrm.aggr)[1] <- 'cut' woe.dfrm.aggr$col.perc.a <- woe.dfrm.aggr[,2]/sum(woe.dfrm.aggr[,2]) woe.dfrm.aggr$col.perc.b <- woe.dfrm.aggr[,3]/sum(woe.dfrm.aggr[,3]) # Correct column percents in case of 0 frequencies if ( min(woe.dfrm.aggr[,2],na.rm=TRUE)==0 | min(woe.dfrm.aggr[,3],na.rm=TRUE)==0 ) { woe.dfrm.aggr$col.perc.a <- (woe.dfrm.aggr$col.perc.a+0.0001)/sum(woe.dfrm.aggr$col.perc.a+0.0001) woe.dfrm.aggr$col.perc.b <- (woe.dfrm.aggr$col.perc.b+0.0001)/sum(woe.dfrm.aggr$col.perc.b+0.0001) } woe.dfrm.aggr$woe <- 100*log(woe.dfrm.aggr$col.perc.a/woe.dfrm.aggr$col.perc.b) woe.dfrm.aggr$woe[is.finite(woe.dfrm.aggr$woe)==FALSE] <- NA # convert Inf, -Inf and NaN to NA woe.dfrm.aggr <- woe.dfrm.aggr[order(woe.dfrm.aggr$woe),] # sort data via WOE values woe.dfrm.aggr$iv.bins <- (woe.dfrm.aggr$col.perc.a-woe.dfrm.aggr$col.perc.b)*woe.dfrm.aggr$woe/100 woe.dfrm.aggr$iv.total.final <- sum(woe.dfrm.aggr$iv.bins, na.rm=TRUE) # Merge the table with the final WOE and IV values with the table containing the original and aggregated bin names look.up.table <- merge(woe.dfrm.aggr, woe.dfrm[11:13], by.x=1, by.y=1) look.up.table <- look.up.table[,c(9,10,6,8,2:5,7)] look.up.table <- look.up.table[order(look.up.table$woe, look.up.table$Group.2),] # sort by woe value and merged bin name # Convert variables with original and aggregated factor levels into factors look.up.table$Group.1 <- factor(look.up.table$Group.1) look.up.table$Group.2 <- factor(look.up.table$Group.2) # In case the misc. level consists only of only NA rename it 'Missing' if ( length(which(look.up.table[,2]=='Missing'))==1 && length(which(look.up.table[,1]=="misc. level neg."))==1 ) { if ( (which(look.up.table[,2]=='Missing') == which(look.up.table[,1]=='misc. level neg.')) ) { levels(look.up.table[,1]) <- c(levels(look.up.table[,2]), 'Missing') # add factor level 'Missing' look.up.table[,1][look.up.table[,2]=='Missing'] <- 'Missing' } } if ( length(which(look.up.table[,2]=='Missing'))==1 && length(which(look.up.table[,1]=="misc. level pos."))==1 ) { if ( (which(look.up.table[,2]=='Missing') == which(look.up.table[,1]=='misc. 
level pos.')) ) { levels(look.up.table[,1]) <- c(levels(look.up.table[,2]), 'Missing') # add factor level 'Missing' look.up.table[,1][look.up.table[,2]=='Missing'] <- 'Missing' } } # Abbreviate long factor levels (in case they are longer than specified or longer than 1000 characters) if ( abbrev.fact.levels==0 && 1000<max(nchar(as.character(look.up.table$Group.2))) ) { abbrev.fact.levels <- 1000 } if ( abbrev.fact.levels>0 && abbrev.fact.levels<max(nchar(as.character(look.up.table$Group.2))) ) { look.up.table$Group.2 <- as.factor(abbreviate(look.up.table$Group.2, abbrev.fact.levels)) # actual abbrevation look.up.table$Group.2 <- as.factor(gsub("[*+*]", " ", look.up.table$Group.2)) # remove + signs look.up.table$Group.2 <- as.factor(gsub(" +", " ", look.up.table$Group.2)) # remove double blanks } } #### Check for correct variable specification and #### generate requested output, in case specification is correct ### Display warning message in case of incorrect predictor variable specification if ( (is.numeric(dfrm[,2])==FALSE) && (is.factor(dfrm[,2])==FALSE) ) { warning("Incorrect variable specification.\nPredictor variable needs to be a numeric variable or a factor.") } ### Generate requested output, in case specification is correct else { ## Function passes the final binning solution as look-up table look.up.table } } #' @title Binning via Tree-Like Segmentation #' #' @description #' \code{woe.tree.binning} generates a supervised tree-like segmentation of numeric variables #' and factors with respect to a dichotomous target variable. Its parameters provide #' flexibility in finding a binning that fits specific data characteristics and practical #' needs. #' #' @section Binning of Numeric Variables: #' Numeric variables (continuous and ordinal) are binned beginning with initial classes with #' similar frequencies. The number of initial bins results from the \emph{min.perc.total} #' parameter: min.perc.total will result in trunc(1/min.perc.total) initial bins, #' whereby \emph{trunc} is needed to guarantee bins with similar frequencies. #' For example \emph{min.perc.total=0.07} will cause trunc(14.3)=14 initial classes. #' Next, if \emph{min.perc.class}>0, bins with sparse target classes will be merged with #' the next upper bin, and in case of the last bin with the next lower one. NAs have #' their own bin and will not be merged with others. Finally the actual tree-like procedure #' starts: binary splits iteratively assign nearby classes with similar weight of evidence #' (WOE) values to segments in a way that maximizes the resulting information value (IV). #' The procedure stops when the IV increases less then specified by a percentage value #' (\emph{stop.limit} parameter). #' @section Binning of Factors: #' Factors (categorical variables) are binned via factor levels. As a start sparse levels #' (defined via the \emph{min.perc.total} and \emph{min.perc.class} parameters) are merged #' to a \sQuote{miscellaneous} level: if possible, respective levels (including sparse NAs) #' are bundled as \sQuote{misc. level pos.} (associated with positive WOE values), respectively #' as \sQuote{misc. level neg.} (associated with negative WOE values). In case a misc. level #' contains only NAs it will be named \sQuote{Missing}. Afterwards the actual tree-like #' procedure starts: binary splits iteratively assign levels with similar WOE values to #' segments in a way that maximizes the resulting information value (IV). 
The procedure stops #' when the IV increases less then specified by a percentage value (\emph{stop.limit} parameter). #' @section Adjustment of 0 Frequencies: #' In case the crosstab of the bins with the target classes contains frequencies = 0 #' the column percentages are adjusted to be able to compute the WOE and IV values: #' the offset 0.0001 (=0.01\%) is added to each column percentage cell and the column #' percentages are recomputed then. This allows considering bins associated with one target #' class only, but may cause extreme WOE values for these bins. If a correction is not #' appropriate choose \emph{min.perc.class}>0; bins with sparse target classes will be #' merged then before computing any WOE or IV value. #' @section Handling of Missing Data: #' Cases with NAs in the target variable will be ignored. For predictor variables the following #' applies: in case NAs already occurred when generating the binning solution #' the code \sQuote{Missing} is displayed and a corresponding WOE value can be computed. #' (Note that factor NAs may be joined with other sparse levels to a \sQuote{miscellaneous} #' level - see above; only this \sQuote{miscellaneous} level will be displayed then.) #' In case NAs occur in the deployment scenario only \sQuote{Missing} is #' displayed for numeric variables and \sQuote{unknown} for factors; and #' the corresponding WOE values will be NA then, as well. #' #' @usage #' woe.tree.binning(df, target.var, pred.var, min.perc.total, #' min.perc.class, stop.limit, abbrev.fact.levels, event.class) #' #' @return #' \code{woe.tree.binning} generates an object with the information necessary #' for studying and applying the realized binning solution. When saved #' it can be used with the functions \code{\link{woe.binning.plot}}, \code{\link{woe.binning.table}} #' and \code{\link{woe.binning.deploy}}. #' #' @param df #' Name of data frame with input data. #' @param target.var #' Name of dichotomous target variable in quotes. Only target variables with #' two distinct values (e.g. 0, 1 or \dQuote{Y}, \dQuote{N}) are accepted; #' cases with NAs in the target variable will be ignored. #' @param pred.var #' Name of predictor variable(s) to be binned in quotes. #' A single variable name can be provided, e.g. \dQuote{varname1}, or a list of #' variable names, e.g. c(\dQuote{varname1}, \dQuote{varname2}). Alternatively one #' can repeat the name of the input data frame; the function will be applied #' to all its variables apart from the target variable then. #' Numeric variables and factors are supported and may contain NAs. #' @param min.perc.total #' For numeric variables this parameter defines the number of initial #' classes before any merging or tree-like splitting is applied. For example #' \emph{min.perc.total=0.05} (5\%) will result in 20 initial classes. For factors #' the original levels with a percentage below this limit are collected in a #' \sQuote{miscellaneous} level before the merging based on the \emph{min.perc.class} #' and the tree-like splitting based on the WOE values starts. Increasing the #' \emph{min.perc.total} parameter will avoid sparse bins. Accepted range: 0.0001-0.2; #' default: 0.01. #' @param min.perc.class #' If a column percentage of one of the target classes within a bin is #' below this limit (e.g. below 0.01=1\%) then the respective bin will be #' joined with others. In case of numeric variables adjacent predictor classes #' are merged. 
For factors respective levels (including sparse NAs) are #' assigned to a \sQuote{miscellaneous} level. Setting \emph{min.perc.class}>0 #' may provide more reliable WOE values. Accepted range: 0-0.2; #' default: 0, i.e. no merging with respect to sparse target classes #' is applied. #' @param stop.limit #' Stops WOE based segmentation of the predictor's classes/levels in case the #' resulting information value (IV) increases less than \emph{x}\% (e.g. 0.05 = 5\%) #' compared to the preceding binning step. Increasing the \emph{stop.limit} will #' simplify the binning solution and may avoid overfitting. Accepted range: 0-0.5; #' default: 0.1. #' @param abbrev.fact.levels #' Abbreviates the names of new (merged) factor levels via the base R #' \code{\link{abbreviate}} function in case the specified number of #' characters is exceeded. Accepted range: 0-1000; default: 200. #' 0 will prevent applying any abbreviation, i.e. only factor levels with #' more than 1000 characters will be truncated then. #' This option is particularly relevant in case one wants to generate dummy #' variables via the \code{\link{woe.binning.deploy}} function, because the #' factor levels will be part of the dummy variable names then. #' @param event.class #' Optional parameter for specifying the class of the target event. This #' class typically indicates a negative event like a loan default or a #' disease. Use integers (e.g. 1) or characters in quotes (e.g. \dQuote{bad}). #' This class will be represented by negative WOE values then. #' #' @family binning functions #' #' @examples #' # Load German credit data and create subset #' data(germancredit) #' df <- germancredit[, c('creditability', 'credit.amount', 'duration.in.month', #' 'savings.account.and.bonds', 'purpose')] #' #' # Bin a single numeric variable #' binning <- woe.tree.binning(df, 'creditability', 'duration.in.month', #' min.perc.total=0.01, min.perc.class=0.01, #' stop.limit=0.1, event.class='bad') #' #' # Bin a single factor #' binning <- woe.tree.binning(df, 'creditability', 'purpose', #' min.perc.total=0.05, min.perc.class=0, stop.limit=0.1, #' abbrev.fact.levels=50, event.class='bad') #' #' # Bin two variables (one numeric and one factor) #' # with default parameter settings #' binning <- woe.tree.binning(df, 'creditability', c('credit.amount','purpose')) #' #' # Bin all variables of the data frame (apart from the target variable) #' # with default parameter settings #' binning <- woe.tree.binning(df, 'creditability', df) #' #' @importFrom stats aggregate #' @importFrom stats embed #' @importFrom stats na.omit #' @importFrom stats quantile #' #' @export ##### This function calls the actual tree-like binning function above for every specified predictor variable that needs to be binned. ##### woe.tree.binning <- function(df, target.var, pred.var, min.perc.total, min.perc.class, stop.limit, abbrev.fact.levels, event.class) { #### Warning message and defaults in case parameters are not specified if ( missing(df)==TRUE || missing(target.var)==TRUE || missing(pred.var)==TRUE ) { warning("Incorrect specification of data frame and/or variables.") } if ( missing(min.perc.total)==TRUE ) { min.perc.total=0.01 } if ( min.perc.total<0.0001 || min.perc.total>0.2 || !is.numeric(min.perc.total) ) { warning("Incorrect parameter specification; accepted min.perc.total parameter range is 0.0001-0.2. 
Parameter was set to default (0.01).") min.perc.total=0.01 } if ( missing(min.perc.class)==TRUE ) { min.perc.class=0 } if ( min.perc.class<0 || min.perc.class>0.2 || !is.numeric(min.perc.class) ) { warning("Incorrect parameter specification; accepted min.perc.class parameter range is 0-0.2. Parameter was set to default (0).") min.perc.class=0 } if ( missing(stop.limit)==TRUE ) { stop.limit=0.1 } if ( stop.limit<0 || stop.limit>0.5 || !is.numeric(stop.limit) ) { warning("Incorrect parameter specification; accepted stop.limit parameter range is 0-0.05. Parameter was set to default (0.1).") stop.limit=0.1 } if ( missing(abbrev.fact.levels)==TRUE ) { abbrev.fact.levels=200 } if ( abbrev.fact.levels<0 || abbrev.fact.levels>1000 ) { warning("Incorrect parameter specification; accepted abbrev.fact.levels parameter range is 0-10000. Parameter was set to default (200).") abbrev.fact.levels=200 } #### Display warning message in case of incorrect target variable specification if ( !(length(unique(df[,target.var][!is.na(df[,target.var])]))==2) ) { warning("Incorrect variable specification.\nTarget variable must have two distinct values (NAs are accepted).") } #### Display warning message in case none of the target classes matches the specified event.class parameter if ( !missing(event.class) ) { if ( (unique(df[,target.var])[1]==event.class || unique(df[,target.var])[2]==event.class)==FALSE ) { warning("None of the target classes matches the specified event.class parameter.") } } #### In case bad class was specified assign 'good' and 'bad' codes (the latter will be associated with negative WOE values then) if ( !missing(event.class) ) { if ( unique(df[,target.var])[1]==event.class ) { bad <- unique(df[,target.var])[1] good <- unique(df[,target.var])[2] } else { bad <- unique(df[,target.var])[2] good <- unique(df[,target.var])[1] } } else { bad <- unique(df[,target.var])[1] good <- unique(df[,target.var])[2] } bad <- toString(bad) good <- toString(good) #### Gather names and look-up tables (with binned classes and WOE values) for each predictor variable in a list if ( is.data.frame(pred.var)==TRUE ) { pred.var <- as.list(colnames(subset(df, select=-c(which( colnames(df)==target.var ))))) # convert variable names of data frame into a list (without target variable) } else { as.list(pred.var) # provide variable name(s) as a list } #### Subset: consider only cases without NA in target variable df <- df[!is.na(df[,target.var]),] #### Call actual binning function and put binning solutions together with respective variable names into a list binning <- lapply(pred.var, function(x) woe.tree.binning.2(df, target.var, x, min.perc.total, min.perc.class, stop.limit, abbrev.fact.levels, bad, good)) #### Read names and IV total values in the list and put them together with the binning tables names.of.pred.var <- lapply(pred.var, function(x) x) iv.total.list <- lapply(binning, function(x) colMeans(x[4])) binning <- matrix(c(names.of.pred.var, binning, iv.total.list),ncol=3) #### Sort via IV total binning <- binning[rev(sort.list(as.numeric(binning[,3]))),] binning }
/scratch/gouwar.j/cran-all/cranData/woeBinning/R/woe.tree.binning.R
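A minimal sketch (not part of the package source) of how woe.tree.binning.2() derives its initial equal-frequency cutpoints for a numeric predictor from min.perc.total, before any sparse-bin merging or tree-like splitting; the data below are simulated for illustration.

# Illustrative sketch only; simulated predictor.
set.seed(1)
predictor.var  <- rnorm(1000)
min.perc.total <- 0.07
max.bins  <- trunc(1 / min.perc.total)                              # 14 initial bins
cutpoints <- quantile(predictor.var, (0:max.bins) / max.bins, na.rm = TRUE)
cutpoints <- c(-Inf, cutpoints[2:(length(cutpoints) - 1)], +Inf)    # open outer bounds
cutpoints <- unique(cutpoints)                                      # drop duplicated cutpoints
binned    <- cut(predictor.var, cutpoints, right = TRUE, dig.lab = 10, ordered_result = TRUE)
table(binned)                                                       # similar frequencies per bin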
###Function to get model fit diagnostics given a STBDwDM object #' #' diagnostics #' #' Calculates diagnostic metrics using output from the \code{\link{STBDwDM}} model. #' #' @param obj A \code{\link{STBDwDM}} model object for which diagnostics #' are desired from. #' #' @param diags A vector of character strings indicating the diagnostics to compute. #' Options include: Deviance Information Criterion ("dic"), d-infinity ("dinf") and #' Watanabe-Akaike information criterion ("waic"). At least one option must be included. #' Note: The probit model cannot compute the DIC or WAIC diagnostics due to computational #' issues with computing the multivariate normal CDF. #' #' @param keepDeviance A logical indicating whether the posterior deviance distribution #' is returned (default = FALSE). #' #' @param keepPPD A logical indicating whether the posterior predictive distribution #' at each observed location is returned (default = FALSE). #' #' @details To assess model fit, DIC, d-infinity and WAIC are used. DIC is based on the #' deviance statistic and penalizes for the complexity of a model with an effective #' number of parameters estimate pD (Spiegelhalter et al 2002). The d-infinity posterior #' predictive measure is an alternative diagnostic tool to DIC, where d-infinity=P+G. #' The G term decreases as goodness of fit increases, and P, the penalty term, inflates #' as the model becomes over-fit, so small values of both of these terms and, thus, small #' values of d-infinity are desirable (Gelfand and Ghosh 1998). WAIC is invariant to #' parametrization and is asymptotically equal to Bayesian cross-validation #' (Watanabe 2010). WAIC = -2 * (lppd - p_waic_2). Where lppd is the log pointwise #' predictive density and p_waic_2 is the estimated effective number of parameters #' based on the variance estimator from Vehtari et al. 2016. (p_waic_1 is the mean #' estimator). #' #' @return \code{diagnostics} returns a list containing the diagnostics requested and #' possibly the deviance and/or posterior predictive distribution objects. #' #' @author Samuel I. Berchuck #' #' @references Gelfand, A. E., & Ghosh, S. K. (1998). Model choice: a minimum posterior predictive loss approach. Biometrika, 1-11. #' @references Spiegelhalter, D. J., Best, N. G., Carlin, B. P., & Van Der Linde, A. (2002). Bayesian measures of model complexity and fit. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 64(4), 583-639. #' @references Vehtari, A., Gelman, A., & Gabry, J. (2016). Practical Bayesian model evaluation using leave-one-out cross-validation and WAIC. Statistics and Computing, 1-20. #' @references Watanabe, S. (2010). Asymptotic equivalence of Bayes cross validation and widely applicable information criterion in singular learning theory. Journal of Machine Learning Research, 11(Dec), 3571-3594. 
#' #' @export diagnostics <- function(obj, diags = c("dic", "dinf", "waic"), keepDeviance = FALSE, keepPPD = FALSE) { ###Check Inputs if (missing(obj)) stop('"obj" is missing') if (!is.STBDwDM(obj)) stop('"obj" must be of class STBDwDM') if (sum((!diags %in% c("dic", "dinf", "waic"))) > 0) stop('"diags" must contain at least one of "dic", "dinf" or "waic"') if (!is.logical(keepDeviance)) stop('"keepDeviance" must be a logical') if (!is.logical(keepPPD)) stop('"keepPPD" must be a logical') ###Unload STBDwDM objects DatObj <- obj$datobj DatAug <- obj$dataug ###Check Inputs Again if ((DatObj$FamilyInd == 1) & (sum(diags %in% c("dic", "waic")) > 0)) stop ('"probit" model cannot be used with "dic" or "waic"') #probit model can't do likelihood diagnostics ###Set seed for reproducibility set.seed(54) ###Set data objects M <- DatObj$M Z <- DatObj$Z AdjacentEdgesBoolean <- DatObj$AdjacentEdgesBoolean W <- DatObj$W EyeM <- DatObj$EyeM Rho <- DatObj$Rho FamilyInd <- DatObj$FamilyInd Nu <- DatObj$Nu YObserved <- DatObj$YObserved WeightsInd <- DatObj$WeightsInd ###Construct parameter object Para <- list() Para$Mu <- obj$mu Para$Tau2 <- obj$tau2 Para$Alpha <- obj$alpha MuMean <- apply(obj$mu, 2, mean) Tau2Mean <- apply(obj$tau2, 2, mean) AlphaMean <- apply(obj$alpha, 2, mean) CovMean <- JointCovarianceCube(WAlphaCube(AlphaMean, Z, W, M, Nu, WeightsInd), Tau2Mean, EyeM, Rho, M, Nu) Para$MuMean <- MuMean Para$CovMean <- CovMean ###Set mcmc object NKeep <- dim(obj$phi)[1] ###Compute Log-likelihood using Rcpp function GetLogLik LogLik <- NULL if (("dic" %in% diags) | ("waic" %in% diags)) { ###Compute log-likelihood requireNamespace("mvtnorm", quietly = TRUE) #Requred for pmvnorm in Rcpp function if (DatObj$FamilyInd == 0) { NBelowCount <- c(0,0) YStarNonZero <- list() for (i in 1:DatObj$Nu) YStarNonZero[[i]] <- i DatAug$NBelowCount <- NBelowCount DatAug$YStarNonZero <- YStarNonZero } LogLik <- GetLogLik(DatObj, Para, DatAug, NKeep) } ###Compute DIC diagnostics dic <- NULL if ("dic" %in% diags) { ###Compute mean log-likelihood if (FamilyInd == 0) LogLikMean <- GetLogLikMean(DatObj, Para, DatAug) if (FamilyInd == 1) LogLikMean <- GetLogLikMean(DatObj, Para, DatAug) if (FamilyInd == 2) LogLikMean <- GetLogLikMean(DatObj, Para, DatAug) ###Calculate DIC objects DBar <- -2 * mean(LogLik) DHat <- -2 * LogLikMean pD <- DBar - DHat DIC <- DBar + pD dic <- list(dic = DIC, pd = pD) } ###Compute PPD diagnostics ppd <- PPD <- NULL if ("dinf" %in% diags) { ###Get PPD PPD <- SamplePPD(DatObj, Para, NKeep) ###Compute PPD Diagnostics PPDMean <- apply(PPD, 1, mean) PPDVar <- apply(PPD, 1, var) P <- sum(PPDVar) G <- sum( (PPDMean - YObserved) ^ 2) DInf <- G + P ppd <- list(p = P, g = G, dinf = DInf) } ###Compute WAIC diagnostics waic <- NULL if ("waic" %in% diags) { ###Get WAIC # The calculation of Waic! 
Returns lppd, p_waic_1, p_waic_2, and waic, which we define # as -2*(lppd - p_waic_2), as recommended in BDA lppd <- log( apply(exp(LogLik), 2, mean) ) p_waic_1 <- 2 * (lppd - apply(LogLik, 2, mean) ) p_waic_2 <- apply(LogLik, 2, var) waic <- -2 * lppd + 2 * p_waic_2 waic <- list(waic = waic, p_waic = p_waic_2, lppd = lppd, p_waic_1 = p_waic_1) } ###Output diagnostics if (!keepDeviance & !keepPPD) diags <- list(dic = dic, dinf = ppd, waic = waic) if (!keepDeviance & keepPPD) diags <- list(dic = dic, dinf = ppd, waic = waic, PPD = t(PPD)) if (keepDeviance & !keepPPD) diags <- list(dic = dic, dinf = ppd, waic = waic, deviance = -2 * LogLik) if (keepDeviance & keepPPD) diags <- list(dic = dic, dinf = ppd, waic = waic, deviance = -2 * LogLik, PPD = t(PPD)) return(diags) }
/scratch/gouwar.j/cran-all/cranData/womblR/R/DIAG_diagnostics.R
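A minimal sketch (not part of the package source) of the pointwise WAIC arithmetic used in diagnostics(), applied to a simulated log-likelihood matrix with one row per kept posterior sample and one column per observation; the pointwise terms can be summed for a model-level WAIC.

# Illustrative sketch only; simulated log-likelihood matrix.
set.seed(54)
LogLik   <- matrix(rnorm(1000 * 50, mean = -1.5, sd = 0.2), nrow = 1000, ncol = 50)
lppd     <- log(apply(exp(LogLik), 2, mean))   # log pointwise predictive density
p_waic_2 <- apply(LogLik, 2, var)              # effective parameters (variance estimator)
waic     <- -2 * lppd + 2 * p_waic_2           # pointwise WAIC contributions
sum(waic)                                      # total WAIC for the model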
#' Garway-Heath angles for the HFA-II #' #' These Garway-Heath angles are used as the dissimilarity metric when implementing the boundary #' detection model for a longitudinal series of visual fields. #' #' @usage data(GarwayHeath) #' #' @format A vector with length 54, where each entry represents the angle (in degrees) that the #' underlying retinal nerve fiber enters the optic nerve head. The measure ranges from 0-360, #' where 0 is designated at the 9-o’clock position (right eye) and angles are counted counter #' clockwise. These angles are estimates for the Humphrey Field Analyzer-II (Carl Zeiss Meditec #' Inc., Dublin, CA). The 26th and 35th entries are missing as they correspond to a natural #' blind spot. #' #' @references Garway-Heath, et al. (2000). Ophthalmology 107:10:1809–1815. #' (\href{https://pubmed.ncbi.nlm.nih.gov/11013178/}{PubMed}) "GarwayHeath"
/scratch/gouwar.j/cran-all/cranData/womblR/R/GarwayHeath-data.R
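A minimal sketch (not part of the package source) of one plausible angular dissimilarity between two visual field locations, wrapping around the 360 degree circle. It illustrates the idea behind using the Garway-Heath angles as a dissimilarity metric and is not necessarily the exact metric implemented in the package; it also assumes the two blind-spot entries are stored as NA.

# Illustrative sketch only; assumes blind-spot entries are NA.
data(GarwayHeath)
angle.dist <- function(a, b) pmin(abs(a - b), 360 - abs(a - b))  # wrap-around difference
dm <- GarwayHeath[!is.na(GarwayHeath)]   # keep the informative locations
angle.dist(dm[1], dm[2])                 # dissimilarity between the first two locations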
#' HFAII Queen Adjacency Matrix #' #' Binary adjacency matrix for the Humphrey Field Analyzer-II (Carl Zeiss Meditec Inc., Dublin, CA) #' #' @usage data(HFAII_Queen) #' #' @format This adjacency matrix is formatted using queen neighbor criteria, meaning two locations on #' the visual field are only considered neighbors if they share an edge or corner. The adjacency matrix is a #' 54 x 54 dimensional binary object with zeros on the diagonal and the column and row sums are equal #' to the number of neighbors. #' "HFAII_Queen"
/scratch/gouwar.j/cran-all/cranData/womblR/R/HFAII_Queen-data.R
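A minimal sketch (not part of the package source) of quick sanity checks matching the description above, assuming HFAII_Queen loads as a base matrix: binary entries, symmetry, a zero diagonal, and row sums equal to each location's neighbor count.

# Illustrative sketch only; assumes a base matrix.
data(HFAII_Queen)
dim(HFAII_Queen)                   # 54 x 54
all(HFAII_Queen %in% c(0, 1))      # binary entries only
isSymmetric(unname(HFAII_Queen))   # symmetric
all(diag(HFAII_Queen) == 0)        # zeros on the diagonal
summary(rowSums(HFAII_Queen))      # neighbors per visual field location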
#' HFAII Queen Hemisphere Adjacency Matrix #' #' Binary adjacency matrix for the Humphrey Field Analyzer-II (Carl Zeiss Meditec Inc., Dublin, CA) #' #' @usage data(HFAII_QueenHF) #' #' @format This adjacency matrix is formatted using queen neighbor criteria, meaning two locations on #' the visual field are only considered neighbors if they share an edge or corner. An additional criterion #' is included that locations are not considered neighbors if they fall within different hemispheres on the #' visual field. The adjacency matrix is a 54 x 54 dimensional binary object with zeros on the diagonal and #' the column and row sums are equal to the number of neighbors. "HFAII_QueenHF"
/scratch/gouwar.j/cran-all/cranData/womblR/R/HFAII_QueenHF-data.R
#' HFAII Rook Adjacency Matrix #' #' Binary adjacency matrix for the Humphrey Field Analyzer-II (Carl Zeiss Meditec Inc., Dublin, CA) #' #' @usage data(HFAII_Rook) #' #' @format This adjacency matrix is formatted using rook neighbor criteria, meaning two locations on #' the visual field are only considered neighbors if they share an edge. The adjacency matrix is a #' 54 x 54 dimensional binary object with zeros on the diagonal and the column and row sums are equal #' to the number of neighbors. #' "HFAII_Rook"
/scratch/gouwar.j/cran-all/cranData/womblR/R/HFAII_Rook-data.R
CheckInputs <- function(Y, DM, W, Time, Starting, Hypers, Tuning, MCMC, Family, TemporalStructure, Distance, Weights, Rho, ScaleY, ScaleDM) { ###Data dimensions N <- length(Y) M <- length(DM) Nu <- length(Time) ###Family if (!Family %in% c("normal", "probit", "tobit")) stop('Family: must be one of "normal", "probit" or "tobit"') ###Distance if (!Distance %in% c("euclidean", "circumference")) stop('Distance: must be one of "euclidean" or "circumference"') ###Weights if (!Weights %in% c("continuous", "binary")) stop('Weights: must be one of "continuous" or "binary"') ###Temporal structure if (!TemporalStructure %in% c("exponential", "ar1")) stop('TemporalStructure: must be one of "exponential" or "ar1"') ###Rho if (missing(Rho)) stop("Rho: missing") if (!is.scalar(Rho)) stop('Rho must be a scalar') if (is.na(Rho)) stop('Rho cannot be NA') if (!is.finite(Rho)) stop('Rho cannot be infinite') if (!((Rho < 1) & (Rho > 0))) stop('Rho must be in (0, 1)') ###ScaleY if (missing(ScaleY)) stop("ScaleY: missing") if (!is.scalar(ScaleY)) stop('ScaleY must be a scalar') if (is.na(ScaleY)) stop('ScaleY cannot be NA') if (!is.finite(ScaleY)) stop('ScaleY cannot be infinite') if (!(ScaleY > 0)) stop('ScaleY must be positive') ###ScaleDM if (missing(ScaleDM)) stop("ScaleDM: missing") if (!is.scalar(ScaleDM)) stop('ScaleDM must be a scalar') if (is.na(ScaleDM)) stop('ScaleDM cannot be NA') if (!is.finite(ScaleDM)) stop('ScaleDM cannot be infinite') if (!(ScaleDM > 0)) stop('ScaleDM must be positive') ###Data checks for Y if (!is.numeric(Y)) stop('Y must be a vector') if (length(Y) != N) stop(paste0('Y must have length ', N)) if (any(is.na(Y))) stop("Y may have no missing values") if (any(!is.finite(Y))) stop("Y must have strictly finite entries") if ((Family == "probit") & ((sum(Y == 1) + sum(Y == 0)) != N)) stop('Y: for "probit" observed data must be binary') if ((Family == "tobit") & (any(Y < 0))) stop('Y: for "tobit" observed data must be non-negative') ###Data checks for DM if (!is.numeric(DM)) stop('DM must be a vector') if (length(DM) != M) stop(paste0('DM must have length ', M)) if (any(is.na(DM))) stop("DM may have no missing values") if (any(!is.finite(DM))) stop("DM must have strictly finite entries") ###Data checks for W if (!is.matrix(W)) stop('W must be a matrix') if (!dim(W)[1] == M) stop(paste0('W must be a ',M ,' x ', M, ' dimensional matrix')) if (!dim(W)[2] == M) stop('W must be square') if (sum(!((W) == t(W))) > 0) stop('W must be symmetric') if (length(table(W)) > 2) stop('W must only contain binaries (i.e. 
0\'s or 1\'s)') if (any(diag(W) != 0)) stop('W must have only zeros on the diagonal') ###Data checks for Time if (!is.numeric(Time)) stop('Time must be a vector') if (length(Time) != Nu) stop(paste0('Time must have length ', Nu)) if (any(is.na(Time))) stop("Time may have no missing values") if (any(!is.finite(Time))) stop("Time must have strictly finite entries") if (is.unsorted(Time)) stop('Time vector is not in increasing order') if (!all(Time >= 0)) stop('Time vector has at least one negative point') ###Verify dimensions M_W <- dim(W)[1] if (M != M_W) stop('DM and W have contradictory dimensions') if ((N / M) != Nu) stop('Time, DM and Y have contradictory dimensions') ###Hypers if (!is.null(Hypers)) { if (!is.list(Hypers)) stop('Hypers must be a list') if (!all(names(Hypers) %in% c("Delta", "T", "Phi"))) stop('Hypers: Can only contain lists with names "Delta", "T" and "Phi"') ###If delta hyperparameters are provided if ("Delta" %in% names(Hypers)) { if (!is.list(Hypers$Delta)) stop('Hypers: "Delta" must be a list') if (!"MuDelta" %in% names(Hypers$Delta)) stop('Hypers: "MuDelta" value missing') if (!is.numeric(Hypers$Delta$MuDelta)) stop('Hypers: "MuDelta" must be a vector') if (length(Hypers$Delta$MuDelta) != 3) stop('Hypers: "MuDelta" must be length 3') if (!all(!is.na(Hypers$Delta$MuDelta))) stop('Hypers: "MuDelta" cannot have missing values') if (!all(is.finite(Hypers$Delta$MuDelta))) stop('Hypers: "MuDelta" cannot have infinite values') if (!"OmegaDelta" %in% names(Hypers$Delta)) stop('Hypers: "OmegaDelta" value missing') if (!is.matrix(Hypers$Delta$OmegaDelta)) stop('Hypers: "OmegaDelta" must be a matrix') if (!dim(Hypers$Delta$OmegaDelta)[1] == 3) stop('Hypers: "OmegaDelta" must be 3 dimensional') if (!all(!is.na(Hypers$Delta$OmegaDelta))) stop('Hypers: "OmegaDelta" cannot have missing values') if (!all(is.finite(Hypers$Delta$OmegaDelta))) stop('Hypers: "OmegaDelta" cannot have infinite values') if (!dim(Hypers$Delta$OmegaDelta)[2] == 3) stop('Hypers: "OmegaDelta" must be square') if (sum( !( (Hypers$Delta$OmegaDelta) == t(Hypers$Delta$OmegaDelta) ) ) > 0) stop('Hypers: "OmegaDelta" must be symmetric') if ((det(Hypers$Delta$OmegaDelta) - 0) < 0.00001) stop('Hypers: "OmegaDelta" is close to singular') if (!all(!is.na(Hypers$Delta$OmegaDelta))) stop('Hypers: "OmegaDelta" cannot have missing values') } ###If T hyperparameters are provided if ("T" %in% names(Hypers)) { if (!is.list(Hypers$T)) stop('Hypers: "T" must be a list') if (!"Xi" %in% names(Hypers$T)) stop('Hypers: "Xi" value missing') if (!is.scalar(Hypers$T$Xi)) stop('Hypers: "Xi" must be a scalar') if (is.na(Hypers$T$Xi)) stop('Hypers: "Xi" cannot be NA') if (!is.finite(Hypers$T$Xi)) stop('Hypers: "Xi" cannot be infinite') if (Hypers$T$Xi < 3) stop('Hypers: "Xi" must be greater than or equal to 3') if (!"Psi" %in% names(Hypers$T)) stop('Hypers: "Psi" value missing') if (!is.matrix(Hypers$T$Psi)) stop('Hypers: "Psi" must be a matrix') if (!dim(Hypers$T$Psi)[1] == 3) stop('Hypers: "Psi" must be 3 dimensional') if (!all(!is.na(Hypers$T$Psi))) stop('Hypers: "Psi" cannot have missing values') if (!all(is.finite(Hypers$T$Psi))) stop('Hypers: "Psi" cannot have infinite values') if (!dim(Hypers$T$Psi)[2] == 3) stop('Hypers: "Psi" must be square') if (sum( !( (Hypers$T$Psi) == t(Hypers$T$Psi) ) ) > 0) stop('Hypers: "Psi" must be symmetric') if ((det(Hypers$T$Psi) - 0) < 0.00001) stop('Hypers: "Psi" is close to singular') } ###If phi hyperparameters are provided if ("Phi" %in% names(Hypers)) { if (!is.list(Hypers$Phi)) stop('Hypers: 
"Phi" must be a list') if (!"APhi" %in% names(Hypers$Phi)) stop('Hypers: "APhi" value missing') if (!is.scalar(Hypers$Phi$APhi)) stop('Hypers: "APhi" must be a scalar') if (is.na(Hypers$Phi$APhi)) stop('Hypers: "APhi" cannot be NA') if (!is.finite(Hypers$Phi$APhi)) stop('Hypers: "APhi" cannot be infinite') if (!"BPhi" %in% names(Hypers$Phi)) stop('Hypers: "BPhi" value missing') if (!is.scalar(Hypers$Phi$BPhi)) stop('Hypers: "BPhi" must be a scalar') if (is.na(Hypers$Phi$BPhi)) stop('Hypers: "BPhi" cannot be NA') if (!is.finite(Hypers$Phi$BPhi)) stop('Hypers: "BPhi" cannot be infinite') if (TemporalStructure == "exponential") { if (Hypers$Phi$APhi <= 0) stop('Hypers: For "exponential" correlation "APhi" must be strictly positive') if (Hypers$Phi$BPhi <= 0) stop('Hypers: For "exponential" correlation "BPhi" must be strictly positive') if (Hypers$Phi$BPhi < Hypers$Phi$APhi) stop('Hypers: "BPhi" must be greater than "APhi"') } if (TemporalStructure == "ar1") { if (Hypers$Phi$APhi < 0) stop('Hypers: For "ar1" correlation "APhi" must be in [0, 1]') if (Hypers$Phi$APhi > 1) stop('Hypers: For "ar1" correlation "APhi" must be in [0, 1]') if (Hypers$Phi$BPhi < 0) stop('Hypers: For "ar1" correlation "BPhi" must be in [0, 1]') if (Hypers$Phi$BPhi > 1) stop('Hypers: For "ar1" correlation "BPhi" must be in [0, 1]') if (Hypers$Phi$BPhi < Hypers$Phi$APhi) stop('Hypers: "BPhi" must be greater than "APhi"') } } ###End Hyperparameters } ###Starting Values if (!is.null(Starting)) { if (!is.list(Starting)) stop('Starting must be a list') if (!all(names(Starting) %in% c("Delta", "T", "Phi"))) stop('Starting: Can only contain objects with names "Delta", "T" and "Phi"') ###If delta starting values is provided if ("Delta" %in% names(Starting)) { if (!is.numeric(Starting$Delta)) stop('Starting: "Delta" must be a vector') if (length(Starting$Delta) != 3) stop('Starting: "Delta" must be length 3') if (!all(!is.na(Starting$Delta))) stop('Starting: "Delta" cannot have missing values') if (!all(is.finite(Starting$Delta))) stop('Starting: "Delta" cannot have infinite values') } ###If T starting values is provided if ("T" %in% names(Starting)) { if (!is.matrix(Starting$T)) stop('Starting: "T" must be a matrix') if (!dim(Starting$T)[1] == 3) stop('Starting: "T" must be 3 dimensional') if (!dim(Starting$T)[2] == 3) stop('Starting: "T" must be square') if (!all(!is.na(Starting$T))) stop('Starting: "T" cannot have missing values') if (!all(is.finite(Starting$T))) stop('Starting: "T" cannot have infinite values') if (sum( !( (Starting$T) == t(Starting$T) ) ) > 0) stop('Starting: "T" must be symmetric') if ((det(Starting$T) - 0) < 0.00001) stop('Starting: "T" is close to singular') } ###If phi starting values is provided if ("Phi" %in% names(Starting)) { if (!is.scalar(Starting$Phi)) stop('Starting: "Phi" must be a scalar') if (is.na(Starting$Phi)) stop('Starting: "Phi" cannot be NA') if (!is.finite(Starting$Phi)) stop('Starting: "Phi" cannot be infinite') # I make sure that Phi is in [APhi, BPhi] in CreateHyPara(); } ###End Starting Values } ###Tuning Values if (!is.null(Tuning)) { if (!is.list(Tuning)) stop('Tuning must be a list') if (!all(names(Tuning) %in% c("Theta2", "Theta3", "Phi"))) stop('Tuning: Can only contain objects with names "Theta2", "Theta3" and "Phi"') ###If theta2 tuning values are provided if ("Theta2" %in% names(Tuning)) { if (!is.numeric(Tuning$Theta2)) stop('Tuning: "Theta2" must be a vector') if (length(Tuning$Theta2) != Nu) stop(paste0('Tuning: "Theta2" must have length ', Nu)) if 
(!all(!is.na(Tuning$Theta2))) stop('Tuning: "Theta2" cannot have missing values') if (!all(is.finite(Tuning$Theta2))) stop('Tuning: "Theta2" cannot have infinite values') if (any(Tuning$Theta2 < 0)) stop('Tuning: "Theta2" must have non-negative components') } ###If theta3 tuning values are provided if ("Theta3" %in% names(Tuning)) { if (!"Theta3" %in% names(Tuning)) stop('Tuning: "Theta3" value missing') if (!is.numeric(Tuning$Theta3)) stop('Tuning: "Theta3" must be a vector') if (length(Tuning$Theta3) != Nu) stop(paste0('Tuning: "Theta3" must have length ', Nu)) if (!all(!is.na(Tuning$Theta3))) stop('Tuning: "Theta3" cannot have missing values') if (!all(is.finite(Tuning$Theta3))) stop('Tuning: "Theta3" cannot have infinite values') if (any(Tuning$Theta3 < 0)) stop('Tuning: "Theta3" must have non-negative components') } ###If phi tuning value is provided if ("Phi" %in% names(Tuning)) { if (!is.scalar(Tuning$Phi)) stop('Tuning: "Phi" must be a scalar') if (is.na(Tuning$Phi)) stop('Tuning: "Phi" cannot be NA') if (!is.finite(Tuning$Phi)) stop('Tuning: "Phi" cannot be infinite') if (Tuning$Phi < 0) stop('Tuning: "Phi" must be non-negative') } ###End Tuning Values } ###MCMC Values if (!is.null(MCMC)) { if (!is.list(MCMC)) stop('MCMC must be a list') if (!all(names(MCMC) %in% c("NBurn", "NSims", "NThin", "NPilot"))) stop('MCMC: Can only contain objects with names "NBurn", "NSims", "NThin" and "NPilot"') ###If NBurn is provided if ("NBurn" %in% names(MCMC)) { if (!is.scalar(MCMC$NBurn)) stop('MCMC: "NBurn" must be a scalar') if (is.na(MCMC$NBurn)) stop('MCMC: "NBurn" cannot be NA') if (!is.finite(MCMC$NBurn)) stop('MCMC: "NBurn" cannot be infinite') if (!is.wholenumber(MCMC$NBurn) | MCMC$NBurn < 0) stop('MCMC: "NBurn" must be a non-negative integer') if (MCMC$NBurn < 100) stop('MCMC: "NBurn" must be at least 100') } ###If NSims is provided if ("NSims" %in% names(MCMC)) { if (!is.scalar(MCMC$NSims)) stop('MCMC: "NSims" must be a scalar') if (is.na(MCMC$NSims)) stop('MCMC: "NSims" cannot be NA') if (!is.finite(MCMC$NSims)) stop('MCMC: "NSims" cannot be infinite') if (!is.wholenumber(MCMC$NSims) | MCMC$NSims <= 0) stop('MCMC: "NSims" must be a positive integer') if (MCMC$NSims < 100) stop('MCMC: "NSims" must be at least 100') } ###If NThin is provided if ("NThin" %in% names(MCMC)) { if (!is.scalar(MCMC$NThin)) stop('MCMC: "NThin" must be a scalar') if (is.na(MCMC$NThin)) stop('MCMC: "NThin" cannot be NA') if (!is.finite(MCMC$NThin)) stop('MCMC: "NThin" cannot be infinite') if (!is.wholenumber(MCMC$NThin) | MCMC$NThin <= 0) stop('MCMC: "NThin" must be a positive integer') # if (!is.wholenumber(MCMC$NSims / MCMC$NThin)) stop('MCMC: "NThin" must be a factor of "NSims"') enforced in CreateMCMC(); } ###If NPilot is provided if ("NPilot" %in% names(MCMC)) { if (!is.scalar(MCMC$NPilot)) stop('MCMC: "NPilot" must be a scalar') if (is.na(MCMC$NPilot)) stop('MCMC: "NPilot" cannot be NA') if (!is.finite(MCMC$NPilot)) stop('MCMC: "NPilot" cannot be infinite') if (!is.wholenumber(MCMC$NPilot) | MCMC$NPilot < 0) stop('MCMC: "NPilot" must be a positive integer') # if (!is.wholenumber(MCMC$NBurn / MCMC$NPilot)) stop('MCMC: "NPilot" must be a factor of "NBurn"') enforced in CreateMCMC(); } ###End MCMC Values } } ###Helper Functions is.scalar <- function(x) ((is.numeric(x)) & (length(x) == 1)) is.wholenumber <- function(x, tol = .Machine$double.eps^0.5) abs(x - round(x)) < tol
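###Hedged sketch (not part of the package): the dimension contract enforced above, namely
###N = M * Nu with Y ordered space-first within each visit, can be made explicit by reshaping
###the outcome into an M x Nu matrix. "ToWideSketch" is a hypothetical helper name.
ToWideSketch <- function(Y, M, Nu) {
  stopifnot(length(Y) == M * Nu)
  matrix(Y, nrow = M, ncol = Nu)  # column t holds the M locations observed at visit t
}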
/scratch/gouwar.j/cran-all/cranData/womblR/R/MCMC_CheckInputs.R
###Function for reading in sampler inputs and creating a list object that contains all relavent data objects-------------------- CreateDatObj <- function(Y, DM, W, Time, Rho, ScaleY, ScaleDM, TemporalStructure, Family, Distance, Weights) { ###Data objects YObserved <- Y / ScaleY #scale observed data N <- length(YObserved) #total observations M <- dim(W)[1] #number of spatial locations Nu <- N / M #number of visits NTheta <- 3 * Nu #number of theta parameters YObserved <- matrix(YObserved, ncol = 1) DM <- matrix(DM, ncol = 1) ###Temporal distance matrix TimeDist <- abs( outer(Time, Time, "-")) ###Dynamic data object (updated with data augmentation) YStarWide <- matrix(YObserved, nrow = M, ncol = Nu) ###Matrix Objects OneM <- matrix(rep(1, M), nrow = M , ncol = 1) OneNu <- matrix(rep(1, Nu), nrow = Nu, ncol = 1) EyeM <- diag(M) EyeNu <- diag(Nu) EyeNTheta <- diag(3 * Nu) Eye3 <- diag(3) ZDelta <- kronecker(OneNu, Eye3) ###Create Z vector if (Distance == "euclidean") Dist <- function(x, y) abs(x - y) if (Distance == "circumference") Dist <- function(x, y) pmin(abs(x - y), (360 - pmax(x, y) + pmin(x, y))) #circumference of optic nerve DM_Grid <- expand.grid(DM, DM) Z_Vector <- Dist(DM_Grid[ , 1], DM_Grid[ , 2]) Z_Matrix <- matrix(Z_Vector, nrow = dim(W)[1], ncol = dim(W)[1], byrow = TRUE) AdjacentEdgesBoolean <- (W == 1) & (!lower.tri(W)) Z <- matrix(Z_Matrix[AdjacentEdgesBoolean] / ScaleDM, ncol = 1) AdjacentEdgesBoolean <- matrix(which(AdjacentEdgesBoolean) - 1, ncol = 1) ###Assign temporal correlation structure if (TemporalStructure == "exponential") TempCorInd <- 0 if (TemporalStructure == "ar1") TempCorInd <- 1 ###Family indicator if (Family == "normal") FamilyInd <- 0 if (Family == "probit") FamilyInd <- 1 if (Family == "tobit") FamilyInd <- 2 ###Weights indicator if (Weights == "continuous") WeightsInd <- 0 if (Weights == "binary") WeightsInd <- 1 ###Make parameters global DatObj <- list() DatObj$YObserved <- YObserved DatObj$ScaleY <- ScaleY DatObj$ScaleDM <- ScaleDM DatObj$YStarWide <- YStarWide DatObj$DM <- DM DatObj$W <- W DatObj$TimeDist <- TimeDist DatObj$Rho <- Rho DatObj$N <- N DatObj$M <- M DatObj$Nu <- Nu DatObj$NTheta <- NTheta DatObj$OneM <- OneM DatObj$OneNu <- OneNu DatObj$EyeM <- EyeM DatObj$EyeNu <- EyeNu DatObj$EyeNTheta <- EyeNTheta DatObj$Eye3 <- Eye3 DatObj$ZDelta <- ZDelta DatObj$Z <- Z DatObj$AdjacentEdgesBoolean <- AdjacentEdgesBoolean DatObj$TempCorInd <- TempCorInd DatObj$FamilyInd <- FamilyInd DatObj$WeightsInd <- WeightsInd DatObj$Time <- Time return(DatObj) } ###Function to create Hyperparameter Object------------------------------------------------------------------------------------ CreateHyPara <- function(Hypers, DatObj) { ###Set data objects TimeDist <- DatObj$TimeDist TempCorInd <- DatObj$TempCorInd ###Which parameters are user defined? 
UserHypers <- names(Hypers) ###Set hyperparameters for Delta if ("Delta" %in% UserHypers) { MuDelta <- matrix(Hypers$Delta$MuDelta, nrow = 3) OmegaDeltaInv <- solve(Hypers$Delta$OmegaDelta) OmegaDeltaInvMuDelta <- OmegaDeltaInv %*% MuDelta } if (!"Delta" %in% UserHypers) { MuDelta <- matrix(c(3, 0, 0), nrow = 3) OmegaDeltaInv <- diag(c(0.001, 0.001, 1)) OmegaDeltaInvMuDelta <- OmegaDeltaInv %*% MuDelta } ###Set hyperparameters for T if ("T" %in% UserHypers) { Xi <- Hypers$T$Xi Psi <- Hypers$T$Psi } if (!"T" %in% UserHypers) { Xi <- 3 + 1 Psi <- diag(3) } ###Set hyperparameters for Phi if ("Phi" %in% UserHypers) { APhi <- Hypers$Phi$APhi BPhi <- Hypers$Phi$BPhi } if (!"Phi" %in% UserHypers) { minDiff <- min( TimeDist[ TimeDist > 0 ] ) maxDiff <- max( TimeDist[ TimeDist > 0 ] ) if (TempCorInd == 0) { # exponential BPhi <- -log(0.01) / minDiff #shortest diff goes down to 1% APhi <- -log(0.95) / maxDiff #longest diff goes up to 95% } if (TempCorInd == 1) { # ar1 APhi <- 0.01 ^ (1 / minDiff) #shortest diff goes down to 1% BPhi <- 0.95 ^ (1 / maxDiff) #longest diff goes up to 95% } } ###Create object for hyperparameters HyPara <- list() HyPara$OmegaDeltaInvMuDelta <- OmegaDeltaInvMuDelta HyPara$OmegaDeltaInv <- OmegaDeltaInv HyPara$Xi <- Xi HyPara$Psi <- Psi HyPara$APhi <- APhi HyPara$BPhi <- BPhi return(HyPara) } ###Function for creating an object containing relevant Metropolis information--------------------------------------------------- CreateMetrObj <- function(Tuning, DatObj) { ###Set Data Objects Nu <- DatObj$Nu ###Which parameters are user defined? UserTuners <- names(Tuning) ###Set tuning parameters for Theta2 if ("Theta2" %in% UserTuners) MetropTheta2 <- Tuning$Theta2 if (!"Theta2" %in% UserTuners) MetropTheta2 <- rep(1, Nu) ###Set tuning parameters for Theta3 if ("Theta3" %in% UserTuners) MetropTheta3 <- Tuning$Theta3 if (!"Theta3" %in% UserTuners) MetropTheta3 <- rep(1, Nu) ###Set tuning parameter for Phi if ("Phi" %in% UserTuners) MetropPhi <- Tuning$Phi if (!"Phi" %in% UserTuners) MetropPhi <- 1 ###Set acceptance rate counters AcceptanceTheta2 <- AcceptanceTheta3 <- rep(0, Nu) AcceptancePhi <- 0 ###Return metropolis object MetrObj <- list() MetrObj$MetropTheta2 <- MetropTheta2 MetrObj$AcceptanceTheta2 <- AcceptanceTheta2 MetrObj$MetropTheta3 <- MetropTheta3 MetrObj$AcceptanceTheta3 <- AcceptanceTheta3 MetrObj$MetropPhi <- MetropPhi MetrObj$AcceptancePhi <- AcceptancePhi return(MetrObj) } ###Function for creating inital parameter object------------------------------------------------------------------------------- CreatePara <- function(Starting, DatObj, HyPara) { ###Set data objects W <- DatObj$W Rho <- DatObj$Rho TimeDist <- DatObj$TimeDist Nu <- DatObj$Nu M <- DatObj$M ZDelta <- DatObj$ZDelta Z <- DatObj$Z AdjacentEdgesBoolean <- DatObj$AdjacentEdgesBoolean EyeM <- DatObj$EyeM TempCorInd <- DatObj$TempCorInd EyeNTheta <- DatObj$EyeNTheta OneNu <- DatObj$OneNu WeightsInd <- DatObj$WeightsInd ###Set hyperparameter objects APhi <- HyPara$APhi BPhi <- HyPara$BPhi ###Which parameters are user defined? 
UserStarters <- names(Starting) ###Set initial value of Delta if ("Delta" %in% UserStarters) Delta <- matrix(Starting$Delta, nrow = 3) if (!"Delta" %in% UserStarters) Delta <- matrix(c(3, 0, 0), nrow = 3, ncol = 1) ###Set intial value of T if ("T" %in% UserStarters) T <- Starting$T if (!"T" %in% UserStarters) T <- diag(3) ###Set initial values of Phi if ("Phi" %in% UserStarters) { Phi <- Starting$Phi if ((Phi <= APhi) | (Phi >= BPhi)) stop('Starting: "Phi" must be in the interval (APhi, BPhi)') } if (!"Phi" %in% UserStarters) Phi <- mean(c(APhi, BPhi)) ###Set inital value of Theta (both matrix and vector form) VecTheta <- ZDelta %*% Delta Theta <- matrix(VecTheta, nrow = 3, ncol = Nu) ###Transform to level 1 parameters Mu <- Theta[1 , ] Tau2 <- exp( Theta[2 , ] ) ^ 2 Alpha <- exp( Theta[ 3 , ] ) ###Create covariance arrays that can be converted to arma::cubes WAlphas <- WAlphaCube(Alpha, Z, W, M, Nu, WeightsInd) JointCovariances <- JointCovarianceCube(WAlphas, Tau2, EyeM, Rho, M, Nu) RootiLikelihoods <- RootiLikelihoodCube(JointCovariances, EyeM, M, Nu) ###Prior covariance objects SIGMAPhi <- SIGMA(Phi, TempCorInd, TimeDist, Nu) SIGMAPhiInv <- CholInv(SIGMAPhi) TInv <- Inv3(T) CovTheta <- kronecker(SIGMAPhi, T) CovThetaInv <- kronecker(SIGMAPhiInv, TInv) RootiTheta <- GetRooti(CovTheta, EyeNTheta) MMat <- Delta %*% t(OneNu) MeanTheta <- ZDelta %*% Delta ###Save parameter objects Para <- list() Para$Mu <- Mu Para$Tau2 <- Tau2 Para$Alpha <- Alpha Para$WAlphas <- WAlphas Para$JointCovariances <- JointCovariances Para$RootiLikelihoods <- RootiLikelihoods Para$VecTheta <- VecTheta Para$Theta <- Theta Para$Delta <- Delta Para$MeanTheta <- MeanTheta Para$T <- T Para$TInv <- TInv Para$Phi <- Phi Para$SIGMAPhi <- SIGMAPhi Para$SIGMAPhiInv <- SIGMAPhiInv Para$CovThetaInv <- CovThetaInv Para$RootiTheta <- RootiTheta Para$MMat <- MMat return(Para) } ###Function that creates the data augmentation (i.e. 
Tobit) booleans------------------------------------------------------------ CreateDatAug <- function(DatObj) { ###Set data object YObserved <- DatObj$YObserved FamilyInd <- DatObj$FamilyInd YStarWide <- DatObj$YStarWide M <- DatObj$M Nu <- DatObj$Nu ###Initialize Data Augmentation Object DatAug <- NULL ###Normal objects if (FamilyInd == 0) { DatAug$NBelow <- 0 DatAug$NAbove <- 0 DatAug$TobitIndeces <- matrix(c(0,0,0,0),2,2) DatAug$ProbitIndeces <- matrix(c(0,0,0,0),2,2) } ###Probit objects if (FamilyInd == 1) { TobitBoolean <- YObserved <= 0 WhichBelow <- which(TobitBoolean) NBelow <- length(WhichBelow) TobitBooleanMat <- matrix(TobitBoolean, nrow = M, ncol = Nu) YStarBelow <- list() for (i in 1 : Nu) YStarBelow[[i]] <- YStarWide[!TobitBooleanMat[,i], i] NBelowList <- unlist(lapply(YStarBelow, f<-function(x) M - length(x))) TobitIndeces <- which(TobitBooleanMat, arr.ind = TRUE) TobitIndeces <- TobitIndeces - 1 ProbitBoolean <- YObserved > 0 WhichAbove <- which(ProbitBoolean) NAbove <- length(WhichAbove) ProbitBooleanMat <- matrix(ProbitBoolean, nrow = M, ncol = Nu) YStarAbove <- list() for (i in 1 : Nu) YStarAbove[[i]] <- YStarWide[!ProbitBooleanMat[,i], i] NAboveList <- unlist(lapply(YStarAbove, f<-function(x) M - length(x))) ProbitIndeces <- which(ProbitBooleanMat, arr.ind = TRUE) ProbitIndeces <- ProbitIndeces - 1 ###Save objects DatAug <- list() DatAug$NBelow <- NBelow DatAug$NAbove <- NAbove DatAug$TobitIndeces <- TobitIndeces DatAug$ProbitIndeces <- ProbitIndeces } ###Tobit objects if (FamilyInd == 2) { TobitBoolean <- YObserved <= 0 WhichBelow <- which(TobitBoolean) NBelow <- length(WhichBelow) TobitBooleanMat <- matrix(TobitBoolean, nrow = M, ncol = Nu) YStarNonZero <- list() for (i in 1 : Nu) YStarNonZero[[i]] <- YStarWide[!TobitBooleanMat[,i], i] NBelowCount <- unlist(lapply(YStarNonZero, f<-function(x) M - length(x))) TobitIndeces <- which(TobitBooleanMat, arr.ind = TRUE) TobitIndeces <- TobitIndeces - 1 # ZDatAug <- model.matrix(~-1 + as.factor(TobitIndeces[,2])) # attributes(ZDatAug) <- NULL # ZDatAug <- structure(ZDatAug, class = "matrix", dim = c(NBelow, Nu)) # WDatAug <- array(FALSE, dim = c(M, M, Nu)) # for (i in 1:NBelow) { # Visit <- TobitIndeces[i, 2] + 1 # Location <- TobitIndeces[i, 1] + 1 # WDatAug[ , Location, Visit] <- rep(TRUE, M) # } # WDatAug <- matrix(which(WDatAug) - 1, ncol = 1) ###Save objects DatAug <- list() DatAug$WhichBelow <- WhichBelow DatAug$NBelow <- NBelow DatAug$TobitBooleanMat <- TobitBooleanMat DatAug$YStarNonZero <- YStarNonZero DatAug$NBelowCount <- NBelowCount DatAug$TobitIndeces <- TobitIndeces DatAug$ProbitIndeces <- matrix(c(0,0,0,0),2,2) DatAug$NAbove <- 2 # DatAug$ZDatAug <- ZDatAug # DatAug$WDatAug <- WDatAug } return(DatAug) } ###Function that creates inputs for MCMC sampler-------------------------------------------------------------------------------- CreateMcmc <- function(MCMC, DatObj) { ###Set data objects Nu <- DatObj$Nu ###Which parameters are user defined? 
UserMCMC <- names(MCMC) ###Set MCMC objects if ("NBurn" %in% UserMCMC) NBurn <- MCMC$NBurn if (!"NBurn" %in% UserMCMC) NBurn <- 10000 if ("NSims" %in% UserMCMC) NSims <- MCMC$NSims if (!"NSims" %in% UserMCMC) NSims <- 100000 if ("NThin" %in% UserMCMC) NThin <- MCMC$NThin if (!"NThin" %in% UserMCMC) NThin <- 10 if ("NPilot" %in% UserMCMC) NPilot <- MCMC$NPilot if (!"NPilot" %in% UserMCMC) NPilot <- 20 ###One last check of MCMC user inputs is.wholenumber <- function(x, tol = .Machine$double.eps^0.5) abs(x - round(x)) < tol if (!is.wholenumber(NSims / NThin)) stop('MCMC: "NThin" must be a factor of "NSims"') if (!is.wholenumber(NBurn / NPilot)) stop('MCMC: "NPilot" must be a factor of "NBurn"') ###Create MCMC objects NTotal <- NBurn + NSims WhichKeep <- NBurn + ( 1 : (NSims / NThin) ) * NThin NKeep <- length(WhichKeep) ###Pilot adaptation objects WhichPilotAdapt <- ( 1 : NPilot ) * NBurn / NPilot PilotAdaptDenominator <- WhichPilotAdapt[1] ###Burn-in progres bar BarLength <- 50 #Burn-in bar length (arbitrary) BurnInProgress <- seq(1 / BarLength, 1, 1 / BarLength) WhichBurnInProgress <- sapply(BurnInProgress, function(x) tail(which(1 : NBurn <= x * NBurn), 1)) ###Progress output objects SamplerProgress <- seq(0.1, 1.0, 0.1) #Intervals of progress update (arbitrary) WhichSamplerProgress <- sapply(SamplerProgress, function(x) tail(which(1 : NSims <= x * NSims), 1)) + NBurn WhichBurnInProgressInt <- sapply(SamplerProgress, function(x) tail(which(1 : NBurn <= x * NBurn), 1)) ###Save objects MCMC <- list() MCMC$NBurn <- NBurn MCMC$NSims <- NSims MCMC$NThin <- NThin MCMC$NPilot <- NPilot MCMC$NTotal <- NTotal MCMC$WhichKeep <- WhichKeep MCMC$NKeep <- NKeep MCMC$WhichPilotAdapt <- WhichPilotAdapt MCMC$PilotAdaptDenominator <- PilotAdaptDenominator MCMC$BurnInProgress <- BurnInProgress MCMC$WhichBurnInProgress <- WhichBurnInProgress MCMC$WhichBurnInProgressInt <- WhichBurnInProgressInt MCMC$BarLength <- BarLength MCMC$WhichSamplerProgress <- WhichSamplerProgress return(MCMC) } ###Function that creates a storage object for raw samples----------------------------------------------------------------------- CreateStorage <- function(DatObj, McmcObj) { ###Set data objects Nu <- DatObj$Nu ###Set MCMC objects NKeep <- McmcObj$NKeep ###Create storage object Out <- matrix(nrow = (3 * Nu + 3 + 6 + 1), ncol = NKeep) return(Out) }
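###Hedged sketch (not part of the package): the MCMC bookkeeping above retains every NThin-th
###post-burn-in scan. "KeptScansSketch" is a hypothetical helper reproducing the WhichKeep
###calculation for arbitrary settings.
KeptScansSketch <- function(NBurn = 10000, NSims = 100000, NThin = 10) {
  NBurn + (1:(NSims / NThin)) * NThin  # indices of retained scans; length NSims / NThin
}
# e.g. head(KeptScansSketch(1000, 2500, 5)) gives 1005 1010 1015 1020 1025 1030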
/scratch/gouwar.j/cran-all/cranData/womblR/R/MCMC_Create.R
#' MCMC sampler for spatiotemporal boundary detection with dissimilarity metric. #' #' \code{STBDwDM} is a Markov chain Monte Carlo (MCMC) sampler for a spatiotemporal #' boundary detection model using the Bayesian hierarchical framework. #' #' @param Y An \code{N} dimensional vector containing the observed outcome data. #' Here, \code{N = M * Nu}, where \code{M} represents the number of spatial locations #' and \code{Nu} the number of temporal visits. The observations in \code{Y} must be first #' ordered spatially and then temporally, meaning the first \code{M} observations #' in \code{Y} should come from the initial time point. #' #' @param DM An \code{M} dimensional vector containing a dissimilarity metric #' for each spatial location. The order of the spatial locations must match the order from #' \code{Y}. #' #' @param W An \code{M x M} dimensional binary adjacency matrix for dictating the #' spatial neigborhood structure. #' #' @param Time A \code{Nu} dimensional vector containing the observed time points for each #' vector of outcomes in increasing order. #' #' @param Starting Either \code{NULL} or a \code{list} containing starting values #' to be specified for the MCMC sampler. If \code{NULL} is not chosen then none, some or all #' of the starting values may be specified. #' #' When \code{NULL} is chosen then default starting values are automatically generated. #' Otherwise a \code{list} must be provided with names \code{Delta}, \code{T} or #' \code{Phi} containing appropriate objects. \code{Delta} must be a \code{3} dimensional #' vector, \code{T} a \code{3 x 3} dimensional matrix and \code{Phi} a scalar. #' #' @param Hypers Either \code{NULL} or a \code{list} containing hyperparameter values #' to be specified for the MCMC sampler. If \code{NULL} is not chosen then none, some or all #' of the hyperparameter values may be specified. #' #' When \code{NULL} is chosen then default hyperparameter values are automatically #' generated. These default hyperparameters are described in detail in (Berchuck et al.). #' Otherwise a \code{list} must be provided with names \code{Delta}, \code{T} or #' \code{Phi} containing further hyperparameter information. These objects are themselves #' \code{lists} and may be constructed as follows. #' #' \code{Delta} is a \code{list} with two objects, \code{MuDelta} and \code{OmegaDelta}. #' \code{MuDelta} represents the mean component of the multivariate normal hyperprior and #' must be a \code{3} dimensional vector, while \code{OmegaDelta} represents the covariance #' and must be a \code{3 x 3} dimensional matrix. #' #' \code{T} is a \code{list} with two objects, \code{Xi} and \code{Psi}. \code{Xi} #' represents the degrees of freedom parameter for the inverse-Wishart hyperprior and #' must be a real number scalar, while \code{Psi} represents the scale matrix #' and must be a \code{3 x 3} dimensional positive definite matrix. #' #' \code{Phi} is a \code{list} with two objects, \code{APhi} and \code{BPhi}. \code{APhi} #' represents the lower bound for the uniform hyperprior, while \code{BPhi} represents #' the upper bound. The bounds must be specified carefully. For example, if the exponential #' temporal correlation structure is chosen both bounds must be restricted to be non-negative. #' #' @param Tuning Either \code{NULL} or a \code{list} containing tuning values #' to be specified for the MCMC Metropolis steps. If \code{NULL} is not chosen then all #' of the tuning values must be specified. 
#' #' When \code{NULL} is chosen then default tuning values are automatically generated to #' \code{1}. Otherwise a \code{list} must be provided with names \code{Theta2}, #' \code{Theta3} and \code{Phi}. \code{Theta2} and \code{Theta3} must be #' \code{Nu} dimensional vectors and \code{Phi} a scalar. Each containing tuning variances #' for their corresponding Metropolis updates. #' #' @param MCMC Either \code{NULL} or a \code{list} containing input values to be used #' for implementing the MCMC sampler. If \code{NULL} is not chosen then all #' of the MCMC input values must be specified. #' #' \code{NBurn}: The number of sampler scans included in the burn-in phase. (default = #' \code{10,000}) #' #' \code{NSims}: The number of post-burn-in scans for which to perform the #' sampler. (default = \code{100,000}) #' #' \code{NThin}: Value such that during the post-burn-in phase, only every #' \code{NThin}-th scan is recorded for use in posterior inference (For return values #' we define, NKeep = NSims / NThin (default = \code{10}). #' #' \code{NPilot}: The number of times during the burn-in phase that pilot adaptation #' is performed (default = \code{20}) #' #' @param Family Character string indicating the distribution of the observed data. Options #' include: \code{"normal"}, \code{"probit"}, \code{"tobit"}. #' #' @param TemporalStructure Character string indicating the temporal structure of the #' time observations. Options include: \code{"exponential"} and \code{"ar1"}. #' #' @param Distance Character string indicating the distance metric for computing the #' dissimilarity metric. Options include: \code{"euclidean"} and \code{"circumference"}. #' #' @param Weights Character string indicating the type of weight used. Options include: #' \code{"continuous"} and \code{"binary"}. #' #' @param Rho A scalar in \code{(0,1)} that dictates the magnitude of local spatial sharing. #' By default it is fixed at \code{0.99} as suggested by Lee and Mitchell (2012). #' #' @param ScaleY A positive scalar used for scaling the observed data, \code{Y}. This is #' used to aid numerically for MCMC convergence, as scaling large observations often #' stabilizes chains. By default it is fixed at \code{10}. #' #' @param ScaleDM A positive scalar used for scaling the dissimilarity metric distances, #' \code{DM}. This is used to aid numerically for MCMC convergence. as scaling spatial #' distances is often used for improved MCMC convergence. By default it is fixed at \code{100}. #' #' @param Seed An integer value used to set the seed for the random number generator #' (default = 54). #' #' @details Details of the underlying statistical model can be found in the article by #' Berchuck et al. (2018), "Diagnosing Glaucoma Progression with Visual Field Data Using #' a Spatiotemporal Boundary Detection Method", <arXiv:1805.11636>. #' #' @return \code{STBDwDM} returns a list containing the following objects #' #' \describe{ #' #' \item{\code{mu}}{\code{NKeep x Nu} \code{matrix} of posterior samples for \code{mu}. The #' t-th column contains posterior samples from the the t-th time point.} #' #' \item{\code{tau2}}{\code{NKeep x Nu} \code{matrix} of posterior samples for \code{tau2}. #' The t-th column contains posterior samples from the the t-th time point.} #' #' \item{\code{alpha}}{\code{NKeep x Nu} \code{matrix} of posterior samples for \code{alpha}. #' The t-th column contains posterior samples from the the t-th time point.} #' #' \item{\code{delta}}{\code{NKeep x 3} \code{matrix} of posterior samples for \code{delta}. 
#' The columns have names that describe the samples within them.} #' #' \item{\code{T}}{\code{NKeep x 6} \code{matrix} of posterior samples for \code{T}. The #' columns have names that describe the samples within them. The row is listed first, e.g., #' \code{t32} refers to the entry in row \code{3}, column \code{2}.} #' #' \item{\code{phi}}{\code{NKeep x 1} \code{matrix} of posterior samples for \code{phi}.} #' #' \item{\code{metropolis}}{\code{(2 * Nu + 1) x 2} \code{matrix} of metropolis #' acceptance rates and tuners that result from the pilot adaptation. The first \code{Nu} #' correspond to the \code{Theta2} (i.e. \code{tau2}) parameters, the next \code{Nu} correspond to #' the \code{Theta3} (i.e. \code{alpha}) parameters and the last row give the \code{phi} values.} #' #' \item{\code{runtime}}{A \code{character} string giving the runtime of the MCMC sampler.} #' #' \item{\code{datobj}}{A \code{list} of data objects that are used in future \code{STBDwDM} functions #' and should be ignored by the user.} #' #' \item{\code{dataug}}{A \code{list} of data augmentation objects that are used in future #' \code{STBDwDM} functions and should be ignored by the user.} #' #' } #' #' @author Samuel I. Berchuck #' @references Berchuck et al. (2018), "Diagnosing Glaucoma Progression with Visual Field Data Using #' a Spatiotemporal Boundary Detection Method", <arXiv:1805.11636>. #' @export STBDwDM <- function(Y, DM, W, Time, Starting = NULL, Hypers = NULL, Tuning = NULL, MCMC = NULL, Family = "tobit", TemporalStructure = "exponential", Distance = "circumference", Weights = "continuous", Rho = 0.99, ScaleY = 10, ScaleDM = 100, Seed = 54) { ###Function inputs # Y = Y # DM = DM # W = W # Time = Time # Starting = Starting # Hypers = Hypers # Tuning = Tuning # MCMC = MCMC # Family = "tobit" # TemporalStructure = "exponential" # Distance = "circumference" # Weights = "continuous" # Rho = 0.99 # ScaleY = 10 # ScaleDM = 100 # Seed = 54 ###Check for missing objects if (missing(Y)) stop("Y: missing") if (missing(DM)) stop("DM: missing") if (missing(W)) stop("W: missing") if (missing(Time)) stop("Time: missing") ###Check model inputs CheckInputs(Y, DM, W, Time, Starting, Hypers, Tuning, MCMC, Family, TemporalStructure, Distance, Weights, Rho, ScaleY, ScaleDM) ####Set seed for reproducibility set.seed(Seed) ###Check to see if the job is interactive Interactive <- interactive() ###Create objects for use in sampler DatObj <- CreateDatObj(Y, DM, W, Time, Rho, ScaleY, ScaleDM, TemporalStructure, Family, Distance, Weights) HyPara <- CreateHyPara(Hypers, DatObj) MetrObj <- CreateMetrObj(Tuning, DatObj) Para <- CreatePara(Starting, DatObj, HyPara) DatAug <- CreateDatAug(DatObj) McmcObj <- CreateMcmc(MCMC, DatObj) RawSamples <- CreateStorage(DatObj, McmcObj) ###Time MCMC sampler BeginTime <- Sys.time() ###Run MCMC sampler in Rcpp RegObj <- STBDwDM_Rcpp(DatObj, HyPara, MetrObj, Para, DatAug, McmcObj, RawSamples, Interactive) ###Set regression objects RawSamples <- RegObj$rawsamples MetrObj <- RegObj$metropolis ###End time FinishTime <- Sys.time() RunTime <- FinishTime - BeginTime ###Collect output to be returned DatObjOut <- OutputDatObj(DatObj) DatAugOut <- OutputDatAug(DatAug) Metropolis <- SummarizeMetropolis(DatObj, MetrObj, McmcObj) Samples <- FormatSamples(DatObj, RawSamples) ###Return spBDwDM object STBDwDM <- list(mu = Samples$Mu, tau2 = Samples$Tau2, alpha = Samples$Alpha, delta = Samples$Delta, T = Samples$T, phi = Samples$Phi, metropolis = Metropolis, datobj = DatObjOut, dataug = DatAugOut, runtime = 
paste0("Model runtime: ",round(RunTime, 2), " ",attr(RunTime, "units"))) STBDwDM <- structure(STBDwDM, class = "STBDwDM") return(STBDwDM) ###End STBDwDM function }
/scratch/gouwar.j/cran-all/cranData/womblR/R/MCMC_STBDwDM.R
###Function for summarizing the raw MCMC samples------------------------------------------------------------------- FormatSamples <- function(DatObj, RawSamples) { ###Set data objects Nu <- DatObj$Nu ###Format raw samples Mu <- t(RawSamples[1 : Nu, ]) Tau2 <- t(RawSamples[(1 : Nu) + Nu, ]) Alpha <- t(RawSamples[(1 : Nu) + 2 * Nu, ]) Delta <- t(RawSamples[(3 * Nu + 1) : (3 * Nu + 3), ]) T <- t(RawSamples[(3 * Nu + 4) : (3 * Nu + 9), ]) Phi <- t(RawSamples[(3 * Nu + 10), ,drop = FALSE]) colnames(Mu) <- paste0("mu", 1 : Nu) colnames(Tau2) <- paste0("tau2", 1 : Nu) colnames(Alpha) <- paste0("alpha", 1 : Nu) colnames(Delta) <- paste0("delta", 1 : 3) colnames(T) <- c("t11", "t21", "t22", "t31", "t32", "t33") colnames(Phi) <- "phi" Out <- list(Mu = Mu, Tau2 = Tau2, Alpha = Alpha, Delta = Delta, T = T, Phi = Phi) return(Out) } ###Function for creating a data object that contains objects needed for ModelFit----------------------------------- OutputDatObj <- function(DatObj) { ###Collect needed objects DatObjOut <- list(M = DatObj$M, Nu = DatObj$Nu, Z = DatObj$Z, AdjacentEdgesBoolean = DatObj$AdjacentEdgesBoolean, W = DatObj$W, EyeM = DatObj$EyeM, OneM = DatObj$OneM, OneNu = DatObj$OneNu, YStarWide = DatObj$YStarWide, Rho = DatObj$Rho, FamilyInd = DatObj$FamilyInd, ScaleY = DatObj$ScaleY, YObserved = DatObj$YObserved, ScaleDM = DatObj$ScaleDM, TempCorInd = DatObj$TempCorInd, WeightsInd = DatObj$WeightsInd, Time = DatObj$Time) return(DatObjOut) } ###Function for creating a data augmentation object that contains objects needed for ModelFit---------------------- OutputDatAug <- function(DatAug) { ###Collect needed objects DatAugOut <- list(NBelow = DatAug$NBelow, NBelowCount = DatAug$NBelowCount, TobitIndeces = DatAug$TobitIndeces, YStarNonZero = DatAug$YStarNonZero) return(DatAugOut) } ###Function for summarizing Metropolis objects post sampler-------------------------------------------------------- SummarizeMetropolis <- function(DatObj, MetrObj, McmcObj) { ###Set data object Nu <- DatObj$Nu ###Set MCMC object NSims <- McmcObj$NSims ###Set Metropolis objects MetropTheta2 <- MetrObj$MetropTheta2 AcceptanceTheta2 <- MetrObj$AcceptanceTheta2 MetropTheta3 <- MetrObj$MetropTheta3 AcceptanceTheta3 <- MetrObj$AcceptanceTheta3 MetropPhi <- MetrObj$MetropPhi AcceptancePhi <- MetrObj$AcceptancePhi ###Summarize and output TuningParameters <- c(MetropTheta2, MetropTheta3, MetropPhi) AcceptanceCount <- c(AcceptanceTheta2, AcceptanceTheta3, AcceptancePhi) AcceptancePcts <- AcceptanceCount / NSims MetrSummary <- cbind(AcceptancePcts, TuningParameters) rownames(MetrSummary) <- c(paste0("Theta2", 1 : Nu), paste0("Theta3", 1 : Nu), "Phi") colnames(MetrSummary) <- c("acceptance", "tuner") return(MetrSummary) } ###Verify the class of our regression object------------------------------------------------------------------------ #' is.STBDwDM #' #' \code{is.STBDwDM} is a general test of an object being interpretable as a #' \code{\link{STBDwDM}} object. #' #' @param x object to be tested. #' #' @details The \code{\link{STBDwDM}} class is defined as the regression object that #' results from the \code{\link{STBDwDM}} regression function. #' #' @export is.STBDwDM <- function(x) { identical(attributes(x)$class, "STBDwDM") }
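###Hedged sketch (not run): given a fitted object "Fit" returned by STBDwDM() (hypothetical
###here), the formatted posterior samples above can be summarised column-wise.
# if (is.STBDwDM(Fit)) {
#   apply(Fit$mu, 2, mean)                                # posterior mean of mu at each visit
#   apply(Fit$phi, 2, quantile, probs = c(0.025, 0.975))  # 95% credible interval for phi
# }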
/scratch/gouwar.j/cran-all/cranData/womblR/R/MCMC_Utilities.R
###Function for plotting posterior adjacencies Wij(alpha_t) #' #' PlotAdjacency #' #' Plots a heat map of the differential light sensitivity on the Humphrey Field #' Analyzer-II visual field. #' #' @param Wij a \code{\link{PosteriorAdj}} object. #' #' @param Visit either an integer \code{(1,...,Nu)} indicating the visit number for which #' you want to get the adjacencies to plot or NA. If NA, then the plot will produce the #' dissimilarity metric at each adjacency. #' #' @param stat either "mean" or "sd" (only used for Visit != NA). #' #' @param main an overall title for the plot. #' #' @param color.scheme a vector of colors to be used to show the adjacencies changing. #' #' @param edgewidth a scalar indicating the width of the edges. #' #' @param cornerwidth a scalar indicating the width of the corners. #' #' @param lwd.border a scalar indicating width of the visual field border. #' #' @param color.bs one color specifying the blind spot. #' #' @param zlim the limits used for the legend (default are c(0,1)). #' #' @param legend logical, indicating whether the legend should be present (default = TRUE). #' #' @param DM a dissimilarity metric to be plotted at each location on the visual field (default = NULL). #' #' @param W an adjacency matrix that specifies the visual field, required if Wij is not provided (default = NULL). #' #' @details \code{PlotAdjacency} is used in the application of glaucoma progression to #' plot the posterior mean and standard deviation neighborhood adjacencies across the #' visual field. #' #' @examples #' ###Define blind spot locations on the HFA-II #' blind_spot <- c(26, 35) #' #' ###Load visual field adjacency matrix #' W <- HFAII_Queen[ -blind_spot, -blind_spot] #' #' ###Load Garway-Heath angles for dissimiliarity metric #' DM <- GarwayHeath[-blind_spot] #Uses Garway-Heath angles object "GarwayHeath" #' #' ###Adjacency plots #' PlotAdjacency(W = W, DM = DM, zlim = c(0, 180), Visit = NA, #' main = "Garway-Heath dissimilarity metric\n across the visual field") #' #' @author Samuel I. Berchuck #' #' @export PlotAdjacency <- function(Wij, Visit = 1, stat = "mean", main = "Estimated Adjacencies", color.scheme = c("Black","White"), edgewidth = 2, cornerwidth = 1 / 4, lwd.border = 3, color.bs = "gray", zlim = c(0, 1), legend = TRUE, DM = NULL, W = NULL) { ##Note: Depends on library classInt # You need the suggested package for this function if (!requireNamespace("classInt", quietly = TRUE)) { stop("classInt needed for this function to work. Please install it.", call. 
= FALSE) } ###Logical function to check for colors areColors <- function(x) { sapply(x, function(X) { tryCatch(is.matrix(col2rgb(X)), error = function(e) FALSE) }) } ###Check inputs if (missing(Wij)) { if (is.null(W)) stop('"W" is required when "Wij" is missing') if (!is.na(Visit)) stop('"Visit" must be NA when Wij is missing') if (!any(areColors(color.scheme))) stop('"color.scheme" can only include colors') ###Border information AdjacentEdgesBoolean <- (W == 1) & (!lower.tri(W)) Dist <- function(x, y) pmin(abs(x - y), (360 - pmax(x, y) + pmin(x, y))) #arc length of optic nerve DM_Grid <- expand.grid(DM, DM) Z_Vector <- Dist(DM_Grid[ , 1], DM_Grid[ , 2]) Z_Matrix <- matrix(Z_Vector, nrow = dim(W)[1], ncol = dim(W)[1], byrow = TRUE) Z <- matrix(Z_Matrix[AdjacentEdgesBoolean], ncol = 1) Boundary <- cbind(which(AdjacentEdgesBoolean, arr.ind = TRUE), Z) Boundary[ ,3] <- 180 - Boundary[ ,3] NAdjacency <- dim(Boundary)[1] Degree <- DM ###Set color pallete col.breaks <- seq(zlim[1],zlim[2],length.out=(length(unique(Boundary[,3]))+1)) col.br <- colorRampPalette(color.scheme) col.pal <- col.br(NAdjacency) # suppressWarnings(fixed_obs<-classIntervals(Boundary[,3],n=length(unique(Boundary[,3])))) suppressWarnings(fixed_obs<-classInt::classIntervals(Boundary[,3],style="fixed",fixedBreaks=col.breaks)) color.adj<-classInt::findColours(fixed_obs,col.pal) ###Plotting functions and Parameters lwd<-edgewidth lend<-2 format2<-function(x) format(round(x,2),nsmall=2) format0<-function(x) format(round(x,0),nsmall=0) tri<-cornerwidth point<-function(x,y,ulbr=TRUE, col=colorAdj) { if (!ulbr) { polygon(c(x,x+tri,x),c(y,y,y+tri),col=col,border=col) polygon(c(x,x-tri,x),c(y,y,y-tri),col=col,border=col) } if (ulbr) { polygon(c(x,x-tri,x),c(y,y,y+tri),col=col,border=col) polygon(c(x,x+tri,x),c(y,y,y-tri),col=col,border=col) } } ###Create edge indicator EdgesInd<-numeric(length=NAdjacency) WhichEdges<-c(1,2,3,5,7,9,11,13,15,17,18,20,22,24,26,28,30,32,34,36,38,40,42,43,45,47,49,51,53,55,57,59,61,63,65,67, 69,71,72,75,77,79,81,83,85,87,89,91,93,95,96,97,99,102,104,106,108,110,112,114,116,118,119,122,123,124,126,129,131,133,135,137,139,141,143,145,147,149,152,154,156,158,160,162) EdgesInd[WhichEdges]<-rep(1,length(WhichEdges)) WhichCorners<-which(EdgesInd==0) ###Create plot pardefault <- suppressWarnings(par(no.readonly = T)) par(mfcol = c(1, 1), pty = "m", mai = c(0, 0, 0.75, 0)) plot(1,1,xlim=c(1,13),ylim=c(1.25,8.75),type="n",xaxt="n",yaxt="n",bty="n",ylab="",xlab="",main=main, asp = 1, cex.main = 1.5) ###Plot edges for (i in WhichEdges) { Indeces<-as.numeric(Boundary[i,1:2]) colorAdj<-color.adj[i] if (identical(Indeces,c(1,2))) segments(5,8,5,9,lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(2,3))) segments(6,8,6,9,lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(3,4))) segments(7,8,7,9,lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(1,5))) point(4,8,ulbr=FALSE, colorAdj) if (identical(Indeces,c(1,6))) segments(4,8,5,8,lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(2,6))) point(5,8,ulbr=FALSE, colorAdj) if (identical(Indeces,c(5,6))) segments(4,7,4,8,lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(1,7))) point(5,8,ulbr=TRUE, colorAdj) if (identical(Indeces,c(2,7))) segments(5,8,6,8,lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(3,7))) point(6,8,ulbr=FALSE, colorAdj) if (identical(Indeces,c(6,7))) segments(5,7,5,8, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(2,8))) point(6,8,ulbr=TRUE, colorAdj) if (identical(Indeces,c(3,8))) segments(6,8,7,8, 
lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(4,8))) point(7,8,ulbr=FALSE, colorAdj) if (identical(Indeces,c(7,8))) segments(6,7,6,8, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(3,9))) point(7,8,ulbr=TRUE, colorAdj) if (identical(Indeces,c(4,9))) segments(7,8,8,8, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(8,9))) segments(7,7,7,8, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(4,10))) point(8,8,ulbr=TRUE, colorAdj) if (identical(Indeces,c(9,10))) segments(8,7,8,8, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(5,11))) point(3,7,ulbr=FALSE, colorAdj) if (identical(Indeces,c(5,12))) segments(3,7,4,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(6,12))) point(4,7,ulbr=FALSE, colorAdj) if (identical(Indeces,c(11,12))) segments(3,6,3,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(5,13))) point(4,7,ulbr=TRUE, colorAdj) if (identical(Indeces,c(6,13))) segments(4,7,5,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(7,13))) point(5,7,ulbr=FALSE, colorAdj) if (identical(Indeces,c(12,13))) segments(4,6,4,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(6,14))) point(5,7,ulbr=TRUE, colorAdj) if (identical(Indeces,c(7,14))) segments(5,7,6,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(8,14))) point(6,7,ulbr=FALSE, colorAdj) if (identical(Indeces,c(13,14))) segments(5,6,5,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(7,15))) point(6,7,ulbr=TRUE, colorAdj) if (identical(Indeces,c(8,15))) segments(6,7,7,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(9,15))) point(7,7,ulbr=FALSE, colorAdj) if (identical(Indeces,c(14,15))) segments(6,6,6,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(8,16))) point(7,7,ulbr=TRUE, colorAdj) if (identical(Indeces,c(9,16))) segments(7,7,8,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(10,16))) point(8,7,ulbr=FALSE, colorAdj) if (identical(Indeces,c(15,16))) segments(7,6,7,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(9,17))) point(8,7,ulbr=TRUE, colorAdj) if (identical(Indeces,c(10,17))) segments(8,7,9,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(16,17))) segments(8,6,8,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(10,18))) point(9,7,ulbr=TRUE, colorAdj) if (identical(Indeces,c(17,18))) segments(9,6,9,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(11,19))) point(2,6,ulbr=FALSE, colorAdj) if (identical(Indeces,c(11,20))) segments(2,6,3,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(12,20))) point(3,6,ulbr=FALSE, colorAdj) if (identical(Indeces,c(19,20))) segments(2,5,2,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(11,21))) point(3,6,ulbr=TRUE, colorAdj) if (identical(Indeces,c(12,21))) segments(3,6,4,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(13,21))) point(4,6,ulbr=FALSE, colorAdj) if (identical(Indeces,c(20,21))) segments(3,5,3,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(12,22))) point(4,6,ulbr=TRUE, colorAdj) if (identical(Indeces,c(13,22))) segments(4,6,5,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(14,22))) point(5,6,ulbr=FALSE, colorAdj) if (identical(Indeces,c(21,22))) segments(4,5,4,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(13,23))) point(5,6,ulbr=TRUE, colorAdj) if (identical(Indeces,c(14,23))) segments(5,6,6,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(15,23))) point(6,6,ulbr=FALSE, colorAdj) if (identical(Indeces,c(22,23))) segments(5,5,5,6, 
lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(14,24))) point(6,6,ulbr=TRUE, colorAdj) if (identical(Indeces,c(15,24))) segments(6,6,7,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(16,24))) point(7,6,ulbr=FALSE, colorAdj) if (identical(Indeces,c(23,24))) segments(6,5,6,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(15,25))) point(7,6,ulbr=TRUE, colorAdj) if (identical(Indeces,c(16,25))) segments(7,6,8,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(17,25))) point(8,6,ulbr=FALSE, colorAdj) if (identical(Indeces,c(24,25))) segments(7,5,7,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(17,26))) point(9,6,ulbr=TRUE, colorAdj) if (identical(Indeces,c(18,26))) segments(9,6,10,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(19,27))) segments(1,5,2,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(20,27))) point(2,5,ulbr=FALSE, colorAdj) if (identical(Indeces,c(19,28))) point(2,5,ulbr=TRUE, colorAdj) if (identical(Indeces,c(20,28))) segments(2,5,3,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(21,28))) point(3,5,ulbr=FALSE, colorAdj) if (identical(Indeces,c(27,28))) segments(2,4,2,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(20,29))) point(3,5,ulbr=TRUE, colorAdj) if (identical(Indeces,c(21,29))) segments(3,5,4,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(22,29))) point(4,5,ulbr=FALSE, colorAdj) if (identical(Indeces,c(28,29))) segments(3,4,3,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(21,30))) point(4,5,ulbr=TRUE, colorAdj) if (identical(Indeces,c(22,30))) segments(4,5,5,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(23,30))) point(5,5,ulbr=FALSE, colorAdj) if (identical(Indeces,c(29,30))) segments(4,4,4,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(22,31))) point(5,5,ulbr=TRUE, colorAdj) if (identical(Indeces,c(23,31))) segments(5,5,6,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(24,31))) point(6,5,ulbr=FALSE, colorAdj) if (identical(Indeces,c(30,31))) segments(5,4,5,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(23,32))) point(6,5,ulbr=TRUE, colorAdj) if (identical(Indeces,c(24,32))) segments(6,5,7,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(25,32))) point(7,5,ulbr=FALSE, colorAdj) if (identical(Indeces,c(31,32))) segments(6,4,6,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(24,33))) point(7,5,ulbr=TRUE, colorAdj) if (identical(Indeces,c(25,33))) segments(7,5,8,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(32,33))) segments(7,4,7,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(26,34))) segments(9,5,10,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(27,35))) point(2,4,ulbr=TRUE, colorAdj) if (identical(Indeces,c(28,35))) segments(2,4,3,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(29,35))) point(3,4,ulbr=FALSE, colorAdj) if (identical(Indeces,c(28,36))) point(3,4,ulbr=TRUE, colorAdj) if (identical(Indeces,c(29,36))) segments(3,4,4,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(30,36))) point(4,4,ulbr=FALSE, colorAdj) if (identical(Indeces,c(35,36))) segments(3,3,3,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(29,37))) point(4,4,ulbr=TRUE, colorAdj) if (identical(Indeces,c(30,37))) segments(4,4,5,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(31,37))) point(5,4,ulbr=FALSE, colorAdj) if (identical(Indeces,c(36,37))) segments(4,3,4,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(30,38))) 
point(5,4,ulbr=TRUE, colorAdj) if (identical(Indeces,c(31,38))) segments(5,4,6,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(32,38))) point(6,4,ulbr=FALSE, colorAdj) if (identical(Indeces,c(37,38))) segments(5,3,5,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(31,39))) point(6,4,ulbr=TRUE, colorAdj) if (identical(Indeces,c(32,39))) segments(6,4,7,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(33,39))) point(7,4,ulbr=FALSE, colorAdj) if (identical(Indeces,c(38,39))) segments(6,3,6,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(32,40))) point(7,4,ulbr=TRUE, colorAdj) if (identical(Indeces,c(33,40))) segments(7,4,8,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(39,40))) segments(7,3,7,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(33,41))) point(8,4,ulbr=TRUE, colorAdj) if (identical(Indeces,c(34,41))) point(9,4,ulbr=FALSE, colorAdj) if (identical(Indeces,c(40,41))) segments(8,3,8,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(34,42))) segments(9,4,10,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(41,42))) segments(9,3,9,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(35,43))) point(3,3,ulbr=TRUE, colorAdj) if (identical(Indeces,c(36,43))) segments(3,3,4,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(37,43))) point(4,3,ulbr=FALSE, colorAdj) if (identical(Indeces,c(36,44))) point(4,3,ulbr=TRUE, colorAdj) if (identical(Indeces,c(37,44))) segments(4,3,5,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(38,44))) point(5,3,ulbr=FALSE, colorAdj) if (identical(Indeces,c(43,44))) segments(4,2,4,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(37,45))) point(5,3,ulbr=TRUE, colorAdj) if (identical(Indeces,c(38,45))) segments(5,3,6,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(39,45))) point(6,3,ulbr=FALSE, colorAdj) if (identical(Indeces,c(44,45))) segments(5,2,5,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(38,46))) point(6,3,ulbr=TRUE, colorAdj) if (identical(Indeces,c(39,46))) segments(6,3,7,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(40,46))) point(7,3,ulbr=FALSE, colorAdj) if (identical(Indeces,c(45,46))) segments(6,2,6,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(39,47))) point(7,3,ulbr=TRUE, colorAdj) if (identical(Indeces,c(40,47))) segments(7,3,8,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(41,47))) point(8,3,ulbr=FALSE, colorAdj) if (identical(Indeces,c(46,47))) segments(7,2,7,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(40,48))) point(8,3,ulbr=TRUE, colorAdj) if (identical(Indeces,c(41,48))) segments(8,3,9,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(42,48))) point(9,3,ulbr=FALSE, colorAdj) if (identical(Indeces,c(47,48))) segments(8,2,8,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(43,49))) point(4,2,ulbr=TRUE, colorAdj) if (identical(Indeces,c(44,49))) segments(4,2,5,2, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(45,49))) point(5,2,ulbr=FALSE, colorAdj) if (identical(Indeces,c(44,50))) point(5,2,ulbr=TRUE, colorAdj) if (identical(Indeces,c(45,50))) segments(5,2,6,2, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(46,50))) point(6,2,ulbr=FALSE, colorAdj) if (identical(Indeces,c(49,50))) segments(5,1,5,2, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(45,51))) point(6,2,ulbr=TRUE, colorAdj) if (identical(Indeces,c(46,51))) segments(6,2,7,2, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(47,51))) 
point(7,2,ulbr=FALSE, colorAdj) if (identical(Indeces,c(50,51))) segments(6,1,6,2, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(46,52))) point(7,2,ulbr=TRUE, colorAdj) if (identical(Indeces,c(47,52))) segments(7,2,8,2, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(48,52))) point(8,2,ulbr=FALSE, colorAdj) if (identical(Indeces,c(51,52))) segments(7,1,7,2, lwd=lwd,lend=lend,col=colorAdj) } ###Plot corners for (i in WhichCorners) { Indeces<-as.numeric(Boundary[i,1:2]) colorAdj<-color.adj[i] if (identical(Indeces,c(1,2))) segments(5,8,5,9,lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(2,3))) segments(6,8,6,9,lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(3,4))) segments(7,8,7,9,lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(1,5))) point(4,8,ulbr=FALSE, colorAdj) if (identical(Indeces,c(1,6))) segments(4,8,5,8,lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(2,6))) point(5,8,ulbr=FALSE, colorAdj) if (identical(Indeces,c(5,6))) segments(4,7,4,8,lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(1,7))) point(5,8,ulbr=TRUE, colorAdj) if (identical(Indeces,c(2,7))) segments(5,8,6,8,lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(3,7))) point(6,8,ulbr=FALSE, colorAdj) if (identical(Indeces,c(6,7))) segments(5,7,5,8, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(2,8))) point(6,8,ulbr=TRUE, colorAdj) if (identical(Indeces,c(3,8))) segments(6,8,7,8, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(4,8))) point(7,8,ulbr=FALSE, colorAdj) if (identical(Indeces,c(7,8))) segments(6,7,6,8, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(3,9))) point(7,8,ulbr=TRUE, colorAdj) if (identical(Indeces,c(4,9))) segments(7,8,8,8, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(8,9))) segments(7,7,7,8, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(4,10))) point(8,8,ulbr=TRUE, colorAdj) if (identical(Indeces,c(9,10))) segments(8,7,8,8, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(5,11))) point(3,7,ulbr=FALSE, colorAdj) if (identical(Indeces,c(5,12))) segments(3,7,4,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(6,12))) point(4,7,ulbr=FALSE, colorAdj) if (identical(Indeces,c(11,12))) segments(3,6,3,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(5,13))) point(4,7,ulbr=TRUE, colorAdj) if (identical(Indeces,c(6,13))) segments(4,7,5,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(7,13))) point(5,7,ulbr=FALSE, colorAdj) if (identical(Indeces,c(12,13))) segments(4,6,4,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(6,14))) point(5,7,ulbr=TRUE, colorAdj) if (identical(Indeces,c(7,14))) segments(5,7,6,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(8,14))) point(6,7,ulbr=FALSE, colorAdj) if (identical(Indeces,c(13,14))) segments(5,6,5,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(7,15))) point(6,7,ulbr=TRUE, colorAdj) if (identical(Indeces,c(8,15))) segments(6,7,7,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(9,15))) point(7,7,ulbr=FALSE, colorAdj) if (identical(Indeces,c(14,15))) segments(6,6,6,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(8,16))) point(7,7,ulbr=TRUE, colorAdj) if (identical(Indeces,c(9,16))) segments(7,7,8,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(10,16))) point(8,7,ulbr=FALSE, colorAdj) if (identical(Indeces,c(15,16))) segments(7,6,7,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(9,17))) point(8,7,ulbr=TRUE, colorAdj) if (identical(Indeces,c(10,17))) 
segments(8,7,9,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(16,17))) segments(8,6,8,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(10,18))) point(9,7,ulbr=TRUE, colorAdj) if (identical(Indeces,c(17,18))) segments(9,6,9,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(11,19))) point(2,6,ulbr=FALSE, colorAdj) if (identical(Indeces,c(11,20))) segments(2,6,3,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(12,20))) point(3,6,ulbr=FALSE, colorAdj) if (identical(Indeces,c(19,20))) segments(2,5,2,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(11,21))) point(3,6,ulbr=TRUE, colorAdj) if (identical(Indeces,c(12,21))) segments(3,6,4,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(13,21))) point(4,6,ulbr=FALSE, colorAdj) if (identical(Indeces,c(20,21))) segments(3,5,3,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(12,22))) point(4,6,ulbr=TRUE, colorAdj) if (identical(Indeces,c(13,22))) segments(4,6,5,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(14,22))) point(5,6,ulbr=FALSE, colorAdj) if (identical(Indeces,c(21,22))) segments(4,5,4,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(13,23))) point(5,6,ulbr=TRUE, colorAdj) if (identical(Indeces,c(14,23))) segments(5,6,6,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(15,23))) point(6,6,ulbr=FALSE, colorAdj) if (identical(Indeces,c(22,23))) segments(5,5,5,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(14,24))) point(6,6,ulbr=TRUE, colorAdj) if (identical(Indeces,c(15,24))) segments(6,6,7,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(16,24))) point(7,6,ulbr=FALSE, colorAdj) if (identical(Indeces,c(23,24))) segments(6,5,6,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(15,25))) point(7,6,ulbr=TRUE, colorAdj) if (identical(Indeces,c(16,25))) segments(7,6,8,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(17,25))) point(8,6,ulbr=FALSE, colorAdj) if (identical(Indeces,c(24,25))) segments(7,5,7,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(17,26))) point(9,6,ulbr=TRUE, colorAdj) if (identical(Indeces,c(18,26))) segments(9,6,10,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(19,27))) segments(1,5,2,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(20,27))) point(2,5,ulbr=FALSE, colorAdj) if (identical(Indeces,c(19,28))) point(2,5,ulbr=TRUE, colorAdj) if (identical(Indeces,c(20,28))) segments(2,5,3,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(21,28))) point(3,5,ulbr=FALSE, colorAdj) if (identical(Indeces,c(27,28))) segments(2,4,2,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(20,29))) point(3,5,ulbr=TRUE, colorAdj) if (identical(Indeces,c(21,29))) segments(3,5,4,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(22,29))) point(4,5,ulbr=FALSE, colorAdj) if (identical(Indeces,c(28,29))) segments(3,4,3,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(21,30))) point(4,5,ulbr=TRUE, colorAdj) if (identical(Indeces,c(22,30))) segments(4,5,5,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(23,30))) point(5,5,ulbr=FALSE, colorAdj) if (identical(Indeces,c(29,30))) segments(4,4,4,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(22,31))) point(5,5,ulbr=TRUE, colorAdj) if (identical(Indeces,c(23,31))) segments(5,5,6,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(24,31))) point(6,5,ulbr=FALSE, colorAdj) if (identical(Indeces,c(30,31))) segments(5,4,5,5, lwd=lwd,lend=lend,col=colorAdj) if 
(identical(Indeces,c(23,32))) point(6,5,ulbr=TRUE, colorAdj) if (identical(Indeces,c(24,32))) segments(6,5,7,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(25,32))) point(7,5,ulbr=FALSE, colorAdj) if (identical(Indeces,c(31,32))) segments(6,4,6,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(24,33))) point(7,5,ulbr=TRUE, colorAdj) if (identical(Indeces,c(25,33))) segments(7,5,8,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(32,33))) segments(7,4,7,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(26,34))) segments(9,5,10,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(27,35))) point(2,4,ulbr=TRUE, colorAdj) if (identical(Indeces,c(28,35))) segments(2,4,3,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(29,35))) point(3,4,ulbr=FALSE, colorAdj) if (identical(Indeces,c(28,36))) point(3,4,ulbr=TRUE, colorAdj) if (identical(Indeces,c(29,36))) segments(3,4,4,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(30,36))) point(4,4,ulbr=FALSE, colorAdj) if (identical(Indeces,c(35,36))) segments(3,3,3,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(29,37))) point(4,4,ulbr=TRUE, colorAdj) if (identical(Indeces,c(30,37))) segments(4,4,5,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(31,37))) point(5,4,ulbr=FALSE, colorAdj) if (identical(Indeces,c(36,37))) segments(4,3,4,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(30,38))) point(5,4,ulbr=TRUE, colorAdj) if (identical(Indeces,c(31,38))) segments(5,4,6,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(32,38))) point(6,4,ulbr=FALSE, colorAdj) if (identical(Indeces,c(37,38))) segments(5,3,5,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(31,39))) point(6,4,ulbr=TRUE, colorAdj) if (identical(Indeces,c(32,39))) segments(6,4,7,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(33,39))) point(7,4,ulbr=FALSE, colorAdj) if (identical(Indeces,c(38,39))) segments(6,3,6,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(32,40))) point(7,4,ulbr=TRUE, colorAdj) if (identical(Indeces,c(33,40))) segments(7,4,8,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(39,40))) segments(7,3,7,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(33,41))) point(8,4,ulbr=TRUE, colorAdj) if (identical(Indeces,c(34,41))) point(9,4,ulbr=FALSE, colorAdj) if (identical(Indeces,c(40,41))) segments(8,3,8,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(34,42))) segments(9,4,10,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(41,42))) segments(9,3,9,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(35,43))) point(3,3,ulbr=TRUE, colorAdj) if (identical(Indeces,c(36,43))) segments(3,3,4,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(37,43))) point(4,3,ulbr=FALSE, colorAdj) if (identical(Indeces,c(36,44))) point(4,3,ulbr=TRUE, colorAdj) if (identical(Indeces,c(37,44))) segments(4,3,5,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(38,44))) point(5,3,ulbr=FALSE, colorAdj) if (identical(Indeces,c(43,44))) segments(4,2,4,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(37,45))) point(5,3,ulbr=TRUE, colorAdj) if (identical(Indeces,c(38,45))) segments(5,3,6,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(39,45))) point(6,3,ulbr=FALSE, colorAdj) if (identical(Indeces,c(44,45))) segments(5,2,5,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(38,46))) point(6,3,ulbr=TRUE, colorAdj) if (identical(Indeces,c(39,46))) segments(6,3,7,3, 
lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(40,46))) point(7,3,ulbr=FALSE, colorAdj) if (identical(Indeces,c(45,46))) segments(6,2,6,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(39,47))) point(7,3,ulbr=TRUE, colorAdj) if (identical(Indeces,c(40,47))) segments(7,3,8,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(41,47))) point(8,3,ulbr=FALSE, colorAdj) if (identical(Indeces,c(46,47))) segments(7,2,7,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(40,48))) point(8,3,ulbr=TRUE, colorAdj) if (identical(Indeces,c(41,48))) segments(8,3,9,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(42,48))) point(9,3,ulbr=FALSE, colorAdj) if (identical(Indeces,c(47,48))) segments(8,2,8,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(43,49))) point(4,2,ulbr=TRUE, colorAdj) if (identical(Indeces,c(44,49))) segments(4,2,5,2, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(45,49))) point(5,2,ulbr=FALSE, colorAdj) if (identical(Indeces,c(44,50))) point(5,2,ulbr=TRUE, colorAdj) if (identical(Indeces,c(45,50))) segments(5,2,6,2, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(46,50))) point(6,2,ulbr=FALSE, colorAdj) if (identical(Indeces,c(49,50))) segments(5,1,5,2, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(45,51))) point(6,2,ulbr=TRUE, colorAdj) if (identical(Indeces,c(46,51))) segments(6,2,7,2, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(47,51))) point(7,2,ulbr=FALSE, colorAdj) if (identical(Indeces,c(50,51))) segments(6,1,6,2, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(46,52))) point(7,2,ulbr=TRUE, colorAdj) if (identical(Indeces,c(47,52))) segments(7,2,8,2, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(48,52))) point(8,2,ulbr=FALSE, colorAdj) if (identical(Indeces,c(51,52))) segments(7,1,7,2, lwd=lwd,lend=lend,col=colorAdj) } ###Add border hloop<-list(4:7,c(3,8),c(2,9),1,NULL,1,c(2,9),c(3,8),4:7) vloop<-list(4:5,c(3,6),c(2,7),c(1,8),NULL,NULL,NULL,c(1,8),c(2,7),3:6) for (j in 1:9) { for (i in hloop[[j]]) { segments(i,j,i+1,j,lwd=lwd.border) } } for (i in 1:10) { for (j in vloop[[i]]) { segments(i,j,i,j+1,lwd=lwd.border) } } ###Add blind spot rect(8,4,9,6,col=color.bs,border=color.bs) ###Add legend if (legend) { NColors<-length(col.pal) Vertical<-seq(3,7,length.out=NColors) if (stat == "mean") for (i in 1:NColors) segments(11,Vertical[i],11.75,Vertical[i],col=rev(col.pal)[i],lwd=1.5) if (stat == "sd") for (i in 1:NColors) segments(11,Vertical[i],11.75,Vertical[i],col=(col.pal)[i],lwd=1.5) minx<-zlim[1] maxx<-zlim[2] LegendPV<-seq(minx,maxx,length.out=5) segments(11.75,3,11.75,7,lwd=2) segments(11,3,11,7,lwd=2) segments(11,7,11.75,7,lwd=2) segments(11,3,11.75,3,lwd=2) for (i in 1:length(LegendPV)) { if ((stat == "mean") & (!is.na(Visit))) { if (is.na(Visit)) text(12.75,(3:7)[i],format0(rev(LegendPV)[i])) if (!is.na(Visit)) text(12.75,(3:7)[i],format2(rev(LegendPV)[i])) } if ((stat == "sd")& ((!is.na(Visit)))) { if (is.na(Visit)) text(12.75,(3:7)[i],format0((LegendPV)[i])) if (!is.na(Visit)) text(12.75,(3:7)[i],format2((LegendPV)[i])) } if (is.na(Visit)) { if (is.na(Visit)) text(12.75,(3:7)[i],format0((LegendPV)[i])) if (!is.na(Visit)) text(12.75,(3:7)[i],format2((LegendPV)[i])) } segments(11.75,(3:7)[i],12,(3:7)[i],lwd=2) } if (is.na(Visit)) text(11.5, 7.5, expression(paste("Degree (",degree,")"))) if (!is.na(Visit) & stat == "mean") text(11.4, 7.5, expression(paste("E[",w[ij],"(",alpha[t],")]"))) if (!is.na(Visit) & stat == "sd") text(11.4, 7.5, 
expression(paste("sd[",w[ij],"(",alpha[t],")]"))) } if (!is.null(Degree)) { ###Add garway angles text(4.5,8.5,Degree[1]) text(5.5,8.5,Degree[2]) text(6.5,8.5,Degree[3]) text(7.5,8.5,Degree[4]) text(3.5,7.5,Degree[5]) text(4.5,7.5,Degree[6]) text(5.5,7.5,Degree[7]) text(6.5,7.5,Degree[8]) text(7.5,7.5,Degree[9]) text(8.5,7.5,Degree[10]) text(2.5,6.5,Degree[11]) text(3.5,6.5,Degree[12]) text(4.5,6.5,Degree[13]) text(5.5,6.5,Degree[14]) text(6.5,6.5,Degree[15]) text(7.5,6.5,Degree[16]) text(8.5,6.5,Degree[17]) text(9.5,6.5,Degree[18]) text(1.5,5.5,Degree[19]) text(2.5,5.5,Degree[20]) text(3.5,5.5,Degree[21]) text(4.5,5.5,Degree[22]) text(5.5,5.5,Degree[23]) text(6.5,5.5,Degree[24]) text(7.5,5.5,Degree[25]) text(9.5,5.5,Degree[26]) text(1.5,4.5,Degree[27]) text(2.5,4.5,Degree[28]) text(3.5,4.5,Degree[29]) text(4.5,4.5,Degree[30]) text(5.5,4.5,Degree[31]) text(6.5,4.5,Degree[32]) text(7.5,4.5,Degree[33]) text(9.5,4.5,Degree[34]) text(2.5,3.5,Degree[35]) text(3.5,3.5,Degree[36]) text(4.5,3.5,Degree[37]) text(5.5,3.5,Degree[38]) text(6.5,3.5,Degree[39]) text(7.5,3.5,Degree[40]) text(8.5,3.5,Degree[41]) text(9.5,3.5,Degree[42]) text(3.5,2.5,Degree[43]) text(4.5,2.5,Degree[44]) text(5.5,2.5,Degree[45]) text(6.5,2.5,Degree[46]) text(7.5,2.5,Degree[47]) text(8.5,2.5,Degree[48]) text(4.5,1.5,Degree[49]) text(5.5,1.5,Degree[50]) text(6.5,1.5,Degree[51]) text(7.5,1.5,Degree[52]) } ###Return par to default par(pardefault) } ###Check inputs if (!missing(Wij)) { if (!is.PosteriorAdj(Wij)) stop('"Wij" is not a PosteriorAdj object') Nu <- as.numeric(unlist(strsplit(colnames(Wij)[dim(Wij)[2]], "sd"))[2]) if (!(Visit %in% c(NA,1:Nu))) stop('"Visit" must be either NA or an integer between 1 and Nu') if (!(stat %in% c("mean", "sd"))) stop('"stat" must be one of "mean" or "sd"') if (!any(areColors(color.scheme))) stop('"color.scheme" can only include colors') ###Border information if (is.na(Visit)) { Boundary <- Wij[ , 1 : 3] Boundary[ ,3] <- 180 - Boundary[ ,3] } if (!is.na(Visit) & stat == "mean") Boundary <- Wij[ , c(1, 2, (2 * Visit) + 2)] if (!is.na(Visit) & stat == "sd") Boundary <- Wij[ , c(1, 2, (2 * Visit) + 3)] NAdjacency <- dim(Boundary)[1] Degree <- DM ###Set color pallete col.breaks <- seq(zlim[1],zlim[2],length.out=(length(unique(Boundary[,3]))+1)) col.br <- colorRampPalette(color.scheme) col.pal <- col.br(NAdjacency) # suppressWarnings(fixed_obs<-classIntervals(Boundary[,3],n=length(unique(Boundary[,3])))) suppressWarnings(fixed_obs<-classInt::classIntervals(Boundary[,3],style="fixed",fixedBreaks=col.breaks)) color.adj<-classInt::findColours(fixed_obs,col.pal) ###Plotting functions and Parameters lwd<-edgewidth lend<-2 format2<-function(x) format(round(x,2),nsmall=2) format0<-function(x) format(round(x,0),nsmall=0) tri<-cornerwidth point<-function(x,y,ulbr=TRUE, col=colorAdj) { if (!ulbr) { polygon(c(x,x+tri,x),c(y,y,y+tri),col=col,border=col) polygon(c(x,x-tri,x),c(y,y,y-tri),col=col,border=col) } if (ulbr) { polygon(c(x,x-tri,x),c(y,y,y+tri),col=col,border=col) polygon(c(x,x+tri,x),c(y,y,y-tri),col=col,border=col) } } ###Create edge indicator EdgesInd<-numeric(length=NAdjacency) WhichEdges<-c(1,2,3,5,7,9,11,13,15,17,18,20,22,24,26,28,30,32,34,36,38,40,42,43,45,47,49,51,53,55,57,59,61,63,65,67, 69,71,72,75,77,79,81,83,85,87,89,91,93,95,96,97,99,102,104,106,108,110,112,114,116,118,119,122,123,124,126,129,131,133,135,137,139,141,143,145,147,149,152,154,156,158,160,162) EdgesInd[WhichEdges]<-rep(1,length(WhichEdges)) WhichCorners<-which(EdgesInd==0) ###Create plot pardefault <- 
suppressWarnings(par(no.readonly = T)) par(mfcol = c(1, 1), pty = "m", mai = c(0, 0, 0.75, 0)) plot(1,1,xlim=c(1,13),ylim=c(1.25,8.75),type="n",xaxt="n",yaxt="n",bty="n",ylab="",xlab="",main=main, asp = 1, cex.main = 1.5) ###Plot edges for (i in WhichEdges) { Indeces<-as.numeric(Boundary[i,1:2]) colorAdj<-color.adj[i] if (identical(Indeces,c(1,2))) segments(5,8,5,9,lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(2,3))) segments(6,8,6,9,lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(3,4))) segments(7,8,7,9,lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(1,5))) point(4,8,ulbr=FALSE, colorAdj) if (identical(Indeces,c(1,6))) segments(4,8,5,8,lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(2,6))) point(5,8,ulbr=FALSE, colorAdj) if (identical(Indeces,c(5,6))) segments(4,7,4,8,lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(1,7))) point(5,8,ulbr=TRUE, colorAdj) if (identical(Indeces,c(2,7))) segments(5,8,6,8,lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(3,7))) point(6,8,ulbr=FALSE, colorAdj) if (identical(Indeces,c(6,7))) segments(5,7,5,8, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(2,8))) point(6,8,ulbr=TRUE, colorAdj) if (identical(Indeces,c(3,8))) segments(6,8,7,8, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(4,8))) point(7,8,ulbr=FALSE, colorAdj) if (identical(Indeces,c(7,8))) segments(6,7,6,8, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(3,9))) point(7,8,ulbr=TRUE, colorAdj) if (identical(Indeces,c(4,9))) segments(7,8,8,8, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(8,9))) segments(7,7,7,8, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(4,10))) point(8,8,ulbr=TRUE, colorAdj) if (identical(Indeces,c(9,10))) segments(8,7,8,8, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(5,11))) point(3,7,ulbr=FALSE, colorAdj) if (identical(Indeces,c(5,12))) segments(3,7,4,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(6,12))) point(4,7,ulbr=FALSE, colorAdj) if (identical(Indeces,c(11,12))) segments(3,6,3,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(5,13))) point(4,7,ulbr=TRUE, colorAdj) if (identical(Indeces,c(6,13))) segments(4,7,5,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(7,13))) point(5,7,ulbr=FALSE, colorAdj) if (identical(Indeces,c(12,13))) segments(4,6,4,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(6,14))) point(5,7,ulbr=TRUE, colorAdj) if (identical(Indeces,c(7,14))) segments(5,7,6,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(8,14))) point(6,7,ulbr=FALSE, colorAdj) if (identical(Indeces,c(13,14))) segments(5,6,5,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(7,15))) point(6,7,ulbr=TRUE, colorAdj) if (identical(Indeces,c(8,15))) segments(6,7,7,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(9,15))) point(7,7,ulbr=FALSE, colorAdj) if (identical(Indeces,c(14,15))) segments(6,6,6,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(8,16))) point(7,7,ulbr=TRUE, colorAdj) if (identical(Indeces,c(9,16))) segments(7,7,8,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(10,16))) point(8,7,ulbr=FALSE, colorAdj) if (identical(Indeces,c(15,16))) segments(7,6,7,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(9,17))) point(8,7,ulbr=TRUE, colorAdj) if (identical(Indeces,c(10,17))) segments(8,7,9,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(16,17))) segments(8,6,8,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(10,18))) point(9,7,ulbr=TRUE, colorAdj) if 
(identical(Indeces,c(17,18))) segments(9,6,9,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(11,19))) point(2,6,ulbr=FALSE, colorAdj) if (identical(Indeces,c(11,20))) segments(2,6,3,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(12,20))) point(3,6,ulbr=FALSE, colorAdj) if (identical(Indeces,c(19,20))) segments(2,5,2,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(11,21))) point(3,6,ulbr=TRUE, colorAdj) if (identical(Indeces,c(12,21))) segments(3,6,4,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(13,21))) point(4,6,ulbr=FALSE, colorAdj) if (identical(Indeces,c(20,21))) segments(3,5,3,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(12,22))) point(4,6,ulbr=TRUE, colorAdj) if (identical(Indeces,c(13,22))) segments(4,6,5,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(14,22))) point(5,6,ulbr=FALSE, colorAdj) if (identical(Indeces,c(21,22))) segments(4,5,4,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(13,23))) point(5,6,ulbr=TRUE, colorAdj) if (identical(Indeces,c(14,23))) segments(5,6,6,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(15,23))) point(6,6,ulbr=FALSE, colorAdj) if (identical(Indeces,c(22,23))) segments(5,5,5,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(14,24))) point(6,6,ulbr=TRUE, colorAdj) if (identical(Indeces,c(15,24))) segments(6,6,7,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(16,24))) point(7,6,ulbr=FALSE, colorAdj) if (identical(Indeces,c(23,24))) segments(6,5,6,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(15,25))) point(7,6,ulbr=TRUE, colorAdj) if (identical(Indeces,c(16,25))) segments(7,6,8,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(17,25))) point(8,6,ulbr=FALSE, colorAdj) if (identical(Indeces,c(24,25))) segments(7,5,7,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(17,26))) point(9,6,ulbr=TRUE, colorAdj) if (identical(Indeces,c(18,26))) segments(9,6,10,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(19,27))) segments(1,5,2,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(20,27))) point(2,5,ulbr=FALSE, colorAdj) if (identical(Indeces,c(19,28))) point(2,5,ulbr=TRUE, colorAdj) if (identical(Indeces,c(20,28))) segments(2,5,3,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(21,28))) point(3,5,ulbr=FALSE, colorAdj) if (identical(Indeces,c(27,28))) segments(2,4,2,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(20,29))) point(3,5,ulbr=TRUE, colorAdj) if (identical(Indeces,c(21,29))) segments(3,5,4,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(22,29))) point(4,5,ulbr=FALSE, colorAdj) if (identical(Indeces,c(28,29))) segments(3,4,3,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(21,30))) point(4,5,ulbr=TRUE, colorAdj) if (identical(Indeces,c(22,30))) segments(4,5,5,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(23,30))) point(5,5,ulbr=FALSE, colorAdj) if (identical(Indeces,c(29,30))) segments(4,4,4,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(22,31))) point(5,5,ulbr=TRUE, colorAdj) if (identical(Indeces,c(23,31))) segments(5,5,6,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(24,31))) point(6,5,ulbr=FALSE, colorAdj) if (identical(Indeces,c(30,31))) segments(5,4,5,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(23,32))) point(6,5,ulbr=TRUE, colorAdj) if (identical(Indeces,c(24,32))) segments(6,5,7,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(25,32))) point(7,5,ulbr=FALSE, colorAdj) if 
(identical(Indeces,c(31,32))) segments(6,4,6,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(24,33))) point(7,5,ulbr=TRUE, colorAdj) if (identical(Indeces,c(25,33))) segments(7,5,8,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(32,33))) segments(7,4,7,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(26,34))) segments(9,5,10,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(27,35))) point(2,4,ulbr=TRUE, colorAdj) if (identical(Indeces,c(28,35))) segments(2,4,3,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(29,35))) point(3,4,ulbr=FALSE, colorAdj) if (identical(Indeces,c(28,36))) point(3,4,ulbr=TRUE, colorAdj) if (identical(Indeces,c(29,36))) segments(3,4,4,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(30,36))) point(4,4,ulbr=FALSE, colorAdj) if (identical(Indeces,c(35,36))) segments(3,3,3,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(29,37))) point(4,4,ulbr=TRUE, colorAdj) if (identical(Indeces,c(30,37))) segments(4,4,5,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(31,37))) point(5,4,ulbr=FALSE, colorAdj) if (identical(Indeces,c(36,37))) segments(4,3,4,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(30,38))) point(5,4,ulbr=TRUE, colorAdj) if (identical(Indeces,c(31,38))) segments(5,4,6,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(32,38))) point(6,4,ulbr=FALSE, colorAdj) if (identical(Indeces,c(37,38))) segments(5,3,5,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(31,39))) point(6,4,ulbr=TRUE, colorAdj) if (identical(Indeces,c(32,39))) segments(6,4,7,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(33,39))) point(7,4,ulbr=FALSE, colorAdj) if (identical(Indeces,c(38,39))) segments(6,3,6,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(32,40))) point(7,4,ulbr=TRUE, colorAdj) if (identical(Indeces,c(33,40))) segments(7,4,8,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(39,40))) segments(7,3,7,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(33,41))) point(8,4,ulbr=TRUE, colorAdj) if (identical(Indeces,c(34,41))) point(9,4,ulbr=FALSE, colorAdj) if (identical(Indeces,c(40,41))) segments(8,3,8,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(34,42))) segments(9,4,10,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(41,42))) segments(9,3,9,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(35,43))) point(3,3,ulbr=TRUE, colorAdj) if (identical(Indeces,c(36,43))) segments(3,3,4,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(37,43))) point(4,3,ulbr=FALSE, colorAdj) if (identical(Indeces,c(36,44))) point(4,3,ulbr=TRUE, colorAdj) if (identical(Indeces,c(37,44))) segments(4,3,5,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(38,44))) point(5,3,ulbr=FALSE, colorAdj) if (identical(Indeces,c(43,44))) segments(4,2,4,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(37,45))) point(5,3,ulbr=TRUE, colorAdj) if (identical(Indeces,c(38,45))) segments(5,3,6,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(39,45))) point(6,3,ulbr=FALSE, colorAdj) if (identical(Indeces,c(44,45))) segments(5,2,5,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(38,46))) point(6,3,ulbr=TRUE, colorAdj) if (identical(Indeces,c(39,46))) segments(6,3,7,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(40,46))) point(7,3,ulbr=FALSE, colorAdj) if (identical(Indeces,c(45,46))) segments(6,2,6,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(39,47))) point(7,3,ulbr=TRUE, 
colorAdj) if (identical(Indeces,c(40,47))) segments(7,3,8,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(41,47))) point(8,3,ulbr=FALSE, colorAdj) if (identical(Indeces,c(46,47))) segments(7,2,7,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(40,48))) point(8,3,ulbr=TRUE, colorAdj) if (identical(Indeces,c(41,48))) segments(8,3,9,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(42,48))) point(9,3,ulbr=FALSE, colorAdj) if (identical(Indeces,c(47,48))) segments(8,2,8,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(43,49))) point(4,2,ulbr=TRUE, colorAdj) if (identical(Indeces,c(44,49))) segments(4,2,5,2, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(45,49))) point(5,2,ulbr=FALSE, colorAdj) if (identical(Indeces,c(44,50))) point(5,2,ulbr=TRUE, colorAdj) if (identical(Indeces,c(45,50))) segments(5,2,6,2, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(46,50))) point(6,2,ulbr=FALSE, colorAdj) if (identical(Indeces,c(49,50))) segments(5,1,5,2, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(45,51))) point(6,2,ulbr=TRUE, colorAdj) if (identical(Indeces,c(46,51))) segments(6,2,7,2, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(47,51))) point(7,2,ulbr=FALSE, colorAdj) if (identical(Indeces,c(50,51))) segments(6,1,6,2, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(46,52))) point(7,2,ulbr=TRUE, colorAdj) if (identical(Indeces,c(47,52))) segments(7,2,8,2, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(48,52))) point(8,2,ulbr=FALSE, colorAdj) if (identical(Indeces,c(51,52))) segments(7,1,7,2, lwd=lwd,lend=lend,col=colorAdj) } ###Plot corners for (i in WhichCorners) { Indeces<-as.numeric(Boundary[i,1:2]) colorAdj<-color.adj[i] if (identical(Indeces,c(1,2))) segments(5,8,5,9,lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(2,3))) segments(6,8,6,9,lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(3,4))) segments(7,8,7,9,lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(1,5))) point(4,8,ulbr=FALSE, colorAdj) if (identical(Indeces,c(1,6))) segments(4,8,5,8,lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(2,6))) point(5,8,ulbr=FALSE, colorAdj) if (identical(Indeces,c(5,6))) segments(4,7,4,8,lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(1,7))) point(5,8,ulbr=TRUE, colorAdj) if (identical(Indeces,c(2,7))) segments(5,8,6,8,lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(3,7))) point(6,8,ulbr=FALSE, colorAdj) if (identical(Indeces,c(6,7))) segments(5,7,5,8, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(2,8))) point(6,8,ulbr=TRUE, colorAdj) if (identical(Indeces,c(3,8))) segments(6,8,7,8, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(4,8))) point(7,8,ulbr=FALSE, colorAdj) if (identical(Indeces,c(7,8))) segments(6,7,6,8, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(3,9))) point(7,8,ulbr=TRUE, colorAdj) if (identical(Indeces,c(4,9))) segments(7,8,8,8, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(8,9))) segments(7,7,7,8, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(4,10))) point(8,8,ulbr=TRUE, colorAdj) if (identical(Indeces,c(9,10))) segments(8,7,8,8, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(5,11))) point(3,7,ulbr=FALSE, colorAdj) if (identical(Indeces,c(5,12))) segments(3,7,4,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(6,12))) point(4,7,ulbr=FALSE, colorAdj) if (identical(Indeces,c(11,12))) segments(3,6,3,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(5,13))) point(4,7,ulbr=TRUE, 
colorAdj) if (identical(Indeces,c(6,13))) segments(4,7,5,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(7,13))) point(5,7,ulbr=FALSE, colorAdj) if (identical(Indeces,c(12,13))) segments(4,6,4,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(6,14))) point(5,7,ulbr=TRUE, colorAdj) if (identical(Indeces,c(7,14))) segments(5,7,6,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(8,14))) point(6,7,ulbr=FALSE, colorAdj) if (identical(Indeces,c(13,14))) segments(5,6,5,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(7,15))) point(6,7,ulbr=TRUE, colorAdj) if (identical(Indeces,c(8,15))) segments(6,7,7,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(9,15))) point(7,7,ulbr=FALSE, colorAdj) if (identical(Indeces,c(14,15))) segments(6,6,6,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(8,16))) point(7,7,ulbr=TRUE, colorAdj) if (identical(Indeces,c(9,16))) segments(7,7,8,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(10,16))) point(8,7,ulbr=FALSE, colorAdj) if (identical(Indeces,c(15,16))) segments(7,6,7,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(9,17))) point(8,7,ulbr=TRUE, colorAdj) if (identical(Indeces,c(10,17))) segments(8,7,9,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(16,17))) segments(8,6,8,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(10,18))) point(9,7,ulbr=TRUE, colorAdj) if (identical(Indeces,c(17,18))) segments(9,6,9,7, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(11,19))) point(2,6,ulbr=FALSE, colorAdj) if (identical(Indeces,c(11,20))) segments(2,6,3,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(12,20))) point(3,6,ulbr=FALSE, colorAdj) if (identical(Indeces,c(19,20))) segments(2,5,2,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(11,21))) point(3,6,ulbr=TRUE, colorAdj) if (identical(Indeces,c(12,21))) segments(3,6,4,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(13,21))) point(4,6,ulbr=FALSE, colorAdj) if (identical(Indeces,c(20,21))) segments(3,5,3,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(12,22))) point(4,6,ulbr=TRUE, colorAdj) if (identical(Indeces,c(13,22))) segments(4,6,5,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(14,22))) point(5,6,ulbr=FALSE, colorAdj) if (identical(Indeces,c(21,22))) segments(4,5,4,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(13,23))) point(5,6,ulbr=TRUE, colorAdj) if (identical(Indeces,c(14,23))) segments(5,6,6,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(15,23))) point(6,6,ulbr=FALSE, colorAdj) if (identical(Indeces,c(22,23))) segments(5,5,5,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(14,24))) point(6,6,ulbr=TRUE, colorAdj) if (identical(Indeces,c(15,24))) segments(6,6,7,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(16,24))) point(7,6,ulbr=FALSE, colorAdj) if (identical(Indeces,c(23,24))) segments(6,5,6,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(15,25))) point(7,6,ulbr=TRUE, colorAdj) if (identical(Indeces,c(16,25))) segments(7,6,8,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(17,25))) point(8,6,ulbr=FALSE, colorAdj) if (identical(Indeces,c(24,25))) segments(7,5,7,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(17,26))) point(9,6,ulbr=TRUE, colorAdj) if (identical(Indeces,c(18,26))) segments(9,6,10,6, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(19,27))) segments(1,5,2,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(20,27))) point(2,5,ulbr=FALSE, 
colorAdj) if (identical(Indeces,c(19,28))) point(2,5,ulbr=TRUE, colorAdj) if (identical(Indeces,c(20,28))) segments(2,5,3,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(21,28))) point(3,5,ulbr=FALSE, colorAdj) if (identical(Indeces,c(27,28))) segments(2,4,2,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(20,29))) point(3,5,ulbr=TRUE, colorAdj) if (identical(Indeces,c(21,29))) segments(3,5,4,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(22,29))) point(4,5,ulbr=FALSE, colorAdj) if (identical(Indeces,c(28,29))) segments(3,4,3,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(21,30))) point(4,5,ulbr=TRUE, colorAdj) if (identical(Indeces,c(22,30))) segments(4,5,5,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(23,30))) point(5,5,ulbr=FALSE, colorAdj) if (identical(Indeces,c(29,30))) segments(4,4,4,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(22,31))) point(5,5,ulbr=TRUE, colorAdj) if (identical(Indeces,c(23,31))) segments(5,5,6,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(24,31))) point(6,5,ulbr=FALSE, colorAdj) if (identical(Indeces,c(30,31))) segments(5,4,5,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(23,32))) point(6,5,ulbr=TRUE, colorAdj) if (identical(Indeces,c(24,32))) segments(6,5,7,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(25,32))) point(7,5,ulbr=FALSE, colorAdj) if (identical(Indeces,c(31,32))) segments(6,4,6,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(24,33))) point(7,5,ulbr=TRUE, colorAdj) if (identical(Indeces,c(25,33))) segments(7,5,8,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(32,33))) segments(7,4,7,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(26,34))) segments(9,5,10,5, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(27,35))) point(2,4,ulbr=TRUE, colorAdj) if (identical(Indeces,c(28,35))) segments(2,4,3,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(29,35))) point(3,4,ulbr=FALSE, colorAdj) if (identical(Indeces,c(28,36))) point(3,4,ulbr=TRUE, colorAdj) if (identical(Indeces,c(29,36))) segments(3,4,4,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(30,36))) point(4,4,ulbr=FALSE, colorAdj) if (identical(Indeces,c(35,36))) segments(3,3,3,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(29,37))) point(4,4,ulbr=TRUE, colorAdj) if (identical(Indeces,c(30,37))) segments(4,4,5,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(31,37))) point(5,4,ulbr=FALSE, colorAdj) if (identical(Indeces,c(36,37))) segments(4,3,4,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(30,38))) point(5,4,ulbr=TRUE, colorAdj) if (identical(Indeces,c(31,38))) segments(5,4,6,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(32,38))) point(6,4,ulbr=FALSE, colorAdj) if (identical(Indeces,c(37,38))) segments(5,3,5,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(31,39))) point(6,4,ulbr=TRUE, colorAdj) if (identical(Indeces,c(32,39))) segments(6,4,7,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(33,39))) point(7,4,ulbr=FALSE, colorAdj) if (identical(Indeces,c(38,39))) segments(6,3,6,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(32,40))) point(7,4,ulbr=TRUE, colorAdj) if (identical(Indeces,c(33,40))) segments(7,4,8,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(39,40))) segments(7,3,7,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(33,41))) point(8,4,ulbr=TRUE, colorAdj) if (identical(Indeces,c(34,41))) point(9,4,ulbr=FALSE, colorAdj) if 
(identical(Indeces,c(40,41))) segments(8,3,8,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(34,42))) segments(9,4,10,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(41,42))) segments(9,3,9,4, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(35,43))) point(3,3,ulbr=TRUE, colorAdj) if (identical(Indeces,c(36,43))) segments(3,3,4,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(37,43))) point(4,3,ulbr=FALSE, colorAdj) if (identical(Indeces,c(36,44))) point(4,3,ulbr=TRUE, colorAdj) if (identical(Indeces,c(37,44))) segments(4,3,5,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(38,44))) point(5,3,ulbr=FALSE, colorAdj) if (identical(Indeces,c(43,44))) segments(4,2,4,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(37,45))) point(5,3,ulbr=TRUE, colorAdj) if (identical(Indeces,c(38,45))) segments(5,3,6,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(39,45))) point(6,3,ulbr=FALSE, colorAdj) if (identical(Indeces,c(44,45))) segments(5,2,5,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(38,46))) point(6,3,ulbr=TRUE, colorAdj) if (identical(Indeces,c(39,46))) segments(6,3,7,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(40,46))) point(7,3,ulbr=FALSE, colorAdj) if (identical(Indeces,c(45,46))) segments(6,2,6,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(39,47))) point(7,3,ulbr=TRUE, colorAdj) if (identical(Indeces,c(40,47))) segments(7,3,8,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(41,47))) point(8,3,ulbr=FALSE, colorAdj) if (identical(Indeces,c(46,47))) segments(7,2,7,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(40,48))) point(8,3,ulbr=TRUE, colorAdj) if (identical(Indeces,c(41,48))) segments(8,3,9,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(42,48))) point(9,3,ulbr=FALSE, colorAdj) if (identical(Indeces,c(47,48))) segments(8,2,8,3, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(43,49))) point(4,2,ulbr=TRUE, colorAdj) if (identical(Indeces,c(44,49))) segments(4,2,5,2, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(45,49))) point(5,2,ulbr=FALSE, colorAdj) if (identical(Indeces,c(44,50))) point(5,2,ulbr=TRUE, colorAdj) if (identical(Indeces,c(45,50))) segments(5,2,6,2, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(46,50))) point(6,2,ulbr=FALSE, colorAdj) if (identical(Indeces,c(49,50))) segments(5,1,5,2, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(45,51))) point(6,2,ulbr=TRUE, colorAdj) if (identical(Indeces,c(46,51))) segments(6,2,7,2, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(47,51))) point(7,2,ulbr=FALSE, colorAdj) if (identical(Indeces,c(50,51))) segments(6,1,6,2, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(46,52))) point(7,2,ulbr=TRUE, colorAdj) if (identical(Indeces,c(47,52))) segments(7,2,8,2, lwd=lwd,lend=lend,col=colorAdj) if (identical(Indeces,c(48,52))) point(8,2,ulbr=FALSE, colorAdj) if (identical(Indeces,c(51,52))) segments(7,1,7,2, lwd=lwd,lend=lend,col=colorAdj) } ###Add border hloop<-list(4:7,c(3,8),c(2,9),1,NULL,1,c(2,9),c(3,8),4:7) vloop<-list(4:5,c(3,6),c(2,7),c(1,8),NULL,NULL,NULL,c(1,8),c(2,7),3:6) for (j in 1:9) { for (i in hloop[[j]]) { segments(i,j,i+1,j,lwd=lwd.border) } } for (i in 1:10) { for (j in vloop[[i]]) { segments(i,j,i,j+1,lwd=lwd.border) } } ###Add blind spot rect(8,4,9,6,col=color.bs,border=color.bs) ###Add legend if (legend) { NColors<-length(col.pal) Vertical<-seq(3,7,length.out=NColors) if (stat == "mean") for (i in 1:NColors) 
segments(11,Vertical[i],11.75,Vertical[i],col=rev(col.pal)[i],lwd=1.5) if (stat == "sd") for (i in 1:NColors) segments(11,Vertical[i],11.75,Vertical[i],col=(col.pal)[i],lwd=1.5) minx<-zlim[1] maxx<-zlim[2] LegendPV<-seq(minx,maxx,length.out=5) segments(11.75,3,11.75,7,lwd=2) segments(11,3,11,7,lwd=2) segments(11,7,11.75,7,lwd=2) segments(11,3,11.75,3,lwd=2) for (i in 1:length(LegendPV)) { if ((stat == "mean") & (!is.na(Visit))) { if (is.na(Visit)) text(12.75,(3:7)[i],format0(rev(LegendPV)[i])) if (!is.na(Visit)) text(12.75,(3:7)[i],format2(rev(LegendPV)[i])) } if ((stat == "sd")& ((!is.na(Visit)))) { if (is.na(Visit)) text(12.75,(3:7)[i],format0((LegendPV)[i])) if (!is.na(Visit)) text(12.75,(3:7)[i],format2((LegendPV)[i])) } if (is.na(Visit)) { if (is.na(Visit)) text(12.75,(3:7)[i],format0((LegendPV)[i])) if (!is.na(Visit)) text(12.75,(3:7)[i],format2((LegendPV)[i])) } segments(11.75,(3:7)[i],12,(3:7)[i],lwd=2) } if (is.na(Visit)) text(11.5, 7.5, expression(paste("Degree (",degree,")"))) if (!is.na(Visit) & stat == "mean") text(11.4, 7.5, expression(paste("E[",w[ij],"(",alpha[t],")]"))) if (!is.na(Visit) & stat == "sd") text(11.4, 7.5, expression(paste("sd[",w[ij],"(",alpha[t],")]"))) } if (!is.null(Degree)) { ###Add garway angles text(4.5,8.5,Degree[1]) text(5.5,8.5,Degree[2]) text(6.5,8.5,Degree[3]) text(7.5,8.5,Degree[4]) text(3.5,7.5,Degree[5]) text(4.5,7.5,Degree[6]) text(5.5,7.5,Degree[7]) text(6.5,7.5,Degree[8]) text(7.5,7.5,Degree[9]) text(8.5,7.5,Degree[10]) text(2.5,6.5,Degree[11]) text(3.5,6.5,Degree[12]) text(4.5,6.5,Degree[13]) text(5.5,6.5,Degree[14]) text(6.5,6.5,Degree[15]) text(7.5,6.5,Degree[16]) text(8.5,6.5,Degree[17]) text(9.5,6.5,Degree[18]) text(1.5,5.5,Degree[19]) text(2.5,5.5,Degree[20]) text(3.5,5.5,Degree[21]) text(4.5,5.5,Degree[22]) text(5.5,5.5,Degree[23]) text(6.5,5.5,Degree[24]) text(7.5,5.5,Degree[25]) text(9.5,5.5,Degree[26]) text(1.5,4.5,Degree[27]) text(2.5,4.5,Degree[28]) text(3.5,4.5,Degree[29]) text(4.5,4.5,Degree[30]) text(5.5,4.5,Degree[31]) text(6.5,4.5,Degree[32]) text(7.5,4.5,Degree[33]) text(9.5,4.5,Degree[34]) text(2.5,3.5,Degree[35]) text(3.5,3.5,Degree[36]) text(4.5,3.5,Degree[37]) text(5.5,3.5,Degree[38]) text(6.5,3.5,Degree[39]) text(7.5,3.5,Degree[40]) text(8.5,3.5,Degree[41]) text(9.5,3.5,Degree[42]) text(3.5,2.5,Degree[43]) text(4.5,2.5,Degree[44]) text(5.5,2.5,Degree[45]) text(6.5,2.5,Degree[46]) text(7.5,2.5,Degree[47]) text(8.5,2.5,Degree[48]) text(4.5,1.5,Degree[49]) text(5.5,1.5,Degree[50]) text(6.5,1.5,Degree[51]) text(7.5,1.5,Degree[52]) } ###Return par to default par(pardefault) } ###End Function }
/scratch/gouwar.j/cran-all/cranData/womblR/R/PLOT_Adjacency.R
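### Illustrative sketch (not part of the womblR source): how the classInt-based color
### mapping used in PlotAdjacency above assigns one color per adjacency value. The
### values, breaks, and palette below are toy inputs chosen only for illustration.
if (requireNamespace("classInt", quietly = TRUE)) {
  vals <- c(0.12, 0.33, 0.45, 0.78, 0.91)                          # toy adjacency summaries
  col.breaks <- seq(0, 1, length.out = length(unique(vals)) + 1)   # fixed breaks, as in PlotAdjacency
  col.pal <- colorRampPalette(c("Black", "White"))(length(vals))   # toy palette
  fixed_obs <- classInt::classIntervals(vals, style = "fixed", fixedBreaks = col.breaks)
  classInt::findColours(fixed_obs, col.pal)                        # one color per value
}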
###Function used to plot sensitivity values on the visual field #' #' PlotSensitivity #' #' Plots a heat map of the differential light sensitivity on the Humphrey Field #' Analyzer-II visual field. #' #' @param Y variable to be plotted on the visual field (e.g. differential light sensitivity). #' @param main an overall title for the plot. #' @param zlim the limits used for the legend (default are the minimum and maximum of Y). #' @param color a vector of character strings representing the color palette. #' @param col.bs color of the blind spot locations (default = "grey"). #' @param bins the number of bins used to refine the color palette for the figure and legend. #' @param legend logical, indicating whether the legend should be present (default = TRUE). #' @param legend.lab a label for the legend (default = "DLS (dB)"). #' @param legend.round integer, indicating the digits that the legend labels are rounded to #' (default = 0). #' @param legend.vals integer, indicating the number of labels values to be included on the legend (default = 5). #' @param border logical, indicating whether there should be a border around the visual field (default = TRUE). #' @details \code{PlotSensitivity} is used in the application of glaucoma progression to #' plot a variable across the visual field in the form of a heat map. #' @examples #' data(VFSeries) #' PlotSensitivity(Y = VFSeries$DLS[VFSeries$Visit == 1], #' main = "Sensitivity estimate (dB) at each \n location on visual field", #' legend.lab = "DLS (dB)", #' zlim = c(10, 35), #' bins = 250) #' @author Samuel I. Berchuck #' @export PlotSensitivity <- function(Y = Y, main = "Sensitivity Estimate (dB) at each \nlocation on visual field", legend.lab = "DLS (dB)", zlim = c(10, 35), bins = 200, border = TRUE, legend = TRUE, color = c("yellow", "orange", "red"), col.bs = "grey", legend.round = 0, legend.vals = 5) { ##Note: Depends on library classInt # You need the suggested package for this function if (!requireNamespace("classInt", quietly = TRUE)) { stop("classInt needed for this function to work. Please install it.", call. 
= FALSE) } ###Check zlim missing if (missing(zlim)) zlim <- c(min(Y), max(Y)) ###Create Legend Cutoffs labs <- levels(cut(zlim, bins)) labs <- cbind(lower = as.numeric(sub("\\((.+),.*","\\1", labs)), upper = as.numeric(sub("[^,]*,([^]]*)\\]","\\1", labs))) legvals <- as.numeric(c(labs[1, 1], labs[ , 2])) legvals[1] <- -Inf legvals[length(legvals)] <- Inf ###Get color specification colbr <- colorRampPalette(color) colpal <- colbr(bins) ###Get colors for each observation # cuts <- as.character(apply(matrix(Y[!is.na(Y)], ncol = 1), 1, cut, legvals, labels = colpal)) cuts <- cut(Y[!is.na(Y)], breaks = legvals) cuts <- colpal[as.numeric(cuts)] ###Create plotting functions square <- function(x, y, col) symbols(x, y, squares = 1, fg = col, bg = col, inches = FALSE, add = TRUE) format0 <- function(x, legend.round) format(round(x,legend.round),nsmall=legend.round) ###Get square coordinates Loc <- data.frame(x = c(4:7, 3:8, 2:9, 1:9, 1:9, 2:9, 3:8, 4:7), y = c(rep(1, 4), rep(2, 6), rep(3, 8), rep(4, 9), rep(5, 9), rep(6, 8), rep(7, 6), rep(8, 4))) Loc <- Loc[order(Loc$y, decreasing = TRUE),] rownames(Loc) <- 1 : 54 Loc <- Loc[-c(26, 35), ] #remove blind spot ###Initiate figure with squares pardefault <- suppressWarnings(par(no.readonly = T)) par(mfcol = c(1, 1), pty = "m", mai = c(0, 0, 0.75, 0)) # plot(1, 1, main = main, type = "n", yaxt = "n", xaxt = "n", bty = "n", xlim = c(-2, 14), ylim = c(2, 7), asp = 1, ylab = "", xlab = "") plot(1, 1, type = "n", yaxt = "n", xaxt = "n", bty = "n", xlim = c(0.5, 13), ylim = c(2, 7), asp = 1, ylab = "", xlab = "") title(main = main, cex.main = 1.7) for (i in 1 : 52) { x <- Loc[i, 1] + 0.5 y <- Loc[i ,2] + 0.5 square(x, y, col = cuts[i]) } square(8 + 0.5, 5 + 0.5, col = col.bs) square(8 + 0.5, 4 + 0.5, col = col.bs) ###Add border if (border) { hloop<-list(4:7,c(3,8),c(2,9),1,NULL,1,c(2,9),c(3,8),4:7) vloop<-list(4:5,c(3,6),c(2,7),c(1,8),NULL,NULL,NULL,c(1,8),c(2,7),3:6) for (j in 1:9) { for (i in hloop[[j]]) { segments(i,j,i+1,j,lwd = 1.5) } } for (i in 1:10) { for (j in vloop[[i]]) { segments(i,j,i,j+1,lwd = 1.5) } } } ###Add legend if (legend) { if (missing(zlim)) zlim <- c(min(Y), max(Y)) NColors <- length(colpal) Vertical <- seq(3, 7, length.out = NColors) for (i in 1 : NColors) segments(11, Vertical[i], 11.75, Vertical[i], col = colpal[i], lwd = 1.5) minx <- zlim[1] maxx <- zlim[2] LegendPV <- seq(minx, maxx, length.out = legend.vals) segments(11.75, 3, 11.75, 7, lwd = 1.5) segments(11 ,3 ,11 ,7 , lwd = 1.5) segments(11 ,7 ,11.75, 7, lwd = 1.5) segments(11 ,3 ,11.75, 3, lwd = 1.5) for (i in 1 : length(LegendPV)) { text(12.75, seq(3, 7, length.out = legend.vals)[i], format0(LegendPV[i], legend.round)) segments(11.75, seq(3, 7, length.out = legend.vals)[i], 12, seq(3, 7, length.out = legend.vals)[i], lwd = 1.5) } text(11.5, 7.5, legend.lab) } ###Return to default par setting par(pardefault) ###End function }
/scratch/gouwar.j/cran-all/cranData/womblR/R/PLOT_Sensitivity.R
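### Illustrative sketch (not part of the womblR source): the binning step PlotSensitivity
### uses above to map DLS values onto the heat-map palette. The toy DLS values are made up;
### zlim, bins, and the yellow-orange-red palette mirror the function defaults.
Y <- c(12, 25, 33, 18, 29)                                   # toy DLS values (dB)
zlim <- c(10, 35)
bins <- 200
labs <- levels(cut(zlim, bins))
labs <- cbind(lower = as.numeric(sub("\\((.+),.*", "\\1", labs)),
              upper = as.numeric(sub("[^,]*,([^]]*)\\]", "\\1", labs)))
legvals <- as.numeric(c(labs[1, 1], labs[, 2]))
legvals[c(1, length(legvals))] <- c(-Inf, Inf)               # open-ended outer bins
colpal <- colorRampPalette(c("yellow", "orange", "red"))(bins)
colpal[as.numeric(cut(Y, breaks = legvals))]                 # one color per DLS value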
###Function for plotting a time series of data at each location on the visual field
#'
#' PlotVfTimeSeries
#'
#' Plots a time series at each location of the Humphrey Field Analyzer-II visual field.
#'
#' @param Y a time series variable to be plotted.
#'
#' @param Location a variable corresponding to the location on the visual field
#' that the time series variable was observed.
#'
#' @param Time a variable corresponding to the time that the time series variable
#' was observed.
#'
#' @param main an overall title for the plot.
#'
#' @param xlab a title for the x axis.
#'
#' @param ylab a title for the y axis.
#'
#' @param line.col color for the regression line, either a character string corresponding
#' to a color or an integer (default = "red").
#'
#' @param line.reg logical, determines if there are regression lines printed (default = TRUE).
#'
#' @param line.type integer, specifies the type of regression line printed (default = 1).
#'
#' @details \code{PlotVfTimeSeries} is used in the application of glaucoma progression.
#' Each cell contains the observed DLS at one location over visits, with the red line
#' representing a linear regression trend.
#'
#' @examples
#' data(VFSeries)
#' PlotVfTimeSeries(Y = VFSeries$DLS,
#'                  Location = VFSeries$Location,
#'                  Time = VFSeries$Time,
#'                  main = "Visual field sensitivity time series \n at each location",
#'                  xlab = "Days from baseline visit",
#'                  ylab = "Differential light sensitivity (dB)")
#'
#' @author Samuel I. Berchuck
#'
#' @export
PlotVfTimeSeries <- function(Y, Location, Time,
                             main = "Visual field sensitivity time series \n at each location",
                             xlab = "Time from first visit (days)",
                             ylab = "Sensitivity (dB)",
                             line.col = "red",
                             line.reg = TRUE,
                             line.type = 1) {

  ###Logical function to check for colors
  areColors <- function(x) {
    sapply(x, function(X) {
      tryCatch(is.matrix(col2rgb(X)), error = function(e) FALSE)
    })
  }

  ###Check Inputs
  is.wholenumber <- function(x, tol = .Machine$double.eps^0.5) abs(x - round(x)) < tol
  if (missing(Y)) stop('"Y" is missing')
  if (missing(Location)) stop('"Location" is missing')
  if (missing(Time)) stop('"Time" is missing')
  if (!is.character(main)) stop('"main" must be a character string')
  if (!is.character(xlab)) stop('"xlab" must be a character string')
  if (!is.character(ylab)) stop('"ylab" must be a character string')
  if (!any(areColors(line.col))) stop('"line.col" can only include colors')
  if (!is.logical(line.reg)) stop('"line.reg" must be a logical')
  if (!is.wholenumber(line.type)) stop('"line.type" must be an integer')

  ###Function inputs
  # Y <- YObserved
  # Location <- Location
  # Time <- Time
  # main = "Visual field sensitivity time series \n at each location"
  # xlab = "Time from first visit (days)"
  # ylab = "Sensitivity (dB)"
  pardefault <- suppressWarnings(par(no.readonly = T))

  ###Collect and sort data
  VF <- data.frame(cbind(Location, Time, Y))
  VF <- VF[order(VF$Time), ]
  VF <- VF[order(VF$Location), ]

  ###Compute Summary Statistics
  max.VF <- max(abs(range(Y)))
  max.Time <- max(abs(Time))
  y_breaks <- round(seq(0, 40, by = 10))
  x_breaks <- round(seq(0, 100 * (max.Time %/% 100 + as.logical(max.Time %% 100)), by = 100)) #Round up to the nearest 100th

  ###Create layout matrix
  layout.matrix <- matrix(c(0,  0,  0,  1,  2,  3,  4,  0,  0,
                            0,  0,  5,  6,  7,  8,  9, 10,  0,
                            0, 11, 12, 13, 14, 15, 16, 17, 18,
                           19, 20, 21, 22, 23, 24, 25, 26, 27,
                           28, 29, 30, 31, 32, 33, 34, 35, 36,
                            0, 37, 38, 39, 40, 41, 42, 43, 44,
                            0,  0, 45, 46, 47, 48, 49, 50,  0,
                            0,  0,  0, 51, 52, 53, 54,  0,  0), nrow = 8, ncol = 9, byrow = TRUE)
  pp <- layout(layout.matrix, rep(1, 3), rep(1, 9), TRUE)
  # layout.show(pp)

  ###Clarify Blind Spot
  all <- 1 : max(VF$Location)
  blind_spot <- c(26, 35)
  remaining <- all[-blind_spot]

  ###Plot Time Series at Each Location
  par(mar = c(0, 0, 0, 0), oma = c(5, 10, 10, 5), mgp = c(3, 1, 0))
  for (i in 1 : max(VF$Location)) {
    if (i %in% remaining) {
      ph <- VF[VF$Location == i, ]
      plot(ph[ , 2], ph[ , 3], type = "l", xaxt = "n", yaxt = "n", xlim = c(0, max.Time), ylim = c(0, 40))
      points(ph[ , 2], ph[ , 3], pch = ".")
      if (line.reg) abline(lm(ph[ , 3] ~ ph[ , 2]), col = line.col, lty = line.type)
    }
    if (i %in% blind_spot) plot(ph[ , 2], ph[ , 3], type = "n", xaxt = "n", yaxt = "n", xlim = c(0, max.Time), ylim = c(0, 40))
    if (i %in% c(52, 54)) axis(1, at = x_breaks)
    if (i %in% c(1, 3)) axis(3, at = x_breaks)
    if (i %in% c(5, 19, 37, 51)) axis(2, at = y_breaks, las = 2)
    if (i %in% c(4, 18, 36, 50)) axis(4, at = y_breaks, las = 2)
  }

  ###Add Title
  title(main = list(main, cex = 2.5, col = "black", font = 2),
        xlab = list(xlab, cex = 2, col = "black", font = 1),
        ylab = list(ylab, cex = 2, col = "black", font = 1),
        outer = TRUE)

  ###Return par to default
  suppressMessages(par(pardefault))

###End function
}
/scratch/gouwar.j/cran-all/cranData/womblR/R/PLOT_VFTimeSeries.R
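### Illustrative sketch (not part of the womblR source): the per-location trend that
### PlotVfTimeSeries draws as the red regression line. The times and sensitivities below
### are simulated; with Time measured in days the fitted slope is in dB per day.
Time <- c(0, 126, 238, 406, 504)        # toy follow-up times (days)
DLS  <- c(28, 27, 26.5, 25, 24)         # toy sensitivities at a single location (dB)
fit  <- lm(DLS ~ Time)                  # same fit used for the red line in each cell
coef(fit)["Time"]                       # estimated trend in dB/day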
###Function to get posterior adjacency values for each location
#'
#' PosteriorAdj
#'
#' Calculates the posterior mean and standard deviation for the neighborhood adjacencies
#' from the \code{\link{STBDwDM}} model.
#'
#' @param object a \code{\link{STBDwDM}} model object from which predictions
#' are desired.
#'
#' @details The function \code{PosteriorAdj} calculates the posterior mean and standard
#' deviation of the neighborhood adjacencies for each pairwise location. The neighborhood
#' structure used to do this comes from Berchuck et al. 2017.
#'
#' @return \code{PosteriorAdj} returns a matrix containing the following columns.
#'
#' \describe{
#'
#' \item{\code{i}}{Location \code{i} (i.e. which row/column on the adjacency matrix W).}
#'
#' \item{\code{j}}{Location \code{j} (i.e. which row/column on the adjacency matrix W).}
#'
#' \item{\code{DM}}{The dissimilarity metric between locations \code{i} and \code{j}.}
#'
#' \item{\code{meant}}{The posterior mean of the neighborhood adjacency between location
#' \code{i} and \code{j} at time \code{t, t = 1, ... , Nu}.}
#'
#' \item{\code{sdt}}{The posterior standard deviation of the neighborhood adjacency between location
#' \code{i} and \code{j} at time \code{t, t = 1, ... , Nu}.}
#'
#' }
#'
#' @author Samuel I. Berchuck
#' @export
PosteriorAdj <- function(object) {

  ###Check Inputs
  if (missing(object)) stop('"object" is missing')
  if (!is.STBDwDM(object)) stop('"object" must be of class STBDwDM')

  ###Set data objects
  DatObj <- object$datobj
  Z <- DatObj$Z
  # AdjacentEdgesBoolean <- DatObj$AdjacentEdgesBoolean
  Nu <- DatObj$Nu
  ScaleDM <- DatObj$ScaleDM
  W <- DatObj$W
  AdjacentEdgesBoolean <- (W == 1) & (!lower.tri(W))

  ###Set parameter objects
  NKeep <- dim(object$mu)[1]
  Alpha <- object$alpha

  ###Get posterior Wij
  ZMat <- as.matrix(Z, ncol = 1)
  Wij <- cbind(which(AdjacentEdgesBoolean, arr.ind = TRUE), ZMat * ScaleDM)
  WijMat <- matrix(0, ncol = length(Z), nrow = NKeep)
  for (t in 1 : Nu) {
    for (s in 1 : NKeep) {
      WijMat[s, ] <- exp( -Alpha[s, t] * Z)
    }
    Wij <- cbind(Wij, apply(WijMat, 2, mean), apply(WijMat, 2, sd))
  }
  colnames(Wij) <- c("i", "j", "DM", paste(c("mean", "sd"), rep(1 : Nu, each = 2), sep = ""))
  Wij <- structure(Wij, class = "PosteriorAdj")
  return(Wij)

}

###Verify the class of our regression object------------------------------------------------------------------------
#' is.PosteriorAdj
#'
#' \code{is.PosteriorAdj} is a general test of an object being interpretable as a
#' \code{\link{PosteriorAdj}} object.
#'
#' @param x object to be tested.
#'
#' @details The \code{\link{PosteriorAdj}} class is defined as the posterior adjacency
#' object that results from the \code{\link{PosteriorAdj}} function.
#'
#' @export
is.PosteriorAdj <- function(x) {
  identical(attributes(x)$class, "PosteriorAdj")
}
/scratch/gouwar.j/cran-all/cranData/womblR/R/POST_PosteriorWij.R
###Function to obtain future predictions given a STBDwDM object
#'
#' predict.STBDwDM
#'
#' Predicts future observations from the \code{\link{STBDwDM}} model.
#'
#' @param object a \code{\link{STBDwDM}} model object for which predictions
#'  are desired from.
#'
#' @param NewTimes a numeric vector including desired time point(s) for prediction.
#'
#' @param ... other arguments.
#'
#' @details \code{predict.STBDwDM} uses Bayesian kriging to predict vectors at future
#'  time points. The function returns the kriged observed outcomes along with the
#'  observational level parameters (\code{mu}, \code{tau}, and \code{alpha}).
#'
#' @return \code{predict.STBDwDM} returns a list containing the following objects.
#'
#'   \describe{
#'
#'   \item{\code{MuTauAlpha}}{A \code{list} containing three matrices, \code{mu},
#'   \code{tau} and \code{alpha}. Each matrix is dimension \code{NKeep x s}, where
#'   \code{s} is the number of new time points. Each matrix contains posterior
#'   samples obtained by Bayesian kriging.}
#'
#'   \item{\code{Y}}{A \code{list} containing \code{s} posterior predictive distribution
#'   matrices, one for each new time point. Each matrix is dimension \code{NKeep x M}, where
#'   \code{M} is the number of spatial locations. Each matrix is obtained through Bayesian kriging.}
#'
#'   }
#'
#' @author Samuel I. Berchuck
#' @export

###Prediction function for STBDwDM objects
predict.STBDwDM <- function(object, NewTimes, ...) {

  ###Check Inputs
  if (missing(object)) stop('"object" is missing')
  if (!is.STBDwDM(object)) stop('"object" must be of class STBDwDM')
  if (missing(NewTimes)) stop('"NewTimes" is missing')
  if (!is.numeric(NewTimes)) stop('NewTimes must be a numeric vector')
  if (any(is.na(NewTimes))) stop("NewTimes may have no missing values")
  if (any(!is.finite(NewTimes))) stop("NewTimes must have strictly finite entries")
  if (!all(NewTimes >= 0)) stop('NewTimes vector has at least one negative entry')

  ###Set seed for reproducibility
  set.seed(54)

  ###Set data objects
  DatObj <- object$datobj
  Nu <- DatObj$Nu
  M <- DatObj$M

  ###Create updated distance matrix
  TimeFixed <- DatObj$Time
  Time <- sort(c(TimeFixed, NewTimes))
  TimeDist <- abs(outer(Time, Time, "-" ))
  NNewVisits <- length(NewTimes)
  NewVisits <- OriginalVisits <- NULL
  for (i in 1:NNewVisits) NewVisits <- c(NewVisits, which(NewTimes[i] == Time) - 1)
  for (i in 1:Nu) OriginalVisits <- c(OriginalVisits, which(TimeFixed[i] == Time) - 1)

  ###Update DatObj
  DatObj$NewVisits <- NewVisits
  DatObj$OriginalVisits <- OriginalVisits
  DatObj$TimeDist <- TimeDist
  DatObj$NNewVisits <- NNewVisits

  ###Set mcmc object
  NKeep <- dim(object$phi)[1]

  ###Create parameter object
  Para <- list()
  Para$Mu <- object$mu
  Para$Tau2 <- object$tau2
  Para$Alpha <- object$alpha
  Para$Delta <- object$delta
  Para$T <- object$T
  Para$Phi <- object$phi

  ###Obtain samples of mu, tau and alpha using Bayesian kriging
  ThetaKrig <- ThetaKrigging(DatObj, Para, NKeep)

  ###Obtain samples of observed Y
  YKrig <- YKrigging(DatObj, ThetaKrig, NKeep)

  ###Format theta samples for output
  FutureThetaArray <- array(ThetaKrig, dim = c(NKeep, 3, NNewVisits))
  Mu <- matrix(FutureThetaArray[ , 1 , ], ncol = NNewVisits)
  Tau2 <- matrix(exp(FutureThetaArray[ , 2 , ])^2, ncol = NNewVisits)
  Alpha <- matrix(exp(FutureThetaArray[ , 3 , ]), ncol = NNewVisits)
  colnames(Mu) <- paste0("mu", NewVisits + 1)
  colnames(Tau2) <- paste0("tau2", NewVisits + 1)
  colnames(Alpha) <- paste0("alpha", NewVisits + 1)
  LevelOneKrig <- list(mu = Mu, tau2 = Tau2, alpha = Alpha)

  ###Format Y samples for output
  FutureYArray <- array(t(YKrig), dim = c(NKeep, M, NNewVisits))
  PpdKrig <- list()
  for (i in 1:NNewVisits) PpdKrig[[i]] <- FutureYArray[ , , i]
  PpdKrig <- lapply(PpdKrig, f <- function(x) {colnames(x) <- paste0("loc", 1:M); return(x);})
  names(PpdKrig) <- paste0("y", NewVisits + 1)

  ###Return formatted samples
  return(list(MuTauAlpha = LevelOneKrig, Y = PpdKrig))

}
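## Usage sketch (not run; the fitted object name `reg.STBDwDM` is hypothetical,
## and the observed times in `datobj$Time` are assumed to be on the same scale
## as supplied to STBDwDM): krige 50 and 100 days beyond the last observed
## visit and inspect the output.
# NewTimes <- max(reg.STBDwDM$datobj$Time) + c(50, 100) / 365
# Pred <- predict(reg.STBDwDM, NewTimes)
# str(Pred$MuTauAlpha)  # kriged level-1 parameters (mu, tau2, alpha)
# str(Pred$Y)           # posterior predictive outcomes at the new visits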
/scratch/gouwar.j/cran-all/cranData/womblR/R/PRED_predict.R
# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 GetRooti <- function(Cov, Eye) { .Call('_womblR_GetRooti', PACKAGE = 'womblR', Cov, Eye) } JointCovarianceCube <- function(WAlphas, Tau2, EyeM, Rho, M, Nu) { .Call('_womblR_JointCovarianceCube', PACKAGE = 'womblR', WAlphas, Tau2, EyeM, Rho, M, Nu) } RootiLikelihoodCube <- function(JointCovariances, EyeM, M, Nu) { .Call('_womblR_RootiLikelihoodCube', PACKAGE = 'womblR', JointCovariances, EyeM, M, Nu) } SIGMA <- function(Phi, TempCorInd, TimeDist, Nu) { .Call('_womblR_SIGMA', PACKAGE = 'womblR', Phi, TempCorInd, TimeDist, Nu) } WAlphaCube <- function(Alpha, Z, W, M, Nu, WeightsInd) { .Call('_womblR_WAlphaCube', PACKAGE = 'womblR', Alpha, Z, W, M, Nu, WeightsInd) } GetLogLik <- function(DatObj_List, Para_List, DatAug_List, NKeep) { .Call('_womblR_GetLogLik', PACKAGE = 'womblR', DatObj_List, Para_List, DatAug_List, NKeep) } GetLogLikMean <- function(DatObj_List, Para_List, DatAug_List) { .Call('_womblR_GetLogLikMean', PACKAGE = 'womblR', DatObj_List, Para_List, DatAug_List) } SamplePPD <- function(DatObj_List, Para_List, NKeep) { .Call('_womblR_SamplePPD', PACKAGE = 'womblR', DatObj_List, Para_List, NKeep) } STBDwDM_Rcpp <- function(DatObj_List, HyPara_List, MetrObj_List, Para_List, DatAug_List, McmcObj_List, RawSamples, Interactive) { .Call('_womblR_STBDwDM_Rcpp', PACKAGE = 'womblR', DatObj_List, HyPara_List, MetrObj_List, Para_List, DatAug_List, McmcObj_List, RawSamples, Interactive) } ThetaKrigging <- function(DatObj_List, Para_List, NKeep) { .Call('_womblR_ThetaKrigging', PACKAGE = 'womblR', DatObj_List, Para_List, NKeep) } YKrigging <- function(DatObj_List, ThetaKrig, NKeep) { .Call('_womblR_YKrigging', PACKAGE = 'womblR', DatObj_List, ThetaKrig, NKeep) } CholInv <- function(Cov) { .Call('_womblR_CholInv', PACKAGE = 'womblR', Cov) } Inv3 <- function(A) { .Call('_womblR_Inv3', PACKAGE = 'womblR', A) } makeSymm <- function(A) { .Call('_womblR_makeSymm', PACKAGE = 'womblR', A) }
/scratch/gouwar.j/cran-all/cranData/womblR/R/RcppExports.R
#' Visual field series for one patient. #' #' A dataset containing 9 visual field series from a patient of the Vein Pulsation Study #' Trial in Glaucoma and the Lions Eye Institute trial registry, Perth, Western Australia. #' #' @usage data(VFSeries) #' #' @format A data frame with 486 rows and 4 variables: #' \describe{ #' \item{Visit}{The visual field test visit number, (1, 2, ... , 9).} #' \item{DLS}{The observed outcome variable, differential light sensitivity (DLS).} #' \item{Time}{The time of the visual field test (in days from baseline).} #' \item{Location}{The location on the visual field of a Humphrey Field Analyzer-II #' (Carl Zeiss Meditec Inc., Dublin, CA) (1, 2, ... , 54).} #' } #' @source \url{https://anzctr.org.au/Trial/Registration/TrialReview.aspx?ACTRN=12608000274370} "VFSeries"
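## Quick look at the data (not run): 54 locations recorded at each of the 9
## visits, i.e. 486 rows in total.
# data(VFSeries)
# with(VFSeries, table(Visit))
# str(VFSeries)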
/scratch/gouwar.j/cran-all/cranData/womblR/R/VFSeries-data.R
#' womblR
#'
#' This package implements a spatiotemporal boundary detection method
#' with a dissimilarity metric for areal data with inference in a Bayesian setting
#' using Markov chain Monte Carlo (MCMC). The response variable can be modeled using a
#' Gaussian (no nugget), probit or Tobit link, and spatial correlation is introduced
#' at each time point through a conditional autoregressive (CAR) prior. Temporal
#' correlation is introduced through a hierarchical structure and can be specified as
#' exponential or first-order autoregressive. Full details of the package can be found
#' in the accompanying vignette. The underlying method is described in
#' "Diagnosing Glaucoma Progression with Visual Field Data Using a Spatiotemporal Boundary
#' Detection Method", by Berchuck et al (2018), <arXiv:1805.11636>. The paper is in press
#' at the Journal of the American Statistical Association.
#'
#' @author Samuel I. Berchuck \email{[email protected]}
#'
#' @name womblR
#' @docType package
#' @import Rcpp
#' @importFrom graphics abline axis layout par plot points polygon title segments symbols rect text
#' @importFrom grDevices col2rgb colorRampPalette
#' @importFrom utils tail
#' @importFrom stats lm sd var
#' @importFrom msm rtnorm
#' @importFrom mvtnorm pmvnorm
#' @useDynLib womblR
NULL
/scratch/gouwar.j/cran-all/cranData/womblR/R/womblR-package.R
## ---- echo = FALSE------------------------------------------------------------ ###Start with a clean space # rm(list = ls()) ###Take care of some stuff that I don't want the user to see... # path.package <- "/Users/Sam/Documents/Sam/School/Dissertation/Packages/womblR/" # suppressMessages(devtools::load_all(path.package)) #loads scripts # suppressMessages(devtools::document(path.package)) #creates documentation ###Make sure to remove devtools from Suggests line in DESCRIPTION before submission ## ----------------------------------------------------------------------------- library(womblR) ## ----------------------------------------------------------------------------- head(VFSeries) ## ---- fig.align="center", fig.width = 5.5, fig.height = 5.5------------------- PlotVfTimeSeries(Y = VFSeries$DLS, Location = VFSeries$Location, Time = VFSeries$Time, main = "Visual field sensitivity time series \n at each location", xlab = "Days from baseline visit", ylab = "Differential light sensitivity (dB)", line.col = 1, line.type = 1, line.reg = FALSE) ## ----------------------------------------------------------------------------- blind_spot <- c(26, 35) # define blind spot VFSeries <- VFSeries[order(VFSeries$Location), ] # sort by location VFSeries <- VFSeries[order(VFSeries$Visit), ] # sort by visit VFSeries <- VFSeries[!VFSeries$Location %in% blind_spot, ] # remove blind spot locations Y <- VFSeries$DLS # define observed outcome data ## ----------------------------------------------------------------------------- Time <- unique(VFSeries$Time) / 365 # years since baseline visit print(Time) ## ----------------------------------------------------------------------------- W <- HFAII_Queen[-blind_spot, -blind_spot] # visual field adjacency matrix ## ----------------------------------------------------------------------------- DM <- GarwayHeath[-blind_spot] # Garway-Heath angles ## ---- fig.align="center", fig.width = 5.5, fig.height = 5.5------------------- PlotAdjacency(W = W, DM = DM, zlim = c(0, 180), Visit = NA, main = "Garway-Heath dissimilarity metric\n across the visual field") ## ----------------------------------------------------------------------------- TimeDist <- abs(outer(Time, Time, "-")) TimeDistVec <- TimeDist[lower.tri(TimeDist)] minDiff <- min(TimeDistVec) maxDiff <- max(TimeDistVec) PhiUpper <- -log(0.01) / minDiff # shortest diff goes down to 1% PhiLower <- -log(0.95) / maxDiff # longest diff goes up to 95% ## ----------------------------------------------------------------------------- Hypers <- list(Delta = list(MuDelta = c(3, 0, 0), OmegaDelta = diag(c(1000, 1000, 1))), T = list(Xi = 4, Psi = diag(3)), Phi = list(APhi = PhiLower, BPhi = PhiUpper)) ## ----------------------------------------------------------------------------- Starting <- list(Delta = c(3, 0, 0), T = diag(3), Phi = 0.5) ## ----------------------------------------------------------------------------- Nu <- length(Time) # calculate number of visits Tuning <- list(Theta2 = rep(1, Nu), Theta3 = rep(1, Nu), Phi = 1) ## ----------------------------------------------------------------------------- MCMC <- list(NBurn = 10000, NSims = 10000, NThin = 10, NPilot = 20) ## ---- include = FALSE--------------------------------------------------------- reg.STBDwDM <- STBDwDM(Y = Y, DM = DM, W = W, Time = Time, Starting = Starting, Hypers = Hypers, Tuning = Tuning, MCMC = MCMC) ## ---- eval = FALSE------------------------------------------------------------ # reg.STBDwDM <- STBDwDM(Y = Y, DM = DM, W = W, Time = Time, # Starting 
= Starting, Hypers = Hypers, Tuning = Tuning, MCMC = MCMC, # Family = "tobit", # TemporalStructure = "exponential", # Distance = "circumference", # Weights = "continuous", # Rho = 0.99, # ScaleY = 10, # ScaleDM = 100, # Seed = 54) # ## Burn-in progress: |*************************************************| # ## Sampler progress: 0%.. 10%.. 20%.. 30%.. 40%.. 50%.. 60%.. 70%.. 80%.. 90%.. 100%.. ## ----------------------------------------------------------------------------- names(reg.STBDwDM) ## ----------------------------------------------------------------------------- library(coda) ## ----------------------------------------------------------------------------- Mu <- as.mcmc(reg.STBDwDM$mu) Tau2 <- as.mcmc(reg.STBDwDM$tau2) Alpha <- as.mcmc(reg.STBDwDM$alpha) Delta <- as.mcmc(reg.STBDwDM$delta) T <- as.mcmc(reg.STBDwDM$T) Phi <- as.mcmc(reg.STBDwDM$phi) ## ---- fig.width = 7.18, fig.height = 5.2, echo = FALSE------------------------ par(mfrow = c(2, 3)) traceplot(Mu[, 1], ylab = expression(mu[1]), main = expression(paste("Posterior of " ~ mu[1]))) traceplot(Tau2[, 1], ylab = expression(tau[1]^2), main = expression(paste("Posterior of " ~ tau[1]^2))) traceplot(Alpha[, 1], ylab = expression(alpha[1]), main = expression(paste("Posterior of " ~ alpha[1]))) traceplot(Delta[, 1], ylab = expression(delta[1]), main = expression(paste("Posterior of " ~ delta[1]))) traceplot(T[, 1], ylab = expression(paste(T['1,1'])), main = expression(paste("Posterior of " ~ T['1,1']))) traceplot(Phi, ylab = expression(phi), main = expression(paste("Posterior" ~ phi))) ## ---- echo = FALSE------------------------------------------------------------ c(geweke.diag(Mu)$z[1], geweke.diag(Tau2)$z[1], geweke.diag(Alpha)$z[1],geweke.diag(Delta)$z[1], geweke.diag(T)$z[1],geweke.diag(Phi)$z) ## ---- echo = FALSE, fig.width = 7.18, fig.height = 2.6------------------------ Xlab = "Days from baseline visit" X = Time * 365 par(mfcol = c(1, 3)) plot(X, apply(Mu, 2, mean), main = expression(paste("Posterior mean of ",mu," over time")), ylab = expression(mu), xlab = Xlab) plot(X, apply(Tau2, 2, mean), main = expression(paste("Posterior mean of ",tau^2," over time")), ylab = expression(tau^2), xlab = Xlab) plot(X, apply(Alpha, 2, mean), main = expression(paste("Posterior mean of ",alpha," over time")), ylab = expression(alpha), xlab = Xlab) ## ----------------------------------------------------------------------------- CVAlpha <- apply(Alpha, 1, cv <- function(x) sd(x) / mean(x)) STCV <- c(mean(CVAlpha), sd(CVAlpha), quantile(CVAlpha, probs = c(0.025, 0.975))) names(STCV)[1:2] <- c("Mean", "SD") print(STCV) ## ----------------------------------------------------------------------------- Wij <- PosteriorAdj(reg.STBDwDM) ## ----------------------------------------------------------------------------- Wij[1:6, 1:7] ## ---- fig.align="center", fig.width = 4.5, fig.height = 4.5------------------- ColorScheme1 <- c("Black", "#636363", "#bdbdbd", "#f0f0f0", "White") PlotAdjacency(Wij, Visit = 3, stat = "mean", main = "Posterior mean adjacencies at \n visit 3 across the visual field", color.scheme = ColorScheme1) ## ---- fig.align="center", fig.width = 4.5, fig.height = 4.5------------------- ColorScheme2 <- rev(ColorScheme1) zlimSD <- quantile(Wij[,c(5,7,9,11,13,15,17,19,21)], probs = c(0, 1)) PlotAdjacency(Wij, Visit = 4, stat = "sd", main = "Posterior SD of adjacencies at \n visit 4 across the visual field", zlim = zlimSD, color.scheme = ColorScheme2) ## ----------------------------------------------------------------------------- 
Diags <- diagnostics(reg.STBDwDM, diags = c("dic", "dinf", "waic"), keepDeviance = TRUE) ## ---- fig.align = 'center', fig.width = 4, fig.height = 3.3------------------- Deviance <- as.mcmc(Diags$deviance) traceplot(Deviance, ylab = "Deviance", main = "Posterior Deviance") ## ---- eval = FALSE------------------------------------------------------------ # print(Diags) ## ---- echo = FALSE------------------------------------------------------------ unlist(Diags$dic) unlist(Diags$dinf) unlist(Diags$waic) ## ----------------------------------------------------------------------------- NewTimes <- Time[Nu] + c(50, 100) / 365 ## ----------------------------------------------------------------------------- Predictions <- predict(reg.STBDwDM, NewTimes) ## ----------------------------------------------------------------------------- names(Predictions) ## ----------------------------------------------------------------------------- names(Predictions$MuTauAlpha) head(Predictions$MuTauAlpha$alpha) ## ----------------------------------------------------------------------------- names(Predictions$Y) ## ---- fig.align = 'center', fig.width = 5.5, fig.height = 5.5----------------- PlotSensitivity(Y = apply(Predictions$Y$y10, 2, median), main = "Sensitivity estimate (dB) at each \n location on visual field", legend.lab = "DLS (dB)", legend.round = 2, bins = 250, border = FALSE)
/scratch/gouwar.j/cran-all/cranData/womblR/inst/doc/womblR-example.R
--- title: "Introduction to using R package: `womblR`" author: "Samuel I. Berchuck" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{womblR-example} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ## Use of `womblR` This is a brief description of how to use the `womblR` package within the context of glaucoma progression. We begin by loading the package. ```{r, echo = FALSE} ###Start with a clean space # rm(list = ls()) ###Take care of some stuff that I don't want the user to see... # path.package <- "/Users/Sam/Documents/Sam/School/Dissertation/Packages/womblR/" # suppressMessages(devtools::load_all(path.package)) #loads scripts # suppressMessages(devtools::document(path.package)) #creates documentation ###Make sure to remove devtools from Suggests line in DESCRIPTION before submission ``` ```{r} library(womblR) ``` In the `womblR` package there is a longitudinal series of visual fields that we will use to exemplify the statistical models contained in the package. The data object is called `VFSeries` and has four variables, `Visit`, `DLS`, `Time` and `Location`. The data object loads automatically; here's what the data looks like, ```{r} head(VFSeries) ``` The variable `Visit` represents the visual field test visit number, `DLS` the observed outcome variable, differential light sensitvity, `Time` the time of the visual field test (in days from baseline visit) and `Location` the spatial location on the visual field that the observation occured. To help illuminate visual field data we can use the `PlotVFTimeSeries` function. `PlotVFTimeSeries` is a function that plots the observered visual field data over time at each location on the visual field. ```{r, fig.align="center", fig.width = 5.5, fig.height = 5.5} PlotVfTimeSeries(Y = VFSeries$DLS, Location = VFSeries$Location, Time = VFSeries$Time, main = "Visual field sensitivity time series \n at each location", xlab = "Days from baseline visit", ylab = "Differential light sensitivity (dB)", line.col = 1, line.type = 1, line.reg = FALSE) ``` The figure above demonstrates the visual field from a Humphrey Field Analyzer-II testing machine, which generates 54 spatial locations (only 52 informative locations, note the 2 blanks spots corresponding to the blind spot). At each visual field test a patient is assessed for vision loss. ## Format data for `STBDwDM` We can now begin to think about preparing objects for use in the the Spatiotemporal Boundary Detection with Dissimilarity Metric model function (`STBDwDM`). According to the manual, the observed data `Y` must be first ordered spatially and then temporally. Furthermore, we will remove all locations that correspond to the natural blind spot (which in the Humphrey Field Analyzer-II correspond to locations 26 and 35). ```{r} blind_spot <- c(26, 35) # define blind spot VFSeries <- VFSeries[order(VFSeries$Location), ] # sort by location VFSeries <- VFSeries[order(VFSeries$Visit), ] # sort by visit VFSeries <- VFSeries[!VFSeries$Location %in% blind_spot, ] # remove blind spot locations Y <- VFSeries$DLS # define observed outcome data ``` Now that we have assigned the observed outcomed `Y` we move onto the temporal variable `Time`. For visual field data we define this to be the time from the baseline visit. We obtain the unique days from the baseline visit and scale them to be on the year scale. 
```{r} Time <- unique(VFSeries$Time) / 365 # years since baseline visit print(Time) ``` Our example patient has nine visual field visits and the last visit occured 2.57 years after the baseline visit. ## Adjacency matrix and dissimilarity metric We now specify the adjacency matrix, `W`, and dissimilarity metric, `DM`. There are three adjacency matrices for the Humphrey Field Analyzer-II visual field that are supplied by the `womblR` package, `HFAII_Queen`, `HFAII_QueenHF`, and `HFAII_Rook`. `HFAII_Queen` and `HFAII_QueenHF` both define adjacencies as edges and corners (i.e., the movements of a queen in chess), while `HFAII_Rook` only defines an adjacency as a neighbor that shares an edge (i.e., a rook in chess). The `HFAII_QueenHF` adjacency matrix does not allow neighbors to share information between the northern and southern hemispheres of the visual field. In this analysis we use the standard queen specification. The adjacency objects are preloaded and contain the blind spot, so we define our adjacency matrix as follows. ```{r} W <- HFAII_Queen[-blind_spot, -blind_spot] # visual field adjacency matrix ``` Now we turn our attention to assigning a dissimilarity metric. The dissimilarity metric we use in this data application are the Garway-Heath angles that describe the underlying location that the retinal nerve fibers enter the optic disc. These angles (measured in degrees) are included with `womblR` in the object `GarwayHeath`. We create the dissimilarity metric object `DM`. ```{r} DM <- GarwayHeath[-blind_spot] # Garway-Heath angles ``` The `womblR` package provides a plotting function `PlotAdjacency` that can be used to display a dissimilarity metric over the spatial structure of the visual field. We demonstrate it using the Garway-Heath angles. ```{r, fig.align="center", fig.width = 5.5, fig.height = 5.5} PlotAdjacency(W = W, DM = DM, zlim = c(0, 180), Visit = NA, main = "Garway-Heath dissimilarity metric\n across the visual field") ``` Now that we have specified the data objects `Y`, `DM`, `W` and `Time`, we will customize the objects that characterize Bayesian Markov chain Monte Carlo (MCMC) methods, in particular hyperparameters, starting values, metroplis tuning values and MCMC inputs. ## MCMC Characteristics We begin be specifying the hyperparameters for the model. The parameter $\phi$ is uniformly distributed with bounds, $a_{\phi}$ and $b_{\phi}$. The bounds for $\phi$ cannot be specified arbitrarily since it is important to account for the magnitude of time elapsed. We specify the following upper and lower bounds for $\phi$ to dictate temporal correlation close to independence or strong correlation, resulting in a weakly informative prior distribution. ```{r} TimeDist <- abs(outer(Time, Time, "-")) TimeDistVec <- TimeDist[lower.tri(TimeDist)] minDiff <- min(TimeDistVec) maxDiff <- max(TimeDistVec) PhiUpper <- -log(0.01) / minDiff # shortest diff goes down to 1% PhiLower <- -log(0.95) / maxDiff # longest diff goes up to 95% ``` Then, we can create a hyperparameters `list` object, `Hypers`, that can be used for `STBDwDM`. 
```{r} Hypers <- list(Delta = list(MuDelta = c(3, 0, 0), OmegaDelta = diag(c(1000, 1000, 1))), T = list(Xi = 4, Psi = diag(3)), Phi = list(APhi = PhiLower, BPhi = PhiUpper)) ``` Here, $\delta$ has a multivariate normal distribution with mean parameter $\boldsymbol{\mu}_{\delta}$ and covariance, $\boldsymbol{\Omega}_{\delta}$ and $\mathbf{T}$ has an inverse-Wishart distribution with degrees of freedom $\xi$ and scale matrix, $\Psi$ (See the help manual for `STBDwDM` for further details). Specify a `list` object, `Starting`, that contains the starting values for the hyperparameters. ```{r} Starting <- list(Delta = c(3, 0, 0), T = diag(3), Phi = 0.5) ``` Provide tuning parameters for the metropolis steps in the MCMC sampler. ```{r} Nu <- length(Time) # calculate number of visits Tuning <- list(Theta2 = rep(1, Nu), Theta3 = rep(1, Nu), Phi = 1) ``` We set `Tuning` to the default setting of all ones and let the pilot adaptation in the burn-in phase tune the acceptance rates to the appropriate range. Finally, we set the MCMC inputs using the `MCMC` list object. ```{r} MCMC <- list(NBurn = 10000, NSims = 10000, NThin = 10, NPilot = 20) ``` We specify that our model will run for a burn-in period of 10,000 scans, followed by 10,000 scans after burn-in. In the burn-in period there will be 20 iterations of pilot adaptation evenly spaced out over the period. Finally, the final number of samples to be used for inference will be thinned down to 1,000 based on the thinning number of 10. We suggest running the sampler 250,000 iterations after burn-in, but in the vignette we are limited by compilation time. ## Spatiotemporal boundary dection with dissimilarity metric model We have now specified all model objects and are prepared to implement the `STBDwDM` regression object. To demonstrate the `STBDwDM` object we will use all of its options, even those that are being used in their default settings. ```{r, include = FALSE} reg.STBDwDM <- STBDwDM(Y = Y, DM = DM, W = W, Time = Time, Starting = Starting, Hypers = Hypers, Tuning = Tuning, MCMC = MCMC) ``` ```{r, eval = FALSE} reg.STBDwDM <- STBDwDM(Y = Y, DM = DM, W = W, Time = Time, Starting = Starting, Hypers = Hypers, Tuning = Tuning, MCMC = MCMC, Family = "tobit", TemporalStructure = "exponential", Distance = "circumference", Weights = "continuous", Rho = 0.99, ScaleY = 10, ScaleDM = 100, Seed = 54) ## Burn-in progress: |*************************************************| ## Sampler progress: 0%.. 10%.. 20%.. 30%.. 40%.. 50%.. 60%.. 70%.. 80%.. 90%.. 100%.. ``` The first line of arguments are the data objects, `Y`, `DM`, `W`, and `Time`. These objects must be specified for `STBDwDM` to run. The second line of objects are the MCMC characteristics objects we defined previously. These objects do not need to be defined for `STBDwDM` to function, but are provided for the user to custimize the model to their choosing. If they are not provided, defaults are given. Next, we specify that `Family` be equal to `tobit` since we know that visual field data is censored. Furthermore, we specify `TemporalStructure` to be the `exponential` temporal correlation structure. Our distance metric on the visual field is based on the circumference of the optic disc, so we define `Distance` to be `circumference`. Then, the adjacency weights are specified to be `continuous`, as opposed to the `binary` specification of Lee and Mitchell (2011). Finally, we define the following scalar variables, `Rho`, `ScaleY`, `ScaleDM`, and `Seed`, which are defined in the manual for `STBDwDM`. 
The following are the returned objects from `STBDwDM`. ```{r} names(reg.STBDwDM) ``` The object `reg.STBDwDM` contains raw MCMC samples for parameters $\mu_t$ (`mu`), $\tau_t^2$ (`tau2`), $\alpha_{tGH}$ (`alpha`), $\boldsymbol{\delta}$ (`delta`), $\mathbf{T}$ (`T`) and $\phi$ (`phi`), metropolis acceptance rates and final tuning parameters (`metropolis`) and model runtime (`runtime`). The objects `datobj` and `dataug` can be ignored as they are for later use in secondary functions. ## Assessing model convergence Before analyzing the raw MCMC samples from our model we want to verify that there are no convergence issues. We begin by loading the `coda` package. ```{r} library(coda) ``` Then we convert the raw `STBDwDM` MCMC objects to `coda` package `mcmc` objects. ```{r} Mu <- as.mcmc(reg.STBDwDM$mu) Tau2 <- as.mcmc(reg.STBDwDM$tau2) Alpha <- as.mcmc(reg.STBDwDM$alpha) Delta <- as.mcmc(reg.STBDwDM$delta) T <- as.mcmc(reg.STBDwDM$T) Phi <- as.mcmc(reg.STBDwDM$phi) ``` We begin by checking traceplots of the parameters. For conciseness, we present one traceplot for each parameter type. ```{r, fig.width = 7.18, fig.height = 5.2, echo = FALSE} par(mfrow = c(2, 3)) traceplot(Mu[, 1], ylab = expression(mu[1]), main = expression(paste("Posterior of " ~ mu[1]))) traceplot(Tau2[, 1], ylab = expression(tau[1]^2), main = expression(paste("Posterior of " ~ tau[1]^2))) traceplot(Alpha[, 1], ylab = expression(alpha[1]), main = expression(paste("Posterior of " ~ alpha[1]))) traceplot(Delta[, 1], ylab = expression(delta[1]), main = expression(paste("Posterior of " ~ delta[1]))) traceplot(T[, 1], ylab = expression(paste(T['1,1'])), main = expression(paste("Posterior of " ~ T['1,1']))) traceplot(Phi, ylab = expression(phi), main = expression(paste("Posterior" ~ phi))) ``` From the figure, it is clear that the traceplots exhibit some poor behavior. However, these traceplots are nicely behaved considering the number of iterations the MCMC sampler ran. The traceplots demonstrate that the parameters have converged to their stationary distribution, but still need more samples to rid themselves of autocorrelation. Finally, we present the corresponding test statistics from the Geweke diagnostic test. ```{r, echo = FALSE} c(geweke.diag(Mu)$z[1], geweke.diag(Tau2)$z[1], geweke.diag(Alpha)$z[1],geweke.diag(Delta)$z[1], geweke.diag(T)$z[1],geweke.diag(Phi)$z) ``` Since none of these test statistics are terribly large in the absolute value there is not strong evidence that our model did not converge. ## Post model fit analysis Once we have verified that we do not have any convergence issues, we can begin to think about analyzing the raw MCMC samples. A nice summary for `STBDwDM` is to plot the posterior mean of each of the level 1 parameters over time. ```{r, echo = FALSE, fig.width = 7.18, fig.height = 2.6} Xlab = "Days from baseline visit" X = Time * 365 par(mfcol = c(1, 3)) plot(X, apply(Mu, 2, mean), main = expression(paste("Posterior mean of ",mu," over time")), ylab = expression(mu), xlab = Xlab) plot(X, apply(Tau2, 2, mean), main = expression(paste("Posterior mean of ",tau^2," over time")), ylab = expression(tau^2), xlab = Xlab) plot(X, apply(Alpha, 2, mean), main = expression(paste("Posterior mean of ",alpha," over time")), ylab = expression(alpha), xlab = Xlab) ``` This figure gives a nice summary of the model findings. In particular, the plot of the $\alpha_{tGH}$ demonstrate a non-linear trend and the capabilty of `STBDwDM` to smooth temporal effects. 
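In addition to the posterior means, pointwise credible intervals are straightforward to obtain from the `mcmc` objects. The small sketch below uses the 2.5% and 97.5% posterior quantiles of $\mu_t$ as an example.

```{r}
MuCI <- apply(Mu, 2, quantile, probs = c(0.025, 0.975))
MuCI[, 1:4] # intervals for the first four visits
```
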
We now demonstrate how to calculate the posterior distribution of the coefficient of variation (cv) of $\alpha_{tGH}$.

```{r}
CVAlpha <- apply(Alpha, 1, cv <- function(x) sd(x) / mean(x))
STCV <- c(mean(CVAlpha), sd(CVAlpha), quantile(CVAlpha, probs = c(0.025, 0.975)))
names(STCV)[1:2] <- c("Mean", "SD")
print(STCV)
```

STCV (i.e., the posterior mean) was shown to be predictive of glaucoma progression, so it is important to be able to compute this value. Here STCV is calculated to be `r round(STCV[1],2)`.

Another component of the model that is important to explore is the set of adjacencies themselves, $w_{ij}$. As a function of $\alpha_{tGH}$ these adjacencies can be calculated generally, and the `womblR` package provides the function `PosteriorAdj` to compute them.

```{r}
Wij <- PosteriorAdj(reg.STBDwDM)
```

The `PosteriorAdj` function takes in the `STBDwDM` regression object and returns a `PosteriorAdj` object that contains the posterior mean and standard deviation for each adjacency at each visit.

```{r}
Wij[1:6, 1:7]
```

For visual field data, the function `PlotAdjacency` can be used to plot the mean and standard deviations of the adjacencies at each of the visits over the visual field surface. We plot the mean adjacencies at visit 3.

```{r, fig.align="center", fig.width = 4.5, fig.height = 4.5}
ColorScheme1 <- c("Black", "#636363", "#bdbdbd", "#f0f0f0", "White")
PlotAdjacency(Wij, Visit = 3, stat = "mean",
              main = "Posterior mean adjacencies at \n visit 3 across the visual field",
              color.scheme = ColorScheme1)
```

And now, we plot the standard deviation of the adjacencies at visit 4.

```{r, fig.align="center", fig.width = 4.5, fig.height = 4.5}
ColorScheme2 <- rev(ColorScheme1)
zlimSD <- quantile(Wij[,c(5,7,9,11,13,15,17,19,21)], probs = c(0, 1))
PlotAdjacency(Wij, Visit = 4, stat = "sd",
              main = "Posterior SD of adjacencies at \n visit 4 across the visual field",
              zlim = zlimSD, color.scheme = ColorScheme2)
```

The function `PlotAdjacency` provides a visual tool for assessing change on the visual field over time.

## Compute diagnostics

The `diagnostics` function in the `womblR` package can be used to calculate various diagnostic metrics. The function takes in the `STBDwDM` regression object.

```{r}
Diags <- diagnostics(reg.STBDwDM, diags = c("dic", "dinf", "waic"), keepDeviance = TRUE)
```

The `diagnostics` function calculates diagnostics that depend on both the log-likelihood and posterior predictive distribution. So, if any of these diagnostics are specified, one or both of these must be sampled from. The `keepDeviance` and `keepPPD` arguments indicate whether or not these distributions should be saved for the user. We indicate that we would like the output to be saved for the log-likelihood (i.e., deviance). We explore the output by looking at the traceplot of the deviance.

```{r, fig.align = 'center', fig.width = 4, fig.height = 3.3}
Deviance <- as.mcmc(Diags$deviance)
traceplot(Deviance, ylab = "Deviance", main = "Posterior Deviance")
```

This distribution has converged nicely, which is not surprising, given that the other model parameters have converged. Now we can look at the diagnostics.

```{r, eval = FALSE}
print(Diags)
```

```{r, echo = FALSE}
unlist(Diags$dic)
unlist(Diags$dinf)
unlist(Diags$waic)
```

## Future prediction

The `womblR` package provides the `predict.STBDwDM` function for sampling from the posterior predictive distribution at future time points of the observed data.
This is different from the posterior predictive distribution obtained from the `diagnostics` function, because that distribution is for the observed time points and is automatically obtained given the posterior samples from `STBDwDM`. In order to obtain future samples, you first need samples from the posterior distribution of the future $\mu_t$, $\tau_t^2$, and $\alpha_t$ parameters. The `predict.STBDwDM` function first samples these parameters and then samples from the future distribution of the observed outcome variable, returning both. We begin by specifying the future time points we want to predict as 50 and 100 days past the most recent visit.

```{r}
NewTimes <- Time[Nu] + c(50, 100) / 365
```

Then, we use `predict.STBDwDM` to calculate the future posterior predictive distribution.

```{r}
Predictions <- predict(reg.STBDwDM, NewTimes)
```

We can see that `predict.STBDwDM` returns a `list` containing two `lists`.

```{r}
names(Predictions)
```

The object `MuTauAlpha` is a `list` containing three matrices with the posterior distributions of the future level 1 parameters.

```{r}
names(Predictions$MuTauAlpha)
head(Predictions$MuTauAlpha$alpha)
```

While the object `Y` is a `list` containing one matrix for each of the new future time points (here: 2).

```{r}
names(Predictions$Y)
```

You can plot a heat map representation of the posterior predictive distribution using the function `PlotSensitivity`.

```{r, fig.align = 'center', fig.width = 5.5, fig.height = 5.5}
PlotSensitivity(Y = apply(Predictions$Y$y10, 2, median),
                main = "Sensitivity estimate (dB) at each \n location on visual field",
                legend.lab = "DLS (dB)", legend.round = 2,
                bins = 250, border = FALSE)
```

This figure shows the median posterior predictive heat map over the visual field at the future visit 50 days past the final observed visit. The `PlotSensitivity` function can be used for plotting any observations on the visual field surface.
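For example, the same call can be used to display the raw sensitivities from the final observed visit (a sketch, not evaluated here; it assumes `VFSeries` is still sorted and filtered as above so that the 52 values line up with the informative locations).

```{r, eval = FALSE}
PlotSensitivity(Y = VFSeries$DLS[VFSeries$Visit == max(VFSeries$Visit)],
                main = "Observed DLS (dB) at the final visit",
                legend.lab = "DLS (dB)", legend.round = 2,
                bins = 250, border = FALSE)
```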
/scratch/gouwar.j/cran-all/cranData/womblR/inst/doc/womblR-example.Rmd
--- title: "Introduction to using R package: `womblR`" author: "Samuel I. Berchuck" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{womblR-example} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ## Use of `womblR` This is a brief description of how to use the `womblR` package within the context of glaucoma progression. We begin by loading the package. ```{r, echo = FALSE} ###Start with a clean space # rm(list = ls()) ###Take care of some stuff that I don't want the user to see... # path.package <- "/Users/Sam/Documents/Sam/School/Dissertation/Packages/womblR/" # suppressMessages(devtools::load_all(path.package)) #loads scripts # suppressMessages(devtools::document(path.package)) #creates documentation ###Make sure to remove devtools from Suggests line in DESCRIPTION before submission ``` ```{r} library(womblR) ``` In the `womblR` package there is a longitudinal series of visual fields that we will use to exemplify the statistical models contained in the package. The data object is called `VFSeries` and has four variables, `Visit`, `DLS`, `Time` and `Location`. The data object loads automatically; here's what the data looks like, ```{r} head(VFSeries) ``` The variable `Visit` represents the visual field test visit number, `DLS` the observed outcome variable, differential light sensitvity, `Time` the time of the visual field test (in days from baseline visit) and `Location` the spatial location on the visual field that the observation occured. To help illuminate visual field data we can use the `PlotVFTimeSeries` function. `PlotVFTimeSeries` is a function that plots the observered visual field data over time at each location on the visual field. ```{r, fig.align="center", fig.width = 5.5, fig.height = 5.5} PlotVfTimeSeries(Y = VFSeries$DLS, Location = VFSeries$Location, Time = VFSeries$Time, main = "Visual field sensitivity time series \n at each location", xlab = "Days from baseline visit", ylab = "Differential light sensitivity (dB)", line.col = 1, line.type = 1, line.reg = FALSE) ``` The figure above demonstrates the visual field from a Humphrey Field Analyzer-II testing machine, which generates 54 spatial locations (only 52 informative locations, note the 2 blanks spots corresponding to the blind spot). At each visual field test a patient is assessed for vision loss. ## Format data for `STBDwDM` We can now begin to think about preparing objects for use in the the Spatiotemporal Boundary Detection with Dissimilarity Metric model function (`STBDwDM`). According to the manual, the observed data `Y` must be first ordered spatially and then temporally. Furthermore, we will remove all locations that correspond to the natural blind spot (which in the Humphrey Field Analyzer-II correspond to locations 26 and 35). ```{r} blind_spot <- c(26, 35) # define blind spot VFSeries <- VFSeries[order(VFSeries$Location), ] # sort by location VFSeries <- VFSeries[order(VFSeries$Visit), ] # sort by visit VFSeries <- VFSeries[!VFSeries$Location %in% blind_spot, ] # remove blind spot locations Y <- VFSeries$DLS # define observed outcome data ``` Now that we have assigned the observed outcomed `Y` we move onto the temporal variable `Time`. For visual field data we define this to be the time from the baseline visit. We obtain the unique days from the baseline visit and scale them to be on the year scale. 
```{r} Time <- unique(VFSeries$Time) / 365 # years since baseline visit print(Time) ``` Our example patient has nine visual field visits and the last visit occured 2.57 years after the baseline visit. ## Adjacency matrix and dissimilarity metric We now specify the adjacency matrix, `W`, and dissimilarity metric, `DM`. There are three adjacency matrices for the Humphrey Field Analyzer-II visual field that are supplied by the `womblR` package, `HFAII_Queen`, `HFAII_QueenHF`, and `HFAII_Rook`. `HFAII_Queen` and `HFAII_QueenHF` both define adjacencies as edges and corners (i.e., the movements of a queen in chess), while `HFAII_Rook` only defines an adjacency as a neighbor that shares an edge (i.e., a rook in chess). The `HFAII_QueenHF` adjacency matrix does not allow neighbors to share information between the northern and southern hemispheres of the visual field. In this analysis we use the standard queen specification. The adjacency objects are preloaded and contain the blind spot, so we define our adjacency matrix as follows. ```{r} W <- HFAII_Queen[-blind_spot, -blind_spot] # visual field adjacency matrix ``` Now we turn our attention to assigning a dissimilarity metric. The dissimilarity metric we use in this data application are the Garway-Heath angles that describe the underlying location that the retinal nerve fibers enter the optic disc. These angles (measured in degrees) are included with `womblR` in the object `GarwayHeath`. We create the dissimilarity metric object `DM`. ```{r} DM <- GarwayHeath[-blind_spot] # Garway-Heath angles ``` The `womblR` package provides a plotting function `PlotAdjacency` that can be used to display a dissimilarity metric over the spatial structure of the visual field. We demonstrate it using the Garway-Heath angles. ```{r, fig.align="center", fig.width = 5.5, fig.height = 5.5} PlotAdjacency(W = W, DM = DM, zlim = c(0, 180), Visit = NA, main = "Garway-Heath dissimilarity metric\n across the visual field") ``` Now that we have specified the data objects `Y`, `DM`, `W` and `Time`, we will customize the objects that characterize Bayesian Markov chain Monte Carlo (MCMC) methods, in particular hyperparameters, starting values, metroplis tuning values and MCMC inputs. ## MCMC Characteristics We begin be specifying the hyperparameters for the model. The parameter $\phi$ is uniformly distributed with bounds, $a_{\phi}$ and $b_{\phi}$. The bounds for $\phi$ cannot be specified arbitrarily since it is important to account for the magnitude of time elapsed. We specify the following upper and lower bounds for $\phi$ to dictate temporal correlation close to independence or strong correlation, resulting in a weakly informative prior distribution. ```{r} TimeDist <- abs(outer(Time, Time, "-")) TimeDistVec <- TimeDist[lower.tri(TimeDist)] minDiff <- min(TimeDistVec) maxDiff <- max(TimeDistVec) PhiUpper <- -log(0.01) / minDiff # shortest diff goes down to 1% PhiLower <- -log(0.95) / maxDiff # longest diff goes up to 95% ``` Then, we can create a hyperparameters `list` object, `Hypers`, that can be used for `STBDwDM`. 
```{r} Hypers <- list(Delta = list(MuDelta = c(3, 0, 0), OmegaDelta = diag(c(1000, 1000, 1))), T = list(Xi = 4, Psi = diag(3)), Phi = list(APhi = PhiLower, BPhi = PhiUpper)) ``` Here, $\delta$ has a multivariate normal distribution with mean parameter $\boldsymbol{\mu}_{\delta}$ and covariance, $\boldsymbol{\Omega}_{\delta}$ and $\mathbf{T}$ has an inverse-Wishart distribution with degrees of freedom $\xi$ and scale matrix, $\Psi$ (See the help manual for `STBDwDM` for further details). Specify a `list` object, `Starting`, that contains the starting values for the hyperparameters. ```{r} Starting <- list(Delta = c(3, 0, 0), T = diag(3), Phi = 0.5) ``` Provide tuning parameters for the metropolis steps in the MCMC sampler. ```{r} Nu <- length(Time) # calculate number of visits Tuning <- list(Theta2 = rep(1, Nu), Theta3 = rep(1, Nu), Phi = 1) ``` We set `Tuning` to the default setting of all ones and let the pilot adaptation in the burn-in phase tune the acceptance rates to the appropriate range. Finally, we set the MCMC inputs using the `MCMC` list object. ```{r} MCMC <- list(NBurn = 10000, NSims = 10000, NThin = 10, NPilot = 20) ``` We specify that our model will run for a burn-in period of 10,000 scans, followed by 10,000 scans after burn-in. In the burn-in period there will be 20 iterations of pilot adaptation evenly spaced out over the period. Finally, the final number of samples to be used for inference will be thinned down to 1,000 based on the thinning number of 10. We suggest running the sampler 250,000 iterations after burn-in, but in the vignette we are limited by compilation time. ## Spatiotemporal boundary dection with dissimilarity metric model We have now specified all model objects and are prepared to implement the `STBDwDM` regression object. To demonstrate the `STBDwDM` object we will use all of its options, even those that are being used in their default settings. ```{r, include = FALSE} reg.STBDwDM <- STBDwDM(Y = Y, DM = DM, W = W, Time = Time, Starting = Starting, Hypers = Hypers, Tuning = Tuning, MCMC = MCMC) ``` ```{r, eval = FALSE} reg.STBDwDM <- STBDwDM(Y = Y, DM = DM, W = W, Time = Time, Starting = Starting, Hypers = Hypers, Tuning = Tuning, MCMC = MCMC, Family = "tobit", TemporalStructure = "exponential", Distance = "circumference", Weights = "continuous", Rho = 0.99, ScaleY = 10, ScaleDM = 100, Seed = 54) ## Burn-in progress: |*************************************************| ## Sampler progress: 0%.. 10%.. 20%.. 30%.. 40%.. 50%.. 60%.. 70%.. 80%.. 90%.. 100%.. ``` The first line of arguments are the data objects, `Y`, `DM`, `W`, and `Time`. These objects must be specified for `STBDwDM` to run. The second line of objects are the MCMC characteristics objects we defined previously. These objects do not need to be defined for `STBDwDM` to function, but are provided for the user to custimize the model to their choosing. If they are not provided, defaults are given. Next, we specify that `Family` be equal to `tobit` since we know that visual field data is censored. Furthermore, we specify `TemporalStructure` to be the `exponential` temporal correlation structure. Our distance metric on the visual field is based on the circumference of the optic disc, so we define `Distance` to be `circumference`. Then, the adjacency weights are specified to be `continuous`, as opposed to the `binary` specification of Lee and Mitchell (2011). Finally, we define the following scalar variables, `Rho`, `ScaleY`, `ScaleDM`, and `Seed`, which are defined in the manual for `STBDwDM`. 
The following are the returned objects from `STBDwDM`. ```{r} names(reg.STBDwDM) ``` The object `reg.STBDwDM` contains raw MCMC samples for parameters $\mu_t$ (`mu`), $\tau_t^2$ (`tau2`), $\alpha_{tGH}$ (`alpha`), $\boldsymbol{\delta}$ (`delta`), $\mathbf{T}$ (`T`) and $\phi$ (`phi`), metropolis acceptance rates and final tuning parameters (`metropolis`) and model runtime (`runtime`). The objects `datobj` and `dataug` can be ignored as they are for later use in secondary functions. ## Assessing model convergence Before analyzing the raw MCMC samples from our model we want to verify that there are no convergence issues. We begin by loading the `coda` package. ```{r} library(coda) ``` Then we convert the raw `STBDwDM` MCMC objects to `coda` package `mcmc` objects. ```{r} Mu <- as.mcmc(reg.STBDwDM$mu) Tau2 <- as.mcmc(reg.STBDwDM$tau2) Alpha <- as.mcmc(reg.STBDwDM$alpha) Delta <- as.mcmc(reg.STBDwDM$delta) T <- as.mcmc(reg.STBDwDM$T) Phi <- as.mcmc(reg.STBDwDM$phi) ``` We begin by checking traceplots of the parameters. For conciseness, we present one traceplot for each parameter type. ```{r, fig.width = 7.18, fig.height = 5.2, echo = FALSE} par(mfrow = c(2, 3)) traceplot(Mu[, 1], ylab = expression(mu[1]), main = expression(paste("Posterior of " ~ mu[1]))) traceplot(Tau2[, 1], ylab = expression(tau[1]^2), main = expression(paste("Posterior of " ~ tau[1]^2))) traceplot(Alpha[, 1], ylab = expression(alpha[1]), main = expression(paste("Posterior of " ~ alpha[1]))) traceplot(Delta[, 1], ylab = expression(delta[1]), main = expression(paste("Posterior of " ~ delta[1]))) traceplot(T[, 1], ylab = expression(paste(T['1,1'])), main = expression(paste("Posterior of " ~ T['1,1']))) traceplot(Phi, ylab = expression(phi), main = expression(paste("Posterior" ~ phi))) ``` From the figure, it is clear that the traceplots exhibit some poor behavior. However, these traceplots are nicely behaved considering the number of iterations the MCMC sampler ran. The traceplots demonstrate that the parameters have converged to their stationary distribution, but still need more samples to rid themselves of autocorrelation. Finally, we present the corresponding test statistics from the Geweke diagnostic test. ```{r, echo = FALSE} c(geweke.diag(Mu)$z[1], geweke.diag(Tau2)$z[1], geweke.diag(Alpha)$z[1],geweke.diag(Delta)$z[1], geweke.diag(T)$z[1],geweke.diag(Phi)$z) ``` Since none of these test statistics are terribly large in the absolute value there is not strong evidence that our model did not converge. ## Post model fit analysis Once we have verified that we do not have any convergence issues, we can begin to think about analyzing the raw MCMC samples. A nice summary for `STBDwDM` is to plot the posterior mean of each of the level 1 parameters over time. ```{r, echo = FALSE, fig.width = 7.18, fig.height = 2.6} Xlab = "Days from baseline visit" X = Time * 365 par(mfcol = c(1, 3)) plot(X, apply(Mu, 2, mean), main = expression(paste("Posterior mean of ",mu," over time")), ylab = expression(mu), xlab = Xlab) plot(X, apply(Tau2, 2, mean), main = expression(paste("Posterior mean of ",tau^2," over time")), ylab = expression(tau^2), xlab = Xlab) plot(X, apply(Alpha, 2, mean), main = expression(paste("Posterior mean of ",alpha," over time")), ylab = expression(alpha), xlab = Xlab) ``` This figure gives a nice summary of the model findings. In particular, the plot of the $\alpha_{tGH}$ demonstrate a non-linear trend and the capabilty of `STBDwDM` to smooth temporal effects. 
We now demonstrate how to calculate the posterior distribution of the coefficient of variation (cv) of $\alpha_{tGH}$. ```{r} CVAlpha <- apply(Alpha, 1, cv <- function(x) sd(x) / mean(x)) STCV <- c(mean(CVAlpha), sd(CVAlpha), quantile(CVAlpha, probs = c(0.025, 0.975))) names(STCV)[1:2] <- c("Mean", "SD") print(STCV) ``` STCV (i.e., the posterior mean) was shown to be predictive of glaucome progression, so it is important to be able to compute this value. Here STCV is calculated to be `r round(STCV[1],2)`. Another component of the model that is important to explore are the adjacencies themselves, $w_{ij}$. As a function of $\alpha_{tGH}$ these adjacencies can be calculated generally, and the `womblR` function has provided a function `PosteriorAdj` to compute them. ```{r} Wij <- PosteriorAdj(reg.STBDwDM) ``` The function `PosteriorAdj` function takes in the `STBDwDM` regression object and returns a `PosteriorAdj` object that contains the posterior mean and standard deviation for each adjacency at each visit. ```{r} Wij[1:6, 1:7] ``` For visual field data, the function `PlotAdjacency` can be used to plot the mean and standard deviations of the adjacencies at each of the visits over the visual field surface. We plot the mean adjacencies at visit 3. ```{r, fig.align="center", fig.width = 4.5, fig.height = 4.5} ColorScheme1 <- c("Black", "#636363", "#bdbdbd", "#f0f0f0", "White") PlotAdjacency(Wij, Visit = 3, stat = "mean", main = "Posterior mean adjacencies at \n visit 3 across the visual field", color.scheme = ColorScheme1) ``` And now, we plot the standard deviation of the adjacencies at visit 4. ```{r, fig.align="center", fig.width = 4.5, fig.height = 4.5} ColorScheme2 <- rev(ColorScheme1) zlimSD <- quantile(Wij[,c(5,7,9,11,13,15,17,19,21)], probs = c(0, 1)) PlotAdjacency(Wij, Visit = 4, stat = "sd", main = "Posterior SD of adjacencies at \n visit 4 across the visual field", zlim = zlimSD, color.scheme = ColorScheme2) ``` The function `PlotAdjacency` provides a visual tool for assessing change on the visual field over time. ## Compute diagnostics The `diagnostics` function in the `womblR` package can be used to calculate various diagnostic metrics. The function takes in the `STBDwDM` regression object. ```{r} Diags <- diagnostics(reg.STBDwDM, diags = c("dic", "dinf", "waic"), keepDeviance = TRUE) ``` The `diagnostics` function calculates diagnostics that depend on both the log-likelihood and posterior predictive distribtuion. So, if any of these diagnostics are specified, one or both of these must be sampled from. The `keepDeviance` and `keepPPD` indicate whether or not these distributions should be saved for the user. We indicate that we would like the output to be saved for the log-likelihood (i.e., deviance). We explore the output by looking at the traceplot of the deviance. ```{r, fig.align = 'center', fig.width = 4, fig.height = 3.3} Deviance <- as.mcmc(Diags$deviance) traceplot(Deviance, ylab = "Deviance", main = "Posterior Deviance") ``` This distribution has converged nicely, which is not surprising, given that the other model parameters have converged. Now we can look at the diagnostics. ```{r, eval = FALSE} print(Diags) ``` ```{r, echo = FALSE} unlist(Diags$dic) unlist(Diags$dinf) unlist(Diags$waic) ``` ## Future prediction The `womblR` package provides the `predict.STBDwDM` function for sampling from the posterior predictive distribution at future time points of the observed data. 
This is different from the posterior predictive distribution obtained from the `diagnostics` function, because that distribution is for the observed time points and is automatically obtained given the posterior samples from `STBDwDM`. In order to obtain future samples, you first need samples from the posterior distribution of the future $\mu_t$, $\tau_t^2$, and $\alpha_t$ parameters. The `predict.STBDwDM` first samples these parameters and then samples from the future distribution of the observed outcome variable, returning both. We begin by specifying the future time points we want to predict as 50 and 100 days past the most recent visit. ```{r} NewTimes <- Time[Nu] + c(50, 100) / 365 ``` Then, we use `predict.STBDwDM` to calculate the future posterior predictive distribution. ```{r} Predictions <- predict(reg.STBDwDM, NewTimes) ``` We can see that `predict.STBDwDM` returns a `list` containing two `lists`. ```{r} names(Predictions) ``` The object `MuTauAlpha` is a `list` containing three matrices with the posterior distributions of the future level 1 parameters. ```{r} names(Predictions$MuTauAlpha) head(Predictions$MuTauAlpha$alpha) ``` While the object `Y` is a `list` containing however many matrices correspond to the number of new future time points (here: 2). ```{r} names(Predictions$Y) ``` You can plot a heat map representation of the posterior prediction distribution using the function `PlotSensitivity`. ```{r, fig.align = 'center', fig.width = 5.5, fig.height = 5.5} PlotSensitivity(Y = apply(Predictions$Y$y10, 2, median), main = "Sensitivity estimate (dB) at each \n location on visual field", legend.lab = "DLS (dB)", legend.round = 2, bins = 250, border = FALSE) ``` This figure shows the median posterior predictive heat map over the visual field at the future visit in 50 days past the final observed visit. The `PlotSensitivity` function can be used for plotting any observations on the visual field surface.
/scratch/gouwar.j/cran-all/cranData/womblR/vignettes/womblR-example.Rmd
##--#########################################--## #### Available Species Codes and Assignments #### ##--#########################################--## #' Available species in \pkg{woodValuationDE}, their codes, and parameter #' assignments #' #' The function returns the available species, species codes, and assignments of #' species to species groups for the economic valuation. #' #' @param method argument that is currently not used, but offers the possibility #' to implement alternative parameters and functions in the #' future. #' @return A list with the species, species codes, and assignments to economic #' species groups available in \pkg{woodValuationDE}. #' @examples #' get_species_codes() #' @import dplyr #' #' @export get_species_codes <- function(method = "fuchs.orig") { list( species = dplyr::select(params.wood.value$species.codes, "species.code.nds", "species.code.en", "name.scientific"), codes = c("english.species.names" = "en", "species.codes.used.in.lower.saxony" = "nds"), econ.assignments = dplyr::select(params.wood.value$species.codes, "species.code.nds", "species.code.bodelschwingh.revenues", "species.code.bodelschwingh.costs", "species.code.calamity.group") ) %>% return() }
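## Example lookup (not run): the returned list can be used to translate between
## the supported code types, e.g. to find the Lower Saxony code for beech.
# codes <- get_species_codes()
# codes$species[codes$species$species.code.en == "beech", ]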
/scratch/gouwar.j/cran-all/cranData/woodValuationDE/R/get_species_codes.R
##--###############--## #### Harvest Costs #### ##--###############--## #' Harvest costs per cubic meter skidded volume #' #' The function estimates harvest costs per cubic meter skidded wood volume #' applying the harvest costs function of v. Bodelschwingh (2018). Consequences #' of disturbances and calamities are implemented based on Dieter (2001), #' Moellmann and Moehring (2017), and Fuchs et al. (2022a, 2022b). Apart from #' Dieter (2001) and Moellmann and Moehring (2017), all functions and factors #' are based on data from HessenForst, the public forest service of the Federal #' State of Hesse in Germany. For further details see the \pkg{woodValuationDE} #' \href{https://github.com/Forest-Economics-Goettingen/woodValuationDE}{README}. #' #' @param diameter.q Quadratic mean of the diameter at breast height (dbh) of #' the harvested trees \eqn{[cm]}{[cm]}. #' @param species Tree species, using an available \code{species.code.type}. For #' a list with the available species and codes call #' \code{\link{get_species_codes}}. #' @param cost.level Accessibility of the stand for logging operations #' expressed as an integer of \code{1:3}, with \code{1} for #' standard conditions without limitations, \code{2} for #' moist sites or sites with a slope between 36 \% and 58 \%, #' and \code{3} for slopes > 58 \%. The \code{cost.level}s #' refer to the harvest cost model of v. Bodelschwingh #' (2018). #' @param calamity.type Defines the disturbance or calamity situation to allow #' for the consideration of lower net revenues in the case #' of salvage harvests. The calamity type determines the #' applied consequences of disturbances/calamities, #' implemented as factors for reduced revenues and higher #' harvest costs. By default no calamity is assumed #' \code{"none"}; \code{"calamity.dieter.2001"} #' refers to a general larger calamity applying the #' corrections according to Dieter (2001); five parameter #' sets were implemented according to Moellmann and #' Moehring (2017): \code{fire.small.moellmann.2017} refers #' to damages of only some trees by fire (only conifers) #' while \code{fire.large.moellmann.2017} assumes that at #' least one compartment was affected, the same applies for #' \code{storm.small.moellmann.2017} and #' \code{storm.large.moellmann.2017} referring to damages #' by storm (available for coniferous and deciduous #' species), \code{insects.moellmann.2017} refers to #' damages by insects; \code{"ips.fuchs.2022a"} refers to #' quality losses due to infestations by the European #' spruce bark beetle or \code{"ips.timely.fuchs.2022a"} #' for timely salvage fellings in less advanced attack #' stages (both according to Fuchs et al. 2022a); and #' \code{"stand.damage.fuchs.2022b"} to disturbances #' affecting only one stand, #' \code{"regional.disturbances.fuchs.2022b"} to #' disturbances with effects on the regional wood market #' and \code{"transregional.calamity.fuchs.2022b"} to #' calamities affecting transregional wood markets (the #' last three referring to Fuchs et al. 2022b). #' User-defined types can be implemented via the #' \code{calamity.factors} argument. #' @param calamity.factors Summands \eqn{[EUR m^{-3}]}{[EUR m^(-3)]} #' and factors to consider the consequences of #' disturbances and calamities on wood revenues and #' harvest costs. \code{"baseline"} provides a tibble #' based on the references listed in #' \code{calamity.type} (for details see #' \href{https://github.com/Forest-Economics-Goettingen/woodValuationDE}{README} #' of \pkg{woodValuationDE}). 
Alternatively, users can #' provide a tibble with the same structure. #' @param species.code.type Type of code in which \code{species} is given. #' \code{"en"} for English species names or #' \code{"nds"} for numeric species codes used in Lower #' Saxony, Germany. For a list with the available #' species and codes call #' \code{\link{get_species_codes}}. #' @param method argument that is currently not used, but offers the possibility #' to implement alternative parameters and functions in the #' future. #' @return A vector with harvest costs per cubic meter skidded volume #' \eqn{[EUR m^{-3}]}{[EUR m^(-3)]}. The volume refers to the skidded #' wood volume, provided by \code{\link{vol_skidded}}. #' @references Dieter, Matthias (2001): Land expectation values for spruce and #' beech calculated with Monte Carlo modelling techniques. For. #' Policy Econ. 2 (2), S. 157-166. #' \doi{10.1016/S1389-9341(01)00045-4}. #' @references Fuchs, Jasper M.; Hittenbeck, Anika; Brandl, Susanne; Schmidt, #' Matthias; Paul, Carola (2022a): Adaptation Strategies for #' Spruce Forests - Economic Potential of Bark Beetle Management and #' Douglas Fir Cultivation in Future Tree Species Portfolios. #' Forestry 95 (2) 229-246. \doi{10.1093/forestry/cpab040} #' @references Fuchs, Jasper M.; v. Bodelschwingh, Hilmar; Lange, Alexander; #' Paul, Carola; Husmann, Kai (2022b): Quantifying the #' consequences of disturbances on wood revenues with Impulse #' Response Functions. For. Policy Econ. 140, art. 102738. #' \doi{10.1016/j.forpol.2022.102738}. #' @references Moellmann, Torsten B.; Moehring, Bernhard (2017): A practical way #' to integrate risk in forest management decisions. Ann. For. Sci. #' 74 (4), S.75. \doi{10.1007/s13595-017-0670-x} #' @references v. Bodelschwingh, Hilmar (2018): Oekonomische Potentiale von #' Waldbestaenden. Konzeption und Abschaetzung im Rahmen einer #' Fallstudie in hessischen Staatswaldflaechen (Economic Potentials #' of Forest Stands and Their Consideration in Strategic Decisions). #' Bad Orb: J.D. Sauerlaender`s Verlag (Schriften zur Forst- und #' Umweltoekonomie, 47). #' @examples #' harvest_costs(40, #' "beech") #' #' # species codes Lower Saxony (Germany) #' harvest_costs(40, #' 211, #' species.code.type = "nds") #' #' # vector input #' harvest_costs(seq(20, 50, 5), #' "spruce") #' #' harvest_costs(40, #' rep(c("beech", "spruce"), #' each = 3), #' cost.level = rep(1:3, 2)) #' #' harvest_costs(40, #' rep("spruce", 6), #' calamity.type = c("none", #' "ips.fuchs.2022a", #' "ips.timely.fuchs.2022a", #' "stand.damage.fuchs.2022b", #' "regional.disturbance.fuchs.2022b", #' "transregional.calamity.fuchs.2022b")) #' #' # user-defined calamities with respective changes in harvest costs #' harvest_costs(40, #' rep("spruce", 3), #' calamity.type = c("none", #' "my.own.calamity.1", #' "my.own.calamity.2"), #' calamity.factors = dplyr::tibble( #' calamity.type = rep(c("none", #' "my.own.calamity.1", #' "my.own.calamity.2"), #' each = 2), #' species.group = rep(c("softwood", #' "deciduous"), #' times = 3), #' revenues.factor = c(1.0, 1.0, #' 0.8, 0.8, #' 0.2, 0.2), #' cost.factor = c(1.0, 1.0, #' 1.5, 1.5, #' 1.0, 1.0), #' cost.additional = c(0, 0, #' 0, 0, #' 5, 5))) #' @import dplyr #' #' @export harvest_costs <- function( diameter.q, species, cost.level = 1, calamity.type = "none", calamity.factors = "baseline", species.code.type = "en", method = "fuchs.orig" ) { if (is.character(calamity.factors)) { if (calamity.factors != "baseline") { stop("Provided value for calamity.factors unknown. 
See package README for further details.") } if (any(calamity.type == "calamity.dieter.2001")) { warning("You used 'calamity.dieter.2001': Since Dieter (2001) refers to net revenues when considering the consequences of calamities, we here also reduce the harvest costs. However, this is completely counterintuitive and only the derived net revenues are meaningful to interpret.") } } else { if (!(all(colnames(calamity.factors) == colnames(params.wood.value$calamity.factors))) & !(all(is.character(calamity.factors$calamity.type))) & !(all(is.character(calamity.factors$species.group))) & !(all(is.numeric(calamity.factors$revenues.factor))) & !(all(is.numeric(calamity.factors$cost.factor))) & !(all(is.numeric(calamity.factors$cost.additional)))) { stop("Structure of calamity.factors does not match the required structure. See package README for further details.") } params.wood.value$calamity.factors <- calamity.factors } harvest.costs <- tibble( diameter.q = diameter.q, species = species, cost.level = cost.level, calamity.type = calamity.type ) %>% # assign the appropriate parameterized species (group) mutate(calamity.group = recode_species(species, species.code.type, "calamity.group"), species = recode_species(species, species.code.type, "bodelschwingh.costs")) %>% # add specific parameters left_join(params.wood.value$harvest.costs, by = c("species" = "species.code.bodelschwingh", "cost.level" = "cost.level")) %>% left_join(params.wood.value$calamity.factors, by = c("calamity.type" = "calamity.type", "calamity.group" = "species.group")) %>% mutate( # calculate harvest costs, considering a maximum cost level harvest.costs = .data$a * .data$diameter.q^.data$b + .data$c, harvest.costs = if_else(.data$harvest.costs < .data$max, .data$harvest.costs, .data$max), # consider consequences of calamities harvest.costs = .data$harvest.costs * .data$cost.factor + .data$cost.additional) if (any(is.na(c(harvest.costs$cost.factor, harvest.costs$cost.additional)))) { warning("For at least one species the calamity value cost.factor or cost.additional was NA.") } harvest.costs %>% pull(harvest.costs) %>% return() }
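## Illustrative usage sketch, not part of the original package source:
## harvest_costs() prices the skidded volume, so multiplying by the skidded
## share from vol_skidded() converts the costs to a per-cubic-meter-over-bark
## basis. Assumes the package is attached; the input values are arbitrary.
library(woodValuationDE)
costs.per.m3.skidded  <- harvest_costs(40, "spruce", cost.level = 2)
costs.per.m3.overbark <- costs.per.m3.skidded * vol_skidded(40, "spruce")
costs.per.m3.overbark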
/scratch/gouwar.j/cran-all/cranData/woodValuationDE/R/harvest_costs.R
##--################--## #### Recode Species #### ####################--## #' Convert species names to codes #' #' The function converts species names in codes and assigns species groups for #' the wood valuation procedure. #' #' @param species.code.orig Species code to be converted. For a list with the #' available species and codes call #' \code{\link{get_species_codes}}. #' @param source.format Code format of the original code. For a list with the #' available species and codes call #' \code{\link{get_species_codes}}. #' @param target.format Code format to be returned or an assignment to a species #' group for the economic valuation. For a list with the #' available species and codes call #' \code{\link{get_species_codes}}. #' @param method argument that is currently not used, but offers the possibility #' to implement alternative parameters and functions in the #' future. #' @import dplyr #' #' @noRd recode_species <- function(species.code.orig, source.format, target.format, method = "fuchs.orig") { # test: existing source formats if (!(paste0("species.code.", source.format) %in% colnames(params.wood.value$species.codes))) { stop("recode_species: Unknown source format!") } # test: existing target formats if (!(paste0("species.code.", target.format) %in% colnames(params.wood.value$species.codes))) { stop("recode_species: Unknown target format!") } # test: existing species if (!all(species.code.orig %in% pull(params.wood.value$species.codes, !!paste0("species.code.", source.format)))) { # missing species codes species.codes.missing <- species.code.orig[ !(species.code.orig %in% pull(params.wood.value$species.codes, !!paste0("species.code.", source.format))) ] stop(paste0("recode_species: Unknown species codes (", species.codes.missing, ")") ) } # add new codes species.codes.translated <- species.code.orig %>% as_tibble %>% left_join(params.wood.value$species.codes, by = c("value" = paste0("species.code.", source.format))) %>% pull(!!paste0("species.code.", target.format)) if (length(species.codes.translated) != length(species.code.orig)) { stop("At least one species code isn't unique. No species could be assigned!") } return(species.codes.translated) }
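## Minimal standalone sketch (not package code) of the join-based recoding idea
## used above, with a made-up two-row lookup table. The real table is
## params.wood.value$species.codes; the numeric spruce code below is purely
## hypothetical, while beech = 211 is taken from the package examples.
library(dplyr)
toy.codes <- tibble(species.code.en  = c("beech", "spruce"),
                    species.code.nds = c(211, 511))
tibble(value = c("spruce", "beech", "beech")) %>%
  left_join(toy.codes, by = c("value" = "species.code.en")) %>%
  pull(species.code.nds)   # numeric code per input, order and duplicates kept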
/scratch/gouwar.j/cran-all/cranData/woodValuationDE/R/recode_species.R
##--##############################--## #### Share of Salable Wood Volume #### ##--##############################--## #' Relative share in the volume over bark that is salable #' #' The function estimates the salable share of the wood volume. It is expressed #' in relation to the volume over bark (German unit: Vfm m.R.) as usually #' provided by yield tables and forest simulators. This includes all pulp wood, #' sawlog, and fuel wood assortments. The share of salable wood is required to #' derive the wood revenues per cubic meter volume over bark. The function is #' based on the assortment tables from Offer and Staupendahl (2018) and its #' derivation is described in Fuchs et al. (in preparation). The underlying #' assortment tables are based on data from HessenForst, the public forest #' service of the Federal State of Hesse in Germany. For further details see the #' \pkg{woodValuationDE} #' \href{https://github.com/Forest-Economics-Goettingen/woodValuationDE}{README}. #' #' @param diameter.q Quadratic mean of the diameter at breast height (dbh) of #' the harvested trees \eqn{[cm]}{[cm]}. #' @param species Tree species, using an available \code{species.code.type}. For #' a list with the available species and codes call #' \code{\link{get_species_codes}}. #' @param value.level Stand quality expressed as an integer of \code{1:3}, with #' \code{1} for an extraordinarily high stand quality with #' high shares of wood suitable for high-valued usages such #' as furniture, \code{2} for a moderate quality, and #' \code{3} for a low quality (e.g., trees with thick #' branches). The \code{value.level}s refer to the applied #' assortment tables of Offer and Staupendahl (2018). #' @param logging.method Logging method, with \code{"manually"} for #' motor-manual logging using a chain saw, #' \code{"harvester"} for logging with highly mechanized #' forest harvesters, or \code{"combined"} for a #' combination of the previous methods dependent on the #' mean diameter. #' @param species.code.type Type of code in which \code{species} is given. #' \code{"en"} for English species names or #' \code{"nds"} for numeric species codes used in Lower #' Saxony, Germany. For a list with the available #' species and codes call #' \code{\link{get_species_codes}}. #' @param method argument that is currently not used, but offers the possibility #' to implement alternative parameters and functions in the #' future. #' @return A vector with relative shares of salable wood volume. #' @references Fuchs, Jasper M.; Husmann, Kai; v. Bodelschwingh, Hilmar; Koster, #' Roman; Staupendahl, Kai; Offer, Armin; Moehring, Bernhard, Paul, #' Carola (in preparation): woodValuationDE: A consistent framework #' for calculating stumpage values in Germany (technical note) #' @references Offer, Armin; Staupendahl, Kai (2018): Holzwerbungskosten- und #' Bestandessortentafeln (Wood Harvest Cost and Assortment #' Tables). Kassel: HessenForst (publisher). 
#' @examples #' vol_salable(40, #' "beech") #' #' # species codes Lower Saxony (Germany) #' vol_salable(40, #' 211, #' species.code.type = "nds") #' #' # vector input #' vol_salable(seq(20, 50, 5), #' "spruce") #' #' vol_salable(rep(seq(20, 50, 10), #' 2), #' rep(c("beech", "spruce"), #' each = 4)) #' #' vol_salable(rep(seq(20, 50, 10), #' 2), #' rep(c("beech", "spruce"), #' each = 4), #' logging.method = rep(c("manually", "harvester"), #' each = 4)) #' @import dplyr #' #' @export vol_salable <- function( diameter.q, species, value.level = 2, logging.method = "combined", species.code.type = "en", method = "fuchs.orig" ) { vol.salable <- tibble(diameter.q = diameter.q, species = species, value.level = value.level, logging.method = logging.method) %>% # assign the appropriate parameterized species (group) mutate(species = recode_species(species, species.code.type, "bodelschwingh.revenues")) %>% # add the specific parameters left_join(params.wood.value$vol.salable, by = c("species" = "species.code", "logging.method" = "logging.method", "value.level" = "value.level")) %>% mutate(vol.salable = .data$A * exp(-exp(.data$zm / .data$A * exp(1) * (.data$tw - diameter.q)))) # test whether the diameters are in the range of the original data sets if (any(vol.salable$diameter.q < vol.salable$min.dq) | any(vol.salable$diameter.q > vol.salable$max.dq)) { warning("Relative salable volume: At least one diameter leads to extrapolation!") } vol.salable %>% pull(vol.salable) %>% return() }
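## Sketch of the Gompertz-type form evaluated in the function above, with
## invented parameters: A is the asymptotic share, zm the maximum slope, and tw
## the turning-point diameter. The real, species-specific parameters come from
## params.wood.value$vol.salable and are not reproduced here.
gompertz_share <- function(d, A, zm, tw) {
  A * exp(-exp(zm / A * exp(1) * (tw - d)))
}
round(gompertz_share(d = seq(10, 60, by = 10), A = 0.95, zm = 0.03, tw = 12), 3)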
/scratch/gouwar.j/cran-all/cranData/woodValuationDE/R/vol_salable.R
##--##############################--## #### Share of Skidded Wood Volume #### ##--##############################--## #' Relative share in the volume over bark that is skidded #' #' The function estimates the skidded share of the wood volume. It is expressed #' in relation to the volume over bark (German unit: Vfm m.R.) as usually #' provided by yield tables and forest simulators. This includes all pulp wood #' and sawlog assortments. It is assumed that the fuel wood assortments are #' processed by buyers themselves and that they are thus not commercially #' delivered to the forest road. The share of skidded wood is required to derive #' the costs for harvesting and skidding per cubic meter volume over bark. The #' function is based on the assortment tables from Offer and Staupendahl (2018) #' and its derivation is described in Fuchs et al. (in preparation). The #' underlying assortment tables are based on data from HessenForst, the public #' forest service of the Federal State of Hesse in Germany. For further details #' see the \pkg{woodValuationDE} #' \href{https://github.com/Forest-Economics-Goettingen/woodValuationDE}{README}. #' #' @param diameter.q Quadratic mean of the diameter at breast height (dbh) of #' the harvested trees \eqn{[cm]}{[cm]}. #' @param species Tree species, using an available \code{species.code.type}. For #' a list with the available species and codes call #' \code{\link{get_species_codes}}. #' @param value.level Stand quality expressed as an integer of \code{1:3}, with #' \code{1} for an extraordinarily high stand quality with #' high shares of wood suitable for high-valued usages such #' as furniture, \code{2} for a moderate quality, and #' \code{3} for a low quality (e.g., trees with thick #' branches). The \code{value.level}s refer to the applied #' assortment tables of Offer and Staupendahl (2018). #' @param logging.method Logging method, with \code{"manually"} for #' motor-manual logging using a chain saw, #' \code{"harvester"} for logging with highly mechanized #' forest harvesters, or \code{"combined"} for a #' combination of the previous methods dependent on the #' mean diameter. #' @param species.code.type Type of code in which \code{species} is given. #' \code{"en"} for English species names or #' \code{"nds"} for numeric species codes used in Lower #' Saxony, Germany. For a list with the available #' species and codes call #' \code{\link{get_species_codes}}. #' @param method argument that is currently not used, but offers the possibility #' to implement alternative parameters and functions in the #' future. #' @return A vector with relative shares of skidded wood volume. #' @references Fuchs, Jasper M.; Husmann, Kai; v. Bodelschwingh, Hilmar; Koster, #' Roman; Staupendahl, Kai; Offer, Armin; Moehring, Bernhard, Paul, #' Carola (in preparation): woodValuationDE: A consistent framework #' for calculating stumpage values in Germany (technical note) #' @references Offer, Armin; Staupendahl, Kai (2018): Holzwerbungskosten- und #' Bestandessortentafeln (Wood Harvest Cost and Assortment #' Tables). Kassel: HessenForst (publisher). 
#' @examples #' vol_skidded(40, #' "beech") #' #' # species codes Lower Saxony (Germany) #' vol_skidded(40, #' 211, #' species.code.type = "nds") #' #' # vector input #' vol_skidded(seq(20, 50, 5), #' "spruce") #' #' vol_skidded(rep(seq(20, 50, 10), #' 2), #' rep(c("beech", "spruce"), #' each = 4)) #' #' vol_skidded(rep(seq(20, 50, 10), #' 2), #' rep(c("beech", "spruce"), #' each = 4), #' logging.method = rep(c("manually", "harvester"), #' each = 4)) #' @import dplyr #' #' @export vol_skidded <- function( diameter.q, species, value.level = 2, logging.method = "combined", species.code.type = "en", method = "fuchs.orig" ) { vol.skidded <- tibble(diameter.q = diameter.q, species = species, value.level = value.level, logging.method = logging.method) %>% # assign the appropriate parameterized species (group) mutate(species = recode_species(species, species.code.type, "bodelschwingh.revenues")) %>% # add the specific parameters left_join(params.wood.value$vol.skidded, by = c("species" = "species.code", "logging.method" = "logging.method", "value.level" = "value.level")) %>% mutate(vol.skidded = .data$A * exp(-exp(.data$zm / .data$A * exp(1) * (.data$tw - diameter.q)))) # test whether the diameters are in the range of the original data sets if (any(vol.skidded$diameter.q < vol.skidded$min.dq) | any(vol.skidded$diameter.q > vol.skidded$max.dq)) { warning("Relative skidded volume: At least one diameter leads to extrapolation!") } vol.skidded %>% pull(vol.skidded) %>% return() }
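## Illustration, not from the package documentation: the salable share
## additionally contains the fuel wood assortments, so the difference between
## vol_salable() and vol_skidded() can be read as the fuel wood share assumed
## to be left for self-collecting buyers. Assumes the package is attached.
library(woodValuationDE)
vol_salable(40, "beech") - vol_skidded(40, "beech")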
/scratch/gouwar.j/cran-all/cranData/woodValuationDE/R/vol_skidded.R
##--################--## #### Wood Net Revenue #### ##--################--## #' Net revenues for wood volumes over bark #' #' The function is a wrapper for the wood valuation framework provided by #' \pkg{woodValuationDE}. It calls \code{\link{wood_valuation}} and returns only #' the net revenues for the user-provided wood volume over bark. The underlying #' functions were derived based on data from HessenForst, the public forest #' service of the Federal State of Hesse in Germany. For further details #' see the \pkg{woodValuationDE} #' \href{https://github.com/Forest-Economics-Goettingen/woodValuationDE}{README}. #' #' @param volume Wood volume \eqn{[m^{3}]}{[m^3]}, referring to volume over bark #' of the trees to be harvested, as usually provided by yield #' tables and forest simulators (German unit: Vfm). #' @param diameter.q Quadratic mean of the diameter at breast height (dbh) of #' the harvested trees \eqn{[cm]}{[cm]}. #' @param species Tree species, using an available \code{species.code.type}. For #' a list with the available species and codes call #' \code{\link{get_species_codes}}. #' @param value.level Stand quality expressed as an integer of \code{1:3}, with #' \code{1} for an extraordinarily high stand quality with #' high shares of wood suitable for high-valued usages such #' as furniture, \code{2} for a moderate quality, and #' \code{3} for a low quality (e.g., trees with thick #' branches). The \code{value.level}s refer to the applied #' assortment tables of Offer and Staupendahl (2018). #' @param cost.level Accessibility of the stand for logging operations #' expressed as an integer of \code{1:3}, with \code{1} for #' standard conditions without limitations, \code{2} for #' moist sites or sites with a slope between 36 \% and 58 \%, #' and \code{3} for slopes > 58 \%. The \code{cost.level}s #' refer to the harvest cost model of v. Bodelschwingh #' (2018). #' @param logging.method Logging method, with \code{"manually"} for #' motor-manual logging using a chain saw, #' \code{"harvester"} for logging with highly mechanized #' forest harvesters, or \code{"combined"} for a #' combination of the previous methods dependent on the #' mean diameter. #' @param price.ref.assortment Wood price of the reference assortments allowing #' to consider market fluctuations. Default is #' \code{"baseline"} referring to the prices from 2010 to #' 2015 in Hesse, Germany (for details see #' \href{https://github.com/Forest-Economics-Goettingen/woodValuationDE}{README} #' of \pkg{woodValuationDE} or v. Bodelschwingh (2018)). #' Alternatively, users can provide a tibble with the #' same structure. The column species uses the specified #' \code{species.code.type}. #' @param calamity.type Defines the disturbance or calamity situation to allow #' for the consideration of lower net revenues in the case #' of salvage harvests. The calamity type determines the #' applied consequences of disturbances/calamities, #' implemented as factors for reduced revenues and higher #' harvest costs. 
By default no calamity is assumed #' \code{"none"}; \code{"calamity.dieter.2001"} #' refers to a general larger calamity applying the #' corrections according to Dieter (2001); five parameter #' sets were implemented according to Moellmann and #' Moehring (2017): \code{fire.small.moellmann.2017} refers #' to damages of only some trees by fire (only conifers) #' while \code{fire.large.moellmann.2017} assumes that at #' least one compartment was affected, the same applies for #' \code{storm.small.moellmann.2017} and #' \code{storm.large.moellmann.2017} referring to damages #' by storm (available for coniferous and deciduous #' species), \code{insects.moellmann.2017} refers to #' damages by insects; \code{"ips.fuchs.2022a"} refers to #' quality losses due to infestations by the European #' spruce bark beetle or \code{"ips.timely.fuchs.2022a"} #' for timely salvage fellings in less advanced attack #' stages (both according to Fuchs et al. 2022a); and #' \code{"stand.damage.fuchs.2022b"} to disturbances #' affecting only one stand, #' \code{"regional.disturbances.fuchs.2022b"} to #' disturbances with effects on the regional wood market #' and \code{"transregional.calamity.fuchs.2022b"} to #' calamities affecting transregional wood markets (the #' last three referring to Fuchs et al. 2022b). #' User-defined types can be implemented via the #' \code{calamity.factors} argument. #' @param calamity.factors Summands \eqn{[EUR m^{-3}]}{[EUR m^(-3)]} #' and factors to consider the consequences of #' disturbances and calamities on wood revenues and #' harvest costs. \code{"baseline"} provides a tibble #' based on the references listed in #' \code{calamity.type} (for details see #' \href{https://github.com/Forest-Economics-Goettingen/woodValuationDE}{README} #' of \pkg{woodValuationDE}). Alternatively, users can #' provide a tibble with the same structure. #' @param species.code.type Type of code in which \code{species} is given. #' \code{"en"} for English species names or #' \code{"nds"} for numeric species codes used in Lower #' Saxony, Germany. For a list with the available #' species and codes call #' \code{\link{get_species_codes}}. #' @param method argument that is currently not used, but offers the possibility #' to implement alternative parameters and functions in the #' future. #' @return A vector with the total net revenues for the entire volume over bark #' \eqn{[EUR]}{[EUR]}. #' @references Dieter, Matthias (2001): Land expectation values for spruce and #' beech calculated with Monte Carlo modelling techniques. For. #' Policy Econ. 2 (2), S. 157-166. #' \doi{10.1016/S1389-9341(01)00045-4}. #' @references Fuchs, Jasper M.; Hittenbeck, Anika; Brandl, Susanne; Schmidt, #' Matthias; Paul, Carola (2022a): Adaptation Strategies for #' Spruce Forests - Economic Potential of Bark Beetle Management and #' Douglas Fir Cultivation in Future Tree Species Portfolios. #' Forestry 95 (2) 229-246. \doi{10.1093/forestry/cpab040} #' @references Fuchs, Jasper M.; v. Bodelschwingh, Hilmar; Lange, Alexander; #' Paul, Carola; Husmann, Kai (2022b): Quantifying the #' consequences of disturbances on wood revenues with Impulse #' Response Functions. For. Policy Econ. 140, art. 102738. #' \doi{10.1016/j.forpol.2022.102738}. #' @references Fuchs, Jasper M.; Husmann, Kai; v. 
Bodelschwingh, Hilmar; Koster, #' Roman; Staupendahl, Kai; Offer, Armin; Moehring, Bernhard, Paul, #' Carola (in preparation): woodValuationDE: A consistent framework #' for calculating stumpage values in Germany (technical note) #' @references Moellmann, Torsten B.; Moehring, Bernhard (2017): A practical way #' to integrate risk in forest management decisions. Ann. For. Sci. #' 74 (4), S.75. \doi{10.1007/s13595-017-0670-x} #' @references Offer, Armin; Staupendahl, Kai (2018): Holzwerbungskosten- und #' Bestandessortentafeln (Wood Harvest Cost and Assortment #' Tables). Kassel: HessenForst (publisher). #' @references v. Bodelschwingh, Hilmar (2018): Oekonomische Potentiale von #' Waldbestaenden. Konzeption und Abschaetzung im Rahmen einer #' Fallstudie in hessischen Staatswaldflaechen (Economic Potentials #' of Forest Stands and Their Consideration in Strategic Decisions). #' Bad Orb: J.D. Sauerlaender`s Verlag (Schriften zur Forst- und #' Umweltoekonomie, 47). #' @examples #' wood_net_revenues(1, #' 40, #' "beech") #' #' # species codes Lower Saxony (Germany) #' wood_net_revenues(seq(10, 70, 20), #' 40, #' 211, #' species.code.type = "nds") #' #' # vector input #' wood_net_revenues(10, #' seq(20, 50, 5), #' "spruce") #' #' wood_net_revenues(10, #' 40, #' rep(c("beech", "spruce"), #' each = 9), #' value.level = rep(rep(1:3, 2), #' each = 3), #' cost.level = rep(1:3, 6)) #' #' wood_net_revenues(10, #' 40, #' rep("spruce", 6), #' calamity.type = c("none", #' "ips.fuchs.2022a", #' "ips.timely.fuchs.2022a", #' "stand.damage.fuchs.2022b", #' "regional.disturbance.fuchs.2022b", #' "transregional.calamity.fuchs.2022b")) #' #' # user-defined calamities with respective changes in harvest costs and wood revenues #' wood_net_revenues(10, #' 40, #' rep("spruce", 3), #' calamity.type = c("none", #' "my.own.calamity.1", #' "my.own.calamity.2"), #' calamity.factors = dplyr::tibble( #' calamity.type = rep(c("none", #' "my.own.calamity.1", #' "my.own.calamity.2"), #' each = 2), #' species.group = rep(c("softwood", #' "deciduous"), #' times = 3), #' revenues.factor = c(1.0, 1.0, #' 0.8, 0.8, #' 0.2, 0.2), #' cost.factor = c(1.0, 1.0, #' 1.5, 1.5, #' 1.0, 1.0), #' cost.additional = c(0, 0, #' 0, 0, #' 5, 5))) #' #' # adapted market situation by providing alternative prices for the reference assortments #' wood_net_revenues(10, #' 40, #' c("oak", "beech", "spruce")) #' wood_net_revenues(10, #' 40, #' c("oak", "beech", "spruce"), #' price.ref.assortment = dplyr::tibble( #' species = c("oak", "beech", "spruce"), #' price.ref.assortment = c(300, 80, 50))) #' #' @import dplyr #' #' @export wood_net_revenues <- function( volume, diameter.q, species, value.level = 2, cost.level = 1, logging.method = "combined", price.ref.assortment = "baseline", calamity.type = "none", calamity.factors = "baseline", species.code.type = "en", method = "fuchs.orig" ) { wood_valuation(volume, diameter.q, species, value.level, cost.level, logging.method, price.ref.assortment, calamity.type, calamity.factors, species.code.type ) %>% pull(.data$wood.net.revenue) %>% return() }
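## Consistency sketch (not part of the package source): wood_net_revenues()
## only extracts the wood.net.revenue column from wood_valuation(), so the two
## calls below should return the same value. Assumes the package is attached.
library(woodValuationDE)
wood_net_revenues(10, 40, "beech")
wood_valuation(10, 40, "beech")$wood.net.revenue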
/scratch/gouwar.j/cran-all/cranData/woodValuationDE/R/wood_net_revenue.R
##--###############--## #### Wood Revenues #### ##--###############--## #' Wood revenues per cubic meter salable volume #' #' The function estimates wood revenues per cubic meter salable volume using the #' wood revenue model of v. Bodelschwingh (2018), which is based on the #' assortment tables from Offer and Staupendahl (2018). Consequences of #' disturbances and calamities are implemented based on Dieter (2001), Moellmann #' and Moehring (2017), and Fuchs et al. (2022a, 2022b). Apart from Dieter #' (2001) and Moellmann and Moehring (2017), all functions and factors are based #' on data from HessenForst, the public forest service of the Federal State of #' Hesse in Germany. For further details see the \pkg{woodValuationDE} #' \href{https://github.com/Forest-Economics-Goettingen/woodValuationDE}{README}. #' #' @param diameter.q Quadratic mean of the diameter at breast height (dbh) of #' the harvested trees \eqn{[cm]}{[cm]}. #' @param species Tree species, using an available \code{species.code.type}. For #' a list with the available species and codes call #' \code{\link{get_species_codes}}. #' @param value.level Stand quality expressed as an integer of \code{1:3}, with #' \code{1} for an extraordinarily high stand quality with #' high shares of wood suitable for high-valued usages such #' as furniture, \code{2} for a moderate quality, and #' \code{3} for a low quality (e.g., trees with thick #' branches). The \code{value.level}s refer to the applied #' assortment tables of Offer and Staupendahl (2018). #' @param logging.method Logging method, with \code{"manually"} for #' motor-manual logging using a chain saw, #' \code{"harvester"} for logging with highly mechanized #' forest harvesters, or \code{"combined"} for a #' combination of the previous methods dependent on the #' mean diameter. #' @param price.ref.assortment Wood price of the reference assortments allowing #' to consider market fluctuations. Default is #' \code{"baseline"} referring to the prices from 2010 to #' 2015 in Hesse, Germany (for details see #' \href{https://github.com/Forest-Economics-Goettingen/woodValuationDE}{README} #' of \pkg{woodValuationDE} or v. Bodelschwingh (2018)). #' Alternatively, users can provide a tibble with the #' same structure. The column species uses the specified #' \code{species.code.type}. #' @param calamity.type Defines the disturbance or calamity situation to allow #' for the consideration of lower net revenues in the case #' of salvage harvests. The calamity type determines the #' applied consequences of disturbances/calamities, #' implemented as factors for reduced revenues and higher #' harvest costs. 
By default no calamity is assumed #' \code{"none"}; \code{"calamity.dieter.2001"} #' refers to a general larger calamity applying the #' corrections according to Dieter (2001); five parameter #' sets were implemented according to Moellmann and #' Moehring (2017): \code{fire.small.moellmann.2017} refers #' to damages of only some trees by fire (only conifers) #' while \code{fire.large.moellmann.2017} assumes that at #' least one compartment was affected, the same applies for #' \code{storm.small.moellmann.2017} and #' \code{storm.large.moellmann.2017} referring to damages #' by storm (available for coniferous and deciduous #' species), \code{insects.moellmann.2017} refers to #' damages by insects; \code{"ips.fuchs.2022a"} refers to #' quality losses due to infestations by the European #' spruce bark beetle or \code{"ips.timely.fuchs.2022a"} #' for timely salvage fellings in less advanced attack #' stages (both according to Fuchs et al. 2022a); and #' \code{"stand.damage.fuchs.2022b"} to disturbances #' affecting only one stand, #' \code{"regional.disturbances.fuchs.2022b"} to #' disturbances with effects on the regional wood market #' and \code{"transregional.calamity.fuchs.2022b"} to #' calamities affecting transregional wood markets (the #' last three referring to Fuchs et al. 2022b). #' User-defined types can be implemented via the #' \code{calamity.factors} argument. #' @param calamity.factors Summands \eqn{[EUR m^{-3}]}{[EUR m^(-3)]} #' and factors to consider the consequences of #' disturbances and calamities on wood revenues and #' harvest costs. \code{"baseline"} provides a tibble #' based on the references listed in #' \code{calamity.type} (for details see #' \href{https://github.com/Forest-Economics-Goettingen/woodValuationDE}{README} #' of \pkg{woodValuationDE}). Alternatively, users can #' provide a tibble with the same structure. #' @param species.code.type Type of code in which \code{species} is given. #' \code{"en"} for English species names or #' \code{"nds"} for numeric species codes used in Lower #' Saxony, Germany. For a list with the available #' species and codes call #' \code{\link{get_species_codes}}. #' @param method argument that is currently not used, but offers the possibility #' to implement alternative parameters and functions in the #' future. #' @return A vector with wood revenues per cubic meter #' \eqn{[EUR m^{-3}]}{[EUR m^(-3)]}. The volume refers to the salable #' wood volume, provided by \code{\link{vol_salable}}. #' @references Dieter, Matthias (2001): Land expectation values for spruce and #' beech calculated with Monte Carlo modelling techniques. For. #' Policy Econ. 2 (2), S. 157-166. #' \doi{10.1016/S1389-9341(01)00045-4}. #' @references Fuchs, Jasper M.; Hittenbeck, Anika; Brandl, Susanne; Schmidt, #' Matthias; Paul, Carola (2022a): Adaptation Strategies for #' Spruce Forests - Economic Potential of Bark Beetle Management and #' Douglas Fir Cultivation in Future Tree Species Portfolios. #' Forestry 95 (2) 229-246. \doi{10.1093/forestry/cpab040} #' @references Fuchs, Jasper M.; v. Bodelschwingh, Hilmar; Lange, Alexander; #' Paul, Carola; Husmann, Kai (2022b): Quantifying the #' consequences of disturbances on wood revenues with Impulse #' Response Functions. For. Policy Econ. 140, art. 102738. #' \doi{10.1016/j.forpol.2022.102738}. #' @references Fuchs, Jasper M.; Husmann, Kai; v. 
Bodelschwingh, Hilmar; Koster, #' Roman; Staupendahl, Kai; Offer, Armin; Moehring, Bernhard, Paul, #' Carola (in preparation): woodValuationDE: A consistent framework #' for calculating stumpage values in Germany (technical note) #' @references Moellmann, Torsten B.; Moehring, Bernhard (2017): A practical way #' to integrate risk in forest management decisions. Ann. For. Sci. #' 74 (4), S.75. \doi{10.1007/s13595-017-0670-x} #' @references v. Bodelschwingh, Hilmar (2018): Oekonomische Potentiale von #' Waldbestaenden. Konzeption und Abschaetzung im Rahmen einer #' Fallstudie in hessischen Staatswaldflaechen (Economic Potentials #' of Forest Stands and Their Consideration in Strategic Decisions). #' Bad Orb: J.D. Sauerlaender`s Verlag (Schriften zur Forst- und #' Umweltoekonomie, 47). #' @examples #' wood_revenues(40, #' "beech") #' #' # species codes Lower Saxony (Germany) #' wood_revenues(40, #' 211, #' species.code.type = "nds") #' #' # vector input #' wood_revenues(seq(20, 50, 5), #' "spruce") #' #' wood_revenues(40, #' rep(c("beech", "spruce"), #' each = 3), #' value.level = rep(1:3, 2)) #' #' wood_revenues(40, #' rep("spruce", 7), #' calamity.type = c("none", #' "calamity.dieter.2001", #' "ips.fuchs.2022a", #' "ips.timely.fuchs.2022a", #' "stand.damage.fuchs.2022b", #' "regional.disturbance.fuchs.2022b", #' "transregional.calamity.fuchs.2022b")) #' #' # user-defined calamities with respective changes in wood revenues #' wood_revenues(40, #' rep("spruce", 3), #' calamity.type = c("none", #' "my.own.calamity.1", #' "my.own.calamity.2"), #' calamity.factors = dplyr::tibble( #' calamity.type = rep(c("none", #' "my.own.calamity.1", #' "my.own.calamity.2"), #' each = 2), #' species.group = rep(c("softwood", #' "deciduous"), #' times = 3), #' revenues.factor = c(1.0, 1.0, #' 0.8, 0.8, #' 0.2, 0.2), #' cost.factor = c(1.0, 1.0, #' 1.5, 1.5, #' 1.0, 1.0), #' cost.additional = c(0, 0, #' 0, 0, #' 5, 5))) #' #' # adapted market situation by providing alternative prices for the reference assortments #' wood_revenues(40, #' c("oak", "beech", "spruce")) #' wood_revenues(40, #' c("oak", "beech", "spruce"), #' price.ref.assortment = dplyr::tibble( #' species = c("oak", "beech", "spruce"), #' price.ref.assortment = c(300, 80, 50))) #' #' @import dplyr #' #' @export wood_revenues <- function( diameter.q, species, value.level = 2, logging.method = "combined", price.ref.assortment = "baseline", calamity.type = "none", calamity.factors = "baseline", species.code.type = "en", method = "fuchs.orig" ) { if (is.character(calamity.factors)) { if (calamity.factors != "baseline") { stop("Provided value for calamity.factors unknown. See package README for further details.") } } else { if (!(all(colnames(calamity.factors) == colnames(params.wood.value$calamity.factors))) & !(all(is.character(calamity.factors$calamity.type))) & !(all(is.character(calamity.factors$species.group))) & !(all(is.numeric(calamity.factors$revenues.factor))) & !(all(is.numeric(calamity.factors$cost.factor))) & !(all(is.numeric(calamity.factors$cost.additional)))) { stop("Structure of calamity.factors does not match the required structure. 
See package README for further details.") } params.wood.value$calamity.factors <- calamity.factors } wood.revenues <- tibble( diameter.q = diameter.q, species = species, value.level = value.level, logging.method = logging.method, calamity.type = calamity.type ) %>% # assign the appropriate parameterized species (group) mutate(calamity.group = recode_species(species, species.code.type, "calamity.group"), species = recode_species(species, species.code.type, "bodelschwingh.revenues")) %>% # add specific parameters left_join(params.wood.value$wood.revenues, by = c("species" = "species.code.bodelschwingh", "value.level" = "value.level", "logging.method" = "logging.method")) %>% left_join(params.wood.value$calamity.factors, by = c("calamity.type" = "calamity.type", "calamity.group" = "species.group")) %>% mutate( wood.revenues = (.data$a * diameter.q^4 + .data$b * diameter.q^3 + .data$c * diameter.q^2 + .data$d * diameter.q + .data$e) * # apply calamity factor .data$revenues.factor) %>% # add minimum and maximum diameter to check for extrapolation left_join(params.wood.value$vol.salable, by = c("species" = "species.code", "value.level" = "value.level", "logging.method" = "logging.method")) # test whether the diameters are in the range of the original data sets if (any(wood.revenues$diameter.q < wood.revenues$min.dq) | any(wood.revenues$diameter.q > wood.revenues$max.dq)) { warning("wood_revenues: At least one diameter leads to extrapolation!") } # apply price factor for market fluctuations if not "baseline" if (is.character(price.ref.assortment)) { if (price.ref.assortment != "baseline") { stop("Provided value for price.ref.assortment unknown. See package README for further details.") } } else { if (!(all(colnames(price.ref.assortment) == colnames(params.wood.value$prices.ref.assortments))) & !(all(is.numeric(price.ref.assortment$species))) & !(all(is.numeric(price.ref.assortment$price.ref.assortment)))) { stop("Structure of price.ref.assortment does not match the required structure. See package README for further details.") } price.ref.assortment <- price.ref.assortment %>% mutate( species = recode_species(species, species.code.type, "bodelschwingh.revenues") ) price.factors <- params.wood.value$prices.ref.assortments %>% rename(price.ref.assortment.bodel = price.ref.assortment) %>% left_join(price.ref.assortment, by = "species") %>% mutate(price.factor.market = .data$price.ref.assortment / .data$price.ref.assortment.bodel) wood.revenues <- wood.revenues %>% left_join(price.factors, by = "species") %>% mutate(wood.revenues = .data$wood.revenues * .data$price.factor.market) } if (any(is.na(wood.revenues$revenues.factor))) { warning("For at least one species the calamity value revenues.factor was NA.") } wood.revenues %>% pull(wood.revenues) %>% return() }
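## Sketch of the market-adjustment step above: with a user-defined reference
## price, the baseline revenues are scaled by the ratio of the supplied price
## to the baseline reference-assortment price of v. Bodelschwingh (2018). The
## 80 EUR used below is only an illustrative input, not the baseline value.
library(woodValuationDE)
wood_revenues(40, "beech")
wood_revenues(40, "beech",
              price.ref.assortment = dplyr::tibble(
                species = "beech",
                price.ref.assortment = 80))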
/scratch/gouwar.j/cran-all/cranData/woodValuationDE/R/wood_revenues.R
##--################--## #### Wood Valuation #### ##--################--## #' All steps of the monetary valuation of wood volumes over bark #' #' The function is a wrapper for the entire procedure of wood valuation provided #' by \pkg{woodValuationDE}. It estimates the share of salable (for revenues) #' and skidded volume (for harvest costs), the wood revenues, and the harvest #' costs. Finally, it derives the net revenues for the user-provided wood volume #' over bark. The underlying functions were derived based on data from #' HessenForst, the public forest service of the Federal State of Hesse in #' Germany. For further details see the \pkg{woodValuationDE} #' \href{https://github.com/Forest-Economics-Goettingen/woodValuationDE}{README}. #' #' @param volume Wood volume \eqn{[m^{3}]}{[m^3]}, referring to volume over bark #' of the trees to be harvested, as usually provided by yield #' tables and forest simulators (German unit: Vfm m.R.). #' @param diameter.q Quadratic mean of the diameter at breast height (dbh) of #' the harvested trees \eqn{[cm]}{[cm]}. #' @param species Tree species, using an available \code{species.code.type}. For #' a list with the available species and codes call #' \code{\link{get_species_codes}}. #' @param value.level Stand quality expressed as an integer of \code{1:3}, with #' \code{1} for an extraordinarily high stand quality with #' high shares of wood suitable for high-valued usages such #' as furniture, \code{2} for a moderate quality, and #' \code{3} for a low quality (e.g., trees with thick #' branches). The \code{value.level}s refer to the applied #' assortment tables of Offer and Staupendahl (2018). #' @param cost.level Accessibility of the stand for logging operations #' expressed as an integer of \code{1:3}, with \code{1} for #' standard conditions without limitations, \code{2} for #' moist sites or sites with a slope between 36 \% and 58 \%, #' and \code{3} for slopes > 58 \%. The \code{cost.level}s #' refer to the harvest cost model of v. Bodelschwingh #' (2018). #' @param logging.method Logging method, with \code{"manually"} for #' motor-manual logging using a chain saw, #' \code{"harvester"} for logging with highly mechanized #' forest harvesters, or \code{"combined"} for a #' combination of the previous methods dependent on the #' mean diameter. #' @param price.ref.assortment Wood price of the reference assortments allowing #' to consider market fluctuations. Default is #' \code{"baseline"} referring to the prices from 2010 to #' 2015 in Hesse, Germany (for details see #' \href{https://github.com/Forest-Economics-Goettingen/woodValuationDE}{README} #' of \pkg{woodValuationDE} or v. Bodelschwingh (2018)). #' Alternatively, users can provide a tibble with the #' same structure. The column species uses the specified #' \code{species.code.type}. #' @param calamity.type Defines the disturbance or calamity situation to allow #' for the consideration of lower net revenues in the case #' of salvage harvests. The calamity type determines the #' applied consequences of disturbances/calamities, #' implemented as factors for reduced revenues and higher #' harvest costs. 
By default no calamity is assumed #' \code{"none"}; \code{"calamity.dieter.2001"} #' refers to a general larger calamity applying the #' corrections according to Dieter (2001); five parameter #' sets were implemented according to Moellmann and #' Moehring (2017): \code{fire.small.moellmann.2017} refers #' to damages of only some trees by fire (only conifers) #' while \code{fire.large.moellmann.2017} assumes that at #' least one compartment was affected, the same applies for #' \code{storm.small.moellmann.2017} and #' \code{storm.large.moellmann.2017} referring to damages #' by storm (available for coniferous and deciduous #' species), \code{insects.moellmann.2017} refers to #' damages by insects; \code{"ips.fuchs.2022a"} refers to #' quality losses due to infestations by the European #' spruce bark beetle or \code{"ips.timely.fuchs.2022a"} #' for timely salvage fellings in less advanced attack #' stages (both according to Fuchs et al. 2022a); and #' \code{"stand.damage.fuchs.2022b"} to disturbances #' affecting only one stand, #' \code{"regional.disturbances.fuchs.2022b"} to #' disturbances with effects on the regional wood market #' and \code{"transregional.calamity.fuchs.2022b"} to #' calamities affecting transregional wood markets (the #' last three referring to Fuchs et al. 2022b). #' User-defined types can be implemented via the #' \code{calamity.factors} argument. #' @param calamity.factors Summands \eqn{[EUR m^{-3}]}{[EUR m^(-3)]} #' and factors to consider the consequences of #' disturbances and calamities on wood revenues and #' harvest costs. \code{"baseline"} provides a tibble #' based on the references listed in #' \code{calamity.type} (for details see #' \href{https://github.com/Forest-Economics-Goettingen/woodValuationDE}{README} #' of \pkg{woodValuationDE}). Alternatively, users can #' provide a tibble with the same structure. #' @param species.code.type Type of code in which \code{species} is given. #' \code{"en"} for English species names or #' \code{"nds"} for numeric species codes used in Lower #' Saxony, Germany. For a list with the available #' species and codes call #' \code{\link{get_species_codes}}. #' @param method argument that is currently not used, but offers the possibility #' to implement alternative parameters and functions in the #' future. #' @return A tibble with all steps of the wood valuation (harvest quantities, #' harvest costs \eqn{[EUR m^{-3}]}{[EUR m^(-3)]}, wood revenues #' \eqn{[EUR m^{-3}]}{[EUR m^(-3)]}, and total net revenues #' \eqn{[EUR]}{[EUR]}). #' @references Dieter, Matthias (2001): Land expectation values for spruce and #' beech calculated with Monte Carlo modelling techniques. For. #' Policy Econ. 2 (2), S. 157-166. #' \doi{10.1016/S1389-9341(01)00045-4}. #' @references Fuchs, Jasper M.; Hittenbeck, Anika; Brandl, Susanne; Schmidt, #' Matthias; Paul, Carola (2022a): Adaptation Strategies for #' Spruce Forests - Economic Potential of Bark Beetle Management and #' Douglas Fir Cultivation in Future Tree Species Portfolios. #' Forestry 95 (2) 229-246. \doi{10.1093/forestry/cpab040} #' @references Fuchs, Jasper M.; v. Bodelschwingh, Hilmar; Lange, Alexander; #' Paul, Carola; Husmann, Kai (2022b): Quantifying the #' consequences of disturbances on wood revenues with Impulse #' Response Functions. For. Policy Econ. 140, art. 102738. #' \doi{10.1016/j.forpol.2022.102738}. #' @references Fuchs, Jasper M.; Husmann, Kai; v. 
Bodelschwingh, Hilmar; Koster, #' Roman; Staupendahl, Kai; Offer, Armin; Moehring, Bernhard, Paul, #' Carola (in preparation): woodValuationDE: A consistent framework #' for calculating stumpage values in Germany (technical note) #' @references Moellmann, Torsten B.; Moehring, Bernhard (2017): A practical way #' to integrate risk in forest management decisions. Ann. For. Sci. #' 74 (4), S.75. \doi{10.1007/s13595-017-0670-x} #' @references Offer, Armin; Staupendahl, Kai (2018): Holzwerbungskosten- und #' Bestandessortentafeln (Wood Harvest Cost and Assortment #' Tables). Kassel: HessenForst (publisher). #' @references v. Bodelschwingh, Hilmar (2018): Oekonomische Potentiale von #' Waldbestaenden. Konzeption und Abschaetzung im Rahmen einer #' Fallstudie in hessischen Staatswaldflaechen (Economic Potentials #' of Forest Stands and Their Consideration in Strategic Decisions). #' Bad Orb: J.D. Sauerlaender`s Verlag (Schriften zur Forst- und #' Umweltoekonomie, 47). #' @examples #' wood_valuation(1, #' 40, #' "beech") #' #' # species codes Lower Saxony (Germany) #' wood_valuation(seq(10, 70, 20), #' 40, #' 211, #' species.code.type = "nds") #' #' # vector input #' wood_valuation(10, #' seq(20, 50, 5), #' "spruce") #' #' wood_valuation(10, #' 40, #' rep(c("beech", "spruce"), #' each = 9), #' value.level = rep(rep(1:3, 2), #' each = 3), #' cost.level = rep(1:3, 6)) #' #' wood_valuation(10, #' 40, #' rep("spruce", 6), #' calamity.type = c("none", #' "ips.fuchs.2022a", #' "ips.timely.fuchs.2022a", #' "stand.damage.fuchs.2022b", #' "regional.disturbance.fuchs.2022b", #' "transregional.calamity.fuchs.2022b")) #' #' # user-defined calamities with respective changes in harvest costs and wood revenues #' wood_valuation(10, #' 40, #' rep("spruce", 3), #' calamity.type = c("none", #' "my.own.calamity.1", #' "my.own.calamity.2"), #' calamity.factors = dplyr::tibble( #' calamity.type = rep(c("none", #' "my.own.calamity.1", #' "my.own.calamity.2"), #' each = 2), #' species.group = rep(c("softwood", #' "deciduous"), #' times = 3), #' revenues.factor = c(1.0, 1.0, #' 0.8, 0.8, #' 0.2, 0.2), #' cost.factor = c(1.0, 1.0, #' 1.5, 1.5, #' 1.0, 1.0), #' cost.additional = c(0, 0, #' 0, 0, #' 5, 5))) #' #' # adapted market situation by providing alternative prices for the reference assortments #' wood_valuation(10, #' 40, #' c("oak", "beech", "spruce")) #' wood_valuation(10, #' 40, #' c("oak", "beech", "spruce"), #' price.ref.assortment = dplyr::tibble( #' species = c("oak", "beech", "spruce"), #' price.ref.assortment = c(300, 80, 50))) #' #' @import dplyr #' #' @export wood_valuation <- function( volume, diameter.q, species, value.level = 2, cost.level = 1, logging.method = "combined", price.ref.assortment = "baseline", calamity.type = "none", calamity.factors = "baseline", species.code.type = "en", method = "fuchs.orig" ) { tibble( volume = volume, diameter.q = diameter.q, species = species, cost.level = cost.level, value.level = value.level, logging.method = logging.method, calamity.type = calamity.type ) %>% mutate( vol.salable = vol_salable(diameter.q, species, value.level, logging.method, species.code.type), vol.skidded = vol_skidded(diameter.q, species, value.level, logging.method, species.code.type), wood.revenues = wood_revenues(diameter.q, species, value.level, logging.method, price.ref.assortment, calamity.type, calamity.factors, species.code.type), harvest.costs = harvest_costs(diameter.q, species, cost.level, calamity.type, calamity.factors, species.code.type), wood.net.revenue.m3 = (.data$vol.salable * 
.data$wood.revenues) - (.data$vol.skidded * .data$harvest.costs), wood.net.revenue = .data$volume * .data$wood.net.revenue.m3 ) %>% return() }
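## Decomposition sketch (not part of the package source): the net revenue per
## cubic meter over bark equals the salable share times the revenues per
## salable cubic meter minus the skidded share times the harvest costs per
## skidded cubic meter, which is what wood.net.revenue.m3 reports.
library(woodValuationDE)
val <- wood_valuation(1, 40, "spruce")
with(val, vol.salable * wood.revenues - vol.skidded * harvest.costs)
val$wood.net.revenue.m3   # identical by construction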
/scratch/gouwar.j/cran-all/cranData/woodValuationDE/R/wood_valuation.R
#' admnrev #' #' Wooldridge Source: Data from the National Highway Traffic Safety Administration: “A Digest of State Alcohol-Highway Safety Related Legislation,” U.S. Department of Transportation, NHTSA. I used the third (1985), eighth (1990), and 13th (1995) editions. Data loads lazily. #' #' @section Notes: This is not so much a data set as a summary of so-called “administrative per se” laws at the state level, for three different years. It could be supplemented with drunk-driving fatalities for a nice econometric analysis. In addition, the data for 2000 or later years can be added, forming the basis for a term project. Many other explanatory variables could be included. Unemployment rates, state-level tax rates on alcohol, and membership in MADD are just a few possibilities. #' #' Used in Text: not used #' #' @docType data #' #' @usage data('admnrev') #' #' @format A data.frame with 153 observations on 5 variables: #' \itemize{ #' \item \strong{state:} state postal code #' \item \strong{year:} 85, 90, or 95 #' \item \strong{admnrev:} =1 if admin. revoc. law #' \item \strong{daysfrst:} days suspended, first offense #' \item \strong{daysscnd:} days suspended, second offense #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(admnrev) "admnrev"
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/admnrev.R
#' affairs #' #' Wooldridge Source: R.C. Fair (1978), “A Theory of Extramarital Affairs,” Journal of Political Economy 86, 45-61, 1978. I collected the data from Professor Fair’s web site at the Yale University Department of Economics. He originally obtained the data from a survey by Psychology Today. Data loads lazily. #' #' @section Notes: This is an interesting data set for problem sets starting in Chapter 7. Even though naffairs (number of extramarital affairs a woman reports) is a count variable, a linear model can be used to model its conditional mean as an approximation. Or, you could ask the students to estimate a linear probability model for the binary indicator affair, equal to one if the woman reports having any extramarital affairs. One possibility is to test whether putting the single marriage rating variable, ratemarr, is enough, against the alternative that a full set of dummy variables is needed; see pages 239-240 for a similar example. This is also a good data set to illustrate Poisson regression (using naffairs) in Section 17.3 or probit and logit (using affair) in Section 17.1. #' #' Used in Text: not used #' #' @docType data #' #' @usage data('affairs') #' #' @format A data.frame with 601 observations on 19 variables: #' \itemize{ #' \item \strong{id:} identifier #' \item \strong{male:} =1 if male #' \item \strong{age:} in years #' \item \strong{yrsmarr:} years married #' \item \strong{kids:} =1 if have kids #' \item \strong{relig:} 5 = very relig., 4 = somewhat, 3 = slightly, 2 = not at all, 1 = anti #' \item \strong{educ:} years schooling #' \item \strong{occup:} occupation, reverse Hollingshead scale #' \item \strong{ratemarr:} 5 = vry hap marr, 4 = hap than avg, 3 = avg, 2 = smewht unhap, 1 = vry unhap #' \item \strong{naffairs:} number of affairs within last year #' \item \strong{affair:} =1 if had at least one affair #' \item \strong{vryhap:} ratemarr == 5 #' \item \strong{hapavg:} ratemarr == 4 #' \item \strong{avgmarr:} ratemarr == 3 #' \item \strong{unhap:} ratemarr == 2 #' \item \strong{vryrel:} relig == 5 #' \item \strong{smerel:} relig == 4 #' \item \strong{slghtrel:} relig == 3 #' \item \strong{notrel:} relig == 2 #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(affairs) "affairs"
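## Hedged illustration of the analyses suggested in the notes above (not from
## the original documentation): a linear probability model for the binary
## indicator 'affair' and a Poisson regression for the count 'naffairs'. The
## chosen regressors are only one plausible specification.
data("affairs", package = "wooldridge")
lpm <- lm(affair ~ male + age + yrsmarr + kids + relig + educ + ratemarr,
          data = affairs)
pois <- glm(naffairs ~ male + age + yrsmarr + kids + relig + educ + ratemarr,
            family = poisson, data = affairs)
summary(lpm)
summary(pois)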
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/affairs.R
#' airfare #' #' Wooldridge Source: Jiyoung Kwon, a former doctoral student in economics at MSU, kindly provided these data, which she obtained from the Domestic Airline Fares Consumer Report by the U.S. Department of Transportation. Data loads lazily. #' #' @section Notes: This data set nicely illustrates the different estimates obtained when applying pooled OLS, random effects, and fixed effects. #' #' Used in Text: pages 506-507, 581 #' #' @docType data #' #' @usage data('airfare') #' #' @format A data.frame with 4596 observations on 14 variables: #' \itemize{ #' \item \strong{year:} 1997, 1998, 1999, 2000 #' \item \strong{id:} route identifier #' \item \strong{dist:} distance, in miles #' \item \strong{passen:} avg. passengers per day #' \item \strong{fare:} avg. one-way fare, $ #' \item \strong{bmktshr:} fraction market, biggest carrier #' \item \strong{ldist:} log(distance) #' \item \strong{y98:} =1 if year == 1998 #' \item \strong{y99:} =1 if year == 1999 #' \item \strong{y00:} =1 if year == 2000 #' \item \strong{lfare:} log(fare) #' \item \strong{ldistsq:} ldist^2 #' \item \strong{concen:} = bmktshr #' \item \strong{lpassen:} log(passen) #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(airfare) "airfare"
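## Hedged illustration of the pooled OLS versus fixed effects comparison
## mentioned in the notes (not from the original documentation). The plm
## package and the exact specification are assumptions for this sketch; the
## time-invariant distance terms are omitted from the within (FE) model.
data("airfare", package = "wooldridge")
library(plm)
pooled <- plm(lfare ~ concen + ldist + ldistsq + y98 + y99 + y00,
              data = airfare, index = c("id", "year"), model = "pooling")
fe <- plm(lfare ~ concen + y98 + y99 + y00,
          data = airfare, index = c("id", "year"), model = "within")
summary(pooled)
summary(fe)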
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/airfare.R
#' alcohol #' #' Wooldridge Source: Terza, J.V. (2002), “Alcohol Abuse and Employment: A Second Look,” Journal of Applied Econometrics 17, 393-404. I obtained these data from the Journal of Applied Econometrics data archive at http://qed.econ.queensu.ca/jae/. Data loads lazily. #' #' @section #' #' Used in Text: page 629 #' #' @docType data #' #' @usage data('alcohol') #' #' @format A data.frame with 9822 observations on 33 variables: #' \itemize{ #' \item \strong{abuse:} =1 if abuse alcohol #' \item \strong{status:} out of workforce = 1; unemployed = 2, employed = 3 #' \item \strong{unemrate:} state unemployment rate #' \item \strong{age:} age in years #' \item \strong{educ:} years of schooling #' \item \strong{married:} =1 if married #' \item \strong{famsize:} family size #' \item \strong{white:} =1 if white #' \item \strong{exhealth:} =1 if in excellent health #' \item \strong{vghealth:} =1 if in very good health #' \item \strong{goodhealth:} =1 if in good health #' \item \strong{fairhealth:} =1 if in fair health #' \item \strong{northeast:} =1 if live in northeast #' \item \strong{midwest:} =1 if live in midwest #' \item \strong{south:} =1 if live in south #' \item \strong{centcity:} =1 if live in central city of MSA #' \item \strong{outercity:} =1 if in outer city of MSA #' \item \strong{qrt1:} =1 if interviewed in first quarter #' \item \strong{qrt2:} =1 if interviewed in second quarter #' \item \strong{qrt3:} =1 if interviewed in third quarter #' \item \strong{beertax:} state excise tax, $ per gallon #' \item \strong{cigtax:} state cigarette tax, cents per pack #' \item \strong{ethanol:} state per-capita ethanol consumption #' \item \strong{mothalc:} =1 if mother an alcoholic #' \item \strong{fathalc:} =1 if father an alcoholic #' \item \strong{livealc:} =1 if lived with alcoholic #' \item \strong{inwf:} =1 if status > 1 #' \item \strong{employ:} =1 if employed #' \item \strong{agesq:} age^2 #' \item \strong{beertaxsq:} beertax^2 #' \item \strong{cigtaxsq:} cigtax^2 #' \item \strong{ethanolsq:} ethanol^2 #' \item \strong{educsq:} educ^2 #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(alcohol) "alcohol"
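## Hedged illustration (not from the original documentation): a simple linear
## probability model relating employment to alcohol abuse with a few of the
## available controls; the specification is only for demonstration purposes.
data("alcohol", package = "wooldridge")
lpm.emp <- lm(employ ~ abuse + age + agesq + educ + educsq + married +
                famsize + white, data = alcohol)
summary(lpm.emp)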
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/alcohol.R
#' apple #' #' Wooldridge Source: These data were used in the doctoral dissertation of Jeffrey Blend, Department of Agricultural Economics, Michigan State University, 1998. The thesis was supervised by Professor Eileen van Ravensway. Drs. Blend and van Ravensway kindly provided the data, which were obtained from a telephone survey conducted by the Institute for Public Policy and Social Research at MSU. Data loads lazily. #' #' @section Notes: This data set is close to a true experimental data set because the price pairs facing a family were randomly determined. In other words, the family head was presented with prices for the eco-labeled and regular apples, and then asked how much of each kind of apple the family would buy at the given prices. As predicted by basic economics, the own price effect is negative (and strong) and the cross price effect is positive (and strong). While the main dependent variable, ecolbs, piles up at zero, estimating a linear model is still worthwhile. Interestingly, because the survey design induces a strong positive correlation between the prices of eco-labeled and regular apples, there is an omitted variable problem if either of the price variables is dropped from the demand equation. A good exam question is to show a simple regression of ecolbs on ecoprc and then a multiple regression on both prices, and ask students to decide whether the price variables must be positively or negatively correlated. #' #' Used in Text: pages 201, 223, 266, 626-627 #' #' @docType data #' #' @usage data('apple') #' #' @format A data.frame with 660 observations on 17 variables: #' \itemize{ #' \item \strong{id:} respondent identifier #' \item \strong{educ:} years schooling #' \item \strong{date:} date: month/day/year #' \item \strong{state:} home state #' \item \strong{regprc:} price of regular apples #' \item \strong{ecoprc:} price of ecolabeled apples #' \item \strong{inseason:} =1 if interviewed in Nov. #' \item \strong{hhsize:} household size #' \item \strong{male:} =1 if male #' \item \strong{faminc:} family income, thousands #' \item \strong{age:} in years #' \item \strong{reglbs:} quantity regular apples, pounds #' \item \strong{ecolbs:} quantity ecolabeled apples, lbs #' \item \strong{numlt5:} # in household younger than 5 #' \item \strong{num5_17:} # in household 5 to 17 #' \item \strong{num18_64:} # in household 18 to 64 #' \item \strong{numgt64:} # in household older than 64 #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(apple) "apple"
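## Hedged illustration of the omitted-variable point in the notes above: the
## simple regression of ecolbs on its own price versus the multiple regression
## that also controls for the (positively correlated) price of regular apples.
data("apple", package = "wooldridge")
summary(lm(ecolbs ~ ecoprc, data = apple))
summary(lm(ecolbs ~ ecoprc + regprc, data = apple))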
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/apple.R
#' approval #' #' Wooldridge Source: Harbridge, L., J. Krosnick, and J.M. Wooldridge (forthcoming), “Presidential Approval and Gas Prices: Sociotropic or Pocketbook Influence?” in New Explorations in Political Psychology, ed. J. Krosnick. New York: Psychology Press (Taylor and Francis Group). Professor Harbridge kindly provided the data, of which I have used a subset. Data loads lazily. #' #' @section #' #' Used in Text: 343, 371, 400 #' #' @docType data #' #' @usage data('approval') #' #' @format A data.frame with 78 observations on 16 variables: #' \itemize{ #' \item \strong{id: }{id} #' \item \strong{month: }{month} #' \item \strong{year: }{year} #' \item \strong{sp500: }{S&P 500 index} #' \item \strong{cpi: }{Consumer Price Index} #' \item \strong{cpifood: }{CPI for food} #' \item \strong{approve: }{Gallup approval rate, percent} #' \item \strong{gasprice: }{average gas price, cents} #' \item \strong{unemploy: }{unemployment rate, percent} #' \item \strong{katrina: }{=1 for three months after Hurricane Katrina} #' \item \strong{rgasprice: }{real gas price, 100*(gasprice/cpi)} #' \item \strong{lrgasprice: }{log(rgasprice)} #' \item \strong{sep11: }{=1 for 09/2001 and two months following} #' \item \strong{iraqinvade: }{=1 for three months after Iraq invasion} #' \item \strong{lsp500: }{log(sp500)} #' \item \strong{lcpifood: }{log(cpifood)} #' } #' @source \url{http://www.cengage.com/c/introductory-econometrics-a-modern-approach-6e-wooldridge} #' @examples str(approval) "approval"
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/approval.R
#' athlet1 #' #' Wooldridge Sources: Peterson's Guide to Four Year Colleges, 1994 and 1995 (24th and 25th editions). Princeton University Press. Princeton, NJ. The Official 1995 College Basketball Records Book, 1994, NCAA. 1995 Information Please Sports Almanac (6th edition). Houghton Mifflin. New York, NY. Data loads lazily. #' #' @section Notes: These data were collected by Patrick Tulloch, an MSU economics major, for a term project. The “athletic success” variables are for the year prior to the enrollment and academic data. Updating these data to get a longer stretch of years, and including appearances in the “Sweet 16” NCAA basketball tournaments, would make for a more convincing analysis. With the growing popularity of women’s sports, especially basketball, an analysis that includes success in women’s athletics would be interesting. #' #' Used in Text: page 697 #' #' @docType data #' #' @usage data('athlet1') #' #' @format A data.frame with 118 observations on 23 variables: #' \itemize{ #' \item \strong{year:} 1992 or 1993 #' \item \strong{apps:} # applics for admission #' \item \strong{top25:} perc frsh class in 25 hs perc #' \item \strong{ver500:} perc frsh >= 500 on verbal SAT #' \item \strong{mth500:} perc frsh >= 500 on math SAT #' \item \strong{stufac:} student-faculty ratio #' \item \strong{bowl:} = 1 if bowl game in prev yr #' \item \strong{btitle:} = 1 if men's cnf chmps prv yr #' \item \strong{finfour:} = 1 if men's final 4 prv yr #' \item \strong{lapps:} log(apps) #' \item \strong{d93:} =1 if year = 1993 #' \item \strong{avg500:} (ver500+mth500)/2 #' \item \strong{cfinfour:} change in finfour #' \item \strong{clapps:} change in lapps #' \item \strong{cstufac:} change in stufac #' \item \strong{cbowl:} change in bowl #' \item \strong{cavg500:} change in avg500 #' \item \strong{cbtitle:} change in btitle #' \item \strong{lapps_1:} lapps lagged #' \item \strong{school:} name of university #' \item \strong{ctop25:} change in top25 #' \item \strong{bball:} =1 if btitle or finfour #' \item \strong{cbball:} change in bball #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(athlet1) "athlet1"
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/athlet1.R
#' athlet2 #' #' Wooldridge Sources: Peterson's Guide to Four Year Colleges, 1995 (25th edition). Princeton University Press. 1995 Information Please Sports Almanac (6th edition). Houghton Mifflin. New York, NY. Data loads lazily. #' #' @section Notes: These data were collected by Paul Anderson, an MSU economics major, for a term project. The score from football outcomes for natural rivals (Michigan-Michigan State, California-Stanford, Florida-Florida State, to name a few) is matched with application and academic data. The application and tuition data are for Fall 1994. Football records and scores are from the 1993 football season. Extending these data to obtain a long stretch of panel data, and adding other “natural” rivals, could be very interesting. #' #' Used in Text: page 697 #' #' @docType data #' #' @usage data('athlet2') #' #' @format A data.frame with 30 observations on 10 variables: #' \itemize{ #' \item \strong{dscore:} home scr. - vist. scr., 1993 #' \item \strong{dinstt:} diff. in-state tuit., 1994 #' \item \strong{doutstt:} diff. out-state tuit., 1994 #' \item \strong{htpriv:} =1 if home team priv. sch. #' \item \strong{vtpriv:} =1 if vist. team priv. sch. #' \item \strong{dapps:} diff. in applications, 1994 #' \item \strong{htwrd:} =1 if home win. record, 1993 #' \item \strong{vtwrd:} =1 if vist. win. record, 1993 #' \item \strong{dwinrec:} htwrd - vtwrd #' \item \strong{dpriv:} htpriv - vtpriv #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(athlet2) "athlet2"
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/athlet2.R
#' attend #' #' Wooldridge Source: These data were collected by Professors Ronald Fisher and Carl Liedholm during a term in which they both taught principles of microeconomics at Michigan State University. Professors Fisher and Liedholm kindly gave me permission to use a random subset of their data, and their research assistant at the time, Jeffrey Guilfoyle, who completed his Ph.D. in economics at MSU, provided helpful hints. Data loads lazily. #' #' @section Notes: The attendance figures were obtained by requiring students to slide their ID cards through a magnetic card reader, under the supervision of a teaching assistant. You might have the students use final, rather than the standardized variable, so that they can see the statistical significance of each variable remains exactly the same. The standardized variable is used only so that the coefficients measure effects in terms of standard deviations from the average score. #' #' Used in Text: pages 111, 152, 199-200, 222 #' #' @docType data #' #' @usage data('attend') #' #' @format A data.frame with 680 observations on 11 variables: #' \itemize{ #' \item \strong{attend:} classes attended out of 32 #' \item \strong{termGPA:} GPA for term #' \item \strong{priGPA:} cumulative GPA prior to term #' \item \strong{ACT:} ACT score #' \item \strong{final:} final exam score #' \item \strong{atndrte:} percent classes attended #' \item \strong{hwrte:} percent homework turned in #' \item \strong{frosh:} =1 if freshman #' \item \strong{soph:} =1 if sophomore #' \item \strong{missed:} number of classes missed #' \item \strong{stndfnl:} (final - mean)/sd #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(attend) "attend"
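# Illustrative sketch (not part of the original documentation): the note above points out
# that replacing the standardized score stndfnl with the raw score final leaves the
# statistical significance of each variable unchanged and only rescales the coefficients.
# Assumes the wooldridge package is attached; the regressor list is only an example.
library(wooldridge)
m_std <- lm(stndfnl ~ atndrte + priGPA + ACT, data = attend)  # effects in standard deviations
m_raw <- lm(final   ~ atndrte + priGPA + ACT, data = attend)  # effects in exam points
summary(m_std)$coefficients
summary(m_raw)$coefficients  # slope t statistics are identical; only the scale changes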
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/attend.R
#' audit #' #' Wooldridge Source: These data come from a 1988 Urban Institute audit study in the Washington, D.C. area. I obtained them from the article “The Urban Institute Audit Studies: Their Methods and Findings,” by James J. Heckman and Peter Siegelman. In Fix, M. and Struyk, R., eds., Clear and Convincing Evidence: Measurement of Discrimination in America. Washington, D.C.: Urban Institute Press, 1993, 187-258. Data loads lazily. #' #' @section #' #' Used in Text: pages 776-777, 784, 787 #' #' @docType data #' #' @usage data('audit') #' #' @format A data.frame with 241 observations on 3 variables: #' \itemize{ #' \item \strong{w:} =1 if white app. got job offer #' \item \strong{b:} =1 if black app. got job offer #' \item \strong{y:} b - w #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(audit) "audit"
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/audit.R
#' barium #' #' Wooldridge Source: C.M. Krupp and P.S. Pollard (1999), “Market Responses to Antidumping Laws: Some Evidence from the U.S. Chemical Industry,” Canadian Journal of Economics 29, 199-227. Dr. Krupp kindly provided the data. They are monthly data covering February 1978 through December 1988. Data loads lazily. #' #' @section Note: Rather than just having intercept shifts for the different regimes, one could conduct a full Chow test across the different regimes. #' #' Used in Text: pages 361-362, 372, 377, 426, 442-443, 445, 663, 665, 672 #' #' @docType data #' #' @usage data('barium') #' #' @format A data.frame with 131 observations on 31 variables: #' \itemize{ #' \item \strong{chnimp:} Chinese imports, bar. chl. #' \item \strong{bchlimp:} total imports bar. chl. #' \item \strong{befile6:} =1 for all 6 mos before filing #' \item \strong{affile6:} =1 for all 6 mos after filing #' \item \strong{afdec6:} =1 for all 6 mos after decision #' \item \strong{befile12:} =1 all 12 mos before filing #' \item \strong{affile12:} =1 all 12 mos after filing #' \item \strong{afdec12:} =1 all 12 mos after decision #' \item \strong{chempi:} chemical production index #' \item \strong{gas:} gasoline production #' \item \strong{rtwex:} exchange rate index #' \item \strong{spr:} =1 for spring months #' \item \strong{sum:} =1 for summer months #' \item \strong{fall:} =1 for fall months #' \item \strong{lchnimp:} log(chnimp) #' \item \strong{lgas:} log(gas) #' \item \strong{lrtwex:} log(rtwex) #' \item \strong{lchempi:} log(chempi) #' \item \strong{t:} time trend #' \item \strong{feb:} =1 if month is feb #' \item \strong{mar:} =1 if month is march #' \item \strong{apr:} #' \item \strong{may:} #' \item \strong{jun:} #' \item \strong{jul:} #' \item \strong{aug:} #' \item \strong{sep:} #' \item \strong{oct:} #' \item \strong{nov:} #' \item \strong{dec:} #' \item \strong{percchn:} percent imports from china #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(barium) "barium"
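# Illustrative sketch (not part of the original documentation): the note above suggests a
# full Chow test across regimes instead of intercept shifts only. One hypothetical way to do
# this is to interact a regime dummy (here afdec6) with the other regressors and test the
# interactions jointly with an F test; the regressor list is only an example. Assumes the
# wooldridge package is attached.
library(wooldridge)
restr   <- lm(lchnimp ~ lchempi + lgas + lrtwex + afdec6, data = barium)      # intercept shift only
unrestr <- lm(lchnimp ~ (lchempi + lgas + lrtwex) * afdec6, data = barium)    # slopes shift too
anova(restr, unrestr)  # F test of equal slopes across the two regimes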
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/barium.R
#' beauty #' #' Wooldridge Source: Hamermesh, D.S. and J.E. Biddle (1994), “Beauty and the Labor Market,” American Economic Review 84, 1174-1194. Professor Hamermesh kindly provided me with the data. For manageability, I have included only a subset of the variables, which results in somewhat larger sample sizes than reported for the regressions in the Hamermesh and Biddle paper. Data loads lazily. #' #' @section #' #' Used in Text: pages 238-239, 265-266 #' #' @docType data #' #' @usage data('beauty') #' #' @format A data.frame with 1260 observations on 17 variables: #' \itemize{ #' \item \strong{wage:} hourly wage #' \item \strong{lwage:} log(wage) #' \item \strong{belavg:} =1 if looks <= 2 #' \item \strong{abvavg:} =1 if looks >=4 #' \item \strong{exper:} years of workforce experience #' \item \strong{looks:} from 1 to 5 #' \item \strong{union:} =1 if union member #' \item \strong{goodhlth:} =1 if good health #' \item \strong{black:} =1 if black #' \item \strong{female:} =1 if female #' \item \strong{married:} =1 if married #' \item \strong{south:} =1 if live in south #' \item \strong{bigcity:} =1 if live in big city #' \item \strong{smllcity:} =1 if live in small city #' \item \strong{service:} =1 if service industry #' \item \strong{expersq:} exper^2 #' \item \strong{educ:} years of schooling #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(beauty) "beauty"
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/beauty.R
#' benefits #' #' Wooldridge Data loads lazily. #' #' @section #' #' #' #' @docType data #' #' @usage data('benefits') #' #' @format A data.frame with 1848 observations on 18 variables: #' \itemize{ #' \item \strong{distid:} district identifier #' \item \strong{schid:} school identifier #' \item \strong{lunch:} percent eligible, free lunch #' \item \strong{enroll:} school enrollment #' \item \strong{staff:} staff per 1000 students #' \item \strong{exppp:} expenditures per pupil #' \item \strong{avgsal:} average teacher salary, $ #' \item \strong{avgben:} average teacher non-salary benefits, $ #' \item \strong{math4:} percent passing 4th grade math test #' \item \strong{story4:} percent passing 4th grade reading test #' \item \strong{bs:} avgben/avgsal #' \item \strong{lavgsal:} log(avgsal) #' \item \strong{lenroll:} log(enroll) #' \item \strong{lstaff:} log(staff) #' \item \strong{bsbar:} within-district avg of bs #' \item \strong{lunchbar:} within-district avg of lunch #' \item \strong{lenrollbar:} within-district avg of lenroll #' \item \strong{lstaffbar:} within-district avg of lstaff #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(benefits) "benefits"
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/benefits.R
#' beveridge #' #' Wooldridge Data loads lazily. #' #' @section #' #' #' #' @docType data #' #' @usage data('beveridge') #' #' @format A data.frame with 135 observations on 8 variables: #' \itemize{ #' \item \strong{month:} dec 2000 through feb 2012 #' \item \strong{urate:} unemployment rate, percent #' \item \strong{vrate:} vacancy rate, percent #' \item \strong{t:} linear time trend #' \item \strong{urate_1:} L.urate #' \item \strong{vrate_1:} L.vrate #' \item \strong{curate:} D.urate #' \item \strong{cvrate:} D.vrate #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(beveridge) "beveridge"
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/beveridge.R
#' big9salary #' #' Wooldridge Source: O. Baser and E. Pema (2003), “The Return of Publications for Economics Faculty,” Economics Bulletin 1, 1-13. Professors Baser and Pema kindly provided the data. Data loads lazily. #' #' @section Notes: This is an unbalanced panel data set in the sense that as many as three years of data are available for each faculty member but where some have fewer than three years. It is not clear that something like a fixed effects or first differencing analysis makes sense: in effect, approaches that remove the heterogeneity control for too much by controlling for unobserved heterogeneity which, in this case, includes faculty intelligence, talent, and motivation. Presumably these factors enter into the publication index. It is hard to think we want to hold the main factors driving productivity fixed when trying to measure the effect of productivity on salary. Pooled OLS regression with “cluster robust” standard errors seems more natural. On the other hand, if we want to measure the return to having a degree from a top 20 Ph.D. program then we would want to control for factors that cause selection into a top 20 program. Unfortunately, this variable does not change over time, and so FD and FE are not applicable. #' #' Used in Text: not used #' #' @docType data #' #' @usage data('big9salary') #' #' @format A data.frame with 786 observations on 30 variables: #' \itemize{ #' \item \strong{id:} person identifier #' \item \strong{year:} 92, 95, or 99 #' \item \strong{salary:} annual salary, $ #' \item \strong{pubindx:} publication index #' \item \strong{totpge:} standardized total article pages #' \item \strong{assist:} =1 if assistant professor #' \item \strong{assoc:} =1 if associate professor #' \item \strong{prof:} =1 if full professor #' \item \strong{chair:} =1 if department chair #' \item \strong{top20phd:} =1 if Ph.D. from top 20 dept. #' \item \strong{yearphd:} year Ph.D. obtained #' \item \strong{female:} =1 if female #' \item \strong{osu:} =1 if Ohio State U. #' \item \strong{iowa:} =1 if U. Iowa #' \item \strong{indiana:} =1 if Indiana U. #' \item \strong{purdue:} =1 if Purdue U. #' \item \strong{msu:} =1 if Michigan State U. #' \item \strong{minn:} =1 if U. Minnesota #' \item \strong{mich:} =1 if U. Michigan #' \item \strong{wisc:} =1 if U. Wisconsin #' \item \strong{illinois:} =1 if U. Illinois #' \item \strong{y92:} =1 if year == 92 #' \item \strong{y95:} =1 if year == 95 #' \item \strong{y99:} =1 if year == 99 #' \item \strong{lsalary:} log(salary) #' \item \strong{exper:} years since first teaching job #' \item \strong{expersq:} exper^2 #' \item \strong{pubindxsq:} pubindx^2 #' \item \strong{pubindx0:} =1 if pubindx == 0 #' \item \strong{lpubindx:} log(pubindx) if pubindx > 0 #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(big9salary) "big9salary"
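# Illustrative sketch (not part of the original documentation): the note above argues for
# pooled OLS with cluster-robust standard errors rather than FE or FD. A minimal version,
# assuming the wooldridge, sandwich, and lmtest packages are available; the regressor list
# is only an example.
library(wooldridge); library(sandwich); library(lmtest)
pols <- lm(lsalary ~ pubindx + exper + expersq + top20phd + female + y95 + y99,
           data = big9salary)
coeftest(pols, vcov = vcovCL(pols, cluster = ~ id))  # standard errors clustered by person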
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/big9salary.R
#' bwght #' #' Wooldridge Source: J. Mullahy (1997), “Instrumental-Variable Estimation of Count Data Models: Applications to Models of Cigarette Smoking Behavior,” Review of Economics and Statistics 79, 586-593. Professor Mullahy kindly provided the data. He obtained them from the 1988 National Health Interview Survey. Data loads lazily. #' #' @section #' #' Used in Text: pages 18, 61, 110, 151, 165, 178, 184, 187-188, 258-259, 522-523 #' #' @docType data #' #' @usage data('bwght') #' #' @format A data.frame with 1388 observations on 14 variables: #' \itemize{ #' \item \strong{faminc:} 1988 family income, $1000s #' \item \strong{cigtax:} cig. tax in home state, 1988 #' \item \strong{cigprice:} cig. price in home state, 1988 #' \item \strong{bwght:} birth weight, ounces #' \item \strong{fatheduc:} father's yrs of educ #' \item \strong{motheduc:} mother's yrs of educ #' \item \strong{parity:} birth order of child #' \item \strong{male:} =1 if male child #' \item \strong{white:} =1 if white #' \item \strong{cigs:} cigs smked per day while preg #' \item \strong{lbwght:} log of bwght #' \item \strong{bwghtlbs:} birth weight, pounds #' \item \strong{packs:} packs smked per day while preg #' \item \strong{lfaminc:} log(faminc) #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(bwght) "bwght"
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/bwght.R
#' bwght2 #' #' Wooldridge Source: Dr. Zhehui Luo, a recent MSU Ph.D. in economics and Visiting Research Associate in the Department of Epidemiology at MSU, kindly provided these data. She obtained them from state files linking birth and infant death certificates, and from the National Center for Health Statistics natality and mortality data. Data loads lazily. #' #' @section Notes: There are many possibilities with this data set. In addition to number of prenatal visits, smoking and alcohol consumption (during pregnancy) are included as explanatory variables. These can be added to equations of the kind found in Exercise C6.10. In addition, the one- and five-minute APGAR scores are included. These are measures of the well being of infants just after birth. An interesting feature of the score is that it is bounded between zero and 10, making a linear model less than ideal. Still, a linear model would be informative, and you might ask students about predicted values less than zero or greater than 10. #' #' Used in Text: pages 184, 223 #' #' @docType data #' #' @usage data('bwght2') #' #' @format A data.frame with 1832 observations on 23 variables: #' \itemize{ #' \item \strong{mage:} mother's age, years #' \item \strong{meduc:} mother's educ, years #' \item \strong{monpre:} month prenatal care began #' \item \strong{npvis:} total number of prenatal visits #' \item \strong{fage:} father's age, years #' \item \strong{feduc:} father's educ, years #' \item \strong{bwght:} birth weight, grams #' \item \strong{omaps:} one minute apgar score #' \item \strong{fmaps:} five minute apgar score #' \item \strong{cigs:} avg cigarettes per day #' \item \strong{drink:} avg drinks per week #' \item \strong{lbw:} =1 if bwght <= 2000 #' \item \strong{vlbw:} =1 if bwght <= 1500 #' \item \strong{male:} =1 if baby male #' \item \strong{mwhte:} =1 if mother white #' \item \strong{mblck:} =1 if mother black #' \item \strong{moth:} =1 if mother is other #' \item \strong{fwhte:} =1 if father white #' \item \strong{fblck:} =1 if father black #' \item \strong{foth:} =1 if father is other #' \item \strong{lbwght:} log(bwght) #' \item \strong{magesq:} mage^2 #' \item \strong{npvissq:} npvis^2 #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(bwght2) "bwght2"
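# Illustrative sketch (not part of the original documentation): the note above suggests
# fitting a linear model for an apgar score, which is bounded between 0 and 10, and asking
# whether any fitted values fall outside that range. The regressor choice here is only an
# example. Assumes the wooldridge package is attached.
library(wooldridge)
ap <- lm(fmaps ~ npvis + cigs + drink + mage + male, data = bwght2)
range(fitted(ap))                        # are any predictions below 0 or above 10?
mean(fitted(ap) < 0 | fitted(ap) > 10)   # share of out-of-range predictions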
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/bwght2.R
#' campus #' #' Wooldridge Source: These data were collected by Daniel Martin, a former MSU undergraduate, for a final project. They come from the FBI Uniform Crime Reports and are for the year 1992. Data loads lazily. #' #' @section Notes: Colleges and universities are now required to provide much better, more detailed crime data. A very rich data set can now be obtained, even a panel data set for colleges across different years. Statistics on male/female ratios, fraction of men/women in fraternities or sororities, policy variables – such as a “safe house” for women on campus, as was started at MSU in 1994 – could be added as explanatory variables. The crime rate in the host town would be a good control. #' #' Used in Text: pages 131-132 #' #' @docType data #' #' @usage data('campus') #' #' @format A data.frame with 97 observations on 7 variables: #' \itemize{ #' \item \strong{enroll:} total enrollment #' \item \strong{priv:} =1 if private college #' \item \strong{police:} employed officers #' \item \strong{crime:} total campus crimes #' \item \strong{lcrime:} log(crime) #' \item \strong{lenroll:} log(enroll) #' \item \strong{lpolice:} log(police) #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(campus) "campus"
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/campus.R
#' card #' #' Wooldridge Source: D. Card (1995), Using Geographic Variation in College Proximity to Estimate the Return to Schooling, in Aspects of Labour Market Behavior: Essays in Honour of John Vanderkamp. Ed. L.N. Christophides, E.K. Grant, and R. Swidinsky, 201-222. Toronto: University of Toronto Press. Professor Card kindly provided these data. Data loads lazily. #' #' @section Notes: Computer Exercise C15.3 is important for analyzing these data. There, it is shown that the instrumental variable, `nearc4`, is actually correlated with `IQ`, at least for the subset of men for which an IQ score is reported. However, the correlation between `nearc4` and `IQ`, once the other explanatory variables are netted out, is arguably zero. At least, it is not statistically different from zero. In other words, `nearc4` fails the exogeneity requirement in a simple regression model but it passes, at least using the crude test described above, if controls are added to the wage equation. For a more advanced course, a nice extension of Card's analysis is to allow the return to education to differ by race. A relatively simple extension is to include the interaction black*educ as an additional explanatory variable; its natural instrument is black*nearc4. #' #' Used in Text: pages 526-527, 547 #' #' @docType data #' #' @usage data('card') #' #' @format A data.frame with 3010 observations on 34 variables: #' \itemize{ #' \item \strong{id:} person identifier #' \item \strong{nearc2:} =1 if near 2 yr college, 1966 #' \item \strong{nearc4:} =1 if near 4 yr college, 1966 #' \item \strong{educ:} years of schooling, 1976 #' \item \strong{age:} in years #' \item \strong{fatheduc:} father's schooling #' \item \strong{motheduc:} mother's schooling #' \item \strong{weight:} NLS sampling weight, 1976 #' \item \strong{momdad14:} =1 if live with mom, dad at 14 #' \item \strong{sinmom14:} =1 if with single mom at 14 #' \item \strong{step14:} =1 if with step parent at 14 #' \item \strong{reg661:} =1 for region 1, 1966 #' \item \strong{reg662:} =1 for region 2, 1966 #' \item \strong{reg663:} =1 for region 3, 1966 #' \item \strong{reg664:} =1 for region 4, 1966 #' \item \strong{reg665:} =1 for region 5, 1966 #' \item \strong{reg666:} =1 for region 6, 1966 #' \item \strong{reg667:} =1 for region 7, 1966 #' \item \strong{reg668:} =1 for region 8, 1966 #' \item \strong{reg669:} =1 for region 9, 1966 #' \item \strong{south66:} =1 if in south in 1966 #' \item \strong{black:} =1 if black #' \item \strong{smsa:} =1 if in SMSA, 1976 #' \item \strong{south:} =1 if in south, 1976 #' \item \strong{smsa66:} =1 if in SMSA, 1966 #' \item \strong{wage:} hourly wage in cents, 1976 #' \item \strong{enroll:} =1 if enrolled in school, 1976 #' \item \strong{KWW:} knowledge world of work score #' \item \strong{IQ:} IQ score #' \item \strong{married:} =1 if married, 1976 #' \item \strong{libcrd14:} =1 if lib. card in home at 14 #' \item \strong{exper:} age - educ - 6 #' \item \strong{lwage:} log(wage) #' \item \strong{expersq:} exper^2 #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(card) "card"
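# Illustrative sketch (not part of the original documentation): the note above describes a
# crude exogeneity check along the lines of Computer Exercise C15.3, regressing IQ on
# nearc4 with and without the 1966 location controls. Assumes the wooldridge package is
# attached; IQ is only reported for a subset of men, and lm() drops the missing cases.
library(wooldridge)
summary(lm(IQ ~ nearc4, data = card))   # raw correlation between IQ and the instrument
summary(lm(IQ ~ nearc4 + smsa66 + reg662 + reg663 + reg664 + reg665 +
             reg666 + reg667 + reg668 + reg669, data = card))  # after netting out controls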
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/card.R
#' catholic #' #' Wooldridge Source: Altonji, J.G., T.E. Elder, and C.R. Taber (2005), “An Evaluation of Instrumental Variable Strategies for Estimating the Effects of Catholic Schooling,” Journal of Human Resources 40, 791-821. Professor Elder kindly provided a subset of the data, with some variables stripped away for confidentiality reasons. Data loads lazily. #' #' @section #' #' Used in Text: pages 267, 551 #' #' @docType data #' #' @usage data('catholic') #' #' @format A data.frame with 7430 observations on 13 variables: #' \itemize{ #' \item \strong{id: }{person identifier} #' \item \strong{read12: }{reading standardized score} #' \item \strong{math12: }{mathematics standardized score} #' \item \strong{female: }{=1 if female} #' \item \strong{asian: }{=1 if Asian} #' \item \strong{hispan: }{=1 if Hispanic} #' \item \strong{black: }{=1 if black} #' \item \strong{motheduc: }{mother's years of education} #' \item \strong{fatheduc: }{father's years of education} #' \item \strong{lfaminc: }{log of family income} #' \item \strong{hsgrad: }{=1 if graduated from high school by 1994} #' \item \strong{cathhs: }{=1 if attended Catholic HS} #' \item \strong{parcath: }{=1 if a parent reports being Catholic} #' } #' @source \url{http://www.cengage.com/c/introductory-econometrics-a-modern-approach-6e-wooldridge} #' @examples str(catholic) "catholic"
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/catholic.R
#' cement #' #' Wooldridge Source: J. Shea (1993), “The Input-Output Approach to Instrument Selection,” Journal of Business and Economic Statistics 11, 145-156. Professor Shea kindly provided these data. Data loads lazily. #' #' @section Notes: Compared with Shea’s analysis, the producer price index (PPI) for fuels and power has been replaced with the PPI for petroleum. The data are monthly and have not been seasonally adjusted. #' #' Used in Text: pages 579 #' #' @docType data #' #' @usage data('cement') #' #' @format A data.frame with 312 observations on 30 variables: #' \itemize{ #' \item \strong{year:} 1964-1989 #' \item \strong{month:} 1-12 #' \item \strong{prccem:} BLS ppi for cement #' \item \strong{ipcem:} industrial prod. index, cement #' \item \strong{prcpet:} ppi for crude petroleum #' \item \strong{rresc:} real residential construction #' \item \strong{rnonc:} real nonres. construction #' \item \strong{ip:} aggregate index of indus. prod. #' \item \strong{rdefs:} real defense spending #' \item \strong{milemp:} military employment #' \item \strong{gprc:} log(prccem) - log(prccem[_n-1]) #' \item \strong{gcem:} log(ipcem) - log(ipcem[_n-1]) #' \item \strong{gprcpet:} log(prcpet) - log(prcpet[_n-1]) #' \item \strong{gres:} log(rresc) - log(rresc[_n-1]) #' \item \strong{gnon:} log(rnonc) - log(rnonc[_n-1]) #' \item \strong{gip:} log(ip) - log(ip[_n-1]) #' \item \strong{gdefs:} log(rdefs) - log(rdefs[_n-1]) #' \item \strong{gmilemp:} log(milemp) - log(milemp[_n-1]) #' \item \strong{jan:} =1 if month == 1 #' \item \strong{feb:} =1 if month == 2 #' \item \strong{mar:} =1 if month == 3 #' \item \strong{apr:} =1 if month == 4 #' \item \strong{may:} =1 if month == 5 #' \item \strong{jun:} =1 if month == 6 #' \item \strong{jul:} =1 if month == 7 #' \item \strong{aug:} =1 if month == 8 #' \item \strong{sep:} =1 if month == 9 #' \item \strong{oct:} =1 if month == 10 #' \item \strong{nov:} =1 if month == 11 #' \item \strong{dec:} =1 if month == 12 #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(cement) "cement"
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/cement.R
#' census2000 #' #' Wooldridge Source: Obtained from the United States Census Bureau by Professor Alberto Abadie of the Harvard Kennedy School of Government. Professor Abadie kindly provided the data. Data loads lazily. #' #' @section #' #' Used in Text: pages 452-453 #' #' @docType data #' #' @usage data('census2000') #' #' @format A data.frame with 29501 observations on 6 variables: #' \itemize{ #' \item \strong{state: }{State (ICPSR code)} #' \item \strong{puma: }{Public Use Microdata Area} #' \item \strong{educ: }{educational attainment} #' \item \strong{lweekinc: }{log(weekly income)} #' \item \strong{exper: }{years workforce experience} #' \item \strong{expersq: }{exper^2} #' } #' @source \url{http://www.cengage.com/c/introductory-econometrics-a-modern-approach-6e-wooldridge} #' @examples str(census2000) "census2000"
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/census2000.R
#' ceosal1 #' #' Wooldridge Source: I took a random sample of data reported in the May 6, 1991 issue of Businessweek. Data loads lazily. #' #' @section Notes: This kind of data collection is relatively easy for students just learning data analysis, and the findings can be interesting. A good term project is to have students collect a similar data set using a more recent issue of Businessweek, and to find additional variables that might explain differences in CEO compensation. My impression is that the public is still interested in CEO compensation. An interesting question is whether the list of explanatory variables included in this data set now explains less of the variation in log(salary) than it used to. #' #' Used in Text: pages 32, 35-36, 39, 159-160, 218-219, 260-261, 263, 685, 692-693 #' #' @docType data #' #' @usage data('ceosal1') #' #' @format A data.frame with 209 observations on 12 variables: #' \itemize{ #' \item \strong{salary:} 1990 salary, thousands $ #' \item \strong{pcsalary:} percent change salary, 89-90 #' \item \strong{sales:} 1990 firm sales, millions $ #' \item \strong{roe:} return on equity, 88-90 avg #' \item \strong{pcroe:} percent change roe, 88-90 #' \item \strong{ros:} return on firm's stock, 88-90 #' \item \strong{indus:} =1 if industrial firm #' \item \strong{finance:} =1 if financial firm #' \item \strong{consprod:} =1 if consumer product firm #' \item \strong{utility:} =1 if transport. or utilities #' \item \strong{lsalary:} natural log of salary #' \item \strong{lsales:} natural log of sales #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(ceosal1) "ceosal1"
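# Illustrative sketch (not part of the original documentation): the note above asks how much
# of the variation in log(salary) the included variables explain; the R-squared from a
# regression like this one (the specification is only an example) gives a benchmark to
# compare against newer data. Assumes the wooldridge package is attached.
library(wooldridge)
ceo <- lm(lsalary ~ lsales + roe + ros + indus + finance + consprod, data = ceosal1)
summary(ceo)$r.squared  # share of variation in log(salary) explained in the 1990 data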
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/ceosal1.R
#' ceosal2 #' #' Wooldridge Source: See CEOSAL1.RAW Data loads lazily. #' #' @section Notes: Compared with CEOSAL1.RAW, in this CEO data set more information about the CEO, rather than about the company, is included. #' #' Used in Text: pages 64, 111, 163, 214, 335, 699 #' #' @docType data #' #' @usage data('ceosal2') #' #' @format A data.frame with 177 observations on 15 variables: #' \itemize{ #' \item \strong{salary:} 1990 compensation, $1000s #' \item \strong{age:} in years #' \item \strong{college:} =1 if attended college #' \item \strong{grad:} =1 if attended graduate school #' \item \strong{comten:} years with company #' \item \strong{ceoten:} years as ceo with company #' \item \strong{sales:} 1990 firm sales, millions #' \item \strong{profits:} 1990 profits, millions #' \item \strong{mktval:} market value, end 1990, mills. #' \item \strong{lsalary:} log(salary) #' \item \strong{lsales:} log(sales) #' \item \strong{lmktval:} log(mktval) #' \item \strong{comtensq:} comten^2 #' \item \strong{ceotensq:} ceoten^2 #' \item \strong{profmarg:} profits as percent of sales #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(ceosal2) "ceosal2"
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/ceosal2.R
#' charity #' #' Wooldridge Source: P.H. Franses and R. Paap (2001), Quantitative Models in Marketing Research. Cambridge: Cambridge University Press. Professor Franses kindly provided the data. Data loads lazily. #' #' @section Notes: This data set can be used to illustrate probit and Tobit models, and to study the linear approximations to them. #' #' Used in Text: pages 65, 112-113, 266-267, 628 #' #' @docType data #' #' @usage data('charity') #' #' @format A data.frame with 4268 observations on 8 variables: #' \itemize{ #' \item \strong{respond:} =1 if responded with gift #' \item \strong{gift:} amount of gift, Dutch guilders #' \item \strong{resplast:} =1 if responded to most recent mailing #' \item \strong{weekslast:} number of weeks since last response #' \item \strong{propresp:} response rate to mailings #' \item \strong{mailsyear:} number of mailings per year #' \item \strong{giftlast:} amount of most recent gift #' \item \strong{avggift:} average of past gifts #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(charity) "charity"
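# Illustrative sketch (not part of the original documentation): the note above mentions
# using these data for probit and Tobit models and their linear approximations. A probit
# for respond next to a linear probability model is shown here; a Tobit for gift would
# additionally require a package such as AER or censReg (not shown). The regressor list is
# only an example. Assumes the wooldridge package is attached.
library(wooldridge)
lpm <- lm(respond ~ resplast + mailsyear + propresp, data = charity)   # linear probability model
prb <- glm(respond ~ resplast + mailsyear + propresp,
           family = binomial(link = "probit"), data = charity)         # probit
cbind(LPM = coef(lpm), Probit = coef(prb))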
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/charity.R
#' consump #' #' Wooldridge Source: I collected these data from the 1997 Economic Report of the President. Specifically, the data come from Tables B-71, 15, 29, and 32. Data loads lazily. #' #' @section Notes: For a student interested in time series methods, updating this data set and using it in a manner similar to that in the text could be acceptable as a final project. #' #' Used in Text: pages 377-378, 408-409, 442, 570-571, 579, 673 #' #' @docType data #' #' @usage data('consump') #' #' @format A data.frame with 37 observations on 24 variables: #' \itemize{ #' \item \strong{year:} 1959-1995 #' \item \strong{i3:} 3 mo. T-bill rate #' \item \strong{inf:} inflation rate; CPI #' \item \strong{rdisp:} disp. inc., 1992 $, bils. #' \item \strong{rnondc:} nondur. cons., 1992 $, bils. #' \item \strong{rserv:} services, 1992 $, bils. #' \item \strong{pop:} population, 1000s #' \item \strong{y:} per capita real disp. inc. #' \item \strong{rcons:} rnondc + rserv #' \item \strong{c:} per capita real cons. #' \item \strong{r3:} i3 - inf; real ex post int. #' \item \strong{lc:} log(c) #' \item \strong{ly:} log(y) #' \item \strong{gc:} lc - lc[_n-1] #' \item \strong{gy:} ly - ly[_n-1] #' \item \strong{gc_1:} gc[_n-1] #' \item \strong{gy_1:} gy[_n-1] #' \item \strong{r3_1:} r3[_n-1] #' \item \strong{lc_ly:} lc - ly #' \item \strong{lc_ly_1:} lc_ly[_n-1] #' \item \strong{gc_2:} gc[_n-2] #' \item \strong{gy_2:} gy[_n-2] #' \item \strong{r3_2:} r3[_n-2] #' \item \strong{lc_ly_2:} lc_ly[_n-2] #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(consump) "consump"
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/consump.R
#' corn #' #' Wooldridge Source: G.E. Battese, R.M. Harter, and W.A. Fuller (1988), “An Error-Components Model for Prediction of County Crop Areas Using Survey and Satellite Data,” Journal of the American Statistical Association 83, 28-36. This small data set is reported in the article. Data loads lazily. #' #' @section Notes: You could use these data to illustrate simple regression when the population intercept should be zero: no corn pixels should predict no corn planted. The same can be done with the soybean measures in the data set. #' #' Used in Text: pages 791-792 #' #' @docType data #' #' @usage data('corn') #' #' @format A data.frame with 37 observations on 5 variables: #' \itemize{ #' \item \strong{county:} county number #' \item \strong{cornhec:} corn per hectare #' \item \strong{soyhec:} soybeans per hectare #' \item \strong{cornpix:} corn pixels per hectare #' \item \strong{soypix:} soy pixels per hectare #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(corn) "corn"
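# Illustrative sketch (not part of the original documentation): the note above suggests
# using these data to illustrate regression through the origin, since zero corn pixels
# should predict zero corn planted. Assumes the wooldridge package is attached.
library(wooldridge)
summary(lm(cornhec ~ cornpix, data = corn))      # with an estimated intercept
summary(lm(cornhec ~ 0 + cornpix, data = corn))  # intercept restricted to zero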
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/corn.R
#' countymurders #' #' Wooldridge Source: Compiled by J. Monroe Gamble for a Summer Research Opportunities Program (SROP) at Michigan State University, Summer 2014. Monroe obtained data from the U.S. Census Bureau, the FBI Uniform Crime Reports, and the Death Penalty Information Center. Data loads lazily. #' #' @section #' #' Used in Text: pages 16, 58, 431, 457 #' #' @docType data #' #' @usage data('countymurders') #' #' @format A data.frame with 37349 observations on 20 variables: #' \itemize{ #' \item \strong{arrests: }{# of murder arrests} #' \item \strong{countyid: }{county identifier: 1000*statefips + countyfips} #' \item \strong{density: }{population density; per square mile} #' \item \strong{popul: }{county population} #' \item \strong{perc1019: }{percent pop. age 10-19} #' \item \strong{perc2029: }{percent pop. age 20-29} #' \item \strong{percblack: }{percent population black} #' \item \strong{percmale: }{percent population male} #' \item \strong{rpcincmaint: }{real per capita income maintenance} #' \item \strong{rpcpersinc: }{real per capita personal income} #' \item \strong{rpcunemins: }{real per capita unem insurance payments} #' \item \strong{year: }{1980-1996} #' \item \strong{murders: }{# of murders} #' \item \strong{murdrate: }{murders per 10,000 people} #' \item \strong{arrestrate: }{murder arrests per 10,000} #' \item \strong{statefips: }{state FIPS code} #' \item \strong{countyfips: }{county FIPS code} #' \item \strong{execs: }{# of executions} #' \item \strong{lpopul: }{log(popul)} #' \item \strong{execrate: }{executions per 10,000} #' } #' @source \url{http://www.cengage.com/c/introductory-econometrics-a-modern-approach-6e-wooldridge} #' @examples str(countymurders) "countymurders"
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/countymurders.R
#' cps78_85 #' #' Wooldridge Source: Professor Henry Farber, now at Princeton University, compiled these data from the 1978 and 1985 Current Population Surveys. Professor Farber kindly provided these data when we were colleagues at MIT. Data loads lazily. #' #' @section Notes: Obtaining more recent data from the CPS allows one to track, over a long period of time, the changes in the return to education, the gender gap, black-white wage differentials, and the union wage premium. #' #' Used in Text: pages 451, 476 #' #' @docType data #' #' @usage data('cps78_85') #' #' @format A data.frame with 1084 observations on 15 variables: #' \itemize{ #' \item \strong{educ:} years of schooling #' \item \strong{south:} =1 if live in south #' \item \strong{nonwhite:} =1 if nonwhite #' \item \strong{female:} =1 if female #' \item \strong{married:} =1 if married #' \item \strong{exper:} age - educ - 6 #' \item \strong{expersq:} exper^2 #' \item \strong{union:} =1 if belong to union #' \item \strong{lwage:} log hourly wage #' \item \strong{age:} in years #' \item \strong{year:} 78 or 85 #' \item \strong{y85:} =1 if year == 85 #' \item \strong{y85fem:} y85*female #' \item \strong{y85educ:} y85*educ #' \item \strong{y85union:} y85*union #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(cps78_85) "cps78_85"
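# Illustrative sketch (not part of the original documentation): the note above points to
# tracking changes in the return to education and the gender gap over time; the built-in
# interaction variables make a pooled regression across the two years straightforward.
# Assumes the wooldridge package is attached.
library(wooldridge)
summary(lm(lwage ~ y85 + educ + y85educ + exper + expersq + union + female + y85fem,
           data = cps78_85))  # y85educ and y85fem pick up the changes between 1978 and 1985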
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/cps78_85.R
#' cps91 #' #' Wooldridge Source: Professor Daniel Hamermesh, at the University of Texas, compiled these data from the May 1991 Current Population Survey. Professor Hamermesh kindly provided these data. Data loads lazily. #' #' @section Notes: This is much bigger than the other CPS data sets even though the sample is restricted to married women. (CPS91.RAW contains many more observations than MROZ.RAW, too.) In addition to the usual human capital variables for the women in the sample, we have information on the husband. Therefore, we can estimate a labor supply function as in Chapter 16, although the validity of potential experience as an IV for log(wage) is questionable. (MROZ.RAW contains an actual experience variable.) Perhaps more convincing is to add hours to the wage offer equation, and instrument hours with indicators for young and old children. This data set also contains a union membership indicator. The web site for the National Bureau of Economic Research makes it very easy now to download CPS data files in a variety of formats. Go to http://www.nber.org/data/cps_basic.html. #' #' Used in Text: pages 627-628 #' #' @docType data #' #' @usage data('cps91') #' #' @format A data.frame with 5634 observations on 24 variables: #' \itemize{ #' \item \strong{husage:} husband's age #' \item \strong{husunion:} =1 if hus. in union #' \item \strong{husearns:} hus. weekly earns #' \item \strong{huseduc:} husband's yrs schooling #' \item \strong{husblck:} =1 if hus. black #' \item \strong{hushisp:} =1 if hus. hispanic #' \item \strong{hushrs:} hus. weekly hours #' \item \strong{kidge6:} =1 if have child >= 6 #' \item \strong{earns:} wife's weekly earnings #' \item \strong{age:} wife's age #' \item \strong{black:} =1 if wife black #' \item \strong{educ:} wife's yrs schooling #' \item \strong{hispanic:} =1 if wife hispanic #' \item \strong{union:} =1 if wife in union #' \item \strong{faminc:} annual family income #' \item \strong{husexp:} husage - huseduc - 6 #' \item \strong{exper:} age - educ - 6 #' \item \strong{kidlt6:} =1 if have child < 6 #' \item \strong{hours:} wife's weekly hours #' \item \strong{expersq:} exper^2 #' \item \strong{nwifeinc:} non-wife inc, $1000s #' \item \strong{inlf:} =1 if wife in labor force #' \item \strong{hrwage:} earns/hours #' \item \strong{lwage:} log(hrwage) #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(cps91) "cps91"
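# Illustrative sketch (not part of the original documentation): the note above suggests
# adding hours to the wage offer equation and instrumenting it with the child indicators.
# A minimal 2SLS version, assuming the wooldridge and AER packages are available; the
# exact specification is only an example. lwage is observed only for working wives, so the
# non-workers drop out automatically.
library(wooldridge); library(AER)
iv <- ivreg(lwage ~ educ + exper + expersq + hours |
              educ + exper + expersq + kidlt6 + kidge6, data = cps91)
summary(iv)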
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/cps91.R
#' crime1 #' #' Wooldridge Source: J. Grogger (1991), “Certainty vs. Severity of Punishment,” Economic Inquiry 29, 297-309. Professor Grogger kindly provided a subset of the data he used in his article. Data loads lazily. #' #' @section #' #' Used in Text: pages 82-83, 173-174, 180, 252-253, 275, 299, 305-306, 607-608, 625 #' #' @docType data #' #' @usage data('crime1') #' #' @format A data.frame with 2725 observations on 16 variables: #' \itemize{ #' \item \strong{narr86:} # times arrested, 1986 #' \item \strong{nfarr86:} # felony arrests, 1986 #' \item \strong{nparr86:} # property crme arr., 1986 #' \item \strong{pcnv:} proportion of prior convictions #' \item \strong{avgsen:} avg sentence length, mos. #' \item \strong{tottime:} time in prison since 18 (mos.) #' \item \strong{ptime86:} mos. in prison during 1986 #' \item \strong{qemp86:} # quarters employed, 1986 #' \item \strong{inc86:} legal income, 1986, $100s #' \item \strong{durat:} recent unemp duration #' \item \strong{black:} =1 if black #' \item \strong{hispan:} =1 if Hispanic #' \item \strong{born60:} =1 if born in 1960 #' \item \strong{pcnvsq:} pcnv^2 #' \item \strong{pt86sq:} ptime86^2 #' \item \strong{inc86sq:} inc86^2 #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(crime1) "crime1"
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/crime1.R
#' crime2 #' #' Wooldridge Source: These data were collected by David Dicicco, a former MSU undergraduate, for a final project. They came from various issues of the County and City Data Book, and are for the years 1982 and 1987. Unfortunately, I do not have the list of cities. Data loads lazily. #' #' @section Notes: Very rich crime data sets, at the county, or even city, level, can be collected using the FBI’s Uniform Crime Reports. These data can be matched up with demographic and economic data, at least for census years. The County and City Data Book contains a variety of statistics, but the years do not always match up. These data sets can be used to investigate issues such as the effects of casinos on city or county crime rates. #' #' Used in Text: pages 313-314, 459-460 #' #' @docType data #' #' @usage data('crime2') #' #' @format A data.frame with 92 observations on 34 variables: #' \itemize{ #' \item \strong{pop:} population #' \item \strong{crimes:} total number index crimes #' \item \strong{unem:} unemployment rate #' \item \strong{officers:} number police officers #' \item \strong{pcinc:} per capita income #' \item \strong{west:} =1 if city in west #' \item \strong{nrtheast:} =1 if city in NE #' \item \strong{south:} =1 if city in south #' \item \strong{year:} 82 or 87 #' \item \strong{area:} land area, square miles #' \item \strong{d87:} =1 if year = 87 #' \item \strong{popden:} people per sq mile #' \item \strong{crmrte:} crimes per 1000 people #' \item \strong{offarea:} officers per sq mile #' \item \strong{lawexpc:} law enforce. expend. pc, $ #' \item \strong{polpc:} police per 1000 people #' \item \strong{lpop:} log(pop) #' \item \strong{loffic:} log(officers) #' \item \strong{lpcinc:} log(pcinc) #' \item \strong{llawexpc:} log(lawexpc) #' \item \strong{lpopden:} log(popden) #' \item \strong{lcrimes:} log(crimes) #' \item \strong{larea:} log(area) #' \item \strong{lcrmrte:} log(crmrte) #' \item \strong{clcrimes:} change in lcrimes #' \item \strong{clpop:} change in lpop #' \item \strong{clcrmrte:} change in lcrmrte #' \item \strong{lpolpc:} log(polpc) #' \item \strong{clpolpc:} change in lpolpc #' \item \strong{cllawexp:} change in llawexp #' \item \strong{cunem:} change in unem #' \item \strong{clpopden:} change in lpopden #' \item \strong{lcrmrt_1:} lcrmrte lagged #' \item \strong{ccrmrte:} change in crmrte #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(crime2) "crime2"
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/crime2.R
#' crime3 #' #' Wooldridge Source: E. Eide (1994), Economics of Crime: Deterrence of the Rational Offender. Amsterdam: North Holland. The data come from Tables A3 and A6. Data loads lazily. #' #' @section Notes: These data are for the years 1972 and 1978 for 53 police districts in Norway. Much larger data sets for more years can be obtained for the United States, although a measure of the “clear-up” rate is needed. #' #' Used in Text: pages 464-465, 477-478 #' #' @docType data #' #' @usage data('crime3') #' #' @format A data.frame with 106 observations on 12 variables: #' \itemize{ #' \item \strong{district:} district number #' \item \strong{year:} 72 or 78 #' \item \strong{crime:} crimes per 1000 people #' \item \strong{clrprc1:} clear-up perc, prior year #' \item \strong{clrprc2:} clear-up perc, two-years prior #' \item \strong{d78:} =1 if year = 78 #' \item \strong{avgclr:} (clrprc1 + clrprc2)/2 #' \item \strong{lcrime:} log(crime) #' \item \strong{clcrime:} change in lcrime #' \item \strong{cavgclr:} change in avgclr #' \item \strong{cclrprc1:} change in clrprc1 #' \item \strong{cclrprc2:} change in clrprc2 #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(crime3) "crime3"
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/crime3.R
#' crime4 #' #' Wooldridge Source: From C. Cornwell and W. Trumball (1994), “Estimating the Economic Model of Crime with Panel Data,” Review of Economics and Statistics 76, 360-366. Professor Cornwell kindly provided the data. Data loads lazily. #' #' @section Notes: Computer Exercise C16.7 shows that variables that might seem to be good instrumental variable candidates are not always so good, especially after applying a transformation such as differencing across time. You could have the students do an IV analysis for just, say, 1987. #' #' Used in Text: pages 471-472, 479, 504, 580 #' #' @docType data #' #' @usage data('crime4') #' #' @format A data.frame with 630 observations on 59 variables: #' \itemize{ #' \item \strong{county:} county identifier #' \item \strong{year:} 81 to 87 #' \item \strong{crmrte:} crimes committed per person #' \item \strong{prbarr:} 'probability' of arrest #' \item \strong{prbconv:} 'probability' of conviction #' \item \strong{prbpris:} 'probability' of prison sentenc #' \item \strong{avgsen:} avg. sentence, days #' \item \strong{polpc:} police per capita #' \item \strong{density:} people per sq. mile #' \item \strong{taxpc:} tax revenue per capita #' \item \strong{west:} =1 if in western N.C. #' \item \strong{central:} =1 if in central N.C. #' \item \strong{urban:} =1 if in SMSA #' \item \strong{pctmin80:} perc. minority, 1980 #' \item \strong{wcon:} weekly wage, construction #' \item \strong{wtuc:} wkly wge, trns, util, commun #' \item \strong{wtrd:} wkly wge, whlesle, retail trade #' \item \strong{wfir:} wkly wge, fin, ins, real est #' \item \strong{wser:} wkly wge, service industry #' \item \strong{wmfg:} wkly wge, manufacturing #' \item \strong{wfed:} wkly wge, fed employees #' \item \strong{wsta:} wkly wge, state employees #' \item \strong{wloc:} wkly wge, local gov emps #' \item \strong{mix:} offense mix: face-to-face/other #' \item \strong{pctymle:} percent young male #' \item \strong{d82:} =1 if year == 82 #' \item \strong{d83:} =1 if year == 83 #' \item \strong{d84:} =1 if year == 84 #' \item \strong{d85:} =1 if year == 85 #' \item \strong{d86:} =1 if year == 86 #' \item \strong{d87:} =1 if year == 87 #' \item \strong{lcrmrte:} log(crmrte) #' \item \strong{lprbarr:} log(prbarr) #' \item \strong{lprbconv:} log(prbconv) #' \item \strong{lprbpris:} log(prbpris) #' \item \strong{lavgsen:} log(avgsen) #' \item \strong{lpolpc:} log(polpc) #' \item \strong{ldensity:} log(density) #' \item \strong{ltaxpc:} log(taxpc) #' \item \strong{lwcon:} log(wcon) #' \item \strong{lwtuc:} log(wtuc) #' \item \strong{lwtrd:} log(wtrd) #' \item \strong{lwfir:} log(wfir) #' \item \strong{lwser:} log(wser) #' \item \strong{lwmfg:} log(wmfg) #' \item \strong{lwfed:} log(wfed) #' \item \strong{lwsta:} log(wsta) #' \item \strong{lwloc:} log(wloc) #' \item \strong{lmix:} log(mix) #' \item \strong{lpctymle:} log(pctymle) #' \item \strong{lpctmin:} log(pctmin) #' \item \strong{clcrmrte:} lcrmrte - lcrmrte[_n-1] #' \item \strong{clprbarr:} lprbarr - lprbarr[_n-1] #' \item \strong{clprbcon:} lprbconv - lprbconv[_n-1] #' \item \strong{clprbpri:} lprbpri - lprbpri[t-1] #' \item \strong{clavgsen:} lavgsen - lavgsen[t-1] #' \item \strong{clpolpc:} lpolpc - lpolpc[t-1] #' \item \strong{cltaxpc:} ltaxpc - ltaxpc[t-1] #' \item \strong{clmix:} lmix - lmix[t-1] #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(crime4) "crime4"
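# Illustrative sketch (not part of the original documentation): the note above refers to
# transformations such as differencing across time; the data set already contains the
# first-differenced logs, so a differenced crime equation of the kind used in the text can
# be fit directly. The specification below is only an example. Assumes the wooldridge
# package is attached; the 1981 observations have no lagged differences and are dropped.
library(wooldridge)
fd <- lm(clcrmrte ~ d83 + d84 + d85 + d86 + d87 +
           clprbarr + clprbcon + clprbpri + clavgsen + clpolpc, data = crime4)
summary(fd)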
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/crime4.R
#' discrim #' #' Wooldridge Source: K. Graddy (1997), “Do Fast-Food Chains Price Discriminate on the Race and Income Characteristics of an Area?” Journal of Business and Economic Statistics 15, 391-401. Professor Graddy kindly provided the data set. Data loads lazily. #' #' @section Notes: If you want to assign a common final project, this would be a good data set. There are many possible dependent variables, namely, prices of various fast-food items. The key variable is the fraction of the population that is black, along with controls for poverty, income, housing values, and so on. These data were also used in a famous study by David Card and Alan Krueger on estimation of minimum wage effects on employment. See the book by Card and Krueger, Myth and Measurement, 1997, Princeton University Press, for a detailed analysis. #' #' Used in Text: pages 112, 166, 699-700 #' #' @docType data #' #' @usage data('discrim') #' #' @format A data.frame with 410 observations on 37 variables: #' \itemize{ #' \item \strong{psoda:} price of medium soda, 1st wave #' \item \strong{pfries:} price of small fries, 1st wave #' \item \strong{pentree:} price entree (burger or chicken), 1st wave #' \item \strong{wagest:} starting wage, 1st wave #' \item \strong{nmgrs:} number of managers, 1st wave #' \item \strong{nregs:} number of registers, 1st wave #' \item \strong{hrsopen:} hours open, 1st wave #' \item \strong{emp:} number of employees, 1st wave #' \item \strong{psoda2:} price of medium soda, 2nd wave #' \item \strong{pfries2:} price of small fries, 2nd wave #' \item \strong{pentree2:} price entree, 2nd wave #' \item \strong{wagest2:} starting wage, 2nd wave #' \item \strong{nmgrs2:} number of managers, 2nd wave #' \item \strong{nregs2:} number of registers, 2nd wave #' \item \strong{hrsopen2:} hours open, 2nd wave #' \item \strong{emp2:} number of employees, 2nd wave #' \item \strong{compown:} =1 if company owned #' \item \strong{chain:} BK = 1, KFC = 2, Roy Rogers = 3, Wendy's = 4 #' \item \strong{density:} population density, town #' \item \strong{crmrte:} crime rate, town #' \item \strong{state:} NJ = 1, PA = 2 #' \item \strong{prpblck:} proportion black, zipcode #' \item \strong{prppov:} proportion in poverty, zipcode #' \item \strong{prpncar:} proportion no car, zipcode #' \item \strong{hseval:} median housing value, zipcode #' \item \strong{nstores:} number of stores, zipcode #' \item \strong{income:} median family income, zipcode #' \item \strong{county:} county label #' \item \strong{lpsoda:} log(psoda) #' \item \strong{lpfries:} log(pfries) #' \item \strong{lhseval:} log(hseval) #' \item \strong{lincome:} log(income) #' \item \strong{ldensity:} log(density) #' \item \strong{NJ:} =1 for New Jersey #' \item \strong{BK:} =1 if Burger King #' \item \strong{KFC:} =1 if Kentucky Fried Chicken #' \item \strong{RR:} =1 if Roy Rogers #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(discrim) "discrim"
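# Illustrative sketch (not part of the original documentation): the note above describes
# regressing the price of a fast-food item on the proportion black in the zip code along
# with income and poverty controls; one such specification is shown below. Assumes the
# wooldridge package is attached.
library(wooldridge)
summary(lm(lpsoda ~ prpblck + lincome + prppov, data = discrim))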
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/discrim.R
#' driving #' #' Wooldridge Source: Freeman, D.G. (2007), “Drunk Driving Legislation and Traffic Fatalities: New Evidence on BAC 08 Laws,” Contemporary Economic Policy 25, 293--308. Professor Freeman kindly provided the data. Data loads lazily. #' #' @section Notes: Several more years of data are available and may further shed light on the effectiveness of several traffic laws. #' #' Used in Text: not used, but see page 695 #' #' @docType data #' #' @usage data('driving') #' #' @format A data.frame with 1200 observations on 56 variables: #' \itemize{ #' \item \strong{year:} 1980 through 2004 #' \item \strong{state:} 48 continental states, alphabetical #' \item \strong{sl55:} speed limit == 55 #' \item \strong{sl65:} speed limit == 65 #' \item \strong{sl70:} speed limit == 70 #' \item \strong{sl75:} speed limit == 75 #' \item \strong{slnone:} no speed limit #' \item \strong{seatbelt:} =0 if none, =1 if primary, =2 if secondary #' \item \strong{minage:} minimum drinking age #' \item \strong{zerotol:} zero tolerance law #' \item \strong{gdl:} graduated drivers license law #' \item \strong{bac10:} blood alcohol limit .10 #' \item \strong{bac08:} blood alcohol limit .08 #' \item \strong{perse:} administrative license revocation (per se law) #' \item \strong{totfat:} total traffic fatalities #' \item \strong{nghtfat:} total nighttime fatalities #' \item \strong{wkndfat:} total weekend fatalities #' \item \strong{totfatpvm:} total fatalities per 100 million miles #' \item \strong{nghtfatpvm:} nighttime fatalities per 100 million miles #' \item \strong{wkndfatpvm:} weekend fatalities per 100 million miles #' \item \strong{statepop:} state population #' \item \strong{totfatrte:} total fatalities per 100,000 population #' \item \strong{nghtfatrte:} nighttime fatalities per 100,000 population #' \item \strong{wkndfatrte:} weekend accidents per 100,000 population #' \item \strong{vehicmiles:} vehicle miles traveled, billions #' \item \strong{unem:} unemployment rate, percent #' \item \strong{perc14_24:} percent population aged 14 through 24 #' \item \strong{sl70plus:} sl70 + sl75 + slnone #' \item \strong{sbprim:} =1 if primary seatbelt law #' \item \strong{sbsecon:} =1 if secondary seatbelt law #' \item \strong{d80:} =1 if year == 1980 #' \item \strong{d81:} #' \item \strong{d82:} #' \item \strong{d83:} #' \item \strong{d84:} #' \item \strong{d85:} #' \item \strong{d86:} #' \item \strong{d87:} #' \item \strong{d88:} #' \item \strong{d89:} #' \item \strong{d90:} #' \item \strong{d91:} #' \item \strong{d92:} #' \item \strong{d93:} #' \item \strong{d94:} #' \item \strong{d95:} #' \item \strong{d96:} #' \item \strong{d97:} #' \item \strong{d98:} #' \item \strong{d99:} #' \item \strong{d00:} #' \item \strong{d01:} #' \item \strong{d02:} #' \item \strong{d03:} #' \item \strong{d04:} =1 if year == 2004 #' \item \strong{vehicmilespc:} #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(driving) "driving"
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/driving.R
#' earns #' #' Wooldridge Source: Economic Report of the President, 1989, Table B-47. The data are for the non-farm business sector. Data loads lazily. #' #' @section Notes: These data could be usefully updated, but changes in reporting conventions in more recent ERPs may make that difficult. #' #' Used in Text: pages 363-364, 398, 407 #' #' @docType data #' #' @usage data('earns') #' #' @format A data.frame with 41 observations on 14 variables: #' \itemize{ #' \item \strong{year:} 1947 to 1987 #' \item \strong{wkearns:} avg. real weekly earnings #' \item \strong{wkhours:} avg. weekly hours #' \item \strong{outphr:} output per labor hour #' \item \strong{hrwage:} wkearns/wkhours #' \item \strong{lhrwage:} log(hrwage) #' \item \strong{loutphr:} log(outphr) #' \item \strong{t:} time trend: t=1 to 41 #' \item \strong{ghrwage:} lhrwage - lhrwage[_n-1] #' \item \strong{goutphr:} loutphr - loutphr[_n-1] #' \item \strong{ghrwge_1:} ghrwage[_n-1] #' \item \strong{goutph_1:} goutphr[_n-1] #' \item \strong{goutph_2:} goutphr[_n-2] #' \item \strong{lwkhours:} log(wkhours) #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(earns) "earns"
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/earns.R
#' econmath #' #' Wooldridge Source: Compiled by Professor Charles Ballard, Michigan State University Department of Economics. Professor Ballard kindly provided the data. Data loads lazily. #' #' Used in Text: pages 167, 185 #' #' @docType data #' #' @usage data('econmath') #' #' @format A data.frame with 856 observations on 17 variables: #' \itemize{ #' \item \strong{age:} age in years #' \item \strong{work:} hours worked per week #' \item \strong{study:} hours studying per week #' \item \strong{econhs:} =1 if economics in high school #' \item \strong{colgpa:} college GPA, beginning semester #' \item \strong{hsgpa:} high school GPA #' \item \strong{acteng:} ACT English score #' \item \strong{actmth:} ACT math score #' \item \strong{act:} ACT composite #' \item \strong{mathscr:} math quiz score, 0-10 #' \item \strong{male:} =1 if male #' \item \strong{calculus:} =1 if taken calculus course #' \item \strong{attexc:} =1 if past attendance 'excellent' #' \item \strong{attgood:} =1 if past attendance 'good' #' \item \strong{fathcoll:} =1 if father has BA #' \item \strong{mothcoll:} =1 if mother has BA #' \item \strong{score:} course score, in percent #' } #' @source \url{http://www.cengage.com/c/introductory-econometrics-a-modern-approach-6e-wooldridge} #' @examples str(econmath) "econmath"
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/econmath.R
#' elem94_95 #' #' Wooldridge Source: Culled from a panel data set used by Leslie Papke in her paper “The Effects of Spending on Test Pass Rates: Evidence from Michigan” (2005), Journal of Public Economics 89, 821-839. Data loads lazily. #' #' @section Notes: Starting in 1995, the Michigan Department of Education stopped reporting average teacher benefits along with average salary. This data set includes both variables, at the school level, and can be used to study the salary-benefits tradeoff, as in Chapter 4. There are a few suspicious benefits/salary ratios, and so this data set makes a good illustration of the impact of outliers in Chapter 9. #' #' Used in Text: pages 166-167, 341-342 #' #' @docType data #' #' @usage data('elem94_95') #' #' @format A data.frame with 1848 observations on 14 variables: #' \itemize{ #' \item \strong{distid:} district identifier #' \item \strong{schid:} school identifier #' \item \strong{lunch:} percent eligible, free lunch #' \item \strong{enrol:} enrollment #' \item \strong{staff:} staff per 1000 students #' \item \strong{exppp:} expenditures per pupil #' \item \strong{avgsal:} average teacher salary, $ #' \item \strong{avgben:} average teacher non-salary benefits, $ #' \item \strong{math4:} percent passing 4th grade math test #' \item \strong{story4:} percent passing 4th grade reading test #' \item \strong{bs:} avgben/avgsal #' \item \strong{lavgsal:} log(avgsal) #' \item \strong{lenrol:} log(enrol) #' \item \strong{lstaff:} log(staff) #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(elem94_95) "elem94_95"
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/elem94_95.R
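A rough sketch of the salary-benefits tradeoff regression the elem94_95 notes describe. The school-level controls (enrollment, staff, lunch eligibility) are one plausible set, and the second fit, which drops observations with a high benefits/salary ratio, is just one way to illustrate the outlier point; the 0.5 cutoff is an assumption, not taken from the text.

data("elem94_95", package = "wooldridge")
# A coefficient on bs near -1 would indicate a full salary-benefits tradeoff.
summary(lm(lavgsal ~ bs + lenrol + lstaff + lunch, data = elem94_95))
# Sensitivity check for the suspicious benefits/salary ratios mentioned in the notes:
summary(lm(lavgsal ~ bs + lenrol + lstaff + lunch,
           data = subset(elem94_95, bs < 0.5)))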
#' engin #' #' Wooldridge Source: Thada Chaisawangwong, a former graduate student at MSU, obtained these data for a term project in applied econometrics. They come from the Material Requirement Planning Survey carried out in Thailand during 1998. Data loads lazily. #' #' @section Notes: This is a nice change of pace from wage data sets for the United States. These data are for engineers in Thailand, and represent a more homogeneous group than data sets that consist of people across a variety of occupations. Plus, the starting salary is also provided in the data set, so factors affecting wage growth – and not just wage levels at a given point in time – can be studied. This is a good data set for a common term project that tests basic understanding of multiple regression and the interpretation of models with a logarithm for a dependent variable. #' #' Used in Text: not used #' #' @docType data #' #' @usage data('engin') #' #' @format A data.frame with 403 observations on 17 variables: #' \itemize{ #' \item \strong{male:} =1 if male #' \item \strong{educ:} highest grade completed #' \item \strong{wage:} monthly salary, Thai baht #' \item \strong{swage:} starting wage #' \item \strong{exper:} years on current job #' \item \strong{pexper:} previous experience #' \item \strong{lwage:} log(wage) #' \item \strong{expersq:} exper^2 #' \item \strong{highgrad:} =1 if high school graduate #' \item \strong{college:} =1 if college graduate #' \item \strong{grad:} =1 if some graduate school #' \item \strong{polytech:} =1 if a polytech #' \item \strong{highdrop:} =1 if no high school degree #' \item \strong{lswage:} log(swage) #' \item \strong{pexpersq:} pexper^2 #' \item \strong{mleeduc:} male*educ #' \item \strong{mleeduc0:} male*(educ - 14) #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(engin) "engin"
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/engin.R
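For engin, a bare-bones log-wage equation of the kind the notes have in mind; the regressor set is illustrative only and not taken from any particular exercise.

data("engin", package = "wooldridge")
# Coefficients on educ and exper are approximate proportional effects on monthly salary.
summary(lm(lwage ~ educ + exper + expersq + pexper + male, data = engin))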
#' expendshares #' #' Wooldridge Source: Blundell, R., A. Duncan, and K. Pendakur (1998), “Semiparametric Estimation and Consumer Demand,” Journal of Applied Econometrics 13, 435-461. I obtained these data from the Journal of Applied Econometrics data archive at http://qed.econ.queensu.ca/jae/. Data loads lazily. #' #' @section Notes: The dependent variables in this data set – the expenditure shares – are necessarily bounded between zero and one. The linear model is at best an approximation, but the usual IV estimator likely gives good estimates of the average partial effects. #' #' Used in Text: pages 581-582 #' #' @docType data #' #' @usage data('expendshares') #' #' @format A data.frame with 1519 observations on 13 variables: #' \itemize{ #' \item \strong{sfood:} share of food expenditures (out of total) #' \item \strong{sfuel:} share of fuel expenditures #' \item \strong{sclothes:} share of clothing expenditures #' \item \strong{salcohol:} share of alcohol expenditures #' \item \strong{stransport:} share of transportation expenditures #' \item \strong{sother:} share of other expenditures #' \item \strong{totexpend:} total expenditure, British pounds per week #' \item \strong{income:} family income, British pounds per week #' \item \strong{age:} age of household head #' \item \strong{kids:} number of children: 1 or 2 #' \item \strong{ltotexpend:} log(totexpend) #' \item \strong{lincome:} log(income) #' \item \strong{agesq:} age^2 #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(expendshares) "expendshares"
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/expendshares.R
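A sketch of the IV idea mentioned in the expendshares notes, assuming log income serves as the instrument for the (likely endogenous) log total expenditure, which is the usual setup in this literature; the AER package is assumed only because it supplies a standard ivreg() routine, and any 2SLS implementation would do.

library(AER)   # assumption: AER installed, for ivreg()
data("expendshares", package = "wooldridge")
# Food share on log total expenditure, age, and kids, instrumenting ltotexpend with lincome;
# exogenous regressors are repeated after the | in the instrument list.
summary(ivreg(sfood ~ ltotexpend + age + kids | lincome + age + kids,
              data = expendshares))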
#' ezanders #' #' Wooldridge Source: L.E. Papke (1994), “Tax Policy and Urban Development: Evidence from the Indiana Enterprise Zone Program,” Journal of Public Economics 54, 37-49. Professor Papke kindly provided these data. Data loads lazily. #' #' @section Notes: These are actually monthly unemployment claims for the Anderson enterprise zone. Papke used annualized data, across many zones and non-zones, in her original analysis. #' #' Used in Text: page 377 #' #' @docType data #' #' @usage data('ezanders') #' #' @format A data.frame with 108 observations on 25 variables: #' \itemize{ #' \item \strong{month:} name of month #' \item \strong{uclms:} unemployment claims #' \item \strong{ez:} =1 if enterprise zone #' \item \strong{year:} 1980 through 1988 #' \item \strong{y81:} =1 if year == 1981 #' \item \strong{y82:} #' \item \strong{y83:} #' \item \strong{y84:} #' \item \strong{y85:} #' \item \strong{y86:} #' \item \strong{y87:} #' \item \strong{y88:} #' \item \strong{luclms:} log(uclms) #' \item \strong{jan:} =1 if month == JAN #' \item \strong{feb:} #' \item \strong{mar:} #' \item \strong{apr:} #' \item \strong{may:} #' \item \strong{jun:} #' \item \strong{jul:} #' \item \strong{aug:} #' \item \strong{sep:} #' \item \strong{oct:} #' \item \strong{nov:} #' \item \strong{dec:} #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(ezanders) "ezanders"
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/ezanders.R
#' ezunem #' #' Wooldridge Source: See EZANDERS.RAW. Data loads lazily. #' #' @section Notes: A very good project is to have students analyze enterprise, empowerment, or renaissance zone policies in their home states. Many states now have such programs. A few years of panel data straddling periods of zone designation, at the city or zip code level, could make a nice study. #' #' Used in Text: pages 470, 504 #' #' @docType data #' #' @usage data('ezunem') #' #' @format A data.frame with 198 observations on 37 variables: #' \itemize{ #' \item \strong{year:} 1980 to 1988 #' \item \strong{uclms:} unemployment claims #' \item \strong{ez:} =1 if have enterprise zone #' \item \strong{d81:} =1 if year == 1981 #' \item \strong{d82:} =1 if year == 1982 #' \item \strong{d83:} =1 if year == 1983 #' \item \strong{d84:} =1 if year == 1984 #' \item \strong{d85:} =1 if year == 1985 #' \item \strong{d86:} =1 if year == 1986 #' \item \strong{d87:} =1 if year == 1987 #' \item \strong{d88:} =1 if year == 1988 #' \item \strong{c1:} =1 if city == 1 #' \item \strong{c2:} =1 if city == 2 #' \item \strong{c3:} =1 if city == 3 #' \item \strong{c4:} #' \item \strong{c5:} #' \item \strong{c6:} #' \item \strong{c7:} #' \item \strong{c8:} #' \item \strong{c9:} #' \item \strong{c10:} #' \item \strong{c11:} #' \item \strong{c12:} #' \item \strong{c13:} #' \item \strong{c14:} #' \item \strong{c15:} #' \item \strong{c16:} #' \item \strong{c17:} #' \item \strong{c18:} #' \item \strong{c19:} #' \item \strong{c20:} #' \item \strong{c21:} #' \item \strong{c22:} =1 if city == 22 #' \item \strong{luclms:} log(uclms) #' \item \strong{guclms:} luclms - luclms[_n-1] #' \item \strong{cez:} ez - ez[_n-1] #' \item \strong{city:} city identifier, 1 through 22 #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(ezunem) "ezunem"
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/ezunem.R
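One way to implement the panel policy analysis suggested for ezunem: a first-differenced equation with year dummies, where the coefficient on cez (the change in zone status) measures the effect of zone designation on the growth of unemployment claims. This mirrors the general approach in the text, though the exact specification there may differ.

data("ezunem", package = "wooldridge")
# guclms and cez are already first differences, so the 1980 rows are dropped automatically.
summary(lm(guclms ~ d82 + d83 + d84 + d85 + d86 + d87 + d88 + cez, data = ezunem))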
#' fair #' #' Wooldridge Source: R.C. Fair (1996), “Econometrics and Presidential Elections,” Journal of Economic Perspectives 10, 89-102. The data set is provided in the article. Data loads lazily. #' #' @section Notes: An updated version of this data set, through the 2004 election, is available at Professor Fair’s web site at Yale University: http://fairmodel.econ.yale.edu/rayfair/pdf/2001b.htm. Students might want to try their own hands at predicting the most recent election outcome, but they should be restricted to no more than a handful of explanatory variables because of the small sample size. #' #' Used in Text: pages 362-363, 440, 442 #' #' @docType data #' #' @usage data('fair') #' #' @format A data.frame with 21 observations on 28 variables: #' \itemize{ #' \item \strong{year:} 1916 to 1992, by 4 #' \item \strong{V:} prop. dem. vote #' \item \strong{I:} =1 if demwh, -1 if repwh #' \item \strong{DPER:} incumbent running #' \item \strong{DUR:} duration #' \item \strong{g3:} avg ann grwth rte, prev 3 qrts #' \item \strong{p15:} avg ann inf rate, prev 15 qtrs #' \item \strong{n:} quarters of good news #' \item \strong{g2:} avg ann grwth rte, prev 2 qrts #' \item \strong{gYR:} ann grwth rte, prev year #' \item \strong{p8:} avg ann inf rate, prev 8 qtrs #' \item \strong{p2YR:} inf rte over 2 yr period #' \item \strong{Ig2:} I*g2 #' \item \strong{Ip8:} I*p8 #' \item \strong{demwins:} =1 if V > .5 #' \item \strong{In:} I*n #' \item \strong{d:} =1 in 1920, 1944,1948 #' \item \strong{Id:} I*d #' \item \strong{Ig3:} I*g3 #' \item \strong{Ip151md:} I*p15*(1-d) #' \item \strong{In1md:} I*n*(1-d) #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(fair) "fair"
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/fair.R
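A deliberately small model for fair, in the spirit of the note's warning to use only a handful of regressors with 21 observations; the variables chosen here are illustrative and are not Fair's exact equation.

data("fair", package = "wooldridge")
# Democratic vote share on party-of-incumbent (I), incumbent running, duration,
# and growth interacted with incumbency (Ig3); the data column I masks base I() here.
summary(lm(V ~ I + DPER + DUR + Ig3, data = fair))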
#' fertil1 #' #' Wooldridge Source: W. Sander, “The Effect of Women’s Schooling on Fertility,” Economics Letters 40, 229-233. Professor Sander kindly provided the data, which are a subset of what he used in his article. He compiled the data from various years of the National Opinion Research Center’s General Social Survey. Data loads lazily. #' #' @section Notes: (1) Much more recent data can be obtained from the National Opinion Research Center website, http://www.norc.org/GSS+Website/Download/. Very rich pooled cross sections can be constructed to study a variety of issues – not just changes in fertility over time. It would be interesting to analyze a similar data set for a developing country, especially where efforts have been made to emphasize birth control. Some measure of access to birth control could be useful if it varied by region. Sometimes, one can find policy changes in the advertisement or availability of contraceptives. #' #' Used in Text: pages 449-450, 476, 541, 625, 681 #' #' @docType data #' #' @usage data('fertil1') #' #' @format A data.frame with 1129 observations on 27 variables: #' \itemize{ #' \item \strong{year:} 72 to 84, even #' \item \strong{educ:} years of schooling #' \item \strong{meduc:} mother's education #' \item \strong{feduc:} father's education #' \item \strong{age:} in years #' \item \strong{kids:} # children ever born #' \item \strong{black:} = 1 if black #' \item \strong{east:} = 1 if lived in east at 16 #' \item \strong{northcen:} = 1 if lived in nc at 16 #' \item \strong{west:} = 1 if lived in west at 16 #' \item \strong{farm:} = 1 if on farm at 16 #' \item \strong{othrural:} = 1 if other rural at 16 #' \item \strong{town:} = 1 if lived in town at 16 #' \item \strong{smcity:} = 1 if in small city at 16 #' \item \strong{y74:} = 1 if year = 74 #' \item \strong{y76:} #' \item \strong{y78:} #' \item \strong{y80:} #' \item \strong{y82:} #' \item \strong{y84:} #' \item \strong{agesq:} age^2 #' \item \strong{y74educ:} #' \item \strong{y76educ:} #' \item \strong{y78educ:} #' \item \strong{y80educ:} #' \item \strong{y82educ:} #' \item \strong{y84educ:} #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(fertil1) "fertil1"
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/fertil1.R
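For fertil1, a pooled-cross-section regression along the lines of the fertility equation in the text: the year dummies trace the decline in fertility over time after controlling for education, age, race, and background at age 16. Treat the exact control set as illustrative rather than authoritative.

data("fertil1", package = "wooldridge")
summary(lm(kids ~ educ + age + agesq + black + east + northcen + west +
             farm + othrural + town + smcity +
             y74 + y76 + y78 + y80 + y82 + y84, data = fertil1))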
#' fertil2 #' #' Wooldridge Source: These data were obtained by James Heakins, a former MSU undergraduate, for a term project. They come from Botswana’s 1988 Demographic and Health Survey. Data loads lazily. #' #' @section Notes: Currently, this data set is used only in one computer exercise. Since the dependent variable of interest – number of living children or number of children ever born – is a count variable, the Poisson regression model discussed in Chapter 17 can be used. However, some care is required to combine Poisson regression with an endogenous explanatory variable (educ). I refer you to Chapter 19 of my book Econometric Analysis of Cross Section and Panel Data. Even in the context of linear models, much can be done beyond Computer Exercise C15.2. At a minimum, the binary indicators for various religions can be added as controls. One might also interact the schooling variable, educ, with some of the exogenous explanatory variables. #' #' Used in Text: page 547 #' #' @docType data #' #' @usage data('fertil2') #' #' @format A data.frame with 4361 observations on 27 variables: #' \itemize{ #' \item \strong{mnthborn:} month woman born #' \item \strong{yearborn:} year woman born #' \item \strong{age:} age in years #' \item \strong{electric:} =1 if has electricity #' \item \strong{radio:} =1 if has radio #' \item \strong{tv:} =1 if has tv #' \item \strong{bicycle:} =1 if has bicycle #' \item \strong{educ:} years of education #' \item \strong{ceb:} children ever born #' \item \strong{agefbrth:} age at first birth #' \item \strong{children:} number of living children #' \item \strong{knowmeth:} =1 if know about birth control #' \item \strong{usemeth:} =1 if ever use birth control #' \item \strong{monthfm:} month of first marriage #' \item \strong{yearfm:} year of first marriage #' \item \strong{agefm:} age at first marriage #' \item \strong{idlnchld:} 'ideal' number of children #' \item \strong{heduc:} husband's years of education #' \item \strong{agesq:} age^2 #' \item \strong{urban:} =1 if live in urban area #' \item \strong{urb_educ:} urban*educ #' \item \strong{spirit:} =1 if religion == spirit #' \item \strong{protest:} =1 if religion == protestant #' \item \strong{catholic:} =1 if religion == catholic #' \item \strong{frsthalf:} =1 if mnthborn <= 6 #' \item \strong{educ0:} =1 if educ == 0 #' \item \strong{evermarr:} =1 if ever married #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(fertil2) "fertil2"
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/fertil2.R
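A count-data starting point for fertil2, as the notes suggest: a Poisson regression of the number of living children on schooling and age. This simple version ignores the potential endogeneity of educ that the notes warn about; handling that requires the methods referenced there.

data("fertil2", package = "wooldridge")
summary(glm(children ~ educ + age + agesq, family = poisson(link = "log"),
            data = fertil2))
# Adding controls such as urban, electric, or the religion dummies is a natural next step.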
#' fertil3 #' #' Wooldridge Source: L.A. Whittington, J. Alm, and H.E. Peters (1990), “Fertility and the Personal Exemption: Implicit Pronatalist Policy in the United States,” American Economic Review 80, 545-556. The data are given in the article. Data loads lazily. #' #' Used in Text: pages 358, 377, 378, 397-398, 401, 408, 441, 649, 664-665, 673 #' #' @docType data #' #' @usage data('fertil3') #' #' @format A data.frame with 72 observations on 24 variables: #' \itemize{ #' \item \strong{gfr:} births per 1000 women 15-44 #' \item \strong{pe:} real value pers. exemption, $ #' \item \strong{year:} 1913 to 1984 #' \item \strong{t:} time trend, t=1,...,72 #' \item \strong{tsq:} t^2 #' \item \strong{pe_1:} pe[_n-1] #' \item \strong{pe_2:} pe[_n-2] #' \item \strong{pe_3:} pe[_n-3] #' \item \strong{pe_4:} pe[_n-4] #' \item \strong{pill:} =1 if year >= 1963 #' \item \strong{ww2:} =1, 1941 to 1945 #' \item \strong{tcu:} t^3 #' \item \strong{cgfr:} change in gfr: gfr - gfr_1 #' \item \strong{cpe:} pe - pe_1 #' \item \strong{cpe_1:} cpe[_n-1] #' \item \strong{cpe_2:} cpe[_n-2] #' \item \strong{cpe_3:} cpe[_n-3] #' \item \strong{cpe_4:} cpe[_n-4] #' \item \strong{gfr_1:} gfr[_n-1] #' \item \strong{cgfr_1:} cgfr[_n-1] #' \item \strong{cgfr_2:} cgfr[_n-2] #' \item \strong{cgfr_3:} cgfr[_n-3] #' \item \strong{cgfr_4:} cgfr[_n-4] #' \item \strong{gfr_2:} gfr[_n-2] #' } #' @source \url{https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041} #' @examples str(fertil3) "fertil3"
/scratch/gouwar.j/cran-all/cranData/wooldridge/R/fertil3.R