poisson.CARar2 <- function(formula, data=NULL, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1,
                           prior.mean.beta=NULL, prior.var.beta=NULL, prior.tau2=NULL, rho.S=NULL,
                           rho.T=NULL, MALA=TRUE, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)

#### Frame object
frame.results <- common.frame(formula, data, "poisson")
N.all <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss

#### Determine the number of spatial and temporal units
W.quants <- common.Wcheckformat.leroux(W)
K <- W.quants$n
N <- N.all / K
offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE)

#### Check on MALA argument
if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE)
if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE)

#### Check on the rho arguments
if(is.null(rho.S))
{
    rho <- runif(1)
    fix.rho.S <- FALSE
}else
{
    rho <- rho.S
    fix.rho.S <- TRUE
}
if(!is.numeric(rho)) stop("rho.S is fixed but is not numeric.", call.=FALSE)
if(rho<0 ) stop("rho.S is outside the range [0, 1].", call.=FALSE)
if(rho>1 ) stop("rho.S is outside the range [0, 1].", call.=FALSE)

if(is.null(rho.T))
{
    alpha <- c(runif(1), runif(1))
    fix.rho.T <- FALSE
}else
{
    alpha <- rho.T
    fix.rho.T <- TRUE
}
if(!is.numeric(alpha)) stop("rho.T is fixed but is not numeric.", call.=FALSE)
if(length(alpha)!=2) stop("rho.T is fixed but is not of length 2.", call.=FALSE)

#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
prior.beta.check(prior.mean.beta, prior.var.beta, p)
prior.var.check(prior.tau2)

## Compute the blocking structure for beta
block.temp <- common.betablock(p)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
for(r in 1:n.beta.block)
{
    list.block[[r]] <- beta.beg[r]:beta.fin[r]-1
    list.block[[r+n.beta.block]] <- length(list.block[[r]])
}

#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)


########################
#### Run the MCMC chains
########################
if(n.chains==1)
{
    #### Only 1 chain
    results <- poisson.CARar2MCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho,
                alpha=alpha, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, N.all=N.all, p=p,
                which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin,
                MALA=MALA, n.beta.block=n.beta.block, list.block=list.block,
                prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta,
                prior.tau2=prior.tau2, verbose=verbose, chain=1)
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1)
{
    #### Multiple chains in series
    results <- as.list(rep(NA, n.chains))
    for(i in 1:n.chains)
    {
        results[[i]] <- poisson.CARar2MCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W,
                    rho=rho, alpha=alpha, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N,
                    N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin,
                    n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block,
                    list.block=list.block, prior.mean.beta=prior.mean.beta,
                    prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain=i)
    }
}else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores))
{
    #### Multiple chains in parallel
    results <- as.list(rep(NA, n.chains))
    if(verbose)
    {
        compclust <- makeCluster(n.cores, outfile="CARBayesSTprogress.txt")
        cat("The current progress of the model fitting algorithm has been output to CARBayesSTprogress.txt in the working directory")
    }else
    {
        compclust <- makeCluster(n.cores)
    }
    results <- clusterCall(compclust, fun=poisson.CARar2MCMC, Y=Y, offset=offset,
                X.standardised=X.standardised, W=W, rho=rho, alpha=alpha, fix.rho.S=fix.rho.S,
                fix.rho.T=fix.rho.T, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss,
                n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA,
                n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta,
                prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain="all")
    stopCluster(compclust)
}else
{
    stop("n.chains or n.cores are not positive integers.", call.=FALSE)
}

#### end timer
if(verbose)
{
    cat("\nSummarising results.\n")
}else
{}


###################################
#### Summarise and save the results
###################################
if(n.chains==1)
{
    #### If n.chains==1
    ## Compute the acceptance rates
    accept.final <- rep(NA, 4)
    names(accept.final) <- c("beta", "phi", "rho.S", "rho.T")
    accept.final[1] <- 100 * results$accept[1] / results$accept[2]
    accept.final[2] <- 100 * results$accept[3] / results$accept[4]
    if(!fix.rho.S) accept.final[3] <- 100 * results$accept[5] / results$accept[6]
    if(!fix.rho.T) accept.final[4] <- 100

    ## Compute the fitted deviance
    mean.beta <- apply(results$samples.beta,2,mean)
    regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE)
    mean.phi <- matrix(apply(results$samples.phi, 2, mean), nrow=K, ncol=N)
    fitted.mean <- as.numeric(exp(offset.mat + mean.phi + regression.mat))
    deviance.fitted <- -2 * sum(dpois(x=as.numeric(Y), lambda=fitted.mean, log=TRUE), na.rm=TRUE)
    modelfit <- common.modelfit(results$samples.loglike, deviance.fitted)

    ## Create the fitted values and residuals
    fitted.values <- apply(results$samples.fitted, 2, mean)
    response.residuals <- as.numeric(Y) - fitted.values
    pearson.residuals <- response.residuals /sqrt(fitted.values)
    residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)

    ## Transform the parameters back to the original covariate scale.
samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE) ## Create the samples object if(fix.rho.S & fix.rho.T) { samples.rhoext <- NA }else if(fix.rho.S & !fix.rho.T) { samples.rhoext <- results$samples.alpha names(samples.rhoext) <- c("rho1.T", "rho2.T") }else if(!fix.rho.S & fix.rho.T) { samples.rhoext <- results$samples.rho names(samples.rhoext) <- "rho.S" }else { samples.rhoext <- cbind(results$samples.rho, results$samples.alpha) colnames(samples.rhoext) <- c("rho.S", "rho1.T", "rho2.T") } samples <- list(beta=mcmc(samples.beta.orig), phi=mcmc(results$samples.phi), tau2=mcmc(results$samples.tau2), rho=mcmc(samples.rhoext), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y)) ## Create a summary object n.keep <- floor((n.sample - burnin)/thin) summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975)))) summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(samples$beta), geweke.diag(samples$beta)$z) rownames(summary.beta) <- colnames(X) colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag") summary.tau2 <- c(mean(results$samples.tau2), quantile(results$samples.tau2, c(0.025, 0.975)), n.keep, 100, effectiveSize(samples$tau2), geweke.diag(samples$tau2)$z) summary.rho <- array(NA, c(3,7)) row.names(summary.rho) <- c("rho.S", "rho1.T", "rho2.T") if(!fix.rho.S) { summary.rho[1, 1:3] <- c(mean(results$samples.rho), quantile(results$samples.rho, c(0.025, 0.975))) summary.rho[1, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.S"], effectiveSize(results$samples.rho), geweke.diag(results$samples.rho)$z) }else { summary.rho[1, 1:3] <- c(rho, rho, rho) summary.rho[1, 4:7] <- rep(NA, 4) } if(!fix.rho.T) { summary.rho[2, 1:3] <- c(mean(results$samples.alpha[ ,1]), quantile(results$samples.alpha[ ,1], c(0.025, 0.975))) summary.rho[2, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.T"], effectiveSize(results$samples.alpha[ ,1]), geweke.diag(results$samples.alpha[ ,1])$z) summary.rho[3, 1:3] <- c(mean(results$samples.alpha[ ,2]), quantile(results$samples.alpha[ ,2], c(0.025, 0.975))) summary.rho[3, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.T"], effectiveSize(results$samples.alpha[ ,2]), geweke.diag(results$samples.alpha[ ,2])$z) }else { summary.rho[2, 1:3] <- c(alpha[1], alpha[1], alpha[1]) summary.rho[2, 4:7] <- rep(NA, 4) summary.rho[3, 1:3] <- c(alpha[2], alpha[2], alpha[2]) summary.rho[3, 4:7] <- rep(NA, 4) } summary.results <- rbind(summary.beta, summary.tau2, summary.rho) rownames(summary.results)[(p+1)] <- "tau2" summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4) summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1) }else { #### If n.chains > 1 ## Compute the acceptance rates accept.final <- rep(NA, 4) names(accept.final) <- c("beta", "phi", "rho.S", "rho.T") accept.temp <- lapply(results, function(l) l[["accept"]]) accept.temp2 <- do.call(what=rbind, args=accept.temp) accept.final[1] <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2]) accept.final[2] <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4]) if(!fix.rho.S) accept.final[3] <- 100 * sum(accept.temp2[ ,5]) / sum(accept.temp2[ ,6]) if(!fix.rho.T) accept.final[4] <- 100 ## Extract the samples into separate matrix and list objects samples.beta.list <- lapply(results, function(l) l[["samples.beta"]]) samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list) 
samples.phi.list <- lapply(results, function(l) l[["samples.phi"]]) samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list) if(!fix.rho.S) { samples.rho.list <- lapply(results, function(l) l[["samples.rho"]]) samples.rho.matrix <- do.call(what=rbind, args=samples.rho.list) } if(!fix.rho.T) { samples.alpha.list <- lapply(results, function(l) l[["samples.alpha"]]) samples.alpha.matrix <- do.call(what=rbind, args=samples.alpha.list) } samples.tau2.list <- lapply(results, function(l) l[["samples.tau2"]]) samples.tau2.matrix <- do.call(what=rbind, args=samples.tau2.list) samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]]) samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list) samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]]) samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list) if(n.miss>0) samples.Y.list <- lapply(results, function(l) l[["samples.Y"]]) ## Compute the fitted deviance mean.beta <- apply(samples.beta.matrix,2,mean) regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE) mean.phi <- matrix(apply(samples.phi.matrix, 2, mean), nrow=K, ncol=N) fitted.mean <- as.numeric(exp(offset.mat + mean.phi + regression.mat)) deviance.fitted <- -2 * sum(dpois(x=as.numeric(Y), lambda=fitted.mean, log=TRUE), na.rm=TRUE) modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted) ## Create the fitted values and residuals fitted.values <- apply(samples.fitted.matrix, 2, mean) response.residuals <- as.numeric(Y) - fitted.values pearson.residuals <- response.residuals /sqrt(fitted.values) residuals <- data.frame(response=response.residuals, pearson=pearson.residuals) ## Transform the parameters back to the original covariate scale. samples.beta.list <- samples.beta.list for(j in 1:n.chains) { samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE) } samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list) ## Create MCMC objects beta.mcmc <- mcmc.list(lapply(samples.beta.list, mcmc)) phi.mcmc <- mcmc.list(lapply(samples.phi.list, mcmc)) fitted.mcmc <- mcmc.list(lapply(samples.fitted.list, mcmc)) tau2.mcmc <- mcmc.list(lapply(samples.tau2.list, mcmc)) if(n.miss>0) { Y.mcmc <- mcmc.list(lapply(samples.Y.list, mcmc)) }else { Y.mcmc <- NA } if(fix.rho.S & fix.rho.T) { rhoext.mcmc <- NA }else if(fix.rho.S & !fix.rho.T) { for(j in 1:n.chains) { colnames(samples.alpha.list[[j]]) <- c("rho1.T", "rho2.T") } rhoext.mcmc <- mcmc.list(lapply(samples.alpha.list, mcmc)) }else if(!fix.rho.S & fix.rho.T) { for(j in 1:n.chains) { colnames(samples.rho.list[[j]]) <- c("rho.S") } rhoext.mcmc <- mcmc.list(lapply(samples.rho.list, mcmc)) }else { rho.temp <- as.list(rep(NA, n.chains)) for(j in 1:n.chains) { rho.temp[[j]] <- cbind(samples.rho.list[[j]], samples.alpha.list[[j]]) colnames(rho.temp[[j]]) <- c("rho.S", "rho1.T", "rho2.T") } rhoext.mcmc <- mcmc.list(lapply(rho.temp, mcmc)) } samples <- list(beta=beta.mcmc, phi=phi.mcmc, rho=rhoext.mcmc, tau2=tau2.mcmc, fitted=fitted.mcmc, Y=Y.mcmc) ## create a summary object n.keep <- floor((n.sample - burnin)/thin) * n.chains summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975)))) summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2]) rownames(summary.beta) <- colnames(X) colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", 
"n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)") summary.tau2 <- c(mean(samples.tau2.matrix), quantile(samples.tau2.matrix, c(0.025, 0.975)), n.keep, 100, effectiveSize(tau2.mcmc), gelman.diag(tau2.mcmc)$psrf[ ,2]) summary.rho <- array(NA, c(3,7)) row.names(summary.rho) <- c("rho.S", "rho1.T", "rho2.T") if(!fix.rho.S) { temp <- mcmc.list(lapply(samples.rho.list, mcmc)) summary.rho[1, 1:3] <- c(mean(samples.rho.matrix), quantile(samples.rho.matrix, c(0.025, 0.975))) summary.rho[1, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.S"], effectiveSize(temp), gelman.diag(temp)$psrf[ ,2]) }else { summary.rho[1, 1:3] <- c(rho, rho, rho) summary.rho[1, 4:7] <- rep(NA, 4) } if(!fix.rho.T) { temp <- mcmc.list(lapply(samples.alpha.list, mcmc)) summary.rho[2, 1:3] <- c(mean(samples.alpha.matrix[ ,1]), quantile(samples.alpha.matrix[ ,1], c(0.025, 0.975))) summary.rho[2, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.T"], effectiveSize(temp[ ,1]), gelman.diag(temp[ ,1])$psrf[ ,2]) summary.rho[3, 1:3] <- c(mean(samples.alpha.matrix[ ,2]), quantile(samples.alpha.matrix[ ,2], c(0.025, 0.975))) summary.rho[3, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.T"], effectiveSize(temp[ ,2]), gelman.diag(temp[ ,2])$psrf[ ,2]) }else { summary.rho[2, 1:3] <- c(alpha[1], alpha[1], alpha[1]) summary.rho[2, 4:7] <- rep(NA, 4) summary.rho[3, 1:3] <- c(alpha[2], alpha[2], alpha[2]) summary.rho[3, 4:7] <- rep(NA, 4) } summary.results <- rbind(summary.beta, summary.tau2, summary.rho) rownames(summary.results)[(p+1)] <- "tau2" summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4) summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1) } ################################### #### Compile and return the results ################################### model.string <- c("Likelihood model - Poisson (log link function)", "\nLatent structure model - Autoregressive order 2 CAR model\n") n.total <- floor((n.sample - burnin) / thin) * n.chains mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains) names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains") results.final <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X) class(results.final) <- "CARBayesST" if(verbose) { b<-proc.time() cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n") }else {} return(results.final) }
## File: /scratch/gouwar.j/cran-all/cranData/CARBayesST/R/poisson.CARar2.R
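## ------------------------------------------------------------------------------------------
## Illustrative usage sketch, not part of the package source. It shows one plausible direct
## call to poisson.CARar2() defined above on simulated data; the toy data, the binary
## neighbourhood matrix W and the MCMC settings below are assumptions made purely for
## illustration. poisson.CARar2() is an internal function, so from outside the package
## namespace it would typically be reached via the exported wrapper (or via
## CARBayesST:::poisson.CARar2).
## ------------------------------------------------------------------------------------------
set.seed(1)
K <- 10; N <- 5                                           # 10 areas, 5 time periods
W <- matrix(0, K, K)
W[abs(row(W) - col(W)) == 1] <- 1                         # symmetric binary path-graph neighbours
x <- rnorm(K * N)
E <- rep(10, K * N)                                       # expected counts used as an offset
Y <- rpois(K * N, lambda = E * exp(0.1 * x))
fit <- poisson.CARar2(formula = Y ~ x + offset(log(E)), W = W,
                      burnin = 2000, n.sample = 12000, thin = 10)
fit$summary.results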
poisson.CARar2MCMC <- function(Y, offset, X.standardised, W, rho, alpha, fix.rho.S, fix.rho.T, K, N, N.all, p, which.miss, n.miss, burnin, n.sample, thin, MALA, n.beta.block, list.block, prior.mean.beta, prior.var.beta, prior.tau2, verbose, chain) { #Rcpp::sourceCpp("src/CARBayesST.cpp") #source("R/common.functions.R") #library(spdep) #library(truncnorm) #library(MASS) # # ############################################ #### Set up the key elements before sampling ############################################ #### Generate the initial parameter values mod.glm <- glm(Y~X.standardised-1, offset=offset, family="quasipoisson") beta.mean <- mod.glm$coefficients beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled)) beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd) log.Y <- log(Y) log.Y[Y==0] <- -0.1 res.temp <- log.Y - X.standardised %*% beta - offset res.sd <- sd(res.temp, na.rm=TRUE)/5 phi <- rnorm(n=N.all, mean=0, sd = res.sd) tau2 <- var(phi)/10 #### Specify matrix quantities Y.DA <- Y offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE) regression.mat <- matrix(X.standardised %*% beta, nrow=K, ncol=N, byrow=FALSE) phi.mat <- matrix(phi, nrow=K, ncol=N, byrow=FALSE) fitted <- exp(as.numeric(offset.mat + regression.mat + phi.mat)) #### Matrices to store samples n.keep <- floor((n.sample - burnin)/thin) samples.beta <- array(NA, c(n.keep, p)) samples.phi <- array(NA, c(n.keep, N.all)) samples.tau2 <- array(NA, c(n.keep, 1)) if(!fix.rho.S) samples.rho <- array(NA, c(n.keep, 1)) if(!fix.rho.T) samples.alpha <- array(NA, c(n.keep, 2)) samples.fitted <- array(NA, c(n.keep, N.all)) samples.loglike <- array(NA, c(n.keep, N.all)) if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss)) #### Specify the Metropolis quantities accept <- rep(0,6) proposal.sd.phi <- 0.1 proposal.sd.rho <- 0.05 proposal.sd.beta <- 0.01 tau2.shape <- prior.tau2[1] + N.all/2 #### CAR quantities W.quants <- common.Wcheckformat.leroux(W) W <- W.quants$W W.triplet <- W.quants$W.triplet W.n.triplet <- W.quants$n.triplet W.triplet.sum <- W.quants$W.triplet.sum n.neighbours <- W.quants$n.neighbours W.begfin <- W.quants$W.begfin #### Create the determinant if(!fix.rho.S) { Wstar <- diag(apply(W,1,sum)) - W Wstar.eigen <- eigen(Wstar) Wstar.val <- Wstar.eigen$values det.Q.W <- 0.5 * sum(log((rho * Wstar.val + (1-rho)))) }else {} #### Check for islands W.list<- mat2listw(W, style = "B") W.nb <- W.list$neighbours W.islands <- n.comp.nb(W.nb) islands <- W.islands$comp.id n.islands <- max(W.islands$nc) if(rho==1 & alpha[1]==2 & alpha[2]==-1) { tau2.shape <- prior.tau2[1] + prior.tau2[1] + ((N-2) * (K-n.islands))/2 }else if(rho==1) { tau2.shape <- prior.tau2[1] + prior.tau2[1] + (N * (K-n.islands))/2 }else if(alpha[1]==2 & alpha[2]==-1) { tau2.shape <- prior.tau2[1] + prior.tau2[1] + ((N-2) * K)/2 }else {} #### Start timer if(verbose) { cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ") progressBar <- txtProgressBar(style = 3) percentage.points<-round((1:100/100)*n.sample) }else { percentage.points<-round((1:100/100)*n.sample) } ############################## #### Generate the MCMC samples ############################## #### Create the MCMC samples for(j in 1:n.sample) { #################################### ## Sample from Y - data augmentation #################################### if(n.miss>0) { Y.DA[which.miss==0] <- rpois(n=n.miss, lambda=fitted[which.miss==0]) }else {} Y.DA.mat <- matrix(Y.DA, nrow=K, ncol=N, byrow=FALSE) #################### ## Sample from beta 
#################### offset.temp <- as.numeric(offset.mat + phi.mat) if(MALA) { temp <- poissonbetaupdateMALA(X.standardised, N.all, p, beta, offset.temp, Y.DA, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block) }else { temp <- poissonbetaupdateRW(X.standardised, N.all, p, beta, offset.temp, Y.DA, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block) } beta <- temp[[1]] accept[1] <- accept[1] + temp[[2]] accept[2] <- accept[2] + n.beta.block regression.mat <- matrix(X.standardised %*% beta, nrow=K, ncol=N, byrow=FALSE) #################### ## Sample from phi #################### phi.offset <- offset.mat + regression.mat den.offset <- rho * W.triplet.sum + 1 - rho temp1 <- poissonar2carupdateRW(W.triplet, W.begfin, W.triplet.sum, K, N, phi.mat, tau2, alpha[1], alpha[2], rho, Y.DA.mat, proposal.sd.phi, phi.offset, den.offset) phi.temp <- temp1[[1]] phi <- as.numeric(phi.temp) - mean(as.numeric(phi.temp)) phi.mat <- matrix(phi, nrow=K, ncol=N, byrow=FALSE) accept[3] <- accept[3] + temp1[[2]] accept[4] <- accept[4] + K*N #################### ## Sample from alpha #################### if(!fix.rho.T) { #### Construct the quadratic forms temp2 <- alphaquadformcompute(W.triplet, W.triplet.sum, W.n.triplet, K, N, phi.mat, rho, tau2) #### Construct the precision matrix alpha.prec <- array(c(temp2[[1]], temp2[[3]], temp2[[3]], temp2[[2]]), c(2,2)) alpha.var <- solve(alpha.prec) #### Construct the mean vector U2 <- (temp2[[1]] * temp2[[5]] - temp2[[3]] * temp2[[4]]) / (temp2[[2]] * temp2[[1]] - temp2[[3]]^2) U1 <- (1 / temp2[[3]]) * (temp2[[5]] - temp2[[2]] * U2) alpha.mean <- c(U1, U2) alpha <- mvrnorm(n=1, mu=alpha.mean, Sigma=alpha.var) }else {} #################### ## Samples from tau2 #################### temp3 <- tauquadformcomputear2(W.triplet, W.triplet.sum, W.n.triplet, K, N, phi.mat, rho, alpha[1], alpha[2]) tau2.scale <- temp3 + prior.tau2[2] tau2 <- 1 / rgamma(1, tau2.shape, scale=(1/tau2.scale)) ################## ## Sample from rho ################## if(!fix.rho.S) { proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho) temp4 <- tauquadformcomputear2(W.triplet, W.triplet.sum, W.n.triplet, K, N, phi.mat, proposal.rho, alpha[1], alpha[2]) det.Q.W.proposal <- 0.5 * sum(log((proposal.rho * Wstar.val + (1-proposal.rho)))) logprob.current <- N * det.Q.W - temp3 / tau2 logprob.proposal <- N * det.Q.W.proposal - temp4 / tau2 hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho)) prob <- exp(logprob.proposal - logprob.current + hastings) if(prob > runif(1)) { rho <- proposal.rho det.Q.W <- det.Q.W.proposal accept[5] <- accept[5] + 1 }else { } accept[6] <- accept[6] + 1 }else {} ######################### ## Calculate the deviance ######################### lp <- as.numeric(offset.mat + regression.mat + phi.mat) fitted <- exp(lp) loglike <- dpois(x=as.numeric(Y), lambda=fitted, log=TRUE) ################### ## Save the results ################### if(j > burnin & (j-burnin)%%thin==0) { ele <- (j - burnin) / thin samples.beta[ele, ] <- beta samples.phi[ele, ] <- as.numeric(phi) samples.tau2[ele, ] <- tau2 if(!fix.rho.S) samples.rho[ele, ] <- rho if(!fix.rho.T) samples.alpha[ele, ] <- alpha samples.fitted[ele, ] <- fitted samples.loglike[ele, ] <- loglike if(n.miss>0) samples.Y[ele, ] <- Y.DA[which.miss==0] }else {} ######################################## ## Self tune the acceptance probabilties ######################################## 
    if(ceiling(j/100)==floor(j/100) & j < burnin)
    {
        #### Update the proposal sds
        if(p>2)
        {
            proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 40, 50)
        }else
        {
            proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 30, 40)
        }
        proposal.sd.phi <- common.accceptrates1(accept[3:4], proposal.sd.phi, 40, 50)
        if(!fix.rho.S) proposal.sd.rho <- common.accceptrates2(accept[5:6], proposal.sd.rho, 40, 50, 0.5)
        accept <- rep(0,6)
    }else
    {}

    ################################
    ## print progress to the console
    ################################
    if(j %in% percentage.points & verbose)
    {
        setTxtProgressBar(progressBar, j/n.sample)
    }
}


############################################
#### Return the results to the main function
############################################
#### Compile the results
if(n.miss==0) samples.Y <- NA
if(fix.rho.S) samples.rho <- NA
if(fix.rho.T) samples.alpha <- NA
chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.tau2=samples.tau2,
                      samples.rho=samples.rho, samples.alpha=samples.alpha,
                      samples.loglike=samples.loglike, samples.fitted=samples.fitted,
                      samples.Y=samples.Y, accept=accept)

#### Return the results
return(chain.results)
}
## File: /scratch/gouwar.j/cran-all/cranData/CARBayesST/R/poisson.CARar2MCMC.R
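## ------------------------------------------------------------------------------------------
## Illustrative sketch, not part of the package source. poisson.CARar2MCMC() above updates
## spatio-temporal random effects that, roughly, follow an order-2 autoregressive process in
## time with Leroux CAR structured innovations in space:
##     phi_t | phi_{t-1}, phi_{t-2} ~ N(alpha1 * phi_{t-1} + alpha2 * phi_{t-2}, tau2 * Q(W, rho)^{-1})
##     Q(W, rho) = rho * (diag(rowSums(W)) - W) + (1 - rho) * I
## The forward simulation below is a hedged toy illustration of that prior structure; all
## parameter values and the path-graph W are assumptions chosen only to keep it self-contained.
## ------------------------------------------------------------------------------------------
set.seed(1)
K <- 5; N <- 10
alpha <- c(0.8, -0.2); rho <- 0.9; tau2 <- 0.1
W <- matrix(0, K, K)
W[abs(row(W) - col(W)) == 1] <- 1                          # path-graph neighbourhood matrix
Q <- rho * (diag(rowSums(W)) - W) + (1 - rho) * diag(K)    # Leroux precision (up to 1/tau2)
Sigma <- tau2 * solve(Q)
phi <- matrix(0, nrow = K, ncol = N)
phi[, 1] <- MASS::mvrnorm(n = 1, mu = rep(0, K), Sigma = Sigma)
phi[, 2] <- MASS::mvrnorm(n = 1, mu = rep(0, K), Sigma = Sigma)
for(t in 3:N)
{
    phi[, t] <- alpha[1] * phi[, t - 1] + alpha[2] * phi[, t - 2] +
                MASS::mvrnorm(n = 1, mu = rep(0, K), Sigma = Sigma)
}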
#-------------------------------------------------------------------------------------------------------------------------------------------------------- # Bayesian hierarchical mixed-effects model for clustering areas based on disease risk trends (Poisson version) #-------------------------------------------------------------------------------------------------------------------------------------------------------- poisson.CARclustrends <- function(formula, data=NULL, W, burnin, n.sample, thin=1, trends=NULL, changepoint=NULL, knots=NULL, prior.mean.beta=NULL, prior.var.beta=NULL, prior.mean.gamma=NULL, prior.var.gamma=NULL, prior.lambda=NULL, prior.tau2=NULL, Nchains=4, verbose=TRUE) { #------------------------------------------------------------------------------------------------------------------------------------------------------- # Check on the verbose option #------------------------------------------------------------------------------------------------------------------------------------------------------- a <- common.verbose(verbose) #------------------------------------------------------------------------------------------------------------------------------------------------------- # Check trends vector #------------------------------------------------------------------------------------------------------------------------------------------------------- All.Trends <- c("Constant", "LD", "LI", "CP", "CT", "MD", "MI") Total.trends <- length(All.Trends) - 2 # minus 2 as can't include both LD/MD or LI/MI #------------------------------------------------------------------------------------------------------------------------------------------------------- # Check that a trend vector has been given #------------------------------------------------------------------------------------------------------------------------------------------------------- if(is.null(trends)) stop("At least two trends, with one being the constant trend, have to be given.", call.=FALSE) trends <- unique(trends) #------------------------------------------------------------------------------------------------------------------------------------------------------- # Check that the constant trend is selected #------------------------------------------------------------------------------------------------------------------------------------------------------- if((All.Trends[1] %in% trends) & length(trends) == 1 | !(All.Trends[1] %in% trends)) { stop("The constant trend has to be selected alongside at least one other trend.", call.=FALSE) } #------------------------------------------------------------------------------------------------------------------------------------------------------- # Check to see if correct trends inputted #------------------------------------------------------------------------------------------------------------------------------------------------------- if(!all(trends %in% All.Trends)) stop("Incorrect trend selected.", call.=FALSE) #------------------------------------------------------------------------------------------------------------------------------------------------------- # Check that LI and MI are both not included #------------------------------------------------------------------------------------------------------------------------------------------------------- if(all(c("LI", "MI") %in% trends)) stop("Select only one of LI or MI as the increasing trend.", call.=FALSE) 
#------------------------------------------------------------------------------------------------------------------------------------------------------- # Check that LD and MD are both not included #------------------------------------------------------------------------------------------------------------------------------------------------------- if(all(c("LD", "MD") %in% trends)) stop("Select only one of LD or MD as the decreasing trend.", call.=FALSE) #------------------------------------------------------------------------------------------------------------------------------------------------------- # Check that the changepoint is included and within the given time period #------------------------------------------------------------------------------------------------------------------------------------------------------- if(any(c("CP", "CT") %in% trends) & is.null(changepoint)) stop("A changepoint needs to be included for the changepoint trends (CP, CT).", call.=FALSE) if(any(c("CP", "CT") %in% trends) & length(changepoint) != 1) stop("The changepoint should be a scalar.", call.=FALSE) if(any(c("CP", "CT") %in% trends) & !is.null(changepoint)) { if(changepoint < 1) stop("The changepoint should be positive.", call.=FALSE) } #------------------------------------------------------------------------------------------------------------------------------------------------------- # Check the number of knots for the monotonic trends #------------------------------------------------------------------------------------------------------------------------------------------------------- if(any(c("MD", "MI") %in% trends) & is.null(knots)) stop("The number of knots has to be chosen for the monotonic trends (MD, MI).", call.=FALSE) if(any(c("MD", "MI") %in% trends) & length(knots) != 1) stop("The number of knots should be a scalar.", call.=FALSE) if(any(c("MD", "MI") %in% trends) & !is.null(knots)) { if(knots < 1) stop("The number of knots should be positive.", call.=FALSE) } #------------------------------------------------------------------------------------------------------------------------------------------------------- # The constant trend does not need to be included within the trends vector #------------------------------------------------------------------------------------------------------------------------------------------------------- N.trends <- length(trends) #------------------------------------------------------------------------------------------------------------------------------------------------------- # Set number of knots to 0 if monotonic trends not included #------------------------------------------------------------------------------------------------------------------------------------------------------- if(!any(c("MD", "MI") %in% trends)) knots <- 0 #------------------------------------------------------------------------------------------------------------------------------------------------------- # Track positions of each of the possible trends #------------------------------------------------------------------------------------------------------------------------------------------------------- Trends.pos <- c("Constant", "LD", "LI", rep("CP", 2), rep("CT", 2), rep("MD", knots + 1), rep("MI", knots + 1)) Trends.pos.numeric <- c(1, 2, 3, rep(4, 2), rep(5, 2), rep(6, knots + 1), rep(7, knots + 1)) Trends.pos.num <- length(Trends.pos) 
#------------------------------------------------------------------------------------------------------------------------------------------------------- # Track positions of the chosen trends #------------------------------------------------------------------------------------------------------------------------------------------------------- Trends.id <- which(Trends.pos %in% trends) Trends.sel <- length(Trends.pos[Trends.id]) Trends.id <- Trends.id[Trends.id != 1] #------------------------------------------------------------------------------------------------------------------------------------------------------- # Vector for the number of gamma parameters associated with each of the trends #------------------------------------------------------------------------------------------------------------------------------------------------------- params.trends <- c(0, 1, 1, rep(1, 2), rep(1, 2), rep(1, knots + 1), rep(1, knots + 1)) Total.params.trends <- sum(params.trends) params.selected <- sum(params.trends[Trends.id]) #------------------------------------------------------------------------------------------------------------------------------------------------------- # Matrix containing tracking positions of associated gamma parameters #------------------------------------------------------------------------------------------------------------------------------------------------------- Trend.pairs <- matrix(c(1, 0, 2, 0, 3, 0, 4, 5, 6, 7), ncol = 2, byrow = TRUE) rownames(Trend.pairs) <- c("Constant", "LD", "LI", "CP", "CT") #------------------------------------------------------------------------------------------------------------------------------------------------------- # Include corresponding information for the monotonic trends #------------------------------------------------------------------------------------------------------------------------------------------------------- col.knots1 <- seq(from = max(Trend.pairs) + 1, to = max(Trend.pairs) + (2 * (knots + 1)), by = 1) col.knots2 <- c(0, rep(-1, knots), 0, rep(-1, knots)) col.knots2[which(col.knots2 == 0)] <- col.knots1[which(col.knots2 == 0)] row.knots <- matrix(c(col.knots1, col.knots2), ncol = 2) rownames(row.knots) <- c(rep("MD", knots + 1), rep("MI", knots + 1)) row.knots <- row.knots[which(rownames(row.knots) %in% trends), ] Trend.pairs <- rbind(Trend.pairs, row.knots) Trend.pairs <- Trend.pairs[which(rownames(Trend.pairs) %in% trends), ] n.sel <- nrow(Trend.pairs) #------------------------------------------------------------------------------------------------------------------------------------------------------- # Update tracking positions for the selected gamma parameters #------------------------------------------------------------------------------------------------------------------------------------------------------- Trend.pairs.update <- Trend.pairs CP.check <- 1 for(i in 1:n.sel) { if(Trend.pairs[i, 2] == 0) { Trend.pairs.update[i, 1] <- i }else if(Trend.pairs[i, 2] > 0) { if(rownames(Trend.pairs)[i] %in% c("CP", "CT")) { Trend.pairs.update[i, 1] <- Trend.pairs.update[(i-1), 1] + CP.check Trend.pairs.update[i, 2] <- Trend.pairs.update[i, 1] + 1 CP.check <- CP.check + 1 }else if(rownames(Trend.pairs)[i] %in% c("MD", "MI")) { if(Trend.pairs.update[(i-1), 2] > 0) { Mono.check <- 2 }else { Mono.check <- 1 } Trend.pairs.update[i, ] <- Trend.pairs.update[(i-1), 1] + Mono.check } }else if(Trend.pairs[i, 2] < 0) { Trend.pairs.update[i, 1] <- Trend.pairs.update[(i-1), 1] + 1 } } 
#------------------------------------------------------------------------------------------------------------------------------------------------------- # Track positions of the gamma parameters selected by the given trends #------------------------------------------------------------------------------------------------------------------------------------------------------- Trend.names <- rownames(Trend.pairs.update) gamma.pos <- rep(0, Trends.pos.num) pos.gamma <- unique(Trend.pairs.update[which(Trend.pairs %in% Trends.id)]) gamma.pos[Trends.id] <- pos.gamma[order(pos.gamma)] #------------------------------------------------------------------------------------------------------------------------------------------------------- # Check the number of MCMC chains is >= 2 #------------------------------------------------------------------------------------------------------------------------------------------------------- if(Nchains <= 1) stop("the number of chains has to be greater than or equal 2.", call.=FALSE) if(Nchains %% 1 != 0) stop("the number of chains needs to be an integer.", call.=FALSE) #------------------------------------------------------------------------------------------------------------------------------------------------------- # Format the arguments and check for errors #------------------------------------------------------------------------------------------------------------------------------------------------------- frame.results <- common.frame(formula, data, "poisson") N.all <- frame.results$n p <- frame.results$p X <- frame.results$X X.standardised <- frame.results$X.standardised X.sd <- frame.results$X.sd X.mean <- frame.results$X.mean X.indicator <- frame.results$X.indicator offset <- frame.results$offset Y <- frame.results$Y if(p>1) stop("No covariates are allowed in this model due to identifiability issues.", call.=FALSE) #------------------------------------------------------------------------------------------------------------------------------------------------------- # Check that the changepoint is included and within time period #------------------------------------------------------------------------------------------------------------------------------------------------------- if(any(c("CP", "CT") %in% trends) & !is.null(changepoint)) { if(!(changepoint >= 1 & changepoint <= N)) stop("The changepoint needs to be within the time period.", call.=FALSE) } #------------------------------------------------------------------------------------------------------------------------------------------------------- # Check that the number of knots is not greater than the number of time points #------------------------------------------------------------------------------------------------------------------------------------------------------- if(any(c("MD", "MI") %in% trends) & !is.null(knots)) { if(knots > N) stop("The number of knots cannot be greater than the number of time points.", call.=FALSE) } #------------------------------------------------------------------------------------------------------------------------------------------------------ # Spatial quantities #------------------------------------------------------------------------------------------------------------------------------------------------------ W.quants <- common.Wcheckformat.leroux(W) K <- W.quants$n N <- N.all / K W <- W.quants$W W.triplet <- W.quants$W.triplet W.n.triplet <- W.quants$n.triplet W.triplet.sum <- W.quants$W.triplet.sum W.neighbours <- W.quants$n.neighbours W.begfin <- 
W.quants$W.begfin #------------------------------------------------------------------------------------------------------------------------------------------------------- # Check and specify the priors #------------------------------------------------------------------------------------------------------------------------------------------------------- if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p) if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p) if(is.null(prior.mean.gamma)) prior.mean.gamma <- rep(0, params.selected) if(is.null(prior.var.gamma)) prior.var.gamma <- rep(100000, params.selected) prior.mean.trends <- rep(0, Trends.pos.num) prior.mean.trends[Trends.id] <- prior.mean.gamma prior.var.trends <- rep(1000, Trends.pos.num) prior.var.trends[Trends.id] <- prior.var.gamma if(is.null(prior.lambda)) prior.lambda <- rep(1, N.trends) if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01) prior.beta.check(prior.mean.beta, prior.var.beta, p) prior.var.check(prior.tau2) if(length(prior.mean.gamma) != params.selected) stop("the prior mean for gamma is the wrong length.", call.=FALSE) if(!is.numeric(prior.mean.gamma)) stop("the prior mean for gamma is not numeric.", call.=FALSE) if(sum(is.na(prior.mean.gamma)) != 0) stop("the prior mean for gamma is missing.", call.=FALSE) if(prior.mean.trends[2] > 0) stop("the prior mean for the LD trend should be non-positive.", call.=FALSE) if(prior.mean.trends[3] < 0) stop("the prior mean for the LI trend should be non-negative.", call.=FALSE) if(prior.mean.trends[4] < 0) stop("the prior mean for the increase in CP trend should be non-negative.", call.=FALSE) if(prior.mean.trends[5] > 0) stop("the prior mean for the decrease in CP trend should be non-positive.", call.=FALSE) if(prior.mean.trends[6] > 0) stop("the prior mean for the decrease in CT trend should be non-positive.", call.=FALSE) if(prior.mean.trends[7] < 0) stop("the prior mean for the increase in CT trend should be non-negative.", call.=FALSE) if(any(prior.mean.trends[8:(8 + knots)] > 0)) stop("the prior mean for the MD trend should be non-positive.", call.=FALSE) if(any(prior.mean.trends[(8 + knots + 1):(8 + knots + 1) + knots] < 0)) stop("the prior mean for the MI trend should be non-negative.", call.=FALSE) if(length(prior.var.gamma)!= params.selected) stop("the prior variance for gamma is the wrong length.", call.=FALSE) if(!is.numeric(prior.var.gamma)) stop("the prior variance for gamma is not numeric.", call.=FALSE) if(sum(is.na(prior.var.gamma))!= 0) stop("the prior variance for gamma is missing.", call.=FALSE) if(min(prior.var.gamma) <= 0) stop("the prior variance for gamma is less than zero", call.=FALSE) if(length(prior.lambda) != N.trends) stop("the prior value for lambda is the wrong length.", call.=FALSE) if(!is.numeric(prior.lambda)) stop("the prior value for lambda is not numeric.", call.=FALSE) if(sum(is.na(prior.lambda)) != 0) stop("the prior value for lambda has missing values.", call.=FALSE) #------------------------------------------------------------------------------------------------------------------------------------------------------- # Specify the initial parameter values #------------------------------------------------------------------------------------------------------------------------------------------------------- beta <- glm(Y~X.standardised-1, offset=offset, family=poisson)$coefficients beta <- matrix(beta, nrow = p, ncol = Nchains) proposal.corr.beta <- solve(t(X.standardised) %*% X.standardised) chol.proposal.corr.beta <- 
chol(proposal.corr.beta) #------------------------------------------------------------------------------------------------------------------------------------------------------- # Different initial beta values for each chain #------------------------------------------------------------------------------------------------------------------------------------------------------- proposal.sd.beta <- rep(0.1, Nchains) beta <- matcomp(chol.proposal.corr.beta, beta, proposal.sd.beta, p, Nchains) gamma <- array(0, c(Trends.sel, Nchains)) for (i in Trends.id) { if(i == 2 | i == 5 | i == 6 | (i %in% 8:(8 + knots))) { gamma[gamma.pos[i], ] <- rtruncnorm(n=Nchains, b = 0, mean = 0, sd = 0.1) }else if (i == 3 | i == 4 | i == 7 | (i %in% (8 + knots + 1):(8 + knots + 1 + knots))) { gamma[gamma.pos[i], ] <- rtruncnorm(n=Nchains, a = 0, mean = 0, sd = 0.1) } } gamma.mat <- array(0, c(N.all, Trends.sel, Nchains)) for (i in Trends.id) { gamma.mat[,gamma.pos[i],] <- matN(gamma[gamma.pos[i], ], N.all, Nchains) } tau2 <- runif(Nchains, 0, 1) rho <- runif(Nchains, 0, 1) lambda <- t(rdirichlet(Nchains, prior.lambda)) w <- array(NA, c(K, N.trends, Nchains)) phi <- array(NA, c(K, Nchains)) for (i in 1:Nchains) { w[, , i] <- t(rmultinom(K, 1, lambda[, i])) phi[, i] <- rnorm(K, mean = 0, sd = 0.01) } kronN <- rep(1, N) phimat <- kronecker(kronN, phi) wmat <- kronecker(kronN, w) w.chain.mat <- matrix(aperm(w, c(1, 3, 2)), nrow = K * Nchains, ncol = N.trends) #------------------------------------------------------------------------------------------------------------------------------------------------------- # Compute the blocking structure for covariate beta's #------------------------------------------------------------------------------------------------------------------------------------------------------- block.temp <- common.betablock(p) beta.beg <- block.temp[[1]] beta.fin <- block.temp[[2]] n.beta.block <- block.temp[[3]] #------------------------------------------------------------------------------------------------------------------------------------------------------ # MCMC quantities - burnin, n.sample, thin #------------------------------------------------------------------------------------------------------------------------------------------------------ common.burnin.nsample.thin.check(burnin, n.sample, thin) #------------------------------------------------------------------------------------------------------------------------------------------------------- # Set up matrices to store samples #------------------------------------------------------------------------------------------------------------------------------------------------------- n.keep <- floor((n.sample - burnin)/thin) samples.beta <- array(NA, c(n.keep, p, Nchains)) samples.gamma <- array(NA, c(Trends.sel, n.keep, 1, Nchains)) samples.w <- array(NA, c(n.keep, K, N.trends, Nchains)) samples.lambda <- array(NA, c(n.keep, N.trends, Nchains)) samples.tau2 <- array(NA, c(n.keep, 1, Nchains)) samples.rho <- array(NA, c(n.keep, 1, Nchains)) samples.phi <- array(NA, c(n.keep, K, Nchains)) samples.fitted <- array(NA, c(n.keep, N.all, Nchains)) samples.like <- array(NA, c(n.keep, N.all, Nchains)) samples.deviance <- array(NA, c(n.keep, 1, Nchains)) #------------------------------------------------------------------------------------------------------------------------------------------------------- # Specify the Metropolis quantities 
#------------------------------------------------------------------------------------------------------------------------------------------------------- accept.all <- rep(0, 2 * (Trends.sel + 1) * Nchains) accept <- accept.all begin.accept <- seq(from = 1, to = length(accept), by = 2) end.accept <- begin.accept + 1 accept.blocks.num <- array(begin.accept, c(Nchains, 2)) accept.blocks.den <- array(end.accept, c(Nchains, 2)) accept.weight <- matrix(0, nrow = K, ncol = 2 * Nchains) accept.w.all <- matrix(0, nrow = K, ncol = 2 * Nchains) accept.phis <- matrix(0, nrow = K, ncol = 2 * Nchains) accept.phis.all <- matrix(0, nrow = K, ncol = 2 * Nchains) accept.gammas <- matrix(0, nrow = Trends.sel, ncol = 2 * Nchains) accept.gammas.all <- matrix(0, nrow = Trends.sel, ncol = 2 * Nchains) accept.couple <- rep(0, 2) couples <- accept.couple tau2.shape <- prior.tau2[1] + K/2 #------------------------------------------------------------------------------------------------------------------------------------------------------- # Create the determinant #------------------------------------------------------------------------------------------------------------------------------------------------------- Wstar <- diag(apply(W, 1, sum)) - W Wstar.eigen <- eigen(Wstar) Wstar.val <- Wstar.eigen$values det.Q.W <- Qdet(Nchains, rho, Wstar.val) #------------------------------------------------------------------------------------------------------------------------------------------------------- # Specify quantities that do not change #------------------------------------------------------------------------------------------------------------------------------------------------------- Y.mat <- matrix(Y, nrow=K, ncol=N, byrow=FALSE) offset.mat <- matrix(offset, nrow=N.all, ncol=Nchains) tp <- rep(1:N, each=K) tp.mat <- matrix(tp, nrow=K, ncol=N) tp.mat.trends <- array(tp.mat, c(K, N, Trends.sel)) tp.mat.trends <- aperm(tp.mat.trends, c(1, 3, 2)) tpmat <- array(tp, c(N.all, Trends.sel, Nchains)) #------------------------------------------------------------------------------------------------------------------------------------------------------- # Update the matrix corresponding to time given trends CP/CT or MD/MI #------------------------------------------------------------------------------------------------------------------------------------------------------- any.CP <- any(Trend.pairs[, 2] != 0) track.pos <- which(Trend.pairs[, 2] != 0) neg.pos <- which(Trend.pairs.update < 0, arr.ind = TRUE) Trend.pairs.update[neg.pos[, 1], 2] <- Trend.pairs.update[neg.pos[, 1], 1] track.add <- which(Trend.pairs[, 2] > 0) if(any(names(track.pos) %in% c("MD", "MI"))) { track.0 <- track.pos[which(names(track.pos) %in% c("MD", "MI"))] track.0 <- track.0[which(track.0 %in% track.add)] track.add <- track.pos[-which(track.pos %in% track.0)] track.add <- Trend.pairs.update[track.add, 2] }else { track.add <- Trend.pairs.update[track.add, 2] } if(any.CP) { tp.pos <- Trend.pairs.update[track.pos, 2] if(any(names(tp.pos) %in% c("CP", "CT"))) { tp.CP <- tp.pos[names(tp.pos) %in% c("CP", "CT")] tpmat[, tp.CP, ] <- tpmat[, tp.CP, ] - changepoint tpmat[tpmat < 0] <- 0 tp.mat.trends[, tp.CP, ] <- tp.mat.trends[, tp.CP, ] - changepoint tp.mat.trends[tp.mat.trends < 0] <- 0 } if(any(names(tp.pos) %in% c("MD", "MI"))) { tp.CP <- tp.pos[names(tp.pos) %in% c("MD", "MI")] k.space <- seq(from = 1, to = N, length = knots + 2) k.space <- round(k.space[-c(1, (knots+2))], digits = 1) if(all(c("MD", "MI") %in% names(tp.pos))) { kmat.col <- 2 * knots 
}else { kmat.col <- knots } kmat <- matrix(k.space, nrow = N.all, ncol = kmat.col, byrow = TRUE) kmat.Nchains <- array(kmat, dim = c(N.all, kmat.col, Nchains)) kmat.N <- matrix(k.space, nrow = K, ncol = kmat.col, byrow = TRUE) kmat.N <- array(kmat.N, dim = c(K, kmat.col, N)) if(all(c("MD", "MI") %in% trends)) { tp.pos.0 <- rep(NA, 2) tp.pos.0[1] <- which(names(tp.CP) == "MD")[1] tp.pos.0[2] <- which(names(tp.CP) == "MI")[1] } else if("MD" %in% trends & !("MI" %in% trends)) { tp.pos.0 <- which(names(tp.CP) == "MD")[1] } else if("MI" %in% trends & !("MD" %in% trends)) { tp.pos.0 <- which(names(tp.CP) == "MI")[1] } tp.pos.row <- tp.CP[tp.pos.0] tpmat[, tp.pos.row, ] <- tpmat[, tp.pos.row, ] / N tpmat[, tp.CP[-tp.pos.0], ] <- ((tpmat[, tp.CP[-tp.pos.0], ] - kmat.Nchains)^3) / N^3 tpmat[tpmat < 0] <- 0 kmax <- apply(tpmat[, tp.CP[-tp.pos.0], ], 2, max) kmax.mat <- matrix(kmax, nrow = N.all, ncol = kmat.col, byrow = TRUE) kmax.Nchains <- array(kmax.mat, dim = c(N.all, kmat.col, Nchains)) kmax.N <- matrix(kmax, nrow = K, ncol = kmat.col, byrow = TRUE) kmax.N <- array(kmax.N, dim = c(K, kmat.col, N)) tpmat[, tp.CP[-tp.pos.0], ] <- tpmat[, tp.CP[-tp.pos.0], ] / kmax.Nchains tp.mat.trends[, tp.pos.row, ] <- tp.mat.trends[, tp.pos.row, ] / N tp.mat.trends[, tp.CP[-tp.pos.0], ] <- ((tp.mat.trends[, tp.CP[-tp.pos.0], ] - kmat.N)^3) / N^3 tp.mat.trends[tp.mat.trends < 0] <- 0 tp.mat.trends[, tp.CP[-tp.pos.0], ] <- tp.mat.trends[, tp.CP[-tp.pos.0], ] / kmax.N } } #------------------------------------------------------------------------------------------------------------------------------------------------------- # Keep track of the additional positions of the selected gamma parameters of the CP/CT and MD/MI trends #------------------------------------------------------------------------------------------------------------------------------------------------------- Trends.chosen.names <- c("Constant", unique(Trends.pos[Trends.id])) New.trend.pos <- rep(NA, length(track.add)) if(length(track.add) != 0) { for(i in 1:length(track.add)) { New.trend.pos[i] <- which(Trends.chosen.names %in% names(track.add)[i]) } } #------------------------------------------------------------------------------------------------------------------------------------------------------- # tempering temperatures #------------------------------------------------------------------------------------------------------------------------------------------------------- d.t <- Nchains / (Nchains + 4) temps <- tempupdate(Nchains, d.t) #------------------------------------------------------------------------------------------------------------------------------------------------------- # proposal standard deviations for M-H moves #------------------------------------------------------------------------------------------------------------------------------------------------------- proposal.sd.gamma <- matrix(0.1, nrow = Trends.sel, ncol = Nchains) proposal.sd.phi <- matrix(0.1, nrow = K, ncol = Nchains) proposal.sd.rho <- rep(0.01, Nchains) max.proposal.sd.rho <- 0.1 min.proposal.sd.rho <- 0.001 #------------------------------------------------------------------------------------------------------------------------------------------------------- # begin/end of chains for use in c++ functions due to using arrays #------------------------------------------------------------------------------------------------------------------------------------------------------- begin.chain <- seq(from = 1, to = K * Nchains, by = K) begin.chainN <- seq(from = 
1, to = N.all * Nchains, by = N.all) beg.reg.chain <- seq(from = 1, to = N.all, by = K) log1 <- log(1) N.all.trends <- N.all * Trends.sel if(any.CP) { wmat.extend <- array(0, c(N.all, Trends.sel, Nchains)) wmat.extend[, -track.add, ] <- wmat wmat.extend[, track.add, ] <- wmat[, New.trend.pos, ] }else { wmat.extend <- wmat } beg.trend <- seq(from = 1, to = N.all.trends, by = N.all) wmat.ar <- matrix(wmat.extend, nrow = N.all.trends, ncol = Nchains) gamma.mat.ar <- matrix(gamma.mat, nrow = N.all.trends, ncol = Nchains) tpmat.ar <- matrix(tpmat, nrow = N.all.trends, ncol = Nchains) trends.part <- offsetcompute(wmat.ar, gamma.mat.ar, tpmat.ar, Nchains, N.all, Trends.sel, beg.trend) #------------------------------------------------------------------------------------------------------------------------------------------------------- # Run the Bayesian model #------------------------------------------------------------------------------------------------------------------------------------------------------- # Start timer #------------------------------------------------------------------------------------------------------------------------------------------------------- if(verbose) { cat("Generating", n.keep, "post burnin and thinned (if requested) samples\n", sep = " ") progressBar <- txtProgressBar(style = 3) percentage.points<-round((1:100/100)*n.sample) }else { percentage.points<-round((1:100/100)*n.sample) } for(j in 1:n.sample) { #------------------------------------------------------------------------------------------------------------------------------------------------------- # Sample from beta #------------------------------------------------------------------------------------------------------------------------------------------------------- proposal <- matcomp(chol.proposal.corr.beta, beta, proposal.sd.beta, p, Nchains) proposal.beta <- beta offset.temp <- offset.mat + trends.part + phimat for(r in 1:n.beta.block) { proposal.beta[beta.beg[r]:beta.fin[r], ] <- proposal[beta.beg[r]:beta.fin[r], ] beta.linpred <- linpredcomputeNchains(X.standardised, N.all, p, beta, Nchains) proposal.linpred <- linpredcomputeNchains(X.standardised, N.all, p, proposal.beta, Nchains) prob <- poissonbetablockupdate(N.all, beta, proposal.beta, beta.linpred, proposal.linpred, offset.temp, Y, prior.mean.beta, prior.var.beta, Nchains, temps, p) accept.beta.chain <- prob > runif(Nchains) beta[beta.beg[r]:beta.fin[r], accept.beta.chain] <- proposal.beta[beta.beg[r]:beta.fin[r], accept.beta.chain] accept[accept.blocks.num[, 1]] <- accept[accept.blocks.num[, 1]] + as.numeric(accept.beta.chain) proposal.beta[beta.beg[r]:beta.fin[r], !accept.beta.chain] <- beta[beta.beg[r]:beta.fin[r], !accept.beta.chain] } accept[accept.blocks.den[, 1]] <- accept[accept.blocks.den[, 1]] + n.beta.block regression.mat <- linpredcomputeNchains(X.standardised, N.all, p, beta, Nchains) #------------------------------------------------------------------------------------------------------------------------------------------------------- # Sample trend gamma's #------------------------------------------------------------------------------------------------------------------------------------------------------- W.areas <- apply(w, c(2, 3), sum) offset.temp <- offset.mat + regression.mat + trends.part + phimat for (i in Trends.id) { gamma.proposal <- gammaproposal(Nchains, gamma[gamma.pos[i], ], proposal.sd.gamma[gamma.pos[i], ], prior.var.trends[i], W.areas[which(Trends.chosen.names %in% Trends.pos[i])[1], ], i, knots) 
gamma.mat.proposal <- gamma.mat gamma.mat.proposal[, gamma.pos[i], ] <- matN(gamma.proposal, N.all, Nchains) gamma.mat.proposal.ar <- matrix(gamma.mat.proposal, nrow = N.all.trends, ncol = Nchains) trends.proposal <- offsetcompute(wmat.ar, gamma.mat.proposal.ar, tpmat.ar, Nchains, N.all, Trends.sel, beg.trend) offset.proposal <- offset.mat + regression.mat + trends.proposal + phimat gamma.list <- poissongammaupdate(N.all, gamma[gamma.pos[i], ], gamma.proposal, offset.temp, offset.proposal, Y, prior.mean.trends[i], prior.var.trends[i], Nchains, temps) if(!all(gamma.list[[2]] == 0)) { gamma[gamma.pos[i], ] <- gamma.list[[1]] gamma.mat[, gamma.pos[i], ] <- matN(gamma[gamma.pos[i], ], N.all, Nchains) gamma.mat.ar <- matrix(gamma.mat, nrow = N.all.trends, ncol = Nchains) trends.part <- offsetcompute(wmat.ar, gamma.mat.ar, tpmat.ar, Nchains, N.all, Trends.sel, beg.trend) offset.temp <- offset.mat + regression.mat + trends.part + phimat accept.gammas[gamma.pos[i], accept.blocks.num[, 1]] <- accept.gammas[gamma.pos[i], accept.blocks.num[, 1]] + gamma.list[[2]] } accept.gammas[gamma.pos[i], accept.blocks.den[, 1]] <- accept.gammas[gamma.pos[i], accept.blocks.den[, 1]] + 1 } #------------------------------------------------------------------------------------------------------------------------------------------------------- # Sample from w #------------------------------------------------------------------------------------------------------------------------------------------------------- w.perm <- matrix(aperm(w, c(1, 3, 2)), nrow = K*Nchains, ncol = N.trends) w.props <- sample(N.trends) while (all(w.props == 1:N.trends)) { w.props <- sample(N.trends) } w.proposal <- w.perm[, w.props] w.proposal.array <- array(w.proposal, c(K, Nchains, N.trends)) w.proposal.array <- aperm(w.proposal.array, c(1, 3, 2)) w.proposal.array <- kronecker(kronN, w.proposal.array) if(any.CP) { wmat.extend.proposal <- array(0, c(N.all, Trends.sel, Nchains)) wmat.extend.proposal[, -track.add, ] <- w.proposal.array wmat.extend.proposal[, track.add, ] <- w.proposal.array[, New.trend.pos, ] }else { wmat.extend.proposal <- w.proposal.array } w.proposal.ar <- matrix(wmat.extend.proposal, nrow = N.all.trends, ncol = Nchains) trends.proposal <- offsetcompute(w.proposal.ar, gamma.mat.ar, tpmat.ar, Nchains, N.all, Trends.sel, beg.trend) offset.proposal <- offset.mat + regression.mat + trends.proposal + phimat w.list <- poissonwupdate(K, N, w.perm, offset.temp, offset.proposal, w.proposal, Y.mat, lambda, Nchains, temps, begin.chain, beg.reg.chain, N.trends) if(!all(w.list[[2]] == 0)) { w <- w.list[[1]] w.array <- array(w, c(K, Nchains, N.trends)) w <- aperm(w.array, c(1, 3, 2)) wmat <- kronecker(kronN, w) if(any.CP) { wmat.extend <- array(0, c(N.all, Trends.sel, Nchains)) wmat.extend[, -track.add, ] <- wmat wmat.extend[, track.add, ] <- wmat[, New.trend.pos, ] }else { wmat.extend <- wmat } wmat.ar <- matrix(wmat.extend, nrow = N.all.trends, ncol = Nchains) trends.part <- offsetcompute(wmat.ar, gamma.mat.ar, tpmat.ar, Nchains, N.all, Trends.sel, beg.trend) w.chain.mat <- matrix(aperm(w, c(1, 3, 2)), nrow = K * Nchains, ncol = N.trends) accept.weight[, accept.blocks.num[, 1]] <- accept.weight[, accept.blocks.num[, 1]] + w.list[[2]] } accept.weight[, accept.blocks.den[, 1]] <- accept.weight[, accept.blocks.den[, 1]] + 1 #------------------------------------------------------------------------------------------------------------------------------------------------------- # Sample from lambda 
#------------------------------------------------------------------------------------------------------------------------------------------------------- lambda.temp <- prior.lambda + apply(w, c(2, 3), sum) lambda <- lambdaupdate(Nchains, lambda.temp) #------------------------------------------------------------------------------------------------------------------------------------------------------- # Sample from phi #------------------------------------------------------------------------------------------------------------------------------------------------------- offset.temp <- offset.mat + regression.mat + trends.part phi.list <- poissonphiupdate(W.triplet, W.begfin, W.triplet.sum, K, N, phi, offset.temp, Y.mat, tau2, rho, Nchains, temps, proposal.sd.phi, beg.reg.chain) if(!all(phi.list[[2]] == 0)) { phi.means <- apply(phi.list[[1]], 2, mean) phi <- phi.list[[1]] - matrix(phi.means, nrow = K, ncol = Nchains, byrow = TRUE) phimat <- kronecker(kronN, phi) accept.phis[, accept.blocks.num[, 1]] <- accept.phis[, accept.blocks.num[, 1]] + phi.list[[2]] } accept.phis[, accept.blocks.den[, 1]] <- accept.phis[, accept.blocks.den[, 1]] + 1 #------------------------------------------------------------------------------------------------------------------------------------------------------- # Samples from tau2 #------------------------------------------------------------------------------------------------------------------------------------------------------- tau2.temp <- tau2quadform(W.triplet, W.triplet.sum, W.n.triplet, K, phi, phi, rho, Nchains) tau2 <- tau2computeNchains(tau2.temp, tau2.shape, prior.tau2[2], Nchains) #------------------------------------------------------------------------------------------------------------------------------------------------------- # Samples from rho #------------------------------------------------------------------------------------------------------------------------------------------------------- rho.temp1 <- rhoquadformcomputeNchains(W.triplet, W.triplet.sum, W.n.triplet, K, Nchains, phi, rho, tau2) proposal.rho <- suppressWarnings(rtruncnorm(n = Nchains, a = 0, b = 0.99, mean = rho, sd = proposal.sd.rho)) rho.temp2 <- rhoquadformcomputeNchains(W.triplet, W.triplet.sum, W.n.triplet, K, Nchains, phi, proposal.rho, tau2) det.Q.W.proposal <- Qdet(Nchains, proposal.rho, Wstar.val) logprob.current <- det.Q.W - rho.temp1 logprob.proposal <- det.Q.W.proposal - rho.temp2 prob <- exp((logprob.proposal - logprob.current) * temps) # raised to temperature levels of each chain accept.rho.chain <- prob > runif(Nchains) rho[accept.rho.chain] <- proposal.rho[accept.rho.chain] det.Q.W[accept.rho.chain] <- det.Q.W.proposal[accept.rho.chain] accept[accept.blocks.num[, 2]] <- accept[accept.blocks.num[, 2]] + as.numeric(accept.rho.chain) accept[accept.blocks.den[, 2]] <- accept[accept.blocks.den[, 2]] + 1 #------------------------------------------------------------------------------------------------------------------------------------------------------- # Metropolis coupling #------------------------------------------------------------------------------------------------------------------------------------------------------- swap <- sample(1:Nchains, 2) offset.temp <- offset.mat + regression.mat + trends.part + phimat accept.swap <- poissoncouplingAllupdate(N.all, K, p, w.chain.mat, offset.temp, beta, gamma, lambda, phi, rho, tau2, W.triplet.sum, W.triplet, W.begfin, Y, prior.mean.beta, prior.var.beta, prior.mean.trends, prior.var.trends, prior.lambda, prior.tau2, 
swap, temps, begin.chain, N.trends, Trends.sel) if(accept.swap == 1) { rev.swap <- rev(swap) beta[rev.swap] <- beta[swap] regression.mat[, rev.swap] <- regression.mat[, swap] proposal.sd.beta[rev.swap] <- proposal.sd.beta[swap] gamma[, rev.swap] <- gamma[, swap] gamma.mat[, , rev.swap] <- gamma.mat[, , swap] gamma.mat.ar <- matrix(gamma.mat, nrow = N.all.trends, ncol = Nchains) proposal.sd.gamma[, rev.swap] <- proposal.sd.gamma[, swap] lambda[, rev.swap] <- lambda[, swap] w[, , rev.swap] <- w[, , swap] wmat[, , rev.swap] <- wmat[, , swap] if(any.CP) { wmat.extend <- array(0, c(N.all, Trends.sel, Nchains)) wmat.extend[, -track.add, ] <- wmat wmat.extend[, track.add, ] <- wmat[, New.trend.pos, ] }else { wmat.extend <- wmat } w.chain.mat <- matrix(aperm(w, c(1, 3, 2)), nrow = K * Nchains, ncol = N.trends) wmat.ar <- matrix(wmat.extend, nrow = N.all.trends, ncol = Nchains) phi[, rev.swap] <- phi[, swap] proposal.sd.phi[, rev.swap] <- proposal.sd.phi[, swap] phimat[, rev.swap] <- phimat[, swap] tau2[rev.swap] <- tau2[swap] rho[rev.swap] <- rho[swap] proposal.sd.rho[rev.swap] <- proposal.sd.rho[swap] det.Q.W[rev.swap] <- det.Q.W[swap] trends.part <- offsetcompute(wmat.ar, gamma.mat.ar, tpmat.ar, Nchains, N.all, Trends.sel, beg.trend) offset.temp <- offset.mat + regression.mat + trends.part + phimat }else {} accept.couple[1] <- accept.couple[1] + accept.swap accept.couple[2] <- accept.couple[2] + 1 #------------------------------------------------------------------------------------------------------------------------------------------------------- # Update temperatures #------------------------------------------------------------------------------------------------------------------------------------------------------- if(j%%10==0) { MC3.accept <- 100 * accept.couple[1] / accept.couple[2] if(MC3.accept > 30) { d.t <- max(runif(1, d.t * 0.8, d.t), 0.1) # temps <- tempupdate(Nchains, d.t) }else if(MC3.accept < 20) { d.t <- min(runif(1, d.t, d.t * 1.2), 0.99) temps <- tempupdate(Nchains, d.t) }else {} }else {} #------------------------------------------------------------------------------------------------------------------------------------------------------- # Calculate the deviance #------------------------------------------------------------------------------------------------------------------------------------------------------- fitted <- exp(offset.temp) dev.like <- poissondevfit(Y, fitted, N.all, Nchains) deviance <- dev.like[[1]] like <- dev.like[[2]] #------------------------------------------------------------------------------------------------------------------------------------------------------- # Save the results #------------------------------------------------------------------------------------------------------------------------------------------------------- if(j > burnin & (j-burnin)%%thin==0) { ele <- (j - burnin) / thin samples.beta[ele,,] <- beta samples.gamma[,ele,,] <- gamma samples.w[ele,,,] <- w samples.lambda[ele,,] <- lambda samples.tau2[ele,,] <- tau2 samples.rho[ele,,] <- rho samples.phi[ele,,] <- phi samples.deviance[ele,,] <- deviance samples.fitted[ele,,] <- fitted samples.like[ele,,] <- like }else { } #------------------------------------------------------------------------------------------------------------------------------------------------------- # Self tune the acceptance probabilties #------------------------------------------------------------------------------------------------------------------------------------------------------- 
if(ceiling(j/100)==floor(j/100) & j < burnin) { #------------------------------------------------------------------------------------------------------------------------------------------------------- # Determine the acceptance probabilities #------------------------------------------------------------------------------------------------------------------------------------------------------- accept.beta <- 100 * accept[accept.blocks.num[,1]] / accept[accept.blocks.den[,1]] accept.gamma <- 100 * accept.gammas[,accept.blocks.num[,1]] / accept.gammas[,accept.blocks.den[,1]] accept.gamma[1,] <- 0 accept.gammas.all <- accept.gammas.all + accept.gammas accept.rho <- 100 * accept[accept.blocks.num[,2]] / accept[accept.blocks.den[,2]] accept.w <- 100 * accept.weight[,accept.blocks.num[,1]] / accept.weight[,accept.blocks.den[,1]] accept.w.all <- accept.w.all + accept.weight accept.phi <- 100 * accept.phis[,accept.blocks.num[,1]] / accept.phis[,accept.blocks.den[,1]] accept.phis.all <- accept.phis.all + accept.phis accept.all <- accept.all + accept accept <- rep(0, 2 * (Trends.sel + 1) * Nchains) accept.weight <- matrix(0, nrow=K, ncol=2*Nchains) accept.phis <- matrix(0, nrow=K, ncol=2*Nchains) accept.gammas <- matrix(0, nrow=Trends.sel, ncol=2*Nchains) #------------------------------------------------------------------------------------------------------------------------------------------------------- # beta tuning parameter #------------------------------------------------------------------------------------------------------------------------------------------------------- if(any(accept.beta > 50)) { proposal.sd.beta[which(accept.beta > 50)] <- 2 * proposal.sd.beta[which(accept.beta > 50)] }else if(any(accept.beta < 40)) { proposal.sd.beta[which(accept.beta < 40)] <- 0.5 * proposal.sd.beta[which(accept.beta < 40)] }else { } #------------------------------------------------------------------------------------------------------------------------------------------------------- # gamma tuning parameter #------------------------------------------------------------------------------------------------------------------------------------------------------- if(any(accept.gamma > 50)) { proposal.sd.gamma[which(accept.gamma > 50)] <- 2 * proposal.sd.gamma[which(accept.gamma > 50)] }else if(any(accept.gamma < 40)) { proposal.sd.gamma[which(accept.gamma < 40)] <- 0.5 * proposal.sd.gamma[which(accept.gamma < 40)] }else { } #------------------------------------------------------------------------------------------------------------------------------------------------------- # rho tuning parameter #------------------------------------------------------------------------------------------------------------------------------------------------------- if(any(accept.rho > 50)) { proposal.sd.rho[which(accept.rho > 50)] <- 2 * proposal.sd.rho[which(accept.rho > 50)] if(any(proposal.sd.rho > max.proposal.sd.rho)) { proposal.sd.rho[which(proposal.sd.rho > max.proposal.sd.rho)] <- max.proposal.sd.rho }else { } }else if(any(accept.rho < 40)) { proposal.sd.rho[which(accept.rho < 40)] <- 0.5 * proposal.sd.rho[which(accept.rho < 40)] if(any(proposal.sd.rho < min.proposal.sd.rho)) { proposal.sd.rho[which(proposal.sd.rho < min.proposal.sd.rho)] <- min.proposal.sd.rho }else { } }else { } #------------------------------------------------------------------------------------------------------------------------------------------------------- # phi tuning parameter 
#------------------------------------------------------------------------------------------------------------------------------------------------------- if(any(accept.phi > 50)) { proposal.sd.phi[which(accept.phi > 50)] <- 2 * proposal.sd.phi[which(accept.phi > 50)] }else if(any(accept.phi < 40)) { proposal.sd.phi[which(accept.phi < 40)] <- 0.5 * proposal.sd.phi[which(accept.phi < 40)] }else { } }else {} #------------------------------------------------------------------------------------------------------------------------------------------------------- # Print progress to the console #------------------------------------------------------------------------------------------------------------------------------------------------------- if(j %in% percentage.points) { setTxtProgressBar(progressBar, j/n.sample) } } #------------------------------------------------------------------------------------------------------------------------------------------------------- # End timer #------------------------------------------------------------------------------------------------------------------------------------------------------- if(verbose) { cat("\nSummarising results") close(progressBar) }else {} #------------------------------------------------------------------------------------------------------------------------------------------------------- # Summarise and save the results #------------------------------------------------------------------------------------------------------------------------------------------------------- # Select untempered chain for inference #------------------------------------------------------------------------------------------------------------------------------------------------------- chain.sel <- 1 p.d <- DIC <- LMPL <- NA fitted.values <- residuals <- rep(NA, N.all) #------------------------------------------------------------------------------------------------------------------------------------------------------- # Watanabe-Akaike Information Criterion (WAIC) #------------------------------------------------------------------------------------------------------------------------------------------------------- LPPD <- sum(log(apply(samples.like[,,chain.sel],2,mean)), na.rm=TRUE) p.w <- sum(apply(log(samples.like[,,chain.sel]),2,var), na.rm=TRUE) WAIC <- -2 * (LPPD - p.w) #------------------------------------------------------------------------------------------------------------------------------------------------------- # Compute information criterion (DIC, DIC3, WAIC) #------------------------------------------------------------------------------------------------------------------------------------------------------- mode.w <- matrix(0, nrow=K, ncol=N.trends) Wsum <- apply(samples.w[,,,chain.sel],c(2,3),sum) Wtrend <- which(Wsum == rowMaxs(Wsum), arr.ind=TRUE) for (i in 1:K) { mode.w[Wtrend[i,1], Wtrend[i,2]] <- 1 } mode.w <- array(mode.w, c(K,N.trends,N)) mode.beta <- rep(NA, p) if(p == 1) { mode.beta <- density(samples.beta[,,chain.sel]) mode.beta <- mean(mode.beta$x[which(mode.beta$y==max(mode.beta$y))]) }else { for(i in 1:p) { betamode <- density(samples.beta[,i,chain.sel]) mode.beta[i] <- mean(betamode$x[which(betamode$y==max(betamode$y))]) } } reg.mat <- matrix(X.standardised %*% mode.beta, nrow=K, ncol=N, byrow=FALSE) gamma.mat <- array(0, c(K,Trends.sel,N)) for(i in Trends.id) { gamma.dens <- density(samples.gamma[gamma.pos[i],,,chain.sel]) gamma.mean <- mean(gamma.dens$x[which(gamma.dens$y==max(gamma.dens$y))]) gamma.mat[,gamma.pos[i],] <- 
matN(rep(gamma.mean, N),K,N) } mode.phi <- rep(NA,K) for(i in 1:K) { phimode <- density(samples.phi[,i,chain.sel]) mode.phi[i] <- mean(phimode$x[which(phimode$y==max(phimode$y))]) } phi.mat <- matN(rep(mode.phi,N),K,N) wmat.extend <- array(0, c(K,Trends.sel,N)) wmat.extend[,-track.add,] <- mode.w wmat.extend[,track.add,] <- mode.w[,New.trend.pos,] trends.part <- apply(wmat.extend * (gamma.mat * tp.mat.trends),c(1,3),sum) offset.temp <- as.numeric(offset.mat[,chain.sel] + reg.mat + trends.part + phi.mat) fit.mean <- exp(offset.temp) deviance.fitted <- -2 * sum(dpois(x=Y, lambda=fit.mean, log=TRUE)) p.d <- mean(samples.deviance[,,chain.sel]) - deviance.fitted DIC <- 2 * mean(samples.deviance[,,chain.sel]) - deviance.fitted #------------------------------------------------------------------------------------------------------------------------------------------------------- # Compute the LMPL #------------------------------------------------------------------------------------------------------------------------------------------------------- CPO <- rep(NA, N.all) for(j in 1:N.all) { CPO[j] <- 1/mean((1 / dpois(x = Y[j], lambda = samples.fitted[,j,chain.sel]))) } LMPL <- sum(log(CPO)) #------------------------------------------------------------------------------------------------------------------------------------------------------- # Create the Fitted values #------------------------------------------------------------------------------------------------------------------------------------------------------- fitted.values <- apply(samples.fitted[,,chain.sel],2,mean) response.residuals <- as.numeric(Y) - fitted.values pearson.residuals <- response.residuals /sqrt(fitted.values) residuals <- data.frame(response=response.residuals, pearson=pearson.residuals) #------------------------------------------------------------------------------------------------------------------------------------------------------- # Transform the parameters back to the original covariate scale #------------------------------------------------------------------------------------------------------------------------------------------------------- samples.beta.orig <- common.betatransform(samples.beta[,,chain.sel], X.indicator, X.mean, X.sd, p, FALSE) #------------------------------------------------------------------------------------------------------------------------------------------------------- # Compute the acceptance rates #------------------------------------------------------------------------------------------------------------------------------------------------------- accept.beta <- 100 * accept.all[accept.blocks.num[,1]] / accept.all[accept.blocks.den[,1]] accept.beta <- accept.beta[chain.sel] accept.gammas <- 100 * accept.gammas.all[,accept.blocks.num[,1]] / accept.gammas.all[,accept.blocks.den[,1]] accept.gammas <- accept.gammas[,chain.sel] accept.rho <- 100 * accept.all[accept.blocks.num[,2]] / accept.all[accept.blocks.den[,2]] accept.rho <- accept.rho[chain.sel] accept.phis <- 100 * accept.phis.all[, accept.blocks.num[,1]] / accept.phis.all[,accept.blocks.den[,1]] accept.phis <- accept.phis[,chain.sel] coupled <- 100*accept.couple[1]/accept.couple[2] accept.final <- c(accept.beta, accept.gammas[-1], accept.rho, mean(accept.phis), coupled) names(accept.final) <- c("beta", paste("gamma.", Trends.pos[Trends.id], sep=""), "rho", "phi", "coupled") #------------------------------------------------------------------------------------------------------------------------------------------------------- # 
Create a summary object #------------------------------------------------------------------------------------------------------------------------------------------------------- samples.beta.orig <- mcmc(samples.beta.orig) mode.beta.orig <- rep(NA, p) HPD.beta.orig <- matrix(NA, nrow=2, ncol=p) if(p == 1) { mode.beta.orig <- density(samples.beta.orig) mode.beta.orig <- mean(mode.beta.orig$x[which(mode.beta.orig$y==max(mode.beta.orig$y))]) HPD.beta.orig[1,] <- HPDinterval(samples.beta.orig, prob=0.95)[1] HPD.beta.orig[2,] <- HPDinterval(samples.beta.orig, prob=0.95)[2] summary.beta <- t(c(mode.beta.orig, HPD.beta.orig[1,], HPD.beta.orig[2,])) }else { summary.beta <- matrix(NA, nrow=p, ncol=3) for(i in 1:p) { origbetamode <- density(samples.beta.orig[,i]) mode.beta.orig[i] <- mean(origbetamode$x[which(origbetamode$y==max(origbetamode$y))]) HPD.beta.orig[1,i] <- HPDinterval(samples.beta.orig[,i], prob=0.95)[1] HPD.beta.orig[2,i] <- HPDinterval(samples.beta.orig[,i], prob=0.95)[2] summary.beta[i,1] <- mode.beta.orig[i] summary.beta[i,2] <- HPD.beta.orig[1,i] summary.beta[i,3] <- HPD.beta.orig[2,i] } } summary.beta <- cbind(summary.beta, rep(n.keep,p), rep(accept.beta,p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z) rownames(summary.beta) <- colnames(X) colnames(summary.beta) <- c("Mode", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag") summary.gamma <- matrix(NA, nrow=Trends.sel, ncol=7) for(i in Trends.id) { summary.gamma[gamma.pos[i],1] <- unique(as.numeric(gamma.mat[, gamma.pos[i],])) summary.gamma[gamma.pos[i],2:3] <- HPDinterval(mcmc(samples.gamma[gamma.pos[i],,,chain.sel]), prob=0.95) summary.gamma[gamma.pos[i],4] <- rep(n.keep,1) summary.gamma[gamma.pos[i],5] <- accept.gammas[gamma.pos[i]] summary.gamma[gamma.pos[i],6] <- effectiveSize(samples.gamma[gamma.pos[i],,,chain.sel]) summary.gamma[gamma.pos[i],7] <- geweke.diag(samples.gamma[gamma.pos[i],,,chain.sel])$z } colnames(summary.gamma) <- c("Mode", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag") rownames(summary.gamma) <- c("gamma.constant", paste("gamma.", Trends.pos[Trends.id], sep="")) summary.gamma <- summary.gamma[-1,] if(Trends.sel==2) { summary.gamma <- matrix(summary.gamma, nrow=1, ncol=7) colnames(summary.gamma) <- c("Mode", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag") rownames(summary.gamma) <- paste("gamma.", Trends.pos[Trends.id], sep = "") } summary.lambda <- matrix(NA, nrow=N.trends, ncol=7) for(i in 1:N.trends) { lambda.dens <- density(samples.lambda[,i,chain.sel]) lambda.mean <- mean(lambda.dens$x[which(lambda.dens$y==max(lambda.dens$y))]) summary.lambda[i,1] <- lambda.mean summary.lambda[i,2:3] <- HPDinterval(mcmc(samples.lambda[,i,chain.sel]), prob=0.95) summary.lambda[i,4] <- rep(n.keep,1) summary.lambda[i,5] <- rep(100,1) summary.lambda[i,6] <- effectiveSize(samples.lambda[,i,chain.sel]) summary.lambda[i,7] <- geweke.diag(samples.lambda[,i,chain.sel])$z } colnames(summary.lambda) <- c("Mode", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag") rownames(summary.lambda) <- paste("lambda.", All.Trends[which(All.Trends %in% trends)], sep = "") mode.tau2 <- density(samples.tau2[,,chain.sel]) mode.tau2 <- mean(mode.tau2$x[which(mode.tau2$y==max(mode.tau2$y))]) summary.tau2 <- t(c(mode.tau2, HPDinterval(mcmc(samples.tau2[,,chain.sel]), prob=0.95)[1], HPDinterval(mcmc(samples.tau2[,,chain.sel]), prob=0.95)[2])) summary.tau2 <- cbind(summary.tau2, rep(n.keep, 1), rep(100,1), 
effectiveSize(samples.tau2[,,chain.sel]), geweke.diag(samples.tau2[,,chain.sel])$z) colnames(summary.tau2) <- c("Mode", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag") rownames(summary.tau2) <- c("tau2") mode.rho <- density(samples.rho[,,chain.sel]) mode.rho <- mean(mode.rho$x[which(mode.rho$y==max(mode.rho$y))]) summary.rho <- t(c(mode.rho, HPDinterval(mcmc(samples.rho[,,chain.sel]), prob=0.95)[1], HPDinterval(mcmc(samples.rho[,,chain.sel]), prob=0.95)[2])) summary.rho <- cbind(summary.rho, rep(n.keep, 1), rep(accept.rho,1), effectiveSize(samples.rho[,,chain.sel]), geweke.diag(samples.rho[,,chain.sel])$z) colnames(summary.rho) <- c("Mode", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag") rownames(summary.rho) <- c("rho") summary.results <- rbind(summary.beta, summary.gamma, summary.lambda, summary.tau2, summary.rho) summary.results[,1:3] <- round(summary.results[,1:3],4) summary.results[,4:7] <- round(summary.results[,4:7],1) #------------------------------------------------------------------------------------------------------------------------------------------------------- # Allocated trends for each location #------------------------------------------------------------------------------------------------------------------------------------------------------- trends <- apply(samples.w[,,,chain.sel], c(2,3), sum) trend.probs <- trends / n.keep trends <- which(trends==rowMaxs(trends), arr.ind=TRUE) trends <- trends[order(trends[,1]),] trends[ ,2] <- Trends.chosen.names[trends[ ,2]] #------------------------------------------------------------------------------------------------------------------------------------------------------- # Compile and return the results #------------------------------------------------------------------------------------------------------------------------------------------------------- loglike <- -0.5 * deviance.fitted modelfit <- c(DIC[chain.sel], p.d[chain.sel], WAIC[chain.sel], p.w[chain.sel], LMPL[chain.sel], loglike) names(modelfit) <- c("DIC", "p.d", "WAIC", "p.w", "LMPL", "loglikelihood") samples <- list(beta=mcmc(t(matrix(samples.beta.orig, ncol=n.keep))), gamma=mcmc(t(matrix(samples.gamma[-1,,,chain.sel], ncol=n.keep))), lambda=mcmc(samples.lambda[,,chain.sel]), tau2=mcmc(as.matrix(samples.tau2[,,chain.sel])), rho=mcmc(as.matrix(samples.rho[,,chain.sel])), w=samples.w[,,,chain.sel], phi=mcmc(samples.phi[,,chain.sel]), fitted=mcmc(samples.fitted[,,chain.sel])) model.string <- c("Likelihood model - poisson (log link function)", "\nLatent structure model - spatial main effects and an area clustered trend\n") results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=list(trends=trends[ ,-1], trend.probs=trend.probs), formula=formula, model=model.string, X=X) class(results) <- "CARBayesST" if(verbose) { b<-proc.time() cat(" finished in ", round(b[3]-a[3], 1), "seconds") }else {} return(results) } #-------------------------------------------------------------------------------------------------------------------------------------------------------
#### End of file: CARBayesST/R/poisson.CARclustrends.R
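#### poisson.CARlinear: fits the Poisson log-linear model in which each area has its
#### own linear time trend, with CAR priors inducing spatial autocorrelation in the
#### area-level intercepts (phi) and slopes (delta). One or more MCMC chains are run,
#### in series or in parallel, by calling poisson.CARlinearMCMC().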
poisson.CARlinear <- function(formula, data=NULL, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.mean.alpha=NULL, prior.var.alpha=NULL, prior.tau2=NULL, rho.slo=NULL, rho.int=NULL, MALA=TRUE, verbose=TRUE) { ############################################## #### Format the arguments and check for errors ############################################## #### Verbose a <- common.verbose(verbose) #### Frame object frame.results <- common.frame(formula, data, "poisson") N.all <- frame.results$n p <- frame.results$p X <- frame.results$X X.standardised <- frame.results$X.standardised X.sd <- frame.results$X.sd X.mean <- frame.results$X.mean X.indicator <- frame.results$X.indicator offset <- frame.results$offset Y <- frame.results$Y which.miss <- frame.results$which.miss n.miss <- frame.results$n.miss #### Determine the number of spatial and temporal units W.quants <- common.Wcheckformat.leroux(W) K <- W.quants$n N <- N.all / K offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE) time <-(1:N - mean(1:N))/N time.mat <- matrix(rep(time, K), byrow=TRUE, nrow=K) #### Check on MALA argument if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE) if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE) #### Check on the rho arguments if(is.null(rho.int)) { rho <- runif(1) fix.rho.int <- FALSE }else { rho <- rho.int fix.rho.int <- TRUE } if(!is.numeric(rho)) stop("rho.int is fixed but is not numeric.", call.=FALSE) if(rho<0 ) stop("rho.int is outside the range [0, 1].", call.=FALSE) if(rho>1 ) stop("rho.int is outside the range [0, 1].", call.=FALSE) if(is.null(rho.slo)) { lambda <- runif(1) fix.rho.slo <- FALSE }else { lambda <- rho.slo fix.rho.slo <- TRUE } if(!is.numeric(lambda)) stop("rho.slo is fixed but is not numeric.", call.=FALSE) if(lambda<0 ) stop("rho.slo is outside the range [0, 1].", call.=FALSE) if(lambda>1 ) stop("rho.slo is outside the range [0, 1].", call.=FALSE) #### Priors if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p) if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p) if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01) if(is.null(prior.mean.alpha)) prior.mean.alpha <- rep(0, 1) if(is.null(prior.var.alpha)) prior.var.alpha <- rep(100000, 1) prior.beta.check(prior.mean.beta, prior.var.beta, p) prior.var.check(prior.tau2) if(length(prior.mean.alpha)!=1) stop("the prior mean for alpha is the wrong length.", call.=FALSE) if(!is.numeric(prior.mean.alpha)) stop("the prior mean for alpha is not numeric.", call.=FALSE) if(sum(is.na(prior.mean.alpha))!=0) stop("the prior mean for alpha has missing values.", call.=FALSE) if(length(prior.var.alpha)!=1) stop("the prior variance for alpha is the wrong length.", call.=FALSE) if(!is.numeric(prior.var.alpha)) stop("the prior variance for alpha is not numeric.", call.=FALSE) if(sum(is.na(prior.var.alpha))!=0) stop("the prior variance for alpha has missing values.", call.=FALSE) if(min(prior.var.alpha) <=0) stop("the prior variance for alpha has elements less than zero", call.=FALSE) #### Compute the blocking structure for beta block.temp <- common.betablock(p) beta.beg <- block.temp[[1]] beta.fin <- block.temp[[2]] n.beta.block <- block.temp[[3]] list.block <- as.list(rep(NA, n.beta.block*2)) for(r in 1:n.beta.block) { list.block[[r]] <- beta.beg[r]:beta.fin[r]-1 list.block[[r+n.beta.block]] <- length(list.block[[r]]) } #### MCMC quantities - burnin, n.sample, thin common.burnin.nsample.thin.check(burnin, n.sample, thin) ######################## #### Run the MCMC 
chains ######################## if(n.chains==1) { #### Only 1 chain results <- poisson.CARlinearMCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, lambda=lambda, fix.rho.int=fix.rho.int, fix.rho.slo=fix.rho.slo, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.mean.alpha=prior.mean.alpha, prior.var.alpha=prior.var.alpha, prior.tau2=prior.tau2, verbose=verbose, chain=1) }else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1) { #### Multiple chains in series results <- as.list(rep(NA, n.chains)) for(i in 1:n.chains) { results[[i]] <- poisson.CARlinearMCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, lambda=lambda, fix.rho.int=fix.rho.int, fix.rho.slo=fix.rho.slo, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.mean.alpha=prior.mean.alpha, prior.var.alpha=prior.var.alpha, prior.tau2=prior.tau2, verbose=verbose, chain=i) } }else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores)) { #### Multiple chains in parallel results <- as.list(rep(NA, n.chains)) if(verbose) { compclust <- makeCluster(n.cores, outfile="CARBayesSTprogress.txt") cat("The current progress of the model fitting algorithm has been output to CARBayesSTprogress.txt in the working directory") }else { compclust <- makeCluster(n.cores) } results <- clusterCall(compclust, fun=poisson.CARlinearMCMC, Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, lambda=lambda, fix.rho.int=fix.rho.int, fix.rho.slo=fix.rho.slo, K=K, N=N, N.all=N.all, p=p, which.miss=which.miss, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.mean.alpha=prior.mean.alpha, prior.var.alpha=prior.var.alpha, prior.tau2=prior.tau2, verbose=verbose, chain="all") stopCluster(compclust) }else { stop("n.chains or n.cores are not positive integers.", call.=FALSE) } #### end timer if(verbose) { cat("\nSummarising results.\n") }else {} ################################### #### Summarise and save the results ################################### if(n.chains==1) { #### If n.chains==1 ## Compute the acceptance rates accept.final <- rep(NA, 6) names(accept.final) <- c("beta", "alpha", "phi", "delta", "rho.int", "rho.slo") accept.final[1] <- 100 * results$accept[1] / results$accept[2] accept.final[2] <- 100 * results$accept[3] / results$accept[4] accept.final[3] <- 100 * results$accept[5] / results$accept[6] accept.final[4] <- 100 * results$accept[7] / results$accept[8] if(!fix.rho.int) accept.final[5] <- 100 * results$accept[9] / results$accept[10] if(!fix.rho.slo) accept.final[6] <- 100 * results$accept[11] / results$accept[12] ## Compute the fitted deviance mean.phi <- apply(results$samples.phi, 2, mean) mean.delta <- apply(results$samples.delta, 2, mean) mean.phi.mat <- matrix(rep(mean.phi, N), byrow=F, nrow=K) delta.time.mat <- apply(time.mat, 2, "*", mean.delta) mean.alpha <- mean(results$samples.alpha) mean.beta <- apply(results$samples.beta,2,mean) regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE) fitted.mean 
<- exp(offset.mat + regression.mat + mean.phi.mat + delta.time.mat + mean.alpha * time.mat) deviance.fitted <- -2 * sum(dpois(x=as.numeric(Y), lambda=fitted.mean, log=TRUE), na.rm=TRUE) modelfit <- common.modelfit(results$samples.loglike, deviance.fitted) ## Create the fitted values and residuals fitted.values <- apply(results$samples.fitted, 2, mean) response.residuals <- as.numeric(Y) - fitted.values pearson.residuals <- response.residuals /sqrt(fitted.values) residuals <- data.frame(response=response.residuals, pearson=pearson.residuals) ## Transform the parameters back to the origianl covariate scale. samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE) ## Create the samples object if(fix.rho.int & fix.rho.slo) { samples.rhoext <- NA }else if(fix.rho.int & !fix.rho.slo) { samples.rhoext <- results$samples.lambda names(samples.rhoext) <- "rho.slo" }else if(!fix.rho.int & fix.rho.slo) { samples.rhoext <- results$samples.rho names(samples.rhoext) <- "rho.int" }else { samples.rhoext <- cbind(results$samples.rho, results$samples.lambda) colnames(samples.rhoext) <- c("rho.int", "rho.slo") } colnames(results$samples.tau2) <- c("tau2.int", "tau2.slo") samples <- list(beta=mcmc(samples.beta.orig), alpha=mcmc(results$samples.alpha), phi=mcmc(results$samples.phi), delta=mcmc(results$samples.delta), tau2=mcmc(results$samples.tau2), rho=mcmc(samples.rhoext), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y)) ## Create a summary object n.keep <- floor((n.sample - burnin)/thin) summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975)))) summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(samples$beta), geweke.diag(samples$beta)$z) rownames(summary.beta) <- colnames(X) colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag") summary.tau2 <- cbind(apply(results$samples.tau2, 2, mean), t(apply(results$samples.tau2, 2, quantile, c(0.025, 0.975))), rep(n.keep, 2), rep(100, 2), effectiveSize(samples$tau2), geweke.diag(samples$tau2)$z) summary.alpha <- c(mean(results$samples.alpha), quantile(results$samples.alpha, c(0.025, 0.975)), n.keep, accept.final[names(accept.final)=="alpha"], effectiveSize(samples$alpha), geweke.diag(samples$alpha)$z) summary.combine <- rbind(summary.alpha, summary.tau2) rownames(summary.combine)[1] <- "alpha" summary.rho <- array(NA, c(2,7)) row.names(summary.rho) <- c("rho.int", "rho.slo") if(!fix.rho.int) { summary.rho[1, 1:3] <- c(mean(results$samples.rho), quantile(results$samples.rho, c(0.025, 0.975))) summary.rho[1, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.int"], effectiveSize(results$samples.rho), geweke.diag(results$samples.rho)$z) }else { summary.rho[1, 1:3] <- c(rho, rho, rho) summary.rho[1, 4:7] <- rep(NA, 4) } if(!fix.rho.slo) { summary.rho[2, 1:3] <- c(mean(results$samples.lambda), quantile(results$samples.lambda, c(0.025, 0.975))) summary.rho[2, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.slo"], effectiveSize(results$samples.lambda), geweke.diag(results$samples.lambda)$z) }else { summary.rho[2, 1:3] <- c(lambda, lambda, lambda) summary.rho[2, 4:7] <- rep(NA, 4) } summary.results <- rbind(summary.beta, summary.combine, summary.rho) summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4) summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1) }else { #### If n.chains > 1 ## Compute the acceptance rates accept.final 
<- rep(NA, 6) names(accept.final) <- c("beta", "alpha", "phi", "delta", "rho.int", "rho.slo") accept.temp <- lapply(results, function(l) l[["accept"]]) accept.temp2 <- do.call(what=rbind, args=accept.temp) accept.final[1] <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2]) accept.final[2] <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4]) accept.final[3] <- 100 * sum(accept.temp2[ ,5]) / sum(accept.temp2[ ,6]) accept.final[4] <- 100 * sum(accept.temp2[ ,7]) / sum(accept.temp2[ ,8]) if(!fix.rho.int) accept.final[5] <- 100 * sum(accept.temp2[ ,9]) / sum(accept.temp2[ ,10]) if(!fix.rho.slo) accept.final[6] <- 100 * sum(accept.temp2[ ,11]) / sum(accept.temp2[ ,12]) ## Extract the samples into separate matrix and list objects samples.beta.list <- lapply(results, function(l) l[["samples.beta"]]) samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list) samples.phi.list <- lapply(results, function(l) l[["samples.phi"]]) samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list) samples.delta.list <- lapply(results, function(l) l[["samples.delta"]]) samples.delta.matrix <- do.call(what=rbind, args=samples.delta.list) samples.alpha.list <- lapply(results, function(l) l[["samples.alpha"]]) samples.alpha.matrix <- do.call(what=rbind, args=samples.alpha.list) if(!fix.rho.int) { samples.rho.list <- lapply(results, function(l) l[["samples.rho"]]) samples.rho.matrix <- do.call(what=rbind, args=samples.rho.list) } if(!fix.rho.slo) { samples.lambda.list <- lapply(results, function(l) l[["samples.lambda"]]) samples.lambda.matrix <- do.call(what=rbind, args=samples.lambda.list) } samples.tau2.list <- lapply(results, function(l) l[["samples.tau2"]]) samples.tau2.matrix <- do.call(what=rbind, args=samples.tau2.list) samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]]) samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list) samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]]) samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list) if(n.miss>0) samples.Y.list <- lapply(results, function(l) l[["samples.Y"]]) ## Compute the fitted deviance mean.phi <- apply(samples.phi.matrix, 2, mean) mean.delta <- apply(samples.delta.matrix, 2, mean) mean.phi.mat <- matrix(rep(mean.phi, N), byrow=F, nrow=K) delta.time.mat <- apply(time.mat, 2, "*", mean.delta) mean.alpha <- mean(samples.alpha.matrix) mean.beta <- apply(samples.beta.matrix,2,mean) regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE) fitted.mean <- exp(offset.mat + regression.mat + mean.phi.mat + delta.time.mat + mean.alpha * time.mat) deviance.fitted <- -2 * sum(dpois(x=as.numeric(Y), lambda=fitted.mean, log=TRUE), na.rm=TRUE) modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted) ## Create the fitted values and residuals fitted.values <- apply(samples.fitted.matrix, 2, mean) response.residuals <- as.numeric(Y) - fitted.values pearson.residuals <- response.residuals /sqrt(fitted.values) residuals <- data.frame(response=response.residuals, pearson=pearson.residuals) ## Transform the parameters back to the original covariate scale. 
samples.beta.list <- samples.beta.list for(j in 1:n.chains) { samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE) } samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list) ## Create MCMC objects beta.mcmc <- mcmc.list(lapply(samples.beta.list, mcmc)) alpha.mcmc <- mcmc.list(lapply(samples.alpha.list, mcmc)) phi.mcmc <- mcmc.list(lapply(samples.phi.list, mcmc)) delta.mcmc <- mcmc.list(lapply(samples.delta.list, mcmc)) fitted.mcmc <- mcmc.list(lapply(samples.fitted.list, mcmc)) for(j in 1:n.chains) { colnames(samples.tau2.list[[j]]) <- c("tau2.int", "tau2.slo") } tau2.mcmc <- mcmc.list(lapply(samples.tau2.list, mcmc)) if(n.miss>0) { Y.mcmc <- mcmc.list(lapply(samples.Y.list, mcmc)) }else { Y.mcmc <- NA } if(fix.rho.int & fix.rho.slo) { rhoext.mcmc <- NA }else if(fix.rho.int & !fix.rho.slo) { for(j in 1:n.chains) { colnames(samples.lambda.list[[j]]) <- c("rho.slo") } rhoext.mcmc <- mcmc.list(lapply(samples.lambda.list, mcmc)) }else if(!fix.rho.int & fix.rho.slo) { for(j in 1:n.chains) { colnames(samples.rho.list[[j]]) <- c("rho.int") } rhoext.mcmc <- mcmc.list(lapply(samples.rho.list, mcmc)) }else { rho.temp <- as.list(rep(NA, n.chains)) for(j in 1:n.chains) { rho.temp[[j]] <- cbind(samples.rho.list[[j]], samples.lambda.list[[j]]) colnames(rho.temp[[j]]) <- c("rho.int", "rho.slo") } rhoext.mcmc <- mcmc.list(lapply(rho.temp, mcmc)) } samples <- list(beta=beta.mcmc, alpha=alpha.mcmc, phi=phi.mcmc, delta=delta.mcmc, rho=rhoext.mcmc, tau2=tau2.mcmc, fitted=fitted.mcmc, Y=Y.mcmc) ## create a summary object n.keep <- floor((n.sample - burnin)/thin) * n.chains summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975)))) summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2]) rownames(summary.beta) <- colnames(X) colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)") summary.tau2 <- cbind(apply(samples.tau2.matrix, 2, mean), t(apply(samples.tau2.matrix, 2, quantile, c(0.025, 0.975))), rep(n.keep, 2), rep(100, 2), effectiveSize(tau2.mcmc), gelman.diag(tau2.mcmc)$psrf[ ,2]) summary.alpha <- c(mean(samples.alpha.matrix), quantile(samples.alpha.matrix, c(0.025, 0.975)), n.keep, accept.final[names(accept.final)=="alpha"], effectiveSize(alpha.mcmc), gelman.diag(alpha.mcmc)$psrf[ ,2]) summary.combine <- rbind(summary.alpha, summary.tau2) rownames(summary.combine)[1] <- "alpha" summary.rho <- array(NA, c(2,7)) row.names(summary.rho) <- c("rho.int", "rho.slo") if(!fix.rho.int) { temp <- mcmc.list(lapply(samples.rho.list, mcmc)) summary.rho[1, 1:3] <- c(mean(samples.rho.matrix), quantile(samples.rho.matrix, c(0.025, 0.975))) summary.rho[1, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.int"], effectiveSize(temp), gelman.diag(temp)$psrf[ ,2]) }else { summary.rho[1, 1:3] <- c(rho, rho, rho) summary.rho[1, 4:7] <- rep(NA, 4) } if(!fix.rho.slo) { temp <- mcmc.list(lapply(samples.lambda.list, mcmc)) summary.rho[2, 1:3] <- c(mean(samples.lambda.matrix), quantile(samples.lambda.matrix, c(0.025, 0.975))) summary.rho[2, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.slo"], effectiveSize(temp), gelman.diag(temp)$psrf[ ,2]) }else { summary.rho[2, 1:3] <- c(lambda, lambda, lambda) summary.rho[2, 4:7] <- rep(NA, 4) } summary.results <- rbind(summary.beta, summary.combine, summary.rho) summary.results[ , 1:3] <- 
round(summary.results[ , 1:3], 4) summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1) } ################################### #### Compile and return the results ################################### model.string <- c("Likelihood model - Poisson (log link function)", "\nLatent structure model - Spatially autocorrelated linear time trends\n") n.total <- floor((n.sample - burnin) / thin) * n.chains mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains) names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains") results.final <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X) class(results.final) <- "CARBayesST" if(verbose) { b<-proc.time() cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n") }else {} return(results.final) }
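#### Editorial sketch (not from the package documentation): a hypothetical call to
#### poisson.CARlinear() on simulated data. The 4 x 4 grid, covariate, response and
#### MCMC run lengths are all invented for illustration, and in the released package
#### this function is normally reached via a family-dispatching wrapper rather than
#### being called directly.
coords <- expand.grid(x.coord = 1:4, y.coord = 1:4)
K <- nrow(coords); N <- 10
W <- ifelse(as.matrix(dist(coords)) == 1, 1, 0)      # binary rook adjacency for the grid
x <- rnorm(K * N)
Y <- rpois(n = K * N, lambda = exp(0.1 * x))         # ordered with all K areas at time 1 first
fit <- poisson.CARlinear(formula = Y ~ x, W = W, burnin = 2000, n.sample = 5000,
                         verbose = FALSE)
fit$summary.results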
#### End of file: CARBayesST/R/poisson.CARlinear.R
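#### poisson.CARlinearMCMC: the single-chain MCMC engine called by poisson.CARlinear()
#### above. It generates the posterior samples for beta, alpha, phi, delta, tau2, rho
#### and lambda, and returns them to the calling function as a list together with the
#### Metropolis acceptance counts.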
poisson.CARlinearMCMC <- function(Y, offset, X.standardised, W, rho, lambda, fix.rho.int, fix.rho.slo, K, N, N.all, p, which.miss, n.miss, burnin, n.sample, thin, MALA, n.beta.block, list.block, prior.mean.beta, prior.var.beta, prior.mean.alpha, prior.var.alpha, prior.tau2, verbose, chain) { #Rcpp::sourceCpp("src/CARBayesST.cpp") #source("R/common.functions.R") #library(spdep) #library(truncnorm) # # ############################################ #### Set up the key elements before sampling ############################################ #### Generate the initial parameter values time <-(1:N - mean(1:N))/N time.all <- kronecker(time, rep(1,K)) mod.glm <- glm(Y~X.standardised-1 + time.all, offset=offset, family="quasipoisson") beta.mean <- mod.glm$coefficients beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled)) temp <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd) beta <- temp[1:p] alpha <- temp[(p+1)] log.Y <- log(Y) log.Y[Y==0] <- -0.1 res.temp <- log.Y - as.numeric(X.standardised %*% beta) - time.all * alpha - offset res.sd <- sd(res.temp, na.rm=TRUE)/5 phi <- rnorm(n=K, mean=0, sd = res.sd) delta <- rnorm(n=K, mean=0, sd = res.sd) tau2.phi <- var(phi)/10 tau2.delta <- var(delta)/10 #### Specify matrix quantities Y.DA <- Y offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE) regression.mat <- matrix(X.standardised %*% beta, nrow=K, ncol=N, byrow=FALSE) time.mat <- matrix(rep(time, K), byrow=TRUE, nrow=K) delta.time.mat <- apply(time.mat, 2, "*", delta) phi.mat <- matrix(rep(phi, N), byrow=F, nrow=K) fitted <- as.numeric(exp(offset.mat + regression.mat + phi.mat + delta.time.mat + alpha * time.mat)) #### Matrices to store samples n.keep <- floor((n.sample - burnin)/thin) samples.beta <- array(NA, c(n.keep, p)) samples.alpha <- array(NA, c(n.keep, 1)) samples.phi <- array(NA, c(n.keep, K)) samples.delta <- array(NA, c(n.keep, K)) if(!fix.rho.int) samples.rho <- array(NA, c(n.keep, 1)) if(!fix.rho.slo) samples.lambda <- array(NA, c(n.keep, 1)) samples.tau2 <- array(NA, c(n.keep, 2)) colnames(samples.tau2) <- c("tau2.int", "tau2.slo") samples.fitted <- array(NA, c(n.keep, N.all)) samples.loglike <- array(NA, c(n.keep, N.all)) if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss)) #### Specify the Metropolis quantities accept <- rep(0,12) proposal.sd.beta <- 0.01 proposal.sd.phi <- 0.1 proposal.sd.delta <- 0.1 proposal.sd.alpha <- 0.1 proposal.sd.rho <- 0.02 proposal.sd.lambda <- 0.02 tau2.phi.shape <- prior.tau2[1] + K/2 tau2.delta.shape <- prior.tau2[1] + K/2 #### CAR quantities W.quants <- common.Wcheckformat.leroux(W) W <- W.quants$W W.triplet <- W.quants$W.triplet W.n.triplet <- W.quants$n.triplet W.triplet.sum <- W.quants$W.triplet.sum n.neighbours <- W.quants$n.neighbours W.begfin <- W.quants$W.begfin #### Create the determinant if(!fix.rho.int | !fix.rho.slo) { Wstar <- diag(apply(W,1,sum)) - W Wstar.eigen <- eigen(Wstar) Wstar.val <- Wstar.eigen$values }else {} if(!fix.rho.int) det.Q.rho <- 0.5 * sum(log((rho * Wstar.val + (1-rho)))) if(!fix.rho.slo) det.Q.lambda <- 0.5 * sum(log((lambda * Wstar.val + (1-lambda)))) #### Check for islands W.list<- mat2listw(W, style = "B") W.nb <- W.list$neighbours W.islands <- n.comp.nb(W.nb) islands <- W.islands$comp.id n.islands <- max(W.islands$nc) if(rho==1) tau2.phi.shape <- prior.tau2[1] + 0.5 * (K-n.islands) if(lambda==1) tau2.delta.shape <- prior.tau2[1] + 0.5 * (K-n.islands) #### Start timer if(verbose) { cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ") progressBar <- 
txtProgressBar(style = 3) percentage.points<-round((1:100/100)*n.sample) }else { percentage.points<-round((1:100/100)*n.sample) } ############################## #### Generate the MCMC samples ############################## #### Create the MCMC samples for(j in 1:n.sample) { #################################### ## Sample from Y - data augmentation #################################### if(n.miss>0) { Y.DA[which.miss==0] <- rpois(n=n.miss, lambda=fitted[which.miss==0]) }else {} Y.DA.mat <- matrix(Y.DA, nrow=K, ncol=N, byrow=FALSE) #################### ## Sample from beta #################### offset.temp <- offset + as.numeric(phi.mat) + as.numeric(delta.time.mat) + as.numeric(alpha * time.mat) if(MALA) { temp <- poissonbetaupdateMALA(X.standardised, N.all, p, beta, offset.temp, Y.DA, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block) }else { temp <- poissonbetaupdateRW(X.standardised, N.all, p, beta, offset.temp, Y.DA, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block) } beta <- temp[[1]] accept[1] <- accept[1] + temp[[2]] accept[2] <- accept[2] + n.beta.block regression.mat <- matrix(X.standardised %*% beta, nrow=K, ncol=N, byrow=FALSE) #################### ## Sample from alpha #################### proposal.alpha <- rnorm(n=1, mean=alpha, sd=proposal.sd.alpha) prob1 <- 0.5 * (alpha - prior.mean.alpha)^2 / prior.var.alpha - 0.5 * (proposal.alpha - prior.mean.alpha)^2 / prior.var.alpha lp.current <- offset + as.numeric(regression.mat) + as.numeric(phi.mat) + as.numeric(delta.time.mat) + as.numeric(alpha * time.mat) lp.proposal <- offset + as.numeric(regression.mat) + as.numeric(phi.mat) + as.numeric(delta.time.mat) + as.numeric(proposal.alpha * time.mat) like.current <- Y.DA * lp.current - exp(lp.current) like.proposal <- Y.DA * lp.proposal - exp(lp.proposal) prob2 <- sum(like.proposal - like.current, na.rm=TRUE) prob <- exp(prob1 + prob2) if(prob > runif(1)) { alpha <- proposal.alpha accept[3] <- accept[3] + 1 }else { } accept[4] <- accept[4] + 1 #################### ## Sample from phi #################### phi.offset <- offset.mat + regression.mat + delta.time.mat + alpha * time.mat temp1 <- poissoncarupdateRW(W.triplet, W.begfin, W.triplet.sum, K, phi, tau2.phi, Y.DA.mat, proposal.sd.phi, rho, phi.offset, N, rep(1,N)) phi <- temp1[[1]] if(rho<1) { phi <- phi - mean(phi) }else { phi[which(islands==1)] <- phi[which(islands==1)] - mean(phi[which(islands==1)]) } phi.mat <- matrix(rep(phi, N), byrow=F, nrow=K) accept[5] <- accept[5] + temp1[[2]] accept[6] <- accept[6] + K #################### ## Sample from delta #################### delta.offset <- offset.mat + regression.mat + phi.mat + alpha * time.mat temp2 <- poissoncarupdateRW(W.triplet, W.begfin, W.triplet.sum, K, delta, tau2.delta,Y.DA.mat, proposal.sd.delta, lambda, delta.offset, N, time) delta <- temp2[[1]] if(lambda <1) { delta <- delta - mean(delta) }else { delta[which(islands==1)] <- delta[which(islands==1)] - mean(delta[which(islands==1)]) } delta.time.mat <- apply(time.mat, 2, "*", delta) accept[7] <- accept[7] + temp2[[2]] accept[8] <- accept[8] + K ###################### ## Sample from tau2.phi ####################### temp2.phi <- quadform(W.triplet, W.triplet.sum, W.n.triplet, K, phi, phi, rho) tau2.phi.scale <- temp2.phi + prior.tau2[2] tau2.phi <- 1 / rgamma(1, tau2.phi.shape, scale=(1/tau2.phi.scale)) ######################### ## Sample from tau2.delta ######################### temp2.delta <- quadform(W.triplet, W.triplet.sum, W.n.triplet, K, delta, delta, lambda) 
tau2.delta.scale <- temp2.delta + prior.tau2[2] tau2.delta <- 1 / rgamma(1, tau2.delta.shape, scale=(1/tau2.delta.scale)) ################## ## Sample from rho ################## if(!fix.rho.int) { proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho) temp3 <- quadform(W.triplet, W.triplet.sum, W.n.triplet, K, phi, phi, proposal.rho) det.Q.proposal <- 0.5 * sum(log((proposal.rho * Wstar.val + (1-proposal.rho)))) logprob.current <- det.Q.rho - temp2.phi / tau2.phi logprob.proposal <- det.Q.proposal - temp3 / tau2.phi hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho)) prob <- exp(logprob.proposal - logprob.current + hastings) #### Accept or reject the proposal if(prob > runif(1)) { rho <- proposal.rho det.Q.rho <- det.Q.proposal accept[9] <- accept[9] + 1 }else {} accept[10] <- accept[10] + 1 }else {} ##################### ## Sample from lambda ##################### if(!fix.rho.slo) { proposal.lambda <- rtruncnorm(n=1, a=0, b=1, mean=lambda, sd=proposal.sd.lambda) temp3 <- quadform(W.triplet, W.triplet.sum, W.n.triplet, K, delta, delta, proposal.lambda) det.Q.proposal <- 0.5 * sum(log((proposal.lambda * Wstar.val + (1-proposal.lambda)))) logprob.current <- det.Q.lambda - temp2.delta / tau2.delta logprob.proposal <- det.Q.proposal - temp3 / tau2.delta hastings <- log(dtruncnorm(x=lambda, a=0, b=1, mean=proposal.lambda, sd=proposal.sd.lambda)) - log(dtruncnorm(x=proposal.lambda, a=0, b=1, mean=lambda, sd=proposal.sd.lambda)) prob <- exp(logprob.proposal - logprob.current + hastings) #### Accept or reject the proposal if(prob > runif(1)) { lambda <- proposal.lambda det.Q.lambda <- det.Q.proposal accept[11] <- accept[11] + 1 }else {} accept[12] <- accept[12] + 1 }else {} ######################### ## Calculate the deviance ######################### lp <- as.numeric(offset.mat + regression.mat + phi.mat + delta.time.mat + alpha * time.mat) fitted <- exp(lp) loglike <- dpois(x=as.numeric(Y), lambda=fitted, log=TRUE) ################### ## Save the results ################### if(j > burnin & (j-burnin)%%thin==0) { ele <- (j - burnin) / thin samples.beta[ele, ] <- beta samples.phi[ele, ] <- phi samples.delta[ele, ] <- delta samples.alpha[ele, ] <- alpha if(!fix.rho.int) samples.rho[ele, ] <- rho if(!fix.rho.slo) samples.lambda[ele, ] <- lambda samples.tau2[ele, ] <- c(tau2.phi, tau2.delta) samples.fitted[ele, ] <- fitted samples.loglike[ele, ] <- loglike if(n.miss>0) samples.Y[ele, ] <- Y.DA[which.miss==0] }else {} ######################################## ## Self tune the acceptance probabilties ######################################## if(ceiling(j/100)==floor(j/100) & j < burnin) { #### Update the proposal sds if(p>2) { proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 40, 50) }else { proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 30, 40) } proposal.sd.alpha <- common.accceptrates1(accept[3:4], proposal.sd.alpha, 30, 40) proposal.sd.phi <- common.accceptrates1(accept[5:6], proposal.sd.phi, 40, 50) proposal.sd.delta <- common.accceptrates1(accept[7:8], proposal.sd.delta, 40, 50) if(!fix.rho.int) proposal.sd.rho <- common.accceptrates2(accept[9:10], proposal.sd.rho, 40, 50, 0.5) if(!fix.rho.slo) proposal.sd.lambda <- common.accceptrates2(accept[11:12], proposal.sd.lambda, 40, 50, 0.5) accept <- rep(0,12) }else {} ################################ ## print progress to the console ################################ if(j %in% 
percentage.points & verbose) { setTxtProgressBar(progressBar, j/n.sample) } } ############################################ #### Return the results to the main function ############################################ #### Compile the results if(n.miss==0) samples.Y <- NA if(fix.rho.int) samples.rho <- NA if(fix.rho.slo) samples.lambda <- NA chain.results <- list(samples.beta=samples.beta, samples.alpha=samples.alpha, samples.phi=samples.phi, samples.delta=samples.delta, samples.tau2=samples.tau2, samples.rho=samples.rho, samples.lambda=samples.lambda, samples.loglike=samples.loglike, samples.fitted=samples.fitted, samples.Y=samples.Y, accept=accept) #### Return the results return(chain.results) }
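#### Editorial sketch (not package code): a minimal, standalone illustration of the
#### truncated-normal random-walk Metropolis step used above for rho and lambda.
#### Because the proposal density on [0, 1] is not symmetric, the Hastings correction
#### log q(current | proposal) - log q(proposal | current) is added to the log
#### acceptance ratio. The Beta(2, 2) density below is a stand-in log posterior.
library(truncnorm)
rho.cur <- 0.5
proposal.sd <- 0.05
log.post <- function(r) dbeta(r, shape1 = 2, shape2 = 2, log = TRUE)
rho.prop <- rtruncnorm(n = 1, a = 0, b = 1, mean = rho.cur, sd = proposal.sd)
hastings <- log(dtruncnorm(x = rho.cur, a = 0, b = 1, mean = rho.prop, sd = proposal.sd)) -
            log(dtruncnorm(x = rho.prop, a = 0, b = 1, mean = rho.cur, sd = proposal.sd))
accept.prob <- exp(log.post(rho.prop) - log.post(rho.cur) + hastings)
if(runif(1) < accept.prob) rho.cur <- rho.prop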
#### End of file: CARBayesST/R/poisson.CARlinearMCMC.R
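#### poisson.CARlocalised: fits the Poisson model with a localised structure, in which
#### every space-time observation is allocated (via Z) to one of G intercept classes
#### (lambda), alongside CAR spatial random effects phi, an allocation penalty
#### parameter delta and a temporal dependence parameter rho.T. As above, the MCMC
#### chains can be run in series or in parallel.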
poisson.CARlocalised <- function(formula, data=NULL, G, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.delta=NULL, prior.tau2=NULL, MALA=TRUE, verbose=TRUE) { ############################################## #### Format the arguments and check for errors ############################################## #### Verbose a <- common.verbose(verbose) #### Frame object frame.results <- common.frame.localised(formula, data, "poisson") N.all <- frame.results$n p <- frame.results$p X <- frame.results$X X.standardised <- frame.results$X.standardised X.sd <- frame.results$X.sd X.mean <- frame.results$X.mean X.indicator <- frame.results$X.indicator offset <- frame.results$offset Y <- frame.results$Y which.miss <- as.numeric(!is.na(Y)) n.miss <- N.all - sum(which.miss) if(n.miss>0) stop("the response has missing 'NA' values.", call.=FALSE) #### Determine the number of spatial and temporal units W.quants <- common.Wcheckformat.leroux(W) K <- W.quants$n N <- N.all / K offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE) #### Check on MALA argument if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE) if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE) #### Format and check the number of clusters G if(length(G)!=1) stop("G is the wrong length.", call.=FALSE) if(!is.numeric(G)) stop("G is not numeric.", call.=FALSE) if(G<=1) stop("G is less than 2.", call.=FALSE) if(G!=round(G)) stop("G is not an integer.", call.=FALSE) if(floor(G/2)==ceiling(G/2)) { Gstar <- G/2 }else { Gstar <- (G+1)/2 } #### Priors if(!is.null(X.standardised)) { if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p) if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p) prior.beta.check(prior.mean.beta, prior.var.beta, p) }else {} if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01) prior.var.check(prior.tau2) if(is.null(prior.delta)) prior.delta <- 10 if(length(prior.delta)!=1) stop("the prior value for delta is the wrong length.", call.=FALSE) if(!is.numeric(prior.delta)) stop("the prior value for delta is not numeric.", call.=FALSE) if(sum(is.na(prior.delta))!=0) stop("the prior value for delta has missing values.", call.=FALSE) if(prior.delta<=0) stop("the prior value for delta is not positive.", call.=FALSE) #### MCMC quantities - burnin, n.sample, thin common.burnin.nsample.thin.check(burnin, n.sample, thin) ######################## #### Run the MCMC chains ######################## if(n.chains==1) { #### Only 1 chain results <- poisson.CARlocalisedMCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, G=G, Gstar=Gstar, K=K, N=N, N.all=N.all, p=p, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.delta=prior.delta, prior.tau2=prior.tau2, verbose=verbose, chain=1) }else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1) { #### Multiple chains in series results <- as.list(rep(NA, n.chains)) for(i in 1:n.chains) { results[[i]] <- poisson.CARlocalisedMCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, G=G, Gstar=Gstar, K=K, N=N, N.all=N.all, p=p, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.delta=prior.delta, prior.tau2=prior.tau2, verbose=verbose, chain=i) } }else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores)) { #### Multiple chains in parallel results <- as.list(rep(NA, n.chains)) if(verbose) { compclust <- 
makeCluster(n.cores, outfile="CARBayesSTprogress.txt") cat("The current progress of the model fitting algorithm has been output to CARBayesSTprogress.txt in the working directory") }else { compclust <- makeCluster(n.cores) } results <- clusterCall(compclust, fun=poisson.CARlocalisedMCMC, Y=Y, offset=offset, X.standardised=X.standardised, W=W, G=G, Gstar=Gstar, K=K, N=N, N.all=N.all, p=p, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.delta=prior.delta, prior.tau2=prior.tau2, verbose=verbose, chain="all") stopCluster(compclust) }else { stop("n.chains or n.cores are not positive integers.", call.=FALSE) } #### end timer if(verbose) { cat("\nSummarising results.\n") }else {} ################################### #### Summarise and save the results ################################### if(n.chains==1) { #### If n.chains==1 ## Compute the acceptance rates accept.lambda <- 100 * results$accept[1] / results$accept[2] accept.delta <- 100 * results$accept[3] / results$accept[4] accept.phi <- 100 * results$accept[5] / results$accept[6] accept.gamma <- 100 if(!is.null(X.standardised)) { accept.beta <- 100 * results$accept[7] / results$accept[8] accept.final <- c(accept.beta, accept.lambda, accept.delta, accept.phi, accept.gamma) names(accept.final) <- c("beta", "lambda", "delta", "phi", "rho.T") }else { accept.final <- c(accept.lambda, accept.delta, accept.phi, accept.gamma) names(accept.final) <- c("lambda", "delta", "phi", "rho.T") } ## Compute the fitted deviance mean.Z <- round(apply(results$samples.Z,2,mean), 0) mean.lambda <- apply(results$samples.lambda, 2, mean) mean.mu <- matrix(mean.lambda[mean.Z], nrow=K, ncol=N, byrow=FALSE) if(!is.null(X.standardised)) { mean.beta <- apply(results$samples.beta,2,mean) regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE) }else { regression.mat <- matrix(0, nrow=K, ncol=N, byrow=FALSE) } mean.phi <- matrix(apply(results$samples.phi, 2, mean), nrow=K, byrow=FALSE) fitted.mean <- as.numeric(exp(mean.mu + offset.mat + regression.mat + mean.phi)) deviance.fitted <- -2 * sum(dpois(x=as.numeric(Y), lambda=fitted.mean, log=TRUE)) modelfit <- common.modelfit(results$samples.loglike, deviance.fitted) ## Create the fitted values and residuals fitted.values <- apply(results$samples.fitted, 2, mean) response.residuals <- as.numeric(Y) - fitted.values pearson.residuals <- response.residuals /sqrt(fitted.values) residuals <- data.frame(response=response.residuals, pearson=pearson.residuals) ## Transform the parameters back to the origianl covariate scale. 
if(!is.null(X.standardised)) { samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE) }else { samples.beta.orig = NA } ## Create the samples object samples <- list(beta=mcmc(samples.beta.orig), lambda=mcmc(results$samples.lambda), Z=mcmc(results$samples.Z), delta=mcmc(results$samples.delta), phi = mcmc(results$samples.phi), tau2=mcmc(results$samples.tau2), rho.T=mcmc(results$samples.gamma), fitted=mcmc(results$samples.fitted)) ## Create a summary object n.keep <- floor((n.sample - burnin)/thin) summary.hyper <- array(NA, c(3, 7)) summary.hyper[1,1:3] <- c(mean(results$samples.delta), quantile(results$samples.delta, c(0.025, 0.975))) summary.hyper[2,1:3] <- c(mean(results$samples.tau2), quantile(results$samples.tau2, c(0.025, 0.975))) summary.hyper[3,1:3] <- c(mean(results$samples.gamma), quantile(results$samples.gamma, c(0.025, 0.975))) rownames(summary.hyper) <- c("delta", "tau2", "rho.T") summary.hyper[1, 4:7] <- c(n.keep, accept.delta, effectiveSize(mcmc(results$samples.delta)), geweke.diag(mcmc(results$samples.delta))$z) summary.hyper[2, 4:7] <- c(n.keep, 100, effectiveSize(mcmc(results$samples.tau2)), geweke.diag(mcmc(results$samples.tau2))$z) summary.hyper[3, 4:7] <- c(n.keep, 100, effectiveSize(mcmc(results$samples.gamma)), geweke.diag(mcmc(results$samples.gamma))$z) summary.lambda <- t(rbind(apply(results$samples.lambda, 2, mean), apply(results$samples.lambda, 2, quantile, c(0.025, 0.975)))) summary.lambda <- cbind(summary.lambda, rep(n.keep, G), rep(accept.lambda, G), effectiveSize(mcmc(results$samples.lambda)), geweke.diag(mcmc(results$samples.lambda))$z) summary.lambda <- matrix(summary.lambda, ncol=7) rownames(summary.lambda) <- paste("lambda", 1:G, sep="") if(!is.null(X.standardised)) { samples.beta.orig <- mcmc(samples.beta.orig) summary.beta <- t(rbind(apply(samples.beta.orig, 2, mean), apply(samples.beta.orig, 2, quantile, c(0.025, 0.975)))) summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z) rownames(summary.beta) <- colnames(X) colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag") summary.results <- rbind(summary.beta, summary.lambda, summary.hyper) }else { summary.results <- rbind(summary.lambda, summary.hyper) } summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4) summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1) colnames(summary.results) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag") }else { #### If n.chains > 1 ## Compute the acceptance rates accept.temp <- lapply(results, function(l) l[["accept"]]) accept.temp2 <- do.call(what=rbind, args=accept.temp) accept.lambda <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2]) accept.delta <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4]) accept.phi <- 100 * sum(accept.temp2[ ,5]) / sum(accept.temp2[ ,6]) accept.gamma <- 100 if(!is.null(X.standardised)) { accept.beta <- 100 * sum(accept.temp2[ ,7]) / sum(accept.temp2[ ,8]) accept.final <- c(accept.beta, accept.lambda, accept.delta, accept.phi, accept.gamma) names(accept.final) <- c("beta", "lambda", "delta", "phi", "rho.T") }else { accept.final <- c(accept.lambda, accept.delta, accept.phi, accept.gamma) names(accept.final) <- c("lambda", "delta", "phi", "rho.T") } ## Extract the samples into separate matrix and list objects if(!is.null(X.standardised)) { samples.beta.list <- lapply(results, function(l) l[["samples.beta"]]) 
samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list) }else {} samples.phi.list <- lapply(results, function(l) l[["samples.phi"]]) samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list) samples.Z.list <- lapply(results, function(l) l[["samples.Z"]]) samples.Z.matrix <- do.call(what=rbind, args=samples.Z.list) samples.gamma.list <- lapply(results, function(l) l[["samples.gamma"]]) samples.gamma.matrix <- do.call(what=rbind, args=samples.gamma.list) samples.delta.list <- lapply(results, function(l) l[["samples.delta"]]) samples.delta.matrix <- do.call(what=rbind, args=samples.delta.list) samples.lambda.list <- lapply(results, function(l) l[["samples.lambda"]]) samples.lambda.matrix <- do.call(what=rbind, args=samples.lambda.list) samples.tau2.list <- lapply(results, function(l) l[["samples.tau2"]]) samples.tau2.matrix <- do.call(what=rbind, args=samples.tau2.list) samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]]) samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list) samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]]) samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list) ## Compute the fitted deviance mean.Z <- round(apply(samples.Z.matrix,2,mean), 0) mean.lambda <- apply(samples.lambda.matrix, 2, mean) mean.mu <- matrix(mean.lambda[mean.Z], nrow=K, ncol=N, byrow=FALSE) if(!is.null(X.standardised)) { mean.beta <- apply(samples.beta.matrix,2,mean) regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE) }else { regression.mat <- matrix(0, nrow=K, ncol=N, byrow=FALSE) } mean.phi <- matrix(apply(samples.phi.matrix, 2, mean), nrow=K, byrow=FALSE) fitted.mean <- as.numeric(exp(mean.mu + offset.mat + regression.mat + mean.phi)) deviance.fitted <- -2 * sum(dpois(x=as.numeric(Y), lambda=fitted.mean, log=TRUE)) modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted) ## Create the fitted values and residuals fitted.values <- apply(samples.fitted.matrix, 2, mean) response.residuals <- as.numeric(Y) - fitted.values pearson.residuals <- response.residuals /sqrt(fitted.values) residuals <- data.frame(response=response.residuals, pearson=pearson.residuals) ## Transform the parameters back to the original covariate scale. 
if(!is.null(X.standardised)) { samples.beta.list <- samples.beta.list for(j in 1:n.chains) { samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE) } samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list) beta.mcmc <- mcmc.list(lapply(samples.beta.list, mcmc)) }else { beta.mcmc = mcmc(NA) } ## Create MCMC objects phi.mcmc <- mcmc.list(lapply(samples.phi.list, mcmc)) fitted.mcmc <- mcmc.list(lapply(samples.fitted.list, mcmc)) tau2.mcmc <- mcmc.list(lapply(samples.tau2.list, mcmc)) Z.mcmc <- mcmc.list(lapply(samples.Z.list, mcmc)) gamma.mcmc <- mcmc.list(lapply(samples.gamma.list, mcmc)) delta.mcmc <- mcmc.list(lapply(samples.delta.list, mcmc)) lambda.mcmc <- mcmc.list(lapply(samples.lambda.list, mcmc)) samples <- list(beta=beta.mcmc, phi=phi.mcmc, Z=Z.mcmc, rho.T=gamma.mcmc, lambda=lambda.mcmc, delta=delta.mcmc, tau2=tau2.mcmc, fitted=fitted.mcmc) ## create a summary object n.keep <- floor((n.sample - burnin)/thin) * n.chains summary.hyper <- array(NA, c(3, 7)) summary.hyper[1,1:3] <- c(mean(samples.delta.matrix), quantile(samples.delta.matrix, c(0.025, 0.975))) summary.hyper[2,1:3] <- c(mean(samples.tau2.matrix), quantile(samples.tau2.matrix, c(0.025, 0.975))) summary.hyper[3,1:3] <- c(mean(samples.gamma.matrix), quantile(samples.gamma.matrix, c(0.025, 0.975))) rownames(summary.hyper) <- c("delta", "tau2", "rho.T") summary.hyper[1, 4:7] <- c(n.keep, accept.delta, effectiveSize(delta.mcmc), gelman.diag(delta.mcmc)$psrf[ ,2]) summary.hyper[2, 4:7] <- c(n.keep, 100, effectiveSize(tau2.mcmc), gelman.diag(tau2.mcmc)$psrf[ ,2]) summary.hyper[3, 4:7] <- c(n.keep, 100, effectiveSize(gamma.mcmc), gelman.diag(gamma.mcmc)$psrf[ ,2]) summary.lambda <- t(rbind(apply(samples.lambda.matrix, 2, mean), apply(samples.lambda.matrix, 2, quantile, c(0.025, 0.975)))) summary.lambda <- cbind(summary.lambda, rep(n.keep, G), rep(accept.lambda, G), effectiveSize(lambda.mcmc), gelman.diag(lambda.mcmc)$psrf[ ,2]) summary.lambda <- matrix(summary.lambda, ncol=7) rownames(summary.lambda) <- paste("lambda", 1:G, sep="") if(!is.null(X.standardised)) { summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975)))) summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2]) rownames(summary.beta) <- colnames(X) colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)") summary.results <- rbind(summary.beta, summary.lambda, summary.hyper) }else { summary.results <- rbind(summary.lambda, summary.hyper) } summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4) summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1) colnames(summary.results) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)") } ################################### #### Compile and return the results ################################### model.string <- c("Likelihood model - Poisson (log link function)", "\nLatent structure model - Localised autoregressive order 1 CAR model\n") n.total <- floor((n.sample - burnin) / thin) * n.chains mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains) names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains") results.final <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, 
localised.structure=mean.Z, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X) class(results.final) <- "CARBayesST" if(verbose) { b<-proc.time() cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n") }else {} return(results.final) }
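##################################################################
#### Illustrative usage sketch (not part of the package source)
##################################################################
#### A minimal, hedged example of how poisson.CARlocalised() might be called,
#### assuming the compiled CARBayesST package is installed and loaded. The
#### simulated data, lattice size, expected counts, choice of G and the MCMC
#### settings are arbitrary illustrations only; real analyses need far longer
#### runs. Wrapped in if(FALSE) so the file still sources cleanly.
if(FALSE)
{
## Binary adjacency matrix W for a 3 x 3 lattice of K = 9 small areas
K <- 9
W.example <- matrix(0, K, K)
for(i in 1:K)
{
    for(j in 1:K)
    {
    ri <- (i-1) %/% 3;  ci <- (i-1) %% 3
    rj <- (j-1) %/% 3;  cj <- (j-1) %% 3
    if(abs(ri - rj) + abs(ci - cj) == 1) W.example[i, j] <- 1
    }
}

## Simulate N = 10 time periods of Poisson counts with known expected counts E.
## Note that this model does not allow missing values in the response.
N <- 10
dat <- data.frame(E = rep(50, K * N))
dat$Y <- rpois(K * N, lambda = dat$E * exp(rnorm(K * N, 0, 0.2)))

## Fit the localised model with G = 3 intercept clusters
fit <- poisson.CARlocalised(formula = Y ~ offset(log(E)), data = dat, G = 3,
                            W = W.example, burnin = 2000, n.sample = 10000,
                            thin = 5)
fit$summary.results
fit$localised.structure    ## posterior mean (rounded) cluster allocations Z
}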
#### File: /scratch/gouwar.j/cran-all/cranData/CARBayesST/R/poisson.CARlocalised.R
poisson.CARlocalisedMCMC <- function(Y, offset, X.standardised, W, G, Gstar, K, N, N.all, p, burnin, n.sample, thin, MALA, prior.mean.beta, prior.var.beta, prior.delta, prior.tau2, verbose, chain) { #Rcpp::sourceCpp("src/CARBayesST.cpp") #source("R/common.functions.R") #library(spdep) #library(truncnorm) # # ############################################ #### Set up the key elements before sampling ############################################ #### Compute the blocking structure for beta if(!is.null(X.standardised)) { ## Compute the blocking structure for beta block.temp <- common.betablock(p) beta.beg <- block.temp[[1]] beta.fin <- block.temp[[2]] n.beta.block <- block.temp[[3]] list.block <- as.list(rep(NA, n.beta.block*2)) for(r in 1:n.beta.block) { list.block[[r]] <- beta.beg[r]:beta.fin[r]-1 list.block[[r+n.beta.block]] <- length(list.block[[r]]) } }else {} #### Compute a starting value for beta if(!is.null(X.standardised)) { mod.glm <- glm(Y~X.standardised-1, offset=offset, family="quasipoisson") beta.mean <- mod.glm$coefficients beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled)) beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd) regression.vec <- X.standardised %*% beta }else { regression.vec <- rep(0, N.all) } #### Generate the initial parameter values log.Y <- log(Y) log.Y[Y==0] <- -0.1 res.temp <- log.Y - regression.vec - offset clust <- kmeans(res.temp,G) lambda <- clust$centers[order(clust$centers)] lambda.mat <- matrix(rep(lambda, N), nrow=N, byrow=TRUE) Z <- rep(1, N.all) for(j in 2:G) { Z[clust$cluster==order(clust$centers)[j]] <- j } Z.mat <- matrix(Z, nrow=K, ncol=N, byrow=FALSE) mu <- matrix(lambda[Z], nrow=K, ncol=N, byrow=FALSE) res.sd <- sd(res.temp, na.rm=TRUE)/5 phi.mat <- matrix(rnorm(n=N.all, mean=0, sd = res.sd), nrow=K, byrow=FALSE) phi <- as.numeric(phi.mat) tau2 <- var(phi)/10 gamma <- runif(1) delta <- runif(1,1, min(2, prior.delta)) #### Specify matrix quantities Y.mat <- matrix(Y, nrow=K, ncol=N, byrow=FALSE) offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE) regression.mat <- matrix(regression.vec, nrow=K, ncol=N, byrow=FALSE) #### Matrices to store samples n.keep <- floor((n.sample - burnin)/thin) samples.Z <- array(NA, c(n.keep, N.all)) samples.lambda <- array(NA, c(n.keep, G)) samples.delta <- array(NA, c(n.keep, 1)) samples.tau2 <- array(NA, c(n.keep, 1)) samples.gamma <- array(NA, c(n.keep, 1)) samples.phi <- array(NA, c(n.keep, N.all)) samples.fitted <- array(NA, c(n.keep, N.all)) samples.loglike <- array(NA, c(n.keep, N.all)) #### Specify the Metropolis quantities if(!is.null(X.standardised)) { samples.beta <- array(NA, c(n.keep, p)) accept <- rep(0,8) proposal.corr.beta <- solve(t(X.standardised) %*% X.standardised) chol.proposal.corr.beta <- chol(proposal.corr.beta) proposal.sd.beta <- 0.01 }else { accept <- rep(0,6) } proposal.sd.lambda <- 0.1 proposal.sd.delta <- 0.1 proposal.sd.phi <- 0.1 Y.extend <- matrix(rep(Y, G), byrow=F, ncol=G) delta.update <- matrix(rep(1:G, N.all-K), ncol=G, byrow=T) tau2.posterior.shape <- prior.tau2[1] + N * (K-1) /2 #### CAR quantities W.quants <- common.Wcheckformat.leroux(W) W <- W.quants$W W.triplet <- W.quants$W.triplet W.n.triplet <- W.quants$n.triplet W.triplet.sum <- W.quants$W.triplet.sum n.neighbours <- W.quants$n.neighbours W.begfin <- W.quants$W.begfin #### Start timer if(verbose) { cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ") progressBar <- txtProgressBar(style = 3) percentage.points<-round((1:100/100)*n.sample) }else { 
percentage.points<-round((1:100/100)*n.sample) } ############################## #### Generate the MCMC samples ############################## #### Create the MCMC samples for(j in 1:n.sample) { #################### ## Sample from beta #################### if(!is.null(X.standardised)) { offset.temp <- offset + as.numeric(mu) + as.numeric(phi.mat) if(MALA) { temp <- poissonbetaupdateMALA(X.standardised, N.all, p, beta, offset.temp, Y, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block) }else { temp <- poissonbetaupdateRW(X.standardised, N.all, p, beta, offset.temp, Y, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block) } beta <- temp[[1]] accept[7] <- accept[7] + temp[[2]] accept[8] <- accept[8] + n.beta.block regression.vec <- X.standardised %*% beta regression.mat <- matrix(regression.vec, nrow=K, ncol=N, byrow=FALSE) }else {} ####################### #### Sample from lambda ####################### #### Propose a new value proposal.extend <- c(-100, lambda, 100) for(r in 1:G) { proposal.extend[(r+1)] <- rtruncnorm(n=1, a=proposal.extend[r], b=proposal.extend[(r+2)], mean=proposal.extend[(r+1)], sd=proposal.sd.lambda) } proposal <- proposal.extend[-c(1, (G+2))] #### Compute the data likelihood lp.current <- lambda[Z] + offset + as.numeric(regression.mat) + as.numeric(phi.mat) lp.proposal <- proposal[Z] + offset + as.numeric(regression.mat) + as.numeric(phi.mat) like.current <- Y * lp.current - exp(lp.current) like.proposal <- Y * lp.proposal - exp(lp.proposal) prob <- exp(sum(like.proposal - like.current)) if(prob > runif(1)) { lambda <- proposal lambda.mat <- matrix(rep(lambda, N), nrow=N, byrow=TRUE) mu <- matrix(lambda[Z], nrow=K, ncol=N, byrow=FALSE) accept[1] <- accept[1] + 1 }else {} accept[2] <- accept[2] + 1 ################## #### Sample from Z ################## prior.offset <- rep(NA, G) for(r in 1:G) { prior.offset[r] <- log(sum(exp(-delta * ((1:G - r)^2 + (1:G - Gstar)^2)))) } mu.offset <- exp(offset.mat + regression.mat + phi.mat) test <- Zupdatesqpoi(Z=Z.mat, Offset=mu.offset, Y=Y.mat, delta=delta, lambda=lambda, nsites=K, ntime=N, G=G, SS=1:G, prioroffset=prior.offset, Gstar=Gstar) Z.mat <- test Z <- as.numeric(Z.mat) mu <- matrix(lambda[Z], nrow=K, ncol=N, byrow=FALSE) ###################### #### Sample from delta ###################### proposal.delta <- rtruncnorm(n=1, a=1, b=prior.delta, mean=delta, sd=proposal.sd.delta) sum.delta1 <- sum((Z - Gstar)^2) sum.delta2 <- sum((Z.mat[ ,-1] - Z.mat[ ,-N])^2) current.fc1 <- -delta * (sum.delta1 + sum.delta2) - K * log(sum(exp(-delta * (1:G - Gstar)^2))) proposal.fc1 <- -proposal.delta * (sum.delta1 + sum.delta2) - K * log(sum(exp(-proposal.delta * (1:G - Gstar)^2))) Z.temp <- matrix(rep(as.numeric(Z.mat[ ,-N]),G), ncol=G, byrow=FALSE) Z.temp2 <- (delta.update - Z.temp)^2 + (delta.update - Gstar)^2 current.fc <- current.fc1 - sum(log(apply(exp(-delta * Z.temp2),1,sum))) proposal.fc <- proposal.fc1 - sum(log(apply(exp(-proposal.delta * Z.temp2),1,sum))) hastings <- log(dtruncnorm(x=delta, a=1, b=prior.delta, mean=proposal.delta, sd=proposal.sd.delta)) - log(dtruncnorm(x=proposal.delta, a=1, b=prior.delta, mean=delta, sd=proposal.sd.delta)) prob <- exp(proposal.fc - current.fc + hastings) if(prob > runif(1)) { delta <- proposal.delta accept[3] <- accept[3] + 1 }else {} accept[4] <- accept[4] + 1 #################### #### Sample from phi #################### phi.offset <- mu + offset.mat + regression.mat temp1 <- poissonar1carupdateRW(W.triplet, W.begfin, W.triplet.sum, K, N, phi.mat, 
tau2, gamma, 1, Y.mat, proposal.sd.phi, phi.offset, W.triplet.sum) phi.temp <- temp1[[1]] phi <- as.numeric(phi.temp) for(i in 1:G) { phi[which(Z==i)] <- phi[which(Z==i)] - mean(phi[which(Z==i)]) } phi.mat <- matrix(phi, nrow=K, ncol=N, byrow=FALSE) accept[5] <- accept[5] + temp1[[2]] accept[6] <- accept[6] + K*N #################### ## Sample from gamma #################### temp2 <- gammaquadformcompute(W.triplet, W.triplet.sum, W.n.triplet, K, N, phi.mat, 1) mean.gamma <- temp2[[1]] / temp2[[2]] sd.gamma <- sqrt(tau2 / temp2[[2]]) gamma <- rtruncnorm(n=1, a=0, b=1, mean=mean.gamma, sd=sd.gamma) #################### ## Samples from tau2 #################### temp3 <- tauquadformcompute(W.triplet, W.triplet.sum, W.n.triplet, K, N, phi.mat, 1, gamma) tau2.posterior.scale <- temp3 + prior.tau2[2] tau2 <- 1 / rgamma(1, tau2.posterior.shape, scale=(1/tau2.posterior.scale)) ######################### ## Calculate the deviance ######################### lp <- as.numeric(mu + offset.mat + regression.mat + phi.mat) fitted <- exp(lp) loglike <- dpois(x=as.numeric(Y), lambda=fitted, log=TRUE) ################### ## Save the results ################### if(j > burnin & (j-burnin)%%thin==0) { ele <- (j - burnin) / thin samples.delta[ele, ] <- delta samples.lambda[ele, ] <- lambda samples.Z[ele, ] <- Z samples.phi[ele, ] <- as.numeric(phi.mat) samples.tau2[ele, ] <- tau2 samples.gamma[ele, ] <- gamma samples.fitted[ele, ] <- fitted samples.loglike[ele, ] <- loglike if(!is.null(X.standardised)) samples.beta[ele, ] <- beta }else {} ######################################## ## Self tune the acceptance probabilties ######################################## if(ceiling(j/100)==floor(j/100) & j < burnin) { if(!is.null(X.standardised)) { if(p>2) { proposal.sd.beta <- common.accceptrates1(accept[7:8], proposal.sd.beta, 40, 50) }else { proposal.sd.beta <- common.accceptrates1(accept[7:8], proposal.sd.beta, 30, 40) } proposal.sd.phi <- common.accceptrates1(accept[5:6], proposal.sd.phi, 40, 50) proposal.sd.lambda <- common.accceptrates2(accept[1:2], proposal.sd.lambda, 20, 40, 10) proposal.sd.delta <- common.accceptrates2(accept[3:4], proposal.sd.delta, 40, 50, prior.delta/6) accept <- rep(0,8) }else { proposal.sd.phi <- common.accceptrates1(accept[5:6], proposal.sd.phi, 40, 50) proposal.sd.lambda <- common.accceptrates2(accept[1:2], proposal.sd.lambda, 20, 40, 10) proposal.sd.delta <- common.accceptrates2(accept[3:4], proposal.sd.delta, 40, 50, prior.delta/6) accept <- rep(0,6) } }else {} ################################ ## print progress to the console ################################ if(j %in% percentage.points & verbose) { setTxtProgressBar(progressBar, j/n.sample) } } ############################################ #### Return the results to the main function ############################################ #### Compile the results if(is.null(X.standardised)) samples.beta <- NA chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.Z=samples.Z, samples.lambda=samples.lambda, samples.tau2=samples.tau2, samples.delta=samples.delta, samples.gamma=samples.gamma, samples.loglike=samples.loglike, samples.fitted=samples.fitted, accept=accept) #### Return the results return(chain.results) }
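##################################################################
#### Illustrative sketch (not part of the package source)
##################################################################
#### The delta update in the sampler above proposes from a normal distribution
#### truncated to (1, prior.delta) and therefore includes an explicit Hastings
#### correction, because a truncated normal random walk proposal is not
#### symmetric. The toy Metropolis-Hastings sampler below, with an arbitrary
#### gamma log-target restricted to (1, 10), shows that mechanic in isolation.
#### It assumes the truncnorm package is available, as it is for the sampler
#### itself. Wrapped in if(FALSE) so the file still sources cleanly.
if(FALSE)
{
library(truncnorm)
log.target <- function(x) dgamma(x, shape=3, rate=1, log=TRUE)   ## toy target
delta <- 2
proposal.sd <- 0.5
n.iter <- 5000
samples <- rep(NA, n.iter)
for(j in 1:n.iter)
{
    ## Propose from a normal truncated to the support (1, 10), mirroring the
    ## delta update where the support is (1, prior.delta)
    proposal <- rtruncnorm(n=1, a=1, b=10, mean=delta, sd=proposal.sd)

    ## Hastings correction for the asymmetric truncated normal proposal
    hastings <- log(dtruncnorm(x=delta, a=1, b=10, mean=proposal, sd=proposal.sd)) -
                log(dtruncnorm(x=proposal, a=1, b=10, mean=delta, sd=proposal.sd))
    prob <- exp(log.target(proposal) - log.target(delta) + hastings)
    if(prob > runif(1)) delta <- proposal
    samples[j] <- delta
}
summary(samples)
}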
#### File: /scratch/gouwar.j/cran-all/cranData/CARBayesST/R/poisson.CARlocalisedMCMC.R
poisson.CARsepspatial <- function(formula, data=NULL, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.tau2=NULL, rho.S=NULL, rho.T=NULL, MALA=TRUE, verbose=TRUE) { ############################################## #### Format the arguments and check for errors ############################################## #### Verbose a <- common.verbose(verbose) #### Frame object frame.results <- common.frame(formula, data, "poisson") N.all <- frame.results$n p <- frame.results$p X <- frame.results$X X.standardised <- frame.results$X.standardised X.sd <- frame.results$X.sd X.mean <- frame.results$X.mean X.indicator <- frame.results$X.indicator offset <- frame.results$offset Y <- frame.results$Y n.miss <- frame.results$n.miss if(n.miss>0) stop("the response has missing 'NA' values.", call.=FALSE) #### Determine the number of spatial and temporal units W.quants <- common.Wcheckformat.leroux(W) K <- W.quants$n N <- N.all / K offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE) #### Check on MALA argument if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE) if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE) #### Check on the rho arguments if(is.null(rho.S)) { rho <- runif(1) fix.rho.S <- FALSE }else { rho <- rho.S fix.rho.S <- TRUE } if(!is.numeric(rho)) stop("rho.S is fixed but is not numeric.", call.=FALSE) if(rho<0 ) stop("rho.S is outside the range [0, 1].", call.=FALSE) if(rho>1 ) stop("rho.S is outside the range [0, 1].", call.=FALSE) if(is.null(rho.T)) { lambda <- runif(1) fix.rho.T <- FALSE }else { lambda <- rho.T fix.rho.T <- TRUE } if(!is.numeric(lambda)) stop("rho.T is fixed but is not numeric.", call.=FALSE) if(lambda<0 ) stop("rho.T is outside the range [0, 1].", call.=FALSE) if(lambda>1 ) stop("rho.T is outside the range [0, 1].", call.=FALSE) #### Priors if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p) if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p) if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01) prior.beta.check(prior.mean.beta, prior.var.beta, p) prior.var.check(prior.tau2) #### Compute the blocking structure for beta block.temp <- common.betablock(p) beta.beg <- block.temp[[1]] beta.fin <- block.temp[[2]] n.beta.block <- block.temp[[3]] list.block <- as.list(rep(NA, n.beta.block*2)) for(r in 1:n.beta.block) { list.block[[r]] <- beta.beg[r]:beta.fin[r]-1 list.block[[r+n.beta.block]] <- length(list.block[[r]]) } #### MCMC quantities - burnin, n.sample, thin common.burnin.nsample.thin.check(burnin, n.sample, thin) ######################## #### Run the MCMC chains ######################## if(n.chains==1) { #### Only 1 chain results <- poisson.CARsepspatialMCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, lambda=lambda, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, N.all=N.all, p=p, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain=1) }else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1) { #### Multiple chains in series results <- as.list(rep(NA, n.chains)) for(i in 1:n.chains) { results[[i]] <- poisson.CARsepspatialMCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, lambda=lambda, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, N.all=N.all, p=p, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, 
prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain=i) } }else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores)) { #### Multiple chains in parallel results <- as.list(rep(NA, n.chains)) if(verbose) { compclust <- makeCluster(n.cores, outfile="CARBayesSTprogress.txt") cat("The current progress of the model fitting algorithm has been output to CARBayesSTprogress.txt in the working directory") }else { compclust <- makeCluster(n.cores) } results <- clusterCall(compclust, fun=poisson.CARsepspatialMCMC, Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, lambda=lambda, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, N.all=N.all, p=p, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.tau2=prior.tau2, verbose=verbose, chain="all") stopCluster(compclust) }else { stop("n.chains or n.cores are not positive integers.", call.=FALSE) } #### end timer if(verbose) { cat("\nSummarising results.\n") }else {} ################################### #### Summarise and save the results ################################### if(n.chains==1) { #### If n.chains==1 ## Compute the acceptance rates accept.final <- rep(NA, 5) names(accept.final) <- c("beta", "phi", "delta", "rho.S", "rho.T") accept.final[1] <- 100 * results$accept[1] / results$accept[2] accept.final[2] <- 100 * results$accept[3] / results$accept[4] accept.final[3] <- 100 * results$accept[7] / results$accept[8] if(!fix.rho.S) accept.final[4] <- 100 * results$accept[5] / results$accept[6] if(!fix.rho.T) accept.final[5] <- 100 * results$accept[9] / results$accept[10] ## Compute the fitted deviance mean.beta <- apply(results$samples.beta,2,mean) regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE) mean.phi <- matrix(apply(results$samples.phi, 2, mean), nrow=K, ncol=N) mean.delta <- apply(results$samples.delta,2,mean) delta.mat <- matrix(mean.delta, nrow=K, ncol=N, byrow=TRUE) fitted.mean <- as.numeric(exp(offset.mat + mean.phi + regression.mat + delta.mat)) deviance.fitted <- -2 * sum(dpois(x=as.numeric(Y), lambda=fitted.mean, log=TRUE), na.rm=TRUE) modelfit <- common.modelfit(results$samples.loglike, deviance.fitted) ## Create the fitted values and residuals fitted.values <- apply(results$samples.fitted, 2, mean) response.residuals <- as.numeric(Y) - fitted.values pearson.residuals <- response.residuals /sqrt(fitted.values) residuals <- data.frame(response=response.residuals, pearson=pearson.residuals) ## Transform the parameters back to the origianl covariate scale. 
samples.beta.orig <- common.betatransform(results$samples.beta, X.indicator, X.mean, X.sd, p, FALSE) ## Create the samples object if(fix.rho.S & fix.rho.T) { samples.rhoext <- NA }else if(fix.rho.S & !fix.rho.T) { samples.rhoext <- results$samples.lambda names(samples.rhoext) <- "rho.T" }else if(!fix.rho.S & fix.rho.T) { samples.rhoext <- results$samples.rho names(samples.rhoext) <- "rho.S" }else { samples.rhoext <- cbind(results$samples.rho, results$samples.lambda) colnames(samples.rhoext) <- c("rho.S", "rho.T") } samples <- list(beta=mcmc(samples.beta.orig), phi=mcmc(results$samples.phi), delta=mcmc(results$samples.delta), tau2=mcmc(results$samples.tau2), tau2.T=mcmc(results$samples.sig2), rho=mcmc(samples.rhoext), fitted=mcmc(results$samples.fitted)) ## Create a summary object n.keep <- floor((n.sample - burnin)/thin) summary.beta <- t(rbind(apply(samples$beta, 2, mean), apply(samples$beta, 2, quantile, c(0.025, 0.975)))) summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(samples$beta), geweke.diag(samples$beta)$z) rownames(summary.beta) <- colnames(X) colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag") summary.hyper <- array(NA, c(3 + N, 7)) for (tt in 1:N) { summary.hyper[tt,1:3] <- c(mean(results$samples.tau2[, tt]), quantile(results$samples.tau2[, tt], c(0.025, 0.975))) summary.hyper[tt, 4:7] <- c(n.keep, 100, effectiveSize(mcmc(results$samples.tau2[, tt])), geweke.diag(mcmc(results$samples.tau2[, tt]))$z) } summary.hyper[N+1,1:3] <- c(mean(results$samples.sig2), quantile(results$samples.sig2, c(0.025, 0.975))) summary.hyper[N+1, 4:7] <- c(n.keep, 100, effectiveSize(mcmc(results$samples.sig2)), geweke.diag(mcmc(results$samples.sig2))$z) if(!fix.rho.S) { summary.hyper[N+2, 1:3] <- c(mean(results$samples.rho), quantile(results$samples.rho, c(0.025, 0.975))) summary.hyper[N+2, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.S"], effectiveSize(results$samples.rho), geweke.diag(results$samples.rho)$z) }else { summary.hyper[N+2, 1:3] <- c(rho, rho, rho) summary.hyper[N+2, 4:7] <- rep(NA, 4) } if(!fix.rho.T) { summary.hyper[N+3, 1:3] <- c(mean(results$samples.lambda), quantile(results$samples.lambda, c(0.025, 0.975))) summary.hyper[N+3, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.T"], effectiveSize(results$samples.lambda), geweke.diag(results$samples.lambda)$z) }else { summary.hyper[N+3, 1:3] <- c(lambda, lambda, lambda) summary.hyper[N+3, 4:7] <- rep(NA, 4) } rownames(summary.hyper) <- c(paste("tau2.", c(1:N), sep = ""), "tau2.T", "rho.S","rho.T") summary.results <- rbind(summary.beta, summary.hyper) summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4) summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1) }else { #### If n.chains > 1 ## Compute the acceptance rates accept.final <- rep(NA, 5) names(accept.final) <- c("beta", "phi", "delta", "rho.S", "rho.T") accept.temp <- lapply(results, function(l) l[["accept"]]) accept.temp2 <- do.call(what=rbind, args=accept.temp) accept.final[1] <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2]) accept.final[2] <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4]) accept.final[3] <- 100 * sum(accept.temp2[ ,7]) / sum(accept.temp2[ ,8]) if(!fix.rho.S) accept.final[4] <- 100 * sum(accept.temp2[ ,5]) / sum(accept.temp2[ ,6]) if(!fix.rho.T) accept.final[5] <- 100 * sum(accept.temp2[ ,9]) / sum(accept.temp2[ ,10]) ## Extract the samples into separate matrix and list objects samples.beta.list 
<- lapply(results, function(l) l[["samples.beta"]]) samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list) samples.phi.list <- lapply(results, function(l) l[["samples.phi"]]) samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list) samples.delta.list <- lapply(results, function(l) l[["samples.delta"]]) samples.delta.matrix <- do.call(what=rbind, args=samples.delta.list) if(!fix.rho.S) { samples.rho.list <- lapply(results, function(l) l[["samples.rho"]]) samples.rho.matrix <- do.call(what=rbind, args=samples.rho.list) } if(!fix.rho.T) { samples.lambda.list <- lapply(results, function(l) l[["samples.lambda"]]) samples.lambda.matrix <- do.call(what=rbind, args=samples.lambda.list) } samples.tau2.list <- lapply(results, function(l) l[["samples.tau2"]]) samples.tau2.matrix <- do.call(what=rbind, args=samples.tau2.list) samples.sig2.list <- lapply(results, function(l) l[["samples.sig2"]]) samples.sig2.matrix <- do.call(what=rbind, args=samples.sig2.list) samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]]) samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list) samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]]) samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list) ## Compute the fitted deviance mean.beta <- apply(samples.beta.matrix,2,mean) regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE) mean.phi <- matrix(apply(samples.phi.matrix, 2, mean), nrow=K, ncol=N) mean.delta <- apply(samples.delta.matrix,2,mean) delta.mat <- matrix(mean.delta, nrow=K, ncol=N, byrow=TRUE) fitted.mean <- as.numeric(exp(offset.mat + mean.phi + regression.mat + delta.mat)) deviance.fitted <- -2 * sum(dpois(x=as.numeric(Y), lambda=fitted.mean, log=TRUE), na.rm=TRUE) modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted) ## Create the fitted values and residuals fitted.values <- apply(samples.fitted.matrix, 2, mean) response.residuals <- as.numeric(Y) - fitted.values pearson.residuals <- response.residuals /sqrt(fitted.values) residuals <- data.frame(response=response.residuals, pearson=pearson.residuals) ## Transform the parameters back to the original covariate scale. 
samples.beta.list <- samples.beta.list for(j in 1:n.chains) { samples.beta.list[[j]] <- common.betatransform(samples.beta.list[[j]], X.indicator, X.mean, X.sd, p, FALSE) } samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list) ## Create MCMC objects beta.mcmc <- mcmc.list(lapply(samples.beta.list, mcmc)) phi.mcmc <- mcmc.list(lapply(samples.phi.list, mcmc)) delta.mcmc <- mcmc.list(lapply(samples.delta.list, mcmc)) fitted.mcmc <- mcmc.list(lapply(samples.fitted.list, mcmc)) tau2.mcmc <- mcmc.list(lapply(samples.tau2.list, mcmc)) sig2.mcmc <- mcmc.list(lapply(samples.sig2.list, mcmc)) if(fix.rho.S & fix.rho.T) { rhoext.mcmc <- NA }else if(fix.rho.S & !fix.rho.T) { for(j in 1:n.chains) { colnames(samples.lambda.list[[j]]) <- c("rho.T") } rhoext.mcmc <- mcmc.list(lapply(samples.lambda.list, mcmc)) }else if(!fix.rho.S & fix.rho.T) { for(j in 1:n.chains) { colnames(samples.rho.list[[j]]) <- c("rho.S") } rhoext.mcmc <- mcmc.list(lapply(samples.rho.list, mcmc)) }else { rho.temp <- as.list(rep(NA, n.chains)) for(j in 1:n.chains) { rho.temp[[j]] <- cbind(samples.rho.list[[j]], samples.lambda.list[[j]]) colnames(rho.temp[[j]]) <- c("rho.S", "rho.T") } rhoext.mcmc <- mcmc.list(lapply(rho.temp, mcmc)) } samples <- list(beta=beta.mcmc, phi=phi.mcmc, delta=delta.mcmc, rho=rhoext.mcmc, tau2=tau2.mcmc, tau2.T=sig2.mcmc, fitted=fitted.mcmc) ## create a summary object n.keep <- floor((n.sample - burnin)/thin) * n.chains summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975)))) summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2]) rownames(summary.beta) <- colnames(X) colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)") summary.hyper <- array(NA, c(3 + N, 7)) for (tt in 1:N) { summary.hyper[tt,1:3] <- c(mean(samples.tau2.matrix[, tt]), quantile(samples.tau2.matrix[, tt], c(0.025, 0.975))) summary.hyper[tt, 4:7] <- c(n.keep, 100, effectiveSize(tau2.mcmc[ ,tt]), gelman.diag(tau2.mcmc[ ,tt])$psrf[ ,2]) } summary.hyper[N+1,1:3] <- c(mean(samples.sig2.matrix), quantile(samples.sig2.matrix, c(0.025, 0.975))) summary.hyper[N+1, 4:7] <- c(n.keep, 100, effectiveSize(sig2.mcmc), gelman.diag(sig2.mcmc)$psrf[ ,2]) if(!fix.rho.S) { temp <- mcmc.list(lapply(samples.rho.list, mcmc)) summary.hyper[N+2, 1:3] <- c(mean(samples.rho.matrix), quantile(samples.rho.matrix, c(0.025, 0.975))) summary.hyper[N+2, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.S"], effectiveSize(temp), gelman.diag(temp)$psrf[ ,2]) }else { summary.hyper[N+2, 1:3] <- c(rho, rho, rho) summary.hyper[N+2, 4:7] <- rep(NA, 4) } if(!fix.rho.T) { temp <- mcmc.list(lapply(samples.lambda.list, mcmc)) summary.hyper[N+3, 1:3] <- c(mean(samples.lambda.matrix), quantile(samples.lambda.matrix, c(0.025, 0.975))) summary.hyper[N+3, 4:7] <- c(n.keep, accept.final[names(accept.final)=="rho.T"], effectiveSize(temp), gelman.diag(temp)$psrf[ ,2]) }else { summary.hyper[N+3, 1:3] <- c(lambda, lambda, lambda) summary.hyper[N+3, 4:7] <- rep(NA, 4) } rownames(summary.hyper) <- c(paste("tau2.", c(1:N), sep = ""), "tau2.T", "rho.S","rho.T") summary.results <- rbind(summary.beta, summary.hyper) summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4) summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1) } ################################### #### Compile and return the results ################################### model.string 
<- c("Likelihood model - Poisson (log link function)", "\nLatent structure model - An overall time trend with temporal specific spatial effects\n") n.total <- floor((n.sample - burnin) / thin) * n.chains mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains) names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains") results.final <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X) class(results.final) <- "CARBayesST" if(verbose) { b<-proc.time() cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n") }else {} return(results.final) }
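##################################################################
#### Illustrative usage sketch (not part of the package source)
##################################################################
#### A minimal, hedged example of how poisson.CARsepspatial() might be called,
#### assuming the compiled CARBayesST package is installed and loaded. The
#### simulated data, adjacency matrix and MCMC settings are arbitrary
#### illustrations. Leaving rho.S and rho.T as NULL estimates both dependence
#### parameters; supplying a value in [0, 1] fixes the corresponding parameter
#### instead. Wrapped in if(FALSE) so the file still sources cleanly.
if(FALSE)
{
## A line of K = 4 areas observed over N = 10 time periods
K <- 4
N <- 10
W.line <- matrix(0, K, K)
W.line[abs(row(W.line) - col(W.line)) == 1] <- 1
dat <- data.frame(x = rnorm(K * N))
dat$Y <- rpois(K * N, lambda = exp(2 + 0.3 * dat$x))

## Estimate rho.S and rho.T, running 2 chains in series
fit1 <- poisson.CARsepspatial(formula = Y ~ x, data = dat, W = W.line,
                              burnin = 2000, n.sample = 10000, thin = 5,
                              n.chains = 2)

## Fix the temporal dependence parameter rho.T at 1 rather than estimating it
fit2 <- poisson.CARsepspatial(formula = Y ~ x, data = dat, W = W.line,
                              burnin = 2000, n.sample = 10000, thin = 5,
                              rho.T = 1)
fit1$summary.results
}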
#### File: /scratch/gouwar.j/cran-all/cranData/CARBayesST/R/poisson.CARsepspatial.R
poisson.CARsepspatialMCMC <- function(Y, offset, X.standardised, W, rho, lambda, fix.rho.S, fix.rho.T, K, N, N.all, p, burnin, n.sample, thin, MALA, n.beta.block, list.block, prior.mean.beta, prior.var.beta, prior.tau2, verbose, chain) { #Rcpp::sourceCpp("src/CARBayesST.cpp") #source("R/common.functions.R") #library(spdep) #library(truncnorm) # # ############################################ #### Set up the key elements before sampling ############################################ #### Generate the initial parameter values mod.glm <- glm(Y~X.standardised-1, offset=offset, family="quasipoisson") beta.mean <- mod.glm$coefficients beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled)) beta <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd) log.Y <- log(Y) log.Y[Y==0] <- -0.1 res.temp <- log.Y - X.standardised %*% beta - offset res.sd <- sd(res.temp, na.rm=TRUE)/5 phi <- rnorm(n=N.all, mean=0, sd = res.sd) phi.mat <- matrix(phi, nrow=K, ncol=N, byrow=FALSE) delta <- rnorm(n=N, mean=0, sd = res.sd) tau2 <- apply(phi.mat, 2, var) / 10 sig2 <- var(delta)/10 #### Matrix versions offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE) regression.mat <- matrix(X.standardised %*% beta, nrow=K, ncol=N, byrow=FALSE) Y.mat <- matrix(Y, nrow=K, ncol=N, byrow=FALSE) delta.mat <- matrix(delta, nrow=K, ncol=N, byrow=TRUE) #### Matrices to store samples n.keep <- floor((n.sample - burnin)/thin) samples.beta <- array(NA, c(n.keep, p)) samples.phi <- array(NA, c(n.keep, N.all)) samples.tau2 <- array(NA, c(n.keep, N)) samples.sig2 <- array(NA, c(n.keep, 1)) if(!fix.rho.S) samples.rho <- array(NA, c(n.keep, 1)) if(!fix.rho.T) samples.lambda <- array(NA, c(n.keep, 1)) samples.delta <- array(NA, c(n.keep, N)) samples.fitted <- array(NA, c(n.keep, N.all)) samples.loglike <- array(NA, c(n.keep, N.all)) #### Specify the Metropolis quantities accept <- rep(0,10) proposal.sd.phi <- 0.1 proposal.sd.rho <- 0.05 proposal.sd.beta <- 0.01 proposal.sd.delta <- 0.05 proposal.sd.lambda <- 0.02 tau2.shape <- prior.tau2[1] + K/2 sig2.shape <- prior.tau2[1] + N/2 #### CAR quantities W.quants <- common.Wcheckformat.leroux(W) K <- W.quants$n N <- N.all / K W <- W.quants$W W.triplet <- W.quants$W.triplet W.n.triplet <- W.quants$n.triplet W.triplet.sum <- W.quants$W.triplet.sum n.neighbours <- W.quants$n.neighbours W.begfin <- W.quants$W.begfin #### Spatial determinant if(!fix.rho.S) { Wstar <- diag(apply(W,1,sum)) - W Wstar.eigen <- eigen(Wstar) Wstar.val <- Wstar.eigen$values det.Q.W <- 0.5 * sum(log((rho * Wstar.val + (1-rho)))) }else {} #### .T quantities D <-array(0, c(N,N)) for(i in 1:N) { for(j in 1:N) { if(abs((i-j))==1) D[i,j] <- 1 } } D.triplet <- c(NA, NA, NA) for(i in 1:N) { for(j in 1:N) { if(D[i,j]>0) { D.triplet <- rbind(D.triplet, c(i,j, D[i,j])) }else{} } } D.triplet <- D.triplet[-1, ] D.n.triplet <- nrow(D.triplet) D.triplet.sum <- tapply(D.triplet[ ,3], D.triplet[ ,1], sum) D.neighbours <- tapply(D.triplet[ ,3], D.triplet[ ,1], length) D.begfin <- array(NA, c(N, 2)) temp <- 1 for(i in 1:N) { D.begfin[i, ] <- c(temp, (temp + D.neighbours[i]-1)) temp <- temp + D.neighbours[i] } if(!fix.rho.T) { Dstar <- diag(apply(D,1,sum)) - D Dstar.eigen <- eigen(Dstar) Dstar.val <- Dstar.eigen$values det.Q.D <- 0.5 * sum(log((lambda * Dstar.val + (1-lambda)))) }else {} #### Check for islands W.list<- mat2listw(W, style = "B") W.nb <- W.list$neighbours W.islands <- n.comp.nb(W.nb) islands <- W.islands$comp.id n.islands <- max(W.islands$nc) n.island1 <- length(which(islands==1)) if(rho==1) tau2.shape <- prior.tau2[1] + 0.5 * 
(K-n.islands) if(lambda==1) sig2.shape <- prior.tau2[1] + 0.5 * (N-1) #### Start timer if(verbose) { cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ") progressBar <- txtProgressBar(style = 3) percentage.points<-round((1:100/100)*n.sample) }else { percentage.points<-round((1:100/100)*n.sample) } ############################## #### Generate the MCMC samples ############################## #### Create the MCMC samples for(j in 1:n.sample) { ################### ## Sample from beta ################### offset.temp <- as.numeric(offset.mat + phi.mat + delta.mat) if(MALA) { temp <- poissonbetaupdateMALA(X.standardised, N.all, p, beta, offset.temp, Y, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block) }else { temp <- poissonbetaupdateRW(X.standardised, N.all, p, beta, offset.temp, Y, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta, list.block) } beta <- temp[[1]] accept[1] <- accept[1] + temp[[2]] accept[2] <- accept[2] + n.beta.block regression.mat <- matrix(X.standardised %*% beta, nrow=K, ncol=N, byrow=FALSE) #################### ## Sample from phi #################### phi.offset <- offset.mat + regression.mat + delta.mat den.offset <- rho * W.triplet.sum + 1 - rho temp1 <- poissonsrecarupdateRW(W.triplet, W.begfin, W.triplet.sum, K, N, phi.mat, rho, Y.mat, proposal.sd.phi, phi.offset, den.offset, tau2) phi.temp <- temp1[[1]] phi.mean <- apply(phi.temp,2,mean) if(rho<1) { phi <- as.numeric(phi.temp) - kronecker(phi.mean, rep(1,K)) }else { phi.temp[which(islands==1), ] <- phi.temp[which(islands==1), ] - matrix(kronecker(phi.mean, rep(1,n.island1)), ncol=N, byrow=F) phi <- as.numeric(phi.temp) } phi.mat <- matrix(phi, nrow=K, ncol=N, byrow=FALSE) accept[3] <- accept[3] + temp1[[2]] accept[4] <- accept[4] + N.all ##################### ## Samples from delta ##################### delta.offset <- t(offset.mat + regression.mat + phi.mat) temp2 <- poissoncarupdateRW(D.triplet, D.begfin, D.triplet.sum, N, delta, sig2, t(Y.mat), proposal.sd.delta, lambda, delta.offset, K, rep(1,K)) delta <- temp2[[1]] delta <- delta - mean(delta) delta.mat <- matrix(delta, nrow = K, ncol = N, byrow = TRUE) accept[7] <- accept[7] + temp2[[2]] accept[8] <- accept[8] + N #################### ## Samples from tau2 #################### tau2.temp <- tauquadformcompute2(W.triplet, W.triplet.sum, W.n.triplet, K, N, phi.mat, rho) tau2 <- tau2compute(tau2, tau2.temp, tau2.shape, prior.tau2[2], N) #################### ## Samples from sig2 #################### temp2.delta <- quadform(D.triplet, D.triplet.sum, D.n.triplet, N, delta, delta, lambda) sig2.scale <- temp2.delta + prior.tau2[2] sig2 <- 1 / rgamma(1, sig2.shape, scale=(1/sig2.scale)) ################## ## Sample from rho ################## if(!fix.rho.S) { temp3 <- rhoquadformcompute(W.triplet, W.triplet.sum, W.n.triplet, K, N, phi.mat, rho, tau2) proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho) temp4 <- rhoquadformcompute(W.triplet, W.triplet.sum, W.n.triplet, K, N, phi.mat, proposal.rho, tau2) det.Q.W.proposal <- 0.5 * sum(log((proposal.rho * Wstar.val + (1-proposal.rho)))) logprob.current <- N * det.Q.W - temp3 logprob.proposal <- N * det.Q.W.proposal - temp4 hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho)) prob <- exp(logprob.proposal - logprob.current + hastings) if(prob > runif(1)) { rho <- proposal.rho det.Q.W <- det.Q.W.proposal accept[5] <- 
accept[5] + 1 }else { } accept[6] <- accept[6] + 1 }else {} ##################### ## Sample from lambda ##################### if(!fix.rho.T) { proposal.lambda <- rtruncnorm(n=1, a=0, b=1, mean=lambda, sd=proposal.sd.lambda) temp3 <- quadform(D.triplet, D.triplet.sum, D.n.triplet, N, delta, delta, proposal.lambda) det.Q.proposal <- 0.5 * sum(log((proposal.lambda * Dstar.val + (1-proposal.lambda)))) logprob.current <- det.Q.D - temp2.delta / sig2 logprob.proposal <- det.Q.proposal - temp3 / sig2 hastings <- log(dtruncnorm(x=lambda, a=0, b=1, mean=proposal.lambda, sd=proposal.sd.lambda)) - log(dtruncnorm(x=proposal.lambda, a=0, b=1, mean=lambda, sd=proposal.sd.lambda)) prob <- exp(logprob.proposal - logprob.current + hastings) #### Accept or reject the proposal if(prob > runif(1)) { lambda <- proposal.lambda det.Q.D <- det.Q.proposal accept[9] <- accept[9] + 1 }else { } accept[10] <- accept[10] + 1 }else {} ######################### ## Calculate the deviance ######################### fitted <- as.numeric(exp(offset.mat + regression.mat + phi.mat + delta.mat)) loglike <- dpois(x=as.numeric(Y), lambda=fitted, log=TRUE) ################### ## Save the results ################### if(j > burnin & (j-burnin)%%thin==0) { ele <- (j - burnin) / thin samples.beta[ele, ] <- beta samples.phi[ele, ] <- as.numeric(phi) if(!fix.rho.S) samples.rho[ele, ] <- rho if(!fix.rho.T) samples.lambda[ele, ] <- lambda samples.tau2[ele, ] <- tau2 samples.sig2[ele, ] <- sig2 samples.delta[ele, ] <- delta samples.fitted[ele, ] <- fitted samples.loglike[ele, ] <- loglike }else { } ######################################## ## Self tune the acceptance probabilties ######################################## if(ceiling(j/100)==floor(j/100) & j < burnin) { #### Update the proposal sds if(p>2) { proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 40, 50) }else { proposal.sd.beta <- common.accceptrates1(accept[1:2], proposal.sd.beta, 30, 40) } proposal.sd.phi <- common.accceptrates1(accept[3:4], proposal.sd.phi, 40, 50) proposal.sd.delta <- common.accceptrates1(accept[7:8], proposal.sd.delta, 40, 50) if(!fix.rho.S) proposal.sd.rho <- common.accceptrates2(accept[5:6], proposal.sd.rho, 40, 50, 0.5) if(!fix.rho.T) proposal.sd.lambda <- common.accceptrates2(accept[9:10], proposal.sd.lambda, 40, 50, 0.5) accept <- rep(0,10) }else {} ################################ ## print progress to the console ################################ if(j %in% percentage.points & verbose) { setTxtProgressBar(progressBar, j/n.sample) } } ############################################ #### Return the results to the main function ############################################ #### Compile the results if(fix.rho.S) samples.rho <- NA if(fix.rho.T) samples.lambda <- NA chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.delta=samples.delta, samples.lambda=samples.lambda, samples.tau2=samples.tau2, samples.rho=samples.rho, samples.sig2=samples.sig2, samples.loglike=samples.loglike, samples.fitted=samples.fitted, accept=accept) #### Return the results return(chain.results) }
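##################################################################
#### Illustrative sketch (not part of the package source)
##################################################################
#### The sampler above constructs a first-order temporal neighbourhood matrix D,
#### in which time periods i and j are neighbours when |i - j| = 1, and a
#### triplet (row, column, value) representation of it for the CAR quadratic
#### form routines. The snippet below repeats that construction for N = 4 so
#### the resulting objects can be inspected; the expected output is shown as
#### comments. Wrapped in if(FALSE) so the file still sources cleanly.
if(FALSE)
{
N <- 4
D <- array(0, c(N, N))
for(i in 1:N)
{
    for(j in 1:N)
    {
    if(abs(i - j) == 1) D[i, j] <- 1
    }
}
D
##      [,1] [,2] [,3] [,4]
## [1,]    0    1    0    0
## [2,]    1    0    1    0
## [3,]    0    1    0    1
## [4,]    0    0    1    0

## Triplet form: one row per non-zero entry of D
D.triplet <- c(NA, NA, NA)
for(i in 1:N)
{
    for(j in 1:N)
    {
    if(D[i, j] > 0) D.triplet <- rbind(D.triplet, c(i, j, D[i, j]))
    }
}
D.triplet <- D.triplet[-1, ]
D.triplet.sum <- tapply(D.triplet[ , 3], D.triplet[ , 1], sum)   ## neighbours per period
D.triplet.sum
## 1 2 3 4
## 1 2 2 1
}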
#### File: /scratch/gouwar.j/cran-all/cranData/CARBayesST/R/poisson.CARsepspatialMCMC.R
poisson.MVCARar1 <- function(formula, data=NULL, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.Sigma.df=NULL, prior.Sigma.scale=NULL, rho.S=NULL, rho.T=NULL, MALA=TRUE, verbose=TRUE) { ############################################## #### Format the arguments and check for errors ############################################## #### Verbose a <- common.verbose(verbose) #### Check on MALA argument if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE) if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE) #### Frame object frame.results <- common.frame.MVST(formula, data, "poisson") NK <- frame.results$n p <- frame.results$p X <- frame.results$X X.standardised <- frame.results$X.standardised X.sd <- frame.results$X.sd X.mean <- frame.results$X.mean X.indicator <- frame.results$X.indicator offset <- frame.results$offset Y <- frame.results$Y N.all <- length(Y) J <- ncol(Y) which.miss <- frame.results$which.miss n.miss <- N.all - sum(which.miss) #### W matrix if(!is.matrix(W)) stop("W is not a matrix.", call.=FALSE) W.quants <- common.Wcheckformat.leroux(W) K <- W.quants$n N <- NK / K if(ceiling(N)!= floor(N)) stop("The number of data points in Y divided by the number of rows in W is not a whole number.", call.=FALSE) #### Create a missing list if(n.miss>0) { miss.locator <- array(NA, c(n.miss, 2)) colnames(miss.locator) <- c("row", "column") locations <- which(which.miss==0) miss.locator[ ,1] <- ceiling(locations/J) miss.locator[ ,2] <- locations - (miss.locator[ ,1]-1) * J }else { miss.locator <- NA } #### Check on the rho arguments if(is.null(rho.S)) { rho <- runif(1) fix.rho.S <- FALSE }else { rho <- rho.S fix.rho.S <- TRUE } if(!is.numeric(rho)) stop("rho.S is fixed but is not numeric.", call.=FALSE) if(rho<0 ) stop("rho.S is outside the range [0, 1].", call.=FALSE) if(rho>1 ) stop("rho.S is outside the range [0, 1].", call.=FALSE) if(is.null(rho.T)) { alpha <- runif(1) fix.rho.T <- FALSE }else { alpha <- rho.T fix.rho.T <- TRUE } if(!is.numeric(alpha)) stop("rho.T is fixed but is not numeric.", call.=FALSE) if(length(alpha)!=1) stop("rho.T is fixed but is not of length 1.", call.=FALSE) #### Priors if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p) if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p) if(is.null(prior.Sigma.df)) prior.Sigma.df <- 2 if(is.null(prior.Sigma.scale)) prior.Sigma.scale <- rep(100000, J) prior.beta.check(prior.mean.beta, prior.var.beta, p) if(!is.numeric(prior.Sigma.scale)) stop("prior.Sigma.scale has non-numeric values.", call.=FALSE) if(sum(is.na(prior.Sigma.scale))!=0) stop("prior.Sigma.scale has missing values.", call.=FALSE) #### Compute the blocking structure for beta block.temp <- common.betablock(p) beta.beg <- block.temp[[1]] beta.fin <- block.temp[[2]] n.beta.block <- block.temp[[3]] list.block <- as.list(rep(NA, n.beta.block*2)) for(r in 1:n.beta.block) { list.block[[r]] <- beta.beg[r]:beta.fin[r]-1 list.block[[r+n.beta.block]] <- length(list.block[[r]]) } #### MCMC quantities - burnin, n.sample, thin common.burnin.nsample.thin.check(burnin, n.sample, thin) ######################## #### Run the MCMC chains ######################## if(n.chains==1) { #### Only 1 chain results <- poisson.MVCARar1MCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, alpha=alpha, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, NK=NK, J=J, N.all=N.all, p=p, miss.locator=miss.locator, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, 
n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, verbose=verbose, chain=1) }else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1) { #### Multiple chains in series results <- as.list(rep(NA, n.chains)) for(i in 1:n.chains) { results[[i]] <- poisson.MVCARar1MCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, alpha=alpha, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, NK=NK, J=J, N.all=N.all, p=p, miss.locator=miss.locator, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, verbose=verbose, chain=i) } }else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores)) { #### Multiple chains in parallel results <- as.list(rep(NA, n.chains)) if(verbose) { compclust <- makeCluster(n.cores, outfile="CARBayesSTprogress.txt") cat("The current progress of the model fitting algorithm has been output to CARBayesSTprogress.txt in the working directory") }else { compclust <- makeCluster(n.cores) } results <- clusterCall(compclust, fun=poisson.MVCARar1MCMC, Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, alpha=alpha, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, NK=NK, J=J, N.all=N.all, p=p, miss.locator=miss.locator, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, verbose=verbose, chain="all") stopCluster(compclust) }else { stop("n.chains or n.cores are not positive integers.", call.=FALSE) } #### end timer if(verbose) { cat("\nSummarising results.\n") }else {} ################################### #### Summarise and save the results ################################### if(n.chains==1) { #### If n.chains==1 ## Compute the acceptance rates accept.final <- rep(NA, 5) names(accept.final) <- c("beta", "phi", "rho.S", "rho.T", "Sigma") accept.final[1] <- 100 * sum(results$accept.beta[1:J]) / sum(results$accept.beta[(J+1):(2*J)]) accept.final[2] <- 100 * results$accept[1] / results$accept[2] if(!fix.rho.S) accept.final[3] <- 100 * results$accept[3] / results$accept[4] if(!fix.rho.T) accept.final[4] <- 100 accept.final[5] <- 100 ## Compute the fitted deviance mean.beta <- matrix(apply(results$samples.beta, 2, mean), nrow=p, ncol=J, byrow=F) mean.phi <- matrix(apply(results$samples.phi, 2, mean), nrow=NK, ncol=J, byrow=T) fitted.mean <- exp(X.standardised %*% mean.beta + mean.phi + offset) deviance.fitted <- -2 * sum(dpois(x=as.numeric(t(Y)), lambda=as.numeric(t(fitted.mean)), log=TRUE), na.rm=TRUE) modelfit <- common.modelfit(results$samples.loglike, deviance.fitted) ## Create the fitted values and residuals fitted.values <- matrix(apply(results$samples.fitted, 2, mean), nrow=NK, ncol=J, byrow=T) response.residuals <- Y - fitted.values pearson.residuals <- response.residuals / sqrt(fitted.values) residuals <- list(response=response.residuals, pearson=pearson.residuals) ## Transform the parameters back to the original covariate scale samples.beta.orig <- results$samples.beta for(r in 1:J) { samples.beta.orig[ ,((r-1)*p+1):(r*p)] <- common.betatransform(results$samples.beta[ 
,((r-1)*p+1):(r*p) ], X.indicator, X.mean, X.sd, p, FALSE) } ## Create the samples object if(fix.rho.S & fix.rho.T) { samples.rhoext <- NA }else if(fix.rho.S & !fix.rho.T) { samples.rhoext <- results$samples.alpha colnames(samples.rhoext) <- c("rho.T") }else if(!fix.rho.S & fix.rho.T) { samples.rhoext <- results$samples.rho names(samples.rhoext) <- "rho.S" }else { samples.rhoext <- cbind(results$samples.rho, results$samples.alpha) colnames(samples.rhoext) <- c("rho.S", "rho.T") } samples <- list(beta=mcmc(samples.beta.orig), phi=mcmc(results$samples.phi), Sigma=results$samples.Sigma, rho=mcmc(samples.rhoext), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y)) ## Create a summary object n.keep <- floor((n.sample - burnin)/thin) summary.beta <- t(rbind(apply(samples.beta.orig, 2, mean), apply(samples.beta.orig, 2, quantile, c(0.025, 0.975)))) summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z) col.name <- rep(NA, p*(J-1)) if(is.null(colnames(Y))) { for(r in 1:J) { col.name[((r-1)*p+1):(r*p)] <- paste("Variable ", r, " - ", colnames(X), sep="") } }else { for(r in 1:J) { col.name[((r-1)*p+1):(r*p)] <- paste(colnames(Y)[r], " - ", colnames(X), sep="") } } rownames(summary.beta) <- col.name colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag") summary.hyper <- array(NA, c((J+2) ,7)) for(r in 1:J) { summary.hyper[r, 1] <- mean(results$samples.Sigma[ ,r,r]) summary.hyper[r, 2:3] <- quantile(results$samples.Sigma[ ,r,r], c(0.025, 0.975)) summary.hyper[r, 4] <- n.keep summary.hyper[r, 5] <- 100 summary.hyper[r, 6] <- effectiveSize(results$samples.Sigma[ ,r,r]) summary.hyper[r, 7] <- geweke.diag(results$samples.Sigma[ ,r,r])$z } if(!fix.rho.S) { summary.hyper[(J+1), 1:3] <- c(mean(results$samples.rho), quantile(results$samples.rho, c(0.025, 0.975))) summary.hyper[(J+1), 4:5] <- c(n.keep, accept.final[names(accept.final)=="rho.S"]) summary.hyper[(J+1), 6:7] <- c(effectiveSize(results$samples.rho), geweke.diag(results$samples.rho)$z) }else { summary.hyper[(J+1), 1:3] <- c(rho, rho, rho) summary.hyper[(J+1), 4:5] <- rep(NA, 2) summary.hyper[(J+1), 6:7] <- rep(NA, 2) } if(!fix.rho.T) { summary.hyper[(J+2), 1:3] <- c(mean(results$samples.alpha), quantile(results$samples.alpha, c(0.025, 0.975))) summary.hyper[(J+2), 4:5] <- c(n.keep, accept.final[names(accept.final)=="rho.T"]) summary.hyper[(J+2), 6:7] <- c(effectiveSize(results$samples.alpha), geweke.diag(results$samples.alpha)$z) }else { summary.hyper[(J+2), 1:3] <- c(alpha, alpha, alpha) summary.hyper[(J+2), 4:5] <- rep(NA, 2) summary.hyper[(J+2), 6:7] <- rep(NA, 2) } summary.results <- rbind(summary.beta, summary.hyper) rownames(summary.results)[((J*p)+1): nrow(summary.results)] <- c(paste(rep("Sigma",J), 1:J, 1:J, sep=""), "rho.S", "rho.T") summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4) summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1) }else { #### If n.chains > 1 ## Compute the acceptance rates accept.final <- rep(NA, 5) names(accept.final) <- c("beta", "phi", "rho.S", "rho.T", "Sigma") accept.final[5] <- 100 accept.temp <- lapply(results, function(l) l[["accept.beta"]]) accept.temp2 <- do.call(what=rbind, args=accept.temp) accept.final[1] <- 100 * sum(accept.temp2[ ,1:J]) / sum(accept.temp2[ ,(J+1):(2*J)]) accept.temp <- lapply(results, function(l) l[["accept"]]) accept.temp2 <- do.call(what=rbind, args=accept.temp) accept.final[2] <- 100 * 
sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2]) if(!fix.rho.S) accept.final[3] <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4]) if(!fix.rho.T) accept.final[4] <- 100 ## Extract the samples into separate matrix and list objects samples.beta.list <- lapply(results, function(l) l[["samples.beta"]]) samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list) samples.phi.list <- lapply(results, function(l) l[["samples.phi"]]) samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list) if(!fix.rho.S) { samples.rho.list <- lapply(results, function(l) l[["samples.rho"]]) samples.rho.matrix <- do.call(what=rbind, args=samples.rho.list) } if(!fix.rho.T) { samples.alpha.list <- lapply(results, function(l) l[["samples.alpha"]]) samples.alpha.matrix <- do.call(what=rbind, args=samples.alpha.list) } samples.Sigma.list <- lapply(results, function(l) l[["samples.Sigma"]]) samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]]) samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list) samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]]) samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list) if(n.miss>0) samples.Y.list <- lapply(results, function(l) l[["samples.Y"]]) ## Compute the fitted deviance mean.beta <- matrix(apply(samples.beta.matrix, 2, mean), nrow=p, ncol=J, byrow=F) mean.phi <- matrix(apply(samples.phi.matrix, 2, mean), nrow=NK, ncol=J, byrow=T) fitted.mean <- exp(X.standardised %*% mean.beta + mean.phi + offset) deviance.fitted <- -2 * sum(dpois(x=as.numeric(t(Y)), lambda=as.numeric(t(fitted.mean)), log=TRUE), na.rm=TRUE) modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted) ## Create the fitted values and residuals fitted.values <- matrix(apply(samples.fitted.matrix, 2, mean), nrow=NK, ncol=J, byrow=T) response.residuals <- Y - fitted.values pearson.residuals <- response.residuals / sqrt(fitted.values) residuals <- list(response=response.residuals, pearson=pearson.residuals) ## Transform the parameters back to the original covariate scale. 
samples.beta.list <- samples.beta.list for(j in 1:n.chains) { for(r in 1:J) { samples.beta.list[[j]][ ,((r-1)*p+1):(r*p)] <- common.betatransform(samples.beta.list[[j]][ ,((r-1)*p+1):(r*p) ], X.indicator, X.mean, X.sd, p, FALSE) } } samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list) ## Create MCMC objects beta.mcmc <- mcmc.list(lapply(samples.beta.list, mcmc)) phi.mcmc <- mcmc.list(lapply(samples.phi.list, mcmc)) fitted.mcmc <- mcmc.list(lapply(samples.fitted.list, mcmc)) if(n.miss>0) { Y.mcmc <- mcmc.list(lapply(samples.Y.list, mcmc)) }else { Y.mcmc <- NA } if(fix.rho.S & fix.rho.T) { rhoext.mcmc <- NA }else if(fix.rho.S & !fix.rho.T) { for(j in 1:n.chains) { colnames(samples.alpha.list[[j]]) <- c("rho.T") } rhoext.mcmc <- mcmc.list(lapply(samples.alpha.list, mcmc)) }else if(!fix.rho.S & fix.rho.T) { for(j in 1:n.chains) { colnames(samples.rho.list[[j]]) <- c("rho.S") } rhoext.mcmc <- mcmc.list(lapply(samples.rho.list, mcmc)) }else { rho.temp <- as.list(rep(NA, n.chains)) for(j in 1:n.chains) { rho.temp[[j]] <- cbind(samples.rho.list[[j]], samples.alpha.list[[j]]) colnames(rho.temp[[j]]) <- c("rho.S", "rho.T") } rhoext.mcmc <- mcmc.list(lapply(rho.temp, mcmc)) } samples <- list(beta=beta.mcmc, phi=phi.mcmc, rho=rhoext.mcmc, Sigma=samples.Sigma.list, fitted=fitted.mcmc, Y=Y.mcmc) ## create a summary object n.keep <- floor((n.sample - burnin)/thin) * n.chains summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975)))) summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2]) col.name <- rep(NA, p*(J-1)) if(is.null(colnames(Y))) { for(r in 1:J) { col.name[((r-1)*p+1):(r*p)] <- paste("Variable ", r, " - ", colnames(X), sep="") } }else { for(r in 1:J) { col.name[((r-1)*p+1):(r*p)] <- paste(colnames(Y)[r], " - ", colnames(X), sep="") } } rownames(summary.beta) <- col.name colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)") summary.hyper <- array(NA, c((J+2) ,7)) for(r in 1:J) { temp <- NA temp2 <- as.list(rep(NA, n.chains)) for(v in 1:n.chains) { temp <- c(temp, samples.Sigma.list[[v]][ ,r,r]) temp2[[v]] <- mcmc(samples.Sigma.list[[v]][ ,r,r]) } temp <- temp[-1] summary.hyper[r, 1] <- mean(temp) summary.hyper[r, 2:3] <- quantile(temp, c(0.025, 0.975)) summary.hyper[r, 4] <- n.keep summary.hyper[r, 5] <- 100 summary.hyper[r, 6] <- effectiveSize(mcmc.list(temp2)) summary.hyper[r, 7] <- gelman.diag(mcmc.list(temp2))$psrf[ ,2] } if(!fix.rho.S) { temp <- mcmc.list(lapply(samples.rho.list, mcmc)) summary.hyper[(J+1), 1:3] <- c(mean(samples.rho.matrix), quantile(samples.rho.matrix, c(0.025, 0.975))) summary.hyper[(J+1), 4:5] <- c(n.keep, accept.final[names(accept.final)=="rho.S"]) summary.hyper[(J+1), 6:7] <- c(effectiveSize(temp), gelman.diag(temp)$psrf[ ,2]) }else { summary.hyper[(J+1), 1:3] <- c(rho, rho, rho) summary.hyper[(J+1), 4:5] <- rep(NA, 2) summary.hyper[(J+1), 6:7] <- rep(NA, 2) } if(!fix.rho.T) { temp <- mcmc.list(lapply(samples.alpha.list, mcmc)) summary.hyper[(J+2), 1:3] <- c(mean(samples.alpha.matrix), quantile(samples.alpha.matrix, c(0.025, 0.975))) summary.hyper[(J+2), 4:5] <- c(n.keep, accept.final[names(accept.final)=="rho.T"]) summary.hyper[(J+2), 6:7] <- c(effectiveSize(temp), gelman.diag(temp)$psrf[ ,2]) }else { summary.hyper[(J+2), 1:3] <- c(alpha, alpha, alpha) summary.hyper[(J+2), 4:5] <- rep(NA, 2) summary.hyper[(J+2), 6:7] <- rep(NA, 
2) } summary.results <- rbind(summary.beta, summary.hyper) rownames(summary.results)[((J*p)+1): nrow(summary.results)] <- c(paste(rep("Sigma",J), 1:J, 1:J, sep=""), "rho.S", "rho.T") summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4) summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1) } ################################### #### Compile and return the results ################################### model.string <- c("Likelihood model - Poisson (log link function)", "\nRandom effects model - Multivariate Autoregressive order 1 CAR model\n") n.total <- floor((n.sample - burnin) / thin) * n.chains mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains) names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains") results.final <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X) class(results.final) <- "CARBayesST" if(verbose) { b<-proc.time() cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n") }else {} return(results.final) }
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/poisson.MVCARar1.R
poisson.MVCARar1MCMC <- function(Y, offset, X.standardised, W, rho, alpha, fix.rho.S, fix.rho.T, K, N, NK, J, N.all, p, miss.locator, n.miss, burnin, n.sample, thin, MALA, n.beta.block, list.block, prior.mean.beta, prior.var.beta, prior.Sigma.df, prior.Sigma.scale, verbose, chain) { #Rcpp::sourceCpp("src/CARBayesST.cpp") #source("R/common.functions.R") #library(spdep) #library(truncnorm) #library(MCMCpack) # # ############################################ #### Set up the key elements before sampling ############################################ #### Generate the initial parameter values beta <- array(NA, c(p, J)) for(i in 1:J) { mod.glm <- glm(Y[ ,i]~X.standardised-1, offset=offset[ ,i], family="quasipoisson") beta.mean <- mod.glm$coefficients beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled)) beta[ ,i] <- rnorm(n=p, mean=beta.mean, sd=beta.sd) } log.Y <- log(Y) log.Y[Y==0] <- -0.1 res.temp <- log.Y - X.standardised %*% beta - offset res.sd <- sd(res.temp, na.rm=TRUE)/5 phi.vec <- rnorm(n=N.all, mean=0, sd=res.sd) phi <- matrix(phi.vec, ncol=J, byrow=TRUE) Sigma <- cov(phi) Sigma.inv <- solve(Sigma) Sigma.a <- rep(1, J) regression <- X.standardised %*% beta fitted <- exp(regression + phi + offset) Y.DA <- Y #### Matrices to store samples n.keep <- floor((n.sample - burnin)/thin) samples.beta <- array(NA, c(n.keep, J*p)) samples.phi <- array(NA, c(n.keep, N.all)) samples.Sigma <- array(NA, c(n.keep, J, J)) samples.Sigma.a <- array(NA, c(n.keep, J)) if(!fix.rho.S) samples.rho <- array(NA, c(n.keep, 1)) if(!fix.rho.T) samples.alpha <- array(NA, c(n.keep, 1)) samples.loglike <- array(NA, c(n.keep, N.all)) samples.fitted <- array(NA, c(n.keep, N.all)) if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss)) #### Metropolis quantities accept <- rep(0,4) accept.beta <- rep(0,2*J) proposal.sd.beta <- rep(0.01, J) proposal.sd.phi <- 0.1 proposal.sd.rho <- 0.02 Sigma.post.df <- prior.Sigma.df + J - 1 + K * N Sigma.a.post.shape <- (prior.Sigma.df + J) / 2 #### CAR quantities W.quants <- common.Wcheckformat.leroux(W) W <- W.quants$W W.triplet <- W.quants$W.triplet n.triplet <- W.quants$n.triplet W.triplet.sum <- W.quants$W.triplet.sum n.neighbours <- W.quants$n.neighbours W.begfin <- W.quants$W.begfin Wstar <- diag(apply(W,1,sum)) - W Q <- rho * Wstar + diag(rep(1-rho,K)) #### Create the determinant if(!fix.rho.S) { Wstar.eigen <- eigen(Wstar) Wstar.val <- Wstar.eigen$values det.Q <- sum(log((rho * Wstar.val + (1-rho)))) }else {} #### Check for islands W.list<- mat2listw(W, style = "B") W.nb <- W.list$neighbours W.islands <- n.comp.nb(W.nb) islands <- W.islands$comp.id n.islands <- max(W.islands$nc) if(rho==1 & alpha==1) { Sigma.post.df <- prior.Sigma.df + ((N-1) * (K-n.islands)) + J - 1 }else if(rho==1) { Sigma.post.df <- prior.Sigma.df + (N * (K-n.islands)) + J - 1 }else if(alpha==1) { Sigma.post.df <- prior.Sigma.df + ((N-1) * K) + J - 1 }else {} #### Start timer if(verbose) { cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ") progressBar <- txtProgressBar(style = 3) percentage.points<-round((1:100/100)*n.sample) }else { percentage.points<-round((1:100/100)*n.sample) } ############################## #### Generate the MCMC samples ############################## #### Create the MCMC samples #### Create the MCMC samples for(j in 1:n.sample) { #################################### ## Sample from Y - data augmentation #################################### if(n.miss>0) { Y.DA[miss.locator] <- rpois(n=n.miss, lambda=fitted[miss.locator]) }else {} 
################### ## Sample from beta ################### offset.temp <- phi + offset for(r in 1:J) { if(MALA) { temp <- poissonbetaupdateMALA(X.standardised, NK, p, beta[ ,r], offset.temp[ ,r], Y.DA[ ,r], prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta[r], list.block) }else { temp <- poissonbetaupdateRW(X.standardised, NK, p, beta[ ,r], offset.temp[ ,r], Y.DA[ ,r], prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta[r], list.block) } beta[ ,r] <- temp[[1]] accept.beta[r] <- accept.beta[r] + temp[[2]] accept.beta[(r+J)] <- accept.beta[(r+J)] + n.beta.block } regression <- X.standardised %*% beta ################## ## Sample from phi ################## #### Create the offset elements den.offset <- rho * W.triplet.sum + 1 - rho phi.offset <- regression + offset #### Create the random draws to create the proposal distribution Chol.Sigma <- t(chol(proposal.sd.phi*Sigma)) z.mat <- matrix(rnorm(n=N.all, mean=0, sd=1), nrow=J, ncol=NK) innovations <- t(Chol.Sigma %*% z.mat) #### Update the elements of phi temp1 <- poissonmvar1carupdateRW(W.triplet, W.begfin, W.triplet.sum, K, N, J, phi, alpha, rho, Sigma.inv, Y.DA, innovations, phi.offset, den.offset) phi <- temp1[[1]] for(r in 1:J) { phi[ ,r] <- phi[ ,r] - mean(phi[ ,r]) } accept[1] <- accept[1] + temp1[[2]] accept[2] <- accept[2] + NK #################### ## Sample from Sigma #################### Sigma.post.scale <- 2 * prior.Sigma.df * diag(1 / Sigma.a) + t(phi[1:K, ]) %*% Q %*% phi[1:K, ] for(t in 2:N) { phit <- phi[((t-1)*K+1):(t*K), ] phitminus1 <- phi[((t-2)*K+1):((t-1)*K), ] temp1 <- phit - alpha * phitminus1 Sigma.post.scale <- Sigma.post.scale + t(temp1) %*% Q %*% temp1 } Sigma <- riwish(Sigma.post.df, Sigma.post.scale) Sigma.inv <- solve(Sigma) ###################### ## Sample from Sigma.a ###################### Sigma.a.posterior.scale <- prior.Sigma.df * diag(Sigma.inv) + 1 / prior.Sigma.scale^2 Sigma.a <- 1 / rgamma(J, Sigma.a.post.shape, scale=(1/Sigma.a.posterior.scale)) ###################### #### Sample from alpha ###################### if(!fix.rho.T) { temp <- MVSTrhoTAR1compute(W.triplet, W.triplet.sum, n.triplet, den.offset, K, N, J, phi, rho, Sigma.inv) num <- temp[[1]] denom <- temp[[2]] alpha <- rnorm(n=1, mean = (num / denom), sd=sqrt(1 / denom)) }else {} ################## ## Sample from rho ################## if(!fix.rho.S) { ## Propose a new value proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho) proposal.Q <- proposal.rho * Wstar + diag(rep(1-proposal.rho), K) proposal.det.Q <- sum(log((proposal.rho * Wstar.val + (1-proposal.rho)))) proposal.den.offset <- proposal.rho * W.triplet.sum + 1 - proposal.rho ## Compute the quadratic forms based on current and proposed values of rho temp1.QF <- MVSTrhoSAR1compute(W.triplet, W.triplet.sum, n.triplet, den.offset, K, N, J, phi, rho, alpha, Sigma.inv) temp2.QF <- MVSTrhoSAR1compute(W.triplet, W.triplet.sum, n.triplet, proposal.den.offset, K, N, J, phi, proposal.rho, alpha, Sigma.inv) ## Compute the acceptance rate logprob.current <- 0.5 * J * N * det.Q - 0.5 * temp1.QF logprob.proposal <- 0.5 * J * N * proposal.det.Q - 0.5 * temp2.QF hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho)) prob <- exp(logprob.proposal - logprob.current + hastings) if(prob > runif(1)) { rho <- proposal.rho det.Q <- proposal.det.Q Q <- proposal.Q accept[3] <- accept[3] + 1 }else {} accept[4] <- accept[4] + 1 }else {} ######################### ## 
Calculate the deviance ######################### fitted <- exp(regression + phi + offset) loglike <- dpois(x=as.numeric(t(Y)), lambda=as.numeric(t(fitted)), log=TRUE) ################### ## Save the results ################### if(j > burnin & (j-burnin)%%thin==0) { ele <- (j - burnin) / thin samples.beta[ele, ] <- as.numeric(beta) samples.phi[ele, ] <- as.numeric(t(phi)) samples.Sigma[ele, , ] <- Sigma samples.Sigma.a[ele, ] <- Sigma.a if(!fix.rho.S) samples.rho[ele, ] <- rho if(!fix.rho.T) samples.alpha[ele, ] <- alpha samples.loglike[ele, ] <- loglike samples.fitted[ele, ] <- as.numeric(t(fitted)) if(n.miss>0) samples.Y[ele, ] <- Y.DA[miss.locator] }else {} ######################################## ## Self tune the acceptance probabilties ######################################## if(ceiling(j/100)==floor(j/100) & j < burnin) { #### Update the proposal sds for(r in 1:J) { if(p>2) { proposal.sd.beta[r] <- common.accceptrates1(accept.beta[c(r, (r+J))], proposal.sd.beta[r], 40, 50) }else { proposal.sd.beta[r] <- common.accceptrates1(accept.beta[c(r, (r+J))], proposal.sd.beta[r], 30, 40) } } proposal.sd.phi <- common.accceptrates1(accept[1:2], proposal.sd.phi, 40, 50) if(!fix.rho.S) { proposal.sd.rho <- common.accceptrates2(accept[3:4], proposal.sd.rho, 40, 50, 0.5) } accept <- c(0,0,0,0) accept.beta <- rep(0,2*J) }else {} ################################ ## print progress to the console ################################ if(j %in% percentage.points & verbose) { setTxtProgressBar(progressBar, j/n.sample) } } ############################################ #### Return the results to the main function ############################################ #### Compile the results if(n.miss==0) samples.Y <- NA if(fix.rho.S) samples.rho <- NA if(fix.rho.T) samples.alpha <- NA chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.Sigma=samples.Sigma, samples.Sigma.a=samples.Sigma.a, samples.rho=samples.rho, samples.alpha=samples.alpha, samples.loglike=samples.loglike, samples.fitted=samples.fitted, samples.Y=samples.Y, accept=accept, accept.beta=accept.beta) #### Return the results return(chain.results) }
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/poisson.MVCARar1MCMC.R
poisson.MVCARar2 <- function(formula, data=NULL, W, burnin, n.sample, thin=1, n.chains=1, n.cores=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.Sigma.df=NULL, prior.Sigma.scale=NULL, rho.S=NULL, rho.T=NULL, MALA=TRUE, verbose=TRUE) { ############################################## #### Format the arguments and check for errors ############################################## #### Verbose a <- common.verbose(verbose) #### Check on MALA argument if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE) if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE) #### Frame object frame.results <- common.frame.MVST(formula, data, "poisson") NK <- frame.results$n p <- frame.results$p X <- frame.results$X X.standardised <- frame.results$X.standardised X.sd <- frame.results$X.sd X.mean <- frame.results$X.mean X.indicator <- frame.results$X.indicator offset <- frame.results$offset Y <- frame.results$Y N.all <- length(Y) J <- ncol(Y) which.miss <- frame.results$which.miss n.miss <- N.all - sum(which.miss) #### W matrix if(!is.matrix(W)) stop("W is not a matrix.", call.=FALSE) W.quants <- common.Wcheckformat.leroux(W) K <- W.quants$n N <- NK / K if(ceiling(N)!= floor(N)) stop("The number of data points in Y divided by the number of rows in W is not a whole number.", call.=FALSE) #### Create a missing list if(n.miss>0) { miss.locator <- array(NA, c(n.miss, 2)) colnames(miss.locator) <- c("row", "column") locations <- which(which.miss==0) miss.locator[ ,1] <- ceiling(locations/J) miss.locator[ ,2] <- locations - (miss.locator[ ,1]-1) * J }else { miss.locator <- NA } #### Check on the rho arguments if(is.null(rho.S)) { rho <- runif(1) fix.rho.S <- FALSE }else { rho <- rho.S fix.rho.S <- TRUE } if(!is.numeric(rho)) stop("rho.S is fixed but is not numeric.", call.=FALSE) if(rho<0 ) stop("rho.S is outside the range [0, 1].", call.=FALSE) if(rho>1 ) stop("rho.S is outside the range [0, 1].", call.=FALSE) if(is.null(rho.T)) { alpha <- c(runif(1), runif(1)) fix.rho.T <- FALSE }else { alpha <- rho.T fix.rho.T <- TRUE } if(!is.numeric(alpha)) stop("rho.T is fixed but is not numeric.", call.=FALSE) if(length(alpha)!=2) stop("rho.T is fixed but is not of length 2.", call.=FALSE) #### Priors if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p) if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p) if(is.null(prior.Sigma.df)) prior.Sigma.df <- 2 if(is.null(prior.Sigma.scale)) prior.Sigma.scale <- rep(100000, J) prior.beta.check(prior.mean.beta, prior.var.beta, p) if(!is.numeric(prior.Sigma.scale)) stop("prior.Sigma.scale has non-numeric values.", call.=FALSE) if(sum(is.na(prior.Sigma.scale))!=0) stop("prior.Sigma.scale has missing values.", call.=FALSE) #### Compute the blocking structure for beta block.temp <- common.betablock(p) beta.beg <- block.temp[[1]] beta.fin <- block.temp[[2]] n.beta.block <- block.temp[[3]] list.block <- as.list(rep(NA, n.beta.block*2)) for(r in 1:n.beta.block) { list.block[[r]] <- beta.beg[r]:beta.fin[r]-1 list.block[[r+n.beta.block]] <- length(list.block[[r]]) } #### MCMC quantities - burnin, n.sample, thin common.burnin.nsample.thin.check(burnin, n.sample, thin) ######################## #### Run the MCMC chains ######################## if(n.chains==1) { #### Only 1 chain results <- poisson.MVCARar2MCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, alpha=alpha, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, NK=NK, J=J, N.all=N.all, p=p, miss.locator=miss.locator, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, 
n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, verbose=verbose, chain=1) }else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores==1) { #### Multiple chains in series results <- as.list(rep(NA, n.chains)) for(i in 1:n.chains) { results[[i]] <- poisson.MVCARar2MCMC(Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, alpha=alpha, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, NK=NK, J=J, N.all=N.all, p=p, miss.locator=miss.locator, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, verbose=verbose, chain=i) } }else if(n.chains > 1 & ceiling(n.chains)==floor(n.chains) & n.cores>1 & ceiling(n.cores)==floor(n.cores)) { #### Multiple chains in parallel results <- as.list(rep(NA, n.chains)) if(verbose) { compclust <- makeCluster(n.cores, outfile="CARBayesSTprogress.txt") cat("The current progress of the model fitting algorithm has been output to CARBayesSTprogress.txt in the working directory") }else { compclust <- makeCluster(n.cores) } results <- clusterCall(compclust, fun=poisson.MVCARar2MCMC, Y=Y, offset=offset, X.standardised=X.standardised, W=W, rho=rho, alpha=alpha, fix.rho.S=fix.rho.S, fix.rho.T=fix.rho.T, K=K, N=N, NK=NK, J=J, N.all=N.all, p=p, miss.locator=miss.locator, n.miss=n.miss, burnin=burnin, n.sample=n.sample, thin=thin, MALA=MALA, n.beta.block=n.beta.block, list.block=list.block, prior.mean.beta=prior.mean.beta, prior.var.beta=prior.var.beta, prior.Sigma.df=prior.Sigma.df, prior.Sigma.scale=prior.Sigma.scale, verbose=verbose, chain="all") stopCluster(compclust) }else { stop("n.chains or n.cores are not positive integers.", call.=FALSE) } #### end timer if(verbose) { cat("\nSummarising results.\n") }else {} ################################### #### Summarise and save the results ################################### if(n.chains==1) { #### If n.chains==1 ## Compute the acceptance rates accept.final <- rep(NA, 5) names(accept.final) <- c("beta", "phi", "rho.S", "rho.T", "Sigma") accept.final[1] <- 100 * sum(results$accept.beta[1:J]) / sum(results$accept.beta[(J+1):(2*J)]) accept.final[2] <- 100 * results$accept[1] / results$accept[2] if(!fix.rho.S) accept.final[3] <- 100 * results$accept[3] / results$accept[4] if(!fix.rho.T) accept.final[4] <- 100 accept.final[5] <- 100 ## Compute the fitted deviance mean.beta <- matrix(apply(results$samples.beta, 2, mean), nrow=p, ncol=J, byrow=F) mean.phi <- matrix(apply(results$samples.phi, 2, mean), nrow=NK, ncol=J, byrow=T) fitted.mean <- exp(X.standardised %*% mean.beta + mean.phi + offset) deviance.fitted <- -2 * sum(dpois(x=as.numeric(t(Y)), lambda=as.numeric(t(fitted.mean)), log=TRUE), na.rm=TRUE) modelfit <- common.modelfit(results$samples.loglike, deviance.fitted) ## Create the fitted values and residuals fitted.values <- matrix(apply(results$samples.fitted, 2, mean), nrow=NK, ncol=J, byrow=T) response.residuals <- Y - fitted.values pearson.residuals <- response.residuals / sqrt(fitted.values) residuals <- list(response=response.residuals, pearson=pearson.residuals) ## Transform the parameters back to the original covariate scale samples.beta.orig <- results$samples.beta for(r in 1:J) { samples.beta.orig[ ,((r-1)*p+1):(r*p)] <- common.betatransform(results$samples.beta[ 
,((r-1)*p+1):(r*p) ], X.indicator, X.mean, X.sd, p, FALSE) } ## Create the samples object if(fix.rho.S & fix.rho.T) { samples.rhoext <- NA }else if(fix.rho.S & !fix.rho.T) { samples.rhoext <- results$samples.alpha colnames(samples.rhoext) <- c("rho1.T", "rho2.T") }else if(!fix.rho.S & fix.rho.T) { samples.rhoext <- results$samples.rho names(samples.rhoext) <- "rho.S" }else { samples.rhoext <- cbind(results$samples.rho, results$samples.alpha) colnames(samples.rhoext) <- c("rho.S", "rho1.T", "rho2.T") } samples <- list(beta=mcmc(samples.beta.orig), phi=mcmc(results$samples.phi), Sigma=results$samples.Sigma, rho=mcmc(samples.rhoext), fitted=mcmc(results$samples.fitted), Y=mcmc(results$samples.Y)) ## Create a summary object n.keep <- floor((n.sample - burnin)/thin) summary.beta <- t(rbind(apply(samples.beta.orig, 2, mean), apply(samples.beta.orig, 2, quantile, c(0.025, 0.975)))) summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z) col.name <- rep(NA, p*(J-1)) if(is.null(colnames(Y))) { for(r in 1:J) { col.name[((r-1)*p+1):(r*p)] <- paste("Variable ", r, " - ", colnames(X), sep="") } }else { for(r in 1:J) { col.name[((r-1)*p+1):(r*p)] <- paste(colnames(Y)[r], " - ", colnames(X), sep="") } } rownames(summary.beta) <- col.name colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag") summary.hyper <- array(NA, c((J+3) ,7)) for(r in 1:J) { summary.hyper[r, 1] <- mean(results$samples.Sigma[ ,r,r]) summary.hyper[r, 2:3] <- quantile(results$samples.Sigma[ ,r,r], c(0.025, 0.975)) summary.hyper[r, 4] <- n.keep summary.hyper[r, 5] <- 100 summary.hyper[r, 6] <- effectiveSize(results$samples.Sigma[ ,r,r]) summary.hyper[r, 7] <- geweke.diag(results$samples.Sigma[ ,r,r])$z } if(!fix.rho.S) { summary.hyper[(J+1), 1:3] <- c(mean(results$samples.rho), quantile(results$samples.rho, c(0.025, 0.975))) summary.hyper[(J+1), 4:5] <- c(n.keep, accept.final[names(accept.final)=="rho.S"]) summary.hyper[(J+1), 6:7] <- c(effectiveSize(results$samples.rho), geweke.diag(results$samples.rho)$z) }else { summary.hyper[(J+1), 1:3] <- c(rho, rho, rho) summary.hyper[(J+1), 4:5] <- rep(NA, 2) summary.hyper[(J+1), 6:7] <- rep(NA, 2) } if(!fix.rho.T) { summary.hyper[(J+2), 1:3] <- c(mean(results$samples.alpha[ ,1]), quantile(results$samples.alpha[ ,1], c(0.025, 0.975))) summary.hyper[(J+2), 4:5] <- c(n.keep, accept.final[names(accept.final)=="rho.T"]) summary.hyper[(J+2), 6:7] <- c(effectiveSize(results$samples.alpha[ ,1]), geweke.diag(results$samples.alpha[ ,1])$z) summary.hyper[(J+3), 1:3] <- c(mean(results$samples.alpha[ ,2]), quantile(results$samples.alpha[ ,2], c(0.025, 0.975))) summary.hyper[(J+3), 4:5] <- c(n.keep, accept.final[names(accept.final)=="rho.T"]) summary.hyper[(J+3), 6:7] <- c(effectiveSize(results$samples.alpha[ ,2]), geweke.diag(results$samples.alpha[ ,2])$z) }else { summary.hyper[(J+2), 1:3] <- c(alpha[1], alpha[1], alpha[1]) summary.hyper[(J+2), 4:5] <- rep(NA, 2) summary.hyper[(J+2), 6:7] <- rep(NA, 2) summary.hyper[(J+3), 1:3] <- c(alpha[2], alpha[2], alpha[2]) summary.hyper[(J+3), 4:5] <- rep(NA, 2) summary.hyper[(J+3), 6:7] <- rep(NA, 2) } summary.results <- rbind(summary.beta, summary.hyper) rownames(summary.results)[((J*p)+1): nrow(summary.results)] <- c(paste(rep("Sigma",J), 1:J, 1:J, sep=""), "rho.S", "rho1.T", "rho2.T") summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4) summary.results[ , 4:7] <- round(summary.results[ , 4:7], 
1) }else { #### If n.chains > 1 ## Compute the acceptance rates accept.final <- rep(NA, 5) names(accept.final) <- c("beta", "phi", "rho.S", "rho.T", "Sigma") accept.final[5] <- 100 accept.temp <- lapply(results, function(l) l[["accept.beta"]]) accept.temp2 <- do.call(what=rbind, args=accept.temp) accept.final[1] <- 100 * sum(accept.temp2[ ,1:J]) / sum(accept.temp2[ ,(J+1):(2*J)]) accept.temp <- lapply(results, function(l) l[["accept"]]) accept.temp2 <- do.call(what=rbind, args=accept.temp) accept.final[2] <- 100 * sum(accept.temp2[ ,1]) / sum(accept.temp2[ ,2]) if(!fix.rho.S) accept.final[3] <- 100 * sum(accept.temp2[ ,3]) / sum(accept.temp2[ ,4]) if(!fix.rho.T) accept.final[4] <- 100 ## Extract the samples into separate matrix and list objects samples.beta.list <- lapply(results, function(l) l[["samples.beta"]]) samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list) samples.phi.list <- lapply(results, function(l) l[["samples.phi"]]) samples.phi.matrix <- do.call(what=rbind, args=samples.phi.list) if(!fix.rho.S) { samples.rho.list <- lapply(results, function(l) l[["samples.rho"]]) samples.rho.matrix <- do.call(what=rbind, args=samples.rho.list) } if(!fix.rho.T) { samples.alpha.list <- lapply(results, function(l) l[["samples.alpha"]]) samples.alpha.matrix <- do.call(what=rbind, args=samples.alpha.list) } samples.Sigma.list <- lapply(results, function(l) l[["samples.Sigma"]]) samples.loglike.list <- lapply(results, function(l) l[["samples.loglike"]]) samples.loglike.matrix <- do.call(what=rbind, args=samples.loglike.list) samples.fitted.list <- lapply(results, function(l) l[["samples.fitted"]]) samples.fitted.matrix <- do.call(what=rbind, args=samples.fitted.list) if(n.miss>0) samples.Y.list <- lapply(results, function(l) l[["samples.Y"]]) ## Compute the fitted deviance mean.beta <- matrix(apply(samples.beta.matrix, 2, mean), nrow=p, ncol=J, byrow=F) mean.phi <- matrix(apply(samples.phi.matrix, 2, mean), nrow=NK, ncol=J, byrow=T) fitted.mean <- exp(X.standardised %*% mean.beta + mean.phi + offset) deviance.fitted <- -2 * sum(dpois(x=as.numeric(t(Y)), lambda=as.numeric(t(fitted.mean)), log=TRUE), na.rm=TRUE) modelfit <- common.modelfit(samples.loglike.matrix, deviance.fitted) ## Create the fitted values and residuals fitted.values <- matrix(apply(samples.fitted.matrix, 2, mean), nrow=NK, ncol=J, byrow=T) response.residuals <- Y - fitted.values pearson.residuals <- response.residuals / sqrt(fitted.values) residuals <- list(response=response.residuals, pearson=pearson.residuals) ## Transform the parameters back to the original covariate scale. 
samples.beta.list <- samples.beta.list for(j in 1:n.chains) { for(r in 1:J) { samples.beta.list[[j]][ ,((r-1)*p+1):(r*p)] <- common.betatransform(samples.beta.list[[j]][ ,((r-1)*p+1):(r*p) ], X.indicator, X.mean, X.sd, p, FALSE) } } samples.beta.matrix <- do.call(what=rbind, args=samples.beta.list) ## Create MCMC objects beta.mcmc <- mcmc.list(lapply(samples.beta.list, mcmc)) phi.mcmc <- mcmc.list(lapply(samples.phi.list, mcmc)) fitted.mcmc <- mcmc.list(lapply(samples.fitted.list, mcmc)) if(n.miss>0) { Y.mcmc <- mcmc.list(lapply(samples.Y.list, mcmc)) }else { Y.mcmc <- NA } if(fix.rho.S & fix.rho.T) { rhoext.mcmc <- NA }else if(fix.rho.S & !fix.rho.T) { for(j in 1:n.chains) { colnames(samples.alpha.list[[j]]) <- c("rho1.T", "rho2.T") } rhoext.mcmc <- mcmc.list(lapply(samples.alpha.list, mcmc)) }else if(!fix.rho.S & fix.rho.T) { for(j in 1:n.chains) { colnames(samples.rho.list[[j]]) <- c("rho.S") } rhoext.mcmc <- mcmc.list(lapply(samples.rho.list, mcmc)) }else { rho.temp <- as.list(rep(NA, n.chains)) for(j in 1:n.chains) { rho.temp[[j]] <- cbind(samples.rho.list[[j]], samples.alpha.list[[j]]) colnames(rho.temp[[j]]) <- c("rho.S", "rho1.T", "rho2.T") } rhoext.mcmc <- mcmc.list(lapply(rho.temp, mcmc)) } samples <- list(beta=beta.mcmc, phi=phi.mcmc, rho=rhoext.mcmc, Sigma=samples.Sigma.list, fitted=fitted.mcmc, Y=Y.mcmc) ## create a summary object n.keep <- floor((n.sample - burnin)/thin) * n.chains summary.beta <- t(rbind(apply(samples.beta.matrix, 2, mean), apply(samples.beta.matrix, 2, quantile, c(0.025, 0.975)))) summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.final[names(accept.final)=="beta"],p), effectiveSize(beta.mcmc), gelman.diag(beta.mcmc)$psrf[ ,2]) col.name <- rep(NA, p*(J-1)) if(is.null(colnames(Y))) { for(r in 1:J) { col.name[((r-1)*p+1):(r*p)] <- paste("Variable ", r, " - ", colnames(X), sep="") } }else { for(r in 1:J) { col.name[((r-1)*p+1):(r*p)] <- paste(colnames(Y)[r], " - ", colnames(X), sep="") } } rownames(summary.beta) <- col.name colnames(summary.beta) <- c("Mean", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "PSRF (upper 95% CI)") summary.hyper <- array(NA, c((J+3) ,7)) for(r in 1:J) { temp <- NA temp2 <- as.list(rep(NA, n.chains)) for(v in 1:n.chains) { temp <- c(temp, samples.Sigma.list[[v]][ ,r,r]) temp2[[v]] <- mcmc(samples.Sigma.list[[v]][ ,r,r]) } temp <- temp[-1] summary.hyper[r, 1] <- mean(temp) summary.hyper[r, 2:3] <- quantile(temp, c(0.025, 0.975)) summary.hyper[r, 4] <- n.keep summary.hyper[r, 5] <- 100 summary.hyper[r, 6] <- effectiveSize(mcmc.list(temp2)) summary.hyper[r, 7] <- gelman.diag(mcmc.list(temp2))$psrf[ ,2] } if(!fix.rho.S) { temp <- mcmc.list(lapply(samples.rho.list, mcmc)) summary.hyper[(J+1), 1:3] <- c(mean(samples.rho.matrix), quantile(samples.rho.matrix, c(0.025, 0.975))) summary.hyper[(J+1), 4:5] <- c(n.keep, accept.final[names(accept.final)=="rho.S"]) summary.hyper[(J+1), 6:7] <- c(effectiveSize(temp), gelman.diag(temp)$psrf[ ,2]) }else { summary.hyper[(J+1), 1:3] <- c(rho, rho, rho) summary.hyper[(J+1), 4:5] <- rep(NA, 2) summary.hyper[(J+1), 6:7] <- rep(NA, 2) } if(!fix.rho.T) { temp <- mcmc.list(lapply(samples.alpha.list, mcmc)) summary.hyper[(J+2), 1:3] <- c(mean(samples.alpha.matrix[ ,1]), quantile(samples.alpha.matrix[ ,1], c(0.025, 0.975))) summary.hyper[(J+2), 4:5] <- c(n.keep, accept.final[names(accept.final)=="rho.T"]) summary.hyper[(J+2), 6:7] <- c(effectiveSize(temp)[1], gelman.diag(temp)$psrf[ ,2][1]) summary.hyper[(J+3), 1:3] <- c(mean(samples.alpha.matrix[ ,2]), quantile(samples.alpha.matrix[ ,2], 
c(0.025, 0.975))) summary.hyper[(J+3), 4:5] <- c(n.keep, accept.final[names(accept.final)=="rho.T"]) summary.hyper[(J+3), 6:7] <- c(effectiveSize(temp)[2], gelman.diag(temp)$psrf[ ,2][2]) }else { summary.hyper[(J+2), 1:3] <- c(alpha[1], alpha[1], alpha[1]) summary.hyper[(J+2), 4:5] <- rep(NA, 2) summary.hyper[(J+2), 6:7] <- rep(NA, 2) summary.hyper[(J+3), 1:3] <- c(alpha[2], alpha[2], alpha[2]) summary.hyper[(J+3), 4:5] <- rep(NA, 2) summary.hyper[(J+3), 6:7] <- rep(NA, 2) } summary.results <- rbind(summary.beta, summary.hyper) rownames(summary.results)[((J*p)+1): nrow(summary.results)] <- c(paste(rep("Sigma",J), 1:J, 1:J, sep=""), "rho.S", "rho1.T", "rho2.T") summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4) summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1) } ################################### #### Compile and return the results ################################### model.string <- c("Likelihood model - Poisson (log link function)", "\nRandom effects model - Multivariate Autoregressive order 2 CAR model\n") n.total <- floor((n.sample - burnin) / thin) * n.chains mcmc.info <- c(n.total, n.sample, burnin, thin, n.chains) names(mcmc.info) <- c("Total samples", "n.sample", "burnin", "thin", "n.chains") results.final <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, mcmc.info=mcmc.info, X=X) class(results.final) <- "CARBayesST" if(verbose) { b<-proc.time() cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n") }else {} return(results.final) }
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/poisson.MVCARar2.R
poisson.MVCARar2MCMC <- function(Y, offset, X.standardised, W, rho, alpha, fix.rho.S, fix.rho.T, K, N, NK, J, N.all, p, miss.locator, n.miss, burnin, n.sample, thin, MALA, n.beta.block, list.block, prior.mean.beta, prior.var.beta, prior.Sigma.df, prior.Sigma.scale, verbose, chain) { #Rcpp::sourceCpp("src/CARBayesST.cpp") #source("R/common.functions.R") #library(spdep) #library(truncnorm) #library(MCMCpack) # # ############################################ #### Set up the key elements before sampling ############################################ #### Generate the initial parameter values beta <- array(NA, c(p, J)) for(i in 1:J) { mod.glm <- glm(Y[ ,i]~X.standardised-1, offset=offset[ ,i], family="quasipoisson") beta.mean <- mod.glm$coefficients beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled)) beta[ ,i] <- rnorm(n=p, mean=beta.mean, sd=beta.sd) } log.Y <- log(Y) log.Y[Y==0] <- -0.1 res.temp <- log.Y - X.standardised %*% beta - offset res.sd <- sd(res.temp, na.rm=TRUE)/5 phi.vec <- rnorm(n=N.all, mean=0, sd=res.sd) phi <- matrix(phi.vec, ncol=J, byrow=TRUE) Sigma <- cov(phi) Sigma.inv <- solve(Sigma) Sigma.a <- rep(1, J) regression <- X.standardised %*% beta fitted <- exp(regression + phi + offset) Y.DA <- Y #### Matrices to store samples n.keep <- floor((n.sample - burnin)/thin) samples.beta <- array(NA, c(n.keep, J*p)) samples.phi <- array(NA, c(n.keep, N.all)) samples.Sigma <- array(NA, c(n.keep, J, J)) samples.Sigma.a <- array(NA, c(n.keep, J)) if(!fix.rho.S) samples.rho <- array(NA, c(n.keep, 1)) if(!fix.rho.T) samples.alpha <- array(NA, c(n.keep, 2)) samples.loglike <- array(NA, c(n.keep, N.all)) samples.fitted <- array(NA, c(n.keep, N.all)) if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss)) #### Metropolis quantities accept <- rep(0,4) accept.beta <- rep(0,2*J) proposal.sd.beta <- rep(0.01, J) proposal.sd.phi <- 0.1 proposal.sd.rho <- 0.02 Sigma.post.df <- prior.Sigma.df + J - 1 + K * N Sigma.a.post.shape <- (prior.Sigma.df + J) / 2 #### CAR quantities W.quants <- common.Wcheckformat.leroux(W) W <- W.quants$W W.triplet <- W.quants$W.triplet n.triplet <- W.quants$n.triplet W.triplet.sum <- W.quants$W.triplet.sum n.neighbours <- W.quants$n.neighbours W.begfin <- W.quants$W.begfin Wstar <- diag(apply(W,1,sum)) - W Q <- rho * Wstar + diag(rep(1-rho,K)) #### Create the determinant if(!fix.rho.S) { Wstar.eigen <- eigen(Wstar) Wstar.val <- Wstar.eigen$values det.Q <- sum(log((rho * Wstar.val + (1-rho)))) }else {} #### Check for islands W.list<- mat2listw(W, style = "B") W.nb <- W.list$neighbours W.islands <- n.comp.nb(W.nb) islands <- W.islands$comp.id n.islands <- max(W.islands$nc) if(rho==1 & alpha[1]==2 & alpha[2]==-1) { Sigma.post.df <- prior.Sigma.df + ((N-2) * (K-n.islands)) + J - 1 }else if(rho==1) { Sigma.post.df <- prior.Sigma.df + (N * (K-n.islands)) + J - 1 }else if(alpha[1]==2 & alpha[2]==-1) { Sigma.post.df <- prior.Sigma.df + ((N-2) * K) + J - 1 }else {} #### Start timer if(verbose) { cat("\nMarkov chain", chain, "- generating", n.keep, "post burnin and thinned samples.\n", sep = " ") progressBar <- txtProgressBar(style = 3) percentage.points<-round((1:100/100)*n.sample) }else { percentage.points<-round((1:100/100)*n.sample) } ############################## #### Generate the MCMC samples ############################## #### Create the MCMC samples for(j in 1:n.sample) { #################################### ## Sample from Y - data augmentation #################################### if(n.miss>0) { Y.DA[miss.locator] <- rpois(n=n.miss, lambda=fitted[miss.locator]) }else {} 
################### ## Sample from beta ################### offset.temp <- phi + offset for(r in 1:J) { if(MALA) { temp <- poissonbetaupdateMALA(X.standardised, NK, p, beta[ ,r], offset.temp[ ,r], Y.DA[ ,r], prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta[r], list.block) }else { temp <- poissonbetaupdateRW(X.standardised, NK, p, beta[ ,r], offset.temp[ ,r], Y.DA[ ,r], prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta[r], list.block) } beta[ ,r] <- temp[[1]] accept.beta[r] <- accept.beta[r] + temp[[2]] accept.beta[(r+J)] <- accept.beta[(r+J)] + n.beta.block } regression <- X.standardised %*% beta ################## ## Sample from phi ################## #### Create the offset elements den.offset <- rho * W.triplet.sum + 1 - rho phi.offset <- regression + offset #### Create the random draws to create the proposal distribution Chol.Sigma <- t(chol(proposal.sd.phi*Sigma)) z.mat <- matrix(rnorm(n=N.all, mean=0, sd=1), nrow=J, ncol=NK) innovations <- t(Chol.Sigma %*% z.mat) #### Update the elements of phi temp1 <- poissonmvar2carupdateRW(W.triplet, W.begfin, W.triplet.sum, K, N, J, phi, alpha[1], alpha[2], rho, Sigma.inv, Y.DA, innovations, phi.offset, den.offset) phi <- temp1[[1]] for(r in 1:J) { phi[ ,r] <- phi[ ,r] - mean(phi[ ,r]) } accept[1] <- accept[1] + temp1[[2]] accept[2] <- accept[2] + NK #################### ## Sample from Sigma #################### Sigma.post.scale <- 2 * prior.Sigma.df * diag(1 / Sigma.a) + t(phi[1:K, ]) %*% Q %*% phi[1:K, ] + t(phi[(K+1):(2*K), ]) %*% Q %*% phi[(K+1):(2*K), ] for(t in 3:N) { phit <- phi[((t-1)*K+1):(t*K), ] phitminus1 <- phi[((t-2)*K+1):((t-1)*K), ] phitminus2 <- phi[((t-3)*K+1):((t-2)*K), ] temp1 <- phit - alpha[1] * phitminus1 - alpha[2] * phitminus2 Sigma.post.scale <- Sigma.post.scale + t(temp1) %*% Q %*% temp1 } Sigma <- riwish(Sigma.post.df, Sigma.post.scale) Sigma.inv <- solve(Sigma) ###################### ## Sample from Sigma.a ###################### Sigma.a.posterior.scale <- prior.Sigma.df * diag(Sigma.inv) + 1 / prior.Sigma.scale^2 Sigma.a <- 1 / rgamma(J, Sigma.a.post.shape, scale=(1/Sigma.a.posterior.scale)) ###################### #### Sample from alpha ###################### if(!fix.rho.T) { temp <- MVSTrhoTAR2compute(W.triplet, W.triplet.sum, n.triplet, den.offset, K, N, J, phi, rho, Sigma.inv) alpha.precision <- matrix(c(temp[[1]], temp[[2]], temp[[2]], temp[[3]]), nrow=2, ncol=2) alpha.var <- solve(alpha.precision) alpha.mean <- rep(NA, 2) alpha.mean[2] <- (temp[[1]] * temp[[5]] - temp[[2]] * temp[[4]]) / (temp[[1]] * temp[[3]] - temp[[2]]^2) alpha.mean[1] <- (temp[[5]] - temp[[3]] * alpha.mean[2]) / temp[[2]] alpha <- mvrnorm(n=1, mu=alpha.mean, Sigma=alpha.var) }else {} ################## ## Sample from rho ################## if(!fix.rho.S) { ## Propose a new value proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho) proposal.Q <- proposal.rho * Wstar + diag(rep(1-proposal.rho), K) proposal.det.Q <- sum(log((proposal.rho * Wstar.val + (1-proposal.rho)))) proposal.den.offset <- proposal.rho * W.triplet.sum + 1 - proposal.rho ## Compute the quadratic forms based on current and proposed values of rho temp1.QF <- MVSTrhoSAR2compute(W.triplet, W.triplet.sum, n.triplet, den.offset, K, N, J, phi, rho, alpha[1], alpha[2], Sigma.inv) temp2.QF <- MVSTrhoSAR2compute(W.triplet, W.triplet.sum, n.triplet, proposal.den.offset, K, N, J, phi, proposal.rho, alpha[1], alpha[2], Sigma.inv) ## Compute the acceptance rate logprob.current <- 0.5 * J * N * det.Q - 0.5 * temp1.QF logprob.proposal <- 0.5 * 
J * N * proposal.det.Q - 0.5 * temp2.QF hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho)) prob <- exp(logprob.proposal - logprob.current + hastings) if(prob > runif(1)) { rho <- proposal.rho det.Q <- proposal.det.Q Q <- proposal.Q accept[3] <- accept[3] + 1 }else {} accept[4] <- accept[4] + 1 }else {} ######################### ## Calculate the deviance ######################### fitted <- exp(regression + phi + offset) loglike <- dpois(x=as.numeric(t(Y)), lambda=as.numeric(t(fitted)), log=TRUE) ################### ## Save the results ################### if(j > burnin & (j-burnin)%%thin==0) { ele <- (j - burnin) / thin samples.beta[ele, ] <- as.numeric(beta) samples.phi[ele, ] <- as.numeric(t(phi)) samples.Sigma[ele, , ] <- Sigma samples.Sigma.a[ele, ] <- Sigma.a if(!fix.rho.S) samples.rho[ele, ] <- rho if(!fix.rho.T) samples.alpha[ele, ] <- alpha samples.loglike[ele, ] <- loglike samples.fitted[ele, ] <- as.numeric(t(fitted)) if(n.miss>0) samples.Y[ele, ] <- Y.DA[miss.locator] }else {} ######################################## ## Self tune the acceptance probabilties ######################################## if(ceiling(j/100)==floor(j/100) & j < burnin) { #### Update the proposal sds for(r in 1:J) { if(p>2) { proposal.sd.beta[r] <- common.accceptrates1(accept.beta[c(r, (r+J))], proposal.sd.beta[r], 40, 50) }else { proposal.sd.beta[r] <- common.accceptrates1(accept.beta[c(r, (r+J))], proposal.sd.beta[r], 30, 40) } } proposal.sd.phi <- common.accceptrates1(accept[1:2], proposal.sd.phi, 40, 50) if(!fix.rho.S) { proposal.sd.rho <- common.accceptrates2(accept[3:4], proposal.sd.rho, 40, 50, 0.5) } accept <- c(0,0,0,0) accept.beta <- rep(0,2*J) }else {} ################################ ## print progress to the console ################################ if(j %in% percentage.points & verbose) { setTxtProgressBar(progressBar, j/n.sample) } } ############################################ #### Return the results to the main function ############################################ #### Compile the results if(n.miss==0) samples.Y <- NA if(fix.rho.S) samples.rho <- NA if(fix.rho.T) samples.alpha <- NA chain.results <- list(samples.beta=samples.beta, samples.phi=samples.phi, samples.Sigma=samples.Sigma, samples.Sigma.a=samples.Sigma.a, samples.rho=samples.rho, samples.alpha=samples.alpha, samples.loglike=samples.loglike, samples.fitted=samples.fitted, samples.Y=samples.Y, accept=accept, accept.beta=accept.beta) #### Return the results return(chain.results) }
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/poisson.MVCARar2MCMC.R
print.CARBayesST <- function(x,...) { if(is.null(x$localised.structure)) { #### Print out the model fitted cat("\n#################\n") cat("#### Model fitted\n") cat("#################\n") cat(x$model) cat("Regression equation - ") print(x$formula) cat("\n") cat("\n#################\n") cat("#### MCMC details\n") cat("#################\n") cat("Total number of post burnin and thinned MCMC samples generated - ") cat(x$mcmc.info[1]) cat("\n") cat("Number of MCMC chains used - ") cat(x$mcmc.info[5]) cat("\n") cat("Length of the burnin period used for each chain - ") cat(x$mcmc.info[3]) cat("\n") cat("Amount of thinning used - ") cat(x$mcmc.info[4]) cat("\n") #### Print out the results cat("\n############\n") cat("#### Results\n") cat("############\n") cat("Posterior quantities for selected parameters and DIC\n\n") print(x$summary.results[ ,c(1:3,6:7)]) cat("\nDIC = ", x$modelfit[1], " ", "p.d = ", x$modelfit[2], " ", "LMPL = ", x$modelfit[5], "\n") # }else if(class(x$localised.structure)=="numeric") }else if(is.numeric(x$localised.structure)) { #### Print out the model fitted cat("\n#################\n") cat("#### Model fitted\n") cat("#################\n") cat(x$model) cat("Regression equation - ") print(x$formula) cat("\n") cat("\n#################\n") cat("#### MCMC details\n") cat("#################\n") cat("Total number of post burnin and thinned MCMC samples generated - ") cat(x$mcmc.info[1]) cat("\n") cat("Number of MCMC chains used - ") cat(x$mcmc.info[5]) cat("\n") cat("Length of the burnin period used for each chain - ") cat(x$mcmc.info[3]) cat("\n") cat("Amount of thinning used - ") cat(x$mcmc.info[4]) cat("\n") #### Print out the results cat("\n############\n") cat("#### Results\n") cat("############\n") cat("Posterior quantities for selected parameters and DIC\n\n") print(x$summary.results[ ,c(1:3,6:7)]) cat("\nDIC = ", x$modelfit[1], " ", "p.d = ", x$modelfit[2], " ", "LMPL = ", x$modelfit[5], "\n") cat("\nNumber of clusters with the number of data points in each one\n") print(table(paste("group", x$localised.structure, sep=""))) #}else if(class(x$localised.structure)=="list" & nrow(x$localised.structure[[2]])==ncol(x$localised.structure[[2]])) }else if(is.list(x$localised.structure) & nrow(x$localised.structure[[2]])==ncol(x$localised.structure[[2]])) { #### Print out the model fitted cat("\n#################\n") cat("#### Model fitted\n") cat("#################\n") cat(x$model) cat("Regression equation - ") print(x$formula) cat("\n") cat("\n#################\n") cat("#### MCMC details\n") cat("#################\n") cat("Total number of post burnin and thinned MCMC samples generated - ") cat(x$mcmc.info[1]) cat("\n") cat("Number of MCMC chains used - ") cat(x$mcmc.info[5]) cat("\n") cat("Length of the burnin period used for each chain - ") cat(x$mcmc.info[3]) cat("\n") cat("Amount of thinning used - ") cat(x$mcmc.info[4]) cat("\n") #### Print out the results cat("\n############\n") cat("#### Results\n") cat("############\n") cat("Posterior quantities for selected parameters and DIC\n\n") print(x$summary.results[ ,c(1:3,6:7)]) cat("\nDIC = ", x$modelfit[1], " ", "p.d = ", x$modelfit[2], " ", "LMPL = ", x$modelfit[5], "\n") cat("\nThe number of stepchanges identified in the random effect surface") cat("\nthat satisfy Prob(w_ij < 0.5|data) > 0.99 is \n") temp <- x$localised.structure[[2]][!is.na(x$localised.structure[[2]])] tab <- array(NA, c(1,2)) tab[1, ] <- c(sum(temp)/2, (length(temp)- sum(temp))/2) colnames(tab) <- c("stepchange", "no stepchange") print(tab) }else 
if(is.list(x$localised.structure)) { #### Print out the model fitted cat("\n#################\n") cat("#### Model fitted\n") cat("#################\n") cat(x$model) cat("Regression equation - ") print(x$formula) cat("\n") cat("\n#################\n") cat("#### MCMC details\n") cat("#################\n") cat("Total number of post burnin and thinned MCMC samples generated - ") cat(x$mcmc.info[1]) cat("\n") cat("Number of MCMC chains used - ") cat(x$mcmc.info[5]) cat("\n") cat("Length of the burnin period used for each chain - ") cat(x$mcmc.info[3]) cat("\n") cat("Amount of thinning used - ") cat(x$mcmc.info[4]) cat("\n") #### Print out the results cat("\n############\n") cat("#### Results\n") cat("############\n") cat("Posterior quantities for selected parameters and DIC\n\n") print(x$summary.results[ ,c(1:3,6:7)]) cat("\nDIC = ", x$modelfit[1], " ", "p.d = ", x$modelfit[2], " ", "LMPL = ", x$modelfit[5], "\n") cat("\nThe allocation of areas to temporal trends was") print(table(x$localised.structure[[1]])) }else { } return(invisible(x)) }
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/print.CARBayesST.R
residuals.CARBayesST <- function(object, type="pearson", ...) { #### Return one of two types of residuals if(type=="response") { return(object$residuals$response) }else if(type=="pearson") { return(object$residuals$pearson) }else { return("Error. That is not one of the allowable residual types.") } }
/scratch/gouwar.j/cran-all/cranData/CARBayesST/R/residuals.CARBayesST.R
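# Editor's note (not part of the CARBayesST source): a hedged usage sketch for the residuals() S3 method defined above.
# 'model' stands for the output of any CARBayesST model-fitting function; the particular call and its
# arguments shown here are assumptions for illustration only, not package documentation.
# model <- ST.CARar(formula = observed ~ offset(log(expected)) + pm10, family = "poisson",
#                   data = pollutionhealthdata, W = W, burnin = 20000, n.sample = 120000, AR = 1)
# residuals(model)                     # Pearson residuals (the default type)
# residuals(model, type = "response")  # raw response residuals (observed minus fitted)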
### R code from vignette source 'CARBayesST.Rnw' ################################################### ### code chunk number 1: CARBayesST.Rnw:92-93 ################################################### options(prompt = "R> ") ################################################### ### code chunk number 2: CARBayesST.Rnw:537-541 ################################################### library("CARBayesdata") library("sf") data("GGHB.IZ") data("pollutionhealthdata") ################################################### ### code chunk number 3: CARBayesST.Rnw:547-551 ################################################### class(GGHB.IZ) head(GGHB.IZ) class(pollutionhealthdata) head(pollutionhealthdata) ################################################### ### code chunk number 4: CARBayesST.Rnw:556-561 ################################################### library(dplyr) pollutionhealthdata <- pollutionhealthdata %>% mutate( SMR = pollutionhealthdata$observed / pollutionhealthdata$expected, logSMR = log(pollutionhealthdata$observed / pollutionhealthdata$expected)) head(pollutionhealthdata) ################################################### ### code chunk number 5: CARBayesST.Rnw:567-569 ################################################### library(GGally) ggpairs(pollutionhealthdata, columns=c(9, 5:7)) ################################################### ### code chunk number 6: CARBayesST.Rnw:584-588 ################################################### group_IZ <- group_by(pollutionhealthdata, IZ) SMR.av <- summarise(group_IZ, SMR.mean = mean(SMR)) GGHB.IZ$SMR <- SMR.av$SMR.mean head(GGHB.IZ) ################################################### ### code chunk number 7: CARBayesST.Rnw:595-596 ################################################### GGHB.IZ <- st_transform(x=GGHB.IZ, crs='+proj=longlat +datum=WGS84 +no_defs') ################################################### ### code chunk number 8: CARBayesST.Rnw:601-611 ################################################### library(leaflet) colours <- colorNumeric(palette = "YlOrRd", domain = GGHB.IZ$SMR) leaflet(data=GGHB.IZ) %>% addTiles() %>% addPolygons(fillColor = ~colours(GGHB.IZ$SMR), color="grey", weight=1, fillOpacity = 0.7) %>% addLegend(pal = colours, values = GGHB.IZ$SMR, opacity = 1, title="SMR") %>% addScaleBar(position="bottomleft") ################################################### ### code chunk number 9: CARBayesST.Rnw:626-630 ################################################### library("spdep") W.nb <- poly2nb(GGHB.IZ, row.names = GGHB.IZ$IZ) W.list <- nb2listw(W.nb, style = "B") W <- nb2mat(W.nb, style = "B") ################################################### ### code chunk number 10: CARBayesST.Rnw:639-645 ################################################### formula <- observed ~ offset(log(expected)) + jsa + price + pm10 model1 <- glm(formula = formula, family = "quasipoisson", data = pollutionhealthdata) resid.glm <- residuals(model1) summary(model1)$coefficients summary(model1)$dispersion ################################################### ### code chunk number 11: CARBayesST.Rnw:650-651 ################################################### moran.mc(x = resid.glm[1:271], listw = W.list, nsim = 10000) ################################################### ### code chunk number 12: CARBayesST.Rnw:783-788 ################################################### library("CARBayesdata") library("sf") data("GGHB.IZ") data("salesdata") head(salesdata) ################################################### ### code chunk number 13: CARBayesST.Rnw:794-801 
################################################### salesdata <- salesdata %>% mutate(salesprop = salesdata$sales / salesdata$stock) library(ggplot2) ggplot(salesdata, aes(x = factor(year), y = salesprop)) + geom_boxplot(fill="red", alpha=0.7) + scale_x_discrete(name = "Year") + scale_y_continuous(name = "Sales proportion") + theme(text=element_text(size=16), plot.title=element_text(size=18, face="bold")) ################################################### ### code chunk number 14: CARBayesST.Rnw:814-819 ################################################### library(dplyr) group_IZ <- group_by(salesdata, IZ) salesprop <- summarise(group_IZ, salesproprtion.mean = mean(salesprop)) GGHB.IZ$sales <- salesprop$salesproprtion.mean head(GGHB.IZ) ################################################### ### code chunk number 15: CARBayesST.Rnw:824-836 ################################################### GGHB.IZ <- st_transform(x=GGHB.IZ, crs='+proj=longlat +datum=WGS84 +no_defs') library(leaflet) colours <- colorNumeric(palette = "YlOrRd", domain = GGHB.IZ$sales) map1 <- leaflet(data=GGHB.IZ) %>% addTiles() %>% addPolygons(fillColor = ~colours(sales), color="grey", weight=1, fillOpacity = 0.7) %>% addLegend(pal = colours, values = GGHB.IZ$sales, opacity = 1, title="Sales") %>% addScaleBar(position="bottomleft") map1 ################################################### ### code chunk number 16: CARBayesST.Rnw:854-857 ################################################### library("spdep") W.nb <- poly2nb(GGHB.IZ, row.names = GGHB.IZ$IZ) W <- nb2mat(W.nb, style = "B")
/scratch/gouwar.j/cran-all/cranData/CARBayesST/inst/doc/CARBayesST.R
#'@import arrangements #'@import dplyr #'@importFrom MASS ginv #'@importFrom stats median #'@importFrom stats na.omit #'@importFrom stats var #'@export #'@title Adaptive Randomization via Mahalanobis Distance #' #'@description Allocates patients to one of two treatments using #'Adaptive Randomization via Mahalanobis Distance proposed by #'Yichen Qin, Yang Li, Wei Ma, Haoyu Yang, and Feifang Hu (2022). #'@param covariate a data frame. A row of the data frame #'corresponds to the covariate profile of a patient. #'@param assignment a vector. If some patients have already been allocated, #'please input their allocations. If none of the patients have been allocated, #'please input 'assignment = NA' directly. #'@param q the biased coin probability. #'\eqn{q} should be larger than 1/2 and less than 1; default = 0.75. #'@details #'Suppose that \eqn{n} patients are to be assigned to two treatment groups. #'Consider \eqn{p} continuous covariates for each patient. #'\eqn{T_i} is the assignment of the \eqn{i}th patient. #'The proposed procedure to assign units to treatment groups, namely adaptive #'randomization via Mahalanobis distance (ARM), is outlined below. #' #'(1) Arrange all \eqn{n} units randomly into a sequence #'\eqn{x_1,...,x_n}. #' #'(2) Assign the first two units with \eqn{T_1=1} and \eqn{T_2=2}. #' #'(3) Suppose that \eqn{2i} units have been assigned to #'treatment groups; #'for the \eqn{2i+1}-th and \eqn{2i+2}-th units: #' #' (3a) If the \eqn{2i+1}-th unit is assigned to treatment 1 and #' the \eqn{2i+2}-th #' unit to treatment 2, then calculate the potential #' Mahalanobis distance between the updated treatment groups #' with \eqn{2i+2} units, \eqn{M_1(2i + 2)}. #' #' (3b) Similarly, if the \eqn{2i+1}-th unit is #' assigned to treatment 2 and #' the \eqn{2i+2}-th unit to treatment 1, then calculate the #' other potential Mahalanobis distance, \eqn{M_2(2i + 2)}. #' #' #' (4) Assign the \eqn{2i+1}-th unit to a treatment group #' according to the #' following probabilities: #' #'if \eqn{ M_1(2i + 2) < M_2(2i + 2)}, \eqn{P(T_{2i+1} = 1)= q}; #' #'if \eqn{ M_1(2i + 2) > M_2(2i + 2)}, \eqn{P(T_{2i+1} = 1)= 1-q}; #' #'if \eqn{ M_1(2i + 2) = M_2(2i + 2)}, \eqn{P(T_{2i+1} = 1)= 0.5}. #' #' #' (5) Repeat the last two steps until all units are assigned. If \eqn{n} is odd, #' assign the last unit to the two treatments with equal probabilities. #' #'The Mahalanobis distance \eqn{M(n)} between the sample means across #'the two treatment groups is: #' #'\deqn{M(n)= np(1-p)(\hat{x_1} - \hat{x_2})^T cov(x)^{-1}(\hat{x_1} - \hat{x_2})} #' #' #'See the reference for more details. #' #' #'@return #'An object of class "ARM" is a list containing the following components: #'\item{assignment}{Allocation of patients.} #'\item{sample_size}{The number of patients in treatment 1 and treatment 2, respectively.} #'\item{Mahalanobis_Distance}{Mahalanobis distance between treatment groups 1 and 2.} #' #' #'@references Qin, Y., Y. Li, W. Ma, H. Yang, and F. Hu (2022). Adaptive randomization via Mahalanobis distance. Statistica Sinica. DOI:<10.5705/ss.202020.0440>. 
#'@examples
#'library(MASS)
#'#simulate covariates of patients
#'p <- 6; n <- 30
#'sigma <- diag(p); mean <- c(rep(0,p))
#'data <- mvrnorm(n, mean, sigma)
#'covariate <- as.data.frame(data)
#'#if none of the patients have been allocated yet
#'ARM(covariate = covariate, assignment = NA, q = 0.75)
#'#if some patients have already been allocated
#'ARM(covariate = covariate, assignment = c(1,2), q = 0.75)
ARM <- function(covariate, assignment, q = 0.75){
  K = 2
  method = 'none'
  n <- nrow(covariate)
  p <- ncol(covariate)
  # build the assignment column: either all missing, or the partial allocation
  # padded with NAs for the patients still to be assigned
  if (is.na(assignment)[1]){
    assignment <- data.frame(assignment = rep(NA, n))
  } else{
    aln <- length(assignment)
    assignment <- data.frame(assignment = c(assignment, rep(NA, n - aln)))
  }
  assigndata <- cbind(data.frame(assignment), data.frame(covariate))
  names(assigndata)[1] <- 'assignment'
  # move the already allocated patients to the top of the data set
  assigndata <- dplyr::arrange(assigndata, is.na(assigndata$assignment))
  if (nrow(assigndata[is.na(assigndata$assignment),]) == n) {
    # no patient allocated yet: assign the first K = 2 units deterministically
    assigndata$assignment <- c(seq(1, K, 1), rep(NA, (n - K)))
    assigndata <- circle_random(assigndata, K, p, q, method, n)
  } else {
    # some patients allocated: make sure both treatments appear at least once
    noassign <- setdiff(seq(1, K, 1), unique(na.omit(assigndata$assignment)))
    if (length(noassign) == 0){
      assigndata <- circle_random(assigndata, K, p, q, method, n)
    } else {
      assigndata$assignment <- c(assigndata[!is.na(assigndata$assignment), 'assignment'], noassign,
                                 rep(NA, (n - length(assigndata[!is.na(assigndata$assignment), 'assignment']) -
                                            length(noassign))))
      assigndata <- circle_random(assigndata, K, p, q, method, n)
    }
  }
  R = NULL
  R$assignment <- assigndata[, 1]
  R$sample_size <- as.data.frame(assigndata %>% group_by(assignment) %>% count(assignment))
  R$Mahalanobis_Distance <- pairwise_dis(assigndata, p, K, method)
  return(R)
}
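# A minimal standalone sketch (not part of the package API) of the imbalance
# measure that ARM() works to minimise: the Mahalanobis distance between the
# two treatment groups' covariate means, computed here for a hypothetical
# completed allocation with the same 2n/K^2 scaling used in pairwise_dis().
library(MASS)                                        # for mvrnorm() and ginv()
set.seed(1)
x <- mvrnorm(30, mu = rep(0, 3), Sigma = diag(3))    # 30 patients, 3 covariates
g <- rep(1:2, 15)                                    # a hypothetical 1:1 allocation
u <- colMeans(x[g == 1, ]) - colMeans(x[g == 2, ])   # difference of group means
M <- (2 * nrow(x) / 2^2) * t(u) %*% ginv(cov(x)) %*% u
M   # smaller values indicate better covariate balance between the two arms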
/scratch/gouwar.j/cran-all/cranData/CARM/R/ARM.R
#'@import arrangements
#'@import dplyr
#'@importFrom MASS ginv
#'@importFrom stats median
#'@importFrom stats na.omit
#'@importFrom stats var
#'@export
#'@title Adaptive Randomization via Mahalanobis distance for Multi-arm design
#'
#'@description Randomize patients into treatment groups
#'for multi-arm trials using ARMM proposed by Haoyu Yang, Yichen Qin,
#'Yang Li, Fan Wang, and Feifang Hu (2022).
#'
#'@param covariate a data frame. A row of the data frame
#'corresponds to the covariate profile of a patient.
#'@param assignment a vector. If some patients have already been allocated,
#'please input their allocation. If none of the patients have been
#'allocated, please input 'assignment = NA' directly.
#'@param K an integer; number of arms of the trial.
#'@param q the biased coin probability.
#'\eqn{q} should be larger than 1/2 and less than 1, default = 0.75
#'@param method Method for summarising the pairwise Mahalanobis distances;
#'input one of these texts: 'mean', 'max' or 'median'.
#'
#'@details
#'Suppose \eqn{n} units (participants) are to be assigned to \eqn{K}
#'treatment groups. For each unit \eqn{i, i = 1, ..., n} and
#'treatment \eqn{j, j = 1, ..., K}, define the assignment
#'matrix \eqn{[T_{ij}]^{n \times K}}, where
#'\eqn{T_{ij}=1} indicates unit \eqn{i} receives treatment \eqn{j}.
#'Consider \eqn{p} continuous covariates, let \eqn{x_i =
#'(x_{i1},...,x_{ip})^T}.
#'
#'The proposed method, namely the adaptive randomization
#'via Mahalanobis distance for multi-arm design (ARMM),
#'is outlined below. The implementation of ARMM is similar to that of ARM.
#'
#'First assume that \eqn{n} units are in a sequence
#'and then assign the first \eqn{K} units to the \eqn{K} treatment
#'groups randomly as the initialization. Then
#'the following units are assigned in blocks of \eqn{K},
#'sequentially and adaptively, until all the units are assigned.
#'When \eqn{K} units are assigned to \eqn{K} groups,
#'there are in total \eqn{K!} possible allocations.
#'Calculate the \eqn{K!} potential overall covariate imbalance measurements
#'according to the pairwise Mahalanobis distances under the
#'\eqn{K!} possible allocations.
#'Choose the allocation which corresponds to the smallest Mahalanobis
#'distance with a probability of \eqn{q} across all potential allocations.
#'Repeat the process until all units are assigned.
#'
#'For any pair of treatments \eqn{s} and \eqn{t} among the \eqn{K}
#'treatment groups, calculate the Mahalanobis distance by:
#'
#'\deqn{M_{s,t}(n) = \frac{2n}{K^2}(\hat{x}_s - \hat{x}_t)^T cov(x)^{-1}(\hat{x}_s - \hat{x}_t)}
#'
#'In total, there are \eqn{C_K^2} pairs of Mahalanobis
#'distances among the \eqn{K} treatment groups. Finally, calculate
#'the mean, the median or the maximum to represent the total imbalance.
#'
#'See the reference for more details.
#'
#'@return
#'An object of class "ARMM" is a list containing the following components:
#'\item{assignment}{Allocation of patients.}
#'\item{sample_size}{The number of patients from treatment 1 to treatment \eqn{K} respectively.}
#'\item{Mahalanobis_Distance}{Mahalanobis distance among treatment groups.}
#'
#'@references Yang H, Qin Y, Wang F, et al. Balancing covariates in multi-arm
#'trials via adaptive randomization. Computational Statistics & Data Analysis,
#'2023, 179: 107642. https://doi.org/10.1016/j.csda.2022.107642
#'@examples
#'library(MASS)
#'#simulate covariates of patients
#'p <- 6; n <- 30
#'sigma <- diag(p); mean <- c(rep(0,p))
#'data <- mvrnorm(n, mean, sigma)
#'covariate <- as.data.frame(data)
#'#if none of the patients have been allocated yet
#'ARMM(covariate = covariate, assignment = NA, K = 3, q = 0.75, method = 'mean')
#'#if some patients have already been allocated
#'ARMM(covariate = covariate, assignment = c(1,2), K = 4, q = 0.75, method = 'max')
ARMM <- function(covariate, assignment, K, q = 0.75, method){
  n <- nrow(covariate)
  p <- ncol(covariate)
  # build the assignment column: either all missing, or the partial allocation
  # padded with NAs for the patients still to be assigned
  if (is.na(assignment)[1]){
    assignment <- data.frame(assignment = rep(NA, n))
  } else{
    aln <- length(assignment)
    assignment <- data.frame(assignment = c(assignment, rep(NA, n - aln)))
  }
  assigndata <- cbind(data.frame(assignment), data.frame(covariate))
  names(assigndata)[1] <- 'assignment'
  # move the already allocated patients to the top of the data set
  assigndata <- dplyr::arrange(assigndata, is.na(assigndata$assignment))
  if (nrow(assigndata[is.na(assigndata$assignment),]) == n) {
    # no patient allocated yet: assign the first K units deterministically
    assigndata$assignment <- c(seq(1, K, 1), rep(NA, (n - K)))
    assigndata <- circle_random(assigndata, K, p, q, method, n)
  } else {
    # some patients allocated: make sure every treatment appears at least once
    noassign <- setdiff(seq(1, K, 1), unique(na.omit(assigndata$assignment)))
    if (length(noassign) == 0){
      assigndata <- circle_random(assigndata, K, p, q, method, n)
    } else {
      assigndata$assignment <- c(assigndata[!is.na(assigndata$assignment), 'assignment'], noassign,
                                 rep(NA, (n - length(assigndata[!is.na(assigndata$assignment), 'assignment']) -
                                            length(noassign))))
      assigndata <- circle_random(assigndata, K, p, q, method, n)
    }
  }
  R = NULL
  R$assignment <- assigndata[, 1]
  R$sample_size <- as.data.frame(assigndata %>% group_by(assignment) %>% count(assignment))
  R$Mahalanobis_Distance <- pairwise_dis(assigndata, p, K, method)
  return(R)
}
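# A minimal sketch (hypothetical numbers, not package internals) of the two
# ingredients behind ARMM(): the K! candidate allocations scored for each
# incoming block of K patients, and how the 'method' argument collapses the
# choose(K, 2) pairwise distances into a single imbalance measure.
library(arrangements)
K <- 3
permutations(K)                  # each row is one candidate assignment of the next K patients
pairwise <- c(0.8, 1.4, 0.6)     # hypothetical distances for the pairs (1,2), (1,3), (2,3)
c(mean = mean(pairwise), max = max(pairwise), median = median(pairwise))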
/scratch/gouwar.j/cran-all/cranData/CARM/R/ARMM.R
#'@import arrangements #'@importFrom MASS ginv #'@importFrom stats median #'@importFrom stats na.omit #'@importFrom stats var #'@import dplyr pairwise_dis<-function(assigndata,p,K,method){ dis<-dis_K<-NULL for (s in 1:K) { for (t in 1:K) { if (t>s) { pairwise<-assigndata[which(!is.na(assigndata$assignment)),] if (p>1){ u<-colMeans((pairwise[which(pairwise$assignment==s),-1]))- colMeans(as.matrix(pairwise[which(pairwise$assignment==t),-1])) cov<-cov(as.matrix(pairwise[,-1]))} else{ u<-mean((pairwise[which(pairwise$assignment==s),-1]))- mean(as.matrix(pairwise[which(pairwise$assignment==t),-1])) cov<-var(as.matrix(pairwise[,-1]))} dis<-2/K/K*nrow(pairwise)*(t(u)%*%ginv(cov)%*%u) dis_K<-c(dis_K,dis) } } } if (method=='mean'){ all_dis<-mean(dis_K) } if(method=='max'){ all_dis<-max(dis_K) } if(method=='median'){ all_dis<-median(dis_K) } if(method=='none'){ all_dis<-dis_K } return(all_dis) } circle_random<-function(assigndata,K,p,q,method,n){ MM<-NULL ss=permutations(K) had<-sum(!is.na(assigndata$assignment)) no<-sum(is.na(assigndata$assignment)) if (no %% K == 0){ for (i in 1:(no/K)) { for (j in 1:factorial(K)) { assigndata$assignment[(K*(i-1)+1+had):(K*i+had)]<-ss[j,] MM[j]=pairwise_dis(assigndata,p,K,method) } u2=NULL for (u1 in 1:factorial(K)) { if(min(MM)==MM[u1]) { u2=c(u2,u1) } } u=min(u2) c=rep(NA,factorial(K)) c[u]=q c[-u]=(1-q)/(factorial(K)-1) x=sample(1:factorial(K),1,prob=c) assigndata$assignment[(K*(i-1)+1+had):(K*i+had)]<-ss[x,] } } else { remainder<-no %% K for (i in 1:((no-remainder)/K)) { for (j in 1:factorial(K)) { assigndata$assignment[(K*(i-1)+1+had):(K*i+had)]<-ss[j,] MM[j]=pairwise_dis(assigndata,p,K,method) } u2=NULL for (u1 in 1:factorial(K)) { if(min(MM)==MM[u1]) { u2=c(u2,u1) } } u=min(u2) c=rep(NA,factorial(K)) c[u]=q c[-u]=(1-q)/(factorial(K)-1) x=sample(1:factorial(K),1,prob=c) assigndata$assignment[(K*(i-1)+1+had):(K*i+had)]<-ss[x,] } for (r in (n-remainder+1):n) { assigndata$assignment[r]<-sample(c(1:K),prob=c(rep(1/K,K)),1,replace=TRUE) } } return(assigndata) }
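# A minimal sketch of the biased-coin step inside circle_random(): the candidate
# allocation with the smallest imbalance gets probability q, and the remaining
# factorial(K) - 1 candidates share the probability 1 - q equally (ties go to the
# first minimiser, as in the function above). The numbers below are hypothetical.
K <- 3; q <- 0.75
MM <- c(1.9, 0.7, 1.3, 2.2, 0.9, 1.6)              # imbalance of the factorial(K) candidates
probs <- rep((1 - q) / (factorial(K) - 1), factorial(K))
probs[which.min(MM)] <- q
sample(seq_len(factorial(K)), 1, prob = probs)     # index of the allocation actually used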
/scratch/gouwar.j/cran-all/cranData/CARM/R/inner_function.R
#' CARM: Covariate-adjusted Adaptive Randomization via Mahalanobis-distance
#'
#' The CARM package provides functions implementing covariate-adjusted
#' adaptive randomization:
#'
#' @section ARM functions:
#' please use ?ARM and ?ARMM to view function usage
#'
#' @docType package
#' @name CARM-package
NULL
/scratch/gouwar.j/cran-all/cranData/CARM/R/package.R
#' The 'CARME' package. #' #' @description CAR-MM modelling in Stan #' #' @docType package #' @name CARME-package #' @aliases CARME #' @useDynLib CARME, .registration = TRUE #' @import methods #' @import Rcpp #' @importFrom rstan sampling #' #' @references #' Stan Development Team (2023). RStan: the R interface to Stan. R package version #' 2.26.11. https://mc-stan.org #' #' Marco Gramatica. Silvia Liverani. Peter Congdon. #' Structure Induced by a Multiple Membership Transformation on the Conditional #' Autoregressive Model. Bayesian Analysis Advance Publication 1 - 25, 2023. #' https://doi.org/10.1214/23-BA1370 #' #' Petrof, O, Neyens, T, Nuyts, V, Nackaerts, K, Nemery, B, Faes, C. On the #' impact of residential history in the spatial analysis of diseases with a #' long latency period: A study of mesothelioma in Belgium. #' Statistics in Medicine. 2020; 39: 3840– 3866. #' https://doi.org/10.1002/sim.8697 #' #' Marco Gramatica, Peter Congdon, Silvia Liverani, Bayesian Modelling for #' Spatially Misaligned Health Areal Data: A Multiple Membership Approach, #' Journal of the Royal Statistical Society Series C: Applied Statistics, #' Volume 70, Issue 3, June 2021, Pages 645–666, #' https://doi.org/10.1111/rssc.12480 #' NULL
/scratch/gouwar.j/cran-all/cranData/CARME/R/CARME-package.R
#' Adjacency matrix for the South East London set of MSOAs #' #' @description Adjacency matrix of 152 MSOAs in South East London, used for the data #' analysis in the paper "Structure induced by a multiple membership #' transformation on the Conditional Autoregressive model". Column and rows #' names indicate the MSOA code. #' @docType data #' @keywords datasets #' @usage data(W_sel) #' @format A 152x152 symmetric matrix #' @references Marco Gramatica. Silvia Liverani. Peter Congdon. #' "Structure Induced by a Multiple Membership Transformation on the Conditional #' Autoregressive Model." Bayesian Analysis Advance Publication 1 - 25, 2023. #' https://doi.org/10.1214/23-BA1370 "W_sel"
/scratch/gouwar.j/cran-all/cranData/CARME/R/W_sel.R
#' CAR-MM prior model #' #' @export #' @param d_list List of data inputs for the stan model. #' @param ... Arguments passed to `rstan::sampling` (e.g. iter, chains). #' @return An object of class `stanfit` returned by `rstan::sampling` #' #' @references #' { #' Marco Gramatica. Silvia Liverani. Peter Congdon. #' "Structure Induced by a Multiple Membership Transformation on the Conditional #' Autoregressive Model." Bayesian Analysis Advance Publication 1 - 25, 2023. #' https://doi.org/10.1214/23-BA1370 #' #' Petrof, O, Neyens, T, Nuyts, V, Nackaerts, K, Nemery, B, Faes, C. On the #' impact of residential history in the spatial analysis of diseases with a #' long latency period: A study of mesothelioma in Belgium. #' Statistics in Medicine. 2020; 39: 3840– 3866. #' https://doi.org/10.1002/sim.8697 #' } #' #' @examples #'\donttest{ #' set.seed(455) #' #' #---- Load data #' data(W_sel) #' ## Number of areas #' n <- nrow(W_sel) #' ## Number of memberships #' m <- 153 #' #' #---- Simulate covariates #' X <- cbind(rnorm(nrow(W_sel)), rnorm(nrow(W_sel))) #' ## Min-max normalisation #' X_cent <- apply(X, 2, function(x) (x - min(x))/diff(range(x))) #' #' #---- Simulate MM matrix #' w_ord <- c(.5, .35, .15) # Weight of each neighbours orders #' ord <- length(w_ord) - 1 # Order of neighbours to include #' H_sel_sim <- sim_MM_matrix( #' W = W_sel, m = m, ord = ord, w_ord = w_ord, id_vec = rep(1, nrow(W_sel)) #' ) #' #' #---- Simulate outcomes #' ## Linear term parameters #' gamma <- -.5 # Intercept #' beta <- c(1, .5) # Covariates coefficients #' ## CAR random effects #' phi_car <- sim_car(W = W_sel, alpha = .9, tau = 5) #' # Areal log relative risks #' l_RR <- X_cent %*% beta + phi_car #' ## Membership log relative risks #' l_RR_mm <- as.numeric(apply(H_sel_sim, 1, function(x) x %*% l_RR)) #' ## Expected rates #' exp_rates <- rpois(m, lambda = 20) #' ## Outcomes #' y <- rpois(m, lambda = exp_rates*exp(l_RR_mm)) #' #' #---- Create dataset for stan function #' d_sel <- list( #' # Number of areas #' n = nrow(W_sel), #' # Covariates #' k = ncol(X_cent), #' X_cov = X_cent, #' # Adjacency #' W_n = sum(W_sel) / 2, #' # Number of neighbour pairs #' W = W_sel, #' # Memberships #' m = nrow(H_sel_sim), #' H = H_sel_sim, #' # Outcomes #' y = y, #' log_offset = log(exp_rates), #' # Prior parameters #' ## Intercept (mean and sd of normal prior) #' mu_gamma = 0, sigma_gamma = 1, #' ## Covariates (mean and sd of normal prior) #' mu_beta = 0, sigma_beta = 1, #' ## Marginal precision gamma prior #' tau_shape = 2, #' tau_rate = 0.2 #' ) #' #' #---- HMC parameters #' niter <- 1E4 #' nchains <- 4 #' #' #---- Stan sampling #' fit <- car_mm( #' d_list = d_sel, #' # arguments passed to sampling #' iter = niter, chains = nchains, refresh = 500, #' control = list(adapt_delta = .99, max_treedepth = 15) #' ) #' #'} car_mm <- function(d_list, ...) { out <- rstan::sampling(stanmodels$CARMM_COV_P, data = d_list, ...) return(out) }
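# A brief follow-up sketch for the example above (left commented out because it
# needs the long-running `fit` object): the returned stanfit can be inspected
# with the standard rstan tools. The parameter names quoted here are an
# assumption about the CARMM_COV_P model and may differ.
# print(fit, pars = c("gamma", "beta", "tau"))
# post <- rstan::extract(fit)
# round(rstan::summary(fit)$summary[1:10, c("mean", "2.5%", "97.5%", "Rhat")], 3)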
/scratch/gouwar.j/cran-all/cranData/CARME/R/car_mm.R
#' Helper function for MM matrix simulation #' #' @keywords internal #' @references Marco Gramatica. Silvia Liverani. Peter Congdon. #' "Structure Induced by a Multiple Membership Transformation on the Conditional #' Autoregressive Model." Bayesian Analysis Advance Publication 1 - 25, 2023. #' https://doi.org/10.1214/23-BA1370 #' #' @import expm #' @import stats f_mm_high_ord <- function( W, ord, mat_ord_weight, ind_range ){ n <- nrow(W) # Create base matrix of weights weight_ext <- matrix(0, ncol = n, nrow = length(ind_range)) # Compute necessary multiple order of neighbours matrices W_cml <- W for(i in 3:(ord+1)){ # Starts from 3 bc: 1=identical order ; 2=first order neigh # Name for i-th order neighbour matrix # name_ind <- paste("W_", i, sep = "") # Compute all areas reachable in EXACTLY i-1 steps W_cml_current <- W %^% (i-1) ## Transform in adj matrix W_cml_current[W_cml_current != 0] <- 1 ## Remove unwanted diagonal diag(W_cml_current) <- rep(0, nrow(W_cml_current)) # Find exactly i-th order neighbour W_now <- W_cml_current - W_cml ## Adjust matrix W_now[W_now < 0] <- 0 W_now[W_now != 0] <- 1 # Simulate weights for current neighbour order ## Simulate random weights W_now[which(W_now != 0)] <- runif(length(which(W_now != 0))) for(j in ind_range){ W_now[j, ] <- (W_now[j,]/sum(W_now[j,]))*mat_ord_weight[j,i] } # Save global matrix weight_ext <- W_now[ind_range,] + weight_ext # Move one order forward W_cml <- W_cml_current + W_cml W_cml[W_cml != 0] <- 1 # Control when graph is fully connected if(length(which(W %^% (i-1) > 1)) == n^2) break } return(weight_ext) }
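# A minimal standalone sketch (toy adjacency, not package data) of the
# matrix-power trick used in f_mm_high_ord(): areas reachable in exactly two
# steps are those reachable in at most two steps that are neither first-order
# neighbours nor the area itself.
library(expm)
W_toy <- matrix(c(0,1,0,0,
                  1,0,1,0,
                  0,1,0,1,
                  0,0,1,0), 4, 4, byrow = TRUE)   # a path graph on 4 areas
W2 <- W_toy %^% 2
W2[W2 != 0] <- 1
diag(W2) <- 0
pmax(W2 - W_toy, 0)   # exactly second-order neighbours: (1,3), (2,4) and symmetric entries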
/scratch/gouwar.j/cran-all/cranData/CARME/R/f_mm_high_ord.R
#' Simulation of MM matrix based #' #' @export #' @description #' `sim_MM_matrix` returns a multiple membership matrix simulated based on an #' adjacency matrix according to the method described in #' #' @param W Symmetric adjacency matrix of size `n` #' @param m Integer. Number of membership to simulate #' @param ord Integer. Maximum order of neighbours to be used to simulate the #' memberships based on the adjacency matrix `W` #' @param w_ord A vector of length `ord` that specifies the weights of each #' order of neighbours #' @param id_vec Vector of zeros and ones of length `n`. Defaults to #' a vector of ones. It indicates whether an area is included in the #' simulation of a membership #' @param excess_areas if different from FALSE it indicates the indices of the #' areas to reuse in simulating memberships, whenever `m` > `n`. It defaults to #' FALSE, and if omitted randomly selects without replacement #' (if `m` - `n` <= `n`, otherwise with replacement) a subset of areas #' @param red_areas vector of indices of areas to use if `m` < `n` #' #' @references Marco Gramatica. Silvia Liverani. Peter Congdon. #' "Structure Induced by a Multiple Membership Transformation on the Conditional #' Autoregressive Model." Bayesian Analysis Advance Publication 1 - 25, 2023. #' https://doi.org/10.1214/23-BA1370 #' #' @return an m x n matrix of weights #' @import expm #' @import stats #' #' @examples #' set.seed(455) #' #' #---- Load data #' data(W_sel) #' ## Number of areas #' n <- nrow(W_sel) #' ## Number of memberships #' m <- 153 #' #' #---- Simulate MM matrix #' w_ord <- c(.5, .35, .15) # Weight of each neighbours orders #' ord <- length(w_ord) - 1 # Order of neighbours to include #' H_sel_sim <- sim_MM_matrix( #' W = W_sel, m = m, ord = ord, w_ord = w_ord, id_vec = rep(1, nrow(W_sel)) #' ) sim_MM_matrix <- function( W, m, ord = 3, w_ord, id_vec, excess_areas = FALSE, red_areas ){ # Number of neighbours n <- nrow(W) ; if(n < 4) stop("Too few areas") # Create matrix of weights # weight_ext <- matrix(0, ncol = n, nrow = n) ### Create weights for each new membership ### If the area identically included in the membership does not renormalise ### otherwise it will have to # General checks for weights ## First order (is area identically included in the membership?) if(exists("id_vec") == FALSE) id_vec <- rep(1, n) ## Length of neighbour order weights vector if(length(w_ord) < (ord + 1)) stop("Not enough weights") ## Force 0 on weight for identical area weight if necessary if(sum(id_vec) == 0) w_ord[1] <- 0 ## Check weights sum to 1 if(sum(w_ord[1:(ord+1)]) != 1){ warning( "Neighbour weights do not sum to one. 
Function will normalise and proceed" ) w_ord <- abs(w_ord)/sum(w_ord) } # Identical area inclusion weight weights_W1 <- diag(id_vec)*w_ord[1] # Normalise weights for membership without identical area ## Create register of weights for higher orders mat_ord_weight <- cbind( id_vec*w_ord[1], # Weights of identical order matrix(rep(w_ord[-1], n), nrow = n, ncol = length(w_ord[-1]), byrow = T) ) ## Normalise register mat_ord_weight <- mat_ord_weight/rowSums(mat_ord_weight) # Compute necessary multiple order of neighbours matrices W_cml <- W weight_ext <- f_mm_high_ord( W, ord = ord, mat_ord_weight = mat_ord_weight, ind_range = 1:n ) # Add Identical neighbours and first order ## First order neigh weights_W2 <- W weights_W2[which(weights_W2 != 0)] <- runif(length(which(weights_W2 != 0))) for(j in 1:n){ weights_W2[j, ] <- (weights_W2[j,]/sum(weights_W2[j,]))*mat_ord_weight[j,2] } ## Finalise weight_ext <- weight_ext + weights_W1 + weights_W2 ; rowSums(weight_ext) ## Check if(sum(rowSums(weight_ext)) != n){ stop("Weights of weight_ext do not sum to 1") } # More MEMBERSHIPS than AREAS (m > n) if(m > n){ # Check if(missing(excess_areas)){ if(m - n > n){ warning("m - n > n, so excess areas sampled with replacement") excess_areas <- sample(1:n, m - n, replace = T) } else{ excess_areas <- sample(1:n, m - n) } } # Create additional matrix ## Higher order excess_weight_high <- f_mm_high_ord( W, ord = ord, mat_ord_weight = mat_ord_weight, ind_range = excess_areas) ## First order excess_weight_2 <- W excess_weight_2[which(excess_weight_2 != 0)] <- runif(length(which(excess_weight_2 != 0))) for(j in excess_areas){ excess_weight_2[j, ] <- (excess_weight_2[j,]/sum(excess_weight_2[j,]))*mat_ord_weight[j,2] } ## Subset to excess areas in second order excess_weight_2 <- excess_weight_2[excess_areas,] ## Identical areas excess_weight_1 <- matrix(0, nrow = length(excess_areas), ncol = n) excess_weight_1[cbind(1:length(excess_areas), excess_areas)] <- w_ord[1] ## Finalise excess_weight <- excess_weight_1 + excess_weight_2 + excess_weight_high # Final save weight_ext <- rbind(weight_ext, excess_weight) ## Check if(sum(rowSums(weight_ext)) != m){ stop("Weights of weight_ext do not sum to 1") } } # More AREAS than MEMBERSHIPS (m < n) if(m < n){ weight_ext <- weight_ext[sample(1:n, m), ] } # Return values # output <- list( # weight_ext = weight_ext, # w_ord = w_ord, # id_vec = id_vec, # weights_W1 = weights_W1, # weights_W2 = weights_W2, # W_cml = W_cml # ) return(weight_ext) }
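# A short follow-up check for the example above (assuming H_sel_sim from the
# @examples block): every row of the simulated multiple membership matrix is a
# set of weights over the n areas and should sum to 1.
# dim(H_sel_sim)              # m x n
# range(rowSums(H_sel_sim))   # expect both values to be 1, up to floating-point error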
/scratch/gouwar.j/cran-all/cranData/CARME/R/sim_MM_matrix.R
#' Simulation of proper CAR random effects
#'
#' @export
#' @description
#' `sim_car` returns a vector of CAR distributed random effects
#'
#' @param W Symmetric adjacency matrix of size `n`
#' @param alpha properness parameter between 0 and 1. Defaults to 0.5
#' @param tau marginal precision. Defaults to 5
#'
#' @references Jin, X., Carlin, B.P. and Banerjee, S. (2005), Generalized
#' Hierarchical Multivariate CAR Models for Areal Data. Biometrics,
#' 61: 950-961. https://doi.org/10.1111/j.1541-0420.2005.00359.x
#'
#' @import MASS
#' @return a vector of length `n`
#' @examples
#'data(W_sel)
#'sim_car(W = W_sel, alpha = .9, tau = 5)
sim_car <- function(W, alpha = .5, tau = 5){
  # Number of neighbours per area
  D <- diag(rowSums(W))
  # Covariance matrix: inverse of the CAR precision tau * (D - alpha * W)
  Q <- solve(tau*(D - alpha*W))
  # Generate phi's
  phi <- MASS::mvrnorm(n = 1, mu = rep(0, nrow(W)), Sigma = Q)
  return(phi)
}
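# A minimal sketch of why the proper CAR in sim_car() is well defined: for
# 0 <= alpha < 1, and provided every area has at least one neighbour, the
# precision matrix tau * (D - alpha * W) is strictly diagonally dominant and
# hence positive definite, so the covariance used for the multivariate normal
# draw exists.
data(W_sel)
alpha <- 0.9; tau <- 5
D <- diag(rowSums(W_sel))
Q_prec <- tau * (D - alpha * W_sel)
min(eigen(Q_prec, symmetric = TRUE, only.values = TRUE)$values) > 0   # TRUE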
/scratch/gouwar.j/cran-all/cranData/CARME/R/sim_car.R
# Generated by rstantools. Do not edit by hand. # names of stan models stanmodels <- c("CARMM_COV_P") # load each stan module Rcpp::loadModule("stan_fit4CARMM_COV_P_mod", what = TRUE) # instantiate each stanmodel object stanmodels <- sapply(stanmodels, function(model_name) { # create C++ code for stan model stan_file <- if(dir.exists("stan")) "stan" else file.path("inst", "stan") stan_file <- file.path(stan_file, paste0(model_name, ".stan")) stanfit <- rstan::stanc_builder(stan_file, allow_undefined = TRUE, obfuscate_model_name = FALSE) stanfit$model_cpp <- list(model_cppname = stanfit$model_name, model_cppcode = stanfit$cppcode) # create stanmodel object methods::new(Class = "stanmodel", model_name = stanfit$model_name, model_code = stanfit$model_code, model_cpp = stanfit$model_cpp, mk_cppmodule = function(x) get(paste0("rstantools_model_", model_name))) })
/scratch/gouwar.j/cran-all/cranData/CARME/R/stanmodels.R
#'Turning a non-numeric variable into a numeric one #' #'Function which turns a single categorical (non-numeric) variable into a numeric one (or several) by introducing dummy '0'/'1' variables. #' #'@param vari array of values to be transformed #'@param outcome TRUE/FALSE indicates whether the variable \code{vari} is an outcome (TRUE) or a predictor (FALSE) #'@param ra indices of the input array \code{vari} which indicate which values will be transformed #'@param mode \code{'binary'} (logistic regression), \code{'multin'} (multinomial regression) #'@usage make_numeric(vari, outcome, ra,mode) #'@return Returned value is an M x N matrix where M is the length of the input array of indices \code{ra} and N is \code{length(vari)-1}. #'@details This function is essentially a standard way to turn categorical non-numeric variables into numeric ones in order to run a regression #'@export make_numeric #'@examples #'#creating a non-numeric set #' #'a<-t(rmultinom(100,1,c(0.2,0.3,0.5)))%*%c(1,2,3) #' #'a[a==1]='red' #'a[a==2]='green' #'a[a==3]='blue' #' #'#running the function #' #'make_numeric(a,FALSE,sample(1:100,50),"linear") #' #'make_numeric(a,TRUE,sample(1:100,50)) make_numeric<-function(vari,outcome,ra,mode=NULL){ if (outcome==TRUE){ #turning non-numeric outcome into a numeric one un=unique(vari); #finding unique elements of the array of outcomes lun=length(un); #number of unique elements lunu<-array(NA,lun) #an empty array of the corresponding length #counting the number of occurrences of each outcome and writing it into the array initialised above for (i in 1:lun){ lunu[i]=length(vari[vari==un[i]]); } #creating an order array corresponding to the array of counts o<-order(lunu); #creating a numeric array for output vari1<-as.numeric(as.factor(vari)); # assigning values from 0 to lun-1 to the values of the array above in the descending order where 0 corresponds to the most frequent one and lun-1 to the least frequent one for (j in 1:lun){ vari1[vari==un[o[j]]]=lun-j; } #numeric output of the function as.numeric(vari1); } else{ #turning non-numeric variable into a numeric one if (mode=='linear'){ vari1<-model.matrix(~vari-1) vari1[ra,2:(dim(vari1)[2])]; } else{ un=unique(vari); #similar to the outcome case lun=length(un); #similar to the outcome case lunu<-array(NA,lun) #similar to the outcome case for (i in 1:lun){ lunu[i]=length(vari[vari==un[i]]); #similar to the outcome case } o<-order(lunu); #similar to the outcome case vari1<-matrix(0,length(vari),lun-1) #creating a matrix of zeros to be turned into lun-1 dummy variables for (i in 1:lun-1){ vari1[which(vari==un[o[i]]),i]=1; #assigning values 1 to different columns of the matrix depending on the value of the variable } #lun-1 dummy variables as an output vari1[ra,]; } } } #'Transforming the set of predictors into a numeric set #' #'Function which turns a set of predictors containing non-numeric variables into a fully numeric set #' #'@param a An M x N matrix, containing all possible subsets (N overall) of the size M of predictors' indices; therefore each column of \code{a} defines a unique subset of the predictors #'@param ai array of indices of the array \code{a} #'@param k index of the array \code{ai} #'@param vari set of all predictors #'@param ra array of sample indices of \code{vari} #'@param l size of the sample #'@param mode \code{'binary'} (logistic regression), \code{'multin'} (multinomial regression) #'@usage make_numeric_sets(a,ai,k,vari,ra,l,mode) #'@return Returns a list containing two objects: \code{tr} and \code{test}\cr 
#'\item{tr}{training set transformed into a numeric one} #'\item{test}{test set transformed into a numeric one} #'@details Function transforms the whole set of predictors into a numeric set by consecutively calling function \code{make_numeric} for each predictor #'@seealso \code{\link{make_numeric}} #'@export make_numeric_sets #'@examples #'#creating a categorical numeric variable #' #'a<-t(rmultinom(100,1,c(0.2,0.3,0.5)))%*%c(1,2,3) #' #'#creating an analogous non-numeric variable #' #'c<-array(NA,100) #'c[a==1]='red' #'c[a==2]='green' #'c[a==3]='blue' #' #'#creating a data-set #' #'b<-data.frame(matrix(c(a,rbinom(100,1,0.3),runif(100,0,1)),ncol=3)) #' #'#making the first column of the data-set non-numeric #' #'b[,1]=data.frame(c) #' #'#running the function #' #'make_numeric_sets(combn(3,2),1:3,1,b,sample(1:100,60),100,"binary") make_numeric_sets<-function(a,ai,k,vari,ra,l,mode){ #initialialising arrays of test and training sets testset1<-array(NA,0) trset1<-array(NA,0) #going through the indices of the corresponding set of predictors for (m in 1:length(a[,ai[k]])){ #turning the non-numeric variable into numeric if (is.numeric(vari[,a[m,ai[k]]])==FALSE){ #turning a non-numeric variable into numeric #performing this operation for the training and test set anum1<-make_numeric(vari[,a[m,ai[k]]],FALSE,ra,mode) anum<-make_numeric(vari[,a[m,ai[k]]],FALSE,setdiff(1:l,ra),mode) #adding the transformed variable to the existing test and training sets on the left testset1<-cbind(testset1,anum) trset1<-cbind(trset1,anum1) } else{ #if the variable is already numeric we simply add in on the left of the existing test and training set testset1<-cbind(testset1,vari[setdiff(1:l,ra),a[m,ai[k]]]) trset1<-cbind(trset1,vari[ra,a[m,ai[k]]]) } } #output the transformed test and training sets list("test" = testset1, "tr" = trset1) } #'Weights of predictors #' #'Function which computes the weight of each predictor according to the rules of thumb and outputs it into corresponding array #' #'@param vari_col number of predictors #'@param vari set of predictors #'@details Continuous or categorical numerical variable with more then 5 categories has weight 1, otherwise it has weight \code{n-1} where \code{n} is the number of categories #'@return Returns an array of weights of the size \code{vari_col} #'@usage compute_weights(vari_col, vari) #'@export compute_weights #'@references{ #'\insertRef{ref1}{CARRoT} #'} #'@references{ #'\insertRef{ref2012-18631-001}{CARRoT} #'} #'@importFrom Rdpack reprompt #'@examples #'#creating data-set with for variables #' #'a<-matrix(NA,nrow=100,ncol=4) #' #'#binary variable #' #'a[,1]=rbinom(100,1,0.3) #' #'#continuous variable #' #'a[,2]=runif(100,0,1) #' #'#categorical numeric with les than 5 categories #' #'a[,3]=t(rmultinom(100,1,c(0.2,0.3,0.5)))%*%c(1,2,3) #' #'#categorical numeric with 5 categories #' #'a[,4]=t(rmultinom(100,1,c(0.2,0.3,0.3,0.1,0.1)))%*%c(1,2,3,4,5) #' #'#running the function #' #'compute_weights(4,a) compute_weights<-function(vari_col,vari){ #initialising and empty array of weights we<-matrix(nrow=0,ncol=1); #going through all the predictive variables for (i in 1:vari_col){ #if the variable is numeric it has a weight 1 if (is.numeric(vari[,i])==TRUE) { we<-array(c(we,1)) } else{ #otherwise it has a weight of the number of categories minus one we<-array(c(we,length(unique(vari[,i]))-1)); } } #outputting the array of weights we } #'Maximum feasible weight of the predictors #' #'Function which computes maximal weight (multiplied by the corresponding EPV rule) of a 
regression according to the rule of thumb applied to the outcome variable. Weight of a regression equals the sum of weights of its predictors. #' #'@details For continuous outcomes it equals sample size divided by 10, for multinomial it equals the size of the smallest category divided by 10 #'@param outi set of outcomes #'@param mode indicates the mode: 'linear' (linear regression), 'binary' (logistic regression), 'multin' (multinomial regression) #'@usage compute_max_weight(outi,mode) #'@return returns an integer value of maximum allowed weight multiplied by 10 #'@export compute_max_weight #'@examples #'#continuous outcomes #' #' compute_max_weight(runif(100,0,1),'linear') #' #' #binary outcomes #' #' compute_max_weight(rbinom(100,1,0.4),'binary') #'@references{ #'\insertRef{ref1}{CARRoT} #'} #'@importFrom Rdpack reprompt compute_max_weight<-function(outi,mode){ if (mode=='linear') { #if the mode is linear maximal weight multiplied by the corresponding EPV rule is defined by the sample size numr=length(outi) } else{ #otherwise unio<-unique(outi); #find all unique elements of the outcome lo<-array(NA,length(unio)) #initialise an array of the corresponding length for (i in 1:length(unio)) lo[i]=length(which(outi==unio[i])); #count the occurrences of the each type of outcome numr=min(lo); #choose the smallest one to defined the maximal weight multiplied by the corresponding EPV rule } numr #output the obtained value } #'Cumulative weights of the predictors' subsets #' #'Function which computes the sum of predictors' weights for each subset containing a fixed number of predictors #' #'@param a an \code{m} x N matrix, containing all possible subsets (N overall) of the size \code{m} of predictors' indices; therefore each column of \code{a} defines a unique subset of the predictors #'@param m number of elements in each subset of indices #'@param we array of weights of the predictors #'@param st a subset of predictors to be always included into a predictive model #'@usage sum_weights_sub(a,m,we,st) #'@return Returns an array of weights for predictors defined by each colun of the matrix \code{a} #'@export sum_weights_sub #'@examples #'#all two-element subsets of the set 1:3 #' #'a<-combn(3,2) #' #'sum_weights_sub(a,2,c(1,2,1)) sum_weights_sub<-function(a,m,we,st=NULL){ s<-array(NA,ncol(a)); #the array corresponding to the number of the feasible subsets if ((m>1)|(is.null(st)==FALSE)){ #if the size of the subset is greater than 1 or if it consists only of stationary part for (h in 1:ncol(a)){ #print(h) s[h]=sum(we[a[((h-1)*m+1):(h*m)]]); #sum up the corresponding weights } } else s=we; #otherwise the target value is exactly the array of weights #output the value s } #'Finds certain subsets of predictors #' #'Reorders the columns of matrix \code{a} according to the ordered elements of array \code{s} #'@param a A \code{j} x N matrix, containing all possible subsets (N overall) of the size \code{j} of predictors' indices. 
#'@param s array of numbers of the size N #'@param j number of rows in \code{a} #'@param c array of all indices of the predictors #'@param st a subset of predictors to be always included into a predictive model #'@usage find_sub(a,s,j,c,st) #'@return Returns a submatrix of matrix \code{a} which consits of columns determined by the input array \code{s} #'@export find_sub #'@examples #'#all two-element subsets of 1:3 #' #'a<-combn(3,2) #'s<-c(3,2,3) #' #'find_sub(a,s,2,1:3) find_sub<-function(a,s,j,c,st){# if (j==1){ #if all the subsets are of the size 1 a=t(matrix(a[,order(s)])); #transforming array of subsets according to the order of the array s } else{ if (dim(a)[2]==1){ #if there is only one subset a=matrix(a[,order(s)]); } else a=a[,order(s)]; #if there is more than one subset of size larger than 1 } a #outputting the transformed subset } #'Maximum number of the regressions #' #'Function which computes the maximum number of regressions with fixed number of variables based on the rule of thumb #' #'@param vari_col number of predictors #'@param k maximum weight of the predictors #'@param c array of all indices of the predictors #'@param we array of weights of the predictors. Continuous or categorical numerical variable with more then 5 categories has weight 1, otherwise it has weight \code{n-1} where \code{n} is the number of categories #'@param minx minimum number of predictors, 1 by default #'@param maxx maximum number of predictors, total number of variables by default #'@param st a subset of predictors to be always included into a predictive model #'@import utils #'@return Integer correponding to maximum number of regressions of the same size #'@usage compute_max_length(vari_col,k,c,we,minx,maxx,st) #'@seealso Function uses \code{\link[utils]{combn}} #'@export compute_max_length #'@references{ #'\insertRef{ref1}{CARRoT} #'} #'@references{ #'\insertRef{ref2012-18631-001}{CARRoT} #'} #'@importFrom Rdpack reprompt #'@examples #'compute_max_length(4,40,1:4,c(1,1,2,1)) compute_max_length<-function(vari_col,k,c,we,minx=1,maxx=NULL,st=NULL){ # #initialising an array of length of the number of types of regressions subject to parameter k, number of variables,minimal number of variables and maximal number of variables lest<-length(st) c=setdiff(c,st); # minx=max(minx,lest) le<-array(NA,min(min(vari_col,k)-minx+1,maxx-max(minx,lest)+1)) #going through regressions with the number of variables we are willing to consider for (m in max(max(minx,1),lest):min(min(vari_col,k),maxx)){ #print(m) a<-combn(c,m-lest); #all subsets of variables of the size m if (is.null(st)==FALSE) a<-rbind(matrix(st,ncol=dim(a)[2],nrow=lest),a) #compute the weights of each column of a s<-sum_weights_sub(a,m,we,st); #computing the corresponding weights le[m-max(max(minx,1),lest)+1]=length(which(s<=k)); #number of regression of the given size satisfying the weight constraint } max(le) #outputting the size of the largest one } #'Probabilities for multinomial regression #' #'Function which computes probabilities of outcomes on the test set by applying regression parameters inferred by a run on the training set. 
Works for logistic or multinomial regression #'@param trset values of predictors on the training set #'@param testset values of predictors on the test set #'@param outc values of outcomes on the training set #'@param mode \code{'binary'} (logistic regression) or \code{'multin'} (multinomial regression) #'@param Rsq whether R-squared statistics constrained is introduced #'@param p weight of the model #'@param n_tr size of the training set #'@usage get_probabilities(trset,testset,outc,mode,Rsq,p,n_tr) #'@return Probabilities of the outcomes. In \code{'binary'} mode returns an array of the size of the number of observations in a testset. In \code{'multin'} returns an M x N matrix where M is the size of the number of observations in a testset #'and N is the number of unique outcomes minus 1. #'@details In binary mode this function computes the probabilities of the event '0'. In multinomial mode computes the probabilities of the events '0','1',...,'N-1'. #'@seealso Function uses \code{\link[nnet]{multinom}} and \code{\link[stats]{coef}} #'@import stats #'@import nnet #'@export get_probabilities #'@examples #'trset<-matrix(c(rbinom(70,1,0.5),runif(70,0.1)),ncol=2) #' #'testset<-matrix(c(rbinom(10,1,0.5),runif(10,0.1)),ncol=2) #' #'get_probabilities(trset,testset,rbinom(70,1,0.6),'binary') get_probabilities<-function(trset,testset,outc,mode,Rsq=F,p=NULL,n_tr){ #dimensions of the test set d<-dim(data.matrix(testset)); #for binary mode compute the coefficients of the regression regr<-multinom(-outc~.,data=data.frame(trset),trace=FALSE); regr_c<-data.matrix(coef(regr)); if (mode=='binary'){ if (Rsq==F){ #applying coefficients to the test set and immediately transforming the result in order to get probabilities later ps=exp(matrix(rep(1,d[1]))%*%regr_c[1,]+data.matrix(testset)%*%regr_c[2:length(regr_c),]); } else{ L0<-sum(log(((1/(1+exp(-regr_c[1,])))^(sum(outc)))*((1-1/(1+exp(-regr_c[1,])))^(n_tr-sum(outc))))) Lm<-sum(log(1/(1+exp(-matrix(rep(1,length(outc)))%*%regr_c[1,]-data.matrix(trset)%*%regr_c[2:length(regr_c),])))) LR=-2*(L0-Lm) s=1-p/LR if ((s>=0.9)&(is.na(s)==F)){ Rsq_v=1-exp(-LR/n_tr) Rsq_m=1-exp(2*L0/n_tr) if ((Rsq_v*(1-s)/Rsq_m<0.05)&(is.na(Rsq_v*(1-s)/Rsq_m)==F)){ #applying coefficients to the test set and immediately transforming the result in order to get probabilities later ps=exp(matrix(rep(1,d[1]))%*%regr_c[1,]+data.matrix(testset)%*%regr_c[2:length(regr_c),]); } else ps=rep(NA,d[1]) } else ps=rep(NA,d[1]) } } else { #for multinomial mode compute the coefficients of the regression regr_c<-apply(data.matrix(coef(multinom(-outc~.,data=data.frame(trset),trace=FALSE))),2,rev); #applying coefficients to the test set and immediately transforming the result in order to get probabilities later ps=rowSums(exp(matrix(rep(1,d[1]))%*%regr_c[,1]+data.matrix(testset)%*%t(regr_c[,2:dim(regr_c)[2]]))); } #getting rid of infinite values of ps coe=8; ps[ps==Inf]=max(ps[ps<Inf])*coe; while(length((ps[ps==Inf])>0)&(sum(is.na(ps)))!=d[1]){ coe=coe/2; ps[ps==Inf]=max(ps[ps<Inf])*coe; } #computing the first term in the product in order to get probabilities #computing the second term in the product in order to get probabilities for binary and multinomial probabilities if (mode=='binary') { # p0=exp(matrix(rep(1,d[1]))%*%regr_c[1,]+data.matrix(testset)%*%regr_c[2:length(regr_c),]); p=1/(1+1/ps); } else p0=exp(matrix(rep(1,d[1]))%*%regr_c[,1]+data.matrix(testset)%*%t(regr_c[,2:dim(regr_c)[2]])); if (mode!='binary') { #computing the probabilities for binary mode # # p=t(p1)*p0; # } else { #computing the 
probabilities for multinomial mode and combining them into a matrix p1=t(1/(1+ps)); p=t(p1)*p0[,1] for (i in 2:dim(p0)[2]) p=cbind(p,t(p1)*p0[,i]) } # if (sum(is.na(p))>0) { #output an error message in case there are undefined values of p # # stop('undefined prediction probabilities') # # } p #output the probabilities } #'Predictions for linear regression #' #'Function which runs a linear regression on a training set, computes predictions for the test set #' #'@param trset values of predictors on the training set #'@param testset values of predictors on the test set #'@param outc values of predictors on the training set #'@param k length of the test set #'@param Rsq whether the R-squared statistics constraint is introduced #'@param Rsq_v value of R-squared statistics on the training spli of the data #'@param marg margin of error for R-squared statistics constraint #'@param p weight of the model #'@param n_tr size of the training set #'@usage get_predictions_lin(trset,testset,outc,k,n_tr,p,Rsq,Rsq_v,marg) #'@return An array of continous variables of the length equal to the size of a \code{testset} #'@seealso Function uses function \code{\link[stats]{lsfit}} and \code{\link[stats]{coef}} #'@export get_predictions_lin #'@examples #'trset<-matrix(c(rnorm(90,2,4),runif(90,0,0.5),rbinom(90,1,0.5)),ncol=3) #' #'testset<-matrix(c(rnorm(10,2,4),runif(10,0,0.5),rbinom(10,1,0.5)),ncol=3) #' #'get_predictions_lin(trset,testset,runif(90,0,1),10) get_predictions_lin<-function(trset,testset,outc,k,n_tr,p,Rsq=F,Rsq_v=NULL,marg=0){ #write the coefficients of the linear regression fitted to the corresponding training set into an array regr<-lsfit(as.matrix(trset),outc) regr_c<-data.matrix(coef(regr)); #initialise an array of predictions pred<-array(NA,c(1,k)); if (sum(outc%%1)==0){# in case the outcomes are integers round the predictions if (Rsq==T){ varre=var(regr$residuals) Rr=1-varre/var(outc) marg0=0 if (marg>0) marg0=sqrt(varre/n_tr)*qt(0.975,n_tr-p-1)/abs(regr_c[1,]) if ((Rr>Rsq_v)&(marg0<=marg)) pred<-round(data.matrix(testset)%*%matrix(regr_c[2:length(regr_c)])+regr_c[1]); } else pred<-round(data.matrix(testset)%*%matrix(regr_c[2:length(regr_c)])+regr_c[1]); } else{ #otherwise do not round them if (Rsq==T){ varre=var(regr$residuals) Rr=1-varre/var(outc) marg0=0.0 if (marg>0) { marg0=sqrt(varre/n_tr)*qt(0.975,n_tr-p-1)/regr_c[1,] } if ((Rr>Rsq_v)&(marg0<=marg)) pred<-round(data.matrix(testset)%*%matrix(regr_c[2:length(regr_c)])+regr_c[1]); } else pred<-data.matrix(testset)%*%matrix(regr_c[2:length(regr_c)])+regr_c[1]; } #in case all outcomes are positive assign value zero to all negative predictions if (length(outc[outc<0]==0)) pred[pred<0]=0; #output the array of predictions pred } #'Predictions for multinomial regression #' #'Function which makes a prediction for multinomial/logistic regression based on the given cut-off value and probabilities. #'@param p probabilities of the outcomes for the test set given either by an array (logistic regression) or by a matrix (multinomial regression) #'@param k size of the test set #'@param cutoff cut-off value of the probability #'@param cmode \code{'det'} or \code{''}; \code{'det'} always predicts the more likely outcome as determined by the odds ratio; \code{''} predicts certain outcome with probability corresponding to its odds ratio (more conservative). 
Option available for multinomial/logistic regression #'@param mode \code{'binary'} (logistic regression), \code{'multin'} (multinomial regression) #'@usage get_predictions(p,k,cutoff,cmode,mode) #'@seealso Uses \code{\link[stats]{rbinom}}, \code{\link[stats]{rmultinom}} #'@return Outputs the array of the predictions of the size of \code{p}. #'@export get_predictions #'@examples #'#binary mode #' #'get_predictions(runif(20,0.4,0.6),20,0.5,'det','binary') #' #'#creating a data-set for multinomial mode #' #'p1<-runif(20,0.4,0.6) #'p2<-runif(20,0.1,0.2) #'p3<-1-p1-p2 #' #'#running the function #' #'get_predictions(matrix(c(p1,p2,p3),ncol=3),20,0.5,'det','multin') get_predictions<-function(p,k,cutoff,cmode,mode){ #initialise the array of predictions pred<-array(NA,c(1,k)); #going through all objects in the testset for (m in 1:k){ if (mode=='binary'){ if (cmode=='det'){ #making a deterministic mode prediction based on the cut-off pred[m]=ifelse(p[m]>cutoff,0,1) } else{ #making a random mode prediction based on sampling from binomial distribution pred[m]=1-rbinom(1,1,p[m]) } } else{ if (cmode=='det'){ #for multinomial mode deterministic prediction based on maximal probability maxpr=which(c(p[m,],1-sum(p[m,]))==max(c(p[m,],1-sum(p[m,])))); pred[m]=maxpr-1; } else{ #random mode prediction based on sampling from multinomial distribution pred[m]=c(0:(k-1))%*%rmultinom(1,1,c(p[m,],1-sum(p[m,]))) } } } #outputting the array of predictions pred } #'Combining in a list #' #'Function for combining outputs in a list #'@param ... an argument of \code{mapply} used by this function #'@seealso Function \code{\link[base]{mapply}} #'@export comb #'@examples #'#array of numbers to be separated in a list #' #'a<-1:4 #' #'#running the function #' #'comb(a) comb <- function(...) { mapply('cbind', ..., SIMPLIFY=FALSE) } #'Area Under the Curve #' #'Function enables efficient computation of area under receiver operating curve (AUC). Source: \url{https://stat.ethz.ch/pipermail/r-help/2005-September/079872.html} #'@param probs probabilities #'@param class outcomes #'@usage AUC(probs, class) #'@return A value for AUC #'@export AUC #'@examples #'AUC(runif(100,0,1),rbinom(100,1,0.3)) AUC <- function(probs, class) { x <- probs y <- class x1 = x[y==1]; n1 = length(x1); x2 = x[y==0]; n2 = length(x2); r = rank(c(x1,x2)) auc = (sum(r[1:n1]) - n1*(n1+1)/2) / n1 / n2 return(auc) } #'Cross-validation run #' #'Function running a single cross-validation by partitioning the data into training and test set #' #'@param vari set of predictors #'@param outi array of outcomes #'@param c set of all indices of the predictors #'@param rule rule of 10 in this case #'@param part indicates partition of the original data-set into training and test set in a proportion \code{(part-1):1} #'@param l number of observations #'@param we weights of the predictors #'@param vari_col overall number of predictors #'@param preds array to write predictions for the test split into, intially empty #'@param cmode \code{'det'} or \code{''}; \code{'det'} always predicts the more likely outcome as determined by the odds ratio; \code{''} predicts certain outcome with probability corresponding to its odds ratio (more conservative). 
Option available for multinomial/logistic regression #'@param mode \code{'binary'} (logistic regression), \code{'multin'} (multinomial regression) #'@param predm \code{'exact'} or \code{''}; for logistic and multinomial regression; \code{'exact'} computes how many times the exact outcome category was predicted, \code{''} computes how many times either the exact outcome category or its nearest neighbour was predicted #'@param cutoff cut-off value for logistic regression #'@param objfun \code{'roc'} for maximising the predictive power with respect to AUC, \code{'acc'} for maximising predictive power with respect to accuracy. #'@param minx minimum number of predictors to be included in a regression, defaults to 1 #'@param maxx maximum number of predictors to be included in a regression, defaults to maximum feasible number according to one in ten rule #'@param maxw maximum weight of predictors to be included in a regression, defaults to maximum weight according to one in ten rule #'@param nr a subset of the data-set, such that \code{1/part} of it lies in the test set and \code{1-1/part} is in the training set, defaults to empty set #'@param st a subset of predictors to be always included into a predictive model,defaults to empty set #'@param rule an Events per Variable (EPV) rule, defaults to 10 #'@param corr maximum correlation between a pair of predictors in a model #'@param Rsq whether R-squared statistics constrained is introduced #'@param marg margin of error for R-squared statistics constraint #'@param n_tr size of the training set #'@param preds_tr array to write predictions for the training split into, intially empty #@usage cross_val(vari,outi,c,rule,part,l,we,vari_col,preds,mode,cmode,predm,cutoff,objfun,minx,maxx,nr,maxw,st,corr) #'@return #'\item{regr}{An M x N matrix of sums of the absolute errors for each element of the test set for each feasible regression. M is maximum feasible number of variables included in a regression, N is the maximum feasible number of regressions of the fixed size; the row index indicates the number of variables included in a regression. Therefore each row corresponds to results obtained from running regressions with the same number of variables and columns correspond to different subsets of predictors used.} #'\item{regrr}{An M x N matrix of sums of the relative errors for each element of the test set (only for \code{mode = 'linear'}) for each feasible regression. M is maximum feasible number of variables included in a regression, N is the maximum feasible number of regressions of the fixed size; the row index indicates the number of variables included in a regression. Therefore each row corresponds to results obtained from running regressions with the same number of variables and columns correspond to different subsets of predictors used.} #'\item{nvar}{Maximum feasible number of variables in the regression} #'\item{emp}{An accuracy of always predicting the more likely outcome as suggested by the training set (only for \code{mode = 'binary'} and \code{objfun = 'acc'})} #'In \code{regr} and \code{regrr} \code{NA} values are possible since for some numbers of variables there are fewer feasible regressions than for the others. 
#'@seealso Uses \code{\link{compute_max_weight}}, \code{\link{sum_weights_sub}}, \code{\link{make_numeric_sets}}, \code{\link{get_predictions_lin}}, \code{\link{get_predictions}}, \code{\link{get_probabilities}}, \code{\link{AUC}}, \code{\link[utils]{combn}} #'@export cross_val #'@examples #'#creating variables #' #'vari<-matrix(c(1:100,seq(1,300,3)),ncol=2) #' #'#creating outcomes #' #'out<-rbinom(100,1,0.3) #' #'#creating array for predictions #' #'pr<-array(NA,c(2,2)) #' #'pr_tr<-array(NA,c(2,2)) #' #'#passing set of the inexes of the predictors #' #'c<-c(1:2) #' #'#passing the weights of the predictors #' #'we<-c(1,1) #' #'#setting the mode #' #'m<-'binary' #' #'#running the function #' #'cross_val(vari,out,c,10,10,100,we,2,pr,m,'det','exact',0.5,'acc',nr=c(1,4),n_tr=90,preds_tr=pr_tr) cross_val<-function(vari,outi,c,rule,part,l,we,vari_col,preds,mode,cmode,predm,cutoff,objfun,minx=1,maxx=NULL,nr=NULL,maxw=NULL,st=NULL,corr=1,Rsq=F,marg=0.0,n_tr,preds_tr){ #for linear mode initialising the array of relative errors if (mode=='linear') { predsr<-preds predsr_tr<-preds_tr } if (is.null(nr)==TRUE){ #creating the partition into training and test set given that no subset is specified to be necessarily present both in the training and test set ra=sample(1:l,n_tr); } else { #creatinng the partition when parameter nr is specified lnr=length(nr); ranr=sample(nr,floor((1-(1/part))*lnr)) #partitioning nr itself #partitioning the remaining datapoints rar=sample(setdiff(1:l,nr),n_tr-floor((1-(1/part))*lnr)); #combining the two ra=c(ranr,rar); } #defining the training set trset<-vari[ra,]; #outcomes corresponding to the training set outc<-outi[ra]; #testset testset<-vari[setdiff(c(1:l),ra),]; #computing the maximum allowed weight given the outcomes in the training set and the mode of the prediction mw<-compute_max_weight(outc,mode) #maximal weight given the input parameter maxw (if specified) mw=min(mw,maxw*rule); #maximal number of variables allowed in a single regression nvar=floor((1/rule)*mw); #error message if this nuber is 0 if (nvar==0) stop('not enough events to build a regression model'); #maximal number of variables taking into account restriction by the parameter maxx (if any) if (is.null(maxx)==FALSE){ maxj=min(maxx,min(nvar,vari_col)) } else { maxj=min(nvar,vari_col) } #minimal number of variables taking into account restriction by the parameter minx (if any) minj=max(1,minx) #subset of indices of the regression without the "fixed subset" defined by parameter st c=setdiff(c,st); #length of the "fixed subset" defined by parameter st lest=length(st) #going through all possible sizes of the regression model for (j in max(minj,lest):maxj){ #creating a matrix of all possible regressions of a given size j, taking into account the "fixed subset" defined by st a<-combn(c,(j-lest)); #in case c contains only 1 element and st is empty if ((length(c)<2)&(j>lest)) a=matrix(c) #in case st is non-empty to each column of a add a column of elements of st if (is.null(st)==FALSE) a<-rbind(matrix(st,ncol=dim(a)[2],nrow=lest),a) #compute the weights of each column of a s<-sum_weights_sub(a,j,we,st) #reoders columns of a in ascending order corrresponding to the order of the array of weights s a<-find_sub(a,s,j,c(st,c),st) #sort the array of weights s=sort(s); #find those weights which satisfy the weight constraint (aka maximal number of variables in the regression) ai=which(s<=max(nvar)); if (length(ai)>0){ for (k in 1:length(ai)){ #going through all elements of ai #transform the corresponding subset of 
variables into numeric one set_num<-make_numeric_sets(a,ai,k,vari,ra,l,mode) #numeric test set testset1<-set_num$test #numeric training set trset1<-set_num$tr #initialise correlsation parameter to 0 corr1<-0 if (corr<1){ #if correlation parameter corr is smaller than 1 compute absolute correlations between variables on the training set corr1<-abs(cor(trset1)) } #if there is no restriction on highly correlated predictors or if there are no highly correlated predictors in the kth subset if ((length(corr1[corr1>corr])<=length(a[,ai[k]]))|(corr==1)){ if (mode=='linear'){ #linear mode #get the prediction for the training set if (Rsq==F){ pred<-get_predictions_lin(trset1,testset1,outc,l-n_tr,n_tr,s[ai[k]],Rsq,Rsq_v) pred_tr<-get_predictions_lin(trset1,trset1,outc,n_tr,n_tr,s[ai[k]],Rsq,Rsq_v) #difference between the actual value and the predicted one on the test set diff<-outi[setdiff(1:l,ra)]-pred; diff_tr<-outi[ra]-pred_tr #difference between predicted value and the actual one divided by the actual one, aka relative difference diffr<-(pred/outi[setdiff(1:l,ra)])-1; diffr_tr<-(pred_tr/outi[ra])-1; #diffr_tr[diff_tr==0]=0 #diffr[diff==0]=0 #sum of all absolute values of differences defined above preds[j-minx+1,k]=sum(abs(diff)); preds_tr[j-minx+1,k]=sum(abs(diff_tr)); #sum of all absolute values of relative differences defined above predsr[j-minx+1,k]=sum(abs(diffr)); predsr_tr[j-minx+1,k]=sum(abs(diffr_tr)); } else { # marg0=0 if (marg>0.0) marg0=sqrt(max(qchisq(0.975,n_tr-1-s[ai[k]])/(n_tr-1-s[ai[k]]),(n_tr-1-s[ai[k]])/qchisq(0.975,n_tr-1-s[ai[k]])))-1 if (marg0<=marg){ Rsq_v=max(1-exp((2-s[ai[k]])/(0.1*n_tr)),(s[ai[k]]-0.05*(n_tr-1-s[ai[k]]))/s[ai[k]]) pred<-get_predictions_lin(trset1,testset1,outc,l-n_tr,n_tr,s[ai[k]],Rsq,Rsq_v,marg) pred_tr<-get_predictions_lin(trset1,trset1,outc,n_tr,n_tr,s[ai[k]],Rsq,Rsq_v,marg) #difference between the actual value and the predicted one on the test set diff<-outi[setdiff(1:l,ra)]-pred; diff_tr<-outi[ra]-pred_tr; #difference between predicted value and the actual one divided by the actual one, aka relative difference diffr<-(pred/outi[setdiff(1:l,ra)])-1; diffr_tr<-(pred_tr/outi[ra])-1; #sum of all absolute values of differences defined above preds[j-minx+1,k]=sum(abs(diff)); preds_tr[j-minx+1,k]=sum(abs(diff_tr)); #sum of all absolute values of relative differences defined above predsr[j-minx+1,k]=sum(abs(diffr)); predsr_tr[j-minx+1,k]=sum(abs(diffr_tr)); } } } else{ if (Rsq==F){ #computing the probabilities for each outcome on the test set by fitting multinomial/logistic regression to the training set p<-get_probabilities(trset1,testset1,outc,mode,Rsq,n_tr=n_tr); p_tr<-get_probabilities(trset1,trset1,outc,mode,Rsq,n_tr=n_tr); if (objfun=='acc'){ #case of accuracy maximisation #transforming probabilities into predictions pred<-get_predictions(p,l-floor((1-1/part)*l),cutoff,cmode,mode) pred_tr<-get_predictions(p_tr,floor((1-1/part)*l),cutoff,cmode,mode) #difference between the actual values and the predicted ones on the test set diff<-outi[setdiff(1:l,ra)]-array(pred); diff_tr<-outi[ra]-array(pred_tr); if (predm=='exact') { #in case of the exact prediction, aka predicting the exact class diff[abs(diff)>0]=1; diff_tr[abs(diff_tr)>0]=1; } else{ #in case of "up to a class" prediction, aka consider correct if the correct class is the one neighboring to the predicted one diff[abs(diff)<2]=0; diff[abs(diff)>1]=1; } #computing the number of times prediction was correct (non-averaged out accuracy) #rows correspond to the number of variables in a regression, column 
is determined by k preds[j-minx+1,k]=l-floor((1-1/part)*l)-sum(abs(diff)); preds_tr[j-minx+1,k]=floor((1-1/part)*l)-sum(abs(diff_tr)); } else{ #computing the AUROC of the prediction preds[j-minx+1,k]<-AUC(1-p,outi[setdiff(1:l,ra)]) preds_tr[j-minx+1,k]<-AUC(1-p_tr,outi[ra]) } } else { marg0=0.0 if (marg>0) marg0=1.96*sqrt(sum(outc)*(n_tr-sum(outc))/n_tr) if (marg0<=marg){ #Rsq_v=1-exp(-s[ai[k]]/(0.1*n_tr)) #computing the probabilities for each outcome on the test set by fitting multinomial/logistic regression to the training set p<-get_probabilities(trset1,testset1,outc,mode,Rsq,s[ai[k]],n_tr); p_tr<-get_probabilities(trset1,trset1,outc,mode,Rsq,s[ai[k]],n_tr); if (objfun=='acc'){ #case of accuracy maximisation #transforming probabilities into predictions pred<-get_predictions(p,l-floor((1-1/part)*l),cutoff,cmode,mode) pred_tr<-get_predictions(p_tr,floor((1-1/part)*l),cutoff,cmode,mode) #difference between the actual values and the predicted ones on the test set diff<-outi[setdiff(1:l,ra)]-array(pred); diff_tr<-outi[ra]-array(pred_tr); if (predm=='exact') { #in case of the exact prediction, aka predicting the exact class diff[abs(diff)>0]=1; diff_tr[abs(diff_tr)>0]=1; } else{ #in case of "up to a class" prediction, aka consider correct if the correct class is the one neighboring to the predicted one diff[abs(diff)<2]=0; diff[abs(diff)>1]=1; } #computing the number of times prediction was correct (non-averaged out accuracy) #rows correspond to the number of variables in a regression, column is determined by k preds[j-minx+1,k]=l-floor((1-1/part)*l)-sum(abs(diff)); preds_tr[j-minx+1,k]=floor((1-1/part)*l)-sum(abs(diff_tr)); } else{ #computing the AUROC of the prediction preds[j-minx+1,k]<-AUC(1-p,outi[setdiff(1:l,ra)]) preds_tr[j-minx+1,k]<-AUC(1-p_tr,outi[ra]) } } } } } } } } if ((mode=='binary')&(objfun=='acc')){ #empirical prediction based on always choosing the most frequent category cpred=sum(outi[setdiff(1:l,ra)])/(l-floor((1-1/part)*l)); #output is a list of (non-averaged out) accuracies of all feasible regression models, the corredponding maximal nimber of variables, the corredsponding empirical prediction list("regr" = preds, "nvar"=nvar, "emp" = cpred, "regr_tr"=preds_tr) } else{ if (objfun=='roc') { #list of AUROCs for all feasible model and the corrresponding maximal number of variables list("regr" = preds, "nvar"=nvar,"regr_tr"=preds_tr) } else{ if (mode=='multin'){ #unique elements of the training set outcomes uo<-unique(outc); #empirical prediction based on always choosing the most frequent category cpred=1-length(which(outi[setdiff(1:l,ra)]==uo[which.max(tabulate(match(outc,uo)))]))/(l-floor((1-1/part)*l)); #output is a list of (non-averaged out) accuracies of all feasible regression models, the corredponding maximal nimber of variables, the corredsponding empirical prediction list("regr" = preds, "nvar"=nvar, "emp" = cpred,"regr_tr"=preds_tr) } else{ #linear mode #empirical predictions based on always choosing the mean of the training set (empirical absolute and relative errors respectively) cpred=sum(abs(outi[setdiff(1:l,ra)]-mean(outi[ra])))/(l-floor((1-1/part)*l)); cpredr=sum(abs((outi[setdiff(1:l,ra)]-mean(outi[ra]))/outi[setdiff(1:l,ra)]))/(l-floor((1-1/part)*l)); #output is a list of non-averaged out absolute errors of all feasible regression models #the corredponding maximal nimber of variables, non-averaged out relative errors of all feasible regression models #absolute error of the empirical prediction, relative error of the empirical prediction list("regr" = preds, 
"nvar"=nvar, "regrr"=predsr, "emp"=cpred,"empr"=cpredr,"regrr_tr"=predsr_tr,"regr_tr"=preds_tr) } } } } #'Averaging out the predictive power #' #'Function which averages out the predictive power over all cross-validations #'@param preds An M x \code{crv}N matrix consisting of \code{crv} horizontally concatenated M x N matrices. These M x N matrices are the matrices of predictive powers for all feasible regressions (M is maximum feasible number of variables included in a regression, N is the maximum feasible number of regressions of the fixed size; the row index indicates the number of variables included in a regression) #'@param crv number of cross-validations #'@param k size of the test set for which the predictions are made #'@usage av_out(preds,crv,k) #'@return Returns an M x N matrix of average predictive powers where M is maximum feasible number of variables included in a regression, N is the maximum feasible number of regressions of the fixed size; the row index indicates the number of variables included in a regression #'@export av_out #'@examples #'#creating a matrix of predictive powers #' #'preds<-cbind(matrix(runif(40,1,4),ncol=10),matrix(runif(40,1.5,4),ncol=10)) #'preds<-cbind(preds,matrix(runif(40,1,3.5),ncol=10)) #' #'#running the function #' #'av_out(preds,3,5) av_out<-function(preds,crv,k){ #writing the dimensions of the matrix of preditive powers into the array si<-dim(preds); #dividing the number of columns in preds by the number of cross-validations, since the results from each next cross-validation are always concatenated with the previous one si[2]=si[2]/crv; #initialising the array of averaged out predictive powers predsp<-array(NA,c(si[1],si[2])) for (i in 1:si[1]){ for (j in 1:si[2]){ pr<-preds[i,seq(j,si[2]*crv,si[2])]; #predictive power corresponding to the same model from all cross-validations #the mean value of the corresponding predictive power divided by the size of the test set predsp[i,j]=mean(pr[is.finite(pr)],na.rm=TRUE)/k; } } #output is the matrix of the averaged out predictive powers predsp } #'Best regression #' #'Function which identifies regressions with the highest predictive power #' #'@param predsp An M x N matrix of averaged out predictive power values. M is maximum feasible number of variables included in a regression, N is the maximum feasible number of regressions of the fixed size; the row index indicates the number of variables included in a regression. 
#'@param nvar array of maximal number of variables for each cross-validation #'@param c array of all indices of the prediction variables #'@param we array of all weights of the prediction variables #'@param st a subset of predictors to be always included into a predictive model #'@param minx minimum number of predictors, defaults to 1 #'@usage get_indices(predsp,nvar,c,we,st,minx) #'@return A list of arrays which contain indices of the predictors corresponfing to the best regressions #'@seealso Uses \code{\link{sum_weights_sub}}, \code{\link{find_sub}}, \code{\link[utils]{combn}} #'@export get_indices #'@examples #'#creating a set of averaged out predictive powers #' #'predsp<-matrix(NA,ncol=3,nrow=3) #' #'predsp[1,]=runif(3,0.7,0.8) #'predsp[2,]=runif(3,0.65,0.85) #'predsp[3,1]=runif(1,0.4,0.5) #' #'#running the function #' #'get_indices(predsp,c(3,3,3),1:3,c(1,1,1)) get_indices<-function(predsp,nvar,c,we,st=NULL,minx=1){ #creating a list ll=list(0) if (sum(is.na(predsp))<length(predsp)){ #finding the index of the arry of the averaged out predictive powers which corresponds to the highest predictive power nums<-which(predsp==max(predsp[predsp!=0],na.rm=TRUE)) #dimensions of the array of the averaged out predictive powers si=dim(predsp) #computing the number of variables of the best predictive model based on the row it corresponds to numv=nums%%si[1] #finding the column which corresponds to the model with the best predictive power numss=ceiling(nums/si[1]) #array of predictive variables without the "fixed subset" defined by parameter st c=setdiff(c,st) #length of the "fixed subset" lest=length(st) #going through all models which exhibited the highest predictive power for (i in 1:length(numv)){ #in case the value of the number of variables is 0 reassign the maximal number of variables to it if (numv[i]==0) numv[i]=si[1] #add the minimal number of variables defined by parameter minx to the number of variables numv1=numv[i]+minx-1 #all subsets of size numv1 minus the size of the "fixed subset" defined by st af<-combn(c,(numv1-lest)) #in case there is only one predictor and st is empty if ((length(c)<2)&(numv1>lest)) af<-matrix(c) #in case the "fixed subset" is not empty add to each column of af a column with elements of st if (is.null(st)==FALSE) af<-rbind(matrix(st,ncol=dim(af)[2],nrow=lest),af) #compute the weights of models corresponding to columns of af s<-sum_weights_sub(af,numv1,we,st) #reorder the columns of sf based on the order of the array of weights s af<-find_sub(af,s,numv1,c,st) #sort the array of weights s=sort(s) #find the models with the weights satisfying the aximal weight constraint aif=which(s<=max(nvar)); #the column of af exhibiting the best predictive power is written as an ith element of the list ll[[i]]<-af[,aif[numss[i]]] } } else ll=NA #output the lest of the models exhibiting the best predictive power ll } #'Indices of the best regressions #' #'One of the two main functions of the package. 
Identifies the predictors included into regressions with the highest average predictive power #'@param vari set of predictors #'@param outi array of outcomes #'@param crv number of cross-validations #'@param cutoff cut-off value for mode \code{'binary'} #'@param part for each cross-validation partitions the dataset into training and test set in a proportion \code{(part-1):part} #'@param cmode \code{'det'} or \code{''}; \code{'det'} always predicts the more likely outcome as determined by the odds ratio; \code{''} predicts certain outcome with probability corresponding to its odds ratio (more conservative). Option available for multinomial/logistic regression #'@param mode \code{'binary'} (logistic regression), \code{'multin'} (multinomial regression) #'@param predm \code{'exact'} or \code{''}; for logistic and multinomial regression; \code{'exact'} computes how many times the exact outcome category was predicted, \code{''} computes how many times either the exact outcome category or its nearest neighbour was predicted #'@param objfun \code{'roc'} for maximising the predictive power with respect to AUC, available only for \code{mode='binary'}; \code{'acc'} for maximising predictive power with respect to accuracy. #'@param parallel TRUE if using parallel toolbox, FALSE if not. Defaults to FALSE #'@param cores number of cores to use in case of parallel=TRUE #'@param minx minimum number of predictors to be included in a regression, defaults to 1 #'@param maxx maximum number of predictors to be included in a regression, defaults to maximum feasible number according to one in ten rule #'@param maxw maximum weight of predictors to be included in a regression, defaults to maximum weight according to one in ten rule #'@param nr a subset of the data-set, such that \code{1/part} of it lies in the test set and \code{1-1/part} is in the training set, defaults to empty set. This is to ensure that elements of this subset are included both in the training and in the test set. #'@param st a subset of predictors to be always included into a predictive model,defaults to empty set #'@param rule an Events per Variable (EPV) rule, defaults to 10' #'@param corr maximum correlation between a pair of predictors in a model #'@param Rsq whether the R-squared statistics constraint is introduced #'@param marg margin of error for R-squared statistics constraint #'@return Prints the best predictive power provided by a regression, predictive accuracy of the empirical prediction (value of \code{emp} computed by \code{cross_val} for logistic and linear regression). Returns indices of the predictors included into regressions with the highest predictive power written in a list. For \code{mode='linear'} outputs a list of two lists. 
First list corresponds to the smallest absolute error, second corresponds to the smallest relative error #'@export regr_ind #'@import doParallel #'@import parallel #'@import foreach #'@seealso Uses \code{\link{compute_weights}}, \code{\link{make_numeric}}, \code{\link{compute_max_weight}}, \code{\link{compute_weights}}, \code{\link{compute_max_length}}, \code{\link{cross_val}},\code{\link{av_out}}, \code{\link{get_indices}} #'@examples #'#creating variables for linear regression mode #' #'variables_lin<-matrix(c(rnorm(56,0,1),rnorm(56,1,2)),ncol=2) #' #'#creating outcomes for linear regression mode #' #'outcomes_lin<-rnorm(56,2,1) #' #'#running the function #' #'regr_ind(variables_lin,outcomes_lin,100,mode='linear',parallel=TRUE,cores=2) #' #'#creating variables for binary mode #' #'vari<-matrix(c(1:100,seq(1,300,3)),ncol=2) #' #'#creating outcomes for binary mode #' #'out<-rbinom(100,1,0.3) #' #'#running the function #' #'regr_ind(vari,out,20,cutoff=0.5,part=10,mode='binary',parallel=TRUE,cores=2,nr=c(1,10,20),maxx=1) regr_ind<-function(vari,outi,crv,cutoff=NULL,part=10,mode,cmode='det',predm='exact',objfun='acc',parallel=FALSE,cores,minx=1,maxx=NULL,nr=NULL,maxw=NULL,st=NULL,rule=10,corr=1,Rsq=F,marg=0){ #in case of an error close all the connections #on.exit(closeAllConnections()); #error message in case of incompatible moe and objfun parameters if ((objfun=='roc')&(mode!='binary')) { stop('function "roc" is available for binary mode only') } if ((length(unique(outi))>2)&(mode=='binary')){ stop('the outcome you provided appears to be non-binary, please re-run in multin mode') } #if there is only one predictive variable written in an array if (is.null(dim(vari))){ vari=matrix(vari); } #overall number of predictive variables vari_col=ncol(vari); #compute weights of all predictive variables and write them into an array we<-compute_weights(vari_col,vari) #the sample size l=nrow(vari); n_tr=floor((1-(1/part))*l) #the array of all indices of predictive variables c<-c(1:vari_col); if (is.numeric(outi)==FALSE){ #turning non-numeric outcomes in numeric ones outi<-make_numeric(outi,TRUE,1:length(outi),mode) } #compute maximum weight defined by the outcomes numr<-compute_max_weight(outi,mode); #compute maximal length of a regression which can be fitted to a training set le<-compute_max_length(vari_col,floor((1.0/rule)*numr),c,we,minx,maxx+1,st) #initialising the array of predictive powers of all feasible regression preds<-array(NA,c(min(vari_col,min(floor((1/(rule*min(we)))*numr),maxx+1))-minx+1,le)); preds_tr<-array(NA,c(min(vari_col,min(floor((1/(rule*min(we)))*numr),maxx+1))-minx+1,le)); #defining a %fun% function `%fun%` <- `%do%` if (parallel==TRUE){ #in case the parallel mode is on `%fun%` <- `%dopar%` #creating a cluster of the corresponding foem cl <- makeCluster(cores,setup_strategy="sequential") registerDoParallel(cl) #exporting the corresponding libraries to all cores clusterEvalQ(cl,rm(list=ls())) clusterEvalQ(cl, library(nnet)) clusterEvalQ(cl, library(foreach)) clusterEvalQ(cl, library(doParallel)) clusterEvalQ(cl, library(stats)) #exporting all necessary functions to all cores clusterExport(cl,"cross_val") clusterExport(cl,"compute_max_weight") clusterExport(cl,"sum_weights_sub") clusterExport(cl,"get_probabilities") clusterExport(cl,"get_predictions") clusterExport(cl,"get_predictions_lin") clusterExport(cl,"find_sub") clusterExport(cl,"make_numeric") clusterExport(cl,"make_numeric_sets") clusterExport(cl,"AUC") } #running the given number of cross-validations result <- 
foreach(i=1:crv, .combine='comb', .multicombine=TRUE,.packages="CARRoT") %fun% { cross_val(vari,outi,c,rule,part,l,we,vari_col,preds,mode,cmode,predm,cutoff,objfun,minx,maxx,nr,maxw,st,corr,Rsq,marg,n_tr,preds_tr); } if(parallel==T) on.exit(stopCluster(cl)) #writing predictive powers of the regressions in an array preds<-result[[1]]; #writing the maximal number of variables in an array nvar<-result[[2]]; preds_tr<-result[[length(result)]] if (mode=='binary') { if (objfun=='acc') { #write the array of empirical predictions cpred<-result[[3]]; } } else { if (mode=='linear'){ #write the array of relative errors predsr<-result[[3]]; predsr_tr<-result[[length(result)-1]] #write the array of absolute errors of empirical predictions cpred<-result[[4]]; #write the array of relative errors of empirical predictions cpredr<-result[[5]]; } else{ #an array of empirical predictions cpred<-result[[3]] } } #stop the cluster # if (parallel==TRUE) stopCluster(cl) # closeAllConnections() #average out the predictive powers over all cross-validations if (objfun=='roc'){ predsp<-av_out(preds,crv,1) predsp_tr<-av_out(preds_tr,crv,1) } else{ predsp<-av_out(preds,crv,l-n_tr) predsp_tr<-av_out(preds_tr,crv,n_tr) } if (mode=='linear') { predspr<-av_out(predsr,crv,l-n_tr) predspr_tr<-av_out(predsr_tr,crv,n_tr) } if (((mode=='binary')&(objfun=='acc'))|(mode=='multin')) { #print the average accuracy attained by the best predictive model and the empirical accuracy t1<-max(predsp[predsp>0],na.rm=TRUE) t1_ind<-which(predsp==t1)[1] if (is.infinite(t1)) t1=NA a1<-c(t1,min(predsp_tr[t1_ind]),1-sum(cpred)/crv) print(a1); } else{ if (objfun=='roc') { #print the average AUROC of the best predictive model t1<-max(predsp[predsp>0],na.rm=TRUE) t1_ind<-which(predsp==t1) if (is.infinite(t1)) t1=NA print(c(t1,predsp_tr[t1_ind])) a1<-c(t1,predsp_tr[t1_ind]) } else{ if (is.na(sum(cpredr[cpredr<Inf])/(crv-length(cpredr[cpredr==Inf])))==F){ #in case all average relative errors are finite t1<-min(predsp[predsp>0],na.rm=TRUE) t2<-min(predspr[predspr>0],na.rm=TRUE) t1_ind<-which(predsp==t1) t2_ind<-which(predspr==t2) if (is.infinite(t1)) t1=NA if (is.infinite(t2)) t2=NA print(c(t1,t2,min(predsp_tr[t1_ind]),min(predspr_tr[t2_ind]),sum(cpred)/crv,sum(cpredr[cpredr<Inf])/(crv-length(cpredr[cpredr==Inf])))) a1<-c(t1,t2,min(predsp_tr[t1_ind]),min(predspr_tr[t2_ind]),sum(cpred)/crv,sum(cpredr[cpredr<Inf])/(crv-length(cpredr[cpredr==Inf]))) } else { #printing NaN values for average relative error in case it is not finite t1<-min(predsp[predsp>0],na.rm=TRUE) t1_ind<-which(predsp==t1) if (is.infinite(t1)) t1=NA print(c(t1,NaN,min(predsp_tr[t1_ind]),sum(cpred)/crv,NaN)) a1<-c(t1,NaN,sum(cpred)/crv,NaN) } } } if (mode=='linear') { #find the indices of the variables included into the regression with the best predictive power if (sum(is.finite(predspr))>0) { #find indices of the variables included in the models corresponding to the lowest absolute and lowest relative error list(a1,get_indices(-predsp,nvar,c,we,st,minx),get_indices(-predspr,nvar,c,we,st,minx)) } else { #in case relative error is infinite output NaN list(a1,get_indices(-predsp,nvar,c,we,st,minx),NaN) } } else{ #find indices of the variables included in the models corresponding to the highest accuracy/AUROC list(a1,get_indices(predsp,nvar,c,we,st,minx)) } } #' Pairwise interactions and squares #' #' Function transforms a set of predictors into a set of predictors, their squares and pairwise interactions #'@param A set of predictors #'@param n first \code{n} predictors, whose interactions 
with the rest should be taken into account, defaults to all of the predictors #'@return Returns the predictors including their squares and pairwise interactions #'@export quadr #'@examples quadr(cbind(1:100,rnorm(100),runif(100),rnorm(100,0,2))) quadr<-function(A,n=1000){ #copying the array of variables into array B B<-A if (n==1000){ #if the n parameter is set to default n=dim(A)[2] #the number of variables whose interactions are to be considers is the number of all variables } ind<-c() for (i in 1:n){ ind<-c(ind,i:n) } B<-cbind(B,A[,rep(1:n,c(n:1))]*A[,ind]) #for (i in 1:n){ # B=cbind(B,A[,i:dim(A)[2]]*A[,i]) #multiplying variables in order to obtain pairwise interactions #} B } #' Three-way interactions and squares #' #' Function transforms a set of predictors into a set of predictors, their squares, pairwise interactions, cubes and three-way interactions #'@param A set of predictors #'@param n first \code{n} predictors, whose interactions with the rest should be taken into account, defaults to all of the predictors #'@return Returns the predictors including their squares, pairwise interactions, cubes and three-way interactions #'@export cub #'@examples cub(cbind(1:100,rnorm(100),runif(100),rnorm(100,0,2))) #' #' cub<-function(A,n=1000){ B<-quadr(A,n) #creating an array of all pairwise interactions if (n==1000){#if the n parameter is set to default n<-dim(A)[2] #the number of variables whose interactions are to be considers is the number of all variables } m<-dim(B)[2] #the number of variables and their pairwise interactions C<-B[,(n+1):m] k<-m-n ind<-c() ind0<-c() for (i in 1:n){ # ind<-c(ind,i:n) ind0<-c(ind0,c((n-i+1):1)) } for (i in 1:n){ ind<-c(ind,n+1-ind0[(ifelse(i==1,0,sum((n-i+2):n))+1):length(ind0)]) } B<-cbind(B,C[,rep(1:k,ind0)]*A[,ind]) # for (i in 1:n){ # B=cbind(B,B[,(n+0.5*(n+n-i+2)*(i-1)+1):m]*A[,i]) #multiplying variables from B (pairwise interactions) and A in order to obtain three-way interactions # } B } #' Finding the interacting terms based on the index #' #' Function transforms an index of an array of two- or three-way interactions into two or three indices corresponding to the interacting variables #'@param ind index to transform #'@param N number of interacting variables #'@usage find_int(ind,N) #'@return Returns two or three indices corredsponding to a combination of variables written under the given index #'@export find_int #'@examples find_int(28,9) find_int<-function(ind,N){ if (ind<=N){ #if the index is lower than the number of variables ind #it is just a single variable } else{ if (ind<=(N+0.5*(N+1)*N)){ #if the index is smaller than the number of all variables, their squares and two-way interactions a<-2*N #starting point is interactions with the first variables i<-1 while (ind>a){ #locating the index by adding the number of possible interacting variables a=a+N-i i=i+1 } c(i,ind-a+N) #output two interacting variables } else{ #in case the interaction goes beyond a pair of variables a<-N+0.5*(N+1)*N #starting point two-way interactions i<-0 while (ind>a){ #locating the index by adding the number of possible interacting variables taking into account three-way interactions a=a+0.5*(N-i+1)*(N-i) i=i+1 } ind1<-i #the index of the first interacting variable ind2<-ind-a+0.5*(N-i+2)*(N-i+1) #reducing the problem to finding two interacting variables a<-N-i+1 #similar to the two-way interaction case but taking into account the number of the first interacting variable while (ind2>a){ #similarly to two-way case locating two variables interacting with each other 
a=a+N-i i=i+1 } c(ind1,i,ind2-a+N) #outputting three interacting variables } } }
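# Illustrative check added for clarity (the name check_find_int is an
# assumption of this note, not part of the CARRoT API): quadr() lays out the
# interaction columns as x1*x1,...,x1*xN, x2*x2,...,x2*xN, ... after the N
# original predictors, and find_int() inverts that layout; e.g. for N = 9 the
# 28th column of quadr(A) is x3*x4 and find_int(28, 9) returns c(3, 4).
check_find_int <- function(N = 9, ind = 28) {
    A <- matrix(rnorm(50 * N), ncol = N)  # toy predictor matrix
    B <- quadr(A)                         # predictors, squares and pairwise interactions
    vars <- find_int(ind, N)              # indices of the two interacting predictors
    isTRUE(all.equal(B[, ind], A[, vars[1]] * A[, vars[2]]))
}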
/scratch/gouwar.j/cran-all/cranData/CARRoT/R/carrot_functions_rsq.R
#' Gibbs sampler for Conditional Autoregressive LASSO and extensions #' #' @description Main sampling algorithm of CAR-LASSO model #' #' @param formula A double sided formula with response at left hand side and predictors at right hand side #' @param data A data.frame with all response and predictors, row as observations #' @param link String name of link function? Currently can be "identity" for normal response, "probit" for binary, "log" for counting, "logit" for compositional. Note that when use "logit", the last response will be used as reference. #' @param adaptive Bool, whether run the adaptive version of the model #' @param r_beta Hyper-parameter for regression coefficient, shape parameter of Gamma, if adaptive, should have row number same as number of predictors while column number of responses #' @param delta_beta Hyper-parameter for regression coefficient, rate parameter of Gamma, if adaptive, should have row number same as number of predictors while column number of responses #' @param r_Omega Hyper-parameter for precision matrix, shape parameter of Gamma. If adaptive, can be a matrix with same size as precision matrix, if this is the case, only upper triangular part without diagonal will be used, or can be a vector whose size was the upper triangular part of precision matrix, if non-adaptive, a scalar. #' @param delta_Omega Hyper-parameter for precision matrix, rate parameter of Gamma, If adaptive, can be a matrix with same size as precision matrix, if this is the case, only upper triangular part without diagonal will be used, or can be a vector whose size was the upper triangular part of precision matrix, if non-adaptive, a scalar. #' @param lambda_diag adaptive only hyper-parameter for penalties on diagonal entries of Omega, should have dimension k and non-negative #' @param n_iter Number of sampling iterations (i.e. after burn in) for the Gibbs sampler #' @param n_burn_in Number of burn in iterations for the Gibbs sampler #' @param thin_by Final sample was thin by this number #' @param ns parameter for ARS, maximum number of hulls, only used when link is "log" and "logit" #' @param m parameter for ARS, initial number of hulls, only used when link is "log" and "logit" #' @param emax parameter for ARS, tolerance for small values being 0, larger meaning we tolerate smaller values, only used when link is "log" and "logit" #' @param progress Bool, whether report progress from C++ #' @param verbos Bool, whether show warnings and messages. 
#' #' @return A `carlasso_out` object with elements: #' \itemize{ #' \item{`$point_est`}{ #' \itemize{ #' \item{`$Omega`}{: Posterior mean of precision matrix} #' \item{`$beta`}{: Posterior mean of regression coefficient} #' \item{`$CAR`}{ #' \itemize{ #' \item{`$C`}{: The conditional regression coefficients among responses} #' \item{`$B`}{: The conditional regression coefficients between response and predictors} #' \item{`$M`}{: The conditional variance} #' } #' } #' } #' } #' \item{`$nodes`}{ #' \itemize{ #' \item{`$responses`}{: node name of responses} #' \item{`$predictors`}{: node name of predictors} #' } #' } #' #' \item{`$data`}{ #' \itemize{ #' \item{`$response`}{: response matrix} #' \item{`$design`}{: design matrix} #' } #' } #' #' \item{`$settings`}{: all settings sent to the algorithm, exclude data} #' \item{`$MCMC_output`}{ #' \itemize{ #' \item{`$beta`}{: A coda::mcmc object, each row was an MCMC sample of the (column) vectorization of regression coefficient B} #' \item{`$mu`}{: A coda::mcmc object, each row was an MCMC sample of the mean vector} #' \item{`$Omega`}{: A coda::mcmc object, each row was an MCMC sample of the upper triangular part (with diagonal) of precision matrix Omega} #' \item{`$lambda`}{: \strong{Non-adaptive only}, A coda::mcmc object, first column was the shrinkage parameter lambda for regression coefficient and the second column was shrinkage parameter lambda for precision matrix} #' \item{`$lambda_beta`}{: \strong{Adaptive only}, A coda::mcmc object, each row was an MCMC sample of the (column) vectorization of shrinkage parameter for regression coefficient B} #' \item{`$lambda_Omega`}{: \strong{Adaptive only}, A coda::mcmc object, each row was an MCMC sample of the shrinkage parameter for the upper triangular part (without diagonal) of precision matrix Omega} #' } #' } #' } #' #' #' #' @examples #' set.seed(42) #' dt <- simu_AR1() #' car_res <- CARlasso(y1+y2+y3+y4+y5~x1+x2+x3+x4+x5, data = dt, adaptive = TRUE) #' plot(car_res,tol = 0.05) #' # with horseshoe inference #' car_res <- horseshoe(car_res) #' plot(car_res) #' #' CARlasso <- function(formula, # a double sided formula needed, e.g. x+y~a+b data, link = "identity", adaptive = FALSE, r_beta = ifelse(adaptive,0.01,1), delta_beta = ifelse(adaptive,1e-6,0.01), r_Omega = ifelse(adaptive,0.01,1), delta_Omega = ifelse(adaptive,1e-6,0.01), lambda_diag = 0, n_iter = 2000, n_burn_in = 1000, thin_by = 10, ns = 1000, m=20, emax=64, progress = TRUE, verbos = TRUE) { # some warning messages err_no_predictor <- "No predictor supplied, do you want to try bGlasso?.\n\n" warr_centering <- "Predictors will be centered.\n\n" # check links if (!(link %in% c("identity", "probit", "log", "logit"))) { stop("Currently only implemented identity (normal), log (Poisson) logit (multi-nomial) and probit (bernoulli)") } # omit NAs if (!all(!is.na(data)) & verbos) { warning("NAs in data are omitted") data <- na.omit(data) } # get matrices from data design <- model.matrix(formula, data) # design matrix, no intercept: design <- design[, colnames(design) != "(Intercept)"] design <- as.matrix(design) response <- formula response[[3]] <- formula[[2]] y <- model.matrix(response, data) # response matrix. 
y <- y[, colnames(y) != "(Intercept)"] rm(response) # center predictors if (ncol(design) == 0) { stop(err_no_predictor) } if (verbos) { cat(warr_centering) } design <- apply(design, 2, function(w) { (w - mean(w)) / sd(w) }) # get dimensions n <- nrow(design) p <- ncol(design) k <- ncol(y) - (link == "logit") # careful for multinomial response ### check dimension of r_beta dimname1 <- c( "nrow r_beta", "ncol r_beta", "nrow delta_beta", "ncol delta_beta" ) if (!all(c(c(r_beta), c(delta_beta)) > 0)) { stop("Hyperparameters for beta shrinkage must be positive\n\n") } if (!adaptive) { if (is.null(r_beta)) r_beta <- 1 if (is.null(delta_beta)) delta_beta <- 0.01 if (verbos & (length(r_beta) > 1 | length(delta_beta) > 1)) { cat("Algorithm set to be non-adapive, will take the first entry of hyperprior for beta shrinkage\n\n") } r_beta <- r_beta[1] delta_beta <- delta_beta[1] } else { if (is.null(r_beta)) r_beta <- 0.01 if (is.null(delta_beta)) delta_beta <- 1e-6 if ((length(r_beta) == 1 & length(delta_beta) == 1)) { if (verbos) cat("Algorithm set to be adapive. Assuming all hyper parameters are the same for beta \n\n") r_beta <- matrix(r_beta, p, k) delta_beta <- matrix(delta_beta, p, k) } else { dims <- c(nrow(r_beta), ncol(r_beta), nrow(delta_beta), ncol(delta_beta)) prop_dim <- c(p, k, p, k) mismatch <- dimname1[dims != prop_dim] if (length(mismatch) > 0) { errmsg <- paste( "Dimension mismatch for hyper prior of beta shrinkage: ", paste(mismatch, collapse = " "), "\n\n" ) stop(errmsg) } } } ## end checking r_beta ## checking r_Omega, delta_Omega if (is.matrix(r_Omega)) { if (verbos) cat("Supplied matrix for hyper parameter for r_Omega, will only take upper triangular part\n\n") r_Omega <- c(r_Omega[upper.tri(r_Omega)]) } if (is.matrix(delta_Omega)) { if (verbos) cat("Supplied matrix for hyper parameter for delta_Omega, will only take upper triangular part\n\n") delta_Omega <- c(delta_Omega[upper.tri(delta_Omega)]) } if (!adaptive) { if (is.null(r_Omega)) r_Omega <- 1 if (is.null(delta_Omega)) delta_Omega <- 0.01 if (verbos & (length(r_Omega) > 1 | length(delta_Omega) > 1)) { cat("Algorithm set to be non-adapive, will take the first entry of hyper prior for Omega shrinkage\n\n") } r_Omega <- r_Omega[1] delta_Omega <- delta_Omega[1] } else { if (is.null(r_Omega)) r_Omega <- 0.01 if (is.null(delta_Omega)) delta_Omega <- 1e-6 if (is.null(lambda_diag)) lambda_diag <- 0 if ((length(r_Omega) == 1 & length(delta_Omega) == 1)) { if (verbos) cat("Algorithm set to be adapive. Assuming all hyper parameters are the same for Omega's off diagonal entries \n\n") r_Omega <- rep(r_Omega, .5 * (k - 1) * k) delta_Omega <- rep(delta_Omega, .5 * (k - 1) * k) } else { if (length(r_Omega) != .5 * (k - 1) * k | length(delta_Omega) != .5 * (k - 1) * k) { errmsg <- "Dimension mismatch for hyper prior of Omega shrinkage \n\n" stop(errmsg) } } if ((length(lambda_diag) == 1 )) { if (verbos) cat("Algorithm set to be adaptive. 
Assuming priors are all the same for Omega's diagonals \n\n") lambda_diag <- rep(lambda_diag, k) } else { if(length(lambda_diag) != k ){ errmsg <- "Dimension mismatch for hyper prior of Omega diagonal shrinkage \n\n" stop(errmsg) } } } ## end checking Omega hyper parameters ## Main algorithms if (link == "identity") { if (verbos) cat("Algorithm start...\n\n") if (verbos & progress) cat("progress:\n\n") if (adaptive) { res <- CAR_ALASSO_Cpp( y, design, n_iter, n_burn_in, thin_by, r_beta, delta_beta, r_Omega, delta_Omega, lambda_diag, progress ) } else { res <- CAR_LASSO_Cpp( y, design, n_iter, n_burn_in, thin_by, r_beta, delta_beta, r_Omega, delta_Omega, progress ) } } if (link == "log") { if (verbos) cat("Algorithm start...\n\n") if (verbos & progress) cat("progress:\n\n") if (adaptive) { res <- CAR_ALASSO_hir_Cpp( y, design, link = 1, n_iter, n_burn_in, thin_by, r_beta, delta_beta, r_Omega, delta_Omega, lambda_diag, ns, m, emax, progress ) } else { res <- CAR_LASSO_hir_Cpp( y, design, link = 1, n_iter, n_burn_in, thin_by, r_beta, delta_beta, r_Omega, delta_Omega, ns, m, emax, progress ) } } if (link == "logit") { if (verbos) cat("Last response will be used as reference group\n\n") if (verbos) cat("Algorithm start...\n\n") if (verbos & progress) cat("progress:\n\n") if (adaptive) { res <- CAR_ALASSO_hir_Cpp( y, design, link = 2, n_iter, n_burn_in, thin_by, r_beta, delta_beta, r_Omega, delta_Omega, lambda_diag, ns, m, emax, progress ) } else { res <- CAR_LASSO_hir_Cpp( y, design, link = 2, n_iter, n_burn_in, thin_by, r_beta, delta_beta, r_Omega, delta_Omega, ns, m, emax, progress ) } } if (link == "probit") { unique_values <- apply(y, 2, function(w) { length(unique(w)) }) if (!all(unique_values == 2)) { stop("Response has multiple unique values, cannot use probit link, do you mean logit?\n") } if (verbos) cat("Algorithm start...\n\n") if (verbos & progress) cat("progress:\n\n") if (adaptive) { res <- CAR_ALASSO_hir_Cpp( y, design, link = 3, n_iter, n_burn_in, thin_by, r_beta, delta_beta, r_Omega, delta_Omega, lambda_diag, ns, m, emax, progress ) } else { res <- CAR_LASSO_hir_Cpp( y, design, link = 3, n_iter, n_burn_in, thin_by, r_beta, delta_beta, r_Omega, delta_Omega, ns, m, emax, progress ) } } omega_post <- get_graph(res,k) b_post <- matrix(colMeans(res$beta),p,k) CAR_post <- get_CAR_MB(b_post,omega_post) point_est <- list(Omega = omega_post, beta = b_post, CAR = CAR_post) res <- lapply(res, coda::mcmc) settings <- list(formula = formula, link = link, adaptive = adaptive, r_beta = r_beta , delta_beta = delta_beta , r_Omega = r_Omega, delta_Omega = delta_Omega, lambda_diag = lambda_diag, n_iter = n_iter, n_burn_in = n_burn_in, thin_by = thin_by, ns=ns,m=m,emax=emax,progress = progress, verbos = verbos) nodes <- list(response = colnames(y), predictors = colnames(design)) res <- list(point_est = point_est, nodes = nodes, data = list(response = y, design = design), settings = settings, MCMC_output = res) class(res) <- "carlasso_out" if(verbos) cat("\ndone\n\n") return(res) } #' Gibbs sampler for Bayesian Graphical LASSO and extensions #' #' @description Main sampling algorithm of Glasso model, note that the mean is in CAR parameterization #' #' @param data A data.frame with all response, row as observations #' @param link String name of link function? Currently can be "identity" for normal response, "probit" for binary, "log" for counting, "logit" for compositional. Note that when use "logit", the last response will be used as reference. 
#' @param r_Omega Hyper-parameter for precision matrix, shape parameter of Gamma. Should be a scalar #' @param delta_Omega Hyper-parameter for precision matrix, rate parameter of Gamma. Shoule be a scalar #' @param n_iter Number of sampling iterations (i.e. after burn in) for the Gibbs sampler #' @param n_burn_in Number of burn in iterations for the Gibbs sampler #' @param thin_by Final sample was thin by this number #' @param ns parameter for ARS, maximum number of hulls, only used when link is "log" and "logit" #' @param m parameter for ARS, initial number of hulls, only used when link is "log" and "logit" #' @param emax parameter for ARS, tolerance for small values being 0, larger meaning we tolerate smaller values, only used when link is "log" and "logit" #' @param progress Bool, whether report progress from C++ #' @param verbos Bool, whether show warnings and messages. #' #' @return A `bglasso_out` object with elements: #' \itemize{ #' \item{`$point_est`}{ #' \itemize{ #' \item{`$Omega`}{: Posterior mean of precision matrix} #' } #' } #' \item{`$nodes`}{ #' \itemize{ #' \item{`$responses`}{: node name of responses} #' } #' } #' #' \item{`$data`}{ #' \itemize{ #' \item{`$response`}{: response matrix} #' } #' } #' #' \item{`$settings`}{: all settings sent to the algorithm, exclude data} #' \item{`$MCMC_output`}{ #' \itemize{ #' \item{`$mu`}{: A coda::mcmc object, each row was an MCMC sample of the mean vector} #' \item{`$Omega`}{: A coda::mcmc object, each row was an MCMC sample of the upper triangular part (with diagonal) of precision matrix Omega} #' \item{`$lambda`}{: A coda::mcmc object, first column was the shrinkage parameter lambda for regression coefficient and the second column was shrinkage parameter lambda for precision matrix} #' } #' } #' } #' #' #' #' @examples #' set.seed(42) #' dt <- simu_AR1() #' glassores <- bGlasso(data = dt[,1:5]) #' plot(glassores) #' bGlasso <- function(data, link = "identity", r_Omega = 1, delta_Omega = 0.01, n_iter = 2000, n_burn_in = 1000, thin_by = 10, ns = 1000, m=20, emax=64, progress = TRUE, verbos = TRUE) { # check links if (!(link %in% c("identity", "probit", "log", "logit"))) { stop("Currently only implemented identity (normal), log (Poisson) logit (multi-nomial) and probit (bernoulli)") } # omit NAs if (!all(!is.na(data)) & verbos) { warning("NAs in data are omitted") data <- na.omit(data) } y <- as.matrix(data) # get dimensions n <- nrow(y) k <- ncol(y) - (link == "logit") # careful for multinomial response if (is.null(r_Omega)) r_Omega <- 1 if (is.null(delta_Omega)) delta_Omega <- 0.01 if (verbos & (length(r_Omega) > 1 | length(delta_Omega) > 1)) { cat("Multiple hyper parameters supplied, will take the first entry of hyper prior for Omega shrinkage\n\n") } r_Omega <- r_Omega[1] delta_Omega <- delta_Omega[1] ## end checking Omega hyper parameters ## Main algorithms if (link == "identity") { if (verbos) cat("Algorithm start...\n\n") if (verbos & progress) cat("progress:\n\n") res <- Intercept_Graphical_LASSO_Cpp( y,n_iter, n_burn_in, thin_by, r_Omega, delta_Omega, progress ) } if (link == "log") { if (verbos) cat("Algorithm start...\n\n") if (verbos & progress) cat("progress:\n\n") res <- Intercept_Graphical_LASSO_hir_Cpp( y, 1, n_iter, n_burn_in, thin_by, r_Omega, delta_Omega, ns, m, emax, progress ) } if (link == "logit") { if (verbos) cat("Last response will be used as reference group\n\n") if (verbos) cat("Algorithm start...\n\n") if (verbos & progress) cat("progress:\n\n") res <- Intercept_Graphical_LASSO_hir_Cpp( y, 2, n_iter, 
n_burn_in, thin_by, r_Omega, delta_Omega, ns, m, emax, progress ) } if (link == "probit") { unique_values <- apply(y, 2, function(w) { length(unique(w)) }) if (!all(unique_values == 2)) { stop("Response has multiple unique values, cannot use probit link, do you mean logit?\n") } if (verbos) cat("Algorithm start...\n\n") if (verbos & progress) cat("progress:\n\n") res <- Intercept_Graphical_LASSO_hir_Cpp( y, 3, n_iter, n_burn_in, thin_by, r_Omega, delta_Omega, ns, m, emax, progress ) } omega_post <- get_graph(res,k) point_est <- list(Omega = omega_post) res <- lapply(res, coda::mcmc) settings <- list(link = link, r_Omega = r_Omega, delta_Omega = delta_Omega, n_iter = n_iter, n_burn_in = n_burn_in, thin_by = thin_by, ns=ns,m=m,emax=emax,progress = progress, verbos = verbos) nodes <- list(response = colnames(y)) res <- list(point_est = point_est, nodes = nodes, data = list(response = y), settings = settings, MCMC_output = res) class(res) <- "bglasso_out" if(verbos) cat("\ndone\n\n") return(res) }
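# Small helper sketch (the name bglasso_precision is an assumption of this
# note, not part of the package API): extracts the posterior-mean precision
# matrix from a fitted `bglasso_out` object and labels it with the response
# names, dropping the reference category when the logit link was used.
bglasso_precision <- function(fit) {
    nm <- fit$nodes$response
    if (fit$settings$link == "logit") nm <- nm[-length(nm)]  # last response is the reference
    Omega <- fit$point_est$Omega
    dimnames(Omega) <- list(nm, nm)
    Omega
}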
/scratch/gouwar.j/cran-all/cranData/CARlasso/R/CAR-LASSO.R
# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 CAR_ALASSO_hir_Cpp <- function(data, design, link, n_iter, n_burn_in, thin_by, r_beta, delta_beta, r_Omega, delta_Omega, lambda_diag, ns, m, emax, progress) { .Call(`_CARlasso_CAR_ALASSO_hir_Cpp`, data, design, link, n_iter, n_burn_in, thin_by, r_beta, delta_beta, r_Omega, delta_Omega, lambda_diag, ns, m, emax, progress) } CAR_ALASSO_Cpp <- function(data, design, n_iter, n_burn_in, thin_by, r_beta, delta_beta, r_Omega, delta_Omega, lambda_diag, progress) { .Call(`_CARlasso_CAR_ALASSO_Cpp`, data, design, n_iter, n_burn_in, thin_by, r_beta, delta_beta, r_Omega, delta_Omega, lambda_diag, progress) } CAR_LASSO_hir_Cpp <- function(data, design, link, n_iter, n_burn_in, thin_by, r_beta, delta_beta, r_Omega, delta_Omega, ns, m, emax, progress) { .Call(`_CARlasso_CAR_LASSO_hir_Cpp`, data, design, link, n_iter, n_burn_in, thin_by, r_beta, delta_beta, r_Omega, delta_Omega, ns, m, emax, progress) } CAR_LASSO_Cpp <- function(data, design, n_iter, n_burn_in, thin_by, r_beta, delta_beta, r_Omega, delta_Omega, progress) { .Call(`_CARlasso_CAR_LASSO_Cpp`, data, design, n_iter, n_burn_in, thin_by, r_beta, delta_beta, r_Omega, delta_Omega, progress) } update_car_tau2_adp_helper <- function(beta, lambda2, Omega, k, p, n) { .Call(`_CARlasso_update_car_tau2_adp_helper`, beta, lambda2, Omega, k, p, n) } update_car_lambda_Omega_adp_helper <- function(lambda_curr, Omega, r, delta) { invisible(.Call(`_CARlasso_update_car_lambda_Omega_adp_helper`, lambda_curr, Omega, r, delta)) } update_car_beta_helper <- function(data, design, mu, tau2, Omega, k, p, n) { .Call(`_CARlasso_update_car_beta_helper`, data, design, mu, tau2, Omega, k, p, n) } update_car_mu_helper <- function(data, design, beta, Omega, k, p, n) { .Call(`_CARlasso_update_car_mu_helper`, data, design, beta, Omega, k, p, n) } update_car_Omega_helper <- function(Omega, data, design, mu, beta, lambda_curr, k, p, n) { invisible(.Call(`_CARlasso_update_car_Omega_helper`, Omega, data, design, mu, beta, lambda_curr, k, p, n)) } update_car_tau2_helper <- function(beta, lambda2, Omega, k, p, n) { .Call(`_CARlasso_update_car_tau2_helper`, beta, lambda2, Omega, k, p, n) } rgig <- function(lambda, chi, psi) { .Call(`_CARlasso_rgig`, lambda, chi, psi) } update_Z_helper_CAR <- function(Z_curr, data, design, mu_curr, beta_curr, Omega_curr, k, p, n) { invisible(.Call(`_CARlasso_update_Z_helper_CAR`, Z_curr, data, design, mu_curr, beta_curr, Omega_curr, k, p, n)) } rtn1 <- function(mean, sd, low, high) { .Call(`_CARlasso_rtn1`, mean, sd, low, high) } update_Z_helper_multinomial <- function(Z_curr, mu_Z, Sigma_Z, y, k, p, n, ns, m, emax) { invisible(.Call(`_CARlasso_update_Z_helper_multinomial`, Z_curr, mu_Z, Sigma_Z, y, k, p, n, ns, m, emax)) } update_Z_helper_Pois <- function(Z_curr, mu_Z, Sigma_Z, y, k, p, n, ns, m, emax) { invisible(.Call(`_CARlasso_update_Z_helper_Pois`, Z_curr, mu_Z, Sigma_Z, y, k, p, n, ns, m, emax)) } update_Z_helper_Pois_reg <- function(Z_curr, data, design, mu_curr, beta_curr, Omega_curr, k, p, n, ns, m, emax) { invisible(.Call(`_CARlasso_update_Z_helper_Pois_reg`, Z_curr, data, design, mu_curr, beta_curr, Omega_curr, k, p, n, ns, m, emax)) } Intercept_Graphical_ALASSO_Cpp <- function(data, n_iter, n_burn_in, thin_by, lambda_a, lambda_b, lambda_diag, progress) { .Call(`_CARlasso_Intercept_Graphical_ALASSO_Cpp`, data, n_iter, n_burn_in, thin_by, lambda_a, lambda_b, lambda_diag, progress) } Intercept_Graphical_ALASSO_hir_Cpp 
<- function(data, link, n_iter, n_burn_in, thin_by, lambda_a, lambda_b, lambda_diag, ns, m, emax, progress) { .Call(`_CARlasso_Intercept_Graphical_ALASSO_hir_Cpp`, data, link, n_iter, n_burn_in, thin_by, lambda_a, lambda_b, lambda_diag, ns, m, emax, progress) } Intercept_Graphical_LASSO_Cpp <- function(data, n_iter, n_burn_in, thin_by, lambda_a, lambda_b, progress) { .Call(`_CARlasso_Intercept_Graphical_LASSO_Cpp`, data, n_iter, n_burn_in, thin_by, lambda_a, lambda_b, progress) } Intercept_Graphical_LASSO_hir_Cpp <- function(data, link, n_iter, n_burn_in, thin_by, lambda_a, lambda_b, ns, m, emax, progress) { .Call(`_CARlasso_Intercept_Graphical_LASSO_hir_Cpp`, data, link, n_iter, n_burn_in, thin_by, lambda_a, lambda_b, ns, m, emax, progress) } rinvGau <- function(mu, lambda) { .Call(`_CARlasso_rinvGau`, mu, lambda) } stein_loss_cpp <- function(Omega, Omega_hat) { .Call(`_CARlasso_stein_loss_cpp`, Omega, Omega_hat) } CAR_multireg_cpp <- function(data, design, n_sample, Bbar, A, nu, V) { .Call(`_CARlasso_CAR_multireg_cpp`, data, design, n_sample, Bbar, A, nu, V) } Multinomial_CAR_multireg_cpp <- function(data, design, n_burn_in, n_iter, thin_by, Bbar, A, nu, V, ns, m, emax) { .Call(`_CARlasso_Multinomial_CAR_multireg_cpp`, data, design, n_burn_in, n_iter, thin_by, Bbar, A, nu, V, ns, m, emax) } Pois_CAR_multireg_cpp <- function(data, design, n_burn_in, n_iter, thin_by, Bbar, A, nu, V, ns, m, emax) { .Call(`_CARlasso_Pois_CAR_multireg_cpp`, data, design, n_burn_in, n_iter, thin_by, Bbar, A, nu, V, ns, m, emax) } Probit_CAR_multireg_cpp <- function(data, design, n_burn_in, n_iter, thin_by, Bbar, A, nu, V) { .Call(`_CARlasso_Probit_CAR_multireg_cpp`, data, design, n_burn_in, n_iter, thin_by, Bbar, A, nu, V) } #' @title Block Gibbs sampler for adaptive CAR-LASSO #' #' @description \strong{This function is for advanced users to build their own sampler use adaptive CARlasso as core.} It will execute one round of Gibbs sampler of adaptive CAR-LASSO model. Be aware that the function is a `void` function implemented in C++, and all updated parameters e.g. Omega will be manipulate directly in memory to save space. Users should manage to do their own work to save the state. Also be aware that R uses shallow copy by default, which means one cannot save the state by simply give it to another object e.g. first `Omega_old <- Omega_curr` then update `Omega_curr`, `Omega_old` will also change. \strong{This function will NOT check dimensions of input.} Below we assume n samples, k responses and p predictors. #' @param Z_curr the current (latent) normal Z_curr, should be n*k. Will not be changed #' @param design the design matrix, should be n*p. Will not be changed #' @param lambda2_beta the current shrinkage parameter of regression coefficients, should be a vector with p*k entries. Will be updated #' @param tau2_curr the current latent scale parameter in the normal mixture representation of Laplace, for regression coefficients, should be a vector with p*k entries. Will be updated. #' @param beta_curr the current regression coefficients, should be a matrix sized p*k (p row and k columns). Will be updated. #' @param lambda_Omega the current shrinkage parameter for Omega, should be a vector with k*(k-1)/2 entries. Will be updated. #' @param Omega_curr the current Omega matrix, should be a matrix of size k*k. Will be updated. #' @param mu_curr the current mu, intercept, should be a vector of size k. Will be updated. 
#' @param r_beta hyperprior's parameter of shrinkage for regression coefficients, should be a scalar of type 'double' and positive. Will not be updated.
#' @param delta_beta hyperprior's parameter of shrinkage for regression coefficients, should be a scalar of type 'double' and positive. Will not be updated.
#' @param r_Omega hyperprior's parameter of shrinkage for precision Omega, should be a scalar of type 'double' and positive. Will not be updated.
#' @param delta_Omega hyperprior's parameter of shrinkage for precision Omega, should be a scalar of type 'double' and positive. Will not be updated.
#' @param lambda_diag shrinkage parameter of the diagonal of Omega, should be a non-negative vector of size k. Will not be updated.
#' @param k integer, number of responses
#' @param p integer, number of predictors
#' @param n integer, number of data points
#' @return Again, this is a `void` function and will not return anything. All updates happen in memory directly.
rCARAlasso_ <- function(Z_curr, design, lambda2_beta, tau2_curr, beta_curr, lambda_Omega, Omega_curr, mu_curr, r_beta, delta_beta, r_Omega, delta_Omega, lambda_diag, k, p, n) {
    invisible(.Call(`_CARlasso_rCARAlasso_`, Z_curr, design, lambda2_beta, tau2_curr, beta_curr, lambda_Omega, Omega_curr, mu_curr, r_beta, delta_beta, r_Omega, delta_Omega, lambda_diag, k, p, n))
}

#' @title Block Gibbs sampler for CAR-LASSO
#'
#' @description \strong{This function is for advanced users who want to build their own sampler using CARlasso as the core.} It executes one round of the Gibbs sampler for the CAR-LASSO model. Be aware that the function is a `void` function implemented in C++, and all updated parameters, e.g. Omega, are manipulated directly in memory to save space, so users are responsible for saving the state themselves. Also be aware that plain assignment in R does not copy the underlying memory, which means one cannot save the state by simply assigning it to another object: e.g. if you first set `Omega_old <- Omega_curr` and then update `Omega_curr`, `Omega_old` will also change. \strong{This function will NOT check dimensions of input.} Below we assume n samples, k responses and p predictors.
#'
#' @param Z_curr the current (latent) normal data, should be n*k. Will not be changed
#' @param design the design matrix, should be n*p. Will not be changed
#' @param lambda2_beta the current shrinkage parameter of regression coefficients, should be a scalar of type `double`. Will be updated
#' @param tau2_curr the current latent scale parameter in the normal mixture representation of Laplace, for regression coefficients, should be a vector with p*k entries. Will be updated.
#' @param beta_curr the current regression coefficients, should be a matrix sized p*k (p rows and k columns). Will be updated.
#' @param lambda_Omega the current shrinkage parameter for Omega, should be a scalar of type `double`. Will be updated.
#' @param Omega_curr the current Omega matrix, should be a matrix of size k*k. Will be updated.
#' @param mu_curr the current mu, intercept, should be a vector of size k. Will be updated.
#' @param r_beta hyperprior's parameter of shrinkage for regression coefficients, should be a scalar of type 'double' and positive. Will not be updated.
#' @param delta_beta hyperprior's parameter of shrinkage for regression coefficients, should be a scalar of type 'double' and positive. Will not be updated.
#' @param r_Omega hyperprior's parameter of shrinkage for precision Omega, should be a scalar of type 'double' and positive. Will not be updated.
#' @param delta_Omega hyperprior's parameter of shrinkage for precision Omega, should be a scalar of type 'double' and positive. Will not be updated.
#' @param k integer, number of responses
#' @param p integer, number of predictors
#' @param n integer, number of data points
#' @return Again, this is a `void` function and will not return anything. All updates happen in memory directly.
#' @export
rCARlasso_ <- function(Z_curr, design, lambda2_beta, tau2_curr, beta_curr, lambda_Omega, Omega_curr, mu_curr, r_beta, delta_beta, r_Omega, delta_Omega, k, p, n) {
    invisible(.Call(`_CARlasso_rCARlasso_`, Z_curr, design, lambda2_beta, tau2_curr, beta_curr, lambda_Omega, Omega_curr, mu_curr, r_beta, delta_beta, r_Omega, delta_Omega, k, p, n))
}
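# Minimal sketch of the advanced-user workflow described above. Everything
# below is an illustration added for clarity, not part of the package API: the
# helper name my_car_gibbs, the initial values and the hyperparameters are all
# assumptions, and the data y / centred design matrix are supplied by the user
# (identity link assumed). Because rCARlasso_() updates its arguments in place,
# each draw is copied into a storage matrix before the next call.
my_car_gibbs <- function(y, design, n_iter = 100,
                         r_beta = 1, delta_beta = 0.01,
                         r_Omega = 1, delta_Omega = 0.01) {
    y <- as.matrix(y)
    design <- as.matrix(design)
    n <- nrow(y); k <- ncol(y); p <- ncol(design)
    Z_curr <- y                          # for the identity link the data are the latent normals
    beta_curr <- matrix(0, p, k)         # regression coefficients, p x k
    Omega_curr <- diag(k)                # precision matrix, k x k
    mu_curr <- colMeans(y)               # intercepts
    tau2_curr <- rep(1, p * k)           # latent scales of the Laplace mixture
    lambda2_beta <- 1                    # shrinkage parameter for beta
    lambda_Omega <- 1                    # shrinkage parameter for Omega
    beta_store <- matrix(NA_real_, n_iter, p * k)
    Omega_store <- matrix(NA_real_, n_iter, k * k)
    for (i in 1:n_iter) {
        rCARlasso_(Z_curr, design, lambda2_beta, tau2_curr, beta_curr,
                   lambda_Omega, Omega_curr, mu_curr,
                   r_beta, delta_beta, r_Omega, delta_Omega, k, p, n)
        beta_store[i, ] <- c(beta_curr)   # subsetting forces a copy of the current state
        Omega_store[i, ] <- c(Omega_curr)
    }
    list(beta = beta_store, Omega = Omega_store)
}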
/scratch/gouwar.j/cran-all/cranData/CARlasso/R/RcppExports.R
expandDoubleVerts <- function (term) { expandDoubleVert <- function(term) { frml <- formula(substitute(~x, list(x = term[[2]]))) newtrms <- paste0("0+", attr(terms(frml), "term.labels")) if (attr(terms(frml), "intercept") != 0) newtrms <- c("1", newtrms) as.formula(paste("~(", paste(vapply(newtrms, function(trm) paste0(trm, "|", deparse(term[[3]])), ""), collapse = ")+("), ")"))[[2]] } if (!is.name(term) && is.language(term)) { if (term[[1]] == as.name("(")) { term[[2]] <- expandDoubleVerts(term[[2]]) } stopifnot(is.call(term)) if (term[[1]] == as.name("||")) return(expandDoubleVert(term)) term[[2]] <- expandDoubleVerts(term[[2]]) if (length(term) != 2) { if (length(term) == 3) term[[3]] <- expandDoubleVerts(term[[3]]) } } term } findbars <- function (term) { fb <- function(term) { if (is.name(term) || !is.language(term)) return(NULL) if (term[[1]] == as.name("(")) return(fb(term[[2]])) stopifnot(is.call(term)) if (term[[1]] == as.name("|")) return(term) if (length(term) == 2) return(fb(term[[2]])) c(fb(term[[2]]), fb(term[[3]])) } expandSlash <- function(bb) { makeInteraction <- function(x) { if (length(x) < 2) return(x) trm1 <- makeInteraction(x[[1]]) trm11 <- if (is.list(trm1)) trm1[[1]] else trm1 list(substitute(foo:bar, list(foo = x[[2]], bar = trm11)), trm1) } slashTerms <- function(x) { if (!("/" %in% all.names(x))) return(x) if (x[[1]] != as.name("/")) stop("unparseable formula for grouping factor", call. = FALSE) list(slashTerms(x[[2]]), slashTerms(x[[3]])) } if (!is.list(bb)) expandSlash(list(bb)) else unlist(lapply(bb, function(x) { if (length(x) > 2 && is.list(trms <- slashTerms(x[[3]]))) lapply(unlist(makeInteraction(trms)), function(trm) substitute(foo | bar, list(foo = x[[2]], bar = trm))) else x })) } modterm <- expandDoubleVerts(if (is(term, "formula")) term[[length(term)]] else term) expandSlash(fb(modterm)) }
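# Illustrative sketch (the name demo_findbars is an assumption of this note,
# not part of the package API): these unexported utilities follow the
# lme4-style formula conventions. findbars() collects the "|" grouping terms of
# a formula after expandDoubleVerts() has rewritten any "||" term as separate,
# independent "|" terms.
demo_findbars <- function() {
    # returns a list containing the calls `1 | g` and `0 + x | g`
    findbars(y ~ x + (x || g))
}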
/scratch/gouwar.j/cran-all/cranData/CARlasso/R/findbars.R
#' Horseshoe method for graphical structure inference
#'
#' @details This method fits a multi-response regression with the same link but a much less informative prior on the parameters, and compares its posterior mean with the LASSO result. If the LASSO estimate is small compared with the estimate obtained without the sparsity prior, we argue that the edge should be absent.
#' @param obj The carlasso_out object from CARlasso
#' @param Bbar Prior mean of regression coefficients, default all 0s
#' @param A Prior precision of regression coefficients, default 1e-8
#' @param nu Prior degrees of freedom of the Wishart on the precision matrix
#' @param V prior covariance matrix of the Wishart on the precision matrix
#' @param thr threshold for horseshoe inference, default 0.5
#' @return A `carlasso_out` object with the learned binary adjacency matrix and the multi-response regression MCMC output
#' @export
#' @examples
#' set.seed(42)
#' dt <- simu_AR1()
#' car_res <- CARlasso(y1+y2+y3+y4+y5~x1+x2+x3+x4+x5, data = dt, adaptive = TRUE)
#' car_res <- horseshoe(car_res)
#' plot(car_res)
horseshoe <- function(obj, Bbar=NULL, A = NULL, nu=3, V=NULL, thr = 0.5 ){
    y <- obj$data$response
    design <- obj$data$design
    ns <- obj$settings$ns
    m <- obj$settings$m
    emax <- obj$settings$emax
    if(obj$settings$link == "identity"){
        multireg_res <- CAR_multireg(y,design,nrow(obj$MCMC_output$beta), Bbar, A, nu, V)
    }
    if(obj$settings$link == "probit"){
        multireg_res <- Probit_CAR_multireg(y,design,obj$settings$n_burn_in,obj$settings$n_iter, obj$settings$thin_by, Bbar, A, nu, V)
    }
    if(obj$settings$link == "log"){
        multireg_res <- Pois_CAR_multireg(y,design,obj$settings$n_burn_in,obj$settings$n_iter, obj$settings$thin_by, Bbar, A, nu, V, ns, m, emax)
    }
    if(obj$settings$link == "logit"){
        multireg_res <- Multinomial_CAR_multireg(y,design,obj$settings$n_burn_in,obj$settings$n_iter, obj$settings$thin_by, Bbar, A, nu, V, ns, m, emax)
    }
    graph_multireg <- get_graph(multireg_res, k = nrow(obj$point_est$Omega))
    B_multireg <- matrix(colMeans(multireg_res$beta),nrow = nrow(obj$point_est$beta))
    horseshoe_binary <- list(Omega_binary = abs(obj$point_est$Omega/graph_multireg)>thr,
                             B_binary = abs(abs(obj$point_est$beta/B_multireg)>thr))
    obj$horseshoe_binary <- horseshoe_binary
    obj$multireg_mcmc <- multireg_res
    return(obj)
}
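# Illustrative helper sketch (the name horseshoe_edges is an assumption of this
# note, not part of the package API): turns the binary adjacency matrix learned
# by horseshoe() into an edge list between responses, one row per retained
# undirected edge.
horseshoe_edges <- function(obj) {
    adj <- obj$horseshoe_binary$Omega_binary
    nm <- obj$nodes$response
    if (obj$settings$link == "logit") nm <- nm[-length(nm)]  # last response is the reference
    idx <- which(adj & upper.tri(adj), arr.ind = TRUE)       # keep each edge once, skip the diagonal
    data.frame(from = nm[idx[, 1]], to = nm[idx[, 2]])
}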
/scratch/gouwar.j/cran-all/cranData/CARlasso/R/graph-learning.R
#' Gut microbiota in the Irish Elderly
#'
#' This study is based on pyrosequencing of 16S rDNA amplicons from faecal samples collected from 178 elderly Irish citizens and 13 healthy young control subjects. A subset of these samples was also subjected to shotgun sequencing using Illumina HiSeq 2000 2x91bp reads. Antibiotic treatment was an exclusion criterion.
#'
#' @docType data
#'
#' @usage data(mgp154)
#'
#' @format A data.frame with genera and predictors.
#'
#' @keywords datasets
#'
#' @references Claesson, Marcus J., et al. "Gut microbiota composition correlates with diet and health in the elderly." Nature 488.7410 (2012): 178-184.
#'
#' @source \href{https://www.mg-rast.org/mgmain.html?mgpage=project&project=mgp154}{MG-RAST-mgp154}
#'
"mgp154"

#' Hofmockel Soil Aggregate COB KBASE
#'
#' This study examines soil microbial community composition and structure of both bacteria and fungi at a microbially relevant scale. The researchers isolated soil aggregates from three land management systems in central Iowa to test whether the aggregate-level microbial responses are related to plant community and management practices. The clean dataset has 120 samples with 17 genera under consideration.
#'
#' @docType data
#'
#' @usage data(mgp2592)
#'
#' @format A data.frame with genera and predictors.
#'
#' @keywords datasets
#'
#' @references Bach, Elizabeth M., et al. "Greatest soil microbial diversity found in micro-habitats." Soil Biology and Biochemistry 118 (2018): 217-226.
#'
#' @source \href{https://www.mg-rast.org/mgmain.html?mgpage=project&project=mgp2592}{MG-RAST-mgp2592}
#'
"mgp2592"
/scratch/gouwar.j/cran-all/cranData/CARlasso/R/mgp_data.R
stein_loss <- function(Omega, Omega_hat) { stein_loss_cpp(Omega, Omega_hat) } get_graph <- function(CAR_sample, k, summary = "mean") { Omega <- matrix(0, k, k) Omega[upper.tri(Omega, T)] <- apply(CAR_sample$Omega, 2, summary) Omega <- Omega + t(Omega) diag(Omega) <- 0.5 * diag(Omega) return(Omega) } get_CAR_MB <- function(B, Omega) { D <- diag(diag(Omega)) R <- D - Omega return(list( M = diag(1 / diag(Omega)), C = t(solve(D, R)), B = t(solve(D, t(B))) )) } get_partial_correlation <- function(Omega) { Sigma <- solve(Omega) D <- diag(sqrt(diag(Sigma))) D %*% Omega %*% D }
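# Illustrative check (the name check_CAR_MB_identities is an assumption of this
# note, not part of the package API): on a toy 2x2 example, the conditional
# (CAR) quantities returned by get_CAR_MB() satisfy M = diag(1/omega_ii),
# C = t(I - D^{-1} Omega) and B_car = B D^{-1}, where D = diag(diag(Omega)).
check_CAR_MB_identities <- function() {
    Omega <- matrix(c(2, -0.8, -0.8, 2), 2, 2)   # toy precision matrix
    B <- matrix(1:4, 2, 2)                       # toy regression coefficients
    car <- get_CAR_MB(B, Omega)
    D_inv <- diag(1 / diag(Omega))
    c(M_ok = isTRUE(all.equal(car$M, D_inv)),
      C_ok = isTRUE(all.equal(car$C, t(diag(2) - D_inv %*% Omega))),
      B_ok = isTRUE(all.equal(car$B, B %*% D_inv)))
}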
/scratch/gouwar.j/cran-all/cranData/CARlasso/R/misc.R
# These are wrappers for the multi-response regression based models used in the horseshoe inference
# data: n*k matrix of responses
# design: n*p design matrix, no intercept
# Bbar: prior mean of the regression coefficients
# A: prior precision matrix for the regression coefficients
# nu: d.f. for Sigma
# V: k*k positive definite location parameter for the prior on Sigma
CAR_multireg <- function(data,design,n_sample, Bbar=NULL, A = NULL, nu=3, V=NULL){
    k <- ncol(data)
    p <- ncol(design)
    if(is.null(Bbar)) Bbar <- matrix(0,p+1,k)
    if(is.null(A)) A <- diag(1e-8,p+1,p+1)
    if(is.null(V)) V <- 3*diag(2,k,k)
    res <- CAR_multireg_cpp(data,design,n_sample, Bbar, A, nu, V)
    return(res)
}

Multinomial_CAR_multireg <- function(data,design,n_burn_in,n_sample, thin_by, Bbar=NULL, A = NULL, nu=3, V=NULL,ns = 1000,m=20,emax=64){
    n <- nrow(data)
    k <- ncol(data)-1
    p <- ncol(design)
    if(is.null(Bbar)) Bbar <- matrix(0,p+1,k)
    if(is.null(A)) A <- diag(1e-8,p+1,p+1)
    if(is.null(V)) V <- 3*diag(2,k,k)
    res <- Multinomial_CAR_multireg_cpp(data,design,n_burn_in,n_sample, thin_by, Bbar, A, nu, V,ns,m,emax)
    return(res)
}

Pois_CAR_multireg <- function(data,design,n_burn_in,n_sample, thin_by, Bbar=NULL, A = NULL, nu=3, V=NULL,ns = 1000,m=20,emax=64){
    n <- nrow(data)
    k <- ncol(data)
    p <- ncol(design)
    if(is.null(Bbar)) Bbar <- matrix(0,p+1,k)
    if(is.null(A)) A <- diag(1e-8,p+1,p+1)
    if(is.null(V)) V <- 3*diag(2,k,k)
    res <- Pois_CAR_multireg_cpp(data,design,n_burn_in,n_sample, thin_by, Bbar, A, nu, V,ns,m,emax)
    return(res)
}

Probit_CAR_multireg <- function(data,design,n_burn_in,n_sample, thin_by, Bbar=NULL, A = NULL, nu=3, V=NULL){
    n <- nrow(data)
    k <- ncol(data)
    p <- ncol(design)
    if(is.null(Bbar)) Bbar <- matrix(0,p+1,k)
    if(is.null(A)) A <- diag(1e-8,p+1,p+1)
    if(is.null(V)) V <- 3*diag(2,k,k)
    res <- Probit_CAR_multireg_cpp(data,design,n_burn_in,n_sample, thin_by, Bbar, A, nu, V)
    return(res)
}
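# Small helper sketch (the name default_multireg_priors is an assumption of
# this note, not part of the package API): makes the default priors used by the
# wrappers above explicit, which is what the horseshoe() documentation refers
# to as a "less informative" prior.
default_multireg_priors <- function(p, k) {
    list(Bbar = matrix(0, p + 1, k),       # flat prior mean, intercept included
         A    = diag(1e-8, p + 1, p + 1),  # near-zero prior precision on the coefficients
         nu   = 3,                         # Wishart degrees of freedom
         V    = 3 * diag(2, k, k))         # Wishart location parameter, as in the wrappers above
}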
/scratch/gouwar.j/cran-all/cranData/CARlasso/R/multireg-wrap.R
utils::globalVariables(c("abs_weight", "direction.", "name")) #' plot the chain graph estimated by CAR-LASSO with threshold or horseshoe method using ggraph #' #' @param x The carlasso_out xect #' @param ... #' \itemize{ #' \item{`tol`}{: threshold for ploting default 0.01, if horseshoed, then horseshoe result is used} #' } #' @return A `ggplot` xect #' @export plot.carlasso_out <- function(x, ...) { dots <- list(...) tol <- dots$tol if(x$settings$link=="logit"){ response_name <- x$nodes$response[-length(x$nodes$response)] } else{ response_name <- x$nodes$response } if(is.null(tol)) tol = 0.01 col_pn <- c("lightblue","pink") # graph structure using threshold: if(is.null(x$horseshoe_binary)){ B_binary <- abs(x$point_est$beta) > tol Graph_binary <- abs(x$point_est$Omega) > tol } else { B_binary <- x$horseshoe_binary$B_binary Graph_binary <- x$horseshoe_binary$Omega_binary } diag(Graph_binary) <- 1 CAR <- get_CAR_MB(x$point_est$beta*B_binary, Graph_binary*x$point_est$Omega) n_resp <- length(response_name) n_pred <- length(x$nodes$predictors) vertices_df <- data.frame(id = c(paste0("resp", 1:n_resp), paste0("pred", 1:n_pred)), group = c(rep("resp", n_resp), rep("pred", n_pred))) ind_mat_resp <- expand.grid(from = 1:n_resp, to = 1:n_resp) ind_mat_resp <- ind_mat_resp[ind_mat_resp$from != ind_mat_resp$to, ] ind_mat_resp$weight <- sapply(1:nrow(ind_mat_resp), function(i, indmat, mat) { mat[indmat$from[i], indmat$to[i]] } ,ind_mat_resp, CAR$C) ind_mat_pred <- expand.grid(from = 1:n_pred, to = 1:n_resp) ind_mat_pred$weight <- sapply(1:nrow(ind_mat_pred), function(i, indmat, mat) { mat[indmat$from[i], indmat$to[i]] } , ind_mat_pred, CAR$B) ind_mat_pred$from <- paste0("pred", ind_mat_pred$from) ind_mat_pred$to <- paste0("resp", ind_mat_pred$to) ind_mat_resp$from <- paste0("resp", ind_mat_resp$from) ind_mat_resp$to <- paste0("resp", ind_mat_resp$to) edge_df <- rbind(ind_mat_resp, ind_mat_pred) edge_df <- edge_df[edge_df$weight != 0, ] edge_abs_df <- edge_df edge_abs_df$weight <- abs(edge_abs_df$weight) full_graph <- graph.data.frame(edge_df, vertices_df, directed = T) col_ER <- c("orange", "darkgreen") shape_ER <- c("square", "circle") type <- c("predictors", "microbe") direction <- c("negative", "positive") E(full_graph)$edge.color <- col_pn[(sign(E(full_graph)$weight) + 1) / 2 + 1] E(full_graph)$direction. 
<- direction[(sign(E(full_graph)$weight) + 1) / 2 + 1] E(full_graph)$abs_weight <- abs(E(full_graph)$weight) V(full_graph)$name <- c(response_name, x$nodes$predictors) V(full_graph)$alpha_centrality <- alpha_centrality(full_graph) V(full_graph)$type <- type[c(rep(2, n_resp), rep(1, n_pred))] cbPalette_edge <- c("#0072B2", "#990000") cbPalette_node <- c("#0815d3", "#682d01") set_graph_style(plot_margin = margin(10, 10, 10, 10)) p <- ggraph(full_graph, layout = "circle") if(length(unique(E(full_graph)$direction.))==1){ p <- p + geom_edge_link(aes( width = abs_weight, alpha = abs_weight), color = ifelse(E(full_graph)$direction.[1]=="positive",cbPalette_edge[2], cbPalette_edge[1])) } else { p <- p + geom_edge_link(aes(color = direction., width = abs_weight, alpha = abs_weight)) + scale_edge_color_manual(values = (cbPalette_edge)) } p <- p + geom_node_point(mapping = aes(shape = type, size = alpha_centrality, stroke = 1.5), col = "#000000", fill = "white", alpha = 1) + scale_shape_manual(values = c(21, 24)) + coord_fixed(clip = "off") + guides( width = guide_legend(order = 1), size = guide_legend(order = 2), shape = "none", edge_color = "none" ) dd <- rep(0, length(V(full_graph)$name)) p <- p + geom_node_text(aes(label = name), nudge_x = p$data$x * .38, nudge_y = p$data$y * .2 + dd, family = "") + # repel = T,check_overlap = T)+ theme_graph(base_family = "Helvetica") + theme( legend.text = element_text(size = 9), legend.position = "bottom" ) p } #' plot the graph estimated by graphical lasso with threshold method using ggraph #' #' @param x The bglasso_out #' @param ... #' \itemize{ #' \item{`tol`}{: threshold for ploting default 0.01, if horseshoed, then horseshoe result is used} #' } #' @return A `ggplot` xect #' @export plot.bglasso_out <- function(x, ...) { dots <- list(...) tol <- dots$tol if(x$settings$link=="logit"){ response_name <- x$nodes$response[-length(x$nodes$response)] } else{ response_name <- x$nodes$response } if(is.null(tol)) tol = 0.01 col_pn <- c("lightblue","pink") # graph structure using threshold: Graph_binary <- abs(x$point_est$Omega) > tol plot_graph <- x$point_est$Omega * Graph_binary plot_graph_diag <- diag(diag(plot_graph)) plot_graph <- t( solve(plot_graph_diag, plot_graph_diag-plot_graph)) diag(Graph_binary) <- 1 n_resp <- length(response_name) vertices_df <- data.frame(id = c(paste0("resp", 1:n_resp))) ind_mat_resp <- expand.grid(from = 1:n_resp, to = 1:n_resp) ind_mat_resp <- ind_mat_resp[ind_mat_resp$from != ind_mat_resp$to, ] ind_mat_resp$weight <- sapply(1:nrow(ind_mat_resp), function(i, indmat, mat) { mat[indmat$from[i], indmat$to[i]] } ,ind_mat_resp, plot_graph) ind_mat_resp$from <- paste0("resp", ind_mat_resp$from) ind_mat_resp$to <- paste0("resp", ind_mat_resp$to) edge_df <- ind_mat_resp edge_df <- edge_df[edge_df$weight != 0, ] edge_abs_df <- edge_df edge_abs_df$weight <- abs(edge_abs_df$weight) full_graph <- graph.data.frame(edge_df, vertices_df, directed = F) shape_ER <- c("square", "circle") direction <- c("negative", "positive") E(full_graph)$edge.color <- col_pn[(sign(E(full_graph)$weight) + 1) / 2 + 1] E(full_graph)$direction. 
<- direction[(sign(E(full_graph)$weight) + 1) / 2 + 1] E(full_graph)$abs_weight <- abs(E(full_graph)$weight) V(full_graph)$name <- c(response_name) V(full_graph)$alpha_centrality <- alpha_centrality(full_graph) cbPalette_edge <- c("#0072B2", "#990000") cbPalette_node <- c("#0815d3", "#682d01") set_graph_style(plot_margin = margin(10, 10, 10, 10)) p <- ggraph(full_graph, layout = "circle") if(length(unique(E(full_graph)$direction.))==1){ p <- p + geom_edge_link(aes( width = abs_weight, alpha = abs_weight), color = ifelse(E(full_graph)$direction.[1]=="positive",cbPalette_edge[2], cbPalette_edge[1])) } else { p <- p + geom_edge_link(aes(color = direction., width = abs_weight, alpha = abs_weight)) + scale_edge_color_manual(values = (cbPalette_edge)) } p <- p + geom_node_point(mapping = aes( size = alpha_centrality, stroke = 1.5), col = "#000000", fill = "white", alpha = 1) + scale_shape_manual(values = c(21, 24)) + coord_fixed(clip = "off") + guides( width = guide_legend(order = 1), size = guide_legend(order = 2), shape = "none", edge_color = "none" ) dd <- rep(0, length(V(full_graph)$name)) p <- p + geom_node_text(aes(label = name), nudge_x = p$data$x * .38, nudge_y = p$data$y * .2 + dd, family = "") + # repel = T,check_overlap = T)+ theme_graph(base_family = "Helvetica") + theme( legend.text = element_text(size = 9), legend.position = "bottom" ) p } g_model1 <- function(k, rho=.7){ temp <- matrix(rep(1:k,k),ncol = k) Sigma <- rho ^ (abs(temp-t(temp))) Omega <- solve(Sigma) Omega <- Omega * (abs(Omega)>1e-15) return(list(Sigma = Sigma, Omega = Omega)) } #' Simulate a simple AR1 model with specific predictor #' @details Simulate a simple AR1 model with k responses and k predictors, each predictor has effect on exact one response node #' @param n sample size #' @param k number of responses #' @param rho partial correlation in AR1 #' @return a dataframe, with y1 to yk as responses and x1 to xk as predictors #' @export simu_AR1 <- function(n=100, k=5, rho = .7){ graph <- g_model1(k,rho) X <- matrix(rnorm(k*n),nrow = n) Y <- matrix(0,n,k) for(i in 1:n){ Y[i,] <- MASS::mvrnorm(1,graph$Sigma %*% X[i,],graph$Sigma) } res <- as.data.frame(cbind(Y,X)) colnames(res) <- c(paste0("y",1:k),paste0("x",1:k)) return(res) }
/scratch/gouwar.j/cran-all/cranData/CARlasso/R/utils.R
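A side note on `g_model1` above (not part of the package documentation): the AR(1) covariance it builds has entries Sigma[i, j] = rho^|i - j|, so its inverse, the precision matrix Omega used as the simulation truth, is tridiagonal and the simulated responses form a chain graph. A quick base-R check of that structure:

```r
# Recompute the g_model1 matrices directly and confirm Omega is tridiagonal.
rho <- 0.7; k <- 5
idx <- matrix(rep(1:k, k), ncol = k)
Sigma <- rho^abs(idx - t(idx))            # AR(1) covariance, Sigma[i, j] = rho^|i - j|
Omega <- solve(Sigma)                     # precision matrix
round(Omega * (abs(Omega) > 1e-12), 3)    # non-zero only on the main and first off-diagonals
```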
## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" )
/scratch/gouwar.j/cran-all/cranData/CARlasso/inst/doc/buildown.R
--- title: "Build your own hierarchical model" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Build your own hierarchical model} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` We also offer two low-level one-step sampling functions `rCARAlasso_` and `rCARlasso_` for adaptive and non-adaptive version of the CARlasso, see [reference](https://yunyishen.ml/CAR-LASSO/dev/reference/rCARAlasso_.html) for details. The typical usage would be one writes their own sampler to sample the Normal latent variable and feed it to the one-step sampling. Note that these two functions will **update** the current states in **memory** directly.
/scratch/gouwar.j/cran-all/cranData/CARlasso/inst/doc/buildown.Rmd
## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup-------------------------------------------------------------------- library(CARlasso) ## ----ar1data------------------------------------------------------------------ set.seed(42) dt <- simu_AR1(n=100,k=5, rho=0.7) dt <- dt[,1:5] head(dt) ## ----ar1example_first, eval = FALSE------------------------------------------- # glassores <- bGlasso(data = dt) # plot(glassores) ## ----comp_data---------------------------------------------------------------- dt <- mgp154[,c("Alistipes","Bacteroides", "Eubacterium","Parabacteroides","all_others")] ## ----compositional1, eval = FALSE--------------------------------------------- # gut_res <- bGlasso( data = dt,link = "logit", # n_iter = 2000, # n_burn_in = 1000, thin_by = 2) # plot(gut_res) ## ----counting, eval = FALSE--------------------------------------------------- # gut_res <- gut_res <- bGlasso( data = dt[,1:4],link = "log", # n_iter = 2000, # n_burn_in = 1000, thin_by = 2) # plot(gut_res)
/scratch/gouwar.j/cran-all/cranData/CARlasso/inst/doc/glasso.R
--- title: "Graphical LASSO support" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Graphical LASSO support} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(CARlasso) ``` We also implemented the standard Graphical LASSO to infer a graph without predictors. The main interface is `bGlasso`. For more details, please see the [reference](https://yunyishen.ml/CAR-LASSO/dev/reference/bGlasso.html). ## GLASSO Network for Gaussian data This is the case when data is Gaussian or can be transformed to Gaussian. We only need responses in this case. We use a 5-node AR1 model to simulate data: ```{r ar1data} set.seed(42) dt <- simu_AR1(n=100,k=5, rho=0.7) dt <- dt[,1:5] head(dt) ``` Unlike `CARlasso`, we do not really need to have a formula (because we do not have predictors, only responses). To use the Normal version, we should set `link="identity"` which is the default. ```{r ar1example_first, eval = FALSE} glassores <- bGlasso(data = dt) plot(glassores) ``` Some of the connections are actually due to predictors. ## GLASSO Network for compositional data This is common in the case of microbe-related studies and some ecological applications with relative abundances. For instance, microbe relative abundance come from sequencing and in this case, the sum of "abundance" is determined by the sequence depth rather than the real total abundance. The data is usually described as "compositional". We first extract only the responses that we want to include in the model: ```{r comp_data} dt <- mgp154[,c("Alistipes","Bacteroides", "Eubacterium","Parabacteroides","all_others")] ``` To run the composition model in `bGlasso`, we need to set `link="logit"` ```{r compositional1, eval = FALSE} gut_res <- bGlasso( data = dt,link = "logit", n_iter = 2000, n_burn_in = 1000, thin_by = 2) plot(gut_res) ``` Note that in this case the last one in the dataframe will be the reference level (`all_others` in this case). ## GLASSO Network for counting data This is common in a lot of ecological applications. For instance, number of seedlings within a site. The responses are counts rather than continuous. We will use the same compositional data as before to illustrate the counts model. However, it is important to note that relative abundances should not be considered as counts. To distinguish between compositional and count data, one can ask the question: **is the sum decided by us?** If yes, we want to use compositional models. To run the count model, we need to set `link="log"`. Note that we only include the first 4 responses: ```{r counting, eval = FALSE} gut_res <- gut_res <- bGlasso( data = dt[,1:4],link = "log", n_iter = 2000, n_burn_in = 1000, thin_by = 2) plot(gut_res) ```
/scratch/gouwar.j/cran-all/cranData/CARlasso/inst/doc/glasso.Rmd
## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup-------------------------------------------------------------------- library(CARlasso) ## ----ar1data------------------------------------------------------------------ set.seed(42) dt <- simu_AR1(n=100,k=5, rho=0.7) head(dt) ## ----ar1example_first, eval = FALSE------------------------------------------- # car_res <- CARlasso(y1+y2+y3+y4+y5~x1+x2+x3+x4+x5, data = dt, adaptive = TRUE) # plot(car_res,tol = 0.05) ## ----horseshoe_1,eval = FALSE------------------------------------------------- # # with horseshoe inference # car_res <- horseshoe(car_res) # plot(car_res) # ## ----comp_data---------------------------------------------------------------- mgp154[1:5,1:7] ## ----compositional1, eval = FALSE--------------------------------------------- # gut_res <- CARlasso(Alistipes+Bacteroides+ # Eubacterium+Parabacteroides+all_others~ # BMI+Age+Gender+Stratum, # data = mgp154,link = "logit", # adaptive = TRUE, n_iter = 2000, # n_burn_in = 1000, thin_by = 2) ## ----horseshoe_comp, eval = FALSE--------------------------------------------- # # horseshoe will take a while, as it needs to sample the latent normal too # gut_res <- horseshoe(gut_res) # plot(gut_res) ## ----counting, eval = FALSE--------------------------------------------------- # gut_res <- CARlasso(Alistipes+Bacteroides+ # Eubacterium+Parabacteroides+all_others~ # BMI+Age+Gender+Stratum, # data = mgp154,link = "log", # adaptive = TRUE, # r_beta = 0.1, # default sometimes cause singularity in Poisson model due to exponential transformation, slightly change can fix it. # n_iter = 2000, # n_burn_in = 1000, thin_by = 2) # # horseshoe will take a while, as it's currently implemented in R rather than C++ # gut_res <- horseshoe(gut_res) # plot(gut_res)
/scratch/gouwar.j/cran-all/cranData/CARlasso/inst/doc/network.R
--- title: "Get started with CAR-LASSO models" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Get started with CAR-LASSO models} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(CARlasso) ``` `CARlasso` is the main interface to work with the model, for details see the [reference](https://yunyishen.ml/CAR-LASSO/dev/reference/CARlasso.html). ## CAR-LASSO Network for Gaussian data This is the case when data is Gaussian or can be transformed to Gaussian, for example, (log) biomass of trees. We will provide an example with simulated data. The use of the `Carlasso` is similar to `lm`, we could supply a formula and a dataframe with both responses and predictors. First, we simulate data under a 5-node AR1 model: ```{r ar1data} set.seed(42) dt <- simu_AR1(n=100,k=5, rho=0.7) head(dt) ``` To use the Normal version, we should set `link="identity"` which is the default. In this case, we are setting `adaptive=TRUE` to use the adaptive version of CAR-LASSO (for more details, see [the paper](https://arxiv.org/abs/2012.08397)): ```{r ar1example_first, eval = FALSE} car_res <- CARlasso(y1+y2+y3+y4+y5~x1+x2+x3+x4+x5, data = dt, adaptive = TRUE) plot(car_res,tol = 0.05) ``` The color of the edge represents the type of correlation (negative=blue, positive=red) and the width of the edge corresponds to the effect size. Response nodes are represented by circles and predictor nodes are represented by triangles. We can have a more formal horseshoe inference on the structure of the network which will update the `car_res` object: ```{r horseshoe_1,eval = FALSE} # with horseshoe inference car_res <- horseshoe(car_res) plot(car_res) ``` ## CAR-LASSO Network for compositional data This is common in the case of microbe-related studies and some ecological applications with relative abundances. For instance, microbe relative abundance come from sequencing and in this case, the sum of "abundance" is determined by the sequence depth rather than the real total abundance. The data is usually described as "compositional". In `CARlasso`, this type of data are modeled as Logit-Normal-multinomial. In this case, we need to have a "reference level" taxa and all others are "relative" to this taxa. First, we take a look at the data which is still a dataframe with all predictors and responses ```{r comp_data} mgp154[1:5,1:7] ``` To run the composition model, we need to set `link="logit"` ```{r compositional1, eval = FALSE} gut_res <- CARlasso(Alistipes+Bacteroides+ Eubacterium+Parabacteroides+all_others~ BMI+Age+Gender+Stratum, data = mgp154,link = "logit", adaptive = TRUE, n_iter = 2000, n_burn_in = 1000, thin_by = 2) ``` Note that in this case `all_others` (an existing column in our data), i.e. the last one in the left hand side of the formula will be the reference level. We can update the network inference by a horseshoe method to determine when edges will be considered non-existent. More details can be found in [the paper](https://arxiv.org/abs/2012.08397). ```{r horseshoe_comp, eval = FALSE} # horseshoe will take a while, as it needs to sample the latent normal too gut_res <- horseshoe(gut_res) plot(gut_res) ``` ## CAR-LASSO Network for counting data This is common in a lot of ecological applications. For instance, number of seedlings within a site. The responses are counts rather than continuous. In `CARlasso`, it is modeled as Poisson with log-Normal rate. 
We will use the same compositional data as before to illustrate the counts model. However, it is important to note that relative abundances should not be considered as counts. To distinguish between compositional and count data, one can ask the question: **is the sum decided by us?** If yes, we want to use compositional models. To run the count model, we need to set `link="log"`: ```{r counting, eval = FALSE} gut_res <- CARlasso(Alistipes+Bacteroides+ Eubacterium+Parabacteroides+all_others~ BMI+Age+Gender+Stratum, data = mgp154,link = "log", adaptive = TRUE, r_beta = 0.1, # default sometimes cause singularity in Poisson model due to exponential transformation, slightly change can fix it. n_iter = 2000, n_burn_in = 1000, thin_by = 2) # horseshoe will take a while, as it's currently implemented in R rather than C++ gut_res <- horseshoe(gut_res) plot(gut_res) ```
/scratch/gouwar.j/cran-all/cranData/CARlasso/inst/doc/network.Rmd
--- title: "Build your own hierarchical model" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Build your own hierarchical model} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` We also offer two low-level one-step sampling functions `rCARAlasso_` and `rCARlasso_` for adaptive and non-adaptive version of the CARlasso, see [reference](https://yunyishen.ml/CAR-LASSO/dev/reference/rCARAlasso_.html) for details. The typical usage would be one writes their own sampler to sample the Normal latent variable and feed it to the one-step sampling. Note that these two functions will **update** the current states in **memory** directly.
/scratch/gouwar.j/cran-all/cranData/CARlasso/vignettes/buildown.Rmd
--- title: "Graphical LASSO support" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Graphical LASSO support} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(CARlasso) ``` We also implemented the standard Graphical LASSO to infer a graph without predictors. The main interface is `bGlasso`. For more details, please see the [reference](https://yunyishen.ml/CAR-LASSO/dev/reference/bGlasso.html). ## GLASSO Network for Gaussian data This is the case when data is Gaussian or can be transformed to Gaussian. We only need responses in this case. We use a 5-node AR1 model to simulate data: ```{r ar1data} set.seed(42) dt <- simu_AR1(n=100,k=5, rho=0.7) dt <- dt[,1:5] head(dt) ``` Unlike `CARlasso`, we do not really need to have a formula (because we do not have predictors, only responses). To use the Normal version, we should set `link="identity"` which is the default. ```{r ar1example_first, eval = FALSE} glassores <- bGlasso(data = dt) plot(glassores) ``` Some of the connections are actually due to predictors. ## GLASSO Network for compositional data This is common in the case of microbe-related studies and some ecological applications with relative abundances. For instance, microbe relative abundance come from sequencing and in this case, the sum of "abundance" is determined by the sequence depth rather than the real total abundance. The data is usually described as "compositional". We first extract only the responses that we want to include in the model: ```{r comp_data} dt <- mgp154[,c("Alistipes","Bacteroides", "Eubacterium","Parabacteroides","all_others")] ``` To run the composition model in `bGlasso`, we need to set `link="logit"` ```{r compositional1, eval = FALSE} gut_res <- bGlasso( data = dt,link = "logit", n_iter = 2000, n_burn_in = 1000, thin_by = 2) plot(gut_res) ``` Note that in this case the last one in the dataframe will be the reference level (`all_others` in this case). ## GLASSO Network for counting data This is common in a lot of ecological applications. For instance, number of seedlings within a site. The responses are counts rather than continuous. We will use the same compositional data as before to illustrate the counts model. However, it is important to note that relative abundances should not be considered as counts. To distinguish between compositional and count data, one can ask the question: **is the sum decided by us?** If yes, we want to use compositional models. To run the count model, we need to set `link="log"`. Note that we only include the first 4 responses: ```{r counting, eval = FALSE} gut_res <- gut_res <- bGlasso( data = dt[,1:4],link = "log", n_iter = 2000, n_burn_in = 1000, thin_by = 2) plot(gut_res) ```
/scratch/gouwar.j/cran-all/cranData/CARlasso/vignettes/glasso.Rmd
--- title: "Get started with CAR-LASSO models" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Get started with CAR-LASSO models} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(CARlasso) ``` `CARlasso` is the main interface to work with the model, for details see the [reference](https://yunyishen.ml/CAR-LASSO/dev/reference/CARlasso.html). ## CAR-LASSO Network for Gaussian data This is the case when data is Gaussian or can be transformed to Gaussian, for example, (log) biomass of trees. We will provide an example with simulated data. The use of the `Carlasso` is similar to `lm`, we could supply a formula and a dataframe with both responses and predictors. First, we simulate data under a 5-node AR1 model: ```{r ar1data} set.seed(42) dt <- simu_AR1(n=100,k=5, rho=0.7) head(dt) ``` To use the Normal version, we should set `link="identity"` which is the default. In this case, we are setting `adaptive=TRUE` to use the adaptive version of CAR-LASSO (for more details, see [the paper](https://arxiv.org/abs/2012.08397)): ```{r ar1example_first, eval = FALSE} car_res <- CARlasso(y1+y2+y3+y4+y5~x1+x2+x3+x4+x5, data = dt, adaptive = TRUE) plot(car_res,tol = 0.05) ``` The color of the edge represents the type of correlation (negative=blue, positive=red) and the width of the edge corresponds to the effect size. Response nodes are represented by circles and predictor nodes are represented by triangles. We can have a more formal horseshoe inference on the structure of the network which will update the `car_res` object: ```{r horseshoe_1,eval = FALSE} # with horseshoe inference car_res <- horseshoe(car_res) plot(car_res) ``` ## CAR-LASSO Network for compositional data This is common in the case of microbe-related studies and some ecological applications with relative abundances. For instance, microbe relative abundance come from sequencing and in this case, the sum of "abundance" is determined by the sequence depth rather than the real total abundance. The data is usually described as "compositional". In `CARlasso`, this type of data are modeled as Logit-Normal-multinomial. In this case, we need to have a "reference level" taxa and all others are "relative" to this taxa. First, we take a look at the data which is still a dataframe with all predictors and responses ```{r comp_data} mgp154[1:5,1:7] ``` To run the composition model, we need to set `link="logit"` ```{r compositional1, eval = FALSE} gut_res <- CARlasso(Alistipes+Bacteroides+ Eubacterium+Parabacteroides+all_others~ BMI+Age+Gender+Stratum, data = mgp154,link = "logit", adaptive = TRUE, n_iter = 2000, n_burn_in = 1000, thin_by = 2) ``` Note that in this case `all_others` (an existing column in our data), i.e. the last one in the left hand side of the formula will be the reference level. We can update the network inference by a horseshoe method to determine when edges will be considered non-existent. More details can be found in [the paper](https://arxiv.org/abs/2012.08397). ```{r horseshoe_comp, eval = FALSE} # horseshoe will take a while, as it needs to sample the latent normal too gut_res <- horseshoe(gut_res) plot(gut_res) ``` ## CAR-LASSO Network for counting data This is common in a lot of ecological applications. For instance, number of seedlings within a site. The responses are counts rather than continuous. In `CARlasso`, it is modeled as Poisson with log-Normal rate. 
We will use the same compositional data as before to illustrate the counts model. However, it is important to note that relative abundances should not be considered as counts. To distinguish between compositional and count data, one can ask the question: **is the sum decided by us?** If yes, we want to use compositional models. To run the count model, we need to set `link="log"`: ```{r counting, eval = FALSE} gut_res <- CARlasso(Alistipes+Bacteroides+ Eubacterium+Parabacteroides+all_others~ BMI+Age+Gender+Stratum, data = mgp154,link = "log", adaptive = TRUE, r_beta = 0.1, # default sometimes cause singularity in Poisson model due to exponential transformation, slightly change can fix it. n_iter = 2000, n_burn_in = 1000, thin_by = 2) # horseshoe will take a while, as it's currently implemented in R rather than C++ gut_res <- horseshoe(gut_res) plot(gut_res) ```
/scratch/gouwar.j/cran-all/cranData/CARlasso/vignettes/network.Rmd
#' @title Penalized Optimization Framework for Community Detection in Networks with Covariates. #' @description Semidefinite programming for optimizing the inner product between combined network and the #' solution matrix. #' @details \emph{ADMM} is proposed in \emph{Covariate Regularized Community Detection in Sparse Graphs} #' of Yan & Sarkar (2021). \emph{ADMM} relies on semidefinite programming (SDP) relaxations for detecting #' the community structure in sparse networks with covariates. #' @param Adj A 0/1 adjacency matrix. #' @param Covariate A covariate matrix. The rows correspond to nodes and the columns correspond to covariates. #' @param lambda A tuning parameter to weigh the covariate matrix. #' @param K A positive integer, indicating the number of underlying communities in graph \code{Adj}. #' @param alpha A number. The elementwise upper bound in the SDP. #' @param rho The learning rate of ADMM. #' @param TT The maximum of iteration. #' @param tol The tolerance for stopping criterion. #' @param quiet An optional inoput. Whether to print result at each step. #' @param report_interval An optional inoput. The frequency to print intermediate result. #' @param r An optional inoput. The expected rank of the solution, leave NULL if no constraint is required. #' @return \item{estall}{A lavel vector.} #' #' @importFrom pracma eig Norm #' @importFrom stats kmeans runif rnorm #' #' @references Yan, B., & Sarkar, P. (2021). \emph{Covariate Regularized Community Detection in Sparse Graphs}. #' \emph{Journal of the American Statistical Association, 116(534), 734-745}. #' \cr\doi{10.1080/01621459.2019.1706541}\cr #' #' @examples #' #' # Simulate the Network #' n = 10; K = 2; #' theta = 0.4 + (0.45-0.05)*(seq(1:n)/n)^2; Theta = diag(theta); #' P = matrix(c(0.8, 0.2, 0.2, 0.8), byrow = TRUE, nrow = K) #' set.seed(2022) #' l = sample(1:K, n, replace=TRUE); # node labels #' Pi = matrix(0, n, K) # label matrix #' for (k in 1:K){ #' Pi[l == k, k] = 1 #' } #' Omega = Theta %*% Pi %*% P %*% t(Pi) %*% Theta; #' Adj = matrix(runif(n*n, 0, 1), nrow = n); #' Adj = Omega - Adj; #' Adj = 1*(Adj >= 0) #' diag(Adj) = 0 #' Adj[lower.tri(Adj)] = t(Adj)[lower.tri(Adj)] #' caseno = 4; Nrange = 10; Nmin = 10; prob1 = 0.9; p = n*4; #' Q = matrix(runif(p*K, 0, 1), nrow = p, ncol = K) #' Q = sweep(Q,2,colSums(Q),`/`) #' W = matrix(0, nrow = n, ncol = K); #' for(jj in 1:n) { #' if(runif(1) <= prob1) {W[jj, 1:K] = Pi[jj, ];} #' else W[jj, sample(K, 1)] = 1; #' } #' W = t(W) #' D0 = Q %*% W #' X = matrix(0, n, p) #' N = switch(caseno, rep(100, n), rep(100, n), round(runif(n)*Nrange+ Nmin), #' round(runif(n)* Nrange+Nmin)) #' for (i in 1: ncol(D0)){ #' X[i, ] = rmultinom(1, N[i], D0[, i]) #' } #' ADMM(Adj, X, lambda = 0.2, K = K, alpha = 0.5, rho = 2, TT = 100, tol = 5) #' @export ADMM = function(Adj, Covariate, lambda, K, alpha, rho, TT, tol, quiet = NULL, report_interval = NULL, r = NULL){ # Inputs: Adj: adjacency matrix # Covariate: n x p covaraite matrix # lambda: tuning parameter between graph and covariates # K: number of clusters # alpha: elementwise upper bound in the SDP # rho: learning rate of ADMM # TT: max iteration # tol: tolerance for stopping criterion # quiet: whether to print result at each step # report_interval: frequency to print intermediate result # r: expected rank of the solution, leave blank if no constraint is required. 
# Outputs: estall: the label vector As = Adj + lambda* Covariate %*% t(Covariate) n = dim(As)[1] U <- V <- matrix(0, n, n) #Initialization - spectral with perturbation v = eigen(As)$vectors[, 1: K] e = diag(eigen(As)$values[1: K]) X = v %*% t(v) + 0.1*matrix(rnorm(n*n), nrow = n) Y = v %*% t(v) + 0.1*matrix(rnorm(n*n), nrow = n) Z = v %*% t(v) + 0.1*matrix(rnorm(n*n), nrow = n) As_rescaled = (1/rho) * As; if(is.null(report_interval)) report_interval = 1 if(is.null(quiet)) quiet = FALSE if(is.null(r)) r = Inf if (is.infinite(TT)) { delta = matrix(0, 1000, 1) infeas = matrix(0, 1000, 1) } else { delta = matrix(0, TT, 1) infeas = matrix(0, TT, 1) } dt = matrix(0, 1, 3); t = 1; CONVERGED = FALSE; while(CONVERGED == FALSE & t<= TT){ Xold = X X = projAXb(0.5*(Z - U + Y - V + As_rescaled), K, n); Z = X + U Z[Z < 0] = 0 Z[Z > alpha] = alpha Y = projSp(X + V); U = U + X - Z; V = V + X - Y; delta[t] = norm(X - Xold) / norm(Xold); infeas[t] = (sqrt(sum(diag(t(X - Y) * (X - Y)))) + sqrt(sum(diag(t(X - Z) * (X - Z))))) / sqrt(sum(diag(t(X)*X))); CONVERGED = max(delta[t], infeas[t]) < tol; t = t + 1; } T_term = t - 1 estall = rsc(X, K, 'adj') return(estall) }
/scratch/gouwar.j/cran-all/cranData/CASCORE/R/ADMM.R
#' @title Ac #' @param X A square matrix. #' @param n The number of nodes. #' @return z A numeric vector of length n+1: twice the row sums of X followed by the trace of X. #' @noRd #' @keywords internal Ac = function(X, n){ z = c(2 * X %*% matrix(1, n, 1), sum(diag(X))) return(z) }
/scratch/gouwar.j/cran-all/cranData/CASCORE/R/Ac.R
#' @title Acs #' @param z A numeric vector of length n+1. #' @param n The number of nodes. #' @return Z An n by n symmetric matrix; Acs acts as the adjoint of the constraint operator Ac. #' @noRd #' @keywords internal Acs = function(z, n){ #mu = z[, 1][1:n] mu = z[1:n] U = matrix(rep(mu, n), byrow = TRUE, nrow = n) V = matrix(rep(mu, n), byrow = FALSE, nrow = n) #Z = U + V + z[, 1][n + 1]*diag(n) Z = U + V + z[n + 1]*diag(n) return(Z) }
/scratch/gouwar.j/cran-all/cranData/CASCORE/R/Acs.R
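Taken together, `Ac` and `Acs` implement the linear constraint map of the SDP and its adjoint: `Ac(X, n)` stacks twice the row sums of X with its trace, and `Acs(z, n)` maps a vector back to a symmetric matrix. A quick numeric check of the adjoint identity <Ac(X), z> = <X, Acs(z)> for symmetric X (a sanity sketch, not package code; the helpers are internal, hence `:::`, and CASCORE is assumed installed):

```r
set.seed(1)
n <- 6
X <- crossprod(matrix(rnorm(n * n), n))   # a symmetric test matrix
z <- rnorm(n + 1)
lhs <- sum(CASCORE:::Ac(X, n) * z)        # <Ac(X), z>
rhs <- sum(X * CASCORE:::Acs(z, n))       # <X, Acs(z)>
all.equal(lhs, rhs)                       # TRUE for symmetric X
```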
#' @title Covariate Assisted Spectral Clustering. #' @description \emph{CASC} clusters graph nodes by applying spectral clustering with the assistance from #' node covariates. #' @details \emph{CASC} is a community detection algorithm for networks with node covariates, proposed #' in \emph{Covariate-assisted spectral clustering} of Binkiewicz, et al. (2017). \emph{CASC} applies #' \code{k-means} on the first \code{K} leading eigenvectors of the balanced matrix between the Laplacian #' matrix and the covariate matrix. #' @param Adj A 0/1 adjacency matrix. #' @param Covariate A covariate matrix. The rows correspond to nodes and the columns correspond to covariates. #' @param K A positive integer, indicating the number of underlying communities in graph \code{Adj}. #' @param alphan A tuning parameter to balance between the contributions of the graph and the covariates. #' @param itermax \code{k-means} parameter, indicating the maximum number of iterations allowed. #' The default value is 100. #' @param startn \code{k-means} parameter. If centers is a number, how many random sets should #' be chosen? The default value is 10. #' #' @return \item{estall}{A lavel vector.} #' #' @importFrom pracma eig Norm #' @importFrom stats kmeans runif #' #' @references Binkiewicz, N., Vogelstein, J. T., & Rohe, K. (2017). \emph{Covariate-assisted spectral clustering}. #' \emph{Biometrika, 104(2), 361-377.}\cr\doi{10.1093/biomet/asx008}\cr #' @examples #' #' # Simulate the Network #' n = 10; K = 2; #' theta = 0.4 + (0.45-0.05)*(seq(1:n)/n)^2; Theta = diag(theta); #' P = matrix(c(0.8, 0.2, 0.2, 0.8), byrow = TRUE, nrow = K) #' set.seed(2022) #' l = sample(1:K, n, replace=TRUE); # node labels #' Pi = matrix(0, n, K) # label matrix #' for (k in 1:K){ #' Pi[l == k, k] = 1 #' } #' Omega = Theta %*% Pi %*% P %*% t(Pi) %*% Theta; #' Adj = matrix(runif(n*n, 0, 1), nrow = n); #' Adj = Omega - Adj; #' Adj = 1*(Adj >= 0) #' diag(Adj) = 0 #' Adj[lower.tri(Adj)] = t(Adj)[lower.tri(Adj)] #' caseno = 4; Nrange = 10; Nmin = 10; prob1 = 0.9; p = n*4; #' Q = matrix(runif(p*K, 0, 1), nrow = p, ncol = K) #' Q = sweep(Q,2,colSums(Q),`/`) #' W = matrix(0, nrow = n, ncol = K); #' for(jj in 1:n) { #' if(runif(1) <= prob1) {W[jj, 1:K] = Pi[jj, ];} #' else W[jj, sample(K, 1)] = 1; #' } #' W = t(W) #' D0 = Q %*% W #' X = matrix(0, n, p) #' N = switch(caseno, rep(100, n), rep(100, n), round(runif(n)*Nrange+ Nmin), #' round(runif(n)* Nrange+Nmin)) #' for (i in 1: ncol(D0)){ #' X[i, ] = rmultinom(1, N[i], D0[, i]) #' } #' CASC(Adj, X, 2) #' @export CASC = function(Adj, Covariate, K, alphan = 5, itermax = 100, startn = 10){ s = rowSums(Adj) s = s + mean(s) s = s^(-1/2) S = diag(s) Z = S %*% Adj %*% S net.eigen = eigen(Z%*%Z) ca = Covariate %*% t(Covariate); ca.eigen = eigen(ca); alphalower = (net.eigen$values[K] - net.eigen$values[K+1])/ca.eigen$values[1]; alphaupper = net.eigen$values[1]/(ca.eigen$values[K] - ca.eigen$values[K+1]); d = rep(0, alphan); alpha = seq(alphalower, alphaupper, length.out = alphan); est = matrix(0, alphan, dim(Adj)[1]) for(ii in 1:alphan){ casc.eigen = eigen(Z%*%Z + alpha[ii]*ca); U = casc.eigen$vectors[,1:K]; Unorm = apply(U, 1, Norm); indu = which(Unorm > 0); U = U[indu, ]/Unorm[indu] result = kmeans(U, K, iter.max = itermax, nstart = startn); d[ii] = result$tot.withinss; est[ii, indu] = as.factor(result$cluster) } result = est[which.min(d), ] return(result) }
/scratch/gouwar.j/cran-all/cranData/CASCORE/R/CASC.R
#' @title Covariate Assisted Spectral Clustering on Ratios of Eigenvectors. #' @description Using ratios-of-eigenvectors to detect underlying communities in networks with node covariates. #' @details \emph{CASCORE} is fully established in \emph{Network-Adjusted Covariates for Community Detection} #' of Hu & Wang (2023). \emph{CASCORE} detects the latent community structure under the covariate #' assisted degree corrected stochastic block model (CADCSBM), and it allows the disagreement #' between the community structures indicated in the graph and the covariates, respectively. #' \code{K-means} is applied on the entry-wise ratios between first leading eigenvector and #' each of the other \eqn{K} leading eigenvectors of the combined matrix of the adjacency matrix #' and the covariate matrix, to reveal the underlying memberships. #' @param Adj A 0/1 adjacency matrix. #' @param Covariate A covariate matrix. The rows correspond to nodes and the columns correspond to covariates. #' @param K A positive integer, indicating the number of underlying communities in graph \code{Adj}. #' @param alpha A numeric vector, each element of which is a tuning parameter to weigh the covariate matrix. #' @param alphan The number of candidates \eqn{\alpha}. The default number is 5. #' @param itermax \code{k-means} parameter, indicating the maximum number of #' iterations allowed. The default value is 100. #' @param startn \code{k-means} parameter. If centers is a number, how many #' random sets should be chosen? The default value is 10. #' @return \item{estall}{A lavel vector}. #' #' @importFrom pracma eig Norm #' @importFrom stats kmeans runif median #' #' @references Hu, Y., & Wang, W. (2023) \emph{Network-AdjustedCovariatesforCommunity Detection}, #' \cr\url{https://arxiv.org/abs/2306.15616}\cr #' #' @examples #' #' # Simulate the Network #' n = 10; K = 2; #' theta = 0.4 + (0.45-0.05)*(seq(1:n)/n)^2; Theta = diag(theta); #' P = matrix(c(0.8, 0.2, 0.2, 0.8), byrow = TRUE, nrow = K) #' set.seed(2022) #' l = sample(1:K, n, replace=TRUE); # node labels #' Pi = matrix(0, n, K) # label matrix #' for (k in 1:K){ #' Pi[l == k, k] = 1 #' } #' Omega = Theta %*% Pi %*% P %*% t(Pi) %*% Theta; #' Adj = matrix(runif(n*n, 0, 1), nrow = n); #' Adj = Omega - Adj; #' Adj = 1*(Adj >= 0) #' diag(Adj) = 0 #' Adj[lower.tri(Adj)] = t(Adj)[lower.tri(Adj)] #' caseno = 4; Nrange = 10; Nmin = 10; prob1 = 0.9; p = n*4; #' Q = matrix(runif(p*K, 0, 1), nrow = p, ncol = K) #' Q = sweep(Q,2,colSums(Q),`/`) #' W = matrix(0, nrow = n, ncol = K); #' for(jj in 1:n) { #' if(runif(1) <= prob1) {W[jj, 1:K] = Pi[jj, ];} #' else W[jj, sample(K, 1)] = 1; #' } #' W = t(W) #' D0 = Q %*% W #' X = matrix(0, n, p) #' N = switch(caseno, rep(100, n), rep(100, n), round(runif(n)*Nrange+ Nmin), #' round(runif(n)* Nrange+Nmin)) #' for (i in 1: ncol(D0)){ #' X[i, ] = rmultinom(1, N[i], D0[, i]) #' } #' CASCORE(Adj, X, 2) #' @export CASCORE = function(Adj, Covariate, K, alpha = NULL, alphan = 5, itermax = 100, startn = 10){ # Inputs: # 1) Adj: an n by n symmetric adjacency matrix whose diagonals = 0 and positive entries = 1. # 2) Covariate: an n by p covariate matrix # 3) K: a positive integer which is no larger than n. This is the predefined number of communities. # 3) alpha: a positive number to tune the weight of covariate matrix # Optional Arguments for Kmeans: # 1) itermax: the maximum number of iterations allowed. # 2) nstart: R will try startn different random starting assignments and then select the one with the lowest within cluster variation. 
# Outputs: # 1) a factor indicating nodes' labels. Items sharing the same label are in the same community. if(!isSymmetric(Adj)) stop("Error! Adj is not symmetric!") if(K > dim(Adj)[1]) stop("Error! More communities than nodes!") if(K %% 1 != 0) stop("Error! K is not an integer!") if(K < 2) stop("Error: There must be at least 2 communities!") if(dim(Adj)[1] != dim(Covariate)[1]) stop("Error! Incompatible!") # if(alpha < 0) stop("Negative Alpha") #Regularity check estall = rep(NA, dim(Adj)[1]); netrow = rowSums(Adj); covrow = rowSums(abs(Covariate)); ind_reg = which(netrow != 0 | covrow != 0) Adj = Adj[ind_reg, ind_reg]; Covariate = Covariate[ind_reg, ]; ##Algorithm n = dim(Adj)[1] d = rowSums(Adj); X = Adj %*% Covariate # lambda = max(log(n), quantile(d, probs = 0.25))/(d + max(log(n), median(d, probs = 0.25))); lambda = log(n)/(d + log(n)); if(is.null(alpha)){ d.net = sort(abs(eig(Adj)), decreasing = TRUE); alphaupper = d.net[1]*log(n)/mean(d); alphalower = max(0.05, d.net[K]/4); alpha = seq(alphalower, alphaupper, length.out = alphan); # print(alpha) est1 = matrix(0, alphan, n); est2 = est1; prop1 = rep(0, alphan); prop2 = rep(0, alphan) for(i in 1:alphan){ # Newmat = X + diag(alpha[i]/(d+1))%*%Covariate; Newmat = X + alpha[i]*diag(lambda)%*%Covariate; zz = Newmat%*%t(Newmat) c = eigen(zz) vec = c$vectors vecn = vec[,1:K]/apply(vec[,1:K], 1, Norm); result = kmeans(vecn, K, iter.max = itermax, nstart = startn); if (result$ifault==4) { result = kmeans(X, K, iter.max = itermax, nstart = startn, algorithm="Lloyd"); } prop2[i] = result$tot.withinss; est2[i,] = as.factor(result$cluster); } est = est2[which.min(prop2), ]; print(alpha[which.min(prop2)]) # print(prop2) } else{ alphan = length(alpha); est1 = matrix(0, alphan, n); est2 = est1; prop1 = rep(0, alphan); prop2 = rep(0, alphan) for(i in 1:alphan){ Newmat = X + alpha[i]*diag(lambda)%*%Covariate; zz = Newmat%*%t(Newmat) c = eigen(zz) vec = c$vectors vecn = vec[,1:K]/apply(vec[,1:K], 1, Norm); result = kmeans(vecn, K, iter.max = itermax, nstart = startn); if (result$ifault==4) { result = kmeans(X, K, iter.max = itermax, nstart = startn, algorithm="Lloyd"); } prop2[i] = result$tot.withinss; est2[i,] = as.factor(result$cluster); } est = est2[which.min(prop2), ]; } estall[ind_reg] = est; return(estall) }
/scratch/gouwar.j/cran-all/cranData/CASCORE/R/CASCORE.R
#' @title Covariates-based Clustering. #' @description \emph{Covariates-based Clustering} is a spectral clustering method that focuses #' solely on the covariates structure of a network. It employs \code{k-means} on the first #' \eqn{K} leading eigenvectors of the weighted cogariates matrix of a graph, with each #' eigenvector normalized to have unit magnitude. #' @param Adj A 0/1 adjacency matrix. #' @param tau An optional tuning parameter, the default value is the mean of adajacency matrix. #' @param K A positive integer, indicating the number of underlying communities in #' graph \code{Adj}. #' @param itermax \code{k-means} parameter, indicating the maximum number of #' iterations allowed. The default value is 100. #' @param startn \code{k-means} parameter. If centers is a number, how many #' random sets should be chosen? The default value is 10. #' @return A label vector. #' #' @importFrom stats kmeans runif #' @examples #' #' # Simulate the Network #' n = 10; K = 2; #' theta = 0.4 + (0.45-0.05)*(seq(1:n)/n)^2; Theta = diag(theta); #' P = matrix(c(0.8, 0.2, 0.2, 0.8), byrow = TRUE, nrow = K) #' set.seed(2022) #' l = sample(1:K, n, replace=TRUE); # node labels #' Pi = matrix(0, n, K) # label matrix #' for (k in 1:K){ #' Pi[l == k, k] = 1 #' } #' Omega = Theta %*% Pi %*% P %*% t(Pi) %*% Theta; #' Adj = matrix(runif(n*n, 0, 1), nrow = n); #' Adj = Omega - Adj; #' Adj = 1*(Adj >= 0) #' diag(Adj) = 0 #' Adj[lower.tri(Adj)] = t(Adj)[lower.tri(Adj)] #' Cov_based(Adj, 2) #' @export Cov_based <- function(Adj, K, tau = NULL, itermax = NULL, startn = NULL){ if(!isSymmetric(Adj)) stop("Error! Adj is not symmetric!") if(K > dim(Adj)[1]) stop("Error! More communities than nodes!") if(K %% 1 != 0) stop("Error! K is not an integer!") if(K <= 0) stop("Error! Nonpositive K!") if(is.null(tau)) tau = mean(Adj); n <- dim(Adj)[1] A_tau = Adj + tau * matrix(1, n, n)/n s = rowSums(A_tau) s = s^(-1/2) S = diag(s) Z = S %*% A_tau %*% S #D <- diag(rowSums(A_tau)) #L_tau <- (D + tau*J/n)^{-1/2} %*% Adj %*% (D + tau*J/n)^{-1/2} #L <- (D + tau*diag(n))^{-1/2} %*% Adj %*% (D + tau*diag(n))^{-1/2} g.eigen <- eigen(Z) R = g.eigen$vectors R = R[, 1: K] R <- t(apply(R, 1, function(x) x/sqrt(sum(x^2)))) # apply Kmeans to assign nodes into communities if(!is.null(itermax) & !is.null(startn)){ result = kmeans(R, K, iter.max = itermax, nstart = startn) #apply kmeans on ratio matrix } if(!is.null(itermax) & is.null(startn)){ result = kmeans(R, K, iter.max = itermax, nstart = 10) #apply kmeans on ratio matrix } if(is.null(itermax) & !is.null(startn)){ result = kmeans(R, K, iter.max = 100, nstart = startn) #apply kmeans on ratio matrix } else{ result = kmeans(R, K, iter.max = 100, nstart = 10) #apply kmeans on ratio matrix } est = as.factor(result$cluster) return(est) }
/scratch/gouwar.j/cran-all/cranData/CASCORE/R/Cov_based.R
#' @title Network-based Clustering. #' @description \emph{Network-based Clustering} is a spectral clustering method that focuses #' solely on the topological structure of a network. It employs \code{k-means} on the first #' \eqn{K} leading eigenvectors of the weighted adjacency matrix of a graph, with each #' eigenvector normalized to have unit magnitude. #' @param Adj A 0/1 adjacency matrix. #' @param tau An optional tuning parameter, the default value is the mean of adajacency matrix. #' @param K A positive integer, indicating the number of underlying communities in #' graph \code{Adj}. #' @param itermax \code{k-means} parameter, indicating the maximum number of #' iterations allowed. The default value is 100. #' @param startn \code{k-means} parameter. If centers is a number, how many #' random sets should be chosen? The default value is 10. #' @return A label vector. #' #' @importFrom stats kmeans runif #' @examples #' #' # Simulate the Network #' n = 10; K = 2; #' theta = 0.4 + (0.45-0.05)*(seq(1:n)/n)^2; Theta = diag(theta); #' P = matrix(c(0.8, 0.2, 0.2, 0.8), byrow = TRUE, nrow = K) #' set.seed(2022) #' l = sample(1:K, n, replace=TRUE); # node labels #' Pi = matrix(0, n, K) # label matrix #' for (k in 1:K){ #' Pi[l == k, k] = 1 #' } #' Omega = Theta %*% Pi %*% P %*% t(Pi) %*% Theta; #' Adj = matrix(runif(n*n, 0, 1), nrow = n); #' Adj = Omega - Adj; #' Adj = 1*(Adj >= 0) #' diag(Adj) = 0 #' Adj[lower.tri(Adj)] = t(Adj)[lower.tri(Adj)] #' Net_based(Adj, 2) #' @export Net_based <- function(Adj, K, tau = NULL, itermax = NULL, startn = NULL){ if(!isSymmetric(Adj)) stop("Error! Adj is not symmetric!") if(K > dim(Adj)[1]) stop("Error! More communities than nodes!") if(K %% 1 != 0) stop("Error! K is not an integer!") if(K <= 0) stop("Error! Nonpositive K!") if(is.null(tau)) tau = mean(Adj); n <- dim(Adj)[1] A_tau = Adj + tau * matrix(1, n, n)/n s = rowSums(A_tau) s = s^(-1/2) S = diag(s) Z = S %*% A_tau %*% S #D <- diag(rowSums(A_tau)) #L_tau <- (D + tau*J/n)^{-1/2} %*% Adj %*% (D + tau*J/n)^{-1/2} #L <- (D + tau*diag(n))^{-1/2} %*% Adj %*% (D + tau*diag(n))^{-1/2} g.eigen <- eigen(Z) R = g.eigen$vectors R = R[, 1: K] R <- t(apply(R, 1, function(x) x/sqrt(sum(x^2)))) # apply Kmeans to assign nodes into communities if(!is.null(itermax) & !is.null(startn)){ result = kmeans(R, K, iter.max = itermax, nstart = startn) #apply kmeans on ratio matrix } if(!is.null(itermax) & is.null(startn)){ result = kmeans(R, K, iter.max = itermax, nstart = 10) #apply kmeans on ratio matrix } if(is.null(itermax) & !is.null(startn)){ result = kmeans(R, K, iter.max = 100, nstart = startn) #apply kmeans on ratio matrix } else{ result = kmeans(R, K, iter.max = 100, nstart = 10) #apply kmeans on ratio matrix } est = as.factor(result$cluster) return(est) }
/scratch/gouwar.j/cran-all/cranData/CASCORE/R/Net_based.R
#' @title Pinv #' @param z A numeric vector of length n+1 (a constraint residual). #' @param n The number of nodes. #' @return \item{X}{The inverse of the composed constraint operator Ac(Acs(.)) applied to z}. #' @noRd #' @keywords internal Pinv = function(z, n){ mu = z[1:n] nu = z[(n+1): length(z)] x1 = 1/2/n * (diag(n) - (n-2)/n/(2*n-2)*matrix(1, n, n)) %*% mu x2 = 1/n/(2-2*n)*matrix(1, n, 1) %*% nu x3 = - 1/n/(2*n-2)*matrix(1, 1, n) %*% mu x4 = nu/(n - 1) X = c(x1 + x2, x3 + x4) return(X) }
/scratch/gouwar.j/cran-all/cranData/CASCORE/R/Pinv.R
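`Pinv` applies the closed-form inverse of the composed constraint operator `Ac(Acs(.))`; `projAXb` uses it to project onto the affine constraint set. A quick check that the composition is the identity (a sanity sketch, not package code; the internal helpers are accessed with `:::` and CASCORE is assumed installed):

```r
set.seed(2)
n <- 6
z  <- rnorm(n + 1)
zz <- CASCORE:::Ac(CASCORE:::Acs(CASCORE:::Pinv(z, n), n), n)
all.equal(as.numeric(zz), z)   # TRUE: Ac(Acs(Pinv(z))) recovers z
```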
#' @title Spectral Clustering On Ratios-of-Eigenvectors. #' @description Using ratios-of-eigenvectors to detect underlying communities. #' @details \emph{SCORE} is fully established in \emph{Fast community detection by #' SCORE} of Jin (2015). \emph{SCORE} uses the entry-wise ratios between the #' first leading eigenvector and each of the other \eqn{K-1} leading eigenvectors for #' clustering. It is noteworthy that SCORE only works on connected graphs, #' in other words, it does not allow for isolated vertices. #' @param G A 0/1 adjacency matrix of a connected graph. #' @param K A positive integer, indicating the number of underlying communities in graph \code{G}. #' @param itermax \code{k-means} parameter, indicating the maximum number of #' iterations allowed. The default value is 100. #' @param startn \code{k-means} parameter. If centers is a number, how many #' random sets should be chosen? The default value is 10. #' @return \item{estall}{A lavel vector.} #' #' @importFrom stats kmeans runif #' #' @references Jin, J. (2015). \emph{Fast community detection by score}. #' \emph{The Annals of Statistics 43 (1), 57–89.}\cr\doi{10.1214/14-AOS1265}\cr #' #' @examples #' #' # Simulate the Network #' n = 10; K = 2; #' theta = 0.4 + (0.45-0.05)*(seq(1:n)/n)^2; Theta = diag(theta); #' P = matrix(c(0.8, 0.2, 0.2, 0.8), byrow = TRUE, nrow = K) #' set.seed(2022) #' l = sample(1:K, n, replace=TRUE); # node labels #' Pi = matrix(0, n, K) # label matrix #' for (k in 1:K){ #' Pi[l == k, k] = 1 #' } #' Omega = Theta %*% Pi %*% P %*% t(Pi) %*% Theta; #' Adj = matrix(runif(n*n, 0, 1), nrow = n); #' Adj = Omega - Adj; #' Adj = 1*(Adj >= 0) #' diag(Adj) = 0 #' Adj[lower.tri(Adj)] = t(Adj)[lower.tri(Adj)] #' library(igraph) #' is.igraph(Adj) # [1] FALSE #' ix = components(graph.adjacency(Adj)) #' componentLabel = ix$membership #' giantLabel = which(componentLabel == which.max(ix$csize)) #' Giant = Adj[giantLabel, giantLabel] #' SCORE(Giant, 2) #' #' @export #################################################### ######## Spectral Clustering Method: SCORE ######### #################################################### # Assume there are n nodes and K communities # Before applying SCORE, need to: # 1) transform the network graph into an n by n adjacency matrix. It has following properties: # i) symmetrix # ii) diagonals = 0 # iii) positive entries = 1. ##### SCORE ##### # spectral clustering On ratios-of-eigenvectors SCORE = function(G, K, itermax = NULL, startn = NULL){ # Inputs: # 1) G: an n by n symmetric adjacency matrix whose diagonals = 0 and positive entries = 1. # 2) K: a positive integer which is no larger than n. This is the predefined number of communities. # Optional Arguments for Kmeans: # 1) itermax: the maximum number of iterations allowed. # 2) nstart: R will try startn different random starting assignments and then select the one with the lowest within cluster variation. # Outputs: # 1) a factor indicating nodes' labels. Items sharing the same label are in the same community. # Remark: # SCORE only works on connected graphs, i.e., no isolated node is allowed. # exclude all wrong possibilities: if(!isSymmetric(G)) stop("Error! G is not symmetric!") if(K > dim(G)[1]) stop("Error! More communities than nodes!") if(K %% 1 != 0) stop("Error! K is not an integer!") if(K <= 0) stop("Error! Nonpositive K!") g.eigen = eigen(G) if(sum(g.eigen$vectors[, 1]==0) > 0) stop("Error! 
Zeroes in the first column") R = g.eigen$vectors[, -1] R = R[, 1: (K-1)] R = R / g.eigen$vectors[, 1] # apply Kmeans to assign nodes into communities if(!is.null(itermax) & !is.null(startn)){ result = kmeans(R, K, iter.max = itermax, nstart = startn) #apply kmeans on ratio matrix } if(!is.null(itermax) & is.null(startn)){ result = kmeans(R, K, iter.max = itermax, nstart = 10) #apply kmeans on ratio matrix } if(is.null(itermax) & !is.null(startn)){ result = kmeans(R, K, iter.max = 100, nstart = startn) #apply kmeans on ratio matrix } else{ result = kmeans(R, K, iter.max = 100, nstart = 10) #apply kmeans on ratio matrix } est = as.factor(result$cluster) return(est) }
/scratch/gouwar.j/cran-all/cranData/CASCORE/R/SCORE.R
#' @title cl2mat #' @param labels A vector, representing the labels of nodes. #' @return mat The result matrix. #' @noRd #' @keywords internal cl2mat = function(labels){ n = length(labels); k = length(unique(labels)); mat = matrix(0, n, k) for (j in 1: k){ mat[, j] = 1*(labels == j); } return(mat) }
/scratch/gouwar.j/cran-all/cranData/CASCORE/R/cl2mat.R
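A tiny illustration of `cl2mat` (note that it assumes the labels are coded 1..k; it is internal, hence `:::`):

```r
CASCORE:::cl2mat(c(1, 2, 1, 3))
#      [,1] [,2] [,3]
# [1,]    1    0    0
# [2,]    0    1    0
# [3,]    1    0    0
# [4,]    0    0    1
```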
#' @title Normalized Principle Component Analysis. #' @description \emph{Normalized Principle Component Analysis (nPCA)}, also known as spectral clustering on the #' graph Laplacian, is a classical spectral clustering method that applies \code{k-means} on the first \eqn{K} #' leading (unit-norm) eigenvectors of the degree-corrected normalized graph laplacian. #' @param Adj A 0/1 adjacency matrix. #' @param K A positive integer, indicating the number of underlying communities in #' graph \code{Adj}. #' @param tau An optional regularization parameter for suitable degree normalization. The default value is the #' average degree of graph \code{Adj}. #' @param itermax \code{k-means} parameter, indicating the maximum number of #' iterations allowed. The default value is 100. #' @param startn \code{k-means} parameter. If centers is a number, how many #' random sets should be chosen? The default value is 10. #' @return \item{estall}{A lavel vector.} #' #' @importFrom stats kmeans runif #' @references Chung, F. R., & Graham, F. C. (1997). \emph{Spectral graph theory (Vol. 92)}. #' \emph{American Mathematical Soc..} #' @examples #' #' # Simulate the Network #' n = 10; K = 2; #' theta = 0.4 + (0.45-0.05)*(seq(1:n)/n)^2; Theta = diag(theta); #' P = matrix(c(0.8, 0.2, 0.2, 0.8), byrow = TRUE, nrow = K) #' set.seed(2022) #' l = sample(1:K, n, replace=TRUE); # node labels #' Pi = matrix(0, n, K) # label matrix #' for (k in 1:K){ #' Pi[l == k, k] = 1 #' } #' Omega = Theta %*% Pi %*% P %*% t(Pi) %*% Theta; #' Adj = matrix(runif(n*n, 0, 1), nrow = n); #' Adj = Omega - Adj; #' Adj = 1*(Adj >= 0) #' diag(Adj) = 0 #' Adj[lower.tri(Adj)] = t(Adj)[lower.tri(Adj)] #' nPCA(Adj, 2) #' #' @export nPCA = function(Adj, K, tau = NULL, itermax = 100, startn = 10){ # Inputs: # 1) Adj: an n by n symmetric adjacency matrix whose diagonals = 0 and positive entries = 1. # 2) K: a positive integer which is no larger than n. This is the predefined number of communities. # Optional Arguments for Kmeans: # 1) itermax: the maximum number of iterations allowed. # 2) nstart: R will try startn different random starting assignments and then select the one with the lowest within cluster variation. # Outputs: # 1) a factor indicating nodes' labels. Items sharing the same label are in the same community. if(!isSymmetric(Adj)) stop("Error! Adj is not symmetric!") if(K > dim(Adj)[1]) stop("Error! More communities than nodes!") if(K %% 1 != 0) stop("Error! K is not an integer!") if(K <= 0) stop("Error! Nonpositive K!") s = rowSums(Adj) if(is.null(tau)) tau = mean(s); s = (s+tau)^(-1/2) S = diag(s) Z = S %*% Adj %*% S s.eigen = eigen(Z) H = s.eigen$vectors H = H[, 1:K] #apply kmeans on ratio matrix result = tryCatch({kmeans(H, K, iter.max = itermax, nstart = startn)}, error = function(x) {kmeans(H, K, iter.max = 100, nstart = 99)}) est = as.factor(result$cluster) return(est) }
/scratch/gouwar.j/cran-all/cranData/CASCORE/R/nPCA.R
#' @title normalizeSym #' @param A A 0/1 adjacency matrix. #' @param L An optional input. #' @return \item{An}{The result matrix}. #' @noRd #' @keywords internal normalizeSym = function(A, L = NULL){ if (is.null(L)) L = 0 n = dim(A)[1] d = colSums(A) d[which(d == 0)] = 1e10 if (L == 0) { d = 1/sqrt(d) D = diag(d) An = D %*% A %*% D An = 1/2*(An + t(An)) } else{ d = 1 / d D=diag(d) An=D %*% A; } return(An) }
/scratch/gouwar.j/cran-all/cranData/CASCORE/R/normalizeSym.R
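With `L = 0` (the default) `normalizeSym` returns the symmetric normalization D^{-1/2} A D^{-1/2}; with any other `L` it row-normalizes, so each non-isolated row of the result sums to one. A small check of the second branch (a sanity sketch, not package code):

```r
set.seed(5)
n <- 6
A <- matrix(rbinom(n * n, 1, 0.5), n)
A <- 1 * ((A + t(A)) > 0); diag(A) <- 0          # a symmetric 0/1 adjacency matrix
rowSums(CASCORE:::normalizeSym(A, L = 1))        # each non-isolated row sums to 1
```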
#' @title Ordinary Principle Component Analysis. #' @description \emph{Ordinary Principle Component Analysis (oPCA)}, also known as spectral clustering #' on the adjacency matrix is a classical spectral clustering method that applies \code{k-means} on #' the first \eqn{K} leading (unit-norm) eigenvectors of the adjacency matrix of a graph. #' @param Adj A 0/1 adjacency matrix. #' @param K A positive integer, indicating the number of underlying communities in #' graph \code{Adj}. #' @param itermax \code{k-means} parameter, indicating the maximum number of #' iterations allowed. The default value is 100. #' @param startn \code{k-means} parameter. If centers is a number, how many #' random sets should be chosen? The default value is 10. #' @return \item{estall}{A lavel vector.} #' #' @importFrom stats kmeans runif #' @references Chung, F. R., & Graham, F. C. (1997). \emph{Spectral graph theory (Vol. 92)}. #' \emph{American Mathematical Soc..} #' @examples #' #' # Simulate the Network #' n = 10; K = 2; #' theta = 0.4 + (0.45-0.05)*(seq(1:n)/n)^2; Theta = diag(theta); #' P = matrix(c(0.8, 0.2, 0.2, 0.8), byrow = TRUE, nrow = K) #' set.seed(2022) #' l = sample(1:K, n, replace=TRUE); # node labels #' Pi = matrix(0, n, K) # label matrix #' for (k in 1:K){ #' Pi[l == k, k] = 1 #' } #' Omega = Theta %*% Pi %*% P %*% t(Pi) %*% Theta; #' Adj = matrix(runif(n*n, 0, 1), nrow = n); #' Adj = Omega - Adj; #' Adj = 1*(Adj >= 0) #' diag(Adj) = 0 #' Adj[lower.tri(Adj)] = t(Adj)[lower.tri(Adj)] #' oPCA(Adj, 2) #' @export oPCA = function(Adj, K, itermax = 100, startn = 10){ # Inputs: # 1) Adj: an n by n symmetric adjacency matrix whose diagonals = 0 and positive entries = 1. # 2) K: a positive integer which is no larger than n. This is the predefined number of communities. # Optional Arguments for Kmeans: # 1) itermax: the maximum number of iterations allowed. # 2) nstart: R will try startn different random starting assignments and then select the one with the lowest within cluster variation. # Outputs: # 1) a factor indicating nodes' labels. Items sharing the same label are in the same community. if(!isSymmetric(Adj)) stop("Error! Adj is not symmetric!") if(K > dim(Adj)[1]) stop("Error! More communities than nodes!") if(K %% 1 != 0) stop("Error! K is not an integer!") if(K <= 0) stop("Error! Nonpositive K!") o.eigen = eigen(Adj) O = o.eigen$vectors O = O[, 1:K] result = tryCatch({kmeans(O, K, iter.max = itermax, nstart = startn)}, error = function(x) {kmeans(O, K, iter.max = itermax, nstart = startn, algorithm="Lloyd")}) est = as.factor(result$cluster) return(est) }
/scratch/gouwar.j/cran-all/cranData/CASCORE/R/oPCA.R
#' @title projAXb #' @param X0 The matrix to be projected. #' @param k The number of communities (the required trace of X). #' @param n The number of nodes. #' @return X The projection of X0 onto the affine set defined by the SDP equality constraints (row sums equal to one and trace equal to k). #' @noRd #' @keywords internal projAXb = function(X0, k, n){ # k is the trace of X b = c(2*rep(1, n), k) X = X0 - Acs(Pinv(Ac(X0, n) - b, n), n) return(X) }
/scratch/gouwar.j/cran-all/cranData/CASCORE/R/projAXb.R
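After `projAXb`, the linear SDP constraints hold exactly: every row of X sums to one (so 2*X%*%1 equals 2) and trace(X) equals k. A quick check (a sanity sketch, not package code; internal helpers accessed with `:::`):

```r
set.seed(3)
n <- 8; K <- 3
X0 <- matrix(rnorm(n * n), n)
X  <- CASCORE:::projAXb(X0, K, n)
all.equal(as.numeric(CASCORE:::Ac(X, n)), c(rep(2, n), K))   # TRUE
```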
#' @title projSP #' @param X0 The matrix to be projected onto the positive semidefinite cone. #' @return \item{X}{The projection of X0 onto the positive semidefinite cone}. #' @noRd #' @keywords internal projSp = function(X0){ n = dim(X0)[1] dec = tryCatch({temp = eigen(0.5*(X0+t(X0))); list(U = temp$vectors, D = diag(temp$values))}, error = function(x) {temp = svd(0.5*(X0+t(X0))); U = temp$u; list(U = U, D = diag(diag(t(U) %*% X0 %*% U)))}) U = dec$U; D = dec$D idx = as.integer(which(diag(D) >= 0)) X = as.matrix(U[, idx]) %*% as.matrix(D[idx,idx]) %*% as.matrix(t(U[, idx])); return(X) }
/scratch/gouwar.j/cran-all/cranData/CASCORE/R/projSP.R
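projSp keeps only the eigen-directions of the symmetrized input with non-negative eigenvalues, i.e. it projects onto the positive semidefinite cone. A minimal check of that behaviour, assuming projSp has been loaded (it is an unexported helper), is the following sketch; it is not part of the package.

M <- matrix(c(2, 3, 3, 1), nrow = 2)   # symmetric, with one negative eigenvalue
eigen(M)$values                        # roughly 4.54 and -1.54
P <- projSp(M)                         # also prints "no error" on the normal code path
min(eigen(P)$values)                   # approximately zero: the negative direction is removed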
#' @title Regularized Spectral Clustering
#' @param A A 0/1 adjacency matrix.
#' @param K A positive integer, indicating the number of underlying communities in graph \code{A}.
#' @param method The method of spectral clustering. 'pos' refers to regularized spectral clustering,
#'   'lap' refers to spectral clustering using the normalized graph Laplacian, and 'adj' refers to using
#'   the adjacency matrix.
#' @param prior An optional prior probability vector over the \code{K} communities; defaults to the uniform prior.
#' @return \item{class}{A label vector}.
#' @noRd
#' @keywords internal
rsc = function(A, K, method, prior = NULL){
  # Regularized Spectral Clustering
  # Input:  A: Adjacency matrix
  #         K: number of clusters
  #         method: 'pos' - Regularized Spectral Clustering
  #                 'lap' - Spectral Clustering (use normalized graph laplacian)
  #                 'adj' - Use adjacency matrix
  # Output: class labels
  if (is.null(prior)) prior = 1/K * matrix(1, K, 1)
  nv = dim(A)[1]
  tau = mean(A)
  if (method == "pos"){
    A_tau = A + tau * matrix(1, nv, nv)
    L_tau = normalizeSym(A_tau)
  } else if(method == "lap") L_tau = normalizeSym(A)
  else L_tau = (A + t(A))/2
  U1 = eigen(L_tau)$vectors[, 1:K]
  U1 = t(apply(U1, 1, function(x) x/sqrt(sum(x^2))))
  maxsum = Inf
  nrestart = 100
  for (i in 1:nrestart){
    clustering = kmeans(U1, K)
    class0 = clustering$cluster
    sumD = clustering$totss
    if (maxsum > sum(sumD)){
      maxsum = sumD
      class = class0
    }
  }
  return(class)
}
/scratch/gouwar.j/cran-all/cranData/CASCORE/R/rsc.R
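Because the 'pos' and 'lap' options depend on a normalizeSym() helper defined elsewhere in the package, the simplest self-contained illustration of rsc uses method = "adj", which only needs the symmetrized adjacency matrix. This is an illustrative sketch on a toy graph of two disjoint 5-cliques, not package code.

Adj <- kronecker(diag(2), matrix(1, 5, 5))   # two disjoint communities of size 5
diag(Adj) <- 0
rsc(Adj, K = 2, method = "adj")              # returns a length-10 vector of cluster labels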
# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 #' Internal function #' @keywords internal lib_new_search_e <- function() { .Call('_CASMAP_lib_new_search_e', PACKAGE = 'CASMAP') } #' Internal function #' @keywords internal lib_new_search_chi <- function() { .Call('_CASMAP_lib_new_search_chi', PACKAGE = 'CASMAP') } #' Internal function #' @keywords internal lib_new_search_fastcmh <- function() { .Call('_CASMAP_lib_new_search_fastcmh', PACKAGE = 'CASMAP') } #' Internal function #' @keywords internal lib_new_search_facs <- function() { .Call('_CASMAP_lib_new_search_facs', PACKAGE = 'CASMAP') } #' Internal function #' @keywords internal lib_delete_search_e <- function(inst) { invisible(.Call('_CASMAP_lib_delete_search_e', PACKAGE = 'CASMAP', inst)) } #' Internal function #' @keywords internal lib_delete_search_chi <- function(inst) { invisible(.Call('_CASMAP_lib_delete_search_chi', PACKAGE = 'CASMAP', inst)) } #' Internal function #' @keywords internal lib_delete_search_fastcmh <- function(inst) { invisible(.Call('_CASMAP_lib_delete_search_fastcmh', PACKAGE = 'CASMAP', inst)) } #' Internal function #' @keywords internal lib_delete_search_facs <- function(inst) { invisible(.Call('_CASMAP_lib_delete_search_facs', PACKAGE = 'CASMAP', inst)) } #' Internal function #' @keywords internal lib_read_eth_files <- function(inst, x_filename, y_filename, encoding) { invisible(.Call('_CASMAP_lib_read_eth_files', PACKAGE = 'CASMAP', inst, x_filename, y_filename, encoding)) } #' Internal function #' @keywords internal lib_read_eth_files_with_cov_fastcmh <- function(inst, x_filename, y_filename, covfilename, encoding) { invisible(.Call('_CASMAP_lib_read_eth_files_with_cov_fastcmh', PACKAGE = 'CASMAP', inst, x_filename, y_filename, covfilename, encoding)) } #' Internal function #' @keywords internal lib_read_eth_files_with_cov_facs <- function(inst, x_filename, y_filename, covfilename, encoding) { invisible(.Call('_CASMAP_lib_read_eth_files_with_cov_facs', PACKAGE = 'CASMAP', inst, x_filename, y_filename, covfilename, encoding)) } #' Internal function #' @keywords internal lib_read_plink_files <- function(inst, base_filename, encoding) { invisible(.Call('_CASMAP_lib_read_plink_files', PACKAGE = 'CASMAP', inst, base_filename, encoding)) } #' Internal function #' @keywords internal lib_read_plink_files_with_cov_fastcmh <- function(inst, base_filename, covfilename, encoding) { invisible(.Call('_CASMAP_lib_read_plink_files_with_cov_fastcmh', PACKAGE = 'CASMAP', inst, base_filename, covfilename, encoding)) } #' Internal function #' @keywords internal lib_read_plink_files_with_cov_facs <- function(inst, base_filename, covfilename, encoding) { invisible(.Call('_CASMAP_lib_read_plink_files_with_cov_facs', PACKAGE = 'CASMAP', inst, base_filename, covfilename, encoding)) } #' Internal function #' @keywords internal lib_read_covariates_file_fastcmh <- function(inst, cov_filename) { invisible(.Call('_CASMAP_lib_read_covariates_file_fastcmh', PACKAGE = 'CASMAP', inst, cov_filename)) } #' Internal function #' @keywords internal lib_read_covariates_file_facs <- function(inst, cov_filename) { invisible(.Call('_CASMAP_lib_read_covariates_file_facs', PACKAGE = 'CASMAP', inst, cov_filename)) } #' Internal function #' @keywords internal lib_write_eth_files_iset <- function(inst, x_filename, y_filename) { invisible(.Call('_CASMAP_lib_write_eth_files_iset', PACKAGE = 'CASMAP', inst, x_filename, y_filename)) } #' Internal function #' @keywords internal 
lib_write_eth_files_int <- function(inst, x_filename, y_filename) { invisible(.Call('_CASMAP_lib_write_eth_files_int', PACKAGE = 'CASMAP', inst, x_filename, y_filename)) } #' Internal function #' @keywords internal lib_write_eth_files_with_cov_fastcmh <- function(inst, x_filename, y_filename, covfilename) { invisible(.Call('_CASMAP_lib_write_eth_files_with_cov_fastcmh', PACKAGE = 'CASMAP', inst, x_filename, y_filename, covfilename)) } #' Internal function #' @keywords internal lib_write_eth_files_with_cov_facs <- function(inst, x_filename, y_filename, covfilename) { invisible(.Call('_CASMAP_lib_write_eth_files_with_cov_facs', PACKAGE = 'CASMAP', inst, x_filename, y_filename, covfilename)) } #' Internal function #' @keywords internal lib_execute_iset <- function(inst, alpha, l_max) { invisible(.Call('_CASMAP_lib_execute_iset', PACKAGE = 'CASMAP', inst, alpha, l_max)) } #' Internal function #' @keywords internal lib_execute_int <- function(inst, alpha, l_max) { invisible(.Call('_CASMAP_lib_execute_int', PACKAGE = 'CASMAP', inst, alpha, l_max)) } #' Internal function #' @keywords internal lib_summary_write_to_file_fais <- function(inst, output_file) { invisible(.Call('_CASMAP_lib_summary_write_to_file_fais', PACKAGE = 'CASMAP', inst, output_file)) } #' Internal function #' @keywords internal lib_summary_write_to_file_fastcmh <- function(inst, output_file) { invisible(.Call('_CASMAP_lib_summary_write_to_file_fastcmh', PACKAGE = 'CASMAP', inst, output_file)) } #' Internal function #' @keywords internal lib_summary_write_to_file_facs <- function(inst, output_file) { invisible(.Call('_CASMAP_lib_summary_write_to_file_facs', PACKAGE = 'CASMAP', inst, output_file)) } #' Internal function #' @keywords internal lib_profiler_write_to_file <- function(inst, output_file) { invisible(.Call('_CASMAP_lib_profiler_write_to_file', PACKAGE = 'CASMAP', inst, output_file)) } #' Internal function #' @keywords internal lib_filter_intervals_write_to_file <- function(inst, output_file) { invisible(.Call('_CASMAP_lib_filter_intervals_write_to_file', PACKAGE = 'CASMAP', inst, output_file)) } #' Internal function #' @keywords internal lib_pvals_testable_ints_write_to_file <- function(inst, output_file) { invisible(.Call('_CASMAP_lib_pvals_testable_ints_write_to_file', PACKAGE = 'CASMAP', inst, output_file)) } #' Internal function #' @keywords internal lib_pvals_significant_ints_write_to_file <- function(inst, output_file) { invisible(.Call('_CASMAP_lib_pvals_significant_ints_write_to_file', PACKAGE = 'CASMAP', inst, output_file)) } #' Internal function #' @keywords internal lib_pvals_testable_isets_write_to_file <- function(inst, output_file) { invisible(.Call('_CASMAP_lib_pvals_testable_isets_write_to_file', PACKAGE = 'CASMAP', inst, output_file)) } #' Internal function #' @keywords internal lib_pvals_significant_isets_write_to_file <- function(inst, output_file) { invisible(.Call('_CASMAP_lib_pvals_significant_isets_write_to_file', PACKAGE = 'CASMAP', inst, output_file)) } #' Internal function #' @keywords internal lib_get_significant_intervals <- function(inst) { .Call('_CASMAP_lib_get_significant_intervals', PACKAGE = 'CASMAP', inst) } #' Internal function #' @keywords internal lib_get_filtered_intervals <- function(inst) { .Call('_CASMAP_lib_get_filtered_intervals', PACKAGE = 'CASMAP', inst) } #' Internal function #' @keywords internal lib_get_significant_itemsets <- function(inst) { .Call('_CASMAP_lib_get_significant_itemsets', PACKAGE = 'CASMAP', inst) } #' Internal function #' @keywords internal 
lib_get_result_fais <- function(inst) { .Call('_CASMAP_lib_get_result_fais', PACKAGE = 'CASMAP', inst) } #' Internal function #' @keywords internal lib_get_result_int <- function(inst) { .Call('_CASMAP_lib_get_result_int', PACKAGE = 'CASMAP', inst) } #' Internal function #' @keywords internal lib_get_result_iset <- function(inst) { .Call('_CASMAP_lib_get_result_iset', PACKAGE = 'CASMAP', inst) } #' Internal function #' @keywords internal lib_get_result_facs <- function(inst) { .Call('_CASMAP_lib_get_result_facs', PACKAGE = 'CASMAP', inst) }
/scratch/gouwar.j/cran-all/cranData/CASMAP/R/RcppExports.R
#' Get the path to the example data file for regionGWAS mode #' #' Path to \code{CASMAP_example_data_1.txt} in \code{inst/extdata}. #' A dataset containing binary samples for the regionGWAS method. #' There are accompanying labels and covariates dataset. #' #' @format A matrix of \code{0}s and \code{1}s, with 1000 rows (features) #' and 100 columns #' (samples). In other words, each column is a sample, and each sample #' has 1000 binary features. #' #' @details Path to the file containing the data, for reading in to #' CASMAP object using the \code{readFiles} function. #' Note that the significant region is \code{[99, 102]}. #' #' #' @seealso \code{getExampleLabelsFilename}, #' \code{getExampleCovariatesFilename} #' #' @export #' @examples #' datafile <- getExampleDataFilename() getExampleDataFilename <- function(){ filename <- system.file("extdata", "CASMAP_example_data_1.txt", package = "CASMAP", mustWork = TRUE) return(filename) } #' Get the path to the example labels file for regionGWAS mode #' #' Path to \code{CASMAP_example_labels_1.txt} in \code{inst/extdata}. #' A dataset containing the binary labels for the data in the file #' \code{CASMAP_example_data_1.txt}, the path to which is given by #' \code{getExampleDataFilename}. #' #' @format A single column of 100 labels, each of which is either \code{0} #' or \code{1}. #' #' @details Path to the file containing the labels, for reading in to #' CASMAP object using the \code{readFiles} function. #' #' #' @seealso \code{getExampleDataFilename}, #' \code{getExampleCovariatesFilename} #' #' @export #' @examples #' labelsfile <- getExampleLabelsFilename() getExampleLabelsFilename <- function(){ filename <- system.file("extdata", "CASMAP_example_labels_1.txt", package = "CASMAP", mustWork = TRUE) return(filename) } #' Get the path to the example covariates file for regionGWAS mode #' #' Path to \code{CASMAP_example_covariates_1.txt} in \code{inst/extdata}. #' The covariates categories for the data set #' \code{CASMAP_example_data_1.txt}, the path to which is given by #' \code{getExampleDataFilename}. #' #' @format A single column vector of 100 labels, each of which #' is \code{0} or \code{1} (same format as labels file). #' #' @details Path to the file containing the labels, for reading in to #' CASMAP object using the \code{readFiles} function. #' #' @seealso \code{getExampleDataFilename}, #' \code{getExampleLabelsFilename} #' #' @export #' @examples #' covfile <- getExampleCovariatesFilename() getExampleCovariatesFilename <- function(){ filename <- system.file("extdata", "CASMAP_example_covariates_1.txt", package = "CASMAP", mustWork = TRUE) return(filename) } #' Get the path to the example significant intervals file #' #' Path to \code{CASMAP_example_covariates_1.txt} in #' \code{inst/extdata}. #' #' @keywords internal #' @examples #' sigregfile <- getExampleSignificantRegionsFilename() getExampleSignificantRegionsFilename <- function(){ filename <- system.file("extdata", "CASMAP_example_sig_regions_1.txt", package = "CASMAP", mustWork = TRUE) return(filename) }
/scratch/gouwar.j/cran-all/cranData/CASMAP/R/data.R
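The three getExample*Filename() helpers above only return file paths. A quick, illustrative way to confirm the documented shapes (a 1000 x 100 data matrix plus 100 labels and 100 covariates) is to read the files back in; this sketch assumes the files are whitespace-separated 0/1 text, as described in the package vignette, and is not part of the package itself.

X   <- as.matrix(read.table(getExampleDataFilename()))
y   <- scan(getExampleLabelsFilename())
cov <- scan(getExampleCovariatesFilename())
dim(X)        # 1000 features (rows) by 100 samples (columns)
length(y)     # 100 labels, each 0 or 1
length(cov)   # 100 covariate values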
#' Gets the regionGWAS string #' #' A getter for the global \code{regionGWAS} value, a string #' for the mode parameter. #' #' @keywords internal getRegionGWASString <- function(){ return( get("regionGWASString", envir=CASMAPenv) ) } #' Gets the higherOrderEpistasis string #' #' A getter for the global \code{higherOrderEpistasis} value, a string #' for the mode parameter. #' #' @keywords internal getHigherOrderEpistasisString <- function(){ return( get("higherOrderEpistasisString", envir=CASMAPenv) ) } #' Gets the minModeLength #' #' A getter for the global \code{minModeLength} value, a string #' for the mode parameter. #' #' @keywords internal getMinModeLength <- function(){ return( get("minModeLength", envir=CASMAPenv) ) } #' Checks if substring is part of regionGWAS #' #' Using\code{grepl} to compare strings, ignoring case. #' #' @param x The string which will be compared to 'regionGWAS' #' #' @details #' Uses \code{grepl} to search for exact match. Case will be ignored. #' #' @return \code{TRUE} if the string is a substring of 'regionGWAS', #' otherwise returns \code{FALSE}. #' #' @keywords internal isRegionGWASString <- function(x){ isMatch <- grepl(x, getRegionGWASString(), ignore.case=T) return(isMatch) } #' Checks if substring is part of higherOrderEpistasis #' #' Using grep to search through vector of strings #' #' @param x The string which will be compared to 'higherOrderEpistasis' #' #' @details #' Uses \code{grep} to search for exact match. #' #' #' @return \code{TRUE} if the string is a substring of 'higherOrderEpistasis', #' otherwise returns \code{FALSE}. #' #' @keywords internal isHigherOrderEpistasisString <- function(x){ isMatch <- grepl(x, getHigherOrderEpistasisString(), ignore.case=T) return(isMatch) } #' Error message for mode #' #' Return the appropriate error message for incorrect mode input #' #' @keywords internal modeErrorMessage <-function(){ message <- paste0("'mode' needs to be specified as a character string, ") message <- paste0(message, "either '", getRegionGWASString(), "' or ") message <- paste0(message, "'", getHigherOrderEpistasisString(), "'.") return(message) } #' Minimum length of modeb #' #' Gets the minimum mode character length (should be 3) #' #' @keywords internal getMinModeLength <-function(){ return( get("minModeLength", envir=CASMAPenv) ) } #' Checks mode string is long enough #' #' Checks mode string is at least minimum length #' #' @keywords internal modeNeedsMoreChars <-function(mode){ return( nchar(mode) < getMinModeLength() ) } #' Error message for mode, if too short #' #' Return the appropriate error message for incorrect mode input #' #' @keywords internal modeLengthErrorMessage <-function(){ message <- paste0("'mode' needs be specified as a character string, ") message <- paste0(message, "either '", getRegionGWASString(), "' or ") message <- paste0(message, "'", getHigherOrderEpistasisString(), "',") mc <- getMinModeLength() message <- paste0(message, "and should be at least ") message <- paste0(message, getMinModeLength(), "long.") return(message) } #' Get the function name #' #' Uses \code{match.call} and \code{as.character}. #' @keywords internal getParentFunctionName <- function(){ name <- sys.call(-1) return(as.character(name)) }
/scratch/gouwar.j/cran-all/cranData/CASMAP/R/utils.R
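The mode helpers above do case-insensitive substring matching against the canonical mode strings stored in CASMAPenv, which is what allows abbreviated or differently cased mode arguments to resolve to "regionGWAS" or "higherOrderEpistasis". A small illustrative sketch of these internal helpers (shown for exposition only; the ::: access assumes the installed package):

CASMAP:::getRegionGWASString()            # "regionGWAS"
CASMAP:::isRegionGWASString("region")     # TRUE, case-insensitive substring match
CASMAP:::isRegionGWASString("epistasis")  # FALSE
CASMAP:::modeNeedsMoreChars("re")         # TRUE, fewer than the minimum 3 characters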
library(methods) #new wrapper to go at end #' Internal class #' #' in internal class #' #' @keywords internal m_SignificantFeaturesSearch <- setRefClass("m_SignificantFeaturesSearch", fields = c('.inst', '.alpha', '.lmax', '.result_available', '.file_loaded'), methods = list( initialize = function(set_defaults=TRUE) { .self$.inst <-.self$.create_instance() .self$.alpha <- NULL .self$.lmax <- NULL .self$.result_available <- FALSE .self$.file_loaded <- FALSE if (set_defaults) { .self$set_alpha(0.05) .self$set_lmax(0) } }, .create_instance = function() { NULL }, .delete_instance = function(inst) { NULL }, finalize = function() { .self$.delete_instance(.self$.inst) }, .check_if_read_is_allowed = function() { }, .mark_read_done = function() { .self$.result_available <- FALSE .self$.file_loaded <- TRUE }, read_eth_files = function(data_path, labels_path, cov_path=NULL, encoding="dominant") { .self$.check_if_read_is_allowed() .self$.do_read_eth_files(data_path, labels_path, cov_path, encoding) .self$.mark_read_done() }, # TODO: split for _int and _iset due to multi-inheritance .do_read_eth_files = function(data_path, labels_path, cov_path=NULL, encoding="dominant") { lib_read_eth_files(.self$.inst, data_path, labels_path, encoding) }, read_plink_files = function(base_path, cov_path=NULL, encoding="dominant") { .self$.check_if_read_is_allowed() .self$.do_read_plink_files(base_path, cov_path, encoding) .self$.mark_read_done() }, # TODO: split for _int and _iset due to multi-inheritance .do_read_plink_files = function(base_path, cov_path=NULL, encoding="dominant") { lib_read_plink_files(.self$.inst, base_path, encoding) }, .check_if_write_is_allowed = function() { .self$.check_if_files_are_loaded() }, write_eth_files = function(x_file, y_file, ...) { .self$.check_if_write_is_allowed() .self$.write_eth_files(x_file, y_file, ...) 
}, .check_if_alpha_value_is_allowed = function(x) { if (!(x>=0 && x<=1)) { stop("you need to set alpha to a value between 0 and 1") } }, set_alpha = function(alpha) { .self$.check_if_alpha_value_is_allowed(alpha) .self$.alpha <- alpha .self$.result_available <- FALSE }, get_alpha = function() { return(.self$.alpha) }, .check_if_lmax_value_is_allowed = function(x) { if (!(x%%1==0 && x>=0)) { stop("you need to set lmax to a non-negative integer value") } }, set_lmax = function(lmax) { .self$.check_if_lmax_value_is_allowed(lmax) .self$.lmax <- lmax .self$.result_available <- FALSE }, get_lmax = function() { return(.self$.lmax) }, execute = function() { .self$.check_if_execute_is_allowed() .self$.result_available <- FALSE .self$.execute() .self$.result_available <- TRUE }, .check_if_result_available = function() { if (!.self$.result_available) { stop("you need to call the execute method first") } }, get_result = function() { .self$.check_if_result_available() result <- .self$.get_result() result["target.fwer"] <-.self$.alpha return(result) }, write_summary = function(file_path) { .self$.check_if_result_available() .self$.write_summary(file_path) }, .check_if_files_are_loaded = function() { if (!.self$.file_loaded) { stop("you need to call the read_eth_files / read_plink_files method first") } }, .check_if_alpha_is_set = function() { if (is.null(.self$.alpha)) { stop("you need to call the set_alpha method first") } }, .check_if_lmax_is_set = function() { if (is.null(.self$.lmax)) { stop("you need to call the set_lmax method first") } }, .check_if_execute_is_allowed = function() { .self$.check_if_alpha_is_set() .self$.check_if_lmax_is_set() .check_if_files_are_loaded() }, #NOTE NEW: print method show = function(){ mytype <- "unknown" myclasstype = toString(class(.self)[[1]]) if (myclasstype=="SignificantIntervalSearchExact") mytype = "FAIS" if (myclasstype=="SignificantIntervalSearchFastCmh") mytype = "FastCMH" if (myclasstype=="SignificantItemsetSearchFacs") mytype = "FACS" #extract alpha and lmxax myalpha <- toString(.self$.alpha) mylmax <- toString(.self$.lmax) #output message to be returned message1 <- paste0(mytype, " object with:") message2 <- paste0(" * alpha = ", myalpha) message3 <- paste0(" * lmax = ", mylmax) cat(message1, "\n") cat(message2, "\n") cat(message3, "\n") }, write_profile = function(file_path) { .self$.check_if_result_available() lib_profiler_write_to_file(.self$.inst, file_path) } ) ) #' Internal class #' #' An internal class #' #' @keywords internal m_SignificantIntervalSearch <- setRefClass("m_SignificantIntervalSearch", contains = c("m_SignificantFeaturesSearch"), methods = list( # .do_read_eth_files = function(data_path, labels_path, ...) # { # lib_read_eth_files_int(.self$.inst, data_path, labels_path) # }, # .do_read_plink_files = function(base_path, ...) # { # lib_read_plink_files_int(.self$.inst, base_path) # }, .execute = function() { lib_execute_int(.self$.inst, .self$.alpha, .self$.lmax) }, .write_eth_files = function(x_file, y_file, ...) 
{ lib_write_eth_files_int(.self$.inst, x_file, y_file) }, write_filtered_intervals = function(file_path) { .self$.check_if_result_available() lib_filter_intervals_write_to_file(.self$.inst, file_path) }, write_pvals_testable_intervals = function(file_path) { .self$.check_if_result_available() lib_pvals_testable_ints_write_to_file(.self$.inst, file_path) }, write_pvals_significant_intervals = function(file_path) { .self$.check_if_result_available() lib_pvals_significant_ints_write_to_file(.self$.inst, file_path) }, get_significant_intervals = function() { .self$.check_if_result_available() return(lib_get_significant_intervals(.self$.inst)) }, get_filtered_intervals = function() { .self$.check_if_result_available() return(lib_get_filtered_intervals(.self$.inst)) }, .get_result = function() { return(lib_get_result_fais(.self$.inst)) } ) ) #' Internal class #' #' An internal class. #' #' @keywords internal m_SignificantIntervalSearchFais <- setRefClass("m_SignificantIntervalSearchFais", contains = c("m_SignificantIntervalSearch"), methods = list( .write_summary = function(file_path) { lib_summary_write_to_file_fais(.self$.inst, file_path) } ) ) #' Internal class for search for significant regions #' #' Please use the \code{CASMAP} constructor. #' #' @keywords internal SignificantIntervalSearchExact <- setRefClass("SignificantIntervalSearchExact", contains = "m_SignificantIntervalSearchFais", methods = list( .create_instance = lib_new_search_e, .delete_instance = lib_delete_search_e ) ) #' Approximate fast significant interval search #' #' Class for approximate significant intervals search with Tarone correction for #' bounding intermediate FWERs. #' #' @keywords internal SignificantIntervalSearchChi <- setRefClass("SignificantIntervalSearchChi", contains = c("m_SignificantIntervalSearchFais"), methods = list( .create_instance = lib_new_search_chi, .delete_instance = lib_delete_search_chi ) ) #' Internal class #' #' @keywords internal m_SignificantFeaturesSearchWithCovariates <- setRefClass("m_SignificantFeaturesSearchWithCovariates", contains = c("m_SignificantFeaturesSearch"), fields = c(".cov_loaded"), methods = list( initialize = function(...) { callSuper(...) .self$.cov_loaded <- FALSE }, .do_read_eth_files = function(data_path, labels_path, cov_path=NULL, encoding="dominant") { if (is.null(cov_path)) { callSuper(data_path, labels_path, encoding) .self$.cov_loaded <- FALSE } else { .self$.read_eth_files_with_cov(data_path, labels_path, cov_path, encoding) .self$.cov_loaded <- TRUE } }, .do_read_plink_files = function(base_path, cov_path=NULL, encoding="dominant") { if (is.null(cov_path)) { callSuper(base_path, encoding) .self$.cov_loaded <- FALSE } else { .self$.read_plink_files_with_cov(base_path, cov_path, encoding) .self$.cov_loaded <- TRUE } }, update_covariates_file = function(cov_path) { .self$.check_if_files_are_loaded() .self$.update_covariates_file(cov_path) .self$.result_available <- FALSE .self$.cov_loaded <- TRUE }, .write_eth_files = function(x_file, y_file, cov_path=NULL, ...) 
{ if (is.null(cov_path)) { lib_write_eth_files(.self$.inst, x_file, y_file) } else { .self$.check_if_covariates_are_loaded() .self$.write_eth_files_with_cov(x_file, y_file, cov_path) } }, .check_if_covariates_are_loaded = function() { if (!.self$.cov_loaded) { #warning("assuming one covariate for all observations; to change covariates call the update_covariates_file method first") } }, .check_if_execute_is_allowed = function() { callSuper() .self$.check_if_covariates_are_loaded() }, .get_result = function() { return(lib_get_result_int(.self$.inst)) } ) ) #' Fast significant interval search with categorical covariates #' #' Internal class, please use \code{CASMAP} constructor. #' #' @keywords internal SignificantIntervalSearchFastCmh <- setRefClass("SignificantIntervalSearchFastCmh", # Beware: order matters for calling overloaded covariates methods contains = c("m_SignificantFeaturesSearchWithCovariates", "m_SignificantIntervalSearch"), methods = list( .create_instance = lib_new_search_fastcmh, .delete_instance = lib_delete_search_fastcmh, .read_eth_files_with_cov = function(x_file, y_file, cov_path, encoding) { lib_read_eth_files_with_cov_fastcmh(.self$.inst, x_file, y_file, cov_path, encoding) }, .read_plink_files_with_cov = function(base_path, cov_path, encoding) { lib_read_plink_files_with_cov_fastcmh(.self$.inst, base_path, cov_path, encoding) }, .write_eth_files_with_cov = function(x_file, y_file, cov_path, ...) { lib_write_eth_files_with_cov_fastcmh(.self$.inst, x_file, y_file, cov_path) }, .update_covariates_file = function(covariates_path) { lib_read_covariates_file_fastcmh(.self$.inst, covariates_path) }, .write_summary = function(file_path) { lib_summary_write_to_file_fastcmh(.self$.inst, file_path) } ) ) #' Internal class #' #' @keywords internal m_SignificantItemsetSearch <- setRefClass("m_SignificantItemsetSearch", contains = c("m_SignificantFeaturesSearch"), methods = list( # .do_read_eth_files = function(data_path, labels_path) # { # lib_read_eth_files_iset(.self$.inst, data_path, labels_path) # }, # .do_read_plink_files = function(base_path) # { # lib_read_plink_files_iset(.self$.inst, base_path) # }, .execute = function() { lib_execute_iset(.self$.inst, .self$.alpha, .self$.lmax) }, .write_eth_files = function(x_file, y_file, ...) { lib_write_eth_files_iset(.self$.inst, x_file, y_file) }, write_pvals_testable_itemsets = function(file_path) { .self$.check_if_result_available() lib_pvals_testable_isets_write_to_file(.self$.inst, file_path) }, write_pvals_significant_itemsets = function(file_path) { .self$.check_if_result_available() lib_pvals_significant_isets_write_to_file(.self$.inst, file_path) }, get_significant_itemsets = function() { .self$.check_if_result_available() return(lib_get_significant_itemsets(.self$.inst)) }, .get_result = function() { return(lib_get_result_iset(.self$.inst)) } ) ) #' Significant itemsets search with categorical covariates #' #' Internal class, please use \code{CASMAP} constructor. 
#' #' @keywords internal SignificantItemsetSearchFacs <- setRefClass("SignificantItemsetSearchFacs", # Beware: order matters for calling overloaded covariates methods contains = c("m_SignificantFeaturesSearchWithCovariates", "m_SignificantItemsetSearch"), methods = list( .create_instance = lib_new_search_facs, .delete_instance = lib_delete_search_facs, .read_eth_files_with_cov = function(x_file, y_file, cov_path, encoding) { lib_read_eth_files_with_cov_facs(.self$.inst, x_file, y_file, cov_path, encoding) }, .read_plink_files_with_cov = function(base_path, cov_path, encoding) { lib_read_plink_files_with_cov_facs(.self$.inst, base_path, cov_path, encoding) }, .write_eth_files_with_cov = function(x_file, y_file, cov_path) { lib_write_eth_files_with_cov_facs(.self$.inst, x_file, y_file, cov_path) }, .update_covariates_file = function(cov_path) { lib_read_covariates_file_facs(.self$.inst, cov_path) }, .get_result = function() { return(lib_get_result_facs(.self$.inst)) }, .write_summary = function(file_path) { lib_summary_write_to_file_facs(.self$.inst, file_path) } ) ) #' A method to check value is numeric and in open interval #' #' Checks if a value is numeric and strictly between two other values. #' #' @param x Value to be checked. Needs to be numeric. #' #' @param lower Lower bound. Default value is \code{0}. #' #' @param upper Upper bound. Default value is \code{1}. #' #' @return If numeric, and strictly greater than \code{lower} and #' strictly smaller than \code{upper}, then return \code{TRUE}. #' Else return \code{FALSE}. #' @keywords internal isInOpenInterval <- function(x, lower=0, upper=1){ inInterval <- TRUE if (is.finite(x)){ if ( (x <= lower) || (x >= upper) ) inInterval <- FALSE } else { inInterval <- FALSE } return (inInterval) } #' Check if a variable is boolean or not #' #' Checks if a variable is boolean, if not throws an error, otherwise #' returns boolean. #' #' @param var The variable to be checked (if boolean). #' #' @param name The name of the variable to appear in any error message. #' #' @return If not boolean (or \code{NA}), throws error. #' If \code{NA}, return \code{FALSE}. Otherwise return #' boolean value of \code{var}. #' @keywords internal checkIsBoolean <- function(var, name){ if (is.logical(var)) { # if NA, return FALSE if (is.na(var)) return(FALSE) # otherwise, just return its value (it must be TRUE/FALSE # from above check return(var) } else { message <- paste0("Error: ", name, " is not a boolean.") stop(message) } } #' Constructor for CASMAP class object. #' #' @field mode Either \code{'regionGWAS'} or \code{'higherOrderEpistasis'}. #' #' @field alpha A numeric value setting the Family-wise Error Rate (FWER). #' Must be strictly between \code{0} and \code{1}. Default #' value is \code{0.05}. #' #' @field max_comb_size A numeric specifying the maximum length of #' combinations. For example, if set to \code{4}, #' then only combinations of size between \code{1} #' and \code{4} (inclusive) will be considered. #' To consider combinations of arbitrary (maximal) #' length, use value \code{0}, which is the default #' value. #' #' @details #' Constructor for CASMAP class object, which needs the \code{mode} #' parameter to be set by the user. Please see the examples. #' #' #' @section Base method, for both modes: #' \describe{ #' \item{\code{readFiles}}{Read the data, label and possibly covariates #' files. Parameters are \code{genotype_file}, #' for the data, \code{phenotype_file} for the #' labels and (optional) \code{covariates_file} #' for the covariates. 
The option #' \code{plink_file_root} is not supported #' in the current version, but will be supported #' in future versions.} #' #' \item{\code{setMode}}{Can set/change the mode, but note that any #' data files will need to read in again using #' the \code{readFiles} command.} #' #' \item{\code{setTargetFWER}}{Can set/change the Family-wise #' Error Rate (FWER). Takes a numeric #' parameter \code{alpha}, strictly between #' \code{0} and \code{1}.} #' #' \item{\code{execute}}{Once the data files have been read, can execute the #' algorithm. Please note that, depending on the size #' of the data files, this could take a long time.} #' #' \item{\code{getSummary}}{Returns a data frame with a summary of the #' results from the execution, but not any #' significant regions/itemsets. See #' \code{getSignificantRegions}, #' \code{getSignificantInteractions}, and #' \code{getSignificantClusterRepresentatives}. } #' #' \item{\code{writeSummary}}{Directly write the information #' from \code{getSummary} to file.} #' #' } #' #' @section \code{regionGWAS} Methods: #' \describe{ #' \item{\code{getSignificantRegions}}{Returns a data frame with the #' the significant regions. Only valid when #' \code{mode='regionGWAS'}.} #' #' \item{\code{getSignificantClusterRepresentatives}}{Returns a data #' frame with the #' the representatives of the significant #' clusters. This will be a subset of the regions #' returned from \code{getSignificantRegions}. #' Only valid when \code{mode='regionGWAS'}.} #' #' \item{\code{writeSignificantRegions}}{Writes the data from #' \code{getSignificantRegions} to file, which #' must be specified in the parameter #' \code{path}. #' Only valid when \code{mode='regionGWAS'}.} #' #' \item{\code{writeSignificantClusterRepresentatives}}{Writes the data #' from #' \code{getSignificantClusterRepresentatives} to #' file, which must be specified in the parameter #' \code{path}. #' Only valid when \code{mode='regionGWAS'}.} #' #' } #' #' @section \code{higherOrderEpistasis} Methods: #' \describe{ #' \item{\code{getSignificantInteractions}}{Returns the frame #' from \code{getSignificantInteractions} to #' file, which must be specified in the parameter #' \code{path}. Only valid #' when \code{mode='higherOrderEpistasis'}.} #' #' \item{\code{writeSignificantInteractions}}{Writes a data frame with #' the significant interactions. Only valid #' when \code{mode='higherOrderEpistasis'}.} #' #' } #' #' @section References: #' A. Terada, M. Okada-Hatakeyama, K. Tsuda and J. Sese #' \emph{Statistical significance of combinatorial regulations}, #' Proceedings of the National Academy of Sciences (2013) 110 #' (32): 12996-13001 #' #' F. Llinares-Lopez, D. G. Grimm, D. Bodenham, #' U. Gieraths, M. Sugiyama, B. Rowan and K. Borgwardt, #' \emph{Genome-wide detection of intervals of genetic heterogeneity #' associated with complex traits}, #' ISMB 2015, Bioinformatics (2015) 31 (12): i240-i249 #' #' L. Papaxanthos, F. Llinares-Lopez, D. Bodenham, #' K .Borgwardt, #' \emph{Finding significant combinations of features in the #' presence of categorical covariates}, Advances #' in Neural Information Processing Systems 29 (NIPS 2016), 2271-2279. #' #' F. Llinares-Lopez, L. Papaxanthos, D. Bodenham, #' D. Roqueiro and K .Borgwardt, #' \emph{Genome-wide genetic heterogeneity discovery #' with categorical covariates}. #' Bioinformatics 2017, 33 (12): 1820-1828. 
#' #' @export #' @examples #' #' ## An example using the "regionGWAS" mode #' fastcmh <- CASMAP(mode="regionGWAS") # initialise object #' #' datafile <- getExampleDataFilename() # file name of example data #' labelsfile <- getExampleLabelsFilename() # file name of example labels #' covfile <- getExampleCovariatesFilename() # file name of example covariates #' #' # read the data, labels and covariate files #' fastcmh$readFiles(genotype_file=getExampleDataFilename(), #' phenotype_file=getExampleLabelsFilename(), #' covariate_file=getExampleCovariatesFilename() ) #' #' # execute the algorithm (this may take some time) #' fastcmh$execute() #' #' #get the summary results #' summary_results <- fastcmh$getSummary() #' #' #get the significant regions #' sig_regions <- fastcmh$getSignificantRegions() #' #' #get the clustered representatives for the significant regions #' sig_cluster_rep <- fastcmh$getSignificantClusterRepresentatives() #' #' #' ## Another example of regionGWAS #' fais <- CASMAP(mode="regionGWAS") # initialise object #' #' # read the data and labels, but no covariates #' fastcmh$readFiles(genotype_file=getExampleDataFilename(), #' phenotype_file=getExampleLabelsFilename()) #' #' #' ## Another example, doing higher order epistasis search #' facs <- CASMAP(mode="higherOrderEpistasis") # initialise object #' CASMAP <- setRefClass("CASMAP", fields = c('.mode', '.alpha', '.max_comb_size', '.core', '.use_covariates'), methods = list( initialize = function(mode, alpha=0.05, max_comb_size=0) { if (missing(mode)){ message <- modeErrorMessage() stop(message) } #if gets past those checks, then setMode setMode(mode) setTargetFWER(alpha) .self$setMaxCombinationSize(max_comb_size) .self$.core <- NULL .self$.use_covariates <- NULL }, getMode = function() { return(.self$.mode) }, getTargetFWER = function() { return(.self$.alpha) }, getMaxCombinationSize = function() { return(.self$.max_comb_size) }, isInitialized = function() { return(!is.null(.self$.core)) }, .checkInitialized = function() { if (is.null(.self$.core)){ stop("Object not initialized or hyperparameters changed since last execution. Please call method 'readFiles' prior to execute.") } }, setMode = function(mode){ if (!is.character(mode)){ message <- modeErrorMessage() stop(message) } if (modeNeedsMoreChars(mode)){ message <- modeLengthErrorMessage() stop(message) } #it is a character string, so check: if (!isRegionGWASString(mode) && !isHigherOrderEpistasisString(mode)){ message <- modeErrorMessage() stop(message) } #if reaches this stage, it is correctly set: if (isRegionGWASString(mode)){ mode <- getRegionGWASString() } #if reaches this stage, it is correctly set: if (isHigherOrderEpistasisString(mode)){ mode <- getHigherOrderEpistasisString() } .self$.mode <- mode .self$.core <- NULL .self$.use_covariates <- NULL }, setTargetFWER = function(alpha=0.05) { #check alpha if (!isInOpenInterval(alpha)){ stop("Target FWER 'alpha' needs to be a value strictly between 0 and 1.") } .self$.alpha <- alpha .self$.core <- NULL .self$.use_covariates <- NULL }, setMaxCombinationSize = function(max_comb_size=0) { if (is.finite(max_comb_size)){ max_comb_size <- floor(max_comb_size) if (.self$.mode == 'higherOrderEpistasis' & max_comb_size > 0){ print("The current implementation of higher-order epistasis analyses does not support a limited maximum number of interacting variants. 
The analysis will be carried out for an unlimited order.") max_comb_size <- 0 } if (max_comb_size < 0){ max_comb_size <- 0 } } else { stop("Maximum combination size 'max_comb_size' needs to be either 0 (unlimited) or a positive integer.") } .self$.max_comb_size <- max_comb_size .self$.core <- NULL .self$.use_covariates <- NULL }, .createCore = function() { if (!is.null(.self$.use_covariates)){ # Instantiate object of the appropriate depending on options if (.self$.mode == 'regionGWAS' & !.self$.use_covariates){ .self$.core <- SignificantIntervalSearchChi() } else if (.self$.mode == 'regionGWAS' & .self$.use_covariates){ .self$.core <- SignificantIntervalSearchFastCmh() } else if (.self$.mode == 'higherOrderEpistasis'){ .self$.core <- SignificantItemsetSearchFacs() } # Set parameters of the object .self$.core$set_alpha(.self$.alpha) .self$.core$set_lmax(.self$.max_comb_size) } else { .self$.core <- NULL .self$.use_covariates <- NULL } }, readFiles = function(genotype_file=NULL, phenotype_file=NULL, covariate_file=NULL, plink_file_root=NULL, encoding="dominant") { # Check whether user decided to use tab-separated text files (binary_format) or PLINK formatted files (plink_format) if (!missing(plink_file_root)){ stop("plink format not currently supported. Please use binary format and specify 'genotype_file' and 'phenotype_file'.") } # At least one of the two must be two, otherwise raise an error binary_format <- !is.null(genotype_file) & !is.null(phenotype_file) if (missing(genotype_file) || missing(phenotype_file)){ stop("'genotype_file' and 'phenotype_file' must both be specified as arguments.") } binary_format <- TRUE plink_format <- FALSE # Check that encoding type is correct if (!is.element(encoding, c('dominant', 'recessive'))){ stop("Currently implemented encodings: 'dominant' and 'recessive' >") } # If an additional covariates file was specified, set the object into "CMH mode" .self$.use_covariates <- !missing(covariate_file) # Create appropriate "core" object .self$.createCore() # Give preference to plink_format over binary_format if, by any reason, a user decides to mess around and # specify both #plink_forat not currently supported #just to make sure it is set to FALSE plink_format <- FALSE if (plink_format){ if(.self$.use_covariates){ .self$.core$read_plink_files(plink_file_root, covariate_file, encoding) } else { .self$.core$read_plink_files(plink_file_root, encoding) } } else if(binary_format){ if(.self$.use_covariates){ .self$.core$read_eth_files(data_path=genotype_file, labels_path=phenotype_file, cov_path=covariate_file, encoding=encoding) } else { .self$.core$read_eth_files(data_path=genotype_file, labels_path=phenotype_file, encoding=encoding) } } else{ stop("'genotype_file' and 'phenotype_file' must both be specified as arguments.") } }, execute = function(){ .self$.checkInitialized() .self$.core$execute() }, writeSummary = function(path){ .self$.checkInitialized() .self$.core$write_summary(path) }, writeProfile = function(path){ .self$.checkInitialized() .self$.core$write_profile(path) }, writeSignificantRegions = function(path){ if (.self$.mode != 'regionGWAS'){ stop("Method 'writeSignificantRegions' only available for region-based GWAS analyses.") } .self$.checkInitialized() .self$.core$write_pvals_significant_intervals(path) }, writeSignificantClusterRepresentatives = function(path){ if (.self$.mode != 'regionGWAS'){ stop("Method 'writeSignificantClusterRepresentatives' only available for region-based GWAS analyses.") } .self$.checkInitialized() 
.self$.core$write_filtered_intervals(path) }, writeSignificantInteractions = function(path){ if (.self$.mode != 'higherOrderEpistasis'){ stop("Method 'writeSignificantInteractions' only available for higher-order epistasis analyses.") } .self$.checkInitialized() .self$.core$write_pvals_significant_itemsets(path) }, getSummary = function(){ .self$.checkInitialized() return(.self$.core$get_result()) }, getSignificantRegions = function(){ if (.self$.mode != 'regionGWAS'){ stop("Method 'getSignificantRegions' only available for region-based GWAS analyses.") } .self$.checkInitialized() return(.self$.core$get_significant_intervals()) }, getSignificantClusterRepresentatives = function(){ if (.self$.mode != 'regionGWAS'){ stop("Method 'getSignificantClusterRepresentatives' only available for region-based GWAS analyses.") } .self$.checkInitialized() return(.self$.core$get_filtered_intervals()) }, getSignificantInteractions = function(){ if (.self$.mode != 'higherOrderEpistasis'){ stop("Method 'getSignificantInteractions' only available for higher-order epistasis analyses.") } .self$.checkInitialized() return(.self$.core$get_significant_itemsets()) }, show = function(){ cat("CASMAP object with:", "\n") cat(paste(" * Mode =", .self$.mode), "\n") cat(paste(" * Target FWER =", .self$.alpha), "\n") cat(paste(" * Maximum combination size =", .self$.max_comb_size), "\n") if (!is.null(.self$.core)){ cat(" * Input files read", "\n") cat(paste(" * Covariate =", .self$.use_covariates), "\n") } else{ cat(" * No input files read", "\n") } } ) )
/scratch/gouwar.j/cran-all/cranData/CASMAP/R/wrapper.R
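Beyond the get* accessors shown in the class examples, each result getter has a write* counterpart that streams the same information straight to disk. The following sketch is illustrative only; it assumes a `fastcmh` object has already been created, its files read with readFiles(), and execute() run, as in the examples above.

out_dir <- tempdir()
fastcmh$writeSummary(file.path(out_dir, "summary.txt"))
fastcmh$writeSignificantRegions(file.path(out_dir, "sig_regions.txt"))
fastcmh$writeSignificantClusterRepresentatives(file.path(out_dir, "sig_cluster_reps.txt"))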
#' @useDynLib CASMAP
#' @exportPattern "^[[:alpha:]]+"
#' @importFrom Rcpp evalCpp
#' @importFrom Rcpp sourceCpp
#' @importFrom methods new
NULL

#' Global variables environment
#'
#' An environment to store a few global variables. Internal.
#'
#' @keywords internal
CASMAPenv <- new.env(parent=emptyenv())

assign("regionGWASString", "regionGWAS", envir=CASMAPenv)
assign("higherOrderEpistasisString", "higherOrderEpistasis", envir=CASMAPenv)
assign("minModeLength", 3, envir=CASMAPenv)
/scratch/gouwar.j/cran-all/cranData/CASMAP/R/zzz.R
## ----include = FALSE----------------------------------------------------- #need to make vignette compile library(Rcpp) library(CASMAP) ## ----init 1-------------------------------------------------------------- library(CASMAP) # An example using the "regionGWAS" mode fastcmh <- CASMAP(mode="regionGWAS") # initialise object fastcmh$setTargetFWER(0.01) # set target FWER ## ----init 2-------------------------------------------------------------- library(CASMAP) # Another example, doing higher order epistasis search with target FWER 0.01 facs <- CASMAP(mode="higherOrderEpistasis", alpha=0.01) print(facs) ## ----read data----------------------------------------------------------- library(CASMAP) fastcmh <- CASMAP(mode="regionGWAS") # initialise object datafile <- getExampleDataFilename() # file name of example data labelsfile <- getExampleLabelsFilename() # file name of example labels covfile <- getExampleCovariatesFilename() # file name of example covariates # read the data, labels and (optionally) covariate files fastcmh$readFiles(genotype_file=getExampleDataFilename(), phenotype_file=getExampleLabelsFilename(), covariate_file=getExampleCovariatesFilename()) #The object now displays that data files have been read, and covariates are used print(fastcmh) ## ----data format, eval=FALSE--------------------------------------------- # #to see where these data files are located on your local drive: # print(getExampleDataFilename()) # # ## Example: # ## [1] "/path/to/pkgs/CASMAP/extdata/CASMAP_example_data_1.txt" ## ----execute------------------------------------------------------------- # execute the algorithm (this may take some time) fastcmh$execute() ## ----summary results----------------------------------------------------- #get the summary results summary_results <- fastcmh$getSummary() print(summary_results) ## ----significant regions------------------------------------------------- #get the significant regions sig_regions <- fastcmh$getSignificantRegions() print(sig_regions) ## ----significant reps---------------------------------------------------- #get the clustered representatives for the significant regions sig_cluster_rep <- fastcmh$getSignificantClusterRepresentatives() print(sig_cluster_rep) ## ----no covariates------------------------------------------------------- ## Another example of regionGWAS fais <- CASMAP(mode="regionGWAS") # initialise object # read the data and labels, but no covariates fais$readFiles(genotype_file=getExampleDataFilename(), phenotype_file=getExampleLabelsFilename()) print(fais) ## ----encoding method----------------------------------------------------- library(CASMAP) fastcmh <- CASMAP(mode="regionGWAS") # using the dominant encoding (default) fastcmh$readFiles(genotype_file=getExampleDataFilename(), phenotype_file=getExampleLabelsFilename(), covariate_file=getExampleCovariatesFilename(), encoding="dominant") # using the dominant encoding (default) fastcmh$readFiles(genotype_file=getExampleDataFilename(), phenotype_file=getExampleLabelsFilename(), covariate_file=getExampleCovariatesFilename(), encoding="recessive")
/scratch/gouwar.j/cran-all/cranData/CASMAP/inst/doc/intro.R
---
title: "Introduction to CASMAP"
author: "Dean Bodenham"
date: "27 June 2018"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{CASMAP-introduction}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r include = FALSE}
#need to make vignette compile
library(Rcpp)
library(CASMAP)
```

# Introduction

The `CASMAP` package provides methods for searching for combinatorial associations in binary data while taking categorical covariates into account. There are two main modes: the methods either search for region-based mappings or for higher order epistatic interactions.

## Creating `CASMAP` objects

To create a `CASMAP` object, it is necessary to specify the mode. The first example below creates an object that will perform a region-based GWAS search, and then sets the target family-wise error rate to `0.01`.

```{r init 1}
library(CASMAP)

# An example using the "regionGWAS" mode
fastcmh <- CASMAP(mode="regionGWAS")   # initialise object
fastcmh$setTargetFWER(0.01)            # set target FWER
```

The next example shows how to create an object that will search for arbitrary combinations, i.e. a higher order epistatic search. Note that it is also possible to set the target family-wise error rate when constructing the object by setting `alpha`. By printing the object, one can see certain information. The field *Maximum combination size = 0* indicates that combinations of all possible lengths will be considered. In future versions, it will be possible to limit this number, for example to combinations of maximum length 4.

<!--Furthermore, while the default option is to search for combinations of arbitrary length, it is also possible to limit the length of combinations that are considered. For example, setting the parameter `max_comb_size=4` below limits the search to only combinations of length 1, 2, 3, and 4. -->

```{r init 2}
library(CASMAP)

# Another example, doing higher order epistasis search with target FWER 0.01
facs <- CASMAP(mode="higherOrderEpistasis", alpha=0.01)
print(facs)
```

## Reading in the data files

Once the object is created, the next step is to read in the data files. The `readFiles` command is used, and paths to the data files should be specified for the parameters `genotype_file`, `phenotype_file` and (optionally) `covariate_file`. We have provided example data files with the package, as well as functions to easily get the paths to these data files:

```{r read data}
library(CASMAP)

fastcmh <- CASMAP(mode="regionGWAS")        # initialise object

datafile <- getExampleDataFilename()        # file name of example data
labelsfile <- getExampleLabelsFilename()    # file name of example labels
covfile <- getExampleCovariatesFilename()   # file name of example covariates

# read the data, labels and (optionally) covariate files
fastcmh$readFiles(genotype_file=getExampleDataFilename(),
                  phenotype_file=getExampleLabelsFilename(),
                  covariate_file=getExampleCovariatesFilename())

#The object now displays that data files have been read, and covariates are used
print(fastcmh)
```

## Data format

Note that the `CASMAP` methods expect the data file to be a text file consisting of space-separated `0`s and `1`s, in a $p \times n$ matrix, where each of the $p$ rows is a feature, and each of the $n$ columns is a sample/subject. The labels and covariates files are single columns of $n$ entries, where each entry is `0` or `1`.

To see an example of the data format, take a look at the included example files, the paths to which are given by the commands `getExampleDataFilename`, `getExampleLabelsFilename` and `getExampleCovariatesFilename`:

```{r data format, eval=FALSE}
#to see where these data files are located on your local drive:
print(getExampleDataFilename())

## Example:
## [1] "/path/to/pkgs/CASMAP/extdata/CASMAP_example_data_1.txt"
```

In future versions the PLINK data format will be supported.

## Executing the algorithm

Once you have read in the data, labels and covariates files, you are ready to execute the algorithm. Simply use the `execute` command. Note that, depending on the size of your data set, this could take some time.

```{r execute}
# execute the algorithm (this may take some time)
fastcmh$execute()
```

## Extracting the results

There are two main sets of results:

1. Summary results
2. Information on significant regions/significant interactions

The summary results provide information on how many regions/interactions were processed, how many are testable, and what are the significance and testable thresholds:

```{r summary results}
#get the summary results
summary_results <- fastcmh$getSummary()
print(summary_results)
```

It is also possible to write this information to file directly using the `writeSummary` command.

The significant-regions output lists all the regions that are considered significant. However, it is possible that these regions overlap into clusters. The most significant regions in these clusters can be extracted using the `getSignificantClusterRepresentatives` command. In the example below, there is only one significant region, so it is its own cluster representative:

```{r significant regions}
#get the significant regions
sig_regions <- fastcmh$getSignificantRegions()
print(sig_regions)
```

```{r significant reps}
#get the clustered representatives for the significant regions
sig_cluster_rep <- fastcmh$getSignificantClusterRepresentatives()
print(sig_cluster_rep)
```

Note that the $p$-value and odds ratio for the regions/representatives are provided along with the location. For the `higherOrderEpistasis` mode, the method `getSignificantInteractions` should be used (and there are no cluster representatives).

## Other examples

It is also possible to perform a search without any covariates:

```{r no covariates}
## Another example of regionGWAS
fais <- CASMAP(mode="regionGWAS")   # initialise object

# read the data and labels, but no covariates
fais$readFiles(genotype_file=getExampleDataFilename(),
               phenotype_file=getExampleLabelsFilename())

print(fais)
```

## Setting the encoding method

The binary data can be encoded with either a dominant or a recessive encoding. The default for `readFiles` is `dominant`, but it is also possible to specify the encoding explicitly:

```{r encoding method}
library(CASMAP)
fastcmh <- CASMAP(mode="regionGWAS")

# using the dominant encoding (default)
fastcmh$readFiles(genotype_file=getExampleDataFilename(),
                  phenotype_file=getExampleLabelsFilename(),
                  covariate_file=getExampleCovariatesFilename(),
                  encoding="dominant")

# using the recessive encoding
fastcmh$readFiles(genotype_file=getExampleDataFilename(),
                  phenotype_file=getExampleLabelsFilename(),
                  covariate_file=getExampleCovariatesFilename(),
                  encoding="recessive")
```

## Future releases

Note that future versions of the package will include the option to read PLINK files, and the option to set the maximum combination length.
/scratch/gouwar.j/cran-all/cranData/CASMAP/vignettes/intro.Rmd
#MI.test tests the independence between a feature and the outcome. It prints out p.value, the smaller the p.value, the stronger evidence of dependence between them. MI.test=function(feature, outcome, k1, k2){ #feature is the vector of X, outcome is the vector of Y, k1 and k2 are the corresponding number of categories, X and Y must have the same sample size. n=length(feature); test.stat=2*n*MI.z(table(feature, outcome))+(k1-1)*(k2-1); p.value=pchisq(test.stat,(k1-1)*(k2-1),lower.tail = F); return(p.value); } #' AQI Index #' #' A quantitative measure of dataset quality. The AQI Index score indicates the degree that how features are associated with the outcome in a dataset. (synonyms of "feature": "variable" "factor" "attribute") \cr #' For more information, please refer to the corresponding publication: Shi, J., Zhang, J. and Ge, Y. (2019), "An Association-Based Intrinsic Quality Index for Healthcare Dataset Ranking" <doi:10.1109/ICHI.2019.8904553> #' @param data data frame (features as columns and observations as rows). The outcome variable (Y) MUST be the last column. It requires at least one features and only one outcome. Both the features (Xs) and the outcome (Y) MUST be discrete (if not naturally discrete, you may try the `autoBin.binary` function in the same package). #' @param alpha.filter level of significance for the mutual information test of independence in step 2 (<doi:10.1109/ICHI.2019.8904553>). By default, `alpha.filter = 0.2`. #' @return The AQI Index score. #' @importFrom EntropyEstimation Entropy.z MI.z #' @examples #' ## Generate a toy dataset: "data" #' n <- 10000 #' set.seed(1) #' x1 <- rbinom(n, 3, 0.5) + 0.2 #' set.seed(2) #' x2 <- rbinom(n, 2, 0.8) + 0.5 #' set.seed(3) #' x3 <- rbinom(n, 5, 0.3) #' set.seed(4) #' error <- round(runif(n, min=-1, max=1)) #' y <- x1 + x3 + error #' data <- data.frame(cbind(x1, x2, x3, y)) #' colnames(data) <- c("feature1", "feature2", "feature3", "Y") #' #' ## Calculate the AQI score of "data" #' AQI(data) #' @export AQI <- function(data, alpha.filter=0.2){ #outcome must be in the last column data <- as.data.frame(data) score=NULL # Step 1: return dependent features, step1Index[] n=nrow(data) u=1/log(log(n)) step1Index=vector() count=0 k2=length(table(data[length(data)])) #outcome categories for(fi in 1:(length(data)-1)){ k1=length(table(data[fi])) #feature categories p_value=MI.test(data[[fi]],data[[length(data)]], k1, k2) if(p_value<=alpha.filter){ count=count+1 step1Index[count]=fi } } indexCASMI=vector() if(count==0){ warning("No associated variables are found toward the outcome, thus the AQI score is zero. First, please ensure that the outcome variable is the last column. Second, please ensure that a reasonable alpha.filter is set, or use the default value.") score=0 } else{ # Step 2: select features by joint SMI with Hz estimator # select the best feature maxKappaStar=0 maxIndex=0 for(index in step1Index){ #valueKappaStar=kappa.star(data[[index]], data[[length(data)]]) feature=data[[index]]; outcome=data[[length(data)]]; #valueKappaStar=MI.z(table(feature, outcome))/Entropy.z(table(outcome))*(1-length(which(table(feature)==1))/n) valueKappaStar=MI.z(table(feature, outcome))/Entropy.z(table(outcome))*(1-sum(table(feature)==1L)/n)^u if(valueKappaStar > maxKappaStar){ maxKappaStar=valueKappaStar maxIndex=index } } indexCASMI=c(indexCASMI, maxIndex) if(length(step1Index)==1) {return(100/(1-(log(maxKappaStar))/exp(1)))} # select the 2nd, 3rd, ... 
best features by joint maxmaxKappaStar=0 while(maxKappaStar>maxmaxKappaStar & length(indexCASMI)<count){ maxmaxKappaStar=maxKappaStar step1Index=step1Index[!step1Index==maxIndex] maxKappaStar=0 maxIndex=0 for(index in step1Index){ tmpIndex1=c(index, indexCASMI, length(data)) tmpIndex2=c(index, indexCASMI) ftable=ftable(table(data[tmpIndex1])) ftableOfFeatures=ftable(table(data[tmpIndex2])) #valueKappaStar=kappa.star2(ftable, ftableOfFeature, data[[length(data)]]) outcome=data[[length(data)]] #valueKappaStar=MI.z(ftable)/Entropy.z(table(outcome))*(1-length(which(ftableOfFeatures==1))/n) valueKappaStar=MI.z(ftable)/Entropy.z(table(outcome))*(1-sum(ftableOfFeatures==1L)/n)^u if(valueKappaStar > maxKappaStar){ maxKappaStar=valueKappaStar maxIndex=index } } if(maxKappaStar>maxmaxKappaStar+10^-14){ # +10^-14 is to solve the problem of precision indexCASMI=c(indexCASMI, maxIndex) } } score=maxmaxKappaStar } return(100/(1-(log(score))/exp(1))) }
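# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the package API above): how the final AQI
# score rescales the overall Kappa* value. The transformation is the same
# 100 / (1 - log(Kappa*) / e) used in the return statement of AQI(), mapping
# Kappa* values in (0, 1] onto a (0, 100] scale, with Kappa* = 1 giving 100.
aqi_scale <- function(kappa.star) {
  100 / (1 - log(kappa.star) / exp(1))
}
# A few hypothetical Kappa* levels:
# aqi_scale(1)     # 100    -> strongest possible association
# aqi_scale(0.5)   # approx. 79.7
# aqi_scale(0.1)   # approx. 54.1
# aqi_scale(0.01)  # approx. 37.1
# ---------------------------------------------------------------------------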
/scratch/gouwar.j/cran-all/cranData/CASMI/R/AQI.R
# return suggested threshold for a quantitative variable autoBinThresh <- function(data, colIndex){ # column index of X that needs auto binning finalThresh <- NA if (colIndex >= ncol(data) || colIndex <= 0) { return(finalThresh) } # deal with NAs, equal values, and other problems if ( (sum(!is.na(data[,colIndex])) <= 1) || (all(na.omit(data[,colIndex]) == na.omit(data[,colIndex])[1])) || !is.numeric(data[,colIndex]) ) { return(finalThresh) } sortedXY <- na.omit( data[order(data[,colIndex]), c(colIndex,ncol(data))] ) # subset, sort by X maxMI <- 0 for(i in 1:(nrow(sortedXY) - 1)){ threshold <- (sortedXY[i,1] + sortedXY[i+1,1]) / 2 tmpMI <- MI.z(table(cut(sortedXY[,1], breaks = c(-Inf, threshold, Inf), labels = c("L", "H")), sortedXY[,ncol(sortedXY)])) if(tmpMI > maxMI){ finalThresh <- threshold maxMI <- tmpMI } } return(finalThresh) } #' Auto Binning for Quantitative Variables - Binary #' #' Automatically suggest an optimal cutting point for categorizing a quantitative variable before using the \pkg{CASMI}-based functions. This function does binary cutting, that is, to convert the quantitative variable into a categorical variable with two levels/categories. #' @param data data frame (features as columns and observations as rows). An outcome variable is required. The outcome variable (Y) MUST be the last column. #' @param index index or a vector of indices of the quantitative variables that need to be automatically categorized. #' @return `autoBin.binary()` returns the entire data frame after automatic binary categorization for the selected quantitative variable(s). #' @examples #' ## Use the "iris" dataset embedded in R #' data("iris") #' newData <- autoBin.binary(iris, c(1,2,3,4)) #' newData #' #' @importFrom EntropyEstimation MI.z #' @importFrom stats na.omit #' #' @export # return the finalized data frame after auto binning autoBin.binary <- function(data, index){ if (!is.data.frame(data)) { stop("The 'data' input must be a dataframe.") } data <- as.data.frame(data) # Check if the inputs are of correct type if (!is.numeric(index)) { stop("The 'index' input must be an index or a vector of indices.") } for(i in 1:length(index)){ colIndex <- index[i] finalThresh <- autoBinThresh(data, colIndex) if(!is.na(finalThresh)){ data[,colIndex] <- cut(data[,colIndex], breaks = c(-Inf, finalThresh, Inf)) # cut the original variable by the threshold } else { sentence1 <- "The automatic categorization is not done for this variable -- Column Index: " sentence1 <- paste0(sentence1, colIndex) sentence2 <- " -- due to the following possible problems: (1) the column is not of a numeric type; (2) the column index is not valid; (3) there is none or only one (distinct) value in the following variable, so no categorization is needed." sentence1 <- paste0(sentence1, sentence2) warning(sentence1) } } return(as.data.frame(data)) }
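# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the package API above): the idea behind
# autoBinThresh() on a toy data set. Candidate thresholds are the midpoints
# between consecutive sorted values of the selected column; the midpoint that
# maximises MI.z() between the binned variable and the outcome is returned.
# The toy variables below are hypothetical and only meant as an example.
#
# set.seed(42)
# x <- rnorm(100)
# y <- as.integer(x > 0.3)                     # outcome driven by a cutpoint
# toy <- data.frame(x = x, y = y)              # outcome must be the last column
# thresh <- autoBinThresh(toy, colIndex = 1)   # expected to be close to 0.3
# table(cut(toy$x, breaks = c(-Inf, thresh, Inf), labels = c("L", "H")), toy$y)
# ---------------------------------------------------------------------------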
/scratch/gouwar.j/cran-all/cranData/CASMI/R/AutoBin_Binary.R
# MIz.test tests the independence between a feature and the outcome (with z estimator). It prints out p.value, the smaller the p.value, the stronger evidence of dependence between them. MIz.test <- function(feature, outcome) { #feature is the vector of X, outcome is the vector of Y, k1 and k2 are the corresponding number of categories, X and Y must have the same sample size. # considering NA in the feature XYtable <- table(feature, outcome) n <- sum(XYtable) k1 = length(table(feature)) # feature categories k2 = length(table(outcome)) # outcome categories test.stat = 2 * n * MI.z(XYtable) + (k1 - 1) * (k2 - 1) p.value = pchisq(test.stat, (k1 - 1) * (k2 - 1), lower.tail = F) return(p.value) } g_function <- function(x1, x2, x3) { # return a 3*1 matrix vec = c(1 / x2,-(x1 - x3) / x2 ^ 2,-1 / x2) return(as.matrix(vec, nrow = 3, ncol = 1)) } # confidence interval (CI) of SMI, whatever the estimator is. return E in CI. E.CI.smi <- function(XYtable, alpha) { # input the frequency table between the feature and the outcome K1 = nrow(XYtable) K2 = ncol(XYtable) K = K1 * K2 n = sum(XYtable) # make sure XYtable[K1,K2] is not zero. If zero, switch. if (XYtable[K1, K2] == 0) { nonZeroIndex = which(XYtable != 0, arr.ind = T) i = nonZeroIndex[1, 1] j = nonZeroIndex[1, 2] tmp = XYtable[i,] XYtable[i,] = XYtable[K1,] XYtable[K1,] = tmp tmp = row.names(XYtable)[i] row.names(XYtable)[i] = row.names(XYtable)[K1] row.names(XYtable)[K1] = tmp tmp = XYtable[, j] XYtable[, j] = XYtable[, K2] XYtable[, K2] = tmp tmp = colnames(XYtable)[j] colnames(XYtable)[j] = colnames(XYtable)[K2] colnames(XYtable)[K2] = tmp } Xmargin = rowSums(XYtable) Ymargin = colSums(XYtable) # first row of matrix A pK1. = Xmargin[K1] / sum(Xmargin) derX = vector() for (i in 1:(K1 - 1)) { pi. = Xmargin[i] / sum(Xmargin) derX[i] = log(pK1.) - log(pi.) 
} A1 = vector() for (i in 1:(K1 - 1)) { for (j in 1:K2) { A1 = c(A1, derX[i]) } } for (i in ((K1 - 1) * K2 + 1):(K - 1)) { A1[i] = 0 } # second row of matrix A p.K2 = Ymargin[K2] / sum(Ymargin) derY = vector() for (i in 1:(K2 - 1)) { p.j = Ymargin[i] / sum(Ymargin) derY[i] = log(p.K2) - log(p.j) } A2 = vector() for (i in 1:K1) { for (j in 1:K2) { if (j == K2) { A2 = c(A2, 0) } else{ A2 = c(A2, derY[j]) } } } A2 = A2[-length(A2)] # third row of matrix A pK = XYtable[K1, K2] / n A3 = vector() for (i in 1:K1) { for (j in 1:K2) { pk = XYtable[i, j] / n if (pk == 0) { A3 = c(A3, 0) } else{ A3 = c(A3, log(pK) - log(pk)) } } } A3 = A3[-length(A3)] A = as.matrix(rbind(A1, A2, A3)) # the Sigma matrix p = c(t(XYtable)) / n Sigma = matrix( data = NA, nrow = K - 1, ncol = K - 1, byrow = TRUE ) for (i in 1:(K - 1)) { for (j in 1:(K - 1)) { if (i == j) { Sigma[i, i] = p[i] * (1 - p[i]) } else{ Sigma[i, j] = -p[i] * p[j] } } } # sigma hat HXhat = entropy.plugin(Xmargin) HYhat = entropy.plugin(Ymargin) HXYhat = entropy.plugin(XYtable) g = g_function(HXhat, HYhat, HXYhat) sigmaHat = sqrt(t(g) %*% A %*% Sigma %*% t(A) %*% g) # E in CI Zstat = qnorm(alpha / 2, lower.tail = FALSE) # 95% confidence level E = Zstat * (sigmaHat / sqrt(n)) return(E) } ftable2xytable <- function(ftable) { # convert an ftable to a regular XYtable where X are joint and Y is the outcome return(ftable[rowSums(ftable)!=0,]) } Kappas <- function(data, idxFeatures){ # input data, index(es) of feature(s); outcome must be in the last column # ftable automatically handles NA values tmpIndexFnO = c(idxFeatures, ncol(data)) # Index of features and outcome ftable = ftable(table(data[tmpIndexFnO])) Xmargin <- rowSums(ftable) Ymargin <- colSums(ftable) n <- sum(ftable) kappa.z = MI.z(ftable) / Entropy.z(Ymargin) # same as smi.z kappaStar = kappa.z * (1 - sum(Xmargin == 1L) / n) return(list(kappa.z = kappa.z, kappa.star = kappaStar)) } generateResults <- function(data, indexCASMI, kappaStarCASMI, alpha) { # return a set of results based on indexCASMI nameCASMI = names(data[indexCASMI]) # 1. SMIziCASMI: standardized mutual information (with z estimator, based on H(Y)) of each selected feature. Not cumulative. # 2. PziCASMI: p-value of mutual information test (with z estimator) for each selected feature. 
(to test independence, prefer large n) SMIziCASMI = vector() SMIziCASMI.CI = vector() PziCASMI = vector() for (i in 1:length(indexCASMI)) { XYtable = table(data[,indexCASMI[i]], data[,ncol(data)]) # XYtable handled NA automatically Ymargin = colSums(XYtable) mi_z = MI.z(XYtable) # EntropyEstimation package smi_z = mi_z / Entropy.z(Ymargin) SMIziCASMI = c(SMIziCASMI, smi_z) low = smi_z - E.CI.smi(XYtable, alpha) uppr = smi_z + E.CI.smi(XYtable, alpha) SMIziCASMI.CI <- rbind(SMIziCASMI.CI, c(low, uppr)) n = sum(XYtable) K1 = nrow(XYtable) K2 = ncol(XYtable) PziCASMI = c(PziCASMI, pchisq( 2 * n * mi_z + (K1 - 1) * (K2 - 1), df = (K1 - 1) * (K2 - 1), lower.tail = FALSE )) # same as MIz.test() } ####### sample_size <- apply(data[indexCASMI], 2, function(col) sum(!is.na(col))) df <- data.frame( indexCASMI, sample_size, round(kappaStarCASMI, 4), round(SMIziCASMI, 4), round(SMIziCASMI.CI, 4), sprintf("%.4f", round(PziCASMI, 4)), nameCASMI, row.names = NULL ) colnames(df) <- c( "Var.Idx", "n", "Kappa*", "SMIz", "SMIz.Low", "SMIz.Upr", "p.MIz", "Var.Name" ) ### overall CASMI score for all selected features kappa.star.hat <- Kappas(data, indexCASMI)$kappa.star tmpIndexFnO = c(indexCASMI, ncol(data)) # Index of features and outcome ftable = ftable(table(data[tmpIndexFnO])) tmpXYtable <- ftable2xytable(ftable) E <- E.CI.smi(tmpXYtable, alpha) low = kappa.star.hat - E low = round(low, 6) uppr = kappa.star.hat + E uppr = round(uppr, 6) ci.kappa.star.hat = c(low, uppr) kappa.star.hat = round(kappa.star.hat, 6) names(kappa.star.hat) = c("Kappa* for all selected features") names(ci.kappa.star.hat) = c("Kappa* CI Lower", "Upper") if (kappa.star.hat != round(kappaStarCASMI[length(kappaStarCASMI)],6)) { warning( "Mismatched Kappa* values detected. There may be an issue with the data or the function. Please report the issue to the package developer after ensuring the data has been properly preprocessed." ) } ##### return return( list( Outcome.Variable.Name = names(data[ncol(data)]), Confidence.Level = 1 - alpha, KappaStar = kappa.star.hat, KappaStarCI = ci.kappa.star.hat, results = df ) ) } # Function to check for long values check_length <- function(column) { column = na.omit(column) return(any(nchar(as.character(column)) > 100)) } #' \pkg{CASMI}-Based Feature Selection #' #' Selects the most relevant features toward an outcome. It automatically learns the number of features to be selected, and the selected features are ranked. The method automatically handles the feature redundancy issue. (synonyms of "feature": "variable" "factor" "attribute") \cr #' For more information, please refer to the corresponding publication: Shi, J., Zhang, J. and Ge, Y. (2019), "\pkg{CASMI}—An Entropic Feature Selection Method in Turing’s Perspective" <doi:10.3390/e21121179> #' @param data data frame (features as columns and observations as rows). The outcome variable (Y) MUST be the last column. It requires at least one features and only one outcome. Both the features (Xs) and the outcome (Y) MUST be discrete (if not naturally discrete, you may try the `autoBin.binary` function in the same package). #' @param feature.na.handle options for handling NA values in the data. There are three options: `"stepwise", "na.omit", "NA as a category"`. `feature.na.handle = "stepwise"` excludes NA rows only when a particular variable is being calculated. 
For example, suppose we have data(Feature1: A, NA, B; Feature2: C, D, E; Feature3: F, G, H; Outcome: O, P, Q); the second observation will be excluded only when a particular step includes Feature1, but will not be excluded when a step calculates among Feature2, Feature3, and the Outcome. This option is designed to take advantage of a maximum number of data points. `feature.na.handle = "na.omit"` excludes observations with any NA values at the beginning of the analysis. `feature.na.handle = "NA as a category"` regards the NA value as a new category. This is designed to be used when NA values in the data have a consistent meaning instead of being missing values. For example, in survey data asking for comments, each NA value might consistently mean "no opinion." By default, `feature.na.handle = "stepwise"`. #' @param alpha.filter level of significance for the mutual information test of independence in step 1 of the features selection (initial screening). The smaller the alpha.filter, the fewer the features sent to step 2 (<doi:10.3390/e21121179>). By default, `alpha.filter = 0.1`. #' @param alpha level of significance for the confidence intervals in final results. By default, `alpha = 0.05`. #' @param intermediate.steps output the intermediate process. By default, `intermediate.steps = TRUE`. Set to `FALSE` for showing only summary results. #' @param kappa.star.cap a threshold of `kappa*` for pausing the feature selection process. The program will automatically pause at the first feature of which the `kappa*` value exceeds the kappa.star.cap threshold. By default, `kappa.star.cap = 1.0`, which is the maximum possible value. A lower value may result in fewer final features but less computing time. #' @param feature.num.cap the maximum number of features to be selected. A lower value may result in fewer final features but less computing time. #' @return `CASMI.selectFeatures()` returns selected features and relevant information, including the estimated Kappa* for all selected features (`$KappaStar`) and the corresponding confidence interval (`$KappaStarCI`). The selected features are ranked. The Standardized Mutual Information using the z estimator (`SMIz`) and the corresponding confidence interval (`SMIz.Low` for lower bound, `SMIz.Upr` for upper bound) are given for each selected feature (`Var.Idx` for column index, `Var.Name` for column name). The p-value from the mutual information test of independence using the z estimator (`p.MIz`) is given for each selected feature. #' @examples #' ## Generate a toy dataset: "data" #' ## Features 1 and 3 are associated with Y, while feature 2 is irrelevant. #' ## The outcome variable Y must be discrete and be the LAST column. Features must be discrete. #' n <- 10000 #' set.seed(1) #' x1 <- rbinom(n, 3, 0.5) + 0.2 #' set.seed(2) #' x2 <- rbinom(n, 2, 0.8) + 0.5 #' set.seed(3) #' x3 <- rbinom(n, 5, 0.3) #' set.seed(4) #' error <- round(runif(n, min=-1, max=1)) #' y <- x1 + x3 + error #' data <- data.frame(cbind(x1, x2, x3, y)) #' colnames(data) <- c("feature1", "feature2", "feature3", "Y") #' #' ## Select features and provide relevant results for the toy dataset "data" #' CASMI.selectFeatures(data) #' #' ## For showing only the summary results #' CASMI.selectFeatures(data, intermediate.steps = FALSE) #' #' ## Adjust 'feature.num.cap' for including fewer features. #' ## A lower 'feature.num.cap' value may result in fewer final features but less computing time. 
#' ## For example, if needing only the top one feature based on the toy dataset: #' CASMI.selectFeatures(data, feature.num.cap = 1) #' #' #' @importFrom EntropyEstimation Entropy.z MI.z #' @importFrom entropy entropy.plugin mi.plugin #' @importFrom stats pchisq qnorm #' #' @export CASMI.selectFeatures <- function(data, # outcome must be in the last column feature.na.handle = "stepwise", alpha.filter = 0.1, alpha = 0.05, intermediate.steps = TRUE, kappa.star.cap = 1.0, feature.num.cap = ncol(data)) { # check data type if (!is.data.frame(data)) { stop("Error: The input is not a data frame type.") } data <- as.data.frame(data) outcome.has.na <- any(is.na(data[,ncol(data)])) if (outcome.has.na) { stop("Error: The last column has NA values. Please make sure to put the outcome in the last column. NA is not allowed in the outcome.") } # Check each column result <- apply(data, 2, check_length) # Print columns with at least one value over 100 characters/digits long_columns <- names(result)[result] # Asking user if(length(long_columns) > 0){ cat("The following columns have values with over 100 characters/digits:\n") print(long_columns) # Prompt the user for further action user_choice <- readline(prompt="Do you want to continue with this data? (yes/no): ") if(tolower(user_choice) == "no"){ stop("Ok. Please pre-process the data and try again.") } else if(tolower(user_choice) != "yes"){ stop("Invalid choice. Assuming you don't want to continue.") } } # NA handling if(feature.na.handle == "stepwise") { # do nothing }else if(feature.na.handle == "na.omit") { data <- na.omit(data) }else if(feature.na.handle == "NA as a category") { data[] <- apply(data, 2, function(column) { ifelse(is.na(column), "=na=", column) }) data <- as.data.frame(data) }else { stop("Error: Wrong 'feature.na.handle' value. Please check the documentation.") } if (!is.numeric(alpha.filter)) { stop("Error: 'alpha.filter' should be numeric.") } else if (alpha.filter < 0 || alpha.filter > 1) { stop("Error: 'alpha.filter' should be between 0 and 1.") } if (!is.numeric(alpha)) { stop("Error: 'alpha' should be numeric.") } else if (alpha < 0 || alpha > 1) { stop("Error: 'alpha' should be between 0 and 1.") } if (!is.logical(intermediate.steps) || length(intermediate.steps) != 1) { stop("Error: 'intermediate.steps' must be either TRUE or FALSE.") } if (!is.numeric(kappa.star.cap)) { stop("Error: 'kappa.star.cap' should be numeric.") } else if (kappa.star.cap < 0 || kappa.star.cap > 1) { stop("Error: 'kappa.star.cap' should be between 0 and 1.") } if (!is.numeric(feature.num.cap)) { stop("Error: 'feature.num.cap' should be numeric.") } else if (feature.num.cap < 1) { stop("Error: 'feature.num.cap' should be at least 1.") } start_time <- proc.time() # Step 1: return dependent features, step1Index[] step1Index = vector() count = 0 for (fi in 1:(ncol(data) - 1)) { p_value = MIz.test(data[,fi], data[,ncol(data)]) if (p_value <= alpha.filter) { count = count + 1 step1Index[count] = fi } } indexCASMI = vector() kappaStarCASMI = vector() if (count == 0) { warning( "No associated variables are found toward the outcome. Please double-check: 1. the outcome variable is the last column; 2. a reasonable 'alpha.filter' is set (by default it's 0.1)." 
) indexCASMI = NULL } else{ if(intermediate.steps) { cat(count, "feature(s) passed Step 1 (a test of independence).\n") } # Step 2: select features by joint SMI with Hz estimator # select the best feature (the first feature) maxKappaStar = 0 maxIndex = 0 for (index in step1Index) { valueKappaStar = Kappas(data, index)$kappa.star if (valueKappaStar > maxKappaStar) { maxKappaStar = valueKappaStar maxIndex = index } } indexCASMI = c(indexCASMI, maxIndex) kappaStarCASMI = c(kappaStarCASMI, maxKappaStar) if(intermediate.steps) { intermediateCount = 1 cat("Selecting Feature:", intermediateCount, "- selected column (idx.):", maxIndex, ", Kappa*:", maxKappaStar, ", elapsed (sec.):", (proc.time() - start_time)["elapsed"], "\n") } if (length(step1Index) == 1) { return(generateResults(data, indexCASMI, kappaStarCASMI, alpha)) } # select the 2nd, 3rd, ... best features by joint maxmaxKappaStar = 0 while (maxKappaStar > maxmaxKappaStar & length(indexCASMI) < count & maxKappaStar < kappa.star.cap & length(indexCASMI) < feature.num.cap) { maxmaxKappaStar = maxKappaStar step1Index = step1Index[!step1Index == maxIndex] maxKappaStar = 0 maxIndex = 0 for (index in step1Index) { tmpIndexFeatures = c(indexCASMI, index) # Index of selected features + the new feature valueKappaStar = Kappas(data, tmpIndexFeatures)$kappa.star if (valueKappaStar > maxKappaStar) { maxKappaStar = valueKappaStar maxIndex = index } } if (maxKappaStar > maxmaxKappaStar + 10 ^ -14) { # +10^-14 is to solve the problem of precision indexCASMI = c(indexCASMI, maxIndex) kappaStarCASMI = c(kappaStarCASMI, maxKappaStar) if(intermediate.steps) { intermediateCount = intermediateCount + 1 cat("Selecting Feature:", intermediateCount, "- selected column (idx.):", maxIndex, ", Kappa*:", maxKappaStar, ", elapsed (sec.):", (proc.time() - start_time)["elapsed"], "\n") } } if (maxKappaStar >= kappa.star.cap) { warning( "The feature selection process paused automatically because kappa* of currently selected features reached the 'kappa.star.cap' value." ) } if (length(indexCASMI) >= feature.num.cap) { warning( "The feature selection process paused automatically because the number of selected features reached the 'feature.num.cap' value." ) } } if(intermediate.steps) { cat("---End of intermediate process.---\n") cat("In progress of generating summary.\n\n") } list = generateResults(data, indexCASMI, kappaStarCASMI, alpha) return(list) } }
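# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the package API above): what MIz.test()
# computes. The z-estimated mutual information is converted into an
# approximate chi-squared statistic, 2 * n * MI.z + (k1 - 1) * (k2 - 1), and
# compared against a chi-squared distribution with (k1 - 1) * (k2 - 1)
# degrees of freedom. The toy variables below are hypothetical.
#
# set.seed(7)
# x <- rbinom(500, 2, 0.4)                    # discrete feature, 3 categories
# y <- rbinom(500, 1, 0.3 + 0.2 * (x > 0))    # outcome depends on x
# MIz.test(x, y)                              # small p-value: dependence
#
# z <- rbinom(500, 2, 0.4)                    # generated independently of y
# MIz.test(z, y)                              # typically a large p-value
# ---------------------------------------------------------------------------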
/scratch/gouwar.j/cran-all/cranData/CASMI/R/FeatureSelection.R
#' 'caret' Applications for Spatio-Temporal models
#' @description Supporting functionality to run 'caret' with spatial or spatial-temporal data.
#' 'caret' is a frequently used package for model training and prediction using machine learning.
#' CAST includes functions to improve spatial-temporal modelling tasks using 'caret'.
#' It includes the newly suggested 'Nearest neighbor distance matching' cross-validation to estimate the performance
#' of spatial prediction models and allows for spatial variable selection to select suitable predictor variables
#' in view of their contribution to the spatial model performance.
#' CAST further includes functionality to estimate the (spatial) area of applicability of prediction models
#' by analysing the similarity between new data and training data.
#' Methods are described in Meyer et al. (2018); Meyer et al. (2019); Meyer and Pebesma (2021); Milà et al. (2022); Meyer and Pebesma (2022).
#' @name CAST
#' @docType package
#' @title 'caret' Applications for Spatial-Temporal Models
#' @author Hanna Meyer, Carles Milà, Marvin Ludwig, Lan Linnenbrink
#' @references
#' \itemize{
#' \item Linnenbrink, J., Milà, C., Ludwig, M., and Meyer, H.: kNNDM: k-fold Nearest Neighbour Distance Matching Cross-Validation for map accuracy estimation, EGUsphere [preprint], https://doi.org/10.5194/egusphere-2023-1308, 2023.
#' \item Milà, C., Mateu, J., Pebesma, E., Meyer, H. (2022): Nearest Neighbour Distance Matching Leave-One-Out Cross-Validation for map validation. Methods in Ecology and Evolution 00, 1–13.
#' \item Meyer, H., Pebesma, E. (2022): Machine learning-based global maps of ecological variables and the challenge of assessing them. Nature Communications 13.
#' \item Meyer, H., Pebesma, E. (2021): Predicting into unknown space? Estimating the area of applicability of spatial prediction models. Methods in Ecology and Evolution 12, 1620–1633.
#' \item Meyer, H., Reudenbach, C., Wöllauer, S., Nauss, T. (2019): Importance of spatial predictor variable selection in machine learning applications - Moving from data reproduction to spatial prediction. Ecological Modelling 411, 108815.
#' \item Meyer, H., Reudenbach, C., Hengl, T., Katurji, M., Nauß, T. (2018): Improving performance of spatio-temporal machine learning models using forward feature selection and target-oriented validation. Environmental Modelling & Software 101: 1-9.
#' }
#'
#' @import caret
#' @importFrom stats sd dist na.omit lm predict quantile na.exclude complete.cases
#' @importFrom utils combn
#' @importFrom grDevices rainbow
#' @importFrom graphics axis plot segments
#' @keywords package
#' @aliases CAST-package
#'
NULL
/scratch/gouwar.j/cran-all/cranData/CAST/R/CAST-package.R
#' Create Space-time Folds
#' @description Create spatial, temporal or spatio-temporal folds for cross validation based on pre-defined groups
#' @param x data.frame containing spatio-temporal data
#' @param spacevar Character indicating which column of x identifies the
#' spatial units (e.g. ID of weather stations)
#' @param timevar Character indicating which column of x identifies the
#' temporal units (e.g. the day of the year)
#' @param k numeric. Number of folds. If spacevar or timevar is NA and a
#' leave-one-location-out or leave-one-time-step-out CV should be performed,
#' set k to the number of unique spatial or temporal units.
#' @param class Character indicating which column of x identifies a class unit (e.g. land cover)
#' @param seed numeric. See ?set.seed
#' @return A list that contains a list for model training and a list for
#' model validation that can directly be used as "index" and "indexOut" in
#' caret's trainControl function
#' @details The function creates train and test sets by taking (spatial and/or temporal) groups into account.
#' In contrast to \code{\link{nndm}}, it requires that the groups are already defined (e.g. spatial clusters or blocks or temporal units).
#' Using "class" is helpful in the case that data are clustered in space
#' and the response is categorical. E.g. this is the case for land cover classifications when
#' training data come as training polygons. In this case the data should be split in a way
#' that entire polygons are held back (spacevar="polygonID") but at the same time the distribution of classes
#' should be similar in each fold (class="LUC").
#' @note Standard k-fold cross-validation can lead to considerable misinterpretation in spatial-temporal modelling tasks. This function can be used to prepare a Leave-Location-Out, Leave-Time-Out or Leave-Location-and-Time-Out cross-validation as target-oriented validation strategies for spatial-temporal prediction tasks. See Meyer et al. (2018) for further information.
#' @author Hanna Meyer
#' @seealso \code{\link{trainControl}},\code{\link{ffs}}, \code{\link{nndm}}
#' @references
#' Meyer, H., Reudenbach, C., Hengl, T., Katurji, M., Nauß, T. (2018): Improving performance of spatio-temporal machine learning models using forward feature selection and target-oriented validation. Environmental Modelling & Software 101: 1-9.
#' @examples #' \dontrun{ #' dat <- readRDS(system.file("extdata","Cookfarm.RDS",package="CAST")) #' ### Prepare for 10-fold Leave-Location-and-Time-Out cross validation #' indices <- CreateSpacetimeFolds(dat,"SOURCEID","Date") #' str(indices) #' ### Prepare for 10-fold Leave-Location-Out cross validation #' indices <- CreateSpacetimeFolds(dat,spacevar="SOURCEID") #' str(indices) #' ### Prepare for leave-One-Location-Out cross validation #' indices <- CreateSpacetimeFolds(dat,spacevar="SOURCEID", #' k=length(unique(dat$SOURCEID))) #' str(indices) #' } #' @export CreateSpacetimeFolds #' @aliases CreateSpacetimeFolds CreateSpacetimeFolds <- function(x,spacevar=NA,timevar=NA, k=10,class=NA,seed=sample(1:1000, 1)){ x <- data.frame(x) ### if classification is used, make sure that classes are equally distributed across folds if(!is.na(class)){ unit <- unique(x[,c(spacevar,class)]) unit$CAST_fold <- createFolds(unit[,which(names(unit)==class)],k = k,list=FALSE) #x <- merge(x,unit,by.x=c(spacevar,class),by.y=c(spacevar,class),all.x=TRUE,sort=FALSE) x <- plyr::join(x,unit,by=c(spacevar,class),match="first") spacevar <- "CAST_fold" } if(!is.na(spacevar)){ if(k>length(unique(x[,spacevar]))){ k <- length(unique(x[,spacevar])) print(paste0("warning: k is higher than number of unique locations. k is set to ",k)) } } if(!is.na(timevar)){ if(k>length(unique(x[,timevar]))){ k <- length(unique(x[,timevar])) print(paste0("warning: k is higher than number of unique points in time. k is set to ",k)) } } #split space into k folds if(!is.na(spacevar)){ set.seed(seed) spacefolds <- lapply(caret::createFolds(1:length(unique(x[,spacevar])),k),function(y){ unique(x[,spacevar])[y]}) } #split time into k folds if(!is.na(timevar)){ set.seed(seed) timefolds <- lapply(caret::createFolds(1:length(unique(x[,timevar])),k),function(y){ unique(x[,timevar])[y]}) } # combine space and time folds cvindices_train <- list() cvindices_test <- list() for (i in 1:k){ if(!is.na(timevar)&!is.na(spacevar)){ cvindices_test[[i]]<- which(x[,spacevar]%in%spacefolds[[i]]& x[,timevar]%in%timefolds[[i]]) cvindices_train[[i]]<- which(!x[,spacevar]%in%spacefolds[[i]]& !x[,timevar]%in%timefolds[[i]]) } if(is.na(timevar)&!is.na(spacevar)){ cvindices_test[[i]]<- which(x[,spacevar]%in%spacefolds[[i]]) cvindices_train[[i]]<- which(!x[,spacevar]%in%spacefolds[[i]]) } if(!is.na(timevar)&is.na(spacevar)){ cvindices_test[[i]]<- which(x[,timevar]%in%timefolds[[i]]) cvindices_train[[i]]<- which(!x[,timevar]%in%timefolds[[i]]) } } return(list("index"=cvindices_train,"indexOut"=cvindices_test)) }
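# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the package API above): plugging the
# returned indices into caret's trainControl(), as described in @return.
# The predictor and response column names below are assumptions made about
# the example data shipped with the package.
#
# library(caret)
# dat <- readRDS(system.file("extdata", "Cookfarm.RDS", package = "CAST"))
# indices <- CreateSpacetimeFolds(dat, spacevar = "SOURCEID", timevar = "Date")
# ctrl <- trainControl(method = "cv",
#                      index = indices$index,
#                      indexOut = indices$indexOut,
#                      savePredictions = "final")
# model <- train(dat[, c("DEM", "TWI", "NDRE.Sd")], dat$VW,
#                method = "rf", trControl = ctrl)
# ---------------------------------------------------------------------------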
/scratch/gouwar.j/cran-all/cranData/CAST/R/CreateSpacetimeFolds.R
#' Model the relationship between the DI and the prediction error #' @description Performance metrics are calculated for moving windows of DI values of cross-validated training data #' @param model the model used to get the AOA #' @param trainDI the result of \code{\link{trainDI}} or aoa object \code{\link{aoa}} #' @param multiCV Logical. Re-run model fitting and validation with different CV strategies. See details. #' @param window.size Numeric. Size of the moving window. See \code{\link{rollapply}}. #' @param calib Character. Function to model the DI~performance relationship. Currently lm and scam are supported #' @param length.out Numeric. Only used if multiCV=TRUE. Number of cross-validation folds. See details. #' @param method Character. Method used for distance calculation. Currently euclidean distance (L2) and Mahalanobis distance (MD) are implemented but only L2 is tested. Note that MD takes considerably longer. See ?aoa for further explanation #' @param useWeight Logical. Only if a model is given. Weight variables according to importance in the model? #' @param k Numeric. See mgcv::s #' @param m Numeric. See mgcv::s #' @details If multiCV=TRUE the model is re-fitted and validated by length.out new cross-validations where the cross-validation folds are defined by clusters in the predictor space, #' ranging from three clusters to LOOCV. Hence, a large range of DI values is created during cross-validation. #' If the AOA threshold based on the calibration data from multiple CV is larger than the original AOA threshold (which is likely if extrapolation situations are created during CV), #' the AOA threshold changes accordingly. See Meyer and Pebesma (2021) for the full documentation of the methodology. #' @return A scam or linear model #' @author #' Hanna Meyer, Marvin Ludwig #' @references Meyer, H., Pebesma, E. (2021): Predicting into unknown space? #' Estimating the area of applicability of spatial prediction models. #' \doi{10.1111/2041-210X.13650} #' @seealso \code{\link{aoa}} #' @example inst/examples/ex_DItoErrormetric.R #' #' #' @export DItoErrormetric <- function(model, trainDI, multiCV=FALSE, length.out = 10, window.size = 5, calib = "scam", method= "L2", useWeight=TRUE, k = 6, m = 2){ if(inherits(trainDI,"aoa")){ trainDI = trainDI$parameters } # get DIs and Errormetrics OR calculate new ones from multiCV if(!multiCV){ preds_all <- get_preds_all(model, trainDI) } if(multiCV){ preds_all <- multiCV(model, length.out, method, useWeight) } # train model between DI and Errormetric error_model = errorModel(preds_all, model, window.size, calib, k, m) # save AOA threshold and raw data attr(error_model, "AOA_threshold") <- attr(preds_all, "AOA_threshold") class(error_model) <- c("errorModel", class(error_model)) return(error_model) } #' Model expected error between Metric and DI #' @param preds_all data.frame: pred, obs, DI #' @param model the model used to get the AOA #' @param window.size Numeric. Size of the moving window. See \code{\link{rollapply}}. #' @param calib Character. Function to model the DI~performance relationship. Currently lm and scam are supported #' @param k Numeric. See mgcv::s #' @param m Numeric. 
See mgcv::s #' @return scam or lm #' errorModel <- function(preds_all, model, window.size, calib, k, m){ ## use performance metric from the model: rmse <- function(pred,obs){sqrt( mean((pred - obs)^2, na.rm = TRUE) )} rsquared <- function(pred,obs){summary(lm(pred~obs))$r.squared} mae <- function(pred,obs){MAE(pred,obs)} kappa <- function(pred,obs){ pred <- factor(pred) obs <- factor(obs) lev <- unique(c(levels(pred), levels(obs))) pred <- factor(pred, levels = lev) obs <- factor(obs, levels = lev) result <- tryCatch( confusionMatrix(pred, obs)$overall["Kappa"], error = function(e)e) if(inherits(result, "error")){result <- 0} # 0 not right value!!! adjust!!! return(unname(result)) } accuracy <- function(pred,obs){ pred <- factor(pred) obs <- factor(obs) lev <- unique(c(levels(pred), levels(obs))) pred <- factor(pred, levels = lev) obs <- factor(obs, levels = lev) result <- tryCatch(confusionMatrix(pred, obs)$overall["Accuracy"], error = function(e)e) if(inherits(result, "error")){result <- 0} return(unname(result)) } if(!tolower(model$metric)%in%c("rmse","rsquared","mae","kappa","accuracy")){ message("Model metric not yet included in this function") stop() } evalfunc <- function(pred,obs){ eval(parse(text=paste0(tolower(model$metric),"(pred,obs)"))) } # order data according to DI: performance <- preds_all[order(preds_all$DI),] # calculate performance for moving window: performance$metric <- zoo::rollapply(performance[,1:2], window.size, FUN=function(x){evalfunc(x[,1],x[,2])}, by.column=F,align = "center",fill=NA) performance$ll <- data.table::shift(performance$DI,window.size/2) performance$ul <- data.table::shift(performance$DI,-round(window.size/2),0) performance <- performance[!is.na(performance$metric),] ### Estimate Error: if(calib=="lm"){ errormodel <- lm(metric ~ DI, data = performance) } if(calib=="scam"){ if (!requireNamespace("scam", quietly = TRUE)) { stop("Package \"scam\" needed for this function to work. Please install it.", call. = FALSE) } if (model$maximize){ # e.g. accuracy, kappa, r2 bs="mpd" }else{ bs="mpi" #e.g. RMSE } errormodel <- scam::scam(metric~s(DI, k=k, bs=bs, m=m), data=performance, family=stats::gaussian(link="identity")) } attr(errormodel, "performance") = performance return(errormodel) } #' MultiCV #' @description #' Multiple Cross-Validation with increasing feature space clusteres #' @param model the model used to get the AOA #' @param length.out Numeric. Only used if multiCV=TRUE. Number of cross-validation folds. See details. #' @param method Character. Method used for distance calculation. Currently euclidean distance (L2) and Mahalanobis distance (MD) are implemented but only L2 is tested. Note that MD takes considerably longer. See ?aoa for further explanation #' @param useWeight Logical. Only if a model is given. Weight variables according to importance in the model? #' @param ... 
additional parameters to trainDI #' @returns preds_all #' #' multiCV <- function(model, length.out, method, useWeight,...){ preds_all <- data.frame() train_predictors <- model$trainingData[,-which(names(model$trainingData)==".outcome")] train_response <- model$trainingData$.outcome for (nclst in round(seq(3,nrow(train_predictors), length.out = length.out))){ # define clusters in predictor space used for CV: clstrID <- tryCatch({stats::kmeans(train_predictors,nclst)$cluster}, error=function(e)e) if(inherits(clstrID,"error")){next} clstrID <- clstrID folds <- CreateSpacetimeFolds(data.frame("clstrID"=clstrID), spacevar="clstrID",k=nclst) # update model call with new CV strategy: mcall <- as.list(model$call) mcall <- mcall[-which(names(mcall)%in%c("form","data","","x","y","","trControl"))] mcall$x <- quote(train_predictors) mcall$y <- quote(train_response) mcall$trControl <- trainControl(method="cv",index=folds$index,savePredictions = TRUE) mcall$tuneGrid <- model$bestTune mcall$method <- model$method mcall$metric <- model$metric mcall$cl <- NULL # fix option for parallel later # retrain model and calculate AOA model_new <- do.call(caret::train,mcall) trainDI_new <- trainDI(model_new, method=method, useWeight=useWeight) # get cross-validated predictions, order them and use only those located in the AOA preds <- model_new$pred preds <- preds[order(preds$rowIndex),c("pred","obs")] preds_dat_tmp <- data.frame(preds,"DI"=trainDI_new$trainDI) preds_dat_tmp <- preds_dat_tmp[preds_dat_tmp$DI <= trainDI_new$threshold,] preds_all <- rbind(preds_all,preds_dat_tmp) } attr(preds_all, "AOA_threshold") <- trainDI_new$threshold message(paste0("Note: multiCV=TRUE calculated new AOA threshold of ", round(trainDI_new$threshold, 5), "\nThreshold is stored in the attributes, access with attr(error_model, 'AOA_threshold').", "\nPlease refere to examples and details for further information.")) return(preds_all) } #' Get Preds all #' @param model, a model #' @param trainDI, a trainDI #' get_preds_all <- function(model, trainDI){ if(is.null(model$pred)){ stop("no cross-predictions can be retrieved from the model. Train with savePredictions=TRUE or provide calibration data") } ## extract cv predictions from model preds_all <- model$pred for (i in 1:length(model$bestTune)){ tunevar <- names(model$bestTune[i]) preds_all <- preds_all[preds_all[,tunevar]==model$bestTune[,tunevar],] } preds_all <- preds_all[order(preds_all$rowIndex),c("pred","obs")] ## add DI from trainDI preds_all$DI <- trainDI$trainDI[!is.na(trainDI$trainDI)] ## only take predictions from inside the AOA: preds_all <- preds_all[preds_all$DI<=trainDI$threshold,] attr(preds_all, "AOA_threshold") <- trainDI$threshold return(preds_all) }
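# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the package API above): the intended
# workflow around DItoErrormetric(), assuming a caret model trained with
# savePredictions = TRUE and a SpatRaster of predictors. Object names are
# hypothetical; see inst/examples/ex_DItoErrormetric.R for the packaged
# example.
#
# AOA <- aoa(predictor_stack, model)           # DI and AOA of the new data
# error_model <- DItoErrormetric(model, AOA)   # model metric ~ DI relationship
# expected_error <- terra::predict(AOA$DI, error_model)  # map expected metric
# expected_error[AOA$AOA == 0] <- NA           # optionally restrict to the AOA
# ---------------------------------------------------------------------------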
/scratch/gouwar.j/cran-all/cranData/CAST/R/DItoErrormetric.R
#' Area of Applicability #' @description #' This function estimates the Dissimilarity Index (DI) and the derived #' Area of Applicability (AOA) of spatial prediction models by #' considering the distance of new data (i.e. a SpatRaster of spatial predictors #' used in the models) in the predictor variable space to the data used for model #' training. Predictors can be weighted based on the internal #' variable importance of the machine learning algorithm used for model training. #' The AOA is derived by applying a threshold on the DI which is the (outlier-removed) #' maximum DI of the cross-validated training data. #' @param newdata A SpatRaster, stars object or data.frame containing the data #' the model was meant to make predictions for. #' @param model A train object created with caret used to extract weights from (based on variable importance) as well as cross-validation folds. #' See examples for the case that no model is available or for models trained via e.g. mlr3. #' @param trainDI A trainDI object. Optional if \code{\link{trainDI}} was calculated beforehand. #' @param train A data.frame containing the data used for model training. Optional. Only required when no model is given #' @param weight A data.frame containing weights for each variable. Optional. Only required if no model is given. #' @param variables character vector of predictor variables. if "all" then all variables #' of the model are used or if no model is given then of the train dataset. #' @param CVtest list or vector. Either a list where each element contains the data points used for testing during the cross validation iteration (i.e. held back data). #' Or a vector that contains the ID of the fold for each training point. #' Only required if no model is given. #' @param CVtrain list. Each element contains the data points used for training during the cross validation iteration (i.e. held back data). #' Only required if no model is given and only required if CVtrain is not the opposite of CVtest (i.e. if a data point is not used for testing, it is used for training). #' Relevant if some data points are excluded, e.g. when using \code{\link{nndm}}. #' @param method Character. Method used for distance calculation. Currently euclidean distance (L2) and Mahalanobis distance (MD) are implemented but only L2 is tested. Note that MD takes considerably longer. #' @param useWeight Logical. Only if a model is given. Weight variables according to importance in the model? #' @details The Dissimilarity Index (DI) and the corresponding Area of Applicability (AOA) are calculated. #' If variables are factors, dummy variables are created prior to weighting and distance calculation. #' #' Interpretation of results: If a location is very similar to the properties #' of the training data it will have a low distance in the predictor variable space #' (DI towards 0) while locations that are very different in their properties #' will have a high DI. #' See Meyer and Pebesma (2021) for the full documentation of the methodology. #' @note If classification models are used, currently the variable importance can only #' be automatically retrieved if models were trained via train(predictors,response) and not via the formula-interface. #' Will be fixed. #' @return An object of class \code{aoa} containing: #' \item{parameters}{object of class trainDI. see \code{\link{trainDI}}} #' \item{DI}{SpatRaster, stars object or data frame. Dissimilarity index of newdata} #' \item{AOA}{SpatRaster, stars object or data frame. Area of Applicability of newdata. 
#' AOA has values 0 (outside AOA) and 1 (inside AOA)} #' #' @author #' Hanna Meyer #' @references Meyer, H., Pebesma, E. (2021): Predicting into unknown space? #' Estimating the area of applicability of spatial prediction models. #' Methods in Ecology and Evolution 12: 1620-1633. \doi{10.1111/2041-210X.13650} #' @seealso \code{\link{calibrate_aoa}}, \code{\link{trainDI}} #' @examples #' \dontrun{ #' library(sf) #' library(terra) #' library(caret) #' library(viridis) #' #' # prepare sample data: #' dat <- readRDS(system.file("extdata","Cookfarm.RDS",package="CAST")) #' dat <- aggregate(dat[,c("VW","Easting","Northing")],by=list(as.character(dat$SOURCEID)),mean) #' pts <- st_as_sf(dat,coords=c("Easting","Northing")) #' pts$ID <- 1:nrow(pts) #' set.seed(100) #' pts <- pts[1:30,] #' studyArea <- rast(system.file("extdata","predictors_2012-03-25.tif",package="CAST"))[[1:8]] #' trainDat <- extract(studyArea,pts,na.rm=FALSE) #' trainDat <- merge(trainDat,pts,by.x="ID",by.y="ID") #' #' # visualize data spatially: #' plot(studyArea) #' plot(studyArea$DEM) #' plot(pts[,1],add=TRUE,col="black") #' #' # train a model: #' set.seed(100) #' variables <- c("DEM","NDRE.Sd","TWI") #' model <- train(trainDat[,which(names(trainDat)%in%variables)], #' trainDat$VW, method="rf", importance=TRUE, tuneLength=1, #' trControl=trainControl(method="cv",number=5,savePredictions=T)) #' print(model) #note that this is a quite poor prediction model #' prediction <- predict(studyArea,model,na.rm=TRUE) #' plot(varImp(model,scale=FALSE)) #' #' #...then calculate the AOA of the trained model for the study area: #' AOA <- aoa(studyArea,model) #' plot(AOA) #' #' #### #' #The AOA can also be calculated without a trained model. #' #All variables are weighted equally in this case: #' #### #' AOA <- aoa(studyArea,train=trainDat,variables=variables) #' #' #' #### #' # The AOA can also be used for models trained via mlr3 (parameters have to be assigned manually): #' #### #' #' library(mlr3) #' library(mlr3learners) #' library(mlr3spatial) #' library(mlr3spatiotempcv) #' library(mlr3extralearners) #' #' # initiate and train model: #' train_df <- trainDat[, c("DEM","NDRE.Sd","TWI", "VW")] #' backend <- as_data_backend(train_df) #' task <- as_task_regr(backend, target = "VW") #' lrn <- lrn("regr.randomForest", importance = "mse") #' lrn$train(task) #' #' # cross-validation folds #' rsmp_cv <- rsmp("cv", folds = 5L)$instantiate(task) #' #' ## predict: #' prediction <- predict(studyArea,lrn$model,na.rm=TRUE) #' #' ### Estimate AOA #' AOA <- aoa(studyArea, #' train = as.data.frame(task$data()), #' variables = task$feature_names, #' weight = data.frame(t(lrn$importance())), #' CVtest = rsmp_cv$instance[order(row_id)]$fold) #' #' } #' @export aoa #' @aliases aoa aoa <- function(newdata, model=NA, trainDI = NA, train=NULL, weight=NA, variables="all", CVtest=NULL, CVtrain=NULL, method="L2", useWeight=TRUE) { # handling of different raster formats as_stars <- FALSE leading_digit <- any(grepl("^{1}[0-9]",names(newdata))) if (inherits(newdata, "stars")) { if (!requireNamespace("stars", quietly = TRUE)) stop("package stars required: install that first") newdata <- methods::as(newdata, "SpatRaster") as_stars <- TRUE } if (inherits(newdata, "Raster")) { # if (!requireNamespace("raster", quietly = TRUE)) # stop("package raster required: install that first") message("Raster will soon not longer be supported. 
Use terra or stars instead") newdata <- methods::as(newdata, "SpatRaster") } # if not provided, compute train DI if(!inherits(trainDI, "trainDI")){ message("No trainDI provided. Computing DI of training data...") trainDI <- trainDI(model, train, variables, weight, CVtest, CVtrain,method, useWeight) } message("Computing DI of newdata...") # check if variables are in newdata if(any(trainDI$variables %in% names(newdata)==FALSE)){ if(leading_digit){ stop("names of newdata start with leading digits, automatically added 'X' results in mismatching names of train data in the model") } stop("names of newdata don't match names of train data in the model") } # Prepare output as either as RasterLayer or vector: out <- NA if (inherits(newdata, "SpatRaster")){ out <- newdata[[1]] names(out) <- "DI" } #### order data: if (inherits(newdata, "SpatRaster")){ if (any(is.factor(newdata))){ newdata[[which(is.factor(newdata))]] <- as.numeric(newdata[[which(is.factor(newdata))]]) } newdata <- terra::as.data.frame(newdata,na.rm=FALSE) } newdata <- newdata[,na.omit(match(trainDI$variables, names(newdata)))] ## Handling of categorical predictors: catvars <- trainDI$catvars if (!inherits(catvars,"error")&length(catvars)>0){ for (catvar in catvars){ # mask all unknown levels in newdata as NA (even technically no predictions can be made) trainDI$train[,catvar]<-droplevels(trainDI$train[,catvar]) newdata[,catvar] <- factor(newdata[,catvar]) newdata[!newdata[,catvar]%in%unique(trainDI$train[,catvar]),catvar] <- NA newdata[,catvar] <- droplevels(newdata[,catvar]) # then create dummy variables for the remaining levels in train: dvi_train <- predict(caret::dummyVars(paste0("~",catvar), data = trainDI$train),trainDI$train) dvi_newdata <- predict(caret::dummyVars(paste0("~",catvar), data=trainDI$train),newdata) dvi_newdata[is.na(newdata[,catvar]),] <- 0 trainDI$train <- data.frame(trainDI$train,dvi_train) newdata <- data.frame(newdata,dvi_newdata) } newdata <- newdata[,-which(names(newdata)%in%catvars)] trainDI$train <- trainDI$train[,-which(names(trainDI$train)%in%catvars)] } # scale and weight new data newdata <- scale(newdata,center=trainDI$scaleparam$`scaled:center`, scale=trainDI$scaleparam$`scaled:scale`) if(!inherits(trainDI$weight, "error")){ newdata <- sapply(1:ncol(newdata),function(x){ newdata[,x]*unlist(trainDI$weight[x]) }) } # rescale and reweight train data train_scaled <- scale(trainDI$train, center = trainDI$scaleparam$`scaled:center`, scale = trainDI$scaleparam$`scaled:scale`) train_scaled <- sapply(1:ncol(train_scaled),function(x){train_scaled[,x]*unlist(trainDI$weight[x])}) # Distance Calculation --------- mindist <- rep(NA, nrow(newdata)) okrows <- which(apply(newdata, 1, function(x) all(!is.na(x)))) newdataCC <- newdata[okrows,] if(method=="MD"){ if(dim(train_scaled)[2] == 1){ S <- matrix(stats::var(train_scaled), 1, 1) newdataCC <- as.matrix(newdataCC,ncol=1) } else { S <- stats::cov(train_scaled) } S_inv <- MASS::ginv(S) } mindist[okrows] <- .mindistfun(newdataCC, train_scaled, method, S_inv) DI_out <- mindist/trainDI$trainDist_avrgmean message("Computing AOA...") #### Create Mask for AOA and return statistics if (inherits(out, "SpatRaster")){ terra::values(out) <- DI_out AOA <- out terra::values(AOA) <- 1 AOA[out>trainDI$thres] <- 0 AOA <- terra::mask(AOA,out) names(AOA) = "AOA" # handling of different raster formats. 
if (as_stars){ out <- stars::st_as_stars(out) AOA <- stars::st_as_stars(AOA) } }else{ out <- DI_out AOA <- rep(1,length(out)) AOA[out>trainDI$thres] <- 0 } # used in old versions of the AOA. eventually remove the attributes # attributes(AOA)$aoa_stats <- list("Mean_train" = trainDI$trainDist_avrgmean, # "threshold" = trainDI$thres) # attributes(AOA)$TrainDI <- trainDI$trainDI result <- list(parameters = trainDI, DI = out, AOA = AOA) class(result) <- "aoa" return(result) }
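# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the package API above): the DI of a single
# new observation computed "by hand" for the unweighted Euclidean (L2) case.
# The numbers and object names are hypothetical; the package additionally
# weights variables by importance and derives the AOA threshold from the
# (outlier-removed) DI of the cross-validated training data.
#
# set.seed(1)
# train_x <- scale(data.frame(a = rnorm(50), b = rnorm(50)))
# new_x   <- c(a = 2, b = -1)                            # already on scaled units
# d_new   <- min(sqrt(colSums((t(train_x) - new_x)^2)))  # distance to nearest training point
# d_mean  <- mean(dist(train_x))                         # average distance within training data
# DI_new  <- d_new / d_mean
# # DI_new is then compared against the threshold stored in trainDI to decide
# # whether the location falls inside the AOA.
# ---------------------------------------------------------------------------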
/scratch/gouwar.j/cran-all/cranData/CAST/R/aoa.R
#' Best subset feature selection
#' @description Evaluate all combinations of predictors during model training
#' @param predictors see \code{\link{train}}
#' @param response see \code{\link{train}}
#' @param method see \code{\link{train}}
#' @param metric see \code{\link{train}}
#' @param maximize see \code{\link{train}}
#' @param globalval Logical. Should models be evaluated based on 'global' performance? See \code{\link{global_validation}}
#' @param trControl see \code{\link{train}}
#' @param tuneLength see \code{\link{train}}
#' @param tuneGrid see \code{\link{train}}
#' @param seed A random number used to set the seed for reproducibility
#' @param verbose Logical. Should information about the progress be printed?
#' @param ... arguments passed to the classification or regression routine
#' (such as randomForest).
#' @return A list of class train. Besides the usual train content
#' the object contains the vectors "selectedvars" and "selectedvars_perf"
#' that give the best variables selected as well as their corresponding
#' performance. It also contains "perf_all" that gives the performance of all model runs.
#' @details bss is an alternative to \code{\link{ffs}} and ideal if the training
#' set is small. Models are iteratively fitted using all different combinations
#' of predictor variables. Hence, 2^X models are calculated. Don't try running bss
#' on very large datasets because the computation time is much higher compared to
#' \code{\link{ffs}}.
#'
#' The internal cross validation can be run in parallel. See information
#' on parallel processing of caret's train functions for details.
#'
#'
#' @note This variable selection is particularly suitable for spatial
#' cross validations where variable selection
#' MUST be based on the performance of the model for predicting new spatial units.
#' Note that bss is very slow since all combinations of variables are tested.
#' A more time efficient alternative is the forward feature selection (\code{\link{ffs}}).
#' @author Hanna Meyer #' @seealso \code{\link{train}},\code{\link{ffs}}, #' \code{\link{trainControl}},\code{\link{CreateSpacetimeFolds}}, #' \code{\link{nndm}} #' @examples #' \dontrun{ #' data(iris) #' bssmodel <- bss(iris[,1:4],iris$Species) #' bssmodel$perf_all #' } #' @export bss #' @aliases bss bss <- function (predictors, response, method = "rf", metric = ifelse(is.factor(response), "Accuracy", "RMSE"), maximize = ifelse(metric == "RMSE", FALSE, TRUE), globalval=FALSE, trControl = caret::trainControl(), tuneLength = 3, tuneGrid = NULL, seed = 100, verbose=TRUE, ...){ trControl$returnResamp <- "final" trControl$savePredictions <- "final" if(inherits(response,"character")){ response <- factor(response) if(metric=="RMSE"){ metric <- "Accuracy" maximize <- TRUE } } se <- function(x){sd(x, na.rm = TRUE)/sqrt(length(na.exclude(x)))} n <- length(names(predictors)) if(maximize) evalfunc <- function(x){max(x,na.rm=T)} if(!maximize) evalfunc <- function(x){min(x,na.rm=T)} isBetter <- function (actmodelperf,bestmodelperf,maximization=maximize){ result <- ifelse (!maximization, actmodelperf < bestmodelperf, actmodelperf > bestmodelperf) return(result) } testgrid <- expand.grid(lapply(seq_along(names(predictors)), c, 0)) testgrid <- testgrid[-which(rowSums(testgrid==0)>=(length(names(predictors))-1)),] acc <- 0 perf_all <- data.frame(matrix(ncol=length(predictors)+3,nrow=nrow(testgrid))) names(perf_all) <- c(paste0("var",1:length(predictors)),metric,"SE","nvar") for (i in 1:nrow(testgrid)){ set.seed(seed) model <- caret::train(predictors[,unlist(testgrid[i,])], response,method=method,trControl=trControl, tuneLength=tuneLength, tuneGrid=tuneGrid,...) if (globalval){ perf_stats <- global_validation(model)[names(global_validation(model))==metric] }else{ perf_stats <- model$results[,names(model$results)==metric] } actmodelperf <- evalfunc(perf_stats) actmodelperfSE <- se( sapply(unique(model$resample$Resample), FUN=function(x){mean(model$resample[model$resample$Resample==x, metric],na.rm=TRUE)})) bestmodelperfSE <- actmodelperfSE if (i == 1){ bestmodelperf <- actmodelperf if(globalval){ bestmodelperfSE <- NA } bestmodel <- model } else{ if (isBetter(actmodelperf,bestmodelperf,maximization=maximize)){ bestmodelperf <- actmodelperf bestmodelperfSE <- actmodelperfSE bestmodel <- model } } acc <- acc+1 perf_all[acc,1:length(model$finalModel$xNames)] <- model$finalModel$xNames perf_all[acc,(length(predictors)+1):ncol(perf_all)] <- c(actmodelperf,actmodelperfSE,length(model$finalModel$xNames)) if (verbose){ print(paste0("models that still need to be trained: ", 2^n-(n+1) - acc)) } } if (globalval){ selectedvars_perf <- global_validation(bestmodel)[names(global_validation(bestmodel))==metric] }else{ if (maximize){ selectedvars_perf <-max(bestmodel$results[,metric]) }else{ selectedvars_perf <- min(bestmodel$results[,metric]) } } bestmodel$selectedvars <- bestmodel$finalModel$xNames bestmodel$selectedvars_perf <- selectedvars_perf bestmodel$perf_all <- perf_all bestmodel$perf_all <- bestmodel$perf_all[!apply(is.na(bestmodel$perf_all), 1, all),] bestmodel$perf_all <- bestmodel$perf_all[order(bestmodel$perf_all$nvar),] bestmodel$type <- "bss" class(bestmodel) <- c("ffs", "bss", "train") return(bestmodel) }
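# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the package API above): how quickly the
# number of candidate models evaluated by bss() grows. As reflected in the
# verbose message above, all predictor subsets with at least two variables
# are tested, i.e. 2^n - n - 1 models for n predictors.
n_candidate_models <- function(n) 2^n - n - 1
# n_candidate_models(4)    # 11
# n_candidate_models(10)   # 1013
# n_candidate_models(20)   # 1048555 -> practically infeasible, consider ffs()
# ---------------------------------------------------------------------------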
/scratch/gouwar.j/cran-all/cranData/CAST/R/bss.R
#' Calibrate the AOA based on the relationship between the DI and the prediction error #' @description Performance metrics are calculated for moving windows of DI values of cross-validated training data #' @param AOA the result of \code{\link{aoa}} #' @param model the model used to get the AOA #' @param window.size Numeric. Size of the moving window. See \code{\link{rollapply}}. #' @param calib Character. Function to model the DI~performance relationship. Currently lm and scam are supported #' @param multiCV Logical. Re-run model fitting and validation with different CV strategies. See details. #' @param length.out Numeric. Only used if multiCV=TRUE. Number of cross-validation folds. See details. #' @param maskAOA Logical. Should areas outside the AOA set to NA? #' @param method Character. Method used for distance calculation. Currently euclidean distance (L2) and Mahalanobis distance (MD) are implemented but only L2 is tested. Note that MD takes considerably longer. See ?aoa for further explanation #' @param useWeight Logical. Only if a model is given. Weight variables according to importance in the model? #' @param k Numeric. See mgcv::s #' @param m Numeric. See mgcv::s #' @param showPlot Logical. #' @details If multiCV=TRUE the model is re-fitted and validated by length.out new cross-validations where the cross-validation folds are defined by clusters in the predictor space, #' ranging from three clusters to LOOCV. Hence, a large range of DI values is created during cross-validation. #' If the AOA threshold based on the calibration data from multiple CV is larger than the original AOA threshold (which is likely if extrapolation situations are created during CV), #' the AOA is updated accordingly. See Meyer and Pebesma (2021) for the full documentation of the methodology. #' @return A list of length 2 with the elements "AOA": SpatRaster or stars object which contains the original DI and the AOA (which might be updated if new test data indicate this option), as well as the expected performance based on the relationship. #' Data used for calibration are stored in the attributes. The second element is a plot showing the relationship. #' @author #' Hanna Meyer #' @references Meyer, H., Pebesma, E. (2021): Predicting into unknown space? #' Estimating the area of applicability of spatial prediction models. 
#' \doi{10.1111/2041-210X.13650} #' @seealso \code{\link{aoa}} #' @examples #' \dontrun{ #' library(sf) #' library(terra) #' library(caret) #' library(viridis) #' library(latticeExtra) #' #' #' # prepare sample data: #' dat <- readRDS(system.file("extdata","Cookfarm.RDS",package="CAST")) #' dat <- aggregate(dat[,c("VW","Easting","Northing")],by=list(as.character(dat$SOURCEID)),mean) #' pts <- st_as_sf(dat,coords=c("Easting","Northing")) #' pts$ID <- 1:nrow(pts) #' studyArea <- rast(system.file("extdata","predictors_2012-03-25.tif",package="CAST"))[[1:8]] #' dat <- extract(studyArea,pts,na.rm=TRUE) #' trainDat <- merge(dat,pts,by.x="ID",by.y="ID") #' #' # train a model: #' variables <- c("DEM","NDRE.Sd","TWI") #' set.seed(100) #' model <- train(trainDat[,which(names(trainDat)%in%variables)], #' trainDat$VW,method="rf",importance=TRUE,tuneLength=1, #' trControl=trainControl(method="cv",number=5,savePredictions=TRUE)) #' #' #...then calculate the AOA of the trained model for the study area: #' AOA <- aoa(studyArea,model) #' # and get the expected performance on a pixel-level: #' AOA_new <- calibrate_aoa(AOA,model) #' plot(AOA_new$AOA$expected_RMSE) #' } #' @export calibrate_aoa #' @aliases calibrate_aoa calibrate_aoa <- function(AOA,model, window.size=5, calib="scam",multiCV=FALSE, length.out = 10, maskAOA=TRUE, method= "L2", useWeight=TRUE, showPlot=TRUE,k=6,m=2){ message("Note: calibrate_aoa is deprecated and will be removed soon. Please use and refer to DItoErrormetric instead.") as_stars <- FALSE # as_raster <- FALSE if (inherits(AOA$AOA, "stars")) { if (!requireNamespace("stars", quietly = TRUE)) stop("package stars required: install that first") attr <- attributes(AOA)[c("aoa_stats","TrainDI")] AOA$AOA <- methods::as(AOA$AOA, "SpatRaster") AOA$DI <- methods::as(AOA$DI, "SpatRaster") attributes(AOA)<- c(attributes(AOA),attr) as_stars <- TRUE } if (inherits(AOA$AOA, "Raster")) { #if (!requireNamespace("raster", quietly = TRUE)) # stop("package raster required: install that first") message("Raster will soon no longer be supported. 
Use terra or stars instead") attr <- attributes(AOA)[c("aoa_stats","TrainDI")] AOA$AOA <- methods::as(AOA$AOA, "SpatRaster") AOA$DI <- methods::as(AOA$DI, "SpatRaster") attributes(AOA)<- c(attributes(AOA),attr) # as_raster <- TRUE } if(multiCV){ preds_all <- data.frame() train_predictors <- model$trainingData[,-which(names(model$trainingData)==".outcome")] train_response <- model$trainingData$.outcome for (nclst in round(seq(3,nrow(train_predictors),length.out = length.out))){ # define clusters in predictor space used for CV: clstrID <- tryCatch({stats::kmeans(train_predictors,nclst)$cluster}, error=function(e)e) if(inherits(clstrID,"error")){next} clstrID <- clstrID folds <- CreateSpacetimeFolds(data.frame("clstrID"=clstrID), spacevar="clstrID",k=nclst) # update model call with new CV strategy: mcall <- as.list(model$call) mcall <- mcall[-which(names(mcall)%in%c("form","data","","x","y","","trControl"))] mcall$x <- quote(train_predictors) mcall$y <- quote(train_response) mcall$trControl <- trainControl(method="cv",index=folds$index,savePredictions = TRUE) mcall$tuneGrid <- model$bestTune mcall$method <- model$method mcall$metric <- model$metric mcall$cl <- NULL # fix option for parallel later # retrain model and calculate AOA model_new <- do.call(caret::train,mcall) AOA_new <- aoa(train_predictors,model_new,method=method, useWeight=useWeight) # legacy change (very dirty, change this as soon as possible) #AOA_new <- AOA_new$AOA # get cross-validated predictions, order them and use only those located in the AOA preds <- model_new$pred preds <- preds[order(preds$rowIndex),c("pred","obs")] preds_dat_tmp <- data.frame(preds,"DI"=AOA_new$parameters$trainDI) preds_dat_tmp <- preds_dat_tmp[preds_dat_tmp$DI<=AOA_new$parameters$threshold,] preds_all <- rbind(preds_all,preds_dat_tmp) } AOA$parameters$threshold <- max(preds_all$DI) AOA$parameters$trainDI <- preds_all$DI } ### Get cross-validated predictions from the model: if(!multiCV){ # Get cross-validated predictions: if(is.null(model$pred)){ stop("CV predictions cannot be derived from the model. re-train using savePredictions, see ?caret::trainControl") } preds_all <- model$pred for (i in 1:length(model$bestTune)){ tunevar <- names(model$bestTune[i]) preds_all <- preds_all[preds_all[,tunevar]==model$bestTune[,tunevar],] } preds_all <- preds_all[order(preds_all$rowIndex),c("pred","obs")] preds_all$DI <- AOA$parameters$trainDI[!is.na(AOA$parameters$trainDI)] ## only take predictions from inside the AOA: preds_all <- preds_all[preds_all$DI<=AOA$parameters$threshold,] } ### Estimate the error~DI relationship: if(is.null(preds_all)){ stop("no cross-predictions can be retrieved from the model. Train with savePredictions=TRUE or provide calibration data") } ## use performance metric from the model: rmse <- function(pred,obs){sqrt( mean((pred - obs)^2, na.rm = TRUE) )} rsquared <- function(pred,obs){summary(lm(pred~obs))$r.squared} mae <- function(pred,obs){MAE(pred,obs)} kappa <- function(pred,obs){ pred <- factor(pred) obs <- factor(obs) lev <- unique(c(levels(pred), levels(obs))) pred <- factor(pred, levels = lev) obs <- factor(obs, levels = lev) result <- tryCatch( confusionMatrix(pred, obs)$overall["Kappa"], error = function(e)e) if(inherits(result, "error")){result <- 0} # 0 not right value!!! adjust!!! 
return(unname(result)) } accuracy <- function(pred,obs){ pred <- factor(pred) obs <- factor(obs) lev <- unique(c(levels(pred), levels(obs))) pred <- factor(pred, levels = lev) obs <- factor(obs, levels = lev) result <- tryCatch(confusionMatrix(pred, obs)$overall["Accuracy"], error = function(e)e) if(inherits(result, "error")){result <- 0} return(unname(result)) } if(!tolower(model$metric)%in%c("rmse","rsquared","mae","kappa","accuracy")){ message("Model metric not yet included in this function") stop() } evalfunc <- function(pred,obs){ eval(parse(text=paste0(tolower(model$metric),"(pred,obs)"))) } # order data according to DI: performance <- preds_all[order(preds_all$DI),] # calculate performance for moving window: performance$metric <- zoo::rollapply(performance[,1:2], window.size, FUN=function(x){evalfunc(x[,1],x[,2])}, by.column=F,align = "center",fill=NA) performance$ll <- data.table::shift(performance$DI,window.size/2) performance$ul <- data.table::shift(performance$DI,-round(window.size/2),0) performance <- performance[!is.na(performance$metric),] ### Update AOA: if (multiCV){ if(inherits(AOA$AOA,"SpatRaster")){ AOA$AOA <- terra::setValues(AOA$AOA, 0) }else{ AOA$AOA <- 0 } AOA$AOA[AOA$DI<=max(performance$DI,na.rm=T)] <- 1 if(inherits(AOA$AOA,"SpatRaster")){ AOA$AOA <- terra::mask(AOA$AOA,AOA$DI) }else{ AOA$AOA[is.na(AOA$DI)] <- NA } } ### Estimate Error: if(calib=="lm"){ errormodel <- lm(metric ~ DI, data = performance) } if(calib=="scam"){ if (!requireNamespace("scam", quietly = TRUE)) { stop("Package \"scam\" needed for this function to work. Please install it.", call. = FALSE) } if (model$maximize){ # e.g. accuracy, kappa, r2 bs="mpd" }else{ bs="mpi" #e.g. RMSE } errormodel <- scam::scam(metric~s(DI, k=k, bs=bs, m=m), data=performance, family=stats::gaussian(link="identity")) } attributes(AOA)$calib$model <- errormodel DI_pred <- AOA$DI attr <- attributes(AOA)[c("aoa_stats","TrainDI","calib")] # predict and make sure it's not going beyond min observed values if(inherits(DI_pred,"SpatRaster")){ terra::values(DI_pred)[terra::values(AOA$DI)<min(performance$DI,na.rm=TRUE)] <- min(performance$DI,na.rm=TRUE) AOA$expectedError <- terra::predict(DI_pred,errormodel) }else{ DI_pred[AOA$DI<min(performance$DI,na.rm=TRUE)] <- min(performance$DI,na.rm=TRUE) AOA$expectedError <- predict(errormodel,data.frame("DI"=DI_pred)) } if(maskAOA){ if(inherits(AOA$expectedError,"SpatRaster")){ AOA$expectedError <- terra::mask(AOA$expectedError,AOA$AOA,maskvalue=0) }else{ AOA$expectedError[AOA$AOA==0] <- NA } } names(performance)[which(names(performance)=="metric")] <- model$metric attr$calib$group_stats <- performance attributes(AOA)<- c(attributes(AOA),attr) ### Plot result: # if(showPlot){ # loc <- "topleft" # if(model$maximize){ # loc <- "topright" # } # # plot(attr$calib$group_stats$DI,attr$calib$group_stats[,model$metric],xlab="DI", # ylab=model$metric) # graphics::legend(loc,lty=c(NA,2),lwd=c(NA,1),pch=c(1,NA),col=c("black","black"), # legend=c("CV","model"),bty="n") # graphics::lines(seq(0,max(attr$calib$group_stats$DI, na.rm=TRUE),max(attr$calib$group_stats$DI, na.rm=TRUE)/100), # predict(attributes(AOA)$calib$model, # data.frame("DI"=seq(0, max(attr$calib$group_stats$DI,na.rm=TRUE), # max(attr$calib$group_stats$DI, na.rm=TRUE)/100))),lwd=1,lty=2,col="black") p <- lattice::xyplot(attr$calib$group_stats[,model$metric]~attr$calib$group_stats$DI,xlab="DI", ylab=model$metric,col="black", key=list(columns=2, text=list(lab=c("cross-validation","model")), points=list(pch=c(1,NA), col="black"), 
lines=list(lty=c(0,2), lwd=2, col="black")),panel = function(x, y, ...) { lattice::panel.xyplot(x, y, ...) lattice::llines(x, predict(attr$calib$model), col="black", lwd=2, lty=2) }) if(showPlot){ print(p) } if (as_stars){ AOA$AOA <- split(stars::st_as_stars(AOA$AOA), "band") AOA$DI <- split(stars::st_as_stars(AOA$DI), "band") AOA$expectedError <- split(stars::st_as_stars(AOA$expectedError), "band") attributes(AOA$AOA)<- c(attributes(AOA$AOA),attr) } # if(as_raster){ # AOA$AOA <- methods::as(AOA$AOA, "Raster") # AOA$DI <- methods::as(AOA$DI, "Raster") # AOA$expectedError <- methods::as(AOA$expectedError, "Raster") # attributes(AOA$AOA)<- c(attributes(AOA$AOA),attr) # } names(AOA)[names(AOA)=="expectedError"] <- paste0("expected_",model$metric) #return(AOA) return(list(AOA = AOA, plot = p)) }
/scratch/gouwar.j/cran-all/cranData/CAST/R/calibrate_aoa.R
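# A hedged, self-contained sketch (simulated data, not package code) of the core idea
# in calibrate_aoa(): order cross-validated predictions by their DI, compute a
# performance metric in a moving window via zoo::rollapply(), and fit a simple model
# of metric ~ DI (here the calib = "lm" variant). The data, window size and error
# structure below are illustrative assumptions only.
library(zoo)
set.seed(1)
dat <- data.frame(DI = sort(runif(200)))
dat$obs  <- rnorm(200)
dat$pred <- dat$obs + rnorm(200, sd = 0.2 + dat$DI)   # simulate error growing with DI
rmse <- function(pred, obs) sqrt(mean((pred - obs)^2, na.rm = TRUE))
dat$metric <- as.numeric(zoo::rollapply(dat[, c("pred", "obs")], width = 10,
                                        FUN = function(x) rmse(x[, 1], x[, 2]),
                                        by.column = FALSE, align = "center", fill = NA))
errormodel <- lm(metric ~ DI, data = dat)
summary(errormodel)$coefficients   # the DI slope should come out positive here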
#' Clustered samples simulation #' #' @description A simple procedure to simulate clustered points based on a two-step sampling. #' @param sarea polygon. Area where samples should be simulated. #' @param nsamples integer. Number of samples to be simulated. #' @param nparents integer. Number of parents. #' @param radius integer. Radius of the buffer around each parent for offspring simulation. #' #' @return sf object with the simulated points and the parent to which each point belongs. #' @details A simple procedure to simulate clustered points based on a two-step sampling. #' First, a pre-specified number of parents is simulated using random sampling. #' For each parent, `(nsamples-nparents)/nparents` offspring points are simulated within a radius of the parent point using random sampling. #' #' @examples #' # Simulate 100 points in a 100x100 square with 5 parents and a radius of 10. #' library(sf) #' library(ggplot2) #' #' set.seed(1234) #' simarea <- list(matrix(c(0,0,0,100,100,100,100,0,0,0), ncol=2, byrow=TRUE)) #' simarea <- sf::st_polygon(simarea) #' simpoints <- clustered_sample(simarea, 100, 5, 10) #' simpoints$parent <- as.factor(simpoints$parent) #' ggplot() + #' geom_sf(data = simarea, alpha = 0) + #' geom_sf(data = simpoints, aes(col = parent)) #' #' @author Carles Milà #' @export clustered_sample <- function(sarea, nsamples, nparents, radius){ # Number of offspring per parent nchildren <- round((nsamples-nparents)/nparents, 0) # Simulate parents parents <- sf::st_sf(geometry=sf::st_sample(sarea, nparents, type="random")) res <- parents res$parent <- 1:nrow(parents) # Simulate offspring for(i in 1:nrow(parents)){ # Generate buffer and cut parts outside of the area of study buf <- sf::st_buffer(parents[i,], dist=radius) buf <- sf::st_intersection(buf, sarea) # Simulate children children <- sf::st_sf(geometry=sf::st_sample(buf, nchildren, type="random")) children$parent <- i res <- rbind(res, children) } return(res) }
/scratch/gouwar.j/cran-all/cranData/CAST/R/clustered_sample.R
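# A small sketch (plain arithmetic, not package code) of the two-step sampling used by
# clustered_sample() above: each parent receives round((nsamples - nparents) / nparents)
# offspring, so the returned number of points can deviate slightly from nsamples due to
# rounding. The parameter values below are illustrative assumptions.
nsamples  <- 100; nparents <- 6
nchildren <- round((nsamples - nparents) / nparents)  # 16 offspring per parent
nparents + nparents * nchildren                       # 102 points in total, not 100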
#' Forward feature selection #' @description A simple forward feature selection algorithm #' @param predictors see \code{\link{train}} #' @param response see \code{\link{train}} #' @param method see \code{\link{train}} #' @param metric see \code{\link{train}} #' @param maximize see \code{\link{train}} #' @param globalval Logical. Should models be evaluated based on 'global' performance? See \code{\link{global_validation}} #' @param withinSE Logical. Models are only selected if they are better than the #' currently best model's standard error #' @param minVar Numeric. Number of variables to combine for the first selection. #' See Details. #' @param trControl see \code{\link{train}} #' @param tuneLength see \code{\link{train}} #' @param tuneGrid see \code{\link{train}} #' @param seed A random number used for model training #' @param verbose Logical. Should information about the progress be printed? #' @param ... arguments passed to the classification or regression routine #' (such as randomForest). #' @return A list of class train. Besides the usual train content #' the object contains the vectors "selectedvars" and "selectedvars_perf" #' that give the order of the best variables selected as well as their corresponding #' performance (starting from the first two variables). It also contains "perf_all" #' that gives the performance of all model runs. #' @details Models with two predictors are first trained using all possible #' pairs of predictor variables. The best model of these initial models is kept. #' On the basis of this best model the predictor variables are iteratively #' increased and each of the remaining variables is tested for its improvement #' of the currently best model. The process stops if none of the remaining #' variables increases the model performance when added to the current best model. #' #' The internal cross validation can be run in parallel. See information #' on parallel processing of caret's train functions for details. #' #' Using withinSE will favour models with fewer variables and #' will probably shorten the calculation time. #' #' By default, the ffs starts with all possible combinations of two variables. #' minVar allows starting the selection with more than 2 variables, e.g. #' minVar=3 starts the ffs testing all combinations of 3 (instead of 2) variables #' first and then increasing the number. This is important for e.g. neural networks #' that often cannot make sense of only two variables. It is also relevant if #' it is assumed that the optimal variables can only be found if more than 2 #' are considered at the same time. #' #' @note This variable selection is particularly suitable for spatial #' cross validations where variable selection #' MUST be based on the performance of the model for predicting new spatial units. #' See Meyer et al. (2018) and Meyer et al. (2019) for further details. #' #' @author Hanna Meyer #' @seealso \code{\link{train}},\code{\link{bss}}, #' \code{\link{trainControl}},\code{\link{CreateSpacetimeFolds}},\code{\link{nndm}} #' @references #' \itemize{ #' \item Gasch, C.K., Hengl, T., Gräler, B., Meyer, H., Magney, T., Brown, D.J. (2015): Spatio-temporal interpolation of soil water, temperature, and electrical conductivity in 3D+T: the Cook Agronomy Farm data set. Spatial Statistics 14: 70-90. #' \item Meyer, H., Reudenbach, C., Hengl, T., Katurji, M., Nauß, T. (2018): Improving performance of spatio-temporal machine learning models using forward feature selection and target-oriented validation. Environmental Modelling & Software 101: 1-9. 
\doi{10.1016/j.envsoft.2017.12.001} #' \item Meyer, H., Reudenbach, C., Wöllauer, S., Nauss, T. (2019): Importance of spatial predictor variable selection in machine learning applications - Moving from data reproduction to spatial prediction. Ecological Modelling. 411, 108815. \doi{10.1016/j.ecolmodel.2019.108815}. #' \item Ludwig, M., Moreno-Martinez, A., Hölzel, N., Pebesma, E., Meyer, H. (2023): Assessing and improving the transferability of current global spatial prediction models. Global Ecology and Biogeography. \doi{10.1111/geb.13635}. #' } #' @examples #' \dontrun{ #' data(iris) #' ffsmodel <- ffs(iris[,1:4],iris$Species) #' ffsmodel$selectedvars #' ffsmodel$selectedvars_perf #'} #' #' # or perform model with target-oriented validation (LLO CV) #' #the example is described in Gasch et al. (2015). The ffs approach for this dataset is described in #' #Meyer et al. (2018). Due to high computation time needed, only a small and thus not robust example #' #is shown here. #' #' \dontrun{ #' #run the model on three cores: #' library(doParallel) #' library(lubridate) #' cl <- makeCluster(3) #' registerDoParallel(cl) #' #' #load and prepare dataset: #' dat <- readRDS(system.file("extdata","Cookfarm.RDS",package="CAST")) #' trainDat <- dat[dat$altitude==-0.3&year(dat$Date)==2012&week(dat$Date)%in%c(13:14),] #' #' #visualize dataset: #' ggplot(data = trainDat, aes(x=Date, y=VW)) + geom_line(aes(colour=SOURCEID)) #' #' #create folds for Leave Location Out Cross Validation: #' set.seed(10) #' indices <- CreateSpacetimeFolds(trainDat,spacevar = "SOURCEID",k=3) #' ctrl <- trainControl(method="cv",index = indices$index) #' #' #define potential predictors: #' predictors <- c("DEM","TWI","BLD","Precip_cum","cday","MaxT_wrcc", #' "Precip_wrcc","NDRE.M","Bt","MinT_wrcc","Northing","Easting") #' #' #run ffs model with Leave Location out CV #' set.seed(10) #' ffsmodel <- ffs(trainDat[,predictors],trainDat$VW,method="rf", #' tuneLength=1,trControl=ctrl) #' ffsmodel #' plot(ffsmodel) #' #or only selected variables: #' plot(ffsmodel,plotType="selected") #' #' #compare to model without ffs: #' model <- train(trainDat[,predictors],trainDat$VW,method="rf", #' tuneLength=1, trControl=ctrl) #' model #' stopCluster(cl) #'} #' @export ffs #' @aliases ffs ffs <- function (predictors, response, method = "rf", metric = ifelse(is.factor(response), "Accuracy", "RMSE"), maximize = ifelse(metric == "RMSE", FALSE, TRUE), globalval=FALSE, withinSE = FALSE, minVar = 2, trControl = caret::trainControl(), tuneLength = 3, tuneGrid = NULL, seed = sample(1:1000, 1), verbose=TRUE, ...){ trControl$returnResamp <- "final" trControl$savePredictions <- "final" if(inherits(response,"character")){ response <- factor(response) if(metric=="RMSE"){ metric <- "Accuracy" maximize <- TRUE } } if (trControl$method=="LOOCV"){ if (withinSE==TRUE){ print("warning: withinSE is set to FALSE as no SE can be calculated using method LOOCV") withinSE <- FALSE }} if(globalval){ if (withinSE==TRUE){ print("warning: withinSE is set to FALSE as no SE can be calculated using global validation") withinSE <- FALSE }} se <- function(x){sd(x, na.rm = TRUE)/sqrt(length(na.exclude(x)))} n <- length(names(predictors)) acc <- 0 perf_all <- data.frame(matrix(ncol=length(predictors)+3, nrow=choose(n, minVar)+(n-minVar)*(n-minVar+1)/2)) names(perf_all) <- c(paste0("var",1:length(predictors)),metric,"SE","nvar") if(maximize) evalfunc <- function(x){max(x,na.rm=TRUE)} if(!maximize) evalfunc <- function(x){min(x,na.rm=TRUE)} isBetter <- function 
(actmodelperf,bestmodelperf, bestmodelperfSE=NULL, maximization=FALSE, withinSE=FALSE){ if(withinSE){ result <- ifelse (!maximization, actmodelperf < bestmodelperf-bestmodelperfSE, actmodelperf > bestmodelperf+bestmodelperfSE) }else{ result <- ifelse (!maximization, actmodelperf < bestmodelperf, actmodelperf > bestmodelperf) } return(result) } #### chose initial best model from all combinations of two variables minGrid <- t(data.frame(combn(names(predictors),minVar))) for (i in 1:nrow(minGrid)){ if (verbose){ print(paste0("model using ",paste0(minGrid[i,],collapse=","), " will be trained now..." )) } set.seed(seed) #adaptations for pls: tuneGrid_orig <- tuneGrid tuneLength_orig <- tuneLength if(method=="pls"&!is.null(tuneGrid)&any(tuneGrid$ncomp>minVar)){ tuneGrid <- data.frame(ncomp=tuneGrid[tuneGrid$ncomp<=minVar,]) if(verbose){ print(paste0("note: maximum ncomp is ", minVar)) } } #adaptations for tuning of ranger: if(method=="ranger"&!is.null(tuneGrid)&any(tuneGrid$mtry>minVar)){ tuneGrid$mtry <- minVar if(verbose){ print("invalid value for mtry. Reset to valid range.") } } # adaptations for RF and minVar == 1 - tuneLength must be 1, only one mtry possible if(minVar==1 & method%in%c("ranger", "rf") & is.null(tuneGrid)){ tuneLength <- minVar } #train model: model <- caret::train(predictors[minGrid[i,]], response, method=method, metric=metric, trControl=trControl, tuneLength = tuneLength, tuneGrid = tuneGrid, ...) tuneGrid <- tuneGrid_orig tuneLength <- tuneLength_orig ### compare the model with the currently best model if (globalval){ perf_stats <- global_validation(model)[names(global_validation(model))==metric] }else{ perf_stats <- model$results[,names(model$results)==metric] } actmodelperf <- evalfunc(perf_stats) actmodelperfSE <- se( sapply(unique(model$resample$Resample), FUN=function(x){mean(model$resample[model$resample$Resample==x, metric],na.rm=TRUE)})) if (i == 1){ bestmodelperf <- actmodelperf bestmodelperfSE <- actmodelperfSE bestmodel <- model } else{ if (isBetter(actmodelperf,bestmodelperf,maximization=maximize,withinSE=FALSE)){ bestmodelperf <- actmodelperf bestmodelperfSE <- actmodelperfSE bestmodel <- model } } acc <- acc+1 variablenames <- names(model$trainingData)[-length(names(model$trainingData))] perf_all[acc,1:length(variablenames)] <- variablenames perf_all[acc,(length(predictors)+1):ncol(perf_all)] <- c(actmodelperf,actmodelperfSE,length(variablenames)) if(verbose){ print(paste0("maximum number of models that still need to be trained: ", round(choose(n, minVar)+(n-minVar)*(n-minVar+1)/2-acc,0))) } } #### increase the number of predictors by one (try all combinations) #and test if model performance increases selectedvars <- names(bestmodel$trainingData)[-which( names(bestmodel$trainingData)==".outcome")] if (globalval){ selectedvars_perf <- global_validation(bestmodel)[names(global_validation(bestmodel))==metric] }else{ if (maximize){ selectedvars_perf <-max(bestmodel$results[,metric]) }else{ selectedvars_perf <- min(bestmodel$results[,metric]) } } selectedvars_SE <- bestmodelperfSE if(verbose){ print(paste0(paste0("vars selected: ",paste(selectedvars, collapse = ',')), " with ",metric," ",round(selectedvars_perf,3))) } for (k in 1:(length(names(predictors))-minVar)){ startvars <- names(bestmodel$trainingData)[-which( names(bestmodel$trainingData)==".outcome")] nextvars <- names(predictors)[-which( names(predictors)%in%startvars)] if (length(startvars)<(k+(minVar-1))){ message(paste0("Note: No increase in performance found using more than ", length(startvars), " 
variables")) bestmodel$selectedvars <- selectedvars bestmodel$selectedvars_perf <- selectedvars_perf[-length(selectedvars_perf)] bestmodel$selectedvars_perf_SE <- selectedvars_SE[-length(selectedvars_SE)] #!!! bestmodel$perf_all <- perf_all bestmodel$perf_all <- bestmodel$perf_all[!apply(is.na(bestmodel$perf_all), 1, all),] bestmodel$perf_all <- bestmodel$perf_all[colSums(!is.na(bestmodel$perf_all)) > 0] bestmodel$minVar <- minVar bestmodel$type <- "ffs" class(bestmodel) <- c("ffs", "train") return(bestmodel) break() } for (i in 1:length(nextvars)){ if(verbose){ print(paste0("model using additional variable ",nextvars[i], " will be trained now..." )) } set.seed(seed) #adaptation for pls: tuneGrid_orig <- tuneGrid if(method=="pls"&!is.null(tuneGrid)&any(tuneGrid$ncomp>ncol(predictors[,c(startvars,nextvars[i])]))){ tuneGrid<- data.frame(ncomp=tuneGrid[tuneGrid$ncomp<=ncol(predictors[,c(startvars,nextvars[i])]),]) if(verbose){ print(paste0("note: maximum ncomp is ", ncol(predictors[,c(startvars,nextvars[i])]))) }} #adaptation for ranger: if(method=="ranger"&!is.null(tuneGrid)&any(tuneGrid$mtry>ncol(predictors[,c(startvars,nextvars[i])]))){ tuneGrid$mtry[tuneGrid$mtry>ncol(predictors[,c(startvars,nextvars[i])])] <- ncol(predictors[,c(startvars,nextvars[i])]) if(verbose){ print("invalid value for mtry. Reset to valid range.") } } model <- caret::train(predictors[,c(startvars,nextvars[i])], response, method = method, metric=metric, trControl = trControl, tuneLength = tuneLength, tuneGrid = tuneGrid, ...) tuneGrid <- tuneGrid_orig if (globalval){ perf_stats <- global_validation(model)[names(global_validation(model))==metric] }else{ perf_stats <- model$results[,names(model$results)==metric] } actmodelperf <- evalfunc(perf_stats) actmodelperfSE <- se( sapply(unique(model$resample$Resample), FUN=function(x){mean(model$resample[model$resample$Resample==x, metric],na.rm=TRUE)})) if(isBetter(actmodelperf,bestmodelperf, selectedvars_SE[length(selectedvars_SE)], #SE from model with nvar-1 maximization=maximize,withinSE=withinSE)){ bestmodelperf <- actmodelperf bestmodelperfSE <- actmodelperfSE bestmodel <- model } acc <- acc+1 variablenames <- names(model$trainingData)[-length(names(model$trainingData))] perf_all[acc,1:length(variablenames)] <- variablenames perf_all[acc,(length(predictors)+1):ncol( perf_all)] <- c(actmodelperf,actmodelperfSE,length(variablenames)) if(verbose){ print(paste0("maximum number of models that still need to be trained: ", round(choose(n, minVar)+(n-minVar)*(n-minVar+1)/2-acc,0))) } } selectedvars <- c(selectedvars,names(bestmodel$trainingData)[-which( names(bestmodel$trainingData)%in%c(".outcome",selectedvars))]) selectedvars_SE <- c(selectedvars_SE,bestmodelperfSE) if (maximize){ if(globalval){ selectedvars_perf <- c(selectedvars_perf,global_validation(bestmodel)[names(global_validation(bestmodel))==metric]) }else{ selectedvars_perf <- c(selectedvars_perf,max(bestmodel$results[,metric])) } } if (!maximize){ if(globalval){ selectedvars_perf <- c(selectedvars_perf,global_validation(bestmodel)[names(global_validation(bestmodel))==metric]) }else{ selectedvars_perf <- c(selectedvars_perf,min(bestmodel$results[,metric])) } } if(verbose){ print(paste0(paste0("vars selected: ",paste(selectedvars, collapse = ',')), " with ",metric," ",round(selectedvars_perf[length(selectedvars_perf)],3))) } } # Old version that is not using global_validation: # if (maximize){ # selectedvars_perf <- c(selectedvars_perf,max(bestmodel$results[,metric])) # if(verbose){ # print(paste0(paste0("vars 
selected: ",paste(selectedvars, collapse = ',')), # " with ", metric," ",round(max(bestmodel$results[,metric]),3))) # } # } # if (!maximize){ # selectedvars_perf <- c(selectedvars_perf,min(bestmodel$results[,metric])) # if(verbose){ # print(paste0(paste0("vars selected: ",paste(selectedvars, collapse = ',')), # " with ",metric," ",round(min(bestmodel$results[,metric]),3))) # } # } bestmodel$selectedvars <- selectedvars bestmodel$selectedvars_perf <- selectedvars_perf bestmodel$selectedvars_perf_SE <- selectedvars_SE if(globalval){ bestmodel$selectedvars_perf_SE <- NA } bestmodel$perf_all <- perf_all bestmodel$perf_all <- bestmodel$perf_all[!apply(is.na(bestmodel$perf_all), 1, all),] bestmodel$minVar <- minVar bestmodel$type <- "ffs" bestmodel$perf_all <- bestmodel$perf_all[colSums(!is.na(bestmodel$perf_all)) > 0] class(bestmodel) <- c("ffs", "train") return(bestmodel) }
/scratch/gouwar.j/cran-all/cranData/CAST/R/ffs.R
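# A hedged sketch (helper names are hypothetical) contrasting the search effort of ffs()
# and bss(): the upper bound used above to pre-allocate perf_all is
# choose(n, minVar) + (n - minVar) * (n - minVar + 1) / 2 models, which grows roughly
# quadratically in n, while the exhaustive bss() search grows exponentially.
ffs_max_models <- function(n, minVar = 2) choose(n, minVar) + (n - minVar) * (n - minVar + 1) / 2
bss_models     <- function(n) 2^n - n - 1
n <- c(6, 10, 14)
data.frame(n = n, ffs = ffs_max_models(n), bss = bss_models(n))
#  n ffs   bss
#  6  25    57
# 10  81  1013
# 14 169 16369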
#' Calculate Euclidean nearest neighbor distances in geographic space or feature space #' #' @description Calculates nearest neighbor distances in geographic space or feature space between training data as well as between training data and prediction locations. #' Optionally, the nearest neighbor distances between training data and test data or between training data and CV iterations are computed. #' @param x object of class sf, training data locations #' @param modeldomain SpatRaster, stars or sf object defining the prediction area (see Details) #' @param type "geo" or "feature". Should the distance be computed in geographic space or in the normalized multivariate predictor space (see Details) #' @param cvfolds optional. list or vector. Either a list where each element contains the data points used for testing during the cross validation iteration (i.e. held back data). #' Or a vector that contains the ID of the fold for each training point. See e.g. ?createFolds or ?CreateSpacetimeFolds or ?nndm #' @param cvtrain optional. List of row indices of x to fit the model to in each CV iteration. If cvtrain is null but cvfolds is not, all samples but those included in cvfolds are used as training data #' @param testdata optional. object of class sf: Point data used for independent validation #' @param preddata optional. object of class sf: Point data indicating the locations within the modeldomain to be used as target prediction points. Useful when the prediction objective is a subset of #' locations within the modeldomain rather than the whole area. #' @param samplesize numeric. How many prediction samples should be used? #' @param sampling character. How to draw prediction samples? See \link[sp]{spsample}. Use sampling = "Fibonacci" for global applications. #' @param variables character vector defining the predictor variables used if type="feature". If not provided, all variables included in modeldomain are used. #' @return A data.frame containing the distances. Unit of returned geographic distances is meters. Attributes contain the W statistic between the prediction area and either sample data, CV folds or test data. See details. #' @details The modeldomain is a sf polygon or a raster that defines the prediction area. The function takes a regular point sample (amount defined by samplesize) from the spatial extent. #' If type = "feature", the argument modeldomain (and if provided then also the testdata and/or preddata) has to include predictors. Predictor values for x, testdata and preddata are optional if modeldomain is a raster. #' If not provided, they are extracted from the modeldomain raster. #' W statistic describes the match between the distributions. See Linnenbrink et al. (2023) for further details. 
#' @note See Meyer and Pebesma (2022) for an application of this plotting function #' @seealso \code{\link{nndm}} \code{\link{knndm}} #' @import ggplot2 #' @author Hanna Meyer, Edzer Pebesma, Marvin Ludwig #' @examples #' \dontrun{ #' library(CAST) #' library(sf) #' library(terra) #' library(caret) #' library(rnaturalearth) #' library(ggplot2) #' #' data(splotdata) #' studyArea <- rnaturalearth::ne_countries(continent = "South America", returnclass = "sf") #' #' ########### Distance between training data and new data: #' dist <- geodist(splotdata, studyArea) #' plot(dist) #' #' ########### Distance between training data, new data and test data (here Chile): #' plot(splotdata[,"Country"]) #' dist <- geodist(splotdata[splotdata$Country != "Chile",], studyArea, #' testdata = splotdata[splotdata$Country == "Chile",]) #' plot(dist) #' #' ########### Distance between training data, new data and CV folds: #' folds <- createFolds(1:nrow(splotdata), k=3, returnTrain=FALSE) #' dist <- geodist(x=splotdata, modeldomain=studyArea, cvfolds=folds) #' plot(dist) #' #' ########### Distances in the feature space: #' predictors <- terra::rast(system.file("extdata","predictors_chile.tif", package="CAST")) #' dist <- geodist(x = splotdata, #' modeldomain = predictors, #' type = "feature", #' variables = c("bio_1","bio_12", "elev")) #' plot(dist) #' #' dist <- geodist(x = splotdata[splotdata$Country != "Chile",], #' modeldomain = predictors, cvfolds = folds, #' testdata = splotdata[splotdata$Country == "Chile",], #' type = "feature", #' variables=c("bio_1","bio_12", "elev")) #' plot(dist) #' #' ############ Example for a random global dataset #' ############ (refer to figure in Meyer and Pebesma 2022) #' #' ### Define prediction area (here: global): #' ee <- st_crs("+proj=eqearth") #' co <- ne_countries(returnclass = "sf") #' co.ee <- st_transform(co, ee) #' #' ### Simulate a spatial random sample #' ### (alternatively replace pts_random by a real sampling dataset (see Meyer and Pebesma 2022): #' sf_use_s2(FALSE) #' pts_random <- st_sample(co.ee, 2000, exact=FALSE) #' #' ### See points on the map: #' ggplot() + geom_sf(data = co.ee, fill="#00BFC4",col="#00BFC4") + #' geom_sf(data = pts_random, color = "#F8766D",size=0.5, shape=3) + #' guides(fill = "none", col = "none") + #' labs(x = NULL, y = NULL) #' #' ### plot distances: #' dist <- geodist(pts_random,co.ee) #' plot(dist) + scale_x_log10(labels=round) #' #' #' #' #'} #' @export geodist <- function(x, modeldomain, type = "geo", cvfolds=NULL, cvtrain=NULL, testdata=NULL, preddata=NULL, samplesize=2000, sampling = "regular", variables=NULL){ # input formatting ------------ if (inherits(modeldomain, "Raster")) { # if (!requireNamespace("raster", quietly = TRUE)) # stop("package raster required: install that first") message("Raster will soon not longer be supported. 
Use terra or stars instead") modeldomain <- methods::as(modeldomain,"SpatRaster") } if (inherits(modeldomain, "stars")) { if (!requireNamespace("stars", quietly = TRUE)) stop("package stars required: install that first") modeldomain <- methods::as(modeldomain, "SpatRaster") } x <- sf::st_transform(x,4326) if(type == "feature"){ if(is.null(variables)){ variables <- names(modeldomain) } if(any(!variables%in%names(x))){ # extract variable values of raster: message("features are extracted from the modeldomain") x <- sf::st_transform(x,sf::st_crs(modeldomain)) if(class(x)[1]=="sfc_POINT"){ x <- sf::st_as_sf(x) } #x <- sf::st_as_sf(raster::extract(modeldomain, x, df = TRUE, sp = TRUE)) x <- sf::st_as_sf(terra::extract(modeldomain, x, na.rm=FALSE,bind=TRUE)) x <- sf::st_transform(x,4326) } if(!is.null(testdata)){ if(any(!variables%in%names(testdata))){# extract variable values of raster: testdata <- sf::st_transform(testdata,sf::st_crs(modeldomain)) #testdata <- sf::st_as_sf(raster::extract(modeldomain, testdata, df = TRUE, sp = TRUE)) testdata <- sf::st_as_sf(terra::extract(modeldomain, testdata, na.rm=FALSE,bind=TRUE)) if(any(is.na(testdata))){ testdata <- na.omit(testdata) message("some test data were removed because of NA in extracted predictor values") } testdata <- sf::st_transform(testdata,4326) } } if(!is.null(preddata)){ if(any(!variables%in%names(preddata))){# extract variable values of raster: preddata <- sf::st_transform(preddata,sf::st_crs(modeldomain)) #preddata <- sf::st_as_sf(raster::extract(modeldomain, preddata, df = TRUE, sp = TRUE)) preddata <- sf::st_as_sf(terra::extract(modeldomain, preddata, na.rm=FALSE,bind=TRUE)) if(any(is.na(preddata))){ preddata <- na.omit(preddata) message("some prediction data were removed because of NA in extracted predictor values") } preddata <- sf::st_transform(preddata,4326) } } } # required steps ---- ## Sample prediction location from the study area if preddata not available: if(is.null(preddata)){ modeldomain <- sampleFromArea(modeldomain, samplesize, type,variables,sampling)} else{ modeldomain <- preddata } # always do sample-to-sample and sample-to-prediction s2s <- sample2sample(x, type,variables) s2p <- sample2prediction(x, modeldomain, type, samplesize,variables) dists <- rbind(s2s, s2p) # optional steps ---- ##### Distance to test data: if(!is.null(testdata)){ s2t <- sample2test(x, testdata, type,variables) dists <- rbind(dists, s2t) } ##### Distance to CV data: if(!is.null(cvfolds)){ cvd <- cvdistance(x, cvfolds, cvtrain, type, variables) dists <- rbind(dists, cvd) } class(dists) <- c("geodist", class(dists)) attr(dists, "type") <- type ##### Compute W statistics # if(type == "geo"){ # take this condition out once tested for feature space as well W_sample <- twosamples::wass_stat(dists[dists$what == "sample-to-sample", "dist"], dists[dists$what == "prediction-to-sample", "dist"]) attr(dists, "W_sample") <- W_sample if(!is.null(testdata)){ W_test <- twosamples::wass_stat(dists[dists$what == "test-to-sample", "dist"], dists[dists$what == "prediction-to-sample", "dist"]) attr(dists, "W_test") <- W_test } if(!is.null(cvfolds)){ W_CV <- twosamples::wass_stat(dists[dists$what == "CV-distances", "dist"], dists[dists$what == "prediction-to-sample", "dist"]) attr(dists, "W_CV") <- W_CV } # } return(dists) } # Sample to Sample Distance sample2sample <- function(x, type,variables){ if(type == "geo"){ sf::sf_use_s2(TRUE) d <- sf::st_distance(x) diag(d) <- Inf min_d <- apply(d, 1, min) sampletosample <- data.frame(dist = min_d, what = 
factor("sample-to-sample"), dist_type = "geo") }else if(type == "feature"){ x <- x[,variables] x <- sf::st_drop_geometry(x) scaleparam <- attributes(scale(x)) x <- data.frame(scale(x)) x_clean <- data.frame(x[complete.cases(x),]) # sample to sample feature distance d <- c() for (i in 1:nrow(x_clean)){ trainDist <- FNN::knnx.dist(x_clean[i,],x_clean,k=1) trainDist[i] <- NA d <- c(d,min(trainDist,na.rm=T)) } sampletosample <- data.frame(dist = d, what = factor("sample-to-sample"), dist_type = "feature") } return(sampletosample) } # Sample to Prediction sample2prediction = function(x, modeldomain, type, samplesize,variables){ if(type == "geo"){ modeldomain <- sf::st_transform(modeldomain, sf::st_crs(x)) sf::sf_use_s2(TRUE) d0 <- sf::st_distance(modeldomain, x) min_d0 <- apply(d0, 1, min) sampletoprediction <- data.frame(dist = min_d0, what = factor("prediction-to-sample"), dist_type = "geo") }else if(type == "feature"){ x <- x[,variables] x <- sf::st_drop_geometry(x) scaleparam <- attributes(scale(x)) x <- data.frame(scale(x)) x_clean <- x[complete.cases(x),] modeldomain <- modeldomain[,variables] modeldomain <- sf::st_drop_geometry(modeldomain) modeldomain <- data.frame(scale(modeldomain,center=scaleparam$`scaled:center`, scale=scaleparam$`scaled:scale`)) target_dist_feature <- c() for (i in 1:nrow(modeldomain)){ trainDist <- FNN::knnx.dist(modeldomain[i,],x_clean,k=1) target_dist_feature <- c(target_dist_feature,min(trainDist,na.rm=T)) } sampletoprediction <- data.frame(dist = target_dist_feature, what = "prediction-to-sample", dist_type = "feature") } return(sampletoprediction) } # sample to test sample2test <- function(x, testdata, type,variables){ if(type == "geo"){ testdata <- sf::st_transform(testdata,4326) d_test <- sf::st_distance(testdata, x) min_d_test <- apply(d_test, 1, min) dists_test <- data.frame(dist = min_d_test, what = factor("test-to-sample"), dist_type = "geo") }else if(type == "feature"){ x <- x[,variables] x <- sf::st_drop_geometry(x) scaleparam <- attributes(scale(x)) x <- data.frame(scale(x)) x_clean <- x[complete.cases(x),] testdata <- testdata[,variables] testdata <- sf::st_drop_geometry(testdata) testdata <- data.frame(scale(testdata,center=scaleparam$`scaled:center`, scale=scaleparam$`scaled:scale`)) test_dist_feature <- c() for (i in 1:nrow(testdata)){ testDist <- FNN::knnx.dist(testdata[i,],x_clean,k=1) test_dist_feature <- c(test_dist_feature,min(testDist,na.rm=T)) } dists_test <- data.frame(dist = test_dist_feature, what = "test-to-sample", dist_type = "feature") } return(dists_test) } # between folds cvdistance <- function(x, cvfolds, cvtrain, type, variables){ if(!is.null(cvfolds)&!is.list(cvfolds)){ # restructure input if CVtest only contains the fold ID tmp <- list() for (i in unique(cvfolds)){ tmp[[i]] <- which(cvfolds==i) } cvfolds <- tmp } if(type == "geo"){ d_cv <- c() for (i in 1:length(cvfolds)){ if(!is.null(cvtrain)){ d_cv_tmp <- sf::st_distance(x[cvfolds[[i]],], x[cvtrain[[i]],]) }else{ d_cv_tmp <- sf::st_distance(x[cvfolds[[i]],], x[-cvfolds[[i]],]) } d_cv <- c(d_cv,apply(d_cv_tmp, 1, min)) } dists_cv <- data.frame(dist = d_cv, what = factor("CV-distances"), dist_type = "geo") }else if(type == "feature"){ x <- x[,variables] x <- sf::st_drop_geometry(x) x <- data.frame(scale(x)) d_cv <- c() for(i in 1:length(cvfolds)){ if(!is.null(cvtrain)){ testdata_i <- x[cvfolds[[i]],] traindata_i <- x[cvtrain[[i]],] }else{ testdata_i <- x[cvfolds[[i]],] traindata_i <- x[-cvfolds[[i]],] } testdata_i <- testdata_i[complete.cases(testdata_i),] traindata_i <- 
traindata_i[complete.cases(traindata_i),] for (k in 1:nrow(testdata_i)){ trainDist <- tryCatch(FNN::knnx.dist(testdata_i[k,],traindata_i,k=1), error = function(e)e) if(inherits(trainDist, "error")){ trainDist <- NA message("warning: no distance could be calculated for a fold. Possibly because predictor values are NA") } trainDist[k] <- NA d_cv <- c(d_cv,min(trainDist,na.rm=T)) } } dists_cv <- data.frame(dist = d_cv, what = factor("CV-distances"), dist_type = "feature") } return(dists_cv) } sampleFromArea <- function(modeldomain, samplesize, type,variables,sampling){ ##### Distance to prediction locations: # regularly spread points (prediction locations): # see https://edzer.github.io/OGH21/ if(inherits(modeldomain, "Raster")){ modeldomain <- terra::rast(modeldomain) } if(inherits(modeldomain, "SpatRaster")) { if(samplesize>terra::ncell(modeldomain)){ samplesize <- terra::ncell(modeldomain) message(paste0("samplesize for new data shouldn't be larger than number of pixels. Samplesize was reduced to ",terra::ncell(modeldomain))) } #create mask to sample from: template <- modeldomain[[1]] terra::values(template)[!is.na(terra::values(template))] <-1 modeldomainextent <- terra::as.polygons(template) |> sf::st_as_sf() |> sf::st_geometry() }else{ modeldomainextent <- modeldomain } sf::sf_use_s2(FALSE) sf::st_as_sf(modeldomainextent) |> sf::st_transform(4326) -> bb methods::as(bb, "Spatial") |> sp::spsample(n = samplesize, type = sampling) |> sf::st_as_sfc() |> sf::st_set_crs(4326) -> predictionloc predictionloc <- sf::st_as_sf(predictionloc) if(type == "feature"){ modeldomain <- terra::project(modeldomain, "epsg:4326") predictionloc <- sf::st_as_sf(terra::extract(modeldomain,terra::vect(predictionloc),bind=TRUE)) predictionloc <- na.omit(predictionloc) } return(predictionloc) }
/scratch/gouwar.j/cran-all/cranData/CAST/R/geodist.R
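# A hedged, simulated sketch (not package code) of the nearest-neighbour logic behind
# geodist() in geographic space: for every training point the distance to its nearest
# other training point, and for every prediction point the distance to its nearest
# training point. The square polygon and sample sizes are illustrative assumptions.
library(sf)
set.seed(1)
poly  <- sf::st_polygon(list(matrix(c(0,0,0,100,100,100,100,0,0,0), ncol = 2, byrow = TRUE)))
train <- sf::st_sample(poly, 50, type = "random")
pred  <- sf::st_sample(poly, 200, type = "regular")
d_tt <- sf::st_distance(train)
diag(d_tt) <- Inf                                    # exclude self-distances
sample_to_sample     <- apply(d_tt, 1, min)
prediction_to_sample <- apply(sf::st_distance(pred, train), 1, min)
summary(sample_to_sample)
summary(prediction_to_sample)                        # compare the two distributions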
#' Evaluate 'global' cross-validation #' @description Calculate validation metrics using all held back predictions at once #' @param model an object of class \code{\link{train}} #' @return regression (\code{\link{postResample}}) or classification (\code{\link{confusionMatrix}}) statistics #' @details Relevant when folds are not representative of the entire area of interest. #' In this case, metrics like R2 are not meaningful since they do not reflect the general ability of #' the model to explain the entire gradient of the response. #' Comparable to LOOCV, predictions from all held back folds are used here together to calculate validation statistics. #' @author Hanna Meyer #' @seealso \code{\link{CreateSpacetimeFolds}} #' @examples #' dat <- readRDS(system.file("extdata","Cookfarm.RDS",package="CAST")) #' dat <- dat[sample(1:nrow(dat),500),] #' indices <- CreateSpacetimeFolds(dat,"SOURCEID","Date") #' ctrl <- caret::trainControl(method="cv",index = indices$index,savePredictions="final") #' model <- caret::train(dat[,c("DEM","TWI","BLD")],dat$VW, method="rf", trControl=ctrl, ntree=10) #' global_validation(model) #' @export global_validation #' @aliases global_validation global_validation <- function(model){ predictions <- model$pred if(is.null(predictions)){stop("Global performance could not be estimated because predictions were not saved. Train model with savePredictions='final'")} ### only use predictions of best tune: for (i in 1:length(model$bestTune)){ predictions <- predictions[predictions[,names(model$bestTune)[i]]==model$bestTune[,i],] } obs <- predictions$obs pred <- predictions$pred if(model$modelType=="Regression"){ out <- caret::postResample(pred = pred, obs = obs) }else{ out <- caret::confusionMatrix(pred, obs)$overall } return(out) }
/scratch/gouwar.j/cran-all/cranData/CAST/R/global_validation.R
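# A toy sketch (made-up numbers, not package code) of why global_validation() pools all
# held-back predictions instead of averaging per-fold statistics: when folds cover
# different parts of the response gradient, per-fold and pooled Rsquared can tell very
# different stories.
library(caret)
obs  <- c(1, 2, 3, 10, 11, 12)            # two folds sitting on different parts of the gradient
pred <- c(1.5, 2.5, 2.5, 10.5, 10.5, 12.5)
fold <- rep(1:2, each = 3)
sapply(1:2, function(f) caret::postResample(pred[fold == f], obs[fold == f])["Rsquared"])
caret::postResample(pred, obs)            # pooled statistics, as used by global_validation()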
#' K-fold Nearest Neighbour Distance Matching #' @description #' This function implements the kNNDM algorithm and returns the necessary #' indices to perform a k-fold NNDM CV for map validation. #' #' @author Carles Milà and Jan Linnenbrink #' @param tpoints sf or sfc point object. Contains the training point samples. #' @param modeldomain sf polygon object defining the prediction area. Optional; alternative to ppoints (see Details). #' @param ppoints sf or sfc point object. Contains the target prediction points. Optional; alternative to modeldomain (see Details). #' @param space character. Only "geographical" knndm, i.e. kNNDM in the geographical space, is currently implemented. #' @param k integer. Number of folds desired for CV. Defaults to 10. #' @param maxp numeric. Maximum fold size allowed, defaults to 0.5, i.e. a single fold can hold a maximum of half of the training points. #' @param clustering character. Possible values include "hierarchical" and "kmeans". See details. #' @param linkf character. Only relevant if clustering = "hierarchical". Link function for agglomerative hierarchical clustering. #' Defaults to "ward.D2". Check `stats::hclust` for other options. #' @param samplesize numeric. How many points in the modeldomain should be sampled as prediction points? #' Only required if modeldomain is used instead of ppoints. #' @param sampling character. How to draw prediction points from the modeldomain? See `sf::st_sample`. #' Only required if modeldomain is used instead of ppoints. #' #' @return An object of class \emph{knndm} consisting of a list of eight elements: #' indx_train, indx_test (indices of the observations to use as #' training/test data in each kNNDM CV iteration), Gij (distances for #' G function construction between prediction and target points), Gj #' (distances for G function construction during LOO CV), Gjstar (distances #' for modified G function during kNNDM CV), clusters (list of cluster IDs), #' W (Wasserstein statistic), and space (stated by the user in the function call). #' #' @details #' knndm is a k-fold version of NNDM LOO CV for medium and large datasets. Briefly, the algorithm tries to #' find a k-fold configuration such that the integral of the absolute differences (Wasserstein W statistic) #' between the empirical nearest neighbour distance distribution function between the test and training data during CV (Gj*), #' and the empirical nearest neighbour distance distribution function between the prediction and training points (Gij), #' is minimised. It does so by performing clustering of the training points' coordinates for different numbers of #' clusters that range from k to N (number of observations), merging them into k final folds, #' and selecting the configuration with the lowest W. #' #' Using a projected CRS in `knndm` has large computational advantages since fast nearest neighbour search can be #' done via the `FNN` package, while working with geographic coordinates requires computing the full #' spherical distance matrices. As a clustering algorithm, `kmeans` can only be used for #' projected CRS while `hierarchical` can work with both projected and geographical coordinates, though it requires #' calculating the full distance matrix of the training points even for a projected CRS. #' #' In order to select between clustering algorithms and number of folds `k`, different `knndm` configurations can be run #' and compared; the configuration with the lowest W statistic offers the best match. 
W statistics between `knndm` #' runs are comparable as long as `tpoints` and `ppoints` or `modeldomain` stay the same. #' #' Map validation using knndm should be used using `CAST::global_validation`, i.e. by stacking all out-of-sample #' predictions and evaluating them all at once. The reasons behind this are 1) The resulting folds can be #' unbalanced and 2) nearest neighbour functions are constructed and matched using all CV folds simultaneously. #' #' If training data points are very clustered with respect to the prediction area and the presented knndm #' configuration still show signs of Gj* > Gij, there are several things that can be tried. First, increase #' the `maxp` parameter; this may help to control for strong clustering (at the cost of having unbalanced folds). #' Secondly, decrease the number of final folds `k`, which may help to have larger clusters. #' #' The `modeldomain` is a sf polygon that defines the prediction area. The function takes a regular point sample #' (amount defined by `samplesize`) from the spatial extent. As an alternative use `ppoints` instead of #' `modeldomain`, if you have already defined the prediction locations (e.g. raster pixel centroids). #' When using either `modeldomain` or `ppoints`, we advise to plot the study area polygon and the #' training/prediction points as a previous step to ensure they are aligned. #' #' @note Experimental cycle. Article describing and testing the algorithm in preparation. #' @references #' \itemize{ #' \item Linnenbrink, J., Milà, C., Ludwig, M., and Meyer, H.: kNNDM: k-fold Nearest Neighbour Distance Matching Cross-Validation for map accuracy estimation, EGUsphere [preprint], https://doi.org/10.5194/egusphere-2023-1308, 2023. #' \item Milà, C., Mateu, J., Pebesma, E., Meyer, H. (2022): Nearest Neighbour Distance Matching Leave-One-Out Cross-Validation for map validation. Methods in Ecology and Evolution 00, 1– 13. #' } #' @seealso \code{\link{geodist}}, \code{\link{nndm}} #' #' @export #' @examples #' ######################################################################## #' # Example 1: Simulated data - Randomly-distributed training points #' ######################################################################## #' #' library(sf) #' library(ggplot2) #' #' # Simulate 1000 random training points in a 100x100 square #' set.seed(1234) #' simarea <- list(matrix(c(0,0,0,100,100,100,100,0,0,0), ncol=2, byrow=TRUE)) #' simarea <- sf::st_polygon(simarea) #' train_points <- sf::st_sample(simarea, 1000, type = "random") #' pred_points <- sf::st_sample(simarea, 1000, type = "regular") #' plot(simarea) #' plot(pred_points, add = TRUE, col = "blue") #' plot(train_points, add = TRUE, col = "red") #' #' # Run kNNDM for the whole domain, here the prediction points are known. 
#' knndm_folds <- knndm(train_points, ppoints = pred_points, k = 5) #' knndm_folds #' plot(knndm_folds) #' folds <- as.character(knndm_folds$clusters) #' ggplot() + #' geom_sf(data = simarea, alpha = 0) + #' geom_sf(data = train_points, aes(col = folds)) #' #' ######################################################################## #' # Example 2: Simulated data - Clustered training points #' ######################################################################## #' \dontrun{ #' library(sf) #' library(ggplot2) #' #' # Simulate 1000 clustered training points in a 100x100 square #' set.seed(1234) #' simarea <- list(matrix(c(0,0,0,100,100,100,100,0,0,0), ncol=2, byrow=TRUE)) #' simarea <- sf::st_polygon(simarea) #' train_points <- clustered_sample(simarea, 1000, 50, 5) #' pred_points <- sf::st_sample(simarea, 1000, type = "regular") #' plot(simarea) #' plot(pred_points, add = TRUE, col = "blue") #' plot(train_points, add = TRUE, col = "red") #' #' # Run kNNDM for the whole domain, here the prediction points are known. #' knndm_folds <- knndm(train_points, ppoints = pred_points, k = 5) #' knndm_folds #' plot(knndm_folds) #' folds <- as.character(knndm_folds$clusters) #' ggplot() + #' geom_sf(data = simarea, alpha = 0) + #' geom_sf(data = train_points, aes(col = folds)) #'} #' ######################################################################## #' # Example 3: Real- world example; using a modeldomain instead of previously #' # sampled prediction locations #' ######################################################################## #' \dontrun{ #' library(sf) #' library(terra) #' library(ggplot2) #' #' ### prepare sample data: #' dat <- readRDS(system.file("extdata","Cookfarm.RDS",package="CAST")) #' dat <- aggregate(dat[,c("DEM","TWI", "NDRE.M", "Easting", "Northing","VW")], #' by=list(as.character(dat$SOURCEID)),mean) #' pts <- dat[,-1] #' pts <- st_as_sf(pts,coords=c("Easting","Northing")) #' st_crs(pts) <- 26911 #' studyArea <- rast(system.file("extdata","predictors_2012-03-25.tif",package="CAST")) #' studyArea[!is.na(studyArea)] <- 1 #' studyArea <- as.polygons(studyArea, values = FALSE, na.all = TRUE) |> #' st_as_sf() |> #' st_union() #' pts <- st_transform(pts, crs = st_crs(studyArea)) #' plot(studyArea) #' plot(st_geometry(pts), add = TRUE, col = "red") #' #' knndm_folds <- knndm(pts, modeldomain=studyArea, k = 5) #' knndm_folds #' plot(knndm_folds) #' folds <- as.character(knndm_folds$clusters) #' ggplot() + #' geom_sf(data = pts, aes(col = folds)) #' #' #use for cross-validation: #' library(caret) #' ctrl <- trainControl(method="cv", #' index=knndm_folds$indx_train, #' savePredictions='final') #' model_knndm <- train(dat[,c("DEM","TWI", "NDRE.M")], #' dat$VW, #' method="rf", #' trControl = ctrl) #' global_validation(model_knndm) #'} knndm <- function(tpoints, modeldomain = NULL, ppoints = NULL, space = "geographical", k = 10, maxp = 0.5, clustering = "hierarchical", linkf = "ward.D2", samplesize = 1000, sampling = "regular"){ # create sample points from modeldomain if(is.null(ppoints)&!is.null(modeldomain)){ if(!identical(sf::st_crs(tpoints), sf::st_crs(modeldomain))){ stop("tpoints and modeldomain must have the same CRS") } message(paste0(samplesize, " prediction points are sampled from the modeldomain")) ppoints <- sf::st_sample(x = modeldomain, size = samplesize, type = sampling) sf::st_crs(ppoints) <- sf::st_crs(modeldomain) } # Conditional preprocessing actions if (any(class(tpoints) %in% "sfc")) { tpoints <- sf::st_sf(geom = tpoints) } if (any(class(ppoints) %in% "sfc")) { 
ppoints <- sf::st_sf(geom = ppoints) } if(is.na(sf::st_crs(tpoints))){ warning("Missing CRS in training or prediction points. Assuming projected CRS.") islonglat <- FALSE }else{ islonglat <- sf::st_is_longlat(tpoints) } # Prior checks check_knndm(tpoints, ppoints, space, k, maxp, clustering, islonglat) # kNNDM in the geographical space (currently only option) if(isTRUE(space == "geographical")){ knndm_res <- knndm_geo(tpoints, ppoints, k, maxp, clustering, linkf, islonglat) } # Output knndm_res } # kNNDM checks check_knndm <- function(tpoints, ppoints, space, k, maxp, clustering, islonglat){ if(!identical(sf::st_crs(tpoints), sf::st_crs(ppoints))){ stop("tpoints and ppoints must have the same CRS") } if (!(clustering %in% c("kmeans", "hierarchical"))) { stop("clustering must be one of `kmeans` or `hierarchical`") } if (space != "geographical") { stop("Only kNNDM in the geographical space is currently implemented.") } if (!(maxp < 1 & maxp > 1/k)) { stop("maxp must be strictly between 1/k and 1") } if(isTRUE(islonglat) & clustering == "kmeans"){ stop("kmeans works in the Euclidean space and therefore can only handle projected coordinates. Please use hierarchical clustering or project your data.") } } # kNNDM in the geographical space knndm_geo <- function(tpoints, ppoints, k, maxp, clustering, linkf, islonglat){ # Gj and Gij calculation tcoords <- sf::st_coordinates(tpoints)[,1:2] if(isTRUE(islonglat)){ distmat <- sf::st_distance(tpoints) units(distmat) <- NULL diag(distmat) <- NA Gj <- apply(distmat, 1, function(x) min(x, na.rm=TRUE)) Gij <- sf::st_distance(ppoints, tpoints) units(Gij) <- NULL Gij <- apply(Gij, 1, min) }else{ Gj <- c(FNN::knn.dist(tcoords, k = 1)) Gij <- c(FNN::knnx.dist(query = sf::st_coordinates(ppoints)[,1:2], data = tcoords, k = 1)) } # Check if Gj > Gij (warning suppressed regarding ties) testks <- suppressWarnings(stats::ks.test(Gj, Gij, alternative = "great")) if(testks$p.value >= 0.05){ clust <- sample(rep(1:k, ceiling(nrow(tpoints)/k)), size = nrow(tpoints), replace=F) if(isTRUE(islonglat)){ Gjstar <- distclust_geo(distmat, clust) }else{ Gjstar <- distclust_proj(tcoords, clust) } k_final <- "random CV" W_final <- twosamples::wass_stat(Gjstar, Gij) message("Gij <= Gj; a random CV assignment is returned") }else{ if(clustering == "hierarchical"){ # For hierarchical clustering we need to compute the full distance matrix, # but we can integrate geographical distances if(!isTRUE(islonglat)){ distmat <- sf::st_distance(tpoints) } hc <- stats::hclust(d = stats::as.dist(distmat), method = linkf) } # Build grid of number of clusters to try - we sample low numbers more intensively clustgrid <- data.frame(nk = as.integer(round(exp(seq(log(k), log(nrow(tpoints)-2), length.out = 100))))) clustgrid$W <- NA clustgrid <- clustgrid[!duplicated(clustgrid$nk),] clustgroups <- list() # Compute 1st PC for ordering clusters pcacoords <- stats::prcomp(tcoords, center = TRUE, scale. 
= FALSE, rank = 1) # We test each number of clusters for(nk in clustgrid$nk){ # Create nk clusters if(clustering == "hierarchical"){ clust_nk <- stats::cutree(hc, k=nk) }else if(clustering == "kmeans"){ clust_nk <- stats::kmeans(tcoords, nk)$cluster } tabclust <- as.data.frame(table(clust_nk)) tabclust$clust_k <- NA # compute cluster centroids and apply PC loadings to shuffle along the 1st dimension centr_tpoints <- sapply(tabclust$clust_nk, function(x){ centrpca <- matrix(apply(tcoords[clust_nk %in% x, , drop=FALSE], 2, mean), nrow = 1) colnames(centrpca) <- colnames(tcoords) return(predict(pcacoords, centrpca)) }) tabclust$centrpca <- centr_tpoints tabclust <- tabclust[order(tabclust$centrpca),] # We don't merge big clusters clust_i <- 1 for(i in 1:nrow(tabclust)){ if(tabclust$Freq[i] >= nrow(tpoints)/k){ tabclust$clust_k[i] <- clust_i clust_i <- clust_i + 1 } } rm("clust_i") # And we merge the remaining into k groups clust_i <- setdiff(1:k, unique(tabclust$clust_k)) tabclust$clust_k[is.na(tabclust$clust_k)] <- rep(clust_i, ceiling(nk/length(clust_i)))[1:sum(is.na(tabclust$clust_k))] tabclust2 <- data.frame(ID = 1:length(clust_nk), clust_nk = clust_nk) tabclust2 <- merge(tabclust2, tabclust, by = "clust_nk") tabclust2 <- tabclust2[order(tabclust2$ID),] clust_k <- tabclust2$clust_k # Compute W statistic if not exceeding maxp if(!any(table(clust_k)/length(clust_k)>maxp)){ if(isTRUE(islonglat)){ Gjstar_i <- distclust_geo(distmat, clust_k) }else{ Gjstar_i <- distclust_proj(tcoords, clust_k) } clustgrid$W[clustgrid$nk==nk] <- twosamples::wass_stat(Gjstar_i, Gij) clustgroups[[paste0("nk", nk)]] <- clust_k } } # Final configuration k_final <- clustgrid$nk[which.min(clustgrid$W)] W_final <- min(clustgrid$W, na.rm=T) clust <- clustgroups[[paste0("nk", k_final)]] if(isTRUE(islonglat)){ Gjstar <- distclust_geo(distmat, clust) }else{ Gjstar <- distclust_proj(tcoords, clust) } } # Output cfolds <- CAST::CreateSpacetimeFolds(data.frame(clust=clust), spacevar = "clust", k = k) res <- list(clusters = clust, indx_train = cfolds$index, indx_test = cfolds$indexOut, Gij = Gij, Gj = Gj, Gjstar = Gjstar, W = W_final, method = clustering, q = k_final, space = "geographical") class(res) <- c("knndm", "list") res } # Helper function: Compute out-of-fold NN distance (geographical) distclust_geo <- function(distm, folds){ alldist <- rep(NA, length(folds)) for(f in unique(folds)){ alldist[f == folds] <- apply(distm[f == folds, f != folds, drop=FALSE], 1, min) } alldist } # Helper function: Compute out-of-fold NN distance (projected) distclust_proj <- function(tr_coords, folds){ alldist <- rep(NA, length(folds)) for(f in unique(folds)){ alldist[f == folds] <- c(FNN::knnx.dist(query = tr_coords[f == folds,,drop=FALSE], data = tr_coords[f != folds,,drop=FALSE], k = 1)) } alldist }
/scratch/gouwar.j/cran-all/cranData/CAST/R/knndm.R
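# A minimal, hedged usage sketch of the knndm() workflow defined above. The simulated
# square, the point counts and the chosen k/maxp/clustering values are assumptions made
# only for illustration; the missing CRS triggers the projected-CRS fallback handled in knndm().
library(sf)
library(CAST)
set.seed(1)
square  <- sf::st_sfc(sf::st_polygon(list(matrix(c(0,0, 0,100, 100,100, 100,0, 0,0),
                                                 ncol = 2, byrow = TRUE))))
tpoints <- sf::st_sample(square, 50, type = "random")    # training locations
ppoints <- sf::st_sample(square, 200, type = "regular")  # prediction locations
folds   <- knndm(tpoints, ppoints = ppoints, k = 5, maxp = 0.5, clustering = "hierarchical")
folds$W                 # Wasserstein statistic of the returned fold configuration
table(folds$clusters)   # fold sizes
# folds$indx_train / folds$indx_test can be passed to caret::trainControl(index, indexOut)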
#' Nearest Neighbour Distance Matching (NNDM) algorithm #' @description #' This function implements the NNDM algorithm and returns the necessary #' indices to perform a NNDM LOO CV for map validation. #' @author Carles Milà #' @param tpoints sf or sfc point object. Contains the training point samples. #' @param modeldomain sf polygon object defining the prediction area (see Details). #' @param ppoints sf or sfc point object. Contains the target prediction points. #' Optional. Alternative to modeldomain (see Details). #' @param samplesize numeric. How many points in the modeldomain should be sampled as prediction points? #' Only required if modeldomain is used instead of ppoints. #' @param sampling character. How to draw prediction points from the modeldomain? See `sf::st_sample`. #' Only required if modeldomain is used instead of ppoints. #' @param phi Numeric. Estimate of the landscape autocorrelation range in the #' same units as the tpoints and ppoints for projected CRS, in meters for geographic CRS. #' By default (phi="max"), the size of the prediction area is used. See Details. #' @param min_train Numeric between 0 and 1. Minimum proportion of training #' data that must be used in each CV fold. Defaults to 0.5 (i.e. half of the training points). #' #' @return An object of class \emph{nndm} consisting of a list of seven elements: #' indx_train, indx_test, and indx_exclude (indices of the observations to use as #' training/test/excluded data in each NNDM LOO CV iteration), Gij (distances for #' G function construction between prediction and target points), Gj #' (distances for G function construction during LOO CV), Gjstar (distances #' for modified G function during NNDM LOO CV), phi (landscape autocorrelation range). #' indx_train and indx_test can directly be used as "index" and "indexOut" in #' caret's \code{\link{trainControl}} function or used to initiate a custom validation strategy in mlr3. #' #' @details NNDM proposes a LOO CV scheme such that the nearest neighbour distance distribution function between the test and training data during the CV process is matched to the nearest neighbour #' distance distribution function between the prediction and training points. Details of the method can be found in Milà et al. (2022). #' #' Specifying \emph{phi} allows limiting distance matching to the area where this is assumed to be relevant due to spatial autocorrelation. #' Distances are only matched up to \emph{phi}. Beyond that range, all data points are used for training, without exclusions. #' When \emph{phi} is set to "max", nearest neighbour distance matching is performed for the entire prediction area. Euclidean distances are used for projected #' and non-defined CRS, great circle distances are used for geographic CRS (units in meters). #' #' The \emph{modeldomain} is an sf polygon that defines the prediction area. The function takes a regular point sample (amount defined by \emph{samplesize}) from the spatial extent. #' As an alternative use \emph{ppoints} instead of \emph{modeldomain}, if you have already defined the prediction locations (e.g. raster pixel centroids). #' When using either \emph{modeldomain} or \emph{ppoints}, we advise plotting the study area polygon and the training/prediction points beforehand to ensure they are aligned. #' #' @note NNDM is a variation of LOOCV and therefore may take a long time for large training data sets. #' A k-fold variant will be implemented shortly. 
#' @seealso \code{\link{geodist}}, \code{\link{knndm}} #' @references #' \itemize{ #' \item Milà, C., Mateu, J., Pebesma, E., Meyer, H. (2022): Nearest Neighbour Distance Matching Leave-One-Out Cross-Validation for map validation. Methods in Ecology and Evolution 00, 1– 13. #' \item Meyer, H., Pebesma, E. (2022): Machine learning-based global maps of ecological variables and the challenge of assessing them. Nature Communications. 13. #' } #' @export #' @examples #' ######################################################################## #' # Example 1: Simulated data - Randomly-distributed training points #' ######################################################################## #' #' library(sf) #' #' # Simulate 100 random training points in a 100x100 square #' set.seed(123) #' poly <- list(matrix(c(0,0,0,100,100,100,100,0,0,0), ncol=2, byrow=TRUE)) #' sample_poly <- sf::st_polygon(poly) #' train_points <- sf::st_sample(sample_poly, 100, type = "random") #' pred_points <- sf::st_sample(sample_poly, 100, type = "regular") #' plot(sample_poly) #' plot(pred_points, add = TRUE, col = "blue") #' plot(train_points, add = TRUE, col = "red") #' #' # Run NNDM for the whole domain, here the prediction points are known #' nndm_pred <- nndm(train_points, ppoints=pred_points) #' nndm_pred #' plot(nndm_pred) #' #' # ...or run NNDM with a known autocorrelation range of 10 #' # to restrict the matching to distances lower than that. #' nndm_pred <- nndm(train_points, ppoints=pred_points, phi = 10) #' nndm_pred #' plot(nndm_pred) #' #' ######################################################################## #' # Example 2: Simulated data - Clustered training points #' ######################################################################## #' #' library(sf) #' #' # Simulate 100 clustered training points in a 100x100 square #' set.seed(123) #' poly <- list(matrix(c(0,0,0,100,100,100,100,0,0,0), ncol=2, byrow=TRUE)) #' sample_poly <- sf::st_polygon(poly) #' train_points <- clustered_sample(sample_poly, 100, 10, 5) #' pred_points <- sf::st_sample(sample_poly, 100, type = "regular") #' plot(sample_poly) #' plot(pred_points, add = TRUE, col = "blue") #' plot(train_points, add = TRUE, col = "red") #' #' # Run NNDM for the whole domain #' nndm_pred <- nndm(train_points, ppoints=pred_points) #' nndm_pred #' plot(nndm_pred) #' #' ######################################################################## #' # Example 3: Real- world example; using a modeldomain instead of previously #' # sampled prediction locations #' ######################################################################## #' \dontrun{ #' library(sf) #' library(terra) #' #' ### prepare sample data: #' dat <- readRDS(system.file("extdata","Cookfarm.RDS",package="CAST")) #' dat <- aggregate(dat[,c("DEM","TWI", "NDRE.M", "Easting", "Northing","VW")], #' by=list(as.character(dat$SOURCEID)),mean) #' pts <- dat[,-1] #' pts <- st_as_sf(pts,coords=c("Easting","Northing")) #' st_crs(pts) <- 26911 #' studyArea <- rast(system.file("extdata","predictors_2012-03-25.tif",package="CAST")) #' studyArea[!is.na(studyArea)] <- 1 #' studyArea <- as.polygons(studyArea, values = FALSE, na.all = TRUE) |> #' st_as_sf() |> #' st_union() #' pts <- st_transform(pts, crs = st_crs(studyArea)) #' plot(studyArea) #' plot(st_geometry(pts), add = TRUE, col = "red") #' #' nndm_folds <- nndm(pts, modeldomain= studyArea) #' plot(nndm_folds) #' #' #use for cross-validation: #' library(caret) #' ctrl <- trainControl(method="cv", #' index=nndm_folds$indx_train, #' 
indexOut=nndm_folds$indx_test, #' savePredictions='final') #' model_nndm <- train(dat[,c("DEM","TWI", "NDRE.M")], #' dat$VW, #' method="rf", #' trControl = ctrl) #' global_validation(model_nndm) #'} #' nndm <- function(tpoints, modeldomain = NULL, ppoints = NULL, samplesize = 1000, sampling = "regular", phi = "max", min_train = 0.5){ # create sample points from modeldomain if(is.null(ppoints)&!is.null(modeldomain)){ if(!identical(sf::st_crs(tpoints), sf::st_crs(modeldomain))){ stop("tpoints and modeldomain must have the same CRS") } message(paste0(samplesize, " prediction points are sampled from the modeldomain")) ppoints <- sf::st_sample(x = modeldomain, size = samplesize, type = sampling) sf::st_crs(ppoints) <- sf::st_crs(modeldomain) }else if(!is.null(ppoints)){ if(!identical(sf::st_crs(tpoints), sf::st_crs(ppoints))){ stop("tpoints and ppoints must have the same CRS") } } # If tpoints is sfc, coerce to sf. if(any(class(tpoints) %in% "sfc")){ tpoints <- sf::st_sf(geom=tpoints) } # If ppoints is sfc, coerce to sf. if(any(class(ppoints) %in% "sfc")){ ppoints <- sf::st_sf(geom=ppoints) } # if phi==max calculate the range of the size area if(phi=="max"){ xmin <- min(sf::st_coordinates(ppoints)[,1]) xmax <- max(sf::st_coordinates(ppoints)[,1]) ymin <- min(sf::st_coordinates(ppoints)[,2]) ymax <- max(sf::st_coordinates(ppoints)[,2]) p <- sf::st_sfc(sf::st_point(c(xmin,ymin)), sf::st_point(c(xmax,ymax))) sf::st_crs(p) <- sf::st_crs(ppoints) phi <- as.numeric(max(sf::st_distance(p))) } # Input data checks nndm_checks(tpoints, ppoints, phi, min_train) # Compute nearest neighbour distances between training and prediction points Gij <- sf::st_distance(ppoints, tpoints) units(Gij) <- NULL Gij <- apply(Gij, 1, min) # Compute distance matrix of training points tdist <- sf::st_distance(tpoints) units(tdist) <- NULL diag(tdist) <- NA Gj <- apply(tdist, 1, function(x) min(x, na.rm=TRUE)) Gjstar <- Gj # Start algorithm rmin <- min(Gjstar) jmin <- which.min(Gjstar)[1] kmin <- which(tdist[jmin,]==rmin) while(rmin <= phi){ # Check if removing the point improves the match. 
If yes, update if((sum(Gjstar<=rmin)-1)/length(Gjstar) >= (sum(Gij<=rmin)/length(Gij)) & sum(!is.na(tdist[jmin, ]))/ncol(tdist) > min_train){ tdist[jmin, kmin] <- NA Gjstar <- apply(tdist, 1, function(x) min(x, na.rm=TRUE)) rmin <- min(Gjstar[Gjstar>=rmin]) # Distances are the same for the same pair jmin <- which(Gjstar==rmin)[1] kmin <- which(tdist[jmin,]==rmin) }else if(sum(Gjstar>rmin)==0){ break }else{ # Otherwise move on to the next distance rmin <- min(Gjstar[Gjstar>rmin]) jmin <- which(Gjstar==rmin)[1] kmin <- which(tdist[jmin,]==rmin) } } # Derive indicators indx_train <- list() indx_test <- list() indx_exclude <- list() for(i in 1:nrow(tdist)){ indx_train[[i]] <- which(!is.na(tdist[i,])) indx_test[[i]] <- i indx_exclude[[i]] <- setdiff(which(is.na(tdist[i,])), i) } # Return list of indices res <- list(indx_train=indx_train, indx_test=indx_test, indx_exclude=indx_exclude, Gij=Gij, Gj=Gj, Gjstar=Gjstar, phi=phi) class(res) <- c("nndm", "list") res } # Input data checks for NNDM nndm_checks <- function(tpoints, ppoints, phi, min_train){ # Check for valid range of phi if(phi < 0 | !is.numeric(phi)){ stop("phi must be positive.") } # min_train must be a single positive numeric if(length(min_train)!=1 | min_train<0 | min_train>1 | !is.numeric(min_train)){ stop("min_train must be a numeric between 0 and 1.") } # Check class and geometry type of tpoints if(!any(c("sfc", "sf") %in% class(tpoints))){ stop("tpoints must be a sf/sfc object.") }else if(!any(class(sf::st_geometry(tpoints)) %in% c("sfc_POINT"))){ stop("tpoints must be a sf/sfc point object.") } # Check class and geometry type of ppoints if(!any(c("sfc", "sf") %in% class(ppoints))){ stop("ppoints must be a sf/sfc object.") }else if(!any(class(sf::st_geometry(ppoints)) %in% c("sfc_POINT"))){ stop("ppoints must be a sf/sfc point object.") } }
/scratch/gouwar.j/cran-all/cranData/CAST/R/nndm.R
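# A small base-R sketch (toy data, assumed for illustration only) of the two distance
# distributions that nndm() matches, built from the same ingredients as the function
# body above: Gij via sf::st_distance(ppoints, tpoints) and Gj from the training points.
library(sf)
set.seed(42)
square <- sf::st_sfc(sf::st_polygon(list(matrix(c(0,0, 0,100, 100,100, 100,0, 0,0),
                                                ncol = 2, byrow = TRUE))))
tr  <- sf::st_sample(square, 40, type = "random")    # training points
pr  <- sf::st_sample(square, 200, type = "regular")  # prediction points
Gij <- apply(sf::st_distance(pr, tr), 1, min)        # prediction-to-training NN distances
dtr <- sf::st_distance(tr)
diag(dtr) <- NA
Gj  <- apply(dtr, 1, min, na.rm = TRUE)              # training-to-training NN distances
plot(stats::ecdf(Gij), main = "Gij (black) vs Gj (red)")
lines(stats::ecdf(Gj), col = "red")
# nndm() excludes training points during LOO CV until the ECDF of Gjstar tracks that of Gij up to phi.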
#' Plot CAST classes #' @description Generic plot function for CAST Classes #' #' @name plot #' @param x trainDI object #' @param ... other params #' #' #' @author Marvin Ludwig, Hanna Meyer #' @export plot.trainDI = function(x, ...){ ggplot(data.frame(TrainDI = x$trainDI), aes_string(x = "TrainDI"))+ geom_density()+ geom_vline(aes(xintercept = x$threshold, linetype = "AOA_threshold"))+ scale_linetype_manual(name = "", values = c(AOA_threshold = "dashed"))+ theme_bw()+ theme(legend.position="bottom") } #' @name plot #' #' @param x aoa object #' @param samplesize numeric. How many prediction samples should be plotted? #' @param ... other params #' #' @import ggplot2 #' #' @author Marvin Ludwig, Hanna Meyer #' #' @export plot.aoa = function(x, samplesize = 1000, ...){ trainDI = data.frame(DI = x$parameters$trainDI, what = "trainDI") if(inherits(x$AOA, "RasterLayer")){ targetDI = terra::spatSample(methods::as(x$DI, "SpatRaster"), size = samplesize, method="regular") targetDI = data.frame(DI = as.numeric(targetDI[,1]), what = "predictionDI") }else if(inherits(x$AOA, "stars")){ targetDI = terra::spatSample(methods::as(x$DI, "SpatRaster"), size = samplesize,method="regular") targetDI = data.frame(DI = as.numeric(targetDI[,1]), what = "predictionDI") }else if(inherits(x$AOA, "SpatRaster")){ targetDI = terra::spatSample(x$DI, size = samplesize,method="regular") targetDI = data.frame(DI = as.numeric(targetDI[,1]), what = "predictionDI") }else{ targetDI = data.frame(DI = sample(x$DI, size = samplesize), what = "predictionDI") } dfDI = rbind(trainDI, targetDI) ggplot(dfDI, aes_string(x = "DI", group = "what", fill = "what"))+ geom_density(adjust=1.5, alpha=.4)+ scale_fill_discrete(name = "Set")+ geom_vline(aes(xintercept = x$parameters$threshold, linetype = "AOA_threshold"))+ scale_linetype_manual(name = "", values = c(AOA_threshold = "dashed"))+ theme_bw()+ theme(legend.position = "bottom") } #' @name plot #' @param x An object of type \emph{nndm}. #' @param ... other arguments. 
#' @author Carles Milà #' #' @export plot.nndm <- function(x, ...){ # Prepare data for plotting: Gij function Gij_df <- data.frame(r=x$Gij[order(x$Gij)]) Gij_df$val <- 1:nrow(Gij_df)/nrow(Gij_df) Gij_df <- Gij_df[Gij_df$r <= x$phi,] Gij_df <- rbind(Gij_df, data.frame(r=0, val=0)) Gij_df <- rbind(Gij_df, data.frame(r=x$phi, val=sum(x$Gij<=x$phi)/length(x$Gij))) Gij_df$Function <- "1_Gij(r)" # Prepare data for plotting: Gjstar function Gjstar_df <- data.frame(r=x$Gjstar[order(x$Gjstar)]) Gjstar_df$val <- 1:nrow(Gjstar_df)/nrow(Gjstar_df) Gjstar_df <- Gjstar_df[Gjstar_df$r <= x$phi,] Gjstar_df <- rbind(Gjstar_df, data.frame(r=0, val=0)) Gjstar_df <- rbind(Gjstar_df, data.frame(r=x$phi, val=sum(x$Gjstar<=x$phi)/length(x$Gjstar))) Gjstar_df$Function <- "2_Gjstar(r)" # Prepare data for plotting: G function Gj_df <- data.frame(r=x$Gj[order(x$Gj)]) Gj_df$val <- 1:nrow(Gj_df)/nrow(Gj_df) Gj_df <- Gj_df[Gj_df$r <= x$phi,] Gj_df <- rbind(Gj_df, data.frame(r=0, val=0)) Gj_df <- rbind(Gj_df, data.frame(r=x$phi, val=sum(x$Gj<=x$phi)/length(x$Gj))) Gj_df$Function <- "3_Gj(r)" # Merge data for plotting Gplot <- rbind(Gij_df, Gjstar_df, Gj_df) # Plot ggplot2::ggplot(Gplot) + ggplot2::geom_step(ggplot2::aes_string(x="r", y="val", colour="Function", size="Function"), alpha = 0.8) + ggplot2::scale_size_manual(values=c(1.1, 1.1, 0.5), labels=c(expression(hat(G)[ij](r)), expression(hat(G)[j]^"*"*"(r,"*bold(L)*")"), expression(hat(G)[j](r)))) + ggplot2::scale_colour_manual(values=c("#000000", "#E69F00", "#56B4E9"), labels=c(expression(hat(G)[ij](r)), expression(hat(G)[j]^"*"*"(r,"*bold(L)*")"), expression(hat(G)[j](r)))) + ggplot2::ylab(expression(paste(hat(G)[ij](r), ", ", hat(G)[j]^"*"*"(r,"*bold(L)*")", ", ", hat(G)[j](r)))) + ggplot2::labs(colour="", size="") + ggplot2::theme_bw() + ggplot2::theme(legend.text.align=0, legend.text=ggplot2::element_text(size=12)) } #' @name plot #' @param x An object of type \emph{knndm}. #' @param ... other arguments. #' @author Carles Milà #' #' @export plot.knndm <- function(x, ...){ # Prepare data for plotting: Gij function Gij_df <- data.frame(r=x$Gij[order(x$Gij)]) Gij_df$Function <- "1_Gij(r)" # Prepare data for plotting: Gjstar function Gjstar_df <- data.frame(r=x$Gjstar[order(x$Gjstar)]) Gjstar_df$Function <- "2_Gjstar(r)" # Prepare data for plotting: G function Gj_df <- data.frame(r=x$Gj[order(x$Gj)]) Gj_df$Function <- "3_Gj(r)" # Merge data for plotting Gplot <- rbind(Gij_df, Gjstar_df, Gj_df) # Plot ggplot2::ggplot(data=Gplot, ggplot2::aes_string(x="r", group="Function", col="Function")) + ggplot2::geom_vline(xintercept=0, lwd = 0.1) + ggplot2::geom_hline(yintercept=0, lwd = 0.1) + ggplot2::geom_hline(yintercept=1, lwd = 0.1) + ggplot2::stat_ecdf(geom = "step", lwd = 1) + ggplot2::scale_colour_manual(values=c("#000000", "#E69F00", "#56B4E9"), labels=c(expression(hat(G)[ij](r)), expression(hat(G)[j]^"*"*"(r,L)"), expression(hat(G)[j](r)))) + ggplot2::ylab(expression(paste(hat(G)[ij](r), ", ", hat(G)[j]^"*"*"(r,L)", ", ", hat(G)[j](r)))) } #' Plot results of a Forward feature selection or best subset selection #' @description A plotting function for a forward feature selection result. #' Each point is the mean performance of a model run. Error bars represent #' the standard errors from cross validation. #' Marked points show the best model from each number of variables until a further variable #' could not improve the results. #' If type=="selected", the contribution of the selected variables to the model #' performance is shown. 
#' @param x Result of a forward feature selection see \code{\link{ffs}} #' @param plotType character. Either "all" or "selected" #' @param palette A color palette #' @param reverse Character. Should the palette be reversed? #' @param marker Character. Color to mark the best models #' @param size Numeric. Size of the points #' @param lwd Numeric. Width of the error bars #' @param pch Numeric. Type of point marking the best models #' @param ... Further arguments for base plot if type="selected" #' @author Marvin Ludwig and Hanna Meyer #' @seealso \code{\link{ffs}}, \code{\link{bss}} #' @examples #' \dontrun{ #' data(splotdata) #' splotdata <- st_drop_geometry(splotdata) #' ffsmodel <- ffs(splotdata[,6:16], splotdata$Species_richness, ntree = 10) #' plot(ffsmodel) #' #plot performance of selected variables only: #' plot(ffsmodel,plotType="selected") #'} #' @name plot #' @importFrom forcats fct_rev fct_inorder #' @export plot.ffs <- function(x,plotType="all",palette=rainbow,reverse=FALSE, marker="black",size=1.5,lwd=0.5, pch=21,...){ metric <- x$metric if (is.null(x$type)){ x$type <- "ffs" } if(is.null(x$minVar)){ x$minVar <- 2 } if(x$type=="bss"&plotType=="selected"){ type <- "all" print("warning: type must be 'all' for a bss model") } if (plotType=="selected"){ plot_df = data.frame(labels = forcats::fct_rev(forcats::fct_inorder(c(paste(x$selectedvars[1:x$minVar], collapse = "\n + "), paste("+", x$selectedvars[-1:-x$minVar], sep = " ")))), perf = x$selectedvars_perf, perfse = x$selectedvars_perf_SE) p <- ggplot(plot_df, aes_string(x = "perf", y = "labels"))+ geom_point()+ geom_segment(aes_string(x = "perf - perfse", xend = "perf + perfse", y = "labels", yend = "labels"))+ xlab(x$metric)+ ylab(NULL) return(p) }else{ output_df <- x$perf_all output_df$run <- seq(nrow(output_df)) names(output_df)[which(names(output_df)==metric)] <- "value" if (x$type=="bss"){ bestmodels <- output_df$run[which(output_df$value==x$selectedvars_perf)] }else{ bestmodels <- c() for (i in unique(output_df$nvar)){ if (x$maximize){ bestmodels <- c(bestmodels, output_df$run[output_df$nvar==i][which(output_df$value[ output_df$nvar==i]==max(output_df$value[output_df$nvar==i]))][1]) }else{ bestmodels <- c(bestmodels, output_df$run[output_df$nvar==i][which(output_df$value[ output_df$nvar==i]==min(output_df$value[output_df$nvar==i]))][1]) } } bestmodels <- bestmodels[1:(length(x$selectedvars)-1)] } if (!reverse){ cols <- palette(max(output_df$nvar)-(min(output_df$nvar)-1)) }else{ cols <- rev(palette(max(output_df$nvar)-(min(output_df$nvar)-1))) } ymin <- output_df$value - output_df$SE ymax <- output_df$value + output_df$SE if (max(output_df$nvar)>11){ p <- ggplot2::ggplot(output_df, ggplot2::aes_string(x = "run", y = "value"))+ ggplot2::geom_errorbar(ggplot2::aes(ymin = ymin, ymax = ymax), color = cols[output_df$nvar-(min(output_df$nvar)-1)],lwd=lwd)+ ggplot2::geom_point(ggplot2::aes_string(colour="nvar"),size=size)+ ggplot2::geom_point(data=output_df[bestmodels, ], ggplot2::aes_string(x = "run", y = "value"), pch=pch,colour=marker,lwd=size)+ ggplot2::scale_x_continuous(name = "Model run", breaks = pretty(output_df$run))+ ggplot2::scale_y_continuous(name = metric)+ ggplot2::scale_colour_gradientn(breaks=seq(2,max(output_df$nvar), by=ceiling(max(output_df$nvar)/5)), colours = cols, name = "variables",guide = "colourbar") }else{ dfint <- output_df dfint$nvar <- as.factor(dfint$nvar) p <- ggplot2::ggplot(dfint, ggplot2::aes_string(x = "run", y = "value"))+ ggplot2::geom_errorbar(ggplot2::aes(ymin = ymin, ymax = ymax), color = 
cols[output_df$nvar-(min(output_df$nvar)-1)],lwd=lwd)+ ggplot2::geom_point(ggplot2::aes_string(colour="nvar"),size=size)+ ggplot2::geom_point(data=output_df[bestmodels, ], ggplot2::aes_string(x = "run", y = "value"), pch=pch,colour=marker,lwd=size)+ ggplot2::scale_x_continuous(name = "Model run", breaks = pretty(dfint$run))+ ggplot2::scale_y_continuous(name = metric)+ ggplot2::scale_colour_manual(values = cols, name = "variables") } return(p) } } #' @name plot #' @description Density plot of nearest neighbor distances in geographic space or feature space between training data as well as between training data and prediction locations. #' Optional, the nearest neighbor distances between training data and test data or between training data and CV iterations is shown. #' The plot can be used to check the suitability of a chosen CV method to be representative to estimate map accuracy. #' @param x geodist, see \code{\link{geodist}} #' @param unit character. Only if type=="geo" and only applied to the plot. Supported: "m" or "km". #' @param stat "density" for density plot or "ecdf" for empirical cumulative distribution function plot. #' @export #' @return a ggplot #' plot.geodist <- function(x, unit = "m", stat = "density", ...){ type <- attr(x, "type") if(unit=="km"){ x$dist <- x$dist/1000 xlabs <- "geographic distances (km)" }else{ xlabs <- "geographic distances (m)" } if( type=="feature"){ xlabs <- "feature space distances"} what <- "" #just to avoid check note if (type=="feature"){unit ="unitless"} if(stat=="density"){ p <- ggplot2::ggplot(data=x, aes(x=dist, group=what, fill=what)) + ggplot2::geom_density(adjust=1.5, alpha=.4, stat=stat) + ggplot2::scale_fill_discrete(name = "distance function") + ggplot2::xlab(xlabs) + ggplot2::theme(legend.position="bottom", plot.margin = unit(c(0,0.5,0,0),"cm")) }else if(stat=="ecdf"){ p <- ggplot2::ggplot(data=x, aes(x=dist, group=what, col=what)) + ggplot2::geom_vline(xintercept=0, lwd = 0.1) + ggplot2::geom_hline(yintercept=0, lwd = 0.1) + ggplot2::geom_hline(yintercept=1, lwd = 0.1) + ggplot2::stat_ecdf(geom = "step", lwd = 1) + ggplot2::scale_color_discrete(name = "distance function") + ggplot2::xlab(xlabs) + ggplot2::ylab("ECDF") + ggplot2::theme(legend.position="bottom", plot.margin = unit(c(0,0.5,0,0),"cm")) } p } #' @name plot #' @description Plot the DI and errormetric from Cross-Validation with the modelled relationship #' @param x errorModel, see \code{\link{DItoErrormetric}} #' @param ... other params #' @export #' @return a ggplot #' plot.errorModel <- function(x, ...){ performance = attr(x, "performance")[,c("DI", "metric")] performance$what = "cross-validation" model_line = data.frame(DI = performance$DI, metric = predict(x, performance), what = "model") p = ggplot()+ geom_point(data = performance, mapping = aes_string(x = "DI", y = "metric", shape = "what"))+ geom_line(data = model_line, mapping = aes_string(x = "DI", y = "metric", linetype = "what"), lwd = 1)+ theme(legend.title = element_blank(), legend.position = "bottom") return(p) }
/scratch/gouwar.j/cran-all/cranData/CAST/R/plot.R
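# The functions above are S3 plot methods dispatched on CAST classes. A minimal sketch
# of that dispatch using an nndm object; the simulated square mirrors the nndm()
# examples and is an assumption made only for illustration.
library(sf)
library(CAST)
set.seed(123)
square <- sf::st_sfc(sf::st_polygon(list(matrix(c(0,0, 0,100, 100,100, 100,0, 0,0),
                                                ncol = 2, byrow = TRUE))))
tr <- sf::st_sample(square, 60, type = "random")
pr <- sf::st_sample(square, 150, type = "regular")
nd <- nndm(tr, ppoints = pr)   # NNDM LOO CV object
plot(nd)                       # dispatches to plot.nndm(): Gij, Gjstar and Gj step functions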
#' Plot results of a Forward feature selection or best subset selection #' @description plot_ffs() is deprecated and will be removed soon. Please use generic plot() function on ffs object. #' A plotting function for a forward feature selection result. #' Each point is the mean performance of a model run. Error bars represent #' the standard errors from cross validation. #' Marked points show the best model from each number of variables until a further variable #' could not improve the results. #' If type=="selected", the contribution of the selected variables to the model #' performance is shown. #' @param ffs_model Result of a forward feature selection see \code{\link{ffs}} #' @param plotType character. Either "all" or "selected" #' @param palette A color palette #' @param reverse Character. Should the palette be reversed? #' @param marker Character. Color to mark the best models #' @param size Numeric. Size of the points #' @param lwd Numeric. Width of the error bars #' @param pch Numeric. Type of point marking the best models #' @param ... Further arguments for base plot if type="selected" #' @author Marvin Ludwig and Hanna Meyer #' @seealso \code{\link{ffs}}, \code{\link{bss}} #' @examples #' \dontrun{ #' data(iris) #' ffsmodel <- ffs(iris[,1:4],iris$Species) #' plot(ffsmodel) #' #plot performance of selected variables only: #' plot(ffsmodel,plotType="selected") #'} #' @export plot_ffs <- function(ffs_model,plotType="all",palette=rainbow,reverse=FALSE, marker="black",size=1.5,lwd=0.5, pch=21,...){ message("plot_ffs() is deprecated and will be removed soon. Please use generic plot() function on ffs object.") plot.ffs(x=ffs_model,plotType=plotType,palette=palette,reverse=reverse,marker=marker,size=size,lwd=lwd,pch=pch,...) }
/scratch/gouwar.j/cran-all/cranData/CAST/R/plot_ffs.R
#' Plot euclidean nearest neighbor distances in geographic space or feature space #' #' @description Density plot of nearest neighbor distances in geographic space or feature space between training data as well as between training data and prediction locations. #' Optional, the nearest neighbor distances between training data and test data or between training data and CV iterations is shown. #' The plot can be used to check the suitability of a chosen CV method to be representative to estimate map accuracy. Alternatively distances can also be calculated in the multivariate feature space. #' #' @param x object of class sf, training data locations #' @param modeldomain SpatRaster, stars or sf object defining the prediction area (see Details) #' @param type "geo" or "feature". Should the distance be computed in geographic space or in the normalized multivariate predictor space (see Details) #' @param cvfolds optional. list or vector. Either a list where each element contains the data points used for testing during the cross validation iteration (i.e. held back data). #' Or a vector that contains the ID of the fold for each training point. See e.g. ?createFolds or ?CreateSpacetimeFolds or ?nndm #' @param cvtrain optional. List of row indices of x to fit the model to in each CV iteration. If cvtrain is null but cvfolds is not, all samples but those included in cvfolds are used as training data #' @param testdata optional. object of class sf: Data used for independent validation #' @param samplesize numeric. How many prediction samples should be used? #' @param sampling character. How to draw prediction samples? See \link[sp]{spsample}. Use sampling = "Fibonacci" for global applications. #' @param variables character vector defining the predictor variables used if type="feature. If not provided all variables included in modeldomain are used. #' @param unit character. Only if type=="geo" and only applied to the plot. Supported: "m" or "km". #' @param stat "density" for density plot or "ecdf" for empirical cumulative distribution function plot. #' @param showPlot logical #' @return A list including the plot and the corresponding data.frame containing the distances. Unit of returned geographic distances is meters. #' @details The modeldomain is a sf polygon or a raster that defines the prediction area. The function takes a regular point sample (amount defined by samplesize) from the spatial extent. #' If type = "feature", the argument modeldomain (and if provided then also the testdata) has to include predictors. Predictor values for x are optional if modeldomain is a raster. #' If not provided they are extracted from the modeldomain rasterStack. 
#' @note See Meyer and Pebesma (2022) for an application of this plotting function #' @seealso \code{\link{nndm}} #' @import ggplot2 #' @author Hanna Meyer, Edzer Pebesma, Marvin Ludwig #' @examples #' \dontrun{ #' library(sf) #' library(terra) #' library(caret) #' #' ########### prepare sample data: #' dat <- readRDS(system.file("extdata","Cookfarm.RDS",package="CAST")) #' dat <- aggregate(dat[,c("DEM","TWI", "NDRE.M", "Easting", "Northing")], #' by=list(as.character(dat$SOURCEID)),mean) #' pts <- st_as_sf(dat,coords=c("Easting","Northing")) #' st_crs(pts) <- 26911 #' pts_train <- pts[1:29,] #' pts_test <- pts[30:42,] #' studyArea <- terra::rast(system.file("extdata","predictors_2012-03-25.tif",package="CAST")) #' studyArea <- studyArea[[c("DEM","TWI", "NDRE.M", "NDRE.Sd", "Bt")]] #' #' ########### Distance between training data and new data: #' dist <- plot_geodist(pts_train,studyArea) #' #' ########### Distance between training data, new data and test data: #' #mapview(pts_train,col.regions="blue")+mapview(pts_test,col.regions="red") #' dist <- plot_geodist(pts_train,studyArea,testdata=pts_test) #' #' ########### Distance between training data, new data and CV folds: #' folds <- createFolds(1:nrow(pts_train),k=3,returnTrain=FALSE) #' dist <- plot_geodist(x=pts_train, modeldomain=studyArea, cvfolds=folds) #' #' ## or use nndm to define folds #' AOI <- as.polygons(rast(studyArea), values = F) |> #' st_as_sf() |> #' st_union() |> #' st_transform(crs = st_crs(pts_train)) #' nndm_pred <- nndm(pts_train, AOI) #' dist <- plot_geodist(x=pts_train, modeldomain=studyArea, #' cvfolds=nndm_pred$indx_test, cvtrain=nndm_pred$indx_train) #' #' ########### Distances in the feature space: #' plot_geodist(x=pts_train, modeldomain=studyArea, #' type = "feature",variables=c("DEM","TWI", "NDRE.M")) #' #' dist <- plot_geodist(x=pts_train, modeldomain=studyArea, cvfolds = folds, testdata = pts_test, #' type = "feature",variables=c("DEM","TWI", "NDRE.M")) #' #'############ Example for a random global dataset #'############ (refer to figure in Meyer and Pebesma 2022) #'library(sf) #'library(rnaturalearth) #'library(ggplot2) #' #'### Define prediction area (here: global): #'ee <- st_crs("+proj=eqearth") #'co <- ne_countries(returnclass = "sf") #'co.ee <- st_transform(co, ee) #' #'### Simulate a spatial random sample #'### (alternatively replace pts_random by a real sampling dataset (see Meyer and Pebesma 2022): #'sf_use_s2(FALSE) #'pts_random <- st_sample(co.ee, 2000, exact=FALSE) #' #'### See points on the map: #'ggplot() + geom_sf(data = co.ee, fill="#00BFC4",col="#00BFC4") + #' geom_sf(data = pts_random, color = "#F8766D",size=0.5, shape=3) + #' guides(fill = FALSE, col = FALSE) + #' labs(x = NULL, y = NULL) #' #'### plot distances: #'dist <- plot_geodist(pts_random,co.ee,showPlot=FALSE) #'dist$plot+scale_x_log10(labels=round) #'} #' @export plot_geodist <- function(x, modeldomain, type = "geo", cvfolds=NULL, cvtrain=NULL, testdata=NULL, samplesize=2000, sampling = "regular", variables=NULL, unit="m", stat = "density", showPlot=TRUE){ message("plot_geodist() is deprecated and will be removed soon. \n Please use generic plot() function on geodist object from geodist().") # input formatting ------------ if (inherits(modeldomain, "Raster")) { # if (!requireNamespace("raster", quietly = TRUE)) # stop("package raster required: install that first") message("Raster will soon not longer be supported. 
Use terra or stars instead") modeldomain <- methods::as(modeldomain,"SpatRaster") } if (inherits(modeldomain, "stars")) { if (!requireNamespace("stars", quietly = TRUE)) stop("package stars required: install that first") modeldomain <- methods::as(modeldomain, "SpatRaster") } x <- sf::st_transform(x,4326) if(type == "feature"){ if(is.null(variables)){ variables <- names(modeldomain) } if(any(!variables%in%names(x))){ # extract variable values of raster: message("features are extracted from the modeldomain") x <- sf::st_transform(x,sf::st_crs(modeldomain)) if(class(x)[1]=="sfc_POINT"){ x <- sf::st_as_sf(x) } #x <- sf::st_as_sf(raster::extract(modeldomain, x, df = TRUE, sp = TRUE)) x <- sf::st_as_sf(terra::extract(modeldomain, x, na.rm=FALSE,bind=TRUE)) x <- sf::st_transform(x,4326) } if(!is.null(testdata)){ if(any(!variables%in%names(testdata))){# extract variable values of raster: testdata <- sf::st_transform(testdata,sf::st_crs(modeldomain)) #testdata <- sf::st_as_sf(raster::extract(modeldomain, testdata, df = TRUE, sp = TRUE)) testdata <- sf::st_as_sf(terra::extract(modeldomain, testdata, na.rm=FALSE,bind=TRUE)) if(any(is.na(testdata))){ testdata <- na.omit(testdata) message("some test data were removed because of NA in extracted predictor values") } testdata <- sf::st_transform(testdata,4326) } } } # required steps ---- ## Sample prediction location from the study area: modeldomain <- sampleFromArea(modeldomain, samplesize, type,variables,sampling) # always do sample-to-sample and sample-to-prediction s2s <- sample2sample(x, type,variables) s2p <- sample2prediction(x, modeldomain, type, samplesize,variables) dists <- rbind(s2s, s2p) # optional steps ---- ##### Distance to test data: if(!is.null(testdata)){ s2t <- sample2test(x, testdata, type,variables) dists <- rbind(dists, s2t) } ##### Distance to CV data: if(!is.null(cvfolds)){ cvd <- cvdistance(x, cvfolds, cvtrain, type, variables) dists <- rbind(dists, cvd) } # Compile output and plot data ---- p <- .plot.nnd(dists,type,unit,stat) if(showPlot){ print(p) } out <- list(p,dists) names(out) <- c("plot","distances") return(out) } # Sample to Sample Distance sample2sample <- function(x, type,variables){ if(type == "geo"){ sf::sf_use_s2(TRUE) d <- sf::st_distance(x) diag(d) <- Inf min_d <- apply(d, 1, min) sampletosample <- data.frame(dist = min_d, what = factor("sample-to-sample"), dist_type = "geo") }else if(type == "feature"){ x <- x[,variables] x <- sf::st_drop_geometry(x) scaleparam <- attributes(scale(x)) x <- data.frame(scale(x)) x_clean <- data.frame(x[complete.cases(x),]) # sample to sample feature distance d <- c() for (i in 1:nrow(x_clean)){ trainDist <- FNN::knnx.dist(x_clean[i,],x_clean,k=1) trainDist[i] <- NA d <- c(d,min(trainDist,na.rm=T)) } sampletosample <- data.frame(dist = d, what = factor("sample-to-sample"), dist_type = "feature") } return(sampletosample) } # Sample to Prediction sample2prediction = function(x, modeldomain, type, samplesize,variables){ if(type == "geo"){ modeldomain <- sf::st_transform(modeldomain, sf::st_crs(x)) sf::sf_use_s2(TRUE) d0 <- sf::st_distance(modeldomain, x) min_d0 <- apply(d0, 1, min) sampletoprediction <- data.frame(dist = min_d0, what = factor("prediction-to-sample"), dist_type = "geo") }else if(type == "feature"){ x <- x[,variables] x <- sf::st_drop_geometry(x) scaleparam <- attributes(scale(x)) x <- data.frame(scale(x)) x_clean <- x[complete.cases(x),] modeldomain <- modeldomain[,variables] modeldomain <- sf::st_drop_geometry(modeldomain) modeldomain <- 
data.frame(scale(modeldomain,center=scaleparam$`scaled:center`, scale=scaleparam$`scaled:scale`)) target_dist_feature <- c() for (i in 1:nrow(modeldomain)){ trainDist <- FNN::knnx.dist(modeldomain[i,],x_clean,k=1) target_dist_feature <- c(target_dist_feature,min(trainDist,na.rm=T)) } sampletoprediction <- data.frame(dist = target_dist_feature, what = "prediction-to-sample", dist_type = "feature") } return(sampletoprediction) } # sample to test sample2test <- function(x, testdata, type,variables){ if(type == "geo"){ testdata <- sf::st_transform(testdata,4326) d_test <- sf::st_distance(testdata, x) min_d_test <- apply(d_test, 1, min) dists_test <- data.frame(dist = min_d_test, what = factor("test-to-sample"), dist_type = "geo") }else if(type == "feature"){ x <- x[,variables] x <- sf::st_drop_geometry(x) scaleparam <- attributes(scale(x)) x <- data.frame(scale(x)) x_clean <- x[complete.cases(x),] testdata <- testdata[,variables] testdata <- sf::st_drop_geometry(testdata) testdata <- data.frame(scale(testdata,center=scaleparam$`scaled:center`, scale=scaleparam$`scaled:scale`)) test_dist_feature <- c() for (i in 1:nrow(testdata)){ testDist <- FNN::knnx.dist(testdata[i,],x_clean,k=1) test_dist_feature <- c(test_dist_feature,min(testDist,na.rm=T)) } dists_test <- data.frame(dist = test_dist_feature, what = "test-to-sample", dist_type = "feature") } return(dists_test) } # between folds cvdistance <- function(x, cvfolds, cvtrain, type, variables){ if(!is.null(cvfolds)&!is.list(cvfolds)){ # restructure input if CVtest only contains the fold ID tmp <- list() for (i in unique(cvfolds)){ tmp[[i]] <- which(cvfolds==i) } cvfolds <- tmp } if(type == "geo"){ d_cv <- c() for (i in 1:length(cvfolds)){ if(!is.null(cvtrain)){ d_cv_tmp <- sf::st_distance(x[cvfolds[[i]],], x[cvtrain[[i]],]) }else{ d_cv_tmp <- sf::st_distance(x[cvfolds[[i]],], x[-cvfolds[[i]],]) } d_cv <- c(d_cv,apply(d_cv_tmp, 1, min)) } dists_cv <- data.frame(dist = d_cv, what = factor("CV-distances"), dist_type = "geo") }else if(type == "feature"){ x <- x[,variables] x <- sf::st_drop_geometry(x) x <- data.frame(scale(x)) d_cv <- c() for(i in 1:length(cvfolds)){ if(!is.null(cvtrain)){ testdata_i <- x[cvfolds[[i]],] traindata_i <- x[cvtrain[[i]],] }else{ testdata_i <- x[cvfolds[[i]],] traindata_i <- x[-cvfolds[[i]],] } testdata_i <- testdata_i[complete.cases(testdata_i),] traindata_i <- traindata_i[complete.cases(traindata_i),] for (k in 1:nrow(testdata_i)){ trainDist <- tryCatch(FNN::knnx.dist(testdata_i[k,],traindata_i,k=1), error = function(e)e) if(inherits(trainDist, "error")){ trainDist <- NA message("warning: no distance could be calculated for a fold. Possibly because predictor values are NA") } trainDist[k] <- NA d_cv <- c(d_cv,min(trainDist,na.rm=T)) } } dists_cv <- data.frame(dist = d_cv, what = factor("CV-distances"), dist_type = "feature") } return(dists_cv) } sampleFromArea <- function(modeldomain, samplesize, type,variables,sampling){ ##### Distance to prediction locations: # regularly spread points (prediction locations): # see https://edzer.github.io/OGH21/ if(inherits(modeldomain, "Raster")){ modeldomain <- terra::rast(modeldomain) } if(inherits(modeldomain, "SpatRaster")) { if(samplesize>terra::ncell(modeldomain)){ samplesize <- terra::ncell(modeldomain) message(paste0("samplesize for new data shouldn't be larger than number of pixels. 
Samplesize was reduced to ",terra::ncell(modeldomain))) } #create mask to sample from: template <- modeldomain[[1]] terra::values(template)[!is.na(terra::values(template))] <-1 modeldomainextent <- terra::as.polygons(template) |> sf::st_as_sf() |> sf::st_geometry() }else{ modeldomainextent <- modeldomain } sf::sf_use_s2(FALSE) sf::st_as_sf(modeldomainextent) |> sf::st_transform(4326) -> bb methods::as(bb, "Spatial") |> sp::spsample(n = samplesize, type = sampling) |> sf::st_as_sfc() |> sf::st_set_crs(4326) -> predictionloc predictionloc <- sf::st_as_sf(predictionloc) if(type == "feature"){ modeldomain <- terra::project(modeldomain, "epsg:4326") predictionloc <- sf::st_as_sf(terra::extract(modeldomain,terra::vect(predictionloc),bind=TRUE)) predictionloc <- na.omit(predictionloc) } return(predictionloc) } # plot results .plot.nnd <- function(x,type,unit,stat){ if(unit=="km"){ x$dist <- x$dist/1000 xlabs <- "geographic distances (km)" }else{ xlabs <- "geographic distances (m)" } if( type=="feature"){ xlabs <- "feature space distances"} what <- "" #just to avoid check note if (type=="feature"){unit ="unitless"} if(stat=="density"){ p <- ggplot2::ggplot(data=x, aes(x=dist, group=what, fill=what)) + ggplot2::geom_density(adjust=1.5, alpha=.4, stat=stat) + ggplot2::scale_fill_discrete(name = "distance function") + ggplot2::xlab(xlabs) + ggplot2::theme(legend.position="bottom", plot.margin = unit(c(0,0.5,0,0),"cm")) }else if(stat=="ecdf"){ p <- ggplot2::ggplot(data=x, aes(x=dist, group=what, col=what)) + ggplot2::geom_vline(xintercept=0, lwd = 0.1) + ggplot2::geom_hline(yintercept=0, lwd = 0.1) + ggplot2::geom_hline(yintercept=1, lwd = 0.1) + ggplot2::stat_ecdf(geom = "step", lwd = 1) + ggplot2::scale_color_discrete(name = "distance function") + ggplot2::xlab(xlabs) + ggplot2::ylab("ECDF") + ggplot2::theme(legend.position="bottom", plot.margin = unit(c(0,0.5,0,0),"cm")) } }
/scratch/gouwar.j/cran-all/cranData/CAST/R/plot_geodist.R
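# The feature-space branches of the helpers above reduce to nearest-neighbour distances
# between standardised predictors computed with FNN. A standalone sketch of that idea
# with assumed toy matrices (this is not the package API itself):
library(FNN)
set.seed(7)
train_feat <- scale(matrix(rnorm(40), ncol = 2))   # scaled training predictors (20 x 2)
new_feat   <- scale(matrix(rnorm(20), ncol = 2),   # prediction predictors, scaled with the
                    center = attr(train_feat, "scaled:center"),  # training parameters
                    scale  = attr(train_feat, "scaled:scale"))
nn_dist <- c(FNN::knnx.dist(data = train_feat, query = new_feat, k = 1))
summary(nn_dist)   # distribution of prediction-to-sample distances in feature space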
#' Print CAST classes #' @description Generic print function for trainDI and aoa #' @name print #' @param x trainDI object #' @param ... other params #' @export print.trainDI = function(x, ...){ cat(paste0("DI of ", nrow(x$train), " observation \n")) cat(paste0("Predictors:"), x$variables, "\n\n") cat("AOA Threshold: ") cat(x$threshold) } #' @name print #' @param x trainDI object #' @param ... other params #' @export show.trainDI = function(x, ...){ print.trainDI(x) } #' @name print #' @param x aoa object #' @param ... other params #' @export print.aoa = function(x, ...){ cat("DI:\n") print(x$DI) cat("AOA:\n") print(x$AOA) cat("\n\nPredictor Weights:\n") print(x$parameters$weight) cat("\n\nAOA Threshold: ") cat(x$parameters$threshold) } #' @name print #' @param x aoa object #' @param ... other params #' @export show.aoa = function(x, ...){ print.aoa(x) } #' @name print #' @param x An object of type \emph{nndm}. #' @param ... other arguments. #' @export print.nndm <- function(x, ...){ mean_train <- round(mean(sapply(x$indx_train, length)), 2) min_train <- round(min(sapply(x$indx_train, length)), 2) cat(paste0("nndm object\n", "Total number of points: ", length(x$Gj), "\n", "Mean number of training points: ", mean_train, "\n", "Minimum number of training points: ", min_train, "\n")) } #' @name print #' @param x An object of type \emph{nndm}. #' @param ... other arguments. #' @export show.nndm = function(x, ...){ print.nndm(x) } #' @name print #' @param x An object of type \emph{knndm}. #' @param ... other arguments. #' @export print.knndm <- function(x, ...){ cat(paste0("knndm object\n", "Space: ", x$space, "\n", "Clustering algorithm: ", x$method, "\n", "Intermediate clusters (q): ", x$q, "\n", "W statistic: ", round(x$W, 4), "\n", "Number of folds: ", length(unique(x$clusters)), "\n", "Observations in each fold: "), table(x$clusters), "\n") } #' @name print #' @param x An object of type \emph{knndm}. #' @param ... other arguments. #' @export show.knndm = function(x, ...){ print.knndm(x) } #' @name print #' @param x An object of type \emph{ffs} #' @param ... other arguments. #' @export print.ffs = function(x, ...){ cat("Selected Variables: \n") cat(x$selectedvars) cat("\n") cat("---\n") print.train(x) } #' @name print #' @param x An object of type \emph{ffs} #' @param ... other arguments. #' @export show.ffs = function(x, ...){ print.ffs(x) }
/scratch/gouwar.j/cran-all/cranData/CAST/R/print.R
#' sPlotOpen Data of Species Richness #' #' sPlotOpen Species Richness for South America with associated predictors #' @format #' An sf points / data.frame with 703 rows and 17 columns: #' \describe{ #' \item{PlotObservationID, GIVD_ID, Country, Biome}{sPlotOpen Metadata} #' \item{Species_richness}{Response Variable - Plant species richness from sPlotOpen} #' \item{bio_x, elev}{Predictor Variables - Worldclim and SRTM elevation} #' \item{geometry}{Lat/Lon} #' } #' @source \itemize{ #' \item{Plot with Species_richness from \href{https://onlinelibrary.wiley.com/doi/full/10.1111/geb.13346}{sPlotOpen}} #' \item{predictors acquired via R package \href{https://github.com/rspatial/geodata}{geodata}} #' } #' #' @references \itemize{ #' \item{Sabatini, F. M. et al. sPlotOpen – An environmentally balanced, open‐access, global dataset of vegetation plots. (2021). \doi{10.1111/geb.13346}} #' \item{Lopez-Gonzalez, G. et al. ForestPlots.net: a web application and research tool to manage and analyse tropical forest plot data: ForestPlots.net. #' Journal of Vegetation Science (2011).} #' \item{Pauchard, A. et al. Alien Plants Homogenise Protected Areas: Evidence from the Landscape and Regional Scales in South Central Chile. in Plant Invasions in Protected Areas (2013).} #' \item{Peyre, G. et al. VegPáramo, a flora and vegetation database for the Andean páramo. phytocoenologia (2015).} #' \item{Vibrans, A. C. et al. Insights from a large-scale inventory in the southern Brazilian Atlantic Forest. Scientia Agricola (2020).} #' } #' @usage data(splotdata) #' "splotdata"
/scratch/gouwar.j/cran-all/cranData/CAST/R/splotdata.R
#' Calculate Dissimilarity Index of training data #' @description #' This function estimates the Dissimilarity Index (DI) of #' within the training data set used for a prediction model. #' Predictors can be weighted based on the internal #' variable importance of the machine learning algorithm used for model training. #' @note #' This function is called within \code{\link{aoa}} to estimate the DI and AOA of new data. #' However, it may also be used on its own if only the DI of training data is of interest, #' or to facilitate a parallelization of \code{\link{aoa}} by avoiding a repeated calculation of the DI within the training data. #' #' @param model A train object created with caret used to extract weights from (based on variable importance) as well as cross-validation folds #' @param train A data.frame containing the data used for model training. Only required when no model is given #' @param weight A data.frame containing weights for each variable. Only required if no model is given. #' @param variables character vector of predictor variables. if "all" then all variables #' of the model are used or if no model is given then of the train dataset. #' @param CVtest list or vector. Either a list where each element contains the data points used for testing during the cross validation iteration (i.e. held back data). #' Or a vector that contains the ID of the fold for each training point. #' Only required if no model is given. #' @param CVtrain list. Each element contains the data points used for training during the cross validation iteration (i.e. held back data). #' Only required if no model is given and only required if CVtrain is not the opposite of CVtest (i.e. if a data point is not used for testing, it is used for training). #' Relevant if some data points are excluded, e.g. when using \code{\link{nndm}}. #' @param method Character. Method used for distance calculation. Currently euclidean distance (L2) and Mahalanobis distance (MD) are implemented but only L2 is tested. Note that MD takes considerably longer. #' @param useWeight Logical. Only if a model is given. Weight variables according to importance in the model? #' #' @seealso \code{\link{aoa}} #' @importFrom graphics boxplot #' @import ggplot2 #' #' @return A list of class \code{trainDI} containing: #' \item{train}{A data frame containing the training data} #' \item{weight}{A data frame with weights based on the variable importance.} #' \item{variables}{Names of the used variables} #' \item{catvars}{Which variables are categorial} #' \item{scaleparam}{Scaling parameters. Output from \code{scale}} #' \item{trainDist_avrg}{A data frame with the average distance of each training point to every other point} #' \item{trainDist_avrgmean}{The mean of trainDist_avrg. Used for normalizing the DI} #' \item{trainDI}{Dissimilarity Index of the training data} #' \item{threshold}{The DI threshold used for inside/outside AOA} #' #' #' #' @export trainDI #' #' @author #' Hanna Meyer, Marvin Ludwig #' #' @references Meyer, H., Pebesma, E. (2021): Predicting into unknown space? #' Estimating the area of applicability of spatial prediction models. 
#' \doi{10.1111/2041-210X.13650} #' #' #' @examples #' \dontrun{ #' library(sf) #' library(terra) #' library(caret) #' library(viridis) #' library(ggplot2) #' #' # prepare sample data: #' dat <- readRDS(system.file("extdata","Cookfarm.RDS",package="CAST")) #' dat <- aggregate(dat[,c("VW","Easting","Northing")],by=list(as.character(dat$SOURCEID)),mean) #' pts <- st_as_sf(dat,coords=c("Easting","Northing")) #' pts$ID <- 1:nrow(pts) #' set.seed(100) #' pts <- pts[1:30,] #' studyArea <- rast(system.file("extdata","predictors_2012-03-25.tif",package="CAST"))[[1:8]] #' trainDat <- extract(studyArea,pts,na.rm=FALSE) #' trainDat <- merge(trainDat,pts,by.x="ID",by.y="ID") #' #' # visualize data spatially: #' plot(studyArea) #' plot(studyArea$DEM) #' plot(pts[,1],add=TRUE,col="black") #' #' # train a model: #' set.seed(100) #' variables <- c("DEM","NDRE.Sd","TWI") #' model <- train(trainDat[,which(names(trainDat)%in%variables)], #' trainDat$VW, method="rf", importance=TRUE, tuneLength=1, #' trControl=trainControl(method="cv",number=5,savePredictions=T)) #' print(model) #note that this is a quite poor prediction model #' prediction <- predict(studyArea,model,na.rm=TRUE) #' plot(varImp(model,scale=FALSE)) #' #' #...then calculate the DI of the trained model: #' DI = trainDI(model=model) #' plot(DI) #' #' # the DI can now be used to compute the AOA: #' AOA = aoa(studyArea, model = model, trainDI = DI) #' print(AOA) #' plot(AOA) #' } #' trainDI <- function(model = NA, train = NULL, variables = "all", weight = NA, CVtest = NULL, CVtrain = NULL, method="L2", useWeight=TRUE){ # get parameters if they are not provided in function call----- if(is.null(train)){train = aoa_get_train(model)} if(length(variables) == 1){ if(variables == "all"){ variables = aoa_get_variables(variables, model, train) } } if(is.na(weight)[1]){ if(useWeight){ weight = aoa_get_weights(model, variables = variables) }else{ message("variable are not weighted. see ?aoa") weight <- t(data.frame(rep(1,length(variables)))) names(weight) <- variables } }else{ #check if manually given weights are correct. otherwise ignore (set to 1): if(nrow(weight)!=1||ncol(weight)!=length(variables)){ message("variable weights are not correctly specified and will be ignored. 
See ?aoa") weight <- t(data.frame(rep(1,length(variables)))) names(weight) <- variables } weight <- weight[,na.omit(match(variables, names(weight)))] if (any(weight<0)){ weight[weight<0]<-0 message("negative weights were set to 0") } } # get CV folds from model or from parameters folds <- aoa_get_folds(model,CVtrain,CVtest) CVtest <- folds[[2]] CVtrain <- folds[[1]] # check for input errors ----- if(nrow(train)<=1){stop("at least two training points need to be specified")} # reduce train to specified variables train <- train[,na.omit(match(variables, names(train)))] train_backup <- train # convert categorial variables catupdate <- aoa_categorial_train(train, variables, weight) train <- catupdate$train weight <- catupdate$weight # scale train train <- scale(train) # make sure all variables have variance if (any(apply(train, 2, FUN=function(x){all(is.na(x))}))){ stop("some variables in train seem to have no variance") } # save scale param for later scaleparam <- attributes(train) # multiply train data with variable weights (from variable importance) if(!inherits(weight, "error")&!is.null(unlist(weight))){ train <- sapply(1:ncol(train),function(x){train[,x]*unlist(weight[x])}) } # calculate average mean distance between training data trainDist_avrg <- c() trainDist_min <- c() if(method=="MD"){ if(dim(train)[2] == 1){ S <- matrix(stats::var(train), 1, 1) } else { S <- stats::cov(train) } S_inv <- MASS::ginv(S) } for(i in seq(nrow(train))){ # distance to all other training data (for average) trainDistAll <- .alldistfun(t(train[i,]), train, method, S_inv=S_inv)[-1] trainDist_avrg <- append(trainDist_avrg, mean(trainDistAll, na.rm = TRUE)) # calculate distance to other training data: trainDist <- matrix(.alldistfun(t(matrix(train[i,])), train, method, sorted = FALSE, S_inv)) trainDist[i] <- NA # mask of any data that are not used for training for the respective data point (using CV) whichfold <- NA if(!is.null(CVtrain)&!is.null(CVtest)){ whichfold <- as.numeric(which(lapply(CVtest,function(x){any(x==i)})==TRUE)) # index of the fold where i is held back if(length(whichfold)>1){stop("a datapoint is used for testing in more than one fold. currently this option is not implemented")} if(length(whichfold)!=0){ # in case that a data point is never used for testing trainDist[!seq(nrow(train))%in%CVtrain[[whichfold]]] <- NA # everything that is not in the training data for i is ignored } if(length(whichfold)==0){#in case that a data point is never used for testing, the distances for that point are ignored trainDist <- NA } } ####################################### if (length(whichfold)==0){ trainDist_min <- append(trainDist_min, NA) }else{ trainDist_min <- append(trainDist_min, min(trainDist, na.rm = TRUE)) } } trainDist_avrgmean <- mean(trainDist_avrg,na.rm=TRUE) # Dissimilarity Index of training data ----- TrainDI <- trainDist_min/trainDist_avrgmean # AOA Threshold ---- threshold_quantile <- stats::quantile(TrainDI, 0.75,na.rm=TRUE) threshold_iqr <- (1.5 * stats::IQR(TrainDI,na.rm=T)) thres <- threshold_quantile + threshold_iqr # account for case that threshold_quantile + threshold_iqr is larger than maximum DI. 
if (thres>max(TrainDI,na.rm=T)){ thres <- max(TrainDI,na.rm=T) } # note: previous versions of CAST derived the threshold this way: #thres <- grDevices::boxplot.stats(TrainDI)$stats[5] # Return: trainDI Object ------- aoa_results = list( train = train_backup, weight = weight, variables = variables, catvars = catupdate$catvars, scaleparam = scaleparam, trainDist_avrg = trainDist_avrg, trainDist_avrgmean = trainDist_avrgmean, trainDI = TrainDI, threshold = thres, method = method ) class(aoa_results) = "trainDI" return(aoa_results) } ################################################################################ # Helper functions ################################################################################ # Encode categorial variables aoa_categorial_train <- function(train, variables, weight){ # get all categorial variables catvars <- tryCatch(names(train)[which(sapply(train[,variables], class)%in%c("factor","character"))], error=function(e) e) if (!inherits(catvars,"error")&length(catvars)>0){ message("warning: predictors contain categorical variables. The integration is currently still under development. Please check results carefully!") for (catvar in catvars){ # mask all unknown levels in newdata as NA (even technically no predictions can be made) train[,catvar]<-droplevels(train[,catvar]) # then create dummy variables for the remaining levels in train: dvi_train <- predict(caret::dummyVars(paste0("~",catvar), data = train), train) train <- data.frame(train,dvi_train) if(!inherits(weight, "error")){ addweights <- data.frame(t(rep(weight[,which(names(weight)==catvar)], ncol(dvi_train)))) names(addweights)<- colnames(dvi_train) weight <- data.frame(weight,addweights) } } if(!inherits(weight, "error")){ weight <- weight[,-which(names(weight)%in%catvars)] } train <- train[,-which(names(train)%in%catvars)] } return(list(train = train, weight = weight, catvars = catvars)) } # Get weights from train object aoa_get_weights = function(model, variables){ weight <- tryCatch(if(model$modelType=="Classification"){ as.data.frame(t(apply(caret::varImp(model,scale=F)$importance,1,mean))) }else{ as.data.frame(t(caret::varImp(model,scale=F)$importance[,"Overall"])) }, error=function(e) e) if(!inherits(weight, "error") & length(variables)>1){ names(weight)<- rownames(caret::varImp(model,scale=F)$importance) }else{ # set all weights to 1 weight <- as.data.frame(t(rep(1, length(variables)))) names(weight) = variables message("note: variables were not weighted either because no weights or model were given, no variable importance could be retrieved from the given model, or the model has a single feature. Check caret::varImp(model)") } #set negative weights to 0 if(!inherits(weight, "error")){ weight <- weight[,na.omit(match(variables, names(weight)))] if (any(weight<0)){ weight[weight<0]<-0 message("negative weights were set to 0") } } return(weight) } # Get trainingdata from train object aoa_get_train <- function(model){ train <- as.data.frame(model$trainingData) return(train) } # Get folds from train object aoa_get_folds <- function(model, CVtrain, CVtest){ ### if folds are to be extracted from the model: if (!is.na(model)[1]){ if(tolower(model$control$method)!="cv"){ message("note: Either no model was given or no CV was used for model training. 
The DI threshold is therefore based on all training data") }else{ CVtest <- model$control$indexOut CVtrain <- model$control$index } } ### if folds are specified manually: if(is.na(model)[1]){ if(!is.null(CVtest)&!is.list(CVtest)){ # restructure input if CVtest only contains the fold ID tmp <- list() for (i in unique(CVtest)){ tmp[[i]] <- which(CVtest==i) } CVtest <- tmp } if(is.null(CVtest)&is.null(CVtrain)){ message("note: No model and no CV folds were given. The DI threshold is therefore based on all training data") }else{ if(is.null(CVtest)){ # if CVtest is not given, then use the opposite of CVtrain CVtest <- lapply(CVtrain,function(x){which(!sort(unique(unlist(CVtrain)))%in%x)}) }else{ if(is.null(CVtrain)){ # if CVtrain is not given, then use the opposite of CVtest CVtrain <- lapply(CVtest,function(x){which(!sort(unique(unlist(CVtest)))%in%x)}) } } } } return(list(CVtrain,CVtest)) } # Get variables from train object aoa_get_variables <- function(variables, model, train){ if(length(variables) == 1){ if(variables == "all"){ if(!is.na(model)[1]){ variables <- names(model$trainingData)[-which(names(model$trainingData)==".outcome")] }else{ variables <- names(train) } } } return(variables) } .mindistfun <- function(point, reference, method, S_inv=NULL){ if (method == "L2"){ # Euclidean Distance return(c(FNN::knnx.dist(reference, point, k = 1))) } else if (method == "MD"){ # Mahalanobis Distance return(sapply(1:dim(point)[1], function(y) min(sapply(1:dim(reference)[1], function(x) sqrt( t(point[y,] - reference[x,]) %*% S_inv %*% (point[y,] - reference[x,]) ))))) } } .alldistfun <- function(point, reference, method, sorted = TRUE,S_inv=NULL){ if (method == "L2"){ # Euclidean Distance if(sorted){ return(FNN::knnx.dist(reference, point, k = dim(reference)[1])) } else { return(FNN::knnx.dist(point,reference,k=1)) } } else if (method == "MD"){ # Mahalanobis Distance if(sorted){ return(t(sapply(1:dim(point)[1], function(y) sort(sapply(1:dim(reference)[1], function(x) sqrt( t(point[y,] - reference[x,]) %*% S_inv %*% (point[y,] - reference[x,]) )))))) } else { return(t(sapply(1:dim(point)[1], function(y) sapply(1:dim(reference)[1], function(x) sqrt( t(point[y,] - reference[x,]) %*% S_inv %*% (point[y,] - reference[x,]) ))))) } } }
/scratch/gouwar.j/cran-all/cranData/CAST/R/trainDI.R
## ----setup, echo=FALSE-------------------------------------------------------- knitr::opts_chunk$set(fig.width = 8.83,cache = FALSE) user_hanna <- Sys.getenv("USER") %in% c("hanna") ## ----c1, message = FALSE, warning=FALSE--------------------------------------- #install.packages("CAST") library(CAST) ## ----c2, message = FALSE, warning=FALSE--------------------------------------- help(CAST) ## ----c3, message = FALSE, warning=FALSE--------------------------------------- data <- readRDS(system.file("extdata","Cookfarm.RDS",package="CAST")) head(data) ## ----c4, message = FALSE, warning=FALSE--------------------------------------- library(sf) data_sp <- unique(data[,c("SOURCEID","Easting","Northing")]) data_sp <- st_as_sf(data_sp,coords=c("Easting","Northing"),crs=26911) plot(data_sp,axes=T,col="black") ## ----c5, message = FALSE, warning=FALSE, eval=user_hanna---------------------- #...or plot the data with mapview: library(mapview) mapviewOptions(basemaps = c("Esri.WorldImagery")) mapview(data_sp) ## ----c6, message = FALSE, warning=FALSE--------------------------------------- library(lubridate) library(ggplot2) trainDat <- data[data$altitude==-0.3& year(data$Date)==2012& week(data$Date)%in%c(10:12),] ggplot(data = trainDat, aes(x=Date, y=VW)) + geom_line(aes(colour=SOURCEID)) ## ----c7, message = FALSE, warning=FALSE--------------------------------------- library(caret) predictors <- c("DEM","TWI","Precip_cum","cday", "MaxT_wrcc","Precip_wrcc","BLD", "Northing","Easting","NDRE.M") set.seed(10) model <- train(trainDat[,predictors],trainDat$VW, method="rf",tuneGrid=data.frame("mtry"=2), importance=TRUE,ntree=50, trControl=trainControl(method="cv",number=3)) ## ----c8, message = FALSE, warning=FALSE--------------------------------------- library(terra) predictors_sp <- rast(system.file("extdata","predictors_2012-03-25.tif",package="CAST")) prediction <- predict(predictors_sp,model,na.rm=TRUE) plot(prediction) ## ----c9, message = FALSE, warning=FALSE--------------------------------------- model ## ----c10, message = FALSE, warning=FALSE-------------------------------------- set.seed(10) indices <- CreateSpacetimeFolds(trainDat,spacevar = "SOURCEID", k=3) set.seed(10) model_LLO <- train(trainDat[,predictors],trainDat$VW, method="rf",tuneGrid=data.frame("mtry"=2), importance=TRUE, trControl=trainControl(method="cv", index = indices$index)) model_LLO ## ----c11, message = FALSE, warning=FALSE-------------------------------------- plot(varImp(model_LLO)) ## ----c12, message = FALSE, warning=FALSE-------------------------------------- set.seed(10) ffsmodel_LLO <- ffs(trainDat[,predictors],trainDat$VW,metric="Rsquared", method="rf", tuneGrid=data.frame("mtry"=2), verbose=FALSE,ntree=50, trControl=trainControl(method="cv", index = indices$index)) ffsmodel_LLO ffsmodel_LLO$selectedvars ## ----c13, message = FALSE, warning=FALSE-------------------------------------- plot(ffsmodel_LLO) ## ----c14, message = FALSE, warning=FALSE-------------------------------------- prediction_ffs <- predict(predictors_sp,ffsmodel_LLO,na.rm=TRUE) plot(prediction_ffs) ## ----c15, message = FALSE, warning=FALSE-------------------------------------- ### AOA for which the spatial CV error applies: AOA <- aoa(predictors_sp,ffsmodel_LLO) plot(prediction_ffs,main="prediction for the AOA \n(spatial CV error applied)") plot(AOA$AOA,col=c("grey","transparent"),add=T) #spplot(prediction_ffs,main="prediction for the AOA \n(spatial CV error applied)")+ #spplot(AOA$AOA,col.regions=c("grey","transparent")) ### AOA for which the 
random CV error applies: AOA_random <- aoa(predictors_sp,model) plot(prediction,main="prediction for the AOA \n(random CV error applied)") plot(AOA_random$AOA,col=c("grey","transparent"),add=T) #spplot(prediction,main="prediction for the AOA \n(random CV error applied)")+ #spplot(AOA_random$AOA,col.regions=c("grey","transparent"))
/scratch/gouwar.j/cran-all/cranData/CAST/inst/doc/cast01-CAST-intro-cookfarm.R
--- title: "1. Introduction to CAST" author: "Hanna Meyer" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: true vignette: > %\VignetteIndexEntry{Introduction to CAST} %\VignetteEncoding{UTF-8} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console --- ```{r setup, echo=FALSE} knitr::opts_chunk$set(fig.width = 8.83,cache = FALSE) user_hanna <- Sys.getenv("USER") %in% c("hanna") ``` ## Introduction !!Note: Some recent developments of CAST are not yet fully documented in this tutorial. A major update can be expected for Apr 2024!! ### Background One key task in environmental science is obtaining information of environmental variables continuously in space or in space and time, usually based on remote sensing and limited field data. In that respect, machine learning algorithms have been proven to be an important tool to learn patterns in nonlinear and complex systems. However, standard machine learning applications are not suitable for spatio-temporal data, as they usually ignore the spatio-temporal dependencies in the data. This becomes problematic in (at least) two aspects of predictive modelling: Overfitted models as well as overly optimistic error assessment (see [Meyer et al 2018](https://www.sciencedirect.com/science/article/pii/S1364815217310976) or [Meyer et al 2019](https://www.sciencedirect.com/science/article/abs/pii/S0304380019303230) ). To approach these problems, CAST supports the well-known caret package ([Kuhn 2018](https://topepo.github.io/caret/index.html) to provide methods designed for spatio-temporal data. This tutorial shows how to set up a spatio-temporal prediction model that includes objective and reliable error estimation. It further shows how spatio-temporal overfitting can be detected by comparison between validation strategies. It will be shown that certain variables are responsible for the problem of overfitting due to spatio-temporal autocorrelation patterns. Therefore, this tutorial also shows how to automatically exclude variables that lead to overfitting with the aim to improve the spatio-temporal prediction model. In order to follow this tutorial, I assume that the reader is familiar with the basics of predictive modelling nicely explained in [Kuhn and Johnson 2013](https://doi.org/10.1007/978-1-4614-6849-3) as well as machine learning applications via the caret package. ### How to start To work with the tutorial, first install the CAST package and load the library: ```{r c1, message = FALSE, warning=FALSE} #install.packages("CAST") library(CAST) ``` If you need help, see ```{r c2, message = FALSE, warning=FALSE} help(CAST) ``` ## Example of a typical spatio-temporal prediction task The example prediction task for this tutorial is the following: we have a set of data loggers distributed over a farm, and we want to map soil moisture, based on a set of spatial and temporal predictor variables. We will use Random Forests as a machine learning algorithm in this tutorial. ### Description of the example dataset To do so, we will work with the cookfarm dataset, described in e.g. [Gasch et al 2015](https://www.sciencedirect.com/science/article/pii/S2211675315000251/) and available via the GSIF package ([Hengl 2017](https://CRAN.R-project.org/package=GSIF)). The dataset included in the CAST package is a re-structured dataset which was used for the analysis in [Meyer et al 2018](https://www.sciencedirect.com/science/article/pii/S1364815217310976). 
```{r c3, message = FALSE, warning=FALSE} data <- readRDS(system.file("extdata","Cookfarm.RDS",package="CAST")) head(data) ``` I want to point out on the following information of this dataset: The "SOURCEID" represents the ID for the data logger, "VW" is soil moisture which is our response variable, "Easting" and "Northing" are the coordinates of the data loggers, "altitude" indicates the depth of the soil in which VW was measured, and the remaining columns represent different potential predictor variables which are terrain related (e.g. "DEM", "TWI"), vegetation indices (e.g. "NDRE"), soil properties (e.g. "BLD") or climate-related predictors (e.g. "Precip_wrcc"). See [Gasch et al 2015](https://www.sciencedirect.com/science/article/pii/S2211675315000251) for further description on the dataset. To get an impression on the spatial properties of the dataset, let's have a look on the spatial distribution of the data loggers on the cookfarm: ```{r c4, message = FALSE, warning=FALSE} library(sf) data_sp <- unique(data[,c("SOURCEID","Easting","Northing")]) data_sp <- st_as_sf(data_sp,coords=c("Easting","Northing"),crs=26911) plot(data_sp,axes=T,col="black") ``` ```{r c5, message = FALSE, warning=FALSE, eval=user_hanna} #...or plot the data with mapview: library(mapview) mapviewOptions(basemaps = c("Esri.WorldImagery")) mapview(data_sp) ``` We see that the data are taken at 42 locations (SOURCEID) over the field. The loggers recorded data between 2007 and 2013 (the dataset here only contains the data from 2010 on). The VW data are given here on a daily basis. ### Data subsetting To reduce the data to an amount that can be handled in a tutorial, let's restrict the data to the depth of -0.3 and to two weeks of the year 2012. After subsetting let's have an overview on the soil moisture time series measured by the data loggers. ```{r c6, message = FALSE, warning=FALSE} library(lubridate) library(ggplot2) trainDat <- data[data$altitude==-0.3& year(data$Date)==2012& week(data$Date)%in%c(10:12),] ggplot(data = trainDat, aes(x=Date, y=VW)) + geom_line(aes(colour=SOURCEID)) ``` What we can see is that (as expected) each logger location has a unique time series of soil moisture. ## Model training and prediction In the following we will use this subset of the cookfarm data as an example to spatially predict soil moisture (i.e. to map soil moisture) with (and without) consideration of the spatio-temporal dependencies. To start with, lets use this dataset to create a "default" Random Forest model that predicts soil moisture based on some predictor variables. To keep computation time at a minimum, we don't include hyperparameter tuning (hence mtry was set to 2) which is reasonable as Random Forests are comparably insensitive to tuning. ```{r c7, message = FALSE, warning=FALSE} library(caret) predictors <- c("DEM","TWI","Precip_cum","cday", "MaxT_wrcc","Precip_wrcc","BLD", "Northing","Easting","NDRE.M") set.seed(10) model <- train(trainDat[,predictors],trainDat$VW, method="rf",tuneGrid=data.frame("mtry"=2), importance=TRUE,ntree=50, trControl=trainControl(method="cv",number=3)) ``` Based on the trained model we can make spatial predictions of soil moisture. To do this we load a multiband raster that contains spatial data of all predictor variables for the 25th of March 2012 (as an example). We then apply the trained model on this data set. 
```{r c8, message = FALSE, warning=FALSE} library(terra) predictors_sp <- rast(system.file("extdata","predictors_2012-03-25.tif",package="CAST")) prediction <- predict(predictors_sp,model,na.rm=TRUE) plot(prediction) ``` The result is a spatially comprehensive map of soil moisture for this day. We see that simply creating a map using machine learning and caret is an easy task, however accurately measuring its performance is less simple. Though the map looks good on a first sight we now have to follow up with the question of how accurate this map is, hence we need to ask how well the model is able to map soil moisture. From a visible inspection it is noticeable that the model produces a strange linear features at the eastern side of the farm which looks suspicious. But let's come back to this later and first focus on a statistical validation of the model. ## Cross validation strategies for spatio-temporal data Among validation strategies, k-fold cross validation (CV) is popular to estimate the performance of the model in view to data that have not been used for model training. During CV, models are repeatedly trained (k models) and in each model run, the data of one fold are put to the side and are not used for model training but for model validation. In this way, the performance of the model can be estimated using data that have not been included in the model training. ### The Standard approach: Random k-fold CV In the example above we used a random k-fold CV that we defined in caret's trainControl argument. More specifically, we used a random 3-fold CV. Hence, the data points in our dataset were RANDOMLY split into 3 folds. To assess the performance of the model let's have a look on the output of the Random CV: ```{r c9, message = FALSE, warning=FALSE} model ``` We see that soil moisture could be modelled with a high R² (0.90) which indicates a nearly perfect fit of the data. Sounds good, but unfortunately, the random k fold CV does not give us a good indication for the map accuracy. Random k-fold CV means that each of the three folds (with the highest certainty) contains data points from each data logger. Therefore, a random CV cannot indicate the ability of the model to make predictions beyond the location of the training data (i.e. to map soil moisture). Since our aim is to map soil moisture, we rather need to perform a target-oriented validation which validates the model in view to spatial mapping. ### Target-oriented validation We are not interested in the model performance in view to random subsets of our data loggers, but we need to know how well the model is able to make predictions for areas without data loggers. To find this out, we need to repeatedly leave the complete time series of one or more data loggers out and use them as test data during CV. To do this we first need to create meaningful folds rather than random folds. CAST's function "CreateSpaceTimeFolds" is designed to provide index arguments used by caret's trainControl. The index defines which data points are used for model training during each model run and reversely defines which data points are held back. Hence, using the index argument we can account for the dependencies in the data by leaving the complete data from one or more data loggers out (LLO CV), from one or more time steps out (LTO CV) or from data loggers and time steps out (LLTO CV). 
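The chunk below creates LLO folds. For completeness, here is a short sketch (added to this tutorial and not evaluated) of how LTO and LLTO folds could be created with the same function, assuming the `Date` column of `trainDat` as the temporal identifier:

```{r, eval=FALSE}
# Leave-time-out (LTO) folds: whole time steps are withheld
indices_LTO <- CreateSpacetimeFolds(trainDat, timevar = "Date", k = 3)

# Leave-location-and-time-out (LLTO) folds: loggers and time steps withheld
indices_LLTO <- CreateSpacetimeFolds(trainDat, spacevar = "SOURCEID",
                                     timevar = "Date", k = 3)

# Either index list is passed to caret in the same way:
# trainControl(method = "cv", index = indices_LTO$index)
```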
In this example we're focusing on LLO CV, therefore we use the column "SOURCEID" to define the location of a data logger and split the data into folds using this information. Analogous to the random CV we split the data into three folds, hence three model runs are performed, each leaving one third of all data loggers out for validation. Note that several variants of spatial CV have been suggested. What we call LLO here is just a simple example. See references in [Meyer and Pebesma 2022](https://www.nature.com/articles/s41467-022-29838-9) for some examples and have a look at [Mila et al 2022](https://doi.org/10.1111/2041-210X.13851) for the methodology implemented in the CAST function nndm.

```{r c10, message = FALSE, warning=FALSE}
set.seed(10)
indices <- CreateSpacetimeFolds(trainDat,spacevar = "SOURCEID",
                                k=3)
set.seed(10)
model_LLO <- train(trainDat[,predictors],trainDat$VW,
                   method="rf",tuneGrid=data.frame("mtry"=2), importance=TRUE,
                   trControl=trainControl(method="cv",
                                          index = indices$index))
model_LLO
```

By inspecting the output of the model, we see that in view to new locations, the R² is only 0.16 so the performance is much lower than expected from the random CV (R² = 0.90). Apparently, there is considerable overfitting in the model, causing a good random performance but a poor performance in view to new locations. This might partly be attributed to the choice of variables where we must suspect that certain variables are misinterpreted by the model (see [Meyer et al 2018](https://www.sciencedirect.com/science/article/pii/S1364815217310976) or [talk at the OpenGeoHub summer school 2019](https://www.youtube.com/watch?v=mkHlmYEzsVQ)).

Let's have a look at the variable importance ranking of Random Forest and see if we find something suspicious:

```{r c11, message = FALSE, warning=FALSE}
plot(varImp(model_LLO))
```

The importance ranking indicates that among others, "Easting" is an important variable. This fits with the observation of inappropriate linear features in the predicted map. Apparently the model assigns a high importance to this variable which causes a high random CV performance. But at the same time the model fails in the prediction for new locations because the variable is unsuitable for predictions beyond the locations of the data loggers used for model training. Assuming that certain variables are misinterpreted by the algorithm, we should be able to produce a higher LLO performance when such variables are removed. Let's see if this is true...

## Removing variables that cause overfitting

CAST's forward feature selection (ffs) selects variables that make sense in view to the selected CV method and excludes those which are counterproductive (or meaningless) in view to the selected CV method. When we use LLO as CV method, ffs selects variables that lead in combination to the highest LLO performance (i.e. the best spatial model). All variables that have no spatial meaning or are even counterproductive won't improve (or will even reduce) the LLO performance and are therefore excluded from the model by ffs.

ffs does this by first training models using all possible pairs of two predictor variables. The best of these initial models is kept. On the basis of this best model the number of predictor variables is iteratively increased and each of the remaining variables is tested for its improvement of the currently best model. The process stops if none of the remaining variables increases the model performance when added to the current best model.
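As a rough note on computational cost (an illustration added here, not part of the original tutorial): with n predictors, the initial pairwise step alone trains n(n-1)/2 models, so for the 10 predictors used in this example that is 45 models before the iterative part even starts.

```{r, eval=FALSE}
# Number of two-variable models trained in the first step of ffs
n <- length(predictors)   # 10 predictor variables in this example
choose(n, 2)              # 45
```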
So let's run the ffs on our case study using R² as a metric to select the optimal variables. This process will take 1-2 minutes...

```{r c12, message = FALSE, warning=FALSE}
set.seed(10)
ffsmodel_LLO <- ffs(trainDat[,predictors],trainDat$VW,metric="Rsquared",
                    method="rf", tuneGrid=data.frame("mtry"=2),
                    verbose=FALSE,ntree=50,
                    trControl=trainControl(method="cv",
                                           index = indices$index))
ffsmodel_LLO
ffsmodel_LLO$selectedvars
```

Using the ffs with LLO CV, the R² could be increased from 0.16 to 0.28. The variables that are used for this model are "DEM","NDRE.M" and "Northing". All others are removed because they have (at least in this small example) no spatial meaning or are even counterproductive.

Using the plot function (applied to the ffs model) we can visualize how the performance of the model changed depending on the variables being used:

```{r c13, message = FALSE, warning=FALSE}
plot(ffsmodel_LLO)
```

We see that the best model using two variables led to an R² of slightly above 0.2. Using the third variable could slightly increase the R². Any further variable could not improve the LLO performance. Note that the R² features a high standard deviation regardless of the variables being used. This is due to the small dataset that was used, which cannot lead to robust results.

What effect does the new model have on the spatial representation of soil moisture?

```{r c14, message = FALSE, warning=FALSE}
prediction_ffs <- predict(predictors_sp,ffsmodel_LLO,na.rm=TRUE)
plot(prediction_ffs)
```

We see that the variable selection not only has an effect on the statistical performance, but the predicted spatial patterns also change considerably. Notably, the linear feature is no longer present in the resulting soil moisture map, most likely because "Easting" was removed from the set of predictor variables by ffs.

## Area of Applicability

Still, it is required to analyse whether the model can be applied to the entire study area or if there are locations that are very different in their predictor properties from what the model has learned. See more details in the vignette on the Area of applicability and [Meyer and Pebesma 2021](https://doi.org/10.1111/2041-210X.13650).

```{r c15, message = FALSE, warning=FALSE}
### AOA for which the spatial CV error applies:
AOA <- aoa(predictors_sp,ffsmodel_LLO)
plot(prediction_ffs,main="prediction for the AOA \n(spatial CV error applied)")
plot(AOA$AOA,col=c("grey","transparent"),add=T)

#spplot(prediction_ffs,main="prediction for the AOA \n(spatial CV error applied)")+
#spplot(AOA$AOA,col.regions=c("grey","transparent"))

### AOA for which the random CV error applies:
AOA_random <- aoa(predictors_sp,model)
plot(prediction,main="prediction for the AOA \n(random CV error applied)")
plot(AOA_random$AOA,col=c("grey","transparent"),add=T)

#spplot(prediction,main="prediction for the AOA \n(random CV error applied)")+
#spplot(AOA_random$AOA,col.regions=c("grey","transparent"))
```

The figure shows in grey the areas that are outside the area of applicability, hence predictions should not be considered for these locations. See the tutorial on the AOA in this package for more information.

## Conclusions

To conclude, the tutorial has shown how CAST can be used to facilitate target-oriented (here: spatial) CV on spatial and spatio-temporal data, which is crucial to obtain meaningful validation results. Using the ffs in conjunction with target-oriented validation, variables can be excluded that are counterproductive in view to the target-oriented performance due to misinterpretations by the algorithm.
ffs therefore helps to select the ideal set of predictor variables for spatio-temporal prediction tasks and gives objective error estimates. ## Final notes The intention of this tutorial is to describe the motivation that led to the development of CAST as well as its functionality. Priority is not on modelling soil moisture of the cookfarm in the best possible way but to provide an example for the motivation and functionality of CAST that can run within a few minutes. Hence, only a very small subset of the entire cookfarm dataset was used. Keep in mind that due to the small subset the example is not robust and quite different results might be obtained depending on small changes in the settings. The intention of showing the motivation of CAST is also the reason why the coordinates are used here as predictor variables. Though coordinates are used as predictors in quite some scientific studies they rather provide here an extreme example of how misleading variables can lead to overfitting. ## Further reading * Meyer, H., & Pebesma, E. (2022): Machine learning-based global maps of ecological variables and the challenge of assessing them. Nature Communications. Accepted. * Meyer, H., & Pebesma, E. (2021). Predicting into unknown space? Estimating the area of applicability of spatial prediction models. Methods in Ecology and Evolution, 12, 1620– 1633. [https://doi.org/10.1111/2041-210X.13650] * Meyer H, Reudenbach C, Wöllauer S,Nauss T (2019) Importance of spatial predictor variable selection in machine learning applications–Moving from data reproduction to spatial prediction. Ecological Modelling 411: 108815 [https://doi.org/10.1016/j.ecolmodel.2019.108815] * Meyer H, Reudenbach C, Hengl T, Katurij M, Nauss T (2018) Improving performance of spatio-temporal machine learning models using forward feature selection and target-oriented validation. Environmental Modelling & Software 101: 1–9 [https://doi.org/10.1016/j.envsoft.2017.12.001] * Talk from the OpenGeoHub summer school 2019 on spatial validation and variable selection: https://www.youtube.com/watch?v=mkHlmYEzsVQ. * Tutorial (https://youtu.be/EyP04zLe9qo) and Lecture (https://youtu.be/OoNH6Nl-X2s) recording from OpenGeoHub summer school 2020 on the area of applicability. As well as talk at the OpenGeoHub summer school 2021: https://av.tib.eu/media/54879
/scratch/gouwar.j/cran-all/cranData/CAST/inst/doc/cast01-CAST-intro-cookfarm.Rmd
## ----setup, echo=FALSE-------------------------------------------------------- knitr::opts_chunk$set(fig.width = 8.83) ## ----message = FALSE, warning=FALSE------------------------------------------- library(CAST) library(caret) library(terra) library(sf) library(viridis) library(gridExtra) ## ----message = FALSE,include=FALSE, warning=FALSE----------------------------- RMSE = function(a, b){ sqrt(mean((a - b)^2,na.rm=T)) } ## ----message = FALSE, warning=FALSE------------------------------------------- predictors <- rast(system.file("extdata","bioclim.tif",package="CAST")) plot(predictors,col=viridis(100)) ## ----message = FALSE, warning=FALSE------------------------------------------- generate_random_response <- function(raster, predictornames = names(raster), seed = sample(seq(1000), 1)){ operands_1 = c("+", "-", "*", "/") operands_2 = c("^1","^2") expression <- paste(as.character(predictornames, sep="")) # assign random power to predictors set.seed(seed) expression <- paste(expression, sample(operands_2, length(predictornames), replace = TRUE), sep = "") # assign random math function between predictors (expect after the last one) set.seed(seed) expression[-length(expression)] <- paste(expression[- length(expression)], sample(operands_1, length(predictornames)-1, replace = TRUE), sep = " ") print(paste0(expression, collapse = " ")) # collapse e = paste0("raster$", expression, collapse = " ") response = eval(parse(text = e)) names(response) <- "response" return(response) } ## ----message = FALSE, warning=FALSE------------------------------------------- response <- generate_random_response (predictors, seed = 10) plot(response,col=viridis(100),main="virtual response") ## ----message = FALSE, warning=FALSE------------------------------------------- mask <- predictors[[1]] values(mask)[!is.na(values(mask))] <- 1 mask <- st_as_sf(as.polygons(mask)) mask <- st_make_valid(mask) ## ----message = FALSE, warning=FALSE------------------------------------------- set.seed(15) samplepoints <- st_as_sf(st_sample(mask,20,"random")) plot(response,col=viridis(100)) plot(samplepoints,col="red",add=T,pch=3) ## ----message = FALSE, warning=FALSE------------------------------------------- trainDat <- extract(predictors,samplepoints,na.rm=FALSE) trainDat$response <- extract(response,samplepoints,na.rm=FALSE, ID=FALSE)$response trainDat <- na.omit(trainDat) ## ----message = FALSE, warning=FALSE------------------------------------------- set.seed(10) model <- train(trainDat[,names(predictors)], trainDat$response, method="rf", importance=TRUE, trControl = trainControl(method="cv")) print(model) ## ----message = FALSE, warning=FALSE------------------------------------------- plot(varImp(model,scale = F),col="black") ## ----message = FALSE, warning=FALSE------------------------------------------- prediction <- predict(predictors,model,na.rm=T) truediff <- abs(prediction-response) plot(rast(list(prediction,response)),main=c("prediction","reference")) ## ----message = FALSE, warning=FALSE------------------------------------------- AOA <- aoa(predictors, model) class(AOA) names(AOA) print(AOA) ## ----message = FALSE, warning=FALSE------------------------------------------- plot(AOA) ## ----message = FALSE, warning=FALSE, fig.show="hold", out.width="30%"-------- plot(truediff,col=viridis(100),main="true prediction error") plot(AOA$DI,col=viridis(100),main="DI") plot(prediction, col=viridis(100),main="prediction for AOA") 
plot(AOA$AOA,col=c("grey","transparent"),add=T,plg=list(x="topleft",box.col="black",bty="o",title="AOA")) ## ----message = FALSE, warning=FALSE------------------------------------------- set.seed(25) samplepoints <- clustered_sample(mask,75,15,radius=25000) plot(response,col=viridis(100)) plot(samplepoints,col="red",add=T,pch=3) ## ----message = FALSE, warning=FALSE------------------------------------------- trainDat <- extract(predictors,samplepoints,na.rm=FALSE) trainDat$response <- extract(response,samplepoints,na.rm=FALSE)$response trainDat <- data.frame(trainDat,samplepoints) trainDat <- na.omit(trainDat) ## ----message = FALSE, warning=FALSE------------------------------------------- set.seed(10) model_random <- train(trainDat[,names(predictors)], trainDat$response, method="rf", importance=TRUE, trControl = trainControl(method="cv")) prediction_random <- predict(predictors,model_random,na.rm=TRUE) print(model_random) ## ----message = FALSE, warning=FALSE------------------------------------------- folds <- CreateSpacetimeFolds(trainDat, spacevar="parent",k=10) set.seed(15) model <- train(trainDat[,names(predictors)], trainDat$response, method="rf", importance=TRUE, tuneGrid = expand.grid(mtry = c(2:length(names(predictors)))), trControl = trainControl(method="cv",index=folds$index)) print(model) prediction <- predict(predictors,model,na.rm=TRUE) ## ----message = FALSE, warning=FALSE------------------------------------------- AOA_spatial <- aoa(predictors, model) AOA_random <- aoa(predictors, model_random) ## ----message = FALSE, warning=FALSE, fig.show="hold", out.width="50%"-------- plot(AOA_spatial$DI,col=viridis(100),main="DI") plot(prediction, col=viridis(100),main="prediction for AOA \n(spatial CV error applies)") plot(AOA_spatial$AOA,col=c("grey","transparent"),add=TRUE,plg=list(x="topleft",box.col="black",bty="o",title="AOA")) plot(prediction_random, col=viridis(100),main="prediction for AOA \n(random CV error applies)") plot(AOA_random$AOA,col=c("grey","transparent"),add=TRUE,plg=list(x="topleft",box.col="black",bty="o",title="AOA")) ## ----message = FALSE, warning=FALSE------------------------------------------- grid.arrange(plot(AOA_spatial) + ggplot2::ggtitle("Spatial CV"), plot(AOA_random) + ggplot2::ggtitle("Random CV"), ncol = 2) ## ----message = FALSE, warning=FALSE------------------------------------------- ###for the spatial CV: RMSE(values(prediction)[values(AOA_spatial$AOA)==1], values(response)[values(AOA_spatial$AOA)==1]) RMSE(values(prediction)[values(AOA_spatial$AOA)==0], values(response)[values(AOA_spatial$AOA)==0]) model$results ###and for the random CV: RMSE(values(prediction_random)[values(AOA_random$AOA)==1], values(response)[values(AOA_random$AOA)==1]) RMSE(values(prediction_random)[values(AOA_random$AOA)==0], values(response)[values(AOA_random$AOA)==0]) model_random$results ## ----message = FALSE, warning=FALSE------------------------------------------- DI_RMSE_relation <- DItoErrormetric(model, AOA_spatial$parameters, multiCV=TRUE, window.size = 5, length.out = 5) plot(DI_RMSE_relation) expected_RMSE = terra::predict(AOA_spatial$DI, DI_RMSE_relation) # account for multiCV changing the DI threshold updated_AOA = AOA_spatial$DI > attr(DI_RMSE_relation, "AOA_threshold") plot(expected_RMSE,col=viridis(100),main="expected RMSE") plot(updated_AOA, col=c("grey","transparent"),add=TRUE,plg=list(x="topleft",box.col="black",bty="o",title="AOA")) ## ----message = FALSE, warning=FALSE------------------------------------------- dat <- 
readRDS(system.file("extdata","Cookfarm.RDS",package="CAST")) # calculate average of VW for each sampling site: dat <- aggregate(dat[,c("VW","Easting","Northing")],by=list(as.character(dat$SOURCEID)),mean) # create sf object from the data: pts <- st_as_sf(dat,coords=c("Easting","Northing")) ##### Extract Predictors for the locations of the sampling points studyArea <- rast(system.file("extdata","predictors_2012-03-25.tif",package="CAST")) st_crs(pts) <- crs(studyArea) trainDat <- extract(studyArea,pts,na.rm=FALSE) pts$ID <- 1:nrow(pts) trainDat <- merge(trainDat,pts,by.x="ID",by.y="ID") # The final training dataset with potential predictors and VW: head(trainDat) ## ----message = FALSE, warning=FALSE------------------------------------------- predictors <- c("DEM","NDRE.Sd","TWI","Bt") response <- "VW" model <- train(trainDat[,predictors],trainDat[,response], method="rf",tuneLength=3,importance=TRUE, trControl=trainControl(method="LOOCV")) model ## ----message = FALSE, warning=FALSE------------------------------------------- #Predictors: plot(stretch(studyArea[[predictors]])) #prediction: prediction <- predict(studyArea,model,na.rm=TRUE) ## ----message = FALSE, warning=FALSE, fig.show="hold", out.width="50%"-------- AOA <- aoa(studyArea,model) #### Plot results: plot(AOA$DI,col=viridis(100),main="DI with sampling locations (red)") plot(pts,zcol="ID",col="red",add=TRUE) plot(prediction, col=viridis(100),main="prediction for AOA \n(LOOCV error applies)") plot(AOA$AOA,col=c("grey","transparent"),add=TRUE,plg=list(x="topleft",box.col="black",bty="o",title="AOA"))
/scratch/gouwar.j/cran-all/cranData/CAST/inst/doc/cast02-AOA-tutorial.R
--- title: "2. Area of applicability of spatial prediction models" author: "Hanna Meyer" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: true vignette: > %\VignetteIndexEntry{Area of applicability of spatial prediction models} %\VignetteEncoding{UTF-8} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console --- ```{r setup, echo=FALSE} knitr::opts_chunk$set(fig.width = 8.83) ``` --- # Introduction In spatial predictive mapping, models are often applied to make predictions far beyond sampling locations (i.e. field observations used to map a variable even on a global scale), where new locations might considerably differ in their environmental properties. However, areas in the predictor space without support of training data are problematic. The model has not been enabled to learn about relationships in these environments and predictions for such areas have to be considered highly uncertain. In CAST, we implement the methodology described in [Meyer\&Pebesma (2021)](https://doi.org/10.1111/2041-210X.13650) to estimate the "area of applicability" (AOA) of (spatial) prediction models. The AOA is defined as the area where we enabled the model to learn about relationships based on the training data, and where the estimated cross-validation performance holds. To delineate the AOA, first an dissimilarity index (DI) is calculated that is based on distances to the training data in the multidimensional predictor variable space. To account for relevance of predictor variables responsible for prediction patterns we weight variables by the model-derived importance scores prior to distance calculation. The AOA is then derived by applying a threshold based on the DI observed in the training data using cross-validation. This tutorial shows an example of how to estimate the area of applicability of spatial prediction models. For further information see: Meyer, H., & Pebesma, E. (2021). Predicting into unknown space? Estimating the area of applicability of spatial prediction models. Methods in Ecology and Evolution, 12, 1620– 1633. [https://doi.org/10.1111/2041-210X.13650] ### Getting started ```{r, message = FALSE, warning=FALSE} library(CAST) library(caret) library(terra) library(sf) library(viridis) library(gridExtra) ``` ```{r,message = FALSE,include=FALSE, warning=FALSE} RMSE = function(a, b){ sqrt(mean((a - b)^2,na.rm=T)) } ``` # Example 1: Using simulated data ## Get data ### Generate Predictors As predictor variables, a set of bioclimatic variables are used (https://www.worldclim.org). For this tutorial, they have been originally downloaded using the getData function from the raster package but cropped to an area in central Europe. The cropped data are provided in the CAST package. ```{r, message = FALSE, warning=FALSE} predictors <- rast(system.file("extdata","bioclim.tif",package="CAST")) plot(predictors,col=viridis(100)) ``` ### Generate Response To be able to test the reliability of the method, we're using a simulated prediction task. We therefore simulate a virtual response variable from the bioclimatic variables. 
```{r,message = FALSE, warning=FALSE} generate_random_response <- function(raster, predictornames = names(raster), seed = sample(seq(1000), 1)){ operands_1 = c("+", "-", "*", "/") operands_2 = c("^1","^2") expression <- paste(as.character(predictornames, sep="")) # assign random power to predictors set.seed(seed) expression <- paste(expression, sample(operands_2, length(predictornames), replace = TRUE), sep = "") # assign random math function between predictors (expect after the last one) set.seed(seed) expression[-length(expression)] <- paste(expression[- length(expression)], sample(operands_1, length(predictornames)-1, replace = TRUE), sep = " ") print(paste0(expression, collapse = " ")) # collapse e = paste0("raster$", expression, collapse = " ") response = eval(parse(text = e)) names(response) <- "response" return(response) } ``` ```{r,message = FALSE, warning=FALSE} response <- generate_random_response (predictors, seed = 10) plot(response,col=viridis(100),main="virtual response") ``` ### Simulate sampling locations To simulate a typical prediction task, field sampling locations are randomly selected. Here, we randomly select 20 points. Note that this is a very small data set, but used here to avoid long computation times. ```{r,message = FALSE, warning=FALSE} mask <- predictors[[1]] values(mask)[!is.na(values(mask))] <- 1 mask <- st_as_sf(as.polygons(mask)) mask <- st_make_valid(mask) ``` ```{r,message = FALSE, warning=FALSE} set.seed(15) samplepoints <- st_as_sf(st_sample(mask,20,"random")) plot(response,col=viridis(100)) plot(samplepoints,col="red",add=T,pch=3) ``` ## Model training Next, a machine learning algorithm will be applied to learn the relationships between predictors and response. ### Prepare data Therefore, predictors and response are extracted for the sampling locations. ```{r,message = FALSE, warning=FALSE} trainDat <- extract(predictors,samplepoints,na.rm=FALSE) trainDat$response <- extract(response,samplepoints,na.rm=FALSE, ID=FALSE)$response trainDat <- na.omit(trainDat) ``` ### Train the model Random Forest is applied here as machine learning algorithm (others can be used as well, as long as variable importance is returned). The model is validated by default cross-validation to estimate the prediction error. ```{r,message = FALSE, warning=FALSE} set.seed(10) model <- train(trainDat[,names(predictors)], trainDat$response, method="rf", importance=TRUE, trControl = trainControl(method="cv")) print(model) ``` ### Variable importance The estimation of the AOA will require the importance of the individual predictor variables. ```{r,message = FALSE, warning=FALSE} plot(varImp(model,scale = F),col="black") ``` ### Predict and calculate error The trained model is then used to make predictions for the entire area of interest. Since a simulated area-wide response is used, it's possible in this tutorial to compare the predictions with the true reference. ```{r,message = FALSE, warning=FALSE} prediction <- predict(predictors,model,na.rm=T) truediff <- abs(prediction-response) plot(rast(list(prediction,response)),main=c("prediction","reference")) ``` ## AOA Calculation The visualization above shows the predictions made by the model. In the next step, the DI and AOA will be calculated. The AOA calculation takes the model as input to extract the importance of the predictors, used as weights in multidimensional distance calculation. Note that the AOA can also be calculated without a trained model (i.e. using training data and new data only). 
In this case all predictor variables are treated equally important (unless weights are given in the form of a table).

```{r,message = FALSE, warning=FALSE}
AOA <- aoa(predictors, model)
class(AOA)
names(AOA)
print(AOA)
```

Plotting the `aoa` object shows the distribution of DI values within the training data and the DI of the new data.

```{r,message = FALSE, warning=FALSE}
plot(AOA)
```

The main output of the `aoa` function are two raster layers: The first is the DI, which is the normalized and weighted minimum distance to a nearest training data point divided by the average distance within the training data. The AOA is derived from the DI by using a threshold. The threshold is the (outlier-removed) maximum DI observed in the training data, where the DI of the training data is calculated by considering the cross-validation folds. The threshold used and all relevant information about the training data DI are returned in the `parameters` list entry.

We can plot the DI as well as the predictions only within the AOA:

```{r,message = FALSE, warning=FALSE, fig.show="hold", out.width="30%"}
plot(truediff,col=viridis(100),main="true prediction error")
plot(AOA$DI,col=viridis(100),main="DI")
plot(prediction, col=viridis(100),main="prediction for AOA")
plot(AOA$AOA,col=c("grey","transparent"),add=T,plg=list(x="topleft",box.col="black",bty="o",title="AOA"))
```

The patterns in the DI are in general agreement with the true prediction error. Very high values are present in the Alps, as they have not been covered by training data but feature very distinct environmental conditions. Since the DI values for these areas are above the threshold, we regard this area as outside the AOA.

## AOA for spatially clustered data?

The example above had randomly distributed training samples. However, sampling locations might also be highly clustered in space. In this case, the random cross-validation is not meaningful (see e.g. [Meyer et al. 2018](https://doi.org/10.1016/j.envsoft.2017.12.001), [Meyer et al. 2019](https://doi.org/10.1016/j.ecolmodel.2019.108815), [Valavi et al. 2019](https://doi.org/10.1111/2041-210X.13107), [Roberts et al. 2018](https://doi.org/10.1111/ecog.02881), [Pohjankukka et al. 2017](https://doi.org/10.1080/13658816.2017.1346255), [Brenning 2012](https://CRAN.R-project.org/package=sperrorest)). Also the threshold for the AOA is not reliable, because it is based on the distance to a nearest data point within the training data (which is usually very small when data are clustered). Instead, cross-validation should be based on a leave-cluster-out approach, and the AOA estimation based on distances to a nearest data point not located in the same spatial cluster.

To show what this looks like, we use 15 spatial locations and simulate 5 data points around each location.

```{r,message = FALSE, warning=FALSE}
set.seed(25)
samplepoints <- clustered_sample(mask,75,15,radius=25000)

plot(response,col=viridis(100))
plot(samplepoints,col="red",add=T,pch=3)
```

```{r,message = FALSE, warning=FALSE}
trainDat <- extract(predictors,samplepoints,na.rm=FALSE)
trainDat$response <- extract(response,samplepoints,na.rm=FALSE)$response
trainDat <- data.frame(trainDat,samplepoints)
trainDat <- na.omit(trainDat)
```

We first train a model with (in this case) inappropriate random cross-validation.
```{r,message = FALSE, warning=FALSE} set.seed(10) model_random <- train(trainDat[,names(predictors)], trainDat$response, method="rf", importance=TRUE, trControl = trainControl(method="cv")) prediction_random <- predict(predictors,model_random,na.rm=TRUE) print(model_random) ``` ...and a model based on leave-cluster-out cross-validation. ```{r,message = FALSE, warning=FALSE} folds <- CreateSpacetimeFolds(trainDat, spacevar="parent",k=10) set.seed(15) model <- train(trainDat[,names(predictors)], trainDat$response, method="rf", importance=TRUE, tuneGrid = expand.grid(mtry = c(2:length(names(predictors)))), trControl = trainControl(method="cv",index=folds$index)) print(model) prediction <- predict(predictors,model,na.rm=TRUE) ``` The AOA is then calculated (for comparison) using the model validated by random cross-validation, and second by taking the spatial clusters into account and calculating the threshold based on minimum distances to a nearest training point not located in the same cluster. This is done in the aoa function, where the folds used for cross-validation are automatically extracted from the model. ```{r,message = FALSE, warning=FALSE} AOA_spatial <- aoa(predictors, model) AOA_random <- aoa(predictors, model_random) ``` ```{r,message = FALSE, warning=FALSE, fig.show="hold", out.width="50%"} plot(AOA_spatial$DI,col=viridis(100),main="DI") plot(prediction, col=viridis(100),main="prediction for AOA \n(spatial CV error applies)") plot(AOA_spatial$AOA,col=c("grey","transparent"),add=TRUE,plg=list(x="topleft",box.col="black",bty="o",title="AOA")) plot(prediction_random, col=viridis(100),main="prediction for AOA \n(random CV error applies)") plot(AOA_random$AOA,col=c("grey","transparent"),add=TRUE,plg=list(x="topleft",box.col="black",bty="o",title="AOA")) ``` Note that the AOA is much larger for the spatial CV approach. However, the spatial cross-validation error is considerably larger, hence also the area for which this error applies is larger. The random cross-validation performance is very high, however, the area to which the performance applies is small. This fact is also apparent if you plot the `aoa` objects which will display the distributions of the DI of the training data as well as the DI of the new data. For random CV most of the predictionDI is larger than the AOA threshold determined by the trainDI. Using spatial CV, the predictionDI is well within the DI of the training samples. ```{r, message = FALSE, warning=FALSE} grid.arrange(plot(AOA_spatial) + ggplot2::ggtitle("Spatial CV"), plot(AOA_random) + ggplot2::ggtitle("Random CV"), ncol = 2) ``` ## Comparison prediction error with model error Since we used a simulated response variable, we can now compare the prediction error within the AOA with the model error, assuming that the model error applies inside the AOA but not outside. ```{r,message = FALSE, warning=FALSE} ###for the spatial CV: RMSE(values(prediction)[values(AOA_spatial$AOA)==1], values(response)[values(AOA_spatial$AOA)==1]) RMSE(values(prediction)[values(AOA_spatial$AOA)==0], values(response)[values(AOA_spatial$AOA)==0]) model$results ###and for the random CV: RMSE(values(prediction_random)[values(AOA_random$AOA)==1], values(response)[values(AOA_random$AOA)==1]) RMSE(values(prediction_random)[values(AOA_random$AOA)==0], values(response)[values(AOA_random$AOA)==0]) model_random$results ``` The results indicate that there is a high agreement between the model CV error (RMSE) and the true prediction RMSE. 
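In addition to the comparison plots in the next chunk, a quick numeric summary can be useful. Since the AOA raster is coded as 0 (outside) and 1 (inside), its mean gives the share of prediction cells that fall inside each AOA. This is a small added sketch, not part of the original analysis:

```{r, eval=FALSE}
# Share of (non-NA) prediction cells inside the AOA for both CV strategies
terra::global(AOA_spatial$AOA, "mean", na.rm = TRUE)
terra::global(AOA_random$AOA, "mean", na.rm = TRUE)
```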
This is the case for both, the random as well as the spatial model. ## Relationship between the DI and the performance measure The relationship between error and DI can be used to limit predictions to an area (within the AOA) where a required performance (e.g. RMSE, R2, Kappa, Accuracy) applies. This can be done using the result of DItoErrormetric which used the relationship analyzed in a window of DI values. The corresponding model (here: shape constrained additive models which is the default: Monotone increasing P-splines with the dimension of the basis used to represent the smooth term is 6 and a 2nd order penalty.) can be used to estimate the performance on a pixel level, which then allows limiting predictions using a threshold. Note that we used a multi-purpose CV to estimate the relationship between the DI and the RMSE here (see details in the paper). ```{r,message = FALSE, warning=FALSE} DI_RMSE_relation <- DItoErrormetric(model, AOA_spatial$parameters, multiCV=TRUE, window.size = 5, length.out = 5) plot(DI_RMSE_relation) expected_RMSE = terra::predict(AOA_spatial$DI, DI_RMSE_relation) # account for multiCV changing the DI threshold updated_AOA = AOA_spatial$DI > attr(DI_RMSE_relation, "AOA_threshold") plot(expected_RMSE,col=viridis(100),main="expected RMSE") plot(updated_AOA, col=c("grey","transparent"),add=TRUE,plg=list(x="topleft",box.col="black",bty="o",title="AOA")) ``` # Example 2: A real-world example The example above used simulated data so that it allows to analyze the reliability of the AOA. However, a simulated area-wide response is not available in usual prediction tasks. Therefore, as a second example the AOA is estimated for a dataset that has point observations as a reference only. ## Data and preprocessing To do so, we will work with the cookfarm dataset, described in e.g. [Gasch et al 2015](https://www.sciencedirect.com/science/article/pii/S2211675315000251). The dataset included in CAST is a re-structured dataset. Find more details also in the vignette "Introduction to CAST". We will use soil moisture (VW) as response variable here. Hence, we're aiming at making a spatial continuous prediction based on limited measurements from data loggers. ```{r, message = FALSE, warning=FALSE} dat <- readRDS(system.file("extdata","Cookfarm.RDS",package="CAST")) # calculate average of VW for each sampling site: dat <- aggregate(dat[,c("VW","Easting","Northing")],by=list(as.character(dat$SOURCEID)),mean) # create sf object from the data: pts <- st_as_sf(dat,coords=c("Easting","Northing")) ##### Extract Predictors for the locations of the sampling points studyArea <- rast(system.file("extdata","predictors_2012-03-25.tif",package="CAST")) st_crs(pts) <- crs(studyArea) trainDat <- extract(studyArea,pts,na.rm=FALSE) pts$ID <- 1:nrow(pts) trainDat <- merge(trainDat,pts,by.x="ID",by.y="ID") # The final training dataset with potential predictors and VW: head(trainDat) ``` ## Model training and prediction A set of variables is used as predictors for VW in a random Forest model. The model is validated with a leave one out cross-validation. Note that the model performance is very low, due to the small dataset being used here (and for this small dataset a low ability of the predictors to model VW). 
```{r, message = FALSE, warning=FALSE}
predictors <- c("DEM","NDRE.Sd","TWI","Bt")
response <- "VW"

model <- train(trainDat[,predictors],trainDat[,response],
               method="rf",tuneLength=3,importance=TRUE,
               trControl=trainControl(method="LOOCV"))
model
```

### Prediction

Next, the model is used to make predictions for the entire study area.

```{r, message = FALSE, warning=FALSE}
#Predictors:
plot(stretch(studyArea[[predictors]]))

#prediction:
prediction <- predict(studyArea,model,na.rm=TRUE)
```

## AOA estimation

Next, we're limiting the predictions to the AOA. Predictions outside the AOA should be excluded.

```{r, message = FALSE, warning=FALSE, fig.show="hold", out.width="50%"}
AOA <- aoa(studyArea,model)

#### Plot results:
plot(AOA$DI,col=viridis(100),main="DI with sampling locations (red)")
plot(pts,zcol="ID",col="red",add=TRUE)

plot(prediction, col=viridis(100),main="prediction for AOA \n(LOOCV error applies)")
plot(AOA$AOA,col=c("grey","transparent"),add=TRUE,plg=list(x="topleft",box.col="black",bty="o",title="AOA"))
```

# Final notes

* The AOA is estimated based on training data and new data (i.e. a raster stack of the entire area of interest). The trained model is only used to retrieve the variable importance needed to weight the predictor variables. The importance weights can also be given as a table (see the short sketch at the end of this vignette), so the approach can be used with packages other than caret as well.
* Knowledge of the AOA is important when predictions are used as a baseline for decision making or subsequent environmental modelling.
* We suggest that the AOA should be provided alongside the prediction map and complementary to the communication of validation performances.

## Further reading

* Meyer, H., & Pebesma, E. (2022): Machine learning-based global maps of ecological variables and the challenge of assessing them. Nature Communications. Accepted.

* Meyer, H., & Pebesma, E. (2021). Predicting into unknown space? Estimating the area of applicability of spatial prediction models. Methods in Ecology and Evolution, 12, 1620–1633. [https://doi.org/10.1111/2041-210X.13650]

* Tutorial (https://youtu.be/EyP04zLe9qo) and Lecture (https://youtu.be/OoNH6Nl-X2s) recording from OpenGeoHub summer school 2020 on the area of applicability. As well as a talk at the OpenGeoHub summer school 2021: https://av.tib.eu/media/54879
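As mentioned in the final notes, the AOA can also be computed without a caret model by passing the training table and (optionally) a table of variable weights directly. The following is a minimal sketch: the weight values are purely illustrative, and the argument names follow the `trainDI()`/`aoa()` interface, so please adjust them if your CAST version differs.

```{r, eval = FALSE}
# Sketch: DI threshold and AOA without a trained caret model.
# The weights below are made up for illustration only.
w  <- data.frame(DEM = 1, NDRE.Sd = 1, TWI = 0.5, Bt = 0.5)
di <- trainDI(train = trainDat[, predictors],
              variables = predictors,
              weight = w)
AOA_noModel <- aoa(studyArea[[predictors]], trainDI = di)
```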
/scratch/gouwar.j/cran-all/cranData/CAST/inst/doc/cast02-AOA-tutorial.Rmd
## ----setup, include=FALSE----------------------------------------------------- knitr::opts_chunk$set(echo = TRUE, message = FALSE, warning = FALSE) ## ----message = FALSE, warning=FALSE------------------------------------------- library(CAST) library(caret) library(terra) library(sf) ## ----message = FALSE, warning=FALSE------------------------------------------- data("splotdata") predictors <- rast(system.file("extdata","predictors_chile.tif",package="CAST")) splotdata <- st_drop_geometry(splotdata) ## ----message = FALSE, warning=FALSE------------------------------------------- set.seed(10) model_random <- train(splotdata[,names(predictors)], splotdata$Species_richness, method="rf", importance=TRUE, ntrees = 50, trControl = trainControl(method="cv")) prediction_random <- predict(predictors,model_random,na.rm=TRUE) ## ----------------------------------------------------------------------------- model_random_trainDI = trainDI(model_random) print(model_random_trainDI) ## ----eval = FALSE------------------------------------------------------------- # saveRDS(model_random_trainDI, "path/to/file") ## ----fig.show="hold", out.width="30%"----------------------------------------- r1 = crop(predictors, c(-75.66667, -67, -30, -17.58333)) r2 = crop(predictors, c(-75.66667, -67, -45, -30)) r3 = crop(predictors, c(-75.66667, -67, -55.58333, -45)) plot(r1[[1]],main = "Tile 1") plot(r2[[1]],main = "Tile 2") plot(r3[[1]],main = "Tile 3") ## ----fig.show="hold", out.width="30%"----------------------------------------- aoa_r1 = aoa(newdata = r1, trainDI = model_random_trainDI) plot(r1[[1]], main = "Tile 1: Predictors") plot(aoa_r1$DI, main = "Tile 1: DI") plot(aoa_r1$AOA, main = "Tile 1: AOA") ## ----eval = FALSE------------------------------------------------------------- # # library(parallel) # # tiles_aoa = mclapply(list(r1, r2, r3), function(tile){ # aoa(newdata = tile, trainDI = model_random_trainDI) # # }, mc.cores = 3) # ## ----echo = FALSE------------------------------------------------------------- tiles_aoa = lapply(list(r1, r2, r3), function(tile){ aoa(newdata = tile, trainDI = model_random_trainDI) }) ## ----fig.show="hold", out.width="30%"----------------------------------------- plot(tiles_aoa[[1]]$AOA, main = "Tile 1") plot(tiles_aoa[[2]]$AOA, main = "Tile 2") plot(tiles_aoa[[3]]$AOA, main = "Tile 3") ## ----eval = FALSE------------------------------------------------------------- # # Simple Example Code for raster tiles on the hard drive # # tiles = list.files("path/to/tiles", full.names = TRUE) # # tiles_aoa = mclapply(tiles, function(tile){ # current = terra::rast(tile) # aoa(newdata = current, trainDI = model_random_trainDI) # # }, mc.cores = 3)
/scratch/gouwar.j/cran-all/cranData/CAST/inst/doc/cast03-AOA-parallel.R
--- title: '3. AOA in Parallel' author: "Marvin Ludwig" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: true vignette: > %\VignetteIndexEntry{AOA in parallel} %\VignetteEncoding{UTF-8} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE, message = FALSE, warning = FALSE) ``` Estimating the Area of Applicability (AOA) can be computationally intensive, depending on the amount of training data used for a model as well as the amount of new data the AOA has to be computed. This vignette goes over the possibility to (partly) compute the AOA in parallel. We will use the same data setup as the vignette "Area of applicability of spatial prediction models". Please have a look there for a general introduction to the AOA and the details about the example data generation. # Generate Example Data ```{r, message = FALSE, warning=FALSE} library(CAST) library(caret) library(terra) library(sf) ``` ```{r,message = FALSE, warning=FALSE} data("splotdata") predictors <- rast(system.file("extdata","predictors_chile.tif",package="CAST")) splotdata <- st_drop_geometry(splotdata) ``` ```{r,message = FALSE, warning=FALSE} set.seed(10) model_random <- train(splotdata[,names(predictors)], splotdata$Species_richness, method="rf", importance=TRUE, ntrees = 50, trControl = trainControl(method="cv")) prediction_random <- predict(predictors,model_random,na.rm=TRUE) ``` # Parallel AOA by dividing the new data For better performances, it is recommended to compute the AOA in two steps. First, the DI of training data and the resulting DI threshold is computed from the model or training data with the function `trainDI`. The result from trainDI is usually the first step of the `aoa` function, however it can be skipped by providing the trainDI object in the function call. This makes it possible to compute the AOA on multiple raster tiles at once (e.g. on different cores). This is especially useful for very large prediction areas, e.g. in global mapping. ```{r} model_random_trainDI = trainDI(model_random) print(model_random_trainDI) ``` ```{r, eval = FALSE} saveRDS(model_random_trainDI, "path/to/file") ``` If you have a large raster, you divide it into multiple smaller tiles and apply the trainDI object afterwards to each tile. ```{r, fig.show="hold", out.width="30%"} r1 = crop(predictors, c(-75.66667, -67, -30, -17.58333)) r2 = crop(predictors, c(-75.66667, -67, -45, -30)) r3 = crop(predictors, c(-75.66667, -67, -55.58333, -45)) plot(r1[[1]],main = "Tile 1") plot(r2[[1]],main = "Tile 2") plot(r3[[1]],main = "Tile 3") ``` Use the `trainDI` argument in the `aoa` function to specify, that you want to use a previously computed trainDI object. ```{r, fig.show="hold", out.width="30%"} aoa_r1 = aoa(newdata = r1, trainDI = model_random_trainDI) plot(r1[[1]], main = "Tile 1: Predictors") plot(aoa_r1$DI, main = "Tile 1: DI") plot(aoa_r1$AOA, main = "Tile 1: AOA") ``` You can now run the aoa function in parallel on the different tiles! Of course you can use for favorite parallel backend for this task, here we use mclapply from the `parallel` package. 
```{r, eval = FALSE} library(parallel) tiles_aoa = mclapply(list(r1, r2, r3), function(tile){ aoa(newdata = tile, trainDI = model_random_trainDI) }, mc.cores = 3) ``` ```{r, echo = FALSE} tiles_aoa = lapply(list(r1, r2, r3), function(tile){ aoa(newdata = tile, trainDI = model_random_trainDI) }) ``` ```{r, fig.show="hold", out.width="30%"} plot(tiles_aoa[[1]]$AOA, main = "Tile 1") plot(tiles_aoa[[2]]$AOA, main = "Tile 2") plot(tiles_aoa[[3]]$AOA, main = "Tile 3") ``` For larger tasks it might be useful to save the tiles to your hard drive and load them one by one to avoid filling up your RAM. ```{r, eval = FALSE} # Simple Example Code for raster tiles on the hard drive tiles = list.files("path/to/tiles", full.names = TRUE) tiles_aoa = mclapply(tiles, function(tile){ current = terra::rast(tile) aoa(newdata = current, trainDI = model_random_trainDI) }, mc.cores = 3) ```
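Note that `mclapply` relies on forking and therefore runs sequentially on Windows. As a rough, untested sketch (file names and the tile folder are placeholders), the same workflow could also be run on a socket cluster with `parLapply` from the `parallel` package; since SpatRaster objects contain external pointers, writing each tile's AOA to disk inside the worker is a safe way to collect the results.

```{r, eval = FALSE}
# Sketch only: socket-cluster alternative to mclapply (also works on Windows).
# "path/to/tiles" is a placeholder for a folder of raster tiles.
library(parallel)

tiles <- list.files("path/to/tiles", full.names = TRUE)

cl <- makeCluster(3)
clusterExport(cl, "model_random_trainDI")

aoa_files <- parLapply(cl, tiles, function(tile){
  library(CAST)
  library(terra)
  current <- terra::rast(tile)
  res <- aoa(newdata = current, trainDI = model_random_trainDI)
  # write the AOA raster next to the input tile instead of returning the
  # SpatRaster object directly from the worker
  outfile <- paste0(tools::file_path_sans_ext(tile), "_AOA.tif")
  terra::writeRaster(res$AOA, outfile, overwrite = TRUE)
  outfile
})
stopCluster(cl)
```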
/scratch/gouwar.j/cran-all/cranData/CAST/inst/doc/cast03-AOA-parallel.Rmd
## ----setup, include=FALSE----------------------------------------------------- knitr::opts_chunk$set(echo = TRUE,fig.width=6.2, fig.height=3.4) ## ----message = FALSE, warning=FALSE------------------------------------------- library(CAST) library(caret) library(terra) library(sf) library(rnaturalearth) library(ggplot2) ## ----message = FALSE, warning=FALSE------------------------------------------- seed <- 10 # random realization samplesize <- 300 # how many samples will be used? nparents <- 20 #For clustered samples: How many clusters? radius <- 500000 # For clustered samples: What is the radius of a cluster? ## ----message = FALSE, warning=FALSE------------------------------------------- ee <- st_crs("+proj=eqearth") co <- ne_countries(returnclass = "sf") co.ee <- st_transform(co, ee) ## ----message = FALSE, warning=FALSE, results='hide'--------------------------- sf_use_s2(FALSE) set.seed(seed) pts_random <- st_sample(co.ee, samplesize) ### See points on the map: ggplot() + geom_sf(data = co.ee, fill="#00BFC4",col="#00BFC4") + geom_sf(data = pts_random, color = "#F8766D",size=0.5, shape=3) + guides(fill = "none", col = "none") + labs(x = NULL, y = NULL) ## ----message = FALSE, warning=FALSE, results='hide'--------------------------- set.seed(seed) sf_use_s2(FALSE) pts_clustered <- clustered_sample(co.ee, samplesize, nparents, radius) ggplot() + geom_sf(data = co.ee, fill="#00BFC4",col="#00BFC4") + geom_sf(data = pts_clustered, color = "#F8766D",size=0.5, shape=3) + guides(fill = "none", col = "none") + labs(x = NULL, y = NULL) ## ----message = FALSE, warning=FALSE, results='hide'--------------------------- dist_random <- geodist(pts_random,co.ee, sampling="Fibonacci") dist_clstr <- geodist(pts_clustered,co.ee, sampling="Fibonacci") plot(dist_random, unit = "km")+scale_x_log10(labels=round)+ggtitle("Randomly distributed reference data") plot(dist_clstr, unit = "km")+scale_x_log10(labels=round)+ggtitle("Clustered reference data") ## ----message = FALSE, warning=FALSE, results='hide'--------------------------- randomfolds <- caret::createFolds(1:nrow(pts_clustered)) ## ----message = FALSE, warning=FALSE, results='hide',echo=FALSE---------------- for (i in 1:nrow(pts_clustered)){ pts_clustered$randomCV[i] <- which(unlist(lapply(randomfolds,function(x){sum(x%in%i)}))==1) } ggplot() + geom_sf(data = co.ee, fill="#00BFC4",col="#00BFC4") + geom_sf(data = pts_clustered, color = rainbow(max(pts_clustered$randomCV))[pts_clustered$randomCV],size=0.5, shape=3) + guides(fill = FALSE, col = FALSE) + labs(x = NULL, y = NULL)+ggtitle("random fold membership shown by color") ## ----message = FALSE, warning=FALSE, results='hide'--------------------------- dist_clstr <- geodist(pts_clustered,co.ee, sampling="Fibonacci", cvfolds= randomfolds) plot(dist_clstr, unit = "km")+scale_x_log10(labels=round) ## ----message = FALSE, warning=FALSE, results='hide'--------------------------- spatialfolds <- CreateSpacetimeFolds(pts_clustered,spacevar="parent",k=length(unique(pts_clustered$parent))) ## ----message = FALSE, warning=FALSE, results='hide',echo=FALSE---------------- ggplot() + geom_sf(data = co.ee, fill="#00BFC4",col="#00BFC4") + geom_sf(data = pts_clustered, color = rainbow(max(pts_clustered$parent))[pts_clustered$parent],size=0.5, shape=3) + guides(fill = FALSE, col = FALSE) + labs(x = NULL, y = NULL)+ ggtitle("spatial fold membership by color") ## ----message = FALSE, warning=FALSE, results='hide'--------------------------- dist_clstr <- geodist(pts_clustered,co.ee, sampling="Fibonacci", cvfolds= 
spatialfolds$indexOut) plot(dist_clstr, unit = "km")+scale_x_log10(labels=round) ## ----message = FALSE, warning=FALSE, results='hide'--------------------------- # create a spatial CV for the randomly distributed data. Here: # "leave region-out-CV" sf_use_s2(FALSE) pts_random_co <- st_join(st_as_sf(pts_random),co.ee) ggplot() + geom_sf(data = co.ee, fill="#00BFC4",col="#00BFC4") + geom_sf(data = pts_random_co, aes(color=subregion),size=0.5, shape=3) + scale_color_manual(values=rainbow(length(unique(pts_random_co$subregion))))+ guides(fill = FALSE, col = FALSE) + labs(x = NULL, y = NULL)+ ggtitle("spatial fold membership by color") ## ----message = FALSE, warning=FALSE, results='hide'--------------------------- spfolds_rand <- CreateSpacetimeFolds(pts_random_co,spacevar = "subregion", k=length(unique(pts_random_co$subregion))) dist_rand_sp <- geodist(pts_random_co,co.ee, sampling="Fibonacci", cvfolds= spfolds_rand$indexOut) plot(dist_rand_sp, unit = "km")+scale_x_log10(labels=round) ## ----message = FALSE, warning=FALSE, results='hide'--------------------------- nndmfolds_clstr <- nndm(pts_clustered, modeldomain=co.ee, samplesize = 2000) dist_clstr <- geodist(pts_clustered,co.ee, sampling = "Fibonacci", cvfolds = nndmfolds_clstr$indx_test, cvtrain = nndmfolds_clstr$indx_train) plot(dist_clstr, unit = "km")+scale_x_log10(labels=round) ## ----message = FALSE, warning=FALSE, results='hide'--------------------------- nndmfolds_rand <- nndm(pts_random_co, modeldomain=co.ee, samplesize = 2000) dist_rand <- geodist(pts_random_co,co.ee, sampling = "Fibonacci", cvfolds = nndmfolds_rand$indx_test, cvtrain = nndmfolds_rand$indx_train) plot(dist_rand, unit = "km")+scale_x_log10(labels=round) ## ----message = FALSE, warning=FALSE, results='hide'--------------------------- knndmfolds_clstr <- knndm(pts_clustered, modeldomain=co.ee, samplesize = 2000) pts_clustered$knndmCV <- as.character(knndmfolds_clstr$clusters) ggplot() + geom_sf(data = co.ee, fill="#00BFC4",col="#00BFC4") + geom_sf(data = pts_clustered, aes(color=knndmCV),size=0.5, shape=3) + scale_color_manual(values=rainbow(length(unique(pts_clustered$knndmCV))))+ guides(fill = FALSE, col = FALSE) + labs(x = NULL, y = NULL)+ ggtitle("spatial fold membership by color") dist_clstr <- geodist(pts_clustered,co.ee, sampling = "Fibonacci", cvfolds = knndmfolds_clstr$indx_test, cvtrain = knndmfolds_clstr$indx_train) plot(dist_clstr, unit = "km")+scale_x_log10(labels=round) ## ----message = FALSE, warning=FALSE, results='hide'--------------------------- predictors_global <- rast(system.file("extdata","bioclim_global.tif",package="CAST")) plot(predictors_global) ## ----message = FALSE, warning=FALSE, results='hide'--------------------------- # use random CV: dist_clstr_rCV <- geodist(pts_clustered,predictors_global, type = "feature", sampling="Fibonacci", cvfolds = randomfolds) # use spatial CV: dist_clstr_sCV <- geodist(pts_clustered,predictors_global, type = "feature", sampling="Fibonacci", cvfolds = spatialfolds$indexOut) # Plot results: plot(dist_clstr_rCV)+scale_x_log10()+ggtitle("Clustered reference data and random CV") plot(dist_clstr_sCV)+scale_x_log10()+ggtitle("Clustered reference data and spatial CV")
/scratch/gouwar.j/cran-all/cranData/CAST/inst/doc/cast04-plotgeodist.R
--- title: "4. Visualization of nearest neighbor distance distributions" author: "Hanna Meyer" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: true vignette: > %\VignetteIndexEntry{Visualization of nearest neighbor distance distributions} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE,fig.width=6.2, fig.height=3.4) ``` ## Introduction This tutorial shows how euclidean nearest neighbor distances in the geographic space or feature space can be calculated and visualized using CAST. This type of visualization allows to assess whether training data feature a representative coverage of the prediction area and if cross-validation (CV) folds (or independent test data) are adequately chosen to be representative for the prediction locations. See e.g. [Meyer and Pebesma (2022)](https://doi.org/10.1038/s41467-022-29838-9) and [Milà et al. (2022)](https://doi.org/10.1111/2041-210X.13851) for further discussion on this topic. ## Sample data As example data, we use two different sets of global virtual reference data: One is a spatial random sample and in the second example, reference data are clustered in geographic space (see [Meyer and Pebesma (2022)](https://doi.org/10.1038/s41467-022-29838-9) for more discussions on this). ```{r, message = FALSE, warning=FALSE} library(CAST) library(caret) library(terra) library(sf) library(rnaturalearth) library(ggplot2) ``` Here we can define some parameters to run the example with different settings ```{r, message = FALSE, warning=FALSE} seed <- 10 # random realization samplesize <- 300 # how many samples will be used? nparents <- 20 #For clustered samples: How many clusters? radius <- 500000 # For clustered samples: What is the radius of a cluster? ``` ### Prediction area The prediction area is the entire global land area, i.e. we could imagine a prediction task where we aim at making global predictions based on the set of reference data. ```{r,message = FALSE, warning=FALSE} ee <- st_crs("+proj=eqearth") co <- ne_countries(returnclass = "sf") co.ee <- st_transform(co, ee) ``` ### Spatial random sample Then, we simulate the random sample and visualize the data on the entire global prediction area. ```{r,message = FALSE, warning=FALSE, results='hide'} sf_use_s2(FALSE) set.seed(seed) pts_random <- st_sample(co.ee, samplesize) ### See points on the map: ggplot() + geom_sf(data = co.ee, fill="#00BFC4",col="#00BFC4") + geom_sf(data = pts_random, color = "#F8766D",size=0.5, shape=3) + guides(fill = "none", col = "none") + labs(x = NULL, y = NULL) ``` ### Clustered sample As second data set we use a clustered design of the same size. ```{r,message = FALSE, warning=FALSE, results='hide'} set.seed(seed) sf_use_s2(FALSE) pts_clustered <- clustered_sample(co.ee, samplesize, nparents, radius) ggplot() + geom_sf(data = co.ee, fill="#00BFC4",col="#00BFC4") + geom_sf(data = pts_clustered, color = "#F8766D",size=0.5, shape=3) + guides(fill = "none", col = "none") + labs(x = NULL, y = NULL) ``` ## Distances in geographic space Then we can plot the distributions of the spatial distances of reference data to their nearest neighbor ("sample-to-sample") with the distribution of distances from all points of the global land surface to the nearest reference data point ("sample-to-prediction"). Note that samples of prediction locations are used to calculate the sample-to-prediction nearest neighbor distances. 
Since we're using a global case study here, throughout this tutorial we use sampling=Fibonacci to draw prediction locations with constant point density on the sphere. ```{r,message = FALSE, warning=FALSE, results='hide'} dist_random <- geodist(pts_random,co.ee, sampling="Fibonacci") dist_clstr <- geodist(pts_clustered,co.ee, sampling="Fibonacci") plot(dist_random, unit = "km")+scale_x_log10(labels=round)+ggtitle("Randomly distributed reference data") plot(dist_clstr, unit = "km")+scale_x_log10(labels=round)+ggtitle("Clustered reference data") ``` Note that for the random data set the nearest neighbor distance distribution of the training data is quasi identical to the nearest neighbor distance distribution of the prediction area. In comparison, the second data set has the same number of training data but these are heavily clustered in geographic space. We therefore see that the nearest neighbor distances within the reference data is rather small. Prediction locations, however, are on average much further away. ### Accounting for cross-validation folds #### Random Cross-validation Let's use the clustered data set to show how the distribution of spatial nearest neighbor distances during cross-validation can be visualized as well. Therefore, we first use the "default" way of a random 10-fold cross validation where we randomly split the reference data into training and test (see Meyer et al., 2018 and 2019 to see why this might not be a good idea). ```{r,message = FALSE, warning=FALSE, results='hide'} randomfolds <- caret::createFolds(1:nrow(pts_clustered)) ``` ```{r,message = FALSE, warning=FALSE, results='hide',echo=FALSE} for (i in 1:nrow(pts_clustered)){ pts_clustered$randomCV[i] <- which(unlist(lapply(randomfolds,function(x){sum(x%in%i)}))==1) } ggplot() + geom_sf(data = co.ee, fill="#00BFC4",col="#00BFC4") + geom_sf(data = pts_clustered, color = rainbow(max(pts_clustered$randomCV))[pts_clustered$randomCV],size=0.5, shape=3) + guides(fill = FALSE, col = FALSE) + labs(x = NULL, y = NULL)+ggtitle("random fold membership shown by color") ``` ```{r,message = FALSE, warning=FALSE, results='hide'} dist_clstr <- geodist(pts_clustered,co.ee, sampling="Fibonacci", cvfolds= randomfolds) plot(dist_clstr, unit = "km")+scale_x_log10(labels=round) ``` Obviously the CV folds are not representative for the prediction locations (at least not in terms of distance to a nearest training data point). I.e. when these folds are used for performance assessment of a model, we can expect overly optimistic estimates because we only validate predictions in close proximity to the reference data. #### Spatial Cross-validation This, however, should not be the case but the CV performance should be regarded as representative for the prediction task. Therefore, we use a spatial CV instead. Here, we use a leave-cluster-out CV, which means that in each iteration, one of the spatial clusters is held back. 
```{r,message = FALSE, warning=FALSE, results='hide'} spatialfolds <- CreateSpacetimeFolds(pts_clustered,spacevar="parent",k=length(unique(pts_clustered$parent))) ``` ```{r,message = FALSE, warning=FALSE, results='hide',echo=FALSE} ggplot() + geom_sf(data = co.ee, fill="#00BFC4",col="#00BFC4") + geom_sf(data = pts_clustered, color = rainbow(max(pts_clustered$parent))[pts_clustered$parent],size=0.5, shape=3) + guides(fill = FALSE, col = FALSE) + labs(x = NULL, y = NULL)+ ggtitle("spatial fold membership by color") ``` ```{r,message = FALSE, warning=FALSE, results='hide'} dist_clstr <- geodist(pts_clustered,co.ee, sampling="Fibonacci", cvfolds= spatialfolds$indexOut) plot(dist_clstr, unit = "km")+scale_x_log10(labels=round) ``` We see that this fits the nearest neighbor distance distribution of the prediction area much better. Note that `geodist` also allows inspecting independent test data instead of cross validation folds. See `?geodist` and `?plot.geodist`. #### Why has spatial CV sometimes been blamed for being too pessimistic? Recently, [Wadoux et al. (2021)](https://doi.org/10.1016/j.ecolmodel.2021.109692) published a paper with the title "Spatial cross-validation is not the right way to evaluate map accuracy" where they state that "spatial cross-validation strategies resulted in a grossly pessimistic map accuracy assessment". Why do they come to this conclusion? The reference data they used in their study were either regularly distributed, random or comparatively mildly clustered in geographic space, but they applied spatial CV strategies that held large spatial units back during CV. Here we can see what happens when we apply spatial CV to randomly distributed reference data. ```{r,message = FALSE, warning=FALSE, results='hide'} # create a spatial CV for the randomly distributed data. Here: # "leave region-out-CV" sf_use_s2(FALSE) pts_random_co <- st_join(st_as_sf(pts_random),co.ee) ggplot() + geom_sf(data = co.ee, fill="#00BFC4",col="#00BFC4") + geom_sf(data = pts_random_co, aes(color=subregion),size=0.5, shape=3) + scale_color_manual(values=rainbow(length(unique(pts_random_co$subregion))))+ guides(fill = FALSE, col = FALSE) + labs(x = NULL, y = NULL)+ ggtitle("spatial fold membership by color") ``` ```{r,message = FALSE, warning=FALSE, results='hide'} spfolds_rand <- CreateSpacetimeFolds(pts_random_co,spacevar = "subregion", k=length(unique(pts_random_co$subregion))) dist_rand_sp <- geodist(pts_random_co,co.ee, sampling="Fibonacci", cvfolds= spfolds_rand$indexOut) plot(dist_rand_sp, unit = "km")+scale_x_log10(labels=round) ``` We see that the nearest neighbor distances during cross-validation don't match the nearest neighbor distances during prediction. But compared to the section above, this time the cross-validation folds are too far away from the reference data. Naturally we would end up with overly pessimistic performance estimates because we make prediction situations during cross-validation harder, compared to what is required during model application to the entire area of interest (here global). The spatial CV chosen here is therefore not suitable for this prediction task, because prediction situations created during CV do not resemble what is encountered during prediction. #### Nearest Neighbour Distance Matching CV A good way to approximate the geographical prediction distances during the CV is to use Nearest Neighbour Distance Matching (NNDM) CV (see [Milà et al., 2022](https://doi.org/10.1111/2041-210X.13851) for more details).
NNDM CV is a variation of LOO CV in which the empirical distribution function of nearest neighbour distances found during prediction is matched during the CV process. ```{r,message = FALSE, warning=FALSE, results='hide'} nndmfolds_clstr <- nndm(pts_clustered, modeldomain=co.ee, samplesize = 2000) dist_clstr <- geodist(pts_clustered,co.ee, sampling = "Fibonacci", cvfolds = nndmfolds_clstr$indx_test, cvtrain = nndmfolds_clstr$indx_train) plot(dist_clstr, unit = "km")+scale_x_log10(labels=round) ``` The NNDM CV-distance distribution matches the sample-to-prediction distribution very well. What happens if we use NNDM CV for the randomly-distributed sampling points instead? ```{r,message = FALSE, warning=FALSE, results='hide'} nndmfolds_rand <- nndm(pts_random_co, modeldomain=co.ee, samplesize = 2000) dist_rand <- geodist(pts_random_co,co.ee, sampling = "Fibonacci", cvfolds = nndmfolds_rand$indx_test, cvtrain = nndmfolds_rand$indx_train) plot(dist_rand, unit = "km")+scale_x_log10(labels=round) ``` The NNDM CV-distance still matches the sample-to-prediction distance function. #### k-fold Nearest Neighbour Distance Matching CV Since NNDM CV is highly time consuming, the k-fold version may provide a good trade-off (see [Linnenbrink et al., 2023](https://doi.org/10.5194/egusphere-2023-1308) for more details). ```{r,message = FALSE, warning=FALSE, results='hide'} knndmfolds_clstr <- knndm(pts_clustered, modeldomain=co.ee, samplesize = 2000) pts_clustered$knndmCV <- as.character(knndmfolds_clstr$clusters) ggplot() + geom_sf(data = co.ee, fill="#00BFC4",col="#00BFC4") + geom_sf(data = pts_clustered, aes(color=knndmCV),size=0.5, shape=3) + scale_color_manual(values=rainbow(length(unique(pts_clustered$knndmCV))))+ guides(fill = FALSE, col = FALSE) + labs(x = NULL, y = NULL)+ ggtitle("spatial fold membership by color") dist_clstr <- geodist(pts_clustered,co.ee, sampling = "Fibonacci", cvfolds = knndmfolds_clstr$indx_test, cvtrain = knndmfolds_clstr$indx_train) plot(dist_clstr, unit = "km")+scale_x_log10(labels=round) ``` ## Distances in feature space So far we compared nearest neighbor distances in geographic space. We can also do so in feature space. For this purpose, a set of bioclimatic variables (https://www.worldclim.org) is used as features (i.e. predictors) in this virtual prediction task. ```{r,message = FALSE, warning=FALSE, results='hide'} predictors_global <- rast(system.file("extdata","bioclim_global.tif",package="CAST")) plot(predictors_global) ``` Then we visualize nearest neighbor feature space distances under consideration of cross-validation. ```{r,message = FALSE, warning=FALSE, results='hide'} # use random CV: dist_clstr_rCV <- geodist(pts_clustered,predictors_global, type = "feature", sampling="Fibonacci", cvfolds = randomfolds) # use spatial CV: dist_clstr_sCV <- geodist(pts_clustered,predictors_global, type = "feature", sampling="Fibonacci", cvfolds = spatialfolds$indexOut) # Plot results: plot(dist_clstr_rCV)+scale_x_log10()+ggtitle("Clustered reference data and random CV") plot(dist_clstr_sCV)+scale_x_log10()+ggtitle("Clustered reference data and spatial CV") ``` With regard to the chosen predictor variables we see that again the nearest neighbor distances of the clustered training data are rather small compared to what is required during prediction. Again the random CV is not representative for the prediction locations while spatial CV is doing a better job. ### References * Meyer, H., Pebesma, E.
(2022): Machine learning-based global maps of ecological variables and the challenge of assessing them. Nature Communications 13, 2208. https://doi.org/10.1038/s41467-022-29838-9 * Milà, C., Mateu, J., Pebesma, E., Meyer, H. (2022): Nearest Neighbour Distance Matching Leave-One-Out Cross-Validation for map validation. Methods in Ecology and Evolution 00, 1– 13. https://doi.org/10.1111/2041-210X.13851. * Linnenbrink, J., Milà, C., Ludwig, M., and Meyer, H. (2023): kNNDM: k-fold Nearest Neighbour Distance Matching Cross-Validation for map accuracy estimation, EGUsphere [preprint], https://doi.org/10.5194/egusphere-2023-1308.
/scratch/gouwar.j/cran-all/cranData/CAST/inst/doc/cast04-plotgeodist.Rmd
\dontrun{ library(CAST) library(sf) library(terra) library(caret) data(splotdata) splotdata <- st_drop_geometry(splotdata) predictors <- terra::rast(system.file("extdata","predictors_chile.tif", package="CAST")) model <- caret::train(splotdata[,6:16], splotdata$Species_richness, ntree = 10, trControl = trainControl(method = "cv", savePredictions = TRUE)) AOA <- aoa(predictors, model) errormodel <- DItoErrormetric(model, AOA) plot(errormodel) expected_error = terra::predict(AOA$DI, errormodel) plot(expected_error) # with multiCV = TRUE errormodel = DItoErrormetric(model, AOA, multiCV = TRUE, length.out = 3) plot(errormodel) expected_error = terra::predict(AOA$DI, errormodel) plot(expected_error) # mask AOA based on new threshold from multiCV mask_aoa = terra::mask(expected_error, AOA$DI > attr(errormodel, 'AOA_threshold'), maskvalues = 1) plot(mask_aoa) }
/scratch/gouwar.j/cran-all/cranData/CAST/inst/examples/ex_DItoErrormetric.R
--- title: "1. Introduction to CAST" author: "Hanna Meyer" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: true vignette: > %\VignetteIndexEntry{Introduction to CAST} %\VignetteEncoding{UTF-8} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console --- ```{r setup, echo=FALSE} knitr::opts_chunk$set(fig.width = 8.83,cache = FALSE) user_hanna <- Sys.getenv("USER") %in% c("hanna") ``` ## Introduction !!Note: Some recent developments of CAST are not yet fully documented in this tutorial. A major update can be expected for Apr 2024!! ### Background One key task in environmental science is obtaining information of environmental variables continuously in space or in space and time, usually based on remote sensing and limited field data. In that respect, machine learning algorithms have been proven to be an important tool to learn patterns in nonlinear and complex systems. However, standard machine learning applications are not suitable for spatio-temporal data, as they usually ignore the spatio-temporal dependencies in the data. This becomes problematic in (at least) two aspects of predictive modelling: Overfitted models as well as overly optimistic error assessment (see [Meyer et al 2018](https://www.sciencedirect.com/science/article/pii/S1364815217310976) or [Meyer et al 2019](https://www.sciencedirect.com/science/article/abs/pii/S0304380019303230) ). To approach these problems, CAST supports the well-known caret package ([Kuhn 2018](https://topepo.github.io/caret/index.html) to provide methods designed for spatio-temporal data. This tutorial shows how to set up a spatio-temporal prediction model that includes objective and reliable error estimation. It further shows how spatio-temporal overfitting can be detected by comparison between validation strategies. It will be shown that certain variables are responsible for the problem of overfitting due to spatio-temporal autocorrelation patterns. Therefore, this tutorial also shows how to automatically exclude variables that lead to overfitting with the aim to improve the spatio-temporal prediction model. In order to follow this tutorial, I assume that the reader is familiar with the basics of predictive modelling nicely explained in [Kuhn and Johnson 2013](https://doi.org/10.1007/978-1-4614-6849-3) as well as machine learning applications via the caret package. ### How to start To work with the tutorial, first install the CAST package and load the library: ```{r c1, message = FALSE, warning=FALSE} #install.packages("CAST") library(CAST) ``` If you need help, see ```{r c2, message = FALSE, warning=FALSE} help(CAST) ``` ## Example of a typical spatio-temporal prediction task The example prediction task for this tutorial is the following: we have a set of data loggers distributed over a farm, and we want to map soil moisture, based on a set of spatial and temporal predictor variables. We will use Random Forests as a machine learning algorithm in this tutorial. ### Description of the example dataset To do so, we will work with the cookfarm dataset, described in e.g. [Gasch et al 2015](https://www.sciencedirect.com/science/article/pii/S2211675315000251/) and available via the GSIF package ([Hengl 2017](https://CRAN.R-project.org/package=GSIF)). The dataset included in the CAST package is a re-structured dataset which was used for the analysis in [Meyer et al 2018](https://www.sciencedirect.com/science/article/pii/S1364815217310976). 
```{r c3, message = FALSE, warning=FALSE} data <- readRDS(system.file("extdata","Cookfarm.RDS",package="CAST")) head(data) ``` I want to point out the following information about this dataset: The "SOURCEID" represents the ID for the data logger, "VW" is soil moisture which is our response variable, "Easting" and "Northing" are the coordinates of the data loggers, "altitude" indicates the depth of the soil in which VW was measured, and the remaining columns represent different potential predictor variables which are terrain related (e.g. "DEM", "TWI"), vegetation indices (e.g. "NDRE"), soil properties (e.g. "BLD") or climate-related predictors (e.g. "Precip_wrcc"). See [Gasch et al 2015](https://www.sciencedirect.com/science/article/pii/S2211675315000251) for a further description of the dataset. To get an impression of the spatial properties of the dataset, let's have a look at the spatial distribution of the data loggers on the cookfarm: ```{r c4, message = FALSE, warning=FALSE} library(sf) data_sp <- unique(data[,c("SOURCEID","Easting","Northing")]) data_sp <- st_as_sf(data_sp,coords=c("Easting","Northing"),crs=26911) plot(data_sp,axes=T,col="black") ``` ```{r c5, message = FALSE, warning=FALSE, eval=user_hanna} #...or plot the data with mapview: library(mapview) mapviewOptions(basemaps = c("Esri.WorldImagery")) mapview(data_sp) ``` We see that the data are taken at 42 locations (SOURCEID) over the field. The loggers recorded data between 2007 and 2013 (the dataset here only contains the data from 2010 on). The VW data are given here on a daily basis. ### Data subsetting To reduce the data to an amount that can be handled in a tutorial, let's restrict the data to the depth of -0.3 and to three weeks of the year 2012. After subsetting, let's have an overview of the soil moisture time series measured by the data loggers. ```{r c6, message = FALSE, warning=FALSE} library(lubridate) library(ggplot2) trainDat <- data[data$altitude==-0.3& year(data$Date)==2012& week(data$Date)%in%c(10:12),] ggplot(data = trainDat, aes(x=Date, y=VW)) + geom_line(aes(colour=SOURCEID)) ``` What we can see is that (as expected) each logger location has a unique time series of soil moisture. ## Model training and prediction In the following we will use this subset of the cookfarm data as an example to spatially predict soil moisture (i.e. to map soil moisture) with (and without) consideration of the spatio-temporal dependencies. To start with, let's use this dataset to create a "default" Random Forest model that predicts soil moisture based on some predictor variables. To keep computation time at a minimum, we don't include hyperparameter tuning (hence mtry was set to 2), which is reasonable as Random Forests are comparably insensitive to tuning. ```{r c7, message = FALSE, warning=FALSE} library(caret) predictors <- c("DEM","TWI","Precip_cum","cday", "MaxT_wrcc","Precip_wrcc","BLD", "Northing","Easting","NDRE.M") set.seed(10) model <- train(trainDat[,predictors],trainDat$VW, method="rf",tuneGrid=data.frame("mtry"=2), importance=TRUE,ntree=50, trControl=trainControl(method="cv",number=3)) ``` Based on the trained model we can make spatial predictions of soil moisture. To do this we load a multiband raster that contains spatial data of all predictor variables for the 25th of March 2012 (as an example). We then apply the trained model to this data set.
```{r c8, message = FALSE, warning=FALSE} library(terra) predictors_sp <- rast(system.file("extdata","predictors_2012-03-25.tif",package="CAST")) prediction <- predict(predictors_sp,model,na.rm=TRUE) plot(prediction) ``` The result is a spatially comprehensive map of soil moisture for this day. We see that simply creating a map using machine learning and caret is an easy task; however, accurately measuring its performance is less simple. Though the map looks good at first sight, we now have to follow up with the question of how accurate this map is, hence we need to ask how well the model is able to map soil moisture. From a visual inspection it is noticeable that the model produces strange linear features at the eastern side of the farm, which look suspicious. But let's come back to this later and first focus on a statistical validation of the model. ## Cross validation strategies for spatio-temporal data Among validation strategies, k-fold cross validation (CV) is popular to estimate the performance of the model on data that have not been used for model training. During CV, models are repeatedly trained (k models) and in each model run, the data of one fold are put to the side and are not used for model training but for model validation. In this way, the performance of the model can be estimated using data that have not been included in the model training. ### The Standard approach: Random k-fold CV In the example above we used a random k-fold CV that we defined in caret's trainControl argument. More specifically, we used a random 3-fold CV. Hence, the data points in our dataset were RANDOMLY split into 3 folds. To assess the performance of the model let's have a look at the output of the Random CV: ```{r c9, message = FALSE, warning=FALSE} model ``` We see that soil moisture could be modelled with a high R² (0.90) which indicates a nearly perfect fit of the data. Sounds good, but unfortunately, the random k-fold CV does not give us a good indication for the map accuracy. Random k-fold CV means that each of the three folds (almost certainly) contains data points from each data logger. Therefore, a random CV cannot indicate the ability of the model to make predictions beyond the location of the training data (i.e. to map soil moisture). Since our aim is to map soil moisture, we rather need to perform a target-oriented validation which validates the model with respect to spatial mapping. ### Target-oriented validation We are not interested in the model performance on random subsets of our data loggers, but we need to know how well the model is able to make predictions for areas without data loggers. To find this out, we need to repeatedly leave the complete time series of one or more data loggers out and use them as test data during CV. To do this we first need to create meaningful folds rather than random folds. CAST's function `CreateSpacetimeFolds` is designed to provide index arguments used by caret's trainControl. The index defines which data points are used for model training during each model run and, conversely, which data points are held back. Hence, using the index argument we can account for the dependencies in the data by leaving the complete data from one or more data loggers out (LLO CV), from one or more time steps out (LTO CV) or from data loggers and time steps out (LLTO CV).
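As a small illustrative sketch (not evaluated here), leave-time-out folds could, for example, be created in the same way by passing the time-giving column via the `timevar` argument; `spacevar` and `timevar` can also be combined for LLTO CV.

```{r, eval = FALSE}
# Illustrative sketch: folds that leave complete time steps out (LTO CV),
# using the "Date" column of trainDat as the time-giving variable.
set.seed(10)
indices_LTO <- CreateSpacetimeFolds(trainDat, timevar = "Date", k = 3)
# the resulting index list is passed to caret in the same way:
# trainControl(method = "cv", index = indices_LTO$index)
```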
In this example we're focusing on LLO CV, therefore we use the column "SOURCEID" to define the location of a data logger and split the data into folds using this information. Analogous to the random CV we split the data into three folds, hence three model runs are performed, each leaving one third of all data loggers out for validation. Note that several variants of spatial CV have been suggested. What we call LLO here is just a simple example. See references in [Meyer and Pebesma 2022](https://www.nature.com/articles/s41467-022-29838-9) for some examples and have a look at [Mila et al 2022](https://doi.org/10.1111/2041-210X.13851) for the methodology implemented in the CAST function nndm. ```{r c10, message = FALSE, warning=FALSE} set.seed(10) indices <- CreateSpacetimeFolds(trainDat,spacevar = "SOURCEID", k=3) set.seed(10) model_LLO <- train(trainDat[,predictors],trainDat$VW, method="rf",tuneGrid=data.frame("mtry"=2), importance=TRUE, trControl=trainControl(method="cv", index = indices$index)) model_LLO ``` By inspecting the output of the model, we see that for predictions at new locations the R² is only 0.16, so the performance is much lower than expected from the random CV (R² = 0.90). Apparently, there is considerable overfitting in the model, causing a good random performance but a poor performance at new locations. This might partly be attributed to the choice of variables, where we must suspect that certain variables are misinterpreted by the model (see [Meyer et al 2018](https://www.sciencedirect.com/science/article/pii/S1364815217310976) or [talk at the OpenGeoHub summer school 2019](https://www.youtube.com/watch?v=mkHlmYEzsVQ)). Let's have a look at the variable importance ranking of Random Forest and see if we find something suspicious: ```{r c11, message = FALSE, warning=FALSE} plot(varImp(model_LLO)) ``` The importance ranking indicates that among others, "Easting" is an important variable. This fits the observation of implausible linear features in the predicted map. Apparently the model assigns a high importance to this variable, which causes a high random CV performance. But at the same time the model fails in the prediction at new locations because the variable is unsuitable for predictions beyond the locations of the data loggers used for model training. Assuming that certain variables are misinterpreted by the algorithm, we should be able to produce a higher LLO performance when such variables are removed. Let's see if this is true... ## Removing variables that cause overfitting CAST's forward feature selection (ffs) selects variables that make sense with respect to the selected CV method and excludes those which are counterproductive (or meaningless) with respect to the selected CV method. When we use LLO as CV method, ffs selects variables that lead in combination to the highest LLO performance (i.e. the best spatial model). All variables that have no spatial meaning or are even counterproductive won't improve, or will even reduce, the LLO performance and are therefore excluded from the model by the ffs. ffs does this by first training models on all possible pairs of two predictor variables. The best model of these initial models is kept. On the basis of this best model the number of predictor variables is iteratively increased and each of the remaining variables is tested for its improvement of the currently best model. The process stops if none of the remaining variables increases the model performance when added to the current best model.
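To make this selection logic more concrete, the following (not evaluated) sketch implements a simplified forward selection by hand for our case study. It is only an illustration of the idea, not the actual `ffs` implementation, which additionally takes care of hyperparameter tuning, alternative metrics and the bookkeeping of all fitted models.

```{r, eval = FALSE}
# Simplified sketch of the forward selection idea (illustration only):
cv_r2 <- function(vars){
  m <- train(trainDat[,vars], trainDat$VW, method = "rf",
             tuneGrid = data.frame("mtry" = 2), ntree = 50,
             trControl = trainControl(method = "cv", index = indices$index))
  max(m$results$Rsquared)
}
pairs <- combn(predictors, 2, simplify = FALSE)   # all 2-variable models
pair_scores <- sapply(pairs, cv_r2)
best <- pairs[[which.max(pair_scores)]]
best_score <- max(pair_scores)
repeat{                                           # add variables one by one
  candidates <- setdiff(predictors, best)
  gains <- sapply(candidates, function(v) cv_r2(c(best, v)))
  if(max(gains) <= best_score) break              # stop if no improvement
  best_score <- max(gains)
  best <- c(best, candidates[which.max(gains)])
}
best
```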
So let's run the ffs on our case study using R² as a metric to select the optimal variables. This process will take 1-2 minutes... ```{r c12, message = FALSE, warning=FALSE} set.seed(10) ffsmodel_LLO <- ffs(trainDat[,predictors],trainDat$VW,metric="Rsquared", method="rf", tuneGrid=data.frame("mtry"=2), verbose=FALSE,ntree=50, trControl=trainControl(method="cv", index = indices$index)) ffsmodel_LLO ffsmodel_LLO$selectedvars ``` Using the ffs with LLO CV, the R² could be increased from 0.16 to 0.28. The variables that are used for this model are "DEM","NDRE.M" and "Northing". All others are removed because they have (at least in this small example) no spatial meaning or are even counterproductive. Using the `plot` function on the ffs model we can visualize how the performance of the model changed depending on the variables being used: ```{r c13, message = FALSE, warning=FALSE} plot(ffsmodel_LLO) ``` We see that the best model using two variables led to an R² of slightly above 0.2. Using the third variable could slightly increase the R². Any further variable could not improve the LLO performance. Note that the R² features a high standard deviation regardless of the variables being used. This is due to the small dataset that was used, which cannot lead to robust results. What effect does the new model have on the spatial representation of soil moisture? ```{r c14, message = FALSE, warning=FALSE} prediction_ffs <- predict(predictors_sp,ffsmodel_LLO,na.rm=TRUE) plot(prediction_ffs) ``` We see that the variable selection does not only affect the statistical performance; the predicted spatial patterns also change considerably. Note that the linear features are no longer present in the resulting soil moisture map, most likely because "Easting" was removed from the set of predictor variables by ffs. ## Area of Applicability Still it is required to analyse if the model can be applied to the entire study area or if there are locations that are very different in their predictor properties from what the model has learned from. See more details in the vignette on the Area of applicability and [Meyer and Pebesma 2021](https://doi.org/10.1111/2041-210X.13650). ```{r c15, message = FALSE, warning=FALSE} ### AOA for which the spatial CV error applies: AOA <- aoa(predictors_sp,ffsmodel_LLO) plot(prediction_ffs,main="prediction for the AOA \n(spatial CV error applied)") plot(AOA$AOA,col=c("grey","transparent"),add=T) #spplot(prediction_ffs,main="prediction for the AOA \n(spatial CV error applied)")+ #spplot(AOA$AOA,col.regions=c("grey","transparent")) ### AOA for which the random CV error applies: AOA_random <- aoa(predictors_sp,model) plot(prediction,main="prediction for the AOA \n(random CV error applied)") plot(AOA_random$AOA,col=c("grey","transparent"),add=T) #spplot(prediction,main="prediction for the AOA \n(random CV error applied)")+ #spplot(AOA_random$AOA,col.regions=c("grey","transparent")) ``` The figure shows in grey the areas that are outside the area of applicability; hence, predictions should not be considered for these locations. See the tutorial on the AOA in this package for more information. ## Conclusions To conclude, the tutorial has shown how CAST can be used to facilitate target-oriented (here: spatial) CV on spatial and spatio-temporal data, which is crucial to obtain meaningful validation results. Using the ffs in conjunction with target-oriented validation, variables can be excluded that are counterproductive with respect to the target-oriented performance due to misinterpretations by the algorithm.
ffs therefore helps to select the ideal set of predictor variables for spatio-temporal prediction tasks and gives objective error estimates. ## Final notes The intention of this tutorial is to describe the motivation that led to the development of CAST as well as its functionality. Priority is not on modelling soil moisture of the cookfarm in the best possible way but to provide an example for the motivation and functionality of CAST that can run within a few minutes. Hence, only a very small subset of the entire cookfarm dataset was used. Keep in mind that due to the small subset the example is not robust and quite different results might be obtained depending on small changes in the settings. The intention of showing the motivation of CAST is also the reason why the coordinates are used here as predictor variables. Though coordinates are used as predictors in quite some scientific studies they rather provide here an extreme example of how misleading variables can lead to overfitting. ## Further reading * Meyer, H., & Pebesma, E. (2022): Machine learning-based global maps of ecological variables and the challenge of assessing them. Nature Communications. Accepted. * Meyer, H., & Pebesma, E. (2021). Predicting into unknown space? Estimating the area of applicability of spatial prediction models. Methods in Ecology and Evolution, 12, 1620– 1633. [https://doi.org/10.1111/2041-210X.13650] * Meyer H, Reudenbach C, Wöllauer S,Nauss T (2019) Importance of spatial predictor variable selection in machine learning applications–Moving from data reproduction to spatial prediction. Ecological Modelling 411: 108815 [https://doi.org/10.1016/j.ecolmodel.2019.108815] * Meyer H, Reudenbach C, Hengl T, Katurij M, Nauss T (2018) Improving performance of spatio-temporal machine learning models using forward feature selection and target-oriented validation. Environmental Modelling & Software 101: 1–9 [https://doi.org/10.1016/j.envsoft.2017.12.001] * Talk from the OpenGeoHub summer school 2019 on spatial validation and variable selection: https://www.youtube.com/watch?v=mkHlmYEzsVQ. * Tutorial (https://youtu.be/EyP04zLe9qo) and Lecture (https://youtu.be/OoNH6Nl-X2s) recording from OpenGeoHub summer school 2020 on the area of applicability. As well as talk at the OpenGeoHub summer school 2021: https://av.tib.eu/media/54879
/scratch/gouwar.j/cran-all/cranData/CAST/vignettes/cast01-CAST-intro-cookfarm.Rmd