# function to write required jags model as a text file
modeltext = function(dat.str, randvar.ia, corstr, path){
  cont <- as.character(dat.str$iv[dat.str$type == "cont"])
  cat <- as.character(dat.str$iv[dat.str$type == "cat"])
  if (length(randvar.ia) == 1){
    conthcl.pre <- cont[as.logical(dat.str[dat.str$type == "cont", 3:ncol(dat.str)])]
    cathcl.pre <- cat[as.logical(dat.str[dat.str$type == "cat", 3:ncol(dat.str)])]
    if (length(conthcl.pre) == 0){conthcl.pre <- rep(0, length(cont))}
    if (length(cathcl.pre) == 0){cathcl.pre <- rep(0, length(cat))}
  } else {
    conthcl.pre <- cont[any(dat.str[dat.str$type == "cont", 3:ncol(dat.str)] > 0)] # as.logical was rowSums before
    cathcl.pre <- cat[any(dat.str[dat.str$type == "cat", 3:ncol(dat.str)] > 0)] # as.logical was rowSums before
  }
  randvar <- names(dat.str[3:ncol(dat.str)])
  allnames <- c(cont, cat)
  conthcl <- as.numeric(cont == conthcl.pre)
  cathcl <- as.numeric(cat == cathcl.pre)
  nrhcl <- sum(conthcl, cathcl, unlist(randvar.ia)) + 1 # random slopes plus intercept
  nrcont <- length(cont)
  nrcat <- length(cat)
  nrand <- length(randvar)
  nrIA <- (nrcat + nrcont - 1)*(nrcat + nrcont)/2

  # initialize variables for required parameters on different levels of hierarchy
  bcont <- replicate(nrcont, matrix(NA, nrow = nrand, ncol = 3), simplify = F)
  bcat <- replicate(nrcat, matrix(NA, nrow = nrand, ncol = 3), simplify = F)
  # intercepts for random variables
  bI <- replicate(nrand, rep(NA, 3), simplify = F)
  bIAs <- replicate(((nrcont + nrcat)^2 - (nrcont + nrcat))/2, matrix(NA, nrow = nrand, ncol = 3), simplify = F)
  mucont <- vector("list", 2)
  mucat <- vector("list", 2)
  muIAs <- vector("list", 2)
  precont <- matrix()
  precat <- matrix()
  preIAs <- matrix()
  pl.pre <- numeric()
  taucont <- matrix(NA, nrow = nrcont, ncol = nrand)
  taucat <- matrix(NA, nrow = nrcat, ncol = nrand)
  tauIAs <- matrix(NA, nrow = nrIA, ncol = nrand)
  sigmacont <- matrix(NA, nrow = nrcont, ncol = nrand)
  sigmacat <- matrix(NA, nrow = nrcat, ncol = nrand)
  sigmaIAs <- matrix(NA, nrow = nrIA, ncol = nrand)

  # correlation between random variables: matrices of what order?
  multivar <- rep(list(NULL), nrand)
  bivar <- rep(list(NULL), nrand)
  multicol <- rep(list(1), nrand)
  for (i in 1:nrand){
    threshold <- 1
    test <- which(corstr[[i]] == 1, arr.ind = TRUE)
    tab <- table(test)
    tmp <- as.numeric(names(tab)[tab == threshold])
    if (length(tmp) != 0){
      biv.low <- sort(tmp)[1:(length(tmp)/2)]
      biv.up <- sort(tmp)[floor((length(tmp)/2) + 1):length(tmp)]
      bivar[[i]] <- matrix(c(biv.low, biv.up), ncol = 2, byrow = TRUE)
    }
    tmp <- as.numeric(names(tab)[tab > threshold])
    if (length(tmp) >= 0){
      multicol[[i]] <- max(length(tmp), multicol[[i]])
      if (length(tmp) != 0){
        multivar[[i]] <- matrix(as.numeric(names(tab)[tab > threshold]), nrow = 1, byrow = TRUE)
      } else { multivar[i] <- list(NULL)}
    }
  }
  tau0g <- vector("list", nrand)
  sigma0 <- vector("list", nrand)
  mu0g <- vector("list", nrand)
  for (i in 1:length(randvar)){
    if (all(multivar[[i]] != 1) & all(bivar[[i]] != 1)){
      tau0g[[i]] <- paste("tau0g", ".", randvar[i], sep = '')
      sigma0[[i]] <- paste("sigma0", ".", randvar[i], sep = '')
    }
  }
  bimultivar <- mapply(cbind, multivar, bivar, SIMPLIFY = FALSE)
  ind <- lapply(bimultivar, function(x) sum(x == 1))
  for (i in 1:length(multivar)){
    if (any(as.logical(unlist(ind)))){
      if (ind[[i]] == 0){
        mu0g[[i]] <- "mu.corr[1]"
      }
    } else {
      mu0g[[i]] <- "mu0g"
    }
  }

  # random intercept
  for (i in 1:nrand){
    corrcount <- 0
    if (all(multivar[[i]] != 1) & all(bivar[[i]] != 1)){
      bI[[i]][[1]] <- paste("b", randvar[i], "[", randvar[i], "[i]]", sep = '')
      bI[[i]][[2]] <- paste("b", randvar[i], "[n]", sep = '')
      bI[[i]][[3]] <- paste("b", randvar[i], sep = '')
    } else {
      if (any(multivar[[i]] == 1)){
        tmp <- which(multivar[[i]] == 1)[1]
        corrcount <- corrcount + 1
        bI[[i]][[1]] <- paste("b", randvar[i], paste(multivar[[i]][tmp,], collapse = ''), "[", randvar[i], "[i],", corrcount, "]", sep = '')
        bI[[i]][[2]] <- paste("b", randvar[i], paste(multivar[[i]][tmp,], collapse = ''), "[n", ",", corrcount, "]", sep = '')
        bI[[i]][[3]] <- paste("b", randvar[i], paste(multivar[[i]][tmp,], collapse = ''), sep = '')
      }
      if (any(bivar[[i]] == 1)){
        tmp <- which(bivar[[i]] == 1, arr.ind = TRUE)[1]
        corrcount <- corrcount + 1
        bI[[i]][[1]] <- paste("b", randvar[i], paste(bivar[[i]][tmp,], collapse = ''), "[", randvar[i], "[i],", corrcount, "]", sep = '')
        bI[[i]][[2]] <- paste("b", randvar[i], paste(bivar[[i]][tmp,], collapse = ''), "[n", ",", corrcount, "]", sep = '')
        bI[[i]][[3]] <- paste("b", randvar[i], paste(bivar[[i]][tmp,], collapse = ''), sep = '')
      }
    }
  }

  # where are the intercepts?
  tmp.multi <- unlist(lapply(multivar, FUN = function(x) sum(x == 1)))
  tmp.bi <- unlist(lapply(bivar, FUN = function(x) sum(x == 1)))
  corrcount <- matrix(c(tmp.multi, tmp.bi), byrow = TRUE, nrow = 2)

  # random slopes and group parameters
  contcount <- matrix(0, nrow = nrcont, ncol = nrand)
  nhclcont <- rep(0, times = nrcont)
  if (nrcont > 0){
    for (ncont in 1:nrcont){
      onlyfix <- 0
      if (conthcl[ncont] == 1){
        for (i in which(dat.str[dat.str$type == "cont",][ncont, 3:ncol(dat.str)] == 1)){
          if (all(multivar[[i]] != 1 + ncont) & all(bivar[[i]] != 1 + ncont)){
            bcont[[ncont]][i,1] <- paste("b", cont[ncont], ".", randvar[i], "[", randvar[i], "[i]]", " * x", cont[ncont], "[i]", sep = "")
            bcont[[ncont]][i,2] <- paste("b", cont[ncont], ".", randvar[i], "[n]", sep = "")
            bcont[[ncont]][i,3] <- paste("b", cont[ncont], ".", randvar[i], sep = "")
            contcount[ncont,i] <- contcount[ncont,i] + 1
          } else {
            if (any(multivar[[i]] == (1 + ncont))){
              tmp <- which(multivar[[i]] == 1 + ncont, arr.ind = TRUE)[1]
              corrcount[1,i] <- corrcount[1,i] + 1
              bcont[[ncont]][i,1] <- paste("b", randvar[i], paste(multivar[[i]][tmp,], collapse = ''), "[", randvar[i], "[i],", corrcount[1,i], "]", " * x", cont[ncont], "[i]", sep = "")
              bcont[[ncont]][i,2] <- paste("b", randvar[i], paste(multivar[[i]][tmp,], collapse = ''), "[n", ",", corrcount[1,i], "]", sep = "")
              bcont[[ncont]][i,3] <- paste("b", randvar[i], paste(multivar[[i]][tmp,], collapse = ''), sep = "")
            } else if (any(bivar[[i]] == (1 + ncont))){
              tmp <- which(bivar[[i]] == (1 + ncont), arr.ind = TRUE)[1]
              corrcount[2,i] <- corrcount[2,i] + 1
              bcont[[ncont]][i,1] <- paste("b", randvar[i], paste(bivar[[i]][tmp,], collapse = ''), "[", randvar[i], "[i],", corrcount[2,i], "]", " * x", cont[ncont], "[i]", sep = "")
              bcont[[ncont]][i,2] <- paste("b", randvar[i], paste(bivar[[i]][tmp,], collapse = ''), "[n", ",", corrcount[2,i], "]", sep = "")
              bcont[[ncont]][i,3] <- paste("b", randvar[i], paste(bivar[[i]][tmp,], collapse = ''), sep = "")
            }
          }
        }
      } else {
        mucont[[1]][[ncont]] <- paste("mu", cont[ncont], " * x", cont[ncont], "[i]", sep = "")
        onlyfix <- 1
      }
      if (all(unlist(multivar) != (1 + ncont)) & all(unlist(bivar) != (1 + ncont))){
        mucont[[2]][[ncont]] <- paste("mu", cont[ncont], sep = "")
        # plus pre for rescaling as cauchy
        precont[[ncont]] <- paste("pre", cont[ncont], sep = "")
        if (conthcl[ncont] != 1){nhclcont[ncont] <- 1}
        for (i in which(dat.str[dat.str$type == "cont",][ncont, 3:ncol(dat.str)] == 1)){
          taucont[ncont,i] <- paste("tau", cont[ncont], ".", randvar[i], sep = "")
          # plus sigma for rescaling prec as sd
          sigmacont[ncont,i] <- paste("sigma", cont[ncont], ".", randvar[i], sep = "")
        }
      }
      # random slopes for cat pred only when required
      if (onlyfix == 0 & sum(contcount) != sum(dat.str[dat.str$type == "cont",][1:ncont, 3:ncol(dat.str)] == 1)){
        mucont[[2]][ncont] <- paste0("mu.corr[", (1 + ncont), "]")
        for (i in which(contcount == 1, arr.ind = TRUE)[2])
          taucont[ncont,i] <- paste("tau", cont[ncont], ".", randvar[i], sep = "")
        # plus sigma for rescaling prec as sd
        sigmacont[ncont,i] <- paste("sigma", cont[ncont], ".", randvar[i], sep = "")
      }
    }
    pl.pre <- contcount
  } else {
    bcont <- rm(bcont); mucont <- list(NA, NA); precont <- rm(precont); taucont <- rm(taucont); sigmacont <- rm(sigmacont)
  }

  catcount <- matrix(0, nrow = nrcat, ncol = nrand)
  nhclcat <- rep(0, times = nrcat)
  if (nrcat > 0){
    for (ncat in 1:nrcat){
      onlyfix <- 0
      # random slopes for categorical predictors
      if (cathcl[ncat] == 1){
        # nrs <- which(dat.str[dat.str$type == "cat",][ncat,3:ncol(dat.str)]==1)
        # nrs <- unique(which(as.matrix(dat.str[dat.str$type == "cat",][,3:ncol(dat.str)]==1),arr.ind = TRUE)[,2])
        # for (i in nrs){
        for (i in which(dat.str[dat.str$type == "cat",][ncat, 3:ncol(dat.str)] == 1)){
          if (all(multivar[[i]] != (1 + ncont + ncat)) & all(bivar[[i]] != (1 + ncont + ncat))){
            bcat[[ncat]][i,1] <- paste("b", cat[ncat], ".", randvar[i], "[", randvar[i], "[i]]", " * x", cat[ncat], "[i]", sep = "")
            bcat[[ncat]][i,2] <- paste("b", cat[ncat], ".", randvar[i], "[n]", sep = "")
            bcat[[ncat]][i,3] <- paste("b", cat[ncat], ".", randvar[i], sep = "")
            catcount[ncat, i] <- catcount[ncat, i] + 1
          } else {
            if (any(multivar[[i]] == (1 + ncont + ncat))){
              tmp <- which(multivar[[i]] == (1 + ncont + ncat), arr.ind = TRUE)[1]
              corrcount[1,i] <- corrcount[1,i] + 1
              bcat[[ncat]][i,1] <- paste("b", randvar[i], paste(multivar[[i]][tmp,], collapse = ''), "[", randvar[i], "[i],", corrcount[1,i], "]", " * x", cat[ncat], "[i]", sep = "")
              bcat[[ncat]][i,2] <- paste("b", randvar[i], paste(multivar[[i]][tmp,], collapse = ''), "[n", ",", corrcount[1,i], "]", sep = "")
              bcat[[ncat]][i,3] <- paste("b", randvar[i], paste(multivar[[i]][tmp,], collapse = ''), sep = "")
            } else if (any(bivar[[i]] == (1 + ncont + ncat))){
              tmp <- which(bivar[[i]] == (1 + ncont + ncat), arr.ind = TRUE)[1]
              corrcount[2,i] <- corrcount[2,i] + 1
              bcat[[ncat]][i,1] <- paste("b", randvar[i], paste(bivar[[i]][tmp,], collapse = ''), "[", randvar[i], "[i],", corrcount[2,i], "]", " * x", cat[ncat], "[i]", sep = "")
              bcat[[ncat]][i,2] <- paste("b", randvar[i], paste(bivar[[i]][tmp,], collapse = ''), "[n", ",", corrcount[2,i], "]", sep = "")
              bcat[[ncat]][i,3] <- paste("b", randvar[i], paste(bivar[[i]][tmp,], collapse = ''), sep = "")
            }
          }
        }
      } else {
        mucat[[1]][ncat] <- paste("mu", cat[ncat], " * x", cat[ncat], "[i]", sep = "")
        onlyfix <- 1
      }
      if (all(unlist(multivar) != (1 + nrcont + ncat)) & all(unlist(bivar) != (1 + nrcont + ncat))){
        mucat[[2]][[ncat]] <- paste("mu", cat[ncat], sep = "")
        # plus pre for rescaling as cauchy
        precat[[ncat]] <- paste("pre", cat[ncat], sep = "")
        if (cathcl[ncat] != 1){nhclcat[ncat] <- 1}
        for (i in which(dat.str[dat.str$type == "cat",][ncat, 3:ncol(dat.str)] == 1)){
          taucat[ncat,i] <- paste("tau", cat[ncat], ".", randvar[i], sep = "")
          # plus sigma for rescaling prec as sd
          sigmacat[ncat,i] <- paste("sigma", cat[ncat], ".", randvar[i], sep = "")
        }
      }
      if (onlyfix == 0 & sum(catcount) != sum(dat.str[dat.str$type == "cat",][1:ncat, 3:ncol(dat.str)] == 1)){
        mucat[[2]][ncat] <- paste0("mu.corr[", (1 + nrcont + ncat), "]")
        for (i in which(catcount == 1, arr.ind = TRUE)[2])
          taucat[ncat,i] <- paste("tau", cat[ncat], ".", randvar[i], sep = "")
        # plus sigma for rescaling prec as sd
        sigmacat[ncat,i] <- paste("sigma", cat[ncat], ".", randvar[i], sep = "")
      }
    }
  } else {
    bcat <- rm(bcat); mucat <- list(NA, NA); precat <- rm(precat); taucat <- rm(taucat); sigmacat <- rm(sigmacat)
  }
  pl.pre <- rbind(pl.pre, catcount)
  # pl.ind <- rowSums(pl.pre) == nrand
  pl.ind <- rowSums(pl.pre) > 0

  # covariance structure if required
  bi.yes <- which(lapply(bivar, length) > 0)
  multi.yes <- which(lapply(multivar, length) > 0)
  multipart <- replicate(nrand, matrix(NA, nrow = (max(lengths(multivar))), ncol = 1), simplify = F)
  bipart <- replicate(nrand, matrix(NA, nrow = (max(lengths(bivar))/2), ncol = 1), simplify = F)
  # multivar<-lapply(multivar,function(x)as.matrix(x))
  # bivar<-lapply(bivar,function(x)as.matrix(x))
  for (i in multi.yes){
    for (j in 1:nrow(multivar[[i]])){
      multipart[[i]][j,] <- paste("b", randvar[i], paste(unique(multivar[[i]][j,]), collapse = ""),
                                  "[n, 1:", length(multivar[[i]]), "] ~ dmnorm (mu.corr[c(",
                                  paste(unique(multivar[[i]][j,]), collapse = ","), ")]",
                                  ", SigmaInv", randvar[i], paste(unique(multivar[[i]][j,]), collapse = ""),
                                  "[1:", length(multivar[[i]]), ",1:", length(multivar[[i]]), "])", "\n", sep = "")
    }
  }
  for (i in bi.yes){
    for (j in 1:nrow(bivar[[i]])){
      bipart[[i]][j,] <- paste("b", randvar[i], paste(unique(bivar[[i]][j,]), collapse = ""),
                               "[n, 1:", length(bivar[[i]][j,]), "] ~ dmnorm (mu.corr[c(",
                               paste(unique(bivar[[i]][j,]), collapse = ","), ")]",
                               ", SigmaInv", randvar[i], paste(as.vector(unique(bivar[[i]][j,])), collapse = ""),
                               "[1:", length(bivar[[i]][j,]), ",1:", length(bivar[[i]][j,]), "])", "\n", sep = "")
    }
  }
  bipart <- lapply(bipart, function(x) x[!is.na(x)])
  multipart <- lapply(multipart, function(x) x[!is.na(x)])
  mupart.corr <- NA
  pre.corr <- NA
  sigmainv.corr <- NA
  sigma.corr <- NA
  wishdf <- NA
  corr.names <- NA
  for (both in union(unique(unlist(multivar)), unique(unlist(bivar)))){
    if (both == 1){
      mupart.corr <- append(mupart.corr, paste("mu.corr[", both, "] ~ dnorm(0,1)", "\n", sep = ""))
    } else {
      mupart.corr <- append(mupart.corr, paste("mu.corr[", both, "] <- pre", both, "* scale",
                                               c("cont","cat")[1 + as.numeric(both > (nrcont + 1))], "\n", sep = ""))
      pre.corr <- append(pre.corr, paste("pre", both, " ~ dt(0,1,1)", "\n", sep = ""))
      corr.names <- c(corr.names, as.character(dat.str$iv[both - 1]))
    }
  }
  corr.names <- corr.names[!is.na(corr.names)]
  mupart.corr <- mupart.corr[!is.na(mupart.corr)]
  pre.corr <- pre.corr[!is.na(pre.corr)]
  Icount <- 1
  rho <- NA
  RHO <- NA
  for (i in 1:length(bivar)){
    if (!is.null(bivar[[i]])){
      for (j in 1:nrow(bivar[[i]])){
        long <- length(bivar[[i]][j,])
        sigmainv.corr <- append(sigmainv.corr, paste0("SigmaInv", randvar[i], paste(unique(bivar[[i]][j,]), collapse = ""),
                                                      "[1:", long, ",1:", long, "] ~ dwish(I", Icount, "[1:", long, ",1:", long, "],", long + 1, ")", "\n"))
        reqsigma <- paste0("Sigma", randvar[i], paste(unique(bivar[[i]][j,]), collapse = ""))
        sigma.corr <- append(sigma.corr, paste0(reqsigma, "[1:", long, ",1:", long, "] <- inverse(",
                                                "SigmaInv", randvar[i], paste(unique(bivar[[i]][j,]), collapse = ""),
                                                "[1:", long, ",1:", long, "])", "\n"))
        rho <- append(rho, paste0("for (i1 in 1:", long, "){\n for(i2 in 1:", long, "){rho",
                                  paste0(randvar[i], paste(unique(bivar[[i]][j,]), collapse = "")),
                                  "[i1,i2] <- ", reqsigma, "[i1,i2]/sqrt(", reqsigma, "[i1,i1] *", reqsigma, "[i2,i2])\n}\n}"))
        RHO <- append(RHO, paste0("rho", paste0(randvar[i], paste(unique(bivar[[i]][j,]), collapse = ""))))
        Icount <- Icount + 1
        wishdf <- c(wishdf, long)
      }
    }
  }
  for (i in 1:length(multivar)){
    if (!is.null(multivar[[i]])){
      for (j in 1:nrow(multivar[[i]])){
        long <- length(multivar[[i]][j,])
        sigmainv.corr <- append(sigmainv.corr, paste0("SigmaInv", randvar[i], paste(unique(multivar[[i]][j,]), collapse = ""),
                                                      "[1:", long, ",1:", long, "] ~ dwish(I", Icount, "[1:", long, ",1:", long, "],", long + 1, ")", "\n"))
        reqsigma <- paste0("Sigma", randvar[i], paste(unique(multivar[[i]][j,]), collapse = ""))
        sigma.corr <- append(sigma.corr, paste0(reqsigma, "[1:", long, ",1:", long, "] <- inverse(",
                                                "SigmaInv", randvar[i], paste(unique(multivar[[i]][j,]), collapse = ""),
                                                "[1:", long, ",1:", long, "])", "\n"))
        rho <- append(rho, paste0("for (i1 in 1:", long, "){\n for(i2 in 1:", long, "){rho",
                                  paste0(randvar[i], paste(unique(multivar[[i]][j,]), collapse = "")),
                                  "[i1,i2] <- ", reqsigma, "[i1,i2]/sqrt(", reqsigma, "[i1,i1] *", reqsigma, "[i2,i2])\n}\n}"))
        RHO <- append(RHO, paste0("rho", paste0(randvar[i], paste(unique(multivar[[i]][j,]), collapse = ""))))
        Icount <- Icount + 1
        wishdf <- c(wishdf, long)
      }
    }
  }
  sigmainv.corr <- sigmainv.corr[!is.na(sigmainv.corr)]
  sigma.corr <- sigma.corr[!is.na(sigma.corr)]
  wishdf <- wishdf[!is.na(wishdf)]
  RHO <- RHO[!is.na(RHO)]
  rho <- rho[!is.na(rho)]

  ia.purecont <- vector()
  if (nrcat & nrcont > 0){
    counter <- 0
    for (ncont in 1:(nrcont + nrcat - 1)){
      for (ncat in (ncont + 1):(nrcont + nrcat)){
        counter <- counter + 1
        # if(sum(startsWith(x = c(allnames[ncont],allnames[ncat]), prefix = substr(allnames[ncont],start = 1, stop = nchar(allnames[ncont])-1)))==2 & grepl(pattern=".spl",x=allnames[ncont]) & grepl(pattern=".spl",x=allnames[ncat])){
        # }else{  # spline-interaction check disabled; the block below always runs
        for (i in 1:nrand){
          if (randvar.ia[[i]][ncat,ncont] == 1){
            bIAs[[counter]][i,1] <- paste("b", allnames[ncont], "x", allnames[ncat], ".", randvar[i], "[", randvar[i], "[i]]", " * x", allnames[ncont], "[i]", " * x", allnames[ncat], "[i]", sep = "")
            bIAs[[counter]][i,2] <- paste("b", allnames[ncont], "x", allnames[ncat], ".", randvar[i], "[n]", sep = "")
            bIAs[[counter]][i,3] <- paste("b", allnames[ncont], "x", allnames[ncat], ".", randvar[i], sep = "")
            tauIAs[counter,i] <- paste("tau", allnames[ncont], "x", allnames[ncat], ".", randvar[i], sep = "")
            # plus sigma for rescaling prec as sd
            sigmaIAs[counter,i] <- paste("sigma", allnames[ncont], "x", allnames[ncat], ".", randvar[i], sep = "")
          } else {
            muIAs[[1]][[counter]] <- paste("mu", allnames[ncont], "x", allnames[ncat], " * x", allnames[ncont], "[i]", " * x", allnames[ncat], "[i]", sep = "")
          }
          muIAs[[2]][counter] <- paste("mu", allnames[ncont], "x", allnames[ncat], sep = "")
          # preIAs <- append(preIAs, paste("pre", allnames[ncont], "x", allnames[ncat], sep = ""))
        }
        preIAs <- append(preIAs, paste("pre", allnames[ncont], "x", allnames[ncat], sep = ""))
        # }
        if (ncont <= nrcont & ncat <= nrcont){
          ia.purecont <- c(ia.purecont, 1)
        } else {
          ia.purecont <- c(ia.purecont, 0)
        }
      }
    }
    preIAs <- preIAs[2:length(preIAs)]
  } else {
    muIAs <- rm(muIAs); preIAs <- rm(preIAs); tauIAs <- rm(tauIAs); sigmaIAs <- rm(sigmaIAs)
  }

  ## assign text for regression formula
  eqparms <- NA
  B <- vector("list", nrand)
  for (i in 1:nrand){
    eqparms <- append(eqparms, bI[[i]][1])
    if (all(multivar[[i]] != 1) & all(bivar[[i]] != 1)){
      B[[i]] <- append(B[[i]], bI[[i]][2])
    }
  }
  # assign random cont effects
  for (i in which(conthcl == 1)){
    for (j in which(dat.str[dat.str$type == "cont",][i, 3:ncol(dat.str)] == 1)){
      eqparms <- append(eqparms, bcont[[i]][j,1])
      if (all(multivar[[j]] != (1 + i)) & all(bivar[[j]] != (1 + i))){
        B[[j]] <- append(B[[j]], bcont[[i]][j,2])
      }
    }
  }
  # assign fixed cont effects
  for (i in which(conthcl == 0)){
    eqparms <- append(eqparms, mucont[[1]][i])
  }
  # same for cat and ia effects
  for (i in which(cathcl == 1)){
    for (j in which(dat.str[dat.str$type == "cat",][i, 3:ncol(dat.str)] == 1)){
      eqparms <- append(eqparms, bcat[[i]][j,1])
      if (all(multivar[[j]] != (1 + sum(conthcl) + i)) & all(bivar[[j]] != (1 + sum(conthcl) + i))){
        B[[j]] <- append(B[[j]], bcat[[i]][j,2])
      }
    }
  }
  for (i in which(cathcl == 0)){
    eqparms <- append(eqparms, mucat[[1]][i])
  }
  # no correlations between random ias
  for (j in 1:nrand){
    for (i in which(randvar.ia[[j]][lower.tri(randvar.ia[[j]])] == 1)){
      eqparms <- append(eqparms, bIAs[[i]][j,1])
      B[[j]] <- append(B[[j]], bIAs[[i]][j,2])
    }
  }
  sums <- matrix(0, nrow = nrow(randvar.ia[[1]]), ncol = ncol(randvar.ia[[1]]))
  for (i in 1:nrand){
    sums <- sums + randvar.ia[[i]]
  }
  neitherIA <- which(sums[lower.tri(sums)] == 0)
  for (i in neitherIA){
    eqparms <- append(eqparms, muIAs[[1]][i])
  }
  eqparms <- eqparms[!is.na(eqparms)]

  # split up individual effects according to random grouping variable
  TAU <- vector("list", nrand)
  SIGMA <- vector("list", nrand)
  MU <- vector("list", nrand)
  PRE <- vector("list", nrand)
  for (i in 1:nrand){
    TAU[[i]] <- c(tau0g[[i]], taucont[,i], taucat[,i], tauIAs[which(randvar.ia[[i]][lower.tri(randvar.ia[[i]])] == 1), i])
    TAU[[i]] <- TAU[[i]][!is.na(TAU[[i]])]
    SIGMA[[i]] <- c(sigma0[i], sigmacont[,i], sigmacat[,i], sigmaIAs[which(randvar.ia[[i]][lower.tri(randvar.ia[[i]])] == 1), i])
    SIGMA[[i]] <- SIGMA[[i]][!is.na(SIGMA[[i]])]
  }
  MU <- mu0g
  for (i in which(conthcl == 1)){
    for (j in which(dat.str[dat.str$type == "cont",][i, 3:ncol(dat.str)] == 1)){
      if (all(multivar[[j]] != (1 + i)) & all(bivar[[j]] != (1 + i))){
        MU[[j]] <- append(MU[[j]], mucont[[2]][i])
      }
    }
  }
  for (i in which(cathcl == 1)){
    for (j in which(dat.str[dat.str$type == "cat",][i, 3:ncol(dat.str)] == 1)){
      if (all(multivar[[j]] != (1 + sum(conthcl) + i)) & all(bivar[[j]] != (1 + sum(conthcl) + i))){
        MU[[j]] <- append(MU[[j]], mucat[[2]][i])
      }
    }
  }
  for (i in 1:nrand){
    MU[[i]] <- append(MU[[i]], muIAs[[2]][which(randvar.ia[[i]][lower.tri(randvar.ia[[i]])] == 1)])
  }
  MU <- lapply(MU, function(x) x[!is.na(x)])

  # part 1 of required model text
  regeq <- matrix()
  for (i in 1:length(eqparms)){
    regeq <- append(regeq, c(eqparms[i], "+", "\n"))
  }
  # text modules for jags model text
  likelihood <- paste("y[i] ~ dnorm( mu[i], tau )\n")
  regeq <- paste(regeq[2:(length(regeq) - 2)], collapse = '')
  part1 <- paste("for ( i in 1:Ndata ) {\n", likelihood, "mu[i] <- ", regeq, "\n}")
  eff.rand <- vector("list", nrand)
  part2 <- replicate(nrand, 0, simplify = F)
  for (i in 1:length(B)){
    if (!is.null(B[[i]])){
      for (j in 1:length(B[[i]])){
        eff.rand[[i]][j] <- paste(B[[i]][j], "~ dnorm(", MU[[i]][j], ",", TAU[[i]][j], ")\n")
      }
    }
    tmp <- paste(eff.rand[[i]], collapse = ' ')
    part2[[i]] <- paste("for (n in 1:", "N", randvar[i], "s)", sep = '', "{\n", paste(eff.rand[[i]], collapse = ''), collapse = '')
  }
  mucorr <- vector("list", 1)
  for (i in 1:length(MU)){
    mucorr[[i]] <- c(NA, MU[[i]][grepl("corr", MU[[i]])])
    MU[[i]] <- c(NA, MU[[i]][!grepl("corr", MU[[i]])])
  }
  for (i in 1:length(mucont)){
    mucorr <- c(mucorr, mucont[[i]][grepl("corr", mucont[[i]])], mucat[[i]][grepl("corr", mucat[[i]])])
    mucont[[i]] <- c(NA, mucont[[i]][!grepl("corr", mucont[[i]])])
    mucat[[i]] <- c(NA, mucat[[i]][!grepl("corr", mucat[[i]])])
  }
  mucorr <- unique(unlist(lapply(mucorr, function(x) x[!is.na(x)])))
  MU <- lapply(MU, function(x) x[!is.na(x)])
  mucont <- lapply(mucont, function(x) x[!is.na(x)])
  mucat <- lapply(mucat, function(x) x[!is.na(x)])
  PRE <- c(precont, precat, preIAs)
  PRE <- PRE[!is.na(PRE)]
  MU.INT <- c(mu0g, mucont[[2]], mucat[[2]], muIAs[[2]]) #, "mu.corr"
  for (i in 1:length(bipart)){
    if (length(bipart[[i]]) > 0){
      for (j in 1:length(bipart[[i]])){
        part2[[i]] <- append(part2[[i]], paste(bipart[[i]][j]))
      }
    }
  }
  for (i in 1:length(multipart)){
    if (length(multipart[[i]]) > 0){
      for (j in 1:length(multipart[[i]])){
        part2[[i]] <- append(part2[[i]], paste(multipart[[i]][j]))
      }
    }
    part2[[i]] <- append(part2[[i]], " }")
  }
  part2 <- paste(append(unlist(part2), "tau ~ dgamma(sg, rg)\n"), collapse = '')

  # now write mu priors
  eff.tauhyp <- NA
  eff.sigmahyp <- NA
  eff.mhyp <- NA
  if (all(unlist(multivar) != 1) & all(unlist(bivar) != 1)){
    eff.mhyp <- paste(MU[[1]][1], "~ dnorm(0,1) \n")
  }
  for (i in 1:nrand){
    if (all(multivar[[i]] != 1) & all(bivar[[i]] != 1)){
      # and tau priors
      eff.tauhyp <- append(eff.tauhyp, paste(TAU[[i]][1], " <- 1/", sigma0[i], "^2\n"))
      eff.sigmahyp <- append(eff.sigmahyp, paste(sigma0[i], " ~ dgamma(1,0.04)\n"))
    }
  }
  if (length(mucont[[2]]) > 0){
    for (i in 1:length(mucont[[2]])){
      eff.mhyp <- append(eff.mhyp, paste(mucont[[2]][i], " <- ", precont[i], "* scalecont \n"))
    }
  }
  for (i in 1:nrand){
    for (j in which(dat.str[dat.str$type == "cont",][, 2 + i] == 1)){
      if ((all(multivar[[i]] != (1 + j)) & all(bivar[[i]] != (1 + j)))){
        eff.tauhyp <- append(eff.tauhyp, paste(taucont[j,i], " <- 1/", sigmacont[j,i], "^2\n"))
        eff.sigmahyp <- append(eff.sigmahyp, paste(sigmacont[j,i], " ~ dgamma(1,0.04)\n"))
      }
    }
  }
  if (length(mucat[[2]]) > 0){
    for (i in 1:length(mucat[[2]])){
      eff.mhyp <- append(eff.mhyp, paste(mucat[[2]][i], " <- ", precat[i], "* scalecat \n", sep = ""))
    }
  }
  for (i in 1:nrand){
    for (j in which(dat.str[dat.str$type == "cat",][, 2 + i] == 1)){
      if ((all(multivar[[i]] != (1 + nrcont + j)) & all(bivar[[i]] != (1 + nrcont + j)))){
        eff.tauhyp <- append(eff.tauhyp, paste(taucat[j,i], " <- 1/", sigmacat[j,i], "^2\n", sep = ""))
        eff.sigmahyp <- append(eff.sigmahyp, paste(sigmacat[j,i], " ~ dgamma(1,0.04)\n"))
      }
    }
  }
  options(warn = -1)
  muIAs[[2]] <- muIAs[[2]][!is.na(muIAs[[2]])]
  options(warn = 0)
  if (length(muIAs[[2]]) > 0){
    for (i in 1:length(muIAs[[2]])){
      if (ia.purecont[i] == 0){
        eff.mhyp <- append(eff.mhyp, paste(muIAs[[2]][i], " <- ", preIAs[i], "* scalecat \n", sep = ""))
      } else if (ia.purecont[i] == 1){
        eff.mhyp <- append(eff.mhyp, paste(muIAs[[2]][i], " <- ", preIAs[i], "* scalecont \n", sep = ""))
      }
    }
  }
  for (j in 1:nrand){
    for (k in which(randvar.ia[[j]][lower.tri(randvar.ia[[j]])] == 1)){
      eff.tauhyp <- append(eff.tauhyp, paste(tauIAs[k,j], " <- 1/", sigmaIAs[k,j], "^2\n", sep = ""))
      eff.sigmahyp <- append(eff.sigmahyp, paste(sigmaIAs[k,j], " ~ dgamma(1,0.04)\n"))
    }
  }
  if (is.na(eff.tauhyp[1])){ eff.tauhyp <- eff.tauhyp[2:length(eff.tauhyp)] }
  eff.tauhyp <- eff.tauhyp[!is.na(eff.tauhyp)]
  if (is.na(eff.sigmahyp[1])){ eff.sigmahyp <- eff.sigmahyp[2:length(eff.sigmahyp)] }
  eff.sigmahyp <- eff.sigmahyp[!is.na(eff.sigmahyp)]
  if (is.na(eff.mhyp[1])){ eff.mhyp <- eff.mhyp[2:length(eff.mhyp)] }
  eff.mhyp <- eff.mhyp[!is.na(eff.mhyp)]
  prior.pre <- matrix()
  for (i in 1:length(PRE)){
    prior.pre[i] <- paste(PRE[i], "~ dt(0,1,1)\n")
  }
  scalecat <- "scalecat <- 1/2\n"
  scalecont <- "scalecont <- sqrt(2)/4\n"
  part3 <- paste(paste(eff.mhyp, collapse = ' '), paste(eff.tauhyp, collapse = ' '),
                 paste(eff.sigmahyp, collapse = ' '), paste(prior.pre, collapse = ' '),
                 paste(scalecat, collapse = ' '), paste(scalecont, collapse = ' '),
                 paste(mupart.corr, collapse = ' '), paste(pre.corr, collapse = ' '),
                 paste(sigmainv.corr, collapse = ' '), paste(sigma.corr, collapse = ' '),
                 paste("sg <- pow(m,2)/pow(d,2) \nrg <- m/pow(d,2) \nm ~ dgamma(1,1) \nd ~ dgamma(1,1)\n"))
  part4 <- paste(rho, collapse = '')

  # complete model
  modelstring = paste(" model {", part1, part2, part3, part4, "}")
  writeLines(modelstring, con = path)
  bI.save <- matrix(unlist(bI), ncol = 3, byrow = TRUE)[,3]
  bcont.save <- unlist(lapply(bcont, function(x) x[,3]))
  bcat.save <- unlist(lapply(bcat, function(x) x[,3]))
  bIAs.save <- unlist(lapply(bIAs, function(x) x[,3]))
  options(warn = -1)
  mcmcsave <- unique(c(bI.save[!is.na(bI.save)], bcont.save[!is.na(bcont.save)],
                       bcat.save[!is.na(bcat.save)], bIAs.save[!is.na(bIAs.save)]))
  options(warn = 0)
  parameters <- list(MU = MU.INT, SIGMA = unlist(SIGMA), b.save = mcmcsave)
  parameters[["pl.ind"]] <- pl.ind
  parameters[["pl.nhclcont"]] <- mucont[[2]][as.logical(nhclcont)]
  parameters[["pl.nhclcat"]] <- mucat[[2]][as.logical(nhclcat)]
  if (!is.na(RHO[1])){
    parameters[["RHO"]] <- unlist(RHO)
    parameters[["mu.corr"]] <- mucorr
    parameters[["wishdf"]] <- wishdf
    parameters[["corrnames"]] <- corr.names
  }
  return(parameters)
}
/scratch/gouwar.j/cran-all/cranData/BayesRS/R/modeltext.R
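## A minimal sketch of how modeltext() might be invoked. The shapes below are
## inferred from the function body, not from package documentation: dat.str has
## columns iv and type ("cont"/"cat") plus one 0/1 column per random grouping
## factor; randvar.ia is a list (one entry per grouping factor) of square 0/1
## matrices over the predictors; corstr is a list of correlation-structure
## matrices of dimension (number of predictors + 1). In BayesRS this helper is
## normally called by the package's higher-level model-fitting function.
dat.str <- data.frame(iv = c("x1", "cond"),
                      type = c("cont", "cat"),
                      subject = c(1, 1))            # random slopes for both IVs over subjects
randvar.ia <- list(matrix(0, nrow = 2, ncol = 2))   # no random interaction slopes
corstr <- list(matrix(0, nrow = 3, ncol = 3))       # no correlated random effects
params <- modeltext(dat.str, randvar.ia, corstr, path = "model.txt")
str(params)  # names of the JAGS nodes to monitor (MU, SIGMA, b.save, ...)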
plotPostMT_HDImeans2 = function(paramSampleVec, HDIlow, HDIhi, ylab = NULL, xlab = NULL,
                                xlim = NULL, main = NULL, credMass = NULL, pltitle = NULL,
                                showHDI = NULL, colflag = NULL, bfs = NULL, ylim = NULL,
                                bfpos = NULL) {
  if ( is.null(xlab) ) xlab = "Parameter"
  if ( is.null(main) ) main = ""
  if ( is.null(ylab) ) ylab = "Posterior Density"
  if ( is.null(credMass) ) credMass = .95 # 95% HDI as default
  if (is.null(showHDI)) showHDI <- 1
  if (is.null(colflag)) colflag <- 1
  if (is.null(xlim)) { xlim = range( c( 0 , paramSampleVec)) }
  varnames = hdiLow = hdiHigh = NULL
  # in case xlim is set to 0, and some value has been given to xrange, center plot
  # symmetrically on zero, using maximal extension in case range is set to 0, and
  # given range otherwise
  if (xlim[1] == 0) {
    maxext <- max(abs(min(paramSampleVec$samples)), abs(max(paramSampleVec$samples))) # largest extension into positive or negative range
    xlim = c(-maxext, maxext) # centers plot symmetrically on zero
  }
  # HDI = HDIofMCMC( paramSampleVec$samples[paramSampleVec$classify == 2] , credMass )
  HDI <- tapply(paramSampleVec$samples, paramSampleVec$classify, FUN = HDIofMCMC)
  postSummary = matrix( NA , nrow = nlevels(paramSampleVec$classify) , ncol = 11 ,
                        dimnames = list( c( 1:nlevels(paramSampleVec$classify)) ,
                                         c("mean","median","mode",
                                           "hdiMass","hdiLow","hdiHigh",
                                           "compVal","pcGTcompVal",
                                           "ROPElow","ROPEhigh","pcInROPE")))
  postSummary[,"mean"] = aggregate(samples ~ classify, FUN = mean, data = paramSampleVec)[,2]
  postSummary[,"median"] = aggregate(samples ~ classify, FUN = median, data = paramSampleVec)[,2]
  mcmcDensity <- tapply(paramSampleVec$samples, paramSampleVec$classify, FUN = density)
  for (i in 1:nlevels(paramSampleVec$classify)) {
    postSummary[i,"mode"] = mcmcDensity[[i]]$x[which.max(mcmcDensity[[i]]$y)]
  }
  postSummary[,"hdiMass"] = credMass
  postSummary[,"hdiLow"] = sapply(HDI, function(x) x[1])
  postSummary[,"hdiHigh"] = sapply(HDI, function(x) x[2])
  postSummary <- as.data.frame(postSummary)
  densCurve <- tapply(paramSampleVec$samples, paramSampleVec$classify, FUN = density, adjust = 2)
  myy1 <- (seq(1:length(densCurve)))
  myy2 <- (seq(1:length(densCurve)))

  # Display the HDI.
  postSummary$myy1 <- myy1
  postSummary$myy2 <- myy2
  postSummary$varnames <- levels(paramSampleVec$varnames)
  postSummary$varnames <- as.character(postSummary$varnames)
  postSummary$varnames <- factor(postSummary$varnames, levels = unique(postSummary$varnames))
  colvals = c("red", "white", "black", "grey52", "orange", "lightblue", "lightgreen")[1:nrow(postSummary)]

  # Plot HDIs with means
  if (colflag == 1){
    p1 <- ggplot(postSummary, aes(varnames, mean)) +
      geom_point(shape = 1, size = 5) +
      coord_flip() +
      geom_segment(aes(x = seq(1, nrow(postSummary)), xend = seq(1, nrow(postSummary)),
                       y = hdiLow, yend = hdiHigh),
                   colour = "black", size = 1.75, lineend = "round") +
      geom_segment(aes(x = 0.5, xend = nrow(postSummary) + 0.5, y = 0, yend = 0),
                   colour = "red", size = 1, lineend = "round")
    # geom_point(aes(x = seq(1,nrow(postSummary)), y=mean, shape = 1), colour = "black", size=5, shape = rep(1,nrow(postSummary)))
  } else {
    p1 <- ggplot(postSummary, aes(varnames, mean)) +
      geom_point(shape = 1, size = 5) +
      coord_flip() +
      geom_segment(aes(x = seq(1, nrow(postSummary)), xend = seq(1, nrow(postSummary)),
                       y = hdiLow, yend = hdiHigh),
                   colour = "black", size = 1.75, lineend = "round") # +
    # geom_point(aes(x = seq(1,nrow(postSummary)), y=mean), colour = "black", size=5, shape = rep(1,nrow(postSummary)))
  }
  myplot <- p1 + ggtitle(main) + labs(title = pltitle) + theme_bw() +
    theme(legend.position = c(.80, .85)) +
    theme(legend.title = element_blank()) +
    theme(legend.text = element_text(size = 12)) +
    theme(axis.title.x = element_text(size = 14), axis.text.x = element_text(size = 12)) +
    theme(axis.title.y = element_text(size = 14), axis.text.y = element_text(size = 12)) +
    theme(plot.title = element_text(size = rel(1.75), hjust = 0)) +
    theme(legend.position = "none") +
    labs(x = xlab, y = ylab) +
    theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
          panel.background = element_blank(), axis.line = element_line(colour = "black"),
          strip.text = element_text(size = 12), axis.text = element_text(size = 12),
          axis.title = element_text(size = 14, face = "bold"))

  # show bfs as numbers
  if (!is.null(bfs)){
    postSummary$bfs <- bfs
    myplot <- myplot +
      annotate("text", x = seq(1, nrow(postSummary)), y = bfpos, label = bfs, size = 5) +
      scale_y_continuous(limits = ylim)
  }
  return( myplot )
}
/scratch/gouwar.j/cran-all/cranData/BayesRS/R/plotPostMT_HDImeans2.R
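## A toy call of plotPostMT_HDImeans2(), assuming ggplot2 and the package's
## HDIofMCMC() helper are available. paramSampleVec is inferred from the body to
## be a data frame with a numeric 'samples' column and factors 'classify' and
## 'varnames'; xlim is passed explicitly here because the range() default fails
## on a data frame, and HDIlow/HDIhi are unused by the function body.
library(ggplot2)
set.seed(1)
dat <- data.frame(samples = c(rnorm(1000, 0.3, 0.1), rnorm(1000, -0.1, 0.1)),
                  classify = factor(rep(1:2, each = 1000)),
                  varnames = factor(rep(c("effect A", "effect B"), each = 1000)))
p <- plotPostMT_HDImeans2(dat, HDIlow = NULL, HDIhi = NULL,
                          xlab = "Posterior mean", xlim = c(0, 0))
print(p)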
.BFe <- function(to, so, tr, sr, tau, log = FALSE) {
    ## check inputs
    stopifnot(
        length(to) == 1, is.numeric(to), is.finite(to),
        length(so) == 1, is.numeric(so), is.finite(so), 0 < so,
        length(tr) == 1, is.numeric(tr), is.finite(tr),
        length(sr) == 1, is.numeric(sr), is.finite(sr), 0 < sr,
        length(tau) == 1, is.numeric(tau), is.finite(tau), 0 <= tau,
        length(log) == 1, is.logical(log)
    )

    ## compute bf
    logbf <- stats::dnorm(x = to - tr, mean = 0, sd = sqrt(so^2 + sr^2), log = TRUE) -
        stats::dnorm(x = to - tr, mean = 0, sd = sqrt(so^2 + sr^2 + 2*tau^2), log = TRUE)
    if (log == TRUE) return(logbf)
    else return(exp(logbf))
}

#' @title Equality of effect size Bayes factor
#'
#' @description Computes the equality of effect size Bayes factor
#'
#' @details The equality of effect size Bayes factor is the Bayes factor
#' contrasting the hypothesis of equal original and replication effect sizes
#' \eqn{H_0: \theta_o = \theta_r}{H0: theta_o = theta_r} to the hypothesis
#' of unequal effect sizes \eqn{H_1: \theta_o \neq \theta_r}{H1: theta_o !=
#' theta_r}. Under the hypothesis of unequal effect sizes \eqn{H_1}{H1} the
#' study specific effect sizes are assumed to be normally distributed around
#' an overall effect size with heterogeneity standard deviation \code{tau}.
#'
#' @param to Original effect estimate
#' @param so Standard error of the original effect estimate
#' @param tr Replication effect estimate
#' @param sr Standard error of the replication effect estimate
#' @param tau The heterogeneity standard deviation \eqn{\tau}{tau} under the
#' hypothesis of unequal effect sizes \eqn{H_1}{H1}
#' @param log Logical indicating whether the natural logarithm of the Bayes
#' factor should be returned. Defaults to \code{FALSE}
#'
#' @return The equality of effect size Bayes factor
#' \eqn{\mathrm{BF}_{01}}{BF01}. \eqn{\mathrm{BF}_{01} > 1}{BF01 > 1}
#' indicates that the data favour the hypothesis of equal effect sizes
#' \eqn{H_0}{H0} (replication success), whereas \eqn{\mathrm{BF}_{01} <
#' 1}{BF01 < 1} indicates that the data favour the hypothesis of unequal
#' effect sizes \eqn{H_1}{H1} (replication failure).
#'
#' @author Samuel Pawel
#'
#' @references Bayarri, M. and Mayorall, A. (2002). Bayesian Design of
#' "Successful" Replications. The American Statistician, 56(3): 207-214.
#' \doi{10.1198/000313002155}
#'
#' Verhagen, J. and Wagenmakers, E. J. (2014). Bayesian tests to quantify the result
#' of a replication attempt. Journal of Experimental Psychology: General,
#' 145:1457-1475. \doi{10.1037/a0036731}
#'
#' @examples
#' ## strong evidence for unequal effect sizes
#' BFe(to = 1, tr = 0.5, so = sqrt(1/100), sr = sqrt(1/100), tau = 0.3)
#'
#' ## some evidence for equal effect sizes
#' BFe(to = 1, tr = 1, so = sqrt(1/200), sr = sqrt(1/200), tau = 0.3)
#'
#' @export
BFe <- Vectorize(.BFe)
/scratch/gouwar.j/cran-all/cranData/BayesRep/R/BFe.R
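## A quick numerical check of the closed form used in .BFe(): under H0 the
## difference to - tr is N(0, so^2 + sr^2), under H1 it is
## N(0, so^2 + sr^2 + 2*tau^2), so the Bayes factor is the ratio of the two
## normal densities evaluated at to - tr. The values below are made up.
to <- 0.4; so <- 0.1; tr <- 0.15; sr <- 0.1; tau <- 0.2
BFe(to = to, so = so, tr = tr, sr = sr, tau = tau)
dnorm(to - tr, 0, sqrt(so^2 + sr^2)) / dnorm(to - tr, 0, sqrt(so^2 + sr^2 + 2*tau^2))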
## non-vectorized version of BFo
.BFo <- function(zo, g, log = FALSE, truncate = FALSE) {
    ## check inputs
    stopifnot(
        length(zo) == 1, is.numeric(zo), is.finite(zo),
        length(g) == 1, is.numeric(g), is.finite(g), 0 <= g,
        length(log) == 1, is.logical(log),
        length(truncate) == 1, is.logical(truncate)
    )

    ## compute bf
    logbf <- stats::dnorm(x = zo, mean = 0, sd = 1, log = TRUE) -
        stats::dnorm(x = zo, mean = 0, sd = sqrt(1 + g), log = TRUE)

    ## add correction factor when truncated alternative
    if (truncate == TRUE) {
        logbf <- logbf - log(2) - stats::pnorm(q = zo*sqrt(g/(1 + g)), log.p = TRUE)
    }
    if (log == TRUE) return(logbf)
    else return(exp(logbf))
}

#' @title Bayes factor for effect estimate from original study
#'
#' @description Computes Bayes factor contrasting \deqn{H_0: \theta = 0}{H0:
#' theta = 0} to \deqn{H_S: \theta \sim \mathrm{N}(0, g \cdot
#' \sigma_o^2)}{HS: theta ~ N(0, g*sigma_o^2)} with respect to the effect
#' estimate from an original study \eqn{\hat{\theta}_o}{hat(theta)_o},
#' assumed to be approximately normally distributed, i.e.
#' \eqn{\hat{\theta}_o \, | \, \theta \sim \mathrm{N}(\theta, \sigma_o^2),}{
#' hat(theta)_o | theta ~ N(theta, sigma_o^2),} with known standard error
#' \eqn{\sigma_o}{sigma_o}.
#'
#' @param zo \eqn{z}{z}-value from original study, \eqn{z_o =
#' \hat{\theta}_o/\sigma_o}{zo = hat(theta)_o/sigma_o}, i.e. original effect
#' estimate divided by standard error
#' @param g Relative variance of \eqn{\mathrm{N}(0, g \cdot \sigma_o^2)}{N(0,
#' g*sigma_o^2)} prior for the effect size under \eqn{H_S}{HS}
#' @param log Logical indicating whether the natural logarithm of the Bayes
#' factor should be returned
#' @param truncate Logical indicating whether a truncated alternative should be
#' used (truncated in the direction of the data).
#'
#' @return Bayes factor (BF \eqn{< 1} indicates that data favour \eqn{H_S}{HS},
#' while BF \eqn{> 1} indicates that data favour \eqn{H_0}{H0})
#'
#' @author Samuel Pawel
#'
#' @references Pawel, S. and Held, L. (2022). The sceptical Bayes factor for the
#' assessment of replication success. Journal of the Royal Statistical
#' Society Series B: Statistical Methodology. 84(3): 879-911.
#' \doi{10.1111/rssb.12491}
#'
#' @seealso \code{\link{BFoSMD}}, \code{\link{BFologOR}}
#'
#' @examples BFo(zo = 3, g = 1)
#'
#' @noRd
BFo <- Vectorize(.BFo)

## ## truncation test
## margf <- function(t, s, g) {
##     integrate(f = function(theta) {dnorm(x = t, mean = theta, sd = s)*
##         dnorm(x = theta, mean = 0, sd = sqrt(g)*s)},
##         lower = 0, upper = Inf)$value
## }
## analytf <- function(t, s, g) {
##     2*dnorm(x = t, mean = 0, sd = s*sqrt(1 + g))*2*pnorm(t/s*sqrt(g/(1 + g)))
## }
## t <- 2
## s <- 0.5
## g <- 1.3
## margf(t, s, g)
## analytf(t, s, g)
## integrate(f = analytf, lower = -Inf, upper = Inf, s = s, g = g)

## non-vectorized version of BFologOR
.BFologOR <- function(ao, bo, nTo = ao + bo, co, do, nCo = co + do, ss) {
    ## check inputs
    stopifnot(
        length(ao) == 1, is.numeric(ao), is.finite(ao), 0 <= ao,
        length(nTo) == 1, is.numeric(nTo), is.finite(nTo), 0 <= nTo,
        length(co) == 1, is.numeric(co), is.finite(co), 0 <= co,
        length(nCo) == 1, is.numeric(nCo), is.finite(nCo), 0 <= nCo,
        length(ss) == 1, is.numeric(ss), is.finite(ss), 0 <= ss
    )

    ## exact likelihood function
    likExact <- function(a, nT, c, nC, logOR) {
        ## integrate out the proportion in the control group from the likelihood
        ## using the translation-invariant Jeffreys' prior (p0*(1 - po))^(-0.5)
        intFun <- function(p0) {
            stats::dbinom(x = a, size = nT, prob = 1/(1 + exp(-(log(p0/(1 - p0)) + logOR)))) *
                stats::dbinom(x = c, size = nC, prob = p0) *
                stats::dbeta(x = p0, shape1 = 0.5, shape2 = 0.5)
        }
        lik <- try(stats::integrate(f = intFun, lower = 0, upper = 1)$value, silent = TRUE)
        if (inherits(lik, "try-error")) return(NaN)
        else return(lik)
    }

    ## if prior variance is exactly zero, BF is 1
    if (ss == 0) {
        bf0S <- 1
    } else {
        ## compute marginal likelihood under H_S
        mS <- try(stats::integrate(f = function(logOR) {
            vapply(X = logOR, FUN = function(logOR) {
                likExact(a = ao, nT = nTo, c = co, nC = nCo, logOR = logOR) *
                    stats::dnorm(x = logOR, mean = 0, sd = ss)
            }, FUN.VALUE = 1)
        }, lower = -Inf, upper = Inf)$value, silent = TRUE)

        ## compute bf
        if (inherits(mS, "try-error")) return(NaN)
        bf0S <- likExact(a = ao, nT = nTo, c = co, nC = nCo, logOR = 0)/mS
    }
    ## return bf
    return(bf0S)
}

#' @title Bayes factor for logOR effect estimate from original study
#'
#' @description Computes Bayes factor contrasting \deqn{H_0: \log \mathrm{OR} =
#' 0}{H0: logOR = 0} to \deqn{H_1: \log \mathrm{OR} \sim \mathrm{N}(0,
#' \code{ss}^2)}{H1: logOR ~ N(0, ss^2)} with respect to the data from the
#' original study (summarized by the entries of a standard 2\eqn{\times}{x}2
#' table).
#'
#' @param ao Number of cases in treatment group
#' @param bo Number of non-cases in treatment group
#' @param nTo Number of patients in treatment group (specify alternatively to b)
#' @param co Number of cases in control group
#' @param do Number of non-cases in control group
#' @param nCo Number of patients in control group (specify alternatively to d)
#' @param ss Standard deviation of the sceptical prior under
#' \eqn{H_\mathrm{S}}{HS}. Defaults to \code{0}
#'
#' @return Bayes factor (BF \eqn{< 1} indicates that data favour \eqn{H_S}{HS},
#' while BF \eqn{> 1} indicates that data favour \eqn{H_0}{H0})
#'
#' @author Samuel Pawel
#'
#' @references
#' Pawel, S. and Held, L. (2022). The sceptical Bayes factor for the
#' assessment of replication success. Journal of the Royal Statistical
#' Society Series B: Statistical Methodology. 84(3): 879-911.
#' \doi{10.1111/rssb.12491}
#'
#' @seealso \code{\link{BFo}}, \code{\link{BFoSMD}}
#'
#' @examples
#' ao <- 4
#' bo <- 25
#' co <- 12
#' do <- 24
#'
#' ## compare to normal approximation
#' est <- log(ao*do/bo/co)
#' se <- sqrt(1/ao + 1/bo + 1/co + 1/do)
#' zo <- est/se
#' ssseq <- sqrt(seq(0, 1.5, length.out = 25))
#' comp <- cbind("normal" = BFo(zo = zo, g = ssseq^2/se^2),
#'               "exact" = BFologOR(ao = ao, bo = bo, co = co, do = do, ss = ssseq))
#' matplot(ssseq, comp, type = "l", lty = 1, lwd = 1.5, log = "y", col = c(1, 2),
#'         xlab = "Prior variance", ylab = bquote(BF["0S"]))
#' legend("topright", c("normal", "exact"), lty = 1, lwd = 1.5, col = c(1,2),
#'        bty = "n")
#'
#' @noRd
BFologOR <- Vectorize(.BFologOR)

## non-vectorized version of BFoSMD
.BFoSMD <- function(to, no, n1o = no, n2o = no, ss,
                    type = c("two.sample", "one.sample", "paired")) {
    ## input checks
    stopifnot(
        length(to) == 1, is.numeric(to), is.finite(to),
        length(n1o) == 1, is.numeric(n1o), is.finite(n1o), 0 < n1o,
        length(n2o) == 1, is.numeric(n2o), is.finite(n2o), 0 < n2o,
        length(ss) == 1, is.numeric(ss), is.finite(ss), 0 <= ss,
        !is.null(type)
    )
    type <- match.arg(type)
    if (type != "two.sample") {
        if (n1o != n2o) {
            warning(paste0('different n1o and n2o supplied but type set to "', type, '", using no = n1o'))
        }
    }

    ## compute df and effective sample size depending on test type
    if (type == "two.sample") {
        df <- n1o + n2o - 2
        nstar <- n1o*n2o/(n1o + n2o)
    } else {
        df <- n1o - 1
        nstar <- n1o
    }

    ## if prior variance is exactly zero, BF is 1
    if (ss == 0) {
        bf0S <- 1
    } else {
        ## compute marginal likelihood under H_S
        mS <- try(stats::integrate(f = function(SMD) {
            suppressWarnings({
                stats::dt(x = to, df = df, ncp = SMD*sqrt(nstar)) *
                    stats::dnorm(x = SMD, mean = 0, sd = ss)
            })
        }, lower = -Inf, upper = Inf)$value, silent = TRUE)

        ## compute bf
        if (inherits(mS, "try-error")) return(NaN)
        bf0S <- stats::dt(x = to, df = df, ncp = 0)/mS
    }
    ## return bf
    return(bf0S)
}

#' @title Bayes factor for SMD effect estimate from original study
#'
#' @description Computes Bayes factor contrasting \deqn{H_0: \mathrm{SMD} =
#' 0}{H0: SMD = 0} to \deqn{H_S: \mathrm{SMD} \sim \mathrm{N}(0,
#' \code{ss}^2)}{HS: SMD ~ N(0, ss^2)} with respect to the data from the
#' original study (summarized by \eqn{t}-statistic from \eqn{t}-test and the
#' corresponding sample size).
#'
#' \eqn{t}-statistics from the following types of \eqn{t}-tests are
#' accepted:
#'
#' - Two-sample \eqn{t}-test where the SMD represents the standardized
#' mean difference between two group means (assuming equal variances in
#' both groups)
#' - One-sample \eqn{t}-test where the SMD represents the standardized
#' mean difference to the null value.
#' - Paired \eqn{t}-test where the SMD represents the standardized mean
#' difference score.
#'
#' @param to \eqn{t}-statistic
#' @param no Sample size (per group)
#' @param n1o Sample size in group 1 (only required for two-sample \eqn{t}-test
#' with unequal group sizes)
#' @param n2o Sample size in group 2 (only required for two-sample \eqn{t}-test
#' with unequal group sizes)
#' @param ss Standard deviation of the sceptical prior under
#' \eqn{H_\mathrm{S}}{HS}. Defaults to \code{0}
#' @param type Type of \eqn{t}-test associated with \eqn{t}-statistic. Can be
#' `"two.sample"`, `"one.sample"`, `"paired"`. Defaults to `"two.sample"`.
#'
#' @return Bayes factor (BF \eqn{< 1} indicates that data favour \eqn{H_S}{HS},
#' while BF \eqn{> 1} indicates that data favour \eqn{H_0}{H0})
#'
#' @author Samuel Pawel
#'
#' @references Pawel, S. and Held, L. (2022). The sceptical Bayes factor for the
#' assessment of replication success. Journal of the Royal Statistical
#' Society Series B: Statistical Methodology. 84(3): 879-911.
#' \doi{10.1111/rssb.12491}
#'
#' @seealso \code{\link{BFo}}, \code{\link{BFologOR}}
#'
#' @examples
#' to <- 2.5
#' n1 <- 8
#' n2 <- 10
#'
#' ## compare to normal approximation
#' est <- to*sqrt(1/n1 + 1/n2)
#' se <- sqrt(1/n1 + 1/n2)
#' z <- est/se
#' ssseq <- sqrt(seq(0, 1.5, length.out = 25))
#' comp <- cbind("normal" = BFo(zo = z, g = ssseq^2/se^2),
#'               "exact" = BFoSMD(to = to, n1o = n1, n2o = n2, ss = ssseq))
#' matplot(ssseq, comp, type = "l", lty = 1, lwd = 1.5, log = "y", col = c(1, 2),
#'         xlab = "Prior standard deviation", ylab = bquote(BF["0S"]))
#' legend("topright", c("normal", "exact"), lty = 1, lwd = 1.5, col = c(1,2),
#'        bty = "n")
#'
#' @noRd
BFoSMD <- Vectorize(.BFoSMD)
/scratch/gouwar.j/cran-all/cranData/BayesRep/R/BFo.R
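## BFo() is internal (@noRd), so these calls assume evaluation inside the
## package namespace. They illustrate the truncation correction in .BFo(): for
## data pointing in the direction of the one-sided alternative, the truncated
## prior yields a smaller BF (more evidence against H0) than the two-sided one.
BFo(zo = 2.5, g = 1)                   # two-sided alternative
BFo(zo = 2.5, g = 1, truncate = TRUE)  # alternative truncated to the sign of the data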
## non-vectorized version of BFr .BFr <- function(to, so, tr, sr, ss = 0, truncate = FALSE, log = FALSE, zo = NULL, zr = NULL, c = NULL, g = 0) { ## check inputs stopifnot( length(truncate) == 1, is.logical(truncate), !is.na(truncate), length(log) == 1, is.logical(log), !is.na(log) ) if (!missing(to) && !missing(so) && !missing(tr) && !missing(sr)) { ## parametrization 1 stopifnot( length(to) == 1, is.numeric(to), is.finite(to), length(so) == 1, is.numeric(so), is.finite(so), 0 < so, length(tr) == 1, is.numeric(tr), is.finite(tr), length(sr) == 1, is.numeric(sr), is.finite(sr), 0 < sr, length(ss) == 1, is.numeric(ss), is.finite(ss), 0 <= ss ) ## compute relative prior variance zo <- to/so zr <- tr/sr c <- so^2/sr^2 g <- ss^2/so^2 } else { ## parametrization 2 stopifnot( length(zo) == 1, is.numeric(zo), is.finite(zo), length(zr) == 1, is.numeric(zr), is.finite(zr), length(c) == 1, is.numeric(c), is.finite(c), 0 < c, length(g) == 1, is.numeric(g), is.finite(g), 0 <= g ) } ## compute bf logbf <- stats::dnorm(x = zr, mean = 0, sd = sqrt(1 + c*g), log = TRUE) - stats::dnorm(x = zr, mean = zo*sqrt(c), sd = sqrt(1 + c), log = TRUE) if (truncate == TRUE) { ## add truncation correction factor logbf <- logbf + stats::pnorm(q = sign(zo)*zo, log.p = TRUE) - stats::pnorm(q = sign(zo)*(zo + zr*sqrt(c))/sqrt(1 + c), log.p = TRUE) } if (log == TRUE) return(logbf) else return(exp(logbf)) } #' @title Generalized replication Bayes factor #' #' @description Computes the generalized replication Bayes factor #' #' @details The generalized replication Bayes factor is the Bayes factor #' contrasting the sceptic's hypothesis that the effect size is about zero #' \deqn{H_{\mathrm{S}}: \theta \sim \mathrm{N}(0, \code{ss}^2)}{HS: theta ~ #' N(0, ss^2)} to the advocate's hypothesis that the effect size is #' compatible with its posterior distribution based on the original study #' and a uniform prior \deqn{H_{\mathrm{A}}: \theta \sim f(\theta \, | \, #' \mathrm{original~study}).}{HA: theta ~ f(theta | original study).} The #' standard replication Bayes factor from Verhagen and Wagenmakers (2014) is #' obtained by specifying a point-null hypothesis \code{ss = 0} (the #' default). #' #' The function can be used with two input parametrizations, either on the #' absolute effect scale (\code{to}, \code{so}, \code{tr}, \code{sr}, \code{ss}) #' or alternatively on the relative *z*-scale (\code{zo}, \code{zr}, \code{c}, #' \code{g}). If an argument on the effect scale is missing, the *z*-scale is #' automatically used and the other non-missing arguments on the effect scale #' ignored. #' #' @param to Original effect estimate #' @param so Standard error of the original effect estimate #' @param tr Replication effect estimate #' @param sr Standard error of the replication effect estimate #' @param ss Standard devation of the sceptical prior under #' \eqn{H_\mathrm{S}}{HS}. Defaults to \code{0} #' @param truncate Logical indicating whether advocacy prior should be truncated #' to direction of the original effect estimate (i.e., a one-sided test). #' Defaults to \code{FALSE} #' @param log Logical indicating whether the natural logarithm of the Bayes #' factor should be returned. 
Defaults to \code{FALSE} #' @param zo Original *z*-value \code{zo} = \code{to}/\code{so} (alternative #' parametrization for \code{to} and \code{so}) #' @param zr Replication *z*-value \code{zr} = \code{tr}/\code{sr} (alternative #' parametrization for \code{tr} and \code{sr}) #' @param c Relative variance \code{c = so^2/sr^2} (alternative parametrization #' for \code{so} and \code{sr}) #' @param g Relative prior variance \code{g = ss^2/so^2}. Defaults to \code{0} #' (alternative parametrization for \code{ss}) #' #' @return The generalized replication Bayes factor #' \eqn{\mathrm{BF}_{\mathrm{SA}}}{BF_SA}. \eqn{\mathrm{BF}_{\mathrm{SA}} < #' 1}{BF_SA < 1} indicates that the data favour the advocate's hypothesis #' \eqn{H_{\mathrm{A}}}{HA} (replication success), whereas #' \eqn{\mathrm{BF}_{\mathrm{SA}} > 1}{BF_SA > 1} indicates that the data #' favour the sceptic's hypothesis \eqn{H_{\mathrm{S}}}{HS} (replication #' failure). #' #' @seealso \code{\link{BFrSMD}}, \code{\link{BFrlogOR}} #' #' @author Samuel Pawel #' #' @references Verhagen, J. and Wagenmakers, E. J. (2014). Bayesian tests to #' quantify the result of a replication attempt. Journal of Experimental #' Psychology: General, 145:1457-1475. \doi{10.1037/a0036731} #' #' Ly, A., Etz, A., Marsman, M., Wagenmakers, E. J. (2019). Replication Bayes #' factors from evidence updating. Behavior Research Methods, 51(6):2498-2508. #' \doi{10.3758/s13428-018-1092-x} #' #' Pawel, S. and Held, L. (2022). The sceptical Bayes factor for the #' assessment of replication success. Journal of the Royal Statistical #' Society Series B: Statistical Methodology, 84(3): 879-911. #' \doi{10.1111/rssb.12491} #' #' @examples #' to <- 2 #' tr <- 2.5 #' so <- 1 #' sr <- 1 #' BFr(to = to, so = so, tr = tr, sr = sr) #' BFr(zo = to/so, zr = tr/sr, c = so^2/sr^2) #' #' #' @export BFr <- Vectorize(.BFr) ## non-vectorized version of BFrlogOR .BFrlogOR <- function(ao, bo, nTo = ao + bo, co, do, nCo = co + do, ar, br, nTr = ar + br, cr, dr, nCr = cr + dr, ss, method = c("integration", "hypergeo")) { ## check inputs ## check inputs if (!missing(bo)) { ## parametrization 1 stopifnot( length(bo) == 1, is.numeric(bo), is.finite(bo), 0 <= bo ) } if (!missing(do)) { ## parametrization 1 stopifnot( length(do) == 1, is.numeric(do), is.finite(do), 0 <= do ) } if (!missing(br)) { ## parametrization 1 stopifnot( length(br) == 1, is.numeric(br), is.finite(br), 0 <= br ) } if (!missing(dr)) { ## parametrization 1 stopifnot( length(dr) == 1, is.numeric(dr), is.finite(dr), 0 <= dr ) } stopifnot( length(ao) == 1, is.numeric(ao), is.finite(ao), 0 <= ao, length(nTo) == 1, is.numeric(nTo), is.finite(nTo), 0 <= nTo, length(co) == 1, is.numeric(co), is.finite(co), 0 <= co, length(nCo) == 1, is.numeric(nCo), is.finite(nCo), 0 <= nCo, length(ar) == 1, is.numeric(ar), is.finite(ar), 0 <= ar, length(nTr) == 1, is.numeric(nTr), is.finite(nTr), 0 <= nTr, length(cr) == 1, is.numeric(cr), is.finite(cr), 0 <= cr, length(nCr) == 1, is.numeric(nCr), is.finite(nCr), 0 <= nCr, length(ss) == 1, is.numeric(ss), is.finite(ss), 0 <= ss, !is.null(method) ) method <- match.arg(method) ## exact likelihood function likExact <- function(a, nT, c, nC, logOR) { ## integrate out the proportion in the control group from the likelihood ## using the translation-invariant Jeffreys' prior (p0*(1 - po))^(-0.5) intFun <- function(p0) { stats::dbinom(x = a, size = nT, prob = 1/(1 + exp(-(log(p0/(1 - p0)) + logOR)))) * stats::dbinom(x = c, size = nC, prob = p0) * stats::dbeta(x = p0, shape1 = 0.5, shape2 = 0.5) } lik <- 
try(stats::integrate(f = intFun, lower = 0, upper = 1)$value, silent = TRUE) if (inherits(lik, "try-error")) return(NaN) else return(lik) } ## compute marginal likelihood under sceptical prior H_S if (ss == 0) { ## point-null mS <- likExact(a = ar, nT = nTr, c = cr, nC = nCr, logOR = 0) } else { ## composite-null mS <- try(stats::integrate(f = function(logOR) { vapply(X = logOR, FUN = function(logOR) { likExact(a = ar, nT = nTr, c = cr, nC = nCr, logOR = logOR) * stats::dnorm(x = logOR, mean = 0, sd = ss) }, FUN.VALUE = 1) }, lower = -Inf, upper = Inf)$value, silent = TRUE) if (inherits(mS, "try-error")) return(NaN) } ## compute marginal likelihood under advocacy prior H_A: ## 1) posterior density of logOR based on original study and Jeffreys priors if (method == "hypergeo") { ## 1a) analytical solution using hypergeometric function e <- ao + 0.5 f <- nTo - ao + 0.5 g <- co + 0.5 h <- nCo - co + 0.5 C <- beta(a = e + g, b = f + h)/(beta(a = e, b = f) * beta(a = g, b = h)) postExact <- function(logOR) { suppressWarnings({ postdens <- ifelse(logOR < 0, hypergeo::hypergeo(A = e + f, B = e + g, C = e + f + g + h, z = 1 - exp(logOR))*C*exp(e*logOR), hypergeo::hypergeo(A = e + f, B = f + h, C = e + f + g + h, z = 1 - exp(-logOR))*C*exp(-f*logOR)) }) return(abs(postdens)) } } else { ## 1b) using analytical posterior for p0 and p1, then CoV and integrating out p0 postExact <- function(logOR) { intFun <- function(p0) { stats::dbeta(x = stats::plogis(q = logOR + log(p0/(1 - p0))), shape1 = 0.5 + ao, shape2 = 0.5 + nTo - ao) * exp(logOR) * (1 - p0) * p0 / (p0 * exp(logOR) + (1 - p0))^2 * ## CoV constant stats::dbeta(x = p0, shape1 = 0.5 + co, shape2 = 0.5 + nCo - co) } postdens <- try(stats::integrate(f = intFun, lower = 0, upper = 1)$value, silent = TRUE) if (inherits(postdens, "try-error")) return(NaN) else return(postdens) } } ## 2) integrate replication likelihood wrt to posterior of logOR mA <- try(stats::integrate(f = function(logOR) { vapply(X = logOR, FUN = function(logOR) { likExact(a = ar, nT = nTr, c = cr, nC = nCr, logOR = logOR) * postExact(logOR = logOR) }, FUN.VALUE = 1) }, lower = -Inf, upper = Inf)$value, silent = TRUE) if (inherits(mA, "try-error")) return(NaN) ## compute BF bfSA <- mS/mA return(bfSA) } #' @title Generalized replication Bayes factor for logOR effect sizes #' #' @description Computes the generalized replication Bayes factor for log odds #' ratio (logOR) effect sizes #' #' @details This function computes the generalized replication Bayes factor for #' log odds ratio (logOR) effect sizes using an exact binomial likelihood #' for the data instead of the normal approximation used in #' \code{\link{BFr}} (for details, see Section 4 in Pawel and Held, 2022). 
#' #' #' @param ao Number of cases in original study treatment group #' @param bo Number of non-cases in original study treatment group #' @param nTo Number of participants in original study treatment group (specify #' alternatively to \code{b}) #' @param co Number of cases in original study control group #' @param do Number of non-cases in original study control group #' @param nCo Number of participants in original study control group (specify #' alternatively to \code{d}) #' @param ar Number of cases in replication study treatment group #' @param br Number of non-cases in replication study treatment group #' @param nTr Number of participants in replication study treatment group #' (specify alternatively to \code{b}) #' @param cr Number of cases in replication study control group #' @param dr Number of non-cases in replication study control group #' @param nCr Number of participants in replication study control group (specify #' alternatively to \code{d}) #' @param ss Standard deviation of the sceptical prior under #' \eqn{H_\mathrm{S}}{HS}. Defaults to \code{0} #' @param method Method to compute posterior density. Either #' \code{"integration"} (default) or \code{"hypergeo"} #' #' @return The generalized replication Bayes factor #' \eqn{\mathrm{BF}_{\mathrm{SA}}}{BF_SA}. \eqn{\mathrm{BF}_{\mathrm{SA}} < #' 1}{BF_SA < 1} indicates that the data favour the advocate's hypothesis #' \eqn{H_{\mathrm{A}}}{HA} (replication success), whereas #' \eqn{\mathrm{BF}_{\mathrm{SA}} > 1}{BF_SA > 1} indicates that the data #' favour the sceptic's hypothesis \eqn{H_{\mathrm{S}}}{HS} (replication #' failure). #' #' @references Verhagen, J. and Wagenmakers, E. J. (2014). Bayesian tests to #' quantify the result of a replication attempt. Journal of Experimental #' Psychology: General, 145:1457-1475. \doi{10.1037/a0036731} #' #' Pawel, S. and Held, L. (2022). The sceptical Bayes factor for the assessment #' of replication success. Journal of the Royal Statistical Society Series #' B: Statistical Methodology, 84(3): 879-911. 
\doi{10.1111/rssb.12491} #' #' @author Samuel Pawel #' #' @examples #' data("SSRPexact") #' balafoutas2012 <- subset(SSRPexact, study == "Balafoutas and Sutter (2012), Science") #' with(balafoutas2012, #' BFrlogOR(ao = ao, bo = bo, co = co, do = do, ar = ar, br = br, cr = cr, dr = dr, #' ss = 0)) #' #' @export BFrlogOR <- Vectorize(.BFrlogOR) ## non-vectorized version of BFrSMD .BFrSMD <- function(to, no, n1o = no, n2o = no, tr, nr, n1r = nr, n2r = nr, ss, type = c("two.sample", "one.sample", "paired")) { ## input checks stopifnot( length(to) == 1, is.numeric(to), is.finite(to), length(tr) == 1, is.numeric(tr), is.finite(tr), length(n1o) == 1, is.numeric(n1o), is.finite(n1o), 0 < n1o, length(n2o) == 1, is.numeric(n2o), is.finite(n2o), 0 < n2o, length(n1r) == 1, is.numeric(n1r), is.finite(n1r), 0 < n1r, length(n2r) == 1, is.numeric(n2r), is.finite(n2r), 0 < n2r, length(ss) == 1, is.numeric(ss), is.finite(ss), 0 <= ss, !is.null(type) ) type <- match.arg(type) if (type != "two.sample") { if (n1o != n2o) { warning(paste0('different n1o and n2o supplied but type set to "', type, '", using no = n1o')) } if (n1r != n2r) { warning(paste0('different n1r and n2r supplied but type set to "', type, '", using nr = n1r')) } } ## compute df and effective sample size depending on test type if (type == "two.sample") { df <- n1r + n2r - 2 nstar <- 1/(1/n1r + 1/n2r) dfo <- n1o + n2o - 2 nstaro <- 1/(1/n1o + 1/n2o) } else { df <- n1r - 1 nstar <- n1r dfo <- n1o -1 nstaro <- n1o } ## compute marginal likelihood under sceptical prior H_S if (ss == 0) { ## point-null mS <- stats::dt(x = tr, df = df, ncp = 0) } else { ## composite-null mS <- try(stats::integrate(f = function(SMD) { suppressWarnings({ stats::dt(x = tr, df = df, ncp = SMD*sqrt(nstar)) * stats::dnorm(x = SMD, mean = 0, sd = ss) }) }, lower = -Inf, upper = Inf)$value, silent = TRUE) if (inherits(mS, "try-error")) return(NaN) } ## compute marginal likelihood under advocacy prior H_A: ## 1) posterior density of SMD based on original study and flat prior postExact <- function(SMD) { postdens <- try(stats::integrate(f = function(prec) { ## stats::dnorm(x = SMD, mean = to*prec/sqrt(nstaro), sd = sqrt(1/nstaro)) * ## stats::dgamma(x = prec, shape = dfo + 1, rate = dfo) stats::dnorm(x = SMD, mean = to*sqrt(prec/nstaro), sd = sqrt(1/nstaro)) * stats::dgamma(x = prec, shape = (dfo + 1)/2, rate = dfo/2) }, lower = 0, upper = Inf)$value, silent = TRUE) if (inherits(postdens, "try-error")) return(NaN) else return(postdens) } ## 2) integrate tr likelihood wrt to posterior of SMD mA <- try(stats::integrate(f = function(SMD) { vapply(X = SMD, FUN = function(SMD) { suppressWarnings({ stats::dt(x = tr, df = df, ncp = SMD*sqrt(nstar)) * postExact(SMD) }) }, FUN.VALUE = 1) }, lower = -Inf, upper = Inf)$value, silent = TRUE) if (inherits(mA, "try-error")) return(NaN) ## compute BF bfSA <- mS/mA return(bfSA) } #' @title Generalized replication Bayes factor for SMD effect sizes #' #' @description Computes the generalized replication Bayes factor for #' standardized mean difference (SMD) effect sizes #' #' @details This function computes the generalized replication Bayes factor for #' standardized mean difference (SMD) effect sizes using an exact #' *t*-likelihood for the data instead of the normal approximation used in #' \code{\link{BFr}} (for details, see Section 4 in Pawel and Held, 2022). #' Data from both studies are summarized by \eqn{t}-statistics and sample #' sizes. 
The following types of \eqn{t}-tests are accepted: #' #' - Two-sample \eqn{t}-test where the SMD represents the standardized #' mean difference between two group means (assuming equal variances in #' both groups). #' - One-sample \eqn{t}-test where the SMD represents the standardized #' mean difference to the null value. #' - Paired \eqn{t}-test where the SMD represents the standardized mean #' difference score. #' #' @param to \eqn{t}-statistic from the original study #' @param no Sample size of the original study (per group) #' @param n1o Sample size in group 1 of the original study (only required for #' two-sample \eqn{t}-test with unequal group sizes) #' @param n2o Sample size in group 2 of the original study (only required for #' two-sample \eqn{t}-test with unequal group sizes) #' @param tr \eqn{t}-statistic from the replication study #' @param nr Sample size of the replication study (per group) #' @param n1r Sample size in group 1 of the replication study (only required for #' two-sample \eqn{t}-test with unequal group sizes) #' @param n2r Sample size in group 2 of the replication study (only required for #' two-sample \eqn{t}-test with unequal group sizes) #' @param ss Standard deviation of the sceptical prior under #' \eqn{H_\mathrm{S}}{HS}. Defaults to \code{0} #' @param type Type of \eqn{t}-test associated with \eqn{t}-statistic. Can be #' `"two.sample"`, `"one.sample"`, `"paired"`. Defaults to `"two.sample"`. #' #' @return The generalized replication Bayes factor #' \eqn{\mathrm{BF}_{\mathrm{SA}}}{BF_SA}. \eqn{\mathrm{BF}_{\mathrm{SA}} < #' 1}{BF_SA < 1} indicates that the data favour the advocate's hypothesis #' \eqn{H_{\mathrm{A}}}{HA} (replication success), whereas #' \eqn{\mathrm{BF}_{\mathrm{SA}} > 1}{BF_SA > 1} indicates that the data #' favour the sceptic's hypothesis \eqn{H_{\mathrm{S}}}{HS} (replication #' failure). #' #' @seealso \code{\link{BFr}}, \code{\link{BFrlogOR}} #' #' @references Verhagen, J. and Wagenmakers, E. J. (2014). Bayesian tests to #' quantify the result of a replication attempt. Journal of Experimental #' Psychology: General, 143:1457-1475. \doi{10.1037/a0036731} #' #' Pawel, S. and Held, L. (2022). The sceptical Bayes factor for the assessment #' of replication success. Journal of the Royal Statistical Society Series #' B: Statistical Methodology, 84(3): 879-911. \doi{10.1111/rssb.12491} #' #' @author Samuel Pawel #' #' @examples #' data("SSRPexact") #' morewedge2010 <- subset(SSRPexact, study == "Morewedge et al. (2010), Science") #' with(morewedge2010, #' BFrSMD(to = to, n1o = n1o, n2o = n2o, tr = tr, n1r = n1r, n2r = n2r, ss = 0)) #' #' @export BFrSMD <- Vectorize(.BFrSMD)
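## A minimal sketch (not part of the package, not run): for large samples the
## exact t-likelihood Bayes factor BFrSMD should approach the result of the
## normal-approximation version BFr on the relative z-scale. The inputs
## zo ~ to, zr ~ tr, and c as the ratio of the effective sample sizes are
## approximations assumed by this sketch, not package code.
## nstaro <- 1/(1/50 + 1/50)   # effective sample size, original study
## nstarr <- 1/(1/100 + 1/100) # effective sample size, replication study
## BFrSMD(to = 2.5, n1o = 50, n2o = 50, tr = 2.8, n1r = 100, n2r = 100, ss = 0)
## BFr(zo = 2.5, zr = 2.8, c = nstarr/nstaro, g = 0) # should be close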
## File: BayesRep/R/BFr.R
## non-vectorized version of BFs .BFs <- function(to, so, tr, sr, truncate = FALSE, zo = NULL, zr = NULL, c = NULL) { ## check inputs stopifnot( length(truncate) == 1, is.logical(truncate), !is.na(truncate) ) if (!missing(to) && !missing(so) && !missing(tr) && !missing(sr)) { ## parametrization 1 stopifnot( length(to) == 1, is.numeric(to), is.finite(to), length(so) == 1, is.numeric(so), is.finite(so), 0 < so, length(tr) == 1, is.numeric(tr), is.finite(tr), length(sr) == 1, is.numeric(sr), is.finite(sr), 0 < sr ) ## compute relative quantities zo <- to/so zr <- tr/sr c <- so^2/sr^2 } else { ## parametrization 2 stopifnot( length(zo) == 1, is.numeric(zo), is.finite(zo), length(zr) == 1, is.numeric(zr), is.finite(zr), length(c) == 1, is.numeric(c), is.finite(c), 0 < c ) } ## compute some quantities for initial checks gMinBFo <- max(c(zo^2 - 1, 0)) ## relative prior variance at minBFo minBFo <- BFo(zo = zo, g = gMinBFo) ## minBFo BFr0 <- BFr(zo = zo, zr = zr, c = c, g = 0, truncate = truncate) ## Replication BF (BF_SA(zr; g=0)) BFrgMinBFo <- BFr(zo = zo, zr = zr, c = c, g = gMinBFo, truncate = truncate) ## BF_SA(zr; g=gminBFo) ## when BF_SA(zr; gMinBFo) <= minBFo: ## BFs = minBFo if (BFrgMinBFo <= minBFo) { vss <- gMinBFo ## otherwise determine g* where BFo and BFr intersect: ## BFs = BFo(g*) = BFr(g*) } else { ## when c = 1: use Lambert W function to compute g* if (c == 1) { x <- 0.5*(zo^2 + zr^2)/sqrt(2)*exp(-0.5*(zo^2 + 0.5*(zr - zo)^2)) if (truncate == TRUE) { x <- x*stats::pnorm(q = sign(zo)*(zo + zr)/sqrt(2))/stats::pnorm(q = abs(zo)) } res <- -0.5*(zo^2 + zr^2)/lamW::lambertWm1(x = -x) - 1 if (res < 0) { vss <- NaN } else { vss <- res } ## when c != 1: compute g* numerically } else { BFdiff <- function(g) { logBFo <- BFo(zo = zo, g = g, log = TRUE) logBFr <- BFr(zo = zo, zr = zr, c = c, g = g, log = TRUE, truncate = truncate) return(logBFr - logBFo) } res <- try(stats::uniroot(f = BFdiff, lower = 0, upper = gMinBFo)$root, silent = TRUE) if (inherits(res, "try-error")) { vss <- NaN } else { vss <- res } } } ## return BFs if (is.nan(vss)) { bfs <- NaN } else { bfs <- BFo(zo = zo, g = vss) } return(bfs) } #' @title Sceptical Bayes factor #' #' @description Computes the sceptical Bayes factor #' #' @details The sceptical Bayes factor is a summary measure of the following #' two-step reverse-Bayes procedure for assessing replication success: #' #' 1. Use the data from the original study to determine the standard deviation #' \eqn{\tau_{\gamma}}{tau_gamma} of a sceptical normal prior \eqn{\theta \sim #' \mathrm{N}(0, \tau_{\gamma}^2)}{HS: theta ~ N(0, tau_gamma^2)} such that the #' Bayes factor contrasting the null hypothesis \eqn{H_0: \theta = 0}{H0: theta #' = 0} to the sceptic's hypothesis \eqn{H_{\mathrm{S}}: \theta \sim #' \mathrm{N}(0, \tau_{\gamma}^2)}{HS: theta ~ N(0, tau_gamma^2)} equals a #' specified level \eqn{\gamma \in (0, 1]}{gamma in (0, 1]}. This prior #' represents a sceptic who remains unconvinced about the presence of an effect #' at level \eqn{\gamma}{gamma}. #' #' 2. Use the data from the replication study to compare the sceptic's #' hypothesis \eqn{H_{\mathrm{S}}: \theta \sim \mathrm{N}(0, #' \tau_{\gamma}^2)}{HS: theta ~ N(0, tau_gamma^2)} to the advocate's hypothesis #' \eqn{H_{\mathrm{A}}: \theta \sim f(\theta \, | \, #' \mathrm{original~study})}{HA: theta ~ f(theta | original study)}.
The prior of #' the effect size under \eqn{H_{\mathrm{A}}}{HA} is its posterior based on the #' original study and a uniform prior, thereby representing the position of an #' advocate of the original study. Replication success at level #' \eqn{\gamma}{gamma} is achieved if the Bayes factor contrasting #' \eqn{H_{\mathrm{S}}}{HS} to \eqn{H_{\mathrm{A}}}{HA} is smaller than #' \eqn{\gamma}{gamma}, which means that the replication data favour the #' advocate over the sceptic at a higher level than the sceptic's initial #' objection. The sceptical Bayes factor \eqn{\mathrm{BF}_{\mathrm{S}}}{BF_S} is #' the smallest level \eqn{\gamma}{gamma} at which replication success can be #' established. #' #' The function can be used with two input parametrizations, either on the #' absolute effect scale (\code{to}, \code{so}, \code{tr}, \code{sr}) or #' alternatively on the relative *z*-scale (\code{zo}, \code{zr}, \code{c}). If #' an argument on the effect scale is missing, the *z*-scale is automatically #' used and the other non-missing arguments on the effect scale are ignored. #' #' @param to Original effect estimate #' @param so Standard error of the original effect estimate #' @param tr Replication effect estimate #' @param sr Standard error of the replication effect estimate #' @param truncate Logical indicating whether the advocacy prior should be truncated #' to the direction of the original effect estimate (i.e., a one-sided test). #' Defaults to \code{FALSE} #' @param zo Original *z*-value \code{zo} = \code{to}/\code{so} (alternative #' parametrization for \code{to} and \code{so}) #' @param zr Replication *z*-value \code{zr} = \code{tr}/\code{sr} (alternative #' parametrization for \code{tr} and \code{sr}) #' @param c Relative variance \code{c = so^2/sr^2} (alternative parametrization #' for \code{so} and \code{sr}) #' #' @return The sceptical Bayes factor \eqn{\mathrm{BF}_{\mathrm{S}}}{BF_S}. #' \eqn{\mathrm{BF}_{\mathrm{S}} < 1}{BF_S < 1} indicates replication #' success; the smaller the value of \eqn{\mathrm{BF}_{\mathrm{S}}}{BF_S}, #' the higher the degree of replication success. It is possible that the #' result of the replication is so inconclusive that replication success #' cannot be established at any level. In this case, the sceptical Bayes #' factor does not exist and the function returns \code{NaN}. #' #' #' @references Pawel, S. and Held, L. (2022). The sceptical Bayes factor for the #' assessment of replication success. Journal of the Royal Statistical #' Society Series B: Statistical Methodology, 84(3): 879-911.
#' \doi{10.1111/rssb.12491} #' #' @author Samuel Pawel #' #' @seealso \code{\link{BFsSMD}}, \code{\link{BFslogOR}} #' #' @examples #' to <- 2 #' tr <- 2.5 #' so <- 1 #' sr <- 1 #' BFs(to = to, so = so, tr = tr, sr = sr) #' BFs(zo = to/so, zr = tr/sr, c = so^2/sr^2) #' #' @export BFs <- Vectorize(.BFs) ## non-vectorized version of BFslogOR .BFslogOR <- function(ao, bo, nTo = ao + bo, co, do, nCo = co + do, ar, br, nTr = ar + br, cr, dr, nCr = cr + dr, method = c("integration", "hypergeo")) { ## check inputs if (!missing(bo)) { ## parametrization 1 stopifnot( length(bo) == 1, is.numeric(bo), is.finite(bo), 0 <= bo ) } if (!missing(do)) { ## parametrization 1 stopifnot( length(do) == 1, is.numeric(do), is.finite(do), 0 <= do ) } if (!missing(br)) { ## parametrization 1 stopifnot( length(br) == 1, is.numeric(br), is.finite(br), 0 <= br ) } if (!missing(dr)) { ## parametrization 1 stopifnot( length(dr) == 1, is.numeric(dr), is.finite(dr), 0 <= dr ) } stopifnot( length(ao) == 1, is.numeric(ao), is.finite(ao), 0 <= ao, length(nTo) == 1, is.numeric(nTo), is.finite(nTo), 0 <= nTo, length(co) == 1, is.numeric(co), is.finite(co), 0 <= co, length(nCo) == 1, is.numeric(nCo), is.finite(nCo), 0 <= nCo, length(ar) == 1, is.numeric(ar), is.finite(ar), 0 <= ar, length(nTr) == 1, is.numeric(nTr), is.finite(nTr), 0 <= nTr, length(cr) == 1, is.numeric(cr), is.finite(cr), 0 <= cr, length(nCr) == 1, is.numeric(nCr), is.finite(nCr), 0 <= nCr, !is.null(method) ) method <- match.arg(method) ## compute minimum BF for original data minBFoptim <- stats::optim(par = 0, fn = function(ss2) { ## for some reason this optimization works better on the variance scale log(BFologOR(ao = ao, nTo = nTo, co = co, nCo = nCo, ss = sqrt(ss2))) }, method = "L-BFGS-B", lower = 0, upper = Inf) ssminBFo <- sqrt(minBFoptim$par) minBFo <- exp(minBFoptim$value) ## when ssmin = 0, check by hand whether success at level = 1 if (ssminBFo == 0) { BFr0 <- BFrlogOR(ao = ao, nTo = nTo, co = co, nCo = nCo, ar = ar, nTr = nTr, cr = cr, nCr = nCr, ss = 0, method = method) if (is.nan(BFr0)) { warnMessage <- paste("numerical problems when computing posterior density, try method =", ifelse(method == "integration", "'hypergeo'", "'integration'")) warning(warnMessage) ssSceptical <- NaN bfs <- NaN } else if (BFr0 <= 1) { ssSceptical <- 0 bfs <- 1 } else { ssSceptical <- NaN bfs <- NaN } } else { ## check by hand whether success at level minBFo BFrminBFo <- BFrlogOR(ao = ao, nTo = nTo, co = co, nCo = nCo, ar = ar, nTr = nTr, cr = cr, nCr = nCr, ss = ssminBFo, method = method) if (is.nan(BFrminBFo)) { warnMessage <- paste("numerical problems when computing posterior density, try method =", ifelse(method == "integration", "'hypergeo'", "'integration'")) warning(warnMessage) ssSceptical <- NaN bfs <- NaN } else if (BFrminBFo <= minBFo) { ssSceptical <- ssminBFo bfs <- minBFo } else { ## otherwise use uniroot rootFun <- function(ss2) { res <- BFrlogOR(ao = ao, nTo = nTo, co = co, nCo = nCo, ar = ar, nTr = nTr, cr = cr, nCr = nCr, ss = sqrt(ss2), method = method) - BFologOR(ao = ao, nTo = nTo, co = co, nCo = nCo, ss = sqrt(ss2)) return(res) } rootRes <- try(stats::uniroot(f = rootFun, interval = c(0, ssminBFo^2)), silent = TRUE) if (inherits(rootRes, "try-error")) { ssSceptical <- NaN bfs <- NaN } else { ssSceptical <- sqrt(rootRes$root) bfs <- BFologOR(ao = ao, nTo = nTo, co = co, nCo = nCo, ss = ssSceptical) } } } return(bfs) } #' @title Sceptical Bayes factor for logOR effect sizes #' #' @description Computes the sceptical Bayes factor for logOR effect
sizes #' #' @details This function computes the sceptical Bayes factor for log odds ratio #' (logOR) effect sizes using an exact binomial likelihood for the data #' instead of the normal approximation used in \code{\link{BFs}} (for #' details, see Section 4 in Pawel and Held, 2022). #' #' @param ao Number of cases in original study treatment group #' @param bo Number of non-cases in original study treatment group #' @param nTo Number of participants in original study treatment group (specify #' alternatively to \code{bo}) #' @param co Number of cases in original study control group #' @param do Number of non-cases in original study control group #' @param nCo Number of participants in original study control group (specify #' alternatively to \code{do}) #' @param ar Number of cases in replication study treatment group #' @param br Number of non-cases in replication study treatment group #' @param nTr Number of participants in replication study treatment group #' (specify alternatively to \code{br}) #' @param cr Number of cases in replication study control group #' @param dr Number of non-cases in replication study control group #' @param nCr Number of participants in replication study control group (specify #' alternatively to \code{dr}) #' @param method Method to compute posterior density. Either #' \code{"integration"} (default) or \code{"hypergeo"} #' #' @return The sceptical Bayes factor \eqn{\mathrm{BF}_{\mathrm{S}}}{BF_S}. #' \eqn{\mathrm{BF}_{\mathrm{S}} < 1}{BF_S < 1} indicates replication #' success; the smaller the value of \eqn{\mathrm{BF}_{\mathrm{S}}}{BF_S}, #' the higher the degree of replication success. It is possible that the #' result of the replication is so inconclusive that replication success #' cannot be established at any level. In this case, the sceptical Bayes #' factor does not exist and the function returns \code{NaN}. #' #' @author Samuel Pawel #' #' @references Pawel, S. and Held, L. (2022). The sceptical Bayes factor for the #' assessment of replication success. Journal of the Royal Statistical #' Society Series B: Statistical Methodology, 84(3): 879-911.
#' \doi{10.1111/rssb.12491} #' #' @seealso \code{\link{BFs}}, \code{\link{BFrlogOR}} #' #' @examples #' data("SSRPexact") #' balafoutas2012 <- subset(SSRPexact, study == "Balafoutas and Sutter (2012), Science") #' with(balafoutas2012, #' BFslogOR(ao = ao, bo = bo, co = co, do = do, ar = ar, br = br, cr = cr, dr = dr)) #' #' @export BFslogOR <- Vectorize(.BFslogOR) ## non-vectorized version of BFsSMD .BFsSMD <- function(to, no, n1o = no, n2o = no, tr, nr, n1r = nr, n2r = nr, type = c("two.sample", "one.sample", "paired")) { ## input checks stopifnot( length(to) == 1, is.numeric(to), is.finite(to), length(tr) == 1, is.numeric(tr), is.finite(tr), length(n1o) == 1, is.numeric(n1o), is.finite(n1o), 0 < n1o, length(n2o) == 1, is.numeric(n2o), is.finite(n2o), 0 < n2o, length(n1r) == 1, is.numeric(n1r), is.finite(n1r), 0 < n1r, length(n2r) == 1, is.numeric(n2r), is.finite(n2r), 0 < n2r, !is.null(type) ) type <- match.arg(type) if (type != "two.sample") { startpar <- max(c(to^2 - 1, 0))/(1/n1o + 1/n2o) if (n1o != n2o) { warning(paste0('different n1o and n2o supplied but type set to "', type, '", using no = n1o')) } if (n1r != n2r) { warning(paste0('different n1r and n2r supplied but type set to "', type, '", using nr = n1r')) } } else { startpar <- max(c(to^2 - 1, 0))/n1o } suppressWarnings({ ## compute minimum BF for original data minBFoptim <- stats::optim(par = startpar, fn = function(ss) { BFoSMD(to = to, n1o = n1o, n2o = n2o, ss = ss, type = type) }, method = "L-BFGS-B", lower = 0, upper = Inf) ssminBFo <- minBFoptim$par minBFo <- minBFoptim$value ## when ssmin = 0, check by hand whether success at level = 1 if (ssminBFo == 0) { BFr0 <- BFrSMD(to = to, n1o = n1o, n2o = n2o, tr = tr, n1r = n1r, n2r = n2r, ss = 0, type = type) if (BFr0 <= 1) { ssSceptical <- 0 bfs <- 1 } else { ssSceptical <- NaN bfs <- NaN } } else { ## check by hand whether success at level = minBFo BFrminBFo <- BFrSMD(to = to, n1o = n1o, n2o = n2o, tr = tr, n1r = n1r, n2r = n2r, ss = ssminBFo, type = type) if (BFrminBFo <= minBFo) { ssSceptical <- ssminBFo bfs <- minBFo } else { ## otherwise use uniroot rootFun <- function(ss2) { res <- BFrSMD(to = to, n1o = n1o, n2o = n2o, tr = tr, n1r = n1r, n2r = n2r, ss = sqrt(ss2), type = type) - BFoSMD(to = to, n1o = n1o, n2o = n2o, ss = sqrt(ss2), type = type) return(res) } rootRes <- try(stats::uniroot(f = rootFun, interval = c(0, ssminBFo^2)), silent = TRUE) if (inherits(rootRes, "try-error")) { ssSceptical <- NaN bfs <- NaN } else { ssSceptical <- sqrt(rootRes$root) bfs <- BFoSMD(to = to, n1o = n1o, n2o = n2o, ss = ssSceptical, type = type) } } } return(bfs) }) } #' @title Sceptical Bayes factor for SMD effect sizes #' #' @description Computes the sceptical Bayes factor for standardized mean #' difference (SMD) effect sizes #' #' @details This function computes the sceptical Bayes factor for standardized #' mean difference (SMD) effect sizes using an exact *t*-likelihood for the #' data instead of the normal approximation used in \code{\link{BFs}} (for #' details, see Section 4 in Pawel and Held, 2022). Data from both studies #' are summarized by \eqn{t}-statistics and sample sizes. The following #' types of \eqn{t}-tests are accepted: #' #' - Two-sample \eqn{t}-test where the SMD represents the standardized #' mean difference between two group means (assuming equal variances in #' both groups). #' - One-sample \eqn{t}-test where the SMD represents the standardized #' mean difference to the null value.
#' - Paired \eqn{t}-test where the SMD represents the standardized mean #' difference score. #' #' @param to \eqn{t}-statistic from the original study #' @param no Sample size of the original study (per group) #' @param n1o Sample size in group 1 of the original study (only required for #' two-sample \eqn{t}-test with unequal group sizes) #' @param n2o Sample size in group 2 of the original study (only required for #' two-sample \eqn{t}-test with unequal group sizes) #' @param tr \eqn{t}-statistic from the replication study #' @param nr Sample size of the replication study (per group) #' @param n1r Sample size in group 1 of the replication study (only required for #' two-sample \eqn{t}-test with unequal group sizes) #' @param n2r Sample size in group 2 of the replication study (only required for #' two-sample \eqn{t}-test with unequal group sizes) #' @param type Type of \eqn{t}-test associated with \eqn{t}-statistic. Can be #' `"two.sample"`, `"one.sample"`, `"paired"`. Defaults to `"two.sample"`. #' #' @return The sceptical Bayes factor \eqn{\mathrm{BF}_{\mathrm{S}}}{BF_S}. #' \eqn{\mathrm{BF}_{\mathrm{S}} < 1}{BF_S < 1} indicates replication #' success; the smaller the value of \eqn{\mathrm{BF}_{\mathrm{S}}}{BF_S}, #' the higher the degree of replication success. It is possible that the #' result of the replication is so inconclusive that replication success #' cannot be established at any level. In this case, the sceptical Bayes #' factor does not exist and the function returns \code{NaN}. #' #' @author Samuel Pawel #' #' @references Pawel, S. and Held, L. (2022). The sceptical Bayes factor for the #' assessment of replication success. Journal of the Royal Statistical #' Society Series B: Statistical Methodology, 84(3): 879-911. #' \doi{10.1111/rssb.12491} #' #' @seealso \code{\link{BFs}}, \code{\link{BFslogOR}} #' #' @examples #' data("SSRPexact") #' morewedge2010 <- subset(SSRPexact, study == "Morewedge et al. (2010), Science") #' with(morewedge2010, #' BFsSMD(to = to, n1o = n1o, n2o = n2o, tr = tr, n1r = n1r, n2r = n2r)) #' #' @export BFsSMD <- Vectorize(.BFsSMD)
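## A minimal sketch (not part of the package, not run): the same large-sample
## correspondence holds for the sceptical Bayes factor, with BFsSMD based on
## the exact t-likelihood and BFs on the normal approximation (zo ~ to,
## zr ~ tr, and c as the effective sample size ratio are assumptions of this
## sketch).
## nstaro <- 1/(1/50 + 1/50)
## nstarr <- 1/(1/100 + 1/100)
## BFsSMD(to = 2.5, n1o = 50, n2o = 50, tr = 2.8, n1r = 100, n2r = 100)
## BFs(zo = 2.5, zr = 2.8, c = nstarr/nstaro) # should be close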
## File: BayesRep/R/BFs.R
## non-vectorized version of formatBF .formatBF <- function(BF, digits = "default") { ## check inputs stopifnot( length(BF) == 1, is.numeric(BF), (is.finite(BF) && 0 < BF) || is.na(BF), length(digits) == 1, (is.character(digits) && digits == "default") || (is.numeric(digits) && 0 <= digits) ) ## return NA if input NA if (is.na(BF) || is.nan(BF)) { result <- NA } else { ## format BF if (digits == "default") { if (BF < 1/1000) result <- "< 1/1000" if ((BF >= 1/1000) && (BF <= 1/10)) result <- paste0("1/", as.character(round(1/BF))) if ((BF > 1/10) && (BF < 1)) result <- paste0("1/", as.character(round(1/BF, digits = 1))) if ((BF < 10) && (BF >= 1)) result <- as.character(round(BF, digits = 1)) if ((BF >= 10) && (BF <= 1000)) result <- as.character(round(BF)) if (BF > 1000) result <- "> 1000" } else { if (BF < 1) { result <- paste0("1/", as.character(round(1/BF, digits = digits))) } else { result <- as.character(round(BF, digits = digits)) } } ## when 1/1 return 1 if (result == "1/1") result <- "1" } return(result) } #' @title Formatting of Bayes factors #' #' @description Formats Bayes factors such that Bayes factors smaller than 1 are #' represented as ratios \eqn{1/x}, where \eqn{x} is rounded to the #' specified number of digits, while Bayes factors larger than 1 are only #' rounded to the specified number of digits. #' #' @param BF Bayes factor #' @param digits Either \code{"default"} (see Details) or a positive integer #' specifying the number of decimal places to round the Bayes factor (for #' Bayes factors \eqn{\geq 1}{>= 1}) or its inverse (for Bayes factors #' \eqn{< 1}{< 1}) #' #' @return A character vector of ratios (for inputs \eqn{< 1}{< 1}) or rounded #' values (for inputs \eqn{\geq 1}{>= 1}). #' #' @details The default formatting, which is recommended in Held and Ott (2018), #' is as follows: For very small Bayes factors BF < 1/1000, "< 1/1000" is #' returned. Bayes factors BF with 1/1000 \eqn{\leq}{<=} BF \eqn{\leq}{<=} #' 1/10 are formatted as \eqn{1/x} where \eqn{x} is an integer and Bayes #' factors BF with \eqn{1/10} \eqn{<} BF \eqn{<} 1 as \eqn{1/x}, where #' \eqn{x} is rounded to one decimal place. Accordingly, Bayes factors #' 1 \eqn{\leq}{<=} BF \eqn{<} 10 are rounded to one decimal place, Bayes #' factors 10 \eqn{\leq}{<=} BF \eqn{\leq}{<=} 1000 are rounded to the next #' integer and for larger Bayes factors, "> 1000" is returned. #' #' If digits is specified, the Bayes factor (if it is \eqn{\geq}{>=} 1) or its #' inverse (if the Bayes factor is \eqn{<} 1) is rounded to the number of #' decimal places specified and returned as a ratio if the Bayes factor is #' \eqn{<} 1. #' #' @references Held, L. and Ott, M. (2018). On \eqn{p}-values and Bayes factors. #' Annual Review of Statistics and Its Application, 5, 393-419.
#' \doi{10.1146/annurev-statistics-031017-100307} #' #' @author Manuela Ott (creator of package \code{pCalibrate}), Leonhard Held #' (contributor of package \code{pCalibrate}), Samuel Pawel (made small #' changes to \code{pCalibrate::formatBF}) #' #' @examples #' (bf <- BFr(to = 2, so = 0.5, tr = 2.5, sr = 0.9)) #' formatBF(BF = bf) #' #' @export formatBF <- Vectorize(.formatBF) #' @title Density of truncated normal distribution #' #' @description Computes density of normal distribution truncated to interval #' \eqn{[a, b]}{`[`a, b`]`} #' #' @param x Quantile #' @param mean Mean #' @param sd Standard deviation #' @param a Lower truncation bound #' @param b Upper truncation bound #' @param log Logical indicating whether natural logarithm of density #' should be returned #' #' @return Numeric vector of (log) densities #' #' @author Samuel Pawel #' #' @examples #' ## verify that density integrates to one #' stats::integrate(f = dnormtrunc, lower = -0.5, upper = 2, a = -0.5, b = 2) #' #' @noRd dnormtrunc <- function(x, mean = 0, sd = 1, a = -Inf, b = Inf, log = FALSE) { ## compute normalizing constant k and indicator I k <- diff(stats::pnorm(q = c(a, b), mean = mean, sd = sd)) I <- as.numeric(x >= a & x <= b) ## compute and return truncated density dens <- stats::dnorm(x = x, mean = mean, sd = sd)*I/k if (log == TRUE) dens <- log(dens) return(dens) } ## non-vectorized version of vss .vss <- function(x, gamma, jeffreys = FALSE) { ## input checks stopifnot(length(x) == 1, is.numeric(x), is.finite(x), length(gamma) == 1, is.numeric(gamma), is.finite(gamma), 0 < gamma, gamma <= 1, length(jeffreys) == 1, is.logical(jeffreys)) ## if |x| <= 1, vss exists only for gamma = 1 if (abs(x) <= 1) { if (gamma == 1) res <- 0 else res <- NaN } else { ## compute sufficiently sceptical relative prior variance with W-function y <- -x^2*exp(-x^2)/gamma^2 if (jeffreys == FALSE) { res <- as.numeric(-x^2/lamW::lambertWm1(x = y) - 1) } else { res <- as.numeric(-x^2/lamW::lambertW0(x = y) - 1) } if (!is.nan(res) && res < 0) res <- NaN } return(res) } #' @title Sufficiently sceptical relative prior variance #' #' @description Computes sufficiently sceptical relative prior variance at level #' \eqn{\gamma}{gamma}. It is defined as the relative prior variance #' \eqn{g_\gamma}{g_gamma} such that the Bayes factor contrasting \deqn{H_0: #' \theta = 0}{H0: theta = 0} to \deqn{H_1: \theta \sim \mathrm{N}(0, #' g_\gamma \cdot \sigma^2)}{H1: theta ~ N(0, g_gamma*sigma^2)} for data #' \eqn{x \, | \, \theta \sim \mathrm{N}(\theta, \sigma^2)}{ x | theta ~ #' N(theta, sigma^2)} with known variance \eqn{\sigma^2}{sigma^2}, is fixed #' at the specified threshold \eqn{\gamma}{gamma}, i.e. #' \deqn{\mathrm{BF}_{01}(x) = \frac{f(x \, | \, H_0)}{f(x \, | \, H_1)} = #' \gamma.}{ BF01(x) = f(x|H0)/f(x|H1) = gamma.} #' #' If the sufficiently #' sceptical relative prior variance exists, there are always two solutions #' \eqn{g_\gamma}{g_gamma} and \eqn{g^\prime_\gamma}{g'_gamma}, however, #' only the smaller of the two is usually of interest, as the second #' solution merely exists due to the Jeffreys-Lindley paradox. If desired, #' also the larger solution \eqn{g^\prime_\gamma}{g'_gamma} can be computed #' with the argument `jeffreys` set to `TRUE`. #' #' @param x Observed data value #' @param gamma Bayes factor threshold \eqn{\gamma}{gamma} #' @param jeffreys Logical indicating whether Jeffreys-Lindley solution #' should be returned. 
Default is `FALSE` #' @return Sufficiently sceptical relative prior variance (relative to #' variance of the data) if it exists, otherwise NaN #' #' #' @references Pawel, S. and Held, L. (2022). The sceptical Bayes factor for the #' assessment of replication success. Journal of the Royal Statistical #' Society Series B: Statistical Methodology, 84(3): 879-911. #' \doi{10.1111/rssb.12491} #' #' @author Samuel Pawel #' #' @examples #' zo <- 3 #' gamma <- 1/10 #' gss <- vss(x = zo, gamma = gamma) #' g <- seq(0, 4, 0.01) #' #' plot(g, BFo(zo = zo, g = g), type = "l", log = "y", yaxt = "n", ylab = "BF") #' bf_breaks <- c(100, 30, 10, 3, 1, 1/3, 1/10, 1/30, 1/100) #' axis(side = 2, at = bf_breaks, labels = formatBF(bf_breaks), las = 1) #' abline(h = gamma, lty = 2) #' segments(x0 = gss, y0 = 0.001, y1 = gamma, lty = 2, col = 2) #' #' @noRd vss <- Vectorize(.vss)
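## A minimal sketch (not run) checking the defining property of vss: at the
## sufficiently sceptical relative prior variance g, the Bayes factor
## BF01 = sqrt(1 + g)*exp(-0.5*x^2*g/(1 + g)) equals gamma (this closed form
## follows from the two normal marginal likelihoods described above).
## x <- 3
## gamma <- 1/10
## g <- vss(x = x, gamma = gamma)
## sqrt(1 + g)*exp(-0.5*x^2*g/(1 + g)) # should equal gamma = 0.1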
## File: BayesRep/R/BFutils.R
#' @title BayesRep package #' #' @description The BayesRep package provides various tools for Bayesian #' analysis of replication studies. #' #' \code{\link{repPosterior}} visualizes the posterior distribution of the #' effect size based on both studies. \code{\link{BFs}} computes the #' sceptical Bayes factor (Pawel and Held, 2022), \code{\link{BFr}} computes #' the replication Bayes factor (Verhagen and Wagenmakers, 2014), and #' \code{\link{BFe}} computes the equality of effect size Bayes factor #' (Bayarri and Mayoral, 2002). #' #' These functions take effect estimates and their standard errors from original #' and replication study as inputs. Throughout, original effect estimate and #' standard error are denoted by \code{to} and \code{so} and replication #' effect estimate and standard error are denoted by \code{tr} and \code{sr}. #' It is assumed that each effect estimate is normally distributed around #' its true underlying effect size with variance equal to its squared #' standard error \deqn{\code{to} \, | \, \theta_o \sim \mathrm{N}(\theta_o, #' \code{so}^2) ~ \mathrm{and} ~ \code{tr} \, | \, \theta_r \sim #' \mathrm{N}(\theta_r, \code{sr}^2).}{to | theta_o ~ N(theta_o, so^2) and #' tr | theta_r ~ N(theta_r, sr^2).} These assumptions may be inadequate for #' studies with small sample size (there are special functions for data with #' continuous outcomes and standardized mean difference effect size, #' \code{\link{BFsSMD}} and \code{\link{BFrSMD}}, and binary outcomes with #' log odds ratio effects, \code{\link{BFslogOR}} and #' \code{\link{BFrlogOR}}, which are based on the exact distribution of the #' data). If not specified otherwise, it is assumed that the true effect #' sizes from both studies are the same (\eqn{\theta_o = \theta_r}). #' #' #' @references Bayarri, M. and Mayoral, A. (2002). Bayesian Design of #' "Successful" Replications. The American Statistician, 56(3): 207-214. #' \doi{10.1198/000313002155} #' #' Verhagen, J. and Wagenmakers, E. J. (2014). Bayesian tests to quantify the result #' of a replication attempt. Journal of Experimental Psychology: General, #' 143:1457-1475. \doi{10.1037/a0036731} #' #' Pawel, S. and Held, L. (2022). The sceptical Bayes factor for the #' assessment of replication success. Journal of the Royal Statistical #' Society Series B: Statistical Methodology, 84(3): 879-911. #' \doi{10.1111/rssb.12491} #' #' #' @docType package #' @name BayesRep NULL
## File: BayesRep/R/BayesRep.R
#' Data from the Social Sciences Replication Project #' #' @description Data from the Social Sciences Replication Project. #' The variables are as follows: #' \describe{ #' \item{`study`}{Authors, year, and journal of the original study} #' \item{`type`}{Type of effect size. Either `"logOR"` for log odds ratio effect #' size, `"SMD1"` for standardized mean difference from one-sample or paired #' \eqn{t}-test, or `"SMD2"` for standardized mean difference from two-sample #' \eqn{t}-test} #' \item{`to`}{\eqn{t}-statistic from the original study #' (only available for `"SMD1"` and `"SMD2"`)} #' \item{`n1o`}{Sample size in group 1 of the original study #' (only available for `"SMD1"` and `"SMD2"`)} #' \item{`n2o`}{Sample size in group 2 of the original study #' (only available for `"SMD2"`)} #' \item{`tr`}{\eqn{t}-statistic from the replication study #' (only available for `"SMD1"` and `"SMD2"`)} #' \item{`n1r`}{Sample size in group 1 of the replication study #' (only available for `"SMD1"` and `"SMD2"`)} #' \item{`n2r`}{Sample size in group 2 of the replication study #' (only available for `"SMD2"`)} #' \item{`ao`}{Number of cases in original study treatment group #' (only available for `"logOR"`)} #' \item{`bo`}{Number of non-cases in original study treatment group #' (only available for `"logOR"`)} #' \item{`co`}{Number of cases in original study control group #' (only available for `"logOR"`)} #' \item{`do`}{Number of non-cases in original study control group #' (only available for `"logOR"`)} #' \item{`ar`}{Number of cases in replication study treatment group #' (only available for `"logOR"`)} #' \item{`br`}{Number of non-cases in replication study treatment group #' (only available for `"logOR"`)} #' \item{`cr`}{Number of cases in replication study control group #' (only available for `"logOR"`)} #' \item{`dr`}{Number of non-cases in replication study control group #' (only available for `"logOR"`)} #' } #' #' @name SSRPexact #' #' @docType data #' #' @author Samuel Pawel #' #' @usage data(SSRPexact) #' #' @format A data frame with 21 rows and 16 variables #' #' @source The data were manually extracted from the Bayesian supplement of the #' SSRP (<https://osf.io/nsxgj/>). The data are licensed under CC0 1.0 #' Universal. #' #' @references Camerer, C. F., Dreber, A., Holzmeister, F., Ho, T.-H., Huber, #' J., Johannesson, M., ... Wu, H. (2018). Evaluating the replicability of #' social science experiments in Nature and Science between 2010 and 2015. #' Nature Human Behaviour, 2, 637-644. \doi{10.1038/s41562-018-0399-z} #' #' #' @keywords data "SSRPexact" ## ## data extracted from Bayesian supplement of SSRP (https://osf.io/nsxgj/) ## study <- c("Ackerman et al. (2010), Science", ## "Aviezer et al. (2012), Science", ## "Balafoutas and Sutter (2012), Science", ## "Derex et al. (2013), Nature", ## "Duncan et al. (2012), Science", ## "Gervais and Norenzayan (2012), Science", ## "Gneezy et al. (2014), Science", ## "Hauser et al. (2014), Nature", ## "Janssen et al. (2010), Science", ## "Karpicke and Blunt (2011), Science", ## "Kidd and Castano (2013), Science", ## "Kovacs et al. (2010), Science", ## "Lee and Schwarz (2010), Science", ## "Morewedge et al. (2010), Science", ## "Nishi et al. (2015), Nature", ## "Pyc and Rawson (2010), Science", ## "Ramirez and Beilock (2011), Science", ## "Rand et al. (2012), Nature", ## "Shah et al. (2012), Science", ## "Sparrow et al. (2011), Science", ## "Wilson et al.
(2014), Science") ## type <- c("SMD2", "SMD1", "logOR", ## NA, # Derex: H0/H1: equal/ordered probabilities ## "SMD1", "SMD2", "logOR", "logOR", ## NA, # Janssen: non-parametric Mann-Whitney test ## "SMD2", "SMD2", "SMD1", "SMD2", "SMD2", "SMD2", "SMD2", "SMD2", ## "SMD2", "SMD2", "SMD1", "SMD2") ## ## results from studies with t-tests (one-sample or two-sample) ## to <- c(2.02, 13.07, NA, NA, 3.41, 2.24, NA, NA, NA, 4.65, 2.53, 2.42, 2.6, 2.78, ## 2.68, 2.37, 5.53, 2.45, 2.04, 3.26, 4.83) ## n1o <- c(26, 15, NA, NA, 15, 26, NA, NA, NA, 20, 43, 24, 21, 16, 10, 18, 10, 175, ## 26, 69, 15) ## n2o <- c(28, NA, NA, NA, NA, 31, NA, NA, NA, 20, 43, NA, 19, 16, 10, 18, 10, 168, ## 30, NA, 15) ## tr <- c(1.5351, 5.342, NA, NA, 4.6276, -0.82, NA, NA, NA, 2.8825, -0.726, 7.0152, ## -0.78, 3.54, 2.53, 2.64, -0.7352, 1.01, -0.373, 0.7579, 4.49) ## n1r <- c(296, 14, NA, NA, 92, 262, NA, NA, NA, 23, 349, 95, 147, 44, 24, 156, 45, ## 1058, 298, 234, 20) ## n2r <- c(303, NA, NA, NA, NA, 269, NA, NA, NA, 26, 365, NA, 139, 45, 24, 150, 34, ## 1078, 321, NA, 19) ## ## results from studies with binary outcomes ## ao <- c(NA, NA, 21, NA, NA, NA, 65, 20, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, ## NA, NA, NA) ## bo <- c(NA, NA, 15, NA, NA, NA, 26, 0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, ## NA, NA, NA) ## co <- c(NA, NA, 11, NA, NA, NA, 43, 4, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, ## NA, NA, NA) ## do <- c(NA, NA, 25, NA, NA, NA, 44, 16, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, ## NA, NA, NA) ## ar <- c(NA, NA, 63, NA, NA, NA, 147, 11, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, ## NA, NA, NA) ## br <- c(NA, NA, 60, NA, NA, NA, 55, 0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, ## NA, NA, NA) ## cr <- c(NA, NA, 44, NA, NA, NA, 113, 2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, ## NA, NA, NA) ## dr <- c(NA, NA, 76, NA, NA, NA, 92, 9, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, ## NA, NA, NA) ## SSRPexact <- data.frame(study = study, type = type, ## to, n1o, n2o, tr, n1r, n2r, ## ao, bo, co, do, ar, br, cr, dr) ## ## save as rds and use version = 2, otherwise the pkg will depend on R > 3.5 ## save(SSRPexact, file = "../data/SSRPexact.rda", version = 2)
## File: BayesRep/R/SSRPexact.R
#' @title Effect size posterior distribution #' #' @description Computes the posterior distribution of the effect size based on #' the original and replication effect estimates and their standard errors, #' assuming a common underlying effect size and an initial flat prior. #' #' @param to Original effect estimate #' @param so Standard error of the original effect estimate #' @param tr Replication effect estimate #' @param sr Standard error of the replication effect estimate #' @param lower Lower bound of range for which distribution should be computed. #' Defaults to minimum of \code{to} and \code{tr} minus four times the #' pooled standard error #' @param upper Upper bound of range for which distribution should be computed. #' Defaults to maximum of \code{to} and \code{tr} plus four times the pooled #' standard error #' @param nGrid Number of grid points. Defaults to \code{1000} #' @param plot Logical indicating whether posterior distribution should be #' plotted. If \code{FALSE}, only data used for plotting are returned. #' Defaults to \code{TRUE} #' @param CI Logical indicating whether 95% highest posterior credible interval #' should be plotted. Defaults to \code{TRUE} #' @param ... Additional arguments passed to \code{matplot} #' #' @return Plots posterior distribution of the effect size, invisibly returns a #' list with the data for the plot #' #' @author Samuel Pawel #' #' @examples #' ## Example from Reproducibility Project Cancer Biology #' ## Aird: Data from https://elifesciences.org/articles/21253 Fig4B #' hro <- 25.93 #' lhro <- log(hro) #' hroCI <- c(5.48, 122.58) #' se_lhro <- diff(log(hroCI))/(2*qnorm(0.975)) #' hrr <- 3.75 #' lhrr <- log(hrr) #' hrrCI <- c(1.19, 11.81) #' se_lhrr <- diff(log(hrrCI))/(2*qnorm(0.975)) #' repPosterior(to = lhro, so = se_lhro, tr = lhrr, sr = se_lhrr) #' #' #' @export repPosterior <- function(to, so, tr, sr, lower = min(c(to, tr)) - 4/sqrt(1/so^2 + 1/sr^2), upper = max(c(to, tr)) + 4/sqrt(1/so^2 + 1/sr^2), nGrid = 1000, plot = TRUE, CI = TRUE, ...) { ## input checks stopifnot( length(to) == 1, is.numeric(to), is.finite(to), length(tr) == 1, is.numeric(tr), is.finite(tr), length(so) == 1, is.numeric(so), is.finite(so), 0 < so, length(sr) == 1, is.numeric(sr), is.finite(sr), 0 < sr, length(lower) == 1, is.numeric(lower), is.finite(lower), length(upper) == 1, is.numeric(upper), is.finite(upper), lower < upper, length(nGrid) == 1, is.numeric(nGrid), is.finite(nGrid), 0 < nGrid, length(plot) == 1, is.logical(plot), !is.na(plot), length(CI) == 1, is.logical(CI), !is.na(CI) ) ## Define appropriate range x <- seq(from = lower, to = upper, length.out = nGrid) ## Compute posterior s2Post <- 1/(1/so^2 + 1/sr^2) muPost <- s2Post*(to/so^2 + tr/sr^2) hpdLower <- muPost - stats::qnorm(p = 0.975)*sqrt(s2Post) hpdUpper <- muPost + stats::qnorm(p = 0.975)*sqrt(s2Post) ## Compute prior, likelihood, and posterior prior <- function(x) { d <- stats::dnorm(x = x, mean = to, sd = so) return(d) } likelihood <- function(x) { stats::dnorm(x = tr, mean = x, sd = sr) } posterior <- function(x) { d <- stats::dnorm(x = x, mean = muPost, sd = sqrt(s2Post)) return(d) } priorDF <- data.frame(x = x, density = prior(x)) posteriorDF <- data.frame(x = x, density = posterior(x)) likelihoodDF <- data.frame(x = x, density = likelihood(x)) if (plot == TRUE) { graphics::matplot(x = x, y = cbind(posteriorDF$density, priorDF$density, likelihoodDF$density), type = "l", lty = c(1, 2, 3), col = 1, ylim = c(0, posterior(x = muPost)*1.05), las = 1, xlab = "Effect size", ylab = "Density", ...)
graphics::legend("topright", legend = c("Posterior", "Original study (prior)", "Replication study (likelihood)"), lty = c(1, 2, 3), bty = "n") if (CI == TRUE) { graphics::arrows(x0 = hpdLower, x1 = hpdUpper, y0 = posterior(x = muPost)*1.025, length = 0.1, angle = 90, code = 3) } } out <- list("priorDF" = priorDF, "posteriorDF" = posteriorDF, "likelihoodDF" = likelihoodDF, "CI" = c(hpdLower, hpdUpper), "prior" = prior, "likelihood" = likelihood, "posterior" = posterior) invisible(out) }
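## A minimal sketch (not run) using the invisibly returned list instead of the
## plot: extract the 95% highest posterior density interval and evaluate the
## posterior density function directly (the effect estimates are made up).
## out <- repPosterior(to = 0.4, so = 0.1, tr = 0.2, sr = 0.1, plot = FALSE)
## out$CI              # 95% HPD interval of the common effect size
## out$posterior(0.3)  # posterior density at effect size 0.3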
## File: BayesRep/R/repPosterior.R
library(tinytest) library(BayesRep) ## formatBF ## ----------------------------------------------------------------------------- bf <- c(1/300.01, 1/3, 1/2, 1/1.01, 1, 1.01, 2, 3, 300.01, NaN, NA) bfexpected <- c("1/300", "1/3", "1/2", "1", "1", "1", "2", "3", "300", NA, NA) expect_equal(formatBF(bf), bfexpected, info = "formatBF works correctly") ## BFs and BFr ## ----------------------------------------------------------------------------- ## results from Table 1 in Pawel and Held (2022), extracted with dput tablePaper <- structure(list(to = c(1.14417264341251, 1.96382457556354, 0.818339255457479, 0.582737811761101, 0.226778691550525, 0.696711646322321, 0.488611206901194, 0.485326762453132, 0.817287557101302, 0.203339606989006, 0.743741868967827, 0.285393193419145, 0.396018834302785, 0.141926757107812, 0.27657049025531, 0.385698718807712, 0.274109959311116, 0.275860006439804, 0.297627922877741, 0.409601469520218, 1.07988209588739), so = c(0.164398987305357, 0.288675134594813, 0.192450089729875, 0.144337567297406, 0.0755928946018454, 0.164398987305357, 0.185695338177052, 0.218217890235992, 0.288675134594813, 0.0712470499879096, 0.129099444873581, 0.120385853085769, 0.174077655955698, 0.054232614454664, 0.140028008402801, 0.123091490979333, 0.137360563948689, 0.10976425998969, 0.136082763487954, 0.164398987305357, 0.242535625036333), tr = c(1.19476271568933, 1.18451560871901, 0.683204796807765, 0.37746393971232, 0.184400291006785, 0.404898618376599, 0.370800610719356, 0.671862945083057, 0.467849160696654, 0.116566798991139, 0.358414852842928, 0.147477389849293, 0.150864396208723, 0.025792251826568, 0.0627861994145097, 0.0496313025904017, -0.0150068664441846, -0.0272611982545971, -0.0354850035691282, -0.0463558676741315, -0.0985013285629837), sr = c(0.229415733870562, 0.301511344577764, 0.166666666666667, 0.127000127000191, 0.0497518595104995, 0.147441956154897, 0.107832773203438, 0.104257207028537, 0.105999788000636, 0.045786854649563, 0.160128153805087, 0.0645497224367903, 0.0574484989621426, 0.021652326748721, 0.040961596025952, 0.0657951694959769, 0.040291148201269, 0.0375029300308675, 0.0435194139889245, 0.0594438298277764, 0.114707866935281), bfs = c(0.0000530477419198753, 0.0127778208784195, 0.0224319045241547, 0.117135677207107, 0.144938815030417, 0.179077965986501, 0.255508503862225, 0.309170196577008, 0.322063749958956, 0.400925337910988, 0.629934964682855, 0.637603992664161, 0.849104156875212, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN), bfr = c(0.00000161275109902623, 0.00352146020839239, 0.000394726943693376, 0.0323155569085302, 0.00211057610414055, 0.082609423824399, 0.00626432422526435, 0.00000000299745740619506, 0.000325684031619899, 0.122358606947869, 0.606604189711098, 0.259071929155332, 0.248184710610032, 9.58539878281878, 3.21906462367827, 28.9728262321521, 25.4825254562925, 72.2003426853402, 35.6721291232169, 65.1067536780313, 24995.5941001993), study = c("Hauser et al. (2014)", "Aviezer et al. (2012)", "Wilson et al. (2014)", "Derex et al. (2013)", "Gneezy et al. (2014)", "Karpicke and Blunt (2011)", "Morewedge et al. (2010)", "Kovacs et al. (2010)", "Duncan et al. (2012)", "Nishi et al. (2015)", "Janssen et al. (2010)", "Balafoutas and Sutter (2012)", "Pyc and Rawson (2010)", "Rand et al. (2012)", "Ackerman et al. (2010)", "Sparrow et al. (2011)", "Shah et al. 
(2012)", "Kidd and Castano (2013)", "Gervais and Norenzayan (2012)", "Lee and Schwarz (2010)", "Ramirez and Beilock (2011)")), row.names = c(NA, -21L), class = "data.frame") tablePaper$zo <- tablePaper$to/tablePaper$so tablePaper$zr <- tablePaper$tr/tablePaper$sr tablePaper$c <- tablePaper$so^2/tablePaper$sr^2 ## BFs tests bfs1 <- BFs(to = tablePaper$to, so = tablePaper$so, tr = tablePaper$tr, sr = tablePaper$sr) expect_equal(bfs1, tablePaper$bfs, info = "BFs (computed with to, so, tr, sr) as in Table 1 from Pawel and Held (2022)") bfs2 <- BFs(zo = tablePaper$zo, zr = tablePaper$zr, c = tablePaper$c) expect_equal(bfs2, tablePaper$bfs, info = "BFs (computed with zo, zr, c) as in Table 1 from Pawel and Held (2022)") ## BFr tests bfr1 <- BFr(to = tablePaper$to, so = tablePaper$so, tr = tablePaper$tr, sr = tablePaper$sr, ss = 0) expect_equal(bfr1, tablePaper$bfr, info = "BFr (computed with to, so, tr, sr, ss) as in Table 1 from Pawel and Held (2022)") bfr2 <- BFr(zo = tablePaper$zo, zr = tablePaper$zr, c = tablePaper$c, g = 0) expect_equal(bfr2, tablePaper$bfr, info = "BFr (computed with zo, zr, c, g) as in Table 1 from Pawel and Held (2022)") ## BFrlogOR and BFslogOR ## ----------------------------------------------------------------------------- ## results from Table 1 in Pawel and Held (2022), extracted with dput logORtable <- structure(list(study = c("Balafoutas and Sutter (2012), Science", "Gneezy et al. (2014), Science", "Hauser et al. (2014), Nature"), ao = c(21, 65, 20), bo = c(15, 26, 0), co = c(11, 43, 4), do = c(25, 44, 16), ar = c(63, 147, 11), br = c(60, 55, 0), cr = c(44, 113, 2), dr = c(76, 92, 9), bfsint = c(0.633250599371189, 0.133503588926685, NaN), bfshyg = c(0.633250599371083, NaN, 0.000955394729787569), bfrepint = c(0.258716394614373, 0.00181587960505, NaN), bfrephyg = c(0.258716394614278, NaN, 0.0000595783977455087)), class = "data.frame", row.names = c(NA, -3L)) ## ## BFslogOR tests (take too long, don't run by default) ## bfslogor1 <- with(logORtable, BFslogOR(ao = ao, bo = bo, co = co, do = do, ## ar = ar, br = br, cr = cr, dr = dr, ## method = "integration")) ## expect_equal(bfslogor1, logORtable$bfsint, ## info = "BFslogOR (integration) as in Table 1 from Pawel and Held (2022)") ## bfslogor2 <- with(logORtable, BFslogOR(ao = ao, bo = bo, co = co, do = do, ## ar = ar, br = br, cr = cr, dr = dr, ## method = "hypergeo")) ## expect_equal(bfslogor2, logORtable$bfshyg, ## info = "BFslogOR (hypergeo) as in Table 1 from Pawel and Held (2022)") ## BFrlogOR tests bfrlogor1 <- with(logORtable, BFrlogOR(ao = ao, bo = bo, co = co, do = do, ar = ar, br = br, cr = cr, dr = dr, method = "integration", ss = 0)) expect_equal(bfrlogor1, logORtable$bfrepint, info = "BFrlogOR (integration) as in Table 1 from Pawel and Held (2022)") ## ## disable tests with method = "hypergeo" because causes issues on CRAN's M1 ## ## Mac and "integration" is anyway the default of BFrlogOR ## bfrlogor2 <- with(logORtable, BFrlogOR(ao = ao, bo = bo, co = co, do = do, ## ar = ar, br = br, cr = cr, dr = dr, ## method = "hypergeo", ss = 0)) ## expect_equal(bfrlogor2, logORtable$bfrephyg, ## info = "BFrlogOR (hypergeo) as in Table 1 from Pawel and Held (2022)") ## BFrSMD and BFsSMD ## ----------------------------------------------------------------------------- ## results from Table 1 in Pawel and Held (2022), extracted with dput smdTable <- structure(list(study = c("Ackerman et al. 
(2010), Science", "Gervais and Norenzayan (2012), Science", "Karpicke and Blunt (2011), Science", "Kidd and Castano (2013), Science", "Lee and Schwarz (2010), Science", "Morewedge et al. (2010), Science", "Nishi et al. (2015), Nature", "Pyc and Rawson (2010), Science", "Ramirez and Beilock (2011), Science", "Rand et al. (2012), Nature", "Shah et al. (2012), Science", "Wilson et al. (2014), Science", "Aviezer et al. (2012), Science", "Duncan et al. (2012), Science", "Kovacs et al. (2010), Science", "Sparrow et al. (2011), Science"), type = c("SMD", "SMD", "SMD", "SMD", "SMD", "SMD", "SMD", "SMD", "SMD", "SMD", "SMD", "SMD", "SM", "SM", "SM", "SM"), to = c(2.02, 2.24, 4.65, 2.53, 2.6, 2.78, 2.68, 2.37, 5.53, 2.45, 2.04, 4.83, 13.07, 3.41, 2.42, 3.26), n1o = c(26, 26, 20, 43, 21, 16, 10, 18, 10, 175, 26, 15, 15, 15, 24, 69), n2o = c(28, 31, 20, 43, 19, 16, 10, 18, 10, 168, 30, 15, NA, NA, NA, NA), tr = c(1.5351, -0.82, 2.8825, -0.726, -0.78, 3.54, 2.53, 2.64, -0.7352, 1.01, -0.373, 4.49, 5.342, 4.6276, 7.0152, 0.7579), n1r = c(296, 262, 23, 349, 147, 44, 24, 156, 45, 1058, 298, 20, 14, 92, 95, 234), n2r = c(303, 269, 26, 365, 139, 45, 24, 150, 34, 1078, 321, 19, NA, NA, NA, NA), bfsSMD = c(NaN, NaN, 0.201393824351508, NaN, NaN, 0.251915218801748, 0.461389422414924, 0.847327600666465, NaN, NaN, NaN, 0.0284008547136663, 0.098330118343855, 0.320262992627344, 0.26512121011443, NaN), bfrepSMD = c(3.2425436178894, 36.8293701822416, 0.0865954100114667, 68.5206475865596, 68.7768313720538, 0.00639050298948843, 0.131877512393028, 0.25088787491907, 17376.1099486192, 9.72328315095483, 26.1613308415308, 0.000538516055919849, 0.0243407752343321, 0.000414919994401764, 0.00000000744262267585224, 31.8489025486167)), row.names = c(NA, -16L), class = "data.frame") smd1 <- subset(smdTable, type == "SMD") smd2 <- subset(smdTable, type == "SM") ## BFsSMD (put some tolerance because optimization has slightly changed) bfssmd1 <- with(smd1, BFsSMD(to = to, n1o = n1o, n2o = n2o, tr = tr, n1r = n1r, n2r = n2r, type = "two.sample")) expect_equal(log(bfssmd1), log(smd1$bfsSMD), tolerance = 0.001, info = "BFsSMD (two.sample) as in Table 1 from Pawel and Held (2022)") bfssmd2 <- with(smd2, BFsSMD(to = to, no = n1o, tr = tr, nr = n1r, type = "one.sample")) expect_equal(log(bfssmd2), log(smd2$bfsSMD), tolerance = 0.001, info = "BFsSMD (one.sample) as in Table 1 from Pawel and Held (2022)") ## BFrSMD bfrsmd1 <- with(smd1, BFrSMD(to = to, n1o = n1o, n2o = n2o, tr = tr, n1r = n1r, n2r = n2r, type = "two.sample", ss = 0)) expect_equal(bfrsmd1, smd1$bfrepSMD, info = "BFrSMD (two.sample) as in Table 1 from Pawel and Held (2022)") bfrsmd2 <- with(smd2, BFrSMD(to = to, no = n1o, tr = tr, nr = n1r, type = "one.sample", ss = 0)) expect_equal(bfrsmd2, smd2$bfrepSMD, info = "BFrSMD (one.sample) as in Table 1 from Pawel and Held (2022)")
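## additional sketch checks (not run) for the digits argument of formatBF;
## the expected strings follow from the rounding rules in .formatBF
## expect_equal(formatBF(0.25, digits = 2), "1/4",
##              info = "formatBF respects digits for BF < 1")
## expect_equal(formatBF(2.34, digits = 1), "2.3",
##              info = "formatBF respects digits for BF >= 1")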
## File: BayesRep/inst/tinytest/test-BF.R
#' @title Design prior for effect size #' #' @description Creates a design prior for the effect size which can then be #' used for power and sample size calculations of a replication study. The #' design prior is obtained from updating an initial prior for the effect #' size by the data from the original study. A normal-normal hierarchical #' model is assumed, see Pawel et al. (2022) for details. #' #' @param to Effect estimate from original study #' @param so Standard error of effect estimate from original study #' @param mu The initial prior mean. Defaults to \code{0} #' @param sp The initial prior standard deviation. Defaults to \code{Inf} (an #' improper uniform prior) #' @param tau The initial prior heterogeneity standard deviation. Defaults to #' \code{0} (no heterogeneity) #' @param g The relative initial prior variance \code{g} = #' \code{sp^2}/(\code{tau^2} + \code{so^2}) (alternative parametrization of #' prior standard deviation \code{sp}) #' @param h The relative initial prior heterogeneity variance \code{h} = #' \code{tau^2}/\code{so^2} (alternative parametrization of prior #' heterogeneity standard deviation \code{tau}) #' @param type Shortcut for special parameter combinations. The available #' options are \code{NA}, \code{"conditional"}, \code{"predictive"}, and #' \code{"EB"} (see details). Defaults to \code{NA} #' #' @details The \code{"conditional"} design prior corresponds to a point mass at #' the original effect estimate, i.e., assuming that the true effect size is #' equal to the original effect estimate. The \code{"predictive"} design #' prior is obtained from updating a uniform initial prior by the likelihood #' of the original data. The \code{"EB"} design prior is obtained by #' empirical Bayes estimation of the variance of the normal prior and #' induces adaptive shrinkage that depends on the p-value of the original #' effect estimate. #' #' @return #' #' Returns an object of class \code{"designPrior"} which is a list containing: #' #' \tabular{ll}{ #' \code{dpMean} \tab The computed mean of the design prior \cr #' \tab \cr #' \code{dpVar} \tab The computed variance of the design prior \cr #' \tab \cr #' \code{to} \tab The specified original effect estimate \cr #' \tab \cr #' \code{so} \tab The specified original standard error \cr #' \tab \cr #' \code{mu} \tab The specified mean of the initial prior \cr #' \tab \cr #' \code{sp} \tab The specified standard deviation of the initial prior \cr #' \tab \cr #' \code{tau} \tab The specified heterogeneity variance \cr #' } #' #' @author Samuel Pawel #' #' @references #' #' Pawel, S., Consonni, G., and Held, L. (2022). Bayesian approaches to #' designing replication studies. arXiv preprint. 
#' \doi{10.48550/arXiv.2211.02552} #' #' @seealso \code{\link{pors}}, \code{\link{ssd}} #' #' @examples #' designPrior(to = 1.1, so = 1) #' @export designPrior <- function(to, so, mu = 0, sp = Inf, tau = 0, g = sp^2/(tau^2 + so^2), h = tau^2/so^2, type = c(NA, "conditional", "predictive", "EB")) { ## input checks stopifnot( length(to) == 1, is.numeric(to), is.finite(to), length(so) == 1, is.numeric(so), is.finite(so), 0 < so, length(mu) == 1, is.numeric(mu), is.finite(mu), length(sp) == 1, is.numeric(sp), !is.na(sp), !is.nan(sp), 0 <= sp, length(tau) == 1, is.numeric(tau), ## is.finite(tau), 0 <= tau, length(g) == 1, is.numeric(g), !is.na(g), !is.nan(g), 0 <= g, length(h) == 1, is.numeric(h), is.finite(h), 0 <= h, !is.null(type) ) type <- match.arg(type) ## recompute absolute parameters based on relative ones tau <- sqrt(h)*so sp <- sqrt(g*(so^2 + tau^2)) ## shortcuts if (!is.na(type)) { if (type == "conditional") { mu <- to tau <- 0 sp <- 0 } if (type == "predictive") { sp <- Inf } if (type == "EB") { sp <- sqrt(pmax((to - mu)^2 - so^2 - tau^2, 0)) g <- sp^2/(so^2 + tau^2) } g <- sp^2/(so^2 + tau^2) } ## compute mean and variance of design prior by standard Bayesian updating m <- to/(1 + 1/g) + mu/(1 + g) v <- so^2*(1 + h)/(1 + 1/g) dp <- list(dpMean = m, dpVar = v, to = to, so = so, mu = mu, sp = sp, tau = tau) class(dp) <- "designPrior" return(dp) } #' Print method for class \code{"designPrior"} #' @method print designPrior #' #' @param x Object of class \code{"designPrior"} #' @param ... Other arguments (for consistency with the generic) #' #' @return Prints text summary in the console and invisibly returns the #' \code{"designPrior"} object #' #' @author Samuel Pawel #' #' @examples #' dp <- designPrior(to = 0.5, so = 0.05, sp = 0.2, tau = 0.1) #' print(dp) #' @export print.designPrior <- function(x, ...) { cat("original data and initial prior for effect size\n") cat("------------------------------------------------------------------------") cat("\n to =", signif(x$to, 2), ": original effect estimate") cat("\n so =", signif(x$so, 2), ": standard error of original effect estimate") cat("\n tau =", signif(x$tau, 2), ": assumed heterogeneity standard deviation") cat("\n N(mean = ", signif(x$mu, 2), ", sd = ", signif(x$sp, 2), ") ", ": initial normal prior", sep = "") cat("\n\ndesign prior for effect size\n") cat("------------------------------------------------------------------------") cat("\n N(mean = ", signif(x$dpMean, 2), ", sd = ", signif(sqrt(x$dpVar), 2), ") ", ": normal design prior", sep = "") cat("\n") invisible(x) } #' Density method for class \code{"designPrior"} #' @method density designPrior #' #' @param x Object of class \code{"designPrior"} #' @param ... Other arguments passed to \code{stats::dnorm} #' #' @return Returns the density function of the design prior #' #' @author Samuel Pawel #' #' @examples #' dp <- designPrior(to = 2.3123, so = 0.1, mu = 1.1, tau = 0.2) #' f <- density(dp) #' tseq <- seq(1, 3.5, 0.01) #' plot(tseq, f(theta = tseq), type = "l", xlab = "theta", ylab = "Design prior density") #' @importFrom stats density #' @export density.designPrior <- function(x, ...) { ## return design prior density function for the overall effect size (theta) densFun <- function(theta) { d <- stats::dnorm(x = theta, mean = x$dpMean, sd = sqrt(x$dpVar), ...) return(d) } return(densFun) } #' Plot method for class \code{"designPrior"} #' @method plot designPrior #' #' @param x Object of class \code{"designPrior"} #' @param ... 
Other arguments passed to \code{plot} #' #' @return Plots the density of the design prior #' #' @author Samuel Pawel #' #' @examples #' dp <- designPrior(to = 2.3123, so = 0.1, mu = 1.1, tau = 0.2) #' plot(dp) #' plot(dp, xlim = c(0, 5), length.out = 500) #' @export plot.designPrior <- function(x, ...) { if (!(methods::hasArg(xlim))) { xlim <- x$dpMean + c(-4, 4)*sqrt(x$dpVar) } else { xlim <- list(...)$xlim } if (!(methods::hasArg(length.out))) { length.out <- 1000 } else { length.out <- list(...)$length.out } xseq <- seq(from = xlim[1], to = xlim[2], length.out = length.out) densFun <- density(x) plot(x = xseq, y = densFun(xseq), type = "l", xlab = "Effect size", ylab = "Design prior density", las = 1, ...) }
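## A minimal sketch (not run) contrasting the three shortcut design priors for
## the same original result (to = 2, so = 1): the conditional prior is a point
## mass at to, the predictive prior keeps the full uncertainty of the original
## estimate, and the EB prior shrinks adaptively towards mu = 0.
## designPrior(to = 2, so = 1, type = "conditional")$dpMean # = 2 (point mass)
## designPrior(to = 2, so = 1, type = "predictive")$dpMean  # = 2, dpVar = so^2
## designPrior(to = 2, so = 1, type = "EB")$dpMean          # = 1.5 (shrunken)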
## File: BayesRepDesign/R/designPrior.R
#' @title Compute probability of replication success #' #' @description This function computes the probability of replication success #' based on a success region for the replication effect estimate, a design #' prior, and a replication standard error. If the specified number of sites #' is larger than 1, the supplied success region has to be formulated in #' terms of the meta-analytic replication effect estimate across sites. #' #' @param sregion Success region for replication effect estimate #' @param dprior Design prior object #' @param sr Standard error of replication effect estimate #' @param nsites Number of sites, defaults to \code{1}. The sites are assumed to #' have the same standard error \code{sr} #' #' @return The probability of replication success #' #' @references #' #' Pawel, S., Consonni, G., and Held, L. (2022). Bayesian approaches to #' designing replication studies. arXiv preprint. #' \doi{10.48550/arXiv.2211.02552} #' #' @author Samuel Pawel #' #' @examples #' dprior <- designPrior(to = 1.1, so = 1) #' sregion <- successRegion(intervals = cbind(1.96, Inf)) #' pors(sregion = sregion, dprior = dprior, sr = 1) #' #' @export pors <- function(sregion, dprior, sr, nsites = 1) { ## input checks stopifnot( class(sregion) == "successRegion", class(dprior) == "designPrior", length(sr) > 0, is.numeric(sr), all(is.finite(sr)), all(0 <= sr), length(nsites) == 1, is.numeric(nsites), is.finite(nsites), nsites > 0 ) ps <- vapply(X = sr, FUN = function(sr1) { ## compute parameters of predictive distribution of (average) ## replication effect estimate predmean <- dprior$dpMean predsd <- sqrt(dprior$dpVar + (dprior$tau^2 + sr1^2)/nsites) ## compute probability of replication success p <- sum(stats::pnorm(q = sregion[,2], mean = predmean, sd = predsd) - stats::pnorm(q = sregion[,1], mean = predmean, sd = predsd)) }, FUN.VALUE = 1) return(ps) }
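## A minimal sketch (not run): probability of one-sided significance at 2.5%
## in the replication under conditional and predictive design priors; the
## predictive prior propagates the uncertainty of the original estimate and
## here gives a lower probability (all numbers are made up).
## sr <- 0.5
## sregion <- successRegion(intervals = cbind(stats::qnorm(p = 0.975)*sr, Inf))
## dpCond <- designPrior(to = 1.1, so = 1, type = "conditional")
## dpPred <- designPrior(to = 1.1, so = 1, type = "predictive")
## pors(sregion = sregion, dprior = dpCond, sr = sr) # about 0.59
## pors(sregion = sregion, dprior = dpPred, sr = sr) # about 0.54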
/scratch/gouwar.j/cran-all/cranData/BayesRepDesign/R/pors.R
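A small check (illustrative, not from the package): for a one-sided significance success region, pors() reduces to a single normal tail probability under the predictive distribution N(dpMean, dpVar + tau^2 + sr^2) of the replication estimate.

library(BayesRepDesign)

dp <- designPrior(to = 1.1, so = 1)  # flat initial prior: dpMean = to, dpVar = so^2
sr <- 0.5
sreg <- successRegion(intervals = cbind(1.96*sr, Inf))

## manual tail probability under the predictive distribution
pManual <- stats::pnorm(q = 1.96*sr, mean = dp$dpMean,
                        sd = sqrt(dp$dpVar + dp$tau^2 + sr^2),
                        lower.tail = FALSE)
all.equal(pors(sregion = sreg, dprior = dp, sr = sr), pManual)  # TRUE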
#' @title Sample size related to standard error and unit standard deviation #' #' @description This function computes the sample size related to a specified #' standard error \eqn{\sigma}{\code{se}} and unit standard deviation #' \code{unitSD}, which is the standard deviation of one effective unit (one #' measurement, one pair of measurements, one event, etc.). The relationship #' \eqn{\sigma = \code{unitSD}/\sqrt{n}}{\code{se} = \code{unitSD}/sqrt(n)} is #' assumed. The unit standard deviation depends on the parameter type and #' the assumptions underlying the standard error calculation. The default is #' \code{unitSD = 2} which is, under some assumptions, a reasonable #' approximation to the unit standard deviation for standardized mean #' differences and log odds/hazard/rate ratios, see Section 2.4 in #' Spiegelhalter et al. (2004). #' #' @param se Standard error #' @param unitSD Unit standard deviation. Defaults to \code{2} #' #' @return The sample size corresponding to the specified standard error and #' unit standard deviation #' #' @references #' #' Pawel, S., Consonni, G., and Held, L. (2022). Bayesian approaches to #' designing replication studies. arXiv preprint. #' \doi{10.48550/arXiv.2211.02552} #' #' Spiegelhalter, D.J., Abrams, K.R., Myles, J.P. (2004). Bayesian approaches to #' clinical trials and health care evaluation. Wiley. #' \doi{10.1002/0470092602} #' #' #' @author Samuel Pawel #' #' @examples #' smd1 <- 0.3 #' so1 <- 0.05 #' dprior <- designPrior(to = smd1, so = so1) #' ssd1 <- ssdSig(level = 0.025, dprior = dprior, power = 0.8) #' se2n(se = ssd1$sr, unitSD = 2) # required n #' #' @export se2n <- function(se, unitSD = 2) { ## input checks stopifnot( length(se) > 0, is.numeric(se), all(is.finite(se)), all(0 <= se), length(unitSD) == 1, is.numeric(unitSD), is.finite(unitSD), 0 < unitSD ) ## TODO implement more precise conversions for certain effect size types? n <- ceiling(unitSD^2/se^2) return(n) }
/scratch/gouwar.j/cran-all/cranData/BayesRepDesign/R/se2n.R
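The se to n conversion is a simple round trip; a quick illustration with arbitrary values:

library(BayesRepDesign)

se <- 0.2
n <- se2n(se = se, unitSD = 2)  # ceiling(2^2/0.2^2) = 100
2/sqrt(n)                       # back-computed standard error: 0.2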
#' @title Sample size determination for replication success #' #' @description This function computes the standard error of the replication #' effect estimate required to achieve replication success with a certain #' probability and based on a certain type of success region. #' #' @param sregionfun Function that returns the success region for replication #' effect estimate as a function of the replication standard error #' @param dprior Design prior object #' @param power Desired probability of replication success #' @param nsites Number of sites. Defaults to \code{1}. The sites are assumed to #' have the same sample size #' @param searchInt Search interval for standard errors #' @param ... Other arguments passed to \code{uniroot} #' #' @return Returns an object of class \code{"ssdRS"} which is a list containing: #' \tabular{ll}{ #' \code{designPrior} \tab The specified \code{"designPrior"} object \cr #' \tab \cr #' \code{power} \tab The specified power \cr #' \tab \cr #' \code{powerRecomputed} \tab The recomputed power \cr #' \tab \cr #' \code{sr} \tab The required replication standard error \cr #' \tab \cr #' \code{c} \tab The required relative sample size \code{c = nr/no} #' (assuming \code{so = unitSD/no} and \code{sr = unitSD/nr}) \cr #' } #' #' @references #' #' Pawel, S., Consonni, G., and Held, L. (2022). Bayesian approaches to #' designing replication studies. arXiv preprint. #' \doi{10.48550/arXiv.2211.02552} #' #' @author Samuel Pawel #' #' @examples #' ## specify design prior #' to1 <- 2 #' so1 <- 1 #' dprior <- designPrior(to = to1, so = so1) #' #' ## compute required standard error for significance at one-sided 2.5% #' sregionfunSig <- function(sr, alpha = 0.025) { #' successRegion(intervals = cbind(stats::qnorm(p = 1- alpha)*sr, Inf)) #' } #' ssd(sregionfun = sregionfunSig, dprior = dprior, power = 0.8) #' #' @export ssd <- function(sregionfun, dprior, power, nsites = 1, searchInt = c(.Machine$double.eps^0.5, 4), ...) { ## input checks stopifnot( is.function(sregionfun), class(dprior) == "designPrior", length(power) == 1, is.numeric(power), is.finite(power), 0 < power, power < 1, length(nsites) == 1, is.numeric(nsites), is.finite(nsites), nsites > 0, length(searchInt) == 2, is.numeric(searchInt), all(is.finite(searchInt)), 0 <= searchInt[1], searchInt[1] < searchInt[2] ) ## check whether specified power achievable sregionLim <- sregionfun(.Machine$double.eps) limP <- pors(sregion = sregionLim, dprior = dprior, sr = .Machine$double.eps, nsites = nsites) if (power > limP) { warning(paste0("Power not achievable with specified design prior (at most ", round(limP, 3), ")")) sr <- NaN outPow <- NaN } else { ## numerical search for log replication standard error such that probability ## of replication success = power rootFun <- function(logsr) { sregion <- sregionfun(exp(logsr)) pors(sregion = sregion, dprior = dprior, sr = exp(logsr), nsites = nsites) - power } res <- try(stats::uniroot(f = rootFun, interval = log(searchInt), ... 
= ...)$root, silent = TRUE) if (inherits(res, "try-error")) { sr <- NaN outPow <- NaN warning("Numerical problems, try adjusting searchInt") } else { sr <- exp(res) outPow <- rootFun(log(sr)) + power } } ## create output object out <- list("designPrior" = dprior, "power" = power, "powerRecomputed" = outPow, "sr" = sr, "c" = dprior$so^2/sr^2, type = "method agnostic success region (numerical computation)") class(out) <- "ssdRS" return(out) } #' Print method for class \code{"ssdRS"} #' @method print ssdRS #' #' @param x Object of class \code{"ssdRS"} #' @param ... Other arguments (for consistency with the generic) #' #' @return Prints text summary in the console and invisibly returns the #' \code{"ssdRS"} object #' #' @author Samuel Pawel #' #' @examples #' ## specify design prior #' to1 <- 2 #' so1 <- 1 #' dprior <- designPrior(to = to1, so = so1) #' #' ## compute required standard error for significance at one-sided 2.5% #' sregionfunSig <- function(sr, alpha = 0.025) { #' successRegion(intervals = cbind(stats::qnorm(p = 1- alpha)*sr, Inf)) #' } #' ssd1 <- ssd(sregionfun = sregionfunSig, dprior = dprior, power = 0.8) #' print(ssd1) #' @export print.ssdRS <- function(x, ...) { ## cat("========================================================================\n") cat(" Bayesian sample size calculation for replication studies\n") ## cat(" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n") cat(" ========================================================\n\n") cat("success criterion and computation\n") cat("------------------------------------------------------------------------") cat("\n ", x$type, "\n\n") print(x$designPrior) cat("\nprobability of replication success\n") cat("------------------------------------------------------------------------") cat("\n PoRS =", signif(x$power, 2), ": specified") cat("\n PoRS =", signif(x$powerRecomputed, 2), ": recomputed with sr\n") cat("\nrequired sample size\n") cat("------------------------------------------------------------------------") cat("\n sr =", signif(x$sr, 2), ": required standard error of replication effect estimate") cat("\n c = so^2/sr^2 ~= nr/no =", signif(x$c, 2), ": required relative variance / sample size") cat("\n") ## cat("========================================================================\n") invisible(x) }
/scratch/gouwar.j/cran-all/cranData/BayesRepDesign/R/ssd.R
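ssd() is method-agnostic: any function mapping a replication standard error to a success region can be supplied. A sketch (illustrative values) with a two-sided significance region:

library(BayesRepDesign)

dp <- designPrior(to = 2, so = 1)
sregionfunTwoSided <- function(sr, alpha = 0.05) {
    z <- stats::qnorm(p = 1 - alpha/2)
    successRegion(intervals = rbind(c(-Inf, -z*sr), c(z*sr, Inf)))
}
res <- ssd(sregionfun = sregionfunTwoSided, dprior = dp, power = 0.8)
res$sr               # required replication standard error
res$powerRecomputed  # approximately 0.8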
#' @title Sample size determination for replication success based on #' Bayes factor #' #' @description This function computes the standard error required to achieve #' replication success with a certain probability and based on the Bayes #' factor under normality. The Bayes factor is oriented so that values above #' one indicate evidence for the null hypothesis of the effect size being #' zero, whereas values below one indicate evidence for the hypothesis of #' the effect size being non-zero (with normal prior assigned to it). #' #' @param level Bayes factor level below which replication success is achieved #' @param dprior Design prior object #' @param power Desired probability of replication success #' @param priormean Mean of the normal prior under the alternative. Defaults to #' \code{0} #' @param priorvar Variance of the normal prior under the alternative. Defaults #' to \code{1} #' @param searchInt Interval for numerical search over replication standard #' errors #' #' @return Returns an object of class \code{"ssdRS"}. See \code{\link{ssd}} for #' details. #' #' @references #' #' Pawel, S., Consonni, G., and Held, L. (2022). Bayesian approaches to #' designing replication studies. arXiv preprint. #' \doi{10.48550/arXiv.2211.02552} #' #' @author Samuel Pawel #' #' @examples #' ## specify design prior #' to1 <- 0.2 #' so1 <- 0.05 #' dprior <- designPrior(to = to1, so = so1, tau = 0.03) #' ssdBF01(level = 1/10, dprior = dprior, power = 0.8) #' #' @export ssdBF01 <- function(level, dprior, power, priormean = 0, priorvar = 1, searchInt = c(.Machine$double.eps^0.5, 2)) { ## input checks stopifnot( length(level) == 1, is.numeric(level), is.finite(level), 0 < level, level < 1, class(dprior) == "designPrior", length(power) == 1, is.numeric(power), is.finite(power), 0 < power, power < 1, level < power, length(priormean) == 1, is.numeric(priormean), is.finite(priormean), length(priorvar) == 1, is.numeric(priorvar), is.finite(priorvar), 0 < priorvar, length(searchInt) == 2, is.numeric(searchInt), all(is.finite(searchInt)), 0 <= searchInt[1], searchInt[1] < searchInt[2] ) ## computing bound of probability of replication success limP <- porsBF01(level = level, dprior = dprior, sr = .Machine$double.eps, priormean = priormean, priorvar = priorvar) if (power > limP) { warning(paste0("Power not achievable with specified design prior (at most ", round(limP, 3), ")")) sr <- NaN outPow <- NaN } else { ## computing replication standard error sr rootFun <- function(sr) { porsBF01(level = level, dprior = dprior, sr = sr, priormean = priormean, priorvar = priorvar) - power } res <- try(stats::uniroot(f = rootFun, interval = searchInt)$root, silent = TRUE) if (inherits(res, "try-error")) { sr <- NaN outPow <- NaN warning("Numerical problems, try adjusting searchInt") } else { sr <- res ## computing probability of replication success outPow <- porsBF01(level = level, dprior = dprior, sr = sr, priormean = priormean, priorvar = priorvar) } } ## create output object out <- list("designPrior" = dprior, "power" = power, "powerRecomputed" = outPow, "sr" = sr, "c" = dprior$so^2/sr^2, type = paste("Bayes factor (in favor of H0) <=", signif(level, 3), "(numerical computation)")) class(out) <- "ssdRS" return(out) } #' @title Probability of replication success based on Bayes factor #' #' @description This function computes the probability to achieve replication #' success based on a Bayes factor. 
The Bayes factor is oriented so that #' values above one indicate evidence for the null hypothesis of the effect #' size being zero, whereas values below one indicate evidence for the #' hypothesis of the effect size being non-zero (with normal prior assigned #' to it). #' #' @param level Bayes factor level below which replication success is achieved #' @param dprior Design prior object #' @param sr Replication standard error #' @param priormean Mean of the normal prior under the alternative. Defaults to #' \code{0} #' @param priorvar Variance of the normal prior under the alternative. Defaults #' to \code{1} #' #' @return The probability to achieve replication success #' #' @references #' #' Pawel, S., Consonni, G., and Held, L. (2022). Bayesian approaches to #' designing replication studies. arXiv preprint. #' \doi{10.48550/arXiv.2211.02552} #' #' @author Samuel Pawel #' #' @examples #' ## specify design prior #' to1 <- 2 #' so1 <- 0.05 #' dprior <- designPrior(to = to1, so = so1, tau = 0.03) #' porsBF01(level = 1/10, dprior = dprior, sr = c(0.05, 0.04)) #' #' @export porsBF01 <- function(level, dprior, sr, priormean = 0, priorvar = 1) { ## input checks stopifnot( length(level) == 1, is.numeric(level), is.finite(level), 0 < level, class(dprior) == "designPrior", length(sr) > 0, is.numeric(sr), all(is.finite(sr)), all(0 <= sr), length(priormean) == 1, is.numeric(priormean), is.finite(priormean), length(priorvar) == 1, is.numeric(priorvar), is.finite(priorvar), 0 < priorvar ) ps <- vapply(X = sr, FUN = function(sr1) { ## compute probability of replication success g <- priorvar/sr1^2 A <- sr1^2*(1 + 1/g)*(priormean^2/priorvar - 2*log(level) + log(1 + g)) ## success region depends on direction of prior mean sregion <- successRegion(intervals = rbind(c(-Inf, -sqrt(A) - priormean/g), c(sqrt(A) - priormean/g, Inf))) p <- pors(sregion = sregion, dprior = dprior, sr = sr1) return(p) }, FUN.VALUE = 1) return(ps) } ## ## checking some stuff ## BF01a <- function(tr, sr, m, v) { ## stats::dnorm(x = tr, mean = 0, sd = sr) / ## stats::dnorm(x = tr, mean = m, sd = sqrt(v + sr^2)) ## } ## BF01b <- function(tr, sr, m, v) { ## sqrt(1 + v/sr^2)*exp(-0.5*((tr + m*sr^2/v)^2*v/sr^2/(sr^2 + v) - m^2/v)) ## } ## tr <- 0.1 ## sr <- 0.05 ## m <- -0.2 ## v <- 0.3 ## BF01a(tr = tr, sr = sr, m = m, v = v) ## BF01b(tr = tr, sr = sr, m = m, v = v)
/scratch/gouwar.j/cran-all/cranData/BayesRepDesign/R/ssdBF01.R
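A sketch (not from the package) checking the success-region algebra in porsBF01(): at the boundary of the success region, the Bayes factor, defined as the density ratio from the commented code above, should equal the chosen level; all values are arbitrary.

level <- 1/10; sr <- 0.05; priormean <- 0.2; priorvar <- 1
g <- priorvar/sr^2
A <- sr^2*(1 + 1/g)*(priormean^2/priorvar - 2*log(level) + log(1 + g))
trBound <- sqrt(A) - priormean/g  # upper boundary of the success region
BF01 <- stats::dnorm(x = trBound, mean = 0, sd = sr) /
    stats::dnorm(x = trBound, mean = priormean, sd = sqrt(priorvar + sr^2))
all.equal(BF01, level)  # TRUE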
#' @title Sample size determination for replication success based on #' replication Bayes factor #' #' @description This function computes the standard error required to achieve #' replication success with a certain probability and based on the #' replication Bayes factor under normality. The replication Bayes factor is #' assumed to be oriented so that values below one indicate replication #' success, whereas values above one indicate evidence for the null #' hypothesis. #' #' @param level Bayes factor level below which replication success is achieved #' @param dprior Design prior object #' @param power Desired probability of replication success #' @param searchInt Interval for numerical search over replication standard #' errors #' @param paradox Should the probability of replication success be computed #' allowing for the replication paradox (replication success when the effect #' estimates from original and replication study have a different sign)? #' Defaults to \code{TRUE} #' #' @return Returns an object of class \code{"ssdRS"}. See \code{\link{ssd}} for #' details. #' #' @references #' #' Pawel, S., Consonni, G., and Held, L. (2022). Bayesian approaches to #' designing replication studies. arXiv preprint. #' \doi{10.48550/arXiv.2211.02552} #' #' Verhagen, J. and Wagenmakers, E. J. (2014). Bayesian tests to quantify the result #' of a replication attempt. Journal of Experimental Psychology: General, #' 145:1457-1475. \doi{10.1037/a0036731} #' #' Ly, A., Etz, A., Marsman, M., and Wagenmakers, E.-J. (2018). Replication Bayes #' factors from evidence updating. Behavior Research Methods, 51(6), 2498-2508. #' \doi{10.3758/s13428-018-1092-x} #' #' @author Samuel Pawel #' #' @examples #' ## specify design prior #' to1 <- 0.2 #' so1 <- 0.05 #' dprior <- designPrior(to = to1, so = so1, tau = 0.03) #' ssdBFr(level = 1/10, dprior = dprior, power = 0.8) #' #' @export ssdBFr <- function(level, dprior, power, searchInt = c(.Machine$double.eps^0.5, 2), paradox = TRUE) { ## input checks stopifnot( length(level) == 1, is.numeric(level), is.finite(level), 0 < level, level < 1, class(dprior) == "designPrior", length(power) == 1, is.numeric(power), is.finite(power), 0 < power, power < 1, level < power, length(searchInt) == 2, is.numeric(searchInt), all(is.finite(searchInt)), 0 <= searchInt[1], searchInt[1] < searchInt[2], length(paradox) == 1, is.logical(paradox), !is.na(paradox) ) ## computing bound of probability of replication success limP <- porsBFr(level = level, dprior = dprior, sr = .Machine$double.eps, paradox = paradox) if (power > limP) { warning(paste0("Power not achievable with specified design prior (at most ", round(limP, 3), ")")) sr <- NaN outPow <- NaN } else { ## computing replication standard error sr rootFun <- function(sr) { porsBFr(level = level, dprior = dprior, sr = sr, paradox = paradox) - power } res <- try(stats::uniroot(f = rootFun, interval = searchInt)$root, silent = TRUE) if (inherits(res, "try-error")) { sr <- NaN outPow <- NaN warning("Numerical problems, try adjusting searchInt") } else { sr <- res ## computing probability of replication success outPow <- porsBFr(level = level, dprior = dprior, sr = sr, paradox = paradox) } } ## create output object out <- list("designPrior" = dprior, "power" = power, "powerRecomputed" = outPow, "sr" = sr, "c" = dprior$so^2/sr^2, type = paste("replication Bayes factor <=", signif(level, 3), "(numerical computation)")) class(out) <- "ssdRS" return(out) } #' @title Probability of replication success based on replication Bayes factor #' #' 
@description This function computes the probability to achieve replication
#'     success based on the replication Bayes factor. The replication Bayes
#'     factor is assumed to be oriented so that values below one indicate
#'     replication success, whereas values above one indicate evidence for the
#'     null hypothesis.
#'
#' @param level Bayes factor level below which replication success is achieved
#' @param dprior Design prior object
#' @param sr Replication standard error
#' @param paradox Should the probability of replication success be computed
#'     allowing for the replication paradox (replication success when the
#'     effect estimates from original and replication study have a different
#'     sign)? Defaults to \code{TRUE}
#'
#' @return The probability to achieve replication success
#'
#' @references
#'
#' Pawel, S., Consonni, G., and Held, L. (2022). Bayesian approaches to
#' designing replication studies. arXiv preprint.
#' \doi{10.48550/arXiv.2211.02552}
#'
#' Verhagen, J. and Wagenmakers, E. J. (2014). Bayesian tests to quantify the
#' result of a replication attempt. Journal of Experimental Psychology:
#' General, 145:1457-1475. \doi{10.1037/a0036731}
#'
#' Ly, A., Etz, A., Marsman, M., and Wagenmakers, E.-J. (2018). Replication
#' Bayes factors from evidence updating. Behavior Research Methods, 51(6),
#' 2498-2508. \doi{10.3758/s13428-018-1092-x}
#'
#' @author Samuel Pawel
#'
#' @examples
#' ## specify design prior
#' to1 <- 0.2
#' so1 <- 0.05
#' dprior <- designPrior(to = to1, so = so1, tau = 0.03)
#' porsBFr(level = 1/10, dprior = dprior, sr = c(0.05, 0.04))
#'
#' @export
porsBFr <- function(level, dprior, sr, paradox = TRUE) {
    ## input checks
    stopifnot(
        length(level) == 1,
        is.numeric(level),
        is.finite(level),
        0 < level,
        class(dprior) == "designPrior",
        length(sr) > 0,
        is.numeric(sr),
        all(is.finite(sr)),
        all(0 <= sr),
        length(paradox) == 1,
        is.logical(paradox),
        !is.na(paradox)
    )

    ps <- vapply(X = sr, FUN = function(sr1) {
        ## compute probability of replication success
        to <- dprior$to
        so <- dprior$so
        c <- so^2/sr1^2
        A <- sr1^2*(1 + 1/c)*(to^2/so^2 - 2*log(level) + log(1 + c))
        if (paradox) {
            ## success region that allows for replication success with
            ## replication paradox
            sregion <- successRegion(intervals = rbind(c(-Inf, -sqrt(A) - to/c),
                                                       c(sqrt(A) - to/c, Inf)))
        } else {
            ## success region depends on direction of original estimate
            if (sign(to) == 1) {
                sregion <- successRegion(intervals = cbind(sqrt(A) - to/c, Inf))
            } else {
                sregion <- successRegion(intervals = cbind(-Inf, -sqrt(A) - to/c))
            }
        }
        p <- pors(sregion = sregion, dprior = dprior, sr = sr1)
        return(p)
    }, FUN.VALUE = 1)
    return(ps)
}

## ## checking some stuff
## BFr <- function(to, tr, so, sr) {
##     stats::dnorm(x = tr, sd = sr) /
##         stats::dnorm(x = tr, mean = to, sd = sqrt(so^2 + sr^2))
## }
## to <- 0.2
## so <- 0.04
## sr <- 0.06
## zo <- to/so
## c <- so^2/sr^2
## gamma <- 1/10
## A <- sr^2*(1 + sr^2/so^2)*(to^2/so^2 - 2*log(gamma) + log(1 + so^2/sr^2))
## tr <- sqrt(A) - to*sr^2/so^2
## BFr(to, tr, so, sr)
## sRegionBFr <- function(sr) {
##     A <- sr^2*(1 + sr^2/so^2)*(to^2/so^2 - 2*log(gamma) + log(1 + so^2/sr^2))
##     successRegion(intervals = rbind(c(-Inf, -sqrt(A) - to*sr^2/so^2),
##                                     c(sqrt(A) - to*sr^2/so^2, Inf)))
## }
## sRegionBFr(0.05)
## ssdRS(sregionfun = sRegionBFr, dprior = designPrior(to = to, so = so),
##       power = 0.8)
/scratch/gouwar.j/cran-all/cranData/BayesRepDesign/R/ssdBFr.R
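The paradox argument only controls whether the sign-discordant part of the success region counts as success, so disallowing it can only remove probability mass; a quick illustration with arbitrary values:

library(BayesRepDesign)

dp <- designPrior(to = 0.2, so = 0.05)
pPar <- porsBFr(level = 1/10, dprior = dp, sr = 0.05, paradox = TRUE)
pNoPar <- porsBFr(level = 1/10, dprior = dp, sr = 0.05, paradox = FALSE)
pPar >= pNoPar  # TRUE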
#' @title Sample size determination for replication success based on
#'     the sceptical Bayes factor
#'
#' @description This function computes the standard error required to achieve
#'     replication success with a certain probability and based on the sceptical
#'     Bayes factor. The sceptical Bayes factor is assumed to be oriented so
#'     that values below one indicate replication success.
#'
#' @param level Threshold for the sceptical Bayes factor below which replication
#'     success is achieved
#' @param dprior Design prior object
#' @param power Desired probability of replication success
#' @param searchInt Interval for numerical search over replication standard
#'     errors
#' @param paradox Should the probability of replication success be computed
#'     allowing for the replication paradox (replication success when the effect
#'     estimates from original and replication study have a different sign)?
#'     Defaults to \code{TRUE}
#'
#' @return Returns an object of class \code{"ssdRS"}. See \code{\link{ssd}} for
#'     details.
#'
#' @references
#'
#' Pawel, S., Consonni, G., and Held, L. (2022). Bayesian approaches to
#' designing replication studies. arXiv preprint.
#' \doi{10.48550/arXiv.2211.02552}
#'
#' Pawel, S. and Held, L. (2020). The sceptical Bayes factor for the assessment
#' of replication success. Journal of the Royal Statistical Society: Series B
#' (Statistical Methodology), 84(3), 879-911. \doi{10.1111/rssb.12491}
#'
#' @author Samuel Pawel
#'
#' @examples
#' ## specify design prior
#' to1 <- 0.2
#' so1 <- 0.05
#' dprior <- designPrior(to = to1, so = so1, tau = 0.03)
#' ssdBFs(level = 1/10, dprior = dprior, power = 0.9)
#'
#' @export
ssdBFs <- function(level, dprior, power,
                   searchInt = c(.Machine$double.eps^0.5, 2),
                   paradox = TRUE) {
    ## input checks
    stopifnot(
        length(level) == 1,
        is.numeric(level),
        is.finite(level),
        0 < level, level < 1,
        class(dprior) == "designPrior",
        length(power) == 1,
        is.numeric(power),
        is.finite(power),
        0 < power, power < 1,
        level < power,
        length(searchInt) == 2,
        is.numeric(searchInt),
        all(is.finite(searchInt)),
        0 <= searchInt[1], searchInt[1] < searchInt[2],
        length(paradox) == 1,
        is.logical(paradox),
        !is.na(paradox)
    )

    ## computing bound of probability of replication success
    limP <- porsBFs(level = level, dprior = dprior,
                    sr = .Machine$double.eps^0.5, paradox = paradox)
    if (power > limP) {
        warning(paste0("Power not achievable with specified design prior (at most ",
                       round(limP, 3), ")"))
        sr <- NaN
        outPow <- NaN
    } else {
        ## computing replication standard error sr
        rootFun <- function(sr) {
            porsBFs(level = level, dprior = dprior, sr = sr,
                    paradox = paradox) - power
        }
        res <- try(stats::uniroot(f = rootFun, interval = searchInt)$root,
                   silent = TRUE)
        if (inherits(res, "try-error")) {
            sr <- NaN
            outPow <- NaN
            warning("Numerical problems, try adjusting searchInt")
        } else {
            sr <- res
            ## computing probability of replication success
            outPow <- porsBFs(level = level, dprior = dprior, sr = sr,
                              paradox = paradox)
        }
    }

    ## create output object
    out <- list("designPrior" = dprior, "power" = power,
                "powerRecomputed" = outPow, "sr" = sr,
                "c" = dprior$so^2/sr^2,
                type = paste("sceptical Bayes factor <=", signif(level, 3),
                             "(numerical computation)"))
    class(out) <- "ssdRS"
    return(out)
}

#' @title Probability of replication success based on the sceptical Bayes factor
#'
#' @description This function computes the probability to achieve replication
#'     success based on the sceptical Bayes factor. The sceptical Bayes factor
#'     is assumed to be oriented so that values below one indicate replication
#'     success.
#'
#' @param level Threshold for the sceptical Bayes factor below which replication
#'     success is achieved
#' @param dprior Design prior object
#' @param sr Replication standard error
#' @param paradox Should the probability of replication success be computed
#'     allowing for the replication paradox (replication success when the effect
#'     estimates from original and replication study have a different sign)?
#'     Defaults to \code{TRUE}
#'
#' @return The probability to achieve replication success
#'
#' @references
#'
#' Pawel, S., Consonni, G., and Held, L. (2022). Bayesian approaches to
#' designing replication studies. arXiv preprint.
#' \doi{10.48550/arXiv.2211.02552}
#'
#' Pawel, S. and Held, L. (2020). The sceptical Bayes factor for the assessment
#' of replication success. Journal of the Royal Statistical Society: Series B
#' (Statistical Methodology), 84(3), 879-911. \doi{10.1111/rssb.12491}
#'
#' @author Samuel Pawel
#'
#' @examples
#' ## specify design prior
#' to1 <- 0.2
#' so1 <- 0.05
#' dprior <- designPrior(to = to1, so = so1)
#' porsBFs(level = 1/3, dprior = dprior, sr = 0.05)
#'
#' @export
porsBFs <- function(level, dprior, sr, paradox = TRUE) {
    ## input checks
    stopifnot(
        length(level) == 1,
        is.numeric(level),
        is.finite(level),
        0 < level, level < 1,
        class(dprior) == "designPrior",
        length(sr) > 0,
        is.numeric(sr),
        all(is.finite(sr)),
        all(0 <= sr)
    )

    ps <- vapply(X = sr, FUN = function(sr1) {
        ## compute success region
        so <- dprior$so
        to <- dprior$to
        zo <- to/so
        q <- lamW::lambertWm1(x = -zo^2/level^2*exp(-zo^2))
        s <- -zo^2/q - 1
        if (is.nan(s) | s < 0) {
            p <- 0
        } else {
            c <- so^2/sr1^2
            A <- (-2*log(level) + 2*log(1 + c) - 2*log(1 + s*c) +
                  zo^2/(1 - s))*(1/c + s)*(sr1^2 + so^2)/(1 - s)
            M <- to*(1/c + s)/(1 - s)
            if (s < 1) {
                intsBothsides <- rbind(c(-Inf, -sqrt(A) - M),
                                       c(sqrt(A) - M, Inf))
                ## replication paradox can occur in this situation
                if (paradox) {
                    ints <- intsBothsides
                } else {
                    if (sign(to) > 0) {
                        ints <- intsBothsides[2,,drop = FALSE]
                    } else {
                        ints <- intsBothsides[1,,drop = FALSE]
                    }
                }
            } else if (isTRUE(all.equal(s, 1, tolerance = 0.0001))) {
                X <- 2*log(level)*(so^2 + sr1^2)/to
                if (sign(to) > 0) {
                    ints <- cbind(to - X, Inf)
                } else {
                    ints <- cbind(-Inf, to + X)
                }
            } else {
                ints <- cbind(-sqrt(A) - M, sqrt(A) - M)
            }
            sregion <- successRegion(intervals = ints)
            p <- pors(sregion = sregion, dprior = dprior, sr = sr1)
        }
        return(p)
    }, FUN.VALUE = 1)
    return(ps)
}

## ## some checks
## so <- 1.5
## sr <- 0.8
## s <- c(0.8, 1.5)
## gamma <- 1/10
## to <- 2
## tr <- 1.5
## ## should be the same
## tr^2/(sr^2 + s*so^2) - (tr - to)^2/(so^2 + sr^2)
## so^2*(1 - s)/((sr^2 + s*so^2)*(sr^2 + so^2))*(tr + to*(sr^2 + s*so^2)/so^2/(1 - s))^2 +
##     to^2/so^2/(s - 1)
/scratch/gouwar.j/cran-all/cranData/BayesRepDesign/R/ssdBFs.R
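porsBFs() returns 0 when the sceptical relative prior variance s cannot be computed because the original study is too unconvincing; a sketch assuming, as the function itself does, that the lamW package is available (lambertWm1 is evaluated outside its domain and returns NaN, possibly with a warning).

library(BayesRepDesign)

## zo = 1 is too weak for level = 1/3: s is NaN and the probability is 0
dpWeak <- designPrior(to = 0.1, so = 0.1)
porsBFs(level = 1/3, dprior = dpWeak, sr = 0.1)  # 0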
#' @title Sample size determination for replication success based on #' effect size equivalence #' #' @description This function computes the standard error required to achieve #' replication success with a certain probability and based on effect size #' equivalence of original and replication effect size. Effect size #' equivalence is defined by the confidence interval for the difference #' between the original and replication effect sizes falling within an #' equivalence region around zero defined by the specified margin. #' #' @param level 1 - confidence level of confidence interval for effect size #' difference #' @param dprior Design prior object #' @param power Desired probability of replication success #' @param margin The equivalence margin > 0 for the symmetric equivalence region #' around zero #' @param searchInt Interval for numerical search over replication standard #' errors #' #' @return Returns an object of class \code{"ssdRS"}. See \code{\link{ssd}} for #' details. #' #' @references #' #' Pawel, S., Consonni, G., and Held, L. (2022). Bayesian approaches to #' designing replication studies. arXiv preprint. #' \doi{10.48550/arXiv.2211.02552} #' #' Anderson, S. F. and Maxwell, S. E. (2016). There's more than one way to #' conduct a replication study: Beyond statistical significance. Psychological #' Methods, 21(1), 1-12. \doi{10.1037/met0000051} #' #' @author Samuel Pawel #' #' @examples #' ## specify design prior #' to1 <- 0.2 #' so1 <- 0.05 #' dprior <- designPrior(to = to1, so = so1, tau = 0.05) #' ssdEqu(level = 0.1, dprior = dprior, power = 0.8, margin = 0.2) #' #' @export ssdEqu <- function(level, dprior, power, margin, searchInt = c(0, 2)) { ## input checks stopifnot( length(level) == 1, is.numeric(level), is.finite(level), 0 < level, level < 1, class(dprior) == "designPrior", length(power) == 1, is.numeric(power), is.finite(power), 0 < power, power < 1, length(margin) == 1, is.numeric(margin), is.finite(margin), margin > 0, length(searchInt) == 2, is.numeric(searchInt), all(is.finite(searchInt)), 0 <= searchInt[1], searchInt[1] < searchInt[2] ) ## computing bound of margin za <- stats::qnorm(p = 1 - level/2) so <- dprior$so marginLim <- za*so if (margin <= marginLim) { warning(paste0("Equivalence not achievable with specified margin (at least ", round(marginLim, 3), ")")) sr <- NaN outPow <- NaN } else { ## computing bound of probability of replication success limP <- porsEqu(level = level, dprior = dprior, margin = margin, sr = 0) if (power > limP) { warning(paste0("Power not achievable with specified design prior (at most ", round(limP, 3), ")")) sr <- NaN outPow <- NaN } else { ## computing replication standard error sr rootFun <- function(sr) { porsEqu(level = level, dprior = dprior, margin = margin, sr = sr) - power } res <- try(stats::uniroot(f = rootFun, interval = searchInt)$root, silent = TRUE) if (inherits(res, "try-error")) { sr <- NaN outPow <- NaN warning("Numerical problems, try adjusting searchInt") } else { sr <- res ## computing probability of replication success outPow <- porsEqu(level = level, dprior = dprior, margin = margin, sr = sr) } } } ## create output object out <- list("designPrior" = dprior, "power" = power, "powerRecomputed" = outPow, "sr" = sr, "c" = so^2/sr^2, type = paste("equivalence with confidence level =", signif(1 - level, 3), "and margin =", signif(margin, 3), "(numerical computation)")) class(out) <- "ssdRS" return(out) } #' @title Probability of replication success based on effect size equivalence #' #' @description This function 
computes the probability to achieve replication
#'     success based on equivalence of original and replication effect size.
#'     Effect size equivalence is defined by the confidence interval for the
#'     difference between the original and replication effect sizes falling
#'     within an equivalence region around zero defined by the specified margin.
#'
#' @param level 1 - confidence level of confidence interval for effect size
#'     difference
#' @param dprior Design prior object
#' @param margin The equivalence margin > 0 for the symmetric equivalence region
#'     around zero
#' @param sr Replication standard error
#'
#' @return The probability to achieve replication success
#'
#' @references
#'
#' Pawel, S., Consonni, G., and Held, L. (2022). Bayesian approaches to
#' designing replication studies. arXiv preprint.
#' \doi{10.48550/arXiv.2211.02552}
#'
#' Anderson, S. F. and Maxwell, S. E. (2016). There's more than one way to
#' conduct a replication study: Beyond statistical significance. Psychological
#' Methods, 21(1), 1-12. \doi{10.1037/met0000051}
#'
#' @author Samuel Pawel
#'
#' @examples
#' ## specify design prior
#' to1 <- 2
#' so1 <- 0.05
#' dprior <- designPrior(to = to1, so = so1, tau = 0.1)
#' porsEqu(level = 0.1, dprior = dprior, margin = 0.3, sr = c(0.05, 0.03))
#'
#' @export
porsEqu <- function(level, dprior, margin, sr) {
    ## input checks
    stopifnot(
        length(level) == 1,
        is.numeric(level),
        is.finite(level),
        0 < level, level < 1,
        class(dprior) == "designPrior",
        length(margin) == 1,
        is.numeric(margin),
        is.finite(margin),
        margin > 0,
        length(sr) > 0,
        is.numeric(sr),
        all(is.finite(sr)),
        all(0 <= sr)
    )

    ps <- vapply(X = sr, FUN = function(sr1) {
        ## compute probability of replication success
        to <- dprior$to
        so <- dprior$so
        sdiff <- sqrt(so^2 + sr1^2)
        za <- stats::qnorm(p = 1 - level/2)
        if (margin <= za*sdiff) {
            p <- 0
        } else {
            sregion <- successRegion(intervals = cbind(to - margin + za*sdiff,
                                                       to + margin - za*sdiff))
            p <- pors(sregion = sregion, dprior = dprior, sr = sr1)
        }
        return(p)
    }, FUN.VALUE = 1)
    return(ps)
}

## ## checking some stuff
## ciDiff <- function(to, tr, so, sr, alpha = 0.05) {
##     za <- stats::qnorm(p = 1 - alpha)
##     sdiff <- sqrt(so^2 + sr^2)
##     ci <- tr - to + c(-1, 1)*sdiff*za
##     return(ci)
## }
## to <- 0.2
## so <- 0.04
## sr <- 0.06
## sdiff <- sqrt(so^2 + sr^2)
## delta <- 0.02
## za <- stats::qnorm(p = 1 - 0.05)
## tr <- to - delta + za*sdiff
## ciDiff(to = to, tr = tr, so = so, sr = sr)
/scratch/gouwar.j/cran-all/cranData/BayesRepDesign/R/ssdEqu.R
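A consistency sketch (not from the package): at the boundary of the porsEqu() success region, the confidence interval for the difference tr - to exactly touches the equivalence margin; values are arbitrary.

level <- 0.1; margin <- 0.3; to <- 0.2; so <- 0.05; sr <- 0.05
za <- stats::qnorm(p = 1 - level/2)
sdiff <- sqrt(so^2 + sr^2)
trBound <- to + margin - za*sdiff   # upper end of the success region
ciUpper <- trBound - to + za*sdiff  # upper limit of the CI for the difference
all.equal(ciUpper, margin)  # TRUE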
#' @title Sample size determination for replication success based on
#'     meta-analytic significance
#'
#' @description This function computes the standard error required to achieve
#'     replication success with a certain probability and based on statistical
#'     significance of the fixed-effects meta-analytic effect estimate obtained
#'     from combining original and replication effect estimates.
#'
#' @param level Significance level for the replication effect estimate
#'     (one-sided and in the same direction as the original effect estimate)
#' @param dprior Design prior object
#' @param power Desired probability of replication success
#' @param searchInt Interval for numerical search over replication standard
#'     errors
#'
#' @return Returns an object of class \code{"ssdRS"}. See \code{\link{ssd}} for
#'     details.
#'
#' @references
#'
#' Pawel, S., Consonni, G., and Held, L. (2022). Bayesian approaches to
#' designing replication studies. arXiv preprint.
#' \doi{10.48550/arXiv.2211.02552}
#'
#' @author Samuel Pawel
#'
#' @examples
#' ## specify design prior
#' to1 <- 2
#' so1 <- 1
#' dprior <- designPrior(to = to1, so = so1, tau = 0.25, sp = Inf)
#' ssdMeta(level = 0.025^2, dprior = dprior, power = 0.95)
#'
#' @export
ssdMeta <- function(level, dprior, power, searchInt = c(0, 10)) {
    ## input checks
    stopifnot(
        length(level) == 1,
        is.numeric(level),
        is.finite(level),
        0 < level, level < 1,
        class(dprior) == "designPrior",
        length(power) == 1,
        is.numeric(power),
        is.finite(power),
        0 < power, power < 1,
        level < power,
        length(searchInt) == 2,
        is.numeric(searchInt),
        all(is.finite(searchInt)),
        0 <= searchInt[1], searchInt[1] < searchInt[2]
    )

    ## computing bound of probability of replication success
    limP <- porsMeta(level = level, dprior = dprior, sr = 0)
    if (power > limP) {
        warning(paste0("Power not achievable with specified design prior (at most ",
                       round(limP, 3), ")"))
        sr <- NaN
        outPow <- NaN
    } else {
        ## computing replication standard error sr
        rootFun <- function(sr) {
            porsMeta(level = level, dprior = dprior, sr = sr) - power
        }
        res <- try(stats::uniroot(f = rootFun, interval = searchInt)$root,
                   silent = TRUE)
        if (inherits(res, "try-error")) {
            ## TODO it happens that the power is always larger, what should we
            ## do then?
            sr <- NaN
            outPow <- NaN
            warning("Numerical problems, try adjusting searchInt")
        } else {
            sr <- res
            ## computing probability of replication success
            outPow <- porsMeta(level = level, dprior = dprior, sr = sr)
        }
    }

    ## create output object
    out <- list("designPrior" = dprior, "power" = power,
                "powerRecomputed" = outPow, "sr" = sr,
                "c" = dprior$so^2/sr^2,
                type = paste("meta-analytic p-value <=", signif(level, 3),
                             "(numerical computation)"))
    class(out) <- "ssdRS"
    return(out)
}

#' @title Probability of replication success based on meta-analytic significance
#'
#' @description This function computes the probability to achieve replication
#'     success based on statistical significance of the fixed-effects
#'     meta-analytic effect estimate obtained from combining original and
#'     replication effect estimates.
#'
#' @param level Significance level for p-value of the meta-analytic effect
#'     estimate (one-sided and in the same direction as the original effect
#'     estimate)
#' @param dprior Design prior object
#' @param sr Replication standard error
#'
#' @return The probability to achieve replication success
#'
#' @references
#'
#' Pawel, S., Consonni, G., and Held, L. (2022). Bayesian approaches to
#' designing replication studies. arXiv preprint.
#' \doi{10.48550/arXiv.2211.02552}
#'
#' @author Samuel Pawel
#'
#' @examples
#' ## specify design prior
#' to1 <- 2
#' so1 <- 1
#' dprior <- designPrior(to = to1, so = so1, tau = 0.1)
#' porsMeta(level = 0.025^2, dprior = dprior, sr = c(0.2, 0.1))
#'
#' @export
porsMeta <- function(level, dprior, sr) {
    ## input checks
    stopifnot(
        length(level) == 1,
        is.numeric(level),
        is.finite(level),
        0 < level, level < 1,
        class(dprior) == "designPrior",
        length(sr) > 0,
        is.numeric(sr),
        all(is.finite(sr)),
        all(0 <= sr)
    )

    ps <- vapply(X = sr, FUN = function(sr1) {
        ## success region depends on direction of original estimate
        so <- dprior$so
        to <- dprior$to
        if (sign(dprior$to) >= 0) {
            lowerLim <- stats::qnorm(p = 1 - level)*sr1*sqrt(1 + sr1^2/so^2) -
                to*sr1^2/so^2
            sregion <- successRegion(intervals = cbind(lowerLim, Inf))
        } else {
            upperLim <- stats::qnorm(p = level)*sr1*sqrt(1 + sr1^2/so^2) -
                to*sr1^2/so^2
            sregion <- successRegion(cbind(-Inf, upperLim))
        }
        ## compute probability of replication success
        pors(sregion = sregion, dprior = dprior, sr = sr1)
    }, FUN.VALUE = 1)
    return(ps)
}

## ## checking some stuff
## pmeta <- function(to, tr, so, sr) {
##     sm <- 1/sqrt(1/so^2 + 1/sr^2)
##     tm <- sm^2*(to/so^2 + tr/sr^2)
##     return(stats::pnorm(q = tm/sm, lower = FALSE))
## }
## to <- 0.5
## so <- 0.8
## sr <- 1.1
## sm <- 1/sqrt(1/so^2 + 1/sr^2)
## za <- stats::qnorm(p = 0.975)
## tr <- sr^2*(za/sm - to/so^2)
## tr <- sr*za*sqrt(1 + sr^2/so^2) - to*sr^2/so^2
## pmeta(to = to, tr = tr, so = so, sr = sr)
/scratch/gouwar.j/cran-all/cranData/BayesRepDesign/R/ssdMeta.R
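A sketch (not from the package) checking the porsMeta() threshold: at the lower limit of the success region, the fixed-effects meta-analytic z-statistic equals the significance quantile; values are arbitrary.

level <- 0.025; to <- 0.5; so <- 0.8; sr <- 1.1
za <- stats::qnorm(p = 1 - level)
trBound <- za*sr*sqrt(1 + sr^2/so^2) - to*sr^2/so^2
## fixed-effects meta-analysis of original and replication estimate
sm <- 1/sqrt(1/so^2 + 1/sr^2)
tm <- sm^2*(to/so^2 + trBound/sr^2)
all.equal(tm/sm, za)  # TRUE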
#' @title Sample size determination for replication success based on #' the sceptical p-value #' #' @description This function computes the standard error required to achieve #' replication success with a certain probability and based on the sceptical #' p-value. #' #' @details The sceptical p-value is assumed to be uncalibrated as in Held #' (2020). The package ReplicationSuccess allows for sample size and power #' calculations with the recalibrated sceptical p-value #' (\url{https://CRAN.R-project.org/package=ReplicationSuccess}). #' #' @param level Threshold for the (one-sided) sceptical p-value below which #' replication success is achieved #' @param dprior Design prior object #' @param power Desired probability of replication success #' #' @return Returns an object of class \code{"ssdRS"}. See \code{\link{ssd}} for #' details. #' #' @references #' #' Pawel, S., Consonni, G., and Held, L. (2022). Bayesian approaches to #' designing replication studies. arXiv preprint. #' \doi{10.48550/arXiv.2211.02552} #' #' Held, L. (2020). A new standard for the analysis and design of replication #' studies (with discussion). Journal of the Royal Statistical Society: Series A #' (Statistics in Society), 183(2), 431-448. \doi{10.1111/rssa.12493} #' #' @author Samuel Pawel #' #' @examples #' ## specify design prior #' to1 <- 0.2 #' so1 <- 0.05 #' dprior <- designPrior(to = to1, so = so1, tau = 0.03) #' ssdPs(level = 0.05, dprior = dprior, power = 0.9) #' #' @export ssdPs <- function(level, dprior, power) { ## input checks stopifnot( length(level) == 1, is.numeric(level), is.finite(level), 0 < level, level < 1, class(dprior) == "designPrior", length(power) == 1, is.numeric(power), is.finite(power), 0 < power, power < 1, level < power ) ## computing bound of probability of replication success limP <- porsPs(level = level, dprior = dprior, sr = 0) if (power > limP) { warning(paste0("Power not achievable with specified design prior (at most ", round(limP, 3), ")")) sr <- NaN outPow <- NaN } else { ## computing replication standard error sr dpmean <- dprior$dpMean dpvar <- dprior$dpVar tau <- dprior$tau to <- dprior$to so <- dprior$so zo <- to/so if (sign(to) > 0) { za <- stats::qnorm(p = 1 - level) } else { za <- stats::qnorm(p = level) } zb <- stats::qnorm(p = power) A <- dpvar + tau^2 - so^2/((zo/za)^2 - 1) x <- (za*dpmean - zb*sqrt(dpmean^2 + (za^2 - zb^2)*A))/(za^2 - zb^2) sr <- sqrt(x^2 - so^2/((zo/za)^2 - 1)) outPow <- porsPs(level = level, dprior = dprior, sr = sr) ## pow <- porsPs(level = level, dprior = dprior, sr = na.omit(srs)) ## powequal <- abs(pow - power) <= 0.0001 ## if (any(powequal)) { ## sr <- srs[powequal] ## outPow <- pow[powequal] ## } else { ## sr <- NaN ## outPow <- NaN ## } } ## create output object out <- list("designPrior" = dprior, "power" = power, "powerRecomputed" = outPow, "sr" = sr, "c" = dprior$so^2/sr^2, type = paste("sceptical p-value <=", signif(level, 3), "(exact computation)")) class(out) <- "ssdRS" return(out) } #' @title Probability of replication success based on the sceptical p-value #' #' @description This function computes the probability to achieve replication #' success based on the sceptical p-value. #' #' @details The sceptical p-value is assumed to be uncalibrated as in Held #' (2020). The package ReplicationSuccess allows for sample size and power #' calculations with the recalibrated sceptical p-value #' (\url{https://CRAN.R-project.org/package=ReplicationSuccess}). 
#' #' @param level Threshold for the (one-sided) sceptical p-value below which #' replication success is achieved #' @param dprior Design prior object #' @param sr Replication standard error #' #' @return The probability to achieve replication success #' #' @references #' #' Pawel, S., Consonni, G., and Held, L. (2022). Bayesian approaches to #' designing replication studies. arXiv preprint. #' \doi{10.48550/arXiv.2211.02552} #' #' Held, L. (2020). A new standard for the analysis and design of replication #' studies (with discussion). Journal of the Royal Statistical Society: Series A #' (Statistics in Society), 183(2), 431-448. \doi{10.1111/rssa.12493} #' #' @author Samuel Pawel #' #' @examples #' ## specify design prior #' to1 <- 0.2 #' so1 <- 0.05 #' dprior <- designPrior(to = to1, so = so1) #' porsPs(level = 0.025, dprior = dprior, sr = c(0.05, 0.01)) #' #' @export porsPs <- function(level, dprior, sr) { ## input checks stopifnot( length(level) == 1, is.numeric(level), is.finite(level), 0 < level, level < 1, class(dprior) == "designPrior", length(sr) > 0, is.numeric(sr), all(is.finite(sr)), all(0 <= sr) ) ps <- vapply(X = sr, FUN = function(sr1) { ## success region depends on the direction of original study to <- dprior$to so <- dprior$so zo <- to/so za <- stats::qnorm(p = 1 - level) if (za > abs(zo)) { p <- 0 } else { if (sign(to) >= 0) { int <- cbind(za*sqrt(sr1^2 + so^2/((zo/za)^2 - 1)), Inf) } else { zaNeg <- -za int <- cbind(-Inf, zaNeg*sqrt(sr1^2 + so^2/((zo/zaNeg)^2 - 1))) } sregion <- successRegion(intervals = int) p <- pors(sregion = sregion, dprior = dprior, sr = sr1) } return(p) }, FUN.VALUE = 1) return(ps) }
/scratch/gouwar.j/cran-all/cranData/BayesRepDesign/R/ssdPs.R
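Replication success based on the sceptical p-value requires the original study itself to be sufficiently significant (|zo| must exceed the level quantile); a quick illustration:

library(BayesRepDesign)

## |zo| = 1 < qnorm(0.975), so the sceptical p-value cannot reach 0.025
dpWeak <- designPrior(to = 0.1, so = 0.1)
porsPs(level = 0.025, dprior = dpWeak, sr = 0.05)  # 0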
#' @title Sample size determination for replication success based on
#'     significance
#'
#' @description This function computes the standard error required to achieve
#'     replication success with a certain probability and based on statistical
#'     significance of the replication effect estimate.
#'
#' @param level Significance level for the replication effect estimate
#'     (one-sided and in the same direction as the original effect estimate)
#' @param dprior Design prior object
#' @param power Desired probability of replication success
#'
#' @return Returns an object of class \code{"ssdRS"}. See \code{\link{ssd}} for
#'     details.
#'
#' @references
#'
#' Pawel, S., Consonni, G., and Held, L. (2022). Bayesian approaches to
#' designing replication studies. arXiv preprint.
#' \doi{10.48550/arXiv.2211.02552}
#'
#' @author Samuel Pawel
#'
#' @examples
#' ## specify design prior
#' to1 <- 2
#' so1 <- 0.5
#' dprior <- designPrior(to = to1, so = so1, tau = 0.1)
#' ssdSig(level = 0.025, dprior = dprior, power = 0.9)
#'
#' @export
ssdSig <- function(level, dprior, power) {
    ## input checks
    stopifnot(
        length(level) == 1,
        is.numeric(level),
        is.finite(level),
        0 < level, level < 1,
        class(dprior) == "designPrior",
        length(power) == 1,
        is.numeric(power),
        is.finite(power),
        0 < power, power < 1,
        level < power
    )

    ## extracting design prior parameters
    tau <- dprior$tau
    dpmean <- dprior$dpMean
    dpvar <- dprior$dpVar
    so <- dprior$so
    to <- dprior$to

    ## computing standard normal quantiles for power calculation
    if (sign(to) > 0) {
        za <- stats::qnorm(p = 1 - level)
    } else {
        za <- stats::qnorm(p = level)
    }
    zb <- stats::qnorm(p = power)

    ## computing bound of probability of replication success
    limP <- porsSig(level = level, dprior = dprior, sr = 0)
    if (power > limP) {
        warning(paste0("Power not achievable with specified design prior (at most ",
                       round(limP, 3), ")"))
        sr <- NaN
        outPow <- NaN
    } else {
        ## computing replication standard error sr analytically
        sr <- (dpmean*za - zb*sqrt((za^2 - zb^2)*(tau^2 + dpvar) + dpmean^2))/
            (za^2 - zb^2)
        ## computing probability of replication success
        outPow <- porsSig(level = level, dprior = dprior, sr = sr)
    }

    ## create output object
    out <- list("designPrior" = dprior, "power" = power,
                "powerRecomputed" = outPow, "sr" = sr,
                "c" = so^2/sr^2,
                type = paste("replication p-value <=", signif(level, 3),
                             "(exact computation)"))
    class(out) <- "ssdRS"
    return(out)
}

#' @title Probability of replication success based on significance
#'
#' @description This function computes the probability to achieve replication
#'     success based on statistical significance of the replication effect
#'     estimate.
#'
#' @param level Significance level for p-value of the replication effect
#'     estimate (one-sided and in the same direction as the original effect
#'     estimate)
#' @param dprior Design prior object
#' @param sr Replication standard error
#'
#' @return The probability to achieve replication success
#'
#' @references
#'
#' Pawel, S., Consonni, G., and Held, L. (2022). Bayesian approaches to
#' designing replication studies. arXiv preprint.
#' \doi{10.48550/arXiv.2211.02552}
#'
#' @author Samuel Pawel
#'
#' @examples
#' ## specify design prior
#' to1 <- 2
#' so1 <- 1
#' dprior <- designPrior(to = to1, so = so1, tau = 0.1)
#' porsSig(level = 0.025, dprior = dprior, sr = c(0.5, 0.3))
#'
#' @export
porsSig <- function(level, dprior, sr) {
    ## input checks
    stopifnot(
        length(level) == 1,
        is.numeric(level),
        is.finite(level),
        0 < level, level < 1,
        class(dprior) == "designPrior",
        length(sr) > 0,
        is.numeric(sr),
        all(is.finite(sr)),
        all(0 <= sr)
    )

    ps <- vapply(X = sr, FUN = function(sr1) {
        ## compute probability of replication success
        sregion <- successRegionSig(sr = sr1, to = dprior$to, tau = 0,
                                    nsites = 1, level = level)
        p <- pors(sregion = sregion, dprior = dprior, sr = sr1)
        return(p)
    }, FUN.VALUE = 1)
    return(ps)
}

#' @title Success region based on significance
#'
#' @description This function returns the success region for the (meta-analytic)
#'     replication effect estimate to achieve significance
#'
#' @param sr Replication standard error
#' @param to Original effect estimate
#' @param tau Heterogeneity standard deviation used in the calculation of the
#'     meta-analytic replication effect estimate and its standard error.
#'     Defaults to \code{0} (fixed effects analysis)
#' @param nsites Number of sites, defaults to \code{1}. The effect estimates
#'     from all sites are assumed to have the same standard error \code{sr}
#' @param level Significance level for p-value of the (average) replication
#'     effect estimate (one-sided and in the same direction as the original
#'     effect estimate)
#'
#' @return An object of class \code{"successRegion"}. See
#'     \code{\link{successRegion}} for details.
#'
#' @references
#'
#' Pawel, S., Consonni, G., and Held, L. (2022). Bayesian approaches to
#' designing replication studies. arXiv preprint.
#' \doi{10.48550/arXiv.2211.02552}
#'
#' @author Samuel Pawel
#'
#' @examples
#' successRegionSig(sr = 0.05, to = 0.2, tau = 0.01, nsites = 3, level = 0.025)
#'
#' @export
successRegionSig <- function(sr, to, tau = 0, nsites = 1, level) {
    ## input checks
    stopifnot(
        length(sr) == 1,
        is.numeric(sr),
        is.finite(sr),
        0 <= sr,
        length(to) == 1,
        is.numeric(to),
        is.finite(to),
        length(tau) == 1,
        is.numeric(tau),
        is.finite(tau),
        0 <= tau,
        length(nsites) == 1,
        is.numeric(nsites),
        is.finite(nsites),
        nsites > 0,
        length(level) == 1,
        is.numeric(level),
        is.finite(level),
        0 < level, level < 1
    )

    ## compute standard error of weighted average
    srMA <- 1/sqrt(nsites/(sr^2 + tau^2))
    ## success region depends on direction of original estimate
    if (sign(to) >= 0) {
        sregion <- successRegion(intervals = cbind(stats::qnorm(p = 1 - level)*srMA,
                                                   Inf))
    } else {
        sregion <- successRegion(cbind(-Inf, stats::qnorm(p = level)*srMA))
    }
    return(sregion)
}
/scratch/gouwar.j/cran-all/cranData/BayesRepDesign/R/ssdSig.R
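With nsites > 1, successRegionSig() bases the threshold on the standard error of the average estimate across sites; an illustration with arbitrary values:

library(BayesRepDesign)

sr <- 0.1
sreg1 <- successRegionSig(sr = sr, to = 0.2, tau = 0, nsites = 1, level = 0.025)
sreg4 <- successRegionSig(sr = sr, to = 0.2, tau = 0, nsites = 4, level = 0.025)
all.equal(sreg4[1, 1], sreg1[1, 1]/2)  # TRUE: averaging 4 sites halves the SE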
#' @title Sample size determination for replication success based on #' TOST equivalence #' #' @description This function computes the standard error required to achieve #' replication success with a certain probability and based on establishing #' the absence of a practically relevant effect size with the Two One-Sided #' Tests (TOST) procedure in the replication study. #' #' @param level Significance level for the TOST p-value #' @param dprior Design prior object #' @param power Desired probability of replication success #' @param margin The equivalence margin > 0 for the equivalence region around #' zero that defines a region of practically irrelevant effect sizes #' @param searchInt Interval for numerical search over replication standard #' errors #' #' @return Returns an object of class \code{"ssdRS"}. See \code{\link{ssd}} for #' details. #' #' @references #' #' Pawel, S., Consonni, G., and Held, L. (2022). Bayesian approaches to #' designing replication studies. arXiv preprint. #' \doi{10.48550/arXiv.2211.02552} #' #' Anderson, S. F. and Maxwell, S. E. (2016). There's more than one way to #' conduct a replication study: Beyond statistical significance. Psychological #' Methods, 21(1), 1-12. \doi{10.1037/met0000051} #' #' @author Samuel Pawel #' #' @examples #' ## specify design prior #' to1 <- 0.05 #' so1 <- 0.05 #' dprior <- designPrior(to = to1, so = so1, tau = 0.05) #' ssdTOST(level = 0.05, dprior = dprior, power = 0.9, margin = 0.3) #' #' @export ssdTOST <- function(level, dprior, power, margin, searchInt = c(0, 2)) { ## input checks stopifnot( length(level) == 1, is.numeric(level), is.finite(level), 0 < level, level < 1, class(dprior) == "designPrior", length(power) == 1, is.numeric(power), is.finite(power), 0 < power, power < 1, length(margin) == 1, is.numeric(margin), is.finite(margin), margin > 0, length(searchInt) == 2, is.numeric(searchInt), all(is.finite(searchInt)), 0 <= searchInt[1], searchInt[1] < searchInt[2] ) ## computing bound of probability of replication success limP <- porsTOST(level = level, dprior = dprior, margin = margin, sr = 0) if (power > limP) { warning(paste0("Power not achievable with specified design prior (at most ", round(limP, 3), ")")) sr <- NaN outPow <- NaN } else { ## computing replication standard error sr rootFun <- function(sr) { porsTOST(level = level, dprior = dprior, margin = margin, sr = sr) - power } res <- try(stats::uniroot(f = rootFun, interval = searchInt)$root, silent = TRUE) if (inherits(res, "try-error")) { sr <- NaN outPow <- NaN warning("Numerical problems, try adjusting searchInt") } else { sr <- res ## computing probability of replication success outPow <- porsTOST(level = level, dprior = dprior, margin = margin, sr = sr) } } ## create output object out <- list("designPrior" = dprior, "power" = power, "powerRecomputed" = outPow, "sr" = sr, "c" = dprior$so^2/sr^2, type = paste("TOST equivalence with level =", signif(level, 3), "and margin =", signif(margin, 3), "(numerical computation)")) class(out) <- "ssdRS" return(out) } #' @title Probability of replication success based on TOST equivalence #' #' @description This function computes the probability to achieve replication #' success based on establishing the absence of a practically relevant #' effect size with the Two One-Sided Tests (TOST) procedure in #' the replication study. 
#' #' @param level Significance level for the TOST p-value #' @param dprior Design prior object #' @param margin The equivalence margin > 0 for the equivalence region around #' zero that defines a region of practically irrelevant effect sizes #' @param sr Replication standard error #' #' @return The probability to achieve replication success #' #' @references #' #' Pawel, S., Consonni, G., and Held, L. (2022). Bayesian approaches to #' designing replication studies. arXiv preprint. #' \doi{10.48550/arXiv.2211.02552} #' #' Anderson, S. F. and Maxwell, S. E. (2016). There's more than one way to #' conduct a replication study: Beyond statistical significance. Psychological #' Methods, 21(1), 1-12. \doi{10.1037/met0000051} #' #' @author Samuel Pawel #' #' @examples #' ## specify design prior #' to1 <- 2 #' so1 <- 0.05 #' dprior <- designPrior(to = to1, so = so1, tau = 0.1) #' porsTOST(level = 0.1, dprior = dprior, margin = 0.3, sr = c(0.05, 0.03)) #' #' @export porsTOST <- function(level, dprior, margin, sr) { ## input checks stopifnot( length(level) == 1, is.numeric(level), is.finite(level), 0 < level, level < 1, class(dprior) == "designPrior", length(margin) == 1, is.numeric(margin), is.finite(margin), margin > 0, length(sr) > 0, is.numeric(sr), all(is.finite(sr)), all(0 <= sr) ) ## compute probability of replication success za <- stats::qnorm(p = 1 - level/2) ps <- vapply(X = sr, FUN = function(sr1) { ints <- cbind(-margin + za*sr1, margin - za*sr1) if (ints[1] >= ints[2]) { p <- 0 } else { sregion <- successRegion(intervals = ints) p <- pors(sregion = sregion, dprior = dprior, sr = sr1) } return(p)}, FUN.VALUE = 1) return(ps) } ## TODO add tests
/scratch/gouwar.j/cran-all/cranData/BayesRepDesign/R/ssdTOST.R
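porsTOST() returns 0 whenever the replication standard error is too large for the TOST interval to be nonempty, i.e. when margin <= qnorm(1 - level/2)*sr; an illustration:

library(BayesRepDesign)

dp <- designPrior(to = 0.05, so = 0.05)
## qnorm(0.975)*0.2 > 0.3, so equivalence cannot be established
porsTOST(level = 0.05, dprior = dp, margin = 0.3, sr = 0.2)  # 0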
#' @title Success region for replication effect estimate
#'
#' @description Creates a success region object which can then be used for
#'     computing the probability of replication success with \code{\link{pors}}.
#'
#' @param intervals An Nx2 matrix containing N disjoint intervals, the first
#'     column containing the lower and the second column containing the upper
#'     limits
#'
#' @return Returns an object of class \code{"successRegion"} which is a matrix
#'     containing the success intervals sorted in descending order of their
#'     upper limits
#'
#' @author Samuel Pawel
#'
#' @references
#'
#' Pawel, S., Consonni, G., and Held, L. (2022). Bayesian approaches to
#' designing replication studies. arXiv preprint.
#' \doi{10.48550/arXiv.2211.02552}
#'
#' @seealso \code{\link{pors}}, \code{\link{ssd}}
#'
#' @examples
#' successRegion(intervals = rbind(c(1.96, Inf), c(-Inf, -1.96)))
#' successRegion(intervals = cbind(1.96, Inf))
#'
#' @export
successRegion <- function(intervals) {
    ## input checks
    stopifnot(
        is.matrix(intervals),
        !any(intervals[,1,drop = FALSE] > intervals[,2, drop = FALSE])
    )
    ## check whether intervals overlap
    intervalsSorted <- intervals[order(intervals[,2], decreasing = TRUE),,
                                 drop = FALSE]
    if (nrow(intervals) > 1) {
        for (i in seq(2, nrow(intervalsSorted))) {
            if (intervalsSorted[i - 1, 1] < intervalsSorted[i, 2]) {
                stop("intervals must be disjoint")
            }
        }
    }
    class(intervalsSorted) <- "successRegion"
    return(intervalsSorted)
}

#' Print method for class \code{"successRegion"}
#' @method print successRegion
#'
#' @param x Object of class \code{"successRegion"}
#' @param ... Other arguments
#'
#' @return Prints text summary in the console and invisibly returns the
#'     \code{"successRegion"} object
#'
#' @author Samuel Pawel
#'
#' @examples
#' ## success region for two-sided significance test
#' successRegion(intervals = rbind(c(1.96, Inf), c(-Inf, -1.96)))
#' ## success region for one-sided significance test
#' successRegion(intervals = rbind(c(1.96, Inf)))
#' @export
print.successRegion <- function(x, ...) {
    parensMat <- matrix(nrow = nrow(x), ncol = ncol(x))
    parensMat[,1] <- ifelse(is.finite(x[,1]), "[", "(")
    parensMat[,2] <- ifelse(is.finite(x[,2]), "]", ")")
    intChar <- rev(paste0(paste0(parensMat[,1], x[,1], ", "),
                          paste0(x[,2], parensMat[,2])))
    cat("Success region for replication effect estimate\n")
    cat(paste0(" ", intChar), sep = " and")
    cat("\n")
    invisible(x)
}
/scratch/gouwar.j/cran-all/cranData/BayesRepDesign/R/successRegion.R
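successRegion() validates that the supplied intervals are disjoint; an illustration of the failure mode:

library(BayesRepDesign)

## overlapping intervals are rejected with an error
try(successRegion(intervals = rbind(c(0, 2), c(1, 3))))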
library(BayesRepDesign) ## design prior testing grid tos <- c(-3, 2, 0, 2, 3) sos <- c(0.5, 1, 1.1) taus <- c(0, 0.25) mus <- c(0, 0.5) sps <- c(Inf, 10, 0.5) alpha <- 0.025 srs <- c(0.1, 0.4, 1, 2) testGrid <- expand.grid(to = tos, so = sos, sr = srs, tau = taus, mu = mus, sp = sps) ## compute probability of replication success resultsDF <- do.call("rbind", lapply(X = seq(1, nrow(testGrid)), FUN = function(i) { to <- testGrid$to[i] so <- testGrid$so[i] sr <- testGrid$sr[i] tau <- testGrid$tau[i] mu <- testGrid$mu[i] sp <- testGrid$sp[i] dp <- designPrior(to = to, so = so, mu = mu, sp = sp, tau = tau) ## generic function with significance success region if (sign(to) >= 0) { sreg <- successRegion(intervals = cbind(stats::qnorm(p = 1- alpha)*sr, Inf)) } else { sreg <- successRegion(intervals = cbind(-Inf, stats::qnorm(p = alpha)*sr)) } p_pors <- pors(sregion = sreg, dprior = dp, sr = sr) ## significance p_porsSig <- porsSig(level = alpha, dprior = dp, sr = sr) ## TOST p_porsTOST <- porsTOST(level = alpha, dprior = dp, margin = 1, sr = sr) ## meta-analysis p_porsMeta <- porsMeta(level = alpha^2, dprior = dp, sr = sr) ## equivalence p_porsEqu <- porsEqu(level = 1 - 2*alpha, dprior = dp, margin = 1, sr = sr) ## sceptical p-value p_porsPs <- porsPs(level = alpha*6, dprior = dp, sr = sr) ## standard BF p_porsBF01 <- porsBF01(level = alpha*4, dprior = dp, sr = sr) ## replication BF p_porsBFr <- porsBFr(level = alpha*2, dprior = dp, sr = sr) ## sceptical BF p_porsBFs <- porsBFs(level = alpha*4, dprior = dp, sr = sr) ## return everything out <- data.frame(to, so, tau, mu, sp, sr, p_pors, p_porsSig, p_porsTOST, p_porsMeta, p_porsEqu, p_porsPs, p_porsBF01, p_porsBFr, p_porsBFs) return(out) })) ## porsDF <- resultsDF ## save(object = porsDF, file = "porsDF.rda", version = 2) load("porsDF.rda") expect_equal(porsDF$p_pors, resultsDF$p_pors, tolerance = 1e-05, info = "pors") expect_equal(porsDF$p_porsSig, resultsDF$p_porsSig, tolerance = 1e-05, info = "porsSig") expect_equal(porsDF$p_porsTOST, resultsDF$p_porsTOST, tolerance = 1e-05, info = "porsTOST") expect_equal(porsDF$p_porsMeta, resultsDF$p_porsMeta, tolerance = 1e-05, info = "porsMeta") expect_equal(porsDF$p_porsEqu, resultsDF$p_porsEqu, tolerance = 1e-05, info = "porsEqu") expect_equal(porsDF$p_porsPs, resultsDF$p_porsPs, tolerance = 1e-05, info = "porsPs") expect_equal(porsDF$p_porsBF01, resultsDF$p_porsBF01, tolerance = 1e-05, info = "porsBF01") expect_equal(porsDF$p_porsBFr, resultsDF$p_porsBFr, tolerance = 1e-05, info = "porsBFr") expect_equal(porsDF$p_porsBFs, resultsDF$p_porsBFs, tolerance = 1e-05, info = "porsBFs")
/scratch/gouwar.j/cran-all/cranData/BayesRepDesign/inst/tinytest/test-pors.R
library(BayesRepDesign) ## testing grid tos <- c(-3, -2, 2, 3) sos <- c(0.5, 1, 1.1) taus <- c(0, 0.25) mus <- c(0, 0.5) sps <- c(Inf, 10) alpha <- 0.025 pows <- c(0.4, 0.6, 0.7, 0.8) testGrid <- expand.grid(to = tos, so = sos, tau = taus, mu = mus, sp = sps, power = pows) ## SSD numerically and analytically suppressWarnings({ resultsDF <- do.call("rbind", lapply(X = seq(1, nrow(testGrid)), FUN = function(i) { to <- testGrid$to[i] so <- testGrid$so[i] tau <- testGrid$tau[i] mu <- testGrid$mu[i] sp <- testGrid$sp[i] power <- testGrid$power[i] dp <- designPrior(to = to, so = so, mu = mu, sp = sp, tau = tau) ## significance numerically with ssd if (sign(to) >= 0) { ## function to compute required standard error for significance one-sided sregionfunSig <- function(sr) { successRegion(intervals = cbind(stats::qnorm(p = 1- alpha)*sr, Inf)) } } else { ## function to compute required standard error for significance one-sided sregionfunSig <- function(sr) { successRegion(intervals = cbind(-Inf, stats::qnorm(p = alpha)*sr)) } } res_ssd <- ssd(sregionfun = sregionfunSig, dprior = dp, power = power, searchInt = c(.Machine$double.eps, 4)) ## significance analytically res_ssdSig <- ssdSig(level = alpha, dprior = dp, power = power) ## meta-analysis res_ssdMeta <- ssdMeta(level = alpha^2, dprior = dp, power = power, searchInt = c(0, 10)) ## equivalence res_ssdEqu <- ssdEqu(level = alpha*4, dprior = dp, power = power, margin = 5, searchInt = c(0, 2)) ## sceptical p-value res_ssdPs <- ssdPs(level = alpha*6, dprior = dp, power = power) ## standard BF res_ssdBF01 <- ssdBF01(level = alpha*4, dprior = dp, power = power, priormean = 0, priorvar = 2) ## replication BF res_ssdBFr <- ssdBFr(level = alpha*4, dprior = dp, power = power) ## sceptical BF res_ssdBFs <- ssdBFs(level = alpha*4, dprior = dp, power = power) ## return everything out <- data.frame(to = to, so = so, tau = tau, mu = mu, sp = sp, power = power, power_ssd = res_ssd$powerRecomputed, sr_ssd = res_ssd$sr, power_ssdSig = res_ssdSig$powerRecomputed, sr_ssdSig = res_ssdSig$sr, power_ssdPs = res_ssdPs$powerRecomputed, sr_ssdPs = res_ssdPs$sr, power_ssdMeta = res_ssdMeta$powerRecomputed, sr_ssdMeta = res_ssdMeta$sr, power_ssdEqu = res_ssdEqu$powerRecomputed, sr_ssdEqu = res_ssdEqu$sr, power_ssdBF01 = res_ssdBF01$powerRecomputed, sr_ssdBF01 = res_ssdBF01$sr, sr_ssdBFr = res_ssdBFr$sr, power_ssdBFr = res_ssdBFr$powerRecomputed, sr_ssdBFs = res_ssdBFs$sr, power_ssdBFs = res_ssdBFs$powerRecomputed ) return(out) })) }) library(tinytest) expect_equal(resultsDF$power_ssd, resultsDF$power, tolerance = 1e-04, info = "ssd: back-computed power = specified power") expect_equal(resultsDF$power_ssdSig, resultsDF$power, tolerance = 1e-04, info = "ssdSig: back-computed power = specified power") expect_equal(resultsDF$power_ssd, resultsDF$power_ssdSig, tolerance = 1e-04, info = "ssd and ssdSig: back-computed power is the same") expect_equal(resultsDF$sr_ssd, resultsDF$sr_ssdSig, tolerance = 1e-04, info = "ssd and ssdSig: replication standard error is the same") expect_equal(resultsDF$power_ssdPs, resultsDF$power, tolerance = 1e-04, info = "ssdPs: back-computed power = specified power") nonNA <- !is.na(resultsDF$power_ssdEqu) expect_equal(resultsDF$power_ssdEqu[nonNA], resultsDF$power[nonNA], tolerance = 1e-04, info = "ssdEqu: back-computed power = specified power") nonNA <- !is.na(resultsDF$power_ssdMeta) expect_equal(resultsDF$power_ssdMeta[nonNA], resultsDF$power[nonNA], tolerance = 1e-04, info = "ssdMeta: back-computed power = specified power") nonNA <- 
!is.na(resultsDF$power_ssdBF01) expect_equal(resultsDF$power_ssdBF01[nonNA], resultsDF$power[nonNA], tolerance = 1e-04, info = "ssdBF01: back-computed power = specified power") nonNA <- !is.na(resultsDF$power_ssdBFr) expect_equal(resultsDF$power_ssdBFr[nonNA], resultsDF$power[nonNA], tolerance = 1e-04, info = "ssdBFr: back-computed power = specified power") nonNA <- !is.na(resultsDF$power_ssdBFs) expect_equal(resultsDF$power_ssdBFs[nonNA], resultsDF$power[nonNA], tolerance = 1e-04, info = "ssdBFs: back-computed power = specified power")
/scratch/gouwar.j/cran-all/cranData/BayesRepDesign/inst/tinytest/test-ssd.R
#' Computes the posterior distribution of hazard values for a vector x for the Piecewise Linear Log Hazard model (PLLH)
#' @param x Vector of times to compute the posterior mean hazard function
#' @param G1 List of posterior samples from the BayesPiecewiseLinearLogHazard function.
#' @return Matrix containing the posterior distribution of hazard values h(x)
#' @export
GetALLHazLogSlope = function(x, G1){
  ## linear interpolation of the log-hazard values lam between split points,
  ## for one posterior draw of (s, lam, J)
  GetHazPLLH = function(x, s, lam, J){
    y = x
    slopes = diff(lam)/diff(s)
    slopes = slopes[1:(J + 1)]
    for(m in 1:length(x)){
      for(k in 1:(J + 1)){
        if((x[m] > s[k]) && (x[m] <= s[k + 1])){
          y[m] = (x[m] - s[k])*slopes[k] + lam[k]
        }
      }
    }
    return(y)
  }
  HAZ = matrix(ncol = length(x), nrow = nrow(G1[[1]]))
  for(b in 1:nrow(G1[[1]])){
    s = G1[[1]][b, ]
    lam = G1[[2]][b, ]
    J = G1[[3]][b]
    HAZ[b, ] = GetHazPLLH(x, s, lam, J)
  }
  return(HAZ)
}
/scratch/gouwar.j/cran-all/cranData/BayesReversePLLH/R/GetALLHazLogSlope.R
#' Computes the posterior hazard values for a vector x for the Piecewise Exponential Hazard model (PEH)
#' @param x Vector of times to compute the hazard.
#' @param G1 List of posterior samples from the BayesPiecewiseHazard function.
#' @return Matrix containing the posterior distribution of hazard values h(x)
#' @export
GetALLHazPiece = function(x, G1){
  ## constant hazard lam[k] on the interval (s[k], s[k+1]] for one posterior draw
  GetHazPEH = function(x, s, lam, J){
    y = x
    for(m in 1:length(x)){
      for(k in 1:(J + 1)){
        if((x[m] > s[k]) && (x[m] <= s[k + 1])){
          y[m] = lam[k]
        }
      }
    }
    return(y)
  }
  HAZ = matrix(ncol = length(x), nrow = nrow(G1[[1]]))
  for(b in 1:nrow(G1[[1]])){
    s = G1[[1]][b, ]
    lam = G1[[2]][b, ]
    J = G1[[3]][b]
    HAZ[b, ] = GetHazPEH(x, s, lam, J)
  }
  return(HAZ)
}
/scratch/gouwar.j/cran-all/cranData/BayesReversePLLH/R/GetALLHazPiece.R
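The matrix returned by GetALLHazPiece has one row per posterior draw and one column per time point, so pointwise posterior summaries are one apply() call away. A hedged sketch; G1 and xgrid are assumed to come from an earlier fit and are not defined in this file:

## assuming, e.g., G1 <- BayesPiecewiseHazard(Y, I, Poi = 5, B = 2000)
## and a time grid xgrid <- seq(0.1, max(Y), length.out = 50)
HazMat <- GetALLHazPiece(xgrid, G1)
## pointwise posterior median and 95% credible band of the hazard
hazBand <- apply(HazMat, 2, quantile, probs = c(0.025, 0.5, 0.975))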
#' Computes the posterior distribution of survival probabilities for a vector x for the Piecewise Exponential Hazard model (PEH)
#' @param x Vector of times to compute the posterior mean survival probability.
#' @param G1 List of posterior samples from the BayesPiecewiseHazard function.
#' @return Matrix containing the posterior distribution of survival probabilities S(x)
#' @export
GetALLSurvPEH = function(x, G1){
  GetSurvPEH = function(x, s, lam, J){
    y = x
    for(m in 1:length(x)){
      for(k in 1:(J + 1)){
        if((x[m] > s[k]) && (x[m] <= s[k + 1])){
          if(k > 1){
            ## cumulative hazard: partial current interval plus all completed intervals
            y[m] = exp(-exp(lam[k])*(y[m] - s[k]) -
                         sum(exp(lam[1:(k - 1)])*(s[2:k] - s[1:(k - 1)])))
          }else{
            ## first interval
            y[m] = exp(-exp(lam[k])*(y[m] - s[k]))
          }
        }
      }
    }
    return(y)
  }
  SurvHold = matrix(nrow = nrow(G1[[1]]), ncol = length(x))
  for(b in 1:nrow(G1[[1]])){
    s = G1[[1]][b, ]
    lam = G1[[2]][b, ]
    J = G1[[3]][b]
    SurvHold[b, ] = GetSurvPEH(x, s, lam, J)
  }
  return(SurvHold)
}
/scratch/gouwar.j/cran-all/cranData/BayesReversePLLH/R/GetALLSurvPEH.R
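For reference, GetSurvPEH above evaluates the standard piecewise-exponential survival function: with split points s_1 < ... and log-hazards lambda_k, for x in the k-th interval

$$
S(x) = \exp\!\Big(-e^{\lambda_k}(x - s_k) - \sum_{j=1}^{k-1} e^{\lambda_j}\,(s_{j+1} - s_j)\Big),
\qquad x \in (s_k, s_{k+1}],
$$

i.e. the exponential of minus the cumulative hazard accumulated over the completed intervals plus the partial current interval, exactly as coded.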
#' Computes the posterior distribution of survival probabilities for a vector x for the Piecewise Linear Log Hazard model (PLLH)
#' @param x Vector of times to compute the posterior mean survival probability.
#' @param G1 List of posterior samples from the BayesPiecewiseLinearLogHazard function.
#' @return Matrix containing the posterior distribution of survival probabilities S(x)
#' @export
GetALLSurvPLLH = function(x, G1){
  SurvHold = matrix(nrow = nrow(G1[[1]]), ncol = length(x))
  for(b in 1:nrow(G1[[1]])){
    s = G1[[1]][b, ]
    lam = G1[[2]][b, ]
    J = G1[[3]][b]
    SurvHold[b, ] = SurvPLLH(x, s, lam, J)
  }
  return(SurvHold)
}
/scratch/gouwar.j/cran-all/cranData/BayesReversePLLH/R/GetALLSurvPLLH.R
#' Computes the posterior mean hazard value for a vector x for the Piecewise Linear Log Hazard model (PLLH)
#' @param x Vector of times to compute the posterior mean hazard function
#' @param G1 List of posterior samples from the BayesPiecewiseLinearLogHazard function.
#' @return Vector containing the posterior mean hazard values h(x)
#' @export
PostMeanHazLogSlope = function(x, G1){
  GetHazPLLH = function(x, s, lam, J){
    y = x
    slopes = diff(lam)/diff(s)
    slopes = slopes[1:(J + 1)]
    for(m in 1:length(x)){
      for(k in 1:(J + 1)){
        if((x[m] > s[k]) && (x[m] <= s[k + 1])){
          y[m] = (x[m] - s[k])*slopes[k] + lam[k]
        }
      }
    }
    return(y)
  }
  y1 = rep(0, length(x))
  for(b in 1:nrow(G1[[1]])){
    s = G1[[1]][b, ]
    lam = G1[[2]][b, ]
    J = G1[[3]][b]
    ## Add up hazard
    y1 = y1 + GetHazPLLH(x, s, lam, J)
  }
  return(y1/nrow(G1[[1]]))
}
/scratch/gouwar.j/cran-all/cranData/BayesReversePLLH/R/PostMeanHazLogSlope.R
#' Computes the posterior mean hazard values for a vector x for the Piecewise Exponential Hazard model (PEH)
#' @param x Vector of times to compute the posterior mean hazard.
#' @param G1 List of posterior samples from the BayesPiecewiseHazard function.
#' @return Vector containing the posterior mean hazard values h(x)
#' @export
PostMeanHazPiece = function(x, G1){
  GetHazPEH = function(x, s, lam, J){
    y = x
    for(m in 1:length(x)){
      for(k in 1:(J + 1)){
        if((x[m] > s[k]) && (x[m] <= s[k + 1])){
          y[m] = lam[k]
        }
      }
    }
    return(y)
  }
  y1 = rep(0, length(x))
  for(b in 1:nrow(G1[[1]])){
    s = G1[[1]][b, ]
    lam = G1[[2]][b, ]
    J = G1[[3]][b]
    y1 = y1 + GetHazPEH(x, s, lam, J)
  }
  return(y1/nrow(G1[[1]]))
}
/scratch/gouwar.j/cran-all/cranData/BayesReversePLLH/R/PostMeanHazPiece.R
#' Computes the posterior mean survival probabilities for a vector x for the Piecewise Exponential Hazard model (PEH)
#' @param x Vector of times to compute the posterior mean survival probability.
#' @param G1 List of posterior samples from the BayesPiecewiseHazard function.
#' @return Vector containing the posterior mean survival probabilities S(x)
#' @export
PostMeanSurvPEH = function(x, G1){
  GetSurvPEH = function(x, s, lam, J){
    y = x
    for(m in 1:length(x)){
      for(k in 1:(J + 1)){
        if((x[m] > s[k]) && (x[m] <= s[k + 1])){
          if(k > 1){
            y[m] = exp(-exp(lam[k])*(y[m] - s[k]) -
                         sum(exp(lam[1:(k - 1)])*(s[2:k] - s[1:(k - 1)])))
          }else{
            ## first interval
            y[m] = exp(-exp(lam[k])*(y[m] - s[k]))
          }
        }
      }
    }
    return(y)
  }
  SurvHold = rep(0, length(x))
  for(b in 1:nrow(G1[[1]])){
    s = G1[[1]][b, ]
    lam = G1[[2]][b, ]
    J = G1[[3]][b]
    SurvHold = SurvHold + GetSurvPEH(x, s, lam, J)
  }
  return(SurvHold/nrow(G1[[1]]))
}
/scratch/gouwar.j/cran-all/cranData/BayesReversePLLH/R/PostMeanSurvPEH.R
#' Computes the posterior mean survival probabilities for a vector x for the Piecewise Linear Log Hazard model (PLLH)
#' @param x Vector of times to compute the posterior mean survival probability.
#' @param G1 List of posterior samples from the BayesPiecewiseLinearLogHazard function.
#' @return Vector containing the posterior mean survival probabilities S(x)
#' @export
PostMeanSurvPLLH = function(x, G1){
  SurvHold = rep(0, length(x))
  for(b in 1:nrow(G1[[1]])){
    s = G1[[1]][b, ]
    lam = G1[[2]][b, ]
    J = G1[[3]][b]
    SurvHold = SurvHold + SurvPLLH(x, s, lam, J)
  }
  return(SurvHold/nrow(G1[[1]]))
}
/scratch/gouwar.j/cran-all/cranData/BayesReversePLLH/R/PostMeanSurvPLLH.R
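A small consistency note (illustrative, not from the package): PostMeanSurvPLLH is the pointwise average of the draws returned by GetALLSurvPLLH, so the two should agree up to floating-point error:

## assuming G1 from BayesPiecewiseLinearLogHazard and a time grid xgrid
SurvMat  <- GetALLSurvPLLH(xgrid, G1)    # draws x times matrix
meanSurv <- PostMeanSurvPLLH(xgrid, G1)  # posterior mean curve
all.equal(as.numeric(colMeans(SurvMat)), as.numeric(meanSurv))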
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

#' Returns the approximate restricted posterior mean survival for the PLLH model.
#'
#' Uses a grid and parameter values to approximate the restricted posterior mean
#' survival for the PLLH model using the integral of the survival function.
#' @param Y Sequence from 0.01 to the maximum observed event time used to
#'   compute the approximate restricted mean survival time. More finely spaced
#'   sequences result in a better approximation but longer computation time.
#' @param s Vector of split points. The first and last entries must be 0 and max(Y).
#' @param lam Vector of log-hazard values at each split point location. Must be
#'   the same length as s.
#' @param J Number of split points.
#' @return Returns the approximate restricted posterior mean survival time for the PLLH model.
#' @importFrom Rcpp evalCpp
#' @useDynLib BayesReversePLLH
#' @examples
#' ## Generate Data
#' Y1 = rweibull(100, 4, 1)
#' ## Create sequence from (0, max(Y1)) for approximation
#' Y = seq(.01, max(Y1), .01)
#' ## Parameters used to approximate the mean
#' s = c(0, 1, max(Y1))
#' lam = c(-2, 0, -2)
#' J = 1
#' ApproxMean(Y, s, lam, J)
#' @export
ApproxMean <- function(Y, s, lam, J) {
    .Call('_BayesReversePLLH_ApproxMean', PACKAGE = 'BayesReversePLLH', Y, s, lam, J)
}

SurvPLLH <- function(Y, s, lam, J) {
    .Call('_BayesReversePLLH_SurvPLLH', PACKAGE = 'BayesReversePLLH', Y, s, lam, J)
}

#' Samples from the PLLH Cox model with a treatment indicator.
#'
#' Samples from the Piecewise Linear Log-Hazard (PLLH) Cox model with a
#' treatment indicator and returns a list containing posterior parameters and
#' posterior restricted mean survival.
#' @param Y Vector of event or censoring times.
#' @param I1 Vector of event indicators.
#' @param Trt Vector containing patient treatment/control assignment.
#' @param Poi Prior mean number of split points.
#' @param B Number of iterations for MCMC.
#' @return Returns a list containing posterior samples of (1) the split point
#'   locations, (2) the log-hazards at each split point, (3) the number of split
#'   points, (4) the variance parameter for the log-hazard values, (5) the
#'   treatment coefficient, (6) the mean restricted survival time of the control
#'   therapy, (7) the mean restricted survival time of the treatment therapy.
#' @examples
#' ## Generate Data
#' Y = rweibull(20, 4, 1)
#' I = rbinom(20, 1, .5)
#' Trt = rbinom(20, 1, .5)
#' ## Hyperparameter for number of split points
#' Poi = 5
#' ## Number of iterations for MCMC
#' B = 200
#' BayesPiecewiseLinearLogHazardTrt(Y, I, Trt, Poi, B)
#' @export
BayesPiecewiseLinearLogHazardTrt <- function(Y, I1, Trt, Poi, B) {
    .Call('_BayesReversePLLH_BayesPiecewiseLinearLogHazardTrt', PACKAGE = 'BayesReversePLLH', Y, I1, Trt, Poi, B)
}

#' Samples from the PEH Cox model with a treatment indicator.
#'
#' Samples from the Piecewise Exponential Hazard (PEH) Cox model with a
#' treatment indicator and returns a list containing posterior parameters and
#' posterior restricted mean survival.
#' @param Y Vector of event or censoring times.
#' @param I1 Vector of event indicators.
#' @param Trt Vector containing patient treatment/control assignment.
#' @param Poi Prior mean number of split points.
#' @param B Number of iterations for MCMC.
#' @return Returns a list containing posterior samples of (1) the split point
#'   locations, (2) the log-hazards at each split point, (3) the number of split
#'   points, (4) the variance parameter for the log-hazard values, (5) the
#'   treatment coefficient, (6) the mean restricted survival time of the control
#'   therapy, (7) the mean restricted survival time of the treatment therapy.
#' @examples
#' ## Generate Data
#' Y = rweibull(20, 4, 1)
#' I = rbinom(20, 1, .5)
#' Trt = rbinom(20, 1, .5)
#' ## Hyperparameter for number of split points
#' Poi = 5
#' ## Number of iterations for MCMC
#' B = 200
#' BayesPiecewiseHazardTrt(Y, I, Trt, Poi, B)
#' @export
BayesPiecewiseHazardTrt <- function(Y, I1, Trt, Poi, B) {
    .Call('_BayesReversePLLH_BayesPiecewiseHazardTrt', PACKAGE = 'BayesReversePLLH', Y, I1, Trt, Poi, B)
}

#' Samples from the PEH Cox model with a patient covariate vector.
#'
#' Samples from the Piecewise Exponential Hazard (PEH) Cox model with a patient
#' covariate vector and returns a list containing posterior parameters and
#' posterior restricted mean survival.
#' @param Y Vector of event or censoring times.
#' @param I1 Vector of event indicators.
#' @param COV Matrix of size nxp containing p patient covariates.
#' @param Poi Prior mean number of split points.
#' @param B Number of iterations for MCMC.
#' @return Returns a list containing posterior samples of (1) the split point
#'   locations, (2) the log-hazards at each split point, (3) the number of split
#'   points, (4) the variance parameter for the log-hazard values, (5) the
#'   coefficients in the Cox model.
#' @examples
#' ## Generate Data
#' Y = rweibull(20, 4, 1)
#' I = rbinom(20, 1, .5)
#' COV = matrix(rnorm(40, 0, 1), ncol = 2)
#' ## Hyperparameter for number of split points
#' Poi = 5
#' ## Number of iterations for MCMC
#' B = 200
#' BayesPiecewiseHazardCOV(Y, I, COV, Poi, B)
#' @export
BayesPiecewiseHazardCOV <- function(Y, I1, COV, Poi, B) {
    .Call('_BayesReversePLLH_BayesPiecewiseHazardCOV', PACKAGE = 'BayesReversePLLH', Y, I1, COV, Poi, B)
}

#' Samples from the PLLH Cox model with a patient covariate vector.
#'
#' Samples from the Piecewise Linear Log-Hazard (PLLH) Cox model with a patient
#' covariate vector and returns a list containing posterior parameters and
#' posterior restricted mean survival.
#' @param Y Vector of event or censoring times.
#' @param I1 Vector of event indicators.
#' @param COV Matrix of size nxp containing p patient covariates.
#' @param Poi Prior mean number of split points.
#' @param B Number of iterations for MCMC.
#' @return Returns a list containing posterior samples of (1) the split point
#'   locations, (2) the log-hazards at each split point, (3) the number of split
#'   points, (4) the variance parameter for the log-hazard values, (5) the
#'   coefficients in the Cox model.
#' @examples
#' ## Generate Data
#' Y = rweibull(20, 4, 1)
#' I = rbinom(20, 1, .5)
#' COV = matrix(rnorm(40, 0, 1), ncol = 2)
#' ## Hyperparameter for number of split points
#' Poi = 5
#' ## Number of iterations for MCMC
#' B = 200
#' BayesPiecewiseLinearLogHazardCOV(Y, I, COV, Poi, B)
#' @export
BayesPiecewiseLinearLogHazardCOV <- function(Y, I1, COV, Poi, B) {
    .Call('_BayesReversePLLH_BayesPiecewiseLinearLogHazardCOV', PACKAGE = 'BayesReversePLLH', Y, I1, COV, Poi, B)
}

#' Samples from the PEH model without covariates.
#'
#' Samples from the Piecewise Exponential Hazard (PEH) model and returns a list
#' containing posterior parameters and posterior restricted mean survival.
#' @param Y Vector of event or censoring times.
#' @param I1 Vector of event indicators.
#' @param Poi Prior mean number of split points.
#' @param B Number of iterations for MCMC.
#' @return Returns a list containing posterior samples of (1) the split point
#'   locations, (2) the log-hazards at each split point, (3) the number of split
#'   points, (4) the variance parameter for the log-hazard values, (5) the
#'   posterior mean restricted survival time.
#' @examples
#' ## Generate Data
#' Y = rweibull(20, 4, 1)
#' I = rbinom(20, 1, .5)
#' ## Hyperparameter for number of split points
#' Poi = 5
#' ## Number of iterations for MCMC
#' B = 200
#' BayesPiecewiseHazard(Y, I, Poi, B)
#' @export
BayesPiecewiseHazard <- function(Y, I1, Poi, B) {
    .Call('_BayesReversePLLH_BayesPiecewiseHazard', PACKAGE = 'BayesReversePLLH', Y, I1, Poi, B)
}

#' Samples from the PLLH model without covariates.
#'
#' Samples from the Piecewise Linear Log-Hazard (PLLH) model and returns a list
#' containing posterior parameters and posterior restricted mean survival.
#' @param Y Vector of event or censoring times.
#' @param I1 Vector of event indicators.
#' @param Poi Prior mean number of split points.
#' @param B Number of iterations for MCMC.
#' @return Returns a list containing posterior samples of (1) the split point
#'   locations, (2) the log-hazards at each split point, (3) the number of split
#'   points, (4) the variance parameter for the log-hazard values, (5) the
#'   posterior mean restricted survival time.
#' @examples
#' ## Generate Data
#' Y = rweibull(20, 4, 1)
#' I = rbinom(20, 1, .5)
#' ## Hyperparameter for number of split points
#' Poi = 5
#' ## Number of iterations for MCMC
#' B = 200
#' BayesPiecewiseLinearLogHazard(Y, I, Poi, B)
#' @export
BayesPiecewiseLinearLogHazard <- function(Y, I1, Poi, B) {
    .Call('_BayesReversePLLH_BayesPiecewiseLinearLogHazard', PACKAGE = 'BayesReversePLLH', Y, I1, Poi, B)
}
/scratch/gouwar.j/cran-all/cranData/BayesReversePLLH/R/RcppExports.R
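A minimal end-to-end sketch tying the samplers above to the summary helpers earlier in the package; the simulated data, Poi, and B values are arbitrary illustrations, not defaults:

library(BayesReversePLLH)
set.seed(1)
## simulated right-censored data (illustrative)
Y <- rweibull(100, 4, 1)
I <- rbinom(100, 1, .9)
## fit the piecewise exponential hazard model
G1 <- BayesPiecewiseHazard(Y, I, Poi = 5, B = 2000)
## posterior mean survival curve on a grid
xgrid <- seq(0.05, max(Y), length.out = 40)
Smean <- PostMeanSurvPEH(xgrid, G1)
plot(xgrid, Smean, type = "l", xlab = "Time", ylab = "S(t)")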
Bernoulli_Uniform <- function(ind, p){
  ## log prior of a model with |ind| active variables out of p, under a
  ## Bernoulli(theta) inclusion prior with theta ~ Uniform(0, 1):
  ## log B(|ind| + 1, p - |ind| + 1)
  p.g = length(ind)
  sb = lbeta(1 + p.g, 1 + p - p.g)
  return(sb)
}
/scratch/gouwar.j/cran-all/cranData/BayesS5/R/Bernoulli_Uniform.R
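The lbeta term above is the log of the marginal (beta-binomial) model prior obtained by integrating a Bernoulli(theta) inclusion prior against theta ~ Uniform(0, 1); writing k = |gamma| for the model size,

$$
\pi(\gamma) = \int_0^1 \theta^{k} (1-\theta)^{\,p-k}\, d\theta = B(k+1,\; p-k+1),
$$

so Bernoulli_Uniform returns \log B(k+1, p-k+1).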
S5 <- function(X,y,ind_fun,model,tuning,tem,ITER=20,S=20,C0=5,verbose=TRUE){ n = nrow(X) p = ncol(X) y = y -mean(y) #requireNamespace() requireNamespace("Matrix") Matrix = Matrix::Matrix if(missing(tem)){tem = seq(0.4,1,length.out=20)^2} if(missing(ind_fun)){ print("The prior on regression coefficients is unspecified. The default is piMoM") ind_fun = BayesS5::ind_fun_pimom tuning <- BayesS5::hyper_par(type="pimom",X,y,thre = p^-0.5) # tuning parameter selection for nonlocal priors print("The choosen hyperparameter tau") print(tuning) #assign("tuning", tuning, .GlobalEnv) } #else{ # a = 0 # if(ind_fun == "pimom"){ind_fun = BayesS5::ind_fun_pimom; a = 1} # if(ind_fun == "pemom"){ind_fun = BayesS5::ind_fun_pemom; a = 1} # if(ind_fun == "g-prior"){ind_fun = BayesS5::ind_fun_g; a = 1} # if(a == 0){stop("The ind_fun is not in the list!")} # if(missing(tuning)){ stop("The tuning parameter is missing!") } #} if(missing(model)){ print("The model prior is unspecified. The default is Bernoulli_Uniform") model = BayesS5::Bernoulli_Uniform } print("#################################") print("S5 starts running") A3 = S;r0=1 verb = verbose a0=0.01;b0=0.01 tau = 1 IT = length(tem) IT.seq = rep(ITER,IT) #require(Matrix) g = B = tuning sam = sample(1:p,3) gam = rep(0,p); gam[sam]=1 curr = ind_fun(X[,sam],y,n,p,tuning) + model(sam,p) p.g=sum(gam) ind2= which(gam==1) GAM.fin0 = NULL OBJ.fin0 = NULL for(uu in 1:C0){ C.p = rep(-1000000,p) C.m = rep(-1000000,p) #prior based on model1 curr = ind_fun(X[,ind2],y,n,p,tuning) + model(ind2,p) GAM = gam OBJ = curr obj = OBJ if(p.g>0){ fit = solve(crossprod(X[,ind2])+diag(p.g))%*%crossprod(X[,ind2],y) res = y-X[,ind2]%*%fit corr = as.vector(cor(res,X)) ind.ix = sort.int(abs(corr),decreasing=TRUE,index.return=TRUE)$ix s = c(ind2,ind.ix) }else{res=y corr = as.vector(cor(res,X)) ind.ix = sort.int(abs(corr),decreasing=TRUE,index.return=TRUE)$ix s = ind.ix } if(p<50){p00=10}else{p00=round(p/10)} size = A3 IND = s[1:(size+p.g)] p.ind = length(IND) C.p = rep(-100000,p) for(i in (p.g+1):p.ind){ j=IND[i] gam.p = gam;gam.p[j]=1;ind.p=which(gam.p==1) int = ind_fun(X[,ind.p],y,n,p,tuning) + model(ind.p,p) obj.p = c(int) if(is.na(obj.p)==TRUE){obj.p = -100000} C.p[j] = obj.p } C.m = rep(-1000000,p) IND.m = ind2 p.ind.m = length(IND.m) for(i in 1:p.g){ j=ind2[i] gam.m = gam;gam.m[j]=0;ind.m=which(gam.m==1) int = ind_fun(X[,ind.m],y,n,p,tuning) +model(ind.m,p) obj.m = c(int) if(is.na(obj.m)==TRUE){obj.m = -1000000} C.m[j] = obj.m } p.g = sum(gam) OBJ.m0 = matrix(C.m,p,1) OBJ.p0 = matrix(C.p,p,1) ID = sum(2^(3*log(ind2))) ID.obj = ID it=1 #GAM.total = matrix(0,p,50000) GAM.total = Matrix(0,p,50000,sparse=TRUE) OBJ.total = rep(-100000,50000) GAM.total[,1] = gam OBJ.total[1] = obj time.total = rep(0,50000) it=1 ID0 = NULL INT = NULL pmt0 = proc.time() for(it in 1:IT){ IT0 = IT.seq[it] pq=0 for(iter in 1:IT0){ id = sum(2^(3*log(ind2))) id.ind = which(id==ID) leng = length(id.ind) if(leng==0){ ID = c(ID,id) C.p = rep(-100000,p) for(i in (p.g+1):p.ind){ j=IND[i] gam.p = gam;gam.p[j]=1;ind.p=which(gam.p==1) int = ind_fun(X[,ind.p],y,n,p,tuning) + model(ind.p,p) obj.p = c(int) if(is.na(obj.p)==TRUE){obj.p = -100000} C.p[j] = obj.p ind.total = which(OBJ.total< -10000)[1] OBJ.total[ind.total] = obj.p GAM.total[,ind.total] = gam.p time.total[ind.total] = (proc.time()-pmt0)[3] } p.g = sum(gam) C.m = rep(-100000,p) IND.m = ind2 p.ind.m = length(IND.m) for(i in 1:p.g){ j=ind2[i] gam.m = gam;gam.m[j]=0;ind.m=which(gam.m==1) int = ind_fun(X[,ind.m],y,n,p,tuning) + model(ind.m,p) obj.m = c(int) 
if(is.na(obj.m)==TRUE){obj.m = -100000} C.m[j] = obj.m ind.total = which(OBJ.total< -10000)[1] OBJ.total[ind.total] = obj.m GAM.total[,ind.total] = gam.m time.total[ind.total] = (proc.time()-pmt0)[3] } OBJ.p0 = cbind(OBJ.p0,C.p) OBJ.m0 = cbind(OBJ.m0,C.m) }else{ pq= pq+1 C.p = OBJ.p0[,(id.ind[1])];C.m = OBJ.m0[,(id.ind[1])] } prop = exp(tem[it]*(C.p-max(C.p))) sample.p = sample(1:length(prop),1,prob=prop) obj.p = C.p[sample.p] #obj.p prop = exp(tem[it]*(C.m-max(C.m))) sample.m = sample(1:length(prop),1,prob=prop) obj.m = C.m[sample.m] #obj.m l = 1/(1+exp(tem[it]*obj.m-tem[it]*obj.p)) if(l>runif(1)){ gam[sample.p]=1;obj = obj.p;curr=obj.p }else{ gam[sample.m]=0;obj = obj.m;curr=obj.m } ind2 = which(gam==1) p.g = sum(gam) curr = ind_fun(X[,ind2],y,n,p,tuning) +model(ind2,p) if(p.g>0){ fit = solve(crossprod(X[,ind2])+diag(p.g))%*%crossprod(X[,ind2],y) res = y-X[,ind2]%*%fit corr = as.vector(crossprod(res,X)) ind.ix = sort.int(abs(corr),decreasing=TRUE,index.return=TRUE)$ix s = c(ind2,ind.ix) }else{res=y corr = as.vector(crossprod(res,X)) ind.ix = sort.int(abs(corr),decreasing=TRUE,index.return=TRUE)$ix s = ind.ix } size = A3 IND = s[1:(size+p.g)] p.ind = length(IND) id = sum(2^(3*log(ind2))) id.ind = which(id==ID.obj) leng = length(id.ind) if(leng==0){ ID.obj = c(ID.obj,id) OBJ = c(OBJ,curr) GAM= cbind(GAM,gam) } } gam.pr = GAM.total[,which.max(OBJ.total)] obj.pr = max(OBJ.total) ind2.pr = which(gam.pr==1) if(verb==TRUE){ print("#################################") curr = ind_fun(X[,ind2],y,n,p,tuning) + model(ind2,p) print("Inverse Temperature");print(tem[it]);print("The Selected Variables in the Searched MAP Model"); print(ind2.pr);print("The Evaluated Object Value at the Searched MAP Model");print(obj.pr); print("Current Model");print(ind2); print("The Evaluated Object Value at the Current Model");print(curr); print("The Number of Total Searched Models"); print(length(unique(OBJ.total))) } } time0 =proc.time()-pmt0 print(time0) rm(OBJ.p0);rm(C.p) rm(OBJ.m0);rm(C.m) gam = GAM.total[,which.max(OBJ.total)] ind2 = which(gam==1) ind.total = which(OBJ.total> -100000) OBJ.fin = unique(OBJ.total[ind.total]) w = length(OBJ.fin) time.fin = rep(0,w) GAM.fin = matrix(0,p,w);GAM.fin[,1] = GAM.total[,which(OBJ.total==OBJ.fin[1])[1]] for(i in 2:length(OBJ.fin)){ GAM.fin[,i] = GAM.total[,which(OBJ.total==OBJ.fin[i])[1]] } rm(GAM.total);rm(OBJ.total) const = sum(exp(OBJ.fin-max(OBJ.fin))) posterior = exp(OBJ.fin-max(OBJ.fin))/const total.size = length(OBJ.fin) m = max(OBJ.fin) ind.m0 = which.max(OBJ.fin) gam = GAM.fin[,ind.m0] ind2 = which(gam==1);p.g = sum(gam) GAM.fin0 = cbind(GAM.fin0,GAM.fin) OBJ.fin0 = c(OBJ.fin0,OBJ.fin) } print("#################################") print("Post-process starts") print("#################################") OBJ.fin1 = unique(OBJ.fin0) w = length(OBJ.fin1) time.fin = rep(0,w) GAM.fin1 = Matrix(0,p,w,sparse=TRUE);GAM.fin1[,1] = GAM.fin0[,which(OBJ.fin0==OBJ.fin1[1])[1]] for(i in 2:w){ GAM.fin1[,i] = GAM.fin0[,which(OBJ.fin0==OBJ.fin1[i])[1]] # time.fin[i] = time.total[which(OBJ.total==OBJ.fin[i])[1]] } rm(GAM.fin0) GAM = GAM.fin1 OBJ = OBJ.fin1 print("Done!") return(list(GAM = GAM,OBJ = OBJ, tuning=tuning)) }
/scratch/gouwar.j/cran-all/cranData/BayesS5/R/S5.R
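A hedged usage sketch for S5 with its defaults (piMoM coefficient prior, Bernoulli-Uniform model prior); the simulated design is arbitrary, and result() is the summary function defined at the end of this package:

library(BayesS5)
set.seed(1)
## small synthetic regression: 3 active variables out of p = 100
n <- 100; p <- 100
X <- matrix(rnorm(n * p), n, p)
y <- X[, 1] - X[, 2] + 2 * X[, 3] + rnorm(n)
fit <- S5(X, y, ITER = 20, S = 20, C0 = 2)
res <- result(fit)
res$hppm                       # indices in the searched MAP model
round(res$marg.prob[1:10], 3)  # marginal inclusion probabilities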
S5_additive <- function(X, y, K = 5, model, tuning = 0.5*nrow(X), tem, ITER=20,S=30, C0=5, verbose=TRUE){ requireNamespace("splines2") requireNamespace("Matrix") n = nrow(X) p = ncol(X) #y = y -mean(y) tau = tuning g = 1 Matrix = Matrix::Matrix if(missing(tem)){tem = seq(0.4,1,length.out=30)^2} ################################################## ind_fun = BayesS5::ind_fun_NLfP #ind_fun = ind_fun_NLfP index = function(j){ a = (K*(j-1)+2):(K*j+1) return(a) } #assign("index", index, .GlobalEnv) index.tot = function(ind2){ ind = sapply(ind2,index)#;a[ind] = 1 return(as.vector(ind)) } screening = function(j,phi,res){ ind3 = index.tot(j) fit = solve(crossprod(phi[,c(1,ind3)])+0.0001*diag(K+1))%*%crossprod(phi[,c(1,ind3)],res) fit.f = phi[,c(1,ind3)]%*%fit a = crossprod(fit.f - mean(fit.f)) return(a) } ########################################################### if(missing(model)){ print("The model prior is unspecified. The default is Bernoulli_Uniform") model = BayesS5::Bernoulli_Uniform } A3 = S; r0=1 verb = verbose P0 = tcrossprod(rep(1,n))/n phi0 = matrix(0,n,K*p) Knots = matrix(0,p,2) colnames(Knots) = c("Lower","Upper") for(j in 1:p){ Knots[j, ] = c(min(X[,j])-1.0,max(X[,j])+1.0) phi0[,(K*(j-1)+1):(K*j)] = splines2::bSpline(X[,j], df = K, Boundary.knots = Knots[j, ]) } phi = cbind(rep(1,n),phi0) IP = diag(n) - tcrossprod(rep(1,n))/n IP.phi = IP%*%phi #assign("IP", IP, .GlobalEnv) #assign("IP.phi", IP.phi, .GlobalEnv) ind2 = sample(1:p,2) #ind2 = true gam = rep(0,p); gam[ind2]=1 ind2 = which(gam==1) GAM.screen = Matrix(0,p,50000,sparse=TRUE) ID.screen = rep(-100000000,50000) save = rep(0,p) p.g = length(ind2) ind3 = index.tot(ind2) if(p.g>0){ fit = solve(crossprod(phi[,c(1,ind3)])+0.001*diag(p.g*K+1))%*%crossprod(phi[,c(1,ind3)],y) res = y-phi[,c(1,ind3)]%*%fit}else{res=y} save = sapply(1:p,screening,phi,res) ind.ix = sort.int(save,decreasing=TRUE,index.return=TRUE)$ix corr = as.vector(cor(res,X)) ind.l = sort.int(abs(corr),decreasing=TRUE,index.return=TRUE)$ix IND = c(ind2,union(ind.ix[1:S],ind.l[1:5])) IND = unique(IND) p.ind = length(IND) ID.screen[1] = sum(5^(log(ind2))) GAM.screen[IND,1] = 1 ##### j = 1; NNN = 10000 kk = stats::rchisq(NNN,K-1) aa = log(mean(exp(-1/kk))) C.prior3 = rep(0,p) C.g2 = rep(0,p) for(j in 1:p){ C.g2[j] = -0.5*log(det(crossprod(phi[,(K*(j-1)+2):(K*j+1)]))) C.prior3[j] = C.g2[j] + aa } # assign("C.prior3", C.prior3, .GlobalEnv) # assign("C.g2", C.prior3, .GlobalEnv) #assign("tau", tau, .GlobalEnv) #assign("g", g, .GlobalEnv) aa = 0; j = 1; NNN = 10000 for(h in 1:NNN){ kk = stats::rnorm(K)*sqrt(g) aa = aa + exp(-tau*n/crossprod(IP.phi[,(K*(j-1)+2):(K*j+1)]%*%kk)) } C.prior1 = log(aa/NNN) C.prior1 = as.numeric(C.prior1) # assign("C.prior1", C.prior1, .GlobalEnv) aa = 0; j = 1; NNN = 10000 for(h in 1:NNN){ kk = stats::rcauchy(K) aa = aa + exp(-tau*n/crossprod(IP.phi[,(K*(j-1)+2):(K*j+1)]%*%kk)) } C.prior2 = log(aa/NNN) C.prior2 = as.numeric(C.prior2) #assign("C.prior2", C.prior2, .GlobalEnv) pmt = proc.time() print("#################################") print("S5 starts running") IT = length(tem) IT.seq = rep(ITER,IT) curr = -1000000000 tryCatch({ curr = ind_fun(ind2, y, phi, n, p, K, IP.phi, C.prior1, tuning) + model(ind2, p ) },error=function(e){}) p.g=sum(gam) GAM.fin0 = NULL OBJ.fin0 = NULL for(uu in 1:C0){ C.p = rep(-1000000000,p) C.m = rep(-1000000000,p) GAM = gam OBJ = curr obj = OBJ p.g=sum(gam) C.p = rep(-100000000,p) for(i in (p.g+1):p.ind){ j=IND[i] gam.p = gam;gam.p[j]=1;ind.p=which(gam.p==1) int = -10000000 int = ind_fun(ind.p, y, phi, n, p, K, IP.phi, 
C.prior1, tuning) + model(ind.p, p ) obj.p = c(int) if(is.na(obj.p)==TRUE){obj.p = -100000000} C.p[j] = obj.p } C.m = rep(-100000000,p) IND.m = ind2 p.ind.m = length(IND.m) for(i in 1:p.g){ j=ind2[i] gam.m = gam;gam.m[j]=0;ind.m=which(gam.m==1) int = -10000000 int = ind_fun(ind.m, y, phi, n, p, K, IP.phi, C.prior1, tuning) + model(ind.m, p) obj.m = c(int) if(is.na(obj.m)==TRUE){obj.m = -100000000} C.m[j] = obj.m } p.g = sum(gam) OBJ.m0 = matrix(C.m,p,1) OBJ.p0 = matrix(C.p,p,1) ID = sum(5^(log(ind2))) ID.obj = ID it=1 #GAM.total = matrix(0,p,50000) GAM.total = Matrix(0,p,50000,sparse=TRUE) OBJ.total = rep(-100000000,50000) GAM.total[,1] = gam OBJ.total[1] = obj time.total = rep(0,50000) it=1 INT = NULL pmt0 = proc.time() for(it in 1:IT){ IT0 = IT.seq[it] pq=0 for(iter in 1:IT0){ id = sum(5^(log(ind2))) id.ind = which(id==ID) leng = length(id.ind) if(leng==0){ ID = c(ID,id) C.p = rep(-100000000,p) for(i in (p.g+1):p.ind){ j=IND[i] gam.p = gam;gam.p[j]=1;ind.p=which(gam.p==1) int = -10000000 #tryCatch({ int = ind_fun(ind.p,y, phi, n, p, K, IP.phi, C.prior1, tuning) + model(ind.p, p) #},error=function(e){}) obj.p = c(int) if(is.na(obj.p)==TRUE){obj.p = -100000000} C.p[j] = obj.p ind.total = which(OBJ.total< -90000000)[1] OBJ.total[ind.total] = obj.p GAM.total[,ind.total] = gam.p time.total[ind.total] = (proc.time()-pmt0)[3] } p.g = sum(gam) C.m = rep(-100000000,p) IND.m = ind2 p.ind.m = length(IND.m) for(i in 1:p.g){ j=ind2[i] gam.m = gam;gam.m[j]=0;ind.m=which(gam.m==1) int = -10000000 #tryCatch({ int = ind_fun(ind.m, y, phi, n, p, K, IP.phi, C.prior1, tuning) + model(ind.m,p) #},error=function(e){}) obj.m = c(int) if(is.na(obj.m)==TRUE){obj.m = -100000000} C.m[j] = obj.m ind.total = which(OBJ.total< -90000000)[1] OBJ.total[ind.total] = obj.m GAM.total[,ind.total] = gam.m time.total[ind.total] = (proc.time()-pmt0)[3] } OBJ.p0 = cbind(OBJ.p0,C.p) OBJ.m0 = cbind(OBJ.m0,C.m) }else{ pq= pq+1 C.p = OBJ.p0[,(id.ind[1])];C.m = OBJ.m0[,(id.ind[1])] } prop = exp(tem[it]*(C.p-max(C.p))) sample.p = sample(1:length(prop),1,prob=prop) obj.p = C.p[sample.p] #obj.p prop = exp(tem[it]*(C.m-max(C.m))) sample.m = sample(1:length(prop),1,prob=prop) obj.m = C.m[sample.m] #obj.m l = 1/(1+exp(tem[it]*obj.m-tem[it]*obj.p)) if(l>runif(1)){ gam[sample.p]=1;obj = obj.p;curr=obj.p }else{ gam[sample.m]=0;obj = obj.m;curr=obj.m } ind2 = which(gam==1) p.g = sum(gam) #int = -100000000 #tryCatch({ # int = ind_fun(ind2) + model(ind2) #},error=function(e){}) #curr = int #jjj = sample(1:3,1) #if(jjj==1){ #pmt0 = proc.time() id = sum(5^(log(ind2))) id.ind = which(id==ID.screen) leng = length(id.ind) if(leng==0){ jjj = sample(1:2,1) if(jjj==1){ save = rep(0,p) ind3 = index.tot(ind2) if(p.g>0){ fit = solve(crossprod(phi[,c(1,ind3)])+0.01*diag(p.g*K+1))%*%crossprod(phi[,c(1,ind3)],y) res = y-phi[,c(1,ind3)]%*%fit}else{res=y} save = sapply(1:p,screening,phi,res) ind.ix = sort.int(save,decreasing=TRUE,index.return=TRUE)$ix corr = as.vector(cor(res,X)) ind.l = sort.int(abs(corr),decreasing=TRUE,index.return=TRUE)$ix IND = c(ind2,union(ind.ix[1:S],ind.l[1:5])) p.ind = length(IND) ind.id = which(ID.screen< 0)[1] ID.screen[ind.id] = id GAM.screen[IND,ind.id] = 1} }else{ IND = which(GAM.screen[,id.ind[1]]==1) p.ind = length(IND) } #print(proc.time()-pmt0) #} id = sum(5^(log(ind2))) id.ind = which(id==ID.obj) leng = length(id.ind) if(leng==0){ ID.obj = c(ID.obj,id) OBJ = c(OBJ,curr) GAM= cbind(GAM,gam) } } if(verbose==TRUE){ print("#################################") gam.pr = GAM.total[,which.max(OBJ.total)] obj.pr = max(OBJ.total) 
ind2.pr = which(gam.pr==1) print("Inverse Temperature");print(tem[it]);print("The Selected Variables in the Searched MAP Model"); print(ind2.pr);print("The Evaluated Object Value at the Searched MAP Model");print(obj.pr); print("Current Model");print(ind2); print("The Evaluated Object Value at the Current Model");print(curr); print("Total Searched Variables"); print(IND) print("The Number of Total Searched Models"); print(length(unique(OBJ.total))) #print(length(which(OBJ.total> -10000))) print(paste("tuning parameter = ", tau)); } } time0 = proc.time()-pmt0 print(time0) rm(OBJ.p0);rm(C.p) rm(OBJ.m0);rm(C.m) gam = GAM.total[,which.max(OBJ.total)] ind2 = which(gam==1) ind.total = which(OBJ.total> -100000000) OBJ.fin = unique(OBJ.total[ind.total]) w = length(OBJ.fin) time.fin = rep(0,w) GAM.fin = matrix(0,p,w);GAM.fin[,1] = GAM.total[,which(OBJ.total==OBJ.fin[1])[1]] if(w>1){ for(i in 2:length(OBJ.fin)){ GAM.fin[,i] = GAM.total[,which(OBJ.total==OBJ.fin[i])[1]] } } rm(GAM.total);rm(OBJ.total) const = sum(exp(OBJ.fin-max(OBJ.fin))) posterior = exp(OBJ.fin-max(OBJ.fin))/const total.size = length(OBJ.fin) m = max(OBJ.fin) ind.m0 = which.max(OBJ.fin) gam = GAM.fin[,ind.m0] ind2 = which(gam==1);p.g = sum(gam) GAM.fin0 = cbind(GAM.fin0,GAM.fin) OBJ.fin0 = c(OBJ.fin0,OBJ.fin) #ind2 = true #gam = rep(0,p); #gam[ind2]=1 } print("#################################") print("Post-process starts") print("#################################") OBJ.fin1 = unique(OBJ.fin0) w = length(OBJ.fin1) time.fin = rep(0,w) GAM.fin1 = Matrix(0,p,w,sparse=TRUE);GAM.fin1[,1] = GAM.fin0[,which(OBJ.fin0==OBJ.fin1[1])[1]] if(w>1){ for(i in 2:w){ GAM.fin1[,i] = GAM.fin0[,which(OBJ.fin0==OBJ.fin1[i])[1]] } } rm(GAM.fin0) GAM = GAM.fin1 OBJ = OBJ.fin1 print("Done!") gam.map = GAM[, which.max(OBJ)] ind.map = which(gam.map==1);p.map = length(ind.map) POST_model = exp(OBJ - max(OBJ))/sum(exp(OBJ - max(OBJ))) POST_incl_prob = GAM%*%POST_model hppm = 1/sum(exp(OBJ - max(OBJ))) ind.MAP = which(gam.map == 1) print(ind.MAP) print("# of Searched Models by S5") print(length(OBJ)) ind.marg = which(as.vector(POST_incl_prob) > 0.5) return(list(GAM = GAM, OBJ = OBJ, phi = phi, Knots= Knots, K = K, post = POST_model, marg.prob = as.vector(POST_incl_prob), ind.hppm = ind.MAP, ind.marg = ind.marg, hppm.prob = hppm, tuning=tau )) }
/scratch/gouwar.j/cran-all/cranData/BayesS5/R/S5_additive.R
S5_parallel = function(NC, X, y, ind_fun, model, tuning, tem, ITER = 20, S = 20, C0 = 2){
  requireNamespace("snowfall")
  requireNamespace("Matrix")
  n = nrow(X)
  p = ncol(X)
  y = y - mean(y)
  ## alias the functions used below, since requireNamespace() does not attach the packages
  Matrix = Matrix::Matrix
  sfInit = snowfall::sfInit
  sfLibrary = snowfall::sfLibrary
  sfExportAll = snowfall::sfExportAll
  sfLapply = snowfall::sfLapply
  sfStop = snowfall::sfStop
  if(missing(tem)){tem = seq(0.4, 1, length.out = 20)^2}
  if(missing(ind_fun)){
    print("The prior on regression coefficients is unspecified. The default is piMoM")
    ind_fun = BayesS5::ind_fun_pimom
    ## tuning parameter selection for nonlocal priors
    tuning <- BayesS5::hyper_par(type = "pimom", X, y, thre = p^-0.5)
    print("The chosen hyperparameter tau")
    print(tuning)
  }
  if(missing(model)){
    print("The model prior is unspecified. The default is Bernoulli_Uniform")
    model = BayesS5::Bernoulli_Uniform
  }
  sfInit(parallel = TRUE, cpus = NC)
  sfLibrary(Matrix)
  sfExportAll()
  pmt = proc.time()
  wrapper = function(i){
    fit = BayesS5::S5(X = X, y = y, ind_fun = ind_fun, model = model, tuning = tuning,
                      tem = tem, ITER = ITER, S = S, C0 = C0, verbose = FALSE)
    return(fit)
  }
  out = sfLapply(1:NC, wrapper)
  print(proc.time() - pmt)
  sfStop()
  ## concatenate the objective values and model indicators from the NC runs
  OBJ = NULL
  IND = NULL
  for(i in 1:NC){
    OBJ = c(OBJ, out[[i]]$OBJ)
    IND = c(IND, length(out[[i]]$OBJ))
  }
  IND = c(0, IND, 0)
  GAM = Matrix(0, p, length(OBJ), sparse = TRUE)
  for(i in 1:NC){
    gam = out[[i]]$GAM
    ind = (sum(IND[1:i]) + 1):(sum(IND[1:(i + 1)]))
    GAM[, ind] = gam
  }
  ## deduplicate models that were found by more than one run
  GAM.fin0 = GAM; OBJ.fin0 = OBJ
  OBJ.fin1 = unique(OBJ.fin0)
  w = length(OBJ.fin1)
  GAM.fin1 = Matrix(0, p, w, sparse = TRUE)
  GAM.fin1[, 1] = GAM.fin0[, which(OBJ.fin0 == OBJ.fin1[1])[1]]
  for(i in 2:w){
    GAM.fin1[, i] = GAM.fin0[, which(OBJ.fin0 == OBJ.fin1[i])[1]]
  }
  return(list(GAM = GAM.fin1, OBJ = OBJ.fin1, tuning = tuning))
}
/scratch/gouwar.j/cran-all/cranData/BayesS5/R/S5_parallel.R
SSS = function(X,y,ind_fun,model,tuning,N=1000,C0=1,verbose=TRUE){ n = nrow(X) p = ncol(X) # assign("p",p,.GlobalEnv) # assign("n",n,.GlobalEnv) y = y -mean(y) requireNamespace("Matrix") requireNamespace("abind") abind = abind::abind if(missing(ind_fun)){ print("The prior on regression coefficients is unspecified. The default is piMoM") ind_fun = BayesS5::ind_fun_pimom tuning <- BayesS5::hyper_par(type="pimom",X,y,thre = p^-0.5) # tuning parameter selection for nonlocal priors print("The choosen hyperparameter tau") print(tuning) #assign("tuning", tuning, .GlobalEnv) } if(missing(model)){ print("The model prior is unspecified. The default is Bernoulli_Uniform") model = BayesS5::Bernoulli_Uniform } r0=1 # auxiilary value verb = verbose # set verbose a0=0.01;b0=0.01 # hyperparameter for inverse gamma prior on sigma^2 tau = 1 # tau #require(Matrix) #library(abind) ### initialize the model sam = sample(1:p,3) gam = rep(0,p)#;gam[sam]=1;curr = ind_fun(sam) + model(sam) ind2= which(gam==1) p.g=sum(gam) ### curr = ind_fun(X[,ind2],y,n,p,tuning) + model (ind2,p) # evaluate the objective value on the initial model GAM.fin0 = NULL # to save the binary vector of the searched models OBJ.fin0 = NULL # to save the objective values of the searched models g = tuning # set g for Zellner's g-prior B = tuning # set tau for piMoM or peMoM prior GAM = gam OBJ = curr obj = OBJ ### search the neighborhood of the initial model C.p = rep(-1000000,p) IND = (1:p)[-ind2] for(i in 1:(p-p.g)){ j=IND[i] gam.p = gam;gam.p[j]=1;ind.p=which(gam.p==1) int = ind_fun(X[,ind.p],y,n,p,tuning) +model(ind.p,p) obj.p = c(int) if(is.na(obj.p)==TRUE){obj.p = -1000000} C.p[j] = obj.p } p.g = sum(gam) C.m = rep(-1000000,p) IND.m = ind2 p.ind.m = length(IND.m) for(i in 1:p.g){ j=ind2[i] gam.m = gam;gam.m[j]=0;ind.m=which(gam.m==1) int = ind_fun(X[,ind.m],y,n,p,tuning) +model(ind.m,p) obj.m = c(int) if(is.na(obj.m)==TRUE){obj.m = -1000000} C.m[j] = obj.m } C.s = matrix(-1000000,round(n/2),p) for(i in 1:p.g){ for(w in 1:(p-p.g)){ j=ind2[i] u = (1:p)[-ind2][w] gam.s = gam;gam.s[j]=0;gam.s[u]=1;ind.s=which(gam.s==1) int = ind_fun(X[,ind.s],y,n,p,tuning) +model(ind.s,p) obj.s = c(int) if(is.na(obj.m)==TRUE){obj.m = -1000000} C.s[i,u] = obj.s } } print("#################################") print("SSS starts running") ### creat the arrays to save the neighborhood of visited models d0 = 200 OBJ.s0 = array(-1000000,dim=c(round(n/2),p,d0));OBJ.s0[,,1] = C.s OBJ.m0 = matrix(-1000000,p,d0);OBJ.m0[,1] = C.m OBJ.p0 = matrix(-1000000,p,d0);OBJ.p0[,1] = C.p ID = sum(2^(3*log(ind2))) # set the id of the initial model ############################################# Start !!! for(uu in 1:C0){ # repeat the SSS algorithm C0 times GAM.total = Matrix(0,p,1000000,sparse=TRUE) OBJ.total = rep(-1000000,1000000) GAM.total[,1] = gam OBJ.total[1] = obj time.total = rep(0,1000000) pmt0 = proc.time() for(iter in 1:N){ id = sum(2^(3*log(ind2))) # calculate the id of the current model id.ind = which(id==ID) # check whether the current model has already been visited using the id leng = length(id.ind) # how many times visited? 
if(leng==0){ ### if the current model has not been visited, search the neighborhood of the current model ID = c(ID,id) C.p = rep(-1000000,p) IND = (1:p)[-ind2] for(i in 1:(p-p.g)){ j=IND[i] gam.p = gam;gam.p[j]=1;ind.p=which(gam.p==1) int = ind_fun(X[,ind.p],y,n,p,tuning) +model(ind.p,p) obj.p = c(int) if(is.na(obj.p)==TRUE){obj.p = -100000} C.p[j] = obj.p ind.total = which(OBJ.total< -100000)[1] OBJ.total[ind.total] = obj.p GAM.total[,ind.total] = gam.p time.total[ind.total] = (proc.time()-pmt0)[3] } p.g = sum(gam) C.m = rep(-1000000,p) IND.m = ind2 p.ind.m = length(IND.m) for(i in 1:p.g){ j=ind2[i] gam.m = gam;gam.m[j]=0;ind.m=which(gam.m==1) int = ind_fun(X[,ind.m],y,n,p,tuning) +model(ind.m,p) obj.m = c(int) if(is.na(obj.m)==TRUE){obj.m = -1000000} C.m[j] = obj.m ind.total = which(OBJ.total< -100000)[1] OBJ.total[ind.total] = obj.m GAM.total[,ind.total] = gam.m time.total[ind.total] = (proc.time()-pmt0)[3] } C.s = matrix(-1000000,round(n/2),p) IND.s = ind2 for(i in 1:p.g){ for(w in 1:(p-p.g)){ j=ind2[i] u = (1:p)[-ind2][w] gam.s = gam;gam.s[j]=0;gam.s[u]=1;ind.s=which(gam.s==1) int = ind_fun(X[,ind.s],y,n,p,tuning) +model(ind.s,p) obj.s = c(int) if(is.na(obj.m)==TRUE){obj.m = -1000000} C.s[i,u] = obj.s ind.total = which(OBJ.total< -100000)[1] OBJ.total[ind.total] = obj.s GAM.total[,ind.total] = gam.s time.total[ind.total] = (proc.time()-pmt0)[3] } } d = which(apply(OBJ.p0,2,mean)< (min(OBJ.p0[,1])+1))[1] if(is.na(d)==TRUE){ OBJ.s0 = abind(OBJ.s0,C.s) OBJ.p0 = cbind(OBJ.p0,C.p) OBJ.m0 = cbind(OBJ.m0,C.m) }else{ OBJ.s0[,,d] = C.s OBJ.p0[,d] = C.p OBJ.m0[,d] = C.m } ### }else{ ### if the current model has already been visited, call the saved neighborhood C.p = OBJ.p0[,(id.ind[1])];C.m = OBJ.m0[,(id.ind[1])];C.s = OBJ.s0[,,(id.ind[1])] } ### based on the neighborhood, choose a next model to move by randomly sampling proportion to the posterior probability prop = exp(C.s-max(C.s)) prop[which(is.na(prop))] = 0 if(sum(prop)<0.1){prop[1]=1} sample.s = sample(1:length(prop),1,prob=prop) obj.s = C.s[sample.s] prop = exp(C.p-max(C.p)) prop[which(is.na(prop))] = 0 if(sum(prop)<0.1){prop[1]=1} sample.p = sample(1:length(prop),1,prob=prop) obj.p = C.p[sample.p] prop = exp(C.m-max(C.m)) prop[which(is.na(prop))] = 0 if(sum(prop)<0.1){prop[1]=1} sample.m = sample(1:length(prop),1,prob=prop) obj.m = C.m[sample.m] l1 = 1/(1+exp(obj.m-obj.p)+exp(obj.s-obj.p)) l2 = 1/(1+exp(obj.p-obj.m)+exp(obj.s-obj.m)) l3 = 1-l1 - l2 if(l3<0){l3=0} z = sample(1:3,1,prob=c(l1,l2,l3)) if(z==1){gam[sample.p]=1;obj = obj.p;curr=obj.p} if(z==2){gam[sample.m]=0;obj = obj.m;curr=obj.m} if(z==3){ wh = which(obj.s==C.s,arr.ind=TRUE) gam[ind2[wh[1]]]=0;gam[wh[2]] = 1 obj = obj.s;curr=obj.s } ### ind2 = which(gam==1) # generate the binary variable for the new model p.g = sum(gam) # the size of the new model gam.pr = GAM.total[,which.max(OBJ.total)] # the MAP model among the searched models obj.pr = max(OBJ.total) # its objective value ind2.pr = which(gam.pr==1) # its size if(verb==TRUE&iter%%50==0){ print("#################################") curr = ind_fun(X[,ind2],y,n,p,tuning) + model(ind2,p) # print the objective value of the current model print("# of iterations");print(iter);print("The Selected Variables in the Searched MAP Model");print(ind2.pr);print("The Evaluated Object Value at the Searched MAP Model");print(obj.pr); print("Current Model");print(ind2); print("The Evaluated Object Value at the Current Model");print(curr); print("The Number of Total Searched Models");print(length(unique(OBJ.total))) } } time0 
=proc.time()-pmt0 print(time0) ### after a single SSS algorithm runs, summarize the searched models and their posterior model probability ind.total = which(OBJ.total> -100000) OBJ.fin = unique(OBJ.total[ind.total]) # generate the unique objective values from models that have been visited multiple times w = length(OBJ.fin) time.fin = rep(0,w) GAM.fin = matrix(0,p,w) for(i in 1:length(OBJ.fin)){ GAM.fin[,i] = GAM.total[,which(OBJ.total==OBJ.fin[i])[1]] # generate the unique binary variables of models from models that have been visited multiple times } const = sum(exp(OBJ.fin-max(OBJ.fin))) # calculate the normalizing constant posterior = exp(OBJ.fin-max(OBJ.fin))/const # calculate the posterior model probability total.size = length(OBJ.fin) # the total number of models that are searched by the SSS m = max(OBJ.fin) # the objective value of the MAP model ind.m0 = which.max(OBJ.fin) gam = GAM.fin[,ind.m0] # the binary variable of the MAP model ind2 = which(gam==1);p.g = sum(gam) # its index and size GAM.fin0 = cbind(GAM.fin0,GAM.fin) # save the binary variables OBJ.fin0 = c(OBJ.fin0,OBJ.fin) # save the the objective values corresponding to the binary variables } print("#################################") print("Post-process starts") print("#################################") ### after C0 number of single SSS algorithm runs, summarize the searched models and their posterior model probability OBJ.fin1 = unique(OBJ.fin0) w = length(OBJ.fin1) time.fin = rep(0,w) GAM.fin1 = Matrix(0,p,w,sparse=TRUE);GAM.fin1[,1] = GAM.fin0[,which(OBJ.fin0==OBJ.fin1[1])[1]] for(i in 2:w){ GAM.fin1[,i] = GAM.fin0[,which(OBJ.fin0==OBJ.fin1[i])[1]] # time.fin[i] = time.total[which(OBJ.total==OBJ.fin[i])[1]] } rm(GAM.fin0) GAM = GAM.fin1 # the binary vaiables of searched models OBJ = OBJ.fin1 # the the objective values corresponding to the binary variables print("Done!") return(list(GAM = GAM,OBJ = OBJ, tuning = tuning) ) }
/scratch/gouwar.j/cran-all/cranData/BayesS5/R/SSS.R
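The move step in SSS above picks among the sampled addition, deletion, and swap candidates with softmax weights over their log objective values o_+, o_-, o_s:

$$
\ell_1 = \frac{e^{o_+}}{e^{o_+} + e^{o_-} + e^{o_s}}, \qquad
\ell_2 = \frac{e^{o_-}}{e^{o_+} + e^{o_-} + e^{o_s}}, \qquad
\ell_3 = 1 - \ell_1 - \ell_2;
$$

the code computes l1 = 1/(1 + e^{o_- - o_+} + e^{o_s - o_+}), which is the same quantity written in an overflow-safer form.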
## Uniform model prior: every model receives the same log prior, namely 0
Uniform <- function(ind, p){ 0 }
/scratch/gouwar.j/cran-all/cranData/BayesS5/R/Uniform.R
hyper_par <- function(type,X,y,thre){ n =nrow(X) p =ncol(X) if(missing(thre)){thre = p^-0.5} if(type=="pimom"){ betas = matrix(0,3,50000) for(k in 1:50000){ sam = sample(1:p,3) ind = sample(1:n,n) betas[,k] = as.vector(solve(crossprod(X[ind,sam]))%*%crossprod(X[ind,sam],y)) } res=y corr = as.vector(cor(res,X)) ind.ix = sort.int(abs(corr),decreasing=TRUE,index.return=TRUE)$ix s = ind.ix[1:3] #p = p+1 beta.hat =solve(crossprod(X[,s]))%*%crossprod(X[,s],y) sig.hat = crossprod(y - X[,s]%*%beta.hat)/n betas=as.vector(betas) tau.cand = seq(0.1,(sd(y)+0.1),length.out=300)^2 pro = rep(0,300) for(k in 1:300){ tau = tau.cand[k] den = function(x){tau^0.5*x^-2*exp(-1*tau/(x^2) )/gamma(1/2)} den.null1 = density(betas) data = list(x=den.null1$x,y=den.null1$y) den.null = approxfun(data[[1]], data[[2]], rule=1,method = "linear") f = function(x){den(x) - den.null(x)} tryCatch({ th=1 a = uniroot(f,interval = c(0.001,max(betas))) th = a$root loc = integrate(den.null,lower = th, upper =max(betas)-0.001)$value nonloc = integrate(den,lower = 0, upper = th)$value pro[k] = loc + nonloc}, error=function(e){}) } tau=1 B = tau.cand[which.min((pro-thre)^2)] } if(type=="pemom"){ betas = matrix(0,3,50000) for(k in 1:50000){ sam = sample(1:p,3) ind = sample(1:n,n) betas[,k] = as.vector(solve(crossprod(X[ind,sam]))%*%crossprod(X[ind,sam],y)) } res=y corr = as.vector(cor(res,X)) ind.ix = sort.int(abs(corr),decreasing=TRUE,index.return=TRUE)$ix s = ind.ix[1:3] #p = p+1 beta.hat =solve(crossprod(X[,s]))%*%crossprod(X[,s],y) sig.hat = crossprod(y - X[,s]%*%beta.hat)/n betas=as.vector(betas) tau.cand = seq(0.1,(sd(y)+0.1),length.out=300)^2 pro = rep(1,300) for(k in 1:300){ tau = tau.cand[k] den = function(x){ sqrt(2*pi*sig.hat)^-1*exp( -1*tau/(x^2) - x^2/(sig.hat*tau) + sqrt(2/sig.hat) ) } den.null1 = density(betas) data = list(x=den.null1$x,y=den.null1$y) den.null = approxfun(data[[1]], data[[2]], rule=1,method = "linear") #curve(den.null,-5,5) #curve(den,add=T,col="red") f = function(x){den(x) - den.null(x)} th=1 tryCatch({ a = uniroot(f,interval = c(0.001,max(betas))) th = a$root loc = integrate(den.null,lower = th, upper =max(betas)-0.001)$value nonloc = integrate(den,lower = 0, upper = th)$value pro[k] = loc + nonloc}, error=function(e){}) } B = tau.cand[which.min(abs(pro-thre))] } return(B) }
/scratch/gouwar.j/cran-all/cranData/BayesS5/R/hyper_par.R
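The density matched inside hyper_par for type = "pimom" (the den function above, with r0 = 1) is the inverse-moment (piMoM) prior

$$
\pi(\beta \mid \tau) = \frac{\tau^{1/2}}{\Gamma(1/2)}\, |\beta|^{-2} \exp\!\left(-\frac{\tau}{\beta^{2}}\right),
$$

and the grid search returns the tau whose overlap with the empirical null density of spurious least-squares coefficients is closest to the threshold thre (default p^{-1/2}).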
ind_fun_NLfP =function(ind2, y, phi, n, p, K, IP.phi, C.prior1, tuning){ tau = g = tuning a0 = b0 = 1 # assign("g", g, .GlobalEnv) # assign("tau", tau, .GlobalEnv) #assign("a0", a0, .GlobalEnv) #assign("b0", b0, .GlobalEnv) index = function(j){ a = (K*(j-1)+2):(K*j+1) return(a) } index.tot = function(ind2){ #a = rep(0,K*p+1) ind = sapply(ind2,index)#;a[ind] = 1 return(as.vector(ind)) } nonlocal = function(j,beta){ ind = index(ind2[j]) return(1/crossprod(IP.phi[,ind]%*%beta[(K*(j-1)+1):(K*j)])) } obj01 = function(x2,ind2,tau,g,a0,b0){ p.g = length(ind2) if(p.g>0){ beta0 = x2[1] beta = x2[2:(p.g*K+1)] prec = x2[p.g*K+2] ind3 = index.tot(ind2) if(prec<0){prec=10^-5} a = 0.5*prec*crossprod(y-phi[,c(1,ind3)]%*%c(beta0,beta)) + 0.5*prec*crossprod(beta)/g + tau*sum(sapply(1:p.g,nonlocal,beta = beta))/prec*n b = -1*(a0-1)*log(prec) + b0*prec -0.5*(n+K*p.g)*log(prec) c = a+b }else{ prec = x2[(1+p.g)] if(prec<0){prec=10^-5} a = 0.5*prec*crossprod(y) b = -1*(a0-1)*log(prec) + b0*prec -0.5*n*log(prec) c = a+b } return(c) } J = function(x0,ind2,tau,g,a0,b0){ p.g = length(ind2) beta0 = x0[1] beta = x0[2:(p.g*K+1)] prec = x0[p.g*K+2] ind3 = index.tot(ind2) if(p.g>0){ a22 = matrix(0,p.g*K,p.g*K) a23 = rep(0,p.g*K) a33 = 0 for(k in 1:p.g){ j = ind2[k] ind = index.tot(j) ind1 = (K*(k-1)+1):(K*k) phi.beta = IP.phi[,ind]%*%beta[ind1] a22.1 = 8*tau*t(phi[,ind])%*%tcrossprod(phi.beta)%*%(phi[,ind])*as.numeric(crossprod(phi.beta))^-3/prec a22.2 = -2*tau*crossprod(IP.phi[,ind])*as.numeric(crossprod(phi.beta))^-2/prec a22[ind1,ind1] = a22.1 + a22.2 a23[ind1] = 2*tau*(crossprod(IP.phi[,ind])*as.numeric(crossprod(phi.beta))^-1)%*%beta[ind1]/(prec^2) a33 = a33 + 2*tau*as.numeric(crossprod(phi.beta))^-1/(prec^3) } u = matrix(0,K*p.g+2,K*p.g+2) b11 = prec*n b12 = prec*as.vector(prec*crossprod(phi[,ind3],rep(1,n))) b13 = -1*sum(y-phi[,c(1,ind3)]%*%c(beta0,beta)) b22 = prec*(crossprod(phi[,ind3]) + diag(K*p.g)/g) + a22 b23 = -1*crossprod(phi[,ind3],y-phi[,c(1,ind3)]%*%c(beta0,beta)) + beta/g + a23 b33 = (n/2+p.g*K/2+a0-1)*prec^-2 + a33 u[1,1] = b11 u[1,2:(p.g*K+1)] = u[2:(p.g*K+1),1] = b12 u[1,(p.g*K+2)] = u[(p.g*K+2),1] = b13 u[2:(p.g*K+1),(p.g*K+2)] = u[(p.g*K+2),2:(p.g*K+1)] = b23 u[2:(p.g*K+1),2:(p.g*K+1)] = b22 u[(p.g*K+2),(p.g*K+2)] = b33 }else{ u = diag(2) } return(u) } ind_fun1=function(ind2,tau,g,a0,b0){ #a0=0.01;b0=0.01 p.g=length(ind2) ind3 = index.tot(ind2) if(p.g >0){ fit = solve(crossprod(phi[,c(1,ind3)])+0.001*diag(p.g*K+1))%*%crossprod(phi[,c(1,ind3)],y) ress = crossprod(y-phi[,c(1,ind3)]%*%fit) prec0 = n/ress initial_x = c(fit,prec0) wrapper <- function(theta) obj01(theta,ind2,tau,g,a0,b0) #o <- optim(initial_x, wrapper,hessian=TRUE) o <- optim(initial_x, wrapper) f_x = o$value x0 = o$par Hess = J(x0,ind2,tau,g,a0,b0) det.J = 10^100 tryCatch({ det.J = determinant(Hess,logarithm = TRUE)$modulus },error=function(e){}) #; if(det.J<0){det.J = 100^100} int = -1*p.g*C.prior1 - 0.5*det.J - f_x - 0.5*K*p.g*log(g) }else{ int = -(0.5*n+a0)*log(crossprod(y-mean(y))/2+b0) } return(as.numeric(int)) } return(ind_fun1(ind2,tau,g,a0,b0)) }
/scratch/gouwar.j/cran-all/cranData/BayesS5/R/ind_fun_NLfP.R
ind_fun_g <- function(X.ind, y, n, p, tuning){
  g = tuning
  a0 = 0.01; b0 = 0.01
  tau = 1
  p.g = ncol(X.ind)
  if(length(p.g) == 0){p.g = 0}
  if(p.g > 1){
    #X0 = X[,ind2];QR=qr(X0)
    #ress = crossprod(qr.resid(QR, y))
    fit = solve(crossprod(X.ind)) %*% crossprod(X.ind, y)
    ress = crossprod(y - X.ind %*% fit)
    v = crossprod(y - mean(y))
    int = -0.5*p.g*log(1 + g) - 0.5*(n - 1)*log(1 + g*(ress/v))
  }else{
    if(p.g == 1){
      fit = as.numeric(as.numeric(sum(X.ind*y))/as.numeric(crossprod(X.ind)))
      ress = crossprod(y - X.ind*fit)
      v = crossprod(y - mean(y))
      int = -0.5*p.g*log(1 + g) - 0.5*(n - 1)*log(1 + g*(ress/v))
    }else{
      if(p.g == 0){
        int = -0.5*(n - 1)*log(1 + g)
      }
    }
  }
  return(int)
}
/scratch/gouwar.j/cran-all/cranData/BayesS5/R/ind_fun_g.R
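As coded above, with SSE_gamma the residual sum of squares of model gamma and SST the total sum of squares (so SSE_gamma/SST = 1 - R^2_gamma), the log marginal is

$$
\log m(y \mid \gamma) = -\frac{p_\gamma}{2}\log(1+g) - \frac{n-1}{2}\log\!\Big(1 + g\,\frac{\mathrm{SSE}_\gamma}{\mathrm{SST}}\Big).
$$

This differs from the usual Zellner g-prior expression, \tfrac{n-1-p_\gamma}{2}\log(1+g) - \tfrac{n-1}{2}\log(1 + g(1-R^2_\gamma)), only by \tfrac{n-1}{2}\log(1+g), a constant shared by all models, so model comparisons are unaffected.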
ind_fun_pemom <- function(X.ind,y,n,p,tuning){ B = tuning obj01 = function(x2,B,y,X.ind,a0,b0){ a0=0.01;b0=0.01 p.g = ncol(X.ind) if(length(p.g)==0){p.g=0} tau=1 if(p.g>1){ beta = x2[1:p.g] prec = x2[(1+p.g)] if(prec<0){prec=10^-5} a = 0.5*prec*crossprod(y-X.ind%*%beta)+0.5*prec*tau*crossprod(beta) b = sum(B/beta^2)- (a0-1)*log(prec) + b0*prec - p.g*sqrt(2*prec*tau*B)-0.5*(n+p.g)*log(prec) c = a+b }else{if(p.g==1){ beta = x2[1:p.g] prec = x2[(1+p.g)] if(prec<0){prec=10^-5} a = 0.5*prec*crossprod(y-X.ind*beta)+0.5*prec*tau*beta^2 b = B/beta^2 - (a0-1)*log(prec) + b0*prec - p.g*sqrt(2*prec*tau*B)-0.5*(n+p.g)*log(prec) c = a+b }else{ prec = x2[(1+p.g)] if(prec<0){prec=10^-5} a = 0.5*prec*crossprod(y) b = -1*(a0-1)*log(prec) + b0*prec -0.5*n*log(prec) c = a+b } } return(c) } J = function(x1,B,y,X.ind,a0,b0){ a0=0.01;b0=0.01 p.g1 = ncol(X.ind) if(length(p.g1)==0){p.g1=0} tau=1 if(p.g1>1){ beta1 = x1[1:p.g1] prec1 = x1[(p.g1+1)] D = matrix(0,(p.g1+1),(p.g1+1)) D[1:p.g1,1:p.g1] =prec1*crossprod(X.ind)+prec1*tau*diag(p.g1)+diag(6*B/beta1^4) FF = as.vector(crossprod(X.ind,y-X.ind%*%beta1)-tau*beta1)*prec1^2 D[(p.g1+1),1:p.g1]=FF D[1:p.g1,(p.g1+1)]=FF D[(p.g1+1),(p.g1+1)] = -1*(n/2+p.g1/2+a0+1)*prec1^2+prec1^3*(crossprod(y-X.ind%*%beta1)-tau*crossprod(beta1))-0.75*p.g1*sqrt(2*B*tau)*prec1^2.5+prec1^3*2*b0 #0.5*(n+p.g1-2+2*a0)/prec1^2+p.g1*0.25*sqrt(2*tau*B)*prec1^(-1.5) }else{ if(p.g1==1){ beta1 = x1[1:p.g1] prec1 = x1[(p.g1+1)] D = matrix(0,(p.g1+1),(p.g1+1)) D[1:p.g1,1:p.g1] =prec1*crossprod(X.ind)+prec1*tau*diag(p.g1)+6*B/beta1^4 FF = as.vector(crossprod(X.ind,y-X.ind*beta1)-tau*beta1)*prec1^2 D[(p.g1+1),1:p.g1]=FF D[1:p.g1,(p.g1+1)]=FF D[(p.g1+1),(p.g1+1)] = -1*(n/2+p.g1+a0+1)*prec1^2+prec1^3*(crossprod(y-X.ind*beta1)-tau*beta1^2)-0.75*p.g1*sqrt(2*B*tau)*prec1^2.5+prec1^3*2*b0 #0.5*(n+p.g1-2+2*a0)/prec1^2+p.g1*0.25*sqrt(2*tau*B)*prec1^(-1.5) }else{ prec1 = x1[(p.g1+1)] D =abs(-0.5*(n+2*a0+2)*prec1^2+prec1^3*2*b0+prec1^3*crossprod(y)) } } return(D) } r0=1 B = tuning a0=0.01;b0=0.01 tau = 1 p.g = ncol(X.ind) if(length(p.g)==0){p.g=0} if(p.g >1){ fit = solve(crossprod(X.ind)+diag(p.g))%*%crossprod(X.ind,y) ress = crossprod(y-X.ind%*%fit) prec0 = n/ress initial_x = fit initial_x = c(initial_x,prec0) wrapper <- function(theta) obj01(theta,B,y,X.ind,a0,b0) #o <- optim(initial_x, wrapper) #f_x = o$value #x0 = o$par o = nlm(wrapper, initial_x) f_x = o$minimum x0 = o$estimate ccc = 0.5*log(2*pi)+0.5*p.g*log(tau) #int = ccc-0.5*as.numeric(determinant(J(x0,ind2,B,XtX,y,X,tau,a0,b0),logarithm=TRUE)$modulus)-f_x int = ccc-0.5*log(det(J(x0,B,y,X.ind,a0,b0)))-f_x }else{ if(p.g==1){ fit = solve(crossprod(X.ind)+diag(p.g))%*%crossprod(X.ind,y) ress = crossprod(y-X.ind%*%fit) prec0 = n/ress initial_x = fit initial_x = c(initial_x,prec0) wrapper <- function(theta) obj01(theta,B,y,X.ind,a0,b0) #o <- optim(initial_x, wrapper) #f_x = o$value #x0 = o$par o = nlm(wrapper, initial_x) f_x = o$minimum x0 = o$estimate ccc = 0.5*log(2*pi)+0.5*p.g*log(tau) #int = ccc-0.5*as.numeric(determinant(J(x0,ind2,B,XtX,y,X,tau,a0,b0),logarithm=TRUE)$modulus)-f_x int = ccc-0.5*log(det(J(x0,B,y,X.ind,a0,b0)))-f_x }else{if(p.g==0){ int = lgamma(0.5*n+a0) -(0.5*n+a0)*log(crossprod(y)+2*b0) } } } return(int) }
/scratch/gouwar.j/cran-all/cranData/BayesS5/R/ind_fun_pemom.R
ind_fun_pimom <- function(X.ind,y,n,p,tuning){ obj01_pimom <- function(x2,B,y,X.ind,a0,b0){ a0=0.01;b0=0.01;r0=1 tau = 1 p.g = length(x2)-1 if(p.g>1){ beta = x2[1:p.g] prec = x2[(1+p.g)] if(prec<0){prec=10^-5} a = 0.5*prec*crossprod(y-X.ind%*%beta)+r0*sum(log(beta^2)) b = sum(B/beta^2)- (a0-1)*log(prec) + b0*prec - p.g*((r0-0.5)*log(B)-lgamma(r0-0.5))-0.5*n*log(prec) c = a+b }else{if(p.g==1){ beta = x2[1:p.g] prec = x2[(1+p.g)] if(prec<0){prec=10^-5} a = 0.5*prec*crossprod(y-X.ind*beta)+r0*log(beta^2) b = B/beta^2- (a0-1)*log(prec) + b0*prec - p.g*((r0-0.5)*log(B)-lgamma(r0-0.5))-0.5*n*log(prec) c = a+b }else{ prec = x2[(1+p.g)] if(prec<0){prec=10^-5} a = 0.5*prec*crossprod(y) b = -1*(a0-1)*log(prec) + b0*prec -0.5*n*log(prec) c = a+b } } return(c) } J_pimom <- function(x1,B,y,X.ind,a0,b0){ a0=0.01;b0=0.01;r0=1 tau = 1 p.g1 = length(x1)-1 if(p.g1>1){ beta1 = x1[1:p.g1] prec1 = x1[(p.g1+1)] D = matrix(0,(p.g1+1),(p.g1+1)) D[1:p.g1,1:p.g1] =prec1*crossprod(X.ind)+diag(6*B/beta1^4-2*r0/beta1^2) FF = as.vector(crossprod(X.ind,y-X.ind%*%beta1))*prec1^2 D[(p.g1+1),1:p.g1]=FF D[1:p.g1,(p.g1+1)]=FF D[(p.g1+1),(p.g1+1)] = -1*(n/2+a0+1)*prec1^2+prec1^3*(crossprod(y-X.ind%*%beta1))+prec1^3*2*b0 #0.5*(n+p.g1-2+2*a0)/prec1^2+p.g1*0.25*sqrt(2*tau*B)*prec1^(-1.5) }else{ if(p.g1==1){ beta1 = x1[1:p.g1] prec1 = x1[(p.g1+1)] D = matrix(0,(p.g1+1),(p.g1+1)) D[1:p.g1,1:p.g1] =prec1*crossprod(X.ind)+6*B/beta1^4-2/beta1^2 FF = as.vector(crossprod(X.ind,y-X.ind*beta1))*prec1^2 D[(p.g1+1),1:p.g1]=FF D[1:p.g1,(p.g1+1)]=FF D[(p.g1+1),(p.g1+1)] = -1*(n/2+a0+1)*prec1^2+prec1^3*(crossprod(y-X.ind*beta1))+prec1^3*2*b0 #0.5*(n+p.g1-2+2*a0)/prec1^2+p.g1*0.25*sqrt(2*tau*B)*prec1^(-1.5) }else{ prec1 = x1[(p.g1+1)] D = abs(-0.5*(n+2*a0+2)*prec1^2+prec1^3*2*b0+prec1^3*crossprod(y)) } } return(D) } r0=1 B = tuning a0=0.01;b0=0.01 tau = 1 p.g = ncol(X.ind) if(length(p.g)==0){p.g=0} #sb = lbeta(1+p.g,1+p-p.g) if(p.g >1){ fit = solve(crossprod(X.ind)+diag(p.g)/B)%*%crossprod(X.ind,y) ress = crossprod(y-X.ind%*%fit) prec0 = (n+p.g+2*a0)/(ress+2*b0) initial_x = fit initial_x = c(initial_x,prec0) wrapper <- function(theta) obj01_pimom(theta,B,y,X.ind,a0,b0) #o <- optim(initial_x, wrapper,method="L-BFGS-B" ) #f_x = o$value #x0 = o$par o = nlm(wrapper, initial_x) f_x = o$minimum x0 = o$estimate ccc = 0.5*log(2*pi)+0.5*p.g*log(tau) int = ccc-0.5*determinant(J_pimom(x0,B,y,X.ind,a0,b0),logarithm=TRUE)$modulus-f_x }else{ if(p.g==1){ fit = solve(crossprod(X.ind)+diag(p.g))%*%crossprod(X.ind,y) ress = crossprod(y-X.ind%*%fit) prec0 = (n+p.g+2*a0)/(ress+2*b0) initial_x = fit initial_x = c(initial_x,prec0) wrapper <- function(theta) obj01_pimom(theta,B,y,X.ind,a0,b0) #o <- optim(initial_x, wrapper,method="L-BFGS-B") #f_x = o$value #x0 = o$par o = nlm(wrapper, initial_x) f_x = o$minimum x0 = o$estimate ccc = 0.5*log(2*pi)+0.5*p.g*log(tau) int = ccc-0.5*determinant(J_pimom(x0,B,y,X.ind,a0,b0),logarithm=TRUE)$modulus-f_x }else{if(p.g==0){ int = lgamma(0.5*n+a0) -(0.5*n+a0)*log(crossprod(y)+2*b0) } } } return(int) }
/scratch/gouwar.j/cran-all/cranData/BayesS5/R/ind_fun_pimom.R
# Posterior-mode estimates under the g-prior: shrunken least-squares
# coefficients (shrinkage factor tuning/(1+tuning)) and a residual-based
# estimate of the error variance for the model indexed by 'ind'.
obj_fun_g <- function(ind, X, y, n, p, tuning) {
  p.g = length(ind)
  X0 = cbind(rep(1, n), X[, ind])   # design matrix with intercept
  beta = rep(0, p + 1)
  if (p.g > 0) {
    # X0 = X[,ind2]; QR = qr(X0)
    # ress = crossprod(qr.resid(QR, y))
    beta[c(1, 1 + ind)] = tuning * solve(crossprod(X0)) %*% crossprod(X0, y) / (1 + tuning)
    ress = crossprod(y - X0 %*% beta[c(1, 1 + ind)])
    sig = ress / (n + 2)
  } else {
    # null model: intercept-only fit
    beta[1] = mean(y)
    sig = crossprod(y - mean(y)) / (n + 2)
  }
  return(list(beta = beta, sig = sig))
}
/scratch/gouwar.j/cran-all/cranData/BayesS5/R/obj_fun_g.R
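# --- Illustrative usage sketch (added commentary; the toy data are
# assumptions, not package output). obj_fun_g() returns shrunken
# least-squares coefficients, so tuning = n mimics a unit-information
# g-prior; 'beta' carries the intercept in slot 1 and one slot per predictor.
set.seed(2)
n <- 60; p <- 8
X <- matrix(rnorm(n * p), n, p)
y <- X[, 3] * 2 + rnorm(n)
est <- obj_fun_g(ind = 3, X = X, y = y, n = n, p = p, tuning = n)
est$beta   # length p + 1; nonzero entries in slots 1 (intercept) and 1 + 3
est$sig    # residual-based variance estimate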
# Posterior-mode estimates (coefficients and precision) for the model indexed
# by 'ind' under the peMoM prior; the intercept is left unpenalised.
obj_fun_pemom <- function(ind, X, y, n, p, tuning) {
  p.g = length(ind)
  X0 = cbind(rep(1, n), X[, ind])   # design matrix with intercept

  # negative log posterior under the peMoM prior
  obj01_pemom <- function(x2, B, y, X0, a0, b0) {
    a0 = 0.01; b0 = 0.01; r0 = 1
    tau = 1
    p.g = length(x2) - 1
    if (p.g > 1) {
      beta = x2[2:p.g]          # penalised slopes
      beta0 = x2[1]             # unpenalised intercept
      prec = x2[(1 + p.g)]
      if (prec < 0) { prec = 10^-5 }
      a = 0.5 * prec * crossprod(y - X0 %*% c(beta0, beta)) + 0.5 * prec * tau * crossprod(beta)
      b = sum(B / beta^2) - (a0 - 1) * log(prec) + b0 * prec -
        (p.g - 1) * sqrt(2 * prec * tau * B) - 0.5 * (n + p.g - 1) * log(prec)
      c = a + b
    } else {
      prec = x2[(1 + p.g)]
      if (prec < 0) { prec = 10^-5 }
      a = 0.5 * prec * crossprod(y - mean(y))
      b = -1 * (a0 - 1) * log(prec) + b0 * prec - 0.5 * n * log(prec)
      c = a + b
    }
    return(c)
  }

  r0 = 1
  B = tuning
  a0 = 0.01; b0 = 0.01
  tau = 1
  if (p.g > 0) {
    fit = solve(crossprod(X0) + diag(p.g + 1) / B) %*% crossprod(X0, y)
    ress = crossprod(y - X0 %*% fit)
    prec0 = (n + p.g + 2 * a0) / (ress + 2 * b0)
    initial_x = c(fit, prec0)
    wrapper <- function(theta) obj01_pemom(theta, tuning, y, X0, a0, b0)
    o = nlm(wrapper, initial_x)
    f_x = o$minimum
    x0 = o$estimate
    beta = rep(0, p + 1)
    beta[c(1, 1 + ind)] = x0[1:(p.g + 1)]
    sig = x0[p.g + 2]   # optimised precision parameter (stored as 'sig')
  } else {
    beta = rep(0, p + 1)
    beta[1] = mean(y)
    sig = crossprod(y - mean(y)) / (n + a0)
  }
  return(list(beta = beta, sig = sig))
}
/scratch/gouwar.j/cran-all/cranData/BayesS5/R/obj_fun_pemom.R
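# --- Illustrative usage sketch (added commentary; the toy data are
# assumptions). obj_fun_pemom() finds the posterior mode of
# (intercept, slopes, precision) with nlm() and returns coefficients ('beta')
# and the optimised precision ('sig') for the model indexed by 'ind';
# obj_fun_pimom() below is called the same way under the piMoM prior.
set.seed(3)
n <- 60; p <- 8
X <- matrix(rnorm(n * p), n, p)
y <- X[, 2] * 1.5 + rnorm(n)
est <- obj_fun_pemom(ind = 2, X = X, y = y, n = n, p = p, tuning = 1)
est$beta[1]       # intercept estimate
est$beta[1 + 2]   # slope estimate for predictor 2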
# Posterior-mode estimates (coefficients and precision) for the model indexed
# by 'ind' under the piMoM prior; the intercept is left unpenalised.
obj_fun_pimom <- function(ind, X, y, n, p, tuning) {
  p.g = length(ind)
  X0 = cbind(rep(1, n), X[, ind])   # design matrix with intercept

  # negative log posterior under the piMoM prior
  obj01_pimom <- function(x2, B, y, X0, a0, b0) {
    a0 = 0.01; b0 = 0.01; r0 = 1
    tau = 1
    p.g = length(x2) - 1
    if (p.g > 1) {
      beta = x2[2:p.g]          # penalised slopes
      beta0 = x2[1]             # unpenalised intercept
      prec = x2[(1 + p.g)]
      if (prec < 0) { prec = 10^-5 }
      a = 0.5 * prec * crossprod(y - X0 %*% c(beta0, beta)) + sum(log(beta^2))
      b = sum(B / beta^2) - (a0 - 1) * log(prec) + b0 * prec -
        (p.g - 1) * ((r0 - 0.5) * log(B) - lgamma(r0 - 0.5)) - 0.5 * n * log(prec)
      c = a + b
    } else {
      prec = x2[(1 + p.g)]
      if (prec < 0) { prec = 10^-5 }
      a = 0.5 * prec * crossprod(y - mean(y))
      b = -1 * (a0 - 1) * log(prec) + b0 * prec - 0.5 * n * log(prec)
      c = a + b
    }
    return(c)
  }

  r0 = 1
  B = tuning
  a0 = 0.01; b0 = 0.01
  tau = 1
  if (p.g > 0) {
    fit = solve(crossprod(X0) + diag(p.g + 1) / B) %*% crossprod(X0, y)
    ress = crossprod(y - X0 %*% fit)
    prec0 = (n + p.g + 2 * a0) / (ress + 2 * b0)
    initial_x = c(fit, prec0)
    wrapper <- function(theta) obj01_pimom(theta, tuning, y, X0, a0, b0)
    o = nlm(wrapper, initial_x)
    f_x = o$minimum
    x0 = o$estimate
    beta = rep(0, p + 1)
    beta[c(1, 1 + ind)] = x0[1:(p.g + 1)]
    sig = x0[p.g + 2]   # optimised precision parameter (stored as 'sig')
  } else {
    beta = rep(0, p + 1)
    beta[1] = mean(y)
    sig = crossprod(y - mean(y)) / (n + a0)
  }
  return(list(beta = beta, sig = sig))
}
/scratch/gouwar.j/cran-all/cranData/BayesS5/R/obj_fun_pimom.R
# Summarise an S5 search: MAP (highest posterior probability) model, its
# posterior probability, marginal inclusion probabilities and model scores.
result <- function(fit) {
  # GAM = fit[-1,]; OBJ = fit[1,]
  GAM = fit$GAM; OBJ = fit$OBJ; tuning = fit$tuning
  p = nrow(GAM)

  # marginal posterior inclusion probability of each predictor
  marg.gam = rep(0, p)
  for (u in 1:ncol(GAM)) {
    marg.gam = marg.gam + GAM[, u] * exp(OBJ[u] - max(OBJ))
  }
  marg.gam = marg.gam / sum(exp(OBJ - max(OBJ)))

  gam0 = GAM[, which.max(OBJ)]                            # MAP model indicator
  ind2 = which(gam0 == 1)
  post = exp(OBJ - max(OBJ)) / sum(exp(OBJ - max(OBJ)))   # model posterior probabilities
  hppm = 1 / sum(exp(OBJ - max(OBJ)))                     # posterior probability of the MAP model

  print("# of Searched Models by S5")
  print(length(OBJ))
  print("The MAP model is ")
  print(which(gam0 == 1))
  print(paste("with posterior probability", round(hppm, 3)))

  return(list(hppm = which(gam0 == 1), hppm.prob = hppm, marg.prob = marg.gam,
              gam = GAM, obj = OBJ, post = post, tuning = tuning))
}
/scratch/gouwar.j/cran-all/cranData/BayesS5/R/result.R
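# --- Illustrative workflow sketch (added commentary; the calls are commented
# out because they assume a prior S5 run, and the S5() call shown is an
# assumption about the package's default interface). result() expects the
# list returned by the S5 search, with components GAM (p x #models inclusion
# indicators), OBJ (log model scores) and tuning.
# fit <- BayesS5::S5(X, y)   # stochastic shotgun search with default settings
# res <- result(fit)
# res$hppm        # indices of the MAP (highest posterior probability) model
# res$hppm.prob   # its posterior probability among the searched models
# res$marg.prob   # marginal posterior inclusion probability per predictor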
# Least-squares coefficient estimation on the models visited by S5: refit the
# MAP model by OLS and model-average OLS fits over all visited models,
# weighted by their posterior probabilities (BMA).
result_est_LS <- function(res, X, y, verbose = TRUE) {
  hppm = res$hppm
  hppm.prob = res$hppm.prob
  marg.prob = res$marg.prob
  gam = res$gam
  obj = res$obj
  post = res$post
  tuning = res$tuning
  p = nrow(gam)

  # OLS fit of the MAP model
  ind.LS = which(gam[, which.max(obj)] == 1)
  p.LS = length(ind.LS)
  if (p.LS > 0) {
    beta.LS = rep(0, p + 1)
    beta.LS[c(1, 1 + ind.LS)] = stats::lm(y ~ X[, ind.LS])$coefficients
  } else {
    beta.LS = rep(0, p + 1)
    beta.LS[1] = mean(y)
  }

  # model-averaged OLS coefficients
  beta.BMA.LS = rep(0, p + 1)
  for (i in 1:length(post)) {
    ind.BMA = which(gam[, i] == 1)
    p.BMA = length(ind.BMA)
    if (p.BMA > 0) {
      beta.BMA = rep(0, p + 1)
      beta.BMA[c(1, 1 + ind.BMA)] = stats::lm(y ~ X[, ind.BMA])$coefficients
      beta.BMA.LS = beta.BMA.LS + beta.BMA * post[i]
    } else {
      beta.BMA = rep(0, p + 1)
      beta.BMA[1] = mean(y)
      beta.BMA.LS = beta.BMA.LS + beta.BMA * post[i]
    }
    if (verbose == TRUE && i %% 1000 == 0) {
      print(paste("The number of evaluated models: ", i))
    }
  }

  return(list(intercept.MAP = beta.LS[1], beta.MAP = beta.LS[-1],
              intercept.BMA = beta.BMA.LS[1], beta.BMA = beta.BMA.LS[-1]))
}
/scratch/gouwar.j/cran-all/cranData/BayesS5/R/result_est_LS.R
# Posterior-mode coefficient estimation on the models visited by S5:
# re-optimise the MAP model with the supplied obj_fun and model-average the
# posterior-mode fits over all visited models (BMA).
result_est_MAP <- function(res, X, y, obj_fun, verbose = TRUE) {
  hppm = res$hppm
  hppm.prob = res$hppm.prob
  marg.prob = res$marg.prob
  gam = res$gam
  obj = res$obj
  post = res$post
  tuning = res$tuning
  p = nrow(gam)
  n = nrow(X)

  if (missing(obj_fun)) {
    print("The prior on regression coefficients is unspecified. The default is piMoM")
    obj_fun = BayesS5::obj_fun_pimom
    # tuning parameter selection for nonlocal priors
    tuning <- BayesS5::hyper_par(type = "pimom", X, y, thre = p^-0.5)
    print("The chosen hyperparameter tau")
    print(tuning)
    # assign("tuning", tuning, .GlobalEnv)
  }

  # posterior-mode fit of the MAP model
  ind.MAP = which(gam[, which.max(obj)] == 1)
  o = obj_fun(ind.MAP, X = X, y = y, n = n, p = p, tuning = tuning)
  beta.MAP = o$beta
  sig.MAP = o$sig

  # model-averaged posterior-mode coefficients
  beta.BMA.MAP = rep(0, p + 1)
  for (i in 1:length(post)) {
    ind.BMA = which(gam[, i] == 1)
    p.BMA = length(ind.BMA)
    if (p.BMA > 0) {
      o = obj_fun(ind.BMA, X = X, y = y, n = n, p = p, tuning = tuning)
      beta.BMA = o$beta
      beta.BMA.MAP = beta.BMA.MAP + beta.BMA * post[i]
    } else {
      beta.BMA = rep(0, p + 1)
      beta.BMA[1] = mean(y)
      beta.BMA.MAP = beta.BMA.MAP + beta.BMA * post[i]
    }
    if (verbose == TRUE && i %% 500 == 0) {
      print(paste("The number of evaluated models: ", i))
    }
  }

  return(list(intercept.MAP = beta.MAP[1], beta.MAP = beta.MAP[-1], sig.MAP = sig.MAP,
              intercept.BMA = beta.BMA.MAP[1], beta.BMA = beta.BMA.MAP[-1]))
}
/scratch/gouwar.j/cran-all/cranData/BayesS5/R/result_est_MAP.R
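# --- Illustrative estimation sketch (added commentary; calls commented out,
# assuming 'res', 'X' and 'y' from an earlier S5 run as above). After the
# search, coefficients can be estimated by refitting with least squares
# (result_est_LS) or by the posterior mode under a nonlocal prior
# (result_est_MAP, which re-optimises every visited model via an obj_fun_*
# routine). Both return MAP-model and Bayesian-model-averaged (BMA) estimates.
# est.ls  <- result_est_LS(res, X, y)
# est.map <- result_est_MAP(res, X, y, obj_fun = BayesS5::obj_fun_pimom)
# est.map$beta.MAP   # coefficients of the MAP model
# est.map$beta.BMA   # model-averaged coefficients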
#' @title Fitting BayesSUR models #' @description #' Main function of the package. Fits a range of models introduced in the #' package vignette \code{BayesSUR.pdf}. Returns an object of S3 class #' \code{BayesSUR}. There are three options for the prior on the residual #' covariance matrix (i.e., independent inverse-Gamma, inverse-Wishart and #' hyper-inverse Wishart) and three options for the prior on the latent #' indicator variable (i.e., independent Bernoulli, hotspot and Markov random #' field). So there are nine models in total. See details for their combinations. #' #' @docType package #' @useDynLib BayesSUR #' @aliases BayesSUR-package #' @importFrom utils head tail read.table write.table #' @importFrom Rcpp sourceCpp #' @importFrom xml2 as_xml_document write_xml #' @importFrom parallel detectCores #' #' @name BayesSUR #' @param data a numeric matrix with variables on the columns and observations #' on the rows, if arguments \code{Y} and \code{X} (and possibly \code{X_0}) #' are vectors. Can be \code{NULL} if arguments \code{Y} and \code{X} (and #' possibly \code{X_0}) are numeric matrices #' @param Y,X vectors of indices (with respect to the data matrix) for the #' outcomes (\code{Y}) and the predictors to select (\code{X}) respectively; #' if the \code{data} argument is \code{NULL}, these needs to be numeric #' matrices containing the data instead, with variables on the columns and #' observations on the rows #' @param X_0 vectors of indices (with respect to the data matrix) for the #' fixed predictors that are not selected, i.e. always included in the model; #' if the data argument is not provided, this needs to be a numeric matrix #' containing the data instead, with variables on the columns and observations #' on the rows #' @param betaPrior string indicating the prior for regression coefficients; it #' has to be either \code{independent} for independent spike-and-slab priors #' (only slab part for \code{X_0} if specified), or \code{reGroup} for weakly #' normal priors for mandatory variables (random effects) and spike-and-slab #' priors for other variables of Zhao (2023) #' @param covariancePrior string indicating the prior for the covariance $C$; #' it has to be either \code{HIW} for the hyper-inverse-Wishar (which will #' result in a sparse covariance matrix), \code{IW} for the inverse-Wishart #' prior (dense covariance) or \code{IG} for independent inverse-Gamma on all #' the diagonal elements and 0 otherwise. See the details for the model #' specification #' @param gammaPrior string indicating the gamma prior to use, either #' \code{hotspot} (default) for the Hotspot prior of Bottolo (2011), \code{MRF} #' for the Markov Random Field prior or \code{hierarchical} for a simpler #' hierarchical prior. See the details for the model specification #' @param nIter number of iterations for the MCMC procedure. Default 10000 #' @param burnin number of iterations to discard at the start of the chain. #' Default is 5000 #' @param nChains number of parallel tempered chains to run (default 2). The #' temperature is adapted during the burnin phase #' @param outFilePath path to where the output files are to be written #' @param gammaSampler string indicating the type of sampler for gamma, either #' \code{bandit} for the Thompson sampling inspired samper or \code{MC3} for #' the usual MC^3 sampler. 
See Russo et al.(2018) or Madigan and York (1995) #' for details #' @param gammaInit gamma initialisation to either all-zeros (\code{0}), all #' ones (\code{1}), MLE-informed (\code{MLE}) or (default) randomly (\code{R}) #' @param mrfG either a matrix or a path to the file containing (the edge list #' of) the G matrix for the MRF prior on gamma (if necessary) #' @param standardize logical flag for X variable standardization. Default is #' \code{standardize=TRUE}. Coefficients are returned on the standardized scale #' @param standardize.response logical flag for Y standardization. Default is #' \code{standardize.response=TRUE} #' @param hyperpar a list of named hypeparameters to use instead of the default #' values. Valid names are mrf_d, mrf_e, a_sigma, b_sigma, a_tau, b_tau, nu, #' a_eta, b_eta, a_o, b_o, a_pi, b_pi, a_w and b_w. Their default values are #' a_w=2, b_w=5, a_omega=2, b_omega=1, a_o=2, b_o=p-2, a_pi=2, b_pi=1, nu=s+2, #' a_tau=0.1, b_tau=10, a_eta=0.1, b_eta=1, a_sigma=1, b_sigma=1, mrf_d=-3 and #' mrf_e=0.03. See the vignette for more information #' @param maxThreads maximum threads used for parallelization. Default is 1. #' Reproducibility of results with \code{set.seed()} is only guaranteed if #' \code{maxThreads=1} #' @param output_gamma allow (\code{TRUE}) or suppress (\code{FALSE}) the #' output for gamma. See the return value below for more information #' @param output_beta allow (\code{TRUE}) or suppress (\code{FALSE}) the output #' for beta. See the return value below for more information #' @param output_Gy allow (\code{TRUE}) or suppress (\code{FALSE}) the output #' for Gy. See the return value below for more information #' @param output_sigmaRho allow (\code{TRUE}) or suppress (\code{FALSE}) the #' output for sigmaRho. See the return value below for more information #' @param output_pi allow (\code{TRUE}) or suppress (\code{FALSE}) the output #' for pi. See the return value below for more information #' @param output_tail allow (\code{TRUE}) or suppress (\code{FALSE}) the output #' for tail (hotspot tail probability). See the return value below for more #' information #' @param output_model_size allow (\code{TRUE}) or suppress (\code{FALSE}) the #' output for model_size. See the return value below for more information #' @param output_model_visit allow (\code{TRUE}) or suppress (\code{FALSE}) the #' output for all visited models over the MCMC iterations. Default is #' \code{FALSE}. See the return value below for more information #' @param output_CPO allow (\code{TRUE}) or suppress (\code{FALSE}) the output #' for (scaled) conditional predictive ordinates (\code{*_CPO_out.txt}), #' CPO with joint posterior predictive of the response variables #' (\code{*_CPOsumy_out.txt}) and widely applicable information criterion #' (\code{*_WAIC_out.txt}). See the return value below for more information #' @param output_Y allow (\code{TRUE}) or suppress (\code{FALSE}) the output #' for responses dataset Y #' @param output_X allow (\code{TRUE}) or suppress (\code{FALSE}) the output #' for predictors dataset X #' @param tmpFolder the path to a temporary folder where intermediate data #' files are stored (will be erased at the end of the chain). It is specified #' relative to \code{outFilePath} #' #' @details The arguments \code{covariancePrior} and \code{gammaPrior} specify #' the model HRR, dSUR or SSUR with different gamma prior. Let #' \eqn{\gamma_{jk}} be latent indicator variable of each coefficient and #' \eqn{C} be covariance matrix of response variables. 
The nine models #' specified through the arguments \code{covariancePrior} and #' \code{gammaPrior} are as follows. #' \tabular{cccc}{ #' \tab \eqn{\gamma_{jk}}~Bernoulli \tab \eqn{\gamma_{jk}}~hotspot \tab \eqn{\gamma}~MRF \cr #' \eqn{C}~indep \tab HRR-B \tab HRR-H \tab HRR-M \cr #' \eqn{C}~IW \tab dSUR-B \tab dSUR-H \tab dSUR-M \cr #' \eqn{C}~HIW \tab SSUR-B \tab SSUR-H \tab SSUR-M #' } #' #' @return An object of class \code{BayesSUR} is saved as #' \code{obj_BayesSUR.RData} in the output file, including the following #' components: #' \itemize{ #' \item status - the running status #' \item input - a list of all input parameters by the user #' \item output - a list of the all output filenames: #' \itemize{ #' \item "\code{*_logP_out.txt}" - contains each row for the \eqn{1000t}-th iteration's log-likelihoods of parameters, i.e., Tau, Eta, JunctionTree, SigmaRho, O, Pi, Gamma, W, Beta and data conditional log-likelihood depending on the models. #' \item "\code{*_gamma_out.txt}" - posterior mean of the latent indicator matrix. #' \item "\code{*_pi_out.txt}" - posterior mean of the predictor effects (prospensity) by decomposing the probability of the latent indicator. #' \item "\code{*_hotspot_tail_p_out.txt}" - posterior mean of the hotspot tail probability. Only available for the hotspot prior on the gamma. #' \item "\code{*_beta_out.txt}" - posterior mean of the coefficients matrix. #' \item "\code{*_Gy_out.txt}" - posterior mean of the response graph. Only available for the HIW prior on the covariance. #' \item "\code{*_sigmaRho_out.txt}" - posterior mean of the transformed parameters. Not available for the IG prior on the covariance. #' \item "\code{*_model_size_out.txt}" - contains each row for the\eqn{1000t}-th iteration's model sizes of the multiple response variables. #' \item "\code{*_model_visit_gy_out.txt}" - contains each row for the nonzero indices of the vectorized estimated graph matrix for each iteration. #' \item "\code{*_model_visit_gamma_out.txt}" - contains each row for the nonzero indices of the vectorized estimated gamma matrix for each iteration. #' \item "\code{*_CPO_out.txt}" - the (scaled) conditional predictive ordinates (CPO). #' \item "\code{*_CPOsumy_out.txt}" - the (scaled) conditional predictive ordinates (CPO) with joint posterior predictive of the response variables. #' \item "\code{*_WAIC_out.txt}" - the widely applicable information criterion (WAIC). #' \item "\code{*_Y.txt}" - responses dataset. #' \item "\code{*_X.txt}" - predictors dataset. #' \item "\code{*_X0.txt}" - fixed predictors dataset. #' } #' \item call - the matched call. #' } #' #' @references Russo D, Van Roy B, Kazerouni A, Osband I, Wen Z (2018). \emph{A tutorial on Thompson sampling.} Foundations and Trends in Machine Learning, 11: 1-96. #' @references Madigan D, York J (1995). \emph{Bayesian graphical models for discrete data.} International Statistical Review, 63: 215–232. #' @references Bottolo L, Banterle M, Richardson S, Ala-Korpela M, Jarvelin MR, Lewin A (2020). \emph{A computationally efficient Bayesian seemingly unrelated regressions model for high-dimensional quantitative trait loci discovery.} Journal of Royal Statistical Society: Series C, 70: 886-908. #' @references Zhao Z, Banterle M, Bottolo L, Richardson S, Lewin A, Zucknick M (2021). \emph{BayesSUR: An R package for high-dimensional multivariate Bayesian variable and covariance selection in linear regression.} Journal of Statistical Software, 100: 1–32. #' @references Zhao Z, Banterle M, Lewin A, Zucknick M (2023). 
\emph{Multivariate Bayesian structured variable selection for pharmacogenomic studies.} Journal of the Royal Statistical Society: Series C (Applied Statistics), qlad102. #' #' @examples #' data("exampleEQTL", package = "BayesSUR") #' hyperpar <- list(a_w = 2, b_w = 5) #' set.seed(9173) #' fit <- BayesSUR( #' Y = exampleEQTL[["blockList"]][[1]], #' X = exampleEQTL[["blockList"]][[2]], #' data = exampleEQTL[["data"]], outFilePath = tempdir(), #' nIter = 5, burnin = 0, nChains = 1, gammaPrior = "hotspot", #' hyperpar = hyperpar, tmpFolder = "tmp/", output_CPO = TRUE #' ) #' #' ## check output #' # show the summary information #' summary(fit) #' #' # show the estimated beta, gamma and graph of responses Gy #' plot(fit, estimator = c("beta", "gamma", "Gy"), type = "heatmap") #' #' \dontrun{ #' ## Set up temporary work directory for saving a pdf figure #' # td <- tempdir() #' # oldwd <- getwd() #' # setwd(td) #' #' ## Produce authentic math formulas in the graph #' # plot(fit, estimator = c("beta", "gamma", "Gy"), type = "heatmap", fig.tex = TRUE) #' # system(paste(getOption("pdfviewer"), "ParamEstimator.pdf")) #' # setwd(oldwd) #' } #' #' @export BayesSUR <- function(data = NULL, Y, X, X_0 = NULL, covariancePrior = "HIW", gammaPrior = "hotspot", betaPrior = "independent", nIter = 10000, burnin = 5000, nChains = 2, outFilePath = "", gammaSampler = "bandit", gammaInit = "R", mrfG = NULL, standardize = TRUE, standardize.response = TRUE, maxThreads = 1, output_gamma = TRUE, output_beta = TRUE, output_Gy = TRUE, output_sigmaRho = TRUE, output_pi = TRUE, output_tail = TRUE, output_model_size = TRUE, output_model_visit = FALSE, output_CPO = FALSE, output_Y = TRUE, output_X = TRUE, hyperpar = list(), tmpFolder = "tmp/") { # Check the directory for the output files if (outFilePath == "") { stop("Please specify a directory to save all output files!") } outFilePathLength <- nchar(outFilePath) if (substr(outFilePath, outFilePathLength, outFilePathLength) != "/") { outFilePath <- paste(outFilePath, "/", sep = "") } if (!file.exists(outFilePath)) { dir.create(outFilePath) } # Create temporary directory tmpFolderLength <- nchar(tmpFolder) if (substr(tmpFolder, tmpFolderLength, tmpFolderLength) != "/") { tmpFolder <- paste(tmpFolder, "/", sep = "") } tmpFolder <- paste(outFilePath, tmpFolder, sep = "") if (!file.exists(tmpFolder)) { dir.create(tmpFolder) } ## Check the input: reasoning is that the user provides either # a data matrix or a data path-to-file # - in this case Y, X (and X_0) need to be provided as vectors of indexes # if the data matrix is not provided, the user will give 2/3 matrices for Y, X (and X_0) # - in which case we write those in order into a new joint file # everything else throws an error # check the formula cl <- match.call() # we'll check in reverse order, is data NULL? 
if (is.null(data)) { # Y,X (and if there X_0) need to be valid numeric matrices then # check Y and X have comfortable number of observations npY <- dim(Y) if ((!is.numeric(Y)) || is.null(npY)) { my_stop("If 'data' is NULL, Y should be a numeric matrix", tmpFolder) } npX <- dim(X) if ((!is.numeric(X)) || is.null(npX) || (npX[1] != npY[1])) { my_stop("If 'data' is NULL, X should be a numeric matrix and the same number of rows of Y", tmpFolder) } if (is.null(X_0)) { X_0 <- matrix(NA, nrow = npY[1], ncol = 0) } else { npX0 <- dim(X_0) if ((!is.numeric(X_0)) || is.null(npX0) || (npX0[1] != npY[1])) { my_stop("If 'data' is NULL and X_0 is provided, X_0 should be a numeric matrix and the same number of rows of Y", tmpFolder) } } # Standarize the data if (standardize) { X <- scale(X) X_0 <- scale(X_0) } if (standardize.response) Y <- scale(Y) # Write the three down in a single data file write.table(cbind(Y, X, X_0), paste(sep = "", tmpFolder, "data.txt"), row.names = FALSE, col.names = FALSE) data <- paste(sep = "", tmpFolder, "data.txt") blockLabels <- c(rep(0, ncol(Y)), rep(1, ncol(X)), rep(2, ncol(X_0))) # Write the data in a output file write.table(Y, paste(sep = "", outFilePath, "data_Y.txt"), row.names = FALSE, col.names = TRUE) write.table(X, paste(sep = "", outFilePath, "data_X.txt"), row.names = FALSE, col.names = TRUE) write.table(X_0, paste(sep = "", outFilePath, "data_X0.txt"), row.names = FALSE, col.names = TRUE) } else { # data is not null, so the user wants to use it to input the data # is the data given as matrix? ## If it's valid matrix, simply write it and re-assign the variable data to hold its path npData <- dim(data) if (is.numeric(data) || (!is.null(npData)) || (npData[2] >= 2)) { # Standarize the data if (standardize) { data[, X] <- scale(data[, X]) data[, X_0] <- scale(data[, X_0]) } if (standardize.response) data[, Y] <- scale(data[, Y]) # Write the Y and X data in a output file write.table(data[, Y], paste(sep = "", outFilePath, "data_Y.txt"), row.names = FALSE, col.names = TRUE) write.table(data[, X], paste(sep = "", outFilePath, "data_X.txt"), row.names = FALSE, col.names = TRUE) write.table(data[, X_0], paste(sep = "", outFilePath, "data_X0.txt"), row.names = FALSE, col.names = TRUE) write.table(data, paste(sep = "", tmpFolder, "data.txt"), row.names = FALSE, col.names = FALSE) data <- paste(sep = "", tmpFolder, "data.txt") } else { my_stop("Y should be NULL or a numeric matrix with 2 or more columns!") } # is the data given as a string? 
if (is.character(data) && length(data) == 1) { if (substr(data, 1, 1) == "~") { data <- path.expand(data) } } ## at this point data contains the path to a file that exists # try and read one line to check dimensions dataHeader <- read.table(data, header = FALSE, nrows = 1) nVariables <- ncol(dataHeader) ## Y, X (and X_0) should be some fixed variables that needs to be included in the model if (is.null(X_0)) { X_0 <- as.numeric(c()) } # be sure that they are vectors if (!(is.vector(Y, "numeric") && is.vector(X, "numeric") && is.vector(X_0, "numeric"))) { my_stop("When the 'data' argument is set, Y,X and X_0 need to be corresponding index vectors!", tmpFolder) } # check thay do not overlap if (length(c(intersect(Y, X), intersect(Y, X_0), intersect(X_0, X))) != 0) { my_stop("Y, X and X_0 need to be distinct index vectors!", tmpFolder) } # check if dimensions correspond -- higher dimensions gets an error if (length(c(Y, X, X_0)) > nVariables) { my_stop("When the 'data' argument is set, Y,X and X_0 need to be corresponding index vectors!", tmpFolder) } # equal dimensions are ok, but lower dimensions means some columns of the data will be disregarded ( set to -1 ) # We can now init the blockList blockLabels <- rep(NA, nVariables) blockLabels[Y] <- 0 blockLabels[X] <- 1 if (length(X_0) > 0) { blockLabels[X_0] <- 2 } blockLabels[is.na(blockLabels)] <- -1 } # cleanup file PATHS dataLength <- nchar(data) if (dataLength == 0) { my_stop("Please provide a correct path to a plain-text (.txt) file", tmpFolder) } # magicly strip '/' from the start and '.txt' from the end of the data file name dataString <- head(strsplit(tail(strsplit(data, split = c("/"))[[1]], 1), ".txt")[[1]], 1) ## Then init the structure graph # Consider that the indexes are written so that Y is 0 , X is 1 and (if there) X_0 is 2 if (length(X_0) > 0) { structureGraph <- structureGraph <- matrix(c(0, 0, 0, 1, 0, 0, 2, 0, 0), 3, 3, byrow = TRUE) } else { structureGraph <- structureGraph <- matrix(c(0, 0, 1, 0), 2, 2, byrow = TRUE) } ## Finally write blockLabels and structureGraph to a file write.table(blockLabels, paste(sep = "", tmpFolder, "blockLabels.txt"), row.names = FALSE, col.names = FALSE) blockList <- paste(sep = "", tmpFolder, "blockLabels.txt") write.table(structureGraph, paste(sep = "", tmpFolder, "structureGraph.txt"), row.names = FALSE, col.names = FALSE) structureGraph <- paste(sep = "", tmpFolder, "structureGraph.txt") # check how burnin was given if (burnin < 0) { my_stop("Burnin must be positive or 0", tmpFolder) } else { if (burnin > nIter) { my_stop("Burnin might not be greater than nIter", tmpFolder) } else { if (burnin < 1) { # given as a fraction burnin <- ceiling(nIter * burnin) # the zero case is taken into account here as well } } } # else assume is given as an absolute number ############################### ## prepare the print of hyperparameters corresponding the specified model hyperpar.all <- list(a_w = 2, b_w = 5, a_o = 2, b_o = sum(blockLabels == 1) - 2, a_pi = NA, b_pi = NA, nu = sum(blockLabels == 0) + 2, a_tau = 0.1, b_tau = 10, a_eta = 0.1, b_eta = 1, a_sigma = 1, b_sigma = 1, mrf_d = -3, mrf_e = 0.03) if (toupper(gammaPrior) %in% c("HOTSPOT", "HOTSPOTS", "HS")) { hyperpar.all$a_pi <- 2 hyperpar.all$b_pi <- 1 if (toupper(covariancePrior) %in% c("INDEPENDENT", "INDEP", "IG")) { hyperpar.all <- hyperpar.all[-c(7:11, 14:15)] } if (toupper(covariancePrior) %in% c("DENSE", "IW")) { hyperpar.all <- hyperpar.all[-c(10:11, 12:15)] } if (toupper(covariancePrior) %in% c("SPARSE", "HIW")) { hyperpar.all <- 
hyperpar.all[-c(12:15)] } } if (toupper(gammaPrior) %in% c("HIERARCHICAL", "H")) { hyperpar.all$a_pi <- 1 hyperpar.all$b_pi <- sum(blockLabels == 0) - 1 if (toupper(covariancePrior) %in% c("INDEPENDENT", "INDEP", "IG")) { hyperpar.all <- hyperpar.all[-c(3:6, 7:11, 14:15)] } if (toupper(covariancePrior) %in% c("DENSE", "IW")) { hyperpar.all <- hyperpar.all[-c(3:6, 10:11, 12:15)] } if (toupper(covariancePrior) %in% c("SPARSE", "HIW")) { hyperpar.all <- hyperpar.all[-c(3:6, 12:15)] } } if (toupper(gammaPrior) %in% c("MRF", "MARKOV RANDOM FIELD")) { if (is.null(mrfG)) { my_stop("Argument 'mrfG' was specified!", tmpFolder) } if (toupper(covariancePrior) %in% c("INDEPENDENT", "INDEP", "IG")) { hyperpar.all <- hyperpar.all[-c(3:6, 7:13)] } if (toupper(covariancePrior) %in% c("DENSE", "IW")) { hyperpar.all <- hyperpar.all[-c(3:6, 10:13)] } if (toupper(covariancePrior) %in% c("SPARSE", "HIW")) { hyperpar.all <- hyperpar.all[-c(3:6, 12:13)] } } if (toupper(betaPrior) == "REGROUP") { hyperpar.all$a_w0 <- hyperpar.all$a_w hyperpar.all$b_w0 <- hyperpar.all$b_w } if (length(hyperpar) > 0) { for (i in seq_along(hyperpar)) { if (names(hyperpar)[[i]] %in% names(hyperpar.all)) { hyperpar.all[[which(names(hyperpar.all) == names(hyperpar)[[i]])]] <- hyperpar[[i]] } if (!is.null(hyperpar$a_omega)) hyperpar.all$a_pi <- hyperpar$a_omega if (!is.null(hyperpar$b_omega)) hyperpar.all$b_pi <- hyperpar$b_omega } } # method to use if (toupper(covariancePrior) %in% c("SPARSE", "HIW")) { covariancePrior <- "HIW" } else if (toupper(covariancePrior) %in% c("DENSE", "IW")) { covariancePrior <- "IW" } else if (toupper(covariancePrior) %in% c("INDEPENDENT", "INDEP", "IG")) { covariancePrior <- "IG" } else { my_stop("Unknown covariancePrior argument: only sparse (HIW), dense(IW) or independent (IG) are available", tmpFolder) } # mrfG and gammaPrior if (gammaPrior == "") { if (is.null(mrfG)) { message("Using default prior for Gamma - hotspot prior\n") # mrfG="" gammaPrior <- "hotspot" } else { message("No value for gammaPrior was specified, but mrfG was given - choosing MRF prior\n") gammaPrior <- "MRF" } } else { if (toupper(gammaPrior) %in% c("HOTSPOT", "HOTSPOTS", "HS")) { gammaPrior <- "hotspot" } else if (toupper(gammaPrior) %in% c("MRF", "MARKOV RANDOM FIELD")) { gammaPrior <- "MRF" } else if (toupper(gammaPrior) %in% c("HIERARCHICAL", "H")) { gammaPrior <- "hierarchical" } else { my_stop("Unknown gammaPrior argument: only hotspot, MRF or hierarchical are available", tmpFolder) } } # if mrfG is not a string if (!(is.character(mrfG) && length(mrfG) == 1)) { # if it's a matrix if ((is.numeric(mrfG) || is.data.frame(mrfG)) && !is.null(dim(mrfG))) { if (ncol(mrfG) == 2) { mrfG <- cbind(mrfG, rep(1, nrow(mrfG))) } write.table(mrfG, paste(sep = "", outFilePath, "mrfG.txt"), row.names = FALSE, col.names = FALSE) mrfG <- paste(sep = "", outFilePath, "mrfG.txt") } else if (is.null(mrfG)) { # save a meaningless mrfG.txt file to pass the parameter to C++ mrfG <- matrix(c(0, 0, 0), ncol = 3) write.table(mrfG, paste(sep = "", outFilePath, "mrfG.txt"), row.names = FALSE, col.names = FALSE) mrfG <- paste(sep = "", outFilePath, "mrfG.txt") } else { my_stop("Unknown mrfG argument: check the help function for possibile values", tmpFolder) } } ## Set up the XML file for hyperparameters xml <- as_xml_document( list(hyperparameters = list( lapply(hyperpar, function(x) list(x)) # every element in the list should be a list )) ) hyperParFile <- paste(sep = "", tmpFolder, "hyperpar.xml") write_xml(xml, file = hyperParFile) ## Create the return 
object ret <- list(status = 1, input = list(), output = list()) class(ret) <- "BayesSUR" # Copy the inputs ret$input["nIter"] <- nIter ret$input["burnin"] <- burnin ret$input["nChains"] <- nChains ret$input["covariancePrior"] <- covariancePrior ret$input["gammaPrior"] <- gammaPrior ret$input["gammaSampler"] <- gammaSampler ret$input["gammaInit"] <- gammaInit ret$input["mrfG"] <- mrfG if (toupper(gammaPrior) %in% c("HIERARCHICAL", "H")) { names(hyperpar.all)[names(hyperpar.all) == "a_pi"] <- "a_omega" names(hyperpar.all)[names(hyperpar.all) == "b_pi"] <- "b_omega" } ret$input$hyperParameters <- hyperpar.all methodString <- switch(covariancePrior, "HIW" = "SSUR", "IW" = "dSUR", "IG" = "HRR" ) ret$call <- cl # Prepare path to outputs ret$output["outFilePath"] <- outFilePath ret$output["logP"] <- paste(sep = "", dataString, "_", methodString, "_logP_out.txt") if (output_gamma) { ret$output["gamma"] <- paste(sep = "", dataString, "_", methodString, "_gamma_out.txt") } if (gammaPrior %in% c("hierarchical", "hotspot") && output_pi) { ret$output["pi"] <- paste(sep = "", dataString, "_", methodString, "_pi_out.txt") } if (gammaPrior == "hotspot" && output_tail) { ret$output["tail"] <- paste(sep = "", dataString, "_", methodString, "_hotspot_tail_p_out.txt") } if (output_beta) { ret$output["beta"] <- paste(sep = "", dataString, "_", methodString, "_beta_out.txt") } if (covariancePrior == "HIW" && output_Gy) { ret$output["Gy"] <- paste(sep = "", dataString, "_", methodString, "_Gy_out.txt") ret$output["Gvisit"] <- paste(sep = "", dataString, "_", methodString, "_Gy_visit.txt") } if (covariancePrior %in% c("HIW", "IW") && output_sigmaRho) { ret$output["sigmaRho"] <- paste(sep = "", dataString, "_", methodString, "_sigmaRho_out.txt") } if (output_model_size) { ret$output["model_size"] <- paste(sep = "", dataString, "_", methodString, "_model_size_out.txt") } if (output_CPO) { ret$output["CPO"] <- paste(sep = "", dataString, "_", methodString, "_CPO_out.txt") ret$output["CPOsumy"] <- paste(sep = "", dataString, "_", methodString, "_CPOsumy_out.txt") ret$output["WAIC"] <- paste(sep = "", dataString, "_", methodString, "_WAIC_out.txt") } if (output_Y) { ret$output["Y"] <- paste(sep = "", "data_Y.txt") } if (output_X) { ret$output["X"] <- paste(sep = "", "data_X.txt") if (length(X_0) > 0) { ret$output["X0"] <- paste(sep = "", "data_X0.txt") } } # set.seed(seed) # betaPrior="independent" # set number of threads maxThreads <- min(maxThreads, detectCores()) ret$status <- BayesSUR_internal(data, mrfG, blockList, structureGraph, hyperParFile, outFilePath, nIter, burnin, nChains, covariancePrior, gammaPrior, gammaSampler, gammaInit, betaPrior, maxThreads, output_gamma, output_beta, output_Gy, output_sigmaRho, output_pi, output_tail, output_model_size, output_CPO, output_model_visit) ## save fitted object obj_BayesSUR <- list(status = ret$status, input = ret$input, output = ret$output, call = ret$call) save(obj_BayesSUR, file = paste(sep = "", outFilePath, "obj_BayesSUR.RData")) if (outFilePath != tmpFolder) { unlink(tmpFolder, recursive = TRUE) } return(ret) } my_stop <- function(msg, tmpFolder) { unlink(tmpFolder, recursive = TRUE) stop(msg) }
/scratch/gouwar.j/cran-all/cranData/BayesSUR/R/BayesSUR.R
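# --- Illustrative model-choice sketch (added commentary; mirrors the
# documented example above, so only the prior combination is an explicit
# choice here, and the call is commented out because it runs an MCMC chain).
# The nine models in the table of the roxygen details are selected purely
# through the covariancePrior / gammaPrior pair: for instance,
# covariancePrior = "HIW" with gammaPrior = "hotspot" gives SSUR-H, while
# covariancePrior = "IG" with gammaPrior = "MRF" (plus an mrfG edge list)
# would give HRR-M.
# data("exampleEQTL", package = "BayesSUR")
# fit_ssur_h <- BayesSUR(
#   Y = exampleEQTL[["blockList"]][[1]],
#   X = exampleEQTL[["blockList"]][[2]],
#   data = exampleEQTL[["data"]], outFilePath = tempdir(),
#   covariancePrior = "HIW", gammaPrior = "hotspot",
#   nIter = 10, burnin = 0, nChains = 1, tmpFolder = "tmp/"
# )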
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

#' @title BayesSUR_internal
#' @description
#' Run a SUR Bayesian sampler -- internal function
#' @name BayesSUR_internal
#' @param dataFile path to data file
#' @param outFilePath path to where the output is to be written
#' @param nIter number of iterations
#' @param nChains number of parallel chains to run
#'
#' NOTE THAT THIS IS BASICALLY JUST A WRAPPER
NULL

BayesSUR_internal <- function(dataFile, mrfGFile, blockFile, structureGraphFile, hyperParFile, outFilePath,
                              nIter = 10L, burnin = 0L, nChains = 1L,
                              covariancePrior = "HIW", gammaPrior = "hotspot",
                              gammaSampler = "bandit", gammaInit = "MLE", betaPrior = "independent",
                              maxThreads = 1L, output_gamma = TRUE, output_beta = TRUE, output_Gy = TRUE,
                              output_sigmaRho = TRUE, output_pi = TRUE, output_tail = TRUE,
                              output_model_size = TRUE, output_CPO = TRUE, output_model_visit = FALSE) {
  .Call('_BayesSUR_BayesSUR_internal', PACKAGE = 'BayesSUR', dataFile, mrfGFile, blockFile,
        structureGraphFile, hyperParFile, outFilePath, nIter, burnin, nChains, covariancePrior,
        gammaPrior, gammaSampler, gammaInit, betaPrior, maxThreads, output_gamma, output_beta,
        output_Gy, output_sigmaRho, output_pi, output_tail, output_model_size, output_CPO,
        output_model_visit)
}

randU01 <- function() {
  .Call('_BayesSUR_randU01', PACKAGE = 'BayesSUR')
}

randLogU01 <- function() {
  .Call('_BayesSUR_randLogU01', PACKAGE = 'BayesSUR')
}

randIntUniform <- function(a, b) {
  .Call('_BayesSUR_randIntUniform', PACKAGE = 'BayesSUR', a, b)
}

randExponential <- function(lambda) {
  .Call('_BayesSUR_randExponential', PACKAGE = 'BayesSUR', lambda)
}

randVecExponential <- function(n, lambda) {
  .Call('_BayesSUR_randVecExponential', PACKAGE = 'BayesSUR', n, lambda)
}

randBinomial <- function(n, p) {
  .Call('_BayesSUR_randBinomial', PACKAGE = 'BayesSUR', n, p)
}

randMultinomial <- function(n, prob) {
  .Call('_BayesSUR_randMultinomial', PACKAGE = 'BayesSUR', n, prob)
}

randNormal <- function(m = 0., sigmaSquare = 1.) {
  .Call('_BayesSUR_randNormal', PACKAGE = 'BayesSUR', m, sigmaSquare)
}

randVecNormal <- function(n, m = 0., sigmaSquare = 1.) {
  .Call('_BayesSUR_randVecNormal', PACKAGE = 'BayesSUR', n, m, sigmaSquare)
}

randT <- function(nu) {
  .Call('_BayesSUR_randT', PACKAGE = 'BayesSUR', nu)
}

randVecT <- function(n, nu) {
  .Call('_BayesSUR_randVecT', PACKAGE = 'BayesSUR', n, nu)
}

randMvT <- function(nu, m, Sigma) {
  .Call('_BayesSUR_randMvT', PACKAGE = 'BayesSUR', nu, m, Sigma)
}

randGamma <- function(shape, scale) {
  .Call('_BayesSUR_randGamma', PACKAGE = 'BayesSUR', shape, scale)
}

randIGamma <- function(shape, scale) {
  .Call('_BayesSUR_randIGamma', PACKAGE = 'BayesSUR', shape, scale)
}

randWishart <- function(df, S) {
  .Call('_BayesSUR_randWishart', PACKAGE = 'BayesSUR', df, S)
}

randBeta <- function(a, b) {
  .Call('_BayesSUR_randBeta', PACKAGE = 'BayesSUR', a, b)
}

randBernoulli <- function(pi) {
  .Call('_BayesSUR_randBernoulli', PACKAGE = 'BayesSUR', pi)
}
/scratch/gouwar.j/cran-all/cranData/BayesSUR/R/RcppExports.R
#' @title coef method for class \code{BayesSUR}
#' @description
#' Extract the posterior mean of the coefficients of a \code{BayesSUR} class object
#' @name coef.BayesSUR
#'
#' @param object an object of class \code{BayesSUR}
#' @param beta.type type of output beta. Default is \code{marginal}, giving
#' marginal beta estimation. If \code{beta.type="conditional"}, it gives beta
#' estimation conditional on gamma=1.
#' @param Pmax If \code{Pmax=0.5} and \code{beta.type="conditional"}, it gives
#' median probability model betas. Default is 0.
#' @param ... other arguments
#'
#' @return Estimated coefficients from an object of class \code{BayesSUR}.
#' If the \code{BayesSUR} call specified data standardization, the estimated
#' coefficients are based on the standardized data.
#'
#' @examples
#' data("exampleEQTL", package = "BayesSUR")
#' hyperpar <- list(a_w = 2, b_w = 5)
#'
#' set.seed(9173)
#' fit <- BayesSUR(
#'   Y = exampleEQTL[["blockList"]][[1]],
#'   X = exampleEQTL[["blockList"]][[2]],
#'   data = exampleEQTL[["data"]], outFilePath = tempdir(),
#'   nIter = 10, burnin = 0, nChains = 1, gammaPrior = "hotspot",
#'   hyperpar = hyperpar, tmpFolder = "tmp/"
#' )
#'
#' ## check prediction
#' beta.hat <- coef(fit)
#'
#' @export
coef.BayesSUR <- function(object, beta.type = "marginal", Pmax = 0, ...) {
  if (!(beta.type %in% c("marginal", "conditional"))) {
    stop("Please specify correct 'beta.type'!")
  }
  if (Pmax < 0 || Pmax > 1) {
    stop("Please specify a correct argument 'Pmax' in [0,1]!")
  }
  if ((Pmax > 0) && (beta.type == "marginal")) {
    stop("Pmax > 0 is valid only if the argument beta.type = 'conditional'!")
  }

  getEstimator(object, estimator = "beta", Pmax = Pmax, beta.type = beta.type, ...)
}
/scratch/gouwar.j/cran-all/cranData/BayesSUR/R/coef.BayesSUR.R
# This script contains Roxygen code for the datasets used in BayesSUR

#' @title targetGene
#' @name targetGene
#' @docType data
NULL
/scratch/gouwar.j/cran-all/cranData/BayesSUR/R/datasets.R
#' @title expected log pointwise predictive density
#' @description
#' Measure the prediction accuracy by the elpd (expected log pointwise
#' predictive density). The out-of-sample predictive fit can either be
#' estimated by Bayesian leave-one-out cross-validation (LOO) or by the widely
#' applicable information criterion (WAIC) (Vehtari et al. 2017).
#' @name elpd
#'
#' @param object an object of class \code{BayesSUR}
#' @param method the name of the prediction accuracy index. Default is
#' \code{"LOO"} (Bayesian LOO estimate of out-of-sample predictive fit). The
#' other index is \code{"WAIC"} (widely applicable information criterion).
#' For the HRR models, both "\code{LOO}" and "\code{WAIC}" are computed based
#' on the multivariate t-distribution of the posterior predictive rather than
#' an importance-sampling approximation.
#'
#' @return Returns the prediction accuracy measure from an object of class
#' \code{BayesSUR}. It is elpd.loo if the argument \code{method="LOO"} and
#' elpd.waic if \code{method="WAIC"}.
#'
#' @references Vehtari, A., Gelman, A., Gabry, J. (2017). \emph{Practical Bayesian model evaluation using leave-one-out cross-validation and WAIC.} Statistics and Computing, 27(5): 1413–1432.
#'
#' @examples
#' data("exampleEQTL", package = "BayesSUR")
#' hyperpar <- list(a_w = 2, b_w = 5)
#'
#' set.seed(9173)
#' fit <- BayesSUR(
#'   Y = exampleEQTL[["blockList"]][[1]],
#'   X = exampleEQTL[["blockList"]][[2]],
#'   data = exampleEQTL[["data"]], outFilePath = tempdir(),
#'   nIter = 10, burnin = 0, nChains = 1, gammaPrior = "hotspot",
#'   hyperpar = hyperpar, tmpFolder = "tmp/", output_CPO = TRUE
#' )
#'
#' ## check output
#' # print prediction accuracy elpd (expected log pointwise predictive density)
#' # by the Bayesian LOO estimate of out-of-sample predictive fit
#' elpd(fit, method = "LOO")
#'
#' @export
elpd <- function(object, method = "LOO") {
  object$output[-1] <- paste(object$output$outFilePath, object$output[-1], sep = "")

  if (is.null(object$output$CPO)) {
    stop("Please specify the argument 'output_CPO = TRUE' in BayesSUR()!")
  }

  if (toupper(method) == "LOO") {
    # Bayesian LOO estimate: sum of the log conditional predictive ordinates
    elpd <- sum(log(read.table(object$output$CPO)))
    names(elpd) <- "elpd.loo"
  } else if (toupper(method) == "WAIC") {
    # sum of the pointwise WAIC contributions written by the sampler
    elpd <- sum(read.table(object$output$WAIC))
    names(elpd) <- "elpd.waic"
  } else {
    stop("Please give the correct method name!")
  }

  return(elpd)
}
/scratch/gouwar.j/cran-all/cranData/BayesSUR/R/elpd.R
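# --- Illustrative note (added commentary). elpd(fit, "LOO") equals the sum of
# the log conditional predictive ordinates, sum_i log(CPO_i), i.e. the
# Bayesian LOO estimate of out-of-sample fit, while elpd(fit, "WAIC") sums the
# pointwise WAIC file written by the sampler; both require output_CPO = TRUE
# at fitting time, as in the example above. Larger (less negative) values
# indicate better predictive fit.
# elpd(fit, method = "LOO")
# elpd(fit, method = "WAIC")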
#' @title Simulated data set to mimic a small expression quantitative trait loci (eQTL) example #' #' @description #' Simulated data set to mimic a small expression quantitative trait loci #' (eQTL) example, with p=150 single nucleotide polymorphisms (SNPs) as #' explanatory variables, s=10 gene expression features as response variables #' and data for n=100 observations. Loading the data will load the associated #' blockList object needed to fit the model with BayesSUR(). The R code for #' generating the simulated data is given in the Examples paragraph. #' #' #importFrom BDgraph rgwish #' #importFrom gRbase mcsMAT #' #importFrom scrime simulateSNPs #' #' @examples #' # Load the eQTL sample dataset #' data("exampleEQTL", package = "BayesSUR") #' str(exampleEQTL) #' #' \dontrun{ #' # =============== #' # The code below is to show how to generate the dataset "exampleEQTL.rda" above #' # =============== #' #' requireNamespace("BDgraph", quietly = TRUE) #' requireNamespace("gRbase", quietly = TRUE) #' requireNamespace("scrime", quietly = TRUE) #' #' ########################### Problem Dimensions #' n <- 100 #' p <- 150 #' s <- 10 #' #' ############################ Select a set of n x p (SNPs) covariates #' #' ## The synthetic data in the paper use a subset of the real SNPs as covariates, #' # but as the NFBC66 dataset is confidential we'll use scrime to sample similar data #' #' x <- scrime::simulateSNPs(c(n, 10), p, c(3, 2), prop.explain = c(0.9, 0.95))$data[1:n, ] #' x <- cbind(rep(1, n), x) #' #' #################################################################### #' #' graph_pattern <- 2 #' #' snr <- 25 #' #' corr_param <- 0.9 #' #' ### Create the underlying graph #' if (graph_pattern == 1) { #' ### 1) Random but full #' G <- matrix(1, s, s) #' Prime <- list(c(1:s)) #' Res <- Prime #' Sep <- list() #' } else if (graph_pattern == 2) { #' ### 2) Block Diagonal structure #' Prime <- list( #' c(1:floor(s * 2 / 3)), #' c((floor(s * 2 / 3) + 1):(ceiling(s * 4 / 5) - 1)), #' c(ceiling(s * 4 / 5):s) #' ) #' #' Res <- Prime #' Sep <- lapply(Res, function(x) which(x == -99)) #' #' G <- matrix(0, s, s) #' for (i in Prime) { #' G[i, i] <- 1 #' } #' } else if (graph_pattern == 3) { #' ### 3) Decomposable model #' Prime <- list( #' c(1:floor(s * 5 / 12), ceiling(s * 9 / 10):s), #' c(floor(s * 2 / 9):(ceiling(s * 2 / 3) - 1)), #' c(ceiling(s * 2 / 3):(ceiling(s * 4 / 5) - 1)), #' c(ceiling(s * 4 / 5):s) #' ) #' #' Sep <- list() #' H <- list() #' for (i in 2:length(Prime)) { #' H <- union(H, Prime[[i - 1]]) #' Sep[[i - 1]] <- intersect(H, Prime[[i]]) #' } #' #' Res <- list() #' Res[[1]] <- Prime[[1]] #' for (i in 2:length(Prime)) { #' Res[[i]] <- setdiff(Prime[[i]], Sep[[i - 1]]) #' } #' #' G <- matrix(0, s, s) #' for (i in Prime) { #' G[i, i] <- 1 #' } #' #' ## decomp check #' dimnames(G) <- list(1:s, 1:s) #' length(gRbase::mcsMAT(G - diag(s))) > 0 #' } else if (graph_pattern == 4) { #' ### 4) Non-decomposable model #' nblocks <- 5 #' nElemPerBlock <- c( #' floor(s / 4), floor(s / 2) - 1 - floor(s / 4), #' ceiling(s * 2 / 3) - 1 - floor(s / 2), 7 #' ) #' nElemPerBlock <- c(nElemPerBlock, s - sum(nElemPerBlock)) #' res <- 1:s #' blockIdx <- list() #' for (i in 1:nblocks) { #' # blockIdx[[i]] = sample(res,nElemPerBlock[i]) #' blockIdx[[i]] <- res[1:nElemPerBlock[i]] #' res <- setdiff(res, blockIdx[[i]]) #' } #' #' G <- matrix(0, s, s) #' ## add diagonal #' for (i in 1:nblocks) { #' G[blockIdx[[i]], blockIdx[[i]]] <- 1 #' } #' ## add cycle #' G[blockIdx[[1]], blockIdx[[2]]] <- 1 #' G[blockIdx[[2]], 
blockIdx[[1]]] <- 1 #' G[blockIdx[[1]], blockIdx[[5]]] <- 1 #' G[blockIdx[[5]], blockIdx[[1]]] <- 1 #' G[blockIdx[[2]], blockIdx[[3]]] <- 1 #' G[blockIdx[[3]], blockIdx[[2]]] <- 1 #' G[blockIdx[[3]], blockIdx[[5]]] <- 1 #' G[blockIdx[[5]], blockIdx[[3]]] <- 1 #' #' ## decomp check #' dimnames(G) <- list(1:s, 1:s) #' length(gRbase::mcsMAT(G - diag(s))) > 0 #' #' # Prime = blockIdx #' Res <- blockIdx ## this is not correct but not used in the non-decomp case #' } #' #' ### Gamma Pattern #' gamma <- matrix(0, p + 1, s) #' gamma[1, ] <- 1 #' #' #' ### 2) Extra Patterns #' #' ## outcomes (correlated in the decomp model) have some predictors in common #' gamma[6:10, 6:9] <- 1 #' #' ## outcomes (correlated in the decomp model) have some predictors in common #' # gamma[16:20,14:15] = 1 #' #' ## outcomes (sort-of correlated [pair-wise] in the decomp model) #' # have predictors in common 6:15 #' gamma[26:30, 4:8] <- 1 #' #' ## outcomes (NOT correlated in the decomp model) have predictors in common 16:17 #' gamma[36:40, c(3:5, 9:10)] <- 1 #' #' ## these predictors are associated with ALL the outcomes #' gamma[46:50, ] <- 1 #' #' combn11 <- combn(rep((6:9 - 1) * p, each = length(6:10 - 1)) + rep(6:10 - 1, #' times = length(6:9)), 2) #' combn31 <- combn(rep((4:8 - 1) * p, each = length(26:30 - 1)) + rep(26:30 - 1, #' times = length(4:8)), 2) #' combn32 <- combn(rep((4:8 - 1) * p, each = length(46:50 - 1)) + rep(46:50 - 1, #' times = length(4:8)), 2) #' combn41 <- combn(rep((3:5 - 1) * p, each = length(36:40 - 1)) + rep(36:40 - 1, #' times = length(3:5)), 2) #' combn42 <- combn(rep((3:5 - 1) * p, each = length(46:50 - 1)) + rep(46:50 - 1, #' times = length(3:5)), 2) #' combn51 <- combn(rep((9:10 - 1) * p, each = length(36:40 - 1)) + rep(36:40 - 1, #' times = length(9:10)), 2) #' combn52 <- combn(rep((9:10 - 1) * p, each = length(46:50 - 1)) + rep(46:50 - 1, #' times = length(9:10)), 2) #' #' Gmrf <- rbind(t(combn11), t(combn31), t(combn32), t(combn41), t(combn42), t(combn51), t(combn52)) #' #' ## get for every correlated bunch in the decomposable model, #' #' if (graph_pattern < 4) { #' # a different set of predictors #' for (i in 1:length(Prime)) { #' gamma[6:10 + (i + 6) * 10, Prime[[i]]] <- 1 #' } ## for each Prime component #' #' ## for every Residual instead #' for (i in 1:length(Res)) { #' gamma[6:10 + (i + 10) * 10, Res[[i]]] <- 1 #' } #' } else { #' for (i in 1:length(Prime)) { #' gamma[6:10 + (i + 4) * 10, Prime[[i]]] <- 1 #' } ## for each Prime component #' #' ## for every Residual instead #' for (i in 1:length(Res)) { #' gamma[6:10 + (i + 9) * 10, Res[[i]]] <- 1 #' } #' } #' #' #### Sample the betas #' sd_b <- 1 #' b <- matrix(rnorm((p + 1) * s, 0, sd_b), p + 1, s) #' #' xb <- matrix(NA, n, s) #' #' for (i in 1:s) { #' if (sum(gamma[, i]) > 1) { #' xb[, i] <- x[, gamma[, i] == 1] %*% b[gamma[, i] == 1, i] #' } else { #' xb[, i] <- rep(1, n) * b[1, i] #' } #' } #' #' ## Sample the variance #' v_r <- mean(diag(var(xb))) / snr #' #' nu <- s + 1 #' #' M <- matrix(corr_param, s, s) #' diag(M) <- rep(1, s) #' #' P <- BDgraph::rgwish(n = 1, adj = G, b = 3, D = v_r * M) #' #' var <- solve(P) #' #' factor <- 10 #' factor_min <- 0.01 #' factor_max <- 1000 #' count <- 0 #' maxit <- 10000 #' #' factor_prev <- 1 #' #' repeat{ #' var <- var / factor * factor_prev #' #' ### Sample the errors and the Ys #' cVar <- chol(as.matrix(var)) #' # err = matrix(rnorm(n*s),n,s) %*% cVar #' err <- matrix(rnorm(n * s, sd = 0.5), n, s) %*% cVar #' y <- xb + err #' #' ## Reparametrisation ( assuming PEO is 1:s ) #' cVar 
<- t(cVar) # make it lower-tri #' S <- diag(diag(cVar)) #' sigma <- S * S #' L <- cVar %*% solve(S) #' rho <- diag(s) - solve(L) #' #' ### S/N Ratio #' emp_snr <- mean(diag(var(xb) %*% solve(sigma))) #' emp_g_snr <- mean(diag(var((err) %*% t(rho)) %*% solve(sigma))) #' #' ############## #' #' if (abs(emp_snr - snr) < (snr / 10) | count > maxit) { #' break #' } else { #' if (emp_snr < snr) { # increase factor #' factor_min <- factor #' } else { # decrease factor #' factor_max <- factor #' } #' factor_prev <- factor #' factor <- (factor_min + factor_max) / 2 #' } #' count <- count + 1 #' } #' #' ################# #' colnames(y) <- paste("GEX", 1:ncol(y), sep = "") #' colnames(G) <- colnames(y) #' Gy <- G #' gamma <- gamma[-1, ] #' mrfG <- Gmrf[!duplicated(Gmrf), ] #' data <- cbind(y, x[, -1]) # leave out the intercept because is coded inside already #' #' exampleEQTL <- list(data = data, blockList = list(1:s, s + 1:p)) #' #' ## Write data file to the user's directory by save() #' } #' "exampleEQTL"
/scratch/gouwar.j/cran-all/cranData/BayesSUR/R/exampleEQTL.R
#' @title Preprocessed data set to mimic a small pharmacogenomic example #' #' @description #' Preprocessed data set to mimic a small pharmacogenetic example from the #' Genomics of Drug Sensitivity in Cancer (GDSC) database, with p=850 gene #' features as explanatory variables, s=7 drugs sensitivity data as response #' variables and data for n=498 cell lines. Gene features include p1=343 gene #' expression features (GEX), p2=426 by copy number variations (CNV) and p3=68 #' mutated genes (MUT). Loading the data will load the associated blockList #' (and mrfG) objects needed to fit the model with BayesSUR(). The R code for #' generating the simulated data is given in the Examples paragraph. #' #' #importFrom plyr mapvalues #' #importFrom data.table like #' #' @examples #' # Load the GDSC sample dataset #' data("exampleGDSC", package = "BayesSUR") #' str(exampleGDSC) #' #' \dontrun{ #' # =============== #' # This code below is to do preprocessing of GDSC data and obtain the complete dataset #' # "exampleGDSC.rda" above. The user needs load the datasets from #' # https://www.cancerrxgene.org release 5. #' # But downloading and transforming the three used datasets below to *.csv files first. #' # =============== #' #' requireNamespace("plyr", quietly = TRUE) #' requireNamespace("data.table", quietly = TRUE) #' #' #' features <- data.frame(read.csv("/gdsc_en_input_w5.csv", head = T)) #' names.fea <- strsplit(rownames(features), "") #' features <- t(features) #' p <- c(13321, 13747 - 13321, 13818 - 13747) #' Cell.Line <- rownames(features) #' features <- data.frame(Cell.Line, features) #' #' ic50_00 <- data.frame(read.csv("gdsc_drug_sensitivity_fitted_data_w5.csv", head = T)) #' ic50_0 <- ic50_00[, c(1, 4, 7)] #' drug.id <- data.frame(read.csv("gdsc_tissue_output_w5.csv", head = T))[, c(1, 3)] #' drug.id2 <- drug.id[!duplicated(drug.id$drug.id), ] #' # delete drug.id=1066 since ID1066 and ID156 both correspond drug AZD6482, #' # and no ID1066 in the "suppl.Data1" by Garnett et al. 
(2012) #' drug.id2 <- drug.id2[drug.id2$drug.id != 1066, ] #' drug.id2$drug.name <- as.character(drug.id2$drug.name) #' drug.id2$drug.name <- substr(drug.id2$drug.name, 1, nchar(drug.id2$drug.name) - 6) #' drug.id2$drug.name <- gsub(" ", "-", drug.id2$drug.name) #' #' ic50 <- ic50_0 #' # mapping the drug_id to drug names in drug sensitivity data set #' ic50$drug_id <- plyr::mapvalues(ic50$drug_id, from = drug.id2[, 2], to = drug.id2[, 1]) #' colnames(ic50) <- c("Cell.Line", "compound", "IC50") #' #' # transform drug sensitivity overall cell lines to a data matrix #' y0 <- reshape(ic50, v.names = "IC50", timevar = "compound", #' idvar = "Cell.Line", direction = "wide") #' y0$Cell.Line <- gsub("-", ".", y0$Cell.Line) #' #' # =============== #' # select nonmissing pharmacological data #' # =============== #' y00 <- y0 #' m0 <- dim(y0)[2] - 1 #' eps <- 0.05 #' # r1.na is better to be not smaller than r2.na #' r1.na <- 0.3 #' r2.na <- 0.2 #' k <- 1 #' while (sum(is.na(y0[, 2:(1 + m0)])) > 0) { #' r1.na <- r1.na - eps / k #' r2.na <- r1.na - eps / k #' k <- k + 1 #' ## select drugs with <30% (decreasing with k) missing data overall cell lines #' na.y <- apply(y0[, 2:(1 + m0)], 2, function(xx) sum(is.na(xx)) / length(xx)) #' while (sum(na.y < r1.na) < m0) { #' y0 <- y0[, -c(1 + which(na.y >= r1.na))] #' m0 <- sum(na.y < r1.na) #' na.y <- apply(y0[, 2:(1 + m0)], 2, function(xx) sum(is.na(xx)) / length(xx)) #' } #' #' ## select cell lines with treatment of at least 80% (increasing with k) drugs #' na.y0 <- apply(y0[, 2:(1 + m0)], 1, function(xx) sum(is.na(xx)) / length(xx)) #' while (sum(na.y0 < r2.na) < (dim(y0)[1])) { #' y0 <- y0[na.y0 < r2.na, ] #' na.y0 <- apply(y0[, 2:(1 + m0)], 1, function(xx) sum(is.na(xx)) / length(xx)) #' } #' num.na <- sum(is.na(y0[, 2:(1 + m0)])) #' message("#{NA}=", num.na, "\n", "r1.na =", r1.na, ", r2.na =", r2.na, "\n") #' } #' #' # =============== #' # combine drug sensitivity, tissues and molecular features #' # =============== #' yx <- merge(y0, features, by = "Cell.Line") #' names.cell.line <- yx$Cell.Line #' names.drug <- colnames(yx)[2:(dim(y0)[2])] #' names.drug <- substr(names.drug, 6, nchar(names.drug)) #' # numbers of gene expression features, copy number festures and muatation features #' p <- c(13321, 13747 - 13321, 13818 - 13747) #' num.nonpen <- 13 #' yx <- data.matrix(yx[, -1]) #' y <- yx[, 1:(dim(y0)[2] - 1)] #' x <- cbind(yx[, dim(y0)[2] - 1 + sum(p) + 1:num.nonpen], yx[, dim(y0)[2] - 1 + 1:sum(p)]) #' #' # delete genes with only one mutated cell line #' x <- x[, #' -c(num.nonpen + p[1] + p[2] + which(colSums(x[, num.nonpen + p[1] + p[2] + 1:p[3]]) <= 1))] #' p[3] <- ncol(x) - num.nonpen - p[1] - p[2] #' #' GDSC <- list( #' y = y, x = x, p = p, num.nonpen = num.nonpen, names.cell.line = names.cell.line, #' names.drug = names.drug #' ) #' #' #' ## ================ #' ## ================ #' ## select a small set of drugs #' ## ================ #' ## ================ #' #' name_drugs <- c( #' "Methotrexate", "RDEA119", "PD-0325901", "CI-1040", "AZD6244", "Nilotinib", #' "Axitinib" #' ) #' #' # extract the drugs' pharmacological profiling and tissue dummy #' YX0 <- cbind(GDSC$y[, colnames(GDSC$y) %in% paste("IC50.", name_drugs, sep = "")] #' [, c(1, 3, 6, 4, 7, 2, 5)], GDSC$x[, 1:GDSC$num.nonpen]) #' colnames(YX0) <- c(name_drugs, colnames(GDSC$x)[1:GDSC$num.nonpen]) #' # extract the genetic information of CNV & MUT #' X23 <- GDSC$x[, GDSC$num.nonpen + GDSC$p[1] + 1:(p[2] + p[3])] #' colnames(X23)[1:p[2]] <- paste(substr( #' colnames(X23)[1:p[2]], 1, #' 
nchar(colnames(X23)[1:p[2]]) - 3 #' ), ".CNV", sep = "") #' #' # locate all genes with CNV or MUT information #' name_genes_duplicate <- c( #' substr(colnames(X23)[1:p[2]], 1, nchar(colnames(X23)[1:p[2]]) - 4), #' substr(colnames(X23)[p[2] + 1:p[3]], 1, nchar(colnames(X23)[p[2] + 1:p[3]]) - 4) #' ) #' name_genes <- name_genes_duplicate[!duplicated(name_genes_duplicate)] #' #' # select the GEX which have the common genes with CNV or MUT #' X1 <- #' GDSC$x[, GDSC$num.nonpen + which(colnames(GDSC$x)[GDSC$num.nonpen + 1:p[1]] %in% name_genes)] #' #' p[1] <- ncol(X1) #' X1 <- log(X1) #' #' # summary the data information #' exampleGDSC <- list(data = cbind(YX0, X1, X23)) #' exampleGDSC$blockList <- list( #' 1:length(name_drugs), length(name_drugs) + 1:GDSC$num.nonpen, #' ncol(YX0) + 1:sum(p) #' ) #' #' # ======================== #' # construct the G matrix: edge potentials in the MRF prior #' # ======================== #' #' # edges between drugs: Group1 ("RDEA119","17-AAG","PD-0325901","CI-1040" and "AZD6244") #' # indexed as (2:5) #' # http://software.broadinstitute.org/gsea/msigdb/cards/KEGG_MAPK_SIGNALING_PATHWAY #' pathway_genes <- read.table("MAPK_pathway.txt")[[1]] #' Idx_Pathway1 <- which(c(colnames(X1), name_genes_duplicate) %in% pathway_genes) #' Gmrf_Group1Pathway1 <- t(combn(rep(Idx_Pathway1, each = length(2:5)) + #' rep((2:5 - 1) * sum(p), times = length(Idx_Pathway1)), 2)) #' #' # edges between drugs: Group2 ("Nilotinib","Axitinib") indexed as (6:7) #' # delete gene ABL2 #' Idx_Pathway2 <- which(c(colnames(X1), name_genes_duplicate) %like% "BCR" | #' c(colnames(X1), name_genes_duplicate) %like% "ABL")[-c(3, 5)] #' Gmrf_Group2Pathway2 <- t(combn(rep(Idx_Pathway2, each = length(6:7)) + #' rep((6:7 - 1) * sum(p), times = length(Idx_Pathway2)), 2)) #' #' # edges between the common gene in different data sources #' Gmrf_CommonGene <- NULL #' list_CommonGene <- list(0) #' k <- 1 #' for (i in 1:length(name_genes)) { #' Idx_CommonGene <- which(c(colnames(X1), name_genes_duplicate) == name_genes[i]) #' if (length(Idx_CommonGene) > 1) { #' Gmrf_CommonGene <- rbind(Gmrf_CommonGene, #' t(combn(rep(Idx_CommonGene, each = length(name_drugs)) #' + rep((1:length(name_drugs) - 1) * sum(p), times = length(Idx_CommonGene)), 2))) #' k <- k + 1 #' } #' } #' Gmrf_duplicate <- rbind(Gmrf_Group1Pathway1, Gmrf_Group2Pathway2, Gmrf_CommonGene) #' Gmrf <- Gmrf_duplicate[!duplicated(Gmrf_duplicate), ] #' exampleGDSC$mrfG <- Gmrf #' #' # create the target gene names of the two groups of drugs #' targetGenes1 <- matrix(Idx_Pathway1, nrow = 1) #' colnames(targetGenes1) <- colnames(exampleGDSC$data)[seq_along(targetGene$group1)] #' targetGenes2 <- matrix(Idx_Pathway2, nrow = 1) #' colnames(targetGenes2) <- colnames(exampleGDSC$data)[seq_along(targetGene$group2)] #' #' targetGene <- list(group1 = targetGenes1, group2 = targetGenes2) #' #' ## Write data file exampleGDSC.rda to the user's directory by save() #' } #' "exampleGDSC"
/scratch/gouwar.j/cran-all/cranData/BayesSUR/R/exampleGDSC.R
#' @title get fitted responses
#' @description
#' Return the fitted response values that correspond to the posterior mean
#' estimates from a \code{BayesSUR} class object.
#' @name fitted.BayesSUR
#'
#' @param object an object of class \code{BayesSUR}
#' @param beta.type type of estimated beta for the fitted model. Default is
#' \code{marginal}, giving marginal beta estimation. If
#' \code{beta.type="conditional"}, it gives beta estimation conditional
#' on gamma=1
#' @param Pmax valid if \code{beta.type="conditional"}. If
#' \code{beta.type="conditional"} and \code{Pmax=0.5}, it gives median
#' probability model betas. Default is 0
#' @param ... other arguments
#'
#' @return Fitted values extracted from an object of class \code{BayesSUR}. If
#' the \code{BayesSUR} call specified data standardization, the fitted values
#' are based on the standardized data.
#'
#' @examples
#' data("exampleEQTL", package = "BayesSUR")
#' hyperpar <- list(a_w = 2, b_w = 5)
#'
#' set.seed(9173)
#' fit <- BayesSUR(
#'   Y = exampleEQTL[["blockList"]][[1]],
#'   X = exampleEQTL[["blockList"]][[2]],
#'   data = exampleEQTL[["data"]], outFilePath = tempdir(),
#'   nIter = 10, burnin = 0, nChains = 1, gammaPrior = "hotspot",
#'   hyperpar = hyperpar, tmpFolder = "tmp/"
#' )
#'
#' ## check fitted values
#' fitted.val <- fitted(fit)
#'
#' @export
fitted.BayesSUR <- function(object, Pmax = 0, beta.type = "marginal", ...) {
  if (Pmax < 0 || Pmax > 1) {
    stop("Please specify correct argument 'Pmax' in [0,1]!")
  }
  if ((Pmax > 0) && (beta.type == "marginal")) {
    stop("Pmax > 0 is valid only if the argument beta.type = 'conditional'!")
  }

  beta_hat <- getEstimator(object, estimator = "beta", Pmax = Pmax, beta.type = beta.type, ...)
  object$output[-1] <- paste(object$output$outFilePath, object$output[-1], sep = "")

  X <- as.matrix(read.table(object$output$X, header = TRUE))
  if ("X0" %in% names(object$output)) {
    X0 <- as.matrix(read.table(object$output$X0))
  } else {
    X0 <- NULL
  }

  y.pred <- cbind(X0, X) %*% beta_hat
  return(y.pred)
}
/scratch/gouwar.j/cran-all/cranData/BayesSUR/R/fitted.BayesSUR.R
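A minimal usage sketch (not part of the package sources): assuming `fit` is a BayesSUR object like the one created in the roxygen example above, marginal and conditional fitted values can be compared directly.

# Illustration only: fitted values under marginal vs. conditional beta estimates
yhat_marginal <- fitted(fit)                                            # marginal betas (default)
yhat_conditional <- fitted(fit, beta.type = "conditional", Pmax = 0.5)  # median probability model
summary(as.vector(yhat_marginal - yhat_conditional))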
#' @title extract the posterior mean of parameters
#' @description
#' Extract the posterior mean of the parameters of a \code{BayesSUR} class object.
#' @name getEstimator
#'
#' @param object an object of class \code{BayesSUR}
#' @param estimator the name of one estimator. Default is the latent indicator
#' estimator "\code{gamma}". The other options "\code{beta}", "\code{Gy}",
#' "\code{CPO}" and "\code{logP}" correspond to the marginal (conditional)
#' coefficient matrix if \code{beta.type="marginal"} (\code{"conditional"}),
#' the response graph and the conditional predictive ordinate (CPO), respectively
#' @param Pmax threshold that truncates the estimator "\code{gamma}" or
#' "\code{Gy}". Default is \code{0}. If \code{Pmax=0.5} and
#' \code{beta.type="conditional"}, it gives median probability model betas
#' @param beta.type the type of output beta. Default is \code{marginal}, giving
#' marginal beta estimation. If \code{beta.type="conditional"}, it gives beta
#' estimation conditional on gamma=1
#'
#' @return Return the estimator from an object of class \code{BayesSUR}. It is
#' a list if the length of the argument \code{estimator} is greater than 1.
#' Otherwise, it is a matrix
#'
#' @examples
#' data("exampleEQTL", package = "BayesSUR")
#' hyperpar <- list(a_w = 2, b_w = 5)
#'
#' set.seed(9173)
#' fit <- BayesSUR(
#'   Y = exampleEQTL[["blockList"]][[1]],
#'   X = exampleEQTL[["blockList"]][[2]],
#'   data = exampleEQTL[["data"]], outFilePath = tempdir(),
#'   nIter = 10, burnin = 0, nChains = 1, gammaPrior = "hotspot",
#'   hyperpar = hyperpar, tmpFolder = "tmp/"
#' )
#'
#' ## check output
#' # extract the posterior mean of the coefficients matrix
#' beta_hat <- getEstimator(fit, estimator = "beta")
#'
#' @export
getEstimator <- function(object, estimator = "gamma", Pmax = 0, beta.type = "marginal") {
  object$output[-1] <- paste(object$output$outFilePath, object$output[-1], sep = "")
  if (sum(!estimator %in% c("gamma", "beta", "Gy", "CPO", "logP")) > 0) {
    stop("Please specify correct 'estimator'!")
  } else {
    ret <- rep(list(NULL), length(estimator))
    names(ret) <- estimator
  }
  if (Pmax < 0 || Pmax > 1) {
    stop("Please specify correct argument 'Pmax' in [0,1]!")
  }
  if ("gamma" %in% estimator) {
    ret$gamma <- as.matrix(read.table(object$output$gamma))
    if (Pmax > 0) {
      ret$gamma[ret$gamma <= Pmax] <- 0
    }
    rownames(ret$gamma) <- colnames(read.table(object$output$X, header = TRUE))
    colnames(ret$gamma) <- colnames(read.table(object$output$Y, header = TRUE))
  }
  if ("beta" %in% estimator) {
    ret$beta <- as.matrix(read.table(object$output$beta))
    if (sum(beta.type %in% c("marginal", "conditional")) > 0) {
      if (beta.type == "conditional") {
        gammas <- as.matrix(read.table(object$output$gamma))
        if ("X0" %in% names(object$output)) {
          X0 <- as.matrix(read.table(object$output$X0))
          ret$beta[-seq_len(ncol(X0)), ] <- (gammas >= Pmax) * ret$beta[-seq_len(ncol(X0)), ] / gammas
        } else {
          ret$beta <- (gammas >= Pmax) * ret$beta / gammas
        }
        ret$beta[is.na(ret$beta)] <- 0
      }
    } else {
      stop("Please specify correct beta.type!")
    }
    colnames(ret$beta) <- colnames(read.table(object$output$Y, header = TRUE))
    if ("X0" %in% names(object$output)) {
      rownames(ret$beta) <- c(colnames(read.table(object$output$X0, header = TRUE)), colnames(read.table(object$output$X, header = TRUE)))
    } else {
      rownames(ret$beta) <- colnames(read.table(object$output$X, header = TRUE))
    }
  }
  if ("Gy" %in% estimator) {
    covariancePrior <- object$input$covariancePrior
    if (covariancePrior == "HIW") {
      ret$Gy <- as.matrix(read.table(object$output$Gy))
    } else {
      stop("Gy is only estimated with hyper-inverse Wishart prior for the covariance matrix of responses!")
    }
    if (Pmax > 0) {
      ret$Gy[ret$Gy <= Pmax] <- 0
    }
    rownames(ret$Gy) <- colnames(ret$Gy) <- names(read.table(object$output$Y, header = TRUE))
  }
  if ("CPO" %in% estimator) {
    if (is.null(object$output$CPO)) {
      stop("Please specify argument output_CPO in BayesSUR()!")
    }
    ret$CPO <- as.matrix(read.table(object$output$CPO))
    rownames(ret$CPO) <- rownames(as.matrix(read.table(object$output$Y, header = TRUE)))
    colnames(ret$CPO) <- colnames(as.matrix(read.table(object$output$Y, header = TRUE)))
  }
  if ("logP" %in% estimator) {
    ret$logP <- t(as.matrix(read.table(object$output$logP)))
  }
  if (length(estimator) > 1) {
    return(ret)
  } else {
    return(ret[[1]])
  }
}
/scratch/gouwar.j/cran-all/cranData/BayesSUR/R/getEstimator.R
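A short sketch (assuming the same `fit` object as in the roxygen example above): requesting several estimators at once returns a named list, while a single estimator is returned directly as a matrix.

# Illustration only: extract several posterior summaries in one call
est <- getEstimator(fit, estimator = c("gamma", "beta"))
dim(est$gamma)  # mPIP matrix, predictors x responses
dim(est$beta)   # posterior mean coefficient matrix
# a single estimator comes back as a matrix, not a list
gamma_hat <- getEstimator(fit, estimator = "gamma")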
#' @title create a selection of plots
#' @description
#' plot method for class \code{BayesSUR}. This is the main plot function to be
#' called by the user. This function calls one or several of the following
#' functions: \code{plotEstimator()}, \code{plotGraph()}, \code{plotMCMCdiag()},
#' \code{plotManhattan()}, \code{plotNetwork()}, \code{plotCPO()}.
#' @importFrom grDevices dev.hold dev.flush devAskNewPage
#' @name plot.BayesSUR
#'
#' @param x an object of class \code{BayesSUR}
#' @param estimator It is in \code{c(NULL, 'beta', 'gamma', 'Gy', 'logP', 'CPO')}
#' and works by combining with argument \code{type}.
#' \itemize{
#' \item If \code{estimator} is in \code{c("beta", "gamma", "Gy")} and
#' argument \code{type="heatmap"}, it prints heatmaps of the specified
#' estimators by a call to function \code{plotEstimator()};
#' see that function for further arguments.
#' \item If \code{estimator="Gy"} and argument \code{type="graph"}, it prints
#' a structure graph of \code{"Gy"} by a call to function \code{plotGraph()};
#' see that function for further arguments.
#' \item If \code{estimator=c("gamma", "Gy")} and argument
#' \code{type="network"}, it prints the estimated network between the
#' response variables and predictors with nonzero coefficients by a call to
#' function \code{plotNetwork()}; see that function for further arguments.
#' \item If \code{estimator=NULL} (default) and \code{type=NULL} (default),
#' it interactively prints the plots of estimators (i.e., beta, gamma
#' and (or) Gy), response graph Gy, network, Manhattan and MCMC diagnostics.
#' }
#' @param type It is one of \code{NULL}, \code{"heatmap"}, \code{"graph"},
#' \code{"network"}, \code{"Manhattan"} and \code{"diagnostics"}, and works by
#' combining with argument \code{estimator}.
#' \itemize{
#' \item If \code{type="Manhattan"} and argument \code{estimator="gamma"},
#' it prints Manhattan-like plots for marginal posterior inclusion
#' probabilities (mPIP) and numbers of associated response variables for
#' individual predictors by a call to function \code{plotManhattan()};
#' see that function for further arguments.
#' \item If \code{type="diagnostics"} and argument \code{estimator="logP"},
#' it shows trace plots and diagnostic density plots of a fitted model by a
#' call to function \code{plotMCMCdiag()}; see that function for further arguments.
#' \item If \code{type="diagnostics"} and argument \code{estimator="CPO"},
#' it shows the conditional predictive ordinate (CPO) for each individual of
#' a fitted model by a call to function \code{plotCPO()}; see that function
#' for further arguments.
#' }
#' @param ... other arguments, see functions \code{plotEstimator()},
#' \code{plotGraph()}, \code{plotNetwork()}, \code{plotManhattan()},
#' \code{plotMCMCdiag()} or \code{plotCPO()}
#'
#' @examples
#' data("exampleEQTL", package = "BayesSUR")
#' hyperpar <- list(a_w = 2, b_w = 5)
#'
#' set.seed(9173)
#' fit <- BayesSUR(
#'   Y = exampleEQTL[["blockList"]][[1]],
#'   X = exampleEQTL[["blockList"]][[2]],
#'   data = exampleEQTL[["data"]], outFilePath = tempdir(),
#'   nIter = 2, burnin = 0, nChains = 1, gammaPrior = "hotspot",
#'   hyperpar = hyperpar, tmpFolder = "tmp/"
#' )
#'
#' ## check output
#' \dontrun{
#' ## Show the interactive plots.
#' Note that it needs at least 2000*(nbloc+1) iterations
#' ## for the diagnostic plots where nbloc=3 by default
#' # plot(fit)
#' }
#'
#' ## plot heatmaps of the estimated beta, gamma and Gy
#' plot(fit, estimator = c("beta", "gamma", "Gy"), type = "heatmap")
#'
#' ## plot estimated graph of responses Gy
#' plot(fit, estimator = "Gy", type = "graph")
#'
#' ## plot network between response variables and associated predictors
#' plot(fit, estimator = c("gamma", "Gy"), type = "network")
#'
#' ## print Manhattan-like plots
#' plot(fit, estimator = "gamma", type = "Manhattan")
#'
#' ## print MCMC diagnostic plots
#' #plot(fit, estimator = "logP", type = "diagnostics")
#'
#' @export
plot.BayesSUR <- function(x, estimator = NULL, type = NULL, ...) {
  if (!inherits(x, "BayesSUR")) {
    stop("Use only with \"BayesSUR\" objects")
  }
  if (!is.null(estimator)) {
    if (sum(!(estimator %in% c("beta", "gamma", "Gy", "logP", "CPO")))) {
      stop("'estimator' should be in c(NULL, 'beta', 'gamma', 'Gy', 'logP', 'CPO')!")
    }
  } else {
    if (!is.null(type)) {
      stop("If 'estimator = NULL', 'type' has to be 'NULL'!")
    }
  }
  if (!is.null(type)) {
    if (!(type %in% c("heatmap", "graph", "Manhattan", "network", "diagnostics"))) {
      stop("Please specify correct type!")
    }
    if (!(((sum(estimator %in% c("beta", "gamma", "Gy")) > 0) && (type == "heatmap")) ||
      ((length(estimator) == 1) && (estimator[1] == "Gy") && (type == "graph")) ||
      ((length(estimator) == 2) && (sum(estimator %in% c("gamma", "Gy")) == 2) && (type == "network")) ||
      ((length(estimator) == 1) && (estimator[1] == "gamma") && (type == "Manhattan")) ||
      ((length(estimator) == 1) && (estimator[1] == "logP") && (type == "diagnostics")) ||
      ((length(estimator) == 1) && (estimator[1] == "CPO") && (type == "diagnostics")))) {
      stop("Please specify correct argument!")
    }
    ## refer to function plotEstimator()
    if ((sum(estimator %in% c("beta", "gamma", "Gy")) > 0) && (type == "heatmap")) {
      plotEstimator(x, estimator, ...)
    }
    ## refer to function plotGraph()
    if ((length(estimator) == 1) && (estimator[1] == "Gy") && (type == "graph")) {
      plotGraph(x, ...)
    }
    ## refer to function plotNetwork()
    if ((length(estimator) == 2) && (sum(estimator %in% c("gamma", "Gy")) == 2) && (type == "network")) {
      plotNetwork(x, ...)
    }
    ## refer to function plotManhattan()
    if ((length(estimator) == 1) && (estimator[1] == "gamma") && (type == "Manhattan")) {
      plotManhattan(x, ...)
    }
    ## refer to function plotMCMCdiag()
    if ((length(estimator) == 1) && (estimator[1] == "logP") && (type == "diagnostics")) {
      plotMCMCdiag(x, ...)
    }
    ## refer to function plotCPO()
    if ((length(estimator) == 1) && (estimator[1] == "CPO") && (type == "diagnostics")) {
      plotCPO(x, ...)
    }
  } else {
    if (!is.null(estimator)) {
      stop("If 'type = NULL', 'estimator' has to be 'NULL'!")
    }
    ## print plots interactively
    if (is.null(estimator[1])) {
      show <- rep(FALSE, 5)
      show[1:4] <- TRUE
      if (x$input$covariancePrior == "IG") {
        show[2:3] <- FALSE
      }
      devAskNewPage(TRUE)
      if (show[1L]) {
        dev.hold()
        plotEstimator(x, estimator = c("beta", "gamma", "Gy"), header = "\nEstimators", ...)
        dev.flush()
      }
      if (show[2L]) {
        dev.hold()
        plotGraph(x, ...)
        dev.flush()
      }
      if (show[3L]) {
        dev.hold()
        plotNetwork(x, header = "\n\nNetwork representation", ...)
        dev.flush()
      }
      if (show[4L]) {
        dev.hold()
        plotManhattan(x, header = "\n\nManhattan-like plots", ...)
        dev.flush()
      }
      if (show[5L]) {
        dev.hold()
        plotMCMCdiag(x, header = "\nMCMC diagnostic plots", ...)
        dev.flush()
      }
      devAskNewPage(options("device.ask.default")[[1]])
    } else {
      stop("Please specify correct argument!")
    }
  }
}
/scratch/gouwar.j/cran-all/cranData/BayesSUR/R/plot.BayesSUR.R
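The plot method is a thin dispatcher: each estimator/type pair maps to exactly one helper function. A sketch of equivalent calls, assuming `fit` as in the examples above (the graph and network plots additionally require the hyper-inverse Wishart covariance prior):

# Illustration only: plot() dispatch and the equivalent direct calls
plot(fit, estimator = "gamma", type = "Manhattan")          # same as plotManhattan(fit)
plot(fit, estimator = "Gy", type = "graph")                 # same as plotGraph(fit)
plot(fit, estimator = c("gamma", "Gy"), type = "network")   # same as plotNetwork(fit)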
#' @title plot conditional predictive ordinate
#' @description
#' Plot the conditional predictive ordinate (CPO) for each individual of a
#' fitted model of class \code{BayesSUR}.
#' CPO is a handy posterior predictive check because it may be used to identify
#' outliers, influential observations, and for hypothesis testing across
#' different non-nested models (Gelfand 1996).
#' @importFrom graphics axis box text par abline
#' @name plotCPO
#'
#' @param x an object of class \code{BayesSUR}
#' @param outlier.thresh threshold for the CPOs. The default is 0.01.
#' @param outlier.mark mark the outliers with the response names.
#' The default is \code{TRUE}
#' @param scale.CPO plot the CPOs scaled by their maximum.
#' The default is \code{TRUE}
#' @param x.loc a vector of feature locations on the x axis
#' @param axis.label a vector of observation names which are shown in the CPO
#' plot. The default \code{NULL} only shows the indices. The value
#' \code{"auto"} shows the observation names from the original data.
#' @param mark.pos location of the marked text relative to the point
#' @param las graphical parameter of plot.default
#' @param cex.axis graphical parameter of plot.default
#' @param mark.color color of the marked text. The default color is red
#' @param mark.cex font size of the marked text. The default font size is 0.8
#' @param xlab a title for the x axis
#' @param ylab a title for the y axis
#' @param ... other arguments
#'
#' @details The default threshold for the CPOs to detect the outliers is 0.01
#' by Congdon (2005). It can be tuned by the argument \code{outlier.thresh}.
#'
#' @references Statisticat, LLC (2013). \emph{Bayesian Inference.} Farmington, CT: Statisticat, LLC.
#' @references Gelfand A. (1996). \emph{Model Determination Using Sampling Based Methods}. In Gilks W., Richardson S., Spiegelhalter D. (eds.), Markov Chain Monte Carlo in Practice, pp. 145–161. Chapman & Hall, Boca Raton, FL.
#' @references Congdon P. (2005). \emph{Bayesian Models for Categorical Data}. John Wiley & Sons, West Sussex, England.
#'
#' @examples
#' data("exampleEQTL", package = "BayesSUR")
#' hyperpar <- list(a_w = 2, b_w = 5)
#'
#' set.seed(9173)
#' fit <- BayesSUR(
#'   Y = exampleEQTL[["blockList"]][[1]],
#'   X = exampleEQTL[["blockList"]][[2]],
#'   data = exampleEQTL[["data"]], outFilePath = tempdir(),
#'   nIter = 10, burnin = 0, nChains = 1, gammaPrior = "hotspot",
#'   hyperpar = hyperpar, tmpFolder = "tmp/", output_CPO = TRUE
#' )
#'
#' ## check output
#' # plot the conditional predictive ordinate (CPO)
#' plotCPO(fit)
#'
#' @export
plotCPO <- function(x, outlier.mark = TRUE, outlier.thresh = 0.01, scale.CPO = TRUE,
                    x.loc = FALSE, axis.label = NULL, las = 0, cex.axis = 1,
                    mark.pos = c(0, -.01), mark.color = 2, mark.cex = 0.8,
                    xlab = "Observations", ylab = NULL, ...)
{
  x$output[-1] <- paste(x$output$outFilePath, x$output[-1], sep = "")
  if (is.null(x$output$CPO)) {
    stop("Please specify argument output_CPO in BayesSUR()!")
  }
  CPO <- as.matrix(read.table(x$output$CPO))
  rownames(CPO) <- rownames(as.matrix(read.table(x$output$Y, header = TRUE)))
  colnames(CPO) <- colnames(as.matrix(read.table(x$output$Y, header = TRUE)))
  CPO_idx <- seq_len(nrow(CPO))
  if (is.null(ylab)) {
    ylab <- ifelse(scale.CPO, "scaled CPOs", "CPOs")
  }
  name.observations <- rownames(CPO)
  if (is.null(axis.label)) {
    x.loc <- CPO_idx
    names(x.loc) <- CPO_idx
  } else {
    if (axis.label[1] == "auto") {
      x.loc <- CPO_idx
      names(x.loc) <- name.observations
    } else {
      if (!x.loc[1]) {
        x.loc <- seq_along(axis.label)
      } else {
        if (length(axis.label) != length(x.loc)) {
          stop("The given axis labels are not consistent with the data")
        }
      }
      names(x.loc) <- axis.label
    }
  }
  opar <- par(no.readonly = TRUE)
  on.exit(par(opar))
  par(xpd = FALSE)
  if (ncol(CPO) > 1) {
    if (scale.CPO) CPO <- CPO / max(CPO)
    plot.default(as.vector(CPO) ~ rep(CPO_idx, times = ncol(CPO)),
      xlim = c(1, nrow(CPO)), ylim = c(0, max(CPO)), xaxt = "n", bty = "n",
      ylab = ylab, xlab = xlab, main = "Conditional predictive ordinate", pch = 19)
    axis(1, at = x.loc, labels = names(x.loc), las = las, cex.axis = cex.axis)
    box()
    # mark the names of the response variables corresponding to the outlying CPOs;
    # the comparisons use '<=' consistently so coordinates and labels stay aligned
    if (outlier.mark) {
      if (min(CPO) > outlier.thresh) {
        message("NOTE: The minimum CPO is larger than the threshold of the (scaled) CPO!\n")
      } else {
        name.responses <- colnames(CPO)
        text(rep(CPO_idx, times = ncol(CPO))[which(as.vector(CPO) <= outlier.thresh)] + mark.pos[1],
          as.vector(CPO)[as.vector(CPO) <= outlier.thresh] + mark.pos[2],
          labels = rep(name.responses, each = nrow(CPO))[as.vector(CPO) <= outlier.thresh],
          col = mark.color, cex = mark.cex)
        abline(h = outlier.thresh, lty = 2, col = mark.color)
      }
    }
  } else {
    if (scale.CPO) CPO <- CPO / max(CPO)
    plot.default(CPO ~ seq_along(CPO), xaxt = "n", bty = "n",
      xlim = c(1, length(CPO)), ylim = c(min(CPO) + mark.pos[2] * 2, max(CPO)),
      ylab = ylab, xlab = xlab, main = "Conditional predictive ordinate", pch = 19)
    axis(1, at = x.loc, labels = names(x.loc), las = las, cex.axis = cex.axis)
    box()
    # mark the names of the outlying observations;
    # the comparisons use '<=' consistently so coordinates and labels stay aligned
    if (outlier.mark) {
      if (min(CPO) > outlier.thresh) {
        message("NOTE: The minimum CPO is larger than the threshold of the (scaled) CPO!\n")
      } else {
        opar <- par(no.readonly = TRUE)
        on.exit(par(opar))
        par(new = TRUE)
        plot.default(CPO[CPO <= outlier.thresh] ~ which(CPO <= outlier.thresh),
          xaxt = "n", bty = "n", xlim = c(1, length(CPO)),
          ylim = c(min(CPO) + mark.pos[2] * 2, max(CPO)),
          ylab = "", xlab = "", main = "", pch = 19, col = mark.color)
        text(which(CPO <= outlier.thresh) + mark.pos[1],
          CPO[CPO <= outlier.thresh] + mark.pos[2],
          labels = name.observations[CPO <= outlier.thresh],
          col = mark.color, cex = mark.cex)
        abline(h = outlier.thresh, lty = 2, col = mark.color)
      }
    }
  }
}
/scratch/gouwar.j/cran-all/cranData/BayesSUR/R/plotCPO.R
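A sketch of threshold tuning, assuming `fit` was run with output_CPO = TRUE as in the roxygen example above: a smaller outlier.thresh flags fewer observations, and scale.CPO = FALSE plots the raw CPOs.

# Illustration only: raw CPOs with a stricter outlier threshold
plotCPO(fit, scale.CPO = FALSE, outlier.thresh = 0.001)
# place the outlier labels slightly further below the points
plotCPO(fit, outlier.mark = TRUE, mark.pos = c(0, -0.02))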
#' @title plot heatmap of estimators
#' @description
#' Plot the posterior mean estimators from a \code{BayesSUR} class object,
#' including the coefficients beta, latent indicator variable gamma and
#' graph of responses.
#' @importFrom graphics axis box text mtext par image
#' @importFrom grDevices colorRampPalette dev.off grey
#' @importFrom tikzDevice tikz
#' @name plotEstimator
#'
#' @param x an object of class \code{BayesSUR}
#' @param estimator print the heatmap of estimators. The value "beta" is for
#' the estimated coefficients matrix, "gamma" for the latent indicator matrix
#' and "Gy" for the graph of responses
#' @param colorScale.gamma color palette for gamma
#' @param colorScale.beta a vector of three colors for diverging color schemes
#' @param legend.cex.axis magnification of axis annotation relative to cex
#' @param name.responses a vector of the response names. The default is
#' \code{NA}, only showing the locations. The value \code{"auto"} shows the
#' response names from the original data.
#' @param name.predictors a vector of the predictor names. The default is
#' \code{NA}, only showing the locations. The value \code{"auto"} shows the
#' predictor names from the original data.
#' @param xlab a title for the x axis
#' @param ylab a title for the y axis
#' @param fig.tex print the figure through LaTex. Default is \code{FALSE}
#' @param output the file name of printed figure
#' @param header the main title
#' @param header.cex size of the main title for all estimators
#' @param cex.main size of the title for each estimator
#' @param title.beta a title for the printed "beta"
#' @param title.gamma a title for the printed "gamma"
#' @param title.Gy a title for the printed "Gy"
#' @param tick a logical value specifying whether tickmarks and an axis line
#' should be drawn. Default is \code{FALSE}
#' @param mgp the margin line (in mex units) for the axis title, axis labels
#' and axis line
#' @param ... other arguments
#'
#' @examples
#' data("exampleEQTL", package = "BayesSUR")
#' hyperpar <- list(a_w = 2, b_w = 5)
#'
#' set.seed(9173)
#' fit <- BayesSUR(
#'   Y = exampleEQTL[["blockList"]][[1]],
#'   X = exampleEQTL[["blockList"]][[2]],
#'   data = exampleEQTL[["data"]], outFilePath = tempdir(),
#'   nIter = 10, burnin = 0, nChains = 1, gammaPrior = "hotspot",
#'   hyperpar = hyperpar, tmpFolder = "tmp/"
#' )
#'
#' ## check output
#' # Plot the estimators from the fitted object
#' plotEstimator(fit, estimator = c("beta", "gamma", "Gy"))
#'
#' \dontrun{
#' ## Set up temporary work directory for saving a pdf figure
#' # td <- tempdir()
#' # oldwd <- getwd()
#' # setwd(td)
#'
#' ## Produce authentic math formulas in the graph
#' # plotEstimator(fit, estimator = c("beta", "gamma", "Gy"), fig.tex = TRUE)
#' # system(paste(getOption("pdfviewer"), "ParamEstimator.pdf"))
#' # setwd(oldwd)
#' }
#'
#' @export
plotEstimator <- function(x, estimator = NULL, colorScale.gamma = grey((100:0) / 100),
                          colorScale.beta = c("blue", "white", "red"),
                          legend.cex.axis = 1, name.responses = NA,
                          name.predictors = NA, xlab = "", ylab = "",
                          fig.tex = FALSE, output = "ParamEstimator", header = "",
                          header.cex = 2, tick = FALSE, mgp = c(2.5, 1, 0),
                          cex.main = 1.5, title.beta = NA, title.gamma = NA,
                          title.Gy = NA, ...)
{
  if (!inherits(x, "BayesSUR")) {
    stop("Use only with a \"BayesSUR\" object")
  }
  if (sum(!estimator %in% c("beta", "gamma", "Gy")) > 0) {
    stop("Please specify correct argument estimator!")
  }
  x$output[-1] <- paste0(x$output$outFilePath, x$output[-1])
  beta_hat <- as.matrix(read.table(x$output$beta))
  gamma_hat <- as.matrix(read.table(x$output$gamma))
  response_idx <- seq_len(ncol(gamma_hat))
  predictor_idx <- seq_len(nrow(gamma_hat))
  nonpen <- nrow(beta_hat) - nrow(gamma_hat)
  if (nonpen > 0) {
    rownames(beta_hat) <- c(colnames(read.table(x$output$X0, header = TRUE)), colnames(read.table(x$output$X, header = TRUE)))
  } else {
    rownames(beta_hat) <- colnames(read.table(x$output$X, header = TRUE))
  }
  colnames(beta_hat) <- colnames(read.table(x$output$Y, header = TRUE))
  covariancePrior <- x$input$covariancePrior
  if ((covariancePrior == "HIW") && ("Gy" %in% estimator)) {
    Gy_hat <- as.matrix(read.table(x$output$Gy))
    colnames(Gy_hat) <- rownames(Gy_hat) <- colnames(read.table(x$output$Y, header = TRUE))
  }
  # specify the labels of axes; only the first element is tested so that
  # vector-valued names do not break the is.na() condition
  if (is.na(name.responses)[1]) name.responses <- response_idx
  if (name.responses[1] == "auto") name.responses <- colnames(beta_hat)
  if (is.character(name.responses)) {
    if (length(name.responses) != ncol(beta_hat)) {
      stop("The length of the given response names is not consistent with the data!")
    }
  }
  if (is.na(name.predictors)[1]) name.predictors <- predictor_idx
  if (name.predictors[1] == "auto") name.predictors <- rownames(beta_hat)
  if (is.character(name.predictors)) {
    if (length(name.predictors) != nrow(beta_hat)) {
      stop("The length of the given predictor names is not consistent with the data!")
    }
  }
  opar <- par(no.readonly = TRUE)
  on.exit(par(opar))
  if (!fig.tex) {
    par(mar = c(6, 6, 5.1, 4.1))
    par(mfrow = c(1, sum(estimator %in% c("beta", "gamma", "Gy"))))
    if ("beta" %in% estimator) {
      # floor(100*constant)+100-1 colours that you want in the legend bar, which has a white middle color
      colorbar <- c(colorRampPalette(c(colorScale.beta[1], colorScale.beta[2]))(
        floor(1000 / (-(max(beta_hat) - min(beta_hat)) / min(beta_hat) - 1))),
        colorRampPalette(c(colorScale.beta[2], colorScale.beta[3]))(1000)[-1])
      if (is.na(title.beta)) title.beta <- expression(hat(bold(B)))
      image(
        z = beta_hat, x = predictor_idx, y = response_idx, col = colorbar,
        mgp = mgp, axes = ifelse(is.na(name.responses)[1], TRUE, FALSE),
        xlab = xlab, ylab = ylab, main = title.beta, cex.main = cex.main, cex.lab = 1.5, ...)
      box()
      vertical.image.legend(color = colorbar, zlim = c(min(beta_hat), max(beta_hat)), legend.cex.axis = legend.cex.axis)
      if (!is.na(name.responses)[1]) {
        par(las = 2, cex.axis = 1)
        axis(2, at = response_idx, labels = name.responses, tick = tick)
        axis(1, at = predictor_idx, labels = name.predictors, tick = tick)
      }
    }
    if ("gamma" %in% estimator) {
      if (is.na(title.gamma)) title.gamma <- expression(hat(bold(Gamma)))
      image(
        z = gamma_hat, x = predictor_idx, y = response_idx, col = colorScale.gamma,
        mgp = mgp, axes = ifelse(is.na(name.responses)[1], TRUE, FALSE),
        xlab = xlab, ylab = ylab, main = title.gamma, cex.main = cex.main, cex.lab = 1.5, ...)
box() vertical.image.legend(color = colorScale.gamma, zlim = c(0, 1), legend.cex.axis = legend.cex.axis) if (!is.na(name.responses)[1]) { par(las = 2, cex.axis = 1) axis(2, at = response_idx, labels = name.responses, tick = tick) if (nonpen > 0) { name.predictors <- name.predictors[-c(1:nonpen)] } axis(1, at = predictor_idx, labels = name.predictors, tick = tick) } } if ("Gy" %in% estimator) { if (is.na(title.Gy)) title.Gy <- "Estimated graph of responses" image( z = Gy_hat + diag(ncol(Gy_hat)), x = response_idx, y = response_idx, col = colorScale.gamma, mgp = mgp, axes = ifelse(is.na(name.responses)[1], TRUE, FALSE), xlab = ylab, ylab = ylab, main = title.Gy, cex.main = cex.main, cex.lab = 1.5, ...) box() vertical.image.legend(color = colorScale.gamma, zlim = c(min(Gy_hat), max(Gy_hat)), legend.cex.axis = legend.cex.axis) if (!is.na(name.responses)[1]) { par(las = 2, cex.axis = 1) axis(2, at = response_idx, labels = name.responses, tick = tick) axis(1, at = response_idx, labels = name.responses, tick = tick) } } title(paste0("\n", header), cex.main = header.cex, outer = TRUE) } else { options(tikzMetricPackages = c("\\usepackage{amsmath}", "\\usepackage{bm}", "\\usetikzlibrary{calc}")) tikz(paste0(output, ".tex"), width = 3.6 * sum(estimator %in% c("beta", "gamma", "Gy")), height = 4, standAlone = TRUE, packages = c("\\usepackage{tikz}", "\\usepackage{amsmath}", "\\usepackage{bm}", "\\usepackage[active,tightpage,psfixbb]{preview}", "\\PreviewEnvironment{pgfpicture}")) par(mfrow = c(1, sum(estimator %in% c("beta", "gamma", "Gy")))) par(mar = c(6, 6, 4, 4) + 0.1) if ("beta" %in% estimator) { # floor(100*constant)+100-1 colours that your want in the legend bar which has the white middle color colorbar <- c(colorRampPalette(c(colorScale.beta[1], colorScale.beta[2]))( floor(1000 / (-(max(beta_hat) - min(beta_hat)) / min(beta_hat) - 1))), colorRampPalette(c(colorScale.beta[2], colorScale.beta[3]))(1000)[-1]) if (is.na(title.beta)) title.beta <- paste("Estimator", "$\\hat{\\bm{B}}$") image( z = beta_hat, x = predictor_idx, y = response_idx, col = colorbar, axes = ifelse(is.na(name.responses)[1], TRUE, FALSE), mgp = mgp, xlab = xlab, ylab = ylab, main = title.beta, cex.main = cex.main, cex.lab = 1.5, ...) box() vertical.image.legend(color = colorbar, zlim = c(min(beta_hat), max(beta_hat)), legend.cex.axis = legend.cex.axis) if (!is.na(name.responses)[1]) { par(las = 2, cex.axis = 1) axis(2, at = response_idx, labels = name.responses, tick = tick) # opar <- par(cex.axis=1) axis(1, at = predictor_idx, labels = name.predictors, tick = tick) } } if ("gamma" %in% estimator) { if (is.na(title.gamma)) title.gamma <- paste("Estimator", "$\\hat{\\mathbf{\\Gamma}}$") image( z = gamma_hat, x = predictor_idx, y = seq_len(ncol(gamma_hat)), col = colorScale.gamma, axes = ifelse(is.na(name.responses)[1], TRUE, FALSE), xlab = xlab, ylab = ylab, main = title.gamma, mgp = mgp, cex.main = cex.main, cex.lab = 1.5, ...) 
box() vertical.image.legend(color = colorScale.gamma, zlim = c(0, 1), legend.cex.axis = legend.cex.axis) if (!is.na(name.responses)[1]) { par(las = 2, cex.axis = 1) axis(2, at = seq_len(ncol(gamma_hat)), labels = name.responses, tick = tick) if (nonpen > 0) { name.predictors <- name.predictors[-c(1:nonpen)] } axis(1, at = predictor_idx, labels = name.predictors, tick = tick) } } if ("Gy" %in% estimator) { if (is.na(title.Gy)) title.Gy <- paste("Estimator", "$\\hat{\\mathcal{G}}$") image( z = Gy_hat + diag(ncol(Gy_hat)), x = seq_len(nrow(Gy_hat)), y = seq_len(nrow(Gy_hat)), col = colorScale.gamma, axes = ifelse(is.na(name.responses)[1], TRUE, FALSE), mgp = mgp, xlab = ylab, ylab = ylab, main = title.Gy, cex.main = cex.main, cex.lab = 1.5, ...) box() vertical.image.legend(color = colorScale.gamma, zlim = c(min(Gy_hat), max(Gy_hat)), legend.cex.axis = legend.cex.axis) if (!is.na(name.responses)[1]) { par(las = 2, cex.axis = 1) axis(2, at = seq_len(ncol(Gy_hat)), labels = name.responses, tick = tick) axis(1, at = seq_len(nrow(Gy_hat)), labels = name.responses, tick = tick) } } title(paste0("\n", header), cex.main = header.cex, outer = TRUE) dev.off() tools::texi2pdf(paste0(output, ".tex")) } } # the function vertical.image.legend() is adapted from the R package "aqfig" vertical.image.legend <- function(zlim, color, legend.cex.axis = 1) { starting.par.settings <- par(no.readonly = TRUE) mai <- par("mai") fin <- par("fin") x.legend.fig <- c(1 - (mai[4] / fin[1]), 1) y.legend.fig <- c(mai[1] / fin[2], 1 - (mai[3] / fin[2])) x.legend.plt <- c(x.legend.fig[1] + (0.18 * (x.legend.fig[2] - x.legend.fig[1])), x.legend.fig[2] - (0.6 * (x.legend.fig[2] - x.legend.fig[1]))) y.legend.plt <- y.legend.fig cut.pts <- seq(zlim[1], zlim[2], length = length(color) + 1) z <- (cut.pts[seq_along(color)] + cut.pts[2:(length(color) + 1)]) / 2 par(new = TRUE, pty = "m", plt = c(x.legend.plt, y.legend.plt)) # If z is not increasing, only two values if (all(diff(z) > 0)) { image( x = 1.5, y = z, z = matrix(z, nrow = 1, ncol = length(color)), col = color, xlab = "", ylab = "", xaxt = "n", yaxt = "n" ) axis(4, mgp = c(3, 0.2, 0), las = 2, cex.axis = legend.cex.axis, tcl = -0.1) box() } mfg.settings <- par()$mfg par(starting.par.settings) par(mfg = mfg.settings, new = FALSE) }
/scratch/gouwar.j/cran-all/cranData/BayesSUR/R/plotEstimator.R
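A sketch of labeled heatmaps, assuming `fit` as above; the value "auto" pulls the variable names from the data files written at fit time.

# Illustration only: heatmaps with variable names on both axes
plotEstimator(fit, estimator = c("beta", "gamma"),
              name.responses = "auto", name.predictors = "auto")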
#' @title plot graph for response variables
#' @description
#' Plot the estimated graph for multiple response variables from a
#' \code{BayesSUR} class object.
#' @importFrom igraph E plot.igraph graph_from_adjacency_matrix
#' @importFrom graphics par
#' @name plotGraph
#'
#' @param x either an object of class \code{BayesSUR} (default) or a symmetric
#' numeric matrix representing an adjacency matrix for a given graph structure.
#' If x is an adjacency matrix, argument \code{main="Given graph of responses"}
#' by default.
#' @param Pmax a value for thresholding the learning structure matrix of
#' multiple response variables. Default is 0.5
#' @param main an overall title for the plot
#' @param edge.width edge width. Default is 2
#' @param edge.weight draw weighted edges after thresholding at 0.5. The
#' default \code{FALSE} does not draw weighted edges
#' @param vertex.label character vector used to label the nodes
#' @param vertex.label.color label color. Default is \code{"black"}
#' @param vertex.size node size. Default is 30
#' @param vertex.color node color. Default is \code{"dodgerblue"}
#' @param vertex.frame.color vertex frame color. Default is \code{NA}
#' @param ... other arguments
#'
#' @examples
#' data("exampleEQTL", package = "BayesSUR")
#' hyperpar <- list(a_w = 2, b_w = 5)
#'
#' set.seed(9173)
#' fit <- BayesSUR(
#'   Y = exampleEQTL[["blockList"]][[1]],
#'   X = exampleEQTL[["blockList"]][[2]],
#'   data = exampleEQTL[["data"]], outFilePath = tempdir(),
#'   nIter = 10, burnin = 0, nChains = 1, gammaPrior = "hotspot",
#'   hyperpar = hyperpar, tmpFolder = "tmp/"
#' )
#'
#' ## check output
#' # show the graph relationship between responses
#' plotGraph(fit)
#'
#' @export
plotGraph <- function(x, Pmax = 0.5, main = "Estimated graph of responses",
                      edge.width = 2, edge.weight = FALSE, vertex.label = NULL,
                      vertex.label.color = "black", vertex.size = 30,
                      vertex.color = "dodgerblue", vertex.frame.color = NA, ...) {
  if (!inherits(x, "BayesSUR")) {
    if (is.matrix(x) && is.numeric(x)) {
      if (!((dim(x)[1] == dim(x)[2]) && (sum(dim(x)) > 2))) {
        stop("Use only with a \"BayesSUR\" object or numeric square matrix")
      }
      Gy_hat <- x
      if (!is.null(vertex.label)) {
        rownames(Gy_hat) <- colnames(Gy_hat) <- vertex.label
      }
      if (main == "Estimated graph of responses") {
        main <- "Given graph of responses"
      }
    } else {
      stop("Use only with a \"BayesSUR\" object or numeric square matrix")
    }
  } else {
    x$output[-1] <- paste0(x$output$outFilePath, x$output[-1])
    covariancePrior <- x$input$covariancePrior
    if (covariancePrior == "HIW") {
      Gy_hat <- as.matrix(read.table(x$output$Gy))
    } else {
      stop("Gy is only estimated with hyper-inverse Wishart prior for the covariance matrix of responses!")
    }
    if (!is.null(vertex.label)) {
      rownames(Gy_hat) <- colnames(Gy_hat) <- vertex.label
    } else {
      rownames(Gy_hat) <- colnames(Gy_hat) <- names(read.table(x$output$Y, header = TRUE))
    }
  }
  if (Pmax < 0 || Pmax > 1) {
    stop("Please specify correct argument 'Pmax' in [0,1]!")
  }
  if (edge.weight) {
    Gy_thresh <- Gy_hat
    Gy_thresh[Gy_hat <= Pmax] <- 0
  } else {
    Gy_thresh <- as.matrix(Gy_hat > Pmax)
  }
  net <- graph_from_adjacency_matrix(Gy_thresh, weighted = TRUE, mode = "undirected", diag = FALSE)
  if (edge.weight) {
    plot.igraph(net, main = main, edge.width = E(net)$weight * 2,
      vertex.label = vertex.label, vertex.color = vertex.color,
      vertex.frame.color = vertex.frame.color,
      vertex.label.color = vertex.label.color, vertex.size = vertex.size, ...)
} else { plot.igraph(net, main = main, edge.width = edge.width, vertex.label = vertex.label, vertex.color = vertex.color, vertex.frame.color = vertex.frame.color, vertex.label.color = vertex.label.color, vertex.size = vertex.size, ...) } }
/scratch/gouwar.j/cran-all/cranData/BayesSUR/R/plotGraph.R
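As documented above, plotGraph() also accepts a plain symmetric adjacency matrix instead of a fitted object; a small sketch with hypothetical response names:

# Illustration only: plot a user-supplied adjacency matrix
adj <- matrix(c(0, 1, 0,
                1, 0, 1,
                0, 1, 0), nrow = 3, byrow = TRUE)
plotGraph(adj, vertex.label = c("y1", "y2", "y3"))  # titled "Given graph of responses"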
#' @title plot MCMC diagnostic plots
#' @description
#' Show trace plots and diagnostic density plots of a fitted model object of class \code{BayesSUR}.
#' @importFrom graphics par plot.default legend title matplot
#' @importFrom stats density
#' @importFrom grDevices hcl.colors
#' @name plotMCMCdiag
#' @param x an object of class \code{BayesSUR}
#' @param nbloc number of splits for the last half iterations after subtracting
#' the burn-in length
#' @param HIWg diagnostic plot of the response graph. Default is \code{NULL}.
#' \code{HIWg="degree"} prints the diagnostic of the degrees of response nodes.
#' \code{HIWg="edge"} prints the diagnostic of every edge between two responses.
#' \code{HIWg="lik"} prints the diagnostic of the posterior likelihoods of the
#' hyperparameters related to the response relationships
#' @param header the main title
#' @param ... other arguments for the plots of the log-likelihood and model size
#'
#' @examples
#' data("exampleEQTL", package = "BayesSUR")
#' hyperpar <- list(a_w = 2, b_w = 5)
#'
#' set.seed(9173)
#' fit <- BayesSUR(
#'   Y = exampleEQTL[["blockList"]][[1]],
#'   X = exampleEQTL[["blockList"]][[2]],
#'   data = exampleEQTL[["data"]], outFilePath = tempdir(),
#'   nIter = 10, burnin = 0, nChains = 1, gammaPrior = "hotspot",
#'   hyperpar = hyperpar, tmpFolder = "tmp/"
#' )
#'
#' ## check output
#' plotMCMCdiag(fit)
#'
#' @export
plotMCMCdiag <- function(x, nbloc = 3, HIWg = NULL, header = "", ...) {
  if (!inherits(x, "BayesSUR")) {
    stop("Use only with a \"BayesSUR\" object")
  }
  x$output[-1] <- paste0(x$output$outFilePath, x$output[-1])
  logP <- t(as.matrix(read.table(x$output$logP)))
  model_size <- as.matrix(read.table(x$output$model_size))
  ncol_Y <- ncol(read.table(x$output$gamma))
  nIter <- x$input$nIter
  covariancePrior <- x$input$covariancePrior
  if (covariancePrior == "HIW" && !is.null(x$output$Gvisit)) {
    Gvisit <- as.matrix(read.table(x$output$Gvisit))
  }
  if (nIter <= 1) {
    stop("The diagnosis only shows results from more than one MCMC iteration!")
  }
  if (nIter < 4000) {
    message("NOTE: The diagnosis only shows results of two iteration points due to fewer than 4000 MCMC iterations!")
  }
  if (nIter >= 4000) {
    logP <- logP[, ncol(logP) - floor(nIter / 1000) - 1 + 1:floor(nIter / 1000)]
  } else {
    logP <- logP[, c(1, ncol(logP))]
  }
  if (is.null(HIWg)) {
    Ptau.indx <- ifelse(covariancePrior != "IG", 7, 3)
    Plik.indx <- ifelse(covariancePrior != "IG", 10, 5)
    if (nIter >= 4000) {
      model_size <- rowSums(model_size[nrow(model_size) - floor(nIter / 1000) - 1 + 1:floor(nIter / 1000), ])
    } else {
      model_size <- rowSums(model_size[c(1, nrow(model_size)), ])
    }
    dens.all <- density(logP[Ptau.indx, ])
    if (nIter >= 4000) {
      dens.first <- density(logP[Ptau.indx, 1:floor(ncol(logP) / 2)])
      dens.last <- density(logP[Ptau.indx, (1 + floor(ncol(logP) / 2)):ncol(logP)])
      ymin <- min(dens.all$y, dens.first$y, dens.last$y)
      ymax <- max(dens.all$y, dens.first$y, dens.last$y)
      xmin <- min(dens.all$x, dens.first$x, dens.last$x)
      xmax <- max(dens.all$x, dens.first$x, dens.last$x)
    } else {
      ymin <- min(dens.all$y)
      ymax <- max(dens.all$y)
      xmin <- min(dens.all$x)
      xmax <- max(dens.all$x)
      nbloc <- 1
    }
    ### nbloc: number of splits of the sweep
    mid <- floor(floor(ncol(logP) / 2) / nbloc)
    ymax2 <- xmin2 <- xmax2 <- list.dens <- NULL
    for (i in 1:nbloc) {
      dens <- density(logP[Ptau.indx, (ifelse(nbloc == 1, 0, floor(ncol(logP) / 2)) + 1 + mid * (i - 1)):ncol(logP)])
      ymax2 <- max(ymax2, dens$y)
      xmin2 <- min(xmin2, dens$x)
      xmax2 <- max(xmax2, dens$x)
      list.dens <- c(list.dens, list(dens))
    }
    ### plot the figures
    opar <- par(no.readonly = TRUE)
    on.exit(par(opar))
    par(mfrow = c(2, 2))
    if (nbloc > 1) {
      plot.default(logP[Plik.indx, ], xlab = "Iterations (*1000)", ylab = "Log likelihood (posterior)", type = "l", lty = 1, ...)
    } else {
      plot.default(logP[Plik.indx, ] ~ c(1, nIter), xlab = "Iterations", ylab = "Log likelihood (posterior)", type = "l", lty = 1, ...)
    }
    if (nbloc > 1) {
      plot.default(model_size, xlab = "Iterations (*1000)", ylab = "Model size", type = "l", lty = 1, ...)
    } else {
      plot.default(model_size ~ c(1, nIter), xlab = "Iterations", ylab = "Model size", type = "l", lty = 1, ...)
    }
    title0 <- expression(paste("Log Posterior Distribution: log ", P(gamma ~ group("|", list(Y, .), ""))))
    plot.default(dens.all, main = "", col = "black", xlim = c(xmin, xmax), ylim = c(ymin, ymax), xlab = title0, ylab = "", type = "l", lty = 1)
    if (nIter >= 4000) {
      par(new = TRUE)
      plot.default(dens.first, main = "", col = "red", xlim = c(xmin, xmax), ylim = c(ymin, ymax), xlab = "", ylab = "", type = "l", lty = 1)
      par(new = TRUE)
      plot.default(dens.last, main = "", col = "green", xlim = c(xmin, xmax), ylim = c(ymin, ymax), xlab = "", ylab = "Density", type = "l", lty = 1)
      if (nbloc > 1) {
        legend("topleft", title = "iteration",
          legend = paste0(c("ALL", "First half", "Last half"), " = [", c(1, 1, floor(ncol(logP) / 2) * 1000 + 1), ":", c(ncol(logP), floor((ncol(logP)) / 2), ncol(logP)) * 1000, "]"),
          col = 1:3, lty = 1, text.col = 1:3, cex = 0.8)
      }
    }
    for (i in 1:nbloc) {
      plot.default(list.dens[[i]], col = i, xlim = c(xmin2, xmax2), ylim = c(ymin, ymax2), xlab = title0, ylab = "", type = "l", lty = 1, main = "")
      if (nbloc > 1) par(new = TRUE)
    }
    title(ylab = "Density")
    if (nbloc > 1) {
      legend("topleft", title = "moving window",
        legend = paste0("set ", 1:nbloc, " = [", (floor((ncol(logP)) / 2) + mid * (nbloc:1 - 1)) * 1000 + 1, ":", (ncol(logP)) * 1000, "]"),
        col = 1:nbloc, lty = 1, text.col = 1:nbloc, cex = 0.8)
    }
  } else {
    if (covariancePrior != "HIW") {
      stop("The argument HIWg only works for the model with hyper-inverse Wishart prior on the covariance!")
    }
    if (HIWg == "degree") {
      m <- ncol_Y
      node1 <- node2 <- NULL
      for (i in 1:(m - 1)) {
        node1 <- c(node1, rep(i, m - i))
        node2 <- c(node2, (i + 1):m)
      }
      nodes <- cbind(node1, node2)
      node.degree <- matrix(0, nrow = nrow(Gvisit), ncol = m)
      for (i in 1:m) {
        node.degree[, i] <- rowSums(Gvisit[, which(nodes == i, arr.ind = TRUE)[, 1]])
      }
      matplot(node.degree, type = "l", lty = 1, col = hcl.colors(m), xlab = "Iterations (*1000)", ylab = "degree", main = "Response degrees", xlim = c(1, nrow(Gvisit) * 1.1))
      legend("topright", legend = 1:m, col = hcl.colors(m), lty = 1, text.col = hcl.colors(m), cex = 1 / m * 4)
    }
    if (substr(HIWg, 1, 4) == "edge") {
      m <- ncol_Y
      node1 <- node2 <- NULL
      for (i in 1:(m - 1)) {
        node1 <- c(node1, rep(i, m - i))
        node2 <- c(node2, (i + 1):m)
      }
      if (HIWg == "edge") {
        matplot(Gvisit, type = "l", lty = 1, col = hcl.colors(ncol(Gvisit)), xlab = "Iterations (*1000)", ylab = "", main = "Edges selection", xlim = c(1, nrow(Gvisit) * 1.1))
        legend("topright", legend = paste0(node1, "-", node2), col = hcl.colors(ncol(Gvisit)), lty = 1, text.col = hcl.colors(ncol(Gvisit)), cex = 1 / m * 2)
      } else {
        plot.default(
          Gvisit[, which(paste0(node1, node2) == substr(HIWg, 5, nchar(HIWg)))],
          type = "l", lty = 1, xlab = "Iterations (*1000)", ylab = "",
          main = paste0("Edge-", substr(HIWg, 5, nchar(HIWg)), " selection"))
      }
    }
    if (HIWg == "lik") {
      Gvisit <- t(logP[1:4, ])
      matplot(Gvisit, type = "l", lty = 1, col = seq_len(ncol(Gvisit)), xlab = "Iterations (*1000)", ylab =
"Log likelihood (posterior)", main = "Likelihoods of graph learning") legend("topright", legend = c("tau", "eta", "JT", "SigmaRho"), col = seq_len(ncol(Gvisit)), lty = 1, text.col = seq_len(ncol(Gvisit)), cex = 0.8) } } title(paste0("\n", header), outer = TRUE) }
/scratch/gouwar.j/cran-all/cranData/BayesSUR/R/plotMCMCdiag.R
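A sketch of the graph-specific diagnostics, assuming `fit` used the (default) hyper-inverse Wishart covariance prior; "degree" additionally assumes the run wrote the Gvisit output file.

# Illustration only: diagnostics of the response-graph sampler
plotMCMCdiag(fit, HIWg = "lik")     # posterior likelihood traces of the graph hyperparameters
plotMCMCdiag(fit, HIWg = "degree")  # degree traces of the response nodes (needs Gvisit output)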
#' @title plot Manhattan-like plots
#' @description
#' Plot Manhattan-like plots for marginal posterior inclusion probabilities
#' (mPIP) and the numbers of associated responses for the predictors of a
#' \code{BayesSUR} class object.
#' @importFrom graphics axis box text par plot.default segments
#' @name plotManhattan
#'
#' @param x an object of class \code{BayesSUR}
#' @param manhattan value(s) in \code{c('mPIP', 'numResponse')}.
#' \code{manhattan='mPIP'} shows the Manhattan-like plot of the marginal
#' posterior inclusion probabilities (mPIP). \code{manhattan='numResponse'}
#' shows the Manhattan-like plot of the number of responses. The default is
#' to show both figures.
#' @param x.loc a vector of feature locations on the x axis
#' @param axis.label a vector of predictor names which are shown in the
#' Manhattan-like plot. The value \code{NULL} only shows the indices. The
#' default \code{"auto"} shows the predictor names from the original data.
#' @param mark.responses a vector of response names which are shown in the
#' Manhattan-like plot for the mPIP
#' @param mark.pos the location of the marked text relative to the point
#' @param xlab1 a title for the x axis of Manhattan-like plot for the mPIP
#' @param ylab1 a title for the y axis of Manhattan-like plot for the mPIP
#' @param xlab2 a title for the x axis of Manhattan-like plot for the numbers of responses
#' @param ylab2 a title for the y axis of Manhattan-like plot for the numbers of responses
#' @param threshold mPIP threshold used when counting the number of response
#' variables associated with each feature
#' @param las graphical parameter of plot.default
#' @param cex.axis graphical parameter of plot.default
#' @param mark.color the color of the marked text. The default color is red.
#' @param mark.cex the fontsize of the marked text. The default fontsize is 0.8
#' @param header the main title
#' @param ... other arguments
#'
#' @examples
#' data("exampleEQTL", package = "BayesSUR")
#' hyperpar <- list(a_w = 2, b_w = 5)
#'
#' set.seed(9173)
#' fit <- BayesSUR(
#'   Y = exampleEQTL[["blockList"]][[1]],
#'   X = exampleEQTL[["blockList"]][[2]],
#'   data = exampleEQTL[["data"]], outFilePath = tempdir(),
#'   nIter = 10, burnin = 0, nChains = 1, gammaPrior = "hotspot",
#'   hyperpar = hyperpar, tmpFolder = "tmp/"
#' )
#'
#' ## check output
#' # show the Manhattan-like plots
#' plotManhattan(fit)
#'
#' @export
plotManhattan <- function(x, manhattan = c("mPIP", "numResponse"), x.loc = FALSE,
                          axis.label = "auto", mark.responses = NULL,
                          xlab1 = "Predictors", ylab1 = "mPIP",
                          xlab2 = "Predictors", ylab2 = "No. of responses",
                          threshold = 0.5, las = 0, cex.axis = 1,
                          mark.pos = c(0, 0), mark.color = 2, mark.cex = 0.8,
                          header = "", ...)
{
  if (!inherits(x, "BayesSUR")) {
    stop("Use only with a \"BayesSUR\" object")
  }
  if (threshold < 0 || threshold > 1) {
    stop("Please specify correct argument 'threshold' in [0,1]!")
  }
  x$output[-1] <- paste0(x$output$outFilePath, x$output[-1])
  gamma <- as.matrix(read.table(x$output$gamma))
  rownames(gamma) <- colnames(read.table(x$output$X, header = TRUE))
  if (is.null(axis.label)) {
    x.loc <- seq_len(nrow(gamma))
    names(x.loc) <- seq_len(nrow(gamma))
  } else {
    # name.predictors <- colnames(read.table(x$output$X,header=T))
    name.predictors <- rownames(gamma)
    if (axis.label[1] == "auto") {
      x.loc <- seq_len(nrow(gamma))
      names(x.loc) <- name.predictors
    } else {
      if (is.na(match(axis.label, name.predictors)[1]) && (!x.loc[1])) {
        stop("The given predictor names are not consistent with the data")
      }
      if (!x.loc[1]) x.loc <- match(axis.label, name.predictors)
      names(x.loc) <- axis.label
    }
  }
  opar <- par(no.readonly = TRUE)
  on.exit(par(opar))
  if (sum(manhattan %in% c("mPIP", "numResponse")) == 2) {
    par(mfrow = c(2, 1))
  } else {
    par(mfrow = c(1, 1))
  }
  # Manhattan plot for marginal posterior inclusion probabilities (mPIP)
  if ("mPIP" %in% manhattan) {
    par(mar = c(4, 4, 4, 2))
    plot.default(as.vector(gamma) ~ rep(seq_len(nrow(gamma)), times = ncol(gamma)),
      xlim = c(1, nrow(gamma)), ylim = c(0, max(gamma)), xaxt = "n", bty = "n",
      ylab = ylab1, xlab = xlab1, main = "", pch = 19, ...)
    axis(1, at = x.loc, labels = names(x.loc), las = las, cex.axis = cex.axis)
    box()
    # mark the names of the specified response variables corresponding to the given predictors
    if (!is.null(mark.responses)) {
      name.responses <- colnames(read.table(x$output$Y, header = TRUE))
      if (!is.na(match(mark.responses, name.responses)[1])) {
        text(rep(x.loc, times = length(mark.responses)) + mark.pos[1],
          as.vector(gamma[x.loc, name.responses %in% mark.responses]) + mark.pos[2],
          labels = rep(mark.responses, each = length(x.loc)), col = mark.color, cex = mark.cex)
      } else {
        stop("The given response names are not consistent with the data")
      }
    }
  }
  # Manhattan plot for numbers of responses
  if ("numResponse" %in% manhattan) {
    par(mar = c(6, 4, 3, 2))
    no.gamma <- rowSums(gamma >= threshold)
    plot.default(no.gamma ~ c(seq_len(nrow(gamma))),
      xlim = c(1, nrow(gamma)), ylim = c(0, max(no.gamma) + 0.3), type = "n",
      xaxt = "n", ylab = ylab2, xlab = xlab2, main = "", ...)
    segments(seq_len(nrow(gamma)), 0, seq_len(nrow(gamma)), no.gamma)
    axis(1, at = x.loc, labels = names(x.loc), las = las, cex.axis = cex.axis)
    text(nrow(gamma) / 8, -max(no.gamma) / 1.3, paste("NOTE: Number of responses with mPIP >=", threshold), cex = .5, xpd = NA)
  }
  title(paste0("\n\n", header), outer = TRUE)
}
/scratch/gouwar.j/cran-all/cranData/BayesSUR/R/plotManhattan.R
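A sketch of annotated Manhattan-like plots, assuming `fit` as above; mark.responses must match response names in the data, so here they are read back from the fitted object.

# Illustration only: annotate the mPIP panel with one response name
resp_names <- colnames(getEstimator(fit, estimator = "gamma"))
plotManhattan(fit, manhattan = "mPIP", mark.responses = resp_names[1])
# count responses per predictor at a stricter mPIP cutoff
plotManhattan(fit, manhattan = "numResponse", threshold = 0.8)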
#' @title plot network representation of the associations between responses and predictors
#' @description
#' Plot the network representation of the associations between responses and
#' predictors, based on the estimated gamma matrix and graph of responses
#' from a \code{BayesSUR} class object.
#'
#' @importFrom graphics text
#' @importFrom grDevices gray
#' @importFrom igraph V E gsize layout_in_circle plot.igraph degree
#' @importFrom igraph layout.fruchterman.reingold delete.vertices
#' @importFrom igraph graph.adjacency delete.edges ecount V<-
#' @name plotNetwork
#' @param x an object of class \code{BayesSUR}
#' @param includeResponse A vector of the response names which are shown in the network
#' @param excludeResponse A vector of the response names which are not shown in the network
#' @param includePredictor A vector of the predictor names which are shown in the network
#' @param excludePredictor A vector of the predictor names which are not shown in the network
#' @param MatrixGamma A matrix or dataframe of the latent indicator variable.
#' Default is \code{NULL}, in which case it is extracted from the given
#' object of class \code{BayesSUR}
#' @param PmaxPredictor cutpoint for thresholding the estimated latent
#' indicator variable. Default is 0.5
#' @param PmaxResponse cutpoint for thresholding the learning structure matrix
#' of multiple response variables. Default is 0.5
#' @param nodesizePredictor node size of predictors in the output graph.
#' Default is 2
#' @param nodesizeResponse node size of response variables in the output graph.
#' Default is 15
#' @param no.isolates remove isolated nodes from the responses graph and the full
#' graph, which may cause problems if there are also isolated predictors
#' @param lineup the ratio of the height of the responses' area to that of the
#' predictors' area
#' @param gray.alpha the opacity. The default is 0.6
#' @param edgewith.response the edge width between response nodes
#' @param edgewith.predictor the edge width between predictor and response nodes
#' @param edge.weight draw weighted edges after thresholding at 0.5. The
#' default \code{FALSE} does not draw weighted edges
#' @param label.predictor A vector of the names of predictors
#' @param label.response A vector of the names of response variables
#' @param color.predictor color of the predictor nodes
#' @param color.response color of the response nodes
#' @param name.predictors A subtitle for the predictors
#' @param name.responses A subtitle for the responses
#' @param vertex.frame.color color of the frame of the vertices. If you don't
#' want vertices to have a frame, supply NA as the color name
#' @param layoutInCircle place vertices on a circle, in the order of their
#' vertex ids. The default is \code{FALSE}
#' @param header the main title
#' @param ...
other arguments #' #' @examples #' data("exampleEQTL", package = "BayesSUR") #' hyperpar <- list(a_w = 2, b_w = 5) #' #' set.seed(9173) #' fit <- BayesSUR( #' Y = exampleEQTL[["blockList"]][[1]], #' X = exampleEQTL[["blockList"]][[2]], #' data = exampleEQTL[["data"]], outFilePath = tempdir(), #' nIter = 10, burnin = 0, nChains = 1, gammaPrior = "hotspot", #' hyperpar = hyperpar, tmpFolder = "tmp/" #' ) #' #' ## check output #' # draw network representation of the associations between responses and covariates #' plotNetwork(fit) #' #' @export plotNetwork <- function(x, includeResponse = NULL, excludeResponse = NULL, includePredictor = NULL, excludePredictor = NULL, MatrixGamma = NULL, PmaxPredictor = 0.5, PmaxResponse = 0.5, nodesizePredictor = 2, nodesizeResponse = 15, no.isolates = FALSE, lineup = 1.2, gray.alpha = 0.6, edgewith.response = 5, edgewith.predictor = 2, edge.weight = FALSE, label.predictor = NULL, label.response = NULL, color.predictor = NULL, color.response = NULL, name.predictors = NULL, name.responses = NULL, vertex.frame.color = NA, layoutInCircle = FALSE, header = "", ...) { if (!inherits(x, "BayesSUR")) { stop("Use only with a \"BayesSUR\" object") } if (PmaxPredictor < 0 || PmaxPredictor > 1) { stop("Please specify correct argument 'PmaxPredictor' in [0,1]!") } if (PmaxResponse < 0 || PmaxResponse > 1) { stop("Please specify correct argument 'PmaxResponse' in [0,1]!") } x$output[-1] <- paste(x$output$outFilePath, x$output[-1], sep = "") covariancePrior <- x$input$covariancePrior if (covariancePrior == "HIW") { Gy_hat <- as.matrix(read.table(x$output$Gy)) } else { stop("Gy is only estimated with hyper-inverse Wishart prior for the covariance matrix of responses!") } gamma_hat <- as.matrix(read.table(x$output$gamma)) colnames(gamma_hat) <- names(read.table(x$output$Y, header = TRUE)) rownames(gamma_hat) <- colnames(read.table(x$output$X, header = TRUE)) if (sum(colnames(gamma_hat) == paste("V", seq_len(ncol(gamma_hat)), sep = "")) == ncol(gamma_hat)) { colnames(gamma_hat) <- paste("Y", seq_len(ncol(gamma_hat)), sep = "") } if (sum(rownames(gamma_hat) == paste("V", seq_len(nrow(gamma_hat)), sep = "")) == nrow(gamma_hat)) { rownames(gamma_hat) <- paste("X", seq_len(nrow(gamma_hat)), sep = "") } # select the required resposes and predictors to plot the network excludeResponse.idx <- rep(FALSE, ncol(gamma_hat)) excludePredictor.idx <- rep(FALSE, nrow(gamma_hat)) if (!is.null(includeResponse)) { excludeResponse.idx <- c(!(colnames(gamma_hat) %in% includeResponse)) } if (!is.null(excludeResponse)) { excludeResponse.idx <- c(excludeResponse.idx | c(colnames(gamma_hat) %in% excludeResponse)) } if (!is.null(includePredictor)) { excludePredictor.idx <- c(!(rownames(gamma_hat) %in% includePredictor)) } if (!is.null(excludePredictor)) { excludePredictor.idx <- c(excludePredictor.idx | c(rownames(gamma_hat) %in% excludePredictor)) } gamma_hat <- gamma_hat[!excludePredictor.idx, !excludeResponse.idx] Gy_hat <- Gy_hat[!excludeResponse.idx, !excludeResponse.idx] if (edge.weight) { Gy_thresh <- Gy_hat Gy_thresh[Gy_hat <= PmaxResponse] <- 0 gamma_thresh <- gamma_hat gamma_thresh[gamma_hat <= PmaxPredictor] <- 0 } else { Gy_thresh <- as.matrix(Gy_hat > PmaxResponse) gamma_thresh <- as.matrix(gamma_hat > PmaxPredictor) } if (sum(rowSums(gamma_thresh) != 0) == 0) { stop(paste("There were no predictors with mPIP gamma > ", PmaxPredictor, ". 
Not able to draw a network!", sep = ""))
  }
  gamma_thresh <- matrix(gamma_thresh[rowSums(gamma_thresh) != 0, ], ncol = ncol(gamma_hat))
  colnames(gamma_thresh) <- colnames(gamma_hat)
  rownames(gamma_thresh) <- rownames(gamma_hat)[rowSums(gamma_hat > PmaxPredictor) != 0]
  rownames(Gy_thresh) <- colnames(Gy_thresh) <- colnames(gamma_hat)
  plotSEMgraph(Gy_thresh, t(gamma_thresh),
    nodesizeSNP = nodesizePredictor, nodesizeMET = nodesizeResponse,
    no.isolates = no.isolates, edgewith.response = edgewith.response,
    edgewith.predictor = edgewith.predictor, edge.weight = edge.weight,
    label.predictor = label.predictor, label.response = label.response,
    color.predictor = color.predictor, color.response = color.response,
    name.predictors = name.predictors, name.responses = name.responses,
    vertex.frame.color = vertex.frame.color, layoutInCircle = layoutInCircle, ...)
  title(paste("\n\n", header, sep = ""), outer = TRUE)
}
plotSEMgraph <- function(ADJmatrix, GAMmatrix, nodesizeSNP = 2, nodesizeMET = 25,
                         no.isolates = FALSE, lineup = 1, gray.alpha = 0.6,
                         edgewith.response = 5, edgewith.predictor = 2,
                         label.predictor = NULL, label.response = NULL,
                         color.predictor = NULL, color.response = NULL,
                         name.predictors = NULL, name.responses = NULL,
                         edge.weight = FALSE, vertex.frame.color = NA,
                         layoutInCircle = FALSE, ...) {
  ## give warnings for re-defined arguments; check the names actually passed
  ## through '...' rather than objects that merely exist in the calling scope
  dotArgs <- names(list(...))
  if ("edge.width" %in% dotArgs) warning("Argument 'edge.width' was re-defined into new arguments 'edgewith.response' and 'edgewith.predictor' in this function!")
  if ("edge.color" %in% dotArgs) warning("Argument 'edge.color' cannot be changed in this function!")
  if ("edge.arrow.size" %in% dotArgs) warning("Argument 'edge.arrow.size' cannot be changed in this function!")
  # ADJmatrix must be a square qxq adjacency matrix (or data frame)
  qq <- dim(ADJmatrix)[1]
  if (dim(ADJmatrix)[2] != qq) stop("adjacency matrix not square")
  # GAMmatrix must be a qxp binary matrix (or data frame)
  pp <- dim(GAMmatrix)[2]
  if (dim(GAMmatrix)[1] != qq) stop("Gamma and Adjacency have different no. q")
  # join mets block (adjacency) and lower triangle (gamma)
  semgraph <- rbind(ADJmatrix, t(GAMmatrix))
  # add zero blocks for lower triangle and snp block
  zeroblock <- matrix(rep(0, pp * (pp + qq)), nrow = qq + pp, ncol = pp)
  zeroblock <- data.frame(zeroblock)
  colnames(zeroblock) <- colnames(GAMmatrix)
  rownames(zeroblock) <- rownames(semgraph)
  semgraph <- cbind(semgraph, zeroblock)
  # igraph objects
  graphADJ <- graph.adjacency(as.matrix(ADJmatrix), weighted = TRUE, diag = FALSE, mode = "undirected")
  graphSEM <- graph.adjacency(as.matrix(semgraph), weighted = TRUE, diag = FALSE, mode = "directed")
  # don't plot isolated nodes?
if (no.isolates) { graphADJ <- delete.vertices(graphADJ, degree(graphADJ) == 0) graphSEM <- delete.vertices(graphSEM, degree(graphSEM) == 0) message("Removing isolated nodes from Adjacency and Full SEM, may get problem if there are also isolated SNPs.") } # get co-ords for undirected edges using layout function (scaled) lladj <- layout.fruchterman.reingold(graphADJ) lmax <- max(lladj[, 1]) lmin <- min(lladj[, 1]) lladj[, 1] <- (lladj[, 1] - lmin) / (lmax - lmin) lmax <- max(lladj[, 2]) lmin <- min(lladj[, 2]) lladj[, 2] <- (lladj[, 2] - lmin) / (lmax - lmin) # plot adjacency only # plot(graphADJ,vertex.size=15,edge.width=2,edge.color="black",layout=lladj) # line up snps lymax <- max(lladj[, 2]) + (max(lladj[, 2]) - min(lladj[, 2])) * (lineup - 1) / 2 lymin <- min(lladj[, 2]) + (max(lladj[, 2]) - min(lladj[, 2])) * (1 - lineup) / 2 llsnps <- matrix(c(rep(-0.5, pp), lymin + (1:pp) * 1.0 * (lymax - lymin) / pp), nrow = pp, ncol = 2) llsem <- rbind(lladj, llsnps) ### plot SEM # plot snps and mets nodes differently # set node sizes directly in graph object V(graphSEM)$size <- c(rep(nodesizeMET, qq), rep(nodesizeSNP, pp)) n.edgeADJ <- gsize(graphADJ) n.edgeGAM <- gsize(graphSEM) - n.edgeADJ V(graphSEM)$label.color <- "black" V(graphSEM)$color <- c(rep("dodgerblue", nrow(GAMmatrix)), rep("red", ncol(GAMmatrix))) if (!is.null(color.predictor)) V(graphSEM)$color[-c(seq_len(nrow(GAMmatrix)))] <- color.predictor if (!is.null(color.response)) V(graphSEM)$color[seq_len(nrow(GAMmatrix))] <- color.response V(graphSEM)$label <- c(rownames(GAMmatrix), colnames(GAMmatrix)) if (!is.null(label.predictor)) V(graphSEM)$label[-c(seq_len(nrow(GAMmatrix)))] <- label.predictor if (!is.null(label.response)) V(graphSEM)$label[seq_len(nrow(GAMmatrix))] <- label.response if (edge.weight) { edge.width <- E(graphSEM)$weight * ifelse(edge.weight, 5, 1) } else { edge.width <- c(rep(edgewith.response, 2 * n.edgeADJ), rep(edgewith.predictor, 2 * n.edgeGAM)) } if (!layoutInCircle) { layoutSEM <- llsem } else { layoutSEM <- layout_in_circle(graphSEM) } # plot undirected graph between response variables graphSEMresponses <- delete.edges( graphSEM, E(graphSEM)[(1:ecount(graphSEM))[-c(1:(2 * n.edgeADJ))]]) plot.igraph(graphSEMresponses, edge.arrow.size = 0, edge.width = edge.width[1:(2 * n.edgeADJ)], vertex.frame.color = vertex.frame.color, edge.color = rep(gray(0), 2 * n.edgeADJ), layout = layoutSEM, ...) # plot directed graph between predictors and response variables graphSEMpredictor2responses <- delete.edges(graphSEM, E(graphSEM)[(1:ecount(graphSEM))[c(1:(2 * n.edgeADJ))]]) plot.igraph(graphSEMpredictor2responses, edge.arrow.size = 0.5, edge.width = edge.width[-c(1:(2 * n.edgeADJ))], vertex.frame.color = vertex.frame.color, edge.color = rep(gray(0.7, alpha = gray.alpha), 2 * n.edgeGAM), layout = layoutSEM, add = TRUE, ...) if (!is.null(name.predictors)) text(-1, -1.3, name.predictors, cex = 1.2) if (!is.null(name.responses)) text(0.4, -1.3, name.responses, cex = 1.2) }
/scratch/gouwar.j/cran-all/cranData/BayesSUR/R/plotNetwork.R
#' @title predict method for class \code{BayesSUR} #' @description #' Predict responses corresponding to the posterior mean of the coefficients, #' return posterior mean of coefficients or indices of nonzero coefficients of #' a \code{BayesSUR} class object. #' @name predict.BayesSUR #' #' @param object an object of class \code{BayesSUR} #' @param newx Matrix of new values for x at which predictions are to be made #' @param type Type of prediction required. \code{type="response"} gives the #' fitted responses; \code{type="coefficients"} returns the estimated #' coefficients depending on the arguments \code{beta.type} and \code{Pmax}. #' \code{type="nonzero"} returns a list of the indices of the nonzero #' coefficients corresponding to the estimated latent indicator variable #' thresholding at \code{Pmax} #' @param beta.type the type of estimated coefficients beta for prediction. #' Default is \code{marginal}, giving marginal beta estimation. If #' \code{beta.type="conditional"}, it gives conditional beta estimation #' @param Pmax If \code{type="nonzero"}, it is a threshold for the estimated #' latent indicator variable. If \code{type="coefficients"}, #' \code{beta.type="conditional"} and \code{Pmax=0.5}, it gives median #' probability model betas. Default is 0 #' @param ... other arguments #' #' @return Predicted values extracted from an object of class \code{BayesSUR}. #' If the \code{BayesSUR} specified data standardization, the fitted values #' are base based on standardized data. #' #' @examples #' data("exampleEQTL", package = "BayesSUR") #' hyperpar <- list(a_w = 2, b_w = 5) #' #' set.seed(9173) #' fit <- BayesSUR( #' Y = exampleEQTL[["blockList"]][[1]], #' X = exampleEQTL[["blockList"]][[2]], #' data = exampleEQTL[["data"]], outFilePath = tempdir(), #' nIter = 20, burnin = 10, nChains = 1, gammaPrior = "hotspot", #' hyperpar = hyperpar, tmpFolder = "tmp/" #' ) #' #' ## check prediction #' predict.val <- predict(fit, newx = exampleEQTL[["blockList"]][[2]]) #' #' @export predict.BayesSUR <- function(object, newx, type = "response", beta.type = "marginal", Pmax = 0, ...) { if (length(type) > 1) { warning("'type' has length > 1 and only the first element will be used") type <- type[1] } if (!(type %in% c("response", "coefficients", "nonzero"))) { stop("Please specify correct 'type'!") } if (Pmax < 0 || Pmax > 1) { stop("Please specify correct argument 'Pmax' in [0,1]!") } if (!(beta.type %in% c("marginal", "conditional"))) { stop("Please specify acorrect 'beta.type'!") } if ((type %in% c("response", "coefficients")) && (Pmax > 0) && (beta.type == "marginal")) { stop("Pmax > 0 is valid only if the arguments type='coefficients' and beta.type='conditional'!") } gamma_hat <- getEstimator(object, estimator = "gamma", Pmax = Pmax, ...) beta_hat <- getEstimator(object, estimator = "beta", Pmax = Pmax, beta.type = beta.type, ...) object$output[-1] <- paste(object$output$outFilePath, object$output[-1], sep = "") X <- as.matrix(read.table(object$output$X, header = TRUE)) if ("X0" %in% names(object$output)) { X0 <- as.matrix(read.table(object$output$X0)) } else { X0 <- NULL } if (missing(newx)) { y.pred <- cbind(X0, X) %*% beta_hat } else { y.pred <- newx %*% beta_hat } gamma_out <- which(gamma_hat == 1, arr.ind = TRUE) colnames(gamma_out) <- c("predictors", "response") if (type == "response") { return(y.pred) } if (type == "coefficients") { return(beta_hat) } if (type == "nonzero") { return(gamma_out) } }
/scratch/gouwar.j/cran-all/cranData/BayesSUR/R/predict.BayesSUR.R
#' @title print method for class \code{BayesSUR} #' @description #' Print a short summary of a \code{BayesSUR} class object. It includes the #' argument matching information, number of selected predictors based on #' thresholding the posterior mean of the latent indicator variable at 0.5 #' by default. #' #' @name print.BayesSUR #' @param x an object of class \code{BayesSUR} #' @param Pmax threshold that truncates the estimated coefficients based on #' thresholding the estimated latent indicator variable. Default is 0.5 #' @param ... other arguments #' #' @return Return a short summary from an object of class \code{BayesSUR}, #' including the number of selected predictors with mPIP>\code{Pmax} and the #' expected log pointwise predictive density estimates (i.e., elpd.LOO and #' elpd.WAIC). #' #' @examples #' data("exampleEQTL", package = "BayesSUR") #' hyperpar <- list(a_w = 2, b_w = 5) #' #' set.seed(9173) #' fit <- BayesSUR( #' Y = exampleEQTL[["blockList"]][[1]], #' X = exampleEQTL[["blockList"]][[2]], #' data = exampleEQTL[["data"]], outFilePath = tempdir(), #' nIter = 10, burnin = 0, nChains = 1, gammaPrior = "hotspot", #' hyperpar = hyperpar, tmpFolder = "tmp/", output_CPO = TRUE #' ) #' #' ## check output #' # show the print information #' print(fit) #' #' @export print.BayesSUR <- function(x, Pmax = 0.5, ...) { gamma <- as.matrix(read.table(paste(x$output$outFilePath, x$output$gamma, sep = ""))) call.string <- unlist(strsplit(deparse(x$call), ",")) call.string[which(call.string == " ") + 1] <- substr(call.string[which(call.string == " ") + 1], 4, nchar(call.string[which(call.string == " ") + 1])) call.string <- call.string[call.string != " "] if (length(call.string) <= 3) { cat("\nCall:\n ", paste(call.string, c(rep(",", length(call.string) - 1), ""), sep = "", collapse = ""), "\n", sep = "") } else { cat("\nCall:\n ", paste(call.string[1:3], c(",", ",", ", ...)"), sep = "", collapse = ""), "\n", sep = "") } cat("\nNumber of selected predictors (mPIP > ", Pmax, "): ", sum(gamma > Pmax), " of ", ncol(gamma), "x", nrow(gamma), "\n", sep = "") cat("\nExpected log pointwise predictive density (elpd):\n", " elpd.LOO = ", elpd(x, method = "LOO"), ", elpd.WAIC = ", elpd(x, method = "WAIC"), "\n\n", sep = "") }
/scratch/gouwar.j/cran-all/cranData/BayesSUR/R/print.BayesSUR.R
#' @title summary method for class \code{BayesSUR} #' @description #' Summary method for class \code{BayesSUR}. It includes the argument matching #' information, Top predictors/responses on average mPIP across all #' responses/predictors, elpd estimates, MCMC specification, model #' specification and hyper-parameters. The summarized number of the selected #' variable corresponds to the posterior mean of the latent indicator variable #' thresholding at 0.5 by default. #' #' @importFrom Matrix Matrix #' @name summary.BayesSUR #' @param object an object of class \code{BayesSUR} #' @param Pmax threshold that truncates the estimated coefficients based on #' thresholding the estimated latent indicator variable. Default is 0.5 #' @param ... other arguments #' #' @return Return a result summary from an object of class \code{BayesSUR}, #' including the CPOs, number of selected predictors with mPIP>\code{Pmax}, #' top 10 predictors on average mPIP across all responses, top 10 responses on #' average mPIP across all predictors, Expected log pointwise predictive #' density (elpd) estimates, MCMC specification, model specification (i.e., #' covariance prior and gamma prior) and hyper-parameters. #' #' @examples #' data(exampleEQTL, package = "BayesSUR") #' hyperpar <- list(a_w = 2, b_w = 5) #' #' set.seed(9173) #' fit <- BayesSUR( #' Y = exampleEQTL[["blockList"]][[1]], #' X = exampleEQTL[["blockList"]][[2]], #' data = exampleEQTL[["data"]], outFilePath = tempdir(), #' nIter = 10, burnin = 0, nChains = 1, gammaPrior = "hotspot", #' hyperpar = hyperpar, tmpFolder = "tmp/", output_CPO = TRUE #' ) #' #' ## check output #' # show the summary information #' summary(fit) #' #' @export summary.BayesSUR <- function(object, Pmax = 0.5, ...) { if (Pmax < 0 || Pmax > 1) { stop("Please specify correct argument 'Pmax' in (0,1)!") } ans <- list(status = object$status) if (is.null(object$output$CPO)) { ans$elpd <- NA } else { ans$elpd <- c(elpd(object, method = "LOO"), elpd(object, method = "WAIC")) } object$output[-1] <- paste(object$output$outFilePath, object$output[-1], sep = "") if (is.null(object$output$CPO)) { ans$CPO <- NA } else { ans$CPO <- summary.default(as.vector(as.matrix(read.table(object$output$CPO))))[-4] } gamma <- as.matrix(read.table(object$output$gamma)) ans$df <- sum(gamma > Pmax) # extract top 10 covariates based on average mPIP across all responses mean.predictors <- rowMeans(gamma) top10.predictors <- mean.predictors[ sort.list(mean.predictors, decreasing = TRUE)[1:min(10, nrow(gamma))]] names(top10.predictors) <- names(read.table(object$output$X, header = TRUE))[ sort.list(mean.predictors, decreasing = TRUE)[1:min(10, nrow(gamma))]] # extract top 10 response variables based on average mPIP across responses mean.responses <- colMeans(gamma) top10.responses <- mean.responses[ sort.list(mean.responses, decreasing = TRUE)[1:min(10, ncol(gamma))]] names(top10.responses) <- names(read.table(object$output$Y, header = TRUE))[ sort.list(mean.responses, decreasing = TRUE)[1:min(10, ncol(gamma))]] ans$chainParameters <- object$input[1:3] ans$modelParameters <- object$input[4:9] ans$hyperParameters <- object$input$hyperparameters ans$outputFiles <- object$output ans$outputFiles["outFilePath"] <- NULL cat("\nCall:\n ", paste(unlist(strsplit(deparse(object$call), ","))[1:3], c(",", ",", ", ...)"), sep = "", collapse = ""), "\n", sep = "") cat("\nCPOs:\n") print(ans$CPO) cat("\nNumber of selected predictors (mPIP > ", Pmax, "): ", sum(gamma > Pmax), " of ", ncol(gamma), "x", nrow(gamma), "\n", sep = "") 
cat("\nTop", min(10, nrow(gamma)), "predictors on average mPIP across all responses:\n") print(top10.predictors) cat("\nTop", min(10, ncol(gamma)), "responses on average mPIP across all predictors:\n") print(top10.responses) cat("\nExpected log pointwise predictive density (elpd) estimates:\n", " elpd.LOO = ", ans$elpd[1], ", elpd.WAIC = ", ans$elpd[2], "\n", sep = "") cat("\nMCMC specification:\n", " iterations = ", ans$chainParameters$nIter, ", burn-in = ", ans$chainParameters$burnin, ", chains = ", ans$chainParameters$nChains, "\n gamma local move sampler: ", ans$modelParameters$gammaSampler, "\n gamma initialisation: ", ans$modelParameters$gammaInit, "\n", sep = "") cat("\nModel specification:\n", " covariance prior: ", ans$modelParameters$covariancePrior, "\n gamma prior: ", ans$modelParameters$gammaPrior, "\n", sep = "") if (is.null(ans$hyperParameters)) { cat("\nHyper-parameters:\n") print(unlist(object$input$hyperParameters)) } cat("\n") invisible(ans) }
/scratch/gouwar.j/cran-all/cranData/BayesSUR/R/summary.BayesSUR.R
#' @title indices list of target genes in the GDSC data set #' #' @description #' Indices list of target genes corresponding the \code{example_GDSC} data set. #' It has two components representing the gene indices of the MAPK/ERK pathway and #' BCR-ABL gene fusion in the \code{example_GDSC} data set. #' #' @examples #' # Load the indices of gene targets from the GDSC sample dataset #' data("targetGene", package = "BayesSUR") #' str(targetGene) #' #' \dontrun{ #' # =============== #' # This code below is to do preprocessing of GDSC data and obtain the complete dataset #' # "targetGene.rda" above. The user needs load the datasets from #' # https://www.cancerrxgene.org release 5. #' # But downloading and transforming the three used datasets below to *.csv files first. #' # =============== #' #' requireNamespace("plyr", quietly = TRUE) #' requireNamespace("data.table", quietly = TRUE) #' #' #' features <- data.frame(read.csv("/gdsc_en_input_w5.csv", head = T)) #' names.fea <- strsplit(rownames(features), "") #' features <- t(features) #' p <- c(13321, 13747 - 13321, 13818 - 13747) #' Cell.Line <- rownames(features) #' features <- data.frame(Cell.Line, features) #' #' ic50_00 <- data.frame(read.csv("gdsc_drug_sensitivity_fitted_data_w5.csv", head = T)) #' ic50_0 <- ic50_00[, c(1, 4, 7)] #' drug.id <- data.frame(read.csv("gdsc_tissue_output_w5.csv", head = T))[, c(1, 3)] #' drug.id2 <- drug.id[!duplicated(drug.id$drug.id), ] #' # delete drug.id=1066 since ID1066 and ID156 both correspond drug AZD6482, #' # and no ID1066 in the "suppl.Data1" by Garnett et al. (2012) #' drug.id2 <- drug.id2[drug.id2$drug.id != 1066, ] #' drug.id2$drug.name <- as.character(drug.id2$drug.name) #' drug.id2$drug.name <- substr(drug.id2$drug.name, 1, nchar(drug.id2$drug.name) - 6) #' drug.id2$drug.name <- gsub(" ", "-", drug.id2$drug.name) #' #' ic50 <- ic50_0 #' # mapping the drug_id to drug names in drug sensitivity data set #' ic50$drug_id <- plyr::mapvalues(ic50$drug_id, from = drug.id2[, 2], to = drug.id2[, 1]) #' colnames(ic50) <- c("Cell.Line", "compound", "IC50") #' #' # transform drug sensitivity overall cell lines to a data matrix #' y0 <- reshape(ic50, v.names = "IC50", timevar = "compound", #' idvar = "Cell.Line", direction = "wide") #' y0$Cell.Line <- gsub("-", ".", y0$Cell.Line) #' #' # =============== #' # select nonmissing pharmacological data #' # =============== #' y00 <- y0 #' m0 <- dim(y0)[2] - 1 #' eps <- 0.05 #' # r1.na is better to be not smaller than r2.na #' r1.na <- 0.3 #' r2.na <- 0.2 #' k <- 1 #' while (sum(is.na(y0[, 2:(1 + m0)])) > 0) { #' r1.na <- r1.na - eps / k #' r2.na <- r1.na - eps / k #' k <- k + 1 #' ## select drugs with <30% (decreasing with k) missing data overall cell lines #' na.y <- apply(y0[, 2:(1 + m0)], 2, function(xx) sum(is.na(xx)) / length(xx)) #' while (sum(na.y < r1.na) < m0) { #' y0 <- y0[, -c(1 + which(na.y >= r1.na))] #' m0 <- sum(na.y < r1.na) #' na.y <- apply(y0[, 2:(1 + m0)], 2, function(xx) sum(is.na(xx)) / length(xx)) #' } #' #' ## select cell lines with treatment of at least 80% (increasing with k) drugs #' na.y0 <- apply(y0[, 2:(1 + m0)], 1, function(xx) sum(is.na(xx)) / length(xx)) #' while (sum(na.y0 < r2.na) < (dim(y0)[1])) { #' y0 <- y0[na.y0 < r2.na, ] #' na.y0 <- apply(y0[, 2:(1 + m0)], 1, function(xx) sum(is.na(xx)) / length(xx)) #' } #' num.na <- sum(is.na(y0[, 2:(1 + m0)])) #' message("#{NA}=", num.na, "\n", "r1.na =", r1.na, ", r2.na =", r2.na, "\n") #' } #' #' # =============== #' # combine drug sensitivity, tissues and molecular features #' # 
=============== #' yx <- merge(y0, features, by = "Cell.Line") #' names.cell.line <- yx$Cell.Line #' names.drug <- colnames(yx)[2:(dim(y0)[2])] #' names.drug <- substr(names.drug, 6, nchar(names.drug)) #' # numbers of gene expression features, copy number festures and muatation features #' p <- c(13321, 13747 - 13321, 13818 - 13747) #' num.nonpen <- 13 #' yx <- data.matrix(yx[, -1]) #' y <- yx[, 1:(dim(y0)[2] - 1)] #' x <- cbind(yx[, dim(y0)[2] - 1 + sum(p) + 1:num.nonpen], yx[, dim(y0)[2] - 1 + 1:sum(p)]) #' #' # delete genes with only one mutated cell line #' x <- x[, -c(num.nonpen + p[1] + p[2] + #' which(colSums(x[, num.nonpen + p[1] + p[2] + 1:p[3]]) <= 1))] #' p[3] <- ncol(x) - num.nonpen - p[1] - p[2] #' #' GDSC <- list( #' y = y, x = x, p = p, num.nonpen = num.nonpen, names.cell.line = names.cell.line, #' names.drug = names.drug #' ) #' #' #' ## ================ #' ## ================ #' ## select a small set of drugs #' ## ================ #' ## ================ #' #' name_drugs <- c( #' "Methotrexate", "RDEA119", "PD-0325901", "CI-1040", "AZD6244", "Nilotinib", #' "Axitinib" #' ) #' #' # extract the drugs' pharmacological profiling and tissue dummy #' YX0 <- cbind(GDSC$y[, colnames(GDSC$y) %in% paste("IC50.", name_drugs, sep = "")] #' [, c(1, 3, 6, 4, 7, 2, 5)], GDSC$x[, 1:GDSC$num.nonpen]) #' colnames(YX0) <- c(name_drugs, colnames(GDSC$x)[1:GDSC$num.nonpen]) #' # extract the genetic information of CNV & MUT #' X23 <- GDSC$x[, GDSC$num.nonpen + GDSC$p[1] + 1:(p[2] + p[3])] #' colnames(X23)[1:p[2]] <- paste(substr( #' colnames(X23)[1:p[2]], 1, #' nchar(colnames(X23)[1:p[2]]) - 3 #' ), ".CNV", sep = "") #' #' # locate all genes with CNV or MUT information #' name_genes_duplicate <- c( #' substr(colnames(X23)[1:p[2]], 1, nchar(colnames(X23)[1:p[2]]) - 4), #' substr(colnames(X23)[p[2] + 1:p[3]], 1, nchar(colnames(X23)[p[2] + 1:p[3]]) - 4) #' ) #' name_genes <- name_genes_duplicate[!duplicated(name_genes_duplicate)] #' #' # select the GEX which have the common genes with CNV or MUT #' X1 <- GDSC$x[, GDSC$num.nonpen + #' which(colnames(GDSC$x)[GDSC$num.nonpen + 1:p[1]] %in% name_genes)] #' #' p[1] <- ncol(X1) #' X1 <- log(X1) #' #' # summary the data information #' example_GDSC <- list(data = cbind(YX0, X1, X23)) #' example_GDSC$blockList <- list( #' 1:length(name_drugs), length(name_drugs) + 1:GDSC$num.nonpen, #' ncol(YX0) + 1:sum(p)) #' #' # ======================== #' # construct the G matrix: edge potentials in the MRF prior #' # ======================== #' #' # edges between drugs: Group1 ("RDEA119","17-AAG","PD-0325901","CI-1040" and "AZD6244") #' # indexed as (2:5) #' # http://software.broadinstitute.org/gsea/msigdb/cards/KEGG_MAPK_SIGNALING_PATHWAY #' pathway_genes <- read.table("MAPK_pathway.txt")[[1]] #' Idx_Pathway1 <- which(c(colnames(X1), name_genes_duplicate) %in% pathway_genes) #' Gmrf_Group1Pathway1 <- t(combn(rep(Idx_Pathway1, each = length(2:5)) + #' rep((2:5 - 1) * sum(p), times = length(Idx_Pathway1)), 2)) #' #' # edges between drugs: Group2 ("Nilotinib","Axitinib") indexed as (6:7) #' # delete gene ABL2 #' Idx_Pathway2 <- which(c(colnames(X1), name_genes_duplicate) %like% "BCR" | #' c(colnames(X1), name_genes_duplicate) %like% "ABL")[-c(3, 5)] #' Gmrf_Group2Pathway2 <- t(combn(rep(Idx_Pathway2, each = length(6:7)) + #' rep((6:7 - 1) * sum(p), times = length(Idx_Pathway2)), 2)) #' #' # edges between the common gene in different data sources #' Gmrf_CommonGene <- NULL #' list_CommonGene <- list(0) #' k <- 1 #' for (i in 1:length(name_genes)) { #' Idx_CommonGene <- 
which(c(colnames(X1), name_genes_duplicate) == name_genes[i]) #' if (length(Idx_CommonGene) > 1) { #' Gmrf_CommonGene <- #' rbind(Gmrf_CommonGene, t(combn(rep(Idx_CommonGene, each = length(name_drugs)) + #' rep((1:length(name_drugs) - 1) * sum(p), times = length(Idx_CommonGene)), 2))) #' k <- k + 1 #' } #' } #' Gmrf_duplicate <- rbind(Gmrf_Group1Pathway1, Gmrf_Group2Pathway2, Gmrf_CommonGene) #' Gmrf <- Gmrf_duplicate[!duplicated(Gmrf_duplicate), ] #' example_GDSC$mrfG <- Gmrf #' #' # create the target gene names of the two groups of drugs #' targetGenes1 <- matrix(Idx_Pathway1, nrow = 1) #' colnames(targetGenes1) <- colnames(example_GDSC$data)[seq_along(targetGene$group1)] #' targetGenes2 <- matrix(Idx_Pathway2, nrow = 1) #' colnames(targetGenes2) <- colnames(example_GDSC$data)[seq_along(targetGene$group2)] #' #' targetGene <- list(group1 = targetGenes1, group2 = targetGenes2) #' #' ## Write data file targetGene.rda to the user's directory by save() #' } #' "targetGene"
/scratch/gouwar.j/cran-all/cranData/BayesSUR/R/targetGene.R
## ----setup, include=FALSE----------------------------------------------------- knitr::opts_chunk$set(echo = TRUE, eval = FALSE) options(rmarkdown.html_vignette.check_title = FALSE) ## ----eval=TRUE---------------------------------------------------------------- library("BayesSUR") ## ----------------------------------------------------------------------------- # library("gRbase") # sim.ssur <- function(n, s, p, t0 = 0, seed = 123, mv = TRUE, # t.df = Inf, random.intercept = 0, intercept = TRUE) { # # set seed to fix coefficients # set.seed(7193) # sd_b <- 1 # mu_b <- 1 # b <- matrix(rnorm((p + ifelse(t0 == 0, 1, 0)) * s, mu_b, sd_b), p + ifelse(t0 == 0, 1, 0), s) # # # design groups and pathways of Gamma matrix # gamma <- matrix(FALSE, p + ifelse(t0 == 0, 1, 0), s) # if (t0 == 0) gamma[1, ] <- TRUE # gamma[2:6 - ifelse(t0 == 0, 0, 1), 1:5] <- TRUE # gamma[11:21 - ifelse(t0 == 0, 0, 1), 6:12] <- TRUE # gamma[31:51 - ifelse(t0 == 0, 0, 1), 1:5] <- TRUE # gamma[31:51 - ifelse(t0 == 0, 0, 1), 13:15] <- TRUE # gamma[52:61 - ifelse(t0 == 0, 0, 1), 1:12] <- TRUE # gamma[71:91 - ifelse(t0 == 0, 0, 1), 6:15] <- TRUE # gamma[111:121 - ifelse(t0 == 0, 0, 1), 1:15] <- TRUE # gamma[122 - ifelse(t0 == 0, 0, 1), 16:18] <- TRUE # gamma[123 - ifelse(t0 == 0, 0, 1), 19] <- TRUE # gamma[124 - ifelse(t0 == 0, 0, 1), 20] <- TRUE # # G_kron <- matrix(0, s * p, s * p) # G_m <- bdiag(matrix(1, ncol = 5, nrow = 5), # matrix(1, ncol = 7, nrow = 7), # matrix(1, ncol = 8, nrow = 8)) # G_p <- bdiag(matrix(1, ncol = 5, nrow = 5), diag(3), # matrix(1, ncol = 11, nrow = 11), diag(9), # matrix(1, ncol = 21, nrow = 21), # matrix(1, ncol = 10, nrow = 10), diag(9), # matrix(1, ncol = 21, nrow = 21), diag(19), # matrix(1, ncol = 11, nrow = 11), diag(181)) # G_kron <- kronecker(G_m, G_p) # # combn11 <- combn(rep((1:5 - 1) * p, each = length(1:5)) + # rep(1:5, times = length(1:5)), 2) # combn12 <- combn(rep((1:5 - 1) * p, each = length(30:60)) + # rep(30:60, times = length(1:5)), 2) # combn13 <- combn(rep((1:5 - 1) * p, each = length(110:120)) + # rep(110:120, times = length(1:5)), 2) # combn21 <- combn(rep((6:12 - 1) * p, each = length(10:20)) + # rep(10:20, times = length(6:12)), 2) # combn22 <- combn(rep((6:12 - 1) * p, each = length(51:60)) + # rep(51:60, times = length(6:12)), 2) # combn23 <- combn(rep((6:12 - 1) * p, each = length(70:90)) + # rep(70:90, times = length(6:12)), 2) # combn24 <- combn(rep((6:12 - 1) * p, each = length(110:120)) + # rep(110:120, times = length(6:12)), 2) # combn31 <- combn(rep((13:15 - 1) * p, each = length(30:50)) + # rep(30:50, times = length(13:15)), 2) # combn32 <- combn(rep((13:15 - 1) * p, each = length(70:90)) + # rep(70:90, times = length(13:15)), 2) # combn33 <- combn(rep((13:15 - 1) * p, each = length(110:120)) + # rep(110:120, times = length(13:15)), 2) # combn4 <- combn(rep((16:18 - 1) * p, each = length(121)) + # rep(121, times = length(16:18)), 2) # combn5 <- matrix(rep((19 - 1) * p, each = length(122)) + # rep(122, times = length(19)), nrow = 1, ncol = 2) # combn6 <- matrix(rep((20 - 1) * p, each = length(123)) + # rep(123, times = length(20)), nrow = 1, ncol = 2) # # combnAll <- rbind(t(combn11), t(combn12), t(combn13), # t(combn21), t(combn22), t(combn23), t(combn24), # t(combn31), t(combn32), t(combn33), # t(combn4), combn5, combn6) # # set.seed(seed + 7284) # sd_x <- 1 # x <- matrix(rnorm(n * p, 0, sd_x), n, p) # # if (t0 == 0 & intercept) x <- cbind(rep(1, n), x) # if (!intercept) { # gamma <- gamma[-1, ] # b <- b[-1, ] # } # xb <- matrix(NA, n, s) # if (mv) { # for (i in 
1:s) { # if (sum(gamma[, i]) >= 1) { # if (sum(gamma[, i]) == 1) { # xb[, i] <- x[, gamma[, i]] * b[gamma[, i], i] # } else { # xb[, i] <- x[, gamma[, i]] %*% b[gamma[, i], i] # } # } else { # xb[, i] <- sapply(1:s, function(i) rep(1, n) * b[1, i]) # } # } # } else { # if (sum(gamma) >= 1) { # xb <- x[, gamma] %*% b[gamma, ] # } else { # xb <- sapply(1:s, function(i) rep(1, n) * b[1, i]) # } # } # # corr_param <- 0.9 # M <- matrix(corr_param, s, s) # diag(M) <- rep(1, s) # # ## wanna make it decomposable # Prime <- list(c(1:(s * .4), (s * .8):s), # c((s * .4):(s * .6)), # c((s * .65):(s * .75)), # c((s * .8):s)) # G <- matrix(0, s, s) # for (i in 1:length(Prime)) { # G[Prime[[i]], Prime[[i]]] <- 1 # } # # # check # dimnames(G) <- list(1:s, 1:s) # length(gRbase::mcsMAT(G - diag(s))) > 0 # # var <- solve(BDgraph::rgwish(n = 1, adj = G, b = 3, D = M)) # # # change seeds to add randomness on error # set.seed(seed + 8493) # sd_err <- 0.5 # if (is.infinite(t.df)) { # err <- matrix(rnorm(n * s, 0, sd_err), n, s) %*% chol(as.matrix(var)) # } else { # err <- matrix(rt(n * s, t.df), n, s) %*% chol(as.matrix(var)) # } # # if (t0 == 0) { # b.re <- NA # z <- NA # y <- xb + err # if (random.intercept != 0) { # y <- y + matrix(rnorm(n * s, 0, sqrt(random.intercept)), n, s) # } # # z <- sample(1:4, n, replace = T, prob = rep(1 / 4, 4)) # # return(list(y = y, x = x, b = b, gamma = gamma, z = model.matrix(~ factor(z) + 0)[, ], # b.re = b.re, Gy = G, mrfG = combnAll)) # } else { # # add random effects # z <- t(rmultinom(n, size = 1, prob = c(.1, .2, .3, .4))) # z <- sample(1:t0, n, replace = T, prob = rep(1 / t0, t0)) # set.seed(1683) # b.re <- rnorm(t0, 0, 2) # y <- matrix(b.re[z], nrow = n, ncol = s) + xb + err # # return(list( # y = y, x = x, b = b, gamma = gamma, z = model.matrix(~ factor(z) + 0)[, ], # b.re = b.re, Gy = G, mrfG = combnAll # )) # } # } ## ----------------------------------------------------------------------------- # library("Matrix") # n <- 250 # s <- 20 # p <- 300 # sim1 <- sim.ssur(n, s, p, seed = 1) ## ----------------------------------------------------------------------------- # t0 <- 4 # sim2 <- sim.ssur(n, s, p, t0, seed = 1) # learning data # sim2.val <- sim.ssur(n, s, p, t0, seed=101) # validation data ## ----------------------------------------------------------------------------- # hyperpar <- list(mrf_d = -2, mrf_e = 1.6, a_w0 = 100, b_w0 = 500, a_w = 15, b_w = 60) # set.seed(1038) # fit2 <- BayesSUR( # data = cbind(sim2$y, sim2$z, sim2$x), # Y = 1:s, # X_0 = s + 1:t0, # X = s + t0 + 1:p, # outFilePath = "sim2_mrf_re", # hyperpar = hyperpar, # gammaInit = "0", # betaPrior = "reGroup", # nIter = 300, burnin = 100, # covariancePrior = "HIW", # standardize = F, # standardize.response = F, # gammaPrior = "MRF", # mrfG = sim2$mrfG, # output_CPO = T # ) ## ----------------------------------------------------------------------------- # summary(fit2) ## ----------------------------------------------------------------------------- # # compute accuracy, sensitivity, specificity of variable selection # gamma <- getEstimator(fit2) # (accuracy <- sum(data.matrix(gamma > 0.5) == sim2$gamma) / prod(dim(gamma))) ## ----------------------------------------------------------------------------- # (sensitivity <- sum((data.matrix(gamma > 0.5) == 1) & (sim2$gamma == 1)) / sum(sim2$gamma == 1)) ## ----------------------------------------------------------------------------- # (specificity <- sum((data.matrix(gamma > 0.5) == 0) & (sim2$gamma == 0)) / sum(sim2$gamma == 0)) ## 
----------------------------------------------------------------------------- # # compute RMSE and RMSPE for prediction performance # beta <- getEstimator(fit2, estimator = "beta", Pmax = .5, beta.type = "conditional") # (RMSE <- sqrt(sum((sim2$y - cbind(sim2$z, sim2$x) %*% beta)^2) / prod(dim(sim2$y)))) ## ----------------------------------------------------------------------------- # (RMSPE <- sqrt(sum((sim2.val$y - cbind(sim2.val$z, sim2.val$x) %*% beta)^2) / prod(dim(sim2.val$y)))) ## ----------------------------------------------------------------------------- # # compute bias of beta estimates # b <- sim2$b # b[sim2$gamma == 0] <- 0 # (beta.l2 <- sqrt(sum((beta[-c(1:4), ] - b)^2) / prod(dim(b)))) ## ----------------------------------------------------------------------------- # g.re <- getEstimator(fit2, estimator = "Gy") # (g.accuracy <- sum((g.re > 0.5) == sim2$Gy) / prod(dim(g.re))) ## ----------------------------------------------------------------------------- # (g.sensitivity <- sum(((g.re > 0.5) == sim2$Gy)[sim2$Gy == 1]) / sum(sim2$Gy == 1)) ## ----------------------------------------------------------------------------- # (g.specificity <- sum(((g.re > 0.5) == sim2$Gy)[sim2$Gy == 0]) / sum(sim2$Gy == 0))
/scratch/gouwar.j/cran-all/cranData/BayesSUR/inst/doc/BayesSUR-RE.R
--- title: "BayesSUR with random effects" author: "Zhi Zhao" output: rmarkdown::html_vignette vignette: > %\VignetteEngine{knitr::rmarkdown} %\VignetteIndexEntry{BayesSUR with random effects} \usepackage[utf8]{inputenc} --- ```{css, echo=FALSE} pre { overflow-y: auto; } pre[class] { max-height: 350px; } ``` ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE, eval = FALSE) options(rmarkdown.html_vignette.check_title = FALSE) ``` The BayesSUR model has been extended to include mandatory variables by assigning Gaussian priors as random effects rather than spike-and-slab priors, named as **SSUR-MRF with random effects** in [Zhao et al. 2023](https://doi.org/10.1093/jrsssc/qlad102). The R code for the simulated data and real data analyses in [Zhao et al. 2023](https://doi.org/10.48550/arXiv.2101.05899) can be found at the GitHub repository [BayesSUR-RE](https://github.com/zhizuio/BayesSUR-RE). Here, we show some small examples to run the BayesSUR mdoel with random effects. To get started, load the package with ```{r, eval=TRUE} library("BayesSUR") ``` ## Simulate data We design a network as the following figure (a) to construct a complex structure between $20$ response variables and $300$ predictors. It assumes that the responses are divided into six groups, and the first $120$ predictors are divided into nine groups. ![<font size="2">_Simulation scenarios: True relationships between response variables and predictors. (a) Network structure between $\mathbf Y$ and $\mathbf X$. (b) Spare latent indicator variable $\Gamma$ for the associations between $\mathbf Y$ and $\mathbf X$ in the SUR model. Black blocks indicate nonzero coefficients and white blocks indicate zero coefficients. (c) Additional structure in the residual covariance matrix between response variables not explained by $\mathbf X\mathbf B$. Black blocks indicate correlated residuals of the corresponding response variables and white blocks indicate uncorrelated residuals of the corresponding response variables._</font>](../man/figures/figure2.png){width=90%} <br> Load the simulation function `sim.ssur()` as follows. 
```{r} library("gRbase") sim.ssur <- function(n, s, p, t0 = 0, seed = 123, mv = TRUE, t.df = Inf, random.intercept = 0, intercept = TRUE) { # set seed to fix coefficients set.seed(7193) sd_b <- 1 mu_b <- 1 b <- matrix(rnorm((p + ifelse(t0 == 0, 1, 0)) * s, mu_b, sd_b), p + ifelse(t0 == 0, 1, 0), s) # design groups and pathways of Gamma matrix gamma <- matrix(FALSE, p + ifelse(t0 == 0, 1, 0), s) if (t0 == 0) gamma[1, ] <- TRUE gamma[2:6 - ifelse(t0 == 0, 0, 1), 1:5] <- TRUE gamma[11:21 - ifelse(t0 == 0, 0, 1), 6:12] <- TRUE gamma[31:51 - ifelse(t0 == 0, 0, 1), 1:5] <- TRUE gamma[31:51 - ifelse(t0 == 0, 0, 1), 13:15] <- TRUE gamma[52:61 - ifelse(t0 == 0, 0, 1), 1:12] <- TRUE gamma[71:91 - ifelse(t0 == 0, 0, 1), 6:15] <- TRUE gamma[111:121 - ifelse(t0 == 0, 0, 1), 1:15] <- TRUE gamma[122 - ifelse(t0 == 0, 0, 1), 16:18] <- TRUE gamma[123 - ifelse(t0 == 0, 0, 1), 19] <- TRUE gamma[124 - ifelse(t0 == 0, 0, 1), 20] <- TRUE G_kron <- matrix(0, s * p, s * p) G_m <- bdiag(matrix(1, ncol = 5, nrow = 5), matrix(1, ncol = 7, nrow = 7), matrix(1, ncol = 8, nrow = 8)) G_p <- bdiag(matrix(1, ncol = 5, nrow = 5), diag(3), matrix(1, ncol = 11, nrow = 11), diag(9), matrix(1, ncol = 21, nrow = 21), matrix(1, ncol = 10, nrow = 10), diag(9), matrix(1, ncol = 21, nrow = 21), diag(19), matrix(1, ncol = 11, nrow = 11), diag(181)) G_kron <- kronecker(G_m, G_p) combn11 <- combn(rep((1:5 - 1) * p, each = length(1:5)) + rep(1:5, times = length(1:5)), 2) combn12 <- combn(rep((1:5 - 1) * p, each = length(30:60)) + rep(30:60, times = length(1:5)), 2) combn13 <- combn(rep((1:5 - 1) * p, each = length(110:120)) + rep(110:120, times = length(1:5)), 2) combn21 <- combn(rep((6:12 - 1) * p, each = length(10:20)) + rep(10:20, times = length(6:12)), 2) combn22 <- combn(rep((6:12 - 1) * p, each = length(51:60)) + rep(51:60, times = length(6:12)), 2) combn23 <- combn(rep((6:12 - 1) * p, each = length(70:90)) + rep(70:90, times = length(6:12)), 2) combn24 <- combn(rep((6:12 - 1) * p, each = length(110:120)) + rep(110:120, times = length(6:12)), 2) combn31 <- combn(rep((13:15 - 1) * p, each = length(30:50)) + rep(30:50, times = length(13:15)), 2) combn32 <- combn(rep((13:15 - 1) * p, each = length(70:90)) + rep(70:90, times = length(13:15)), 2) combn33 <- combn(rep((13:15 - 1) * p, each = length(110:120)) + rep(110:120, times = length(13:15)), 2) combn4 <- combn(rep((16:18 - 1) * p, each = length(121)) + rep(121, times = length(16:18)), 2) combn5 <- matrix(rep((19 - 1) * p, each = length(122)) + rep(122, times = length(19)), nrow = 1, ncol = 2) combn6 <- matrix(rep((20 - 1) * p, each = length(123)) + rep(123, times = length(20)), nrow = 1, ncol = 2) combnAll <- rbind(t(combn11), t(combn12), t(combn13), t(combn21), t(combn22), t(combn23), t(combn24), t(combn31), t(combn32), t(combn33), t(combn4), combn5, combn6) set.seed(seed + 7284) sd_x <- 1 x <- matrix(rnorm(n * p, 0, sd_x), n, p) if (t0 == 0 & intercept) x <- cbind(rep(1, n), x) if (!intercept) { gamma <- gamma[-1, ] b <- b[-1, ] } xb <- matrix(NA, n, s) if (mv) { for (i in 1:s) { if (sum(gamma[, i]) >= 1) { if (sum(gamma[, i]) == 1) { xb[, i] <- x[, gamma[, i]] * b[gamma[, i], i] } else { xb[, i] <- x[, gamma[, i]] %*% b[gamma[, i], i] } } else { xb[, i] <- sapply(1:s, function(i) rep(1, n) * b[1, i]) } } } else { if (sum(gamma) >= 1) { xb <- x[, gamma] %*% b[gamma, ] } else { xb <- sapply(1:s, function(i) rep(1, n) * b[1, i]) } } corr_param <- 0.9 M <- matrix(corr_param, s, s) diag(M) <- rep(1, s) ## wanna make it decomposable Prime <- list(c(1:(s * .4), (s * .8):s), c((s * 
.4):(s * .6)), c((s * .65):(s * .75)), c((s * .8):s)) G <- matrix(0, s, s) for (i in 1:length(Prime)) { G[Prime[[i]], Prime[[i]]] <- 1 } # check dimnames(G) <- list(1:s, 1:s) length(gRbase::mcsMAT(G - diag(s))) > 0 var <- solve(BDgraph::rgwish(n = 1, adj = G, b = 3, D = M)) # change seeds to add randomness on error set.seed(seed + 8493) sd_err <- 0.5 if (is.infinite(t.df)) { err <- matrix(rnorm(n * s, 0, sd_err), n, s) %*% chol(as.matrix(var)) } else { err <- matrix(rt(n * s, t.df), n, s) %*% chol(as.matrix(var)) } if (t0 == 0) { b.re <- NA z <- NA y <- xb + err if (random.intercept != 0) { y <- y + matrix(rnorm(n * s, 0, sqrt(random.intercept)), n, s) } z <- sample(1:4, n, replace = T, prob = rep(1 / 4, 4)) return(list(y = y, x = x, b = b, gamma = gamma, z = model.matrix(~ factor(z) + 0)[, ], b.re = b.re, Gy = G, mrfG = combnAll)) } else { # add random effects z <- t(rmultinom(n, size = 1, prob = c(.1, .2, .3, .4))) z <- sample(1:t0, n, replace = T, prob = rep(1 / t0, t0)) set.seed(1683) b.re <- rnorm(t0, 0, 2) y <- matrix(b.re[z], nrow = n, ncol = s) + xb + err return(list( y = y, x = x, b = b, gamma = gamma, z = model.matrix(~ factor(z) + 0)[, ], b.re = b.re, Gy = G, mrfG = combnAll )) } } ``` To simulate data with sample size $n=250$, responsible variables $s=20$ and covariates $p=300$, we can specify the corresponding parameters in the function `sim.ssur()` as follows. ```{r} library("Matrix") n <- 250 s <- 20 p <- 300 sim1 <- sim.ssur(n, s, p, seed = 1) ``` To simulate data from $4$ individual groups with group indicator variables following the defaul multinomial distribution $multinomial(0.1,0.2,0.3,0.4)$, we can simply add the argument `t0 = 4` in the function `sim.ssur()` as follows. ```{r} t0 <- 4 sim2 <- sim.ssur(n, s, p, t0, seed = 1) # learning data sim2.val <- sim.ssur(n, s, p, t0, seed=101) # validation data ``` ## Run BayesSUR model with random effects According to the guideline of prior specification in [Zhao et al. 2023](https://doi.org/10.1093/jrsssc/qlad102), we first set the following parameters `hyperpar` and then running the BayesSUR model with random effects via `betaPrior = "reGroup"` (default `betaPrior = "independent"` with spike-and-slab priors for all coefficients). **For illustration, we run a short MCMC** with `nIter = 300` and `burnin = 100`. Note that here the graph used for the Markov random field prior is the true graph from the returned object of the simulation `sim2$mrfG`. ```{r} hyperpar <- list(mrf_d = -2, mrf_e = 1.6, a_w0 = 100, b_w0 = 500, a_w = 15, b_w = 60) set.seed(1038) fit2 <- BayesSUR( data = cbind(sim2$y, sim2$z, sim2$x), Y = 1:s, X_0 = s + 1:t0, X = s + t0 + 1:p, outFilePath = "sim2_mrf_re", hyperpar = hyperpar, gammaInit = "0", betaPrior = "reGroup", nIter = 300, burnin = 100, covariancePrior = "HIW", standardize = F, standardize.response = F, gammaPrior = "MRF", mrfG = sim2$mrfG, output_CPO = T ) ``` ``` ## BayesSUR -- Bayesian Seemingly Unrelated Regression Modelling ## Reading input files ... ... successfull! ## Clearing and initialising output files ## Initialising the (SUR) MCMC Chain ... ... DONE! ## Drafting the output files with the start of the chain ... DONE! ## ## Starting 2 (parallel) chain(s) for 300 iterations: ## Temperature ladder updated, new temperature ratio : 1.1 ## MCMC ends. 
--- Saving results and exiting ## Saved to : sim2_mrf_re1/data_SSUR_****_out.txt ## Final w : 0.148291 ## Final tau : 1.84125 w/ proposal variance: 0.408163 ## Final eta : 0.0355005 ## -- Average Omega : 0 ## Final temperature ratio : 1.1 ## ## DONE, exiting! ``` Check some summarized information of the results: ```{r} summary(fit2) ``` ``` ## Call: ## BayesSUR(data = cbind(sim2$y, sim2$z, sim2$x), ...) ## ## CPOs: ## Min. 1st Qu. Median 3rd Qu. Max. ## 0.0001118321 0.0241323466 0.0349716031 0.0456556652 0.2321280902 ## ## Number of selected predictors (mPIP > 0.5): 2843 of 20x300 ## ## Top 10 predictors on average mPIP across all responses: ## X.130 X.54 X.249 X.56 X.77 X.253 X.281 X.80 ## 0.720640 0.706705 0.652730 0.650985 0.643780 0.640780 0.639045 0.636565 ## X.260 X.297 ## 0.634820 0.629595 ## ## Top 10 responses on average mPIP across all predictors: ## X.8 X.5 X.12 X.6 X.18 X.4 X.14 X.1 ## 0.4957363 0.4879933 0.4873303 0.4860670 0.4846080 0.4828333 0.4784240 0.4773090 ## X.19 X.2 ## 0.4756350 0.4742257 ## ## Expected log pointwise predictive density (elpd) estimates: ## elpd.LOO = -16437.89, elpd.WAIC = -16470.16 ## ## MCMC specification: ## iterations = 300, burn-in = 100, chains = 2 ## gamma local move sampler: bandit ## gamma initialisation: 0 ## ## Model specification: ## covariance prior: HIW ## gamma prior: MRF ## ## Hyper-parameters: ## a_w b_w nu a_tau b_tau a_eta b_eta mrf_d mrf_e a_w0 b_w0 ## 15.0 60.0 22.0 0.1 10.0 0.1 1.0 -2.0 1.6 100.0 500.0 ``` Compute the model performance with respect to **variable selection** ```{r} # compute accuracy, sensitivity, specificity of variable selection gamma <- getEstimator(fit2) (accuracy <- sum(data.matrix(gamma > 0.5) == sim2$gamma) / prod(dim(gamma))) ``` ``` ## [1] 0.5358333 ``` ```{r} (sensitivity <- sum((data.matrix(gamma > 0.5) == 1) & (sim2$gamma == 1)) / sum(sim2$gamma == 1)) ``` ``` ## [1] 0.5376623 ``` ```{r} (specificity <- sum((data.matrix(gamma > 0.5) == 0) & (sim2$gamma == 0)) / sum(sim2$gamma == 0)) ``` ``` ## [1] 0.5355641 ``` Compute the model performance with respect to **response prediction** ```{r} # compute RMSE and RMSPE for prediction performance beta <- getEstimator(fit2, estimator = "beta", Pmax = .5, beta.type = "conditional") (RMSE <- sqrt(sum((sim2$y - cbind(sim2$z, sim2$x) %*% beta)^2) / prod(dim(sim2$y)))) ``` ``` ## [1] 7.025327 ``` ```{r} (RMSPE <- sqrt(sum((sim2.val$y - cbind(sim2.val$z, sim2.val$x) %*% beta)^2) / prod(dim(sim2.val$y)))) ``` ``` ## [1] 8.084381 ``` Compute the model performance with respect to **coefficient bias** ```{r} # compute bias of beta estimates b <- sim2$b b[sim2$gamma == 0] <- 0 (beta.l2 <- sqrt(sum((beta[-c(1:4), ] - b)^2) / prod(dim(b)))) ``` ``` ## [1] 0.4530592 ``` Compute the model performance with respect to **covariance selection** ```{r} g.re <- getEstimator(fit2, estimator = "Gy") (g.accuracy <- sum((g.re > 0.5) == sim2$Gy) / prod(dim(g.re))) ``` ``` ## [1] 0.545 ``` ```{r} (g.sensitivity <- sum(((g.re > 0.5) == sim2$Gy)[sim2$Gy == 1]) / sum(sim2$Gy == 1)) ``` ``` ## [1] 0.1287129 ``` ```{r} (g.specificity <- sum(((g.re > 0.5) == sim2$Gy)[sim2$Gy == 0]) / sum(sim2$Gy == 0)) ``` ``` ## [1] 0.969697 ``` ## Referrence > Zhi Zhao, Marco Banterle, Alex Lewin, Manuela Zucknick (2023). > Multivariate Bayesian structured variable selection for pharmacogenomic studies. > _Journal of the Royal Statistical Society: Series C (Applied Statistics)_, qlad102. DOI: [10.1093/jrsssc/qlad102](https://doi.org/10.1093/jrsssc/qlad102).
/scratch/gouwar.j/cran-all/cranData/BayesSUR/inst/doc/BayesSUR-RE.Rmd
--- title: "BayesSUR with random effects" author: "Zhi Zhao" output: rmarkdown::html_vignette vignette: > %\VignetteEngine{knitr::rmarkdown} %\VignetteIndexEntry{BayesSUR with random effects} \usepackage[utf8]{inputenc} --- ```{css, echo=FALSE} pre { overflow-y: auto; } pre[class] { max-height: 350px; } ``` ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE, eval = FALSE) options(rmarkdown.html_vignette.check_title = FALSE) ``` The BayesSUR model has been extended to include mandatory variables by assigning Gaussian priors as random effects rather than spike-and-slab priors, named as **SSUR-MRF with random effects** in [Zhao et al. 2023](https://doi.org/10.1093/jrsssc/qlad102). The R code for the simulated data and real data analyses in [Zhao et al. 2023](https://doi.org/10.48550/arXiv.2101.05899) can be found at the GitHub repository [BayesSUR-RE](https://github.com/zhizuio/BayesSUR-RE). Here, we show some small examples to run the BayesSUR mdoel with random effects. To get started, load the package with ```{r, eval=TRUE} library("BayesSUR") ``` ## Simulate data We design a network as the following figure (a) to construct a complex structure between $20$ response variables and $300$ predictors. It assumes that the responses are divided into six groups, and the first $120$ predictors are divided into nine groups. ![<font size="2">_Simulation scenarios: True relationships between response variables and predictors. (a) Network structure between $\mathbf Y$ and $\mathbf X$. (b) Spare latent indicator variable $\Gamma$ for the associations between $\mathbf Y$ and $\mathbf X$ in the SUR model. Black blocks indicate nonzero coefficients and white blocks indicate zero coefficients. (c) Additional structure in the residual covariance matrix between response variables not explained by $\mathbf X\mathbf B$. Black blocks indicate correlated residuals of the corresponding response variables and white blocks indicate uncorrelated residuals of the corresponding response variables._</font>](../man/figures/figure2.png){width=90%} <br> Load the simulation function `sim.ssur()` as follows. 
```{r} library("gRbase") sim.ssur <- function(n, s, p, t0 = 0, seed = 123, mv = TRUE, t.df = Inf, random.intercept = 0, intercept = TRUE) { # set seed to fix coefficients set.seed(7193) sd_b <- 1 mu_b <- 1 b <- matrix(rnorm((p + ifelse(t0 == 0, 1, 0)) * s, mu_b, sd_b), p + ifelse(t0 == 0, 1, 0), s) # design groups and pathways of Gamma matrix gamma <- matrix(FALSE, p + ifelse(t0 == 0, 1, 0), s) if (t0 == 0) gamma[1, ] <- TRUE gamma[2:6 - ifelse(t0 == 0, 0, 1), 1:5] <- TRUE gamma[11:21 - ifelse(t0 == 0, 0, 1), 6:12] <- TRUE gamma[31:51 - ifelse(t0 == 0, 0, 1), 1:5] <- TRUE gamma[31:51 - ifelse(t0 == 0, 0, 1), 13:15] <- TRUE gamma[52:61 - ifelse(t0 == 0, 0, 1), 1:12] <- TRUE gamma[71:91 - ifelse(t0 == 0, 0, 1), 6:15] <- TRUE gamma[111:121 - ifelse(t0 == 0, 0, 1), 1:15] <- TRUE gamma[122 - ifelse(t0 == 0, 0, 1), 16:18] <- TRUE gamma[123 - ifelse(t0 == 0, 0, 1), 19] <- TRUE gamma[124 - ifelse(t0 == 0, 0, 1), 20] <- TRUE G_kron <- matrix(0, s * p, s * p) G_m <- bdiag(matrix(1, ncol = 5, nrow = 5), matrix(1, ncol = 7, nrow = 7), matrix(1, ncol = 8, nrow = 8)) G_p <- bdiag(matrix(1, ncol = 5, nrow = 5), diag(3), matrix(1, ncol = 11, nrow = 11), diag(9), matrix(1, ncol = 21, nrow = 21), matrix(1, ncol = 10, nrow = 10), diag(9), matrix(1, ncol = 21, nrow = 21), diag(19), matrix(1, ncol = 11, nrow = 11), diag(181)) G_kron <- kronecker(G_m, G_p) combn11 <- combn(rep((1:5 - 1) * p, each = length(1:5)) + rep(1:5, times = length(1:5)), 2) combn12 <- combn(rep((1:5 - 1) * p, each = length(30:60)) + rep(30:60, times = length(1:5)), 2) combn13 <- combn(rep((1:5 - 1) * p, each = length(110:120)) + rep(110:120, times = length(1:5)), 2) combn21 <- combn(rep((6:12 - 1) * p, each = length(10:20)) + rep(10:20, times = length(6:12)), 2) combn22 <- combn(rep((6:12 - 1) * p, each = length(51:60)) + rep(51:60, times = length(6:12)), 2) combn23 <- combn(rep((6:12 - 1) * p, each = length(70:90)) + rep(70:90, times = length(6:12)), 2) combn24 <- combn(rep((6:12 - 1) * p, each = length(110:120)) + rep(110:120, times = length(6:12)), 2) combn31 <- combn(rep((13:15 - 1) * p, each = length(30:50)) + rep(30:50, times = length(13:15)), 2) combn32 <- combn(rep((13:15 - 1) * p, each = length(70:90)) + rep(70:90, times = length(13:15)), 2) combn33 <- combn(rep((13:15 - 1) * p, each = length(110:120)) + rep(110:120, times = length(13:15)), 2) combn4 <- combn(rep((16:18 - 1) * p, each = length(121)) + rep(121, times = length(16:18)), 2) combn5 <- matrix(rep((19 - 1) * p, each = length(122)) + rep(122, times = length(19)), nrow = 1, ncol = 2) combn6 <- matrix(rep((20 - 1) * p, each = length(123)) + rep(123, times = length(20)), nrow = 1, ncol = 2) combnAll <- rbind(t(combn11), t(combn12), t(combn13), t(combn21), t(combn22), t(combn23), t(combn24), t(combn31), t(combn32), t(combn33), t(combn4), combn5, combn6) set.seed(seed + 7284) sd_x <- 1 x <- matrix(rnorm(n * p, 0, sd_x), n, p) if (t0 == 0 & intercept) x <- cbind(rep(1, n), x) if (!intercept) { gamma <- gamma[-1, ] b <- b[-1, ] } xb <- matrix(NA, n, s) if (mv) { for (i in 1:s) { if (sum(gamma[, i]) >= 1) { if (sum(gamma[, i]) == 1) { xb[, i] <- x[, gamma[, i]] * b[gamma[, i], i] } else { xb[, i] <- x[, gamma[, i]] %*% b[gamma[, i], i] } } else { xb[, i] <- sapply(1:s, function(i) rep(1, n) * b[1, i]) } } } else { if (sum(gamma) >= 1) { xb <- x[, gamma] %*% b[gamma, ] } else { xb <- sapply(1:s, function(i) rep(1, n) * b[1, i]) } } corr_param <- 0.9 M <- matrix(corr_param, s, s) diag(M) <- rep(1, s) ## wanna make it decomposable Prime <- list(c(1:(s * .4), (s * .8):s), c((s * 
.4):(s * .6)), c((s * .65):(s * .75)), c((s * .8):s)) G <- matrix(0, s, s) for (i in 1:length(Prime)) { G[Prime[[i]], Prime[[i]]] <- 1 } # check dimnames(G) <- list(1:s, 1:s) length(gRbase::mcsMAT(G - diag(s))) > 0 var <- solve(BDgraph::rgwish(n = 1, adj = G, b = 3, D = M)) # change seeds to add randomness on error set.seed(seed + 8493) sd_err <- 0.5 if (is.infinite(t.df)) { err <- matrix(rnorm(n * s, 0, sd_err), n, s) %*% chol(as.matrix(var)) } else { err <- matrix(rt(n * s, t.df), n, s) %*% chol(as.matrix(var)) } if (t0 == 0) { b.re <- NA z <- NA y <- xb + err if (random.intercept != 0) { y <- y + matrix(rnorm(n * s, 0, sqrt(random.intercept)), n, s) } z <- sample(1:4, n, replace = T, prob = rep(1 / 4, 4)) return(list(y = y, x = x, b = b, gamma = gamma, z = model.matrix(~ factor(z) + 0)[, ], b.re = b.re, Gy = G, mrfG = combnAll)) } else { # add random effects z <- t(rmultinom(n, size = 1, prob = c(.1, .2, .3, .4))) z <- sample(1:t0, n, replace = T, prob = rep(1 / t0, t0)) set.seed(1683) b.re <- rnorm(t0, 0, 2) y <- matrix(b.re[z], nrow = n, ncol = s) + xb + err return(list( y = y, x = x, b = b, gamma = gamma, z = model.matrix(~ factor(z) + 0)[, ], b.re = b.re, Gy = G, mrfG = combnAll )) } } ``` To simulate data with sample size $n=250$, responsible variables $s=20$ and covariates $p=300$, we can specify the corresponding parameters in the function `sim.ssur()` as follows. ```{r} library("Matrix") n <- 250 s <- 20 p <- 300 sim1 <- sim.ssur(n, s, p, seed = 1) ``` To simulate data from $4$ individual groups with group indicator variables following the defaul multinomial distribution $multinomial(0.1,0.2,0.3,0.4)$, we can simply add the argument `t0 = 4` in the function `sim.ssur()` as follows. ```{r} t0 <- 4 sim2 <- sim.ssur(n, s, p, t0, seed = 1) # learning data sim2.val <- sim.ssur(n, s, p, t0, seed=101) # validation data ``` ## Run BayesSUR model with random effects According to the guideline of prior specification in [Zhao et al. 2023](https://doi.org/10.1093/jrsssc/qlad102), we first set the following parameters `hyperpar` and then running the BayesSUR model with random effects via `betaPrior = "reGroup"` (default `betaPrior = "independent"` with spike-and-slab priors for all coefficients). **For illustration, we run a short MCMC** with `nIter = 300` and `burnin = 100`. Note that here the graph used for the Markov random field prior is the true graph from the returned object of the simulation `sim2$mrfG`. ```{r} hyperpar <- list(mrf_d = -2, mrf_e = 1.6, a_w0 = 100, b_w0 = 500, a_w = 15, b_w = 60) set.seed(1038) fit2 <- BayesSUR( data = cbind(sim2$y, sim2$z, sim2$x), Y = 1:s, X_0 = s + 1:t0, X = s + t0 + 1:p, outFilePath = "sim2_mrf_re", hyperpar = hyperpar, gammaInit = "0", betaPrior = "reGroup", nIter = 300, burnin = 100, covariancePrior = "HIW", standardize = F, standardize.response = F, gammaPrior = "MRF", mrfG = sim2$mrfG, output_CPO = T ) ``` ``` ## BayesSUR -- Bayesian Seemingly Unrelated Regression Modelling ## Reading input files ... ... successfull! ## Clearing and initialising output files ## Initialising the (SUR) MCMC Chain ... ... DONE! ## Drafting the output files with the start of the chain ... DONE! ## ## Starting 2 (parallel) chain(s) for 300 iterations: ## Temperature ladder updated, new temperature ratio : 1.1 ## MCMC ends. 
--- Saving results and exiting ## Saved to : sim2_mrf_re1/data_SSUR_****_out.txt ## Final w : 0.148291 ## Final tau : 1.84125 w/ proposal variance: 0.408163 ## Final eta : 0.0355005 ## -- Average Omega : 0 ## Final temperature ratio : 1.1 ## ## DONE, exiting! ``` Check some summarized information of the results: ```{r} summary(fit2) ``` ``` ## Call: ## BayesSUR(data = cbind(sim2$y, sim2$z, sim2$x), ...) ## ## CPOs: ## Min. 1st Qu. Median 3rd Qu. Max. ## 0.0001118321 0.0241323466 0.0349716031 0.0456556652 0.2321280902 ## ## Number of selected predictors (mPIP > 0.5): 2843 of 20x300 ## ## Top 10 predictors on average mPIP across all responses: ## X.130 X.54 X.249 X.56 X.77 X.253 X.281 X.80 ## 0.720640 0.706705 0.652730 0.650985 0.643780 0.640780 0.639045 0.636565 ## X.260 X.297 ## 0.634820 0.629595 ## ## Top 10 responses on average mPIP across all predictors: ## X.8 X.5 X.12 X.6 X.18 X.4 X.14 X.1 ## 0.4957363 0.4879933 0.4873303 0.4860670 0.4846080 0.4828333 0.4784240 0.4773090 ## X.19 X.2 ## 0.4756350 0.4742257 ## ## Expected log pointwise predictive density (elpd) estimates: ## elpd.LOO = -16437.89, elpd.WAIC = -16470.16 ## ## MCMC specification: ## iterations = 300, burn-in = 100, chains = 2 ## gamma local move sampler: bandit ## gamma initialisation: 0 ## ## Model specification: ## covariance prior: HIW ## gamma prior: MRF ## ## Hyper-parameters: ## a_w b_w nu a_tau b_tau a_eta b_eta mrf_d mrf_e a_w0 b_w0 ## 15.0 60.0 22.0 0.1 10.0 0.1 1.0 -2.0 1.6 100.0 500.0 ``` Compute the model performance with respect to **variable selection** ```{r} # compute accuracy, sensitivity, specificity of variable selection gamma <- getEstimator(fit2) (accuracy <- sum(data.matrix(gamma > 0.5) == sim2$gamma) / prod(dim(gamma))) ``` ``` ## [1] 0.5358333 ``` ```{r} (sensitivity <- sum((data.matrix(gamma > 0.5) == 1) & (sim2$gamma == 1)) / sum(sim2$gamma == 1)) ``` ``` ## [1] 0.5376623 ``` ```{r} (specificity <- sum((data.matrix(gamma > 0.5) == 0) & (sim2$gamma == 0)) / sum(sim2$gamma == 0)) ``` ``` ## [1] 0.5355641 ``` Compute the model performance with respect to **response prediction** ```{r} # compute RMSE and RMSPE for prediction performance beta <- getEstimator(fit2, estimator = "beta", Pmax = .5, beta.type = "conditional") (RMSE <- sqrt(sum((sim2$y - cbind(sim2$z, sim2$x) %*% beta)^2) / prod(dim(sim2$y)))) ``` ``` ## [1] 7.025327 ``` ```{r} (RMSPE <- sqrt(sum((sim2.val$y - cbind(sim2.val$z, sim2.val$x) %*% beta)^2) / prod(dim(sim2.val$y)))) ``` ``` ## [1] 8.084381 ``` Compute the model performance with respect to **coefficient bias** ```{r} # compute bias of beta estimates b <- sim2$b b[sim2$gamma == 0] <- 0 (beta.l2 <- sqrt(sum((beta[-c(1:4), ] - b)^2) / prod(dim(b)))) ``` ``` ## [1] 0.4530592 ``` Compute the model performance with respect to **covariance selection** ```{r} g.re <- getEstimator(fit2, estimator = "Gy") (g.accuracy <- sum((g.re > 0.5) == sim2$Gy) / prod(dim(g.re))) ``` ``` ## [1] 0.545 ``` ```{r} (g.sensitivity <- sum(((g.re > 0.5) == sim2$Gy)[sim2$Gy == 1]) / sum(sim2$Gy == 1)) ``` ``` ## [1] 0.1287129 ``` ```{r} (g.specificity <- sum(((g.re > 0.5) == sim2$Gy)[sim2$Gy == 0]) / sum(sim2$Gy == 0)) ``` ``` ## [1] 0.969697 ``` ## Referrence > Zhi Zhao, Marco Banterle, Alex Lewin, Manuela Zucknick (2023). > Multivariate Bayesian structured variable selection for pharmacogenomic studies. > _Journal of the Royal Statistical Society: Series C (Applied Statistics)_, qlad102. DOI: [10.1093/jrsssc/qlad102](https://doi.org/10.1093/jrsssc/qlad102).
/scratch/gouwar.j/cran-all/cranData/BayesSUR/vignettes/BayesSUR-RE.Rmd
# Cap 3.2 #' calculates the C factor #' #'@inheritParams BLE_Reg C <- function(ys,xs,R,Vs) { c1 <- ginv(R) c2 <- t(xs)%*%ginv(Vs)%*%xs C_minus <- c1 + c2 C_result <- ginv(C_minus) return(C_result) } #' calculates the BLE for Beta #' #'@inheritParams BLE_Reg E_beta <- function(ys,xs,a,R,Vs) { c_beta <- C(ys,xs,R,Vs) p1 <- t(xs)%*%ginv(Vs)%*%ys p2 <- ginv(R)%*%a res <- c_beta%*%(p1 + p2) return(res) } #' calculates the risk matrix associated with the BLE for Beta #' #'@inheritParams BLE_Reg V_beta <- function(ys,xs,R,Vs){ v_beta <- C(ys,xs,R,Vs) return(v_beta) } #'calculates the BLE for the individuals not in the sample #'@inheritParams BLE_Reg E_theta_Reg <- function(ys,xs,a,R,Vs,x_nots) { c_theta <- C(ys,xs,R,Vs) p1 <- t(xs)%*%ginv(Vs)%*%ys p2 <- ginv(R)%*%a res <- x_nots%*%c_theta%*%(p1 + p2) return(res) } #'calculates the risk matrix associated with the BLE for the individuals not in the sample #'@inheritParams BLE_Reg V_theta_Reg <- function(ys,xs,R,Vs,x_nots,V_nots) { c_theta <- C(ys,xs,R,Vs) p1 <- x_nots%*%c_theta%*%t(x_nots) p2 <- V_nots res <- p1 + p2 return(res) } #'calculates BLE for the total T #'@inheritParams BLE_Reg #' T_Reg <- function(ys,xs,a,R,Vs,x_nots) { one_s <- create1(ys) parc1 <- t(one_s)%*%ys esp <- E_theta_Reg(ys,xs,a,R,Vs,x_nots) one_nots <- create1(esp) parc2 <- t(one_nots)%*%esp return(parc1 + parc2) } #'calculates risk matrix associated with the BLE for for the total T #'@inheritParams BLE_Reg #' VT_Reg <- function(ys,xs,a,R,Vs,x_nots,V_nots) { v_theta <- V_theta_Reg(ys,xs,R,Vs,x_nots,V_nots) esp <- E_theta_Reg(ys,xs,a,R,Vs,x_nots) one_nots <- create1(esp) res <- t(one_nots)%*%v_theta%*%one_nots return(res) } #' General BLE case #' #' Calculates the Bayes Linear Estimator for Regression models (general case) #'@param ys response variable of the sample #'@param xs explicative variable of the sample #'@param a vector of means from Beta #'@param R covariance matrix of Beta #'@param Vs covariance of sample errors #'@param x_nots values of X for the individuals not in the sample #'@param V_nots covariance matrix of the individuals not in the sample #' #' @return A list containing the following components: \itemize{ #' \item \code{est.beta} - BLE of Beta #' \item \code{Vest.beta} - Variance associated with the above #' \item \code{est.mean} - BLE of each individual not in the sample #' \item \code{Vest.mean} - Covariance matrix associated with the above #' \item \code{est.tot} - BLE for the total #' \item \code{Vest.tot} - Variance associated with the above #' } #' #' @source \url{https://www150.statcan.gc.ca/n1/en/catalogue/12-001-X201400111886} #' @references Gonçalves, K.C.M, Moura, F.A.S and Migon, H.S.(2014). Bayes Linear Estimation for Finite Population with emphasis on categorical data. Survey Methodology, 40, 15-28. 
#' #' @examples #' xs <- matrix(c(1,1,1,1,2,3,5,0),nrow=4,ncol=2) #' ys <- c(12,17,28,2) #' x_nots <- matrix(c(1,1,1,0,1,4),nrow=3,ncol=2) #' a <- c(1.5,6) #' R <- matrix(c(10,2,2,10),nrow=2,ncol=2) #' Vs <- diag(c(1,1,1,1)) #' V_nots <- diag(c(1,1,1)) #' #' Estimator <- BLE_Reg(ys, xs, a, R, Vs, x_nots, V_nots) #' Estimator #' #' @export BLE_Reg <- function(ys,xs,a,R,Vs,x_nots,V_nots){ beta <- as.data.frame(E_beta(ys,xs,a,R,Vs)) colnames(beta) = c("Beta") var_beta <- as.data.frame(V_beta(ys,xs,R,Vs)) y_nots <- as.data.frame(E_theta_Reg(ys,xs,a,R,Vs,x_nots)) colnames(y_nots) = c("y_nots") var_y_nots <- as.data.frame(V_theta_Reg(ys,xs,R,Vs,x_nots,V_nots)) total <- T_Reg(ys,xs,a,R,Vs,x_nots)[,] var_total <- VT_Reg(ys,xs,a,R,Vs,x_nots,V_nots)[,] return(list(est.beta = beta, Vest.beta = var_beta,est.mean = y_nots, Vest.mean = var_y_nots, est.tot = total, Vest.tot = var_total)) }
/scratch/gouwar.j/cran-all/cranData/BayesSampling/R/BLE_Reg.R
#' Full Person-level Population Database #' #' This data set corresponds to some socioeconomic variables from 150266 people of a city in a particular year. #' #' @docType data #' #' @usage data(BigCity) #' #' @format A data.frame with 150266 rows and 12 variables: #' \describe{ #' \item{HHID}{The identifier of the household. It corresponds to an alphanumeric sequence (four letters and five digits).} #' \item{PersonID}{The identifier of the person within the household. NOTE it is not a unique identifier of a person for the whole population. It corresponds to an alphanumeric sequence (five letters and two digits).} #' \item{Stratum}{Households are located in geographic strata. There are 119 strata across the city.} #' \item{PSU}{Households are clustered in cartographic segments defined as primary sampling units (PSU). There are 1664 PSU and they are nested within strata.} #' \item{Zone}{Segments clustered within strata can be located within urban or rural areas along the city.} #' \item{Sex}{Sex of the person.} #' \item{Income}{Per capita monthly income.} #' \item{Expenditure}{Per capita monthly expenditure.} #' \item{Employment}{A person's employment status.} #' \item{Poverty}{This variable indicates whether the person is poor or not. It depends on income.} #' } #' #' @references Package ‘TeachingSampling’; see \code{\link[TeachingSampling]{BigCity}} #' #' @source \url{https://CRAN.R-project.org/package=TeachingSampling} #' "BigCity"
/scratch/gouwar.j/cran-all/cranData/BayesSampling/R/Data.R
#' Bayes Linear Method for Categorical Data #' #' Creates the Bayes Linear Estimator for Categorical Data #' #' @param ys k-vector of sample proportion for each category. #' @param n sample size. #' @param N total size of the population. #' @param m k-vector with the prior proportion of each category. If \code{NULL}, sample proportion for each category will be used (non-informative prior). #' @param rho matrix with the prior correlation coefficients between two different units within categories. It must be a symmetric square matrix of dimension k (or k-1). If \code{NULL}, non-informative prior will be used. #' #' @return A list containing the following components: \itemize{ #' \item \code{est.prop} - BLE for the sample proportion of each category #' \item \code{Vest.prop} - Variance associated with the above #' \item \code{Vs.Matrix} - Vs matrix, as defined by the BLE method (should be a positive-definite matrix) #' \item \code{R.Matrix} - R matrix, as defined by the BLE method (should be a positive-definite matrix) #' } #' #' @source \url{https://www150.statcan.gc.ca/n1/en/catalogue/12-001-X201400111886} #' @references Gonçalves, K.C.M, Moura, F.A.S and Migon, H.S.(2014). Bayes Linear Estimation for Finite Population with emphasis on categorical data. Survey Methodology, 40, 15-28. #' #' @examples #' # 2 categories #' ys <- c(0.2614, 0.7386) #' n <- 153 #' N <- 15288 #' m <- c(0.7, 0.3) #' rho <- matrix(0.1, 1) #' #' Estimator <- BLE_Categorical(ys,n,N,m,rho) #' Estimator #' #' ys <- c(0.2614, 0.7386) #' n <- 153 #' N <- 15288 #' m <- c(0.7, 0.3) #' rho <- matrix(0.5, 1) #' #' Estimator <- BLE_Categorical(ys,n,N,m,rho) #' Estimator #' #' #' # 3 categories #' ys <- c(0.2, 0.5, 0.3) #' n <- 100 #' N <- 10000 #' m <- c(0.4, 0.1, 0.5) #' mat <- c(0.4, 0.1, 0.1, 0.1, 0.2, 0.1, 0.1, 0.1, 0.6) #' rho <- matrix(mat, 3, 3) #' #' Estimator <- BLE_Categorical(ys,n,N,m,rho) #' Estimator #' #' @export BLE_Categorical <- function(ys, n, N, m=NULL, rho=NULL){ mes_1 <- "parameter 'm' (prior proportions) not informed, sample proportions used in estimations" mes_2 <- "parameter 'rho' not informed, non-informative prior correlation coefficients used in estimations" k <- length(ys) if( k == 1 ){stop("only 1 category defined")} if( prod(ys >= 0) != 1 ){stop("all sample proportions must be non-negative numbers")} if( sum(ys) != 1 ){stop("sum of sample proportions should be 1")} if( is.null(m) ){ message(mes_1) m <- ys } if( length(m) != length(ys) ){stop("length of parameters 'ys' and 'm' must coincide")} if( prod(m > 0) != 1 ){stop("all prior proportions must be positive numbers")} if( sum(m) != 1 ){stop("sum of prior proportions should be 1")} rho_informed <- 1 if( is.null(rho) ){ rho_informed <- 0 message(mes_2) rho <- diag(x = 1-1e-10, nrow=k) } if( ! is.symmetric.matrix(rho) | dim(rho)[1] < k-1 ){stop("'rho' must be a symmetric square matrix of dimension k")} if( max(abs(rho)) >= 1 ){stop("all values in 'rho' must be between -1 and 1")} ys <- ys[-k] m <- m[-k] # Matrix 'm_ij' m_ij <- matrix(0, nrow = k-1, ncol = k-1) for (i in 1:(k-1)) { for (j in 1:(k-1)) { m_ij[i,j] <- (m[i]*m[j] + rho[j,i]*sqrt( m[i]*(1-m[i])*m[j]*(1-m[j]) ))/m[j] } } a <- m v <- m*(1-m) c <- m * (diag(m_ij) - m) sigma <- sqrt(v - c) xs <- diag(k-1) x_nots <- diag(k-1) # Matrix 'R' R_d <- diag(c, nrow = k-1) # elements in the diagonal R_out <- matrix(0, nrow = k-1, ncol = k-1) for (i in 1:(k-1)) { for (j in 1:(k-1)) { R_out[i,j] <- m[i] * (m_ij[j,i] - m[j]) # elements outside the diagonal } } R_out <- R_out * (1 - diag(nrow = k-1)) R <- R_d + R_out if( rho_informed == 1 ){ if( !
is.symmetric.matrix(R) ){stop("'R' must be a symmetric matrix. Review parameter 'rho'")} if( ! is.positive.definite(R, tol=1e-15) ){warning("'R' should be a positive-definite matrix. Possible problem with parameter 'rho'")} } # Matrix 'Vs' Vs_d <- diag(sigma^2, nrow = k-1) # elements in the diagonal Vs_out <- matrix(0, nrow = k-1, ncol = k-1) for (i in 1:(k-1)) { for (j in 1:(k-1)) { Vs_out[i,j] <- (-m[i])*m_ij[j,i] # elements outside the diagonal } } Vs_out <- Vs_out * (1 - diag(nrow = k-1)) Vs <- (1/n)*(Vs_d + Vs_out) if( rho_informed == 1 ){ if( ! is.symmetric.matrix(Vs) ){stop("'Vs' must be a symmetric matrix. Review parameter 'rho'")} if( ! is.positive.definite(Vs, tol=1e-15) ){warning("'Vs' should be a positive-definite matrix. Possible problem with parameter 'rho'")} } V_nots <- Vs*n/(N-n) C_inv <- ginv(R) + ginv(Vs) C <- ginv(C_inv) Beta <- C%*%(ginv(Vs)%*%ys + ginv(R)%*%a) p_aux <- (n*ys + (N-n)*Beta)/N p_k <- 1 - sum(p_aux) p <- c(p_aux,p_k) V_aux <- (V_nots + C) * ((N-n)/N)^2 V_k <- sum(V_aux) Cov_k <- numeric(k-1) for (i in 1:(k-1)) { Cov_k[i] <- -sum(V_aux[i,]) } V_p <- rbind(cbind(V_aux, Cov_k), c(Cov_k, V_k)) if( prod(diag(V_p) > 0) != 1 ){warning("'Vest.prop' should have only positive diagonal values. Review prior specification and verify calculated matrices 'R' and 'Vs'.")} return(list(est.prop = p, Vest.prop = V_p, Vs.Matrix = Vs, R.Matrix = R)) }
/scratch/gouwar.j/cran-all/cranData/BayesSampling/R/categorical.R
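The vague-prior behaviour documented in the vignette further down can be sanity-checked directly: with `rho = NULL` the function sets `rho <- diag(1-1e-10, nrow = k)`, so the point estimate should essentially reproduce the sample proportions regardless of `m`. A short sketch under that assumption (package assumed installed):

```r
# Vague-prior check for BLE_Categorical(): rho omitted => rho_ii -> 1,
# so est.prop should be driven almost entirely by the sample proportions.
library(BayesSampling)

ys  <- c(0.2614, 0.7386)
est <- BLE_Categorical(ys, n = 153, N = 15288, m = c(0.7, 0.3), rho = NULL)
round(est$est.prop, 4)   # expected to be very close to ys, not to m
```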
#' creates vector of 1's to be used in the estimators #' @param y sample matrix #' @return vector of 1's with size equal to the number of observations in the sample #' create1 <- function(y){ if(is.vector(y)){ vect1 <- rep(1,length(y)) } else if(is.list(y)){ vect1 <- rep(1,dim(y)[1]) } else if(is.matrix(y)){ vect1 <- rep(1,length(y[,1])) } else { stop("incorrect number of dimensions") } return(vect1) }
/scratch/gouwar.j/cran-all/cranData/BayesSampling/R/create1.R
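`create1()` is internal (not exported), so outside the package namespace it is only reachable via `:::`. A quick illustration of its branches:

```r
# One 1 per observation for vectors; one 1 per row for matrices/data.frames.
BayesSampling:::create1(c(5, 6, 8))        # 1 1 1
BayesSampling:::create1(matrix(0, 4, 2))   # 1 1 1 1
```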
ginv <- MASS::ginv bdiag <- Matrix::bdiag #Diagonal <- Matrix::Diagonal var <- stats::var sample <- base::sample colnames <- base::colnames is.symmetric.matrix <- matrixcalc::is.symmetric.matrix is.positive.definite <- matrixcalc::is.positive.definite
/scratch/gouwar.j/cran-all/cranData/BayesSampling/R/imports.R
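The file above simply binds namespaced functions to short local names, so the package code can write `ginv(x)` instead of `MASS::ginv(x)`. The same pattern outside the package, purely for illustration (MASS assumed installed):

```r
ginv <- MASS::ginv
ginv(matrix(c(4, 0, 0, 0), 2, 2))   # Moore-Penrose pseudo-inverse of a singular matrix
```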
#' Simple Random Sample BLE #' #' Creates the Bayes Linear Estimator for the Simple Random Sampling design (without replacement) #' #' @param ys vector of sample observations or sample mean (\code{sigma} and \code{n} parameters will be required in this case). #' @param N total size of the population. #' @param m prior mean. If \code{NULL}, sample mean will be used (non-informative prior). #' @param v prior variance of an element from the population (bigger than \code{sigma^2}). If \code{NULL}, it will tend to infinity (non-informative prior). #' @param sigma prior estimate of variability (standard deviation) within the population. If \code{NULL}, sample variance will be used. #' @param n sample size. Necessary only if \code{ys} represent sample mean (will not be used otherwise). #' #' @return A list containing the following components: \itemize{ #' \item \code{est.beta} - BLE of Beta (BLE for every individual) #' \item \code{Vest.beta} - Variance associated with the above #' \item \code{est.mean} - BLE for each individual not in the sample #' \item \code{Vest.mean} - Covariance matrix associated with the above #' \item \code{est.tot} - BLE for the total #' \item \code{Vest.tot} - Variance associated with the above #' } #' #' @source \url{https://www150.statcan.gc.ca/n1/en/catalogue/12-001-X201400111886} #' @references Gonçalves, K.C.M, Moura, F.A.S and Migon, H.S.(2014). Bayes Linear Estimation for Finite Population with emphasis on categorical data. Survey Methodology, 40, 15-28. #' #' @examples #' ys <- c(5,6,8) #' N <- 5 #' m <- 6 #' v <- 5 #' sigma <- 1 #' #' Estimator <- BLE_SRS(ys, N, m, v, sigma) #' Estimator #' #' #' # Same example but informing sample mean and sample size instead of sample observations #' ys <- mean(c(5,6,8)) #' N <- 5 #' n <- 3 #' m <- 6 #' v <- 5 #' sigma <- 1 #' #' Estimator <- BLE_SRS(ys, N, m, v, sigma, n) #' Estimator #' #' @export BLE_SRS <- function(ys, N, m=NULL, v=NULL, sigma=NULL, n=NULL){ mes_1 <- "parameter 'm' (prior mean) not informed, sample mean used in estimations" mes_2 <- "parameter 'sigma' (prior variability) not informed, sample variance used in estimations" mes_3 <- "parameter 'v' (prior variance of an element) not informed, (10^100 * mean(ys)) used in estimations (non-informative prior)" mes_4 <- "sample mean informed instead of sample observations, parameters 'n' and 'sigma' will be necessary" if(length(ys)==1){ message(mes_4) if( (is.null(sigma)) | is.null(n) ){ stop("ys of length 1 requires not null parameters 'sigma' and 'n'") } ys <- rep(ys, n) } if (is.null(m)){ message(mes_1) m <- mean(ys) } if(is.null(sigma)){ message(mes_2) sigma <- sqrt(var(ys)) } if(is.null(v)){ message(mes_3) v <- 10^100 * mean(ys)} if(v < sigma^2){ stop("prior variance (parameter 'v') too small") } xs <- create1(ys) a <- m Vs <- diag(xs)*(sigma^2) c <- v - sigma^2 R <- c x_nots <- rep(1,N - length(ys)) V_nots <- diag(x_nots)*(sigma^2) return(BLE_Reg(ys,xs,a,R,Vs,x_nots,V_nots)) } #' Stratified Simple Random Sample BLE #' #' Creates the Bayes Linear Estimator for the Stratified Simple Random Sampling design (without replacement) #' @param ys vector of sample observations or sample mean for each strata (\code{sigma} parameter will be required in this case). #' @param h vector with number of observations in each strata. #' @param N vector with the total size of each strata. #' @param m vector with the prior mean of each strata. If \code{NULL}, sample mean for each strata will be used (non-informative prior). 
#' @param v vector with the prior variance of an element from each strata (bigger than \code{sigma^2} for each strata). If \code{NULL}, it will tend to infinity (non-informative prior). #' @param sigma vector with the prior estimate of variability (standard deviation) within each strata of the population. If \code{NULL}, sample variance of each strata will be used. #' #' @return A list containing the following components: \itemize{ #' \item \code{est.beta} - BLE of Beta (BLE for the individuals in each strata) #' \item \code{Vest.beta} - Variance associated with the above #' \item \code{est.mean} - BLE for each individual not in the sample #' \item \code{Vest.mean} - Covariance matrix associated with the above #' \item \code{est.tot} - BLE for the total #' \item \code{Vest.tot} - Variance associated with the above #' } #' #' @source \url{https://www150.statcan.gc.ca/n1/en/catalogue/12-001-X201400111886} #' @references Gonçalves, K.C.M, Moura, F.A.S and Migon, H.S.(2014). Bayes Linear Estimation for Finite Population with emphasis on categorical data. Survey Methodology, 40, 15-28. #' #' @examples #' ys <- c(2,-1,1.5, 6,10, 8,8) #' h <- c(3,2,2) #' N <- c(5,5,3) #' m <- c(0,9,8) #' v <- c(3,8,1) #' sigma <- c(1,2,0.5) #' #' Estimator <- BLE_SSRS(ys, h, N, m, v, sigma) #' Estimator #' #' #' # Same example but informing sample means instead of sample observations #' y1 <- mean(c(2,-1,1.5)) #' y2 <- mean(c(6,10)) #' y3 <- mean(c(8,8)) #' ys <- c(y1, y2, y3) #' h <- c(3,2,2) #' N <- c(5,5,3) #' m <- c(0,9,8) #' v <- c(3,8,1) #' sigma <- c(1,2,0.5) #' #' Estimator <- BLE_SSRS(ys, h, N, m, v, sigma) #' Estimator #' #' @export BLE_SSRS <- function(ys, h, N, m=NULL, v=NULL, sigma=NULL){ mes_1 <- "parameter 'm' (prior mean) not informed, sample mean used in estimations" mes_2 <- "parameter 'sigma' (prior variability) not informed, sample variance used in estimations" mes_3 <- "parameter 'v' (prior variance of an element) not informed, (10^100 * mean(ys)) used in estimations (non-informative prior)" mes_4 <- "sample means informed instead of sample observations, parameter 'sigma' will be necessary" H <- length(h) if(H == 1){stop("only 1 strata defined, try using the BLE_SRS() function")} if(length(ys)!=sum(h)){ if(length(ys)!=length(h)){ stop("length of 'ys' incompatible with 'h'") } else{ # length(ys)==length(h) message(mes_4) if(is.null(sigma)){ stop("not null parameter 'sigma' required") } ys <- rep(ys, h) } } marker <- c(1) # marks where the observations of each strata begin for(i in 2:H){ marker <- c(marker, marker[i-1] + h[i-1]) } if (is.null(m)){ message(mes_1) m <- c(mean(ys[1:h[1]])) if(H > 2){ for(i in 2:(H-1)){ M <- mean(ys[marker[i]:(marker[i+1] - 1)]) m <- c(m, M) } } M <- mean(ys[marker[H] : length(ys)]) m <- c(m, M) } if(is.null(sigma)){ message(mes_2) s <- c(var(ys[1:h[1]])) if(H > 2){ for(i in 2:(H-1)){ S <- var(ys[marker[i]:(marker[i+1] - 1)]) s <- c(s, S) } } S <- var(ys[marker[H] : length(ys)]) s <- c(s, S) sigma <- sqrt(s) } if(is.null(v)){ message(mes_3) v <- c() for(i in 1:H){ V <- 10^100 * m[i] v <- c(v, V) } } for (i in 1:H) { if(v[i] < sigma[i]^2){ stop("prior variance (parameter 'v') too small") } } aux <- rep(1, h[1]) for(i in 2:H){ zero <- rep(0, sum(h)) one <- rep(1, h[i]) aux <- c(aux, zero, one) } xs <- matrix(aux,nrow = sum(h),ncol=H) out <- N-h aux_out <- rep(1, out[1]) for(i in 2:H){ zero <- rep(0, sum(out)) one <- rep(1, out[i]) aux_out <- c(aux_out, zero, one) } x_nots <- matrix(aux_out,nrow = sum(out),ncol=H) Vs <- diag(h[1])*(sigma[1])^2 for (i in 2:H) { V <- diag(h[i])*(sigma[i])^2 Vs <-
bdiag(Vs,V) } Vs <- as.matrix(Vs) k <- N[1] - h[1] V_nots <- diag(k)*(sigma[1])^2 for (i in 2:H) { k <- N[i] - h[i] V <- diag(k)*(sigma[i])^2 V_nots <- bdiag(V_nots,V) } V_nots <- as.matrix(V_nots) a <- m c <- v - sigma^2 R <- c*diag(H) return(BLE_Reg(ys,xs,a,R,Vs,x_nots,V_nots)) } #' Ratio BLE #' #' Creates the Bayes Linear Estimator for the Ratio "estimator" #' #' @param ys vector of sample observations or sample mean (\code{sigma} and \code{n} parameters will be required in this case). #' @param xs vector with values for the auxiliary variable of the elements in the sample or sample mean. #' @param x_nots vector with values for the auxiliary variable of the elements not in the sample. #' @param m prior mean for the ratio between Y and X. If \code{NULL}, \code{mean(ys)/mean(xs)} will be used (non-informative prior). #' @param v prior variance of the ratio between Y and X (bigger than \code{sigma^2}). If \code{NULL}, it will tend to infinity (non-informative prior). #' @param sigma prior estimate of variability (standard deviation) of the ratio within the population. If \code{NULL}, sample variance of the ratio will be used. #' @param n sample size. Necessary only if \code{ys} and \code{xs} represent sample means (will not be used otherwise). #' #' @return A list containing the following components: \itemize{ #' \item \code{est.beta} - BLE of Beta #' \item \code{Vest.beta} - Variance associated with the above #' \item \code{est.mean} - BLE for each individual not in the sample #' \item \code{Vest.mean} - Covariance matrix associated with the above #' \item \code{est.tot} - BLE for the total #' \item \code{Vest.tot} - Variance associated with the above #' } #' #' @source \url{https://www150.statcan.gc.ca/n1/en/catalogue/12-001-X201400111886} #' @references Gonçalves, K.C.M, Moura, F.A.S and Migon, H.S.(2014). Bayes Linear Estimation for Finite Population with emphasis on categorical data. Survey Methodology, 40, 15-28. 
#' #' @examples #' ys <- c(10,8,6) #' xs <- c(5,4,3.1) #' x_nots <- c(1,20,13,15,-5) #' m <- 2.5 #' v <- 10 #' sigma <- 2 #' #' Estimator <- BLE_Ratio(ys, xs, x_nots, m, v, sigma) #' Estimator #' #' #' # Same example but informing sample means and sample size instead of sample observations #' ys <- mean(c(10,8,6)) #' xs <- mean(c(5,4,3.1)) #' n <- 3 #' x_nots <- c(1,20,13,15,-5) #' m <- 2.5 #' v <- 10 #' sigma <- 2 #' #' Estimator <- BLE_Ratio(ys, xs, x_nots, m, v, sigma, n) #' Estimator #' #' @export BLE_Ratio <- function(ys, xs, x_nots, m=NULL, v=NULL, sigma=NULL, n=NULL){ mes_1 <- "parameter 'm' (prior mean) not informed, sample mean used in estimations" mes_2 <- "parameter 'sigma' (prior variability) not informed, sample variance used in estimations" mes_3 <- "parameter 'v' (prior variance of an element) not informed, (10^100 * mean(ys)) used in estimations (non-informative prior)" mes_4 <- "sample means informed instead of sample observations, parameters 'n' and 'sigma' will be necessary" if(length(ys) != length(xs)){ stop("dimensions of ys and xs are different") } if(length(ys)==1){ message(mes_4) if( (is.null(sigma)) | is.null(n) ){ stop("ys of length 1 requires not null parameters 'sigma' and 'n'") } ys <- rep(ys, n) xs <- rep(xs, n) } z <- ys/xs if (is.null(m)){ message(mes_1) m <- mean(ys)/mean(xs) } if(is.null(sigma)){ message(mes_2) sigma <- sqrt(var(z)) } if(is.null(v)){ message(mes_3) v <- 10^100 * mean(ys)} if(v < sigma^2){ stop("prior variance (parameter 'v') too small") } Vs <- diag(xs)*(sigma^2) V_nots <- diag(x_nots)*(sigma^2) a <- m c <- v - sigma^2 R <- c return(BLE_Reg(ys,xs,a,R,Vs,x_nots,V_nots)) }
/scratch/gouwar.j/cran-all/cranData/BayesSampling/R/sample_designs.R
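As the SRS vignette below explains, omitting the prior variance makes `BLE_SRS()` fall back to `v = 10^100 * mean(ys)`, so the prior mean carries almost no weight. A minimal sketch of that limit (package assumed installed):

```r
# Vague-prior limit for BLE_SRS(): even an extreme prior mean is ignored
# when v is omitted, and the estimate collapses to the sample mean.
library(BayesSampling)

ys  <- c(5, 6, 8)
fit <- BLE_SRS(ys, N = 5, m = 100)   # v, sigma omitted -> messages are emitted
c(fit$est.beta$Beta, mean(ys))       # the two values should nearly coincide
```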
## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup-------------------------------------------------------------------- library(BayesSampling) ## ----ex 1, message=TRUE, warning=TRUE----------------------------------------- ys <- c(0.2614, 0.7386) n <- 153 N <- 15288 m <- c(0.7, 0.3) rho <- matrix(0.1, 1) Estimator <- BLE_Categorical(ys,n,N,m,rho) Estimator$est.prop Estimator$Vest.prop ## ----ex 1.2, message=TRUE, warning=TRUE--------------------------------------- ys <- c(0.2614, 0.7386) n <- 153 N <- 15288 m <- c(0.7, 0.3) rho <- matrix(0.5, 1) Estimator <- BLE_Categorical(ys,n,N,m,rho) Estimator$est.prop Estimator$Vest.prop ## ----ex 2, message=TRUE, warning=TRUE----------------------------------------- ys <- c(0.2, 0.5, 0.3) n <- 100 N <- 10000 m <- c(0.4, 0.1, 0.5) mat <- c(0.4, 0.1, 0.1, 0.1, 0.2, 0.1, 0.1, 0.1, 0.6) rho <- matrix(mat, 3, 3) Estimator <- BLE_Categorical(ys,n,N,m,rho) Estimator$est.prop Estimator$Vest.prop ## ----ex 2.2, message=TRUE, warning=TRUE--------------------------------------- ys <- c(0.2, 0.5, 0.3) n <- 100 N <- 10000 m <- c(0.4, 0.1, 0.5) Estimator <- BLE_Categorical(ys,n,N,m,rho=NULL) Estimator$est.prop
/scratch/gouwar.j/cran-all/cranData/BayesSampling/inst/doc/BLE_Categorical.R
--- title: "BLE_Categorical" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{BLE_Categorical} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(BayesSampling) ``` # Application of the BLE to categorical data ### (From Section 4 of the "[Gonçalves, Moura and Migon: Bayes linear estimation for finite population with emphasis on categorical data](https://www150.statcan.gc.ca/n1/en/catalogue/12-001-X201400111886)") In a situation where the population can be divided into different and exclusive categories, we can calculate the Bayes Linear Estimator for the proportion of individuals in each category with the _BLE_Categorical()_ function, which receives the following parameters: * $y_s$ - $k$-vector of sample proportion for each category; * $n$ - sample size; * $N$ - total size of the population; * $m$ - $k$-vector with the prior proportion of each category. If _NULL_, sample proportion for each category will be used (non-informative prior); * $rho$ - matrix with the prior correlation coefficients between two different units within categories. It must be a symmetric square matrix of dimension $k$ (or $k-1$). If _NULL_, non-informative prior will be used (see below). ### Vague Prior Distribution Letting $\rho_{ii} \to 1$, that is, assuming prior ignorance, the resulting point estimate will be the same as the one seen in the design-based context for categorical data.\ This can be achieved using the _BLE_Categorical()_ function by omitting the prior proportions and/or the parameter _rho_, that is: * $m =$ _NULL_ - sample proportions in each category will be used * $rho =$ _NULL_ - $\rho_{ii} \to 1$ and $\rho_{ij} = 0, i \neq j$ ### _R_ and _Vs_ Matrices If the calculation of matrices _R_ and _Vs_ results in non-positive definite matrices, a warning will be displayed. In general this does not produce incorrect or inconsistent results for the proportion estimate, but it may for its associated variance. It is suggested to review the prior correlation coefficients (parameter _rho_). ### Examples 1. Example presented in the mentioned [article](https://www150.statcan.gc.ca/n1/en/catalogue/12-001-X201400111886) (2 categories) ```{r ex 1, message=TRUE, warning=TRUE} ys <- c(0.2614, 0.7386) n <- 153 N <- 15288 m <- c(0.7, 0.3) rho <- matrix(0.1, 1) Estimator <- BLE_Categorical(ys,n,N,m,rho) Estimator$est.prop Estimator$Vest.prop ``` Below we can see that the greater the correlation coefficient, the closer our estimate gets to the sample proportions. ```{r ex 1.2, message=TRUE, warning=TRUE} ys <- c(0.2614, 0.7386) n <- 153 N <- 15288 m <- c(0.7, 0.3) rho <- matrix(0.5, 1) Estimator <- BLE_Categorical(ys,n,N,m,rho) Estimator$est.prop Estimator$Vest.prop ``` 2. Example from the help page (3 categories) ```{r ex 2, message=TRUE, warning=TRUE} ys <- c(0.2, 0.5, 0.3) n <- 100 N <- 10000 m <- c(0.4, 0.1, 0.5) mat <- c(0.4, 0.1, 0.1, 0.1, 0.2, 0.1, 0.1, 0.1, 0.6) rho <- matrix(mat, 3, 3) Estimator <- BLE_Categorical(ys,n,N,m,rho) Estimator$est.prop Estimator$Vest.prop ``` Same example, but with no prior correlation coefficients informed (non-informative prior) ```{r ex 2.2, message=TRUE, warning=TRUE} ys <- c(0.2, 0.5, 0.3) n <- 100 N <- 10000 m <- c(0.4, 0.1, 0.5) Estimator <- BLE_Categorical(ys,n,N,m,rho=NULL) Estimator$est.prop ```
/scratch/gouwar.j/cran-all/cranData/BayesSampling/inst/doc/BLE_Categorical.Rmd
## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup-------------------------------------------------------------------- library(BayesSampling) ## ----ex 1, message=FALSE, warning=FALSE--------------------------------------- data(BigCity) end <- dim(BigCity)[1] s <- seq(from = 1, to = end, by = 1) set.seed(5) samp <- sample(s, size = 10000, replace = FALSE) ordered_samp <- sort(samp) BigCity_red <- BigCity[ordered_samp,] Expend <- BigCity_red$Expenditure Income <- BigCity_red$Income sampl <- sample(seq(1,10000),size=10) ys <- Expend[sampl] xs <- Income[sampl] ## ----ex 1.1------------------------------------------------------------------- mean(Expend/Income) ## ----ex 1.2------------------------------------------------------------------- mean(ys)/mean(xs) ## ----ex 1.3------------------------------------------------------------------- x_nots <- BigCity_red$Income[-sampl] Estimator <- BLE_Ratio(ys, xs, x_nots, m = 0.85, v = 0.24, sigma = sqrt(0.23998)) Estimator$est.beta Estimator$Vest.beta Estimator$est.mean[1:4,] Estimator$Vest.mean[1:5,1:5] Estimator$est.tot ## ----ex 2--------------------------------------------------------------------- ys <- c(10,8,6) xs <- c(5,4,3.1) x_nots <- c(1,20,13,15,-5) m <- 2.5 v <- 10 sigma <- 2 Estimator <- BLE_Ratio(ys, xs, x_nots, m, v, sigma) Estimator ## ----ex 3--------------------------------------------------------------------- ys <- mean(c(10,8,6)) xs <- mean(c(5,4,3.1)) n <- 3 x_nots <- c(1,20,13,15,-5) m <- 2.5 v <- 10 sigma <- 2 Estimator <- BLE_Ratio(ys, xs, x_nots, m, v, sigma, n) Estimator
/scratch/gouwar.j/cran-all/cranData/BayesSampling/inst/doc/BLE_Ratio.R
--- title: "BLE_Ratio" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{BLE_Ratio} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(BayesSampling) ``` # Application of the BLE to the Ratio estimator ### (From Section 3 of the "[Gonçalves, Moura and Migon: Bayes linear estimation for finite population with emphasis on categorical data](https://www150.statcan.gc.ca/n1/en/catalogue/12-001-X201400111886)") In many practical situations, it is possible to have information about an auxiliary variate $x_i$ (correlated with $y_i$) for all the population units, or at least for each unit in the sample, plus the population mean, $\bar X$. In practice, $x_i$ is often the value of $y_i$ at some previous time when a complete census was taken. This approach is used in situations where __the expected value and the variance of $y_i$ are proportional to $x_i$__, so in the BLE setup, we replace some hypotheses about the $y$'s with ones about the first two moments of the rate $y_i$/$x_i$. To the best of our knowledge, the new ratio estimator proposed below is a novel contribution in sampling survey theory. The new ratio estimator is obtained as a particular case of model (2.4) and with the hypothesis of exchangeability, used in the Bayes linear approach, applied to the rate $y_i$/$x_i$ for all $i = 1,..., N$ as described below: \begin{equation} \tag{3.1} E \left( \frac{y_i}{x_i} \right) = m, \hspace{0.7cm} V \left( \frac{y_i}{x_i} \right) = v \hspace{0.7cm} \text{and} \hspace{0.7cm} Cov \left( \frac{y_i}{x_i},\frac{y_j}{x_j} \right) = c, \hspace{0.5cm} i,j = 1,...,N \hspace{0.5cm} \forall i \neq j \end{equation} \par such that: $\sigma^2 = v - c$ ### Application We can apply this with the _BLE_Ratio()_ function, which receives the following parameters: * $y_s$ - either a vector containing the observed values or just the value for the sample mean ($\sigma$ and $n$ parameters will be required in this case); * $x_s$ - either a vector containing the values for the auxiliary variable of the elements in the sample or just the value for the sample mean; * $x_{\bar{s}}$ - a vector containing the values for the auxiliary variable of the elements not in the sample; * $m$ - prior mean for the ratio between $Y$ and $X$. If _NULL_, $\bar{y_s}$/$\bar{x_s}$ will be used (non-informative prior); * $v$ - prior variance of the ratio between $Y$ and $X$ ($> \sigma^2$). If _NULL_, it will tend to infinity (non-informative prior); * $\sigma$ - prior estimate of variability (standard deviation) of the ratio within the population. If _NULL_, sample variance of the ratio will be used; * $n$ - sample size. Necessary only if $y_s$ and $x_s$ represent sample means (will not be used otherwise). ### Vague Prior Distribution Letting $v \to \infty$ and $c \to \infty$, but keeping $\sigma^2$ fixed, that is, assuming prior ignorance, we recover the ratio type estimator, found in the design-based approach: $\hat{T}_{ra} = N \bar{X} (\bar{y}_s / \bar{x}_s)$.\ This can be achieved using the _BLE_Ratio()_ function by omitting either the prior mean or the prior variance, that is: * $m =$ _NULL_ - the ratio between sample means will be used as prior mean * $v =$ _NULL_ - prior variance will tend to infinity ### Examples 1. We will use the TeachingSampling's BigCity dataset for this example (actually we have to take a sample of size $10000$ from this dataset so that R can perform the calculations).
Imagine that we want to estimate the mean or the total Expenditure of this population, using the Income as an auxiliary variable (suppose that we know its value for every individual, maybe from a census). After taking a simple random sample of 10 individuals, we want to estimate the expenditure/income ratio and the total expenditure, conjugating the sample information with an expert's expectation (a priori mean for the ratio will be $0.85$, that is, people from this city expend 85% of their income). ```{r ex 1, message=FALSE, warning=FALSE} data(BigCity) end <- dim(BigCity)[1] s <- seq(from = 1, to = end, by = 1) set.seed(5) samp <- sample(s, size = 10000, replace = FALSE) ordered_samp <- sort(samp) BigCity_red <- BigCity[ordered_samp,] Expend <- BigCity_red$Expenditure Income <- BigCity_red$Income sampl <- sample(seq(1,10000),size=10) ys <- Expend[sampl] xs <- Income[sampl] ``` The real ratio between expenditure and income will be the value we want to estimate. In this example we know its real value: ```{r ex 1.1} mean(Expend/Income) ``` Our design-based estimator for the mean would be the ratio between sample means: ```{r ex 1.2} mean(ys)/mean(xs) ``` Applying the prior information about the ratio we can get a better estimate, especially in cases when only a small sample is available: ```{r ex 1.3} x_nots <- BigCity_red$Income[-sampl] Estimator <- BLE_Ratio(ys, xs, x_nots, m = 0.85, v = 0.24, sigma = sqrt(0.23998)) Estimator$est.beta Estimator$Vest.beta Estimator$est.mean[1:4,] Estimator$Vest.mean[1:5,1:5] Estimator$est.tot ``` 2. Example from the help page ```{r ex 2} ys <- c(10,8,6) xs <- c(5,4,3.1) x_nots <- c(1,20,13,15,-5) m <- 2.5 v <- 10 sigma <- 2 Estimator <- BLE_Ratio(ys, xs, x_nots, m, v, sigma) Estimator ``` 3. Example from the help page, but informing sample means and sample size instead of sample observations ```{r ex 3} ys <- mean(c(10,8,6)) xs <- mean(c(5,4,3.1)) n <- 3 x_nots <- c(1,20,13,15,-5) m <- 2.5 v <- 10 sigma <- 2 Estimator <- BLE_Ratio(ys, xs, x_nots, m, v, sigma, n) Estimator ```
/scratch/gouwar.j/cran-all/cranData/BayesSampling/inst/doc/BLE_Ratio.Rmd
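A quick numeric check of the vague-prior claim in this vignette: with `m` and `v` omitted, the BLE total should match the classical ratio estimator $N \bar{X} (\bar{y}_s/\bar{x}_s)$. The values below are illustrative only (positive auxiliary values assumed, package assumed installed):

```r
library(BayesSampling)

ys <- c(10, 8, 6); xs <- c(5, 4, 3.1)
x_nots <- c(1, 20, 13, 15, 5)
fit <- BLE_Ratio(ys, xs, x_nots)     # messages flag the vague-prior defaults
classical <- (sum(xs) + sum(x_nots)) * mean(ys) / mean(xs)
c(fit$est.tot, classical)            # expected to be nearly identical
```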
## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup-------------------------------------------------------------------- library(BayesSampling) ## ----ex 1--------------------------------------------------------------------- xs <- matrix(c(1,1,1,1,2,3,5,0),nrow=4,ncol=2) ys <- c(12,17,28,2) x_nots <- matrix(c(1,1,1,0,1,4),nrow=3,ncol=2) a <- c(1.5,6) R <- matrix(c(10,2,2,10),nrow=2,ncol=2) Vs <- diag(c(1,1,1,1)) V_nots <- diag(c(1,1,1)) Estimator <- BLE_Reg(ys,xs,a,R,Vs,x_nots,V_nots) Estimator
/scratch/gouwar.j/cran-all/cranData/BayesSampling/inst/doc/BLE_Reg.R
--- title: "BLE_Reg" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{BLE_Reg} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(BayesSampling) ``` # Bayes linear approach to finite population ### (From Section 2.2 of the "[Gonçalves, Moura and Migon: Bayes linear estimation for finite population with emphasis on categorical data](https://www150.statcan.gc.ca/n1/en/catalogue/12-001-X201400111886)") Consider $U = \{u_1, ..., u_N \}$ a finite population with $N$ units. Let $y = (y_1,..., y_N)'$ be the vector with the values of interest of the units in $U$. The response vector $y$ is partitioned into the known observed $n$ - sample vector $y_s$, and the non-observed vector $y_{\bar{s}}$ of dimension $N - n$. The general problem is to predict a function of the vector $y$, such as the total $T = \sum_{i=1}^{N}y_i = 1^{'}_{s} y_s + 1^{'}_{\bar{s}} y_{\bar{s}}$, where $1_s$ and $1_{\bar{s}}$ are the vectors of 1's of dimensions $n$ and $N - n$, respectively. In the model-based approach, this is usually done by assuming a parametric model for the population values $y_i$’s and then obtaining the Empirical Best Linear Unbiased Predictor (EBLUP) for the unknown vector $y_s$ under this model. Usually, the mean square error of the EBLUP of T is obtained by second order approximation, as well as an unbiased estimator of it. See Valliant, Dorfman and Royall (2000), chapter 2, for details. \par The Bayesian approach to finite population prediction often assumes a parametric model, but it aims to find the posterior distribution of T given $y_s$. Point estimates can be obtained by setting a loss function, although in many practical problems, the posterior mean is often considered and its associated variance is given by the posterior variance, i.e.: \begin{equation} \tag{2.3} E(T | y_s) = 1^{'}_s y_s + 1^{'}_{\bar{s}} E(y_{\bar{s}} | y_s) \hspace{0.7cm} \text{and} \hspace{0.7cm} V(T | y_s) = 1^{'}_{\bar{s}} V(y_{\bar{s}} | y_s) 1_{\bar{s}} \end{equation} It is possible to obtain an approximation to the quantities in (2.3) by using a Bayes linear estimation approach. Here, we will particularly obtain the estimators by assuming a __general two-stage hierarchical model for finite population, specified only by its mean and variance-covariance matrix__, presented in Bolfarine and Zacks (1992), page 76. Particular cases describing usual population structures found in practice are easily derived from (2.4). The general model can be written as: \begin{equation} \tag{2.4} Y \hspace{0.1cm} | \hspace{0.1cm} \beta \sim [X \beta, V] \hspace{0.7cm} \text{and} \hspace{0.7cm} \beta \sim [a,R] \end{equation} where $X$ is a covariate matrix of dimension $N \times p$, with rows $X_i = (x_{i1}, ..., x_{ip})$, $i = 1, ..., N$; $\beta = (\beta_1, ..., \beta_p)'$ is a $p \times 1$ vector of unknown parameters; and $y$, given $\beta$, is a random vector with mean $X\beta$ and known covariance matrix $V$ of dimension $N \times N$. Analogously $a$ and $R$ are the respective $p \times 1$ prior mean vector and $p \times p$ prior covariance matrix of $\beta$. Since the response vector $y$ is partitioned into $y_s$ and $y_\bar{s}$, the matrix $X$, which is assumed to be known, is analogously partitioned into $X_s$ and $X_\bar{s}$, and $V$ is partitioned into $V_s$, $V_\bar{s}$, $V_{s \bar{s}}$ and $V_{\bar{s} s}$. 
The first aim is to predict $y_\bar{s}$ given the observed sample $y_s$ and then the total $T$. We did this in the following steps: first, we used a joint prior distribution that is only partially specified in terms of moments, as follows: \begin{equation} \begin{pmatrix} y_{\bar{s}}\\ y_s \end{pmatrix} \Big| \beta \hspace{0.1cm} \sim \hspace{0.1cm} \begin{bmatrix} \begin{pmatrix} X_{\bar{s}} \beta\\ X_s \beta \end{pmatrix},\begin{pmatrix} V_{\bar{s}} & V_{\bar{s} s}\\ V_{s \bar{s}} & V_s \end{pmatrix} \end{bmatrix} \end{equation} ([...](https://www150.statcan.gc.ca/n1/en/catalogue/12-001-X201400111886)) ### Application The _BLE_Reg()_ function will apply this methodology to the given sample, calculate the Bayes Linear Estimator (and its associate variance) to the parameter $\beta$ and for the individuals not in the sample, given the auxiliary variable values. In a simple model the auxiliary variable will have value $1$ for every individual. #### Examples 1. Example from the help page ```{r ex 1} xs <- matrix(c(1,1,1,1,2,3,5,0),nrow=4,ncol=2) ys <- c(12,17,28,2) x_nots <- matrix(c(1,1,1,0,1,4),nrow=3,ncol=2) a <- c(1.5,6) R <- matrix(c(10,2,2,10),nrow=2,ncol=2) Vs <- diag(c(1,1,1,1)) V_nots <- diag(c(1,1,1)) Estimator <- BLE_Reg(ys,xs,a,R,Vs,x_nots,V_nots) Estimator ```
/scratch/gouwar.j/cran-all/cranData/BayesSampling/inst/doc/BLE_Reg.Rmd
## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup-------------------------------------------------------------------- library(BayesSampling) ## ----ex 1, message=FALSE, warning=FALSE--------------------------------------- data(BigCity) set.seed(1) Expend <- sample(BigCity$Expenditure,10000) mean(Expend) #Real mean expenditure value, goal of the estimation ys <- sample(Expend, size = 20, replace = FALSE) ## ----ex 1.1------------------------------------------------------------------- mean(ys) ## ----ex 1.2------------------------------------------------------------------- Estimator <- BLE_SRS(ys, N = 10000, m=300, v=10.1^5, sigma = sqrt(10^5)) Estimator$est.beta Estimator$Vest.beta Estimator$est.mean[1,] Estimator$Vest.mean[1:5,1:5] ## ----ex 2--------------------------------------------------------------------- ys <- c(5,6,8) N <- 5 m <- 6 v <- 5 sigma <- 1 Estimator <- BLE_SRS(ys, N, m, v, sigma) Estimator ## ----ex 3--------------------------------------------------------------------- ys <- mean(c(5,6,8)) n <- 3 N <- 5 m <- 6 v <- 5 sigma <- 1 Estimator <- BLE_SRS(ys, N, m, v, sigma, n) Estimator
/scratch/gouwar.j/cran-all/cranData/BayesSampling/inst/doc/BLE_SRS.R
--- title: "BLE_SRS" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{BLE_SRS} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(BayesSampling) ``` # Application of the BLE to the Simple Random Sample design ### (From Section 2.3.1 of the "[Gonçalves, Moura and Migon: Bayes linear estimation for finite population with emphasis on categorical data](https://www150.statcan.gc.ca/n1/en/catalogue/12-001-X201400111886)") In a simple model, where there is no auxiliary variable, and a Simple Random Sample was taken from the population, we can calculate the Bayes Linear Estimator for the individuals of the population with the _BLE_SRS()_ function, which receives the following parameters: * $y_s$ - either a vector containing the observed values or just the value for the sample mean ($\sigma$ and $n$ parameters will be required in this case); * $N$ - total size of the population; * $m$ - prior mean. If _NULL_, sample mean will be used (non-informative prior); * $v$ - prior variance of an element from the population ($> \sigma^2$). If _NULL_, it will tend to infinity (non-informative prior); * $\sigma$ - prior estimate of variability (standard deviation) within the population. If _NULL_, sample variance will be used; * $n$ - sample size. Necessary only if $y_s$ represent sample mean (will not be used otherwise). ### Vague Prior Distribution Letting $v \to \infty$ and keeping $\sigma^2$ fixed, that is, assuming prior ignorance, the resulting estimator will be the same as the one seen in the design-based context for the simple random sampling case.\ This can be achieved using the _BLE_SRS()_ function by omitting either the prior mean and/or the prior variance, that is: * $m =$ _NULL_ - the sample mean will be used * $v =$ _NULL_ - prior variance will tend to infinity ### Examples 1. We will use the TeachingSampling's BigCity dataset for this example (actually we have to take a sample of size $10000$ from this dataset so that R can perform the calculations). Imagine that we want to estimate the mean or the total Expenditure of this population, after taking a simple random sample of only 20 individuals, but applying a prior information (taken from a previous study or an expert's judgment) about the mean expenditure (a priori mean = $300$). ```{r ex 1, message=FALSE, warning=FALSE} data(BigCity) set.seed(1) Expend <- sample(BigCity$Expenditure,10000) mean(Expend) #Real mean expenditure value, goal of the estimation ys <- sample(Expend, size = 20, replace = FALSE) ``` Our design-based estimator for the mean will be the sample mean: ```{r ex 1.1} mean(ys) ``` Applying the prior information about the population we can get a better estimate, especially in cases when only a small sample is available: ```{r ex 1.2} Estimator <- BLE_SRS(ys, N = 10000, m=300, v=10.1^5, sigma = sqrt(10^5)) Estimator$est.beta Estimator$Vest.beta Estimator$est.mean[1,] Estimator$Vest.mean[1:5,1:5] ``` 2. Example from the help page ```{r ex 2} ys <- c(5,6,8) N <- 5 m <- 6 v <- 5 sigma <- 1 Estimator <- BLE_SRS(ys, N, m, v, sigma) Estimator ``` 3. Example from the help page, but informing sample mean and sample size instead of sample observations ```{r ex 3} ys <- mean(c(5,6,8)) n <- 3 N <- 5 m <- 6 v <- 5 sigma <- 1 Estimator <- BLE_SRS(ys, N, m, v, sigma, n) Estimator ```
/scratch/gouwar.j/cran-all/cranData/BayesSampling/inst/doc/BLE_SRS.Rmd
## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup-------------------------------------------------------------------- library(BayesSampling) ## ----ex 1, message=FALSE, warning=FALSE--------------------------------------- data(BigCity) end <- dim(BigCity)[1] s <- seq(from = 1, to = end, by = 1) set.seed(3) samp <- sample(s, size = 10000, replace = FALSE) ordered_samp <- sort(samp) BigCity_red <- BigCity[ordered_samp,] Rural <- BigCity_red[which(BigCity_red$Zone == "Rural"),] Rural_Exp <- Rural$Expenditure length(Rural_Exp) Rural_ys <- sample(Rural_Exp, size = 30, replace = FALSE) Urban <- BigCity_red[which(BigCity_red$Zone == "Urban"),] Urban_Exp <- Urban$Expenditure length(Urban_Exp) Urban_ys <- sample(Urban_Exp, size = 30, replace = FALSE) ## ----ex 1.1------------------------------------------------------------------- mean(Rural_Exp) mean(Urban_Exp) ## ----ex 1.2------------------------------------------------------------------- mean(Rural_ys) mean(Urban_ys) ## ----ex 1.3------------------------------------------------------------------- ys <- c(Rural_ys, Urban_ys) h <- c(30,30) N <- c(length(Rural_Exp), length(Urban_Exp)) m <- c(280, 420) v=c(4*(10.1^4), 10.1^5) sigma = c(sqrt(4*10^4), sqrt(10^5)) Estimator <- BLE_SSRS(ys, h, N, m, v, sigma) ## ----ex 1.4------------------------------------------------------------------- Estimator$est.beta Estimator$Vest.beta ## ----ex 2--------------------------------------------------------------------- ys <- c(2,-1,1.5, 6,10, 8,8) h <- c(3,2,2) N <- c(5,5,3) m <- c(0,9,8) v <- c(3,8,1) sigma <- c(1,2,0.5) Estimator <- BLE_SSRS(ys, h, N, m, v, sigma) Estimator ## ----ex 3--------------------------------------------------------------------- y1 <- mean(c(2,-1,1.5)) y2 <- mean(c(6,10)) y3 <- mean(c(8,8)) ys <- c(y1, y2, y3) h <- c(3,2,2) N <- c(5,5,3) m <- c(0,9,8) v <- c(3,8,1) sigma <- c(1,2,0.5) Estimator <- BLE_SSRS(ys, h, N, m, v, sigma) Estimator
/scratch/gouwar.j/cran-all/cranData/BayesSampling/inst/doc/BLE_SSRS.R
--- title: "BLE_SSRS" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{BLE_SSRS} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(BayesSampling) ``` # Application of the BLE to the Stratified Simple Random Sample design ### (From Section 2.3.2 of the "[Gonçalves, Moura and Migon: Bayes linear estimation for finite population with emphasis on categorical data](https://www150.statcan.gc.ca/n1/en/catalogue/12-001-X201400111886)") In a simple model, where there is no auxiliary variable, and a Stratified Simple Random Sample was taken from the population, we can calculate the Bayes Linear Estimator for the individuals of each strata of the population with the _BLE_SSRS()_ function, which receives the following parameters: * $y_s$ - a vector containing either the observed values (aggregated by strata) or sample mean for each strata ($\sigma$ parameter will be required in this case); * $h$ - a vector containing the number of observations of each strata in the sample; * $N$ - a vector containing the total size of each strata; * $m$ - a vector containing the prior mean of each strata. If _NULL_, sample mean for each strata will be used (non-informative prior); * $v$ - a vector containing the prior variance of an element from each strata ($v_i> \sigma_i^2$ for each strata $i$). If _NULL_, it will tend to infinity (non-informative prior); * $\sigma$ - a vector containing the prior estimate of variability (standard deviation) within each strata. If _NULL_, sample variance of each strata will be used. ### Examples 1. We will use the TeachingSampling's BigCity dataset for this example (actually we have to take a sample of size $10000$ from this dataset so that R can perform the calculations). Imagine that we want to estimate the mean or the total Expenditure of this population, but we know that there is a difference between the mean expenditure of rural individuals and that of urban ones. After taking a stratified simple random sample of 30 individuals from each area, we want to estimate the real expenditure means, conjugating the sample information with an expert's expectation (a priori mean will be $280$ for the rural area and $420$ for the urban). ```{r ex 1, message=FALSE, warning=FALSE} data(BigCity) end <- dim(BigCity)[1] s <- seq(from = 1, to = end, by = 1) set.seed(3) samp <- sample(s, size = 10000, replace = FALSE) ordered_samp <- sort(samp) BigCity_red <- BigCity[ordered_samp,] Rural <- BigCity_red[which(BigCity_red$Zone == "Rural"),] Rural_Exp <- Rural$Expenditure length(Rural_Exp) Rural_ys <- sample(Rural_Exp, size = 30, replace = FALSE) Urban <- BigCity_red[which(BigCity_red$Zone == "Urban"),] Urban_Exp <- Urban$Expenditure length(Urban_Exp) Urban_ys <- sample(Urban_Exp, size = 30, replace = FALSE) ``` The real expenditure means will be the values we want to estimate.
In this example we know their real values: ```{r ex 1.1} mean(Rural_Exp) mean(Urban_Exp) ``` Our design-based estimator for the mean will be the sample mean for each strata: ```{r ex 1.2} mean(Rural_ys) mean(Urban_ys) ``` Applying the prior information about the population we can get a better estimate, especially in cases when only a small sample is available: ```{r ex 1.3} ys <- c(Rural_ys, Urban_ys) h <- c(30,30) N <- c(length(Rural_Exp), length(Urban_Exp)) m <- c(280, 420) v=c(4*(10.1^4), 10.1^5) sigma = c(sqrt(4*10^4), sqrt(10^5)) Estimator <- BLE_SSRS(ys, h, N, m, v, sigma) ``` Our Bayes Linear Estimator for the mean expenditure of each strata: ```{r ex 1.4} Estimator$est.beta Estimator$Vest.beta ``` 2. Example from the help page ```{r ex 2} ys <- c(2,-1,1.5, 6,10, 8,8) h <- c(3,2,2) N <- c(5,5,3) m <- c(0,9,8) v <- c(3,8,1) sigma <- c(1,2,0.5) Estimator <- BLE_SSRS(ys, h, N, m, v, sigma) Estimator ``` 3. Example from the help page, but informing sample means instead of sample observations ```{r ex 3} y1 <- mean(c(2,-1,1.5)) y2 <- mean(c(6,10)) y3 <- mean(c(8,8)) ys <- c(y1, y2, y3) h <- c(3,2,2) N <- c(5,5,3) m <- c(0,9,8) v <- c(3,8,1) sigma <- c(1,2,0.5) Estimator <- BLE_SSRS(ys, h, N, m, v, sigma) Estimator ```
/scratch/gouwar.j/cran-all/cranData/BayesSampling/inst/doc/BLE_SSRS.Rmd
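The stratified estimator can be read as per-stratum shrinkage: each BLE lies between the stratum sample mean and its prior mean, moving toward the prior as `v` approaches `sigma^2`. A small sketch using the help-page values (package assumed installed):

```r
library(BayesSampling)

ys <- c(2, -1, 1.5, 6, 10, 8, 8); h <- c(3, 2, 2); N <- c(5, 5, 3)
fit <- BLE_SSRS(ys, h, N, m = c(0, 9, 8), v = c(3, 8, 1), sigma = c(1, 2, 0.5))
cbind(sample.mean = tapply(ys, rep(1:3, h), mean),
      prior.mean  = c(0, 9, 8),
      BLE         = fit$est.beta$Beta)   # each BLE sits between the two columns
```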
## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup-------------------------------------------------------------------- library(BayesSampling)
/scratch/gouwar.j/cran-all/cranData/BayesSampling/inst/doc/BayesSampling.R
--- title: "BayesSampling" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{BayesSampling} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(BayesSampling) ``` # Bayes linear estimation for finite population ### (From "[Gonçalves, Moura and Migon: Bayes linear estimation for finite population with emphasis on categorical data](https://www150.statcan.gc.ca/n1/en/catalogue/12-001-X201400111886)") ## 1. Introduction Surveys have long been an important way of obtaining accurate information from a finite population. For instance, governments need to obtain descriptive statistics of the population for purposes of evaluating and implementing their policies. For those concerned with official statistics in the first third of the twentieth century, the major issue was to establish a standard of acceptable practice. Neyman (1934) created such a framework by introducing the role of randomization methods in the sampling process. He advocated the use of the randomization distribution induced by the sampling design to evaluate the frequentist properties of alternative procedures. He also introduced the idea of stratification with optimal sample size allocation and the use of unequal selection probabilities. His work was recognized as the cornerstone of design-based sample survey theory and inspired many other authors. For example, Horvitz and Thompson (1952) proposed a general theory of unequal probability sampling and the probability weighted estimation method, the so-called “Horvitz and Thompson’s estimator”.\ The design-based sample survey theory has been very appealing to official statistics agencies around the world. As pointed out by Skinner, Holt and Smith (1989), page 2, the main reason is that it is essentially distribution-free. Indeed, all advances in survey sampling theory from Neyman onwards have been strongly influenced by the descriptive use of survey sampling. The consequence of this has been a lack of theoretical developments related to the analytic use of surveys, in particular for prediction purposes. __In some specific situations, the design-based approach has proved to be inefficient, providing inadequate predictors. For instance, estimation in small domains and the presence of the non-response cannot be dealt with by the design-based approach without some implicit assumptions, which is equivalent to assuming a model.__ Supporters of the design-based approach argue that model-based inference largely depends on the model assumptions, which might not be true. On the other hand, interval inference for target population parameters (usually totals or means) relies on the Central Limit Theorem, which cannot be applied in many practical situations, where the sample size is not large enough and/or independence assumptions of the random variables involved are not realistic.\ Basu (1971) did not accept estimates of population quantities which depend on the sampling rule, like the inclusion probabilities. He argued that this estimation procedure does not satisfy the likelihood principle, at which he was adept. Basu (1971) created the circus elephant example to show that the Horvitz-Thompson estimator could lead to inappropriate estimates and proposed an alternative estimator. The question that arises is whether it is possible to conciliate both approaches. 
In the superpopulation model context, Zacks (2002) showed that some design-based estimators can be recovered by using a general regression model approach. Little (2003) claims that: “careful model specification sensitive to the survey design can address the concerns with model specifications, and Bayesian statistics provide a coherent and unified treatment of descriptive and analytic survey inference”. He gave some illustrative examples of how __standard design-based inference can be derived from the Bayesian perspective, using some models with non-informative prior distributions.__\ In the Bayesian context, another appealing proposal to conciliate the design-based and model-based approaches was proposed by Smouse (1984). The method incorporates prior information in finite population inference models by relying on Bayesian least squares techniques and requires only the specification of first and second moments of the distributions involved, describing prior knowledge about the structures present in the population. The approach is an alternative to the methods of randomization and appears midway between two extreme views: on the one hand the design-based procedures and on the other those based on superpopulation models. O’Hagan (1985), in an unpublished report, presented the Bayes linear estimators in some specific sample survey contexts and O’Hagan (1987) also derived Bayes linear estimators for some randomized response models. O’Hagan (1985) dealt with several population structures, such as stratification and clustering, by assuming suitable hypotheses about the first and second moments and showed how some common design-based estimators can be obtained as a particular case of his more general approach. He also pointed out that his estimates do not account for non-informative sampling. He quoted Scott (1977) and commented that informative sampling should be carried out by a full Bayesian analysis. An important reference about informative sampling dealing with hierarchical models can be found in Pfeffermann, Moura and Silva (2006).\ ## 2. Bayes linear estimation for finite population The Bayes approach has been found to be successful in many applications, particularly when the data analysis has been improved by expert judgments. But while Bayesian models have many appealing features, their application often involves the full specification of a prior distribution for a large number of parameters. Goldstein and Wooff (2007), section 1.2, argue that as the complexity of the problem increases, our actual ability to fully specify the prior and/or the sampling model in detail is impaired. They conclude that in such situations, there is a need to develop methods based on partial belief specification.\ Hartigan (1969) proposed an estimation method, termed __Bayes linear estimation approach, that only requires the specification of first and second moments__. The resulting estimators have the property of minimizing posterior squared error loss among all estimators that are linear in the data and __can be thought of as approximations to posterior means__. The Bayes linear estimation approach is fully employed in this article and is briefly described below.\ ### 2.1 Bayes linear approach Let $y_s$ be the vector with observations and $\theta$ be the parameter to be estimated. For each value of $\theta$ and each possible estimate $d$, belonging to the parametric space $\Theta$, we associate a quadratic loss function $L(\theta, d) = (\theta - d)' (\theta - d) = tr (\theta - d) (\theta - d)'$. 
The main interest is to find the value of $d$ that minimizes $r(d) = E [L (\theta, d) | y_s]$, the conditional expected value of the quadratic loss function given the data.\ Suppose that the joint distribution of $\theta$ and $y_s$ is partially specified by only their first two moments: \begin{equation} \tag{2.1} \begin{pmatrix} \theta\\ y_s \end{pmatrix} \hspace{0.1cm} \sim \hspace{0.1cm} \begin{bmatrix} \begin{pmatrix} a\\ \text{f} \end{pmatrix},\begin{pmatrix} R & AQ\\ QA^{'} & Q \end{pmatrix} \end{bmatrix} \end{equation} where $a$ and $f$, respectively, denote mean vectors and $R$, $AQ$ and $Q$ the covariance matrix elements of $\theta$ and $y_s$.\ The Bayes linear estimator (BLE) of $\theta$ is the value of $d$ that minimizes the expected value of this quadratic loss function within the class of all linear estimates of the form $d = d(y_s) = h + H y_s$, for some vector $h$ and matrix $H$. Thus, the BLE of $\theta$, $\hat{d}$, and its associated variance, $\hat{V} (\hat{d})$, are respectively given by: \begin{equation} \tag{2.2} \hat{d} = a + A(y_s - \text{f}) \hspace{0.7cm} \text{and} \hspace{0.7cm} \hat{V}(\hat{d}) = R - AQA^{'} \end{equation} __It should be noted that the BLE depends on the specification of the first and second moments of the joint distribution__ partially specified in (2.1). From the Bayes linear approach applied to the general linear regression model for finite population prediction, the paper shows how to obtain some particular design-based estimators, as in simple random sampling and stratified simple random sampling. ## 3. Functions The package contains the following main functions: * BLE_Reg() - general function (base for the rest of the functions) * BLE_SRS() - Simple Random Sample case * BLE_SSRS() - Stratified Simple Random Sample case * BLE_Ratio() - Ratio Estimator case
/scratch/gouwar.j/cran-all/cranData/BayesSampling/inst/doc/BayesSampling.Rmd
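Equation (2.2) above is easy to exercise by hand in the scalar case; the numbers below are made up purely to illustrate the rule:

```r
# Scalar Bayes linear rule (2.2): theta ~ [a, R], ys ~ [f, Q], Cov(theta, ys) = A*Q.
a <- 0;  R <- 4       # prior mean and variance of theta
f <- 0;  Q <- 1       # mean and variance of the observation
A <- 0.8              # implies Cov(theta, ys) = A * Q
ys <- 1.3             # observed value

d_hat <- a + A * (ys - f)   # point estimate
V_hat <- R - A * Q * A      # associated variance
c(d_hat, V_hat)             # 1.04 and 3.36
```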
--- title: "BLE_Categorical" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{BLE_Categorical} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(BayesSampling) ``` # Application of the BLE to categorical data ### (From Section 4 of the "[Gonçalves, Moura and Migon: Bayes linear estimation for finite population with emphasis on categorical data](https://www150.statcan.gc.ca/n1/en/catalogue/12-001-X201400111886)") In a situation where the population can be divided into different and exclusive categories, we can calculate the Bayes Linear Estimator for the proportion of individuals in each category with the _BLE_Categorical()_ function, which receives the following parameters: * $y_s$ - $k$-vector of sample proportion for each category; * $n$ - sample size; * $N$ - total size of the population; * $m$ - $k$-vector with the prior proportion of each category. If _NULL_, sample proportion for each category will be used (non-informative prior); * $rho$ - matrix with the prior correlation coefficients between two different units within categories. It must be a symmetric square matrix of dimension $k$ (or $k-1$). If _NULL_, non-informative prior will be used (see below). ### Vague Prior Distribution Letting $\rho_{ii} \to 1$, that is, assuming prior ignorance, the resulting point estimate will be the same as the one seen in the design-based context for categorical data.\ This can be achieved using the _BLE_Categorical()_ function by omitting the prior proportions and/or the parameter _rho_, that is: * $m =$ _NULL_ - sample proportions in each category will be used * $rho =$ _NULL_ - $\rho_{ii} \to 1$ and $\rho_{ij} = 0, i \neq j$ ### _R_ and _Vs_ Matrices If the calculation of matrices _R_ and _Vs_ results in non-positive definite matrices, a warning will be displayed. In general this does not produce incorrect or inconsistent results for the proportion estimate, but it may for its associated variance. It is suggested to review the prior correlation coefficients (parameter _rho_). ### Examples 1. Example presented in the mentioned [article](https://www150.statcan.gc.ca/n1/en/catalogue/12-001-X201400111886) (2 categories) ```{r ex 1, message=TRUE, warning=TRUE} ys <- c(0.2614, 0.7386) n <- 153 N <- 15288 m <- c(0.7, 0.3) rho <- matrix(0.1, 1) Estimator <- BLE_Categorical(ys,n,N,m,rho) Estimator$est.prop Estimator$Vest.prop ``` Below we can see that the greater the correlation coefficient, the closer our estimate gets to the sample proportions. ```{r ex 1.2, message=TRUE, warning=TRUE} ys <- c(0.2614, 0.7386) n <- 153 N <- 15288 m <- c(0.7, 0.3) rho <- matrix(0.5, 1) Estimator <- BLE_Categorical(ys,n,N,m,rho) Estimator$est.prop Estimator$Vest.prop ``` 2. Example from the help page (3 categories) ```{r ex 2, message=TRUE, warning=TRUE} ys <- c(0.2, 0.5, 0.3) n <- 100 N <- 10000 m <- c(0.4, 0.1, 0.5) mat <- c(0.4, 0.1, 0.1, 0.1, 0.2, 0.1, 0.1, 0.1, 0.6) rho <- matrix(mat, 3, 3) Estimator <- BLE_Categorical(ys,n,N,m,rho) Estimator$est.prop Estimator$Vest.prop ``` Same example, but with no prior correlation coefficients informed (non-informative prior) ```{r ex 2.2, message=TRUE, warning=TRUE} ys <- c(0.2, 0.5, 0.3) n <- 100 N <- 10000 m <- c(0.4, 0.1, 0.5) Estimator <- BLE_Categorical(ys,n,N,m,rho=NULL) Estimator$est.prop ```
/scratch/gouwar.j/cran-all/cranData/BayesSampling/vignettes/BLE_Categorical.Rmd
--- title: "BLE_Ratio" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{BLE_Ratio} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(BayesSampling) ``` # Application of the BLE to the Ratio estimator ### (From Section 3 of the "[Gonçalves, Moura and Migon: Bayes linear estimation for finite population with emphasis on categorical data](https://www150.statcan.gc.ca/n1/en/catalogue/12-001-X201400111886)") In many practical situations, it is possible to have information about an auxiliary variate $x_i$ (correlated with $y_i$) for all the population units, or at least for each unit in the sample, plus the population mean, $\bar X$. In practice, $x_i$ is often the value of $y_i$ at some previous time when a complete census was taken. This approach is used in situations where __the expected value and the variance of $y_i$ is proportional to $x_i$__, so in the BLE setup, we replace some hypotheses about the $y$'s with ones about the first two moments of the rate $y_i$/$x_i$. To the best of our knowledge, the new ratio estimator proposed below is a novel contribution in sampling survey theory. The new ratio estimator is obtained as a particular case of model (2.4) and with the hypothesis of exchangeability, used in Bayes linear approach, applied to the rate $y_i$/$x_i$ for all $i = 1,..., N$ as described below: \begin{equation} \tag{3.1} E \left( \frac{y_i}{x_i} \right) = m, \hspace{0.7cm} V \left( \frac{y_i}{x_i} \right) = v \hspace{0.7cm} \text{and} \hspace{0.7cm} Cov \left( \frac{y_i}{x_i},\frac{y_j}{x_j} \right) = c, \hspace{0.5cm} i,j = 1,...,N \hspace{0.5cm} \forall i \neq j \end{equation} \par such that: $\sigma^2 = v - c$ ### Application We can apply this with the _BLE_Ratio()_ function, which receives the following parameters: * $y_s$ - either a vector containing the observed values or just the value for the sample mean ($\sigma$ and $n$ parameters will be required in this case); * $x_s$ - either a vector containing the values for the auxiliary variable of the elements in the sample or just the value for the sample mean; * $x_{\bar{s}}$ - a vector containing the values for the auxiliary variable of the elements not in the sample; * $m$ - prior mean for the ratio between $Y$ and $X$. If _NULL_, $\bar{y_s}$/$\bar{x_s}$ will be used (non-informative prior); * $v$ - prior variance of the ratio between $Y$ and $X$ ($> \sigma^2$). If _NULL_, it will tend to infinity (non-informative prior); * $\sigma$ - prior estimate of variability (standard deviation) of the ratio within the population. If _NULL_, sample variance of the ratio will be used; * $n$ - sample size. Necessary only if $y_s$ and $x_s$ represent sample means (will not be used otherwise). ### Vague Prior Distribution Letting $v \to \infty$ and $v \to \infty$, but keeping $\sigma^2$ fixed, that is, assuming prior ignorance, we recover the ratio type estimator, found in the design-based approach: $\hat{T}_{ra} = N \bar{X} (\bar{y}_s / \bar{x}_s)$.\ This can be achieved using the _BLE_SRS()_ function by omitting either the prior mean or the prior variance, that is: * $m =$ _NULL_ - the ratio between sample means will be used as prior mean * $v =$ _NULL_ - prior variance will tend to infinity ### Examples 1. We will use the TeachingSampling's BigCity dataset for this example (actually we have to take a sample of size $10000$ from this dataset so that R can perform the calculations). 
Imagine that we want to estimate the mean or the total Expenditure of this population, using the Income as an auxiliary variable (suppose that we know its value for every individual, maybe from a census). After taking a simple random sample of 10 individuals, we want to estimate the expenditure/income ratio and the total expenditure, combining the sample information with an expert's expectation (the a priori mean for the ratio will be $0.85$, that is, people from this city spend 85% of their income).

```{r ex 1, message=FALSE, warning=FALSE}
data(BigCity)
end <- dim(BigCity)[1]
s <- seq(from = 1, to = end, by = 1)
set.seed(5)
samp <- sample(s, size = 10000, replace = FALSE)
ordered_samp <- sort(samp)
BigCity_red <- BigCity[ordered_samp,]

Expend <- BigCity_red$Expenditure
Income <- BigCity_red$Income

sampl <- sample(seq(1,10000),size=10)
ys <- Expend[sampl]
xs <- Income[sampl]
```

The real ratio between expenditure and income will be the value we want to estimate. In this example we know its real value:

```{r ex 1.1}
mean(Expend/Income)
```

Our design-based estimator for the ratio would be the ratio between sample means:

```{r ex 1.2}
mean(ys)/mean(xs)
```

Applying the prior information about the ratio we can get a better estimate, especially in cases when only a small sample is available:

```{r ex 1.3}
x_nots <- BigCity_red$Income[-sampl]
Estimator <- BLE_Ratio(ys, xs, x_nots, m = 0.85, v = 0.24, sigma = sqrt(0.23998))

Estimator$est.beta
Estimator$Vest.beta
Estimator$est.mean[1:4,]
Estimator$Vest.mean[1:5,1:5]
Estimator$est.tot
```

2. Example from the help page

```{r ex 2}
ys <- c(10,8,6)
xs <- c(5,4,3.1)
x_nots <- c(1,20,13,15,-5)
m <- 2.5
v <- 10
sigma <- 2

Estimator <- BLE_Ratio(ys, xs, x_nots, m, v, sigma)
Estimator
```

3. Example from the help page, but informing sample means and sample size instead of sample observations

```{r ex 3}
ys <- mean(c(10,8,6))
xs <- mean(c(5,4,3.1))
n <- 3
x_nots <- c(1,20,13,15,-5)
m <- 2.5
v <- 10
sigma <- 2

Estimator <- BLE_Ratio(ys, xs, x_nots, m, v, sigma, n)
Estimator
```
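As promised above, here is a minimal sketch (reusing the help-page data) of the vague-prior call: with _m_ and _v_ omitted, the point estimate reduces to the design-based ratio-type estimator.

```{r vague prior}
ys <- c(10,8,6)
xs <- c(5,4,3.1)
x_nots <- c(1,20,13,15,-5)

# m = NULL uses mean(ys)/mean(xs) as the prior mean; v = NULL lets the prior
# variance tend to infinity; sigma = NULL uses the sample variance of the ratio.
Estimator <- BLE_Ratio(ys, xs, x_nots, m = NULL, v = NULL, sigma = NULL)
Estimator$est.beta
```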
/scratch/gouwar.j/cran-all/cranData/BayesSampling/vignettes/BLE_Ratio.Rmd
--- title: "BLE_Reg" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{BLE_Reg} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(BayesSampling) ``` # Bayes linear approach to finite population ### (From Section 2.2 of the "[Gonçalves, Moura and Migon: Bayes linear estimation for finite population with emphasis on categorical data](https://www150.statcan.gc.ca/n1/en/catalogue/12-001-X201400111886)") Consider $U = \{u_1, ..., u_N \}$ a finite population with $N$ units. Let $y = (y_1,..., y_N)'$ be the vector with the values of interest of the units in $U$. The response vector $y$ is partitioned into the known observed $n$ - sample vector $y_s$, and the non-observed vector $y_{\bar{s}}$ of dimension $N - n$. The general problem is to predict a function of the vector $y$, such as the total $T = \sum_{i=1}^{N}y_i = 1^{'}_{s} y_s + 1^{'}_{\bar{s}} y_{\bar{s}}$, where $1_s$ and $1_{\bar{s}}$ are the vectors of 1's of dimensions $n$ and $N - n$, respectively. In the model-based approach, this is usually done by assuming a parametric model for the population values $y_i$’s and then obtaining the Empirical Best Linear Unbiased Predictor (EBLUP) for the unknown vector $y_s$ under this model. Usually, the mean square error of the EBLUP of T is obtained by second order approximation, as well as an unbiased estimator of it. See Valliant, Dorfman and Royall (2000), chapter 2, for details. \par The Bayesian approach to finite population prediction often assumes a parametric model, but it aims to find the posterior distribution of T given $y_s$. Point estimates can be obtained by setting a loss function, although in many practical problems, the posterior mean is often considered and its associated variance is given by the posterior variance, i.e.: \begin{equation} \tag{2.3} E(T | y_s) = 1^{'}_s y_s + 1^{'}_{\bar{s}} E(y_{\bar{s}} | y_s) \hspace{0.7cm} \text{and} \hspace{0.7cm} V(T | y_s) = 1^{'}_{\bar{s}} V(y_{\bar{s}} | y_s) 1_{\bar{s}} \end{equation} It is possible to obtain an approximation to the quantities in (2.3) by using a Bayes linear estimation approach. Here, we will particularly obtain the estimators by assuming a __general two-stage hierarchical model for finite population, specified only by its mean and variance-covariance matrix__, presented in Bolfarine and Zacks (1992), page 76. Particular cases describing usual population structures found in practice are easily derived from (2.4). The general model can be written as: \begin{equation} \tag{2.4} Y \hspace{0.1cm} | \hspace{0.1cm} \beta \sim [X \beta, V] \hspace{0.7cm} \text{and} \hspace{0.7cm} \beta \sim [a,R] \end{equation} where $X$ is a covariate matrix of dimension $N \times p$, with rows $X_i = (x_{i1}, ..., x_{ip})$, $i = 1, ..., N$; $\beta = (\beta_1, ..., \beta_p)'$ is a $p \times 1$ vector of unknown parameters; and $y$, given $\beta$, is a random vector with mean $X\beta$ and known covariance matrix $V$ of dimension $N \times N$. Analogously $a$ and $R$ are the respective $p \times 1$ prior mean vector and $p \times p$ prior covariance matrix of $\beta$. Since the response vector $y$ is partitioned into $y_s$ and $y_\bar{s}$, the matrix $X$, which is assumed to be known, is analogously partitioned into $X_s$ and $X_\bar{s}$, and $V$ is partitioned into $V_s$, $V_\bar{s}$, $V_{s \bar{s}}$ and $V_{\bar{s} s}$. 
The first aim is to predict $y_{\bar{s}}$ given the observed sample $y_s$, and then the total $T$. This is done in the following steps: first, we use a joint prior distribution that is only partially specified in terms of moments, as follows:

\begin{equation}
\begin{pmatrix} y_{\bar{s}}\\ y_s \end{pmatrix} \Big| \beta \hspace{0.1cm} \sim \hspace{0.1cm} \begin{bmatrix} \begin{pmatrix} X_{\bar{s}} \beta\\ X_s \beta \end{pmatrix},\begin{pmatrix} V_{\bar{s}} & V_{\bar{s} s}\\ V_{s \bar{s}} & V_s \end{pmatrix} \end{bmatrix}
\end{equation}

([...](https://www150.statcan.gc.ca/n1/en/catalogue/12-001-X201400111886))

### Application

The _BLE_Reg()_ function applies this methodology to the given sample, calculating the Bayes Linear Estimator (and its associated variance) of the parameter $\beta$ and of the response values for the individuals not in the sample, given the auxiliary variable values. In a simple model, the auxiliary variable has value $1$ for every individual (a sketch of this case is given after the example below).

#### Examples

1. Example from the help page

```{r ex 1}
xs <- matrix(c(1,1,1,1,2,3,5,0),nrow=4,ncol=2)
ys <- c(12,17,28,2)
x_nots <- matrix(c(1,1,1,0,1,4),nrow=3,ncol=2)
a <- c(1.5,6)
R <- matrix(c(10,2,2,10),nrow=2,ncol=2)
Vs <- diag(c(1,1,1,1))
V_nots <- diag(c(1,1,1))

Estimator <- BLE_Reg(ys,xs,a,R,Vs,x_nots,V_nots)
Estimator
```
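As mentioned above, in the simplest model every unit has auxiliary value $1$, so $X_s$ and $X_{\bar{s}}$ reduce to columns of ones and $\beta$ is the scalar population level. The sketch below illustrates this case; the prior values `a` and `R` are illustrative assumptions, not taken from the article.

```{r intercept only}
ys <- c(12,17,28,2)
xs <- matrix(1, nrow = 4, ncol = 1)      # X_s: column of ones
x_nots <- matrix(1, nrow = 3, ncol = 1)  # X_s-bar: column of ones
a <- 10                                  # assumed prior mean of beta
R <- matrix(25)                          # assumed prior variance of beta
Vs <- diag(4)
V_nots <- diag(3)

Estimator <- BLE_Reg(ys, xs, a, R, Vs, x_nots, V_nots)
Estimator
```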
/scratch/gouwar.j/cran-all/cranData/BayesSampling/vignettes/BLE_Reg.Rmd
--- title: "BLE_SRS" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{BLE_SRS} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(BayesSampling) ``` # Application of the BLE to the Simple Random Sample design ### (From Section 2.3.1 of the "[Gonçalves, Moura and Migon: Bayes linear estimation for finite population with emphasis on categorical data](https://www150.statcan.gc.ca/n1/en/catalogue/12-001-X201400111886)") In a simple model, where there is no auxiliary variable, and a Simple Random Sample was taken from the population, we can calculate the Bayes Linear Estimator for the individuals of the population with the _BLE_SRS()_ function, which receives the following parameters: * $y_s$ - either a vector containing the observed values or just the value for the sample mean ($\sigma$ and $n$ parameters will be required in this case); * $N$ - total size of the population; * $m$ - prior mean. If _NULL_, sample mean will be used (non-informative prior); * $v$ - prior variance of an element from the population ($> \sigma^2$). If _NULL_, it will tend to infinity (non-informative prior); * $\sigma$ - prior estimate of variability (standard deviation) within the population. If _NULL_, sample variance will be used; * $n$ - sample size. Necessary only if $y_s$ represent sample mean (will not be used otherwise). ### Vague Prior Distribution Letting $v \to \infty$ and keeping $\sigma^2$ fixed, that is, assuming prior ignorance, the resulting estimator will be the same as the one seen in the design-based context for the simple random sampling case.\ This can be achieved using the _BLE_SRS()_ function by omitting either the prior mean and/or the prior variance, that is: * $m =$ _NULL_ - the sample mean will be used * $v =$ _NULL_ - prior variance will tend to infinity ### Examples 1. We will use the TeachingSampling's BigCity dataset for this example (actually we have to take a sample of size $10000$ from this dataset so that R can perform the calculations). Imagine that we want to estimate the mean or the total Expenditure of this population, after taking a simple random sample of only 20 individuals, but applying a prior information (taken from a previous study or an expert's judgment) about the mean expenditure (a priori mean = $300$). ```{r ex 1, message=FALSE, warning=FALSE} data(BigCity) set.seed(1) Expend <- sample(BigCity$Expenditure,10000) mean(Expend) #Real mean expenditure value, goal of the estimation ys <- sample(Expend, size = 20, replace = FALSE) ``` Our design-based estimator for the mean will be the sample mean: ```{r ex 1.1} mean(ys) ``` Applying the prior information about the population we can get a better estimate, especially in cases when only a small sample is available: ```{r ex 1.2} Estimator <- BLE_SRS(ys, N = 10000, m=300, v=10.1^5, sigma = sqrt(10^5)) Estimator$est.beta Estimator$Vest.beta Estimator$est.mean[1,] Estimator$Vest.mean[1:5,1:5] ``` 2. Example from the help page ```{r ex 2} ys <- c(5,6,8) N <- 5 m <- 6 v <- 5 sigma <- 1 Estimator <- BLE_SRS(ys, N, m, v, sigma) Estimator ``` 3. Example from the help page, but informing sample mean and sample size instead of sample observations ```{r ex 3} ys <- mean(c(5,6,8)) n <- 3 N <- 5 m <- 6 v <- 5 sigma <- 1 Estimator <- BLE_SRS(ys, N, m, v, sigma, n) Estimator ```
/scratch/gouwar.j/cran-all/cranData/BayesSampling/vignettes/BLE_SRS.Rmd
--- title: "BLE_SSRS" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{BLE_SSRS} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(BayesSampling) ``` # Application of the BLE to the Stratified Simple Random Sample design ### (From Section 2.3.2 of the "[Gonçalves, Moura and Migon: Bayes linear estimation for finite population with emphasis on categorical data](https://www150.statcan.gc.ca/n1/en/catalogue/12-001-X201400111886)") In a simple model, where there is no auxiliary variable, and a Stratified Simple Random Sample was taken from the population, we can calculate the Bayes Linear Estimator for the individuals of each strata of the population with the _BLE_SSRS()_ function, which receives the following parameters: * $y_s$ - a vector containing either the observed values (aggregated by strata) or sample mean for each strata ($\sigma$ parameter will be required in this case); * $h$ - a vector containing the number of observations of each strata in the sample; * $N$ - a vector containing the total size of each strata; * $m$ - a vector containing the prior mean of each strata. If _NULL_, sample mean for each strata will be used (non-informative prior); * $v$ - a vector containing the prior variance of an element from each strata ($v_i> \sigma_i^2$ for each strata $i$). If _NULL_, it will tend to infinity (non-informative prior); * $\sigma$ - a vector containing the prior estimate of variability (standard deviation) within each strata. If _NULL_, sample variance of each strata will be used. ### Examples 1. We will use the TeachingSampling's BigCity dataset for this example (actually we have to take a sample of size $10000$ from this dataset so that R can perform the calculations). Imagine that we want to estimate the mean or the total Expenditure of this population, but we know that there is a difference between the rural individuals expenditure mean and the urban ones. After taking a stratified simple random sample of 30 individuals from each area, we want to estimate the real expenditure means, conjugating the sample information with an expert expectation (a priori mean will be $280$ for the rural area and $420$ for the urban). ```{r ex 1, message=FALSE, warning=FALSE} data(BigCity) end <- dim(BigCity)[1] s <- seq(from = 1, to = end, by = 1) set.seed(3) samp <- sample(s, size = 10000, replace = FALSE) ordered_samp <- sort(samp) BigCity_red <- BigCity[ordered_samp,] Rural <- BigCity_red[which(BigCity_red$Zone == "Rural"),] Rural_Exp <- Rural$Expenditure length(Rural_Exp) Rural_ys <- sample(Rural_Exp, size = 30, replace = FALSE) Urban <- BigCity_red[which(BigCity_red$Zone == "Urban"),] Urban_Exp <- Urban$Expenditure length(Urban_Exp) Urban_ys <- sample(Urban_Exp, size = 30, replace = FALSE) ``` The real expenditure means will be the values we want to estimate. 
In this example we know their real values:

```{r ex 1.1}
mean(Rural_Exp)
mean(Urban_Exp)
```

Our design-based estimator for the mean will be the sample mean of each stratum:

```{r ex 1.2}
mean(Rural_ys)
mean(Urban_ys)
```

Applying the prior information about the population we can get a better estimate, especially in cases when only a small sample is available:

```{r ex 1.3}
ys <- c(Rural_ys, Urban_ys)
h <- c(30,30)
N <- c(length(Rural_Exp), length(Urban_Exp))
m <- c(280, 420)
v <- c(4 * (10.1^4), 10.1^5)
sigma <- c(sqrt(4 * 10^4), sqrt(10^5))

Estimator <- BLE_SSRS(ys, h, N, m, v, sigma)
```

Our Bayes Linear Estimator for the mean expenditure of each stratum:

```{r ex 1.4}
Estimator$est.beta
Estimator$Vest.beta
```

2. Example from the help page

```{r ex 2}
ys <- c(2,-1,1.5, 6,10, 8,8)
h <- c(3,2,2)
N <- c(5,5,3)
m <- c(0,9,8)
v <- c(3,8,1)
sigma <- c(1,2,0.5)

Estimator <- BLE_SSRS(ys, h, N, m, v, sigma)
Estimator
```

3. Example from the help page, but informing sample means instead of sample observations

```{r ex 3}
y1 <- mean(c(2,-1,1.5))
y2 <- mean(c(6,10))
y3 <- mean(c(8,8))
ys <- c(y1, y2, y3)
h <- c(3,2,2)
N <- c(5,5,3)
m <- c(0,9,8)
v <- c(3,8,1)
sigma <- c(1,2,0.5)

Estimator <- BLE_SSRS(ys, h, N, m, v, sigma)
Estimator
```
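Analogously to the other designs, the sketch below (reusing the help-page data; the _NULL_ behavior follows the parameter descriptions above) shows the non-informative call: with _m_ and _v_ omitted, the estimates reduce to the stratum sample means.

```{r vague prior}
ys <- c(2,-1,1.5, 6,10, 8,8)
h <- c(3,2,2)
N <- c(5,5,3)

# m = NULL uses the sample mean of each stratum; v = NULL lets the prior
# variances tend to infinity; sigma = NULL uses the sample variances.
Estimator <- BLE_SSRS(ys, h, N, m = NULL, v = NULL, sigma = NULL)
Estimator$est.beta
```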
/scratch/gouwar.j/cran-all/cranData/BayesSampling/vignettes/BLE_SSRS.Rmd
--- title: "BayesSampling" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{BayesSampling} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(BayesSampling) ``` # Bayes linear estimation for finite population ### (From "[Gonçalves, Moura and Migon: Bayes linear estimation for finite population with emphasis on categorical data](https://www150.statcan.gc.ca/n1/en/catalogue/12-001-X201400111886)") ## 1. Introduction Surveys have long been an important way of obtaining accurate information from a finite population. For instance, governments need to obtain descriptive statistics of the population for purposes of evaluating and implementing their policies. For those concerned with official statistics in the first third of the twentieth century, the major issue was to establish a standard of acceptable practice. Neyman (1934) created such a framework by introducing the role of randomization methods in the sampling process. He advocated the use of the randomization distribution induced by the sampling design to evaluate the frequentist properties of alternative procedures. He also introduced the idea of stratification with optimal sample size allocation and the use of unequal selection probabilities. His work was recognized as the cornerstone of design-based sample survey theory and inspired many other authors. For example, Horvitz and Thompson (1952) proposed a general theory of unequal probability sampling and the probability weighted estimation method, the so-called “Horvitz and Thompson’s estimator”.\ The design-based sample survey theory has been very appealing to official statistics agencies around the world. As pointed out by Skinner, Holt and Smith (1989), page 2, the main reason is that it is essentially distribution-free. Indeed, all advances in survey sampling theory from Neyman onwards have been strongly influenced by the descriptive use of survey sampling. The consequence of this has been a lack of theoretical developments related to the analytic use of surveys, in particular for prediction purposes. __In some specific situations, the design-based approach has proved to be inefficient, providing inadequate predictors. For instance, estimation in small domains and the presence of the non-response cannot be dealt with by the design-based approach without some implicit assumptions, which is equivalent to assuming a model.__ Supporters of the design-based approach argue that model-based inference largely depends on the model assumptions, which might not be true. On the other hand, interval inference for target population parameters (usually totals or means) relies on the Central Limit Theorem, which cannot be applied in many practical situations, where the sample size is not large enough and/or independence assumptions of the random variables involved are not realistic.\ Basu (1971) did not accept estimates of population quantities which depend on the sampling rule, like the inclusion probabilities. He argued that this estimation procedure does not satisfy the likelihood principle, at which he was adept. Basu (1971) created the circus elephant example to show that the Horvitz-Thompson estimator could lead to inappropriate estimates and proposed an alternative estimator. The question that arises is whether it is possible to conciliate both approaches. 
In the superpopulation model context, Zacks (2002) showed that some design-based estimators can be recovered by using a general regression model approach. Little (2003) claims that: “careful model specification sensitive to the survey design can address the concerns with model specifications, and Bayesian statistics provide a coherent and unified treatment of descriptive and analytic survey inference”. He gave some illustrative examples of how __standard design-based inference can be derived from the Bayesian perspective, using some models with non-informative prior distributions.__\
In the Bayesian context, another appealing proposal to reconcile the design-based and model-based approaches was made by Smouse (1984). The method incorporates prior information in finite population inference models by relying on Bayesian least squares techniques and requires only the specification of first and second moments of the distributions involved, describing prior knowledge about the structures present in the population. The approach is an alternative to the methods of randomization and appears midway between two extreme views: on the one hand the design-based procedures and on the other those based on superpopulation models. O’Hagan (1985), in an unpublished report, presented the Bayes linear estimators in some specific sample survey contexts and O’Hagan (1987) also derived Bayes linear estimators for some randomized response models. O’Hagan (1985) dealt with several population structures, such as stratification and clustering, by assuming suitable hypotheses about the first and second moments and showed how some common design-based estimators can be obtained as particular cases of his more general approach. He also pointed out that his estimates do not account for informative sampling. He quoted Scott (1977) and commented that informative sampling should be handled by a full Bayesian analysis. An important reference about informative sampling dealing with hierarchical models can be found in Pfeffermann, Moura and Silva (2006).\

## 2. Bayes linear estimation for finite population

The Bayes approach has been found to be successful in many applications, particularly when the data analysis has been improved by expert judgments. But while Bayesian models have many appealing features, their application often involves the full specification of a prior distribution for a large number of parameters. Goldstein and Wooff (2007), section 1.2, argue that as the complexity of the problem increases, our actual ability to fully specify the prior and/or the sampling model in detail is impaired. They conclude that in such situations, there is a need to develop methods based on partial belief specification.\
Hartigan (1969) proposed an estimation method, termed the __Bayes linear estimation approach, which only requires the specification of first and second moments__. The resulting estimators have the property of minimizing the posterior squared error loss among all estimators that are linear in the data and __can be thought of as approximations to posterior means__. The Bayes linear estimation approach is fully employed in this article and is briefly described below.\

### 2.1 Bayes linear approach

Let $y_s$ be the vector with observations and $\theta$ be the parameter to be estimated. For each value of $\theta$ and each possible estimate $d$, belonging to the parametric space $\Theta$, we associate a quadratic loss function $L(\theta, d) = (\theta - d)' (\theta - d) = \text{tr} \, (\theta - d) (\theta - d)'$.
The main interest is to find the value of $d$ that minimizes $r(d) = E [L (\theta, d) | y_s]$, the conditional expected value of the quadratic loss function given the data.\
Suppose that the joint distribution of $\theta$ and $y_s$ is partially specified by only their first two moments:

\begin{equation} \tag{2.1}
\begin{pmatrix} \theta\\ y_s \end{pmatrix} \hspace{0.1cm} \sim \hspace{0.1cm} \begin{bmatrix} \begin{pmatrix} a\\ f \end{pmatrix},\begin{pmatrix} R & AQ\\ QA^{'} & Q \end{pmatrix} \end{bmatrix}
\end{equation}

where $a$ and $f$ denote the respective mean vectors of $\theta$ and $y_s$, and $R$, $AQ$ and $Q$ the elements of their joint covariance matrix.\
The Bayes linear estimator (BLE) of $\theta$ is the value of $d$ that minimizes the expected value of this quadratic loss function within the class of all linear estimates of the form $d = d(y_s) = h + H y_s$, for some vector $h$ and matrix $H$. Thus, the BLE of $\theta$, $\hat{d}$, and its associated variance, $\hat{V} (\hat{d})$, are respectively given by:

\begin{equation} \tag{2.2}
\hat{d} = a + A(y_s - f) \hspace{0.7cm} \text{and} \hspace{0.7cm} \hat{V}(\hat{d}) = R - AQA^{'}
\end{equation}

__It should be noted that the BLE depends on the specification of the first and second moments of the joint distribution__ partially specified in (2.1). From the Bayes linear approach applied to the general linear regression model for finite population prediction, the paper shows how to obtain some particular design-based estimators, such as those for simple random sampling and stratified simple random sampling.

## 3. Functions

The package contains the following main functions:

* BLE_Reg() - general function (base for the rest of the functions)
* BLE_SRS() - Simple Random Sample case
* BLE_SSRS() - Stratified Simple Random Sample case
* BLE_Ratio() - Ratio Estimator case
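To make (2.2) concrete, the sketch below computes the BLE and its variance directly from the partial moment specification in (2.1). The `ble()` helper and the moment values are illustrative assumptions for this vignette, not part of the package.

```{r ble sketch}
# Bayes linear estimator from first and second moments, per (2.2):
#   d_hat = a + A (ys - f)   and   V_hat = R - A Q A'
ble <- function(a, f, A, R, Q, ys) {
  d_hat <- a + A %*% (ys - f)
  V_hat <- R - A %*% Q %*% t(A)
  list(est = d_hat, var = V_hat)
}

# Illustrative (assumed) moments: scalar theta, two observations.
a <- 1
f <- c(1, 1)
A <- matrix(c(0.3, 0.3), nrow = 1)  # Cov(theta, ys) = AQ
R <- matrix(2)
Q <- diag(2)

ble(a, f, A, R, Q, ys = c(1.5, 0.5))
```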
/scratch/gouwar.j/cran-all/cranData/BayesSampling/vignettes/BayesSampling.Rmd
#' The 'BayesSenMC' package.
#'
#' @description This package generates posterior distributions of the adjusted odds ratio under different priors on sensitivity and specificity, and plots the resulting models for comparison. It also provides estimates of the model specifications, using diagnostic data on exposure status in a non-linear mixed-effects model.
#'
#' @docType package
#' @name BayesSenMC-package
#' @aliases BayesSenMC
#' @useDynLib BayesSenMC, .registration = TRUE
#' @import methods
#' @import Rcpp
#' @importFrom rstan sampling
#'
#' @references
#' Stan Development Team (2020). RStan: the R interface to Stan. R package version 2.21.2. https://mc-stan.org
#'
NULL
/scratch/gouwar.j/cran-all/cranData/BayesSenMC/R/BayesSenMC-package.R