#FIXME: remove

#' Deprecated function. Do not use!
#'
#' @param df No text
#' @param chars.as.factor No text
#' @param factors.as.char No text
#' @param ints.as.num No text
#' @param logicals.as.factor No text
#' @param x No text
#' @param num.format No text
#' @param clip.len No text
#'
#' @name deprecated
#' @rdname deprecated
NULL

#' @export
#' @rdname deprecated
convertDfCols = convertDataFrameCols

#' @export
#' @rdname deprecated
listToShortString = convertToShortString
/scratch/gouwar.j/cran-all/cranData/BBmisc/R/zzz_deprecated.R
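The two exports above keep the old names working as plain aliases, so callers get no run-time signal that the names are deprecated. A minimal sketch, not BBmisc code, of the noisier alternative using base R's .Deprecated(); the wrapper name is hypothetical:

# Hypothetical wrapper (illustration only): warn, then delegate to the new name.
convertDfCols2 <- function(...) {
  .Deprecated("convertDataFrameCols")  # base R: emits a deprecation warning
  convertDataFrameCols(...)            # forward all arguments unchanged
}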
#' @title BCA1SG algorithm for panel count data modeled by the nonhomogeneous Poisson process.
#'
#' @description This function implements the BCA1SG algorithm on the semiparametric NHPP model for panel count data. Details about the algorithm and the semiparametric NHPP model can be found in Wang et al. (2020, Biometrics).
#'
#' @param input_data An object of class data.frame.
#'   The structure of the data.frame must be
#'   \{patient ID, time of measurement, measurement (cumulative counts), covariate_1, ..., covariate_p\}.
#'   This data frame cannot contain missing values. See the dataset skiTum for an example.
#' @param initial_beta The initial value of the regression coefficients.
#'   The dimension of this input should comply with the dimension of the covariates.
#' @param initial_Lambda An R function which serves as the initial value of the baseline mean cumulative function.
#' @param threshold Convergence threshold. The algorithm is terminated when the infinity norm of the difference between successive iterates is less than the convergence threshold.
#' @param max_iter Maximum number of iterations allowed.
#' @param max_stepsize Maximum stepsize allowed.
#' @param xi The xi parameter in the inexact backtracking line search algorithm. See Wang et al. (2020) for details.
#' @param contraction The contraction parameter in the inexact backtracking line search algorithm. See Wang et al. (2020) for details.
#' @return A list of length 5:
#'   distinct_time is the set of distinct observation time points;
#'   est_Lambda is the estimated baseline mean cumulative function at the set of distinct observation time points;
#'   est_beta is the estimated regression coefficients;
#'   iteration is the number of iterations;
#'   timecost is the computational time in seconds.
#'
#' @export
#'
#' @examples
#'
#' # Example 1: Application on the skin tumor data set
#' data(adapt_skiTum)
#' res<-BCA1SG_NHPP(adapt_skiTum,initial_beta = rep(0,4))
#' res$est_beta
#' res$iteration
#' res$timecost
#' plot(res$distinct_time,res$est_Lambda,type="s",lwd=3)
#'
BCA1SG_NHPP<-function(input_data,initial_beta,initial_Lambda=function(x){x},threshold=1e-5,max_iter=5000,max_stepsize=1e4,xi=0.3,contraction=0.5){
  if(sum(is.na(input_data))>0){
    stop("The input data frame contains NA.")
  }
  eps<-10^(-8)
  samplesize<-length(unique(input_data[,1]))
  cov_dim<-ncol(input_data)-3
  if(cov_dim!=length(initial_beta)){
    stop("The dimension of initial_beta does not comply with the dimension of the covariates.")
  }
  measure_time<-rep(0,samplesize)
  for(i in 1:samplesize){
    tmp<-input_data[input_data[,1]==i,]
    measure_time[i]<-length(tmp[,1])
  }
  max_measure<-max(measure_time)
  timepoints<-matrix(rep(-1,samplesize*max_measure),samplesize,max_measure)
  transpoints<-matrix(rep(-1,samplesize*max_measure),samplesize,max_measure)
  measure_results<-matrix(rep(-1,samplesize*max_measure),samplesize,max_measure)
  covariate<-matrix(rep(0,cov_dim*samplesize),samplesize,cov_dim)
  for(i in 1:samplesize){
    tmp<-input_data[input_data[,1]==i,]
    timepoints[i,1:measure_time[i]]<-tmp[,2]
    measure_results[i,1:measure_time[i]]<-tmp[,3]
    covariate[i,]<-as.numeric(tmp[1,-c(1,2,3)])
  }
  uniquepoints<-sort(unique(as.vector(timepoints)))[-1]
  for(i in 1:samplesize){
    for(j in 1:measure_time[i]){
      transpoints[i,j]<-which(timepoints[i,j]==uniquepoints)
    }
  }
  n<-length(uniquepoints)
  loglikelihood<-function(Lambda,beta){
    for(i in 1:n){
      Lambda[n-i+1]<-sum(Lambda[1:(n-i+1)])
    }
    term1<-covariate%*%beta
    lk<-0
    for(i in 1:samplesize){
      lk<-lk+log(Lambda[transpoints[i,1]])*measure_results[i,1]
      if(measure_time[i]>1){
        for(j in 2:measure_time[i]){
          lk<-lk+log(Lambda[transpoints[i,j]]-Lambda[transpoints[i,j-1]])*(measure_results[i,j]-measure_results[i,j-1])
        }
      }
      lk<-lk+term1[i]*measure_results[i,measure_time[i]]-exp(term1[i])*Lambda[transpoints[i,measure_time[i]]]
    }
    return(lk)
  }
  explc_hess<-function(Lambda,beta=numeric(0)){
    mygrad<-rep(0,n)
    for(i in 1:n){
      Lambda[n-i+1]<-sum(Lambda[1:(n-i+1)])
    }
    for(i in 1:samplesize){
      for(k in 1:transpoints[i,1]){
        mygrad[k]<-mygrad[k]-measure_results[i,1]/(Lambda[transpoints[i,1]])^2
      }
      if(measure_time[i]>1){
        for(j in 2:measure_time[i]){
          for(k in (transpoints[i,j-1]+1):transpoints[i,j])
            mygrad[k]<-mygrad[k]-(measure_results[i,j]-measure_results[i,j-1])/(Lambda[transpoints[i,j]]-Lambda[transpoints[i,j-1]])^2
        }
      }
    }
    return(mygrad)
  }
  explc_grad<-function(Lambda,beta=numeric(0)){
    mygrad<-rep(0,n)
    term1<-covariate%*%beta
    for(i in 1:n){
      Lambda[n-i+1]<-sum(Lambda[1:(n-i+1)])
    }
    for(i in 1:samplesize){
      for(k in 1:transpoints[i,1]){
        mygrad[k]<-mygrad[k]+measure_results[i,1]/(Lambda[transpoints[i,1]])-exp(term1[i])
      }
      if(measure_time[i]>1){
        for(j in 2:measure_time[i]){
          for(k in (transpoints[i,j-1]+1):transpoints[i,j])
            mygrad[k]<-mygrad[k]+(measure_results[i,j]-measure_results[i,j-1])/(Lambda[transpoints[i,j]]-Lambda[transpoints[i,j-1]])-exp(term1[i])
        }
      }
    }
    return(mygrad)
  }
  loglikelihood0<-function(beta,LL=numeric(0),NN=numeric(0)){
    term1<-covariate%*%beta
    lk<-sum(NN*term1-exp(term1)*LL)
    return(-lk)
  }
  Int_Lambda<-initial_Lambda(uniquepoints)
  ################################################### BCA1SG Algorithm
  old_Lambda<-Int_Lambda
  old_Lambda<-c(old_Lambda[1],diff(old_Lambda))
  old_Lambda[old_Lambda<=0]<-eps
  old_beta<-initial_beta
  active_set<-numeric(0)
  ptm<-proc.time()
  flag<-TRUE
  count<-0
  while((flag)&&(count<max_iter)){
    movement<-Inf
    while((movement>threshold)&&(count<max_iter)){
      ########## first update the parametric part
      LL<-sum(old_Lambda[1:transpoints[1,measure_time[1]]])
      NN<-measure_results[1,measure_time[1]]
      for(i in 2:samplesize){
        LL<-c(LL,sum(old_Lambda[1:transpoints[i,measure_time[i]]]))
        NN<-c(NN,measure_results[i,measure_time[i]])
      }
      res<-stats::optim(par=old_beta,fn=loglikelihood0,LL=LL,NN=NN)$par
      movement1<-max(abs(old_beta-res))
      old_beta<-res
      ########### then update the nonparametric part
      hess<-explc_hess(old_Lambda,beta=old_beta)
      hess<-hess-rep(eps,n)
      mygrad<-explc_grad(old_Lambda,beta=old_beta)
      ############### compute the movement direction
      direct<--1/hess*mygrad
      count<-count+1
      ############### project the direction onto the region defined by the active constraints
      direct[active_set]<-0
      ############### compute the largest step length
      d<-direct[direct<0]
      old<-old_Lambda[direct<0]
      if(length(d)>0){
        alpha<-min((eps-old)/d)
      }else{
        alpha<-1
      }
      alpha<-min(alpha,max_stepsize)
      direct<-direct*alpha
      ############### compute the new_Lambda candidate
      new_Lambda<-old_Lambda+direct
      ######## line search
      line_count<-0
      while((loglikelihood(new_Lambda,beta = old_beta)<(loglikelihood(old_Lambda,beta = old_beta)+xi*mygrad%*%direct))&&(line_count<50)){
        alpha<-alpha*contraction
        direct<-direct*contraction
        new_Lambda<-old_Lambda+direct
        line_count<-line_count+1
      }
      ############### update the active set
      if(sum(((new_Lambda-eps)<=10^(-6)))>0){
        active_set<-which(((new_Lambda-eps)<=10^(-6)))
      }
      movement<-max(abs(c(new_Lambda-old_Lambda,movement1)))
      old_Lambda<-new_Lambda
      # print(c(count,movement))
    }
    ############### decide whether to remove some point from the boundary
    if(length(active_set)>0){
      lagrg<-rep(0,length(active_set))
      for(i in 1:length(active_set)){
        lagrg[i]<-mygrad[active_set[i]]
      }
      if(max(lagrg)>10^(-5)){
        # print("remove")
        # print(max(lagrg))
        active_set<-active_set[-which.max(lagrg)]
      }else{
        flag<-FALSE
      }
    }else{
      flag<-FALSE
    }
  }
  for(i in 1:n){
    old_Lambda[n-i+1]<-sum(old_Lambda[1:(n-i+1)])
  }
  timecost<-as.numeric((proc.time()-ptm)[3])
  if(count>=max_iter){
    warning("The algorithm fails to converge. Please try to increase the threshold or the max_iter.")
  }
  output_MLE<-list(distinct_time=uniquepoints,est_Lambda=old_Lambda,est_beta=old_beta,iteration=count,timecost=timecost)
  return(output_MLE)
}
/scratch/gouwar.j/cran-all/cranData/BCA1SG/R/BCA1SG_NHPP.R
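To make the documented input layout concrete, here is a hedged sketch of a synthetic data frame in the required {patient ID, time, cumulative count, covariates} form; all values and the two covariate names are invented, and the commented-out call is illustrative only (convergence on tiny toy data is not guaranteed). The bundled adapt_skiTum data set is the real example.

set.seed(1)
toy_panel <- do.call(rbind, lapply(1:5, function(id) {
  times <- sort(sample(1:10, 4))                    # 4 observation times per patient
  data.frame(id    = id,                            # patient IDs must run 1..N
             time  = times,
             count = cumsum(rpois(4, lambda = 1)),  # cumulative counts, nondecreasing
             x1    = rbinom(1, 1, 0.5),             # baseline covariates, constant
             x2    = rnorm(1))                      # within each patient
}))
# res <- BCA1SG_NHPP(toy_panel, initial_beta = rep(0, 2))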
BCA1SG_degradation<-function(input_data,initial_delta,initial_r,initial_Lambda=function(x){x},threshold=1e-5,max_iter=5000,max_stepsize=1e5,xi=0.3,contraction=0.5){ # requireNamespace() # requireNamespace("Matrix") # requireNamespace("logOfGamma") if(sum(is.na(input_data))>0){ stop("The input data frame contains NA.") } eps<-10^(-8) samplesize<-length(unique(input_data[,1])) measure_time<-rep(0,samplesize) for(i in 1:samplesize){ tmp<-input_data[input_data[,1]==i,] measure_time[i]<-length(tmp[,1]) } max_measure<-max(measure_time) timepoints<-matrix(rep(-1,samplesize*max_measure),samplesize,max_measure) transpoints<-matrix(rep(-1,samplesize*max_measure),samplesize,max_measure) measure_results<-matrix(rep(-1,samplesize*max_measure),samplesize,max_measure) for(i in 1:samplesize){ tmp<-input_data[input_data[,1]==i,] timepoints[i,1:measure_time[i]]<-tmp[,2] measure_results[i,1:measure_time[i]]<-tmp[,3] } uniquepoints<-sort(unique(as.vector(timepoints)))[-1] for(i in 1:samplesize){ for(j in 1:measure_time[i]){ transpoints[i,j]<-which(timepoints[i,j]==uniquepoints) } } n<-length(uniquepoints) ################################################################################################################### explc_grad<-function(Theta,delta=numeric(0),r=numeric(0)){ for(i in 1:n){ Theta[n-i+1]<-sum(Theta[1:(n-i+1)]) } mygrad<-rep(0,n) nonzero<-diff(AA@p) for(q in 1:n){ if(q>1){ for(i in 1:(q-1)){ if(nonzero[i]>0){ for(j in (AA@p[i]+1):AA@p[i+1]){ if((AA@i[j]+1)>=q){ mygrad[q]<-mygrad[q]+AA@x[j]/(Theta[AA@i[j]+1]-Theta[i]) } } } } } for(i in q:n){ mygrad[q]<-mygrad[q]+A[i,i]/Theta[i] } } for(i in 1:samplesize){ deltai<-delta+measure_time[i]/2 ri<-r+(measure_results[i,1]-Theta[transpoints[i,1]])^2/(2*measure_results[i,1]) if(measure_time[i]>1){ for(j in 2:measure_time[i]){ ri<-ri+(measure_results[i,j]-measure_results[i,j-1]-(Theta[transpoints[i,j]]-Theta[transpoints[i,j-1]]))^2/(2*(measure_results[i,j]-measure_results[i,j-1])) } } for(j in 1:transpoints[i,1]){ mygrad[j]<-mygrad[j]-(deltai*(Theta[transpoints[i,1]])/(measure_results[i,1]))/ri } if(measure_time[i]>1){ for(j in 2:measure_time[i]){ for(k in (transpoints[i,j-1]+1):transpoints[i,j]){ mygrad[k]<-mygrad[k]-(deltai*(Theta[transpoints[i,j]]-Theta[transpoints[i,j-1]])/(measure_results[i,j]-measure_results[i,j-1]))/ri } } } for(j in 1:transpoints[i,measure_time[i]]){ mygrad[j]<-mygrad[j]+deltai/(ri) } } return(mygrad) } explc_hess<-function(Theta,delta=numeric(0),r=numeric(0)){ for(i in 1:n){ Theta[n-i+1]<-sum(Theta[1:(n-i+1)]) } mygrad<-rep(0,n) nonzero<-diff(AA@p) for(q in 1:n){ if(q>1){ for(i in 1:(q-1)){ if(nonzero[i]>0){ for(j in (AA@p[i]+1):AA@p[i+1]){ if((AA@i[j]+1)>=q){ mygrad[q]<-mygrad[q]-AA@x[j]/(Theta[AA@i[j]+1]-Theta[i])^2 } } } } } for(i in q:n){ mygrad[q]<-mygrad[q]-A[i,i]/Theta[i]^2 } } for(i in 1:samplesize){ deltai<-delta+measure_time[i]/2 ri<-r+(measure_results[i,1]-Theta[transpoints[i,1]])^2/(2*measure_results[i,1]) if(measure_time[i]>1){ for(j in 2:measure_time[i]){ ri<-ri+(measure_results[i,j]-measure_results[i,j-1]-(Theta[transpoints[i,j]]-Theta[transpoints[i,j-1]]))^2/(2*(measure_results[i,j]-measure_results[i,j-1])) } } for(j in 1:transpoints[i,1]){ mygrad[j]<-mygrad[j]-(deltai*ri/measure_results[i,1]-deltai*(Theta[transpoints[i,1]]/measure_results[i,1]-1)^2)/ri^2 } if(measure_time[i]>1){ for(j in 2:measure_time[i]){ for(k in (transpoints[i,j-1]+1):transpoints[i,j]){ 
mygrad[k]<-mygrad[k]-(deltai*ri/(measure_results[i,j]-measure_results[i,j-1])-deltai*((Theta[transpoints[i,j]]-Theta[transpoints[i,j-1]])/(measure_results[i,j]-measure_results[i,j-1])-1)^2)/ri^2 } } } } return(mygrad) } loglikelihood1<-function(par,Theta=numeric(0)){ if(min(par)<0){ return(Inf) }else{ delta<-par[1] r<-par[2] for(i in 1:n){ Theta[n-i+1]<-sum(Theta[1:(n-i+1)]) } lk<-0 for(i in 1:samplesize){ deltai<-delta+measure_time[i]/2 ri<-r+(measure_results[i,1]-Theta[transpoints[i,1]])^2/(2*measure_results[i,1]) if(measure_time[i]>1){ for(j in 2:measure_time[i]){ ri<-ri+(measure_results[i,j]-measure_results[i,j-1]-(Theta[transpoints[i,j]]-Theta[transpoints[i,j-1]]))^2/(2*(measure_results[i,j]-measure_results[i,j-1])) } } lk<-lk+logOfGamma::gammaln(deltai)-logOfGamma::gammaln(delta)+delta*log(r)-deltai*log(ri) } return(-lk) } } loglikelihood<-function(Theta,delta=numeric(0),r=numeric(0)){ for(i in 1:n){ Theta[n-i+1]<-sum(Theta[1:(n-i+1)]) } lk<-0 for(i in 1:samplesize){ deltai<-delta+measure_time[i]/2 ri<-r+(measure_results[i,1]-Theta[transpoints[i,1]])^2/(2*measure_results[i,1]) if(measure_time[i]>1){ for(j in 2:measure_time[i]){ ri<-ri+(measure_results[i,j]-measure_results[i,j-1]-(Theta[transpoints[i,j]]-Theta[transpoints[i,j-1]]))^2/(2*(measure_results[i,j]-measure_results[i,j-1])) } } lk<-lk+logOfGamma::gammaln(deltai)-logOfGamma::gammaln(delta)+delta*log(r)-deltai*log(ri)+log(Theta[transpoints[i,1]]) if(measure_time[i]>1){ for(j in 2:measure_time[i]){ lk<-lk+log(Theta[transpoints[i,j]]-Theta[transpoints[i,j-1]]) } } } return(lk) } A<-matrix(rep(0,n^2),n,n) for(i in 1:samplesize){ A[transpoints[i,1],transpoints[i,1]]<-A[transpoints[i,1],transpoints[i,1]]+1 if(measure_time[i]>1){ for(j in 2:measure_time[i]){ A[transpoints[i,j-1],transpoints[i,j]]<-A[transpoints[i,j-1],transpoints[i,j]]+1 } } } AA<-Matrix::Matrix(t(A)) Int_Lambda<-initial_Lambda(uniquepoints) ###################################################BCA1SG Algorithm old_Theta<-Int_Lambda old_Theta<-c(old_Theta[1],diff(old_Theta)) old_Theta[old_Theta<=0]<-eps old_delta<-initial_delta old_r<-initial_r increment<-Inf active_set<-numeric(0) count<-0 ptm<-proc.time() flag<-TRUE while((flag)&(count<max_iter)){ line_count<-0 movement<-Inf while((movement>threshold)&(count<max_iter)){ res<-stats::optim(par=c(old_delta,old_r),fn=loglikelihood1,Theta=old_Theta)$par movement1<-old_delta-res[1] movement2<-old_r-res[2] last_delta<-old_delta last_r<-old_r old_delta<-res[1] old_r<-res[2] hess<-explc_hess(old_Theta,delta=old_delta,r=old_r) mygrad<-explc_grad(old_Theta,delta=old_delta,r=old_r) ###############compute the movement direction direct<--1/hess*mygrad ###############project the direction if(length(active_set)>0){ for(i in 1:length(active_set)){ direct[active_set[i]]<-0 } } #direct_norm1<-max(abs(c(direct,movement1,movement2))) count<-count+1 ###############compute the largest step length d<-direct[direct<0] old<-old_Theta[direct<0] if(length(d)>0){ beta<-min((eps-old)/d) }else{ beta<-1 } beta<-min(beta,max_stepsize) direct<-direct*beta ###############compute the new_Theta candidate new_Theta<-old_Theta+direct ########line search line_count<-0 while((loglikelihood(new_Theta,delta=old_delta,r=old_r)<(loglikelihood(old_Theta,delta=old_delta,r=old_r)+xi*mygrad%*%direct))&&(line_count<100)){ beta<-beta*contraction direct<-direct*contraction new_Theta<-old_Theta+direct line_count<-line_count+1 } ###############update the active set if(sum(((new_Theta-eps)<=10^(-6)))>0){ active_set<-which(((new_Theta-eps)<=10^(-6))) } 
movement<-max(abs(c(new_Theta-old_Theta,old_delta-last_delta,old_r-last_r))) # print(c(count,movement)) old_Theta<-new_Theta } ###############decide whether to remove some point from the boundary if(length(active_set)>0){ lagrg<-rep(0,length(active_set)) for(i in 1:length(active_set)){ lagrg[i]<-mygrad[active_set[i]] } if(max(lagrg)>0){ # print("remove") # print(max(lagrg)) active_set<-active_set[-which.max(lagrg)] }else{ flag<-FALSE } }else{ flag<-FALSE } } for(i in 1:length(new_Theta)){ new_Theta[length(new_Theta)-i+1]<-sum(new_Theta[1:(length(new_Theta)-i+1)]) } timecost<-as.numeric((proc.time()-ptm)[3]) if(count>=max_iter){ warning("The algorithm fails to converge. Please try to increase the threshold or the max_iter.") } output_MLE<-list(distinct_time=uniquepoints,est_Lambda=new_Theta,est_delta=old_delta,est_r=old_r,iteration=count,timecost=timecost) return(output_MLE) }
/scratch/gouwar.j/cran-all/cranData/BCA1SG/R/BCA1SG_degradation.R
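BCA1SG_degradation() reads exactly three columns: unit ID, inspection time, and cumulative degradation level; since increments of the third column appear in denominators, paths should be strictly increasing. A made-up sketch of a conforming input follows; the initial_delta and initial_r values are arbitrary starting points for the random-effect hyperparameters of Wang et al. (2020).

set.seed(2)
toy_deg <- do.call(rbind, lapply(1:5, function(id) {
  data.frame(id   = id,
             time = sort(sample(seq(0.5, 5, 0.5), 4)),  # inspection times
             y    = cumsum(runif(4, 0.2, 1)))           # strictly increasing degradation
}))
# res <- BCA1SG_degradation(toy_deg, initial_delta = 2, initial_r = 2)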
BCA1SG_interval_censor<-function(input_data,initial_beta,initial_Lambda=function(x){x},threshold=1e-5,max_iter=5000,max_stepsize=1e4,xi=0.3,contraction=0.5){ if(sum(is.na(input_data))>0){ stop("The input data frame contains NA.") } eps<-10^(-8) eps1<-10^(-300) tmp_index<-which(input_data[,1]==input_data[,2]) if(length(tmp_index)>0){ for(i in 1:length(tmp_index)){ input_data[tmp_index[i],1]<-input_data[tmp_index[i],1]-0.01 input_data[tmp_index[i],2]<-input_data[tmp_index[i],2]+0.01 } } tmp_index<-which((input_data[,1]==0)&(input_data[,2]==Inf)) if(length(tmp_index)>0){ input_data<-input_data[-tmp_index,] } Lowerbound<-input_data[,1] Upperbound<-input_data[,2] if(max(Upperbound[Upperbound!=Inf])>max(Lowerbound)){ stop("The MLE of the nonparametric cumulative hazard contain Infinity.") } samplesize<-nrow(input_data) cov_dim<-ncol(input_data)-2 if(cov_dim!=length(initial_beta)){ stop("The dimension of initial_beta does not comply with the dimension of the covariates.") } covariate<-as.matrix(input_data[,-c(1,2)]) uniquepoints<-unique(sort(c(Lowerbound,Upperbound))) if(uniquepoints[1]==0){ uniquepoints<-uniquepoints[-1] } if(max(uniquepoints)==Inf){ uniquepoints<-uniquepoints[-length(uniquepoints)] } n<-length(uniquepoints) transL<-rep(0,samplesize) transU<-rep(-1,samplesize) for(i in 1:samplesize){ if(Lowerbound[i]>0){ transL[i]<-which(abs(Lowerbound[i]-uniquepoints)<eps) } if(Upperbound[i]<Inf){ transU[i]<-which(abs(Upperbound[i]-uniquepoints)<eps) } } backupU<-transU backupL<-transL backupL[transL==0]<-1 backupU[transU==-1]<-n indicator<-rep(2,samplesize) indicator[Lowerbound==0]<-1 indicator[Upperbound==Inf]<-3 loglikelihood<-function(Lambda,beta=numeric(0)){ for(i in 1:n){ Lambda[n-i+1]<-sum(Lambda[1:(n-i+1)]) } term1<-as.numeric(exp(beta%*%t(covariate))) term2<-exp(-Lambda[backupL]*term1) term3<-exp(-Lambda[backupU]*term1) lk<-sum((indicator==1)*pmax(log(1-term3),-1e200)+(indicator==2)*pmax(log(term2-term3),-1e200)+(indicator==3)*pmax(log(term2),-1e200)) return(lk) } loglikelihood0<-function(beta,Lambda=numeric(0)){ for(i in 1:n){ Lambda[n-i+1]<-sum(Lambda[1:(n-i+1)]) } term1<-as.numeric(exp(beta%*%t(covariate))) term2<-exp(-Lambda[backupL]*term1) term3<-exp(-Lambda[backupU]*term1) lk<-sum((indicator==1)*pmax(log(1-term3),-1e200)+(indicator==2)*pmax(log(term2-term3),-1e200)+(indicator==3)*pmax(log(term2),-1e200)) return(-lk) } explc_grad<-function(Lambda,beta=numeric(0)){ for(i in 1:n){ Lambda[n-i+1]<-sum(Lambda[1:(n-i+1)]) } mygrad<-rep(0,n) term1<-as.numeric(exp(beta%*%t(covariate))) term2<-exp(-Lambda[backupL]*term1) term3<-exp(-Lambda[backupU]*term1) cond1<-term1*term3/pmax((1-term3),1e-8) cond2<-term1*term3/pmax((term2-term3),1e-8) for(i in 1:n){ mygrad[i]<-sum((indicator==1)*(i<=transU)*cond1+(indicator==2)*((i<=transL)*(-term1)+(i>transL)*(i<=transU)*pmin(cond2,1e100))+(indicator==3)*(i<=transL)*(-term1)) } return(mygrad) } explc_hess<-function(Lambda,beta=numeric(0)){ for(i in 1:n){ Lambda[n-i+1]<-sum(Lambda[1:(n-i+1)]) } mygrad<-rep(0,n) term1<-as.numeric(exp(beta%*%t(covariate))) term2<-exp(-Lambda[backupL]*term1) term3<-exp(-Lambda[backupU]*term1) cond3<--term1^2*term3/pmax((1-term3)^2,1e-8) cond4<--term1^2*term2*term3/pmax((term2-term3)^2,1e-8) for(i in 1:n){ mygrad[i]<-sum((indicator==1)*(i<=transU)*cond3+(indicator==2)*(i>transL)*(i<=transU)*pmax(cond4,-1e100)) } return(mygrad) } Int_Lambda<-initial_Lambda(uniquepoints) ###################################################BCA1SG Algorithm old_Lambda<-Int_Lambda old_Lambda<-c(old_Lambda[1],diff(old_Lambda)) 
old_Lambda[old_Lambda<=0]<-eps old_beta<-initial_beta active_set<-numeric(0) ptm<-proc.time() flag<-TRUE count<-0 while((flag)&(count<max_iter)){ movement<-Inf while((movement>threshold)&(count<max_iter)){ ##########first update the parametric part res<-stats::optim(par=old_beta,fn=loglikelihood0,Lambda=old_Lambda)$par movement1<-max(abs(old_beta-res)) last_beta<-old_beta old_beta<-res ###########then update the nonparametric part hess<-explc_hess(old_Lambda,beta = old_beta) mygrad<-explc_grad(old_Lambda,beta = old_beta) hess<-hess-rep(eps,n) ###############compute the movement direction direct<--1/hess*mygrad count<-count+1 ###############project the direction onto the region defined by the active constraints direct[active_set]<-0 ###############compute the largest step length d<-direct[direct<0] old<-old_Lambda[direct<0] if(length(d)>0){ alpha<-min((eps-old)/d) }else{ alpha<-1 } alpha<-min(alpha,max_stepsize) direct<-direct*alpha ###############compute the new_Theta candidate new_Lambda<-old_Lambda+direct ########line search line_count<-0 while((loglikelihood(new_Lambda,beta = old_beta)<(loglikelihood(old_Lambda,beta = old_beta)+xi*mygrad%*%direct))&&(line_count<50)){ alpha<-alpha*contraction direct<-direct*contraction new_Lambda<-old_Lambda+direct line_count<-line_count+1 } ###############update the active set active_set<-which(((new_Lambda-eps)<=10^(-6))) increment<-loglikelihood(new_Lambda,beta = old_beta)-loglikelihood(old_Lambda,beta = last_beta) movement<-max(abs(c(new_Lambda-old_Lambda,movement1))) old_Lambda<-new_Lambda # print(c(count,movement)) } ###############decide whether to remove some point from the boundary if(length(active_set)>0){ lagrg<-rep(0,length(active_set)) for(i in 1:length(active_set)){ lagrg[i]<-mygrad[active_set[i]] } if(max(lagrg)>10^(-6)){ # print("remove") # print(max(lagrg)) active_set<-active_set[-which.max(lagrg)] }else{ flag<-FALSE } }else{ flag<-FALSE } } for(i in 1:n){ old_Lambda[n-i+1]<-sum(old_Lambda[1:(n-i+1)]) } timecost<-as.numeric((proc.time()-ptm)[3]) if(count>=max_iter){ warning("The algorithm fails to converge. Please try to increase the threshold or the max_iter.") } output_MLE<-list(distinct_time=uniquepoints,est_Lambda=old_Lambda,est_beta=old_beta,iteration=count,timecost=timecost) return(output_MLE) }
/scratch/gouwar.j/cran-all/cranData/BCA1SG/R/BCA1SG_interval_censor.R
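If the bundled interval-censored data follow the column layout documented in the next file (left endpoint, right endpoint, then covariates zgen and age), a plausible call would look like the sketch below. This mirrors the NHPP example but is an illustration, not a documented example; the function may also stop early if the data fail its finiteness check on the nonparametric MLE.

data(adapt_duser)
# Columns 1-2 are the censoring interval; the remaining two columns are covariates.
res <- BCA1SG_interval_censor(adapt_duser, initial_beta = rep(0, 2))
res$est_beta
plot(res$distinct_time, res$est_Lambda, type = "s", lwd = 3)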
#' A data set adapted from the data set "skiTum" in the package "spef"
#'
#' A data frame containing the recurrence of skin tumors. See Chiou et al. (2017) for details.
#'
#' @format A data frame with columns:
#' \describe{
#'   \item{id}{patient id (repeated for each recurrence).}
#'   \item{time}{observation time.}
#'   \item{count}{cumulative number of tumors up to the current observation time.}
#'   \item{age}{patient's age at enrollment; age = 1 if greater than 65, age = 0 otherwise.}
#'   \item{male}{gender; male = 1, female = 0.}
#'   \item{dfmo}{treatment (DFMO) group = 1; placebo = 0.}
#'   \item{priorTumor}{number of prior tumors from diagnosis to randomization.}
#' }
"adapt_skiTum"

#' A data set adapted from the data set "duser" in the package "FHtest"
#'
#' Data set of 763 drug users in Badalona (Spain). The data come from the detoxification unit of Hospital Universitari Germans Trias i Pujol in Badalona, Spain. See Gomez et al. (2000) for details.
#'
#' @format A data frame with columns:
#' \describe{
#'   \item{left}{left endpoint of time to HIV-infection.}
#'   \item{right}{right endpoint of time to HIV-infection.}
#'   \item{zgen}{gender (0: male; 1: female).}
#'   \item{age}{patient's age.}
#' }
"adapt_duser"
/scratch/gouwar.j/cran-all/cranData/BCA1SG/R/data.R
probs_attrue <- function (probs_pred, y) { tp <- rep(0, nrow(probs_pred)) names (tp) <- rownames (probs_pred) for(i in 1:nrow(probs_pred)) tp[i] <- probs_pred[i,y[i]] tp } comp_amlp <- function(probs_pred, y) { mean (-log (probs_attrue (probs_pred, y))) } ## Mloss -- a matrix specifying losses, with row for true values, and ## column for predicted values. comp_loss <- function(probs_pred, y, Mloss = NULL) { G <- ncol (probs_pred) if (is.null (Mloss)) { Mloss <- matrix(1,G,G) diag(Mloss) <- 0 } loss_pred <- probs_pred %*% Mloss y_pred <- apply(loss_pred,1,which.min) loss <- 0 for(i in 1:nrow(probs_pred)) { loss <- loss + Mloss[y[i],y_pred[i]] } loss / length (y) } comp_eer <- function (probs_pred) { mean (1 - apply (probs_pred, 1, max)) } eval_pred <- function (out_pred, y_ts, Mloss = NULL) { array_probs_pred <- out_pred$array_probs_pred nos_fsel <- out_pred$nos_fsel amlp <- er <- loss <- NULL amlp <- apply (array_probs_pred, 3, comp_amlp, y = y_ts) er <- apply (array_probs_pred, 3, comp_loss, y = y_ts) probs_at_truelabels <- apply (array_probs_pred, 3, probs_attrue, y = y_ts) summary <- data.frame (No.Features = nos_fsel, Error.Rate = er, AMLP = amlp) if (!is.null (Mloss)) { loss <- apply (array_probs_pred, 3, comp_loss, y = y_ts, Mloss = Mloss) result <- cbind (summary, Loss = loss) } list (probs_at_truelabels = probs_at_truelabels, summary = summary) } ## partition all cases into nfold subsets ## This function partitions a set of observations into subsets of almost ## equal size. The result is used in crossvalidation mk_folds <- function(y, nfold = 10, random = FALSE) { n <- length (y) nos_g <- table (y) G <- length (nos_g) nfold <- min (nfold, n) reduced.rep <- TRUE while (reduced.rep){ if (!random) folds <- rep (1:nfold, length = n) else folds <- sample(rep (1:nfold, length=n)) ## check any fold has reduced class representation reduced.rep <- FALSE for (i in 1:nfold) { G_ifold <- length(unique (y[folds!=i])) if (G_ifold < G){ reduced.rep <- TRUE random <- TRUE break } } } ## create fold list foldlist <- rep (list (""),nfold) for (i in 1:nfold) { foldlist [[i]] <- which (folds == i) } foldlist } #################### a generic crossvalidation function #################### ## X --- features with rows for cases ## y --- a vector of response values ## nfold --- number of folds in cross validation ## fitpred_func --- function for training and prediction: ## the arguments of fitpred_func must include X_tr, y_tr, X_ts ## the outputs of fitpred_func must include probs_pred ## ... --- other arguments needed by fitpred_func other than X_tr, y_tr, X_ts cross_vld <- function ( X, y, nfold = 10, folds = NULL, fitpred_func = bcbcsf_fitpred, ...) { if (!is.matrix(X)) stop ("'X' must be a matrix with rows for cases") n <- nrow(X) if (is.null (folds)) { folds <- mk_folds (y, nfold, random = FALSE) } nfold <- length (folds) array_probs_pred <- NULL vector_ts <- NULL for (i_test in 1:nfold) { cat(sprintf ("Fold%2d: ", i_test) ) ts <- folds [[i_test]] vector_ts <- c (vector_ts, ts) tr <- (1:n)[- (ts)] onetrpr <- fitpred_func ( X_tr = X[tr,, drop = FALSE], y_tr = y[tr], X_ts = X[ts,, drop = FALSE], ...) 
one_array_probs_pred <- onetrpr$array_probs_pred array_probs_pred <- abind ( array_probs_pred, one_array_probs_pred, along = 1) onetrpr <- onetrpr[names(onetrpr) != "array_probs_pred"] } cat ("\n") ## make the order of cases in array_probs_pred is the same as X array_probs_pred <- array_probs_pred[order (vector_ts),,, drop = FALSE] dims <- dim (array_probs_pred) dimnames (array_probs_pred) <- list(paste("Case", 1:dims[1], sep=""), paste("Class", 1:dims[2], sep=""), paste("fsel", 1:dims[3], sep="")) #dimnames (array_probs_pred) [[1]] <- paste("Case", 1:n, sep="") c (onetrpr, list (folds = folds, array_probs_pred = array_probs_pred) ) }
/scratch/gouwar.j/cran-all/cranData/BCBCSF/R/comp_pred.r
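cross_vld() assumes only that fitpred_func accepts X_tr, y_tr, X_ts and returns a list containing a cases-by-classes-by-models array_probs_pred. A self-contained sketch with a deliberately trivial classifier; the dummy_fitpred name and the simulated data are illustrative, not package API:

# Predict the training-class frequencies for every test case, in the 3-D shape
# cross_vld() expects (cases x classes x one model slice).
dummy_fitpred <- function(X_tr, y_tr, X_ts, ...) {
  G     <- length(unique(y_tr))
  freq  <- as.vector(table(y_tr)) / length(y_tr)
  probs <- matrix(freq, nrow(X_ts), G, byrow = TRUE)
  list(array_probs_pred = array(probs, dim = c(nrow(X_ts), G, 1)))
}
set.seed(3)
X <- matrix(rnorm(60 * 5), 60, 5)
y <- rep(1:2, each = 30)
cv <- cross_vld(X, y, nfold = 5, fitpred_func = dummy_fitpred)
eval_pred(list(array_probs_pred = cv$array_probs_pred, nos_fsel = 1), y_ts = y)$summary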
############################################################################ ######################### bcbcsf main functions ############################ ############################################################################ bcbcsf_fitpred <- function ( ## arguments specifying info of data sets X_tr, y_tr, nos_fsel = ncol (X_tr), X_ts = NULL, standardize = FALSE, rankf = FALSE, ## arguments for prediction burn = NULL, thin = 1, offset_sdxj = 0.5, ## arguments for Markov chain sampling no_rmc = 1000, no_imc = 5, no_mhwmux = 10, fit_bcbcsf_filepre = ".fitbcbcsf_", ## arguments specifying priors for parameters and hyerparameters w0_mu = 0.05, alpha0_mu = 0.5, alpha1_mu = 3, w0_x = 1.00, alpha0_x = 0.5, alpha1_x = 10, w0_nu = 0.05, alpha0_nu = 0.5, prior_psi = NULL, ## arguments for metropolis sampling for wmu, wx stepadj_mhwmux = 1, diag_mhwmux = FALSE, ## arguments for computing adjustment factor bcor = 1, cut_qf = exp (-10), cut_dpoi = exp (-10), nos_sim = 1000, ## whether look at progress monitor = TRUE) { if (!is.matrix (X_tr)) stop ("X_tr must be a matrix") ## read information about data n <- nrow (X_tr) ## numbers of obs p <- ncol (X_tr) ## find number of observations in each group, posterior freq of y nos_g <- as.vector (table (y_tr)) G <- length (nos_g) if (any(nos_g < 2)) stop ("less than 2 cases in some group in your data") ## set prior for proportion of cases if (is.null (prior_psi)) prior_psi <- rep (1, G) if (length (prior_psi) != G) stop ("length of prior_psi is wrong") ## prior frequency of class lables post_y <- nos_g + prior_psi freqy <- post_y / sum (post_y) ################### Preprocessing Features ############################## ## 1) feature selection on original data (the same as standardized data) info_sel <- list (vars = 1:p) if (any (c(rankf, nos_fsel < p))) info_sel <- rank_F (X_tr, y_tr) ## 2) ordering and selecting features p_fselmax <- max (nos_fsel) fselmax <- info_sel $ vars [1 : p_fselmax] X_tr <- X_tr[, fselmax, drop = FALSE] ## 3) standardizing retained features mle_ori_fselmax <- trpr_mle ( X_tr = X_tr, y_tr = y_tr, rankf = FALSE)$list_fit_mle[[1]] nuj_ori_fselmax <- rep (0, p_fselmax) wxj_ori_fselmax <- rep (1, p_fselmax) if (standardize) { nuj_ori_fselmax <- mle_ori_fselmax $ nuj wxj_ori_fselmax <- mle_ori_fselmax $ wxj X_tr <- sweep (X_tr, 2, nuj_ori_fselmax, "-") X_tr <- sweep (X_tr, 2, sqrt (wxj_ori_fselmax), "/") } ## 4) Gathering sufficient statistic on standardized data tgsum_X_fselmax <- t (rowsum (X_tr,y_tr)) ## grouped sum of features sum_X2_fselmax <- colSums (X_tr^2) ######################################################################### ## creating storage of results nnfsel <- length (nos_fsel) fitfiles <- rep ("", nnfsel) array_probs_pred <- NULL if (!is.null (X_ts)) { n_ts <- nrow (X_ts) array_probs_pred <- array (0, dim = c(n_ts, G, nnfsel) ) } if (monitor) cat (" Be Patient ... BCBCSF is fitting... 
\n") finished <- 0 total <- sum (nos_fsel) * no_rmc if (monitor) pb <- txtProgressBar(min = 0, max = total, style = 3) ######################################################################### ## starting training and prediction for each number of retained features for (i in seq (1, nnfsel) ) { ## information on selected k features k <- nos_fsel [i] if (!is.null (fit_bcbcsf_filepre)) fitfiles[i] <- paste (fit_bcbcsf_filepre, "alpha1_mu_",alpha1_mu, "_n_",n, "_p_", p, "_nfsel_", k, "_biascor_", bcor, ".RData", sep = "") ## Start Marlov chain super-transition if (k >= 1) { ## information on feature selection and standardization fsel <- info_sel $ vars [1:k] nuj_std <- nuj_ori_fselmax [1:k] wxj_std <- wxj_ori_fselmax [1:k] cut_F <- info_sel $ fstats [k] nos_omit <- p - k ## sufficient statstic on selected features tgsum_X <- tgsum_X_fselmax [1:k,,drop = FALSE] sum_X2 <- sum_X2_fselmax [1:k] ## compute qf and partial lambda for bias correction if (bcor == 1 & k < p) { qflmd <- gen_qflmd (y_tr, cut_F, alpha1_mu, alpha1_x, cut_qf, nos_sim) } ## static variables in Gibbs sampling alpha_wmuj <- (alpha1_mu + G) / 2 alpha_wxj <- (alpha1_x + n) / 2 alpha_wnu <- (alpha0_nu + k) / 2 alpha_wmu <- alpha1_mu * k / 2 - alpha0_mu / 2 alpha_wx <- alpha1_x * k / 2 - alpha0_x / 2 lambda0_wmu <- alpha0_mu * w0_mu / 2 ##lambda for wmu from prior lambda0_wx <- alpha0_x * w0_x / 2 ##lambda for wx from prior ## Metropolis Sampling stepsize stepsizes_mhwmux <- stepadj_mhwmux / sqrt ( 10 * c(max(alpha0_mu,alpha_wmu), max(alpha0_x, alpha_wx)) ) ## initialize Markov chain from MLE muj <- mle_ori_fselmax$muj[1:k,,drop = FALSE] - nuj_std wxj <- mle_ori_fselmax$wxj[1:k] / wxj_std wmuj <- mle_ori_fselmax$wmuj[1:k] * 0.01 nuj <- rowMeans (muj) wx <- 1/mean (1/wxj) logwx <- log (wx) wmu <- w0_mu logwmu <- log (wmu) wnu <- 1 ## Markov chain storage MUJ <- array (0, dim = c(k, G, no_rmc)) WXJ <- array (0, dim = c(k, no_rmc)) WMUJ <- array (0, dim = c(k, no_rmc)) NUJ <- array (0, dim = c(k, no_rmc)) WX <- array (0, dim = no_rmc) ## a vector WMU <- array (0, dim = no_rmc) ## a vector WNU <- array (0, dim = no_rmc) ## a vector ## start Gibbs sampling j_save <- 1 i_save <- no_imc * j_save for (i_mc in 1 : (no_rmc * no_imc)) { ## update muj vars_muj <- 1 / (1/wmuj + outer(1/wxj,nos_g) ) means_muj <- (nuj / wmuj + tgsum_X / wxj) * vars_muj muj <- means_muj + replicate (G, rnorm (k)) * sqrt (vars_muj) ## update wxj lambda_wxj <- (alpha1_x * wx + sum_X2 - 2 * rowSums (tgsum_X * muj) + rowSums (sweep (muj^2, 2, nos_g, "*")) )/2 wxj <- rinvgam (k, alpha_wxj, lambda_wxj) ## update wmuj lambda_wmuj <- (alpha1_mu * wmu + rowSums ((muj - nuj)^2) ) / 2 wmuj <- rinvgam (k, alpha_wmuj, lambda_wmuj) ## update nuj sum_muj <- rowSums (muj) vars_nuj <- 1 / (1 / wnu + G / wmuj) means_nuj <- sum_muj / wmuj * vars_nuj nuj <- means_nuj + rnorm (k) * sqrt (vars_nuj) ## update wnu lambda_wnu <- (alpha0_nu * w0_nu + sum (nuj^2) ) / 2 wnu <- rinvgam (1, alpha_wnu, lambda_wnu) ## update wx and wu together with M-H methods ## log posterior of log (wmu, wx) lambda_wmu <- alpha1_mu * sum(1/wmuj) / 2 lambda_wx <- alpha1_x * sum(1/wxj) / 2 logpost_logwmux <- function (lw) { w <- exp (lw) b4cor <- alpha_wmu * lw[1] - lambda_wmu * w[1] - lambda0_wmu / w[1] + alpha_wx * lw[2] - lambda_wx * w[2] - lambda0_wx / w[2] if (bcor == 1 & nos_omit > 0) b4cor + nos_omit * log (comp_adjfactor (w[1], w[2], qflmd, cut_dpoi)) else b4cor } log_wmu_wx <- met_gauss ( iters = no_mhwmux, log_f = logpost_logwmux, ini_value = c(logwmu, logwx), stepsize = stepsizes_mhwmux, diag_mh = 
diag_mhwmux ) logwmu <- log_wmu_wx [1] logwx <- log_wmu_wx [2] wmu <- exp (logwmu) wx <- exp (logwx) ## write states into Marlov chain arrays if (i_mc == i_save) { MUJ [,,j_save] <- muj WXJ [,j_save] <- wxj NUJ [,j_save] <- nuj WMUJ [,j_save] <- wmuj WX [j_save] <- wx WMU [j_save] <- wmu WNU [j_save] <- wnu j_save <- j_save + 1 i_save <- j_save * no_imc finished <- finished + nos_fsel [i] if (monitor) { setTxtProgressBar(pb, finished) } } } fit_bcbcsf <- list ( fsel = fsel, nuj_std = nuj_std, wxj_std = wxj_std, MUJ = MUJ, WXJ = WXJ, NUJ = NUJ, WMUJ = WMUJ, WX = WX, WMU = WMU, WNU = WNU, freqy = freqy, no_imc = no_imc, no_rmc = no_rmc, bias_corrected = bcor ) } else fit_bcbcsf <- list (fsel = NULL, freqy = freqy) if (fitfiles[i] != "") save (fit_bcbcsf, file = fitfiles[i]) ###################### making prediction ############################### if (!is.null (X_ts)) { array_probs_pred[,,i] <- mcmc_pred (X_ts = X_ts, fit_bcbcsf = fit_bcbcsf, burn = burn, thin = thin, offset_sdxj = offset_sdxj) } } if (monitor) close (pb) if (!is.null (array_probs_pred)) { dims <- dim (array_probs_pred) dimnames (array_probs_pred) <- list(paste("Case", 1:dims[1], sep=""), paste("Class", 1:dims[2], sep=""), paste("fsel", 1:dims[3], sep="")) } ## returning results list (fit_bcbcsf = fit_bcbcsf, fitfiles = fitfiles, array_probs_pred = array_probs_pred, nos_fsel = nos_fsel ) } ############################################################################ ######################### functions for prediction ######################### ############################################################################ bcbcsf_pred <- function ( X_ts, out_fit, burn = NULL, thin = 1, offset_sdxj = 0.5) { if (is.vector (X_ts)) X_ts <- matrix (X_ts,1,) fitfiles <- out_fit$fitfiles nos_fsel <- out_fit$nos_fsel array_probs_pred <- NULL for (i in 1:length (nos_fsel)) { fit_bcbcsf <- reload_fit_bcbcsf (fitfiles[i]) probs_pred <- mcmc_pred (X_ts = X_ts, fit_bcbcsf = fit_bcbcsf, burn = burn, thin = thin, offset_sdxj = offset_sdxj) array_probs_pred <- abind (array_probs_pred, probs_pred, along = 3) } dims <- dim (array_probs_pred) dimnames (array_probs_pred) <- list(paste("Case", 1:dims[1], sep=""), paste("Class", 1:dims[2], sep=""), paste("fsel", 1:dims[3], sep="")) list (fitfiles = fitfiles, array_probs_pred = array_probs_pred, nos_fsel = nos_fsel) } mcmc_pred <- function ( X_ts, fit_bcbcsf = NULL, fit_bcbcsf_file = NULL, burn = NULL, thin = 1, offset_sdxj = 0.5) { n <- nrow (X_ts) if (is.null (fit_bcbcsf)) { fit_bcbcsf <- reload_fit_bcbcsf (fit_bcbcsf_file) } if (is.null (fit_bcbcsf$fsel)) ## no features used { t (replicate (n, fit_bcbcsf$freqy) ) } else { if (is.null (burn)) burn <- floor (fit_bcbcsf$no_rmc * 0.2) mu_dim <- dim (fit_bcbcsf$MUJ) k <- mu_dim [1] G <- mu_dim [2] no_rmc <- mu_dim [3] ## standardizing and selecting features X_ts <- X_ts[, fit_bcbcsf$fsel, drop = FALSE] X_ts <- sweep (X_ts,2, fit_bcbcsf$nuj_std, "-") X_ts <- sweep (X_ts,2, sqrt(fit_bcbcsf$wxj_std), "/") ## prepare indice of samples used to predict ix_pred <- burn + thin * seq(0, (no_rmc - burn) %/% thin ) SDXJ <- sqrt(fit_bcbcsf$WXJ[,ix_pred, drop = FALSE]) if (offset_sdxj > 1E-5) { offset <- quantile (SDXJ, offset_sdxj) } else offset <- 0 SDXJ <- SDXJ + offset .C ( "pred_ht", n, k, G, length(ix_pred), X_ts, fit_bcbcsf$MUJ[,,ix_pred], SDXJ, log(fit_bcbcsf$freqy), probs_pred = matrix(0,n,G), PACKAGE = "BCBCSF" ) $ probs_pred } } mlepred <- function (X_ts, fit_mle) { n <- nrow (X_ts) if (is.null (fit_mle$fsel) ) { t(replicate (n, fit_mle$freqy)) } else { k <- 
nrow (fit_mle$muj) G <- ncol (fit_mle$muj) .C ("pred_ht", n, k, G, as.integer(1), X_ts[, fit_mle$fsel], fit_mle$muj, sqrt (fit_mle$wxj),log (fit_mle$freqy), probs_pred = matrix (0, n, G), PACKAGE = "BCBCSF" ) $ probs_pred } } ############################################################################ ######################### BCBCSF result Analyzing ########################## ############################################################################ bcbcsf_sumfit <- function ( fit_bcbcsf = NULL, fit_bcbcsf_afile = NULL, burn = NULL, thin = 1) { if (is.null(fit_bcbcsf)) fit_bcbcsf <- reload_fit_bcbcsf (fit_bcbcsf_afile) if (is.null (burn)) burn <- floor (fit_bcbcsf$no_rmc * 0.2) nuj <- apply (fit_bcbcsf $ NUJ[, - (1:burn), drop = FALSE], 1, median ) wmuj <- apply (fit_bcbcsf $ WMUJ[, - (1:burn), drop = FALSE], 1, median ) wx <- median (fit_bcbcsf $ WX[- (1:burn)] ) wmu <- median (fit_bcbcsf $ WMU[- (1:burn)] ) wnu <- median (fit_bcbcsf $ WNU[- (1:burn)] ) muj <- apply (fit_bcbcsf $ MUJ[,, - (1:burn), drop = FALSE], c(1,2), median) wxj <- apply (fit_bcbcsf $ WXJ[, - (1:burn), drop = FALSE], 1, median) cmuj <- muj - apply (muj,1, mean) scmuj <- cmuj/sqrt(wxj) signalj <- apply (scmuj, 1, sd) freqy <- fit_bcbcsf$freqy fsel <- fit_bcbcsf$fsel list ( nuj_std = fit_bcbcsf$nuj_std, wxj_std = fit_bcbcsf$wxj_std, nuj = nuj, wx = wx, wmu = wmu, wnu = wnu, wmuj = wmuj, cmuj = cmuj, muj = muj, wxj = wxj, scmuj = scmuj, signalj = signalj, freqy = freqy, fsel = fsel ) } reload_fit_bcbcsf <- function (fit_bcbcsf_afile) { local ({ fit_bcbcsf <- get(load (fit_bcbcsf_afile)) return (fit_bcbcsf) }) } bcbcsf_plotsumfit <- function (sum_fit) { G <- ncol (sum_fit$scmuj) par (mfrow = c(G+1,1), mar = c(4,4,3,0.5)) ylim <- range (sum_fit$scmuj) for (g in 1:G) { plot (sum_fit$scmuj[,g], type = "h", ylim = ylim, ylab = "Normalized Mean", xlab = "Gene Rank by F-statistic", main = sprintf ("Normalized Means (Signals) of Class %d", g)) } plot (sum_fit$signalj, type = "h", ylim = c(0, max(sum_fit$signalj)), ylab = "Average Signal Level", xlab = "Gene Rank by F-statistic", main = "Overall Signal Levels of Top Genes") } ############################################################################ ######################### Utility Functions ################################ ############################################################################ ## compute log (sum (exp (lx) )) log_sum_exp <- function(lx) { mlx <- max(lx) log(sum(exp(lx - mlx))) + mlx } ## draw random numbers from inverse gamma distribution rinvgam <- function (n, alpha, lambda) { 1 / rgamma (n, alpha, 1) * lambda } richisq <- function (n, alpha, w = 1) { 1 / rgamma (n, alpha / 2) * alpha * w / 2 } ## this is a generic function for generating Markov chain samples ## from a given density with Metropolis method met_gauss <- function ( iters = 100, log_f, ini_value, stepsize = 0.5, diag_mh = FALSE, ...) { state <- ini_value no_var <- length (state) mchain <- matrix (state, no_var , iters) nos_rej <- 0 logf <- log_f (state,...) if (!is.finite (logf)) stop ("Initial value has 0 probability") for (i in 1:iters) { new_state <- rnorm (no_var, state, stepsize) new_logf <- log_f (new_state,...) 
if (log (runif(1)) < new_logf - logf) { state <- new_state logf <- new_logf } else nos_rej <- nos_rej + 1 ## save state in chain mchain[,i] <- state } if (diag_mh) { cat ("Markov chain is saved in 'mchain' with columns for iterations\n") cat ("Rejection rate = ", nos_rej / iters, "\n") browser () ## pause to allow user to look at Markov chain } state } ## This function estimates the parameters and hyerparameters based on ## the mle of means and variances of each feature. trpr_mle <- function (X_tr, y_tr, X_ts = NULL, nos_fsel = ncol (X_tr), rankf = FALSE) { ## read information about data n <- nrow (X_tr) ## numbers of obs p <- ncol (X_tr) ## find number of observations in each group nos_g <- as.vector (tapply (rep(1,n),INDEX = y_tr, sum)) G <- length (nos_g) if (any(nos_g < 2)) stop ("Less than 2 cases in some group") freqy = nos_g / sum (nos_g) ## feature selection if (any (c(rankf, nos_fsel < p)) ) info_sel <- rank_F (X_tr, y_tr) else { info_sel <- list (vars = 1:p) } ## create result storage nnfsel <- length (nos_fsel) list_fit_mle <- rep (list (""), nnfsel) array_probs_pred <- NULL if (!is.null (X_ts)) array_probs_pred <- array (0, dim = c (nrow (X_ts), G, nnfsel) ) for (i in 1:nnfsel) { k <- nos_fsel [i] if (k == 0) { fsel <- NULL fit_mle <- list (freqy = freqy) } else { fsel <- info_sel $ vars [1:k] X_tr_sel <- X_tr [, fsel, drop = FALSE] ## sufficient statistic and pooled variances gsum_X <- rowsum (X_tr_sel,y_tr) sum_X2 <- colSums (X_tr_sel^2) sum_gsum2 <- colSums (gsum_X^2 / nos_g ) pvars <- (sum_X2 - sum_gsum2) / (n-G) ## pooled variances muj <- t(gsum_X / nos_g) ## group means wxj <- pvars ## pooled variances wx <- 1/mean (1/wxj) nuj <- rowMeans (muj) wnu <- mean (nuj^2) cmuj <- muj - nuj wmuj <- rowMeans (cmuj^2) wmu <- 1/mean (1/wmuj) scmuj <- cmuj/sqrt (wxj) fit_mle <- list ( muj = muj, wxj = wxj, wmuj = wmuj, nuj = nuj, cmuj = cmuj, wx = wx, wmu = wmu, wnu = wnu, scmuj = scmuj, freqy = freqy, fsel = fsel ) } list_fit_mle [[i]] <- fit_mle ## note: in "muj", the row is for features, the column is for groups if (!is.null (X_ts)) { array_probs_pred [,,i] <- mlepred (X_ts = X_ts, fit_mle = fit_mle) } } list ( array_probs_pred = array_probs_pred, nos_fsel = nos_fsel, list_fit_mle = list_fit_mle) } ############################################################################ ######################### Functions for Feature Selection ################## ############################################################################ ## This function ranks all features in terms of F-statistic. rank_F <- function(X, y) { ## This function computes the values of F-statistic of all the features. 
comp_fstats <- function (X,y) { n <- length (y) nos_g <- as.vector (tapply (rep (1,n), INDEX = y, sum)) G <- length (nos_g) gsum_X <- rowsum (X,y) sum_X2 <- colSums (X^2) sum_gsum2 <- colSums (gsum_X^2 / nos_g ) pvars <- (sum_X2 - sum_gsum2) / (n-G) ## pooled variances sum_X <- colSums (X) gvars <- (sum_gsum2 - sum_X^2 / n) / (G-1) ## variances btw groups ## F-statistic gvars / pvars } fstats <- comp_fstats (X, y) vars <- order (fstats, decreasing = TRUE) list (vars=vars, fstats = fstats [vars]) } ############################################################################ ######################### adjustment factor ################################ ############################################################################ ## This function generates random part of lambda of poisson ## distribution and values of CDF of central F distribution, which are ## needed in approximating adjustment factor --- the probability that the ## F-statistic of a feature is smaller than a threshold gen_qflmd <- function (y_tr, cut_F, alpha1_mu = 1, alpha1_x = 10, cut_qf = exp (-10), nos_sim = 1000) { n <- length (y_tr) nos_g <- tapply (rep(1,n), INDEX = y_tr, sum) G <- length(nos_g) qf <- c() l <- 1 while (TRUE) { qf [l] <- pf(cut_F*(G-1)/(G-1 + 2*(l-1)), G-1 + 2*(l-1), n-G) if( qf[l] <= cut_qf ) break l <- l + 1 } gen_adev <- function () { mu <- rnorm (G) mu.bar <- sum(mu * nos_g) / n sum (mu^2 * nos_g) - n * mu.bar^2 } devs <- replicate (nos_sim, gen_adev() ) plmd <- devs/2 * richisq (nos_sim,alpha1_mu) / richisq (nos_sim,alpha1_x) list (qf = qf, plmd = plmd) } ## Given 'qf' and 'plmd' returned by 'gen_qflmd', this function computes ## the probability that the F-statistic is smaller than a threshold comp_adjfactor <- function(w_mu, w_x, qflmd, cut_dpoi = exp (-10) ) { lmd <- qflmd$plmd * w_mu / w_x qf <- qflmd$qf .C("comp_adjfactor", PACKAGE = "BCBCSF", cut_dpoi, length(qf),length(lmd), qf, lmd, adjf = 0.0 )$adjf }
/scratch/gouwar.j/cran-all/cranData/BCBCSF/R/tr-pr.r
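A minimal end-to-end sketch of bcbcsf_fitpred() on simulated two-class data, assuming the BCBCSF package (with its compiled C code) is installed; nos_fsel requests fits with the top 5 and top 10 features by F-statistic, fit_bcbcsf_filepre = NULL keeps fit files off disk, and the short chain (no_rmc = 200) is for demonstration only.

set.seed(4)
n <- 40; p <- 100
X <- matrix(rnorm(n * p), n, p)
y <- rep(1:2, each = n / 2)
X[y == 2, 1:5] <- X[y == 2, 1:5] + 1          # plant signal in the first 5 features
out <- bcbcsf_fitpred(X_tr = X, y_tr = y, nos_fsel = c(5, 10),
                      X_ts = X, no_rmc = 200, fit_bcbcsf_filepre = NULL,
                      monitor = FALSE)
dim(out$array_probs_pred)                      # cases x classes x length(nos_fsel)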
#' @title Calculation of Option Prices Based on a Universal Solution
#' @description This is a function to calculate the prices of European options based on the universal solution provided by Bakshi, Cao and Chen (1997) <doi:10.1111/j.1540-6261.1997.tb02749.x>. This solution takes stochastic volatility, stochastic interest rates and random jumps into consideration. Please cite their work if this package is used.
#' @param kappav Speed of convergence of the variance
#' @param kappar Speed of convergence of the risk-free rate
#' @param thetav Long-term variance
#' @param thetar Long-term risk-free rate
#' @param sigmav Volatility of variance
#' @param sigmar Volatility of the risk-free rate
#' @param muj Jump size
#' @param sigmaj Volatility of jumps
#' @param rho Correlation between underlying price and variance
#' @param lambda Jump intensity
#' @param S0 Initial/current underlying price
#' @param K Strike price
#' @param V0 Initial/current variance
#' @param R0 Initial/current risk-free rate
#' @param t Time to maturity
#' @return Call: the price of the European call option
#' @return Put: the price of the European put option
#' @note Please note that each parameter has a "reasonable range"; e.g. volatilities cannot be zero or negative, so input 0.0000001 where they would otherwise be zero.
#' @examples BCC(kappav=0,kappar=0,thetav=0,thetar=0,sigmav=0.0000001,sigmar=0.0000001,muj=0,
#' @examples sigmaj=0.0000001,rho=0,lambda=0,S0=100,K=100,V0=0.04,R0=0.01,t=1)
#' @examples BCC(kappav=0.5,kappar=0,thetav=0.025,thetar=0,sigmav=0.09,sigmar=0.0000001,muj=0,
#' @examples sigmaj=0.0000001,rho=0.1,lambda=0,S0=100,K=100,V0=0.04,R0=0.01,t=1)
#' @export
BCC <- function(kappav,kappar,thetav,thetar,sigmav,sigmar,muj,sigmaj,rho,lambda,S0,K,V0,R0,t){
  f1 <- function(kappav,kappar,thetav,thetar,sigmav,sigmar,muj,sigmaj,rho,lambda,S0,K,V0,R0,t,phi){
    er <- sqrt(kappar^2-2*sigmar^2*1i*phi)
    ev <- sqrt((kappav-(1+1i*phi)*rho*sigmav)^2-1i*phi*(1+1i*phi)*sigmav^2)
    Bt <- exp(-R0*t)
    a <- -thetar/(sigmar^2)*(2*log(1-(er-kappar)*(1-exp(-er*t))/(2*er))+(er-kappar)*t)
    b <- -thetav/(sigmav^2)*2*log(1-(ev-kappav+(1+1i*phi)*rho*sigmav)*(1-exp(-ev*t))/(2*ev))
    c <- -thetav/(sigmav^2)*(ev-kappav+(1+1i*phi)*rho*sigmav)*t
    d <- 1i*phi*log(S0)
    e <- 2*1i*phi*(1-exp(-er*t))/(2*er-(er-kappar)*(1-exp(-er*t)))*R0
    f <- lambda*(1+muj)*t*((1+muj)^(1i*phi)*exp(1i*phi/2*(1+1i*phi)*sigmaj^2)-1)
    g <- -lambda*1i*phi*muj*t
    h <- V0*1i*phi*(1+1i*phi)*(1-exp(-ev*t))/(2*ev-(ev-kappav+(1+1i*phi)*rho*sigmav)*(1-exp(-ev*t)))
    return(exp(a+b+c+d+e+f+g+h))
  }
  f2 <- function(kappav,kappar,thetav,thetar,sigmav,sigmar,muj,sigmaj,rho,lambda,S0,K,V0,R0,t,phi){
    er <- sqrt(kappar^2-2*sigmar^2*(1i*phi-1))
    ev <- sqrt((kappav-1i*phi*rho*sigmav)^2-1i*phi*(1i*phi-1)*sigmav^2)
    Bt <- exp(-R0*t)
    a <- -thetar/(sigmar^2)*(2*log(1-(er-kappar)*(1-exp(-er*t))/(2*er))+(er-kappar)*t)
    d <- 1i*phi*(log(S0))-log(Bt)
    b <- -thetav/(sigmav^2)*2*log(1-(ev-kappav+1i*phi*rho*sigmav)*(1-exp(-ev*t))/(2*ev))
    c <- -thetav/(sigmav^2)*(ev-kappav+1i*phi*rho*sigmav)*t
    g <- -lambda*1i*phi*muj*t
    e <- 2*(1i*phi-1)*(1-exp(-er*t))/(2*er-(er-kappar)*(1-exp(-er*t)))*R0
    f <- lambda*t*((1+muj)^(1i*phi)*exp(1i*phi/2*(1i*phi-1)*sigmaj^2)-1)
    h <- V0*1i*phi*(1i*phi-1)*(1-exp(-ev*t))/(2*ev-(ev-kappav+1i*phi*rho*sigmav)*(1-exp(-ev*t)))
    return(exp(a+b+c+d+e+f+g+h))
  }
  Pi1 <- function(kappav,kappar,thetav,thetar,sigmav,sigmar,muj,sigmaj,rho,lambda,S0,K,V0,R0,t){
    integrand <- function(phi){
      Re(exp(-1i*phi*log(K))*f1(kappav,kappar,thetav,thetar,sigmav,sigmar,muj,sigmaj,rho,lambda,S0,K,V0,R0,t,phi)/(1i*phi))
    }
    vvpi <- 1/2+1/pi*integrate(integrand,lower=0,upper=Inf,subdivisions = 10000L)$value
    return(vvpi)
  }
  Pi2 <- function(kappav,kappar,thetav,thetar,sigmav,sigmar,muj,sigmaj,rho,lambda,S0,K,V0,R0,t){
    integrand <- function(phi){
      Re(exp(-1i*phi*log(K))*f2(kappav,kappar,thetav,thetar,sigmav,sigmar,muj,sigmaj,rho,lambda,S0,K,V0,R0,t,phi)/(1i*phi))
    }
    vvpi <- 1/2+1/pi*integrate(integrand,lower=0,upper=Inf,subdivisions = 10000L)$value
    return(vvpi)
  }
  Bt <- exp(-R0*t)
  call <- S0*Pi1(kappav,kappar,thetav,thetar,sigmav,sigmar,muj,sigmaj,rho,lambda,S0,K,V0,R0,t)-
    K*Bt*Pi2(kappav,kappar,thetav,thetar,sigmav,sigmar,muj,sigmaj,rho,lambda,S0,K,V0,R0,t)
  put <- -S0*(1-Pi1(kappav,kappar,thetav,thetar,sigmav,sigmar,muj,sigmaj,rho,lambda,S0,K,V0,R0,t))+
    K*Bt*(1-Pi2(kappav,kappar,thetav,thetar,sigmav,sigmar,muj,sigmaj,rho,lambda,S0,K,V0,R0,t))
  result <- list(call=call,put=put)
  return(result)
}
/scratch/gouwar.j/cran-all/cranData/BCC1997/R/BCC1997.R
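Because the put price above is assembled from the same Pi1 and Pi2 terms as the call, put-call parity C - P = S0 - K*exp(-R0*t) holds exactly by construction. A quick sanity check reusing the second roxygen example:

res <- BCC(kappav = 0.5, kappar = 0, thetav = 0.025, thetar = 0,
           sigmav = 0.09, sigmar = 0.0000001, muj = 0, sigmaj = 0.0000001,
           rho = 0.1, lambda = 0, S0 = 100, K = 100, V0 = 0.04, R0 = 0.01, t = 1)
res$call - res$put          # should equal S0 - K * exp(-R0 * t)
100 - 100 * exp(-0.01 * 1)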
#' Goodness of fit.
#'
#' This function assesses the model goodness of fit by calculating the
#' discrepancy measure T(y, Theta) with the following steps:
#' (a) generate T.obs based on the MCMC samples;
#' (b) generate T.rep based on the posterior distribution of the parameters;
#' (c) compare T.obs and T.rep, and calculate the P values.
#'
#' @param fit an object returned by the BCC.multi() function
#' @return Returns a list of length 2 that contains the
#'         observed and replicated discrepancy values
#' @examples
#' # import data
#' filePath <- system.file("extdata", "example.rds", package = "BCClong")
#' fit.BCC <- readRDS(filePath)
#' set.seed(20220929)
#' BayesT(fit.BCC)
#'
#' @export
#' @importFrom stats rnorm rpois rbinom
#' @importFrom mvtnorm rmvnorm
#' @useDynLib BCClong, .registration=TRUE
BayesT <- function(fit){
  max.iter <- fit$max.iter
  burn.in <- fit$burn.in
  thin <- fit$thin
  N <- fit$N
  R <- fit$R
  K <- fit$K
  k <- fit$k
  dist <- fit$dist
  alpha <- fit$alpha
  num.cluster <- fit$num.cluster
  xt <- fit$dat[[1]]$time
  dat <- fit$dat
  summary.stat <- fit$summary.stat
  ZZ.LOCAL <- fit$ZZ.LOCAL
  THETA <- fit$THETA
  num.sample <- (max.iter - burn.in)/thin
  T.obs <- NULL
  T.rep <- NULL
  for (count in 1:num.sample){ # at each iteration
    #--------------------------------------------------------------#
    # (a) Generate T.obs based on the MCMC samples
    #--------------------------------------------------------------#
    ga <- vector(mode = "list", length = R)
    sigma.sq.u <- vector(mode = "list", length = R)
    sigma.sq.e <- vector(mode = "list", length = R)
    zz.local <- vector(mode = "list", length = R)
    theta <- vector(mode = "list", length = R)
    T.tmp <- 0
    for (s in 1:R){
      zz.local[[s]] <- fit$ZZ.LOCAL[[s]][count,]
      ga[[s]] <- fit$GA[[s]][,,count]
      sigma.sq.e[[s]] <- fit$SIGMA.SQ.E[[s]][count,]
      sigma.sq.u[[s]] <- fit$SIGMA.SQ.U[[s]][,,count]
      theta[[s]] <- fit$THETA[[s]][,,count]
      y <- dat[[s]]$y
      xt <- dat[[s]]$time
      ids <- dat[[s]]$id
      for (i in 1:N){
        m <- matrix(cbind(1,xt[which(ids==i)],I(xt[which(ids==i)]^2),
                          I(xt[which(ids==i)]^3))[,1:k[[s]]],ncol=k[[s]])
        mz <- matrix(cbind(1,xt[which(ids==i)],I(xt[which(ids==i)]^2),
                           I(xt[which(ids==i)]^3))[,1:K[[s]]],ncol=K[[s]])
        for (j in 1:num.cluster){
          g <- matrix(ga[[s]][j,],ncol=k[[s]]) %*% t(m) +
            matrix(theta[[s]],nrow=N)[i,] %*% t(mz)
          if (dist[[s]] == "gaussian"){
            T.tmp <- T.tmp + (zz.local[[s]][i]==j)*
              sum(((y[which(ids==i)] - g)^2/sigma.sq.e[[s]][j]))
          }
          if (dist[[s]] == "poisson"){
            mu <- exp(g)
            var <- exp(g)
            T.tmp <- T.tmp + (zz.local[[s]][i]==j)*
              sum(((y[which(ids==i)] - mu)^2/var))
          }
          if (dist[[s]] == "binomial"){
            gt <- exp(g)
            mu <- gt/(1+gt)
            var <- mu*(1-mu)
            T.tmp <- T.tmp + (zz.local[[s]][i]==j)*
              sum(((y[which(ids==i)] - mu)^2/var))
          }
        }
      }
    }
    T.obs <- c(T.obs,T.tmp)
    #--------------------------------------------------------------#
    # (b) Generate T.rep based on the MCMC samples
    #--------------------------------------------------------------#
    T.tmpp <- NULL
    for (s in 1:R){
      zz.local[[s]] <- fit$ZZ.LOCAL[[s]][count,]
      ga[[s]] <- fit$GA[[s]][,,count]
      sigma.sq.e[[s]] <- fit$SIGMA.SQ.E[[s]][count,]
      sigma.sq.u[[s]] <- fit$SIGMA.SQ.U[[s]][,,count]
      theta[[s]] <- fit$THETA[[s]][,,count]
      y <- dat[[s]]$y
      xt <- dat[[s]]$time
      ids <- dat[[s]]$id
      nobss <- table(ids)
      for (i in 1:N){
        m <- matrix(cbind(1,xt[which(ids==i)],I(xt[which(ids==i)]^2),
                          I(xt[which(ids==i)]^3))[,1:k[[s]]],ncol=k[[s]])
        mz <- matrix(cbind(1,xt[which(ids==i)],I(xt[which(ids==i)]^2),
                           I(xt[which(ids==i)]^3))[,1:K[[s]]],ncol=K[[s]])
        for (j in 1:num.cluster){
          # generate the random effect from the multivariate normal distribution
          if (K[[s]]==1){
            theta.new <- rnorm(1,mean= 0,
                               sd= sqrt(summary.stat$SIGMA.SQ.U[[s]][1,,j]))
          }
          if (K[[s]] > 1){
            theta.new <- mvtnorm::rmvnorm(1, mean= rep(0,K[[s]]),
                                          sigma= diag(summary.stat$SIGMA.SQ.U[[s]][1,,j]))
          }
          g <- matrix(summary.stat$GA[[s]][1,,][j,],ncol=k[[s]]) %*% t(m) + theta.new %*% t(mz)
          if (dist[[s]] == "gaussian"){
            if (K[[s]]==1){
              y.rep <- rnorm(1,mean=g, sd= sqrt(summary.stat$SIGMA.SQ.E[[s]][1,j]))
            }
            if (K[[s]] > 1){
              y.rep <- mvtnorm::rmvnorm(1,mean= g ,
                                        sigma= summary.stat$SIGMA.SQ.E[[s]][1,j]*diag(1,nobss[i]))
            }
            T.tmpp <- c(T.tmpp,(zz.local[[s]][i]==j)*sum(((y.rep - g)^2/
                                                            summary.stat$SIGMA.SQ.E[[s]][1,j])))
          }
          if (dist[[s]] == "poisson"){
            mu <- exp(g)
            var <- exp(g)
            y.rep <- rpois(length(mu),lambda=mu)
            T.tmpp <- c(T.tmpp, (zz.local[[s]][i]==j)*sum(((y.rep - mu)^2/var)))
          }
          if (dist[[s]] == "binomial"){
            gt <- exp(g)
            mu <- gt/(1+gt)
            var <- mu*(1-mu)
            y.rep <- rbinom(length(mu),size=1,prob=mu)
            T.tmpp <- c(T.tmpp,(zz.local[[s]][i]==j)*sum(((y.rep - mu)^2/var)))
          }
        }
      }
    }
    T.rep <- c(T.rep,sum(T.tmpp))
  }
  result <- list(T.obs = T.obs, T.rep = T.rep)
  return(result)
}
# [END]
/scratch/gouwar.j/cran-all/cranData/BCClong/R/DiscrepancyMeasure.R
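Step (c) of the docstring leaves the comparison to the user; one common summary is the posterior predictive p-value, the share of replicated discrepancies exceeding the observed ones (values near 0 or 1 flag poor fit). A short sketch on the package's bundled example fit:

filePath <- system.file("extdata", "example.rds", package = "BCClong")
fit.BCC <- readRDS(filePath)
set.seed(20220929)
gof <- BayesT(fit.BCC)
mean(gof$T.rep > gof$T.obs)   # posterior predictive (Bayesian) p-value
plot(gof$T.obs, gof$T.rep, xlab = "T.obs", ylab = "T.rep")
abline(0, 1)                  # points should scatter around the 45-degree line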
# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 BCC <- function(dat, R, id, n_obs, N, num_cluster, dist, alpha_common, sigma_sq_e_common, p, q, ppi, alpha, zz, zz_local, gamma, sigma_sq_e, phi, sigma_sq_u, beta, delta, a_star, b_star, aa0, bb0, a0, b0, v0, V0, cc0, dd0, c0, d0, rr0, RR0, ww0, vv0, lambda0, Lambda0, LOG_LIK_ITER, PPI, ZZ, ALPHA, ZZ_LOCAL, GA, GA_ACCEPT, THETA, THETA_ACCEPT, SIGMA_SQ_U, SIGMA_SQ_E, T_LOCAL, T, adaptive_tunning, tunning_freq, c_gamma_tunning, c_beta_tunning, burn_in, thin, per, max_iter, seed_initial) { .Call(`_BCClong_BCC`, dat, R, id, n_obs, N, num_cluster, dist, alpha_common, sigma_sq_e_common, p, q, ppi, alpha, zz, zz_local, gamma, sigma_sq_e, phi, sigma_sq_u, beta, delta, a_star, b_star, aa0, bb0, a0, b0, v0, V0, cc0, dd0, c0, d0, rr0, RR0, ww0, vv0, lambda0, Lambda0, LOG_LIK_ITER, PPI, ZZ, ALPHA, ZZ_LOCAL, GA, GA_ACCEPT, THETA, THETA_ACCEPT, SIGMA_SQ_U, SIGMA_SQ_E, T_LOCAL, T, adaptive_tunning, tunning_freq, c_gamma_tunning, c_beta_tunning, burn_in, thin, per, max_iter, seed_initial) } LL <- function(fit, fast_version) { .Call(`_BCClong_LL`, fit, fast_version) }
/scratch/gouwar.j/cran-all/cranData/BCClong/R/RcppExports.R
#' Trace plot function
#'
#' To visualize the MCMC chain for model parameters
#'
#' @param fit an object returned by the BCC.multi() function.
#' @param cluster.indx a numeric value. For cluster-specific parameters,
#'        specifying cluster.indx will generate the trace plot for
#'        the corresponding cluster.
#' @param feature.indx a numeric value. For feature-specific parameters,
#'        specifying feature.indx will generate the trace
#'        plot for the corresponding feature.
#' @param parameter a character value. Specify which parameter for which the
#'        trace plot will be generated. The value can be "PPI" for pi,
#'        "ALPHA" for alpha, "GA" for gamma, "SIGMA.SQ.U" for Sigma
#'        and "SIGMA.SQ.E" for sigma.
#' @param xlab Label for x axis
#' @param ylab Label for y axis
#' @param ylim The range for y axis
#' @param xlim The range for x axis
#' @param title Title for the trace plot
#' @return void function with no return value; it only shows plots
#' @examples
#' # get data from the package
#' filePath <- system.file("extdata", "epil1.rds", package = "BCClong")
#' fit.BCC <- readRDS(filePath)
#' traceplot(fit=fit.BCC, parameter="PPI",ylab="pi",xlab="MCMC samples")
#'
#' @export
#' @importFrom graphics plot par
#' @useDynLib BCClong, .registration=TRUE
traceplot <- function(fit, cluster.indx=1, feature.indx=1, parameter="PPI",
                      xlab = NULL, ylab = NULL, ylim = NULL, xlim = NULL,
                      title = NULL) {
  num.cluster <- fit$num.cluster
  num.sample <- (fit$max.iter - fit$burn.in)/fit$thin
  R <- fit$R
  x <- 1:num.sample
  if (!parameter %in% c("PPI", "ALPHA", "GA", "SIGMA.SQ.U", "SIGMA.SQ.E")){
    stop("invalid parameter")
  }
  if (parameter=="PPI"){
    opar <- par(mfrow=c(1,num.cluster))
    on.exit(par(opar))
    for (j in 1:num.cluster){
      y <- fit$PPI[,j]
      plot(x,y,type="l",xlab=xlab,ylab=ylab,xlim=xlim,ylim=ylim,
           main=paste0("Cluster ",j), lwd=1.5)
    }
  }
  if (parameter=="ALPHA") {
    opar <- par(mfrow=c(1,R))
    on.exit(par(opar))
    for (j in 1:R){
      y <- fit$ALPHA[,j]
      plot(x,y,type="l",xlab=xlab,ylab=ylab,xlim=xlim,ylim=ylim,
           main=paste0("Feature ",j), lwd=1.5)
    }
  }
  if (parameter=="GA") {
    dim.GA <- dim(fit$GA[[feature.indx]][cluster.indx,,])[1]
    opar <- par(mfrow=c(1,dim.GA))
    on.exit(par(opar))
    for (j in 1:dim.GA){
      y <- fit$GA[[feature.indx]][cluster.indx,j,]
      plot(x,y,type="l",xlab=xlab,ylab=ylab,xlim=xlim,ylim=ylim,
           main=title, lwd=1.5)
    }
  }
  if (parameter=="SIGMA.SQ.U") {
    opar <- par(mfrow=c(1,num.cluster))
    on.exit(par(opar))
    for (j in 1:num.cluster){
      y <- fit$SIGMA.SQ.U[[feature.indx]][cluster.indx,j,]
      plot(x,y,type="l",xlab=xlab,ylab=ylab,xlim=xlim,ylim=ylim,
           main=title, lwd=1.5)
    }
  }
  if (parameter=="SIGMA.SQ.E") {
    if (fit$dist[feature.indx]=="gaussian"){
      opar <- par(mfrow=c(1,num.cluster))
      on.exit(par(opar))
      for (j in 1:num.cluster){
        y <- fit$SIGMA.SQ.E[[feature.indx]][,cluster.indx]
        plot(x,y,type="l",xlab=xlab,ylab=ylab,xlim=xlim,ylim=ylim,
             main=title, lwd=1.5)
      }
    } else {
      message("SIGMA.SQ.E is not estimated for features with Binomial or Poisson distribution")
    }
  }
}
# [END]
/scratch/gouwar.j/cran-all/cranData/BCClong/R/Traceplot.R
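Since traceplot() draws one panel per cluster (or feature) for the selected parameter, a quick way to review convergence across a whole fit is to loop over the cluster and feature indices. A minimal sketch, not part of the package, using the example fit bundled with the package:

```r
# Sketch: batch trace plots for the cluster- and feature-specific
# fixed effects (GA) of a fitted BCC object.
library(BCClong)
fit.BCC <- readRDS(system.file("extdata", "epil1.rds", package = "BCClong"))

for (k in 1:fit.BCC$num.cluster) {   # clusters
  for (r in 1:fit.BCC$R) {           # features
    traceplot(fit = fit.BCC, cluster.indx = k, feature.indx = r,
              parameter = "GA", xlab = "MCMC samples", ylab = "GA",
              title = paste0("Cluster ", k, ", Feature ", r))
  }
}
```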
#' Trajplot for fitted model
#'
#' Plot the longitudinal trajectories of features by local and global cluster memberships
#'
#' @param fit an object returned by the BCC.multi() function
#' @param feature.ind a numeric value indicating which feature to plot.
#'                    The number indicates the order of the feature specified
#'                    in the mydat argument of the BCC.multi() function
#' @param which.cluster a character value: "global.cluster" or "local.cluster",
#'                      indicating whether to plot the trajectories by global
#'                      or local cluster indices
#' @param title Title for the trajectory plot
#' @param xlab Label for x axis
#' @param ylab Label for y axis
#' @param color Colors for the trajplot
#' @return a ggplot object of the longitudinal trajectories
#' @examples
#' # get data from the package
#' filePath <- system.file("extdata", "epil1.rds", package = "BCClong")
#' fit.BCC <- readRDS(filePath)
#' # for local cluster
#' trajplot(fit=fit.BCC,feature.ind=1, which.cluster = "local.cluster",
#'          title= "Local Clustering",xlab="time (months)",
#'          ylab="anxiety",color=c("#00BA38", "#619CFF"))
#'
#' # for global cluster
#' trajplot(fit=fit.BCC,feature.ind=1,
#'          which.cluster = "global.cluster",
#'          title="Global Clustering",xlab="time (months)",
#'          ylab="anxiety",color=c("#00BA38", "#619CFF"))
#'
#' @export
#' @import ggplot2
#' @importFrom graphics par
#' @useDynLib BCClong, .registration=TRUE
trajplot <- function(fit,feature.ind=1,which.cluster = "global.cluster",
                     title = NULL, ylab = NULL,xlab = NULL, color=NULL){
  time.org <- y <- plot.cluster <- id <- NULL
  dat <- fit$dat
  if (which.cluster == "local.cluster"){
    number.cluster <- length(unique(dat[[feature.ind]]$cluster.local))
    per <- round(100*table(fit$cluster.local[[feature.ind]])/fit$N,1)
    dat[[feature.ind]]$plot.cluster <- factor(dat[[feature.ind]]$cluster.local,
                                              labels=paste("Cluster ",1:number.cluster," (",per,"%",")",sep=""))
  }
  if (which.cluster == "global.cluster"){
    number.cluster <- length(unique(dat[[feature.ind]]$cluster.global))
    per <- round(100*table(fit$cluster.global)/fit$N,1)
    dat[[feature.ind]]$plot.cluster <- factor(dat[[feature.ind]]$cluster.global,
                                              labels=paste("Cluster ",1:number.cluster," (",per,"%",")",sep=""))}
  if(is.null(color)==FALSE) {
    gp <- ggplot(data = dat[[feature.ind]],
                 aes(x =time.org, y =y, color=plot.cluster, linetype=plot.cluster, fill=plot.cluster))+
      geom_point(size=2,alpha=0.2) +
      geom_line(aes(x = time.org, y = y,group=id,color=plot.cluster), linewidth=1.5,alpha=0.2)+
      geom_smooth(method = "loess", linewidth = 3,se = FALSE,span=2) +
      ggtitle(title) + theme_bw() + ylab(ylab) + xlab(xlab)+
      theme(legend.position ="bottom",
            legend.title=element_blank(),
            plot.title = element_text(size = 16, face = "bold"),
            axis.text=element_text(size=16),
            axis.title=element_text(size=16),
            legend.text=element_text(size=12),
            axis.text.x = element_text(angle = 0 ),
            strip.text.x = element_text(size = 16, angle = 0),
            strip.text.y = element_text(size = 16,face="bold")) +
      guides(color=guide_legend(nrow=1,byrow=FALSE),
             linetype=guide_legend(nrow=1,byrow=FALSE),
             fill=guide_legend(nrow=1,byrow=FALSE)) +
      scale_color_manual(values=color)+
      scale_fill_manual(values=color)
  }
  else{
    gp <- ggplot(data = dat[[feature.ind]],
                 aes(x =time.org, y =y, color=plot.cluster, linetype=plot.cluster,fill=plot.cluster))+
      geom_point(size=2,alpha=0.2) +
      geom_line(aes(x = time.org, y = y,group=id,color=plot.cluster), linewidth=1.5,alpha=0.2)+
      geom_smooth(method = "loess", linewidth = 3,se = FALSE,span=2) +
      ggtitle(title) + theme_bw() + ylab(ylab) + xlab(xlab)+
      theme(legend.position ="bottom",
legend.title=element_blank(), plot.title = element_text(size = 16, face = "bold"), axis.text=element_text(size=16), axis.title=element_text(size=16), legend.text=element_text(size=12), axis.text.x = element_text(angle = 0 ), strip.text.x = element_text(size = 16, angle = 0), strip.text.y = element_text(size = 16,face="bold")) + guides(color=guide_legend(nrow=1,byrow=FALSE), linetype=guide_legend(nrow=1,byrow=FALSE), fill=guide_legend(nrow=1,byrow=FALSE)) } return(gp) } # [END]
/scratch/gouwar.j/cran-all/cranData/BCClong/R/Trajplot.R
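Because trajplot() returns the ggplot object it builds rather than just drawing it, the plot can be customized further or written to disk with standard ggplot2 tools. A small sketch under the same bundled example fit (the output file name is illustrative):

```r
# Sketch: post-process the ggplot object returned by trajplot().
library(BCClong)
library(ggplot2)
fit.BCC <- readRDS(system.file("extdata", "epil1.rds", package = "BCClong"))

gp <- trajplot(fit = fit.BCC, feature.ind = 1,
               which.cluster = "local.cluster",
               title = "Local Clustering", xlab = "time (months)",
               ylab = "anxiety", color = c("#00BA38", "#619CFF"))
gp + theme_minimal()                        # add any ggplot2 layer
ggsave("trajplot_anxiety.png", gp, width = 6, height = 4, dpi = 300)
```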
#' Compute a Bayesian Consensus Clustering model for mixed-type longitudinal data
#'
#' This function performs clustering on mixed-type (continuous, discrete and
#' categorical) longitudinal markers using the Bayesian consensus clustering method
#' with MCMC sampling
#'
#' @param mydat list of R longitudinal features (i.e., with a length of R),
#'              where R is the number of features. The data should be prepared
#'              in a long-format (each row is one time point per individual).
#' @param id a list (with a length of R) of vectors of the study id of
#'           individuals for each feature. A single value (i.e., a length of 1)
#'           is recycled if necessary
#' @param time a list (with a length of R) of vectors of time (or age) at which
#'             the feature measurements are recorded
#' @param center 1: center the time variable before clustering, 0: no centering
#' @param num.cluster number of clusters K
#' @param formula a list (with a length of R) of formula for each feature.
#'                Each formula is a two-sided linear formula object describing
#'                both the fixed-effects and random-effects parts of the model,
#'                with the response (i.e., longitudinal feature) on the left
#'                of a ~ operator and the terms, separated by + operators,
#'                on the right. Random-effects terms are distinguished by
#'                vertical bars (|) separating expressions for design matrices
#'                from grouping factors.
#'                See the formula argument from the lme4 package
#' @param dist a character vector (with a length of R) that determines the
#'             distribution for each feature. Possible values are "gaussian"
#'             for a continuous feature, "poisson" for a discrete feature
#'             (e.g., count data) using a log link and "binomial" for a
#'             dichotomous feature (0/1) using a logit link. A single value
#'             (i.e., a length of 1) is recycled if necessary
#' @param alpha.common 1 - common alpha, 0 - separate alphas for each outcome
#' @param initials List of initials for: zz, zz.local, ga, sigma.sq.u, sigma.sq.e.
#'                 Default is NULL
#' @param sigma.sq.e.common 1 - estimate common residual variance across all groups,
#'                          0 - estimate distinct residual variances, default is 1
#' @param hyper.par hyper-parameters of the prior distributions for the model
#'                  parameters. The default hyper-parameter values will result
#'                  in weakly informative prior distributions.
#' @param c.ga.tunning tuning parameter for the MH algorithm (fixed effect parameters),
#'                     each parameter corresponds to an outcome/marker, default
#'                     value equals NULL
#' @param c.theta.tunning tuning parameter for the MH algorithm (random effects),
#'                        each parameter corresponds to an outcome/marker,
#'                        default value equals NULL
#' @param adaptive.tunning adaptive tuning parameters, 1 - yes, 0 - no,
#'                         default is 0
#' @param tunning.freq tuning frequency, default is 20
#' @param initial.cluster.membership "mixAK" or "random" or "PAM" or "input" -
#'                                   input initial cluster membership for local
#'                                   clustering, default is "random"
#' @param input.initial.local.cluster.membership if "input" is used,
#'                                               option input.initial.local.cluster.membership
#'                                               must not be empty, default is NULL
#' @param input.initial.global.cluster.membership input initial cluster
#'                                                membership for global clustering,
#'                                                default is NULL
#' @param seed.initial seed for initial clustering
#'                     (for initial.cluster.membership = "mixAK"),
#'                     default is 2080
#' @param burn.in the number of samples discarded.
#'                This value must be smaller than max.iter.
#' @param thin the thinning interval.
#' For example, if thin = 10, then the MCMC chain will keep one sample every 10 iterations.
#' @param per specify how often the MCMC chain will print the iteration number
#' @param max.iter the number of MCMC iterations.
#' @return Returns a model that contains clustering information
#' @examples
#' # import dataframe
#' filePath <- system.file("extdata", "epil.rds", package = "BCClong")
#' dat <- readRDS(filePath)
#' set.seed(20220929)
#' # example only, a larger number of iterations is required for accurate results
#' fit.BCC <- BCC.multi (
#'       mydat = list(dat$anxiety_scale,dat$depress_scale),
#'       dist = c("gaussian"),
#'       id = list(dat$id),
#'       time = list(dat$time),
#'       formula =list(y ~ time + (1|id)),
#'       num.cluster = 2,
#'       burn.in = 3,
#'       thin = 1,
#'       per =1,
#'       max.iter = 8)
#'
#' @export
#' @import label.switching
#' @import lme4
#' @import mclust
#' @import MCMCpack
#' @import mixAK
#' @importFrom mvtnorm rmvnorm
#' @import nnet
#' @import Rcpp
#' @import Rmpfr
#' @import truncdist
#' @import cluster
#' @import abind
#' @importFrom coda geweke.diag
#' @importFrom stats binomial poisson sd var
#' @importFrom utils capture.output
#' @useDynLib BCClong, .registration=TRUE
BCC.multi <- function(
    mydat,                 # List of R outcomes (R is the number of outcomes, R>=2)
    id,                    # id-variable: starting from 1 to N
    time,                  # time variable
    center=1,              # 1 - center the time variable before clustering, 0 - no centering
    num.cluster,           # number of clusters
    formula,               # fixed and random effects
    dist,                  # "gaussian","poisson","binomial", distribution of the marker
    alpha.common = 0,      # 1 - common alpha, 0 - separate alphas for each marker
    initials = NULL,       # List of initials for: zz, zz.local, ga, sigma.sq.u, sigma.sq.e
    sigma.sq.e.common = 1, # 1 - estimate common residual variance across all groups, 0 - estimate distinct residual variances
    hyper.par = list( delta = 1, a.star = 1, b.star = 1, aa0 = 1e-3, bb0 = 1e-3, cc0 = 1e-3, ww0 = 0, vv0 = 1e3, dd0 = 1e-3, rr0 = 4, RR0 = 3 ),
    c.ga.tunning = NULL,   # tuning parameter for MH algorithm (fixed effect parameters), each parameter corresponds to an outcome/marker
    c.theta.tunning = NULL, # tuning parameter for MH algorithm (random effects), each parameter corresponds to an outcome/marker
    adaptive.tunning = 0,  # adaptive tuning parameters, 1 - yes, 0 - no
    tunning.freq = 20,     # tuning frequency
    initial.cluster.membership = "random", # "mixAK" or "random" or "input" - input initial cluster membership for local clustering
    input.initial.local.cluster.membership = NULL, # if "input" is used, option input.initial.local.cluster.membership must not be empty
    input.initial.global.cluster.membership = NULL, # input initial cluster membership for global clustering
    seed.initial = 2080,   # seed for initial clustering (for initial.cluster.membership = "mixAK")
    burn.in,               # number of samples discarded
    thin,                  # thinning
    per,                   # output information every "per" iteration
    max.iter               # maximum number of iterations
    ) {
  #-------------------------------------------------------------------------------------#
  # Set up
  create.new.id <- function(input_id){
    # Create new ID from 1 to N;
    subj <- unique(input_id)
    N <- length(subj)
    id.new <- NULL
    for (i in 1:N) {id.new <- c(id.new,rep(i,length(input_id[input_id==subj[i]])))}
    return(id.new)}
  #-------------------------------------------------------------------------------------#
  R <- length(mydat)
  if (length(dist)==1) dist = rep(dist,R)
  if (length(id)==1) id = rep(id,R)
  if (length(time)==1) time = rep(time,R)
  if (length(formula)==1) formula = rep(formula,R)
  if (is.null(c.ga.tunning)==TRUE)
c.ga.tunning <- rep(list(1),R) if (is.null(c.theta.tunning)==TRUE) c.theta.tunning <- rep(list(1),R) # removing NA values; dat <- vector(mode = "list", length = R) time.org <- vector(mode = "list", length = R) for (s in 1:R){ id[[s]] <- id[[s]][is.na(mydat[[s]])==FALSE] time.org[[s]] <- time[[s]][is.na(mydat[[s]])==FALSE] if (center==1){ time[[s]] <- time[[s]][is.na(mydat[[s]])==FALSE]; time[[s]] <- time[[s]] - mean(time[[s]]) } mydat[[s]] <- mydat[[s]][is.na(mydat[[s]])==FALSE] # note the order, this line is last } # Find common id # (require each individual to have at least one observation for all markers) common.id <- NULL for (s in 1:R){ if (s==1) common.id <- unique(id[[s]]) else{ common.id <- Reduce(intersect,list(common.id, unique(id[[s]])))}} common.id <- common.id[is.na(common.id)==FALSE] N <- length(common.id); N # number of individuals included in the analysis #---------------------------------------------# id.org <- vector(mode = "list", length = R) id.new <- vector(mode = "list", length = R) for (s in 1:R){ id.org[[s]] <- id[[s]][id[[s]] %in% common.id]; length(id.org[[1]]) id.new[[s]] <- create.new.id(id.org[[s]]); length(id.new[[1]]) time.org[[s]] <- time.org[[s]][id[[s]] %in% common.id]; length(time.org[[1]]) time[[s]] <- time[[s]][id[[s]] %in% common.id]; length(time[[1]]) mydat[[s]] <- mydat[[s]][id[[s]] %in% common.id] # note the order, this line is last dat[[s]] <- data.frame(cbind( y = mydat[[s]], time.org = time.org[[s]], time = time[[s]], time2 = time[[s]]^2, time3 = time[[s]]^3, id.org = id.org[[s]], id = id.new[[s]])) } n.obs <- lapply(id.org, function(x) as.vector(table(x))) # number of measurements and time points can be different #--------------------------------------------------------------# # starting values; #--------------------------------------------------------------# theta <- vector(mode="list", length=R) k <- vector(mode="list", length=R) # dimension of fixed effect K <- vector(mode="list", length=R) # dimension of random effect cf <- 0.1 for (s in 1:R) { if (dist[[s]] == "gaussian") { fit.glmm <- lmer( formula[[s]], data = dat[[s]], control = lmerControl( optimizer = "bobyqa", optCtrl = list(maxfun=2e5) ) ) } else if (dist[[s]] == "poisson") { fit.glmm <- glmer( formula[[s]], data = dat[[s]], nAGQ = 0, family = poisson(link = "log"), control = glmerControl( optimizer = "bobyqa", optCtrl = list(maxfun=2e5) ) ) } else if (dist[[s]] == "binomial") { fit.glmm <- glmer( formula[[s]], data = dat[[s]], nAGQ = 0, family = binomial(link = "logit"), control = glmerControl( optimizer = "bobyqa", optCtrl = list(maxfun=2e5) ) ) } k[[s]] <- length(fixef(fit.glmm)) theta[[s]] <- cf * data.matrix(ranef(fit.glmm)$id) K[[s]] <- dim(theta[[s]])[2] } if (length(initials) == 0) { # use default initial values my.cluster <- vector(mode = "list", length = R) my.cluster.tmp <- NULL for (s in 1:R) { if (initial.cluster.membership == "mixAK") { if (dist[[s]] == "gaussian") { set.seed(seed.initial) fit.mixAK <- mixAK::GLMM_MCMC( y = dat[[s]][,"y"], dist = c("gaussian"), id = dat[[s]][,"id"], z = list(y = dat[[s]][,"time"]), random.intercept = c(TRUE), prior.b = list(Kmax = num.cluster), parallel = TRUE ,silent = TRUE ) fit.mixAK <- NMixRelabel( fit.mixAK, type = "stephens", keep.comp.prob = TRUE ,silent = TRUE ) my.cluster[[s]] <- apply(fit.mixAK[[1]]$poster.comp.prob, 1, which.max) } else if (dist[[s]] == "poisson") { set.seed(seed.initial) fit.mixAK <- mixAK::GLMM_MCMC( y = dat[[s]][,"y"], dist = c("poisson(log)"), id = dat[[s]][,"id"], z = list(y = dat[[s]][,"time"]), 
random.intercept = c(TRUE), prior.b = list(Kmax = num.cluster), parallel = TRUE ,silent = TRUE ) fit.mixAK <- NMixRelabel( fit.mixAK, type = "stephens", keep.comp.prob = TRUE ,silent = TRUE ) my.cluster[[s]] <- apply(fit.mixAK[[1]]$poster.comp.prob, 1, which.max) } else if (dist[[s]] == "binomial") { set.seed(seed.initial) fit.mixAK <- mixAK::GLMM_MCMC( y = dat[[s]][,"y"], dist = c("binomial(logit)"), id = dat[[s]][,"id"], z = list(y = dat[[s]][,"time"]), random.intercept = c(TRUE), prior.b = list(Kmax = num.cluster), parallel = TRUE ,silent = TRUE ) fit.mixAK <- NMixRelabel( fit.mixAK, type = "stephens", keep.comp.prob = TRUE ,silent = TRUE ) my.cluster[[s]] <- apply(fit.mixAK[[1]]$poster.comp.prob, 1, which.max) } } if (initial.cluster.membership == "random") {my.cluster[[s]] <- sample(1:num.cluster,N,replace=TRUE)} if (initial.cluster.membership == "input") {my.cluster[[s]] <- input.initial.local.cluster.membership[[s]]} my.cluster.tmp <- cbind(my.cluster.tmp, my.cluster[[s]]) mydf.clust <- data.frame( id=1:length(my.cluster[[s]]), my.cluster=my.cluster[[s]] ) dat[[s]] <- merge( dat[[s]], mydf.clust, by="id" ) } # For regression coefficients fixed.effect <- vector(mode="list", length=R) for (s in 1:R) { fixed.effect[[s]] <- matrix(0, nrow=num.cluster, ncol=k[[s]]) for (j in 1:num.cluster) { if (dist[[s]] == "gaussian") { fit.glmm <- lmer( formula[[s]], data = dat[[s]][dat[[s]]$my.cluster==j,] ) fixed.effect[[s]][j,] <- fixef(fit.glmm) } else if (dist[[s]] == "poisson") { fixed.effect[[s]][j,] <- suppressMessages(fixef(glmer( formula[[s]], data = dat[[s]][dat[[s]]$my.cluster==j,], nAGQ = 0, family = poisson(link = "log") ))) } else if (dist[[s]] == "binomial") { fixed.effect[[s]][j,] <- suppressMessages(fixef(glmer( formula[[s]], data = dat[[s]][dat[[s]]$my.cluster==j,], nAGQ = 0, family = binomial(link = "logit") ))) } } } alpha <- rep(0.9,R) # initial cluster membership if (length(input.initial.global.cluster.membership)==0) {zz <- my.cluster[[1]]} else{zz <- input.initial.global.cluster.membership } zz.local <- my.cluster # regression coeffcients ga <- fixed.effect # for residual variance (for gaussian distribution only) sigma.sq.e <- vector(mode = "list", length = R) for (s in 1:R) { for (j in 1:num.cluster) { if (dist[[s]] == "gaussian") sigma.sq.e[[s]] <- rbind(sigma.sq.e[[s]], 1) if (dist[[s]] == "poisson") sigma.sq.e[[s]] <- rbind(sigma.sq.e[[s]], NA) if (dist[[s]] == "binomial") sigma.sq.e[[s]] <- rbind(sigma.sq.e[[s]], NA) } } # dispersion parameters phi <- vector(mode = "list", length = R) for (s in 1:R) { for (j in 1:num.cluster) { if (dist[[s]] == "gaussian") phi[[s]] <- rbind(phi[[s]], sigma.sq.e[[s]][j]) if (dist[[s]] == "poisson") phi[[s]] <- rbind(phi[[s]], 1) if (dist[[s]] == "binomial") phi[[s]] <- rbind(phi[[s]], 1) } } # starting values for random effect variance sigma.sq.u <- vector(mode = "list", length = R) sigma.sq.u.inv <- vector(mode = "list", length = R) for (s in 1:R) { sigma.sq.u [[s]] <- array(0, c(K[[s]], K[[s]], num.cluster)) sigma.sq.u.inv[[s]] <- array(0, c(K[[s]], K[[s]], num.cluster)) for (j in 1:num.cluster) { sigma.sq.u [[s]][,,j] <- var(theta[[s]][my.cluster.tmp[,s]==j,1:K[[s]]]); solve.tmp <- try(solve(sigma.sq.u[[s]][,,j]), silent=TRUE) if (inherits(solve.tmp,"try-error")==FALSE) {sigma.sq.u.inv[[s]][,,j] <- solve.tmp} else{sigma.sq.u.inv[[s]][,,j] <- 1e-5 } } } } else { # use specified initials alpha <- initials$alpha zz <- initials$zz zz.local <- initials$zz.local ga <- initials$ga sigma.sq.e <- initials$sigma.sq.e # dispersion parameters phi 
<- vector(mode = "list", length = R) for (s in 1:R) { for (j in 1:num.cluster) { if (dist[[s]] == "gaussian") phi[[s]] <- rbind(phi[[s]], sigma.sq.e[[s]][j]) if (dist[[s]] == "poisson") phi[[s]] <- rbind(phi[[s]], 1) if (dist[[s]] == "binomial") phi[[s]] <- rbind(phi[[s]], 1) } } sigma.sq.u <- initials$sigma.sq.u sigma.sq.u.inv <- vector(mode = "list", length = R) for (s in 1:R) { sigma.sq.u.inv[[s]] <- array(0,c(K[[s]], K[[s]], num.cluster)) for (j in 1:num.cluster) { sigma.sq.u.inv[[s]][,,j] <- solve(sigma.sq.u[[s]][,,j]) } } # Here theta over-write the previously assigned values, if initials are given theta <- vector(mode = "list", length = R) for (s in 1:R) { theta[[s]] <- matrix(0, ncol=K[[s]], nrow=N) for (i in 1:N) { for (j in 1:num.cluster) { if (K[[s]] == 1) theta[[s]][i,1:K[[s]]] <- rmvnorm(n = 1, mean = rep(0,K[[s]]), sigma = matrix(sigma.sq.u[[s]][,,j])) if (K[[s]] > 1) theta[[s]][i,1:K[[s]]] <- rmvnorm(n = 1, mean = rep(0,K[[s]]), sigma = sigma.sq.u[[s]][,,j]) } } } } ppi <- rep(1/num.cluster,num.cluster) # for overall clustering # complete initial values to pass to Cpp function initials.complete <- list(ppi=ppi, alpha=alpha, zz=zz, zz.local=zz.local, ga=ga, sigma.sq.e=sigma.sq.e,sigma.sq.u=sigma.sq.u, theta=theta); #--------------------------------------------------------------# # Hyper-parameters for the Prior Distributions #--------------------------------------------------------------# if (length(hyper.par$delta) == 1) {delta <- rep(hyper.par$delta,num.cluster)} else{delta = hyper.par$delta} a.star <- hyper.par$a.star; b.star <- hyper.par$b.star #---- hyper-parameters for the residual variances - common to both datasets aa0 <- hyper.par$aa0; bb0 <- hyper.par$bb0 a0 <- rep(list(rep(aa0,num.cluster)),R) b0 <- rep(list(rep(bb0,num.cluster)),R) #----- hyper-parameters for fixed effect variables - common to both dataset w0 <- vector(mode = "list", length = R) omega0 <- vector(mode = "list", length = R) #---- hyper-parameters for the random effect variances cc0 <- hyper.par$cc0; dd0 <- hyper.par$dd0 c0 <- rep(list(rep(cc0,num.cluster)),R) d0 <- rep(list(rep(dd0,num.cluster)),R) #---- hyper-parameters for Wishart Distribution rr0 <- hyper.par$rr0; RR0 <- hyper.par$RR0 ww0 <- hyper.par$ww0; vv0 <- hyper.par$vv0; r0 <- vector(mode = "list", length = R) R0 <- vector(mode = "list", length = R) for (s in 1:R) { for (j in 1:num.cluster) { q <- length(ga[[s]][j,]) w0[[s]] <- matrix(ww0, nrow=num.cluster, ncol=q) omega0[[s]] <- array(diag(vv0,q), dim=c(q,q,num.cluster)) r0[[s]] <- rep(rr0, num.cluster) R0[[s]] <- array(diag(RR0,K[[s]]), c(K[[s]],K[[s]],num.cluster)) } } #--------------------------------------------------------------# #--------------------------------------------------------------# #--------------------------------------------------------------# #--------------------------------------------------------------# # Storing the sample; LOG.LIK.ITER <- NULL PPI <- NULL ZZ <- NULL ALPHA <- NULL ZZ.LOCAL <- vector(mode = "list", length = R) GA <- vector(mode = "list", length = R) GA.ACCEPT <- vector(mode = "list", length = R) THETA <- vector(mode = "list", length = R) THETA.ACCEPT <- vector(mode = "list", length = R) SIGMA.SQ.U <- vector(mode = "list", length = R) SIGMA.SQ.E <- vector(mode = "list", length = R) T.LOCAL <- vector(mode = "list", length = R) T <- NULL for (s in 1:R) { ZZ.LOCAL[[s]] <- matrix(0, nrow=(max.iter-burn.in)/thin, ncol=N) SIGMA.SQ.E[[s]] <- matrix(0, nrow=(max.iter-burn.in)/thin, ncol=num.cluster) THETA[[s]] <- array(0, c(N,K[[s]], 
(max.iter-burn.in)/thin)) T.LOCAL[[s]] <- array(0, c(N,num.cluster, (max.iter-burn.in)/thin)) GA.ACCEPT[[s]] <- matrix(0, nrow=(max.iter-burn.in)/thin, ncol=num.cluster) THETA.ACCEPT[[s]] <- matrix(0, nrow=(max.iter-burn.in)/thin, ncol=N) } message(paste(rep('-',60),sep='',collapse='') ); message(paste(rep('-',60),sep='',collapse='')); message('Running BCC Model') message(paste(rep('-',60),sep='',collapse='')); message(paste(rep('-',60),sep='',collapse='')); c.ga <- vector(mode = "list", length = R) for (s in 1:R) { c.ga[[s]] <- rep(c.ga.tunning[[s]],num.cluster) } c.theta <- c.theta.tunning begin <- proc.time()[1] #sourceCpp("BCC.cpp") tryCatch({ rst = BCC( dat, R, id, simplify2array(n.obs), N, num.cluster, dist, alpha.common, sigma.sq.e.common, unlist(k), unlist(K), # initials ppi, alpha, zz, t(simplify2array(zz.local)), ga, lapply(sigma.sq.e, function(x) {if (is.null(x)) {numeric()} else {x}}), phi, sigma.sq.u, theta, # Hyper-parameters delta, a.star, b.star, aa0, bb0, t(simplify2array(a0)), t(simplify2array(b0)), w0, omega0, cc0, dd0, t(simplify2array(c0)), t(simplify2array(d0)), rr0, RR0, ww0, vv0, t(simplify2array(r0)), R0, # sample LOG.LIK.ITER, PPI, ZZ, ALPHA, ZZ.LOCAL, GA, GA.ACCEPT, THETA, THETA.ACCEPT, SIGMA.SQ.U, SIGMA.SQ.E, T.LOCAL, T, adaptive.tunning, tunning.freq, t(simplify2array(c.ga)), unlist(c.theta), burn.in, thin, per, max.iter, seed.initial )}, error=function(cond) { message("Here's the original error message:") message(cond$message) # Choose a return value in case of error return(NULL) } ) end = proc.time()[1] message('It took ', end - begin, ' seconds') run.time <- end - begin rst$PPI <- matrix(rst$PPI, ncol=num.cluster, byrow=TRUE) rst$ZZ <- matrix(rst$ZZ, ncol=N, byrow=TRUE) if(num.cluster > 1) rst$ALPHA <- matrix(rst$ALPHA, ncol=R, byrow=TRUE) else{rst$ALPHA <- matrix(rst$ALPHA, ncol=1, byrow=TRUE);} for (s in 1:R) { rst$SIGMA.SQ.E [[s]] <- matrix(rst$SIGMA.SQ.E [[s]],ncol=num.cluster) rst$GA.ACCEPT [[s]] <- matrix(rst$GA.ACCEPT [[s]],ncol=num.cluster) rst$THETA.ACCEPT[[s]] <- matrix(rst$THETA.ACCEPT[[s]],ncol=N) rst$THETA [[s]] <- array(rst$THETA [[s]],c(N, K[[s]], length(rst$THETA[[s]])/(N*K[[s]]))) rst$ZZ.LOCAL [[s]] <- matrix(rst$ZZ.LOCAL [[s]],ncol=N) rst$T.LOCAL [[s]] <- array(rst$T.LOCAL [[s]], c(N,num.cluster, length(rst$T.LOCAL[[s]])/(N*num.cluster))) } dimnames(rst$ZZ) <- dimnames(ZZ) dimnames(rst$ALPHA) <- dimnames(ALPHA) PPI <- rst$PPI ZZ <- rst$ZZ T <- rst$T ALPHA <- rst$ALPHA SIGMA.SQ.E <- rst$SIGMA.SQ.E GA.ACCEPT <- rst$GA.ACCEPT THETA.ACCEPT <- rst$THETA.ACCEPT THETA <- rst$THETA ZZ.LOCAL <- rst$ZZ.LOCAL T.LOCAL <- rst$T.LOCAL SIGMA.SQ.U <- rst$SIGMA.SQ.U GA <- rst$GA iter <- rst$iter #--------------------------------------------------------------------------------------------------# message(paste(rep('-',60),sep='',collapse='')); message(paste(rep('-',60),sep='',collapse='')); message('Post-Processing Results') message(paste(rep('-',60),sep='',collapse='')); message(paste(rep('-',60),sep='',collapse='')); #--------------------------------------------------------------------------------------------------# #----------------------------------------------------------------------------# #- Apply burn.in and thin num.sample <- length(seq(burn.in + 1,iter,thin)) #----------------------------------------------------------------------------# # Address Label Switching Using Stephens' algorithm #----------------------------------------------------------------------------# if (num.cluster > 1) { # Apply Stephens' algorithm # for global cluster membership 
T.trans <- array(0,c(num.sample,N,num.cluster)) for (j in 1:num.cluster){T.trans[,,j] <- t(T[,j,])} invisible(capture.output(out.relabel <- label.switching(method="STEPHENS", z=ZZ,K=num.cluster, p=T.trans)$permutations$STEPHENS)) tp1 <- apply(T,c(1,2),mean); tp2 <- apply(tp1,1,sum) tp <- cbind(tp1,tp2) tp[,1:num.cluster] <- tp[,1:num.cluster]/tp[,(num.cluster+1)] # standardize postprob <- apply(tp[,1:num.cluster],1,max) # for local cluster membership T.LOCAL.trans <- vector(mode = "list", length = R) out.relabel.local <- vector(mode = "list", length = R) for (s in 1:R){ T.LOCAL.trans[[s]] <- array(0,c(num.sample,N,num.cluster)) for (j in 1:num.cluster){T.LOCAL.trans[[s]][,,j] <- t(T.LOCAL[[s]][,j,])} invisible(capture.output(out.relabel.local[[s]] <- label.switching(method="STEPHENS", z=ZZ.LOCAL[[s]], K=num.cluster, p=T.LOCAL.trans[[s]])$permutations$STEPHENS)) } # Post-processing the parameters according to the switching label for (s in 1:R){ for (j in 1:num.sample) { SIGMA.SQ.E[[s]][j,] <- SIGMA.SQ.E[[s]][j, out.relabel.local[[s]][j,] ] SIGMA.SQ.U[[s]][,,j] <- SIGMA.SQ.U[[s]][ , out.relabel.local[[s]][j,], j ] GA[[s]][,,j] <- GA[[s]][ out.relabel.local[[s]][j,], ,j] T.LOCAL[[s]][,,j] <- T.LOCAL[[s]][ , out.relabel.local[[s]][j,], j ] } } # Compute the global and local cluster membership cluster.global <- apply(apply(T,c(1,2),mean),1,nnet::which.is.max) cluster.local <- vector(mode = "list", length = R) for (s in 1:R) { cluster.local[[s]] <- apply(apply(T.LOCAL[[s]],c(1,2),mean),1,nnet::which.is.max) mycluster <- data.frame(id=1:N,cluster.global=cluster.global,cluster.local=cluster.local[[s]]) dat[[s]] <- merge(dat[[s]],mycluster,by="id") } #--- adjusted adherence--- my.alpha <- apply(ALPHA,2,mean); my.alpha my.alpha.adjust <- (num.cluster*my.alpha - 1)/(num.cluster-1) alpha.adjust <- mean(my.alpha.adjust) } #-------------------------------------------------------------------------------------------# # Calculate Summary Statistics for Model Parameters (mean, sd, 95%CR and geweke statistics) #-------------------------------------------------------------------------------------------# res <- function(x) {c(mean=mean(x),sd=sd(x),quantile(x,c(0.025,0.975)), geweke.stat=as.vector(geweke.diag(x)$z[1]))} PPI.stat <- apply(PPI,2,res) ALPHA.stat <- apply(ALPHA,2,res) SIGMA.SQ.E.stat <- vector(mode = "list", length = R) SIGMA.SQ.U.stat <- vector(mode = "list", length = R) GA.stat <- vector(mode = "list", length = R) for (s in 1:R) { if (dist[[s]] == "gaussian") { SIGMA.SQ.E.stat[[s]] <- apply(SIGMA.SQ.E[[s]], 2, res) } SIGMA.SQ.U.stat[[s]] <- apply(SIGMA.SQ.U[[s]], c(1,2), res) GA.stat[[s]] <- apply(GA[[s]], c(1,2), res) } summary.stat <- list( PPI = PPI.stat, ALPHA = ALPHA.stat, GA = GA.stat, SIGMA.SQ.U = SIGMA.SQ.U.stat, SIGMA.SQ.E = SIGMA.SQ.E.stat) #summary.stat if (num.cluster == 1) { postprob <- cluster.global <- cluster.local <- PPI <- T <- ALPHA <- my.alpha <- alpha.adjust <- THETA.ACCEPT <- GA.ACCEPT <- NULL; } # setting class to the returning objects class(dat) <- "data" class(N) <- "data" class(R) <- "data" class(PPI) <- "MCMC_sample" class(ZZ) <- "MCMC_sample" class(ALPHA) <- "MCMC_sample" class(SIGMA.SQ.E) <- "MCMC_sample" class(SIGMA.SQ.U) <- "MCMC_sample" class(ZZ.LOCAL) <- "MCMC_sample" class(GA) <- "MCMC_sample" class(THETA.ACCEPT) <- "MCMC_sample" class(GA.ACCEPT) <- "MCMC_sample" class(my.alpha) <- "model_parameter" class(alpha.adjust) <- "model_parameter" class(postprob) <- "model_parameter" class(k) <- "model_parameter" class(K) <- "model_parameter" class(dist) <- 
"model_parameter" class(num.cluster) <- "model_parameter" class(THETA) <- "model_parameter" class(cluster.global) <- "cluster_membership" class(cluster.local) <- "cluster_membership" class(max.iter) <- "algorithm_parameter" class(burn.in) <- "algorithm_parameter" class(thin) <- "algorithm_parameter" class(run.time) <- "algorithm_parameter" class(summary.stat) <- "summary_statistics" # returning the parameters; res <- list( dat = dat, N = N, R = R, PPI = PPI, ZZ = ZZ, ALPHA = ALPHA, SIGMA.SQ.E = SIGMA.SQ.E, SIGMA.SQ.U = SIGMA.SQ.U, #T.LOCAL = T.LOCAL, ZZ.LOCAL = ZZ.LOCAL, GA = GA, THETA.ACCEPT = THETA.ACCEPT, GA.ACCEPT = GA.ACCEPT, alpha = my.alpha, alpha.adjust = alpha.adjust, postprob = postprob, k = k, K = K, dist = dist, num.cluster = num.cluster, THETA = THETA, cluster.global = cluster.global, cluster.local = cluster.local, max.iter = max.iter, burn.in = burn.in, thin = thin, run.time = run.time, summary.stat = summary.stat) class(res) <- "BCC" res } #library(compiler) #BCC.multic <- cmpfun(BCC.multi) # [END]
/scratch/gouwar.j/cran-all/cranData/BCClong/R/bccLong.R
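Note how `dist`, `id`, `time`, `formula`, `c.ga.tunning` and `c.theta.tunning` are recycled across the R features when a single value is supplied, so a multi-feature call can be written compactly. A toy sketch mirroring the roxygen example above (small iteration numbers, for illustration only):

```r
# Sketch: single-length arguments are recycled across the R features.
library(BCClong)
dat <- readRDS(system.file("extdata", "epil.rds", package = "BCClong"))
set.seed(20220929)

fit <- BCC.multi(
  mydat   = list(dat$anxiety_scale, dat$depress_scale),  # R = 2 features
  dist    = c("gaussian"),             # recycled to both features
  id      = list(dat$id),              # recycled
  time    = list(dat$time),            # recycled
  formula = list(y ~ time + (1 | id)), # recycled
  num.cluster = 2,
  burn.in = 3, thin = 1, per = 1, max.iter = 8)  # toy settings only
```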
#' Model selection
#'
#' A function that calculates DIC and WAIC for model selection
#'
#' @param fit an object returned by the BCC.multi() function
#' @param fast_version if fast_version=1 (default), then compute the DIC and WAIC using
#' the first 100 MCMC samples (after burn-in and thinning). If fast_version=0, then
#' compute the DIC and WAIC using all MCMC samples (after burn-in and thinning)
#' @return Returns a list with the calculated DIC, WAIC and WBIC values
#' @examples
#' #import data
#' filePath <- system.file("extdata", "example1.rds", package = "BCClong")
#' fit.BCC <- readRDS(filePath)
#' res <- model.selection.criteria(fit.BCC, fast_version=1)
#' res
#'
#' @export
#' @import MASS
#' @import mclust
#' @import Rcpp
#' @importFrom LaplacesDemon WAIC
#' @useDynLib BCClong, .registration=TRUE
model.selection.criteria <- function(fit, fast_version=1){
  if(!fast_version %in% c(0,1)){
    stop("fast_version should be either 0 or 1")
  }
  # calculate the log-likelihood of each observation at each MCMC sample
  log_lik <- LL(fit, fast_version = fast_version)
  Dev <- -2*colSums(log_lik)       # deviance at each MCMC sample
  res <- WAIC(log_lik)
  WBIC <- -mean(colSums(log_lik))
  list(DIC = mean(Dev) + var(Dev)/2, WAIC = res$WAIC, WBIC=WBIC)
}
# [END]
/scratch/gouwar.j/cran-all/cranData/BCClong/R/modelSelection.R
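In practice these criteria are compared across candidate models, for example across numbers of clusters, with smaller DIC and WAIC preferred. A sketch of such a loop, assuming `dat` has been prepared as in the package vignette (the iteration settings follow the vignette and take a while to run):

```r
# Sketch: choose the number of clusters by DIC/WAIC (lower is better).
library(BCClong)
DIC <- WAIC <- NULL
for (k in 2:5) {
  fit.k <- BCC.multi(
    mydat = list(dat$anxiety_scale, dat$depress_scale, dat$aep_scale),
    dist  = c("gaussian"), id = list(dat$id), time = list(dat$time),
    formula = list(y ~ time + (1 | id)), num.cluster = k,
    burn.in = 1000, thin = 10, per = 100, max.iter = 2000)
  res  <- model.selection.criteria(fit.k, fast_version = 1)
  DIC  <- c(DIC, res$DIC)
  WAIC <- c(WAIC, res$WAIC)
}
plot(2:5, DIC, type = "o", xlab = "Number of clusters", ylab = "DIC")
```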
## ----include = FALSE---------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----warning=F, message=F, fig.height= 5, fig.width= 8, fig.align='center', fig.cap= "Spaghtti plot for each marker"---- library(BCClong) library(joineRML) library(ggplot2) library(cowplot) # import data from joineRML library (use ?epileptic.qol to see details) data(epileptic.qol) # convert days to months epileptic.qol$time_month <- epileptic.qol$time/30.25 # Sort by ID and time epileptic.qol <- epileptic.qol[order(epileptic.qol$id,epileptic.qol$time_month),] ## Make Spaghetti Plots to Visualize p1 <- ggplot(data =epileptic.qol, aes(x =time_month, y = anxiety, group = id))+ geom_point() + geom_line() + geom_smooth(method = "loess", size = 1.5,group =1,se = FALSE, span=2) + theme(legend.position = "none", plot.title = element_text(size = 20, face = "bold"), axis.text=element_text(size=20), axis.title=element_text(size=20), axis.text.x = element_text(angle = 0 ), strip.text.x = element_text(size = 20, angle = 0), strip.text.y = element_text(size = 20,face="bold")) + xlab("Time (months)") + ylab("anxiety") p2 <- ggplot(data =epileptic.qol, aes(x =time_month, y = depress, group = id))+ geom_point() + geom_line() + geom_smooth(method = "loess", size = 1.5,group =1,se = FALSE, span=2) + theme(legend.position = "none", plot.title = element_text(size = 20, face = "bold"), axis.text=element_text(size=20), axis.title=element_text(size=20), axis.text.x = element_text(angle = 0 ), strip.text.x = element_text(size = 20, angle = 0), strip.text.y = element_text(size = 20,face="bold")) + xlab("Time (months)") + ylab("depress") p3 <- ggplot(data =epileptic.qol, aes(x =time_month, y = aep, group = id))+ geom_point() + geom_line() + geom_smooth(method = "loess", size = 1.5,group =1,se = FALSE, span=2) + theme(legend.position = "none", plot.title = element_text(size = 20, face = "bold"), axis.text=element_text(size=20), axis.title=element_text(size=20), axis.text.x = element_text(angle = 0 ), strip.text.x = element_text(size = 20, angle = 0), strip.text.y = element_text(size = 20,face="bold")) + xlab("Time (months)") + ylab("aep") plot_grid(p1,NULL,p2,NULL,p3,NULL,labels=c("(A)","", "(B)","","(C)",""), nrow = 1, align = "v", rel_widths = c(1,0.1,1,0.1,1,0.1)) epileptic.qol$anxiety_scale <- scale(epileptic.qol$anxiety) epileptic.qol$depress_scale <- scale(epileptic.qol$depress) epileptic.qol$aep_scale <- scale(epileptic.qol$aep) dat <- epileptic.qol ## ----warning=F, message=F----------------------------------------------------- fit.BCC2 <- readRDS(file = "../inst/extdata/epil1.rds") fit.BCC2b <- readRDS(file = "../inst/extdata/epil2.rds") fit.BCC2c <- readRDS(file = "../inst/extdata/epil3.rds") fit.BCC2b$cluster.global <- factor(fit.BCC2b$cluster.global, labels=c("Cluster 1","Cluster 2")) table(fit.BCC2$cluster.global, fit.BCC2b$cluster.global) fit.BCC2c$cluster.global <- factor(fit.BCC2c$cluster.global, labels=c("Cluster 1","Cluster 2")) table(fit.BCC2$cluster.global, fit.BCC2c$cluster.global) ## ----warning=F, message=F----------------------------------------------------- print(fit.BCC2$N) print(fit.BCC2$summary.stat$PPI) print(fit.BCC2$summary.stat$ALPHA) print(fit.BCC2$summary.stat$GA) print(fit.BCC2$summary.stat$SIGMA.SQ.U) print(fit.BCC2$summary.stat$SIGMA.SQ.E) table(fit.BCC2$cluster.global) table(fit.BCC2$cluster.local[[1]]) table(fit.BCC2$cluster.local[[2]]) table(fit.BCC2$cluster.local[[3]]) ## ----warning=F, message=F, fig.height=5, fig.width=8, 
fig.align='center'------ #=====================================================# # Trace-plot for key model parameters #=====================================================# traceplot(fit=fit.BCC2, parameter="PPI",ylab="pi",xlab="MCMC samples") traceplot(fit=fit.BCC2, parameter="ALPHA",ylab="alpha",xlab="MCMC samples") traceplot(fit=fit.BCC2,cluster.indx = 1, feature.indx=1,parameter="GA",ylab="GA",xlab="MCMC samples") traceplot(fit=fit.BCC2,cluster.indx = 1, feature.indx=2,parameter="GA",ylab="GA",xlab="MCMC samples") traceplot(fit=fit.BCC2,cluster.indx = 1, feature.indx=3,parameter="GA",ylab="GA",xlab="MCMC samples") traceplot(fit=fit.BCC2,cluster.indx = 2, feature.indx=1,parameter="GA",ylab="GA",xlab="MCMC samples") traceplot(fit=fit.BCC2,cluster.indx = 2, feature.indx=2,parameter="GA",ylab="GA",xlab="MCMC samples") traceplot(fit=fit.BCC2,cluster.indx = 2, feature.indx=3,parameter="GA",ylab="GA",xlab="MCMC samples") ## ----warning=F, message=F, fig.width=12, fig.height=6, fig.align='center'----- #=====================================================# # Trajectory plot for features #=====================================================# gp1 <- trajplot(fit=fit.BCC2,feature.ind=1, which.cluster = "local.cluster", title= bquote(paste("Local Clustering (",hat(alpha)[1] ==.(round(fit.BCC2$alpha[1],2)),")")), xlab="time (months)",ylab="anxiety",color=c("#00BA38", "#619CFF")) gp2 <- trajplot(fit=fit.BCC2,feature.ind=2, which.cluster = "local.cluster", title= bquote(paste("Local Clustering (",hat(alpha)[2] ==.(round(fit.BCC2$alpha[2],2)),")")), xlab="time (months)",ylab="depress",color=c("#00BA38", "#619CFF")) gp3 <- trajplot(fit=fit.BCC2,feature.ind=3, which.cluster = "local.cluster", title= bquote(paste("Local Clustering (",hat(alpha)[3] ==.(round(fit.BCC2$alpha[3],2)),")")), xlab="time (months)",ylab="aep",color=c("#00BA38", "#619CFF")) gp4 <- trajplot(fit=fit.BCC2,feature.ind=1, which.cluster = "global.cluster", title="Global Clustering",xlab="time (months)",ylab="anxiety",color=c("#00BA38", "#619CFF")) gp5 <- trajplot(fit=fit.BCC2,feature.ind=2, which.cluster = "global.cluster", title="Global Clustering",xlab="time (months)",ylab="depress",color=c("#00BA38", "#619CFF")) gp6 <- trajplot(fit=fit.BCC2,feature.ind=3, which.cluster = "global.cluster", title="Global Clustering", xlab="time (months)",ylab="aep",color=c("#00BA38", "#619CFF")) library(cowplot) plot_grid(gp1, gp2,gp3,gp4,gp5,gp6, labels=c("(A)", "(B)", "(C)", "(D)", "(E)", "(F)"), ncol = 3, align = "v" ) plot_grid(gp1,NULL,gp2,NULL,gp3,NULL, gp4,NULL,gp5,NULL,gp6,NULL, labels=c("(A)","", "(B)","","(C)","","(D)","","(E)","","(F)",""), nrow = 2, align = "v", rel_widths = c(1,0.1,1,0.1,1,0.1)) ## ----message=F, warning=F, fig.height=5, fig.width=7, fig.align='center'------ #res <- BayesT(fit=fit.BCC2) res <- readRDS(file = "../inst/extdata/conRes.rds") plot(log(res$T.obs),log(res$T.rep),xlim=c(8.45,8.7), cex=1.5, ylim=c(8.45,8.7),xlab="Observed T statistics (in log scale)", ylab = "Predicted T statistics (in log scale)") abline(0,1,lwd=2,col=2) p.value <- sum(res$T.rep > res$T.obs)/length(res$T.rep) p.value fit.BCC2$cluster.global <- factor(fit.BCC2$cluster.global,labels=c("Cluster 1","Cluster 2")) boxplot(fit.BCC2$postprob ~ fit.BCC2$cluster.global,ylim=c(0,1),xlab="",ylab="Posterior Cluster Probability") ## ----------------------------------------------------------------------------- sessionInfo()
/scratch/gouwar.j/cran-all/cranData/BCClong/inst/doc/ContinuousData.R
--- title: "ContinuousData" author: "Zhiwen Tan" output: rmarkdown::html_vignette: toc: true number_sections: false vignette: > %\VignetteIndexEntry{ContinuousData} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ## Introduction `BCClong` is an R package for performing Bayesian Consensus Clustering (BCC) model for clustering continuous, discrete and categorical longitudinal data, which are commonly seen in many clinical studies. This document gives a tour of BCClong package. see `help(package = "BCClong")` for more information and references provided by `citation("BCClong")` To download **BCClong**, use the following commands: ``` r require("devtools") devtools::install_github("ZhiwenT/BCClong", build_vignettes = TRUE) library("BCClong") ``` To list all functions available in this package: ```r ls("package:BCClong") ``` ## Components Currently, there are 5 function in this package which are __*BCC.multi*__, __*BayesT*__, __*model.selection.criteria*__, __*traceplot*__, __*trajplot*__. __*BCC.multi*__ function performs clustering on mixed-type (continuous, discrete and categorical) longitudinal markers using Bayesian consensus clustering method with MCMC sampling and provide a summary statistics for the computed model. This function will take in a data set and multiple parameters and output a BCC model with summary statistics. __*BayesT*__ function assess the model goodness of fit by calculate the discrepancy measure T(\bm{\y}, \bm{\Theta}) with following steps (a) Generate T.obs based on the MCMC samples (b) Generate T.rep based on the posterior distribution of the parameters (c) Compare T.obs and T.rep, and calculate the P values. __*model.selection.criteria*__ function calculates DIC and WAIC for the fitted model __*traceplot*__ function visualize the MCMC chain for model parameters __*trajplot*__ function plot the longitudinal trajectory of features by local and global clustering ## Pre-process (Setting up) In this example, the `epileptic.qol` data set from `joinrRML` package was used. The variables used here include `anxiety score`, `depress score` and `AEP score`. All of the variables are continuous. 
```{r, warning=F, message=F, fig.height= 5, fig.width= 8, fig.align='center', fig.cap= "Spaghetti plot for each marker"}
library(BCClong)
library(joineRML)
library(ggplot2)
library(cowplot)
# import data from joineRML library (use ?epileptic.qol to see details)
data(epileptic.qol)
# convert days to months
epileptic.qol$time_month <- epileptic.qol$time/30.25
# Sort by ID and time
epileptic.qol <- epileptic.qol[order(epileptic.qol$id,epileptic.qol$time_month),]

## Make Spaghetti Plots to Visualize
p1 <- ggplot(data =epileptic.qol, aes(x =time_month, y = anxiety, group = id))+
  geom_point() +
  geom_line() +
  geom_smooth(method = "loess", size = 1.5,group =1,se = FALSE, span=2) +
  theme(legend.position = "none",
        plot.title = element_text(size = 20, face = "bold"),
        axis.text=element_text(size=20),
        axis.title=element_text(size=20),
        axis.text.x = element_text(angle = 0 ),
        strip.text.x = element_text(size = 20, angle = 0),
        strip.text.y = element_text(size = 20,face="bold")) +
  xlab("Time (months)") + ylab("anxiety")
p2 <- ggplot(data =epileptic.qol, aes(x =time_month, y = depress, group = id))+
  geom_point() +
  geom_line() +
  geom_smooth(method = "loess", size = 1.5,group =1,se = FALSE, span=2) +
  theme(legend.position = "none",
        plot.title = element_text(size = 20, face = "bold"),
        axis.text=element_text(size=20),
        axis.title=element_text(size=20),
        axis.text.x = element_text(angle = 0 ),
        strip.text.x = element_text(size = 20, angle = 0),
        strip.text.y = element_text(size = 20,face="bold")) +
  xlab("Time (months)") + ylab("depress")
p3 <- ggplot(data =epileptic.qol, aes(x =time_month, y = aep, group = id))+
  geom_point() +
  geom_line() +
  geom_smooth(method = "loess", size = 1.5,group =1,se = FALSE, span=2) +
  theme(legend.position = "none",
        plot.title = element_text(size = 20, face = "bold"),
        axis.text=element_text(size=20),
        axis.title=element_text(size=20),
        axis.text.x = element_text(angle = 0 ),
        strip.text.x = element_text(size = 20, angle = 0),
        strip.text.y = element_text(size = 20,face="bold")) +
  xlab("Time (months)") + ylab("aep")
plot_grid(p1,NULL,p2,NULL,p3,NULL,labels=c("(A)","", "(B)","","(C)",""),
          nrow = 1, align = "v", rel_widths = c(1,0.1,1,0.1,1,0.1))

epileptic.qol$anxiety_scale <- scale(epileptic.qol$anxiety)
epileptic.qol$depress_scale <- scale(epileptic.qol$depress)
epileptic.qol$aep_scale <- scale(epileptic.qol$aep)
dat <- epileptic.qol
```

## Choose Best Number Of Clusters

We can compute the mean adjusted adherence to determine the number of clusters using the code below. Since this program takes a long time to run, this chunk of code is not run in this tutorial.
```r
# compute the mean adjusted adherence to determine the number of clusters
set.seed(20220929)
alpha.adjust <- NULL
DIC <- WAIC <- NULL
for (k in 1:5){
  fit.BCC <- BCC.multi (
    mydat = list(dat$anxiety_scale,dat$depress_scale,dat$aep_scale),
    dist = c("gaussian"),
    id = list(dat$id),
    time = list(dat$time),
    formula =list(y ~ time + (1 |id)),
    num.cluster = k,
    initials= NULL,
    burn.in = 1000,
    thin = 10,
    per = 100,
    max.iter = 2000)
  alpha.adjust <- c(alpha.adjust, fit.BCC$alpha.adjust)
  res <- model.selection.criteria(fit.BCC, fast_version=0)
  DIC <- c(DIC,res$DIC)
  WAIC <- c(WAIC,res$WAIC)}
num.cluster <- 1:5
par(mfrow=c(1,3))
plot(num.cluster[2:5], alpha.adjust, type="o",cex.lab=1.5,cex.axis=1.5,cex.main=1.5,lwd=2,
     xlab="Number of Clusters", ylab="mean adjusted adherence",main="mean adjusted adherence")
plot(num.cluster, DIC, type="o",cex=1.5, cex.lab=1.5,cex.axis=1.5,cex.main=1.5,lwd=2,
     xlab="Number of Clusters",ylab="DIC",main="DIC")
plot(num.cluster, WAIC, type="o",cex=1.5, cex.lab=1.5,cex.axis=1.5,cex.main=1.5,lwd=2,
     xlab="Number of Clusters",ylab="WAIC",main="WAIC")
```

## Fit BCC Model Using BCC.multi Function

Here, we used a Gaussian distribution for all three markers. The number of clusters was set to 2 because it had the highest mean adjusted adherence. All hyperparameters were set to their defaults. For the purpose of this tutorial, the number of MCMC iterations is set to a small value to keep the computation time short, and the results are read from pre-compiled RDS files. (The pre-compiled data files can be found here (`./inst/extdata/epil*.rds`).)

```r
# Fit the final model with 2 clusters (largest mean adjusted adherence)
set.seed(20220929)
fit.BCC2 <- BCC.multi (
  mydat = list(dat$anxiety_scale,dat$depress_scale,dat$aep_scale),
  dist = c("gaussian"),
  id = list(dat$id),
  time = list(dat$time),
  formula =list(y ~ time + (1|id)),
  num.cluster = 2,
  burn.in = 10,            # number of samples discarded
  thin = 1,                # thinning
  per = 10,                # output information every "per" iteration
  max.iter = 30)           # maximum number of iterations

set.seed(20220929)
fit.BCC2b <- BCC.multi (
  mydat = list(dat$anxiety_scale,dat$depress_scale,dat$aep_scale),
  dist = c("gaussian"),
  id = list(dat$id),
  time = list(dat$time),
  formula =list(y ~ time + (1 + time|id)),
  num.cluster = 2,
  burn.in = 10,
  thin = 1,
  per = 10,
  max.iter = 30)

set.seed(20220929)
fit.BCC2c <- BCC.multi (
  mydat = list(dat$anxiety_scale,dat$depress_scale,dat$aep_scale),
  dist = c("gaussian"),
  id = list(dat$id),
  time = list(dat$time),
  formula =list(y ~ time + time2 + (1 + time|id)),
  num.cluster = 2,
  burn.in = 10,
  thin = 1,
  per = 10,
  max.iter = 30)
```

Load the pre-compiled results:

```{r, warning=F, message=F}
fit.BCC2 <- readRDS(file = "../inst/extdata/epil1.rds")
fit.BCC2b <- readRDS(file = "../inst/extdata/epil2.rds")
fit.BCC2c <- readRDS(file = "../inst/extdata/epil3.rds")
fit.BCC2b$cluster.global <- factor(fit.BCC2b$cluster.global,
                                   labels=c("Cluster 1","Cluster 2"))
table(fit.BCC2$cluster.global, fit.BCC2b$cluster.global)
fit.BCC2c$cluster.global <- factor(fit.BCC2c$cluster.global,
                                   labels=c("Cluster 1","Cluster 2"))
table(fit.BCC2$cluster.global, fit.BCC2c$cluster.global)
```

## Printing Summary Statistics for key model parameters

To print the summary statistics for all parameters:

```r
fit.BCC2$summary.stat
```

To print the proportion \pi for each cluster (mean, sd, and the 2.5% and 97.5% percentiles); Geweke statistics (geweke.stat) between -2 and 2 suggest that the parameters have converged:

```r
fit.BCC2$summary.stat$PPI
```

The code below prints out all major parameters

```{r, warning=F,
message=F} print(fit.BCC2$N) print(fit.BCC2$summary.stat$PPI) print(fit.BCC2$summary.stat$ALPHA) print(fit.BCC2$summary.stat$GA) print(fit.BCC2$summary.stat$SIGMA.SQ.U) print(fit.BCC2$summary.stat$SIGMA.SQ.E) table(fit.BCC2$cluster.global) table(fit.BCC2$cluster.local[[1]]) table(fit.BCC2$cluster.local[[2]]) table(fit.BCC2$cluster.local[[3]]) ``` ## Visualize Clusters We can use the __*traceplot*__ function to plot the MCMC process and the __*trajplot*__ function to plot the trajectory for each feature. ```{r, warning=F, message=F, fig.height=5, fig.width=8, fig.align='center'} #=====================================================# # Trace-plot for key model parameters #=====================================================# traceplot(fit=fit.BCC2, parameter="PPI",ylab="pi",xlab="MCMC samples") traceplot(fit=fit.BCC2, parameter="ALPHA",ylab="alpha",xlab="MCMC samples") traceplot(fit=fit.BCC2,cluster.indx = 1, feature.indx=1,parameter="GA",ylab="GA",xlab="MCMC samples") traceplot(fit=fit.BCC2,cluster.indx = 1, feature.indx=2,parameter="GA",ylab="GA",xlab="MCMC samples") traceplot(fit=fit.BCC2,cluster.indx = 1, feature.indx=3,parameter="GA",ylab="GA",xlab="MCMC samples") traceplot(fit=fit.BCC2,cluster.indx = 2, feature.indx=1,parameter="GA",ylab="GA",xlab="MCMC samples") traceplot(fit=fit.BCC2,cluster.indx = 2, feature.indx=2,parameter="GA",ylab="GA",xlab="MCMC samples") traceplot(fit=fit.BCC2,cluster.indx = 2, feature.indx=3,parameter="GA",ylab="GA",xlab="MCMC samples") ``` ```{r, warning=F, message=F, fig.width=12, fig.height=6, fig.align='center'} #=====================================================# # Trajectory plot for features #=====================================================# gp1 <- trajplot(fit=fit.BCC2,feature.ind=1, which.cluster = "local.cluster", title= bquote(paste("Local Clustering (",hat(alpha)[1] ==.(round(fit.BCC2$alpha[1],2)),")")), xlab="time (months)",ylab="anxiety",color=c("#00BA38", "#619CFF")) gp2 <- trajplot(fit=fit.BCC2,feature.ind=2, which.cluster = "local.cluster", title= bquote(paste("Local Clustering (",hat(alpha)[2] ==.(round(fit.BCC2$alpha[2],2)),")")), xlab="time (months)",ylab="depress",color=c("#00BA38", "#619CFF")) gp3 <- trajplot(fit=fit.BCC2,feature.ind=3, which.cluster = "local.cluster", title= bquote(paste("Local Clustering (",hat(alpha)[3] ==.(round(fit.BCC2$alpha[3],2)),")")), xlab="time (months)",ylab="aep",color=c("#00BA38", "#619CFF")) gp4 <- trajplot(fit=fit.BCC2,feature.ind=1, which.cluster = "global.cluster", title="Global Clustering",xlab="time (months)",ylab="anxiety",color=c("#00BA38", "#619CFF")) gp5 <- trajplot(fit=fit.BCC2,feature.ind=2, which.cluster = "global.cluster", title="Global Clustering",xlab="time (months)",ylab="depress",color=c("#00BA38", "#619CFF")) gp6 <- trajplot(fit=fit.BCC2,feature.ind=3, which.cluster = "global.cluster", title="Global Clustering", xlab="time (months)",ylab="aep",color=c("#00BA38", "#619CFF")) library(cowplot) plot_grid(gp1, gp2,gp3,gp4,gp5,gp6, labels=c("(A)", "(B)", "(C)", "(D)", "(E)", "(F)"), ncol = 3, align = "v" ) plot_grid(gp1,NULL,gp2,NULL,gp3,NULL, gp4,NULL,gp5,NULL,gp6,NULL, labels=c("(A)","", "(B)","","(C)","","(D)","","(E)","","(F)",""), nrow = 2, align = "v", rel_widths = c(1,0.1,1,0.1,1,0.1)) ``` ## Posterior Check The __*BayesT*__ function will be used for posterior check. Here we used the pre-compiled results, un-comment the line `res <- BayesT(fit=fit.BCC2)` to try your own. 
The pre-compiled data file can be found here (`./inst/extdata/conRes.rds`). For this function, a p-value between 0.3 and 0.7 is considered reasonable. In the scatter plot, the data points should be evenly distributed around the line y = x.

```{r, message=F, warning=F, fig.height=5, fig.width=7, fig.align='center'}
#res <- BayesT(fit=fit.BCC2)
res <- readRDS(file = "../inst/extdata/conRes.rds")
plot(log(res$T.obs),log(res$T.rep),xlim=c(8.45,8.7), cex=1.5,
     ylim=c(8.45,8.7),xlab="Observed T statistics (in log scale)",
     ylab = "Predicted T statistics (in log scale)")
abline(0,1,lwd=2,col=2)
p.value <- sum(res$T.rep > res$T.obs)/length(res$T.rep)
p.value

fit.BCC2$cluster.global <- factor(fit.BCC2$cluster.global,labels=c("Cluster 1","Cluster 2"))
boxplot(fit.BCC2$postprob ~ fit.BCC2$cluster.global,ylim=c(0,1),xlab="",ylab="Posterior Cluster Probability")
```

## Package References

[Tan, Z., Shen, C., Lu, Z. (2022) BCClong: an R package for performing Bayesian Consensus Clustering model for clustering continuous, discrete and categorical longitudinal data.](https://github.com/ZhiwenT/BCClong)

```{r}
sessionInfo()
```
/scratch/gouwar.j/cran-all/cranData/BCClong/inst/doc/ContinuousData.Rmd
## ----include = FALSE---------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----warning=F, message=F----------------------------------------------------- library(BCClong) library(mixAK) data(PBC910) ## ----warning=F, message=F----------------------------------------------------- # pre-compiled result fit.BCC2 <- readRDS("../inst/extdata/PBCseq.rds") ## ----warning=F, message=F----------------------------------------------------- print(fit.BCC2$summary.stat$PPI) print(fit.BCC2$summary.stat$ALPHA) print(fit.BCC2$cluster.global) print(fit.BCC2$cluster.local[[1]]) print(fit.BCC2$cluster.local[[2]]) print(fit.BCC2$cluster.local[[3]]) ## ----warning=F, message=F, fig.height= 6, fig.width= 12, fig.align='center'---- gp1 <- trajplot(fit=fit.BCC2,feature.ind=1,which.cluster = "local.cluster", title= bquote(paste("Local Clustering (",hat(alpha)[1] ==.(round(fit.BCC2$alpha[1],2)),")")), xlab="months",ylab="lbili",color=c("#00BA38", "#619CFF")) gp2 <- trajplot(fit=fit.BCC2,feature.ind=2,which.cluster = "local.cluster", title= bquote(paste("Local Clustering (",hat(alpha)[2] ==.(round(fit.BCC2$alpha[2],2)),")")), xlab="months",ylab="platelet",color=c("#00BA38", "#619CFF")) gp3 <- trajplot(fit=fit.BCC2,feature.ind=3,which.cluster = "local.cluster", title= bquote(paste("Local Clustering (",hat(alpha)[3] ==.(round(fit.BCC2$alpha[3],2)),")")), xlab="months",ylab="spiders",color=c("#00BA38", "#619CFF")) gp4 <- trajplot(fit=fit.BCC2,feature.ind=1,which.cluster = "global.cluster", title="Global Clustering", xlab="months",ylab="lbili",color=c("#00BA38", "#619CFF")) gp5 <- trajplot(fit=fit.BCC2,feature.ind=2,which.cluster = "global.cluster", title="Global Clustering", xlab="months",ylab="platelet",color=c("#00BA38", "#619CFF")) gp6 <- trajplot(fit=fit.BCC2,feature.ind=3,which.cluster = "global.cluster", title="Global Clustering", xlab="months",ylab="spiders",color=c("#00BA38", "#619CFF")) library(cowplot) #dev.new(width=180, height=120) plot_grid(gp1, gp2,gp3,gp4,gp5,gp6, labels=c("(A)", "(B)", "(C)", "(D)", "(E)", "(F)"), ncol = 3, align = "v" ) ## ----------------------------------------------------------------------------- sessionInfo()
/scratch/gouwar.j/cran-all/cranData/BCClong/inst/doc/MixedTypeData.R
--- title: "MixedTypeData" author: "Zhiwen Tan" output: rmarkdown::html_vignette: toc: true number_sections: false vignette: > %\VignetteIndexEntry{MixedTypeData} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ## Introduction `BCClong` is an R package for performing Bayesian Consensus Clustering (BCC) model for clustering continuous, discrete and categorical longitudinal data, which are commonly seen in many clinical studies. This document gives a tour of BCClong package. see `help(package = "BCClong")` for more information and references provided by `citation("BCClong")` To download **BCClong**, use the following commands: ``` r require("devtools") devtools::install_github("ZhiwenT/BCClong", build_vignettes = TRUE) library("BCClong") ``` To list all functions available in this package: ```r ls("package:BCClong") ``` ## Components Currently, there are 5 function in this package which are __*BCC.multi*__, __*BayesT*__, __*model.selection.criteria*__, __*traceplot*__, __*trajplot*__. __*BCC.multi*__ function performs clustering on mixed-type (continuous, discrete and categorical) longitudinal markers using Bayesian consensus clustering method with MCMC sampling and provide a summary statistics for the computed model. This function will take in a data set and multiple parameters and output a BCC model with summary statistics. __*BayesT*__ function assess the model goodness of fit by calculate the discrepancy measure T(\bm{\y}, \bm{\Theta}) with following steps (a) Generate T.obs based on the MCMC samples (b) Generate T.rep based on the posterior distribution of the parameters (c) Compare T.obs and T.rep, and calculate the P values. __*model.selection.criteria*__ function calculates DIC and WAIC for the fitted model __*traceplot*__ function visualize the MCMC chain for model parameters __*trajplot*__ function plot the longitudinal trajectory of features by local and global clustering ## Pre-process (Setting up) In this example, the `PBCseq` data in the `mixAK` package was used as it is a public data set. The variables used here include lbili, platelet, and spiders. Of these three variables, lbili and platelet are continuous variables, while spiders are categorical variables. ```{r, warning=F, message=F} library(BCClong) library(mixAK) data(PBC910) ``` ## Fit BCC Model Using BCC.multi Function Here, We used a binomial distribution for spiders marker, a gaussian distribution for the lbili marker and poisson distribution for platelet, respectively. The number of clusters was set to 2. All hyper parameters were set to default. We ran the model with 12,000 iterations, discard the first 2,000 sample, and kept every 10th sample. This resulted in 1,000 samples for each model parameter. The MCMC sampling process took about 30 minutes on an AMD Ryzen$^{TM}$ 5 5600X desktop computer. Since this program takes a long time to run, here we will use the pre-compile result in this example. The pre-compiled data file can be found here (`./inst/extdata/PBCseq.rds`) ```r set.seed(89) fit.BCC2 <- BCC.multi( mydat = list(PBC910$lbili,PBC910$platelet,PBC910$spiders), dist = c("gaussian","poisson","binomial"), id = list(PBC910$id), time = list(PBC910$month), formula =list(y ~ time + (1|id),y ~ time + (1|id), y ~ time + (1|id)), num.cluster = 2, burn.in = 100, thin = 10, per = 10, max.iter = 200) ``` To run the pre-compiled result, please download the PBCseq.rds object from github under `inst/extdata/` folder. 
Then run the following code. ```{r, warning=F, message=F} # pre-compiled result fit.BCC2 <- readRDS("../inst/extdata/PBCseq.rds") ``` ## Printing Summary Statistics for key model parameters To print the summary statistics for all parameters ```r fit.BCC2$summary.stat ``` To print the proportion \pi for each cluster (mean, sd, 2.5% and 97.5% percentile) geweke statistics (geweke.stat) between -2 and 2 suggests the parameters converge ```r fit.BCC2$summary.stat$PPI ``` The code below prints out all major parameters ```{r, warning=F, message=F} print(fit.BCC2$summary.stat$PPI) print(fit.BCC2$summary.stat$ALPHA) print(fit.BCC2$cluster.global) print(fit.BCC2$cluster.local[[1]]) print(fit.BCC2$cluster.local[[2]]) print(fit.BCC2$cluster.local[[3]]) ``` ## Visualize Clusters We can use the __*traceplot*__ function to plot the MCMC process and the __*trajplot*__ function to plot the trajectory for each feature. ```{r, warning=F, message=F, fig.height= 6, fig.width= 12, fig.align='center'} gp1 <- trajplot(fit=fit.BCC2,feature.ind=1,which.cluster = "local.cluster", title= bquote(paste("Local Clustering (",hat(alpha)[1] ==.(round(fit.BCC2$alpha[1],2)),")")), xlab="months",ylab="lbili",color=c("#00BA38", "#619CFF")) gp2 <- trajplot(fit=fit.BCC2,feature.ind=2,which.cluster = "local.cluster", title= bquote(paste("Local Clustering (",hat(alpha)[2] ==.(round(fit.BCC2$alpha[2],2)),")")), xlab="months",ylab="platelet",color=c("#00BA38", "#619CFF")) gp3 <- trajplot(fit=fit.BCC2,feature.ind=3,which.cluster = "local.cluster", title= bquote(paste("Local Clustering (",hat(alpha)[3] ==.(round(fit.BCC2$alpha[3],2)),")")), xlab="months",ylab="spiders",color=c("#00BA38", "#619CFF")) gp4 <- trajplot(fit=fit.BCC2,feature.ind=1,which.cluster = "global.cluster", title="Global Clustering", xlab="months",ylab="lbili",color=c("#00BA38", "#619CFF")) gp5 <- trajplot(fit=fit.BCC2,feature.ind=2,which.cluster = "global.cluster", title="Global Clustering", xlab="months",ylab="platelet",color=c("#00BA38", "#619CFF")) gp6 <- trajplot(fit=fit.BCC2,feature.ind=3,which.cluster = "global.cluster", title="Global Clustering", xlab="months",ylab="spiders",color=c("#00BA38", "#619CFF")) library(cowplot) #dev.new(width=180, height=120) plot_grid(gp1, gp2,gp3,gp4,gp5,gp6, labels=c("(A)", "(B)", "(C)", "(D)", "(E)", "(F)"), ncol = 3, align = "v" ) ``` ## Package References [Tan, Z., Shen, C., Lu, Z. (2022) BCClong: an R package for performing Bayesian Consensus Clustering model for clustering continuous, discrete and categorical longitudinal data.](https://github.com/ZhiwenT/BCClong) ```{r} sessionInfo() ```
/scratch/gouwar.j/cran-all/cranData/BCClong/inst/doc/MixedTypeData.Rmd
--- title: "ContinuousData" author: "Zhiwen Tan" output: rmarkdown::html_vignette: toc: true number_sections: false vignette: > %\VignetteIndexEntry{ContinuousData} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ## Introduction `BCClong` is an R package for performing Bayesian Consensus Clustering (BCC) model for clustering continuous, discrete and categorical longitudinal data, which are commonly seen in many clinical studies. This document gives a tour of BCClong package. see `help(package = "BCClong")` for more information and references provided by `citation("BCClong")` To download **BCClong**, use the following commands: ``` r require("devtools") devtools::install_github("ZhiwenT/BCClong", build_vignettes = TRUE) library("BCClong") ``` To list all functions available in this package: ```r ls("package:BCClong") ``` ## Components Currently, there are 5 function in this package which are __*BCC.multi*__, __*BayesT*__, __*model.selection.criteria*__, __*traceplot*__, __*trajplot*__. __*BCC.multi*__ function performs clustering on mixed-type (continuous, discrete and categorical) longitudinal markers using Bayesian consensus clustering method with MCMC sampling and provide a summary statistics for the computed model. This function will take in a data set and multiple parameters and output a BCC model with summary statistics. __*BayesT*__ function assess the model goodness of fit by calculate the discrepancy measure T(\bm{\y}, \bm{\Theta}) with following steps (a) Generate T.obs based on the MCMC samples (b) Generate T.rep based on the posterior distribution of the parameters (c) Compare T.obs and T.rep, and calculate the P values. __*model.selection.criteria*__ function calculates DIC and WAIC for the fitted model __*traceplot*__ function visualize the MCMC chain for model parameters __*trajplot*__ function plot the longitudinal trajectory of features by local and global clustering ## Pre-process (Setting up) In this example, the `epileptic.qol` data set from `joinrRML` package was used. The variables used here include `anxiety score`, `depress score` and `AEP score`. All of the variables are continuous. 
```{r, warning=F, message=F, fig.height= 5, fig.width= 8, fig.align='center', fig.cap= "Spaghtti plot for each marker"} library(BCClong) library(joineRML) library(ggplot2) library(cowplot) # import data from joineRML library (use ?epileptic.qol to see details) data(epileptic.qol) # convert days to months epileptic.qol$time_month <- epileptic.qol$time/30.25 # Sort by ID and time epileptic.qol <- epileptic.qol[order(epileptic.qol$id,epileptic.qol$time_month),] ## Make Spaghetti Plots to Visualize p1 <- ggplot(data =epileptic.qol, aes(x =time_month, y = anxiety, group = id))+ geom_point() + geom_line() + geom_smooth(method = "loess", size = 1.5,group =1,se = FALSE, span=2) + theme(legend.position = "none", plot.title = element_text(size = 20, face = "bold"), axis.text=element_text(size=20), axis.title=element_text(size=20), axis.text.x = element_text(angle = 0 ), strip.text.x = element_text(size = 20, angle = 0), strip.text.y = element_text(size = 20,face="bold")) + xlab("Time (months)") + ylab("anxiety") p2 <- ggplot(data =epileptic.qol, aes(x =time_month, y = depress, group = id))+ geom_point() + geom_line() + geom_smooth(method = "loess", size = 1.5,group =1,se = FALSE, span=2) + theme(legend.position = "none", plot.title = element_text(size = 20, face = "bold"), axis.text=element_text(size=20), axis.title=element_text(size=20), axis.text.x = element_text(angle = 0 ), strip.text.x = element_text(size = 20, angle = 0), strip.text.y = element_text(size = 20,face="bold")) + xlab("Time (months)") + ylab("depress") p3 <- ggplot(data =epileptic.qol, aes(x =time_month, y = aep, group = id))+ geom_point() + geom_line() + geom_smooth(method = "loess", size = 1.5,group =1,se = FALSE, span=2) + theme(legend.position = "none", plot.title = element_text(size = 20, face = "bold"), axis.text=element_text(size=20), axis.title=element_text(size=20), axis.text.x = element_text(angle = 0 ), strip.text.x = element_text(size = 20, angle = 0), strip.text.y = element_text(size = 20,face="bold")) + xlab("Time (months)") + ylab("aep") plot_grid(p1,NULL,p2,NULL,p3,NULL,labels=c("(A)","", "(B)","","(C)",""), nrow = 1, align = "v", rel_widths = c(1,0.1,1,0.1,1,0.1)) epileptic.qol$anxiety_scale <- scale(epileptic.qol$anxiety) epileptic.qol$depress_scale <- scale(epileptic.qol$depress) epileptic.qol$aep_scale <- scale(epileptic.qol$aep) dat <- epileptic.qol ``` ## Choose Best Number Of Clusters We can compute the mean adjusted adherence to determine the number of clusters using the code below. Since this program takes a long time to run, this chunk of code will not run in this tutorial file. 
```r # computed the mean adjusted adherence to determine the number of clusters set.seed(20220929) alpha.adjust <- NULL DIC <- WAIC <- NULL for (k in 1:5){ fit.BCC <- BCC.multi ( mydat = list(dat$anxiety_scale,dat$depress_scale,dat$aep_scale), dist = c("gaussian"), id = list(dat$id), time = list(dat$time), formula =list(y ~ time + (1 |id)), num.cluster = k, initials= NULL, burn.in = 1000, thin = 10, per = 100, max.iter = 2000) alpha.adjust <- c(alpha.adjust, fit.BCC$alpha.adjust) res <- model.selection.criteria(fit.BCC, fast_version=0) DIC <- c(DIC,res$DIC) WAIC <- c(WAIC,res$WAIC)} num.cluster <- 1:5 par(mfrow=c(1,3)) plot(num.cluster[2:5], alpha.adjust, type="o",cex.lab=1.5,cex.axis=1.5,cex.main=1.5,lwd=2, xlab="Number of Clusters", ylab="mean adjusted adherence",main="mean adjusted adherence") plot(num.cluster, DIC, type="o",cex=1.5, cex.lab=1.5,cex.axis=1.5,cex.main=1.5,lwd=2, xlab="Number of Clusters",ylab="DIC",main="DIC") plot(num.cluster, WAIC, type="o",cex=1.5, cex.lab=1.5,cex.axis=1.5,cex.main=1.5,lwd=2, xlab="Number of Clusters",ylab="WAIC",main="WAIC") ``` ## Fit BCC Model Using BCC.multi Function Here, We used gaussian distribution for all three markers. The number of clusters was set to 2 because it has highest mean adjusted adherence. All hyper parameters were set to default. For the purpose of this tutorial, the MCMC iteration will be set to a small number to minimize the compile time and the result will be read from the pre-compiled RDS file.(The pre-compiled data file can be found here (`./inst/extdata/epil*.rds`)) ```r # Fit the final model with the number of cluster 2 (largest mean adjusted adherence) set.seed(20220929) fit.BCC2 <- BCC.multi ( mydat = list(dat$anxiety_scale,dat$depress_scale,dat$aep_scale), dist = c("gaussian"), id = list(dat$id), time = list(dat$time), formula =list(y ~ time + (1|id)), num.cluster = 2, burn.in = 10, # number of samples discarded thin = 1, # thinning per = 10, # output information every "per" iteration max.iter = 30) # maximum number of iteration set.seed(20220929) fit.BCC2b <- BCC.multi ( mydat = list(dat$anxiety_scale,dat$depress_scale,dat$aep_scale), dist = c("gaussian"), id = list(dat$id), time = list(dat$time), formula =list(y ~ time + (1 + time|id)), num.cluster = 2, burn.in = 10, thin = 1, per = 10, max.iter = 30) set.seed(20220929) fit.BCC2c <- BCC.multi ( mydat = list(dat$anxiety_scale,dat$depress_scale,dat$aep_scale), dist = c("gaussian"), id = list(dat$id), time = list(dat$time), formula =list(y ~ time + time2 + (1 + time|id)), num.cluster = 2, burn.in = 10, thin = 1, per = 10, max.iter = 30) ``` Load the pre-compiled results ```{r, warning=F, message=F} fit.BCC2 <- readRDS(file = "../inst/extdata/epil1.rds") fit.BCC2b <- readRDS(file = "../inst/extdata/epil2.rds") fit.BCC2c <- readRDS(file = "../inst/extdata/epil3.rds") fit.BCC2b$cluster.global <- factor(fit.BCC2b$cluster.global, labels=c("Cluster 1","Cluster 2")) table(fit.BCC2$cluster.global, fit.BCC2b$cluster.global) fit.BCC2c$cluster.global <- factor(fit.BCC2c$cluster.global, labels=c("Cluster 1","Cluster 2")) table(fit.BCC2$cluster.global, fit.BCC2c$cluster.global) ``` ## Printing Summary Statistics for key model parameters To print the summary statistics for all parameters ```r fit.BCC2$summary.stat ``` To print the proportion \pi for each cluster (mean, sd, 2.5% and 97.5% percentile) geweke statistics (geweke.stat) between -2 and 2 suggests the parameters converge ```r fit.BCC2$summary.stat$PPI ``` The code below prints out all major parameters ```{r, warning=F, 
message=F} print(fit.BCC2$N) print(fit.BCC2$summary.stat$PPI) print(fit.BCC2$summary.stat$ALPHA) print(fit.BCC2$summary.stat$GA) print(fit.BCC2$summary.stat$SIGMA.SQ.U) print(fit.BCC2$summary.stat$SIGMA.SQ.E) table(fit.BCC2$cluster.global) table(fit.BCC2$cluster.local[[1]]) table(fit.BCC2$cluster.local[[2]]) table(fit.BCC2$cluster.local[[3]]) ``` ## Visualize Clusters We can use the __*traceplot*__ function to plot the MCMC process and the __*trajplot*__ function to plot the trajectory for each feature. ```{r, warning=F, message=F, fig.height=5, fig.width=8, fig.align='center'} #=====================================================# # Trace-plot for key model parameters #=====================================================# traceplot(fit=fit.BCC2, parameter="PPI",ylab="pi",xlab="MCMC samples") traceplot(fit=fit.BCC2, parameter="ALPHA",ylab="alpha",xlab="MCMC samples") traceplot(fit=fit.BCC2,cluster.indx = 1, feature.indx=1,parameter="GA",ylab="GA",xlab="MCMC samples") traceplot(fit=fit.BCC2,cluster.indx = 1, feature.indx=2,parameter="GA",ylab="GA",xlab="MCMC samples") traceplot(fit=fit.BCC2,cluster.indx = 1, feature.indx=3,parameter="GA",ylab="GA",xlab="MCMC samples") traceplot(fit=fit.BCC2,cluster.indx = 2, feature.indx=1,parameter="GA",ylab="GA",xlab="MCMC samples") traceplot(fit=fit.BCC2,cluster.indx = 2, feature.indx=2,parameter="GA",ylab="GA",xlab="MCMC samples") traceplot(fit=fit.BCC2,cluster.indx = 2, feature.indx=3,parameter="GA",ylab="GA",xlab="MCMC samples") ``` ```{r, warning=F, message=F, fig.width=12, fig.height=6, fig.align='center'} #=====================================================# # Trajectory plot for features #=====================================================# gp1 <- trajplot(fit=fit.BCC2,feature.ind=1, which.cluster = "local.cluster", title= bquote(paste("Local Clustering (",hat(alpha)[1] ==.(round(fit.BCC2$alpha[1],2)),")")), xlab="time (months)",ylab="anxiety",color=c("#00BA38", "#619CFF")) gp2 <- trajplot(fit=fit.BCC2,feature.ind=2, which.cluster = "local.cluster", title= bquote(paste("Local Clustering (",hat(alpha)[2] ==.(round(fit.BCC2$alpha[2],2)),")")), xlab="time (months)",ylab="depress",color=c("#00BA38", "#619CFF")) gp3 <- trajplot(fit=fit.BCC2,feature.ind=3, which.cluster = "local.cluster", title= bquote(paste("Local Clustering (",hat(alpha)[3] ==.(round(fit.BCC2$alpha[3],2)),")")), xlab="time (months)",ylab="aep",color=c("#00BA38", "#619CFF")) gp4 <- trajplot(fit=fit.BCC2,feature.ind=1, which.cluster = "global.cluster", title="Global Clustering",xlab="time (months)",ylab="anxiety",color=c("#00BA38", "#619CFF")) gp5 <- trajplot(fit=fit.BCC2,feature.ind=2, which.cluster = "global.cluster", title="Global Clustering",xlab="time (months)",ylab="depress",color=c("#00BA38", "#619CFF")) gp6 <- trajplot(fit=fit.BCC2,feature.ind=3, which.cluster = "global.cluster", title="Global Clustering", xlab="time (months)",ylab="aep",color=c("#00BA38", "#619CFF")) library(cowplot) plot_grid(gp1, gp2,gp3,gp4,gp5,gp6, labels=c("(A)", "(B)", "(C)", "(D)", "(E)", "(F)"), ncol = 3, align = "v" ) plot_grid(gp1,NULL,gp2,NULL,gp3,NULL, gp4,NULL,gp5,NULL,gp6,NULL, labels=c("(A)","", "(B)","","(C)","","(D)","","(E)","","(F)",""), nrow = 2, align = "v", rel_widths = c(1,0.1,1,0.1,1,0.1)) ``` ## Posterior Check The __*BayesT*__ function will be used for posterior check. Here we used the pre-compiled results, un-comment the line `res <- BayesT(fit=fit.BCC2)` to try your own. 
The pre-compiled data file can be found here (`./inst/extdata/conRes.rds`) For this function, the p-value between 0.3 to 0.7 was consider reasonable. In the scatter plot, the data pints should be evenly distributed around y = x. ```{r, message=F, warning=F, fig.height=5, fig.width=7, fig.align='center'} #res <- BayesT(fit=fit.BCC2) res <- readRDS(file = "../inst/extdata/conRes.rds") plot(log(res$T.obs),log(res$T.rep),xlim=c(8.45,8.7), cex=1.5, ylim=c(8.45,8.7),xlab="Observed T statistics (in log scale)", ylab = "Predicted T statistics (in log scale)") abline(0,1,lwd=2,col=2) p.value <- sum(res$T.rep > res$T.obs)/length(res$T.rep) p.value fit.BCC2$cluster.global <- factor(fit.BCC2$cluster.global,labels=c("Cluster 1","Cluster 2")) boxplot(fit.BCC2$postprob ~ fit.BCC2$cluster.global,ylim=c(0,1),xlab="",ylab="Posterior Cluster Probability") ``` ## Package References [Tan, Z., Shen, C., Lu, Z. (2022) BCClong: an R package for performing Bayesian Consensus Clustering model for clustering continuous, discrete and categorical longitudinal data.](https://github.com/ZhiwenT/BCClong) ```{r} sessionInfo() ```
/scratch/gouwar.j/cran-all/cranData/BCClong/vignettes/ContinuousData.Rmd
--- title: "MixedTypeData" author: "Zhiwen Tan" output: rmarkdown::html_vignette: toc: true number_sections: false vignette: > %\VignetteIndexEntry{MixedTypeData} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ## Introduction `BCClong` is an R package for performing Bayesian Consensus Clustering (BCC) model for clustering continuous, discrete and categorical longitudinal data, which are commonly seen in many clinical studies. This document gives a tour of BCClong package. see `help(package = "BCClong")` for more information and references provided by `citation("BCClong")` To download **BCClong**, use the following commands: ``` r require("devtools") devtools::install_github("ZhiwenT/BCClong", build_vignettes = TRUE) library("BCClong") ``` To list all functions available in this package: ```r ls("package:BCClong") ``` ## Components Currently, there are 5 function in this package which are __*BCC.multi*__, __*BayesT*__, __*model.selection.criteria*__, __*traceplot*__, __*trajplot*__. __*BCC.multi*__ function performs clustering on mixed-type (continuous, discrete and categorical) longitudinal markers using Bayesian consensus clustering method with MCMC sampling and provide a summary statistics for the computed model. This function will take in a data set and multiple parameters and output a BCC model with summary statistics. __*BayesT*__ function assess the model goodness of fit by calculate the discrepancy measure T(\bm{\y}, \bm{\Theta}) with following steps (a) Generate T.obs based on the MCMC samples (b) Generate T.rep based on the posterior distribution of the parameters (c) Compare T.obs and T.rep, and calculate the P values. __*model.selection.criteria*__ function calculates DIC and WAIC for the fitted model __*traceplot*__ function visualize the MCMC chain for model parameters __*trajplot*__ function plot the longitudinal trajectory of features by local and global clustering ## Pre-process (Setting up) In this example, the `PBCseq` data in the `mixAK` package was used as it is a public data set. The variables used here include lbili, platelet, and spiders. Of these three variables, lbili and platelet are continuous variables, while spiders are categorical variables. ```{r, warning=F, message=F} library(BCClong) library(mixAK) data(PBC910) ``` ## Fit BCC Model Using BCC.multi Function Here, We used a binomial distribution for spiders marker, a gaussian distribution for the lbili marker and poisson distribution for platelet, respectively. The number of clusters was set to 2. All hyper parameters were set to default. We ran the model with 12,000 iterations, discard the first 2,000 sample, and kept every 10th sample. This resulted in 1,000 samples for each model parameter. The MCMC sampling process took about 30 minutes on an AMD Ryzen$^{TM}$ 5 5600X desktop computer. Since this program takes a long time to run, here we will use the pre-compile result in this example. The pre-compiled data file can be found here (`./inst/extdata/PBCseq.rds`) ```r set.seed(89) fit.BCC2 <- BCC.multi( mydat = list(PBC910$lbili,PBC910$platelet,PBC910$spiders), dist = c("gaussian","poisson","binomial"), id = list(PBC910$id), time = list(PBC910$month), formula =list(y ~ time + (1|id),y ~ time + (1|id), y ~ time + (1|id)), num.cluster = 2, burn.in = 100, thin = 10, per = 10, max.iter = 200) ``` To run the pre-compiled result, please download the PBCseq.rds object from github under `inst/extdata/` folder. 
Then run the following code. ```{r, warning=F, message=F} # pre-compiled result fit.BCC2 <- readRDS("../inst/extdata/PBCseq.rds") ``` ## Printing Summary Statistics for key model parameters To print the summary statistics for all parameters ```r fit.BCC2$summary.stat ``` To print the proportion \pi for each cluster (mean, sd, 2.5% and 97.5% percentile) geweke statistics (geweke.stat) between -2 and 2 suggests the parameters converge ```r fit.BCC2$summary.stat$PPI ``` The code below prints out all major parameters ```{r, warning=F, message=F} print(fit.BCC2$summary.stat$PPI) print(fit.BCC2$summary.stat$ALPHA) print(fit.BCC2$cluster.global) print(fit.BCC2$cluster.local[[1]]) print(fit.BCC2$cluster.local[[2]]) print(fit.BCC2$cluster.local[[3]]) ``` ## Visualize Clusters We can use the __*traceplot*__ function to plot the MCMC process and the __*trajplot*__ function to plot the trajectory for each feature. ```{r, warning=F, message=F, fig.height= 6, fig.width= 12, fig.align='center'} gp1 <- trajplot(fit=fit.BCC2,feature.ind=1,which.cluster = "local.cluster", title= bquote(paste("Local Clustering (",hat(alpha)[1] ==.(round(fit.BCC2$alpha[1],2)),")")), xlab="months",ylab="lbili",color=c("#00BA38", "#619CFF")) gp2 <- trajplot(fit=fit.BCC2,feature.ind=2,which.cluster = "local.cluster", title= bquote(paste("Local Clustering (",hat(alpha)[2] ==.(round(fit.BCC2$alpha[2],2)),")")), xlab="months",ylab="platelet",color=c("#00BA38", "#619CFF")) gp3 <- trajplot(fit=fit.BCC2,feature.ind=3,which.cluster = "local.cluster", title= bquote(paste("Local Clustering (",hat(alpha)[3] ==.(round(fit.BCC2$alpha[3],2)),")")), xlab="months",ylab="spiders",color=c("#00BA38", "#619CFF")) gp4 <- trajplot(fit=fit.BCC2,feature.ind=1,which.cluster = "global.cluster", title="Global Clustering", xlab="months",ylab="lbili",color=c("#00BA38", "#619CFF")) gp5 <- trajplot(fit=fit.BCC2,feature.ind=2,which.cluster = "global.cluster", title="Global Clustering", xlab="months",ylab="platelet",color=c("#00BA38", "#619CFF")) gp6 <- trajplot(fit=fit.BCC2,feature.ind=3,which.cluster = "global.cluster", title="Global Clustering", xlab="months",ylab="spiders",color=c("#00BA38", "#619CFF")) library(cowplot) #dev.new(width=180, height=120) plot_grid(gp1, gp2,gp3,gp4,gp5,gp6, labels=c("(A)", "(B)", "(C)", "(D)", "(E)", "(F)"), ncol = 3, align = "v" ) ``` ## Package References [Tan, Z., Shen, C., Lu, Z. (2022) BCClong: an R package for performing Bayesian Consensus Clustering model for clustering continuous, discrete and categorical longitudinal data.](https://github.com/ZhiwenT/BCClong) ```{r} sessionInfo() ```
/scratch/gouwar.j/cran-all/cranData/BCClong/vignettes/MixedTypeData.Rmd
#' Compute node-marginal likelihoods of a DAG model
#'
#' This function computes the log-marginal likelihood of the conditional distribution of variable \code{node}
#' given its parents in \code{DAG} under a DAG-Wishart prior on the DAG model-parameters
#'
#' @param node numerical label of \eqn{node} in \code{DAG}
#' @param DAG \eqn{(q,q)} adjacency matrix of \code{DAG}
#' @param tXX \eqn{(q,q)} matrix \eqn{X'X} with \eqn{X} the \eqn{(n,q)} data matrix
#' @param n number of observations (rows) in the data matrix \eqn{X}
#' @param a shape hyperparameter of the DAG Wishart prior
#' @param U position hyperparameter of the DAG Wishart prior
#'
#' @return The logarithm of the marginal likelihood of \code{node}
#' @noRd
#' @keywords internal
DW_nodelml <- function(node, DAG, tXX, n, a, U) {
  j <- node
  pa <- pa(j, DAG)
  q <- ncol(tXX)
  a.star <- (a + length(pa) - q + 1)
  Upost <- U + tXX
  if (length(pa) == 0) {
    U_jj <- U[j,j]
    Upost_jj <- Upost[j,j]
    prior.normcost <- -lgamma(a.star/2) + a.star/2*log(U_jj/2)
    post.normcost <- -lgamma(a.star/2 + n/2) + (a.star/2 + n/2)*log(Upost_jj/2)
  } else {
    U_paj.j <- U[pa,j]
    U_jj <- U[j,j] - t(U_paj.j)%*%chol2inv(chol(U[pa,pa]))%*%U_paj.j
    Upost_paj.j <- Upost[pa,j]
    Upost_jj <- Upost[j,j] - t(Upost_paj.j)%*%chol2inv(chol(Upost[pa,pa]))%*%Upost_paj.j
    prior.normcost <- -lgamma(a.star/2) + a.star/2*log(U_jj/2) +
      0.5*log(det(as.matrix(U[pa,pa])))
    post.normcost <- -lgamma(a.star/2 + n/2) + (a.star/2 + n/2)*log(Upost_jj/2) +
      0.5*log(det(as.matrix(Upost[pa,pa])))
  }
  nodelml <- -n/2*log(2*pi) + prior.normcost - post.normcost
  return(nodelml)
}
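
# A minimal usage sketch (kept as a comment, since this function is internal
# and top-level code in a package file would run at load time). The three-node
# DAG, the sample size n = 50 and the hyperparameters a = q, U = diag(q) are
# illustrative assumptions mirroring the conventions of the exported examples:
# q <- 3; DAG <- matrix(0, q, q); DAG[1, 2] <- 1   # single edge 1 -> 2
# set.seed(1); X <- matrix(rnorm(50 * q), 50, q); tXX <- crossprod(X)
# DW_nodelml(node = 2, DAG = DAG, tXX = tXX, n = 50, a = q, U = diag(q))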
/scratch/gouwar.j/cran-all/cranData/BCDAG/R/DW_nodelml.R
#' Accept/reject the proposed DAG given the current DAG (internal function)
#'
#' This function computes the Metropolis Hastings acceptance rate for \code{proposedDAG} given \code{currentDAG}
#' and then accepts/rejects \code{proposedDAG} based on the Metropolis Hastings acceptance probability.
#' \code{proposedDAG} is a direct successor of \code{currentDAG}, which was obtained by applying an operator of type InsertD, DeleteD or ReverseD to \code{currentDAG}.
#' The two DAGs only differ by one edge \eqn{u -> v} which has been inserted/deleted/reversed in \code{currentDAG}.
#'
#' @param tXX \eqn{(q,q)} matrix \eqn{X'X} with \eqn{X} the \eqn{(n,q)} data matrix
#' @param n number of observations (rows) in the data matrix \eqn{X}
#' @param currentDAG \eqn{(q,q)} adjacency matrix of current DAG
#' @param proposedDAG \eqn{(q,q)} adjacency matrix of proposed DAG
#' @param node nodes \eqn{u} and \eqn{v} involved in the modified edge \eqn{u -> v}
#' @param op.type the type of operator applied to \code{currentDAG} to obtain \code{proposedDAG}
#' @param a shape hyperparameter of the DAG Wishart prior
#' @param U position hyperparameter of the DAG Wishart prior
#' @param w prior probability of edge inclusion
#' @param current.opcard number of direct successors of \code{currentDAG}
#' @param proposed.opcard number of direct successors of \code{proposedDAG}
#' @noRd
#' @keywords internal
#'
#' @return A Boolean indicating whether \code{proposedDAG} has been accepted (\code{TRUE}) or not (\code{FALSE})
acceptreject_DAG <- function(tXX, n, currentDAG, proposedDAG, node, op.type, a, U, w,
                             current.opcard, proposed.opcard) {
  logprior.ratios <- c(log(w/(1-w)), log((1-w)/w), log(1))
  logprior.ratio <- logprior.ratios[op.type]
  logproposal.ratio <- log(current.opcard) - log(proposed.opcard)
  if (op.type != 3) {
    current_lml <- DW_nodelml(node, currentDAG, tXX, n, a, U)
    proposed_lml <- DW_nodelml(node, proposedDAG, tXX, n, a, U)
  } else {
    current_lml <- DW_nodelml(node[1], currentDAG, tXX, n, a, U) +
      DW_nodelml(node[2], currentDAG, tXX, n, a, U)
    proposed_lml <- DW_nodelml(node[1], proposedDAG, tXX, n, a, U) +
      DW_nodelml(node[2], proposedDAG, tXX, n, a, U)
  }
  acp.ratio <- min(0, proposed_lml - current_lml + logprior.ratio + logproposal.ratio)
  is.accepted <- log(stats::runif(1)) < acp.ratio
  return(is.accepted)
}
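
# A hedged usage sketch (comment only; all pieces are internal to the package).
# The op.type coding assumed here matches the prior-ratio vector above:
# 1 = InsertD, 2 = DeleteD, 3 = ReverseD. For an insertion of u -> v, `node`
# is the child v, whose parent set is the only one that changes:
# q <- 3; set.seed(1); X <- matrix(rnorm(50 * q), 50, q); tXX <- crossprod(X)
# currentDAG <- matrix(0, q, q)
# proposedDAG <- currentDAG; proposedDAG[1, 2] <- 1   # insert 1 -> 2
# acceptreject_DAG(tXX, n = 50, currentDAG, proposedDAG, node = 2, op.type = 1,
#                  a = q, U = diag(q), w = 0.2,
#                  current.opcard = get_opcard(currentDAG),
#                  proposed.opcard = get_opcard(proposedDAG))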
/scratch/gouwar.j/cran-all/cranData/BCDAG/R/acceptreject_DAG.R
#' Transform adjacency matrix into graphNEL object
#'
#' Function to transform an adjacency matrix into a graphNEL object.
#' @param DAG Adjacency matrix of a DAG
#'
#' @return A graphNEL object
#' @export
#'
#' @examples # Randomly generate DAG
#' q <- 4; w <- 0.2
#' set.seed(123)
#' DAG <- rDAG(q,w)
#' as_graphNEL(DAG)
as_graphNEL <- function(DAG) {
  q <- ncol(DAG)
  nodes <- as.character(1:q)
  ft <- which(DAG != 0, arr.ind = TRUE)
  graphNEL <- graph::ftM2graphNEL(ft, V = nodes, edgemode = "directed")
  return(graphNEL)
}
/scratch/gouwar.j/cran-all/cranData/BCDAG/R/as_graphNEL.R
#' Convert strings into matrices
#'
#' This function restores matrices from string objects.
#' It is used in several functions, such as \code{get_causaleffect} and \code{get_edgeprobs}, when the output of \code{learn_DAG} was obtained with \code{save.memory = TRUE}
#'
#' @param string a string to convert into matrix
#' @param separator symbol used to separate elements in the input string
#'
#' @return The (q,q) original matrix from which the string vector was created using the internal function \code{bd_encode}
#'
#' @noRd
#' @keywords internal
bd_decode <- function(string, separator = ";") {
  vec4mat <- as.numeric(strsplit(string, separator)[[1]])
  q <- length(vec4mat)
  matrix(vec4mat, ncol = sqrt(q))
}
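
# A minimal sketch of the decoding step (the string below is an illustrative
# encoding of a 2x2 adjacency matrix, filled column-wise):
# bd_decode("0;0;1;0")   # matrix(c(0, 0, 1, 0), ncol = 2): the single edge 1 -> 2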
/scratch/gouwar.j/cran-all/cranData/BCDAG/R/bd_decode.R
#' Convert matrix into strings
#'
#' This function converts matrices into string objects.
#' It is used in \code{learn_DAG} when \code{save.memory = TRUE}
#'
#' @param matrix a matrix to convert into string
#' @param separator symbol used to separate elements of the matrix in the string
#'
#' @return A string representing the adjacency matrix.
#' @noRd
#' @keywords internal
bd_encode <- function(matrix, separator = ";") {
  paste(matrix, collapse = separator)
}
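
# A minimal round-trip sketch: encoding and then decoding recovers the matrix
# m <- matrix(c(0, 0, 1, 0), 2, 2)        # adjacency matrix of 1 -> 2
# identical(bd_decode(bd_encode(m)), m)   # TRUE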
/scratch/gouwar.j/cran-all/cranData/BCDAG/R/bd_encode.R
#' Compute causal effects between variables
#'
#' This function computes the total joint causal effect on variable \code{response} consequent to an intervention on variables \code{targets}
#' for a given DAG structure and parameters \code{(D,L)}
#'
#' We assume that the joint distribution of random variables \eqn{X_1, \dots, X_q} is zero-mean Gaussian with covariance matrix Markov w.r.t. a Directed Acyclic Graph (DAG).
#' In addition, the allied Structural Equation Model (SEM) representation of a Gaussian DAG-model allows one to express the covariance matrix as a function of the (Cholesky) parameters \code{(D,L)},
#' collecting the conditional variances and regression coefficients of the SEM.
#'
#' The total causal effect on a given variable of interest (\code{response}) consequent to a joint intervention on a set of variables (\code{targets})
#' is defined according to Pearl's do-calculus theory and under the Gaussian assumption can be expressed as a function of parameters \code{(D,L)}.
#'
#' @author Federico Castelletti and Alessandro Mascaro
#'
#' @references J. Pearl (2000). \emph{Causality: Models, Reasoning, and Inference}. Cambridge University Press, Cambridge.
#' @references F. Castelletti and A. Mascaro (2021). Structural learning and estimation of joint causal effects among network-dependent variables. \emph{Statistical Methods and Applications}, Advance publication.
#' @references P. Nandy, M.H. Maathuis and T. Richardson (2017). Estimating the effect of joint interventions from observational data in sparse high-dimensional settings. \emph{Annals of Statistics} 45(2), 647-674.
#'
#' @param targets numerical vector with labels of target nodes
#' @param response numerical label of response variable
#' @param L \eqn{(q,q)} matrix of regression-coefficient parameters
#' @param D \eqn{(q,q)} diagonal matrix of conditional-variance parameters
#'
#' @return The joint total causal effect, represented as a vector of the same length as \code{targets}
#' @export
#'
#' @examples # Randomly generate a DAG and the DAG-parameters
#' q = 8
#' w = 0.2
#' set.seed(123)
#' DAG = rDAG(q = q, w = w)
#' outDL = rDAGWishart(n = 1, DAG = DAG, a = q, U = diag(1, q))
#' L = outDL$L; D = outDL$D
#' # Total causal effect on node 1 of an intervention on {6,7}
#' causaleffect(targets = c(6,7), response = 1, L = L, D = D)
#' # Total causal effect on node 1 of an intervention on {5,7}
#' causaleffect(targets = c(5,7), response = 1, L = L, D = D)
#'
causaleffect <- function(targets, response, L, D){
  targets_check <- is.numeric(targets) & is.vector(targets)
  if (targets_check) {
    targets_check <- targets_check & prod(targets %% 1 == 0) &
      prod(targets > 0 & targets <= ncol(L)) == 1
  }
  response_check <- is.numeric(response) & length(response) == 1
  if (response_check) {
    response_check <- response_check & (response %% 1 == 0) &
      prod((response > 0 & response <= ncol(L))) == 1
  }
  L_check <- is.numeric(L) & is.matrix(L) & dim(L)[1] == dim(L)[2]
  L_dagcheck <- gRbase::is.DAG((L - diag(diag(L)) != 0)*1)
  # Check D (not L): it must be a square, diagonal, non-negative matrix
  D_check <- is.numeric(D) & is.matrix(D) & dim(D)[1] == dim(D)[2] &
    all(D[lower.tri(D)] == 0, D[upper.tri(D)] == 0) & all(D >= 0)
  if(targets_check == FALSE) stop("targets must be a vector containing the position of intervention targets in the dataset")
  if(response_check == FALSE) stop("response must be the numerical value indicating the position of the response variable in the dataset")
  if(L_check == FALSE) stop("L must be a qxq matrix of regression coefficient parameters")
  if(L_dagcheck == FALSE) stop("L is not a matrix of coefficients of an acyclic SEM")
  if(D_check == FALSE) stop("D must be a qxq diagonal matrix of conditional variance parameters")
  if(length(unique(targets)) != length(targets)) warning("Your vector of targets does not contain distinct elements")
  y <- response
  L_I <- L
  L_I[,targets] <- 0
  diag(L_I) <- 1
  Sigma_I <- solve(t(L_I)) %*% D %*% solve(L_I)
  effects <- sapply(targets, function(x) Sigma_I[x,y]/Sigma_I[x,x])
  return(effects)
}
/scratch/gouwar.j/cran-all/cranData/BCDAG/R/causaleffect.R
#' Find the family of a node in a DAG (internal function)
#'
#' This function finds the family (union of \code{node} and its parents) of \code{node} in \code{DAG}
#'
#' @param node numerical label of the node in \code{DAG}
#' @param DAG \eqn{(q,q)} adjacency matrix of the DAG
#' @noRd
#' @keywords internal
#'
#' @return A numerical vector with the labels of the family of \code{node} in \code{DAG}
fa <- function(node, DAG) {
  pa <- which(DAG[,node] != 0)
  fa <- c(node, pa)
  return(fa)
}
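
# A minimal sketch: in the DAG 1 -> 2, the family of node 2 is the node itself
# followed by its parents
# DAG <- matrix(0, 2, 2); DAG[1, 2] <- 1
# fa(2, DAG)   # c(2, 1)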
/scratch/gouwar.j/cran-all/cranData/BCDAG/R/fa.R
#' Compute the maximum a posteriori DAG model from the MCMC output
#'
#' This function computes the maximum a posteriori DAG model estimate (MAP) from the MCMC output of \code{learn_DAG}
#'
#' Output of the \code{learn_DAG} function consists of \eqn{S} draws from the joint posterior of DAGs and DAG-parameters in a zero-mean Gaussian DAG-model;
#' see the documentation of \code{learn_DAG} for more details.
#'
#' The Maximum A Posteriori (MAP) model estimate is defined as the DAG visited by the MCMC with the highest associated posterior probability.
#' Each DAG posterior probability is estimated as the frequency of visits of the DAG in the MCMC chain.
#' The MAP estimate is represented through its \eqn{(q,q)} adjacency matrix, with \eqn{(u,v)}-element equal to one whenever the MAP contains \eqn{u -> v},
#' zero otherwise.
#'
#' @author Federico Castelletti and Alessandro Mascaro
#'
#' @references F. Castelletti and A. Mascaro (2021). Structural learning and estimation of joint causal effects among network-dependent variables. \emph{Statistical Methods and Applications}, Advance publication.
#' @references G. Garcia-Donato and M.A. Martinez-Beneito (2013). On sampling strategies in Bayesian variable selection problems with large model spaces. \emph{Journal of the American Statistical Association} 108 340-352.
#'
#' @param learnDAG_output object of class \code{bcdag}
#'
#' @return The \eqn{(q,q)} adjacency matrix of the maximum a posteriori DAG model
#' @export
#' @examples # Randomly generate a DAG and the DAG-parameters
#' q = 8
#' w = 0.2
#' set.seed(123)
#' DAG = rDAG(q = q, w = w)
#' outDL = rDAGWishart(n = 1, DAG = DAG, a = q, U = diag(1, q))
#' L = outDL$L; D = outDL$D
#' Sigma = solve(t(L))%*%D%*%solve(L)
#' # Generate observations from a Gaussian DAG-model
#' n = 200
#' X = mvtnorm::rmvnorm(n = n, sigma = Sigma)
#' # Run the MCMC (Set S = 5000 and burn = 1000 for better results)
#' out_mcmc = learn_DAG(S = 500, burn = 100, a = q, U = diag(1,q)/n, data = X, w = 0.1,
#'                      fast = TRUE, save.memory = FALSE)
#' # Produce the MAP DAG estimate
#' get_MAPdag(out_mcmc)
#'
get_MAPdag <- function(learnDAG_output) {
  if (!methods::is(learnDAG_output, "bcdag")) {
    stop("learnDAG_output must be an object of class bcdag")
  }
  type <- attributes(learnDAG_output)$type
  if (type == "compressed" | type == "compressed and collapsed") {
    dag_code <- as.vector(sapply(learnDAG_output$Graphs, gsub,
                                 pattern = ";", replacement = ""))
    q <- sqrt(nchar(dag_code[[1]]))
  } else {
    Graphs <- learnDAG_output$Graphs
    dag_code <- apply(Graphs, 3, bd_encode, separator = "")
    q <- dim(Graphs)[1]
  }
  uniq_dag <- unique(dag_code)
  map_dagcode <- uniq_dag[which.max(tabulate(match(dag_code, uniq_dag)))]
  map_dagcode <- bd_decode(map_dagcode, separator = "")
  map_dag <- matrix(map_dagcode, ncol = q)
  colnames(map_dag) = rownames(map_dag) = 1:ncol(map_dag)
  return(map_dag)
}
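
# A hedged sketch (assumes the `out_mcmc` object from the example above, run
# with save.memory = FALSE): the MAP estimate is the DAG visited most often,
# so its visit count can be recovered directly from the sampled graphs
# codes <- apply(out_mcmc$Graphs, 3, paste, collapse = "")
# sum(codes == paste(get_MAPdag(out_mcmc), collapse = ""))   # visits of the MAP DAG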
/scratch/gouwar.j/cran-all/cranData/BCDAG/R/get_MAPdag.R
#' Compute the median probability DAG model from the MCMC output
#'
#' This function computes the Median Probability DAG Model estimate (MPM) from the MCMC output of \code{learn_DAG}
#'
#' Output of the \code{learn_DAG} function consists of \eqn{S} draws from the joint posterior of DAGs and DAG-parameters in a zero-mean Gaussian DAG-model;
#' see the documentation of \code{learn_DAG} for more details.
#'
#' The Median Probability DAG Model estimate (MPM) is obtained by including all edges whose posterior probability exceeds 0.5.
#' The posterior probability of inclusion of \eqn{u -> v} is estimated as the frequency of DAGs visited by the MCMC which contain the directed edge \eqn{u -> v};
#' see also function \code{get_edgeprobs} and the corresponding documentation.
#'
#' @author Federico Castelletti and Alessandro Mascaro
#'
#' @references F. Castelletti and A. Mascaro (2021). Structural learning and estimation of joint causal effects among network-dependent variables. \emph{Statistical Methods and Applications}, Advance publication.
#' @references M.M. Barbieri and J.O. Berger (2004). Optimal predictive model selection. \emph{The Annals of Statistics} 32 870-897.
#'
#' @param learnDAG_output object of class \code{bcdag}
#'
#' @return The \eqn{(q,q)} adjacency matrix of the median probability DAG model
#' @export
#'
#' @examples # Randomly generate a DAG and the DAG-parameters
#' q = 8
#' w = 0.2
#' set.seed(123)
#' DAG = rDAG(q = q, w = w)
#' outDL = rDAGWishart(n = 1, DAG = DAG, a = q, U = diag(1, q))
#' L = outDL$L; D = outDL$D
#' Sigma = solve(t(L))%*%D%*%solve(L)
#' # Generate observations from a Gaussian DAG-model
#' n = 200
#' X = mvtnorm::rmvnorm(n = n, sigma = Sigma)
#' # Run the MCMC (Set S = 5000 and burn = 1000 for better results)
#' out_mcmc = learn_DAG(S = 500, burn = 100, a = q, U = diag(1,q)/n, data = X, w = 0.1,
#'                      fast = TRUE, save.memory = FALSE)
#' # Produce the MPM DAG estimate
#' get_MPMdag(out_mcmc)
#'
get_MPMdag <- function(learnDAG_output) {
  if (!methods::is(learnDAG_output, "bcdag")) {
    stop("learnDAG_output must be an object of class bcdag")
  }
  edgeprobs <- get_edgeprobs(learnDAG_output)
  MPM <- round(edgeprobs)
  colnames(MPM) = rownames(MPM) = 1:ncol(MPM)
  return(MPM)
}
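
# A hedged sketch (assumes the `out_mcmc` object from the example above): up to
# ties at exactly 0.5, the MPM coincides with thresholding the posterior edge
# probabilities at 0.5
# probs <- get_edgeprobs(out_mcmc)
# all(get_MPMdag(out_mcmc) == (probs > 0.5) * 1)   # typically TRUE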
/scratch/gouwar.j/cran-all/cranData/BCDAG/R/get_MPMdag.R
#' Estimate total causal effects from the MCMC output
#'
#' This function provides causal effect estimates from the output of \code{learn_DAG}
#'
#' Output of the \code{learn_DAG} function consists of \eqn{S} draws from the joint posterior of DAGs and DAG-parameters in a zero-mean Gaussian DAG-model;
#' see the documentation of \code{learn_DAG} for more details.
#'
#' The total causal effect on a given variable of interest (\code{response}) consequent to a joint intervention on a set of variables (\code{targets})
#' is defined according to Pearl's do-calculus theory and under the Gaussian assumption can be expressed as a function of parameters \code{(D,L)},
#' representing a (Cholesky) reparameterization of the covariance matrix.
#'
#' Specifically, to each intervened variable a causal effect coefficient is associated, and the posterior distribution of the latter can be recovered from the posterior draws
#' of the DAG parameters returned by \code{learn_DAG}. For each coefficient a sample of size \eqn{S} from its posterior is available. The
#' Bayesian Model Average (BMA) estimate (obtained as the sample mean of the \eqn{S} draws) is returned in the \code{post_mean} component of the output.
#'
#' Notice that, whenever implemented with \code{collapse = TRUE}, \code{learn_DAG} returns the marginal posterior distribution of DAGs only.
#' In this case, \code{get_causaleffect} preliminarily performs posterior inference of DAG parameters by drawing samples from the posterior of \code{(D,L)}.
#'
#' Print, summary and plot methods are available for this function. \code{print} returns the values of the prior hyperparameters used in the \code{learn_DAG} function. \code{summary} returns, for each causal effect parameter, the marginal posterior mean and quantiles for different \eqn{\alpha} levels, and posterior probabilities of negative, null and positive causal effects. \code{plot} provides graphical summaries (boxplot and histogram of the distribution) for the posterior of each causal effect parameter.
#'
#' @author Federico Castelletti and Alessandro Mascaro
#'
#' @references J. Pearl (2000). \emph{Causality: Models, Reasoning, and Inference}. Cambridge University Press, Cambridge.
#' @references F. Castelletti and A. Mascaro (2021). Structural learning and estimation of joint causal effects among network-dependent variables. \emph{Statistical Methods and Applications}, Advance publication.
#' @references P. Nandy, M.H. Maathuis and T. Richardson (2017). Estimating the effect of joint interventions from observational data in sparse high-dimensional settings. \emph{Annals of Statistics} 45(2), 647-674.
#'
#' @param targets numerical \eqn{(p,1)} vector with labels of target nodes
#' @param response numerical label of response variable
#' @param learnDAG_output object of class \code{bcdag}
#' @param verbose if \code{TRUE}, progress bar of MCMC sampling is displayed
#'
#' @return An S3 object of class \code{bcdagCE} containing \eqn{S} draws from the joint posterior distribution of the \eqn{p} causal effect coefficients, organized into an \eqn{(S,p)} matrix, posterior means and credible intervals (under different \eqn{(1-\alpha)} levels) for each causal effect coefficient, and marginal posterior probabilities of positive, null and negative causal effects.
#'
#' @export
#'
#' @examples # Randomly generate a DAG and the DAG-parameters
#' q = 8
#' w = 0.2
#' set.seed(123)
#' DAG = rDAG(q = q, w = w)
#' outDL = rDAGWishart(n = 1, DAG = DAG, a = q, U = diag(1, q))
#' L = outDL$L; D = outDL$D
#' Sigma = solve(t(L))%*%D%*%solve(L)
#' n = 200
#' # Generate observations from a Gaussian DAG-model
#' X = mvtnorm::rmvnorm(n = n, sigma = Sigma)
#' # Run the MCMC (set S = 5000 and burn = 1000 for better results)
#' out_mcmc = learn_DAG(S = 500, burn = 100, a = q, U = diag(1,q)/n, data = X, w = w,
#'                      fast = TRUE, save.memory = FALSE)
#' head(out_mcmc$Graphs)
#' head(out_mcmc$L)
#' head(out_mcmc$D)
#' # Compute the BMA estimate of coefficients representing
#' # the causal effect on node 1 of an intervention on {3,4}
#' out_causal = get_causaleffect(learnDAG_output = out_mcmc, targets = c(3,4), response = 1)
#' out_causal$post_mean
#'
#' # Methods
#' print(out_causal)
#' summary(out_causal)
#' plot(out_causal)
get_causaleffect <- function(learnDAG_output, targets, response, verbose = TRUE) {
  ## Input check
  learnDAGinput_check <- methods::is(learnDAG_output, "bcdag")
  targets_check <- is.numeric(targets) & is.vector(targets)
  if (targets_check) {
    targets_check <- targets_check & all(targets %% 1 == 0) &
      prod(targets > 0 & targets <= dim(learnDAG_output$Graphs)[2]) == 1
  }
  response_check <- is.numeric(response) & length(response) == 1
  if (response_check) {
    response_check <- response_check & (response %% 1 == 0) &
      prod(response > 0 & response <= dim(learnDAG_output$Graphs)[2]) == 1
  }
  if(learnDAGinput_check == FALSE) stop("learnDAG_output must be an object of class bcdag")
  type <- attributes(learnDAG_output)$type
  input <- attributes(learnDAG_output)$input
  S <- input$S
  n <- nrow(input$data)
  q <- ncol(input$data)
  X <- scale(input$data, scale = FALSE)
  tXX <- crossprod(X)
  if(targets_check == FALSE) stop("targets must be a vector containing the position of intervention targets in the dataset")
  if(response_check == FALSE) stop("response must be the numerical value indicating the position of the response variable in the dataset")
  collapsed <- type == "compressed and collapsed" | type == "collapsed"
  if (type == "compressed" | type == "compressed and collapsed") {
    # If option save.memory == TRUE
    Graphs <- array(dim = c(q,q,S))
    L <- array(dim = c(q,q,S))
    D <- array(dim = c(q,q,S))
    if (collapsed == FALSE) {
      for (i in 1:S) {
        Graphs[,,i] <- bd_decode(learnDAG_output$Graphs[i])
        L[,,i] <- bd_decode(learnDAG_output$L[i])
        D[,,i] <- bd_decode(learnDAG_output$D[i])
      }
    } else {
      if (verbose == TRUE) {
        cat("\nSampling parameters...")
        pb <- utils::txtProgressBar(min = 2, max = S, style = 3)
      }
      for (i in 1:S) {
        Graphs[,,i] <- bd_decode(learnDAG_output$Graphs[i])
        postparams <- rDAGWishart(1, Graphs[,,i], input$a + n, input$U + tXX)
        L[,,i] <- postparams$L
        D[,,i] <- postparams$D
        if (verbose == TRUE) {
          utils::setTxtProgressBar(pb, i)
        }
      }
      if (verbose == TRUE) close(pb)
    }
  } else {
    # If option save.memory == FALSE
    Graphs <- learnDAG_output$Graphs
    if (collapsed == FALSE) {
      L <- learnDAG_output$L
      D <- learnDAG_output$D
    } else {
      L <- array(0, c(q,q,S))
      D <- array(0, c(q,q,S))
      if (verbose == TRUE) {
        cat("\nSampling parameters...")
        pb <- utils::txtProgressBar(min = 2, max = S, style = 3)
      }
      for (i in 1:S) {
        postparams <- rDAGWishart(1, Graphs[,,i], input$a + n, input$U + tXX)
        L[,,i] <- postparams$L
        D[,,i] <- postparams$D
        if (verbose == TRUE) {
          utils::setTxtProgressBar(pb, i)
        }
      }
      if (verbose == TRUE) close(pb)
    }
  }
  causaleffects <- matrix(0, ncol = length(targets), nrow = S)
  colnames(causaleffects) <- paste0("h = ", targets)
  for (i in 1:S) {
    causaleffects[i,] <- causaleffect(targets, response, L[,,i], D[,,i])
  }
  postmean <- base::apply(causaleffects, 2, mean)
  postquantiles <- base::apply(causaleffects, 2, stats::quantile,
                               c(0.025, 0.25, 0.5, 0.75, 0.975))
  Probs <- matrix(0, ncol = 3, nrow = length(targets))
  Probs[,1] <- base::colMeans(causaleffects < 0)
  Probs[,2] <- base::colMeans(causaleffects == 0)
  Probs[,3] <- base::colMeans(causaleffects > 0)
  rownames(Probs) <- paste0("h = ", targets)
  colnames(Probs) <- c("<0", "=0", ">0")
  out_ce <- list(causaleffects = causaleffects, post_mean = postmean,
                 post_ci = postquantiles, Probs = Probs)
  input <- c(input, targets = targets, response = response)
  out <- new_bcdagCE(out_ce, input = input, type = type)
  return(out)
}
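
# A hedged sketch (assumes the `out_mcmc` object from the example above and
# that the bcdagCE output preserves the list components by name): the BMA
# estimate in `post_mean` is the column mean of the S posterior draws
# ce <- get_causaleffect(out_mcmc, targets = c(3, 4), response = 1)
# all.equal(colMeans(ce$causaleffects), ce$post_mean)   # TRUE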
/scratch/gouwar.j/cran-all/cranData/BCDAG/R/get_causaleffect.R
#' MCMC diagnostics
#'
#' This function provides diagnostics of convergence for the MCMC output of the \code{learn_DAG} function.
#'
#' Function \code{learn_DAG} implements a Markov Chain Monte Carlo (MCMC) algorithm for structure learning and posterior inference of Gaussian DAGs.
#' Output of the algorithm is a collection of \eqn{S} DAG structures (represented as \eqn{(q,q)} adjacency matrices) and DAG parameters \eqn{(D,L)}
#' approximately drawn from the joint posterior.
#' In addition, if \code{learn_DAG} is implemented with \code{collapse = TRUE}, only the approximate marginal posterior of DAGs (represented by the collection of \eqn{S} DAG structures) is returned;
#' see the documentation of \code{learn_DAG} for more details.
#'
#' Diagnostics of convergence for the MCMC output are conducted by monitoring across MCMC iterations: (1) the number of edges in the DAGs;
#' (2) the posterior probability of edge inclusion for each possible edge \eqn{u -> v}.
#' With regard to (1), a traceplot of the number of edges in the DAGs visited by the MCMC chain at each step \eqn{s = 1, ..., S} is first provided as the output of the function.
#' The absence of trends in the plot can provide information on a genuine convergence of the MCMC chain.
#' In addition, the traceplot of the average number of edges in the DAGs visited up to time \eqn{s}, for \eqn{s = 1, ..., S}, is also returned.
#' The convergence of the curve around a "stable" average size generally suggests good convergence of the algorithm.
#' With regard to (2), for each edge \eqn{u -> v}, the posterior probability at time \eqn{s}, for \eqn{s = 1, ..., S}, can be estimated
#' as the proportion of DAGs visited by the MCMC up to time \eqn{s} which contain the directed edge \eqn{u -> v}.
#' Output is organized in \eqn{q} plots (one for each node \eqn{v = 1, ..., q}), each summarizing the posterior probabilities of edges \eqn{u -> v}, \eqn{u = 1, ..., q}.
#' If the number of nodes is larger than 30, the traceplot of a random sample of 30 nodes is returned.
#'
#' @param learnDAG_output object of class \code{bcdag}
#' @param ask Boolean argument passed to par() for visualization
#' @param nodes numerical vector indicating those nodes for which we want to compute the posterior probability of edge inclusion
#'
#' @return A collection of plots summarizing the behavior of the number of edges and the posterior probabilities of edge inclusion computed from the MCMC output.
#' @export
#'
#' @author Federico Castelletti and Alessandro Mascaro
#'
#' @references F. Castelletti and A. Mascaro (2021). Structural learning and estimation of joint causal effects among network-dependent variables. \emph{Statistical Methods and Applications}, Advance publication.
#' @references F. Castelletti (2020). Bayesian model selection of Gaussian Directed Acyclic Graph structures. \emph{International Statistical Review} 88 752-775.
#'
#' @examples # Randomly generate a DAG and the DAG-parameters
#' q = 8
#' w = 0.2
#' set.seed(123)
#' DAG = rDAG(q = q, w = w)
#' outDL = rDAGWishart(n = 1, DAG = DAG, a = q, U = diag(1, q))
#' L = outDL$L; D = outDL$D
#' Sigma = solve(t(L))%*%D%*%solve(L)
#' n = 200
#' # Generate observations from a Gaussian DAG-model
#' X = mvtnorm::rmvnorm(n = n, sigma = Sigma)
#' # Run the MCMC for posterior inference of DAGs only (collapse = TRUE)
#' out_mcmc = learn_DAG(S = 5000, burn = 1000, a = q, U = diag(1,q)/n, data = X, w = 0.1,
#'                      fast = TRUE, save.memory = FALSE, collapse = TRUE)
#' # Produce diagnostic plots
#' get_diagnostics(out_mcmc)
get_diagnostics <- function(learnDAG_output, ask = TRUE, nodes = integer(0)) {
  if (!methods::is(learnDAG_output, "bcdag")) {
    stop("learnDAG_output must be an object of class bcdag")
  }
  oldpar <- graphics::par(no.readonly = TRUE)
  on.exit(graphics::par(oldpar))
  type <- attributes(learnDAG_output)$type
  input <- attributes(learnDAG_output)$input
  if (type == "compressed" | type == "compressed and collapsed") {
    S <- length(learnDAG_output$Graphs)
    q <- ncol(bd_decode(learnDAG_output$Graphs[1]))
    Graphs <- array(dim = c(q,q,S))
    for (i in 1:S) {
      Graphs[,,i] <- bd_decode(learnDAG_output$Graphs[i])
    }
  } else {
    S <- input$S
    q <- ncol(input$data)
    Graphs <- learnDAG_output$Graphs
  }
  ## Graph size
  Graphsizes <- vector("double", S)
  for (i in 1:S) {
    Graphsizes[i] <- sum(Graphs[,,i])
  }
  ## Cumulative edge probabilities
  cumedgesum <- Graphs
  cumedgeprob <- Graphs
  for (i in 2:S) {
    cumedgesum[,,i] <- (cumedgesum[,,i] + cumedgesum[,,i-1])
    cumedgeprob[,,i] <- cumedgesum[,,i]/i
  }
  tracematrices <- vector("list", q)
  whc_iter <- lapply(1:q, function(i) as.matrix(1:S))
  whcs <- vector("list", q)
  for (j in 1:q) {
    whc <- which(cumedgeprob[,j,S] >= 0.05)
    whcs[[j]] <- whc[order(cumedgeprob[whc, j, S], decreasing = TRUE)]
    if (length(whcs[[j]]) != 1) {
      tracematrices[[j]] <- t(as.matrix(cumedgeprob[whcs[[j]],j,]))
    } else {
      tracematrices[[j]] <- as.matrix(cumedgeprob[whcs[[j]],j,])
    }
    if (S > 10000) {
      whc_iter[[j]] <- seq(1, S, length.out = 10000)
      if (length(whc) > 1) {
        tracematrices[[j]] <- tracematrices[[j]][whc_iter[[j]],]
      } else {
        tracematrices[[j]] <- as.matrix(tracematrices[[j]][whc_iter[[j]]])
      }
    }
  }
  ## Plotting
  graphics::par(mfrow = c(1,2), ask = ask)
  base::plot(1:S, Graphsizes, type = "l", xlab = "Iteration", ylab = "graph size",
             main = "Graph size traceplot", col = 1)
  base::plot(1:S, cumsum(Graphsizes)/(1:S), type = "l", xlab = "Iteration",
             ylab = "average graph size", main = "Graph size running mean", col = 2)
  if (length(nodes) == 0) {
    graphics::par(ask = ask)
    if (q <= 30) {
      graphics::par(mfrow = c(2,3))
      for (j in 1:q) {
        graphics::matplot(y = tracematrices[[j]], x = whc_iter[[j]], type = "l",
                          xlab = "Iteration", ylab = "prob. of inclusion",
                          main = paste("Into node", j), ylim = c(0,1))
        if (length(whcs[[j]]) != 0)
          graphics::legend("topleft", legend = utils::head(whcs[[j]], 6),
                           col = 1:max(length(whcs[[j]]), 6), lty = 1, cex = 0.75)
      }
    } else {
      graphics::par(mfrow = c(2,3))
      randomnodes <- sample(1:q, 30)
      for (j in randomnodes) {
        graphics::matplot(y = tracematrices[[j]], x = whc_iter[[j]], type = "l",
                          xlab = "Iteration", ylab = "prob. of inclusion",
                          main = paste("Into node", j), ylim = c(0,1))
        if (length(whcs[[j]]) != 0)
          graphics::legend("topleft", legend = utils::head(whcs[[j]], 6),
                           col = 1:max(length(whcs[[j]]), 6), lty = 1, cex = 0.75)
      }
    }
  } else {
    if (length(nodes) <= 30) {
      for (j in nodes) {
        graphics::par(ask = ask, mfrow = c(1,1))
        graphics::matplot(y = tracematrices[[j]], x = whc_iter[[j]], type = "l",
                          xlab = "Iteration", ylab = "prob. of inclusion",
                          main = paste("Into node", j), ylim = c(0,1))
        if (length(whcs[[j]]) != 0)
          graphics::legend("topleft", legend = utils::head(whcs[[j]], 6),
                           col = 1:max(length(whcs[[j]]), 6), lty = 1, cex = 0.75)
      }
    } else {
      graphics::par(mfrow = c(2,3), ask = ask)
      randomnodes <- sample(nodes, 30)
      for (j in randomnodes) {
        graphics::matplot(y = tracematrices[[j]], x = whc_iter[[j]], type = "l",
                          xlab = "Iteration", ylab = "prob. of inclusion",
                          main = paste("Into node", j), ylim = c(0,1))
        if (length(whcs[[j]]) != 0)
          graphics::legend("topleft", legend = utils::head(whcs[[j]], 6),
                           col = 1:max(length(whcs[[j]]), 6), lty = 1, cex = 0.75)
      }
    }
  }
}
/scratch/gouwar.j/cran-all/cranData/BCDAG/R/get_diagnostics.R
#' Compute posterior probabilities of edge inclusion from the MCMC output
#'
#' This function computes the posterior probability of inclusion for each edge \eqn{u -> v} given the MCMC output of \code{learn_DAG}
#'
#' Output of the \code{learn_DAG} function consists of \eqn{S} draws from the joint posterior of DAGs and DAG-parameters in a zero-mean Gaussian DAG-model;
#' see the documentation of \code{learn_DAG} for more details.
#'
#' The posterior probability of inclusion of \eqn{u -> v} is estimated as the frequency of DAGs visited by the MCMC which contain the directed edge \eqn{u -> v}.
#' Posterior probabilities are collected in a \eqn{(q,q)} matrix with \eqn{(u,v)}-element representing the estimated posterior probability
#' of edge \eqn{u -> v}.
#'
#' @author Federico Castelletti and Alessandro Mascaro
#'
#' @references F. Castelletti and A. Mascaro (2021). Structural learning and estimation of joint causal effects among network-dependent variables. \emph{Statistical Methods and Applications}, Advance publication.
#'
#' @param learnDAG_output object of class \code{bcdag}
#'
#' @return A \eqn{(q,q)} matrix with posterior probabilities of edge inclusion
#' @export
#'
#' @examples # Randomly generate a DAG and the DAG-parameters
#' q = 8
#' w = 0.2
#' set.seed(123)
#' DAG = rDAG(q = q, w = w)
#' outDL = rDAGWishart(n = 1, DAG = DAG, a = q, U = diag(1, q))
#' L = outDL$L; D = outDL$D
#' Sigma = solve(t(L))%*%D%*%solve(L)
#' # Generate observations from a Gaussian DAG-model
#' n = 200
#' X = mvtnorm::rmvnorm(n = n, sigma = Sigma)
#' # Run the MCMC (Set S = 5000 and burn = 1000 for better results)
#' out_mcmc = learn_DAG(S = 500, burn = 100, a = q, U = diag(1,q)/n, data = X, w = 0.1,
#'                      fast = TRUE, save.memory = FALSE)
#' # Compute posterior probabilities of edge inclusion
#' get_edgeprobs(out_mcmc)
#'
get_edgeprobs <- function(learnDAG_output) {
  if (!methods::is(learnDAG_output, "bcdag")) {
    stop("learnDAG_output must be an object of class bcdag")
  }
  type <- attributes(learnDAG_output)$type
  input <- attributes(learnDAG_output)$input
  if (type == "compressed and collapsed" | type == "compressed") {
    S <- length(learnDAG_output$Graphs)
    q <- sqrt(length(bd_decode(learnDAG_output$Graphs[1])))
    Graphs <- array(dim = c(q,q,S))
    for (i in 1:S) {
      Graphs[,,i] <- bd_decode(learnDAG_output$Graphs[i])
    }
  } else {
    Graphs <- learnDAG_output$Graphs
  }
  edgeprobs <- apply(Graphs, c(1,2), mean)
  colnames(edgeprobs) = rownames(edgeprobs) = 1:ncol(edgeprobs)
  return(edgeprobs)
}
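
# A hedged sketch (assumes the `out_mcmc` object from the example above, run
# with save.memory = FALSE): each entry is simply the visit frequency of the
# corresponding edge across the S sampled DAGs
# probs <- get_edgeprobs(out_mcmc)
# probs[1, 2] == mean(out_mcmc$Graphs[1, 2, ])   # TRUE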
/scratch/gouwar.j/cran-all/cranData/BCDAG/R/get_edgeprobs.R
#' Enumerate all neighbors of a DAG
#'
#' This function takes any DAG with \eqn{q} nodes as input and returns all the neighboring DAGs, i.e. all those DAGs that
#' can be reached by the addition, removal or reversal of an edge.
#'
#' @param DAG Adjacency matrix of a DAG
#'
#' @return The \eqn{(q,q,K)} array containing all neighboring DAGs, with \eqn{K} being the total number of neighbors
#' @export
#'
#' @examples # Randomly generate a DAG
#' q <- 4; w <- 0.2
#' set.seed(123)
#' DAG <- rDAG(q,w)
#' # Get neighbors
#' neighbors <- get_neighboringDAGs(DAG)
#' neighbors
get_neighboringDAGs <- function(DAG) {
  if (gRbase::is.DAG(DAG) == FALSE) {
    stop("Input must be a Directed Acyclic Graph")
  }
  A <- DAG
  q <- ncol(A)
  A_na <- A
  diag(A_na) <- NA
  # Define the set of possible operations
  id_set = c()
  dd_set = c()
  rd_set = c()
  ## set of nodes for id
  set_id = which(A_na == 0, TRUE)
  if (length(set_id) != 0) {
    id_set = cbind(1, set_id)
  }
  ## set of nodes for dd
  set_dd = which(A_na == 1, TRUE)
  if (length(set_dd) != 0) {
    dd_set = cbind(2, set_dd)
  }
  ## set of nodes for rd
  set_rd = which(A_na == 1, TRUE)
  if (length(set_rd) != 0) {
    rd_set = cbind(3, set_rd)
  }
  O = rbind(id_set, dd_set, rd_set)
  neighbors <- array(0, c(q, q, nrow(O)))
  isDAGvec <- vector(length = nrow(O))
  for (i in 1:nrow(O)) {
    neighbors[,,i] <- operation(O[i,1], DAG, O[i,2:3])
    isDAGvec[i] <- gRbase::is.DAG(neighbors[,,i])
  }
  # drop = FALSE keeps the 3-d structure even when a single neighbor survives
  neighbors <- neighbors[,, isDAGvec, drop = FALSE]
  return(neighbors)
}
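
# A hedged consistency sketch (uses the `DAG` object from the example above and
# the internal get_opcard() helper): the number of neighbors returned matches
# the operator cardinality
# dim(get_neighboringDAGs(DAG))[3] == get_opcard(DAG)   # TRUE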
/scratch/gouwar.j/cran-all/cranData/BCDAG/R/get_neighboringDAGs.R
#' Find the direct successor DAGs of an input DAG (internal function)
#'
#' @param DAG The input DAG
#'
#' @return The cardinality of the set of DAGs that can be reached by addition, removal or reversal of one edge.
#' @noRd
#' @keywords internal
get_opcard <- function(DAG) {
  A <- DAG
  q <- ncol(A)
  A_na <- A
  diag(A_na) <- NA
  # Define the set of possible operations
  id_set = c()
  dd_set = c()
  rd_set = c()
  ## set of nodes for id
  set_id = which(A_na == 0, TRUE)
  if (length(set_id) != 0) {
    id_set = cbind(1, set_id)
  }
  ## set of nodes for dd
  set_dd = which(A_na == 1, TRUE)
  if (length(set_dd) != 0) {
    dd_set = cbind(2, set_dd)
  }
  ## set of nodes for rd
  set_rd = which(A_na == 1, TRUE)
  if (length(set_rd) != 0) {
    rd_set = cbind(3, set_rd)
  }
  O = rbind(id_set, dd_set, rd_set)
  op.cardvec <- vector(length = nrow(O))
  for (i in 1:nrow(O)) {
    op.cardvec[i] <- gRbase::is.DAG(operation(O[i,1], DAG, O[i,2:3]))
  }
  op.card <- sum(op.cardvec)
  return(op.card)
}
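
# A minimal sketch: for the empty DAG on q nodes, every single-edge insertion
# is acyclic, so the operator cardinality is q * (q - 1)
# q <- 4
# get_opcard(matrix(0, q, q)) == q * (q - 1)   # TRUE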
/scratch/gouwar.j/cran-all/cranData/BCDAG/R/get_opcard.R
utils::globalVariables(c("out", "U", "data"))
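# Note: utils::globalVariables() only declares these names to R CMD check, so
# that their use inside package functions does not trigger "no visible binding
# for global variable" NOTEs; it neither creates nor modifies any object.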
/scratch/gouwar.j/cran-all/cranData/BCDAG/R/globals.R
#' MCMC scheme for Gaussian DAG posterior inference
#'
#' This function implements a Markov Chain Monte Carlo (MCMC) algorithm for structure learning of Gaussian
#' DAGs and posterior inference of DAG model parameters
#'
#' Consider a collection of random variables \eqn{X_1, \dots, X_q} whose distribution is zero-mean multivariate Gaussian with covariance matrix Markov w.r.t. a Directed Acyclic Graph (DAG).
#' Assuming the underlying DAG is unknown (model uncertainty), a Bayesian method for posterior inference on the joint space of DAG structures and parameters can be implemented.
#' The proposed method assigns a prior on each DAG structure through independent Bernoulli distributions, \eqn{Ber(w)}, on the 0-1 elements of the DAG adjacency matrix.
#' Conditionally on a given DAG, a prior on DAG parameters \eqn{(D,L)} (representing a Cholesky-type reparameterization of the covariance matrix) is assigned through a compatible DAG-Wishart prior;
#' see also function \code{rDAGWishart} for more details.
#'
#' Posterior inference on the joint space of DAGs and DAG parameters is carried out through a Partial Analytic Structure (PAS) algorithm.
#' Two steps are iteratively performed for \eqn{s = 1, 2, ...}: (1) update of the DAG through a Metropolis Hastings (MH) scheme;
#' (2) sampling from the posterior distribution of the (updated DAG) parameters.
#' In step (1) the update of the (current) DAG is performed by drawing a new (direct successor) DAG from a suitable proposal distribution. The proposed DAG is obtained by applying a local move (insertion, deletion or edge reversal)
#' to the current DAG and is accepted with probability given by the MH acceptance rate.
#' The latter requires evaluating the proposal distribution at both the current and the proposed DAG, which in turn involves the enumeration of
#' all DAGs that can be obtained by local moves from, respectively, the current and the proposed DAG.
#' Because the ratio of the two proposals is approximately equal to one, and the approximation becomes more precise as \eqn{q} grows, a faster strategy implementing such an approximation is provided with
#' \code{fast = TRUE}. The latter choice is especially recommended for a moderate-to-large number of nodes \eqn{q}.
#'
#' Output of the algorithm is a collection of \eqn{S} DAG structures (represented as \eqn{(q,q)} adjacency matrices) and DAG parameters \eqn{(D,L)} approximately drawn from the joint posterior.
#' The various outputs are organized in \eqn{(q,q,S)} arrays; see also the example below.
#' If the target is DAG learning only, a collapsed sampler implementing only step (1) of the MCMC scheme can be obtained
#' by setting \code{collapse = TRUE}. In this case, the algorithm outputs a collection of \eqn{S} DAG structures only.
#' See also functions \code{get_edgeprobs}, \code{get_MAPdag}, \code{get_MPMdag} for posterior summaries of the MCMC output.
#'
#' Print, summary and plot methods are available for this function. \code{print} provides information about the MCMC output and the values of the input prior hyperparameters. \code{summary} returns, besides the previous information, a \eqn{(q,q)} matrix collecting the marginal posterior probabilities of edge inclusion. \code{plot} returns the estimated Median Probability DAG Model (MPM), a \eqn{(q,q)} heat map with estimated marginal posterior probabilities of edge inclusion, and a barplot summarizing the distribution of the size of DAGs visited by the MCMC.
#'
#' @author Federico Castelletti and Alessandro Mascaro
#'
#' @references F. Castelletti and A. Mascaro (2021). Structural learning and estimation of joint causal effects among network-dependent variables. \emph{Statistical Methods and Applications}, Advance publication.
#' @references F. Castelletti and A. Mascaro (2022). BCDAG: An R package for Bayesian structural and Causal learning of Gaussian DAGs. \emph{arXiv pre-print}, url: https://arxiv.org/abs/2201.12003
#' @references F. Castelletti (2020). Bayesian model selection of Gaussian Directed Acyclic Graph structures. \emph{International Statistical Review} 88 752-775.
#'
#' @param S integer final number of MCMC draws from the posterior of DAGs and parameters
#' @param burn integer initial number of burn-in iterations, needed by the MCMC chain to reach its stationary distribution and not included in the final output
#' @param data \eqn{(n,q)} data matrix
#' @param a common shape hyperparameter of the compatible DAG-Wishart prior, \eqn{a > q - 1}
#' @param U position hyperparameter of the compatible DAG-Wishart prior, a \eqn{(q, q)} s.p.d. matrix
#' @param w edge inclusion probability hyperparameter of the DAG prior in \eqn{[0,1]}
#' @param fast boolean, if \code{TRUE} an approximate proposal for the MCMC moves is implemented
#' @param save.memory boolean, if \code{TRUE} MCMC draws are stored as strings, instead of arrays
#' @param collapse boolean, if \code{TRUE} only structure learning of DAGs is performed
#' @param verbose If \code{TRUE}, progress bars are displayed
#'
#' @return An S3 object of class \code{bcdag} containing \eqn{S} draws from the posterior of DAGs and (if \code{collapse = FALSE}) of DAG parameters \eqn{D} and \eqn{L}. If \code{save.memory = FALSE}, these are stored in three arrays of dimension \eqn{(q,q,S)}. Otherwise, they are stored as strings.
#' @export
#'
#' @examples # Randomly generate a DAG and the DAG-parameters
#' q = 8
#' w = 0.2
#' set.seed(123)
#' DAG = rDAG(q = q, w = w)
#' outDL = rDAGWishart(n = 1, DAG = DAG, a = q, U = diag(1, q))
#' L = outDL$L; D = outDL$D
#' Sigma = solve(t(L))%*%D%*%solve(L)
#' # Generate observations from a Gaussian DAG-model
#' n = 200
#' X = mvtnorm::rmvnorm(n = n, sigma = Sigma)
#'
#' ## Set S = 5000 and burn = 1000 for better results
#'
#' # [1] Run the MCMC for posterior inference of DAGs and parameters (collapse = FALSE)
#' out_mcmc = learn_DAG(S = 50, burn = 10, a = q, U = diag(1,q)/n, data = X, w = 0.1,
#'                      fast = FALSE, save.memory = FALSE, collapse = FALSE)
#' # [2] Run the MCMC for posterior inference of DAGs only (collapse = TRUE)
#' out_mcmc_collapse = learn_DAG(S = 50, burn = 10, a = q, U = diag(1,q)/n, data = X, w = 0.1,
#'                               fast = FALSE, save.memory = FALSE, collapse = TRUE)
#' # [3] Run the MCMC for posterior inference of DAGs only with approximate proposal
#' # distribution (fast = TRUE)
#' # out_mcmc_collapse_fast = learn_DAG(S = 50, burn = 10, a = q, U = diag(1,q)/n, data = X, w = 0.1,
#' #                                    fast = TRUE, save.memory = FALSE, collapse = TRUE)
#' # Compute posterior probabilities of edge inclusion and Median Probability DAG Model
#' # from the MCMC outputs [2] and [3]
#' get_edgeprobs(out_mcmc_collapse)
#' # get_edgeprobs(out_mcmc_collapse_fast)
#' get_MPMdag(out_mcmc_collapse)
#' # get_MPMdag(out_mcmc_collapse_fast)
#'
#' # Methods
#' print(out_mcmc)
#' summary(out_mcmc)
#' plot(out_mcmc)
#'
learn_DAG <- function(S, burn, data, a, U, w, fast = FALSE, save.memory = FALSE, collapse = FALSE, verbose = TRUE) {
  input <- as.list(environment())
  ## Input check
  data_check <- sum(is.na(data)) == 0
  S.burn_check <- is.numeric(c(S, burn)) & length(S) == 1 & length(burn) == 1
  S.burn_check <- if (S.burn_check) {
    S.burn_check & (S %% 1 == 0) & (burn %% 1 == 0)
  } else {
    S.burn_check
  }
  a_check <- is.numeric(a) & (length(a) == 1) & (a > ncol(data) - 1)
  w_check <- is.numeric(w) & (length(w) == 1) & (w <= 1) & (w >= 0)
  U_check <- is.numeric(U) & (dim(U)[1] == dim(U)[2]) & all(eigen(U)$values > 0) & isSymmetric(U)
  U.data_check <- dim(U)[1] == ncol(data)
  if (data_check == FALSE) {
    stop("Data must not contain NAs")
  }
  if (S.burn_check == FALSE) {
    stop("S and burn must be integer numbers")
  }
  if (a_check == FALSE) {
    stop("a must be a number greater than the number of variables minus one")
  }
  if (w_check == FALSE) {
    stop("w must be a number between 0 and 1")
  }
  if (U_check == FALSE) {
    stop("U must be a square symmetric positive definite matrix")
  }
  if (U.data_check == FALSE) {
    stop("U must be a square s.p.d. matrix with dimensions equal to the number of variables")
  }
  n.iter <- input$burn + input$S
  X <- scale(data, scale = FALSE)
  tXX <- crossprod(X)
  n <- dim(data)[1]
  q <- dim(data)[2]
  ## Initialize arrays or vectors depending on save.memory
  if (save.memory == TRUE) {
    # draws are stored as strings produced by bd_encode()
    Graphs <- vector("character", n.iter)
    L <- vector("character", n.iter)
    D <- vector("character", n.iter)
  } else {
    Graphs <- array(0, dim = c(q, q, n.iter))
    L <- array(0, dim = c(q, q, n.iter))
    D <- array(0, dim = c(q, q, n.iter))
  }
  currentDAG <- matrix(0, ncol = q, nrow = q)
  ## iterations
  if (save.memory == FALSE) {
    type = "collapsed"
    if (verbose == TRUE) {
      cat("Sampling DAGs...")
      pb <- utils::txtProgressBar(min = 2, max = n.iter, style = 3)
    }
    for (i in 1:n.iter) {
      prop <- propose_DAG(currentDAG, fast)
      is.accepted <- acceptreject_DAG(tXX, n, currentDAG, prop$proposedDAG, prop$op.node, prop$op.type, a, U, w, prop$current.opcard, prop$proposed.opcard)
      if (is.accepted == TRUE) {
        currentDAG <- prop$proposedDAG
      }
      Graphs[,,i] <- currentDAG
      if (verbose == TRUE) {
        utils::setTxtProgressBar(pb, i)
      }
    }
    if (verbose == TRUE) {
      close(pb)
    }
    if (collapse == FALSE) {
      type = "complete"
      if (verbose == TRUE) {
        cat("\nSampling parameters...")
        pb <- utils::txtProgressBar(min = 2, max = n.iter, style = 3)
      }
      for (i in 1:n.iter) {
        postparams <- rDAGWishart(1, Graphs[,,i], a + n, U + tXX)
        L[,,i] <- postparams$L
        D[,,i] <- postparams$D
        if (verbose == TRUE) {
          utils::setTxtProgressBar(pb, i)
        }
      }
      if (verbose == TRUE) {
        close(pb)
      }
    }
    Graphs <- Graphs[,,(burn+1):n.iter]
    L <- L[,,(burn+1):n.iter]
    D <- D[,,(burn+1):n.iter]
  } else {
    type = "compressed and collapsed"
    if (verbose == TRUE) {
      cat("Sampling DAGs...")
      pb <- utils::txtProgressBar(min = 2, max = n.iter, style = 3)
    }
    for (i in 1:n.iter) {
      prop <- propose_DAG(currentDAG, fast)
      is.accepted <- acceptreject_DAG(tXX, n, currentDAG, prop$proposedDAG, prop$op.node, prop$op.type, a, U, w, prop$current.opcard, prop$proposed.opcard)
      if (is.accepted == TRUE) {
        currentDAG <- prop$proposedDAG
      }
      Graphs[i] <- bd_encode(currentDAG)
      if (verbose == TRUE) {
        utils::setTxtProgressBar(pb, i)
      }
    }
    if (verbose == TRUE) {
      close(pb)
    }
    if (collapse == FALSE) {
      type = "compressed"
      if (verbose == TRUE) {
        cat("\nSampling parameters...")
        pb <- utils::txtProgressBar(min = 2, max = n.iter, style = 3)
      }
      for (i in 1:n.iter) {
        postparams <- rDAGWishart(1, bd_decode(Graphs[i]), a + n, U + tXX)
        L[i] <- bd_encode(postparams$L)
        D[i] <- bd_encode(postparams$D)
        if (verbose == TRUE) {
          utils::setTxtProgressBar(pb, i)
        }
      }
      if (verbose == TRUE) {
        close(pb)
      }
    }
    Graphs <- utils::tail(Graphs, S)
    L <- utils::tail(L, S)
    D <- utils::tail(D, S)
  }
  if (collapse == FALSE) {
    out <- new_bcdag(list(Graphs = Graphs, L = L, D = D), input = input, type = type)
  } else {
    out <- new_bcdag(list(Graphs = Graphs), input = input, type = type)
  }
  return(out)
}
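# Illustrative sketch (assuming `out_mcmc` from the examples above, obtained
# with collapse = FALSE and save.memory = FALSE, and `s` a hypothetical draw
# index): each pair of sampled (L, D) slices yields one posterior draw of the
# precision and covariance matrices via the Cholesky-type reparameterization
# Omega = L D^{-1} L':
#   Omega_s <- out_mcmc$L[,,s] %*% solve(out_mcmc$D[,,s]) %*% t(out_mcmc$L[,,s])
#   Sigma_s <- solve(Omega_s)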
/scratch/gouwar.j/cran-all/cranData/BCDAG/R/learn_DAG.R
#' Protein levels for 68 diagnosed AML patients of subtype M2
#'
#' A dataset containing the protein expression levels of 18 proteins for 68 AML patients of subtype M2 (according to French-American-British (FAB) classification system).
#' The 18 proteins selected are known to be involved in apoptosis and cell cycle regulation according to the KEGG database (Kanehisa et al. 2012).
#' This is a subset of the dataset presented in Kornblau et al. (2009).
#'
#' @format A data frame with 68 rows and 18 variables:
#' \describe{
#'   \item{AKT}{AKT protein, expression level}
#'   \item{AKT.p308}{AKT.p308 protein, expression level}
#'   \item{AKT.p473}{AKT.p473 protein, expression level}
#'   \item{BAD}{BAD protein, expression level}
#'   \item{BAD.p112}{BAD.p112 protein, expression level}
#'   \item{BAD.p136}{BAD.p136 protein, expression level}
#'   \item{BAD.p155}{BAD.p155 protein, expression level}
#'   \item{BAX}{BAX protein, expression level}
#'   \item{BCL2}{BCL2 protein, expression level}
#'   \item{BCLXL}{BCLXL protein, expression level}
#'   \item{CCND1}{CCND1 protein, expression level}
#'   \item{GSK3}{GSK3 protein, expression level}
#'   \item{GSK3.p}{GSK3.p protein, expression level}
#'   \item{MYC}{MYC protein, expression level}
#'   \item{PTEN}{PTEN protein, expression level}
#'   \item{PTEN.p}{PTEN.p protein, expression level}
#'   \item{TP53}{TP53 protein, expression level}
#'   \item{XIAP}{XIAP protein, expression level}
#' }
#' @source \url{http://bioinformatics.mdanderson.org/Supplements/Kornblau-AML-RPPA/aml-rppa.xls}
#' @references Kornblau, S. M., Tibes, R., Qiu, Y. H., Chen, W., Kantarjian, H. M., Andreeff, M., ... & Mills, G. B. (2009). Functional proteomic profiling of AML predicts response and survival. Blood, The Journal of the American Society of Hematology, 113(1), 154-164.
#' @references Kanehisa, M., Goto, S., Sato, Y., Furumichi, M., & Tanabe, M. (2012). KEGG for integration and interpretation of large-scale molecular data sets. Nucleic acids research, 40(D1), D109-D114.
"leukemia"
/scratch/gouwar.j/cran-all/cranData/BCDAG/R/leukemia.R
#' Create new bcdag objects #' #' Internal function used as constructor for S3 objects of class \code{bcdag}, which constitute the output of function \code{learn_DAG()} and the input of functions belonging to the get_ family such as \code{get_causaleffect()}. #' #' @param x A list #' @param input A list containing the inputs given to \code{learn_DAG()} #' @param type A string indicating whether the output produced by \code{learn_DAG()} should be of type "complete", "compressed", "collapsed" or "compressed and collapsed" #' #' @return An S3 object of class \code{bcdag} #' @noRd #' @keywords internal #' #' @author Federico Castelletti and Alessandro Mascaro #' #' @references F. Castelletti and A. Mascaro (2022). BCDAG: An R package for Bayesian structural and Causal learning of Gaussian DAGs. \emph{arXiv pre-print}, url: https://arxiv.org/abs/2201.12003 new_bcdag <- function(x = list(), input = list(), type = "complete") { stopifnot(is.list(x)) stopifnot(is.list(input)) type <- match.arg(type, c("complete", "compressed", "collapsed", "compressed and collapsed")) structure(x, class = "bcdag", type = type, input = input) }
/scratch/gouwar.j/cran-all/cranData/BCDAG/R/new_bcdag.R
#' Create new bcdagCE objects #' #' Internal function used as constructor for S3 objects of class \code{bcdagCE}, #' which constitute the output of function \code{get_causaleffect()}. #' #' @param x A list #' @param input A list containing the inputs given to \code{learn_DAG()} and \code{get_causaleffect()} #' @param type A string indicating whether the output produced by \code{learn_DAG()} was of type "complete", "compressed", "collapsed" or "compressed and collapsed" #' #' @return An S3 object of class \code{bcdagCE} #' @noRd #' @keywords internal #' #' @author Federico Castelletti and Alessandro Mascaro #' #' @references F. Castelletti and A. Mascaro (2022). BCDAG: An R package for Bayesian structural and Causal learning of Gaussian DAGs. \emph{arXiv pre-print}, url: https://arxiv.org/abs/2201.12003 new_bcdagCE <- function(x = list(), input = list(), type = "complete") { stopifnot(is.list(x)) stopifnot(is.list(input)) type <- match.arg(type, c("complete", "compressed", "collapsed", "compressed and collapsed")) structure(x, class = "bcdagCE", type = type, input = input) }
/scratch/gouwar.j/cran-all/cranData/BCDAG/R/new_bcdagCE.R
#' Perform local moves given a DAG (internal function) #' #' This function locally modifies a DAG by inserting (\code{op = 1}), deleting (\code{op = 2}) or reversing (\code{op = 3}) an edge between two \code{nodes} #' #' @param op numerical type in \eqn{{1,2,3}} of the operator applied to \code{DAG} #' @param A \eqn{(q,q)} adjacency matrix of the input DAG #' @param nodes numerical labels of nodes on which the operator is applied, a \eqn{(2,1)} vector #' @noRd #' @keywords internal #' #' @return The \eqn{(q,q)} adjacency matrix of the modified DAG operation <- function(op, A, nodes) { x <- nodes[1] y <- nodes[2] if(op == 1) { A[x,y] = 1 return(A) } if(op == 2) { A[x,y] = 0 return(A) } if(op == 3) { A[x,y] = 0 A[y,x] = 1 return(A) } }
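# Illustrative sketch of the three operators on a 2-node graph:
#   A <- matrix(0, 2, 2)
#   operation(1, A, c(1, 2))                            # insert 1 -> 2: A[1,2] becomes 1
#   operation(2, operation(1, A, c(1, 2)), c(1, 2))     # delete 1 -> 2: back to A
#   operation(3, operation(1, A, c(1, 2)), c(1, 2))     # reverse: now A[2,1] == 1
# Note that acyclicity of the resulting graph is not checked here; callers such
# as propose_DAG() verify it via gRbase::is.DAG().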
/scratch/gouwar.j/cran-all/cranData/BCDAG/R/operation.R
#' Find the parents of a node in a DAG (internal function) #' #' This function finds the set of parents of \code{node} in \code{DAG} #' #' @param node numerical label of the node in \code{DAG} #' @param DAG \eqn{(q,q)} adjacency matrix of the DAG #' @noRd #' @keywords internal #' #' @return A numerical vector with the labels of the parents of \code{node} in \code{DAG} pa <- function(node, DAG) { pa <- which(DAG[,node] != 0) return(pa) }
/scratch/gouwar.j/cran-all/cranData/BCDAG/R/pa.R
#' bcdag object plot
#'
#' This method returns summary plots of the output of \code{learn_DAG()}.
#'
#' @param x a \code{bcdag} object for which a plot is desired
#' @param ask Boolean argument passed to \code{grDevices::devAskNewPage()}; if \code{TRUE}, the user is prompted before each new plot
#' @param ... additional arguments affecting the summary produced
#'
#' @return Plot of the Median Probability DAG, a heatmap of the probabilities of edge inclusion and a histogram of the sizes of the graphs visited by learn_DAG().
#' @export
#'
#' @examples n <- 1000
#' q <- 4
#' DAG <- matrix(c(0,1,1,0,0,0,0,0,0,0,0,0,0,1,1,0), nrow = q)
#'
#' L <- DAG
#' L[L != 0] <- runif(q, 0.2, 1)
#' diag(L) <- c(1,1,1,1)
#' D <- diag(1, q)
#' Sigma <- t(solve(L))%*%D%*%solve(L)
#'
#' a <- 6
#' g <- 1/1000
#' U <- g*diag(1,q)
#' w = 0.2
#'
#' set.seed(1)
#' X <- mvtnorm::rmvnorm(n, sigma = Sigma)
#'
#' out <- learn_DAG(1000, 0, X, a, U, w, fast = TRUE, collapse = TRUE, save.memory = FALSE)
#' plot(out)
plot.bcdag <- function(x, ..., ask = TRUE) {
  learnDAG_output <- x
  if (!methods::is(x, "bcdag")) {
    stop("learnDAG_output must be an object of class bcdag")
  }
  oldpar <- graphics::par(no.readonly = TRUE)
  on.exit(graphics::par(oldpar))
  type = attributes(learnDAG_output)$type
  input = attributes(learnDAG_output)$input
  edgeprobs <- get_edgeprobs(learnDAG_output)
  MPMdag <- get_MPMdag(learnDAG_output)
  Graphsizes <- vector("double", input$S)
  for (i in 1:input$S) {
    if (type == "compressed" | type == "compressed and collapsed") {
      Graphsizes[i] <- sum(bd_decode(learnDAG_output$Graphs[i]))
    } else {
      Graphsizes[i] <- sum(learnDAG_output$Graphs[,,i])
    }
  }
  graphics::par(pty = "s")
  Rgraphviz::plot(as_graphNEL(MPMdag), main = "Median probability DAG")
  grDevices::devAskNewPage(ask = ask)
  cols = grDevices::gray.colors(20, start = 1, end = 0, gamma = 1, alpha = NULL)
  print(lattice::levelplot(t(edgeprobs), xlab = "Into", ylab = "From", col.regions = cols, main = "Probabilities of edge inclusion"))
  print(lattice::histogram(Graphsizes, probability = TRUE, col = "grey", ylab = "% on total", main = "Distribution of DAGs size"))
  grDevices::devAskNewPage(ask = FALSE)
}
/scratch/gouwar.j/cran-all/cranData/BCDAG/R/plot.bcdag.R
#' bcdagCE object plot
#'
#' This method returns summary plots of the output of \code{get_causaleffect()}.
#'
#' @param x a \code{bcdagCE} object for which a plot is desired
#' @param ... additional arguments affecting the summary produced
#' @param which_ce numerical vector specifying the target nodes for which a boxplot and a histogram are produced; by default, all targets are considered
#'
#' @return Boxplot and histogram of the posterior distribution of the causal effects computed using get_causaleffect().
#' @export
#'
#' @examples q = 8
#' w = 0.2
#' set.seed(123)
#' DAG = rDAG(q = q, w = w)
#' outDL = rDAGWishart(n = 1, DAG = DAG, a = q, U = diag(1, q))
#' L = outDL$L; D = outDL$D
#' Sigma = solve(t(L))%*%D%*%solve(L)
#' n = 200
#' # Generate observations from a Gaussian DAG-model
#' X = mvtnorm::rmvnorm(n = n, sigma = Sigma)
#' # Run the MCMC (set S = 5000 and burn = 1000 for better results)
#' out_mcmc = learn_DAG(S = 500, burn = 100, a = q, U = diag(1,q)/n, data = X, w = w,
#'                      fast = TRUE, save.memory = FALSE, verbose = FALSE)
#' out_ce <- get_causaleffect(out_mcmc, targets = c(4,6), response = 1)
#' plot(out_ce)
plot.bcdagCE <- function(x, ..., which_ce = integer(0)) {
  getCE_output <- x
  if (!methods::is(getCE_output, "bcdagCE")) {
    stop("getCE_output must be an object of class bcdagCE")
  }
  type = attributes(getCE_output)$type
  input = attributes(getCE_output)$input
  targets <- as.numeric(input[base::grep("targets", names(input))])
  ntargets <- length(targets)
  # by default, plot the causal effect of every target
  if (length(which_ce) == 0) {
    which_ce <- 1:ntargets
  }
  for (j in which_ce) {
    bw <- lattice::bwplot(getCE_output$causaleffects[,j], xlab = paste0("Causal effect of ", targets[j]))
    hg <- lattice::histogram(getCE_output$causaleffects[,j], xlab = paste0("Causal effect of ", targets[j]), ylab = "Frequency")
    print(bw, split = c(1,1,2,1), more = TRUE)
    print(hg, split = c(2,1,2,1), more = FALSE)
  }
}
/scratch/gouwar.j/cran-all/cranData/BCDAG/R/plot.bcdagCE.R
#' bcdag object print #' #' This method returns a summary of the input given to \code{learn_DAG()} to produce the \code{bcdag} object. #' #' @param x a \code{bcdag} object for which a summary is desired #' @param ... additional arguments affecting the summary produced #' #' @return A printed message listing the inputs given to learn_DAG. #' @export #' #' @examples n <- 1000 #' q <- 4 #' DAG <- matrix(c(0,1,1,0,0,0,0,0,0,0,0,0,0,1,1,0), nrow = q) #' #' L <- DAG #' L[L != 0] <- runif(q, 0.2, 1) #' diag(L) <- c(1,1,1,1) #' D <- diag(1, q) #' Sigma <- t(solve(L))%*%D%*%solve(L) #' #' a <- 6 #' g <- 1/1000 #' U <- g*diag(1,q) #' w = 0.2 #' #' set.seed(1) #' X <- mvtnorm::rmvnorm(n, sigma = Sigma) #' #' out <- learn_DAG(1000, 0, X, a, U, w, fast = TRUE, collapse = TRUE, save.memory = FALSE) #' print(out) print.bcdag <- function(x, ...) { learnDAG_output <- x if (!methods::is(x,"bcdag")) { stop("learnDAG_output must be an object of class bcdag") } type = attributes(learnDAG_output)$type input = attributes(learnDAG_output)$input cat("A ", type, " bcdag object containing", input$S, "draws from", ifelse(type == "collapsed" | type == "compressed and collapsed", "the posterior distribution of DAGs.", "the joint posterior over DAGs, L and D."), "(Burnin =", input$burn, ").", ifelse(type == "compressed" | type == "compressed and collapsed", "\n\nThe output is saved as strings (option save.memory = TRUE)", " ")) cat("\n\nPrior hyperparameters: ", "\nw = ", input$w, "\na = ", input$a, "\nU =\n") print(input$U) }
/scratch/gouwar.j/cran-all/cranData/BCDAG/R/print.bcdag.R
#' bcdagCE object print
#'
#' This method returns a summary of the inputs given to \code{learn_DAG()} and \code{get_causaleffect()} to obtain the \code{bcdagCE} object.
#'
#' @param x a \code{bcdagCE} object for which a summary is desired
#' @param ... additional arguments affecting the summary produced
#'
#' @return A printed message listing the inputs given to learn_DAG and get_causaleffect.
#' @export
#'
#' @examples q = 8
#' w = 0.2
#' set.seed(123)
#' DAG = rDAG(q = q, w = w)
#' outDL = rDAGWishart(n = 1, DAG = DAG, a = q, U = diag(1, q))
#' L = outDL$L; D = outDL$D
#' Sigma = solve(t(L))%*%D%*%solve(L)
#' n = 200
#' # Generate observations from a Gaussian DAG-model
#' X = mvtnorm::rmvnorm(n = n, sigma = Sigma)
#' # Run the MCMC (set S = 5000 and burn = 1000 for better results)
#' out_mcmc = learn_DAG(S = 500, burn = 100, a = q, U = diag(1,q)/n, data = X, w = w,
#'                      fast = TRUE, save.memory = FALSE, verbose = FALSE)
#' out_ce <- get_causaleffect(out_mcmc, targets = c(4,6), response = 1)
#' print(out_ce)
print.bcdagCE <- function(x, ...) {
  getCE_output <- x
  if (!methods::is(x, "bcdagCE")) {
    stop("getCE_output must be an object of class bcdagCE")
  }
  type = attributes(getCE_output)$type
  input = attributes(getCE_output)$input
  targets <- as.numeric(input[base::grep("targets", names(input))])
  cat("A ", type, " bcdagCE object containing", input$S, "draws from the posterior distribution of causal effects of variables ",
      paste(targets, collapse = ", "), "on ", input$response)
  cat("\n\nPrior hyperparameters: ", "\nw = ", input$w, "\na = ", input$a, "\nU =\n")
  print(input$U)
}
/scratch/gouwar.j/cran-all/cranData/BCDAG/R/print.bcdagCE.R
#' MCMC proposal distribution (internal function)
#'
#' This function implements a proposal distribution for the MCMC scheme of \code{learn_DAG}.
#' Given an input \code{DAG}, it first builds the set of all DAGs which can be obtained by applying a local move
#' (insertion, deletion or reversal of one edge) to \code{DAG},
#' that is the set of direct successors of \code{DAG};
#' next, it randomly draws one candidate (proposed) DAG from the so-obtained set.
#' Finally, the set of direct successors of the proposed DAG is constructed.
#' The function returns: the proposed DAG, the type of operator applied to \code{DAG} to obtain the proposed DAG
#' (with value 1 if insertion, 2 if deletion, 3 if reversal),
#' the nodes involved in the local move, the number of direct successors of \code{DAG} and of the proposed DAG.
#' If \code{fast = TRUE} the two numbers of direct successors are approximated by the number of possible operators that can be applied to the DAGs
#' (equal for the two graphs)
#'
#' @param DAG Adjacency matrix of the current DAG
#' @param fast boolean, if \code{TRUE} an approximate proposal is implemented
#' @noRd
#' @return A list containing the \eqn{(q,q)} adjacency matrix of the proposed DAG, the type of applied operator (with values in \eqn{{1,2,3}}), the numerical labels of the nodes involved in the move, the integer number of direct successors of \code{DAG} and of the proposed DAG
propose_DAG <- function(DAG, fast) {
  A <- DAG
  q <- ncol(A)
  A_na <- A
  diag(A_na) <- NA
  # Define the set of possible operations
  # The cardinality of O will change depending on how many edges are present in the DAG
  id_set = c()
  dd_set = c()
  rd_set = c()
  ## set of nodes for id
  set_id = which(A_na == 0, TRUE)
  if (length(set_id) != 0) {
    id_set = cbind(1, set_id)
  }
  ## set of nodes for dd
  set_dd = which(A_na == 1, TRUE)
  if (length(set_dd) != 0) {
    dd_set = cbind(2, set_dd)
  }
  ## set of nodes for rd
  set_rd = which(A_na == 1, TRUE)
  if (length(set_rd) != 0) {
    rd_set = cbind(3, set_rd)
  }
  O = rbind(id_set, dd_set, rd_set)
  # Sample one random operator and verify it produces a DAG
  if (fast == FALSE) {
    proposed.opcardvec <- vector(length = nrow(O))
    for (i in 1:nrow(O)) {
      proposed.opcardvec[i] <- gRbase::is.DAG(operation(O[i,1], DAG, O[i,2:3]))
    }
    proposed.opcard <- sum(proposed.opcardvec)
    # draw uniformly among the valid operations; explicit indexing avoids the
    # 1:x behavior of sample() when which() returns a single integer
    valid.ops <- which(proposed.opcardvec)
    i <- valid.ops[sample.int(length(valid.ops), 1)]
    A_next <- operation(O[i,1], A, O[i,2:3])
    current.opcard <- get_opcard(A_next)
  } else {
    repeat {
      i <- sample(nrow(O), 1)
      A_next <- operation(O[i,1], A, O[i,2:3])
      verify <- gRbase::is.DAG(A_next)
      if (verify == TRUE) {
        break
      }
    }
    proposed.opcard <- nrow(O)
    current.opcard <- nrow(O)
  }
  op.type <- O[i,1]
  if (op.type == 3) {
    op.node <- O[i,-1]
  } else {
    op.node <- O[i,3]
  }
  return(list(proposedDAG = A_next, op.type = op.type, op.node = op.node, current.opcard = current.opcard, proposed.opcard = proposed.opcard))
}
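# Illustrative note on the returned cardinalities: with fast = FALSE,
# `proposed.opcard` counts the direct successors of the current DAG (the set
# from which the candidate is drawn uniformly), while `current.opcard` counts
# the direct successors of the candidate; their ratio supplies the proposal
# term of the Metropolis-Hastings acceptance rate evaluated in
# acceptreject_DAG(). With fast = TRUE both are set to nrow(O), so this term
# reduces to 1.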
/scratch/gouwar.j/cran-all/cranData/BCDAG/R/propose_DAG.R
#' Generate a Directed Acyclic Graph (DAG) randomly #' #' This function randomly generates a Directed Acyclic Graph (DAG) with \code{q} nodes and probability of edge inclusion \code{w}. #' #' #' The \eqn{0-1} adjacency matrix of the DAG is generated by drawing each element in the lower triangular part in \eqn{{0,1}} with probability \eqn{{1-w, w}}. #' Accordingly, the DAG has lower-triangular adjacency matrix and nodes are numerically labeled according to a topological ordering implying \eqn{u > v} whenever \eqn{u -> v}. #' #' @param q number of nodes #' @param w probability of edge inclusion in \eqn{[0,1]} #' #' @return DAG \eqn{(q,q)} adjacency matrix of the generated DAG #' @export #' #' @examples # Randomly generate a DAG on q = 8 nodes with probability of edge inclusion w = 0.2 #' q = 8 #' w = 0.2 #' set.seed(123) #' rDAG(q = q, w = w) #' rDAG = function(q, w){ DAG = matrix(0, q, q); colnames(DAG) = rownames(DAG) = 1:q DAG[lower.tri(DAG)] = stats::rbinom(n = q*(q-1)/2, size = 1, prob = w) return(DAG) }
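# Illustrative sketch: since each of the q(q-1)/2 lower-triangular entries is an
# independent Bernoulli(w) draw, the expected number of edges is w*q*(q-1)/2.
#   q <- 8; w <- 0.2
#   mean(replicate(1000, sum(rDAG(q, w))))   # should be close to w*q*(q-1)/2 = 5.6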
/scratch/gouwar.j/cran-all/cranData/BCDAG/R/rDAG.R
#' Random samples from a compatible DAG-Wishart distribution
#'
#' This function implements a direct sampling from a compatible DAG-Wishart distribution with parameters \code{a} and \code{U}.
#'
#' Assume the joint distribution of random variables \eqn{X_1, \dots, X_q} is zero-mean Gaussian with covariance matrix Markov w.r.t. a Directed Acyclic Graph (DAG).
#' The allied Structural Equation Model (SEM) representation of a Gaussian DAG-model allows one to express the covariance matrix as a function of the (Cholesky) parameters \eqn{(D,L)},
#' collecting the regression coefficients and conditional variances of the SEM.
#'
#' The DAG-Wishart distribution (Cao et al., 2019) with shape hyperparameter \eqn{a = (a_1, ..., a_q)} and position hyperparameter \eqn{U} (a s.p.d. \eqn{(q,q)} matrix) provides a conjugate prior for parameters \eqn{(D,L)}.
#' In addition, to guarantee compatibility among Markov equivalent DAGs (same marginal likelihood), the default choice (here implemented) \eqn{a_j = a + |pa(j)| - q + 1} \eqn{(a > q - 1)}, with \eqn{|pa(j)|} the number of parents of node \eqn{j} in the DAG,
#' was introduced by Peluso and Consonni (2020).
#'
#' @param n number of samples
#' @param DAG \eqn{(q, q)} adjacency matrix of the DAG
#' @param a common shape hyperparameter of the compatible DAG-Wishart, \eqn{a > q - 1}
#' @param U position hyperparameter of the compatible DAG-Wishart, a \eqn{(q, q)} s.p.d. matrix
#'
#' @return A list of two elements: a \eqn{(q,q,n)} array collecting the \eqn{n} sampled matrices \eqn{L} and a \eqn{(q,q,n)} array collecting the \eqn{n} sampled matrices \eqn{D} (for \eqn{n = 1}, two \eqn{(q,q)} matrices are returned)
#' @export
#'
#' @author Federico Castelletti and Alessandro Mascaro
#'
#' @references F. Castelletti and A. Mascaro (2021). Structural learning and estimation of joint causal effects among network-dependent variables. \emph{Statistical Methods and Applications}, Advance publication.
#' @references X. Cao, K. Khare and M. Ghosh (2019). Posterior graph selection and estimation consistency for high-dimensional Bayesian DAG models. \emph{The Annals of Statistics} 47 319-348.
#' @references S. Peluso and G. Consonni (2020). Compatible priors for model selection of high-dimensional Gaussian DAGs. \emph{Electronic Journal of Statistics} 14(2) 4110 - 4132.
#'
#' @examples # Randomly generate a DAG on q = 8 nodes with probability of edge inclusion w = 0.2
#' q = 8
#' w = 0.2
#' set.seed(123)
#' DAG = rDAG(q = q, w = w)
#' # Draw from a compatible DAG-Wishart distribution with parameters a = q and U = diag(1,q)
#' outDL = rDAGWishart(n = 5, DAG = DAG, a = q, U = diag(1, q))
#' outDL
rDAGWishart <- function(n, DAG, a, U) {
  q <- ncol(DAG)
  ajs <- sapply(1:q, function(j) a + sum(DAG[,j] == 1) - q + 1)
  L.array <- array(0, dim = c(q, q, n))
  D.array <- array(0, dim = c(q, q, n))
  for (i in 1:n) {
    params <- lapply(1:q, function(j) rnodeDAGWishart(j, DAG, ajs[j], U))
    sigmas <- sapply(1:q, function(x) params[[x]]$sigmaj)
    L <- lapply(1:q, function(x) params[[x]]$Lj)
    D.array[,,i] <- diag(sigmas)
    for (j in 1:q) {
      whc <- which(DAG[,j] == 1)
      L.array[whc,j,i] <- as.numeric(L[[j]])
    }
    diag(L.array[,,i]) <- 1
  }
  if (n == 1) {
    D.array <- D.array[,,1]
    L.array <- L.array[,,1]
  }
  return(list(D = D.array, L = L.array))
}
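# Illustrative note: the node-specific shapes of the compatible DAG-Wishart,
# a_j = a + |pa(j)| - q + 1, are computed inside the function as
#   ajs <- sapply(1:q, function(j) a + sum(DAG[, j] == 1) - q + 1)
# so that, e.g., with a = q a root node (one with no parents) gets shape a_j = 1.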
/scratch/gouwar.j/cran-all/cranData/BCDAG/R/rDAGWishart.R
#' Draw one observation from a Normal-Inverse-Gamma distribution (internal function)
#'
#' This function performs one draw from the Multivariate-Normal-Inverse-Gamma (prior/posterior) distribution of the parameters of a Normal linear regression model.
#' Response variable is \code{node} and covariates are given by the parents of \code{node} in \code{DAG}.
#' It is implemented node-by-node in \code{rDAGWishart} to obtain draws
#' from a compatible (prior/posterior) DAG-Wishart distribution.
#'
#' @param node numerical label of the node in \code{DAG}
#' @param DAG \eqn{(q,q)} adjacency matrix of the DAG
#' @param aj shape hyperparameter of node \code{node} in the compatible DAG-Wishart, \eqn{a_j = a + |pa(j)| - q + 1}
#' @param U position hyperparameter of the compatible DAG-Wishart, a \eqn{(q, q)} s.p.d. matrix
#'
#' @noRd
#' @keywords internal
#'
#' @return A list with two elements: a vector with one draw for the (vector) regression coefficient and a scalar with one draw for the conditional variance
rnodeDAGWishart <- function(node, DAG, aj, U) {
  j <- node
  pa <- pa(j, DAG)
  out <- list(sigmaj = 0, Lj = 0)
  if (length(pa) == 0) {
    U_jj <- U[j,j]
    out$sigmaj <- stats::rgamma(1, shape = aj/2, rate = U_jj/2)^-1
  } else {
    U_paj.j <- U[pa,j]
    invU_papa <- chol2inv(chol(U[pa,pa]))
    U_jj <- U[j,j] - t(U_paj.j)%*%invU_papa%*%U_paj.j
    out$sigmaj <- stats::rgamma(1, shape = aj/2, rate = U_jj/2)^-1
    out$Lj <- mvtnorm::rmvnorm(1, -invU_papa%*%U_paj.j, out$sigmaj*invU_papa)
  }
  return(out)
}
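# Illustrative note on the scheme implemented above: node-wise, the conditional
# variance sigma_j is drawn as the reciprocal of a Gamma(a_j/2, U_{jj|pa}/2)
# variate, where U_{jj|pa} = U_jj - U_{pa,j}' U_{pa,pa}^{-1} U_{pa,j}; given
# sigma_j, the regression coefficients L_j are multivariate normal with mean
# -U_{pa,pa}^{-1} U_{pa,j} and covariance sigma_j * U_{pa,pa}^{-1}.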
/scratch/gouwar.j/cran-all/cranData/BCDAG/R/rnodeDAGWishart.R
#' bcdag object summaries
#'
#' This method produces summaries of the input and output of the function \code{learn_DAG()}.
#'
#' @param object a \code{bcdag} object for which a summary is desired
#' @param ... additional arguments affecting the summary produced
#'
#' @return A printed message listing the inputs given to learn_DAG and the estimated posterior probabilities of edge inclusion.
#' @export
#'
#' @examples n <- 1000
#' q <- 4
#' DAG <- matrix(c(0,1,1,0,0,0,0,0,0,0,0,0,0,1,1,0), nrow = q)
#'
#' L <- DAG
#' L[L != 0] <- runif(q, 0.2, 1)
#' diag(L) <- c(1,1,1,1)
#' D <- diag(1, q)
#' Sigma <- t(solve(L))%*%D%*%solve(L)
#'
#' a <- 6
#' g <- 1/1000
#' U <- g*diag(1,q)
#' w = 0.2
#'
#' set.seed(1)
#' X <- mvtnorm::rmvnorm(n, sigma = Sigma)
#'
#' out <- learn_DAG(1000, 0, X, a, U, w, fast = TRUE, collapse = TRUE, save.memory = FALSE)
#' summary(out)
summary.bcdag <- function(object, ...) {
  learnDAG_output <- object
  if (!methods::is(learnDAG_output, "bcdag")) {
    stop("learnDAG_output must be an object of class bcdag")
  }
  oldpar <- graphics::par(no.readonly = TRUE)
  on.exit(graphics::par(oldpar))
  type = attributes(learnDAG_output)$type
  input = attributes(learnDAG_output)$input
  hyperparams = list(w = input$w, a = input$a, U = input$U)
  cat("A ", type, " bcdag object containing ", input$S, " draws from",
      ifelse(type == "collapsed" | type == "compressed and collapsed",
             "the posterior distribution of DAGs.",
             "the joint posterior over DAGs, L and D."),
      "(Burnin =", input$burn, ").",
      ifelse(type == "compressed" | type == "compressed and collapsed",
             "\n\nThe output is saved as strings (option save.memory = TRUE)", " "))
  cat("\n\nPrior hyperparameters: ", "\nw = ", input$w, "\na = ", input$a, "\nU =\n")
  print(input$U)
  edgeprobs <- unname(get_edgeprobs(learnDAG_output))
  cat("\nPosterior probabilities of edge inclusion: \n")
  print(round(edgeprobs, 3))
  invisible(list(type = type, S = input$S, burn = input$burn, hyperparams = hyperparams, edgeprobs = edgeprobs))
}
/scratch/gouwar.j/cran-all/cranData/BCDAG/R/summary.bcdag.R
#' bcdagCE object summary
#'
#' This method produces summaries of the input and output of the function \code{get_causaleffect()}.
#'
#' @param object a \code{bcdagCE} object for which a summary is desired
#' @param ... additional arguments affecting the summary produced
#'
#' @return A printed message listing the inputs given to learn_DAG() and get_causaleffect() and summary statistics of the posterior distribution.
#' @export
#'
#' @examples q = 8
#' w = 0.2
#' set.seed(123)
#' DAG = rDAG(q = q, w = w)
#' outDL = rDAGWishart(n = 1, DAG = DAG, a = q, U = diag(1, q))
#' L = outDL$L; D = outDL$D
#' Sigma = solve(t(L))%*%D%*%solve(L)
#' n = 200
#' # Generate observations from a Gaussian DAG-model
#' X = mvtnorm::rmvnorm(n = n, sigma = Sigma)
#' # Run the MCMC (set S = 5000 and burn = 1000 for better results)
#' out_mcmc = learn_DAG(S = 500, burn = 100, a = q, U = diag(1,q)/n, data = X, w = w,
#'                      fast = TRUE, save.memory = FALSE, verbose = FALSE)
#' out_ce <- get_causaleffect(out_mcmc, targets = c(4,6), response = 1)
#' # summary(out_ce)
summary.bcdagCE <- function(object, ...) {
  getCE_output <- object
  if (!methods::is(object, "bcdagCE")) {
    stop("getCE_output must be an object of class bcdagCE")
  }
  type = attributes(getCE_output)$type
  input = attributes(getCE_output)$input
  hyperparams <- list(w = input$w, a = input$a, U = input$U)
  targets <- as.numeric(input[base::grep("targets", names(input))])
  cat("A ", type, " bcdagCE object containing", input$S, "draws from the posterior distribution of causal effects of variables ",
      paste(targets, collapse = ", "), "on ", input$response)
  cat("\n\nPrior hyperparameters: ", "\nw = ", input$w, "\na = ", input$a, "\nU =\n")
  print(input$U)
  cat("\nPosterior means of causal effects: \n")
  print(getCE_output$post_mean)
  cat("\nPosterior quantiles of causal effects: \n")
  print(t(getCE_output$post_ci))
  cat("\nPosterior probability of causal effects being greater, equal or smaller than 0: \n")
  print(getCE_output$Probs)
  invisible(list(type = type, S = input$S, targets = targets, response = input$response, hyperparams = hyperparams, post_mean = getCE_output$post_mean, post_ci = getCE_output$post_ci, probs = getCE_output$Probs))
}
/scratch/gouwar.j/cran-all/cranData/BCDAG/R/summary.bcdagCE.R
## ----include = FALSE---------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) oldpar <- par(no.readonly = TRUE) oldoptions <- options() ## ----setup-------------------------------------------------------------------- library(BCDAG) ## ----------------------------------------------------------------------------- set.seed(1) q <- 10 w <- 0.2 DAG <- rDAG(q,w) ## ----printDAG----------------------------------------------------------------- DAG ## ----------------------------------------------------------------------------- a <- q U <- diag(1,q) outDL <- rDAGWishart(n=1, DAG, a, U) class(outDL) ## ----------------------------------------------------------------------------- L <- outDL$L; D <- outDL$D class(L); class(D) ## ----------------------------------------------------------------------------- # Precision matrix Omega <- L %*% solve(D) %*% t(L) # Covariance matrix Sigma <- solve(Omega) ## ----------------------------------------------------------------------------- n <- 1000 X <- mvtnorm::rmvnorm(n = n, sigma = Sigma) ## ----include = FALSE---------------------------------------------------------- par(oldpar) options(oldoptions)
/scratch/gouwar.j/cran-all/cranData/BCDAG/inst/doc/bcdag_generatedata.R
--- title: "Random data generation from Gaussian DAG models" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Random data generation from Gaussian DAG models} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) oldpar <- par(no.readonly = TRUE) oldoptions <- options() ``` ```{css, echo = FALSE} .math.inline { font-size: 11px; } ``` ```{r setup} library(BCDAG) ``` This is the first of a series of three vignettes introducing the R package `BCDAG`. In this vignette we focus on functions `rDAG()` and `rDAGWishart()` which implement random generation of DAG structures and DAG parameters under the assumption that the joint distribution of variables $X_1,\dots, X_q$ is Gaussian and the corresponding model (Choleski) parameters follow a DAG-Wishart distribution. Finally, data generation from Gaussian DAG models is described. ## Generating DAGs and parameters: functions `rDAG()` and `rDAGWishart()` Function `rDAG()` can be used to randomly generate a DAG structure $\mathcal{D}=(V,E)$, where $V=\{1,\dots,q\}$ and $E\subseteq V \times V$ is the set of edges. `rDAG()` has two arguments: the number of nodes (variables) $q$ and the prior probability of edge inclusion $w\in[0,1]$; the latter can be tuned to control the degree of sparsity in the resulting DAG. By fixing a probability of edge inclusion $w=0.2$, a DAG structure with $q=10$ nodes can be generated as follows: ```{r} set.seed(1) q <- 10 w <- 0.2 DAG <- rDAG(q,w) ``` ```{r printDAG} DAG ``` Output of `rDAG()` is the 0-1 $(q,q)$ adjacency matrix of the generated DAG, with element $1$ at position $(u,v)$ indicating the presence of an edge $u\rightarrow v$. Notice that the generated DAG is *topologically ordered*, meaning that edges are allowed only from high to low nodes (nodes are labeled according to rows/columns indexes); accordingly the DAG adjacency matrix is lower-triangular. ## Generating Gaussian DAG parameters Consider a Gaussian DAG model of the form \begin{eqnarray} X_1, \dots, X_q \,|\,\boldsymbol L, \boldsymbol D, \mathcal{D} &\sim& \mathcal{N}_q\left(\boldsymbol 0, (\boldsymbol{L}\boldsymbol{D}^{-1}\boldsymbol{L}^\top)^{-1}\right), \end{eqnarray} where $(\boldsymbol L, \boldsymbol D)$ are model parameters providing the decomposition of the precision (inverse-covariance) matrix $\boldsymbol{\Omega} = \boldsymbol{L}\boldsymbol{D}^{-1}\boldsymbol{L}^\top$; specifically, $\boldsymbol{L}$ is a $(q, q)$ matrix of coefficients such that for each $(u, v)$-element $\boldsymbol{L}_{uv}$ with $u \ne v$, we have $\boldsymbol{L}_{uv} \ne 0$ if and only if $(u, v) \in E$, while $\boldsymbol{L}_{uu} = 1$ for each $u = 1,\dots, q$; also, $\boldsymbol{D}$ is a $(q, q)$ diagonal matrix with $(u, u)$-element $\boldsymbol{D}_{uu}$. The latter decomposition follows from the equivalent Structural Equation Model (SEM) representation of a Gaussian DAG model: \begin{equation} \boldsymbol{L}^\top\boldsymbol{x} = \boldsymbol \epsilon, \quad \boldsymbol \epsilon \sim \mathcal{N}_q(\boldsymbol 0, \boldsymbol D), \end{equation} where $\boldsymbol x = (X_1,\dots, X_q)^\top$; see also Castelletti \& Mascaro (2021). Function `rDAGWishart` implements random sampling from $(\boldsymbol L, \boldsymbol D)\,|\,\mathcal{D} \sim \text{DAG-Wishart}(\boldsymbol{a}_{c}^{\mathcal{D}}, \boldsymbol U)$, where $\boldsymbol{U}$ is the rate parameter (a $(q,q)$ s.p.d. 
matrix) and $\boldsymbol{a}^{\mathcal {D}}_{c}$ (a $(q,1)$ vector) is the shape parameter of the DAG-Wishart distribution. This class of distributions was introduced by Ben David et al. (2015) as a conjugate prior for Gaussian DAG model-parameters. In its compatible version (Peluso \& Consonni, 2020), elements of the vector parameter $\boldsymbol{a}^{\mathcal {D}}_{c}$ are uniquely determined from a single *common* shape parameter $a>q-1$. Inputs of `rDAGWishart` are: the number of samples $n$, the underlying DAG $\mathcal{D}$, the common shape parameter $a$ and the rate parameter $\boldsymbol U$. Given the DAG $\mathcal{D}$ generated before, the following example implements a single ($n=1$) draw from a compatible DAG-Wishart distribution with parameters $a=q$, $\boldsymbol U = \boldsymbol I_q$: ```{r} a <- q U <- diag(1,q) outDL <- rDAGWishart(n=1, DAG, a, U) class(outDL) ``` ```{r} L <- outDL$L; D <- outDL$D class(L); class(D) ``` The output of `rDAGWishart()` consists of two elements: a $(q,q,n)$-dimensional array collecting the $n$ sampled matrices $\boldsymbol L^{(1)}, \dots, \boldsymbol L^{(n)}$ and a $(q,q,n)$-dimensional array collecting the $n$ sampled matrices $\boldsymbol D^{(1)}, \dots,\boldsymbol D^{(n)}$. We refer the reader to Castelletti \& Mascaro (2021) and Castelletti \& Mascaro (2022+) for more details. ## Generating data from a Gaussian DAG model Data generation from a Gaussian DAG model is then straightforward. Recall that $\boldsymbol{\Omega} = \boldsymbol{L}\boldsymbol{D}^{-1}\boldsymbol{L}^\top$, where $\boldsymbol{\Omega}$ is the inverse-covariance (precision) matrix of a multivariate Gaussian model satisfying the constraints imposed by a DAG. Accordingly, we can recover the precision and covariance matrices as: ```{r} # Precision matrix Omega <- L %*% solve(D) %*% t(L) # Covariance matrix Sigma <- solve(Omega) ``` Next, i.i.d. draws from a Gaussian DAG model can be obtained through the function `rmvnorm()` provided within the R package `mvtnorm`: ```{r} n <- 1000 X <- mvtnorm::rmvnorm(n = n, sigma = Sigma) ``` ## References * Ben-David E, Li T, Massam H, Rajaratnam B (2015). “High dimensional Bayesian inference for Gaussian directed acyclic graph models.” *arXiv pre-print*. * Cao X, Khare K, Ghosh M (2019). “Posterior graph selection and estimation consistency for high-dimensional Bayesian DAG models.” *The Annals of Statistics*, 47(1), 319–348. * Castelletti F, Mascaro A (2021). “Structural learning and estimation of joint causal effects among network-dependent variables.” *Statistical Methods & Applications*, 30, 1289–1314. * Castelletti F, Mascaro A (2022). “BCDAG: An R package for Bayesian structural and Causal learning of Gaussian DAGs.” *arXiv pre-print*. * Peluso S, Consonni G (2020). “Compatible priors for model selection of high-dimensional Gaussian DAGs.” *Electronic Journal of Statistics*, 14(2), 4110–4132. ```{r, include = FALSE} par(oldpar) options(oldoptions) ```
/scratch/gouwar.j/cran-all/cranData/BCDAG/inst/doc/bcdag_generatedata.Rmd
## ----include = FALSE---------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) oldpar <- par(no.readonly = TRUE) oldoptions <- options() ## ----setup-------------------------------------------------------------------- library(BCDAG) ## ----------------------------------------------------------------------------- ## Generate data set.seed(1) q <- 8 w <- 0.2 DAG <- rDAG(q,w) a <- q U <- diag(1,q) outDL <- rDAGWishart(n=1, DAG, a, U) L <- outDL$L; D <- outDL$D Omega <- L %*% solve(D) %*% t(L) Sigma <- solve(Omega) n <- 1000 X <- mvtnorm::rmvnorm(n = n, sigma = Sigma) ## ----echo = FALSE, include=FALSE---------------------------------------------- out <- learn_DAG(S = 5000, burn = 1000, data = X, a, U, w, fast = FALSE, save.memory = FALSE, collapse = FALSE) ## ----eval = FALSE------------------------------------------------------------- # ## Run MCMC # out <- learn_DAG(S = 5000, burn = 1000, data = X, # a, U, w, # fast = FALSE, save.memory = FALSE, collapse = FALSE) ## ----fig.width = 7, fig.height= 6--------------------------------------------- get_diagnostics(out, ask = FALSE) ## ----------------------------------------------------------------------------- print(out) summary(out) plot(out) ## ----------------------------------------------------------------------------- get_edgeprobs(out) ## ----------------------------------------------------------------------------- MPMdag <- get_MPMdag(out) MPMdag ## ----------------------------------------------------------------------------- MAPdag <- get_MAPdag(out) MAPdag ## ----fig.width = 7------------------------------------------------------------ par(mfrow = c(1,3)) Rgraphviz::plot(as_graphNEL(DAG), main = "True DAG") Rgraphviz::plot(as_graphNEL(MPMdag), main = "MPM DAG") Rgraphviz::plot(as_graphNEL(MAPdag), main = "MAP DAG") ## ----------------------------------------------------------------------------- round(L, 3) ## ----------------------------------------------------------------------------- Rgraphviz::plot(as_graphNEL(DAG), main = "True DAG") ## ----------------------------------------------------------------------------- causaleffect(targets = c(4,5), response = 1, L = L, D = D) causaleffect(targets = 4, response = 1, L = L, D = D) causaleffect(targets = 5, response = 1, L = L, D = D) ## ----------------------------------------------------------------------------- DAG2 <- DAG DAG2[4,5] <- 1 par(mfrow = c(1,2)) Rgraphviz::plot(as_graphNEL(DAG), main = "True DAG") Rgraphviz::plot(as_graphNEL(DAG2), main = "Modified DAG") ## ----include = FALSE---------------------------------------------------------- par(mfrow = c(1,1)) ## ----------------------------------------------------------------------------- L2 <- L L2[4,5] <- runif(1) L2[4,5] ## ----------------------------------------------------------------------------- causaleffect(targets = c(4,5), response = 1, L = L2, D = D) causaleffect(targets = 4, response = 1, L = L2, D = D) causaleffect(targets = 5, response = 1, L = L2, D = D) ## ----------------------------------------------------------------------------- effects_out <- get_causaleffect(out, targets = c(4,5), response = 1) head(effects_out$causaleffects) ## ----------------------------------------------------------------------------- print(effects_out) summary(effects_out) plot(effects_out) ## ----echo = FALSE, include=FALSE---------------------------------------------- coll_out <- learn_DAG(S = 5000, burn = 1000, data = X, a, U, w, fast = FALSE, save.memory = FALSE, collapse = 
TRUE) ## ----eval=FALSE--------------------------------------------------------------- # coll_out <- learn_DAG(S = 5000, burn = 1000, data = X, # a, U, w, # fast = FALSE, save.memory = FALSE, collapse = TRUE) ## ----------------------------------------------------------------------------- names(coll_out) ## ----include = FALSE---------------------------------------------------------- effects_collout <- get_causaleffect(coll_out, targets = c(4,5), response = 1) ## ----eval = FALSE------------------------------------------------------------- # effects_collout <- get_causaleffect(coll_out, targets = c(4,5), response = 1) ## ----------------------------------------------------------------------------- round(effects_collout$post_mean, 3) ## ----include = FALSE---------------------------------------------------------- par(oldpar) options(oldoptions)
/scratch/gouwar.j/cran-all/cranData/BCDAG/inst/doc/bcdag_getfamily.R
--- title: "Elaborate on the output of `learn_DAG()` using get_ functions" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Elaborate on the output of `learn_DAG()` using get_ functions} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) oldpar <- par(no.readonly = TRUE) oldoptions <- options() ``` ```{css, echo = FALSE} .math.inline { font-size: 11px; } ``` ```{r setup} library(BCDAG) ``` This is the third of a series of three vignettes for the R package `BCDAG`. In this vignette, we show how to use the output of `learn_DAG()` for posterior inference on DAGs, DAG parameters, and causal effect estimation. Specifically, we introduce the functions of the `get_` family. Remember that the output of `learn_DAG()` consists of an MCMC sample from the marginal posterior distribution of DAG structures (`collapse = TRUE`) and the joint posterior of DAGs and DAG parameters (`collapse = FALSE`); see also [the corresponding vignette](#) To start with, we simulate a dataset `X` from a randomly generated Gaussian DAG model [as shown in an other vignette](#) ```{r} ## Generate data set.seed(1) q <- 8 w <- 0.2 DAG <- rDAG(q,w) a <- q U <- diag(1,q) outDL <- rDAGWishart(n=1, DAG, a, U) L <- outDL$L; D <- outDL$D Omega <- L %*% solve(D) %*% t(L) Sigma <- solve(Omega) n <- 1000 X <- mvtnorm::rmvnorm(n = n, sigma = Sigma) ``` Next, we use `learn_DAG()` to approximate the joint posterior distribution over DAG structures and DAG parameters: ```{r echo = FALSE, include=FALSE} out <- learn_DAG(S = 5000, burn = 1000, data = X, a, U, w, fast = FALSE, save.memory = FALSE, collapse = FALSE) ``` ```{r eval = FALSE} ## Run MCMC out <- learn_DAG(S = 5000, burn = 1000, data = X, a, U, w, fast = FALSE, save.memory = FALSE, collapse = FALSE) ``` ## MCMC diagnostics of convergence: function `get_diagnostics()` Before using the MCMC output for posterior inference, it is common practice to perform some convergence checks. Function `get_diagnostics()` provides graphical diagnostics of convergence for the MCMC output of `learn_DAG()`. These are based on: the number of edges in the DAGs; the posterior probability of edge inclusion for each possible edge $u \rightarrow v$, both monitored across MCMC iterations. Input of the function is an object of class `bcdag` and the output consists of: - a traceplot and running-mean plot of the number of edges in the DAGs (graph size); - a collection of traceplots of the posterior probabilities of edge inclusion computed across MCMC iterations. For each pair of distinct nodes $(u,v)$, its posterior probability of inclusion at time $s$ $(s = 1,\dots, S)$ is estimated as the proportion of DAGs visited by the MCMC up to time $s$ which contain the directed edge $u \rightarrow v$. Output is organized in $q$ plots (one for each node $v = 1, \dots, q$), each summarizing the posterior probabilities of edges $u \rightarrow v$, $u = 1,\dots, q$. ```{r fig.width = 7, fig.height= 6} get_diagnostics(out, ask = FALSE) ``` ## Posterior inference: DAG structure learning We now show how to perform posterior inference of DAGs from the MCMC output. To summarize the output of `learn_DAG()`, `print()`, `summary()` and `plot()`methods for objects of class `bcdag` are available. When `print()` is applied to a `bcdag` object, a printed message appears. 
This summarizes the type of `bcdag` object (see details on `bcdag` object types provided in [the previous vignette](#)) and the input arguments of `learn_DAG()` that generated the output. When `summary()` is used, also the posterior probabilities of edge inclusion are reported. Finally, `plot()` returns a graphical representation of the Median Probability DAG, an heatmap with the probabilities of edge inclusion and an histogram of the sizes of graph visited by the Markov chain. ```{r} print(out) summary(out) plot(out) ``` Function `get_edgeprobs()` computes and returns the collection of posterior probabilities of edge inclusion, arranged as a $(q,q)$ matrix, with $(u,v)$-element referring to edge $u\rightarrow v$: ```{r} get_edgeprobs(out) ``` The MPM model returned in the output of `summary()` can be used as a single DAG-model estimate and is obtained by including all edges whose posterior probability exceeds the threshold $0.5$. Function `get_MPMdag()` applies to an object of class `bcdag` and returns the $(q,q)$ adjacency matrix of the MPM: ```{r} MPMdag <- get_MPMdag(out) MPMdag ``` As an alternative, the Maximum A Posterior DAG estimate (MAP) can be considered. This corresponds to the DAG with the highest MCMC frequency of visits and can be recovered through the function `get_MAPdag()`: ```{r} MAPdag <- get_MAPdag(out) MAPdag ``` ```{r fig.width = 7} par(mfrow = c(1,3)) Rgraphviz::plot(as_graphNEL(DAG), main = "True DAG") Rgraphviz::plot(as_graphNEL(MPMdag), main = "MPM DAG") Rgraphviz::plot(as_graphNEL(MAPdag), main = "MAP DAG") ``` In this example, the MPM and MAP estimates differ by a single an edge between nodes $4$ and $7$ which is reversed among the two graphs. However, it can be shown that the two DAG estimates are Markov equivalent, meaning that they encode the same conditional independencies between variables. In a Gaussian setting, Markov equivalent DAGs cannot be distinguished with observational data as they represent the same statistical model. Therefore, there is no difference in choosing the MPM or the MAP estimate to infer the structure of dependencies between variables. In addition, if compared with the true graph, the DAG estimate provided by MPM [CORRETTO?] differs by a single edge between nodes $7$ and $1$ which is missing from MPM. Interestingly, one can see that the regression coefficient associated with $u\rightarrow v$, $\boldsymbol L_{7,1}$, is relatively "small", implying that the strength of the dependence between the two nodes is "weak": ```{r} round(L, 3) ``` ## Posterior inference: causal effect estimation In this last section, we introduce functions `causaleffect()` and `get_causaleffect()`, which allow to compute and estimate causal effects between variables. Specifically, we consider the causal effect on a response variable of interest consequent to a joint intervention on a given set of variables; see also Nandy et al. (2017) and Castelletti \& Mascaro (2021) for formal definitions. For a given DAG, it is possible to identify and estimate the causal effect on a node $Y$ consequent to a hypothetical hard intervention on node $j$ using the rules of the *do-calculus* (Pearl, 2000). A simple implementation of this set of rules and an estimation method for the causally sufficient case and for Gaussian data is provided by function `causaleffect()`. 
## Posterior inference: causal effect estimation

In this last section, we introduce functions `causaleffect()` and `get_causaleffect()`, which allow the user to compute and estimate causal effects between variables. Specifically, we consider the causal effect on a response variable of interest consequent to a joint intervention on a given set of variables; see also Nandy et al. (2017) and Castelletti \& Mascaro (2021) for formal definitions. For a given DAG, it is possible to identify and estimate the causal effect on a node $Y$ consequent to a hypothetical hard intervention on node $j$ using the rules of the *do-calculus* (Pearl, 2000). A simple implementation of this set of rules and an estimation method for the causally sufficient case and for Gaussian data is provided by function `causaleffect()`. The function takes as input a numerical vector representing the labels of the intervened nodes (also called the intervention `target`), a numerical value indicating the `response` variable and the DAG model parameters `L` and `D`; see also Castelletti \& Mascaro (2021) or [our previous vignette](#) for a detailed model description.

For a given response variable $Y \in \{1, \ldots, q\}$ and intervention target $I \subseteq \{1,\dots,q\}$, the *total joint effect* of an intervention $\operatorname{do}\left\{X_{j}=\tilde{x}_{j}\right\}_{j \in I}$ on $Y$ is
$$
\theta_{Y}^{I}:=\left(\theta_{h, Y}^{I}\right)_{h \in I},
$$
where for each $h \in I$
$$
\theta_{h, Y}^{I}:=\frac{\partial}{\partial x_{h}} \mathbb{E}\left(Y \mid \operatorname{do}\left\{X_{j}=\tilde{x}_{j}\right\}_{j \in I}\right).
$$
See also Castelletti \& Mascaro (2021) and Nandy et al. (2017) for more details.

To better understand the difference between single and joint interventions, consider as an example the total causal effect on node $Y=1$ (i.e. variable $X_1$) of a joint intervention on nodes $4$ and $5$, $I=\{4,5\}$ (i.e. variables $X_4, X_5$), and the total causal effects of two separate interventions on nodes $4$ and $5$, under the causal model represented by the DAG generated before:

```{r}
Rgraphviz::plot(as_graphNEL(DAG), main = "True DAG")
```

These are given by:

```{r}
causaleffect(targets = c(4,5), response = 1, L = L, D = D)
causaleffect(targets = 4, response = 1, L = L, D = D)
causaleffect(targets = 5, response = 1, L = L, D = D)
```

As can be observed, the total causal effect of intervening on variable $X_4$ is null both in a single intervention on $4$ and in a joint intervention on $\{X_4, X_5\}$, while intervening on $X_5$ produces the same positive total causal effect in both cases. The total causal effects are thus exactly the same for both variables in the two cases. However, suppose we slightly modify the DAG by adding an edge from node $4$ to node $5$:

```{r}
DAG2 <- DAG
DAG2[4,5] <- 1
par(mfrow = c(1,2))
Rgraphviz::plot(as_graphNEL(DAG), main = "True DAG")
Rgraphviz::plot(as_graphNEL(DAG2), main = "Modified DAG")
```

```{r include = FALSE}
par(mfrow = c(1,1))
```

and modify `L` accordingly:

```{r}
L2 <- L
L2[4,5] <- runif(1)
L2[4,5]
```

The comparison of single and joint total causal effects now produces different results:

```{r}
causaleffect(targets = c(4,5), response = 1, L = L2, D = D)
causaleffect(targets = 4, response = 1, L = L2, D = D)
causaleffect(targets = 5, response = 1, L = L2, D = D)
```

As can be observed, this time a single intervention on $X_4$ produces a negative causal effect on $X_1$, while jointly intervening on $X_4$ and $X_5$ makes the total causal effect of $X_4$ on $X_1$ null. The effect of $X_4$ on $X_1$ was in fact mediated by $X_5$: intervening simultaneously also on $X_5$ erases the effect of $X_4$ on $X_5$ and, in turn, of $X_4$ on $X_1$. See also Castelletti \& Mascaro (2021) or Nandy et al. (2017) for a more detailed description.
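For Gaussian data, these quantities can also be derived in closed form from the *mutilated* SEM, in which the structural equations of the intervened variables are removed and their values are held fixed. The sketch below is ours (not part of `BCDAG`) and relies only on the SEM representation $\boldsymbol{L}^\top \boldsymbol{x} = \boldsymbol \epsilon$ introduced in the first vignette: writing $\boldsymbol M = \boldsymbol L^\top$ and dropping the equations of the intervened nodes $I$, the derivative of $\mathbb{E}(\boldsymbol x \mid \operatorname{do})$ with respect to the intervention values is $-\boldsymbol M_{-I,-I}^{-1}\boldsymbol M_{-I,I}$. Its output should be comparable with the `causaleffect()` results above:

```{r eval = FALSE}
M <- t(L2)                                  # SEM in matrix form: M %*% x = eps
## Single intervention do(X4 = a): drop the 4th structural equation
theta_single <- -solve(M[-4, -4], M[-4, 4])
theta_single[1]                             # total effect of X4 on X1
## Joint intervention do(X4, X5): drop both structural equations
theta_joint <- -solve(M[-c(4, 5), -c(4, 5)], M[-c(4, 5), c(4, 5)])
theta_joint[1, ]                            # effects of (X4, X5) on X1
```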
The identification and estimation of causal effects requires the specification of a DAG. When the DAG is unknown, function `get_causaleffect()` can be used. It applies to objects of class `bcdag`; the latter corresponds to the output of `learn_DAG()` and consists of a sample of size $S$ from the posterior of DAGs and DAG parameters. In addition, `get_causaleffect()` takes as input a numerical vector representing the labels of the intervened nodes (the intervention `target`) and a numerical value indicating the `response` variable. Output of the function is an object of class `bcdagCE`, containing a sample of size $S$ from the posterior distribution of the causal effect coefficients associated with the intervention `targets`, together with some useful summaries of the posterior distribution, such as the mean and the quantiles:

```{r}
effects_out <- get_causaleffect(out, targets = c(4,5), response = 1)
head(effects_out$causaleffects)
```

`print()`, `summary()` and `plot()` methods are available for objects of class `bcdagCE`. `print()` returns the input used and the prior hyperparameters. `summary()` returns the estimated mean and quantiles of the estimated causal effects, as well as the probabilities of each effect being greater than, lower than or equal to zero. `plot()` produces a boxplot and a histogram of the causal effect of each target node.

```{r}
print(effects_out)
summary(effects_out)
plot(effects_out)
```

Also notice that, if the `bcdag` object input of `get_causaleffect()` is of type `collapsed` or `compressed and collapsed`, then `get_causaleffect()` requires drawing from the posterior distribution of the parameters $(\boldsymbol L, \boldsymbol D)$ before estimating the required causal effects:

```{r echo = FALSE, include=FALSE}
coll_out <- learn_DAG(S = 5000, burn = 1000, data = X,
                      a, U, w,
                      fast = FALSE, save.memory = FALSE, collapse = TRUE)
```

```{r eval=FALSE}
coll_out <- learn_DAG(S = 5000, burn = 1000, data = X,
                      a, U, w,
                      fast = FALSE, save.memory = FALSE, collapse = TRUE)
```

```{r}
names(coll_out)
```

```{r include = FALSE}
effects_collout <- get_causaleffect(coll_out, targets = c(4,5), response = 1)
```

```{r eval = FALSE}
effects_collout <- get_causaleffect(coll_out, targets = c(4,5), response = 1)
```

```{r}
round(effects_collout$post_mean, 3)
```

### References

* Castelletti F, Mascaro A (2021). “Structural learning and estimation of joint causal effects among network-dependent variables.” *Statistical Methods & Applications*, 30(5), 1289–1314.
* Castelletti F, Mascaro A (2022). “BCDAG: An R package for Bayesian structural and Causal learning of Gaussian DAGs.” *arXiv pre-print*.
* Nandy P, Maathuis MH, Richardson TS (2017). “Estimating the effect of joint interventions from observational data in sparse high-dimensional settings.” *The Annals of Statistics*, 45(2), 647–674.
* Pearl J (2000). *Causality: Models, Reasoning, and Inference*. Cambridge University Press, Cambridge. ISBN 0-521-77362-8.

```{r, include = FALSE}
par(oldpar)
options(oldoptions)
```
/scratch/gouwar.j/cran-all/cranData/BCDAG/inst/doc/bcdag_getfamily.Rmd
## ----include = FALSE---------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) oldpar <- par(no.readonly = TRUE) oldoptions <- options() ## ----setup-------------------------------------------------------------------- library(BCDAG) ## ----------------------------------------------------------------------------- set.seed(1) q <- 8 w <- 0.2 DAG <- rDAG(q,w) a <- q U <- diag(1,q) outDL <- rDAGWishart(n=1, DAG, a, U) L <- outDL$L; D <- outDL$D Omega <- L %*% solve(D) %*% t(L) Sigma <- solve(Omega) n <- 1000 X <- mvtnorm::rmvnorm(n = n, sigma = Sigma) ## ----echo = FALSE, include=FALSE---------------------------------------------- out <- learn_DAG(S = 5000, burn = 1000, data = X, a, U, w, fast = FALSE, save.memory = FALSE, collapse = FALSE) ## ----eval = FALSE------------------------------------------------------------- # out <- learn_DAG(S = 5000, burn = 1000, data = X, # a, U, w, # fast = FALSE, save.memory = FALSE, collapse = FALSE) ## ----------------------------------------------------------------------------- class(out) ## ----------------------------------------------------------------------------- str(out) ## ----------------------------------------------------------------------------- out$Graphs[,,1] round(out$L[,,1],2) round(out$D[,,1],2) ## ----echo = FALSE, include=FALSE---------------------------------------------- collapsed_out <- learn_DAG(S = 5000, burn = 1000, data = X, a, U, w, fast = FALSE, save.memory = FALSE, collapse = TRUE) ## ----eval = FALSE------------------------------------------------------------- # collapsed_out <- learn_DAG(S = 5000, burn = 1000, data = X, # a, U, w, # fast = FALSE, save.memory = FALSE, collapse = TRUE) ## ----------------------------------------------------------------------------- names(collapsed_out) class(collapsed_out) attributes(collapsed_out)$type collapsed_out$Graphs[,,1] ## ----echo = FALSE, include=FALSE---------------------------------------------- compressed_out <- learn_DAG(S = 5000, burn = 1000, data = X, a, U, w, fast = FALSE, save.memory = TRUE, collapse = FALSE) ## ----eval = FALSE------------------------------------------------------------- # compressed_out <- learn_DAG(S = 5000, burn = 1000, data = X, # a, U, w, # fast = FALSE, save.memory = TRUE, collapse = FALSE) ## ----------------------------------------------------------------------------- names(compressed_out) class(compressed_out) attributes(compressed_out)$type ## ----------------------------------------------------------------------------- compressed_out$Graphs[1] compressed_out$L[1] compressed_out$D[1] ## ----------------------------------------------------------------------------- BCDAG:::bd_decode(compressed_out$Graphs[1]) round(BCDAG:::bd_decode(compressed_out$L[1]),2) round(BCDAG:::bd_decode(compressed_out$D[1]),2) ## ----echo = FALSE, include=FALSE---------------------------------------------- comprcoll_out <- learn_DAG(S = 5000, burn = 1000, data = X, a, U, w, fast = FALSE, save.memory = TRUE, collapse = TRUE) ## ----eval = FALSE------------------------------------------------------------- # comprcoll_out <- learn_DAG(S = 5000, burn = 1000, data = X, # a, U, w, # fast = FALSE, save.memory = TRUE, collapse = TRUE) ## ----------------------------------------------------------------------------- names(comprcoll_out) class(comprcoll_out) attributes(comprcoll_out)$type BCDAG:::bd_decode(comprcoll_out$Graphs[1]) ## ----results='hide'----------------------------------------------------------- # No approximation 
time_nofast <- system.time(out_nofast <- learn_DAG(S = 5000, burn = 1000, data = X, a, U, w, fast = FALSE, save.memory = FALSE, collapse = FALSE)) # Approximation time_fast <- system.time(out_fast <- learn_DAG(S = 5000, burn = 1000, data = X, a, U, w, fast = TRUE, save.memory = FALSE, collapse = FALSE)) ## ----------------------------------------------------------------------------- time_nofast time_fast ## ----------------------------------------------------------------------------- round(get_edgeprobs(out_nofast), 2) round(get_edgeprobs(out_fast), 2) ## ----include = FALSE---------------------------------------------------------- par(oldpar) options(oldoptions)
/scratch/gouwar.j/cran-all/cranData/BCDAG/inst/doc/bcdag_learnDAG.R
--- title: "MCMC scheme for posterior inference of Gaussian DAG models: the `learn_DAG()` function" output: rmarkdown::html_vignette # html_document: # theme: readable # highlight: textmate vignette: > %\VignetteIndexEntry{MCMC scheme for posterior inference of Gaussian DAG models: the `learn_DAG()` function} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) oldpar <- par(no.readonly = TRUE) oldoptions <- options() ``` ```{css, echo = FALSE} .math.inline { font-size: 11px; } ``` ```{r setup} library(BCDAG) ``` This is the second of a series of three vignettes for the R package `BCDAG`. In this vignette we focus on function `learn_DAG()`, which implements a Markov Chain Monte Carlo (MCMC) algorithm to sample from the joint posterior of DAG structures and DAG-parameters under the Gaussian assumption. ### Model description The underlying Bayesian Gaussian DAG-model can be summarized as follows: \begin{eqnarray} X_1, \dots, X_q \,|\,\boldsymbol L, \boldsymbol D, \mathcal{D} &\sim& \mathcal{N}_q\left(\boldsymbol 0, (\boldsymbol{L}\boldsymbol{D}^{-1}\boldsymbol{L}^\top)^{-1}\right)\\ (\boldsymbol L, \boldsymbol D)\,|\,\mathcal{D} &\sim& \text{DAG-Wishart}(\boldsymbol{a}_{c}^{\mathcal{D}}, \boldsymbol U) \\ p(\mathcal{D}) &\propto& w^{|\mathcal{S}_\mathcal{D}|}(1-w)^{\frac{q(q-1)}{2} - {|\mathcal{S}_\mathcal{D}|}} \end{eqnarray} In particular $\mathcal{D}=(V,E)$ denotes a DAG structure with set of nodes $V=\{1,\dots,q\}$ and set of edges $E\subseteq V \times V$. Moreover, $(\boldsymbol L, \boldsymbol D)$ are model parameters providing the decomposition of the precision (inverse-covariance) matrix $\boldsymbol{\Omega} = \boldsymbol{L}\boldsymbol{D}^{-1}\boldsymbol{L}^\top$; specifically, $\boldsymbol{L}$ is a $(q, q)$ matrix of coefficients such that for each $(u, v)$-element $\boldsymbol{L}_{uv}$ with $u \ne v$, $\boldsymbol{L}_{uv} \ne 0$ if and only if $(u, v) \in E$, while $\boldsymbol{L}_{uu} = 1$ for each $u = 1,\dots, q$; also, $\boldsymbol{D}$ is a $(q, q)$ diagonal matrix with $(u, u)$-element $\boldsymbol{D}_{uu}$. The latter decomposition follows from the equivalent Structural Equation Model (SEM) representation of a Gaussian DAG-model; see also Castelletti \& Mascaro (2021). Conditionally to $\mathcal{D}$, a prior to $(\boldsymbol{L}, \boldsymbol{D})$ is assigned through a *compatible* DAG-Wishart distribution with rate hyperparameter $\boldsymbol{U}$, a $(q,q)$ s.p.d. matrix, and shape hyperparameter $\boldsymbol{a}^{\mathcal {D}}_{c}$, a $(q,1)$ vector; see also Cao et al. (2019) and Peluso \& Consonni (2020). Finally, a prior on DAG $\mathcal{D}$ is assigned through a Binomial distribution on the number of edges in the graph; in $p(\mathcal{D})$, $w \in (0,1)$ is a prior probability of edge inclusion, while $|\mathcal{S_{\mathcal{D}}}|$ denotes the number of edges in $\mathcal{D}$; see again Castelletti \& Mascaro (2021) for further details. Target of the MCMC scheme is therefore the joint posterior of $(\boldsymbol{L},\boldsymbol{D},\mathcal{D})$, \begin{equation} p(\boldsymbol L, \boldsymbol D, \mathcal{D}\,|\, \boldsymbol X) \propto p(\boldsymbol{X}\,|\,\boldsymbol L, \boldsymbol D, \mathcal{D})p(\boldsymbol{L},\boldsymbol{D}\,|\,\mathcal{D}) \,p(\mathcal{D}), \end{equation} where $\boldsymbol{X}$ denotes a $(n,q)$ data matrix as obtained through $n$ i.i.d. 
### Generating data

We first randomly generate a DAG $\mathcal{D}$, the DAG parameters $(\boldsymbol{L},\boldsymbol{D})$ and then $n=1000$ i.i.d. observations from a Gaussian DAG-model as follows:

```{r}
set.seed(1)
q <- 8
w <- 0.2
DAG <- rDAG(q,w)
a <- q
U <- diag(1,q)
outDL <- rDAGWishart(n=1, DAG, a, U)
L <- outDL$L; D <- outDL$D
Omega <- L %*% solve(D) %*% t(L)
Sigma <- solve(Omega)
n <- 1000
X <- mvtnorm::rmvnorm(n = n, sigma = Sigma)
```

See also our [vignette about data generation from Gaussian DAG-models](#).

## `learn_DAG()`

Function `learn_DAG()` implements an MCMC algorithm to sample from the joint posterior of DAGs and DAG parameters. This is based on a Partial Analytic Structure (PAS) algorithm (Godsill, 2001) which, at each iteration:

1. Updates the DAG through a Metropolis-Hastings (MH) step where, given the current DAG, a new (direct successor) DAG is drawn from a suitable proposal distribution and accepted with a probability given by the MH acceptance rate (see also section [A note on `fast = TRUE`]);
2. Samples from the posterior distribution of the (updated DAG) parameters; see also Castelletti \& Consonni (2021) for more details.

We implement it as follows:

```{r echo = FALSE, include=FALSE}
out <- learn_DAG(S = 5000, burn = 1000, data = X,
                 a, U, w,
                 fast = FALSE, save.memory = FALSE, collapse = FALSE)
```

```{r eval = FALSE}
out <- learn_DAG(S = 5000, burn = 1000, data = X,
                 a, U, w,
                 fast = FALSE, save.memory = FALSE, collapse = FALSE)
```

### Input

Inputs of `learn_DAG()` correspond to three different sets of arguments:

* `S`, `burn` and `data` are standard inputs required by any MCMC algorithm. In particular, `S` defines the desired length of the chain, which is obtained by discarding the first `burn` observations (the total number of sampled observations is therefore `S + burn`); `data` is instead the $(n,q)$ matrix $\boldsymbol{X}$;
* `a`, `U` and `w` are hyperparameters of the priors on DAGs (`w`) and DAG parameters (`a`, `U`); see also the model description above. The same appear in functions `rDAG()` and `rDAGWishart()`, which were introduced in [our vignette about data generation from Gaussian DAG-models](#).
* `fast`, `save.memory` and `collapse` are boolean arguments which allow the user to: implement an approximate proposal distribution within the MCMC scheme (`fast = TRUE`); change the array structure of the stored MCMC output into strings in order to save memory (`save.memory = TRUE`); implement an MCMC for DAG structure learning only, without sampling from the posterior of parameters (`collapse = TRUE`). See also [A note on `fast = TRUE`] and Castelletti \& Mascaro (2022+) for full details.

### Output

The output of `learn_DAG()` is an object of class `bcdag`:

```{r}
class(out)
```

`bcdag` objects include the output of the MCMC algorithm together with a collection of meta-data representing the input arguments of `learn_DAG()`; these are stored in the attributes of the object:

```{r}
str(out)
```

Attribute `type` refers to the output of `learn_DAG()`, whose structure depends on the choice of the arguments `save.memory` and `collapse`.
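For the *complete* object just created, the attribute can be queried directly; the labels in the comment below follow the terminology used in the remainder of this section:

```{r eval = FALSE}
## Output types by (save.memory, collapse):
## (FALSE, FALSE) complete; (FALSE, TRUE) collapsed;
## (TRUE, FALSE) compressed; (TRUE, TRUE) compressed and collapsed
attributes(out)$type
```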
\vspace{0.2cm}
When both are set equal to `FALSE`, as in the previous example, the output of `learn_DAG()` is a *complete* `bcdag` object, collecting three $(q,q,S)$ arrays with the DAG structures (in the form of $q \times q$ adjacency matrices) and the DAG parameters $\boldsymbol{L}$ and $\boldsymbol{D}$ (both as $q \times q$ matrices) sampled by the MCMC:

```{r}
out$Graphs[,,1]
round(out$L[,,1],2)
round(out$D[,,1],2)
```

\vspace{0.2cm}
When `collapse = TRUE` and `save.memory = FALSE`, the output of `learn_DAG()` is a *collapsed* `bcdag` object, consisting of a $(q,q,S)$ array with the adjacency matrices of the DAGs sampled by the MCMC:

```{r echo = FALSE, include=FALSE}
collapsed_out <- learn_DAG(S = 5000, burn = 1000, data = X,
                           a, U, w,
                           fast = FALSE, save.memory = FALSE, collapse = TRUE)
```

```{r eval = FALSE}
collapsed_out <- learn_DAG(S = 5000, burn = 1000, data = X,
                           a, U, w,
                           fast = FALSE, save.memory = FALSE, collapse = TRUE)
```

```{r}
names(collapsed_out)
class(collapsed_out)
attributes(collapsed_out)$type
collapsed_out$Graphs[,,1]
```

\vspace{0.2cm}
When `save.memory = TRUE` and `collapse = FALSE`, the output is a *compressed* `bcdag` object, collecting samples from the joint posterior on DAGs and DAG parameters in the form of vectors of strings:

```{r echo = FALSE, include=FALSE}
compressed_out <- learn_DAG(S = 5000, burn = 1000, data = X,
                            a, U, w,
                            fast = FALSE, save.memory = TRUE, collapse = FALSE)
```

```{r eval = FALSE}
compressed_out <- learn_DAG(S = 5000, burn = 1000, data = X,
                            a, U, w,
                            fast = FALSE, save.memory = TRUE, collapse = FALSE)
```

```{r}
names(compressed_out)
class(compressed_out)
attributes(compressed_out)$type
```

In such a case, we can access the MCMC draws as:

```{r}
compressed_out$Graphs[1]
compressed_out$L[1]
compressed_out$D[1]
```

In addition, we implement `bd_decode`, an internal function that can be used to visualize the previous objects as matrices:

```{r}
BCDAG:::bd_decode(compressed_out$Graphs[1])
round(BCDAG:::bd_decode(compressed_out$L[1]),2)
round(BCDAG:::bd_decode(compressed_out$D[1]),2)
```

\vspace{0.2cm}
Finally, if `save.memory = TRUE` and `collapse = TRUE`, the output of `learn_DAG()` is a *compressed and collapsed* `bcdag` object, collecting only the sampled DAGs represented as a vector of strings:

```{r echo = FALSE, include=FALSE}
comprcoll_out <- learn_DAG(S = 5000, burn = 1000, data = X,
                           a, U, w,
                           fast = FALSE, save.memory = TRUE, collapse = TRUE)
```

```{r eval = FALSE}
comprcoll_out <- learn_DAG(S = 5000, burn = 1000, data = X,
                           a, U, w,
                           fast = FALSE, save.memory = TRUE, collapse = TRUE)
```

```{r}
names(comprcoll_out)
class(comprcoll_out)
attributes(comprcoll_out)$type
BCDAG:::bd_decode(comprcoll_out$Graphs[1])
```

## A note on `fast = TRUE`

Step 1. of the MCMC scheme implemented by `learn_DAG()` updates DAG $\mathcal{D}$ by randomly drawing a new candidate DAG $\mathcal{D}'$ from a proposal distribution and then accepting it with probability given by the Metropolis-Hastings (MH) acceptance rate; see also Castelletti \& Mascaro (2021). For a given DAG $\mathcal{D}$, the proposal distribution $q(\mathcal{D}'\,|\,\mathcal{D})$ is built over the set $\mathcal{O}_{\mathcal{D}}$ of *all* direct successor DAGs that can be reached from $\mathcal{D}$ by inserting, deleting or reversing a single edge in $\mathcal{D}$. A DAG $\mathcal{D}'$ is then proposed uniformly from the set $\mathcal{O}_{\mathcal{D}}$, so that $q(\mathcal{D}'\,|\,\mathcal{D})=1/|\mathcal{O}_{\mathcal{D}}|$.
Moreover, the MH acceptance rate requires evaluating the ratio of proposals $q(\mathcal{D}'\,|\,\mathcal{D})/q(\mathcal{D}\,|\,\mathcal{D}') = |\mathcal{O}_{\mathcal{D}'}|/|\mathcal{O}_{\mathcal{D}}|$, and accordingly the construction of both $\mathcal{O}_{\mathcal{D}}$ and $\mathcal{O}_{\mathcal{D}'}$. If `fast = FALSE`, the proposal ratio is computed exactly; this requires the enumeration of $\mathcal{O}_\mathcal{D}$ and $\mathcal{O}_{\mathcal{D}'}$, which may become computationally expensive, especially when $q$ is large. However, the ratio approaches $1$ as the number of variables $q$ increases: option `fast = TRUE` approximates the proposal ratio by $1$ and therefore avoids the construction of $\mathcal{O}_\mathcal{D}$ and $\mathcal{O}_{\mathcal{D}'}$. A comparison between `fast = FALSE` and `fast = TRUE` in the execution of `learn_DAG()` produces the following results in terms of computational time:

```{r results='hide'}
# No approximation
time_nofast <- system.time(out_nofast <- learn_DAG(S = 5000, burn = 1000, data = X,
                                                   a, U, w,
                                                   fast = FALSE, save.memory = FALSE, collapse = FALSE))
# Approximation
time_fast <- system.time(out_fast <- learn_DAG(S = 5000, burn = 1000, data = X,
                                               a, U, w,
                                               fast = TRUE, save.memory = FALSE, collapse = FALSE))
```

```{r}
time_nofast
time_fast
```

Finally, the corresponding estimated posterior probabilities of edge inclusion are the following:

```{r}
round(get_edgeprobs(out_nofast), 2)
round(get_edgeprobs(out_fast), 2)
```

### References

* Ben-David E, Li T, Massam H, Rajaratnam B (2015). “High dimensional Bayesian inference for Gaussian directed acyclic graph models.” *arXiv pre-print*.
* Cao X, Khare K, Ghosh M (2019). “Posterior graph selection and estimation consistency for high-dimensional Bayesian DAG models.” *The Annals of Statistics*, 47(1), 319–348.
* Castelletti F, Consonni G (2021). “Bayesian causal inference in probit graphical models.” *Bayesian Analysis*, in press.
* Castelletti F, Mascaro A (2021). “Structural learning and estimation of joint causal effects among network-dependent variables.” *Statistical Methods & Applications*, 30, 1289–1314.
* Castelletti F, Mascaro A (2022). “BCDAG: An R package for Bayesian structural and Causal learning of Gaussian DAGs.” *arXiv pre-print*.
* Godsill SJ (2001). “On the relationship between Markov chain Monte Carlo methods for model uncertainty.” *Journal of Computational and Graphical Statistics*, 10(2), 230–248.
* Peluso S, Consonni G (2020). “Compatible priors for model selection of high-dimensional Gaussian DAGs.” *Electronic Journal of Statistics*, 14(2), 4110–4132.

```{r, include = FALSE}
par(oldpar)
options(oldoptions)
```
/scratch/gouwar.j/cran-all/cranData/BCDAG/inst/doc/bcdag_learnDAG.Rmd
--- title: "Random data generation from Gaussian DAG models" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Random data generation from Gaussian DAG models} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) oldpar <- par(no.readonly = TRUE) oldoptions <- options() ``` ```{css, echo = FALSE} .math.inline { font-size: 11px; } ``` ```{r setup} library(BCDAG) ``` This is the first of a series of three vignettes introducing the R package `BCDAG`. In this vignette we focus on functions `rDAG()` and `rDAGWishart()` which implement random generation of DAG structures and DAG parameters under the assumption that the joint distribution of variables $X_1,\dots, X_q$ is Gaussian and the corresponding model (Choleski) parameters follow a DAG-Wishart distribution. Finally, data generation from Gaussian DAG models is described. ## Generating DAGs and parameters: functions `rDAG()` and `rDAGWishart()` Function `rDAG()` can be used to randomly generate a DAG structure $\mathcal{D}=(V,E)$, where $V=\{1,\dots,q\}$ and $E\subseteq V \times V$ is the set of edges. `rDAG()` has two arguments: the number of nodes (variables) $q$ and the prior probability of edge inclusion $w\in[0,1]$; the latter can be tuned to control the degree of sparsity in the resulting DAG. By fixing a probability of edge inclusion $w=0.2$, a DAG structure with $q=10$ nodes can be generated as follows: ```{r} set.seed(1) q <- 10 w <- 0.2 DAG <- rDAG(q,w) ``` ```{r printDAG} DAG ``` Output of `rDAG()` is the 0-1 $(q,q)$ adjacency matrix of the generated DAG, with element $1$ at position $(u,v)$ indicating the presence of an edge $u\rightarrow v$. Notice that the generated DAG is *topologically ordered*, meaning that edges are allowed only from high to low nodes (nodes are labeled according to rows/columns indexes); accordingly the DAG adjacency matrix is lower-triangular. ## Generating Gaussian DAG parameters Consider a Gaussian DAG model of the form \begin{eqnarray} X_1, \dots, X_q \,|\,\boldsymbol L, \boldsymbol D, \mathcal{D} &\sim& \mathcal{N}_q\left(\boldsymbol 0, (\boldsymbol{L}\boldsymbol{D}^{-1}\boldsymbol{L}^\top)^{-1}\right), \end{eqnarray} where $(\boldsymbol L, \boldsymbol D)$ are model parameters providing the decomposition of the precision (inverse-covariance) matrix $\boldsymbol{\Omega} = \boldsymbol{L}\boldsymbol{D}^{-1}\boldsymbol{L}^\top$; specifically, $\boldsymbol{L}$ is a $(q, q)$ matrix of coefficients such that for each $(u, v)$-element $\boldsymbol{L}_{uv}$ with $u \ne v$, we have $\boldsymbol{L}_{uv} \ne 0$ if and only if $(u, v) \in E$, while $\boldsymbol{L}_{uu} = 1$ for each $u = 1,\dots, q$; also, $\boldsymbol{D}$ is a $(q, q)$ diagonal matrix with $(u, u)$-element $\boldsymbol{D}_{uu}$. The latter decomposition follows from the equivalent Structural Equation Model (SEM) representation of a Gaussian DAG model: \begin{equation} \boldsymbol{L}^\top\boldsymbol{x} = \boldsymbol \epsilon, \quad \boldsymbol \epsilon \sim \mathcal{N}_q(\boldsymbol 0, \boldsymbol D), \end{equation} where $\boldsymbol x = (X_1,\dots, X_q)^\top$; see also Castelletti \& Mascaro (2021). Function `rDAGWishart` implements random sampling from $(\boldsymbol L, \boldsymbol D)\,|\,\mathcal{D} \sim \text{DAG-Wishart}(\boldsymbol{a}_{c}^{\mathcal{D}}, \boldsymbol U)$, where $\boldsymbol{U}$ is the rate parameter (a $(q,q)$ s.p.d. 
## References

* Ben-David E, Li T, Massam H, Rajaratnam B (2015). “High dimensional Bayesian inference for Gaussian directed acyclic graph models.” *arXiv pre-print*.
* Cao X, Khare K, Ghosh M (2019). “Posterior graph selection and estimation consistency for high-dimensional Bayesian DAG models.” *The Annals of Statistics*, 47(1), 319–348.
* Castelletti F, Mascaro A (2021). “Structural learning and estimation of joint causal effects among network-dependent variables.” *Statistical Methods & Applications*, 30, 1289–1314.
* Castelletti F, Mascaro A (2022). “BCDAG: An R package for Bayesian structural and Causal learning of Gaussian DAGs.” *arXiv pre-print*.
* Peluso S, Consonni G (2020). “Compatible priors for model selection of high-dimensional Gaussian DAGs.” *Electronic Journal of Statistics*, 14(2), 4110–4132.

```{r, include = FALSE}
par(oldpar)
options(oldoptions)
```
/scratch/gouwar.j/cran-all/cranData/BCDAG/vignettes/bcdag_generatedata.Rmd
--- title: "Elaborate on the output of `learn_DAG()` using get_ functions" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Elaborate on the output of `learn_DAG()` using get_ functions} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) oldpar <- par(no.readonly = TRUE) oldoptions <- options() ``` ```{css, echo = FALSE} .math.inline { font-size: 11px; } ``` ```{r setup} library(BCDAG) ``` This is the third of a series of three vignettes for the R package `BCDAG`. In this vignette, we show how to use the output of `learn_DAG()` for posterior inference on DAGs, DAG parameters, and causal effect estimation. Specifically, we introduce the functions of the `get_` family. Remember that the output of `learn_DAG()` consists of an MCMC sample from the marginal posterior distribution of DAG structures (`collapse = TRUE`) and the joint posterior of DAGs and DAG parameters (`collapse = FALSE`); see also [the corresponding vignette](#) To start with, we simulate a dataset `X` from a randomly generated Gaussian DAG model [as shown in an other vignette](#) ```{r} ## Generate data set.seed(1) q <- 8 w <- 0.2 DAG <- rDAG(q,w) a <- q U <- diag(1,q) outDL <- rDAGWishart(n=1, DAG, a, U) L <- outDL$L; D <- outDL$D Omega <- L %*% solve(D) %*% t(L) Sigma <- solve(Omega) n <- 1000 X <- mvtnorm::rmvnorm(n = n, sigma = Sigma) ``` Next, we use `learn_DAG()` to approximate the joint posterior distribution over DAG structures and DAG parameters: ```{r echo = FALSE, include=FALSE} out <- learn_DAG(S = 5000, burn = 1000, data = X, a, U, w, fast = FALSE, save.memory = FALSE, collapse = FALSE) ``` ```{r eval = FALSE} ## Run MCMC out <- learn_DAG(S = 5000, burn = 1000, data = X, a, U, w, fast = FALSE, save.memory = FALSE, collapse = FALSE) ``` ## MCMC diagnostics of convergence: function `get_diagnostics()` Before using the MCMC output for posterior inference, it is common practice to perform some convergence checks. Function `get_diagnostics()` provides graphical diagnostics of convergence for the MCMC output of `learn_DAG()`. These are based on: the number of edges in the DAGs; the posterior probability of edge inclusion for each possible edge $u \rightarrow v$, both monitored across MCMC iterations. Input of the function is an object of class `bcdag` and the output consists of: - a traceplot and running-mean plot of the number of edges in the DAGs (graph size); - a collection of traceplots of the posterior probabilities of edge inclusion computed across MCMC iterations. For each pair of distinct nodes $(u,v)$, its posterior probability of inclusion at time $s$ $(s = 1,\dots, S)$ is estimated as the proportion of DAGs visited by the MCMC up to time $s$ which contain the directed edge $u \rightarrow v$. Output is organized in $q$ plots (one for each node $v = 1, \dots, q$), each summarizing the posterior probabilities of edges $u \rightarrow v$, $u = 1,\dots, q$. ```{r fig.width = 7, fig.height= 6} get_diagnostics(out, ask = FALSE) ``` ## Posterior inference: DAG structure learning We now show how to perform posterior inference of DAGs from the MCMC output. To summarize the output of `learn_DAG()`, `print()`, `summary()` and `plot()`methods for objects of class `bcdag` are available. When `print()` is applied to a `bcdag` object, a printed message appears. 
This summarizes the type of `bcdag` object (see details on `bcdag` object types provided in [the previous vignette](#)) and the input arguments of `learn_DAG()` that generated the output. When `summary()` is used, also the posterior probabilities of edge inclusion are reported. Finally, `plot()` returns a graphical representation of the Median Probability DAG, an heatmap with the probabilities of edge inclusion and an histogram of the sizes of graph visited by the Markov chain. ```{r} print(out) summary(out) plot(out) ``` Function `get_edgeprobs()` computes and returns the collection of posterior probabilities of edge inclusion, arranged as a $(q,q)$ matrix, with $(u,v)$-element referring to edge $u\rightarrow v$: ```{r} get_edgeprobs(out) ``` The MPM model returned in the output of `summary()` can be used as a single DAG-model estimate and is obtained by including all edges whose posterior probability exceeds the threshold $0.5$. Function `get_MPMdag()` applies to an object of class `bcdag` and returns the $(q,q)$ adjacency matrix of the MPM: ```{r} MPMdag <- get_MPMdag(out) MPMdag ``` As an alternative, the Maximum A Posterior DAG estimate (MAP) can be considered. This corresponds to the DAG with the highest MCMC frequency of visits and can be recovered through the function `get_MAPdag()`: ```{r} MAPdag <- get_MAPdag(out) MAPdag ``` ```{r fig.width = 7} par(mfrow = c(1,3)) Rgraphviz::plot(as_graphNEL(DAG), main = "True DAG") Rgraphviz::plot(as_graphNEL(MPMdag), main = "MPM DAG") Rgraphviz::plot(as_graphNEL(MAPdag), main = "MAP DAG") ``` In this example, the MPM and MAP estimates differ by a single an edge between nodes $4$ and $7$ which is reversed among the two graphs. However, it can be shown that the two DAG estimates are Markov equivalent, meaning that they encode the same conditional independencies between variables. In a Gaussian setting, Markov equivalent DAGs cannot be distinguished with observational data as they represent the same statistical model. Therefore, there is no difference in choosing the MPM or the MAP estimate to infer the structure of dependencies between variables. In addition, if compared with the true graph, the DAG estimate provided by MPM [CORRETTO?] differs by a single edge between nodes $7$ and $1$ which is missing from MPM. Interestingly, one can see that the regression coefficient associated with $u\rightarrow v$, $\boldsymbol L_{7,1}$, is relatively "small", implying that the strength of the dependence between the two nodes is "weak": ```{r} round(L, 3) ``` ## Posterior inference: causal effect estimation In this last section, we introduce functions `causaleffect()` and `get_causaleffect()`, which allow to compute and estimate causal effects between variables. Specifically, we consider the causal effect on a response variable of interest consequent to a joint intervention on a given set of variables; see also Nandy et al. (2017) and Castelletti \& Mascaro (2021) for formal definitions. For a given DAG, it is possible to identify and estimate the causal effect on a node $Y$ consequent to a hypothetical hard intervention on node $j$ using the rules of the *do-calculus* (Pearl, 2000). A simple implementation of this set of rules and an estimation method for the causally sufficient case and for Gaussian data is provided by function `causaleffect()`. 
The function takes as input a numerical vector representing the labels of the intervened nodes (also called intervention `target`), a numerical value indicating the `response` variable and the DAG model parameters `L` and `D`; see also Castelletti \& Mascaro (2021) or [our previous vignette](#) for a detailed model description. For a given response variable $Y \in\{1, \ldots, q\}$, and intervention target $I \subseteq \{1,\dots,q\}$ the *total joint effect* of an intervention $\operatorname{do}\left\{X_{j}=\tilde{x}_{j}\right\}_{j \in I}$ on $Y$ is $$ \theta_{Y}^{I}:=\left(\theta_{h, Y}^{I}\right)_{h \in I}, $$ where for each $h \in I$ $$ \theta_{h, Y}^{I}:=\frac{\partial}{\partial x_{h}} \mathbb{E}\left(Y \mid \operatorname{do}\left\{X_{j}=\tilde{x}_{j}\right\}_{j \in I}\right) $$ See also Castelletti \& Mascaro (2021) and Nandy et al. (2017) for more details. To better understand the difference between single and joint interventions, consider as an example the total causal effect on node $Y=1$ (i.e. variable $X_1$) of a joint intervention on nodes $4$ and $5$, $I=\{4,5\}$ (i.e. variables $X_4, X_5$) and the total causal effects of two separate interventions on nodes $4$ and $5$ under the causal model represented by the DAG generated before: ```{r} Rgraphviz::plot(as_graphNEL(DAG), main = "True DAG") ``` These are given by: ```{r} causaleffect(targets = c(4,5), response = 1, L = L, D = D) causaleffect(targets = 4, response = 1, L = L, D = D) causaleffect(targets = 5, response = 1, L = L, D = D) ``` As it can be observed, the total causal effect of intervening on variable $X_4$ is null both in a single intervention on $4$ and in a joint intervention on $\{X_4, X_5\}$, while intervening on $X_5$ produces the same positive total causal effect in both cases. The total causal effects produced are thus exactly the same for both variables in the two cases. However, if we slightly modify the DAG by adding an edge from node $4$ to node $5$, so that: ```{r} DAG2 <- DAG DAG2[4,5] <- 1 par(mfrow = c(1,2)) Rgraphviz::plot(as_graphNEL(DAG), main = "True DAG") Rgraphviz::plot(as_graphNEL(DAG2), main = "Modified DAG") ``` ```{r include = FALSE} par(mfrow = c(1,1)) ``` and modify `L` accordingly: ```{r} L2 <- L L2[4,5] <- runif(1) L2[4,5] ``` The comparison of single and joint total causal effects now produces different results: ```{r} causaleffect(targets = c(4,5), response = 1, L = L2, D = D) causaleffect(targets = 4, response = 1, L = L2, D = D) causaleffect(targets = 5, response = 1, L = L2, D = D) ``` As it can be observed, this time a single intervention on $X_4$ produces a negative causal effect on $X_1$, while jointly intervening on $X_4$ and $X_5$ makes the total causal effect of $X_4$ on $X_1$ null. The effect of $X_4$ on $X_1$ was in fact mediated by $X_5$: intervening simultaneously also on $X_5$ erases the effect of $X_4$ on $X_5$ and, in turn, of $X_4$ on $X_1$. See also Castelletti \& Mascaro (2021) or Nandy et al. (2017) for a more detailed description. The identification and estimation of causal effects requires the specification of a DAG. When the DAG is unknown, function `get_causaleffect()` can be used. It applies to objects of class `bcdag`; the latter corresponds to the output of `learn_DAG()` and consists of a sample of size $S$ from the posterior of DAGs and DAG parameters. In addition `get_causaleffect()` takes as input a numerical vector representing the labels of the intervened nodes (the intervention `target`) and a numerical value indicating the `response` variable. 
Output of the function is an object of class `bcdagCE`, containing a sample of size $S$ from the posterior distribution of the causal effect coefficients associated with the intervention `targets` and some useful summaries of the posterior distributions such as the mean and the quantiles: ```{r} effects_out <- get_causaleffect(out, targets = c(4,5), response = 1) head(effects_out$causaleffects) ``` `print()`, `summary()` and `plot()` methods are available for objects of class `bcdagCE`. `print()` returns the input used and the prior hyperparameters. `summary()` returns the estimated mean, quantiles of the estimated causal effects, as well as the probabilities of it being greater, lower or equal to zero. `plots()` produces a boxplot and a histogram of the causal effect of each target node. ```{r} print(effects_out) summary(effects_out) plot(effects_out) ``` Also notice that, if the `BCDAG` object input of `get_causaleffect()` is of type `collapsed` or `compressed and collapsed`, then `get_causaleffect()` requires drawing from the posterior distribution of parameters $(\boldsymbol L, \boldsymbol D)$ before estimating the required causal effects: ```{r echo = FALSE, include=FALSE} coll_out <- learn_DAG(S = 5000, burn = 1000, data = X, a, U, w, fast = FALSE, save.memory = FALSE, collapse = TRUE) ``` ```{r eval=FALSE} coll_out <- learn_DAG(S = 5000, burn = 1000, data = X, a, U, w, fast = FALSE, save.memory = FALSE, collapse = TRUE) ``` ```{r} names(coll_out) ``` ```{r include = FALSE} effects_collout <- get_causaleffect(coll_out, targets = c(4,5), response = 1) ``` ```{r eval = FALSE} effects_collout <- get_causaleffect(coll_out, targets = c(4,5), response = 1) ``` ```{r} round(effects_collout$post_mean, 3) ``` ### References * Castelletti, F, Mascaro, A (2021). "Structural learning and estimation of joint causal effects among network-dependent variables". *Statistical Methods & Applications*, 30(5), 1289-1314. * Castelletti F, Mascaro A (2022). “BCDAG: An R package for Bayesian structural and Causal learning of Gaussian DAGs.” *arXiv pre-print*. * Nandy, P, Maathuis, MH., Richardson, TS (2017). "Estimating the effect of joint interventions from observational data in sparse high-dimensional settings". *The Annals of Statistics*, 45(2), 647-674. * Pearl J (2000). *Causality: Models, Reasoning, and Inference*. Cambridge University Press, Cambridge. ISBN 0-521-77362-8. ```{r, include = FALSE} par(oldpar) options(oldoptions) ```
/scratch/gouwar.j/cran-all/cranData/BCDAG/vignettes/bcdag_getfamily.Rmd
--- title: "MCMC scheme for posterior inference of Gaussian DAG models: the `learn_DAG()` function" output: rmarkdown::html_vignette # html_document: # theme: readable # highlight: textmate vignette: > %\VignetteIndexEntry{MCMC scheme for posterior inference of Gaussian DAG models: the `learn_DAG()` function} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) oldpar <- par(no.readonly = TRUE) oldoptions <- options() ``` ```{css, echo = FALSE} .math.inline { font-size: 11px; } ``` ```{r setup} library(BCDAG) ``` This is the second of a series of three vignettes for the R package `BCDAG`. In this vignette we focus on function `learn_DAG()`, which implements a Markov Chain Monte Carlo (MCMC) algorithm to sample from the joint posterior of DAG structures and DAG-parameters under the Gaussian assumption. ### Model description The underlying Bayesian Gaussian DAG-model can be summarized as follows: \begin{eqnarray} X_1, \dots, X_q \,|\,\boldsymbol L, \boldsymbol D, \mathcal{D} &\sim& \mathcal{N}_q\left(\boldsymbol 0, (\boldsymbol{L}\boldsymbol{D}^{-1}\boldsymbol{L}^\top)^{-1}\right)\\ (\boldsymbol L, \boldsymbol D)\,|\,\mathcal{D} &\sim& \text{DAG-Wishart}(\boldsymbol{a}_{c}^{\mathcal{D}}, \boldsymbol U) \\ p(\mathcal{D}) &\propto& w^{|\mathcal{S}_\mathcal{D}|}(1-w)^{\frac{q(q-1)}{2} - {|\mathcal{S}_\mathcal{D}|}} \end{eqnarray} In particular $\mathcal{D}=(V,E)$ denotes a DAG structure with set of nodes $V=\{1,\dots,q\}$ and set of edges $E\subseteq V \times V$. Moreover, $(\boldsymbol L, \boldsymbol D)$ are model parameters providing the decomposition of the precision (inverse-covariance) matrix $\boldsymbol{\Omega} = \boldsymbol{L}\boldsymbol{D}^{-1}\boldsymbol{L}^\top$; specifically, $\boldsymbol{L}$ is a $(q, q)$ matrix of coefficients such that for each $(u, v)$-element $\boldsymbol{L}_{uv}$ with $u \ne v$, $\boldsymbol{L}_{uv} \ne 0$ if and only if $(u, v) \in E$, while $\boldsymbol{L}_{uu} = 1$ for each $u = 1,\dots, q$; also, $\boldsymbol{D}$ is a $(q, q)$ diagonal matrix with $(u, u)$-element $\boldsymbol{D}_{uu}$. The latter decomposition follows from the equivalent Structural Equation Model (SEM) representation of a Gaussian DAG-model; see also Castelletti \& Mascaro (2021). Conditionally to $\mathcal{D}$, a prior to $(\boldsymbol{L}, \boldsymbol{D})$ is assigned through a *compatible* DAG-Wishart distribution with rate hyperparameter $\boldsymbol{U}$, a $(q,q)$ s.p.d. matrix, and shape hyperparameter $\boldsymbol{a}^{\mathcal {D}}_{c}$, a $(q,1)$ vector; see also Cao et al. (2019) and Peluso \& Consonni (2020). Finally, a prior on DAG $\mathcal{D}$ is assigned through a Binomial distribution on the number of edges in the graph; in $p(\mathcal{D})$, $w \in (0,1)$ is a prior probability of edge inclusion, while $|\mathcal{S_{\mathcal{D}}}|$ denotes the number of edges in $\mathcal{D}$; see again Castelletti \& Mascaro (2021) for further details. Target of the MCMC scheme is therefore the joint posterior of $(\boldsymbol{L},\boldsymbol{D},\mathcal{D})$, \begin{equation} p(\boldsymbol L, \boldsymbol D, \mathcal{D}\,|\, \boldsymbol X) \propto p(\boldsymbol{X}\,|\,\boldsymbol L, \boldsymbol D, \mathcal{D})p(\boldsymbol{L},\boldsymbol{D}\,|\,\mathcal{D}) \,p(\mathcal{D}), \end{equation} where $\boldsymbol{X}$ denotes a $(n,q)$ data matrix as obtained through $n$ i.i.d. 
draws from the Gaussian DAG-model and $p(\boldsymbol{X}\,|\,\boldsymbol L, \boldsymbol D, \mathcal{D})$ is the likelihood function. See also Castelletti \& Mascaro (2022+) for full details. ### Generating data We first randomly generate a DAG $\mathcal{D}$, the DAG parameters $(\boldsymbol{L},\boldsymbol{D})$ and then $n=1000$ i.i.d. observations from a Gaussian DAG-model as follows: ```{r} set.seed(1) q <- 8 w <- 0.2 DAG <- rDAG(q,w) a <- q U <- diag(1,q) outDL <- rDAGWishart(n=1, DAG, a, U) L <- outDL$L; D <- outDL$D Omega <- L %*% solve(D) %*% t(L) Sigma <- solve(Omega) n <- 1000 X <- mvtnorm::rmvnorm(n = n, sigma = Sigma) ``` See also our [vignette about data generation from Gaussian DAG-models](#). ## `learn_DAG()` Function `learn_DAG()` implements an MCMC algorithm to sample from the joint posterior of DAGs and DAG parameters. This is based on a Partial Analytic Structure (PAS) algorithm (Godsill, 2012) which, at each iteration: 1. Updates the DAG through a Metropolis-Hastings (MH) step where, given the current DAG, a new (direct successor) DAG is drawn from a suitable proposal distribution and accepted with a probability given by the MH acceptance rate (see also section [A note on `fast = TRUE`]); 2. Samples from the posterior distribution of the (updated DAG) parameters; see also Castelletti \& Consonni (2021) for more details. We implement it as follows: ```{r echo = FALSE, include=FALSE} out <- learn_DAG(S = 5000, burn = 1000, data = X, a, U, w, fast = FALSE, save.memory = FALSE, collapse = FALSE) ``` ```{r eval = FALSE} out <- learn_DAG(S = 5000, burn = 1000, data = X, a, U, w, fast = FALSE, save.memory = FALSE, collapse = FALSE) ``` ### Input Inputs of `learn_DAG()` correspond to three different sets of arguments: * `S`, `burn` and `data` are standard inputs required by any MCMC algorithm. In particular, `S` defines the desired length of the chain, which is obtained by discarding the first `burn` observations (the total number of sampled observations is therefore `S + burn`); `data` is instead the $(n,q)$ matrix $\boldsymbol{X}$; * `a`, `U` and `w` are hyperparameters of the priors on DAGs (`w`) and DAG parameters (`a`, `U`); see also Equation [REF]. The same appear in functions `rDAG()` and `rDAGWishart()` which were introduced in our vignette [ADD REF TO THE VIGNETTE]. * `fast`, `save.memory` and `collapse` are boolean arguments which allow to: implement an approximate proposal distribution within the MCMC scheme (`fast = TRUE`); change the array structure of the stored MCMC output into strings in order to save memory (`save.memory = TRUE`); implement an MCMC for DAG structure learning only, without sampling from the posterior of parameters (`collapse = TRUE`). See also [A note on `fast = TRUE`] and Castelletti \& Mascaro (2022+) for full details. ### Output The output of `learn_DAG()` is an object of class `bcdag`: ```{r} class(out) ``` `bcdag` objects include the output of the MCMC algorithm together with a collection of meta-data representing the input arguments of `learn_DAG()`; these are stored in the attributes of the object:: ```{r} str(out) ``` Attribute `type` refers to the output of `learn_DAG()`, whose structure depends on the choice of the arguments `save.memory` and `collapse`. 
\vspace{0.2cm} When both are set equal to `FALSE`, as in the previous example, the output of `learn_DAG()` is a *complete* `bcdag` object, collecting three $(q,q,S)$ arrays with the DAG structures (in the form of $q \times q$ adjacency matrices) and the DAG parameters $\boldsymbol{L}$ and $\boldsymbol{D}$ (both as $q \times q$ matrices) sampled by the MCMC: ```{r} out$Graphs[,,1] round(out$L[,,1],2) round(out$D[,,1],2) ``` \vspace{0.2cm} When `collapse = TRUE` and `save.memory = FALSE` the output of `learn_DAG()` is a *collapsed* `bcdag` object, consisting of a $(q,q,S)$ array with the adjacency matrices of the DAGs sampled by the MCMC: ```{r echo = FALSE, include=FALSE} collapsed_out <- learn_DAG(S = 5000, burn = 1000, data = X, a, U, w, fast = FALSE, save.memory = FALSE, collapse = TRUE) ``` ```{r eval = FALSE} collapsed_out <- learn_DAG(S = 5000, burn = 1000, data = X, a, U, w, fast = FALSE, save.memory = FALSE, collapse = TRUE) ``` ```{r} names(collapsed_out) class(collapsed_out) attributes(collapsed_out)$type collapsed_out$Graphs[,,1] ``` \vspace{0.2cm} When `save.memory = TRUE` and `collapse = FALSE`, the output is a *compressed* `bcdag` object, collecting samples from the joint posterior on DAGs and DAG parameters in the form of a vector of strings: ```{r echo = FALSE, include=FALSE} compressed_out <- learn_DAG(S = 5000, burn = 1000, data = X, a, U, w, fast = FALSE, save.memory = TRUE, collapse = FALSE) ``` ```{r eval = FALSE} compressed_out <- learn_DAG(S = 5000, burn = 1000, data = X, a, U, w, fast = FALSE, save.memory = TRUE, collapse = FALSE) ``` ```{r} names(compressed_out) class(compressed_out) attributes(compressed_out)$type ``` In such a case, we can access to the MCMC draws as: ```{r} compressed_out$Graphs[1] compressed_out$L[1] compressed_out$D[1] ``` In addition, we implement `bd_decode`, an internal function that can be used to visualize the previous objects as matrices: ```{r} BCDAG:::bd_decode(compressed_out$Graphs[1]) round(BCDAG:::bd_decode(compressed_out$L[1]),2) round(BCDAG:::bd_decode(compressed_out$D[1]),2) ``` \vspace{0.2cm} Finally, if `save.memory = TRUE` and `collapse = TRUE`, the output of `learn_DAG()` is a *compressed and collapsed* `bcdag` object collecting only the sampled DAGs represented as vector of strings: ```{r echo = FALSE, include=FALSE} comprcoll_out <- learn_DAG(S = 5000, burn = 1000, data = X, a, U, w, fast = FALSE, save.memory = TRUE, collapse = TRUE) ``` ```{r eval = FALSE} comprcoll_out <- learn_DAG(S = 5000, burn = 1000, data = X, a, U, w, fast = FALSE, save.memory = TRUE, collapse = TRUE) ``` ```{r} names(comprcoll_out) class(comprcoll_out) attributes(comprcoll_out)$type BCDAG:::bd_decode(comprcoll_out$Graphs[1]) ``` ## A note on `fast = TRUE` Step 1. of the MCMC scheme implemented by `learn_DAG()` updates DAG $\mathcal{D}$ by randomly drawing a new candidate DAG $\mathcal{D}'$ from a proposal distribution and then accepting it with probability given by the Metropolis Hastings (MH) acceptance rate; see also Castelletti \& Mascaro (2021). For a given DAG $\mathcal{D}$, the proposal distribution $q(\mathcal{D}'\,|\,\mathcal{D})$ is built over the set $\mathcal{O}_{\mathcal{D}}$ of \emp{all} direct successors DAGs that can be reached from $\mathcal{D}$ by inserting, deleting or reversing a single edge in $\mathcal{D}$. A DAG $\mathcal{D}'$ is then proposed uniformly from the set $\mathcal{O}_{\mathcal{D}}$ so that $q(\mathcal{D}'\,|\,\mathcal{D})=1/|\mathcal{O}_{\mathcal{D}}|$. 
Moreover, the MH rate requires to evaluate the ratio of proposals $q(\mathcal{D}'\,|\,\mathcal{D})/q(\mathcal{D}\,|\,\mathcal{D}') = |\mathcal{O}_{\mathcal{D}'}|/|\mathcal{O}_{\mathcal{D}}|$, and accordingly the construction of both $\mathcal{O}_{\mathcal{D}}$ and $\mathcal{O}_{\mathcal{D}'}$. If `fast = FALSE`, the proposal ratio is computed exactly; this requires the enumerations of $\mathcal{O}_\mathcal{D}$ and $\mathcal{O}_{\mathcal{D}'}$ which may become computationally expensive, especially when $q$ is large. However, the ratio approaches $1$ as the number of variables $q$ increases: option `fast = TRUE` implements such an approximation, which therefore avoids the construction of $\mathcal{O}_\mathcal{D}$ and $\mathcal{O}_{\mathcal{D}'}$. A comparison between `fast = FALSE` and `fast = TRUE` in the execution of `learn_DAG()` produces the following results in terms of computational time: ```{r results='hide'} # No approximation time_nofast <- system.time(out_nofast <- learn_DAG(S = 5000, burn = 1000, data = X, a, U, w, fast = FALSE, save.memory = FALSE, collapse = FALSE)) # Approximation time_fast <- system.time(out_fast <- learn_DAG(S = 5000, burn = 1000, data = X, a, U, w, fast = TRUE, save.memory = FALSE, collapse = FALSE)) ``` ```{r} time_nofast time_fast ``` Finally, the corresponding estimated posterior probabilities of edge inclusion are the following: ```{r} round(get_edgeprobs(out_nofast), 2) round(get_edgeprobs(out_fast), 2) ``` ### References * Ben-David E, Li T, Massam H, Rajaratnam B (2015). “High dimensional Bayesian inference for Gaussian directed acyclic graph models.” *arXiv pre-print*. * Cao X, Khare K, Ghosh M (2019). “Posterior graph selection and estimation consistency for high-dimensional Bayesian DAG models.” *The Annals of Statistics*, 47(1), 319–348. * Castelletti F, Consonni G (2021). “Bayesian causal inference in probit graphical models” *Bayesian Analysis*, In press. * Castelletti F, Mascaro A (2021). “Structural learning and estimation of joint causal effects among network-dependent variables.” *Statistical Methods & Applications*, 30, 1289–1314. * Castelletti F, Mascaro A (2022). “BCDAG: An R package for Bayesian structural and Causal learning of Gaussian DAGs.” *arXiv pre-print*. * Godsill, SJ (2012). "On the relationship between Markov chain Monte Carlo methods for model uncertainty." *Journal of computational and graphical statistics*, 10(2), 230-248. * Peluso S, Consonni G (2020). “Compatible priors for model selection of high-dimensional Gaussian DAGs.” *Electronic Journal of Statistics*, 14(2), 4110–4132. ```{r, include = FALSE} par(oldpar) options(oldoptions) ```
/scratch/gouwar.j/cran-all/cranData/BCDAG/vignettes/bcdag_learnDAG.Rmd
BCDating <- setClass("BCDating",
                     representation(name = "character", states = "ts",
                                    peaks = "numeric", troughs = "numeric",
                                    y = "ts", param = "list", type = "character"))

# Functions on BBQ -------------------------

BBQ <- function (y, mincycle = 5, minphase = 2, name = "")
{
  k.peak = 2
  k.trough = 2
  l.peak = 2
  l.trough = 2
  e = NULL
  if (!is.ts(y)) stop("Argument <y> should be an object of class 'ts'")
  e1 <- BCDating.init(y, k.peak = k.peak, k.trough = k.trough,
                      l.peak = l.peak, l.trough = l.trough)
  datok <- BCDating.censor(e1, y, mincycle = mincycle, minphase = minphase, e = e)
  datok@y <- y
  datok@type <- "BCDating:BBQ"
  datok@param <- list(k.peak = k.peak, k.trough = k.trough,
                      l.peak = l.peak, l.trough = l.trough,
                      mincycle = mincycle, minphase = minphase, e = e)
  return(datok)
}

build.mat_tp <- function (peaks, troughs)
{
  if (any(is.na(peaks), is.na(troughs))) stop("Missing values are not allowed")
  np <- length(peaks)
  nt <- length(troughs)
  peaksn <- matrix(peaks, np, 2)
  peaksn[, 2] <- 1
  troughsn <- matrix(troughs, nt, 2)
  troughsn[, 2] <- 0
  anmat <- rbind(peaksn, troughsn)
  anmat <- anmat[order(anmat[, 1]), ]
  return(anmat)
}

CTS_BBQ <- function (x, i) return(ETS_BBQ(-x, i))

ETS_BBQ <- function (x, i)
{
  if (!((i >= 1) & (i <= length(x)))) stop("wrong parameter i")
  return(all(x[i] >= x))
}

BCDating.alter1 <- function (dat, y)
{
  if (class(dat)[1] != "BCDating")
    stop("Argument <dat> must be an object of class 'BCDating'")
  bcp <- dat@peaks
  bct <- dat@troughs
  np <- length(bcp)
  nt <- length(bct)
  keep <- rep(TRUE, nt)
  if (bcp[1] < bct[1]) {  # Peaks are first
    r <- min(np, nt)
    for (i in 1:r) if (y[bct[i]] > y[bcp[i]]) keep[i] <- FALSE
  } else {                # Troughs are first
    r <- min(np, nt - 1)
    for (i in 1:r) if (y[bct[i + 1]] > y[bcp[i]]) keep[i + 1] <- FALSE
  }
  if (r > nt) keep <- keep[1:nt]
  bct <- bct[keep]
  res <- dat
  res@peaks <- bcp
  res@troughs <- bct
  return(res)
}

BCDating.alter2 <- function (dat, y)
{
  if (class(dat)[1] != "BCDating")
    stop("Argument <dat> must be an object of class 'BCDating'")
  bcp <- dat@peaks    # Business Cycle Peaks
  bct <- dat@troughs  # Business Cycle Troughs
  if (any(is.na(bcp), is.na(bct), is.na(y)))
    stop("Missing values are not allowed")
  np <- length(bcp)   # Number of Peaks
  nt <- length(bct)   # Number of Troughs
  anmat <- build.mat_tp(bcp, bct)
  j <- 1
  repeat {
    if (j >= nrow(anmat)) break
    state1 <- anmat[j, 2]
    state2 <- anmat[j + 1, 2]
    if (state1 == state2) {
      if (state1 == 1) {
        if (y[anmat[j, 1]] > y[anmat[j + 1, 1]]) vire <- j + 1 else vire <- j
      }
      if (state1 == 0) {
        if (y[anmat[j, 1]] > y[anmat[j + 1, 1]]) vire <- j else vire <- j + 1
      }
      anmat <- anmat[-vire, ]
    } else j <- j + 1
  }
  if (FALSE) {
    res <- dat
    res@peaks <- anmat[anmat[, 2] == 1, 1]
    res@troughs <- anmat[anmat[, 2] == 0, 1]
    res@states <- BCDating.pt2states(start = start(y), end = end(y),
                                     freq = frequency(y),
                                     peaks = res@peaks, troughs = res@troughs)
  }
  res <- BCDating.peakstroughs(start = start(y), end = end(y), freq = frequency(y),
                               peaks = anmat[anmat[, 2] == 1, 1],
                               troughs = anmat[anmat[, 2] == 0, 1],
                               name = dat@name, type = dat@type, param = dat@param)
  return(res)
}

BCDating.censor <- function (dat, y, mincycle = 5, minphase = 2, e = NULL)
{
  if (class(dat)[1] != "BCDating")
    stop("argument <dat> should be an object of class 'BCDating'")
  if (class(y)[1] != "ts")
    stop("argument <y> should be an object of class 'ts'")
  if (!(frequency(y) %in% c(4, 12)))
    stop("the time series (argument <y>) should be quarterly or monthly")
  if (is.null(e)) {
    if (frequency(y) == 12) e <- 6
    if (frequency(y) == 4) e <- 2
  }
  et1 <- BCDating.alter2(dat, y)
  et2 <- BCDating.alter1(et1, y)
  et3 <- BCDating.alter2(et2, y)
  deb <- et3
  repeat {
    init <- deb
    et1 <- BCDating.enf1p(init, y, mincycle = mincycle)
    et2 <- BCDating.enfvbp(et1, y, e = e)
    et3 <- BCDating.enfvc(et2, y)
    et4 <- BCDating.enf1p(et3, y, mincycle = mincycle)
    et5 <- BCDating.enfve(et4, y, minphase = minphase)
    et6 <- BCDating.enfvc(et5, y)
    fin <- et6
    if (all(deb@states == fin@states)) break
    deb <- fin
  }
  return(fin)
}

BCDating.enf1p <- function (dat, y, mincycle = 5)
{
  if (class(dat)[1] != "BCDating")
    stop("argument <dat> must be an object of class 'BCDating'")
  bcp <- dat@peaks
  i <- 2
  repeat {
    if (i >= length(bcp)) break
    if (bcp[i] - bcp[i - 1] < mincycle) {
      if (y[bcp[i]] > y[bcp[i - 1]]) vire <- i - 1 else vire <- i
      bcp <- bcp[-vire]
    } else i <- i + 1
  }
  dat@peaks <- bcp
  intermediaire <- BCDating.alter2(dat, y)
  bct <- intermediaire@troughs
  i <- 2
  repeat {
    if (i >= length(bct)) break
    if (bct[i] - bct[i - 1] < mincycle) {
      if (y[bct[i]] < y[bct[i - 1]]) vire <- i - 1 else vire <- i
      bct <- bct[-vire]
    } else i <- i + 1
  }
  dat@troughs <- bct
  fin <- BCDating.alter2(dat, y)
  return(fin)
}

BCDating.enfvbp <- function (dat, y, e = 6)
{
  if (class(dat)[1] != "BCDating")
    stop("argument <dat> should be an object of class 'BCDating'")
  if (class(y)[1] != "ts")
    stop("argument <y> should be an object of class 'ts'")
  n <- length(y)
  proceed <- function(seqq, e) return(seqq[(seqq > e) & (seqq <= n - e)])
  res <- dat
  res@peaks <- proceed(dat@peaks, e)
  res@troughs <- proceed(dat@troughs, e)
  return(BCDating.alter2(res, y))
}

BCDating.enfvc <- function (dat, y)
{
  if (class(dat)[1] != "BCDating")
    stop("argument <dat> should be an object of class 'BCDating'")
  if (class(y)[1] != "ts")
    stop("argument <y> should be an object of class 'ts'")
  bcp <- dat@peaks
  bct <- dat@troughs
  n <- length(y)
  repeat {
    nothing_done <- TRUE
    if ((length(bcp) == 0) | (length(bct) == 0)) break
    m <- min(min(bcp), min(bct))
    if (m == min(bcp)) {
      change_p <- TRUE
      change_t <- FALSE
      if (y[1] > y[bcp[1]]) bcp <- bcp[-1] else change_p <- FALSE
    } else {
      change_p <- FALSE
      change_t <- TRUE
      if (y[1] < y[bct[1]]) bct <- bct[-1] else change_t <- FALSE
    }
    nothing_done <- nothing_done & (!change_p) & (!change_t)
    m <- max(max(bcp), max(bct))
    if (m == max(bcp)) {
      np <- length(bcp)
      change_p <- TRUE
      change_t <- FALSE
      if (y[n] > y[bcp[np]]) bcp <- bcp[-np] else change_p <- FALSE
    } else {
      nt <- length(bct)
      change_p <- FALSE
      change_t <- TRUE
      if (y[n] < y[bct[nt]]) bct <- bct[-nt] else change_t <- FALSE
    }
    nothing_done <- nothing_done & (!change_p) & (!change_t)
    if (nothing_done) break
  }
  res <- dat
  res@peaks <- bcp
  res@troughs <- bct
  return(BCDating.alter2(res, y))
}

BCDating.enfve <- function (dat, y, minphase = 2)
{
  if (class(dat)[1] != "BCDating")
    stop("argument <dat> must be an object of class 'BCDating'")
  j <- 1
  repeat {
    anmat <- build.mat_tp(dat@peaks, dat@troughs)
    if (j >= nrow(anmat)) break
    if ((anmat[j + 1, 1] - anmat[j, 1]) < minphase) {
      anmat <- anmat[-(j + 1), ]
      dat@peaks <- anmat[anmat[, 2] == 1, 1]
      dat@troughs <- anmat[anmat[, 2] == 0, 1]
      dat <- BCDating.alter2(dat, y)
    } else j <- j + 1
  }
  return(dat)
}

BCDating.init <- function (y, ETS = ETS_BBQ, CTS = CTS_BBQ,
                           k.peak = 2, k.trough = 2, l.peak = 2, l.trough = 2)
{
  if (!(is.ts(y))) stop("Argument <y> should be an object of class 'ts'")
  n <- length(y)
  peaks <- rep(NA, n)
  troughs <- rep(NA, n)
  for (i in 1:n) {
    LB_p <- max(1, i - k.peak)
    LB_t <- max(1, i - k.trough)
    z_p <- y[LB_p:min(n, i + l.peak)]
    z_t <- y[LB_t:min(n, i + l.trough)]
    if ((i > k.peak) & (i <= n - l.peak))
      peaks[i] <- ETS(z_p, i - LB_p + 1)
    if ((i > k.trough) & (i <= n - l.trough))
      troughs[i] <- CTS(z_t, i - LB_t + 1)
  }
  return(BCDating.peakstroughs(start = start(y), end = end(y), freq = frequency(y),
                               peaks = which(peaks), troughs = which(troughs)))
}

BCDating.peakstroughs <- function (start, end, freq = NULL, peaks, troughs,
                                   name = "", type = "user-defined", param = NULL)
{
  if (is.null(freq)) {
    freq <- 0
    if (substr(peaks[1], 5, 5) == "M") freq <- 12
    if (substr(peaks[1], 5, 5) == "Q") freq <- 4
    char2time <- function(chaine) {
      year <- as.integer(substr(chaine, 1, 4))
      per <- as.integer(substr(chaine, 6, nchar(chaine)))
      return(year + (per - 1)/freq)
    }
    temps <- ts(0, start = start, end = end, frequency = freq)
    temps <- time(temps)
    peaks <- sapply(char2time(peaks), function(a) which(abs(a - temps) < 0.001))
    troughs <- sapply(char2time(troughs), function(a) which(abs(a - temps) < 0.001))
  } else {
    temps <- ts(0, start = start, end = end, frequency = freq)
    temps <- time(temps)
  }
  if (!(freq %in% c(4, 12)))
    stop("frequency must be 12 (monthly dates) or 4 (quarterly dates)")
  states <- BCDating.pt2states(start, end, freq, peaks, troughs)
  if (is.null(param)) param <- vector("list", 0)
  return(new("BCDating", name = name, states = states, peaks = peaks,
             troughs = troughs, param = param, type = type))
}

BCDating.pt2states <- function (start, end, freq, peaks, troughs)
{
  states <- ts(0, start = start, end = end, frequency = freq)
  n <- length(states)
  mat_tp <- build.mat_tp(peaks, troughs)
  r <- nrow(mat_tp)
  if (mat_tp[r, 1] < n) mat_tp <- rbind(mat_tp, c(n, 1 - mat_tp[r, 2]))
  if (peaks[1] < troughs[1]) add <- 0 else add <- 1
  states[1:(mat_tp[1, 1])] <- (-1)^add
  for (j in 1:(nrow(mat_tp) - 1)) {
    states[(mat_tp[j, 1] + 1):(mat_tp[j + 1, 1])] <- (-1)^(j + add)
  }
  # states[n] <- states[n-1]
  return(states)
}

# Show Method ----------------------------

matsummary <- function (object)
{
  np <- length(object@peaks)
  nt <- length(object@troughs)
  r <- max(np, nt)
  if (r != 0) {
    res <- matrix(NA, r, 2)
    if (np == 0) res[1, 2] <- object@troughs[1]
    if (nt == 0) res[1, 1] <- object@peaks[1]
    if ((np > 0) & (nt > 0)) {
      if (object@peaks[1] < object@troughs[1]) {
        res[1:np, 1] <- object@peaks
        res[1:nt, 2] <- object@troughs
      } else {
        if (np == nt) res <- matrix(NA, r + 1, 2)
        res[2:(np + 1), 1] <- object@peaks
        res[1:nt, 2] <- object@troughs
      }
    }
  }
  colnames(res) <- c("Peaks", "Troughs")
  return(res)
}

matsummary2 <- function (object)
{
  dat <- object
  nr <- length(dat@peaks) + length(dat@troughs) + 1
  summarytab <- matrix(NA, nr, 7)
  change.states <- sort(c(0, dat@peaks, dat@troughs))
  summarytab[, 1] <- dat@states[change.states + 1]
  summarytab[, 3] <- c(change.states[-1], NA)
  summarytab[-1, 2] <- summarytab[-nr, 3]
  summarytab[, 4] <- summarytab[, 3] - summarytab[, 2]
  summarytab[, 5] <- dat@y[summarytab[, 2]]
  summarytab[, 6] <- dat@y[summarytab[, 3]]
  summarytab[, 7] <- summarytab[, 1] * (summarytab[, 6] - summarytab[, 5])
  return(summarytab)
}

if (!isGeneric("show")) {
  setGeneric("show", function(object, ...) standardGeneric("show"))
}

setMethod("show", signature(object = "BCDating"),
  function (object)
  {
    if (nchar(object@name) > 0) cat("Dating name :", object@name, "\n")
    res <- matsummary(object)
    affich <- res
    affich[, c(1, 2)] <- ts2char(object@states)[res]
    duration <- res[, 2] - res[, 1]
    names(duration) <- NULL
    affich <- cbind(affich, duration)
    colnames(affich)[3] <- "Duration"
    res <- data.frame(affich)
    print(res)
  }
)

# Plot Method ---------------------------------------------

vrt <- function(v)  # virtual time series to prepare the plot
{
  st <- min(time(v), na.rm = TRUE)
  end <- max(time(v), na.rm = TRUE)
  dl <- length(v)
  tsmin <- as.numeric(min(v, na.rm = TRUE))
  tsmax <- as.numeric(max(v, na.rm = TRUE))
  tsr <- tsmax - tsmin
  vrt <- ts(data = 1:dl/dl*tsr*1.1 + tsmin - 0.05*tsr, start = st, frequency = frequency(v))
}

if (!isGeneric("plot")) {
  setGeneric("plot", function(x, y, ...) standardGeneric("plot"))
}

setMethod("plot", signature(x = "BCDating", y = "missing"),
  function (x, y, dates = FALSE, yearrep = 2,
            col.bg = grey(0.8), col.exp = grey(1), col.rec = grey(0.45),
            xaxs = "i", yaxs = "i", main = "", xlab = "", ylab = "",
            lwd = 1, cex = 0.5, vert = NULL, col.vert = "darkblue",
            xmin = NULL, xmax = NULL, ymin = 0, ymax = 1, debug = FALSE, ...)
  {
    if (!is.null(main))
      if ((nchar(x@name) > 0) & (nchar(main) == 0)) main <- paste(x@name)
    smin <- as.numeric(min(time(x@states), na.rm = TRUE))
    smax <- as.numeric(max(time(x@states), na.rm = TRUE))
    f <- frequency(x@states)
    if (is.null(xmin)) {
      xmin <- smin - 1/f
      xmin <- round(xmin*f)/f
    }
    if (is.null(xmax)) {
      xmax <- smax + 1/f
      xmax <- round(xmax*f)/f
    }
    suppressWarnings(
      xstates <- window(ts(c(rep(NA, f*(smin - xmin)), x@states, rep(NA, f*(xmax - smax))),
                           start = xmin, frequency = f), xmin, xmax))
    xpeaks <- x@peaks + (smin - xmin)*f
    xtroughs <- x@troughs + (smin - xmin)*f
    # xn <- length(xstates)
    plot(c(xmin, xmax), c(ymin, ymax), type = "n", xaxs = xaxs, yaxs = yaxs,
         main = "", xlab = "", ylab = "", ...)
    # back.color <- ts(data = rep(col.bg, xn), start = xmin, frequency = f)  # no info
    # back.color[which(xstates == -1)] <- col.rec  # recession
    # back.color[which(xstates != -1)] <- col.exp  # expansion
    temps <- time(xstates)
    dt <- deltat(xstates)
    # rect(temps - 0.5 * dt, ymin, temps + 0.5 * dt, ymax, col = back.color, border = NA)

    ## Plotting Recessions
    if (xpeaks[1] > xtroughs[1]) {
      a <- (smin - xmin)*f + 1
    } else {
      a <- NULL
    }
    if (xpeaks[length(xpeaks)] > xtroughs[length(xtroughs)]) {
      b <- (smax - xmin)*f + 1
    } else {
      b <- NULL
    }
    rbxs <- temps[c(a, xpeaks + 1)]
    rbxe <- temps[c(xtroughs, b)]
    rect(rbxs - 0.5*dt, ymin, rbxe + 0.5*dt, ymax, col = col.rec, border = NA)

    ## Plotting Expansions
    if (xpeaks[1] < xtroughs[1]) {
      a <- (smin - xmin)*f + 1
    } else {
      a <- NULL
    }
    if (xpeaks[length(xpeaks)] < xtroughs[length(xtroughs)]) {
      b <- (smax - xmin)*f + 1
    } else {
      b <- NULL
    }
    ebxs <- temps[c(a, xtroughs + 1)]
    ebxe <- temps[c(xpeaks, b)]
    rect(ebxs - 0.5*dt, ymin, ebxe + 0.5*dt, ymax, col = col.exp, border = NA)

    ## Plotting Unknowns
    ubxs <- c(temps[1], smax + 1/f)
    ubxe <- c(smin - 1/f, temps[length(temps)])
    rect(ubxs - 0.5*dt, ymin, ubxe + 0.5*dt, ymax, col = col.bg, border = NA)

    title(main = main, ylab = ylab, xlab = xlab)
    if (dates) {
      Dates <- paste(substr(time(xstates), 5 - yearrep, 4), cycle(xstates), sep = ":")
      # show(Dates)  # just for debug
      for (p in xpeaks) {
        text(x = time(xstates)[p] + .5/f, y = 0.8*ymax,
             labels = Dates[p], pos = 4, offset = 0, cex = cex)
        lines(x = rep(time(xstates)[p], 2), y = c(0.7, 0.9), col = "blue")
      }
      for (p in xtroughs) {
        text(x = time(xstates)[p] + .5/f, y = 0.2*ymax,
             labels = Dates[p], pos = 4, offset = 0, cex = cex)
      }
    }
    if (!is.null(vert))
      segments(x0 = vert, y0 = 0, x1 = vert, y1 = 1, col = col.vert, lwd = lwd, lty = 2)
    box(which = "plot")
  }
)

setMethod("plot", signature(x = "BCDating", y = "ts"),
  function (x, y, main = "", window = FALSE, Dwindow = FALSE, averages = FALSE,
            dates = FALSE, yearrep = 2,
            col = "red", col.bg = grey(.8), col.exp = grey(1), col.rec = grey(.45),
            cex = 0.5, xlab = "", ylab = "", lwd = 1, vert = NULL, col.vert = "darkblue",
            xmin = NULL, xmax = NULL, ymin = 0, ymax = 1, debug = FALSE, ...)
  {
    if (averages & !window) {
      warning("BCDating: Plotting Averages only in windowed mode")
      window = TRUE
    }
    smin <- as.numeric(min(time(x@states), na.rm = TRUE))
    smax <- as.numeric(max(time(x@states), na.rm = TRUE))
    f <- frequency(x@states)
    if (is.null(xmin)) {
      xmin <- smin - 1/f
      xmin <- round(xmin*f)/f
    }
    if (is.null(xmax)) {
      xmax <- smax + 1/f
      xmax <- round(xmax*f)/f
    }
    suppressWarnings(
      xstates <- window(ts(c(rep(NA, f*(smin - xmin)), x@states, rep(NA, f*(xmax - smax))),
                           start = xmin, frequency = f), xmin, xmax))
    xpeaks <- x@peaks + (smin - xmin)*f
    xtroughs <- x@troughs + (smin - xmin)*f
    if (window & !Dwindow) {
      ymin <- as.numeric(min(window(y, start = tsp(x@states)[1], end = tsp(x@states)[2]), na.rm = TRUE))
      ymax <- as.numeric(max(window(y, start = tsp(x@states)[1], end = tsp(x@states)[2]), na.rm = TRUE))
    } else if (!window & !Dwindow) {
      xmin <- as.numeric(min(time(xstates), time(y), na.rm = TRUE))
      xmax <- as.numeric(max(time(xstates), time(y), na.rm = TRUE))
      ymin <- as.numeric(min(y, na.rm = TRUE))
      ymax <- as.numeric(max(y, na.rm = TRUE))
    } else if (window & Dwindow) {
      xminy <- min(time(y), na.rm = TRUE)
      xmind <- min(time(xstates), na.rm = TRUE)
      xmin <- as.numeric(max(xminy, xmind, na.rm = TRUE))
      xmaxy <- max(time(y), na.rm = TRUE)
      xmaxd <- max(time(xstates), na.rm = TRUE)
      xmax <- as.numeric(min(xmaxy, xmaxd, na.rm = TRUE))
      ymin <- as.numeric(min(window(y, start = xmin, end = xmax), na.rm = TRUE))
      ymax <- as.numeric(max(window(y, start = xmin, end = xmax), na.rm = TRUE))
    } else if (!window & Dwindow) {
      xmin <- as.numeric(min(time(y), na.rm = TRUE))
      xmax <- as.numeric(max(time(y), na.rm = TRUE))
      ymin <- as.numeric(min(y, na.rm = TRUE))
      ymax <- as.numeric(max(y, na.rm = TRUE))
    }
    suppressWarnings(
      xstates <- window(ts(c(rep(NA, f*(smin - xmin)), x@states, rep(NA, f*(xmax - smax))),
                           start = xmin, frequency = f), xmin, xmax))
    yl <- ymax - ymin
    ymin <- ymin - yl/40
    ymax <- ymax + yl/40
    plot(x, dates = dates, yearrep = yearrep,
         col.bg = col.bg, col.exp = col.exp, col.rec = col.rec,
         main = main, xlab = xlab, ylab = ylab, lwd = lwd, cex = cex,
         xmin = xmin, xmax = xmax, ymin = ymin, ymax = ymax,
         vert = vert, col.vert = col.vert, debug = debug, ...)
    y <- cbind(y, 0)
    if (length(averages) == 1) averages <- rep(averages, dim(y)[2] - 1)
    if (length(col) == 1 & !is.null(dim(y))) col <- rep(col, dim(y)[2] - 1)
    for (v in 1:(dim(y)[2] - 1)) {
      if (averages[v] == FALSE) lines(y[, v], lwd = lwd, col = col[v])
      else {
        a <- avgts(y[, v], x)
        l <- length(x@states)
        pt <- c(1, x@troughs, x@peaks, l)
        pt <- pt[order(pt)]
        lpt <- length(pt)
        spt <- pt[1:lpt - 1]
        ept <- pt[2:lpt]
        add <- 0.5/frequency(x@states)
        segments(time(a)[spt] - add, a[spt + 1], time(a)[ept] - add, a[ept - 1],
                 lwd = lwd, col = col[v])
      }
    }
    # points(y, pch = pch, col = col[2], cex = cex)
    if (!is.null(vert))
      segments(x0 = vert, y0 = ymin, x1 = vert, y1 = ymax, col = col.vert, lwd = lwd, lty = 2)
    box(which = "plot")
  }
)

setMethod("plot", signature(x = "ts", y = "BCDating"),
  function (x, y, ...) {
    plot(y, x, ...)
  }
)

setMethod("plot", signature(x = "BCDating", y = "BCDating"),
  function (x, y, ...) {
    plot(list(x, y), ...)
  }
)

setMethod("plot", signature(x = "list", y = "missing"),
  function (x, pch = 1, cex = 0.8, dates = TRUE, yearrep = 2, lines = 4, ...)
  {
    if (class(x[[1]])[1] != "BCDating")
      stop("argument <x> should be an array of objects of class 'BCDating'")
    xax <- x[[1]]@states
    rx <- range(time(xax))
    rx <- c(rx[1], rx[2] + 0.5)
    ry <- c(0, length(x))
    opar <- par(mar = c(lines, 4, 4, 2) + 0.1)
    plot(rx, ry, type = "n", xaxs = "i", yaxt = "n", xlab = "", ylab = "", ...)
    par(opar)
    for (i in 1:length(x)) {
      if (class(x[[i]])[1] != "BCDating")
        stop("argument <x> should be an array of objects of class 'BCDating'")
      back.color <- rep(NA, length(x[[i]]@states))
      back.color[x[[i]]@states == -1] <- grey(0.4 + 0.3*i/length(x))
      back.color[x[[i]]@states != -1] <- grey(1)
      temps <- time(x[[i]]@states)
      dt <- deltat(x[[i]]@states)
      rect(temps, i - 1, temps + dt, i, col = back.color, border = NA)
      if (dates) {
        Dates <- paste(substr(time(x[[i]]@states), 5 - yearrep, 4), cycle(x[[i]]@states), sep = ":")
        for (p in x[[i]]@peaks) {
          text(x = time(x[[i]]@states)[p] - 0.1, y = i - 0.2, labels = Dates[p], pos = 4, cex = cex)
        }
        for (p in x[[i]]@troughs) {
          text(x = time(x[[i]]@states)[p] - 0.1, y = i - 0.8, labels = Dates[p], pos = 4, cex = cex)
        }
      }
    }
  }
)

# Summary Method --------------------------------

if (!isGeneric("summary")) {
  setGeneric("summary", function(object, ...) standardGeneric("summary"))
}

setMethod("summary", signature(object = "BCDating"),
  function (object, print = TRUE, ...)
  {
    summarytab <- matsummary2(object)
    indic <- matrix(NA, 2, 2)
    colnames(indic) <- c("Amplitude", "Duration")
    rownames(indic) <- c("Exp=]T;P]", "Rec=]P;T]")
    indic[1, 1] <- mean(summarytab[summarytab[, 1] == 1, 7], na.rm = TRUE)
    indic[2, 1] <- mean(summarytab[summarytab[, 1] == -1, 7], na.rm = TRUE)
    indic[1, 2] <- mean(summarytab[summarytab[, 1] == 1, 4], na.rm = TRUE)
    indic[2, 2] <- mean(summarytab[summarytab[, 1] == -1, 4], na.rm = TRUE)
    if (isTRUE(print)) {
      df.print <- as.data.frame(summarytab)
      df.print[summarytab[, 1] == 1, 1] <- "Expansion"
      df.print[summarytab[, 1] == -1, 1] <- "Recession"
      df.print[, c(2, 3)] <- ts2char(object@states)[summarytab[, c(2, 3)]]
      df.print[, c(5, 6)] <- round(summarytab[, c(5, 6)], 0)
      df.print[, 7] <- round(summarytab[, 7], 1)
      colnames(df.print) <- c("Phase", "]Start", ";End]", "Duration",
                              "LevStart", "LevEnd", "Amplitude")
      print(df.print)
      cat("\n")
      print(round(indic, 1))
    }
    return(invisible(indic))
  }
)

ts2char <- function (obj)
{
  if (!is.ts(obj)) stop("argument <obj> should be an object of class 'ts'")
  temps <- time(obj)
  year <- floor(temps)
  per <- cycle(obj)
  letter <- "."
  if (frequency(obj) == 4) letter <- "Q"
  if (frequency(obj) == 12) letter <- "M"
  return(paste(year, letter, per, sep = ""))
}

avgmat <- function (Dating)
{
  l <- length(Dating@states)
  points <- c(Dating@troughs, Dating@peaks)
  points <- points[order(points)]
  x <- matrix(rep.int(0, l*l), l, l)
  x[1:points[1], 1:points[1]] <- 1/points[1]
  for (i in 1:(length(points) - 1))
    x[(points[i] + 1):points[i + 1], (points[i] + 1):points[i + 1]] <- 1/(points[i + 1] - points[i])
  if (points[length(points)] + 1 <= l)
    x[(points[length(points)] + 1):l, (points[length(points)] + 1):l] <- 1/(l - points[length(points)])
  avgmat <- x
}

avgts <- function(ts, Dating)
{
  ts <- na.omit(ts)
  wts <- window(ts, start = start(Dating@states), end = end(Dating@states),
                frequency = frequency(Dating@states))
  wdat <- window(Dating, start = start(wts), end = end(wts))
  avgts <- ts(avgmat(wdat) %*% wts, start = start(wts), frequency = frequency(wts))
}

sid <- function(ts1, ts2)
{
  sd <- start(ts2) - start(ts1)
  sid <- sd[1]*frequency(ts1) + sd[2]
}

# window Method ----------------------------------------------------

if (!isGeneric("window")) {
  setGeneric("window", function(x, ...) standardGeneric("window"))
}

setMethod("window", signature(x = "BCDating"),
  function (x, ...)
  {
    d <- x
    d@states <- window(x@states, ...)
    d@peaks <- x@peaks + sid(d@states, x@states)
    d@troughs <- x@troughs + sid(d@states, x@states)
    d@peaks <- d@peaks[d@peaks <= length(d@states) & d@peaks >= 1]
    d@troughs <- d@troughs[d@troughs <= length(d@states) & d@troughs >= 1]
    d@y <- window(x@y, ...)
    return(d)
  }
)
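## Usage sketch (added for illustration; not part of the original source).
## Assumes 'y' is a hypothetical quarterly ts of a cycle indicator such as
## log GDP; wrapped in if (FALSE) so it never runs at source() time.
if (FALSE) {
  y  <- ts(cumsum(rnorm(120, mean = 0.2)), start = c(1990, 1), frequency = 4)
  bb <- BBQ(y, mincycle = 5, minphase = 2, name = "Simulated dating")
  show(bb)       # table of peak/trough dates with phase durations
  summary(bb)    # average amplitude and duration per regime
  plot(bb, y)    # recessions shaded behind the series
}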
/scratch/gouwar.j/cran-all/cranData/BCDating/R/BCDating.R
## Main developers: Karel Van den Meersche, Karline Soetaert
##
#####################################################################
#####################################################################

rescaleRows <- function (A,                   # matrix or dataframe to be row-rescaled: rowSums(A[rescale]) = 1
                         columns = 1:ncol(A)) # indices of the columns included in the normalisation
{
  if (is.null(columns)) return(A)
  if (nrow(A) == 1) {
    R <- sum(A[, columns])
  } else {
    R <- rowSums(A[, columns])
  }
  A[R > 0, columns] <- A[R > 0, columns]/R[R > 0]
  A                                            # rescaled dataframe
}

####################################################################################

rdirichlet2 <- function(alpha)  # input and output are matrices; each row is a point in a simplex
{
  l <- length(alpha)
  n <- ncol(alpha)
  x <- matrix(rgamma(l, alpha), ncol = n)
  x/rowSums(x)
}

## sum of log-probabilities of the row vectors of x given the row vectors of alpha; log = TRUE!!!
logddirichlet2 <- function(x, alpha)
  sum((alpha - 1)*log(x)) - sum(lgamma(alpha)) + sum(lgamma(rowSums(alpha)))

####################################################################################

lsei1 <- function(A,  # search x for which min ||Ax - B||
                  B,
                  E,  # Ex = F
                  F,
                  G,  # Gx > H
                  H)
{
  ## require(quadprog, quietly = TRUE)
  ## dvec <- t(A) %*% B
  ## Dmat <- t(A) %*% A
  ## Amat <- t(rbind(E, G))
  ## bvec <- c(F, H)
  ## solve.QP(Dmat, dvec, Amat, bvec, meq = 1)$solution
  ## require(limSolve, quietly = TRUE)
  lsei(A, B, E, F, G, H)$X
}

lsei2 <- function(Rat,   # min(X %*% Rat - Dat)
                  Dat,
                  sddat, # weighting
                  G = diag(1, nrow(Rat)),      # G*x >= h
                  H = rep(0, nrow(Rat)),
                  E = matrix(1, 1, nrow(Rat)), # E*x = F
                  F = 1)
{
  X <- matrix(NA, nrow(Dat), nrow(Rat))
  for (i in 1:nrow(Dat)) {
    select <- !is.na(Dat[i, ])
    A <- t(Rat[, select])/sddat[i, select]
    B <- Dat[i, select]/sddat[i, select]
    X[i, ] <- lsei1(A, B, E, F, G, H)
  }
  dimnames(X) <- list(rownames(Dat), rownames(Rat))
  return(X)
}  # lsei2

#########################################################################################################

panel.cor <- function(x, y, digits = 2, prefix = "", cex.cor, ...)
{
  usr <- par("usr"); on.exit(par(usr))
  par(usr = c(0, 1, 0, 1))
  r <- abs(cor(x, y))
  txt <- format(c(r, 0.123456789), digits = digits)[1]
  txt <- paste(prefix, txt, sep = "")
  if (missing(cex.cor)) cex <- 0.8/strwidth(txt)
  text(0.5, 0.5, txt, cex = cex * r)
}

###########################################################################################

BCE <- function(
  ## parameters
  Rat,                     # initial ratio matrix
  Dat,                     # initial data matrix
  relsdRat = 0,            # relative standard deviation on ratio matrix: a number or a matrix
  abssdRat = 0,            # absolute standard deviation on ratio matrix: a number or a matrix
  minRat = 0,              # minimum values of ratio matrix: a number or a matrix
  maxRat = +Inf,           # maximum values of ratio matrix: a number or a matrix
  relsdDat = 0,            # relative standard deviation on data matrix: a number or a matrix
  abssdDat = 0,            # absolute standard deviation on data matrix: a number or a matrix
  tol = 1e-4,              # minimum standard deviation for data matrix
  tolX = 1e-4,             # minimum x values for MCMC initiation
  positive = 1:ncol(Rat),  # which columns contain strictly positive data; other columns are not rescaled and can become negative
  iter = 100,              # number of iterations for the MCMC
  outputlength = 1000,     # number of iterations kept in the output
  burninlength = 0,        # number of initial iterations to be removed from the analysis
  jmpRat = 0.01,           # jump length of the ratio matrix (in normal space): a number, vector or matrix
  jmpX = 0.01,             # jump length of the composition matrix (in a simplex): a number, vector or matrix
  unif = FALSE,            # use uniform distributions for the ratio matrix? (as in chemtax)
  verbose = TRUE,          # if TRUE, extra information is provided during the run of the function, such as extra warnings, elapsed time and expected time until the end of the MCMC
  initRat = Rat,           # ratio matrix used to start the Markov chain: default the initial ratio matrix
  initX = NULL,            # composition matrix used to start the Markov chain: default the LSEI solution of Ax = B
  userProb = NULL,         # posterior probability for a given ratio matrix and composition matrix: a function with 2 arguments, RAT and X, returning the -log posterior probability of ratio matrix RAT and composition matrix X. Dependence of the probability on the data should be incorporated in the function.
  confInt = 2/3,           # confidence interval in the output; because the distributions are not symmetrical, standard deviations are not a useful measure; instead, upper and lower boundaries of the given confidence interval are given. Default is 2/3 (equivalent to one standard deviation), but a more or less stringent criterion can be used.
  export = FALSE,          # if TRUE, a list of variables and plots is exported to the specified filename; if 'file' is a valid path, the list is exported to the location indicated by the path
  file = "BCE"             # objects are saved to this file
)
# The MCMC function BCE assesses the probability distributions of a ratio
# matrix Rat, giving the biomarker composition of a number of taxa, and of a
# composition matrix x, giving the taxonomic composition of a number of
# stations, with Dat = the biomarker composition of the samples. The
# probability of the outcome x %*% Rat ~= Dat is evaluated with an MCMC.
{
  input.list <- list(Rat = Rat, Dat = Dat,
                     relsdRat = relsdRat, abssdRat = abssdRat,
                     relsdDat = relsdDat, abssdDat = abssdDat,
                     minRat = minRat, maxRat = maxRat,
                     tol = tol, tolX = tolX, positive = positive,
                     userProb = userProb, unif = unif, verbose = verbose,
                     jmpX = jmpX, jmpRat = jmpRat,
                     initRat = initRat, initX = initX, confInt = confInt)

  init.list <- init(input.list)

  with(init.list, {

    ##==========================================================##
    ## initial posterior probability of x, Rat and x%*%Rat = y given data
    ##==========================================================##
    rat2 <- rat1
    x2 <- x1
    logp1 <- logProbabilityBCE(rat1, x1, init.list)

    ##=======================================##
    ## initialise mcmc objects
    ##=======================================##
    if (burninlength >= iter)
      stop("The burninlength defines the number of initial iterations to be removed from the analysis; it must be smaller than iter, the total number of iterations!")
    ou <- ceiling((iter - burninlength)/outputlength)
    iter <- iter - (iter - burninlength)%%ou
    outputlength <- (iter - burninlength)%/%ou
    ou1 <- burninlength + ou
    i1 <- 1
    mcmc.Rat <- array(dim = c(nalg, npig, outputlength),
                      dimnames = list(algnames, pignames, NULL))
    mcmc.X <- array(dim = c(nst, nalg, outputlength),
                    dimnames = list(stnames, algnames, NULL))
    mcmc.logp <- vector(length = outputlength)
    naccepted <- 0
    init.time <- proc.time()

    ##==========##
    ## mcmc loop
    ##==========##
    for (i in 1:iter) {
      ## new parameters
      x2 <- rdirichlet2(x1*(alfajmp - nalg) + 1)
      rat2[] <- rnorm(lr, rat1, jmpRat.matrix)

      ## new posterior probability p2 (-log)
      logp2 <- logProbabilityBCE(rat2, x2, init.list)
      r <- exp(logp2 - logp1
               + logddirichlet2(x1, x2*(alfajmp - nalg) + 1)
               - logddirichlet2(x2, x1*(alfajmp - nalg) + 1))

      ## METROPOLIS algorithm: select the new point, or stick to the old one?
      if (r >= runif(1)) {
        ## update x1, rat1, logp1, naccepted
        x1 <- x2
        rat1 <- rat2
        logp1 <- logp2
        naccepted <- naccepted + 1
      }

      ## update mcmc objects
      if (i == ou1) {
        mcmc.Rat[,,i1] <- rat1
        mcmc.X[,,i1] <- x1
        mcmc.logp[i1] <- logp1
        i1 <- i1 + 1
        ou1 <- ou1 + ou
        ## give some process feedback
        if (i %in% c(100, 1000, 1:10*10000, iter)) {
          if (verbose) {
            present.time <- proc.time() - init.time
            print(cat("runs:", i,
                      "; elapsed time:", present.time[3],
                      "s ; estimated time left:", present.time[3]*(iter - i)/i,
                      "s ; speed:", i/present.time[3], " runs/s "))
            flush.console()
          }
        }
      }
    }  # end mcmc loop

    if (dim(mcmc.X)[1] == 1) {
      dim(mcmc.X) <- dim(mcmc.X)[2:3]
      rownames(mcmc.X) <- algnames
    }
    if (verbose)
      print(cat("number of accepted runs: ", naccepted, " out of ", iter,
                " (", 100*naccepted/iter, "%) ", sep = ""))
    mcmc <- list(Rat = mcmc.Rat, X = mcmc.X, logp = mcmc.logp, naccepted = naccepted)
    class(mcmc) <- c("bce", "list")
    if (export) export(mcmc, file, input.list)
    return(mcmc)
    # bce object: a list containing 4 elements:
    # - mcmc.Rat: array with dimension c(nrow(Rat), ncol(Rat), iter) containing the random walk values of the ratio matrix
    # - mcmc.X: array with dimension c(nrow(x), ncol(x), iter) containing the random walk values of the composition matrix
    # - mcmc.logp: vector with length iter containing the random walk values of the posterior probability
    # - naccepted: integer indicating the number of runs that were accepted
  })
}

####################################################

init <- function(input.list)
{
  with(input.list, {

    ## warnings and error messages
    if (ncol(Rat) != ncol(Dat))
      stop("ratio matrix and data matrix must have same number of columns")

    ##===============================================##
    ## initialisations
    ##===============================================##
    if (is.vector(Dat)) Dat <- t(Dat)  # Dat has to be a matrix
    if (is.data.frame(Dat)) Dat <- as.matrix(Dat)
    if (is.data.frame(Rat)) Rat <- as.matrix(Rat)
    if (is.data.frame(initRat)) initRat <- as.matrix(initRat)
    if (is.data.frame(relsdDat)) relsdDat <- as.matrix(relsdDat)
    if (is.data.frame(relsdRat)) relsdRat <- as.matrix(relsdRat)
    if (is.data.frame(abssdDat)) abssdDat <- as.matrix(abssdDat)
    if (is.data.frame(abssdRat)) abssdRat <- as.matrix(abssdRat)

    ## useful numbers
    nalg <- nrow(Rat); algnames <- rownames(Rat)  # number & names of taxonomic groups
    nst <- nrow(Dat); stnames <- rownames(Dat)    # number & names of stations or samples
    npig <- ncol(Rat); pignames <- colnames(Rat)  # number & names of biomarkers
    lx <- nst*nalg
    lr <- nalg*npig
    ld <- nst*npig

    ## standard deviations for Rat and Dat
    if (length(relsdRat) == 1) relsdRat <- rep(relsdRat, ncol(Rat))
    if (is.vector(relsdRat) & length(relsdRat) == ncol(Rat))
      relsdRat <- t(matrix(relsdRat, nrow = ncol(Rat), ncol = nrow(Rat)))
    if (length(relsdRat) != length(Rat)) stop("invalid dimensions of relsdRat")
    if (length(abssdRat) == 1) abssdRat <- rep(abssdRat, ncol(Rat))
    if (is.vector(abssdRat) & length(abssdRat) == ncol(Rat))
      abssdRat <- t(matrix(abssdRat, nrow = ncol(Rat), ncol = nrow(Rat)))
    if (length(abssdRat) != length(Rat)) stop("invalid dimensions of abssdRat")
    sdrat = as.matrix(relsdRat*Rat + abssdRat)
    if (length(minRat) == 1) minRat <- rep(minRat, ncol(Rat))
    if (is.vector(minRat) & length(minRat) == ncol(Rat))
      minRat = t(matrix(minRat, nrow = ncol(Rat), ncol = nrow(Rat)))
    if (length(minRat) != length(Rat)) stop("invalid dimensions of minRat")
    if (length(maxRat) == 1) maxRat <- rep(maxRat, ncol(Rat))
    if (is.vector(maxRat) & length(maxRat) == ncol(Rat))
      maxRat = t(matrix(maxRat, nrow = ncol(Rat), ncol = nrow(Rat)))
    if (length(maxRat) != length(Rat)) stop("invalid dimensions of maxRat")
    if (length(relsdDat) == 1) relsdDat <- rep(relsdDat, ncol(Dat))
    if (is.vector(relsdDat) & length(relsdDat) == ncol(Dat))
      relsdDat <- t(matrix(relsdDat, nrow = ncol(Dat), ncol = nrow(Dat)))
    if (length(relsdDat) != length(Dat)) stop("invalid dimensions of relsdDat")
    if (length(abssdDat) == 1) abssdDat <- rep(abssdDat, ncol(Dat))
    if (is.vector(abssdDat) & length(abssdDat) == ncol(Dat))
      abssdDat <- t(matrix(abssdDat, nrow = ncol(Dat), ncol = nrow(Dat)))
    if (length(abssdDat) != length(Dat)) stop("invalid dimensions of abssdDat")
    sddat = as.matrix(relsdDat*Dat + abssdDat)
    if (any(sddat == 0, na.rm = TRUE)) {
      sddat[sddat == 0] <- tol
      warning("Some elements in the data matrix have standard deviation = 0. They are set to a minimum value (tol)")
      if (verbose) flush.console()
    }
    # For elements of B that have sd = 0, we want them to be changed very little
    # when determining the optimal posterior distribution. Instead of excluding
    # them from the analysis, which would be fairly complex to implement, we
    # give them a standard deviation tol.

    ## lambda and k for the gamma distributions of Rat and Dat
    krat <- Rat^2/sdrat^2
    lrat <- Rat/sdrat^2
    krat[Rat == 0 & sdrat != 0] <- 1
    lrat[Rat == 0 & sdrat != 0 & !is.na(Rat)] <- 1/sdrat[Rat == 0 & sdrat != 0 & !is.na(Rat)]
    kdat <- Dat^2/sddat^2
    ldat <- Dat/sddat^2
    kdat[Dat == 0] <- 1
    ldat[Dat == 0 & !is.na(Dat)] <- 1/sddat[Dat == 0 & !is.na(Dat)]

    select <- sdrat > 0; if (unif) select <- Rat > 0
    wholeranged <- !(1:npig %in% positive)
    select.pos <- select; select.pos[, wholeranged] <- FALSE
    ind.pos <- which(select.pos)
    select.r <- select; select.r[, positive] <- FALSE
    ind.r <- which(select.r)

    ##==========================================================##
    ## initialisation x with LSEI
    ##==========================================================##
    if (is.null(initRat)) rat1 <- Rat else rat1 <- initRat
    rat1[is.na(rat1)] <- 0
    if (is.null(initX)) {
      x <- lsei2(rat1, Dat, sddat, H = rep(tolX, nalg))
      x1 <- x
    } else x1 <- x <- initX

    ##=========================================================##
    ## initialisation jump lengths
    ##=========================================================##
    alfajmp <- 1/(4*jmpX^2) - 1
    if (length(jmpRat) == 1) {
      jmpRat.matrix <- matrix(jmpRat, nrow = nalg, ncol = npig)
    } else {
      if (is.vector(jmpRat) & length(jmpRat) == npig) {
        jmpRat.matrix <- t(matrix(jmpRat, ncol = nalg, nrow = npig))
      } else {
        if (all(dim(jmpRat) == dim(Rat))) {
          jmpRat.matrix <- as.matrix(jmpRat)
        } else {
          stop("The jump length of the ratio matrix should be either a single value, or specified for each biomarker separately")
        }
      }
    }
    jmpRat.matrix[sdrat == 0] <- 0  # only jump when standard deviation > 0
    if (any(is.na(jmpRat.matrix)))
      stop("missing values in jump ratio matrix; please specify a valid jump ratio matrix.")

    return(list(Rat = Rat, Dat = Dat, sdrat = sdrat, sddat = sddat,
                minRat = minRat, maxRat = maxRat, tol = tol, tolX = tolX,
                positive = positive, wholeranged = wholeranged,
                userProb = userProb, unif = unif, verbose = verbose,
                krat = krat, lrat = lrat, kdat = kdat, ldat = ldat,
                algnames = algnames, stnames = stnames, pignames = pignames,
                nalg = nalg, nst = nst, npig = npig,
                lx = lx, lr = lr, ld = ld,
                x = x, ind.r = ind.r, ind.pos = ind.pos,
                x1 = x1, rat1 = rat1,
                alfajmp = alfajmp, jmpRat.matrix = jmpRat.matrix,
                confInt = confInt))
  })
}  # end initialisations

#############################################################################################

logProbabilityBCE <- function(RAT,        # ratio matrix
                              X,          # composition matrix
                              init.list)  # list of variables, output of the function init(input.list)
{
  with(init.list, {
    if (!is.null(userProb)) logp <- log(userProb(RAT, X))
    else {
      y <- X %*% RAT
      if (!is.null(positive)) {
        dA.p <- dgamma(RAT[ind.pos], krat[ind.pos], lrat[ind.pos], log = TRUE)
        kdat <- y^2/sddat^2
        ldat <- y/sddat^2
        kdat[Dat == 0] <- 1
        ldat[Dat == 0 & !is.na(Dat)] <- 1/y[Dat == 0 & !is.na(Dat)]
        dB.p <- dgamma(Dat[, positive], kdat[, positive], ldat[, positive], log = TRUE)
      } else {
        dA.p <- 0
        dB.p <- 0
      }
      if (any(wholeranged)) {
        dA.r <- dnorm(RAT[ind.r], Rat[ind.r], sdrat[ind.r], log = TRUE)
        dB.r <- dnorm(y[, wholeranged], Dat[, wholeranged], sddat[, wholeranged], log = TRUE)
      } else {
        dA.r <- 0
        dB.r <- 0
      }
      dA.p[dA.p < -1e8] <- -1e8
      dA.p[dA.p > +1e8] <- +1e8
      dA.r[dA.r < -1e8] <- -1e8
      dA.r[dA.r > +1e8] <- +1e8
      drat <- sum(dA.p, dA.r, na.rm = TRUE)
      if (unif) drat <- 0
      if (any(RAT < minRat | RAT > maxRat, na.rm = TRUE)) drat <- -Inf
      dB.p[dB.p < -1e8] <- -1e8
      dB.p[dB.p > +1e8] <- +1e8
      dB.r[dB.r < -1e8] <- -1e8
      dB.r[dB.r > +1e8] <- +1e8
      ddat <- sum(dB.p, dB.r, na.rm = TRUE)/nst
      logp <- drat + ddat
    }
    return(logp)
  })
}

##########################################################################################

export <- function(x, ...) UseMethod("export")

export.bce <- function(x,                  # a bce object, output of the function bce()
                       file = "BCE",       # the bce object is written to this file
                       input.list = NULL,  # a list of the arguments in bce() can be provided and saved as well
                       ...)                # additional arguments
{
  if (!is.null(attributes(x)$A_not_null)) {
    return("no export function is available for output of the function bce(); see ?BCE for the use of export.bce")
  } else {
    BCE <- x
    save(BCE, input.list, file = file)
    BCEsummary <- summary(BCE)
    with(c(BCE, BCEsummary), {
      write.csv(firstX, paste(file, "-firstX.csv", sep = ""))
      write.csv(bestRat, paste(file, "-bestRat.csv", sep = ""))
      write.csv(bestX, paste(file, "-bestX.csv", sep = ""))
      write.csv(bestDat, paste(file, "-bestDat.csv", sep = ""))
      write.csv(meanRat, paste(file, "-meanRat.csv", sep = ""))
      write.csv(lbRat, paste(file, "-lbRat.csv", sep = ""))
      write.csv(ubRat, paste(file, "-ubRat.csv", sep = ""))
      write.csv(covRat, paste(file, "-covRat.csv", sep = ""))
      write.csv(meanX, paste(file, "-meanX.csv", sep = ""))
      write.csv(lbX, paste(file, "-lbX.csv", sep = ""))
      write.csv(ubX, paste(file, "-ubX.csv", sep = ""))
      write.csv(covX, paste(file, "-covX.csv", sep = ""))
      png(paste(file, "%03d.png", sep = ""), width = 1903, height = 1345, pointsize = 10)
      par(mfrow = c(4, 6))
      nalg <- nrow(Rat)
      npig <- ncol(Rat)
      nst <- nrow(bestDat)
      algnames <- rownames(Rat)
      pignames <- colnames(Rat)
      stnames <- rownames(bestDat)
      for (i in 1:nalg) {
        for (j in (1:npig)[meanRat[i, ] != 0]) {
          plot(Rat[i, j, ], type = "l",
               main = paste("trace of", algnames[i], pignames[j]), xlab = "", ylab = "")
          hist(Rat[i, j, ], 100,
               main = paste("histogram of", algnames[i], pignames[j]), xlab = "")
        }
        a <- aperm(Rat)[, meanRat[i, ] != 0, i]
        if (!is.null(dim(a))) pairs(a, upper.panel = panel.cor, pch = ".")
      }
      if (is.matrix(X)) {
        for (j in 1:nalg) {
          plot(X[j, ], type = "l", main = paste("trace of", algnames[j]), xlab = "", ylab = "")
          hist(X[j, ], 100, main = paste("histogram of", algnames[j]), xlab = "")
        }
        a <- aperm(X[1:nalg, ])
        if (!is.null(dim(a))) pairs(a, upper.panel = panel.cor, pch = ".")
      } else {
        for (i in 1:nst) {
          for (j in 1:nalg) {
            plot(X[i, j, ], type = "l",
                 main = paste("trace of", algnames[j], stnames[i]), xlab = "", ylab = "")
            hist(X[i, j, ], 100,
                 main = paste("histogram of", algnames[j], stnames[i]), xlab = "")
          }
          a <- aperm(X[i, 1:nalg, ])
          if (!is.null(dim(a))) pairs(a, upper.panel = panel.cor, pch = ".")
        }
      }
      par(mfrow = c(1, 1))
      barplot(t(bestX), legend.text = algnames)
      dev.off()
    })
  }
}  # end function export.bce()

#############################################################################################

plot.bce <- function(x, ...)  # x: a bce object
  if (!is.null(attributes(x)$A_not_null)) {
    NextMethod("modMCMC")
  } else {
    with(x, {
      nalg <- nrow(Rat)
      npig <- ncol(Rat)
      algnames <- rownames(Rat); if (is.null(algnames)) algnames <- paste("taxon", 1:nalg)
      pignames <- colnames(Rat); if (is.null(pignames)) pignames <- paste("biomarker", 1:npig)
      if (is.matrix(X)) {
        nst <- 1; stnames <- NULL
      } else {
        nst <- nrow(X); stnames <- rownames(X)
      }
      if (is.null(stnames)) stnames <- paste("sample", 1:nst)
      oldpar <- par(no.readonly = TRUE)
      par(mfcol = c(nalg, 5), mar = c(0, 0, 0, 0), oma = c(0, 0, 1, 0), ask = TRUE)
      for (j in 1:npig) {
        for (i in 1:nalg) {
          R <- Rat[i, j, ]
          Rr <- range(R)
          if (all(Rr == 0)) plot.new() else {
            ylim <- matrix(c(1, -.2, 0, 1.2), 2) %*% Rr
            plot(R, type = "l", xlab = "", ylab = "", xaxt = "n", yaxt = "n", ylim = ylim)
            text(0, ylim[2], paste(algnames[i], pignames[j]), adj = 0)
          }
        }
      }
      mtext("ratio matrix traces", outer = TRUE)
      par(mfcol = c(nalg, 5), mar = c(0, 0, 0, 0), oma = c(0, 0, 1, 0), ask = TRUE)
      for (i in 1:nst) {
        for (j in 1:nalg) {
          ifelse(is.matrix(X), x <- X[j, ], x <- X[i, j, ])
          xr <- range(x)
          ylim <- matrix(c(1, -.2, 0, 1.2), 2) %*% xr
          plot(x, type = "l", xlab = "", ylab = "", xaxt = "n", yaxt = "n", ylim = ylim)
          text(0, ylim[2], paste(stnames[i], algnames[j]), adj = 0)
        }
      }
      par(oldpar)
    })
  }
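## Usage sketch (added for illustration; not part of the original source).
## Rat and Dat below are small hypothetical matrices; wrapped in if (FALSE)
## so it never runs at source() time.
if (FALSE) {
  Rat <- matrix(c(2, 1, 0,
                  0, 1, 2), nrow = 2, byrow = TRUE,
                dimnames = list(c("taxonA", "taxonB"), c("pig1", "pig2", "pig3")))
  X.true <- matrix(c(0.3, 0.7), nrow = 1)   # true composition of one sample
  Dat <- X.true %*% Rat                     # noiseless biomarker data
  fit <- BCE(Rat, Dat, relsdRat = 0.2, relsdDat = 0.1,
             iter = 10000, outputlength = 1000)
  sm <- summary(fit)   # means, credible bounds and covariances of Rat and X
  sm$meanX             # estimated taxonomic composition
}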
/scratch/gouwar.j/cran-all/cranData/BCE/R/BCE-1_5.R
## based on BCE.R, version 1.2
## new in this release:
## - normal distributions are used for A and B, cut off for negative values
## - the standard deviations of A and B are absolute values, determined
##   dynamically; a weighting is possible
## - AX = B instead of X*rat = dat (for compatibility with tlsce.r)
## Karel Van den Meersche
## 20081025

################################################################################
## work in progress
################################################################################

bce1 <- function(
  ## parameters
  A, B, Wa = NULL, Wb = NULL,
  jmpType = "default", jmpA = 0.1, jmpX = 0.1, jmpCovar = NULL,
  initX = NULL, initA = NULL,
  priorA = "normal", minA = NULL, maxA = NULL,
  var0 = NULL, wvar0 = 1e-6,
  Xratios = TRUE, verbose = TRUE, ...)
{
  ##=============================##
  ## warnings and error messages
  ##=============================##
  if (NROW(A) != NROW(B)) stop("A and B must have same number of rows")

  ##===============================================##
  ## general initialisations
  ##===============================================##
  A0 <- as.matrix(A)
  B0 <- as.matrix(B)
  ## useful numbers & names
  nalg <- ncol(A0)            # number of taxonomic groups
  nst <- ncol(B0)             # number of stations or samples
  npig <- nrow(A0)            # number of biomarkers
  algnames <- colnames(A0)
  stnames <- colnames(B0)
  pignames <- rownames(A0)
  lr <- npig*nalg
  if (is.null(algnames)) algnames <- 1:nalg
  if (is.null(stnames)) stnames <- 1:nst
  if (is.null(pignames)) pignames <- 1:npig
  lc <- nalg*nst
  w <- A0 > 0
  lw <- length(which(w))
  if (is.null(Wa)) Wa <- matrix(1, npig, nalg)
  else if (length(Wa) == 1) Wa <- matrix(Wa, npig, nalg)
  else Wa <- as.matrix(Wa)
  if (is.null(Wb)) Wb <- matrix(1, npig, nst)
  else if (length(Wb) == 1) Wb <- matrix(Wb, npig, nst)
  else Wb <- as.matrix(Wb)
  if (is.null(minA)) minA <- matrix(0, npig, nalg)
  if (is.null(maxA)) maxA <- matrix(Inf, npig, nalg)
  if (is.null(initX) | is.null(var0) | jmpType == "covar")
    tlsce0 <- tlsce(A0, B0, Wa, Wb, minA = minA, maxA = maxA, Xratios = Xratios)
  if (is.null(initX)) X0 <- tlsce0$X else X0 <- initX
  ## for (i in 1:npig) for (j in 1:nst)
  ##   if (B[i,j]==0 & Wb[i,j]==Inf) X0[A[i,]!=0,j] <- 0

  ## if Xratios: X = ZQ + P; EX = F (row sums of X are 1); P = c(0,...,0,1)
  ## Q are the estimated model parameters if Xratios
  if (Xratios) {
    suppressWarnings(Z <- matrix(c(1, -1, rep(0, nalg - 1)), nalg, nalg - 1))
    P <- c(rep(0, nalg - 1), 1)
    Zinv <- matrix(1, nalg - 1, nalg); Zinv[upper.tri(Zinv)] <- 0
    Q0 <- Zinv %*% X0
  }

  ##==================================================##
  ## initialisation parameters and residuals function ##
  ##==================================================##
  if (is.null(initA)) initA <- A
  if (Xratios) par <- c(initA[w], Q0) else par <- c(initA[w], X0)
  lp <- length(par)
  names(par)[1:lw] <- paste("A",
                            abbreviate(rownames(A)[which(w, arr.ind = TRUE)[, 1]]),
                            abbreviate(colnames(A)[which(w, arr.ind = TRUE)[, 2]]),
                            sep = "_")
  names(par)[-(1:lw)] <- paste("Q", 1:(nalg - 1),
                               rep(abbreviate(colnames(B)), each = nalg - 1),
                               sep = "_")
  if (is.null(var0)) var0 <- tlsce0$SS["total"]/lp

  ## residuals function
  residuals <- function(par, ...)
  {
    A1 <- A0; A1[w] <- par[1:lw]
    Q <- matrix(par[-(1:lw)], ncol = nst)
    if (Xratios) X <- Z %*% Q + P else X <- Q
    AX <- A1 %*% X
    if (priorA == "normal") resid <- c(Wa[w]*(A1[w] - A0[w]), Wb*(AX - B0))
    else {
      if (priorA == "uniform") resid <- c(Wb*(AX - B0))
    }
    return(resid)
  }

  ## prior information: all elements of A and X are positive; A is limited by minA, maxA
  lowerpar <- c(minA[w], rep(-Inf, lp - lw))
  upperpar <- c(maxA[w], rep(+Inf, lp - lw))
  if (Xratios) {
    prior <- function(par) {
      Q <- matrix(par[-(1:lw)], ncol = nst)
      X <- Z %*% Q + P
      ifelse(any(X < 0), -Inf, 0)
    }
  } else {
    prior <- function(par) ifelse(any(par[-(1:lw)] < 0), -Inf, 0)
  }

  ##=============================##
  ## initialisation jump lengths ##
  ##=============================##
  if (jmpType == "default") {
    if (!length(jmpA) %in% c(1, lr))
      stop("The jump length of the ratio matrix should be either a single value, or a matrix with the same dimension as A")
    if (any(is.na(jmpA)))
      stop("missing values in jump ratio matrix; please specify a valid jump ratio matrix.")
    if (length(jmpA) == 1) jmpA <- rep(jmpA, lw) else jmpA <- jmpA[w]
    if (length(jmpX) == 1) jmpX <- matrix(jmpX, nalg, nst)
    if (Xratios) {
      jmpQ <- (jmpX[-1, ] + jmpX[-nalg, ])*.5
    } else {
      jmpQ <- jmpX
    }
    jmp <- c(jmpA, jmpQ)
  }
  if (jmpType == "estimate") {
    if (length(jmpA) == 1) jmpA <- jmpA*summary(tlsce0$fit)$cov.scaled*2.4^2/(lp - 1)
    if (length(jmpX) == 1) {
      jmpQ <- matrix(0, 0, 0)
      for (i in 1:nst) {
        BnotNA <- !is.na(B0[, i])  # remove NA from B
        if (Xratios) {
          Qlseii <- lsei(A[BnotNA, ] %*% Z, B0[BnotNA, i] - A[BnotNA, ] %*% P,
                         E = rep(0, nalg - 1), F = 0, G = Z, H = -P,
                         Wa = Wb[BnotNA, i], fulloutput = TRUE)
        } else {
          Qlseii <- lsei(A[BnotNA, ], B0[BnotNA, i],
                         E = rep(0, nalg), F = 0, G = diag(nalg), H = rep(0, nalg),
                         Wa = Wb[BnotNA, i], fulloutput = TRUE)
        }
        jmpQ <- bdiag(jmpQ, Qlseii$covar*2.4^2/(lp - 1))
      }
      jmp <- as.matrix(bdiag(jmpA, jmpX*jmpQ))
    }
  }
  if (jmpType == "covar") jmp <- jmpCovar

  ##==========##
  ## mcmc     ##
  ##==========##
  mcmc <- modMCMC(f = residuals, p = par, var0 = var0, wvar0 = wvar0,
                  prior = prior, jump = jmp, lower = lowerpar, upper = upperpar, ...)

  ##============##
  ## output     ##
  ##============##
  ## outputlength <- nrow(mcmc$pars)
  ## mcmc.A <- array(0, dim = c(npig, nalg, outputlength), dimnames = list(pignames, algnames, NULL))
  ## mcmc.A[w] <- t(mcmc$pars[, 1:lw])  ## check this!!
  ## mcmc.X <- array(dim = c(nalg, nst, outputlength), dimnames = list(algnames, stnames, NULL))
  ## if (Xratios)
  ## {
  ##   mcmc.Q <- array(dim = c(nalg - 1, nst, outputlength), dimnames = list(NULL, stnames, NULL))
  ##   mcmc.Q[] <- t(mcmc$pars[, -(1:lw)])
  ##   for (i in 1:outputlength) mcmc.X[,,i] <- Z %*% mcmc.Q[,,i] + P
  ## } else {
  ##   mcmc.X[] <- t(mcmc$pars[, -(1:lw)])
  ## }
  ## value <- list(A = mcmc.A, X = mcmc.X, mcmc = mcmc)
  value <- mcmc
  class(value) <- c("bce", "modMCMC")
  attr(value, "A_not_null") <- w
  attr(value, "Xratios") <- Xratios
  attr(value, "pignames") <- pignames
  attr(value, "algnames") <- algnames
  attr(value, "stnames") <- stnames
  return(value)
}
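## Usage sketch (added for illustration; not part of the original source).
## bce1() wraps FME::modMCMC, so '...' takes modMCMC arguments such as niter;
## A and B below are small hypothetical matrices, guarded by if (FALSE).
if (FALSE) {
  A <- matrix(c(2, 0,
                1, 1,
                0, 2), nrow = 3, byrow = TRUE)  # 3 biomarkers x 2 taxa
  B <- A %*% c(0.3, 0.7)                        # one sample, noiseless
  fit <- bce1(A, B, niter = 10000)              # 'niter' is passed to modMCMC
  summary(fit)$meanX                            # posterior mean composition
}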
/scratch/gouwar.j/cran-all/cranData/BCE/R/bce1.R
pairs.bce <- function(x,          # a bce object
                      sample = 1,
                      gap = 0,
                      upper.panel = NA,
                      diag.panel = NA,
                      ...)
  if (!is.null(attributes(x)$A_not_null)) {
    NextMethod("modMCMC")
  } else {
    panel.cor <- function(x, y)
      text(x = mean(range(x)), y = mean(range(y)),
           labels = format(cor(x, y), digits = 2))
    panel.hist <- function(x, ...) {
      usr <- par("usr")
      on.exit(par(usr))
      par(usr = c(usr[1:2], 0, 2))
      h <- hist(x, plot = FALSE)
      breaks <- h$breaks
      nB <- length(breaks)
      y <- h$counts
      y <- y/max(y)
      rect(breaks[-nB], 0, breaks[-1], y, col = "cyan")
    }
    if (!is.null(upper.panel) && is.na(upper.panel)) upper.panel <- panel.cor
    if (!is.null(diag.panel) && is.na(diag.panel)) diag.panel <- panel.hist
    ifelse(is.matrix(x$X), X <- t(x$X), X <- t(x$X[sample, , ]))  # if only one sample, X is a matrix
    labels <- colnames(X)
    pairs(X, diag.panel = diag.panel, labels = labels, gap = gap,
          upper.panel = upper.panel, ...)
  }
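## Usage sketch (added for illustration; 'fit' is a hypothetical bce object
## from BCE(); for multi-sample runs, 'sample' picks which sample to display):
if (FALSE) {
  pairs(fit, sample = 1)  # histograms on the diagonal, correlations above
}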
/scratch/gouwar.j/cran-all/cranData/BCE/R/pairs.bce.R
summary.bce <- function(object,        # a bce object, output of the function bce1() or BCE()
                        confInt = 2/3, # confidence interval for the composition and ratio matrices
                        ...)           # additional arguments affecting the summary produced
## extract best, mean, sd, upper and lower boundaries, and covariance
{
  if (!is.null(attributes(object)$A_not_null)) {
    covariance <- cov(object$pars)
    w <- attributes(object)$A_not_null
    npig <- nrow(w)
    nalg <- ncol(w)
    outputlength <- nrow(object$pars)
    pignames <- attributes(object)$pignames
    algnames <- attributes(object)$algnames
    lw <- length(which(w))
    lp <- ncol(object$pars)
    Xratios <- attributes(object)$Xratios
    if (Xratios) nst <- (lp - lw)/(nalg - 1) else nst <- (lp - lw)/nalg
    stnames <- attributes(object)$stnames
    mcmc.A <- array(0, dim = c(npig, nalg, outputlength),
                    dimnames = list(pignames, algnames, NULL))
    mcmc.A[w] <- t(object$pars[, 1:lw])
    mcmc.X <- array(dim = c(nalg, nst, outputlength),
                    dimnames = list(algnames, stnames, NULL))
    if (Xratios) {
      suppressWarnings(Z <- matrix(c(1, -1, rep(0, nalg - 1)), nalg, nalg - 1))
      P <- c(rep(0, nalg - 1), 1)
      mcmc.Q <- array(dim = c(nalg - 1, nst, outputlength),
                      dimnames = list(NULL, stnames, NULL))
      mcmc.Q[] <- t(object$pars[, -(1:lw)])
      for (i in 1:outputlength) mcmc.X[,,i] <- Z %*% mcmc.Q[,,i] + P
    } else {
      mcmc.X[] <- t(object$pars[, -(1:lw)])
    }
    mean.pars <- apply(object$pars, 2, mean)
    best.pars <- object$pars[which.min(object$SS), ]
    sd.pars <- apply(object$pars, 2, sd)
    last.pars <- object$pars[nrow(object$pars), ]
    median.pars <- apply(object$pars, 2, quantile, probs = 1/2)
    ub.pars <- apply(object$pars, 2, quantile, probs = (1 + confInt)/2)
    lb.pars <- apply(object$pars, 2, quantile, probs = (1 - confInt)/2)
    mean.A <- w; mean.A[w] <- mean.pars[1:lw]
    best.A <- w; best.A[w] <- best.pars[1:lw]
    sd.A <- w; sd.A[w] <- sd.pars[1:lw]
    last.A <- w; last.A[w] <- last.pars[1:lw]
    median.A <- w; median.A[w] <- median.pars[1:lw]
    ub.A <- w; ub.A[w] <- ub.pars[1:lw]
    lb.A <- w; lb.A[w] <- lb.pars[1:lw]
    if (Xratios) {
      suppressWarnings(Z <- matrix(c(1, -1, rep(0, nalg - 1)), nalg, nalg - 1))
      P <- c(rep(0, nalg - 1), 1)
      mean.Q <- matrix(mean.pars[-(1:lw)], nrow = nalg - 1)
      mean.X <- Z %*% mean.Q + P
      best.Q <- matrix(best.pars[-(1:lw)], nrow = nalg - 1)
      best.X <- Z %*% best.Q + P
      sd.X <- apply(mcmc.X, 1:2, sd)
      last.Q <- matrix(last.pars[-(1:lw)], nrow = nalg - 1)
      last.X <- Z %*% last.Q + P
      median.Q <- matrix(median.pars[-(1:lw)], nrow = nalg - 1)
      median.X <- Z %*% median.Q + P
      ub.Q <- matrix(ub.pars[-(1:lw)], nrow = nalg - 1)
      ub.X <- Z %*% ub.Q + P
      lb.Q <- matrix(lb.pars[-(1:lw)], nrow = nalg - 1)
      lb.X <- Z %*% lb.Q + P
    } else {
      mean.X <- matrix(mean.pars[-(1:lw)], nrow = nalg)
      best.X <- matrix(best.pars[-(1:lw)], nrow = nalg)  # was mean.pars in the original: a copy-paste slip
      sd.X <- matrix(sd.pars[-(1:lw)], nrow = nalg)
      last.X <- matrix(last.pars[-(1:lw)], nrow = nalg)
      median.X <- matrix(median.pars[-(1:lw)], nrow = nalg)
      ub.X <- matrix(ub.pars[-(1:lw)], nrow = nalg)
      lb.X <- matrix(lb.pars[-(1:lw)], nrow = nalg)
    }
    rownames(mean.A) <- rownames(best.A) <- rownames(sd.A) <- rownames(last.A) <-
      rownames(median.A) <- rownames(ub.A) <- rownames(lb.A) <- pignames
    colnames(mean.A) <- colnames(best.A) <- colnames(sd.A) <- colnames(last.A) <-
      colnames(median.A) <- colnames(ub.A) <- colnames(lb.A) <-
      rownames(mean.X) <- rownames(best.X) <- rownames(sd.X) <- rownames(last.X) <-
      rownames(median.X) <- rownames(ub.X) <- rownames(lb.X) <- algnames
    colnames(mean.X) <- colnames(best.X) <- colnames(sd.X) <- colnames(last.X) <-
      colnames(median.X) <- colnames(ub.X) <- colnames(lb.X) <- stnames
    return(list(meanA = mean.A, meanX = mean.X,
                bestA = best.A, bestX = best.X,
                sdA = sd.A, sdX = sd.X,
                lastA = last.A, lastX = last.X,
                medianA = median.A, medianX = median.X,
                ubA = ub.A, ubX = ub.X,
                lbA = lb.A, lbX = lb.X,
                covar = covariance))
  } else {
    with(object, {
      nalg <- dim(Rat)[1]
      lr <- length(Rat)/length(logp)
      lx <- length(X)/length(logp)
      w <- which.max(logp)
      bestLogp <- logp[w]
      bestRat <- Rat[,,w]
      meanrat <- rowMeans(Rat, dims = 2)
      quantile1 <- function(x) quantile(x, probs = c((1 - confInt)/2, 1/2, (1 + confInt)/2))
      quantilerat <- apply(Rat, 1:2, quantile1)
      lbrat <- quantilerat[1,,]
      ubrat <- quantilerat[3,,]
      sdrat <- apply(Rat, 1:2, sd)
      if (is.matrix(X)) {
        firstX <- X[, 1]
        bestX <- X[, w]
        meanX <- rowMeans(X)
        quantileX <- apply(X, 1, quantile1)
        lbX <- quantileX[1, ]
        ubX <- quantileX[3, ]
        sdX <- apply(X, 1, sd)
      } else {
        firstX <- X[,,1]
        bestX <- X[,,w]
        meanX <- rowMeans(X, dims = 2)
        quantileX <- apply(X, 1:2, quantile1)
        lbX <- quantileX[1,,]
        ubX <- quantileX[3,,]
        sdX <- apply(X, 1:2, sd)
      }
      bestDat <- bestX %*% bestRat
      if (all(sdrat == 0)) covrat <- 0
      else {
        covratnames <- vector(length = lr)
        for (i in 1:lr)
          covratnames[i] <- paste("Rat(", (i - 1)%%nalg + 1, ",", (i - 1)%/%nalg + 1, ")", sep = "")
        covrat <- var(matrix(aperm(Rat, c(3, 1, 2)), ncol = lr,
                             dimnames = list(NULL, covratnames))[, sdrat > 1e-8],
                      na.rm = TRUE)
      }
      covXnames <- vector(length = lx)
      for (i in 1:lx)
        covXnames[i] <- paste("x(", (i - 1)%/%nalg + 1, ",", (i - 1)%%nalg + 1, ")", sep = "")
      covX <- var(matrix(aperm(X), ncol = lx, dimnames = list(NULL, covXnames)), na.rm = TRUE)
      ## output
      return(invisible(list(
        firstX = firstX,    # X determined through least squares regression from the initial ratio matrix and the data matrix
        bestRat = bestRat,  # ratio matrix for which the posterior probability is maximal
        bestX = bestX,      # composition matrix for which the posterior probability is maximal
        bestLogp = bestLogp,# maximal posterior probability
        bestDat = bestDat,  # product of bestRat and bestX
        meanRat = meanrat,  # means of the elements of the ratio matrix
        sdRat = sdrat,      # standard deviation of the elements of the ratio matrix
        lbRat = lbrat,      # lower boundary of the confidence interval of the elements of the ratio matrix
        ubRat = ubrat,      # upper boundary of the confidence interval of the elements of the ratio matrix
        covRat = covrat,    # covariance matrix of the elements of the ratio matrix
        meanX = meanX,      # means of the elements of the composition matrix
        sdX = sdX,          # standard deviation of the elements of the composition matrix
        lbX = lbX,          # lower boundary of the confidence interval of the elements of the composition matrix
        ubX = ubX,          # upper boundary of the confidence interval of the elements of the composition matrix
        covX = covX         # covariance matrix of the elements of the composition matrix
      )))
    })
  }
}  # end function summary.bce
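## Usage sketch (added for illustration; 'fit' is a hypothetical bce object):
## a 90% credible interval instead of the default 2/3 interval.
if (FALSE) {
  sm <- summary(fit, confInt = 0.9)
  sm$lbX; sm$ubX  # lower/upper bounds of the composition estimates
}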
/scratch/gouwar.j/cran-all/cranData/BCE/R/summary.bce.R
################################################################
## Total Least Squares Composition Estimator
## uses modFit
## this is an orthogonal alternative to chemtax
## Wb added (is called Wa in lsei...)
## tested with all 4 examples (bce-tests.r); performs as well as or better
## than the previous tlsce
## using modFit also allows more flexibility in tuning the optimization
## algorithm, and more output details (hessian, ...)
################################################################

tlsce <- function(A, B, Wa = NULL, Wb = NULL, minA = NULL, maxA = NULL,
                  A_init = A, Xratios = TRUE,
                  ## chemtax = FALSE,
                  ...)
{
  ##=================##
  ## initialisations ##
  ##=================##
  A <- as.matrix(A)
  B <- as.matrix(B)
  l <- nrow(A)   # number of pigments
  m <- ncol(A)   # number of species
  n <- NCOL(B)   # number of samples
  w <- which(A > 0)
  lw <- length(w)
  A_c <- A[w]    # non-zero elements of A
  if (Xratios  # |chemtax
      ) {
    E <- t(rep(1, m)); F <- t(rep(1, n))
  } else {
    E <- t(rep(0, m)); F <- t(rep(0, n))
  }                                      # sum of species fractions is 1 or not
  G <- diag(1, m); H <- matrix(0, m, n)  # all elements positive
  if (is.null(Wa)) Wa <- 1               # weighting of elements of A and B
  if (length(Wa) == 1) Wa <- matrix(Wa, l, m)
  if (length(Wa) == length(A)) Wa_c <- Wa[w]
  if (length(Wb) == 1) Wb <- matrix(Wb, l, n)
  A_c_init <- A_init[w]
  if (is.null(minA)) minA_c <- rep(0, lw) else minA_c <- minA[w]
  if (is.null(maxA)) maxA_c <- rep(+Inf, lw) else maxA_c <- maxA[w]

  ## if (chemtax)
  ## {
  ##   A_rescaled <- rbind(A, 1)
  ##   A_rescaled <- A_rescaled/rep(colSums(A_rescaled), each = l + 1)
  ##   B_rescaled <- rbind(B, 1)
  ##   B_rescaled <- B_rescaled/rep(colSums(B_rescaled), each = l + 1)
  ##   if (!is.null(Wb)) Wb_rescaled <- rbind(Wb, 1)
  ##   Wa_rescaled <- rbind(Wa, colMeans(Wa))  ## ad hoc solution...
  ##   residuals <- function(A_c_new)
  ##   {
  ##     A_new <- A
  ##     A_new[w] <- A_c_new
  ##     A_new_rescaled <- rbind(A_new, 1)
  ##     A_new_rescaled <- A_new_rescaled/rep(colSums(A_new_rescaled), each = l + 1)
  ##     X <- LSEI(A_new_rescaled, B_rescaled, E, F, G, H, Wa = Wb)$X
  ##     if (is.null(Wb)) return(c(Wa_rescaled*(A_new_rescaled - A_rescaled), A_new_rescaled %*% X - B_rescaled))
  ##     return(c(Wa_rescaled*(A_new_rescaled - A_rescaled), Wb_rescaled*(A_new_rescaled %*% X - B_rescaled)))
  ##   }
  ## } else {
  residuals <- function(A_c_new)
  {
    A_new <- A
    A_new[w] <- A_c_new
    X <- LSEI(A_new, B, E, F, G, H, Wa = Wb)$X
    if (is.null(Wb)) return(c(Wa_c*(A_c - A_c_new), A_new %*% X - B))
    return(c(Wa_c*(A_c - A_c_new), Wb*(A_new %*% X - B)))
  }
  ## }

  ##===========##
  ## model fit ##
  ##===========##
  tlsce_fit <- modFit(residuals, A_c, lower = minA_c, upper = maxA_c, ...)

  ##========##
  ## output ##
  ##========##
  A_c_fit <- tlsce_fit$par
  A_fit <- A; A_fit[w] <- A_c_fit
  ## if (chemtax)
  ## {
  ##   A_fit_rescaled <- rbind(A_fit, 1)
  ##   A_fit_rescaled <- A_fit_rescaled/rep(colSums(A_fit_rescaled), each = l + 1)
  ##   LSEI_fit <- LSEI(A_fit_rescaled, B_rescaled, E, F, G, H, Wa = Wb)
  ##   X <- LSEI_fit$X; rownames(X) <- colnames(A); colnames(X) <- colnames(B)
  ##   B_fit_rescaled <- A_fit_rescaled %*% X
  ##   B_fit <- B_fit_rescaled[-(l + 1), ]/rep(B_fit_rescaled[l + 1, ], each = l)
  ## }
  ## else
  ## {
  LSEI_fit <- LSEI(A_fit, B, E, F, G, H, Wa = Wb)
  X <- LSEI_fit$X; rownames(X) <- colnames(A); colnames(X) <- colnames(B)
  B_fit <- A_fit %*% X
  ## }
  ssr <- tlsce_fit$ssr
  ssr_B <- LSEI_fit$solutionNorm
  ssr_A <- ssr - ssr_B
  solutionNorms <- c(ssr, ssr_A, ssr_B)
  names(solutionNorms) <- c("total", "A", "B")
  return(list(X = X,
              A_fit = A_fit,
              B_fit = B_fit,        # the fits
              SS = solutionNorms,   # residual sums of squares
              fit = tlsce_fit))     # a modFit object
}

##############################################################
## helper functions
##############################################################

LSEI <- function(A = NULL, B = NULL, E = NULL, F = NULL, G = NULL, H = NULL, Wa = NULL, ...)
{
  if (is.vector(B)) return(lsei(A, B, E, F, G, H, Wa = Wa, ...))
  else {
    X <- matrix(NA, ncol(A), ncol(B))
    solutionNorm <- 0
    for (i in 1:ncol(B)) {
      BnotNA <- !is.na(B[, i])  # remove NA from B
      ls <- lsei(A[BnotNA, ], B[BnotNA, i], E, F[, i], G, H[, i], Wa = Wa[BnotNA, i], ...)
      X[, i] <- ls$X
      solutionNorm <- solutionNorm + ls$solutionNorm
    }
    return(list(X = X, solutionNorm = solutionNorm))
  }
}  # LSEI

## LSEI <- function(A = NULL, B = NULL, E = NULL, F = NULL, G = NULL, H = NULL, Wa = NULL, ...)
## {
##   if (is.vector(B)) B <- as.matrix(B)
##   X <- matrix(NA, ncol(A), ncol(B))
##   solutionNorm <- 0
##   for (j in 1:ncol(B))
##   {
##     BnotNA <- !is.na(B[, j])                      # remove NA from B (missing data)
##     Xpresent <- colSums(subset(A, B[, j] == 0)) == 0  # remove missing groups from A (biomarker not found)
##     X[!Xpresent, j] <- 0
##     ls <- lsei(A[BnotNA, Xpresent], B[BnotNA, j], E[, Xpresent], F[, j],
##                G[, Xpresent], H[, j], Wa = Wa[BnotNA, j], ...)
##     X[Xpresent, j] <- ls$X
##     solutionNorm <- solutionNorm + ls$solutionNorm
##   }
##   return(list(X = X, solutionNorm = solutionNorm))
## }  # LSEI
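## Usage sketch (added for illustration; not part of the original source).
## A holds biomarker:taxon ratios (rows = biomarkers, columns = taxa), B the
## measured biomarker concentrations per sample; guarded by if (FALSE).
if (FALSE) {
  A <- matrix(c(2, 0,
                1, 1,
                0, 2), nrow = 3, byrow = TRUE)
  B <- A %*% c(0.3, 0.7)
  fit <- tlsce(A, B)
  fit$X   # estimated compositions; fit$SS splits the residual SS over A and B
}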
/scratch/gouwar.j/cran-all/cranData/BCE/R/tlsce.R
## BCEA-deprecated.R

#' @title Deprecated functions in package \pkg{BCEA}.
#' @description The functions listed below are deprecated and will be defunct in
#'   the near future. When possible, alternative functions with similar
#'   functionality are also mentioned. Help pages for deprecated functions are
#'   available at `help("<function>-deprecated")`.
#' @name BCEA-deprecated
#' @keywords internal
NULL
/scratch/gouwar.j/cran-all/cranData/BCEA/R/BCEA-deprecated.R
#------------------------------------------------------------------------------#
#
#   BCEA
#   (decorative ASCII-art package banner in the original source)
#
#------------------------------------------------------------------------------#

#' @keywords internal
"_PACKAGE"

## usethis namespace: start
## usethis namespace: end
NULL
/scratch/gouwar.j/cran-all/cranData/BCEA/R/BCEA-package.R
#' @name CEriskav_assign
#' @title Cost-effectiveness Analysis Including a Parameter of Risk Aversion
#'
#' @description Extends the standard cost-effectiveness analysis to modify the
#'   utility function so that risk aversion of the decision maker is explicitly
#'   accounted for.
#'
#' @aliases CEriskav CEriskav.default
#'
#' @template args-he
#' @param value A vector of values for the risk aversion parameter. If `NULL`,
#'   the default grid `c(1e-11, 2.5e-6, 5e-6)` is used. The first (smallest)
#'   value (`r` -> 0) produces the standard analysis with no risk aversion.
#' @return An object of the class `CEriskav` containing the following elements:
#' \item{Ur}{An array containing the simulated values for all the
#'   ''known-distribution'' utilities for all interventions, all the values of
#'   the willingness to pay parameter and for all the possible values of `r`}
#' \item{Urstar}{An array containing the simulated values for the maximum
#'   ''known-distribution'' expected utility for all the values of the
#'   willingness to pay parameter and for all the possible values of `r`}
#' \item{IBr}{An array containing the simulated values for the distribution of
#'   the Incremental Benefit for all the values of the willingness to pay and
#'   for all the possible values of `r`}
#' \item{eibr}{An array containing the Expected Incremental Benefit for each
#'   value of the willingness to pay parameter and for all the possible values
#'   of `r`}
#' \item{vir}{An array containing all the simulations for the Value of
#'   Information for each value of the willingness to pay parameter and for all
#'   the possible values of `r`}
#' \item{evir}{An array containing the Expected Value of Information for each
#'   value of the willingness to pay parameter and for all the possible values
#'   of `r`}
#' \item{R}{The number of possible values for the parameter of risk aversion `r`}
#' \item{r}{The vector containing all the possible values for the parameter of
#'   risk aversion `r`}
#'
#' @author Gianluca Baio
#' @seealso [bcea()]
#' @importFrom Rdpack reprompt
#'
#' @references
#' \insertRef{Baio2011}{BCEA}
#'
#' \insertRef{Baio2013}{BCEA}
#'
#' @examples
#'
#' # See Baio G., Dawid A.P. (2011) for a detailed description of the
#' # Bayesian model and economic problem
#'
#' # Load the processed results of the MCMC simulation model
#' data(Vaccine)
#'
#' # Runs the health economic evaluation using BCEA
#' m <- bcea(e=eff, c=cost,    # defines the variables of
#'                             # effectiveness and cost
#'       ref=2,                # selects the 2nd row of (e, c)
#'                             # as containing the reference intervention
#'       interventions=treats, # defines the labels to be associated
#'                             # with each intervention
#'       Kmax=50000            # maximum value possible for the willingness
#'                             # to pay threshold; implies that k is chosen
#'                             # in a grid from the interval (0, Kmax)
#' )
#'
#' # Define the vector of values for the risk aversion parameter, r, eg:
#' r <- c(1e-10, 0.005, 0.020, 0.035)
#'
#' # Run the cost-effectiveness analysis accounting for risk aversion
#' \donttest{
#' # uses the results of the economic evaluation
#' # if more than 2 interventions, selects the
#' # pairwise comparison
#'
#' CEriskav(m) <- r
#' }
#'
#' @export
#'
'CEriskav<-' <- function(he, value) UseMethod("CEriskav<-", he)
/scratch/gouwar.j/cran-all/cranData/BCEA/R/CEriskav.R
#' @rdname CEriskav_assign
#'
#' @description Default vector of risk aversion parameters:
#' 1e-11, 2.5e-6, 5e-6
#'
#' @export
#'
'CEriskav<-.bcea' <- function(he, value) {

  # assign the defaults *before* touching value: subsetting a NULL value
  # would silently turn it into numeric(0) and bypass the is.null() check
  if (is.null(value)) {
    value <- c(1e-11, 2.5e-6, 5e-6)
  }

  # r = 0 (risk neutrality) is replaced by a small positive value
  # to keep the exponential transform below well-defined
  value[value == 0] <- 1e-10

  ### COMPARISON IS USED TO SELECT THE COMPARISON FOR WHICH THE ANALYSIS IS CARRIED OUT
  # Reference: Baio G, Dawid AP (2011).

  # expected utilities & EVPI for risk aversion cases
  K <- length(he$k)
  R <- length(value)

  Ur <- array(NA, c(dim(he$U), R))
  Urstar <- array(NA, c(dim(he$Ustar), R))

  comparator_idx <- c(he$comp, he$ref)

  for (i in seq_len(K)) {
    for (l in seq_len(R)) {
      for (j in comparator_idx) {
        # exponential (CARA) utility transform of the net benefit
        Ur[, i, j, l] <- (1/value[l])*(1 - exp(-value[l]*he$U[, i, j]))
      }
      Urstar[, i, l] <- apply(Ur[, i, , l], 1, max, na.rm = TRUE)
    }
  }

  IBr <- array(NA, c(he$n_sim, K, he$n_comparisons, R))

  for (i in seq_len(he$n_comparisons)) {
    IBr[,, i,] <- Ur[, , he$ref, , drop = FALSE] - Ur[, , he$comp[i], , drop = FALSE]
  }

  eibr <- apply(IBr, c(2,3,4), mean)

  vir <- array(NA, c(he$n_sim, K, R))

  for (i in seq_len(K)) {
    for (l in seq_len(R)) {
      vir[, i, l] <- Urstar[, i, l] - max(apply(Ur[, i, , l], 2, mean, na.rm = TRUE), na.rm = TRUE)
    }
  }

  evir <- apply(vir, c(2, 3), mean)

  structure(
    modifyList(
      he,
      list(Ur = Ur,
           Urstar = Urstar,
           IBr = IBr,
           eibr = eibr,
           vir = vir,
           evir = evir,
           R = R,
           r = value)),
    class = c("CEriskav", class(he)))
}


#' @rdname CEriskav_assign
#'
#' @export
#'
'CEriskav<-.default' <- function(he, value) {
  stop("No available method.", call. = FALSE)
}
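# A minimal sketch of the exponential (CARA) utility transform used above,
# on a made-up vector `u` of monetary net benefits (not BCEA output):
# as r -> 0 the transform tends to the identity, which is why r = 0 is
# mapped to 1e-10 in the method. Illustrative only, so not run at load.
if (FALSE) {
  u <- c(-100, 0, 250, 1000)
  r <- 0.005
  (1/r) * (1 - exp(-r * u))     # risk-adjusted utilities
  r0 <- 1e-10
  (1/r0) * (1 - exp(-r0 * u))   # essentially u itself
}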
/scratch/gouwar.j/cran-all/cranData/BCEA/R/CEriskav.default.R
##TODO: how are these different to eib_plot_*, evi_plot_*?
## look at plots in book and examples
## look at original code
## can we just use existing code?

#' @name CEriskav_plot_graph
#' @title Cost-effectiveness Plot Including a Parameter of Risk Aversion
#'
#' @description Choice of base R, \pkg{ggplot2}.
#'
#' @template args-he
#' @param pos_legend Legend position
NULL


#' @rdname CEriskav_plot_graph
#' @title CEriskav base R version
#'
CEriskav_plot_base <- function(he, pos_legend) {

  default_comp <- 1
  pos_legend <- where_legend(he, pos_legend)

  matplot(x = he$k,
          y = he$eibr[, default_comp, ],
          type = "l",
          col = 1,
          lty = 1:he$R,
          xlab = "Willingness to pay",
          ylab = " ",
          main = "EIB as a function of the risk aversion parameter",
          ylim = range(he$eibr))

  text <- paste("r = ", he$r, sep = "")

  # if the first value for r is small enough,
  # consider close to 0 and print label accordingly
  if (he$r[1] < 1e-8) {
    text[1] <- expression(r%->%0)
  }

  legend(pos_legend,
         legend = text,
         lty = 1:he$R,
         cex = 0.9,
         box.lty = 0)
  abline(h = 0, col = "grey")

  matplot(x = he$k,
          y = he$evir,
          type = "l",
          col = 1,
          lty = 1:he$R,
          ylim = range(he$evir),
          xlab = "Willingness to pay",
          ylab = " ",
          main = "EVI as a function of the risk aversion parameter")

  legend(pos_legend,
         legend = text,
         lty = 1:he$R,
         cex = 0.9,
         box.lty = 0)
  abline(h = 0, col = "grey")
}

#' @rdname CEriskav_plot_graph
#' @title CEriskav ggplot2 version
#'
CEriskav_plot_ggplot <- function(he, pos_legend) {

  default_comp <- 1
  linetypes <- rep(c(1,2,3,4,5,6), ceiling(he$R/6))[1:he$R]

  # labels
  text <- paste0("r = ", he$r)

  # if the first value for r is small enough,
  # consider close to 0 and print label accordingly
  if (he$r[1] < 1e-8) {
    text[1] <- expression(r%->%0)
  }

  legend_params <- make_legend_ggplot(he, pos_legend)

  eib_dat <-
    melt(he$eibr[, default_comp, , drop = FALSE],
         value.name = "eibr") %>%
    rename(k = "Var1",
           r = "Var3") %>%
    mutate(r = as.factor(.data$r))

  eibr_plot <-
    ggplot(eib_dat,
           aes(x = .data$k, y = .data$eibr, linetype = .data$r)) +
    geom_line() +
    geom_hline(yintercept = 0, linetype = 1, colour = "grey50") +
    scale_linetype_manual("", labels = text, values = linetypes) +
    theme_bw() +
    labs(title = "EIB as a function of the risk aversion parameter",
         x = "Willingness to pay",
         y = "EIB") +
    theme(
      text = element_text(size = 11),
      legend.key.size = unit(0.66, "line"),
      legend.spacing = unit(-1.25, "line"),
      panel.grid = element_blank(),
      legend.key = element_blank(),
      legend.position = legend_params$legend.position,
      legend.justification = legend_params$legend.justification,
      # was legend_params$legend_direction (underscore), which is not a
      # name returned by make_legend_ggplot() and so was always NULL
      legend.direction = legend_params$legend.direction,
      legend.title = element_blank(),
      legend.background = element_blank(),
      legend.text.align = 0,
      plot.title = element_text(
        lineheight = 1.05,
        face = "bold",
        size = 14.3,
        hjust = 0.5))

  evi_dat <-
    melt(he$evir, value.name = "evir") %>%
    rename(r = "Var2",
           k = "Var1") %>%
    mutate(r = as.factor(.data$r))

  evir_plot <-
    ggplot(evi_dat,
           aes(x = .data$k, y = .data$evir, linetype = .data$r)) +
    geom_hline(yintercept = 0, linetype = 1, colour = "grey50") +
    geom_line() +
    scale_linetype_manual("", labels = text, values = linetypes) +
    theme_bw() +
    labs(title = "EVI as a function of the risk aversion parameter",
         x = "Willingness to pay",
         y = "EVI") +
    theme(
      text = element_text(size = 11),
      legend.key.size = unit(0.66, "line"),
      legend.spacing = unit(-1.25, "line"),
      panel.grid = element_blank(),
      legend.key = element_blank(),
      legend.position = legend_params$legend.position,
      legend.justification = legend_params$legend.justification,
      legend.direction =
legend_params$legend.direction, legend.title = element_blank(), legend.background = element_blank(), legend.text.align = 0, plot.title = element_text( lineheight = 1.05, face = "bold", size = 14.3, hjust = 0.5)) plot(eibr_plot) plot(evir_plot) invisible(list(eib = eibr_plot, evi = evir_plot)) }
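# A usage sketch (illustrative, assuming the Vaccine data shipped with BCEA):
# after the CEriskav assignment the object gains class "CEriskav", so the
# package plot method (where available, as the class design suggests) renders
# the EIB and EVI panels via the engines defined above. Legend handling for a
# direct engine call is assumed to mirror the other BCEA plotting helpers.
if (FALSE) {
  data(Vaccine, package = "BCEA")
  m <- bcea(eff, cost, ref = 2, interventions = treats, Kmax = 50000)
  CEriskav(m) <- c(1e-10, 0.005, 0.020, 0.035)
  plot(m)                                          # dispatches on "CEriskav"
  CEriskav_plot_ggplot(m, pos_legend = "bottom")   # or call an engine directly
}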
/scratch/gouwar.j/cran-all/cranData/BCEA/R/CEriskav_plot_graph.R
#' @rdname createInputs
#'
#' @return \item{mat}{Data.frame containing all the simulations
#' for all the monitored parameters}
#' \item{parameters}{Character vector of the names
#' of all the monitored parameters}
#'
#' @author Gianluca Baio, Anna Heath and Mark Strong
#' @seealso [bcea()],
#' [evppi()]
#' @export
#'
createInputs.default <- function(inputs,
                                 print_is_linear_comb = TRUE) {

  # remove NA columns
  if (sum(is.na(inputs)) > 0) {
    inputs <- inputs[ , colSums(is.na(inputs)) == 0]
    message("Dropped any columns containing NAs")
  }

  if (!is.logical(print_is_linear_comb))
    stop("print_is_linear_comb must be logical.", call. = FALSE)

  inputs <- inputs[, !colnames(inputs) %in% c("lp__", "deviance")]

  # remove redundant parameters
  # linear combination of columns or constant columns
  # by M Strong

  cols_keep <- colnames(inputs)
  is_const_params <- apply(inputs, 2, "var") == 0

  if (any(is_const_params))
    cols_keep <- cols_keep[!is_const_params]

  params <- inputs[, cols_keep, drop = FALSE]
  ranks <- loo_rank(params)
  are_multiple_ranks <- length(unique(ranks)) > 1

  while (are_multiple_ranks) {

    linear_combs <- which(ranks == max(ranks))

    if (print_is_linear_comb) {
      print(paste(linear_combs,
                  "\nLinear dependence: removing column",
                  colnames(params)[max(linear_combs)]))
    }

    # cbind() simply re-wraps the remaining columns as a matrix;
    # with drop = FALSE the subset is already a matrix,
    # so this is kept only as a safeguard
    params <- cbind(params[, -max(linear_combs), drop = FALSE])
    ranks <- loo_rank(params)

    are_multiple_ranks <- length(unique(ranks)) > 1
  }

  # special case: only a linear combination remains
  while (qr(params)$rank == ranks[1]) {

    if (print_is_linear_comb) {
      print(paste("Linear dependence... removing column",
                  colnames(params)[1]))
    }

    params <- cbind(params[, -1, drop = FALSE])
    ranks <- loo_rank(params)
  }

  params <- data.frame(params)

  list(mat = params,
       parameters = colnames(params))
}


#' Create Inputs for EVPI Calculation
#'
#' Creates an object containing the matrix with the parameters simulated using
#' the MCMC procedure (using JAGS, BUGS or Stan) and a vector of parameters
#' (strings) that can be used to perform the expected value of partial
#' information analysis. In the process, `createInputs` also checks for
#' linear dependency among columns of the PSA samples or columns having
#' constant values and removes them to only leave the fundamental parameters
#' (to run VoI analysis). This also deals with simulations stored in a
#' `.csv` or `.txt` file (e.g. as obtained using bootstrapping from a
#' non-Bayesian model).
#'
#' @param inputs A `rjags`, `bugs` or `stanfit` object, containing
#' the results of a call to either JAGS (using `R2jags`), BUGS
#' (using `R2WinBUGS`), or Stan (using `rstan`).
#' @param print_is_linear_comb Logical indicator. If set to `TRUE` (default)
#' then prints the output of the procedure trying to assess whether there are
#' some parameters that are a linear combination of others (in which case
#' they are removed).
#'
#' @export
#'
createInputs <- function(inputs,
                         print_is_linear_comb = TRUE) {
  UseMethod("createInputs", inputs)
}

#' @rdname createInputs
#' @export
#'
createInputs.rjags <- function(inputs,
                               print_is_linear_comb = TRUE) {

  # extract the simulation matrix first; the deviance column (if monitored)
  # only exists on the matrix itself, not on the rjags object
  inputs <- as.matrix(inputs$BUGSoutput$sims.matrix)

  if ("deviance" %in% colnames(inputs))
    inputs <- inputs[, colnames(inputs) != "deviance"]

  NextMethod("createInputs")
}

#' @rdname createInputs
#' @export
#'
createInputs.bugs <- function(inputs,
                              print_is_linear_comb = TRUE) {

  # as for rjags: pull out the simulations before dropping deviance
  inputs <- as.matrix(inputs$sims.matrix)

  if ("deviance" %in% colnames(inputs))
    inputs <- inputs[, colnames(inputs) != "deviance"]

  NextMethod("createInputs")
}

#' @rdname createInputs
#' @export
#'
createInputs.stanfit <- function(inputs,
                                 print_is_linear_comb = TRUE) {

  # convert to a matrix of draws before removing the log-posterior column
  inputs <- as.matrix(inputs)
  inputs <- inputs[, colnames(inputs) != "lp__"]

  NextMethod("createInputs")
}

#' @rdname createInputs
#' @export
#'
createInputs.data.frame <- function(inputs,
                                    print_is_linear_comb = TRUE) {

  inputs <- as.matrix(inputs)
  NextMethod("createInputs")
}

#' @rdname createInputs
#' @export
#'
createInputs.numeric <- function(inputs,
                                 print_is_linear_comb = TRUE) {

  inputs <- as.matrix(inputs)
  NextMethod("createInputs")
}

#' Leave-one-out ranking
#' @param params Parameters
#' @keywords internal
#'
loo_rank <- function(params)
  sapply(seq_len(NCOL(params)), function(x) qr(params[, -x])$rank)
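# A small sketch of the redundancy checks above, on a made-up PSA matrix:
# one column is an exact linear combination of two others and one is constant,
# so only the fundamental parameters survive. Illustrative only, so not run.
if (FALSE) {
  set.seed(1)
  psa <- matrix(rnorm(40), ncol = 2)
  psa <- cbind(psa, psa[, 1] + 2 * psa[, 2], 5)   # linear comb + constant
  colnames(psa) <- c("a", "b", "a_plus_2b", "const")
  qr(psa)$rank    # 3, not 4: one redundant direction
  loo_rank(psa)   # 3 3 3 2: dropping "const" lowers the rank,
                  # dropping any one of the correlated columns does not
  createInputs(psa, print_is_linear_comb = TRUE)$parameters
}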
/scratch/gouwar.j/cran-all/cranData/BCEA/R/CreateInputs.R
#' GrassmannOptim
#'
#' This function is taken from the GrassmannOptim package
#' by Kofi Placid Adragni and Seongho Wu
#' https://cran.r-project.org/web/packages/GrassmannOptim/index.html
#'
#' @param objfun objfun
#' @param W W
#' @param sim_anneal sim_anneal
#' @param temp_init temp_init
#' @param cooling_rate cooling_rate
#' @param max_iter_sa max_iter_sa
#' @param eps_conv eps_conv
#' @param max_iter max_iter
#' @param eps_grad eps_grad
#' @param eps_f eps_f
#' @param verbose verbose
#' @importFrom stats rnorm runif
#' @importFrom Matrix Matrix expm
#' @keywords internal
#' @return List
#'
GrassmannOptim <- function (objfun, W, sim_anneal = FALSE, temp_init = 20,
                            cooling_rate = 2, max_iter_sa = 100, eps_conv = 1e-05,
                            max_iter = 100, eps_grad = 1e-05, eps_f = .Machine$double.eps,
                            verbose = FALSE) {
  call <- match.call()
  if ((is.null(W$Qt)) & (is.null(W$dim)))
    stop("Missing initial values")
  orthonorm <- function (u) {
    if (is.null(u))
      return(NULL)
    if (!(is.matrix(u)))
      u <- as.matrix(u)
    dd <- dim(u)
    n <- dd[1]
    p <- dd[2]
    if (prod(abs(La.svd(u)$d) > 1e-08) == 0)
      stop("collinear vectors in orthonorm")
    if (n < p) {
      warning("There are too many vectors to orthonormalize in orthonorm.")
      u <- as.matrix(u[, 1:p])
      n <- p
    }
    v <- u
    if (p > 1) {
      for (i in 2:p) {
        coef.proj <- c(crossprod(u[, i], v[, 1:(i - 1)]))/diag(crossprod(v[, 1:(i - 1)]))
        v[, i] <- u[, i] - matrix(v[, 1:(i - 1)], nrow = n) %*% matrix(coef.proj, nrow = i - 1)
      }
    }
    coef.proj <- 1/sqrt(diag(crossprod(v)))
    return(t(t(v) * coef.proj))
  }
  if (!is.null(W$Qt)) {
    Qt <- orthonorm(W$Qt)
    p <- nrow(Qt)
    d <- W$dim[1]
  }
  else {
    dimx <- W$dim
    p = dimx[2]
    d <- dimx[1]
    tempQ <- matrix(rnorm(p^2), ncol = p)
    Qt <- Re(eigen(t(tempQ) %*% tempQ)$vectors)
  }
  GetA <- function(alpha, p, d) {
    A <- matrix(0, p, p)
    for (i in 1:d) {
      for (j in (d + 1):p) {
        Eij <- matrix(0, p, p)
        Eij[i, j] <- 1
        Eij[j, i] <- -1
        A <- A + alpha[i, j] * Eij
      }
    }
    return(round(A, digits = 4))
  }
  getGradient <- function(objfun, W, fvalue, eps_grad) {
    alpha <- objfun(W)$gradient
    if (is.null(alpha)) {
      Qt <- W$Qt
      p <- nrow(Qt)
      d <- W$dim[1]
      alpha <- matrix(0, nrow = d, ncol = (p - d))
      for (i in 1:d) {
        for (j in (d + 1):p) {
          Q_tilde <- Qt
          Q_tilde[, i] <- cos(eps_grad)*Qt[, i] - sin(eps_grad)*Qt[, j]
          W$Qt <- Q_tilde
          f_tilde <- round(objfun(W)$value, digits = 5)
          alpha[i, j - d] <- (f_tilde - fvalue)/eps_grad
        }
      }
    }
    return(alpha)
  }
  max_objfun <- function(All_Qt, W) {
    nlength <- length(All_Qt)
    L <- vector(length = nlength)
    d <- W$dim[1]
    for (i in 1:nlength) {
      if (is.na(sum(All_Qt[[i]])))
        L[i] <- NA
      else {
        W$Qt <- All_Qt[[i]]
        L[i] <- objfun(W)$value
      }
    }
    L[abs(L) == Inf] = NA
    if (sum(is.na(L)) == nlength)
      return(list(status = "allNA"))
    index <- min(which(L == max(L, na.rm = TRUE)))
    return(list(Qt = All_Qt[index][[1]], L = round(L[index], digits = 5),
                index = index, status = "OK"))
  }
  if (verbose)
    cat("Initialization...", "\n")
  if (sim_anneal) {
    seq_delta <- runif(1) * exp(seq(-10, 0, by = 2)) %x% c(-1, 1)
    length_seq <- length(seq_delta)
    temperature <- temp_init
    if (verbose) {
      cat("Simulated Annealing...", "This may take a while.")
      cat("\nInitial temperature=", temp_init, "\n")
      cat("Cooling...\n")
      cat("Current temperature:\n")
    }
    while (temperature > 0.1) {
      for (i in 1:max_iter_sa) {
        W$Qt <- Qt
        alpha <- matrix(0, p, p)
        fvalue <- objfun(W)$value
        ws <- matrix(rnorm(d * (p - d)), nrow = d, ncol = (p - d))
        temp_alpha <- getGradient(objfun, W, fvalue, eps_grad) + sqrt(temperature) * ws
        alpha[1:d, (d + 1):p] <- temp_alpha
        matA <- GetA(alpha, p, d)
        candidates_Qt <- vector("list")
        Expms <- lapply(seq_delta,
                        function(delta) matrix(attributes(expm(Matrix(-delta * matA)))$x,
                                               nrow = nrow(matA), ncol = ncol(matA)))
        for (j in 1:length_seq) {
          candidates_Qt[[j]] <- orthonorm(Qt %*% t(Expms[[j]]))
          if (is.na(det(candidates_Qt[[j]])))
            candidates_Qt[[j]] <- NA
        }
        gridmax <- max_objfun(candidates_Qt, W)
        if (gridmax$status != "allNA") {
          candidate_fvalue <- gridmax$L
          diff_ratio <- exp((candidate_fvalue - fvalue)/temperature)
          selector <- as.numeric(runif(1) < min(diff_ratio, 1))
          newQt <- gridmax$Qt
          Qt <- selector * newQt + (1 - selector) * Qt
        }
      }
      temperature <- temperature/cooling_rate
      if (verbose)
        cat(temperature, "\n")
    }
  }
  norm_grads <- NULL
  W$Qt <- Qt
  fvalue <- objfun(W)$value
  alpha <- matrix(0, p, p)
  temp_alpha <- getGradient(objfun, W, fvalue, eps_grad)
  alpha[1:d, (d + 1):p] <- temp_alpha
  iter = 1
  norm_grad <- sum(diag(t(alpha) %*% alpha))
  if (verbose) {
    cat(sprintf("%s %s %s", "iter", " loglik", " gradient"), "\n")
    cat(sprintf("%4.0i\t%1.4e\t%1.4e\t", iter, fvalue, norm_grad), "\n")
  }
  new_fvalue <- fvalue
  fvalues <- fvalue
  norm_grads <- norm_grad
  if ((norm_grad <= eps_conv)) {
    converged = TRUE
    return(invisible(
      list(
        Qt = round(Qt, digits = 4),
        d = d,
        norm_grads = norm_grads,
        fvalues = fvalues,
        converged = converged,
        call = call)
    ))
  }
  repeat {
    iter = iter + 1
    matA <- GetA(alpha, p, d)
    candidates_Qt <- vector("list")
    seq_delta <- runif(1) * exp(seq(-10, 0, by = 2)) %x% c(-1, 1)
    length_seq <- length(seq_delta)
    Expms <- lapply(seq_delta,
                    function(delta) matrix(attributes(expm(Matrix(-delta * matA)))$x,
                                           nrow = nrow(matA), ncol = ncol(matA)))
    for (j in 1:length_seq) {
      candidates_Qt[[j]] <- orthonorm(Qt %*% t(Expms[[j]]))
      if (is.na(det(candidates_Qt[[j]])))
        candidates_Qt[[j]] <- NA
    }
    gridmax <- max_objfun(candidates_Qt, W)
    candidate_Qt <- gridmax$Qt
    candidate_fvalue <- gridmax$L
    candidate_W <- W
    candidate_W$Qt <- candidate_Qt
    temp_alpha <- getGradient(objfun, candidate_W, candidate_fvalue, eps_grad)
    alpha[1:d, (d + 1):p] <- temp_alpha
    norm_grad <- sum(diag(t(alpha) %*% alpha))
    if ((norm_grad <= eps_conv)) {
      if (verbose) {
        cat(sprintf("%4.0i\t%1.4e\t%1.4e\t", iter, candidate_fvalue, norm_grad), "\n")
      }
      converged = TRUE
      break
    }
    if ((candidate_fvalue - fvalue) > eps_f) {
      if (verbose) {
        cat(sprintf("%4.0i\t%1.4e\t%1.4e\t", iter, candidate_fvalue, norm_grad), "\n")
      }
      fvalue <- candidate_fvalue
      Qt <- candidate_Qt
      W$Qt <- Qt
      fvalues <- c(fvalues, fvalue)
      norm_grads <- c(norm_grads, norm_grad)
    }
    if (iter >= max_iter) {
      if (verbose)
        message("Convergence may not have been reached.\nMaximum number of iterations reached")
      converged = FALSE
      break
    }
  }
  invisible(
    list(
      Qt = round(Qt, digits = 4),
      d = d,
      norm_grads = norm_grads,
      fvalues = fvalues,
      converged = converged,
      call = call)
  )
}
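# A toy check of the optimiser (illustrative only, not part of BCEA):
# maximise the trace of the projection of a fixed symmetric matrix A onto a
# d-dimensional subspace, a standard Grassmann objective whose optimum is
# spanned by the leading eigenvectors of A.
if (FALSE) {
  set.seed(42)
  A <- crossprod(matrix(rnorm(25), 5, 5))   # 5 x 5 symmetric positive definite
  objfun <- function(W) {
    # the subspace is taken to be the first d columns of Qt
    U <- W$Qt[, seq_len(W$dim[1]), drop = FALSE]
    list(value = sum(diag(t(U) %*% A %*% U)))
  }
  fit <- GrassmannOptim(objfun, W = list(dim = c(2, 5)), eps_conv = 1e-4)
  tail(fit$fvalues, 1)        # should approach...
  sum(eigen(A)$values[1:2])   # ...the sum of the two largest eigenvalues
}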
/scratch/gouwar.j/cran-all/cranData/BCEA/R/GrassmannOptim.R
#' @importFrom graphics axis
#'
# set up the empty cost-effectiveness plane and draw its axes
add_ceplane_setup <- function(plot_params) {
  do.call("plot",
          plot_params$setup,
          quote = TRUE)
  axis(1)
  axis(2)
}

#' @importFrom graphics polygon
#'
# shade the region of the plane that is cost-effective
# at the chosen willingness to pay
add_ceplane_polygon <- function(plot_params) {
  do.call("polygon",
          plot_params$polygon,
          quote = TRUE)
  box()
}

#' @importFrom graphics matplot
#'
# overlay the (delta_e, delta_c) simulation cloud for each comparison
add_ceplane_points <- function(he, plot_params) {
  do.call("matplot",
          c(list(x = he$delta_e,
                 y = he$delta_c,
                 add = TRUE),
            plot_params$points),
          quote = TRUE)
}

#' @importFrom graphics text points
#'
# mark the ICER at the mean of the joint distribution and label it
add_ceplane_icer <- function(he, plot_params) {
  do.call("text", plot_params$icer_text, quote = TRUE)
  do.call("points",
          c(list(
            x = colMeans(he$delta_e),
            y = colMeans(he$delta_c)),
            plot_params$icer_points),
          quote = TRUE)
}

#' @importFrom graphics text
#'
# annotate the plot with the current willingness to pay, k
add_ceplane_k_txt <- function(plot_params) {
  k_equals_txt <-
    paste0("k == ",
           format(
             plot_params$wtp,
             digits = 3,
             nsmall = 2,
             scientific = FALSE))

  do.call(text,
          c(list(labels = parse(text = k_equals_txt)),
            plot_params$k_txt))
}

#' @importFrom graphics legend
#'
# draw the plot legend from pre-built parameters
add_ceplane_legend <- function(legend_params) {
  do.call(legend, legend_params)
}

#' @importFrom graphics abline
#'
# reference axes through the origin
add_axes <- function() {
  abline(h = 0,
         v = 0,
         col = "dark grey")
}
/scratch/gouwar.j/cran-all/cranData/BCEA/R/add_ceplane_xxx.R
#' Add Contour Quadrants #' #' @template args-he #' @param params List #' #' @return Plot side effect #' @keywords internal aplot #' add_contour_quadrants <- function(he, params) { if (length(he$comp) > 1) return() pm <- params$quadrants text(x = pm$offset * pm$M.e, y = pm$offset * pm$M.c, adj = pm$adj[[1]], parse(text = pm$t1), cex = pm$cex) text(pm$offset * pm$m.e, pm$offset * pm$M.c, adj = pm$adj[[2]], parse(text = pm$t2), cex = pm$cex) text(pm$offset * pm$m.e, pm$offset * pm$m.c, adj = pm$adj[[3]], parse(text = pm$t3), cex = pm$cex) text(pm$offset * pm$M.e, pm$offset * pm$m.c, adj = pm$adj[[4]], parse(text = pm$t4), cex = pm$cex) } #' Geom Quadrant Text #' #' @template args-he #' @param graph_params Plot parameters; list #' @keywords internal aplot #' geom_quad_txt <- function(he, graph_params) { if (length(he$comp) > 1) return(NULL) geom_text(data = graph_params$quad_txt, aes(x = .data$x, y = .data$y, hjust = .data$hjust, label = .data$label), parse = TRUE, size = rel(3.5), inherit.aes = FALSE) }
/scratch/gouwar.j/cran-all/cranData/BCEA/R/add_contour_quadrants.R
#' Add Contours to Base R Plot #' #' @template args-he #' @param params List #' @keywords internal aplot #' @return plot side effect #' add_contours <- function(he, params) { scale <- params$scale levels <- params$levels nlevels <- params$nlevels pts_col <- params$points$col for (i in seq_along(he$delta_e)) { density <- MASS::kde2d(as.matrix(he$delta_e[, i]), as.matrix(he$delta_c[, i]), n = 300, h = c(sd(as.matrix(he$delta_e[, i]))/scale, sd(as.matrix(he$delta_c[, i]))/scale)) if (!any(is.na(density$z))) { if (is.null(nlevels)) { # normalise the density and use levels in the contour density$z <- (density$z - min(density$z)) / (max(density$z) - min(density$z)) graphics::contour( density$x, density$y, density$z, add = TRUE, levels = levels, col = pts_col[i], lwd = params$contour$size, drawlabels = TRUE) } if (!is.null(nlevels)) { graphics::contour( density$x, density$y, density$z, add = TRUE, col = pts_col[i], lwd = params$contour$size, nlevels = nlevels, drawlabels = FALSE) } } } }
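# A standalone sketch of the kernel density estimation behind add_contours(),
# using made-up (delta_e, delta_c) draws rather than a bcea object. The
# bandwidth mirrors the sd/scale rule above with scale = 0.5. Not run at load.
if (FALSE) {
  de <- rnorm(1000, 0.005, 0.002)
  dc <- rnorm(1000, 100, 50)
  dens <- MASS::kde2d(de, dc, n = 300,
                      h = c(sd(de) / 0.5, sd(dc) / 0.5))
  plot(de, dc, pch = 20, col = "grey70",
       xlab = "Effectiveness differential", ylab = "Cost differential")
  contour(dens$x, dens$y, dens$z, add = TRUE, nlevels = 4, drawlabels = FALSE)
}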
/scratch/gouwar.j/cran-all/cranData/BCEA/R/add_contours.R
#' Check bcea Class
#' @template args-he
#' @export
#'
#' @return
#' `is.bcea` returns TRUE or FALSE depending on whether its argument
#' is a bcea class object.
#'
is.bcea <- function(he) inherits(he, "bcea")

#' bcea Print Method
#'
#' @param x A `bcea` object containing the results of the Bayesian
#' modelling and the economic evaluation.
#' @param digits Minimal number of significant digits, see [print.default()].
#' @param give.attr Logical; if `TRUE`, show attributes as sub structures.
#' Defaults to `FALSE` in this method.
#' @param no.list Logical; if TRUE, no ‘list of ...’ nor the class are printed.
#' @param ... Potential further arguments.
#' @keywords print
#' @export
#' @importFrom utils str
#'
#' @examples
#' data("Vaccine")
#' he <- BCEA::bcea(eff, cost)
#' print(he)
#'
print.bcea <- function(x,
                       digits = getOption("digits"),
                       give.attr = FALSE,
                       no.list = TRUE, ...){

  out <- str(x,
             give.attr = give.attr,
             digits.d = digits,
             no.list = no.list, ...)

  invisible(out)
}
/scratch/gouwar.j/cran-all/cranData/BCEA/R/base_methods.R
#' Create Bayesian Cost-Effectiveness Analysis Object
#'
#' Cost-effectiveness analysis based on the results of a simulation model for a
#' variable of clinical benefits (e) and of costs (c). Produces results to be
#' post-processed to give the health economic analysis. The output is stored in
#' an object of the class "bcea".
#'
#' @param eff An object containing `nsim` simulations for the variable of
#' clinical effectiveness for each intervention being considered. In general it
#' is a matrix with `nsim` rows and `nint` columns.
#' This is partially matched with `e` from previous versions of `BCEA` for
#' backwards compatibility.
#' @param cost An object containing `nsim` simulations for the variable of
#' cost for each intervention being considered. In general it is a matrix with
#' `nsim` rows and `nint` columns.
#' This is partially matched with `c` from previous versions of `BCEA` for
#' backwards compatibility.
#' @param ref Defines which intervention (columns of `eff` or `cost`) is
#' considered to be the reference strategy. The default value `ref = 1`
#' means that the intervention associated with the first column of `eff` or
#' `cost` is the reference and the one(s) associated with the other column(s)
#' is(are) the comparators.
#' @param interventions Defines the labels to be associated with each
#' intervention. By default and if `NULL`, assigns labels in the form
#' "Intervention1", ... , "InterventionT".
#' @param .comparison Selects the comparator, in case of more than two
#' interventions being analysed. Default as NULL plots all the comparisons
#' together. Any subset of the possible comparisons can be selected (e.g.,
#' `comparison=c(1,3)` or `comparison = 2`).
#' @param Kmax Maximum value of the willingness to pay to be considered.
#' Default value is `Kmax = 50000`. The willingness to pay is then approximated
#' on a discrete grid in the interval `[0, Kmax]`. The grid is equal to
#' `k` if the parameter is given, or composed of `501` elements if
#' `k = NULL` (the default).
#' @param k An (optional) vector for the values of the willingness
#' to pay grid. Should be of length > 1 otherwise plots will be empty.
#' If not specified then BCEA will construct a grid of 501 values
#' from 0 to `Kmax`. This option is useful when performing intensive
#' computations (e.g. for the EVPPI). This was changed from `wtp` in previous versions
#' for consistency with other functions and so will be deprecated in the future.
#' @param plot A logical value indicating whether the function should produce
#' the summary plot or not.
#'
#' @return An object of the class "bcea" containing the following elements
#' \item{n_sim}{Number of simulations produced by the Bayesian model}
#' \item{n_comparators}{Number of interventions being analysed}
#' \item{n_comparisons}{Number of possible pairwise comparisons}
#' \item{delta_e}{For each possible comparison, the differential in the
#' effectiveness measure}
#' \item{delta_c}{For each possible comparison, the differential in the cost measure}
#' \item{ICER}{The value of the Incremental Cost-Effectiveness Ratio}
#' \item{Kmax}{The maximum value assumed for the willingness to pay threshold}
#' \item{k}{The vector of values for the grid approximation of the willingness
#' to pay}
#' \item{ceac}{The value for the Cost-Effectiveness Acceptability Curve,
#' as a function of the willingness to pay}
#' \item{ib}{The distribution of the Incremental Benefit, for a given
#' willingness to pay}
#' \item{eib}{The value for the Expected Incremental Benefit, as a function of
#' the willingness to pay}
#' \item{kstar}{The grid approximation of the break-even point(s)}
#' \item{best}{A vector containing the numeric label of the intervention that
#' is the most cost-effective for each value of the willingness to pay in the
#' selected grid approximation}
#' \item{U}{An array including the value of the expected utility for each
#' simulation from the Bayesian model, for each value of the grid approximation
#' of the willingness to pay and for each intervention being considered}
#' \item{vi}{An array including the value of information for each simulation
#' from the Bayesian model and for each value of the grid approximation of the
#' willingness to pay}
#' \item{Ustar}{An array including the maximum "known-distribution" utility for
#' each simulation from the Bayesian model and for each value of the grid
#' approximation of the willingness to pay}
#' \item{ol}{An array including the opportunity loss for each simulation from
#' the Bayesian model and for each value of the grid approximation of the
#' willingness to pay}
#' \item{evi}{The vector of values for the Expected Value of Information, as
#' a function of the willingness to pay}
#' \item{interventions}{A vector of labels for all the interventions considered}
#' \item{ref}{The numeric index associated with the intervention used as
#' reference in the analysis}
#' \item{comp}{The numeric index(es) associated with the intervention(s) used
#' as comparator(s) in the analysis}
#' \item{step}{The step size used to form the grid approximation to the willingness to pay}
#' \item{e}{The `eff` matrix used to generate the object (see Arguments)}
#' \item{c}{The `cost` matrix used to generate the object (see Arguments)}
#'
#' @author Gianluca Baio, Andrea Berardi, Nathan Green
#' @references
#' \insertRef{Baio2013}{BCEA}
#'
#' \insertRef{Baio2011}{BCEA}
#'
#' @import dplyr
#' @importFrom Rdpack reprompt
#' @keywords manip
#'
#' @examples
#' # See Baio (2013), Baio (2011) for a detailed description of the
#' # Bayesian model and economic problem
#'
#' # Load the processed results of the MCMC simulation model
#' data(Vaccine)
#'
#' # Runs the health economic evaluation using BCEA
#' m <- bcea(
#'       e=eff,
#'       c=cost,                # defines the variables of
#'                              #  effectiveness and cost
#'       ref=2,                 # selects the 2nd row of (e, c)
#'                              #  as containing the reference intervention
#'       interventions=treats,  # defines the labels to be associated
#'                              #  with each intervention
#'       Kmax=50000,            # maximum value possible for the willingness
#'                              #  to pay threshold; implies that k is chosen
#'                              #  in a grid from the interval (0, Kmax)
#'       plot=TRUE              # plots the results
#' )
#'
#' # Creates a summary table
#' summary(
#'       m,          # uses the results of the economic evaluation
#'                   #  (a "bcea" object)
#'       wtp=25000   # selects the particular value for k
#' )
#'
#' \donttest{
#'
#' # Plots the cost-effectiveness plane using base graphics
#' ceplane.plot(
#'       m,             # plots the Cost-Effectiveness plane
#'       comparison=1,  # if more than 2 interventions, selects the
#'                      #  pairwise comparison
#'       wtp=25000,     # selects the relevant willingness to pay
#'                      #  (default: 25,000)
#'       graph="base"   # selects base graphics (default)
#' )
#'
#' # Plots the cost-effectiveness plane using ggplot2
#' if (requireNamespace("ggplot2")) {
#' ceplane.plot(
#'       m,              # plots the Cost-Effectiveness plane
#'       comparison=1,   # if more than 2 interventions, selects the
#'                       #  pairwise comparison
#'       wtp=25000,      # selects the relevant willingness to pay
#'                       #  (default: 25,000)
#'       graph="ggplot2" # selects ggplot2 as the graphical engine
#' )
#'
#' # Some more options
#' ceplane.plot(
#'       m,
#'       graph="ggplot2",
#'       pos="top",
#'       size=5,
#'       ICER_size=1.5,
#'       label.pos=FALSE,
#'       opt.theme=ggplot2::theme(text=ggplot2::element_text(size=8))
#' )
#' }
#'
#' # Plots the contour and scatterplot of the bivariate
#' # distribution of (Delta_e,Delta_c)
#' contour(
#'       m,            # uses the results of the economic evaluation
#'                     #  (a "bcea" object)
#'       comparison=1, # if more than 2 interventions, selects the
#'                     #  pairwise comparison
#'       nlevels=4,    # selects the number of levels to be
#'                     #  plotted (default=4)
#'       levels=NULL,  # specifies the actual levels to be plotted
#'                     #  (default=NULL, so that R will decide)
#'       scale=0.5,    # scales the bandwidths for both x- and
#'                     #  y-axis (default=0.5)
#'       graph="base"  # uses base graphics to produce the plot
#' )
#'
#' # Plots the contour and scatterplot of the bivariate
#' # distribution of (Delta_e,Delta_c)
#' contour2(
#'       m,          # uses the results of the economic evaluation
#'                   #  (a "bcea" object)
#'       wtp=25000   # selects the willingness-to-pay threshold
#' )
#'
#' # Using ggplot2
#' if (requireNamespace("ggplot2")) {
#' contour2(
#'       m,              # uses the results of the economic evaluation
#'                       #  (a "bcea" object)
#'       graph="ggplot2",# selects the graphical engine
#'       wtp=25000,      # selects the willingness-to-pay threshold
#'       label.pos=FALSE # alternative position for the wtp label
#' )
#' }
#'
#' # Plots the Expected Incremental Benefit for the "bcea" object m
#' eib.plot(m)
#'
#' # Plots the distribution of the Incremental Benefit
#' ib.plot(
#'     m,            # uses the results of the economic evaluation
#'                   #  (a "bcea" object)
#'     comparison=1, # if more than 2 interventions, selects the
#'                   #  pairwise comparison
#'     wtp=25000,    # selects the relevant willingness
#'                   #  to pay (default: 25,000)
#'     graph="base"  # uses base graphics
#' )
#'
#' # Produces a plot of the CEAC against a grid of values for the
#' # willingness to pay threshold
#' ceac.plot(m)
#'
#' # Plots the Expected Value of Information for the "bcea" object m
#' evi.plot(m)
#' }
#'
#' @export
#'
bcea <- function(eff,
                 cost,
                 ref = 1,
                 interventions = NULL,
                 .comparison = NULL,
                 Kmax = 50000,
                 k = NULL,
                 plot = FALSE,
                 ...) UseMethod("bcea", eff)
/scratch/gouwar.j/cran-all/cranData/BCEA/R/bcea.R
#' @rdname bcea #' #' @import dplyr #' #' @export #' bcea.default <- function(eff, cost, ref = NULL, interventions = NULL, .comparison = NULL, Kmax = 50000, k = NULL, plot = FALSE, ...) { exArgs <- list(...) # provide named reference if (is.character(ref)) { if (length(ref) > 1 || !ref %in% interventions) { ref <- NULL } else { ref <- which(ref == interventions) } } if (is.null(ref)) { ref <- 1 message("No reference selected. Defaulting to first intervention.") } if (!is.null(k) && length(k) == 1) message("k should be a vector of length > 1, otherwise plots will be empty.") if (exists("wtp", exArgs)) { message("wtp argument soft deprecated. Please use k instead in future.") k <- exArgs$wtp } validate_bcea(eff, cost, ref, interventions) n_sim <- dim(eff)[1] n_intervs <- dim(eff)[2] intervs <- 1:n_intervs interv_names <- if (is.null(interventions)) { paste("intervention", intervs) } else { as.factor(interventions)} if (!is.null(k)) { k <- sort(unique(k)) } else { step <- Kmax/500 k <- seq(0, Kmax, by = step) } df_ce <- data.frame( sim = 1:n_sim, ref = ref, ints = rep(intervs, each = n_sim), eff = matrix(eff, ncol = 1), cost = matrix(cost, ncol = 1)) df_ce <- df_ce %>% select(-ref) %>% rename(ref = "ints") %>% merge(df_ce, by = c("ref", "sim"), suffixes = c("0", "1"), all.x = FALSE) %>% mutate(delta_e = .data$eff0 - .data$eff1, delta_c = .data$cost0 - .data$cost1) df_ce$interv_names <- factor(interv_names[df_ce$ints], levels = interv_names) he <- new_bcea(df_ce, k) he <- setComparisons(he, .comparison) ##TODO: should separate out this really if (plot) plot(he) return(he) } #' @rdname bcea #' @param ... Additional arguments #' @importFrom MCMCvis MCMCchains #' @export bcea.rjags <- function(eff, ...) { cost <- MCMCvis::MCMCchains(eff, params = "cost") eff <- MCMCvis::MCMCchains(eff, params = "eff") bcea.default(eff, cost, ...) } #' @rdname bcea #' @param ... Additional arguments #' @importFrom rstan extract #' @export bcea.rstan <- function(eff, ...) { cost <- rstan::extract(eff, "cost") eff <- rstan::extract(eff, "eff") bcea.default(as.matrix(eff[[1]]), as.matrix(cost[[1]]), ...) } #' @rdname bcea #' @param ... Additional arguments #' @export bcea.bugs <- function(eff, ...) { cost <- eff$sims.list$cost eff <- eff$sims.list$eff bcea.default(eff, cost, ...) }
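# A sketch of the input structure the S3 wrappers above rely on: the posterior
# object must carry simulation matrices named `eff` and `cost`
# (n_sim x n_interventions). The object below fakes the relevant slot of an
# R2WinBUGS-style fit purely for illustration, so it is not run at load.
if (FALSE) {
  fake_bugs <- list(sims.list = list(
    eff  = matrix(rnorm(2000, 10, 1), ncol = 2),
    cost = matrix(rlnorm(2000, 7, 0.2), ncol = 2)))
  class(fake_bugs) <- "bugs"
  m <- bcea(fake_bugs, ref = 2)   # dispatches to bcea.bugs, then bcea.default
}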
/scratch/gouwar.j/cran-all/cranData/BCEA/R/bcea.default.R
#' ##TODO: #' #' # replace top-level functions with sub-classes: #' #' ceac.plot <- function(he, graph = "base") { #' he <- structure(he, class = list("ceac", class(he))) #' bcea_plot(he, graph) #' } #' #' ceplane.plot <- function(he, graph = "base") { #' he <- structure(he, class = list("ceplane", class(he))) #' bcea_plot(he, graph) #' } #' #' # then don't have to repeat this structure every time: #' #' bcea_plot <- function(he, graph, ...) { #' params <- make_plot_params(he) #' data <- make_plot_data(he) #' #' if (is_baseplot(graph)) #' plot_base(he, data, params) #' if (is_ggplot(graph)) #' plot_ggplot(he, data, params) #' } #' #' #' #' make_plot_params <- function(he, ...) { #' UseMethod('make_plot_params', he) #' } #' #' make_plot_params.ceac <- function(he) { #' #' } #' #' make_plot_data.ceac <- function(he) { #' #' } #' #' #' #' plot_base <- function(he, ...) { #' UseMethod('plot_base', he) #' } #' #' plot_base.ceac <- function(he) { #' #' } #' #' plot_base.ceplane <- function(he) { #' #' } #' #' #' #' plot_ggplot <- function(he, ...) { #' UseMethod('plot_base', he) #' } #' #' plot_ggplot.ceac <- function(he) { #' #' } #' #' plot_ggplot.ceplane <- function(he) { #' #' } #' #' #' #' #' make_plot_data <- function(he, ...) { #' UseMethod('make_plot_data', he) #' } #' #' #'
/scratch/gouwar.j/cran-all/cranData/BCEA/R/bcea_plot.R
#' @name setReferenceGroup_assign #' @title Set Reference Group #' #' @description Alternative way to define (e,c) reference group. #' #' @template args-he #' @param value Reference group number #' #' @return bcea-type object #' @export #' 'setReferenceGroup<-' <- function(he, value) { UseMethod('setReferenceGroup<-', he) } #' @rdname setReferenceGroup_assign #' @export #' 'setReferenceGroup<-.bcea' <- function(he, value) { bcea(eff = he$e, cost = he$c, ref = value, interventions = he$interventions, Kmax = he$Kmax, plot = FALSE) } #' @rdname setReferenceGroup_assign #' @export #' 'setReferenceGroup<-.default' <- function(he, value) { stop("No method available.") } # ------------------------------------------------------------------------- #' @name setKmax_assign #' @title Set Maximum Willingness to Pay #' #' @description Alternative way to define `K` statistic. #' #' @template args-he #' @param value Maximum willingness to pay #' #' @return bcea-type object #' @export #' 'setKmax<-' <- function(he, value) { UseMethod('setKmax<-', he) } #' @rdname setKmax_assign #' @export #' 'setKmax<-.bcea' <- function(he, value) { bcea(eff = he$e, cost = he$c, ref = he$ref, interventions = he$interventions, Kmax = value, plot = FALSE) } #' @rdname setKmax_assign #' @export #' 'setKmax<-.default' <- function(he, value) { stop("No method available.") }
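# Usage sketch for the replacement functions above (Vaccine data assumed
# available from BCEA): each setter re-runs bcea() with one argument changed,
# so every derived quantity in the object is recomputed consistently.
if (FALSE) {
  data(Vaccine, package = "BCEA")
  m <- bcea(eff, cost, ref = 2, interventions = treats, Kmax = 50000)
  setKmax(m) <- 20000         # shrink the willingness-to-pay grid
  setReferenceGroup(m) <- 1   # make the first intervention the reference
}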
/scratch/gouwar.j/cran-all/cranData/BCEA/R/bcea_setters.R
#' Optimal intervention
#'
#' Select the best option for each value of willingness to pay.
#'
#' @param eib Expected incremental benefit
#' @param ref Reference group number
#' @param comp Comparison group number(s)
#'
#' @return Group index
#' @export
#'
best_interv_given_k <- function(eib,
                                ref,
                                comp) {

  if (length(comp) == 1) {

    best <- rep(ref, NROW(eib))
    # eib is defined for reference vs comparator, so a negative value
    # means the comparator is preferred at that willingness to pay
    best[eib < 0] <- comp

  } else {

    # eib has no dim attribute when the willingness to pay grid
    # consists of a single value (a vector rather than a matrix)
    if (is.null(dim(eib))) {

      min_eib <- min(eib)
      which_eib <- which.min(eib)

    } else {

      min_eib <- apply(eib, 1, min)
      which_eib <- apply(eib, 1, which.min)
    }

    best <-
      ifelse(min_eib > 0,
             yes = ref,
             no = comp[which_eib])
  }

  best
}
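# A worked toy example (made-up EIB values): rows are willingness-to-pay
# points, columns are pairwise comparisons against the reference (group 3).
# Illustrative only, so not run at package load.
if (FALSE) {
  eib <- matrix(c(-5, 2, 10,
                  -1, 3, 4), nrow = 3)   # 3 k-values x 2 comparisons
  best_interv_given_k(eib, ref = 3, comp = c(1, 2))
  # row 1: both EIBs negative, comparison 1 is the most negative -> 1
  # rows 2-3: all EIBs positive, so the reference wins -> 3, 3
}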
/scratch/gouwar.j/cran-all/cranData/BCEA/R/best_interv_given_k.R
# Package: ldr
# Type: Package
# Title: Methods for likelihood-based dimension reduction in regression
# Version: 1.3.3
# Date: 2014-06-06
# Author: Kofi Placid Adragni, Andrew Raim
# Maintainer: Kofi Placid Adragni <[email protected]>
# Description: Functions, methods, and data sets for fitting likelihood-based
#  dimension reduction in regression,
#  using principal fitted components (pfc), likelihood acquired directions (lad),
#  covariance reducing models (core).
# URL: https://www.jstatsoft.org/v61/i03/
# License: GPL (>= 2)
# Packaged: 2021-10-08 16:32:42 UTC; Nathan
# Repository: https://github.com/cran/ldr
# Date/Publication: 2014-10-29 16:36:14
#
# Function to generate a basis-function data matrix.
#
# Constructs a data matrix of basis functions from the n response observations.
# The response can be continuous or categorical. The function returns a matrix
# of n rows and r columns, where the number of columns r depends on the choice
# of basis. Polynomial, piecewise polynomial (continuous and discontinuous) and
# Fourier bases are implemented. For a polynomial basis, r is the degree of the
# polynomial.
#
bf <- function(y,
               case = c("poly", "categ", "fourier", "pcont", "pdisc"),
               degree = 1,
               nslices = 1,
               scale = FALSE) {

  Indicator <- function(x, H) return(ifelse((x %in% H), 1, 0))

  case <- match.arg(case)
  nobs <- length(y)

  if (case=="categ")
  {
    bins.y <- unique(sort(y))
    r <- length(unique(sort(y))) - 1
    fy <- array(rep(0), c(r, nobs))

    for (i in 1:r){
      fy[i,] <- sapply(y, function(x) (x==bins.y[i]))}
  }
  else if (case=="fourier")
  {
    fy <- array(rep(0), c(2*degree, nobs))

    for(i in 1:degree)
    {
      fy[2*i-1, 1:nobs] <- cos(2*pi*y*i)
      fy[2*i, 1:nobs] <- sin(2*pi*y*i)
    }
  }
  else if (case=="poly")
  {
    if (degree==0)
      stop("This case is not defined")

    fy <- array(rep(0), c(degree, nobs))
    for (k in 1:degree) fy[k, ] <- y^k
  }
  else if (case=="pdisc")
  {
    if ((nslices==0) | (nslices==1)){
      message("The minimum number of slices is 2")
      nslices <- 2
    }

    r <- (degree + 1) * nslices - 1
    fy <- array(rep(0), c(r, nobs))
    slicing <- ldr.slices(y,nslices)
    bins.y <- slicing$bins

    if (degree==0) # Piecewise constant discontinuous
    {
      for(i in 1:r) fy[i,] <- Indicator(y, bins.y[[i]])
    }
    else if (degree==1) # Piecewise linear discontinuous
    {
      for(i in seq_len(nslices-1))
      {
        fy[2*i-1, ] <- Indicator(y, bins.y[[i]])
        fy[2*i, ] <- Indicator(y, bins.y[[i]])*(y-bins.y[[i]][1])
      }
      fy[2*nslices-1, ] <- Indicator(y, bins.y[[nslices]])*(y-bins.y[[nslices]][1])
    }
    else if (degree==2) # Piecewise quadratic discontinuous
    {
      for(i in 1:(nslices-1))
      {
        fy[3*(i-1)+1, ] <- Indicator(y, bins.y[[i]])
        fy[3*(i-1)+2, ] <- Indicator(y, bins.y[[i]])*(y-bins.y[[i]][1])
        fy[3*(i-1)+3, ] <- Indicator(y, bins.y[[i]])*(y-bins.y[[i]][1])**2
      }
      fy[3*nslices-2, ] <- Indicator(y, bins.y[[nslices]])*(y-bins.y[[nslices]][1])
      fy[3*nslices-1, ] <- Indicator(y, bins.y[[nslices]])*(y-bins.y[[nslices]][1])**2
    }
    else if (degree==3) # Piecewise cubic discontinuous
    {
      for(i in 1:(nslices-1))
      {
        fy[4*(i-1)+1, ] <- Indicator(y, bins.y[[i]])
        fy[4*(i-1)+2, ] <- Indicator(y, bins.y[[i]])*(y-bins.y[[i]][1])
        fy[4*(i-1)+3, ] <- Indicator(y, bins.y[[i]])*(y-bins.y[[i]][1])**2
        fy[4*(i-1)+4, ] <- Indicator(y, bins.y[[i]])*(y-bins.y[[i]][1])**3
      }
      fy[4*nslices-3, ] <- Indicator(y, bins.y[[nslices]])*(y-bins.y[[nslices]][1])
      fy[4*nslices-2, ] <- Indicator(y, bins.y[[nslices]])*(y-bins.y[[nslices]][1])**2
      fy[4*nslices-1, ] <- Indicator(y, bins.y[[nslices]])*(y-bins.y[[nslices]][1])**3
    }
  }
  else if (case=="pcont")
  {
    if (nslices == 0 || nslices == 1){
      message("The minimum number of slices is 2")
      nslices <- 2}

    if
(degree==0) stop("Piecewise Constant Continuous is not defined.") r <- nslices*degree+1 fy <- array(rep(0), c(r, nobs)) slicing <- ldr.slices(y, nslices) bins.y <- slicing$bins if (degree==1)# Piecewise linear continuous { fy[1,] <- Indicator(y, bins.y[[1]]) if (r>1) for(i in 1:nslices) fy[i+1,] <- Indicator(y, bins.y[[i]])*(y - bins.y[[i]][1]) } else if (degree==2)# Piecewise quadratic continuous { fy[1,] <- Indicator(y, bins.y[[1]]) for(i in 1:nslices) { fy[2*i,] <- Indicator(y, bins.y[[i]])*(y - bins.y[[i]][1]) fy[2*i+1,] <- Indicator(y, bins.y[[i]])*(y - bins.y[[i]][1])**2 } } else if (degree==3)# Piecewise cubic continuous { fy[1,] <- Indicator(y, bins.y[[1]]) for(i in 1:nslices) { fy[3*i-1,] <- Indicator(y, bins.y[[i]])*(y - bins.y[[i]][1]) fy[3*i,] <- Indicator(y, bins.y[[i]])*(y - bins.y[[i]][1])**2 fy[3*i+1,] <- Indicator(y, bins.y[[i]])*(y - bins.y[[i]][1])**3 } } } return( scale(t(Re(fy)), center=TRUE, scale=scale)) }
/scratch/gouwar.j/cran-all/cranData/BCEA/R/bf.R
#' Cost-effectiveness summary statistics table
#'
#' As commonly shown in a journal paper.
#' @template args-he
#' @param wtp Willingness to pay
#' @param ... Additional parameters
#' @keywords internal
#' @examples
#' data(Vaccine)
#'
#' # Runs the health economic evaluation using BCEA
#' m <- bcea(
#'       e=eff,
#'       c=cost,                # defines the variables of
#'                              #  effectiveness and cost
#'       ref=2,                 # selects the 2nd row of (e, c)
#'                              #  as containing the reference intervention
#'       interventions=treats,  # defines the labels to be associated
#'                              #  with each intervention
#'       Kmax=50000             # maximum value possible for the willingness
#'                              #  to pay threshold; implies that k is chosen
#'                              #  in a grid from the interval (0, Kmax)
#' )
#' ce_table(m)
#' @export
#'
ce_table <- function(he,
                     wtp = 25000, ...) {

  data.frame(
    cost = colMeans(he$c)[c(he$ref, he$comp)],
    eff = colMeans(he$e)[c(he$ref, he$comp)],
    delta.c = c(NA, colMeans(he$delta_c)),
    delta.e = c(NA, colMeans(he$delta_e)),
    ICER = c(NA, he$ICER),
    INB = c(NA, he$eib[he$k == wtp, ]))
}

#' Calculate Dataset For ICERs From bcea Object
#'
#' @template args-he
#' @param comp_label Optional vector of strings with comparison labels
#' @param ... Additional arguments
#'
#' @return A data.frame object including mean outcomes, comparison identifier,
#' comparison label and associated ICER
#'
#' @export
#' @examples
#' data("Smoking")
#' he <- BCEA::bcea(eff, cost)
#' tabulate_means(he)
#'
tabulate_means <- function(he,
                           comp_label = NULL, ...) {

  comp_label <- comp_label %||% seq_len(he$n_comparisons)

  data.frame(
    lambda.e = vapply(1:he$n_comparisons,
                      function(x) mean(as.matrix(he$delta_e)[, x]),
                      FUN.VALUE = NA_real_),
    lambda.c = vapply(1:he$n_comparisons,
                      function(x) mean(as.matrix(he$delta_c)[, x]),
                      FUN.VALUE = NA_real_),
    comparison = as.factor(1:he$n_comparisons),
    label = comp_label,
    ICER = he$ICER)
}
/scratch/gouwar.j/cran-all/cranData/BCEA/R/ce_table.R
#' @rdname ceac.plot #' #' @template args-he #' @template args-comparison #' @template args-pos #' @template args-graph #' #' @return \item{ceac}{If `graph = "ggplot2"` a ggplot object, or if `graph = "plotly"` #' a plotly object containing the requested plot. Nothing is returned when `graph = "base"`, #' the default.} The function produces a plot of the #' cost-effectiveness acceptability curve against the discrete grid of possible #' values for the willingness to pay parameter. Values of the CEAC closer to 1 #' indicate that uncertainty in the cost-effectiveness of the reference #' intervention is very low. Similarly, values of the CEAC closer to 0 indicate #' that uncertainty in the cost-effectiveness of the comparator is very low. #' #' @author Gianluca Baio, Andrea Berardi #' @seealso [bcea()], #' [plot.bcea()] #' @references #' #' \insertRef{Baio2011}{BCEA} #' #' \insertRef{Baio2013}{BCEA} #' #' @keywords hplot #' @export #' #' @import ggplot2 #' @importFrom Rdpack reprompt #' #' @examples #' data("Vaccine") #' he <- BCEA::bcea(eff, cost) #' ceac.plot(he) #' #' ceac.plot(he, graph = "base") #' ceac.plot(he, graph = "ggplot2") #' ceac.plot(he, graph = "plotly") #' #' ceac.plot(he, graph = "ggplot2", #' title = "my title", #' line = list(color = "green"), #' theme = ggplot2::theme_dark()) #' #' ## more interventions #' he2 <- BCEA::bcea(cbind(eff, eff - 0.0002), cbind(cost, cost + 5)) #' mypalette <- RColorBrewer::brewer.pal(3, "Accent") #' ceac.plot(he2, graph = "ggplot2", #' title = "my title", #' theme = ggplot2::theme_dark(), #' pos = TRUE, #' line = list(color = mypalette)) # #' ceac.plot(he, graph = "base", title = "my title", line = list(color = "green")) # #' ceac.plot(he2, graph = "base") #' #' ceac.plot(he2, graph = "plotly", pos = "bottom") #' ceac.plot.bcea <- function(he, comparison = NULL, pos = c(1, 0), graph = c("base", "ggplot2", "plotly"), ...) { graph <- match.arg(graph) he <- setComparisons(he, comparison) graph_params <- prepare_ceac_params(he, ...) if (is_baseplot(graph)) { ceac_plot_base(he, pos_legend = pos, graph_params) } else if (is_ggplot(graph)) { ceac_plot_ggplot(he, pos_legend = pos, graph_params, ...) } else if (is_plotly(graph)) { ceac_plot_plotly(he, pos_legend = pos, graph_params) } } #' Cost-Effectiveness Acceptability Curve (CEAC) Plot #' #' Produces a plot of the Cost-Effectiveness Acceptability Curve (CEAC) against #' the willingness to pay threshold. #' #' The CEAC estimates the probability of cost-effectiveness, with respect to a #' given willingness to pay threshold. The CEAC is used mainly to evaluate the #' uncertainty associated with the decision-making process, since it enables the #' quantification of the preference of the compared interventions, defined in terms #' of difference in utilities. #' Formally, the CEAC is defined as: #' #' \deqn{\textrm{CEAC} = P(\textrm{IB}(\theta) > 0)} #' #' If the net benefit function is used as utility function, the definition can be #' re-written as #' #' \deqn{\textrm{CEAC} = P(k \cdot \Delta_e - \Delta_c > 0)} #' #' effectively depending on the willingness to pay value \eqn{k}. #' #' @template args-he #' @param ... If `graph = "ggplot2"` and a named theme object is supplied, #' it will be passed to the ggplot2 object. The usual ggplot2 syntax is used. #' Additional arguments: #' \itemize{ #' \item `line = list(color)`: specifies the line colour(s) - all graph types. #' \item `line = list(type)`: specifies the line type(s) as `lty` numeric values - all graph types. 
#' \item `line = list(size)`: specifies the line width(s) as numeric values - all graph types. #' \item `currency`: Currency prefix to willingness to pay values - ggplot2 only. #' \item `area_include`: logical, include area under the CEAC curves - plotly only. #' \item `area_color`: specifies the AUC colour - plotly only.} #' @aliases ceac.plot #' @export #' ceac.plot <- function(he, ...) { UseMethod('ceac.plot', he) }
/scratch/gouwar.j/cran-all/cranData/BCEA/R/ceac.plot.R
#' #' #' IB Bootstrapping #' #' #' #' @template args-he #' #' @param k Willingness to pay #' #' @param R Number of samples #' #' #' ib_boot <- function (he, k, R) { #' #' k_idx <- which(he$k == k) #' ci <- vector(mode = "numeric", length = R) #' #' for (i in seq_len(R)) { #' #' idx <- sample(1:he$n_sim, replace = TRUE) #' s <- he$ib[k_idx, , ][idx] > 0 #' ci[i] <- sum(s)/he$n_sim #' } #' ci #' } #' #' #' #' CEAC Bootstrapping #' #' #' #' @template args-he #' #' @param R Number of samples #' #' #' #' @importFrom stats quantile #' #' #' #' @examples #' #' res <- ceac_boot(he, R) #' #' #' #' plot(res[, 1] , type = "l") #' #' lines(res[, 2] , type = "l") #' #' #' ceac_boot <- function(he, R) { #' #' ci <- matrix(, nrow = length(he$k), ncol = 2) #' #' for (i in seq_along(he$k)) { #' #' ib <- ib_boot(he, k = he$k[i], R) #' #' ci[i, ] <- quantile(ib, probs = c(0.025, 0.975)) #' } #' ci #' } #' #'
/scratch/gouwar.j/cran-all/cranData/BCEA/R/ceac_boot.R
#' Cost-Effectiveness Acceptability Curve (CEAC) Plot By Graph Device #' #' Choice of base R, \pkg{ggplot2} or \pkg{plotly}. #' @name ceac_plot_graph #' #' @template args-he #' @param pos_legend Legend position #' @param graph_params Aesthetic ggplot parameters #' @param ... Additional arguments NULL #' @rdname ceac_plot_graph #' @keywords hplot #' ceac_plot_base <- function(he, pos_legend, graph_params, ...) UseMethod("ceac_plot_base", he) #' @rdname ceac_plot_graph #' @keywords hplot #' ceac_plot_base.pairwise <- function(he, pos_legend, graph_params, ...) { ceac_matplot(he, pos_legend, graph_params, "p_best_interv") } #' @rdname ceac_plot_graph #' @keywords hplot #' ceac_plot_base.bcea <- function(he, pos_legend, graph_params, ...) { ceac_matplot(he, pos_legend, graph_params, "ceac") } #' CEAC Matrix Plot #' #' CEAC plot using `matplot` in Base R. #' #' @inheritParams ceac_plot_graph #' @param ceac `ceac` index in `he` #' @keywords internal hplot #' @importFrom graphics matplot legend #' @md ceac_matplot <- function(he, pos_legend, graph_params, ceac) { base_params <- helper_base_params(he, graph_params) legend_params <- ceac_legend_base(he, pos_legend, base_params) do.call("matplot", c(list(x = he$k, y = he[[ceac]]), base_params), quote = TRUE) do.call(legend, legend_params) } #' @rdname ceac_plot_graph #' #' @keywords hplot #' ceac_plot_ggplot <- function(he, pos_legend, graph_params, ...) UseMethod("ceac_plot_ggplot", he) #' @rdname ceac_plot_graph #' @keywords hplot #' ceac_plot_ggplot.pairwise <- function(he, pos_legend, graph_params, ...) { ceac_ggplot(he, pos_legend, graph_params, "p_best_interv", ...) } #' @rdname ceac_plot_graph #' @keywords hplot #' ceac_plot_ggplot.bcea <- function(he, pos_legend, graph_params, ...) { ceac_ggplot(he, pos_legend, graph_params, "ceac", ...) } #' @rdname ceac_plot_graph #' @param ceac ceac index in `he` #' @importFrom scales label_dollar #' @keywords internal hplot #' @md ceac_ggplot <- function(he, pos_legend, graph_params, ceac, ...) { extra_params <- list(...) 
ceac_dat <- he[[ceac]] n_lines <- ncol(ceac_dat) len_k <- length(he$k) data_psa <- tibble(k = rep(he$k, times = n_lines), ceac = c(ceac_dat), comparison = as.factor(rep(1:n_lines, each = len_k))) graph_params <- helper_ggplot_params(he, graph_params) legend_params <- make_legend_ggplot(he, pos_legend) theme_add <- purrr::keep(extra_params, is.theme) ggplot(data_psa, aes(x = .data$k, y = .data$ceac)) + geom_line(aes(linetype = .data$comparison, linewidth = factor(.data$comparison), colour = factor(.data$comparison))) + theme_ceac() + theme_add + # theme scale_y_continuous(limits = c(0, 1)) + scale_x_continuous( labels = scales::label_dollar(prefix = graph_params$currency)) + do.call(labs, graph_params$annot) + # text do.call(theme, legend_params) + # legend scale_linetype_manual("", # lines labels = graph_params$labels, values = graph_params$line$type) + scale_color_manual("", labels = graph_params$labels, values = graph_params$line$color) + scale_linewidth_manual("", labels = graph_params$labels, values = graph_params$line$size) } #' @rdname ceac_plot_graph #' ceac_plot_plotly <- function(he, pos_legend = "left", graph_params) { comparisons_label <- paste0(he$interventions[he$ref]," vs ",he$interventions[he$comp]) data.psa <- data.frame( k = he$k, ceac = he$ceac, comparison = as.factor(c( sapply(1:he$n_comparisons, function(x) rep(x, length(he$k))) )), label = as.factor(c( sapply(comparisons_label, function(x) rep(x, length(he$k))) ))) graph_params$line$type <- graph_params$line$type %||% rep_len(1:6, he$n_comparisons) # opacities if (!is.null(graph_params$area$color)) graph_params$area$color <- sapply(graph_params$area$color, function(x) ifelse(grepl(pattern = "^rgba\\(", x = x), yes = x, no = plotly::toRGB(x, 0.4))) ceac <- plotly::plot_ly(data.psa, x = ~k) ceac <- plotly::add_trace( ceac, y = ~ ceac, type = "scatter", mode = "lines", fill = ifelse(graph_params$area$include, "tozeroy", "none"), name = ~ label, fillcolor = graph_params$area$color, color = ~ comparison, colors = graph_params$line$color, linetype = ~ comparison, linetypes = graph_params$line$type) legend_params <- make_legend_plotly(pos_legend) ceac <- plotly::layout( ceac, title = graph_params$annot$title, xaxis = list( hoverformat = ".2f", title = graph_params$annot$x), yaxis = list( title = graph_params$annot$y, range = c(0, 1.005)), showlegend = he$n_comparisons > 1, legend = legend_params) plotly::config(ceac, displayModeBar = FALSE) }
/scratch/gouwar.j/cran-all/cranData/BCEA/R/ceac_plot_graph.R
#' @rdname ceaf.plot #' #' @param graph A string used to select the graphical engine to use for #' plotting. Should (partial-) match the two options `"base"` or #' `"ggplot2"`. Default value is `"base"`. #' #' @return \item{ceaf}{A ggplot object containing the plot. Returned only if #' `graph="ggplot2"`.} #' #' @author Gianluca Baio, Andrea Berardi #' @seealso [bcea()], #' [multi.ce()] #' #' @references #' \insertRef{Baio2011}{BCEA} #' #' \insertRef{Baio2013}{BCEA} #' #' @keywords hplot #' #' @import ggplot2 grid #' @importFrom graphics lines #' @importFrom Rdpack reprompt #' #' @examples #' #' # See Baio G., Dawid A.P. (2011) for a detailed description of the #' # Bayesian model and economic problem #' #' # Load the processed results of the MCMC simulation model #' data(Vaccine) #' #' # Runs the health economic evaluation using BCEA #' m <- bcea( #' e=eff, #' c=cost, # defines the variables of #' # effectiveness and cost #' ref=2, # selects the 2nd row of (e, c) #' # as containing the reference intervention #' interventions=treats, # defines the labels to be associated #' # with each intervention #' Kmax=50000, # maximum value possible for the willingness #' # to pay threshold; implies that k is chosen #' # in a grid from the interval (0, Kmax) #' plot=FALSE # inhibits graphical output #' ) #' #' \donttest{ #' mce <- multi.ce(m) # uses the results of the economic analysis #' } #' #' \donttest{ #' ceaf.plot(mce) # plots the CEAF #' } #' #' \donttest{ #' ceaf.plot(mce, graph = "g") # uses ggplot2 #' } #' #' \donttest{ #' # Use the smoking cessation dataset #' data(Smoking) #' m <- bcea(eff, cost, ref = 4, intervention = treats, Kmax = 500, plot = FALSE) #' mce <- multi.ce(m) #' ceaf.plot(mce) #' } #' #' @export #' ceaf.plot.pairwise <- function(mce, graph = c("base", "ggplot2"), ...) { graph <- match.arg(graph) base_graphics <- all(pmatch(graph, c("base", "ggplot2")) != 2) if (!(requireNamespace("ggplot2", quietly = TRUE) && requireNamespace("grid", quietly = TRUE))) { message("Falling back to base graphics\n") base_graphics <- TRUE } if (base_graphics) { plot(NULL, ylim = c(0, 1), xlim = c(0, max(mce$k)), xlab = "Willingness to pay", ylab = "Probability of most cost effectiveness", main = "Cost-effectiveness acceptability frontier") matplot(x = mce$k, mce$p_best_interv, type = "l", lty = 3, add = TRUE) lines(x = mce$k, y = mce$ceaf, type = "l", lty = 1, lwd = 4) } else { df <- data.frame(k = mce$k, ceaf = mce$ceaf) ggplot(df, aes(x = .data$k, y = .data$ceaf)) + theme_bw() + geom_line() + coord_cartesian(ylim = c(-0.05, 1.05)) + labs(title = "Cost-effectiveness acceptability frontier", x = "Willingness to pay", y = "Probability of most cost-effectiveness") + theme(text = element_text(size = 11), legend.key.size = grid::unit(0.66, "lines"), legend.spacing = grid::unit(-1.25, "line"), panel.grid = element_blank(), legend.key = element_blank(), plot.title = element_text( lineheight = 1.05, face = "bold", size = 14.3, hjust = 0.5)) } } #' Cost-Effectiveness Acceptability Frontier (CEAF) plot #' #' Produces a plot the Cost-Effectiveness Acceptability Frontier (CEAF) #' against the willingness to pay threshold. #' #' @param mce The output of the call to the function [multi.ce()] #' @param ... Additional arguments #' #' @export #' ceaf.plot <- function(mce, ...) { UseMethod('ceaf.plot', mce) }
/scratch/gouwar.j/cran-all/cranData/BCEA/R/ceaf.plot.R
#' @rdname ceef.plot
#'
#' @template args-he
#' @param comparators Vector specifying the comparators to be included in the
#' frontier analysis. It must have a length > 1. Default as `NULL` includes
#' all the available comparators.
#' @template args-pos
#' @param start.from.origins Logical. Should the frontier start from the
#' origins of the axes? The argument is reset to `FALSE` if the average
#' effectiveness and/or costs of at least one comparator are negative.
#' @param threshold Specifies if the efficiency should be defined based on a
#' willingness-to-pay threshold value. If set to `NULL` (the default), no
#' conditions are included on the slope increase. If a positive value is passed
#' as argument, to be efficient an intervention also requires to have an ICER
#' for the comparison versus the last efficient strategy not greater than the
#' specified threshold value. A negative value will be ignored with a warning.
#' @param flip Logical. Should the axes of the plane be inverted?
#' @param dominance Logical. Should the dominance regions be included in the
#' plot?
#' @param relative Logical. Should the plot display the absolute measures (the
#' default as `FALSE`) or the differential outcomes versus the reference
#' comparator?
#' @param print.summary Logical. Should the efficiency frontier summary be
#' printed along with the graph? See Details for additional information.
#' @param graph A string used to select the graphical engine to use for
#' plotting. Should (partial-)match the two options `"base"` or
#' `"ggplot2"`. Default value is `"base"`.
#' @param print.plot Logical. Should the efficiency frontier be plotted?
#' @param ... If `graph = "ggplot2"` and a named theme object is supplied,
#' it will be added to the ggplot object. Ignored if `graph = "base"`.
#' Setting the optional argument `include.ICER` to `TRUE` will print
#' the ICERs in the summary tables, if produced.
#'
#' @return \item{ceef}{A ggplot object containing the plot. Returned only
#' if `graph = "ggplot2"`.} The function produces a plot of the
#' cost-effectiveness efficiency frontier. The dots show the simulated values
#' for the intervention-specific distributions of the effectiveness and costs.
#' The circles indicate the average of each bivariate distribution, with the
#' numbers referring to each included intervention. The numbers inside the
#' circles are black if the intervention is included in the frontier and grey
#' otherwise. If the option `dominance` is set to `TRUE`, the
#' dominance regions are plotted, indicating the areas of dominance.
#' Interventions in the areas between the dominance region and the frontier are
#' in a situation of extended dominance.
#' @author Andrea Berardi, Gianluca Baio
#' @seealso [bcea()]
#'
#' @references
#' \insertRef{Baio2013}{BCEA}
#'
#' \insertRef{IQWIG2009}{BCEA}
#'
#' @importFrom graphics rect abline points legend box
#' @importFrom grDevices colours
#' @importFrom Rdpack reprompt
#'
#' @examples
#'
#' ## create the bcea object m for the smoking cessation example
#' data(Smoking)
#' m <- bcea(eff, cost, ref = 4, Kmax = 500, interventions = treats)
#'
#' ## produce plot
#' ceef.plot(m, graph = "base")
#'
#' \donttest{
#' ## tweak the options
#' ## flip axis
#' ceef.plot(m,
#'           flip = TRUE,
#'           dominance = FALSE,
#'           start.from.origins = FALSE,
#'           print.summary = FALSE,
#'           graph = "base")
#'
#' ## or use ggplot2 instead
#' if (require(ggplot2)) {
#'    ceef.plot(m,
#'              dominance = TRUE,
#'              start.from.origins = FALSE,
#'              pos = TRUE,
#'              print.summary = FALSE,
#'              graph = "ggplot2")
#' }
#' }
#'
#' @export
#'
ceef.plot.bcea <- function(he,
                           comparators = NULL,
                           pos = c(1, 1),
                           start.from.origins = TRUE,
                           threshold = NULL,
                           flip = FALSE,
                           dominance = TRUE,
                           relative = FALSE,
                           print.summary = TRUE,
                           graph = c("base", "ggplot2"),
                           print.plot = TRUE,
                           ...) {
  graph <- match.arg(graph)
  # extra_args <- list(...)

  ##TODO: this function uses comparators not comparisons
  ##      thing is he$e,c have all interventions because can modify
  ##

  if (!is.null(comparators)) {
    # re-index the reference and comparators within the selected subset
    selected <- unique(c(he$ref, comparators))
    he$ref <- rank(selected)[1]
    he$comp <- rank(selected)[-1]
    he$n_comparators <- length(comparators)
    he$n_comparisons <- length(comparators) - 1
    he$interventions <- he$interventions[comparators]
  }

  # if incremental analysis (relative to the reference) required
  # needs to modify the bcea object
  if (relative) {
    temp <- he
    temp$e <- temp$c <- matrix(NA, he$n_sim, he$n_comparators)
    temp$e[, he$ref] <- temp$c[, he$ref] <- rep(0, he$n_sim)
    temp$e[, -he$ref] <- -he$delta_e
    temp$c[, -he$ref] <- -he$delta_c
    he <- temp
  }

  frontier_data <- prep_frontier_data(he, threshold, start.from.origins)

  frontier_params <-
    list(colour = colours()[
      floor(seq(262, 340, length.out = he$n_comparators))],  # grey scale
      pos = pos,
      flip = flip,
      relative = relative,
      dominance = dominance)

  if (print.summary)
    ceef.summary(he,
                 frontier_data,
                 frontier_params, ...)

  if (is_baseplot(graph)) {
    if (print.plot) {
      ceef_plot_base(he,
                     frontier_data,
                     frontier_params)
    }
  } else if (is_ggplot(graph)) {
    if (print.plot) {
      ceef_plot_ggplot(he,
                       frontier_data,
                       frontier_params, ...)
    }
  } else if (is_plotly(graph)) {
    ##TODO:
    # ceef_plot_plotly(he,
    #                  frontier_data,
    #                  frontier_params,
    #                  ...)
  }
}


#' Cost-Effectiveness Efficiency Frontier (CEEF) Plot
#'
#' The line connecting successive points on a cost-effectiveness plane which each
#' represent the effect and cost associated with different treatment alternatives.
#' The gradient of a line segment represents the ICER of the treatment comparison
#' between the two alternatives represented by that segment.
#' The cost-effectiveness frontier consists of the set of points corresponding to
#' treatment alternatives that are considered to be cost-effective at different values
#' of the cost-effectiveness threshold. The steeper the gradient between successive
#' points on the frontier, the higher is the ICER between these treatment alternatives,
#' and the more expensive alternative would be considered cost-effective only when a
#' high value of the cost-effectiveness threshold is assumed.
#' Points not lying on the cost-effectiveness frontier represent treatment alternatives
#' that are not considered cost-effective at any value of the cost-effectiveness threshold.
#'
#' Back-compatibility with previous versions of BCEA:
#' `bcea` objects created with BCEA versions <2.1-0 did not include the
#' generating `e` and `c` matrices, so this function is not compatible with
#' them. The matrices can, however, be appended to `bcea` objects obtained
#' using previous versions, making sure that the class of the object remains
#' unaltered.
#'
#' The argument `print.summary` allows for printing a brief summary of the
#' efficiency frontier, with default to `TRUE`. Two tables are plotted,
#' one for the interventions included in the frontier and one for the dominated
#' interventions. The average costs and clinical benefits are included for each
#' intervention. The frontier table includes the slope for the increase in the
#' frontier and the non-frontier table displays the dominance type of each
#' dominated intervention. Please note that the slopes are defined as the
#' increment in the costs for a unit increment in the benefits even if
#' `flip = TRUE`, for consistency with the ICER definition. The angle of
#' increase is in radians and depends on the definition of the axes, i.e. on
#' the value given to the `flip` argument.
#'
#' If the argument `relative` is set to `TRUE`, the graph will not
#' display the absolute measures of costs and benefits. Instead, the axes will
#' represent differential costs and benefits compared to the reference
#' intervention (indexed by `ref` in the [bcea()] function).
#'
#' @template args-he
#' @param ... Additional arguments
#' @export
#'
ceef.plot <- function(he, ...) {
  UseMethod('ceef.plot', he)
}
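
# Minimal sketch of one way to identify frontier points (illustrative only;
# the package computes the frontier in prep_frontier_data(), whose actual
# implementation may differ). Starting from the cheapest intervention, the
# next frontier point is the candidate with the smallest ICER among the
# strictly more effective and more costly options; the resulting slopes are
# automatically non-decreasing, matching the summary tables described above.
.frontier_demo <- function(eff = c(1.0, 1.5, 2.0, 2.5),
                           cost = c(10, 25, 18, 40)) {
  cur <- order(cost, eff)[1]            # start from the cheapest option
  ids <- cur
  repeat {
    cand <- which(eff > eff[cur] & cost > cost[cur])
    if (length(cand) == 0) break
    icer <- (cost[cand] - cost[cur])/(eff[cand] - eff[cur])
    cur <- cand[which.min(icer)]        # smallest ICER joins the frontier
    ids <- c(ids, cur)
  }
  ids  # defaults give 1, 3, 4: point 2 is dominated by point 3
}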
/scratch/gouwar.j/cran-all/cranData/BCEA/R/ceef.plot.R
#' Summary table for CEEF #' #' @template args-he #' @param frontier_data Frontier data #' @param frontier_params Frontier parameters #' @param include.ICER Should we include the ICER? default: FALSE #' @param ... Additional arguments #' #' @return Summary printed to console #' @keywords internal print #' ceef.summary <- function(he, frontier_data, frontier_params, include.ICER = FALSE, ...) { ceef.points <- frontier_data$ceef.points orig.avg <- frontier_data$orig.avg flip <- frontier_params$flip ## tables adaptation and formatting no.ceef <- which(!1:he$n_comparators %in% ceef.points$comp) ## Interventions included if (ceef.points$comp[1] == 0) ceef.points <- ceef.points[-1, ] rownames(ceef.points) <- he$interventions[as.numeric(levels(ceef.points$comp)[ceef.points$comp])] if (!include.ICER) { ceef.points[, 5] <- atan(ceef.points[, 4]^(1*ifelse(!flip, 1, -1))) ceef.points <- ceef.points[, -3] colnames(ceef.points) <- c("Effectiveness", "Costs", "Increase slope", "Increase angle") } else { ICERs <- numeric(dim(ceef.points)[1]) index <- as.numeric(levels(ceef.points$comp)[ceef.points$comp]) for (i in seq_along(ICERs)) { if (ceef.points$comp[i] == he$ref) ICERs[i] <- NA_real_ else ICERs[i] <- he$ICER[index[i] + ifelse(index[i]<he$ref, 0, -1)] } ceef.points[, 3] <- ICERs ceef.points[, 5] <- atan(ceef.points[, 4]^(1*ifelse(!flip, 1, -1))) colnames(ceef.points) <- c("Effectiveness", "Costs", paste0("ICER ", he$interventions[he$ref]," vs."), "Increase slope", "Increase angle") } if (flip) colnames(ceef.points)[1:2] <- colnames(ceef.points[2:1]) ## Interventions not included if (length(no.ceef) > 0) { noceef.points <- data.frame(matrix(NA_real_, ncol = 4, nrow = length(no.ceef))) noceef.points[, 1:2] <- orig.avg[no.ceef, -3] if (!include.ICER) { noceef.points <- noceef.points[, -3] colnames(noceef.points) <- c("Effectiveness", "Costs", "Dominance type") } else { ICERs <- numeric(dim(noceef.points)[1]) for (i in seq_along(ICERs)) { if(no.ceef[i] == he$ref) ICERs[i] <- NA_real_ else ICERs[i] <- he$ICER[no.ceef[i] + ifelse(no.ceef[i] < he$ref, 0, -1)] } noceef.points[, 3] <- ICERs colnames(noceef.points) <- c("Effectiveness", "Costs", paste0("ICER ", he$interventions[he$ref]," vs."), "Dominance type") } how.dominated <- rep("Extended dominance", length(no.ceef)) for (i in seq_along(no.ceef)) for (j in seq_len(dim(ceef.points)[1])) { ## if the product of the deltas is negative it is dominated ## cannot be dominant since not on the frontier if ((noceef.points[i, 1] - ceef.points[j, 1])*(noceef.points[i, 2] - ceef.points[j, 2]) < 0) { how.dominated[i] <- "Absolute dominance" ## alternative: # how.dominated[i] <- paste0("Dominated by ",rownames(ceef.points)[j]) break } } noceef.points[, ifelse(!include.ICER, 3, 4)] <- how.dominated rownames(noceef.points) <- he$interventions[no.ceef] if (flip) colnames(noceef.points)[1:2] <- colnames(noceef.points)[2:1] } ## Print the summary table cat("\nCost-effectiveness efficiency frontier summary \n\n") cat("Interventions on the efficiency frontier:\n") print(ceef.points, quote = FALSE, digits = 5, justify = "center") cat("\n") if (length(no.ceef) > 0) { cat("Interventions not on the efficiency frontier:\n") print(noceef.points, quote = FALSE, digits = 5, justify = "center") } }
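
# Illustrative check of the dominance rule used above (not package API):
# relative to a frontier point, a non-frontier point whose effectiveness and
# cost deltas have opposite signs is absolutely dominated; it cannot itself be
# dominant, since it is not on the frontier. If the deltas share the same sign
# against every frontier point, the intervention is in extended dominance.
.dominance_rule_demo <- function() {
  frontier_pt <- c(eff = 2.0, cost = 18)
  candidate   <- c(eff = 1.5, cost = 25)   # less effective AND more costly
  prod_deltas <- (candidate[["eff"]] - frontier_pt[["eff"]]) *
    (candidate[["cost"]] - frontier_pt[["cost"]])
  prod_deltas < 0                          # TRUE -> "Absolute dominance"
}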
/scratch/gouwar.j/cran-all/cranData/BCEA/R/ceef.summary.R
#' @name ceef_plot_graph #' @title Cost-effectiveness Efficiency Frontier Plot By Graph Device #' #' @description Choice of base R, \pkg{ggplot2}. #' #' @template args-he #' @param frontier_data Frontier data #' @param frontier_params Frontier parameters #' @param ... Additional arguments NULL #' @rdname ceef_plot_graph #' @title CEEF plot ggplot2 version #' #' @import ggplot2 #' @importFrom grid unit #' ceef_plot_ggplot <- function(he, frontier_data, frontier_params, ...) { scatter.data <- frontier_data$scatter.data ceef.points <- frontier_data$ceef.points orig.avg <- frontier_data$orig.avg colour <- frontier_params$colour pos <- frontier_params$pos flip <- frontier_params$flip relative <- frontier_params$relative add_dominance_region <- frontier_params$dominance add_frontier <- dim(ceef.points)[1] > 1 extra_args <- list(...) opt_theme <- purrr::keep(extra_args, is.theme) ceplane <- ggplot(ceef.points, aes(x = .data$x, y = .data$y)) if (add_dominance_region) { ceplane <- ceplane + geom_rect(data = ceef.points, aes(xmax = .data$x, ymin = .data$y), ymax = 2*max(abs(range(scatter.data$c))), xmin = -2*max(abs(range(scatter.data$e))), alpha = 0.35, fill = "grey75") } ceplane <- ceplane + geom_hline(yintercept = 0, colour = "grey") + geom_vline(xintercept = 0, colour = "grey") + geom_point(data = scatter.data, aes(x = .data$e, y = .data$c, colour = .data$comp), size = 1) if (add_frontier) ceplane <- ceplane + geom_path() xlab <- ifelse(!relative, "Effectiveness", "Incremental effectiveness") ylab <- ifelse(!relative, "Cost", "Incremental cost") comparators <- sort(c(he$comp, he$ref)) ### add circles ceplane <- ceplane + geom_point( data = orig.avg, aes(x = .data$e.orig, y = .data$c.orig), size = 5.5, colour = "black") + geom_point( data = orig.avg, aes(x = .data$e.orig, y = .data$c.orig), size = 4.5, colour = "white") + scale_colour_manual( "", labels = paste(comparators, ":", he$interventions[comparators]), values = colour, na.value = "black") + labs(title = "Cost-effectiveness efficiency frontier", x = xlab, y = ylab) + theme_bw() ## add text into circles for (i in seq_len(he$n_comparators)) { ceplane <- ceplane + geom_text( data = orig.avg[i, ], aes(x = .data$e.orig, y = .data$c.orig, label = .data$comp), size = 3.5, colour = ifelse(i %in% ceef.points$comp, "black", "grey60")) } legend_params <- make_legend_ggplot(he, pos) ceplane <- ceplane + theme( legend.position = legend_params$legend.position, legend.justification = legend_params$legend.justification, legend.title = element_blank(), legend.background = element_blank(), text = element_text(size = 11), legend.key.size = grid::unit(0.66, "lines"), legend.spacing = grid::unit(-1.25, "line"), panel.grid = element_blank(), legend.key = element_blank(), legend.text.align = 0, plot.title = element_text( hjust = 0.5, face = "bold", lineheight = 1.05, size = 14.3)) + opt_theme if (flip) ceplane <- ceplane + coord_flip() ceplane } #' @rdname ceef_plot_graph #' @title CEEF plot base R version #' ceef_plot_base <- function(he, frontier_data, frontier_params) { scatter.data <- frontier_data$scatter.data ceef.points <- frontier_data$ceef.points orig.avg <- frontier_data$orig.avg colour <- frontier_params$colour pos <- frontier_params$pos flip <- frontier_params$flip relative <- frontier_params$relative dominance <- frontier_params$dominance pos <- where_legend_always(he, pos) if (flip) { temp <- scatter.data$e scatter.data$e <- scatter.data$c scatter.data$c <- temp temp <- ceef.points$x ceef.points$x <- ceef.points$y ceef.points$y <- temp temp <- 
orig.avg$e.orig orig.avg$e.orig <- orig.avg$c.orig orig.avg$c.orig <- temp rm(temp) } # set up plot window xlab <- ifelse((!flip & !relative), "Effectiveness", ifelse((!flip & relative), "Incremental effectiveness", ifelse((flip & !relative), "Cost", "Incremental cost"))) ylab <- ifelse((!flip & !relative), "Cost", ifelse((!flip & relative), "Incremental cost", ifelse((flip & !relative), "Effectiveness", "Incremental effectiveness"))) plot(NULL, xlim = c(min(range(scatter.data$e)[1],0), max(range(scatter.data$e)[2],0)), ylim = c(min(range(scatter.data$c)[1],0), max(range(scatter.data$c)[2],0)), main = "Cost-effectiveness efficiency frontier", xlab = xlab, ylab = ylab) if (dominance) { # add dominance regions for (i in seq_len(dim(ceef.points)[1])) { rect( col = "grey95", border = NA, xleft = ifelse(!flip, -1, 1) * 2 * max(abs(range(scatter.data$e))), xright = ceef.points$x[i], ybottom = ceef.points$y[i], ytop = ifelse(!flip, 1, -1) * 2 * max(abs(range(scatter.data$c))) ) } if (dim(ceef.points)[1] > 1) for (i in 2:dim(ceef.points)[1]) { rect( col = "grey85", border = NA, xleft = ifelse(!flip, -1, 1) * 2 * max(abs(range(scatter.data$e))), xright = ceef.points$x[ifelse(!flip, i - 1, i)], ybottom = ceef.points$y[ifelse(!flip, i, i - 1)], ytop = ifelse(!flip, 1, -1) * 2 * max(abs(range(scatter.data$c))) ) } } abline(h = 0, v = 0, col = "grey") ##TODO: # plot the scatter # matplot()? # will need to add sim number column to cast # do this in prep_frontier_data() comparators <- unique(scatter.data$comp) for (i in seq_len(he$n_comparators)) { sub_scatter <- dplyr::filter(scatter.data, .data$comp == comparators[i]) points(sub_scatter[, c("e", "c")], type = "p", pch = 20, cex = 0.35, col = colour[i]) } ##TODO: why are these two separate arrays? # add frontier points(ceef.points[, c("x", "y")], type = "l", lwd = 2) # add circles points(orig.avg[, c("e.orig", "c.orig")], pch = 21, cex = 2, bg = "white", col = "black") ### legend # add text; grey if not on the frontier for (i in seq_len(he$n_comparators)) { text(orig.avg[i, c("e.orig", "c.orig")], labels = orig.avg[i, 3], col = ifelse(i %in% ceef.points$comp, "black", "grey60"), cex = 0.75) } comparators <- sort(c(he$comp, he$ref)) text <- paste(comparators, ":", he$interventions[comparators]) legend(pos, text, col = colour, cex = 0.7, bty = "n", lty = 1) # because dominance areas overwrite outer box box() }
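
# Usage note: these device-specific helpers are not meant to be called
# directly; ceef.plot() prepares frontier_data and frontier_params and then
# dispatches to one of them. For example, with the Smoking data used in the
# documentation above:
#
#   data(Smoking, package = "BCEA")
#   m <- bcea(eff, cost, ref = 4, Kmax = 500, interventions = treats)
#   ceef.plot(m, graph = "ggplot2")   # routed to ceef_plot_ggplot()
#   ceef.plot(m, flip = TRUE)         # routed to ceef_plot_base()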
/scratch/gouwar.j/cran-all/cranData/BCEA/R/ceef_plot_graph.R
#' @rdname ceplane.plot #' #' @template args-he #' @param comparison Selects the comparator, in case of more than two #' interventions being analysed. Default as `NULL` plots all the #' comparisons together. Any subset of the possible comparisons can be selected #' (e.g., `comparison = c(1,3)` or `comparison = 2`). #' @param wtp The value of the willingness to pay parameter. Not used if #' `graph = "base"` for multiple comparisons. #' @param pos Parameter to set the position of the legend; for a single #' comparison plot, the ICER legend position. Can be given in form of a string #' `(bottom|top)(right|left)` for base graphics and #' `bottom|top|left|right` for \pkg{ggplot2}. It can be a two-elements vector, #' which specifies the relative position on the x and y axis respectively, or #' alternatively it can be in form of a logical variable, with `FALSE` #' indicating to use the default position and `TRUE` to place it on the #' bottom of the plot. Default value is `c(1,1)`, that is the topright #' corner inside the plot area. #' @param graph A string used to select the graphical engine to use for #' plotting. Should (partial-) match the two options `"base"` or #' `"ggplot2"`. Default value is `"base"`. #' @param ... If `graph = "ggplot2"` and a named theme object is supplied, #' it will be passed to the \pkg{ggplot2} object. The usual ggplot2 syntax is used. #' Additional graphical arguments: #' \itemize{ #' \item `label.pos = FALSE`: will place the willingness to pay label in a #' different position at the bottom of the graph - base and \pkg{ggplot2} only (no #' label in \pkg{plotly}). #' \item `line = list(color)`: a colour specifying the colour of the willingness-to-pay line. #' \item `point = list(color)`: a vector of colours specifying the colour(s) associated #' to the cloud of points. Should be of length 1 or equal to the number of comparisons. #' \item `point = list(size)`: a vector of colours specifying the size(s) of the points. #' Should be of length 1 or equal to the number of comparisons. #' \item `point = list(shape)`: a vector of shapes specifying type(s) of the points. #' Should be of length 1 or equal to the number of comparisons. #' \item `icer = list(color)`: a vector of colours specifying the colour(s) of the ICER #' points. Should be of length 1 or equal to the number of comparisons. #' \item `icer = list(size)`: a vector of colours specifying the size(s) of the ICER #' points. Should be of length 1 or equal to the number of comparisons. #' \item `area_include`: logical, include or exclude the cost-effectiveness #' acceptability area (default is TRUE). #' \item `area = list(color)`: a colour specifying the colour of the cost-effectiveness #' acceptability area. #' \item `currency`: Currency prefix to cost differential values - \pkg{ggplot2} only. #' \item `icer_annot`: Annotate each ICER point with text label - \pkg{ggplot2} only. #' } #' #' @return If `graph = "ggplot2"` a ggplot object, or if `graph = "plotly"` #' a plotly object containing the requested plot. Nothing is returned when #' `graph = "base"`, the default. #' #' Grey dots show the simulated values for the joint #' distribution of the effectiveness and cost differentials. The larger red #' dot shows the ICER and the grey area identifies the sustainability area, #' i.e. the part of the plan for which the simulated values are below the #' willingness to pay threshold. The proportion of points in the sustainability #' area effectively represents the CEAC for a given value of the willingness to #' pay. 
If there are more than two comparators and no pairwise comparison is
#' specified, all scatterplots are drawn in different colours.
#'
#' @details In the \pkg{plotly} version, `point_colors`, `ICER_colors` and `area_color` can also
#' be specified as rgba colours using either the `plotly::toRGB()`
#' function or an rgba colour string, e.g. `'rgba(1, 1, 1, 1)'`.
#'
#' @author Gianluca Baio, Andrea Berardi
#' @seealso [bcea()],
#'          [ceplane_plot_graph()]
#'
#' @references
#' \insertRef{Baio2011}{BCEA}
#'
#' \insertRef{Baio2013}{BCEA}
#'
#' @keywords hplot
#' @importFrom Rdpack reprompt
#' @export
#'
#' @examples
#' ## create the bcea object for the smoking cessation example
#' data(Smoking)
#'
#' m <- bcea(eff, cost, ref = 4, Kmax = 500, interventions = treats)
#'
#' ## produce the base plot
#' ceplane.plot(m, wtp = 200, graph = "base")
#'
#' ## select only one comparator
#' ceplane.plot(m, wtp = 200, graph = "base", comparison = 3)
#'
#' ## use ggplot2
#' if (requireNamespace("ggplot2")) {
#'    ceplane.plot(m, wtp = 200, pos = "right", icer = list(size = 2), graph = "ggplot2")
#' }
#'
#' ## plotly
#' ceplane.plot(m, wtp = 200, graph = "plotly")
#' ceplane.plot(m, wtp = 200, comparison = 1, graph = "plotly")
#'
ceplane.plot.bcea <- function(he,
                              comparison = NULL,
                              wtp = 25000,
                              pos = c(0, 1),
                              graph = c("base", "ggplot2", "plotly"),
                              ...) {
  graph <- match.arg(graph)

  he <- setComparisons(he, comparison)

  graph_params <- prep_ceplane_params(he, wtp, ...)

  if (is_baseplot(graph)) {
    ceplane_plot_base(he, wtp, pos_legend = pos, graph_params)
  } else if (is_ggplot(graph)) {
    ceplane_plot_ggplot(he, wtp, pos_legend = pos, graph_params, ...)
  } else if (is_plotly(graph)) {
    ceplane_plot_plotly(he, wtp, graph_params, pos_legend = pos)
  }
}


#' Cost-effectiveness Plane Plot
#'
#' Produces a scatter plot of the cost-effectiveness plane,
#' together with the sustainability area, as a function of
#' the selected willingness to pay threshold.
#'
#' @template args-he
#' @param ... Additional arguments
#'
#' @export
#' @aliases ceplane.plot
#'
ceplane.plot <- function(he, ...) {
  UseMethod('ceplane.plot', he)
}
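
# Illustrative sketch (not part of the package API): as noted in the
# documentation above, the proportion of simulated points falling inside the
# sustainability area equals the CEAC at the chosen willingness to pay. For a
# single comparison this reduces to one line on the incremental quantities:
.ceac_at_wtp_demo <- function(delta_e, delta_c, wtp = 25000) {
  mean(wtp * delta_e - delta_c > 0)   # share of points below the wtp line
}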
/scratch/gouwar.j/cran-all/cranData/BCEA/R/ceplane.plot.R
#' Cost-Effectiveness Plane Base Graphics Parameters
#'
#' Assembles the list of base-graphics parameters used to draw the CE-plane.
#' @keywords dplot
#'
ceplane_base_params <- function(he,
                                wtp,
                                graph_params) {
  c(list(
    setup = setup_params(graph_params),
    points = points_params(graph_params),
    polygon = polygon_params(graph_params, wtp),
    k_txt = k_text(graph_params, wtp),
    wtp = wtp,
    ref_first = graph_params$ref_first),
    icer_params(graph_params, he))
}


#' Contour Plot Base Graphics Parameters
#'
#' Assembles the list of base-graphics parameters used to draw the contour plot.
#' @keywords dplot
#'
contour_base_params <- function(he,
                                graph_params) {
  c(list(
    setup = setup_params(graph_params),
    points = points_params(graph_params),
    quadrants = quadrant_params(he, graph_params),
    scale = graph_params$scale,
    levels = graph_params$levels,
    contour = graph_params$contour,
    nlevels = graph_params$nlevels,
    ref_first = graph_params$ref_first),
    icer_params(graph_params, he))
}
/scratch/gouwar.j/cran-all/cranData/BCEA/R/ceplane_base_params.R
#' @keywords dplot #' setup_params <- function(graph_params) { list(xlim = graph_params$xlim, ylim = graph_params$ylim, xlab = graph_params$xlab, ylab = graph_params$ylab, x = NULL, axes = FALSE, main = graph_params$title, xaxs = "i", yaxs = "i") } #' @keywords dplot #' polygon_params <- function(graph_params, wtp) { x_max <- graph_params$xlim[2] y_min <- graph_params$ylim[1] # x_min <- graph_params$xlim[1] # y_max <- graph_params$ylim[2] polygon_x <- c(y_min/wtp, x_max, x_max) polygon_y <- c(y_min, x_max*wtp, y_min) list(x = polygon_x, y = polygon_y, # border = graph_params$area$line_color, col = ifelse(is.null(graph_params$area$color), "grey95", graph_params$area$color)) } #' @keywords dplot #' points_params <- function(graph_params) { list(pch = graph_params$point$shape, #20, cex = graph_params$point$size, col = graph_params$point$color) } #' @keywords dplot #' k_text <- function(graph_params, wtp) { x_k <- graph_params$xlim[1] y_k <- max(x_k*wtp, graph_params$ylim[1]) x_adj <- diff(graph_params$xlim)*0.04 y_adj <- diff(graph_params$ylim)*0.04 list(cex = 0.8, pos = 4, x = x_k + x_adj, y = y_k + y_adj) } #' @keywords dplot #' icer_params <- function(graph_params, he) { x_adj <- diff(graph_params$xlim)*0.04 y_adj <- diff(graph_params$ylim)*0.04 list(icer_text = list(labels = icer_label(he), cex = 0.95, pos = 2, col = "red", x = graph_params$xlim[2] - x_adj, y = graph_params$ylim[2] - y_adj), icer_points = list(pch = 20, col = "red", cex = 1)) } #' @keywords dplot #' icer_label <- function(he) { if (he$n_comparisons == 1) { return( paste0("\U2022", " ICER = ", format( colMeans(he$delta_c)/colMeans(he$delta_e), digits = 6, nsmall = 2))) } return("") }
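
# Geometry note (illustrative): polygon_params() above builds the
# sustainability area as a triangle with vertices (y_min/wtp, y_min),
# (x_max, x_max*wtp) and (x_max, y_min). The first two vertices lie exactly
# on the line c = wtp * e, so the shaded region is the part of the plot
# window below the willingness-to-pay line. A quick check of that invariant
# (the axis limits here are arbitrary):
.polygon_invariant_demo <- function(wtp = 25000) {
  p <- polygon_params(list(xlim = c(-0.002, 0.001), ylim = c(-13, 5)), wtp)
  isTRUE(all.equal(p$y[1:2], wtp * p$x[1:2]))   # both on c = wtp * e
}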
/scratch/gouwar.j/cran-all/cranData/BCEA/R/ceplane_base_params_xxx.R
#' Extract Separate Parameter Sets #' #' @param ... Additional arguments #' #' @importFrom utils modifyList #' @keywords internal #' ceplane_geom_params <- function(...) { extra_params <- list(...) icer_params <- extra_params[ names(extra_params) %in% c("ICER_size", "ICER_colors")] names(icer_params) <- gsub("ICER_", "", names(icer_params)) point_params <- extra_params[ names(extra_params) %in% c("point_size", "point_colors")] names(point_params) <- gsub("point_", "", names(point_params)) polygon_params <- extra_params[ names(extra_params) %in% c("area_include", "area_color")] names(polygon_params) <- gsub("area_", "", names(polygon_params)) wtp_params <- extra_params[ names(extra_params) %in% "label.pos"] modifyList( list( area_include = TRUE, wtp_label.pos = wtp_params), list( icer = icer_params, point = point_params, area = polygon_params)) }
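
# Illustrative example (abridged output, hypothetical argument values): the
# prefix stripping above means that flat dot-arguments map onto the nested
# parameter lists used by the plotting code, while unrelated defaults are
# retained:
#
#   ceplane_geom_params(ICER_size = 2, point_colors = "blue")
#   #> $area_include : TRUE              (default retained)
#   #> $icer         : list(size = 2)
#   #> $point        : list(colors = "blue")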
/scratch/gouwar.j/cran-all/cranData/BCEA/R/ceplane_geom_params.R
#' CE-plane ggplot Parameters #' #' @template args-he #' @param wtp Willingness to pay #' @param pos_legend Position of legend #' @param graph_params Other graphical parameters #' @param ... Additional arguments #' #' @import ggplot2 #' @keywords internal #' ceplane_ggplot_params <- function(he, wtp, pos_legend, graph_params, ...) { ext_params <- ceplane_geom_params(...) graph_params$area <- modifyList(polygon_params(graph_params, wtp), graph_params$area) graph_params$legend <- make_legend_ggplot(he, pos_legend) default_params <- list( size = rel(3.5), wtp = list( geom = "text", x = graph_params$xlim[1], y = graph_params$ylim[1], hjust = "inward", vjust = "inward", label = paste0(" k = ", format(wtp, digits = 6), "\n"), size = convert_pts_to_mm(1), colour = "black"), icer = list( data = data.frame(x = colMeans(he$delta_e), y = colMeans(he$delta_c)), mapping = aes(x = .data$x, y = .data$y), color = "red", size = convert_pts_to_mm(0.8), inherit.aes = FALSE), icer_txt = list( geom = "text", label = ifelse(length(he$ICER) == 1, paste0("\n", "\U2022", " ICER = ", format(he$ICER, digits = 6, nsmall = 2), " "), ""), x = graph_params$xlim[2], y = graph_params$ylim[2], col = "red", size = convert_pts_to_mm(1), hjust = "inward", vjust = "inward"), point = list( shape = rep(19, he$n_comparisons), size = 4), line = list( color = "black"), area = list( # geom = "polygon", fill = graph_params$area$col, alpha = ifelse(ext_params$area_include, 1, 0), data = data.frame(x = graph_params$area$x, y = graph_params$area$y), mapping = aes(x = .data$x, y = .data$y), inherit.aes = FALSE), currency = "", icer_annot = FALSE) modifyList(default_params, graph_params) %>% modifyList(ext_params) }
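
# Precedence note (illustrative): chaining modifyList() as above means package
# defaults are overridden by the computed graph_params, which are in turn
# overridden by user-supplied dot-arguments; the merge is recursive, so
# untouched elements of nested lists survive. A toy demonstration:
.param_precedence_demo <- function() {
  defaults <- list(point = list(shape = 19, size = 4))
  computed <- list(point = list(size = 2))
  user     <- list(point = list(size = 6))
  out <- utils::modifyList(utils::modifyList(defaults, computed), user)
  stopifnot(out$point$size == 6, out$point$shape == 19)
  out
}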
/scratch/gouwar.j/cran-all/cranData/BCEA/R/ceplane_ggplot_params.R
#' Manual Legend Scales for the CE-Plane ggplot
#'
#' @keywords internal
#'
ceplane_legend_manual <- function(he, plot_params) {
  isfirst <- plot_params$ref_first

  list(
    scale_color_manual(
      labels = line_labels.default(he, ref_first = isfirst),
      values = plot_params$point$color),
    scale_shape_manual(
      labels = line_labels.default(he, ref_first = isfirst),
      values = plot_params$point$shape))
}
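
# Note (illustrative): passing identical `labels` and aligned `values` to both
# manual scales is what lets ggplot2 merge the colour and shape keys into a
# single legend; for two comparisons the calls are equivalent to
#
#   scale_color_manual(labels = c("1 vs 2", "1 vs 3"), values = c("red", "blue"))
#   scale_shape_manual(labels = c("1 vs 2", "1 vs 3"), values = c(16, 17))
#
# (the colour and shape values here are hypothetical). If the labels differed
# between the two scales, each aesthetic would get its own legend.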
/scratch/gouwar.j/cran-all/cranData/BCEA/R/ceplane_legend_manual.R
#' Cost-Effectiveness Plane Plot By Graph Device #' #' Choice of base R, \pkg{ggplot2} or \pkg{plotly}. #' #' @template args-he #' @param wtp Willingness to pay threshold; default 25,000 #' @param pos_legend Legend position #' @param graph_params Graph parameters in \pkg{ggplot2} format #' @param ... Additional arguments #' #' @examples #' # single comparator #' data(Vaccine, package = "BCEA") #' #' he <- bcea(eff, cost) #' ceplane.plot(he, graph = "base") #' #' \dontrun{ #' # need to provide all the defaults because thats what #' # ceplane.plot() does #' #' graph_params <- list(xlab = "x-axis label", #' ylab = "y-axis label", #' title = "my title", #' xlim = c(-0.002, 0.001), #' ylim = c(-13, 5), #' point = list(sizes = 1, #' colors = "darkgrey"), #' area = list(color = "lightgrey")) #' #' he$delta_e <- as.matrix(he$delta_e) #' he$delta_c <- as.matrix(he$delta_c) #' #' BCEA::ceplane_plot_base(he, graph_params = graph_params) #' #' ## single non-default comparator #' #' #' ## multiple comparators #' data(Smoking) #' #' graph_params <- list(xlab = "x-axis label", #' ylab = "y-axis label", #' title = "my title", #' xlim = c(-1, 2.5), #' ylim = c(-1, 160), #' point = list(sizes = 0.5, #' colors = grey.colors(3, start = 0.1, end = 0.7)), #' area = list(color = "lightgrey")) #' #' he <- bcea(eff, cost, ref = 4, Kmax = 500, interventions = treats) #' #' BCEA::ceplane_plot_base(he, #' wtp = 200, #' pos_legend = FALSE, #' graph_params = graph_params) #' } #' #' @name ceplane_plot_graph NULL #' @rdname ceplane_plot_graph #' #' @return For base R returns a plot #' @keywords hplot #' ceplane_plot_base.bcea <- function(he, wtp = 25000, pos_legend, graph_params, ...) { plot_params <- ceplane_base_params(he, wtp, graph_params) legend_params <- ceplane_legend_base(he, pos_legend, plot_params) add_ceplane_setup(plot_params) add_ceplane_polygon(plot_params) add_ceplane_points(he, plot_params) add_axes() add_ceplane_icer(he, plot_params) add_ceplane_k_txt(plot_params) add_ceplane_legend(legend_params) } #' @rdname ceplane_plot_graph #' ceplane_plot_base <- function(he, ...) { UseMethod('ceplane_plot_base', he) } #' @rdname ceplane_plot_graph #' #' @return For \pkg{ggplot2} returns \pkg{ggplot2} object #' #' @import ggplot2 #' @importFrom grid unit #' @importFrom purrr keep #' @importFrom scales label_dollar #' #' @keywords hplot #' #' @examples #' #' data(Vaccine) #' he <- bcea(eff, cost) #' #' ceplane.plot(he, graph = "ggplot2") #' ceplane.plot(he, wtp=10000, graph = "ggplot2", #' point = list(colors = "blue", sizes = 2), #' area = list(col = "springgreen3")) #' #' data(Smoking) #' he <- bcea(eff, cost, ref = 4, Kmax = 500, interventions = treats) #' #' ceplane.plot(he, graph = "ggplot2") #' #' ceplane.plot(he, #' wtp = 200, #' pos = "right", #' ICER_size = 2, #' graph = "ggplot2") #' #' ceplane.plot(he, #' wtp = 200, #' pos = TRUE, #' graph = "ggplot2") #' #' ceplane.plot(he, #' graph = "ggplot2", #' wtp=200, #' theme = ggplot2::theme_linedraw()) #' ceplane_plot_ggplot.bcea <- function(he, wtp = 25000, pos_legend, graph_params, ...) { # single long format for ggplot data delta_ce <- merge( melt( cbind(sim = seq_len(nrow(he$delta_c)), he$delta_c), variable.name = "comparison", value.name = "delta_c", id.vars = "sim"), melt( cbind(sim = seq_len(nrow(he$delta_e)), he$delta_e), variable.name = "comparison", value.name = "delta_e", id.vars = "sim"), by = c("sim", "comparison")) plot_params <- ceplane_ggplot_params(he, wtp, pos_legend, graph_params, ...) 
theme_add <- purrr::keep(list(...), is.theme) ggplot(delta_ce, aes(x = .data$delta_e, y = .data$delta_c, group = factor(.data$comparison), col = factor(.data$comparison), shape = factor(.data$comparison))) + do.call(geom_polygon, plot_params$area) + theme_ceplane() + theme_add + geom_point(size = plot_params$point$size) + ceplane_legend_manual(he, plot_params) + geom_hline(yintercept = 0, colour = "grey") + geom_vline(xintercept = 0, colour = "grey") + geom_text(data = data.frame(x = colMeans(he$delta_e), y = colMeans(he$delta_c)), aes(x = .data$x, y = .data$y, label = if (plot_params$icer_annot) { line_labels.default( he, ref_first = graph_params$ref_first) } else {""}), inherit.aes = FALSE, show.legend = FALSE, hjust = 0, vjust = 0) + scale_y_continuous( labels = scales::label_dollar(prefix = plot_params$currency)) + coord_cartesian(xlim = plot_params$xlim, ylim = plot_params$ylim, expand = FALSE) + do.call(labs, list(title = plot_params$title, x = plot_params$xlab, y = plot_params$ylab)) + do.call(geom_abline, c(slope = wtp, plot_params$line)) + do.call(geom_point, plot_params$icer) + do.call(annotate, plot_params$wtp) + do.call(annotate, plot_params$icer_txt) + do.call(theme, plot_params$legend) } #' @rdname ceplane_plot_graph #' ceplane_plot_ggplot <- function(he, ...) { UseMethod('ceplane_plot_ggplot', he) } #' @rdname ceplane_plot_graph #' #' @return For \pkg{plotly} returns a plot in the Viewer #' ceplane_plot_plotly.bcea <- function(he, wtp = 25000, pos_legend, graph_params, ...) { comp_label <- paste(he$interventions[he$ref], "vs", he$interventions[he$comp]) # single long format for plotting data delta_ce <- merge( melt( cbind(sim = seq_len(nrow(he$delta_c)), he$delta_c), variable.name = "comparison", value.name = "delta_c", id.vars = "sim"), melt( cbind(sim = seq_len(nrow(he$delta_e)), he$delta_e), variable.name = "comparison", value.name = "delta_e", id.vars = "sim"), by = c("sim", "comparison")) graph_params$ICER_size <- ifelse(he$n_comparisons == 1, 8, 0) if (length(graph_params$point$colors) != length(comp_label)) graph_params$point$colors <- rep_len(graph_params$point$color, length(comp_label)) if (length(graph_params$point$sizes) != length(comp_label)) graph_params$point$sizes <- rep_len(graph_params$point$size, length(comp_label)) if (!"colors" %in% names(graph_params$ICER) || length(graph_params$ICER$colors) != length(comp_label)) graph_params$ICER <- c(graph_params$ICER, list(colors = rep_len("red", length(comp_label)))) if (!"sizes" %in% names(graph_params$ICER) || length(graph_params$ICER$sizes) != length(comp_label)) graph_params$ICER$sizes <- rep_len(graph_params$ICER_size, length(comp_label)) # plot limits range.e <- range(delta_ce$delta_e) range.c <- range(delta_ce$delta_c) range.e[1] <- ifelse(range.e[1] < 0, yes = range.e[1], no = -range.e[1]) range.c[1] <- ifelse(range.c[1] < 0, yes = range.c[1], no = -range.c[1]) # ce plane data x1 <- range.e[1] - 2*abs(diff(range.e)) x2 <- range.e[2] + 2*abs(diff(range.e)) x <- c(x1, x2, x2) y <- c(x1*wtp, x2*wtp, x1*wtp) plane <- data.frame(x = x, y = y) # build a trapezoidal plane instead of a triangle if # the y value is less than the minimum difference on costs if (y[1] > 1.2*range.c[1]) plane <- rbind(plane, c(x2, 2*range.c[1]), #new bottom-right vertex c(x1, 2*range.c[1])) #new bottom-left vertex xrng <- c(ifelse(prod(range.e) < 0, range.e[1]*1.1, ifelse(range.e[1] < 0, range.e[1]*1.1, -(range.e[2] - range.e[1])*0.1)), ifelse(prod(range.e) < 0, range.e[2]*1.1, ifelse(range.e[2] > 0, range.e[2]*1.1, (range.e[2] - 
range.e[1])*0.1))) yrng <- c(ifelse(prod(range.c) < 0, range.c[1]*1.1, ifelse(range.c[1] < 0, range.c[1]*1.1, -(range.c[2] - range.c[1])*0.1)), ifelse(prod(range.c) < 0, range.c[2]*1.1, ifelse(range.c[2] > 0, range.c[2]*1.1, (range.c[2] - range.c[1])*0.1))) pt_cols <- ifelse(test = grepl(pattern = "^rgba\\(", x = graph_params$point$colors), yes = plotly::toRGB(graph_params$point$colors), no = graph_params$point$colors) # plot set-up ceplane <- plotly::plot_ly(colors = pt_cols) ceplane <- ceplane %>% plotly::add_trace( type = "scatter", mode = "markers", data = delta_ce, y = ~delta_c, x = ~delta_e, color = ~comparison, hoverinfo = "name+x+y") if (graph_params$area_include) { ceplane <- ceplane %>% plotly::add_trace( type = "scatter", mode = "lines", data = plane, x = ~x, y = ~y, line = list(color = ifelse( grepl(pattern = "^rgba\\(", x = graph_params$area$line_color), graph_params$area$line_color, plotly::toRGB(graph_params$area$line_color, 1))), showlegend = FALSE, inherit = FALSE) graph_params$area$col <- "grey" poly_col <- ifelse( grepl(pattern = "^rgba\\(", x = graph_params$area$col), graph_params$area$color, plotly::toRGB(graph_params$area$col, 0.5)) ceplane <- ceplane %>% plotly::add_polygons( data = plane, x = ~x, y = ~y, fillcolor = poly_col, line = list(color = 'transparent'), opacity = 0.3, name = "CEA area", hoveron = "points", showlegend = FALSE, inherit = FALSE) } # ICER if (!all(graph_params$ICER$sizes <= 0)) { means_table <- tabulate_means(he, comp_label) for (comp in seq_len(he$n_comparisons)) { ceplane <- plotly::add_trace( ceplane, type = "scatter", mode = "markers", data = means_table[comp, ], x = ~lambda.e, y = ~lambda.c, marker = list( color = graph_params$ICER$colors[comp], size = graph_params$ICER$sizes[comp]), name = ~paste( ifelse(he$n_comparisons > 1, yes = "",#as.character(label), no = ""), "ICER:", prettyNum(round(ICER, 2), big.mark = ","))) } } legend_params <- make_legend_plotly(pos_legend) ceplane <- plotly::layout( ceplane, title = graph_params$title, xaxis = list( hoverformat = ".2f", range = xrng, title = graph_params$xlab), yaxis = list( hoverformat = ".2f", range = yrng, title = graph_params$ylab), showlegend = TRUE, legend = legend_params) plotly::config(ceplane, displayModeBar = FALSE) } #' @rdname ceplane_plot_graph #' @template args-he #' @param ... Additional arguments #' ceplane_plot_plotly <- function(he, ...) { UseMethod('ceplane_plot_plotly', he) }
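
# Usage note: like the base and ggplot2 engines, the plotly method is reached
# through ceplane.plot() rather than called directly. As documented above,
# rgba strings are accepted for the point/ICER/area colours, e.g.
#
#   data(Smoking, package = "BCEA")
#   he <- bcea(eff, cost, ref = 4, Kmax = 500, interventions = treats)
#   ceplane.plot(he, wtp = 200, graph = "plotly",
#                point_colors = "rgba(0, 0, 255, 0.5)")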
/scratch/gouwar.j/cran-all/cranData/BCEA/R/ceplane_plot_graph.R
#' Calculate Credible Intervals
#'
#' For the expected incremental benefit plot.
#'
#' @template args-he
#' @param alpha_cri Significance level for the credible interval
#'   (e.g. 0.05 for a 95% interval), between 0 and 1
#' @param cri.quantile Use quantiles of the simulated distribution for the
#'   interval bounds? Logical; if `FALSE`, a normal approximation is used
#'   instead.
#'
#' @return Data frame of lower and upper interval bounds by comparison
#'   and willingness to pay
#'
#' @importFrom stats qnorm sd quantile
#'
compute_eib_cri <- function(he,
                            alpha_cri = 0.05,
                            cri.quantile = TRUE) {

  if (he$n_comparisons > 1) {
    margin <- c(1, 3)
  } else {
    margin <- 1
  }

  ##TODO: do low and high together so only one call to matlines()
  compute_cri <- function(x, low = TRUE) {
    tau <- ifelse(low, alpha_cri/2, 1 - alpha_cri/2)
    if (cri.quantile) {
      return(quantile(x, tau))
    } else {
      # normal approximation: qnorm(tau) is negative for the lower bound
      return(mean(x) + qnorm(tau) * sd(x))
    }
  }

  cri <- data.frame(
    low = c(apply(he$ib, margin, FUN = compute_cri, low = TRUE)),
    upp = c(apply(he$ib, margin, FUN = compute_cri, low = FALSE)),
    comp = as.factor(
      rep(1:he$n_comparisons, each = length(he$k))),
    k = he$k)

  return(cri)
}
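
# Illustrative comparison (not package API) of the two interval types above:
# with cri.quantile = TRUE the bounds are empirical quantiles of the
# incremental benefit draws; otherwise a normal approximation mean +/- z*sd
# is used. For skewed distributions the two can differ noticeably:
.cri_methods_demo <- function(n = 10000, alpha = 0.05) {
  x <- stats::rexp(n)                              # skewed toy sample
  quant  <- quantile(x, c(alpha/2, 1 - alpha/2))
  normal <- mean(x) + c(-1, 1) * qnorm(1 - alpha/2) * sd(x)
  rbind(quantile = quant, normal = normal)         # normal lower bound < 0
}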
/scratch/gouwar.j/cran-all/cranData/BCEA/R/compute_eib_cri.R
# compute all bcea statistics --------------------------------------------- #' Compute k^* #' #' Find willingness-to-pay threshold when optimal decision changes. #' #' \deqn{k^* := \min\{k : IB < 0 \}} #' #' The value of the break-even point corresponds to the ICER and quantifies #' the point at which the decision-maker is indifferent between the two options. #' #' @param k Willingness-to-pay grid approximation of the budget willing to invest (vector) #' @param best Best intervention for each `k` (int) #' @param ref Reference intervention (int) #' #' @return integer representing intervention #' @seealso [ceac.plot()] #' #' @export #' compute_kstar <- function(k, best, ref) { if (all(best == ref)) { return(numeric()) } flip <- c(0, diff(best)) != 0 k[flip] } #' Compute Cost-Effectiveness Acceptability Curve #' #' @param ib Incremental benefit #' @return Array with dimensions (interv x k) #' @seealso [ceac.plot()] #' #' @export #' compute_CEAC <- function(ib) { apply(ib > 0, c(1,3), mean) } #' Compute Expected Incremental Benefit #' #' A summary measure useful to assess the potential changes in the decision #' under different scenarios. #' #' When considering a pairwise comparison #' (e.g. in the simple case of a reference intervention \eqn{t = 1} and a comparator, #' such as the status quo, \eqn{t = 0}), it is defined as the difference between the #' expected utilities of the two alternatives: #' #' \deqn{eib := \mbox{E}[u(e,c;1)] - \mbox{E}[u(e,c;0)] = \mathcal{U}^1 - \mathcal{U}^0.} #' #' Analysis of the expected incremental benefit describes how the decision changes #' for different values of the threshold. The EIB marginalises out the uncertainty, #' and does not incorporate and describe explicitly the uncertainty in the outcomes. #' To overcome this problem the tool of choice is the CEAC. #' #' @param ib Incremental benefit #' @return Array with dimensions (interv x k) #' @seealso [ceac.plot()], [compute_CEAC()], [compute_IB()] #' #' @export #' compute_EIB <- function(ib) { apply(ib, 3, function(x) apply(x, 1, mean)) # apply(ib, 3, function(x) rowMeans(x)) ##TODO: test } #' Compute Ustar Statistic #' #' The maximum utility value among the comparators, indicating which #' intervention produced the most benefits at each simulation. #' #' @param U Net monetary benefit (sim x k x intervs) #' #' @return Array with dimensions (sim x k) #' #' @export #' compute_Ustar <- function(U) { n_sim <- dim(U)[[1]] K <- dim(U)[[2]] Ustar <- matrix(NA, n_sim, K) for (i in seq_len(K)) { Ustar[, i] <- rowMax(U[, i, ]) } Ustar } #' Compute Value of Information #' #' The difference between the maximum utility computed for the current #' parameter configuration \eqn{U^*} and the utility of the intervention which #' is associated with the maximum utility overall. #' #' The value of obtaining additional information on the parameter \eqn{\theta} #' to reduce the uncertainty in the decisional process. #' It is defined as: #' #' \deqn{\textrm{VI}(\theta) := U^*(\theta) - \mathcal{U}^*} #' #' with \eqn{U^*(\theta)} the maximum utility value for the given simulation #' among all comparators and \eqn{\mathcal{U}^*(\theta)} the expected utility #' gained by the adoption of the cost-effective intervention. 
#' #' @param Ustar Maximum utility value (sim x k) #' @param U Net monetary benefit (sim x k x interv) #' #' @return Array with dimensions (sim x k) #' @seealso [compute_ol()] #' #' @export #' compute_vi <- function(Ustar, U) { if (any(dim(U)[1] != dim(Ustar)[1], dim(U)[2] != dim(Ustar)[2])) { stop("dimensions of inputs don't correspond", call. = FALSE) } n_sim <- dim(U)[[1]] K <- dim(U)[[2]] vi <- matrix(NA, nrow = n_sim, ncol = K) for (i in seq_len(K)) { vi[, i] <- Ustar[, i] - max(apply(U[, i, ], 2, mean)) } vi } #' Compute Opportunity Loss #' #' The difference between the maximum utility computed for the current #' parameter configuration (e.g. at the current simulation) \eqn{U^*} and the current #' utility of the intervention associated with the maximum utility overall. #' #' In mathematical notation, #' \deqn{\textrm{OL}(\theta) := U^*(\theta) - U(\theta^\tau)} #' #' where \eqn{\tau} is the intervention associated with the overall maximum utility #' and \eqn{U^*(\theta)} is the maximum utility value among the comparators in the given simulation. #' The opportunity loss is a non-negative quantity, since \eqn{U(\theta^\tau)\leq U^*(\theta)}. #' #' In all simulations where the intervention is more #' cost-effective (i.e. when incremental benefit is positive), then \eqn{\textrm{OL}(\theta) = 0} #' as there would be no opportunity loss, if the parameter configuration were the #' one obtained in the current simulation. #' #' @param Ustar Maximum utility value (sim x k) #' @param U Net monetary benefit (sim x k x interv) #' @param best Best intervention for given willingness-to-pay (k) #' #' @return Array with dimensions (sim x k) #' @seealso [compute_vi()] #' #' @export #' compute_ol <- function(Ustar, U, best) { if (any(dim(U)[1] != dim(Ustar)[1], dim(U)[2] != dim(Ustar)[2], dim(U)[2] != length(best))) { stop("dimensions of inputs don't correspond", call. = FALSE) } n_sim <- dim(U)[[1]] K <- dim(U)[[2]] ol <- matrix(NA, nrow = n_sim, ncol = K) for (i in seq_len(K)) { ol[, i] <- Ustar[, i] - U[, i, best[i]] } ol } #' rowMax <- function(dat) do.call(pmax, as.data.frame(dat)) #' Compute U Statistic #' #' Sample of net (monetary) benefit for each #' willingness-to-pay threshold and intervention. #' #' @param df_ce Cost-effectiveness dataframe #' @param k Willingness to pay vector #' #' @return Array with dimensions (sim x k x ints) #' #' @export #' compute_U <- function(df_ce, k) { sims <- sort(unique(df_ce$sim)) ints <- sort(unique(df_ce$ints)) interv_names <- unique(df_ce$interv_names) U_df <- data.frame(k = rep(k, each = nrow(df_ce)), df_ce) %>% mutate(U = .data$k*.data$eff1 - .data$cost1) %>% arrange(.data$ints, .data$k, .data$sim) array(U_df$U, dim = c(length(sims), length(k), length(ints)), dimnames = list(sims = NULL, k = NULL, ints = interv_names)) } #' Compute Incremental Benefit #' #' Sample of incremental net monetary benefit for each #' willingness-to-pay threshold, \eqn{k}, and comparator. 
#' #' Defined as: #' #' \deqn{IB = u(e,c; 1) - u(e,c; 0).} #' #' If the net benefit function is used as utility function, #' the definition can be re-written as #' #' \deqn{IB = k\cdot\Delta_e - \Delta_c.} #' #' @param df_ce Dataframe of cost and effectiveness deltas #' @param k Vector of willingness to pay values #' #' @import dplyr #' #' @return Array with dimensions (k x sim x ints) #' #' @export #' @seealso [compute_EIB()] #' compute_IB <- function(df_ce, k) { sims <- unique(df_ce$sim) ints <- unique(df_ce$ints) comp_names <- comp_names_from_(df_ce) df_ce <- df_ce %>% filter(ints != .data$ref) %>% rename(comps = "ints") ib_df <- data.frame(k = rep(k, each = nrow(df_ce)), df_ce) %>% mutate(ib = .data$k*.data$delta_e - .data$delta_c) %>% arrange(.data$comps, .data$sim, .data$k) array(ib_df$ib, dim = c(length(k), length(sims), length(ints) - 1), dimnames = list(k = NULL, sims = NULL, ints = comp_names)) } #' Compute Incremental Cost-Effectiveness Ratio #' #' Defined as #' #' \deqn{ICER = \Delta_c/\Delta_e} #' #' @param df_ce Cost-effectiveness dataframe #' @importFrom stats setNames #' #' @return ICER for all comparisons #' @export #' compute_ICER <- function(df_ce) { comp_names <- comp_names_from_(df_ce) df_ce %>% filter(.data$ints != .data$ref) %>% group_by(.data$ints) %>% summarise( ICER = mean(.data$delta_c)/mean(.data$delta_e)) %>% ungroup() %>% select("ICER") %>% # required to match current format unlist() %>% setNames(comp_names) } #' Compute Expected Value of Information #' #' @param ol Opportunity loss #' @return EVI #' @export #' compute_EVI <- function(ol) { colMeans(ol) } #' Comparison Names From #' @param df_ce Cost-effectiveness dataframe #' @keywords internal #' comp_names_from_ <- function(df_ce) { df_ce[, c("ref", "ints", "interv_names")] %>% filter(.data$ref != .data$ints) %>% distinct() %>% arrange(.data$ints) %>% select("interv_names") %>% unlist() } #' Compute Cost-Effectiveness Acceptability Frontier #' #' @param p_best_interv Probability of being best intervention #' compute_ceaf <- function(p_best_interv) { apply(p_best_interv, 1, max) } #' Compute Probability Best Intervention #' @template args-he #' compute_p_best_interv <- function(he) { intervs <- c(he$comp, he$ref) p_best_interv <- array(NA, c(length(he$k), length(intervs))) for (i in seq_along(intervs)) { for (k in seq_along(he$k)) { is_interv_best <- he$U[, k, ] <= he$U[, k, intervs[i]] rank <- apply(!is_interv_best, 1, sum) p_best_interv[k, i] <- mean(rank == 0) } } p_best_interv } #' Compute NB for mixture of interventions #' #' @template args-he #' @param value Mixture weights #' compute_Ubar <- function(he, value) { qU <- array(NA, c(he$n_sim,length(he$k), he$n_comparators)) for (j in seq_len(he$n_comparators)) { qU[, , j] <- value[j]*he$U[, , j] } apply(qU, c(1, 2), sum) }
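
# End-to-end toy example (illustrative, not exported): for a single comparison
# the chain above reduces to IB = k*delta_e - delta_c, EIB = E[IB],
# CEAC = P(IB > 0), and the break-even willingness to pay (where the optimal
# decision flips) sits at the ICER.
.nb_chain_demo <- function() {
  set.seed(1)
  delta_e <- stats::rnorm(1000, mean = 0.5, sd = 0.1)
  delta_c <- stats::rnorm(1000, mean = 100, sd = 10)
  k  <- seq(0, 500, by = 10)
  ib <- outer(k, delta_e) - rep(1, length(k)) %o% delta_c   # (k x sim)
  eib  <- rowMeans(ib)
  ceac <- rowMeans(ib > 0)
  kstar <- k[which(eib > 0)[1]]   # first k favouring the reference
  c(icer = mean(delta_c)/mean(delta_e),   # both icer and kstar near 200
    kstar = kstar,
    ceac_at_kstar = ceac[match(kstar, k)])
}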
/scratch/gouwar.j/cran-all/cranData/BCEA/R/compute_xxx.R