#implements order MCMC on a defined search space, sampling version
#partly derived from <doi:10.1080/01621459.2015.1133426>
orderMCMCbase<-function(n,nsmall,startorder,iterations,stepsave,moveprobs,parenttable,scoretable,aliases,numparents,
rowmaps,scoresmatrices,numberofparentsvec,gamma=1,bgnodes,matsize,chainout=FALSE,compress=TRUE) {
result<-list()
if(!is.null(bgnodes)) {
mainnodes<-c(1:matsize)[-bgnodes]
} else {mainnodes<-c(1:matsize)}
currentpermy<-startorder #starting order represented as a permutation
currentorderscores<-orderscoreBase(matsize,currentpermy[1:nsmall],c(1:nsmall),parenttable,aliases,numparents,
rowmaps,scoretable,scoresmatrices,currentpermy) #starting score
currenttotallogscore<-sum(currentorderscores$totscores[mainnodes]) #log total score of all DAGs in the starting order
currentDAG<-samplescoreplus1(matsize,mainnodes,currentorderscores,plus1lists=NULL,scoretable,
scoresmatrices,parenttable,numberofparentsvec,aliases) #score of a single DAG sampled from the starting order
L1 <- list() # stores the adjacency matrix of a DAG sampled from the orders
L2 <- vector() # stores its log BGe score
L3 <- vector() # stores the log BGe score of the entire order
L4 <- list() # stores the orders as permutations
zlimit<- floor(iterations/stepsave) + 1 # number of outer iterations
length(L1) <- zlimit
length(L2) <- zlimit
length(L3) <- zlimit
length(L4) <- zlimit
L1[[1]]<-currentDAG$incidence #starting DAG adjacency matrix
L2[1]<-currentDAG$logscore #starting DAG score
L3[1]<-currenttotallogscore #starting order score
L4[[1]]<-currentpermy[1:nsmall] #starting order
maxdag<-currentDAG$incidence
maxscore<-L2[1]
moveprobsstart<-moveprobs
for (z in 2:zlimit){ #the MCMC chain loop with 'iterations' steps is in two parts
for (count in 1:stepsave){ #since we only save the results to the lists each 'stepsave'
chosenmove<-sample.int(4,1,prob=moveprobs)
if(chosenmove<4){ # moves 1-3 change the order; if it is 4 we stay still
proposedpermy<-currentpermy #sample a new order by swapping two elements
switch(as.character(chosenmove),
"1"={ # swap any two elements at random
sampledelements<-sample.int(nsmall,2,replace=FALSE) #chosen at random
},
"2"={ # swap any adjacent elements
k<-sample.int(nsmall-1,1) #choose the smaller index at random
sampledelements<-c(k,k+1)
},
"3"={ # move one element to a position sampled according to order scores
sampledpos<-sample.int(nsmall,1)
},
{# if no valid move is chosen, we have a problem
cat("The move sampling has failed! \n")
}) #end switch
if(chosenmove<3){
proposedpermy[sampledelements]<-currentpermy[rev(sampledelements)] #proposed new order
scorepositions<-c(min(sampledelements):max(sampledelements))
rescorenodes<-proposedpermy[scorepositions] #we only need to rescore these nodes between the swapped elements to speed up the calculation
proposedorderrescored<-orderscoreBase(matsize,rescorenodes,scorepositions,parenttable,aliases,numparents,rowmaps,
scoretable,scoresmatrices,proposedpermy)
proposedtotallogscore<-currenttotallogscore-sum(currentorderscores$totscores[rescorenodes])+sum(proposedorderrescored$totscores[rescorenodes]) #and the new log total score by updating only the necessary nodes
scoreratio<-exp((proposedtotallogscore-currenttotallogscore)*gamma) #acceptance probability
if(runif(1)<scoreratio){ #Move accepted then set the current order and scores to the proposal
currentpermy<-proposedpermy
currentorderscores$therow[rescorenodes]<-proposedorderrescored$therow[rescorenodes]
currentorderscores$totscores[rescorenodes]<-proposedorderrescored$totscores[rescorenodes]
currenttotallogscore<-proposedtotallogscore
}
} else {
neworder<-positionscorebase(matsize,nsmall,currentorderscores,sampledpos,currentpermy,aliases,
rowmaps,numparents,scoretable,scoresmatrices)
currentpermy<-neworder$order
currentorderscores<-neworder$score
currenttotallogscore<-neworder$tot #positionscorebase returns the new total log score as $tot
}
}
}
currentDAG<-samplescoreplus1(matsize,mainnodes,currentorderscores,plus1lists=NULL,scoretable,scoresmatrices,
parenttable,numberofparentsvec,aliases)
if(chainout) {
if(compress) {
L1[[z]]<-Matrix(currentDAG$incidence,sparse=TRUE) #store compressed adjacency matrix of a sampled DAG each 'stepsave'
} else {
L1[[z]]<-currentDAG$incidence #store adjacency matrix of a sampled DAG each 'stepsave'
}
}
L2[z]<-currentDAG$logscore #and log score of a sampled DAG
L3[z]<-currenttotallogscore #and the current order score
L4[[z]]<-currentpermy[1:nsmall] #and store current order
if(L2[z]>maxscore) {
maxscore<-L2[z]
maxdag<-currentDAG$incidence
}
}
result$incidence<-L1
result$DAGscores<-L2
result$orderscores<-L3
result$orders<-L4
result$maxscore<-maxscore
result$maxdag<-Matrix(maxdag,sparse=TRUE)
return(result)
}
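#A minimal, self-contained toy sketch (not part of BiDAG) of the swap move and
#log-scale Metropolis acceptance used above; the real chain rescores only the
#nodes between the swapped positions, while here the full order score is
#recomputed for clarity. The argument 'logscore' is a hypothetical stand-in.
toyOrderMCMC<-function(logscore,n,iterations,gamma=1) {
permy<-sample.int(n) #random starting order
current<-logscore(permy)
for (s in seq_len(iterations)) {
proposed<-permy
idx<-sample.int(n,2) #swap two random elements (move 1)
proposed[idx]<-permy[rev(idx)]
propscore<-logscore(proposed)
if (runif(1)<exp((propscore-current)*gamma)) { #Metropolis acceptance on the log scale
permy<-proposed
current<-propscore
}
}
list(order=permy,logscore=current)
}
#e.g. toyOrderMCMC(function(p) -sum(abs(p-seq_along(p))),n=5,iterations=100)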
## /scratch/gouwar.j/cran-all/cranData/BiDAG/R/orderMCMCbase.R
#implements the order MCMC algorithm on a defined search space, MAP version
#partly derived from <doi:10.1080/01621459.2015.1133426>
orderMCMCbasemax<-function(n,nsmall,startorder,iterations,stepsave,moveprobs,parenttable,scoretable,aliases,numparents,
rowmaps,maxmatrices,numberofparentsvec,gamma=1,bgnodes,matsize,chainout=FALSE,compress=TRUE) {
result<-list()
if(!is.null(bgnodes)) {
mainnodes<-c(1:matsize)[-bgnodes]
} else {mainnodes<-c(1:matsize)}
currentpermy<-startorder #starting order represented as a permutation
currentorderscores<-orderscoreBasemax(matsize,startorder[1:nsmall],c(1:nsmall),parenttable,aliases,numparents,
rowmaps,scoretable,maxmatrices,currentpermy) #starting score
currenttotallogscore<-sum(currentorderscores$totscores[mainnodes]) #log total score of all DAGs in the starting order
currentDAG<-samplescoreplus1.max(matsize=matsize,samplenodes=mainnodes,scores=currentorderscores,plus1lists=NULL,
maxmatrices=maxmatrices,scoretable=scoretable,
parenttable,numberofparentsvec,aliases) #score of a single DAG sampled from the starting order
L1 <- list() # stores the adjacency matrix of a DAG sampled from the orders
L2 <- vector() # stores its log BGe score
L3 <- vector() # stores the log BGe score of the entire order
L4 <- list() # stores the orders as permutations
zlimit<- floor(iterations/stepsave) + 1 # number of outer iterations
length(L1) <- zlimit
length(L2) <- zlimit
length(L3) <- zlimit
length(L4) <- zlimit
L1[[1]]<-currentDAG$incidence #starting DAG adjacency matrix
L2[1]<-currentDAG$logscore #starting DAG score
L3[1]<-currenttotallogscore #starting order score
L4[[1]]<-currentpermy[1:nsmall] #starting order
maxdag<-currentDAG$incidence
maxscore<-L2[1]
moveprobsstart<-moveprobs
for (z in 2:zlimit){ #the MCMC chain loop with 'iterations' steps is in two parts
for (count in 1:stepsave){ #since we only save the results to the lists each 'stepsave'
chosenmove<-sample.int(4,1,prob=moveprobs)
if(chosenmove<4){ # moves 1-3 change the order; if it is 4 we stay still
proposedpermy<-currentpermy #sample a new order by swapping two elements
switch(as.character(chosenmove),
"1"={ # swap any two elements at random
sampledelements<-sample.int(nsmall,2,replace=FALSE) #chosen at random
},
"2"={ # swap any adjacent elements
k<-sample.int(nsmall-1,1) #choose the smaller index at random
sampledelements<-c(k,k+1)
},
"3"={ # find best position for 1 element
sampledpos<-sample.int(nsmall,1)
},
"4"={ # stay still
},
{# if no valid move is chosen, we have a problem
cat("The move sampling has failed! \n")
}) #end switch
if(chosenmove<3){
scorepositions<-c(min(sampledelements):max(sampledelements))
proposedpermy[sampledelements]<-currentpermy[rev(sampledelements)] #proposed new order
rescorenodes<-proposedpermy[scorepositions] #we only need to rescore these nodes between the swapped elements to speed up the calculation
proposedorderrescored<-orderscoreBasemax(matsize,rescorenodes,scorepositions,parenttable,aliases,numparents,rowmaps,scoretable,maxmatrices,proposedpermy)
proposedtotallogscore<-currenttotallogscore-sum(currentorderscores$totscores[rescorenodes])+sum(proposedorderrescored$totscores[rescorenodes]) #and the new log total score by updating only the necessary nodes
scoreratio<-exp((proposedtotallogscore-currenttotallogscore)*gamma) #acceptance probability
if(runif(1)<scoreratio){ #Move accepted then set the current order and scores to the proposal
currentpermy<-proposedpermy
currentorderscores$therow[rescorenodes]<-proposedorderrescored$therow[rescorenodes]
currentorderscores$totscores[rescorenodes]<-proposedorderrescored$totscores[rescorenodes]
currenttotallogscore<-proposedtotallogscore
}
} else {
neworder<-positionscorebasemax(matsize,nsmall,currentorderscores,sampledpos,currentpermy,aliases,
rowmaps,numparents,scoretable,maxmatrices)
currentorderscores<-neworder$score
currentpermy<-neworder$order
currenttotallogscore<-sum(currentorderscores$totscores)
}
}
}
currentDAG<-samplescoreplus1.max(n,mainnodes,currentorderscores,plus1lists=NULL,maxmatrices,scoretable,
parenttable,numberofparentsvec,aliases)
if(chainout) {
if(compress) {
L1[[z]]<-Matrix(currentDAG$incidence,sparse=TRUE) #store compressed adjacency matrix of a sampled DAG each 'stepsave'
} else {
L1[[z]]<-currentDAG$incidence #store adjacency matrix of a sampled DAG each 'stepsave'
}
}
L2[z]<-currentDAG$logscore #and log score of a sampled DAG
L3[z]<-currenttotallogscore #and the current order score
L4[[z]]<-currentpermy[1:nsmall] #and store current order
if(L2[z]>maxscore) {
maxscore<-L2[z]
maxdag<-currentDAG$incidence
}
}
result$incidence<-L1
result$DAGscores<-L2
result$orderscores<-L3
result$orders<-L4
result$maxscore<-maxscore
result$maxdag<-Matrix(maxdag,sparse=TRUE)
return(result)
}
## /scratch/gouwar.j/cran-all/cranData/BiDAG/R/orderMCMCbasemax.R
orderMCMCmain<-function(param,iterations,stepsave,MAP=TRUE, posterior=0.5,
startorder=c(1:n),moveprobs,plus1=FALSE,chainout=TRUE,
scoreout=FALSE,startspace=NULL,blacklist=NULL,gamma=1,verbose=FALSE,alpha=NULL,
hardlimit=ifelse(plus1,15,22),
cpdag=FALSE,addspace=NULL,scoretable=NULL,compress=TRUE){
result<-list()
maxobj<-list()
MCMCtraces<-list()
n<-param$n
nsmall<-param$nsmall
matsize<-ifelse(param$DBN,n+nsmall,n)
#defining startorder and updatenodes
if(!param$DBN) {
if(param$bgn!=0) {
updatenodes<-c(1:n)[-param$bgnodes]
} else {
updatenodes<-c(1:n)
}
} else { #for DBNs startorder is defined in main.R
updatenodes<-c(1:nsmall)
}
maxorder<-startorder
#creating blacklist objects
if (is.null(blacklist)) {
blacklist<-matrix(0,nrow=matsize,ncol=matsize)
}
diag(blacklist)<-1
if(!is.null(param$bgnodes)) {
for(i in param$bgnodes) {
blacklist[,i]<-1
}
}
#defining startskel
if (!is.null(scoretable)) {
if(is(scoretable,"iterativeMCMC")){
scoretable<-getSpace(scoretable)
}
startskel<-scoretable$adjacency
blacklist<-scoretable$blacklist
scoretable<-scoretable$tables
} else {
if (is.null(startspace)){
startspace<-definestartspace(alpha,param,cpdag=cpdag,algo="pc")
}
startskeleton<-1*(startspace&!blacklist)
if(!is.null(addspace)) { startskel<-1*((addspace|startskeleton)&!blacklist)
} else {startskel<-startskeleton }
}
blacklistparents<-list()
for (i in 1:matsize) {
blacklistparents[[i]]<-which(blacklist[,i]==1)
}
if(verbose) {
cat(paste("maximum parent set size is", max(apply(startskel,2,sum))),"\n")
}
if(max(apply(startskel,2,sum))>hardlimit) {
stop("the size of maximal parent set is higher that the hardlimit; redifine the search space or increase the hardlimit!")
}
#tablestart<-Sys.time()
#computing score tables
ptab<-listpossibleparents.PC.aliases(startskel,isgraphNEL=FALSE,n,updatenodes)
if (verbose) {
cat("core space defined, score table are being computed \n")
flush.console()
}
if (plus1==FALSE) {
parenttable<-ptab$parenttable # basic parenttable without plus1 lists
aliases<-ptab$aliases #aliases for each node, since nodes in the parent tables are indexed 1,2,3,4,... rather than by their real parent names
numberofparentsvec<-ptab$numberofparentsvec
numparents<-ptab$numparents
rowmaps<-parentsmapping(parenttable,numberofparentsvec,n,updatenodes)
tablestart<-Sys.time()
if(is.null(scoretable)) {
scoretable<-scorepossibleparents.alias(parenttable,aliases,n,param,updatenodes,rowmaps,
numparents,numberofparentsvec)
}
posetparenttable<-poset(parenttable,numberofparentsvec,rowmaps,n,updatenodes)
if(MAP==TRUE){
maxmatrices<-posetscoremax(posetparenttable,scoretable,numberofparentsvec,rowmaps,
n,plus1lists=NULL,updatenodes)
tableend<-Sys.time()
MCMCresult<-orderMCMCbasemax(n,nsmall,startorder,iterations,stepsave,moveprobs,parenttable,
scoretable,aliases,numparents,rowmaps,maxmatrices,
numberofparentsvec,gamma=gamma,bgnodes=param$bgnodes,matsize=matsize,
chainout=chainout,compress=compress)
mcmcend<-Sys.time()
} else {
bannedscore<-poset.scores(posetparenttable,scoretable,numberofparentsvec,rowmaps,
n,plus1lists=NULL,ptab$numparents,updatenodes=updatenodes)
tableend<-Sys.time()
MCMCresult<-orderMCMCbase(n,nsmall,startorder,iterations,stepsave,moveprobs,parenttable,
scoretable,aliases,numparents,rowmaps,
bannedscore,numberofparentsvec,gamma=gamma,
bgnodes=param$bgnodes,matsize=matsize,chainout=chainout,compress=compress)
mcmcend<-Sys.time()
}
} else {
parenttable<-ptab$parenttable # basic parenttable without plus1 lists
aliases<-ptab$aliases #aliases for each node, since nodes in the parent tables are indexed 1,2,3,4,... rather than by their real parent names
numberofparentsvec<-ptab$numberofparentsvec
numparents<-ptab$numparents
plus1lists<-PLUS1(matsize,aliases,updatenodes,blacklistparents)
rowmaps<-parentsmapping(parenttable,numberofparentsvec,n,updatenodes)
tablestart<-Sys.time()
if(is.null(scoretable)) {
scoretable<-scorepossibleparents.PLUS1(parenttable,plus1lists,n,param,updatenodes,
rowmaps,numparents,numberofparentsvec)
}
posetparenttable<-poset(parenttable,numberofparentsvec,rowmaps,n,updatenodes)
if(MAP==TRUE){
maxmatrices<-posetscoremax(posetparenttable,scoretable,numberofparentsvec,
rowmaps,n,plus1lists=plus1lists,updatenodes)
} else {
bannedscore<-poset.scores(posetparenttable,scoretable,ptab$numberofparentsvec,rowmaps,
n,plus1lists=plus1lists,ptab$numparents,updatenodes)
}
if(verbose) {
cat(paste("score tables computed, orderMCMC is running"),"\n")
flush.console()
}
tableend<-Sys.time()
#running MCMC scheme
if(MAP) {
MCMCresult<-orderMCMCplus1max(n,nsmall,startorder,iterations,stepsave,moveprobs,parenttable,
scoretable,aliases,numparents,rowmaps,plus1lists,maxmatrices,numberofparentsvec,
gamma=gamma,bgnodes=param$bgnodes,matsize=matsize,chainout=chainout,compress=compress)
} else {
MCMCresult<-orderMCMCplus1(n,nsmall,startorder,iterations,stepsave,moveprobs,parenttable,
scoretable,aliases,numparents,rowmaps,plus1lists,
bannedscore,numberofparentsvec,gamma=gamma,
bgnodes=param$bgnodes,matsize=matsize,chainout=chainout,compress=compress)
}
mcmcend<-Sys.time()
}
#defining result object
if(chainout) {
if(param$DBN) {
MCMCchain<-lapply(MCMCresult$incidence,function(x)DBNtransform(x,param=param))
MCMCtraces$incidence<-MCMCchain
MCMCtraces$orders<-lapply(MCMCresult$orders,order2var,varnames=param$firstslice$labels)
} else {
MCMCtraces$incidence<-lapply(MCMCresult$incidence,function(x)assignLabels(x,param$labels))
MCMCtraces$orders<-lapply(MCMCresult$orders,order2var,varnames=param$labels)
}
#MCMCtraces$DAGscores<-MCMCresult$DAGscores
MCMCtraces$orderscores<-MCMCresult$orderscores
}
maxobj<-storemaxMCMC(MCMCresult,param)
maxN<-which.max(MCMCresult$DAGscores)
#maxobj$reach<-maxN
if (scoreout){
if(chainout){output<-4}
else{output<-3}
} else {
if(chainout) {output<-2}
else {output<-1}
}
result$DAG<-maxobj$DAG
result$CPDAG<-Matrix(graph2m(dag2cpdag(m2graph(result$DAG))),sparse=TRUE)
result$score<-maxobj$score
result$maxorder<-maxobj$order
result$info<-list()
tabletime<-tableend-tablestart
if(units(tabletime)=="mins") {
tabletime<-as.numeric(tabletime*60)
}
mcmctime<-mcmcend-tableend
if(units(mcmctime)=="mins") {
mcmctime<-as.numeric(mcmctime*60)
}
result$info$runtimes<-c(tabletime,mcmctime)
names(result$info$runtimes)<-c("scoretables","MCMCchain")
result$trace<-MCMCresult$DAGscores
switch(as.character(output),
"1"={ # return only maximum DAG and score trace
},
"2"={ # return all MCMC all saved MCMC steps: incidence, DAGscore, orderscore and order and max result
result$traceadd<-MCMCtraces
},
"3"={ # return max DAG, order, last search space incidence and all scoretables
result$scoretable<-list()
result$scoretable$adjacency<-startskel
result$scoretable$tables<-scoretable
result$scoretable$blacklist<-blacklist
attr(result$scoretable,"class")<-"scorespace"
},
"4"={ # return all MCMC all saved MCMC steps,max result,last search space and scoretables
result$scoretable<-list()
result$scoretable$adjacency<-startskel
result$scoretable$tables<-scoretable
result$scoretable$blacklist<-blacklist
attr(result$scoretable,"class")<-"scorespace"
result$traceadd<-MCMCtraces
}
)
attr(result,"class")<-"orderMCMC"
return(result)
}
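#For orientation, a sketch of how orderMCMCmain() is typically reached through the
#package's exported interface; this assumes the usual BiDAG entry points
#scoreparameters() and orderMCMC() and the Boston data shipped with the package:
# library(BiDAG)
# myscore<-scoreparameters("bge",Boston) #BGe score parameters
# fit<-orderMCMC(myscore,MAP=TRUE,plus1=TRUE) #dispatches to orderMCMCmain()
# fit$score #log score of the best DAG found
# plot(fit$trace,type="l") #trace of sampled DAG scores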
## /scratch/gouwar.j/cran-all/cranData/BiDAG/R/orderMCMCmain.R
#implements order MCMC on an extended search space, sampling version
#partly derived from <doi:10.1080/01621459.2015.1133426>
orderMCMCplus1<-function(n,nsmall,startorder,iterations,stepsave,moveprobs,parenttable,scoretable,aliases,numparents,rowmaps,
plus1lists,scoresmatrices,numberofparentsvec,gamma=1,bgnodes,matsize,chainout=FALSE,compress=TRUE){
result<-list()
if(!is.null(bgnodes)) {
mainnodes<-c(1:matsize)[-bgnodes]
} else {mainnodes<-c(1:matsize)}
currentpermy<-startorder #starting order represented as a permutation
currentorderscores<-orderscorePlus1(matsize,currentpermy[1:nsmall],c(1:nsmall),parenttable,aliases,numparents,rowmaps,plus1lists,scoretable,scoresmatrices,currentpermy) #starting score
currenttotallogscore<-sum(currentorderscores$totscores[mainnodes]) #log total score of all DAGs in the starting order
currentDAG<-samplescoreplus1(matsize,mainnodes,currentorderscores,plus1lists,scoretable,
scoresmatrices,parenttable,numberofparentsvec,aliases) #score of a single DAG sampled from the starting order
L1 <- list() # stores the adjacency matrix of a DAG sampled from the orders
L2 <- vector() # stores the log BGe score of a sampled DAG
L3 <- vector() # stores the log BGe score of the entire order
L4 <- list() # stores the orders as permutations
zlimit<- floor(iterations/stepsave) + 1 # number of outer iterations
length(L1) <- zlimit
length(L2) <- zlimit
length(L3) <- zlimit
length(L4) <- zlimit
L1[[1]]<-currentDAG$incidence #starting DAG adjacency matrix
L2[1]<-currentDAG$logscore #starting DAG score
L3[1]<-currenttotallogscore #starting order score
L4[[1]]<-currentpermy[1:nsmall] #starting order
maxdag<-currentDAG$incidence
maxscore<-L2[1]
for (z in 2:zlimit){ #the MCMC chain loop with 'iterations' steps is in two parts
for (count in 1:stepsave){ #since we only save the results to the lists each 'stepsave'
chosenmove<-sample.int(4,1,prob=moveprobs)
if(chosenmove<4){ # moves 1-3 change the order; if it is 4 we stay still
proposedpermy<-currentpermy #sample a new order by swapping two elements
switch(as.character(chosenmove),
"1"={ # swap any two elements at random
sampledelements<-sample.int(nsmall,2,replace=FALSE) #chosen at random
},
"2"={ # swap any adjacent elements
k<-sample.int(nsmall-1,1) #choose the smaller index at random
sampledelements<-c(k,k+1)
},
"3"={ # move one element to a position sampled according to order scores
sampledpos<-sample.int(nsmall,1)
},
{# if no valid move is chosen, we have a problem
cat("The move sampling has failed! \n")
}) #end switch
if(chosenmove<3){
scorepositions<-c(min(sampledelements):max(sampledelements))
proposedpermy[sampledelements]<-currentpermy[rev(sampledelements)] #proposed new order
rescorenodes<-proposedpermy[scorepositions] #we only need to rescore these nodes between the swapped elements to speed up the calculation
proposedorderrescored<-orderscorePlus1(matsize,rescorenodes,scorepositions,parenttable,aliases,numparents,rowmaps,plus1lists,scoretable,scoresmatrices,proposedpermy)
proposedtotallogscore<-currenttotallogscore-sum(currentorderscores$totscores[rescorenodes])+sum(proposedorderrescored$totscores[rescorenodes]) #and the new log total score by updating only the necessary nodes
scoreratio<-exp((proposedtotallogscore-currenttotallogscore)*gamma) #acceptance probability
if(runif(1)<scoreratio){ #Move accepted then set the current order and scores to the proposal
currentpermy<-proposedpermy
currenttotallogscore<-proposedtotallogscore
currentorderscores$therow[rescorenodes]<-proposedorderrescored$therow[rescorenodes]
currentorderscores$totscores[rescorenodes]<-proposedorderrescored$totscores[rescorenodes]
currentorderscores$allowedlists[rescorenodes]<-proposedorderrescored$allowedlists[rescorenodes]
}
} else if (chosenmove==3) {
neworder<-positionscorePlus1(matsize,nsmall,currentorderscores,sampledpos,currentpermy,aliases,
rowmaps,plus1lists,numparents,scoretable,scoresmatrices)
currentpermy<-neworder$order
currentorderscores<-neworder$score
currenttotallogscore<-neworder$tot
}
}
}
currentDAG<-samplescoreplus1(matsize,mainnodes,currentorderscores,plus1lists,scoretable,scoresmatrices,
parenttable,numberofparentsvec,aliases)
if(chainout) {
if(compress) {
L1[[z]]<-Matrix(currentDAG$incidence,sparse=TRUE) #store compressed adjacency matrix of a sampled DAG each 'stepsave'
} else {
L1[[z]]<-currentDAG$incidence #store adjacency matrix of a sampled DAG each 'stepsave'
}
}
L2[z]<-currentDAG$logscore #and log score of a sampled DAG
L3[z]<-currenttotallogscore #and the current order score
L4[[z]]<-currentpermy[1:nsmall] #and store current order
if(L2[z]>maxscore) {
maxscore<-L2[z]
maxdag<-currentDAG$incidence
}
}
result$incidence<-L1
result$DAGscores<-L2
result$orderscores<-L3
result$orders<-L4
result$maxscore<-maxscore
result$maxdag<-Matrix(maxdag,sparse=TRUE)
return(result)
}
## /scratch/gouwar.j/cran-all/cranData/BiDAG/R/orderMCMCplus1.R
#implements order MCMC on an extended search space, MAP version
#partly derived from <doi:10.1080/01621459.2015.1133426>
orderMCMCplus1max<-function(n,nsmall,startorder,iterations,stepsave,moveprobs,
parenttable,scoretable,aliases,numparents,
rowmaps,plus1lists,maxmatrices,numberofparentsvec,
gamma=1,bgnodes,matsize,chainout=FALSE,compress=TRUE) {
result<-list()
if(!is.null(bgnodes)) {
mainnodes<-c(1:matsize)[-bgnodes]
} else {mainnodes<-c(1:matsize)}
currentpermy<-startorder #starting order represented as a permutation
currentorderscores<-orderscorePlus1max(matsize,scorenodes=startorder[1:nsmall],scorepositions=c(1:nsmall),
parenttable,aliases,numparents,plus1lists,rowmaps,scoretable,
maxmatrices,currentpermy) #starting score
currenttotallogscore<-sum(currentorderscores$totscores[mainnodes]) #log score of a maximum DAG in the starting order
currentDAG<-samplescoreplus1.max(matsize,mainnodes,currentorderscores,plus1lists,maxmatrices,scoretable,
parenttable,numberofparentsvec,aliases) #score of a single DAG sampled from the starting order
L1 <- list() # stores the adjacency matrix of a DAG sampled from the orders
L2 <- vector() # stores its log BGe score
L3 <- vector() # stores the log BGe score of the entire order
L4 <- list() # stores the orders as permutations
zlimit<- floor(iterations/stepsave) + 1 # number of outer iterations
length(L1) <- zlimit
length(L2) <- zlimit
length(L3) <- zlimit
length(L4) <- zlimit
L1[[1]]<-currentDAG$incidence #starting DAG adjacency matrix
L2[1]<-currentDAG$logscore #starting DAG score
L3[1]<-currenttotallogscore #starting order score
L4[[1]]<-currentpermy[1:nsmall] #starting order
maxdag<-currentDAG$incidence
maxscore<-L2[1]
moveprobsstart<-moveprobs
for (z in 2:zlimit){ #the MCMC chain loop with 'iterations' steps is in two parts
for (count in 1:stepsave){ #since we only save the results to the lists each 'stepsave'
chosenmove<-sample.int(4,1,prob=moveprobs)
if(chosenmove<4){ # moves 1-3 change the order; if it is 4 we stay still
proposedpermy<-currentpermy #sample a new order by swapping two elements
switch(as.character(chosenmove),
"1"={ # swap any two elements at random
sampledelements<-sample.int(nsmall,2,replace=FALSE) #chosen at random
},
"2"={ # swap any adjacent elements
k<-sample.int(nsmall-1,1) #choose the smaller index at random
sampledelements<-c(k,k+1)
},
"3"={ # find best position for 1 element
sampledpos<-sample.int(nsmall,1)
},
"4"={ # stay still
},
{# if no valid move is chosen, we have a problem
cat("The move sampling has failed! \n")
}) #end switch
if (chosenmove < 3) {
scorepositions<-c(min(sampledelements):max(sampledelements))
proposedpermy[sampledelements]<-currentpermy[rev(sampledelements)] #proposed new order
rescorenodes<-proposedpermy[scorepositions] #we only need to rescore these nodes between the swapped elements to speed up the calculation
proposedorderrescored<-orderscorePlus1max(matsize,rescorenodes,scorepositions,parenttable,aliases,numparents,plus1lists,rowmaps,scoretable,maxmatrices,proposedpermy)
proposedtotallogscore<-currenttotallogscore-sum(currentorderscores$totscores[rescorenodes])+sum(proposedorderrescored$totscores[rescorenodes]) #and the new log total score by updating only the necessary nodes
scoreratio<-exp((proposedtotallogscore-currenttotallogscore)*gamma) #acceptance probability
if(runif(1)<scoreratio){ #Move accepted then set the current order and scores to the proposal
currentpermy<-proposedpermy
currentorderscores$therow[rescorenodes]<-proposedorderrescored$therow[rescorenodes]
currentorderscores$totscores[rescorenodes]<-proposedorderrescored$totscores[rescorenodes]
currentorderscores$allowedlists[rescorenodes]<-proposedorderrescored$allowedlists[rescorenodes]
currenttotallogscore<-proposedtotallogscore
}
} else if (chosenmove==3) {
neworder<-positionscorePlus1max(matsize,nsmall,currentorderscores,sampledpos,currentpermy,aliases,
rowmaps,plus1lists,numparents,scoretable,maxmatrices)
currentorderscores<-neworder$score
currentpermy<-neworder$order
currenttotallogscore<-sum(currentorderscores$totscores)
}
}
}
currentDAG<-samplescoreplus1.max(matsize,mainnodes,currentorderscores,plus1lists,maxmatrices,scoretable,
parenttable,numberofparentsvec,aliases)
if(chainout) {
if(compress) {
L1[[z]]<-Matrix(currentDAG$incidence,sparse=TRUE) #store compressed adjacency matrix of a sampled DAG each 'stepsave'
} else {
L1[[z]]<-currentDAG$incidence #store adjacency matrix of a sampled DAG each 'stepsave'
}
}
L2[z]<-currentDAG$logscore #and log score of a sampled DAG
L3[z]<-currenttotallogscore #and the current order score
L4[[z]]<-currentpermy[1:nsmall] #and store current order
if(L2[z]>maxscore) {
maxscore<-L2[z]
maxdag<-currentDAG$incidence
}
}
result$incidence<-L1
result$DAGscores<-L2
result$orderscores<-L3
result$orders<-L4
result$maxscore<-maxscore
result$maxdag<-Matrix(maxdag,sparse=TRUE)
return(result)
}
## /scratch/gouwar.j/cran-all/cranData/BiDAG/R/orderMCMCplus1max.R
#builds a table with banned scores for the sampling version
poset.scores<-function(posetparenttable,scoretable,numberofparentsvec,rowmaps,n,plus1lists=NULL,
numparents,updatenodes=c(1:n)){
orderscore<-vector("list",n) #one matrix of banned-parent scores per node
revnumberofparentsvec<-lapply(numberofparentsvec,rev)
if (is.null(plus1lists)){
for (j in updatenodes){
len<-numparents[j] #maximum number of parents for node j
binomcoefs<-choose(len,c(0:len))
nrows<-nrow(posetparenttable[[j]])
P_local<-vector("numeric",length=nrows)
P_local[nrows] <-scoretable[[j]][1,1]
maxoverall<-max(scoretable[[j]][,1])
P_local[1]<-log(sum(exp(scoretable[[j]][,1]-maxoverall)))+maxoverall
cutoff<-1
if(nrows>2){
for(level in 1:(len-1)){
cutoff<-cutoff+binomcoefs[level]
for (i in (nrows-1):cutoff) {
#so we go through all rows where the number of non-zero entries is more than the number of banned parents
# find the parents in the poset graph
posetparentnodes <- posetparenttable[[j]][i,c(1:revnumberofparentsvec[[j]][i])]
maxparents<-max(P_local[posetparentnodes])
parentsum<-log(sum(exp(P_local[posetparentnodes]-maxparents)))+maxparents-log(len-revnumberofparentsvec[[j]][i]-level+1)
conjugatescore<-scoretable[[j]][rowmaps[[j]]$backwards[nrows-rowmaps[[j]]$forward[i]+1],1]
maxoverall<-max(parentsum,conjugatescore)
P_local[i]<- log(exp(parentsum-maxoverall)+exp(conjugatescore-maxoverall)) + maxoverall
}
}
}
orderscore[[j]]<-as.matrix(P_local)
}
return(orderscore)
} else {
for (j in updatenodes) {
len<-numparents[j] #maximum number of parents for node j
binomcoefs<-choose(len,c(0:len))
ll<-length(plus1lists$parents[[j]])+1
nrows<-nrow(posetparenttable[[j]])
P_local <- matrix(nrow=nrows,ncol=ll)
for (li in 1:ll){
P_local[nrows,li] <-scoretable[[j]][[li]][1,1]
maxoverall<-max(scoretable[[j]][[li]][,1])
P_local[1,li]<-log(sum(exp(scoretable[[j]][[li]][,1]-maxoverall)))+maxoverall
cutoff<-1
if(nrows>2){
for(level in 1:(len-1)){
cutoff<-cutoff+binomcoefs[level]
for (i in (nrows-1):cutoff) {
#so we go through all rows where the number of non-zero entries is more than the number of banned parents
# find the parents in the poset graph
posetparentnodes <- posetparenttable[[j]][i,c(1:revnumberofparentsvec[[j]][i])]
maxparents<-max(P_local[posetparentnodes,li])
parentsum<-log(sum(exp(P_local[posetparentnodes,li]-maxparents)))+maxparents-log(len-revnumberofparentsvec[[j]][i]-level+1)
conjugatescore<-scoretable[[j]][[li]][rowmaps[[j]]$backwards[nrows-rowmaps[[j]]$forward[i]+1],1]
maxoverall<-max(parentsum,conjugatescore)
P_local[i,li]<- log(exp(parentsum-maxoverall)+exp(conjugatescore-maxoverall)) + maxoverall
}
}
}
}
orderscore[[j]]<-P_local
}
return(orderscore)
}
}
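#The recurring pattern log(sum(exp(x-max(x))))+max(x) above is the max-shifted
#log-sum-exp; factored out here for illustration only (the package inlines it):
logsumexp<-function(x) {
m<-max(x)
m+log(sum(exp(x-m))) #numerically stable even when all entries are very negative
}
#e.g. logsumexp(c(-1000,-1001)) is about -999.69, whereas the naive
#log(sum(exp(c(-1000,-1001)))) underflows to -Inf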
#builds a table with banned scores for the max version
posetscoremax<-function(posetparenttable,scoretable,numberofparentsvec,rowmaps,n,plus1lists=NULL,
updatenodes=c(1:n)) {
listy<-list()
revnumberofparentsvec<-lapply(numberofparentsvec,rev)
if (is.null(plus1lists)) {
maxmatrix<-list()
maxrows<-list()
for (j in updatenodes) {
nrows<-nrow(posetparenttable[[j]])
P_local <- numeric(nrows)
maxrow<-numeric(nrows)
P_local[nrows]<-scoretable[[j]][1,1]
maxrow[nrows]<-1
#the last element, where all parents are banned, corresponds to the 1st row of the score table
#for all other rows we have
if(nrows>1) {
for (i in (nrows-1):1) { # find the parents in the poset graph
posetparentnodes <- posetparenttable[[j]][i,c(1:revnumberofparentsvec[[j]][i])]
# take the maximum of the parent scores, and the conjugate score
prevmax<- max(P_local[posetparentnodes])
candmax<-scoretable[[j]][rowmaps[[j]]$backwards[nrows-rowmaps[[j]]$forward[i]+1],1]
if (prevmax>candmax) {
P_local[i]<-prevmax
maxrow[i]<-maxrow[posetparentnodes[which.max(P_local[posetparentnodes])]]
} else {
P_local[i]<-candmax
maxrow[i]<-rowmaps[[j]]$backwards[nrows-rowmaps[[j]]$forward[i]+1]
}
}
}
maxmatrix[[j]]<-as.matrix(P_local)
maxrows[[j]]<- maxrow
}
listy$maxmatrix<-maxmatrix
listy$maxrows<-maxrows
return(listy)
} else {
maxmatrix<-list()
maxrows<-list()
for (j in updatenodes)
{
ll<-length(plus1lists$parents[[j]])+1
nrows<-nrow(posetparenttable[[j]])
P_local <- matrix(nrow=nrows,ncol=ll)
maxrow<-matrix(nrow=nrows,ncol=ll)
for (li in 1:ll)
{
P_local[nrows,li]<-scoretable[[j]][[li]][1,1] #in plus1 lists last row means that all nodes banned but plus1
maxrow[nrows,li]<-1
#the last element, where all parents are banned, corresponds to the 1st row of the score table
#for all other rows we have
if(nrows>1) {
for (i in (nrows-1):1) {
# find the parents in the poset graph
posetparentnodes <- posetparenttable[[j]][i,c(1:revnumberofparentsvec[[j]][i])]
# take the maximum of the parent scores, and the conjugate score
prevmax<- max(P_local[posetparentnodes,li])
candmax<-scoretable[[j]][[li]][rowmaps[[j]]$backwards[nrows-rowmaps[[j]]$forward[i]+1],1]
if (prevmax>candmax) {
P_local[i,li]<-prevmax
maxrow[i,li]<-maxrow[posetparentnodes[which.max(P_local[posetparentnodes,li])],li]
} else {
P_local[i,li]<-candmax
maxrow[i,li]<-rowmaps[[j]]$backwards[nrows-rowmaps[[j]]$forward[i]+1]
}
}
}
}
maxmatrix[[j]]<-P_local
maxrows[[j]]<-maxrow
}
listy$maxmatrix<-maxmatrix
listy$maxrow<-maxrows
return(listy)
}
}
parentsmapping<-function(parenttable,numberofparentsvec,n,updatenodes=c(1:n)) {
maps<-list()
mapi<-list()
for (i in updatenodes) {
nrows<-nrow(parenttable[[i]])
P_local <- numeric(nrows)
P_localinv <- numeric(nrows)
P_local[1]<-1
P_localinv[1]<-1
if (nrows>1){
for (j in 2:nrows) {
parentnodes <- parenttable[[i]][j,c(1:numberofparentsvec[[i]][j])]
#numberofparentsvec stores the number of non-zero entries in the i-th row of the parenttable,
P_local[j]<-sum(2^parentnodes)/2+1
#so from those non-zero entries we recover the row index
P_localinv[P_local[j]]<-j # the inverse mapping
}
}
mapi$forward<-P_local
mapi$backwards<-P_localinv
maps[[i]]<- mapi
}
return(maps)
}
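#Illustration of the row key used above: a parent subset S of the alias positions
#{1,...,m} maps to sum(2^S)/2+1, a bijection onto 1:2^m (hypothetical m=3 example,
#not BiDAG output):
subsets<-list(integer(0),1,2,c(1,2),3,c(1,3),c(2,3),c(1,2,3))
sapply(subsets,function(s) sum(2^s)/2+1) #returns 1 2 3 4 5 6 7 8
#so 'forward' and 'backwards' let the orderscore functions jump from a banned
#parent set straight to its row in the score table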
poset<-function(parenttable,numberofparentsvec,rowmaps,n,updatenodes=c(1:n)){
posetparenttables<-vector("list",n)
for (i in updatenodes) {
nrows<-nrow(parenttable[[i]])
ncols<-ncol(parenttable[[i]])
posetparenttables[[i]]<-matrix(NA,nrow=nrows,ncol=ncols)
offsets<-rep(1,nrows)
if(nrows>1) {
for(j in nrows:2){
parentnodes<- parenttable[[i]][j,c(1:numberofparentsvec[[i]][j])]
children<-rowmaps[[i]]$backwards[rowmaps[[i]]$forward[j]-2^parentnodes/2]
posetparenttables[[i]][cbind(children,offsets[children])]<-j
offsets[children]<-offsets[children]+1
}
}
}
return(posetparenttables)
}
## /scratch/gouwar.j/cran-all/cranData/BiDAG/R/orderposetfns.R
#scores a single order, sampling version (base neighbourhood)
orderscoreBase<-function(n,scorenodes,scorepositions,parenttable,aliases,numparents,rowmaps,
scoretable,scoresmatrices,permy) {
orderscores<-vector("double",n)
therows<-vector("integer",n)
k<-1
for (i in scorenodes){
position<-scorepositions[k]
if (position==n) { #no parents allowed, i.e. only first row, only first list
orderscores[i]<-scoretable[[i]][1,1]
therows[i]<-c(2^numparents[i])
}
else {
bannednodes<-permy[1:position]
allowednodes<-permy[(position+1):n]
bannedpool<-which(aliases[[i]]%in%bannednodes)
if (numparents[i]==0||length(bannedpool)==0) { #all parents allowed or no parents in the parent table
therows[i]<-c(1)
}
else {
therows[i]<-rowmaps[[i]]$backwards[sum(2^bannedpool)/2+1]
}
orderscores[i]<-scoresmatrices[[i]][therows[i],1]
}
k<-k+1
}
scores<-list()
scores$therow<-therows
scores$totscores<-orderscores
return(scores)
}
#scores a single order, sampling version (plus1 neighbourhood)
orderscorePlus1<-function(n,scorenodes,scorepositions,parenttable,aliases,numparents,
rowmaps,plus1lists,scoretable,scoresmatrices,permy) {
orderscores<-vector("double",n)
allowedscorelists<-vector("list",n)
therows<-vector("integer",n)
k<-1
for (i in scorenodes){
position<-scorepositions[k]
if (position==n) { #no parents allowed, i.e. only first row, only first list
orderscores[i]<-scoretable[[i]][[1]][1,1]
allowedscorelists[[i]]<-c(1)
therows[i]<-c(2^numparents[i])
}
else {
bannednodes<-permy[1:position]
allowednodes<-permy[(position+1):n]
bannedpool<-which(aliases[[i]]%in%bannednodes)
if (numparents[i]==0||length(bannedpool)==0) { #all parents allowed
therows[i]<-c(1)
}
else {
therows[i]<-rowmaps[[i]]$backwards[sum(2^bannedpool)/2+1]
}
allowedscorelists[[i]]<-c(1,which(plus1lists$parents[[i]]%in%allowednodes)+1)
scoresvec<-scoresmatrices[[i]][therows[i],allowedscorelists[[i]]]
maxallowed<-max(scoresvec)
orderscores[i]<-maxallowed+log(sum(exp(scoresvec-maxallowed)))
}
k<-k+1
}
scores<-list()
scores$therow<-therows
scores$allowedlists<-allowedscorelists
scores$totscores<-orderscores
return(scores)
}
#returns an order and its BGe/BDe logscore, where the order is sampled (according to the order scores) from all orders obtained by
#putting a given node in every position from 1 to n with all other nodes fixed (plus1 neighbourhood)
positionscorePlus1<-function(n,nsmall,currentscore,positionx,permy,aliases,rowmaps,plus1lists,numparents,
scoretable,scoresmatrices) {
vectorx<-vector(length=nsmall) #scores of node in each place in the order
vectorall<-vector(length=nsmall) #scores for each node when x takes its position
base<-currentscore$totscores
totalall<-vector(length=nsmall) #result vector with log scores of all orders
totalall[positionx]<-sum(base) #totallogscore of initial permutation
x<-permy[positionx] #node for which we search max/sample position
vectorx[positionx]<-base[x] #its score in current permy
allowedlistx<-list()
allowedlisty<-list()
therowx<-vector()
therowy<-vector()
if (positionx>1) {
rightpart<-permy[-positionx]
for (i in (positionx-1):1) {
nodey<-permy[i] #node which changes position with nodex
#row with the new score of nodex
if (numparents[x]==0||i==1) {
therowx[i]<-c(1)
} else {
bannedpool<-which(aliases[[x]]%in%permy[1:(i-1)])
if (length(bannedpool)==0) {
therowx[i]<-c(1)
} else {
therowx[i]<-rowmaps[[x]]$backwards[sum(2^bannedpool)/2+1]
}
}
allowedlistx[[i]]<-c(1,which(plus1lists$parents[[x]]%in%c(permy[(i+1):n],nodey))+1)
scoresvec<-scoresmatrices[[x]][therowx[i],allowedlistx[[i]]]
maxallowed<-max(scoresvec)
vectorx[i]<-maxallowed+log(sum(exp(scoresvec-maxallowed)))
newpos<-i+1 #new position of nodey
if(newpos==n) { #no parents allowed, i.e. only first row, only first list
vectorall[nodey]<-scoretable[[nodey]][[1]][1,1]
allowedlisty[[nodey]]<-c(1)
therowy[nodey]<-c(2^numparents[nodey])
} else {
bannedpool<-which(aliases[[nodey]]%in%c(permy[1:(i-1)],x))
if (numparents[nodey]==0||length(bannedpool)==0) {
therowy[nodey]<-c(1)
}
else {
therowy[nodey]<-rowmaps[[nodey]]$backwards[sum(2^bannedpool)/2+1]
}
if (newpos==n) {allowedlisty[[nodey]]<-c(1)} else {
allowedlisty[[nodey]]<-c(1,which(plus1lists$parents[[nodey]]%in%rightpart[(newpos):(n-1)])+1)
}
scoresvec<-scoresmatrices[[nodey]][therowy[nodey],allowedlisty[[nodey]]]
maxallowed<-max(scoresvec)
vectorall[nodey]<-maxallowed+log(sum(exp(scoresvec-maxallowed)))
}
totalall[i]<-totalall[i+1]-vectorx[i+1]+vectorx[i]-base[nodey]+vectorall[nodey]
}
}
if (positionx < nsmall) {
for (i in (positionx+1):nsmall) {
nodey<-permy[i]
if (numparents[x]==0) { #there is only 1 row in the score table
therowx[i]<-c(1)
} else if (i==n) { #no parents allowed, i.e. only first row, only first list
vectorall[x]<-scoretable[[x]][[1]][1,1]
therowx[i]<-c(2^numparents[x])
} else {
bannedpool<-which(aliases[[x]]%in%permy[1:i])
if (length(bannedpool)==0) {
therowx[i]<-c(1)
} else {
therowx[i]<-rowmaps[[x]]$backwards[sum(2^bannedpool)/2+1]
}
}
if (i==n) { allowedlistx[[i]]<-c(1)} else {
allowedlistx[[i]]<-c(1,which(plus1lists$parents[[x]]%in%permy[(i+1):n])+1)
}
scoresvec<-scoresmatrices[[x]][therowx[i],allowedlistx[[i]]]
maxallowed<-max(scoresvec)
vectorx[i]<-maxallowed+log(sum(exp(scoresvec-maxallowed)))
newpos<-i-1
if (newpos==1) {
therowy[nodey]<-c(1)
} else {
bannedpool<-which(aliases[[nodey]]%in%((permy[1:newpos])[-positionx]))
if (numparents[nodey]==0||length(bannedpool)==0) {
therowy[nodey]<-c(1)
} else {
therowy[nodey]<-rowmaps[[nodey]]$backwards[sum(2^bannedpool)/2+1]
}
}
allowedlisty[[nodey]]<-c(1,which(plus1lists$parents[[nodey]]%in%c(permy[i:n],x))+1)
scoresvec<-scoresmatrices[[nodey]][therowy[nodey],allowedlisty[[nodey]]]
maxallowed<-max(scoresvec)
vectorall[nodey]<-maxallowed+log(sum(exp(scoresvec-maxallowed)))
totalall[i]<-totalall[i-1]-vectorx[i-1]+vectorx[i]-base[nodey]+vectorall[nodey]
}
}
totalmax<-max(totalall)
allscore<-totalmax+log(sum(exp(totalall-totalmax)))
maxi<-sample.int(nsmall,1,prob=exp(totalall-allscore))
res<-list()
if (maxi==positionx) {
res$score<-currentscore
res$order<-permy
res$tot<-totalall[positionx]
return(res)
} else if (maxi>positionx) {
newscore<-currentscore
newscore$therow[x]<-therowx[maxi]
newscore$allowedlists[[x]]<-allowedlistx[[maxi]]
newscore$totscores[x]<-vectorx[maxi]
updatenodes<-permy[(positionx+1):maxi]
newscore$therow[updatenodes]<-therowy[updatenodes]
newscore$allowedlists[updatenodes]<-allowedlisty[updatenodes]
newscore$totscores[updatenodes]<-vectorall[updatenodes]
res$score<-newscore
res$order<-movenode(permy,positionx,maxi,n)
res$tot<-totalall[maxi]
return(res)
} else {
newscore<-currentscore
newscore$therow[x]<-therowx[maxi]
newscore$allowedlists[[x]]<-allowedlistx[[maxi]]
newscore$totscores[x]<-vectorx[maxi]
updatenodes<-permy[maxi:(positionx-1)]
newscore$therow[updatenodes]<-therowy[updatenodes]
newscore$allowedlists[updatenodes]<-allowedlisty[updatenodes]
newscore$totscores[updatenodes]<-vectorall[updatenodes]
res$score<-newscore
res$order<-movenode(permy,positionx,maxi,n)
res$tot<-totalall[maxi]
return(res)
}
}
#returns an order and its BGe/BDe logscore, where the order is sampled (according to the order scores) from all orders obtained by
#putting a given node in every position from 1 to n with all other nodes fixed (base neighbourhood)
positionscorebase<-function(n,nsmall,currentscore,positionx,permy,aliases,rowmaps,numparents,
scoretable,scoresmatrices) {
vectorx<-vector(length=nsmall) #scores of node in each place in the order
vectorall<-vector(length=nsmall) #scores for each node when x takes its position
base<-currentscore$totscores
totalall<-vector(length=nsmall) #result vector with log scores of all orders
totalall[positionx]<-sum(base) #totallogscore of initial permutation
x<-permy[positionx] #node for which we search max/sample position
vectorx[positionx]<-base[x] #its score in current permy
therowx<-vector()
therowy<-vector()
if (positionx>1) {
rightpart<-permy[-positionx]
for (i in (positionx-1):1) {
nodey<-permy[i]
if (numparents[x]==0||i==1) {
therowx[i]<-c(1)
} else {
bannedpool<-which(aliases[[x]]%in%permy[1:(i-1)])
if (length(bannedpool)==0) {
therowx[i]<-c(1)
} else {
therowx[i]<-rowmaps[[x]]$backwards[sum(2^bannedpool)/2+1]
}
}
scoresvec<-scoresmatrices[[x]][therowx[i],1]
maxallowed<-max(scoresvec)
vectorx[i]<-maxallowed+log(sum(exp(scoresvec-maxallowed)))
newpos<-i+1 #new position of nodey
if(newpos==n) { #no parents allowed, i.e. only first row, only first list
vectorall[nodey]<-scoretable[[nodey]][1,1]
therowy[nodey]<-c(2^numparents[nodey])
} else {
bannedpool<-which(aliases[[nodey]]%in%c(permy[1:(i-1)],x))
if (numparents[nodey]==0||length(bannedpool)==0) {
therowy[nodey]<-c(1)
}
else {
therowy[nodey]<-rowmaps[[nodey]]$backwards[sum(2^bannedpool)/2+1]
}
scoresvec<-scoresmatrices[[nodey]][therowy[nodey],1]
maxallowed<-max(scoresvec)
vectorall[nodey]<-maxallowed+log(sum(exp(scoresvec-maxallowed)))
}
totalall[i]<-totalall[i+1]-vectorx[i+1]+vectorx[i]-base[nodey]+vectorall[nodey]
}
}
if (positionx < nsmall) {
for (i in (positionx+1):nsmall) {
nodey<-permy[i]
if (numparents[x]==0) { #there is only 1 row in the score table
therowx[i]<-c(1)
} else if (i==n) { #no parents allowed, i.e. only first row, only first list
vectorall[x]<-scoretable[[x]][1,1]
therowx[i]<-c(2^numparents[x])
} else {
bannedpool<-which(aliases[[x]]%in%permy[1:i])
if (length(bannedpool)==0) {
therowx[i]<-c(1)
} else {
therowx[i]<-rowmaps[[x]]$backwards[sum(2^bannedpool)/2+1]
}
}
scoresvec<-scoresmatrices[[x]][therowx[i],1]
maxallowed<-max(scoresvec)
vectorx[i]<-maxallowed+log(sum(exp(scoresvec-maxallowed)))
newpos<-i-1
if (newpos==1) {
therowy[nodey]<-c(1)
} else {
bannedpool<-which(aliases[[nodey]]%in%((permy[1:newpos])[-positionx]))
if (numparents[nodey]==0||length(bannedpool)==0) {
therowy[nodey]<-c(1)
} else {
therowy[nodey]<-rowmaps[[nodey]]$backwards[sum(2^bannedpool)/2+1]
}
}
scoresvec<-scoresmatrices[[nodey]][therowy[nodey],1]
maxallowed<-max(scoresvec)
vectorall[nodey]<-maxallowed+log(sum(exp(scoresvec-maxallowed)))
totalall[i]<-totalall[i-1]-vectorx[i-1]+vectorx[i]-base[nodey]+vectorall[nodey]
}
}
totalmax<-max(totalall)
allscore<-totalmax+log(sum(exp(totalall-totalmax)))
maxi<-sample.int(nsmall,1,prob=exp(totalall-allscore))
res<-list()
if (maxi==positionx) {
res$score<-currentscore
res$order<-permy
res$tot<-totalall[positionx]
return(res)
} else if (maxi>positionx) {
newscore<-currentscore
newscore$therow[x]<-therowx[maxi]
newscore$totscores[x]<-vectorx[maxi]
updatenodes<-permy[(positionx+1):maxi]
newscore$therow[updatenodes]<-therowy[updatenodes]
newscore$totscores[updatenodes]<-vectorall[updatenodes]
res$score<-newscore
res$order<-movenode(permy,positionx,maxi,n)
res$tot<-totalall[maxi]
return(res)
} else {
newscore<-currentscore
newscore$therow[x]<-therowx[maxi]
newscore$totscores[x]<-vectorx[maxi]
updatenodes<-permy[maxi:(positionx-1)]
newscore$therow[updatenodes]<-therowy[updatenodes]
newscore$totscores[updatenodes]<-vectorall[updatenodes]
res$score<-newscore
res$order<-movenode(permy,positionx,maxi,n)
res$tot<-totalall[maxi]
return(res)
}
}
## /scratch/gouwar.j/cran-all/cranData/BiDAG/R/orderscore.R
#returns BGe/BDe logscore of an order (maximum version, base neighbourhood)
orderscoreBasemax<-function(n,scorenodes,scorepositions,parenttable,aliases,numparents,rowmaps,scoretable,
maxmatrices,permy){
orderscores<-vector("double",n)
therows<-vector("numeric",n)
k<-1
#first find a row
for (i in scorenodes){
position<-scorepositions[k]
if (position==n){ #no parents allowed, i.e. only first row, only first list
orderscores[i]<-scoretable[[i]][1,1]
therows[i]<-c(2^numparents[i])
}
else {
bannednodes<-permy[1:position]
allowednodes<-permy[(position+1):n]
bannedpool<-which(aliases[[i]]%in%bannednodes)
if (numparents[i]==0||length(bannedpool)==0) {
therows[i]<-c(1)
}
else {
therows[i]<-rowmaps[[i]]$backwards[sum(2^bannedpool)/2+1]
}
orderscores[i]<-maxmatrices$maxmatrix[[i]][therows[i],1]
}
k<-k+1
} # for scorenodes end
scores<-list()
scores$therow<-therows
scores$totscores<-orderscores
return(scores)
}
#returns BGe/BDe logscore of an order (maximum version, plus1 neighbourhood)
orderscorePlus1max<-function(n,scorenodes,scorepositions,parenttable,aliases,numparents,plus1lists,rowmaps,scoretable,
maxmatrices,permy,verbose=FALSE) {
#score nodes should be in the right order
orderscores<-vector("double",n)
allowedscorelists<-vector("list",n)
therows<-vector("integer",n)
k<-1
#first find a row
for (i in scorenodes){
position<-scorepositions[k]
if (position==n){ #no parents allowed, i.e. only first row, only first list
orderscores[i]<-scoretable[[i]][[1]][1,1]
allowedscorelists[[i]]<-c(1)
therows[i]<-c(2^numparents[i])
}
else {
bannednodes<-permy[1:position]
allowednodes<-permy[(position+1):n]
bannedpool<-which(aliases[[i]]%in%bannednodes)
if (numparents[i]==0||length(bannedpool)==0) {
therows[i]<-c(1)
}
else {
therows[i]<-rowmaps[[i]]$backwards[sum(2^bannedpool)/2+1]
}
allowedscorelists[[i]]<-c(1,which(plus1lists$parents[[i]]%in%allowednodes)+1)
maxvec<-maxmatrices$maxmatrix[[i]][therows[i],allowedscorelists[[i]]]
orderscores[i]<-max(maxvec)
}
k<-k+1 #advance the position counter for every scored node
}
scores<-list()
scores$therow<-therows
scores$allowedlists<-allowedscorelists
scores$totscores<-orderscores
return(scores)
}
#returns an order and its BGe/BDe logscore, where the order's score is maximal among all orders obtained by
#putting a given node in every position from 1 to n with all other nodes fixed (plus1 neighbourhood)
positionscorePlus1max<-function(n,nsmall,currentscore,positionx,permy,aliases,rowmaps,plus1lists,
numparents,scoretable,maxmatrices) {
vectorx<-vector(length=nsmall) #scores of node in each place in the order
vectorall<-vector(length=nsmall) #scores for each node when x takes its position
base<-currentscore$totscores
totalall<-vector(length=nsmall) #result vector with log scores of all orders
totalall[positionx]<-sum(base)
x<-permy[positionx] #node for which we search max/sample position
vectorx[positionx]<-base[x] #its score in current permy
allowedlistx<-list()
allowedlisty<-list()
therowx<-vector()
therowy<-vector()
if (positionx>1) {
rightpart<-permy[-positionx]
for (i in (positionx-1):1) {
nodey<-permy[i]
if (numparents[x]==0||i==1) {
therowx[i]<-c(1)
} else {
bannedpool<-which(aliases[[x]]%in%permy[1:(i-1)])
if (length(bannedpool)==0) {
therowx[i]<-c(1)
} else {
therowx[i]<-rowmaps[[x]]$backwards[sum(2^bannedpool)/2+1]
}
}
allowedlistx[[i]]<-c(1,which(plus1lists$parents[[x]]%in%c(permy[(i+1):n],nodey))+1)
vectorx[i]<-max(maxmatrices$maxmatrix[[x]][therowx[i],allowedlistx[[i]]])
newpos<-i+1 #new position of nodey
if(newpos==n) { #no parents allowed, i.e. only first row, only first list
vectorall[nodey]<-scoretable[[nodey]][[1]][1,1]
allowedlisty[[nodey]]<-c(1)
therowy[nodey]<-c(2^numparents[nodey])
} else {
bannedpool<-which(aliases[[nodey]]%in%c(permy[1:(i-1)],x))
if (numparents[nodey]==0||length(bannedpool)==0) {
therowy[nodey]<-c(1)
}
else {
therowy[nodey]<-rowmaps[[nodey]]$backwards[sum(2^bannedpool)/2+1]
}
if (newpos==n) {allowedlisty[[nodey]]<-c(1)} else {
allowedlisty[[nodey]]<-c(1,which(plus1lists$parents[[nodey]]%in%rightpart[(newpos):(n-1)])+1)
}
vectorall[nodey]<-max(maxmatrices$maxmatrix[[nodey]][therowy[nodey],allowedlisty[[nodey]]])
}
totalall[i]<-totalall[i+1]-vectorx[i+1]+vectorx[i]-base[nodey]+vectorall[nodey]
}
}
if (positionx<nsmall) {
for (i in (positionx+1):nsmall) {
nodey<-permy[i]
if (numparents[x]==0) { #there is only 1 row in the score table
therowx[i]<-c(1)
} else if (i==n) { #no parents allowed, i.e. only first row, only first list
vectorall[x]<-scoretable[[x]][[1]][1,1]
therowx[i]<-c(2^numparents[x])
} else {
bannedpool<-which(aliases[[x]]%in%permy[1:i])
if (length(bannedpool)==0) {
therowx[i]<-c(1)
} else {
therowx[i]<-rowmaps[[x]]$backwards[sum(2^bannedpool)/2+1]
}
}
if (i==n) { allowedlistx[[i]]<-c(1)} else {
allowedlistx[[i]]<-c(1,which(plus1lists$parents[[x]]%in%permy[(i+1):n])+1)
}
vectorx[i]<-max(maxmatrices$maxmatrix[[x]][therowx[i],allowedlistx[[i]]])
newpos<-i-1
if (newpos==1) {
therowy[nodey]<-c(1)
} else {
bannedpool<-which(aliases[[nodey]]%in%((permy[1:newpos])[-positionx]))
if (numparents[nodey]==0||length(bannedpool)==0) {
therowy[nodey]<-c(1)
} else {
therowy[nodey]<-rowmaps[[nodey]]$backwards[sum(2^bannedpool)/2+1]
}
}
allowedlisty[[nodey]]<-c(1,which(plus1lists$parents[[nodey]]%in%c(permy[i:n],x))+1)
vectorall[nodey]<-max(maxmatrices$maxmatrix[[nodey]][therowy[nodey],allowedlisty[[nodey]]])
totalall[i]<-totalall[i-1]-vectorx[i-1]+vectorx[i]-base[nodey]+vectorall[nodey]
}
}
maxall<-max(totalall[1:nsmall])
maxis<-which(totalall==maxall)
if(length(maxis)==1) {
maxi<-maxis
} else {
maxi<-sample(maxis,1)
}
res<-list()
res$totalall<-totalall
if (maxi==positionx) {
res$score<-currentscore
res$order<-permy
return(res)
} else if (maxi>positionx) {
newscore<-currentscore
newscore$therow[x]<-therowx[maxi]
newscore$allowedlists[[x]]<-allowedlistx[[maxi]]
newscore$totscores[x]<-vectorx[maxi]
updatenodes<-permy[(positionx+1):maxi]
newscore$therow[updatenodes]<-therowy[updatenodes]
newscore$allowedlists[updatenodes]<-allowedlisty[updatenodes]
newscore$totscores[updatenodes]<-vectorall[updatenodes]
res$score<-newscore
res$order<-movenode(permy,positionx,maxi,n)
return(res)
} else {
newscore<-currentscore
newscore$therow[x]<-therowx[maxi]
newscore$allowedlists[[x]]<-allowedlistx[[maxi]]
newscore$totscores[x]<-vectorx[maxi]
updatenodes<-permy[maxi:(positionx-1)]
newscore$therow[updatenodes]<-therowy[updatenodes]
newscore$allowedlists[updatenodes]<-allowedlisty[updatenodes]
newscore$totscores[updatenodes]<-vectorall[updatenodes]
res$score<-newscore
res$order<-movenode(permy,positionx,maxi,n)
return(res)
}
}
#returns an order and its BGe/BDe logscore, where the order's score is maximal among all orders obtained by
#putting a given node in every position from 1 to n with all other nodes fixed (base neighbourhood)
positionscorebasemax<-function(n,nsmall,currentscore,positionx,permy,aliases,rowmaps,numparents,scoretable,maxmatrices) {
vectorx<-vector(length=nsmall) #scores of node in each place in the order
vectorall<-vector(length=nsmall) #scores for each node when x takes its position
base<-currentscore$totscores
totalall<-vector(length=nsmall) #result vector with log scores of all orders
totalall[positionx]<-sum(base)
x<-permy[positionx] #node for which we search max/sample position
vectorx[positionx]<-base[x] #its score in current permy
therowx<-vector()
therowy<-vector()
if (positionx>1) {
rightpart<-permy[-positionx]
for (i in (positionx-1):1) {
nodey<-permy[i]
if (numparents[x]==0||i==1) {
therowx[i]<-c(1)
} else {
bannedpool<-which(aliases[[x]]%in%permy[1:(i-1)])
if (length(bannedpool)==0) {
therowx[i]<-c(1)
} else {
therowx[i]<-rowmaps[[x]]$backwards[sum(2^bannedpool)/2+1]
}
}
vectorx[i]<-max(maxmatrices$maxmatrix[[x]][therowx[i],1])
newpos<-i+1 #new position of nodey
if(newpos==n) { #no parents allowed, i.e. only first row, only first list
vectorall[nodey]<-scoretable[[nodey]][1,1]
therowy[nodey]<-c(2^numparents[nodey])
} else {
bannedpool<-which(aliases[[nodey]]%in%c(permy[1:(i-1)],x))
if (numparents[nodey]==0||length(bannedpool)==0) {
therowy[nodey]<-c(1)
}
else {
therowy[nodey]<-rowmaps[[nodey]]$backwards[sum(2^bannedpool)/2+1]
}
vectorall[nodey]<-max(maxmatrices$maxmatrix[[nodey]][therowy[nodey],1])
}
totalall[i]<-totalall[i+1]-vectorx[i+1]+vectorx[i]-base[nodey]+vectorall[nodey]
}
}
if (positionx<nsmall) {
for (i in (positionx+1):nsmall) {
nodey<-permy[i]
if (numparents[x]==0) { #there is only 1 row in the score table
therowx[i]<-c(1)
} else if (i==n) { #no parents allowed, i.e. only first row, only first list
vectorall[x]<-scoretable[[x]][1,1]
therowx[i]<-c(2^numparents[x])
} else {
bannedpool<-which(aliases[[x]]%in%permy[1:i])
if (length(bannedpool)==0) {
therowx[i]<-c(1)
} else {
therowx[i]<-rowmaps[[x]]$backwards[sum(2^bannedpool)/2+1]
}
}
vectorx[i]<-max(maxmatrices$maxmatrix[[x]][therowx[i],1])
newpos<-i-1
if (newpos==1) {
therowy[nodey]<-c(1)
} else {
bannedpool<-which(aliases[[nodey]]%in%((permy[1:newpos])[-positionx]))
if (numparents[nodey]==0||length(bannedpool)==0) {
therowy[nodey]<-c(1)
} else {
therowy[nodey]<-rowmaps[[nodey]]$backwards[sum(2^bannedpool)/2+1]
}
}
vectorall[nodey]<-max(maxmatrices$maxmatrix[[nodey]][therowy[nodey],1])
totalall[i]<-totalall[i-1]-vectorx[i-1]+vectorx[i]-base[nodey]+vectorall[nodey]
}
}
maxall<-max(totalall)
maxis<-which(totalall==maxall)
maxi<-maxis[sample.int(length(maxis),1)]
res<-list()
if (maxi==positionx) {
res$score<-currentscore
res$order<-permy
return(res)
} else if (maxi>positionx) {
newscore<-currentscore
newscore$therow[x]<-therowx[maxi]
newscore$totscores[x]<-vectorx[maxi]
updatenodes<-permy[(positionx+1):maxi]
newscore$therow[updatenodes]<-therowy[updatenodes]
newscore$totscores[updatenodes]<-vectorall[updatenodes]
res$score<-newscore
res$order<-movenode(permy,positionx,maxi,n)
return(res)
} else {
newscore<-currentscore
newscore$therow[x]<-therowx[maxi]
newscore$totscores[x]<-vectorx[maxi]
updatenodes<-permy[maxi:(positionx-1)]
newscore$therow[updatenodes]<-therowy[updatenodes]
newscore$totscores[updatenodes]<-vectorall[updatenodes]
res$score<-newscore
res$order<-movenode(permy,positionx,maxi,n)
return(res)
}
}
#returns an order that differs from the given order only in the position of one node, moving that node from positionx to positiony
movenode<-function(permy,positionx,positiony,n) {
permycut<-permy[-positionx]
if(positiony==1) {
newpermy<-c(permy[positionx],permycut)
} else if (positiony==n) {
newpermy<-c(permycut,permy[positionx])
} else {
newpermy<-c(permycut[1:(positiony-1)],permy[positionx],permycut[(positiony):(n-1)])
}
return(newpermy)
}
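#e.g. moving the element at position 2 to position 4 in a 5-element order:
# movenode(c("a","b","c","d","e"),positionx=2,positiony=4,n=5) returns "a" "c" "d" "b" "e"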
## /scratch/gouwar.j/cran-all/cranData/BiDAG/R/orderscoremax.R
# $Id: combinations.R 1083 2007-03-23 22:53:00Z warnes $
#
##
## From email by Brian D Ripley <[email protected]> to r-help
## dated Tue, 14 Dec 1999 11:14:04 +0000 (GMT) in response to
## Alex Ahgarin <[email protected]>. Original version was
## named "subsets" and was written by Bill Venables.
##
combinations <- function(n, r, v = 1:n, set = TRUE, repeats.allowed=FALSE) {
if(mode(n) != "numeric" || length(n) != 1
|| n < 1 || (n %% 1) != 0) stop("bad value of n")
if(mode(r) != "numeric" || length(r) != 1
|| r < 1 || (r %% 1) != 0) stop("bad value of r")
if(!is.atomic(v) || length(v) < n)
stop("v is either non-atomic or too short")
if( (r > n) & repeats.allowed==FALSE)
stop("r > n and repeats.allowed=FALSE")
if(set) {
v <- unique(sort(v))
if (length(v) < n) stop("too few different elements")
}
v0 <- vector(mode(v), 0)
## Inner workhorse
if(repeats.allowed)
sub <- function(n, r, v)
{
if(r == 0) v0 else
if(r == 1) matrix(v, n, 1) else
if(n == 1) matrix(v, 1, r) else
rbind( cbind(v[1], Recall(n, r-1, v)),
Recall(n-1, r, v[-1]))
}
else
sub <- function(n, r, v)
{
if(r == 0) v0 else
if(r == 1) matrix(v, n, 1) else
if(r == n) matrix(v, 1, n) else
rbind(cbind(v[1], Recall(n-1, r-1, v[-1])),
Recall(n-1, r, v[-1]))
}
sub(n, r, v[1:n])
}
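#usage examples:
# combinations(4,2) #the 6 unordered pairs from 1:4, one per row
# combinations(3,2,letters[1:3],repeats.allowed=TRUE) #with repetition: also "a" "a" etc.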
##
## Original version by Bill Venables and cited by Matthew
## Wiener ([email protected]) in an email to R-help dated
## Tue, 14 Dec 1999 09:11:32 -0500 (EST) in response to
## Alex Ahgarin <[email protected]>
##
##
permutations <- function(n, r, v = 1:n, set = TRUE, repeats.allowed=FALSE)
{
if(mode(n) != "numeric" || length(n) != 1
|| n < 1 || (n %% 1) != 0) stop("bad value of n")
if(mode(r) != "numeric" || length(r) != 1
|| r < 1 || (r %% 1) != 0) stop("bad value of r")
if(!is.atomic(v) || length(v) < n)
stop("v is either non-atomic or too short")
if( (r > n) & repeats.allowed==FALSE)
stop("r > n and repeats.allowed=FALSE")
if(set) {
v <- unique(sort(v))
if (length(v) < n) stop("too few different elements")
}
v0 <- vector(mode(v), 0)
## Inner workhorse
if(repeats.allowed)
sub <- function(n, r, v)
{
if(r==1) matrix(v,n,1) else
if(n==1) matrix(v,1,r) else
{
inner <- Recall(n, r-1, v)
cbind( rep( v, rep(nrow(inner),n) ),
matrix( t(inner), ncol=ncol(inner), nrow=nrow(inner) * n ,
byrow=TRUE )
)
}
}
else
sub <- function(n, r, v)
{
if(r==1) matrix(v,n,1) else
if(n==1) matrix(v,1,r) else
{
X <- NULL
for(i in 1:n)
X <- rbind( X, cbind( v[i], Recall(n-1, r - 1, v[-i])))
X
}
}
sub(n, r, v[1:n])
}
# This function samples one element from a vector, guarding against sample()'s scalar expansion
propersample <- function(x){if(length(x)==1) x else sample(x,1)}
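# Why the length-one case matters: sample(x,1) treats a scalar x as 1:x, so
# sample(10, 1) draws from 1:10, whereas propersample(10) always returns 10.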
compareNA <- function(v1,v2) {
same <- (v1 == v2) | (is.na(v1) & is.na(v2))
same[is.na(same)] <- FALSE
return(same)
}
#checks whether all elements of v1 preceding the first NA are contained in v2
is.subset<-function(v1,v2){
issubset<-TRUE
l<-length(v1)
for (i in 1:l){
if (is.na(v1[i])) {break}
if (!(v1[i] %in% v2)) {issubset<-FALSE; break}
}
return(issubset)
}
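# Illustrative calls (comments only):
# is.subset(c(1,2), c(1,2,3))    # TRUE
# is.subset(c(1,4), c(1,2,3))    # FALSE
# is.subset(c(1,NA,7), c(1,2,3)) # TRUE, elements from the first NA onwards are ignored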
orderbgn<-function(permy,bgnodes) {
if(!is.null(bgnodes)) {
movenodes<-which(permy%in%bgnodes)
newpermy<-permy[-movenodes]
return(c(newpermy,bgnodes))
} else {
return(permy)
}
}
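# Example (comment only): background nodes 1 and 4 are moved to the end
# orderbgn(c(3,1,2,4), bgnodes=c(1,4)) # returns c(3,2,1,4)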
storemaxMCMC<-function(MCMCres,param) {
maxobj<-list()
maxN<-which.max(unlist(MCMCres[[2]]))
if(param$DBN) {
maxobj$DAG<-DBNtransform(MCMCres$maxdag,param)
maxobj$DAGorig<-MCMCres$maxdag
maxobj$order<-order2var(MCMCres[[4]][[maxN]],param$firstslice$labels)
} else {
maxobj$DAG<-MCMCres$maxdag
colnames(maxobj$DAG)<-param$labels
rownames(maxobj$DAG)<-param$labels
maxobj$order<-order2var(MCMCres[[4]][[maxN]],param$labels)
}
maxobj$score<-MCMCres[[2]][[maxN]]
return(maxobj)
}
assignLabels<-function(adjacency,nodelabels){
colnames(adjacency)<-nodelabels
rownames(adjacency)<-nodelabels
return(adjacency)
}
defcolrange<-function(value) {
if(value>0.8) {
return(5)
} else if (value>0.6) {
return(4)
} else if (value>0.4) {
return(3)
} else if (value>0.2) {
return(2)
} else {
return(1)
}
}
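# Example (comment only): defcolrange(0.75) returns 4, the fourth of five colour bands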
#checks the startorder; if NULL, generates a random order of the right length
checkstartorder<-function(order,varnames,mainnodes,bgnodes,
DBN=FALSE, split=FALSE) {
matsize<-length(varnames) #maximum length of the startorder
nsmall<-length(mainnodes)#minimum length of the startorder
errortext<-"ok"
errorflag<-0
bgn<-length(bgnodes)
n<-nsmall+bgn
lo<-length(order)
error1<-"startorder should contain either variable names or their respective indices in the data object!"
error2<-"the variables (or indices) in the startorder should be similar to variable names (or their indices in the data object)!"
error3<-"DBN samestruct=FALSE should have following format: vector of length equal to
at 2*nsmall or 2*nsmall+bgn, where nsmall=number of dynamic variables and bgn=number of static variables.
The first half of the order should contain indices (bgn+1):(nsmall+bgn) representing the order of
variables in the initial time slice, the second half represents the order of variables
in any other time slice should contain indices (nsmall+bgn+1):(2*nsmall+bgn)"
if(!DBN) { #usual BN
if(is.null(order)) {
order<-c(sample(mainnodes,nsmall,replace=FALSE),bgnodes)
} else {
if(all(is.character(order))) {#convert to indices
order<-match(order,varnames)
}
if(any(is.na(order))) {
errortext<-error2
errorflag<-2
} else if(!all(is.numeric(order))) {
errortext<-error1
errorflag<-1
} else if (!setequal(order,mainnodes) & !setequal(order,c(1:n))) {
errortext<-error2
errorflag<-2
} else {#startorder is defined correctly
if(lo==n) {
order<-orderbgn(order,bgnodes)#need to put bgnodes to the end
}
else {
order<-c(order,bgnodes)#or attach them if they are missing
}
}
}
} else {#DBN
if(split) {#DBN: initial and transition structure (internal edges) are different
if(is.null(order)) {#need to define initial and transition order
order<-list()
#we change the indices of the variables, converting to the internal representation for DBNs
order$init<-c(sample(mainnodes-bgn,nsmall,replace=FALSE),bgnodes+nsmall)
order$trans<-c(sample(mainnodes-bgn,nsmall,replace=FALSE),1:n+nsmall)
} else {
if(all(is.character(order))) {#convert to indices
order<-match(order,varnames)
}
mainall<-c(mainnodes,mainnodes+nsmall)
if(any(is.na(order))) {
errortext<-error2
errorflag<-2
} else if(!all(is.numeric(order))) {
errortext<-error1
errorflag<-1
} else if (!setequal(order,c(1:matsize)) & !setequal(order,mainall)) {
errortext<-error2
errorflag<-2
} else {
order.init<-order[1:nsmall]-bgn #get order for initial structure
order.trans<-order[1:nsmall+nsmall]-nsmall-bgn #get order for transition structure
if(!setequal(order.init,c(1:nsmall)) | !setequal(order.trans,c(1:nsmall))) {
errortext<-error3
errorflag<-3
} else {#startorder is defined correctly
order<-list()
order$init<-c(order.init,bgnodes+nsmall)
order$trans<-c(order.trans,1:n+nsmall)
}
}
}
} else {#DBN: initial and transition structure (internal edges) are the same
if(is.null(order)) {
#we change the indices of the variables, converting to the internal representation
order<-c(sample(mainnodes,nsmall,replace=FALSE)-bgn,bgnodes+nsmall)
} else {
if(all(is.character(order))) {
order<-match(order,varnames)
}
if (!setequal(order,mainnodes) & !setequal(order,c(1:n))) {
errortext<-error2
errorflag<-2
} else {#startorder is defined correctly
if(lo==n) {
order<-orderbgn(order,bgnodes)
#we change the indices of the variables, converting to the internal representation
if(bgn>0) {
order<-c(order[1:nsmall]-bgn,order[1:bgn+nsmall]+nsmall)
}
} else {
order<-c(order-bgn,bgnodes+nsmall)
}
}
}
}
}
res<-list()
res$errorflag<-errorflag
res$order<-order
res$errortext<-errortext
return(res)
}
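# A minimal sketch for the usual BN case (hypothetical variable names, comments only):
# res<-checkstartorder(c("B","A","C"),varnames=c("A","B","C"),mainnodes=c(1:3),bgnodes=NULL)
# res$order     # c(2,1,3), the names converted to indices
# res$errorflag # 0, meaning the startorder passed all checks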
#transform numeric order into varnames
order2var<-function(order,varnames) {
return(varnames[order])
}
transp<-function(x) {
if(is.matrix(x)) {
return(t(x))
} else {
return(t(as.matrix(x)))
}
}
|
/scratch/gouwar.j/cran-all/cranData/BiDAG/R/other.R
|
#'Computing score tables
#'
#'This function computes the score tables which can be further used by structure learning functions
#' @param scorepar an object of class \code{scoreparameters}, containing the data and score parameters, see constructor function \code{\link{scoreparameters}}
#' @param alpha numerical significance value in \code{(0,1)} for the conditional independence tests at the PC algorithm stage (by default \eqn{0.4} for \eqn{n<50}, \eqn{20/n} for \eqn{n>50})
#' @param hardlimit integer, limit on the size of parent sets in the search space; 14 by default
#' @param plus1 logical, if TRUE (default) the search is performed on the extended search space
#' @param cpdag logical, if TRUE the CPDAG returned by the PC algorithm will be used as the search
#'space, if FALSE (default) the full undirected skeleton will be used as the search space
#' @param startspace (optional) a square matrix, of dimensions equal to the number of nodes, which defines the search space for the order MCMC in the form of an adjacency matrix. If NULL, the skeleton obtained from the PC-algorithm will be used. If \code{startspace[i,j]} equals 1 (0), the edge from node \code{i} to node \code{j} is included in (excluded from) the search space. To include an edge in both directions, both \code{startspace[i,j]} and \code{startspace[j,i]} should be 1.
#' @param blacklist (optional) a square matrix, of dimensions equal to the number of nodes, which defines edges to exclude from the search space. If \code{blacklist[i,j]} equals 1, the edge from node \code{i} to node \code{j} is excluded from the search space.
#' @param verbose logical, if TRUE messages about the algorithm's progress will be printed, FALSE by default
#' @return Object of class \code{scorespace}, a list of three objects: 'adjacency' matrix representing the search space, 'blacklist' used to exclude edges from the search space and 'tables' containing the score quantities for each node
#' needed to run MCMC schemes
#'@references Friedman N and Koller D (2003). A Bayesian approach to structure discovery in Bayesian networks. Machine Learning 50, 95-125.
#'@examples
#' #find a MAP DAG with search space defined by PC and plus1 neighbourhood
#'Bostonscore<-scoreparameters("bge",Boston)
#'Bostonspace<-scorespace(Bostonscore, 0.05, 14)
#'\dontrun{
#'orderfit<-orderMCMC(Bostonscore, scoretable=Bostonspace)
#'partitionfit<-partitionMCMC(Bostonscore, scoretable=Bostonspace)
#'}
#'@author Polina Suter, Jack Kuipers
#'@export
scorespace<-function(scorepar, alpha=0.05, hardlimit=14, plus1=TRUE, cpdag=TRUE,
startspace=NULL, blacklist=NULL, verbose=FALSE) {
result<-list()
n<-scorepar$n
nsmall<-scorepar$nsmall
matsize<-ifelse(scorepar$DBN,n+nsmall,n)
#defining startorder and updatenodes
if(!scorepar$DBN) {
if(scorepar$bgn!=0) {
updatenodes<-c(1:n)[-scorepar$bgnodes]
} else {
updatenodes<-c(1:n)
}
} else { #for DBNs startorder is defined in main.R
updatenodes<-c(1:nsmall)
}
#creating blacklist objects
if (is.null(blacklist)) {
blacklist<-matrix(0,nrow=matsize,ncol=matsize)
}
diag(blacklist)<-1
if(!is.null(scorepar$bgnodes)) {
for(i in scorepar$bgnodes) {
blacklist[,i]<-1
}
}
#defining startskel
if (is.null(startspace)){
startspace<-definestartspace(alpha,scorepar,cpdag=cpdag,algo="pc")
}
startskel<-1*(startspace&!blacklist)
blacklistparents<-list()
for (i in 1:matsize) {
blacklistparents[[i]]<-which(blacklist[,i]==1)
}
if(verbose) {
cat(paste("maximum parent set size is", max(apply(startskel,2,sum))),"\n")
}
if(max(apply(startskel,2,sum))>hardlimit) {
stop("the size of maximal parent set is higher that the hardlimit; redifine the search space or increase the hardlimit!")
}
ptab<-listpossibleparents.PC.aliases(startskel,isgraphNEL=FALSE,n,updatenodes)
if (verbose) {
cat("skeleton ready \n")
flush.console()
}
if (plus1==FALSE) {
parenttable<-ptab$parenttable # basic parenttable without plus1 lists
aliases<-ptab$aliases #aliases for each node, since nodes in the parent tables are indexed 1,2,3,... rather than named after the real parents
numberofparentsvec<-ptab$numberofparentsvec
numparents<-ptab$numparents
rowmaps<-parentsmapping(parenttable,numberofparentsvec,n,updatenodes)
scoretable<-scorepossibleparents.alias(parenttable,aliases,n,scorepar,updatenodes,rowmaps,
numparents,numberofparentsvec)
} else {
parenttable<-ptab$parenttable # basic parenttable without plus1 lists
aliases<-ptab$aliases #aliases for each node, since nodes in the parent tables are indexed 1,2,3,... rather than named after the real parents
numberofparentsvec<-ptab$numberofparentsvec
numparents<-ptab$numparents
plus1lists<-PLUS1(matsize,aliases,updatenodes,blacklistparents)
rowmaps<-parentsmapping(parenttable,numberofparentsvec,n,updatenodes)
scoretable<-scorepossibleparents.PLUS1(parenttable,plus1lists,n,scorepar,updatenodes,
rowmaps,numparents,numberofparentsvec)
}
colnames(blacklist)<-rownames(blacklist)<-colnames(startskel)
result$tables<-scoretable
result$blacklist<-blacklist
result$adjacency<-startskel
class(result)<-"scorespace"
result
}
|
/scratch/gouwar.j/cran-all/cranData/BiDAG/R/othercnstr.R
|
#implements partition MCMC scheme for structure learning problem, searches in plus1 neighbourhood of a search space defined
#by aliases and scoretables
partitionMCMCplus1<-function(n,nsmall,startpermy,startparty,iterations,stepsave,parenttable,scoretable,scoretab,
aliases,scoresneeded,scoresallowed,plus1lists, rowmapsneeded,rowmapsallowed,
needednodetable,numberofparentsvec,numberofpartitionparentsvec,needednodebannedrow,
neededparentsvec,moveprobs,bgnodes,matsize,chainout=TRUE,compress=TRUE) {
result<-list()
if(!is.null(bgnodes)) {
mainnodes<-c(1:matsize)[-bgnodes]
} else {mainnodes<-c(1:matsize)}
currentpermy<-startpermy[1:nsmall] #starting permutation
currentparty<-startparty #starting partition
currentposy<-parttolist(nsmall,currentparty) #create a list of which nodes are in which partition element
currentpartitionscores<-partitionscoreplus1(n,nsmall,currentpermy,c(1:nsmall),bgnodes,parenttable,aliases,scoretab,plus1lists,rowmapsneeded,
rowmapsallowed,scoresneeded,scoresallowed,
currentpermy,currentparty,currentposy) #starting score of all DAGs compatible with the starting permutation and partition
currenttotallogscore<-sum(currentpartitionscores$totscores[mainnodes]) #log total score of all DAGs in the starting partition and permutation
currentDAG<-samplescore.partition.plus1(matsize,mainnodes,currentpartitionscores,scoretable,
scoresallowed,scoresneeded,scoretab,
parenttable,needednodetable,
numberofparentsvec,needednodebannedrow,
numberofpartitionparentsvec,plus1lists) #log score of a single sampled DAG from the partition and permutation
L1 <- list() #stores the adjacency matrix of a DAG sampled from the partition and permutation
L2 <- vector() #stores the log BGe score of a DAG sampled from the partition and permutation
L3 <- vector() #stores the log BGe score of the entire partition following the permutation
L4 <- list() #stores the permutations
L5 <- list() #stores the partitions
zlimit<- floor(iterations/stepsave) + 1 # number of outer iterations
length(L1) <- zlimit
length(L2) <- zlimit
length(L3) <- zlimit
length(L4) <- zlimit
length(L5) <- zlimit
L1[[1]]<-currentDAG$incidence #starting DAG adjacency matrix
L2[1]<-currentDAG$logscore #starting DAG score
L3[1]<-currenttotallogscore #starting partition score
L4[[1]]<-currentpermy #starting permutation
L5[[1]]<-currentparty #starting partition
maxdag<-currentDAG$incidence
maxscore<-L2[1]
# Set some flags for when we need to recalculate neighbourhoods
permdiffelemflag<-1
permneighbourflag<-1
partstepflag<-1
partjoinholeflag<-1
for (z in 2:zlimit){ #the MCMC chain loop with 'iteration' steps is in two parts; starting at 2 keeps the stored starting values in slot 1
count<-1
while (count <=stepsave){ #since we only save the results to the lists each 'stepsave'
chosenmove<-sample.int(5,1,prob=moveprobs) # sample what type of move
if(chosenmove<3){ # if it is <3 then we swap two elements
if(length(currentparty)>1){ # if the partition only has one element then we cannot move
switch(as.character(chosenmove),
"1"={ # swap any two elements from diffent partition elements
if(permdiffelemflag>0){ # do we need to recalculate the neighbourhood?
permdiffelemposs<-parttopermdiffelemposs(nsmall,currentparty)
permdiffelemflag<-0
}
temp<-swapdiffelementnodes(nsmall,currentparty,currentposy,currentpermy,permdiffelemposs)
proposedpermy<-temp[[1]]
rescorenodes<-temp[[2]]
scorepositions<-temp[[3]]
},
"2"={ # swap any elements in adjacent partition elements
if(permneighbourflag>0){ # do we need to recalculate the neighbourhood?
permneighbourposs<-parttopermneighbourposs(nsmall,currentparty)
permneighbourflag<-0
}
temp<-swapadjacentnodes(nsmall,currentparty,currentposy,currentpermy,permneighbourposs)
proposedpermy<-temp[[1]]
rescorenodes<-temp[[2]]
scorepositions<-temp[[3]]
},
{# if neither is chosen, we have a problem
cat("The move sampling has failed! \n")
})
proposedpartitionrescored<-partitionscoreplus1(n,nsmall,rescorenodes,scorepositions,bgnodes,parenttable,aliases,scoretab,plus1lists,rowmapsneeded,rowmapsallowed,scoresneeded,scoresallowed,
proposedpermy,currentparty,currentposy) #their scores
proposedtotallogscore<-currenttotallogscore-sum(currentpartitionscores$totscores[rescorenodes])+sum(proposedpartitionrescored$totscores[rescorenodes]) #and the new log total score by updating only the necessary nodes
scoreratio<-exp(proposedtotallogscore-currenttotallogscore) #acceptance probability
count<-count+1
if(runif(1)<scoreratio){ #Move accepted then set the current permutation and scores to the proposal
currentpermy<-proposedpermy
currentpartitionscores$neededrow[rescorenodes]<-proposedpartitionrescored$neededrow[rescorenodes]
currentpartitionscores$allowedrow[rescorenodes]<-proposedpartitionrescored$allowedrow[rescorenodes]
currentpartitionscores$plus1allowedlists[rescorenodes]<-proposedpartitionrescored$plus1allowedlists[rescorenodes]
currentpartitionscores$plus1neededlists[rescorenodes]<-proposedpartitionrescored$plus1neededlists[rescorenodes]
currentpartitionscores$totscores[rescorenodes]<-proposedpartitionrescored$totscores[rescorenodes]
currenttotallogscore<-proposedtotallogscore
}
}
} else if(chosenmove<5) { # we move in the space of partitions
switch(as.character(chosenmove),
"3"={ # we split a partition element or join one
if(partstepflag>0){ # do we need to recalculate the neighbourhood?
currentpartstepposs<-partysteps(nsmall,currentparty)
currentpartstepnbhood<-sum(currentpartstepposs)
partstepflag<-0
}
temp<-partitionsplitorjoin(nsmall,currentparty,currentposy,currentpermy,currentpartstepposs)
proposedparty<-temp[[1]]
proposedposy<-temp[[2]]
proposedpermy<-temp[[3]]
rescorenodes<-temp[[4]]
proposedpartstepposs<-temp[[5]]
scorepositions<-temp[[6]]
proposedpartstepnbhood<-sum(proposedpartstepposs)
},
"4"={ # we move a single node into another partition element or into a new one
if(partjoinholeflag>0){ # do we need to recalculate the neighbourhood?
currentpartjoinposs<-partyjoin(nsmall,currentparty,currentposy)
currentpartjoinnbhood<-sum(currentpartjoinposs)
currentpartholeposs<-partyhole(nsmall,currentparty,currentposy)
currentpartholenbhood<-sum(currentpartholeposs)
partjoinholeflag<-0
}
joinorhole<-sample.int(2,1,prob=c(currentpartjoinnbhood,currentpartholenbhood)) # choose the type of move
switch(as.character(joinorhole),
"1"={ # we join the node to another partition element
temp<-joinnode(nsmall,currentparty,currentposy,currentpermy,currentpartjoinposs)
},
"2"={ # we place the node in a new partition element
temp<-holenode(nsmall,currentparty,currentposy,currentpermy,currentpartholeposs)
},
{# if nothing is chosen, we have a problem
cat("The move sampling has failed! \n")
})
proposedparty<-temp[[1]]
proposedposy<-temp[[2]]
proposedpermy<-temp[[3]]
rescorenodes<-temp[[4]]
scorepositions<-temp[[5]]
# these neighbourhoods should be updated for efficiency
proposedpartjoinposs<-partyjoin(nsmall,proposedparty,proposedposy)
proposedpartjoinnbhood<-sum(proposedpartjoinposs)
proposedpartholeposs<-partyhole(nsmall,proposedparty,proposedposy)
proposedpartholenbhood<-sum(proposedpartholeposs)
},
{# if nothing is chosen, we have a problem
cat("The move sampling has failed! \n")
})
proposedpartitionrescored<-partitionscoreplus1(n,nsmall,rescorenodes,scorepositions,bgnodes,parenttable,aliases,scoretab,plus1lists,rowmapsneeded,rowmapsallowed,scoresneeded,scoresallowed,
proposedpermy,proposedparty,proposedposy) #only rescore the necessary nodes
proposedtotallogscore<-currenttotallogscore-sum(currentpartitionscores$totscores[rescorenodes])+sum(proposedpartitionrescored$totscores[rescorenodes]) #and calculate the new log total score by updating only the necessary nodes
count<-count+1
scoreratio<-exp(proposedtotallogscore-currenttotallogscore)
switch(as.character(chosenmove),
"3"={ # we split a partition element or joined one
scoreratio<-scoreratio*(currentpartstepnbhood/proposedpartstepnbhood) # neighbourhood correction
},
"4"={# we moved a single node
scoreratio<-scoreratio*((currentpartjoinnbhood+currentpartholenbhood)/(proposedpartjoinnbhood+proposedpartholenbhood)) # neighbourhood correction
},
{# if nothing is chosen, we have a problem
cat("The move sampling has failed! \n")
})
if(runif(1)<scoreratio){ #Move accepted then set the current partition and scores to the proposal
currentpermy<-proposedpermy
currentparty<-proposedparty
currentposy<-proposedposy
currentpartitionscores$neededrow[rescorenodes]<-proposedpartitionrescored$neededrow[rescorenodes]
currentpartitionscores$allowedrow[rescorenodes]<-proposedpartitionrescored$allowedrow[rescorenodes]
currentpartitionscores$plus1allowedlists[rescorenodes]<-proposedpartitionrescored$plus1allowedlists[rescorenodes]
currentpartitionscores$plus1neededlists[rescorenodes]<-proposedpartitionrescored$plus1neededlists[rescorenodes]
currentpartitionscores$totscores[rescorenodes]<-proposedpartitionrescored$totscores[rescorenodes]
currenttotallogscore<-proposedtotallogscore
permdiffelemflag<-1 # need to recalculate the permutation possibilities
permneighbourflag<-1 # in principle these could be updated instead
switch(as.character(chosenmove),
"3"={ # we split a partition element or joined one
partjoinholeflag<-1
currentpartstepposs<-proposedpartstepposs
currentpartstepnbhood<-proposedpartstepnbhood
},
"4"={# we made a different partition move?
partstepflag<-1
currentpartjoinposs<-proposedpartjoinposs
currentpartjoinnbhood<-proposedpartjoinnbhood
currentpartholeposs<-proposedpartholeposs
currentpartholenbhood<-proposedpartholenbhood
},
{# if nothing is chosen, we have a problem
cat("The move sampling has failed! \n")
})
}
}
}
currentDAG<-samplescore.partition.plus1(matsize,mainnodes,currentpartitionscores,
scoretable,scoresallowed,scoresneeded,scoretab,parenttable,needednodetable,numberofparentsvec,
needednodebannedrow,numberofpartitionparentsvec,plus1lists)
if(chainout) {
if(compress) {
L1[[z]]<-Matrix(currentDAG$incidence,sparse=TRUE) #store compressed adjacency matrix of a sampled DAG each 'stepsave'
} else {
L1[[z]]<-currentDAG$incidence #store adjacency matrix of a sampled DAG each 'stepsave'
}
}
L2[z]<-currentDAG$logscore #and its log score
L3[z]<-currenttotallogscore #store the current total partition score
L4[[z]]<-currentpermy[1:nsmall] #store current permutation each 'stepsave'
L5[[z]]<-currentparty #store current partition each 'stepsave'
if(L2[z]>maxscore) {
maxscore<-L2[z]
maxdag<-currentDAG$incidence
}
}
result$incidence<-L1
result$DAGscores<-L2
result$partitionscores<-L3
result$order<-L4
result$partition<-L5
result$maxscore<-maxscore
result$maxdag<-Matrix(maxdag,sparse=TRUE)
return(result)
}
|
/scratch/gouwar.j/cran-all/cranData/BiDAG/R/partitionMCMC.R
|
#implements partition MCMC scheme for structure learning problem, searches in plus1 neighbourhood of a search space defined
#by startspace or iterativeMCMC function
#authors Polina Suter, Jack Kuipers, partly derived from <doi:10.1080/01621459.2015.1133426>
partitionMCMCplus1sample<-function(param,startspace,blacklist=NULL,moveprobs,numit,stepsave,
startorder=NULL,scoretable=NULL,DAG,gamma=1,verbose=TRUE,
scoreout=FALSE,chainout=TRUE,compress=TRUE,alpha=NULL){
MCMCtraces<-list()
n<-param$n
nsmall<-param$nsmall
matsize<-ifelse(param$DBN,n+nsmall,n)
if(!param$DBN) {
if(param$bgn!=0) {
updatenodes<-c(1:n)[-param$bgnodes]
} else {
updatenodes<-c(1:n)
}
} else {
updatenodes<-c(1:nsmall)
}
if(is.null(startspace) & is.null(scoretable)) {
if(verbose) cat("defining a search space with iterativeMCMC \n")
searchspace<-iterativeMCMC(scorepar=param,moveprobs=NULL,plus1it=NULL,
iterations=NULL,stepsave=NULL,softlimit=9,hardlimit=14,
verbose=verbose,chainout=FALSE,scoreout=TRUE,
gamma=gamma,cpdag=FALSE,mergetype="skeleton",
blacklist=blacklist,alpha=alpha,alphainit = 0.01)
startspace<-searchspace$scoretable$adjacency
if(param$DBN) {
maxDAG<-DBNbacktransform(searchspace$DAG,param)
} else {
maxDAG<-searchspace$DAG
scoretable<-searchspace$scoretable$tables
}
if(!is.matrix(maxDAG)) maxDAG<-as.matrix(maxDAG)
if(!is.null(param$bgnodes)) {
forpart<-DAGtopartition(param$nsmall,maxDAG[updatenodes,updatenodes])
forpart$permy<-updatenodes[forpart$permy]
} else {
forpart<-DAGtopartition(n,maxDAG)
}
if (is.null(blacklist)) {
blacklist<-matrix(0,nrow=matsize,ncol=matsize)
}
diag(blacklist)<-1
if(!is.null(param$bgnodes)) {
for(i in param$bgnodes) {
blacklist[,i]<-1
}
}
blacklistparents<-list()
for (i in 1:matsize) {
blacklistparents[[i]]<-which(blacklist[,i]==1)
}
} else {
if (is.null(blacklist)) {
blacklist<-matrix(0,nrow=matsize,ncol=matsize)
}
diag(blacklist)<-1
if(!is.null(param$bgnodes)) {
for(i in param$bgnodes) {
blacklist[,i]<-1
}
}
if(is.null(scoretable)) {
startspace<-1*(startspace&!blacklist)
} else {
startspace<-scoretable$adjacency
blacklist<-scoretable$blacklist
scoretable<-scoretable$tables
}
blacklistparents<-list()
for (i in 1:matsize) {
blacklistparents[[i]]<-which(blacklist[,i]==1)
}
if (is.null(DAG)) {
forpart<-list()
if(is.null(param$bgnodes)) {
forpart$permy<-c(1:n)
forpart$party<-c(n)
updatenodes<-c(1:n)
} else {
forpart$permy<-c(1:n)[-param$bgnodes]
forpart$party<-c(param$nsmall)
updatenodes<-c(1:n)[-param$bgnodes]
}
} else {
if(!is.matrix(DAG)) DAG<-as.matrix(DAG)
if(!is.null(param$bgnodes)) {
forpart<-DAGtopartition(param$nsmall,DAG[updatenodes,updatenodes])
forpart$permy<-updatenodes[forpart$permy]
} else {
forpart<-DAGtopartition(n,DAG)
}
}
}
if(verbose) cat("core space defined, score table are being computed \n")
permy<-forpart$permy
party<-forpart$party
#starttable<-Sys.time()
ptab<-listpossibleparents.PC.aliases(startspace,isgraphNEL=FALSE,n,updatenodes=updatenodes)
parenttable<-ptab$parenttable
aliases<-ptab$aliases
numberofparentsvec<-ptab$numberofparentsvec
numparents<-ptab$numparents
plus1lists<-PLUS1(n,aliases,updatenodes,blacklistparents)
rowmapsallowed<-parentsmapping(parenttable,numberofparentsvec,n,updatenodes)
starttable<-Sys.time()
if (is.null(scoretable)) {
scoretable<-scorepossibleparents.PLUS1(parenttable,plus1lists,n,param,updatenodes,
rowmapsallowed,numparents,numberofparentsvec)
}
scoretab<-list()
for (i in updatenodes) {
scoretab[[i]]<-matrix(sapply(scoretable[[i]], unlist),nrow=nrow(scoretable[[i]][[1]]))
}
posetparenttable<-poset(ptab$parenttable,ptab$numberofparentsvec,rowmapsallowed,n,updatenodes=updatenodes)
plus1allowedpart<-plus1allowed.partition(posetparenttable,scoretable,numberofparentsvec,rowmapsallowed,
n,plus1lists=plus1lists,numparents,updatenodes)
needednodetable<-partitionlist(parenttable,ptab$numberofparentsvec,n,updatenodes=updatenodes)
numberofpartitionparentsvec<-partitionlistnumberofparents(needednodetable,ptab$numberofparentsvec,
n,updatenodes)
needednodebannedrow<-partitionmapneedednodebannedrow(ptab$numparents,ptab$numberofparentsvec,n,
updatenodes)
rowmapsneeded<-neededparentsmapping(parenttable,ptab$numberofparentsvec,needednodetable,
numberofpartitionparentsvec,needednodebannedrow,n,updatenodes)
neededposetparents<-needed.poset(ptab$parenttable,ptab$numberofparentsvec,
needednodebannedrow,rowmapsneeded,n,updatenodes)
neededposetparenttable<-lapply(neededposetparents,function(x)x$table)
neededposetparentsvec<-lapply(neededposetparents,function(x)x$sizes)
plus1neededpart<-plus1needed.partition(ptab$numparents,parenttable,neededposetparenttable,neededposetparentsvec,
ptab$numberofparentsvec,rowmapsallowed,needednodebannedrow,scoretable,
plus1lists,n,updatenodes)
endtable<-Sys.time()
if(verbose) cat("score tables calculated, partition MCMC is running \n")
partres<-partitionMCMCplus1(n,param$nsmall,permy,party,numit,stepsave,parenttable,scoretable,scoretab,
aliases,plus1neededpart,plus1allowedpart,plus1lists,rowmapsneeded,rowmapsallowed,
needednodetable,ptab$numberofparentsvec,
numberofpartitionparentsvec,needednodebannedrow,
neededposetparentsvec,moveprobs,param$bgnodes,matsize=matsize,chainout=TRUE,compress=compress)
endmcmc<-Sys.time()
if(param$DBN) {
MCMCchain<-lapply(partres$incidence,function(x)DBNtransform(x,param=param))
MCMCtraces$incidence<-MCMCchain
} else {
MCMCtraces$incidence<-lapply(partres$incidence,function(x)assignLabels(x,param$labels))
}
MCMCtraces$DAGscores<-partres$DAGscores
MCMCtraces$partitionscores<-partres$partitionscores
MCMCtraces$order<-partres$order
MCMCtraces$partition<-partres$partition
maxobj<-storemaxMCMC(partres,param)
maxN<-which.max(unlist(partres[[2]]))
result<-list()
result$info<-list()
tabletime<-endtable-starttable
if(units(tabletime)=="mins") {
tabletime<-as.numeric(tabletime*60)
}
mcmctime<-endmcmc-endtable
if(units(mcmctime)=="mins") {
mcmctime<-as.numeric(mcmctime*60)
}
result$info$runtimes<-c(tabletime,mcmctime)
names(result$info$runtimes)<-c("scoretables","MCMCchain")
result$trace<-MCMCtraces$DAGscores
MCMCtraces$DAGscores<-NULL
result$traceadd<-MCMCtraces
result$DAG<-maxobj$DAG
result$CPDAG<-graph2m(dag2cpdag(m2graph(result$DAG)))
result$score<-maxobj$score
result$maxorder<-maxobj$order
if(scoreout) {
result$scoretable<-list()
result$scoretable$adjacency<-startspace
result$scoretable$tables<-scoretable
result$scoretable$blacklist<-blacklist
attr( result$scoretable,"class")<-"scorespace"
}
attr(result,"class")<-"partitionMCMC"
return(result)
}
|
/scratch/gouwar.j/cran-all/cranData/BiDAG/R/partitionMCMCmain.R
|
#The code is partly derived from the order MCMC implementation from Kuipers J, Moffa G (2017) <doi:10.1080/01621459.2015.1133426>
#This function gives the sum of scores compatible with a given partition
partitionscore<-function(n,scorenodes,parenttable,scoretable,permy,party,posy) {
partitionscores<-rep(0,n)
allscores<-vector("list",n)
allowedscorerows<-vector("list",n)
m<-length(party)
tablesize<-dim(parenttable[[1]]) # just to remove some arguments
for (i in scorenodes) {
position<-which(permy==i)
partyelement<-posy[position]
if(partyelement==m) {# no parents are allowed
partitionscores[i]<-scoretable[[i]][1,1]
allscores[[i]]<-partitionscores[i] # there is only one score
allowedscorerows[[i]]<-c(1) # there is only one score
} else {
bannednodes<-permy[which(posy<=partyelement)]
requirednodes<-permy[which(posy==(partyelement+1))]
allowedrows<-c(2:tablesize[1]) # first we remove the banned rows
for (j in 1:tablesize[2]){ # working columnwise allows R to speed up
bannedrows<-which(parenttable[[i]][allowedrows,j]%in%bannednodes)
if(length(bannedrows)>0){
allowedrows<-allowedrows[-bannedrows]
}
}
notrequiredrows<-allowedrows
for (j in 1:tablesize[2]){ # now we find the rows lacking any required node
requiredrows<-which(parenttable[[i]][notrequiredrows,j]%in%requirednodes)
if(length(requiredrows)>0){
notrequiredrows<-notrequiredrows[-requiredrows]
}
}
allowedrows<-setdiff(allowedrows,notrequiredrows) # and keep just the difference!
allscores[[i]]<-scoretable[[i]][allowedrows,1]
allowedscorerows[[i]]<-allowedrows
maxallowed<-max(allscores[[i]])
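# log-sum-exp trick: subtracting the maximum avoids numerical underflow when exponentiating large negative log scores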
partitionscores[i]<-maxallowed+log(sum(exp(allscores[[i]]-maxallowed)))
}
}
scores<-list()
scores$allscores<-allscores
scores$allowedrows<-allowedscorerows
scores$totscores<-partitionscores
return(scores)
}
# This function takes in a partition and lists which partition
# element each node belongs to
parttolist<-function(n,party){
posy<-rep(0,n)
kbot<-1
m<-length(party)
for (j in 1:m){
ktop<-kbot+party[j]-1
posy[kbot:ktop]<-j
kbot<-ktop+1
}
return(posy)
}
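# Example (comment only): with 5 nodes and partition (2,3),
# parttolist(5, c(2,3)) # returns c(1,1,2,2,2)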
# This function counts the possible swaps between neighbouring partition elements
# given the partition
parttopermneighbourposs<-function(n,party){
m<-length(party)
possibs<-rep(0,m-1)
if(m>1){
for (i in 1:(m-1)){
possibs[i]<-party[i]*party[i+1]
}
}
return(possibs)
}
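# Example (comment only): for partition (2,3,1) the adjacent-element swap counts are
# parttopermneighbourposs(6, c(2,3,1)) # returns c(2*3, 3*1) = c(6,3)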
# This function counts the possible swaps of nodes in different partition elements
# given the partition
parttopermdiffelemposs<-function(n,party){
m<-length(party)
possibs<-rep(0,m-1)
if(m>1){
remainder<-n
for (i in 1:(m-1)){
remainder<-remainder-party[i]
possibs[i]<-party[i]*remainder
}
}
return(possibs)
}
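# Example (comment only): for partition (2,3,1) of 6 nodes,
# parttopermdiffelemposs(6, c(2,3,1)) # returns c(2*4, 3*1) = c(8,3)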
# This function returns the number of ways a partition can be joined or split
# note that adding an if statement for elemsize=1 seems to make the function slower
partysteps<-function(n,party){
partypossibs<-rep(0,n)
kbot<-1
m<-length(party)
for (j in 1:m){
elemsize<-party[j]
ktop<-kbot+elemsize-1
partypossibs[kbot:ktop]<-choose(elemsize,1:elemsize)
kbot<-ktop+1
}
return(partypossibs[1:(n-1)])
}
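# Example (comment only): with 5 nodes and partition (2,3),
# partysteps(5, c(2,3)) # returns c(choose(2,1:2), choose(3,1:2)) = c(2,1,3,3)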
# This function returns the number of ways a node can be joined to another partition element
partyjoin<-function(n,party,posy){
m<-length(party)
joinpossibs<-rep(0,n)
for(k in 1:n){
joinpossibs[k]<-m-1
nodeselement<-posy[k]
if(party[nodeselement]==1){ #nodes in a partition element of size 1
if(nodeselement<m){
if(party[nodeselement+1]==1){ #and if the next partition element is also size 1
joinpossibs[k]<-m-2 #we only allow them to jump to the left to count the swap only once
}
}
}
}
return(joinpossibs)
}
# This function returns the number of ways a node can be moved to a new partition element
partyhole<-function(n,party,posy){
m<-length(party)
holepossibs<-rep(0,n)
for(k in 1:n){
nodeselement<-posy[k]
if(party[nodeselement]==1){ #nodes in a partition element of size 1 cannot move to the neighbouring holes
holepossibs[k]<-m-1
if(nodeselement<m){
if(party[nodeselement+1]==1){ #and if the next partition element is also size 1
holepossibs[k]<-m-2 #we only allow them to jump to the left to count the swap only once
}
}
} else if(party[nodeselement]==2){ #nodes in a partition element of size 2 cannot move to the hole on the left
holepossibs[k]<-m #since this would count the same splitting twice
} else {
holepossibs[k]<-m+1
}
}
return(holepossibs)
}
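# Illustrative counts (comments only) for party=c(2,1), so posy=c(1,1,2) and n=3:
# partyjoin(3, c(2,1), c(1,1,2)) # returns c(1,1,1)
# partyhole(3, c(2,1), c(1,1,2)) # returns c(2,2,1)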
# this function takes in an adjacency matrix and returns the partition and permutation
DAGtopartition <- function(n,incidence,bgnodes=NULL){
party<-c() # to store the partition
bgn<-length(bgnodes)
permy <- numeric(n-bgn) # to store the permutation
m <- n-bgn # counter
while(m>0){
topnodes<-which(colSums(incidence)==0) # find the outpoints
topmain<-setdiff(topnodes,bgnodes)
incidence[topnodes,]<-0 # remove their edges
incidence[cbind(topnodes,topnodes)]<-1 # add a one to their columns so they are no longer counted
l<-length(topmain) # the number of outpoints
m<-m-l
permy[(m+1):(m+l)]<-topmain
party<-c(l,party)
}
partypermy<-list()
partypermy$party<-party
partypermy$permy<-permy
return(partypermy)
}
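# A minimal sketch (comments only) for the v-structure 1->3<-2 on three nodes:
# inc<-matrix(0,3,3); inc[1,3]<-1; inc[2,3]<-1
# DAGtopartition(3, inc) # returns $party=c(1,2) and $permy=c(3,1,2)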
|
/scratch/gouwar.j/cran-all/cranData/BiDAG/R/partitionfns.R
|
#the original code taken from <doi:10.1080/01621459.2015.1133426>
# swap only nodes in different partition elements # move 1
swapdiffelementnodes<-function(n,currentparty,currentposy,currentpermy,permpossibs){
scorelength<-length(permpossibs)
selectedelement<-sample.int(scorelength,1,prob=permpossibs) # sample an element
leftnodes<-which(currentposy==selectedelement) #nodes in the left partition element
maxleftnodes<-max(leftnodes)
sampledleftnode<-propersample(leftnodes)
sampledrightnode<-propersample(c((maxleftnodes+1):n)) #choose the right node from the remaining ones
sampledelements<-c(sampledleftnode,sampledrightnode) #the sampled pair
rightnodes<-which(currentposy==currentposy[sampledrightnode]) #the remaining nodes on the right
minrightnodes<-min(rightnodes)
centralnodes<-c()
if((maxleftnodes+1)<minrightnodes){ #the nodes in between
centralnodes<-c((maxleftnodes+1):(minrightnodes-1))
}
scorepositions<-c(leftnodes,centralnodes,sampledrightnode) #all these need to be rescored
if(selectedelement>1){ #as well as the partition element further left, if it exists
scorepositions<-c(which(currentposy==(selectedelement-1)),scorepositions)
}
proposedpermy<-currentpermy #create the new permutation
proposedpermy[sampledelements]<-currentpermy[rev(sampledelements)] #by swapping the elements
rescorenodes<-proposedpermy[scorepositions] #map to the correct labels
return(list(proposedpermy,rescorenodes,scorepositions))
}
# swap any two adjacent nodes # move 2
swapadjacentnodes<-function(n,currentparty,currentposy,currentpermy,permpossibs){
scorelength<-length(permpossibs)
selectedelement<-sample.int(scorelength,1,prob=permpossibs) # sample an element
leftnodes<-which(currentposy==selectedelement) #nodes in the left partition element
rightnodes<-which(currentposy==(selectedelement+1)) #and in the right
sampledelements<-c(propersample(leftnodes),propersample(rightnodes)) #sample a pair
scorepositions<-c(leftnodes,sampledelements[2]) #need to rescore the left partition element and just the selected node in the right partition
if(selectedelement>1){ #as well as the partition element further left, if it exists
scorepositions<-c(which(currentposy==(selectedelement-1)),scorepositions)
}
proposedpermy<-currentpermy #create the new permutation
proposedpermy[sampledelements]<-currentpermy[rev(sampledelements)] #by swapping the elements
rescorenodes<-proposedpermy[scorepositions] #map to the correct labels
return(list(proposedpermy,rescorenodes,scorepositions))
}
# Split or join adjacent partition elements # move 3
partitionsplitorjoin<-function(n,currentparty,currentposy,currentpermy,partpossibs){
scorelength<-length(partpossibs)
sampledelement<-sample.int(scorelength,1,prob=partpossibs) # sample an element
# vectors to be updated later
proposedposy<-currentposy
proposedpartposs<-partpossibs
if(currentposy[sampledelement]==currentposy[sampledelement+1]){ #if the next node is in the same partition element we split
involvednodes<-which(currentposy==currentposy[sampledelement]) #the nodes in the partition element to be split
newleftelementsize<-sampledelement-min(involvednodes)+1 #the size of the new partition element on the left
newrightelementsize<-length(involvednodes)-newleftelementsize #and the right
proposedparty<-append(currentparty,newrightelementsize,currentposy[sampledelement]) #create the proposed partition
proposedparty[currentposy[sampledelement]]<-newleftelementsize
# update the list to be slightly more efficient
proposedposy[(sampledelement+1):n]<-currentposy[(sampledelement+1):n]+1
# sample which nodes go to the new left and right partition elements # note that involvednodes must have at least two elements so `sample' should work fine
newleftnodes<-sample(involvednodes,newleftelementsize)
newrightnodes<-involvednodes[-which(involvednodes%in%newleftnodes)]
proposedpermy<-currentpermy #and update the proposed permutation according to how the nodes are distributed
proposedpermy[(sampledelement-newleftelementsize+1):sampledelement]<-currentpermy[newleftnodes]
proposedpermy[(sampledelement+1):(sampledelement+newrightelementsize)]<-currentpermy[newrightnodes]
# update the neighbourhood of the new left element
proposedpartposs[(sampledelement-newleftelementsize+1):sampledelement]<-choose(newleftelementsize,1:newleftelementsize)
scorepositions<-c((sampledelement-newleftelementsize+1):sampledelement) #we only need to rescore the nodes in the new left partition element
if(currentposy[sampledelement]>1){ # and the partition element to the left, if it exists
scorepositions<-c(which(currentposy==(currentposy[sampledelement]-1)),scorepositions)
}
if(currentposy[sampledelement]<length(currentparty)){ #update the neighbourhood of the right element
proposedpartposs[(sampledelement+1):(sampledelement+newrightelementsize)]<-choose(newrightelementsize,1:newrightelementsize)
} else { # if the partition element was last then we cut the binomial coefficients short
if(newrightelementsize>1){
proposedpartposs[(sampledelement+1):(sampledelement+newrightelementsize-1)]<-choose(newrightelementsize,1:(newrightelementsize-1))
}
}
rescorenodes<-proposedpermy[scorepositions] #finally map to the correct labels
}
if(currentposy[sampledelement]<currentposy[sampledelement+1]){ #otherwise join partition elements
newelementsize<-currentparty[currentposy[sampledelement]]+currentparty[currentposy[sampledelement+1]]
proposedparty<-currentparty[-currentposy[sampledelement]] #create the proposed partition
proposedparty[currentposy[sampledelement]]<-newelementsize
# update the list to be slightly more efficient
proposedposy[(sampledelement+1):n]<-currentposy[(sampledelement+1):n]-1
proposedpermy<-currentpermy #now changing the permutation has no effect
scorepositions<-which(currentposy==currentposy[sampledelement]) #we only need to rescore the nodes from the old element on the left and the element further left
newelementstart<-min(scorepositions) #store the start of the new element
if(proposedposy[sampledelement]>1){ #include the partition element to the left, if it exists
scorepositions<-c(which(proposedposy==(proposedposy[sampledelement]-1)),scorepositions)
}
if(proposedposy[sampledelement]<length(proposedparty)){ #update the neighbourhood
proposedpartposs[(newelementstart):(newelementstart+newelementsize-1)]<-choose(newelementsize,1:newelementsize)
} else { # if the new element is last we cut the binomial coefficients short
proposedpartposs[(newelementstart):(newelementstart+newelementsize-2)]<-choose(newelementsize,1:(newelementsize-1)) # the new element must have a size of at least two
}
rescorenodes<-proposedpermy[scorepositions] #again map to the correct labels
}
return(list(proposedparty,proposedposy,proposedpermy,rescorenodes,proposedpartposs,
scorepositions))
}
# move a node to a different partition element (following further restrictions) # move 4 part 1
joinnode<-function(n,currentparty,currentposy,currentpermy,joinpossibs){
m<-length(currentparty)
scorelength<-length(joinpossibs)
nodetomove<-sample.int(scorelength,1,prob=joinpossibs) # sample an element
nodeselement<-currentposy[nodetomove]
elementtomoveto<-sample.int(joinpossibs[nodetomove],1) # sample where to go to
if(elementtomoveto==nodeselement){ #if element selected is the same, replace by m which is not otherwise sampled
elementtomoveto<-m
} else if (elementtomoveto==(nodeselement+1)){
if(currentparty[nodeselement]==1){ #if partition element has size one
if(currentparty[nodeselement+1]==1){ #if next partition element is also size one
elementtomoveto<-m-1 #replace the disallowed sample by 'm-1', which will also not be sampled otherwise
}
}
}
proposedparty<-currentparty # update the partition
proposedparty[elementtomoveto]<-currentparty[elementtomoveto]+1
if(currentparty[nodeselement]>1){
proposedparty[nodeselement]<-currentparty[nodeselement]-1
} else {
proposedparty<-proposedparty[-nodeselement]
}
proposedposy<-parttolist(n,proposedparty) #should be updated for efficiency
if(elementtomoveto<nodeselement){
leftnodes<-which(currentposy==elementtomoveto)
newleftnodes<-which(proposedposy==elementtomoveto)
maxleftnodes<-max(leftnodes)
rightnodes<-which(currentposy==nodeselement) # this includes the node which is moved
minrightnodes<-min(rightnodes)
proposedpermy<-currentpermy[-nodetomove]
proposedpermy<-append(proposedpermy,currentpermy[nodetomove],maxleftnodes)
centralnodes<-c()
newcentralnodes<-c()
if((maxleftnodes+1)<minrightnodes){
centralnodes<-c((maxleftnodes+1):(minrightnodes-1))
newcentralnodes<-centralnodes+1
}
newscorepositions<-c(newleftnodes,newcentralnodes) #all these need to be rescored
if(elementtomoveto>1){ #as well as the partition element further left, if it exists
newscorepositions<-c(which(currentposy==(elementtomoveto-1)),newscorepositions)
}
} else {
leftnodes<-which(currentposy==nodeselement) # this includes the node which is moved
if(length(leftnodes)>1) {
newleftnodes<-which(proposedposy==nodeselement)
} else {
newleftnodes<-c()
}
maxleftnodes<-max(leftnodes)
rightnodes<-which(currentposy==elementtomoveto)
minrightnodes<-min(rightnodes)
proposedpermy<-currentpermy[-nodetomove]
newnodepos<-min(rightnodes)-1
proposedpermy<-append(proposedpermy,currentpermy[nodetomove],newnodepos-1)
centralnodes<-c()
newcentralnodes<-c()
if((maxleftnodes+1)<minrightnodes){
centralnodes<-c((maxleftnodes+1):(minrightnodes-1))
newcentralnodes<-centralnodes-1
}
newscorepositions<-c(newleftnodes,newcentralnodes,newnodepos)
if(nodeselement>1){ #as well as the partition element further left, if it exists
newscorepositions<-c(which(currentposy==(nodeselement-1)),newscorepositions)
}
}
newrescorenodes<-proposedpermy[newscorepositions]
return(list(proposedparty,proposedposy,proposedpermy,newrescorenodes,newscorepositions))
}
# move a node to a new partition element (following further restrictions) # move 4 part 2
holenode<-function(n,currentparty,currentposy,currentpermy,holepossibs){
m<-length(currentparty)
scorelength<-length(holepossibs)
nodetomove<-sample.int(scorelength,1,prob=holepossibs) # sample an element
nodeselement<-currentposy[nodetomove]
holetomoveto<-sample.int(holepossibs[nodetomove],1) # sample where to go to
if(currentparty[nodeselement]==1){
if(holetomoveto>=nodeselement){
holetomoveto<-holetomoveto+1 #counts hole when partition element removed
if(nodeselement<m){
if(currentparty[nodeselement+1]==1){ #if next partition element is also size one
holetomoveto<-holetomoveto+1 #shift one hole further along
}
}
}
proposedparty<-currentparty[-nodeselement]
proposedparty<-append(proposedparty,1,holetomoveto-1)
proposedposy<-parttolist(n,proposedparty) #should be updated for efficiency
if(holetomoveto<nodeselement){
leftnodes<-which(proposedposy==holetomoveto) # this should be the node which is moved
maxleftnodes<-max(leftnodes)
rightnodes<-which(proposedposy==nodeselement) # these were to the left of the moved node
minrightnodes<-min(rightnodes)
proposedpermy<-currentpermy[-nodetomove]
proposedpermy<-append(proposedpermy,currentpermy[nodetomove],leftnodes-1)
centralnodes<-c()
if((maxleftnodes+1)<minrightnodes){
centralnodes<-c((maxleftnodes+1):(minrightnodes-1))
}
scorepositions<-c(leftnodes,centralnodes,rightnodes) #all these need to be rescored
if(holetomoveto>1){ #as well as the partition element further left, if it exists
scorepositions<-c(which(currentposy==(holetomoveto-1)),scorepositions)
}
} else {
leftnodes<-which(proposedposy==nodeselement) # these were to the right of the moved node
maxleftnodes<-max(leftnodes)
rightnodes<-which(proposedposy==holetomoveto) # this should be the node that is moved
minrightnodes<-min(rightnodes)
proposedpermy<-currentpermy[-nodetomove]
proposedpermy<-append(proposedpermy,currentpermy[nodetomove],rightnodes-1)
centralnodes<-c()
if((maxleftnodes+1)<minrightnodes){
centralnodes<-c((maxleftnodes+1):(minrightnodes-1))
}
scorepositions<-c(leftnodes,centralnodes,rightnodes) #all these need to be rescored
if(nodeselement>1){ #as well as the partition element further left, if it exists
scorepositions<-c(which(currentposy==(nodeselement-1)),scorepositions)
}
}
} else {
if(currentparty[nodeselement]==2){
if(holetomoveto==nodeselement){
holetomoveto<-m+1
}
}
proposedparty<-currentparty
proposedparty[nodeselement]<-currentparty[nodeselement]-1
proposedparty<-append(proposedparty,1,holetomoveto-1)
proposedposy<-parttolist(n,proposedparty) #should be updated for efficiency
if(holetomoveto<=nodeselement){
leftnodes<-which(proposedposy==holetomoveto) # this is the node which is moved
maxleftnodes<-max(leftnodes)
rightnodes<-which(proposedposy==(nodeselement+1)) # these are the nodes left over
minrightnodes<-min(rightnodes)
proposedpermy<-currentpermy[-nodetomove]
proposedpermy<-append(proposedpermy,currentpermy[nodetomove],leftnodes-1)
centralnodes<-c()
if((maxleftnodes+1)<minrightnodes){
centralnodes<-c((maxleftnodes+1):(minrightnodes-1))
}
scorepositions<-c(leftnodes,centralnodes) #all these need to be rescored
if(holetomoveto>1){ #as well as the partition element further left, if it exists
scorepositions<-c(which(currentposy==(holetomoveto-1)),scorepositions)
}
} else {
leftnodes<-which(proposedposy==nodeselement) # these are the nodes left over
maxleftnodes<-max(leftnodes)
rightnodes<-which(proposedposy==holetomoveto) # this is the node which is moved
minrightnodes<-min(rightnodes)
proposedpermy<-currentpermy[-nodetomove]
proposedpermy<-append(proposedpermy,currentpermy[nodetomove],rightnodes-1)
centralnodes<-c()
if((maxleftnodes+1)<minrightnodes){
centralnodes<-c((maxleftnodes+1):(minrightnodes-1))
}
scorepositions<-c(leftnodes,centralnodes,rightnodes) #all these need to be rescored
if(nodeselement>1){ #as well as the partition element further left, if it exists
scorepositions<-c(which(currentposy==(nodeselement-1)),scorepositions)
}
}
}
rescorenodes<-proposedpermy[scorepositions] #map to the correct labels
return(list(proposedparty,proposedposy,proposedpermy,rescorenodes,scorepositions))
}
# swap only nodes in different partition elements # move 1
swapdiffelementnodes.old<-function(n,currentparty,currentposy,currentpermy,permpossibs){
scorelength<-length(permpossibs)
selectedelement<-sample.int(scorelength,1,prob=permpossibs) # sample an element
leftnodes<-which(currentposy==selectedelement) #nodes in the left partition element
maxleftnodes<-max(leftnodes)
sampledleftnode<-propersample(leftnodes)
sampledrightnode<-propersample(c((maxleftnodes+1):n)) #choose the right node from the remaining ones
sampledelements<-c(sampledleftnode,sampledrightnode) #the sampled pair
rightnodes<-which(currentposy==currentposy[sampledrightnode]) #the remaining nodes on the right
minrightnodes<-min(rightnodes)
centralnodes<-c()
if((maxleftnodes+1)<minrightnodes){ #the nodes in between
centralnodes<-c((maxleftnodes+1):(minrightnodes-1))
}
rescorenodes<-c(leftnodes,centralnodes,sampledrightnode) #all these need to be rescored
if(selectedelement>1){ #as well as the partition element further left, if it exists
rescorenodes<-c(which(currentposy==(selectedelement-1)),rescorenodes)
}
proposedpermy<-currentpermy #create the new permutation
proposedpermy[sampledelements]<-currentpermy[rev(sampledelements)] #by swapping the elements
rescorenodes<-proposedpermy[rescorenodes] #map to the correct labels
scorepositions<-which(proposedpermy%in%rescorenodes)
rescorenodes<-proposedpermy[scorepositions]
return(list(proposedpermy,rescorenodes,scorepositions))
}
# swap any two adjacent nodes # move 2
swapadjacentnodes.old<-function(n,currentparty,currentposy,currentpermy,permpossibs){
scorelength<-length(permpossibs)
selectedelement<-sample.int(scorelength,1,prob=permpossibs) # sample an element
leftnodes<-which(currentposy==selectedelement) #nodes in the left partition element
rightnodes<-which(currentposy==(selectedelement+1)) #and in the right
sampledelements<-c(propersample(leftnodes),propersample(rightnodes)) #sample a pair
rescorenodes<-c(leftnodes,sampledelements[2]) #need to rescore the left partition element and just the selected node in the right partition
if(selectedelement>1){ #as well as the partition element further left, if it exists
rescorenodes<-c(which(currentposy==(selectedelement-1)),rescorenodes)
}
proposedpermy<-currentpermy #create the new permutation
proposedpermy[sampledelements]<-currentpermy[rev(sampledelements)] #by swapping the elements
rescorenodes<-proposedpermy[rescorenodes] #map to the correct labels
scorepositions<-which(proposedpermy%in%rescorenodes)
rescorenodes<-proposedpermy[scorepositions]
return(list(proposedpermy,rescorenodes,scorepositions))
}
# Split or join adjacent partition elements # move 3
partitionsplitorjoin.old<-function(n,currentparty,currentposy,currentpermy,partpossibs){
scorelength<-length(partpossibs)
sampledelement<-sample.int(scorelength,1,prob=partpossibs) # sample an element
# vectors to be updated later
proposedposy<-currentposy
proposedpartposs<-partpossibs
if(currentposy[sampledelement]==currentposy[sampledelement+1]){ #if the next node is in the same partition element we split
involvednodes<-which(currentposy==currentposy[sampledelement]) #the nodes in the partition element to be split
newleftelementsize<-sampledelement-min(involvednodes)+1 #the size of the new partition element on the left
newrightelementsize<-length(involvednodes)-newleftelementsize #and the right
proposedparty<-append(currentparty,newrightelementsize,currentposy[sampledelement]) #create the proposed partition
proposedparty[currentposy[sampledelement]]<-newleftelementsize
# update the list to be slightly more efficient
proposedposy[(sampledelement+1):n]<-currentposy[(sampledelement+1):n]+1
# sample which nodes go to the new left and right partition elements # note that involvednodes must have at least two elements so `sample' should work fine
newleftnodes<-sample(involvednodes,newleftelementsize)
newrightnodes<-involvednodes[-which(involvednodes%in%newleftnodes)]
proposedpermy<-currentpermy #and update the proposed permutation according to how the nodes are distributed
proposedpermy[(sampledelement-newleftelementsize+1):sampledelement]<-currentpermy[newleftnodes]
proposedpermy[(sampledelement+1):(sampledelement+newrightelementsize)]<-currentpermy[newrightnodes]
# update the neighbourhood of the new left element
proposedpartposs[(sampledelement-newleftelementsize+1):sampledelement]<-choose(newleftelementsize,1:newleftelementsize)
rescorenodes<-c((sampledelement-newleftelementsize+1):sampledelement) #we only need to rescore the nodes in the new left partition element
if(currentposy[sampledelement]>1){ # and the partition element to the left, if it exists
rescorenodes<-c(which(currentposy==(currentposy[sampledelement]-1)),rescorenodes)
}
if(currentposy[sampledelement]<length(currentparty)){ #update the neighbourhood of the right element
proposedpartposs[(sampledelement+1):(sampledelement+newrightelementsize)]<-choose(newrightelementsize,1:newrightelementsize)
} else { # if the partition element was last then we cut the binomial coefficients short
if(newrightelementsize>1){
proposedpartposs[(sampledelement+1):(sampledelement+newrightelementsize-1)]<-choose(newrightelementsize,1:(newrightelementsize-1))
}
}
rescorenodes<-proposedpermy[rescorenodes] #finally map to the correct labels
}
if(currentposy[sampledelement]<currentposy[sampledelement+1]){ #otherwise join partition elements
newelementsize<-currentparty[currentposy[sampledelement]]+currentparty[currentposy[sampledelement+1]]
proposedparty<-currentparty[-currentposy[sampledelement]] #create the proposed partition
proposedparty[currentposy[sampledelement]]<-newelementsize
# update the list to be slightly more efficient
proposedposy[(sampledelement+1):n]<-currentposy[(sampledelement+1):n]-1
proposedpermy<-currentpermy #now changing the permutation has no effect
rescorenodes<-which(currentposy==currentposy[sampledelement]) #we only need to rescore the nodes from the old element on the left and the element further left
newelementstart<-min(rescorenodes) #store the start of the new element
if(proposedposy[sampledelement]>1){ #include the partition element to the left, if it exists
rescorenodes<-c(which(proposedposy==(proposedposy[sampledelement]-1)),rescorenodes)
}
if(proposedposy[sampledelement]<length(proposedparty)){ #update the neighbourhood
proposedpartposs[(newelementstart):(newelementstart+newelementsize-1)]<-choose(newelementsize,1:newelementsize)
} else { # if the new element is last we cut the binomial coefficients short
proposedpartposs[(newelementstart):(newelementstart+newelementsize-2)]<-choose(newelementsize,1:(newelementsize-1)) # the new element must have a size of at least two
}
rescorenodes<-proposedpermy[rescorenodes] #again map to the correct labels
}
scorepositions<-which(proposedpermy%in%rescorenodes)
rescorenodes<-proposedpermy[scorepositions]
return(list(proposedparty,proposedposy,proposedpermy,rescorenodes,proposedpartposs,
scorepositions))
}
# move a node to a different partition element (following further restrictions) # move 4 part 1
joinnode.old<-function(n,currentparty,currentposy,currentpermy,joinpossibs){
m<-length(currentparty)
scorelength<-length(joinpossibs)
nodetomove<-sample.int(scorelength,1,prob=joinpossibs) # sample an element
nodeselement<-currentposy[nodetomove]
elementtomoveto<-sample.int(joinpossibs[nodetomove],1) # sample where to go to
if(elementtomoveto==nodeselement){ #if element selected is the same, replace by m which is not otherwise sampled
elementtomoveto<-m
} else if (elementtomoveto==(nodeselement+1)){
if(currentparty[nodeselement]==1){ #if partition element has size one
if(currentparty[nodeselement+1]==1){ #if next partition element is also size one
elementtomoveto<-m-1 #replace the disallowed sample by 'm-1', which will also not be sampled otherwise
}
}
}
proposedparty<-currentparty # update the partition
proposedparty[elementtomoveto]<-currentparty[elementtomoveto]+1
if(currentparty[nodeselement]>1){
proposedparty[nodeselement]<-currentparty[nodeselement]-1
} else {
proposedparty<-proposedparty[-nodeselement]
}
proposedposy<-parttolist(n,proposedparty) #should be updated for efficiency
if(elementtomoveto<nodeselement){
leftnodes<-which(currentposy==elementtomoveto)
maxleftnodes<-max(leftnodes)
rightnodes<-which(currentposy==nodeselement) # this includes the node which is moved
minrightnodes<-min(rightnodes)
proposedpermy<-currentpermy[-nodetomove]
proposedpermy<-append(proposedpermy,currentpermy[nodetomove],maxleftnodes)
centralnodes<-c()
if((maxleftnodes+1)<minrightnodes){
centralnodes<-c((maxleftnodes+1):(minrightnodes-1))
}
rescorenodes<-c(leftnodes,centralnodes,nodetomove) #all these need to be rescored
if(elementtomoveto>1){ #as well as the partition element further left, if it exists
rescorenodes<-c(which(currentposy==(elementtomoveto-1)),rescorenodes)
}
} else {
leftnodes<-which(currentposy==nodeselement) # this includes the node which is moved
maxleftnodes<-max(leftnodes)
rightnodes<-which(currentposy==elementtomoveto)
minrightnodes<-min(rightnodes)
proposedpermy<-currentpermy[-nodetomove]
proposedpermy<-append(proposedpermy,currentpermy[nodetomove],min(rightnodes)-2)
centralnodes<-c()
if((maxleftnodes+1)<minrightnodes){
centralnodes<-c((maxleftnodes+1):(minrightnodes-1))
}
rescorenodes<-c(leftnodes,centralnodes) #all these need to be rescored - the moved node is included in leftnodes
if(nodeselement>1){ #as well as the partition element further left, if it exists
rescorenodes<-c(which(currentposy==(nodeselement-1)),rescorenodes)
}
}
rescorenodes<-currentpermy[rescorenodes] #map to the correct labels
scorepositions<-which(proposedpermy%in%rescorenodes)
rescorenodes<-proposedpermy[scorepositions]
return(list(proposedparty,proposedposy,proposedpermy,rescorenodes,scorepositions))
}
# move a node to a new partition element (following further restrictions) # move 4 part 2
holenode.old<-function(n,currentparty,currentposy,currentpermy,holepossibs){
m<-length(currentparty)
scorelength<-length(holepossibs)
nodetomove<-sample.int(scorelength,1,prob=holepossibs) # sample an element
nodeselement<-currentposy[nodetomove]
holetomoveto<-sample.int(holepossibs[nodetomove],1) # sample where to go to
if(currentparty[nodeselement]==1){
if(holetomoveto>=nodeselement){
holetomoveto<-holetomoveto+1 #counts hole when partition element removed
if(nodeselement<m){
if(currentparty[nodeselement+1]==1){ #if next partition element is also size one
holetomoveto<-holetomoveto+1 #shift one hole further along
}
}
}
proposedparty<-currentparty[-nodeselement]
proposedparty<-append(proposedparty,1,holetomoveto-1)
proposedposy<-parttolist(n,proposedparty) #should be updated for efficiency
if(holetomoveto<nodeselement){
leftnodes<-which(proposedposy==holetomoveto) # this should be the node which is moved
maxleftnodes<-max(leftnodes)
rightnodes<-which(proposedposy==nodeselement) # these were to the left of the moved node
minrightnodes<-min(rightnodes)
proposedpermy<-currentpermy[-nodetomove]
proposedpermy<-append(proposedpermy,currentpermy[nodetomove],leftnodes-1)
centralnodes<-c()
if((maxleftnodes+1)<minrightnodes){
centralnodes<-c((maxleftnodes+1):(minrightnodes-1))
}
rescorenodes<-c(leftnodes,centralnodes,rightnodes) #all these need to be rescored
if(holetomoveto>1){ #as well as the partition element further left, if it exists
rescorenodes<-c(which(currentposy==(holetomoveto-1)),rescorenodes)
}
} else {
leftnodes<-which(proposedposy==nodeselement) # these were to the right of the moved node
maxleftnodes<-max(leftnodes)
rightnodes<-which(proposedposy==holetomoveto) # this should be the node that is moved
minrightnodes<-min(rightnodes)
proposedpermy<-currentpermy[-nodetomove]
proposedpermy<-append(proposedpermy,currentpermy[nodetomove],rightnodes-1)
centralnodes<-c()
if((maxleftnodes+1)<minrightnodes){
centralnodes<-c((maxleftnodes+1):(minrightnodes-1))
}
rescorenodes<-c(leftnodes,centralnodes,rightnodes) #all these need to be rescored
if(nodeselement>1){ #as well as the partition element further left, if it exists
rescorenodes<-c(which(currentposy==(nodeselement-1)),rescorenodes)
}
}
} else {
if(currentparty[nodeselement]==2){
if(holetomoveto==nodeselement){
holetomoveto<-m+1
}
}
proposedparty<-currentparty
proposedparty[nodeselement]<-currentparty[nodeselement]-1
proposedparty<-append(proposedparty,1,holetomoveto-1)
proposedposy<-parttolist(n,proposedparty) #should be updated for efficiency
if(holetomoveto<=nodeselement){
leftnodes<-which(proposedposy==holetomoveto) # this is the node which is moved
maxleftnodes<-max(leftnodes)
rightnodes<-which(proposedposy==(nodeselement+1)) # these are the nodes left over
minrightnodes<-min(rightnodes)
proposedpermy<-currentpermy[-nodetomove]
proposedpermy<-append(proposedpermy,currentpermy[nodetomove],leftnodes-1)
centralnodes<-c()
if((maxleftnodes+1)<minrightnodes){
centralnodes<-c((maxleftnodes+1):(minrightnodes-1))
}
rescorenodes<-c(leftnodes,centralnodes) #all these need to be rescored
if(holetomoveto>1){ #as well as the partition element further left, if it exists
rescorenodes<-c(which(currentposy==(holetomoveto-1)),rescorenodes)
}
} else {
leftnodes<-which(proposedposy==nodeselement) # these are the nodes left over
maxleftnodes<-max(leftnodes)
rightnodes<-which(proposedposy==holetomoveto) # this is the node which is moved
minrightnodes<-min(rightnodes)
proposedpermy<-currentpermy[-nodetomove]
proposedpermy<-append(proposedpermy,currentpermy[nodetomove],rightnodes-1)
centralnodes<-c()
if((maxleftnodes+1)<minrightnodes){
centralnodes<-c((maxleftnodes+1):(minrightnodes-1))
}
rescorenodes<-c(leftnodes,centralnodes,rightnodes) #all these need to be rescored
if(nodeselement>1){ #as well as the partition element further left, if it exists
rescorenodes<-c(which(currentposy==(nodeselement-1)),rescorenodes)
}
}
}
rescorenodes<-proposedpermy[rescorenodes] #map to the correct labels
scorepositions<-which(proposedpermy%in%rescorenodes)
rescorenodes<-proposedpermy[scorepositions]
return(list(proposedparty,proposedposy,proposedpermy,rescorenodes,
scorepositions))
}
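# Hole-indexing note: a partition with m elements has m+1 gaps (before,
# between and after the elements) in which a new singleton element can be
# created; 'holetomoveto' indexes these gaps, and the adjustments above
# compensate for gaps that vanish or shift when the moved node's own element
# becomes empty and is removed.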
# end of file: /scratch/gouwar.j/cran-all/cranData/BiDAG/R/partitionmoves.R
# this function scores by propagating through the partition poset graph
posetpartitionscoreparents<-function(numberofparents,neededposetparenttable,
neededparentsvec,numberofparentsvec,rowmaps,
needednodebannedrow,scoretable,n,updatenodes=c(1:n)) {
scorematrices<-list()
for (j in updatenodes) {
np<-numberofparents[j]
if(np==1) {
scorematrices[[j]]<-as.matrix(scoretable[[j]][2,1])
} else if (np>1){
binomcoefs<-choose(np,c(np:1))*(2^c(np:1)-1)
nrows<-nrow(neededposetparenttable[[j]])
P_local <- numeric(nrows)
nrowold<-length(rowmaps[[j]]$forward) # size of the other poset graph
# at the top of the graph we only allow one possible parent set
for (i in nrows:(nrows-np+1)) {
k <- needednodebannedrow[[j]][i] # the banned nodes row, there should be (n-1) banned nodes
conjugatescore<-scoretable[[j]][rowmaps[[j]]$backwards[nrowold-rowmaps[[j]]$forward[k]+1],1]
P_local[i]<-conjugatescore
}
# for each other level of the poset graph we need to add the parents divided by the difference in levels
cutoff<-1
for(level in 0:(np-2)) {
for (i in (nrows-np):cutoff) { # find the parents in the poset graph
k <- needednodebannedrow[[j]][i] # the banned nodes row
parentnodes <- neededposetparenttable[[j]][i,c(1:neededparentsvec[[j]][i])]
maxparents<-max(P_local[parentnodes])
# take the sum of the parent scores and divide by the relevant factor
parentsum<-log(sum(exp(P_local[parentnodes]-maxparents)))+maxparents -
log(np-rev(numberofparentsvec[[j]])[k]-level+1)
# take the conjugate score
conjugatescore<-scoretable[[j]][rowmaps[[j]]$backwards[nrowold-rowmaps[[j]]$forward[k]+1],1]
# find max and combine
maxoverall<-max(parentsum,conjugatescore)
P_local[i]<- log(exp(parentsum-maxoverall)+exp(conjugatescore-maxoverall)) + maxoverall
}
cutoff<-cutoff+binomcoefs[level+1]
}
scorematrices[[j]]<-as.matrix(P_local)
}
}
return(scorematrices)
}
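# The level-by-level recursion above repeatedly combines log-scores with the
# max-subtraction (log-sum-exp) pattern to avoid numerical underflow. A
# minimal standalone sketch of the pattern (illustrative only, not called
# anywhere in the package):
logsumexp.sketch <- function(logx) {
  m <- max(logx)                # factor out the largest term
  m + log(sum(exp(logx - m)))   # stable version of log(sum(exp(logx)))
}
# e.g. logsumexp.sketch(c(-1000, -1001)) is finite, whereas the naive
# log(sum(exp(c(-1000, -1001)))) underflows to -Inf.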
#builds a table where entries correspond to partition scores of required and allowed nodes
plus1needed.partition<-function(numparents,parenttable,neededposetparenttable,neededparentsvec,
numberofparentsvec,rowmapsneeded,needednodebannedrow,
scoretable,plus1lists,n,updatenodes=c(1:n)) {
revnumberofparentsvec<-lapply(numberofparentsvec,rev)
rowmaps<-rowmapsneeded
if (is.null(plus1lists)) {
scorematrices<-list()
for (j in updatenodes){
np<-numparents[j]
if(np==1) {
scorematrices[[j]]<-as.matrix(scoretable[[j]][2,1])
} else if (np>1) {
binomcoefs<-choose(np,c(np:1))*(2^c(np:1)-1)
nrows<-nrow(neededposetparenttable[[j]])
P_local <- numeric(nrows)
nrowold<-length(rowmaps[[j]]$forward) # size of the other poset graph
# at the top of the graph we only allow one possible parent set
for (i in nrows:(nrows-np+1)) {
k <- needednodebannedrow[[j]][i] # the banned nodes row, there should be (n-1) banned nodes
conjugatescore<-scoretable[[j]][rowmaps[[j]]$backwards[nrowold-rowmaps[[j]]$forward[k]+1],1]
P_local[i]<-conjugatescore
}
# for each other level of the poset graph we need to add the parents divided by the difference in levels
cutoff<-1
for(level in 0:(np-2)) {
for (i in (nrows-np):cutoff) { # find the parents in the poset graph
k <- needednodebannedrow[[j]][i] # the banned nodes row
parentnodes <- neededposetparenttable[[j]][i,c(1:neededparentsvec[[j]][i])]
maxparents<-max(P_local[parentnodes])
# take the sum of the parent scores and divide by the relevant factor
parentsum<-log(sum(exp(P_local[parentnodes]-maxparents)))+maxparents - log(np-revnumberofparentsvec[[j]][k]-level+1)
# take the conjugate score
conjugatescore<-scoretable[[j]][rowmaps[[j]]$backwards[nrowold-rowmaps[[j]]$forward[k]+1],1]
# find max and combine
maxoverall<-max(parentsum,conjugatescore)
P_local[i]<- log(exp(parentsum-maxoverall)+exp(conjugatescore-maxoverall)) + maxoverall
}
cutoff<-cutoff+binomcoefs[level+1]
}
scorematrices[[j]]<-as.matrix(P_local)
}
}
return(scorematrices)
} else{
scorematrices.needed<-list()
for (j in updatenodes){
np<-numparents[j] #number of possible parents for node j
binomcoefs<-choose(np,c(np:1))*(2^c(np:1)-1)
ll<-length(plus1lists$parents[[j]])+1
nrows<-nrow(neededposetparenttable[[j]])
nrowspar<-nrow(parenttable[[j]])
if(np>0) {P_local <- matrix(nrow=nrows,ncol=ll)}
for (li in 1:ll){
if (np==0){
scorematrices.needed[[j]]<-NULL
break #we don't have allowed table, all scores already in the scoretable plus1 lists
}
else if(np==1) {
#only one parent set is needed here: the single parent from the scoretable (combined with the plus1 node for li>1)
P_local[1,li]<-as.matrix(scoretable[[j]][[li]][2,1])
scorematrices.needed[[j]]<-P_local
} else if (np>1){
nrowold<-length(rowmaps[[j]]$forward) # size of the other poset graph
for (i in nrows:(nrows-np+1)) {
k <- needednodebannedrow[[j]][i] # the banned nodes row, there should be (n-1) banned nodes
conjugatescore<-scoretable[[j]][[li]][rowmaps[[j]]$backwards[nrowold-rowmaps[[j]]$forward[k]+1],1]
P_local[i,li]<-conjugatescore
}
# for each other level of the poset graph we need to add the parents divided by the difference in levels
cutoff<-1
for(level in 0:(np-2)){
for (i in (nrows-np):cutoff) { # find the parents in the poset graph
k <- needednodebannedrow[[j]][i] # the banned nodes row
parentnodes <- neededposetparenttable[[j]][i,c(1:neededparentsvec[[j]][i])]
maxparents<-max(P_local[parentnodes,li])
# take the sum of the parent scores and divide by the relevant factor
parentsum<-log(sum(exp(P_local[parentnodes,li]-maxparents)))+maxparents-log(np-revnumberofparentsvec[[j]][k]-level+1)
# take the conjugate score
conjugatescore<-scoretable[[j]][[li]][rowmaps[[j]]$backwards[nrowold-rowmaps[[j]]$forward[k]+1],1]
# find max and combine
maxoverall<-max(parentsum,conjugatescore)
P_local[i,li]<- log(exp(parentsum-maxoverall)+exp(conjugatescore-maxoverall)) + maxoverall
}
cutoff<-cutoff+binomcoefs[level+1]
}
scorematrices.needed[[j]]<-as.matrix(P_local)
}
}
}
return(scorematrices.needed)
}
}
parentlistnonempty<-function(elements,n){
matrixofparents<-matrix(NA,nrow=(2^length(elements)-1),ncol=n)
cutoff<-0
for (r in 1:length(elements)){
possparents<-combinations(length(elements),r,elements)
heighty<-nrow(possparents)
matrixofparents[(1:heighty)+cutoff,1:r]<-possparents
cutoff<-cutoff+heighty
}
return(matrixofparents)
}
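# Note: combinations() above is assumed to be gtools::combinations(n, r, v),
# which returns the r-element subsets of 'elements' one per row, so the loop
# enumerates all non-empty subsets, padded with NA to n columns.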
partitionlistnumberofparents<-function(needednodetable,numberofparentsvec,n,updatenodes){
numberofpartitionparentsvec<-list()
for (i in updatenodes){
if(length(numberofparentsvec[[i]])==1) {numberofpartitionparentsvec[[i]]<-0}
else {
nrows<-length(numberofparentsvec[[i]])
npar<-numberofparentsvec[[i]][nrows]
P_local<-c()
for (j in 1:(nrows-1)) { # last row has no possibilities
np<-npar-numberofparentsvec[[i]][j]
P_local<-c(P_local,rep(c(1:np),choose(np,c(1:np))))
}
numberofpartitionparentsvec[[i]]<-P_local
}
}
return(numberofpartitionparentsvec)
}
partitionmapneedednodebannedrow<-function(numberofparents,numberofparentsvec,n,updatenodes) {
needednodebannedrow<-list()
for (i in updatenodes) {
j<-numberofparents[i]
needednodebannedrow[[i]]<-rep(1:2^j,2^(j-numberofparentsvec[[i]])-1)
}
return(needednodebannedrow)
}
partitionlist<-function(parenttable,numberofparentsvec,n,updatenodes) {
needednodetable<-list()
for (i in updatenodes) {
cutoff<-0
nrows<-nrow(parenttable[[i]])
ncols<-ncol(parenttable[[i]])
matrixofparents<-matrix(NA,nrow=(3^ncols-2^ncols),ncol=ncols)
if(nrows>1) {
for (j in 1:(nrows-1)){ # last row has no possibilities
parentnodes <- parenttable[[i]][j,1:numberofparentsvec[[i]][j]]
elements<-setdiff(c(1:ncols),parentnodes)
newpart<-parentlistnonempty(elements,ncols)
heighty<-nrow(newpart)
matrixofparents[(1:heighty)+cutoff,]<-newpart
cutoff<-cutoff+heighty
}
}
needednodetable[[i]]<-as.matrix(matrixofparents)
}
return(needednodetable)
}
neededparentsmapping<-function(parenttable,numberofparentsvec,needednodetable,
numberofpartitionparentsvec,needednodebannedrow,n,updatenodes) {
maps<-list()
for (i in updatenodes){
nrows<-nrow(needednodetable[[i]])
ncols<-ncol(needednodetable[[i]])
maps[[i]]<-list()
P_local <- numeric(3^ncols) # we leave some elements empty
P_localinv <- numeric(3^ncols) # here too
if(nrows>1){
for (j in 1:nrows) {# the needed nodes
needednodes <- needednodetable[[i]][j,c(1:numberofpartitionparentsvec[[i]][j])]
k <- needednodebannedrow[[i]][j] # the banned nodes row
if(k>1) { # if there is at least one banned node
bannednodes <- parenttable[[i]][k,c(1:numberofparentsvec[[i]][k])]
P_local[j]<-sum(3^bannednodes)/3+2*sum(3^needednodes)/3+1
} else {P_local[j]<-2*sum(3^needednodes)/3+1}
P_localinv[P_local[j]]<-j # the inverse mapping
}
}
maps[[i]]$forward<-P_local
maps[[i]]$backwards<-P_localinv
}
return(maps)
}
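# The forward map above encodes the state of each candidate parent in base 3:
# digit k-1 is 1 if parent k is banned and 2 if it is needed (0 otherwise).
# A standalone sketch of the key computation (illustrative only; the division
# by 3 compensates for one-based node indices):
ternarykey.sketch <- function(banned, needed) {
  sum(3^banned)/3 + 2*sum(3^needed)/3 + 1
}
# ternarykey.sketch(banned = 1, needed = 2) == 8, i.e. the base-3 number with
# digits (1, 2) is 7, plus 1 for one-based indexing.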
needed.poset<-function(parenttable,numberofparentsvec,needednodebannedrow,neededrowmaps,n,
updatenodes=c(1:n)) {
posetparents<-list()
for (i in updatenodes) {
nrows<-length(needednodebannedrow[[i]])
numpar<-numberofparentsvec[[i]][nrow(parenttable[[i]])]
if(nrows>1) {
posetneededtable<-matrix(NA,nrow=nrows,ncol=numpar)
offsets<-rep(1,nrows)
if(nrows>1) {
for(j in (nrows:(2^numpar))){
k <- needednodebannedrow[[i]][j] # the banned nodes row, there should be at least one banned node
bannednodes <- parenttable[[i]][k,c(1:numberofparentsvec[[i]][k])]
# we can either delete one of the banned nodes
children1<-neededrowmaps[[i]]$backwards[neededrowmaps[[i]]$forward[j]-3^bannednodes/3]
# or move one of the banned nodes to a needed node
children2<-neededrowmaps[[i]]$backwards[neededrowmaps[[i]]$forward[j]+3^bannednodes/3]
children<-c(children1,children2)
posetneededtable[cbind(children,offsets[children])]<-j
offsets[children]<-offsets[children]+1
}
}
}
posetparents[[i]]<-list()
if (numpar==1) {
posetneededtable<-matrix(NA,nrow=1,ncol=1)
posetneededtable[1,1]<-1
posetparents[[i]]$sizes<-c(1)
posetparents[[i]]$table<-posetneededtable
}
else if (numpar>1) {
posetparents[[i]]$sizes<-offsets-1
posetparents[[i]]$table<-posetneededtable}
else {
posetparents[[i]]$sizes<-vector("integer",length=0)
posetparents[[i]]$table<-NULL}
}
return(posetparents)
}
plus1allowed.partition<-function(posetparenttable,scoretable,numberofparentsvec,rowmaps,n,plus1lists=NULL,
numparents,updatenodes=c(1:n)){
orderscore<-vector("list",length=n) #list of score matrices, one per node
revnumberofparentsvec<-lapply(numberofparentsvec,rev)
if (is.null(plus1lists)){
for (j in updatenodes){
len<-numparents[j] #maximum number of parents for node j
binomcoefs<-choose(len,c(0:len))
nrows<-nrow(posetparenttable[[j]])
P_local<-vector("numeric",length=nrows)
P_local[nrows] <-scoretable[[j]][1,1]
maxoverall<-max(scoretable[[j]][,1])
P_local[1]<-log(sum(exp(scoretable[[j]][,1]-maxoverall)))+maxoverall
cutoff<-1
if(nrows>2){
for(level in 1:(len-1)){
cutoff<-cutoff+binomcoefs[level]
for (i in (nrows-1):cutoff) {
#so we go through all rows where the number of non-zero entries exceeds the number of banned parents
# find the parents in the poset graph
posetparentnodes <- posetparenttable[[j]][i,c(1:revnumberofparentsvec[[j]][i])]
maxparents<-max(P_local[posetparentnodes])
parentsum<-log(sum(exp(P_local[posetparentnodes]-maxparents)))+maxparents-log(len-revnumberofparentsvec[[j]][i]-level+1)
conjugatescore<-scoretable[[j]][rowmaps[[j]]$backwards[nrows-rowmaps[[j]]$forward[i]+1],1]
maxoverall<-max(parentsum,conjugatescore)
P_local[i]<- log(exp(parentsum-maxoverall)+exp(conjugatescore-maxoverall)) + maxoverall
}
}
}
orderscore[[j]]<-as.matrix(P_local)
}
return(orderscore)
} else {
for (j in updatenodes) {
len<-numparents[j] #maximum number of parents for node j
binomcoefs<-choose(len,c(0:len))
ll<-length(plus1lists$parents[[j]])+1
nrows<-nrow(posetparenttable[[j]])
P_local <- matrix(nrow=nrows,ncol=ll)
for (li in 1:ll){
P_local[nrows,li] <-scoretable[[j]][[li]][1,1]
maxoverall<-max(scoretable[[j]][[li]][,1])
P_local[1,li]<-log(sum(exp(scoretable[[j]][[li]][,1]-maxoverall)))+maxoverall
cutoff<-1
if(nrows>2){
for(level in 1:(len-1)){
cutoff<-cutoff+binomcoefs[level]
for (i in (nrows-1):cutoff) {
#so we go through all rows where the number of non-zero entries exceeds the number of banned parents
# find the parents in the poset graph
posetparentnodes <- posetparenttable[[j]][i,c(1:revnumberofparentsvec[[j]][i])]
maxparents<-max(P_local[posetparentnodes,li])
parentsum<-log(sum(exp(P_local[posetparentnodes,li]-maxparents)))+maxparents-log(len-revnumberofparentsvec[[j]][i]-level+1)
conjugatescore<-scoretable[[j]][[li]][rowmaps[[j]]$backwards[nrows-rowmaps[[j]]$forward[i]+1],1]
maxoverall<-max(parentsum,conjugatescore)
P_local[i,li]<- log(exp(parentsum-maxoverall)+exp(conjugatescore-maxoverall)) + maxoverall
}
}
}
}
orderscore[[j]]<-P_local
}
return(orderscore)
}
}
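# Traversal note: rows of the poset parent table are grouped by the number of
# banned parents, with binomcoefs counting the rows per group; each pass over
# 'level' moves 'cutoff' one group further in, and the remaining rows
# accumulate their poset parents' scores divided by the level difference, as
# described in the comments of the scoring loops above.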
# end of file: /scratch/gouwar.j/cran-all/cranData/BiDAG/R/partitionposetfns.R
partitionscoreplus1<-function(n,nsmall,scorenodes,scorepositions,bgnodes,parenttable,aliases,scoretab,plus1lists,rowmapsneeded,
rowmapsallowed,scoresneeded,scoresallowed,permy,party,posy){
partitionscores<-rep(0,n)
scoresvec<-vector("numeric")
allowedrow<-vector("numeric")
scoretabrow<-vector("numeric")
neededrow<-vector("numeric")
m<-length(party)
plus1neededlists<-list() #lists where score is taken from allowedscore, so only plus1 lists where top element is required
plus1allowedlists<-list() #lists where score is taken from neededscore, i.e. NOT_LISTS where top element is required
k<-1
for (i in scorenodes){
tablesize<-dim(parenttable[[i]]) # just to remove some arguments
position<-scorepositions[k]
partyelement<-posy[position]
if(partyelement==m){# no parents are allowed
if(is.null(bgnodes)) {
partitionscores[i]<-scoretab[[i]][1,1] #changed
scoretabrow[i]<-1
neededrow[i]<-0
allowedrow[i]<-0
} else {
neededrow[i]<--1
allowednodes<-bgnodes
bannednodes<-permy[1:nsmall]
bannedpool<-which(aliases[[i]]%in%bannednodes)
if(length(bannedpool)==tablesize[2]){ #no parents from the main table allowed
allowedrow[i]<-nrow(parenttable[[i]]) #choose last row, so only plus1 parent is allowed and required
}
else {
allowedrow[i]<-rowmapsallowed[[i]]$backwards[sum(2^bannedpool)/2+1]
}
plus1neededlists[[i]]<-c(1,which(plus1lists$parents[[i]]%in%allowednodes)+1)
scoresvec<-scoresallowed[[i]][allowedrow[i],plus1neededlists[[i]]]
maxallowed<-max(scoresvec)
partitionscores[i]<-maxallowed+log(sum(exp(scoresvec-maxallowed)))
}
}
else if(tablesize[1]==0) { #no parents in PC-table, 1 row in scoretables, then only plus1 lists with requirednodes are good
nextpartyelement<-partyelement+1 #new
lastbannedindex<-sum(party[1:partyelement]) #new
requirednodes<-permy[(lastbannedindex+1):(lastbannedindex+party[nextpartyelement])] #new
# allowedneeded<-intersect(requirednodes,aliases[[i]])
allowedrow[i]<-1
neededrow[i]<-0
plus1neededlists[[i]]<-which(plus1lists$parents[[i]]%in%requirednodes)+1
scoresvec<-scoresallowed[[i]][allowedrow[i],plus1neededlists[[i]]]
maxallowed<-max(scoresvec)
partitionscores[i]<-maxallowed+log(sum(exp(scoresvec-maxallowed)))
} else {
lastbannedindex<-sum(party[1:partyelement]) #new
bannednodes<-permy[1:lastbannedindex] #new
nextpartyelement<-partyelement+1 #new
lastreqindex<-lastbannedindex+party[nextpartyelement] #new
requirednodes<-permy[(lastbannedindex+1):lastreqindex] #new
if(lastreqindex==nsmall) { #new
allowednotrequired<-bgnodes #new
} else {
allowednotrequired<-c(permy[(lastreqindex+1):nsmall], bgnodes) #new
}
bannedpool<-which(aliases[[i]]%in%bannednodes)
allowedneeded<-which(aliases[[i]]%in%requirednodes)
if(length(bannedpool)==tablesize[2]){ #no parents from the main table allowed
neededrow[i]<-0 #in the main table no rows are allowed, so no score from neededscores is taken
allowedrow[i]<-nrow(parenttable[[i]]) #choose last row, so only plus1 parent is allowed and required
plus1neededlists[[i]]<-which(plus1lists$parents[[i]]%in%requirednodes)+1 #plus1 lists for nodes which are in required set
scoresvec<-scoresallowed[[i]][allowedrow[i],plus1neededlists[[i]]]
maxallowed<-max(scoresvec)
partitionscores[i]<-maxallowed+log(sum(exp(scoresvec-maxallowed)))
}
else if(length(bannedpool)<tablesize[2]&&length(allowedneeded)==0){ #no nodes from main tables are in required nodes set,
#but some are in the allowed set
neededrow[i]<-0 #in the main table no rows are needed, so no score from neededscores is taken
allowedrow[i]<-rowmapsallowed[[i]]$backwards[sum(2^bannedpool)/2+1]
plus1neededlists[[i]]<-which(plus1lists$parents[[i]]%in%requirednodes)+1
scoresvec<-scoresallowed[[i]][allowedrow[i],plus1neededlists[[i]]]
maxallowed<-max(scoresvec)
partitionscores[i]<-maxallowed+log(sum(exp(scoresvec-maxallowed)))
}
else if (length(aliases[[i]])==1){ #only 1 parent in possible parent set in the main table and it is required
neededrow[i]<-1
allowedrow[i]<-1
plus1neededlists[[i]]<-c(which(plus1lists$parents[[i]]%in%requirednodes)+1)
#top element does not enter the sum
#first list is added because we do not want to add "no parents" case to the score
plus1allowedlists[[i]]<-c(1,which(plus1lists$parents[[i]]%in%allowednotrequired)+1)
scoresvec<-scoresneeded[[i]][neededrow[i],plus1allowedlists[[i]]]
scoresvec<-c(scoresvec,scoresallowed[[i]][allowedrow[i],plus1neededlists[[i]]])
maxallowed<-max(scoresvec)
partitionscores[i]<-maxallowed+log(sum(exp(scoresvec-maxallowed)))
}
else {
allowedrow[i]<-rowmapsallowed[[i]]$backwards[sum(2^bannedpool)/2+1]
if (length(bannedpool)==0){
neededrow[i]<-rowmapsneeded[[i]]$backwards[2*sum(3^allowedneeded)/3+1]
} else {
neededrow[i]<-rowmapsneeded[[i]]$backwards[sum(3^bannedpool)/3+2*sum(3^allowedneeded)/3+1]}
plus1neededlists[[i]]<-which(plus1lists$parents[[i]]%in%requirednodes)+1
plus1allowedlists[[i]]<-c(1,which(plus1lists$parents[[i]]%in%allowednotrequired)+1)
scoresvec<-scoresneeded[[i]][neededrow[i],plus1allowedlists[[i]]]
scoresvec<-c(scoresvec,scoresallowed[[i]][allowedrow[i],plus1neededlists[[i]]])
maxallowed<-max(scoresvec)
partitionscores[i]<-maxallowed+log(sum(exp(scoresvec-maxallowed)))
}
}
k<-k+1
}
scores<-list()
scores$neededrow<-neededrow
scores$allowedrow<-allowedrow
scores$plus1neededlists<-plus1neededlists
scores$plus1allowedlists<-plus1allowedlists
scores$totscores<-partitionscores
return(scores)
}
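# The allowed-row lookups in this function use a base-2 analogue of the
# ternary key: digit k-1 of the key is set when candidate parent k is banned.
# A standalone sketch (illustrative only):
binarykey.sketch <- function(bannedpool) {
  sum(2^bannedpool)/2 + 1   # one-based index into rowmapsallowed[[i]]$backwards
}
# binarykey.sketch(c(1, 3)) == (2 + 8)/2 + 1 == 6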
# end of file: /scratch/gouwar.j/cran-all/cranData/BiDAG/R/partitionscore.R
#'Estimating posterior probabilities of single edges
#'
#'This function estimates the posterior probabilities of edges by averaging over a sample of DAGs
#'obtained via an MCMC scheme.
#'
#'@param MCMCchain an object of class \code{partitionMCMC}, \code{orderMCMC} or \code{iterativeMCMC}, representing the output of structure sampling function \code{\link{partitionMCMC}} or \code{\link{orderMCMC}} (the latter when parameter \code{chainout}=TRUE)
#'@param pdag logical, if TRUE (FALSE by default) all DAGs in the MCMCchain are first converted to equivalence class (CPDAG) before the averaging
#'@param burnin number between \code{0} and \code{1}, indicates the percentage of the samples which will be discarded as `burn-in' of the MCMC chain; the rest of the samples will be used to calculate the posterior probabilities; 0.2 by default
#'@param endstep number between \code{0} and \code{1}, defines the fraction of the chain at which averaging ends; 1 by default, so that all samples after burn-in are used
#'@return a square matrix with dimensions equal to the number of variables; each entry \code{[i,j]} is an estimate of the posterior probability of the edge from node \code{i} to node \code{j}
#'@examples
#'Bostonscore<-scoreparameters("bge", Boston)
#'\dontrun{
#'samplefit<-sampleBN(Bostonscore, "order")
#'edgesposterior<-edgep(samplefit, pdag=TRUE, burnin=0.2)
#'}
#'@author Polina Suter
#'@export
edgep<-function(MCMCchain,pdag=FALSE,burnin=0.2,endstep=1) {
if(is(MCMCchain,"orderMCMC") | is(MCMCchain,"partitionMCMC")) {
if(is.null(MCMCchain$traceadd)) {
stop("Model averaging is not possible! No DAG trace was saved. Try chainout=TRUE.")
}
} else if(is(MCMCchain,"iterativeMCMC")){
if(is.null(MCMCchain$traceadd)) {
stop("Model averaging is not possible! No DAG trace was saved. Try chainout=TRUE.")
} else {
ln<-length(MCMCchain$traceadd[[1]])
MCMCchain$traceadd[[1]]<-c(MCMCchain$traceadd[[1]][[ln-1]],MCMCchain$traceadd[[1]][[ln]])
}
} else {
stop("MCMCchain must be an object of classes 'orderMCMC','partitionMCMC' or 'iterativeMCMC'!")
}
if(MCMCchain$info$sampletype=="MAP"){
warning("The algorithm was called for MAP search. This option is not recommended for model averaging! \n
Use sampleBN() or parameter MAP=FALSE in constructor functions.")
}
DBN<-MCMCchain$info$DBN
MCMCinfo<-MCMCchain$info
MCMCchain<-MCMCchain$traceadd$incidence
varlabels<-colnames(MCMCchain[[1]])
if(endstep==1) {
endstep<-length(MCMCchain)
} else {
endstep<-ceiling(length(MCMCchain)*endstep)
}
startstep<-max(as.integer(burnin*endstep),1)
if (pdag) {
cpdags<-lapply(MCMCchain[startstep:endstep],dagadj2cpadj)
incidence<-as.matrix(Reduce('+', cpdags))/(endstep-startstep+1)
} else {
incidence<-as.matrix(Reduce('+', MCMCchain[startstep:endstep]))/(endstep-startstep+1)
}
colnames(incidence)<-varlabels
rownames(incidence)<-varlabels
if(DBN) {
incidence<-DBNcut(incidence,dyn=MCMCinfo$nsmall,b=MCMCinfo$bgn)
incidence.init<-DBNinit(incidence,dyn=MCMCinfo$nsmall,b=MCMCinfo$bgn)
incidence[1:(MCMCinfo$nsmall+MCMCinfo$bgn),1:(MCMCinfo$nsmall+MCMCinfo$bgn)]<-incidence.init
}
return(incidence)
}
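# Sketch of the estimate computed by edgep(): with 'dags' the list of retained
# 0/1 adjacency matrices, the posterior matrix is their element-wise mean,
#   postmat <- Reduce('+', dags) / length(dags)
# so that postmat[i, j] estimates the posterior probability of the edge
# i -> j ('dags' and 'postmat' are hypothetical names).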
#'Estimating a graph corresponding to a posterior probability threshold
#'
#'This function constructs a directed graph (not necessarily acyclic) including all edges with a posterior probability above a certain threshold. The posterior probability is evaluated as the Monte Carlo estimate from a sample of DAGs obtained via an MCMC scheme.
#'
#'@param MCMCchain object of class \code{partitionMCMC}, \code{orderMCMC} or \code{iterativeMCMC}, representing the output of structure sampling function \code{\link{partitionMCMC}} or \code{\link{orderMCMC}} (the latter when parameter \code{chainout}=TRUE)
#'@param p threshold such that only edges with a higher posterior probability will be retained in the directed graph summarising the sample of DAGs
#'@param pdag logical, if TRUE (FALSE by default) all DAGs in the MCMCchain are first converted to equivalence class (CPDAG) before the averaging
#'@param burnin number between \code{0} and \code{1}, indicates the percentage of the samples which will be discarded as `burn-in' of the MCMC chain; the rest of the samples will be used to calculate the posterior probabilities; 0.2 by default
#'@return a square matrix with dimensions equal to the number of variables representing the adjacency matrix of the directed graph summarising the sample of DAGs
#'@examples
#'Bostonscore<-scoreparameters("bge", Boston)
#'\dontrun{
#'partfit<-sampleBN(Bostonscore, "partition")
#'hdag<-modelp(partfit, p=0.9)
#'}
#'@author Polina Suter
#'@export
modelp<-function(MCMCchain, p, pdag=FALSE, burnin=0.2) {
if(is(MCMCchain,"orderMCMC") | is(MCMCchain,"partitionMCMC")) {
if(is.null(MCMCchain$traceadd)) {
stop("Model averaging is not possible! No DAG trace was saved. Try chainout=TRUE.")
}
} else if(is(MCMCchain,"iterativeMCMC")){
if(is.null(MCMCchain$traceadd)) {
stop("Model averaging is not possible! No DAG trace was saved. Try chainout=TRUE.")
} else {
ln<-length(MCMCchain$traceadd[[1]])
MCMCchain$traceadd[[1]]<-c(MCMCchain$traceadd[[1]][[ln-1]],MCMCchain$traceadd[[1]][[ln]])
}
} else {
stop("MCMCchain must be an object of classes 'orderMCMC','partitionMCMC' or 'iterativeMCMC'!")
}
if(MCMCchain$info$sampletype=="MAP"){
#warning("The algorithm was called for MAP search. This option is not recommended for model averaging! \n
#Use sampleBN() or parameter MAP=FALSE in constructor functions.")
}
DBN<-MCMCchain$info$DBN
MCMCinfo<-MCMCchain$info
MCMCchain<-MCMCchain$traceadd$incidence
varlabels<-colnames(MCMCchain[[1]])
n<-nrow(MCMCchain[[1]])
incidence<-matrix(rep(0, n*n), nrow=n, ncol=n)
endstep<-length(MCMCchain)
startstep<-max(as.integer(burnin*endstep),1)
if (pdag) {
cpdags<-lapply(MCMCchain[startstep:endstep],dagadj2cpadj)
incidence[which(as.matrix(Reduce('+', cpdags)/(endstep-startstep+1))>p)]<-1
} else {
incidence[which(as.matrix(Reduce('+', MCMCchain[startstep:endstep]))/(endstep-startstep+1)>p)]<-1
}
colnames(incidence)<-varlabels
rownames(incidence)<-varlabels
if(DBN) {
incidence.init<-DBNinit(incidence,dyn=MCMCinfo$nsmall,b=MCMCinfo$bgn)
incidence<-DBNcut(incidence,dyn=MCMCinfo$nsmall,b=MCMCinfo$bgn)
incidence[1:(MCMCinfo$nsmall+MCMCinfo$bgn),1:(MCMCinfo$nsmall+MCMCinfo$bgn)]<-incidence.init
}
return(incidence)
}
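# Note: modelp() essentially thresholds the edgep() estimate, so for matching
# 'burnin' and 'pdag' settings the summary graph could equivalently be
# obtained as (edgep(fit) > p) * 1, with 'fit' a hypothetical sampling result.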
#'Performance assessment of iterative MCMC scheme against a known Bayesian network
#'
#'This function computes 8 different metrics of structure fit of an object of class \code{iterativeMCMC} to the ground truth DAG (or CPDAG). An object of class
#'\code{iterativeMCMC} stores the MAP graph from each search space expansion step. This function computes the structure fit of
#'each of the stored graphs to the ground truth one. Computed metrics include: TP, FP, FN, TPR, FPR, FPRn, FDR and SHD; see the description of these metrics in
#'\code{\link{compareDAGs}}.
#'
#'@param MCMCmult an object of class \code{iterativeMCMC}, see also \code{\link{iterativeMCMC}}
#'@param truedag ground truth DAG which generated the data used in the search procedure; represented by an object of class \code{\link[graph]{graphNEL}} or an adjacency matrix
#@param sample logical (FALSE by default), indicates if \code{MCMCmult} contains sample or maximum score DAGs
#'@param cpdag logical, if TRUE (FALSE by default) all DAGs are first converted to their respective equivalence classes (CPDAG)
#'@param p threshold such that only edges with a higher posterior probability will be retained in the directed graph summarising the sample of DAGs at each iteration from \code{MCMCmult} if parameter \code{sample} set to TRUE
#'@param trans logical, for DBNs indicates if model comparisons are performed for the transition structure; when \code{trans} equals FALSE the comparison is performed for the initial structures of the estimated models and the ground truth DBN; for usual BNs the parameter is disregarded
#'@return an object of class \code{itercomp}, a matrix with the number of rows equal to the number of expansion iterations in \code{iterativeMCMC}, whose columns report for
#'the maximally scoring DAG uncovered at each iteration: the number of true positive edges ('TP'), the number of false positive edges ('FP'),
#'the true positive rate ('TPR'), the structural Hamming distance ('SHD'), the false positive rate ('FPR'),
#'the false discovery rate ('FDR') and the score of the DAG ('score').
#' @examples
#' gsim.score<-scoreparameters("bge", gsim)
#' \dontrun{
#' MAPestimate<-learnBN(gsim.score,"orderIter")
#' itercomp(MAPestimate, gsimmat)
#' }
#'@author Polina Suter
#'@export
itercomp<-function(MCMCmult, truedag, cpdag=TRUE, p=0.5,trans=TRUE) {
if(MCMCmult$info$DBN) { #we need to extract either transition or initial structure
if(trans) {
if(!is.matrix(truedag)) truedag<-graph2m(truedag)
truedag<-m2graph(DBNcut(truedag,dyn=MCMCmult$info$nsmall,b=MCMCmult$info$bgn))
trueskeleton<-graph2skeleton(truedag)
} else {
if(!is.matrix(truedag)) truedag<-graph2m(truedag)
truedag<-m2graph(DBNinit(truedag,dyn=MCMCmult$info$nsmall,b=MCMCmult$info$bgn))
trueskeleton<-graph2skeleton(truedag)
}
if(MCMCmult$info$split) {
if(trans) {
MCMCmult$maxtrace<-MCMCmult$trans$maxtrace
MCMCmult$trans$maxtrace<-NULL
} else {
MCMCmult$maxtrace<-MCMCmult$init$maxtrace
MCMCmult$init$maxtrace<-NULL
}
} else {
newtrace<-lapply(MCMCmult$maxtrace,function(x)x$DAG)
if(trans) {
newtrace<-lapply(newtrace,DBNcut,dyn=MCMCmult$info$nsmall,b=MCMCmult$info$bgn)
for(i in 1:length(newtrace)) {
MCMCmult$maxtrace[[i]]$DAG<-newtrace[[i]]
}
} else {
newtrace<-lapply(newtrace,DBNinit,dyn=MCMCmult$info$nsmall,b=MCMCmult$info$bgn)
for(i in 1:length(newtrace)) {
MCMCmult$maxtrace[[i]]$DAG<-newtrace[[i]]
}
}
}
}
mapdags<-lapply(MCMCmult$maxtrace, function(x)x$DAG)
score<-unlist(lapply(MCMCmult$maxtrace, function(x)x$score))
res<-Reduce(rbind, lapply(mapdags, compareDAGs, truedag, cpdag=cpdag))
res<-cbind(res,score)
rownames(res)<-c(1:nrow(res))
attr(res,"class")<-"itercomp"
return(res)
}
#'Performance assessment of sampling algorithms against a known Bayesian network
#'
#'This function computes 8 different metrics of structure fit of an object of class \code{orderMCMC} or \code{partitionMCMC} to the ground truth DAG (or CPDAG). First, posterior probabilities
#'of single edges are calculated based on the sample stored in the object of class \code{orderMCMC} or \code{partitionMCMC}. This function then computes the structure fit of
#'each of the consensus graphs to the ground truth one for a defined range of posterior thresholds. Computed metrics include: TP, FP, FN, TPR, FPR, FPRn, FDR and SHD; see the description of these metrics in
#'\code{\link{compareDAGs}}.
#'
#'@param MCMCchain an object of class \code{partitionMCMC} or \code{orderMCMC}, representing the output of structure sampling function \code{\link{partitionMCMC}} or \code{\link{orderMCMC}} (the latter when parameter \code{chainout}=TRUE)
#'@param truedag ground truth DAG which generated the data used in the search procedure; represented by an object of class \code{\link[graph]{graphNEL}}
#'@param p a vector of numeric values between 0 and 1, defining the posterior probabilities according to which the edges of assessed structures are drawn; please note that very low thresholds can lead to very dense structures; by default
#'\eqn{p=c(0.99, 0.95, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2)}
#'@param pdag logical, if TRUE (default) all DAGs in the MCMCchain are first converted to equivalence class (CPDAG) before the averaging
#'@param burnin number between \code{0} and \code{1}, indicates the percentage of the samples which will be discarded as `burn-in' of the MCMC chain; the rest of the samples will be used to calculate the posterior probabilities; 0.2 by default
#'@param trans logical, for DBNs indicates if model comparisons are performed for the transition structure; when \code{trans} equals FALSE the comparison is performed for the initial structures of the estimated models and the ground truth DBN; for usual BNs the parameter is disregarded
#'@return an object of class \code{samplecomp}, a matrix with the number of rows equal to the number of elements in 'p', and 8 columns reporting for
#'the consensus graphs (corresponding to each of the values in 'p') the number of true positive edges ('TP'), the number of false positive edges ('FP'), the number of false negative edges ('FN'),
#'the true positive rate ('TPR'), the structural Hamming distance ('SHD'), the false positive rate ('FPR'),
#'the false discovery rate ('FDR') and the false positive rate normalized by TP+FN ('FPRn').
#' @examples
#' gsim.score<-scoreparameters("bge", gsim)
#' \dontrun{
#' MAPestimate<-learnBN(gsim.score,"orderIter",scoreout=TRUE)
#' ordersample<-sampleBN(gsim.score, "order", scoretable=getSpace(MAPestimate))
#' samplecomp(ordersample, gsimmat)
#' }
#'@author Polina Suter
#'@export
samplecomp<-function(MCMCchain, truedag, p=c(0.99,0.95,0.9,0.8,0.7,0.6,0.5,0.4,0.3,0.2),
pdag=TRUE, burnin=0.2, trans=TRUE) {
if(is(MCMCchain,"orderMCMC") | is(MCMCchain,"partitionMCMC")) {
if(is.null(MCMCchain$traceadd)) {
stop("Model averaging is not possible! No DAG trace was saved. Try chainout=TRUE.")
}
} else if(is(MCMCchain,"iterativeMCMC")){
if(is.null(MCMCchain$traceadd)) {
stop("Model averaging is not possible! No DAG trace was saved. Try chainout=TRUE.")
} else {
ln<-length(MCMCchain$traceadd[[1]])
MCMCchain$traceadd[[1]]<-c(MCMCchain$traceadd[[1]][[ln-1]],MCMCchain$traceadd[[1]][[ln]])
}
} else {
stop("MCMCchain must be an object of classes 'orderMCMC','partitionMCMC' or 'iterativeMCMC'!")
}
if(MCMCchain$info$sampletype=="MAP"){
warning("The algorithm was called for MAP search. This option is not recommended for model averaging! \n
Use sampleBN() or parameter MAP=FALSE in constructor functions.")
}
if(is.matrix(truedag)) truedag<-m2graph(truedag)
MCMCmatlist<-MCMCchain$traceadd$incidence
n<-nrow(MCMCmatlist[[1]])
truecp<-pcalg::dag2cpdag(truedag)
if(MCMCchain$info$DBN) {
pdag<-FALSE
if(trans==TRUE) {
cat("comparison is performed for transition structures \n")
trueadj<-DBNcut(graph2m(truedag),MCMCchain$info$nsmall,MCMCchain$info$bgn)
truedag<-m2graph(trueadj)
truecpadj<-graph2m(truecp)
truecpadj<-DBNcut(truecpadj,MCMCchain$info$nsmall,MCMCchain$info$bgn)
truecp<-m2graph(truecpadj)
} else {
cat("comparison is performed for initial structures \n")
trueadj<-DBNinit(graph2m(truedag),MCMCchain$info$nsmall,MCMCchain$info$bgn)
truedag<-m2graph(trueadj)
truecpadj<-DBNinit(graph2m(truecp),MCMCchain$info$nsmall,MCMCchain$info$bgn)
truecp<-m2graph(truecpadj)
n<-MCMCchain$info$nsmall+MCMCchain$info$bgn
}
}
endstep<-length(MCMCmatlist)
startstep<-max(as.integer(burnin*endstep),1)
if(pdag) {
dags<-lapply(MCMCmatlist[startstep:endstep],dagadj2cpadj) #first convert every DAG in the sample to equivalence class
} else {dags<-MCMCmatlist[startstep:endstep]}
if(MCMCchain$info$DBN){
if(trans) {
dags<-lapply(dags,DBNcut,dyn=MCMCchain$info$nsmall,b=MCMCchain$info$bgn)
} else {
dags<-lapply(dags,DBNinit,dyn=MCMCchain$info$nsmall,b=MCMCchain$info$bgn)
}
}
postprobmat<-as.matrix(Reduce('+', dags))/(endstep-startstep+1)
if(length(p)==1) {
mlist<-matrix(0, nrow=n,ncol=n)
mlist[which(postprobmat>p)]<-1
res<-compareDAGs(mlist,truedag, cpdag=pdag)
res<-c(res,p)
names(res)[length(res)]<-"p"
} else {
mlist<-list()
i<-1
for (py in 1:length(p)) {
mlist[[i]]<-matrix(0, nrow=n,ncol=n)
mlist[[i]][which(postprobmat>p[py])]<-1
i<-i+1
}
res<-lapply(mlist,compareDAGs,truedag, cpdag=pdag)
res<-Reduce(rbind,res)
res<-cbind(res,p)
rownames(res)<-c(1:nrow(res))
}
attr(res,"class")<-"samplecomp"
return(res)
}
modelpcore<-function(MCMCchain, p, pdag=FALSE, burnin=0.2, DBN=FALSE, nsmall=0, dyn=0, b=0) {
varlabels<-colnames(MCMCchain[[1]])
n<-nrow(MCMCchain[[1]])
incidence<-matrix(rep(0, n*n), nrow=n, ncol=n)
endstep<-length(MCMCchain)
startstep<-max(as.integer(burnin*endstep),1)
if (pdag) {
cpdags<-lapply(MCMCchain[startstep:endstep],dagadj2cpadj)
incidence[which(as.matrix(Reduce('+', cpdags)/(endstep-startstep+1))>p)]<-1
} else {
incidence[which(as.matrix(Reduce('+', MCMCchain[startstep:endstep])/(endstep-startstep+1))>p)]<-1
}
colnames(incidence)<-varlabels
rownames(incidence)<-varlabels
if(DBN) {
incidence<-DBNcut(incidence,dyn=dyn,b=b)
incidence.init<-DBNinit(incidence,dyn=dyn,b=b)
incidence[1:(dyn+b),1:(dyn+b)]<-incidence.init
}
return(incidence)
}
# end of file: /scratch/gouwar.j/cran-all/cranData/BiDAG/R/performanceassess.R
#PLOT methods for classes:
#orderMCMC
#partitionMCMC
#iterativeMCMC
#itercomp
#samplecomp
#' Plotting object of class 'iterativeMCMC'
#'
#' @param x object of class 'iterativeMCMC'
#' @param ... ignore
#' @param main name of the graph; "iterative MCMC, DAG scores" by default
#' @param xlab name of x-axis; "MCMC step"
#' @param ylab name of y-axis; "DAG logscore"
#' @param type type of line in the plot; "l" by default
#' @param col colour of line in the plot; "blue" by default
#'
#' @rdname iterativeMCMC
#' @method plot iterativeMCMC
#' @export
plot.iterativeMCMC <-function(x,...,main="iterative MCMC, DAG scores", xlab="MCMC step", ylab="DAG logscore", type="l", col="blue"){
old.par<-par(no.readonly = TRUE)
on.exit(par(old.par))
x<-x$trace
nchains<-length(x)
nsteps<-length(x[[1]])
scorevec<-Reduce('c',x)
scoremax<-max(scorevec)
scoremin<-min(scorevec)
# Add extra space to right of plot area; change clipping to figure
par(mar=c(5.1, 4.1, 4.1, 8.1), xpd=TRUE)
plot(scorevec,type=type,xlab=xlab,ylab=ylab, ...,
ylim=c(scoremin,scoremax),main=main,
col=col)
legend(length(scorevec)*1.04, scoremin+(scoremax-scoremin)*0.2,col="grey", lty=2,angle=90,
legend=c("search \nspace \nexpansion"),bty="n")
par(xpd=FALSE)
for(i in 1:(nchains-1)) {
abline(v=i*nsteps,col="grey",lty=2,ylim=c(scoremin,scoremax))
}
par(xpd=TRUE)
par(old.par)
}
#' Plotting object of class 'orderMCMC'
#'
#' @param x object of class 'orderMCMC'
#' @param ... other parameters to be passed through to plotting functions
#' @param burnin number between \code{0} and \code{1}, indicates the percentage of the samples which will be discarded as `burn-in' of the MCMC chain; the rest of the samples will be used to calculate the posterior probabilities; 0.2 by default
#' @param main name of the graph; "DAG logscores" by default
#' @param xlab name of x-axis; "iteration"
#' @param ylab name of y-axis; "logscore"
#' @param type type of line in the plot; "l" by default
#' @param col colour of line in the plot; "#0c2c84" by default
#'
#' @rdname orderMCMC
#' @method plot orderMCMC
#' @export
plot.orderMCMC <-function(x, ..., burnin = 0.2, main="DAG logscores", xlab="iteration", ylab="logscore", type="l", col="#0c2c84"){
old.par<-par(no.readonly = TRUE)
on.exit(par(old.par))
scorevec<-x$trace
vecl<-length(scorevec)
burnin<-ceiling(vecl*burnin)
score20<-min(scorevec[burnin:vecl])
scoremax<-max(scorevec)
scoremin<-min(scorevec)
par(mfrow=c(1,2))
par(mar = c(2, 2, 2, 2)) # Set the margin on all sides to 2
plot(scorevec,col=col, xlab=xlab, ylab=ylab, type=type, ...,
ylim=c(scoremin,scoremax+(scoremax-scoremin)*0.02), main=main)
par(mar = c(2, 2, 2, 2)) # Set the margin on all sides to 2
plot(x=c(burnin:vecl),y=scorevec[burnin:vecl], type=type, col=col, xlab=xlab, ylab=ylab,
ylim=c(score20,scoremax), main="excluding burn-in")
}
#' Plotting object of class 'partitionMCMC'
#'
#' @param x object of class 'partitionMCMC'
#' @param ... other parameters to be passed through to plotting functions
#' @param burnin number between \code{0} and \code{1}, indicates the percentage of the samples which will be discarded as `burn-in' of the MCMC chain; the rest of the samples will be used to calculate the posterior probabilities; 0.2 by default
#' @param main name of the graph; "DAG logscores" by default
#' @param xlab name of x-axis; "iteration"
#' @param ylab name of y-axis; "logscore"
#' @param type type of line in the plot; "l" by default
#' @param col colour of line in the plot; "#0c2c84" by default
#'
#' @rdname partitionMCMC
#' @method plot partitionMCMC
#' @export
plot.partitionMCMC <-function(x, ..., burnin = 0.2, main="DAG logscores", xlab="iteration", ylab="logscore", type="l", col="#0c2c84"){
old.par<-par(no.readonly = TRUE)
on.exit(par(old.par))
scorevec<-x$trace
vecl<-length(scorevec)
burnin<-ceiling(vecl*burnin)
score20<-min(scorevec[burnin:vecl])
scoremax<-max(scorevec)
scoremin<-min(scorevec)
par(mfrow=c(1,2))
par(mar = c(2, 2, 2, 2)) # Set the margin on all sides to 2
plot(scorevec,col=col, xlab=xlab, ylab=ylab, type=type, ...,
ylim=c(scoremin,scoremax+(scoremax-scoremin)*0.02), main=main)
par(mar = c(2, 2, 2, 2)) # Set the margin on all sides to 2
plot(x=c(burnin:vecl),y=scorevec[burnin:vecl], type=type, col=col, xlab=xlab, ylab=ylab,
ylim=c(score20,scoremax), main="excluding burn-in")
}
#' Plotting object of class 'itercomp'
#'
#' @param x object of class 'itercomp'
#' @param ... other parameters to be passed through to plotting functions
#' @param vars a tuple of variables which will be used for 'x' and 'y' axes; possible values: "SHD", "TP", "FP", "TPR", "FPR", "FPRn", "FDR", "score"
#' @param type type of line in the plot;"b" by default
#' @param col colour of line in the plot; "blue" by default
#' @param showit (optional) vector of integers specifying indices of search expansion iterations to be labelled; by default no iterations are labelled
#' @rdname itercomp
#' @method plot itercomp
#' @export
plot.itercomp <-function(x, ..., vars = c("FP", "TP"), type="b", col="blue", showit=c()){
old.par<-par(no.readonly = TRUE)
on.exit(par(old.par))
nit<-nrow(x)
if(nit>1) {
scales<-vector()
for(i in colnames(x)) {
localrange<-max(x[,i])-min(x[,i])
scales[i]<-localrange/nit*0.15
}
plot(x[,vars[1]], x[,vars[2]], type=type, col=col, xlab=vars[1],ylab=vars[2], ...)
if(length(showit)>0) {
for(i in showit) {
text(x[i,vars[1]]+scales[vars[1]], x[i,vars[2]]-0.5*scales[vars[2]], i)
}
}
}
}
#' Plotting object of class 'samplecomp'
#'
#' @param x object of class 'samplecomp'
#' @param ... other parameters to be passed through to plotting functions
#' @param vars a tuple of variables which will be used for 'x' and 'y' axes; possible values: "SHD", "TP", "FP", "TPR", "FPR", "FPRn", "FDR"
#' @param type type of line in the plot; "b" by default
#' @param col colour of line in the plot; "blue" by default
#' @param showp logical, defines if points are labelled with the posterior threshold corresponding to the assessed model
#'
#' @rdname samplecomp
#' @method plot samplecomp
#' @export
plot.samplecomp <-function(x, ..., vars = c("FP", "TP"), type="b", col="blue", showp=NULL){
old.par<-par(no.readonly = TRUE)
on.exit(par(old.par))
nit<-nrow(x)
if(is.null(nit)) {
message("plotting structure fit for the only threshold")
plot(x[vars[1]], x[vars[2]], type="p", col=col, xlab=vars[1],ylab=vars[2], ...)
} else if(nit>1) {
scales<-vector()
lims<-matrix(ncol=2,nrow=2)
k<-1
for(i in vars) {
localrange<-max(x[,i])-min(x[,i])
scales[i]<-localrange/nit*0.15
lims[k,]<-c(min(x[,i])-localrange*0.15,max(x[,i])+localrange*0.15)
k<-k+1
}
plot(x[,vars[1]], x[,vars[2]], type=type, col=col, xlab=vars[1],ylab=vars[2], ...,
xlim=lims[1,],ylim=lims[2,])
if(isTRUE(showp)) {
for(i in 1:nit) {
text(x[i,vars[1]]+1.3*scales[vars[1]], x[i,vars[2]]-scales[vars[2]], x[i,"p"])
}
}
}
}
# end of file: /scratch/gouwar.j/cran-all/cranData/BiDAG/R/plotS3.R
#'Plotting posterior probabilities of single edges
#'
#'This function plots posterior probabilities of all possible edges in the graph as a function of MCMC iterations. It can be used for convergence diagnostics of MCMC
#'sampling algorithms order MCMC and partition MCMC.
#' @param MCMCtrace an object of class \code{orderMCMC} or \code{partitionMCMC}
#' @param cutoff number representing a threshold of posterior probability below which lines will not be plotted
#' @param pdag logical, when TRUE DAGs in a sample will first be converted to CPDAGs
#' @param onlyedges (optional) binary matrix, only edges corresponding to entries which equal 1 will be plotted
#' @param highlight (optional) binary matrix, edges corresponding to entries which equal 1 are highlighted with "red"
#' @param ... (optional) parameters passed to the plot function
#'
#'@return plots posterior probabilities of edges in the graph as a function of MCMC iterations
#'@examples
#'score100<-scoreparameters("bde", Asia[1:100,])
#'orderfit100<-orderMCMC(score100,plus1=TRUE,chainout=TRUE)
#'\dontrun{
#'score5000<-scoreparameters("bde", Asia)
#'orderfit5000<-orderMCMC(score5000,plus1=TRUE,chainout=TRUE)
#'plotpedges(orderfit100, pdag=TRUE)
#'plotpedges(orderfit5000, pdag=TRUE)
#'}
#'@author Polina Suter
#'@export
plotpedges<-function(MCMCtrace,cutoff=0.2,pdag=FALSE,onlyedges=NULL,highlight=NULL, ...) {
old.par<-par(no.readonly = TRUE)
on.exit(par(old.par))
MCMCtrace<-MCMCtrace$traceadd$incidence
if(is.null(MCMCtrace)) {
stop("no saved MCMC steps found! try chainout=TRUE when sampling")
}
#cols5<-c("#f2f0f7","#b3cde3","#8c96c6","#8856a7","#810f7c","#41ae76")
cols5<-c("#8c96c6","#8c96c6","#8c96c6","#8c96c6","#8c96c6","#41ae76")
lchain<-length(MCMCtrace)
if(pdag==TRUE) {
MCMCtrace<-lapply(MCMCtrace,dagadj2cpadj)
}
countmatrix<-MCMCtrace[[1]]
posteriors<-list()
counter<-1
posteriors[[1]]<-countmatrix/counter
for (i in 2:lchain) {
countmatrix<-as.matrix(countmatrix+MCMCtrace[[i]])
counter<-counter+1
posteriors[[i]]<-countmatrix/counter
}
if(!is.null(onlyedges)) {
cutoffelems<-which(onlyedges==1)
} else {
cutoffelems<-which(posteriors[[lchain]]>cutoff)
}
numelem<-length(cutoffelems)
if(is.null(highlight)) {
redi<-c()
} else {
highlight<-as.matrix(highlight)
redi<-which(highlight==1)
}
postvec<-list()
colvec<-vector()
k<-1
for(i in cutoffelems){
postvec[[k]]<-lapply(posteriors,function(x)x[i])
if(i%in%redi) {
colvec[k]<-6
} else {
colvec[k]<-defcolrange(posteriors[[lchain]][i])
}
k<-k+1
}
colvec<-as.integer(colvec)
plot(x=c(1:lchain),y=postvec[[1]],type="l",
col=cols5[colvec[1]],xlab="MCMC step",ylab="estimated posterior",
ylim=c(0,1), ...) #posterior probabilities of single edges
for(i in 2:numelem){
lines(x=c(1:lchain),postvec[[i]],type="l",col=cols5[colvec[i]])
}
}
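# Sketch of the trajectories plotted by plotpedges(): the running posterior of
# a single edge (i, j) is the cumulative mean of its indicator along the
# chain, e.g.
#   cumsum(sapply(trace, function(m) m[i, j])) / seq_along(trace)
# where 'trace' stands for the list of sampled adjacency matrices
# (hypothetical name).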
#'Comparing posterior probabilities of single edges
#'
#'This function can be used to compare posterior probabilities of edges in a graph
#'
#'
#'@param pmat a list of square matrices, representing posterior probabilities of single edges in a Bayesian network; see \code{\link{edgep}} for obtaining such a matrix from a single MCMC run
#'@param highlight numeric, defines maximum acceptable difference between posterior probabilities of an edge in two samples; points corresponding to higher differences are highlighted in red
#'@param printedges when TRUE the function also returns the squared correlation and MSE of posterior probabilities higher than the value defined
#'by the argument 'cut' as well as the list of all edges whose posterior probabilities in the first two matrices differ by more than 'highlight'; FALSE by default
#'@param cut numeric value corresponding to the minimum posterior probability which is included into the calculation of the squared correlation and MSE when 'printedges' equals TRUE
#'@param ... parameters passed further to the \code{plot} function (e.g. \code{xlab}, \code{ylab}, \code{main}) in the case when the length of \code{pmat} equals 2
#'@return plots concordance of posterior probabilities of single edges based on several matrices (minimum 2 matrices); highlights the edges whose posterior probabilities in a pair of matrices differ by more than 'highlight';
#'when 'printedges' is set to TRUE, the function also returns the squared correlation and MSE of posterior probabilities
#'higher than the value defined by the argument 'cut' as well as the list of all edges whose posterior probabilities in the first two matrices differ by more than 'highlight'.
#'@examples
#'Asiascore<-scoreparameters("bde", Asia)
#'\dontrun{
#'orderfit<-list()
#'orderfit[[1]]<-sampleBN(Asiascore,algorithm="order")
#'orderfit[[2]]<-sampleBN(Asiascore,algorithm="order")
#'orderfit[[3]]<-sampleBN(Asiascore,algorithm="order")
#'pedges<-lapply(orderfit,edgep,pdag=TRUE)
#'plotpcor(pedges, xlab="run1", ylab="run2",printedges=TRUE)
#'}
#'@author Polina Suter
#'@export
plotpcor<-function(pmat,highlight=0.3,printedges=FALSE,cut=0.05, ...) {
old.par<-par(no.readonly = TRUE)
on.exit(par(old.par))
nruns<-length(pmat)
if(nruns<2) stop("the number of matrices in the list must be at least two!")
varnames<-colnames(pmat[[1]])
if(nruns>2) {
vecy<-list()
for(i in 1:nruns) {
vecy[[i]]<-as.vector(pmat[[i]])
}
if(printedges) diffedges<-pcorcore(vecy[[1]],vecy[[2]],pmat[[1]],pmat[[2]],highlight,1,2,printedges=TRUE)
par(mfrow=c(nruns,nruns))
for(i in 1:nruns) {
for(j in 1:nruns) {
pcorcore(vecy[[i]],vecy[[j]],pmat[[i]],pmat[[j]],highlight,i,j,name=names(pmat[i]))
}
}
par(old.par)
if(printedges) return(pcorvals(vecy[[1]], vecy[[2]], diffedges, cut, varnames))
} else {
vec1<-as.vector(pmat[[1]])
vec2<-as.vector(pmat[[2]])
if(highlight<1){
diffmat<-abs(pmat[[1]]-pmat[[2]])
pointstohighlight<-which(diffmat>highlight)
diffedges<-which(diffmat>highlight,arr.ind=TRUE)
if(length(pointstohighlight)>0) {
vec1high<-vec1[pointstohighlight]
vec2high<-vec2[pointstohighlight]
vec1<-vec1[-pointstohighlight]
vec2<-vec2[-pointstohighlight]
} else {
highlight<-1
}
}
plot(x=seq(0,1,by=0.1),y=seq(0,1,by=0.1),type="l",col="blue",lty=2,
xlim=c(0,1),ylim=c(0,1), ...)
lines(vec1,vec2,type="p",col="grey")
if(highlight<1) lines(vec1high,vec2high,type="p",col="red")
if (printedges) {
return(pcorvals(vec1, vec2, diffedges, cut, varnames))
}
}
}
pcorcore<-function(vec1,vec2,mat1,mat2,highlight,i,j,printedges=FALSE,name=NULL) {
if(printedges) {
diffmat<-abs(mat1-mat2)
return(which(diffmat>highlight,arr.ind=TRUE)) } else {
if(highlight<1){
diffmat<-abs(mat1-mat2)
pointstohighlight<-which(diffmat>highlight)
if(length(pointstohighlight)>0) {
vec1high<-vec1[pointstohighlight]
vec2high<-vec2[pointstohighlight]
vec1<-vec1[-pointstohighlight]
vec2<-vec2[-pointstohighlight]
} else {
highlight<-1
}
}
}
par(mar = c(1, 1, 1, 1))
if(i == j) {
plot(x=1,xlim=c(0,1),ylim=c(0,1),type = "n")
if(is.null(name)) text(0.5,0.5,i,cex=3) else text(0.5,0.5,name, cex=2)
} else {
plot(x=seq(0,1,by=0.1),y=seq(0,1,by=0.1),type="l",col="blue",lty=2,
xlab="",ylab="",xlim=c(0,1),ylim=c(0,1))
lines(vec1,vec2,type="p",col="grey")
if(highlight<1) lines(vec1high,vec2high,type="p",col="red")
}
}
pcorvals<-function(vec1, vec2, diffedges, cut, varnames) {
rsqindex <- intersect(which(vec1 < cut), which(vec2 < cut))
Rsq <- cor(vec1[-rsqindex], vec2[-rsqindex]) ^ 2
res <- list()
res$MSE <- sum((vec1[-rsqindex] - vec2[-rsqindex]) ^ 2) / length(vec1[-rsqindex])
res$R2 <- Rsq
if (length(diffedges) > 0) {
res$diffedges <- matrix(varnames[diffedges], ncol = 2, nrow = nrow(diffedges))
colnames(res$diffedges) <- c("from", "to")
}
return(res)
}
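# Note: the R2 and MSE above are computed only over edge pairs for which at
# least one of the two runs reports a posterior of at least 'cut'; pairs that
# are negligible in both runs are dropped so that the mass of near-zero
# entries does not inflate the concordance measures.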
# end of file: /scratch/gouwar.j/cran-all/cranData/BiDAG/R/plotfns.R
#' Plotting a DBN
#'
#' This function can be used for plotting initial and transition structures of a dynamic Bayesian network.
#'
#'@param DBN binary matrix (or a graph object) representing a 2-step DBN (compact or unrolled)
#'@param struct option used to determine if the initial or the transition structure should be plotted; acceptable values are init or trans
#'@param b number of static variables in the DBN, 0 by default; note that for function to work correctly all static variables have to be in the first b columns of the matrix
#'@param shape string, defining the shape of the box around each node; possible values are circle, ellipse, box
#'@param ... optional parameters passed to \code{Rgraphviz} plotting functions e.g. \code{main}, \code{fontsize}
#'@return plots the DBN defined by the adjacency matrix 'DBN' and number of static and dynamic variables. When 'struct' equals "trans" the transition structure is plotted,
#'otherwise initial structure is plotted
#'@examples
#'plotDBN(DBNmat, "init", b=3)
#'plotDBN(DBNmat, "trans", b=3)
#'
#' @author Polina Suter
#' @export
plotDBN<-function(DBN,struct=c("init","trans"),b=0,shape="circle",...){
dyn<-(ncol(DBN)-b)/2
old.par<-par(no.readonly = TRUE)
oldgraphpar<-graph.par()
on.exit(par(old.par))
on.exit(graph.par(oldgraphpar),add=TRUE)
a<-d<-1.2
c<-12
if(is.matrix(DBN)) {
DBN<-DBN
} else if (is(DBN,"graphNEL")) {
DBN<-graph2m(DBN)
} else {
DBN<-as.matrix(DBN)
}
nodelabs<-colnames(DBN)
statcol<-"lightgrey"
dyn1col<-"#f7f4f9"
dyn2col<-"#d4b9da"
if(struct=="init") {
shapevec <- rep(shape, dyn+b)
nodelabs<-nodelabs[1:(dyn+b)]
if(b>0){
legadj<-matrix(0,nrow=2,ncol=2)
colnames(legadj)<-c("stat","1")
legadj[1,2]<-1
legendG<-m2graph(legadj)
staticnames<-nodelabs[1:b]
legcol<-c(statcol,dyn1col)
} else {
staticnames<-c()
}
dynamicnames<-nodelabs[1:dyn+b]
adj<-DBN[1:(dyn+b),1:(dyn+b)]
arcslist<-adjacency2edgel(adj,nodes=nodelabs)
graph.obj = new("graphNEL", nodes = nodelabs, edgeL = arcslist,
edgemode = 'directed')
subGList<-list()
sg<-list()
graph.par(list(nodes=list(col=dyn1col, lty="solid", lwd=1, ...),graph=list(...,cex.main=1.5)))
if(b!=0) {
sg1 = subGraph(dynamicnames, graph.obj)
sg2 = subGraph(staticnames, graph.obj)
sgL = list(list(graph=sg1, cluster = TRUE),
list(graph=sg2, cluster = TRUE))
graph.obj <- Rgraphviz::layoutGraph(graph.obj, subGList= sgL,nodeAttrs = list(shape = shapevec))
graph::nodeRenderInfo(graph.obj)[["fill"]][staticnames] = statcol
graph::nodeRenderInfo(graph.obj)[["fill"]][dynamicnames] = dyn1col
graph::nodeRenderInfo(graph.obj)[["shape"]][c(staticnames,dynamicnames)] = shape
} else {
graph.obj <- Rgraphviz::layoutGraph(graph.obj,nodeAttrs = list(shape = shapevec))
}
if(b>0) {
layout(matrix(c(1,1,1,1,1,3,3,
1,1,1,1,1,2,2,
1,1,1,1,1,3,3), nrow = 3, ncol = 7, byrow = TRUE))
legendG <- Rgraphviz::layoutGraph(legendG,nodeAttrs = list(shape = shapevec))
graph::nodeRenderInfo(legendG)[["fill"]]["stat"] = statcol
graph::edgeRenderInfo(legendG)[["lwd"]]["stat~1"] = 0
graph::nodeRenderInfo(legendG)[["shape"]][c("stat","1")] = shape
Rgraphviz::renderGraph(graph.obj,nodeAttrs = list(shape = shapevec))
graph.par(list(graph=list(main="nodes(t):")))
Rgraphviz::renderGraph(legendG,nodeAttrs = list(shape = shapevec))
} else {
graph.par(list(nodes=list(col=dyn1col, lty="solid", lwd=1, ...)))
Rgraphviz::renderGraph(graph.obj,nodeAttrs = list(shape = shape))
}
}
if(struct=="trans") {
shapevec = rep(shape, 2*dyn+b) #added
if(b>0) {
legadj<-matrix(0,nrow=3,ncol=3)
legadj[1,2]<-1
legadj[2,3]<-1
colnames(legadj)<-c("stat","i","i+1")
legendG<-m2graph(legadj)
legcol<-c(statcol,dyn1col,dyn2col)
names(legcol)<-c("stat","i","i+1")
} else {
legadj<-matrix(0,nrow=2,ncol=2)
legadj[1,2]<-1
colnames(legadj)<-c("i","i+1")
legendG<-m2graph(legadj)
staticnames<-c()
}
adjt<-DBNcut(DBN[1:(b+2*dyn),1:(b+2*dyn)],dyn,b)
graph.obj<-m2graph(adjt)
dyn1names<-nodelabs[1:dyn+b]
dyn2names<-nodelabs[1:dyn+b+dyn]
sgDyn1 = subGraph(dyn1names, graph.obj)
sgDyn2 = subGraph(dyn2names, graph.obj)
if(b>0) {
staticnames<-nodelabs[1:b]
sgStat = subGraph(staticnames, graph.obj)
sgL = list(list(graph=sgStat, cluster = TRUE),
list(graph=sgDyn1, cluster = TRUE),
list(graph=sgDyn2, cluster = TRUE))
} else {
sgL = list(list(graph=sgDyn1, cluster = TRUE),
list(graph=sgDyn2, cluster = TRUE))
}
graph.par(list(nodes=list(col=dyn1col, lty="solid", lwd=1, ...),graph=list(...)))
graph.obj <- Rgraphviz::layoutGraph(graph.obj, subGList= sgL,nodeAttrs = list(shape = shape))
if(b>0) graph::nodeRenderInfo(graph.obj)[["fill"]][staticnames] = statcol
graph::nodeRenderInfo(graph.obj)[["fill"]][dyn1names] = dyn1col
graph::nodeRenderInfo(graph.obj)[["fill"]][dyn2names] = dyn2col
graph::nodeRenderInfo(graph.obj)[["shape"]][c(staticnames,dyn1names,dyn2names)]<-shape
layout(matrix(c(1,1,1,1,1,3,3,
1,1,1,1,1,2,2,
1,1,1,1,1,3,3), nrow = 3, ncol = 7, byrow = TRUE))
Rgraphviz::renderGraph(graph.obj,nodeAttrs = list(shape = shape))
legendG <- Rgraphviz::layoutGraph(legendG)
if(b>0) {
graph::nodeRenderInfo(legendG)[["shape"]][c("stat","i","i+1")] <- shape
graph::nodeRenderInfo(legendG)[["fill"]]["stat"] = statcol
graph::nodeRenderInfo(legendG)[["fill"]]["i"] = dyn1col
graph::nodeRenderInfo(legendG)[["fill"]]["i+1"] = dyn2col
graph::edgeRenderInfo(legendG)[["lwd"]]["stat~i"] = 0
graph::edgeRenderInfo(legendG)[["lwd"]]["i~i+1"] = 0
} else {
graph::nodeRenderInfo(legendG)[["shape"]][c("i","i+1")] <- shape
graph::nodeRenderInfo(legendG)[["fill"]]["i"] = dyn1col
graph::nodeRenderInfo(legendG)[["fill"]]["i+1"] = dyn2col
graph::edgeRenderInfo(legendG)[["lwd"]]["i~i+1"] = 0
}
#plot graph
graph.par(list(graph=list(main="nodes(t):",cex.main=1.8),nodes=list(fontsize=16)))
#plot legend
Rgraphviz::renderGraph(legendG,nodeAttrs = list(shape = shapevec))
}
}
assigncolor<-function(nit,ncol) {
colind<-1:(ncol-1)
ncolsmall<-ncol-1
nitsmall<-nit-2
colvec<-vector()
if (nit<=ncol+1) {
if(nit<3){
return(rep(ncol,nit))
} else {
return(c(tail(colind,nitsmall),ncol,ncol))
}
} else {
rmndr <- nitsmall %% ncolsmall
div<-nitsmall %/% ncolsmall
if(rmndr==0) firstadd<-ncol else firstadd<-tail(colind,rmndr)[1]
for(i in 1:ncolsmall) {
if(i>=firstadd) {
colvec<-c(colvec,rep(colind[i],div+1))
} else {
colvec<-c(colvec,rep(colind[i],div))
}
}
}
return(c(colvec,ncol,ncol))
}
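# Illustrative sketch (comments only, not executed): how assigncolor()
# maps 'nit' iterations onto an 'ncol'-colour palette; the values below
# were traced through the function above:
#   assigncolor(3, 4)  # returns c(3, 4, 4): the last two iterations share the final colour
#   assigncolor(7, 4)  # returns c(1, 2, 2, 3, 3, 4, 4): the remainder colours get one extra slot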
#' Plotting difference between two DBNs
#'
#' This function plots an estimated DBN such that the edges which differ from the ground truth DBN are highlighted.
#'
#'@param eDBN object of class graphNEL (or its adjacency matrix), representing the estimated structure (not necessarily acyclic) to be compared to the ground truth graph
#'@param trueDBN object of class graphNEL (or its adjacency matrix), representing the ground truth structure (not necessarily acyclic)
#'@param struct option used to determine if the initial or the transition structure should be plotted; acceptable values are init or trans
#'@param b number of static variables in one time slice of a DBN; note that for the function to work correctly all static variables have to be in the first b columns of the matrix
#'@param showcl logical, when TRUE (default) nodes are shown in clusters according to the time slice they belong to
#'@param orientation orientation of the graph layout, possible options are 'TB' (top-bottom) and 'LR' (left-right)
#'@param ... optional parameters passed to \code{Rgraphviz} plotting functions e.g. \code{main}, \code{fontsize}
#'@return plots the graph and highlights differences between 'eDBN' (estimated DBN) and 'trueDBN' (ground truth); edges which are different in 'eDBN' compared to 'trueDBN' are coloured according to the type of difference: false positive, false negative and difference in direction.
#'@examples
#'dbnscore<-scoreparameters("bge",DBNdata,
#'dbnpar = list(samestruct=TRUE, slices=5, b=3),
#'DBN=TRUE)
#'\dontrun{
#'orderDBNfit<-learnBN(dbnscore,algorithm="order")
#'iterDBNfit<-learnBN(dbnscore,algorithm="orderIter")
#'plotdiffsDBN(getDAG(orderDBNfit),DBNmat,struct="trans",b=3)
#'plotdiffsDBN(getDAG(iterDBNfit),DBNmat,struct="trans",b=3)
#'}
#'@export
#'@author Polina Suter
plotdiffsDBN<-function(eDBN,trueDBN,struct=c("init","trans"),b=0, showcl=TRUE, orientation="TB",...) {
old.par<-par(no.readonly = TRUE)
oldgraphpar<-graph.par()
on.exit(par(old.par))
on.exit(graph.par(oldgraphpar),add=TRUE)
shape<-"circle"
a<-d<-1.2
c<-12
if(is.matrix(eDBN)) {
adj<-eDBN
} else if (is(eDBN,"graphNEL")) {
adj<-graph2m(eDBN)
} else {
adj<-as.matrix(eDBN)
}
dyn<-(ncol(adj)-b)/2
if(!is.matrix(trueDBN)) {
if (is(trueDBN,"graphNEL")) {
adjt<-graph2m(trueDBN)
} else {
adjt<-as.matrix(trueDBN)
}
} else {
adjt<-trueDBN
}
nodelabs<-colnames(adj)
statcol<-"lightgrey"
dyn1col<-"#f7f4f9"
dyn2col<-"#d4b9da"
if(struct=="init") {
n<-b+dyn
nodelabs<-nodelabs[1:(dyn+b)]
if(b>0){
legadj<-matrix(0,nrow=2,ncol=2)
colnames(legadj)<-c("stat","1")
legadj[1,2]<-1
legendG<-m2graph(legadj)
staticnames<-nodelabs[1:b]
legcol<-c(statcol,dyn1col)
colvector = c(rep(statcol,b),rep(dyn1col,dyn))
names(colvector)<-nodelabs
} else {
legcol<-c(dyn1col)
colvector = c(rep(dyn1col,dyn))
names(colvector)<-nodelabs
}
dynamicnames<-nodelabs[1:dyn+b]
adj<-adj[1:(dyn+b),1:(dyn+b)]
adjt<-adjt[1:(dyn+b),1:(dyn+b)]
jointmat<-1*(adj|adjt)
#define edges with wrong directions
FPlist<-NULL
FNlist<-NULL
EDlist<-NULL
MissDlist<-NULL #missing directions
ExtrDlist<-NULL #extra directions
BiFP<-NULL
BiFN<-NULL
comedges<-0
diffmat<-matrix(0,nrow=nrow(jointmat),ncol=ncol(jointmat))
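    # classify each node pair: bi1/bi2 count the directed edges between i and j
    # in the estimated (adj) and true (adjt) graphs, so a value of 2 marks a
    # bidirected pair; diffmat codes 2 = false positive, 3 = false negative,
    # 4 = difference in direction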
for(i in 1:n) {
for(j in 1:n) {
bi1<-adj[i,j]+adj[j,i]
bi2<-adjt[i,j]+adjt[j,i]
if(bi1==2 | bi2==2) {
if(bi1!=bi2) {
if(bi2==2 & bi1==0) { #FN
diffmat[i,j]<-3
diffmat[j,i]<-3
FNlist<-rbind(FNlist,c(nodelabs[i],nodelabs[j]))
FNlist<-rbind(FNlist,c(nodelabs[j],nodelabs[i]))
} else if (bi2==2 & bi1==1) { #ED FN
diffmat[i,j]<-4
diffmat[j,i]<-4
MissDlist<-rbind(MissDlist,c(nodelabs[i],nodelabs[j]))
MissDlist<-rbind(MissDlist,c(nodelabs[j],nodelabs[i]))
} else if (bi2==1 & bi1==2) { #ED FP
diffmat[i,j]<-4
diffmat[j,i]<-4
ExtrDlist<-rbind(ExtrDlist,c(nodelabs[i],nodelabs[j]))
ExtrDlist<-rbind(ExtrDlist,c(nodelabs[j],nodelabs[i]))
} else { #FP
diffmat[i,j]<-2
diffmat[j,i]<-2
FPlist<-rbind(FPlist,c(nodelabs[i],nodelabs[j]))
FPlist<-rbind(FPlist,c(nodelabs[j],nodelabs[i]))
}
} else comedges<-1
} else {
if(adj[i,j]!=adjt[i,j]){
if(adj[j,i]!=adjt[j,i]) {#ED
if(adj[i,j]==1) {
diffmat[i,j]<-4
jointmat[j,i]<-0
EDlist<-rbind(EDlist,c(nodelabs[i],nodelabs[j]))
} else {
diffmat[j,i]<-4
jointmat[i,j]<-0
EDlist<-rbind(EDlist,c(nodelabs[j],nodelabs[i]))
}
} else if (adj[i,j]==1) { #FP
diffmat[i,j]<-2
FPlist<-rbind(FPlist,c(nodelabs[i],nodelabs[j]))
} else {#FN
diffmat[i,j]<-3
FNlist<-rbind(FNlist,c(nodelabs[i],nodelabs[j]))
}
} else if(adj[i,j]==1) comedges<-1
}
}
}
jointarcs<-adjacency2edgel(jointmat,nodes=nodelabs)
graph.par(list(nodes=list(lty="solid", lwd=1, ...), graph=list(...)))
graph.obj = new("graphNEL", nodes = nodelabs, edgeL = jointarcs,
edgemode = 'directed')
if(b!=0) {
sg1 = subGraph(dynamicnames, graph.obj)
sg2 = subGraph(staticnames, graph.obj)
sgL = list(list(graph=sg1, cluster = TRUE),
list(graph=sg2, cluster = TRUE))
colvector<-c(rep(statcol,b),
rep(dyn1col,dyn))
names(colvector)<-nodelabs
} else {
sg1 = subGraph(dynamicnames, graph.obj)
sgL = list(list(graph=sg1, cluster = TRUE))
colvector<-c(rep(dyn1col,dyn))
names(colvector)<-nodelabs
}
if(showcl) {
graph.plot = Rgraphviz::layoutGraph(graph.obj, subGList = sgL)
} else {
graph.plot = Rgraphviz::layoutGraph(graph.obj)
}
if(!is.null(FPlist)) {
FP<-apply(FPlist, 1, paste, collapse = "~")
graph::edgeRenderInfo(graph.plot)[["col"]][FP] = "red"
}
if(!is.null(FNlist)) {
FN<-apply(FNlist, 1, paste, collapse = "~")
graph::edgeRenderInfo(graph.plot)[["col"]][FN] = "grey"
graph::edgeRenderInfo(graph.plot)[["lty"]][FN] = "dashed"
}
if(!is.null(EDlist)) {
ED<-apply(EDlist, 1, paste, collapse = "~")
graph::edgeRenderInfo(graph.plot)[["col"]][ED] = "blue"
}
if(!is.null(BiFP)) {
BiFP<-apply(BiFP, 1, paste, collapse = "~")
graph::edgeRenderInfo(graph.plot)[["col"]][BiFP] = "red"
}
if(!is.null(BiFN)) {
BiFN<-apply(BiFN, 1, paste, collapse = "~")
graph::edgeRenderInfo(graph.plot)[["col"]][BiFN] = "grey"
graph::edgeRenderInfo(graph.plot)[["lty"]][BiFN] = "dashed"
}
if(!is.null(MissDlist)) {
MissD<-apply(MissDlist, 1, paste, collapse = "~")
graph::edgeRenderInfo(graph.plot)[["col"]][MissD] = "blue"
graph::edgeRenderInfo(graph.plot)[["lty"]][MissD] = "solid"
}
if(!is.null(ExtrDlist)) {
ExtrD<-apply(ExtrDlist, 1, paste, collapse = "~")
graph::edgeRenderInfo(graph.plot)[["col"]][ExtrD] = "blue"
graph::edgeRenderInfo(graph.plot)[["lty"]][ExtrD] = "solid"
}
tpname<-"true positive"
fpname<-"false positive"
fnname<-"false negative"
edname<-"difference in direction"
tpcol<-"black"
edcol<-"blue"
fpcol<-"red"
fncol<-"grey"
tplty<-1
fplty<-1
fnlty<-2
edlty<-1
if(is.null(FNlist) & is.null(BiFN)) {
fncol<-NULL
fnlwd<-NULL
fnlty<-NULL
fnname<-NULL
}
if(is.null(FPlist) & is.null(BiFP)) {
fpcol<-NULL
fplwd<-NULL
fplty<-NULL
fpname<-NULL
}
if(is.null(MissDlist) & is.null(ExtrDlist) & is.null(EDlist)) {
edcol<-NULL
edlwd<-NULL
edlty<-NULL
edname<-NULL
}
if(comedges==0) {
tpcol<-NULL
tplwd<-NULL
tplty<-NULL
tpname<-NULL
}
graph::nodeRenderInfo(graph.plot)<-list(fill=colvector,shape="circle",...)
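    # split the plotting device: region 1 (top rows) holds the graph, regions
    # 2-4 (bottom strip) hold the time-slice legend and the edge-type legend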
layout(matrix(c(1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,
2,2,2,3,3,3,4,4,4,
2,2,2,3,3,3,4,4,4), nrow = 9, ncol = 9, byrow = TRUE))
if(b>0) {
#graph
Rgraphviz::renderGraph(graph.plot)
#graph legend
graph.par(list(graph=list(main="nodes(t):",cex.main=1.8),...))
legendG <- Rgraphviz::layoutGraph(legendG,attrs = list(graph = list(rankdir = "TB")))
graph::nodeRenderInfo(legendG)[["shape"]][c("stat","1")] <- shape
graph::nodeRenderInfo(legendG)[["fill"]]["stat"] = statcol
graph::nodeRenderInfo(legendG)[["fill"]]["1"] = dyn1col
graph::edgeRenderInfo(legendG)[["lwd"]]["stat~1"] = 0
Rgraphviz::renderGraph(legendG)
} else {
Rgraphviz::renderGraph(graph.plot)
}
par(mar = c(0,0,0,0))
plot(1:10,1:10,type="n", axes = FALSE, xlab = "", ylab = "")
plot(1:10,1:10,type="n", axes = FALSE, xlab = "", ylab = "")
op <- par(cex = 1.7)
legend("topleft",legend=c(tpname,
fpname,
fnname,
edname),lty=c(tplty,fplty,fnlty,edlty),
col=c(tpcol,fpcol,fncol,edcol),bty="n",title="edges:",cex=0.7)
}
if(struct=="trans") {
n<-b+2*dyn
if(b>0) {
legadj<-matrix(0,nrow=3,ncol=3)
legadj[1,2]<-1
legadj[2,3]<-1
colnames(legadj)<-c("stat","i","i+1")
legendG<-m2graph(legadj)
legcol<-c(statcol,dyn1col,dyn2col)
colvector = c(rep(statcol,b),rep(dyn1col,dyn),rep(dyn2col,dyn))
names(colvector)<-nodelabs
} else {
legadj<-matrix(0,nrow=2,ncol=2)
legadj[1,2]<-1
colnames(legadj)<-c("i","i+1")
legendG<-m2graph(legadj)
legcol<-c(dyn1col,dyn2col)
colvector = c(rep(dyn1col,dyn),rep(dyn2col,dyn))
names(colvector)<-nodelabs
}
adj<-DBNcut(adj[1:(b+2*dyn),1:(b+2*dyn)],dyn,b)
adjt<-DBNcut(adjt[1:(b+2*dyn),1:(b+2*dyn)],dyn,b)
jointmat<-1*(adj|adjt)
arcslist<-adjacency2edgel(jointmat,nodes=nodelabs)
graph.obj = new("graphNEL", nodes = nodelabs, edgeL = arcslist,
edgemode = 'directed')
    dyn1names<-nodelabs[1:dyn+b]
    dyn2names<-nodelabs[1:dyn+b+dyn]
    sgDyn1 = subGraph(dyn1names, graph.obj)
    sgDyn2 = subGraph(dyn2names, graph.obj)
    if(b>0) { # guard: nodelabs[1:0] would wrongly pick the first node when b==0
      staticnames<-nodelabs[1:b]
      sgStat = subGraph(staticnames, graph.obj)
      sgL = list(list(graph=sgStat, cluster = TRUE, attrs = c(rankdir="LR",rank="sink")),
                 list(graph=sgDyn1, cluster = TRUE, attrs = c(rank="same")),
                 list(graph=sgDyn2, cluster = TRUE, attrs = c(rank="same")))
    } else {
      sgL = list(list(graph=sgDyn1, cluster = TRUE, attrs = c(rank="same")),
                 list(graph=sgDyn2, cluster = TRUE, attrs = c(rank="same")))
    }
if(showcl) {
graph.plot = Rgraphviz::layoutGraph(graph.obj, subGList = sgL,attrs = list(graph = list(rankdir = orientation)))
} else {
graph.plot = Rgraphviz::layoutGraph(graph.obj,attrs = list(graph = list(rankdir = orientation)))
}
graph::nodeRenderInfo(graph.plot)<-list(fill=colvector,shape="circle",...)
#define edges with wrong directions
FPlist<-NULL
FNlist<-NULL
EDlist<-NULL
MissDlist<-NULL #missing directions
ExtrDlist<-NULL #extra directions
BiFP<-NULL
BiFN<-NULL
comedges<-0
diffmat<-matrix(0,nrow=nrow(jointmat),ncol=ncol(jointmat))
for(i in 1:n) {
for(j in 1:n) {
bi1<-adj[i,j]+adj[j,i]
bi2<-adjt[i,j]+adjt[j,i]
if(bi1==2 | bi2==2) {
if(bi1!=bi2) {
if(bi2==2 & bi1==0) { #FN
diffmat[i,j]<-3
diffmat[j,i]<-3
FNlist<-rbind(FNlist,c(nodelabs[i],nodelabs[j]))
FNlist<-rbind(FNlist,c(nodelabs[j],nodelabs[i]))
} else if (bi2==2 & bi1==1) { #ED FN
diffmat[i,j]<-4
diffmat[j,i]<-4
MissDlist<-rbind(MissDlist,c(nodelabs[i],nodelabs[j]))
MissDlist<-rbind(MissDlist,c(nodelabs[j],nodelabs[i]))
} else if (bi2==1 & bi1==2) { #ED FP
diffmat[i,j]<-4
diffmat[j,i]<-4
ExtrDlist<-rbind(ExtrDlist,c(nodelabs[i],nodelabs[j]))
ExtrDlist<-rbind(ExtrDlist,c(nodelabs[j],nodelabs[i]))
} else { #FP
diffmat[i,j]<-2
diffmat[j,i]<-2
FPlist<-rbind(FPlist,c(nodelabs[i],nodelabs[j]))
FPlist<-rbind(FPlist,c(nodelabs[j],nodelabs[i]))
}
} else if(adj[i,j]==1) comedges<-1
} else {
if(adj[i,j]!=adjt[i,j]){
if(adj[j,i]!=adjt[j,i]) {#ED
if(adj[i,j]==1) {
diffmat[i,j]<-4
jointmat[j,i]<-0
EDlist<-rbind(EDlist,c(nodelabs[i],nodelabs[j]))
} else {
diffmat[j,i]<-4
jointmat[i,j]<-0
EDlist<-rbind(EDlist,c(nodelabs[j],nodelabs[i]))
}
} else if (adj[i,j]==1) { #FP
diffmat[i,j]<-2
FPlist<-rbind(FPlist,c(nodelabs[i],nodelabs[j]))
} else {#FN
diffmat[i,j]<-3
FNlist<-rbind(FNlist,c(nodelabs[i],nodelabs[j]))
}
} else if(adj[i,j]==1) comedges<-1
}
}
}
if(!is.null(FPlist)) {
FP<-apply(FPlist, 1, paste, collapse = "~")
graph::edgeRenderInfo(graph.plot)[["col"]][FP] = "red"
}
if(!is.null(FNlist)) {
FN<-apply(FNlist, 1, paste, collapse = "~")
graph::edgeRenderInfo(graph.plot)[["col"]][FN] = "grey"
graph::edgeRenderInfo(graph.plot)[["lty"]][FN] = "dashed"
}
if(!is.null(EDlist)) {
ED<-apply(EDlist, 1, paste, collapse = "~")
graph::edgeRenderInfo(graph.plot)[["col"]][ED] = "blue"
}
if(!is.null(BiFP)) {
BiFP<-apply(BiFP, 1, paste, collapse = "~")
graph::edgeRenderInfo(graph.plot)[["col"]][BiFP] = "red"
}
if(!is.null(BiFN)) {
BiFN<-apply(BiFN, 1, paste, collapse = "~")
graph::edgeRenderInfo(graph.plot)[["col"]][BiFN] = "grey"
graph::edgeRenderInfo(graph.plot)[["lty"]][BiFN] = "dashed"
}
if(!is.null(MissDlist)) {
MissD<-apply(MissDlist, 1, paste, collapse = "~")
graph::edgeRenderInfo(graph.plot)[["col"]][MissD] = "blue"
graph::edgeRenderInfo(graph.plot)[["lty"]][MissD] = "solid"
}
if(!is.null(ExtrDlist)) {
ExtrD<-apply(ExtrDlist, 1, paste, collapse = "~")
graph::edgeRenderInfo(graph.plot)[["col"]][ExtrD] = "blue"
graph::edgeRenderInfo(graph.plot)[["lty"]][ExtrD] = "solid"
}
tpname<-"true positive"
fpname<-"false positive"
fnname<-"false negative"
edname<-"difference in direction"
tpcol<-"black"
edcol<-"blue"
fpcol<-"red"
fncol<-"grey"
tplty<-1
fplty<-1
fnlty<-2
edlty<-1
if(is.null(FNlist) & is.null(BiFN)) {
fncol<-NULL
fnlwd<-NULL
fnlty<-NULL
fnname<-NULL
}
if(is.null(FPlist) & is.null(BiFP)) {
fpcol<-NULL
fplwd<-NULL
fplty<-NULL
fpname<-NULL
}
if(is.null(MissDlist) & is.null(ExtrDlist) & is.null(EDlist)) {
edcol<-NULL
edlwd<-NULL
edlty<-NULL
edname<-NULL
}
if(comedges==0) {
tpcol<-NULL
tplwd<-NULL
tplty<-NULL
tpname<-NULL
}
graph.par(list(graph=list(...,cex.main=1.5),
nodes=list(lty="solid", lwd=1, fixedsize=FALSE,...)))
layout(matrix(c(1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,
2,2,2,3,3,4,4,4,4,
2,2,2,3,3,4,4,4,4), nrow = 9, ncol = 9, byrow = TRUE))
#plot graph
Rgraphviz::renderGraph(graph.plot)
#plot legend
legendG <- Rgraphviz::layoutGraph(legendG,attrs = list(graph = list(rankdir = "TB")))
if(b>0) {
graph::nodeRenderInfo(legendG)[["shape"]][c("stat","i","i+1")] <- shape
graph::nodeRenderInfo(legendG)[["fill"]]["stat"] = statcol
graph::nodeRenderInfo(legendG)[["fill"]]["i"] = dyn1col
graph::nodeRenderInfo(legendG)[["fill"]]["i+1"] = dyn2col
graph::edgeRenderInfo(legendG)[["lwd"]]["stat~i"] = 0
graph::edgeRenderInfo(legendG)[["lwd"]]["i~i+1"] = 0
} else {
graph::nodeRenderInfo(legendG)[["shape"]][c("i","i+1")] <- shape
graph::nodeRenderInfo(legendG)[["fill"]]["i"] = dyn1col
graph::nodeRenderInfo(legendG)[["fill"]]["i+1"] = dyn2col
graph::edgeRenderInfo(legendG)[["lwd"]]["i~i+1"] = 0
}
graph.par(list(graph=list(main="nodes(t):",cex.main=1.8),...))
Rgraphviz::renderGraph(legendG)
par(mar = c(0,0,0,0))
plot(1:10,1:10,type="n", axes = FALSE, xlab = "", ylab = "")
plot(1:10,1:10,type="n", axes = FALSE, xlab = "", ylab = "")
op <- par(cex = 1.7)
legend(1,10,legend=c(tpname,
fpname,
fnname,
edname),lty=c(tplty,fplty,fnlty,edlty),
col=c(tpcol,fpcol,fncol,edcol),bty="n",title="edges:",cex=0.7)
}
}
#' Plotting difference between two graphs
#'
#' This function plots edges from two graphs in one and indicates similarities and differences between these graphs.
#' It is also possible to use this function for plotting mistakes in an estimated graph when the ground truth graph is known.
#'
#'@param graph1 object of class graphNEL or its adjacency matrix
#'@param graph2 object of class graphNEL or its adjacency matrix
#'@param estimated logical, indicates if 'graph1' is an estimated graph and 'graph2' is the ground truth DAG, TRUE by default; this affects the legend and colouring of the edges
#'@param name1 character, custom name for 'graph1'
#'@param name2 character, custom name for 'graph2'
#'@param clusters (optional) a list of nodes to be represented on the graph as clusters
#'@param ... optional parameters passed to \code{Rgraphviz} plotting functions e.g. \code{main}, \code{fontsize}
#'@return plots the graph which includes edges from graph1 and graph2; edges which are different in graph1 compared to graph2 are coloured according to the type of difference
#'@examples
#'Asiascore<-scoreparameters("bde",Asia)
#'Asiamap<-orderMCMC(Asiascore)
#'plotdiffs(Asiamap$DAG,Asiamat)
#'Asiacp<-pcalg::dag2cpdag(m2graph(Asiamat))
#'mapcp<-pcalg::dag2cpdag(m2graph(Asiamap$DAG))
#'plotdiffs(mapcp,Asiacp)
#'@author Polina Suter
#'@export
plotdiffs<-function(graph1,graph2,estimated=TRUE,name1="graph1",
name2="graph2",clusters=NULL, ...) {
old.par<-par(no.readonly = TRUE)
on.exit(par(old.par))
if(is(graph1,"graphNEL")) {
adj<-graph2m(graph1)
} else {
adj<-as.matrix(graph1)
}
if(is(graph2,"graphNEL")) {
adjt<-graph2m(graph2)
} else {
adjt<-as.matrix(graph2)
}
nodelabs<-colnames(adj)
if(is.null(nodelabs)) {
nodelabs<-paste("v",1:ncol(adj),sep="")
}
jointmat<-1*(adj|adjt)
n<-nrow(adj)
FPlist<-NULL
FNlist<-NULL
EDlist<-NULL
MissDlist<-NULL #missing directions
ExtrDlist<-NULL #extra directions
BiFP<-NULL
BiFN<-NULL
graph.par(list(nodes=list(lty="solid", lwd=1, ...),
graph=list(...)))
comedges<-0
if(estimated) {
diffmat<-matrix(0,nrow=nrow(jointmat),ncol=ncol(jointmat))
for(i in 1:n) {
for(j in 1:n) {
bi1<-adj[i,j]+adj[j,i]
bi2<-adjt[i,j]+adjt[j,i]
if(bi1==2 | bi2==2) {
if(bi1!=bi2) {
if(bi2==2 & bi1==0) { #FN
diffmat[i,j]<-3
diffmat[j,i]<-3
FNlist<-rbind(FNlist,c(nodelabs[i],nodelabs[j]))
FNlist<-rbind(FNlist,c(nodelabs[j],nodelabs[i]))
} else if (bi2==2 & bi1==1) { #ED FN
diffmat[i,j]<-4
diffmat[j,i]<-4
MissDlist<-rbind(MissDlist,c(nodelabs[i],nodelabs[j]))
MissDlist<-rbind(MissDlist,c(nodelabs[j],nodelabs[i]))
} else if (bi2==1 & bi1==2) { #ED FP
diffmat[i,j]<-4
diffmat[j,i]<-4
ExtrDlist<-rbind(ExtrDlist,c(nodelabs[i],nodelabs[j]))
ExtrDlist<-rbind(ExtrDlist,c(nodelabs[j],nodelabs[i]))
} else { #FP
diffmat[i,j]<-2
diffmat[j,i]<-2
FPlist<-rbind(FPlist,c(nodelabs[i],nodelabs[j]))
FPlist<-rbind(FPlist,c(nodelabs[j],nodelabs[i]))
}
} else comedges<-1
} else {
if(adj[i,j]!=adjt[i,j]){
if(adj[j,i]!=adjt[j,i]) {#ED
if(adj[i,j]==1) {
diffmat[i,j]<-4
jointmat[j,i]<-0
EDlist<-rbind(EDlist,c(nodelabs[i],nodelabs[j]))
} else {
diffmat[j,i]<-4
jointmat[i,j]<-0
EDlist<-rbind(EDlist,c(nodelabs[j],nodelabs[i]))
}
} else if (adj[i,j]==1) { #FP
diffmat[i,j]<-2
FPlist<-rbind(FPlist,c(nodelabs[i],nodelabs[j]))
} else {#FN
diffmat[i,j]<-3
FNlist<-rbind(FNlist,c(nodelabs[i],nodelabs[j]))
}
} else if(adj[i,j]==1) comedges<-1
}
}
}
jointarcs<-adjacency2edgel(jointmat,nodes=nodelabs)
graph.obj = new("graphNEL", nodes = nodelabs, edgeL = jointarcs,
edgemode = 'directed')
if(is.null(clusters)){
subGList = NULL } else {
numsubg<-length(clusters)
sg<-list()
subGList<-list()
for(i in 1:numsubg) {
sg[[i]] <-subGraph(clusters[[i]], graph.obj)
subGList[[i]]<-list(graph = sg[[i]], cluster = TRUE)
}
}
graph.plot = Rgraphviz::layoutGraph(graph.obj,subGList = subGList)
if(!is.null(FPlist)) {
FP<-apply(FPlist, 1, paste, collapse = "~")
graph::edgeRenderInfo(graph.plot)[["col"]][FP] = "red"
}
if(!is.null(FNlist)) {
FN<-apply(FNlist, 1, paste, collapse = "~")
graph::edgeRenderInfo(graph.plot)[["col"]][FN] = "grey"
graph::edgeRenderInfo(graph.plot)[["lty"]][FN] = "dashed"
}
if(!is.null(EDlist)) {
ED<-apply(EDlist, 1, paste, collapse = "~")
graph::edgeRenderInfo(graph.plot)[["col"]][ED] = "blue"
}
if(!is.null(BiFP)) {
BiFP<-apply(BiFP, 1, paste, collapse = "~")
graph::edgeRenderInfo(graph.plot)[["col"]][BiFP] = "red"
}
if(!is.null(BiFN)) {
BiFN<-apply(BiFN, 1, paste, collapse = "~")
graph::edgeRenderInfo(graph.plot)[["col"]][BiFN] = "grey"
graph::edgeRenderInfo(graph.plot)[["lty"]][BiFN] = "dashed"
}
if(!is.null(MissDlist)) {
MissD<-apply(MissDlist, 1, paste, collapse = "~")
graph::edgeRenderInfo(graph.plot)[["col"]][MissD] = "blue"
graph::edgeRenderInfo(graph.plot)[["lty"]][MissD] = "solid"
}
if(!is.null(ExtrDlist)) {
ExtrD<-apply(ExtrDlist, 1, paste, collapse = "~")
graph::edgeRenderInfo(graph.plot)[["col"]][ExtrD] = "blue"
graph::edgeRenderInfo(graph.plot)[["lty"]][ExtrD] = "solid"
}
tpname<-"common edge"
fpname<-"false positive"
fnname<-"false negative"
edname<-"difference in direction"
tpcol<-"black"
edcol<-"blue"
fpcol<-"red"
fncol<-"grey"
tplty<-1
fplty<-1
fnlty<-2
edlty<-1
if(is.null(FNlist) & is.null(BiFN)) {
fncol<-NULL
fnlwd<-NULL
fnlty<-NULL
fnname<-NULL
}
if(is.null(FPlist) & is.null(BiFP)) {
fpcol<-NULL
fplwd<-NULL
fplty<-NULL
fpname<-NULL
}
if(is.null(MissDlist) & is.null(ExtrDlist) & is.null(EDlist)) {
edcol<-NULL
edlwd<-NULL
edlty<-NULL
edname<-NULL
}
if(comedges==0) {
tpcol<-NULL
tplwd<-NULL
tplty<-NULL
tpname<-NULL
}
graph::edgeRenderInfo(graph.plot)[["lwd"]] = 2
#graph::edgeRenderInfo(graph.plot)[["col"]] = "black"
#graph::edgeRenderInfo(graph.plot)[["lty"]] = 1
layout(matrix(c(1,1,2), nrow = 1, ncol = 3, byrow = TRUE))
Rgraphviz::renderGraph(graph.plot)
par(mar = c(0,0,0,0))
plot(1:10,1:10,type="n", axes = FALSE, xlab = "", ylab = "")
op <- par(cex = 1.7)
legend("center",legend=c(tpname,
fpname,
fnname,
edname),lty=c(tplty,fplty,fnlty,edlty),
col=c(tpcol,fpcol,fncol,edcol),bty="n",cex=0.6)
} else {
diffmat<-matrix(0,nrow=nrow(jointmat),ncol=ncol(jointmat))
for(i in 1:n) {
for(j in 1:n) {
bi1<-adj[i,j]+adj[j,i]
bi2<-adjt[i,j]+adjt[j,i]
if(bi1==2 | bi2==2) {
if(bi1!=bi2) {
if(bi2==2 & bi1==0) { #FN
diffmat[i,j]<-3
diffmat[j,i]<-3
FNlist<-rbind(FNlist,c(nodelabs[i],nodelabs[j]))
FNlist<-rbind(FNlist,c(nodelabs[j],nodelabs[i]))
} else if (bi2==2 & bi1==1) { #ED FN
diffmat[i,j]<-4
diffmat[j,i]<-4
MissDlist<-rbind(MissDlist,c(nodelabs[i],nodelabs[j]))
MissDlist<-rbind(MissDlist,c(nodelabs[j],nodelabs[i]))
} else if (bi2==1 & bi1==2) { #ED FP
diffmat[i,j]<-4
diffmat[j,i]<-4
ExtrDlist<-rbind(ExtrDlist,c(nodelabs[i],nodelabs[j]))
ExtrDlist<-rbind(ExtrDlist,c(nodelabs[j],nodelabs[i]))
} else { #FP
diffmat[i,j]<-2
diffmat[j,i]<-2
FPlist<-rbind(FPlist,c(nodelabs[i],nodelabs[j]))
FPlist<-rbind(FPlist,c(nodelabs[j],nodelabs[i]))
}
} else {
if(adj[i,j]==1) comedges<-1
}
} else {
if(adj[i,j]!=adjt[i,j]){
if(adj[j,i]!=adjt[j,i]) {#ED
if(adj[i,j]==1) {
diffmat[i,j]<-4
jointmat[j,i]<-0
EDlist<-rbind(EDlist,c(nodelabs[i],nodelabs[j]))
} else {
diffmat[j,i]<-4
jointmat[i,j]<-0
EDlist<-rbind(EDlist,c(nodelabs[j],nodelabs[i]))
}
} else if (adj[i,j]==1) { #FP
diffmat[i,j]<-2
FPlist<-rbind(FPlist,c(nodelabs[i],nodelabs[j]))
} else {#FN
diffmat[i,j]<-3
FNlist<-rbind(FNlist,c(nodelabs[i],nodelabs[j]))
}
} else {
if(adj[i,j]==1) comedges<-1
}
}
}
}
fpcol<-"#74c476"
fncol="#df65b0"
jointarcs<-adjacency2edgel(jointmat,nodes=nodelabs)
graph.obj = new("graphNEL", nodes = nodelabs, edgeL = jointarcs,
edgemode = 'directed')
if(is.null(clusters)){
subGList = NULL } else {
numsubg<-length(clusters)
sg<-list()
subGList<-list()
for(i in 1:numsubg) {
sg[[i]] <-subGraph(clusters[[i]], graph.obj)
subGList[[i]]<-list(graph = sg[[i]], cluster = TRUE)
}
}
graph.plot = Rgraphviz::layoutGraph(graph.obj,subGList = subGList)
graph::edgeRenderInfo(graph.plot)[["lwd"]] = 2
if(!is.null(FPlist)) {
FP<-apply(FPlist, 1, paste, collapse = "~")
graph::edgeRenderInfo(graph.plot)[["col"]][FP] = fpcol
graph::edgeRenderInfo(graph.plot)[["lty"]][FP] = "solid"
graph::edgeRenderInfo(graph.plot)[["lwd"]][FP] = 1
}
if(!is.null(FNlist)) {
FN<-apply(FNlist, 1, paste, collapse = "~")
graph::edgeRenderInfo(graph.plot)[["col"]][FN] = fncol
graph::edgeRenderInfo(graph.plot)[["lty"]][FN] = "solid"
graph::edgeRenderInfo(graph.plot)[["lwd"]][FN] = 1
}
if(!is.null(EDlist)) {
ED<-apply(EDlist, 1, paste, collapse = "~")
graph::edgeRenderInfo(graph.plot)[["col"]][ED] = "blue"
}
if(!is.null(BiFP)) {
BiFP<-apply(BiFP, 1, paste, collapse = "~")
graph::edgeRenderInfo(graph.plot)[["col"]][BiFP] = fpcol
graph::edgeRenderInfo(graph.plot)[["lty"]][BiFP] = "solid"
graph::edgeRenderInfo(graph.plot)[["lwd"]][BiFP] = 1
}
if(!is.null(BiFN)) {
BiFN<-apply(BiFN, 1, paste, collapse = "~")
graph::edgeRenderInfo(graph.plot)[["col"]][BiFN] = fncol
graph::edgeRenderInfo(graph.plot)[["lty"]][BiFN] = "solid"
graph::edgeRenderInfo(graph.plot)[["lwd"]][BiFN] = 1
}
if(!is.null(MissDlist)) {
MissD<-apply(MissDlist, 1, paste, collapse = "~")
graph::edgeRenderInfo(graph.plot)[["col"]][MissD] = "blue"
graph::edgeRenderInfo(graph.plot)[["lty"]][MissD] = "solid"
}
if(!is.null(ExtrDlist)) {
ExtrD<-apply(ExtrDlist, 1, paste, collapse = "~")
graph::edgeRenderInfo(graph.plot)[["col"]][ExtrD] = "blue"
graph::edgeRenderInfo(graph.plot)[["lty"]][ExtrD] = "solid"
}
tpname<-"common edge"
fpname<-paste("present only in",name1)
fnname<-paste("present only in",name2)
edname<-"difference in direction"
tpcol<-"black"
edcol<-"blue"
tplwd<-2
fplwd<-1
fnlwd<-1
edlwd<-2
if(is.null(FNlist) & is.null(BiFN)) {
fncol<-NULL
fnlwd<-NULL
fnlty<-NULL
fnname<-NULL
}
if(is.null(FPlist) & is.null(BiFP)) {
fpcol<-NULL
fplwd<-NULL
fplty<-NULL
fpname<-NULL
}
if(is.null(MissDlist) & is.null(ExtrDlist) & is.null(EDlist)) {
edcol<-NULL
edlwd<-NULL
edlty<-NULL
edname<-NULL
}
if(comedges==0) {
tpcol<-NULL
tplwd<-NULL
tplty<-NULL
tpname<-NULL
}
layout(matrix(c(1,1,1,1,
1,1,1,1,
1,1,1,1,
1,1,1,1,
1,1,1,1,
2,2,3,3), nrow = 6, ncol = 4, byrow = TRUE))
Rgraphviz::renderGraph(graph.plot)
par(mar = c(0,0,0,0))
plot(1:10,1:10,type="n", axes = FALSE, xlab = "", ylab = "")
op <- par(cex = 1.7)
legend("top",legend=c(tpname,fpname,fnname,edname),lwd=c(tplwd,fplwd,fnlwd,edlwd),
col=c(tpcol,fpcol,fncol,edcol),bty="n",cex=0.6)
}
}
#' Highlighting similarities between two graphs
#'
#' This function plots nodes and edges from two graphs in one and indicates similarities between these graphs.
#'
#'@param graph1 binary adjacency matrix of a graph
#'@param graph2 binary adjacency matrix of a graph, column names should coincide with column names of 'graph1'
#'@param name1 character, custom name for 'graph1'; when NULL no legend will be plotted
#'@param name2 character, custom name for 'graph2'
#'@param bidir logical, defines if arrows of bidirected edges are drawn; FALSE by default.
#'@param ... optional parameters passed to \pkg{Rgraphviz} plotting functions e.g. \code{main}, \code{fontsize}
#'@return plots a graph which includes nodes and edges of both graphs; nodes which are connected to at least one other node in both graphs are plotted only once and coloured orange, and edges shared by both graphs
#'are also coloured orange; all other nodes and edges are plotted once for each of 'graph1' and 'graph2' and coloured blue and green accordingly.
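#'@examples
#'# illustrative sketch; assumes two DAGs over the same node set, e.g. from two chains
#'\dontrun{
#'myscore<-scoreparameters("bde", Asia)
#'g1<-getDAG(sampleBN(myscore, "order"))
#'g2<-getDAG(sampleBN(myscore, "partition"))
#'plot2in1(g1, g2, name1="order", name2="partition")
#'}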
#'@author Polina Suter
#'@export
plot2in1<-function(graph1, graph2, name1=NULL,
name2=NULL,bidir=FALSE, ...) {
if(is(graph1,"graphNEL")) {
graph1<-graph2m(graph1)
} else if (!is.matrix(graph1)){
graph1<-as.matrix(graph1)
}
if(is(graph2,"graphNEL")) {
graph2<-graph2m(graph2)
} else if (!is.matrix(graph2)){
graph2<-as.matrix(graph2)
}
if(!all(colnames(graph1)==colnames(graph2))) stop("adjacency matrices 'graph1' and 'graph2' have different column names!")
old.par<-par(no.readonly = TRUE)
on.exit(par(old.par))
mycolors <- c("#cbd5e8", "#ccebc5","#fdcdac","#E8CED4FF")
glist<-list()
glist[[1]]<- connectedSubGraph(graph1)
glist[[2]]<- connectedSubGraph(graph2)
clustm<-list()
clustm[[1]]<-setdiff(colnames(glist[[1]]),colnames(glist[[2]]))
clustm[[2]]<-setdiff(colnames(glist[[2]]),colnames(glist[[1]]))
clustm[[3]]<-intersect(colnames(glist[[1]]),colnames(glist[[2]]))
numsubg<-length(clustm)
allmuts<-unique(c(colnames(glist[[1]]),colnames(glist[[2]])))
glist[[1]]<-getSubGraph(graph1,allmuts)
glist[[2]]<-getSubGraph(graph2,allmuts)
nodelabs<-colnames(glist[[2]])
n<-nrow(glist[[2]])
jointgraph<-1*Reduce("|",glist)
jointarcs<-adjacency2edgel(jointgraph,nodes=nodelabs)
graph.obj = new("graphNEL", nodes = nodelabs, edgeL = jointarcs,
edgemode = 'directed')
sg<-list()
subGList<-list()
for(i in 1:numsubg) {
if(!is.null(clustm[[i]])) {
sg[[i]] <-subGraph(clustm[[i]], graph.obj)
subGList[[i]]<-list(graph = sg[[i]], cluster = TRUE)
}
}
graph.par(list(nodes=list(lty="solid", lwd=1, ...), graph=list(...)))
graph.plot = Rgraphviz::layoutGraph(graph.obj,subGList = subGList)
graph::nodeRenderInfo(graph.plot)[["fill"]][clustm[[1]]] = mycolors[1] #M
graph::nodeRenderInfo(graph.plot)[["fill"]][clustm[[2]]] = mycolors[2] #T
graph::nodeRenderInfo(graph.plot)[["fill"]][clustm[[3]]] = mycolors[3] #CNA
sumgraph<-Reduce("+",glist)
arcl<-list()
for(i in 1:2) {
edgy<-which(glist[[i]]>0,arr.ind = TRUE)
arcl[[i]]<-matrix(ncol=2,nrow=nrow(edgy))
for(j in 1:nrow(edgy)) {
arcl[[i]][j,]<-c(nodelabs[edgy[j,1]],nodelabs[edgy[j,2]])
}
}
commedges<-which(sumgraph>1,arr.ind = TRUE)
if(nrow(commedges)>0) {
arcl[[3]]<-matrix(ncol=2,nrow=nrow(commedges))
for(j in 1:nrow(commedges)) {
arcl[[3]][j,]<-c(nodelabs[commedges[j,1]], nodelabs[commedges[j,2]])
}
}
for(i in 1:length(arcl)) {
if(!is.null(arcl[[i]])) {
arcl[[i]]<-apply(arcl[[i]], 1, paste, collapse = "~")
graph::edgeRenderInfo(graph.plot)[["col"]][arcl[[i]]] = mycolors[i]
}
}
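  # edges drawn in both directions are collapsed by Rgraphviz into a single
  # edge with direction "both"; unless bidir=TRUE we hide their arrowheads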
u <- names(which(graph::edgeRenderInfo(graph.plot)[["direction"]] == "both"))
if(!bidir) {
graph::edgeRenderInfo(graph.plot)[["arrowhead"]][u] = "none"
graph::edgeRenderInfo(graph.plot)[["arrowtail"]][u] = "none"
}
graph::edgeRenderInfo(graph.plot)[["lwd"]]<-2
layout(matrix(c(1,1,1,1,1,
1,1,1,1,1,
1,1,1,1,1,
1,1,1,1,1,
1,1,1,1,1,
1,1,1,2,1), nrow = 6, ncol = 5, byrow = TRUE))
Rgraphviz::renderGraph(graph.plot)
if(!is.null(name1)){
par(mar = c(0,0,0,0))
plot(1:10,1:10,type="n", axes = FALSE, xlab = "", ylab = "")
op <- par(cex = 1.7)
legend("topright",
col=c("#cbd5e8", "#ccebc5","#fdcdac"),
lwd = 2,
lty = 1,
legend = c(name1,name2,"both"),
cex = 0.6,
bg = NA,bty = "n")
legend("topright",
col = "black",
pt.bg = c("#cbd5e8", "#ccebc5","#fdcdac"),
pch = 21,
lwd = 1,
legend = c(name1,name2,"both"),
cex = 0.6,
lty = 0,
bty = "n")
}
}
##############################
#print methods for S3 classes#
# orderMCMC #
# partitionMCMC #
# iterativeMCMC #
# scoreparameters #
# scorespace #
# intersim #
# sampsim #
##############################
#' Print object of class 'scoreparameters'
#'
#' @param x object of class 'scoreparameters'
#' @param ... ignored
#'
#' @rdname scoreparameters
#' @export
print.scoreparameters <-function(x, ...){
cat("object of class 'scoreparameters' \n\n")
cat("$type \n")
cat(x$type,"\n\n")
cat("$data \n")
if(!x$DBN) {
cat("data contains",nrow(x$data),"observations of",ncol(x$data),"variables \n\n" )
} else {
cat("data contains",nrow(x$data),"rows and",ncol(x$data),"columns \n\n")
}
cat("$DBN \n")
cat(x$DBN,"\n\n")
if (x$DBN) {
cat("$slices \n")
cat(x$slices,"\n\n")
if(x$bgn>0) {
cat("$static \n")
cat(x$static,"\n\n")
cat("$firstslice \n")
cat("...\n\n")
cat("$otherslices \n")
cat("...\n\n")
}
} else {
if(x$type=="bge") {
if(x$bgn>0) {
cat("$bgnodes \n")
cat(x$bgnodes,"\n\n")
}
cat("$am \n")
cat(x$am,"\n\n")
cat("$aw \n")
cat(x$am,"\n\n")
cat("$means \n")
cat(x$means[1],"...", x$means[length(x$means)] ,"\n\n")
cat("$SigmaN \n")
cat("matrix", ncol(x$SigmaN), "x",ncol(x$SigmaN), "\n\n")
} else if (x$type%in%c("bde","bdecat")) {
cat("$chi \n")
cat(x$chi,"\n\n")
cat("$pf \n")
cat(x$pf,"\n\n")
if(x$bgn>0) {
cat("$bgnodes \n")
cat(x$bgnodes,"\n\n")
}
}
}
if(!is.null(x$weightvector)) {
cat("$weightvector \n")
cat("...numeric vector of length",length(x$weightvector),"\n\n")
}
}
#' Prints object of class 'orderMCMC'
#'
#' @param x object of class 'orderMCMC'
#' @param ... ignored
#'
#' @rdname orderMCMC
#' @export
print.orderMCMC <-function(x, ...){
cat("object of class 'orderMCMC', from Call:", "\n")
print(x$info$fncall)
cat("\n")
cat("$DAG\n")
cat("adjacency matrix of a DAG with", ncol(x$DAG), "nodes and ", sum(x$DAG)," edges \n\n")
cat("$score\n")
cat(x$score,"\n\n")
cat("$maxorder\n")
cat(x$maxorder[1],"...",x$maxorder[length(x$maxorder)],"\n\n")
cat("$info\n")
cat("... \n\n")
cat("$trace\n")
cat(x$trace[1], "...", x$trace[length(x$trace)])
cat("\n\n")
if(!is.null(x$traceadd)) {
cat("$traceadd\n")
cat("adjacency matrices of sampled DAGs, corresponding orders and order scores \n\n")
}
if(!is.null(x$scoretable)) {
cat("$scoretable\n")
cat("score tables corresponding to core search space $endspace\n\n")
}
}
#' Prints object of class 'partitionMCMC'
#'
#' @param x object of class 'partitionMCMC'
#' @param ... ignored
#'
#' @rdname partitionMCMC
#' @export
print.partitionMCMC <-function(x, ...){
cat("object of class 'partitionMCMC', from Call:", "\n")
print(x$info$fncall)
cat("\n")
cat("$DAG\n")
cat("adjacency matrix of a DAG with", ncol(x$DAG), "nodes and ", sum(x$DAG)," edges", "\n\n")
cat("$score\n")
cat(x$score,"\n\n")
cat("$maxorder\n")
cat(x$maxorder[1],"...",x$maxorder[length(x$maxorder)],"\n\n")
cat("$info\n")
cat("... \n")
cat("$trace\n")
cat(x$trace[1], "...", x$trace[length(x$trace)])
cat("\n\n")
if(!is.null(x$traceadd)) {
cat("$traceadd\n")
cat("adjacency matrices of sampled DAGs, corresponding orders and order scores \n\n")
}
if(!is.null(x$scoretable)) {
cat("$scoretable\n")
cat("score tables corresponding to core search space $endspace \n\n")
}
}
#' Prints object of class 'iterativeMCMC'
#'
#' @param x object of class 'iterativeMCMC'
#' @param ... ignored
#'
#' @rdname iterativeMCMC
#' @export
print.iterativeMCMC <-function(x, ...){
cat("object of class 'iterativeMCMC', from Call:", "\n")
print(x$info$fncall)
cat("\n")
cat("$DAG\n")
cat("adjacency matrix of a DAG with", ncol(x$DAG), "nodes and ", sum(x$DAG)," edges", "\n\n")
cat("$maxorder\n")
cat(x$maxorder[1],"...",x$maxorder[length(x$maxorder)],"\n\n")
cat("$score\n")
cat(x$score,"\n\n")
cat("$maxtrace:\n")
cat("local maximums at each expansion step:\n")
cat(unlist(lapply(x$maxtrace,function(x)x$score)),"\n\n")
cat("$info\n")
cat("... \n")
cat("$trace\n")
cat(x$trace[[1]][1], "...", x$trace[[length(x$trace)]][length(x$trace[[length(x$trace)]])])
cat("\n\n")
if(!is.null(x$traceadd)) {
cat("$traceadd\n")
cat("adjacency matrices of sampled DAGs, corresponding orders and order scores\n")
cat("\n")
}
if(!is.null(x$scoretable)) {
cat("$scoretable\n")
cat("scoretable, object of class 'scorespace'\n\n")
}
}
#' Prints 'scorespace' object
#'
#' @param x object of class 'scorespace'
#' @param ... ignored
#'
#' @rdname scorespace
#' @export
print.scorespace <-function(x, ...){
cat("object of class 'scorespace'")
n<-ncol(x$adjacency)
cat("\n\n")
cat("$adjacency \n")
cat("matrix", n,"x",n, "\n\n")
n<-length(x$tables)
notnull<-which(unlist(lapply(x$tables,function(x)!is.null(x))))
if(length(notnull)<n) {
nullnodes<-which(unlist(lapply(x$tables,is.null)))
cat("forced root nodes (no score tables): ")
for(i in nullnodes) {
cat(i," ")
}
cat("\n\n")
}
if(is.list(x$tables[[notnull[1]]])) {
cat("$tables", "\n")
cat("[[",notnull[1],"]][[1]]", "\n",sep="")
cat(x$tables[[notnull[1]]][[1]][1,],"\n")
cat("...", "\n")
lastn<-notnull[length(notnull)]
lastt<-length(x$tables[[lastn]])
cat("[[",lastn,"]][[",lastt,"]]\n", sep="")
cat(x$tables[[lastn]][[lastt]][1,],"\n")
cat("...", "\n")
} else {
cat("$tables[[",notnull[1],"]]", "\n")
cat(x$tables[[notnull[1]]][1,], "...\n",sep="")
cat("...\n")
cat("$tables[[", length(x$tables),"]]\n",sep="")
cat(x$tables[[notnull[length(notnull)]]][1,],"...\n")
}
cat("\n")
cat("$blacklist \n")
cat("matrix", n,"x",n, "\n")
}
#' Prints itercomp object.
#'
#' @param x object of class 'itercomp'
#' @param ... ignored
#' @rdname itercomp
#' @export
print.itercomp <-function(x, ...){
cat("object of class 'itercomp'")
cat("\n\n")
nit<-nrow(x)
if(nit<5) {
print(x[1:nit,,drop=FALSE])
} else {
print(x[1:2,])
cat("... \n")
print(x[c(nit-1,nit),])
}
}
#' Prints samplecomp object.
#'
#' @param x object of class 'samplecomp'
#' @param ... ignored
#' @rdname samplecomp
#' @export
print.samplecomp <-function(x, ...){
cat("object of class 'samplecomp'")
cat("\n\n")
np<-nrow(x)
if(is.null(np)) {
print(x[1:length(x)])
} else if(np<10) {
print(x[1:np,,drop=FALSE])
} else {
print(x[1:5,])
cat("... \n")
print(x[c(np-4,np),])
}
}
#'Bayesian network structure sampling from the posterior distribution
#'
#'This function can be used for structure sampling using three different MCMC schemes. The order MCMC scheme (\code{algorithm="order"}) is the most computationally
#'efficient; however, it imposes a non-uniform prior in the space of DAGs. Partition MCMC (\code{algorithm="partition"}) is less computationally efficient and requires more iterations
#'to reach convergence, but it implements sampling under a uniform prior in the space of DAGs.
#'Due to the superexponential growth of the search space as the number of nodes increases, the
#'MCMC search is performed on a reduced search space. By default the search space is limited to the skeleton found through the PC algorithm by means of conditional independence tests
#'(using the functions \code{\link[pcalg]{skeleton}} and \code{\link[pcalg]{pc}} from the `pcalg' package [Kalisch et al, 2012]).
#'It is also possible to define an arbitrary search space by inputting an adjacency matrix, for example one estimated by partial correlations or other network algorithms.
#'Also implemented is the possibility to expand the default or input search space by allowing each node in the network to have one additional parent,
#'which improves both the learning and the sampling of Bayesian networks. The iterative MCMC scheme (\code{algorithm="orderIter"}) allows for iterative expansions of the search space.
#'This is useful when the initial search space is poor in the sense that it contains only a limited number of true positive edges; iterative expansions of the search space
#'efficiently resolve this issue, although this scheme requires longer runtimes due to the need to run multiple consecutive MCMC chains.
#'This function is a wrapper for the three individual structure learning and sampling functions that implement each of the described algorithms; for details see \code{\link{orderMCMC}},
#'\code{\link{partitionMCMC}} and \code{\link{iterativeMCMC}}.
#' @param scorepar an object of class \code{scoreparameters}, containing the data and score parameters, see constructor function \code{\link{scoreparameters}}
#' @param algorithm MCMC scheme to be used for sampling from posterior distribution; possible options are "order" (\code{\link{orderMCMC}}), "orderIter" (\code{\link{iterativeMCMC}}) or "partition" (\code{\link{partitionMCMC}})
#' @param chainout logical, if TRUE the saved MCMC steps are returned, TRUE by default
#' @param scoreout logical, if TRUE the search space and score tables are returned, FALSE by default
#' @param moveprobs a numerical vector of 4 (for "order" and "orderIter" algorithms) or 5 values (for "partition" algorithm) representing probabilities of the different moves in the space of
#' order and partitions accordingly. The moves are described in the corresponding algorithm specific functions \code{\link{orderMCMC}} and \code{\link{partitionMCMC}}
#' @param iterations integer, the number of MCMC steps, the default value is \eqn{6n^{2}\log{n}} for orderMCMC, \eqn{20n^{2}\log{n}} for partitionMCMC and \eqn{3.5n^{2}\log{n}} for iterativeMCMC, where n is the number of nodes in the Bayesian network
#' @param stepsave integer, thinning interval for the MCMC chain, indicating the number of steps between two output iterations, the default is \code{iterations/1000}
#' @param alpha numerical significance value in the interval \code{(0,1)} for the conditional independence tests at the PC algorithm stage
#' @param gamma tuning parameter which transforms the score by raising it to this power, 1 by default
#' @param cpdag logical, if TRUE the CPDAG returned by the PC algorithm will be used as the search
#'space, if FALSE (default) the full undirected skeleton will be used as the search space
#' @param hardlimit integer, limit on the size of parent sets in the search space
#' @param verbose logical, if TRUE messages about the algorithm's progress will be printed, FALSE by default
#' @param compress logical, if TRUE adjacency matrices representing sampled graphs will be stored as a sparse Matrix (recommended); TRUE by default
#' @param startspace (optional) a square sparse or ordinary matrix, of dimensions equal to the number of nodes, which defines the search space for the order MCMC in the form of an adjacency matrix. If NULL, the skeleton obtained from the PC-algorithm will be used. If \code{startspace[i,j]} equals 1 (0) it means that the edge from node \code{i} to node \code{j} is included in (excluded from) the search space. To include an edge in both directions, both \code{startspace[i,j]} and \code{startspace[j,i]} should be 1.
#' @param blacklist (optional) a square sparse or ordinary matrix, of dimensions equal to the number of nodes, which defines edges to exclude from the search space. If \code{blacklist[i,j]} equals 1 it means that the edge from node \code{i} to node \code{j} is excluded from the search space.
#' @param scoretable (optional) object of class \code{scorespace} containing list of score tables calculated for example by the last iteration of the function \code{iterativeMCMC}. When not NULL, parameter \code{startspace} is ignored.
#' @param startpoint (optional) integer vector of length n (representing an order when \code{algorithm="order"} or \code{algorithm="orderIter"}) or an adjacency matrix or sparse adjacency matrix (representing a DAG when \code{algorithm="partition"}), which will be used as the starting point in the MCMC algorithm; the default starting point is random
#' @param plus1 logical, if TRUE (default) the search is performed on the extended search space; only changeable for orderMCMC; for other algorithms it is fixed to TRUE
#' @param iterpar additional list of parameters for the MCMC scheme implementing iterative expansions of the search space; for more details see \code{\link{iterativeMCMC}}; the default is \code{list(posterior = 0.5, softlimit = 9, mergetype = "skeleton", accum = FALSE,
#'plus1it = NULL, addspace = NULL, alphainit = NULL)}
#' @return Depending on the value of the parameter \code{algorithm} returns an object of class \code{orderMCMC}, \code{partitionMCMC} or \code{iterativeMCMC} which contains log-score trace of sampled DAGs as well
#' as adjacency matrix of the maximum scoring DAG(s), its score and the order or partition score. The output can optionally include DAGs sampled in MCMC iterations and the score tables.
#' Optional output is regulated by the parameters \code{chainout} and \code{scoreout}. See \code{\link{orderMCMC class}}, \code{\link{partitionMCMC class}}, \code{\link{iterativeMCMC class}} for a detailed description of the classes' structures.
#' @note see also extractor functions \code{\link{getDAG}}, \code{\link{getTrace}}, \code{\link{getSpace}}, \code{\link{getMCMCscore}}.
#'@references P. Suter, J. Kuipers, G. Moffa, N.Beerenwinkel (2023) <doi:10.18637/jss.v105.i09>
#'@references Friedman N and Koller D (2003). A Bayesian approach to structure discovery in Bayesian networks. Machine Learning 50, 95-125.
#'@references Kalisch M, Maechler M, Colombo D, Maathuis M and Buehlmann P (2012). Causal inference using graphical models with the R package pcalg. Journal of Statistical Software 47, 1-26.
#'@references Geiger D and Heckerman D (2002). Parameter priors for directed acyclic graphical models and the characterization of several probability distributions. The Annals of Statistics 30, 1412-1440.
#'@references Kuipers J, Moffa G and Heckerman D (2014). Addendum on the scoring of Gaussian acyclic graphical models. The Annals of Statistics 42, 1689-1691.
#'@references Spirtes P, Glymour C and Scheines R (2000). Causation, Prediction, and Search, 2nd edition. The MIT Press.
#'@examples
#'\dontrun{
#'Asiascore <- scoreparameters("bde", Asia)
#'iterativefit <- learnBN(Asiascore, algorithm = "orderIter")
#'orderfit <- sampleBN(Asiascore, scoretable = iterativefit)
#'
#'myScore<-scoreparameters("bge",Boston)
#'MCMCchains<-list()
#'MCMCchains[[1]]<-sampleBN(myScore,"partition")
#'MCMCchains[[2]]<-sampleBN(myScore,"partition")
#'edge_posterior<-lapply(MCMCchains,edgep,pdag=TRUE)
#'plotpcor(edge_posterior)
#'}
#'@author Polina Suter, Jack Kuipers, the code partly derived from the order MCMC implementation from Kuipers J, Moffa G (2017) <doi:10.1080/01621459.2015.1133426>
#'@export
sampleBN<-function(scorepar, algorithm = c("order", "orderIter", "partition"), chainout = TRUE,
scoreout = FALSE, alpha = 0.05, moveprobs = NULL, iterations = NULL, stepsave = NULL,
gamma = 1, verbose = FALSE, compress = TRUE, startspace = NULL, blacklist = NULL,
scoretable = NULL, startpoint = NULL, plus1 = TRUE, cpdag = FALSE, hardlimit = 12,
iterpar = list(posterior = 0.5, softlimit = 9, mergetype = "skeleton", accum = FALSE,
plus1it = NULL, addspace = NULL, alphainit = NULL)){
if(length(algorithm)>1) algorithm<-"order"
if(algorithm=="order") {
MCMCresult<-orderMCMC(scorepar,MAP=FALSE,chainout=chainout,scoreout=scoreout,alpha=alpha,moveprobs=moveprobs,
iterations=iterations,stepsave=stepsave,gamma=gamma,verbose=verbose,compress=compress,
startspace=startspace,blacklist=blacklist,scoretable=scoretable,startorder=startpoint,
plus1=plus1,cpdag=cpdag,hardlimit=hardlimit)
} else if (algorithm=="partition") {
if(is(scoretable,"iterativeMCMC")){
scoretable<-getSpace(scoretable)
}
MCMCresult<-partitionMCMC(scorepar,scoreout=scoreout,alpha=alpha,moveprobs=moveprobs,
iterations=iterations,stepsave=stepsave,gamma=gamma,verbose=verbose,compress=compress,
startspace=startspace,blacklist=blacklist,scoretable=scoretable,startDAG=startpoint)
} else {
iterpardef<-list(posterior = 0.5, softlimit = 9, mergetype = "skeleton", accum = FALSE, plus1it = NULL, addspace = NULL, alphainit = NULL)
iterpardef[names(iterpar)]<-iterpar[names(iterpar)]
iterpar<-iterpardef
MCMCresult<-iterativeMCMC(scorepar,MAP=FALSE,chainout=chainout,scoreout=scoreout,alpha=alpha,moveprobs=moveprobs,
iterations=iterations,stepsave=stepsave,gamma=gamma,verbose=verbose,compress=compress,
startspace=startspace,blacklist=blacklist,scoretable=scoretable,startorder=startpoint,
cpdag=cpdag,hardlimit=hardlimit,posterior=iterpar$posterior,softlimit=iterpar$softlimit,
mergetype=iterpar$mergetype,accum=iterpar$accum,plus1it=iterpar$plus1it,
addspace=iterpar$addspace,alphainit=iterpar$alphainit)
}
return(MCMCresult)
}
#samples maximum scoring DAG from a given order, returns incidence matrix of this DAG
# 'samplenodes' argument added to handle root nodes, so we sample only for nodes which can have parents
samplescoreplus1.max<-function(matsize,samplenodes,scores,plus1lists,maxmatrices,scoretable,parenttable,
numberofparentsvec,aliases) {
incidence<-matrix(0,ncol=matsize,nrow=matsize) # store the adjacency matrix
sampledscore<-0
for (i in samplenodes){
if(is.null(plus1lists)) {
krow<-maxmatrices$maxrow[[i]][scores$therow[i]]
parentset<-aliases[[i]][parenttable[[i]][krow,1:numberofparentsvec[[i]][krow]]] #take right parent set
incidence[parentset,i]<-1 # fill in elements of the adjacency matrix
sampledscore<-sampledscore+scoretable[[i]][krow]
} else {
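      # with plus1 lists: first pick the list achieving the maximal score for
      # this row, then look up the best-scoring row within that list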
klist<-which.max(maxmatrices$maxmatrix[[i]][scores$therow[i],scores$allowedlists[[i]]])
abslist<-scores$allowedlists[[i]][klist]
krow<-maxmatrices$maxrow[[i]][scores$therow[i],abslist]
parentrow<-plus1lists$aliases[[i]][abslist,c(1,parenttable[[i]][krow,!is.na(parenttable[[i]][krow,])]+1)] #take right parent set
parentset<-parentrow[which(parentrow>0)] # removing NAs
incidence[parentset,i]<-1 # fill in elements of the adjacency matrix
sampledscore<-sampledscore+scoretable[[i]][[abslist]][krow]
}
}
DAG<-list()
DAG$incidence<-incidence
DAG$logscore<-sampledscore
return(DAG)
}
#samples a DAG from a given order, returns incidence matrix of this DAG
samplescoreplus1<-function(matsize,samplenodes,scores,plus1lists,scoretable,bannedscores,parenttable,
numberofparentsvec,aliases) {
incidence<-matrix(numeric(matsize*matsize),nrow=matsize) # store the adjacency matrix
sampledscore<-0
for (i in samplenodes){
if (scores$therow[i]==1) {
allowedrows<-c(1:nrow(parenttable[[i]]))
} else {
bannednodes<-parenttable[[i]][scores$therow[i],1:numberofparentsvec[[i]][scores$therow[i]]]
tablesize<-dim(parenttable[[i]]) # just to remove some arguments
if(tablesize[1]==1||length(bannednodes)==tablesize[2]){ # no parents are allowed
allowedrows<-c(1) # there is only one score
} else {
allowedrows<-c(2:tablesize[1])
for (j in 1:tablesize[2]) {
# working columnwise allows R to speed up
bannedrows<-which(parenttable[[i]][allowedrows,j]%in%bannednodes)
if(length(bannedrows)>0) { allowedrows<-allowedrows[-bannedrows] }
}
allowedrows<-c(1,allowedrows)
}
}
if(is.null(plus1lists)) {
allowedscores<-matrix(scoretable[[i]][allowedrows,1],nrow=length(allowedrows))
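      # sample one parent set with probability proportional to exp(log score);
      # subtracting the node's total log score normalises and keeps exp() stable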
k<-sample.int(length(allowedscores),1,prob=exp(allowedscores-scores$totscores[i]))
krow<-allowedrows[k]
parentset<-aliases[[i]][parenttable[[i]][krow,which(parenttable[[i]][krow,]>0)]] #take right parent set
incidence[parentset,i]<-1 # fill in elements of the adjacency matrix
sampledscore<-sampledscore+scoretable[[i]][krow,1]
} else {
#first sample plus1 list
klist<-sample.int(length(scores$allowedlists[[i]]),1,
prob=exp(bannedscores[[i]][scores$therow[i],scores$allowedlists[[i]]]-scores$totscores[i]))
abslist<-scores$allowedlists[[i]][klist]
#then sample a row from allowed rows
k<-sample.int(length(allowedrows),1,prob=exp(scoretable[[i]][[abslist]][allowedrows]-scores$totscores[i]))
krow<-allowedrows[k]
parentrow<-plus1lists$aliases[[i]][abslist,c(1,parenttable[[i]][krow,!is.na(parenttable[[i]][krow,])]+1)]
parentset<-parentrow[which(parentrow>0)] # removing NAs
incidence[parentset,i]<-1 # fill in elements of the adjacency matrix
sampledscore<-sampledscore+scoretable[[i]][[abslist]][krow]
}
}
DAG<-list()
DAG$incidence<-incidence
DAG$logscore<-sampledscore
return(DAG)
}
#samples a DAG from a given partition, returns incidence matrix of this DAG
samplescore.partition.plus1<-function(matsize,samplenodes,scores,scoretable,scoresallowed,scoresneeded,scoretab,parenttable,
needednodetable,numberofparentsvec,
needednodebannedrow,numberofpartitionparentsvec,
plus1lists) {
incidence<-matrix(numeric(matsize*matsize),nrow=matsize) # store the adjacency matrix
sampledscore<-0
logscorevec<-vector(length=matsize)
for (i in samplenodes) {
tablesize<-dim(parenttable[[i]]) # just to remove some arguments
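    # neededrow/allowedrow encode the partition constraints for node i:
    # (0,0) means no parents are allowed at all, while neededrow == -1 marks
    # background nodes or the last partition element (only banned parents apply)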
if (scores$neededrow[i]==0&&scores$allowedrow[i]==0){ #no parents are allowed
sampledscore<-sampledscore+scoretable[[i]][[1]][1,1]
} else if (scores$neededrow[i]==-1) { #bgnodes + last partition
bannednodes<-parenttable[[i]][scores$allowedrow[i],1:numberofparentsvec[[i]][scores$allowedrow[i]]]
      if(tablesize[1]==1||length(bannednodes)==tablesize[2]) {# no allowed parents in the main table
allowedscores<- scoretab[[i]][1,scores$plus1neededlists[[i]]]
k<-sample.int(length(allowedscores),1,prob=exp(allowedscores-scores$totscores[i]))
klist<-scores$plus1neededlists[[i]][k]
sampledscore<-sampledscore+scoretable[[i]][[klist]][1]
} else {
allowedrows<-c(2:tablesize[1])
for (j in 1:tablesize[2]) {
# working columnwise allows R to speed up
bannedrows<-which(parenttable[[i]][allowedrows,j]%in%bannednodes)
if(length(bannedrows)>0) { allowedrows<-allowedrows[-bannedrows] }
}
allowedrows<-c(1,allowedrows)
klist<-sample.int(length(scores$plus1neededlists[[i]]),1,
prob=exp(scoresallowed[[i]][scores$allowedrow[i],scores$plus1neededlists[[i]]]-scores$totscores[i]))
abslist<-scores$plus1neededlists[[i]][klist]
#then sample a row from allowed rows
k<-sample.int(length(allowedrows),1,prob=exp(scoretable[[i]][[abslist]][allowedrows]-scores$totscores[i]))
krow<-allowedrows[k]
parentrow<-plus1lists$aliases[[i]][abslist,c(1,parenttable[[i]][krow,!is.na(parenttable[[i]][krow,])]+1)]
parentset<-parentrow[which(parentrow>0)] # removing NAs
incidence[parentset,i]<-1 # fill in elements of the adjacency matrix
sampledscore<-sampledscore+scoretable[[i]][[abslist]][krow]
}
} else {
if (scores$allowedrow[i]==1){
allowedrows2<-c(1:nrow(parenttable[[i]]))
} else {
bannednodes<-parenttable[[i]][scores$allowedrow[i],1:numberofparentsvec[[i]][scores$allowedrow[i]]]
if(tablesize[1]==1||length(bannednodes)==tablesize[2]){ # no parents are allowed
allowedrows2<-c(1) # there is only one score
} else {
allowedrows2<-c(2:tablesize[1])
for (j in 1:tablesize[2]) { # working columnwise allows R to speed up
bannedrows<-which(parenttable[[i]][allowedrows2,j]%in%bannednodes)
if(length(bannedrows)>0){
allowedrows2<-allowedrows2[-bannedrows]
}
}
allowedrows2<-c(1,allowedrows2)
}
}
if (scores$neededrow[i]==0) {
klist2<-sample.int(length(scores$plus1neededlists[[i]]),1,
prob=exp(scoresallowed[[i]][scores$allowedrow[i],scores$plus1neededlists[[i]]]-scores$totscores[i]))
abslist2<-scores$plus1neededlists[[i]][klist2]
k2<-sample.int(length(allowedrows2),1,prob=exp(scoretable[[i]][[abslist2]][allowedrows2]-scores$totscores[i]))
krow2<-allowedrows2[k2]
parentrow<-plus1lists$aliases[[i]][abslist2,c(1,parenttable[[i]][krow2,!is.na(parenttable[[i]][krow2,])]+1)]
parentset<-parentrow[which(parentrow>0)] # removing NAs
incidence[parentset,i]<-1 # fill in elements of the adjacency matrix
sampledscore<-sampledscore+scoretable[[i]][[abslist2]][krow2]
} else {
requirednodes<-needednodetable[[i]][scores$neededrow[i],1:(numberofpartitionparentsvec[[i]][scores$neededrow[i]])]
bannedrow<-needednodebannedrow[[i]][scores$neededrow[i]]
bannednodes<-parenttable[[i]][bannedrow,1:numberofparentsvec[[i]][bannedrow]]
allowedrows1<-c(2:tablesize[1])
if(bannedrow>1) {
for (j in 1:tablesize[2]) { # working columnwise allows R to speed up
bannedrows<-which(parenttable[[i]][allowedrows1,j]%in%bannednodes)
if(length(bannedrows)>0) {
allowedrows1<-allowedrows1[-bannedrows]
}
}
}
notrequiredrows<-allowedrows1
for (j in 1:tablesize[2]) { # now we remove the allowable rows instead
requiredrows<-which(parenttable[[i]][notrequiredrows,j]%in%requirednodes)
if(length(requiredrows)>0) {
notrequiredrows<-notrequiredrows[-requiredrows]
}
}
allowedscores2<-matrix(scoretab[[i]][allowedrows2,scores$plus1neededlists[[i]]],nrow=length(allowedrows2))
allowedtablesize2<-dim(allowedscores2)
allowedrows1<-setdiff(allowedrows1,notrequiredrows) # and keep just the difference!
allowedscores1<-matrix(scoretab[[i]][allowedrows1,scores$plus1allowedlists[[i]]],nrow=length(allowedrows1))
allowedtablesize1<-dim(allowedscores1)
kboarder<-allowedtablesize2[1]*allowedtablesize2[2]
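        # sample a single index over both score tables at once: indices up to
        # kboarder address the 'needed' lists (allowedscores2), larger indices
        # the 'allowed' lists (allowedscores1)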
k<-sample.int(length(allowedscores2)+length(allowedscores1),1,prob=c(exp(allowedscores2-scores$totscores[i]),exp(allowedscores1-scores$totscores[i])))
if(k>kboarder) {
colnumber<-ceiling((k-kboarder)/allowedtablesize1[1])
rownumber<-k-kboarder-(colnumber-1)*allowedtablesize1[1]
krow<-allowedrows1[rownumber]
klist<-scores$plus1allowedlists[[i]][colnumber]
parentrow<-plus1lists$aliases[[i]][klist,c(1,parenttable[[i]][krow,!is.na(parenttable[[i]][krow,])]+1)]
parentset<-parentrow[which(parentrow>0)] # removing NAs
incidence[parentset,i]<-1 # fill in elements of the adjacency matrix
sampledscore<-sampledscore+scoretable[[i]][[klist]][krow]
}
else {
colnumber<-ceiling(k/allowedtablesize2[1])
rownumber<-k-(colnumber-1)*allowedtablesize2[1]
krow<-allowedrows2[rownumber]
klist<-scores$plus1neededlists[[i]][colnumber]
parentrow<-plus1lists$aliases[[i]][klist,c(1,parenttable[[i]][krow,!is.na(parenttable[[i]][krow,])]+1)]
parentset<-parentrow[which(parentrow>0)] # removing NAs
incidence[parentset,i]<-1 # fill in elements of the adjacency matrix
sampledscore<-sampledscore+scoretable[[i]][[klist]][krow]
}
}
}
}
DAG<-list()
DAG$incidence<-incidence
DAG$logscore<-sampledscore
return(DAG)
}
# old version of the function, kept just in case
samplescore.partition.plus1.old<-function(n,scores,scoretable,scoresallowed,scoresneeded,scoretab,parenttable,needednodetable,numberofparentsvec,
needednodebannedrow,numberofpartitionparentsvec,plus1lists){
incidence<-matrix(numeric(n*n),nrow=n) # store the adjacency matrix
sampledscore<-0
logscorevec<-vector(length=n)
for (i in 1:n)
{
if (scores$therow1[i]==0&&scores$therow2[i]==0){
sampledscore<-sampledscore+scoretable[[i]][[1]][1,1]
} else { tablesize<-dim(parenttable[[i]]) # just to remove some arguments
if (scores$therow2[i]==1){
allowedrows2<-c(1:nrow(parenttable[[i]]))
}
else{
bannednodes<-parenttable[[i]][scores$therow2[i],1:numberofparentsvec[[i]][scores$therow2[i]]]
if(tablesize[1]==1||length(bannednodes)==tablesize[2]){ # no parents are allowed
allowedrows2<-c(1) # there is only one score
} else{
allowedrows2<-c(2:tablesize[1])
for (j in 1:tablesize[2])
{ # working columnwise allows R to speed up
bannedrows<-which(parenttable[[i]][allowedrows2,j]%in%bannednodes)
if(length(bannedrows)>0){
allowedrows2<-allowedrows2[-bannedrows]
}
}
allowedrows2<-c(1,allowedrows2)
}
}
if (scores$therow1[i]==0) {
klist2<-sample.int(length(scores$allowedlists2[[i]]),1,
prob=exp(scoresneeded[[i]][scores$therow2[i],scores$allowedlists2[[i]]]-scores$totscores[i]))
abslist2<-scores$allowedlists2[[i]][klist2]
k2<-sample.int(length(allowedrows2),1,prob=exp(scoretable[[i]][[abslist2]][allowedrows2]-scores$totscores[i]))
krow2<-allowedrows2[k2]
parentrow<-plus1lists$aliases[[i]][abslist2,c(1,parenttable[[i]][krow2,!is.na(parenttable[[i]][krow2,])]+1)]
parentset<-parentrow[which(parentrow>0)] # removing NAs
incidence[parentset,i]<-1 # fill in elements of the adjacency matrix
sampledscore<-sampledscore+scoretable[[i]][[abslist2]][krow2]
} else {
requirednodes<-needednodetable[[i]][scores$therow1[i],1:(numberofpartitionparentsvec[[i]][scores$therow1[i]])]
bannedrow<-needednodebannedrow[[i]][scores$therow1[i]]
bannednodes<-parenttable[[i]][bannedrow,1:numberofparentsvec[[i]][bannedrow]]
allowedrows1<-c(2:tablesize[1])
if(bannedrow>1) {
for (j in 1:tablesize[2]) { # working columnwise allows R to speed up
bannedrows<-which(parenttable[[i]][allowedrows1,j]%in%bannednodes)
if(length(bannedrows)>0) {
allowedrows1<-allowedrows1[-bannedrows]
}
}
}
notrequiredrows<-allowedrows1
for (j in 1:tablesize[2]) { # drop rows containing a required node; what remains has no required parent
requiredrows<-which(parenttable[[i]][notrequiredrows,j]%in%requirednodes)
if(length(requiredrows)>0) {
notrequiredrows<-notrequiredrows[-requiredrows]
}
}
allowedscores2<-matrix(scoretab[[i]][allowedrows2,scores$allowedlists2[[i]]],nrow=length(allowedrows2))
allowedtablesize2<-dim(allowedscores2)
allowedrows1<-setdiff(allowedrows1,notrequiredrows) # keep only rows containing at least one required node
allowedscores1<-matrix(scoretab[[i]][allowedrows1,scores$allowedlists1[[i]]],nrow=length(allowedrows1))
allowedtablesize1<-dim(allowedscores1)
kborder<-allowedtablesize2[1]*allowedtablesize2[2]
k<-sample.int(length(allowedscores2)+length(allowedscores1),1,prob=c(exp(allowedscores2-scores$totscores[i]),exp(allowedscores1-scores$totscores[i])))
if(k>kborder) {
colnumber<-ceiling((k-kborder)/allowedtablesize1[1])
rownumber<-k-kborder-(colnumber-1)*allowedtablesize1[1]
krow<-allowedrows1[rownumber]
klist<-scores$allowedlists1[[i]][colnumber]
parentrow<-plus1lists$aliases[[i]][klist,c(1,parenttable[[i]][krow,!is.na(parenttable[[i]][krow,])]+1)]
parentset<-parentrow[which(parentrow>0)] # removing NAs
incidence[parentset,i]<-1 # fill in elements of the adjacency matrix
sampledscore<-sampledscore+scoretable[[i]][[klist]][krow]
}
else {
colnumber<-ceiling(k/allowedtablesize2[1])
rownumber<-k-(colnumber-1)*allowedtablesize2[1]
krow<-allowedrows2[rownumber]
klist<-scores$allowedlists2[[i]][colnumber]
parentrow<-plus1lists$aliases[[i]][klist,c(1,parenttable[[i]][krow,!is.na(parenttable[[i]][krow,])]+1)]
parentset<-parentrow[which(parentrow>0)] # removing NAs
incidence[parentset,i]<-1 # fill in elements of the adjacency matrix
sampledscore<-sampledscore+scoretable[[i]][[klist]][krow]
}
}
}
}
DAG<-list()
DAG$incidence<-incidence
DAG$logscore<-sampledscore
return(DAG)
}
## ---- end of BiDAG/R/samplefns.R ----
#'Score against DBN
#'
#'Scoring observations against a DBN structure
#'
#'@param scorepar object of class 'scoreparameters'
#'@param incidence adjacency matrix of a DAG
#'@param datatoscore matrix or vector containing observations to be scored
#'@param marginalise (logical) should marginal score be used?
#'@param onlymain (logical) if TRUE, static nodes are excluded from the score
#'@param datainit optional; in the case of an unbalanced design, the mean score of the available samples for the first time slice (T0) is computed
#'@return vector of log-scores
#'@author Polina Suter
#'@export
scoreagainstDBN<-function(scorepar, incidence, datatoscore=NULL,
marginalise=FALSE, onlymain=FALSE, datainit=NULL){
backDBN<-DBNbacktransform_l(incidence,scorepar,coln=TRUE)
if(scorepar$split) {
initMat<-backDBN$init
transMat<-backDBN$trans
} else {
initMat<-DBNinit(backDBN,scorepar$nsmall,scorepar$bgn)
transMat<-backDBN
}
if(!is.null(datatoscore)) {
datasplit<-splitDBNdata(datatoscore,scorepar)
} else {
datasplit<-NULL
}
if(scorepar$stationary) {
if(scorepar$slices==2) {
totscore<-scoreagainstDAG(scorepar$firstslice, initMat, datatoscore=datasplit$init,onlymain=TRUE)+
scoreagainstDAG(scorepar$otherslices, transMat, datatoscore=datasplit$trans,onlymain=TRUE)
} else {
totscore<-0
if(is.null(datainit)) {
totscore<-totscore+scoreagainstDAG(scorepar$firstslice, initMat, datatoscore=datasplit[[scorepar$slices]],onlymain=TRUE)
} else {
for(i in 1:length(datainit)) {
totscore<-totscore+scoreagainstDAG(scorepar$firstslice, initMat, datatoscore=datainit[[i]],onlymain=TRUE)
}
totscore<-totscore/length(datainit)
}
for(i in 1:(scorepar$slices-1)) {
nas<-which(apply(datasplit[[i]],1,function(x)all(!is.na(x))))
if((nrow(datasplit[[i]])-length(nas))>0) {
addscore<-rep(0,nrow(datasplit[[i]]))
addscore[nas]<-scoreagainstDAG(scorepar$otherslices, transMat, datatoscore=datasplit[[i]][nas,],onlymain=TRUE)
} else {
addscore<-scoreagainstDAG(scorepar$otherslices, transMat, datatoscore=datasplit[[i]],onlymain=TRUE)
}
totscore<-totscore+addscore
}
}
} else {
totscore<-0
if(is.null(datainit)) {
totscore<-totscore+scoreagainstDAG(scorepar$paramsets[[scorepar$slices]], initMat, datatoscore=datasplit[[scorepar$slices]],onlymain=TRUE)
} else {
for(i in 1:length(datainit)) {
totscore<-totscore+scoreagainstDAG(scorepar$paramsets[[scorepar$slices]], initMat, datatoscore=datainit[[i]],onlymain=TRUE)
}
totscore<-totscore/length(datainit)
for(i in 1:(scorepar$slices-1)) {
nas<-which(apply(datasplit[[i]],1,function(x)all(!is.na(x))))
if((nrow(datasplit[[i]])-length(nas))>0) {
addscore<-rep(0,nrow(datasplit[[i]]))
addscore[nas]<-scoreagainstDAG(scorepar$paramsets[[i]], transMat, datatoscore=datasplit[[i]][nas,],onlymain=TRUE)
} else {
addscore<-scoreagainstDAG(scorepar$paramsets[[i]], transMat, datatoscore=datasplit[[i]],onlymain=TRUE)
}
totscore<-totscore+addscore
}
}
}
return(totscore)
}
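# Usage sketch (hypothetical objects, not run): assuming 'dbnpar' is a
# 'scoreparameters' object created with DBN=TRUE and 'dbnmat' is a DBN
# adjacency matrix over the same static and time-slice nodes,
#   obsscores <- scoreagainstDBN(dbnpar, dbnmat)
# returns one log-score per observation row.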
#Score against DBN
#
#Scoring observations against a DBN structure
#
#@param scorepar object of class 'scoreparameters'
#@param incidence adjacency matrix of a DAG
#@param datatoscore matrix or vector containing observations to be scored
#@param marginalise (logical) should marginal score be used?
#@param onlymain (logical) if TRUE, static nodes are excluded from the score
#@param datainit optional; in the case of an unbalanced design, the mean score of the available samples for the first time slice (T0) is computed
#@return vector of log-scores
#@author Polina Suter
scoreagainstDBN3<-function(scorepar, incidence, datatoscore=NULL,
marginalise=FALSE, onlymain=FALSE, datainit=NULL){
backDBN<-DBNbacktransform_l(incidence,scorepar,coln=TRUE)
if(scorepar$split) {
initMat<-backDBN$init
transMat<-backDBN$trans
} else {
initMat<-DBNinit(backDBN,scorepar$nsmall,scorepar$bgn)
transMat<-backDBN
}
if(!is.null(datatoscore)) {
datasplit<-splitDBNdata(datatoscore,scorepar)
} else {
datasplit<-NULL
}
if(scorepar$stationary) {
if(scorepar$slices==2) {
totscore<-scoreagainstDAG(scorepar$firstslice, initMat, datatoscore=datasplit$init,onlymain=TRUE)+
scoreagainstDAG(scorepar$otherslices, transMat, datatoscore=datasplit$trans,onlymain=TRUE)
} else {
totscore<-0
if(is.null(datainit)) {
totscore<-totscore+scoreagainstDAG(scorepar$firstslice, initMat, datatoscore=datasplit[[scorepar$slices]],onlymain=TRUE)
} else {
for(i in 1:length(datainit)) {
totscore<-totscore+scoreagainstDAG(scorepar$firstslice, initMat, datatoscore=datainit[[i]],onlymain=TRUE)
}
totscore<-totscore/length(datainit)
}
for(i in 1:(scorepar$slices-1)) {
nas<-which(apply(datasplit[[i]],1,function(x)all(!is.na(x))))
if((nrow(datasplit[[i]])-length(nas))>0) {
addscore<-rep(0,nrow(datasplit[[i]]))
addscore[nas]<-scoreagainstDAG(scorepar$otherslices, transMat, datatoscore=datasplit[[i]][nas,],onlymain=TRUE)
} else {
addscore<-scoreagainstDAG(scorepar$otherslices, transMat, datatoscore=datasplit[[i]],onlymain=TRUE)
}
totscore<-totscore+addscore
}
}
} else {
totscore<-0
if(is.null(datainit)) {
totscore<-totscore+scoreagainstDAG(scorepar$paramsets[[scorepar$slices]], initMat, datatoscore=datasplit[[scorepar$slices]],onlymain=TRUE)
} else {
for(i in 1:length(datainit)) {
totscore<-totscore+scoreagainstDAG(scorepar$paramsets[[scorepar$slices]], initMat, datatoscore=datainit[[i]],onlymain=TRUE)
}
totscore<-totscore/length(datainit)
for(i in 1:(scorepar$slices-1)) {
nas<-which(apply(datasplit[[i]],1,function(x)all(!is.na(x))))
if((nrow(datasplit[[i]])-length(nas))>0) {
addscore<-rep(0,nrow(datasplit[[i]]))
addscore[nas]<-scoreagainstDAG(scorepar$paramsets[[i]], transMat, datatoscore=datasplit[[i]][nas,],onlymain=TRUE)
} else {
addscore<-scoreagainstDAG(scorepar$paramsets[[i]], transMat, datatoscore=datasplit[[i]],onlymain=TRUE)
}
totscore<-totscore+addscore
}
}
}
return(totscore)
}
splitDBNdata<-function(datatoscore,param,addinit=NULL) {
datasplit<-list()
if(param$slices==2) {
if(param$bgn>0) {
datasplit$init<-datatoscore[,c(1:param$nsmall+param$bgn,1:param$bgn)]
datasplit$trans<-datatoscore[,c(1:param$nsmall+param$nsmall+param$bgn,1:param$bgn,1:param$nsmall+param$bgn)]
} else {
datasplit$init<-datatoscore[,1:param$nsmall]
datasplit$trans<-datatoscore[,c(1:param$nsmall+param$nsmall,1:param$nsmall)]
}
} else {
for (i in 1:(param$slices-1)) {
datasplit[[i]]<-datatoscore[,c(1:param$nsmall+i*param$nsmall,1:param$nsmall+(i-1)*param$nsmall)]
}
if(is.null(addinit)) {
datasplit[[param$slices]]<-datatoscore[,1:param$nsmall]
} else {
datasplit[[param$slices]]<-addinit
}
}
return(datasplit)
}
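# Column-layout sketch: for slices > 2, element i of the returned list pairs
# slice i+1 (child columns) with slice i (parent columns). With nsmall = 2
# dynamic nodes, bgn = 0 and 3 slices, the columns of 'datatoscore' are
# (v1, v2, v1.2, v2.2, v1.3, v2.3) and
#   datasplit[[1]] takes columns c(3, 4, 1, 2)   # slice 2 scored given slice 1
#   datasplit[[2]] takes columns c(5, 6, 3, 4)   # slice 3 scored given slice 2
#   datasplit[[3]] takes columns c(1, 2)         # initial slice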
DBNbacktransform_l<-function(DBNmat,param,coln=FALSE) {
if(!is.null(colnames(DBNmat))) {
oldnodelabels<-colnames(DBNmat)
newnodelabels<-oldnodelabels
newnodelabels[param$intstr$cols]<-oldnodelabels[param$usrtrans$cols]
if(param$bgn==0) newnodelabels[param$trans$rows]<-oldnodelabels[param$usrinitstr$rows] else {
newnodelabels[c(param$intstr$rows[1:param$bgn],param$trans$rows)]<-oldnodelabels[param$usrinitstr$rows]
}
}
newDBNmat<-matrix(0,nrow=param$n+param$nsmall,ncol=param$n+param$nsmall)
newDBNmat[param$intstr$rows,param$intstr$cols]<-1*(DBNmat[param$usrintstr$rows,param$usrintstr$cols]|DBNmat[param$usrinitstr$rows,param$usrinitstr$cols])
newDBNmat[param$trans$rows,param$trans$cols]<-DBNmat[param$usrtrans$rows,param$usrtrans$cols]
if(!param$split) {
if(coln) colnames(newDBNmat)<-rownames(newDBNmat)<-newnodelabels
return(newDBNmat)
} else {
res<-list()
initDBNmat<-DBNmat[1:param$n,1:param$n]
newinitDBNmat<-DBNmat[1:param$n,1:param$n]
if(param$bgn>0) {
newinitDBNmat[,1:param$bgn+param$nsmall]<-initDBNmat[,1:param$bgn]
}
newinitDBNmat[,1:param$nsmall]<-initDBNmat[,1:param$nsmall+param$bgn]
initDBNmat<-newinitDBNmat
if(param$bgn>0) {
newinitDBNmat[1:param$bgn+param$nsmall,]<-initDBNmat[1:param$bgn,]
}
newinitDBNmat[1:param$nsmall,]<-initDBNmat[1:param$nsmall+param$bgn,]
res$init<-newinitDBNmat
transDBNmat<-matrix(0,nrow=param$n+param$nsmall,ncol=param$n+param$nsmall)
DBNmat<-DBNcut_l(DBNmat,dyn=param$nsmall,b=param$bgn)
transDBNmat[param$intstr$rows,param$intstr$cols]<-DBNmat[param$usrintstr$rows,param$usrintstr$cols]
transDBNmat[param$trans$rows,param$trans$cols]<-DBNmat[param$usrtrans$rows,param$usrtrans$cols]
res$trans<-transDBNmat
return(res)
}
}
DBNcut_l<-function(adj,dyn,b){
adj[,1:(dyn+b)]<-0
return(adj)
}
DBNunitedata<-function(dbndata,dyn,b){
nsmall<-dyn
n<-b+dyn
bgn<-b
slices<-(ncol(dbndata)-bgn)/dyn
if(all(is.character(colnames(dbndata)))){
nodeslabels<-colnames(dbndata)
} else {
if(b>0) {
staticnames<-sapply(c(1:bgn), function(x)paste("s",x,sep=""))
dynamicnames<-rep(sapply(c(1:nsmall), function(x)paste("v",x,sep="")),slices)
for(i in 2:slices) {
dynamicnames[1:nsmall+(i-1)*nsmall]<-paste(dynamicnames[1:nsmall+(i-1)*nsmall],".",i,sep="")
}
nodeslabels<-c(staticnames,dynamicnames)
} else {
nodeslabels<-rep(sapply(c(1:n), function(x)paste("v",x,sep="")),slices)
for(i in 2:slices) {
nodeslabels[1:nsmall+(i-1)*nsmall]<-paste(nodeslabels[1:nsmall+(i-1)*nsmall],".",i,sep="")
}
}
colnames(dbndata)<-nodeslabels
}
labels.short<-nodeslabels[1:(n+nsmall)]
# the first two slices enter directly as columns; later slices are layered below as extra rows
datalocal <- dbndata[,1:(2*nsmall+bgn)]
collabels<-colnames(datalocal)
if (bgn>0){
bgdata<-dbndata[,1:b]
if(slices > 2){ # layer on later time slices
for(jj in 1:(slices-2)){
datatobind<-cbind(bgdata,dbndata[,nsmall*jj+1:(2*nsmall)+bgn])
colnames(datatobind)<-collabels
datalocal <- rbind(datalocal,datatobind)
}
}
} else {
if(slices > 2){ # layer on later time slices
for(jj in 1:(slices-2)){
datatobind<-dbndata[,n*jj+1:(2*n)]
colnames(datatobind)<-collabels
datalocal <- rbind(datalocal,datatobind)
}
}
}
return(datalocal)
}
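# Layering sketch: with dyn = 2, b = 0 and 3 time slices, DBNunitedata keeps
# the first two slices as columns and appends each later consecutive slice
# pair as extra rows, so all transitions feed one shared transition model:
#   d <- matrix(rnorm(18), nrow = 3)                  # 3 observations, 6 columns
#   colnames(d) <- c("v1","v2","v1.2","v2.2","v1.3","v2.3")
#   dim(DBNunitedata(d, dyn = 2, b = 0))              # 6 rows, 4 columns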
## ---- end of BiDAG/R/scoreagainstDBN.R ----
#'Calculating the score of a sample against a DAG
#'
#'This function calculates the score of a given sample against a DAG represented by its incidence matrix.
#'@param scorepar an object of class \code{scoreparameters}; see constructor function \code{\link{scoreparameters}}
#'@param incidence a square matrix of dimensions equal to the number of variables with entries in \code{\{0,1\}}, representing the adjacency matrix of the DAG against which the score is calculated
#'@param datatoscore (optional) a matrix (vector) containing binary (for BDe score) or continuous (for the BGe score) observations (or just one observation) to be scored; the number of columns should be equal to the number of variables in the Bayesian network, the number of rows should be equal to the number of observations; by default all data from \code{scorepar} parameter is used
#'@param marginalise (optional for continuous data) defines whether to use the posterior mean for scoring (default) or to marginalise over the posterior distribution (more computationally costly)
#'@param onlymain (optional) if TRUE, the score is computed only for the main nodes, excluding 'bgnodes'; FALSE by default
#'@param bdecatCvec (optional for categorical data) a vector with the number of categories of each variable, overriding the one stored in \code{scorepar}
#'@return the log of the BDe/BGe score of given observations against a DAG
#'@references Heckerman D and Geiger D, (1995). Learning Bayesian networks: A unification for discrete and Gaussian domains. In Eleventh Conference on Uncertainty in Artificial Intelligence, pages 274-284, 1995.
#'@examples
#' Asiascore<-scoreparameters("bde", Asia[1:100,]) #we wish to score only first 100 observations
#' scoreagainstDAG(Asiascore, Asiamat)
#'
#'@export
#'@author Jack Kuipers, Polina Suter
scoreagainstDAG <- function(scorepar, incidence, datatoscore=NULL, marginalise=FALSE, onlymain=FALSE,bdecatCvec=NULL){
if(onlymain && !marginalise) {
n<-scorepar$n
mainnodes<-scorepar$mainnodes
} else {
n<-scorepar$n
mainnodes<-c(1:n)
}
if (is.null(datatoscore)) {
datatoscore<-scorepar$data
}
if(is.vector(datatoscore)){ # if input is a vector
datatoscore <- matrix(datatoscore, nrow=1) # cast it as a matrix
}
if (scorepar$type=="bge") {
if(marginalise==FALSE){
datatoscore <- t(t(datatoscore) - scorepar$muN) # recentre around posterior mean
} else {
datatoscore <- t(t(datatoscore) - scorepar$means) # recentre about data mean
}
}
if (scorepar$type=="bge" && marginalise!=FALSE){
return(scoreagainstDAGmargBGe(n, scorepar, incidence, datatoscore))
} else if (scorepar$type=="mixed") {
binscore<-scoreagainstDAG(scorepar$binpar, incidence[1:scorepar$nbin,1:scorepar$nbin])
gausscore<-scoreagainstDAG(scorepar$gausspar, incidence)
return(binscore+gausscore)
}else if (scorepar$type=="bde"){
samplescores <- matrix(0,nrow=nrow(datatoscore),ncol=n)
for (j in mainnodes) {
parentnodes <- which(incidence[,j]==1)
samplescores[,j]<-scoreagainstDAGcore(j,parentnodes,n,scorepar,datatoscore)
}} else {
if (!is.null(bdecatCvec)) {
scorepar$Cvec <- bdecatCvec
}
samplescores <- matrix(0,nrow=nrow(datatoscore),ncol=n)
for (j in 1:n) {
parentnodes <- which(incidence[,j]==1)
samplescores[,j]<-scoreagainstDAGcore(j,parentnodes,n,scorepar,datatoscore)
}
}
return(rowSums(samplescores))
}
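# Sketch of scoring held-out observations (assumes the Asia data shipped with
# the package, as in the roxygen example above): the rows used to build the
# score object and the rows being scored may differ,
#   Asiascore <- scoreparameters("bde", Asia[1:100, ])
#   heldout <- scoreagainstDAG(Asiascore, Asiamat, datatoscore = Asia[101:110, ])
#   length(heldout)   # 10 log-scores, one per held-out row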
# this function scores a node against its parents based on the BGe or BDe (binary) score
# author Jack Kuipers
scoreagainstDAGcore<-function(j,parentnodes,n,param,datatoscore) {
samplenodescores<-rep(0,nrow(datatoscore)) # store
lp<-length(parentnodes) # number of parents
switch(param$type,
"bge" = {
Sigma <- param$SigmaN
A <- Sigma[j,j]
if(lp==0){# no parents
samplenodescores <- -datatoscore[,j]^2/(2*A) - log(2*pi*A)/2
} else {
D <- as.matrix(Sigma[parentnodes,parentnodes])
choltemp<-chol(D)
B <- Sigma[j,parentnodes]
C <- backsolve(choltemp,B,transpose=TRUE)
E <- backsolve(choltemp,C) #computing betas
# myE<-B%*%solve(D) #same as E this is how it is done in formulas
# print(E)
# print(myE)
#myK<-A-B%*%solve(D)%*%as.matrix(B) same as K below but from formula
K <- A - sum(C^2) #sigma2
#print(myK)
#print(K)
coreMat <- c(1,-E)%*%t(c(1,-E))/K
xs <- datatoscore[,c(j,parentnodes)]
#print(-(datatoscore[,j]-sum(myE*datatoscore[,parentnodes]))^2/(2*K) - log(2*pi*K)/2)
samplenodescores <- -rowSums(xs%*%coreMat*xs)/2 - log(2*pi*K)/2
}
},
"bde" = {
noparams<-2^lp # number of binary states of the parents
switch(as.character(lp),
"0"={# no parents
N1<-sum(param$d1[,j],na.rm=TRUE)
N0<-sum(param$d0[,j],na.rm=TRUE)
NT<-N0+N1
theta<-(N1+param$chi/(2*noparams))/(NT+param$chi/noparams) # the probability of each state
samplenodescores[which(datatoscore[,j]==1)]<-log(theta) # log scores of 1s
samplenodescores[which(datatoscore[,j]==0)]<-log(1-theta) # log scores of 0s
},
"1"={# one parent
corescore<-param$scoreconstvec[lp+1]
summys<-param$data[,parentnodes]
summysfull<-datatoscore[,parentnodes]
for(i in 0:(noparams-1)){ # loop over parent states
totest<-which(summys==i)
N1<-sum(param$d1[totest,j],na.rm=TRUE)
N0<-sum(param$d0[totest,j],na.rm=TRUE)
NT<-N0+N1
theta<-(N1+param$chi/(2*noparams))/(NT+param$chi/noparams) # the probability of each state
toscore<-which(summysfull==i)
samplenodescores[toscore[which(datatoscore[toscore,j]==1)]]<-log(theta) # log scores of 1s
samplenodescores[toscore[which(datatoscore[toscore,j]==0)]]<-log(1-theta) # log scores of 0s
}
},
{ # more parents
corescore<-param$scoreconstvec[lp+1]
summys<-colSums(2^(c(0:(lp-1)))*t(param$data[,parentnodes]))
tokeep<-which(!is.na(summys+param$d1[,j])) # remove NAs either in the parents or the child
if(length(tokeep)<length(summys)){
N1s<-collectC(summys[tokeep],param$d1[tokeep,j],noparams)
N0s<-collectC(summys[tokeep],param$d0[tokeep,j],noparams)
} else {
N1s<-collectC(summys,param$d1[,j],noparams)
N0s<-collectC(summys,param$d0[,j],noparams)
}
NTs<-N0s+N1s
thetas<-(N1s+param$chi/(2*noparams))/(NTs+param$chi/noparams) # the probability of each state
summysfull<-colSums(2^(c(0:(lp-1)))*t(datatoscore[,parentnodes]))
ones<-which(datatoscore[,j]==1)
samplenodescores[ones]<-log(thetas[summysfull[ones]+1])
zeros<-which(datatoscore[,j]==0)
samplenodescores[zeros]<-log(1-thetas[summysfull[zeros]+1])
})
},
"bdecat" = {
lp<-length(parentnodes) # number of parents
chi<-param$chi
Cj <- param$Cvec[j] # number of levels of j
# Get parameters
switch(as.character(lp),
"0"={# no parents
Cp <- 1 # effectively 1 parent level
summys <- rep(0, nrow(param$data))
},
"1"={# one parent
Cp <- param$Cvec[parentnodes] # number of parent levels
summys <- param$data[,parentnodes]
},
{ # more parents
Cp <- prod(param$Cvec[parentnodes])
# use mixed radix mapping to unique parent states
summys<-colSums(cumprod(c(1,param$Cvec[parentnodes[-lp]]))*t(param$data[,parentnodes]))
})
if(!is.null(param$weightvector)){
Ns <- collectCcatwt(summys, param$data[,j], param$weightvector, Cp, Cj)
} else{
Ns <- collectCcat(summys, param$data[,j], Cp, Cj)
}
NTs <- rowSums(Ns)
# Score data
switch(as.character(lp),
"0"={# no parents
samplenodescores <- log(Ns[datatoscore[,j]+1] + chi/Cj) - log(NTs + chi)
},
"1"={# one parent
pa_idx <- datatoscore[,parentnodes] + 1
j_pa_idx <- Cp * datatoscore[,j] + pa_idx
samplenodescores <- log(Ns[j_pa_idx]+chi/(Cp*Cj)) - log(NTs[pa_idx] + chi/Cp)
},
{ # more parents
pa_idx <- colSums(cumprod(c(1,param$Cvec[parentnodes[-lp]]))*t(datatoscore[,parentnodes])) + 1
j_pa_idx <- Cp * datatoscore[,j] + pa_idx
samplenodescores <- log(Ns[j_pa_idx]+chi/(Cp*Cj)) - log(NTs[pa_idx] + chi/Cp)
})
})
return(samplenodescores)
}
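# The 'more parents' branches above map each joint parent state to a single
# integer via a (mixed-)radix expansion. A small sketch for two binary parents:
#   pa <- cbind(c(0, 1, 0, 1), c(0, 0, 1, 1))   # the four joint states
#   colSums(2^(0:1) * t(pa))                    # encodes them as 0 1 2 3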
# This function scores a data vector against a DAG by marginalising
# over the posterior distribution of the BGe score
# This is equivalent to the difference in scores of the DAG with
# and without the extra data vector
# author Jack Kuipers
scoreagainstDAGmargBGe <- function(n, scorepar, incidence, datatoscore){
baselinescore <- DAGscore(scorepar, incidence)
scorepar2 <- scorepar # store updated scoring components
scorepar2$N <- scorepar$N + 1 # updated size
scorepar2$awpN <- scorepar$awpN + 1 # updated parameter
# update the constant part of the score
scorepar2$scoreconstvec <- scorepar$scoreconstvec -
log(pi)/2 + log(scorepar2$am+scorepar2$N-1)/2 - log(scorepar2$am+scorepar2$N)/2 +
lgamma((1:n-n+scorepar2$awpN)/2) - lgamma((1:n-n-1+scorepar2$awpN)/2)
# we store part of the score including the T0 matrix and the data covariance matrix
T0cov <- scorepar$TN - ((scorepar$am*scorepar$N)/(scorepar$am+scorepar$N))* (scorepar$means)%*%t(scorepar$means)
samplescores <- rep(0, nrow(datatoscore))
for(ii in 1:nrow(datatoscore)){
xs <- as.numeric(datatoscore[ii,]) # this has been recentered by subtracting the data means
scorepar2$means <- scorepar$means + xs/(scorepar$N+1) # update the mean and posterior matrix
scorepar2$TN <- T0cov + xs%*%t(xs)*scorepar$N/(scorepar$N+1) + (((scorepar2$am)*scorepar2$N)/(scorepar2$am+scorepar2$N)) * (scorepar2$means)%*%t(scorepar2$means)
samplescores[ii] <- DAGscore(scorepar2, incidence)
}
return(samplescores-baselinescore)
}
## ---- end of BiDAG/R/scoreagainstdag.R ----
#author Polina Suter, Jack Kuipers, the code partly derived from the order MCMC implementation from Kuipers J, Moffa G (2017) <doi:10.1080/01621459.2015.1133426>
TableDAGscore.alias <- function(parentrows, j, n,alias,param,parentmaps=NULL,numparents=NULL,
numberofparentsvec=NULL) {
if (param$type=="bde" & !param$DBN) {
nrows<-nrow(parentrows)
parentnodes<- alias[parentrows[nrows,!is.na(parentrows[nrows,])]]
P_local<-DAGbinarytablescore(j,parentnodes,n,param,parentrows,parentmaps,numparents,numberofparentsvec)
} else if (param$type=="bdecat" & !param$DBN) {
nrows<-nrow(parentrows)
parentnodes<- alias[parentrows[nrows,!is.na(parentrows[nrows,])]]
P_local<-DAGcattablescore(j,parentnodes,n,param,parentrows,parentmaps,numparents,numberofparentsvec)
} else {
nrows<-nrow(parentrows)
P_local <- numeric(nrows)
for (i in 1:nrows) {
parentnodes <- alias[parentrows[i,!is.na(parentrows[i,])]]
P_local[i]<-DAGcorescore(j,parentnodes,n,param)
}
}
return(P_local)
}
#author Polina Suter, Jack Kuipers, the code partly derived from the order MCMC implementation from Kuipers J, Moffa G (2017) <doi:10.1080/01621459.2015.1133426>
TableDAGscore.alias.plus1<-function(parentrows, j, n,alias,param,parentmaps=NULL,numparents=NULL,numberofparentsvec=NULL) {
if (param$type=="bde" & !param$DBN) {
nrows <- nrow(parentrows)
parentnodes <- alias[parentrows[nrows,!is.na(parentrows[nrows,])]+1]
addpar <- alias[1]
P_local <- DAGbinarytablescoreplus1(j,parentnodes,addpar,n,param,parentrows,parentmaps,numparents,numberofparentsvec)
} else if (param$type=="bdecat" & !param$DBN) {
nrows <- nrow(parentrows)
parentnodes <- alias[parentrows[nrows,!is.na(parentrows[nrows,])]+1]
addpar <- alias[1]
P_local <- DAGcattablescoreplus1(j,parentnodes,addpar,n,param,parentrows,parentmaps,numparents,numberofparentsvec)
} else {
nrows<-nrow(parentrows)
P_local <- numeric(nrows)
for (i in 1:nrows) {
parentnodes <- alias[c(1,parentrows[i,!is.na(parentrows[i,])]+1)]
P_local[i] <- DAGcorescore(j,parentnodes,n,param)
}
}
return(P_local)
}
#author Polina Suter, Jack Kuipers, the code partly derived from the order MCMC implementation from Kuipers J, Moffa G (2017) <doi:10.1080/01621459.2015.1133426>
listpossibleparents.PC.aliases<-function(skeletonedges,isgraphNEL=FALSE,n,updatenodes=c(1:n)){
if(isgraphNEL==FALSE){
l1<-ncol(skeletonedges)
} else {l1<-length(skeletonedges)}
listy<-vector("list",l1)
aliases<-vector("list",l1)
numparents<-vector("numeric",l1)
#we keep record of which parent table lengths we already constructed
table.with.k.parents<-matrix(rep(0,l1*2),nrow=2,ncol=l1)
for (i in updatenodes){
if (isgraphNEL==TRUE) {possparents<-skeletonedges[[i]]$edges
} else{possparents<-which(skeletonedges[,i]==1)}
aliases[[i]]<-possparents
l<-length(possparents)
numparents[i]<-l
possparents<-c(1:l)
if (l==0){
matrixofparents<-matrix(rep(NA,1),1,1)
} else if (table.with.k.parents[1,l]>0){
matrixofparents<-listy[[table.with.k.parents[2,l]]]
} else {
matrixofparents<-rep(NA,l)
for (r in 1:l){
combpossparents<-combinations(l,r,possparents)
if(r<l){
for (j in 1:(l-r)){
combpossparents <- cbind(combpossparents, NA)
}
}
matrixofparents<-rbind(matrixofparents,combpossparents,deparse.level=0)
}
}
listy[[i]] <- matrixofparents
table.with.k.parents[1,l]<-1
table.with.k.parents[2,l]<-i
}
listz<-list()
listz$parenttable<-listy
listz$aliases<-aliases
listz$numparents<-numparents
listz$numberofparentsvec<-lapply(numparents,function(x)rep(c(0:x),choose(x,c(0:x))))
return(listz)
}
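# Parent-table sketch: for a node with two possible parents the (aliased)
# table enumerates all parent subsets by size, padded with NA, and
# numberofparentsvec records each row's subset size:
#   row 1: NA NA    (empty parent set)
#   row 2:  1 NA
#   row 3:  2 NA
#   row 4:  1  2
#   rep(0:2, choose(2, 0:2))   # gives 0 1 1 2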
#author Polina Suter, Jack Kuipers, the code partly derived from the order MCMC implementation from Kuipers J, Moffa G (2017) <doi:10.1080/01621459.2015.1133426>
scorepossibleparents.alias<-function(parenttable,aliases,n,param,
updatenodes=c(1:n),parentmaps=NULL,
numparents=NULL,numberofparentsvec=NULL){
listz<-vector("list",n)
for (i in updatenodes) {
scoretemp<-TableDAGscore.alias(parenttable[[i]], i, n,aliases[[i]],param,parentmaps[[i]],numparents[i],numberofparentsvec[[i]])
listz[[i]] <- as.matrix(scoretemp)
}
return(listz)
}
#authors Polina Suter, Jack Kuipers
PLUS1<-function(n,aliases,updatenodes=c(1:n),blacklistparents=NULL) {
listz<-list()
plus1mask<-list()
plus1parents<-list()
plus1aliases<-list()
for (i in updatenodes){
plus1mask[[i]]<-rep(1,n)
plus1mask[[i]][c(aliases[[i]],blacklistparents[[i]])]<-0
plus1parents[[i]]<-which(plus1mask[[i]]==1)
nrows<-length(plus1parents[[i]])+1
ncols<-length(aliases[[i]])+1
plus1aliases[[i]]<-matrix(c(NaN,plus1parents[[i]],rep(aliases[[i]], each = nrows) ),
nrow=nrows,ncol=ncols)
}
listz$mask<-plus1mask
listz$parents<-plus1parents
listz$aliases<-plus1aliases
return(listz)
}
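# Shape sketch of a plus1 alias table: for node i with core parents {2, 5}
# among n = 6 nodes, and a blacklist excluding i itself (say i = 3), the
# plus1 parents are c(1, 4, 6) and each row lists one extra parent followed
# by the core parents, with NaN in row 1 meaning 'no extra parent':
#   NaN 2 5
#     1 2 5
#     4 2 5
#     6 2 5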
#authors Polina Suter, Jack Kuipers
scorepossibleparents.PLUS1<-function(parenttable,plus1lists,n,param,updatenodes,
parentmaps,numparents,numberofparentsvec){
listy<-vector("list",n)
aliases<-plus1lists$aliases
for (i in updatenodes){ #for every node which needs to be updated
k<-nrow(aliases[[i]])
ncols<-ncol(aliases[[i]])
listz<-vector("list",k)
for (j in 1:k){ #for every list
if (j==1) {
scoretemp<-TableDAGscore.alias(parenttable[[i]], i, n,aliases[[i]][j,which(!is.na(aliases[[i]][j,]))],param,parentmaps[[i]],numparents[i],
numberofparentsvec[[i]])
} else {
scoretemp<-TableDAGscore.alias.plus1(parenttable[[i]], i, n,aliases[[i]][j,],param,parentmaps[[i]],numparents[i],numberofparentsvec[[i]])}
listz[[j]] <- as.matrix(scoretemp)
}
listy[[i]]<-listz
}
return(listy)
}
bgNodeScore<-function(n,param) {
totscores<-vector()
for(i in param$bgnodes) {
totscores[i]<-DAGcorescore(i,NULL,n,param)
}
return(totscores)
}
## ---- end of BiDAG/R/scorefns.R ----
newspaceskel<-function(n,startspace,currspace,softlimit,hardlimit,posterior,blacklist,
MCMCtrace=NULL,mergetype="skeleton") {
switch(mergetype,
"dag" = {
mdag<-modelpcore(MCMCtrace,p=posterior,pdag=FALSE)
newadj<-1*(!blacklist&(startspace|mdag))
toomanyneib<-which(apply(newadj,2,sum)>hardlimit)
if(length(toomanyneib)>0){newadj[,toomanyneib]<-(1*(!blacklist&mdag))[,toomanyneib]}
toomanyneib<-which(apply(newadj,2,sum)>hardlimit)
if(length(toomanyneib)>0){newadj[,toomanyneib]<-currspace[,toomanyneib]}
},
"cpdag" = {
mdag<-modelpcore(MCMCtrace,p=posterior,pdag=FALSE) # computed up front so the hardlimit fallback below always has it
mcp<-modelpcore(MCMCtrace,p=posterior,pdag=TRUE)
newadj<-1*(!blacklist&(startspace|mcp))
toomanyneib<-which(apply(newadj,2,sum)>softlimit)
if(length(toomanyneib)>0) {
newadj[,toomanyneib]<-(1*(!blacklist&(startspace|mdag)))[,toomanyneib]
}
tootoomanyneib<-which(apply(newadj,2,sum)>hardlimit)
if(length(tootoomanyneib)>0) {newadj[,tootoomanyneib]<-mdag[,tootoomanyneib]}
tootoomanyneib<-which(apply(newadj,2,sum)>hardlimit)
if(length(tootoomanyneib)>0) {newadj[,tootoomanyneib]<-currspace[,tootoomanyneib]}
},
"skeleton" = {
mdag<-modelpcore(MCMCtrace,p=posterior,pdag=FALSE) # computed once and reused below
mskel<-1*(mdag|t(mdag))
newadj<-1*(!blacklist&(startspace|mskel))
toomanyneib<-which(apply(newadj,2,sum)>4) # hardcoded cap before falling back to the CPDAG-based space
if(length(toomanyneib)>0) {
newadj[,toomanyneib]<-(1*(!blacklist&(startspace|modelpcore(MCMCtrace,p=posterior,pdag=TRUE))))[,toomanyneib]
}
toomanyneib<-which(apply(newadj,2,sum)>softlimit)
if(length(toomanyneib)>0) {
newadj[,toomanyneib]<-(1*(!blacklist&(startspace|mdag)))[,toomanyneib]
}
tootoomanyneib<-which(apply(newadj,2,sum)>hardlimit)
if(length(tootoomanyneib)>0) {newadj[,tootoomanyneib]<-mdag[,tootoomanyneib]}
tootoomanyneib<-which(apply(newadj,2,sum)>hardlimit)
if(length(tootoomanyneib)>0) {newadj[,tootoomanyneib]<-currspace[,tootoomanyneib]}
}
)
return(newadj)
}
newspacemap<-function(n,startspace,currspace,softlimit,hardlimit,blacklist,
maxdag=NULL,mergetype="skeleton",accum) {
if(!is.matrix(maxdag)) maxdag<-as.matrix(maxdag)
switch(mergetype,
"dag" = {
newadj<-1*(!blacklist&(startspace|maxdag))
toomanyneib<-which(apply(newadj,2,sum)>hardlimit)
if(length(toomanyneib)>0){newadj[,toomanyneib]<-(1*(!blacklist&maxdag))[,toomanyneib]}
toomanyneib<-which(apply(newadj,2,sum)>hardlimit)
if(length(toomanyneib)>0){newadj[,toomanyneib]<-currspace[,toomanyneib]}
if(accum) {
newnewadj<-1*(newadj|currspace)
toomanyneib<-which(apply(newnewadj,2,sum)>hardlimit)
if(length(toomanyneib)>0){newnewadj[,toomanyneib]<-newadj[,toomanyneib]}
newadj<-newnewadj
}
},
"cpdag" = {
maxcp<-dagadj2cpadj(maxdag)
newadj<-1*(!blacklist&(startspace|maxcp))
toomanyneib<-which(apply(newadj,2,sum)>softlimit)
if(length(toomanyneib)>0) {
newadj[,toomanyneib]<-(1*(!blacklist&(startspace|maxdag)))[,toomanyneib]
}
tootoomanyneib<-which(apply(newadj,2,sum)>hardlimit)
if(length(tootoomanyneib)>0) {newadj[,tootoomanyneib]<-maxdag[,tootoomanyneib]}
tootoomanyneib<-which(apply(newadj,2,sum)>hardlimit)
if(length(tootoomanyneib)>0) {newadj[,tootoomanyneib]<-currspace[,tootoomanyneib]}
if(accum) {
newnewadj<-1*(newadj|currspace)
toomanyneib<-which(apply(newnewadj,2,sum)>hardlimit)
if(length(toomanyneib)>0){newnewadj[,toomanyneib]<-newadj[,toomanyneib]}
newadj<-newnewadj
}
},
"skeleton" = {
maxskel<-1*(maxdag|transp(maxdag))
newadj<-1*(!blacklist&(startspace|maxskel))
toomanyneib<-which(apply(newadj,2,sum)>7) # hardcoded cap before falling back to the CPDAG-based space
if(length(toomanyneib)>0) {
newadj[,toomanyneib]<-(1*(!blacklist&(startspace|dagadj2cpadj(maxdag))))[,toomanyneib]
}
toomanyneib<-which(apply(newadj,2,sum)>softlimit)
if(length(toomanyneib)>0) {
newadj[,toomanyneib]<-(1*(!blacklist&(startspace|maxdag)))[,toomanyneib]
}
tootoomanyneib<-which(apply(newadj,2,sum)>hardlimit)
if(length(tootoomanyneib)>0) {newadj[,tootoomanyneib]<-maxdag[,tootoomanyneib]}
tootoomanyneib<-which(apply(newadj,2,sum)>hardlimit)
if(length(tootoomanyneib)>0) {newadj[,tootoomanyneib]<-currspace[,tootoomanyneib]}
if(accum) {
newnewadj<-1*(newadj|currspace)
toomanyneib<-which(apply(newnewadj,2,sum)>hardlimit)
if(length(toomanyneib)>0){newnewadj[,toomanyneib]<-newadj[,toomanyneib]}
newadj<-newnewadj
}
}
)
return(newadj)
}
definestartspace<-function(alpha,param,cpdag=FALSE,algo="pc",alphainit=NULL) {
if(is.null(alphainit)) {alphainit<-alpha}
local_type <- param$type
if(local_type=="usr") {
if(param$pctesttype%in%c("bde","bge","bdecat")) {
local_type<-param$pctesttype
}
}
if(param$DBN){
if(param$stationary) {
othersliceskel <- definestartspace(alpha,param$otherslices,cpdag=FALSE,algo="pc")
firstsliceskel <- definestartspace(alphainit,param$firstslice,cpdag=FALSE,algo="pc")
startspace <- othersliceskel
startspace[param$intstr$rows,param$intstr$cols] <- 1*(startspace[param$intstr$rows,param$intstr$cols] | firstsliceskel[param$intstr$rows,param$intstr$cols])
#diag(startspace[param$trans$rows,param$trans$cols])<-1
} else {
skels<-list()
skels[[1]]<-definestartspace(alphainit,param$paramsets[[1]],cpdag=FALSE,algo="pc")
startspace<-skels[[1]]
for(i in 2:(length(param$paramsets)-1)) {
skels[[i]]<-definestartspace(alpha,param$paramsets[[i]],cpdag=FALSE,algo="pc")
startspace<-1*(skels[[i]]|startspace)
}
firstsliceskel <- definestartspace(alphainit,param$paramsets[[length(param$paramsets)]],cpdag=FALSE,algo="pc")
startspace[param$intstr$rows,param$intstr$cols] <- 1*(startspace[param$intstr$rows,param$intstr$cols] | firstsliceskel[param$intstr$rows,param$intstr$cols])
#diag(startspace[param$trans$rows,param$trans$cols])<-1
}
} else { # otherwise use old versions
if(local_type=="bde") {
if(cpdag){
pc.skel<-pcalg::pc(suffStat = list(d1=param$d1,d0=param$d0,data=param$data),
indepTest = weightedbinCItest, alpha = alpha, labels = colnames(param$data),
verbose = FALSE)
} else {
pc.skel<-pcalg::skeleton(suffStat = list(d1=param$d1,d0=param$d0,data=param$data),
indepTest = weightedbinCItest, alpha = alpha, labels = colnames(param$data),
verbose = FALSE)
}
} else if(local_type=="bdecat") {
if(cpdag){
pc.skel<-pcalg::pc(suffStat = param,
indepTest = weightedcatCItest, alpha = alpha, labels = colnames(param$data),
verbose = FALSE)
} else {
pc.skel<-pcalg::skeleton(suffStat = param,
indepTest = weightedcatCItest, alpha = alpha, labels = colnames(param$data),
verbose = FALSE)
}
} else if(local_type=="bge") {
if(is.null(param$weightvector)) {
cormat<-cor(param$data)
N<-nrow(param$data)
} else { N<-sum(param$weightvector)
cormat<-cov.wt(param$data,wt=param$weightvector,cor=TRUE)$cor}
if(cpdag){
pc.skel<-pcalg::pc(suffStat = list(C = cormat, n = N),
indepTest = gaussCItest,
alpha=alpha,labels=colnames(param$data),skel.method="stable",verbose = FALSE)
} else {
pc.skel<-pcalg::skeleton(suffStat = list(C = cormat, n = N),
indepTest = gaussCItest,
alpha=alpha,labels=colnames(param$data),method="stable",verbose = FALSE)
}
} else if (local_type=="usr") {
if(cpdag){
pc.skel<-pcalg::pc(suffStat = param,
indepTest = usrCItest, alpha = alpha, labels = colnames(param$data),
verbose = FALSE)
} else {
pc.skel<-pcalg::skeleton(suffStat = param,
indepTest = usrCItest, alpha = alpha, labels = colnames(param$data),
verbose = FALSE)
}
}
g<-pc.skel@graph
startspace<-1*(graph2m(g))
}
return(startspace)
}
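# Start-space sketch (assumes the Asia data shipped with the package): the
# PC-algorithm skeleton at significance level alpha becomes the adjacency
# matrix of the initial search space,
#   bdepar <- scoreparameters("bde", Asia)
#   space0 <- definestartspace(0.05, bdepar, cpdag = FALSE)
#   dim(space0)   # n x n matrix with entries in {0, 1}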
## ---- end of BiDAG/R/spacefns.R ----
#SUMMARY methods for classes:
#scoreparameters
#scorespace
#orderMCMC
#partitionMCMC
#iterativeMCMC
#itercomp
#samplecomp
#' Summary of object of class 'scoreparameters'
#'
#' @param object object of class 'scoreparameters'
#' @param ... ignored
#'
#' @rdname scoreparameters
#' @method summary scoreparameters
#' @export
summary.scoreparameters <-function(object, ...){
cat("object of class 'scoreparameters' \n")
cat("number of nodes (variables):", object$n, "\n")
cat("number of observations:", nrow(object$data), "\n")
cat("score type:", object$type, "\n")
if(object$DBN==TRUE) {
cat("score object created for a DBN \n")
if(object$bgn>0) {
cat("static nodes (no parents are allowed):", object$static, "\n")
}
} else {
if(object$bgn>0) {
cat("root nodes (no parents are allowed):", object$bgnodes, "\n")
}
}
if(!is.null(object$weightvector)) {
cat("data is weighted \n")
}
}
#' Summary of object of class 'scorespace'
#'
#' @param object object of class 'scorespace'
#' @param ... ignored
#'
#' @rdname scorespace
#' @method summary scorespace
#' @export
summary.scorespace <-function(object, ...){
cat("object of class 'scorespace'")
cat("\n\n")
n<-ncol(object$adjacency)
possedges<-n*n-n
cat("Core search space ($adjacency) contains ", sum(object$adjacency), " edges out of ", possedges,
"edges in a full search space","\n")
if(is.list(object$tables[[length(object$tables)]])) {
cat("Search space is extended", "\n")
} else {
cat("Search space is NOT extended", "\n")
}
nbl<-sum(object$blacklist)
cat(nbl, " edges from the full space were blacklisted \n")
}
#' Summary of object of class 'orderMCMC'
#'
#' @param object object of class 'orderMCMC'
#' @param ... ignored
#'
#' @rdname orderMCMC
#' @method summary orderMCMC
#' @export
summary.orderMCMC <- function(object, ...) {
cat("object of class 'orderMCMC'")
cat("\n\n")
cat("Results:","\n")
cat("maximum score DAG with", ncol(object$DAG), "nodes and ", sum(object$DAG)," edges:", "\n")
cat("maximum DAG score=", object$score,"\n")
cat("scores of samped DAGs (trace):")
cat(object$trace[1], "...", object$trace[length(object$trace)])
cat("\n\n")
cat("MCMC settings:","\n")
cat(paste("algorithm:",object$info$algo,"\n"))
cat(paste("number of MCMC iterations:",object$info$iterations,"\n"))
cat(paste("number of MCMC sampling steps (length of trace):",object$info$samplesteps,"\n"))
cat(paste("initial search space:",object$info$spacealgo,"\n"))
cat(paste("sample/MAP: ",object$info$sampletype,"\n"))
cat("\n")
cat("Additional output:","\n")
cat("\n")
if(!is.null(object$traceadd)) {
cat(paste("traceadd contains",length(object$traceadd$incidence),"sampled DAGs","\n"))
}
if(!is.null(object$scoretable)) {
cat("scoretable, object of class 'scorespace' \n" )
}
cat("\n")
}
#' Summary of object of class 'iterativeMCMC'
#'
#' @param object object of class 'iterativeMCMC'
#' @param ... ignored
#'
#' @rdname iterativeMCMC
#' @method summary iterativeMCMC
#' @export
summary.iterativeMCMC <- function(object, ...) {
cat("object of class 'iterativeMCMC'")
cat("\n\n")
cat("Results:","\n")
cat("maximum score DAG with", ncol(object$DAG), "nodes and ", sum(object$DAG)," edges: \n")
cat("maximum DAG score=", object$score,"\n")
cat("\n")
cat(paste("algorithm:",object$info$algo,"\n"))
cat(paste("number of search space expansion steps:", length(object$maxtrace),"\n"))
cat(paste("number of edges in the intial search space:",sum(object$startspace),"\n"))
cat(paste("number of added edges:",sum(object$endspace)-sum(object$startspace),"\n"))
cat(paste("total number of MCMC iterations:",object$info$iterations*length(object$maxtrace),"\n"))
cat(paste("total number of MCMC sampling steps (length of trace):",object$info$samplesteps*length(object$maxtrace),"\n"))
cat(paste("number of MCMC iterations per expansion step:",object$info$iterations,"\n"))
cat(paste("number of MCMC sampling steps per expansion step:",object$info$samplesteps,"\n"))
cat(paste("initial search space:",object$info$spacealgo,"\n"))
cat(paste("sample/MAP: ",object$info$sampletype,"\n"))
cat("\n")
cat("Additional output:","\n")
if(!is.null(object$traceadd)) {
cat(paste("traceadd contains",length(object$traceadd$incidence[[1]])*length(object$maxtrace),"sampled DAGs \n"))
cat("\n")
}
if(!is.null(object$scoretable)) {
cat("scoretable, object of class 'scorespace' \n")
}
cat("\n")
}
#' Summary of object of class 'partitionMCMC'
#'
#' @param object object of class 'partitionMCMC'
#' @param ... ignored
#'
#' @rdname partitionMCMC
#' @method summary partitionMCMC
#' @export
summary.partitionMCMC <- function(object, ...) {
cat("object of class 'partitionMCMC'")
cat("\n\n")
cat("Results:","\n")
cat("maximum score DAG with", ncol(object$DAG), "nodes and ", sum(object$DAG)," edges:", "\n")
cat("maximum DAG score=", object$score,"\n")
cat("scores of samped DAGs (trace):")
cat(object$trace[1], "...", object$trace[length(object$trace)])
cat("\n\n")
cat("MCMC settings:","\n")
cat(paste("algorithm:",object$info$algo,"\n"))
cat(paste("number of MCMC iterations:",object$info$iterations,"\n"))
cat(paste("number of MCMC sampling steps (length of trace):",object$info$samplesteps,"\n"))
cat(paste("initial search space:",object$info$spacealgo,"\n"))
cat(paste("sample/MAP: ",object$info$sampletype,"\n"))
cat("\n")
cat("Additional output:","\n")
cat("\n")
if(!is.null(object$traceadd)) {
cat(paste("traceadd contains",length(object$traceadd$incidence),"sampled DAGs","\n"))
}
if(!is.null(object$scoretable)) {
cat("scoretable, object of class 'scorespace' \n" )
}
cat("\n")
}
#' Summary of object of class 'itercomp'
#'
#' @param object object of class 'itercomp'
#' @param ... ignored
#'
#' @rdname itercomp
#' @method summary itercomp
#' @export
summary.itercomp <-function(object, ...){
cat("object of class 'itercomp'")
cat("\n\n")
n<-nrow(object)
colo<-colnames(object)
if(n>1) {
cat("structure fit changes: first -> last expansion iteration: \n")
for(i in 1:ncol(object)) {
cat(colo[i],":", object[1,i], "->",object[n,i],"\n")
}
}
}
#' Summary of object of class 'samplecomp'
#'
#' @param object object of class 'samplecomp'
#' @param ... ignored
#'
#' @rdname samplecomp
#' @method summary samplecomp
#' @export
summary.samplecomp <-function(object, ...){
cat("object of class 'samplecomp'")
cat("\n\n")
n<-nrow(object)
colo<-colnames(object)
keymetrics<-c("TPR","FDR","SHD")
if(n>1) {
cat("best thresholds p for key metrics: \n")
besttpr<-which.max(object[,"TPR"])[1]
cat("TPR",":","p =",object[besttpr,"p"], "TPR =",object[besttpr,"TPR"],"\n")
bestfdr<-which.min(object[,"FDR"])[1]
cat("FDR",":","p =",object[bestfdr,"p"], "FDR =",object[bestfdr,"FDR"],"\n")
bestshd<-which.min(object[,"SHD"])[1]
cat("SHD",":","p =",object[bestshd,"p"], "SHD =",object[bestshd,"SHD"],"\n")
}
}
## ---- end of BiDAG/R/summary.R ----
### This function returns the objects needed to evaluate the user defined score
# edgepf is the factor to penalise each edge
# edgepmat is a matrix of edge penalisation factors
# chi is the general number of pseudo count
# delta is a scaling factor for zero pseudo counts when the parents are not all on
# eta is a scaling factor for zero pseudo counts when the node has no parents
usrscoreparameters <- function(initparam, usrpar=list(pctesttype="usrCItest", edgepf=2, edgepmat=NULL,
chi=0.5, delta=NULL, eta=NULL)){
if(is.null(usrpar$chi)) {usrpar$chi <- 0.5}
if(is.null(usrpar$edgepf)) {usrpar$edgepf <- 2}
initparam$chi <- usrpar$chi
initparam$pf <- usrpar$edgepf
if(is.null(usrpar$delta)) {usrpar$delta <- 100*initparam$chi}
if(is.null(usrpar$eta)) {usrpar$eta <- 10*initparam$chi}
initparam$delta <- usrpar$delta
initparam$eta <- usrpar$eta
if (is.null(initparam$weightvector)) {
initparam$N <- nrow(initparam$data)
initparam$d1 <- initparam$data
initparam$d0 <- (1-initparam$data)
} else {
initparam$N <- sum(initparam$weightvector)
initparam$d1 <- initparam$data*initparam$weightvector
initparam$d0 <- (1-initparam$data)*initparam$weightvector
}
maxparents <- ncol(initparam$data) - 1
initparam$scoreconstvec <- rep(0, maxparents+1)
if (is.null(usrpar$edgepmat)) {
initparam$logedgepmat <- NULL
} else {
initparam$logedgepmat <- log(usrpar$edgepmat)
}
initparam$scoreconstvec <- lgamma(initparam$chi/2)+lgamma((1+initparam$delta)*initparam$chi/4)-3*lgamma(initparam$chi/4)-lgamma(initparam$delta*initparam$chi/4) - c(0:maxparents)*log(initparam$pf)
initparam$scoreconstvec[1] <- lgamma((1+initparam$eta)*initparam$chi/2)-lgamma(initparam$chi/2)-lgamma(initparam$eta*initparam$chi/2) # simpler result with no parents
initparam
}
### This function evaluates the log score of a node given its parents
usrDAGcorescore <- function (j,parentnodes,n,param) {
lp<-length(parentnodes) # number of parents
chi<-param$chi
scoreconstvec<-param$scoreconstvec
switch(as.character(lp),
"0"={# no parents
N1<-sum(param$d1[,j])
N0<-sum(param$d0[,j])
NT<-N0+N1
corescore <- scoreconstvec[lp+1] + lgamma(N0+param$eta*chi/2) + lgamma(N1+chi/2) - lgamma(NT+(1+param$eta)*chi/2)
},
"1"={# one parent
corescore<-scoreconstvec[lp+1]
summys<-param$data[,parentnodes]
for(i in 0:1){
totest<-which(summys==i)
N1<-sum(param$d1[totest,j])
N0<-sum(param$d0[totest,j])
NT<-N0+N1
if(i==0){
corescore <- corescore + lgamma(N0+param$delta*chi/4) + lgamma(N1+chi/4) - lgamma(NT+(1+param$delta)*chi/4)
} else {
corescore <- corescore + lgamma(N0+chi/4) + lgamma(N1+chi/4) - lgamma(NT+chi/2)
}
}
if (!is.null(param$logedgepmat)) { # if there is an additional edge penalisation
corescore <- corescore - param$logedgepmat[parentnodes, j]
}
},
{ # more parents
summys<-1*(rowSums(param$data[,parentnodes])==lp)
N1s<-collectC(summys,param$d1[,j],2)
N0s<-collectC(summys,param$d0[,j],2)
NTs<-N1s+N0s
corescore <- scoreconstvec[lp+1] + sum(lgamma(N0s+c(param$delta,1)*chi/4)) +
sum(lgamma(N1s+chi/4)) - sum(lgamma(NTs+c(1+param$delta,2)*chi/4))
if (!is.null(param$logedgepmat)) { # if there is an additional edge penalisation
corescore <- corescore - sum(param$logedgepmat[parentnodes, j])
}
})
corescore
}
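# Each per-state term above is the log marginal likelihood of a
# Beta-Bernoulli model with pseudo counts (a0, a1):
#   lgamma(N0 + a0) + lgamma(N1 + a1) - lgamma(N0 + N1 + a0 + a1)
# up to the constant collected in scoreconstvec; for the no-parent case
# a0 = eta*chi/2 and a1 = chi/2, matching the lgamma terms in branch "0".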
### This function prints out the user defined score objects
usrprint.scoreparameters <- function (x,...){
cat("CBN mimic score is being used","\n")
cat("Prior pseudo counts:", x$chi,"\n")
cat("Edge penalization factor:", x$pf,"\n")
cat("Prior pseudo count scaling with 1 parent:", x$delta,"\n")
cat("Prior pseudo count scaling with no parent:", x$eta,"\n")
if(is.null(x$weightvector)) {
cat("Data is not weighted\n")
} else {
cat("Data is weighted according to the weight vector\n")
}
cat("Score constant vector:", x$scoreconstvec,"\n")
}
### This function defines the CI tests for the starting skeleton
# not strictly needed here since we revert to the bde score,
# but kept just in case, for testing
usrdefinestartspace <- function(alpha,param,cpdag,n){
if(cpdag){
pc.skel<-pcalg::pc(suffStat = list(d1=param$d1,d0=param$d0,data=param$data),
indepTest = weightedbinCItest, alpha = alpha, labels = colnames(param$data),
verbose = FALSE)
} else {
pc.skel<-pcalg::skeleton(suffStat = list(d1=param$d1,d0=param$d0,data=param$data),
indepTest = weightedbinCItest, alpha = alpha, labels = colnames(param$data),
verbose = FALSE)
}
pc.skel
}
## ---- end of BiDAG/R/usrscorefns.R ----
#' Calculates the bidimensional regression between two 2D configurations
#'
#' @description Calculates the bidimensional regression between two 2D configurations using both Euclidean and Affine transformations following the approach by Tobler (1965).
#' This function assumes a strict data format and returns all coefficients and statistics in a single structure. The same functionality is re-implemented in an R-friendly style; see the \code{\link{lm2}} function.
#'
#' @param coord table that must contain two columns for dependent variables (named \code{depV1} and \code{depV2}) and two columns for independent variables (named \code{indepV1} and \code{indepV2}).
#'
#' @return an S3 class \code{BiDimRegression} containing all essential measures of the bidimensional regression
#' * \code{euclidean.r, affine.r} - the regression coefficient, defined analogously to Pearson's r.
#' * \code{euclidean.rsqr, affine.rsqr} - the squared regression coefficient.
#' * \code{euclidean.diABSqr, affine.diABSqr} - the squared distortion index for dependent variables; following Waterman and Gordon's (1984) extension of the bidimensional regression, it provides a measure of comparison of distortions, but the range of values is 0 to 1 following Friedman and Kohler (2003).
#' * \code{euclidean.dMaxABSqr, affine.dMaxABSqr} - the maximal squared distortion index for dependent variables.
#' * \code{euclidean.diXYSqr, affine.diXYSqr} - the distortion index for independent variables.
#' * \code{euclidean.dMaxXYSqr, affine.dMaxXYSqr} - the maximal squared distortion index for independent variables.
#' * \code{euclidean.scaleFactorX, affine.scaleFactorX} - the scaling factor of the first dimension (1.0 means no scaling; values below 1.0 indicate a contraction, values above 1.0 indicate an expansion).
#' * \code{euclidean.scaleFactorY, affine.scaleFactorY} - the scaling factor of the second dimension.
#' * \code{euclidean.angleDEG, affine.angleDEG} - the rotation angle \bold{in degrees}.
#' * \code{euclidean.shear, affine.shear} - shearing of the transformed configuration, always zero for the Euclidean transformation.
#' * \code{euclidean.ttestDF, affine.ttestDF} - degrees of freedom (DF) for the t-tests regarding the model parameters (alphas and betas).
#' * \code{euclidean.alpha1.*, euclidean.alpha2.*, affine.alpha1.*, affine.alpha2.*} - intercept vectors, information includes \code{.coeff} for coefficient, \code{.SE} for standard error, \code{tValue} for _t_-statistics, and \code{pValue} for significance.
#' * \code{euclidean.beta1.*, euclidean.beta2.*, affine.beta1.*, affine.beta2.*, affine.beta3.*, affine.beta4.*} - slope vectors, information includes \code{.coeff} for coefficient, \code{.SE} for standard error, \code{tValue} for _t_-statistics, and \code{pValue} for significance.
#' * \code{euclidean.fValue, affine.fValue} - F-statistics, following the advice of Nakaya (1997).
#' * \code{euclidean.df1, affine.df1} - degrees of freedom of the numerator used for the F-statistics propagated by Nakaya (1997); df1 = p-2, where p is the number of elements needed to calculate the referring model: p=4 for the Euclidean and p=6 for the affine geometry (Nakaya, 1997, Table 1).
#' * \code{euclidean.df2, affine.df2} - degrees of freedom of the denominator used for the F-statistics propagated by Nakaya (1997); df2 = 2n-p, where p is the number of elements needed to calculate the referring model (see df1) and n is the number of coordinate pairs.
#' * \code{euclidean.pValue, affine.pValue} - the significance level based on the preceding F-statistics.
#' * \code{euclidean.dAICso, affine.dAICso} - the AIC difference between the regarding bidimensional regression model and the bidimensional null model (S0) according to Nakaya (1997), formula 56.
#' * \code{eucVSaff.*} - statistical comparison between Euclidean and Affine models, include \code{.fValue} for F-statistics, \code{.df1} and \code{.df2} for the degrees of freedom, \code{.pValue} for the significance level, and \code{.dAIC} for AIC difference between two models.
#' @md
#'
#' @export
#' @importFrom utils packageVersion
#'
#' @examples
#' resultingMeasures <- BiDimRegression(NakayaData)
#' print(resultingMeasures)
#'
#' @seealso \code{\link{lm2}}
#' @importFrom stats pf lm
BiDimRegression <-
function (coord)
{
# set standard variables
n <- dim(coord)[1] # number of coordinate pairs
vecZero <- rep(0, n)
vecOne <- rep(1, n)
A <- coord$depV1
B <- coord$depV2
X <- coord$indepV1
Y <- coord$indepV2
# calculating means
Am <- mean(A)
Bm <- mean(B)
Xm <- mean(X)
Ym <- mean(Y)
# calculating (co)variances
X2 <- sum(X^2)
Y2 <- sum(Y^2)
sumX2Y2 <- sum(X^2+Y^2)
A2 <- sum(A^2)
B2 <- sum(B^2)
sumA2B2 <- sum(A^2+B^2)
varA <- (sum((A-Am)*(A-Am)))/(n)
varB <- (sum((B-Bm)*(B-Bm)))/(n)
varX <- (sum((X-Xm)*(X-Xm)))/(n)
varY <- (sum((Y-Ym)*(Y-Ym)))/(n)
covABXY <- sum(((A-Am)+(B-Bm))*((X-Xm)+(Y-Ym)))/n
# ----------- Calculating the Euclidean regression model
euc_par <- data.frame(
ps = 4L,
name = "Euclidean"
)
euc_dataMatrix <- matrix(c(vecOne, vecZero, vecZero, vecOne, X, Y, -Y, X), ncol=4)
euc_target <- matrix(c(A, B),ncol=1)
euc_data <- data.frame(
y = (euc_target),
x1 = (euc_dataMatrix[,1]),
x2 = (euc_dataMatrix[,2]),
x3 = (euc_dataMatrix[,3]),
x4 = (euc_dataMatrix[,4])
)
euc_regression <- lm(y ~ 0 + x1 + x2 + x3 +x4, data=euc_data)
euc_alpha <- data.frame(
coeff = c(NA, NA),
SE = c(NA, NA),
tValue = c(NA, NA),
pValue = c(NA, NA)
)
euc_beta <- data.frame(
coeff = c(NA, NA),
SE = c(NA, NA),
tValue = c(NA, NA),
pValue = c(NA, NA)
)
# retrieving alphas
for(iAlpha in 1:2) {
for(iPar in 1:4) {
euc_alpha[iAlpha, iPar] <- summary(euc_regression)$coeff[iAlpha,iPar]
}
}
# retrieving betas
for(iBeta in 1:2) {
for(iPar in 1:4) {
euc_beta[iBeta , iPar] <- summary(euc_regression)$coeff[iBeta+2,iPar]
}
}
# calculating the scale factors (=sigma)
euc_scaleFactorX <- sqrt(euc_beta$coeff[1]*euc_beta$coeff[1] + euc_beta$coeff[2]*euc_beta$coeff[2])
euc_scaleFactorY <- euc_scaleFactorX # no shear (by definition) for the Euclidean solution
# ==> same scaling factors for dimension 1 and dimension 2
# calculating the rotation (angle) (=theta)
euc_angleRAD <- atan(euc_beta$coeff[2]/euc_beta$coeff[1])
euc_angleDEG <- euc_angleRAD*180/pi
if (euc_beta$coeff[1] < 0)
{
euc_angleDEG <- euc_angleDEG + 180
}
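# (equivalently, euc_angleRAD could be obtained as atan2(euc_beta$coeff[2],
# euc_beta$coeff[1]), which handles the beta1 < 0 quadrant automatically)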
# calculating the shear (gamma)
euc_shear <- 0L # by definition, shear is zero in a Euclidean geometry
# calculating the predicted values
euc_Apred <- euc_alpha$coeff[1]+euc_beta$coeff[1]*X-euc_beta$coeff[2]*Y
euc_Bpred <- euc_alpha$coeff[2]+euc_beta$coeff[2]*X+euc_beta$coeff[1]*Y
euc_Xpred <- (euc_alpha$coeff[2]*euc_beta$coeff[2]-B*euc_beta$coeff[2]-
euc_alpha$coeff[1]*euc_beta$coeff[1]+A*euc_beta$coeff[1])/
(euc_beta$coeff[1]*euc_beta$coeff[1]-euc_beta$coeff[2]*euc_beta$coeff[2])
euc_Ypred <- (euc_beta$coeff[1]*B-euc_alpha$coeff[2]*euc_beta$coeff[1]+
euc_alpha$coeff[1]*euc_beta$coeff[2]-A*euc_beta$coeff[2])/
(euc_beta$coeff[1]*euc_beta$coeff[1]+euc_beta$coeff[2]*euc_beta$coeff[2])
# calculating the bidimensional correlation coefficient
euc_r <- sqrt(sum(((euc_Apred-Am)*(euc_Apred-Am)) + ((euc_Bpred-Bm)*(euc_Bpred-Bm)))/
sum(((A-Am)*(A-Am)) + ((B-Bm)*(B-Bm))))
euc_rsqr <- euc_r*euc_r
# conducting the inference statistics following Nakaya (1997)
euc_F <- ((2*n - euc_par$ps)/2)*(euc_rsqr/(1-euc_rsqr))
euc_df1 <- 2L
euc_df2 <- 2*n - euc_par$ps # set the degrees of freedom: df1/df2
euc_p <- pf(euc_F, euc_df1, euc_df2, lower.tail = FALSE, log.p = FALSE)
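# Following Nakaya (1997): F = ((2n - p)/(p - 2)) * rsqr/(1 - rsqr) with
# p = 4 parameters for the Euclidean model, hence df1 = p - 2 = 2 and
# df2 = 2n - p.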
# ------- Calculating the distortion index following Waterman and Gordon (1984),
# adjusted by Friedman and Kohler (2003)
# --- first: calculating distortion index for original configuration
euc_dDistanceXY <- sqrt(sum((X-euc_Xpred)*(X-euc_Xpred))+sum((Y-euc_Ypred)*(Y-euc_Ypred)))
euc_dDistanceXYSqr <- euc_dDistanceXY*euc_dDistanceXY
euc_dMaxXYSqr <- sum((X-Xm)*(X-Xm)+((Y-Ym)*(Y-Ym)))
euc_dMaxXY <- sqrt(euc_dMaxXYSqr)
euc_diXYSqr <- euc_dDistanceXYSqr/euc_dMaxXYSqr
euc_diXY <- sqrt(euc_diXYSqr)
# --- second: calculating distortion index for target configuration
euc_dDistanceAB <- sqrt(sum((A-euc_Apred)*(A-euc_Apred))+sum((B-euc_Bpred)*(B-euc_Bpred)))
euc_dDistanceABSqr <- euc_dDistanceAB*euc_dDistanceAB
euc_dMaxABSqr <- sum((A-Am)*(A-Am)+((B-Bm)*(B-Bm))) # referring to target configuration
euc_dMaxAB <- sqrt(euc_dMaxABSqr)
euc_diABSqr <- euc_dDistanceABSqr/euc_dMaxABSqr
euc_diAB <- sqrt(euc_diABSqr)
# ------- Calculation of DAIC (Difference AIC = Akaike Information Criterion)
# DAICso: AIC difference DAICso between a bidimensional regression model and
# the bidimensional null model
# --> if DAICso < 0, the bidimensional regression model is
# better than the bidimensional null model.
# [calculation according to Nakaya (1997), formula 56]
euc_dAICso <- 2L*n*log(1-euc_rsqr)+2L*(euc_par$ps-2L)
# +++++++ end of Euclidean regression model
# ----------- Calculating the Affine regression model
aff_par <- data.frame(
ps = 6L,
name = "Affine"
)
aff_dataMatrix <- matrix(c(
vecOne, vecZero,
vecZero, vecOne,
X, vecZero,
Y, vecZero,
vecZero, X,
vecZero, Y), ncol=6)
aff_target <- matrix(c(A, B),ncol=1)
aff_data <- data.frame(
y = (aff_target),
x1 = (aff_dataMatrix[,1]),
x2 = (aff_dataMatrix[,2]),
x3 = (aff_dataMatrix[,3]),
x4 = (aff_dataMatrix[,4]),
x5 = (aff_dataMatrix[,5]),
x6 = (aff_dataMatrix[,6])
)
aff_regression <- lm(y ~ 0 + x1 + x2 + x3 + x4 + x5 + x6, data=aff_data)
aff_alpha <- data.frame(
coeff = c(NA, NA),
SE = c(NA, NA),
tValue = c(NA, NA),
pValue = c(NA, NA)
)
aff_beta <- data.frame(
coeff = c(NA, NA),
SE = c(NA, NA),
tValue = c(NA, NA),
pValue = c(NA, NA)
)
# retrieving alphas
for(iAlpha in 1:2) {
for(iPar in 1:4) {
aff_alpha[iAlpha, iPar] <- summary(aff_regression)$coeff[iAlpha,iPar]
}
}
# retrieving betas
for(iBeta in 1:4) {
for(iPar in 1:4) {
aff_beta[iBeta , iPar] <- summary(aff_regression)$coeff[iBeta+2,iPar]
}
}
# calculating the rotation (angle) (=theta)
aff_angleRAD <- atan(aff_beta$coeff[3]/aff_beta$coeff[1])
aff_angleDEG <- aff_angleRAD*180/pi
if (aff_beta$coeff[1] < 0)
{
aff_angleDEG <- aff_angleDEG+180
}
# calculating the shear (gamma)
aff_shear <- (((aff_beta$coeff[4]/aff_beta$coeff[2])*sin(aff_angleRAD))+cos(aff_angleRAD))/
(((aff_beta$coeff[4]/aff_beta$coeff[2])*cos(aff_angleRAD))-sin(aff_angleRAD))
# calculating the scale factors (=sigma)
aff_scaleFactorX <- sqrt(aff_beta$coeff[1]*aff_beta$coeff[1]+aff_beta$coeff[3]*aff_beta$coeff[3])
if (is.nan(aff_shear))
{
aff_shear <- (aff_beta$coeff[1]-cos(aff_angleRAD)*aff_scaleFactorX)/aff_beta$coeff[3]
}
if (is.nan(aff_shear))
{
aff_shear <- (sin(aff_angleRAD)*aff_scaleFactorX+aff_beta$coeff[2])/aff_beta$coeff[4]
}
aff_scaleFactorY <- aff_beta$coeff[2]/(aff_shear*cos(aff_angleRAD)-sin(aff_angleRAD))
if (is.nan(aff_scaleFactorY))
{
aff_scaleFactorY <- aff_scaleFactorX
}
# calculating the predicted values
aff_Apred <- aff_alpha$coeff[1]+aff_beta$coeff[1]*X+aff_beta$coeff[2]*Y
aff_Bpred <- aff_alpha$coeff[2]+aff_beta$coeff[3]*X+aff_beta$coeff[4]*Y
aff_Xpred <- -(aff_Bpred*aff_beta$coeff[2]-aff_Apred*aff_beta$coeff[4]-
aff_alpha$coeff[2]*aff_beta$coeff[2]+aff_alpha$coeff[1]*aff_beta$coeff[4])/
(aff_beta$coeff[2]*aff_beta$coeff[3]+aff_beta$coeff[1]*aff_beta$coeff[4])
aff_Ypred <- -(aff_Apred*aff_beta$coeff[3]-aff_Bpred*aff_beta$coeff[1]-
aff_alpha$coeff[1]*aff_beta$coeff[3]+aff_alpha$coeff[2]*aff_beta$coeff[1])/
(aff_beta$coeff[2]*aff_beta$coeff[3]+aff_beta$coeff[1]*aff_beta$coeff[4])
# calculating the bidimensional correlation coefficient
aff_r <- sqrt(sum(((aff_Apred-Am)*(aff_Apred-Am))+((aff_Bpred-Bm)*(aff_Bpred-Bm)))/sum(((A-Am)*(A-Am))+((B-Bm)*(B-Bm))))
aff_rsqr <- aff_r*aff_r
# conducting the inference statistics according to
# Nakaya (1997), formula 50
aff_F <- ((2*n-aff_par$ps)/4) * (aff_rsqr/(1-aff_rsqr))
aff_df1 <- 4L
aff_df2 <- 2*n-aff_par$ps # set the degrees of freedom: df1/df2
aff_p <-pf(aff_F, aff_df1, aff_df2, lower.tail = FALSE, log.p = FALSE)
# ------- Calculating the distortion index following Waterman and Gordon (1984),
# adjusted by Friedman and Kohler (2003)
# --- first: calculating distortion index for original configuration
aff_dDistanceXY <- sqrt(sum((X-aff_Xpred)*(X-aff_Xpred))+sum((Y-aff_Ypred)*(Y-aff_Ypred)))
euc_dDistanceXY <- sqrt(sum((X-euc_Xpred)*(X-euc_Xpred))+sum((Y-euc_Ypred)*(Y-euc_Ypred)))
aff_dDistanceXYSqr <- aff_dDistanceXY*aff_dDistanceXY
euc_dDistanceXYSqr <- euc_dDistanceXY*euc_dDistanceXY
aff_dMaxXYSqr <- sum((X-Xm)*(X-Xm)+((Y-Ym)*(Y-Ym)))
euc_dMaxXYSqr <- sum((X-Xm)*(X-Xm)+((Y-Ym)*(Y-Ym)))
aff_dMaxXY <- sqrt(aff_dMaxXYSqr)
euc_dMaxXY <- sqrt(euc_dMaxXYSqr)
aff_diXYSqr <- aff_dDistanceXYSqr/aff_dMaxXYSqr
aff_diXY <- sqrt(aff_diXYSqr)
# --- second: calculating distortion index for target configuration
aff_dDistanceAB <- sqrt(sum((A-aff_Apred)*(A-aff_Apred))+sum((B-aff_Bpred)*(B-aff_Bpred)))
aff_dDistanceABSqr <- aff_dDistanceAB*aff_dDistanceAB
aff_dMaxABSqr <- sum((A-Am)*(A-Am)+((B-Bm)*(B-Bm))) # referring to target configuration
aff_dMaxAB <- sqrt(aff_dMaxABSqr)
aff_diABSqr <- aff_dDistanceABSqr/aff_dMaxABSqr
aff_diAB <- sqrt(aff_diABSqr)
# ------- Calculation of DAIC (Difference AIC = Akaike Information Criterion)
# DAICso: AIC difference DAICso between a bidimensional regression model and
# the bidimensional null model
# --> if DAICso < 0, the bidimensional regression model is
# better than the bidimensional null model.
# [calculation according to Nakaya (1997), formula 56]
aff_dAICso <- 2*n*log(1-aff_rsqr)+2*(aff_par$ps-2)
euc_dAICso <- 2*n*log(1-euc_rsqr)+2*(euc_par$ps-2)
#+++++++++++ end of affine solution
# ---- Calculation of DAICs (Difference AIC = Akaike Information Criterion)
# between the different fitted bidimensional regression models
# [see Nakaya (1997), table 4]
# -- comparative test between Euclidean and Affine regression model
dAICea <- 2*n*log((1-aff_rsqr)/(1-euc_rsqr))+2*(aff_par$ps-euc_par$ps)
f_ea <- ((2*n-aff_par$ps)/(aff_par$ps-euc_par$ps))*
((aff_rsqr-euc_rsqr)/(1-aff_rsqr))
df1_ea <- as.integer(aff_par$ps-euc_par$ps)
df2_ea <- as.integer(2*n-aff_par$ps)
p_ea <- pf(f_ea, df1_ea, df2_ea, lower.tail = FALSE, log.p = FALSE)
# residual degrees of freedom used for the parameter t-tests
tTestDF <- aff_regression$df.residual
# return all the results to a data.frame
res_euc <- data.frame(r=euc_r, rsqr=euc_rsqr, diABSqr=euc_diABSqr, dMaxABSqr=euc_dMaxABSqr,
diXYSqr=euc_diXYSqr, dMaxXYSqr=euc_dMaxXYSqr,
scaleFactorX=euc_scaleFactorX, scaleFactorY=euc_scaleFactorY,
angleDEG=euc_angleDEG, shear=euc_shear, ttestDF=tTestDF,
alpha1=euc_alpha[1,], alpha2=euc_alpha[2,],
beta1=euc_beta[1,], beta2=euc_beta[2,], beta3=euc_beta[3,], beta4=euc_beta[4,],
fValue=euc_F, df1=euc_df1, df2=euc_df2, pValue=euc_p, dAICso=euc_dAICso)
res_aff <- data.frame(r=aff_r, rsqr=aff_rsqr, diABSqr=aff_diABSqr, dMaxABSqr=aff_dMaxABSqr,
diXYSqr=aff_diXYSqr, dMaxXYSqr=aff_dMaxXYSqr,
scaleFactorX=aff_scaleFactorX, scaleFactorY=aff_scaleFactorY,
angleDEG=aff_angleDEG, shear=aff_shear, ttestDF=tTestDF,
alpha1=aff_alpha[1,], alpha2=aff_alpha[2,],
beta1=aff_beta[1,], beta2=aff_beta[2,], beta3=aff_beta[3,], beta4=aff_beta[4,],
fValue=aff_F, df1=aff_df1, df2=aff_df2, pValue=aff_p, dAICso=aff_dAICso)
euclideanVSaffine <- data.frame(dAIC=dAICea, fValue=f_ea, pValue=p_ea, df1=df1_ea, df2=df2_ea)
results_sum <- data.frame(euclidean=res_euc, affine=res_aff, eucVSaff=euclideanVSaffine)
class(results_sum) <- "BiDimRegression"
invisible(results_sum) # returns the measures of the bidimensional regression
}
#' @export
#' @importFrom stats printCoefmat
print.BiDimRegression <-
function(x, ...)
{
# print the results of the BiDimensional Regression Analysis
cat(sprintf('\n-------------------\n'))
cat(sprintf('BiDimRegression %s \n', packageVersion("BiDimRegression")))
cat(sprintf('Date-Time: %s\n', date()))
cat(sprintf('----------------------------------------------------------------------\n'))
euc_alpha=matrix(c(x$euclidean.alpha1.coeff, x$euclidean.alpha1.SE, x$euclidean.alpha1.tValue,
x$euclidean.ttestDF, x$euclidean.alpha1.pValue,
x$euclidean.alpha2.coeff, x$euclidean.alpha2.SE, x$euclidean.alpha2.tValue,
x$euclidean.ttestDF, x$euclidean.alpha2.pValue),
nrow=2, byrow = TRUE)
euc_beta=matrix(c(x$euclidean.beta1.coeff, x$euclidean.beta1.SE,
x$euclidean.beta1.tValue, x$euclidean.ttestDF, x$euclidean.beta1.pValue,
x$euclidean.beta2.coeff, x$euclidean.beta2.SE, x$euclidean.beta2.tValue,
x$euclidean.ttestDF, x$euclidean.beta2.pValue),
nrow=2, byrow = TRUE)
aff_alpha=matrix(c(x$affine.alpha1.coeff, x$affine.alpha1.SE, x$affine.alpha1.tValue,
x$affine.ttestDF, x$affine.alpha1.pValue,
x$affine.alpha2.coeff, x$affine.alpha2.SE, x$affine.alpha2.tValue,
x$affine.ttestDF, x$affine.alpha2.pValue),
nrow=2, byrow = TRUE)
aff_beta=matrix(c(x$affine.beta1.coeff, x$affine.beta1.SE, x$affine.beta1.tValue,
x$affine.ttestDF, x$affine.beta1.pValue,
x$affine.beta2.coeff, x$affine.beta2.SE, x$affine.beta2.tValue, x$affine.ttestDF, x$affine.beta2.pValue,
x$affine.beta3.coeff, x$affine.beta3.SE, x$affine.beta3.tValue, x$affine.ttestDF, x$affine.beta3.pValue,
x$affine.beta4.coeff, x$affine.beta4.SE, x$affine.beta4.tValue, x$affine.ttestDF, x$affine.beta4.pValue),
nrow=4, byrow = TRUE)
# -- Overall analysis
overallStats=matrix(c(
x$euclidean.r, x$euclidean.rsqr, x$euclidean.fValue, x$euclidean.df1, x$euclidean.df2, x$euclidean.pValue,
x$affine.r, x$affine.rsqr, x$affine.fValue, x$affine.df1, x$affine.df2, x$affine.pValue),
nrow=2, byrow = TRUE)
colnames(overallStats) <- c("r", "r-sqr", "F-value", "df1", "df2", "p-value")
rownames(overallStats) <- c("Euclidean", "Affine")
cat(sprintf('\n--- overall statistics ---\n'))
printCoefmat(overallStats, P.values=TRUE, has.Pvalue=TRUE, digits=3, tst.ind=4, signif.stars=TRUE)
# show warning for unidentified model
if (x$euclidean.df2 < x$euclidean.df1) {
cat(sprintf('WARNING: Euclidean model is not defined'))
}
if (x$affine.df2 < x$affine.df1) {
cat(sprintf('\t\t\t\t\tWARNING: Affine model is not defined\n'))
}
cat(sprintf('----------------------------------------------------------------------\n'))
cat(sprintf('\n--- parameters ---\n'))
cat(sprintf('\n- Euclidean -\n'))
# -- Parameter analyses
colNames <- c("Parameter", "Std.Err", "t-value", "df", "p-value")
rowNamesAlphas <- c("alpha1", "alpha2")
rowNames2Betas <- c("beta1 ", "beta2 ")
rowNames4Betas <- c("beta1 ", "beta2 ", "beta3 ", "beta4 ")
colnames(euc_alpha) <- colNames
colnames(euc_beta) <- colNames
colnames(aff_alpha) <- colNames
colnames(aff_beta) <- colNames
rownames(euc_alpha) <- rowNamesAlphas
rownames(euc_beta) <- rowNames2Betas
rownames(aff_alpha) <- rowNamesAlphas
rownames(aff_beta) <- rowNames4Betas
printCoefmat(euc_alpha, P.values = TRUE, has.Pvalue = TRUE, digits=3, tst.ind=4)
cat(sprintf('\n'))
printCoefmat(euc_beta, P.values = TRUE, has.Pvalue = TRUE, digits=3, tst.ind=4)
cat(sprintf('\n'))
cat(sprintf('\n- Affine -\n'))
printCoefmat(aff_alpha, P.values = TRUE, has.Pvalue = TRUE, digits=3, tst.ind=4)
cat(sprintf('\n'))
printCoefmat(aff_beta, P.values = TRUE, has.Pvalue = TRUE, digits=3, tst.ind=4)
cat(sprintf('\n'))
cat(sprintf('----------------------------------------------------------------------\n'))
cat(sprintf('\n--- details ---\n'))
cat(sprintf('\n- Euclidean -\t\t\t- Affine -\n'))
cat(sprintf('scaleX\t= scaleY = %4.3f\tscaleX\t= %4.3f, scaleY = %4.3f\n',
x$euclidean.scaleFactorX, x$affine.scaleFactorX, x$affine.scaleFactorY))
cat(sprintf('shear\t= %4.3f\t\t\tshear\t= %4.3f\n',
x$euclidean.shear, x$affine.shear))
cat(sprintf('angle\t= %4.3f DEG\t\tangle\t= %4.3f DEG\n',
x$euclidean.angleDEG, x$affine.angleDEG))
cat(sprintf('---\t\t\t\t---\n'))
cat(sprintf('DAIC (agst.0)\t= %4.2f\tDAIC (agst.0)\t= %4.2f\n',
x$euclidean.dAICso, x$affine.dAICso))
cat(sprintf('---\t\t\t\t---\n'))
cat(sprintf('dMaxABSqr\t= %4.3f\tdMaxABSqr\t= %4.3f\n',
x$euclidean.dMaxABSqr, x$affine.dMaxABSqr))
cat(sprintf('diABSqr \t= %4.3f\t\tdiABSqr \t= %4.3f\n',
x$euclidean.diABSqr, x$affine.diABSqr))
cat(sprintf('dMaxXYSqr\t= %4.3f\tdMaxXYSqr\t= %4.3f\n',
x$euclidean.dMaxXYSqr, x$affine.dMaxXYSqr))
cat(sprintf('diXYSqr \t= %4.3f\t\tdiXYSqr \t= %4.3f\n',
x$euclidean.diXYSqr, x$affine.diXYSqr))
cat(sprintf('----------------------------------------------------------------------\n'))
cat(sprintf('\n--- comparative statistics of fitted bidimensional regression models\n\n'))
comparativeStats=matrix(c(
x$eucVSaff.fValue, as.integer(x$eucVSaff.df1), as.integer(x$eucVSaff.df2), x$eucVSaff.pValue),
nrow=1, byrow = TRUE)
colnames(comparativeStats) <- c("F-value", "df1", "df2", "p-value")
rownames(comparativeStats) <- c("Euclidean vs. Affine")
printCoefmat(comparativeStats, P.values=TRUE, has.Pvalue=TRUE, digits=3, cs.ind=1, signif.stars=TRUE)
cat(sprintf('\n'))
if (x$eucVSaff.df2 < x$eucVSaff.df1) {
cat(sprintf('WARNING: model is not defined\n'))
}
if (x$eucVSaff.pValue <= .05) {
if (x$eucVSaff.dAIC<0) {
superiorSolution <- '(significantly better: Affine solution)'
} else {
superiorSolution <- '(significantly better: Euclidean solution)'
}
} else {
superiorSolution = '(not significantly different solutions)'
}
cat(sprintf('\nDAICea = %4.3f %s\n\n', x$eucVSaff.dAIC, superiorSolution))
cat(sprintf('**********************************************************************\n\n\n'))
}
#' @export
summary.BiDimRegression <-
function(object, ...)
{
summary.BiDimRegression <- object
class(summary.BiDimRegression) <- "summary.BiDimRegression"
return(summary.BiDimRegression)
}
#' @export
print.summary.BiDimRegression <-
function(x, ...)
{
# -- short version of the statistics
# Overall analysis
overallStats=matrix(c(
x$euclidean.r, x$euclidean.rsqr, x$euclidean.fValue, x$euclidean.df1, x$euclidean.df2, x$euclidean.pValue,
x$affine.r, x$affine.rsqr, x$affine.fValue, x$affine.df1, x$affine.df2, x$affine.pValue),
nrow=2, byrow = TRUE)
colnames(overallStats) <- c("r", "r-sqr", "F-value", "df1", "df2", "p-value")
rownames(overallStats) <- c("Euclidean", "Affine")
cat(sprintf('\n--- summary statistics from bidimensional regressions ---\n'))
printCoefmat(overallStats, P.values=TRUE, has.Pvalue=TRUE, digits=3, tst.ind=4, signif.stars=TRUE)
}
|
/scratch/gouwar.j/cran-all/cranData/BiDimRegression/R/BiDimRegression.R
|
library(Formula)
#' Fitting Bidimensional Regression Models
#'
#' lm2 is used to fit bidimensional linear regression models using
#' Euclidean, affine, and projective transformations following the approach by Tobler (1965).
#'
#' @usage
#' lm2(formula, data, transformation)
#'
#' @param formula a symbolic description of the model to be fitted in the format \code{A + B ~ C + D}, where
#' \code{A} and \code{B} are dependent and \code{C} and \code{D} are independent variables
#' @param data a data frame containing variables for the model.
#' @param transformation the transformation to be used, either \code{'euclidean'}, \code{'affine'}, or \code{'projective'}.
#'
#' @return lm2 returns an object of class "lm2".
#' An object of class "lm" is a list containing at least the following components:
#' \item{\code{transformation}}{string with the transformation type (\code{euclidean}, \code{affine}, or \code{projective})}
#' \item{\code{npredictors}}{number of predictors used in the model: 4 for euclidean, 6 for affine, 8 for projective.}
#' \item{\code{df_model, df_residual}}{degrees of freedom for the model and for the residuals}
#' \item{\code{transformation_matrix}}{\code{3x3} transformation matrix}
#' \item{\code{coeff}}{transformation coefficients, with \code{a} denoting the intercept terms.}
#' \item{\code{transformed_coeff}}{\code{scale}, \code{angle}, and \code{shear} coefficients, depending on the transformation.}
#' \item{\code{fitted_values}}{data frame containing fitted values for the original data set}
#' \item{\code{residuals}}{data frame containing residuals for the original fit}
#' \item{\code{r.squared, adj.r.squared}}{R-squared and adjusted R-squared.}
#' \item{\code{F, p.value}}{F-statistics and the corresponding p-value, given the \code{df_model} and \code{df_residual} degrees of freedom.}
#' \item{\code{dAIC}}{Akaike Information Criterion (AIC) difference between the regression model and the null model. A negative value indicates that the regression model is better. See \cite{Nakaya (1997)}.}
#' \item{\code{distortion_index}}{Distortion index following \cite{Waterman and Gordon (1984)}, as adjusted by \cite{Friedman and Kohler (2003)}}
#' \item{\code{lm}}{an underlying \link[=lm]{linear model} for \code{Euclidean} and \code{affine} transformations.}
#' \item{\code{formula}}{formula, describing input and output columns}
#' \item{\code{data}}{data used to fit the model}
#' \item{\code{Call}}{function call information, incorporates the \code{formula}, \code{transformation}, and \code{data}.}
#' @export
#' @seealso \code{\link{anova.lm2}} \code{\link{BiDimRegression}}
#'
#' @examples
#' lm2euc <- lm2(depV1 + depV2 ~ indepV1 + indepV2, NakayaData, 'euclidean')
#' lm2aff <- lm2(depV1 + depV2 ~ indepV1 + indepV2, NakayaData, 'affine')
#' lm2prj <- lm2(depV1 + depV2 ~ indepV1 + indepV2, NakayaData, 'projective')
#' anova(lm2euc, lm2aff, lm2prj)
#' predict(lm2euc)
#' summary(lm2euc)
lm2 <- function(formula, data, transformation) { UseMethod("lm2") }
#' @export
#' @importFrom methods is
lm2.formula <- function(formula, data, transformation){
# Check arguments ---------------------------------------------------------
# Are they present?
if(missing(formula))
{
stop("'formula' is missing or incorrect")
}
if (missing(data)){
stop('argument "data" is missing')
}
if (missing(transformation)){
stop('argument "transformation" is missing')
}
# Valid type and values?
if (!is.data.frame(data))
{
stop('argument "data" must be a data frame')
}
if (!is.character(transformation) || !(tolower(transformation) %in% c('euclidean', 'affine', 'projective'))){
stop("unknown transformation, please use either 'euclidean', 'affine', or 'projective'")
}
# Extract variables from dataframe ----------------------------------------
model_formula <- Formula::Formula(formula)
DV <- Formula::model.part(model_formula, data = data, lhs = 1)
if (any(!sapply(DV, is.numeric))){
stop('Non-numeric dependent variable')
}
IV <- Formula::model.part(model_formula, data = data, rhs = 1)
if (any(!sapply(IV, is.numeric))){
stop('Non-numeric independent variable')
}
# Fit the model -----------------------------------------------------------
lm2model <- lm2fit(cbind(DV, IV), tolower(transformation))
# Adding information about the call ---------------------------------------
# keep the formula and data as components of the stored call
# (anova.lm2 later uses the data component to check that models share data)
lm2model$Call <- match.call(expand.dots = FALSE)
lm2model$formula <- lm2model$Call[2]
lm2model$data <- lm2model$Call[3]
return(lm2model)
}
#' Fits the specified model and computes stats
#'
#' Calls a specific transformation model function and then computes statistics
#' that is common across all transformations.
#' This function should not be called directly, please use \code{\link{lm2}}.
#'
#' @param data the preprocessed data frame from \code{\link{lm2}} function,
#' so that the first two columns are the dependent variables and the other
#' two are independent variables
#' @param transformation the transformation to be used: \code{'euclidean'}, \code{'affine'}, or \code{'projective'}.
#'
#' @return returns an object of class "lm2", see \code{\link{lm2}}
#' for the description.
#'
#' @keywords internal
#' @importFrom stats pf
lm2fit <- function(data, transformation){
lm2model <- switch(transformation,
euclidean = lm2euclidean(data),
affine = lm2affine(data),
projective= lm2projective(data),
stop("unknown transformation, please use either 'euclidean' or 'affine'"))
class(lm2model) <- 'lm2'
# common stats for the bidimensional regression
var_mean <- colMeans(data)
n <- nrow(data)
lm2model$r.squared <- 1- sum((lm2model$fitted_values[, 1]-data[, 1])^2 +(lm2model$fitted_values[, 2]-data[, 2])^2)/
sum((data[, 1]-var_mean[[1]])^2 +(data[, 2]-var_mean[[2]])^2)
lm2model$adj.r.squared <- 1-( ( (n-1)/(n-lm2model$npredictors-1)) * ( (n-2)/(n-lm2model$npredictors-2)) * ((n+1)/n))*(1-lm2model$r.squared)
lm2model$dAIC<- 2*n*log(1-lm2model$r.squared)+2*lm2model$df_model
lm2model$F <- (lm2model$df_residual/lm2model$df_model)*(lm2model$r.squared/(1-lm2model$r.squared))
lm2model$p.value<- pf(lm2model$F, lm2model$df_model, lm2model$df_residual, lower.tail= FALSE, log.p= FALSE)
## ------- the distortion index following Waterman and Gordon (1984), adjusted by Friedman and Kohler (2003)
di<- data.frame(D.sqr= c(NA,NA), Dmax.sqr= c(NA,NA), DI.sqr= c(NA,NA), row.names = c('Dependent', 'Independent'))
di$D.sqr[1]<- sum((data[, 1]-lm2model$fitted_values[, 1])^2)+
sum((data[, 2]-lm2model$fitted_values[, 2])^2)
# the fitted model only stores predictions for the dependent variables, so the
# predicted independent configuration is reconstructed via the inverse of the
# fitted transformation (assumption: the original 'fitted.I' field is never
# populated by the model functions and indexing it would fail at run time)
DV_h <- cbind(data.matrix(data[, 1:2]), 1)
fitted_IV <- DV_h %*% solve(lm2model$transformation_matrix)
fitted_IV <- fitted_IV[, 1:2]/fitted_IV[, 3]
di$D.sqr[2]<- sum((data[, 3]-fitted_IV[, 1])^2)+
sum((data[, 4]-fitted_IV[, 2])^2)
di$Dmax.sqr[1] <- sum((data[, 1]-var_mean[[1]])^2 +(data[, 2]-var_mean[[2]])^2)
di$Dmax.sqr[2] <- sum((data[, 3]-var_mean[[3]])^2 +(data[, 4]-var_mean[[4]])^2)
di$DI.sqr <- di$D.sqr/di$Dmax.sqr
lm2model$distortion_index <- di
return(lm2model)
}
# Euclidean ---------------------------------------------------------------
#' Computes model for the euclidean transformation
#'
#' @param data the preprocessed data frame from \code{\link{lm2}} function,
#' so that the first two columns are the dependent variables and the other
#' two are independent variables
#'
#' @return object with transformation specific data to be supplemented with further stats
#' @keywords internal
#' @importFrom stats lm predict setNames
lm2euclidean <- function(data){
lm2model <- list(transformation= 'euclidean',
npredictors= 4,
df_model= 2L,
df_residual= 2*nrow(data)-4L)
# arranging the data frame for the lm function
cZeros <- c(rep(0, nrow(data)))
cOnes <- c(rep(1, nrow(data)))
lm_data <- data.frame(
y= c(data[, 1], data[, 2]),
a1= c(cOnes, cZeros),
a2= c(cZeros, cOnes),
b1 = c( data[, 3], data[, 4]),
b2 = c(-data[, 4], data[, 3]))
# using lm to fit the model
lm2model$lm <- stats::lm(y ~ 0 + a1 + a2 + b1 + b2, data= lm_data)
# coefficients and the transformation matrix
lm2model$coeff <- summary(lm2model$lm)$coeff[, 1]
lm2model$transformation_matrix <- matrix(c(lm2model$coeff['b1'], -lm2model$coeff['b2'], lm2model$coeff['a1'],
lm2model$coeff['b2'], lm2model$coeff['b1'], lm2model$coeff['a2'],
0,0,1), nrow=3)
# calculating the transformed coefficients
lm2model$transformed_coeff <- c(
sqrt(lm2model$coeff[['b1']]^2 + lm2model$coeff[['b2']]^2),
sqrt(lm2model$coeff[['b1']]^2 + lm2model$coeff[['b2']]^2),
atan2(lm2model$coeff[['b2']], lm2model$coeff[['b1']])
)
names(lm2model$transformed_coeff) <- c('scale1', 'scale2', 'angle')
# getting the predicted values for dependent variables
lm2model$fitted_values <- setNames(data.frame(matrix(predict(lm2model$lm), ncol=2)), colnames(data)[1:2])
# getting the residuals
lm2model$residuals <- setNames(data.frame(matrix(lm2model$lm$residuals, ncol=2)), colnames(data)[1:2])
return(lm2model)
}
# Affine ------------------------------------------------------------------
#' Computes model for the affine transformation
#'
#' @param data the preprocessed data frame from \code{\link{lm2}} function,
#' so that the first two columns are the dependent variables and the other
#' two are independent variables
#'
#' @return object with transformation specific data to be supplemented with further stats
#' @keywords internal
#' @importFrom stats lm predict setNames
lm2affine <- function(data){
lm2model <- list(transformation= 'affine',
npredictors= 6,
df_model= 4L,
df_residual= 2*nrow(data)-6L)
# re-arranging data for the affine regression model
cZeros <- c(rep(0, nrow(data)))
cOnes <- c(rep(1, nrow(data)))
lm_data <- data.frame(
y= c(data[, 1], data[, 2]),
a1= c(cOnes, cZeros),
a2= c(cZeros, cOnes),
b1= c(data[, 3], cZeros),
b2= c(data[, 4], cZeros),
b3= c(cZeros, data[, 3]),
b4= c(cZeros, data[, 4]))
# using lm to fit the model
lm2model$lm <- stats::lm(y ~ 0 + a1 + a2 + b1 + b2 + b3 +b4, data= lm_data)
# coefficients and the transformation matrix
lm2model$coeff <- summary(lm2model$lm)$coeff[, 1]
lm2model$transformation_matrix <- matrix(c(lm2model$coeff['b1'], lm2model$coeff['b2'], lm2model$coeff['a1'],
lm2model$coeff['b3'], lm2model$coeff['b4'], lm2model$coeff['a2'],
0,0,1), nrow=3)
# Calculating the transformed coefficients
aff_angle <- atan2(lm2model$coeff[['b3']], lm2model$coeff[['b1']])
aff_shear <- ((lm2model$coeff[['b4']]/lm2model$coeff[['b2']])*sin(aff_angle)+cos(aff_angle))/
((lm2model$coeff[['b4']]/lm2model$coeff[['b2']])*cos(aff_angle)-sin(aff_angle))
aff_scale1 <- sqrt(lm2model$coeff[['b1']]^2+lm2model$coeff[['b3']]^2)
if (is.nan(aff_shear))
{
aff_shear <- (lm2model$coeff[['b1']]-cos(aff_angle)*aff_scale1)/lm2model$coeff[['b3']]
}
if (is.nan(aff_shear))
{
aff_shear <- (sin(aff_angle)*aff_scale1+lm2model$coeff[['b2']])/lm2model$coeff[['b4']]
}
aff_scale2 <- lm2model$coeff[['b2']]/(aff_shear*cos(aff_angle)-sin(aff_angle))
if (is.nan(aff_scale2))
{
aff_scale2 <- aff_scale1
}
# stored under the same name as in the Euclidean model so that
# print.lm2/summary.lm2 (which look for 'transformed_coeff') can find it
lm2model$transformed_coeff <- c(
aff_scale1, aff_scale2, aff_shear, aff_angle
)
names(lm2model$transformed_coeff) <- c('scale1', 'scale2', 'shear', 'angle')
# getting the predicted values for dependent variables
lm2model$fitted_values <- setNames(data.frame(matrix(predict(lm2model$lm), ncol=2)), colnames(data)[1:2])
# getting the residuals
lm2model$residuals <- setNames(data.frame(matrix(lm2model$lm$residuals, ncol=2)), colnames(data)[1:2])
return(lm2model)
}
# Projective --------------------------------------------------------------
#' Computes model for the projective transformation
#'
#' @param data the preprocessed data frame from \code{\link{lm2}} function,
#' so that the first two columns are the dependent variables and the other
#' two are independent variables
#'
#' @return object with transformation specific data to be supplemented with further stats
#' @keywords internal
#' @importFrom stats setNames
lm2projective <- function(data){
## Preparing a placeholder for the class
lm2model <- list(transformation= 'projective',
npredictors= 8,
df_model= 6L,
df_residual= 2*nrow(data)-8L)
# preparing matrix for the projective model
A <- matrix(0, nrow= nrow(data) * 2, ncol = 9)
cZeros <- rep(0, nrow(data))
cOnes <- rep(1, nrow(data))
# laying out parameters
A[, 1] <- c(data[, 3], cZeros)
A[, 2] <- c(data[, 4], cZeros)
A[, 3] <- c(cOnes, cZeros)
A[, 4] <- c(cZeros, data[, 3])
A[, 5] <- c(cZeros, data[, 4])
A[, 6] <- c(cZeros, cOnes)
A[, 7] <- c( -data[, 1]*data[, 3], -data[, 2]*data[, 3])
A[, 8] <- c( -data[, 1]*data[, 4], -data[, 2]*data[, 4])
A[, 9] <- c( data[, 1], data[, 2])
# using singular value decomposition
V <- svd(A)$v
# copying over results
lm2model$coeff <- -V[1:8, 9]/V[9,9]
names(lm2model$coeff) <- c('a1', 'a2', 'a3', 'b1', 'b2', 'b3', 'c1', 'c2')
lm2model$transformation_matrix <- matrix(c(lm2model$coeff, 1), nrow=3)
# computing the predicted values for dependent variables
IV <- data[, 3:4]
IV$z <- 1
fitted_DV <- data.matrix(IV) %*% lm2model$transformation_matrix
lm2model$fitted_values <- data.frame(fitted_DV[, 1:2]/fitted_DV[, 3])
colnames(lm2model$fitted_values) <- colnames(data)[1:2]
# getting the residuals
lm2model$residuals <- setNames(data[, 1:2]-lm2model$fitted_values, colnames(data)[1:2])
return(lm2model)
}
# Printing and summary ----------------------------------------------------
#' @export
print.lm2 <- function(x, ...){
cat(sprintf('Call:\n'))
cat(deparse(x$Call))
cat('\n\n')
cat('Coefficients:\n')
coeff <- data.frame(as.list(x$coeff))
rownames(coeff) <- ''
printCoefmat(coeff)
# transformed coefficients, if applicable
if ("transformed_coeff" %in% names(x)){
cat('\nTransformed coefficients:\n')
transformed_coeff <- data.frame(as.list(x$transformed_coeff))
rownames(transformed_coeff) <- ''
printCoefmat(transformed_coeff)
}
# correlation strength
cat('\nMultiple R-squared:', x$r.squared, '\tAdjusted R-squared:', x$adj.r.squared)
}
#' Makes a lightweight summary lm2 object
#'
#' Drops heavy bits, like the data frame with predicted values or the lm object.
#' However, the print tells more! :)
#'
#' @param object an object of class "lm2", see \code{\link{lm2}}
#'
#' @export
#' @keywords internal
summary.lm2 <- function(object, ...){
# copying most of the object
object_summary<- object
class(object_summary) <- "summary.lm2"
# dropping heavy bits
if ('lm' %in% names(object_summary)){
object_summary$coeff <- summary(object_summary$lm)$coeff
}
else{
object_summary$coeff <- data.frame(as.list(object$coeff))
rownames(object_summary$coeff) <- ''
}
object_summary$fitted_values <- NULL
object_summary$lm <- NULL
return(object_summary)
}
#' @export
print.summary.lm2 <- function(x, ...){
cat(sprintf('Call:\n'))
cat(deparse(x$Call))
cat('\n\n')
cat('Coefficients:\n')
printCoefmat(x$coeff)
# transformed coefficients
if ('transformed_coeff' %in% names(x)){
cat('\nTransformed coefficients:\n')
transformed_coeff <- data.frame(as.list(x$transformed_coeff))
rownames(transformed_coeff) <- ''
printCoefmat(transformed_coeff)
}
# distortion index
cat('\nDistortion index:\n')
di <- t(x$distortion_index)
rownames(di)<- c('Distortion distance, squared',
'Maximal distortion distance, squared',
'Distortion index, squared')
printCoefmat(di)
# statistics
cat('\nMultiple R-squared:', x$r.squared, '\tAdjusted R-squared:', x$adj.r.squared)
cat('\nF-statistic:', x$F, 'on', x$df_model, 'and', x$df_residual, 'DF, p-value:', format.pval(x$p.value))
cat('\nDifference in AIC to the null model:', x$dAIC)
if (x$dAIC<2){
cat('*')
}
}
# Predicting -------------------------------------------------------------
#' Predict method for Bidimensional Regression Model Fits
#'
#' Predicted values based on the bidimensional regressional model object.
#'
#' @param object an object of class "lm2"
#' @param newdata An optional two column data frame with independent variables.
#' If omitted, the fitted values are used.
#' @param ... optional arguments
#'
#' @return a two column data frame with predicted values for dependent variables.
#' @export
#'
#' @seealso \code{\link{lm2}}
#' @examples
#' lm2euc <- lm2(depV1+depV2~indepV1+indepV2, NakayaData, transformation = 'Euclidean')
#' predict(lm2euc, NakayaData[, 3:4])
predict.lm2 <- function(object, newdata, ...) {
# returning predictions for original independent variable values
if (missing(newdata)){
return(object$fitted_values)
}
# otherwise, checking dimensionality
if (ncol(newdata)!=2) {
stop('New data must be a two column matrix/data.frame.')
}
# append the homogeneous coordinate; works for both matrices and data frames
newdata <- cbind(data.matrix(newdata), z = 1)
newly_fitted <- newdata %*% object$transformation_matrix
newly_fitted <- newly_fitted[, 1:2]/newly_fitted[, 3]
# colnames(newly_fitted) <- colnames(newdata)[1:2]
return(newly_fitted)
}
# Comparing models --------------------------------------------------------
#' Anova for lm2 objects
#'
#' Anova for lm2 objects, returns a table with pairwise comparisons
#' between models or, if only one model was supplied, with the null model.
#'
#'
#' @param object an object of class "lm2"
#' @param ... further objects of class "lm2"
#'
#' @return an anova data frame
#' @export
#'
#' @seealso \code{\link{lm2}}
#' @examples
#' lm2euc <- lm2(depV1+depV2~indepV1+indepV2, NakayaData, transformation = 'Euclidean')
#' lm2aff <- lm2(depV1+depV2~indepV1+indepV2, NakayaData, transformation = 'Affine')
#' anova(lm2euc, lm2aff)
#' @importFrom stats pf
anova.lm2 <- function(object, ...)
{
# checking whether the remaining arguments are lm2 objects
dots_are_lm2 <- as.logical(vapply(list(...), is, NA, "lm2"))
# merging models into a list
all_models <- c(list(object), list(...)[dots_are_lm2])
# checking that they all were fitted to the same data
same_df <- vapply(all_models, function(lm2object, df_name) { lm2object$data == df_name }, NA, all_models[[1]]$data)
if (any(!same_df)){
warning('Not all models are based on the same data as the first one, ignoring them')
}
all_models <- all_models[same_df]
# checking for duplicate transforms
transforms <- sapply(all_models, function(lm2object){lm2object$transformation})
retain <- rep(TRUE, length(all_models))
if (length(all_models) > 1) { # guard: 2:length() would misbehave for a single model
for(iModel in 2:length(transforms)){
if (transforms[iModel] %in% transforms[1:(iModel-1)]){
retain[iModel] <- FALSE
}
}
}
if (any(!retain)){
warning('Duplicate models, ignoring them')
}
all_models <- all_models[retain]
if (length(all_models)==1) {
# it all boiled down to a single model, for which we actually already computed statistics relative to the null model
# thus, we just copy the numbers into a table
anova_tbl <- data.frame(dAIC = object$dAIC,
df1 = as.integer(object$df_model),
df2 = as.integer(object$df_residual),
F= object$F,
p.value= object$p.value)
row.names(anova_tbl) <- c(paste(object$transformation, 'null', sep= ' vs. '))
}
else {
# we have more than one! First, let's order them based on complexity
predictorsN <- sapply(all_models, function(lm2object){lm2object$npredictors})
all_models <- all_models[order(predictorsN)]
# Let's do pairwise comparisons.
comparisonsN <- (length(all_models) * (length(all_models)-1))/2
anova_tbl <- data.frame(dAIC = rep(NA, comparisonsN),
df1 = rep(NA, comparisonsN),
df2 = rep(NA, comparisonsN),
F= rep(NA, comparisonsN),
p.value= rep(NA, comparisonsN))
iRow <- 1
pairs_labels <- rep('', comparisonsN)
for(iModel1 in 1:(length(all_models)-1)){
for(iModel2 in (iModel1+1):length(all_models)){
pairs_labels[iRow] <- paste(all_models[[iModel1]]$transformation, all_models[[iModel2]]$transformation, sep=' vs. ')
anova_tbl$df1[iRow]<- all_models[[iModel2]]$df_model-all_models[[iModel1]]$df_model
anova_tbl$df2[iRow]<- all_models[[iModel2]]$df_residual
anova_tbl$F[iRow] <- (anova_tbl$df2[iRow]/anova_tbl$df1[iRow])*((all_models[[iModel2]]$r.squared-all_models[[iModel1]]$r.squared)/(1-all_models[[iModel2]]$r.squared))
anova_tbl$p.value[iRow]<- pf(anova_tbl$F[iRow], anova_tbl$df1[iRow], anova_tbl$df2[iRow], lower.tail = FALSE, log.p = FALSE)
anova_tbl$dAIC[iRow] <- 2*nrow(all_models[[iModel2]]$fitted_values)*log((1-all_models[[iModel2]]$r.squared)/
(1-all_models[[iModel1]]$r.squared))+2*(all_models[[iModel2]]$npredictors-all_models[[iModel1]]$npredictors)
iRow <- iRow + 1
}
}
# set the row names once, after all comparisons are filled in
# (setting them inside the loop would leave duplicate empty names)
row.names(anova_tbl) <- pairs_labels
}
# packaging
anova_object <- list(anova_table = anova_tbl)
class(anova_object) <- 'anova.lm2'
return(anova_object)
}
#' @export
print.anova.lm2 <- function(x, ...){
cat('Bidimensional regression:\n')
printCoefmat(x$anova_table, cs.ind = c(1,4), P.values= TRUE, has.Pvalue=TRUE, na.print = '')
}
|
/scratch/gouwar.j/cran-all/cranData/BiDimRegression/R/bidimensional.R
|
#' Eye gaze calibration data
#'
#' A dataset containing a monocular eye gaze recording with a calibration sequence.
#' Courtesy of Bamberger Baby Institut (BamBI).
#'
#' @format A data frame with 365 rows and 6 variables:
#' \describe{
#' \item{time}{sample timestamp, in milliseconds}
#' \item{x, y}{recorded gaze, in internal eye tracker units}
#' \item{target_x, target_y}{location of the calibration target on the screen, in pixels}
#' \item{target}{index of the target within the sequence}
#'
#' ...
#' }
"EyegazeData"
|
/scratch/gouwar.j/cran-all/cranData/BiDimRegression/R/data.R
|
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----raw-data, echo=TRUE, message=FALSE, warning=FALSE, fig.width= 5, out.width = "100%", fig.asp= 3/4, dpi = 200----
library(BiDimRegression)
library(dplyr)
library(ggplot2)
ggplot(data= EyegazeData, aes(x= x, y= y, color= target, fill= target)) +
geom_point(data= EyegazeData %>% group_by(target, target_x, target_y) %>% summarise(.groups="drop"),
aes(x= target_x, y= target_y), shape= 21, size= 10, fill= 'white') +
geom_point(alpha= 0.5, shape= 21) +
ggtitle('Raw eye gaze')
## ----Adjust-gaze, fig.width= 5, out.width = "100%", fig.asp= 3/4, dpi = 200----
lm2aff <- lm2(target_x + target_y ~ x + y, EyegazeData, transformation = 'affine')
adjusted_gaze <- data.frame(predict(lm2aff))
colnames(adjusted_gaze) <- c('adjX', 'adjY')
adjusted_gaze <- cbind(EyegazeData, adjusted_gaze)
ggplot(data= adjusted_gaze, aes(x= adjX, y= adjY, color= target, fill= target)) +
geom_point(data= adjusted_gaze %>% group_by(target, target_x, target_y) %>% summarise(.groups="drop"),
aes(x= target_x, y= target_y), shape= 21, size= 10, fill= 'white') +
geom_point(alpha= 0.5, shape= 21) +
xlab('x')+
ylab('y')+
ggtitle('Adjusted eye gaze')
|
/scratch/gouwar.j/cran-all/cranData/BiDimRegression/inst/doc/calibration.R
|
---
title: "Eye gaze mapping"
author: "Alexander (Sasha) Pastukhov"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Eye gaze mapping}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
Bidimensional regression can be used to transform the eye gaze data into the screen coordinate system using a calibration sequence. For this, we use known target coordinates as independent variables. Please note that the example below assumes that participants fixate faithfully for most of the time and that recording artifacts, such as blinks, were already removed. This example uses the `EyegazeData` example dataset.
## Plotting raw data
```{r raw-data, echo=TRUE, message=FALSE, warning=FALSE, fig.width= 5, out.width = "100%", fig.asp= 3/4, dpi = 200}
library(BiDimRegression)
library(dplyr)
library(ggplot2)
ggplot(data= EyegazeData, aes(x= x, y= y, color= target, fill= target)) +
geom_point(data= EyegazeData %>% group_by(target, target_x, target_y) %>% summarise(.groups="drop"),
aes(x= target_x, y= target_y), shape= 21, size= 10, fill= 'white') +
geom_point(alpha= 0.5, shape= 21) +
ggtitle('Raw eye gaze')
```
## Using lm2 to transform the eye gaze
```{r Adjust-gaze, fig.width= 5, out.width = "100%", fig.asp= 3/4, dpi = 200}
lm2aff <- lm2(target_x + target_y ~ x + y, EyegazeData, transformation = 'affine')
adjusted_gaze <- data.frame(predict(lm2aff))
colnames(adjusted_gaze) <- c('adjX', 'adjY')
adjusted_gaze <- cbind(EyegazeData, adjusted_gaze)
ggplot(data= adjusted_gaze, aes(x= adjX, y= adjY, color= target, fill= target)) +
geom_point(data= adjusted_gaze %>% group_by(target, target_x, target_y) %>% summarise(.groups="drop"),
aes(x= target_x, y= target_y), shape= 21, size= 10, fill= 'white') +
geom_point(alpha= 0.5, shape= 21) +
xlab('x')+
ylab('y')+
ggtitle('Adjusted eye gaze')
```
|
/scratch/gouwar.j/cran-all/cranData/BiDimRegression/inst/doc/calibration.Rmd
|
# Mutation and Gene Expression Automated Detected Algorithm Build V2.0
#
# Date of V1.0: February - March 2013
# Date of V2.0: May - June 2013
# Author: M Wappett
# Description: Input a matrix of continuous data with genes as rows and samples as columns. Input a matrix of discrete mutation data ("WT" or "MUT" calls) with genes as rows and samples as columns. Algorithm may take some considerable time to run (1 hour for 20,500 genes across 800 samples).
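#
# A minimal usage sketch (hypothetical object names; assumes BISEP() has
# already been run on the expression matrix and that 'mut_calls' is a
# "WT"/"MUT" matrix sharing sample names with the expression data):
#   bisep_out <- BISEP(expression_matrix)
#   beem_hits <- BEEM(bisepData = bisep_out, mutData = mut_calls,
#                     sampleType = "cell_line", minMut = 10)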
BEEM <- function(bisepData=data, mutData=mutData, sampleType=c("cell_line", "cell_line_low", "patient", "patient_low"), minMut=10)
{
# Define confidence criteria
if(missing(bisepData)) stop("Need to input BISEP object")
if(missing(mutData)) stop("Need to input mutation data matrix")
if(missing(sampleType)) stop("Need to specify sample type")
print(paste("Minimum number of mutations considered for each gene is: ", minMut, sep=""))
if(sampleType == "cell_line")
{
print("Selected CELL LINE sample type")
pI <- 0.5
dTA <- 2.5
bI <- 0.7
}
else if(sampleType == "patient")
{
print("Selected PATIENT sample type")
pI <- 0.5
dTA <- 2.5
bI <- 0.5
}
else if(sampleType == "cell_line_low")
{
print("Selected CELL LINE LOW sample type")
pI <- 0.5
dTA <- 3.5
bI <- 1.1
}
else if(sampleType == "patient_low")
{
print("Selected PATIENT LOW sample type")
pI <- 0.5
dTA <- 3
bI <- 0.9
}
else
{
stop("Don't recognise sample type - please review options")
}
# Extract objects from input + stop if object incorrect
if("BISEP" %in% names(bisepData) && "BI" %in% names(bisepData) && "DATA" %in% names(bisepData))
{
biIndex <- bisepData$BI
big.model <- bisepData$BISEP
data2 <- bisepData$DATA
}
else
{
stop("Input object isn't from BISEP function")
}
subBiIndex <- subset(biIndex, biIndex[,6] > bI & biIndex[,5] < pI & biIndex[,4] > dTA)
subBiIndex2 <- subBiIndex[order(-subBiIndex[,6]),]
midpoint <- big.model[,1]
names(midpoint) <- rownames(data2)
w1 <- rownames(subBiIndex)
midpointBI <- midpoint[w1]
if(length(w1) <= 1)
{
stop("No bimodal genes in input matrix")
}
data3 <- data2[w1,]
rownames(data3) <- w1
med1 <- apply(data3, 1, function(x) mean(as.numeric(x[1:dim(data3)[2]])))
w1 <- which(med1 > 2)
data3 <- data3[w1,]
w1 <- rownames(data3)
midpointBI <- midpointBI[w1]
# Perform analysis of mutation data, and then integrate with the gene expression analysis outcomes
mutData <- as.data.frame(mutData, stringsAsFactors=FALSE)
# Check size of objects + stop if both are empty
if(dim(mutData)[1] == 0)
{
stop("No genes with mutation frequency higher than that specified")
}
if(dim(data3)[1] == 0)
{
stop("No bimodal genes in input matrix")
}
# Evaluate the significance of this
w1 <- which(colnames(mutData) %in% colnames(data3))
mutData2 <- mutData[,w1]
c1 <- apply(mutData2, 1, function(x) length( x [x == "MUT"] ))
mutData2$mutCount <- c1
mutData3 <- subset(mutData2, mutData2$mutCount >= minMut)
w1 <- which(colnames(data3) %in% colnames(mutData3))
if(length(w1) == 0) { stop("No overlapping sample names") }
data4 <- data3[,w1]
# Check size of objects + stop if both are empty
if(dim(mutData3)[1] == 0)
{
stop("No genes with mutation frequency higher than that specified")
}
if(dim(data3)[1] == 0)
{
stop("No bimodal genes in input matrix")
}
# Order mutation data columns the same as expression data columns.
mutData3 <- mutData3[,colnames(data4)]
print(paste("Number of bimodal expression genes : ", dim(data4)[1], sep=""))
print(paste("Number of mutation genes wih frequency greater than", minMut, ":", dim(mutData3)[1], sep=" "))
list1 <- as.list(as.data.frame(t(data4)))
names1 <- colnames(data4)
names2 <- colnames(mutData3)
outList <- list(data.frame(0))
ind1 <- 0
pb <- txtProgressBar(min=0, max=length(list1), style=3)
for(i in 1:length(list1))
{
data5 <- list1[[i]][order(list1[[i]])]
names1_1 <- names1[order(list1[[i]])]
mutData4 <- mutData3[,names1_1]
mpNum <- midpointBI[[i]] - (5*((max(data5) - min(data5))/100))
sampleMidIndex <- which.min(abs(data5 - mpNum))
lower <- data5[1:sampleMidIndex]
upper <- data5[(sampleMidIndex+1): length(data5)]
contTab <- matrix(ncol=2, nrow=2)
valVec <- numeric(0)
list2 <- as.list(as.data.frame(t(mutData4)))
for(j in 1:length(list2))
{
lowerM <- list2[[j]][1:sampleMidIndex]
upperM <- list2[[j]][(sampleMidIndex+1):length(list2[[j]])]
count1 <- length(which(lowerM == "MUT"))
count2 <- length(which(upperM == "MUT"))
count3 <- count1 + count2
contTab[1,1] <- count1
contTab[1,2] <- count2
contTab[2,1] <- length(lower) - count1
contTab[2,2] <- length(upper) - count2
per1 <- (count1/length(lower))*100
per2 <- (count2/length(upper))*100
per3 <- per1 - per2
pV <- fisher.test(contTab)
ind1 <- ind1 + 1
if(per3 > 0)
{
outList[[ind1]] <- c(names(list1)[i], rownames(mutData4)[j], count1, count2, pV[[1]], per1, per2, length(lowerM), length(upperM), "LowEnriched")
}
else
{
outList[[ind1]] <- c(names(list1)[i], rownames(mutData4)[j], count1, count2, pV[[1]], per1, per2, length(lowerM), length(upperM), "HighEnriched")
}
}
Sys.sleep(0.5)
setTxtProgressBar(pb, i)
}
Sys.sleep(1)
close(pb)
print("Summarising...")
genePairs2 <- do.call("rbind", outList)
genePairs2 <- as.data.frame(genePairs2, stringsAsFactors=FALSE)
genePairs2 <- genePairs2[order(genePairs2[,5]),]
genePairs3 <- subset(genePairs2, genePairs2[,10] == "HighEnriched" & genePairs2[,5] < 0.25)
genePairs3 <- as.data.frame(genePairs3, stringsAsFactors=FALSE)
genePairs3[,5] <- as.numeric(genePairs3[,5])
genePairs4 <- genePairs3[order(genePairs3[,5]),]
colnames(genePairs4) <- c("Gene1", "Gene2", "LowerExpressionMutationCount", "HighExpressionMutationCount", "Fishers P Value", "Percentage of lower samples mutated", "Percenage of high samples mutated", "Size of low expression population", "Size of high expression population", "Enrichment Status")
return(genePairs4)
print("Complete")
}
|
/scratch/gouwar.j/cran-all/cranData/BiSEp/R/BEEM.R
|
# Bimodal Gene Expression Exclusivity Build V3.0
#
# Date of V1.0: November - December 2012
# Date of V2.0: April 2013
# Date of V3.0: May - June 2013
# Author: M Wappett
# Description: Takes as input the list object output from the BISEP function
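#
# A minimal usage sketch (hypothetical object names):
#   bisep_out <- BISEP(expression_matrix)
#   sl_pairs  <- BIGEE(bisepData = bisep_out, sampleType = "cell_line")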
BIGEE <- function(bisepData=data, sampleType=c("cell_line", "cell_line_low", "patient", "patient_low"))
{
# Define confidence criteria
if(missing(bisepData)) stop("Need to input expression data matrix")
if(missing(sampleType)) stop("Need to specify sample type")
if(sampleType == "cell_line")
{
print("Selected CELL LINE sample type")
pI <- 0.5
dTA <- 2.5
bI <- 0.7
numALL <- 1.9
}
else if(sampleType == "patient")
{
print("Selected PATIENT sample type")
pI <- 0.5
dTA <- 2.5
bI <- 0.5
numALL <- 1.9
}
else if(sampleType == "cell_line_low")
{
print("Selected CELL LINE sample type")
pI <- 0.5
dTA <- 3.5
bI <- 1.1
numALL <- 1.9
}
else if(sampleType == "patient_low")
{
print("Selected PATIENT sample type")
pI <- 0.5
dTA <- 3
bI <- 0.9
numALL <- 1.9
}
else
{
stop("Don't recognise sample type - please review options")
}
# Extract objects from input + stop if object incorrect
if("BISEP" %in% names(bisepData) && "BI" %in% names(bisepData) && "DATA" %in% names(bisepData))
{
biIndex <- bisepData$BI
big.model <- bisepData$BISEP
data2 <- bisepData$DATA
}
else
{
stop("Input object isn't from BISEP function")
}
print("Subsetting bimodal index")
subBiIndex <- subset(biIndex, biIndex[,6] > bI & biIndex[,5] < pI & biIndex[,4] > dTA)
subBiIndex2 <- subBiIndex[order(-subBiIndex[,6]),]
print("Filtering")
# Calculate the mid-points for all genes identified as being bimodal
midpoint <- big.model[,1]
names(midpoint) <- rownames(data2)
w1 <- rownames(subBiIndex)
if(length(w1) <= 1)
{
stop("No bimodal genes in input matrix")
}
midpointBI <- midpoint[w1]
data3 <- data2[w1,]
rownames(data3) <- w1
med1 <- apply(data3, 1, function(x) mean(as.numeric(x[1:dim(data3)[2]])))
w1 <- which(med1 > 2)
data3 <- data3[w1,]
w1 <- rownames(data3)
midpointBI <- midpointBI[w1]
print("Setting up synthetic lethal detection")
# Set data up for pure synthetic lethal detection
list3 <- as.list(as.data.frame(t(data3)))
outList <- list()
ind1 <- 0
names1 <- names(list3)
print("Running SL detection")
# Run pure synthetic lethal detection
subBiIndex3 <- subBiIndex[rownames(data3),]
rangeTab <- rbind(c(0.55, 0.5), c(0.5, 0), c(0.45, 0.5), c(0.4, 1), c(0.35, 1.5), c(0.30, 2), c(0.25, 2.5), c(0.20, 3), c(0.15, 3.5), c(0.10, 4), c(0.05, 4.5), c(0.00, 5))
print(paste("Number of bimodal genes: ", length(list3), sep=""))
pb <- txtProgressBar(min=0, max=length(list3), style=3)
for(i in 1:length(list3))
{
range1 <- range(list3[[i]])
range1_2 <- ((range1[2] - range1[1])/100)* (rangeTab[which.min(abs(rangeTab[,1] -subBiIndex3[i,5])),2])
midpoint1 <- midpointBI[i]
for(j in 1:(length(list3)))
{
range2 <- range(list3[[j]])
range2_2 <- ((range2[2] - range2[1])/100)* (rangeTab[which.min(abs(rangeTab[,1] -subBiIndex3[j,5])),2])
midpoint2 <- midpointBI[j]
midpoint3 <- midpoint1 - range1_2
midpoint4 <- midpoint2 - range2_2
num1 <- which(list3[[i]] < midpoint3)
pair2 <- list3[[j]][num1]
num2 <- which(pair2 < midpoint4)
num3 <- which(list3[[j]] < midpoint4)
num4 <- (sum(length(num1), length(num3))/(length(list3[[1]])*2)) * 100
len1 <- c(length(num1), length(num3))
perLL <- length(num2)/length(list3[[i]])*100
sd1 <- sd(len1)
rank1 <- (num4 / sd1)
if(perLL == 0 & num4 > numALL & num4 < 50)
{
ind1 <- ind1 + 1
BBMM <- c(names1[i], names1[j])
outList[[ind1]] <- BBMM
}
else
{
#Do Nothing
}
}
Sys.sleep(0.5)
setTxtProgressBar(pb, i)
}
Sys.sleep(1)
close(pb)
print("Summarising...")
x2 <- do.call("rbind", outList)
x3 <- as.data.frame(x2[,1:2], stringsAsFactors=FALSE)
x4 <- x3[x3[,1] <= x3[,2],]
colnames(x4) <- c("gene", "gene2")
biIndex$gene <- rownames(biIndex)
colnames(biIndex)[7] <- "gene"
x5 <- merge(x4, biIndex, by.x="gene", by.y="gene")
colnames(biIndex)[7] <- "gene2"
x6 <- merge(x5, biIndex, by.x="gene2", by.y="gene2")
x7 <- x6[,1:2]
score1 <- ((x6[,8] + x6[,14]) * (x6[,7] + x6[,13]))* (x6[,6] + x6[,12])
x7$score <- score1
x7 <- x7[order(-x7[,3]),]
print("Complete")
return(x7)
}
|
/scratch/gouwar.j/cran-all/cranData/BiSEp/R/BIGEE.R
|
# Bimodality Algorithm Build: BISEP
#
# Date of V1.0: November - December 2012
# Date of V2.0: April 2013
# Date of V3.0: May - June 2013
# Author: M Wappett
# Description: Input a matrix of continuous data with genes as rows and samples as columns. Algorithm may take some considerable time to run (1 hour for 20,500 genes across 800 samples)
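#
# A minimal usage sketch (hypothetical object names):
#   bisep_out <- BISEP(expression_matrix)
#   head(bisep_out$BI)     # bimodal index per gene
#   head(bisep_out$BISEP)  # distribution midpoint and p-value per gene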
BISEP <- function(data=data)
{
# Deal with missing data
if(missing(data)) stop("Need to input expression data matrix")
# Deal with any negative values (< 1 min)
data2 <- apply(data, 1, function(x) {
w1 <- which(x[1:dim(data)[2]] < 0)
len1 <- length(w1)
vals1 <- runif(len1)
x[w1] <- vals1
x
})
data2 <- t(data2)
# Work out bimodality indexes of genes, and then rank based on this index (< 10 mins)
bimodalIndex <- function(dataset, verbose = TRUE)
{
# From Coombes,KR (2012) ClassDiscovery: Classes and methods for "class discovery" with microarrays or proteomics.
bim <- matrix(NA, nrow = nrow(dataset), ncol = 6)
if (verbose)
cat("1 ")
for (i in 1:nrow(dataset)) {
if (verbose && 0 == i%%100)
cat(".")
if (verbose && 0 == i%%1000)
cat(paste("\n", 1 + i/1000, " ", sep = ""))
x <- as.vector(as.matrix(dataset[i, ]))
if (any(is.na(x)))
next
mc <- Mclust(x, G = 2, modelNames = "E")
sigma <- sqrt(mc$parameters$variance$sigmasq)
delta <- abs(diff(mc$parameters$mean))/sigma
pi <- mc$parameters$pro[1]
bi <- delta * sqrt(pi * (1 - pi))
bim[i, ] <- c(mc$parameters$mean, sigma = sigma, delta = delta,
pi = pi, bim = bi)
}
if (verbose)
cat("\n")
dimnames(bim) <- list(rownames(dataset), c("mu1", "mu2",
"sigma", "delta", "pi", "BI"))
bim <- as.data.frame(bim)
bim
}
print("Calculating bimodal index.....")
biIndex <- bimodalIndex(data2)
# Set up bimodality in gene expression (BIG) function and run
indexing<-function(x.sort,test.bnd,alpha)
{
n<-length(x.sort);
low<-x.sort[1:test.bnd];
hgh<-x.sort[(test.bnd+1):n];
n1<-length(low);
n2<-length(hgh);
qr.low<-quantile(low,seq(0,1,0.01));
qr.hgh<-quantile(hgh,seq(0,1,0.01));
v1<-abs(qr.low[[75]][1]-qr.low[[25]][1])/1.34896;# statistics of low cluster
v2<-abs(qr.hgh[[75]][1]-qr.hgh[[25]][1])/1.34896;# statistics of high cluster
gap.bet.2.cls<-abs(max(low)-min(hgh));
dst.bet.2.cls<-abs(qr.low[[75]][1]-qr.hgh[[25]][1]);
het.std<-sqrt((n1*v1^2+n2*v2^2)*(1/n1+1/n2)/n);
rtv<-(alpha*gap.bet.2.cls+(1-alpha)*dst.bet.2.cls)/het.std;
return(rtv);
}
big<-function(x,alpha,top)
#get index for one gene
#x: expression vector of a gene
#alpha: weight on gap between two clusters
#top: stop rule for scanning
{
x.sort<-sort(x);
x.sort.or<-order(diff(x.sort),decreasing=T);#order of gaps
max.index<-0;#default index
my.boundary<-0;#default boundary
prediction<-c();
for(i in 1:top)
{
new.ind<-indexing(x.sort,x.sort.or[i],alpha);
new.bnd<-mean(c(x.sort[x.sort.or[i]],x.sort[x.sort.or[i]+1]));
prediction<-rbind(prediction,c(new.ind,new.bnd));
}
sel<-which(prediction[,1]==max(prediction[,1]))[1];#find the highest BIG index
return(list(prediction,c(prediction[sel,1],prediction[sel,2])));
}
Besag.Clifford<-function(BI,H,B)#Besag-Clifford algorithm for p value
#BI: bimodal index list
#H: rejection number (100: reasonable)
#B: random sampling times (1e5: reasonable)
{
p<-c();
for(i in 1:length(BI))
{
bs<-c();
h<-0;
while(h<H & length(bs)<B)
{
bs<-c(bs,BI[sample(1:length(BI),1)]);
h<-sum(1*(bs>=BI[i]));
}
p<-c(p,h/length(bs));
}
return(p);
}
BIG<-function(dat)
#the subroutine to use BIG algorithm
#dat is a list
#X: expression matrix (logarithm or normalisation pre-processed)
#H: rejection number used by Besag.Clifford algorithm
#B: random sampling times used by Besag.Clifford algorithm
#alpha: weight on gap between two clusters
#top: stop rule when scanning a gap vector
{
if(length(dat$X)==0)
{
print("no expression matrix!");
return();
} else {
X<-dat$X;
H<-100;
if(length(dat$H)>0)
H<-dat$H;
B<-10000;
if(length(dat$B)>0)
B<-dat$B;
alpha<-0.75;
if(length(dat$alpha)>0)
alpha<-dat$alpha;
top<-10;
if(length(dat$top)>0)
top<-dat$top;
BI<-c();
all<-c();
for(i in 1:nrow(X))
{
buf<-big(X[i,],alpha,top);
all<-c(all,buf);
BI<-rbind(BI,buf[[2]]);
}
p<-Besag.Clifford(BI[,1],H,B);
model<-list(BI=BI[,1],BND=BI[,2],p=p);
return(model);
}
}
data<-function(fn)
{
X<-as.matrix(read.csv(fn,header=FALSE));
return(X);
}
# Run the big method (will take ~2 mins per 1,000 samples)
print("Calculating BISEP values.....")
big.model<-BIG(list(X=data2))
bmT <- as.data.frame(cbind(big.model[[2]], big.model[[3]]), stringsAsFactors=FALSE)
rownames(bmT) <- rownames(data2)
outList <- list(BISEP=bmT, BI=biIndex, DATA=data2)
return(outList)
}
|
/scratch/gouwar.j/cran-all/cranData/BiSEp/R/BISEP.R
|
# Functional Redundancy Algorithm Build V2.0
#
# Date of V1.0: November - December 2012
# Date of V2.0: June 2013
# Author: M Wappett
# Description: Input the gene pair output from the BIGEE or BEEM algorithms. Algorithm may take some considerable time to run (~10 hours for 80,000 gene pairs)
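#
# A minimal usage sketch (hypothetical object names; 'sl_pairs' is the
# data frame returned by BIGEE):
#   fure_out <- FURE(data = sl_pairs, inputType = "BIGEE")
#   head(fure_out$funcRedundantPairs)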
FURE <- function(data=data, inputType=inputType)
{
# Define confidence criteria
if(missing(data)) stop("Need to input expression data matrix")
if(missing(inputType)) stop("Need to specify sample type")
# Is the input matrix from the SlinG or BEEM algorithms?
colNum <- 0
if(inputType == "BIGEE")
{
colNum <- 4
}
else if(inputType == "BEEM")
{
colNum <- 11
}
# Do GO term functional redundancy mapping
tab1 <- merge(toTable(org.Hs.eg.db::org.Hs.egSYMBOL2EG), toTable(org.Hs.eg.db::org.Hs.egGO))
w1 <- which(data[,1] %in% tab1[,2])
x8 <- data[w1,]
w2 <- which(x8[,2] %in% tab1[,2])
x9 <- x8[w2,]
unG4 <- unique(c(x9[,1], x9[,2]))
tab1 <- merge(toTable(org.Hs.eg.db::org.Hs.egSYMBOL2EG[unG4]), toTable(org.Hs.eg.db::org.Hs.egGO))
tab2 <- select(GO.db::GO.db, tab1$go_id, "TERM", "GOID")
tab3 <- cbind(tab1, tab2)
tab4 <- unique(tab1[,1:2])
redundantIDs <- character(0)
redundantTerms <- character(0)
outList <- list(0)
for(i in 1:dim(x9)[1])
{
s1 <- subset(tab3, tab3[,2] == x9[i,1])
s2 <- subset(tab3, tab3[,2] == x9[i,2])
outList[[i]] <- c(s1[1,1], s2[1,1])
m1 <- merge(s1, s2, by.x="TERM", by.y="TERM")
if(dim(m1)[1] > 0)
{
redundantIDs[i] <- paste(m1[,4], collapse = " / ")
redundantTerms[i] <- paste(m1[,1], collapse = " / ")
}
else
{
redundantIDs[i] <- "No Redundancy"
redundantTerms[i] <- "No Redundancy"
}
}
x9$redundant_ids <- redundantIDs
x9$redundant_terms <- redundantTerms
unG5 <- as.list(tab4[,1])
# Perform semantic similarity mapping from gene Ontologies (this may take some considerable time - up to 8 hours; suggest running overnight)
hsGOmf <- godata('org.Hs.eg.db', ont="MF")
hsGObp <- godata('org.Hs.eg.db', ont="BP")
hsGOcc <- godata('org.Hs.eg.db', ont="CC")
v1 <- mgeneSim(genes=unG5, semData=hsGOmf, measure="Wang",verbose=FALSE)
v2 <- mgeneSim(genes=unG5, semData=hsGObp, measure="Wang",verbose=FALSE)
v3 <- mgeneSim(genes=unG5, semData=hsGOcc, measure="Wang",verbose=FALSE)
mfScore <- character(0)
for(i in 1:dim(x9)[1])
{
w1 <- which(tab4[,2] %in% x9[i,1])
w2 <- which(tab4[,2] %in% x9[i,2])
w3 <- tab4[w1,1]
w4 <- tab4[w2,1]
w5 <- which(colnames(v1) == w4 | colnames(v1) == w3 )
w6 <- which(rownames(v1) == w4 | rownames(v1) == w3 )
w7 <- length(c(w5, w6))
if(w7 == 4)
{
mfScore[i] <- v1[w5[1],w6[2]]
}
else
{
mfScore[i] <- "No Score"
}
}
bpScore <- character(0)
for(i in 1:dim(x9)[1])
{
w1 <- which(tab4[,2] %in% x9[i,1])
w2 <- which(tab4[,2] %in% x9[i,2])
w3 <- tab4[w1,1]
w4 <- tab4[w2,1]
w5 <- which(colnames(v2) == w4 | colnames(v2) == w3 )
w6 <- which(rownames(v2) == w4 | rownames(v2) == w3 )
w7 <- length(c(w5, w6))
if(w7 == 4)
{
bpScore[i] <- v2[w5[1],w6[2]]
}
else
{
bpScore[i] <- "No Score"
}
}
ccScore <- character(0)
for(i in 1:dim(x9)[1])
{
w1 <- which(tab4[,2] %in% x9[i,1])
w2 <- which(tab4[,2] %in% x9[i,2])
w3 <- tab4[w1,1]
w4 <- tab4[w2,1]
w5 <- which(colnames(v3) == w4 | colnames(v3) == w3 )
w6 <- which(rownames(v3) == w4 | rownames(v3) == w3 )
w7 <- length(c(w5, w6))
if(w7 == 4)
{
ccScore[i] <- v3[w5[1],w6[2]]
}
else
{
ccScore[i] <- "No Score"
}
}
x9$MolecularFunctionScore <- mfScore
x9$BiologicalProcessScore <- bpScore
x9$CellularComponentScore <- ccScore
w1 <- which(x9[,colNum] == "No Redundancy")
x10 <- x9[-w1,]
outList <- list(allPairsScored=x9, funcRedundantPairs=x10)
return(outList)
}
|
/scratch/gouwar.j/cran-all/cranData/BiSEp/R/FURE.R
|
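# Plots the log2 expression of two genes against each other, overlaying the
# BISEP midpoint of each gene's bimodal distribution as red reference lines.
#
# A minimal usage sketch (hypothetical object names):
#   bisep_out <- BISEP(expression_matrix)
#   expressionPlot(bisep_out, gene1 = "MTAP", gene2 = "MLH1")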
expressionPlot <- function(bisepData=data, gene1, gene2)
{
if(missing(bisepData)) stop("Need to input expression data matrix")
if(missing(gene1)) stop("Need to specify gene 1")
if(missing(gene2)) stop("Need to specify gene 2")
# Extract objects from input + stop if object incorrect
if("BISEP" %in% names(bisepData) && "BI" %in% names(bisepData) && "DATA" %in% names(bisepData))
{
biIndex <- bisepData$BI
big.model <- bisepData$BISEP
data2 <- bisepData$DATA
}
else
{
stop("Input object isn't from BISEP function")
}
# Do some gene formatting
gene1 <- toupper(gene1)
gene2 <- toupper(gene2)
if(length(which(rownames(data2) %in% gene1)) == 0) stop("Gene 1 not recognised")
if(length(which(rownames(data2) %in% gene2)) == 0) stop("Gene 2 not recognised")
plot(as.numeric(data2[gene1,]), as.numeric(data2[gene2,]), pch=16, main=paste(gene1, "vs.", gene2, "Log2 Gene Expression plot", sep=" "), xlab=gene1, ylab=gene2)
abline(v=big.model[gene1,1], col="red")
abline(h=big.model[gene2,1], col="red")
}
|
/scratch/gouwar.j/cran-all/cranData/BiSEp/R/expressionPlot.R
|
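# Waterfall plot of one gene's expression, centred on its BISEP midpoint and
# coloured by the mutation status ("WT"/"MUT") of a second gene.
#
# A minimal usage sketch (hypothetical object and gene names):
#   bisep_out <- BISEP(expression_matrix)
#   waterfallPlot(bisep_out, mutData = mut_calls,
#                 expressionGene = "MLH1", mutationGene = "MLH1")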
waterfallPlot <- function(bisepData=data, mutData=mutData, expressionGene, mutationGene)
{
if(missing(bisepData)) stop("Need to input BISEP object")
if(missing(mutData)) stop("Need to input mutation data matrix")
if(missing(expressionGene)) stop("Need to specify gene 1")
if(missing(mutationGene)) stop("Need to specify gene 2")
# Extract objects from input + stop if object incorrect
if("BISEP" %in% names(bisepData) && "BI" %in% names(bisepData) && "DATA" %in% names(bisepData))
{
biIndex <- bisepData$BI
big.model <- bisepData$BISEP
data <- bisepData$DATA
}
else
{
stop("Input object isn't from BISEP function")
}
if("WT" %in% unique(mutData[,1]) || "MUT" %in% unique(mutData[,1]))
{
# Do some gene formatting
gene1 <- toupper(expressionGene)
gene2 <- toupper(mutationGene)
if(length(which(rownames(data) %in% gene1)) == 0) stop("Gene 1 not recognised")
if(length(which(rownames(mutData) %in% gene2)) == 0) stop("Gene 2 not recognised")
# Sort out your matrices
mutData2 <- mutData[,which(colnames(mutData) %in% colnames(data))]
data2 <- data[,which(colnames(data) %in% colnames(mutData2))]
data2 <- data2[,colnames(mutData2)]
d1 <- as.data.frame(cbind(colnames(data2), as.numeric(data2[gene1,]), as.character(mutData2[gene2,])), stringsAsFactors=FALSE)
colnames(d1) <- c("sample_name", gene1, gene2)
d1[,2] <- as.numeric(d1[,2])
d1 <- d1[order(d1[,2]),]
d1$Colour <- "orange"
d1$Colour[which(d1[,3] == "MUT")] <- "black"
# Left panel: a rotated density curve of the (rescaled, jittered) expression
# values, drawn alongside the waterfall barplot in the right panel.
y <- 2+3*d1[,2] + rnorm(length(d1[,2]))
d.y <- density(y)
layout(matrix(c(1,1,2,2,2,2,2,2,2,2), ncol=5))
plot(d.y$y, d.y$x, ylim=range(y), xlim=rev(range(d.y$y)), type='l')
# Right panel: expression centred on the bimodal midpoint from BISEP, with bars
# coloured by mutation status.
barplot((d1[,2] - big.model[gene1,1]), col=d1[,4], border=NA, main=paste(gene1, " Expression coloured by mutation status of ", gene2, sep=""), space=FALSE, cex.main=2)
legend("bottomright", c("WT", "MUT"), col=c("orange", "black"), pch=15)
}
else
{
stop("Input object isn't of required type - containing only 'MUT' or 'WT' values")
}
}
|
/scratch/gouwar.j/cran-all/cranData/BiSEp/R/waterfallPlot.R
|
### R code from vignette source 'BiSEp.Snw'
###################################################
### code chunk number 1: BiSEp.Snw:18-23
###################################################
require(BiSEp)
data(INPUT_data)
INPUT_data[1:2,1:6]
###################################################
### code chunk number 2: packages
###################################################
BISEP_data <- BISEP(INPUT_data)
biIndex <- BISEP_data$BI
bisepIndex <- BISEP_data$BISEP
###################################################
### code chunk number 3: BiSEp.Snw:40-43
###################################################
biIndex[1:10,]
bisepIndex[1:10,]
###################################################
### code chunk number 4: fig1
###################################################
plot(density(INPUT_data["TUSC3",]), main="TUSC3 Density Distribution")
###################################################
### code chunk number 5: fig2
###################################################
plot(density(INPUT_data["MLH1",]), main="MLH1 Density Distribution")
###################################################
### code chunk number 6: packages
###################################################
plot(density(INPUT_data["MLH1",]), main="MLH1 Density Distribution")
###################################################
### code chunk number 7: BiSEp.Snw:72-73
###################################################
BIGEE_out <- BIGEE(BISEP_data, sampleType="cell_line")
###################################################
### code chunk number 8: BiSEp.Snw:78-79
###################################################
BIGEE_out[1:4,]
###################################################
### code chunk number 9: fig3
###################################################
expressionPlot(BISEP_data, gene1="SMARCA4", gene2="SMARCA1")
###################################################
### code chunk number 10: fig4
###################################################
expressionPlot(BISEP_data, gene1="MTAP", gene2="MLH1")
###################################################
### code chunk number 11: BiSEp.Snw:101-103
###################################################
data(MUT_data)
MUT_data[1:4,1:10]
###################################################
### code chunk number 12: BiSEp.Snw:106-107
###################################################
BEEMout <- BEEM(BISEP_data, mutData=MUT_data, sampleType="cell_line", minMut=40)
###################################################
### code chunk number 13: BiSEp.Snw:112-113
###################################################
BEEMout
###################################################
### code chunk number 14: fig5
###################################################
waterfallPlot(BISEP_data, MUT_data, expressionGene="MICB",
mutationGene="PBRM1")
###################################################
### code chunk number 15: fig6
###################################################
waterfallPlot(BISEP_data, MUT_data, expressionGene="BOK",
mutationGene="BRCA2")
###################################################
### code chunk number 16: packages
###################################################
fOut <- FURE(BIGEE_out[1,], inputType="BIGEE")
frPairs <- fOut$funcRedundantPairs
allPairs <- fOut$allPairsScored
###################################################
### code chunk number 17: BiSEp.Snw:145-146
###################################################
allPairs[1,]
|
/scratch/gouwar.j/cran-all/cranData/BiSEp/inst/doc/BiSEp.R
|
TASC <- function(vect,
method=c("A","B"),
tau = 0.01,
numberOfSamples = 999,
sigma = seq(0.1, 20, by=.1),
na.rm=FALSE,
error = c("mean", "min")){
# Check type of input vector
if(!is.numeric(vect))
stop("The input vector must consist of numerical values!")
# Check for NA values
if (!na.rm && any(is.na(vect)))
stop("Cannot trinarize in the presence of NA values!")
else
if (na.rm)
{
vect <- vect[!is.na(vect)]
}
if (any(!is.finite(vect)))
stop("Cannot trinarize Inf values!")
# Check type of method
method <- match.arg(method, c("A","B"))
error <- match.arg(error, c("mean", "min"))
# Check type and value of tau
if(!is.numeric(tau))
stop("'tau' must be numeric!")
if(tau < 0 || tau > 1)
stop("'tau' has to be in [0,1]!")
# Check type and value of numberofSamples
if(!is.numeric(numberOfSamples))
stop("'numberOfSamples' must be numeric!")
if(numberOfSamples < 0)
stop("'numberOfSamples' has to be >= 0!")
# Check type of sigma
if(method == "B" && !is.numeric(sigma))
stop("'sigma' must consist of numerical values!")
# Verify input length
if(length(vect) < 3)
stop("The input vector must have at least 3 entries!")
# Check whether the input is constant
if(length(unique(vect)) == 1)
stop("The input vector is constant!")
# Draw once from the RNG to make sure .Random.seed is initialized before the
# C code performs the bootstrap sampling.
runif(1)
#calculate the results according to the <method> argument
if(method == "A"){
if(error == "min"){
res <- TASCA_C_min(vect, tau, numberOfSamples)
me <- "TASC A (min)"
}else{
res <- TASCA_C(vect, tau, numberOfSamples)
me <- "TASC A"
}
result <- new(
"TASCResult",
originalMeasurements = vect,
trinarizedMeasurements = res$binarized_vector,
threshold1 = res$threshold1,
threshold2 = res$threshold2,
p.value = res$p_value,
intermediateSteps = res$other_results$P_Mat[-1,,drop=FALSE],
intermediateHeights1 = res$other_results$H_Mat1[-1,,drop=FALSE],
intermediateHeights2 = res$other_results$H_Mat2[-1,,drop=FALSE],
intermediateStrongestSteps = res$other_results$v_vec[-1,,drop=FALSE],
method = me
)
}else if(method == "B"){
if(error == "min"){
res <- TASCB_C_min(vect, tau, numberOfSamples, sigma)
me <- "TASC B (min)"
}else{
res <- TASCB_C(vect, tau, numberOfSamples, sigma)
me <- "TASC B"
}
result <- new(
"TASCResult",
originalMeasurements = vect,
trinarizedMeasurements = res$binarized_vector,
threshold1 = res$threshold1,
threshold2 = res$threshold2,
p.value = res$p_value,
intermediateSteps = res$other_results$steps[-1,,drop=FALSE],
intermediateHeights1 = res$other_results$H_Mat1[-1,,drop=FALSE],
intermediateHeights2 = res$other_results$H_Mat2[-1,,drop=FALSE],
intermediateStrongestSteps = res$other_results$v_vec[-1,,drop=FALSE],
method = me
)
}else{
stop(sprintf("'method' has to be either \"A\" or \"B\"!", method))
}
return(result)
}
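# Usage sketch (illustrative, not part of the original package source): trinarize
# a small synthetic vector with TASC A. 'measurements' is made-up example data;
# any non-constant numeric vector with at least 3 entries works.
measurements <- c(0.1, 0.3, 0.4, 2.0, 2.2, 2.4, 5.1, 5.2, 5.6)
tascResult <- TASC(measurements, method="A", tau=0.01, numberOfSamples=999)
print(tascResult)  # reports both thresholds and the bootstrap p-value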
TASCA_C <- function(vect, tau, numberOfSamples){
#call the C-Function
result <- .Call("TASCA",
as.double(vect),
as.double(tau),
as.integer(numberOfSamples))
#all matrices returned by the C code have to be transposed, because C stores
#them row by row while R stores them column by column
result$other_results$Cc <- t(result$other_results$Cc)
result$other_results$Ind <- t(result$other_results$Ind)
result$other_results$P_Mat <- t(result$other_results$P_Mat)
result$other_results$Q_Mat <- t(result$other_results$Q_Mat)
result$other_results$H_Mat1 <- t(result$other_results$H_Mat1)
result$other_results$H_Mat2 <- t(result$other_results$H_Mat2)
result$other_results$v_vec <- matrix(result$other_results$v_vec, ncol = 2, byrow = TRUE)
return(result);
}
TASCB_C <- function(vect, tau, numberOfSamples, sigma){
result <- .Call("TASCB",
as.double(vect),
as.double(tau),
as.integer(numberOfSamples),
as.double(sigma))
#all matrices returned by the C code have to be transposed, because C stores
#them row by row while R stores them column by column
result$other_results$smoothed = t(result$other_results$smoothed)
result$other_results$zerocrossing = t(result$other_results$zerocrossing)
result$other_results$steps = t(result$other_results$steps)
result$other_results$H_Mat1 = t(result$other_results$H_Mat1)
result$other_results$H_Mat2 = t(result$other_results$H_Mat2)
result$other_results$smoothedX = t(result$other_results$smoothedX)
result$other_results$meanlist = t(result$other_results$meanlist)
result$other_results$v_vec <- matrix(result$other_results$v_vec, ncol = 2, byrow = TRUE)
return(result)
}
TASCA_C_min <- function(vect, tau, numberOfSamples){
#call the C-Function
result <- .Call("TASCA_min",
as.double(vect),
as.double(tau),
as.integer(numberOfSamples))
#all matrices returned by the C code have to be transposed, because C stores
#them row by row while R stores them column by column
result$other_results$Cc <- t(result$other_results$Cc)
result$other_results$Ind <- t(result$other_results$Ind)
result$other_results$P_Mat <- t(result$other_results$P_Mat)
result$other_results$Q_Mat <- t(result$other_results$Q_Mat)
result$other_results$H_Mat1 <- t(result$other_results$H_Mat1)
result$other_results$H_Mat2 <- t(result$other_results$H_Mat2)
result$other_results$v_vec <- matrix(result$other_results$v_vec, ncol = 2, byrow = TRUE)
return(result);
}
TASCB_C_min <- function(vect, tau, numberOfSamples, sigma){
result <- .Call("TASCB_min",
as.double(vect),
as.double(tau),
as.integer(numberOfSamples),
as.double(sigma))
#all matrices returned by the C code have to be transposed, because C stores
#them row by row while R stores them column by column
result$other_results$smoothed = t(result$other_results$smoothed)
result$other_results$zerocrossing = t(result$other_results$zerocrossing)
result$other_results$steps = t(result$other_results$steps)
result$other_results$H_Mat1 = t(result$other_results$H_Mat1)
result$other_results$H_Mat2 = t(result$other_results$H_Mat2)
result$other_results$smoothedX = t(result$other_results$smoothedX)
result$other_results$meanlist = t(result$other_results$meanlist)
result$other_results$v_vec <- matrix(result$other_results$v_vec, ncol = 2, byrow = TRUE)
return(result)
}
|
/scratch/gouwar.j/cran-all/cranData/BiTrinA/R/TASC.R
|
# Provides two scale-space-based methods to binarize a vector of real values. With <method>="A",
# the binarization threshold is calculated according to the BASC A algorithm, and with <method>="B"
# according to the BASC B algorithm; see the vignette for details. The <tau> argument parametrizes
# the bootstrap test and specifies the quality requirement for the binarization; the resulting
# p-value indicates how well the binarization fulfills this requirement. <numberOfSamples> controls
# the number of samples used for the bootstrap test. <sigma> is only used by BASC B and is ignored
# if <method> equals "A"; for <method>="B" it should be an ascending sequence of numbers that are
# used as parameters for the Bessel function. (A usage sketch follows the function definition.)
binarize.BASC <- function(vect, method=c("A","B"), tau = 0.01, numberOfSamples = 999, sigma = seq(0.1, 20, by=.1), na.rm=FALSE){
# Check type of input vector
if(!is.numeric(vect))
stop("The input vector must consist of numerical values!")
# Check for NA values
if (!na.rm && any(is.na(vect)))
stop("Cannot binarize in the presence of NA values!")
else
if (na.rm)
{
vect <- vect[!is.na(vect)]
}
if (any(!is.finite(vect)))
stop("Cannot binarize Inf values!")
# Check type of method
method <- match.arg(method, c("A","B"))
# Check type and value of tau
if(!is.numeric(tau))
stop("'tau' must be numeric!")
if(tau < 0 || tau > 1)
stop("'tau' has to be in [0,1]!")
# Check type and value of numberofSamples
if(!is.numeric(numberOfSamples))
stop("'numberOfSamples' must be numeric!")
if(numberOfSamples < 0)
stop("'numberOfSamples' has to be >= 0!")
# Check type of sigma
if(method == "B" && !is.numeric(sigma))
stop("'sigma' must consist of numerical values!")
# Verify input length
if(length(vect) < 3)
stop("The input vector must have at least 3 entries!")
# Check whether the input is constant
if(length(unique(vect)) == 1)
stop("The input vector is constant!")
# Draw once from the RNG to make sure .Random.seed is initialized before the
# C code performs the bootstrap sampling.
runif(1)
#calculate the results according to the <method> argument
if(method == "A"){
res <- binarizeBASCA_C(vect, tau, numberOfSamples)
result <- new(
"BASCResult",
originalMeasurements = vect,
binarizedMeasurements = res$binarized_vector,
threshold = res$threshold,
p.value = res$p_value,
intermediateSteps = res$other_results$P_Mat,
intermediateHeights = res$other_results$H_Mat,
intermediateStrongestSteps = res$other_results$v_vec,
method = "BASC A"
)
}
else if(method == "B"){
res <- binarizeBASCB_C(vect, tau, numberOfSamples, sigma)
result <- new(
"BASCResult",
originalMeasurements = vect,
binarizedMeasurements = res$binarized_vector,
threshold = res$threshold,
p.value = res$p_value,
intermediateSteps = res$other_results$steps,
intermediateHeights = res$other_results$H_Mat,
intermediateStrongestSteps = res$other_results$v_vec,
method = "BASC B"
)
}
else{
stop(sprintf("'method' has to be either \"A\" or \"B\"!", method))
}
return(result)
}
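# Usage sketch (illustrative, not part of the original package source): binarize
# a bimodal toy vector with BASC A. The data below are made up for demonstration.
toy <- c(0.2, 0.4, 0.5, 3.8, 4.0, 4.1, 4.3)
bascResult <- binarize.BASC(toy, method="A", tau=0.01)
print(bascResult)  # threshold, binarized vector and bootstrap p-value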
#interface for the Call to the C-Function, which does all the calculations for binarization.BASC with method
#argument = "A"
binarizeBASCA_C <- function(vect, tau, numberOfSamples){
#call the C-Function
result <- .Call("binarizeBASCA",
as.double(vect),
as.double(tau),
as.integer(numberOfSamples))
#all matrices returned by the C code have to be transposed, because C stores
#them row by row while R stores them column by column
result$other_results$Cc <- t(result$other_results$Cc)
result$other_results$Ind <- t(result$other_results$Ind)
result$other_results$P_Mat <- t(result$other_results$P_Mat)
result$other_results$Q_Mat <- t(result$other_results$Q_Mat)
result$other_results$H_Mat <- t(result$other_results$H_Mat)
return(result);
}
#interface for the Call to the C-Function, which does all the calculations for binarization.BASC with method
#argument = "B"
binarizeBASCB_C <- function(vect, tau, numberOfSamples, sigma){
result <- .Call("binarizeBASCB",
as.double(vect),
as.double(tau),
as.integer(numberOfSamples),
as.double(sigma))
#all matrices returned by the C code have to be transposed, because C stores
#them row by row while R stores them column by column
result$other_results$smoothed = t(result$other_results$smoothed)
result$other_results$zerocrossing = t(result$other_results$zerocrossing)
result$other_results$steps = t(result$other_results$steps)
result$other_results$H_Mat = t(result$other_results$H_Mat)
result$other_results$smoothedX = t(result$other_results$smoothedX)
result$other_results$meanlist = t(result$other_results$meanlist)
return(result)
}
|
/scratch/gouwar.j/cran-all/cranData/BiTrinA/R/binarizeBASC.R
|
# A method which uses the k-Means algorithm to binarize a real-valued vector.
# <nstart> controls how many random sets should be chosen by the 'kmeans' method and <iter.max> is the
# maximum number of iterations that are allowed (see also the help for kmeans)
binarize.kMeans <- function(vect, nstart=1, iter.max=10, dip.test=TRUE, na.rm=FALSE){
#some checks of the arguments
if(!is.numeric(vect))
stop("The input vector must consist of numerical values!")
if (!na.rm && any(is.na(vect)))
stop("Cannot binarize in the presence of NA values!")
else
if (na.rm)
{
vect <- vect[!is.na(vect)]
}
if (any(!is.finite(vect)))
stop("Cannot binarize Inf values!")
if(!is.numeric(nstart))
stop("'nstart' must be numeric!")
if(nstart < 0)
stop("'nstart' must be >= 0!")
if(!is.numeric(iter.max))
stop("'iter.max' must be numeric!")
if(iter.max < 0)
stop("'iter.max' must be >= 0!")
if(length(vect) < 3)
stop("The input vector must have at least 3 entries!")
if(length(unique(vect))==1)
stop("The input vector is constant!")
if (dip.test)
{
	# Hartigan's dip test for unimodality (from the 'diptest' package). The
	# argument 'dip.test' shadows the function name, but R resolves calls to
	# the function binding.
	p.value <- dip.test(vect)$p.value
}
else
	p.value <- as.numeric(NA)
#run the standard kmeans method with two clusters to do all the calculations
km_res <- kmeans(vect, 2, nstart = nstart, iter.max = iter.max)
#the center with greater value should get the binarized value 1, the other 0.
if(km_res$centers[1] > km_res$centers[2]){
binarizeddata <- abs(km_res$cluster - 2)
}
else{
binarizeddata <- km_res$cluster - 1
}
#calculate the threshold as mean of the calculated centers
#threshold <- min(km_res$centers) + dist(km_res$centers)[1] / 2
#threshold <- mean(km_res$centers)
threshold <- (max(vect[!as.logical(binarizeddata)]) + min(vect[as.logical(binarizeddata)])) / 2
#put all computed results into a 'BinarizationResult' object and return it
return(new("BinarizationResult",
originalMeasurements = vect,
binarizedMeasurements = as.integer(binarizeddata),
threshold = threshold,
p.value = p.value,
method = "k-Means"))
}
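# Usage sketch (illustrative, not part of the original package source): k-means
# based binarization of toy data. With dip.test=TRUE, Hartigan's dip test
# p-value is computed as well, which requires the 'diptest' package.
toy <- c(1.1, 1.3, 1.2, 6.8, 7.0, 7.1)
kmResult <- binarize.kMeans(toy, nstart=5, dip.test=TRUE)
print(kmResult)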
|
/scratch/gouwar.j/cran-all/cranData/BiTrinA/R/binarizeKMeans.R
|
binarizeMatrix <- function(mat, method=c("BASCA","BASCB","kMeans"), adjustment="none", ...){
binFunc <- switch(match.arg(method, c("BASCA","BASCB","kMeans")),
"BASCA" = function(x, ...){
bin <- binarize.BASC(x, method="A", ...)
return(c(bin@binarizedMeasurements,
list(bin@threshold, [email protected])))
},
"BASCB" = function(x, ...){
bin <- binarize.BASC(x, method="B", ...)
return(c(bin@binarizedMeasurements,
list(bin@threshold, [email protected])))
},
"kMeans" = function(x, ...){
bin <- binarize.kMeans(x, ...)
return(c(bin@binarizedMeasurements,
list(bin@threshold, [email protected])))
})
bin <- do.call("rbind.data.frame", apply(mat, 1, binFunc, ...))
if(!is.null(colnames(mat))){
colnames(bin) <- c(colnames(mat), "threshold", "p.value")
}else{
colnames(bin) <- c(paste("V",seq_len(ncol(mat)),sep=""),
"threshold", "p.value")
}
bin[,"p.value"] <- p.adjust(bin[,"p.value"], method=adjustment)
return(bin)
}
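# Usage sketch (illustrative, not part of the original package source): binarize
# each row of a small synthetic matrix with BASC A and adjust the p-values for
# multiple testing. Columns 1-5 of every row are drawn around 0, columns 6-10
# around 5, so each row is clearly bimodal.
set.seed(42)
m <- matrix(c(rnorm(25, mean=0), rnorm(25, mean=5)), nrow=5)
binarized <- binarizeMatrix(m, method="BASCA", adjustment="fdr")
head(binarized)  # per-row binarization plus 'threshold' and 'p.value' columns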
trinarizeMatrix <- function(mat, method=c("TASCA","TASCB","kMeans"), adjustment="none", ...){
triFunc <- switch(match.arg(method, c("TASCA","TASCB","kMeans")),
"TASCA" = function(x, ...){
tri <- TASC(x, method="A", ...)
return(c(tri@trinarizedMeasurements,
list(tri@threshold1, tri@threshold2, [email protected])))
},
"TASCB" = function(x, ...){
tri <- TASC(x, method="B", ...)
return(c(tri@trinarizedMeasurements,
list(tri@threshold1, tri@threshold2, [email protected])))
},
"kMeans" = function(x, ...){
tri <- trinarize.kMeans(x, ...)
return(c(tri@trinarizedMeasurements,
list(tri@threshold1, tri@threshold2, [email protected])))
})
tri <- do.call("rbind.data.frame", apply(mat, 1, triFunc, ...))
if(!is.null(colnames(mat))){
colnames(tri) <- c(colnames(mat), "threshold1", "threshold2", "p.value")
}else{
colnames(tri) <- c(paste("V",seq_len(ncol(mat)),sep=""),
"threshold1", "threshold2", "p.value")
}
tri[,"p.value"] <- p.adjust(tri[,"p.value"], method=adjustment)
return(tri)
}
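# Usage sketch (illustrative, not part of the original package source): the
# trinarization analogue of the example above, using TASC A on rows drawn from
# three separated groups (around 0, 4 and 8).
set.seed(1)
m3 <- matrix(c(rnorm(15, mean=0), rnorm(15, mean=4), rnorm(15, mean=8)), nrow=3)
trinarized <- trinarizeMatrix(m3, method="TASCA")
head(trinarized)  # per-row trinarization plus both thresholds and 'p.value'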
|
/scratch/gouwar.j/cran-all/cranData/BiTrinA/R/binarizeMatrix.R
|
#############################################Class BinarizationResult############################################
#This is the base class of all results of the binarization functions. It provides the basic methods show and
#print as well as a plot method for visualizing the binarization, and it checks all created objects for validity.
setClass(
Class = "BinarizationResult",
representation = representation(
originalMeasurements = "numeric",
binarizedMeasurements = "integer",
threshold = "numeric",
method = "character",
p.value = "numeric"
),
validity = function(object){
#extract object slots
omeasure <- object@originalMeasurements
bmeasure <- object@binarizedMeasurements
thresh <- object@threshold
meth <- object@method
p.value <- [email protected]
#initialize the basic strings
valid_methods <- c(
"BASC A",
"BASC B",
"Scan Statistic",
"Edge Detector: First Edge",
"Edge Detector: Maximum Edge",
"k-Means"
)
for(i in seq(1, length(valid_methods))){
valid_methods_string <- ifelse(i==1, sprintf("\"%s\"", valid_methods[i]), sprintf("%s, \"%s\"", valid_methods_string, valid_methods[i]))
}
#initialize the critical error messages
critical_invalid_strings <- c(
"'originalMeasurements' isn't set!",
"'binarizedMeasurements' isn't set!",
"'threshold' isn't set!",
"'method' isn't set!",
"'p.value' isn't set!"
)
#check object for critical errors
critical_invalid <- c(
!length(omeasure),
!length(bmeasure),
!length(thresh),
!length(meth),
!length(p.value)
)
#if a critical error occurred return the corresponding error messages
if (sum(as.integer(critical_invalid))){
return(critical_invalid_strings[which(critical_invalid)])
}
#initialize the weak error messages
weak_invalid_strings <- c(
"Only zeros and ones are valid values for 'binarizedMeasurements'.",
sprintf("'method' must be element of {%s}, but it is \"%s\".", valid_methods_string, as.character(meth)),
"Length of original and binarized Measurements must be the same.",
sprintf("'threshold' must be within the borders of the original values, which is the interval [%f, %f], but it is %f.", min(omeasure), max(omeasure), thresh),
"'p.value' must be in range [0,1]."
)
#check object for weak errors
weak_invalid <- c(
length(which(bmeasure > 1)) || length(which(bmeasure < 0)),
length(which(valid_methods == meth)) < 1,
length(bmeasure) != length(omeasure),
thresh < min(omeasure) || thresh > max(omeasure),
(!is.na(p.value) && (p.value < 0 || p.value > 1))
)
#if a weak error occurred return the corresponding error messages
if (sum(as.integer(weak_invalid))){
return(weak_invalid_strings[which(weak_invalid)])
}
#object is valid
return(TRUE)
}
)
#This method prints the method name, the binarized vector (limited to 10 values), the threshold and the
#p value to the console. It is called when creating an object without an assignment or when typing the name of a
#BinarizationResult object at the console.
setMethod(
f = "show",
signature = "BinarizationResult",
definition = function(object){
cat("Method: ", object@method, "\n",sep="")
if (length(object@binarizedMeasurements) <= 10)
cat("\nBinarized vector: [ ", paste(object@binarizedMeasurements, collapse=" "),
" ]\n",sep="")
else
cat("\nBinarized vector: [ ",paste(object@binarizedMeasurements[1:10], collapse=" "),
" ...]\n",sep="")
cat("\nThreshold: ", object@threshold, "\n", sep="")
if (!is.na([email protected]))
cat("\np value: ", [email protected], "\n", sep="")
}
)
#setGeneric(
# name = "plot",
# def = function(x, twoDimensional=FALSE, showLegend=TRUE, showThreshold=TRUE, ...){
# standardGeneric("plot")
# }
#)
setGeneric("plot", useAsDefault = plot)
#This method plots the computed binarization in a one- or two-dimensional way.
setMethod(
f = "plot",
signature = c("BinarizationResult"),
definition = function(x, twoDimensional=FALSE, showLegend=TRUE, showThreshold=TRUE, ...)
{
if (twoDimensional)
plot(1:length(x@binarizedMeasurements), x, showLegend=showLegend, showThreshold=showThreshold, ...)
else
{
#extract the base values of x
vect_length <- length(x@originalMeasurements)
min_val <- min(x@originalMeasurements) #floor(min(c(x@originalMeasurements,0)))
max_val <- max(x@originalMeasurements) #ceiling(max(c(x@originalMeasurements,0)))
#get the ... argument into a list
args <- list(...)
#check for several standard graphic parameters and if they aren't set, set them to default values
if (is.null(args$ylab))
args$ylab <- ""
if (is.null(args$xlab))
args$xlab <- ""
if (is.null(args$lty))
args$lty <- 2
if (is.null(args$pch)){
args$pch <- x@binarizedMeasurements
}
else
if (length(args$pch) == 2)
{
pchs <- args$pch
args$pch <- rep(pchs[1], length(x@binarizedMeasurements))
args$pch[as.logical(x@binarizedMeasurements)] <- rep(pchs[2], sum(x@binarizedMeasurements))
}
col <- args$col
if (is.null(col))
{
col <- c("red","green","black")
}
if (length(col) == 2 || length(col) == 3)
{
args$col <- rep(col[1], length(x@binarizedMeasurements))
args$col[as.logical(x@binarizedMeasurements)] <- rep(col[2], sum(x@binarizedMeasurements))
if (length(col) == 2)
col <- c(col,"black")
}
if (is.null(args$type))
args$type <- "p"
if (is.null(args$yaxt))
args$yaxt="n"
#plotting the axes shouldn't be controlled by standard plot function
#this method does it later
#args$axes <- FALSE
#check for the limit standard graphic parameters and if they aren't set, set them to default values
if (is.null(args$xlim))
args$xlim <- c(min_val,max_val)
if (is.null(args$ylim))
args$ylim <- c(-0.1,0.1)
#set the point coordinates
args$x <- x@originalMeasurements
args$y <- rep(0,vect_length)
#plot them
do.call("plot", args)
#plot the threshold as line
if (as.logical(showThreshold))
{
par(new=TRUE)
largs <- list(...)
if (is.null(largs$lty))
largs$lty <- 2
if (length(col) == 3)
largs$col <- col[3]
else
largs$col <- "black"
do.call("abline", c(largs,v=x@threshold))
}
#if axes isn't set or TRUE plot the x-axis
#if (is.null(list(...)$axes) || as.logical(list(...)$axes) || list(...)$yaxt != "n")
#{
# if (is.null(args$lwd))
# {
# lwd <- 1
# }
# else
# {
# lwd <- args$lwd
# }
# at <- round(seq(min_val,max_val,by=(max_val-min_val)/5),1)
# axis(1, at=at, lwd=lwd, pos=-0.01)
# #axis(1, at=at, lwd=lwd, pos=-0.05)#c(min_val,-10))
# #axis(1, at=at, lwd=lwd, pos=-0.1)#c(min_val,-10))
#}
if (as.logical(showLegend))
{
if (is.null(args$lwd))
{
lwd <- 1
}
else{
lwd <- args$lwd
}
if (as.logical(showThreshold))
{
if (is.null(args$pch)){
pch <- c(0,1,NA)
}
else
if (length(args$pch) > 2)
{
pch <- c(15, 16, NA)
}
else{
pch <- c(unique(args$pch), NA)
}
names <- c("zeros", "ones", "threshold")
lty <- c(NA, NA, args$lty[1])
}
else
{
if (is.null(args$pch))
{
pch <- c(0,1)
}
else
if (length(args$pch) > 2)
{
pch <- c(15, 16)
}
else{
pch <- unique(args$pch)
}
names <- c("zeros", "ones")
lty <- c(NA, NA)
#if (is.null(args$col)){
# col <- "black"
#}
#else if (length(args$col) < 3){
# col <- args$col
#}
#else{
# col <- args$col[1:2]
#}
}
legend("topleft", names, pch=pch,
lty=lty, inset=c(0.05, 0.05), bty="n", cex=0.8, lwd=lwd, col=col)
}
}
}
)
setMethod(
f = "plot",
signature = c("numeric","BinarizationResult"),
definition = function(x, y, showLegend=TRUE, showThreshold=TRUE, ...)
{
#extract the base values of y
vect_length <- length(y@originalMeasurements)
min_val <- min(y@originalMeasurements)
max_val <- max(y@originalMeasurements)
#get the ... argument into a list
args <- list(...)
#check for several standard graphic parameters and if they aren't set, set them to default values
if (is.null(args$ylab))
args$ylab <- ""
if (is.null(args$xlab))
args$xlab <- ""
if (is.null(args$lty))
args$lty <- 2
if (is.null(args$cex.axis))
args$cex.axis <- par("cex.axis")
if (is.null(args$cex.lab))
args$cex.lab <- par("cex.lab")
if (is.null(args$pch))
{
args$pch <- y@binarizedMeasurements
}
else
if (length(args$pch) == 2)
{
pchs <- args$pch
args$pch <- rep(pchs[1], length(y@binarizedMeasurements))
args$pch[as.logical(y@binarizedMeasurements)] <- rep(pchs[2], sum(y@binarizedMeasurements))
}
col <- args$col
if (is.null(col)){
col <- c("red","green","black")
}
if (length(col) == 2 || length(col) == 3){
args$col <- rep(col[1], length(y@binarizedMeasurements))
args$col[as.logical(y@binarizedMeasurements)] <- rep(col[2], sum(y@binarizedMeasurements))
if (length(col) == 2)
col <- c(col,"black")
}
if (is.null(args$type))
args$type <- "p"
#plotting the axes shouldn't be controlled by standard plot function
#this method does it later
#args$axes <- FALSE
args$xaxt="n"
#maxx is the minimal value >= vect_length and dividable by vect_length DIV 5
#(for example: if vect_length = 11 => maxx is 12 and if vect_length = 19 => maxx is 21)
#maxx <- ifelse(vect_length%%5==0, vect_length, vect_length%/%5*6)
#while(maxx < vect_length)
# maxx <- maxx + vect_length%/%5
#check for the limit standard graphic arguments. if not set set them to default values
#if (is.null(args$xlim))
# args$xlim <- c(0, maxx)
#if (is.null(args$ylim))
# args$ylim <- c(min_val, max_val)
#plot the binarization
args$x <- x
args$y <- y@originalMeasurements
do.call("plot", args)
#plot the threshold as line
if (as.logical(showThreshold))
{
par(new=TRUE)
largs <- list(...)
if (is.null(largs$lty))
largs$lty <- 2
if (length(col) == 3)
largs$col <- col[3]
else
largs$col <- "black"
do.call("abline", c(largs,h=y@threshold))
}
#if axes isn't set or TRUE plot the x and y axis according to maxx, min_val, max_val
if (is.null(list(...)$axes) || as.logical(list(...)$axes) || list(...)$xaxt != "n")
{
if (is.null(args$lwd))
{
lwd <- 1
}
else
{
lwd <- args$lwd
}
axis(1, at=x, lwd=lwd, cex.axis=args$cex.axis, cex.lab=args$cex.lab)
}
if (as.logical(showLegend))
{
if (is.null(args$lwd))
{
lwd <- 1
}
else{
lwd <- args$lwd
}
if (as.logical(showThreshold))
{
if (is.null(args$pch)){
pch <- c(0,1,NA)
}
else
if (length(args$pch) > 2)
{
pch <- c(15, 16, NA)
}
else{
pch <- c(unique(args$pch), NA)
}
names <- c("zeros", "ones", "threshold")
lty <- c(NA, NA, args$lty[1])
}
else
{
if (is.null(args$pch))
{
pch <- c(0,1)
}
else
if (length(args$pch) > 2)
{
pch <- c(15, 16)
}
else{
pch <- unique(args$pch)
}
names <- c("zeros", "ones")
lty <- c(NA, NA)
}
legend("topleft", names, pch=pch,
lty=lty, inset=c(0.05, 0.05), bty="n", cex=0.8, lwd=lwd, col=col)
}
}
)
#This method prints the method name, the threshold, the binarized vector and the p value to the console
setMethod(
f = "print",
signature = "BinarizationResult",
definition = function(x){
cat("Method: ", x@method, "\n", sep="")
cat("\nThreshold: ", x@threshold, "\n", sep="")
cat("\nBinarized vector: [ ", paste(x@binarizedMeasurements, collapse=" "),
" ]\n", sep="")
if (!is.na([email protected]))
cat("\np value: ",[email protected],"\n", sep="")
}
)
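# Usage sketch (illustrative, not part of the original package source): the plot
# method on a BinarizationResult. The one-dimensional view places the
# measurements on a line with the threshold as a vertical line; with
# twoDimensional=TRUE, values are plotted against their index instead.
res <- binarize.kMeans(c(0.5, 0.7, 0.6, 4.2, 4.4, 4.5))
plot(res)                       # 1D view
plot(res, twoDimensional=TRUE)  # 2D view: index on x, measurement on y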
#############################################Class BASCResult##############################################
#This is the result class for the two BASC algorithms. It provides an additional method called plotStepFunctions and is
#derived from the BinarizationResult class.
setClass(
Class = "BASCResult",
representation = representation(
intermediateSteps = "matrix",
intermediateHeights = "matrix",
intermediateStrongestSteps = "integer"
),
contains = "BinarizationResult",
validity = function(object){
#extract relevant object slots
isteps <- object@intermediateSteps
iheights <- object@intermediateHeights
istrsteps <- object@intermediateStrongestSteps
omeasure <- object@originalMeasurements
#initialize the critical error messages
critical_invalid_strings <- c(
"'intermediateSteps' isn't set!",
"'intermediateHeights' isn't set!",
"'intermediateStrongestSteps' isn't set!"
)
#check object for critical errors
critical_invalid <- c(
!length(isteps),
!length(iheights),
!length(istrsteps)
)
#if a critical error occurred return the corresponding error messages
if (sum(as.integer(critical_invalid))){
return(critical_invalid_strings[which(critical_invalid)])
}
#initialize weak error messages
weak_invalid_strings <- c(
"'intermediateSteps' and 'intermediateHeights' must have the same dimensionality.",
"'intermediateStrongestSteps' must have the same length as the number of rows of 'intermediateSteps'.",
"The values of 'intermediateSteps' must be in range [0, #Measurements].",
"The values of 'intermediateStrongestSteps' must be in range [1, #Measurements]."
)
#check object for weak errors
weak_invalid <- c(
as.logical(sum(dim(isteps) != dim(iheights))),
length(istrsteps) != nrow(isteps),
(sum(isteps < 0) || sum(isteps > length(omeasure))),
(sum(istrsteps < 1) || sum(istrsteps > length(omeasure)))
)
#if a weak error occurred return the corresponding error messages
if (sum(as.integer(weak_invalid))){
return(weak_invalid_strings[which(weak_invalid)])
}
#object is valid
return(TRUE)
}
)
setGeneric(
name = "plotStepFunctions",
def = function(x, showLegend=TRUE, connected=FALSE, withOriginal=TRUE, ...){
standardGeneric("plotStepFunctions")
}
)
#This method plots all the computed optimal step functions with n steps in one diagram. These step functions
#are produced by the two BASC algorithms and are used both to determine the optimal jump position and to
#calculate the p-value.
setMethod(
f = "plotStepFunctions",
signature = "BASCResult",
definition = function(x, showLegend=TRUE, connected=FALSE, withOriginal=TRUE, ...){
#check the input BASCResult-Object
if (ncol(x@intermediateSteps) == 0 || nrow(x@intermediateSteps) == 0)
stop("intermediateSteps has no values to plot.")
if (ncol(x@intermediateHeights) == 0 || nrow(x@intermediateHeights) == 0)
stop("intermediateHeights has no values to plot.")
if (length(x@intermediateStrongestSteps) == 0)
stop("intermediateStrongestSteps has no values to plot.")
#get the value-count
vect_count <- length(x@originalMeasurements)
#steps is a matrix with all the jump indices computed by the C-function concatenated with
#1:vect_count which is used for plotting the original step-function
if (as.logical(withOriginal)){
steps <- matrix(nrow=nrow(x@intermediateSteps)+1, ncol = vect_count, data = rep(0,(nrow(x@intermediateSteps)+1) * vect_count))
steps[1:(nrow(steps)-1),1:ncol(x@intermediateSteps)] <- x@intermediateSteps
steps[nrow(steps),] <- seq(along=x@originalMeasurements)
}
else{
steps <- matrix(nrow=nrow(x@intermediateSteps), ncol = vect_count, data = rep(0,nrow(x@intermediateSteps) * vect_count))
steps[1:nrow(steps),1:ncol(x@intermediateSteps)] <- x@intermediateSteps
}
#heights is a matrix with all the jump heights computed by the C-function concatenated with
#the jump heights of the original step-function
if (as.logical(withOriginal)){
heights <- matrix(nrow=nrow(x@intermediateSteps)+1, ncol = vect_count, data = rep(0,(nrow(x@intermediateSteps)+1) * vect_count))
heights[1:(nrow(heights)-1),1:ncol(x@intermediateSteps)] <- x@intermediateHeights
heights[nrow(heights),] <- c(diff(sort(x@originalMeasurements)), 0)
}
else{
heights <- matrix(nrow=nrow(x@intermediateSteps), ncol = vect_count, data = rep(0,nrow(x@intermediateSteps) * vect_count))
heights[1:nrow(heights),1:ncol(x@intermediateSteps)] <- x@intermediateHeights
}
heights <- t(apply(heights,1,function(x)x/sum(x)))
#the maximal y-value is calculated. y starts at 1, all the individual jump-heights are added and
#between every single step-function there's 0.5 free space
maxy <- nrow(steps) * 0.5 + sum(heights) + 1
# #maxx is the minimal value >= vect_length and dividable by vect_length DIV 5
# #(for example: if vect_length = 11 => maxx is 12 and if vect_length = 19 => maxx is 21)
#maxx <- ifelse((vect_count%%5)==0, vect_count, (vect_count%/%5)*6)
#while(maxx < vect_count)
# maxx <- maxx + (vect_count%/%5)
maxx <- vect_count
#calculate the coordinates of the lines of the step-functions
lines <- sapply(
#loop over the rows of steps from last row to first row
rev(seq(along = steps[,1])),
function(i, st, he){
#calculate the base y-value of the current "line"
#it is calculated like maxy but only for the first i lines.
cury <- ifelse(i < nrow(he), sum(he[seq(i + 1, nrow(he)),]) + (nrow(he) - i + 1) * 0.5, 0.5)
#cury <- ifelse(i < nrow(he), sum(he[seq(i + 1, nrow(he)),]), 0.5)
#get the current steps and heights row
cur_steps <- st[i, st[i,] > 0]
cur_heights <- he[i,]
#count is the current number of single lines of the current step functions
#except the last line which is always added directly before the return statement
count <- min(vect_count-1, length(cur_steps))
if (!as.logical(connected))
lines <- matrix(nrow=2, ncol=2+4*count)
else
lines <- matrix(nrow=2, ncol=2+2*count)
#construct the coordinates of the lines first and last x,y-pair will be added
#after the next block
lines[,seq(2, ncol(lines)-1)] <- matrix(
sapply(
seq(1, count),
function(j,s,h,base){
#the NAs are necessary because vertical lines directly at a step shouldn't
#be plotted
if (!as.logical(connected)){
result <- matrix(ncol=4, nrow=2, rep(NA,8))
result[1,c(1,4)] <- rep(s[j], 2)
result[2, 1] <- ifelse(j==1, base, base + sum(h[seq_len(j-1)]))
result[2, 4] <- base + sum(h[1:j])
}
else{
result <- matrix(ncol=2, nrow=2, rep(NA,4))
result[1,c(1,2)] <- rep(s[j], 2)
result[2, 1] <- ifelse(j==1, base, base + sum(h[seq_len(j-1)]))
result[2, 2] <- base + sum(h[1:j])
}
return(result)
},
cur_steps,
cur_heights,
cury
)
)
#set the first and the last coordinates pair and return all the coordinates for the
#current step-function
lines[, 1] <- c(0, cury)
lines[, ncol(lines)] <- c(vect_count, lines[2, ncol(lines) - 1])
return(lines)
},
steps,
heights
)
#calculate the coordinates for the lines of the respective strongest steps
if (as.logical(withOriginal)){
ncol <- 3 * (nrow(steps) - 1)
sequence <- rev(seq(1, nrow(steps) - 1))
}
else{
ncol <- 3 * nrow(steps)
sequence <- rev(seq(1, nrow(steps)))
}
strongestLines <- matrix(
nrow = 2,
ncol = ncol,
data = sapply(
sequence,
function(i, st, he, x, l){
#get the current values
cur_steps <- st[i, st[i,] > 0]
cur_heights <- he[i,]
cur_l <- l[[length(l) - i + 1]]
#get the coordinates from the current values
result <- matrix(nrow = 2, ncol = 3, data = rep(NA, 6))
result[1,c(2,3)] <- rep(x@intermediateStrongestSteps[i], 2)
result[2,2] <- cur_l[2, which(cur_l[1,] == x@intermediateStrongestSteps[i])[1]]
result[2,3] <- cur_l[2, which(cur_l[1,] == x@intermediateStrongestSteps[i])[2]]
return(result)
},
steps,
heights,
x,
lines
)
)
#insert NAs at the strongestStep positions, because these lines are plotted separately
if (as.logical(connected)){
for(i in seq(along=lines)){
l <- lines[[i]]
matched <- match(strongestLines[2,],l[2,])
matched <- matched[!is.na(matched)]
if (length(matched) > 0){
ind <- max(matched)
l <- matrix(nrow=2,data=c(l[,1:(ind-1)],NA,NA,l[,-(1:(ind-1))]))
lines[[i]] <- l
}
}
}
#put the additional arguments in args
args <- list(...)
#check several standard graphics parameter and set them to default values if they aren't
#set yet
if (is.null(args$xlim))
args$xlim <- c(0, maxx)
if (is.null(args$ylim))
args$ylim <- c(0, maxy*1.01)
if (is.null(args$pch))
args$pch <- c(1,20)
if (is.null(args$type))
args$type <- "o"
else
args$type <- args$type[1] #if args$type is a vector then take first element for standard-lines
#and the second element for strongest-step lines others will be ignored
if (is.null(args$cex))
args$cex <- c(1,1.2)
if (is.null(args$ylab))
args$ylab <- ""
if (is.null(args$xlab))
args$xlab <- ""
if (is.null(args$lty))
args$lty <- 1
else
args$lty <- args$lty[1] #same handling as args$type
if (!is.null(args$col))
args$col <- args$col[1] #same handling as args$type
#drawing axes will be handled later by this function and not by the standard plot function
args$axes <- FALSE
#plot the step functions
lapply(
lines,
function(l){
args$x <- l[1,]
args$y <- l[2,]
do.call("plot", args)
#par(new=TRUE) is necessary because the old lines shouldn't be deleted
par(new=TRUE)
}
)
#setup args for plotting strongest steps
args$x <- strongestLines[1,]
args$y <- strongestLines[2,]
if (is.null(list(...)$type))
args$type <- "l"
else if (length(list(...)$type) > 1)
args$type <- list(...)$type[2]
else
args$type <- list(...)$type
if (is.null(list(...)$lty))
args$lty <- 2
else if (length(list(...)$lty) > 1)
args$lty <- list(...)$lty[2]
else
args$lty <- list(...)$lty
if (!is.null(list(...)$col) & length(list(...)$col) > 1){
args$col <- list(...)$col[2]
}
#plot the strongest steps of the step functions
do.call("plot", args)
if (is.null(list(...)$lwd))
lwd <- 1
else
lwd <- list(...)$lwd
#if axes isn't set or set to TRUE then plot the x-axes
if (is.null(list(...)$axes) || as.logical(list(...)$axes)){
axis(1, pos=0, at=seq(0,maxx,by=(vect_count%/%5)), lwd = lwd)
}
#if showLegend is TRUE plot a legend
if (as.logical(showLegend)){
#if lty wasn't set take the default values for the line types else take the first
#two values (if possible) for the line type
if (is.null(list(...)$lty))
lty <- c(1,2)
else if (length(list(...)$lty) == 1)
lty <- list(...)$lty
else
lty <- list(...)$lty[c(1,2)]
if (is.null(list(...)$col))
col <- "black"
else if (length(list(...)$col) == 1)
col <- list(...)$col
else
col <- list(...)$col[1:2]
legend("topleft", c("steps","strongest steps"), lty=lty, col=col, inset=c(0.05,0), bty="n", cex=0.8, lwd=lwd)
}
}
)
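# Usage sketch (illustrative, not part of the original package source): visualize
# the intermediate optimal step functions computed by BASC A for a toy vector.
# plotStepFunctions() requires a BASCResult, since only that class carries the
# intermediate slots.
stepRes <- binarize.BASC(c(0.1, 0.4, 0.6, 2.8, 3.0, 3.2, 3.4), method="A")
plotStepFunctions(stepRes, connected=FALSE, withOriginal=TRUE)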
|
/scratch/gouwar.j/cran-all/cranData/BiTrinA/R/result.R
|
#############################################Class TrinarizationResult############################################
#This is the base class of all results of the trinarization functions. It provides the basic methods show and
#print as well as a plot method for visualizing the trinarization, and it checks all created objects for validity.
setClass(
Class = "TrinarizationResult",
representation = representation(
originalMeasurements = "numeric",
trinarizedMeasurements = "integer",
threshold1 = "numeric",
threshold2 = "numeric",
method = "character",
p.value = "numeric"
),
validity = function(object){
#extract object slots
omeasure <- object@originalMeasurements
bmeasure <- object@trinarizedMeasurements
thresh1 <- object@threshold1
thresh2 <- object@threshold2
meth <- object@method
p.value <- [email protected]
#initialize the basic strings
valid_methods <- c(
"TASC A",
"TASC A (min)",
"TASC B",
"TASC B (min)",
"k-Means"
)
for(i in seq(1, length(valid_methods))){
valid_methods_string <- ifelse(i==1, sprintf("\"%s\"", valid_methods[i]), sprintf("%s, \"%s\"", valid_methods_string, valid_methods[i]))
}
#initialize the critical error messages
critical_invalid_strings <- c(
"'originalMeasurements' isn't set!",
"'trinarizedMeasurements' isn't set!",
"'threshold1' isn't set!",
"'threshold2' isn't set!",
"'method' isn't set!",
"'p.value' isn't set!"
)
#check object for critical errors
critical_invalid <- c(
!length(omeasure),
!length(bmeasure),
!length(thresh1),
!length(thresh2),
!length(meth),
!length(p.value)
)
#if a critical error occurred return the corresponding error messages
if (sum(as.integer(critical_invalid))){
return(critical_invalid_strings[which(critical_invalid)])
}
#initialize the weak error messages
weak_invalid_strings <- c(
"Only zeros, ones and twos are valid values for 'trinarizedMeasurements'.",
sprintf("'method' must be element of {%s}, but it is \"%s\".", valid_methods_string, as.character(meth)),
"Length of original and trinarized Measurements must be the same.",
sprintf("'threshold1' and 'threshold2' must be within the borders of the original values, which is the interval [%f, %f], but they are %f and %f.", min(omeasure), max(omeasure), thresh1, thresh2),
"'p.value' must be in range [0,1]."
)
#check object for weak errors
weak_invalid <- c(
length(which(bmeasure > 2)) || length(which(bmeasure < 0)),
length(which(valid_methods == meth)) < 1,
length(bmeasure) != length(omeasure),
thresh1 < min(omeasure) || thresh1 > max(omeasure) || thresh2 < min(omeasure) || thresh2 > max(omeasure),
(!is.na(p.value) && (p.value < 0 || p.value > 1))
)
#if a weak error occurred return the corresponding error messages
if (sum(as.integer(weak_invalid))){
return(weak_invalid_strings[which(weak_invalid)])
}
#object is valid
return(TRUE)
}
)
#This method prints the method name, the trinarized vector (limited to 10 values), both thresholds and the
#p value to the console. It is called when creating an object without an assignment or when typing the name of a
#TrinarizationResult object at the console.
setMethod(
f = "show",
signature = "TrinarizationResult",
definition = function(object){
cat("Method: ", object@method, "\n",sep="")
if(length(object@trinarizedMeasurements) <= 10){
cat("\nTrinarized vector: [ ", paste(object@trinarizedMeasurements, collapse=" "),
" ]\n",sep="")
}else{
cat("\nTrinarized vector: [ ",paste(object@trinarizedMeasurements[1:10], collapse=" "),
" ...]\n",sep="")
}
cat("\nThreshold1: ", object@threshold1, "\n", sep="")
cat("\nThreshold2: ", object@threshold2, "\n", sep="")
if(!is.na([email protected])){
cat("\np value: ", [email protected], "\n", sep="")
}
}
)
#This method prints the method name, both thresholds, the trinarized vector and the p value to the console
setMethod(
f = "print",
signature = "TrinarizationResult",
definition = function(x){
cat("Method: ", x@method, "\n", sep="")
cat("\nThreshold1: ", x@threshold1, "\n", sep="")
cat("\nThreshold2: ", x@threshold2, "\n", sep="")
cat("\nTrinarized vector: [ ", paste(x@trinarizedMeasurements, collapse=" "),
" ]\n", sep="")
if (!is.na([email protected])){
cat("\np value: ",[email protected],"\n", sep="")
}
}
)
setGeneric("plot", useAsDefault = plot)
#This method plots the computed trinarization in a one- or two-dimensional way.
setMethod(
f = "plot",
signature = c("TrinarizationResult"),
definition = function(x, twoDimensional=FALSE, showLegend=TRUE, showThreshold=TRUE, ...)
{
if (twoDimensional){
plot(1:length(x@trinarizedMeasurements), x, showLegend=showLegend, showThreshold=showThreshold, ...)
}else{
#extract the base values of x
vect_length <- length(x@originalMeasurements)
min_val <- min(x@originalMeasurements) #floor(min(c(x@originalMeasurements,0)))
max_val <- max(x@originalMeasurements) #ceiling(max(c(x@originalMeasurements,0)))
#get the ... argument into a list
args <- list(...)
#check for several standard graphic parameters and if they aren't set, set them to default values
if(is.null(args$ylab)){
args$ylab <- ""
}
if(is.null(args$xlab)){
args$xlab <- ""
}
if(is.null(args$lty)){
args$lty <- 2
}
if(is.null(args$pch)){
args$pch <- x@trinarizedMeasurements
}else if(length(args$pch) == 3){
args$pch <- args$pch[x@trinarizedMeasurements+1]
}
col <- args$col
if(is.null(col)){
col <- c("red","green","blue","black")
}
if(length(col) < 3){
col <- rep(col, 3)[1:3]
}
if(length(col) == 3){
col <- c(col, "black")
}
if(length(col) == 4){
args$col <- col[x@trinarizedMeasurements+1]
}
if(is.null(args$type)){
args$type <- "p"
}
if(is.null(args$yaxt)){
args$yaxt="n"
}
#plotting the axes shouldn't be controlled by standard plot function
#this method does it later
#args$axes <- FALSE
#check for the limit standard graphic parameters and if they aren't set, set them to default values
if(is.null(args$xlim)){
args$xlim <- c(min_val,max_val)
}
if(is.null(args$ylim)){
args$ylim <- c(-0.1,0.1)
}
#set the point coordinates
args$x <- x@originalMeasurements
args$y <- rep(0,vect_length)
#plot them
do.call("plot", args)
#plot the threshold as line
if(as.logical(showThreshold)){
par(new=TRUE)
largs <- list(...)
if(is.null(largs$lty)){
largs$lty <- 2
}
largs$col <- col[4]
do.call("abline", c(largs,v=x@threshold1))
do.call("abline", c(largs,v=x@threshold2))
}
#if axes isn't set or TRUE plot the x-axis
#if (is.null(list(...)$axes) || as.logical(list(...)$axes) || list(...)$yaxt != "n"){
# if (is.null(args$lwd)){
# lwd <- 1
# }else{
# lwd <- args$lwd
# }
# at <- round(seq(min_val,max_val,by=(max_val-min_val)/5),1)
# axis(1, at=at, lwd=lwd, pos=-0.01)
# #axis(1, at=at, lwd=lwd, pos=-0.05)#c(min_val,-10))
# #axis(1, at=at, lwd=lwd, pos=-0.1)#c(min_val,-10))
#}
if(as.logical(showLegend)){
if(is.null(args$lwd)){
lwd <- 1
}else{
lwd <- args$lwd
}
if(as.logical(showThreshold)){
if(is.null(args$pch)){
pch <- c(0,1,2,NA)
}else if(length(args$pch) > 3){
pch <- c(15, 16, 17, NA)
}else{
pch <- c(unique(args$pch), NA)
}
names <- c("zero", "one", "two", "threshold")
lty <- c(NA, NA, NA, args$lty[1])
}else{
if(is.null(args$pch)){
pch <- c(0,1,2)
}else if(length(args$pch) > 3){
pch <- c(15, 16, 17)
}else{
pch <- unique(args$pch)
}
names <- c("zero", "one", "two")
lty <- c(NA, NA, NA)
#if(is.null(args$col)){
# col <- "black"
#}else if (length(args$col) < 3){
# col <- args$col
#}else{
# col <- args$col[1:2]
#}
}
legend("topleft", names, pch=pch,
lty=lty, inset=c(0.05, 0.05), bty="n", cex=0.8, lwd=lwd, col=col)
}
}
}
)
setMethod(
f = "plot",
signature = c("numeric","TrinarizationResult"),
definition = function(x, y, showLegend=TRUE, showThreshold=TRUE, ...)
{
#extract the base values of y
vect_length <- length(y@originalMeasurements)
min_val <- min(y@originalMeasurements)
max_val <- max(y@originalMeasurements)
#get the ... argument into a list
args <- list(...)
#check for several standard graphic parameters and if they aren't set, set them to default values
if(is.null(args$ylab)){
args$ylab <- ""
}
if(is.null(args$xlab)){
args$xlab <- ""
}
if(is.null(args$lty))
args$lty <- 2
if(is.null(args$cex.axis))
args$cex.axis <- par("cex.axis")
if(is.null(args$cex.lab))
args$cex.lab <- par("cex.lab")
if(is.null(args$pch))
{
args$pch <- y@trinarizedMeasurements
}
else
if(length(args$pch) == 3)
{
args$pch <- args$pch[y@trinarizedMeasurements+1]
}
col <- args$col
if(is.null(col)){
col <- c("red","green","blue","black")
}
if(length(col) < 3){
col <- rep(col, 3)[1:3]
}
if(length(col) == 3){
col <- c(col, "black")
}
if(length(col) == 4){
args$col <- col[y@trinarizedMeasurements+1]
}
if(is.null(args$type))
args$type <- "p"
#plotting the axes shouldn't be controlled by standard plot function
#this method does it later
#args$axes <- FALSE
args$xaxt="n"
#maxx is the minimal value >= vect_length and dividable by vect_length DIV 5
#(for example: if vect_length = 11 => maxx is 12 and if vect_length = 19 => maxx is 21)
#maxx <- ifelse(vect_length%%5==0, vect_length, vect_length%/%5*6)
#while(maxx < vect_length)
# maxx <- maxx + vect_length%/%5
#check for the limit standard graphic arguments. if not set set them to default values
#if (is.null(args$xlim))
# args$xlim <- c(0, maxx)
#if (is.null(args$ylim))
# args$ylim <- c(min_val, max_val)
#plot the binarization
args$x <- x
args$y <- y@originalMeasurements
do.call("plot", args)
#plot the threshold as line
if(as.logical(showThreshold)){
par(new=TRUE)
largs <- list(...)
if (is.null(largs$lty))
largs$lty <- 2
largs$col <- col[4]
do.call("abline", c(largs,h=y@threshold1))
do.call("abline", c(largs,h=y@threshold2))
}
#if axes isn't set or TRUE plot the x and y axis according to maxx, min_val, max_val
if(is.null(list(...)$axes) || as.logical(list(...)$axes) || list(...)$xaxt != "n")
{
if(is.null(args$lwd))
{
lwd <- 1
}
else
{
lwd <- args$lwd
}
axis(1, at=x, lwd=lwd, cex.axis=args$cex.axis, cex.lab=args$cex.lab)
}
if(as.logical(showLegend))
{
if(is.null(args$lwd))
{
lwd <- 1
}
else{
lwd <- args$lwd
}
if(as.logical(showThreshold))
{
if(is.null(args$pch)){
pch <- c(0,1,2,NA)
}
else
if(length(args$pch) > 3)
{
pch <- c(15, 16, 17, NA)
}
else{
pch <- c(unique(args$pch), NA)
}
names <- c("zero", "one", "two", "threshold")
lty <- c(NA, NA, NA, args$lty[1])
}
else
{
if(is.null(args$pch))
{
pch <- c(0,1,2)
}
else
if(length(args$pch) > 3)
{
pch <- c(15, 16, 17)
}
else{
pch <- unique(args$pch)
}
names <- c("zero", "one", "two")
lty <- c(NA, NA, NA)
}
legend("topleft", names, pch=pch,
lty=lty, inset=c(0.05, 0.05), bty="n", cex=0.8, lwd=lwd, col=col)
}
}
)
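# Usage sketch (illustrative, not part of the original package source): plotting
# a TASC trinarization. Both thresholds are drawn as lines, and the three
# discrete levels are shown with separate symbols and colours.
triRes <- TASC(c(0.2, 0.4, 0.5, 2.1, 2.3, 2.2, 5.0, 5.2, 5.4), method="A")
plot(triRes)                       # 1D view with both thresholds
plot(triRes, twoDimensional=TRUE)  # index vs. measurement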
#############################################Class TASCResult##############################################
#This is the result class for the TASC algorithm. It provides an additional method called plotStepFunctions and is
#derived from the TrinarizationResult class.
setClass(
Class = "TASCResult",
representation = representation(
intermediateSteps = "matrix",
intermediateHeights1 = "matrix",
intermediateHeights2 = "matrix",
intermediateStrongestSteps = "matrix"),
contains = "TrinarizationResult",
validity = function(object){
#extract relevant object slots
isteps <- object@intermediateSteps
iheights1 <- object@intermediateHeights1
iheights2 <- object@intermediateHeights2
istrsteps <- object@intermediateStrongestSteps
omeasure <- object@originalMeasurements
#initialize the critical error messages
critical_invalid_strings <- c(
"'intermediateSteps' isn't set!",
"'intermediateHeights1' isn't set!",
"'intermediateHeights2' isn't set!",
"'intermediateStrongestSteps' isn't set!"
)
#check object for critical errors
critical_invalid <- c(
!length(isteps),
!length(iheights1),
!length(iheights2),
!length(istrsteps)
)
#if a critical error occurred return the corresponding error messages
if(sum(as.integer(critical_invalid))){
return(critical_invalid_strings[which(critical_invalid)])
}
#initialize weak error messages
weak_invalid_strings <- c(
"'intermediateSteps' and 'intermediateHeights' must have the same dimensionality.",
"'intermediateStrongestSteps' must have the same number of rows as 'intermediateSteps'.",
"The values of 'intermediateSteps' must be in range [0, #Measurements].",
"The values of 'intermediateStrongestSteps' must be in range [1, #Measurements]."
)
#check object for weak errors
weak_invalid <- c(
as.logical(sum(dim(isteps) != dim(iheights1)) || sum(dim(isteps) != dim(iheights2))),
nrow(istrsteps) != nrow(isteps),
(sum(isteps < 0) || sum(isteps > length(omeasure))),
(sum(istrsteps[,1] < 1) || sum(istrsteps[,1] > length(omeasure)) || sum(istrsteps[,2] < 1) || sum(istrsteps[,2] > length(omeasure)))
)
#if a weak error occurred return the corresponding error messages
if(sum(as.integer(weak_invalid))){
return(weak_invalid_strings[which(weak_invalid)])
}
#object is valid
return(TRUE)
}
)
#This method plots all the computed optimal step functions with n steps in one diagram. These step functions
#are produced by the TASC algorithm and are used both to determine the optimal jump positions and to
#calculate the p-value.
setMethod(
f = "plotStepFunctions",
signature = "TASCResult",
definition = function(x, showLegend=TRUE, connected=FALSE, withOriginal=TRUE, ...){
#check the input TASCResult object
if(ncol(x@intermediateSteps) == 0 || nrow(x@intermediateSteps) == 0)
stop("intermediateSteps has no values to plot.")
if(ncol(x@intermediateHeights1) == 0 || nrow(x@intermediateHeights1) == 0)
stop("intermediateHeights1 has no values to plot.")
if(ncol(x@intermediateHeights2) == 0 || nrow(x@intermediateHeights2) == 0)
stop("intermediateHeights2 has no values to plot.")
if(ncol(x@intermediateStrongestSteps) == 0 || nrow(x@intermediateStrongestSteps) == 0)
stop("intermediateStrongestSteps has no values to plot.")
#get the value-count
vect_count <- length(x@originalMeasurements)
#steps is a matrix with all the jump indices computed by the C-function concatenated with
#1:vect_count which is used for plotting the original step-function
if(as.logical(withOriginal)){
steps <- matrix(nrow=nrow(x@intermediateSteps)+1, ncol = vect_count, data = rep(0,(nrow(x@intermediateSteps)+1) * vect_count))
steps[1:(nrow(steps)-1),1:ncol(x@intermediateSteps)] <- x@intermediateSteps
steps[nrow(steps),] <- seq(along=x@originalMeasurements)
}else{
steps <- matrix(nrow=nrow(x@intermediateSteps), ncol = vect_count, data = rep(0,nrow(x@intermediateSteps) * vect_count))
steps[1:nrow(steps),1:ncol(x@intermediateSteps)] <- x@intermediateSteps
}
#heights is a matrix with all the jump heights computed by the C-function concatenated with
#the jump heights of the original step-function
if(as.logical(withOriginal)){
heights1 <- matrix(nrow=nrow(x@intermediateSteps)+1, ncol = vect_count, data = rep(0,(nrow(x@intermediateSteps)+1) * vect_count))
heights1[1:(nrow(heights1)-1),1:ncol(x@intermediateSteps)] <- x@intermediateHeights1
heights1[nrow(heights1),] <- c(diff(sort(x@originalMeasurements)), 0)
heights2 <- matrix(nrow=nrow(x@intermediateSteps)+1, ncol = vect_count, data = rep(0,(nrow(x@intermediateSteps)+1) * vect_count))
heights2[1:(nrow(heights2)-1),1:ncol(x@intermediateSteps)] <- x@intermediateHeights2
heights2[nrow(heights2),] <- c(diff(sort(x@originalMeasurements)), 0)
}else{
heights1 <- matrix(nrow=nrow(x@intermediateSteps), ncol = vect_count, data = rep(0,nrow(x@intermediateSteps) * vect_count))
heights1[1:nrow(heights1),1:ncol(x@intermediateSteps)] <- x@intermediateHeights1
heights2 <- matrix(nrow=nrow(x@intermediateSteps), ncol = vect_count, data = rep(0,nrow(x@intermediateSteps) * vect_count))
heights2[1:nrow(heights2),1:ncol(x@intermediateSteps)] <- x@intermediateHeights2
}
heights1 <- t(apply(heights1,1,function(x)x/sum(x)))
heights2 <- t(apply(heights2,1,function(x)x/sum(x)))
#the maximal y-value is calculated. y starts at 1, all the individual jump-heights are added and
#between every single step-function there's 0.5 free space
maxy <- nrow(steps) * 0.5 + sum(heights1) + 1
# #maxx is the minimal value >= vect_length and dividable by vect_length DIV 5
# #(for example: if vect_length = 11 => maxx is 12 and if vect_length = 19 => maxx is 21)
#maxx <- ifelse((vect_count%%5)==0, vect_count, (vect_count%/%5)*6)
#while(maxx < vect_count)
# maxx <- maxx + (vect_count%/%5)
maxx <- vect_count
#calculate the coordinates of the lines of the step-functions
lines <- sapply(
#loop over the rows of steps from last row to first row
rev(seq(along = steps[,1])),
function(i, st, he){
#calculate the base y-value of the current "line"
#it is calculated like maxy but only for the first i lines.
cury <- ifelse(i < nrow(he), sum(he[seq(i + 1, nrow(he)),]) + (nrow(he) - i + 1) * 0.5, 0.5)
#cury <- ifelse(i < nrow(he), sum(he[seq(i + 1, nrow(he)),]), 0.5)
#get the current steps and heights row
cur_steps <- st[i, st[i,] > 0]
cur_heights <- he[i,]
#count is the current number of single lines of the current step functions
#except the last line which is always added directly before the return statement
count <- min(vect_count-1, length(cur_steps))
if(!as.logical(connected))
lines <- matrix(nrow=2, ncol=2+4*count)
else
lines <- matrix(nrow=2, ncol=2+2*count)
#construct the coordinates of the lines; the first and last x,y-pair will be added
#after the next block
lines[,seq(2, ncol(lines)-1)] <- matrix(
sapply(
seq(1, count),
function(j,s,h,base){
#the NAs are necessary because vertical lines directly at a step shouldn't be
#plotted
if(!as.logical(connected)){
result <- matrix(ncol=4, nrow=2, rep(NA,8))
result[1,c(1,4)] <- rep(s[j], 2)
result[2, 1] <- ifelse(j==1, base, base + sum(h[seq_len(j - 1)]))
result[2, 4] <- base + sum(h[1:j])
}
else{
result <- matrix(ncol=2, nrow=2, rep(NA,4))
result[1,c(1,2)] <- rep(s[j], 2)
result[2, 1] <- ifelse(j==1, base, base + sum(h[seq_len(j - 1)]))
result[2, 2] <- base + sum(h[1:j])
}
return(result)
},
cur_steps,
cur_heights,
cury
)
)
#set the first and the last coordinate pair and return all the coordinates for the
#current step-function
lines[, 1] <- c(0, cury)
lines[, ncol(lines)] <- c(vect_count, lines[2, ncol(lines) - 1])
return(lines)
},
steps,
heights1
)
#calculate the coordinates for the lines of the respective strongest steps
if(as.logical(withOriginal)){
ncol <- 3 * (nrow(steps) - 1)
sequence <- rev(seq(1, nrow(steps) - 1))
}
else{
ncol <- 3 * nrow(steps)
sequence <- rev(seq(1, nrow(steps)))
}
strongestLines <- matrix(
nrow = 2,
ncol = ncol,
data = sapply(
sequence,
function(i, st, he, x, l){
#get the current values
cur_steps <- st[i, st[i,] > 0]
cur_heights <- he[i,]
cur_l <- l[[length(l) - i + 1]]
#get the coordinates from the current values
result <- matrix(nrow = 2, ncol = 3, data = rep(NA, 6))
result[1,c(2,3)] <- rep(x@intermediateStrongestSteps[i,1], 2)
result[2,2] <- cur_l[2, which(cur_l[1,] == x@intermediateStrongestSteps[i,1])[1]]
result[2,3] <- cur_l[2, which(cur_l[1,] == x@intermediateStrongestSteps[i,1])[2]]
return(result)
},
steps,
heights1,
x,
lines
)
)
strongestLines2 <- matrix(
nrow = 2,
ncol = ncol,
data = sapply(
sequence,
function(i, st, he, x, l){
#get the current values
cur_steps <- st[i, st[i,] > 0]
cur_heights <- he[i,]
cur_l <- l[[length(l) - i + 1]]
#get the coordinates from the current values
result <- matrix(nrow = 2, ncol = 3, data = rep(NA, 6))
result[1,c(2,3)] <- rep(x@intermediateStrongestSteps[i,2], 2)
result[2,2] <- cur_l[2, which(cur_l[1,] == x@intermediateStrongestSteps[i,2])[1]]
result[2,3] <- cur_l[2, which(cur_l[1,] == x@intermediateStrongestSteps[i,2])[2]]
return(result)
},
steps,
heights2,
x,
lines
)
)
strongestLines <- cbind(strongestLines, strongestLines2)
#insert NAs at the strongestStep positions, because these lines are plotted separately
if(as.logical(connected)){
for(i in seq(along=lines)){
l <- lines[[i]]
matched <- match(strongestLines[2,],l[2,])
matched <- matched[!is.na(matched)]
if (length(matched) > 0){
ind <- max(matched)
l <- matrix(nrow=2,data=c(l[,1:(ind-1)],NA,NA,l[,-(1:(ind-1))]))
lines[[i]] <- l
}
}
}
#put the additional arguments in args
args <- list(...)
#check several standard graphics parameters and set them to default values if they
#aren't set yet
if(is.null(args$xlim))
args$xlim <- c(0, maxx)
if(is.null(args$ylim))
args$ylim <- c(0, maxy*1.01)
if(is.null(args$pch))
args$pch <- c(1,20)
if(is.null(args$type))
args$type <- "o"
else
args$type <- args$type[1] #if args$type is a vector, the first element is used for standard lines
#and the second element for strongest-step lines; any others are ignored
if(is.null(args$cex))
args$cex <- c(1,1.2)
if(is.null(args$ylab))
args$ylab <- ""
if(is.null(args$xlab))
args$xlab <- ""
if(is.null(args$lty))
args$lty <- 1
else
args$lty <- args$lty[1] #same handling as args$type
if(!is.null(args$col))
args$col <- args$col[1] #same handling as args$type
#drawing axes will be handled later by this function and not by the standard plot function
args$axes <- FALSE
#plot the step functions
lapply(
lines,
function(l){
args$x <- l[1,]
args$y <- l[2,]
do.call("plot", args)
#par(new=TRUE) is necessary because the old lines shouldn't be deleted
par(new=TRUE)
}
)
#setup args for plotting strongest steps
args$x <- strongestLines[1,]
args$y <- strongestLines[2,]
if(is.null(list(...)$type))
args$type <- "l"
else if(length(list(...)$type) > 1)
args$type <- list(...)$type[2]
else
args$type <- list(...)$type
if(is.null(list(...)$lty))
args$lty <- 2
else if(length(list(...)$lty) > 1)
args$lty <- list(...)$lty[2]
else
args$lty <- list(...)$lty
if(!is.null(list(...)$col) & length(list(...)$col) > 1){
args$col <- list(...)$col[2]
}
#plot the strongest steps of the step functions
do.call("plot", args)
if(is.null(list(...)$lwd))
lwd <- 1
else
lwd <- list(...)$lwd
#if axes isn't set or is set to TRUE then plot the x-axis
if(is.null(list(...)$axes) || as.logical(list(...)$axes)){
axis(1, pos=0, at=seq(0,maxx,by=(vect_count%/%5)), lwd = lwd)
}
#if showLegend is TRUE plot a legend
if(as.logical(showLegend)){
#if lty wasn't set, take the default values for the line types; else take the first
#two values (if possible) for the line types
if(is.null(list(...)$lty))
lty <- c(1,2)
else if(length(list(...)$lty) == 1)
lty <- list(...)$lty
else
lty <- list(...)$lty[c(1,2)]
if(is.null(list(...)$col))
col <- "black"
else if(length(list(...)$col) == 1)
col <- list(...)$col
else
col <- list(...)$col[1:2]
legend("topleft", c("steps","strongest steps"), lty=lty, col=col, inset=c(0.05,0), bty="n", cex=0.8, lwd=lwd)
}
}
)
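# --- usage sketch (not part of the original source) ---
# A minimal, hedged example of invoking this step-function plot, assuming
# the BiTrinA package is loaded and its bundled 'trinarizationExample'
# data set is available (both are used in the package vignette below):
if (interactive()) {
  library(BiTrinA)
  data(trinarizationExample)
  tri <- TASC(trinarizationExample[1, ], method = "A")
  plotStepFunctions(tri, connected = TRUE)
}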
|
/scratch/gouwar.j/cran-all/cranData/BiTrinA/R/result_TASC.R
|
# A method which uses the k-Means algorithm to trinarize a real-valued vector.
# <nstart> controls how many random sets should be chosen by the 'kmeans' method and <iter.max> is the
# maximum number of iterations that are allowed (see also the help for kmeans)
trinarize.kMeans <- function(vect, nstart=1, iter.max=10, dip.test=TRUE, na.rm=FALSE){
#some checks of the arguments
if(!is.numeric(vect))
stop("The input vector must consist of numerical values!")
if (!na.rm && any(is.na(vect)))
stop("Cannot binarize in the presence of NA values!")
else
if (na.rm)
{
vect <- vect[!is.na(vect)]
}
if (any(!is.finite(vect)))
stop("Cannot binarize Inf values!")
if(!is.numeric(nstart))
stop("'nstart' must be numeric!")
if(nstart < 0)
stop("'nstart' must be >= 0!")
if(!is.numeric(iter.max))
stop("'iter.max' must be numeric!")
if(iter.max < 0)
stop("'iter.max' must be >= 0!")
if(length(vect) < 3)
stop("The input vector must have at least 3 entries!")
if(length(unique(vect))==1)
stop("The input vector is constant!")
if (dip.test)
{
p.value <- dip.test(vect)$p.value
}
else
p.value <- as.numeric(NA)
#start the standard kmeans method to do all the calculations
km_res <- kmeans(vect, 3, nstart = nstart, iter.max = iter.max)
#the cluster with the greatest center gets the trinarized value 2, the middle one 1, the smallest 0
cent <- order(km_res$centers)
trinarizeddata <- km_res$cluster
trinarizeddata[km_res$cluster == 1] <- which(cent ==1)
trinarizeddata[km_res$cluster == 2] <- which(cent ==2)
trinarizeddata[km_res$cluster == 3] <- which(cent ==3)
trinarizeddata <- trinarizeddata-1
#calculate the two thresholds as midpoints between adjacent trinarized groups
threshold1 <- (max(vect[trinarizeddata == 0]) + min(vect[trinarizeddata == 1])) / 2
threshold2 <- (max(vect[trinarizeddata == 1]) + min(vect[trinarizeddata == 2])) / 2
#put all computed results into a 'TrinarizationResult' object and return it
return(new("TrinarizationResult",
originalMeasurements = vect,
trinarizedMeasurements = as.integer(trinarizeddata),
threshold1 = threshold1,
threshold2 = threshold2,
p.value = p.value,
method = "k-Means"))
}
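# --- usage sketch (not part of the original source) ---
# A minimal, hedged example, assuming the bundled 'trinarizationExample'
# data set from the package vignette; the two thresholds separate the
# three trinarized groups:
if (interactive()) {
  library(BiTrinA)
  data(trinarizationExample)
  tri <- trinarize.kMeans(trinarizationExample[1, ], nstart = 5)
  print(tri@trinarizedMeasurements)
  print(c(tri@threshold1, tri@threshold2))
}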
|
/scratch/gouwar.j/cran-all/cranData/BiTrinA/R/trinarizeKMeans.R
|
### R code from vignette source 'Vignette.Rnw'
###################################################
### code chunk number 1: Vignette.Rnw:36-37
###################################################
set.seed(13579)
###################################################
### code chunk number 2: Vignette.Rnw:195-196 (eval = FALSE)
###################################################
## install.packages("BiTrinA")
###################################################
### code chunk number 3: Vignette.Rnw:199-200
###################################################
library("BiTrinA")
###################################################
### code chunk number 4: Vignette.Rnw:213-214
###################################################
data(binarizationExample)
###################################################
### code chunk number 5: Vignette.Rnw:220-227
###################################################
pdf("density.pdf")
par(mar=c(2,2,1,1))
#plot(density(binarizationExample[1,]),main="")
#abline(v=mean(binarizationExample[1,]), lty="dashed")
plot(function(x)dnorm(x,mean=0,sd=1)+dnorm(x,mean=10,sd=1),xlim=c(-5,15),main="")
abline(v=5, lty="dashed")
dev.off()
###################################################
### code chunk number 6: Vignette.Rnw:241-243
###################################################
bin <- binarize.kMeans(binarizationExample[1,])
print(bin)
###################################################
### code chunk number 7: Vignette.Rnw:250-251
###################################################
print(bin@binarizedMeasurements)
###################################################
### code chunk number 8: Vignette.Rnw:255-256 (eval = FALSE)
###################################################
## plot(bin)
###################################################
### code chunk number 9: Vignette.Rnw:259-260 (eval = FALSE)
###################################################
## plot(bin, twoDimensional=TRUE)
###################################################
### code chunk number 10: Vignette.Rnw:264-270
###################################################
pdf("plot_oneD.pdf")
plot(bin)
dev.off()
pdf("plot_twoD.pdf")
plot(bin, twoDimensional=TRUE)
dev.off()
###################################################
### code chunk number 11: Vignette.Rnw:286-288
###################################################
label <- c(rep(0,5), rep(1,5))
bin <- binarize.kMeans(binarizationExample[10,])
###################################################
### code chunk number 12: Vignette.Rnw:293-296 (eval = FALSE)
###################################################
## plot(bin, twoDimensional=TRUE,
## col=label+1, pch=label,
## showLegend=FALSE)
###################################################
### code chunk number 13: Vignette.Rnw:298-301
###################################################
pdf("plot_bin_with_label.pdf")
plot(bin, twoDimensional=TRUE, col=label+1, pch=label, showLegend=FALSE)
dev.off()
###################################################
### code chunk number 14: Vignette.Rnw:314-316
###################################################
binMatrix <- binarizeMatrix(binarizationExample,
method="kMeans")
###################################################
### code chunk number 15: Vignette.Rnw:320-323
###################################################
binMatrixFDR <- binarizeMatrix(binarizationExample,
method="kMeans",
adjustment="fdr")
###################################################
### code chunk number 16: Vignette.Rnw:330-332
###################################################
bin <- binarize.BASC(binarizationExample[1,], method="A")
print(bin)
###################################################
### code chunk number 17: Vignette.Rnw:346-347
###################################################
print(bin@intermediateStrongestSteps)
###################################################
### code chunk number 18: Vignette.Rnw:353-359
###################################################
pdf("stepsA.pdf")
plotStepFunctions(bin, connected=TRUE)
dev.off()
pdf("stepsB.pdf")
plotStepFunctions(binarize.BASC(binarizationExample[1,], method="B"), connected=TRUE)
dev.off()
###################################################
### code chunk number 19: Vignette.Rnw:361-362 (eval = FALSE)
###################################################
## plotStepFunctions(bin)
###################################################
### code chunk number 20: Vignette.Rnw:386-387
###################################################
data(trinarizationExample)
###################################################
### code chunk number 21: Vignette.Rnw:393-395
###################################################
tri <- TASC(trinarizationExample[1,], method="A")
print(tri)
###################################################
### code chunk number 22: Vignette.Rnw:423-424
###################################################
print(tri@intermediateStrongestSteps)
###################################################
### code chunk number 23: Vignette.Rnw:431-437
###################################################
pdf("triA.pdf")
par(mfrow = c(1,2), mar = c(2,2,1,1))
plotStepFunctions(tri, connected=TRUE)
par(mar = c(2,2,1,1))
plot(tri, twoDimensional = TRUE)
dev.off()
###################################################
### code chunk number 24: Vignette.Rnw:439-441 (eval = FALSE)
###################################################
## plotStepFunctions(tri)
## plot(tri, twoDimensional = TRUE)
###################################################
### code chunk number 25: Vignette.Rnw:469-474
###################################################
binMatrix <- binarizeMatrix(binarizationExample,
method="kMeans",
adjustment="fdr")
significantRows <- sum(binMatrix[,12] < 0.05)
print(significantRows)
###################################################
### code chunk number 26: Vignette.Rnw:482-488
###################################################
binarizations <- apply(binarizationExample, 1, binarize.BASC, method="A")
pVals <- p.adjust(sapply(binarizations, function(x)
{
return([email protected])
}), method="fdr")
significantRows <- sum(pVals < 0.05)
###################################################
### code chunk number 27: Vignette.Rnw:490-491
###################################################
print(significantRows)
###################################################
### code chunk number 28: Vignette.Rnw:496-502
###################################################
binarizations <- apply(binarizationExample, 1, binarize.BASC, method="B")
pVals <- p.adjust(sapply(binarizations, function(x)
{
return([email protected])
}), method="fdr")
significantRows <- sum(pVals < 0.05)
###################################################
### code chunk number 29: Vignette.Rnw:504-505
###################################################
print(significantRows)
###################################################
### code chunk number 30: Vignette.Rnw:524-526
###################################################
tauValues <- seq(0,0.25, 0.05)
print(tauValues)
###################################################
### code chunk number 31: Vignette.Rnw:531-544
###################################################
significantFeatures <- sapply(tauValues, function(tau)
{
binMatrix <- binarizeMatrix(binarizationExample,
method="BASCB",
adjustment="fdr",
tau=tau)
significantRows <- sum(binMatrix[,12] < 0.05)
return(significantRows)})
names(significantFeatures) <- tauValues
print(significantFeatures)
|
/scratch/gouwar.j/cran-all/cranData/BiTrinA/inst/doc/Vignette.R
|
# BiasCorrector: A GUI to Correct Measurement Bias in DNA Methylation Analyses
# Copyright (C) 2019-2022 Lorenz Kapsner
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# report the number of deleted rows to the user
omitnas_modal <- function(omitnas,
data_type) {
if (!is.null(omitnas)) {
if (data_type == "experimental") {
data_type <- "experimental data"
} else {
data_type <- "calibration data"
}
if (omitnas == 1) {
message <- paste0("Deleted 1 row containing missing ",
"values from ", data_type, ".")
} else {
message <- paste0("Deleted ", omitnas, " rows containing ",
"missing values from ", data_type, ".")
}
# show modal here
shiny::showModal(shiny::modalDialog(
message,
title = "Missing values deleted"
))
}
invisible(gc())
}
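# --- usage sketch (not part of the original source) ---
# omitnas_modal() calls shiny::showModal() and therefore needs a running
# shiny session; a minimal, hedged app that pops the modal on startup:
if (interactive()) {
  shiny::shinyApp(
    ui = shiny::fluidPage(),
    server = function(input, output) {
      omitnas_modal(omitnas = 3, data_type = "experimental")
    }
  )
}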
|
/scratch/gouwar.j/cran-all/cranData/BiasCorrector/R/app_omitnas_modal.R
|
# BiasCorrector: A GUI to Correct Measurement Bias in DNA Methylation Analyses
# Copyright (C) 2019-2022 Lorenz Kapsner
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# render regression-statistics table
render_regressionstatistics <- function(dt,
mode = NULL,
minmax) {
#% col2rgb("lawngreen"): red=124, green=252, blue=0
#% rgb (124, 252, 0, max=255, alpha=90) --> "#7CFC005A"
# https://stackoverflow.com/questions/49636423/how-to-change-the-
# cell-color-of-a-cell-of-an-r-shiny-data-table-dependent-on-it
if (isFALSE(minmax)) {
cols <- c(
"Name", "Relative error", # 2
"SSE [h]", "R\u00B2 [h]", "a", "b", "d", "b1", "s", " ", # 8
# "SSE [c]", "R\u00B2 [c]", "ax\u00B3", "bx\u00B2", "cx", "d",
"SSE [c]", "R\u00B2 [c]", "a", "b", "c", "d", # 6
"better_model"
)
ncols <- 16
hyperlength <- 9
lastcolor <- ncols
} else if (isTRUE(minmax)) {
cols <- c(
"Name", "Relative error", # 2
"SSE [h]", "R\u00B2 [h]", "b", " ", # 4
# "SSE [c]", "R\u00B2 [c]", "ax\u00B3", "bx\u00B2", " ",
"SSE [c]", "R\u00B2 [c]", "a", "b", " ", # 5
"y\u2080", "y\u2081", "m\u2080", "m\u2081", # 4
"better_model"
)
ncols <- 15
hyperlength <- 5
lastcolor <- 10
}
if (is.null(mode)) {
dt[, ("better_sse") := ifelse(
get("SSE_cubic") <= get("SSE_hyperbolic"),
1,
0
)]
cols <- c(cols, "better_sse")
t <- DT::datatable(dt,
colnames = cols,
rownames = FALSE,
options = list(
scrollX = TRUE,
pageLength = 20,
columnDefs = list(
list(
targets = c(ncols, ncols + 1),
visible = FALSE
)
),
dom = "ltip"
)
) %>%
DT::formatRound(columns = c(2:ncols), digits = 3) %>%
# hyperbolic parameters
DT::formatStyle(
columns = 3,
valueColumns = "better_sse",
fontWeight = DT::styleEqual(0, "bold")
) %>%
DT::formatStyle(
columns = 3:hyperlength,
valueColumns = "better_model",
backgroundColor = DT::styleEqual(0, "#7CFC005A")
) %>%
# cubic parameters
DT::formatStyle(
columns = hyperlength + 2,
valueColumns = "better_sse",
fontWeight = DT::styleEqual(1, "bold")
) %>%
DT::formatStyle(
columns = (hyperlength + 2):lastcolor,
valueColumns = "better_model",
backgroundColor = DT::styleEqual(1, "#7CFC005A")
) # %>%
#" formatStyle(columns = c(1:11), fontSize = "80%")
} else if (mode == "corrected") {
t <- DT::datatable(dt,
colnames = cols,
rownames = FALSE,
options = list(
scrollX = TRUE,
pageLength = 20,
columnDefs = list(
list(
targets = ncols,
visible = FALSE
)
),
dom = "ltip"
)
) %>%
DT::formatRound(columns = c(2:ncols), digits = 3)
} else {
t <- "error"
}
return(t)
}
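# --- usage sketch (not part of the original source) ---
# Illustrates the 'better_sse' flag computed above in isolation: rows
# where the cubic fit has the lower SSE are marked 1, otherwise 0. The
# column names are assumptions mirroring the data.table this function
# expects from the regression step:
if (interactive()) {
  dt <- data.table::data.table(
    Name = c("CpG1", "CpG2"),
    SSE_hyperbolic = c(0.12, 0.40),
    SSE_cubic = c(0.20, 0.15)
  )
  dt[, ("better_sse") := ifelse(
    get("SSE_cubic") <= get("SSE_hyperbolic"), 1, 0
  )]
  print(dt)  # CpG1 -> 0 (hyperbolic better), CpG2 -> 1 (cubic better)
}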
|
/scratch/gouwar.j/cran-all/cranData/BiasCorrector/R/app_render_regressionstatistics.R
|
# BiasCorrector: A GUI to Correct Measurement Bias in DNA Methylation Analyses
# Copyright (C) 2019-2022 Lorenz Kapsner
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# requirements error + modal view
requirements_error <- function(data_type) {
footer_dis <- shiny::actionButton("dismiss_modal", label = "Dismiss")
title_filereq <- "File requirements error!"
if (data_type %in% c("experimentalFile", "calibrationFile")) {
title <- title_filereq
message <- paste0("The file provided does not meet the file ",
"requirements! Please upload a new file! For the ",
"specific CSV file requirements please refere ",
"to our FAQ.")
footer <- footer_dis
} else if (data_type == "locusname") {
title <- "No locus specified!"
message <- paste0("Please specify an appropriate name for the gene ",
"or locus of your experiment!")
footer <- footer_dis
} else if (data_type == "samplename") {
title <- "No sample name specified!"
message <- paste0("Please specify an appropriate name of the sample ",
"of your experiment!")
footer <- footer_dis
} else if (data_type == "csv") {
title <- title_filereq
message <- paste0("The files provided do not meet the file requirements! ",
"Please upload new files! For the specific CSV file ",
"requirements please refere to our FAQ.")
footer <- footer_dis
} else if (data_type == "dim") {
title <- title_filereq
message <- paste0("The files provided do not meet the file requirements! ",
"All files have to have the same number of columns and ",
"rows. Please upload new files! For the specific CSV ",
"file requirements please refere to our FAQ.")
footer <- footer_dis
} else if (data_type == "four") {
title <- title_filereq
message <- paste0("Please upload at least 4 CSV files containing the ",
"calibration data. For the specific CSV file ",
"requirements please refere to our FAQ.")
footer <- footer_dis
} else if (data_type == "naming") {
title <- title_filereq
message <- paste0("The files provided do not meet the file requirements! ",
"All files need to have the same rownames (locus ids) ",
"and columnnames (CpG sites). Please upload new files! ",
"For the specific CSV file requirements please refere ",
"to our FAQ.")
footer <- footer_dis
} else if (data_type == "filename") {
title <- title_filereq
message <- paste0("The files provided do not meet the file requirements! ",
"Filenaming of the calibration files must be done ",
"properly. Please upload new files! For the specific ",
"CSV file requirements please refer to our FAQ.")
footer <- footer_dis
} else if (data_type == "calibrange") {
title <- title_filereq
message <- paste0("The file provided does not meet the file ",
"requirements! Calibration steps must be in range ",
"'0 <= calibration step <= 100'. Please upload a new ",
"file! For the specific CSV file requirements please ",
"refer to our FAQ.")
footer <- footer_dis
} else if (data_type == "calibrange2") {
title <- title_filereq
message <- paste0("The files provided do not meet the file requirements! ",
"Calibration steps must be in range '0 <= calibration ",
"step <= 100'. Please upload new files! For the ",
"specific CSV file requirements please refere to our ",
"FAQ.")
footer <- footer_dis
} else if (data_type == "calibrange3") {
title <- title_filereq
message <- paste0("The calibration steps provided do not meet the file ",
"requirements! Calibration steps must be in range ",
"'0 <= calibration step <= 100'. Each calibration step ",
"may only be assigned once. Please upload new files! ",
"For the specific CSV file requirements please refere ",
"to our FAQ.")
footer <- shiny::modalButton("OK")
} else if (data_type == "inconsistency") {
title <- title_filereq
message <- paste0("The files provided do not meet the file requirements! ",
"Please specify an equal number of CpG-sites for each ",
"gene locus. Please upload new files! For the specific ",
"CSV file requirements please refere to our FAQ.")
footer <- footer_dis
}
shinyjs::logjs(message)
# show modal here
shiny::showModal(shiny::modalDialog(
message,
title = title,
footer = footer
))
}
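# --- usage sketch (not part of the original source) ---
# requirements_error() also needs a running shiny session; here the
# "four" branch is triggered, which complains about too few calibration
# files (shinyjs::useShinyjs() is included so shinyjs::logjs() can log):
if (interactive()) {
  shiny::shinyApp(
    ui = shiny::fluidPage(shinyjs::useShinyjs()),
    server = function(input, output) {
      requirements_error("four")
    }
  )
}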
|
/scratch/gouwar.j/cran-all/cranData/BiasCorrector/R/app_requirements_error.R
|
# BiasCorrector: A GUI to Correct Measurement Bias in DNA Methylation Analyses
# Copyright (C) 2019-2022 Lorenz Kapsner
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# helper to open a modal and update the reactive values accordingly
open_modal <- function(description, rv) {
rv$modal_closed <- FALSE
rv$modal_type <- description
requirements_error(description)
}
|
/scratch/gouwar.j/cran-all/cranData/BiasCorrector/R/app_utils.R
|
# BiasCorrector: A GUI to Correct Measurement Bias in DNA Methylation Analyses
# Copyright (C) 2019-2022 Lorenz Kapsner
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#' @title Launch BiasCorrector
#'
#' @param port The port on which BiasCorrector is running (default: 3838)
#' @param maxfilesize A positive integer. The maximum file size allowed
#' for upload.
#' @param logfilename A character string. The name of the logfile
#' (default = biascorrector.log).
#' @param plotdir A character string. Defaults to 'plots'. This directory
#' is being created inside tempdir.
#' @param csvdir A character string. Defaults to 'csv'. This directory is
#' being created inside tempdir.
#' @param parallel A boolean. If TRUE (default),
#'   `future::plan("multiprocess")` is initialized before running the code.
#'
#' @return The function returns the BiasCorrector shiny application.
#'
#' @import shiny shinydashboard
#' @importFrom magrittr "%>%"
#' @importFrom data.table .N ":="
#'
#' @examples
#' if (interactive()) {
#' launch_app()
#' }
#'
#' @export
#'
launch_app <- function(port = 3838,
plotdir = "plots",
csvdir = "csv",
logfilename = "biascorrector.log",
maxfilesize = 100,
parallel = TRUE) {
#" stopifnot(
#" is.numeric(maxfilesize),
#" maxfilesize > 0,
#" is.character(logfilename),
#" is.character(plotdir),
#" is.character(csvdir),
#" is.numeric(port)
#" )
tempdir <- tempdir()
global_env_hack <- function(key,
val,
pos) {
assign(
key,
val,
envir = as.environment(pos)
)
}
global_env_hack("tempdir",
tempdir,
1L)
global_env_hack("plotdir",
paste0(tempdir, "/", plotdir, "/"),
1L)
global_env_hack("csvdir",
paste0(tempdir, "/", csvdir, "/"),
1L)
# logfilename
global_env_hack("logfilename",
paste0(tempdir, "/", logfilename),
1L)
# maximum filesize in MB
global_env_hack("maxfilesize",
maxfilesize,
1L)
global_env_hack("parallel",
parallel,
1L)
# set shiny option here
options(shiny.maxRequestSize = maxfilesize * 1024^2)
options(shiny.port = port)
shiny::shinyAppDir(
appDir = system.file("application",
package = "BiasCorrector")
)
}
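# --- usage sketch (not part of the original source) ---
# Launching with non-default settings; the values are illustrative only:
if (interactive()) {
  launch_app(port = 8080, maxfilesize = 200, parallel = FALSE)
}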
|
/scratch/gouwar.j/cran-all/cranData/BiasCorrector/R/launch_app.R
|
# BiasCorrector: A GUI to Correct Measurement Bias in DNA Methylation Analyses
# Copyright (C) 2019-2022 Lorenz Kapsner
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#' @title module_calibrationfile_server
#'
#' @param input Shiny server input object
#' @param output Shiny server output object
#' @param session Shiny session object
#' @param rv The global 'reactiveValues()' object, defined in server.R
#' @param input_re The Shiny server input object, wrapped into a reactive
#' expression: input_re = reactive({input})
#' @param ... Further arguments, such as `logfilename`, `csvdir` and `plotdir`
#'
#' @return The function returns a shiny server module.
#'
#' @seealso \url{https://shiny.rstudio.com/articles/modules.html}
#'
#' @examples
#' if (interactive()) {
#' rv <- list()
#' logfilename <- paste0(tempdir(), "/log.txt")
#' shiny::callModule(
#' module_calibrationfile_server,
#' "moduleCalibrationFile",
#' rv = rv,
#' logfilename = logfilename
#' )
#' }
#'
#' @export
#'
# module_calibrationfile_server
module_calibrationfile_server <- function(input,
output,
session,
rv,
input_re,
...) {
arguments <- list(...)
# error handling with fileimport
observeEvent(
eventExpr = {
req(isTRUE(rv$type2cal_uploaded) || isTRUE(rv$type1cal_uploaded))
},
handlerExpr = {
rBiasCorrection::write_log(
message = paste0("(app) Entered observeEvent after fileimport of ",
"calibration file"),
logfilename = arguments$logfilename
)
# if type 1 data
if (rv$type_locus_sample == "1") {
# check here if rows containing missing values have been deleted
tryCatch(
expr = {
omitnas_modal(rv$omitnas, "calibration")
rv$omitnas <- NULL
},
error = function(e) {
e
}
)
output$calibration_data <- renderUI({
# the prefix "moduleCalibrationFile" is necessary, otherwise,
# one is not able to load the datatable here
DT::dataTableOutput("moduleCalibrationFile-dt1")
})
output$dt1 <- DT::renderDataTable({
DT::datatable(rv$fileimport_calibration,
options = list(scrollX = TRUE,
pageLength = 20,
dom = "ltip",
rowCallback = DT::JS(rv$row_callback)),
rownames = FALSE) %>%
DT::formatRound(columns = c(2:ncol(rv$fileimport_calibration)),
digits = 3)
})
output$cal_samples <- reactive({
len <- unique(rv$fileimport_calibration[, get("true_methylation")])
message <- paste0("Number of unique calibration samples: ",
length(len))
rBiasCorrection::write_log(message = message,
logfilename = arguments$logfilename)
message
})
output$cal_samples_raw <- reactive({
len <- unique(rv$fileimport_calibration[, get("true_methylation")])
message <- paste0("Unique calibration steps (% methylation):\n",
paste(len, collapse = "\n"))
rBiasCorrection::write_log(message = message,
logfilename = arguments$logfilename)
message
})
# aggregated data
output$calibration_data_aggregated <- DT::renderDataTable({
DT::datatable(rv$aggregated_calibration,
options = list(scrollX = TRUE,
pageLength = 20,
dom = "ltip",
rowCallback = DT::JS(rv$row_callback)),
rownames = FALSE) %>%
DT::formatRound(columns = c(3:ncol(rv$aggregated_calibration)),
digits = 3)
})
# Download calibration data
output$download_calibration <- downloadHandler(
filename = function() {
paste0("raw_calibration_data.csv")
},
content = function(file) {
rBiasCorrection::write_csv(rv$fileimport_calibration, file)
},
contentType = "text/csv"
)
# Download aggregated calibration data
output$download_calibration_aggr <- downloadHandler(
filename = function() {
paste0("aggregated_calibration_data.csv")
},
content = function(file) {
rBiasCorrection::write_csv(rv$aggregated_calibration, file)
},
contentType = "text/csv"
)
# if type 2 data
} else if (rv$type_locus_sample == "2") {
# render assignment of calibration steps
output$calibration_data <- renderUI({
select_output_list <- lapply(seq_len(
nrow(rv$calibr_steps)
), function(g) {
selectname <- paste0("select", g)
div(
class = "row",
div(
class = "col-sm-6", style = "text-align: left",
h5(tags$b(paste0(rv$calibr_steps[g, get("name")], ":")))
),
div(
class = "col-sm-6", style = "text-align: center",
numericInput(
inputId = selectname,
min = 0,
max = 100,
label = NULL,
step = 0.01,
value = rv$calibr_steps[g, get("step")],
width = "100%"
)
),
tags$hr(style = "margin: 0.5%")
)
})
select_output_list <- list(
select_output_list,
div(
class = "row", style = "text-align: center",
actionButton("confirm_steps",
"Confirm assignment of calibration steps")
)
)
do.call(tagList, select_output_list)
})
output$cal_samples <- reactive({
message <- paste0("Unique calibration samples: ",
nrow(rv$calibr_steps))
rBiasCorrection::write_log(message = message,
logfilename = arguments$logfilename)
message
})
output$cal_samples_raw <- reactive({
message <- paste0("Unique calibration steps:\n",
paste(levels(
factor(rv$calibr_steps[, get("step")])
),
collapse = "\n"))
rBiasCorrection::write_log(message = message,
logfilename = arguments$logfilename)
message
})
}
}
)
# confirm-Button for Type2-Data
observeEvent(input_re()$confirm_steps, {
rv$choices_list <- data.table::data.table(
"name" = character(),
"step" = numeric()
)
lapply(seq_len(nrow(rv$calibr_steps)), function(g) {
selectname <- paste0("select", g)
rv$choices_list <- rbind(
rv$choices_list,
cbind("name" = rv$calibr_steps[g, get("name")],
"step" = as.numeric(
eval(parse(text = paste0("input_re()$", selectname))))
)
)
})
message(rv$choices_list)
# assign rv$fileimport_calibration
filecheck <- type2_fileconfirm(rv$fileimport_list,
rv$choices_list,
rv)
if (is.character(filecheck)) {
open_modal(filecheck, rv)
} else {
# store correct formatted calibration data in reactive list
rv$fileimport_calibration <- filecheck
removeUI(selector = "#moduleCalibrationFile-calibration_data",
immediate = TRUE)
# create reactive selectinput:
sel_in <- reactive({
selectInput(inputId = "selectType2",
label = "Select locus:",
multiple = FALSE,
selectize = FALSE,
choices = names(rv$fileimport_calibration))
})
# create reactive df-selection:
df <- reactive({
temp <- rv$fileimport_calibration[[input_re()$selectType2]]
})
output$calibration_select <- renderUI({
s <- sel_in()
do.call(tagList, list(tags$hr(), s))
})
# render the UI output
output$calibration_data2 <- renderUI({
output$dt2 <- DT::renderDataTable({
temp <- df()
DT::datatable(temp,
options = list(scrollX = TRUE,
pageLength = 20,
dom = "ltip",
rowCallback = DT::JS(rv$row_callback)),
rownames = FALSE) %>%
DT::formatRound(columns = c(2:ncol(temp)), digits = 3)
})
# merge selectInput and dataframe to list
output_list <- list(DT::dataTableOutput("moduleCalibrationFile-dt2"))
# print out list!
do.call(tagList, output_list)
})
# Download experimental data
output$download_calibration <- downloadHandler(
filename = function() {
paste0("raw_calibration_data_",
gsub("[[:punct:]]", "", input_re()$selectType2),
".csv")
},
content = function(file) {
rBiasCorrection::write_csv(df(), file)
},
contentType = "text/csv"
)
}
})
}
#' @title module_calibrationfile_ui
#'
#' @param id A character. The identifier of the shiny object
#'
#' @return The function returns a shiny ui module.
#'
#' @seealso \url{https://shiny.rstudio.com/articles/modules.html}
#'
#' @examples
#' if (interactive()) {
#' shinydashboard::tabItems(
#' shinydashboard::tabItem(
#' tabName = "calibration",
#' module_calibrationfile_ui(
#' "moduleCalibrationFile"
#' )
#' )
#' )
#' }
#'
#' @export
#'
# module_calibrationfile_ui
module_calibrationfile_ui <- function(id) {
ns <- NS(id)
tagList(
fluidRow(
column(
9,
box(
tabsetPanel(
tabPanel(
"Calibration Data",
uiOutput(ns("calibration_data")),
uiOutput(ns("calibration_data2"))
),
tabPanel(
"Aggregated Calibration Data",
DT::dataTableOutput(ns("calibration_data_aggregated"))
)
),
width = 12
)
),
column(
3,
box(verbatimTextOutput(ns("cal_samples")),
verbatimTextOutput(ns("cal_samples_raw")),
tags$head(tags$style(paste0("#cal_samples_raw{overflow-y:scroll; ",
"max-height: 10vh; ",
"background: ghostwhite;}"))),
uiOutput(ns("calibration_select")),
tags$hr(),
div(class = "row",
style = "text-align: center;",
shinyjs::disabled(
downloadButton(ns("download_calibration"),
"Download calibration file",
style = paste0(
"white-space: normal; ",
"text-align:center; ",
"padding: 9.5px 9.5px 9.5px 9.5px; ",
"margin: 6px 10px 6px 10px;")
))),
tags$hr(),
div(class = "row",
style = "text-align: center;",
downloadButton(ns("download_calibration_aggr"),
"Download aggregated calibration file",
style = paste0(
"white-space: normal; ",
"text-align:center; ",
"padding: 9.5px 9.5px 9.5px 9.5px; ",
"margin: 6px 10px 6px 10px;")
)),
tags$hr(),
width = 12
)
)
)
)
}
|
/scratch/gouwar.j/cran-all/cranData/BiasCorrector/R/moduleCalibrationFile.R
|
# BiasCorrector: A GUI to Correct Measurement Bias in DNA Methylation Analyses
# Copyright (C) 2019-2022 Lorenz Kapsner
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#' @title module_correctedplots_server
#'
#' @inheritParams module_calibrationfile_server
#'
#' @return The function returns a shiny server module.
#'
#' @seealso \url{https://shiny.rstudio.com/articles/modules.html}
#'
#' @examples
#' if (interactive()) {
#' rv <- list()
#' logfilename <- paste0(tempdir(), "/log.txt")
#' shiny::callModule(
#' module_correctedplots_server,
#' "moduleCorrectedPlots",
#' rv = rv,
#' logfilename = logfilename
#' )
#' }
#'
#' @export
#'
# module_correctedplots_server
module_correctedplots_server <- function(input,
output,
session,
rv,
input_re,
...) {
arguments <- list(...)
# correct all hyperbolic
observe({
req(rv$plotting_finished)
if (is.null(rv$fileimport_cal_corrected_h)) {
if (rv$type_locus_sample == "1") {
withProgress(message = "BiasCorrecting calibration data", value = 0, {
incProgress(
1 / 2,
detail = "... using hyperbolic regression parameters ...")
# hyperbolic correction
rv$choices_list <- rv$reg_stats[, c("Name"), with = FALSE
][
, ("better_model") := 0
]
# correct calibration data (to show corrected calibration curves)
solved_eq_h <- rBiasCorrection::solving_equations(
datatable = rv$fileimport_calibration,
regmethod = rv$choices_list,
type = 1,
rv = rv,
mode = "corrected",
logfilename = arguments$logfilename,
minmax = rv$minmax
)
rv$fileimport_cal_corrected_h <- solved_eq_h[["results"]]
colnames(rv$fileimport_cal_corrected_h) <- colnames(
rv$fileimport_calibration
)
rv$substitutions_corrected_h <- solved_eq_h[["substitutions"]]
colnames(rv$substitutions_corrected_h) <- c(
"Sample ID",
"CpG site",
"BiasCorrected value",
"Substituted value",
"Regression"
)
})
} else if (rv$type_locus_sample == "2") {
message("fileimport_cal_corrected_h: Not implemented yet.\n")
# TODO
# Calibration Data (to show corrected calibration curves)
# initialize calibration results list
#% rv$fileimport_cal_corrected <- list()
#% # iterate over fileimport_cal (in type 2 data, this is a list
#% # with one calibrationdata data.table for each locus)
#% for (a in names(rv$fileimport_cal)) {
#% # get unique elements of true_methylation for one specific locus
#% # (we are treating them here as if they were sample ids)
#% for (b in rv$fileimport_cal[[a]][,unique(true_methylation)]) {
#% # get the regression parameters of that locus (locusname is
#% # saved in "a")
#% rv$result_list <- rv$result_list_type2[[a]]
#% # get subset of the calibration data of that methylation step
#% caldata <- rv$fileimport_cal[[a]][true_methylation==b,]
#% nc <- ncol(caldata)
#% vec <- c("true_methylation",
#% colnames(caldata)[2:(nc-1)], "row_means")
#% # solve equation for that calibrationstep
#% # save result of each calibrationstep in tmp object
#% tmp <- rBiasCorrection::solving_equations(
#% datatable = caldata[,vec,with=F],
#% regmethod = rv$reg_stats[[a]][,.(Name, better_model)],
#% type = 2,
#% rv = rv,
#% mode = "corrected",
#% logfilename = arguments$logfilename)[["results"]]
#% # immediately rename column names
#% colnames(tmp) <- vec
#% # if new calibration step is saved for the first time
#% if (is.null(rv$fileimport_cal_corrected[[a]])) {
#% rv$fileimport_cal_corrected[[a]] <- tmp
#% } else {
#% # we should not need fill, since there should be no
#% # differences in colnames for one file
#% rv$fileimport_cal_corrected[[a]] <- rbind(
#% rv$fileimport_cal_corrected[[a]],
#% tmp,
#% use.names=T, fill=F)
#% }
#% }
#% }
}
}
})
# correct all cubic
observe({
req(rv$plotting_finished)
if (is.null(rv$fileimport_cal_corrected_c)) {
if (rv$type_locus_sample == "1") {
withProgress(message = "BiasCorrecting calibration data", value = 0, {
incProgress(
1 / 2,
detail = "... using cubic regression parameters ...")
# cubic correction
rv$choices_list <- rv$reg_stats[, c("Name"), with = FALSE
][
, ("better_model") := 1
]
# correct calibration data (to show corrected calibration curves)
solved_eq_c <- rBiasCorrection::solving_equations(
datatable = rv$fileimport_calibration,
regmethod = rv$choices_list,
type = 1,
rv = rv,
mode = "corrected",
logfilename = arguments$logfilename,
minmax = rv$minmax
)
rv$fileimport_cal_corrected_c <- solved_eq_c[["results"]]
colnames(rv$fileimport_cal_corrected_c) <- colnames(
rv$fileimport_calibration
)
rv$substitutions_corrected_c <- solved_eq_c[["substitutions"]]
colnames(rv$substitutions_corrected_c) <- c(
"Sample ID",
"CpG site",
"BiasCorrected value",
"Substituted value",
"Regression"
)
})
} else if (rv$type_locus_sample == "2") {
message("fileimport_cal_corrected_c: Not implemented yet.\n")
}
}
})
observeEvent(
eventExpr = {
# this is needed to start plotting once we have the bias-
# corrected calibration values!
req(!is.null(rv$fileimport_cal_corrected_h) &
!is.null(rv$fileimport_cal_corrected_c))
},
handlerExpr = {
# type 1 data:
if (rv$type_locus_sample == "1") {
if (isFALSE(rv$corrected_finished)) {
# plot hyperbolic
withProgress(
message = "Plotting BiasCorrected calibration plots", value = 0, {
incProgress(
1 / 2,
detail = "... working hard on hyperbolic correction ...")
# calculate new calibration curves from corrected calibration data
regression_results <- rBiasCorrection::regression_utility(
data = rv$fileimport_cal_corrected_h,
samplelocusname = rv$sample_locus_name,
rv = rv,
mode = "corrected",
logfilename = arguments$logfilename,
minmax = rv$minmax,
seed = rv$seed
)
plotlist_reg <- regression_results[["plot_list"]]
rv$result_list_hyperbolic <- regression_results[["result_list"]]
rBiasCorrection::plotting_utility(
data = rv$fileimport_cal_corrected_h,
plotlist_reg = plotlist_reg,
type = 1,
samplelocusname = rv$sample_locus_name,
locus_id = NULL,
rv = rv,
mode = "corrected_h",
plotdir = arguments$plotdir,
logfilename = arguments$logfilename,
minmax = rv$minmax,
plot_height = rv$plot_height,
plot_width = rv$plot_width,
plot_textsize = rv$plot_textsize
)
# save regression statistics to reactive value
rv$reg_stats_corrected_h <- rBiasCorrection::statistics_list(
resultlist = rv$result_list_hyperbolic,
minmax = rv$minmax
)
for (i in rv$choices_list[, get("Name")]) {
rv$reg_stats_corrected_h[
get("Name") == i, ("better_model") := rv$choices_list[
get("Name") == i, as.integer(
as.character(get("better_model"))
)]
]
}
rBiasCorrection::createbarerrorplots(
statstable_pre = rv$reg_stats,
statstable_post = rv$reg_stats_corrected_h,
rv = rv,
type = 1,
plotdir = arguments$plotdir,
logfilename = arguments$logfilename,
mode = "corrected_h",
plot_height = rv$plot_height,
plot_width = rv$plot_width,
plot_textsize = rv$plot_textsize
)
})
# plot cubic
withProgress(
message = "Plotting BiasCorrected calibration plots",
value = 0, {
incProgress(
1 / 2,
detail = "... working hard on cubic correction ...")
# calculate new calibration curves from corrected calibration data
regression_results <- rBiasCorrection::regression_utility(
data = rv$fileimport_cal_corrected_c,
samplelocusname = rv$sample_locus_name,
rv = rv,
mode = "corrected",
logfilename = arguments$logfilename,
minmax = rv$minmax,
seed = rv$seed
)
plotlist_reg <- regression_results[["plot_list"]]
rv$result_list_cubic <- regression_results[["result_list"]]
rBiasCorrection::plotting_utility(
data = rv$fileimport_cal_corrected_c,
plotlist_reg = plotlist_reg,
type = 1,
samplelocusname = rv$sample_locus_name,
locus_id = NULL,
rv = rv,
mode = "corrected_c",
plotdir = arguments$plotdir,
logfilename = arguments$logfilename,
minmax = rv$minmax,
plot_height = rv$plot_height,
plot_width = rv$plot_width,
plot_textsize = rv$plot_textsize
)
# save regression statistics to reactive value
rv$reg_stats_corrected_c <- rBiasCorrection::statistics_list(
resultlist = rv$result_list_cubic,
minmax = rv$minmax
)
for (i in rv$choices_list[, get("Name")]) {
rv$reg_stats_corrected_c[
get("Name") == i, ("better_model") := rv$choices_list[
get("Name") == i, as.integer(
as.character(get("better_model"))
)]
]
}
rBiasCorrection::createbarerrorplots(
statstable_pre = rv$reg_stats,
statstable_post = rv$reg_stats_corrected_c,
rv = rv,
type = 1,
plotdir = arguments$plotdir,
logfilename = arguments$logfilename,
mode = "corrected_c",
plot_height = rv$plot_height,
plot_width = rv$plot_width,
plot_textsize = rv$plot_textsize
)
})
# when finished
rv$corrected_finished <- TRUE
rBiasCorrection::write_log(
message = "Finished plotting corrected",
logfilename = arguments$logfilename)
}
# else if type 2 data
} else if (rv$type_locus_sample == "2") {
if (isFALSE(rv$corrected_finished)) {
a <- 1
rv$result_list_type2_corrected <- list()
withProgress(
message = "Plotting BiasCorrected results",
value = 0, {
incProgress(
1 / 2,
detail = "... working hard ...")
for (locus in names(rv$fileimport_cal_corrected)) {
rv$vec_cal <- names(rv$fileimport_cal_corrected[[a]])[-1]
#% print(paste("Length rv$vec_cal:", length(rv$vec_cal)))
regression_results <- rBiasCorrection::regression_utility(
data = rv$fileimport_cal_corrected[[a]],
samplelocusname = rv$sample_locus_name,
locus_id = gsub("[[:punct:]]", "", locus),
rv = rv,
mode = "corrected",
logfilename = arguments$logfilename,
minmax = rv$minmax,
seed = rv$seed
)
plotlist_reg <- regression_results[["plot_list"]]
rv$result_list <- regression_results[["result_list"]]
rBiasCorrection::plotting_utility(
data = rv$fileimport_cal_corrected[[a]],
plotlist_reg = plotlist_reg,
type = 2,
samplelocusname = rv$sample_locus_name,
locus_id = gsub("[[:punct:]]", "", locus),
rv = rv,
mode = "corrected",
plotdir = arguments$plotdir,
logfilename = arguments$logfilename,
minmax = rv$minmax,
plot_height = rv$plot_height,
plot_width = rv$plot_width,
plot_textsize = rv$plot_textsize
)
# save regression statistics to reactive value
waround <- rBiasCorrection::statistics_list(
resultlist = rv$result_list,
minmax = rv$minmax
)
rv$reg_stats_corrected[[locus]] <- waround
rv$result_list_type2_corrected[[locus]] <- rv$result_list
# create barplots
rBiasCorrection::createbarerrorplots(
statstable_pre = rv$reg_stats[[locus]],
statstable_post = rv$reg_stats_corrected[[locus]],
rv = rv,
type = 2,
locus_id = locus,
plotdir = arguments$plotdir,
logfilename = arguments$logfilename,
plot_height = rv$plot_height,
plot_width = rv$plot_width,
plot_textsize = rv$plot_textsize
)
a <- a + 1
}
})
# on finished
rv$corrected_finished <- TRUE
rBiasCorrection::write_log(
message = "Finished plotting corrected",
logfilename = arguments$logfilename)
}
}
}
)
# when plotting has finished
observe({
req(rv$corrected_finished)
# type 1 data:
if (rv$type_locus_sample == "1") {
### Plot tab ###
# create a list of plotnames to populate selectInput
plot_output_list <- lapply(
seq_len(length(rv$vec_cal)),
function(g) {
paste0(gsub("[[:punct:]]", "", rv$vec_cal[g]))
})
names(plot_output_list) <- rv$vec_cal
# create reactive selectinput:
sel_in2 <- reactive({
selectInput(inputId = "selectPlot_corrected",
label = "Select CpG site:",
multiple = FALSE,
selectize = FALSE,
choices = plot_output_list)
})
# create download button for each plot
output$download_plots_corrected_h <- downloadHandler(
filename = function() {
paste0(rv$sample_locus_name,
"_",
gsub("[[:punct:]]",
"",
input_re()$selectPlot_corrected),
"_corrected_h.png")
},
content = function(file) {
file.copy(paste0(arguments$plotdir,
rv$sample_locus_name,
"_",
gsub("[[:punct:]]",
"",
input_re()$selectPlot_corrected),
"_corrected_h.png"),
file)
},
contentType = "image/png"
)
output$download_plot_sse_corrected_h <- downloadHandler(
filename = function() {
paste0(rv$sample_locus_name,
"_error_",
gsub("[[:punct:]]",
"",
input_re()$selectPlot_corrected),
"_corrected_h.png")
},
content = function(file) {
file.copy(paste0(arguments$plotdir,
rv$sample_locus_name,
"_error_",
gsub("[[:punct:]]",
"",
input_re()$selectPlot_corrected),
"_corrected_h.png"),
file)
},
contentType = "image/png"
)
output$download_plots_corrected_c <- downloadHandler(
filename = function() {
paste0(rv$sample_locus_name,
"_",
gsub("[[:punct:]]",
"",
input_re()$selectPlot_corrected),
"_corrected_c.png")
},
content = function(file) {
file.copy(paste0(arguments$plotdir,
rv$sample_locus_name,
"_",
gsub("[[:punct:]]",
"",
input_re()$selectPlot_corrected),
"_corrected_c.png"),
file)
},
contentType = "image/png"
)
output$download_plot_sse_corrected_c <- downloadHandler(
filename = function() {
paste0(rv$sample_locus_name,
"_error_",
gsub("[[:punct:]]",
"",
input_re()$selectPlot_corrected),
"_corrected_c.png")
},
content = function(file) {
file.copy(paste0(arguments$plotdir,
rv$sample_locus_name,
"_error_",
gsub("[[:punct:]]",
"",
input_re()$selectPlot_corrected),
"_corrected_c.png"),
file)
},
contentType = "image/png"
)
# render head of page with selectInput and downloadbutton
output$select_plotinput_corrected <- renderUI({
s <- sel_in2()
do.call(tagList, list(s, tags$hr()))
})
# for debugging
observeEvent(input_re()$selectPlot_corrected, {
message(input_re()$selectPlot_corrected)
})
# render plots from local temporary file
output$plots_corrected_h <- renderImage(
expr = {
#% width <- session$clientData
##% [["output_moduleCorrectedPlots-plots_corrected_width"]]
filename <- paste0(arguments$plotdir,
rv$sample_locus_name,
"_", gsub("[[:punct:]]",
"",
input_re()$selectPlot_corrected),
"_corrected_h.png")
# Return a list containing the filename
#% list(src = filename,
#% width = width)
list(src = filename)
},
deleteFile = FALSE
)
output$plots_corrected_c <- renderImage(
expr = {
#% width <- session$clientData
##% [["output_moduleCorrectedPlots-plots_corrected_width"]]
filename <- paste0(arguments$plotdir,
rv$sample_locus_name,
"_", gsub("[[:punct:]]",
"",
input_re()$selectPlot_corrected),
"_corrected_c.png")
# Return a list containing the filename
#% list(src = filename,
#% width = width)
list(src = filename)
},
deleteFile = FALSE
)
# render plots from local temporary file
output$plots_sse_corrected_h <- renderImage(
expr = {
#% width <- session$clientData
##% [["output_moduleCorrectedPlots-plots_sse_corrected_width"]]
filename <- paste0(arguments$plotdir,
rv$sample_locus_name,
"_error_",
gsub("[[:punct:]]",
"",
input_re()$selectPlot_corrected),
"_corrected_h.png")
# Return a list containing the filename
#% list(src = filename,
#% width = width)
list(src = filename)
},
deleteFile = FALSE
)
output$plots_sse_corrected_c <- renderImage(
expr = {
#% width <- session$clientData
##% [["output_moduleCorrectedPlots-plots_sse_corrected_width"]]
filename <- paste0(arguments$plotdir,
rv$sample_locus_name,
"_error_",
gsub("[[:punct:]]",
"",
input_re()$selectPlot_corrected),
"_corrected_c.png")
# Return a list containing the filename
#% list(src = filename,
#% width = width)
list(src = filename)
},
deleteFile = FALSE
)
# type 2 data:
} else if (rv$type_locus_sample == "2") {
# create list of loci to populate selectInput "select_plotlocus"
list_plot_locus <- list()
for (i in seq_len(length(rv$fileimport_cal_corrected))) {
list_plot_locus[[i]] <- names(rv$fileimport_cal_corrected)[i]
}
select_plotlocus <- reactive({
selectInput(inputId = "select_plotlocus_corrected",
label = "Select locus:",
multiple = FALSE,
selectize = FALSE,
choices = list_plot_locus
)
})
# create list of cpg-sites for each locus to populate selectInput
# "select_plot_cpg"
list_plot_cpg <- list()
for (i in seq_len(length(rv$fileimport_cal_corrected))) {
list_plot_cpg[[names(rv$fileimport_cal_corrected)[i]]] <- names(
rv$fileimport_cal_corrected[[i]])[-1]
}
# only return list of CpG-sites for each locus, if there is already
# a selection of the locus in select_plotlocus
cpg_output <- reactive({
if (!is.null(input_re()$select_plotlocus_corrected)) {
return(list_plot_cpg[input_re()$select_plotlocus_corrected])
}
})
# always wrap selectInput into reactive-function
select_plot_cpg <- reactive({
selectInput(inputId = "select_plot_type2_corrected",
label = "Select CpG site:",
multiple = FALSE,
selectize = FALSE,
choices = cpg_output())
})
# render second selectInput
output$s2_plotoutput_corrected <- renderUI({
s3 <- select_plot_cpg()
s3
})
# create download button for each plot
output$download_plots_corrected <- downloadHandler(
filename = function() {
paste0(gsub("[[:punct:]]",
"",
input_re()$select_plotlocus_corrected),
"-",
rv$sample_locus_name,
"_",
gsub("[[:punct:]]",
"",
input_re()$select_plot_type2_corrected),
"_corrected.png")
},
content = function(file) {
file.copy(paste0(arguments$plotdir,
gsub("[[:punct:]]",
"",
input_re()$select_plotlocus_corrected),
"-",
rv$sample_locus_name,
"_",
gsub("[[:punct:]]",
"",
input_re()$select_plot_type2_corrected),
"_corrected.png"),
file)
},
contentType = "image/png"
)
output$download_plotssse_corrected_h <- downloadHandler(
filename = function() {
paste0("Errorplot_",
rv$sample_locus_name,
"_",
gsub("[[:punct:]]",
"",
input_re()$select_plotlocus_corrected),
"-",
gsub("[[:punct:]]",
"",
input_re()$select_plot_type2_corrected),
"_corrected_h.png")
},
content = function(file) {
file.copy(paste0(arguments$plotdir,
"Errorplot_",
rv$sample_locus_name,
"_",
gsub("[[:punct:]]",
"",
input_re()$select_plotlocus_corrected),
"-",
gsub("[[:punct:]]",
"",
input_re()$select_plot_type2_corrected),
"_corrected_h.png"),
file)
},
contentType = "image/png"
)
# render Plot UI
output$select_plotinput_corrected <- renderUI({
s1 <- select_plotlocus()
s2 <- uiOutput("moduleCorrectedPlots-s2_plotoutput_corrected")
b1 <- div(class = "row",
style = "text-align: center",
downloadButton(
"moduleCorrectedPlots-download_plots_corrected_h",
"Download Corrected Plot (hyperbolic correction)",
style = paste0(
"white-space: normal; ",
"text-align:center; ",
"padding: 9.5px 9.5px 9.5px 9.5px; ",
"margin: 6px 10px 6px 10px;")
))
c1 <- div(class = "row",
style = "text-align: center",
downloadButton(
"moduleCorrectedPlots-download_plotssse_corrected_h",
"Download Error Plot (hyperbolic correction)",
style = paste0(
"white-space: normal; ",
"text-align:center; ",
"padding: 9.5px 9.5px 9.5px 9.5px; ",
"margin: 6px 10px 6px 10px;")))
b2 <- div(class = "row",
style = "text-align: center",
downloadButton(
"moduleCorrectedPlots-download_plots_corrected_c",
"Download Corrected Plot (hyperbolic correction)",
style = paste0(
"white-space: normal; ",
"text-align:center; ",
"padding: 9.5px 9.5px 9.5px 9.5px; ",
"margin: 6px 10px 6px 10px;")))
c2 <- div(class = "row",
style = "text-align: center",
downloadButton(
"moduleCorrectedPlots-download_plotssse_corrected_c",
"Download Error Plot (hyperbolic correction)",
style = paste0(
"white-space: normal; ",
"text-align:center; ",
"padding: 9.5px 9.5px 9.5px 9.5px; ",
"margin: 6px 10px 6px 10px;")))
do.call(tagList, list(s1,
s2,
tags$hr(),
b1,
b2,
tags$hr(),
c1,
c2,
tags$hr())
)
})
# render plot from local temporary file
output$plots_corrected <- renderImage(
expr = {
#% width <- session$clientData
##% [["output_moduleCorrectedPlots-plots_corrected_width"]]
filename <- paste0(arguments$plotdir,
gsub("[[:punct:]]",
"",
input_re()$select_plotlocus_corrected),
"-",
rv$sample_locus_name,
"_",
gsub("[[:punct:]]",
"",
input_re()$select_plot_type2_corrected),
"_corrected.png")
#% print(filename)
# Return a list containing the filename
#% list(src = filename,
#% width = width)
list(src = filename)
},
deleteFile = FALSE
)
# render plots from local temporary file
output$plots_sse_corrected <- renderImage(
expr = {
#% width <- session$clientData
##% [["output_moduleCorrectedPlots-plots_sse_corrected_width"]]
filename <- paste0(arguments$plotdir,
gsub("[[:punct:]]",
"",
input_re()$select_plotlocus_corrected),
"-",
rv$sample_locus_name,
"_",
gsub("[[:punct:]]",
"",
input_re()$select_plot_type2_corrected),
"_errorplot.png")
# Return a list containing the filename
# list(src = filename,
# width = width)
list(src = filename)
},
deleteFile = FALSE
)
}
})
}
#' @title module_correctedplots_ui
#'
#' @param id A character. The identifier of the shiny object
#'
#' @return The function returns a shiny ui module.
#'
#' @seealso \url{https://shiny.rstudio.com/articles/modules.html}
#'
#' @examples
#' if (interactive()) {
#' shinydashboard::tabItems(
#' shinydashboard::tabItem(
#' tabName = "correctedplots",
#' module_correctedplots_ui(
#' "moduleCorrectedPlots"
#' )
#' )
#' )
#' }
#'
#' @export
#'
# module_correctedplots_ui
module_correctedplots_ui <- function(id) {
ns <- NS(id)
tagList(
fluidRow(
column(
9,
box(
title = "BiasCorrected Regression Plots",
fluidRow(
column(
6,
div(class = "row",
style = "text-align: center",
h5(tags$b(
"Calibration data corrected with hyperbolic regression:")
)),
imageOutput(ns("plots_corrected_h")),
tags$head(
tags$style(
type = "text/css",
paste0(
"#moduleCorrectedPlots-plots_corrected_h img ",
"{max-height: 100%; max-width: 100%; width: auto; ",
"display: block; margin-left: auto; margin-right: auto;}")
)
)
),
column(
6,
div(class = "row",
style = "text-align: center",
h5(tags$b(
paste0("Calibration data corrected with cubic ",
"polynomial regression:")))
),
imageOutput(ns("plots_corrected_c")),
tags$head(
tags$style(
type = "text/css",
paste0(
"#moduleCorrectedPlots-plots_corrected_c img ",
"{max-height: 100%; max-width: 100%; width: auto; ",
"display: block; margin-left: auto; margin-right: auto;}")
)
)
)
),
fluidRow(
column(
6,
div(class = "row",
style = "text-align: center",
downloadButton(
"moduleCorrectedPlots-download_plots_corrected_h",
"Download Corrected Plot (hyperbolic correction)",
style = paste0(
"white-space: normal; ",
"text-align:center; ",
"padding: 9.5px 9.5px 9.5px 9.5px; ",
"margin: 6px 10px 6px 10px;")))
),
column(
6,
div(class = "row",
style = "text-align: center",
downloadButton(
"moduleCorrectedPlots-download_plots_corrected_c",
"Download Corrected Plot (cubic correction)",
style = paste0(
"white-space: normal; ",
"text-align:center; ",
"padding: 9.5px 9.5px 9.5px 9.5px; ",
"margin: 6px 10px 6px 10px;")))
)
),
width = 12
),
box(
title = "Efficiency of BiasCorrection",
fluidRow(
column(
6,
div(class = "row",
style = "text-align: center",
h5(tags$b(
paste0("Theoretical efficiency of BiasCorrection ",
"with hyperbolic regression:")))
),
imageOutput(ns("plots_sse_corrected_h")),
tags$head(
tags$style(
type = "text/css",
paste0(
"#moduleCorrectedPlots-plots_sse_corrected_h img ",
"{max-height: 100%; max-width: 100%; width: auto; ",
"display: block; margin-left: auto; margin-right: auto;}")
)
)
),
column(
6,
div(class = "row",
style = "text-align: center",
h5(tags$b(
paste0("Theoretical efficiency of BiasCorrection ",
"with cubic polynomial regression:")))
),
imageOutput(ns("plots_sse_corrected_c")),
tags$head(
tags$style(
type = "text/css",
paste0(
"#moduleCorrectedPlots-plots_sse_corrected_c img ",
"{max-height: 100%; max-width: 100%; width: auto; ",
"display: block; margin-left: auto; margin-right: auto;}")
)
)
)
),
fluidRow(
column(
6,
div(class = "row",
style = "text-align: center",
downloadButton(
"moduleCorrectedPlots-download_plot_sse_corrected_h",
"Download Error Plot (hyperbolic correction)",
style = paste0(
"white-space: normal; ",
"text-align:center; ",
"padding: 9.5px 9.5px 9.5px 9.5px; ",
"margin: 6px 10px 6px 10px;")))
),
column(
6,
div(class = "row",
style = "text-align: center",
downloadButton(
"moduleCorrectedPlots-download_plot_sse_corrected_c",
"Download Error Plot (cubic correction)",
style = paste0(
"white-space: normal; ",
"text-align:center; ",
"padding: 9.5px 9.5px 9.5px 9.5px; ",
"margin: 6px 10px 6px 10px;")))
)
),
width = 12
)
),
column(
3,
box(
title = "Plot Selection",
uiOutput(ns("select_plotinput_corrected")),
width = 12
)
)
)
)
}
# ---- end of file: R/moduleCorrectedPlots.R ----
# BiasCorrector: A GUI to Correct Measurement Bias in DNA Methylation Analyses
# Copyright (C) 2019-2022 Lorenz Kapsner
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#' @title module_correctedstats_server
#'
#' @inheritParams module_calibrationfile_server
#'
#' @return The function returns a shiny server module.
#'
#' @seealso \url{https://shiny.rstudio.com/articles/modules.html}
#'
#' @examples
#' if (interactive()) {
#' rv <- list()
#' logfilename <- paste0(tempdir(), "/log.txt")
#' shiny::callModule(
#' module_correctedstats_server,
#' "moduleCorrectedStats",
#' rv = rv,
#' logfilename = logfilename
#' )
#' }
#'
#' @export
#'
# module_correctedstats_server
module_correctedstats_server <- function(input,
output,
session,
rv,
input_re) {
  # when the bias correction has finished
observe({
req(rv$corrected_finished)
## regression statistics
output$regression_stats_corrected_h <- renderUI({
output$dt_reg_corrected_h <- DT::renderDataTable({
dt <- rv$reg_stats_corrected_h
# use formatstyle to highlight lower SSE values
render_regressionstatistics(dt,
mode = "corrected",
minmax = rv$minmax)
})
d <- DT::dataTableOutput("moduleCorrectedStatistics-dt_reg_corrected_h")
do.call(tagList, list(d))
})
output$regression_stats_corrected_c <- renderUI({
output$dt_reg_corrected_c <- DT::renderDataTable({
dt <- rv$reg_stats_corrected_c
# use formatstyle to highlight lower SSE values
render_regressionstatistics(dt,
mode = "corrected",
minmax = rv$minmax)
})
d <- DT::dataTableOutput("moduleCorrectedStatistics-dt_reg_corrected_c")
do.call(tagList, list(d))
})
# create download button for regression statistics
output$download_regstat_corrected_h <- downloadHandler(
filename = function() {
paste0(
rv$sample_locus_name,
"_corrected_regression_stats_h_",
gsub("\\-",
"",
substr(Sys.time(), 1, 10)),
"_",
gsub("\\:",
"",
substr(Sys.time(), 12, 16)),
".csv"
)
},
content = function(file) {
rBiasCorrection::write_csv(
table = rv$reg_stats_corrected_h[, -which(
colnames(rv$reg_stats_corrected_h) == "better_model")
, with = FALSE],
filename = file)
},
contentType = "text/csv"
)
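    # The data.table idiom above drops the helper column 'better_model'
    # before export: '-which(colnames(...) == ...)' negates the column
    # index, and 'with = FALSE' makes the j argument positional. A minimal
    # standalone sketch (toy data):
    #% dt <- data.table::data.table(Name = "CpG1", SSE = 0.1, better_model = 1)
    #% dt[, -which(colnames(dt) == "better_model"), with = FALSE]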
output$download_regstat_corrected_c <- downloadHandler(
filename = function() {
paste0(
rv$sample_locus_name,
"_corrected_regression_stats_c_",
gsub("\\-", "", substr(Sys.time(), 1, 10)), "_",
gsub("\\:", "", substr(Sys.time(), 12, 16)), ".csv"
)
},
content = function(file) {
rBiasCorrection::write_csv(
table = rv$reg_stats_corrected_c[, -which(
colnames(rv$reg_stats_corrected_c) == "better_model")
, with = FALSE],
filename = file)
},
contentType = "text/csv"
)
# substitutions
output$substitutions_corrected_h <- DT::renderDataTable({
DT::datatable(rv$substitutions_corrected_h,
options = list(scrollX = TRUE,
pageLength = 20,
dom = "ltip"),
rownames = FALSE) %>%
DT::formatRound(columns = c(3:4), digits = 3)
})
output$substitutions_corrected_c <- DT::renderDataTable({
DT::datatable(rv$substitutions_corrected_c,
options = list(scrollX = TRUE,
pageLength = 20,
dom = "ltip"),
rownames = FALSE) %>%
DT::formatRound(columns = c(3:4), digits = 3)
})
output$download_subs_corrected_h <- downloadHandler(
filename = function() {
paste0(rv$sample_locus_name,
"_substituted_corrected_h_",
rBiasCorrection::get_timestamp(), ".csv")
},
content = function(file) {
rBiasCorrection::write_csv(
table = rv$substitutions_corrected_h,
filename = file)
},
contentType = "text/csv"
)
output$download_subs_corrected_c <- downloadHandler(
filename = function() {
paste0(rv$sample_locus_name,
"_substituted_corrected_c_",
rBiasCorrection::get_timestamp(),
".csv")
},
content = function(file) {
rBiasCorrection::write_csv(
table = rv$substitutions_corrected_c,
filename = file)
},
contentType = "text/csv"
)
})
}
#' @title module_correctedstatistics_ui
#'
#' @param id A character. The identifier of the shiny object
#'
#' @return The function returns a shiny ui module.
#'
#' @seealso \url{https://shiny.rstudio.com/articles/modules.html}
#'
#' @examples
#' if (interactive()) {
#' shinydashboard::tabItems(
#' shinydashboard::tabItem(
#' tabName = "correctedstats",
#' module_correctedstatistics_ui(
#' "moduleCorrectedStats"
#' )
#' )
#' )
#' }
#'
#' @export
#'
# module_correctedstatistics_ui
module_correctedstatistics_ui <- function(id) {
ns <- NS(id)
tagList(
fluidRow(
column(
9,
box(
title = "Regression Statistics (corrected)",
tabsetPanel(
tabPanel(
"Hyperbolic Correction",
uiOutput(ns("regression_stats_corrected_h"))
),
tabPanel(
"Cubic Correction",
uiOutput(ns("regression_stats_corrected_c"))
)
),
width = 12
)
),
column(
3,
box(
title = "Download Regression Statistics (corrected)",
uiOutput(ns("statistics_select")),
div(class = "row",
style = "text-align: center",
downloadButton(
ns("download_regstat_corrected_h"),
"Download regression statistics (hyperbolic correction)",
style = paste0(
"white-space: normal; ",
"text-align:center; ",
"padding: 9.5px 9.5px 9.5px 9.5px; ",
"margin: 6px 10px 6px 10px;"))),
div(class = "row",
style = "text-align: center",
downloadButton(
ns("download_regstat_corrected_c"),
"Download regression statistics (cubic correction)",
style = paste0(
"white-space: normal; ",
"text-align:center; ",
"padding: 9.5px 9.5px 9.5px 9.5px; ",
"margin: 6px 10px 6px 10px;"))),
tags$hr(),
width = 12
)
)
),
fluidRow(
column(
9,
box(
title = "Substitutions (corrected)",
tabsetPanel(
tabPanel(
"Hyperbolic Correction",
DT::dataTableOutput(ns("substitutions_corrected_h"))
),
tabPanel(
"Cubic Correction",
DT::dataTableOutput(ns("substitutions_corrected_c"))
)
),
width = 12
)
),
column(
3,
box(
title = "Download substitutions (corrected)",
div(class = "row",
style = "text-align: center",
downloadButton(
ns("download_subs_corrected_h"),
"Download substitutions (hyperbolic correction)",
style = paste0(
"white-space: normal; ",
"text-align:center; ",
"padding: 9.5px 9.5px 9.5px 9.5px; ",
"margin: 6px 10px 6px 10px;"))),
div(class = "row",
style = "text-align: center",
downloadButton(
ns("download_subs_corrected_c"),
"Download substitutions (cubic correction)",
style = paste0(
"white-space: normal; ",
"text-align:center; ",
"padding: 9.5px 9.5px 9.5px 9.5px; ",
"margin: 6px 10px 6px 10px;"))),
tags$hr(),
width = 12
)
)
)
)
}
# ---- end of file: R/moduleCorrectedStatistics.R ----
# BiasCorrector: A GUI to Correct Measurement Bias in DNA Methylation Analyses
# Copyright (C) 2019-2022 Lorenz Kapsner
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#' @title module_experimentalfile_server
#'
#' @inheritParams module_calibrationfile_server
#'
#' @return The function returns a shiny server module.
#'
#' @seealso \url{https://shiny.rstudio.com/articles/modules.html}
#'
#' @examples
#' if (interactive()) {
#' rv <- list()
#' logfilename <- paste0(tempdir(), "/log.txt")
#' shiny::callModule(
#' module_experimentalfile_server,
#' "moduleExperimentalFile",
#' rv = rv,
#' logfilename = logfilename
#' )
#' }
#'
#' @export
#'
# module_experimentalfile_server
module_experimentalfile_server <- function(input,
output,
session,
rv,
...) {
arguments <- list(...)
# error handling with fileimport
observeEvent(
eventExpr = {
req(!is.null(rv$fileimport_experimental))
},
handlerExpr = {
rBiasCorrection::write_log(
message = paste0("(app) Entered observeEvent after fileimport ",
"of experimental file"),
logfilename = arguments$logfilename
)
# if type 1 data
if (rv$type_locus_sample == "1") {
output$experimental_data <- DT::renderDataTable({
DT::datatable(rv$fileimport_experimental,
options = list(scrollX = TRUE,
pageLength = 20,
dom = "ltip",
rowCallback = DT::JS(rv$row_callback)),
rownames = FALSE) %>%
DT::formatRound(columns = c(2:ncol(rv$fileimport_experimental)),
digits = 3)
})
output$exp_samples <- reactive({
len <- unique(rv$fileimport_experimental[, get("sample_id")])
message <- paste0("Number of unique samples: ",
length(len))
rBiasCorrection::write_log(message = message,
logfilename = arguments$logfilename)
message
})
output$exp_samples_raw <- reactive({
len <- sort(unique(rv$fileimport_experimental[, get("sample_id")]))
message <- paste0("Unique sample IDs:\n",
paste(len, collapse = "\n"))
rBiasCorrection::write_log(message = message,
logfilename = arguments$logfilename)
message
})
# aggregated data
output$experimental_data_aggregated <- DT::renderDataTable({
DT::datatable(rv$aggregated_experimental,
options = list(scrollX = TRUE,
pageLength = 20,
dom = "ltip",
rowCallback = DT::JS(rv$row_callback)),
rownames = FALSE) %>%
DT::formatRound(columns = c(3:ncol(rv$aggregated_experimental)),
digits = 3)
})
# if type 2 data
} else if (rv$type_locus_sample == "2") {
output$experimental_data <- DT::renderDataTable({
# https://stackoverflow.com/questions/58526047/customizing-how-
# datatables-displays-missing-values-in-shiny
DT::datatable(rv$fileimport_experimental,
options = list(scrollX = TRUE,
pageLength = 20,
dom = "ltip",
rowCallback = DT::JS(rv$row_callback)),
rownames = FALSE) %>%
DT::formatRound(columns = c(2:ncol(rv$fileimport_experimental)),
digits = 3)
})
output$exp_samples <- reactive({
len <- unique(rv$fileimport_experimental[, get("locus_id")])
message <- paste0("Number of unique loci: ",
length(len))
rBiasCorrection::write_log(message = message,
logfilename = arguments$logfilename)
message
})
output$exp_samples_raw <- reactive({
len <- sort(unique(rv$fileimport_experimental[, get("locus_id")]))
message <- paste0("Unique locus IDs:\n",
paste(len, collapse = "\n"))
rBiasCorrection::write_log(message = message,
logfilename = arguments$logfilename)
message
})
}
# Download experimental data
output$download_experimental <- downloadHandler(
filename = function() {
paste0("raw_experimental_data.csv")
},
content = function(file) {
rBiasCorrection::write_csv(table = rv$fileimport_experimental,
filename = file)
},
contentType = "text/csv"
)
# Download aggregated experimental data
output$download_experimental_aggr <- downloadHandler(
filename = function() {
paste0("aggregated_experimental_data.csv")
},
content = function(file) {
rBiasCorrection::write_csv(table = rv$aggregated_experimental,
filename = file)
},
contentType = "text/csv"
)
}
)
}
#' @title module_experimentalfile_ui
#'
#' @param id A character. The identifier of the shiny object
#'
#' @return The function returns a shiny ui module.
#'
#' @seealso \url{https://shiny.rstudio.com/articles/modules.html}
#'
#' @examples
#' if (interactive()) {
#' shinydashboard::tabItems(
#' shinydashboard::tabItem(
#' tabName = "experimental",
#' module_experimentalfile_ui(
#' "moduleExperimentalFile"
#' )
#' )
#' )
#' }
#'
#' @export
#'
# module_experimentalfile_ui
module_experimentalfile_ui <- function(id) {
ns <- NS(id)
tagList(
fluidRow(
column(
9,
box(
tabsetPanel(
tabPanel(
"Experimental Data",
DT::dataTableOutput(ns("experimental_data"))
),
tabPanel(
"Aggregated Experimental Data",
DT::dataTableOutput(ns("experimental_data_aggregated"))
)
),
width = 12
)
),
column(
3,
box(verbatimTextOutput(ns("exp_samples")),
verbatimTextOutput(ns("exp_samples_raw")),
tags$head(
tags$style(
paste0("#exp_samples_raw{overflow-y:scroll; ",
"max-height: 10vh; background: ghostwhite;}"))),
tags$hr(),
div(class = "row",
style = "text-align: center",
downloadButton(
ns("download_experimental"),
"Download experimental file",
style = paste0(
"white-space: normal; ",
"text-align:center; ",
"padding: 9.5px 9.5px 9.5px 9.5px; ",
"margin: 6px 10px 6px 10px;")
)),
tags$hr(),
div(class = "row",
style = "text-align: center",
downloadButton(
ns("download_experimental_aggr"),
"Download aggregated experimental file",
style = paste0(
"white-space: normal; ",
"text-align:center; ",
"padding: 9.5px 9.5px 9.5px 9.5px; ",
"margin: 6px 10px 6px 10px;")
)),
tags$hr(),
width = 12
)
)
)
)
}
# ---- end of file: R/moduleExperimentalFile.R ----
# BiasCorrector: A GUI to Correct Measurement Bias in DNA Methylation Analyses
# Copyright (C) 2019-2022 Lorenz Kapsner
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#' @title module_fileupload_server
#'
#' @inheritParams module_calibrationfile_server
#'
#' @return The function returns a shiny server module.
#'
#' @seealso \url{https://shiny.rstudio.com/articles/modules.html}
#'
#' @examples
#' if (interactive()) {
#' rv <- list()
#' logfilename <- paste0(tempdir(), "/log.txt")
#' shiny::callModule(
#' module_fileupload_server,
#' "moduleEileUpload",
#' rv = rv,
#' logfilename = logfilename
#' )
#' }
#'
#' @export
#'
# module_fileupload_server
module_fileupload_server <- function(input,
output,
session,
rv,
input_re,
...) {
arguments <- list(...)
# TODO original selection of data type
# observe Radiobuttonevents
#% observeEvent(
#% eventExpr = input_re()[["moduleFileupload-type_locus_sample"]],
#% handlerExpr = {
#% waround <- input_re()[["moduleFileupload-type_locus_sample"]]
#% rv$type_locus_sample <- waround
#% }
#% )
observe({
req(rv$type_locus_sample)
output$type_locus_sample <- reactive({
return(TRUE)
})
outputOptions(output, "type_locus_sample", suspendWhenHidden = FALSE)
})
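  # suspendWhenHidden = FALSE keeps the output computed even while nothing
  # in the UI displays it, so JavaScript conditions such as
  # "output['moduleFileupload-type_locus_sample']" in conditionalPanel()
  # (see module_fileupload_ui below) evaluate correctly. Minimal sketch:
  #% output$flag <- reactive(TRUE)
  #% outputOptions(output, "flag", suspendWhenHidden = FALSE)
  #% # ui: conditionalPanel(condition = "output.flag", ...)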
# loading example data
observeEvent(
eventExpr = input_re()[["moduleFileupload-load_example_data"]],
handlerExpr = {
# mimic normal fileimport
# experimental file
rv$exp_filereq <- TRUE
rv$sample_locus_name <- "Example_Locus"
rBiasCorrection::write_log(
message = paste0("Locus name: Example_Locus\n(--> stored as: ",
rv$sample_locus_name, ")"),
logfilename = arguments$logfilename)
waround_ex <- rBiasCorrection::example.data_experimental[["dat"]]
rv$fileimport_experimental <- waround_ex
# calibration file
waround_ca <- rBiasCorrection::example.data_calibration[["dat"]]
rv$fileimport_calibration <- waround_ca
rv$vec_cal <- rBiasCorrection::example.data_calibration[["vec_cal"]]
# calculate aggregated inputs
rv$aggregated_experimental <- rBiasCorrection::aggregated_input(
datatable = rv$fileimport_experimental,
description = "experimental",
vec_cal = rv$vec_cal,
type = 1
)
rv$aggregated_calibration <- rBiasCorrection::aggregated_input(
datatable = rv$fileimport_calibration,
description = "calibration",
vec_cal = rv$vec_cal
)
# set upload flag
rv$type1cal_uploaded <- TRUE
})
# Experimental file
observe({
req(input_re()[["moduleFileupload-experimentalFile"]])
if (isFALSE(rv$exp_filereq)) {
rBiasCorrection::write_log(
message = "(app) Entered observation for experimental file.",
logfilename = arguments$logfilename)
# check file ending
rv$ending <- strsplit(
input_re()[["moduleFileupload-experimentalFile"]]$name,
".",
fixed = TRUE)[[1]]
# if type 1 data
if (rv$type_locus_sample == "1") {
# render fileInput with option "multiple = F"
output$fileinput_cal <- renderUI({
fileInput("calibrationFile",
paste0("Please choose one CSV file containing ",
"the calibration DNA samples."),
multiple = FALSE,
accept = c(".csv", "text/csv")
)
})
# check userinput of locusname
if (input_re()[["moduleFileupload-locusname"]] == "") {
open_modal("locusname", rv)
} else {
rv$exp_filereq <- TRUE
rv$sample_locus_name <- rBiasCorrection::handle_text_input(
input_re()[["moduleFileupload-locusname"]])
rBiasCorrection::write_log(
message = paste0("Locus name: ",
input_re()[["moduleFileupload-locusname"]],
"\n(--> stored as: ",
rv$sample_locus_name, ")"),
logfilename = arguments$logfilename)
#% shinyjs::disable("moduleFileupload-locusname")
#% removeUI(selector = "#locusname", immediate = T)
}
# if type 2 data
} else if (rv$type_locus_sample == "2") {
# render fileInput with option "multiple = TRUE"
output$fileinput_cal <- renderUI({
fileInput("calibrationFile",
paste0("Please choose at least 4 different CSV files ",
"containing the calibration data (one file per ",
"distinct calibration DNA sample; for specific ",
"file naming please refer to our FAQ)."),
multiple = TRUE,
accept = c(".csv", "text/csv")
)
})
# check userinput of samplename
if (input_re()[["moduleFileupload-samplename"]] == "") {
open_modal("samplename", rv)
} else {
rv$exp_filereq <- TRUE
rv$sample_locus_name <- rBiasCorrection::handle_text_input(
input_re()[["moduleFileupload-samplename"]]
)
rBiasCorrection::write_log(
message = paste0("Sample name: ",
input_re()[["moduleFileupload-samplename"]],
"\n(--> stored as: ",
rv$sample_locus_name, ")"),
logfilename = arguments$logfilename)
#% shinyjs::disable("moduleFileupload-samplename")
#% removeUI(selector = "#samplename", immediate = T)
}
}
}
if (rv$exp_filereq == TRUE && is.null(rv$fileimport_experimental)) {
#% removeUI(selector = "#tag1", immediate = T)
#% shinyjs::disable("moduleFileupload-type_locus_sample")
if (rv$ending[2] %in% c("csv", "CSV")) {
file <- reactiveFileReader(1000, session,
input_re()[["moduleFileupload-experimentalFile"]]$datapath,
data.table::fread,
header = TRUE
)
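        # reactiveFileReader() polls the uploaded file every 1000 ms and
        # re-reads it with data.table::fread() when it changes; calling
        # file() inside a reactive context returns the current contents.
        # Minimal sketch (the path is illustrative only):
        #% rdr <- reactiveFileReader(1000, session, "/tmp/data.csv",
        #%                           data.table::fread, header = TRUE)
        #% observe({ dat <- rdr() })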
tryCatch(
expr = {
rv$fileimport_experimental <- rBiasCorrection::clean_dt(
datatable = file(),
description = "experimental",
type = rv$type_locus_sample,
logfilename = arguments$logfilename
)[["dat"]]
#% updateTabItems(session, "tabs", "panel_1")
},
error = function(e) {
e
# error handling fileimport
open_modal("experimentalFile", rv)
}
)
# test, if we imported valid file
if (is.null(rv$fileimport_experimental)) {
# error handling fileimport
open_modal("experimentalFile", rv)
} else {
# check here, if there have been deleted rows containing
# missing values
tryCatch(
expr = {
omitnas_modal(rv$omitnas, "experimental")
rv$omitnas <- NULL
},
error = function(e) {
rBiasCorrection::write_log(
message = paste0("Errormessage: ", e),
logfilename = arguments$logfilename)
}
)
# workaround to tell ui, that experimental file is there
output$file_uploaded <- reactive({
return(TRUE)
})
outputOptions(output, "file_uploaded",
suspendWhenHidden = FALSE)
}
} else {
# error handling fileimport
open_modal("experimentalFile", rv)
}
}
})
# calibration file
###### Calibration data
observe({
req(input_re()[["calibrationFile"]])
rv$ending <- NULL
    if (is.null(rv$fileimport_calibration) ||
        is.null(rv$fileimport_list)) {
# if calibration file is of data type 1
if (rv$type_locus_sample == "1") {
# check file ending
rv$ending <- strsplit(
input_re()[["calibrationFile"]]$name,
".",
fixed = TRUE)[[1]]
# if ending suggests it might be a csv file
if (rv$ending[2] %in% c("csv", "CSV")) {
file <- reactiveFileReader(1000, session,
input_re()[["calibrationFile"]]$datapath,
data.table::fread,
header = TRUE
)
# try to import file
tryCatch(
expr = {
if (is.null(rv$fileimport_calibration)) {
cal_type_1 <- rBiasCorrection::clean_dt(
datatable = file(),
description = "calibration",
type = "1",
logfilename = arguments$logfilename)
rv$fileimport_calibration <- cal_type_1[["dat"]]
rv$vec_cal <- cal_type_1[["vec_cal"]]
}
},
error = function(e) {
e
# error handling fileimport
open_modal("calibrationFile", rv)
}
)
# go on, if we imported valid file
if (!is.null(rv$fileimport_calibration)) {
# try to check, if colnames of experimental data are
# same as those of calibration data
tryCatch(
expr = {
# check, if colnames of experimental and calibration
# data are equal:
                if (!isTRUE(all.equal(
                  colnames(rv$fileimport_calibration)[-1],
                  colnames(rv$fileimport_experimental)[-1]
                ))) {
# error handling fileimport
open_modal("calibrationFile", rv)
}
},
error = function(e) {
e
# error handling fileimport
open_modal("calibrationFile", rv)
}
)
# check here, if there are calibration steps
# outside the range 0 <= CS <= 100
if (rv$fileimport_calibration[, min(
as.numeric(
as.character(get("true_methylation")))
)] < 0 ||
rv$fileimport_calibration[, max(
as.numeric(
as.character(get("true_methylation")))
)] > 100) {
open_modal("calibrange", rv)
} else {
# check here, if there have been deleted rows
              # containing missing values
#% tryCatch(expr = {
#% omitnasModal(rv$omitnas, "calibration")
#% rv$omitnas <- NULL
#% }, error = function(e) {
#% print(e)
#% })
# calculate aggregated inputs
rv$aggregated_experimental <- rBiasCorrection::aggregated_input(
datatable = rv$fileimport_experimental,
description = "experimental",
vec_cal = rv$vec_cal,
type = 1
)
rv$aggregated_calibration <- rBiasCorrection::aggregated_input(
datatable = rv$fileimport_calibration,
description = "calibration",
vec_cal = rv$vec_cal
)
rv$type1cal_uploaded <- TRUE
}
# if we have the value "NULL" in our file-variable;
# this happens, when cleanDT returns error
} else {
# error handling fileimport
open_modal("calibrationFile", rv)
}
# else, if ending is no csv-file
} else {
# error handling fileimport
open_modal("calibrationFile", rv)
}
# if calibration file is of data type 2
} else if (rv$type_locus_sample == "2") {
if (isFALSE(rv$type2cal_uploaded)) {
# loop through calibration files
for (i in seq_len(nrow(input_re()[["calibrationFile"]]))) {
# check file ending
rv$ending <- strsplit(
input_re()[["calibrationFile"]]$name[i],
".",
fixed = TRUE
)[[1]]
file <- reactiveFileReader(1000, session,
input_re()[["calibrationFile"]]$datapath[i],
data.table::fread,
header = TRUE
)
if (rv$ending[2] %in% c("csv", "CSV")) {
waround_fup <- rBiasCorrection::clean_dt(
datatable = file(),
description = "calibration",
type = "2",
logfilename = arguments$logfilename
)[["dat"]]
ind <- input_re()[["calibrationFile"]]$name[i]
rv$fileimport_list[[ind]] <- waround_fup
} else {
# error handling fileimport
open_modal("csv", rv)
}
}
# nolint start
        # check type 2 file requirements here
# filecheck <- rBiasCorrection::type2_filereq(
# filelist = rv$fileimport_list,
# rv = rv,
# logfilename = arguments$logfilename)
#
# if (is.character(filecheck)) {
# open_modal(filecheck, rv)
# } else if (isTRUE(filecheck)) {
# rv$type2cal_uploaded <- TRUE
# }
# nolint end
}
}
}
})
}
#' @title module_fileupload_ui
#'
#' @param id A character. The identifier of the shiny object
#' @param ... Further arguments, such as `maxfilesize`
#'
#' @return The function returns a shiny ui module.
#'
#' @seealso \url{https://shiny.rstudio.com/articles/modules.html}
#'
#' @examples
#' if (interactive()) {
#' shinydashboard::tabItems(
#' shinydashboard::tabItem(
#' tabName = "fileupload",
#' module_fileupload_ui(
#' "moduleFileUpload",
#' maxfilesize = maxfilesize
#' )
#' )
#' )
#' }
#'
#' @export
#'
# module_fileupload_ui
module_fileupload_ui <- function(id,
...) {
arguments <- list(...)
ns <- NS(id)
tagList(
fluidRow(
# type of data box
box(
# TODO original selection of data type
#% title = "Type of Data",
#% # Radiobuttons: Type of data
#% radioButtons(
#% inputId = ns("type_locus_sample"),
#% label = h5(paste0("Please specify the type of DNA methylation ",
#% "data to be corrected for measurement biases")),
#% choices = list(
#% paste0("One locus in many samples ",
#% "(e.g. pyrosequencing data)") = 1,
#% paste0("Many loci in one sample ",
#% "(e.g. next-generation sequencing ",
#% "data or microarray data)") = 2),
#% selected = character(0)),
#%
#% tags$hr(),
#%
#% conditionalPanel(
#% condition = "input['moduleFileupload-type_locus_sample'] == 1",
#% textInput(ns("locusname"),
#% label = NULL,
#% placeholder = "Locus name")
#% ),
#%
#% conditionalPanel(
#% condition = "input['moduleFileupload-type_locus_sample'] == 2",
#% textInput(ns("samplename"),
#% label = NULL,
#% placeholder = "Sample-ID")
#% ),
#% conditionalPanel(
#% condition = "input['moduleFileupload-type_locus_sample'] != null",
#% verbatimTextOutput(ns("samplelocus_out"))
#% ), width = 6)
title = "File upload",
h5("Please type in the ID of the interrogated locus"),
textInput(ns("locusname"),
label = NULL,
placeholder = "Locus ID"
),
conditionalPanel(
condition = "output['moduleFileupload-type_locus_sample']",
verbatimTextOutput(ns("samplelocus_out"))
),
width = 6
),
box(
title = "Description",
h5(paste0("This application is a graphical user interface (GUI) ",
"to the algorithms implemented in the R-package ",
"'rBiasCorrection'.")),
h5(paste0("If you use these 'BiasCorrector' or 'rBiasCorrection' ",
"packages to correct DNA methylation data for a ",
"publication, please refer to the 'Info'-tab to find out ",
"how to cite them.")),
tags$hr(),
h5(paste0("You can test this application with example data by ",
"pressing the 'Load Example Data'-button below.")),
div(
class = "row", style = "text-align: center",
actionButton(
ns("load_example_data"),
"Load Example Data",
style = paste0(
"white-space: normal; ",
"text-align:center; ",
"padding: 9.5px 9.5px 9.5px 9.5px; ",
"margin: 6px 10px 6px 10px;")
)
)
)
),
# experimental fileupload box
fluidRow(
conditionalPanel(
condition = "output['moduleFileupload-type_locus_sample']",
box(
title = "Data Input: Experimental Data",
h5(paste0("Please upload the CSV file* containing ",
"the experimental data.")),
# Input: Select a file
fileInput(
ns("experimentalFile"),
paste0("Please choose one CSV file containing the experimental ",
"data that are to be corrected."),
multiple = FALSE,
accept = c(".csv", "text/csv")
),
h6(paste("Max. file size: ",
arguments$maxfilesize,
" MB")),
h6(paste0("*For the specific CSV file requirements ",
"please refer to our"),
a("FAQ!",
href = paste0("https://github.com/kapsner/",
"rBiasCorrection/blob/master/FAQ.md"))),
width = 6
)
),
# calibration fileupload box
conditionalPanel(
condition = "output['moduleFileupload-file_uploaded']",
box(
title = "Data Input: Calibration Data",
h5(paste0("Please upload the CSV files* containing ",
"the calibration data.")),
uiOutput(ns("fileinput_cal")),
#% fileInput("calibrationFile",
#% paste0("Calibration data: choose one CSV file ",
#% "containing the calibration data"),
#% multiple = rv$import_type2,
#% accept = c(".csv")),
h6(paste("Max. file size: ",
arguments$maxfilesize,
" MB")),
h6(paste0("*For the specific CSV file requirements ",
"please refere to our"),
a("FAQ!",
href = paste0("https://github.com/kapsner/",
"rBiasCorrection/blob/master/FAQ.md"))),
width = 6
)
)
)
)
}
# ---- end of file: R/moduleFileupload.R ----
# BiasCorrector: A GUI to Correct Measurement Bias in DNA Methylation Analyses
# Copyright (C) 2019-2022 Lorenz Kapsner
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#' @title module_info_server
#'
#' @inheritParams module_calibrationfile_server
#'
#' @return The function returns a shiny server module.
#'
#' @seealso \url{https://shiny.rstudio.com/articles/modules.html}
#'
#' @examples
#' if (interactive()) {
#' rv <- list()
#' logfilename <- paste0(tempdir(), "/log.txt")
#' shiny::callModule(
#' module_info_server,
#' "moduleInfo",
#' rv = rv,
#' logfilename = logfilename
#' )
#' }
#'
#' @export
#'
# module_info_server
module_info_server <- function(input,
output,
session,
rv,
input_re) {
output$citation_correction <- renderPrint({
utils::citation("rBiasCorrection")
})
output$citation_corrector <- renderPrint({
utils::citation("BiasCorrector")
})
}
#' @title module_info_ui
#'
#' @param id A character. The identifier of the shiny object
#'
#' @return The function returns a shiny ui module.
#'
#' @seealso \url{https://shiny.rstudio.com/articles/modules.html}
#'
#' @examples
#' if (interactive()) {
#' shinydashboard::tabItems(
#' shinydashboard::tabItem(
#' tabName = "info",
#' module_info_ui(
#' "moduleInfo"
#' )
#' )
#' )
#' }
#'
#' @export
#'
# module_info_ui
module_info_ui <- function(id) {
ns <- NS(id)
tagList(
fluidRow(
box(
title = "Info",
tabsetPanel(
tabPanel(
title = "Citation",
h5(paste0("If you use these 'BiasCorrector' or ",
"'rBiasCorrection' packages to correct ",
"DNA methylation data for a publication, ",
"please cite them as follows:")),
h5(tags$b("rBiasCorrection:")),
verbatimTextOutput(ns("citation_correction")),
h5(tags$b("BiasCorrector:")),
verbatimTextOutput(ns("citation_corrector"))
),
tabPanel(
title = "Version",
h5(paste0("rBiasCorrection: ",
utils::packageVersion("rBiasCorrection")
)),
h5(paste0("BiasCorrector: ",
utils::packageVersion("BiasCorrector")
))
)
),
width = 12
)
)
)
}
# ---- end of file: R/moduleInfo.R ----
# BiasCorrector: A GUI to Correct Measurement Bias in DNA Methylation Analyses
# Copyright (C) 2019-2022 Lorenz Kapsner
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#' @title module_log_server
#'
#' @inheritParams module_calibrationfile_server
#'
#' @return The function returns a shiny server module.
#'
#' @seealso \url{https://shiny.rstudio.com/articles/modules.html}
#'
#' @examples
#' if (interactive()) {
#' rv <- list()
#' logfilename <- paste0(tempdir(), "/log.txt")
#' shiny::callModule(
#' module_log_server,
#' "moduleLog",
#' rv = rv,
#' logfilename = logfilename
#' )
#' }
#'
#' @export
#'
# module_log_server
module_log_server <- function(input,
output,
session,
rv,
input_re,
...) {
arguments <- list(...)
# logfileviewer
observe({
file <- reactiveFileReader(500, session,
arguments$logfilename,
readLines)
rv$logfile <- file()
output$download_logfile <- downloadHandler(
filename = function() {
paste0("BC_logfile.txt")
},
content = function(file) {
write(rv$logfile, file)
},
contentType = "text/csv"
)
})
output$log_out <- reactive({
paste(paste0(rv$logfile, collapse = "\n"))
})
}
#' @title module_log_ui
#'
#' @param id A character. The identifier of the shiny object
#'
#' @return The function returns a shiny ui module.
#'
#' @seealso \url{https://shiny.rstudio.com/articles/modules.html}
#'
#' @examples
#' if (interactive()) {
#' shinydashboard::tabItems(
#' shinydashboard::tabItem(
#' tabName = "log",
#' module_log_ui(
#' "moduleLog"
#' )
#' )
#' )
#' }
#'
#' @export
#'
# module_log_ui
module_log_ui <- function(id) {
ns <- NS(id)
tagList(fluidRow(
box(
title = "Log",
verbatimTextOutput(ns("log_out")),
tags$head(tags$style(
paste0(
"#moduleLog-log_out{overflow-y:scroll; ",
"max-height: 70vh; background: ghostwhite;}"
)
)),
width = 9
),
box(
title = "Download Log File",
div(
class = "row",
style = "text-align: center;",
shinyjs::disabled(downloadButton(
ns("download_logfile"),
"Download Log File",
        style = paste0(
"white-space: normal; ",
"text-align:center; ",
"padding: 9.5px 9.5px 9.5px 9.5px; ",
"margin: 6px 10px 6px 10px;"
)
))
),
tags$hr(),
width = 3
)
))
}
# ---- end of file: R/moduleLog.R ----
# BiasCorrector: A GUI to Correct Measurement Bias in DNA Methylation Analyses
# Copyright (C) 2019-2022 Lorenz Kapsner
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#' @title module_modelselection_server
#'
#' @inheritParams module_calibrationfile_server
#'
#' @return The function returns a shiny server module.
#'
#' @seealso \url{https://shiny.rstudio.com/articles/modules.html}
#'
#' @examples
#' if (interactive()) {
#' rv <- list()
#' logfilename <- paste0(tempdir(), "/log.txt")
#' shiny::callModule(
#' module_modelselection_server,
#' "moduleModelSelection",
#' rv = rv,
#' logfilename = logfilename
#' )
#' }
#'
#' @export
#'
# module_modelselection_server
module_modelselection_server <- function(input,
output,
session,
rv,
input_re) {
observe({
req(rv$better_model_stats)
# model selection only implemented for type 1 data
if (rv$type_locus_sample == "1") {
# select all at once
observeEvent(
eventExpr = input_re()[["moduleModelSelection-reg_all"]],
handlerExpr = {
if (input_re()[["moduleModelSelection-reg_all"]] %in% c("0", "1")) {
rv$radioselection <- rep(
input_re()[["moduleModelSelection-reg_all"]],
times = length(rv$vec_cal)
)
} else {
rv$radioselection <- as.character(
rv$better_model_stats[, get("better_model")]
)
}
})
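      # Note: selecting "hyperbolic" ("0") or "cubic" ("1") for all CpG
      # sites simply repeats that value across the whole vector, e.g.
      # rep("1", times = 3) yields c("1", "1", "1"); "best" ("2") falls
      # back to the per-site 'better_model' column computed earlier.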
if (is.null(rv$radioselection)) {
rv$radioselection <- as.character(
rv$better_model_stats[, get("better_model")]
)
}
# render radio buttons for tab 5
output$reg_radios <- renderUI({
radio_output_list <- lapply(
seq_len(length(rv$vec_cal)),
function(g) {
radioname <- paste0("radio", g)
div(
class = "row",
style = "margin: 0.5%; text-align: center;",
div(
class = "col-sm-4",
style = "text-align: left;",
h5(tags$b(paste0("Regression type for ",
rv$vec_cal[g], ":")))
),
div(
class = "col-sm-4",
style = "text-align: left;",
div(
class = "row",
style = "text-align: center;",
radioButtons(
inputId = paste0("moduleModelSelection-",
radioname),
label = NULL,
choices = list(
"hyperbolic" = "0",
"cubic" = "1"
),
selected = as.character(rv$radioselection[g]),
inline = TRUE
)
)
),
div(
class = "col-sm-4",
verbatimTextOutput(
paste0("moduleModelSelection-text_",
radioname))
)
)
})
do.call(tagList,
list(radio_output_list)) # needed to display properly.
})
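      # The lapply() above generates one radioButtons() row per CpG site;
      # do.call(tagList, ...) is what lets renderUI display the whole
      # generated list. Minimal standalone sketch (ids are illustrative):
      #% output$dyn_ui <- renderUI({
      #%   items <- lapply(1:3, function(g) {
      #%     radioButtons(inputId = paste0("radio", g),
      #%                  label = paste("Option", g),
      #%                  choices = list("a" = "0", "b" = "1"))
      #%   })
      #%   do.call(tagList, items)
      #% })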
} else if (rv$type_locus_sample == "2") {
# type 2 data:
      # trigger calculation of results (bypass manual model selection)
#% shinyjs::click("results")
}
})
observe({
req(rv$better_model_stats)
if (rv$type_locus_sample == "1") {
lapply(seq_len(length(rv$vec_cal)), function(k) {
radioname <- paste0("radio", k)
if (!is.null(input_re()[[paste0("moduleModelSelection-",
radioname)]])) {
if (rv$selection_method == "SSE") {
output[[paste0("text_", radioname)]] <- reactive({
paste(
"SSE:",
as.character(
ifelse(
input_re()[[paste0("moduleModelSelection-",
radioname)]] == "1",
rv$better_model_stats[
get("Name") == rv$vec_cal[k], round(
get("SSE_cubic"),
3)
],
rv$better_model_stats[
get("Name") == rv$vec_cal[k], round(
get("SSE_hyperbolic"),
3)
]
))
)
})
} else if (rv$selection_method == "RelError") {
output[[paste0("text_", radioname)]] <- reactive({
paste(
"Rel.Error:",
as.character(
ifelse(input_re()[[paste0("moduleModelSelection-",
radioname)]] == "1",
rv$better_model_stats[
get("Name") == rv$vec_cal[k], round(
get("relative_error_c"),
3)
],
rv$better_model_stats[
get("Name") == rv$vec_cal[k], round(
get("relative_error_h"),
3)
]
))
)
})
}
}
})
}
})
}
#' @title module_modelselection_ui
#'
#' @param id A character. The identifier of the shiny object
#'
#' @return The function returns a shiny ui module.
#'
#' @seealso \url{https://shiny.rstudio.com/articles/modules.html}
#'
#' @examples
#' if (interactive()) {
#' shinydashboard::tabItems(
#' shinydashboard::tabItem(
#' tabName = "modelselection",
#' module_modelselection_ui(
#' "moduleModelSelection"
#' )
#' )
#' )
#' }
#'
#' @export
#'
# module_modelselection_ui
module_modelselection_ui <- function(id) {
ns <- NS(id)
tagList(
fluidRow(
box(
title = "Select Regression Model",
radioButtons(
ns("reg_all"),
label = "Select algorithm for all CpG sites",
choices = list(
"best" = "2",
"hyperbolic" = "0",
"cubic polynomial" = "1"
),
selected = character(0), inline = TRUE
),
uiOutput(ns("reg_radios")),
width = 12
)
)
)
}
# ---- end of file: R/moduleModelSelection.R ----
# BiasCorrector: A GUI to Correct Measurement Bias in DNA Methylation Analyses
# Copyright (C) 2019-2022 Lorenz Kapsner
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#' @title module_plotting_server
#'
#' @inheritParams module_calibrationfile_server
#'
#' @return The function returns a shiny server module.
#'
#' @seealso \url{https://shiny.rstudio.com/articles/modules.html}
#'
#' @examples
#' if (interactive()) {
#' rv <- list()
#' logfilename <- paste0(tempdir(), "/log.txt")
#' shiny::callModule(
#' module_plotting_server,
#' "modulePlotting",
#' rv = rv,
#' logfilename = logfilename
#' )
#' }
#'
#' @export
#'
# module_plotting_server
module_plotting_server <- function(input,
output,
session,
rv,
input_re,
...) {
arguments <- list(...)
observe({
# this is needed, to open new tab (Regression plots)
# before rendering the plots!
# if (input_re()$tabs == "panel_3") {
req(rv$run)
# type 1 data:
if (rv$type_locus_sample == "1") {
if (isFALSE(rv$plotting_finished)) {
withProgress(
expr = {
regression_results <- rBiasCorrection::regression_utility(
data = rv$fileimport_calibration,
samplelocusname = rv$sample_locus_name,
rv = rv,
logfilename = arguments$logfilename,
minmax = rv$minmax,
seed = rv$seed
)
plotlist_reg <- regression_results[["plot_list"]]
rv$result_list <- regression_results[["result_list"]]
},
value = 1 / 2,
message = "Calculating calibration curves"
)
withProgress(
expr = {
rBiasCorrection::plotting_utility(
data = rv$fileimport_calibration,
plotlist_reg = plotlist_reg,
type = 1,
samplelocusname = rv$sample_locus_name,
rv = rv,
plotdir = arguments$plotdir,
logfilename = arguments$logfilename,
minmax = rv$minmax,
plot_height = rv$plot_height,
plot_width = rv$plot_width,
plot_textsize = rv$plot_textsize
)
# save regression statistics to reactive value
rv$reg_stats <- rBiasCorrection::statistics_list(
resultlist = rv$result_list,
minmax = rv$minmax)
# on finished
rv$plotting_finished <- TRUE
rBiasCorrection::write_log(message = "Finished plotting",
logfilename = arguments$logfilename)
},
value = 1 / 2,
message = "Plotting calibration curves"
)
}
# else if type 2 data
} else if (rv$type_locus_sample == "2") {
if (isFALSE(rv$plotting_finished)) {
a <- 1
rv$result_list_type2 <- list()
for (b in names(rv$fileimport_calibration)) {
withProgress(
expr = {
rv$vec_cal <- names(rv$fileimport_calibration[[a]])[-1]
#% print(paste("Length rv$vec_cal:", length(rv$vec_cal)))
regression_results <- rBiasCorrection::regression_utility(
data = rv$fileimport_calibration[[a]],
samplelocusname = rv$sample_locus_name,
locus_id = gsub("[[:punct:]]", "", b),
rv = rv,
logfilename = arguments$logfilename,
minmax = rv$minmax,
seed = rv$seed
)
plotlist_reg <- regression_results[["plot_list"]]
rv$result_list <- regression_results[["result_list"]]
},
value = 1 / 2,
message = "Calculating calibration curves",
detail = b
)
withProgress(
expr = {
rBiasCorrection::plotting_utility(
data = rv$fileimport_calibration[[a]],
plotlist_reg = plotlist_reg,
type = 2,
samplelocusname = rv$sample_locus_name,
locus_id = gsub("[[:punct:]]", "", b),
rv = rv,
plotdir = arguments$plotdir,
logfilename = arguments$logfilename,
minmax = rv$minmax,
plot_height = rv$plot_height,
plot_width = rv$plot_width,
plot_textsize = rv$plot_textsize
)
# save regression statistics to reactive value
rv$reg_stats[[b]] <- rBiasCorrection::statistics_list(
resultlist = rv$result_list,
minmax = rv$minmax
)
rv$result_list_type2[[b]] <- rv$result_list
a <- a + 1
},
value = 1 / 2,
message = "Plotting calibration curves",
detail = b
)
}
# on finished
rv$plotting_finished <- TRUE
rBiasCorrection::write_log(
message = "Finished plotting",
logfilename = arguments$logfilename)
}
}
})
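  # Both branches above use shiny::withProgress() so the user sees
  # progress feedback during the long-running regression and plotting
  # steps. A minimal, self-contained sketch of the pattern (the sleep
  # stands in for the actual computation):
  #% withProgress(message = "working", value = 0, {
  #%   incProgress(1 / 2, detail = "step 1")
  #%   Sys.sleep(1)  # placeholder for a long-running computation
  #%   incProgress(1 / 2, detail = "step 2")
  #% })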
# when plotting has finished
observe({
req(rv$plotting_finished)
# type 1 data:
if (rv$type_locus_sample == "1") {
### Plot tab ###
# create a list of plotnames to populate selectInput
plot_output_list <- lapply(
seq_len(length(rv$vec_cal)),
function(g) {
paste0(gsub("[[:punct:]]", "", rv$vec_cal[g]))
})
names(plot_output_list) <- rv$vec_cal
# create reactive selectinput:
sel_in2 <- reactive({
selectInput(inputId = "selectPlot",
label = "Select CpG site",
multiple = FALSE,
selectize = FALSE,
choices = plot_output_list)
})
# create download button for each plot
output$download_plots <- downloadHandler(
filename = function() {
paste0(rv$sample_locus_name,
"_",
gsub("[[:punct:]]",
"",
input_re()$selectPlot),
".png")
},
content = function(file) {
file.copy(paste0(arguments$plotdir,
rv$sample_locus_name,
"_",
gsub("[[:punct:]]",
"",
input_re()$selectPlot),
".png"),
file)
},
contentType = "image/png"
)
# render head of page with selectInput and downloadbutton
# TODO align selectinput and button aside of each other
output$select_plotinput <- renderUI({
s <- sel_in2()
b <- div(class = "row",
style = "text-align: center",
downloadButton(
"modulePlotting-download_plots",
"Download Plot",
style = paste0(
"white-space: normal; ",
"text-align:center; ",
"padding: 9.5px 9.5px 9.5px 9.5px; ",
"margin: 6px 10px 6px 10px;")))
do.call(tagList, list(s,
tags$hr(),
b,
tags$hr()))
})
# for debugging
observeEvent(
eventExpr = input_re()$selectPlot,
handlerExpr = {
message(input_re()$selectPlot)
})
# render plots from local temporary file
output$plots <- renderImage(
expr = {
filename <- paste0(arguments$plotdir,
rv$sample_locus_name,
"_",
gsub("[[:punct:]]",
"",
input_re()$selectPlot),
".png")
# Return a list containing the filename
list(src = filename)
},
deleteFile = FALSE
)
# type 2 data:
} else if (rv$type_locus_sample == "2") {
# create list of loci to populate selectInput
# "select_plotlocus"
list_plot_locus <- list()
for (i in seq_len(length(rv$fileimport_calibration))) {
list_plot_locus[[i]] <- names(rv$fileimport_calibration)[i]
}
select_plotlocus <- reactive({
selectInput(inputId = "select_plotlocus",
label = "Select locus:",
multiple = FALSE,
selectize = FALSE,
choices = list_plot_locus)
})
# create list of cpg-sites for each locus to populate selectInput
# "select_plot_cpg"
list_plot_cpg <- list()
for (i in seq_len(length(rv$fileimport_calibration))) {
list_plot_cpg[[names(rv$fileimport_calibration)[i]]] <- names(
rv$fileimport_calibration[[i]]
)[-1]
}
# only return list of CpG-sites for each locus, if there is
# already a selection of the locus in select_plotlocus
cpg_output <- reactive({
if (!is.null(input_re()$select_plotlocus)) {
return(list_plot_cpg[input_re()$select_plotlocus])
}
})
# always wrap selectInput into reactive-function
select_plot_cpg <- reactive({
selectInput(inputId = "selectPlotType2",
label = "Select CpG site:",
multiple = FALSE,
selectize = FALSE,
choices = cpg_output())
})
# render second selectInput
output$s2_plotoutput <- renderUI({
select_plot_cpg()
})
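      # Wrapping selectInput() in reactive() (see comment above) makes the
      # input re-render whenever its choices change: picking a new locus
      # invalidates cpg_output(), which rebuilds the CpG dropdown.
      # Minimal sketch of the pattern (ids are illustrative only):
      #% sel <- reactive({
      #%   selectInput("site", "Select CpG site:", choices = cpg_output())
      #% })
      #% output$sel_ui <- renderUI(sel())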
# create download button for each plot
output$download_plots <- downloadHandler(
filename = function() {
paste0(gsub("[[:punct:]]",
"",
input_re()$select_plotlocus),
"-",
rv$sample_locus_name,
"_",
gsub("[[:punct:]]",
"",
input_re()$selectPlotType2),
".png")
},
content = function(file) {
file.copy(paste0(arguments$plotdir,
gsub("[[:punct:]]",
"",
input_re()$select_plotlocus),
"-",
rv$sample_locus_name,
"_",
gsub("[[:punct:]]",
"",
input_re()$selectPlotType2),
".png"),
file)
},
contentType = "image/png"
)
# render Plot UI
output$select_plotinput <- renderUI({
s1 <- select_plotlocus()
s2 <- uiOutput("modulePlotting-s2_plotoutput")
b <- div(class = "row",
style = "text-align: center",
downloadButton(
"modulePlotting-download_plots",
"Download Plot",
style = paste0(
"white-space: normal; ",
"text-align:center; ",
"padding: 9.5px 9.5px 9.5px 9.5px; ",
"margin: 6px 10px 6px 10px;")))
do.call(tagList, list(s1,
s2,
tags$hr(),
b,
tags$hr()))
})
# render plot from local temporary file
output$plots <- renderImage(
expr = {
filename <- paste0(arguments$plotdir,
gsub("[[:punct:]]",
"",
input_re()$select_plotlocus),
"-",
rv$sample_locus_name,
"_",
gsub("[[:punct:]]",
"",
input_re()$selectPlotType2),
".png")
#% print(filename)
# Return a list containing the filename
list(src = filename)
},
deleteFile = FALSE
)
}
})
}
#' @title module_plotting_ui
#'
#' @param id A character. The identifier of the shiny object
#'
#' @return The function returns a shiny ui module.
#'
#' @seealso \url{https://shiny.rstudio.com/articles/modules.html}
#'
#' @examples
#' if (interactive()) {
#' shinydashboard::tabItems(
#' shinydashboard::tabItem(
#' tabName = "plotting",
#' module_plotting_ui(
#' "modulePlotting"
#' )
#' )
#' )
#' }
#'
#' @export
#'
# module_plotting_ui
module_plotting_ui <- function(id) {
ns <- NS(id)
tagList(
fluidRow(
column(
9,
box(
title = "Regression Plot",
imageOutput(ns("plots")),
tags$head(
tags$style(
type = "text/css",
paste0(
"#modulePlotting-plots img ",
"{max-height: 100%; max-width: 100%; width: auto; ",
"display: block; margin-left: auto; margin-right: auto;}")
)
),
width = 12
)
),
column(
3,
box(
title = "Plot Selection",
uiOutput(ns("select_plotinput")),
width = 12
)
)
)
)
}
# ---- end of file: R/modulePlotting.R ----
# BiasCorrector: A GUI to Correct Measurement Bias in DNA Methylation Analyses
# Copyright (C) 2019-2022 Lorenz Kapsner
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#' @title module_results_server
#'
#' @inheritParams module_calibrationfile_server
#'
#' @return The function returns a shiny server module.
#'
#' @seealso \url{https://shiny.rstudio.com/articles/modules.html}
#'
#' @examples
#' if (interactive()) {
#' rv <- list()
#' logfilename <- paste0(tempdir(), "/log.txt")
#' shiny::callModule(
#' module_results_server,
#' "moduleResults",
#' rv = rv,
#' logfilename = logfilename
#' )
#' }
#'
#' @export
#'
# module_results_server
module_results_server <- function(input,
output,
session,
rv,
input_re,
...) {
arguments <- list(...)
observe({
req(rv$calculate_results)
if (rv$calculate_results) {
message("\nCalculate results\n")
if (rv$type_locus_sample == "1") {
rv$choices_list <- tryCatch(
expr = {
o <- data.table::data.table(
"Name" = character(),
"better_model" = numeric()
)
for (l in seq_len(length(rv$vec_cal))) {
radioname <- paste0("radio", l)
            o <- rbind(o,
                       cbind(
                         "Name" = rv$vec_cal[l],
                         "better_model" = as.numeric(
                           input_re()[[paste0("moduleModelSelection-",
                                              radioname)]]
                         )
                       )
            )
}
o
},
error = function(e) {
e
o <- rv$better_model_stats[, c("Name",
"better_model"
), with = FALSE]
o
        }
      )
message(rv$choices_list)
# calculating final results
withProgress(
message = "BiasCorrecting experimental data",
value = 0, {
incProgress(
1 / 1,
detail = "... working on BiasCorrection ...")
# Experimental data
solved_eq <- rBiasCorrection::solving_equations(
rv$fileimport_experimental,
rv$choices_list,
type = 1,
rv = rv,
logfilename = arguments$logfilename,
minmax = rv$minmax
)
rv$final_results <- solved_eq[["results"]]
rv$substitutions <- solved_eq[["substitutions"]]
})
} else if (rv$type_locus_sample == "2") {
# initialize temp results
rv$temp_results <- list()
rv$substitutions <- rBiasCorrection::substitutions_create()
# iterate over unique names in locus_id of experimental file
# (to correctly display decreasing order of CpG-sites in final
# results)
# calculating final results
withProgress(
message = "BiasCorrecting experimental data",
value = 0, {
incProgress(
1 / 1,
detail = "... working on BiasCorrection ...")
# Experimental data
# iterate over unique locus ids in experimental file
for (locus in rv$fileimport_experimental[, unique(
get("locus_id")
)]) {
# get regression results
rv$result_list <- rv$result_list_type2[[locus]]
# get copy of experimental data for that specific locus
expdata <- rv$fileimport_experimental[get("locus_id") == locus, ]
# get colnames of that specific locus (different loci can have
# different numbers of CpG-sites)
vec <- c("locus_id", colnames(expdata)[2:(expdata[, min(
get("CpG_count")
)] + 1)], "row_means")
# solve equations for that locus and append temp_results
solved_eq <- rBiasCorrection::solving_equations(
expdata[, vec, with = FALSE],
rv$reg_stats[[locus]][
,
c("Name", "better_model"),
with = FALSE
],
type = 2,
rv = rv,
logfilename = arguments$logfilename,
minmax = rv$minmax
)
rv$temp_results[[locus]] <- solved_eq[["results"]]
rv$substitutions <- rbind(rv$substitutions,
solved_eq[["substitutions"]])
}
# iterate over temp_results (key = locus-name) and iteratively
# append final results
for (i in names(rv$temp_results)) {
if (is.null(rv$final_results)) {
rv$final_results <- rv$temp_results[[i]]
} else {
# set use.names = T and fill = T because, as pointed out
# before, different loci can have different numbers of CpG
# sites and!!
# the best fitting algorithm can be cubic or hyperbolic for
# the same CpG site-number of different loci
rv$final_results <- rbind(
rv$final_results,
rv$temp_results[[i]],
use.names = TRUE,
fill = TRUE
)
}
}
vec <- colnames(rv$final_results)[grepl("row_means",
colnames(
rv$final_results
)
)]
          # reorder the columns so that the row_means columns come last
rv$final_results <- cbind(rv$final_results[, -vec, with = FALSE],
rv$final_results[, vec, with = FALSE],
CpG_sites = unique(
rv$fileimport_experimental[, get(
"CpG_count"
), by = get("locus_id")])$CpG_count
)
})
}
output$dtfinal <- DT::renderDataTable({
# https://stackoverflow.com/questions/49636423/how-to-change-the-
# cell-color-of-a-cell-of-an-r-shiny-data-table-dependent-on-it
DT::datatable(rv$final_results,
options = list(scrollX = TRUE,
pageLength = 20,
dom = "ltip",
rowCallback = DT::JS(rv$row_callback)),
rownames = FALSE) %>%
DT::formatRound(columns = c(2:ncol(rv$final_results)),
digits = 3)
})
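      # rowCallback = DT::JS(rv$row_callback) injects JavaScript that runs
      # once per rendered row (used in this app to customize how missing
      # values are displayed; see the linked stackoverflow thread). The
      # actual callback is defined elsewhere; a hypothetical example of
      # such a callback:
      #% row_cb <- DT::JS(
      #%   "function(row, data) {",
      #%   "  for (var i = 0; i < data.length; i++) {",
      #%   "    if (data[i] === null) { $('td:eq(' + i + ')', row).html('NA'); }",
      #%   "  }",
      #%   "}")
      #% DT::datatable(mtcars, options = list(rowCallback = row_cb))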
# show corrected results for experimental data
output$corrected_data <- renderUI({
dt <- DT::dataTableOutput("moduleResults-dtfinal")
do.call(tagList, list(dt))
})
# Download corrected results
output$download_final <- downloadHandler(
filename = function() {
paste0(rv$sample_locus_name,
"_corrected_values_",
rBiasCorrection::get_timestamp(), ".csv")
},
content = function(file) {
rBiasCorrection::write_csv(
table = rv$final_results,
filename = file)
},
contentType = "text/csv"
)
output$download_all_data <- downloadHandler(
filename = paste0(
rv$sample_locus_name,
"_all-results_",
gsub("\\-",
"",
substr(Sys.time(), 1, 10)),
"_",
gsub("\\:",
"",
substr(Sys.time(), 12, 16)),
".zip"
),
content = function(fname) {
message(paste0("getwd(): ", getwd()))
# temporarily set tempdir as wd
oldwd <- getwd()
      # fix for CRAN submission: restore the working directory on exit
on.exit(setwd(oldwd))
setwd(tempdir())
message(paste0("getwd(): ", getwd()))
      # create files whose export does not differ
      # between type 1 and type 2 data
rBiasCorrection::write_csv(
rv$fileimport_experimental,
paste0(
arguments$csvdir,
"raw_experimental_data.csv")
)
rBiasCorrection::write_csv(
rv$aggregated_experimental,
paste0(
arguments$csvdir,
"aggregated_experimental_data.csv")
)
rBiasCorrection::write_csv(
rv$final_results,
paste0(
arguments$csvdir,
rv$sample_locus_name,
"_corrected_values.csv")
)
rBiasCorrection::write_csv(
rv$substitutions,
paste0(arguments$csvdir,
rv$sample_locus_name,
"_substituted_values.csv")
)
rBiasCorrection::write_csv(
rv$substitutions_corrected_h,
paste0(
arguments$csvdir,
rv$sample_locus_name,
"_substituted_corrected_h.csv")
)
rBiasCorrection::write_csv(
rv$substitutions_corrected_c,
paste0(
arguments$csvdir,
rv$sample_locus_name,
"_substituted_corrected_c.csv")
)
write(rv$logfile,
paste0(
arguments$csvdir,
"BC_logfile.txt")
)
# create other files
if (rv$type_locus_sample == "1") {
rBiasCorrection::write_csv(
rv$fileimport_calibration,
paste0(
arguments$csvdir,
"raw_calibration_data.csv")
)
rBiasCorrection::write_csv(
rv$aggregated_calibration,
paste0(
arguments$csvdir,
"aggregated_calibration_data.csv")
)
rBiasCorrection::write_csv(
rv$reg_stats,
paste0(arguments$csvdir,
rv$sample_locus_name,
"_regression_stats.csv")
)
rBiasCorrection::write_csv(
rv$reg_stats_corrected_h,
paste0(
arguments$csvdir,
rv$sample_locus_name,
"_corrected_regression_stats_h.csv")
)
rBiasCorrection::write_csv(
rv$reg_stats_corrected_c,
paste0(
arguments$csvdir,
rv$sample_locus_name,
"_corrected_regression_stats_c.csv")
)
} else if (rv$type_locus_sample == "2") {
# regression stats
for (key in names(rv$fileimport_calibration)) {
rBiasCorrection::write_csv(
rv$reg_stats[[key]],
paste0(
arguments$csvdir,
rv$sample_locus_name,
"_regression_stats_",
gsub("[[:punct:]]",
"",
key),
".csv")
)
}
for (key in names(rv$fileimport_cal_corrected)) {
rBiasCorrection::write_csv(
rv$reg_stats_corrected[[key]],
paste0(
arguments$csvdir,
"BC_regression_stats_corrected_",
gsub("[[:punct:]]",
"",
key),
".csv")
)
}
      # raw calibration data
for (key in names(rv$fileimport_calibration)) {
rBiasCorrection::write_csv(
rv$fileimport_calibration[[key]],
paste0(
arguments$csvdir,
"raw_calibration_data_",
gsub("[[:punct:]]",
"",
key),
".csv")
)
}
}
utils::zip(
zipfile = fname,
files = c(
paste0("csv/",
list.files(arguments$csvdir)),
paste0("plots/",
list.files(arguments$plotdir))
))
if (file.exists(paste0(tempdir(), "/", fname, ".zip"))) {
file.rename(paste0(tempdir(), "/", fname, ".zip"), fname)
}
# return to old wd
setwd(oldwd)
message(paste0("getwd(): ", getwd()))
},
contentType = "application/zip"
)
# present substitutions in extra tab (only if there were some)
if (nrow(rv$substitutions) > 0) {
rv$substitutions_calc <- TRUE
    # workaround to tell the UI that substitutions are present
output$got_substitutions <- reactive({
return(TRUE)
})
outputOptions(output,
"got_substitutions",
suspendWhenHidden = FALSE)
}
output$description <- renderText({
str1 <- paste0("The results table shows the ",
"BiasCorrected experimental data.")
str2 <- paste0("Column 1 shows the sample ID (type 1 data) ",
"or the locus ID (type 2 data).")
str3 <- paste0("All other columns represent the BiasCorrected ",
"experimental data for the CpG sites and the ",
"row-means of all CpG sites respectively.")
str4 <- paste0("The suffixes '_h' and '_c' in the column names ",
"indicate the regression algorithm used for ",
"BiasCorrection of the respective CpG site ",
"('_h': hyperbolic regression; '_c': cubic ",
"polynomial regression).")
HTML(
paste(
str1,
str2,
str3,
str4,
sep = "<br/><br/>"
)
)
})
rv$calculate_results <- FALSE
}
})
# Presentation of substituted values
observe({
req(rv$substitutions_calc)
output$description_sub <- renderText({
      str1 <- paste0("Substitutions occur when no result is found in ",
                     "the range of plausible values between 0 and 100 ",
                     "during the BiasCorrection.")
      str2 <- paste0("A 'border zone' is implemented in the ranges ",
                     "from -10% to 0% and from 100% to 110%.")
      str3 <- paste0("If a result lies in the range -10 < x < 0 ",
                     "percent or 100 < x < 110 percent, the ",
                     "value is substituted in the final results with ",
                     "0% or 100% respectively.")
str4 <- paste0("Values beyond these border zones will be ",
"substituted with a blank value in the final ",
"output, as they seem implausible and could ",
"indicate substantial errors in the underlying ",
"data.")
      str5 <- paste0("For detailed feedback, the substitutions ",
                     "table shows the algorithm's result ",
                     "('BiasCorrected value') and the corresponding ",
                     "substitution ('Substituted value') for the ",
                     "respective CpG site.")
HTML(
paste(
str1,
str2,
str3,
str4,
str5,
sep = "<br/><br/>"
)
)
})
# this workaround is related to this issue:
# TODO issue: https://github.com/rstudio/shiny/issues/2116
output$substituted_out <- renderUI({
t <- DT::dataTableOutput("moduleResults-substituted_values")
do.call(tagList, list(t))
})
# change colnames for better display
colnames(rv$substitutions) <- c("Sample ID",
"CpG site",
"BiasCorrected value",
"Substituted value",
"Regression")
output$download_substituted <- downloadHandler(
filename = function() {
paste0(rv$sample_locus_name,
"_substituted_values_",
rBiasCorrection::get_timestamp(),
".csv"
)
},
content = function(file) {
rBiasCorrection::write_csv(
table = rv$substitutions,
filename = file)
},
contentType = "text/csv"
)
output$substituted_values <- DT::renderDataTable({
DT::datatable(rv$substitutions,
options = list(scrollX = TRUE,
pageLength = 20,
dom = "ltip"),
rownames = FALSE) %>%
DT::formatRound(columns = c(3:4), digits = 3)
})
# msg2 <- "Please refer to the tab 'Substituted values'
# for further information."
msg2 <- paste0("Please scroll down to the section ",
"'Substituted values' for further information.")
if (nrow(rv$substitutions) == 1) {
msg1 <- "Substituted 1 value. "
} else {
msg1 <- paste0("Substituted ",
nrow(rv$substitutions),
" values.")
}
# show modal here
showModal(modalDialog(
paste(msg1, msg2),
title = "Substituted values"
))
})
}
#' @title module_results_ui
#'
#' @param id A character. The identifier of the shiny object
#'
#' @return The function returns a shiny ui module.
#'
#' @seealso \url{https://shiny.rstudio.com/articles/modules.html}
#'
#' @examples
#' if (interactive()) {
#' shinydashboard::tabItems(
#' shinydashboard::tabItem(
#' tabName = "results",
#' module_results_ui(
#' "moduleResults"
#' )
#' )
#' )
#' }
#'
#' @export
#'
# module_results_ui
module_results_ui <- function(id) {
ns <- NS(id)
tagList(
fluidRow(
column(
9,
box(
title = "BiasCorrected Results",
uiOutput(ns("corrected_data")),
width = 12
)
),
column(
3,
box(
title = "Download BiasCorrected Results",
div(class = "row",
style = "text-align: center",
downloadButton(
"moduleResults-download_final",
"Download corrected values",
style = paste0(
"white-space: normal; ",
"text-align:center; ",
"padding: 9.5px 9.5px 9.5px 9.5px; ",
"margin: 6px 10px 6px 10px;"))
),
tags$hr(),
div(class = "row",
style = "text-align: center",
downloadButton(
"moduleResults-download_all_data",
"Download zip archive (tables and plots)",
style = paste0(
"white-space: normal; ",
"text-align:center; ",
"padding: 9.5px 9.5px 9.5px 9.5px; ",
"margin: 6px 10px 6px 10px;"))
),
tags$hr(),
width = 12
),
box(
title = "Description",
htmlOutput(ns("description")),
width = 12
)
)
),
fluidRow(
conditionalPanel(
condition = "output['moduleResults-got_substitutions']",
column(
9,
box(
title = "Substituted values",
uiOutput(ns("substituted_out")),
width = 12
)
),
column(
3,
box(
title = "Download Substitutions",
div(class = "row",
style = "text-align: center",
downloadButton(
"moduleResults-download_substituted",
"Download substituted values",
style = paste0(
"white-space: normal; ",
"text-align:center; ",
"padding: 9.5px 9.5px 9.5px 9.5px; ",
"margin: 6px 10px 6px 10px;"))
),
tags$hr(),
width = 12
),
box(
title = "What are 'substitutions'?",
htmlOutput(ns("description_sub")),
width = 12
)
)
)
)
)
}
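# A minimal wiring sketch in the style of the roxygen examples above,
# pairing module_results_ui with module_results_server; the directory
# arguments mirror the callModule() invocation in server.R, while the
# empty `rv` and `input_re` objects are illustrative assumptions only.
if (interactive()) {
  rv <- list()
  shiny::callModule(
    module_results_server,
    "moduleResults",
    rv = rv,
    input_re = shiny::reactive(list()),
    logfilename = paste0(tempdir(), "/log.txt"),
    csvdir = paste0(tempdir(), "/csv/"),
    plotdir = paste0(tempdir(), "/plots/")
  )
}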
|
/scratch/gouwar.j/cran-all/cranData/BiasCorrector/R/moduleResults.R
|
# BiasCorrector: A GUI to Correct Measurement Bias in DNA Methylation Analyses
# Copyright (C) 2019-2022 Lorenz Kapsner
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#' @title module_settings_server
#'
#' @inheritParams module_calibrationfile_server
#'
#' @return The function returns a shiny server module.
#'
#' @seealso \url{https://shiny.rstudio.com/articles/modules.html}
#'
#' @examples
#' if (interactive()) {
#' rv <- list()
#' logfilename <- paste0(tempdir(), "/log.txt")
#' shiny::callModule(
#' module_settings_server,
#' "moduleSettings",
#' rv = rv,
#' logfilename = logfilename
#' )
#' }
#'
#' @export
#'
# module_settings_server
module_settings_server <- function(input,
output,
session,
rv,
input_re,
...) {
arguments <- list(...)
  # observe checkbox events (min-max correction)
observeEvent(
eventExpr = input_re()[["moduleSettings-settings_minmax"]],
handlerExpr = {
rBiasCorrection::write_log(
message = paste0(
"Settings: minmax = ",
input_re()[["moduleSettings-settings_minmax"]]),
logfilename = arguments$logfilename
)
rv$minmax <- input_re()[["moduleSettings-settings_minmax"]]
})
  # observe radio button events
observeEvent(
eventExpr = input_re()[["moduleSettings-settings_selection_method"]],
handlerExpr = {
rBiasCorrection::write_log(
message = paste0(
"Settings: selection_method = ",
input_re()[["moduleSettings-settings_selection_method"]]),
logfilename = arguments$logfilename
)
waround12 <- input_re()[["moduleSettings-settings_selection_method"]]
rv$selection_method <- waround12
})
# seed
observeEvent(
eventExpr = input_re()[["moduleSettings-settings_seed"]],
handlerExpr = {
rBiasCorrection::write_log(
message = paste0(
"Settings: seed = ",
input_re()[["moduleSettings-settings_seed"]]),
logfilename = arguments$logfilename
)
rv$seed <- input_re()[["moduleSettings-settings_seed"]]
}
)
# plot height
observeEvent(
eventExpr = input_re()[["moduleSettings-settings_plot_height"]],
handlerExpr = {
rBiasCorrection::write_log(
message = paste0(
"Settings: plot_height = ",
input_re()[["moduleSettings-settings_plot_height"]]),
logfilename = arguments$logfilename
)
rv$plot_height <- input_re()[["moduleSettings-settings_plot_height"]]
}
)
# plot width
observeEvent(
eventExpr = input_re()[["moduleSettings-settings_plot_width"]],
handlerExpr = {
rBiasCorrection::write_log(
message = paste0(
"Settings: plot_width = ",
input_re()[["moduleSettings-settings_plot_width"]]),
logfilename = arguments$logfilename
)
rv$plot_width <- input_re()[["moduleSettings-settings_plot_width"]]
}
)
# plot text size
observeEvent(
eventExpr = input_re()[["moduleSettings-settings_plot_textsize"]],
handlerExpr = {
rBiasCorrection::write_log(
message = paste0(
"Settings: plot_textsize = ",
input_re()[["moduleSettings-settings_plot_textsize"]]),
logfilename = arguments$logfilename
)
rv$plot_textsize <- input_re()[["moduleSettings-settings_plot_textsize"]]
}
)
observe({
req(rv$plot_textsize)
    # load example data
gdat <- rBiasCorrection::example._plot.df_agg
coef_h <- rBiasCorrection::example._plot_coef_h
coef_c <- rBiasCorrection::example._plot_coef_c
rBiasCorrection::create_exampleplot(
data = gdat,
coef_hyper = coef_h,
coef_cubic = coef_c,
plot_height = rv$plot_height,
plot_width = rv$plot_width,
plot_textsize = rv$plot_textsize,
filename = paste0(
arguments$tempdir,
"/exampleplot.png"
)
)
# render plots from local temporary file
output$settings_exampleplot <- renderImage(
expr = {
list(src = paste0(
arguments$tempdir,
"/exampleplot.png"
))
},
deleteFile = FALSE
)
output$settings_download_exampleplot <- downloadHandler(
filename = function() {
"Exampleplot.png"
},
content = function(file) {
file.copy(paste0(
arguments$tempdir,
"/exampleplot.png"
),
file)
},
contentType = "image/png"
)
})
}
#' @title module_settings_ui
#'
#' @param id A character. The identifier of the shiny object
#'
#' @return The function returns a shiny ui module.
#'
#' @seealso \url{https://shiny.rstudio.com/articles/modules.html}
#'
#' @examples
#' if (interactive()) {
#' shinydashboard::tabItems(
#' shinydashboard::tabItem(
#' tabName = "settings",
#' module_settings_ui(
#' "moduleSettings"
#' )
#' )
#' )
#' }
#'
#' @export
#'
# module_settings_ui
module_settings_ui <- function(id) {
ns <- NS(id)
tagList(
fluidRow(
# type of data box
box(
title = "Settings",
radioButtons(
ns("settings_selection_method"),
label = paste0(
"Criterion to automatically (pre-) select ",
"the regression method for correction"),
choices = list(
"Sum of squared errors (SSE)" = "SSE",
"Relative Error" = "RelError"
),
selected = "SSE"
),
tags$hr(),
checkboxInput(
ns("settings_minmax"),
label = "Use 'min-max'-correction (default: off)",
value = FALSE
),
helpText(
paste0(
"[CAUTION: This is an experimental feature ",
"and has neither been tested nor validated!]")
),
width = 12
),
box(
title = "Expert Settings",
h5(
        tags$b("It is recommended not to change these ",
               "settings unless you know exactly what ",
               "you are doing!")
),
tags$hr(),
numericInput(
ns("settings_seed"),
label = "Seed",
value = 1234,
min = 0,
max = Inf,
step = 1,
width = "30%"
),
helpText(
        paste0("The seed makes the calculation of the ",
               "unknowns of both the hyperbolic and the ",
               "cubic regression equations reproducible.")
),
tags$hr(),
column(
4,
numericInput(
ns("settings_plot_height"),
label = "Plot height (unit: inch)",
value = 5.3,
min = 1,
max = 50,
step = 0.01
),
helpText(
paste0("If you need a different resolution of ",
"the resulting plots, you can set the ",
"plot height (in inches) manually here.")
),
tags$hr(),
numericInput(
ns("settings_plot_width"),
label = "Plot width (unit: inch)",
value = 6.2,
min = 1,
max = 50,
step = 0.01
),
helpText(
paste0("If you need a different resolution of ",
"the resulting plots, you can set the ",
"plot width (in inches) manually here.")
),
tags$hr(),
numericInput(
ns("settings_plot_textsize"),
label = "Plot font size",
value = 15.15,
min = 1,
max = 50,
step = 0.01
),
helpText(
paste0(
"The font size of the plots. ",
"It is passed further to the 'size'-argument ",
"of ggplot2's 'element_text' function."
)
)
),
column(
8,
imageOutput(ns("settings_exampleplot")),
tags$head(
tags$style(
type = "text/css",
paste0(
"#moduleSettings-settings_exampleplot img ",
"{max-height: 100%; max-width: 100%; width: auto; ",
"display: block; margin-left: auto; margin-right: auto;}"))
),
div(class = "row",
style = "text-align: center",
downloadButton(
"moduleSettings-settings_download_exampleplot",
"Download Example Plot",
style = paste0(
"white-space: normal; ",
"text-align:center; ",
"padding: 9.5px 9.5px 9.5px 9.5px; ",
"margin: 6px 10px 6px 10px;")))
),
column(12,
tags$hr(),
helpText(
          paste0("Please note that the decimal separator ",
                 "of the numeric input fields depends ",
                 "on your operating system's and/or your browser's ",
                 "language settings. It can be a comma (',') ",
                 "or a period ('.').")
)
),
width = 12
)
)
)
}
|
/scratch/gouwar.j/cran-all/cranData/BiasCorrector/R/moduleSettings.R
|
# BiasCorrector: A GUI to Correct Measurement Bias in DNA Methylation Analyses
# Copyright (C) 2019-2022 Lorenz Kapsner
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#' @title module_statistics_server
#'
#' @inheritParams module_calibrationfile_server
#'
#' @return The function returns a shiny server module.
#'
#' @seealso \url{https://shiny.rstudio.com/articles/modules.html}
#'
#' @examples
#' if (interactive()) {
#' rv <- list()
#' logfilename <- paste0(tempdir(), "/log.txt")
#' shiny::callModule(
#' module_statistics_server,
#' "moduleStatistics",
#' rv = rv,
#' logfilename = logfilename
#' )
#' }
#'
#' @export
#'
# module_statistics_server
module_statistics_server <- function(input,
output,
session,
rv,
input_re) {
observe({
req(rv$reg_stats_corrected_c)
output$description <- renderText({
str1 <- paste0("The table shows the regression parameters ",
"of the hyperbolic regression and the cubic ",
"polynomial regression.<br/>")
str2 <- paste0("Column 1 presents the CpG site's ID.")
      str3 <- paste0("Column 2 presents the mean of the relative errors ",
                     "for every interrogated CpG site. It is ",
                     "calculated for every ",
                     "CpG site as the mean of the relative errors between the ",
                     "actual and the observed methylation degrees of each ",
                     "methylation step across all available calibrator ",
                     "DNAs for the respective CpG site. ",
                     "<br/>Formula:<br/> <i>abs(methylation_true - ",
                     "methylation_observed) / methylation_true </i>")
str4 <- paste0("Columns 3-9 comprise the sum of squared errors of the ",
"hyperbolic regression ('SSE [h]'), the coefficient ",
"of determination ('R\u00B2 [h]') and the coefficients ",
"of the hyperbolic equation that describes the ",
"hyperbolic regression curves for the respective ",
"CpG sites.")
str5 <- paste0("Columns 10-15 summarise the sum of squared errors of ",
"the cubic polynomial regression ('SSE [c]'), the ",
"coefficient of determination ('R\u00B2 [c]') and the ",
"coefficients of the cubic polynomial equations.")
str6 <- paste0("The rows highlighted with a green background colour ",
"indicate the regression method (hyperbolic or cubic ",
"polynomial) that is suggested by BiasCorrector for ",
"correcting data. This automatic choice of the ",
"regression method relies on either minimising the ",
"value of SSE (the default setting) or minimising ",
"the average relative error as selected by the user ",
"in the Settings tab.")
      str7 <- paste0("The sum of squared errors marked in bold indicates ",
                     "that, comparing the SSE values, this regression ",
                     "equation better fits the data points for the ",
                     "respective CpG site.")
HTML(
paste(
str1,
str2,
str3,
str4,
str5,
str6,
str7,
sep = "<br/><br/>"
)
)
})
# type 1 data:
if (rv$type_locus_sample == "1") {
rv$better_model_stats <- rBiasCorrection::better_model(
statstable_pre = rv$reg_stats,
statstable_post_hyperbolic = rv$reg_stats_corrected_h,
statstable_post_cubic = rv$reg_stats_corrected_c,
selection_method = rv$selection_method
)
output$regression_statistics <- renderUI({
output$dt_reg <- DT::renderDataTable({
# use formatstyle to highlight lower SSE values
render_regressionstatistics(
dt = rv$reg_stats[
, ("better_model") := rv$better_model_stats[
, get("better_model")
]
],
minmax = rv$minmax)
})
d <- DT::dataTableOutput("moduleStatistics-dt_reg")
do.call(tagList, list(d))
})
# create download button for regression statistics
output$download_regstat <- downloadHandler(
filename = function() {
paste0(
rv$sample_locus_name,
"_regression_stats_",
gsub("\\-",
"",
substr(Sys.time(), 1, 10)),
"_",
gsub("\\:",
"",
substr(Sys.time(), 12, 16)),
".csv"
)
},
content = function(file) {
rBiasCorrection::write_csv(
table = rv$reg_stats[
, -which(colnames(rv$reg_stats) == "better_model"), with = FALSE
],
filename = file)
},
contentType = "text/csv"
)
# type 2 data:
} else if (rv$type_locus_sample == "2") {
# create reactive selectinput:
sel_in_locus <- reactive({
selectInput(
inputId = "selectRegStatsLocus",
label = "Select locus:",
multiple = FALSE,
selectize = FALSE,
choices = names(rv$fileimport_calibration))
})
# create reactive df-selection:
df_regs <- reactive({
dt <- rv$reg_stats[[input_re()$selectRegStatsLocus]]
})
output$dt_regs <- DT::renderDataTable({
dt <- df_regs()
render_regressionstatistics(dt = dt,
minmax = rv$minmax)
})
# render head of page with selectInput and downloadbutton
output$statistics_select <- renderUI({
s1 <- sel_in_locus()
do.call(tagList, list(s1, tags$hr()))
})
output$biascorrection <- renderUI({
do.call(tagList,
list(
div(
class = "row",
style = "text-align: center",
actionButton(
"results", "BiasCorrect experimental data",
style = paste0(
"white-space: normal; ",
"text-align:center; ",
"padding: 9.5px 9.5px 9.5px 9.5px; ",
"margin: 6px 10px 6px 10px;")
)
)
)
)
})
output$regression_statistics <- renderUI({
dt <- DT::dataTableOutput("moduleStatistics-dt_regs")
do.call(tagList, list(dt))
})
# create download button for regression statistics
output$download_regstat <- downloadHandler(
filename = function() {
paste0(
rv$sample_locus_name,
"_regression_stats_",
gsub("[[:punct:]]",
"",
input_re()$selectRegStatsLocus),
"_",
gsub("\\-",
"",
substr(Sys.time(), 1, 10)),
"_",
gsub("\\:",
"",
substr(Sys.time(), 12, 16)),
".csv"
)
},
content = function(file) {
rBiasCorrection::write_csv(
table = rv$reg_stats[[input_re()$selectRegStatsLocus]][
, -which(
colnames(
rv$reg_stats[[input_re()$selectRegStatsLocus]]
) == "better_model"
), with = FALSE
],
filename = file)
},
contentType = "text/csv"
)
}
})
}
#' @title module_statistics_ui
#'
#' @param id A character. The identifier of the shiny object
#'
#' @return The function returns a shiny ui module.
#'
#' @seealso \url{https://shiny.rstudio.com/articles/modules.html}
#'
#' @examples
#' if (interactive()) {
#' shinydashboard::tabItems(
#' shinydashboard::tabItem(
#' tabName = "statistics",
#' module_statistics_ui(
#' "moduleStatistics"
#' )
#' )
#' )
#' }
#'
#' @export
#'
# module_statistics_ui
module_statistics_ui <- function(id) {
ns <- NS(id)
tagList(
fluidRow(
column(
9,
box(
title = "Regression Statistics",
uiOutput(ns("regression_statistics")),
width = 12
)
),
column(
3,
box(
title = "Download Regression Statistics",
uiOutput(ns("statistics_select")),
div(class =
"row",
style = "text-align: center",
downloadButton(
ns("download_regstat"),
"Download regression statistics",
style = paste0(
"white-space: normal; ",
"text-align:center; ",
"padding: 9.5px 9.5px 9.5px 9.5px; ",
"margin: 6px 10px 6px 10px;"
)
)
),
tags$hr(),
width = 12
),
conditionalPanel(
condition = "input['moduleFileupload-type_locus_sample'] == 2",
box(
title = "BiasCorrect Experimental Data",
uiOutput(ns("biascorrection")),
tags$hr(),
width = 12
)
),
box(
title = "Description",
htmlOutput(ns("description")),
width = 12
)
)
)
)
}
|
/scratch/gouwar.j/cran-all/cranData/BiasCorrector/R/moduleStatistics.R
|
# BiasCorrector: A GUI to Correct Measurement Bias in DNA Methylation Analyses
# Copyright (C) 2019-2022 Lorenz Kapsner
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
type2_fileconfirm <- function(filelist,
choiceslist,
rv,
...) {
arguments <- list(...)
rBiasCorrection::write_log(
message = "Entered 'type2FileConfirm'-Function",
logfilename = arguments$logfilename
)
rv$calibr_steps <- choiceslist[, ("step") := as.numeric(
get("step")
)][order(get("step"), decreasing = FALSE)]
if (rv$calibr_steps[, min(get("step"))] < 0 ||
rv$calibr_steps[, max(get("step"))] > 100) {
rBiasCorrection::write_log(
message = paste0("### ERROR ###\nCalibration steps must be ",
"in range '0 <= calibration step <= 100'."),
logfilename = arguments$logfilename
)
return("calibrange2")
} else if (rv$calibr_steps[, sum(duplicated(get("step")))] > 0) {
rBiasCorrection::write_log(
message = paste0("### ERROR ###\nThe calibration steps ",
"provided do not meet the file requirements!",
"\nCalibration steps must be in range '0 <= ",
"calibration step <= 100'.\nEach calibration ",
"step may only be assigned once."),
logfilename = arguments$logfilename
)
return("calibrange3")
} else {
    # get unique gene names of the first table (all tables must share
    # the same structure; this should have been validated beforehand)
gene_names <- unique(
filelist[[rv$calibr_steps[1, get("name")]]][
, c("locus_id", "CpG_count"), with = FALSE
]
)
# get list of colnames
col_names <- colnames(filelist[[rv$calibr_steps[1, get("name")]]])
# initialize final calibration_list
final_calibs <- list()
for (g in gene_names[, get("locus_id")]) {
      # create empty matrix/data.table with CpG_count + 2 columns
      # (true_methylation and row_means in addition to the CpG columns)
m <- data.table::data.table(
matrix(
nrow = 0,
ncol = (as.numeric(
gene_names[get("locus_id") == g, get("CpG_count")]
) + 2)
)
)
# rename columns
colnames(m) <- c("true_methylation",
col_names[2:(ncol(m) - 1)],
"row_means")
# store empty data.table with right dimensions in list
final_calibs[[g]] <- m
}
# loop through provided calibration files, extract
# calibration data for each locus and
# rbind it to final_calibs for specific locus id
for (n in seq_len(nrow(rv$calibr_steps))) {
# get imported calibration data (step by step)
basefile <- filelist[[rv$calibr_steps[n, get("name")]]]
calstep <- rv$calibr_steps[n, get("step")]
vec <- colnames(basefile)
# loop through loci in basefile and append results to
# final_calibs
for (locus in gene_names[, get("locus_id")]) {
vec2 <- c(vec[2:(gene_names[get("locus_id") == locus, get("CpG_count")]
+ 1)],
"row_means")
add_df <- basefile[get("locus_id") == locus, (vec2), with = FALSE]
final_calibs[[locus]] <- rbind(
final_calibs[[locus]],
cbind(true_methylation = rep(calstep, nrow(add_df)),
add_df
)
)
}
}
return(final_calibs)
}
}
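# A minimal sketch of the input shapes this helper expects (toy data and
# file names are hypothetical; the column layout mirrors the code above:
# locus_id, one column per CpG site, row_means, CpG_count):
if (interactive()) {
  toy <- data.table::data.table(
    locus_id = "locus_a", CpG_1 = 3.1, CpG_2 = 4.2,
    row_means = 3.65, CpG_count = 2
  )
  filelist <- list(cal_0 = data.table::copy(toy),
                   cal_100 = data.table::copy(toy))
  choiceslist <- data.table::data.table(
    name = c("cal_0", "cal_100"), step = c("0", "100")
  )
  # returns one calibration data.table per locus; each file's rows are
  # keyed by the assigned calibration step in 'true_methylation'
  type2_fileconfirm(filelist, choiceslist, rv = list(),
                    logfilename = tempfile())
}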
|
/scratch/gouwar.j/cran-all/cranData/BiasCorrector/R/type2Files.R
|
# directories
#% tempdir <- tempdir()
#% plotdir <- paste0(tempdir, "/plots/")
#% csvdir <- paste0(tempdir, "/csv/")
#
# logfilename
#% logfilename <- paste0(tempdir, "/biascorrector.log")
#
# maximum filesize in MB
#% maxfilesize <- 100
#% options(shiny.maxRequestSize = maxfilesize*1024^2)
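#
# server.R additionally expects a `parallel` flag, which it forwards to
# rBiasCorrection::on_start(); a minimal sketch in the same commented
# style (FALSE is an illustrative assumption, not a shipped default):
#% parallel <- FALSE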
|
/scratch/gouwar.j/cran-all/cranData/BiasCorrector/inst/application/global.R
|
# BiasCorrector: A GUI to Correct Measurement Bias in DNA Methylation Analyses
# Copyright (C) 2019-2022 Lorenz Kapsner
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
shiny::shinyServer(function(input, output, session) {
rv <- shiny::reactiveValues(
ending = NULL,
exp_filereq = FALSE,
    type_locus_sample = NULL,
    # currently, only type 1 correction is implemented
    # (hard-coded to 1 further below)
fileimport_experimental = NULL,
fileimport_calibration = NULL,
fileimport_list = NULL,
reg_stats = NULL,
modal_closed = TRUE,
modal_type = NULL,
calculate_results = FALSE,
final_results = NULL,
sample_locus_name = NULL,
type2cal_uploaded = FALSE,
type1cal_uploaded = FALSE,
plotting_finished = FALSE,
substitutions = NULL,
substitutions_calc = NULL,
type2_calcres = NULL,
choices_list = NULL,
result_list_type2 = NULL,
result_list = NULL,
temp_results = NULL,
vec_cal = NULL,
plotdir = NULL,
csvdir = NULL,
tempdir = NULL,
b = NULL,
omitnas = NULL,
calibr_steps = NULL,
logfile = NULL,
corrected_finished = FALSE,
fileimport_cal_corrected = NULL,
reg_stats_corrected = NULL,
result_list_type2_corrected = NULL,
minmax = FALSE, # initial minmax-value
row_callback = c(
"function(row, data) {",
" for(var i=0; i<data.length; i++) {",
" if(data[i] === null) {",
" $('td:eq('+i+')', row).html('NA')",
" .css({",
"'color': 'rgb(151,151,151)',",
"'font-style': 'italic'});",
" }",
" }",
"}"
)
)
# run start function
rBiasCorrection::on_start(
plotdir = plotdir,
csvdir = csvdir,
logfilename = logfilename,
parallel = parallel
)
message(paste0("plotdir: ", plotdir))
message(paste0("csvdir: ", csvdir))
message(paste0("tempdir: ", tempdir))
  # TODO: selection of data type (currently hard-coded to type 1 data)
rv$type_locus_sample <- 1
# scientific purpose
shiny::showModal(shiny::modalDialog(
title = paste0(
"This program is to be used for scientific ",
"research purposes only"
),
paste0(
"I hereby confirm to use this program only for ",
"scientific research purposes."
),
footer = shiny::tagList(
shiny::actionButton("dismiss_modal", label = "Cancel"),
shiny::modalButton("Confirm")
)
))
shiny::observeEvent(input$dismiss_modal, {
rBiasCorrection::write_log(message = "dismiss modal",
logfilename = logfilename)
rv$modal_closed <- TRUE
rv$modal_type <- NULL
shiny::removeModal()
session$reload()
})
shiny::observeEvent(input$reset, {
rBiasCorrection::write_log(message = "restarting app",
logfilename = logfilename)
rBiasCorrection::clean_up(plotdir, csvdir)
session$reload()
})
output$samplelocus_out <- shiny::reactive({
paste(rv$sample_locus_name)
})
input_reactive <- reactive({
input
})
###### Experimental data
# Fileupload module
shiny::callModule(
module_fileupload_server,
"moduleFileupload",
rv = rv,
input_re = input_reactive,
logfilename = logfilename
)
# table rendering module
shiny::callModule(
module_experimentalfile_server,
"moduleExperimentalFile",
rv = rv,
logfilename = logfilename
)
# some ui stuff
shiny::observe({
shiny::req(rv$fileimport_experimental)
cat(
paste0(
"\nSome UI Stuff: disable Radiobuttons, ",
"experimentalFile and textInput\n"
)
)
# disable exampledata-button
shinyjs::disable("moduleFileupload-load_example_data")
# enable logfile-download
shinyjs::enable("moduleLog-download_logfile")
# disable radiobuttons
shinyjs::disable("moduleFileupload-type_locus_sample")
# disable upload possibility of experimental file
shinyjs::disable("moduleFileupload-experimentalFile")
# disable textinput
if (rv$type_locus_sample == "1") {
shinyjs::disable("moduleFileupload-locusname")
    } else if (rv$type_locus_sample == "2") {
shinyjs::disable("moduleFileupload-samplename")
}
# render menu with experimental file
output$menu <- shinydashboard::renderMenu({
shinydashboard::sidebarMenu(
shinydashboard::menuItem(
"Experimental Data",
tabName = "panel_1",
icon = icon("table")
)
)
})
})
shiny::observeEvent(
eventExpr = {
req(isTRUE(rv$type2cal_uploaded) ||
isTRUE(rv$type1cal_uploaded))
},
handlerExpr = {
# error handling, when uploading new data in same session
output$menu <- shinydashboard::renderMenu({
shinydashboard::sidebarMenu(
shinydashboard::menuItem(
"Experimental Data",
tabName = "panel_1",
icon = icon("table")
),
shinydashboard::menuItem(
"Calibration Data",
tabName = "panel_2",
icon = icon("table")
),
shiny::actionButton(
"run",
"Run Analysis",
style = paste0(
"white-space: normal; ",
"text-align:center; ",
"padding: 9.5px 9.5px 9.5px 9.5px;",
"margin: 6px 10px 6px 10px;"
)
)
)
})
shinydashboard::updateTabItems(session, "tabs", "panel_2")
cat("\nSome UI Stuff: disable calibrationFile\n")
# disable upload possibility of calibration file
shinyjs::disable("calibrationFile")
})
  ###### Calibration data
# table rendering module
shiny::callModule(
module_calibrationfile_server,
"moduleCalibrationFile",
rv = rv,
input_re = input_reactive,
logfilename = logfilename
)
shiny::observe({
shiny::req(rv$fileimport_calibration)
# enable calibrationfile download button
shinyjs::enable("moduleCalibrationFile-download_calibration")
})
###### Run Analysis
shiny::observeEvent(input$run, {
if (!is.null(rv$fileimport_calibration)) {
output$menu <- shinydashboard::renderMenu({
shinydashboard::sidebarMenu(
shinydashboard::menuItem(
"Experimental Data",
tabName = "panel_1",
icon = icon("table")
),
shinydashboard::menuItem(
"Calibration Data",
tabName = "panel_2",
icon = icon("table")
),
shinydashboard::menuItem(
"Regression Results",
icon = icon("chart-line"),
startExpanded = TRUE,
shinydashboard::menuSubItem(
"Regression Plots",
tabName = "panel_3",
icon = icon("chart-line"),
selected = TRUE
)
)
)
})
shinydashboard::updateTabItems(session, "tabs", "panel_3")
# disable run-analysis button
shinyjs::disable("run")
# disable some settings here
shinyjs::disable("moduleSettings-settings_minmax")
} else if (rv$type_locus_sample == "2") {
shiny::showModal(
shiny::modalDialog(
"Please confirm the assignment of the calibration steps.",
title = "Confirmation needed",
footer = shiny::modalButton("OK")
)
)
}
})
###### Plotting
observe({
    # this is needed to open the new tab (Regression Plots)
    # before rendering the plots
if (input$tabs == "panel_3") {
rv$run <- TRUE
}
})
shiny::callModule(
module_plotting_server,
"modulePlotting",
rv = rv,
input_re = input_reactive,
logfilename = logfilename,
plotdir = plotdir
)
# when plotting has finished
shiny::observe({
shiny::req(rv$plotting_finished)
if (rv$type_locus_sample == "1") {
output$menu <- shinydashboard::renderMenu({
shinydashboard::sidebarMenu(
shinydashboard::menuItem(
"Experimental Data",
tabName = "panel_1",
icon = icon("table")
),
shinydashboard::menuItem(
"Calibration Data",
tabName = "panel_2",
icon = icon("table")
),
shinydashboard::menuItem(
"Regression Results",
icon = icon("chart-line"),
startExpanded = FALSE,
shinydashboard::menuSubItem(
"Regression Plots",
tabName = "panel_3",
icon = icon("chart-line"),
selected = TRUE
),
shinydashboard::menuSubItem(
"Regression Statistics",
tabName = "panel_4",
icon = icon("angellist")
),
shinydashboard::menuSubItem(
"Corrected Regression Plots",
tabName = "panel_7",
icon = icon("chart-line")
),
shinydashboard::menuSubItem(
"Corrected Regression Statistics",
tabName = "panel_8",
icon = icon("angellist")
),
shinydashboard::menuSubItem(
"Select Regression Model",
tabName = "panel_5",
icon = icon("chart-line")
),
shiny::actionButton(
"results",
"BiasCorrect experimental data",
style = paste0(
"white-space: normal; ",
"text-align:center; ",
"padding: 9.5px 9.5px 9.5px 9.5px; ",
"margin: 6px 10px 6px 10px;"
)
)
)
)
})
} else {
output$menu <- shinydashboard::renderMenu({
shinydashboard::sidebarMenu(
shinydashboard::menuItem(
"Experimental Data",
tabName = "panel_1",
icon = icon("table")
),
shinydashboard::menuItem(
"Calibration Data",
tabName = "panel_2",
icon = icon("table")
),
shinydashboard::menuItem(
"Regression Results",
icon = icon("chart-line"),
startExpanded = FALSE,
shinydashboard::menuSubItem(
"Regression Plots",
tabName = "panel_3",
icon = icon("chart-line"),
selected = TRUE
),
shinydashboard::menuSubItem(
"Regression Statistics",
tabName = "panel_4",
icon = icon("chart-line")
),
shinydashboard::menuSubItem(
"Corrected Regression Plots",
tabName = "panel_7",
icon = icon("angellist")
)
)
)
})
}
shinydashboard::updateTabItems(session, "tabs", "panel_3")
})
###### Regression Statistics
shiny::callModule(
module_statistics_server,
"moduleStatistics",
rv = rv,
input_re = input_reactive
)
###### Plot Corrected Results
shiny::callModule(
module_correctedplots_server,
"moduleCorrectedPlots",
rv = rv,
input_re = input_reactive,
logfilename = logfilename,
plotdir = plotdir
)
###### Statistics Corrected Results
shiny::callModule(
module_correctedstats_server,
"moduleCorrectedStatistics",
rv = rv,
input_re = input_reactive
)
###### Model Selection
shiny::callModule(
module_modelselection_server,
"moduleModelSelection",
rv = rv,
input_re = input_reactive
)
# Calculate results for experimental data
shiny::observeEvent(input$results, {
# disable Biascorrection-Button
shinyjs::disable("results")
if (rv$type_locus_sample == "1") {
output$menu <- shinydashboard::renderMenu({
shinydashboard::sidebarMenu(
shinydashboard::menuItem(
"Experimental Data",
tabName = "panel_1",
icon = icon("table")
),
shinydashboard::menuItem(
"Calibration Data",
tabName = "panel_2",
icon = icon("table")
),
shinydashboard::menuItem(
"Regression Results",
icon = icon("chart-line"),
startExpanded = FALSE,
shinydashboard::menuSubItem(
"Regression Plots",
tabName = "panel_3",
icon = icon("chart-line")
),
shinydashboard::menuSubItem(
"Regression Statistics",
tabName = "panel_4",
icon = icon("angellist")
),
shinydashboard::menuSubItem(
"Corrected Regression Plots",
tabName = "panel_7",
icon = icon("chart-line")
),
shinydashboard::menuSubItem(
"Corrected Regression Statistics",
tabName = "panel_8",
icon = icon("angellist")
),
shinydashboard::menuSubItem(
"Select Regression Model",
tabName = "panel_5",
icon = icon("chart-line")
)
),
shinydashboard::menuItem(
"BiasCorrected Results",
tabName = "panel_6",
icon = icon("angellist")
)
)
})
} else if (rv$type_locus_sample == "2") {
output$menu <- shinydashboard::renderMenu({
shinydashboard::sidebarMenu(
shinydashboard::menuItem(
"Experimental Data",
tabName = "panel_1",
icon = icon("table")
),
shinydashboard::menuItem(
"Calibration Data",
tabName = "panel_2",
icon = icon("table")
),
shinydashboard::menuItem(
"Regression Results",
icon = icon("chart-line"),
startExpanded = FALSE,
shinydashboard::menuSubItem(
"Regression Plots",
tabName = "panel_3",
icon = icon("chart-line")
),
shinydashboard::menuSubItem(
"Regression Statistics",
tabName = "panel_4",
icon = icon("chart-line")
),
shinydashboard::menuSubItem(
"Corrected Regression Plots",
tabName = "panel_7",
icon = icon("angellist")
)
),
shinydashboard::menuItem(
"BiasCorrected Results",
tabName = "panel_6",
icon = icon("angellist")
)
)
})
}
# reset reactive values
rv$calculate_results <- TRUE
rv$final_results <- NULL
shinydashboard::updateTabItems(session, "tabs", "panel_6")
})
  ###### Calculate Results
shiny::callModule(
module_results_server,
"moduleResults",
rv = rv,
input_re = input_reactive,
logfilename = logfilename,
csvdir = csvdir,
plotdir = plotdir
)
###### Logs
shiny::callModule(
module_log_server,
"moduleLog",
rv = rv,
input_re = input_reactive,
logfilename = logfilename
)
###### Settings
shiny::callModule(
module_settings_server,
"moduleSettings",
rv = rv,
input_re = input_reactive,
logfilename = logfilename,
tempdir = tempdir
)
###### Info
shiny::callModule(module_info_server,
"moduleInfo",
rv = rv,
input_re = input_reactive)
})
|
/scratch/gouwar.j/cran-all/cranData/BiasCorrector/inst/application/server.R
|
# BiasCorrector: A GUI to Correct Measurement Bias in DNA Methylation Analyses
# Copyright (C) 2019-2022 Lorenz Kapsner
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# define UI
shiny::shinyUI(shiny::tagList(
# https://github.com/rstudio/shinydashboard/issues/255
shiny::tags$head(shiny::tags$style(shiny::HTML(
paste0(
".wrapper {height: auto !important; ",
"position:relative; overflow-x:hidden; ",
"overflow-y:hidden}"
)
))),
shinydashboard::dashboardPage(
skin = "black",
# App title --> change for development
shinydashboard::dashboardHeader(title = "BiasCorrector"),
# h5("based on Moskalev et al. 2011"),
# Sidebar Layout with input and output definitions
shinydashboard::dashboardSidebar(
# shinyjs stuff
shinyjs::useShinyjs(),
#Sidebar Panel
shinydashboard::sidebarMenu(
id = "tabs",
shinydashboard::menuItem("File Upload",
tabName = "dashboard",
icon = icon("file")),
shinydashboard::sidebarMenuOutput("menu"),
shinydashboard::menuItem("Log",
tabName = "panel_9",
icon = icon("file-alt")),
shiny::tags$hr(),
shinydashboard::menuItem("Settings",
tabName = "settings",
icon = icon("user-cog")),
shinydashboard::menuItem("Info",
tabName = "info",
icon = icon("info-circle")),
shiny::actionButton("reset",
"Reset App",
width = "80%") # Restart session
),
shiny::div(
      style = paste0(
        "position:fixed; bottom:0; left:0; ",
        "white-space: normal; text-align:left; ",
        "padding: 9.5px 9.5px 9.5px 9.5px; ",
        "margin: 6px 10px 6px 10px; ",
        "box-sizing:border-box; height: auto; ",
        "width: 230px;"
),
shiny::HTML(paste0(
"Version:",
"<br/>rBiasCorrection: ", utils::packageVersion("rBiasCorrection"),
"<br/>BiasCorrector: ", utils::packageVersion("BiasCorrector")
))
)
),
shinydashboard::dashboardBody(
# shinyjs stuff
shinyjs::useShinyjs(),
shinydashboard::tabItems(
shinydashboard::tabItem(
tabName = "dashboard",
module_fileupload_ui("moduleFileupload",
maxfilesize = maxfilesize)
),
# experimental data panels
shinydashboard::tabItem(
tabName = "panel_1",
module_experimentalfile_ui("moduleExperimentalFile")
),
# calibration data panel
shinydashboard::tabItem(
tabName = "panel_2",
module_calibrationfile_ui("moduleCalibrationFile")
),
# regression plots
shinydashboard::tabItem(
tabName = "panel_3",
module_plotting_ui("modulePlotting")
),
# regression statistics
shinydashboard::tabItem(
tabName = "panel_4",
module_statistics_ui("moduleStatistics")
),
# select regression model
shinydashboard::tabItem(
tabName = "panel_5",
module_modelselection_ui("moduleModelSelection")
),
      # biascorrected results
shinydashboard::tabItem(
tabName = "panel_6",
module_results_ui("moduleResults")
),
      # corrected regression plots
shinydashboard::tabItem(
tabName = "panel_7",
module_correctedplots_ui("moduleCorrectedPlots")
),
      # corrected regression statistics
shinydashboard::tabItem(
tabName = "panel_8",
module_correctedstatistics_ui("moduleCorrectedStatistics")
),
shinydashboard::tabItem(
tabName = "panel_9",
module_log_ui("moduleLog")
),
shinydashboard::tabItem(
tabName = "settings",
module_settings_ui("moduleSettings")
),
shinydashboard::tabItem(
tabName = "info",
module_info_ui("moduleInfo")
)
)
)
)
))
|
/scratch/gouwar.j/cran-all/cranData/BiasCorrector/inst/application/ui.R
|
# Package BiasedUrn, file urn1.R
# R interface to univariate noncentral hypergeometric distributions
# *****************************************************************************
# dFNCHypergeo
# Mass function, Fisher's NonCentral Hypergeometric distribution
# *****************************************************************************
dFNCHypergeo <-
function(x, m1, m2, n, odds, precision=1E-7) {
stopifnot(is.numeric(x), is.numeric(m1), is.numeric(m2),
is.numeric(n), is.numeric(odds), is.numeric(precision));
.Call("dFNCHypergeo",
as.integer(x), # Number of red balls drawn, scalar or vector
as.integer(m1), # Number of red balls in urn
as.integer(m2), # Number of white balls in urn
as.integer(n), # Number of balls drawn from urn
as.double(odds), # Odds of getting a red ball among one red and one white
as.double(precision), # Precision of calculation
PACKAGE = "BiasedUrn");
}
# *****************************************************************************
# dWNCHypergeo
# Mass function, Wallenius' NonCentral Hypergeometric distribution
# *****************************************************************************
dWNCHypergeo <-
function(x, m1, m2, n, odds, precision=1E-7) {
stopifnot(is.numeric(x), is.numeric(m1), is.numeric(m2),
is.numeric(n), is.numeric(odds), is.numeric(precision));
.Call("dWNCHypergeo",
as.integer(x), # Number of red balls drawn, scalar or vector
as.integer(m1), # Number of red balls in urn
as.integer(m2), # Number of white balls in urn
as.integer(n), # Number of balls drawn from urn
as.double(odds), # Odds of getting a red ball among one red and one white
as.double(precision), # Precision of calculation
PACKAGE = "BiasedUrn");
}
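# Usage sketch (illustrative, not part of the original file; assumes the
# BiasedUrn package is installed and loaded): at odds = 1 both noncentral
# distributions reduce to the central hypergeometric, so the three values
# below should agree up to the requested precision.
if (interactive()) {
  dFNCHypergeo(2, m1 = 5, m2 = 5, n = 4, odds = 1)
  dWNCHypergeo(2, m1 = 5, m2 = 5, n = 4, odds = 1)
  dhyper(2, 5, 5, 4)   # stats::dhyper, central hypergeometric
}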
# *****************************************************************************
# pFNCHypergeo
# Cumulative distribution function for
# Fisher's NonCentral Hypergeometric distribution
# *****************************************************************************
pFNCHypergeo <-
function(x, m1, m2, n, odds, precision=1E-7, lower.tail=TRUE) {
stopifnot(is.numeric(x), is.numeric(m1), is.numeric(m2), is.numeric(n),
is.numeric(odds), is.numeric(precision), is.vector(lower.tail));
.Call("pFNCHypergeo",
as.integer(x), # Number of red balls drawn, scalar or vector
as.integer(m1), # Number of red balls in urn
as.integer(m2), # Number of white balls in urn
as.integer(n), # Number of balls drawn from urn
as.double(odds), # Odds of getting a red ball among one red and one white
as.double(precision), # Precision of calculation
as.logical(lower.tail), # TRUE: P(X <= x), FALSE: P(X > x)
PACKAGE = "BiasedUrn");
}
# *****************************************************************************
# pWNCHypergeo
# Cumulative distribution function for
# Wallenius' NonCentral Hypergeometric distribution
# *****************************************************************************
pWNCHypergeo <-
function(x, m1, m2, n, odds, precision=1E-7, lower.tail=TRUE) {
stopifnot(is.numeric(x), is.numeric(m1), is.numeric(m2), is.numeric(n),
is.numeric(odds), is.numeric(precision), is.vector(lower.tail));
.Call("pWNCHypergeo",
as.integer(x), # Number of red balls drawn, scalar or vector
as.integer(m1), # Number of red balls in urn
as.integer(m2), # Number of white balls in urn
as.integer(n), # Number of balls drawn from urn
as.double(odds), # Odds of getting a red ball among one red and one white
as.double(precision), # Precision of calculation
as.logical(lower.tail), # TRUE: P(X <= x), FALSE: P(X > x)
PACKAGE = "BiasedUrn");
}
# *****************************************************************************
# qFNCHypergeo
# Quantile function for
# Fisher's NonCentral Hypergeometric distribution.
# Returns the lowest x for which P(X<=x) >= p when lower.tail = TRUE
# Returns the lowest x for which P(X >x) <= p when lower.tail = FALSE
# *****************************************************************************
# Note: qWNCHypergeo if more accurate than qFNCHypergeo when odds = 1
qFNCHypergeo <-
function(p, m1, m2, n, odds, precision=1E-7, lower.tail=TRUE) {
stopifnot(is.numeric(p), is.numeric(m1), is.numeric(m2), is.numeric(n),
is.numeric(odds), is.numeric(precision), is.vector(lower.tail));
.Call("qFNCHypergeo",
as.double(p), # Cumulative probability
as.integer(m1), # Number of red balls in urn
as.integer(m2), # Number of white balls in urn
as.integer(n), # Number of balls drawn from urn
as.double(odds), # Odds of getting a red ball among one red and one white
as.double(precision), # Precision of calculation
as.logical(lower.tail), # TRUE: P(X <= x), FALSE: P(X > x)
PACKAGE = "BiasedUrn");
}
# *****************************************************************************
# qWNCHypergeo
# Quantile function for
# Wallenius' NonCentral Hypergeometric distribution.
# Returns the lowest x for which P(X<=x) >= p when lower.tail = TRUE
# Returns the lowest x for which P(X >x) <= p when lower.tail = FALSE
# *****************************************************************************
qWNCHypergeo <-
function(p, m1, m2, n, odds, precision=1E-7, lower.tail=TRUE) {
stopifnot(is.numeric(p), is.numeric(m1), is.numeric(m2), is.numeric(n),
is.numeric(odds), is.numeric(precision), is.vector(lower.tail));
.Call("qWNCHypergeo",
as.double(p), # Cumulative probability
as.integer(m1), # Number of red balls in urn
as.integer(m2), # Number of white balls in urn
as.integer(n), # Number of balls drawn from urn
as.double(odds), # Odds of getting a red ball among one red and one white
as.double(precision), # Precision of calculation
as.logical(lower.tail), # TRUE: P(X <= x), FALSE: P(X > x)
PACKAGE = "BiasedUrn");
}
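# Usage sketch (illustrative): feeding a CDF value back through the
# quantile function should recover the original x, up to the stated
# precision, since qFNCHypergeo returns the smallest x with P(X<=x) >= p.
if (interactive()) {
  p <- pFNCHypergeo(3, m1 = 6, m2 = 6, n = 6, odds = 2)
  qFNCHypergeo(p, m1 = 6, m2 = 6, n = 6, odds = 2)  # expected: 3
}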
# *****************************************************************************
# rFNCHypergeo
# Random variate generation function for
# Fisher's NonCentral Hypergeometric distribution.
# *****************************************************************************
rFNCHypergeo <-
function(nran, m1, m2, n, odds, precision=1E-7) {
stopifnot(is.numeric(nran), is.numeric(m1), is.numeric(m2),
is.numeric(n), is.numeric(odds), is.numeric(precision));
.Call("rFNCHypergeo",
as.integer(nran), # Number of random variates desired
as.integer(m1), # Number of red balls in urn
as.integer(m2), # Number of white balls in urn
as.integer(n), # Number of balls drawn from urn
as.double(odds), # Odds of getting a red ball among one red and one white
as.double(precision), # Precision of calculation
PACKAGE = "BiasedUrn");
}
# *****************************************************************************
# rWNCHypergeo
# Random variate generation function for
# Wallenius' NonCentral Hypergeometric distribution.
# *****************************************************************************
rWNCHypergeo <-
function(nran, m1, m2, n, odds, precision=1E-7) {
stopifnot(is.numeric(nran), is.numeric(m1), is.numeric(m2),
is.numeric(n), is.numeric(odds), is.numeric(precision));
.Call("rWNCHypergeo",
as.integer(nran), # Number of random variates desired
as.integer(m1), # Number of red balls in urn
as.integer(m2), # Number of white balls in urn
as.integer(n), # Number of balls drawn from urn
as.double(odds), # Odds of getting a red ball among one red and one white
as.double(precision), # Precision of calculation
PACKAGE = "BiasedUrn");
}
# *****************************************************************************
# meanFNCHypergeo
# Calculates the mean of
# Fisher's NonCentral Hypergeometric distribution.
# *****************************************************************************
meanFNCHypergeo <- function(
m1, # Number of red balls in urn
m2, # Number of white balls in urn
n, # Number of balls drawn from urn
odds, # Odds of getting a red ball among one red and one white
precision=1E-7) { # Precision of calculation
stopifnot(is.numeric(m1), is.numeric(m2), is.numeric(n),
is.numeric(odds), is.numeric(precision));
.Call("momentsFNCHypergeo", as.integer(m1), as.integer(m2),
as.integer(n), as.double(odds), as.double(precision),
as.integer(1), # 1 for mean, 2 for variance
PACKAGE = "BiasedUrn");
}
# *****************************************************************************
# meanWNCHypergeo
# Calculates the mean of
# Wallenius' NonCentral Hypergeometric distribution.
# *****************************************************************************
meanWNCHypergeo <- function(
m1, # Number of red balls in urn
m2, # Number of white balls in urn
n, # Number of balls drawn from urn
odds, # Odds of getting a red ball among one red and one white
precision=1E-7) { # Precision of calculation
stopifnot(is.numeric(m1), is.numeric(m2), is.numeric(n),
is.numeric(odds), is.numeric(precision));
.Call("momentsWNCHypergeo", as.integer(m1), as.integer(m2),
as.integer(n), as.double(odds), as.double(precision),
as.integer(1), # 1 for mean, 2 for variance
PACKAGE = "BiasedUrn");
}
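# Consistency sketch (illustrative): the empirical mean of simulated
# draws should approach the analytic mean returned here.
if (interactive()) {
  set.seed(1)
  x <- rWNCHypergeo(10000, m1 = 8, m2 = 12, n = 10, odds = 3)
  mean(x)                          # empirical mean of the draws
  meanWNCHypergeo(8, 12, 10, 3)    # analytic mean for comparison
}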
# *****************************************************************************
# varFNCHypergeo
# Calculates the variance of
# Fisher's NonCentral Hypergeometric distribution.
# *****************************************************************************
varFNCHypergeo <- function(
m1, # Number of red balls in urn
m2, # Number of white balls in urn
n, # Number of balls drawn from urn
odds, # Odds of getting a red ball among one red and one white
precision=1E-7) { # Precision of calculation
stopifnot(is.numeric(m1), is.numeric(m2), is.numeric(n),
is.numeric(odds), is.numeric(precision));
.Call("momentsFNCHypergeo", as.integer(m1), as.integer(m2),
as.integer(n), as.double(odds), as.double(precision),
as.integer(2), # 1 for mean, 2 for variance
PACKAGE = "BiasedUrn");
}
# *****************************************************************************
# varWNCHypergeo
# Calculates the variance of
# Wallenius' NonCentral Hypergeometric distribution.
# *****************************************************************************
varWNCHypergeo <- function(
m1, # Number of red balls in urn
m2, # Number of white balls in urn
n, # Number of balls drawn from urn
odds, # Odds of getting a red ball among one red and one white
precision=1E-7) { # Precision of calculation
stopifnot(is.numeric(m1), is.numeric(m2), is.numeric(n),
is.numeric(odds), is.numeric(precision));
.Call("momentsWNCHypergeo", as.integer(m1), as.integer(m2),
as.integer(n), as.double(odds), as.double(precision),
as.integer(2), # 1 for mean, 2 for variance
PACKAGE = "BiasedUrn");
}
# *****************************************************************************
# modeFNCHypergeo
# Calculates the mode of
# Fisher's NonCentral Hypergeometric distribution.
# *****************************************************************************
# Note: The result is exact regardless of the precision parameter.
# The precision parameter is included only for analogy with modeWNCHypergeo.
modeFNCHypergeo <- function(
m1, # Number of red balls in urn
m2, # Number of white balls in urn
n, # Number of balls drawn from urn
odds, # Odds of getting a red ball among one red and one white
precision=0) { # Precision of calculation
stopifnot(is.numeric(m1), is.numeric(m2), is.numeric(n),
is.numeric(odds));
.Call("modeFNCHypergeo", as.integer(m1), as.integer(m2),
as.integer(n), as.double(odds),
PACKAGE = "BiasedUrn");
}
# *****************************************************************************
# modeWNCHypergeo
# Calculates the mode of
# Wallenius' NonCentral Hypergeometric distribution.
# *****************************************************************************
modeWNCHypergeo <- function(
m1, # Number of red balls in urn
m2, # Number of white balls in urn
n, # Number of balls drawn from urn
odds, # Odds of getting a red ball among one red and one white
precision=1E-7) { # Precision of calculation
stopifnot(is.numeric(m1), is.numeric(m2), is.numeric(n),
is.numeric(odds), is.numeric(precision));
.Call("modeWNCHypergeo", as.integer(m1), as.integer(m2),
as.integer(n), as.double(odds), as.double(precision),
PACKAGE = "BiasedUrn");
}
# *****************************************************************************
# oddsFNCHypergeo
# Estimate odds ratio from mean for
# Fisher's NonCentral Hypergeometric distribution
# *****************************************************************************
# Uses Cornfield's approximation. Specified precision is ignored.
oddsFNCHypergeo <-
function(mu, m1, m2, n, precision=0.1) {
stopifnot(is.numeric(mu), is.numeric(m1), is.numeric(m2),
is.numeric(n), is.numeric(precision));
.Call("oddsFNCHypergeo",
as.double(mu), # Observed mean of x1
as.integer(m1), # Number of red balls in urn
as.integer(m2), # Number of white balls in urn
as.integer(n), # Number of balls drawn from urn
as.double(precision), # Precision of calculation
PACKAGE = "BiasedUrn");
}
# *****************************************************************************
# oddsWNCHypergeo
# Estimate odds ratio from mean for
# Wallenius' NonCentral Hypergeometric distribution
# *****************************************************************************
oddsWNCHypergeo <-
function(mu, m1, m2, n, precision=0.1) {
stopifnot(is.numeric(mu), is.numeric(m1), is.numeric(m2),
is.numeric(n), is.numeric(precision));
.Call("oddsWNCHypergeo",
as.double(mu), # Observed mean of x1
as.integer(m1), # Number of red balls in urn
as.integer(m2), # Number of white balls in urn
as.integer(n), # Number of balls drawn from urn
as.double(precision), # Precision of calculation
PACKAGE = "BiasedUrn");
}
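# Round-trip sketch (illustrative): the odds recovered from the exact
# mean should lie close to the odds that produced it; both estimators
# here are approximate by design (see the notes above).
if (interactive()) {
  mu <- meanWNCHypergeo(10, 10, 8, odds = 2)
  oddsWNCHypergeo(mu, 10, 10, 8)   # approximately 2
}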
# *****************************************************************************
# numFNCHypergeo
# Estimate number of balls of each color from experimental mean for
# Fisher's NonCentral Hypergeometric distribution
# *****************************************************************************
# Uses Cornfield's approximation. Specified precision is ignored.
numFNCHypergeo <-
function(mu, n, N, odds, precision=0.1) {
stopifnot(is.numeric(mu), is.numeric(n), is.numeric(N),
is.numeric(odds), is.numeric(precision));
.Call("numFNCHypergeo",
as.double(mu), # Observed mean of x1
as.integer(n), # Number of balls sampled
as.integer(N), # Number of balls in urn before sampling
as.double(odds), # Odds of getting a red ball among one red and one white
as.double(precision), # Precision of calculation (ignored)
PACKAGE = "BiasedUrn");
}
# *****************************************************************************
# numWNCHypergeo
# Estimate number of balls of each color from experimental mean for
# Wallenius' NonCentral Hypergeometric distribution
# *****************************************************************************
# Uses approximation. Specified precision is ignored.
numWNCHypergeo <-
function(mu, n, N, odds, precision=0.1) {
stopifnot(is.numeric(mu), is.numeric(n), is.numeric(N),
is.numeric(odds), is.numeric(precision));
.Call("numWNCHypergeo",
as.double(mu), # Observed mean of x1
as.integer(n), # Number of balls sampled
as.integer(N), # Number of balls in urn before sampling
as.double(odds), # Odds of getting a red ball among one red and one white
as.double(precision), # Precision of calculation (ignored)
PACKAGE = "BiasedUrn");
}
# *****************************************************************************
# minHypergeo
# Minimum of x for central and noncentral Hypergeometric distributions
# *****************************************************************************
minHypergeo <- function(m1, m2, n) {
stopifnot(m1>=0, m2>=0, n>=0, n<=m1+m2);
max(n-m2, 0);
}
# *****************************************************************************
# maxHypergeo
# Maximum of x for central and noncentral Hypergeometric distributions
# *****************************************************************************
maxHypergeo <- function(m1, m2, n) {
stopifnot(m1>=0, m2>=0, n>=0, n<=m1+m2);
min(m1, n);
}
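# Example (commented out): with m1=80, m2=60, n=100 the support of x is
# max(100-60, 0) = 40 to min(80, 100) = 80.
#   minHypergeo(80, 60, 100)   # 40
#   maxHypergeo(80, 60, 100)   # 80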
|
/scratch/gouwar.j/cran-all/cranData/BiasedUrn/R/urn1.R
|
# Package BiasedUrn, file urn2.R
# R interface to multivariate noncentral hypergeometric distributions
# *****************************************************************************
# dMFNCHypergeo
# Mass function for
# Multivariate Fisher's NonCentral Hypergeometric distribution
# *****************************************************************************
dMFNCHypergeo <-
function(
x, # Number of balls drawn of each color, vector or matrix
m, # Number of balls of each color in urn, vector
n, # Number of balls drawn from urn, scalar
odds, # Odds for each color, vector
precision=1E-7) { # Precision of calculation, scalar
stopifnot(is.numeric(x), is.numeric(m), is.numeric(n), is.numeric(odds), is.numeric(precision));
# Convert x to integer vector or matrix without losing dimensions:
if (is.matrix(x)) {
xx <- matrix(as.integer(x), nrow=dim(x)[1], ncol=dim(x)[2]);
}
else {
xx <- as.integer(x);
}
.Call("dMFNCHypergeo", xx, as.integer(m), as.integer(n),
as.double(odds), as.double(precision), PACKAGE = "BiasedUrn");
}
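# Example (commented out): one draw of n=4 balls from a 3-color urn, and the
# matrix form, assuming one column per observation as in the conversion above.
#   dMFNCHypergeo(c(2, 1, 1), m=c(4, 5, 6), n=4, odds=c(2, 1, 1))
#   X <- cbind(c(2, 1, 1), c(1, 2, 1))   # two observations, each summing to n
#   dMFNCHypergeo(X, m=c(4, 5, 6), n=4, odds=c(2, 1, 1))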
# *****************************************************************************
# dMWNCHypergeo
# Mass function for
# Multivariate Wallenius' NonCentral Hypergeometric distribution
# *****************************************************************************
dMWNCHypergeo <-
function(
x, # Number of balls drawn of each color, vector or matrix
m, # Number of balls of each color in urn, vector
n, # Number of balls drawn from urn, scalar
odds, # Odds for each color, vector
precision=1E-7) { # Precision of calculation, scalar
stopifnot(is.numeric(x), is.numeric(m), is.numeric(n), is.numeric(odds), is.numeric(precision));
# Convert x to integer vector or matrix without losing dimensions:
if (is.matrix(x)) {
xx <- matrix(as.integer(x), nrow=dim(x)[1], ncol=dim(x)[2]);
}
else {
xx <- as.integer(x);
}
.Call("dMWNCHypergeo", xx, as.integer(m), as.integer(n),
as.double(odds), as.double(precision), PACKAGE = "BiasedUrn");
}
# *****************************************************************************
# rMFNCHypergeo
# Random variate generation function for
# Multivariate Fisher's NonCentral Hypergeometric distribution.
# *****************************************************************************
rMFNCHypergeo <-
function(nran, m, n, odds, precision=1E-7) {
stopifnot(is.numeric(nran), is.numeric(m),
is.numeric(n), is.numeric(odds), is.numeric(precision));
.Call("rMFNCHypergeo",
as.integer(nran), # Number of random variates desired, scalar
as.integer(m), # Number of balls of each color in urn, vector
as.integer(n), # Number of balls drawn from urn, scalar
as.double(odds), # Odds for each color, vector
as.double(precision), # Precision of calculation, scalar
PACKAGE = "BiasedUrn");
}
# *****************************************************************************
# rMWNCHypergeo
# Random variate generation function for
# Multivariate Wallenius' NonCentral Hypergeometric distribution.
# *****************************************************************************
rMWNCHypergeo <-
function(nran, m, n, odds, precision=1E-7) {
stopifnot(is.numeric(nran), is.numeric(m),
is.numeric(n), is.numeric(odds), is.numeric(precision));
.Call("rMWNCHypergeo",
as.integer(nran), # Number of random variates desired, scalar
as.integer(m), # Number of balls of each color in urn, vector
as.integer(n), # Number of balls drawn from urn, scalar
as.double(odds), # Odds for each color, vector
as.double(precision), # Precision of calculation, scalar
PACKAGE = "BiasedUrn");
}
# *****************************************************************************
# momentsMFNCHypergeo
# Calculates the mean and variance of the
# Multivariate Fisher's NonCentral Hypergeometric distribution.
# Results are returned as a data frame.
# *****************************************************************************
momentsMFNCHypergeo <- function(
m, # Number of balls of each color in urn, vector
n, # Number of balls drawn from urn, scalar
odds, # Odds for each color, vector
precision = 0.1) { # Precision of calculation, scalar
stopifnot(is.numeric(m), is.numeric(n),
is.numeric(odds), is.numeric(precision));
res <- .Call("momentsMFNCHypergeo", as.integer(m),
as.integer(n), as.double(odds), as.double(precision),
PACKAGE = "BiasedUrn");
# Convert result to data frame
colnames(res) <- list("xMean","xVariance")
as.data.frame(res);
}
# *****************************************************************************
# momentsMWNCHypergeo
# Calculates the mean and variance of the
# Multivariate Wallenius' NonCentral Hypergeometric distribution.
# Results are returned as a data frame.
# *****************************************************************************
momentsMWNCHypergeo <- function(
m, # Number of balls of each color in urn, vector
n, # Number of balls drawn from urn, scalar
odds, # Odds for each color, vector
precision = 0.1) { # Precision of calculation, scalar
stopifnot(is.numeric(m), is.numeric(n),
is.numeric(odds), is.numeric(precision));
res <- .Call("momentsMWNCHypergeo", as.integer(m),
as.integer(n), as.double(odds), as.double(precision),
PACKAGE = "BiasedUrn");
# Convert result to data frame
colnames(res) <- list("xMean","xVariance")
as.data.frame(res);
}
# *****************************************************************************
# meanMFNCHypergeo
# Calculates the mean of the
# Multivariate Fisher's NonCentral Hypergeometric distribution.
# *****************************************************************************
meanMFNCHypergeo <- function(
m, # Number of balls of each color in urn, vector
n, # Number of balls drawn from urn, scalar
odds, # Odds for each color, vector
precision = 0.1) { # Precision of calculation, scalar
momentsMFNCHypergeo(m, n, odds, precision)$xMean
}
# *****************************************************************************
# meanMWNCHypergeo
# Calculates the mean of the
# Multivariate Wallenius' NonCentral Hypergeometric distribution.
# *****************************************************************************
meanMWNCHypergeo <- function(
m, # Number of balls of each color in urn, vector
n, # Number of balls drawn from urn, scalar
odds, # Odds for each color, vector
precision = 0.1) { # Precision of calculation, scalar
momentsMWNCHypergeo(m, n, odds, precision)$xMean
}
# *****************************************************************************
# varMFNCHypergeo
# Calculates the variance of the
# Multivariate Fisher's NonCentral Hypergeometric distribution.
# *****************************************************************************
varMFNCHypergeo <- function(
m, # Number of balls of each color in urn, vector
n, # Number of balls drawn from urn, scalar
odds, # Odds for each color, vector
precision = 0.1) { # Precision of calculation, scalar
momentsMFNCHypergeo(m, n, odds, precision)$xVariance
}
# *****************************************************************************
# varMWNCHypergeo
# Calculates the variance of the
# Multivariate Wallenius' NonCentral Hypergeometric distribution.
# *****************************************************************************
varMWNCHypergeo <- function(
m, # Number of balls of each color in urn, vector
n, # Number of balls drawn from urn, scalar
odds, # Odds for each color, vector
precision = 0.1) { # Precision of calculation, scalar
momentsMWNCHypergeo(m, n, odds, precision)$xVariance
}
# *****************************************************************************
# oddsMFNCHypergeo
# Estimate odds ratio from mean for the
# Multivariate Fisher's NonCentral Hypergeometric distribution
# *****************************************************************************
# Uses Cornfield's approximation. Specified precision is ignored.
oddsMFNCHypergeo <-
function(mu, m, n, precision=0.1) {
stopifnot(is.numeric(mu), is.numeric(m), is.numeric(n), is.numeric(precision));
# Convert mu to double vector or matrix without losing dimensions:
if (is.matrix(mu)) {
mux <- matrix(as.double(mu), nrow=dim(mu)[1], ncol=dim(mu)[2]);
}
else {
mux <- as.double(mu);
}
.Call("oddsMFNCHypergeo",
mux, # Observed mean of each x, vector
as.integer(m), # Number of balls of each color in urn, vector
as.integer(n), # Number of balls drawn from urn, scalar
as.double(precision), # Precision of calculation, scalar
PACKAGE = "BiasedUrn");
}
# *****************************************************************************
# oddsMWNCHypergeo
# Estimate odds ratio from mean for the
# Multivariate Wallenius' NonCentral Hypergeometric distribution
# *****************************************************************************
# Uses approximation. Specified precision is ignored.
oddsMWNCHypergeo <-
function(mu, m, n, precision=0.1) {
stopifnot(is.numeric(mu), is.numeric(m), is.numeric(n), is.numeric(precision));
# Convert mu to double vector or matrix without losing dimensions:
if (is.matrix(mu)) {
mux <- matrix(as.double(mu), nrow=dim(mu)[1], ncol=dim(mu)[2]);
}
else {
mux <- as.double(mu);
}
.Call("oddsMWNCHypergeo",
mux, # Observed mean of each x, vector
as.integer(m), # Number of balls of each color in urn, vector
as.integer(n), # Number of balls drawn from urn, scalar
as.double(precision), # Precision of calculation, scalar
PACKAGE = "BiasedUrn");
}
# *****************************************************************************
# numMFNCHypergeo
# Estimate number of balls of each color from experimental mean for
# Multivariate Fisher's NonCentral Hypergeometric distribution
# *****************************************************************************
# Uses Cornfield's approximation. Specified precision is ignored.
numMFNCHypergeo <-
function(mu, n, N, odds, precision=0.1) {
stopifnot(is.numeric(mu), is.numeric(n), is.numeric(N), is.numeric(odds), is.numeric(precision));
# Convert mu to double vector or matrix without losing dimensions:
if (is.matrix(mu)) {
mux <- matrix(as.double(mu), nrow=dim(mu)[1], ncol=dim(mu)[2]);
}
else {
mux <- as.double(mu);
}
.Call("numMFNCHypergeo",
mux, # Observed mean of each x, vector
as.integer(n), # Number of balls drawn from urn, scalar
as.integer(N), # Number of balls in urn before sampling, scalar
as.double(odds), # Odds for each color, vector
as.double(precision), # Precision of calculation, scalar (ignored)
PACKAGE = "BiasedUrn");
}
# *****************************************************************************
# numMWNCHypergeo
# Estimate number of balls of each color from experimental mean for
# Multivariate Wallenius' NonCentral Hypergeometric distribution
# *****************************************************************************
# Uses approximation. Specified precision is ignored.
numMWNCHypergeo <-
function(mu, n, N, odds, precision=0.1) {
stopifnot(is.numeric(mu), is.numeric(n), is.numeric(N), is.numeric(odds), is.numeric(precision));
# Convert mu to double vector or matrix without losing dimensions:
if (is.matrix(mu)) {
mux <- matrix(as.double(mu), nrow=dim(mu)[1], ncol=dim(mu)[2]);
}
else {
mux <- as.double(mu);
}
.Call("numMWNCHypergeo",
mux, # Observed mean of each x, vector
as.integer(n), # Number of balls drawn from urn, scalar
as.integer(N), # Number of balls in urn before sampling, scalar
as.double(odds), # Odds for each color, vector
as.double(precision), # Precision of calculation, scalar (ignored)
PACKAGE = "BiasedUrn");
}
# *****************************************************************************
# minMHypergeo
# Minimum of x for central and noncentral
# Multivariate Hypergeometric distributions
# *****************************************************************************
# m = Number of balls of each color in urn, vector
# n = Number of balls drawn from urn, scalar
minMHypergeo <- function(m, n) {
stopifnot(m>=0, n>=0, n<=sum(m));
pmax(n - sum(m) + m, 0);
}
# *****************************************************************************
# maxMHypergeo
# Maximum of x for central and noncentral
# Multivariate Hypergeometric distributions
# *****************************************************************************
# m = Number of balls of each color in urn, vector
# n = Number of balls drawn from urn, scalar
maxMHypergeo <- function(m, n) {
stopifnot(m>=0, n>=0, n<=sum(m));
pmin(m, n);
}
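# Example (commented out): elementwise support limits for a 3-color urn.
#   m <- c(10, 15, 20); n <- 18
#   minMHypergeo(m, n)   # pmax(18 - 45 + m, 0) = c(0, 0, 0)
#   maxMHypergeo(m, n)   # pmin(m, 18)          = c(10, 15, 18)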
|
/scratch/gouwar.j/cran-all/cranData/BiasedUrn/R/urn2.R
|
# ApproxHypergeo.R
# This demo compares a Wallenius' and a Fisher's noncentral hypergeometric
# distribution with the same mean rather than the same odds in order to
# make them approximate each other better.
require(BiasedUrn)
require(stats)
ApproxHypPlot <- function(m1, m2, n, w.odds) {
xmin <- minHypergeo(m1, m2, n)
xmax <- maxHypergeo(m1, m2, n)
x <- xmin : xmax
w.mean <- meanWNCHypergeo(m1, m2, n, w.odds)
f.odds <- oddsFNCHypergeo(w.mean, m1, m2, n)
wnc <- dWNCHypergeo(x, m1, m2, n, w.odds)
fnc <- dFNCHypergeo(x, m1, m2, n, f.odds)
fnc0 <- dFNCHypergeo(x, m1, m2, n, w.odds)
plot (x, fnc, type="l", col="red",
main = "Hypergeometric distributions",
sub = "Blue = Wallenius, Red = Fisher w. same mean,\n Green = Fisher w. same odds",
xlab = "", ylab = "Probability")
points (x, wnc, type="l", col="blue")
points (x, fnc0, type="l", col="green", lty="dashed")
}
ApproxHypPlot(80, 60, 100, 0.5)
|
/scratch/gouwar.j/cran-all/cranData/BiasedUrn/demo/ApproxHypergeo.R
|
# CompareHypergeo.R
# This demo shows the difference between the three distributions:
# 1. Wallenius' noncentral hypergeometric distribution
# 2. Fisher's noncentral hypergeometric distribution
# 3. The (central) hypergeometric distribution
require(BiasedUrn)
require(stats)
ComparePlot <- function(m1, m2, n, odds) {
xmin <- minHypergeo(m1, m2, n)
xmax <- maxHypergeo(m1, m2, n)
x <- xmin : xmax
wnc <- dWNCHypergeo(x, m1, m2, n, odds)
fnc <- dFNCHypergeo(x, m1, m2, n, odds)
hyp <- dhyper(x, m1, m2, n)
plot (x, wnc, type="l", col="blue",
main = "Hypergeometric distributions",
sub = "Blue = Wallenius, Red = Fisher, Green = Central",
xlab = "x", ylab = "Probability")
points (x, fnc, type="l", col="red")
points (x, hyp, type="l", col="green")
}
ComparePlot(80, 60, 100, 0.5)
|
/scratch/gouwar.j/cran-all/cranData/BiasedUrn/demo/CompareHypergeo.R
|
# OddsPrecision.R
# This demo tests the precision of the odds functions for
# Wallenius' and Fisher's noncentral hypergeometric distributions
# by calculating the mean of distributions with known odds and then
# estimating the odds from the means.
require(BiasedUrn)
require(stats)
OddsEst <- function(m1, m2, n, odds) {
meanW <- meanWNCHypergeo(m1, m2, n, odds, 1E-9)
oddsEstW <- oddsWNCHypergeo(meanW, m1, m2, n)
meanF <- meanFNCHypergeo(m1, m2, n, odds, 1E-9)
oddsEstF <- oddsFNCHypergeo(meanF, m1, m2, n)
list(Odds=odds, Wallenius.mean = meanW, Fisher.mean = meanF,
Wallenius.estimated.odds = oddsEstW, Fisher.estimated.odds = oddsEstF,
Wallenius.rel.error = (oddsEstW-odds)/odds,
Fisher.rel.error = (oddsEstF-odds)/odds)
}
OddsEst(10, 12, 15, 0.6)
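# Additional illustrative calls: repeat the round trip with odds further from 1
# to probe how the relative errors of the approximate estimates behave.
OddsEst(10, 12, 15, 0.1)
OddsEst(10, 12, 15, 5)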
|
/scratch/gouwar.j/cran-all/cranData/BiasedUrn/demo/OddsPrecision.R
|
# SampleWallenius.R
# This demo makes random samples from Wallenius' noncentral hypergeometric
# distribution and compares measured and expected frequencies
require(BiasedUrn)
require(stats)
MakeSamples <- function(m1, m2, n, odds) {
nsamp <- 100000 # Desired number of samples from distribution
xmin <- minHypergeo(m1, m2, n) # Lower limit for x
xmax <- maxHypergeo(m1, m2, n) # Upper limit for x
# Make nsamp samples from Wallenius' distribution
X <- rWNCHypergeo(nsamp, m1, m2, n, odds)
# Get table of frequencies
XTab <- as.data.frame(table(X))
# Relative frequencies
XTab$Freq <- XTab$Freq / nsamp
# Get expected frequencies
XTab$Expected <- dWNCHypergeo(as.integer(levels(XTab$X)), m1, m2, n, odds)
print("X frequencies in Wallenius' noncentral hypergeometric distribution")
# List measured vs. expected frequencies; row.names=FALSE suppresses row names
print(XTab, digits=5, row.names=FALSE)
# Draw histogram; unit-width breaks keep the densities on the probability scale
hist(X, freq=FALSE, breaks=seq(xmin-0.5, xmax+0.5, by=1))
}
MakeSamples(6, 8, 5, 1.5)
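# Optional follow-up (an illustrative sketch; GofTest is not part of the
# package): a chi-squared goodness-of-fit test of measured against expected
# frequencies on the full support of x.
GofTest <- function(m1, m2, n, odds, nsamp=100000) {
  x <- minHypergeo(m1, m2, n) : maxHypergeo(m1, m2, n)
  X <- rWNCHypergeo(nsamp, m1, m2, n, odds)
  obs <- tabulate(X - min(x) + 1, nbins=length(x))  # counts per support point
  p <- dWNCHypergeo(x, m1, m2, n, odds)
  chisq.test(obs, p=p/sum(p))                       # normalize p to sum to 1
}
GofTest(6, 8, 5, 1.5)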
|
/scratch/gouwar.j/cran-all/cranData/BiasedUrn/demo/SampleWallenius.R
|
# UrnTheory.R
# This opens the file UrnTheory.pdf to explain the biased urn models.
vignette("UrnTheory", package="BiasedUrn")
|
/scratch/gouwar.j/cran-all/cranData/BiasedUrn/demo/UrnTheory.R
|
#
# Difference-Against-Mean-Bib-plot_base_graphics.R
# Author: Dr. Robin Haunschild
# Version: 0.0.1
# Date: 07/10/2017
#
#' @title Create a difference against mean plot using journal and paper percentile values
#'
#' @description
#' Provide journal and paper percentile values in a data frame, e.g. df,
#' and the function call DAMBibPlot(df) creates the difference against mean plot.
#' DAMBibPlot takes some optional arguments to modify its behaviour, see arguments and details.
#'
#' @details
#' DAMBibPlot(df=data_frame, off_set=numeric_value, print_stats=boolean, do_plot=boolean)
#' Only the argument df is necessary. All other arguments are optional.
#'
#' Literature:
#'
#' - Bland, J. M., & Altman, D. G. (1986). Statistical Methods for Assessing Agreement between Two Methods of Clinical Measurement. Lancet, 1(8476), 307-310, https://www.ncbi.nlm.nih.gov/pubmed/2868172
#'
#' - Cleveland, W. S. (1985). The elements of graphing data. Monterey, CA: Wadsworth Advanced Books and Software.
#'
#' - Bornmann, L., & Haunschild, R. (2017). Plots for visualizing paper impact and journal impact of single researchers in a single graph, DOI: 10.1007/s11192-018-2658-1, preprint: https://arxiv.org/abs/1707.04050
#'
#'
#' An example data frame is provided as \code{example_researcher} in the package. It can be used to create a difference against mean plot using default values.
#'
#' @examples
#'
#' data(example_researcher)
#'
#' DAMBibPlot(example_researcher)
#'
#' @param df data frame with journal and paper percentiles
#' @param off_set determines the location of additional plotted information (number of points in each
#' quadrant), values between 0 and 40 might be useful (optional parameter). The default value is 0.
#' @param print_stats boolean variable (optional parameter) which determines if the additional statistical values are printed
#' to the R console (T: yes print, F: no do not print). The default value is T.
#' @param do_plot boolean variable (optional parameter) which determines if the difference against mean plot is actually produced
#' (T: yes plot, F: no do not plot). The default value is T.
#' @param digits integer value to determine the number of desired digits after the decimal point for statistical values (optional parameter). The default value is 1.
#' @param ... additional arguments to pass to the \link{plot} function
#'
#' @export
DAMBibPlot <- function(df, off_set=0, print_stats=TRUE, do_plot=TRUE, digits=1, ...) {
colnames(df) <- c('jif_perc', 'p_perc')
df <- df[!is.na(df$p_perc),]
df <- df[!is.na(df$jif_perc),]
df_t10 <- df[df$p_perc>=90,]
df_b90 <- df[df$p_perc<90,]
perc_diff <- df$p_perc-df$jif_perc
perc_avg <- (df$jif_perc+df$p_perc)/2
df <- data.frame(perc_diff, perc_avg)
perc_diff <- df_t10$p_perc-df_t10$jif_perc
perc_avg <- (df_t10$jif_perc+df_t10$p_perc)/2
df_t10 <- data.frame(perc_diff, perc_avg)
perc_diff <- df_b90$p_perc-df_b90$jif_perc
perc_avg <- (df_b90$jif_perc+df_b90$p_perc)/2
df_b90 <- data.frame(perc_diff, perc_avg)
df2 <- df[df$perc_diff>=0 & df$perc_avg<50,]
df1 <- df[df$perc_diff>=0 & df$perc_avg>=50,]
df4 <- df[df$perc_diff<0 & df$perc_avg>=50,]
df3 <- df[df$perc_diff<0 & df$perc_avg<50,]
n <- length(df$perc_avg)
nq1 <- length(df1$perc_avg)
nq2 <- length(df2$perc_avg)
nq3 <- length(df3$perc_avg)
nq4 <- length(df4$perc_avg)
pq1 <- round(nq1/n*100, digits)
pq2 <- round(nq2/n*100, digits)
pq3 <- round(nq3/n*100, digits)
pq4 <- round(nq4/n*100, digits)
nc1 <- nq1+nq4
nc2 <- nq2+nq3
pc1 <- round(nc1/n*100, digits)
pc2 <- round(nc2/n*100, digits)
nr1 <- nq1+nq2
nr2 <- nq3+nq4
pr1 <- round(nr1/n*100, digits)
pr2 <- round(nr2/n*100, digits)
avg1_perc_diff <- median(df1$perc_diff)
avg1_perc_avg <- median(df1$perc_avg)
avg1 <- data.frame(avg1_perc_avg, avg1_perc_diff)
avg2_perc_diff <- median(df2$perc_diff)
avg2_perc_avg <- median(df2$perc_avg)
avg2 <- data.frame(avg2_perc_avg, avg2_perc_diff)
avg3_perc_diff <- median(df3$perc_diff)
avg3_perc_avg <- median(df3$perc_avg)
avg3 <- data.frame(avg3_perc_avg, avg3_perc_diff)
avg4_perc_diff <- median(df4$perc_diff)
avg4_perc_avg <- median(df4$perc_avg)
avg4 <- data.frame(avg4_perc_avg, avg4_perc_diff)
avg_diff <- median(df$perc_diff)
avg_perc <- median(df$perc_avg)
if(do_plot) {
par(mar = c(5, 6, 4, 2) + 0.1)
plot(df_b90$perc_avg, df_b90$perc_diff, type='p', pch=16, xlim=c(0,100), ylim=c(-100,100),
ylab="Difference(paper impact - journal impact)\nHigher journal impact\t\tHigher paper impact",
xlab="Low impact\t\t\t\t\t\t\t\t\tHigh impact\nAverage of paper impact and journal impact", ...)
points(df_t10$perc_avg, df_t10$perc_diff, type='p', pch=1)
abline(h=0, col='red')
abline(v=50, col='red')
x1 <- 40-off_set
x2 <- 60+off_set
text(x1,95, bquote(paste('n'['q2']*'=', .(nq2), '; ', .(pq2),'%')))
text(x2,95, bquote(paste('n'['q1']*'=', .(nq1), '; ', .(pq1),'%')))
text(x1,-95, bquote(paste('n'['q3']*'=', .(nq3), '; ', .(pq3),'%')))
text(x2,-95, bquote(paste('n'['q4']*'=', .(nq4), '; ', .(pq4),'%')))
mtext(bquote(paste('n'['c2']*'=', .(nc2), '; ', .(pc2), '%\t\t\t\t\t\t\t\tn'['c1']*'=', .(nc1), '; ', .(pc1), '%')), side=3, line=1)
mtext(bquote(paste('n'['r2']*'=', .(nr2), '; ', .(pr2), '%\t\t\t\tn'['r1']*'=', .(nr1), '; ', .(pr1), '%')), side=4, line=1)
points(avg1, col='red', type='p', pch=0)
points(avg2, col='red', type='p', pch=0)
points(avg3, col='red', type='p', pch=0)
points(avg4, col='red', type='p', pch=0)
abline(h=avg_diff, col='red', lty=2)
abline(v=avg_perc, col='red', lty=2)
}
if(print_stats) {
print(paste('n(r1)=', nr1, '; ', pr1, '%', sep=''))
print(paste('n(r2)=', nr2, '; ', pr2, '%', sep=''))
print(paste('n(c1)=', nc1, '; ', pc1, '%', sep=''))
print(paste('n(c2)=', nc2, '; ', pc2, '%', sep=''))
print(paste('n(q1)=', nq1, '; ', pq1, '%', sep=''))
print(paste('n(q2)=', nq2, '; ', pq2, '%', sep=''))
print(paste('n(q3)=', nq3, '; ', pq3, '%', sep=''))
print(paste('n(q4)=', nq4, '; ', pq4, '%', sep=''))
print(paste('avg(q1)=', round(avg1$avg1_perc_avg,digits), ', ', round(avg1$avg1_perc_diff,digits), sep=''))
print(paste('avg(q2)=', round(avg2$avg2_perc_avg,digits), ', ', round(avg2$avg2_perc_diff,digits), sep=''))
print(paste('avg(q3)=', round(avg3$avg3_perc_avg,digits), ', ', round(avg3$avg3_perc_diff,digits), sep=''))
print(paste('avg(q4)=', round(avg4$avg4_perc_avg,digits), ', ', round(avg4$avg4_perc_diff,digits), sep=''))
print(paste('avg(total)=', round(avg_perc,digits), ', ', round(avg_diff,digits), sep=''))
}
}
|
/scratch/gouwar.j/cran-all/cranData/BibPlots/R/Difference-Against-Mean-Bib-plot_base_graphics.R
|
#
# beamplot.R
# Author: Dr. Robin Haunschild
# Version: 0.0.1
# Date: 05/20/2019
#
#' @title Create a beamplot using raw citations from a WoS download
#'
#' @description
#' Create a beamplot using raw citations from a WoS download. Use the format
#' "Other File Format --> Tab-delimited (Win, UTF-8)" and provide the downloaded file name.
#' A simple weighting of citation counts is also available for comparison of older with newer publications.
#'
#' @details
#' beamplot(wos_file="WoS_savedrecs.txt", do_weight=boolean)
#' Only the argument wos_file is mandatory. The argument do_weight is optional and FALSE by default.
#'
#' Literature:
#'
#' - Haunschild, R., Bornmann, L., & Adams, J. (2019). R package for producing beamplots as a preferred alternative to the h index when assessing single researchers (based on downloads from Web of Science), Scientometrics, DOI 10.1007/s11192-019-03147-3, preprint: https://arxiv.org/abs/1905.09095
#'
#' @examples
#'
#' \dontrun{beamplot("WoS_savedrecs.txt")}
#'
#' @param wos_file is the file name of the downloaded WoS export in the format Tab-delimited (Win, UTF-8).
#' @param do_weight is a boolean to specify if citation counts should be weighted with their age. The older the publication, the smaller the weight. The weight depends on the difference between the year up to which citations are counted (i.e., the current calendar year in the case of WoS downloads) and the publication year. A weighting factor of 1 is used for a difference of 0, 1/2 for a difference of 1, ..., and 1/11 for differences of ten or more.
#' @param ... further parameters passed to stripchart.
#'
#' @export
beamplot <- function(wos_file, do_weight=FALSE, ...) {
rd <- read.csv(wos_file, sep='\t', header=FALSE, quote="")
fl <- head(rd, 1)
rd <- read.csv(wos_file, sep='\t', header=FALSE, quote="", skip=1)
fl<-as.matrix(fl)
colnames(fl) <- NULL
colnames(rd) <- fl
cits <- data.frame(rd$PY, rd$TC)
colnames(cits) <- c('PY', 'TC')
xlabel <- 'Number of citations'
if(do_weight) {
#
# Simple weighting with age and attenuation after ten years.
weights <- as.data.frame(cbind(rd$PY, 0))
colnames(weights) <- c('PY', 'x')
weights$x <- (as.numeric(format(Sys.time(), "%Y"))-weights$PY+1)
weights$x[weights$x>10] <- 11
cits_cn <- colnames(cits)
# df_cits <- merge(cits, weights, by = 'PY')
df_cits <- as.data.frame(cbind(cits, weights))
cits <- data.frame(df_cits$PY, df_cits$TC/df_cits$x)
colnames(cits) <- cits_cn
# Simple linear weighting without attenuation after ten years ...
# cits$TC <- cits$TC/(as.numeric(format(Sys.time(), "%Y"))-cits$PY+1)
xlabel <- 'Number of age-weighted citations'
}
lrd <- split(cits$TC, cits$PY)
par(mar=c(5,6,6,2) + 0.2)
stripchart(cits$TC ~ cits$PY, method='stack', offset=0.2, pch=18, las=1,
xlab=xlabel,
ylab='Publication Year', ...
)
abline(v=median(cits$TC, na.rm=TRUE), lty=5, col='black')
# Plot year lines
for (i in 1:length(lrd)){
if (length(lrd[[i]])) {segments(min(lrd[[i]]), i, max(lrd[[i]]), i)}
}
# Plot median of citations in each year
for (i in 1:length(lrd)){
if (length(lrd[[i]])){points(median(lrd[[i]], na.rm=TRUE),i-0.2, pch=17, col='black')}
}
# Plot number of papers of upper x axis
npubs <- matrix(0)
for (i in 1:length(lrd)){
if (length(lrd[[i]])) { npubs[i] <- length(lrd[[i]]) } else { npubs[i] <- 0 }
}
x2vals <- seq( 0, max(npubs), by= 1)
f <- max(cits$TC)/max(npubs)
axis(3, at=f*x2vals, labels=x2vals, xlim=c(0, max(npubs)), col.ticks="red", col="red", col.lab="red", col.axis="red")
for (i in 1:length(lrd)){
if (length(lrd[[i]])){points(f*npubs[[i]],i+0.2, pch=16, col='red')}
}
mtext(side=3, line=3, 'Number of publications', col="red")
abline(v=f*median(npubs, na.rm=TRUE), lty=5, col='red')
}
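# Illustrative sketch of the age weighting used above (age_weight is a
# hypothetical helper, not part of the package): with diff = current year minus
# publication year, the weight is 1/min(diff + 1, 11).
#   age_weight <- function(py, ref_year=as.numeric(format(Sys.time(), "%Y"))) {
#     1 / pmin(ref_year - py + 1, 11)
#   }
#   age_weight(c(2019, 2018, 2005), ref_year=2019)   # 1, 1/2, 1/11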
|
/scratch/gouwar.j/cran-all/cranData/BibPlots/R/beamplot.R
|
#
# beamplot_scopus.R
# Author: Dr. Robin Haunschild
# Version: 0.0.1
# Date: 06/05/2019
#
#' @title Create a beamplot using raw citations from a Scopus download
#'
#' @description
#' Create a beamplot using raw citations from a Scopus download. Use the CSV/Excel format
#' and provide the downloaded file name.
#' A simple weighting of citation counts is also available for comparison of older with newer publications.
#'
#' @details
#' beamplot_scopus(scopus_file="Scopus.csv", do_weight=boolean)
#' Only the argument scopus_file is mandatory. The argument do_weight is optional and FALSE by default.
#'
#' Literature:
#'
#' - Haunschild, R., Bornmann, L., & Adams, J. (2019). R package for producing beamplots as a preferred alternative to the h index when assessing single researchers (based on downloads from Web of Science), Scientometrics, DOI 10.1007/s11192-019-03147-3, preprint: https://arxiv.org/abs/1905.09095
#'
#' @examples
#'
#' \dontrun{beamplot_scopus("Scopus.csv")}
#'
#' @param scopus_file is the file name of the downloaded Scopus export in the format CSV/Excel.
#' @param do_weight is a boolean to specify if citation counts should be weighted with their age. The older the publication, the smaller the weight. The weight depends on the difference between the year up to which citations are counted (i.e., the current calendar year in the case of Scopus downloads) and the publication year. A weighting factor of 1 is used for a difference of 0, 1/2 for a difference of 1, ..., and 1/11 for differences of ten or more.
#' @param ... further parameters passed to stripchart.
#'
#' @export
beamplot_scopus <- function(scopus_file, do_weight=FALSE, ...) {
df <- read.csv(scopus_file, sep=',', quote='"')
cits <- data.frame(df$Year, df$Cited.by)
colnames(cits) <- c('PY', 'TC')
cits$TC[is.na(cits$TC)] <- 0
xlabel <- 'Number of citations'
if(do_weight) {
#
# Simple weighting with age and attenuation after ten years.
weights <- as.data.frame(cbind(df$Year, 0))
colnames(weights) <- c('PY', 'x')
weights$x <- (as.numeric(format(Sys.time(), "%Y"))-weights$PY+1)
weights$x[weights$x>10] <- 11
cits_cn <- colnames(cits)
# df_cits <- merge(cits, weights, by = 'PY')
df_cits <- as.data.frame(cbind(cits, weights))
cits <- data.frame(df_cits$PY, df_cits$TC/df_cits$x)
colnames(cits) <- cits_cn
# Simple linear weighting without attenuation after ten years ...
# cits$TC <- cits$TC/(as.numeric(format(Sys.time(), "%Y"))-cits$PY+1)
xlabel <- 'Number of age-weighted citations'
}
lrd <- split(cits$TC, cits$PY)
par(mar=c(5,6,6,2) + 0.2)
stripchart(cits$TC ~ cits$PY, method='stack', offset=0.2, pch=18, las=1,
xlab=xlabel,
ylab='Publication Year', ...
)
abline(v=median(cits$TC, na.rm=TRUE), lty=5, col='black')
# Plot year lines
for (i in 1:length(lrd)){
if (length(lrd[[i]])) {segments(min(lrd[[i]]), i, max(lrd[[i]]), i)}
}
# Plot median of citations in each year
for (i in 1:length(lrd)){
if (length(lrd[[i]])){points(median(lrd[[i]], na.rm=TRUE),i-0.2, pch=17, col='black')}
}
# Plot number of papers of upper x axis
npubs <- matrix(0)
for (i in 1:length(lrd)){
if (length(lrd[[i]])) { npubs[i] <- length(lrd[[i]]) } else { npubs[i] <- 0 }
}
x2vals <- seq( 0, max(npubs), by= 1)
f <- max(cits$TC)/max(npubs)
axis(3, at=f*x2vals, labels=x2vals, xlim=c(0, max(npubs)), col.ticks="red", col="red", col.lab="red", col.axis="red")
for (i in 1:length(lrd)){
if (length(lrd[[i]])){points(f*npubs[[i]],i+0.2, pch=16, col='red')}
}
mtext(side=3, line=3, 'Number of publications', col="red")
abline(v=f*median(npubs, na.rm=TRUE), lty=5, col='red')
}
|
/scratch/gouwar.j/cran-all/cranData/BibPlots/R/beamplot_scopus.R
|
#' Example data set from publication for scatter plot and difference against mean plot
#'
#' Contains the data set (\code{example_researcher}).
#'
#' @name example_researcher
#' @aliases example_researcher
#' @keywords data
#'
NULL
|
/scratch/gouwar.j/cran-all/cranData/BibPlots/R/example_researcher_dataset.R
|
#
# inv_perc_beamplot.R
# Author: Dr. Robin Haunschild
# Version: 0.0.1
# Date: 10/12/2021
#
#' @title Create a beamplot using inverted percentile values
#'
#' @description
#' Create a beamplot using inverted percentile values.
#'
#' @details
#' inv_perc_beamplot(rd, au_name='Name of researcher')
#' Only the argument rd is mandatory. It has to be a data frame with two columns: (i) publication year and (ii) inverted percentile value, with one row per paper/dataset.
#'
#' Literature:
#'
#' - Haunschild, R., Bornmann, L., & Adams, J. (2019). R package for producing beamplots as a preferred alternative to the h index when assessing single researchers (based on downloads from Web of Science), Scientometrics, DOI 10.1007/s11192-019-03147-3, preprint: https://arxiv.org/abs/1905.09095
#' - Bornmann, L. & Marx, W. (2014a). Distributions instead of single numbers: percentiles and beam plots for the assessment of single researchers. Journal of the American Society of Information Science and Technology, 65(1), 206–208
#' - Bornmann, L. & Marx, W. (2014b). How to evaluate individual researchers working in the natural and life sciences meaningfully? A proposal of methods based on percentiles of citations. Scientometrics, 98(1), 487-509. DOI: 10.1007/s11192-013-1161-y.
#' - Bornmann, L., & Haunschild, R. (2018). Plots for visualizing paper impact and journal impact of single researchers in a single graph. Scientometrics, 115(1), 385-394. DOI: 10.1007/s11192-018-2658-1.
#'
#' @examples
#'
#' \dontrun{inv_perc_beamplot(rd, au_name='Name of researcher')}
#'
#' @param rd is a dataframe with two columns: (i) publication year and (ii) inverted percentile value with one row per paper/dataset.
#' @param au_name is the name of the researcher this beamplot belongs to.
#' @param ... further parameters passed to stripchart.
#'
#' @export
inv_perc_beamplot <- function(rd, au_name = 'Example Researcher', ...) {
# replace column names
colnames(rd)<-c('py','perc')
#remove rows with missing values
rd <- rd[!is.na(rd$perc),]
# split in list
lrd <- split(rd$perc,rd$py)
lrd <- lapply(lrd, sort)
xvals <- seq(100, 0, by= -10)
yvals <- seq( min(rd$py), max(rd$py), by= 1)
# par(mar=c(5,6,4,2) + 0.2)
# for plotting many points: pch='.', ps=0.01
stripchart(rd$perc ~ rd$py, method='stack', offset=0.1, xaxt='n', pch=18, las=1, xlim=c(100,0),
main= au_name,
xlab='Low Impact ---------------- Percentile ---------------- High Impact',
ylab='Publication Year', ...
)
axis(1, xvals, xvals)
axis(2, yvals, yvals)
# plot 50% and median
abline(v=50, lty=5)
abline(v=median(rd$perc, na.rm=TRUE), lty=5, col='red')
# plot year lines
for (i in 1:length(lrd)){
if (length(lrd[[i]])) {segments(lrd[[i]][1],i, tail(lrd[[i]],1), i)}
}
# plot medians
for (i in 1:length(lrd)){
if (length(lrd[[i]])){points(median(lrd[[i]], na.rm=TRUE),i-0.2, pch=17, col='red')}
}
}
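# Example (commented out; the data values are purely illustrative): each row is
# one paper with its publication year and inverted percentile value.
#   rd <- data.frame(py  =c(2015, 2015, 2016, 2017, 2017),
#                    perc=c(12.5, 61.0, 4.0, 37.8, 88.2))
#   inv_perc_beamplot(rd, au_name='Jane Doe')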
|
/scratch/gouwar.j/cran-all/cranData/BibPlots/R/inv_perc_beamplot.R
|
#
# ncr_comp.R
# Author: Dr. Robin Haunschild
# Version: 0.0.1
# Date: 01/22/2019
#
#' @title Create a spectrogram using data from the free software CRExplorer
#'
#' @description
#' Provide the contents of CSV files from the 'CRExplorer' in data frames, e.g. df1 and df2,
#' and the function call ncr_comp(df1, df2, py1, py2) creates a plot with both sets of NCR values.
#' Here, py1 and py2 are the lowest and highest publication year to be used in the plot.
#' The function ncr_comp takes some optional arguments to modify its behaviour, see arguments and details.
#'
#' @details
#' ncr_comp <- function(df1, df2, py1, py2, col_cr = "red", smoothing = TRUE, par_pch = 20, ...)
#' Only the arguments df1, df2, py1, and py2 are necessary. All other arguments are optional.
#' Please use the function \code{legend} to add a user-defined legend
#' The solid curve represents the data from df1 and the dotted curve represents the data from df2.
#'
#' Literature:
#'
#' - Thor, A., Bornmann, L., Marx, W., Haunschild, R., Leydesdorff, L., & Mutz, R. (2017). Website of the free software 'CRExplorer', http://www.crexplorer.net
#'
#' @param df1 data frame 1 with reference publication year and number of cited references, e. g., as exported from the CRExplorer (File > Export > CSV (Graph)).
#' @param df2 data frame 2 with reference publication year and number of cited references, e. g., as exported from the CRExplorer (File > Export > CSV (Graph)).
#' @param py1 determines lowest reference publication year which should be shown in the graph.
#' @param py2 determines highest reference publication year which should be shown in the graph.
#' @param smoothing boolean variable (optional parameter) which determines if the lines of the spectrogram are smoothed or not.
#' (T: yes apply smoothing, F: no do not apply smoothing). The default value is T.
#' @param col_cr character color name value to determine color of the line and points of the number of cited references (optional parameter). The default value is "red".
#' @param par_pch integer value to set the point type (optional parameter). The default value is 20.
#' @param ... additional arguments to pass to the \link{plot}, \link{points}, and \link{lines} functions.
#'
#' @export
ncr_comp <- function(df1, df2, py1, py2, col_cr = "red", smoothing = TRUE, par_pch = 20, ...) {
df1$Median.5 <- 0
df2$Median.5 <- 0
colnames(df1) <- c('Year', 'NCR', 'Median.5')
colnames(df2) <- c('Year', 'NCR', 'Median.5')
df1 <- df1[df1$Year>=py1 & df1$Year<=py2,]
df2 <- df2[df2$Year>=py1 & df2$Year<=py2,]
dfm <- merge(df1, df2, by='Year', all=TRUE)
dfm[is.na(dfm)] <- 0
df1m <- data.frame(dfm$Year, dfm$NCR.x, dfm$Median.5.x)
colnames(df1m) <- c('Year', 'NCR', 'Median.5')
df2m <- data.frame(dfm$Year, dfm$NCR.y, dfm$Median.5.y)
colnames(df2m) <- c('Year', 'NCR', 'Median.5')
if(smoothing) {
df1f <- fit_ncr(df1m)
df2f <- fit_ncr(df2m)
} else {
# Name the columns x and y to match the spline output of fit_ncr,
# so that max(df1f$y) and max(df2f$y) below work in both branches
df1f <- data.frame(x=df1m$Year, y=df1m$NCR)
df2f <- data.frame(x=df2m$Year, y=df2m$NCR)
}
max1 <- max(df1f$y)
max2 <- max(df2f$y)
if(max1>max2) {
plot(df1f, type = 'l', lty = 1, col = col_cr, ylab = 'Number of cited references', xlab = 'Reference publication year', ...)
lines(df2f, lty = 3, col = col_cr)
} else {
plot(df2f, type = 'l', lty = 3, col = col_cr, ylab = 'Number of cited references', xlab = 'Reference publication year', ...)
lines(df1f, lty = 1, col = col_cr)
}
}
fit_ncr <- function(df, type="NCR") {
if(type=="Med") {
df$NCR <- df$Median.5
}
lyear <- tail(df$Year, 1)
dftmp <- data.frame(lyear+1.0, 0, 0)
colnames(dftmp) <- colnames(df)
df <- rbind(df, dftmp)
# Splines fit without NCR line in negative NCR regime
ncr <- spline(df$Year, df$NCR, method="periodic", n=10*length(df$NCR))
if(type=="NCR") {
ncr$y[ncr$y<0] <- 0
}
ncr_max <- length(ncr$x)-10
ncr$x <- ncr$x[1:ncr_max]
ncr$y <- ncr$y[1:ncr_max]
return(ncr)
}
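# Example (commented out; the file names are placeholders): compare two
# CRExplorer graph exports, keeping only the year and NCR columns, and add a
# user-defined legend (solid = df1, dotted = df2).
#   df1 <- read.csv('cre_graph_set1.csv')[, 1:2]
#   df2 <- read.csv('cre_graph_set2.csv')[, 1:2]
#   ncr_comp(df1, df2, 1950, 2000)
#   legend('topleft', legend=c('Set 1', 'Set 2'), lty=c(1, 3), col='red')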
|
/scratch/gouwar.j/cran-all/cranData/BibPlots/R/ncr_comp.R
|
#
# perc_beamplot.R
# Author: Dr. Robin Haunschild
# Version: 0.0.1
# Date: 10/12/2021
#
#' @title Create a beamplot using percentile values
#'
#' @description
#' Create a beamplot using percentile values.
#'
#' @details
#' perc_beamplot(rd, au_name='Name of researcher')
#' Only the argument rd is mandatory. It has to be a data frame with two columns: (i) publication year and (ii) percentile value, with one row per paper/dataset.
#'
#' Literature:
#'
#' - Haunschild, R., Bornmann, L., & Adams, J. (2019). R package for producing beamplots as a preferred alternative to the h index when assessing single researchers (based on downloads from Web of Science), Scientometrics, DOI 10.1007/s11192-019-03147-3, preprint: https://arxiv.org/abs/1905.09095
#' - Bornmann, L. & Marx, W. (2014a). Distributions instead of single numbers: percentiles and beam plots for the assessment of single researchers. Journal of the American Society of Information Science and Technology, 65(1), 206–208
#' - Bornmann, L. & Marx, W. (2014b). How to evaluate individual researchers working in the natural and life sciences meaningfully? A proposal of methods based on percentiles of citations. Scientometrics, 98(1), 487-509. DOI: 10.1007/s11192-013-1161-y.
#' - Bornmann, L., & Haunschild, R. (2018). Plots for visualizing paper impact and journal impact of single researchers in a single graph. Scientometrics, 115(1), 385-394. DOI: 10.1007/s11192-018-2658-1.
#'
#' @examples
#'
#' \dontrun{perc_beamplot(rd, au_name='Name of researcher')}
#'
#' @param rd is a dataframe with two columns: (i) publication year and (ii) percentile value with one row per paper/dataset.
#' @param au_name is the name of the researcher this beamplot belongs to.
#' @param ... further parameters passed to stripchart.
#'
#' @export
perc_beamplot <- function(rd, au_name = 'Example Researcher', ...) {
# replace column names
colnames(rd)<-c('py','perc')
#remove rows with missing values
rd <- rd[!is.na(rd$perc),]
# split in list
lrd <- split(rd$perc,rd$py)
lrd <- lapply(lrd, sort)
xvals <- seq(0, 100, by= 10)
yvals <- seq( min(rd$py), max(rd$py), by= 1)
# par(mar=c(5,6,4,2) + 0.2)
# for plotting many points: pch='.', ps=0.01
stripchart(rd$perc ~ rd$py, method='stack', offset=0.1, xaxt='n', pch=18, las=1, xlim=c(0,100),
main= au_name,
xlab='Low Impact ---------------- Percentile ---------------- High Impact',
ylab='Publication Year', ...
)
axis(1, xvals, xvals)
axis(2, yvals, yvals)
# plot 50% and median
abline(v=50, lty=5)
abline(v=median(rd$perc, na.rm=TRUE), lty=5, col='red')
# plot year lines
for (i in 1:length(lrd)){
if (length(lrd[[i]])) {segments(lrd[[i]][1],i, tail(lrd[[i]],1), i)}
}
# plot medians
for (i in 1:length(lrd)){
if (length(lrd[[i]])){points(median(lrd[[i]], na.rm=TRUE),i-0.2, pch=17, col='red')}
}
}
|
/scratch/gouwar.j/cran-all/cranData/BibPlots/R/perc_beamplot.R
|
#
# percentiles_scatter_plot.R
# Author: Dr. Robin Haunschild
# Version: 0.0.1
# Date: 07/10/2017
#
#' @title Create a scatter plot using journal and paper percentile values
#'
#' @description
#' Provide journal and paper percentile values in a data frame, e.g. df,
#' and the function call jpscatter(df) creates the scatter plot.
#' The function jpscatter takes some optional arguments to modify its behaviour, see arguments and details.
#'
#' @details
#' jpscatter(df=data_frame, off_set=numeric_value, print_stats=boolean, do_plot=boolean, digits=integer)
#' Only the argument df is necessary. All other arguments are optional.
#'
#' Literature:
#'
#' - Bornmann, L., & Haunschild, R. (2017). Plots for visualizing paper impact and journal impact of single researchers in a single graph, DOI: 10.1007/s11192-018-2658-1, preprint: https://arxiv.org/abs/1707.04050
#'
#'
#' An example data frame is provided as \code{example_researcher} in the package. It can be used to create a scatter plot using default values.
#'
#' @examples
#'
#' data(example_researcher)
#'
#' jpscatter(example_researcher)
#'
#' @param df data frame with journal and paper percentiles
#' @param off_set determines the location of additional plotted information (number of points in each
#' quadrant), values between 0 and 40 might be useful (optional parameter). The default value is 0.
#' @param print_stats boolean variable (optional parameter) which determines if the additional statistical values are printed
#' to the R console (T: yes print, F: no do not print). The default value is T.
#' @param do_plot boolean variable (optional parameter) which determines if the scatter plot is actually produced
#' (T: yes plot, F: no do not plot). The default value is T.
#' @param digits integer value to determine the number of desired digits after the decimal point for statistical values (optional parameter). The default value is 1.
#' @param ... additional arguments to pass to the \link{plot} function
#'
#' @export
jpscatter <- function(df, off_set=0, print_stats=TRUE, do_plot=TRUE, digits=1, ...) {
colnames(df) <- c('jif_perc', 'p_perc')
df <- df[!is.na(df$p_perc),]
df <- df[!is.na(df$jif_perc),]
df2 <- df[df$jif_perc>=50 & df$p_perc<50,]
df1 <- df[df$jif_perc>=50 & df$p_perc>=50,]
df4 <- df[df$jif_perc<50 & df$p_perc>=50,]
df3 <- df[df$jif_perc<50 & df$p_perc<50,]
n <- length(df$p_perc)
nq1 <- length(df1$p_perc)
nq2 <- length(df2$p_perc)
nq3 <- length(df3$p_perc)
nq4 <- length(df4$p_perc)
pq1 <- round(nq1/n*100, digits)
pq2 <- round(nq2/n*100, digits)
pq3 <- round(nq3/n*100, digits)
pq4 <- round(nq4/n*100, digits)
nc1 <- nq1+nq4
nc2 <- nq2+nq3
pc1 <- round(nc1/n*100, digits)
pc2 <- round(nc2/n*100, digits)
nr1 <- nq1+nq2
nr2 <- nq3+nq4
pr1 <- round(nr1/n*100, digits)
pr2 <- round(nr2/n*100, digits)
avg1_jif_perc <- median(df1$jif_perc)
avg1_p_perc <- median(df1$p_perc)
avg1 <- data.frame(avg1_p_perc, avg1_jif_perc)
avg2_jif_perc <- median(df2$jif_perc)
avg2_p_perc <- median(df2$p_perc)
avg2 <- data.frame(avg2_p_perc, avg2_jif_perc)
avg3_jif_perc <- median(df3$jif_perc)
avg3_p_perc <- median(df3$p_perc)
avg3 <- data.frame(avg3_p_perc, avg3_jif_perc)
avg4_jif_perc <- median(df4$jif_perc)
avg4_p_perc <- median(df4$p_perc)
avg4 <- data.frame(avg4_p_perc, avg4_jif_perc)
avg_jif_perc <- median(df$jif_perc)
avg_p_perc <- median(df$p_perc)
if(do_plot) {
par(mar = c(5, 6, 4, 2) + 0.1)
plot(df$p_perc, df$jif_perc, type='p', pch=16, xlim=c(0,100), ylim=c(0,100),
ylab="Journal impact",
xlab="Paper impact", ...)
abline(h=50, col='red')
abline(v=50, col='red')
abline(a=0,b=1, col='red')
x1 <- 40-off_set
x2 <- 60+off_set
text(x1,100, bquote(paste('n'['q2']*'=', .(nq2), '; ', .(pq2),'%')))
text(x2,100, bquote(paste('n'['q1']*'=', .(nq1), '; ', .(pq1),'%')))
text(x1,0, bquote(paste('n'['q3']*'=', .(nq3), '; ', .(pq3),'%')))
text(x2,0, bquote(paste('n'['q4']*'=', .(nq4), '; ', .(pq4),'%')))
mtext(bquote(paste('n'['c2']*'=', .(nc2), '; ', .(pc2), '%\t\t\t\t\t\t\t\tn'['c1']*'=', .(nc1), '; ', .(pc1), '%')), side=3, line=1)
mtext(bquote(paste('n'['r2']*'=', .(nr2), '; ', .(pr2), '%\t\t\t\tn'['r1']*'=', .(nr1), '; ', .(pr1), '%')), side=4, line=1)
points(avg1, col='red', type='p', pch=0)
points(avg2, col='red', type='p', pch=0)
points(avg3, col='red', type='p', pch=0)
points(avg4, col='red', type='p', pch=0)
abline(h=avg_jif_perc, col='red', lty=2)
abline(v=avg_p_perc, col='red', lty=2)
}
if(print_stats) {
print(paste('n(r1)=', nr1, '; ', pr1, '%', sep=''))
print(paste('n(r2)=', nr2, '; ', pr2, '%', sep=''))
print(paste('n(c1)=', nc1, '; ', pc1, '%', sep=''))
print(paste('n(c2)=', nc2, '; ', pc2, '%', sep=''))
print(paste('n(q1)=', nq1, '; ', pq1, '%', sep=''))
print(paste('n(q2)=', nq2, '; ', pq2, '%', sep=''))
print(paste('n(q3)=', nq3, '; ', pq3, '%', sep=''))
print(paste('n(q4)=', nq4, '; ', pq4, '%', sep=''))
print(paste('avg(q1)=', round(avg1$avg1_p_perc,digits), ', ', round(avg1$avg1_jif_perc,digits), sep=''))
print(paste('avg(q2)=', round(avg2$avg2_p_perc,digits), ', ', round(avg2$avg2_jif_perc,digits), sep=''))
print(paste('avg(q3)=', round(avg3$avg3_p_perc,digits), ', ', round(avg3$avg3_jif_perc,digits), sep=''))
print(paste('avg(q4)=', round(avg4$avg4_p_perc,digits), ', ', round(avg4$avg4_jif_perc,digits), sep=''))
print(paste('avg(total)=', round(avg_p_perc,digits), ', ', round(avg_jif_perc,digits), sep=''))
}
}
|
/scratch/gouwar.j/cran-all/cranData/BibPlots/R/percentiles_scatter_plot.R
|
#
# rpys.R
# Author: Dr. Robin Haunschild
# Version: 0.0.3
# Date: 08/18/2021
#
#' @title Create a spectrogram using data from the free software CRExplorer
#'
#' @description
#' Provide the contents of the CSV (Graph) file from the 'CRExplorer' in a data frame, e.g. df,
#' and the function call rpys(df, py1, py2) creates the spectrogram.
#' Here, py1 and py2 are the lowest and highest publication year to be used in the plot.
#' The function rpys takes some optional arguments to modify its behaviour, see arguments and details.
#'
#' @details
#' rpys(df=data_frame, py1=integer_value, py2=integer_value, smoothing=boolean, col_cr=character_color_name, col_med=character_color_name, par_pch=integer, plot_NCR=boolean, plot_Med=boolean, ...)
#' Only the argument df is necessary. All other arguments are optional.
#'
#' Literature:
#'
#' - Thor, A., Bornmann, L., & Haunschild, R. (2021). Website of the free software 'CRExplorer', http://www.crexplorer.net
#' - Thor, A., Bornmann, L., & Haunschild, R. (2018). CitedReferencesExplorer (CRExplorer) manual. Retrieved December 19, 2019, from https://andreas-thor.github.io/cre/manual.pdf
#'
#' An example data frame is provided as \code{rpys_example_data} in the package. It can be used to create an example spectrogram.
#'
#' @examples
#'
#' data(rpys_example_data)
#'
#' rpys(rpys_example_data, 1935, 2010)
#'
#' @param df data frame with reference publication year, number of cited references, and median deviation as exported from the CRExplorer (File > Export > CSV (Graph)).
#' @param py1 determines lowest reference publication year which should be shown in the graph (optional parameter).
#' @param py2 determines highest reference publication year which should be shown in the graph (optional parameter).
#' @param smoothing boolean variable (optional parameter) which determines if the lines of the spectrogram are smoothed or not.
#' (T: yes apply smoothing, F: no do not apply smoothing). The default value is T.
#' @param col_cr character color name value to determine color of the line and points of the number of cited references (optional parameter). The default value is "red".
#' @param col_med character color name value to determine color of the line and points of the median deviation (optional parameter). The default value is "blue".
#' @param par_pch integer value to set the point type (optional parameter). The default value is 20.
#' @param plot_NCR boolean variable (optional parameter) which determines the NCR curve should be plotted.
#' @param plot_Med boolean variable (optional parameter) which determines the median deviation curve should be plotted.
#' @param ... additional arguments to pass to the \link{plot}, \link{points}, and \link{lines} functions.
#'
#' @export
rpys <- function(df, py1=min(df$Year), py2=max(df$Year), col_cr="red", col_med="blue", smoothing=TRUE, par_pch=20, plot_NCR=TRUE, plot_Med=TRUE, ...) {
colnames(df) <- c("Year", "NCR", "Median.5")
df <- df[df$Year<py2+1, ]
df <- df[df$Year>py1-1, ]
if(smoothing) {
dftmp <- data.frame(py2+1,0,0)
colnames(dftmp) <- colnames(df)
df <- rbind(df, dftmp)
# Splines plot without NCR line in negative NCR regime
ncr <- spline(df$Year, df$NCR, method="periodic", n=10*length(df$NCR))
ncr$y[ncr$y<0] <- 0
max_val <- length(ncr$x)-10
ncr$x <- ncr$x[1:max_val]
ncr$y <- ncr$y[1:max_val]
med5 <- spline(df$Year, df$Median.5, n=10*length(df$NCR))
med5$x <- med5$x[1:max_val]
med5$y <- med5$y[1:max_val]
df <- df[df$Year<py2+1, ]
} else {
ncr <- data.frame(df$Year, df$NCR)
med5 <- data.frame(df$Year, df$Median.5)
}
if(plot_NCR && plot_Med) {
plot(df$Year, df$NCR, type='p', pch=par_pch, col=col_cr, xlim=c(min(df$Year),max(df$Year)), ylim=c(min(df$Median.5), max(df$NCR)), xlab='Reference publication year', ylab='Number of cited references', ...)
}
if(plot_NCR && !plot_Med) {
plot(df$Year, df$NCR, type='p', pch=par_pch, col=col_cr, xlim=c(min(df$Year),max(df$Year)), ylim=c(min(df$NCR), max(df$NCR)), xlab='Reference publication year', ylab='Number of cited references', ...)
}
if(!plot_NCR && plot_Med) {
plot(df$Year, df$Median.5, type='p', pch=par_pch, col=col_med, xlim=c(min(df$Year),max(df$Year)), ylim=c(min(df$Median.5), max(df$Median.5)), xlab='Reference publication year', ylab='Median deviation of the number of cited references', ...)
}
if(plot_NCR && plot_Med) {
points(df$Year, df$Median.5, type='p', pch=par_pch, col=col_med, ...)
}
abline(h=0,col="black")
if(plot_NCR) {
lines(ncr, col=col_cr, ...)
}
if(plot_Med) {
lines(med5, col=col_med, ...)
}
}
|
/scratch/gouwar.j/cran-all/cranData/BibPlots/R/rpys.R
|
#
# rpys_bl.R
# Author: Dr. Robin Haunschild
# Version: 0.0.2
# Date: 10/07/2021
#
#' @title Create a spectrogram with bars and lines using data from the free software CRExplorer
#'
#' @description
#' Provide the contents of the CSV (Graph) file from the 'CRExplorer' in a data frame, e.g. df,
#' and the function call \link{rpys_bl}(df) creates a spectrogram. Previously, you should use
#' the function \link{rpys} for a plain line graph to determine the proper parameters, e.g., x_offset and x_range.
#' Determination of the proper x_offset and x_range is a bit tricky.
#' Usage of a wrong value of x_range will cause an error. Usage of a wrong value of x_offset will produce
#' a plot. However, the line for the median deviation and the bars might not be at the proper location.
#' First, adjust x_range if necessary, and second, adjust x_offset so that the x axis is properly aligned
#' with the line and bars. Compare the plot from \link{rpys_bl} with your data and the plot from the function \link{rpys}.
#' The function \link{rpys_bl} takes some optional arguments to modify its behaviour, see arguments and details.
#'
#' @details
#' rpys_bl(df=data_frame, py1=integer_value, py2=integer_value, x_range=integer_value, smoothing=boolean,
#' col_cr=character_color_name, col_med=character_color_name, col_ol=character_color_name, par_mar=integer_vector, plot_NCR=boolean, plot_Med=boolean,
#' x_offset=integer_value, x_min=integer_value, x_max=integer_value, x_step1=integer_value, x_step2=integer_value, y1_min=integer_value, y1_max=integer_value, y1_step=integer_value,
#' y2_min=integer_value, y2_max=integer_value, y2_step=integer_value,
#' lx=integer_value, ly=integer_value,
#' pl_offset=integer_value, bar_border=string_value, outliers=integer_value, lpos=integer_value, pl_cex=floating_point_value,
#' TFmin=integer_value,TFmax=integer_value, ...)
#' Only the argument df is necessary. All other arguments are optional, but many should be provided to produce nice plots.
#'
#' Literature:
#'
#' - Thor, A., Bornmann, L., & Haunschild, R. (2021). Website of the free software 'CRExplorer', http://www.crexplorer.net
#' - Thor, A., Bornmann, L., & Haunschild, R. (2018). CitedReferencesExplorer (CRExplorer) manual. Retrieved December 19, 2019, from https://andreas-thor.github.io/cre/manual.pdf
#' - Tukey, J. W. (1977). Exploratory data analysis. Boston, MA, USA: Addison-Wesley Publishing Company.
#'
#' An example data frame is provided as \code{rpys_example_data} in the package. It can be used to create an example spectrogram.
#'
#' @examples
#'
#' data(rpys_example_data)
#'
#' rpys_bl(rpys_example_data)
#'
#' rpys_bl(rpys_example_data, x_min=1930, x_max=2020, x_range=91, x_offset=1, lx=1926, ly=135,
#' y1_max=300, y1_step=50, y2_min=-150, y2_max=150, y2_step=25, lpos=1)
#'
#' rpys_bl(rpys_example_data, py1=1930, py2=2020, x_offset=1, lx=1926, ly=135, y1_max=300,
#' y1_step=50, y2_min=-150, y2_max=150, y2_step=25, lpos=1)
#'
#' @param df data frame with reference publication year, number of cited references, and median deviation as exported from the CRExplorer (File > Export > CSV (Graph)).
#' @param py1 determines lowest reference publication year which should be shown on the x axis (optional parameter). The default is the minimum RPY.
#' @param py2 determines highest reference publication year which should be shown on the x axis (optional parameter). The default is the maximum RPY.
#' @param x_min determines lowest reference publication year which should be shown on the x axis (optional parameter). The default is the minimum RPY.
#' @param x_max determines highest reference publication year which should be shown on the x axis (optional parameter). The default is the maximum RPY.
#' @param x_range is the range of the x axis (optional parameter). The default is py2-py1+1.
#' @param x_offset determines the x axis offset to adjust the median deviation curve properly (optional parameter). The default is 0.
#' @param x_step1 is the interval of major x tics (optional parameter).
#' @param x_step2 is the interval of minor x tics (optional parameter).
#' @param y1_min is the minimum left y axis value (optional parameter).
#' @param y1_max is the maximum left y axis value (optional parameter).
#' @param y1_step is the interval of the left y axis (optional parameter).
#' @param y2_min is the minimum right y axis value (optional parameter).
#' @param y2_max is the maximum right y axis value (optional parameter).
#' @param y2_step is the interval of the right y axis (optional parameter).
#' @param lx is the x position of the legend (optional parameter).
#' @param ly is the y position of the legend according to the right y axis (optional parameter).
#' @param pl_offset is the offset of the year label (optional parameter).
#' @param bar_border is the color around the bars (optional parameter).
#' @param outliers is an integer that indicates if outliers should be detected (optional parameter):
#' (0: no outlier detection, 1: outliers are detected and marked, 2: only extreme outliers are detected and marked)
#' @param lpos is an integer that determines the position of the outlier year label around the point (optional parameter).
#' Values of 1, 2, 3, and 4 indicate positions below, to the left of, above, and to the right of the specified coordinates, respectively.
#' @param pl_cex is the cex value of the year labels (optional parameter).
#' @param TFmin is the first year that should be used for outlier detection according to Tukey's fences.
#' @param TFmax is the last year that should be used for outlier detection according to Tukey's fences.
#' @param smoothing boolean variable (optional parameter) which determines if the lines of the spectrogram are smoothed or not.
#' (TRUE: apply smoothing, FALSE: do not apply smoothing). The default value is TRUE.
#' @param col_cr is a character color name value to determine color of the bars of the number of cited references (optional parameter). The default value is "grey".
#' @param col_med is a character color name value to determine color of the line of the median deviation (optional parameter). The default value is "blue".
#' @param col_ol is a character color name value to determine color of the outlier labels (optional parameter). The default value is "red".
#' @param par_mar integer vector to set the margins (optional parameter). The default value is c(5, 5, 1, 5).
#' @param plot_NCR boolean variable (optional parameter) which determines whether the NCR curve should be plotted.
#' @param plot_Med boolean variable (optional parameter) which determines whether the median deviation curve should be plotted.
#' @param ... additional arguments to pass to the \link{plot} function.
#'
#' @export
rpys_bl <- function(df, py1=min(df$Year), py2=max(df$Year), x_range=py2-py1+1,
col_cr="grey", col_med="blue", col_ol='red', smoothing=TRUE, par_mar = c(5, 5, 1, 5),
x_offset=0, x_min=py1, x_max=py2, x_step1=10, x_step2=5, y1_min=0, y1_max=max(df$NCR), y1_step=(max(df$NCR)-min(df$NCR))/5,
y2_min=min(df$Median.5), y2_max=max(df$Median.5), y2_step=(max(df$Median.5)-min(df$Median.5))/5,
lx=median(df$Year), ly=median(df$Median.5),
pl_offset=(max(df$NCR)-min(df$NCR))/50, bar_border='white', outliers=2, lpos=3, pl_cex=0.9,
TFmin=py1,TFmax=py2,
plot_NCR=TRUE, plot_Med=TRUE, ...) {
if(length(df)>3) {
df <- df[,c(1,2,3)]
}
colnames(df) <- c("Year", "NCR", "Median.5")
df1 <- df
df00 <- as.data.frame(matrix(data=c(min(df1$Year)-2, 0L, 0L), nrow=1, ncol=3))
df0 <- as.data.frame(matrix(data=c(min(df1$Year)-1, 0L, 0L), nrow=1, ncol=3))
colnames(df0) <- colnames(df1)
colnames(df00) <- colnames(df1)
df <- rbind(rbind(df00, df0), df1)
if(smoothing) {
med <- spline(df$Year, df$Median.5, method="periodic", n=10*length(df$Year))
} else {
med <- data.frame(df$Year, df$Median.5)
colnames(med) <- c('x', 'y')
}
df_med <- data.frame(med$x, med$y)
colnames(df_med) <- c('Year', 'NCR')
med5p <- df1[df1$Median.5>=0 & df1$Year>=TFmin & df1$Year<=TFmax,]$Median.5
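  # Tukey's fences on the non-negative median deviations (Tukey, 1977):
  # TFu1 = Q3 + 1.5*IQR flags outliers, TFu2 = Q3 + 3*IQR flags extreme outliers.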
TFu1 <- quantile(med5p, 3/4)+1.5*(quantile(med5p, 3/4)-quantile(med5p, 1/4))
TFu2 <- quantile(med5p, 3/4)+3*(quantile(med5p, 3/4)-quantile(med5p, 1/4))
px_TFu1 <- df1[df1$Median.5>TFu1,]$Year
px_TFu2 <- df1[df1$Median.5>TFu2,]$Year
if(outliers == 2) {
px <- px_TFu2
py <- df[df$Year %in% px,3]
} else if(outliers == 1) {
px <- px_TFu1
py <- df[df$Year %in% px,3]
} else {
px <- NA
py <- NA
}
xlab_text <- ''
ylab_text <- ''
if(!plot_Med) {
xlab_text <- 'Reference publication year'
ylab_text <- 'Number of cited references'
}
par(mar = par_mar)
if(plot_NCR) {
barData <- df$NCR
x <- barplot(barData,
axes = FALSE,
col = col_cr,
border=bar_border,
xlab = xlab_text,
ylab = ylab_text,
ylim = c(y1_min, y1_max) )[, 1]
axis(1, at = 1.2*seq(1, x_range, x_step1)+x_offset, labels = seq(x_min, x_max, x_step1))
rug(x = 1.2*seq(1, x_range, x_step2)+x_offset, ticksize = -0.01, side = 1)
axis(2, at = seq(y1_min, y1_max, y1_step), labels = seq(y1_min, y1_max, y1_step))
}
if(plot_Med) {
if(plot_NCR) {
par(new = TRUE)
plot(x = df_med$Year, y = df_med$NCR, type = "l", col = col_med, lwd=1.5, axes = FALSE, xlab = "Reference publication year", ylab = "Number of cited references", ylim=c(y2_min, y2_max), ...)
axis(4, at = seq(y2_min, y2_max, y2_step), labels = seq(y2_min, y2_max, y2_step), col=col_med, col.axis=col_med)
mtext("Five-year-median deviation", side = 4, line = 3, cex = par("cex.lab"), col=col_med)
} else {
plot(x = df_med$Year, y = df_med$NCR, type = "l", col = col_med, lwd=1.5, axes = TRUE, xlab = "Reference publication year", ylab = "Five-year-median deviation", ylim=c(y2_min, y2_max), ...)
}
abline(h=0, lty='dotted', col=col_med)
if(outliers>0) {
points(px, py, pch=8, col=col_ol)
for(i in seq_along(px)) {text(px[i]+1, py[i]+pl_offset, px[i], col=col_ol, srt=90, pos=lpos, cex=pl_cex) }
}
}
if(plot_NCR & plot_Med) {
legend(lx, ly, legend=c('Number of cited references (NCR)', 'Deviation from the 5-year-median'), lty=1, lwd=c(5, 1.5), col=c(col_cr, col_med))
}
box()
}
|
/scratch/gouwar.j/cran-all/cranData/BibPlots/R/rpys_bl.R
|
#' Example data set for the rpys function
#'
#' Contains the data sets (\code{rpys_example_data}).
#'
#' @name rpys_example_data
#' @aliases rpys_example_data
#' @keywords data
#'
NULL
|
/scratch/gouwar.j/cran-all/cranData/BibPlots/R/rpys_example_data_dataset.R
|
#' bifactorIndices
#'
#' Computes all available bifactor indices for the input given.
#'
#' @param Lambda is a matrix of factor loadings or an object that can be converted to a
#' matrix of factor loadings by \code{\link{getLambda}}. Currently fitted \pkg{lavaan}
#' objects and fitted \pkg{mirt} objects are supported in addition to raw factor loading
#' matrix input. For \code{Mplus} output files, use \code{\link{bifactorIndicesMplus}}.
#' @param Theta is a vector of residual variances. If omitted, \code{Theta} will be computed from
#' input for \code{Lambda}.
#' @param UniLambda is a matrix of factor loadings or an object that can be converted to
#' a matrix of factor loadings such as a fitted \pkg{lavaan} objects or fitted \pkg{mirt}
#' object. Defaults to \code{NULL}, as \code{UniLambda} is only required if you wish to
#' compute \code{\link{ARPB}}.
#' @param standardized lets the function know whether to look for standardized or
#' unstandardized results from \pkg{lavaan} and defaults to \code{TRUE}. If \code{Lambda} is not a
#' \pkg{lavaan} object, then \code{standardized} will be ignored.
#' @param Phi is the correlation matrix of factors and defaults to \code{NULL}. User should generally ignore this
#' parameter. If not provided, \code{bifactorIndices} will try to determine \code{Phi} from \code{Lambda} when \code{Lambda}
#' is a fitted lavaan model or will assume it is the identity matrix otherwise.
#' @param Thresh is a list of vectors of item thresholds, used only when items are categorical. \code{bifactorIndices}
#' will try to determine \code{Thresh} from \code{Lambda} when \code{Lambda}
#' is a fitted lavaan model and the indicators are categorical.
#' \code{Thresh} defaults to \code{NULL}, which indicates items are continuous.
#'
#' @return A list of bifactor indices, including three different ECV indices, IECV, PUC,
#' Omega, OmegaH, Factor Determinacy (FD), Construct Replicability (H) and ARPB.
#' Please note that many of these indices are interpretable even
#' when the model being used is not a bifactor model; some indices may be useful for
#' two-tier, trifactor, correlated traits, and even unidimensional models.
#'
#' @details Currently, factor loading matrices, fitted \pkg{lavaan} objects, and fitted \pkg{mirt}
#' objects are supported. For \code{Mplus} output, see \code{\link{bifactorIndicesMplus}}.
#' IRT parameters from \pkg{mirt} are converted to standardized factor loadings via the
#' correspondence described in Kamata & Bauer (2008). If you wish to use standardized
#' coefficients, item error variance will be computed directly from standardized factor
#' loadings. \code{\link{ARPB}} will only be computed if the factor loadings from a unidimensional model
#' are included, while \code{\link{ECV_GS}} and \code{\link{ECV_SG}} will only be computed for
#' models with a general factor, and \code{\link{PUC}} will only be computed for a true bifactor
#' model. Note that if a correlated traits model is provided, the omega indices
#' will simply be the regular omega values for those factors. Interpretations for individual
#' indices as well as details about their computation can be found in the man page for the
#' individual indices.
#'
#' Formulas for all indices can be found in Rodriguez et al. (2016). When indicators are categorical,
#' the methodology of Green and Yang (2009) is used for computing Omega and OmegaH.
#'
#' @references
#' Green, S. B., & Yang, Y. (2009). Reliability of summed item scores using
#' structural equation modeling: An alternative to coefficient alpha.
#' \emph{Psychometrika, 74}(1), 155-167 \doi{10.1007/s11336-008-9099-3}.
#'
#' Kamata, A., & Bauer, D. J. (2008). A note on the relation between factor analytic and item
#' response theory models. \emph{Structural Equation Modeling: A Multidisciplinary Journal, 15}
#' (1), 136-153.
#'
#' Rodriguez, A., Reise, S. P., & Haviland, M. G. (2016). Evaluating bifactor models:
#' calculating and interpreting statistical indices. \emph{Psychological Methods, 21}(2),
#' 137 \doi{10.1037/met0000045}.
#'
#' @export
#'
#' @seealso
#' \code{\link{bifactorIndicesMplus}},
#' \code{\link{bifactorIndices_expl}},
#' \code{\link{bifactorIndicesMplus_expl}},
#' \code{\link{bifactorIndicesMplus_ESEM}},
#' \code{\link{ECV_SS}},
#' \code{\link{ECV_SG}},
#' \code{\link{ECV_GS}},
#' \code{\link{IECV}},
#' \code{\link{PUC}},
#' \code{\link{Omega_S}},
#' \code{\link{Omega_H}},
#' \code{\link{cat_Omega_S}},
#' \code{\link{cat_Omega_H}},
#' \code{\link{H}},
#' \code{\link{FD}},
#' \code{\link{ARPB}}
#'
#' @examples
#'
#' # Computing bifactor indices from fitted lavaan object
#' # (using mirt object is similar). Use of the unidimensional
#' # model is optional; it is only used to compute ARPB.
#'
#'\donttest{
#'SRS_UnidimensionalModel <-
#' "SRS =~ SRS_1 + SRS_2 + SRS_3 + SRS_4 + SRS_5 +
#' SRS_6 + SRS_7 + SRS_8 + SRS_9 + SRS_10 +
#' SRS_11 + SRS_12 + SRS_13 + SRS_14 + SRS_15 +
#' SRS_16 + SRS_17 + SRS_18 + SRS_19 + SRS_20"
#'
#'SRS_Unidimensional <- lavaan::cfa(SRS_UnidimensionalModel,
#' SRS_data,
#' ordered = paste0("SRS_", 1:20),
#' orthogonal = TRUE)
#'
#'
#' SRS_BifactorModel <-
#' "SRS =~ SRS_1 + SRS_2 + SRS_3 + SRS_4 + SRS_5 +
#' SRS_6 + SRS_7 + SRS_8 + SRS_9 + SRS_10 +
#' SRS_11 + SRS_12 + SRS_13 + SRS_14 + SRS_15 +
#' SRS_16 + SRS_17 + SRS_18 + SRS_19 + SRS_20
#' Function =~ SRS_5 + SRS_9 + SRS_12 + SRS_15 + SRS_18
#' Pain =~ SRS_1 + SRS_2 + SRS_8 + SRS_11 + SRS_17
#' SelfImage =~ SRS_4 + SRS_6 + SRS_10 + SRS_14 + SRS_19
#' MentalHealth =~ SRS_3 + SRS_7 + SRS_13 + SRS_16 + SRS_20"
#'
#' SRS_bifactor <- lavaan::cfa(SRS_BifactorModel,
#' SRS_data,
#' ordered = paste0("SRS_", 1:20),
#' orthogonal = TRUE)
#'
#' bifactorIndices(SRS_bifactor, UniLambda = SRS_Unidimensional)
#'}
#'
#'
#' # Computing bifactor indices from standardized factor loading matrices
#' Lambda <- matrix(c(.82, .10, 0, 0,
#' .77, .35, 0, 0,
#' .79, .32, 0, 0,
#' .66, .39, 0, 0,
#' .51, 0, .71, 0,
#' .56, 0, .43, 0,
#' .68, 0, .13, 0,
#' .60, 0, .50, 0,
#' .83, 0, 0, .47,
#' .60, 0, 0, .27,
#' .78, 0, 0, .28,
#' .55, 0, 0, .75),
#' ncol = 4, byrow = TRUE)
#' colnames(Lambda) <- c("General", "SF1", "SF2", "SF3")
#' bifactorIndices(Lambda)
#'
#'
#' # bifactorIndices can also be used on two-tier models
#' MTMM_model <- "
#' Trait1 =~ T1M1_1 + T1M1_2 + T1M1_3 +
#' T1M2_1 + T1M2_2 + T1M2_3 +
#' T1M3_1 + T1M3_2 + T1M3_3
#' Trait2 =~ T2M1_1 + T2M1_2 + T2M1_3 +
#' T2M2_1 + T2M2_2 + T2M2_3 +
#' T2M3_1 + T2M3_2 + T2M3_3
#' Trait3 =~ T3M1_1 + T3M1_2 + T3M1_3 +
#' T3M2_1 + T3M2_2 + T3M2_3 +
#' T3M3_1 + T3M3_2 + T3M3_3
#'
#' Method1 =~ T1M1_1 + T1M1_2 + T1M1_3 +
#' T2M1_1 + T2M1_2 + T2M1_3 +
#' T3M1_1 + T3M1_2 + T3M1_3
#' Method2 =~ T1M2_1 + T1M2_2 + T1M2_3 +
#' T2M2_1 + T2M2_2 + T2M2_3 +
#' T3M2_1 + T3M2_2 + T3M2_3
#' Method3 =~ T1M3_1 + T1M3_2 + T1M3_3 +
#' T2M3_1 + T2M3_2 + T2M3_3 +
#' T3M3_1 + T3M3_2 + T3M3_3
#'
#' Trait1 ~~ 0*Method1
#' Trait1 ~~ 0*Method2
#' Trait1 ~~ 0*Method3
#' Trait2 ~~ 0*Method1
#' Trait2 ~~ 0*Method2
#' Trait2 ~~ 0*Method3
#' Trait3 ~~ 0*Method1
#' Trait3 ~~ 0*Method2
#' Trait3 ~~ 0*Method3
#'
#' Method1 ~~ 0*Method2
#' Method1 ~~ 0*Method3
#' Method2 ~~ 0*Method3"
#'
#' MTMM_fit <- lavaan::cfa(MTMM_model, MTMM_data)
#' bifactorIndices(MTMM_fit)
#'
bifactorIndices <- function(Lambda, Theta = NULL, UniLambda = NULL, standardized = TRUE, Phi = NULL, Thresh = NULL) {
## If categorical and lavaan, then force standardized = TRUE and message about it
if (!standardized & "lavaan" %in% class(Lambda) ) {
if (length(lavaan::lavInspect(Lambda, "ordered")) > 0) {
standardized <- TRUE
message("Only bifactor indices based on standardized coefficients make sense for categorical indicators.")
}
}
## if fitted mirt object, then throw warning about Omegas probably being meaningless
if ("SingleGroupClass" %in% class(Lambda)) {
message("Interpreting omega indices for IRT models is not recommended at this time")
}
## Make Lambda, Theta, Phi, and UniLambda matrices. Do Theta and Phi first because
## they need the Lambda object, not the Lambda matrix
if (is.null(Theta)) {Theta <- getTheta(Lambda, standardized = standardized)}
# Can do Phi for SingleGroupClass and for lavaan
if (is.null(Phi)) {
if ("SingleGroupClass" %in% class(Lambda)) {
Phi <- mirt::summary(Lambda)$fcor
} else if ("lavaan" %in% class(Lambda)) {
Phi <- lavaan::lavInspect(Lambda, "std.lv")$psi
# I hate that dumb symmetric matrix print method
class(Phi) <- "matrix"
} else {
message("Latent variable covariance matrix is assumed to be the identity. This influences
FD and, in the case of categorical models, the omega indices.")
Phi <- diag(nrow = ncol(Lambda)) ## we can only get here if Lambda was provided as a matrix
}
}
# Can do Thresh for lavaan. Maybe we should do it for mirt as well.
if (is.null(Thresh) & ("lavaan" %in% class(Lambda))) {
# Check to see if items are ordered; if so, rip out the thresholds
if (length(lavaan::lavInspect(Lambda, "ordered")) > 0) {
thresh_long <- lavaan::lavInspect(Lambda, "std")$tau ## if categorical, then standardized.
rownames(thresh_long) <- sapply(strsplit(rownames(thresh_long), "[|]"), "[[", 1)
items <- unique(rownames(thresh_long))
Thresh <- lapply(items, function (i) {
thresh_long[rownames(thresh_long) == i]
})
}
}
Lambda <- getLambda(Lambda, standardized = standardized)
if (!is.null(UniLambda)) {UniLambda <- getLambda(UniLambda, standardized = standardized)}
## Build up the lists of indices. FactorLevelIndices first
FactorLevelIndices = list(ECV_SS = ECV_SS(Lambda),
ECV_SG = ECV_SG(Lambda),
ECV_GS = ECV_GS(Lambda))
if (is.null(Thresh)) {
FactorLevelIndices[["Omega"]] <- Omega_S(Lambda, Theta)
FactorLevelIndices[["OmegaH"]] <- Omega_H(Lambda, Theta)
} else {
FactorLevelIndices[["Omega"]] <- cat_Omega_S(Lambda, Thresh)
FactorLevelIndices[["OmegaH"]] <- cat_Omega_H(Lambda, Thresh)
}
if (standardized) {
FactorLevelIndices[["H"]] <- H(Lambda)
FactorLevelIndices[["FD"]] <- FD(Lambda, Phi)
} else {
message("H and FD are currently only available when standardized = TRUE")
}
## Remove any NULL values and convert to dataframe
FactorLevelIndices <- FactorLevelIndices[which(!sapply(FactorLevelIndices, is.null))]
FactorLevelIndices <- as.data.frame(FactorLevelIndices)
## Item level indices next. Figure out label on ARPB later
ARPB_indices <- ARPB(Lambda, UniLambda)
ItemLevelIndices <- list(IECV = IECV(Lambda),
RelParameterBias = ARPB_indices[[2]])
## Remove any NULL values and convert to dataframe
ItemLevelIndices <- ItemLevelIndices[which(!sapply(ItemLevelIndices, is.null))]
ItemLevelIndices <- as.data.frame(ItemLevelIndices)
if (isTRUE(all.equal(dim(ItemLevelIndices), c(0, 0)))) {ItemLevelIndices <- NULL}
if (!is.null(ItemLevelIndices) && ncol(ItemLevelIndices) == 2) {colnames(ItemLevelIndices)[2] <- "RelParBias"}
## Model level indices next
if (is.null(getGen(Lambda))) { # No general factor
ECV <- NULL
Omega <- NULL
OmegaH <- NULL
} else {
Gen <- getGen(Lambda)
ECV <- ECV_SG(Lambda)[Gen]
if (is.null(Thresh)) {
Omega <- Omega_S(Lambda, Theta)[Gen]
OmegaH <- Omega_H(Lambda, Theta)[Gen]
} else {
Omega <- cat_Omega_S(Lambda, Thresh)[Gen]
OmegaH <- cat_Omega_H(Lambda, Thresh)[Gen]
}
}
ModelLevelIndices <- c(ECV = ECV, PUC = PUC(Lambda), Omega = Omega, OmegaH = OmegaH, ARPB = ARPB_indices[[1]])
## Now put them all together
indicesList <- list(ModelLevelIndices = ModelLevelIndices,
FactorLevelIndices = FactorLevelIndices,
ItemLevelIndices = ItemLevelIndices
)
## if any index type is entirely missing, remove that index type entirely (e.g., no model or item level indices if not bifactor)
indicesList[which(!sapply(indicesList, is.null))]
}
#' bifactorIndicesMplus
#'
#' Computes all available bifactor indices given an \code{Mplus} .out file for a bifactor model
#'
#' @param Lambda is an Mplus .out file. Defaults to an open file dialog box
#' @param UniLambda is an object that the function can convert to a matrix of factor loadings.
#' The expected behavior is to store an Mplus output file as a variable and pass that variable
#' as \code{UniLambda}. Defaults to \code{NULL}, as \code{UniLambda} is only required if you wish to
#' compute \code{\link{ARPB}}.
#' @param standardized lets the function know whether it should be looking in
#' the unstandardized results or the STDYX results from the Mplus output.
#'
#' @return A list of bifactor indices, including three different ECV indices, IECV, PUC, Omega,
#' OmegaH, and ARPB. Please note that many of these indices are interpretable even when the
#' model being used is not a bifactor model; some indices may be useful for two-tier, trifactor,
#' correlated traits, and even unidimensional models.
#'
#' @details To use this function, simply call it without any arguments and a dialog box
#' will pop up for you to select a .out file of a confirmatory bifactor model.
#'
#' ARPB will only be computed if the factor loadings from a unidimensional model
#' (as a vector or as the result of using \code{\link[MplusAutomation]{readModels}} on an
#' \code{Mplus} .out file) are included. Note that if a correlated traits model is provided,
#' the omega indices will simply be the regular omega values for those factors. Interpretations
#' for individual indices as well as details about their computation can be found in the
#' man page for the individual indices.
#'
#' @seealso \code{\link{bifactorIndices}},
#' \code{\link{bifactorIndices_expl}},
#' \code{\link{bifactorIndicesMplus_expl}},
#' \code{\link{bifactorIndicesMplus_ESEM}},
#' \code{\link{ECV_SS}},
#' \code{\link{ECV_SG}},
#' \code{\link{ECV_GS}},
#' \code{\link{IECV}},
#' \code{\link{PUC}},
#' \code{\link{Omega_S}},
#' \code{\link{Omega_H}},
#' \code{\link{H}},
#' \code{\link{FD}},
#' \code{\link{ARPB}}
#'
#' @export
#'
bifactorIndicesMplus <- function(Lambda = file.choose(), UniLambda = NULL, standardized = TRUE) {
## Expectation is that UniLambda is either a .out file, which will have a class of "character"
if ("character" %in% class(UniLambda)) {UniLambda <- MplusAutomation::readModels(UniLambda)}
if (!("mplus.model" %in% class(Lambda))) {Lambda <- MplusAutomation::readModels(Lambda)}
## Check if categorical indicators.
categorical <- !is.null(Lambda$input$variable$categorical)
## categorical -> standardized
if (!standardized & categorical) {
standardized <- TRUE
message("Only bifactor indices based on standardized coefficients make sense for categorical indicators.")
}
# Fetch Phi matrix. Why isn't there a getPhi function?
if (standardized) {
# need to have standardized parameters if standardized!
if (is.null(Lambda$parameters$stdyx.standardized)) {
stop("You must request STDYX output for computing bifactor indices based on standardized coefficients.")
}
params <- Lambda$parameters$stdyx.standardized
} else {
params <- Lambda$parameters$unstandardized
}
## We need factor names to be in the same order as factor loading matrix
facNames <- params[grep(".BY", params$paramHeader), "paramHeader"]
facNames <- gsub(".BY", "", facNames, fixed = TRUE)
facNames <- unique(facNames)
facVar <- sapply(facNames, function (fac) {
params[params$paramHeader == "Variances" & params$param == fac,"est"]
})
# grab factor correlations, make them more easily parsed, then grab them
factorCorrs <- params[grep(".WITH", params$paramHeader), ]
factorCorrs$paramHeader <- gsub(".WITH", "", factorCorrs$paramHeader, fixed = TRUE)
Phi <- lapply(1:length(facNames), function (x) {
fac1 <- facNames[x]
sapply(1:length(facNames), function (y) {
fac2 <- facNames[y]
## Factor variances are different
if (x == y) {
facVar[x]
} else {
## Look for (x,y) and if that's not there look for (y,x)
if (length(factorCorrs[factorCorrs$paramHeader == fac1 & factorCorrs$param == fac2, "est"]) == 1) {
factorCorrs[factorCorrs$paramHeader == fac1 & factorCorrs$param == fac2, "est"]
} else {
factorCorrs[factorCorrs$paramHeader == fac2 & factorCorrs$param == fac1, "est"]
}
}
})
})
Phi <- matrix(unlist(Phi), byrow=TRUE, nrow=length(Phi) )
if (categorical) {
Lambda <- getLambda(Lambda, standardized = standardized)
Theta <- getTheta(Lambda, standardized = standardized)
## now get thresholds
items <- rownames(Lambda)
thresh_long <- params[params$paramHeader == "Thresholds",]
thresh_long$itemName <- sapply(strsplit(thresh_long$param, "[$]"), "[[", 1)
Thresh <- lapply(items, function (i) {
thresh_long[thresh_long$itemName == i, "est"]
})
} else {
Theta <- getTheta(Lambda, standardized = standardized)
Lambda <- getLambda(Lambda, standardized = standardized)
Thresh <- NULL
}
# if diag(Phi) is not all ones, then we are in trouble. Let's deal with that
D <- diag(x = sqrt(diag(Phi)))
Phi <- solve(D) %*% Phi %*% t(solve(D))
Lambda <- Lambda %*% solve(D)
colnames(Lambda) <- facNames
bifactorIndices(Lambda, Theta, UniLambda, standardized, Phi, Thresh)
}
|
/scratch/gouwar.j/cran-all/cranData/BifactorIndicesCalculator/R/BifactorIndices.R
|
#' BifactorIndicesCalculator: A package for computing statistical indices relevant to bifactor measurement models.
#'
#' The BifactorIndicesCalculator package provides functions use to compute
#' indices described in Rodriguez et al. (2016) for confirmatory or exploratory models.
#'
#' @section BifactorIndicesCalculator functions:
#' The BifactorIndicesCalculator package includes helper functions that can compute the various
#' indices from Rodriguez et al. (2016) for fitted \pkg{lavaan} or \pkg{mirt} objects using
#' \code{\link{bifactorIndices}} and \code{Mplus} output files using
#' \code{\link{bifactorIndicesMplus}}. For users of other software and those
#' interested, a matrix of standardized factor loadings can also be input to
#' \code{\link{bifactorIndices}}.
#'
#' Calculation of bifactor indices for exploratory bifactor models can also be accomplished
#' using \code{\link{bifactorIndices_expl}} for exploratory models fit using \pkg{psych}
#' or using \code{\link{bifactorIndicesMplus_expl}} for exploratory models fit using
#' \code{Mplus}. For ESEM models fit using \code{Mplus}, please use \code{\link{bifactorIndicesMplus_ESEM}}
#'
#' @section References:
#' Rodriguez, A., Reise, S. P., & Haviland, M. G. (2016).
#' Evaluating bifactor models: calculating and interpreting
#' statistical indices. \emph{Psychological Methods, 21}(2), 137.
#'
#' @docType package
#' @name BifactorIndicesCalculator
NULL
|
/scratch/gouwar.j/cran-all/cranData/BifactorIndicesCalculator/R/BifactorIndicesCalculator.R
|
#' ECV_SS
#'
#' Computes an ECV index for all factors which can be interpreted as the proportion of common
#' variance of the items in each factor which is due to that factor;
#' \code{ECV_SS} should be read 'ECV of a specific factor with respect to itself.' Here, ECV is computed
#' only with respect to items which load on the factor. Note that \code{ECV_SS} of the general factor
#' is simply the ECV. Stucky and Edelen (2015, p. 201) do not refer to this form of ECV. In the Excel
#' version of the bifactor indices calculator (Dueber, 2017), this index is referred to as
#' 'ECV (NEW).' \code{ECV_SS} is useful in that it can be computed when there is no general factor, such
#' as in a two-tier model, and interpreted in the same way as ECV for general factors.
#'
#' \code{ECV_SS} is called by \code{\link{bifactorIndices}} and the various convenience functions
#' for exploratory models and/or Mplus output,
#' which are the only functions in this package intended for casual users.
#'
#' @param Lambda is a matrix of factor loadings. Be sure that all factors have the same variance
#' before calling this function.
#'
#' @return A vector of ECVs for all factors
#'
#' @examples
#' Lambda <- matrix(c(.82, .10, 0, 0,
#' .77, .35, 0, 0,
#' .79, .32, 0, 0,
#' .66, .39, 0, 0,
#' .51, 0, .71, 0,
#' .56, 0, .43, 0,
#' .68, 0, .13, 0,
#' .60, 0, .50, 0,
#' .83, 0, 0, .47,
#' .60, 0, 0, .27,
#' .78, 0, 0, .28,
#' .55, 0, 0, .75),
#' ncol = 4, byrow = TRUE)
#' colnames(Lambda) <- c("General", "SF1", "SF2", "SF3")
#' ECV_SS(Lambda)
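#'
#' # As a rough manual check (in this example, items 1-4 are the SF1 items):
#' # sum(Lambda[1:4, "SF1"]^2) / sum(Lambda[1:4, ]^2)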
#'
#'
#' @references
#' Dueber, D. M. (2017). Bifactor Indices Calculator: A Microsoft Excel-based tool to calculate various indices relevant to bifactor CFA models. \doi{10.13023/edp.tool.01}
#'
#' Stucky, B. D., & Edelen, M. O. (2015). Using hierarchical IRT models to create unidimensional measures from multidimensional data. In S. P. Reise & D. A. Revicki (Eds.), \emph{Handbook of item response theory modeling: Applications to typical performance assessment} (pp.183-206). New York: Routledge.
#'
#' @export
#'
#' @seealso \code{\link{ECV_SG}}, \code{\link{ECV_GS}}, \code{\link{bifactorIndices}}
#'
ECV_SS <- function(Lambda) {
ECV_SS_C <- function(Fac, Lambda) {
## Isolate the items which load on the chosen factor FAC
inFactor <- Lambda[,Fac] != 0
## Square the loadings
L2 = Lambda^2
## Compute the appropriate ratio of sums
sum(L2[,Fac]*inFactor)/sum(L2*inFactor)
}
ECV_results <- sapply(1:ncol(Lambda), ECV_SS_C, Lambda)
names(ECV_results) <- colnames(Lambda)
ECV_results
}
#' ECV_SG
#'
#' Computes an ECV index for all factors which can be interpreted as the proportion of
#' common variance of all items which is due to the specific factor;
#' \code{ECV_SG} should be read 'ECV of a specific factor with respect to the general
#' factor.' Here,
#' ECV is computed with respect to the items of the general factor using the specific factor loadings in
#' the numerator; Stucky and Edelen (2015, p. 199)
#' refer to this index simply as 'specific-dimension ECV.' Note that \code{ECV_SG} of the general factor
#' is simply the ECV. In the Excel version of the Bifactor
#' Indices Calculator (Dueber, 2017), this form of ECV is referred to as 'ECV (S&E).'
#'
#' \code{ECV_SG} is called by \code{\link{bifactorIndices}} and the various convenience functions
#' for exploratory models and/or Mplus output,
#' which are the only functions in this package intended for casual users.
#'
#' @param Lambda is a matrix of factor loadings. Be sure that all factors have the same
#' variance before calling this function.
#'
#' @return A vector of ECVs for all factors
#'
#' @examples
#' Lambda <- matrix(c(.82, .10, 0, 0,
#' .77, .35, 0, 0,
#' .79, .32, 0, 0,
#' .66, .39, 0, 0,
#' .51, 0, .71, 0,
#' .56, 0, .43, 0,
#' .68, 0, .13, 0,
#' .60, 0, .50, 0,
#' .83, 0, 0, .47,
#' .60, 0, 0, .27,
#' .78, 0, 0, .28,
#' .55, 0, 0, .75),
#' ncol = 4, byrow = TRUE)
#' colnames(Lambda) <- c("General", "SF1", "SF2", "SF3")
#' ECV_SG(Lambda)
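#'
#' # As a rough manual check for SF1: its squared loadings over ALL squared loadings
#' # sum(Lambda[1:4, "SF1"]^2) / sum(Lambda^2)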
#'
#' @references
#' Dueber, D. M. (2017). Bifactor Indices Calculator: A Microsoft Excel-based tool to
#' calculate various indices relevant to bifactor CFA models. \doi{10.13023/edp.tool.01}
#'
#' Stucky, B. D., & Edelen, M. O. (2015). Using hierarchical IRT models to create
#' unidimensional measures from multidimensional data. In S. P. Reise & D. A. Revicki
#' (Eds.), \emph{Handbook of item response theory modeling: Applications to typical
#' performance assessment} (pp.183-206). New York: Routledge.
#'
#' @export
#'
#' @seealso \code{\link{ECV_SS}}, \code{\link{ECV_GS}}, \code{\link{bifactorIndices}}
#'
ECV_SG <- function(Lambda) {
if (is.null(getGen(Lambda))) return(NULL)
ECV_SG_C <- function(Fac, Lambda) {
## Make a matrix of logical vectors for non-zero elements of Lambda.
inFactor <- Lambda[,Fac] != 0
## Square the loadings
L2 <- Lambda^2
## Compute the appropriate ratio of sums
sum(L2[,Fac]*inFactor)/sum(L2)
}
ECV_results <- sapply(1:ncol(Lambda), ECV_SG_C, Lambda)
names(ECV_results) <- colnames(Lambda)
ECV_results
}
#' ECV_GS
#'
#' Computes an ECV index for all factors which can be interpreted as the proportion of common
#' variance of the items in each specific factor which is due to the general factor;
#' \code{ECV_GS} should be read 'ECV of the general factor with respect to a specific
#' factor.' Here, ECV is
#' computed only with respect to the items of a specific factor using the general factor
#' loadings in the numerator;
#' Stucky and Edelen (2015, p. 201) refer to this index as the 'within-domain ECV' for the
#' specific factor. In the
#' Excel version of the bifactor indices calculator (Dueber, 2017), this index is not computed.
#'
#' \code{ECV_GS} is called by \code{\link{bifactorIndices}} and the various convenience functions
#' for exploratory models and/or Mplus output,
#' which are the only functions in this package intended for casual users.
#'
#' @param Lambda is a matrix of factor loadings. Be sure that all factors have the same variance
#' before calling this function.
#'
#' @return A vector of ECVs for all factors
#'
#' @examples
#' Lambda <- matrix(c(.82, .10, 0, 0,
#' .77, .35, 0, 0,
#' .79, .32, 0, 0,
#' .66, .39, 0, 0,
#' .51, 0, .71, 0,
#' .56, 0, .43, 0,
#' .68, 0, .13, 0,
#' .60, 0, .50, 0,
#' .83, 0, 0, .47,
#' .60, 0, 0, .27,
#' .78, 0, 0, .28,
#' .55, 0, 0, .75),
#' ncol = 4, byrow = TRUE)
#' colnames(Lambda) <- c("General", "SF1", "SF2", "SF3")
#' ECV_GS(Lambda)
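#'
#' # As a rough manual check for SF1: the general-factor loadings of SF1's items
#' # over all loadings of those items
#' # sum(Lambda[1:4, "General"]^2) / sum(Lambda[1:4, ]^2)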
#'
#' @references
#' Dueber, D. M. (2017). Bifactor Indices Calculator: A Microsoft Excel-based tool to calculate
#' various indices relevant to bifactor CFA models. \doi{10.13023/edp.tool.01}
#'
#' Stucky, B. D., & Edelen, M. O. (2015). Using hierarchical IRT models to create unidimensional
#' measures from multidimensional data. In S. P. Reise & D. A. Revicki (Eds.), \emph{Handbook of item
#' response theory modeling: Applications to typical performance assessment} (pp.183-206).
#' New York: Routledge.
#'
#' @export
#'
#' @seealso \code{\link{ECV_SS}}, \code{\link{ECV_SG}}, \code{\link{bifactorIndices}}
#'
ECV_GS <- function(Lambda) {
ECV_GS_C <- function(Fac, Lambda, genFac) {
## Make a matrix of logical vectors for non-zero elements of Lambda.
inFactor <- Lambda[,Fac] != 0
## Square the loadings
L2 <- Lambda^2
## Compute the appropriate ratio of sums
sum(L2[,genFac]*inFactor)/sum(L2*inFactor)
}
## ECV_GS only makes sense when there is a general factor
if (is.null(getGen(Lambda))) return(NULL)
genFac <- getGen(Lambda)
ECV_results <- sapply(1:ncol(Lambda), ECV_GS_C, Lambda, genFac)
names(ECV_results) <- colnames(Lambda)
ECV_results
}
#' IECV
#'
#' Computes an ECV index for each item which can be interpreted as the proportion of common
#' variance of that item due to the general factor. Stucky and Edelen (2015, p. 201) define
#' I-ECV, which is also computed in the Excel version of the bifactor indices calculator
#' (Dueber, 2017).
#'
#' \code{IECV} is called by \code{\link{bifactorIndices}} and the various convenience functions
#' for exploratory models and/or Mplus output,
#' which are the only functions in this package intended for casual users.
#'
#' @param Lambda is a matrix of factor loadings. Be sure that all factors have the same variance
#' before calling this function.
#'
#' @return A vector of item ECVs
#'
#' @examples
#' Lambda <- matrix(c(.82, .10, 0, 0,
#' .77, .35, 0, 0,
#' .79, .32, 0, 0,
#' .66, .39, 0, 0,
#' .51, 0, .71, 0,
#' .56, 0, .43, 0,
#' .68, 0, .13, 0,
#' .60, 0, .50, 0,
#' .83, 0, 0, .47,
#' .60, 0, 0, .27,
#' .78, 0, 0, .28,
#' .55, 0, 0, .75),
#' ncol = 4, byrow = TRUE)
#' colnames(Lambda) <- c("General", "SF1", "SF2", "SF3")
#' IECV(Lambda)
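#'
#' # As a rough manual check, the same values via vectorized arithmetic:
#' # Lambda[, "General"]^2 / rowSums(Lambda^2)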
#'
#' @references
#' Dueber, D. M. (2017). Bifactor Indices Calculator: A Microsoft Excel-based tool to calculate
#' various indices relevant to bifactor CFA models. \doi{10.13023/edp.tool.01}
#'
#' Stucky, B. D., & Edelen, M. O. (2015). Using hierarchical IRT models to create unidimensional
#' measures from multidimensional data. In S. P. Reise & D. A. Revicki (Eds.), \emph{Handbook of item
#' response theory modeling: Applications to typical performance assessment} (pp.183-206).
#' New York: Routledge.
#'
#' @export
#'
#' @seealso \code{\link{ECV_SS}}, \code{\link{ECV_SG}}, \code{\link{ECV_GS}}, \code{\link{bifactorIndices}}
#'
IECV <- function(Lambda) {
## Compute IECV for single item
IECV_C <- function(Item, Lambda, genFac) {
## Square the loadings
L2 <- Lambda^2
IECV <- L2[Item, genFac]/sum(L2[Item,])
names(IECV) <- rownames(Lambda)[Item]
IECV
}
## I-ECV only makes sense when there is a general factor
if (is.null(getGen(Lambda))) return(NULL)
genFac <- getGen(Lambda)
IECV_results <- sapply(1:nrow(Lambda), IECV_C, Lambda, genFac)
names(IECV_results) <- rownames(Lambda)
IECV_results
}
|
/scratch/gouwar.j/cran-all/cranData/BifactorIndicesCalculator/R/ECV_Indices.R
|
#' bifactorIndices_expl
#'
#' Computes all available bifactor indices for the input given.
#'
#' @param Lambda is a factor loading matrix from EFA or an object which can be converted to such.
#' Currently only \code{psych::fa()} objects are supported.
#'
#' @param ItemsBySF is a list, indexed by factor, of vectors of item names belonging to each specific
#' factor. You must NOT include the general factor in this list, and the list must have names which
#' match the factor names in \code{Lambda}. It is recommended you look at the EFA solution first
#' to see which factor is which. Defaults to \code{NULL}, in which case composition of specific
#' factors is automated by comparing loadings to \code{LoadMin}
#'
#' @param LoadMin is the minimum loading size so that an item is considered to "belong" to a factor.
#' If \code{ItemsBySF} is not provided, then items are assigned to factors based on whether their
#' loading on that factor is greater than \code{LoadMin}. If \code{ItemsBySF} is provided, then
#' warnings are issued whenever items load above \code{LoadMin} on factors to which they do not belong,
#' or do not load above \code{LoadMin} on factors to which they do belong. \code{LoadMin} defaults to 0.2.
#'
#' @return A list of bifactor indices, including three different ECV indices, Omega, and
#' OmegaH.
#'
#' @details Only standardized models are considered for exploratory models. PUC and ARPB are not
#' supported for exploratory models currently, although that may change.
#'
#' @seealso \code{\link{bifactorIndices}},
#' \code{\link{bifactorIndicesMplus}},
#' \code{\link{bifactorIndicesMplus_expl}},
#' \code{\link{bifactorIndicesMplus_ESEM}},
#' \code{\link{ECV_SS}},
#' \code{\link{ECV_SG}},
#' \code{\link{ECV_GS}},
#' \code{\link{IECV}},
#' \code{\link{Omega_S}},
#' \code{\link{Omega_H}},
#' \code{\link{H}},
#' \code{\link{FD}}
#'
#'
#' @export
#'
#' @examples
#'
#'# psych::fa() cannot access the rotations unless the package is loaded, so we load psych first.
#'library(psych)
#'SRS_BEFA <- fa(SRS_data, nfactors = 5, rotate = "bifactor")
#'
#'# inspect the solution to see which exploratory factors belong to which subdomain
#'SRS_BEFA$loadings
#'ItemsBySF = list(MR4 = paste0("SRS_", c(5, 9, 12, 15, 18)),
#' MR2 = paste0("SRS_", c(1, 2, 8, 11, 17)),
#' MR3 = paste0("SRS_", c(4, 6, 10, 14, 19)),
#' MR5 = paste0("SRS_", c(3, 7, 13, 16, 20)))
#'
#'bifactorIndices_expl(SRS_BEFA, ItemsBySF = ItemsBySF)
bifactorIndices_expl <- function(Lambda, ItemsBySF = NULL, LoadMin = 0.2) {
## I'll make this into S3 methods eventually
  ## This is the method for psych::fa
## Leaving this as is until I have a reason not to.
getLambdaExploratory <- function (Lambda) {
Lambda <- Lambda$loadings
class(Lambda) <- "matrix"
Lambda
}
if ("psych" %in% class(Lambda)) Lambda <- getLambdaExploratory(Lambda)
Items <- rownames(Lambda)
names(Items) <- Items
Factors <- colnames(Lambda)
names(Factors) <- Factors
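  ## If ItemsBySF is not supplied, assign each item to every factor on which its
  ## loading exceeds LoadMin, and print the resulting assignment matrix below.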
if (is.null(ItemsBySF)) {
ItemsBySF <- lapply(Factors, function (Fac) {
Items[Lambda[,Fac] > LoadMin]
})
names(ItemsBySF) <- Factors
SmallLambda <- round(Lambda, 3)
SmallLambda[SmallLambda < LoadMin] <- 0
cat("This matrix describes assignment of items to factors \n")
print(ifelse(SmallLambda == 0, "", SmallLambda), quote = FALSE)
cat("\n \n")
} else {
# Need to insert the general factor here!
if (length(ItemsBySF) == length(Factors) - 1) {
GenFac <- setdiff(Factors, names(ItemsBySF))
ItemsBySF[[paste(GenFac)]] <- Items
# We also need to reorder ItemsBySF so it matches the order of Factors.
# I think everything below this should be completely redone to avoid the confusion
# of two different lists of factors!!
ItemsBySF <- ItemsBySF[Factors]
} else {
stop("An error was made in the specification of ItemsBySF. It should have one fewer
elements than the total number of factors")
}
# issue a warning for each loading above LoadMin on the wrong factor or loading below LoadMin on the right factor
for (I in Items) {
for (Fac in Factors) {
if (!(I %in% ItemsBySF[[Fac]]) & (Lambda[I,Fac] > LoadMin)) {
message(paste0("Item ", I, " loads on factor ", Fac, " above ", LoadMin))
}
if ((I %in% ItemsBySF[[Fac]]) & (Lambda[I,Fac] < LoadMin)) {
message(paste0("Item ", I, " loads on factor ", Fac, " below ", LoadMin))
}
}
}
}
  # Is there a single factor that pervades all items?
FactorLengths <- sapply(ItemsBySF, length)
# Issue a warning if no true general factor
if (max(FactorLengths) != nrow(Lambda)) message("The exploratory model has no general factor")
## Some of the indices we want involve all items
GlobalIndices <- bifactorIndices(Lambda)
## For specific factor indices, we only use the items on the specific factor
SpecificIndicesList <- lapply(Factors, function (Fac) {
bifactorIndices(Lambda[ItemsBySF[[Fac]],])
})
SpecificIndices <- as.data.frame(t(sapply(Factors, function (Fac) {
SpecificIndicesList[[Fac]]$FactorLevelIndices[Fac,]
})))
if (max(FactorLengths) == nrow(Lambda)) {
GenFac <- which(FactorLengths == nrow(Lambda))
ModelIndices <- GlobalIndices[["FactorLevelIndices"]][GenFac,]
names(ModelIndices) <- c("ECV", "Omega", "OmegaH", "H", "FD")
# ECV_SG taken from version with all items
SpecificIndices$ECV_SG <- GlobalIndices$FactorLevelIndices$ECV_SS
# ECV_GS is the general factor's ECV_SS when only items on the specific are included
SpecificIndices$ECV_GS <- sapply(Factors, function (Fac) {
SpecificIndicesList[[Fac]]$FactorLevelIndices[GenFac,"ECV_SS"]
})
# Reorder the columns
SpecificIndices <- SpecificIndices[,c("ECV_SS", "ECV_SG", "ECV_GS", "Omega", "OmegaH", "H", "FD")]
    # If only one factor is general, then we can do I-ECV
    IECV <- NULL
    if (sum(FactorLengths == nrow(Lambda)) == 1) {
# The I-ECV function cannot be used because there is no "true" general factor
L2 <- Lambda^2
IECV <- L2[,GenFac] / rowSums(L2)
}
return(list(ModelLevelIndices = ModelIndices,
FactorLevelIndices = SpecificIndices,
IECV = IECV)
)
} else {
return(SpecificIndices)
}
}
#' bifactorIndicesMplus_expl
#'
#' Computes all available bifactor indices given an \code{Mplus} .out file for a bifactor EFA
#'
#' @param Lambda is an Mplus .out file. Defaults to an open file dialog box
#'
#' @param ItemsBySF is a list, indexed by factor, of vectors of item names belonging to each
#' factor. You must include the general factor in this list, and the list must have names which
#' match the factor names in Mplus. Defaults to \code{NULL}, in which case composition of specific
#' factors is automated by comparing loadings to \code{LoadMin}.
#'
#' @param LoadMin is the minimum loading size so that an item is considered to "belong" to a factor.
#' If \code{ItemsBySF} is not provided, then items are assigned to factors based on whether their
#' loading on that factor is greater than \code{LoadMin}. If \code{ItemsBySF} is provided, then
#' warnings are issued whenever items load above \code{LoadMin} on factors to which they do not belong,
#' or do not load above \code{LoadMin} on factors to which they do belong. \code{LoadMin} defaults to 0.2.
#'
#' @return A list of bifactor indices, including three different ECV indices, Omega, and
#' OmegaH.
#'
#' @details To use this function, simply call it without any arguments and a dialog box
#' will pop up for you to select a .out file of an exploratory bifactor model.
#'
#' EFA models are not currently (3/3/2020) supported by \code{MplusAutomation::readModels()},
#' but they will be in the very near future, at which time this function will be completed.
#'
#' @seealso \code{\link{bifactorIndices}},
#' \code{\link{bifactorIndicesMplus}},
#' \code{\link{bifactorIndicesMplus_ESEM}},
#' \code{\link{bifactorIndices_expl}}
#'
#'
#' @export
#'
bifactorIndicesMplus_expl <- function(Lambda = file.choose(), ItemsBySF = NULL, LoadMin = 0.2) {
## If Lambda hasn't been put through MplusAutomation::readModels, then we need to do that
if (!("mplus.model" %in% class(Lambda))) {Lambda <- MplusAutomation::readModels(Lambda)}
stop("MplusAutomation does not support EFA output yet, but should soon!")
}
#' bifactorIndicesMplus_ESEM
#'
#' Computes all available bifactor indices given an \code{Mplus} .out file for a bifactor ESEM
#'
#' @param Lambda is an Mplus .out file. Defaults to an open file dialog box
#'
#' @param ItemsBySF is a list, indexed by factor, of vectors of item names belonging to each
#' factor. You must NOT include the general factor in this list, and the list must have names which
#' match the factor names in Mplus. Defaults to \code{NULL}, in which case composition of specific
#' factors is automated by comparing loadings to \code{LoadMin}
#'
#' @param LoadMin is the minimum loading size so that an item is considered to "belong" to a factor.
#' If \code{ItemsBySF} is not provided, then items are assigned to factors based on whether their
#' loading on that factor is greater than \code{LoadMin}. If \code{ItemsBySF} is provided, then
#' warnings are issued whenever items load above \code{LoadMin} on factors to which they do not belong,
#' or do not load above \code{LoadMin} on factors to which they do belong. \code{LoadMin} defaults to 0.2.
#'
#' @return A list of bifactor indices, including three different ECV indices, Omega, and
#' OmegaH.
#'
#' @details To use this function, simply call it without any arguments and a dialog box
#' will pop up for you to select a .out file for an ESEM model.
#'
#' Only standardized models are considered for exploratory models. PUC and ARPB are not
#' supported for exploratory models currently, although that may change.
#'
#' @seealso \code{\link{bifactorIndices}},
#' \code{\link{bifactorIndicesMplus}},
#' \code{\link{bifactorIndicesMplus_expl}},
#' \code{\link{bifactorIndices_expl}}
#'
#'
#' @export
#'
bifactorIndicesMplus_ESEM <- function(Lambda = file.choose(),
ItemsBySF = NULL,
LoadMin = 0.2) {
## If Lambda hasn't been put through MplusAutomation::readModels, then we need to do that
if (!("mplus.model" %in% class(Lambda))) {Lambda <- MplusAutomation::readModels(Lambda)}
## Now we need to fish out the factor loading matrix
Lambda <- getLambda(Lambda)
bifactorIndices_expl(Lambda, ItemsBySF = ItemsBySF, LoadMin = LoadMin)
}
|
/scratch/gouwar.j/cran-all/cranData/BifactorIndicesCalculator/R/Exploratory_BifactorIndices.R
|
#' Omega_S
#'
#' Computes an omega reliability estimate for all factors as described in Rodriguez, Reise, and
#' Haviland (2016).
#'
#' \code{Omega_S} is called by \code{\link{bifactorIndices}} and the various convenience functions
#' for exploratory models and/or Mplus output,
#' which are the only functions in this package intended for casual users.
#'
#' @param Lambda is a matrix of factor loadings
#' @param Theta is a vector of indicator error variances
#'
#' @return A \code{numeric}, the omega reliability estimate for all factors.
#'
#' @examples
#' Lambda <- matrix(c(.82, .10, 0, 0,
#' .77, .35, 0, 0,
#' .79, .32, 0, 0,
#' .66, .39, 0, 0,
#' .51, 0, .71, 0,
#' .56, 0, .43, 0,
#' .68, 0, .13, 0,
#' .60, 0, .50, 0,
#' .83, 0, 0, .47,
#' .60, 0, 0, .27,
#' .78, 0, 0, .28,
#' .55, 0, 0, .75),
#' ncol = 4, byrow = TRUE)
#' colnames(Lambda) <- c("General", "SF1", "SF2", "SF3")
#' Theta <- rep(1, nrow(Lambda)) - rowSums(Lambda^2)
#' Omega_S(Lambda, Theta)
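#'
#' # A rough manual check for the general factor (all items load on it):
#' # sum(colSums(Lambda)^2) / (sum(colSums(Lambda)^2) + sum(Theta))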
#'
#' @references
#' Rodriguez, A., Reise, S. P., & Haviland, M. G. (2016). Evaluating bifactor models:
#' calculating and interpreting statistical indices. \emph{Psychological Methods, 21}(2),
#' 137 \doi{10.1037/met0000045}.
#'
#' @export
#'
#' @seealso \code{\link{Omega_H}}, \code{\link{bifactorIndices}}
#'
Omega_S <- function(Lambda, Theta) {
Omega_S_C <- function(Fac, Lambda, Theta) {
## Make a matrix of logical vectors for non-zero elements of Lambda.
inFactor <- Lambda[,Fac] != 0
## Compute the appropriate ratio of sums
sum(colSums(Lambda*inFactor)^2)/(sum(colSums(Lambda*inFactor)^2) + sum(Theta*inFactor))
}
if (is.null(Theta)) return(NULL)
omega_results <- sapply(1:ncol(Lambda), Omega_S_C, Lambda = Lambda, Theta = Theta)
names(omega_results) <- colnames(Lambda)
omega_results
}
#' cat_Omega_S
#'
#' Computes an omega reliability estimate for all factors as described in Rodriguez, Reise, and
#' Haviland (2016).
#'
#' \code{cat_Omega_S} is called by \code{\link{bifactorIndices}} and the various convenience functions
#' for exploratory models and/or Mplus output,
#' which are the only functions in this package intended for casual users.
#'
#' @param Lambda is a matrix of standardized factor loadings
#' @param Thresh is a list (indexed by items) of vectors of item thresholds (items must be
#' on a standardized metric).
#' @param Phi is the latent variable covariance matrix. Defaults to \code{NULL}, and
#' the identity matrix will be used. No other options are currently available.
#' @param Denom specifies how the variance of the total score will be computed. Defaults
#' to \code{NULL}, and the model implied total score variance will be used. No other options
#' are currently available.
#'
#' @return A \code{numeric}, the omega reliability estimate for all factors using the technique of
#' Green and Yang (2009).
#'
#' @examples
#'
#' Lambda <- matrix(c(.82, .10, 0, 0,
#' .77, .35, 0, 0,
#' .79, .32, 0, 0,
#' .66, .39, 0, 0,
#' .51, 0, .71, 0,
#' .56, 0, .43, 0,
#' .68, 0, .13, 0,
#' .60, 0, .50, 0,
#' .83, 0, 0, .47,
#' .60, 0, 0, .27,
#' .78, 0, 0, .28,
#' .55, 0, 0, .75),
#' ncol = 4, byrow = TRUE)
#' colnames(Lambda) <- c("General", "SF1", "SF2", "SF3")
#'
#' Thresh = list(c(-1, 0, 1), c(-0.5, 0, 0.5),
#' c(0, 1, 2), c(0, 0.5, 1),
#' c(-2, -1, 0), c(-1, -0.5, 0),
#' c(-1, 0, 2), c(-0.5, 0, 1),
#' c(-2, 0, 1), c(-1, 0, 0.5),
#' c(-1, 0, 1), c(-0.5, 0, 0.5))
#'
#' cat_Omega_S(Lambda, Thresh)
#'
#' @references
#' Rodriguez, A., Reise, S. P., & Haviland, M. G. (2016). Evaluating bifactor models:
#' calculating and interpreting statistical indices. \emph{Psychological Methods, 21}(2),
#' 137 \doi{10.1037/met0000045}.
#'
#' Green, S. B., & Yang, Y. (2009). Reliability of summed item scores using
#' structural equation modeling: An alternative to coefficient alpha.
#' \emph{Psychometrika, 74}(1), 155-167 \doi{10.1007/s11336-008-9099-3}.
#'
#' @export
#'
#' @seealso \code{\link{Omega_H}}, \code{\link{bifactorIndices}}
#'
#'
cat_Omega_S <- function(Lambda, Thresh, Phi = NULL, Denom = NULL) {
cat_Omega_S_C <- function(Fac, Lambda, Thresh, Phi = NULL, Denom = NULL) {
## Make a matrix of logical vectors for non-zero elements of Lambda.
inFactor <- Lambda[,Fac] != 0
## subset Lambda to only include appropriate items
## and make sure it is still a MATRIX!!
subLambda <- Lambda[inFactor,]
subLambda <- as.matrix(subLambda)
## create Phi matrix
if (is.null(Phi)) {Phi <- diag(ncol(subLambda))}
num_items <- nrow(subLambda)
## latent item covariances (correlations, because they're standardized!!)
lat_cov <- subLambda %*% Phi %*% t(subLambda)
if (is.null(Denom)) {
poly_cor <- lat_cov
diag(poly_cor) <- rep(1, num_items)
}
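    ## Green & Yang (2009), eq. 19: cov(x_j, x_jp) is the sum over threshold
    ## pairs of the bivariate normal CDF at (t_j[c], t_jp[cp]) with correlation r,
    ## minus the product of the summed univariate normal CDFs.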
## Create covariance matrix of parallel items
par_cov_mat <- sapply(1:num_items, function (j) {
t_j <- Thresh[[j]]
sapply(1:num_items, function(jp) {
t_jp <- Thresh[[jp]]
## cov(x_j, x_jp)
## first, the left half of the expression in equation 19 in Green and Yang (2009)
left <- sum(sapply(1:length(t_j), function(c) {
sapply(1:length(t_jp), function(cp) {
r <- lat_cov[j, jp]
mnormt::pmnorm(c(t_j[c], t_jp[cp]), c(0, 0), matrix(c(1, r, r, 1), 2))
}) ## end cp
})) ## end c
        ## now the two expressions in the right half of the expression in equation 19 in G&Y
right_j <- sum(stats::pnorm(t_j))
right_jp <- sum(stats::pnorm(t_jp))
## put it together and what do you get? Bibbidi-Bobbidi-Boo
left - right_j * right_jp
}) ## end jp
}) ## end j
## create covariance matrix of items... copy/paste of par_cov_mat,
    ## but switch from lat_cov to poly_cor
item_cov_mat <- sapply(1:num_items, function (j) {
t_j <- Thresh[[j]]
sapply(1:num_items, function(jp) {
t_jp <- Thresh[[jp]]
## cov(x_j, x_jp)
## first, the left half of the expression in equation 19 in Green and Yang (2009)
left <- sum(sapply(1:length(t_j), function(c) {
sapply(1:length(t_jp), function(cp) {
r <- poly_cor[j, jp]
mnormt::pmnorm(c(t_j[c], t_jp[cp]), c(0, 0), matrix(c(1, r, r, 1), 2))
}) ## end cp
})) ## end c
        ## now the two expressions in the right half of the expression in equation 19 in G&Y
right_j <- sum(stats::pnorm(t_j))
right_jp <- sum(stats::pnorm(t_jp))
## put it together and what do you get? Bibbidi-Bobbidi-Boo
left - right_j * right_jp
}) ## end jp
}) ## end j
numer <- sum(par_cov_mat)
denom <- sum(item_cov_mat)
numer/denom
}
omega_results <- sapply(1:ncol(Lambda), cat_Omega_S_C, Lambda = Lambda, Thresh = Thresh)
names(omega_results) <- colnames(Lambda)
omega_results
}
#' Omega_H
#'
#' Computes hierarchical omega reliability estimate for all factors as described in
#' Rodriguez, Reise, and Haviland (2016).
#'
#' \code{Omega_H} is called by \code{\link{bifactorIndices}} and the various convenience functions
#' for exploratory models and/or Mplus output,
#' which are the only functions in this package intended for casual users.
#'
#' @param Lambda is a matrix of factor loadings
#' @param Theta is a vector of indicator error variances
#'
#' @return A \code{numeric}, the omega reliability estimate for all factors.
#'
#' @examples
#' Lambda <- matrix(c(.82, .10, 0, 0,
#' .77, .35, 0, 0,
#' .79, .32, 0, 0,
#' .66, .39, 0, 0,
#' .51, 0, .71, 0,
#' .56, 0, .43, 0,
#' .68, 0, .13, 0,
#' .60, 0, .50, 0,
#' .83, 0, 0, .47,
#' .60, 0, 0, .27,
#' .78, 0, 0, .28,
#' .55, 0, 0, .75),
#' ncol = 4, byrow = TRUE)
#' colnames(Lambda) <- c("General", "SF1", "SF2", "SF3")
#' Theta <- rep(1, nrow(Lambda)) - rowSums(Lambda^2)
#' Omega_H(Lambda, Theta)
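#'
#' # A rough manual check for the general factor:
#' # sum(Lambda[, "General"])^2 / (sum(colSums(Lambda)^2) + sum(Theta))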
#'
#' @section References:
#' Rodriguez, A., Reise, S. P., & Haviland, M. G. (2016). Evaluating bifactor models:
#' Calculating and interpreting statistical indices. Psychological Methods, 21(2),
#' 137 \doi{10.1037/met0000045}.
#'
#' @export
#'
#' @seealso \code{\link{Omega_S}}, \code{\link{bifactorIndices}}
#'
#'
Omega_H <- function(Lambda, Theta) {
Omega_H_C <- function(Fac, Lambda, Theta) {
## Make a matrix of logical vectors for non-zero elements of Lambda.
inFactor <- Lambda[,Fac] != 0
## Compute the appropriate ratio of sums
sum(Lambda[,Fac])^2/(sum(colSums(Lambda*inFactor)^2) + sum(Theta*inFactor))
}
if (is.null(Theta)) return(NULL)
omega_results <- sapply(1:ncol(Lambda), Omega_H_C, Lambda = Lambda, Theta = Theta)
names(omega_results) <- colnames(Lambda)
omega_results
}
#' cat_Omega_H
#'
#' Computes hierarchical omega reliability estimate for all factors as described in Rodriguez, Reise, and
#' Haviland (2016).
#'
#' \code{cat_Omega_H} is called by \code{\link{bifactorIndices}} and the various convenience functions
#' for exploratory models and/or Mplus output,
#' which are the only functions in this package intended for casual users.
#'
#' @param Lambda is a matrix of standardized factor loadings
#' @param Thresh is a list (indexed by items) of vectors of item thresholds
#' @param Phi is the latent variable covariance matrix. Defaults to \code{NULL}, and
#' the identity matrix will be used. No other options are currently available.
#' @param Denom specifies how the variance of the total score will be computed. Defaults
#' to \code{NULL}, and the model implied total score variance will be used. No other options
#' are currently available.
#'
#' @return A \code{numeric}, the hierarchical omega reliability estimate for all factors using
#' the technique of Green and Yang (2009).
#'
#' @examples
#' Lambda <- matrix(c(.82, .10, 0, 0,
#' .77, .35, 0, 0,
#' .79, .32, 0, 0,
#' .66, .39, 0, 0,
#' .51, 0, .71, 0,
#' .56, 0, .43, 0,
#' .68, 0, .13, 0,
#' .60, 0, .50, 0,
#' .83, 0, 0, .47,
#' .60, 0, 0, .27,
#' .78, 0, 0, .28,
#' .55, 0, 0, .75),
#' ncol = 4, byrow = TRUE)
#' colnames(Lambda) <- c("General", "SF1", "SF2", "SF3")
#'
#' Thresh = list(c(-1, 0, 1), c(-0.5, 0, 0.5),
#' c(0, 1, 2), c(0, 0.5, 1),
#' c(-2, -1, 0), c(-1, -0.5, 0),
#' c(-1, 0, 2), c(-0.5, 0, 1),
#' c(-2, 0, 1), c(-1, 0, 0.5),
#' c(-1, 0, 1), c(-0.5, 0, 0.5))
#'
#' cat_Omega_H(Lambda, Thresh)
#'
#' @references
#' Rodriguez, A., Reise, S. P., & Haviland, M. G. (2016). Evaluating bifactor models:
#' calculating and interpreting statistical indices. \emph{Psychological Methods, 21}(2),
#' 137 \doi{10.1037/met0000045}.
#'
#' Green, S. B., & Yang, Y. (2009). Reliability of summed item scores using
#' structural equation modeling: An alternative to coefficient alpha.
#' \emph{Psychometrika, 74}(1), 155-167 \doi{10.1007/s11336-008-9099-3}.
#'
#' @export
#'
#' @seealso \code{\link{Omega_H}}, \code{\link{bifactorIndices}}
#'
#'
cat_Omega_H <- function(Lambda, Thresh, Phi = NULL, Denom = NULL) {
cat_Omega_H_C <- function(Fac, Lambda, Thresh, Phi = NULL, Denom = NULL) {
## Make a matrix of logical vectors for non-zero elements of Lambda.
inFactor <- Lambda[,Fac] != 0
## subset Lambda to only include appropriate items
subLambda <- Lambda[inFactor,]
subLambda <- as.matrix(subLambda)
## create Phi matrix
if (is.null(Phi)) {Phi <- diag(ncol(subLambda))}
num_items <- nrow(subLambda)
## latent item covariances (correlations, because they're standardized!!)
lat_cov <- subLambda %*% Phi %*% t(subLambda)
if (is.null(Denom)) {
poly_cor <- lat_cov
diag(poly_cor) <- rep(1, num_items)
}
# Now that we have the poly_cor matrix, we need to restrict to just the factor of interest
#for computing the numerator
subLambda <- subLambda[, Fac]
subLambda <- as.matrix(subLambda)
Phi <- Phi[Fac, Fac]
lat_cov <- subLambda %*% as.matrix(Phi) %*% t(subLambda)
## Create covariance matrix of parallel items
par_cov_mat <- sapply(1:num_items, function (j) {
t_j <- Thresh[[j]]
sapply(1:num_items, function(jp) {
t_jp <- Thresh[[jp]]
## cov(x_j, x_jp)
## first, the left half of the expression in equation 19 in Green and Yang (2009)
left <- sum(sapply(1:length(t_j), function(c) {
sapply(1:length(t_jp), function(cp) {
r <- lat_cov[j, jp]
mnormt::pmnorm(c(t_j[c], t_jp[cp]), c(0, 0), matrix(c(1, r, r, 1), 2))
}) ## end cp
})) ## end c
        ## now the two expressions in the right half of the expression in equation 19 in G&Y
right_j <- sum(stats::pnorm(t_j))
right_jp <- sum(stats::pnorm(t_jp))
## put it together and what do you get? Bibbidi-Bobbidi-Boo
left - right_j * right_jp
}) ## end jp
}) ## end j
## create covariance matrix of items... copy/paste of par_cov_mat,
    ## but switch from lat_cov to poly_cor
item_cov_mat <- sapply(1:num_items, function (j) {
t_j <- Thresh[[j]]
sapply(1:num_items, function(jp) {
t_jp <- Thresh[[jp]]
## cov(x_j, x_jp)
## first, the left half of the expression in equation 19 in Green and Yang (2009)
left <- sum(sapply(1:length(t_j), function(c) {
sapply(1:length(t_jp), function(cp) {
r <- poly_cor[j, jp]
mnormt::pmnorm(c(t_j[c], t_jp[cp]), c(0, 0), matrix(c(1, r, r, 1), 2))
}) ## end cp
})) ## end c
        ## now the two expressions in the right half of equation 19 in G&Y
right_j <- sum(stats::pnorm(t_j))
right_jp <- sum(stats::pnorm(t_jp))
## put it together and what do you get? Bibbidi-Bobbidi-Boo
left - right_j * right_jp
}) ## end jp
}) ## end j
numer <- sum(par_cov_mat)
denom <- sum(item_cov_mat)
numer/denom
}
omega_results <- sapply(1:ncol(Lambda), cat_Omega_H_C, Lambda = Lambda, Thresh = Thresh)
names(omega_results) <- colnames(Lambda)
omega_results
}
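## A minimal standalone sketch (not part of the package API) of the Green and
## Yang (2009) equation 19 covariance computed inside cat_Omega_H above, for
## two hypothetical 4-category items; the thresholds and latent correlation
## are illustrative values, not from any real data. Requires mnormt.
gy_cov_sketch <- function() {
  t1 <- c(-1, 0, 1)     # thresholds for item 1 (assumed values)
  t2 <- c(-0.5, 0, 0.5) # thresholds for item 2 (assumed values)
  r <- 0.4              # model-implied latent correlation between the items
  ## left half of equation 19: summed bivariate normal probabilities
  left <- sum(outer(t1, t2, Vectorize(function(a, b) {
    mnormt::pmnorm(c(a, b), c(0, 0), matrix(c(1, r, r, 1), 2))
  })))
  ## minus the product of the summed univariate probabilities
  left - sum(stats::pnorm(t1)) * sum(stats::pnorm(t2))
}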
|
/scratch/gouwar.j/cran-all/cranData/BifactorIndicesCalculator/R/Omega_Indices.R
|
#' PUC
#'
#' \code{PUC} computes the proportion of uncontaminated correlations for a bifactor model.
#'
#' \code{PUC} is called by \code{\link{bifactorIndices}} and the various convenience functions
#' for exploratory models and/or Mplus output,
#' which are the only functions in this package intended for casual users.
#'
#' @param Lambda is a matrix of factor loadings
#'
#' @return A \code{numeric}, the proportion of uncontaminated correlations (PUC).
#'
#' @seealso \code{\link{bifactorIndices}}
#'
#' @export
#'
#' @examples
#' Lambda <- matrix(c(.82, .10, 0, 0,
#' .77, .35, 0, 0,
#' .79, .32, 0, 0,
#' .66, .39, 0, 0,
#' .51, 0, .71, 0,
#' .56, 0, .43, 0,
#' .68, 0, .13, 0,
#' .60, 0, .50, 0,
#' .83, 0, 0, .47,
#' .60, 0, 0, .27,
#' .78, 0, 0, .28,
#' .55, 0, 0, .75),
#' ncol = 4, byrow = TRUE)
#' colnames(Lambda) <- c("General", "SF1", "SF2", "SF3")
#' PUC(Lambda)
#'
#'
PUC <- function(Lambda) {
if (!isBifactor(Lambda)) return(NULL)
### Count how many items are on each factor
numItemsOnFactor <- colSums(Lambda != 0)
## contaminated correlations: add up n*(n-1)/2 for all factors, then subtract off the n(n-1)/2 for general factor
specificCorrelationCount <- sum(sapply(numItemsOnFactor, function (x) {x*(x-1)/2})) - (nrow(Lambda)*(nrow(Lambda)-1)/2)
## PUC = 1 - PCC
1 - specificCorrelationCount/(nrow(Lambda)*(nrow(Lambda)-1)/2)
}
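## A hand check of PUC for the 12-item example above, using the same counting
## rule as the function: each of the three 4-item specific factors
## contaminates 4*3/2 = 6 of the 12*11/2 = 66 item correlations.
puc_by_hand <- function() {
  1 - (3 * (4 * 3 / 2)) / (12 * 11 / 2) # 0.7272..., matching PUC(Lambda)
}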
#' ARPB
#'
#' \code{ARPB} computes absolute relative bias in factor loadings between the general factor of a
#' bifactor model and a unidimensional model.
#'
#'\code{ARPB} is called by \code{\link{bifactorIndices}} and the various convenience functions
#' for exploratory models and/or Mplus output,
#' which are the only functions in this package intended for casual users.
#'
#' @param Lambda is a matrix of factor loadings
#' @param UniLambda is a matrix of factor loadings
#'
#' @return a list where the first element is the average absolute relative parameter bias, and the second
#' element is a vector of absolute relative bias by item
#'
#' @seealso \code{\link{bifactorIndices}}
#'
#' @export
#'
#' @examples
#' Lambda <- matrix(c(.82, .10, 0, 0,
#' .77, .35, 0, 0,
#' .79, .32, 0, 0,
#' .66, .39, 0, 0,
#' .51, 0, .71, 0,
#' .56, 0, .43, 0,
#' .68, 0, .13, 0,
#' .60, 0, .50, 0,
#' .83, 0, 0, .47,
#' .60, 0, 0, .27,
#' .78, 0, 0, .28,
#' .55, 0, 0, .75),
#' ncol = 4, byrow = TRUE)
#' colnames(Lambda) <- c("General", "SF1", "SF2", "SF3")
#' UniLambda <- c(.78, .84, .82, .77, .69, .62, .69, .66, .82, .56, .74, .65)
#' ARPB(Lambda, UniLambda)
#'
#'
ARPB <- function(Lambda, UniLambda) {
if (is.null(UniLambda)) return(NULL)
if (is.null(getGen(Lambda))) return(NULL)
genFac <- getGen(Lambda)
genLambda <- Lambda[,genFac]
relBias <- abs((UniLambda - genLambda)/genLambda)
names(relBias) <- rownames(Lambda)
list(ARPB = mean(relBias), AbsRelBias = relBias)
}
#' Factor Determinacy
#'
#' \code{FD} computes factor determinacies for all factors provided
#' standardized factor loadings and an interfactor correlation matrix.
#'
#' \code{FD} is called by \code{\link{bifactorIndices}} and the various convenience functions
#' for exploratory models and/or Mplus output,
#' which are the only functions in this package intended for casual users.
#'
#' @param Lambda is a matrix of standardized factor loadings
#' @param Phi is the matrix of factor intercorrelations. For bifactor models
#' \code{Phi} is diagonal with ones on the diagonal.
#'
#' @return a vector of factor determinacies.
#'
#' @seealso \code{\link{bifactorIndices}}
#'
#' @export
#'
#' @examples
#' Lambda <- matrix(c(.82, .10, 0, 0,
#' .77, .35, 0, 0,
#' .79, .32, 0, 0,
#' .66, .39, 0, 0,
#' .51, 0, .71, 0,
#' .56, 0, .43, 0,
#' .68, 0, .13, 0,
#' .60, 0, .50, 0,
#' .83, 0, 0, .47,
#' .60, 0, 0, .27,
#' .78, 0, 0, .28,
#' .55, 0, 0, .75),
#' ncol = 4, byrow = TRUE)
#' Phi <- matrix(c(1, 0, 0, 0,
#' 0, 1, 0, 0,
#' 0, 0, 1, 0,
#' 0, 0, 0, 1), ncol = 4)
#' colnames(Lambda) <- c("General", "SF1", "SF2", "SF3")
#' FD(Lambda, Phi)
#'
FD <- function(Lambda, Phi) {
Psi <- getTheta(Lambda)
Sigma <- Lambda %*% Phi %*% t(Lambda) + diag(Psi)
FacDet <- sqrt(diag(Phi %*% t(Lambda) %*% solve(Sigma) %*% Lambda %*% Phi))
names(FacDet) <- colnames(Lambda)
FacDet
}
#' Construct Replicability
#'
#' \code{H} computes construct replicability for all factors given
#' standardized factor loadings.
#'
#' \code{H} is called by \code{\link{bifactorIndices}} and the various convenience functions
#' for exploratory models and/or Mplus output,
#' which are the only functions in this package intended for casual users.
#'
#' @param Lambda is a matrix of standardized factor loadings
#'
#' @return a vector of construct reliabilities.
#'
#' @seealso \code{\link{bifactorIndices}}
#'
#' @export
#'
#' @examples
#' Lambda <- matrix(c(.82, .10, 0, 0,
#' .77, .35, 0, 0,
#' .79, .32, 0, 0,
#' .66, .39, 0, 0,
#' .51, 0, .71, 0,
#' .56, 0, .43, 0,
#' .68, 0, .13, 0,
#' .60, 0, .50, 0,
#' .83, 0, 0, .47,
#' .60, 0, 0, .27,
#' .78, 0, 0, .28,
#' .55, 0, 0, .75),
#' ncol = 4, byrow = TRUE)
#' colnames(Lambda) <- c("General", "SF1", "SF2", "SF3")
#' H(Lambda)
#'
H <- function(Lambda) {
1/(1+1/(colSums(Lambda^2/(1-Lambda^2))))
}
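## A quick numeric check of the H formula on a toy one-factor case with
## illustrative loadings: three standardized loadings of .7 give
## sum(.49 / .51) of about 2.88, so H = 1 / (1 + 1 / 2.88), about 0.742.
h_by_hand <- function() {
  lambda <- rep(.7, 3)
  1 / (1 + 1 / sum(lambda^2 / (1 - lambda^2))) # about 0.742
}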
|
/scratch/gouwar.j/cran-all/cranData/BifactorIndicesCalculator/R/Other_Indices.R
|
#' getLambda
#'
#' getLambda computes or extracts a matrix of factor loadings given some input. Methods exist to
#' support an input of
#' a \code{dataframe}, an \code{mplus.model} from \pkg{MplusAutomation}, a \code{SingleGroupClass} object from \pkg{mirt}, and a
#' \code{lavaan} object from \pkg{lavaan}. Please do not use a \code{tibble}, as they do not support
#' row names, and it is best if your items are given names.
#'
#' @param x an object to be converted into a factor loading matrix, or an object containing a fitted
#' model from which a factor loading matrix will be extracted. Supported classes are
#' \code{data.frame}, \code{matrix}, \code{mplus.model}, \code{lavaan}, and \code{SingleGroupClass}.
#'
#' @param standardized can be used to specify whether a standardized or unstandardized factor
#' loading matrix should be returned. Only relevant for \code{lavaan} and \code{mplus.model} input. The
#' standardized matrix for \code{mplus.model} is taken from stdyx results.
#'
#' @return A matrix of factor loadings
#'
getLambda <- function(x, standardized = TRUE) {
UseMethod("getLambda")
}
getLambda.default <- function(x, standardized = TRUE) {
x[is.na(x)] <- 0
as.matrix(x)
}
getLambda.lavaan <- function(x, standardized = TRUE) {
if (standardized) {
x <- lavaan::lavInspect(x, "std")$lambda
x[is.na(x)] <- 0
as.matrix(x)
} else {
## Make sure all factors have a variance of one.
x <- lavaan::lavInspect(x, "std.lv")$lambda
x[is.na(x)] <- 0
as.matrix(x)
}
}
getLambda.SingleGroupClass <- function(x, standardized = TRUE) {
## the summary method for mirt likes to print to screen, so this next line very awkwardly suppresses that printing
temp <- utils::capture.output(FitSum <- mirt::summary(x))
x <- FitSum$rotF
x[is.na(x)] <- 0
as.matrix(x)
}
getLambda.mplus.model <- function(x, standardized = TRUE) {
if (standardized) {
## check to make sure standardized output was requested
if (is.null(x$parameters$stdyx.standardized)) stop("You must request standardized output from Mplus when standardized = TRUE")
getLambda(x$parameters$stdyx.standardized)
} else {
getLambda(x$parameters$unstandardized)
}
}
getLambda.mplus.params <- function(x, standardized = TRUE) {
## I am not proud of this function, but it works...
## This line throws warnings because not every row has a period. But, all the rows we care about *do* have a period. So, I am suppressing the warnings
x <- suppressWarnings(tidyr::separate(x, col = "paramHeader", into = c("Fac", "op"), sep = "\\."))
loadings <- stats::na.omit(x[x$op == "BY",])
Facs <- unique(loadings$Fac)
Items <- unique(loadings$param)
Lambda <- matrix(ncol = length(Facs), nrow = length(Items))
for (i in 1:length(Facs)) {
for (j in 1:length(Items)) {
if (length(loadings[loadings$Fac == Facs[i] & loadings$param == Items[j], "est"]) == 0) {
Lambda[j,i] <- 0
} else {
Lambda[j,i] <- loadings[loadings$Fac == Facs[i] & loadings$param == Items[j], "est"]
}
}
}
rownames(Lambda) <- Items
colnames(Lambda) <- Facs
Lambda
}
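## A small dispatch sketch for getLambda with a hypothetical input: a plain
## loading table is handled by the default method, which zero-fills missing
## loadings and coerces the result to a matrix.
getLambda_sketch <- function() {
  df <- data.frame(General = c(.8, .7), SF1 = c(.3, NA))
  getLambda(df) # a 2 x 2 matrix with the NA replaced by 0
}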
#' getTheta
#'
#' \code{getTheta} extracts or computes a vector of residual variance for items. If a
#' factor loading matrix is provided, then the vector of residual variances is
#' computed from that matrix if \code{standardized} is \code{TRUE}.
#'
#' @param x an object that can be converted into a factor loading matrix, or an
#' object containing a fitted model from which a vector of residual variances
#' can be extracted. Supported classes are \code{data.frame}, \code{matrix}, \code{mplus.model},
#' \code{lavaan}, and \code{SingleGroupClass}
#' @param standardized can be used to specify whether a standardized or unstandardized factor
#' loading matrix should be returned. Only relevant for \code{lavaan} and \code{mplus.model}
#' input. The standardized matrix for \code{mplus.model} is taken from stdyx results.
#'
#' @return a vector of residual variances for items. If x is a fitted model, then
#' the residual variances are extracted from the fitted model. \pkg{lavaan}, \pkg{mirt}
#' (\code{SingleGroupClass}), and \code{Mplus} (\code{mplus.model}) models are supported.
#' If \code{Mplus} does not report residual variances for categorical variables, then
#' factor loadings are used to compute the residual variance for standardized models
#' and an error is thrown for unstandardized models. In both cases, the user is
#' alerted that residual variances could not be found in the input and perhaps the
#' model should be rerun.
#'
#' @seealso \code{\link{getLambda}}
#'
#'
getTheta <- function(x, standardized = TRUE) {
UseMethod("getTheta")
}
getTheta.default <- function(x, standardized = TRUE) {
  if(!standardized) {
    stop("Not enough information is provided to compute indicator residual variances. Either provide indicator residual variances or use a standardized solution.")
  } else {
    ## This is excessive. There's no way to get here unless x is a data frame or matrix. But, better safe than sorry
    Lambda <- getLambda(x)
    Ones <- rep(1, nrow(Lambda))
    Ones - rowSums(Lambda^2)
  }
}
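## A quick check of the default method: for a standardized solution the
## residual variances are 1 - rowSums(Lambda^2). A single item loading .8 on
## the general factor and .3 on a specific gives 1 - (.64 + .09) = 0.27.
getTheta_sketch <- function() {
  getTheta(matrix(c(.8, .3), nrow = 1)) # 0.27
}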
getTheta.SingleGroupClass <- function(x, standardized = TRUE) {
## the summary method for mirt likes to print to screen, so this next line very awkwardly suppresses that printing
temp <- utils::capture.output(FitSum <- mirt::summary(x))
Theta <- 1 - FitSum$h2
as.vector(Theta)
}
getTheta.lavaan <- function(x, standardized = TRUE) {
if (standardized) {
diag(lavaan::lavInspect(x, "std")$theta)
} else {
diag(lavaan::lavInspect(x, "std.lv")$theta)
}
}
getTheta.mplus.model <- function(x, standardized = TRUE) {
if (standardized) {
if (is.null(x$parameters$stdyx.standardized)) stop("You must request standardized output from Mplus when standardized = TRUE")
pars <- x$parameters$stdyx.standardized
} else {
pars <- x$parameters$unstandardized
}
  ## This line throws warnings because not every row has a period. But, all the rows we care about *do* have a period. So, I am suppressing the warnings
loadings <- suppressWarnings(tidyr::separate(pars, col = "paramHeader", into = c("Fac", "op"), sep = "\\."))
loadings_2 <- stats::na.omit(loadings[loadings$op == "BY",])
items <- unique(loadings_2$param)
## Item names are not preserved below.
if (length(x$input$variable$categorical) == 0) {
Theta <- c()
thetaOutput <- pars[pars$paramHeader == "Residual.Variances",]
for (i in 1:length(items)) {
Theta <- c(Theta, thetaOutput[thetaOutput$param == items[i], "est"])
}} else {
Theta <- c()
for (i in 1:length(items)) {
Theta <- c(Theta, x$parameters$r2[x$parameters$r2$param == items[i], "resid_var"])
}}
names(Theta) <- items
Theta
}
#' getGen
#'
#' \code{getGen} detects whether or not a single factor loads on all items, and returns the column
#' index of the general factor if it exists.
#'
#' @param Lambda is a factor loading matrix
#'
#' @return The index of the general factor, or \code{NULL} if there is no general factor
#'
#'
getGen <- function(Lambda) {
  ## Make a matrix of logicals for the non-zero elements of Lambda
  inFactorMat <- Lambda != 0
  ## Compute the column sums: if a column's sum equals nrow(Lambda), that
  ## factor loads on every item, and there can be at most one such factor
  itemsOnFactor <- colSums(inFactorMat)
if (length(which(itemsOnFactor == nrow(Lambda))) == 1 ) {
which(itemsOnFactor == nrow(Lambda))
} else {
NULL
}
}
#' isBifactor
#'
#' Determines whether a model has bifactor structure.
#'
#' @param Lambda Matrix of factor loadings
#'
#' @return Logical. If each item loads on a general factor and at most one specific factor, returns TRUE. Otherwise FALSE.
#'
isBifactor <- function(Lambda) {
if (is.null(getGen(Lambda))) return(FALSE)
inFactorMat <- Lambda != 0
return(sum(rowSums(inFactorMat) > 2) == 0)
}
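## A minimal sketch of how getGen and isBifactor classify a loading pattern,
## on a toy 4-item matrix with illustrative values: column 1 loads on every
## item, so it is detected as the general factor, and no item loads on more
## than one specific factor, so the structure is bifactor.
bifactor_check_sketch <- function() {
  toyLambda <- matrix(c(.7, .4, 0,
                        .6, .3, 0,
                        .8, 0, .5,
                        .5, 0, .2), ncol = 3, byrow = TRUE)
  list(general = getGen(toyLambda),      # 1
       bifactor = isBifactor(toyLambda)) # TRUE
}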
|
/scratch/gouwar.j/cran-all/cranData/BifactorIndicesCalculator/R/UtilityFunctions.R
|
#' Response Data to the SRS-22r
#'
#'A dataset containing 500 responses to the 20 item SRS-22r scoliosis
#'quality of life outcome measure.
#'
#'The Function subscale consists of items 5, 9, 12, 15, and 18.
#'The Pain subscale consists of items 1, 2, 8, 11, and 17.
#'The SelfImage subscale consists of items 4, 6, 10, 14, and 19.
#'The MentalHealth subscale consists of items 3, 7, 13, 16, and 20.
#'
#'@format A data frame with 500 rows and 20 columns
#'Rownames indicate the item number
#'
"SRS_data"
#' Simulated data for multi-trait multi-method
#'
#'A dataset containing 500 responses to a hypothetical 27 item instrument
#'measuring three traits from three different sources. Each trait is measured
#'by 3 items collected from each of three sources (methods).
#'
#'@format A data frame with 500 rows and 27 columns
#' The item names indicate the trait number, method number, and item number
"MTMM_data"
|
/scratch/gouwar.j/cran-all/cranData/BifactorIndicesCalculator/R/data.R
|
BigQuic <- function(X = NULL, inputFileName = NULL, outputFileName = NULL,
                    lambda = 0.5, numthreads = 4, maxit = 5, epsilon = 1e-3,
                    k = 0, memory_size = 8000, verbose = 0, isnormalized = 1,
                    seed = NULL, use_ram = FALSE){
outputFileNames <- vector(length = length(lambda))
outFlag <- TRUE
inFlag <- TRUE
if (use_ram)
{
#precMatrices <- vector(length = length(lambda))
precMatrices <- vector(length = 0)
}
if (!is.null(seed) && length(seed) != 0)
{
set.seed(seed)
}
if (!is.null(X))
{
inFlag <- FALSE
#WRITE INPUT FILE FOR BIGQUIC
inputFileName <- tempfile(pattern = "BigQuic_input_matrix", fileext = ".Bmat")
write.table(x = cbind(dim(X)[2], dim(X)[1]), file = inputFileName, row.names = FALSE, col.names = FALSE)
write.table(x = X, file = inputFileName, append = TRUE, row.names = FALSE, col.names = FALSE)
}
if (!is.null(outputFileName))
{
tempFileName <- outputFileName
}
else
{
tempFileName <- inputFileName
outFlag <- FALSE
}
for (i in 1:length(lambda))
{
j = i
while (file.exists(paste0(tempFileName, ".", j, ".output")))
{
j <- j + 1
}
outputFileName <- paste0(tempFileName, ".", j, ".output")
#Check input file is at least kinda valid
format_Check <- read.table(file = inputFileName, nrows = 1)
if (!is.integer(format_Check[[1]]) || !is.integer(format_Check[[2]]))
{
stop("The file is not formatted correctly for BigQuic, the first line
should be p (the number of attributes) then n (the number of
samples). Then the rest of the file should contain the matrix,
e.g.
4 2
1 2 3 4
4 3 2 1")
}
    BigQuicHelper(argvPasser = c("-l", lambda[i], "-n", numthreads, "-t", maxit,
                                 "-e", epsilon, "-k", k, "-m", memory_size,
                                 "-q", verbose, "-r", isnormalized,
                                 inputFileName, outputFileName))
outputFileNames[i] <- outputFileName
if (use_ram)
{
M <- read.table(file = outputFileName, skip = 1)
#########GET FROM FILE DIM(X)
precDim <- max(format_Check[[1]],format_Check[[2]])
precMatrices <- c(precMatrices, sparseMatrix(i = M[,1], j = M[,2], x = M[,3], dims = c(precDim, precDim), symmetric = FALSE)) # Inverse/thetahat
}
if (outFlag == FALSE && use_ram == TRUE)
{
file.remove(outputFileName)
}
}
if(use_ram && outFlag == FALSE)
{
output <- BigQuic_object_builder$new(precision_matrices = precMatrices, inputFileName = inputFileName, lambda = lambda, numthreads = numthreads, maxit = maxit, epsilon = epsilon, k = k, memory_size = memory_size, verbose = verbose, isnormalized = isnormalized, use_ram = use_ram, clean = FALSE, inFlag = inFlag, outFlag = outFlag)
} else if (use_ram && outFlag == TRUE)
{
output <- BigQuic_object_builder$new(precision_matrices = precMatrices, output_file_names = outputFileNames, inputFileName = inputFileName, lambda = lambda, numthreads = numthreads, maxit = maxit, epsilon = epsilon, k = k, memory_size = memory_size, verbose = verbose, isnormalized = isnormalized, use_ram = use_ram, clean = FALSE, inFlag = inFlag, outFlag = outFlag)
} else
{
output <- BigQuic_object_builder$new(output_file_names = outputFileNames, inputFileName = inputFileName, lambda = lambda, numthreads = numthreads, maxit = maxit, epsilon = epsilon, k = k, memory_size = memory_size, verbose = verbose, isnormalized = isnormalized, use_ram = use_ram, clean = FALSE, inFlag = inFlag, outFlag = outFlag)
}
if("AsIs" %in% class(X)){
class(X) <- class(X)[-match("AsIs", class(X))]
if(!is.matrix(X)){
X <- as.matrix(X)
if(!is.matrix(X))
{
stop("X is not a matrix, nor a matrix protected with AsIs")
}
}
}
output$setX(X)
output$setSeed(seed)
return(output)
}
BigQuic_object_builder <- setRefClass(Class = "BigQuic_object",
fields = list(precision_matrices = "list", X = "matrix", inputFileName = "character", lambda = "numeric",
numthreads = "numeric", maxit = "numeric", epsilon = "numeric", k = "numeric", memory_size = "numeric",
verbose = "numeric", isnormalized = "numeric", seed = "numeric", use_ram = "logical",
clean = "logical", output_file_names = "character", opt.lambda = "numeric", inFlag = "logical",
outFlag = "logical"),
methods = list(cleanFiles = function(verbose = FALSE)
{
if (clean == FALSE)
{
if (outFlag == TRUE || use_ram == FALSE)
{
for (i in 1:length(output_file_names))
{
file.remove(output_file_names[i])
if (verbose)
{
print(c("Deleted file: ", output_file_names[i]))
}
}
}
if (inFlag == FALSE)
{
file.remove(inputFileName)
if (verbose)
{
print(c("Deleted file: ", inputFileName))
}
}
clean <<- TRUE
}
else
{
print("Files were already cleaned up. ")
}
},
setSeed = function(inputSeed)
{
if (!is.null(inputSeed))
{
seed <<- inputSeed
}
},
setOptLambda = function(optLambda)
{
if(!is.null(optLambda))
{
opt.lambda <<- optLambda
}
},
setX = function(inputX)
{
if(!is.null(inputX))
{
X <<- inputX
}
},
finalize = function()
{
cleanFiles()
}
))
generate_sample <- function(n = 200, p = 150, seed = NULL)
{
if (!is.null(seed))
{
set.seed(seed)
} else
{
    set.seed(1)
}
  X <- rbinom(p * n, 1, prob = 0.15)
  dim(X) <- c(n, p)
X <- X %*% diag(1+9*runif(p))
return(X)
}
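## A minimal end-to-end sketch with illustrative settings (assumes a compiled
## BigQuic installation): simulate a small data set, fit two penalty values in
## memory, and inspect the first sparse precision estimate.
bigquic_demo <- function() {
  X <- generate_sample(n = 100, p = 50, seed = 1)
  fit <- BigQuic(X = X, lambda = c(0.5, 0.7), numthreads = 2, use_ram = TRUE)
  on.exit(fit$cleanFiles())
  fit$precision_matrices[[1]] # sparse estimate for lambda = 0.5
}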
|
/scratch/gouwar.j/cran-all/cranData/BigQuic/R/BigQuic.R
|
BigQuic.select = function(BigQuic_result = NULL, stars.thresh = 0.1,
stars.subsample.ratio = NULL, rep.num = 20,
verbose = TRUE, verbose2 = 0)
{
if(length(BigQuic_result$lambda) == 1){
BigQuic_result$setOptLambda(BigQuic_result$lambda)
return(BigQuic_result)
}
  X <- read.table(file = BigQuic_result$inputFileName, skip = 1)
gcinfo(FALSE)
n = nrow(X)
d = ncol(X)
nlambda = length(BigQuic_result$lambda)
if(is.null(stars.subsample.ratio))
{
if(n>144) stars.subsample.ratio = 10*sqrt(n)/n
if(n<=144) stars.subsample.ratio = 0.8
}
merge <- list()
for(i in 1:nlambda)
{
merge[[i]] <- Matrix(0,d,d)
}
for(i in 1:rep.num)
{
if(verbose)
{
mes <- paste(c("Conducting Subsampling....in progress:",
floor(100*i/rep.num), "%"), collapse="")
cat(mes, "\r")
flush.console()
}
ind.sample = sample(c(1:n), floor(n*stars.subsample.ratio), replace=FALSE)
    #Run BigQuic once per lambda on this subsample and store the resulting matrices in the list path
path <- list()
for (j in 1:nlambda)
{
temp <- BigQuic(X = as.matrix(X[ind.sample,]),
lambda = BigQuic_result$lambda[j],
numthreads = BigQuic_result$numthreads,
maxit = BigQuic_result$maxit,
epsilon = BigQuic_result$epsilon,
k = BigQuic_result$k,
memory_size = BigQuic_result$memory_size,
verbose = verbose2,
isnormalized = BigQuic_result$isnormalized,
seed = BigQuic_result$seed, use_ram = TRUE)
path[[j]] <- temp$precision_matrices[[1]]
#Probably no longer needed because they are cleaned by the garbage
#collection now.
#temp$cleanFiles(verbose = FALSE)
}
for(k in 1:nlambda)
{
merge[[k]] = merge[[k]] + path[[k]]
}
rm(ind.sample,path)
gc() #Should there really be an explicit call to the garbage collector here?
}
if(verbose)
{
mes = "Conducting Subsampling....done. "
cat(mes, "\r")
cat("\n")
flush.console()
}
variability = rep(0,nlambda)
for(i in 1:nlambda)
{
merge[[i]] = merge[[i]]/rep.num
variability[i] = 4*sum(merge[[i]]*(1-merge[[i]]))/(d*(d-1))
}
#find an index, use max to take 1 if index doesn't make sense
#The index is where the smallest lambda is that has maximum low enough variability
#opt.index = max(which.max(variability <= stars.thresh)[1]-1,1)
variability <- rev(variability)
  monotonicity_index <- 0
  for (i in 1:(nlambda - 1))
  {
    if (variability[i] > variability[i + 1])
    {
      monotonicity_index <- i
      break
    }
  }
  variability = variability[1:monotonicity_index] # monotonicity and hence sparsity
variability = variability[variability <= stars.thresh] # reproducibility
  if (length(variability) == 0)
  {
    #Report to the user that no lambdas are suitable.
    return("No lambdas are suitable for this method because they have too much variability and are therefore not reproducible!")
} else
{
opt.index = which.max(variability)
}
opt.lambda = BigQuic_result$lambda[nlambda - opt.index + 1]
BigQuic_result$setOptLambda(opt.lambda)
# if (BigQuic_result$use_ram == TRUE)
# {
# BigQuic_result <- BigQuic_result$precision_matrices[[which(BigQuic_result$lambda == BigQuic_result$opt.lambda)]]
# }
# else
# {
# #BigQuic_result <-
# }
return(BigQuic_result)
}
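## A sketch of the intended select-then-plot workflow with an illustrative
## lambda path (assumes a compiled BigQuic installation): fit a path of
## penalties, then pick one by the stability criterion above.
bigquic_select_demo <- function(X) {
  result <- BigQuic(X = X, lambda = seq(0.3, 0.9, by = 0.1), use_ram = TRUE)
  selected <- BigQuic.select(result, stars.thresh = 0.1, rep.num = 20)
  selected$opt.lambda # the selected penalty, used by plot.BigQuic_object below
}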
plot.BigQuic_object = function(x, ...){
  opt_index <- which(x$lambda == x$opt.lambda)
  if(x$use_ram == FALSE){
    format_Check <- read.table(file = x$inputFileName, nrows = 1)
    if (!is.integer(format_Check[[1]]) || !is.integer(format_Check[[2]]))
    {
      stop("The file is not formatted correctly for BigQuic, the first line
           should be p (the number of attributes) then n (the number of
           samples). Then the rest of the file should contain the matrix,
           e.g.
           4 2
           1 2 3 4
           4 3 2 1")
    }
    M <- read.table(file = x$output_file_names[opt_index], skip = 1)
    precDim <- max(format_Check[[1]], format_Check[[2]])
    x$precision_matrices[[opt_index]] <- sparseMatrix(i = M[,1], j = M[,2], x = M[,3], dims = c(precDim, precDim), symmetric = FALSE)
  }
  prec_summary <- summary(x$precision_matrices[[opt_index]])
  col_vec1 <- unlist(prec_summary[3])
  col_vec1[col_vec1 < 0] <- 0
  #col_vec1 excludes color from the diagonal as not interesting
  col_vec1[prec_summary[1] == prec_summary[2]] <- 0
  col_vec2 <- unlist(prec_summary[3])
  col_vec2[col_vec2 > 0] <- 0
  col_vec2 <- col_vec2*-1
  #col_vec2 excludes color from the diagonal as not interesting
  col_vec2[prec_summary[1] == prec_summary[2]] <- 0
  #Make anything green very green to see it better.
  #col_vec2[col_vec2 != 0] <- 1
  if(max(col_vec1) > 0){
    col_vec1 <- col_vec1/max(col_vec1)
  } else{
    print("No positive associations.")
  }
  if(max(col_vec2) > 0){
    col_vec2 <- col_vec2/max(col_vec2)
  } else{
    print("No negative associations.")
  }
  plot(prec_summary[1:2], col=rgb(col_vec1,col_vec2,0,1))
}
|
/scratch/gouwar.j/cran-all/cranData/BigQuic/R/BigQuic.select.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
BigQuicHelper <- function(argvPasser) {
invisible(.Call('BigQuic_BigQuicHelper', PACKAGE = 'BigQuic', argvPasser))
}
|
/scratch/gouwar.j/cran-all/cranData/BigQuic/R/RcppExports.R
|