active <- function(blauObj){ sprintf('These elements are active: %s. Access with object$element.', paste(names(blauObj), collapse = ", ")) }
/scratch/gouwar.j/cran-all/cranData/Blaunet/R/active.R
blau <- function(square.data, graph = NULL, directed.el = FALSE, node.ids = NULL,
                 weights = NULL, ecology.ids = NULL, exclude = NULL,
                 dimensions = NULL, memberships = NULL, complete.cases = FALSE){
  #exclude extraneous columns up front
  if (!is.null(exclude)){
    excluded <- correctFormat(exclude, square.data)
    square.data <- square.data[-excluded]
  }
  #need: put this after everything has been created
  #right now can cause unexpected behavior
  #(the complete.cases argument shadows stats::complete.cases here; the call
  #below still resolves to the function because R skips non-function bindings
  #when looking up a name used in a call)
  if (complete.cases == TRUE){
    square.data <- square.data[complete.cases(as.data.frame(square.data)),]
  } else {
    square.data <- as.data.frame(square.data)
  }
  #now we need checks. the function checks for the non-nullity of each argument
  #blau object
  blauObj <- list() #should have a dataframe/list/matrix/etc for each option
  class(blauObj) <- 'blau'
  #ERROR CHECKS: it's vital that if the program is extended and new error checks
  #for input format are needed, they be added here. The reason is simple: the
  #input options are cleaned up and checks are displayed IMMEDIATELY to the user.
  #There should be no waiting 60 seconds only to find an error in the input
  #arguments. ALSO: getting errors out of the way and cleaning up the options
  #arguments makes the following code MUCH easier to write and read.
  #checks whether arguments that should be length 1 are length 1
  if (!isCorrectLength(node.ids) || !isCorrectLength(ecology.ids) || !isCorrectLength(weights)) {
    message('Error in Argument Length')
  }
  #checks whether arguments are in numeric form. if they're not, converts to
  #numeric form. all column identifiers should be numeric after this point.
  #if column names are needed (for writing, say), use colnames(OBJECT[colnumber])
  node.ids <- correctFormat(node.ids, square.data)
  ecology.ids <- correctFormat(ecology.ids, square.data)
  weights <- correctFormat(weights, square.data)
  dimensions <- correctFormat(dimensions, square.data)
  memberships <- correctFormat(memberships, square.data)
  #sorts the data frame by ecologies
  #sorting is IMPORTANT because we'd like all of the data to be grouped by ecologies.
  if (!is.null(ecology.ids)) {
    square.data <- square.data[order(square.data[, ecology.ids]), ]
  }
  #list of binary and continuous columns
  continuousCols <- NULL
  binaryCols <- NULL
  numericCols <- sapply(square.data, is.numeric)
  charCols <- which(numericCols == FALSE)
  for (colCyc in 1:ncol(square.data)) {
    if (!isBinary(square.data[,colCyc]) && numericCols[colCyc] == TRUE) {
      continuousCols <- c(continuousCols, colCyc)
    } else if (isBinary(square.data[,colCyc]) && numericCols[colCyc] == TRUE) {
      binaryCols <- c(binaryCols, colCyc)
    }
  }
  #SINGLE ARGUMENT ASSIGNMENTS
  #idCol-- if there's no idCol, just use the row numbers. both names and numeric (and mixed) are good inputs
  if (is.null(node.ids)){
    tempNodeId <- c(1:nrow(square.data))
  } else {
    tempNodeId <- as.character(as.matrix(square.data[node.ids]))
  }
  #ecologyId-- if no ecology.ids, everyone is in the same ecology (#1). Else, people are placed in ecologies.
  #this is like schoolID in the original program.
  if (is.null(ecology.ids)) {
    tempEcologyId <- rep(1, nrow(square.data))
  } else {
    tempEcologyId <- as.character(as.matrix(square.data[ecology.ids]))
  }
  #put node and ecology identifiers together into one object
  blauObj$ids <- as.data.frame(cbind(tempNodeId, tempEcologyId))
  colnames(blauObj$ids) <- c('nodeId', 'ecologyId')
  #weights
  if (is.null(weights)) {
    blauObj$weights <- as.matrix(rep(1, nrow(square.data))) #default is a matrix of 1's
  } else {
    blauObj$weights <- as.matrix(square.data[weights])
  }
  #checks whether the graph argument is usable
  #if yes, puts it in a memory-efficient edgelist from the network package
  #the user MUST SPECIFY node ids in the object that is turned into an edgelist
  #otherwise, there is no way we can assure that nodes are matched correctly
  if (!is.null(graph)) {
    if (inherits(graph, 'network') == TRUE){
      blauObj$graph <- graph
    } else {
      blauObj$graph <- network(as.matrix(graph))
      #make sure there are no nodes in the network that aren't in node ids
      for (name in network.vertex.names(blauObj$graph)){
        if (!any(blauObj$ids[,1] == name)){
          message(sprintf('Graph vertex with name %s is not present in node.ids.', name))
        }
      }
    }
  }
  #MULTIPLE ARGUMENT ASSIGNMENTS WITH DEFAULT SETTINGS
  #blauDimensions--subset the columns that are NOT already assigned AND are continuous variables
  if (is.null(dimensions)) {
    ignoredCols <- unique(c(node.ids, ecology.ids, weights, binaryCols, memberships, charCols))
    ignoredCols <- ignoredCols[!is.na(ignoredCols)]
    totalCols <- c(1:ncol(square.data))
    specifiedCols <- totalCols[-ignoredCols]
    blauObj$dimensions <- as.matrix(square.data[specifiedCols])
  } else {
    #if not null, take the specified columns and raise an error if there's overlap with columns reserved by other options
    ignoredCols <- unique(c(node.ids, ecology.ids, weights, binaryCols, memberships, charCols))
    if (length(intersect(dimensions, ignoredCols)) > 0) {
      message('You have overlaps between specified Blau dimensions and other columns.')
    } else {
      blauObj$dimensions <- as.matrix(square.data[dimensions])
    }
  }
  #memberships-- just like with blauDimensions, if NULL, we automatically assign all binary
  #unassigned variables to this category. if not NULL, we make sure there's no overlap and
  #just take the user-specified columns.
  if (is.null(memberships)){
    ignoredCols <- unique(c(node.ids, ecology.ids, weights, dimensions, continuousCols, charCols))
    ignoredCols <- ignoredCols[!is.na(ignoredCols)]
    totalCols <- c(1:ncol(square.data))
    specifiedCols <- totalCols[-ignoredCols]
    blauObj$memberships <- as.matrix(square.data[specifiedCols])
  } else {
    ignoredCols <- unique(c(node.ids, ecology.ids, weights, dimensions, continuousCols, charCols))
    if (length(intersect(memberships, ignoredCols)) > 0) {
      message('You have overlaps specified between membership columns and other columns.')
    } else {
      blauObj$memberships <- as.matrix(square.data[memberships])
    }
  }
  #name the rows with the id names
  rownames(blauObj$ids) <- blauObj$ids[,1]
  rownames(blauObj$dimensions) <- blauObj$ids[,1]
  rownames(blauObj$memberships) <- blauObj$ids[,1]
  rownames(blauObj$weights) <- blauObj$ids[,1]
  if (!is.null(blauObj$primaryMembership)){
    rownames(blauObj$primaryMembership) <- blauObj$ids[,1]
  }
  #missing weight values
  presentObs <- complete.cases(blauObj$weights)
  blauObj$ids <- blauObj$ids[presentObs, , drop=FALSE]
  blauObj$dimensions <- blauObj$dimensions[presentObs, , drop=FALSE]
  blauObj$memberships <- blauObj$memberships[presentObs, , drop=FALSE]
  blauObj$weights <- blauObj$weights[presentObs, , drop=FALSE]
  if (!is.null(blauObj$primaryMembership)){
    blauObj$primaryMembership <- blauObj$primaryMembership[presentObs, , drop=FALSE]
  }
  #for the soul who decides to input a character matrix
  #this is here because datatypes in R can get confusing when both characters and numbers are stored in a data.frame
  #also, we're programming for potential R neophytes
  if (is.character(blauObj$dimensions)){
    message('The dimensions contain at least one character column. Dimensions must be numeric.')
  }
  if (is.character(blauObj$memberships)){
    message('The memberships contain at least one character column. Memberships must be numeric.')
  }
  if (is.character(blauObj$weights)){
    message('The weights contain at least one character element. Weights must be numeric.')
  }
  #initialize null items for checks in subsequent functions
  #any new elements that are added to the blau object should be initialized here
  blauObj$isInNiche <- NULL
  blauObj$topbounds <- NULL
  blauObj$lowbounds <- NULL
  blauObj$nodalLocal <- NULL
  blauObj$nodalGlobal <- NULL
  blauObj$nodalNetwork <- NULL
  blauObj$dyadic <- NULL
  #returns the data object
  return(blauObj)
}
/scratch/gouwar.j/cran-all/cranData/Blaunet/R/blau.R
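## A minimal usage sketch of blau() with made-up data (the data frame and all
## column names below are illustrative, not part of the package): two
## continuous Blau dimensions, two binary memberships, one ecology identifier.
toy <- data.frame(
  name   = paste0("n", 1:6),
  school = c(1, 1, 1, 2, 2, 2),
  age    = c(23, 31, 27, 45, 38, 29),
  income = c(40, 55, 48, 90, 70, 52),
  club   = c(1, 0, 1, 1, 0, 0),
  choir  = c(0, 1, 1, 0, 1, 0)
)
b <- blau(toy, node.ids = "name", ecology.ids = "school",
          dimensions = c("age", "income"), memberships = c("club", "choir"))
active(b) #lists the populated elements of the blau object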
blaunetgui <- function(){
  oldwd <- getwd()
  on.exit(setwd(oldwd))
  #use the first library path; .libPaths() can return more than one directory
  setwd(paste(.libPaths()[1], "/Blaunet/scripts/", sep=""))
  source("blaunetgui.R")
}
/scratch/gouwar.j/cran-all/cranData/Blaunet/R/blaunetgui.r
calc.dyadic <- function(blauObj, m.dist) {
  #gets rid of extraneous nodes
  nameList <- network.vertex.names(blauObj$graph)
  diff_names <- setdiff(nameList, rownames(blauObj$dimensions))
  blauObj$graph <- delete.vertices(blauObj$graph, vapply(diff_names, function(x) which(nameList == x), 1))
  edgelist <- as.matrix(blauObj$graph, matrix.type='edgelist')
  charEL <- charEdgelist(edgelist, attr(edgelist, 'vnames'))
  #if we're given an undirected graph (undirected EL/symmetric adjacency matrix)
  #duplicate the EL with the origin nodes reversed
  if (is.directed(blauObj$graph) == FALSE) {
    charEL <- unique(rbind(charEL, cbind(charEL[,2], charEL[,1])))
  }
  #sort edgelist by first element
  if (nrow(charEL) > 1){
    charEL <- charEL[order(charEL[, 1]), ]
  }
  if (m.dist == TRUE){
    blauObj$dyadic <- as.data.frame(matrix(0, nrow = nrow(charEL), ncol = 6))
  } else {
    blauObj$dyadic <- as.data.frame(matrix(0, nrow = nrow(charEL), ncol = 5))
  }
  edgelistNames <- matrix(0, nrow = 0, ncol = 2)
  #here's where we take advantage of treating the network as directed
  for (rowCyc in 1:nrow(charEL)){
    edge <- as.vector(charEL[rowCyc,])
    edgelistNames <- rbind(edgelistNames, c(edge[1], edge[2]))
    nichea <- blauObj$isInNiche[edge[1],]
    nicheb <- blauObj$isInNiche[edge[2],]
    k <- ncol(blauObj$isInNiche)
    if ("ecologyNames" %in% colnames(blauObj$isInNiche)) {
      if (nichea[k] == nicheb[k]) {
        for (niche in 1:(k-1)) {
          #CoNicher
          if (nichea[niche] == nicheb[niche] && nichea[niche] == 1)
            blauObj$dyadic[rowCyc, 1] <- blauObj$dyadic[rowCyc, 1] + 1
          #spanner
          if (sum(blauObj$isInNiche[edge[1],(1:(k-1))]) >= 1 && sum(blauObj$isInNiche[edge[2],(1:(k-1))]) >= 1 && nichea[niche] + nicheb[niche] == 1)
            blauObj$dyadic[rowCyc, 4] <- blauObj$dyadic[rowCyc, 4] + 1
        }
        #co-outsider
        if (sum(blauObj$isInNiche[edge[1],(1:(k-1))]) + sum(blauObj$isInNiche[edge[2],(1:(k-1))]) == 0){
          blauObj$dyadic[rowCyc, 2] <- 1
        }
        #Straddler
        if (sum(blauObj$isInNiche[edge[1],(1:(k-1))]) >= 1 && sum(blauObj$isInNiche[edge[2],(1:(k-1))]) == 0){
          blauObj$dyadic[rowCyc, 3] <- sum(blauObj$isInNiche[edge[1],(1:(k-1))])
        }
        if (sum(blauObj$isInNiche[edge[1],(1:(k-1))]) == 0 && sum(blauObj$isInNiche[edge[2],(1:(k-1))]) >= 1){
          blauObj$dyadic[rowCyc, 3] <- sum(blauObj$isInNiche[edge[2],(1:(k-1))])
        }
      }
    } else {
      for (niche in 1:k) {
        #CoNicher
        if (nichea[niche] == nicheb[niche] && nichea[niche] == 1)
          blauObj$dyadic[rowCyc, 1] <- blauObj$dyadic[rowCyc, 1] + 1
        #spanner
        if (sum(blauObj$isInNiche[edge[1],]) >= 1 && sum(blauObj$isInNiche[edge[2],]) >= 1 && nichea[niche] + nicheb[niche] == 1)
          blauObj$dyadic[rowCyc, 4] <- blauObj$dyadic[rowCyc, 4] + 1
      }
      #co-outsider
      if (sum(blauObj$isInNiche[edge[1],]) + sum(blauObj$isInNiche[edge[2],]) == 0){
        blauObj$dyadic[rowCyc, 2] <- 1
      }
      #Straddler
      if (sum(blauObj$isInNiche[edge[1],]) >= 1 && sum(blauObj$isInNiche[edge[2],]) == 0){
        blauObj$dyadic[rowCyc, 3] <- sum(blauObj$isInNiche[edge[1],])
      }
      if (sum(blauObj$isInNiche[edge[1],]) == 0 && sum(blauObj$isInNiche[edge[2],]) >= 1){
        blauObj$dyadic[rowCyc, 3] <- sum(blauObj$isInNiche[edge[2],])
      }
    }
    #euclidean dist
    blauObj$dyadic[rowCyc,5] <- dist(rbind(blauObj$dimensions[edge[1],], blauObj$dimensions[edge[2],]), method='euclidean')
    #mahalanobis dist
    if (m.dist == TRUE){
      blauObj$dyadic[rowCyc,6] <- sqrt(mahalanobis(blauObj$dimensions[edge[1],], blauObj$dimensions[edge[2],], cov(blauObj$dimensions)))
    }
  }
  blauObj$dyadic <- cbind(edgelistNames, blauObj$dyadic)
  return(blauObj)
}
/scratch/gouwar.j/cran-all/cranData/Blaunet/R/calc.dyadic.R
calc.dyadic.ecology <- function(blauObj, m.dist){
  #splitter function
  if (m.dist == TRUE){
    blauObj$dyadic <- as.data.frame(matrix(0, nrow = 0, ncol = 8))
  } else {
    blauObj$dyadic <- as.data.frame(matrix(0, nrow = 0, ncol = 7))
  }
  uniqueEcologies <- unique(blauObj$ids[,2])
  uniqueEcologies <- uniqueEcologies[!is.na(uniqueEcologies)]
  for (ecologyId in uniqueEcologies) {
    ecologyRows <- which(blauObj$ids[,2] == ecologyId)
    miniBlau <- splittify(blauObj, ecologyId, ecologyRows)
    miniBlau <- calc.dyadic(miniBlau, m.dist)
    blauObj$dyadic <- rbind(blauObj$dyadic, miniBlau$dyadic)
  }
  return(blauObj)
}
/scratch/gouwar.j/cran-all/cranData/Blaunet/R/calc.dyadic.ecology.R
calc.niches <- function(blauObj, dev.range) {
  #initialize data objects
  topbounds <- matrix(0, ncol = ncol(blauObj$dimensions), nrow = ncol(blauObj$memberships))
  lowbounds <- matrix(0, ncol = ncol(blauObj$dimensions), nrow = ncol(blauObj$memberships))
  means <- matrix(0, ncol = ncol(blauObj$dimensions), nrow = ncol(blauObj$memberships))
  #calculate top and low boundaries
  for (memCyc in 1:ncol(blauObj$memberships)) {
    for (dimCyc in 1:ncol(blauObj$dimensions)) {
      memRows <- which(blauObj$memberships[,memCyc] == 1)
      dimRows <- blauObj$dimensions[memRows, dimCyc]
      memRows <- memRows[!is.na(dimRows)] #gets rid of the missing values in the relevant dimension
      meanData <- blauObj$dimensions[memRows, dimCyc] #rows for relevant dimension
      if (length(meanData) == 0){
        #for when there is no information
        #this can happen in one of two cases:
        #1) no members in the group
        #2) all members of the group have NA along the relevant dimension
        means[memCyc, dimCyc] <- NA
        topbounds[memCyc, dimCyc] <- NA
        lowbounds[memCyc, dimCyc] <- NA
      } else if (length(meanData) == 1){
        #impute our only information if there's 1 obs for the dimension
        means[memCyc,dimCyc] <- meanData #should be just a number
        topbounds[memCyc,dimCyc] <- meanData
        lowbounds[memCyc,dimCyc] <- meanData
      } else if (length(meanData) > 1) {
        meanWeights <- blauObj$weights[memRows,]
        means[memCyc,dimCyc] <- sum(meanData*meanWeights)/sum(meanWeights)
        # Calculate the weighted standard deviation
        # Information on the weighted standard deviation found at
        # http://www.sosmath.com/CBB/viewtopic.php?t=2656
        sdDenominator <- ((length(meanWeights) - 1) * sum(meanWeights))/(length(meanWeights))
        sdNumerator <- 0
        for (dataCyc in 1:length(meanData)){
          sdNumerator <- sdNumerator + (meanWeights[dataCyc] * (meanData[dataCyc] - means[memCyc,dimCyc])^2)
        }
        stdDev <- sqrt(sdNumerator/sdDenominator)
        topbounds[memCyc, dimCyc] <- means[memCyc, dimCyc] + stdDev * dev.range[dimCyc]
        lowbounds[memCyc, dimCyc] <- means[memCyc, dimCyc] - stdDev * dev.range[dimCyc]
        #do not let the lower bound fall below zero on a non-negative dimension
        if (lowbounds[memCyc, dimCyc] < 0 & min(dimRows, na.rm=TRUE) >= 0)
          lowbounds[memCyc, dimCyc] <- 0
      }
    }
  }
  blauObj$topbounds <- topbounds
  blauObj$lowbounds <- lowbounds
  colnames(blauObj$topbounds) <- colnames(blauObj$dimensions)
  rownames(blauObj$topbounds) <- colnames(blauObj$memberships)
  colnames(blauObj$lowbounds) <- colnames(blauObj$dimensions)
  rownames(blauObj$lowbounds) <- colnames(blauObj$memberships)
  #calculate if each node is in a given niche
  blauObj$isInNiche <- matrix(0, nrow = nrow(blauObj$memberships), ncol = ncol(blauObj$memberships))
  #the inside 'apply' takes each row in dimensions and checks if it's within the boundaries
  #the outside 'apply' checks if all elements of each row in the matrix are true
  for (memCyc in 1:nrow(blauObj$lowbounds)){
    blauObj$isInNiche[,memCyc] <- apply(t(apply(blauObj$dimensions, 1, function(x) x >= blauObj$lowbounds[memCyc,] & x <= blauObj$topbounds[memCyc,])), 1, all)
  }
  #overwrite NAs with zeroes
  blauObj$isInNiche[is.na(blauObj$isInNiche)] <- 0
  colnames(blauObj$isInNiche) <- vapply(colnames(blauObj$memberships), function(x) paste(x, "niche", sep="_"), "a")
  rownames(blauObj$isInNiche) <- rownames(blauObj$memberships)
  return(blauObj)
}
/scratch/gouwar.j/cran-all/cranData/Blaunet/R/calc.niches.R
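## For reference, a standalone sketch of the weighted mean and standard
## deviation used above to set the niche bounds (the numbers are illustrative):
x <- c(2.0, 3.5, 4.0) #dimension values for the members of one group
w <- c(1, 2, 1)       #their weights
wmean <- sum(x * w) / sum(w)
sdDenominator <- ((length(w) - 1) * sum(w)) / length(w)
wsd <- sqrt(sum(w * (x - wmean)^2) / sdDenominator)
c(lower = wmean - 1.5 * wsd, upper = wmean + 1.5 * wsd) #bounds for dev.range = 1.5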
calc.niches.ecology <- function(blauObj, uniqueEcologies, dev.range){
  uniqueEcologies <- unique(blauObj$ids[,2])
  uniqueEcologies <- uniqueEcologies[!is.na(uniqueEcologies)]
  blauObj$isInNiche <- matrix(0, nrow = nrow(blauObj$dimensions), ncol = (ncol(blauObj$memberships) + 1)) #extra column for ecology names
  colnames(blauObj$isInNiche) <- c(vapply(colnames(blauObj$memberships), function(x) paste(x, "niche", sep="_"), "a"), 'ecologyNames')
  rownames(blauObj$isInNiche) <- blauObj$ids[,1]
  #iterate through each ecology: all of the calculations for the ecology happen
  #here and they are appended to $isInNiche, $topbounds, and $lowbounds
  for (ecologyId in uniqueEcologies){
    ecologyRows <- which(blauObj$ids[,2] == ecologyId) #pull out ROW identifiers for each row in the ecology
    miniBlau <- splittify(blauObj, ecologyId, ecologyRows)
    miniBlau <- calc.niches(miniBlau, dev.range) #memberships, dimensions, primaryMemberships are used by niches
    blauObj$isInNiche[ecologyRows,] <- cbind(miniBlau$isInNiche, (rep(ecologyId, nrow(miniBlau$isInNiche))))
    topbounds <- cbind(as.data.frame(miniBlau$topbounds), as.data.frame(rep(ecologyId, nrow(miniBlau$topbounds)))) #temp object to add ecology names
    lowbounds <- cbind(as.data.frame(miniBlau$lowbounds), as.data.frame(rep(ecologyId, nrow(miniBlau$lowbounds)))) #temp object to add ecology names
    colnames(topbounds) <- c(colnames(blauObj$dimensions), 'ecologyNames')
    rownames(topbounds) <- colnames(blauObj$memberships)
    colnames(lowbounds) <- c(colnames(blauObj$dimensions), 'ecologyNames')
    rownames(lowbounds) <- colnames(blauObj$memberships)
    blauObj$topbounds <- rbind(blauObj$topbounds, topbounds) #add it to the bottom
    blauObj$lowbounds <- rbind(blauObj$lowbounds, lowbounds) #add it to the bottom
  }
  tempData <- blauObj$isInNiche[,which(colnames(blauObj$isInNiche) != 'ecologyNames')]
  class(tempData) <- 'numeric'
  blauObj$isInNiche <- cbind(as.data.frame(tempData), blauObj$isInNiche[,which(colnames(blauObj$isInNiche) == 'ecologyNames')])
  colnames(blauObj$isInNiche)[ncol(blauObj$isInNiche)] <- 'ecologyNames'
  return(blauObj)
}
/scratch/gouwar.j/cran-all/cranData/Blaunet/R/calc.niches.ecology.R
calc.nodal <- function(blauObj, mode){
  #requires focalNiche (primMem) specification
  if (mode == 'local'){
    #initialize
    blauObj$nodalLocal <- as.data.frame(matrix(0, nrow = nrow(blauObj$memberships), ncol = 3))
    rownames(blauObj$nodalLocal) <- rownames(blauObj$isInNiche)
    #in focal niche
    blauObj$nodalLocal[,1] <- blauObj$isInNiche[, blauObj$primMemCol]
    #total number of niches individual is in
    blauObj$nodalLocal[,2] <- matrix(apply(blauObj$isInNiche, 1, function(x) sum(x, na.rm=TRUE)), ncol = 1, byrow = TRUE)
    #if individual is in primary org but outside of primary niche
    for (nodeCyc in 1:length(blauObj$isInNiche[, blauObj$primMemCol])){
      if (!is.na(blauObj$isInNiche[nodeCyc, blauObj$primMemCol]) && !is.na(blauObj$memberships[nodeCyc, blauObj$primMemCol])){
        if (blauObj$isInNiche[nodeCyc, blauObj$primMemCol] == 0 && blauObj$memberships[nodeCyc, blauObj$primMemCol] == 1){
          blauObj$nodalLocal[nodeCyc, 3] <- 1
        }
      }
    }
    return(blauObj)
  } else if (mode == 'global'){
    #does not require focalNiche (primMem)
    #number of organizations individual is in
    orgs <- matrix(apply(blauObj$memberships, 1, function(x) sum(x, na.rm = TRUE)), ncol = 1, byrow = TRUE)
    #number of niches individual is in
    niches <- matrix(apply(blauObj$isInNiche, 1, function(x) c(sum(x, na.rm = TRUE), c(paste(which(x == 1), collapse=' ')))), ncol = 2, byrow = TRUE)
    blauObj$nodalGlobal <- cbind(orgs, niches)
    rownames(blauObj$nodalGlobal) <- rownames(blauObj$isInNiche)
    return(blauObj)
  }
}
/scratch/gouwar.j/cran-all/cranData/Blaunet/R/calc.nodal.R
calc.nodal.ecology <- function(blauObj, uniqueEcologies, mode){
  uniqueEcologies <- unique(blauObj$ids[,2])
  uniqueEcologies <- uniqueEcologies[!is.na(uniqueEcologies)]
  if (mode == 'local'){
    blauObj$nodalLocal <- matrix(0, nrow = 0, ncol = 2)
    for (ecologyId in uniqueEcologies){
      ecologyRows <- which(blauObj$ids[,2] == ecologyId)
      miniBlau <- splittify(blauObj, ecologyId, ecologyRows)
      miniBlau <- calc.nodal(miniBlau, mode) #memberships, weights, dimensions, primaryMemberships are used by niches
      blauObj$nodalLocal <- rbind(blauObj$nodalLocal, miniBlau$nodalLocal)
    }
  } else if (mode == 'global'){
    blauObj$nodalGlobal <- matrix(0, nrow = 0, ncol = 3)
    for (ecologyId in uniqueEcologies){
      ecologyRows <- which(blauObj$ids[,2] == ecologyId)
      miniBlau <- splittify(blauObj, ecologyId, ecologyRows)
      miniBlau <- calc.nodal(miniBlau, mode)
      blauObj$nodalGlobal <- rbind(blauObj$nodalGlobal, miniBlau$nodalGlobal)
    }
  }
  if (mode == 'network'){
    blauObj$nodalNetwork <- matrix(0, nrow = 0, ncol = 2)
    for (ecologyId in uniqueEcologies){
      ecologyRows <- which(blauObj$ids[,2] == ecologyId)
      miniBlau <- splittify(blauObj, ecologyId, ecologyRows)
      miniBlau <- calc.nodal.network(miniBlau)
      blauObj$nodalNetwork <- rbind(blauObj$nodalNetwork, miniBlau$nodalNetwork)
    }
  }
  return(blauObj)
}
/scratch/gouwar.j/cran-all/cranData/Blaunet/R/calc.nodal.ecology.R
calc.nodal.network <- function(blauObj){
  #initialize
  blauObj$nodalNetwork <- as.data.frame(matrix(0, nrow = nrow(blauObj$memberships), ncol = 2))
  rownames(blauObj$nodalNetwork) <- rownames(blauObj$isInNiche)
  #gets rid of nodes not in the current ecology
  namelist <- network.vertex.names(blauObj$graph)
  diff_names <- setdiff(namelist, rownames(blauObj$dimensions))
  blauObj$graph <- delete.vertices(blauObj$graph, vapply(diff_names, function(x) which(namelist == x), 1))
  edgelist <- as.matrix(blauObj$graph, matrix.type='edgelist')
  #make a named edgelist, makes our computations easier
  charEL <- charEdgelist(edgelist, attr(edgelist, 'vnames'))
  #if we're given an undirected graph (undirected EL/symmetric adjacency matrix)
  #duplicate the EL with the origin nodes reversed
  if (is.directed(blauObj$graph) == FALSE) {
    charEL <- rbind(charEL, cbind(charEL[,2], charEL[,1]))
  }
  #sort edgelist by first element
  if (nrow(charEL) > 1){
    charEL <- charEL[order(charEL[, 1]), ]
  }
  #this is kind of a confusing piece of code at first
  #it sets a 'current' origin node and cycles through all of that node's neighbors
  #when it hits a new 'current' node, it records all of the information for the previous 'current' node
  #then it resets the list of niches spanned to and begins recording information on the new current node
  currentNode <- charEL[1,1]
  spannedTo <- c()
  #cycle through directed edgelist
  #the origin node is element 1, the destination node is element 2
  for (rowCyc in 1:nrow(charEL)){
    edge <- as.vector(charEL[rowCyc,])
    #since EL is sorted, if we see a different origin node,
    #record changes to nodalNetwork, update current node, reset spannedTo
    if (edge[1] != currentNode){
      blauObj$nodalNetwork[currentNode,1] <- ifelse(length(spannedTo) > 0, 1, 0)
      blauObj$nodalNetwork[currentNode,2] <- length(spannedTo)
      #start new spanner record
      currentNode <- edge[1]
      spannedTo <- c()
      niches1 <- blauObj$isInNiche[edge[1], ]
      niches2 <- blauObj$isInNiche[edge[2], ]
      spannedTo <- union(spannedTo, (which((niches2 - niches1) == 1)))
    } else {
      niches1 <- blauObj$isInNiche[edge[1], ]
      niches2 <- blauObj$isInNiche[edge[2], ]
      #nodal spanners are defined as:
      #node1 is not in nicheA but has a friend in nicheA
      #node1 is then said to 'span' to nicheA
      #niches spanned to are indicated by 1's
      #we get number spanned to
      spannedTo <- union(spannedTo, (which((niches2 - niches1) == 1)))
    }
  }
  #save the last elements when the loop stops
  blauObj$nodalNetwork[currentNode,1] <- ifelse(length(spannedTo) > 0, 1, 0)
  blauObj$nodalNetwork[currentNode,2] <- length(spannedTo)
  return(blauObj)
}
/scratch/gouwar.j/cran-all/cranData/Blaunet/R/calc.nodal.network.R
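## A toy illustration of the spanner bookkeeping above: a node spans to every
## niche it is not in but some neighbor is in (indicator vectors are made up).
niches_i <- c(0, 1, 0) #node i's niche indicators
niches_j <- c(1, 1, 0) #neighbor j's niche indicators
which((niches_j - niches_i) == 1) #i spans to niche 1 via j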
charEdgelist <- function(edgelist, vertexNames) { char <- matrix('0', ncol = ncol(edgelist), nrow = nrow(edgelist)) for (col in 1:ncol(edgelist)){ for (row in 1:nrow(edgelist)) { char[row, col] <- vertexNames[edgelist[row, col]] } } return(char) }
/scratch/gouwar.j/cran-all/cranData/Blaunet/R/charEdgelist.R
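## The same index-to-name mapping can be written without loops; a vectorized
## equivalent for comparison (same inputs, same character matrix out):
charEdgelist2 <- function(edgelist, vertexNames) {
  #subscripting a vector by a numeric matrix returns a vector in column-major
  #order, so one subscript call replaces both loops
  matrix(vertexNames[edgelist], ncol = ncol(edgelist))
}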
correctFormat <- function(arg, square.data) {
  if (!is.null(arg)){
    #single column identifier
    if (length(arg) <= 1) {
      if (is.character(arg)) {
        arg_temp <- type.convert(arg, as.is=TRUE)
        arg_temp <- which(colnames(square.data) == arg_temp)
        return(arg_temp)
      } else if (is.numeric(arg)) {
        return(arg)
      } else {
        message(sprintf('Input for option %s could not be treated as a character or numeric', arg))
      }
    } else if (length(arg) > 1){
      argList <- rep(0, length(arg))
      if (is.numeric(arg)) {
        argList <- arg
        return(unique(argList))
      } else {
        for (each in 1:length(arg)) {
          arg_temp <- type.convert(arg[each], as.is=TRUE)
          if (is.numeric(arg_temp)) {
            argList[each] <- as.numeric(arg_temp)
          } else if (is.character(arg_temp)) {
            argList[each] <- as.numeric(which(colnames(square.data) == arg_temp))
          } else message('errors')
        }
        return(unique(argList))
      }
    }
  }
}
/scratch/gouwar.j/cran-all/cranData/Blaunet/R/correctFormat.R
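## A quick illustration of the conversion correctFormat performs
## (hypothetical three-column data frame):
df <- data.frame(a = 1:3, b = 4:6, c = 7:9)
correctFormat("b", df)          #returns 2
correctFormat(c("a", "c"), df)  #returns c(1, 3)
correctFormat(c(1, 3), df)      #numeric input is passed through: c(1, 3)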
dyadic <- function(blauObj, dev.range, ecologies.off=FALSE, m.dist = TRUE) {
  if (ecologies.off == TRUE){
    blauObj <- niches(blauObj, dev.range, ecologies.off)
  }
  uniqueEcologies <- unique(blauObj$ids[,2])
  if (length(uniqueEcologies) == 1 || ecologies.off == TRUE){
    #if there's only one ecology and we don't have isInNiche
    if (is.null(blauObj$isInNiche)){
      blauObj <- niches(blauObj, dev.range)
    }
    blauObj <- calc.dyadic(blauObj, m.dist)
  } else if (length(uniqueEcologies) > 1){
    if (is.null(blauObj$isInNiche)){
      blauObj <- niches(blauObj, dev.range)
    }
    blauObj <- calc.dyadic.ecology(blauObj, m.dist)
  }
  if (m.dist == TRUE){
    colnames(blauObj$dyadic) <- c('Ego', 'Alter', 'CoNicher', 'CoOutsider', 'Straddler', 'Spanner', 'EucDist', 'MahalanobisDist')
  } else {
    colnames(blauObj$dyadic) <- c('Ego', 'Alter', 'CoNicher', 'CoOutsider', 'Straddler', 'Spanner', 'EucDist')
  }
  return(blauObj)
}
/scratch/gouwar.j/cran-all/cranData/Blaunet/R/dyadic.R
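## Usage sketch, continuing the blau.R example above, assuming b was built
## with a graph argument (m.dist = FALSE skips the Mahalanobis column):
b <- dyadic(b, dev.range = c(1.5, 1.5), m.dist = FALSE)
export.dyadic(b) #Ego, Alter, CoNicher, CoOutsider, Straddler, Spanner, EucDist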
ecology.summary <- function(blauObj, percent = FALSE){
  uniqueEcologies <- unique(blauObj$ids[,2])
  uniqueEcologies <- uniqueEcologies[!is.na(uniqueEcologies)]
  #columns: ecology name, org/niche name, then one overlap column per niche
  ##no boundaries, too cluttered
  sum.ecology <- matrix(0, nrow = 0, ncol = (ncol(blauObj$memberships) + 2))
  colnames(sum.ecology) <- c("Ecology", "Org/Niche", colnames(blauObj$memberships))
  if (length(uniqueEcologies) == 1) {
    focalNiches <- blauObj$isInNiche
    sum.mat <- t(as.matrix(focalNiches)) %*% as.matrix(focalNiches)
    orig.diag <- diag(sum.mat)
    #how many individuals are exclusively in a niche?
    #get this by summing up all isInNiche rows with sum = 1
    mat.diagonal <- as.matrix(rep(0, ncol(focalNiches)))
    for (node in 1:nrow(focalNiches)){
      if (sum(focalNiches[node,]) == 1){
        mat.diagonal <- mat.diagonal + focalNiches[node,]
      }
    }
    #manually replace because the diag function gets confused
    for (elem in 1:nrow(mat.diagonal)){
      sum.mat[elem,elem] <- mat.diagonal[elem,1]
    }
    if (percent == TRUE){
      sum.mat <- sum.mat / orig.diag
    }
    sum.ecology <- rbind(sum.ecology, cbind(cbind(rep(blauObj$ids[1,2], ncol(blauObj$memberships)), colnames(blauObj$memberships)), sum.mat))
  } else {
    for (ecologyId in uniqueEcologies){
      ecologyRows <- which(blauObj$ids[,2] == ecologyId)
      focalNiches <- blauObj$isInNiche[ecologyRows, 1:(ncol(blauObj$isInNiche)-1)]
      sum.mat <- t(as.matrix(focalNiches)) %*% as.matrix(focalNiches)
      orig.diag <- diag(sum.mat)
      #how many individuals are exclusively in a niche?
      #get this by summing up all isInNiche rows with sum = 1
      mat.diagonal <- as.matrix(rep(0, ncol(focalNiches)))
      for (node in 1:nrow(focalNiches)){
        if (sum(focalNiches[node,]) == 1){
          mat.diagonal <- mat.diagonal + as.numeric(focalNiches[node,])
        }
      }
      #manually replace because the diag function gets confused
      for (elem in 1:nrow(mat.diagonal)){
        sum.mat[elem,elem] <- mat.diagonal[elem,1]
      }
      if (percent == TRUE){
        sum.mat <- sum.mat / orig.diag
      }
      sum.ecology <- rbind(sum.ecology, cbind(cbind(rep(ecologyId, ncol(blauObj$memberships)), colnames(blauObj$memberships)), sum.mat))
    }
  }
  rownames(sum.ecology) <- NULL
  return(sum.ecology)
}
/scratch/gouwar.j/cran-all/cranData/Blaunet/R/ecology.summary.R
export.dyadic <- function(blauObj){ if (is.null(blauObj$dyadic)){ message("Nothing to export.") } else{ return(blauObj$dyadic) } }
/scratch/gouwar.j/cran-all/cranData/Blaunet/R/export.dyadic.R
export.nodal <- function(blauObj, niches = TRUE){
  if (is.null(blauObj$isInNiche)){
    message("Nothing to export.")
    return(invisible(NULL))
  }
  if (niches == TRUE){
    if ("ecologyNames" %in% colnames(blauObj$isInNiche)){
      to.export <- cbind(blauObj$ids, blauObj$isInNiche[, 1:(ncol(blauObj$isInNiche)-1)])
    } else {
      to.export <- cbind(blauObj$ids, blauObj$isInNiche)
    }
  } else {
    to.export <- data.frame(matrix(0, nrow = nrow(blauObj$nodalLocal), ncol = 0))
  }
  if (!is.null(blauObj$nodalLocal)){
    to.export <- cbind(to.export, blauObj$nodalLocal)
  }
  if (!is.null(blauObj$nodalGlobal)){
    to.export <- cbind(to.export, blauObj$nodalGlobal)
  }
  if (!is.null(blauObj$nodalNetwork)){
    to.export <- cbind(to.export, blauObj$nodalNetwork)
  }
  rownames(to.export) <- NULL
  return(to.export)
}
/scratch/gouwar.j/cran-all/cranData/Blaunet/R/export.nodal.R
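## Usage sketch: once any of the nodal.* functions below has run on b,
## export.nodal collects ids, niche indicators, and whichever nodal tables exist:
out <- export.nodal(b)
head(out)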
getPresentCases <- function(blauObj, presentCases){
  blauObj$ids <- blauObj$ids[presentCases, , drop=FALSE]
  blauObj$memberships <- blauObj$memberships[presentCases, , drop=FALSE]
  blauObj$dimensions <- blauObj$dimensions[presentCases, , drop=FALSE]
  blauObj$weights <- blauObj$weights[presentCases, , drop=FALSE]
  if (!is.null(blauObj$isInNiche)) {
    blauObj$isInNiche <- blauObj$isInNiche[presentCases, , drop=FALSE]
  }
  if (!is.null(blauObj$primaryMembership)) {
    blauObj$primaryMembership <- blauObj$primaryMembership[presentCases, , drop=FALSE]
  }
  #we don't cut down connections, we keep the full network object
  #we don't cut down top/lowbounds because they're computed in a manner specified with the input function and aren't the same size
  #we don't cut down 'final' elements such as nodalLocal because they've been computed already according to user specification
  return(blauObj)
}
/scratch/gouwar.j/cran-all/cranData/Blaunet/R/getPresentCases.R
isBinary <- function(arg) {
  binary <- TRUE
  arg <- as.matrix(na.exclude(arg))
  if (is.matrix(arg)) {
    u_mat <- unique(arg)
    #breaks and returns FALSE as soon as it hits a non-missing, non-(0,1) value
    for (each in u_mat) {
      if (each != 0 && each != 1) {
        binary <- FALSE
        break
      }
    }
    return(binary)
  } else {
    message('Blau dimension columns must be coercible to matrix')
  }
}
/scratch/gouwar.j/cran-all/cranData/Blaunet/R/isBinary.R
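## An equivalent vectorized check, for comparison (same semantics: NAs are
## ignored, and an all-NA or empty column counts as binary):
isBinary2 <- function(arg) {
  vals <- na.omit(as.vector(as.matrix(arg)))
  all(vals %in% c(0, 1))
}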
isCorrectLength <- function(arg, argLength = 1) {
  #emits an error message if the argument is of the wrong length.
  #returns TRUE if there are no errors (NULL or correct length), FALSE otherwise.
  if (!is.null(arg)){
    if (length(arg) != argLength) {
      message(sprintf('Option %s takes an argument of at most length %s', paste(arg, collapse=", "), argLength))
      FALSE
    } else {
      TRUE
    }
  } else {
    TRUE
  }
}
/scratch/gouwar.j/cran-all/cranData/Blaunet/R/isCorrectLength.R
niche.summary <- function(blauObj){
  uniqueEcologies <- unique(blauObj$ids[,2])
  uniqueEcologies <- uniqueEcologies[!is.na(uniqueEcologies)]
  #columns: ecology name, niche name, num in org, num in niche, num exclusively
  #in the niche, num overlapping with other niches, num in org but not in niche
  ##no boundaries, too cluttered
  sum.niche <- matrix(0, nrow = length(uniqueEcologies)*ncol(blauObj$memberships), ncol = 7)
  colnames(sum.niche) <- c("Ecology", "Org/Niche", "OrgMem", "NicheMem", "NicheExc", "NicheOvr", "MemExc")
  rowCount <- 1
  if (length(uniqueEcologies) == 1) {
    nicheNum <- 1
    focalMemberships <- blauObj$memberships
    focalNiches <- blauObj$isInNiche
    for (colCyc in 1:ncol(focalMemberships)){
      #in org but not in niche
      #basically find the 1's
      diff <- focalMemberships[, nicheNum] - focalNiches[, nicheNum]
      numOutside <- length(which(diff == 1))
      #in focal niche but in no other niche
      nicheExcl <- sum(apply(focalNiches, 1, function(x) ifelse(sum(x) == 1 && x[nicheNum] == 1, 1, 0)), na.rm = TRUE)
      #overlaps with at least 2 niches
      numNonExclusive <- sum(focalNiches[, nicheNum], na.rm = TRUE) - nicheExcl
      sum.niche[rowCount,] <- c(blauObj$ids[1,2], colnames(blauObj$memberships)[nicheNum], sum(focalMemberships[, nicheNum], na.rm = TRUE), sum(focalNiches[, nicheNum], na.rm = TRUE), nicheExcl, numNonExclusive, numOutside)
      rowCount <- rowCount + 1
      nicheNum <- nicheNum + 1
    }
  } else {
    for (ecologyId in uniqueEcologies){
      nicheNum <- 1
      ecologyRows <- which(blauObj$ids[,2] == ecologyId)
      focalMemberships <- blauObj$memberships[ecologyRows, ]
      focalNiches <- blauObj$isInNiche[ecologyRows, 1:(ncol(blauObj$isInNiche)-1)] #exclude last column, which is the ecology index
      for (colCyc in 1:ncol(focalMemberships)){
        #in org but not in niche
        diff <- focalMemberships[, nicheNum] - focalNiches[, nicheNum]
        numOutside <- length(which(diff == 1))
        #in focal niche but in no other niche
        nicheExcl <- sum(apply(focalNiches, 1, function(x) ifelse(sum(x) == 1 && x[nicheNum] == 1, 1, 0)), na.rm = TRUE)
        #overlaps with at least 2 niches
        numNonExclusive <- sum(focalNiches[, nicheNum], na.rm = TRUE) - nicheExcl
        sum.niche[rowCount,] <- c(ecologyId, colnames(blauObj$memberships)[nicheNum], sum(focalMemberships[, nicheNum], na.rm = TRUE), sum(focalNiches[, nicheNum], na.rm = TRUE), nicheExcl, numNonExclusive, numOutside)
        rowCount <- rowCount + 1
        nicheNum <- nicheNum + 1
      }
    }
  }
  return(sum.niche)
}
/scratch/gouwar.j/cran-all/cranData/Blaunet/R/niche.summary.R
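## Usage sketch for the two summary tables (assumes niches() has been run on b):
niche.summary(b)                   #per-niche membership and exclusivity counts
ecology.summary(b, percent = TRUE) #niche-by-niche overlaps as proportions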
niches <- function(blauObj, dev.range, ecologies.off = FALSE){
  uniqueEcologies <- unique(blauObj$ids[,2])
  if (length(uniqueEcologies) == 1 || ecologies.off == TRUE){
    blauObj <- calc.niches(blauObj, dev.range)
    rownames(blauObj$isInNiche) <- rownames(blauObj$memberships)
  } else if (length(uniqueEcologies) > 1) {
    blauObj <- calc.niches.ecology(blauObj, uniqueEcologies, dev.range)
  }
  presentCases <- which(complete.cases(blauObj$dimensions))
  blauObj <- getPresentCases(blauObj, presentCases)
  return(blauObj)
}
/scratch/gouwar.j/cran-all/cranData/Blaunet/R/niches.R
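## dev.range gives, per Blau dimension, how many weighted standard deviations
## around the group mean define the niche; 1.5 is the default the GUI uses.
b <- niches(b, dev.range = c(1.5, 1.5))
b$lowbounds #lower niche boundaries by group and dimension
b$topbounds #upper niche boundaries
b$isInNiche #node-by-niche indicator matrix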
nodal.global <- function(blauObj, dev.range, ecologies.off = FALSE){
  if (ecologies.off == TRUE){
    blauObj <- niches(blauObj, dev.range, ecologies.off)
  }
  uniqueEcologies <- unique(blauObj$ids[,2])
  if (length(uniqueEcologies) == 1 || ecologies.off == TRUE){
    #if there's only one ecology and we don't have isInNiche
    if (is.null(blauObj$isInNiche)){
      blauObj <- niches(blauObj, dev.range)
    }
    blauObj <- calc.nodal(blauObj, mode = "global") #has isInNiche now; does a bunch of stuff if a primaryMembership is specified
  } else if (length(uniqueEcologies) > 1) {
    #if there's more than one ecology, we need to split up primary membership, weights, dimensions and memberships
    if (is.null(blauObj$isInNiche)){
      blauObj <- niches(blauObj, dev.range)
    }
    blauObj <- calc.nodal.ecology(blauObj, uniqueEcologies, mode = "global")
  }
  colnames(blauObj$nodalGlobal) <- c("TotalOrgs", "Nicher", "NicheList")
  return(blauObj)
}
/scratch/gouwar.j/cran-all/cranData/Blaunet/R/nodal.global.R
nodal.local <- function(blauObj, focal.niche=NULL, dev.range, ecologies.off=FALSE){
  if (is.null(focal.niche)){
    return("Primary Membership needed for nodal.local")
  }
  blauObj$primMemCol <- correctFormat(focal.niche, blauObj$memberships)
  if (ecologies.off == TRUE){
    blauObj <- niches(blauObj, dev.range, ecologies.off)
  }
  uniqueEcologies <- unique(blauObj$ids[,2])
  if (length(uniqueEcologies) == 1 || ecologies.off == TRUE){
    #if there's only one ecology and we don't have isInNiche
    if (is.null(blauObj$isInNiche)){
      blauObj <- niches(blauObj, dev.range)
    }
    blauObj <- calc.nodal(blauObj, mode = "local") #has isInNiche now; does a bunch of stuff if a primaryMembership is specified
  } else if (length(uniqueEcologies) > 1) {
    #if there's more than one ecology, we need to split up primary membership, weights, dimensions and memberships
    if (is.null(blauObj$isInNiche)){
      blauObj <- niches(blauObj, dev.range)
    }
    blauObj <- calc.nodal.ecology(blauObj, uniqueEcologies, mode = "local")
  }
  colnames(blauObj$nodalLocal) <- c("FocNicher", "Nicher", "MemNotNiche")
  return(blauObj)
}
/scratch/gouwar.j/cran-all/cranData/Blaunet/R/nodal.local.R
nodal.network <- function(blauObj, dev.range, ecologies.off = FALSE){
  if (ecologies.off == TRUE){
    blauObj <- niches(blauObj, dev.range, ecologies.off)
  }
  uniqueEcologies <- unique(blauObj$ids[,2])
  if (length(uniqueEcologies) == 1 || ecologies.off == TRUE){
    #if there's only one ecology and we don't have isInNiche
    if (is.null(blauObj$isInNiche)){
      blauObj <- niches(blauObj, dev.range)
    }
    blauObj <- calc.nodal.network(blauObj)
  } else if (length(uniqueEcologies) > 1) {
    #if there's more than one ecology, we need to split up primary membership, weights, dimensions and memberships
    if (is.null(blauObj$isInNiche)){
      blauObj <- niches(blauObj, dev.range)
    }
    blauObj <- calc.nodal.ecology(blauObj, uniqueEcologies, mode = "network")
  }
  colnames(blauObj$nodalNetwork) <- c("Spanner", "NumSpannedTo")
  return(blauObj)
}
/scratch/gouwar.j/cran-all/cranData/Blaunet/R/nodal.network.R
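## Usage sketch for the three nodal measures ("club" is the illustrative
## membership column from the blau.R example above, not a package default;
## nodal.network assumes a graph was supplied to blau()):
b <- nodal.global(b, dev.range = c(1.5, 1.5))  #TotalOrgs, Nicher, NicheList
b <- nodal.local(b, focal.niche = "club", dev.range = c(1.5, 1.5))
b <- nodal.network(b, dev.range = c(1.5, 1.5)) #Spanner, NumSpannedTo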
splittify <- function(blauObj, ecologyId, ecologyRows) {
  #this object holds the ROW parts of the blau object that are in the relevant ecology
  miniBlau <- list()
  miniBlau$ids <- blauObj$ids[ecologyRows, , drop=FALSE]
  miniBlau$memberships <- blauObj$memberships[ecologyRows, , drop=FALSE]
  miniBlau$weights <- blauObj$weights[ecologyRows, , drop=FALSE]
  miniBlau$dimensions <- blauObj$dimensions[ecologyRows, , drop=FALSE]
  if (!is.null(blauObj$primMemCol)){
    miniBlau$primMemCol <- blauObj$primMemCol
  }
  if (!is.null(blauObj$isInNiche)){
    #this picks the rows in the ecology, and all columns except for the one with the ecology names
    miniBlau$isInNiche <- blauObj$isInNiche[which(blauObj$isInNiche[,'ecologyNames'] == ecologyId), which(colnames(blauObj$isInNiche) != 'ecologyNames'), drop=FALSE]
  }
  if (!is.null(blauObj$topbounds) && !is.null(blauObj$lowbounds)){
    miniBlau$topbounds <- blauObj$topbounds[which(blauObj$topbounds[,'ecologyNames'] == ecologyId), which(colnames(blauObj$topbounds) != 'ecologyNames'), drop=FALSE]
    miniBlau$lowbounds <- blauObj$lowbounds[which(blauObj$lowbounds[,'ecologyNames'] == ecologyId), which(colnames(blauObj$lowbounds) != 'ecologyNames'), drop=FALSE]
  }
  #keep the full network because the networked function will delete extraneous vertices
  miniBlau$graph <- blauObj$graph
  return(miniBlau)
}
/scratch/gouwar.j/cran-all/cranData/Blaunet/R/splittify.R
.onAttach <- function(libname, pkgname){ msg<-paste('BlauNet: Calculate and Analyze Blau Status for (Covert) Organizations\n') msg<-paste(msg,"Copyright (c) 2014-2022, Cheng Wang, Wayne State University\n", " Michael Genkin, Singapore Management University\n", " George Berry, Cornell University\n", " Liyuan Chen\n", " Matthew E. Brashears, University of South Carolina\n",sep="") msg<-paste(msg,'Type "blaunetgui()" to run analysis in graphic user interface (GUI).\n') packageStartupMessage(msg) }
/scratch/gouwar.j/cran-all/cranData/Blaunet/R/zzz.R
showanalysis <- function(h,...) { if ("cov" %in% ls(envir=blnetevn)==FALSE) {gmessage("Sorry! Attribute file is not loaded.", parent = window)} else { assign("b1",character(0),envir=blnetevn) assign("b2",character(0),envir=blnetevn) assign("b3","",envir=blnetevn) assign("b4",character(0),envir=blnetevn) assign("b5",character(0),envir=blnetevn) assign("b7",character(0),envir=blnetevn) assign("b8","FALSE",envir=blnetevn) assign("m1",names(blnetevn$cov),envir=blnetevn) assign("m3",names(blnetevn$cov),envir=blnetevn) toplevel <- gwindow("BlauNet Analysis", width=800, height=800, parent = window, visible=FALSE) cg <- ggroup(horizontal = TRUE,cont = toplevel) tbl0 <- gtable(blnetevn$m1,expand=TRUE,multiple=TRUE,cont=cg) cg1 <- ggroup(horizontal = FALSE, cont = cg) addSpring(cg1) gbplus1 <- gbutton("+", cont=cg1) gbminus1 <- gbutton("-", cont=cg1) addSpring(cg1) addSpring(cg1) gbplus2 <- gbutton("+", cont=cg1) gbminus2 <- gbutton("-", cont=cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) gbplus4 <- gbutton("+", cont=cg1) gbminus4 <- gbutton("-", cont=cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) gbplus5 <- gbutton("+", cont=cg1) gbminus5 <- gbutton("-", cont=cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) gbplus7 <- gbutton("+", cont=cg1) gbminus7 <- gbutton("-", cont=cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) cg2 <- gframe("Options", horizontal=FALSE, cont=cg) b1temp <- data.frame(Node.ids="",stringsAsFactors=FALSE) b2temp <- data.frame(Ecology.ids="",stringsAsFactors=FALSE) b4temp <- data.frame(Dimensions=rep("",length(blnetevn$m3)),stringsAsFactors=FALSE) b5temp <- data.frame(Groups=rep("",length(blnetevn$m3)),stringsAsFactors=FALSE) b7temp <- data.frame(Weights="",stringsAsFactors=FALSE) tbl1 <- gtable(b1temp,expand=TRUE,multiple=FALSE,cont=cg2) size(tbl1)[2] <- 50 tbl2 <- gtable(b2temp,expand=TRUE,multiple=FALSE,cont=cg2) size(tbl2)[2] <- 50 if ('el' %in% ls(envir=blnetevn)) { gcheckboxgroup("Network included",cont=cg2,handler = function(h,...) assign("b3",svalue(h$obj),envir=blnetevn)) } tbl4 <- gtable(b4temp,expand=TRUE,multiple=TRUE,cont=cg2) size(tbl4)[2] <- 120 tbl5 <- gtable(b5temp,expand=TRUE,multiple=TRUE,cont=cg2) size(tbl5)[2] <- 120 tbl7 <- gtable(b7temp,expand=TRUE,multiple=FALSE,cont=cg2) size(tbl7)[2] <- 50 glabel("Complete.cases",cont=cg2) gradio(c("TRUE","FALSE"), selected = 2, cont = cg2, handler = function(h,...) assign("b8",svalue(h$obj),envir=blnetevn)) addHandlerClicked(gbplus1, handler = function(h,...) { temp <- svalue(tbl0) if ("" %in% temp==FALSE & tbl1[1]=="" & length(temp)==1) { assign("m1",blnetevn$m3[which(blnetevn$m3 %in% setdiff(blnetevn$m1,temp))],envir=blnetevn) km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i] assign("b1",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$b1,temp))],envir=blnetevn) tbl1[1] <- blnetevn$b1 } }) addHandlerClicked(gbminus1, handler = function(h,...) { temp <- svalue(tbl1) if ("" %in% temp==FALSE & length(temp)==1) { assign("m1",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$m1,temp))],envir=blnetevn) km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i] assign("b1",character(0),envir=blnetevn) tbl1[1] <- "" } }) addHandlerClicked(gbplus2, handler = function(h,...) 
{ temp <- svalue(tbl0) if ("" %in% temp==FALSE & tbl2[1]=="" & length(temp)==1) { assign("m1",blnetevn$m3[which(blnetevn$m3 %in% setdiff(blnetevn$m1,temp))],envir=blnetevn) km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i] assign("b2",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$b2,temp))],envir=blnetevn) tbl2[1] <- blnetevn$b2 } }) addHandlerClicked(gbminus2, handler = function(h,...) { temp <- svalue(tbl2) if ("" %in% temp==FALSE & length(temp)==1) { assign("m1",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$m1,temp))],envir=blnetevn) km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i] assign("b2",character(0),envir=blnetevn) tbl2[1] <- "" } }) addHandlerClicked(gbplus4, handler = function(h,...) { temp <- svalue(tbl0) if ("" %in% temp==FALSE & length(temp)>0) { assign("m1",blnetevn$m3[which(blnetevn$m3 %in% setdiff(blnetevn$m1,temp))],envir=blnetevn) km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i] assign("b4",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$b4,temp))],envir=blnetevn) kb4 <- c(blnetevn$b4,rep("",length(blnetevn$m3)-length(blnetevn$b4))) for (j in 1:length(blnetevn$m3)) tbl4[j] <- kb4[j] } }) addHandlerClicked(gbminus4, handler = function(h,...) { temp <- svalue(tbl4) if ("" %in% temp==FALSE & length(temp)>0) { assign("m1",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$m1,temp))],envir=blnetevn) km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1)+1)) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i] assign("b4",blnetevn$m3[which(blnetevn$m3 %in% setdiff(blnetevn$b4,temp))],envir=blnetevn) kb4 <- c(blnetevn$b4,rep("",length(blnetevn$m3)-length(blnetevn$b4)+1)) for (j in 1:length(blnetevn$m3)) tbl4[j] <- kb4[j] } }) addHandlerClicked(gbplus5, handler = function(h,...) { temp <- svalue(tbl0) if ("" %in% temp==FALSE & length(temp)>0) { assign("m1",blnetevn$m3[which(blnetevn$m3 %in% setdiff(blnetevn$m1,temp))],envir=blnetevn) km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i] assign("b5",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$b5,temp))],envir=blnetevn) kb5 <- c(blnetevn$b5,rep("",length(blnetevn$m3)-length(blnetevn$b5))) for (j in 1:length(blnetevn$m3)) tbl5[j] <- kb5[j] } }) addHandlerClicked(gbminus5, handler = function(h,...) { temp <- svalue(tbl5) if ("" %in% temp==FALSE & length(temp)>0) { assign("m1",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$m1,temp))],envir=blnetevn) km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1)+1)) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i] assign("b5",blnetevn$m3[which(blnetevn$m3 %in% setdiff(blnetevn$b5,temp))],envir=blnetevn) kb5 <- c(blnetevn$b5,rep("",length(blnetevn$m3)-length(blnetevn$b5)+1)) for (j in 1:length(blnetevn$m3)) tbl5[j] <- kb5[j] } }) addHandlerClicked(gbplus7, handler = function(h,...) { temp <- svalue(tbl0) if ("" %in% temp==FALSE & tbl7[1]=="" & length(temp)==1) { assign("m1",blnetevn$m3[which(blnetevn$m3 %in% setdiff(blnetevn$m1,temp))],envir=blnetevn) km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i] assign("b7",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$b7,temp))],envir=blnetevn) tbl7[1] <- blnetevn$b7 } }) addHandlerClicked(gbminus7, handler = function(h,...) 
{ temp <- svalue(tbl7) if ("" %in% temp==FALSE & length(temp)==1) { assign("m1",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$m1,temp))],envir=blnetevn) km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i] assign("b7",character(0),envir=blnetevn) tbl7[1] <- "" } }) visible(toplevel) <- TRUE button <- gbutton("Continue", cont = cg2, handler = function(h, ...) { if (length(blnetevn$b1)==0 | length(blnetevn$b4)==0 | length(blnetevn$b5)==0) {gmessage("Missing required information.", parent = toplevel)} else { dispose(toplevel) assign("b6",rep(1.5,length(blnetevn$b4)),envir=blnetevn) extralevel <- gwindow("Dev.range", width=800, height=300) ge <- gpanedgroup(cont = extralevel, horizontal = FALSE) cge <- ggroup(cont = ge, horizontal = FALSE) for (da in 1:length(blnetevn$b4)) { glabel(blnetevn$b4[da],cont=cge) assign(paste0("b6da", da),gslider(from = 0, to = 5, by = .05, value = 1.5, cont=cge, handler = function(h,...) svalue(get(paste0("b6da", da))) )) } addSpring(cge) addSpring(cge) addSpring(cge) button <- gbutton("Continue", cont = cge, handler = function(h, ...) { b6tmp <- c() for (da in 1:length(blnetevn$b4)) { b6tmp <- c(b6tmp,svalue(get(paste0("b6da", da)))) } assign("b6",b6tmp,envir=blnetevn) if (length(blnetevn$b7)>0) tmpweight <- blnetevn$b7 else tmpweight <- NULL if (length(blnetevn$b2)==0 & blnetevn$b3!="Network included") { b <- blau(blnetevn$cov, node.ids=blnetevn$b1, dimension=blnetevn$b4, memberships=blnetevn$b5,weights=tmpweight,complete.cases = blnetevn$b8) b <- niches(b, dev.range = blnetevn$b6) } if (length(blnetevn$b2)==0 & blnetevn$b3=="Network included") { b <- blau(blnetevn$cov, node.ids=blnetevn$b1, graph = blnetevn$el, dimension=blnetevn$b4, memberships=blnetevn$b5,weights=tmpweight,complete.cases = blnetevn$b8) b <- niches(b, dev.range = blnetevn$b6) } if (length(blnetevn$b2)!=0 & blnetevn$b3!="Network included") { b <- blau(blnetevn$cov, node.ids=blnetevn$b1, ecology.ids=blnetevn$b2, dimension=blnetevn$b4, memberships=blnetevn$b5,weights=tmpweight,complete.cases = blnetevn$b8 ) b <- niches(b, dev.range = blnetevn$b6) } if (length(blnetevn$b2)!=0 & blnetevn$b3=="Network included") { b <- blau(blnetevn$cov, node.ids=blnetevn$b1, ecology.ids=blnetevn$b2, graph = blnetevn$el, dimension=blnetevn$b4, memberships=blnetevn$b5,weights=tmpweight,complete.cases = blnetevn$b8 ) b <- niches(b, dev.range = blnetevn$b6) } if (blnetevn$b3=="Network included") { b <- nodal.network(b) } assign("bobj",b,envir=blnetevn) dispose(extralevel) if (blnetevn$b3=="Network included") sndlevel <- gwindow("Niche Analysis Results", width=905, height=30) if (blnetevn$b3!="Network included") sndlevel <- gwindow("Niche Analysis Results", width=800, height=30) g <- gpanedgroup(cont = sndlevel, horizontal = TRUE) cg <- ggroup(cont = g, horizontal = TRUE) button <- gbutton("Show Object", cont = cg, handler = function(h, ...) { blauobj <- data.frame(capture.output(b)) nw1 <- gwindow("Object Information", width = 800, height = 600) ng1 <- ggroup(horizontal = FALSE, cont = nw1 ) button <- gbutton("Save Blau object: blauobj.Rdata", expand = FALSE, cont = ng1, handler = function(h, ...) { save(blauobj, file="blauobj.Rdata") }) gseparator(cont = ng1) gdf(blauobj, expand = TRUE, fill=TRUE, cont = ng1) }) addSpace(cg, 5) button <- gbutton("Nodal Result", cont = cg, handler = function(h, ...) 
{ b <- nodal.global(b) Nodalstatus <- b$nodalGlobal[,2] nstemp1 <- nstemp2 <- nstemp3 <- rep(0,nrow(blnetevn$cov)) nstemp1[as.numeric(b$nodalGlobal[,2])==0] <- 1 nstemp2[as.numeric(b$nodalGlobal[,2])==1] <- 1 nstemp3[as.numeric(b$nodalGlobal[,2])>1] <- 1 b0 <- data.frame(matrix(rep(NA,length(b$memberships)),nrow=nrow(b$memberships))) for (i in 1:nrow(b$memberships)) { for (j in 1:length(blnetevn$b5)) { if (is.na(b$memberships[i,j])==FALSE & is.na(b$isInNiche[i,j])==FALSE) { if (b$memberships[i,j]==1 & b$isInNiche[i,j]==1) b0[i,j] <- "Member & nicher" if (b$memberships[i,j]==1 & b$isInNiche[i,j]==0) b0[i,j] <- "Member not in niche" if (b$memberships[i,j]==0 & b$isInNiche[i,j]==1) b0[i,j] <- "Non-member but in niche" if (b$memberships[i,j]==0 & b$isInNiche[i,j]==0) b0[i,j] <- "Neither member nor nicher" } } } colnames(b0) <- paste(blnetevn$b5,"_mem&niche",sep="") if (blnetevn$b3!="Network included") { nodaloutput <- data.frame(cbind(b$ids,b$nodalGlobal,nstemp1,nstemp2,nstemp3,b$memberships,b$isInNiche[,1:(ncol(b$isInNiche)-1)],b0)) } else { nodaloutput <- data.frame(cbind(b$ids,b$nodalGlobal,nstemp1,nstemp2,nstemp3,b$memberships,b$isInNiche[,1:(ncol(b$isInNiche)-1)],b0,b$nodalNetwork)) } names(nodaloutput)[4] <- "Niches" names(nodaloutput)[6] <- "Outsider" names(nodaloutput)[7] <- "Insider_Exclusive" names(nodaloutput)[8] <- "Insider_Manifolder" nw2 <- gwindow("Nodal Result",width = 800, height = 600) ng2 <- ggroup(horizontal = FALSE, cont = nw2) button1 <- gbutton("Save as csv file: nodaloutput.csv", expand = FALSE, cont = ng2, handler = function(h, ...) { write.table(nodaloutput, "nodaloutput.csv", row.names=F, col.names=T, sep=",") }) button2 <- gbutton("Save as R file: nodaloutput.Rdata", expand = FALSE, cont = ng2, handler = function(h, ...) { save(nodaloutput, file="nodaloutput.Rdata") }) button3 <- gbutton("Save as SAS file: nodaloutput.txt & nodaloutput.sas", expand = FALSE, cont = ng2, handler = function(h, ...) { write.foreign(nodaloutput, "nodaloutput.txt", "nodaloutput.sas", package="SAS") }) button4 <- gbutton("Save as Stata file: nodaloutput.dta", expand = FALSE, cont = ng2, handler = function(h, ...) { write.dta(nodaloutput, ("nodaloutput.dta")) }) button5 <- gbutton("Save as SPSS file: nodaloutput.txt & nodaloutput.sps", expand = FALSE, cont = ng2, handler = function(h, ...) { write.foreign(nodaloutput, "nodaloutput.txt", "nodaloutput.sps", package="SPSS") }) gseparator(cont = ng2) vars <- gdf(nodaloutput, expand = TRUE, fill=TRUE, cont = ng2) }) addSpace(cg, 5) button <- gbutton("Niche Breadth Summary", cont = cg, handler = function(h, ...) { nbtemp <- data.frame(blnetevn$bobj$lowbounds,stringsAsFactors = FALSE) for (o in 1:nrow(nbtemp)) { for (p in 1:length(blnetevn$b4)) { nbtemp[o,p] <- paste(format(round(blnetevn$bobj$lowbounds[o,p],2),nsmall=2),"-",format(round(blnetevn$bobj$topbounds[o,p],2),nsmall=2),sep="") } } nichebreadth <- cbind(rownames(nbtemp),nbtemp) names(nichebreadth)[1] <- "GROUPS" nw13 <- gwindow("Niche Breadth Summary",width = 800, height = 600) ng13 <- ggroup(horizontal = FALSE, cont = nw13) button1 <- gbutton("Save as csv file: nichebreadth.csv", expand = FALSE, cont = ng13, handler = function(h, ...) { write.table(nichebreadth, "nichebreadth.csv", row.names=F, col.names=T, sep=",") }) button2 <- gbutton("Save as R file: nichebreadth.Rdata", expand = FALSE, cont = ng13, handler = function(h, ...) 
{ save(nichebreadth, file="nichebreadth.Rdata") }) button3 <- gbutton("Save as SAS file: nichebreadth.txt & nichebreadth.sas", expand = FALSE, cont = ng13, handler = function(h, ...) { write.foreign(nichebreadth, "nichebreadth.txt", "nichebreadth.sas", package="SAS") }) button4 <- gbutton("Save as Stata file: nichebreadth.dta", expand = FALSE, cont = ng13, handler = function(h, ...) { write.dta(nichebreadth, ("nichebreadth.dta")) }) button5 <- gbutton("Save as SPSS file: nichebreadth.txt & nichebreadth.sps", expand = FALSE, cont = ng13, handler = function(h, ...) { write.foreign(nichebreadth, "nichebreadth.txt", "nichebreadth.sps", package="SPSS") }) gseparator(cont = ng13) vars <- gdf(nichebreadth, expand = TRUE, fill=TRUE, cont = ng13) }) addSpace(cg, 5) button <- gbutton("Focal Niche Summary", cont = cg, handler = function(h, ...) { fns <- data.frame(niche.summary(b),stringsAsFactors = FALSE) es2 <- data.frame(ecology.summary(b,percent=TRUE),stringsAsFactors = FALSE) if (length(blnetevn$b2)==0) { pm <- c() fnstemp <- matrix(as.numeric(fns[,3]),ncol=1) es2temp <- es2[,3:(2+length(blnetevn$b5))] for (p in 1:length(blnetevn$b5)) { pm <- c(pm,round(matrix(as.numeric(es2temp[p,]),nrow=1) %*% fnstemp)) } } if (length(blnetevn$b2)==1) { pm <- c() for (o in unique(es2$Ecology)) { fnstemp <- matrix(as.numeric(fns[which(fns$Ecology==o),3]),ncol=1) es2temp <- es2[which(es2$Ecology==o),3:(2+length(blnetevn$b5))] for (p in 1:length(blnetevn$b5)) { pm <- c(pm,round(matrix(as.numeric(es2temp[p,]),nrow=1) %*% fnstemp)) } } } nichesummary <- cbind(fns[,1:4],pm,fns[,5:7],round(as.numeric(fns[,5])/as.numeric(fns[,4]),4)*100) names(nichesummary)[5] <- "PredictedNicheMem" names(nichesummary)[9] <- "ExclusivePercent" nw5 <- gwindow("Focal Niche Summary",width = 800, height = 600) ng5 <- ggroup(horizontal = FALSE, cont = nw5) button1 <- gbutton("Save as csv file: nichesummary.csv", expand = FALSE, cont = ng5, handler = function(h, ...) { write.table(nichesummary, "nichesummary.csv", row.names=F, col.names=T, sep=",") }) button2 <- gbutton("Save as R file: nichesummary.Rdata", expand = FALSE, cont = ng5, handler = function(h, ...) { save(nichesummary, file="nichesummary.Rdata") }) button3 <- gbutton("Save as SAS file: nichesummary.txt & nichesummary.sas", expand = FALSE, cont = ng5, handler = function(h, ...) { write.foreign(nichesummary, "nichesummary.txt", "nichesummary.sas", package="SAS") }) button4 <- gbutton("Save as Stata file: nichesummary.dta", expand = FALSE, cont = ng5, handler = function(h, ...) { write.dta(nichesummary, ("nichesummary.dta")) }) button5 <- gbutton("Save as SPSS file: nichesummary.txt & nichesummary.sps", expand = FALSE, cont = ng5, handler = function(h, ...) { write.foreign(nichesummary, "nichesummary.txt", "nichesummary.sps", package="SPSS") }) gseparator(cont = ng5) vars <- gdf(nichesummary, expand = TRUE, fill=TRUE, cont = ng5) }) addSpace(cg, 5) button <- gbutton("Niche by Niche Summary", cont = cg, handler = function(h, ...) 
{ es1 <- data.frame(ecology.summary(b),stringsAsFactors = FALSE) es2 <- data.frame(ecology.summary(b,percent=TRUE),stringsAsFactors = FALSE) names(es2)[3:(2+length(blnetevn$b5))] <- paste(names(es2)[3:(2+length(blnetevn$b5))],"_CC",sep="") if (length(blnetevn$b2)==0) { AVG_CC <- mean(as.numeric(as.matrix(es2[,3:(2+length(blnetevn$b5))])),na.rm=TRUE) STD_CC <- sd(as.numeric(as.matrix(es2[,3:(2+length(blnetevn$b5))])),na.rm=TRUE) AVG_CC <- rep(AVG_CC,length(blnetevn$b5)) STD_CC <- rep(STD_CC,length(blnetevn$b5)) ecologysummary <- cbind(es1,es2[,3:(2+length(blnetevn$b5))],AVG_CC,STD_CC) } if (length(blnetevn$b2)==1) { AVG_CC <- STD_CC <- c() for (o in unique(es2$Ecology)) { es2temp <- es2[which(es2$Ecology==o),] AVG_CC <- c(AVG_CC,mean(as.numeric(as.matrix(es2temp[,3:(2+length(blnetevn$b5))])),na.rm=TRUE)) STD_CC <- c(STD_CC,sd(as.numeric(as.matrix(es2temp[,3:(2+length(blnetevn$b5))])),na.rm=TRUE)) } AVG_CC <- rep(AVG_CC,each=length(blnetevn$b5)) STD_CC <- rep(STD_CC,each=length(blnetevn$b5)) ecologysummary <- cbind(es1,es2[,3:(2+length(blnetevn$b5))],AVG_CC,STD_CC) } for (o in 1:nrow(ecologysummary)) { for (p in (3+length(blnetevn$b5)):ncol(ecologysummary)) { ecologysummary[o,p] <- format(round(as.numeric(ecologysummary[o,p]),2),nsmall=2) } } nw4 <- gwindow("Niche by Niche Summary",width = 800, height = 600) ng4 <- ggroup(horizontal = FALSE, cont = nw4) button1 <- gbutton("Save as csv file: ecologysummary.csv", expand = FALSE, cont = ng4, handler = function(h, ...) { write.table(ecologysummary, "ecologysummary.csv", row.names=F, col.names=T, sep=",") }) button2 <- gbutton("Save as R file: ecologysummary.Rdata", expand = FALSE, cont = ng4, handler = function(h, ...) { save(ecologysummary, file="ecologysummary.Rdata") }) button3 <- gbutton("Save as SAS file: ecologysummary.txt & ecologysummary.sas", expand = FALSE, cont = ng4, handler = function(h, ...) { write.foreign(ecologysummary, "ecologysummary.txt", "ecologysummary.sas", package="SAS") }) button4 <- gbutton("Save as Stata file: ecologysummary.dta", expand = FALSE, cont = ng4, handler = function(h, ...) { write.dta(ecologysummary, ("ecologysummary.dta")) }) button5 <- gbutton("Save as SPSS file: ecologysummary.txt & ecologysummary.sps", expand = FALSE, cont = ng4, handler = function(h, ...) { write.foreign(ecologysummary, "ecologysummary.txt", "ecologysummary.sps", package="SPSS") }) gseparator(cont = ng4) vars <- gdf(ecologysummary, expand = TRUE, fill=TRUE, cont = ng4) }) addSpace(cg, 5) if (blnetevn$b3=="Network included") { button <- gbutton("Dyadic Result", cont = cg, handler = function(h, ...) { b <- dyadic(b, dev.range = blnetevn$b6) dyadicoutput <- data.frame(b$dyadic) nw6 <- gwindow("Dyadic Result",width = 800, height = 600) ng6 <- ggroup(horizontal = FALSE, cont = nw6) button1 <- gbutton("Save as csv file: dyadicoutput.csv", expand = FALSE, cont = ng6, handler = function(h, ...) { write.table(dyadicoutput, "dyadicoutput.csv", row.names=F, col.names=T, sep=",") }) button2 <- gbutton("Save as R file: dyadicoutput.Rdata", expand = FALSE, cont = ng6, handler = function(h, ...) { save(dyadicoutput, file="dyadicoutput.Rdata") }) button3 <- gbutton("Save as SAS file: dyadicoutput.txt & dyadicoutput.sas", expand = FALSE, cont = ng6, handler = function(h, ...) { write.foreign(dyadicoutput, "dyadicoutput.txt", "dyadicoutput.sas", package="SAS") }) button4 <- gbutton("Save as Stata file: dyadicoutput.dta", expand = FALSE, cont = ng6, handler = function(h, ...) 
{ write.dta(dyadicoutput, "dyadicoutput.dta") }) button5 <- gbutton("Save as SPSS file: dyadicoutput.txt & dyadicoutput.sps", expand = FALSE, cont = ng6, handler = function(h, ...) { write.foreign(dyadicoutput, "dyadicoutput.txt", "dyadicoutput.sps", package="SPSS") }) gseparator(cont = ng6) vars <- gdf(dyadicoutput, expand = TRUE, fill=TRUE, cont = ng6) }) addSpace(cg, 5) } if (blnetevn$b3!="Network included") { button <- gbutton("Correlation Matrix", cont = cg, handler = function(h, ...) { if (length(blnetevn$b2)==0) { b <- blau(blnetevn$cov, node.ids=blnetevn$b1, dimension=blnetevn$b4, memberships=blnetevn$b5,weights=tmpweight,complete.cases = blnetevn$b8) b <- niches(b, dev.range = blnetevn$b6) } if (length(blnetevn$b2)!=0) { b <- blau(blnetevn$cov, node.ids=blnetevn$b1, ecology.ids=blnetevn$b2, dimension=blnetevn$b4, memberships=blnetevn$b5,weights=tmpweight,complete.cases = blnetevn$b8 ) b <- niches(b, dev.range = blnetevn$b6) } b <- nodal.global(b) y1 <- cbind(data.frame(rownames(b$nodalGlobal)),data.frame(matrix(as.numeric(b$nodalGlobal[,1:2]),ncol=2))) names(y1)[1] <- "nodeId" names(y1)[2] <- "TotalOrgs" names(y1)[3] <- "Niches" y2 <- cbind(blnetevn$cov[,which(blnetevn$b1==names(blnetevn$cov))],blnetevn$cov[,blnetevn$b4[1:length(blnetevn$b4)]]) names(y2)[1] <- "nodeId" x <- merge(y1,y2,by="nodeId") x <- x[,-1] x <- cor(x) coroutput <- data.frame(cbind(rownames(x),x)) names(coroutput)[1] <- "Names" nw7 <- gwindow("Correlation Matrix",width = 800, height = 600) ng7 <- ggroup(horizontal = FALSE, cont = nw7) button1 <- gbutton("Save as csv file: correlation.csv", expand = FALSE, cont = ng7, handler = function(h, ...) { write.table(coroutput, "correlation.csv", row.names=F, col.names=T, sep=",") }) button2 <- gbutton("Save as R file: correlation.Rdata", expand = FALSE, cont = ng7, handler = function(h, ...) { save(coroutput, file="correlation.Rdata") }) button3 <- gbutton("Save as SAS file: correlation.txt & correlation.sas", expand = FALSE, cont = ng7, handler = function(h, ...) { write.foreign(coroutput, "correlation.txt", "correlation.sas", package="SAS") }) button4 <- gbutton("Save as Stata file: correlation.dta", expand = FALSE, cont = ng7, handler = function(h, ...) { write.dta(coroutput, ("correlation.dta")) }) button5 <- gbutton("Save as SPSS file: correlation.txt & correlation.sps", expand = FALSE, cont = ng7, handler = function(h, ...) { write.foreign(coroutput, "correlation.txt", "correlation.sps", package="SPSS") }) gseparator(cont = ng7) vars <- gdf(coroutput, expand = TRUE, fill=TRUE, cont = ng7) }) } if (blnetevn$b3=="Network included") { button <- gbutton("Correlation Matrix", cont = cg, handler = function(h, ...) 
{ if (length(blnetevn$b2)==0) { b <- blau(blnetevn$cov, node.ids=blnetevn$b1, graph = blnetevn$el, dimension=blnetevn$b4, memberships=blnetevn$b5,weights=tmpweight,complete.cases = blnetevn$b8) b <- niches(b, dev.range = blnetevn$b6) } if (length(blnetevn$b2)!=0) { b <- blau(blnetevn$cov, node.ids=blnetevn$b1, ecology.ids=blnetevn$b2, graph = blnetevn$el, dimension=blnetevn$b4, memberships=blnetevn$b5,weights=tmpweight,complete.cases = blnetevn$b8 ) b <- niches(b, dev.range = blnetevn$b6) } b <- nodal.global(b) y1 <- cbind(data.frame(rownames(b$nodalGlobal)),data.frame(matrix(as.numeric(b$nodalGlobal[,1:2]),ncol=2))) names(y1)[1] <- "nodeId" names(y1)[2] <- "TotalOrgs" names(y1)[3] <- "Niches" y2 <- cbind(blnetevn$cov[,which(blnetevn$b1==names(blnetevn$cov))],blnetevn$cov[,blnetevn$b4[1:length(blnetevn$b4)]]) names(y2)[1] <- "nodeId" x <- merge(y1,y2,by="nodeId") n <- network(as.matrix(blnetevn$adj)) outdegree <- degree(n,cmode="outdegree") indegree <- degree(n,cmode="indegree") betweenness <- betweenness(n) closeness <- closeness(n) eigenvector <- round(evcent(n),4) y3 <- cbind(data.frame(rownames(b$nodalGlobal)),data.frame(cbind(outdegree,indegree,betweenness,closeness,eigenvector))) names(y3)[1] <- "nodeId" x <- merge(x,y3,by="nodeId") x <- x[,-1] x <- cor(x) coroutput <- data.frame(cbind(rownames(x),x)) names(coroutput)[1] <- "Names" nw8 <- gwindow("Correlation Matrix",width = 800, height = 600) ng8 <- ggroup(horizontal = FALSE, cont = nw8) button1 <- gbutton("Save as csv file: correlation.csv", expand = FALSE, cont = ng8, handler = function(h, ...) { write.table(coroutput, "correlation.csv", row.names=F, col.names=T, sep=",") }) button2 <- gbutton("Save as R file: correlation.Rdata", expand = FALSE, cont = ng8, handler = function(h, ...) { save(coroutput, file="correlation.Rdata") }) button3 <- gbutton("Save as SAS file: correlation.txt & correlation.sas", expand = FALSE, cont = ng8, handler = function(h, ...) { write.foreign(coroutput, "correlation.txt", "correlation.sas", package="SAS") }) button4 <- gbutton("Save as Stata file: correlation.dta", expand = FALSE, cont = ng8, handler = function(h, ...) { write.dta(coroutput, ("correlation.dta")) }) button5 <- gbutton("Save as SPSS file: correlation.txt & correlation.sps", expand = FALSE, cont = ng8, handler = function(h, ...) { write.foreign(coroutput, "correlation.txt", "correlation.sps", package="SPSS") }) gseparator(cont = ng8) vars <- gdf(coroutput, expand = TRUE, fill=TRUE, cont = ng8) }) } }) } }) } }
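# The handler above merges nodal niche counts with raw attributes and (when a
# network is loaded) centrality scores, then reports cor() over the merged
# columns. A minimal standalone sketch of that pattern with hypothetical data;
# wrapped in if (FALSE) so it never runs when this script is sourced.
if (FALSE) {
  library(network); library(sna)
  set.seed(1)
  adj <- matrix(rbinom(100, 1, 0.2), 10, 10); diag(adj) <- 0
  n <- network(adj)
  m <- data.frame(Niches      = sample(0:3, 10, replace = TRUE),  # hypothetical niche counts
                  outdegree   = degree(n, cmode = "outdegree"),
                  indegree    = degree(n, cmode = "indegree"),
                  betweenness = betweenness(n),
                  eigenvector = round(evcent(n), 4))
  round(cor(m), 2)
}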
# ---- end of inst/scripts/analysis.R ----
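# inst/scripts/blaububbles.R: GUI for constructing "Blau bubbles". After the
# user picks node ids, an optional ecology id, and a set of dimensions, every
# pair of nodes gets a normalized distance across those dimensions, and pairs
# closer than a chosen radius (0.33 by default) are linked in the bubble matrix.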
showblaububble <- function(h,...) { if ("cov" %in% ls(envir=blnetevn)==FALSE) {gmessage("Sorry! Attribute file is not loaded.", parent = window)} else { assign("u1",character(0),envir=blnetevn) assign("u2",character(0),envir=blnetevn) assign("u3","all",envir=blnetevn) assign("u4",character(0),envir=blnetevn) assign("u5",character(0),envir=blnetevn) assign("u6",0.33,envir=blnetevn) assign("m1",names(blnetevn$cov),envir=blnetevn) assign("m3",names(blnetevn$cov),envir=blnetevn) toplevel <- gwindow("Blau Bubbles", width=800, height=600, parent = window, visible=FALSE) cg <- ggroup(horizontal = TRUE,cont = toplevel) tbl0 <- gtable(blnetevn$m1,expand=TRUE,multiple=TRUE,cont=cg) cg1 <- ggroup(horizontal = FALSE, cont = cg) addSpring(cg1) addSpring(cg1) gbplus1 <- gbutton("+", cont=cg1) gbminus1 <- gbutton("-", cont=cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) gbplus2 <- gbutton("+", cont=cg1) gbminus2 <- gbutton("-", cont=cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) gbplus4 <- gbutton("+", cont=cg1) gbminus4 <- gbutton("-", cont=cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) cg2 <- gframe("Options", horizontal=FALSE, cont=cg) u1temp <- data.frame(Node.ids="",stringsAsFactors=FALSE) u2temp <- data.frame(Ecology.ids="",stringsAsFactors=FALSE) u4temp <- data.frame(Dimensions=rep("",length(blnetevn$m3)),stringsAsFactors=FALSE) u5temp <- data.frame(Groups=rep("",length(blnetevn$m3)),stringsAsFactors=FALSE) tbl1 <- gtable(u1temp,expand=TRUE,multiple=FALSE,cont=cg2) size(tbl1)[2] <- 50 tbl2 <- gtable(u2temp,expand=TRUE,multiple=FALSE,cont=cg2) size(tbl2)[2] <- 50 tbl4 <- gtable(u4temp,expand=TRUE,multiple=TRUE,cont=cg2) size(tbl4)[2] <- 120 addHandlerClicked(gbplus1, handler = function(h,...) { temp <- svalue(tbl0) if ("" %in% temp==FALSE & tbl1[1]=="" & length(temp)==1) { assign("m1",blnetevn$m3[which(blnetevn$m3 %in% setdiff(blnetevn$m1,temp))],envir=blnetevn) km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i] assign("u1",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$u1,temp))],envir=blnetevn) tbl1[1] <- blnetevn$u1 } }) addHandlerClicked(gbminus1, handler = function(h,...) { temp <- svalue(tbl1) if ("" %in% temp==FALSE& length(temp)==1) { assign("m1",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$m1,temp))],envir=blnetevn) km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i] assign("u1",character(0),envir=blnetevn) tbl1[1] <- "" } }) addHandlerClicked(gbplus2, handler = function(h,...) { temp <- svalue(tbl0) if ("" %in% temp==FALSE & tbl2[1]=="" & length(temp)==1) { assign("m1",blnetevn$m3[which(blnetevn$m3 %in% setdiff(blnetevn$m1,temp))],envir=blnetevn) km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i] assign("u2",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$u2,temp))],envir=blnetevn) tbl2[1] <- blnetevn$u2 } }) addHandlerClicked(gbminus2, handler = function(h,...) { temp <- svalue(tbl2) if ("" %in% temp==FALSE & length(temp)==1) { assign("m1",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$m1,temp))],envir=blnetevn) km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i] assign("u2",character(0),envir=blnetevn) tbl2[1] <- "" } }) addHandlerClicked(gbplus4, handler = function(h,...) 
{ temp <- svalue(tbl0)
  if ("" %in% temp==FALSE & length(temp)>0) {
    assign("m1",blnetevn$m3[which(blnetevn$m3 %in% setdiff(blnetevn$m1,temp))],envir=blnetevn)
    km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1)))
    for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i]
    assign("u4",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$u4,temp))],envir=blnetevn)
    ku4 <- c(blnetevn$u4,rep("",length(blnetevn$m3)-length(blnetevn$u4)))
    for (j in 1:length(blnetevn$m3)) tbl4[j] <- ku4[j]
  } })
addHandlerClicked(gbminus4, handler = function(h,...) {
  temp <- svalue(tbl4)
  if ("" %in% temp==FALSE & length(temp)>0) {
    assign("m1",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$m1,temp))],envir=blnetevn)
    km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1)+1))
    for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i]
    assign("u4",blnetevn$m3[which(blnetevn$m3 %in% setdiff(blnetevn$u4,temp))],envir=blnetevn)
    ku4 <- c(blnetevn$u4,rep("",length(blnetevn$m3)-length(blnetevn$u4)+1))
    for (j in 1:length(blnetevn$m3)) tbl4[j] <- ku4[j]
  } })
visible(toplevel) <- TRUE
button <- gbutton("Continue", expand = FALSE, cont = cg2, handler = function(h, ...) {
  if (length(blnetevn$u1)==0 | length(blnetevn$u4)==0) {gmessage("Missing required information.", parent = toplevel)} else {
    dispose(toplevel)
    seclevel <- gwindow("Blau Bubble Options", width=600, height=350, parent = window)
    secg <- ggroup(cont = seclevel, use.scrollwindow=T, horizontal = FALSE)
    secgv <- gvbox(cont = secg)
    tbl <- gformlayout(cont = secgv)
    if (length(blnetevn$u2)!=0) {
      glabel("Please select the ecology in which you want to generate Blau bubbles", cont=tbl)
      gradio(c("all",as.matrix(unique(blnetevn$cov[which(colnames(blnetevn$cov)==blnetevn$u2)]))), selected = 1, cont=tbl, handler = function(h,...){ assign("u3",svalue(h$obj),envir=blnetevn) })
      gseparator(cont = secgv)
    }
    gcheckboxgroup(blnetevn$u4, cont = tbl, label="Please identify the categorical variable(s)", handler = function(h,...){ assign("u5",svalue(h$obj),envir=blnetevn) })
    gseparator(cont = secgv)
    glabel("Radius (0.33 by default)",cont=secg)
    gslider(from = 0, to = 1, by = .01, value = 0.33, cont=secg, handler = function(h,...){ assign("u6",svalue(h$obj),envir=blnetevn) })
    button <- gbutton("Continue", expand = FALSE, cont = secg, handler = function(h, ...)
{ dispose(seclevel) if (blnetevn$u3=="all") { cov1 <- blnetevn$cov if ("el" %in% ls(envir=blnetevn)) adj1 <- blnetevn$adj } else { cov1 <- blnetevn$cov[which(blnetevn$cov[which(colnames(blnetevn$cov)==blnetevn$u2)]==blnetevn$u3),] if ("el" %in% ls(envir=blnetevn)) adj1 <- blnetevn$adj[which(blnetevn$cov[which(colnames(blnetevn$cov)==blnetevn$u2)]==blnetevn$u3),which(blnetevn$cov[which(colnames(blnetevn$cov)==blnetevn$u2)]==blnetevn$u3)] } k <- nrow(cov1) distmat <- matrix(rep(0,k*k),nrow=k) data <- data.frame(matrix(rep(0,k*length(blnetevn$u4)),nrow=k)) for (o in 1:length(blnetevn$u4)) { if (blnetevn$u4[o] %in% blnetevn$u5) { data[,o] <- cov1[which(colnames(cov1)==blnetevn$u4[o])] } else { min <- min(cov1[which(colnames(cov1)==blnetevn$u4[o])],na.rm=TRUE) max <- max(cov1[which(colnames(cov1)==blnetevn$u4[o])],na.rm=TRUE) data[,o] <- (cov1[which(colnames(cov1)==blnetevn$u4[o])]-min)/(max-min) } } colnames(data) <- blnetevn$u4 for (p in 1:k) for (q in 1:k) if (p!=q) { distance <- valid_count <- 0 for (r in 1:length(blnetevn$u4)) { i_ <- data[p,r] j_ <- data[q,r] if (is.na(i_)==FALSE && is.na(j_)==FALSE) { if (blnetevn$u4[r] %in% blnetevn$u5) { if (i_==j_) distance <- distance+0 else distance <- distance+1 } else distance <- distance+(i_-j_)^2 valid_count <- valid_count+1 } } distmat[p,q] <- sqrt(distance)/sqrt(valid_count) } blaububble <- matrix(rep(0,k*k),nrow=k) blaububble[which(distmat<=blnetevn$u6)] <- 1 blaububble[which(distmat>blnetevn$u6)] <- 0 diag(blaububble) <- 0 distmat <- data.frame(distmat) distmat <- data.frame(cbind(as.matrix(cov1[which(colnames(cov1)==blnetevn$u1)]),distmat)) colnames(distmat) <- c("ego",as.matrix(cov1[which(colnames(cov1)==blnetevn$u1)])) blaububble <- data.frame(blaububble) rownames(blaububble) <- as.matrix(cov1[which(colnames(cov1)==blnetevn$u1)]) colnames(blaububble) <- as.matrix(cov1[which(colnames(cov1)==blnetevn$u1)]) bbubble <- data.frame(cbind(c(rownames(blaububble)),blaububble)) colnames(bbubble) <- c("ego",as.matrix(cov1[which(colnames(cov1)==blnetevn$u1)])) thdlevel <- gwindow("Blau Bubble Results", width=515, height=30) thdg <- gpanedgroup(cont = thdlevel, horizontal = TRUE) tg <- ggroup(cont = thdg, horizontal = TRUE) button <- gbutton("Blau Distance Matrix", cont = tg, handler = function(h, ...) { fthlevel1 <- gwindow("Blau Distance Matrix",width = 800, height = 600) fg1 <- ggroup(horizontal = FALSE, cont = fthlevel1) button1 <- gbutton("Save as csv file: blaudistance.csv", expand = FALSE, cont = fg1, handler = function(h, ...) { write.table(distmat, "blaudistance.csv", row.names=F, col.names=T, sep=",") }) button2 <- gbutton("Save as R file: blaudistance.Rdata", expand = FALSE, cont = fg1, handler = function(h, ...) { save(distmat, file="blaudistance.Rdata") }) button3 <- gbutton("Save as SAS file: blaudistance.txt & blaudistance.sas", expand = FALSE, cont = fg1, handler = function(h, ...) { write.foreign(distmat, "blaudistance.txt", "blaudistance.sas", package="SAS") }) button4 <- gbutton("Save as Stata file: blaudistance.dta", expand = FALSE, cont = fg1, handler = function(h, ...) { write.dta(distmat, ("blaudistance.dta")) }) button5 <- gbutton("Save as SPSS file: blaudistance.txt & blaudistance.sps", expand = FALSE, cont = fg1, handler = function(h, ...) { write.foreign(distmat, "blaudistance.txt", "blaudistance.sps", package="SPSS") }) gseparator(cont = fg1) vars <- gdf(distmat, expand = TRUE, fill=TRUE, cont = fg1) }) addSpace(tg, 5) button <- gbutton("Blau Bubble Matrix", cont = tg, handler = function(h, ...) 
{ fthlevel1 <- gwindow("Blau Bubble Matrix",width = 800, height = 600) fg1 <- ggroup(horizontal = FALSE, cont = fthlevel1) button1 <- gbutton("Save as csv file: blaububble.csv", expand = FALSE, cont = fg1, handler = function(h, ...) { write.table(bbubble, "blaububble.csv", row.names=F, col.names=T, sep=",") }) button2 <- gbutton("Save as R file: blaububble.Rdata", expand = FALSE, cont = fg1, handler = function(h, ...) { save(bbubble, file="blaububble.Rdata") }) button3 <- gbutton("Save as SAS file: blaububble.txt & blaububble.sas", expand = FALSE, cont = fg1, handler = function(h, ...) { write.foreign(bbubble, "blaububble.txt", "blaububble.sas", package="SAS") }) button4 <- gbutton("Save as Stata file: blaububble.dta", expand = FALSE, cont = fg1, handler = function(h, ...) { write.dta(bbubble, ("blaububble.dta")) }) button5 <- gbutton("Save as SPSS file: blaububble.txt & blaububble.sps", expand = FALSE, cont = fg1, handler = function(h, ...) { write.foreign(bbubble, "blaububble.txt", "blaububble.sps", package="SPSS") }) gseparator(cont = fg1) vars <- gdf(bbubble, expand = TRUE, fill=TRUE, cont = fg1) }) addSpace(tg, 5) button <- gbutton("Blau Bubble List", cont = tg, handler = function(h, ...) { blaublist <- data.frame(as.matrix(network(as.matrix(blaububble)),matrix.type="edgelist")) if (nrow(blaublist)>0) { dim.distance <- rep(0,nrow(blaublist)) distmattmp <- distmat[,-1] for (x in 1:nrow(blaublist)){ dim.distance[x] <- distmattmp[blaublist[x,1],blaublist[x,2]] } rm(x) blaublist <- cbind(blaublist,dim.distance) colnames(blaublist) <- c("i","j","dim.distance") if ("el" %in% ls(envir=blnetevn)) { geodesic.distance <- present.edges <- rep(0,nrow(blaublist)) tempadj <- symmetrize(adj1,rule="weak") tempel <- data.frame(as.matrix(network(tempadj),matrix.type="edgelist")) colnames(tempel) <- c("i","j") gd <- geodist(network(symmetrize(adj1,rule="weak")))$gdist for (x in 1:nrow(blaublist)){ if (nrow(merge(blaublist[x,1:2],tempel))>0) present.edges[x] <- 1 geodesic.distance[x] <- gd[blaublist[x,1],blaublist[x,2]] } blaublist <- cbind(blaublist,present.edges,geodesic.distance) rm(x) } blaublist[,1] <- as.matrix(cov1[which(colnames(cov1)==blnetevn$u1)])[blaublist[,1]] blaublist[,2] <- as.matrix(cov1[which(colnames(cov1)==blnetevn$u1)])[blaublist[,2]] } else { if ("el" %in% ls(envir=blnetevn)) { blaublist <- data.frame(i=double(),j=double(),dim.distance=double(),present.edges=double(),geodesic.distance=double()) } else { blaublist <- data.frame(i=double(),j=double(),dim.distance=double()) } } fthlevel2 <- gwindow("Blau Bubble List",width = 800, height = 600) fg2 <- ggroup(horizontal = FALSE, cont = fthlevel2) button1 <- gbutton("Save as csv file: blaububblelist.csv", expand = FALSE, cont = fg2, handler = function(h, ...) { write.table(blaublist, "blaububblelist.csv", row.names=F, col.names=T, sep=",") }) button2 <- gbutton("Save as R file: blaububblelist.Rdata", expand = FALSE, cont = fg2, handler = function(h, ...) { save(blaublist, file="blaububblelist.Rdata") }) button3 <- gbutton("Save as SAS file: blaububble.txt & blaububble.sas", expand = FALSE, cont = fg2, handler = function(h, ...) { write.foreign(blaublist, "blaububblelist.txt", "blaububblelist.sas", package="SAS") }) button4 <- gbutton("Save as Stata file: blaububble.dta", expand = FALSE, cont = fg2, handler = function(h, ...) { write.dta(blaublist, ("blaububblelist.dta")) }) button5 <- gbutton("Save as SPSS file: blaububble.txt & blaububble.sps", expand = FALSE, cont = fg2, handler = function(h, ...) 
{ write.foreign(blaublist, "blaububblelist.txt", "blaububblelist.sps", package="SPSS") }) gseparator(cont = fg2) vars <- gdf(blaublist, expand = TRUE, fill=TRUE, cont = fg2) }) addSpace(tg, 5) button <- gbutton("Nodal Bubble List", cont = tg, handler = function(h, ...) { nodes <- cov1[which(colnames(cov1)==blnetevn$u1)] co_bubble <- degree <- coincidence <- rep(0,k) co_bubble_list <- rep("",k) for (x in 1:k) { co_bubble[x] <- sum(blaububble[x,]) co_bubble_list[x] <- paste(colnames(blaububble)[which(blaububble[x,]==1)], collapse = " ") } rm(x) nodalbubble <- cbind(nodes,co_bubble,co_bubble_list) if ("el" %in% ls(envir=blnetevn)) { tempadj <- symmetrize(adj1,rule="weak") colnames(tempadj) <- as.matrix(cov1[which(colnames(cov1)==blnetevn$u1)]) degree <- coincidence <- rep(0,k) alter_list <- coincidence_list <- rep("",k) for (x in 1:k) { degree[x] <- sum(tempadj[x,]) alter_list[x] <- paste(colnames(tempadj)[which(tempadj[x,]==1)], collapse = " ") y <- intersect(colnames(blaububble)[which(blaububble[x,]==1)],colnames(tempadj)[which(tempadj[x,]==1)]) coincidence[x] <- length(y) coincidence_list[x] <- paste(y, collapse = " ") } rm(x,y) nodalbubble <- cbind(nodalbubble,degree,alter_list,coincidence,coincidence_list) } fthlevel3 <- gwindow("Nodal Bubble List",width = 800, height = 600) fg3 <- ggroup(horizontal = FALSE, cont = fthlevel3) button1 <- gbutton("Save as csv file: nodalbubblelist.csv", expand = FALSE, cont = fg3, handler = function(h, ...) { write.table(nodalbubble, "nodalbubble.csv", row.names=F, col.names=T, sep=",") }) button2 <- gbutton("Save as R file: nodalbubblelist.Rdata", expand = FALSE, cont = fg3, handler = function(h, ...) { save(nodalbubble, file="nodalbubblelist.Rdata") }) button3 <- gbutton("Save as SAS file: nodalbubblelist.txt & nodalbubblelist.sas", expand = FALSE, cont = fg3, handler = function(h, ...) { write.foreign(nodalbubble, "nodalbubblelist.txt", "nodalbubblelist.sas", package="SAS") }) button4 <- gbutton("Save as Stata file: nodalbubblelist.dta", expand = FALSE, cont = fg3, handler = function(h, ...) { write.dta(nodalbubble, "nodalbubblelist.dta") }) button5 <- gbutton("Save as SPSS file: nodalbubblelist.txt & nodalbubblelist.sps", expand = FALSE, cont = fg3, handler = function(h, ...) { write.foreign(nodalbubble, "nodalbubblelist.txt", "nodalbubblelist.sps", package="SPSS") }) gseparator(cont = fg3) vars <- gdf(nodalbubble, expand = TRUE, fill=TRUE, cont = fg3) }) }) }})} }
# ---- end of inst/scripts/blaububbles.R ----
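# The main script below follows the usual gWidgets2 pattern: wrap handlers in
# gaction() objects, then feed the same actions to gmenu() and gtoolbar(). A
# minimal sketch of that idiom (window title and handler are hypothetical);
# wrapped in if (FALSE) so it never runs when this file is sourced.
if (FALSE) {
  library(gWidgets2); library(gWidgets2tcltk)
  w    <- gwindow("demo", visible = FALSE)
  acts <- list(quit = gaction("Quit", handler = function(...) dispose(w), parent = w))
  gmenu(list(File = list(quit = acts$quit)), cont = w)  # menu bar built from the actions
  visible(w) <- TRUE
}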
rm(list=ls()) require(Blaunet) require(digest) require(gWidgets2) require(gWidgets2tcltk) require(plot3D) require(plot3Drgl) require(network) require(sna) require(ergm) require(haven) require(foreign) rm(list=ls()) oldwd <- getwd() on.exit(setwd(oldwd)) blnetevn <- new.env() clearmemory <- function(h,...) { rm(list=setdiff(ls(envir=.GlobalEnv),c(objls,"objls")),envir=.GlobalEnv) gmessage("The computer memory is successfully cleared.", parent = window) } source('open.R') source('browse.R') source('network.R') source('graph.R') source('dimensions.R') source('nicheplot.R') source('analysis.R') source('dynamics.R') source('blaububbles.R') showabout <- function(h,...) gmessage("Blaunet graphic package 2.2.1", parent = window) commandpdf <- function(h,...) { if (Sys.info()[1]=="Windows") shell.exec(paste(.libPaths(), "/Blaunet/scripts/command.pdf", sep="")) else if (Sys.info()[1]=="Darwin") system(paste("open ",.libPaths(), "/Blaunet/scripts/command.pdf", sep="")) else if (Sys.info()[1]=="Linux") system(paste("xdg-open ",.libPaths(), "/Blaunet/scripts/command.pdf", sep="")) } graphicpdf <- function(h,...) { if (Sys.info()[1]=="Windows") shell.exec(paste(.libPaths(), "/Blaunet/scripts/graphic.pdf", sep="")) else if (Sys.info()[1]=="Darwin") system(paste("open ",.libPaths(), "/Blaunet/scripts/graphic.pdf", sep="")) else if (Sys.info()[1]=="Linux") system(paste("xdg-open ",.libPaths(), "/Blaunet/scripts/graphic.pdf", sep="")) } ###################################################################### window <- gwindow("Blaunet", width = 1024, height = 600, visible=FALSE) action_list = list( OpenFile = gaction(label = "Open Attribute File", handler = loadfile, parent = window), OpenFile1 = gaction(label = "Open Attribute File", icon = "open", icon.col=10, handler = loadfile, parent = window), OpenNet = gaction(label = "Open Network File", handler = loadnet, parent = window), OpenNet1 = gaction(label = "Open Network File", icon = "arrows", icon.col=2, handler = loadnet, parent = window), clear = gaction(label = "Clear Memory", handler = clearmemory, parent = window), clear1 = gaction(label = "Clear Memory", icon = "clear", icon.col=2, handler = clearmemory, parent = window), quit = gaction(label = "Quit", handler = function(...) dispose(window), parent = window), quit1 = gaction(label = "Quit", icon = "quit", icon.col=2, handler = function(...) 
dispose(window), parent = window),
  browsefile = gaction(label = "Browse Attribute File", handler = brattr, parent = window),
  browseadj = gaction(label = "Browse Adjacency Matrix", handler = bradj, parent = window),
  browseel = gaction(label = "Browse Network Edgelist", handler = brel, parent = window),
  Info = gaction(label = "Info", handler = showinfo, parent = window),
  Density = gaction(label = "Density", handler = showdensity, parent = window),
  Centrality = gaction(label = "Centrality", handler = showcentrality, parent = window),
  Dyad_census = gaction(label = "Dyad Census", handler = showdcensus, parent = window),
  ReciprocityIndex = gaction(label = "Reciprocity Index", handler = showreciprocityindex, parent = window),
  Triad_census = gaction(label = "Triad Census", handler = showtcensus, parent = window),
  GlobalClustering = gaction(label = "Global Clustering Coefficient", handler = showglobalcustering, parent = window),
  LocalClustering = gaction(label = "Local Clustering Coefficient", handler = showlocalcustering, parent = window),
  Graph = gaction(label = "Network Graph", handler = showgraph, parent = window),
  HistogramOutdegree = gaction(label = "Histogram Out-degree", handler = showhoutdegree, parent = window),
  HistogramIndegree = gaction(label = "Histogram In-degree", handler = showhindegree, parent = window),
  Dimensions = gaction(label = "Salient Dimensions", handler = showdimensions, parent = window),
  Nicheplot = gaction(label = "Niche Plot", handler = nicheplot, parent = window),
  Analysis = gaction(label = "Niche Analysis", handler = showanalysis, parent = window),
  Dynamics = gaction(label = "Niche Dynamics", handler = showdynamics, parent = window),
  Blaububbles = gaction(label = "Blau Bubbles", handler = showblaububble, parent = window),
  About = gaction(label = "About", handler = showabout, parent = window),
  Commandpdf = gaction(label = "Command Line Manual", handler = commandpdf, parent = window),
  Graphicpdf = gaction(label = "Graphic Package Manual", handler = graphicpdf, parent = window)
)
tool_bar_list <- c(action_list$OpenFile1, sep = gseparator(), action_list$OpenNet1, sep = gseparator(), action_list$clear1, sep = gseparator(), action_list$quit1)
menu_bar_list <- list(
  Data = list( OpenFile = action_list$OpenFile, OpenNet = action_list$OpenNet, sep = gseparator(), Clear = action_list$clear, sep = gseparator(), Quit = action_list$quit ),
  Browse = list( attribute = action_list$browsefile, adjacency = action_list$browseadj, edgelist = action_list$browseel ),
  Network = list( Info = action_list$Info, Density = action_list$Density, Centrality = action_list$Centrality, DyadCensus = action_list$Dyad_census, ReciprocityIndex = action_list$ReciprocityIndex, TriadCensus = action_list$Triad_census, GlobalClustering = action_list$GlobalClustering, LocalClustering = action_list$LocalClustering ),
  Graph = list( Graph = action_list$Graph, HistogramOutdegree = action_list$HistogramOutdegree, HistogramIndegree = action_list$HistogramIndegree ),
  Analysis = list( Dimensions = action_list$Dimensions, Nicheplot = action_list$Nicheplot, Analysis = action_list$Analysis, Dynamics = action_list$Dynamics, Blaububble = action_list$Blaububbles ),
  Help = list( About = action_list$About, Gpdf = action_list$Graphicpdf, Cpdf = action_list$Commandpdf )
)
group <- ggroup(horizontal = FALSE, spacing=1, cont = window)
menu_bar <- gmenu(menu_bar_list, cont = window)
tool_bar <- gtoolbar(tool_bar_list, style = c("both-horiz"), expand=TRUE, cont = window)
no_changes <- c("save","save.as","cut")
if
(Sys.info()[1]=="Windows") { setwd(gsub("scripts", "data", getwd())) txt <- gedit(getwd(), cont = group) button <- gbutton("Set Working Directory", cont = group, handler = function(h, ...) setwd(choose.dir())) addHandlerChanged(button, handler=function(h,...) svalue(txt) <- getwd()) } else { setwd(gsub("scripts", "data", getwd())) txt <- gedit(getwd(), cont = group) button <- gbutton("Set working directory", cont = group, handler = function(h, ...) setwd(svalue(txt))) } glabel("Title: A Toolkit for Calculating, Visualizing, and Analyzing Social Distance Using Blau Status Analysis ", container=group, anchor=c(-1,1)) glabel("Depends: R (>= 3.0.0)", container=group, anchor=c(-1,1)) glabel("Imports: digest, gWidgets2, gWidgets2tcltk, plot3D, plot3Drgl, rgl, network, sna, ergm, statnet.common, haven, foreign", container=group, anchor=c(-1,1)) glabel("Version: 2.2.1", container=group, anchor=c(-1,1)) glabel("Authors: Cheng Wang*, Michael Genkin*, George Berry, Liyuan Chen, Matthew Brashears *Both authors contributed equally to this work and their names are randomly ordered", container=group, anchor=c(-1,1)) glabel("Maintainer: Cheng Wang <[email protected]>", container=group, anchor=c(-1,1)) glabel("Description: An integrated set of tools to calculate, visualize, and analyze positions in social distance between individuals belonging to (covert) organizational groups. Relational (network) data may be incorporated for additional analysis.", container=group, anchor=c(-1,1)) glabel("License: GPL-3", container=group, anchor=c(-1,1)) glabel("Blaunet Users Facebook group: https://www.facebook.com/groups/425015561030239/", container=group, anchor=c(-1,1)) glabel("Funding: This project is supported by Defense Threat Reduction Agency (DTRA) Grant HDTRA-10-1-0043.", container=group, anchor=c(-1,1)) glabel("Repository: CRAN", container=group, anchor=c(-1,1)) glabel("Date/Publication: 2022-09-26 09:28:06", container=group, anchor=c(-1,1)) sb <- gstatusbar("", container=window) #id <- addHandlerUnrealize(window, handler = function(h,...) {!gconfirm("Really close", parent = h$obj)}) objls <- ls(envir=.GlobalEnv) visible(window) <- TRUE
# ---- end of inst/scripts/blaunetgui.R ----
brattr <- function(h,...) {
  if ("cov" %in% ls(envir=blnetevn)) {
    nw <- gwindow("Attribute File", width = 800, height = 600)
    group <- ggroup(horizontal = FALSE, cont = nw)
    vars <- gdf(blnetevn$cov, expand = TRUE, fill = TRUE, cont = group)
  } else gmessage("Sorry! Attribute file is not loaded.", parent = window)
}
bradj <- function(h,...) {
  if ("adj" %in% ls(envir=blnetevn)) {
    nw <- gwindow("Adjacency Matrix", width = 800, height = 600)
    group <- ggroup(horizontal = FALSE, cont = nw)
    ego <- rownames(blnetevn$adj)
    adj1 <- cbind(ego, blnetevn$adj)
    vars <- gdf(adj1, expand = TRUE, fill = TRUE, cont = group)
  } else gmessage("Sorry! Network file is not loaded.", parent = window)
}
brel <- function(h,...) {
  if ("el" %in% ls(envir=blnetevn)) {
    nw <- gwindow("Edge List", width = 800, height = 600)
    group <- ggroup(horizontal = FALSE, cont = nw)
    vars <- gdf(blnetevn$el, expand = TRUE, fill = TRUE, cont = group)
  } else gmessage("Sorry! Network file is not loaded.", parent = window)
}
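# The three browsers above share one pattern: a new gwindow holding a single
# gdf() spreadsheet view of a data frame. The same idiom works for any data
# frame (the data here is illustrative); if (FALSE) keeps sourcing inert.
if (FALSE) {
  w <- gwindow("View", width = 800, height = 600)
  g <- ggroup(horizontal = FALSE, cont = w)
  gdf(head(iris), expand = TRUE, fill = TRUE, cont = g)
}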
# ---- end of inst/scripts/browse.R ----
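# inst/scripts/dimensions.R: screens candidate Blau dimensions in two stages.
# First, each group membership is regressed on the candidate dimensions (plus
# network measures when a network is loaded) with glm(family = binomial), and
# dimensions with p < alpha survive. Second, ERGMs on group co-membership
# networks test the survivors as nodematch() (categorical) or absdiff()
# (continuous) terms; those significant at alpha are reported as salient.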
showdimensions <- function(h,...) { if ("cov" %in% ls(envir=blnetevn)==FALSE) {gmessage("Sorry! Attribute file is not loaded.", parent = window)} else { assign("c1",character(0),envir=blnetevn) assign("c2",character(0),envir=blnetevn) assign("c3","all",envir=blnetevn) assign("c4",character(0),envir=blnetevn) assign("c5",character(0),envir=blnetevn) assign("c6",0.05,envir=blnetevn) assign("c7",character(0),envir=blnetevn) assign("c8",character(0),envir=blnetevn) assign("c9",character(0),envir=blnetevn) assign("m1",names(blnetevn$cov),envir=blnetevn) assign("m3",names(blnetevn$cov),envir=blnetevn) toplevel <- gwindow("Salient Dimensions", width=600, height=800, parent = window, visible=FALSE) cg <- ggroup(horizontal = TRUE,cont = toplevel) tbl0 <- gtable(blnetevn$m1,expand=TRUE,multiple=TRUE,cont=cg) cg1 <- ggroup(horizontal = FALSE, cont = cg) gbplus1 <- gbutton("+", cont=cg1) gbminus1 <- gbutton("-", cont=cg1) gbplus2 <- gbutton("+", cont=cg1) gbminus2 <- gbutton("-", cont=cg1) addSpring(cg1) addSpring(cg1) gbplus4 <- gbutton("+", cont=cg1) gbminus4 <- gbutton("-", cont=cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) gbplus5 <- gbutton("+", cont=cg1) gbminus5 <- gbutton("-", cont=cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) cg2 <- ggroup(horizontal = FALSE, cont = cg) c1temp <- data.frame(Node.ids="",stringsAsFactors=FALSE) c2temp <- data.frame(Ecology.ids="",stringsAsFactors=FALSE) c4temp <- data.frame(Dimensions=rep("",length(blnetevn$m3)),stringsAsFactors=FALSE) c5temp <- data.frame(Groups=rep("",length(blnetevn$m3)),stringsAsFactors=FALSE) tbl1 <- gtable(c1temp,index=TRUE, cont=cg2) size(tbl1)[2] <- 50 tbl2 <- gtable(c2temp,cont=cg2) size(tbl2)[2] <- 50 tbl4 <- gtable(c4temp,expand=TRUE,multiple=TRUE,cont=cg2) tbl5 <- gtable(c5temp,expand=TRUE,multiple=TRUE,cont=cg2) glabel("Alpha (0.05 by default)",cont=cg2) gcombobox(c(0.05,0.001,0.01,0.1), cont=cg2, handler = function(h,...) assign("c6",svalue(h$obj),envir=blnetevn)) addHandlerClicked(gbplus1, handler = function(h,...) { temp <- svalue(tbl0) if ("" %in% temp==FALSE & tbl1[1]=="" & length(temp)==1) { assign("m1",blnetevn$m3[which(blnetevn$m3 %in% setdiff(blnetevn$m1,temp))],envir=blnetevn) km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i] assign("c1",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$c1,temp))],envir=blnetevn) tbl1[1] <- blnetevn$c1 } }) addHandlerClicked(gbminus1, handler = function(h,...) { temp <- svalue(tbl1) if ("" %in% temp==FALSE & length(temp)==1) { assign("m1",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$m1,temp))],envir=blnetevn) km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i] assign("c1",character(0),envir=blnetevn) tbl1[1] <- "" } }) addHandlerClicked(gbplus2, handler = function(h,...) { temp <- svalue(tbl0) if ("" %in% temp==FALSE & tbl2[1]=="" & length(temp)==1) { assign("m1",blnetevn$m3[which(blnetevn$m3 %in% setdiff(blnetevn$m1,temp))],envir=blnetevn) km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i] assign("c2",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$c2,temp))],envir=blnetevn) tbl2[1] <- blnetevn$c2 } }) addHandlerClicked(gbminus2, handler = function(h,...) 
{ temp <- svalue(tbl2) if ("" %in% temp==FALSE & length(temp)==1) { assign("m1",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$m1,temp))],envir=blnetevn) km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i] assign("c2",character(0),envir=blnetevn) tbl2[1] <- "" } }) addHandlerClicked(gbplus4, handler = function(h,...) { temp <- svalue(tbl0) if ("" %in% temp==FALSE & length(temp)>0) { assign("m1",blnetevn$m3[which(blnetevn$m3 %in% setdiff(blnetevn$m1,temp))],envir=blnetevn) km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i] assign("c4",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$c4,temp))],envir=blnetevn) kc4 <- c(blnetevn$c4,rep("",length(blnetevn$m3)-length(blnetevn$c4))) for (j in 1:length(blnetevn$m3)) tbl4[j] <- kc4[j] } }) addHandlerClicked(gbminus4, handler = function(h,...) { temp <- svalue(tbl4) if ("" %in% temp==FALSE & length(temp)>0) { assign("m1",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$m1,temp))],envir=blnetevn) km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1)+1)) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i] assign("c4",blnetevn$m3[which(blnetevn$m3 %in% setdiff(blnetevn$c4,temp))],envir=blnetevn) kc4 <- c(blnetevn$c4,rep("",length(blnetevn$m3)-length(blnetevn$c4)+1)) for (j in 1:length(blnetevn$m3)) tbl4[j] <- kc4[j] } }) addHandlerClicked(gbplus5, handler = function(h,...) { temp <- svalue(tbl0) if ("" %in% temp==FALSE & length(temp)>0) { assign("m1",blnetevn$m3[which(blnetevn$m3 %in% setdiff(blnetevn$m1,temp))],envir=blnetevn) km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i] assign("c5",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$c5,temp))],envir=blnetevn) kc5 <- c(blnetevn$c5,rep("",length(blnetevn$m3)-length(blnetevn$c5))) for (j in 1:length(blnetevn$m3)) tbl5[j] <- kc5[j] } }) addHandlerClicked(gbminus5, handler = function(h,...) { temp <- svalue(tbl5) if ("" %in% temp==FALSE & length(temp)>0) { assign("m1",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$m1,temp))],envir=blnetevn) km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1)+1)) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i] assign("c5",blnetevn$m3[which(blnetevn$m3 %in% setdiff(blnetevn$c5,temp))],envir=blnetevn) kc5 <- c(blnetevn$c5,rep("",length(blnetevn$m3)-length(blnetevn$c5)+1)) for (j in 1:length(blnetevn$m3)) tbl5[j] <- kc5[j] } }) visible(toplevel) <- TRUE button <- gbutton("Continue", expand = FALSE, cont = cg2, handler = function(h, ...) 
{ if (length(blnetevn$c1)==0 | length(blnetevn$c4)==0) {gmessage("Missing required information.", parent = toplevel)} else { dispose(toplevel) if ('el' %in% ls(envir=blnetevn)==FALSE & length(blnetevn$c5)==0) { gmessage("You must select group options.") } else { if (length(blnetevn$c5)>0) { if (length(blnetevn$c2)!=0) { seclevel <- gwindow("Salient Dimension Options", width=600, height=600, parent = window) secg <- ggroup(cont = seclevel, use.scrollwindow=T, horizontal = FALSE) secgv <- gvbox(cont = secg) tbl <- gformlayout(cont = secgv) gcheckboxgroup(c("all",as.matrix(unique(blnetevn$cov[which(colnames(blnetevn$cov)==blnetevn$c2)]))), cont = tbl, label="Please select which ecology you want to identify salient dimensions:", handler = function(h,...){ assign("c3",svalue(h$obj),envir=blnetevn) }) gcheckboxgroup(blnetevn$c4, cont = tbl, label="Please identify categorical variables:", handler = function(h,...){ assign("c7",svalue(h$obj),envir=blnetevn) }) button <- gbutton("Continue", expand = FALSE, cont = secg, handler = function(h, ...) { dispose(seclevel) if ("all" %in% blnetevn$c3) { cov1 <- blnetevn$cov cov1[is.na(cov1)] <- 0 if ('el' %in% ls(envir=blnetevn)) adj1 <- as.matrix(blnetevn$adj) } else { cov1 <- blnetevn$cov[which(blnetevn$cov[,which(colnames(blnetevn$cov)==blnetevn$c2)] %in% blnetevn$c3),] cov1[is.na(cov1)] <- 0 if ('el' %in% ls(envir=blnetevn)) {adj1 <- as.matrix(blnetevn$adj[which(blnetevn$cov[,which(colnames(blnetevn$cov)==blnetevn$c2)] %in% blnetevn$c3),which(blnetevn$cov[,which(colnames(blnetevn$cov)==blnetevn$c2)] %in% blnetevn$c3)])} } if ('el' %in% ls(envir=blnetevn)) { tempadj <- symmetrize(adj1,rule="weak") deg <- degree(network(tempadj), cmode="outdegree") bet <- betweenness(network(tempadj)) eig <- evcent(network(tempadj)) tempel <- as.matrix(network(tempadj),matrix.type="edgelist") lclu <- rep(0,nrow(cov1)) for (i in 1:nrow(cov1)) { k <- tempel[which(tempel[,1]==i),2] if (length(k)>=2) { m <- rbind(tempel, t(combn(k,2))) lclu[i] <- nrow(m[duplicated(m), , drop = FALSE])/nrow(t(combn(k,2))) } else {lclu[i] <- NA} } rm(i,k,m) k <- c() for (i in 1:length(blnetevn$c5)) { alteringroup <- rep(0,nrow(cov1)) for (j in 1:nrow(cov1)) { if (length(tempel[which(tempel[,1]==j),2])>=1) alteringroup[j] <- sum(cov1[tempel[which(tempel[,1]==j),2],which(colnames(cov1)==blnetevn$c5[i])]) else alteringroup[j] <- 0 } temp <- cbind(cov1[which(colnames(cov1)==blnetevn$c5[i])],cov1[which(colnames(cov1) %in% blnetevn$c5[-i])],cov1[which(colnames(cov1) %in% blnetevn$c4)],deg,bet,eig,lclu,alteringroup) fit1 <- glm(temp, family=binomial) k <- unique(c(k,names(which(summary(fit1)$coefficients[,4]<blnetevn$c6)))) } assign("c8",blnetevn$c4[which(blnetevn$c4 %in% k)],envir=blnetevn) if (length(blnetevn$c8)>0) { g <- c() gm <- list() for (i in 1:length(blnetevn$c5)) { g <- c(g,which(names(blnetevn$cov)==blnetevn$c5[i])) gm[[i]] <- matrix(rep(0,nrow(blnetevn$cov)*nrow(blnetevn$cov)),nrow=nrow(blnetevn$cov)) } for (l in 1:length(blnetevn$c5)) { tempgm <- cbind(c(1:nrow(blnetevn$cov)),cov1[,g[l]]) tempgm <- tempgm[which(tempgm[,2]==1),] tempgm <- t(combn(tempgm[,1],2)) tempgm <- network(tempgm,directed=FALSE) tempgm1 <- nrow(blnetevn$cov)-network.size(tempgm) if (tempgm1>0) add.vertices(tempgm,tempgm1) gm[[l]] <- as.matrix(tempgm) } rm(i,l) for (l in 1:length(blnetevn$c5)) { n <- network(gm[[l]]) emf <- "n~" for (m in 1:length(blnetevn$c8)) { k1 <- cov1[,which(names(cov1)==blnetevn$c8[m])] k1[which(is.na(k1)==TRUE)] <- round(mean(k1,na.rm=TRUE)) n %v% blnetevn$c8[m] <- k1 if (blnetevn$c8[m] %in% 
blnetevn$c7) { emf <- paste(emf,"+nodematch(","\'",blnetevn$c8[m],"\'",")",sep="") } else { emf <- paste(emf,"+absdiff(","\'",blnetevn$c8[m],"\'",")",sep="") } } o <- setdiff(union(l,1:length(blnetevn$c5)),intersect(l,1:length(blnetevn$c5))) if (nrow(cov1)<=500) { for (m in 1:length(o)) emf <- paste(emf,"+edgecov(gm[[",o[m],"]])",sep="") emf <- paste(emf,"+edgecov(tempadj)",sep="") } emf <- paste(substr(emf, 1, 2), substr(emf, 4, nchar(emf)), sep='') em <- ergm(as.formula(emf)) blnetevn$c9 <- union(blnetevn$c9,blnetevn$c8[which(summary(em)$coefs[1:length(blnetevn$c8),4]<blnetevn$c6)]) } assign("c9",blnetevn$c9,envir=blnetevn) } else {assign("c9","",envir=blnetevn)} k <- data.frame(blnetevn$c9) colnames(k) <- "Dimensions" thdlevel <- gwindow("Salient Dimensions",width = 200, height = 400) tg <- ggroup(horizontal = FALSE, cont = thdlevel) dims <- gtable(k, expand = TRUE, cont = tg) } else { k <- c() for (j in 1:length(blnetevn$c5)) { temp <- cbind(cov1[which(colnames(cov1)==blnetevn$c5[j])],cov1[which(colnames(cov1) %in% blnetevn$c5[-j])],cov1[which(colnames(cov1) %in% blnetevn$c4)]) fit1 <- glm(temp, family=binomial) k <- unique(c(k,names(which(summary(fit1)$coefficients[,4]<blnetevn$c6)))) } assign("c8",blnetevn$c4[which(blnetevn$c4 %in% k)],envir=blnetevn) if (length(blnetevn$c8)>0) { g <- c() gm <- list() for (i in 1:length(blnetevn$c5)) { g <- c(g,which(names(cov1)==blnetevn$c5[i])) gm[[i]] <- matrix(rep(0,nrow(cov1)*nrow(cov1)),nrow=nrow(cov1)) } for (l in 1:length(blnetevn$c5)) { tempgm <- cbind(c(1:nrow(blnetevn$cov)),cov1[,g[l]]) tempgm <- tempgm[which(tempgm[,2]==1),] tempgm <- t(combn(tempgm[,1],2)) tempgm <- network(tempgm,directed=FALSE) tempgm1 <- nrow(blnetevn$cov)-network.size(tempgm) if (tempgm1>0) add.vertices(tempgm,tempgm1) gm[[l]] <- as.matrix(tempgm) } rm(i,l) for (l in 1:length(blnetevn$c5)) { n <- network(gm[[l]]) emf <- "n~" for (m in 1:length(blnetevn$c8)) { k1 <- cov1[,which(names(cov1)==blnetevn$c8[m])] k1[which(is.na(k1)==TRUE)] <- round(mean(k1,na.rm=TRUE)) n %v% blnetevn$c8[m] <- k1 if (blnetevn$c8[m] %in% blnetevn$c7) { emf <- paste(emf,"+nodematch(","\'",blnetevn$c8[m],"\'",")",sep="") } else { emf <- paste(emf,"+absdiff(","\'",blnetevn$c8[m],"\'",")",sep="") } } o <- setdiff(union(l,1:length(blnetevn$c5)),intersect(l,1:length(blnetevn$c5))) if (nrow(cov1)<=500) { for (m in 1:length(o)) emf <- paste(emf,"+edgecov(gm[[",o[m],"]])",sep="") } emf <- paste(substr(emf, 1, 2), substr(emf, 4, nchar(emf)), sep='') em <- ergm(as.formula(emf)) blnetevn$c9 <- union(blnetevn$c9,blnetevn$c8[which(summary(em)$coefs[1:length(blnetevn$c8),4]<blnetevn$c6)]) } assign("c9",blnetevn$c9,envir=blnetevn) } else {assign("c9","",envir=blnetevn)} k <- data.frame(blnetevn$c9) colnames(k) <- "Dimensions" thdlevel <- gwindow("Salient Dimensions",width = 200, height = 400) tg <- ggroup(horizontal = FALSE, cont = thdlevel) dims <- gtable(k, expand = TRUE, cont = tg) } }) } else { seclevel <- gwindow("Salient Dimension Options", width=600, height=600, parent = window) secg <- ggroup(cont = seclevel, use.scrollwindow=T, horizontal = FALSE) secgv <- gvbox(cont = secg) tbl <- gformlayout(cont = secgv) gcheckboxgroup(blnetevn$c4, cont = tbl, label="Please identify categorical variables:", handler = function(h,...){ assign("c7",svalue(h$obj),envir=blnetevn) }) button <- gbutton("Continue", expand = FALSE, cont = secg, handler = function(h, ...) 
{ dispose(seclevel) cov1 <- blnetevn$cov cov1[is.na(cov1)] <- 0 if ('el' %in% ls(envir=blnetevn)) { adj1 <- blnetevn$adj tempadj <- symmetrize(adj1,rule="weak") deg <- degree(network(tempadj), cmode="outdegree") bet <- betweenness(network(tempadj)) eig <- evcent(network(tempadj)) tempel <- as.matrix(network(tempadj),matrix.type="edgelist") lclu <- rep(0,nrow(cov1)) for (i in 1:nrow(cov1)) { k <- tempel[which(tempel[,1]==i),2] if (length(k)>=2) { m <- rbind(tempel, t(combn(k,2))) lclu[i] <- nrow(m[duplicated(m), , drop = FALSE])/nrow(t(combn(k,2))) } else {lclu[i] <- NA} } rm(i,k,m) k <- c() for (i in 1:length(blnetevn$c5)) { alteringroup <- rep(0,nrow(cov1)) for (j in 1:nrow(cov1)) { if (length(tempel[which(tempel[,1]==j),2])>=1) alteringroup[j] <- sum(cov1[tempel[which(tempel[,1]==j),2],which(colnames(cov1)==blnetevn$c5[i])]) else alteringroup[j] <- 0 } temp <- cbind(cov1[which(colnames(cov1)==blnetevn$c5[i])],cov1[which(colnames(cov1) %in% blnetevn$c5[-i])],cov1[which(colnames(cov1) %in% blnetevn$c4)],deg,bet,eig,lclu,alteringroup) fit1 <- glm(temp, family=binomial) k <- unique(c(k,names(which(summary(fit1)$coefficients[,4]<blnetevn$c6)))) } assign("c8",blnetevn$c4[which(blnetevn$c4 %in% k)],envir=blnetevn) if (length(blnetevn$c8)>0) { g <- c() gm <- list() for (i in 1:length(blnetevn$c5)) { g <- c(g,which(names(blnetevn$cov)==blnetevn$c5[i])) gm[[i]] <- matrix(rep(0,nrow(blnetevn$cov)*nrow(blnetevn$cov)),nrow=nrow(blnetevn$cov)) } for (l in 1:length(blnetevn$c5)) { tempgm <- cbind(c(1:nrow(blnetevn$cov)),cov1[,g[l]]) tempgm <- tempgm[which(tempgm[,2]==1),] tempgm <- t(combn(tempgm[,1],2)) tempgm <- network(tempgm,directed=FALSE) tempgm1 <- nrow(blnetevn$cov)-network.size(tempgm) if (tempgm1>0) add.vertices(tempgm,tempgm1) gm[[l]] <- as.matrix(tempgm) } rm(i,l) for (l in 1:length(blnetevn$c5)) { n <- network(gm[[l]]) emf <- "n~" for (m in 1:length(blnetevn$c8)) { k1 <- cov1[,which(names(cov1)==blnetevn$c8[m])] k1[which(is.na(k1)==TRUE)] <- round(mean(k1,na.rm=TRUE)) n %v% blnetevn$c8[m] <- k1 if (blnetevn$c8[m] %in% blnetevn$c7) { emf <- paste(emf,"+nodematch(","\'",blnetevn$c8[m],"\'",")",sep="") } else { emf <- paste(emf,"+absdiff(","\'",blnetevn$c8[m],"\'",")",sep="") } } o <- setdiff(union(l,1:length(blnetevn$c5)),intersect(l,1:length(blnetevn$c5))) if (nrow(cov1)<=500) { for (m in 1:length(o)) emf <- paste(emf,"+edgecov(gm[[",o[m],"]])",sep="") emf <- paste(emf,"+edgecov(as.matrix(network(tempel)))",sep="") } emf <- paste(substr(emf, 1, 2), substr(emf, 4, nchar(emf)), sep='') em <- ergm(as.formula(emf)) blnetevn$c9 <- union(blnetevn$c9,blnetevn$c8[which(summary(em)$coefs[1:length(blnetevn$c8),4]<blnetevn$c6)]) } assign("c9",blnetevn$c9,envir=blnetevn) } else {assign("c9",character(0),envir=blnetevn)} k <- data.frame(blnetevn$c9) colnames(k) <- "Dimensions" thdlevel <- gwindow("Salient Dimensions",width = 300, height = 400) tg <- ggroup(horizontal = FALSE, cont = thdlevel) dims <- gtable(k, expand = TRUE, cont = tg) } else { k <- c() for (j in 1:length(blnetevn$c5)) { temp <- cbind(cov1[which(colnames(cov1)==blnetevn$c5[j])],cov1[which(colnames(cov1) %in% blnetevn$c5[-j])],cov1[which(colnames(cov1) %in% blnetevn$c4)]) fit1 <- glm(temp, family=binomial) k <- unique(c(k,names(which(summary(fit1)$coefficients[,4]<blnetevn$c6)))) } assign("c8",blnetevn$c4[which(blnetevn$c4 %in% k)],envir=blnetevn) if (length(blnetevn$c8)>0) { g <- c() gm <- list() for (i in 1:length(blnetevn$c5)) { g <- c(g,which(names(blnetevn$cov)==blnetevn$c5[i])) gm[[i]] <- 
matrix(rep(0,nrow(blnetevn$cov)*nrow(blnetevn$cov)),nrow=nrow(blnetevn$cov)) } for (l in 1:length(blnetevn$c5)) { tempgm <- cbind(c(1:nrow(blnetevn$cov)),cov1[,g[l]]) tempgm <- tempgm[which(tempgm[,2]==1),] tempgm <- t(combn(tempgm[,1],2)) tempgm <- network(tempgm,directed=FALSE) tempgm1 <- nrow(blnetevn$cov)-network.size(tempgm) if (tempgm1>0) add.vertices(tempgm,tempgm1) gm[[l]] <- as.matrix(tempgm) } rm(i,l) for (l in 1:length(blnetevn$c5)) { n <- network(gm[[l]]) emf <- "n~" for (m in 1:length(blnetevn$c8)) { k1 <- cov1[,which(names(cov1)==blnetevn$c8[m])] k1[which(is.na(k1)==TRUE)] <- round(mean(k1,na.rm=TRUE)) n %v% blnetevn$c8[m] <- k1 if (blnetevn$c8[m] %in% blnetevn$c7) { emf <- paste(emf,"+nodematch(","\'",blnetevn$c8[m],"\'",")",sep="") } else { emf <- paste(emf,"+absdiff(","\'",blnetevn$c8[m],"\'",")",sep="") } } o <- setdiff(union(l,1:length(blnetevn$c5)),intersect(l,1:length(blnetevn$c5))) if (nrow(cov1)<=500) { for (m in 1:length(o)) emf <- paste(emf,"+edgecov(gm[[",o[m],"]])",sep="") } emf <- paste(substr(emf, 1, 2), substr(emf, 4, nchar(emf)), sep='') em <- ergm(as.formula(emf)) blnetevn$c9 <- union(blnetevn$c9,blnetevn$c8[which(summary(em)$coefs[1:length(blnetevn$c8),4]<blnetevn$c6)]) } assign("c9",blnetevn$c9,envir=blnetevn) } else {assign("c9",character(0),envir=blnetevn)} k <- data.frame(blnetevn$c9) colnames(k) <- "Dimensions" thdlevel <- gwindow("Salient Dimensions",width = 300, height = 400) tg <- ggroup(horizontal = FALSE, cont = thdlevel) dims <- gtable(k, expand = TRUE, cont = tg) } }) } } else { if (length(blnetevn$c2)!=0) { seclevel <- gwindow("Salient Dimension Options", width=600, height=600, parent = window) secg <- ggroup(cont = seclevel, use.scrollwindow=T, horizontal = FALSE) secgv <- gvbox(cont = secg) tbl <- gformlayout(cont = secgv) gradio(c("all",as.matrix(unique(blnetevn$cov[which(colnames(blnetevn$cov)==blnetevn$c2)]))), cont = tbl, selected = 1, label="Please select which ecology you want to identify salient dimensions:", handler = function(h,...){ assign("c3",svalue(h$obj),envir=blnetevn) }) gcheckboxgroup(blnetevn$c4, cont = tbl, label="Please identify categorical variables:", handler = function(h,...){ assign("c7",svalue(h$obj),envir=blnetevn) }) button <- gbutton("Continue", expand = FALSE, cont = secg, handler = function(h, ...) 
{ dispose(seclevel) if (blnetevn$c3=="all") { cov1 <- blnetevn$cov cov1[is.na(cov1)] <- 0 if ('adj' %in% ls(envir=blnetevn)) adj1 <- blnetevn$adj } else { cov1 <- blnetevn$cov[which(blnetevn$cov[which(colnames(blnetevn$cov)==blnetevn$c2)]==blnetevn$c3),] cov1[is.na(cov1)] <- 0 if ('adj' %in% ls(envir=blnetevn)) adj1 <- blnetevn$adj[which(blnetevn$cov[which(colnames(blnetevn$cov)==blnetevn$c2)]==blnetevn$c3),which(blnetevn$cov[which(colnames(blnetevn$cov)==blnetevn$c2)]==blnetevn$c3)] } if ('el' %in% ls(envir=blnetevn)) { n <- network(adj1) emf <- "n~edges+mutual" for (m in 1:length(blnetevn$c4)) { k1 <- cov1[,which(names(cov1)==blnetevn$c4[m])] k1[which(is.na(k1)==TRUE)] <- round(mean(k1,na.rm=TRUE)) n %v% blnetevn$c4[m] <- k1 if (blnetevn$c4[m] %in% blnetevn$c7) { emf <- paste(emf,"+nodematch(","\'",blnetevn$c4[m],"\'",")",sep="") } else { emf <- paste(emf,"+absdiff(","\'",blnetevn$c4[m],"\'",")",sep="") } } em <- ergm(as.formula(emf)) blnetevn$c9 <- blnetevn$c4[which(summary(em)$coefs[3:(length(blnetevn$c4)+2),4]<blnetevn$c6)] assign("c9",blnetevn$c9,envir=blnetevn) k <- data.frame(blnetevn$c9) colnames(k) <- "Dimensions" thdlevel <- gwindow("Salient Dimensions",width = 300, height = 400) tg <- ggroup(horizontal = FALSE, cont = thdlevel) dims <- gtable(k, expand = TRUE, cont = tg) } }) } else { seclevel <- gwindow("Salient Dimension Options", width=600, height=600, parent = window) secg <- ggroup(cont = seclevel, use.scrollwindow=T, horizontal = FALSE) tbl <- glayout(cont = secg) j <- 1 tbl[j,1] <- "Please identify categorical variables:" tbl[j,2] <- gcheckboxgroup(blnetevn$c4, handler = function(h,...){ assign("c7",svalue(h$obj),envir=blnetevn) }) j <- j + 1 rm(j) button <- gbutton("Continue", expand = FALSE, cont = secg, handler = function(h, ...) { dispose(seclevel) if ('el' %in% ls(envir=blnetevn)) { adj1 <- blnetevn$adj n <- network(as.matrix(adj1)) emf <- "n~edges+mutual" for (m in 1:length(blnetevn$c4)) { k1 <- cov1[,which(names(cov1)==blnetevn$c4[m])] k1[which(is.na(k1)==TRUE)] <- round(mean(k1,na.rm=TRUE)) n %v% blnetevn$c4[m] <- k1 if (blnetevn$c4[m] %in% blnetevn$c7) { emf <- paste(emf,"+nodematch(","\'",blnetevn$c4[m],"\'",")",sep="") } else { emf <- paste(emf,"+absdiff(","\'",blnetevn$c4[m],"\'",")",sep="") } } em <- ergm(as.formula(emf)) blnetevn$c9 <- blnetevn$c4[which(summary(em)$coefs[3:(length(blnetevn$c4)+2),4]<blnetevn$c6)] assign("c9",blnetevn$c9,envir=blnetevn) k <- data.frame(blnetevn$c9) colnames(k) <- "Dimensions" thdlevel <- gwindow("Salient Dimensions",width = 300, height = 400) tg <- ggroup(horizontal = FALSE, cont = thdlevel) dims <- gtable(k, expand = TRUE, cont = tg) } }) } } } }})} }
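# Sketch of the formula-assembly idiom used throughout showdimensions(): terms
# are appended to "n~" as "+nodematch('x')" or "+absdiff('y')", and the stray
# leading "+" (character 3) is then dropped before as.formula(). The dimension
# names here are hypothetical; if (FALSE) keeps sourcing inert.
if (FALSE) {
  emf <- "n~"
  emf <- paste(emf, "+nodematch('sex')", sep = "")
  emf <- paste(emf, "+absdiff('age')", sep = "")
  emf <- paste(substr(emf, 1, 2), substr(emf, 4, nchar(emf)), sep = "")
  emf  # "n~nodematch('sex')+absdiff('age')"
}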
# ---- end of inst/scripts/dimensions.R ----
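# inst/scripts/dynamics.R (below) bins two Blau dimensions into an m-by-n grid;
# cc_calc() tabulates, per group, the share of that group's niche occupants in
# each cell, mr_calc() does the same for actual members, and ie = mr - cc
# contrasts realized membership with carrying capacity. A minimal sketch of the
# per-cell tabulation with hypothetical data; if (FALSE) keeps sourcing inert.
if (FALSE) {
  set.seed(1)
  k <- data.frame(d1 = sample(1:3, 50, replace = TRUE),  # binned dimension 1 (m = 3)
                  d2 = sample(1:2, 50, replace = TRUE),  # binned dimension 2 (n = 2)
                  in_niche = rbinom(50, 1, 0.4))
  tab <- table(k$d1[k$in_niche == 1], k$d2[k$in_niche == 1])
  tab / sum(tab)  # cell shares, analogous to cc_calc's k2[f, g] / sum(k2)
}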
showdynamics <- function(h,...) { if ("cov" %in% ls(envir=blnetevn)==FALSE) {gmessage("Sorry! Attribute file is not loaded.", parent = window)} else { assign("dy1",character(0),envir=blnetevn) assign("dy2",character(0),envir=blnetevn) assign("dy3","",envir=blnetevn) assign("dy4",character(0),envir=blnetevn) assign("dy5",character(0),envir=blnetevn) assign("dy7",character(0),envir=blnetevn) assign("dy8","FALSE",envir=blnetevn) assign("dy9","all",envir=blnetevn) assign("m2",names(blnetevn$cov),envir=blnetevn) assign("m3",names(blnetevn$cov),envir=blnetevn) toplevel <- gwindow("Niche Dynamics", width=800, height=800, parent = window, visible=FALSE) cg <- ggroup(horizontal = TRUE,cont = toplevel) tbl0 <- gtable(blnetevn$m2,expand=TRUE,multiple=TRUE,cont=cg) cg1 <- ggroup(horizontal = FALSE, cont = cg) addSpring(cg1) gbplus1 <- gbutton("+", cont=cg1) gbminus1 <- gbutton("-", cont=cg1) addSpring(cg1) addSpring(cg1) gbplus2 <- gbutton("+", cont=cg1) gbminus2 <- gbutton("-", cont=cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) gbplus4 <- gbutton("+", cont=cg1) gbminus4 <- gbutton("-", cont=cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) gbplus5 <- gbutton("+", cont=cg1) gbminus5 <- gbutton("-", cont=cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) gbplus7 <- gbutton("+", cont=cg1) gbminus7 <- gbutton("-", cont=cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) cg2 <- gframe("Options", horizontal=FALSE, cont=cg) dy1temp <- data.frame(Node.ids="",stringsAsFactors=FALSE) dy2temp <- data.frame(Ecology.ids="",stringsAsFactors=FALSE) dy4temp <- data.frame(Dimensions=rep("",length(blnetevn$m3)),stringsAsFactors=FALSE) dy5temp <- data.frame(Groups=rep("",length(blnetevn$m3)),stringsAsFactors=FALSE) dy7temp <- data.frame(Weights="",stringsAsFactors=FALSE) tbl1 <- gtable(dy1temp,expand=TRUE,multiple=FALSE,cont=cg2) size(tbl1)[2] <- 50 tbl2 <- gtable(dy2temp,expand=TRUE,multiple=FALSE,cont=cg2) size(tbl2)[2] <- 50 if ('el' %in% ls(envir=blnetevn)) { gcheckboxgroup("Network included",cont=cg2,handler = function(h,...) assign("dy3",svalue(h$obj),envir=blnetevn)) } tbl4 <- gtable(dy4temp,expand=TRUE,multiple=TRUE,cont=cg2) size(tbl4)[2] <- 120 tbl5 <- gtable(dy5temp,expand=TRUE,multiple=TRUE,cont=cg2) size(tbl5)[2] <- 120 tbl7 <- gtable(dy7temp,expand=TRUE,multiple=FALSE,cont=cg2) size(tbl7)[2] <- 50 glabel("Complete.cases",cont=cg2) gradio(c("TRUE","FALSE"), selected = 2, cont = cg2, handler = function(h,...) assign("dy8",svalue(h$obj),envir=blnetevn)) addHandlerClicked(gbplus1, handler = function(h,...) { temp <- svalue(tbl0) if ("" %in% temp==FALSE & tbl1[1]=="" & length(temp)==1) { assign("m2",blnetevn$m3[which(blnetevn$m3 %in% setdiff(blnetevn$m2,temp))],envir=blnetevn) km2 <- c(blnetevn$m2,rep("",length(blnetevn$m3)-length(blnetevn$m2))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km2[i] assign("dy1",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$dy1,temp))],envir=blnetevn) tbl1[1] <- blnetevn$dy1 } }) addHandlerClicked(gbminus1, handler = function(h,...) { temp <- svalue(tbl1) if ("" %in% temp==FALSE & length(temp)==1) { assign("m2",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$m2,temp))],envir=blnetevn) km2 <- c(blnetevn$m2,rep("",length(blnetevn$m3)-length(blnetevn$m2))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km2[i] assign("dy1",character(0),envir=blnetevn) tbl1[1] <- "" } }) addHandlerClicked(gbplus2, handler = function(h,...) 
{ temp <- svalue(tbl0) if ("" %in% temp==FALSE & tbl2[1]=="" & length(temp)==1) { assign("m2",blnetevn$m3[which(blnetevn$m3 %in% setdiff(blnetevn$m2,temp))],envir=blnetevn) km2 <- c(blnetevn$m2,rep("",length(blnetevn$m3)-length(blnetevn$m2))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km2[i] assign("dy2",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$dy2,temp))],envir=blnetevn) tbl2[1] <- blnetevn$dy2 } }) addHandlerClicked(gbminus2, handler = function(h,...) { temp <- svalue(tbl2) if ("" %in% temp==FALSE & length(temp)==1) { assign("m2",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$m2,temp))],envir=blnetevn) km2 <- c(blnetevn$m2,rep("",length(blnetevn$m3)-length(blnetevn$m2))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km2[i] assign("dy2",character(0),envir=blnetevn) tbl2[1] <- "" } }) addHandlerClicked(gbplus4, handler = function(h,...) { temp <- svalue(tbl0) if ("" %in% temp==FALSE & length(temp)>0) { assign("m2",blnetevn$m3[which(blnetevn$m3 %in% setdiff(blnetevn$m2,temp))],envir=blnetevn) km2 <- c(blnetevn$m2,rep("",length(blnetevn$m3)-length(blnetevn$m2))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km2[i] assign("dy4",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$dy4,temp))],envir=blnetevn) kd4 <- c(blnetevn$dy4,rep("",length(blnetevn$m3)-length(blnetevn$dy4))) for (j in 1:length(blnetevn$m3)) tbl4[j] <- kd4[j] } }) addHandlerClicked(gbminus4, handler = function(h,...) { temp <- svalue(tbl4) if ("" %in% temp==FALSE & length(temp)>0) { assign("m2",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$m2,temp))],envir=blnetevn) km2 <- c(blnetevn$m2,rep("",length(blnetevn$m3)-length(blnetevn$m2)+1)) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km2[i] assign("dy4",blnetevn$m3[which(blnetevn$m3 %in% setdiff(blnetevn$dy4,temp))],envir=blnetevn) kd4 <- c(blnetevn$dy4,rep("",length(blnetevn$m3)-length(blnetevn$dy4)+1)) for (j in 1:length(blnetevn$m3)) tbl4[j] <- kd4[j] } }) addHandlerClicked(gbplus5, handler = function(h,...) { temp <- svalue(tbl0) if ("" %in% temp==FALSE & length(temp)>0) { assign("m2",blnetevn$m3[which(blnetevn$m3 %in% setdiff(blnetevn$m2,temp))],envir=blnetevn) km2 <- c(blnetevn$m2,rep("",length(blnetevn$m3)-length(blnetevn$m2))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km2[i] assign("dy5",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$dy5,temp))],envir=blnetevn) kd5 <- c(blnetevn$dy5,rep("",length(blnetevn$m3)-length(blnetevn$dy5))) for (j in 1:length(blnetevn$m3)) tbl5[j] <- kd5[j] } }) addHandlerClicked(gbminus5, handler = function(h,...) { temp <- svalue(tbl5) if ("" %in% temp==FALSE & length(temp)>0) { assign("m2",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$m2,temp))],envir=blnetevn) km2 <- c(blnetevn$m2,rep("",length(blnetevn$m3)-length(blnetevn$m2)+1)) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km2[i] assign("dy5",blnetevn$m3[which(blnetevn$m3 %in% setdiff(blnetevn$dy5,temp))],envir=blnetevn) kd5 <- c(blnetevn$dy5,rep("",length(blnetevn$m3)-length(blnetevn$dy5)+1)) for (j in 1:length(blnetevn$m3)) tbl5[j] <- kd5[j] } }) addHandlerClicked(gbplus7, handler = function(h,...) { temp <- svalue(tbl0) if ("" %in% temp==FALSE & tbl7[1]=="" & length(temp)==1) { assign("m2",blnetevn$m3[which(blnetevn$m3 %in% setdiff(blnetevn$m2,temp))],envir=blnetevn) km2 <- c(blnetevn$m2,rep("",length(blnetevn$m3)-length(blnetevn$m2))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km2[i] assign("dy7",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$dy7,temp))],envir=blnetevn) tbl7[1] <- blnetevn$dy7 } }) addHandlerClicked(gbminus7, handler = function(h,...) 
{ temp <- svalue(tbl7) if ("" %in% temp==FALSE & length(temp)==1) { assign("m2",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$m2,temp))],envir=blnetevn) km2 <- c(blnetevn$m2,rep("",length(blnetevn$m3)-length(blnetevn$m2))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km2[i] assign("dy7",character(0),envir=blnetevn) tbl7[1] <- "" } }) visible(toplevel) <- TRUE gbutton("Continue", cont = cg2, width=20, handler = function(h, ...) { if (length(blnetevn$dy1)==0 | length(blnetevn$dy4)==0 | length(blnetevn$dy5)==0) {gmessage("Missing required information.", parent = toplevel)} else { if (length(blnetevn$dy7)>0) tmpweight <- blnetevn$dy7 else tmpweight <- NULL cc_calc <- function(x,y) { cc <- data.frame(matrix(rep(0,blnetevn$m1*blnetevn$n1*(length(y)+3)),nrow=blnetevn$m1*blnetevn$n1)) names(cc)[1:2] <- blnetevn$dy4 names(cc)[3:(length(y)+2)] <- paste("cc",y,"_niche",sep="") names(cc)[length(y)+3] <- "meancc" cc[,1] <- rep(1:blnetevn$m1,each=blnetevn$n1) cc[,2] <- rep(1:blnetevn$n1,blnetevn$m1) for (i in 1:length(y)) { k1 <- x[which(x[,i+3]==1),c(1:3,i+3)] k2 <- table(k1[,2],k1[,3]) for (f in 1:(nrow(k2))) { for (g in 1:(ncol(k2))) { o <- which(cc[,1]==as.numeric(rownames(k2)[f]) & cc[,2]==as.numeric(colnames(k2)[g])) cc[o,(i+2)] <- k2[f,g]/sum(k2) } } } cc[,(length(y)+3)] <- rowMeans(cc[,3:(length(y)+2)]) return(cc) } mr_calc <- function(x,y) { mr <- data.frame(matrix(rep(0,blnetevn$m1*blnetevn$n1*(length(y)+3)),nrow=blnetevn$m1*blnetevn$n1)) names(mr)[1:2] <- blnetevn$dy4 names(mr)[3:(length(y)+2)] <- paste("mr",y,sep="") names(mr)[length(y)+3] <- "meanmr" mr[,1] <- rep(1:blnetevn$m1,each=blnetevn$n1) mr[,2] <- rep(1:blnetevn$n1,blnetevn$m1) for (i in 1:length(y)) { k1 <- x[which(x[,i+length(y)+3]==1),c(1:3,i+length(y)+3)] k2 <- table(k1[,2],k1[,3]) for (f in 1:(nrow(k2))) { for (g in 1:(ncol(k2))) { o <- which(mr[,1]==as.numeric(rownames(k2)[f]) & mr[,2]==as.numeric(colnames(k2)[g])) mr[o,(i+2)] <- k2[f,g]/sum(k2) } } } mr[,(length(y)+3)] <- rowMeans(mr[,3:(length(y)+2)]) return(mr) } dispose(toplevel) assign("dy6",rep(1.5,length(blnetevn$dy4)),envir=blnetevn) extralevel <- gwindow("Dev.range", width=800, height=300) ge <- gpanedgroup(cont = extralevel, horizontal = FALSE) cge <- ggroup(cont = ge, horizontal = FALSE) for (da in 1:length(blnetevn$dy4)) { glabel(blnetevn$dy4[da],cont=cge) assign(paste0("dy6da", da),gslider(from = 0, to = 5, by = .05, value = 1.5, cont=cge, handler = function(h,...) svalue(get(paste0("dy6da", da))) )) } addSpring(cge) addSpring(cge) addSpring(cge) button <- gbutton("Continue", cont = cge, handler = function(h, ...) 
{ dy6tmp <- c() for (da in 1:length(blnetevn$dy4)) { dy6tmp <- c(dy6tmp,svalue(get(paste0("dy6da", da)))) } assign("dy6",dy6tmp,envir=blnetevn) dispose(extralevel) if (length(blnetevn$dy4)==2) { single <- function(attr) { b <- blau(attr, node.ids=blnetevn$dy1, dimension=blnetevn$dy4, memberships=blnetevn$dy5,weights=tmpweight,complete.cases=blnetevn$dy8) b <- niches(b, dev.range = blnetevn$dy6) assign("bobj",b,envir=blnetevn) k <- data.frame(cbind(b$ids$nodeId,b$dimensions,b$isInNiche,b$memberships)) names(k)[1] <- "nodeId" k[,2] <- as.numeric(k[,2]) k[,3] <- as.numeric(k[,3]) ccw <- gwindow("Dimension Category Selection", width=600, height=700, parent = window) cg1 <- ggroup(cont = ccw, use.scrollwindow=T, horizontal = FALSE) dd1 <- data.frame(table(k[,2])) colnames(dd1)[1] <- blnetevn$dy4[1] #if (class(dd1[,1])!="numeric") dd1 <- dd1[order(grepl("^\\d+$", dd1[,1]), sprintf("%10s", dd1[,1])),] dd2 <- data.frame(table(k[,3])) colnames(dd2)[1] <- blnetevn$dy4[2] #if (class(dd2[,1])!="numeric") dd2 <- dd2[order(grepl("^\\d+$", dd2[,1]), sprintf("%10s", dd2[,1])),] glabel(paste("This is the frequency table for ",blnetevn$dy4[1],".",sep=""),cont=cg1) dim1 <- gdf(dd1, expand = TRUE, fill=TRUE, cont = cg1) glabel(paste("Please slide the bar below to set the categories for dimension ",blnetevn$dy4[1],".",sep=""),cont=cg1) assign("m1",1,envir=blnetevn) assign("n1",1,envir=blnetevn) gslider(from = 1, to = max(k[,2],na.rm = TRUE)-min(k[,2],na.rm = TRUE)+1, by = 1, value = 1, cont=cg1, handler = function(h,...){ assign("m1",svalue(h$obj),envir=blnetevn) }) gseparator(cont = cg1) gdim2 <- glabel(paste("This is the frequency table for ",blnetevn$dy4[2],".",sep=""),cont=cg1) dim2 <- gdf(dd2, expand = TRUE, fill=TRUE, cont = cg1) glabel(paste("Please slide the bar below to set the categories for dimension ",blnetevn$dy4[2],".",sep=""),cont=cg1) gslider(from = 1, to = max(k[,3],na.rm = TRUE)-min(k[,3],na.rm = TRUE)+1, by = 1, value = 1, cont=cg1, handler = function(h,...){ assign("n1",svalue(h$obj),envir=blnetevn) }) gseparator(cont = cg1) gbutton("Continue", cont = cg1, width=20, handler = function(h, ...){ dispose(ccw) x <- seq(min(k[,2],na.rm = TRUE),max(k[,2],na.rm = TRUE),by = (max(k[,2],na.rm = TRUE)-min(k[,2],na.rm = TRUE))/(blnetevn$m1-1)) y <- seq(min(k[,3],na.rm = TRUE),max(k[,3],na.rm = TRUE),by = (max(k[,3],na.rm = TRUE)-min(k[,3],na.rm = TRUE))/(blnetevn$n1-1)) k[,2] <- cut(k[,2],b=blnetevn$m1,label=c(1:blnetevn$m1)) k[,3] <- cut(k[,3],b=blnetevn$n1,label=c(1:blnetevn$n1)) cc <- cc_calc(k,blnetevn$dy5) mr <- mr_calc(k,blnetevn$dy5) ie <- cbind(cc[,1],cc[,2],mr[,(length(blnetevn$dy5)+3)]-cc[,(length(blnetevn$dy5)+3)]) ie_poly <- lm(ie[,3]~ie[,1]+I(ie[,1]^2)+I(ie[,1]^3)+ie[,2]+I(ie[,2]^2)+I(ie[,2]^3)+ie[,1]*ie[,2]) ie <- cbind(ie,predict(ie_poly)) final <- data.frame(cbind(cc,mr[,3:ncol(mr)],ie[,3:4])) colnames(final)[ncol(final)-1] <- "ie" colnames(final)[ncol(final)] <- "ie_poly" trdlevel <- gwindow("Plot...", width=580, height=30) g2 <- gpanedgroup(cont = trdlevel, horizontal = TRUE) cg2 <- ggroup(cont = g2, horizontal = TRUE) button1 <- gbutton("Plot Carrying Capacity", cont = cg2, width=20, handler = function(h, ...) 
{ x1 <- x2 <- y1 <- y2 <- z1 <- z2 <- rep(0,length(blnetevn$dy5)) for (i in 1:length(blnetevn$dy5)) { x1[i] <- x2[i] <- mean(attr[which(attr[,blnetevn$dy5[i]]==1),blnetevn$dy4[1]],na.rm = TRUE) y1[i] <- y2[i] <- mean(attr[which(attr[,blnetevn$dy5[i]]==1),blnetevn$dy4[2]],na.rm = TRUE) z1[i] <- min(cc[,(3+length(blnetevn$dy5))]) z2[i] <- max(cc[,(3+length(blnetevn$dy5))]) } z <- matrix(cc[,(3+length(blnetevn$dy5))],nrow=length(x)) cc3Drgl <- function() { nbcol = blnetevn$m1*blnetevn$n1 color = rev(rainbow(nbcol, start = 0/6, end = 4/6)) zcol = cut(cc[,(3+length(blnetevn$dy5))], nbcol) persp3d(x,y,z, xlab = blnetevn$dy4[1], ylab = blnetevn$dy4[2], zlab = "Carrying capacity", col=color[zcol]) par3d(windowRect = c(900, 50, 1500, 650)) scatter3Drgl(x2,y2,z2,dev = rgl.cur(), col = "red") rgl.close() scatter3Drgl(x2,y2,z2,add = TRUE, col = "red") scatter3Drgl(x1,y1,z1,add = TRUE, col = "red") segments3Drgl(x1,y1,z1,x2,y2,z2,add = TRUE,col="red") text3Drgl(x2, y2, z2, labels=blnetevn$dy5,add = TRUE, col = "red", cex=0.9) } cc3Drgl() }) addSpace(cg2, 5) button2 <- gbutton("Plot Membership Rate", cont = cg2, width=20, handler = function(h, ...) { x1 <- x2 <- y1 <- y2 <- z1 <- z2 <- rep(0,length(blnetevn$dy5)) for (i in 1:length(blnetevn$dy5)) { x1[i] <- x2[i] <- mean(attr[which(attr[,blnetevn$dy5[i]]==1),blnetevn$dy4[1]],na.rm = TRUE) y1[i] <- y2[i] <- mean(attr[which(attr[,blnetevn$dy5[i]]==1),blnetevn$dy4[2]],na.rm = TRUE) z1[i] <- min(mr[,(3+length(blnetevn$dy5))]) z2[i] <- max(mr[,(3+length(blnetevn$dy5))]) } z <- matrix(mr[,(3+length(blnetevn$dy5))],nrow=length(x)) mr3Drgl <- function() { nbcol = blnetevn$m1*blnetevn$n1 color = rev(rainbow(nbcol, start = 0/6, end = 4/6)) zcol = cut(mr[,(3+length(blnetevn$dy5))], nbcol) persp3d(x,y,z, col=color[zcol], xlab = blnetevn$dy4[1], ylab = blnetevn$dy4[2], zlab = "Membership rate") par3d(windowRect = c(900, 50, 1500, 650)) scatter3Drgl(x2,y2,z2,dev = rgl.cur(), col = "red") rgl.close() scatter3Drgl(x2,y2,z2,add = TRUE, col = "red") scatter3Drgl(x1,y1,z1,add = TRUE, col = "red") segments3Drgl(x1,y1,z1,x2,y2,z2,add = TRUE,col="red") text3Drgl(x2, y2, z2, labels=blnetevn$dy5,add = TRUE, col = "red", cex=0.9) } mr3Drgl() }) addSpace(cg2, 5) button3 <- gbutton("Plot Intensity of Exploitation", cont = cg2, width=20, handler = function(h, ...) 
{ x1 <- x2 <- y1 <- y2 <- z1 <- z2 <- rep(0,length(blnetevn$dy5)) for (i in 1:length(blnetevn$dy5)) { x1[i] <- x2[i] <- mean(attr[which(attr[,blnetevn$dy5[i]]==1),blnetevn$dy4[1]],na.rm = TRUE) y1[i] <- y2[i] <- mean(attr[which(attr[,blnetevn$dy5[i]]==1),blnetevn$dy4[2]],na.rm = TRUE) z1[i] <- min(ie[,4]) z2[i] <- max(ie[,4]) } z <- matrix(ie[,4],nrow=length(x)) ie3Drgl <- function() { nbcol = blnetevn$m1*blnetevn$n1 color = rev(rainbow(nbcol, start = 0/6, end = 4/6)) zcol = cut(ie[,3], nbcol) persp3d(x,y,z, col=color[zcol], xlab = blnetevn$dy4[1], ylab = blnetevn$dy4[2], zlab = "Intensity of exploitation") par3d(windowRect = c(900, 50, 1500, 650)) scatter3Drgl(x2,y2,z2,dev = rgl.cur(), col = "red") rgl.close() scatter3Drgl(x2,y2,z2,add = TRUE, col = "red") scatter3Drgl(x1,y1,z1,add = TRUE, col = "red") segments3Drgl(x1,y1,z1,x2,y2,z2,add = TRUE,col="red") text3Drgl(x2, y2, z2, labels=blnetevn$dy5,add = TRUE, col = "red", cex=0.9) } ie3Drgl() ithlevel <- gwindow("Intensity of Exploitation Equation", width = 1100, height = 100) ig <- ggroup(cont = ithlevel, horizontal = F, expand=T) polyf <- data.frame(matrix(ie_poly$coefficients,nrow=1)) names(polyf) <- c("Intercept",blnetevn$dy4[1],paste(blnetevn$dy4[1],"^2",sep=""),paste(blnetevn$dy4[1],"^3",sep=""), blnetevn$dy4[2],paste(blnetevn$dy4[2],"^2",sep=""),paste(blnetevn$dy4[2],"^3",sep=""),paste(blnetevn$dy4[1],"*",blnetevn$dy4[2],sep="")) gdf(polyf,expand=TRUE,fill=TRUE,cont=ig) }) addSpace(cg2, 5) button4 <- gbutton("Show Table", cont = cg2, width=20, handler = function(h, ...) { fourthlevel <- gwindow("Table for Carrying Capacity, Membership Rate, & Intensity of Exploitation",width = 800, height = 600) ng4 <- ggroup(horizontal = FALSE, cont = fourthlevel) button1 <- gbutton("Save as csv file: cc&mr&ie.csv", expand = FALSE, cont = ng4, handler = function(h, ...) { write.table(final, "cc&mr&ie.csv", row.names=F, col.names=T, sep=",") }) button2 <- gbutton("Save as R file: cc&mr&ie.Rdata", expand = FALSE, cont = ng4, handler = function(h, ...) { save(final, file="cc&mr&ie.Rdata") }) button3 <- gbutton("Save as SAS file: cc&mr&ie.txt & cc&mr&ie.sas", expand = FALSE, cont = ng4, handler = function(h, ...) { write.foreign(final, "cc&mr&ie.txt", "cc&mr&ie.sas", package="SAS") }) button4 <- gbutton("Save as Stata file: cc&mr&ie.dta", expand = FALSE, cont = ng4, handler = function(h, ...) { write.dta(final, ("cc&mr&ie.dta")) }) button5 <- gbutton("Save as SPSS file: cc&mr&ie.txt & cc&mr&ie.sps", expand = FALSE, cont = ng4, handler = function(h, ...) { write.foreign(final, "cc&mr&ie.txt", "cc&mr&ie.sps", package="SPSS") }) gseparator(cont = ng4) vars <- gdf(final, expand = TRUE, fill=TRUE, cont = ng4) }) }) } if (length(blnetevn$dy2)==0) { single(blnetevn$cov) } if (length(blnetevn$dy2)>0) { if (nrow(unique(blnetevn$cov[which(colnames(blnetevn$cov)==blnetevn$dy2)]))==1) { single(blnetevn$cov) } else if (nrow(unique(blnetevn$cov[which(colnames(blnetevn$cov)==blnetevn$dy2)]))>1) { dylevel <- gwindow("Niche Dynamics Options", width=600, height=600, parent = window) dyg <- ggroup(cont = dylevel, use.scrollwindow=T, horizontal = FALSE) tbl <- glayout(cont = dyg) glabel("Please select which ecology you want to test niche dynamics:", cont=dyg) gradio(c("all",as.matrix(unique(blnetevn$cov[which(colnames(blnetevn$cov)==blnetevn$dy2)]))), check=1, cont=dyg, handler = function(h,...){ assign("dy9",svalue(h$obj),envir=blnetevn) }) buttondy <- gbutton("Continue", expand = FALSE, cont = dyg, handler = function(h, ...) 
{ dispose(dylevel) if (blnetevn$dy9!="all") { cov1 <- blnetevn$cov[which(blnetevn$cov[,which(names(blnetevn$cov)==blnetevn$dy2)]==blnetevn$dy9),] single(cov1) } else { z1 <- which(names(blnetevn$cov)==blnetevn$dy1) z2 <- which(names(blnetevn$cov)==blnetevn$dy2) z3 <- which(names(blnetevn$cov)==blnetevn$dy4[1]) z4 <- which(names(blnetevn$cov)==blnetevn$dy4[2]) t <- table(blnetevn$cov[z2]) k <- data.frame(cbind(blnetevn$cov[z1],blnetevn$cov[z3],blnetevn$cov[z4])) ccw <- gwindow("Dimension Category Selection", width=600, height=700, parent = window) cg1 <- ggroup(cont = ccw, use.scrollwindow=T, horizontal = FALSE) dd1 <- data.frame(t(table(k[,2])))[,2:3] colnames(dd1)[1] <- blnetevn$dy4[1] dd2 <- data.frame(t(table(k[,3])))[,2:3] colnames(dd2)[1] <- blnetevn$dy4[2] glabel(paste("This is the frequency table for ",blnetevn$dy4[1],".",sep=""),cont=cg1) dim1 <- gdf(dd1, expand = TRUE, fill=TRUE, cont = cg1) glabel(paste("Please slide the bar below to set the categories for dimension ",blnetevn$dy4[1],".",sep=""),cont=cg1) assign("m1",1,envir=blnetevn) assign("n1",1,envir=blnetevn) gslider(from = 1, to = max(k[,2],na.rm = TRUE)-min(k[,2],na.rm = TRUE)+1, by = 1, value = 1, cont=cg1, handler = function(h,...){ assign("m1",svalue(h$obj),envir=blnetevn) }) gseparator(cont = cg1) gdim2 <- glabel(paste("This is the frequency table for ",blnetevn$dy4[2],".",sep=""),cont=cg1) dim2 <- gdf(dd2, expand = TRUE, fill=TRUE, cont = cg1) glabel(paste("Please slide the bar below to set the categories for dimension ",blnetevn$dy4[2],".",sep=""),cont=cg1) gslider(from = 1, to = max(k[,3],na.rm = TRUE)-min(k[,3],na.rm = TRUE)+1, by = 1, value = 1, cont=cg1, handler = function(h,...){ assign("n1",svalue(h$obj),envir=blnetevn) }) gseparator(cont = cg1) gbutton("Continue", cont = cg1, width=20, handler = function(h, ...){ dispose(ccw) nichem1 <- nichem2 <- matrix(rep(0,(length(t)-1)*length(blnetevn$dy5)),nrow=length(t)-1) netop1 <- netop2 <- rightc1 <- rightc2 <- leftc1 <- leftc2 <- matrix(rep(0,length(t)*length(blnetevn$dy5)),nrow=length(t)) for (o in 1:(length(t)-1)) { a1 <- blnetevn$cov[which(blnetevn$cov[,which(names(blnetevn$cov)==blnetevn$dy2)]==names(t)[o]),] a1[,z3] <- as.numeric(cut(a1[,z3],b=blnetevn$m1,label=c(1:blnetevn$m1))) a1[,z4] <- as.numeric(cut(a1[,z4],b=blnetevn$n1,label=c(1:blnetevn$n1))) a2 <- blnetevn$cov[which(blnetevn$cov[,which(names(blnetevn$cov)==blnetevn$dy2)]==names(t)[o+1]),] a2[,z3] <- as.numeric(cut(a2[,z3],b=blnetevn$m1,label=c(1:blnetevn$m1))) a2[,z4] <- as.numeric(cut(a2[,z4],b=blnetevn$n1,label=c(1:blnetevn$n1))) for(p in 1:length(blnetevn$dy5)) { nichem1[o,p] <- mean(a2[which(a2[,p+3]==1),z3],na.rm=T)-mean(a1[which(a1[,p+3]==1),z3],na.rm=T) nichem2[o,p] <- mean(a2[which(a2[,p+3]==1),z4],na.rm=T)-mean(a1[which(a1[,p+3]==1),z4],na.rm=T) } } for (o in 1:length(t)) { a <- blnetevn$cov[which(blnetevn$cov[,which(names(blnetevn$cov)==blnetevn$dy2)]==names(t)[o]),] bo <- blau(a, node.ids=blnetevn$dy1, dimension=blnetevn$dy4, memberships=blnetevn$dy5,weights=tmpweight,complete.cases=blnetevn$dy8) bo <- niches(bo, dev.range = blnetevn$dy6) ko <- data.frame(cbind(bo$ids$nodeId,bo$dimensions,bo$isInNiche,bo$memberships)) cco <- cc_calc(ko,blnetevn$dy5) mro <- mr_calc(ko,blnetevn$dy5) ieo <- data.frame(cbind(cco[,1],cco[,2],mro[,(length(blnetevn$dy5)+3)]-cco[,(length(blnetevn$dy5)+3)])) names(ieo) <- c("x1","x2","y") ie_polyo <- lm(y~x1+I(x1^2)+I(x1^3)+x2+I(x2^2)+I(x2^3)+x1*x2,data=ieo) cuto1 <- seq(from = blnetevn$dy6[1]/10, to = blnetevn$dy6[1], by = blnetevn$dy6[1]/10) cuto2 <- seq(from = blnetevn$dy6[2]/10, to = blnetevn$dy6[2], by = blnetevn$dy6[2]/10) for(p in 1:length(blnetevn$dy5)) { mean1 <- mean(a[which(a[,which(names(a)==blnetevn$dy5[p])]==1),z3],na.rm=T) sd1 <- sd(a[which(a[,which(names(a)==blnetevn$dy5[p])]==1),z3],na.rm=T) mean2 <- mean(a[which(a[,which(names(a)==blnetevn$dy5[p])]==1),z4],na.rm=T) sd2 <- sd(a[which(a[,which(names(a)==blnetevn$dy5[p])]==1),z4],na.rm=T) r1 <- mean1+cuto1*sd1 iep <- data.frame(r1,rep(mean2,length(r1))) names(iep) <- c("x1","x2") rightc1[o,p] <- sum(predict(ie_polyo,iep)) l1 <- mean1-cuto1*sd1 iep <- data.frame(l1,rep(mean2,length(l1))) names(iep) <- c("x1","x2") leftc1[o,p] <- sum(predict(ie_polyo,iep)) netop1[o,p] <- leftc1[o,p]-rightc1[o,p] r2 <- mean2+cuto2*sd2 iep <- data.frame(rep(mean1,length(r2)),r2) names(iep) <- c("x1","x2") rightc2[o,p] <- sum(predict(ie_polyo,iep)) l2 <- mean2-cuto2*sd2 iep <- data.frame(rep(mean1,length(l2)),l2) names(iep) <- c("x1","x2") leftc2[o,p] <- sum(predict(ie_polyo,iep)) netop2[o,p] <- leftc2[o,p]-rightc2[o,p] } } niche_movement <- data.frame(cbind(c(nichem1),c(netop1[1:(length(t)-1),]),c(nichem2),c(netop2[1:(length(t)-1),]))) names(niche_movement)[1] <- "Niche_movement_dim1" names(niche_movement)[2] <- "Niche_opportunity_dim1" names(niche_movement)[3] <- "Niche_movement_dim2" names(niche_movement)[4] <- "Niche_opportunity_dim2" assign("niche_movement",niche_movement,envir=blnetevn) eq3 <- lm(Niche_movement_dim1~Niche_opportunity_dim1,data=niche_movement) eq4 <- lm(Niche_movement_dim2~Niche_opportunity_dim2,data=niche_movement) outeq3 <- paste(capture.output(summary(eq3)), collapse="\n") outeq4 <- paste(capture.output(summary(eq4)), collapse="\n") eqw <- gwindow("Predicted Niche Movement", width=800, height=750, parent = window) eqg1 <- ggroup(cont = eqw, use.scrollwindow=T, horizontal = FALSE) button1 <- gbutton("Save as csv file: niche_movement.csv", expand = FALSE, cont = eqg1, handler = function(h, ...) { write.table(niche_movement, "niche_movement.csv", row.names=F, col.names=T, sep=",") }) button2 <- gbutton("Save as R file: niche_movement.Rdata", expand = FALSE, cont = eqg1, handler = function(h, ...) { save(niche_movement, file="niche_movement.Rdata") }) button3 <- gbutton("Save as SAS file: niche_movement.txt & niche_movement.sas", expand = FALSE, cont = eqg1, handler = function(h, ...) { write.foreign(niche_movement, "niche_movement.txt", "niche_movement.sas", package="SAS") }) button4 <- gbutton("Save as Stata file: niche_movement.dta", expand = FALSE, cont = eqg1, handler = function(h, ...) { write.dta(niche_movement, "niche_movement.dta") }) button5 <- gbutton("Save as SPSS file: niche_movement.txt & niche_movement.sps", expand = FALSE, cont = eqg1, handler = function(h, ...) { write.foreign(niche_movement, "niche_movement.txt", "niche_movement.sps", package="SPSS") }) gseparator(cont = eqg1) glabel(paste("dim1 = ",blnetevn$dy4[1],sep=""),cont=eqg1) gtext(outeq3, cont=eqg1, expand=TRUE, font.attr=list(family="monospace"), height=200) glabel(paste("dim2 = ",blnetevn$dy4[2],sep=""),cont=eqg1) gtext(outeq4, cont=eqg1, expand=TRUE, font.attr=list(family="monospace"), height=200) }) } }) } } } else { gmessage("You must select 2 dimensions.") } }) } }) } }
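## Illustrative wiring sketch (not part of the original file): showdynamics is
## a gWidgets2 handler taking (h, ...), so elsewhere in the GUI it is presumably
## attached to a menu entry or button along the lines of the hypothetical call
## below; the parent `window` and a loaded attribute table in blnetevn$cov are
## assumed to exist already.
# gbutton("Niche Dynamics", cont = window, handler = showdynamics)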
/scratch/gouwar.j/cran-all/cranData/Blaunet/inst/scripts/dynamics.R
showgraph <- function(h,...) { if ("adj" %in% ls(envir=blnetevn)) { n <- network(as.matrix(blnetevn$adj)) assign("g1",0,envir=blnetevn) assign("g2",0,envir=blnetevn) assign("g3",0,envir=blnetevn) assign("g4",0,envir=blnetevn) assign("g5","fruchtermanreingold",envir=blnetevn) toplevel <- gwindow("Plot Graph", width = 400, height = 220, parent = window) cg <- ggroup(cont = toplevel, use.scrollwindow=T, horizontal = FALSE) tbl <- glayout(cont = cg) i <- 1 tbl[i,1] <- gcheckbox("Show vertex name", cont=tbl, label="Show vertex name", handler = function(h,...) { assign("g1",svalue(h$obj),envir=blnetevn) }) i <- i + 1 tbl[i,1] <- "Vertex color by" tbl[i,2] <- gcombobox(c("None",names(blnetevn$cov)), selected = 1, cont = tbl, handler = function(h,...){ if (svalue(h$obj) %in% names(blnetevn$cov)) { assign("g2",which(names(blnetevn$cov) %in% svalue(h$obj)),envir=blnetevn) } else {assign("g2",0,envir=blnetevn)} }) i <- i + 1 tbl[i,1] <- "Vertex side by" tbl[i,2] <- gcombobox(c("None",names(blnetevn$cov)), selected = 1, cont = tbl, handler = function(h,...){ if (svalue(h$obj) %in% names(blnetevn$cov)) { assign("g3",which(names(blnetevn$cov) %in% svalue(h$obj)),envir=blnetevn) } else {assign("g3",0,envir=blnetevn)} }) i <- i + 1 tbl[i,1] <- "Vertex size by" tbl[i,2] <- gcombobox(c("None",names(blnetevn$cov)), selected = 1, cont = tbl, handler = function(h,...){ if (svalue(h$obj) %in% names(blnetevn$cov)) { assign("g4",which(names(blnetevn$cov) %in% svalue(h$obj)),envir=blnetevn) } else {assign("g4",0,envir=blnetevn)} }) i <- i + 1 tbl[i,1] <- "Layout" tbl[i,2] <- gcombobox(c("fruchtermanreingold","kamadakawai","spring","circle","eigen","hall","mds","princoord","target","random"), cont = tbl, handler = function(h,...)assign("g5",svalue(h$obj),envir=blnetevn) ) button <- gbutton("Plot Graph", cont = cg, handler = function(h, ...) { add_legend <- function(...) { par(fig=c(0, 1, 0, 1), oma=c(0, 0, 0, 0),mar=c(0, 0, 0, 0), new=TRUE) plot(0, 0, type='n', bty='n', xaxt='n', yaxt='n') legend(...) 
} if (blnetevn$g2>0) { temp1 <- sort(unique(blnetevn$cov[,blnetevn$g2])) temp2 <- as.character(temp1) temp3 <- rep("",nrow(blnetevn$cov)) for (i in 1:nrow(blnetevn$cov)) temp3[i] <- rainbow(length(temp1))[which(temp1 %in% blnetevn$cov[i,blnetevn$g2])] } if (blnetevn$g3>0) { temp4 <- sort(unique(blnetevn$cov[,blnetevn$g3])) temp5 <- as.character(temp4) temp6 <- rep(0,nrow(blnetevn$cov)) for (i in 1:nrow(blnetevn$cov)) temp6[i] <- which(temp4 %in% blnetevn$cov[i,blnetevn$g3])+2 temp7 <- c("triangle","diamond","pentagon","hexagon","heptagon","octagon","nonagon","decagon","hendecagon","dodecagon")[1:length(temp5)] temp8 <- rep("",length(temp5)) for (i in 1:length(temp5)) temp8[i] <- paste(temp7[i],":",temp5[i],sep="") } oldpar <- par(no.readonly = TRUE) on.exit(par(oldpar)) par(mar = c(5, 4, 1.4, 0.2)) if (blnetevn$g1==FALSE & blnetevn$g2==0 & blnetevn$g3==0 & blnetevn$g4==0) gplot(n,mode=blnetevn$g5) if (blnetevn$g1==TRUE & blnetevn$g2==0 & blnetevn$g3==0 & blnetevn$g4==0) gplot(n, label=network.vertex.names(n),mode=blnetevn$g5) if (blnetevn$g1==TRUE & blnetevn$g2==0 & blnetevn$g3>0 & blnetevn$g4==0) gplot(n, label=network.vertex.names(n), vertex.sides=temp6,vertex.cex=1.5,mode=blnetevn$g5) if (blnetevn$g1==FALSE & blnetevn$g2>0 & blnetevn$g3==0 & blnetevn$g4==0) gplot(n, vertex.col=temp3,mode=blnetevn$g5) if (blnetevn$g1==FALSE & blnetevn$g2>0 & blnetevn$g3>0 & blnetevn$g4==0) gplot(n, vertex.col=temp3,vertex.sides=temp6,vertex.cex=1.5,mode=blnetevn$g5) if (blnetevn$g1==FALSE & blnetevn$g2==0 & blnetevn$g3==0 & blnetevn$g4>0) gplot(n, vertex.cex=blnetevn$cov[,blnetevn$g4]/mean(blnetevn$cov[,blnetevn$g4]),mode=blnetevn$g5) if (blnetevn$g1==FALSE & blnetevn$g2==0 & blnetevn$g3>0 & blnetevn$g4>0) gplot(n, vertex.cex=blnetevn$cov[,blnetevn$g4]/mean(blnetevn$cov[,blnetevn$g4]),vertex.sides=temp6,mode=blnetevn$g5) if (blnetevn$g1==FALSE & blnetevn$g2>0 & blnetevn$g3==0 & blnetevn$g4>0) gplot(n, vertex.col=temp3, vertex.cex=blnetevn$cov[,blnetevn$g4]/mean(blnetevn$cov[,blnetevn$g4]),mode=blnetevn$g5) if (blnetevn$g1==FALSE & blnetevn$g2>0 & blnetevn$g3>0 & blnetevn$g4>0) gplot(n, vertex.col=temp3, vertex.cex=blnetevn$cov[,blnetevn$g4]/mean(blnetevn$cov[,blnetevn$g4]),vertex.sides=temp6,mode=blnetevn$g5) if (blnetevn$g1==TRUE & blnetevn$g2>0 & blnetevn$g3==0 & blnetevn$g4==0) gplot(n, label=network.vertex.names(n), vertex.col=temp3,mode=blnetevn$g5) if (blnetevn$g1==TRUE & blnetevn$g2>0 & blnetevn$g3>0 & blnetevn$g4==0) gplot(n, label=network.vertex.names(n), vertex.col=temp3,vertex.sides=temp6,vertex.cex=1.5,mode=blnetevn$g5) if (blnetevn$g1==TRUE & blnetevn$g2==0 & blnetevn$g3==0 & blnetevn$g4>0) gplot(n, label=network.vertex.names(n), vertex.cex=blnetevn$cov[,blnetevn$g4]/mean(blnetevn$cov[,blnetevn$g4]),mode=blnetevn$g5) if (blnetevn$g1==TRUE & blnetevn$g2==0 & blnetevn$g3>0 & blnetevn$g4>0) gplot(n, label=network.vertex.names(n), vertex.cex=blnetevn$cov[,blnetevn$g4]/mean(blnetevn$cov[,blnetevn$g4]),vertex.sides=temp6,mode=blnetevn$g5) if (blnetevn$g1==TRUE & blnetevn$g2>0 & blnetevn$g3==0 & blnetevn$g4>0) gplot(n, label=network.vertex.names(n), vertex.col=temp3, vertex.cex=blnetevn$cov[,blnetevn$g4]/mean(blnetevn$cov[,blnetevn$g4]),mode=blnetevn$g5) if (blnetevn$g1==TRUE & blnetevn$g2>0 & blnetevn$g3>0 & blnetevn$g4>0) gplot(n, label=network.vertex.names(n), vertex.col=temp3, vertex.cex=blnetevn$cov[,blnetevn$g4]/mean(blnetevn$cov[,blnetevn$g4]),vertex.sides=temp6,mode=blnetevn$g5) if (blnetevn$g2>0) { add_legend("topleft",temp2,text.col=rainbow(length(temp1)),bty='n',title = names(blnetevn$cov)[blnetevn$g2]) } if (blnetevn$g3>0) { 
add_legend("bottomleft",temp8,bty='n',title = names(blnetevn$cov)[blnetevn$g3]) } par(mar=c(5, 4, 4, 2) + 0.1) }) } else gmessage("Sorry! Network file is not loaded.", parent = window) } showhoutdegree <- function(h,...) { if ("adj" %in% ls(envir=blnetevn)) { outdegree <- degree(network(as.matrix(blnetevn$adj)),cmode="outdegree") hist(outdegree) } else gmessage("Sorry! Network file is not loaded.", parent = window) } showhindegree <- function(h,...) { if ("adj" %in% ls(envir=blnetevn)) { indegree <- degree(network(as.matrix(blnetevn$adj)),cmode="indegree") hist(indegree) } else gmessage("Sorry! Network file is not loaded.", parent = window) }
/scratch/gouwar.j/cran-all/cranData/Blaunet/inst/scripts/graph.R
showinfo <- function(h,...) { if ("adj" %in% ls(envir=blnetevn)) { out <- capture.output(summary(network(as.matrix(blnetevn$adj))))[1:19] window <- gwindow("Network Information", visible = FALSE) exp_group <- gexpandgroup("Summary", cont = window) label <- glabel(out, cont = exp_group) visible(exp_group) <- TRUE visible(window) <- TRUE} else gmessage("Sorry! Network file is not loaded.", parent = window) } showdensity <- function(h,...) { if ("adj" %in% ls(envir=blnetevn)) { gmessage(paste("The network density is ",network.density(network(as.matrix(blnetevn$adj))),".",sep=""), parent = window) } else gmessage("Sorry! Network file is not loaded.", parent = window) } showcentrality <- function(h,...) { if ("adj" %in% ls(envir=blnetevn)) { n <- network(as.matrix(blnetevn$adj)) outd <- degree(n,cmode="outdegree") ind <- degree(n,cmode="indegree") bet <- betweenness(n) clo <- closeness(n) eig <- round(evcent(n),4) output <- data.frame(cbind(network.vertex.names(n),outd,ind,bet,clo,eig)) names(output) <- c("nodeId","outdegree","indegree","betweenness","closeness","eigenvector") nw <- gwindow("Centrality", width = 600, height = 400) group <- ggroup(horizontal = FALSE, cont = nw) button1 <- gbutton("Save as csv file: centrality.csv", expand = FALSE, cont = group, handler = function(h, ...) { write.table(output, "centrality.csv", row.names=F, col.names=T, sep=",") }) button2 <- gbutton("Save as R file: centrality.Rdata", expand = FALSE, cont = group, handler = function(h, ...) { save(output, file="centrality.Rdata") }) button3 <- gbutton("Save as SAS file: centrality.txt & centrality.sas", expand = FALSE, cont = group, handler = function(h, ...) { write.foreign(output, "centrality.txt", "centrality.sas", package="SAS") }) button4 <- gbutton("Save as Stata file: centrality.dta", expand = FALSE, cont = group, handler = function(h, ...) { write.dta(output, ("centrality.dta")) }) button5 <- gbutton("Save as SPSS file: centrality.txt & centrality.sps", expand = FALSE, cont = group, handler = function(h, ...) { write.foreign(output, "centrality.txt", "centrality.sps", package="SPSS") }) gseparator(cont = group) vars <- gdf(output, expand = TRUE, fill=TRUE, cont = group) } else gmessage("Sorry! Network file is not loaded.", parent = window) } showdcensus <- function(h,...) { if ("adj" %in% ls(envir=blnetevn)) { out <- data.frame(dyad.census(network(as.matrix(blnetevn$adj)))) names(out) <- c("Mutual","Asymmetric","Null") window <- gwindow("Dyad Census", width = 300, height = 100) vars <- gdf(out, expand = TRUE, fill=TRUE, cont = window) } else gmessage("Sorry! Network file is not loaded.", parent = window) } showreciprocityindex <- function(h,...) { if ("adj" %in% ls(envir=blnetevn)) { n <- (network(as.matrix(blnetevn$adj))) ri <- (dyad.census(n)[1]*2)/(dyad.census(n)[1]*2+dyad.census(n)[2]) gmessage(paste("The reciprocity index is ",ri,".",sep=""), parent = window) } else gmessage("Sorry! Network file is not loaded.", parent = window) } showtcensus <- function(h,...) { if ("adj" %in% ls(envir=blnetevn)) { out <- data.frame(triad.census(network(as.matrix(blnetevn$adj)))) names(out) <- gsub("X","",names(out)) window <- gwindow("Triad Census", width = 800, height = 100) vars <- gdf(out, expand = TRUE, fill=TRUE, cont = window) } else gmessage("Sorry! Network file is not loaded.", parent = window) } showglobalcustering <- function(h,...) 
{ if ("adj" %in% ls(envir=blnetevn)) { t<-triad.census(network(as.matrix(blnetevn$adj))) num <- 3*(t[9]+t[10]+t[12]+t[13]+t[14]+t[15]+t[16]) dem <- num+t[4]+t[5]+t[6]+t[7]+t[8]+t[11] gmessage(paste("The global clustering coefficient is ",num/dem,".",sep=""), parent = window) } else gmessage("Sorry! Network file is not loaded.", parent = window) } showlocalcustering <- function(h,...) { if ("adj" %in% ls(envir=blnetevn)) { m1 <- as.matrix(network(as.matrix(blnetevn$adj)),matrix.type='edgelist') m2 <- rbind(m1,cbind(m1[,2],m1[,1])) m2 <- m2[!duplicated(m2), ] m9 <- matrix(rep(0,2*attr(m1,"n")),ncol=2) for (i in 1:attr(m1,"n")) { m9[i,1] <- attr(m1,"vnames")[i] k <- m2[which(m2[,1]==i),2] if (length(k)>=2) { m3 <- t(combn(k,2)) m4 <- rbind(m2, m3) m9[i,2] <- nrow(m4[duplicated(m4), , drop = FALSE])/nrow(m3) } else { m9[i,2] <- NA } } m9 <- data.frame(m9) names(m9) <- c("nodeId","Local clustering coefficient") nw <- gwindow("Local Clustering Coefficient",width = 800, height = 600) group <- ggroup(horizontal = FALSE, cont = nw) button1 <- gbutton("Save as csv file: localclustering.csv", expand = FALSE, cont = group, handler = function(h, ...) { write.table(m9, "localclustering.csv", row.names=F, col.names=T, sep=",") }) button2 <- gbutton("Save as R file: localclustering.Rdata", expand = FALSE, cont = group, handler = function(h, ...) { save(m9, file="localclustering.Rdata") }) button3 <- gbutton("Save as SAS file: localclustering.txt & localclustering.sas", expand = FALSE, cont = group, handler = function(h, ...) { write.foreign(m9, "localclustering.txt", "localclustering.sas", package="SAS") }) button4 <- gbutton("Save as Stata file: localclustering.dta", expand = FALSE, cont = group, handler = function(h, ...) { write.dta(m9, ("localclustering.dta")) }) button5 <- gbutton("Save as SPSS file: localclustering.txt & localclustering.sps", expand = FALSE, cont = group, handler = function(h, ...) { write.foreign(m9, "localclustering.txt", "localclustering.sps", package="SPSS") }) gseparator(cont = group) vars <- gdf(m9, expand = TRUE, fill=TRUE, cont = group) } else gmessage("Sorry! Network file is not loaded.", parent = window) }
/scratch/gouwar.j/cran-all/cranData/Blaunet/inst/scripts/network.R
nicheplot <- function(h,...) { if ("cov" %in% ls(envir=blnetevn)==FALSE) {gmessage("Sorry! Attribute file is not loaded.", parent = window)} else { assign("d1",character(0),envir=blnetevn) assign("d2",character(0),envir=blnetevn) assign("d3","",envir=blnetevn) assign("d4",character(0),envir=blnetevn) assign("d5",character(0),envir=blnetevn) assign("d7",character(0),envir=blnetevn) assign("d8","FALSE",envir=blnetevn) assign("d9",character(0),envir=blnetevn) assign("m1",names(blnetevn$cov),envir=blnetevn) assign("m3",names(blnetevn$cov),envir=blnetevn) toplevel <- gwindow("Niche Plot", width=800, height=800, parent = window, visible=FALSE) cg <- ggroup(horizontal = TRUE,cont = toplevel) tbl0 <- gtable(blnetevn$m1,expand=TRUE,multiple=TRUE,cont=cg) cg1 <- ggroup(horizontal = FALSE, cont = cg) addSpring(cg1) gbplus1 <- gbutton("+", cont=cg1) gbminus1 <- gbutton("-", cont=cg1) addSpring(cg1) addSpring(cg1) gbplus2 <- gbutton("+", cont=cg1) gbminus2 <- gbutton("-", cont=cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) gbplus4 <- gbutton("+", cont=cg1) gbminus4 <- gbutton("-", cont=cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) gbplus5 <- gbutton("+", cont=cg1) gbminus5 <- gbutton("-", cont=cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) gbplus7 <- gbutton("+", cont=cg1) gbminus7 <- gbutton("-", cont=cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) addSpring(cg1) cg2 <- ggroup(horizontal = FALSE, cont = cg) d1temp <- data.frame(Node.ids="",stringsAsFactors=FALSE) d2temp <- data.frame(Ecology.ids="",stringsAsFactors=FALSE) d4temp <- data.frame(Dimensions=rep("",length(blnetevn$m3)),stringsAsFactors=FALSE) d5temp <- data.frame(Groups=rep("",length(blnetevn$m3)),stringsAsFactors=FALSE) d7temp <- data.frame(Weights="",stringsAsFactors=FALSE) tbl1 <- gtable(d1temp,expand=TRUE,multiple=FALSE,cont=cg2) size(tbl1)[2] <- 50 tbl2 <- gtable(d2temp,expand=TRUE,multiple=FALSE,cont=cg2) size(tbl2)[2] <- 50 if (('el' %in% ls(envir=blnetevn))==FALSE) { gcheckboxgroup("Show nodes",cont=cg2,handler = function(h,...) assign("d3",svalue(h$obj),envir=blnetevn)) } if ('el' %in% ls(envir=blnetevn)) { gradio(c("Do not show nodes and network ties","Show nodes","Show nodes and network ties"),selected=1,cont=cg2,handler = function(h,...) assign("d3",svalue(h$obj),envir=blnetevn)) } tbl4 <- gtable(d4temp,expand=TRUE,multiple=TRUE,cont=cg2) size(tbl4)[2] <- 120 tbl5 <- gtable(d5temp,expand=TRUE,multiple=TRUE,cont=cg2) size(tbl5)[2] <- 120 tbl7 <- gtable(d7temp,expand=TRUE,multiple=FALSE,cont=cg2) size(tbl7)[2] <- 50 glabel("Complete.cases",cont=cg2) gradio(c("TRUE","FALSE"), selected = 2, cont = cg2, handler = function(h,...) assign("d8",svalue(h$obj),envir=blnetevn)) addHandlerClicked(gbplus1, handler = function(h,...) { temp <- svalue(tbl0) if ("" %in% temp==FALSE & tbl1[1]=="" & length(temp)==1) { assign("m1",blnetevn$m3[which(blnetevn$m3 %in% setdiff(blnetevn$m1,temp))],envir=blnetevn) km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i] assign("d1",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$d1,temp))],envir=blnetevn) tbl1[1] <- blnetevn$d1 } }) addHandlerClicked(gbminus1, handler = function(h,...) 
{ temp <- svalue(tbl1) if ("" %in% temp==FALSE & length(temp)==1) { assign("m1",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$m1,temp))],envir=blnetevn) km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i] assign("d1",character(0),envir=blnetevn) tbl1[1] <- "" } }) addHandlerClicked(gbplus2, handler = function(h,...) { temp <- svalue(tbl0) if ("" %in% temp==FALSE & tbl2[1]=="" & length(temp)==1) { assign("m1",blnetevn$m3[which(blnetevn$m3 %in% setdiff(blnetevn$m1,temp))],envir=blnetevn) km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i] assign("d2",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$d2,temp))],envir=blnetevn) tbl2[1] <- blnetevn$d2 } }) addHandlerClicked(gbminus2, handler = function(h,...) { temp <- svalue(tbl2) if ("" %in% temp==FALSE & length(temp)==1) { assign("m1",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$m1,temp))],envir=blnetevn) km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i] assign("d2",character(0),envir=blnetevn) tbl2[1] <- "" } }) addHandlerClicked(gbplus4, handler = function(h,...) { temp <- svalue(tbl0) if ("" %in% temp==FALSE & length(temp)>0) { assign("m1",blnetevn$m3[which(blnetevn$m3 %in% setdiff(blnetevn$m1,temp))],envir=blnetevn) km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i] assign("d4",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$d4,temp))],envir=blnetevn) kd4 <- c(blnetevn$d4,rep("",length(blnetevn$m3)-length(blnetevn$d4))) for (j in 1:length(blnetevn$m3)) tbl4[j] <- kd4[j] } }) addHandlerClicked(gbminus4, handler = function(h,...) { temp <- svalue(tbl4) if ("" %in% temp==FALSE & length(temp)>0) { assign("m1",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$m1,temp))],envir=blnetevn) km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1)+1)) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i] assign("d4",blnetevn$m3[which(blnetevn$m3 %in% setdiff(blnetevn$d4,temp))],envir=blnetevn) kd4 <- c(blnetevn$d4,rep("",length(blnetevn$m3)-length(blnetevn$d4)+1)) for (j in 1:length(blnetevn$m3)) tbl4[j] <- kd4[j] } }) addHandlerClicked(gbplus5, handler = function(h,...) { temp <- svalue(tbl0) if ("" %in% temp==FALSE & length(temp)>0) { assign("m1",blnetevn$m3[which(blnetevn$m3 %in% setdiff(blnetevn$m1,temp))],envir=blnetevn) km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i] assign("d5",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$d5,temp))],envir=blnetevn) kd5 <- c(blnetevn$d5,rep("",length(blnetevn$m3)-length(blnetevn$d5))) for (j in 1:length(blnetevn$m3)) tbl5[j] <- kd5[j] } }) addHandlerClicked(gbminus5, handler = function(h,...) { temp <- svalue(tbl5) if ("" %in% temp==FALSE & length(temp)>0) { assign("m1",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$m1,temp))],envir=blnetevn) km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1)+1)) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i] assign("d5",blnetevn$m3[which(blnetevn$m3 %in% setdiff(blnetevn$d5,temp))],envir=blnetevn) kd5 <- c(blnetevn$d5,rep("",length(blnetevn$m3)-length(blnetevn$d5)+1)) for (j in 1:length(blnetevn$m3)) tbl5[j] <- kd5[j] } }) addHandlerClicked(gbplus7, handler = function(h,...) 
{ temp <- svalue(tbl0) if ("" %in% temp==FALSE & tbl7[1]=="" & length(temp)==1) { assign("m1",blnetevn$m3[which(blnetevn$m3 %in% setdiff(blnetevn$m1,temp))],envir=blnetevn) km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i] assign("d7",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$d7,temp))],envir=blnetevn) tbl7[1] <- blnetevn$d7 } }) addHandlerClicked(gbminus7, handler = function(h,...) { temp <- svalue(tbl7) if ("" %in% temp==FALSE & length(temp)==1) { assign("m1",blnetevn$m3[which(blnetevn$m3 %in% union(blnetevn$m1,temp))],envir=blnetevn) km1 <- c(blnetevn$m1,rep("",length(blnetevn$m3)-length(blnetevn$m1))) for (i in 1:length(blnetevn$m3)) tbl0[i] <- km1[i] assign("d7",character(0),envir=blnetevn) tbl7[1] <- "" } }) visible(toplevel) <- TRUE gbutton("Continue", expand = FALSE, cont = cg2, handler = function(h, ...) { if (length(blnetevn$d1)==0 | length(blnetevn$d4)==0 | length(blnetevn$d5)==0) {gmessage("Missing required information.", parent = toplevel)} else { dispose(toplevel) if (length(blnetevn$d7)>0) tmpweight <- blnetevn$d7 else tmpweight <- NULL bcolors <- rep(c(26,116,142,47,8,12,31,32,33,41,51,53,62,139,151,175,153,85,450,477),5) if (length(blnetevn$d2)>0) { extralevel <- gwindow("Niche Plot Options", width=600, height=600, parent = window) extrag <- ggroup(cont = extralevel, use.scrollwindow=T, horizontal = FALSE) extragv <- gvbox(cont = extrag) tbl <- gformlayout(cont = extragv) gcheckboxgroup(c("all",as.matrix(unique(blnetevn$cov[which(colnames(blnetevn$cov)==blnetevn$d2)]))), cont = tbl, label="Please select which ecology you want to make niche plot:", handler = function(h,...){ assign("d9",svalue(h$obj),envir=blnetevn) }) assign("d6",rep(1.5,length(blnetevn$d4)),envir=blnetevn) gseparator(cont = extrag) glabel("Dev.range",cont=extrag) for (da in 1:length(blnetevn$d4)) { glabel(blnetevn$d4[da],cont=extrag) assign(paste0("d6da", da),gslider(from = 0, to = 5, by = .05, value = 1.5, cont=extrag, handler = function(h,...) svalue(get(paste0("d6da", da))) )) } addSpring(extrag) addSpring(extrag) addSpring(extrag) button <- gbutton("Continue", expand = FALSE, cont = extrag, handler = function(h, ...) 
{ d6tmp <- c() for (da in 1:length(blnetevn$d4)) { d6tmp <- c(d6tmp,svalue(get(paste0("d6da", da)))) } assign("d6",d6tmp,envir=blnetevn) dispose(extralevel) if ("all" %in% blnetevn$d9) { cov1 <- blnetevn$cov if ('el' %in% ls(envir=blnetevn)) el1 <- blnetevn$el } else { cov1 <- blnetevn$cov[which(blnetevn$cov[,which(colnames(blnetevn$cov)==blnetevn$d2)] %in% blnetevn$d9),] if ('el' %in% ls(envir=blnetevn)) { el1 <- blnetevn$el[which(blnetevn$el[,1] %in% unique(cov1[,which(colnames(cov1)==blnetevn$d1)]) & blnetevn$el[,2] %in% unique(cov1[,which(colnames(cov1)==blnetevn$d1)])),] } } if (('el' %in% ls(envir=blnetevn))==FALSE) { b <- blau(cov1, node.ids=blnetevn$d1, ecology.ids=blnetevn$d2, dimension=blnetevn$d4, memberships=blnetevn$d5,weights=tmpweight,complete.cases=blnetevn$d8) b <- niches(b, dev.range = blnetevn$d6) } if ('el' %in% ls(envir=blnetevn)) { b <- blau(cov1, node.ids=blnetevn$d1, ecology.ids=blnetevn$d2, graph = el1, dimension=blnetevn$d4, memberships=blnetevn$d5,weights=tmpweight,complete.cases=blnetevn$d8 ) b <- niches(b, dev.range = blnetevn$d6) } assign("bobj",b,envir=blnetevn) if (length(blnetevn$d4)==2) { oldpar <- par(no.readonly = TRUE) on.exit(par(oldpar)) lowb <- b$lowbounds topb <- b$topbounds k1 <- unique(b$ids$ecologyId) k2 <- c(which(names(cov1)==blnetevn$d4[1]),which(names(cov1)==blnetevn$d4[2])) niche2d1 <- function(xa,xb,ya,yb) { add_legend <- function(...) { par(fig=c(0, 1, 0, 1), oma=c(0, 0, 0, 0), mar=c(0, 0, 0, 0), new=TRUE) plot(0, 0, type='n', bty='n', xaxt='n', yaxt='n') legend(...) } plot(-10000,-10000,xlim=c(xa,xb),ylim=c(ya,yb), xlab=blnetevn$d4[1], ylab=blnetevn$d4[2]) if (length(grep('Show nodes',blnetevn$d3))==1) { plot(cov1[,k2[1]],cov1[,k2[2]],xlim=c(xa,xb),ylim=c(ya,yb), xlab=blnetevn$d4[1], ylab=blnetevn$d4[2], col="red") } bcol <- 1 for (i in 1:length(k1)) { for (j in 1:length(blnetevn$d5)) { k <- (i-1)*length(blnetevn$d5)+j rect(lowb[k,1], lowb[k,2], topb[k,1], topb[k,2], border=colors()[bcolors[bcol]]) bcol <- bcol+1 } } if (blnetevn$d3=="Show nodes and network ties") { x1 <- y1 <- x2 <- y2 <- rep(0,nrow(el1)) for (i in 1:nrow(el1)) { s <- which(as.character(el1[i,1])==as.character(blnetevn$cov[,blnetevn$d1])) x1[i] <- blnetevn$cov[s,k2[1]] y1[i] <- blnetevn$cov[s,k2[2]] t <- which(as.character(el1[i,2])==as.character(blnetevn$cov[,blnetevn$d1])) x2[i] <- blnetevn$cov[t,k2[1]] y2[i] <- blnetevn$cov[t,k2[2]] } segments(x1,y1,x2,y2,col="red") } add_legend("topleft",paste(rep(k1,each=length(blnetevn$d5)),"_",rownames(lowb),sep=""),text.col=colors()[bcolors[1:nrow(lowb)]],bty='n') } xa <- min(lowb[,1],cov1[,k2[1]],na.rm = TRUE) xb <- max(topb[,1],cov1[,k2[1]],na.rm = TRUE) ya <- min(lowb[,2],cov1[,k2[2]],na.rm = TRUE) yb <- max(topb[,2],cov1[,k2[2]],na.rm = TRUE) niche2d1(xa,xb,ya,yb) } else if (length(blnetevn$d4) == 3) { lowb <- b$lowbounds topb <- b$topbounds k1 <- unique(b$ids$ecologyId) k2 <- c(which(names(blnetevn$cov)==blnetevn$d4[1]),which(names(blnetevn$cov)==blnetevn$d4[2]),which(names(blnetevn$cov)==blnetevn$d4[3])) niche3drgl1 <- function() { scatter3Drgl(-10000,-10000,-10000, xlim=c(min(lowb[,1],blnetevn$cov[,k2[1]],na.rm = TRUE),max(topb[,1],blnetevn$cov[,k2[1]],na.rm = TRUE)), ylim=c(min(lowb[,2],blnetevn$cov[,k2[2]],na.rm = TRUE),max(topb[,2],blnetevn$cov[,k2[2]],na.rm = TRUE)), zlim=c(min(lowb[,3],blnetevn$cov[,k2[3]],na.rm = TRUE),max(topb[,3],blnetevn$cov[,k2[3]],na.rm = TRUE)), xlab=blnetevn$d4[1],ylab=blnetevn$d4[2],zlab=blnetevn$d4[3], colkey = FALSE, col="red") par3d(windowRect = c(900, 50, 1500, 650)) if (length(grep('Show 
nodes',blnetevn$d3))==1) { rgl.close() scatter3Drgl(blnetevn$cov[,k2[1]],blnetevn$cov[,k2[2]],blnetevn$cov[,k2[3]], xlim=c(min(lowb[,1],blnetevn$cov[,k2[1]],na.rm = TRUE),max(topb[,1],blnetevn$cov[,k2[1]],na.rm = TRUE)), ylim=c(min(lowb[,2],blnetevn$cov[,k2[2]],na.rm = TRUE),max(topb[,2],blnetevn$cov[,k2[2]],na.rm = TRUE)), zlim=c(min(lowb[,3],blnetevn$cov[,k2[3]],na.rm = TRUE),max(topb[,3],blnetevn$cov[,k2[3]],na.rm = TRUE)), xlab=blnetevn$d4[1],ylab=blnetevn$d4[2],zlab=blnetevn$d4[3], colkey = FALSE, col="red") par3d(windowRect = c(900, 50, 1500, 650)) } axes3d() box3Drgl(lowb[,1], lowb[,2], lowb[,3], topb[,1], topb[,2], topb[,3], add = TRUE, col = colors()[bcolors[1:nrow(lowb)]], alpha = 0.5, border = "black", lwd = 2) if (blnetevn$d3=="Show nodes and network ties") { x1 <- y1 <- z1 <- x2 <- y2 <- z2 <- rep(0,nrow(el1)) for (i in 1:nrow(el1)) { s <- which(as.character(el1[i,1])==as.character(cov1[,blnetevn$d1])) x1[i] <- cov1[s,k2[1]] y1[i] <- cov1[s,k2[2]] z1[i] <- cov1[s,k2[3]] t <- which(as.character(el1[i,2])==as.character(cov1[,blnetevn$d1])) x2[i] <- cov1[t,k2[1]] y2[i] <- cov1[t,k2[2]] z2[i] <- cov1[t,k2[3]] } segments3Drgl(x1,y1,z1,x2,y2,z2,add = TRUE,col="red") } legend3d("topleft",paste(rep(k1,each=length(blnetevn$d5)),"_",rownames(lowb),sep=""),text.col=colors()[bcolors[1:nrow(lowb)]],bty='n') } niche3drgl1() } else { dispose(toplevel) gmessage("You must select 2 or 3 dimensions.") } }) } else { extralevel <- gwindow("Dev.range", width=600, height=600, parent = window) extrag <- ggroup(cont = extralevel, use.scrollwindow=T, horizontal = FALSE) assign("d6",rep(1.5,length(blnetevn$d4)),envir=blnetevn) glabel("Dev.range",cont=extrag) for (da in 1:length(blnetevn$d4)) { glabel(blnetevn$d4[da],cont=extrag) assign(paste0("d6da", da),gslider(from = 0, to = 5, by = .05, value = 1.5, cont=extrag, handler = function(h,...) svalue(get(paste0("d6da", da))) )) } addSpring(extrag) addSpring(extrag) addSpring(extrag) button <- gbutton("Continue", expand = FALSE, cont = extrag, handler = function(h, ...) { dispose(extralevel) if (('el' %in% ls(envir=blnetevn))==FALSE) { b <- blau(blnetevn$cov, node.ids=blnetevn$d1, dimension=blnetevn$d4, memberships=blnetevn$d5,weights=tmpweight,complete.cases=blnetevn$d8) b <- niches(b, dev.range = blnetevn$d6) } if ('el' %in% ls(envir=blnetevn)) { b <- blau(blnetevn$cov, node.ids=blnetevn$d1, graph = blnetevn$el, dimension=blnetevn$d4, memberships=blnetevn$d5,weights=tmpweight,complete.cases=blnetevn$d8) b <- niches(b, dev.range = blnetevn$d6) } assign("bobj",b,envir=blnetevn) if (length(blnetevn$d4)==2) { oldpar <- par(no.readonly = TRUE) on.exit(par(oldpar)) lowb <- b$lowbounds topb <- b$topbounds k2 <- c(which(names(blnetevn$cov)==blnetevn$d4[1]),which(names(blnetevn$cov)==blnetevn$d4[2])) niche2d2 <- function(xa,xb,ya,yb) { add_legend <- function(...) { par(fig=c(0, 1, 0, 1), oma=c(0, 0, 0, 0),mar=c(0, 0, 0, 0), new=TRUE) plot(0, 0, type='n', bty='n', xaxt='n', yaxt='n') legend(...) 
} plot(-10000,-10000,xlim=c(xa,xb),ylim=c(ya,yb), xlab=blnetevn$d4[1], ylab=blnetevn$d4[2]) if (length(grep('Show nodes',blnetevn$d3))==1) { plot(blnetevn$cov[,k2[1]],blnetevn$cov[,k2[2]],xlim=c(xa,xb),ylim=c(ya,yb), xlab=blnetevn$d4[1], ylab=blnetevn$d4[2],col="red") } for (i in 1:length(blnetevn$d5)) { rect(lowb[i,1], lowb[i,2], topb[i,1], topb[i,2], border=colors()[bcolors[i]]) } if (blnetevn$d3=="Show nodes and network ties") { r <- which(blnetevn$d1==names(blnetevn$cov)) x1 <- y1 <- x2 <- y2 <- rep(0,nrow(blnetevn$el)) for (i in 1:nrow(blnetevn$el)) { s <- which(as.character(blnetevn$el[i,1])==as.character(blnetevn$cov[,r])) x1[i] <- blnetevn$cov[s,k2[1]] y1[i] <- blnetevn$cov[s,k2[2]] t <- which(as.character(blnetevn$el[i,2])==as.character(blnetevn$cov[,r])) x2[i] <- blnetevn$cov[t,k2[1]] y2[i] <- blnetevn$cov[t,k2[2]] } segments(x1,y1,x2,y2,col="red") } add_legend("topleft",blnetevn$d5,text.col=colors()[bcolors[1:nrow(lowb)]],bty='n') } xa <- min(lowb[,1],blnetevn$cov[,k2[1]],na.rm = TRUE) xb <- max(topb[,1],blnetevn$cov[,k2[1]],na.rm = TRUE) ya <- min(lowb[,2],blnetevn$cov[,k2[2]],na.rm = TRUE) yb <- max(topb[,2],blnetevn$cov[,k2[2]],na.rm = TRUE) niche2d2(xa,xb,ya,yb) } else if (length(blnetevn$d4) == 3) { lowb <- b$lowbounds topb <- b$topbounds k2 <- c(which(names(blnetevn$cov)==blnetevn$d4[1]),which(names(blnetevn$cov)==blnetevn$d4[2]),which(names(blnetevn$cov)==blnetevn$d4[3])) niche3drgl2 <- function() { scatter3Drgl(-10000,-10000,-10000, xlim=c(min(lowb[,1],blnetevn$cov[,k2[1]],na.rm = TRUE),max(topb[,1],blnetevn$cov[,k2[1]],na.rm = TRUE)), ylim=c(min(lowb[,2],blnetevn$cov[,k2[2]],na.rm = TRUE),max(topb[,2],blnetevn$cov[,k2[2]],na.rm = TRUE)), zlim=c(min(lowb[,3],blnetevn$cov[,k2[3]],na.rm = TRUE),max(topb[,3],blnetevn$cov[,k2[3]],na.rm = TRUE)), xlab=blnetevn$d4[1],ylab=blnetevn$d4[2],zlab=blnetevn$d4[3], colkey = FALSE, col="red") par3d(windowRect = c(900, 50, 1500, 650)) if (length(grep('Show nodes',blnetevn$d3))==1) { rgl.close() scatter3Drgl(blnetevn$cov[,k2[1]],blnetevn$cov[,k2[2]],blnetevn$cov[,k2[3]], xlim=c(min(lowb[,1],blnetevn$cov[,k2[1]],na.rm = TRUE),max(topb[,1],blnetevn$cov[,k2[1]],na.rm = TRUE)), ylim=c(min(lowb[,2],blnetevn$cov[,k2[2]],na.rm = TRUE),max(topb[,2],blnetevn$cov[,k2[2]],na.rm = TRUE)), zlim=c(min(lowb[,3],blnetevn$cov[,k2[3]],na.rm = TRUE),max(topb[,3],blnetevn$cov[,k2[3]],na.rm = TRUE)), xlab=blnetevn$d4[1],ylab=blnetevn$d4[2],zlab=blnetevn$d4[3], colkey = FALSE, col="red") par3d(windowRect = c(900, 50, 1500, 650)) } axes3d() box3Drgl(lowb[,1], lowb[,2], lowb[,3], topb[,1], topb[,2], topb[,3], add = TRUE, col = colors()[bcolors[1:nrow(lowb)]], alpha = 0.5, border = "black", lwd = 2) if (blnetevn$d3=="Show nodes and network ties") { r <- which(blnetevn$d1==names(blnetevn$cov)) x1 <- y1 <- z1 <- x2 <- y2 <- z2 <- rep(0,nrow(blnetevn$el)) for (i in 1:nrow(blnetevn$el)) { s <- which(as.character(blnetevn$el[i,1])==as.character(blnetevn$cov[,r])) x1[i] <- blnetevn$cov[s,k2[1]] y1[i] <- blnetevn$cov[s,k2[2]] z1[i] <- blnetevn$cov[s,k2[3]] t <- which(as.character(blnetevn$el[i,2])==as.character(blnetevn$cov[,r])) x2[i] <- blnetevn$cov[t,k2[1]] y2[i] <- blnetevn$cov[t,k2[2]] z2[i] <- blnetevn$cov[t,k2[3]] } segments3Drgl(x1,y1,z1,x2,y2,z2,add = TRUE,col="red") } legend3d("topleft",blnetevn$d5,text.col=colors()[bcolors[1:nrow(lowb)]],bty='n') } niche3drgl2() } else { gmessage("You must select 2 or 3 dimensions.") } }) } } }) } }
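## The GUI above ultimately reduces to the two calls sketched below; a minimal
## non-interactive equivalent (attr_data and the column names "id", "dim1",
## "dim2", "grp" are hypothetical placeholders for the loaded attribute table):
# b <- blau(attr_data, node.ids = "id", dimensions = c("dim1", "dim2"),
#           memberships = "grp", complete.cases = FALSE)
# b <- niches(b, dev.range = c(1.5, 1.5))
# b$lowbounds; b$topbounds # the niche boxes drawn by niche2d1/niche2d2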
/scratch/gouwar.j/cran-all/cranData/Blaunet/inst/scripts/nicheplot.R
loadfile <- function(h,...) { f <- gfile(text="Select a file", type="open") if (f=="") return(invisible(NULL)) if (grepl('\\.rda$',f) | grepl('\\.Rdata$',f) | grepl('\\.dta$',f) | grepl('\\.csv$',f) | grepl('\\.sav$',f) | grepl('\\.xpt$',f) | grepl('\\.dat$',f) | grepl('\\.DAT$',f) | grepl('\\.txt$',f)) { if (grepl('\\.rda$',f) | grepl('\\.Rdata$',f)) { c1 <- load(f) load(f,envir=blnetevn) if (inherits(get(c1),"data.frame") || inherits(get(c1),"matrix")) { assign("cov",get(c1),envir=blnetevn) } else if (inherits(get(c1),"list")) { assign("adj",get(c1)$adj,envir=blnetevn) assign("el",get(c1)$el,envir=blnetevn) assign("cov",get(c1)$square.data,envir=blnetevn) } gmessage(paste("Congratulations! Your attribute file ",f," is now loaded",sep=''), parent = window) } else if (grepl('\\.dta$',f)) { c1 <- data.frame(read_dta(f)) assign("cov",c1,envir=blnetevn) gmessage(paste("Congratulations! Your attribute file ",f," is now loaded",sep=''), parent = window) } else if (grepl('\\.csv$',f)) { c1 <- read.csv(f,header=T,sep=",") assign("cov",c1,envir=blnetevn) gmessage(paste("Congratulations! Your attribute file ",f," is now loaded",sep=''), parent = window) } else if (grepl('\\.sav$',f)) { c1 <- read.spss(f) assign("cov",c1,envir=blnetevn) gmessage(paste("Congratulations! Your attribute file ",f," is now loaded",sep=''), parent = window) } else if (grepl('\\.xpt$',f)) { c1 <- read.xport(f) assign("cov",c1,envir=blnetevn) gmessage(paste("Congratulations! Your attribute file ",f," is now loaded",sep=''), parent = window) } else if (grepl('\\.dat$',f) | grepl('\\.DAT$',f) | grepl('\\.txt$',f)) { c1 <- read.table(f) assign("cov",c1,envir=blnetevn) gmessage(paste("Congratulations! Your attribute file ",f," is now loaded",sep=''), parent = window) } } else gmessage("Sorry! Unknown file format.", parent = window) } loadnet <- function(h,...) { f <- gfile(text="Select a file", type="open") if (f=="") return(invisible(NULL)) if (grepl('\\.rda$',f) | grepl('\\.Rdata$',f) | grepl('\\.paj$',f) | grepl('\\.dta$',f) | grepl('\\.csv$',f) | grepl('\\.sav$',f) | grepl('\\.xpt$',f) | grepl('\\.dat$',f) | grepl('\\.DAT$',f) | grepl('\\.net$',f) | grepl('\\.txt$',f)) { if (grepl('\\.rda$',f) | grepl('\\.Rdata$',f)) { f1 <- get(load(f)) if (inherits(f1,"data.frame")) f1 <- as.matrix(f1) net <- network(f1) } else if (grepl('\\.paj$',f)) { f1 <- read.paj(f) if (inherits(f1,"data.frame")) f1 <- as.matrix(f1) net <- network(f1) } else if (grepl('\\.dta$',f)) { f1 <- data.frame(read_dta(f)) f1 <- as.matrix(f1) net <- network(f1) } else if (grepl('\\.csv$',f)) { f1 <- read.csv(f,header=F,sep=",") if (inherits(f1,"data.frame")) f1 <- as.matrix(f1) net <- network(f1) } else if (grepl('\\.sav$',f)) { f1 <- read.spss(f) if (inherits(f1,"data.frame")) f1 <- as.matrix(f1) net <- network(f1) } else if (grepl('\\.xpt$',f)) { f1 <- read.xport(f) if (inherits(f1,"data.frame")) f1 <- as.matrix(f1) net <- network(f1) } else if (grepl('\\.dat$',f) | grepl('\\.DAT$',f) | grepl('\\.net$',f) | grepl('\\.txt$',f)) { f1 <- read.table(f) if (inherits(f1,"data.frame")) f1 <- as.matrix(f1) net <- network(f1) } el <- data.frame(as.matrix(net,matrix.type='edgelist')) names(el) <- c("i","j") adj <- data.frame(as.matrix(net)) rownames(adj) <- colnames(adj) <- attr(as.matrix(net,matrix.type='edgelist'),"vnames") assign("adj",adj,envir=blnetevn) assign("el",el,envir=blnetevn) gmessage(paste("Congratulations! Your network file ",f," is now loaded",sep=''), parent = window) } else gmessage("Sorry! Unknown file format.", parent = window) }
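## Sketch of the edgelist/adjacency conversion that loadnet performs after
## reading a file (the 2x2 matrix is made up; assumes the 'network' package):
# f1 <- matrix(c(0, 1, 1, 0), nrow = 2)
# net <- network(f1)
# el <- data.frame(as.matrix(net, matrix.type = 'edgelist')) # two-column edge list
# adj <- data.frame(as.matrix(net)) # square adjacency matrix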
/scratch/gouwar.j/cran-all/cranData/Blaunet/inst/scripts/open.R
Blend <- function(exp, X, Y, conc = NULL, effects = NULL) { # Function for working with optimization problems, based # on the article: "Kalirajan, K. P. 1990. On the estimation of a # regression model with fixed and random coefficients. # Journal of Applied Statistics, 17: 237-244." # Developed by Marcelo Angelo Cirillo and # Paulo Cesar Ossani in 11/2017 # Input: # exp - Vector with the names of the experiments. # X - Regressor variables, without the vector of concentrations. # Y - Response variable. # conc - Vector with the concentrations of the experiments. # effects - Vector of the effects of the mixtures in a reference mixture (example: centroid) # Output: # MPred - Matrix with the predicted and observed values. # MCPred - Matrix with the values predicted by components. # Mexp - Matrix with the design of the experiments # theta - Vector with the theta estimates. X <- as.data.frame(X) Y <- as.data.frame(Y) exp <- as.data.frame(exp) if (nrow(exp) != nrow(X)) stop("Number of lines in 'exp' should be equal to 'X'. Verify!") if (nrow(Y) != nrow(X)) stop("Number of lines in 'Y' should be equal to 'X'. Verify!") if (nrow(X) != length(conc) && !is.null(conc)) stop("Number of lines in 'conc' should be equal to 'X'. Verify!") if (nrow(X) != length(effects) && !is.null(effects)) stop("Number of lines in 'effects' should be equal to 'X'. Verify!") if (is.null(effects)) effects <- rep(1, nrow(X)) if (is.null(conc)) conc <- rep(1, nrow(X)) exp.Table <- table(exp) # table with the number of samples in each experiment exp.Names <- names(exp.Table) # names of the experiments num.exp <- length(exp.Table) # number of experiments if ((sum(exp.Table) / num.exp) != exp.Table[[1]]) stop("The experiments should be balanced. Verify!") Xc <- cbind(exp, X, conc) Y <- cbind(exp, Y) # appends the experiment names to the Y variable ## Computation of the Z matrix MZ <- NULL # Z matrix for(i in 1:num.exp) { MaZ <- matrix(0.0, nrow = exp.Table[i], ncol = num.exp) MaZ[,i] <- 1 MZ <- rbind(MZ, cbind(exp.Names[i], as.data.frame(MaZ))) } MZ <- as.data.frame(MZ) ## Computation of the S matrix MS <- NULL # S matrix ncz <- ncol(MZ) ncx <- ncol(Xc) # number of regressor variables + 1 for(i in 1:num.exp) { MaZ <- (MZ[MZ[,1] == exp.Names[i], 2:ncz]) MaX <- (Xc[Xc[,1] == exp.Names[i], 2:ncx]) MaS <- cbind(exp.Names[i], as.data.frame(t(rbind(t(MaX),t(MaZ))))) MS <- rbind(MS, MaS) } MS <- as.data.frame(MS) ## Computes the inverse matrix and finds the covariances in Z MInv <- NULL # inverse matrix MSt <- NULL # matrix with the covariances of Z ncs <- ncol(MS) # ncx <- ncol(Xc) # number of regressor variables + 1 for(i in 1:num.exp) { S <- as.matrix(MS[MS[,1] == exp.Names[i], 2:ncs]) Inv <- ginv(t(S) %*% S) MaInv <- cbind(exp.Names[i], as.data.frame(Inv)) MaSt <- cbind(exp.Names[i], as.data.frame(Inv[ncx:(ncs-1), ncx:(ncs-1)])) # result of the computations on the Z matrix MInv <- rbind(MInv, MaInv) MSt <- rbind(MSt, MaSt) } MSt <- as.data.frame(MSt) MInv <- as.data.frame(MInv) ## Computation of the theta values Mtheta <- NULL # matrix with the theta values Vtheta <- NULL # matrix with the theta values nci <- ncol(MInv) for(i in 1:num.exp) { MaI <- as.matrix(MInv[MInv[,1] == exp.Names[i], 2:nci]) # inverse matrix of each experiment MaS <- as.matrix(MS[MS[,1] == exp.Names[i], 2:ncs]) # S matrix of each experiment MaY <- as.matrix(Y[Y[,1] == exp.Names[i], 2]) # Y vector of each experiment Vatheta <- MaI %*% t(MaS) %*% MaY Vtheta <- cbind(Vtheta, Vatheta) Matheta <- cbind(exp.Names[i], as.data.frame(Vatheta)) Mtheta <- rbind(Mtheta, Matheta) } Mtheta <- as.data.frame(Mtheta) ## Computation of the sigma values MSigma <- NULL # matrix with the sigma values n <- exp.Table[[1]] # number of elements in each experiment p <- ncol(X) + 1 # number of regressor variables + concentration q <- 1 # number of stochastic variables nct <- ncol(Mtheta) for(i in 1:num.exp) { MaY <- as.matrix(Y[Y[,1] == exp.Names[i], 2]) # Y vector of each experiment MaS <- as.matrix(MS[MS[,1] == exp.Names[i], 2:ncs]) # S matrix of each experiment MaTh <- as.matrix(Mtheta[Mtheta[,1] == exp.Names[i], 2:nct]) # theta matrix of each experiment Ma <- (MaY - MaS%*%MaTh) MaSigma <- cbind(exp.Names[i], as.data.frame((t(Ma) %*% Ma) / (n - p - q))) MSigma <- rbind(MSigma, MaSigma) } MSigma <- as.data.frame(MSigma) ## Computation of the delta values T1 <- cov(Vtheta) T2 <- 0 ncst <- ncol(MSt) for(i in 1:num.exp) { MaSt <- as.matrix(MSt[MSt[,1] == exp.Names[i], 2:ncst]) # theta matrix of each experiment T2 <- T2 + MSigma[i,2] * MaSt } T2 <- T2 / num.exp dif <- T1 - T2 dvs <- svd(dif) if (length(dvs$d) > 1) { MDelta <- diag(dvs$d) } else { MDelta <- dvs$d } ## Computation of the V matrix MV <- NULL # V matrix t <- NULL auxV <- matrix (0, nrow = n, ncol = n) for(i in 1:num.exp) { MaZ <- as.matrix(MZ[MZ[,1] == exp.Names[i], 2:ncz]) V <- MaZ %*% MDelta %*% t(MaZ) + MSigma[i,2] * diag(1, n) for(j in 1:num.exp) { if (j == i) { aux = V } else aux <- auxV t <- cbind(t, aux) } MV <- rbind(MV, t) t <- NULL } MVInv <- ginv(MV) ## Estimate of theta NS <- NULL ncs <- ncol(MS) for(i in 1:num.exp) { MaS <- as.matrix(MS[MS[,1] == exp.Names[i], 2:ncs]) # S matrix of each experiment NS <- rbind(NS, MaS) } MInvtheta <- ginv(t(NS) %*% MVInv %*% NS) Esttheta <- MInvtheta %*% t(NS) %*% MVInv %*% Y[,2] colnames(Esttheta) <- "theta" rownames(Esttheta) <- c(colnames(Xc[,2:ncol(Xc)]), paste("exp", exp.Names)) ## Computation of the predicted values MU <- as.matrix(cbind(Xc[,2:ncol(Xc)], MZ[,2:ncol(MZ)])) VlrPred <- MU %*% Esttheta ## Matrix with the predicted values MPred <- cbind(exp, effects, conc, VlrPred, Y[,2]) colnames(MPred) <- c("Experiments", "Effects", "Concentrations", "Predicted values", "Observed values") MPred <- as.data.frame(MPred) ## Matrix with the values predicted by components MCPred <- NULL X <- as.matrix(X) nreg <- ncol(X) # number of regressor variables for(i in 1:nreg) { MCPred <- cbind(MCPred, X[,i] * Esttheta[i] + Esttheta[nreg + 1] * conc) # regressor variables + concentrations for(j in 1:num.exp) { MCPred[,i] <- MCPred[,i] + Esttheta[nreg + 1 + j] * MZ[,j+1] # + effects from the Z matrix } } MCPred <- cbind(exp, effects, MCPred) colnames(MCPred) <- c("experiments", "effects", colnames(X)) MCPred <- as.data.frame(MCPred) ## Matrix of experiments Mexp <- as.matrix(MZ[,2:ncol(MZ)]) colnames(Mexp) <- paste("exp.", 1:ncol(Mexp)) Lista <- list(MPred = MPred, MCPred = MCPred, theta = Esttheta, Mexp = Mexp) return(Lista) }
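## Minimal usage sketch with made-up toy data (not from the package). Note the
## per-experiment run count must exceed ncol(X) + 2, since the residual degrees
## of freedom n - p - q divide the sigma estimates above:
# library(Blendstat) # assuming the package is installed
# set.seed(1)
# exp <- rep(c("A", "B"), each = 6)                 # two balanced experiments
# X <- data.frame(x1 = runif(12), x2 = runif(12))   # regressors
# Y <- rnorm(12)                                    # response
# conc <- rep(seq(0.1, 0.6, by = 0.1), 2)           # concentrations
# res <- Blend(exp, X, Y, conc)
# res$theta        # estimated coefficients
# head(res$MPred)  # predicted vs. observed values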
/scratch/gouwar.j/cran-all/cranData/Blendstat/R/Blend.R
Plot.Blend <- function(BL, titles = c(NA,NA), posleg = 2, xlabel = NA,
                       ylabel = NA, boxleg = FALSE, color = TRUE,
                       expcolor = NA, casc = TRUE) {
  # Routine for plotting Blendstat graphs,
  # Developed by Marcelo Angelo Cirillo and
  # Paulo Cesar Ossani in 11/2017

  # Input:
  # BL       - Data from the Blend function.
  # titles   - Titles for the plots of the concentration effects and the components. If not defined, default text is used.
  # posleg   - 1 for legend on the left,
  #            2 for legend on the right (default),
  #            3 for legend on top,
  #            4 for legend at the bottom.
  # xlabel   - Names the X axis; if not defined, a default is used.
  # ylabel   - Names the Y axis; if not defined, a default is used.
  # boxleg   - Puts a frame around the legend (default = FALSE).
  # color    - Colored plots (default = TRUE).
  # expcolor - Vector with the colors of the experiments.
  # casc     - Cascade effect in the presentation of the plots (default = TRUE).

  # Returns:
  # Several plots.

  if (!is.numeric(posleg) || posleg < 1 || posleg > 4 || (floor(posleg)-posleg) != 0)
     stop("Input to set the position of the legend 'posleg' is incorrect, should be an integer number between [1,4]. Verify!")

  if (!is.logical(boxleg))
     stop("'boxleg' input is incorrect, it should be TRUE or FALSE. Verify!")

  if (!is.character(xlabel) && !is.na(xlabel))
     stop("'xlabel' input is incorrect, it should be of type character or string. Verify!")

  if (!is.character(ylabel) && !is.na(ylabel))
     stop("'ylabel' input is incorrect, it should be of type character or string. Verify!")

  if (is.na(xlabel)) # || !is.character(xlabel))
     xlabel = "Effects" # names the X axis

  if (is.na(ylabel)) # || !is.character(ylabel))
     ylabel = "Predicted values" # names the Y axis

  if (!is.logical(color))
     stop("'color' input is incorrect, it should be TRUE or FALSE. Verify!")

  if (!is.logical(casc))
     stop("'casc' input is incorrect, it should be TRUE or FALSE. Verify!")

  ##### START - Information used in the plots #####
  Exp.Table <- table(BL$MPred[,1])  # table with the number of samples in each experiment
  Exp.Names <- names(Exp.Table)     # names of the experiments
  Num.Exp   <- length(Exp.Table)    # number of experiments

  if (Num.Exp != 0 && length(expcolor) != Num.Exp && !is.na(expcolor) ||
      Num.Exp == 0 && length(expcolor) != 1 && !is.na(expcolor))
     stop("'expcolor' input is incorrect, it should be in an amount equal to the number of experiments. Verify!")

  boxleg = ifelse(boxleg,"o","n") # frame around the legends: "n" without frame, "o" with frame

  cor <- 1 # initial color of the points and legends
  ##### END - Information used in the plots #####

  if (!is.character(titles[1]) || is.na(titles[1])) titles[1] = c("Study of the effects of concentrations")
  if (!is.character(titles[2]) || is.na(titles[2])) titles[2] = c("Component")

  Init.Form <- 15 # initial shape of the points

  Form.Points <- Init.Form:(Init.Form + Num.Exp-1)

  if (color) {
    if (!is.na(expcolor[1])) {
      cor1 <- expcolor
    } else {
      cor1 <- cor:(cor + Num.Exp - 1)
    }
  } else {
    cor1 <- cor
  }

  #### START - Concentration effects ####
  if (posleg == 1) Pos = "left" # position of the legends in the plots
  if (posleg == 2) Pos = "right"
  if (posleg == 3) Pos = "top"
  if (posleg == 4) Pos = "bottom"

  Concentration <- BL$MPred[,3]

  if (casc) dev.new() # cascade effect in the presentation of the plots

  plt <- xyplot(BL$MPred[,4] ~ BL$MPred[,2] | Concentration,
                xlab = xlabel,
                ylab = ylabel,
                main = titles[1],
                data = BL$MPred,      # data
                group = BL$MPred[,1], # groups of the experiments
                type = "b",           # plot type
                col = cor1,
                pch = Form.Points,
                grid = TRUE,
                as.table = TRUE, # draws the plots from left to right, top to bottom
                key = list(space = Pos, # creates the legend
                           points = list(col = cor1, pch = Form.Points),
                           columns = ifelse(posleg < 3, 1, Num.Exp),
                           bty = boxleg,
                           text = list(paste("Exp -", Exp.Names))))
                # auto.key=list(space="right") # automatic legend

  print(plt, split=c(1,1,1,1), more = F)
  #### END - Concentration effects ####

  #### START - Predictions per component ####
  Data <- BL$MCPred

  Tit <- colnames(Data[,3:ncol(Data)])

  # windows(width = 5, height = 5, pointsize = 1)

  nc <- (ncol(Data)-2) # number of regressor variables

  if (casc) dev.new() # cascade effect in the presentation of the plots

  for(i in 1:nc) { # regressor variables
    plt <- xyplot(Data[, 2 + i] ~ Data[,2],
                  xlab = xlabel,
                  ylab = ylabel,
                  main = paste(titles[2], Tit[i]),
                  data = Data,      # data
                  group = Data[,1], # groups of the experiments
                  type = "b",       # plot type
                  col = cor1,
                  pch = Form.Points,
                  grid = TRUE,
                  as.table = TRUE, # draws the plots from left to right, top to bottom
                  key = list(space = Pos, # creates the legend
                             points = list(col = cor1, pch = Form.Points),
                             columns = ifelse(posleg < 3, 1, Num.Exp),
                             bty = boxleg,
                             text = list(paste("Exp -", Exp.Names))))

    num <- i/2
    if ((ceiling(num) - num) != 0) { # if odd
      print(plt, split=c(1,1,1,2), more = ifelse(i < nc, TRUE, FALSE))
    } else {
      print(plt, split=c(1,2,1,2), more = FALSE)
      if (i < nc)
         if (casc) dev.new() # cascade effect in the presentation of the plots
    }
  }
  #### END - Predictions per component ####
}
/scratch/gouwar.j/cran-all/cranData/Blendstat/R/Plot.Blend.R
#' \code{BlockCov} package
#'
#' Estimation of Large Block Covariance Matrices
#'
#' See the README on
#' \href{https://cran.r-project.org/package=BlockCov/vignettes/Vignettes.pdf}{CRAN} or
#' \href{https://github.com/Marie-PerrotDockes/BlockCov#readme}{GitHub}.
#'
#' @docType package
#' @name BlockCov
NULL
/scratch/gouwar.j/cran-all/cranData/BlockCov/R/BlockCov.R
#' This function computes an estimator of the covariance matrix and the square root of its inverse, and permutes its rows and columns if necessary to make the block structure appear.
#'
#' @param E the observation matrix such that each of its rows has a block structure correlation matrix Sigma to estimate up to a permutation of its columns and rows.
#' @param k numerical or NULL, the rank for the low rank approximation. If NULL the rank is computed using the slope_change function applied on the eigenvalues of the low rank part of Sigma. Default to NULL.
#' @param nb_nn0 numerical or NULL, corresponds to the number of non null values to keep in the estimation of the covariance matrix.
#' If NULL the number of non null values is computed by applying the slope_change function to the Frobenius norm of the difference between the empirical correlation matrix and its estimation with nb_nn0 non null values. Default to NULL.
#' @param method_k character, if "Cattell" (the default) then the Cattell criterion \insertCite{cattell1966}{BlockCov} is performed on the singular values of the covariance matrix
#' to estimate the rank used in the low rank approximation, while "PA" uses the parallel analysis \insertCite{Horn1965}{BlockCov},
#' which can be more accurate if the number of rows of E is not too small but which is much slower.
#' @param times numeric, the number of resamplings done for the "PA" method, ignored if method_k is different from "PA".
#' @param method_0 character, if "Elbow" (the default) then the Elbow criterion (see \insertCite{blc;textual}{BlockCov} for details) is performed
#' to estimate the number of non null values, while "BL" uses the approach proposed in
#' \insertCite{bickel2008;textual}{BlockCov} based on cross-validation,
#' which can be more accurate if the number of rows of E is not too small but which is much slower.
#' @param N numeric, the number of folds used for the "BL" method. Ignored if method_0 is different from "BL".
#' @param big logical, default to FALSE. If the dataset is too big the empirical correlation is calculated by crossprod(E) * 1 / n to speed up the computation.
#' @param reorder logical, default to FALSE. Whether or not the columns of E are permuted. If TRUE a hierarchical clustering is first performed and the columns are permuted according to it.
#' @param inv_12 logical, default to FALSE. Whether or not to compute the square root of the inverse of the covariance matrix.
#' @return A list with the elements
#' \item{Sigma_est}{estimator of the covariance matrix}
#' \item{k}{rank of the low rank part of the covariance matrix}
#' \item{nb_nn0}{number of non null values of the upper triangular part of the covariance matrix}
#' \item{S_inv_12}{square root of the inverse of the estimated covariance matrix}
#' \item{order}{permutation to apply to the rows and the columns of the covariance to make the block structure appear}
#' @importFrom Matrix Matrix nearPD t image
#' @importFrom stats cor dist hclust runif
#' @importFrom BBmisc which.last
#' @importFrom Rdpack reprompt
#' @importFrom stats as.dist
#' @importFrom dplyr desc
#' @importFrom dplyr n
#' @examples
#' n <- 30
#' q <- 100
#' Sigma <- Simu_Sigma(q = q, diag = FALSE, equal = TRUE)
#' Matrix::image(Sigma)
#' E <- matrix(rnorm(n * q), ncol = q) %*% chol(as.matrix(Sigma))
#' res <- Sigma_estimation(E, inv_12 = TRUE)
#' Matrix::image(res$Sigma_est)
#' Matrix::image(res$S_inv_12)
#' @references
#' \insertAllCited{}
#' @export
Sigma_estimation <- function(E, k = NULL, nb_nn0 = NULL, big = FALSE,
                             reorder = FALSE, inv_12 = FALSE,
                             method_k = "Cattell", times = 10,
                             method_0 = "Elbow", N = 10) {
  ord <- NULL
  q <- ncol(E)
  n <- nrow(E)
  if (!big) {
    corE <- cor(as.matrix(E))
  } else {
    corE <- crossprod(E) * 1 / n
  }
  if (reorder) {
    clust <- hclust(as.dist(1 - corE))
    ord <- clust$order
    E <- E[, ord]
    if (!big) {
      corE <- cor(as.matrix(E))
    } else {
      corE <- crossprod(E) * 1 / n
    }
  }
  vec_up_emp <- corE[upper.tri(corE)]
  Pti_sig <- Matrix(0, ncol = (q - 1), nrow = (q - 1))
  Pti_sig[upper.tri(Pti_sig, diag = TRUE)] <- vec_up_emp
  Pti_sig[lower.tri(Pti_sig)] <- t(as.matrix(Pti_sig))[lower.tri(t(as.matrix(Pti_sig)))]
  res_svd <- svd(Pti_sig)
  vp <- res_svd$d
  u <- res_svd$u
  tv <- t(res_svd$v)
  if (is.null(k)) {
    if (method_k == "Cattell") k <- max(slope_change(vp), 2)
    if (method_k == "PA") k <- which.last(vp > max(PA(E, times)))
  }
  large_vp <- vp[1:k]
  corE_aprx <- u[, 1:k] %*% diag(large_vp) %*% tv[1:k, ]
  vec_up <- corE_aprx[upper.tri(corE_aprx, diag = TRUE)]
  l <- length(vec_up)
  a_vup <- abs(vec_up)
  ord_vup <- order(a_vup)
  v_ord <- a_vup[ord_vup]
  if (is.null(nb_nn0)) {
    if (method_0 == "Elbow") {
      error <- c(0, cumsum(v_ord^2))
      nb_nn0 <- slope_change(rev(error))
    }
    if (method_0 == "BL") {
      nb_nn0 <- cv_bl(E, v_ord, N = N)
    }
  }
  seuil <- rev(v_ord)[nb_nn0]
  vec_up[a_vup < seuil] <- 0
  Sig_est <- Matrix(0, q, q)
  Sig_est[upper.tri(Sig_est)] <- vec_up
  Sig_est <- Sig_est + t(Sig_est)
  diag(Sig_est) <- 1
  if (reorder) {
    reord <- order(ord)
    Sig_est <- Sig_est[reord, reord]
  }
  Sig_est <- nearPD(Sig_est, corr = TRUE)$mat
  if (inv_12) {
    res_svd <- svd(Sig_est)
    vp2 <- res_svd$d
    u <- res_svd$u
    tv <- t(res_svd$v)
    sel <- which(vp2 > 0.1)
    vp[-sel] <- 0
    S_inv_12 <- u[, sel] %*% diag(sqrt(1 / vp2[sel])) %*% tv[sel, ]
  } else {
    S_inv_12 <- NULL
  }
  return(list(Sigma_est = Sig_est, k = k, nb_nn0 = nb_nn0,
              S_inv_12 = S_inv_12, order = ord))
}
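# Hedged usage sketch (not part of the package sources), reusing E from the
# roxygen example above: the rank is chosen by parallel analysis ("PA") and
# the threshold by the Bickel-Levina cross-validation ("BL"). Slower than
# the defaults, but can be more accurate when n is large enough.
res_pabl <- Sigma_estimation(E, method_k = "PA", times = 10,
                             method_0 = "BL", N = 10)
res_pabl$k       # estimated rank of the low rank part
res_pabl$nb_nn0  # estimated number of non null upper-triangular values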
/scratch/gouwar.j/cran-all/cranData/BlockCov/R/Sigma_estimation.R
#' This function generates a block structured symmetric positive definite matrix to test the BlockCov methodology. #' #' @param q integer corresponding to the size of the covariance matrix. #' @param diag logical, whether or not the covariance matrix is block-diagonal. #' @param equal logical, whether or not the values in the blocks are equal. #' @return Sigma a correlation matrix to test the BlockCov methodology. #' @importFrom Matrix Matrix #' @examples #' Sigma <- Simu_Sigma(q = 100, diag = FALSE, equal = TRUE) #' Matrix::image(Sigma) #' @export Simu_Sigma <- function(q, diag = TRUE, equal = TRUE) { list_a <- c(floor(0.1 * q), floor(0.2 * q), floor(0.3 * q), floor(0.2 * q), floor(0.2 * q)) list_rho <- c(0.7, 0.75, 0.65, 0.8, 0.7) nb_bloc <- length(list_a) position <- cumsum(list_a) Z <- Matrix(0, nrow = q, ncol = nb_bloc) if (equal) { Z[1:position[1], 1] <- rep(sqrt(list_rho[1]), list_a[1]) for (i in 2:nb_bloc) { Z[(position[(i - 1)] + 1):position[i], i] <- rep(sqrt(list_rho[i]), list_a[i]) } } else { Z[1:position[1], 1] <- runif(list_a[1], sqrt(0.6), sqrt(0.8)) for (i in 2:nb_bloc) { if (i == 3) { Z[(position[(i - 1)] + 1):position[i], i] <- runif(list_a[i], sqrt(0.3), sqrt(0.4)) } else { Z[(position[(i - 1)] + 1):position[i], i] <- runif(list_a[i], sqrt(0.6), sqrt(0.8)) } } } if (!diag) Z[floor(0.35 * q):floor(0.45 * q), 4] <- -0.5 Sigma <- Z %*% t(Z) diag(Sigma) <- 1 return(Sigma) }
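# Hedged variant sketch (not part of the package sources): a block-diagonal
# Sigma whose within-block values are drawn at random instead of being equal.
Sigma_uneq <- Simu_Sigma(q = 100, diag = TRUE, equal = FALSE)
Matrix::image(Sigma_uneq)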
/scratch/gouwar.j/cran-all/cranData/BlockCov/R/Simu_Sigma.R
#' Cross-validation choice of the number of non null values
#'
#' @param E the observation matrix such that each of its rows has a block structure correlation matrix Sigma to estimate up to a permutation of its columns and rows.
#' @param v_ord the absolute values of the upper triangular part of the matrix \eqn{\Gamma} (including its diagonal) sorted in
#' increasing order
#' @param N number of replications in the "cross-validation"
#'
#' @return the number of non null values selected for the estimation of the covariance matrix
#' @details In order to get the threshold one must do rev(v_ord)[cv_bl(E, v_ord, N=N)]
#' @export
#'
#' @examples
#' n <- 30
#' q <- 100
#' Sigma <- Simu_Sigma(q = q, diag = FALSE, equal = TRUE)
#' Matrix::image(Sigma)
#' E <- matrix(rnorm(n * q), ncol = q) %*% chol(as.matrix(Sigma))
#' k <- 5
#' v_up <- est_up(E, k = k)
#' a_vup <- abs(v_up)
#' ord_vup <- order(a_vup)
#' v_ord <- a_vup[ord_vup]
#' N <- 10
#' nb_nn0 <- cv_bl(E, v_ord, N=N)
#' tresh <- rev(v_ord)[nb_nn0]
cv_bl <- function(E, v_ord, N){
  n <- nrow(E)
  n1 <- round(n*(1- 1/log(n)))
  r_hat <- lapply(1:N, function(i){
    s1 <- sample(seq_len(n), n1)
    s2 <- seq_len(n)[-s1]
    v1 <- est_up(E[s1, ])
    v2 <- est_up(E[s2, ])
    ord <- order(abs(v1))
    v1 <- v1[ord]
    v2 <- v2[ord]
    dif <- (v1 - v2)^2
    ajout <- v2^2 - dif
    fin <- c(sum(dif), ajout)
    r_hat <- cumsum(fin)
    reord <- findInterval(v_ord, abs(v1), left.open = TRUE)
    r_hat[c((reord + 1))]
  })
  r_hat_mean <- Reduce("+", r_hat)
  v_ord[which.min(r_hat_mean)]
  which.min(rev(r_hat_mean))
}

#' Parallel analysis for the rank of the correlation matrix
#'
#' @param E the observation matrix such that each of its rows has a block structure correlation matrix Sigma which has a low rank once its diagonal is removed.
#' @param times number of random samplings
#'
#' @return the mean of the eigenvalues of the \code{times} sampled matrices
#' @export
#'
#' @examples
#' n <- 30
#' q <- 100
#' Sigma <- Simu_Sigma(q = q, diag = FALSE, equal = TRUE)
#' Matrix::image(Sigma)
#' E <- matrix(rnorm(n * q), ncol = q) %*% chol(as.matrix(Sigma))
#' random_eigen <- PA(E, times = 10)
PA <- function(E, times = 10){
  q <- ncol(E)
  Reduce("+", lapply(1:times, function(lalala){
    corEs <- cor(as.matrix(apply(E, 2, sample)))
    Pti_sigs <- Matrix(0, ncol = (q - 1), nrow = (q - 1))
    Pti_sigs[upper.tri(Pti_sigs, diag = TRUE)] <- corEs[upper.tri(corEs)]
    Pti_sigs[lower.tri(Pti_sigs)] <- t(as.matrix(Pti_sigs))[lower.tri(t(as.matrix(Pti_sigs)))]
    res_svd_corE <- svd(as.matrix(Pti_sigs))
    res_svd_corE$d
  })) / times
}

#' Low rank approximation of the correlation matrix without its diagonal
#'
#' @param E the observation matrix such that each of its rows has a block structure correlation matrix Sigma which has a low rank once its diagonal is removed.
#' @param k the rank of the correlation matrix of \code{E} once its diagonal has been removed
#'
#' @return an approximation of the correlation matrix of \code{E} with its diagonal removed
#' @export
#'
#' @examples
#' n <- 30
#' q <- 100
#' Sigma <- Simu_Sigma(q = q, diag = FALSE, equal = TRUE)
#' Matrix::image(Sigma)
#' E <- matrix(rnorm(n * q), ncol = q) %*% chol(as.matrix(Sigma))
#' k <- 5
#' v_up <- est_up(E, k = k)
est_up <- function(E, k = 5){
  q <- ncol(E)
  corE <- cor(as.matrix(E))
  Pti_sig <- Matrix(0, ncol = (q - 1), nrow = (q - 1))
  Pti_sig[upper.tri(Pti_sig, diag = TRUE)] <- corE[upper.tri(corE)]
  Pti_sig[lower.tri(Pti_sig)] <- t(as.matrix(Pti_sig))[lower.tri(t(as.matrix(Pti_sig)))]
  res_svd_corE <- svd(as.matrix(Pti_sig), nu = k, nv = k)
  vp_corE <- res_svd_corE$d
  U_corE <- res_svd_corE$u
  tV_corE <- t(res_svd_corE$v)
  largest_vp_corE <- vp_corE[1:k]
  corE_aprx <- U_corE[, 1:k] %*% diag(largest_vp_corE) %*% tV_corE[1:k, ]
  return(corE_aprx[upper.tri(corE_aprx)])
}
/scratch/gouwar.j/cran-all/cranData/BlockCov/R/cv_pa.R
#' Pipe operator #' #' See \code{magrittr::\link[magrittr]{\%>\%}} for details. #' #' @name %>% #' @rdname pipe #' @keywords internal #' @export #' @importFrom magrittr %>% #' @usage lhs \%>\% rhs NULL
/scratch/gouwar.j/cran-all/cranData/BlockCov/R/pipe_operator.R
#' This function fits two simple linear regressions to a numerical vector sorted in non-decreasing order and returns the index corresponding to the estimated change between the two regression models.
#'
#' @param Y numerical vector sorted in non-decreasing order.
#' @return K the index corresponding to the estimated change between the two linear regression models.
#' @importFrom Matrix Matrix
#' @importFrom dplyr arrange filter mutate cummean
#' @importFrom tibble tibble rowid_to_column
#' @importFrom rlang .data
#' @examples
#' n <- 30
#' q <- 100
#' Sigma <- Simu_Sigma(q = q, diag = FALSE, equal = TRUE)
#' Matrix::image(Sigma)
#' E <- matrix(rnorm(n * q), ncol = q) %*% chol(as.matrix(Sigma))
#' corE <- cor(as.matrix(E))
#' vec_up_emp <- corE[upper.tri(corE)]
#' G <- matrix(0, ncol = (q - 1), nrow = (q - 1))
#' G[upper.tri(G, diag = TRUE)] <- vec_up_emp
#' G[lower.tri(G)] <- t(as.matrix(G))[lower.tri(t(as.matrix(G)))]
#' res_svd <- svd(G)
#' vp <- res_svd$d
#' slope_change(vp)
#' @export
slope_change <- function(Y) {
  tb <- tibble(y = Y) %>%
    arrange(desc(.data$y)) %>%
    rowid_to_column(var = "x") %>%
    mutate(
      mx = (.data$x + 1) / 2,
      my = cummean(.data$y),
      pxy = .data$x * .data$y,
      spxy = cumsum(.data$pxy),
      s2xy = .data$spxy - .data$mx * .data$x * .data$my,
      s2x = (.data$x * (.data$x + 1) * (.data$x - 1)) / 12,
      b = .data$s2xy / .data$s2x,
      a = .data$my - .data$b * .data$mx,
      y2 = cumsum(.data$y^2),
      e = .data$y2 + (.data$b^2 * (.data$x * (.data$x + 1) * (2 * .data$x + 1)) / 6) +
        2 * .data$a * .data$b * .data$x * .data$mx + .data$x * .data$a^2 -
        2 * .data$b * .data$spxy - 2 * .data$a * .data$my * .data$x
    ) %>%
    filter(.data$x != n())
  n <- length(Y)
  tb2 <- tibble(y = Y) %>%
    arrange(desc(.data$y)) %>%
    rowid_to_column(var = "x") %>%
    arrange(.data$y) %>%
    rowid_to_column(var = "k") %>%
    mutate(
      mx = (.data$x + n) / 2,
      my = cummean(.data$y),
      pxy = .data$x * .data$y,
      spxy = cumsum(.data$pxy),
      s2xy = .data$spxy - .data$mx * .data$k * .data$my,
      s2xi = (n * (n + 1) * (2 * n + 1) - .data$x * (.data$x - 1) * (2 * .data$x - 1)) / 6,
      s2x = .data$s2xi - .data$k * (n + .data$x)^2 / 4,
      b = .data$s2xy / .data$s2x,
      a = .data$my - .data$b * .data$mx,
      y2 = cumsum(.data$y^2),
      e = .data$y2 + (.data$b^2 * .data$s2xi) + 2 * .data$a * .data$b * .data$k * .data$mx +
        .data$k * .data$a^2 - 2 * .data$b * .data$spxy - 2 * .data$a * .data$my * .data$k
    ) %>%
    filter(.data$x != n())
  errors <- tb$e + rev(tb2$e)
  which.min(errors) - 1
}
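# Hedged illustration (not part of the package sources; assumes the package
# and its dplyr/tibble imports are loaded): a synthetic vector whose ordered
# values fall on two different lines, with the break after the 10th value,
# so the returned index should be close to 10.
y <- c(seq(100, 10, length.out = 10), seq(9.8, 0.1, length.out = 90))
slope_change(y)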
/scratch/gouwar.j/cran-all/cranData/BlockCov/R/slope_change.R
## ----setup, include=FALSE------------------------------------------------ knitr::opts_chunk$set(echo = TRUE, message=FALSE) library(BlockCov) set.seed(516) ## ---- , eval =FALSE------------------------------------------------------ # devtools::install_github("Marie-PerrotDockes/BlockCov") ## ------------------------------------------------------------------------ q <- 100 Sigma <- Simu_Sigma(q = q, diag = FALSE, equal = TRUE) ## ----fig0, fig.cap="\\label{fig:fig0}",fig.width=3.5,fig.height=3.5,echo=FALSE---- Matrix::image(Sigma) ## ------------------------------------------------------------------------ n <- 30 E <- matrix(rnorm(n * q), ncol = q) %*% chol(as.matrix(Sigma)) ## ------------------------------------------------------------------------ k <- 5 nb_nn0 <- sum(Sigma[upper.tri(Sigma, diag = FALSE)] != 0) res_known <- Sigma_estimation(E, k = k, nb_nn0 = nb_nn0) ## ----fig1, fig.cap="\\label{fig:fig1}",fig.width=3.5,fig.height=3.5------ Matrix::image(res_known$Sigma_est) ## ----fig2, fig.cap="\\label{fig:fig2}",fig.width=3.5,fig.height=3.5------ Matrix::image(Matrix::Matrix(cor(E))) ## ----warning=FALSE------------------------------------------------------- res <-Sigma_estimation(E, method_k = "Cattell", method_0 = "Elbow") ## ---- eval = FALSE------------------------------------------------------- # res <-Sigma_estimation(E) ## ------------------------------------------------------------------------ res_pabl <- Sigma_estimation(E, method_k = "PA", method_0 = "BL") ## ----fig3, fig.cap="\\label{fig:fig3}",fig.width=3.5,fig.height=3.5------ Matrix::image(res$Sigma_est) ## ----fig3pabl, fig.cap="\\label{fig:fig3pabl}",fig.width=3.5,fig.height=3.5---- Matrix::image(res_pabl$Sigma_est) ## ------------------------------------------------------------------------ res_both <- Sigma_estimation(E, method_k = "Cattell", method_0 = "Elbow", inv_12 = TRUE) ## ------------------------------------------------------------------------ samp <- sample(1:q, q, replace = FALSE) Sigma_samp <- Sigma[samp, samp] ## ----fig4, fig.cap="\\label{fig:fig4}",fig.width=3.5,fig.height=3.5------ Matrix::image(Sigma_samp) ## ------------------------------------------------------------------------ E <- matrix(rnorm(n * q), ncol = q) %*% chol(as.matrix(Sigma_samp)) res_samp <- Sigma_estimation(E, reorder = TRUE, inv_12 = TRUE) ## ----fig5, fig.cap="\\label{fig:fig5}",fig.width=3.5,fig.height=3.5------ Matrix::image(res_samp$Sigma_est) ## ----fig6, fig.cap="\\label{fig:fig6}",fig.width=3.5,fig.height=3.5------ ord <- res_samp$order Matrix::image(res_samp$Sigma_est[ord, ord]) ## ----fig7, fig.cap="\\label{fig:fig7}",fig.width=3.5,fig.height=3.5------ Matrix::image(Sigma_samp[ord, ord]) ## ----fig8, fig.cap="\\label{fig:fig8}",fig.width=3.5,fig.height=3.5------ Matrix::image(res_samp$S_inv_12 %*% Sigma_samp %*%res_samp$S_inv_12)
/scratch/gouwar.j/cran-all/cranData/BlockCov/inst/doc/Vignettes.R
--- title: "BlockCov package" author: "Marie Perrot-Dockès, Céline Lévy-Leduc" date: "`r Sys.Date()`" output: pdf_document vignette: > %\VignetteEngine{knitr::knitr} %\VignetteIndexEntry{BlockCov package} %VignetteEncoding{UTF-8} bibliography: REFERENCES.bib --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE, message=FALSE) library(BlockCov) set.seed(516) ``` # Installation ```{r, , eval =FALSE} devtools::install_github("Marie-PerrotDockes/BlockCov") ``` # Introduction This package implements the algorithm proposed by @blc. For further details we refer the reader to this paper. We shall consider the following framework. Let $\boldsymbol{E}_1, \boldsymbol{E}_2,\cdots,\boldsymbol{E}_n$, $n$ zero-mean i.i.d. $q$-dimensional random vectors having a covariance matrix $\boldsymbol{\Sigma}$ such that the number $q$ of its rows and columns is much larger than $n$. The goal of the package is to propose a new estimator of $\boldsymbol{\Sigma}$ and of the square root of its inverse in the particular case where $\boldsymbol{\Sigma}$ is assumed to have a block structure without limiting ourselves to diagonal blocks. More precisely, in this paper, we shall assume that $$ \boldsymbol{\Sigma}=\boldsymbol{Z}\;\boldsymbol{Z}'+\boldsymbol{D}, $$ where $\boldsymbol{Z}$ is a $q \times k$ sparse matrix with $k\ll q$, $\boldsymbol{Z}'$ denotes the transpose of the matrix $\boldsymbol{Z}$ and $\boldsymbol{D}$ is a diagonal matrix such that the diagonal terms of $\boldsymbol{\Sigma}$ are equal to one. Our approach consists in providing a low rank matrix approximation of the $\boldsymbol{Z}\;\boldsymbol{Z}'$ part of $\boldsymbol{\Sigma}$ and then in using a $\ell_1$ regularization in order to obtain a sparse estimator of $\boldsymbol{\Sigma}$. More precisely, since $\boldsymbol{\Sigma}$ is a correlation matrix, it is a symmetric matrix with ones on its diagonal, thus all the information is contained in its upper triangular part without its diagonal. If we know $\boldsymbol{P}$ the $(q-1)\times (q-1)$ symmetric matrix, which has for upper triangular part the upper triangular part of $\boldsymbol{\Sigma}$ without its diagonal, we know $\boldsymbol{\Sigma}$. The matrix $\boldsymbol{P}$ has the advantage to have a low rank. In the following, we propose to first estimate the block matrix $\boldsymbol{P}$. We shall moreover propose a methodology to estimate $\boldsymbol{\Sigma}$ in the case where the block structure is latent that is when the columns and rows of $\boldsymbol{\Sigma}$ have to be permuted according to an unknown permutation in order to make the block structure appear. In this case, a hierarchical clustering step has to be applied beforehand. # Simulation of $\boldsymbol{\Sigma}$ having a block structure In order to generate a matrix $\boldsymbol{\Sigma}$ having a block structure with extra-diagonal blocks and $q=100$, we can use the function `|Simu_Sigma|` as follows: ```{r} q <- 100 Sigma <- Simu_Sigma(q = q, diag = FALSE, equal = TRUE) ``` The matrix $\boldsymbol{\Sigma}$ is displayed in Figure \ref{fig:fig0}. ```{r fig0, fig.cap="\\label{fig:fig0}",fig.width=3.5,fig.height=3.5,echo=FALSE} Matrix::image(Sigma) ``` Using the matrix $\boldsymbol{\Sigma}$ generated by the function `|Simu_Sigma|` a $n\times q$ matrix $\boldsymbol{E}$ was generated such that its rows are independent zero-mean Gaussian random vectors having a covariance matrix equal to $\boldsymbol{\Sigma}$ and $n=30$. 
```{r} n <- 30 E <- matrix(rnorm(n * q), ncol = q) %*% chol(as.matrix(Sigma)) ``` # Estimation of $\boldsymbol{\Sigma}$ (without estimating and $\boldsymbol{\Sigma}^{-1/2}$) ## Estimation of $\boldsymbol{\Sigma}$ when the parameters are known In order to get an estimator of $\boldsymbol{\Sigma}$ the function `|Sigma_estimation|` was applied. Since the data set was simulated, the rank of $\boldsymbol{P}$, the sub-matrix of $\boldsymbol{\Sigma}$, and its number of non null values are known. ```{r} k <- 5 nb_nn0 <- sum(Sigma[upper.tri(Sigma, diag = FALSE)] != 0) res_known <- Sigma_estimation(E, k = k, nb_nn0 = nb_nn0) ``` Our estimator $$\widehat{\boldsymbol{\Sigma}}$$ of $\boldsymbol{\Sigma}$ is given by `|res_known$Sigma_est|`. It is displayed in Figure \ref{fig:fig1} and is obtained by using: ```{r fig1, fig.cap="\\label{fig:fig1}",fig.width=3.5,fig.height=3.5} Matrix::image(res_known$Sigma_est) ``` The Frobenius norm $\|\boldsymbol{\Sigma}-\widehat{\boldsymbol{\Sigma}}\|$ is equal to `r round(norm(as.matrix(Sigma-res_known$Sigma_est), 'F'), 1)`. For comparison purpose, the sample correlation matrix is displayed in Figure \ref{fig:fig2}. ```{r fig2, fig.cap="\\label{fig:fig2}",fig.width=3.5,fig.height=3.5} Matrix::image(Matrix::Matrix(cor(E))) ``` The Frobenius norm $\|\boldsymbol{\Sigma}-\widehat{\boldsymbol{\Sigma}}_{\textrm{emp}}\|$ is equal to `r round(norm(as.matrix(Sigma-cor(E)), 'F'), 1)`, where $\widehat{\boldsymbol{\Sigma}}_{\textrm{emp}}$ denotes the sample correlation matrix. ## Estimation of $\boldsymbol{\Sigma}$ when the parameters are unknown In practice, the number of non null values and the rank of $\boldsymbol{P}$ are unknown. These parameters can be both estimated using the function `|Sigma_estimation|`. For choosing the rank of $\boldsymbol{P}$, two strategies are available in the package and compared in @blc: * The first strategy is the \textsf{Cattell} criterion based on the Cattell's scree plot described in @cattell1966. In the package, it can be used by setting \verb|method_k = "Cattell"| in the \verb|Sigma_estimation| function. * The second strategy is the \textsf{PA} permutation method proposed by @Horn1965. In the package, it can be used by setting \verb|method_k = "PA"| in the \verb|Sigma_estimation| function. To choose the number of non null values two methodologies are also available in the package and compared in @blc: * The \textsf{Elbow} method described in @blc. In the package, it can be used by setting \verb|method_0 = "Elbow"| in \verb|Sigma_estimation| function and * the \textsf{BL} approach proposed in @bickel2008 based on cross-validation. In the package, it can be used by setting \verb|method_0 = "BL"| in the \verb|Sigma_estimation| function. For example, an estimator of $\boldsymbol{\Sigma}$ using the \textsf{Cattell} criterion and the \textsf{Elbow} method can be obtained by using the `|Sigma_estimation|` function as follows: ```{r,warning=FALSE} res <-Sigma_estimation(E, method_k = "Cattell", method_0 = "Elbow") ``` It has to be noticed that "Cattell" and "Elbow" are the default value for `|method_k|` and `|method_0|` respectively. Hence, the same result can be obtain by using : ```{r, eval = FALSE} res <-Sigma_estimation(E) ``` The corresponding estimator of $\boldsymbol{\Sigma}$ is displayed in Figure \ref{fig:fig3}. The estimated rank and the estimated number of non null values can be obtained by `|res$k|` and `|res$nb_nn0|`, respectively. 
Here, the estimated rank is equal to `r res$k` and the estimated number of non null values is `r res$nb_nn0`. Note that the true values of these parameters are `r res_known$k` and `r res_known$nb_nn0`. An estimator of $\boldsymbol{\Sigma}$ using the \textsf{PA} criterion and the \textsf{BL} method can be obtained by using the \verb|Sigma_estimation| function as follows: ```{r} res_pabl <- Sigma_estimation(E, method_k = "PA", method_0 = "BL") ``` The corresponding estimator of $\boldsymbol{\Sigma}$ is displayed in Figure \ref{fig:fig3pabl}. Here, the estimated rank is equal to `r res_pabl$k` and the estimated number of non null values is `r res_pabl$nb_nn0`. This second approach is a little bit slower than the first one especially for large values of $q$ but can be more accurate when the number of samples is large enough, see @blc for further details on the numerical and statistical performance of the different strategies. ```{r fig3, fig.cap="\\label{fig:fig3}",fig.width=3.5,fig.height=3.5} Matrix::image(res$Sigma_est) ``` ```{r fig3pabl, fig.cap="\\label{fig:fig3pabl}",fig.width=3.5,fig.height=3.5} Matrix::image(res_pabl$Sigma_est) ``` We can see from this figure that the estimation of $\boldsymbol{\Sigma}$ does not seem to be altered by having to estimate the number of non null values and the rank of the matrix. The Frobenius norm $\|\boldsymbol{\Sigma}-\widehat{\boldsymbol{\Sigma}}\|$ is equal to `r round(norm(as.matrix(Sigma-res$Sigma_est), 'F'), 1)` for the first estimator and to `r round(norm(as.matrix(Sigma-res_pabl$Sigma_est), 'F'), 1)` for the second one. # Estimator of $\boldsymbol{\Sigma}^{-1/2}$ obtained from an estimator of $\boldsymbol{\Sigma}$ An estimator of $\boldsymbol{\Sigma}^{-1/2}$ can be obtained using the \verb|Sigma_estimation| function by setting the arguments \verb|res$S_inv_12| to true as follows: ```{r} res_both <- Sigma_estimation(E, method_k = "Cattell", method_0 = "Elbow", inv_12 = TRUE) ``` An estimator of $\boldsymbol{\Sigma}$ is obtained with \verb|res_both$Sigma_est| and an estimator of $\boldsymbol{\Sigma}^{-1/2}$ is obtained with \verb|res_both$S_inv_12|. It can be used to remove the dependence that may exist between the columns of $\boldsymbol{E}$. # Estimation of $\boldsymbol{\Sigma}$ and $\boldsymbol{\Sigma}^{-1/2}$ when the block structure is latent In practice, it is possible that the block structure of $\boldsymbol{\Sigma}$ only appears after having permuted its rows and columns according to a well chosen permutation. We explain hereafter how to estimate $\boldsymbol{\Sigma}$ and $\boldsymbol{\Sigma}^{-1/2}$ in this case. We first generate such a matrix by applying a random permutation to the rows and columns of the matrix $\boldsymbol{\Sigma}$ previously generated. ```{r} samp <- sample(1:q, q, replace = FALSE) Sigma_samp <- Sigma[samp, samp] ``` The corresponding matrix is displayed in Figure \ref{fig:fig4}. ```{r fig4, fig.cap="\\label{fig:fig4}",fig.width=3.5,fig.height=3.5} Matrix::image(Sigma_samp) ``` In such a situation where the columns and rows have to be permuted according to an unknown permutation, we propose to use a hierarchical clustering as the first step of our methodology and then use the same strategy. This is performed by putting \verb|reorder=TRUE| in the arguments of the function \verb|Sigma_estimation|. ```{r} E <- matrix(rnorm(n * q), ncol = q) %*% chol(as.matrix(Sigma_samp)) res_samp <- Sigma_estimation(E, reorder = TRUE, inv_12 = TRUE) ``` The estimated matrix is displayed in Figure \ref{fig:fig5}. 
```{r fig5, fig.cap="\\label{fig:fig5}",fig.width=3.5,fig.height=3.5} Matrix::image(res_samp$Sigma_est) ``` The permutation to make the block structure appear is available from \verb|res_samp$order|. The corresponding estimated correlation matrix in which the columns have been permuted in order to make the block structure appear is obtained using the following lines and is displayed in Figure \ref{fig:fig6}: ```{r fig6, fig.cap="\\label{fig:fig6}",fig.width=3.5,fig.height=3.5} ord <- res_samp$order Matrix::image(res_samp$Sigma_est[ord, ord]) ``` This matrix has to be compared with the following one displayed in Figure \ref{fig:fig7}: ```{r fig7, fig.cap="\\label{fig:fig7}",fig.width=3.5,fig.height=3.5} Matrix::image(Sigma_samp[ord, ord]) ``` Once again, our strategy does not seem to be altered by the permutation of the columns of the original matrix $\boldsymbol{\Sigma}$. The Frobenius norm of the error is equal to `r round(norm(as.matrix(Sigma_samp-res_samp$Sigma_est), 'F'), 1)`. In this situation $\widehat{\boldsymbol{\Sigma}}^{-1/2}$ is still available. The matrix $\widehat{\boldsymbol{\Sigma}}^{-1/2}\boldsymbol{\Sigma}\widehat{\boldsymbol{\Sigma}}^{-1/2}$, which is displayed in Figure \ref{fig:fig8}, should be close to the identity matrix: ```{r fig8, fig.cap="\\label{fig:fig8}",fig.width=3.5,fig.height=3.5} Matrix::image(res_samp$S_inv_12 %*% Sigma_samp %*%res_samp$S_inv_12) ``` The associated Frobenius norm $||\widehat{\boldsymbol{\Sigma}}^{-1/2}\boldsymbol{\Sigma}\widehat{\boldsymbol{\Sigma}}^{-1/2}-\textrm{Id}_q||=$ `r round(norm(as.matrix(res_samp$S_inv_12%*%Sigma_samp%*%t(res_samp$S_inv_12)- diag(1,q))),1)`. All the values of the Frobenius norms are quite close meaning that our methodology is efficient even when the parameters are unknown and when the columns and rows have to be permuted in order to make the block structure appear. # References
/scratch/gouwar.j/cran-all/cranData/BlockCov/inst/doc/Vignettes.Rmd
--- title: "BlockCov package" author: "Marie Perrot-Dockès, Céline Lévy-Leduc" date: "`r Sys.Date()`" output: pdf_document vignette: > %\VignetteEngine{knitr::knitr} %\VignetteIndexEntry{BlockCov package} %VignetteEncoding{UTF-8} bibliography: REFERENCES.bib --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE, message=FALSE) library(BlockCov) set.seed(516) ``` # Installation ```{r, , eval =FALSE} devtools::install_github("Marie-PerrotDockes/BlockCov") ``` # Introduction This package implements the algorithm proposed by @blc. For further details we refer the reader to this paper. We shall consider the following framework. Let $\boldsymbol{E}_1, \boldsymbol{E}_2,\cdots,\boldsymbol{E}_n$, $n$ zero-mean i.i.d. $q$-dimensional random vectors having a covariance matrix $\boldsymbol{\Sigma}$ such that the number $q$ of its rows and columns is much larger than $n$. The goal of the package is to propose a new estimator of $\boldsymbol{\Sigma}$ and of the square root of its inverse in the particular case where $\boldsymbol{\Sigma}$ is assumed to have a block structure without limiting ourselves to diagonal blocks. More precisely, in this paper, we shall assume that $$ \boldsymbol{\Sigma}=\boldsymbol{Z}\;\boldsymbol{Z}'+\boldsymbol{D}, $$ where $\boldsymbol{Z}$ is a $q \times k$ sparse matrix with $k\ll q$, $\boldsymbol{Z}'$ denotes the transpose of the matrix $\boldsymbol{Z}$ and $\boldsymbol{D}$ is a diagonal matrix such that the diagonal terms of $\boldsymbol{\Sigma}$ are equal to one. Our approach consists in providing a low rank matrix approximation of the $\boldsymbol{Z}\;\boldsymbol{Z}'$ part of $\boldsymbol{\Sigma}$ and then in using a $\ell_1$ regularization in order to obtain a sparse estimator of $\boldsymbol{\Sigma}$. More precisely, since $\boldsymbol{\Sigma}$ is a correlation matrix, it is a symmetric matrix with ones on its diagonal, thus all the information is contained in its upper triangular part without its diagonal. If we know $\boldsymbol{P}$ the $(q-1)\times (q-1)$ symmetric matrix, which has for upper triangular part the upper triangular part of $\boldsymbol{\Sigma}$ without its diagonal, we know $\boldsymbol{\Sigma}$. The matrix $\boldsymbol{P}$ has the advantage to have a low rank. In the following, we propose to first estimate the block matrix $\boldsymbol{P}$. We shall moreover propose a methodology to estimate $\boldsymbol{\Sigma}$ in the case where the block structure is latent that is when the columns and rows of $\boldsymbol{\Sigma}$ have to be permuted according to an unknown permutation in order to make the block structure appear. In this case, a hierarchical clustering step has to be applied beforehand. # Simulation of $\boldsymbol{\Sigma}$ having a block structure In order to generate a matrix $\boldsymbol{\Sigma}$ having a block structure with extra-diagonal blocks and $q=100$, we can use the function `|Simu_Sigma|` as follows: ```{r} q <- 100 Sigma <- Simu_Sigma(q = q, diag = FALSE, equal = TRUE) ``` The matrix $\boldsymbol{\Sigma}$ is displayed in Figure \ref{fig:fig0}. ```{r fig0, fig.cap="\\label{fig:fig0}",fig.width=3.5,fig.height=3.5,echo=FALSE} Matrix::image(Sigma) ``` Using the matrix $\boldsymbol{\Sigma}$ generated by the function `|Simu_Sigma|` a $n\times q$ matrix $\boldsymbol{E}$ was generated such that its rows are independent zero-mean Gaussian random vectors having a covariance matrix equal to $\boldsymbol{\Sigma}$ and $n=30$. 
```{r} n <- 30 E <- matrix(rnorm(n * q), ncol = q) %*% chol(as.matrix(Sigma)) ``` # Estimation of $\boldsymbol{\Sigma}$ (without estimating and $\boldsymbol{\Sigma}^{-1/2}$) ## Estimation of $\boldsymbol{\Sigma}$ when the parameters are known In order to get an estimator of $\boldsymbol{\Sigma}$ the function `|Sigma_estimation|` was applied. Since the data set was simulated, the rank of $\boldsymbol{P}$, the sub-matrix of $\boldsymbol{\Sigma}$, and its number of non null values are known. ```{r} k <- 5 nb_nn0 <- sum(Sigma[upper.tri(Sigma, diag = FALSE)] != 0) res_known <- Sigma_estimation(E, k = k, nb_nn0 = nb_nn0) ``` Our estimator $$\widehat{\boldsymbol{\Sigma}}$$ of $\boldsymbol{\Sigma}$ is given by `|res_known$Sigma_est|`. It is displayed in Figure \ref{fig:fig1} and is obtained by using: ```{r fig1, fig.cap="\\label{fig:fig1}",fig.width=3.5,fig.height=3.5} Matrix::image(res_known$Sigma_est) ``` The Frobenius norm $\|\boldsymbol{\Sigma}-\widehat{\boldsymbol{\Sigma}}\|$ is equal to `r round(norm(as.matrix(Sigma-res_known$Sigma_est), 'F'), 1)`. For comparison purpose, the sample correlation matrix is displayed in Figure \ref{fig:fig2}. ```{r fig2, fig.cap="\\label{fig:fig2}",fig.width=3.5,fig.height=3.5} Matrix::image(Matrix::Matrix(cor(E))) ``` The Frobenius norm $\|\boldsymbol{\Sigma}-\widehat{\boldsymbol{\Sigma}}_{\textrm{emp}}\|$ is equal to `r round(norm(as.matrix(Sigma-cor(E)), 'F'), 1)`, where $\widehat{\boldsymbol{\Sigma}}_{\textrm{emp}}$ denotes the sample correlation matrix. ## Estimation of $\boldsymbol{\Sigma}$ when the parameters are unknown In practice, the number of non null values and the rank of $\boldsymbol{P}$ are unknown. These parameters can be both estimated using the function `|Sigma_estimation|`. For choosing the rank of $\boldsymbol{P}$, two strategies are available in the package and compared in @blc: * The first strategy is the \textsf{Cattell} criterion based on the Cattell's scree plot described in @cattell1966. In the package, it can be used by setting \verb|method_k = "Cattell"| in the \verb|Sigma_estimation| function. * The second strategy is the \textsf{PA} permutation method proposed by @Horn1965. In the package, it can be used by setting \verb|method_k = "PA"| in the \verb|Sigma_estimation| function. To choose the number of non null values two methodologies are also available in the package and compared in @blc: * The \textsf{Elbow} method described in @blc. In the package, it can be used by setting \verb|method_0 = "Elbow"| in \verb|Sigma_estimation| function and * the \textsf{BL} approach proposed in @bickel2008 based on cross-validation. In the package, it can be used by setting \verb|method_0 = "BL"| in the \verb|Sigma_estimation| function. For example, an estimator of $\boldsymbol{\Sigma}$ using the \textsf{Cattell} criterion and the \textsf{Elbow} method can be obtained by using the `|Sigma_estimation|` function as follows: ```{r,warning=FALSE} res <-Sigma_estimation(E, method_k = "Cattell", method_0 = "Elbow") ``` It has to be noticed that "Cattell" and "Elbow" are the default value for `|method_k|` and `|method_0|` respectively. Hence, the same result can be obtain by using : ```{r, eval = FALSE} res <-Sigma_estimation(E) ``` The corresponding estimator of $\boldsymbol{\Sigma}$ is displayed in Figure \ref{fig:fig3}. The estimated rank and the estimated number of non null values can be obtained by `|res$k|` and `|res$nb_nn0|`, respectively. 
Here, the estimated rank is equal to `r res$k` and the estimated number of non null values is `r res$nb_nn0`. Note that the true values of these parameters are `r res_known$k` and `r res_known$nb_nn0`. An estimator of $\boldsymbol{\Sigma}$ using the \textsf{PA} criterion and the \textsf{BL} method can be obtained by using the \verb|Sigma_estimation| function as follows: ```{r} res_pabl <- Sigma_estimation(E, method_k = "PA", method_0 = "BL") ``` The corresponding estimator of $\boldsymbol{\Sigma}$ is displayed in Figure \ref{fig:fig3pabl}. Here, the estimated rank is equal to `r res_pabl$k` and the estimated number of non null values is `r res_pabl$nb_nn0`. This second approach is a little bit slower than the first one especially for large values of $q$ but can be more accurate when the number of samples is large enough, see @blc for further details on the numerical and statistical performance of the different strategies. ```{r fig3, fig.cap="\\label{fig:fig3}",fig.width=3.5,fig.height=3.5} Matrix::image(res$Sigma_est) ``` ```{r fig3pabl, fig.cap="\\label{fig:fig3pabl}",fig.width=3.5,fig.height=3.5} Matrix::image(res_pabl$Sigma_est) ``` We can see from this figure that the estimation of $\boldsymbol{\Sigma}$ does not seem to be altered by having to estimate the number of non null values and the rank of the matrix. The Frobenius norm $\|\boldsymbol{\Sigma}-\widehat{\boldsymbol{\Sigma}}\|$ is equal to `r round(norm(as.matrix(Sigma-res$Sigma_est), 'F'), 1)` for the first estimator and to `r round(norm(as.matrix(Sigma-res_pabl$Sigma_est), 'F'), 1)` for the second one. # Estimator of $\boldsymbol{\Sigma}^{-1/2}$ obtained from an estimator of $\boldsymbol{\Sigma}$ An estimator of $\boldsymbol{\Sigma}^{-1/2}$ can be obtained using the \verb|Sigma_estimation| function by setting the arguments \verb|res$S_inv_12| to true as follows: ```{r} res_both <- Sigma_estimation(E, method_k = "Cattell", method_0 = "Elbow", inv_12 = TRUE) ``` An estimator of $\boldsymbol{\Sigma}$ is obtained with \verb|res_both$Sigma_est| and an estimator of $\boldsymbol{\Sigma}^{-1/2}$ is obtained with \verb|res_both$S_inv_12|. It can be used to remove the dependence that may exist between the columns of $\boldsymbol{E}$. # Estimation of $\boldsymbol{\Sigma}$ and $\boldsymbol{\Sigma}^{-1/2}$ when the block structure is latent In practice, it is possible that the block structure of $\boldsymbol{\Sigma}$ only appears after having permuted its rows and columns according to a well chosen permutation. We explain hereafter how to estimate $\boldsymbol{\Sigma}$ and $\boldsymbol{\Sigma}^{-1/2}$ in this case. We first generate such a matrix by applying a random permutation to the rows and columns of the matrix $\boldsymbol{\Sigma}$ previously generated. ```{r} samp <- sample(1:q, q, replace = FALSE) Sigma_samp <- Sigma[samp, samp] ``` The corresponding matrix is displayed in Figure \ref{fig:fig4}. ```{r fig4, fig.cap="\\label{fig:fig4}",fig.width=3.5,fig.height=3.5} Matrix::image(Sigma_samp) ``` In such a situation where the columns and rows have to be permuted according to an unknown permutation, we propose to use a hierarchical clustering as the first step of our methodology and then use the same strategy. This is performed by putting \verb|reorder=TRUE| in the arguments of the function \verb|Sigma_estimation|. ```{r} E <- matrix(rnorm(n * q), ncol = q) %*% chol(as.matrix(Sigma_samp)) res_samp <- Sigma_estimation(E, reorder = TRUE, inv_12 = TRUE) ``` The estimated matrix is displayed in Figure \ref{fig:fig5}. 
```{r fig5, fig.cap="\\label{fig:fig5}",fig.width=3.5,fig.height=3.5} Matrix::image(res_samp$Sigma_est) ``` The permutation to make the block structure appear is available from \verb|res_samp$order|. The corresponding estimated correlation matrix in which the columns have been permuted in order to make the block structure appear is obtained using the following lines and is displayed in Figure \ref{fig:fig6}: ```{r fig6, fig.cap="\\label{fig:fig6}",fig.width=3.5,fig.height=3.5} ord <- res_samp$order Matrix::image(res_samp$Sigma_est[ord, ord]) ``` This matrix has to be compared with the following one displayed in Figure \ref{fig:fig7}: ```{r fig7, fig.cap="\\label{fig:fig7}",fig.width=3.5,fig.height=3.5} Matrix::image(Sigma_samp[ord, ord]) ``` Once again, our strategy does not seem to be altered by the permutation of the columns of the original matrix $\boldsymbol{\Sigma}$. The Frobenius norm of the error is equal to `r round(norm(as.matrix(Sigma_samp-res_samp$Sigma_est), 'F'), 1)`. In this situation $\widehat{\boldsymbol{\Sigma}}^{-1/2}$ is still available. The matrix $\widehat{\boldsymbol{\Sigma}}^{-1/2}\boldsymbol{\Sigma}\widehat{\boldsymbol{\Sigma}}^{-1/2}$, which is displayed in Figure \ref{fig:fig8}, should be close to the identity matrix: ```{r fig8, fig.cap="\\label{fig:fig8}",fig.width=3.5,fig.height=3.5} Matrix::image(res_samp$S_inv_12 %*% Sigma_samp %*%res_samp$S_inv_12) ``` The associated Frobenius norm $||\widehat{\boldsymbol{\Sigma}}^{-1/2}\boldsymbol{\Sigma}\widehat{\boldsymbol{\Sigma}}^{-1/2}-\textrm{Id}_q||=$ `r round(norm(as.matrix(res_samp$S_inv_12%*%Sigma_samp%*%t(res_samp$S_inv_12)- diag(1,q))),1)`. All the values of the Frobenius norms are quite close meaning that our methodology is efficient even when the parameters are unknown and when the columns and rows have to be permuted in order to make the block structure appear. # References
/scratch/gouwar.j/cran-all/cranData/BlockCov/vignettes/Vignettes.Rmd
#' Variable selection method with multiple block-wise imputation (MBI)
#'
#' Fit a variable selection method with multiple block-wise imputation (MBI).
#'
#' The function uses the penalized generalized method of moments with multiple block-wise imputation to handle block-wise missing data, commonly found in multi-source datasets.
#'
#' @import MASS
#' @import glmnet
#' @import pryr
#' @import doParallel
#' @import foreach
#' @import glmnetcr
#' @import Matrix
#' @importFrom stats sd var lm coef rnorm
#' @param X Design matrix for block-wise missing covariates.
#' @param y Response vector.
#' @param cov_index Starting indexes of covariates in data sources.
#' @param sub_index Starting indexes of subjects in missing groups.
#' @param miss_source Indexes of missing data sources in missing groups, respectively ('NULL' represents no missing).
#' @param complete Logical indicator of whether there is a group of complete cases. If there is a group of complete cases,
#' it should be the first group. 'TRUE' represents that there is a group of complete cases.
#' @param lambda A user supplied sequence of tuning parameter in penalty. If NULL, a sequence is automatically generated.
#' @param eps1 Convergence threshold at a certain stage of the algorithm. Default is 1e-3.
#' @param eps2 Convergence threshold at a certain stage of the algorithm. Default is 1e-7.
#' @param eps3 Convergence threshold at a certain stage of the algorithm. Default is 1e-8.
#' @param max.iter The maximum number of iterations allowed. Default is 1000.
#' @param lambda.min Smallest value for \code{lambda}, as a fraction of the maximum value in \code{lambda}. Default depends on the size of input.
#' @param nlam The number of \code{lambda} values. Default is 100.
#' @param beta0 Initial value for regression coefficients. If NULL, they are initialized automatically.
#' @param a Tuning parameter in the SCAD penalty. Default is 3.7.
#' @param gamma.ebic Parameter in the EBIC criterion. Default is 0.5.
#' @param alpha1 A sequence of candidate values for the step size in the conjugate gradient algorithm. Default is 0.5^(0:12).
#' @param h1 A sequence of candidate values for the parameter in the numerical calculation of the first derivative of the objective function. Default is 2^(-(8:30)).
#' @param ratio Parameter in the numerical calculation of the first derivative. Default is 1.
#'
#' @return \item{beta}{Estimated coefficients matrix with \code{length(lambda)} rows and \code{dim(X)[2]} columns.}
#' \item{lambda}{The actual sequence of \code{lambda} values used.}
#' \item{bic1}{BIC criterion values. '0' should be ignored.}
#' \item{notcon}{Value indicating whether the algorithm has converged or not. '0' represents convergence; otherwise non-convergence.}
#' \item{intercept}{Intercept sequence of length \code{length(lambda)}.}
#' \item{beta0}{Estimated coefficients matrix for standardized \code{X}.}
#' @author Fei Xue and Annie Qu
#' @references Xue, F., and Qu, A. (2021)
#' \emph{Integrating Multisource Block-Wise Missing Data in Model Selection (2021), Journal of the American Statistical Association, Vol. 116(536), 1914-1927}.
#' @examples #' #' library(MASS) #' #' # Number of subjects #' n <- 30 #' #' # Number of total covariates #' p <- 4 #' #' # Number of missing groups of subjects #' ngroup <- 2 #' #' # Number of data sources #' nsource <- 2 #' #' # Starting indexes of covariates in data sources #' cov_index=c(1, 3) #' #' # Starting indexes of subjects in missing groups #' sub_index=c(1, 16) #' #' # Indexes of missing data sources in missing groups, respectively ('NULL' represents no missing) #' miss_source=list(NULL, 1) #' #' # Indicator of whether there is a group of complete cases. If there is a group of complete cases, #' # it should be the first group. #' complete=TRUE #' #' # Create a block-wise missing design matrix X and response vector y #' set.seed(1) #' sigma=diag(1-0.4,p,p)+matrix(0.4,p,p) #' X <- mvrnorm(n,rep(0,p),sigma) #' beta_true <- c(2.5, 0, 3, 0) #' y <- rnorm(n) + X%*%beta_true #' #' for (i in 1:ngroup) { #' if (!is.null(miss_source[[i]])) { #' if (i==ngroup) { #' if (miss_source[[i]]==nsource) { #' X[sub_index[i]:n, cov_index[miss_source[[i]]]:p] = NA #' } else { #' X[sub_index[i]:n, cov_index[miss_source[[i]]]:(cov_index[miss_source[[i]]+1]-1)] = NA #' } #' } else { #' if (miss_source[[i]]==nsource) { #' X[sub_index[i]:(sub_index[i+1]-1), cov_index[miss_source[[i]]]:p] = NA #' } else { #' X[sub_index[i]:(sub_index[i+1]-1), cov_index[miss_source[[i]]]: #' (cov_index[miss_source[[i]]+1]-1)] = NA #' } #' } #' } #' } #' #' # Now we can use the function with this simulated data #' #start.time = proc.time() #' result <- MBI(X=X, y=y, cov_index=cov_index, sub_index=sub_index, miss_source=miss_source, #' complete=complete, nlam = 15, eps2 = 1e-3, h1=2^(-(8:20))) #' #time = proc.time() - start.time #' #' theta=result$beta #' bic1=result$bic1 #' best=which.min(bic1[bic1!=0]) #' beta_est=theta[best,] #' #' #' @export MBI <- function (X, y, cov_index, sub_index, miss_source, complete, lambda=NULL, eps1 = 1e-3, eps2 = 1e-7, eps3=1e-8, max.iter = 1000, lambda.min=ifelse(n>p,.001,.05), nlam=100, beta0=NULL, a=3.7, gamma.ebic=0.5, alpha1=0.5^(0:12), h1=2^(-(8:30)), ratio=1) { dims=dim(X) n=dims[1] p=dims[2] nsource=length(cov_index) C0=NULL #m0=length(index0) #Number of original patterns #nm=dim(missid)[1] #Number of missing sources n_pat=length(sub_index) #Number of missing groups #length(unique(pat)) #Number of patterns # Indexes of covariates in each source cov_source=vector("list", nsource) for (i in 1:nsource) { if (i==nsource) { cov_source[[i]]=cov_index[i]:p } else { cov_source[[i]]=cov_index[i]:(cov_index[i+1]-1) } } # Indexes of missing covariates in each missing group miss_cov=vector("list", n_pat) # Indexes of observed covariates in each missing group obs_cov=vector("list", n_pat) for (i in 1:n_pat) { if (!is.null(miss_source[[i]])) { for (l in 1:length(miss_source[[i]])) { miss_cov[[i]] = c(miss_cov[[i]], cov_source[[miss_source[[i]][l]]]) } obs_cov[[i]]=(1:p)[!(1:p) %in% miss_cov[[i]]] } else { obs_cov[[i]]=1:p } } if (complete==TRUE & !is.null(miss_cov[[1]])) { stop("Complete case group should be the first group.") } # Useful groups for imputation of each missing group useful_group=vector("list", n_pat) # Number of total imputations m=0 for (i in 1:n_pat) { if (!is.null(miss_source[[i]])) { for (j in 1:n_pat) { miss_obs=intersect(miss_cov[[i]], obs_cov[[j]]) obs_obs=intersect(obs_cov[[i]], obs_cov[[j]]) if (j!=i & length(miss_obs)==length(miss_cov[[i]]) & length(obs_obs)>0) { useful_group[[i]]=c(useful_group[[i]], j) m=m+1 } } } else { m=m+1 useful_group[[i]]=i } } 
##################################### Standardization ################################# X2=X centerx=rep(0,p) scalex=rep(1,p) centery=mean(y) y2=y-centery for (j in 1:p) { centerx[j]=mean(X[,j], na.rm=TRUE) scalex[j]=sd(X[,j], na.rm = TRUE) X2[,j]=(X[,j]-centerx[j])/scalex[j] } X3=array(0,dim=c(n,p,m)) XX=array(0,dim=c(n,p,m)) y3=matrix(0,n,m) index1=matrix(FALSE,n,m) # use which sample index2=matrix(TRUE,p,m) # take which derivative in estimating equations pat=rep(0,m) PCA_m=rep(2,m) index0=NULL miss2=is.na(X2) pat_count=1 index2_count=1 for (i in 1:n_pat) { pat[pat_count:(pat_count+length(useful_group[[i]])-1)]=i y3[,pat_count:(pat_count+length(useful_group[[i]])-1)]=y2 if (i<n_pat) { index1[sub_index[i]:(sub_index[i+1]-1), pat_count:(pat_count+length(useful_group[[i]])-1)]=TRUE X3[sub_index[i]:(sub_index[i+1]-1),,pat_count:(pat_count+length(useful_group[[i]])-1)]=X2[sub_index[i]:(sub_index[i+1]-1),] XX[sub_index[i]:(sub_index[i+1]-1),,pat_count:(pat_count+length(useful_group[[i]])-1)]=X2[sub_index[i]:(sub_index[i+1]-1),] } else { index1[sub_index[i]:n, pat_count:(pat_count+length(useful_group[[i]])-1)]=TRUE X3[sub_index[i]:n,,pat_count:(pat_count+length(useful_group[[i]])-1)]=X2[sub_index[i]:n,] XX[sub_index[i]:n,,pat_count:(pat_count+length(useful_group[[i]])-1)]=X2[sub_index[i]:n,] } for (j in 1:length(useful_group[[i]])) { index2[union(miss_cov[[i]], miss_cov[[useful_group[[i]][j]]]), index2_count]=FALSE if (j==1) { PCA_m[index2_count]=1 } if (useful_group[[i]][j]==1) { index0=c(index0, index2_count) } miss_tmp=is.na(X3[which.max(index1[,index2_count]),,index2_count]) if (sum(miss_tmp)>0) { ind_y=(1:p)[miss_tmp] ind_tmp=intersect(obs_cov[[i]], obs_cov[[useful_group[[i]][j]]]) XX[index1[,index2_count]==TRUE,ind_y,index2_count]=imputeglm.predict(X=X2, ind_y=ind_y, ind_x = ind_tmp, miss=miss2, newdata = X3[index1[,index2_count]==TRUE,ind_tmp,index2_count])$PRED } index2_count=index2_count+1 } pat_count=pat_count+length(useful_group[[i]]) } if (!complete) { index0=0 } yy=y3 ##### Next: Define initial values vary=rep(1,m) for (i in 1:m) { vary[i]=var(yy[index1[,i],i]) } # if (is.null(beta0)) { # if (!is.null(lambda)) { # if (lambda==0) { # if (sum(index1[,1])>p) { # beta0=lm(yy[index1[,1],1]~XX[index1[,1],,1])$coef[-1] # } else { # cvfit <- cv.glmnet(XX[index1[,1],,1], yy[index1[,1],1], nfolds=3) # fit <- cvfit$glmnet.fit # beta0=t(as.numeric(coef(fit, s=cvfit$lambda.min))[-1]) # } # } else { # model_0=MBI(X=X, y=y, cov_index=cov_index, sub_index=sub_index, miss_source=miss_source, complete=complete, lambda=0, eps1 = eps1, eps2 = eps2, eps3=eps3, max.iter = max.iter, lambda.min=lambda.min, nlam=nlam, beta0=beta0, C0=C0, a=a, gamma.ebic=gamma.ebic, alpha1=alpha1, h1=h1, ratio = ratio) # beta0=model_0$beta0 # } # } else { # model_0=MBI(X=X, y=y, cov_index=cov_index, sub_index=sub_index, miss_source=miss_source, complete=complete, lambda=0, eps1 = eps1, eps2 = eps2, eps3=eps3, max.iter = max.iter, lambda.min=lambda.min, nlam=nlam, beta0=beta0, C0=C0, a=a, gamma.ebic=gamma.ebic, alpha1=alpha1, h1=h1, ratio = ratio) # beta0=model_0$beta0 # } # } if (is.null(beta0)) { if (!is.null(lambda)) { if (lambda==0) { if (sum(index1[,1])>p & sum(index0==0)==0) { ##### Should we change more? 
beta0=lm(yy[index1[,1],1]~XX[index1[,1],,1])$coef[-1] } else if (sum(index1[,1])<=p & sum(index0==0)==0) { cvfit <- cv.glmnet(XX[index1[,1],,1], yy[index1[,1],1], nfolds=3) fit <- cvfit$glmnet.fit beta0=t(as.numeric(coef(fit, s=cvfit$lambda.min))[-1]) } else if (sum(index0==0)==1) { data_tmp=matrix(0,n,p) for (i in 1:m) { data_tmp=data_tmp+XX[,,i] } for (i in 1:n_pat) { index_tmp=index1[,min(which(pat==i))] data_tmp[index_tmp,]=data_tmp[index_tmp,]/sum(pat==i) } cvfit <- cv.glmnet(data_tmp, yy[,1], nfolds=3) fit <- cvfit$glmnet.fit beta0=t(as.numeric(coef(fit, s=cvfit$lambda.min))[-1]) # } } } else { model_0=MBI(X=X, y=y, cov_index=cov_index, sub_index=sub_index, miss_source=miss_source, complete=complete, lambda=0, eps1 = eps1, eps2 = eps2, eps3=eps3, max.iter = max.iter, lambda.min=lambda.min, nlam=nlam, beta0=beta0, a=a, gamma.ebic=gamma.ebic, alpha1=alpha1, h1=h1, ratio = ratio) beta0=model_0$beta0 } } else { model_0=MBI(X=X, y=y, cov_index=cov_index, sub_index=sub_index, miss_source=miss_source, complete=complete, lambda=0, eps1 = eps1, eps2 = eps2, eps3=eps3, max.iter = max.iter, lambda.min=lambda.min, nlam=nlam, beta0=beta0, a=a, gamma.ebic=gamma.ebic, alpha1=alpha1, h1=h1, ratio = ratio) beta0=model_0$beta0 } } N=sum(index2) N0=sum(index2[,index0]) numequ=apply(index2,2,sum) nn=apply(index1,2,sum) nnn=sqrt(apply(index1,2,sum)) g=rep(0,N) # fg=matrix(0,N,p) fq=rep(0,p) # sq=matrix(0,p,p) # fC=array(0,dim=c(N,N,p)) PCA_ind=array(NA,dim=c(n_pat,2,N)) PCA_ind0=matrix(0,n_pat,2) ind_C=rep(1,n_pat+1) ind_pat=rep(1,n_pat+1) #### similar to pat_start, but has one more element U_ind=array(NA,dim=c(n_pat,2,N)) #U_ind0=matrix(0,n_pat,2) # T1=diag(1,N,N) # T2=matrix(0,N,N) # Z1=array(0,dim = c(n,N,p)) Z2=matrix(0,n,N) tempequ1=rep(0,m+1) tempequ1[1]=1 for (s in 1:m) { tempequ1[s+1]=tempequ1[s]+numequ[s] # fg[tempequ1[s]:(tempequ1[s+1]-1),]=t(XX[index1[,s],index2[,s],s])%*%(-XX[index1[,s],,s])/(vary[s]*nn[s]) # for (j in 1:p) { # Z1[,tempequ1[s]:(tempequ1[s+1]-1),j]=apply(XX[,index2[,s],s], 2, function(x) x*(-XX[,j,s]))/(vary[s]*nnn[s]) # } if(PCA_m[s]==1) { PCA_ind[pat[s],1,(PCA_ind0[pat[s],1]+1):(PCA_ind0[pat[s],1]+numequ[s])]=tempequ1[s]:(tempequ1[s+1]-1) PCA_ind0[pat[s],1]=PCA_ind0[pat[s],1]+numequ[s] } else if (PCA_m[s]==2) { PCA_ind[pat[s],2,(PCA_ind0[pat[s],2]+1):(PCA_ind0[pat[s],2]+numequ[s])]=tempequ1[s]:(tempequ1[s+1]-1) PCA_ind0[pat[s],2]=PCA_ind0[pat[s],2]+numequ[s] } } size1=matrix(1,N,N) #size2=array(1,dim=c(N,N,p)) for (s1 in 1:m) { for (s2 in 1:m) { n_tmp=sum(index1[,s1]*index1[,s2]) if (n_tmp*sum(abs(index1[,s1]-index1[,s2]))!=0) { size1[tempequ1[s1]:(tempequ1[s1+1]-1),tempequ1[s2]:(tempequ1[s2+1]-1)]=nnn[s1]*nnn[s2]/n_tmp #size2[tempequ1[s1]:(tempequ1[s1+1]-1),tempequ1[s2]:(tempequ1[s2+1]-1),]=nnn[s1]*nnn[s2]/n_tmp } } } res=matrix(0,n,m) resvar=rep(1,m) for (i in 1:n_pat) { ind_C[i+1]=ind_C[i]+sum(PCA_ind0[i,]) ind_pat[i+1]=ind_pat[i]+sum(index1[,min(which(pat==i))]) } if (is.null(lambda)) { #fg_temp=fg #Z1_temp=Z1 #size2_temp=size2 for (s in 1:m) { res[index1[,s],s]=yy[index1[,s],s]-XX[index1[,s],,s]%*%(rep(0.01,p)) # resvar[s]=var(res[index1[,s],s]) #### not divided by variance of residuals g[tempequ1[s]:(tempequ1[s+1]-1)]=t(XX[index1[,s],index2[,s],s])%*%(res[index1[,s],s])/(resvar[s]*nn[s]) Z2[,tempequ1[s]:(tempequ1[s+1]-1)]=apply(XX[,index2[,s],s], 2, function(x) x*res[,s])/(resvar[s]*nnn[s]) #fg_temp[tempequ1[s]:(tempequ1[s+1]-1),]=fg_temp[tempequ1[s]:(tempequ1[s+1]-1),]*vary[s]/resvar[s] 
  }
  if (is.null(C0)) {
    C=matrix(0,N,N)
    for (ii in 1:n_pat) {
      C[ind_C[ii]:(ind_C[ii+1]-1),ind_C[ii]:(ind_C[ii+1]-1)]=t(Z2[ind_pat[ii]:(ind_pat[ii+1]-1),ind_C[ii]:(ind_C[ii+1]-1), drop=FALSE])%*%Z2[ind_pat[ii]:(ind_pat[ii+1]-1),ind_C[ii]:(ind_C[ii+1]-1), drop=FALSE]
    }
    C=C*size1
    fq=Qfirstdev1(h1=h1, beta_pre=rep(0.01,p), XX=XX, yy=yy, index1=index1, index2=index2, numequ=numequ, tempequ1=tempequ1, PCA_ind=PCA_ind, PCA_ind0=PCA_ind0, n_pat=n_pat, eps3=eps3, pat=pat, ind_C=ind_C, ind_pat=ind_pat, ratio = ratio)$fq_star
    lambda.max=max(abs(fq))+0.5
    lambda=exp(seq(log(lambda.min*lambda.max),log(lambda.max),len=nlam))
  }
}
L=length(lambda)
beta=matrix(0,L,p)
beta_pre=beta0
res=matrix(0,n,m)
qq=rep(0,L)
biq=rep(0,L)
aiq=rep(0,L)
bic=rep(0,L)
ebic1=rep(0,L)
ebic2=rep(0,L)
ebic11=rep(0,L)
ebic21=rep(0,L)
ebicM=rep(0,L)
ebicM1=rep(0,L)
rss=rep(0,L)
bic1=rep(0,L)
rss1=rep(0,L)
beta_out=matrix(0,L,p)
intercept=rep(0,L)
objective=matrix(0,L,2)
objective1=matrix(0,L,2)
notcon=0
ind=rep(TRUE, p)
p1=p
end=FALSE
beta_pre2=beta_pre+0.05
for (l in 1:L){
  reset=FALSE
  conjugate=FALSE
  alpha=alpha1
  lalpha=length(alpha)
  ff=rep(0,lalpha)
  if (l!=1) {
    beta_pre=beta[l-1,]
  }
  fq2=Qfirstdev1(h1=h1, beta_pre=beta_pre2, XX=XX, yy=yy, index1=index1, index2=index2, numequ=numequ, tempequ1=tempequ1, PCA_ind=PCA_ind, PCA_ind0=PCA_ind0, n_pat=n_pat, eps3=eps3, pat=pat, ind_C=ind_C, ind_pat=ind_pat, ratio = ratio)$fq_star
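  # Descriptive note (added): pe2 below is the SCAD penalty derivative evaluated
  # at beta_pre2. For u = |beta_j| it equals lambda on (0, lambda], then
  # (a*lambda - u)/(a - 1) on (lambda, a*lambda], and 0 beyond a*lambda; at
  # u = 0 the adjustment is clipped so that |fq2[j]| <= lambda keeps the
  # coordinate at zero.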
  pe2=rep(0,p)
  if (lambda[l]!=0) {
    for (j in 1:p) {
      u=abs(beta_pre2[j])
      if (u<=lambda[l]+1e-15) {
        if (u==0) {
          if (fq2[j]>lambda[l]) {
            pe2[j]=-lambda[l]
          } else if (fq2[j]<-lambda[l]) {
            pe2[j]=lambda[l]
          } else {
            pe2[j]=-fq2[j]
          }
        } else {
          pe2[j]=lambda[l]
        }
      } else if (u<=a*lambda[l]) {
        pe2[j]=(a*lambda[l]-u)/(a-1)
      } else {
        pe2[j]=0
      }
    }
  }
  firstdev2=fq2+pe2
  for (i in 1:max.iter) {
    if (lambda[l]!=0) {
      p1=sum(beta_pre!=0)
      ind=(beta_pre!=0)
    }
    if (sum(ind)==0) {
      end=TRUE
      break
    }
    for (s in 1:m) {
      res[index1[,s],s]=yy[index1[,s],s]-XX[index1[,s],,s]%*%(as.numeric(beta_pre))
      g[tempequ1[s]:(tempequ1[s+1]-1)]=t(XX[index1[,s],index2[,s],s])%*%(res[index1[,s],s])/(resvar[s]*nn[s])
      Z2[,tempequ1[s]:(tempequ1[s+1]-1)]=apply(XX[,index2[,s],s], 2, function(x) x*res[,s])/(resvar[s]*nnn[s])
    }
    if (is.null(C0)) {
      C=matrix(0,N,N)
      for (ii in 1:n_pat) {
        C[ind_C[ii]:(ind_C[ii+1]-1),ind_C[ii]:(ind_C[ii+1]-1)]=t(Z2[ind_pat[ii]:(ind_pat[ii+1]-1),ind_C[ii]:(ind_C[ii+1]-1), drop=FALSE])%*%Z2[ind_pat[ii]:(ind_pat[ii+1]-1),ind_C[ii]:(ind_C[ii+1]-1), drop=FALSE]
      }
      C=C*size1
      fq=Qfirstdev1(h1=h1, beta_pre=beta_pre, XX=XX, yy=yy, index1=index1, index2=index2, numequ=numequ, tempequ1=tempequ1, PCA_ind=PCA_ind, PCA_ind0=PCA_ind0, n_pat=n_pat, eps3=eps3, pat=pat, ind_C=ind_C, ind_pat=ind_pat, ratio = ratio)$fq_star
    }
    pe=rep(0,p)
    if (lambda[l]!=0) {
      for (j in 1:p) {
        u=abs(beta_pre[j])
        if (u<=lambda[l]+1e-15) {
          if (u==0) {
            if (fq[j]>lambda[l]) {
              pe[j]=-lambda[l]
            } else if (fq[j]<-lambda[l]) {
              pe[j]=lambda[l]
            } else {
              pe[j]=-fq[j]
            }
          } else {
            pe[j]=lambda[l]
          }
        } else if (u<=a*lambda[l]) {
          pe[j]=(a*lambda[l]-u)/(a-1)
        } else {
          pe[j]=0
        }
        pe[j]=pe[j]*sign(beta_pre[j])
      }
    }
    firstdev=fq+pe
    if (conjugate) {
      if (firstdev[ind]%*%conj2[ind]>=0) {
        reset=TRUE
      }
    }
    if (i==1 | reset) {
      reset=FALSE
      ff = foreach (j = 1:lalpha, .combine = rbind, .packages = c("MASS", "Matrix")) %do% {
        beta_t0=beta_pre[ind]-alpha[j]*firstdev[ind]
        beta_t=beta_pre
        beta_t[ind]=beta_t0
        for (s in 1:m) {
          res[index1[,s],s]=yy[index1[,s],s]-XX[index1[,s],,s]%*%(as.numeric(beta_t))
          g[tempequ1[s]:(tempequ1[s+1]-1)]=t(XX[index1[,s],index2[,s],s])%*%(res[index1[,s],s])/(resvar[s]*nn[s])
          Z2[,tempequ1[s]:(tempequ1[s+1]-1)]=apply(XX[,index2[,s],s], 2, function(x) x*res[,s])/(resvar[s]*nnn[s])
        }
        C=matrix(0,N,N)
        for (ii in 1:n_pat) {
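          # Descriptive note (added): C is assembled block-diagonally, one block
          # per missing-data pattern, since estimating equations from different
          # patterns involve disjoint sets of subjects.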
          C[ind_C[ii]:(ind_C[ii+1]-1),ind_C[ii]:(ind_C[ii+1]-1)]=t(Z2[ind_pat[ii]:(ind_pat[ii+1]-1),ind_C[ii]:(ind_C[ii+1]-1), drop=FALSE])%*%Z2[ind_pat[ii]:(ind_pat[ii+1]-1),ind_C[ii]:(ind_C[ii+1]-1), drop=FALSE]
        }
        C=C*size1
        pe_t=0
        for (jj in 1:p) {
          u=abs(beta_t[jj])
          if (u<=lambda[l]) {
            pe_t=pe_t+lambda[l]*u
          } else if (u<=a*lambda[l]) {
            pe_t=pe_t-(u^2-2*a*lambda[l]*u+lambda[l]^2)/(2*(a-1))
          } else {
            pe_t=pe_t+(a+1)*lambda[l]^2/2
          }
        }
        ff_temp=Qfun1(C=C, g=g, PCA_ind=PCA_ind, PCA_ind0=PCA_ind0, N=N, n_pat=n_pat, eps3 = eps3, index1=index1, pat=pat, res=res, numequ=numequ, tempequ1=tempequ1, ind_C=ind_C)$q_star+pe_t
        return(ff_temp)
      }
      beta_tmp=beta_pre[ind]-alpha[which.min(ff)]*firstdev[ind]
      conj2=-firstdev
    } else {
      conjugate=TRUE
      threshold=5*sqrt(sum(beta_pre[ind]^2))/(max(alpha)*sqrt(sum(conj2[ind]^2)))
      if ((conj2[ind]%*%(firstdev2[ind]-firstdev[ind]))==0) {
        gamma=threshold
      } else {
        gamma=max(0,-(firstdev[ind]%*%(firstdev[ind]))/(conj2[ind]%*%(firstdev2[ind]-firstdev[ind])))
        if (gamma>threshold) {
          gamma=threshold
        }
      }
      conj=-firstdev+gamma*conj2 ##### conjugate direction
      ff = foreach (j = 1:lalpha, .combine = rbind, .packages = c("MASS", "Matrix")) %do% {
        beta_t0=beta_pre[ind]+alpha[j]*conj[ind]
        beta_t=beta_pre
        beta_t[ind]=beta_t0
        for (s in 1:m) {
          res[index1[,s],s]=yy[index1[,s],s]-XX[index1[,s],,s]%*%(as.numeric(beta_t))
          g[tempequ1[s]:(tempequ1[s+1]-1)]=t(XX[index1[,s],index2[,s],s])%*%(res[index1[,s],s])/(resvar[s]*nn[s])
          Z2[,tempequ1[s]:(tempequ1[s+1]-1)]=apply(XX[,index2[,s],s], 2, function(x) x*res[,s])/(resvar[s]*nnn[s])
        }
        C=matrix(0,N,N)
        for (ii in 1:n_pat) {
          C[ind_C[ii]:(ind_C[ii+1]-1),ind_C[ii]:(ind_C[ii+1]-1)]=t(Z2[ind_pat[ii]:(ind_pat[ii+1]-1),ind_C[ii]:(ind_C[ii+1]-1), drop=FALSE])%*%Z2[ind_pat[ii]:(ind_pat[ii+1]-1),ind_C[ii]:(ind_C[ii+1]-1), drop=FALSE]
        }
        C=C*size1
        pe_t=0
        for (jj in 1:p) {
          u=abs(beta_t[jj])
          if (u<=lambda[l]) {
            pe_t=pe_t+lambda[l]*u
          } else if (u<=a*lambda[l]) {
            pe_t=pe_t-(u^2-2*a*lambda[l]*u+lambda[l]^2)/(2*(a-1))
          } else {
            pe_t=pe_t+(a+1)*lambda[l]^2/2
          }
        }
        ff_temp=Qfun1(C=C, g=g, PCA_ind=PCA_ind, PCA_ind0=PCA_ind0, N=N, n_pat=n_pat, eps3 = eps3, index1=index1, pat=pat, res=res, numequ=numequ, tempequ1=tempequ1, ind_C=ind_C)$q_star+pe_t
        return(ff_temp)
      }
      beta_tmp=beta_pre[ind]+alpha[which.min(ff)]*conj[ind]
      conj2=conj
    }
    if (lambda[l]!=0) {
      beta_tmp[abs(beta_tmp)<eps1]=0
      if (sum(abs(beta_tmp)<eps1)>0) {
        reset=TRUE
      }
    }
    beta[l,ind]=beta_tmp
    firstdev2=firstdev
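    # Descriptive note (added): convergence is declared when the largest
    # coordinate-wise change in beta falls below eps2; if the iterates stall
    # for more than ten steps, the step sizes alpha are rescaled below.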
    diff=max(abs(beta[l,]-beta_pre))
    if (i>11) {
      diff1=max(abs(beta[l,]-beta_pre2), abs(beta_pre-beta_pre3))
    }
    if (i>10) {
      beta_pre3=beta_pre2
    }
    beta_pre2=beta_pre
    beta_pre=beta[l,]
    if (diff<eps2) {
      break
    } else {
      if (i>11) {
        if (diff1<eps2) {
          alpha=alpha1/max(abs(conj2))
        }
      }
    }
  }
  if (i==max.iter) {
    converge=FALSE
    notcon=notcon+1
  } else {
    converge=TRUE
  }
  if (end) {
    break
  }
  for (s in 1:m) {
    res[index1[,s],s]=yy[index1[,s],s]-XX[index1[,s],,s]%*%(as.numeric(beta[l,]))
    g[tempequ1[s]:(tempequ1[s+1]-1)]=t(XX[index1[,s],index2[,s],s])%*%(res[index1[,s],s])/(resvar[s]*nn[s])
    Z2[,tempequ1[s]:(tempequ1[s+1]-1)]=apply(XX[,index2[,s],s], 2, function(x) x*res[,s])/(resvar[s]*nnn[s])
  }
  if (is.null(C0)) {
    C=matrix(0,N,N)
    for (ii in 1:n_pat) {
      C[ind_C[ii]:(ind_C[ii+1]-1),ind_C[ii]:(ind_C[ii+1]-1)]=t(Z2[ind_pat[ii]:(ind_pat[ii+1]-1),ind_C[ii]:(ind_C[ii+1]-1), drop=FALSE])%*%Z2[ind_pat[ii]:(ind_pat[ii+1]-1),ind_C[ii]:(ind_C[ii+1]-1), drop=FALSE]
    }
    C=C*size1
    Q0=Qfun1(C=C, g=g, PCA_ind=PCA_ind, PCA_ind0=PCA_ind0, N=N, n_pat=n_pat, eps3 = eps3, index1=index1, pat=pat, res=res, numequ=numequ, tempequ1=tempequ1, ind_C=ind_C)
    TT=Q0$T2[1:(Q0$count-1),, drop=FALSE]%*%Q0$T1
    g_star=TT%*%g
    C_star=TT%*%C%*%t(TT)
    invCg_star=solve(C_star,g_star)
  } else {
    invCg=solve(C0,g)
  }
  qq[l]=t(g_star)%*%invCg_star
  rss[l]=sum(res[,index0]^2)
  for (k1 in 1:n_pat) {
    rss1[l]=rss1[l]+sum(res[,pat==k1]^2)/sum(pat==k1)
  }
  biq[l]=qq[l]+sum(beta[l,]!=0)*log(n)
  aiq[l]=qq[l]+sum(beta[l,]!=0)*2
  bic[l]=n*log(rss[l])+sum(beta[l,]!=0)*log(n)
  bic1[l]=n*log(rss1[l])+sum(beta[l,]!=0)*log(n)
  ebic1[l]=n*log(rss[l])+(log(n)+2*gamma.ebic*log(p))*sum(beta[l,]!=0)
  ebic2[l]=n*log(rss[l])+log(n)*sum(beta[l,]!=0)+2*gamma.ebic*log(choose(p,sum(beta[l,]!=0)))
  ebic11[l]=n*log(rss1[l])+(log(n)+2*gamma.ebic*log(p))*sum(beta[l,]!=0)
  ebic21[l]=n*log(rss1[l])+log(n)*sum(beta[l,]!=0)+2*gamma.ebic*log(choose(p,sum(beta[l,]!=0)))
  ebicM[l]=n*log(rss[l])+(log(n)+2*log(p))*sum(beta[l,]!=0)
  ebicM1[l]=n*log(rss1[l])+(log(n)+2*log(p))*sum(beta[l,]!=0)
  objective[l,1]=n*log(rss[l])
  objective[l,2]=log(n)*sum(beta[l,]!=0)
  objective1[l,1]=n*log(rss1[l]) ### newly added
  objective1[l,2]=log(n)*sum(beta[l,]!=0)
  beta_out[l,]=beta[l,]/scalex
  intercept[l] = centery-crossprod(beta_out[l,],as.numeric(centerx))
}
returnlist=list("beta"=beta_out, 'bic1'=bic1, "lambda"=lambda, "notcon"=notcon, "intercept"=intercept, "beta0"=beta)
return(returnlist)
}

Qfirstdev1 <- function (h1=2^(-(8:30)), beta_pre, XX, yy, index1, index2, numequ, tempequ1, PCA_ind, PCA_ind0, n_pat, eps3, pat, ind_C, ind_pat, ratio) {
  dims=dim(XX)
  n=dims[1]
  p=dims[2]
  m=dims[3]
  res=matrix(0,n,m)
  res1=matrix(0,n,m)
  res2=matrix(0,n,m)
  resvar=rep(1,m)
  resvar1=rep(1,m)
  resvar2=rep(1,m)
  N=sum(index2)
  Z2=matrix(0,n,N)
  Z21=matrix(0,n,N)
  Z22=matrix(0,n,N)
  g=rep(0,N)
  g1=rep(0,N)
  g2=rep(0,N)
  nn=apply(index1,2,sum)
  nnn=sqrt(apply(index1,2,sum))
  for (s in 1:m) {
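    # Descriptive note (added): for each layer s, the loop forms residuals at
    # beta_pre and the corresponding extended score vector g used in the
    # quadratic inference function.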
    res[index1[,s],s]=yy[index1[,s],s]-XX[index1[,s],,s]%*%(as.numeric(beta_pre))
    g[tempequ1[s]:(tempequ1[s+1]-1)]=t(XX[index1[,s],index2[,s],s])%*%(res[index1[,s],s])/(resvar[s]*nn[s])
    Z2[,tempequ1[s]:(tempequ1[s+1]-1)]=apply(XX[,index2[,s],s], 2, function(x) x*res[,s])/(resvar[s]*nnn[s])
  }
  C=matrix(0,N,N)
  for (ii in 1:n_pat) {
    C[ind_C[ii]:(ind_C[ii+1]-1),ind_C[ii]:(ind_C[ii+1]-1)]=t(Z2[ind_pat[ii]:(ind_pat[ii+1]-1),ind_C[ii]:(ind_C[ii+1]-1), drop=FALSE])%*%Z2[ind_pat[ii]:(ind_pat[ii+1]-1),ind_C[ii]:(ind_C[ii+1]-1), drop=FALSE]
  }
  maxeigen=base::norm((C+t(C))/2, type = '2')
  qfun=Qfun1(C=C, g=g, PCA_ind=PCA_ind, PCA_ind0=PCA_ind0, N=N, n_pat=n_pat, eps3 = eps3, index1=index1, pat=pat, res=res, numequ=numequ, tempequ1=tempequ1, maxeigen=maxeigen, ind_C=ind_C)
  tempT=qfun$tempT
  C_star=qfun$C_star
  S_C_star=solve(C_star)
  j1=NA
  result = foreach (j1 = 1:p, .combine = rbind, .packages = c("MASS", "Matrix")) %do% {
    h=h1
    if (abs(beta_pre[j1])<1e-2 & abs(beta_pre[j1])>0) {
      h=h1*max(1e-2, abs(beta_pre[j1]))
    }
    temp_fq=0
    temp_sq=0
    temp_diff1=Inf
    temp_diff2=Inf
    for (hh in 1:length(h)) {
      beta_temp1=beta_pre
      beta_temp2=beta_pre
      beta_temp1[j1]=beta_pre[j1]+h[hh]
      beta_temp2[j1]=beta_pre[j1]-h[hh]
      for (s in 1:m) {
        res1[index1[,s],s]=yy[index1[,s],s]-XX[index1[,s],,s]%*%(as.numeric(beta_temp1))
        g1[tempequ1[s]:(tempequ1[s+1]-1)]=t(XX[index1[,s],index2[,s],s])%*%(res1[index1[,s],s])/(resvar1[s]*nn[s])
        res2[index1[,s],s]=yy[index1[,s],s]-XX[index1[,s],,s]%*%(as.numeric(beta_temp2))
        g2[tempequ1[s]:(tempequ1[s+1]-1)]=t(XX[index1[,s],index2[,s],s])%*%(res2[index1[,s],s])/(resvar2[s]*nn[s])
      }
      g_star1=tempT%*%g1
      g_star2=tempT%*%g2
      q_star1=t(g_star1)%*%S_C_star%*%g_star1
      q_star2=t(g_star2)%*%S_C_star%*%g_star2
      fq_star_par=(q_star1-q_star2)/(2*h[hh])
      if (hh==1) {
        fq_pre=fq_star_par
      } else {
        fq_diff=max(abs((fq_star_par-fq_pre)))
        if (fq_diff<temp_diff1) {
          temp_diff1=fq_diff
          temp_fq=fq_star_par
        }
        if (fq_diff<1e-3) {
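          # Descriptive note (added): two consecutive step sizes give nearly the
          # same central-difference estimate, so this derivative is accepted.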
          break
        } else {
          fq_pre=fq_star_par
        }
      }
    }
    if (fq_diff>1e-3) {
      fq_star_par=temp_fq
    }
    return(c(fq_star_par,q_star1,q_star2))
  }
  # note: a small random perturbation is added to the derivative estimates
  returnlist=list('fq_star'=result[,1]+rnorm(p,0,0.1), 'q_star1'=result[,2], 'q_star2'=result[,3])
  return(returnlist)
}

Qfun1 <- function (C, g, PCA_ind, PCA_ind0, N, n_pat, eps3=1e-8, index1, pat, res, numequ, tempequ1, maxeigen=NULL, ind_C) {
  U_ind=array(NA,dim=c(n_pat,2,N))
  U_ind0=matrix(0,n_pat,2)
  T1=diag(1,N,N)
  T2=matrix(0,N,N)
  if (is.null(maxeigen)){
    maxeigen=base::norm((C+t(C))/2, type = '2')
  }
  count=1
  for (k1 in 1:n_pat) {
    id_im=which(pat==k1)
    n_im=length(id_im)
    if (n_im>1) {
      PCA_ind[k1,2,]=NA
      PCA_ind0[k1,2]=0
      for (i in 2:n_im) {
        if (sum(abs(res[,id_im[1]]-res[,id_im[i]]))==0) { ##### If residuals of other layers are the same as the main layer in the same group?
          id_im[i]=0
        } else {
          PCA_ind[k1,2,(PCA_ind0[k1,2]+1):(PCA_ind0[k1,2]+numequ[id_im[i]])]=tempequ1[id_im[i]]:(tempequ1[id_im[i]+1]-1)
          PCA_ind0[k1,2]=PCA_ind0[k1,2]+numequ[id_im[i]]
        }
      }
    }
    for (k2 in 1:2) {
      if (PCA_ind0[k1,k2]!=0) {
        if (k2==1 | U_ind0[k1,1]==0) {
          cov_temp=C[PCA_ind[k1,k2,1:PCA_ind0[k1,k2]], PCA_ind[k1,k2,1:PCA_ind0[k1,k2]], drop=FALSE]
        } else {
          temp1=T2[U_ind[k1,1,1:U_ind0[k1,1]], PCA_ind[k1,1,1:PCA_ind0[k1,1]], drop=FALSE]
          temp2=solve(temp1%*%C[PCA_ind[k1,1,1:PCA_ind0[k1,1]], PCA_ind[k1,1,1:PCA_ind0[k1,1]], drop=FALSE]%*%t(temp1))
          temp3=C[PCA_ind[k1,k2,1:PCA_ind0[k1,k2]], PCA_ind[k1,1,1:PCA_ind0[k1,1]], drop=FALSE]%*%t(temp1)
          T1[PCA_ind[k1,k2,1:PCA_ind0[k1,k2]], PCA_ind[k1,1,1:PCA_ind0[k1,1]]]=-temp3%*%temp2%*%temp1
          temp_ind=c(PCA_ind[k1,1,1:PCA_ind0[k1,1]],PCA_ind[k1,k2,1:PCA_ind0[k1,k2]])
          temp4=T1[temp_ind, temp_ind, drop=FALSE]%*%C[temp_ind,temp_ind]%*%t(T1[temp_ind,temp_ind])
          cov_temp=temp4[(PCA_ind0[k1,1]+1):(PCA_ind0[k1,1]+PCA_ind0[k1,2]),(PCA_ind0[k1,1]+1):(PCA_ind0[k1,1]+PCA_ind0[k1,2])]
        }
        eigens=eigen((cov_temp+t(cov_temp))/2)
        values=eigens$values
        if (max(values)>maxeigen) {
          maxeigen=max(values)
        }
        n_tmp0=sum(index1[,min(which(pat==k1)), drop=FALSE]) ### Same as pQIFmp_scad20.R, except using the BIC in Cho's paper and ask number of selected PCA less than n
        nr=n_tmp0*PCA_ind0[k1,k2]
        if (k2==1) { ### Ask number of selected PCA less than n
          tm=n_tmp0-1
        } else {
          tm=n_tmp0-1-U_ind0[k1,1]
        }
        eigenthreshold=max(eps3, maxeigen*1e-10, sum(values)*log(nr)/(nr))
        rank=rankMatrix(cov_temp)
        if (min(values)>eigenthreshold & rank==min(dim(cov_temp)) & min(dim(cov_temp))<=tm) {
          U_ind0[k1,k2]=PCA_ind0[k1,k2]
          U_ind[k1,k2,1:U_ind0[k1,k2]]=count:(count+U_ind0[k1,k2]-1)
          T2[U_ind[k1,k2,1:U_ind0[k1,k2]], PCA_ind[k1,k2,1:PCA_ind0[k1,k2]]]=diag(1,PCA_ind0[k1,k2],PCA_ind0[k1,k2])
        } else if (max(values)>eigenthreshold & rank>0 & tm>0) {
          U_ind0[k1,k2]=max(1,min(apply(index1[,which(pat==k1), drop=FALSE],2,sum), sum(values>eigenthreshold), rank, tm))
          U_ind[k1,k2,1:U_ind0[k1,k2]]=count:(count+U_ind0[k1,k2]-1)
          U_temp=as.matrix(eigens$vectors[,1:U_ind0[k1,k2]])
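          # Descriptive note (added): each retained eigenvector is multiplied by
          # the sign of its last coordinate, fixing a deterministic sign
          # convention before building the reduction matrix T2.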
          T2[U_ind[k1,k2,1:U_ind0[k1,k2]], PCA_ind[k1,k2,1:PCA_ind0[k1,k2]]]=t(U_temp%*%diag(sign(U_temp[dim(U_temp)[1],]), length(sign(U_temp[dim(U_temp)[1],])), length(sign(U_temp[dim(U_temp)[1],]))))
        }
        count=count+U_ind0[k1,k2]
      }
    }
  }
  ################################ Block diagonal matrix multiplication ####################
  ind_tT=rep(1,n_pat+1)
  tempT=matrix(0,(count-1),N)
  C_star=matrix(0,(count-1),(count-1))
  for (ii in 1:n_pat) {
    ind_tT[ii+1]=ind_tT[ii]+sum(U_ind0[ii,])
    tempT[ind_tT[ii]:(ind_tT[ii+1]-1),ind_C[ii]:(ind_C[ii+1]-1)]=T2[ind_tT[ii]:(ind_tT[ii+1]-1),ind_C[ii]:(ind_C[ii+1]-1), drop=FALSE]%*%T1[ind_C[ii]:(ind_C[ii+1]-1),ind_C[ii]:(ind_C[ii+1]-1), drop=FALSE]
    C_star[ind_tT[ii]:(ind_tT[ii+1]-1),ind_tT[ii]:(ind_tT[ii+1]-1)]=tempT[ind_tT[ii]:(ind_tT[ii+1]-1),ind_C[ii]:(ind_C[ii+1]-1), drop=FALSE]%*%C[ind_C[ii]:(ind_C[ii+1]-1),ind_C[ii]:(ind_C[ii+1]-1), drop=FALSE]%*%t(tempT[ind_tT[ii]:(ind_tT[ii+1]-1),ind_C[ii]:(ind_C[ii+1]-1), drop=FALSE])
  }
  g_star=tempT%*%g
  C_star=(C_star+t(C_star))/2
  q_star=t(g_star)%*%solve(C_star,g_star)
  returnlist=list('q_star'=q_star, 'C_star'=C_star, 'g_star'=g_star, 'T1'=T1, 'T2'=T2, 'count'=count, 'tempT'=tempT)
  return(returnlist)
}
Qfirstdev2 <- function (h1=2^(-(8:30)), beta_pre, XX, yy, index1, index2, numequ, tempequ1, PCA_ind, PCA_ind0, n_pat, eps3, pat, ind_C, ind_pat, ratio) {
  dims=dim(XX)
  n=dims[1]
  p=dims[2]
  m=dims[3]
  res=matrix(0,n,m)
  res1=matrix(0,n,m)
  res2=matrix(0,n,m)
  resvar=rep(1,m)
  resvar1=rep(1,m)
  resvar2=rep(1,m)
  N=sum(index2)
  Z2=matrix(0,n,N)
  Z21=matrix(0,n,N)
  Z22=matrix(0,n,N)
  g1=rep(0,N)
  g2=rep(0,N)
  nn=apply(index1,2,sum)
  nnn=sqrt(apply(index1,2,sum))
  for (s in 1:m) {
    res[index1[,s],s]=yy[index1[,s],s]-XX[index1[,s],,s]%*%(as.numeric(beta_pre))
    Z2[,tempequ1[s]:(tempequ1[s+1]-1)]=apply(XX[,index2[,s],s], 2, function(x) x*res[,s])/(resvar[s]*nnn[s])
  }
  C=matrix(0,N,N)
  for (ii in 1:n_pat) {
    C[ind_C[ii]:(ind_C[ii+1]-1),ind_C[ii]:(ind_C[ii+1]-1)]=t(Z2[ind_pat[ii]:(ind_pat[ii+1]-1),ind_C[ii]:(ind_C[ii+1]-1), drop=FALSE])%*%Z2[ind_pat[ii]:(ind_pat[ii+1]-1),ind_C[ii]:(ind_C[ii+1]-1), drop=FALSE]
  }
  maxeigen=base::norm((C+t(C))/2, type = '2')
  p1=round(p*ratio)
  index_random=sort(sample.int(p, p1))
  result=matrix(0, p, 3)
  j1=NA
  result[index_random,] = foreach (j1 = index_random, .combine = rbind, .packages = c("MASS", "Matrix")) %do% {
    h=h1
    if (abs(beta_pre[j1])<1e-2 & abs(beta_pre[j1])>0) {
      h=h1*max(1e-2, abs(beta_pre[j1]))
    }
    temp_fq=0
    temp_sq=0
    temp_diff1=Inf
    temp_diff2=Inf
    for (hh in 1:length(h)) {
      beta_temp1=beta_pre
      beta_temp2=beta_pre
      beta_temp1[j1]=beta_pre[j1]+h[hh]
      beta_temp2[j1]=beta_pre[j1]-h[hh]
      for (s in 1:m) {
        res1[index1[,s],s]=yy[index1[,s],s]-XX[index1[,s],,s]%*%(as.numeric(beta_temp1))
        g1[tempequ1[s]:(tempequ1[s+1]-1)]=t(XX[index1[,s],index2[,s],s])%*%(res1[index1[,s],s])/(resvar1[s]*nn[s])
        Z21[,tempequ1[s]:(tempequ1[s+1]-1)]=apply(XX[,index2[,s],s], 2, function(x) x*res1[,s])/(resvar1[s]*nnn[s])
        res2[index1[,s],s]=yy[index1[,s],s]-XX[index1[,s],,s]%*%(as.numeric(beta_temp2))
        g2[tempequ1[s]:(tempequ1[s+1]-1)]=t(XX[index1[,s],index2[,s],s])%*%(res2[index1[,s],s])/(resvar2[s]*nn[s])
        Z22[,tempequ1[s]:(tempequ1[s+1]-1)]=apply(XX[,index2[,s],s], 2, function(x) x*res2[,s])/(resvar2[s]*nnn[s])
      }
      C1=matrix(0,N,N)
      C2=matrix(0,N,N)
      for (i in 1:n_pat) {
        C1[ind_C[i]:(ind_C[i+1]-1),ind_C[i]:(ind_C[i+1]-1)]=t(Z21[ind_pat[i]:(ind_pat[i+1]-1),ind_C[i]:(ind_C[i+1]-1), drop=FALSE])%*%Z21[ind_pat[i]:(ind_pat[i+1]-1),ind_C[i]:(ind_C[i+1]-1), drop=FALSE]
        C2[ind_C[i]:(ind_C[i+1]-1),ind_C[i]:(ind_C[i+1]-1)]=t(Z22[ind_pat[i]:(ind_pat[i+1]-1),ind_C[i]:(ind_C[i+1]-1), drop=FALSE])%*%Z22[ind_pat[i]:(ind_pat[i+1]-1),ind_C[i]:(ind_C[i+1]-1), drop=FALSE]
      }
      q_star1=Qfun1(C=C1, g=g1, PCA_ind=PCA_ind, PCA_ind0=PCA_ind0, N=N, n_pat=n_pat, eps3 = eps3, index1=index1, pat=pat, res=res1, numequ=numequ, tempequ1=tempequ1, maxeigen=maxeigen, ind_C=ind_C)$q_star
      q_star2=Qfun1(C=C2, g=g2, PCA_ind=PCA_ind, PCA_ind0=PCA_ind0, N=N, n_pat=n_pat, eps3 = eps3, index1=index1, pat=pat, res=res2, numequ=numequ, tempequ1=tempequ1, maxeigen=maxeigen, ind_C=ind_C)$q_star
      fq_star_par=(q_star1-q_star2)/(2*h[hh])
      if (hh==1) {
        fq_pre=fq_star_par
      } else {
        fq_diff=max(abs((fq_star_par-fq_pre)))
        if (fq_diff<temp_diff1) {
          temp_diff1=fq_diff
          temp_fq=fq_star_par
        }
        if (fq_diff<1e-3) {
          break
        } else {
          fq_pre=fq_star_par
        }
      }
    }
    if (fq_diff>1e-3) {
      fq_star_par=temp_fq
    }
    return(c(fq_star_par,q_star1,q_star2))
  }
  returnlist=list('fq_star'=result[,1], 'q_star1'=result[,2], 'q_star2'=result[,3])
  return(returnlist)
}
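# Illustrative sketch (added; not package code): Qfirstdev1()/Qfirstdev2()
# approximate the gradient of the quadratic inference function by central
# differences, scanning the step sizes h1 = 2^-(8:30) and keeping the first
# estimate that agrees with its predecessor to within 1e-3. The standalone toy
# below, with a hypothetical smooth objective f(), mimics that scheme.
f <- function(x) x^2 + sin(x)
central_diff <- function(f, x, h = 2^(-(8:30)), tol = 1e-3) {
  est_pre <- (f(x + h[1]) - f(x - h[1])) / (2 * h[1])
  for (hh in 2:length(h)) {
    est <- (f(x + h[hh]) - f(x - h[hh])) / (2 * h[hh])
    if (abs(est - est_pre) < tol) return(est)  # successive estimates agree
    est_pre <- est
  }
  est_pre  # fall back to the last estimate if none stabilised
}
central_diff(f, 1)  # approximately 2*1 + cos(1)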
/scratch/gouwar.j/cran-all/cranData/BlockMissingData/R/MBI.R
#' Imputation using generalized linear models for missing values
#'
#' The function performs imputation using generalized linear models for missing values in a dataset.
#' It fits these models for each specified response variable separately, utilizing other specified
#' variables, and returns the estimated coefficients and predicted values for each variable.
#' The function handles different distribution families, such as Gaussian, Binomial, and Ordinal,
#' for GLM estimation.
#'
#' @import MASS
#' @import glmnet
#' @import pryr
#' @import doParallel
#' @import foreach
#' @import glmnetcr
#' @import Matrix
#' @importFrom stats predict fitted glm deviance coef
#' @param X Data matrix containing all the variables that may contain missing values.
#' @param ind_y A vector specifying the indices of response variables in the dataset.
#' @param ind_x A vector specifying the indices of predictor variables in the dataset. By default, it is set to -ind_y, which means all variables other than the response variables are considered as predictors.
#' @param miss A logical matrix indicating the missing values in the dataset.
#' @param newdata Data matrix for which imputed values are required. It should have the same column names as the original dataset.
#' @param family A character indicating the distribution family of the GLM. Possible values are "gaussian" (default), "binomial", and "ordinal".
#'
#' @return A list containing the imputed values for each response variable.
#' \item{B}{A matrix of estimated coefficients, where each column contains the coefficients for a response variable, and each row corresponds to a predictor variable (including the intercept term)}
#' \item{PRED}{A matrix of predicted values (or imputations), where each column contains the predicted values for a response variable, and each row corresponds to an observation in the newdata (if provided)}
#' @author Fei Xue and Annie Qu
#' @export
#'
#' @examples
#'
#' library(MASS)
#'
#' # Number of subjects
#' n <- 700
#'
#' # Number of total covariates
#' p <- 40
#'
#' # Number of missing groups of subjects
#' ngroup <- 4
#'
#' # Number of data sources
#' nsource <- 4
#'
#' # Starting indexes of covariates in data sources
#' cov_index=c(1, 13, 25, 37)
#'
#' # Starting indexes of subjects in missing groups
#' sub_index=c(1, 31, 251, 471)
#'
#' # Indexes of missing data sources in missing groups, respectively ('NULL' represents no missing)
#' miss_source=list(NULL, 3, 2, 1)
#'
#' # Create a design matrix
#' set.seed(1)
#' sigma=diag(1-0.4,p,p)+matrix(0.4,p,p)
#' X <- mvrnorm(n,rep(0,p),sigma)
#'
#' # Introduce some block-wise missing
#' for (i in 1:ngroup) {
#'   if (!is.null(miss_source[[i]])) {
#'     if (i==ngroup) {
#'       if (miss_source[[i]]==nsource) {
#'         X[sub_index[i]:n, cov_index[miss_source[[i]]]:p] = NA
#'       } else {
#'         X[sub_index[i]:n, cov_index[miss_source[[i]]]:(cov_index[miss_source[[i]]+1]-1)] = NA
#'       }
#'     } else {
#'       if (miss_source[[i]]==nsource) {
#'         X[sub_index[i]:(sub_index[i+1]-1), cov_index[miss_source[[i]]]:p] = NA
#'       } else {
#'         X[sub_index[i]:(sub_index[i+1]-1), cov_index[miss_source[[i]]]:
#'           (cov_index[miss_source[[i]]+1]-1)] = NA
#'       }
#'     }
#'   }
#' }
#'
#' # Define missing data pattern
#' miss <- is.na(X)
#' # Choose response and predictor variables
#' ind_y <- 25:36
#' ind_x <- 13:24
#' # Data that need imputation
#' newdata <- X[31:250,]
#' # Use the function
#' result <- imputeglm.predict(X = X, ind_y = ind_y, ind_x = ind_x, miss = miss, newdata = newdata)
imputeglm.predict <- function (X, ind_y, ind_x=-ind_y, miss, newdata,
family="gaussian") { ny=length(ind_y) nx=length(X[1,ind_x])+1 B=matrix(0,nx,ny) PRED=matrix(0,dim(newdata)[1],ny) for (l in 1:ny) { ind_obs=!miss[,ind_y[l]] x.t=X[ind_obs,ind_x] y.t=X[ind_obs,ind_y[l]] x.train=x.t[apply(miss[ind_obs,ind_x],1,sum)==0,] y.train=y.t[apply(miss[ind_obs,ind_x],1,sum)==0] data0=data.frame(y.train, x.train) newx=as.data.frame(newdata) colnames(newx)=colnames(data0)[-1] if (family=="ordinal") { data0[,1]=as.factor(data0[,1]) if (dim(x.train)[1]>dim(x.train)[2]) { fit=polr(y.train~., data = data0) coeff=c(0,fit$coef) ##### The intercept may be wrong pred=as.numeric(as.character(predict(fit, type = 'class', newdata=newx))) } else { fit <- glmnetcr(data0[,-1], data0[,1], maxit=500) select = select.glmnetcr(fit) coeff=fit$beta[1:nx,select] ##### The intercept may be wrong pred=as.numeric(fitted(fit, newx=newx, s=select)$class) } } else if (family=="binomial") { data0[,1]=as.factor(data0[,1]) if (dim(x.train)[1]>dim(x.train)[2]) { fit=glm(y.train~., family=family, data=data0) coeff=fit$coef prob=predict(fit,newdata=newx, type = 'response') pred=rep(as.numeric(levels(data0$y.train)[1]),dim(newdata)[1]) pred[prob>0.5]=as.numeric(levels(data0$y.train)[2]) } else { fit=glmnet(x.train, y.train, family=family) k <- fit$df n <- fit$nobs select=which.min(log(n)*k+deviance(fit)) coeff=t(as.numeric(coef(fit, s=select))) pred=as.numeric(predict(fit, newx=newdata, s=select, type = 'class')) } } else { if (dim(x.train)[1]>dim(x.train)[2]) { fit=glm(y.train~., family=family, data=data0) coeff=fit$coef pred=predict(fit,newdata=newx) } else { cvfit <- cv.glmnet(x.train, y.train, nfolds=3, family=family) fit <- cvfit$glmnet.fit coeff=t(as.numeric(coef(fit, s=cvfit$lambda.min))) pred=predict(fit, newx=newdata, s=cvfit$lambda.min) } } B[,l]=coeff PRED[,l]=pred } returnlist=list("B"=B, "PRED"=PRED) return(returnlist) }
/scratch/gouwar.j/cran-all/cranData/BlockMissingData/R/imputeglm.predict.R
#' @keywords internal
"_PACKAGE"

## usethis namespace: start
#' @importFrom DT dataTableOutput
#' @importFrom DT renderDataTable
#' @importFrom igraph delete_vertices
#' @importFrom igraph get.edge.attribute
#' @importFrom igraph graph.adjacency
#' @importFrom igraph plot.igraph
#' @importFrom intergraph asIgraph
#' @importFrom intergraph asNetwork
#' @importFrom network is.bipartite
#' @importFrom network network
#' @importFrom network network.density
#' @importFrom network plot.network
#' @importFrom network set.vertex.attribute
#' @importFrom shinybusy add_busy_spinner
#' @importFrom shinybusy remove_modal_spinner
#' @importFrom shinybusy show_modal_spinner
#' @importFrom shinyjs hide
#' @importFrom shinyjs show
#' @importFrom shinyjs useShinyjs
#' @importFrom visNetwork visEdges
#' @importFrom visNetwork visHierarchicalLayout
#' @importFrom visNetwork visNetwork
#' @importFrom visNetwork visNodes
#' @importFrom visNetwork visOptions
## usethis namespace: end
NULL
/scratch/gouwar.j/cran-all/cranData/BlockmodelingGUI/R/BlockmodelingGUI-package.R
#' Get citation files
#'
#' @param filetype Type of bibliographic file; can be either \code{'ris'} for RIS files or \code{'BibText'} for BibTeX files.
#' @param folder Folder where to save the file. Defaults to \code{getwd()}.
#' @param overwrite Boolean. Whether to overwrite namesake files in the target folder.
#'
#' @return Copies the selected type of bibliographic file to the working folder.
#' @export
#' @examples
#' GetCitationFile('ris') # to get the .ris file in the working folder
GetCitationFile <- function(filetype=c('ris','BibText'), folder=getwd(), overwrite=FALSE) {
  # match.arg() validates the selection (and picks 'ris' when none is given),
  # replacing a length-two comparison that fails on the default vector
  filetype <- match.arg(filetype)
  if (filetype=='ris') {
    # locate the bundled file in the installed package rather than './inst'
    file.copy(from = system.file('extdata', 'citations.ris', package = 'BlockmodelingGUI'),
              to = folder, overwrite = overwrite)
  } else {
    file.copy(from = system.file('extdata', 'citations.bib', package = 'BlockmodelingGUI'),
              to = folder, overwrite = overwrite)
  }
  cat(paste('File', filetype, 'saved correctly in', folder, sep = ' '))
}
/scratch/gouwar.j/cran-all/cranData/BlockmodelingGUI/R/GetCitationFile.R
#' Show citation info
#'
#' @param ... Options to pass to cat()
#' @return Returns information on how to cite the app.
#' @export
#' @examples
#' HowToCite() # to see information about citations
HowToCite <- function(...){
  cat(paste('To cite this app in any publication please cite the app and the package "blockmodeling" as follows, plus (at least) one of the articles below:\n',
            'This app/package:',
            '\t - Telarico, Fabio Ashtar, and Ale\u0161 \u017Diberna. GUI for the Generalised Blockmodeling of Valued Networks (version 1.8.3). R. Ljubljana (Slovenia): Faculty of Social Sciences (FDV) at the University of Ljubljana, 2022. https://doi.org/10.5281/zenodo.6554608.\n',
            'Package "blockmodeling" by Ale\u0161 \u017Diberna:',
            '\t - \u017Diberna, Ale\u0161. Blockmodeling: Generalized and Classical Blockmodeling of Valued Networks (version 1.0.5), 2021. https://CRAN.R-project.org/package=blockmodeling.',
            '\t - Matja\u0161i\u010D, Miha, Marjan Cugmas, and Ale\u0161 \u017Diberna. \'Blockmodeling: An R Package for Generalized Blockmodeling\'. Advances in Methodology and Statistics 17, no. 2 (1 July 2020): 49\u201366. https://doi.org/10.51936/uhir1119.\n',
            'Methods:',
            '\t - Doreian, Patrick, Vladimir Batagelj, and Anuska Ferligoj. Generalized Blockmodeling. Cambridge University Press, 2005.',
            '\t - \u017Diberna, Ale\u0161. \'Generalized Blockmodeling of Sparse Networks\'. Advances in Methodology and Statistics 10, no. 2 (1 July 2013). https://doi.org/10.51936/orxk5673.',
            '\t - \u017Diberna, Ale\u0161. \'Generalized Blockmodeling of Valued Networks\'. Social Networks 29, no. 1 (January 2007): 105\u201326. https://doi.org/10.1016/j.socnet.2006.04.002.',
            sep = '\n'))
}
/scratch/gouwar.j/cran-all/cranData/BlockmodelingGUI/R/HowToCite.R
#' Launch Shiny App
#'
#' @param ... arguments to pass to shiny::runApp
#' @return Runs the app.
#'
#' @rawNamespace import(shiny, except = c(renderDataTable, dataTableOutput))
#' @import blockmodeling
#' @import htmlwidgets
#' @import shinythemes
#' @export
#' @examples
#' \dontrun{
#' app() # to run the app without special options
#' app(launch.browser = T) # to run the app in a new browser session
#' }
app <- function(...) {
  appDir <- system.file('apps/BlockmodelingGUI.R', package = 'BlockmodelingGUI')
  if (appDir == '') stop('Could not find the app. Try re-installing BlockmodelingGUI!', call. = FALSE)
  shiny::runApp(appDir, ...)
}
/scratch/gouwar.j/cran-all/cranData/BlockmodelingGUI/R/app.R
if(!require(shiny)) install.packages('shiny')
if(!require(htmlwidgets)) install.packages('htmlwidgets')
if(!require(shinythemes)) install.packages('shinythemes')
if(!require(network)) install.packages('network')
if(!require(blockmodeling)) install.packages('blockmodeling')
if(!require(igraph)) install.packages('igraph')
if(!require(visNetwork)) install.packages('visNetwork')
if(!require(intergraph)) install.packages('intergraph')
if(!require(DT)) install.packages('DT')
if(!require(shinybusy)) install.packages('shinybusy')
if(!require(shinyjs)) install.packages('shinyjs')

library(shiny)
library(shinyjs)
library(htmlwidgets)
library(shinythemes)
library(blockmodeling)
library(intergraph)
library(igraph)
library(network)
library(visNetwork)
library(DT)
library(shinybusy)

# Sect. 1 Inputs ####
ui <- fluidPage(
  useShinyjs(), # Initialise Shinyjs
  includeCSS("./style.css"), # CSS Styles
  theme = shinythemes::shinytheme("united"),
  tags$head(
    tags$link(
      rel = "shortcut icon",
      href = "https://www.fdv.uni-lj.si/App_Themes/Fdv/TemplateImages/icons/favicon.ico")),
  tags$head(
    tags$title("Generalised blockmodeling")
  ),
  tags$style(type="text/css", "#Info {white-space: pre-wrap;}"),
  ## 0. Decorations with HTML/CSS ####
  titlePanel({
    withTags({
      div(class="Header",
        table(
          tr(
            td(width = "267px",
              img(class = "HeaderLogo",
                  src="https://www.uni-lj.si/images/mobile/logo_m.png"),
            ),
            td(
              div(style="h1","Generalised blockmodeling"),
            ),
          ), # /tr
        ), # /table
        div(class="titleRect",
          div(class="title2", .noWS = c('outside', 'after-begin', 'before-end'),
              'This app offers an accessible GUI for generalised blockmodeling of single-relation, one-mode networks. The user can execute blockmodeling without having to write a line of code by using the app\'s visual aids. Moreover, there are several ways to visualise networks and their partitions. Finally, the results can be exported as if they were produced by writing code. To learn more about how this can be helpful to you, you can check the description of the package',
              a(href="https://cran.r-project.org/package=blockmodeling", "blockmodeling"),
              'by Ale\u0161 \u017Diberna'
          ) # /div "title2"
        ) # /div "titleRect"
      ) # div "Header"
    })
  }),
  shinybusy::add_busy_spinner(
    spin = 'breeding-rhombus',
    color = '#978E83',
    timeout = 100,
    position = 'top-right',
    onstart = TRUE,
    margins = c(10, 10)
  ),
  # Sidebar
  tabsetPanel(
    tabPanel(title = "Data upload",
      fluidRow(
        column(3,
          ## 1. File-upload options ####
          # "FileInputOpt1"
          withTags({
            div(h4(b("File-upload options")))
          }),
          ### 1.1 Use a sample ####
          ### "Sample"
          checkboxInput(inputId = "Sample",
                        label = 'Use a sample',
                        value = F),
          conditionalPanel(
            condition = "input.Sample == false",
            ### 1.2 Select type of input ####
            ## "type"
            selectInput(inputId = "type",
                        label = "Type of data?",
                        choice = c("Adjacency Matrix*"=1, "Edges list"=2,
                                   "Incidence matrix"=3, "Pajek"=4),
                        selected = 1,
                        multiple = FALSE
            ),
            conditionalPanel(
              condition = "input.type != 4",
              #### 1.2.1 Upload list ####
              ### "List"
              fileInput(inputId = "List",
                        label = "Upload the selected list as a plain-text file",
                        multiple = F,
                        accept = c("text/plain", ".csv", ".tab"),
                        buttonLabel = "Browse",
                        placeholder = "Your list here"),
              #### 1.2.2 Headers edge list file ####
              ### "ListHeader"
              checkboxInput(inputId = "ListHeader",
                            label = 'Headers?',
                            value = T,
              ),
            )
          )
        ),
        column(4, offset = 1,
          ## 2. Customise file elaboration ####
          # "FileInputOpt2"
          withTags({
            div(h4(b("Customise file elaboration")))
          }),
          ### 2.1 Separator ####
          ## "sep"
          radioButtons(inputId = "sep",
                       label = "Separator",
                       choiceNames = c("tab","comma","semicolon","other"),
                       choiceValues = c("\t",",",";","Other")
          ),
          conditionalPanel(
            condition = "input.sep == 'Other'",
            #### 2.1.1 Other separator ####
            ### "OtherSep"
            textAreaInput(inputId = "OtherSep",
                          label = 'You selected "other", please indicate the right separator',
                          cols = 3,
                          value = "",
            ),
          ),
          ### 2.2 Trim blanks ####
          ## "whites"
          checkboxInput(inputId = "whites",
                        label = 'Trim extra blanks',
                        value = TRUE),
          conditionalPanel(
            condition = "input.type == 4",
            ### 2.3 Type of Pajek file ####
            ## "PajekInput"
            radioButtons(inputId = "PajekInput",
                         label = "Type of Pajek file",
                         selected = ".net",
                         choiceNames = c(".mat",".net"),
                         choiceValues = c("PajekMatrix","PajekNetwork")
            ),
            ### 2.4 Upload Pajek file ####
            ## "PajekFile"
            fileInput(inputId = "PajekFile",
                      label = "Upload a Pajek file",
                      multiple = F,
                      buttonLabel = "Browse",
                      placeholder = "Your Pajek file here",
                      accept = c(".mat",".net")
            ),
          ), # if input.type == 4
        ), # column 3
        column(4,
          ## 3. Specify network properties ####
          # "NetworkOpt"
          withTags({
            div(h4(b("Specify network properties")))
          }),
          ### 3.1 Values/Weights ####
          ## "ValuedMatrix"
          checkboxInput(inputId = "ValuedMatrix",
                        label = 'Valued/Weighted network',
                        value = TRUE,
          ),
          #### 3.1.1 Values/Weights name ####
          # "ValuesName"
          # conditionalPanel(
          #   condition = "input.ValuedMatrix == true",
          #   textInput(inputId = "ValuesName",
          #             label = "How do you want the weights to be called?",
          #             value = "weights",
          #             placeholder = "weights, values, counts, etc...")
          # ),
          withTags(div(
            h6(b(i("Only change the settings below if needed"))),
          )),
          ### 3.2 Direction ####
          # "directionality"
          checkboxInput(inputId = "directionality",
                        label = 'Directional edges',
                        value = TRUE,
          ),
          ### 3.3 Self-links ####
          # "loops"
          checkboxInput(inputId = "loops",
                        label = 'Self-links',
                        value = TRUE,
          ),
          ### 3.4 Delete isolated nodes ####
          checkboxInput(inputId = "DelIsolated",
                        label = 'Delete isolated nodes',
                        value = FALSE),
          ### 3.5 Add attribute ####
          conditionalPanel(
            condition = "input.Sample == false",
            ### "AddAttr"
            checkboxInput(inputId = 'AddAttr',
                          label = 'Add a vertex attribute?',
                          value = F),
            conditionalPanel(
              condition = 'input.AddAttr==true',
              conditionalPanel(
                condition = 'input.Sample==false',
                #### 3.5.1 Attribute values #### "AddAttrFile"
                fileInput(inputId = "AddAttrFile",
                          label = 'Values from file',
                          placeholder = 'A text file containing the values',
                          multiple = F,
                          accept = 'text/plain'
                ),
                #### 3.5.2 Attribute name #### 'AddAttrName'
                textInput(inputId = "AddAttrName",
                          label = 'Attribute name',
                          placeholder = 'Do not use blanks',
                          value = NULL),
              ),
            ),
          ),
        ),
        ### 3.6 Button "Read data" ####
        # "aj"
        actionButton(inputId = "aj",
                     label = "Read Data",
                     icon = icon(name = "upload",
                                 lib = "font-awesome")
        )
      ), # Fluid row
      ## 4. Show the network's summary ####
      # "summary"
      verbatimTextOutput("summary"),
      ### 4.1 Summary with adj ####
      ## "NetworkSummaryOpt"
      withTags({
        div(h4(b("An extra option")))
      }),
      # "IncludeAdj"
      checkboxInput(inputId = "IncludeAdj",
                    label = 'Include the edgelist matrix?',
                    value = F,
                    width = "100%"
      ),
    ), # Tab panel Data
    tabPanel(title = "Generalised blockmodeling",
      ## 5. Block-modeling ####
      sidebarLayout(
        sidebarPanel(width = 9,
          withTags({
            div(h3(b("Customise blockmodeling")))
          }),
          hr(),
          fluidRow(
            column(4,
              ### 5.1 Approaches ####
              withTags(h4(b('Approaches'))),
              ### "blckmdlngApproach"
              selectInput(inputId = "blckmdlngApproach",
                          label = "Select approach",
                          choices = c("Binary"="bin",
                                      "Valued"="val",
                                      "Sum of squares homogeneity"="ss",
                                      "Absolute deviations homogeneity"="ad"),
                          multiple=FALSE
              ),
              #### 5.1.1 M parameter for valued blockmodeling ####
              #### "ParamM"
              conditionalPanel(
                condition = "input.blckmdlngApproach == 'val'",
                numericInput(inputId = "ParamM",
                             label = 'Select the M parameter',
                             value = NULL,
                             min = 0,
                             step = 1
                )
              ),
              #### 5.1.2 Threshold parameter for binary blockmodeling ####
              #### "ParamThreshold", "ThresholdSelected"
              conditionalPanel(
                condition = 'input.blckmdlngApproach == "bin"',
                # Asks whether the user wants to set a threshold
                checkboxInput(inputId = "ThresholdSelected",
                              label = 'Use a binarisation threshold',
                              value = F,
                              width = "100%"),
              ),
              conditionalPanel(
                condition = "input.ThresholdSelected == true && input.blckmdlngApproach == 'bin'",
                numericInput(inputId = "ParamThreshold",
                             label = 'Threshold parameter',
                             value = NULL,
                             min = 0,
                             step = 1)
              ),
              hr(),
              ### 5.2 Block types ####
              withTags(h4(b('Block types'))),
              #### 5.2.1 Block-type parameters
              ##### 5.2.1 (A) Density parameter for block-type 'den' ####
              ##### "ParamDensity"
              shinyjs::hidden(
                numericInput(inputId = "ParamDensity",
                             label = 'Select the density',
                             value = NULL,
                             step = 1)
              ),
              ##### 5.2.1 (B) Average parameter for block-type 'avg' ####
              ##### "ParamAverage"
              shinyjs::hidden(
                numericInput(inputId = "ParamAverage",
                             label = 'Select the average',
                             value = NULL,
                             step = 1)
              ),
              #### 5.2.2 Show/Hide Block-types weights' menu ####
              #### 'blockTypeWeights_Show'
              checkboxInput(inputId = 'blockTypeWeights_Show',
                            label = 'Show/Hide block-types weights\' menu',
                            value = F),
              #### 5.2.3 Types of allowed blocktypes ####
              ##### 5.2.3 (A) Pre-specified - Menu ####
              #### "blckmdlngPrespecified_Show"
              checkboxInput(inputId = "blckmdlngPrespecified_Show",
                            label = 'Show/Hide menu to pre-specify the allowed blocktypes',
                            value = FALSE
              ),
              ##### 5.2.3 (B) Non pre-specified ####
              #### "blckmdlngBlockTypes"
              conditionalPanel(
                condition = 'input.blckmdlngPrespecified_Switch == false',
                selectInput(inputId = "blckmdlngBlockTypes",
                            label = "Allowed blocktypes",
                            choices = NULL,
                            multiple = TRUE
                ),
                #### 5.2.3 (C) Number of clusters (non pre-specified) ####
                ### "blckmdlngNumClusters"
                numericInput(inputId = "blckmdlngNumClusters",
                             label = 'How many clusters to use in the generation of partitions?',
                             value = 3,
                             min = 1,
                             step = 1
                ),
              ), # Conditional blckmdlngPrespecified_Switch
            ),
            column(4,
              ### 5.3 Other options ####
              withTags(h4(b('Other options'))),
              withTags(h4('Computation')),
              #### 5.3.1 Number of repetitions ####
              #### "blckmdlngRepetitions"
              numericInput(inputId = "blckmdlngRepetitions",
                           label = 'How many repetitions/different starting partitions to check?',
                           value = 2,
                           min = 1,
                           step = 1
              ),
              #### 5.3.2 Random Seed ####
              ### "blckmdlngRandomSeed"
              numericInput(inputId = "blckmdlngRandomSeed",
                           label = 'Insert a random seed to use it',
                           value = NULL,
                           min = 0,
                           step = 1
              ),
              #### 5.3.3 Multi-core processing ####
              #### "MultiCore"
              checkboxInput(inputId = "MultiCore",
                            label = 'Should the parallel computation be used?',
                            value = FALSE
              ),
            ),
            column(4,
              withTags(h4('Results')),
              #### 5.3.4 Restore-memory max size
              numericInput(inputId = 'Restore_MaxMemory',
                           label = 'Results to store in buffer memory',
                           value = 3,
                           min = 1,
                           max = 10,
                           step = 1),
              #### 5.3.5 Number of results to save ####
              #### "blckmdlngMaxSavedResults"
              numericInput(inputId = "blckmdlngMaxSavedResults",
                           label = 'How many results to save?',
                           value = 10,
                           min = 1,
                           step = 10),
              #### 5.3.6 Saving initial parameters ####
              fluidRow(
                column(5,
                  #### "blckmdlngInitialParams"
                  checkboxInput(inputId = "blckmdlngInitialParams",
                                label = 'Should the initial parameters be saved?',
                                value = TRUE),
                ),
                column(7,
                  withTags(i("Saving the additional parameters can take up more memory, but also preserve precious information")),
                ),
              ),
              #### 5.3.7 Returning all ####
              fluidRow(
                column(7,
                  #### "blckmdlngAll"
                  checkboxInput(inputId = "blckmdlngAll",
                                label = 'Should solution be shown for all partitions (not only the best one)?*',
                                value = TRUE),),
                column(5,
                  withTags(i("Disable for very complex calculation and/or low-end machines")),
                ),
              ),
              ### 5.8 Which best partition to print ####
              conditionalPanel(
                condition = 'input.blckmdlngAll==true',
                fluidRow(
                  column(7,
                    ### "whichIM"
                    condition = "input.blckmdlngAll==true",
                    numericInput(inputId = "whichIM",
                                 label = 'Which "best" partition should be printed?*',
                                 value = 1,
                                 min = 1,
                                 step = 1),
                  ), # / col
                  column(5,
                    br(''), withTags(i('*Affects also error matrix and mean matrix')),
                  ), # / col
                ), # / Fluid row
              ), # / Conditional panel
              #### 5.3.8 Printing extra info ####
              #### "blckmdlngPrintRep"
              checkboxInput(inputId = "blckmdlngPrintRep",
                            label = 'Should some information about each optimization be printed?',
                            value = TRUE),
            ), # Column 4
          ), # / Col layout
        ), # / Sidebar panel
        mainPanel(width = 3,
          ### 5.6 Start blockmodeling ####
          ### "blckmdlngRun"
          withTags(h4(b("Start blockmodeling"))),
          actionButton(inputId = "blckmdlngRun",
                       label = "Process data",
                       icon = icon(name = "calculator",
                                   lib = "font-awesome")
          ),
          hr(),
          ### 5.7 Restore from memory ####
          withTags(h4(b("Restore from memory"))),
          checkboxInput(inputId = 'Restore_Switch',
                        label = 'Check to restore previous results',
                        value = F),
          conditionalPanel(
            condition = 'input.Restore_Switch==true',
            numericInput(inputId = 'Restore_Selector',
                         label = 'Memory slot to restore',
                         value = 1, min = 1, max = 10),
            withTags(h5(style='color=#ff0000',
                        b(i('Press "Process data" to restore'))
            )),
          ),
          hr(),
          ### 5.8 Upload results ####
          withTags(h4(b("Upload results"))),
          checkboxInput(inputId = "blckmdlngRDS",
                        label = "Upload blockmodelling results",
                        value = F),
          ### 5.9 Load blockmodeling results from RDS ####
          ### "blckmdlngRDS", "blckmdlngFileRDS"
          #### Upload results as RDS file
          conditionalPanel(
            condition = 'input.blckmdlngRDS==true',
            fileInput(inputId = "blckmdlngFileRDS",
                      label = NULL,
                      multiple = F,
                      buttonLabel = "Browse",
                      placeholder = "Your RDS file here",
                      accept = c(".RDS")
            ),
            withTags(h5(i('Use the "Read Data" button under the "Data upload" tab to read the matrix from this file'))),
          ), # Conditional panel RDS
          hr(),
          ### 5.10 Download blockmodeling RDS ####
          ### "DownloadBlckRDS"
          withTags(h4(b("Downloads"))),
          withTags(h5(b("Download results"))),
          p(
            downloadButton(outputId = "DownloadBlckRDS",
                           label = "Download blockmodeling results",
                           icon = icon(name = "download",
                                       lib = "font-awesome")
            ),
            conditionalPanel(
              condition = 'input.blckmdlngPrespecified_Switch==true',
              withTags(i('After processing the data it will be possible to download the custom blockmodel')),
              downloadButton(outputId = 'downloadCustomBlck',
                             label = 'Download custom blockmodel',
                             icon = icon(name = "download",
                                         lib = "font-awesome")
              ),
            ),
          ),
          p(
            ### 5.11 Download vector partitions ####
            ### "DownloadClu"
            downloadButton(outputId = "DownloadClu",
                           label = "Download partitions as vector",
                           icon = icon(name = "download",
lib = "font-awesome") ), ), withTags(h5(b("Other downloads"))), ### 5.12 Download image matrix #### conditionalPanel( condition = "input.dropIM == true", p( # "DownloadIMtext" downloadButton(outputId = "DownloadIMtext", label = "Download image matrix as txt", inline=T, icon = icon(name = "table", lib = "font-awesome") ), ), ), # "DownloadIMrds" p( downloadButton(outputId = "DownloadIMrds", label = "Download image matrix as RDS", inline=T, icon = icon(name = "table", lib = "font-awesome") ), ), # "DropIM" checkboxInput(inputId = "dropIM", label = 'Drop one-element dimensions', value = TRUE, width = '100%'), ), # / main panel ),# / Sidebar Layout mainPanel( width=12, ### 5.4 Set block-types weights - Menu #### conditionalPanel( condition = 'input.blockTypeWeights_Show==true', sidebarPanel(width = 12, ### 'blockTypeWeights_Switch', checkboxInput(inputId = 'blockTypeWeights_Switch', label = 'Use block-type weights', value = F), withTags(h4(b('Set custom block-types\' weights'))), fluidRow( ### 'blockTypeWeights_com', 'blockTypeWeights_nul' column(4, withTags(h5(b('Basic'))), numericInput(inputId = 'blockTypeWeights_com', label = 'Complete',value = 1,min = 0), numericInput(inputId = 'blockTypeWeights_nul', label = 'Null',value = 1,min = 0), numericInput(inputId = 'blockTypeWeights_dnc', label = '"Do not care"',value = 1,min = 0), ), ### 'blockTypeWeights_rre','blockTypeWeights_cre' column(4, withTags(h5(b('Regular'))), numericInput(inputId = 'blockTypeWeights_rre', label = 'Row-regular',value = 1,min = 0), numericInput(inputId = 'blockTypeWeights_cre', label = 'Column-regular',value = 1,min = 0), numericInput(inputId = 'blockTypeWeights_reg', label = 'f-Regular',value = 1,min = 0), ), ### 'blockTypeWeights_dnc', 'blockTypeWeights_reg' column(4, withTags(h5(b('Advanced'))), # numericInput(inputId = 'blockTypeWeights_rdo', # label = 'Row-dominant',value = 1,min = 0), # numericInput(inputId = 'blockTypeWeights_cdo', # label = 'Column-dominant',value = 1,min = 0), numericInput(inputId = 'blockTypeWeights_den', label = 'Density',value = 1,min = 0), numericInput(inputId = 'blockTypeWeights_avg', label = 'Average',value = 1,min = 0), ),# /column ), # /fluidRow ), # /sidebarPanel ), # / Conditional Panel Show ### 5.5 Pre-specified block types #### conditionalPanel( condition = 'input.blckmdlngPrespecified_Show == true', sidebarLayout( sidebarPanel( # width = 12, ##### 5.5.1 Pre-specified switch #### #### "blckmdlngPrespecified_Switch" withTags(h5(b('Check the box to use the block model'))), checkboxInput(inputId = "blckmdlngPrespecified_Switch", label = 'Use pre-specified blocktypes?', value = FALSE), withTags(i('Even if the menu is hidden, your choice is remembered')), #### 5.5.2 DT table options #### fluidRow( column(width = 6, ##### 5.5.2 (A) Clusters' size #### ##### 'CustoomBlockModel_NumberCluster' numericInput(inputId = "CustoomBlockModel_NumberCluster", label = 'Number of cluster', value = 3, min = 2, step = 1), actionButton(inputId = 'SetSizeDT', label = 'Confirm', icon = icon(name = "window-maximize", lib = "font-awesome") ), ), # column column(width = 6, ##### 5.5.2 (B) Block types #### ##### 'TowardsDT' selectInput(inputId = "TowardsDT", label = "Select the allowed blocktypes", choices = NULL, selected = c("nul","com"), multiple = TRUE ), actionButton(inputId = "LoadBlocksIntoDT", label = "Load blocks", icon = icon(name = "clone", lib = "font-awesome") ), ),# End of column with inputs ), # End of fluid row hr(), #### 5.5.3 File upload for pre-specified block-types' array #### ##### 5.5.3 
(A) Type of uploaded array #### # "ArrayInput" radioButtons(inputId = "ArrayInput", label = "Type of file to upload?", selected = ".RDS", choiceValues = c(".RDS",".RData"), choiceNames = c("R Data Serialized","R Data"), inline = T ), ##### 5.5.3 (B) Upload array as RDS file #### conditionalPanel( condition = 'input.ArrayInput==".RDS"', fileInput(inputId = "PrespecifiedArrayRDS", label = "Upload a RDS file", multiple = F, buttonLabel = "Browse", placeholder = "Your R file here", accept = c(".RDS") ), ),# Conditional panel RDS ##### 5.5.3 (C) Upload array as RData file #### conditionalPanel( condition = 'input.ArrayInput==".RData"', fileInput(inputId = "PrespecifiedArrayRData", label = "Upload a RData file", multiple = F, buttonLabel = "Browse", placeholder = "Your R file here", accept = c(".RData") ), ), # Conditional panel RData withTags(i("An array with four dimensions. The first is as long as the maximum number of allowed block types for a given block. The second dimension is the number of relations. The third and the fourth represent rows' and columns' clusters. For more information see", a(href="https://cran.r-project.org/web/packages/blockmodeling/blockmodeling.pdf#page=10",'here',target="_blank"))), ##### 5.5.3 (D) Button to upload the array ##### 'UploadArray' hr(), actionButton(inputId = 'UploadArray', label = 'Load the array to see the block model', icon = icon(name = 'upload', lib = 'font-awesome') ), ##### 5.5.3 (E) Switch to edit the uploaded array ##### 'EditUploadedArray' checkboxInput(inputId = 'EditUploadedArray', label = 'Ignore the uploaded array', value = F ), withTags(h5('Only turn on',i('after'),'loading a block model from file')), ), # / Sidebar mainPanel( #### 5.5.4 DT Table #### DT::dataTableOutput(outputId = 'CustomBlockModel',width = '100%'), fluidRow( column(width = 3, hr(), ), column(width = 4, actionButton(inputId = 'ResetSelectionDT', label = 'Reset selection', icon = icon(name = 'recycle', lib = "font-awesome")), ), column(width = 3, actionButton(inputId = 'SelectAllDT', label = 'Select all', icon = icon(name = 'check', lib = "font-awesome")), ), column(width = 2, withTags(p('')), ), ), # / fluidrow ), ), ), # / conditional panel blckmdlngPrespecified_Show==T ### 5.13 Show the blockmodeling's results #### ### "Tableblckmdlng", "Summaryblckmdlng" withTags({ div(h4(b("Summary of blockmodeling results"))) }), tabsetPanel( tabPanel(title = "Table", tableOutput("Tableblckmdlng"), ), tabPanel(title = "Summary", verbatimTextOutput("Summaryblckmdlng"), ), tabPanel(title = "Image matrix", tableOutput("TableIM") ), tabPanel(title = "Error matrix", fluidRow( column(4,tableOutput("TableEM")), column(5, withTags(p(''))), column(3, numericInput(inputId = 'DigitsEM', label = 'How many digits to show?', value = 3,min = 0,step = 1), ), ), ), tabPanel(title = "Mean matrix", fluidRow( column(4,tableOutput("TableMean")), column(5, withTags(p(''))), column(3, numericInput(inputId = 'DigitsMean', label = 'How many digits to show?', value = 3,min = 0,step = 1), ), ), ), ), ), # mainPanel # ), # Sidebar layout ), # Tab panel2 ## 6. 
Show the Adjacency Matrix #### ## "adjOptType","adj","adjPlot" tabPanel(title = "Adjacency matrix", sidebarLayout( sidebarPanel(width = 4, ### 6.1 Select network #### withTags(h4(b("Select matrix"))), radioButtons(inputId = "adjSelector", label = "Which matrix do you want to use?", choiceNames = c("original"), choiceValues = c(1) ), p(id='BlckNotRunYet_Plot',style="color:red;", 'Run the blockmodeling to be able to select\nthe partitioned network' ), ### 6.2 Select type of output #### conditionalPanel( condition = "input.adjSelector == 1", withTags(h4(b("Select output"))), radioButtons(inputId = "adjOptType", label = "Type of visualisation", choiceNames = c("table","plot"), choiceValues = c("t","p") ), conditionalPanel( condition = "input.adjOptType == 't'", withTags(h4(b("Export adjacency matrix"))), ### 6.4 Download adjacency matrix #### downloadButton(outputId = "downloadAdj", label = 'Download', icon = icon(name = "download", lib = "font-awesome") ), ), # /input.adjOptType ), # /input.adjSelector ### 6.3 Which best partition to print #### ### "whichIM_adjPlot" conditionalPanel( condition = "input.blckmdlngAll==true", conditionalPanel( condition = 'input.adjSelector==2', numericInput(inputId = "whichIM_adjPlot", label = 'Which "best" partition should be printed?', value = 1, min = 1, step = 1, ), ), ), ### 6.4 Margin size ## 'MatrixPlotMargin' numericInput(inputId = 'MatrixPlotMargin', label = 'Set the plot\'s margin', value = 1.6, min = 0, step = .1 ), ), mainPanel( ### 6.3 Table output original matrix #### tableOutput("adj"), ### 6.4 Output plot-matrix #### plotOutput(outputId = "adjPlot",height = '100%'), ), ), #</Sidebarlayout> ), # Tabpanel tabPanel(title = "Network Plot", ## 7 Various sys of network plots #### conditionalPanel( ### 7.1 "Network" and "igraph" sys #### condition = "input.PlotSys != 3", plotOutput("NetworkPlot", height = 640,width = 800), ), conditionalPanel( condition = "input.PlotSys == 3", ### 7.2 "visNetwork" sys #### visNetwork::visNetworkOutput("igraphPlot", height = 640,width = 800), ), ## 8. 
Plotting options#### # "PlotOpt" withTags({ div(h4(b("Plotting options"))) }), hr(), fluidRow( ### 8.1 Select matrix to plot #### ## "PlotSelector" column(6, withTags(i("Select network")), radioButtons(inputId = "PlotSelector", label = "Which matrix to use?", inline = T, choiceNames = c("original"), choiceValues = c(1), ), p(id='BlckNotRunYet_Plot',style="color:red;", 'Run the blockmodeling to be able to select\nthe partitioned network' ), ), ### 8.2 Select plotting sys #### ## "PlotSys" column(6, withTags(i("Select output")), radioButtons(inputId = "PlotSys", label = "Which package to use for plotting?", choiceNames = c("network","igraph","visNetwork"), choiceValues = c(1,2,3), inline = TRUE, selected = 2 ), ), column(3, conditionalPanel( condition = 'input.PlotSelector==2', #### 8.2.1 Which best partition to print #### ### "whichIM_Plot" numericInput(inputId = "whichIM_Plot", label = 'Which "best" partition should be used for plotting?', value = 1, min = 1, step = 1, ), ), # /conditionalPanel ), # /column ), # /fluidRow ### 8.3 Options for the "network" plotting sys #### conditionalPanel( condition = "input.PlotSys == 1", withTags({ div(h4(b("network Plotting Options"))) }), ## Layout with mutiple coloumns hr(), fluidRow( column(3, withTags(h5(b("General options"))), #### 8.3.1 Mode #### ## "PlotMode" radioButtons(inputId = "PlotMode", label = 'Nodes arrangement', choiceNames = c("Fruchterman-Reingold algorithm","Circle"), choiceValues = c("fruchtermanreingold","circle"), inline = TRUE ), #### 8.3.2 Isolate #### ### "PlotIsolate" checkboxInput(inputId = "PlotIsolate", label = 'Isolated nodes', value = TRUE ), #### 8.3.3 Interactive #### ### "PlotInteractive" # checkboxInput(inputId = "PlotInteractive", # label = 'Should the plot be plot be interactive?*', # value = FALSE # ), # withTags({ # div(b("(* Can be very slow!)")) # }), ), column(5, h4("Aesthetic options"), conditionalPanel( condition ="input.directionality == true", #### 8.3.4 Arrows #### ##### 8.3.4 (A) Whether to override arrows #### # "OverridePlotArrows" checkboxInput(inputId = "OverridePlotArrows", label = 'Override default arrows*', value = FALSE ), withTags(p('*',i('By default arrows are shown for directional networks'))), conditionalPanel( condition ="input.OverridePlotArrows == true", ##### 8.3.4 (B) Overriding plot arrows #### "PlotArrows" checkboxInput(inputId = "PlotArrows", label = 'Display arrows', value = FALSE ), ), ##### 8.3.4 (C) Arrow size #### #### "PlotArrowSize" sliderInput(inputId = "PlotArrowSize", ticks = TRUE, label = 'Dimension of plot\'s arrows', value = 1, min = .5, max = 20, step = .5 ), ), ##### 8.3.4 (D) Message "No directionality, No arrows" #### conditionalPanel( condition ="input.directionality == false", withTags( h5( i(style="color:red;", "Arrows ",u("cannot")," be set"), i("because they do not make sense for non-directional networks"), ) ) ), #### 8.3.5 Node Labels #### ##### 8.3.5 (A) Hide labels #### 'NetworkNodeLabelsHide' withTags(h5(b('Hide the nodes\' labels'))), checkboxInput(inputId = 'NetworkNodeLabelsHide', label = 'Check to hide', value = T), conditionalPanel( condition = 'input.NetworkNodeLabelsHide==false', ##### 8.3.5 (B) Labels' size #### "PlotLabelSize" sliderInput(inputId = "PlotLabelSize", ticks = TRUE, label = 'Dimension of plot\'s labels', value = 1, min = .5, max = 20, step = .5 ), ), #### 8.3.6 Nodes size #### ### "PlotNodeSize" sliderInput(inputId = "PlotNodeSize", ticks = TRUE, label = 'Dimension of plot\'s nodes', value = 5, min = .5, max = 10, step = .5 ), ) ) ),# 
END Conditional panel1 : "network Plotting Options"
conditionalPanel(
  condition = "input.PlotSys == 2",
  ### 8.4 Options for the "igraph" plotting sys ####
  withTags({ h4(b("igraph Plotting Options")) }),
  ## Layout with multiple columns
  hr(),
  fluidRow(
    column(3,
           withTags({ h4("Vertex") }),
           #### 8.4.1 Size of the node ####
           ### "PlotVertexSize"
           sliderInput(inputId = "PlotVertexSize", ticks = TRUE,
                       label = 'Dimension of plot\'s nodes',
                       value = 5, min = .5, max = 20, step = .5
           ),
           #### 8.4.2 Color of the node's frame ####
           ### "PlotVertexFrameColour"
           selectInput(inputId = "PlotVertexFrameColour",
                       label = 'Color of the nodes\' frame',
                       choices = palette.colors(palette = palette.pals()[16]),
                       selected = '#3283FE'
           ),
           ##### 8.4.3 Shape ###
           #### "PlotVertexShape"
           selectInput(inputId = "PlotVertexShape",
                       label = "Shape of the plot's nodes?",
                       choices = c("Circle"="circle",
                                   "Square"="square",
                                   "Rectangle"="rectangle",
                                   # "Circle and Square"="csquare",
                                   # "Circle and Rectangle"="crectangle",
                                   "Vertical Rectangle"="vrectangle",
                                   "Sphere"="sphere","None"="none"),
                       selected = 'circle', multiple = F
           ),
           #### 8.4.4 Hide the nodes' labels ####
           ### 'GraphNodeLabelsHide'
           withTags(h5(b('Hide the nodes\' labels'))),
           checkboxInput(inputId = 'GraphNodeLabelsHide',
                         label = 'Check to hide', value = T),
           conditionalPanel(
             condition = 'input.GraphNodeLabelsHide==false',
             #### 8.4.3 Font Family of the nodes' labels ####
             ### "PlotVertexLabelFontFamily"
             radioButtons(inputId = "PlotVertexLabelFontFamily",
                          label = "Node labels' font",
                          choices = c("Serif"="serif","Sans serif"="sans"),
                          inline = TRUE
             ),
             #### 8.4.4 Size of the node's labels ####
             ### "PlotVertexLabelSize"
             sliderInput(inputId = "PlotVertexLabelSize", ticks = TRUE,
                         label = 'Dimension of node\'s labels',
                         value = 1, min = .5, max = 20, step = .5
             ),
             #### 8.4.5 Distance of the node's labels ####
             ### "PlotVertexLabelDist"
             sliderInput(inputId = "PlotVertexLabelDist", ticks = TRUE,
                         label = 'Labels\' distance from the node',
                         value = 0.5, min = .5, max = 3, step = .5
             ),
             #### 8.4.6 Colour of the node's labels ####
             ### "PlotVertexLabelColour"
             selectInput(inputId = "PlotVertexLabelColour",
                         label = 'Color of the nodes\' labels',
                         choices = palette.colors(palette = palette.pals()[13]),
                         selected = '#BAB0AC'
             ),
           ),
    ),
    column(4, offset = 1,
           h4("Edges"),
           conditionalPanel(
             condition = "input.ValuedMatrix == true",
             #### 8.4.7 Edges width (manual/valued) ####
             checkboxInput(inputId = "igraphPlotEdgeWidthValues",
                           label = 'Edges\' width shows the network\'s values',
                           value = FALSE
             ),
             conditionalPanel(
               condition = "input.igraphPlotEdgeWidthValues == true",
               ##### 8.4.7 (A) Max width of the edges ####
               #### "PlotEdgeWidth"
               sliderInput(inputId = "igraphPlotEdgeMaxWidth", ticks = TRUE,
                           label = 'Max width of plot\'s edges',
                           value = 1, min = .5, max = 20, step = .5),
             ),
           ),
           conditionalPanel(
             condition = "input.igraphPlotEdgeWidthValues == false",
             ##### 8.4.7 (B) Width of the edge ####
             #### "PlotEdgeWidth"
             sliderInput(inputId = "igraphPlotEdgeWidth", ticks = TRUE,
                         label = 'Width of plot\'s edges',
                         value = .5, min = .5, max = 20, step = .5),
           ),
           #### 8.4.8 Colour of the edge ####
           ### "PlotEdgeColour", 'igraphPlotEdgeShadeValues'
           checkboxInput(inputId = "igraphPlotEdgeShadeValues",
                         label = 'Edges\' colour shows the network\'s values',
                         value = FALSE
           ),
           conditionalPanel(
             condition = "input.igraphPlotEdgeShadeValues == false",
             selectInput(inputId = "PlotEdgeColour",
                         label = 'Color of the plot\'s edges',
                         choices = palette.colors(palette = palette.pals()[13]),
                         selected = '#BAB0AC'
             ),
           ),
           conditionalPanel(
             condition =
"input.directionality == true", #### 8.4.9 Arrows #### ##### 8.4.9 (A) Whether to override arrows #### #### "OverrideigraphPlotArrows" checkboxInput(inputId = "OverrideigraphPlotArrows", label = 'Ovveriding defaultarrow settings?', value = FALSE ), conditionalPanel( condition ="input.OverrideigraphPlotArrows == true", ##### 8.4.9 (B) Setting overidden plot arrows #### #### "igraphPlotArrows" checkboxInput(inputId = "igraphPlotArrow", label = 'Display arrows', value = FALSE ), ), ##### 8.4.9 (C) Size of the edge's arrows #### #### "igraphPlotArrowSize" sliderInput(inputId = "igraphPlotArrowSize", label = 'Size of the edge\'s arrows', value = 1, min = .5, max = 20, step = .5 ), ), ##### 8.4.9 (D) Message "No directionality, No arrows" #### conditionalPanel( condition ="input.directionality == false", withTags( h5( i(style="color:red;", "Arrows ",u("cannot")," be set"), i("because they do not make sense for non-directional networks"), ) ) ), #### 8.4.10 Font Family of the edges' labels #### ### "PlotEdgeLabelFontFamily" radioButtons(inputId = "PlotEdgeLabelFontFamily", label = "Edge labels' font", choices = c("Serif"="serif","Sans serif"="sans"), inline = TRUE ), #### 8.4.11 Color of the edges' labels #### ### "PlotEdgeLabelColour" selectInput(inputId = "PlotEdgeLabelColour", label = 'Colot of the plot\'s edges', choices = palette.colors(palette = palette.pals()[13]), selected = '#BAB0AC' ), ), column(4, #### 8.4.12 Aesthetic option #### h4("Aesthetic option"), #### "PlotEdgeCurved" checkboxInput(inputId = "PlotEdgeCurved", label = 'Curved edges', value = FALSE), conditionalPanel( condition = "input.PlotSelector==1", conditionalPanel( condition = "input.AddAttr==true", ##### 8.4.12 (B) Color of the nodes #### checkboxInput(inputId = 'AttrVertexColYN', label = 'Colour nodes from attribute', value = F), ), conditionalPanel( condition = 'input.AttrVertexColYN==false', ##### 8.4.12 (B) Color of the node without partitions #### #### "PlotVertexColour" selectInput(inputId = "PlotVertexColour", label = 'Color of the plot\'s nodes', choices = palette.colors(palette = palette.pals()[16]), selected = '#3283FE' ), ), ), conditionalPanel( condition = "input.PlotSelector==1&&input.AttrVertexColYN==true", selectInput(inputId = 'NodePaletteGraph', label = 'Select palette* for nodes\' colour', choices = palette.pals(), selected = palette.pals()[2], multiple = F ), ), conditionalPanel( condition = "input.PlotSelector==2", ##### 8.4.12 (C) Colour of the partitions #### selectInput(inputId = 'PlotPaletteGraph', label = 'Select palette* for clusters\' colour', choices = palette.pals(), selected = palette.pals()[2], multiple = F ), div(textOutput(outputId = 'WarningNumColoursGraph'),style='color:red;background-color: #DADADA; margin-top: 5px; margin-right: 5px; margin-bottom: 5px; margin-left: 5px;text-align: center'), withTags(div(b("8 colours"),':',i("R3, R4, ggplot2, Accent, Dark 2, Pastel 2, Set 2"))), withTags(div(b("9 colours"),':',i("Okabe-Ito, Pastel 1, Set 1"))), withTags(div(b("10 colours"),':',i("Paired, Set 3, Tableau 10, Classic Tableau"))), withTags(div(b("26 colours"),':',i("Alphabet"))), withTags(p(b("36 colours"),':',i("Polychrome 36"))), ), ), ), ),# END Conditional panel2 conditionalPanel( condition = "input.PlotSys == 3", ### 8.5 Options for the "visNetwork" plotting sys #### withTags({ h4(b("visNetwork Plotting Options")) }), # Layout with mutiple coloumns hr(), fluidRow( column(3, withTags({ h4("Plotting options") }), #### 8.5.1 Title of the plot #### ### "visTitle" textInput(inputId = 
"visTitle", label = 'Title of the plot', value = NULL ), #### 8.5.2 Subtitle of the plot #### ### "visSubtitle" textInput(inputId = "visSubtitle", label = 'Subtitle of the plot', value = NULL ), #### 8.5.3 Color background #### ### "visBackground" selectInput(inputId = "visBackground", label = 'Color of the plot\'s background', choices = c('peach'='#FBB4AE','pastel light blue'='#B3CDE3','pastel green'='#CCEBC5','pastel purple'='#DECBE4','pastel orange'='#FED9A6','pastel yellow'='#FFFFCC','pastel brown'='#E5D8BD','pastel pink'='#FDDAEC','pastel grey'='#F2F2F2','white'='#FFFFFF'), selected = '#FFFFFF' ), ), column(4, offset = 1, h4("Hierarchy"), #### 8.5.4 Hierarchy #### ### "visHier" checkboxInput(inputId = "visHier", label = 'Hierarchical network', value = FALSE ), conditionalPanel( condition = "input.visHier == 1", ##### 8.5.4 (A) Direction of the nodes #### #### "visHierDirection" radioButtons(inputId = "visHierDirection", label = "Direction", choices = c("up-down"="UD", "down-up"="DU", "left-right"="LR", "right-left"="RL"), inline = TRUE ), ##### 8.5.4 (B) Parent centralisation #### #### "visHierCentralisation" checkboxInput(inputId = "visHierCentralisation", label = 'Centralise parent nodes', value = FALSE ), ), ), column(4, h4("Aestetics"), withTags(h5(b("Nodes"))), #### 8.5.5 Nodes' colours #### conditionalPanel( condition = "input.PlotSelector==1", conditionalPanel( condition = "input.AddAttr==true", checkboxInput(inputId = 'visNetworkAttrVertexColYN', label = 'Colour nodes from attribute', value = F), ), conditionalPanel( condition = 'input.visNetworkAttrVertexColYN==false', #### "visNetworkNodeColour" selectInput(inputId = "visNetworkNodeColour", label = 'Color of the plot\'s nodes', choices = palette.colors(palette = palette.pals()[16]), selected = '#3283FE' ), #### "visNetworkNodeBorder" selectInput(inputId = "visNetworkNodeBorder", label = 'Color of the nodes\' border', choices = palette.colors(palette = palette.pals()[13]), selected = '#BAB0AC' ), ), ), conditionalPanel( condition = "input.PlotSelector==1&&input.visNetworkAttrVertexColYN==true", selectInput(inputId = 'visNetworkAttrPalette', label = 'Select palette* for nodes\' colour', choices = palette.pals(), selected = palette.pals()[2], multiple = F ), ), conditionalPanel( condition = 'input.PlotSelector==2', ### 'PlotPaletteVIS' selectInput(inputId = 'PlotPaletteVIS', label = 'Select palette for clusters\' colour', choices = palette.pals(), selected = palette.pals()[6], multiple = F), hr(), div(textOutput(outputId = 'WarningNumColoursVIS'),style='color:red;background-color: #DADADA; margin-top: 5px; margin-right: 5px; margin-bottom: 5px; margin-left: 5px;text-align: center'), hr(), withTags(div(b("8 colours"),':',i("R3, R4, ggplot2, Accent, Dark 2, Pastel 2, Set 2"))), withTags(div(b("9 colours"),':',i("Okabe-Ito, Pastel 1, Set 1"))), withTags(div(b("10 colours"),':',i("Paired, Set 3, Tableau 10, Classic Tableau"))), withTags(div(b("26 colours"),':',i("Alphabet"))), withTags(p(b("36 colours"),':',i("Polychrome 36"))), ), #### 8.5.6 Nodes' shape selectInput(inputId = 'visNetworkNodeShape', label = 'Shape', choices = c('Square'="square", 'Triangle'="triangle", 'Box'="box", 'Circle'="circle", 'Dot'="dot", 'Star'="star", 'Ellipse'="ellipse", 'Database'="database", 'Diamond'="diamond"), selected = 'circle', multiple = F), #### 8.5.6 Nodes' size ### "visNetworkNodeSize" sliderInput(inputId = "visNetworkNodeSize", ticks = TRUE, label = 'Dimension of plot\'s nodes', value = 5, min = .5, max = 20, step = .5 ), #### 8.5.6 Nodes' 
shadow ### 'visNetworkNodeShadow' checkboxInput(inputId = 'visNetworkNodeShadow', label = 'Draw a shadow?', value = T), conditionalPanel( condition = 'input.visNetworkNodeShadow==true', ### 'visNetworkNodeShadowSize' sliderInput(inputId = "visNetworkNodeShadowSize", ticks = TRUE, label = 'Dimension of nodes\' shadows', value = 5, min = .5, max = 20, step = .5 ), ), withTags(h5(b("Edges"))), #### 8.5.7 Edges' colour ### "visNetworkEdgeColour" textInput(inputId = "visNetworkEdgeColour", label = 'Color of the plot\'s edges', value = "SkyBlue" ), #### 8.5.7 Edges' highlight colour ### "visNetworkEdgeHighlight" textInput(inputId = "visNetworkEdgeHighlight", label = 'Color of the higlighted edge', value = "yellow" ), #### 8.5.8 Edges' shadow ### 'visNetworkNodeShadow' checkboxInput(inputId = 'visNetworkEdgeShadow', label = 'Draw a shadow?', value = T), ), ), )# Conditional panel3 ), # Tab panel4 tabPanel(title = 'Info', ## 9. Info package/app #### icon = icon(name = 'info-sign',lib = 'glyphicon'), div(''), ### 9.1 Text #### fluidRow( column(width = 2,p(''),), column(width = 8, htmlOutput(outputId = 'Info'), ), column(width = 2,p(''),), ), hr(), withTags(h4(b('Download citation files'))), fluidRow( column(width = 2,p(' ')), column(width = 5, ### 9.2 RIS file #### downloadButton(outputId = 'CitationRIS', label = 'In .ris format', icon = icon(name = 'save-file', lib = 'glyphicon')), ), column(width = 5, ### 9.3 bib file #### downloadButton(outputId = 'CitationBIB', label = 'In BibText format', icon = icon(name = 'save-file', lib = 'glyphicon')), ), ), # \fluidRow ), ),# Tabset panel withTags({ div(class="Header", table( tr( td( width = "134px", img(class = "FooterLogo", src="https://www.arrs.si/lib/img/arrs-logo-en.gif"), ), td( div(style="h3", b("Acknowledgment of financial support"), 'The development of this package is financially supported by the Slovenian Research Agency (', a(href="www.arrs.gov.si", "www.arrs.gov.si"), ') within the research project', a(href="fdv.uni-lj.si/en/research/institute-of-social-science/national-research-projects/P5438", "J5-2557 (Comparison and evaluation of different approaches to blockmodeling dynamic networks by simulations with application to Slovenian co-authorship networks)"), '.' ), ), ), ), # /table ) # div "Header" }) )# ui #Sect. 2 Output #### server <- function(input, output, session) { # 0. Reactive values #### Tbl<-reactiveValues(Current = NULL,Rows=NULL,Cols=NULL) Blck<-reactiveValues(RunAlready = FALSE,Custom=NULL, Count=0,Previous=list()) ## 0.1 Reset Blck$Previous's length observe({ if(length(Blck$Previous)!=input$Restore_MaxMemory){ length(Blck$Previous)<<-input$Restore_MaxMemory } }) # 0.2 Reset 'Blck$RunAlready' if it becomes NULL observeEvent(eventExpr = c(Blck$RunAlready),handlerExpr = { YN<-Blck$RunAlready if(is.null(YN)){ Blck$RunAlready<<-FALSE } }) ## 1. 
Reading data ####
# "aj"
ReadData<-eventReactive(input$aj,{
  ### 1.1 Preloaded data
  if(input$blckmdlngRDS){
    #### 1.1.1 From block model results
    dat<-mdllng()$initial.param$M
    MatrixType<-"adjacency"
    #### Blockmodeling was run beforehand
    Blck$RunAlready<<-TRUE
  } else if(input$Sample){
    #### 1.1.2 From sample
    dat<-readRDS(file = "./Sample.rds")
    MatrixType<-"adjacency"
    #### Blockmodeling wasn't run yet
    Blck$RunAlready<<-FALSE
  } else {
    #### Blockmodeling wasn't run yet
    Blck$RunAlready<<-FALSE
    ### 1.2 Options for text/plain files ####
    if(input$type!=4){
      #### 1.2.1 If the separator is 'other' ####
      ### "OtherSep"
      # Reactive inputs are read-only, so the custom separator is kept in a
      # local variable rather than assigned back to input$sep
      sep<-input$sep
      if(input$OtherSep!="") sep<-input$OtherSep
      ### Notification "Reading list in progress"
      showNotification(ui = "Reading data from uploaded list",
                       type = 'default', id = 'ReadingList',
                       duration = NULL, closeButton = F)
      #### 1.2.3 Determine type of file provided ####
      if(input$type==1){
        MatrixType<-"adjacency"
        # For adj matrix, the row names should always be
        # in the first column
        ListRowNames<-1
      } else {
        ListRowNames<-NULL
        if(input$type==2)MatrixType<-"edgelist"
        if(input$type==3)MatrixType<-"incidence"
      }
      #### 1.2.4 Reads the data from file ####
      #### "List", "sep", "whites",
      UploadedFile<-input$List
      dat <- read.delim(file = UploadedFile$datapath,
                        sep = sep,
                        strip.white = input$whites,
                        # row.names = 1,
                        row.names = ListRowNames,
                        header = input$ListHeader)
      dat<-as.matrix(x = dat)
      removeNotification('ReadingList')
    } else {
      ### 1.3 Pajek input ####
      ## "PajekFile", "PajekInput"
      ### Notification "Reading list in progress"
      showNotification(ui = 'Reading Pajek file',
                       type = 'default', id = 'ReadingPajek',
                       duration = NULL, closeButton = F)
      MatrixType<-"adjacency"
      UploadedFile<-input$PajekFile
      #### 1.3.1 Reads the data from Pajek .net file ####
      if(input$PajekInput=="PajekNetwork"){
        dat <- loadnetwork(filename = UploadedFile$datapath,
                           useSparseMatrix = F)
      }
      #### 1.3.2 Reads the data from Pajek .mat file ####
      if(input$PajekInput=="PajekMatrix"){
        dat <- loadmatrix(filename = UploadedFile$datapath)
      }
      removeNotification('ReadingPajek')
    }
  }
  dat
})
## 2. Create network object ####
NW<-eventReactive(ReadData(),{
  dat<-ReadData()
  ### 2.1 Determine type of file provided
  if(input$type==1){
    MatrixType<-"adjacency"
    # For adj matrix, the row names should always be
    # in the first column
    ListRowNames<-1
  } else {
    ListRowNames<-NULL
    if(input$type==2)MatrixType<-"edgelist"
    if(input$type==3)MatrixType<-"incidence"
    if(input$type==4)MatrixType<-"adjacency"
  }
  ### 2.2 Checks for valued networks ####
  ## "ValuedMatrix", "ValuesName"
  if(input$ValuedMatrix){
    IgnoreEval<-FALSE
    ValuesName<-"weights"
  } else {
    IgnoreEval<-TRUE
    ValuesName<-NULL
  }
  ### 2.3 Turn the matrix into a network ####
  ## "directionality","loops", "parallel"
  dat <- network::network(x = dat,
                          directed = input$directionality,
                          loops = input$loops,
                          # multiple = input$parallel,
                          matrix.type = MatrixType,
                          ignore.eval = IgnoreEval,
                          names.eval = ValuesName)
  #### 2.3.1 Notification "Multiplex matrix"
  if(is.multiplex(dat)){
    showNotification(ui = "The uploaded list contains a multiplex matrix. The matrix may need to be simplified by removing both loops and multiple edges.",
                     type = 'warning', duration = 5, closeButton = T)
  }
  #### 2.3.2 Notification "Bipartite matrix"
  if(network::is.bipartite(dat)){
    showNotification(ui = "The uploaded list contains a bipartite matrix. Bipartition will be ignored",
                     type = 'warning', duration = 5, closeButton = T)
  }
  #### 2.3.3 Notification "Reading file completed"
  showNotification(ui = "Elaboration of uploaded file completed",
                   type = 'default', duration = 10, closeButton = T)
  ### 2.4 Add attributes ####
  if(input$AddAttr){
    AddAttrVal<-read.table(file = input$AddAttrFile$datapath,
                           header = F,quote = "",col.names = F)
    network::set.vertex.attribute(x = dat,attrname = input$AddAttrName,
                                  value = AddAttrVal)
  }
  ### 2.5 Delete isolated nodes? ####
  if(input$DelIsolated){
    dat<-intergraph::asIgraph(x = dat)
    dat<-delete_vertices(graph = dat,v = V(dat)[degree(graph = dat)==0])
    dat<-intergraph::asNetwork(x = dat)
  }
  dat
})
## 3. Get adjacency matrix ####
# Converts edge lists and incidence matrices into an adjacency matrix
GetAdjacencyMatrix<-eventReactive(ReadData(),{
  ### 3.1 data from file ####
  ### Determine type of file provided
  if(input$type==2||input$type==3){
    #### 3.1.1 For edge lists and incidence matrices ####
    if(input$type==2)MatrixType<-"edgelist"
    if(input$type==3)MatrixType<-"incidence"
    #### Reads the data as a network object
    M<-NW()
    #### Converts the network into an adjacency matrix
    if(input$ValuedMatrix){
      ##### 3.1.1 (A) for valued networks ####
      M<-as.matrix.network(x = M,matrix.type = "adjacency",
                           attrname = "weights")
    } else {
      ##### 3.1.1 (B) for non-valued networks ####
      M<-as.matrix.network(x = M,matrix.type = "adjacency")
    }
  } else {
    ### 3.2 Otherwise, the data is already in the right format
    M<-ReadData()
  }
  M
})
## 4. Outputting summary text ####
# "summary", "IncludeAdj"
output$summary <- renderPrint({
  dat<-NW()
  network::summary.network(dat,print.adj = input$IncludeAdj)
})
# # Multiplex-network warnings
# output$AdjWarning<-renderText({
#   dat<-NW()
#   # igraph version
#   igraphDat <- intergraph::asIgraph(dat)
#
#   # Manages multiplex networks
#   if(is.multiplex(dat)){
#     ###
#   }
# })
# 5. Plotting adjacency matrix ####
# "adjPlot", "adjOptType", "adjSelector"
output$adjPlot<-renderPlot({
  # Checks if the user selected the original (= 1) or the
  # partitioned (= 2) matrix
  if(input$adjSelector==2){
    ## 5.1 Plotting the partitioned adjacency matrix ####
    ## Loads blockmodeling's result
    dat<-mdllng()
    ## Plots the partitioned matrix
    output<-plot(dat,main="",which = input$whichIM_adjPlot,
                 mar=rep(input$MatrixPlotMargin,4))
  } else {
    ## 5.2 Checks if the user selected "plot" or "table" for the original matrix
    if(input$adjOptType=="t") return(NULL)
    ## 5.3 Prints original adjacency matrix ####
    ## Load the matrix
    dat<-GetAdjacencyMatrix()
    ## Plots the original matrix
    output<-plotMat(x = dat,ylab = '',xlab = '',plot.legend = F,
                    main = '',title.line = '',
                    mar=rep(input$MatrixPlotMargin,4))
  }
  output
},height = 600,width = 800,res = 128)
# 6. Outputting the adjacency table ####
# "adj"
output$adj <- renderTable({
  ## 6.1 Check what output was requested ####
  if(input$adjOptType=="p") return(NULL)
  if(input$adjSelector==2) return(NULL)
  ## 6.2 Reads matrix ####
  dat<-GetAdjacencyMatrix()
  ## 6.3 Prints the matrix ####
  dat
},rownames = TRUE)
# 7.
Non interactive plots #### # "NetworkPlot", "PlotSys" output$NetworkPlot<-renderPlot({ ## Checks the plotting system if(input$PlotSys==1){ ## 7.1 Plotting with network #### ## 7.1.1 Adds the partitions if needed if(input$PlotSelector==2){ ## Reads data in dat<-NW() clu<-blockmodeling::clu(res=mdllng(),which = input$whichIM_Plot) dat<- network::set.vertex.attribute(x = dat, attrname = "cluster", value = clu) } else { dat<-NW() } ### 7.1.2 Checks setting for the arrows #### if (input$OverridePlotArrows){ PlotArrows<-input$PlotArrows } else { PlotArrows<-input$directionality } ### 7.1.3 With or without partitions? if(input$PlotSelector==2){ VertexCol<-"cluster" } else { VertexCol<-2 } ### Plotting network::plot.network(x = dat, usearrows = PlotArrows, mode = input$PlotMode, displayisolates = input$PlotIsolate, # interactive = PlotInteractive, arrowhead.cex = input$PlotArrowSize, label.cex = input$PlotLabelSize, vertex.cex = input$PlotNodeSize, vertex.col= VertexCol, label=network.vertex.names(dat), displaylabels= input$NetworkNodeLabelsHide ) } else { ## 7.2 Plotting with igraph ### ### Create graph { if(input$directionality){ iGraphDir<-'directed' } else { iGraphDir<-'undirected' } if(input$ValuedMatrix){ iGraphValued<-TRUE } else { iGraphValued<-NULL } dat2<- igraph::graph.adjacency(adjmatrix = GetAdjacencyMatrix(), weighted = iGraphValued, mode = iGraphDir, add.rownames = TRUE ) ## Add attributes if(input$AddAttr){ AddAttrVal<-read.table(file = input$AddAttrFile$datapath, header = F,quote = "",col.names = F) V(dat2)$Added.Attr<-AddAttrVal[[1]] } ### 7.3.3 With or without partitions? if(input$PlotSelector==2){ V(dat2)$cluster<-clu(res = mdllng(),which = input$whichIM_Plot) # Assigns colours to each partition NodesColours <- palette.colors(n = length(unique(V(dat2)$cluster)), palette = input$PlotPaletteGraph) V(dat2)$color <- NodesColours[V(dat2)$cluster] } else if(input$AttrVertexColYN){ NodesColours <- palette.colors(n = length(unique(V(dat2)$Added.Attr)), palette = input$NodePaletteGraph) for(i in 1:length(unique(V(dat2)$Added.Attr))){ V(dat2)$Added.Attr<-gsub(pattern = unique(V(dat2)$Added.Attr)[i], replacement = i, x = V(dat2)$Added.Attr) } V(dat2)$color <- NodesColours[as.numeric(V(dat2)$Added.Attr)] } else { V(dat2)$color <- input$PlotVertexColour } } ### 7.2.1 Edges width (manual/valued) #### # Checks if the user wants the edges' width to # represent the network's value if(input$igraphPlotEdgeWidthValues==FALSE){ igraphPlotEdgeWidth <- input$igraphPlotEdgeWidth } else { temp <- igraph::get.edge.attribute(dat2)$weight MaxTemp<- max(temp) igraphPlotEdgeWidth <- input$igraphPlotEdgeMaxWidth/MaxTemp*temp } ### 7.2.2 Arrow setting #### if(input$OverrideigraphPlotArrows){ #### Overriding arrows if(input$igraphPlotArrow==FALSE) igraphPlotArrow <- 0 if(input$igraphPlotArrow==TRUE) igraphPlotArrow <- 2 } else { #### Default settings if(input$directionality==FALSE) igraphPlotArrow <- 0 if(input$directionality==TRUE) igraphPlotArrow <- 2 } ### 7.2.1 Edges shade (manual/valued) #### # Checks if the user show the edges' values as # a shade of the colour of the network's edges if(input$igraphPlotEdgeShadeValues==FALSE){ igraphPlotEdgeColour <- input$PlotEdgeColour } else { # Prepare the final vector igraphPlotEdgeColour <- E(dat2)$weight # Extract unique weights values WhichWeights <- unique(igraphPlotEdgeColour) # Determine ratio min/max FairestGrey<-min(WhichWeights)/max(WhichWeights) # Create adequate greyscale greys <- grey.colors(n = length(WhichWeights), start = 1-FairestGrey, end = 0) # 
Extendd grey scale to the whole series of weights for(i in 1:length(WhichWeights)){ igraphPlotEdgeColour[E(dat2)$weight==WhichWeights[i]]<-greys[i] } } ### 7.2.3 Checks if the user wants to hide the nodes' labels if(input$GraphNodeLabelsHide){ iGraphLabels<-NA } else { iGraphLabels<-V(dat2)$name } if (input$PlotSys==2) { # If the user selected igraph ### Plots igraph igraph::plot.igraph(x = dat2, vertex.label= iGraphLabels, vertex.size = input$PlotVertexSize, # vertex.color= , vertex.frame.color = input$PlotVertexFrameColour, vertex.shape = input$PlotVertexShape, vertex.label.family = input$PlotVertexLabelFontFamily, vertex.label.cex = input$PlotVertexLabelSize, vertex.label.dist = input$PlotVertexLabelDist, vertex.label.color = input$PlotVertexLabelColour, edge.color = igraphPlotEdgeColour, edge.width = igraphPlotEdgeWidth, edge.arrow.mode = igraphPlotArrow, edge.arrow.size = input$igraphPlotArrowSize, # arrow.width = input$PlotArrowWidth, edge.label.family = input$PlotEdgeLabelFontFamily, # edge.label.cex = input$PlotEdgeLabelSize, # edge.label.dist = input$PlotEdgeLabelDist, edge.label.color = input$PlotEdgeLabelColour, edge.curved = input$PlotEdgeCurved ) } else { # If the user selected VisNetwork return(NULL) } # else of if PlotSystem != 2 } # else of if PlotSystem == 1 },height = 800,width = 600,res = 128) ## 7.3 Warning for short palette warningGraph<-eventReactive(input$PlotPaletteGraph,{ if(length(palette.colors(palette = input$PlotPaletteGraph))<length(unique(clu(res = mdllng(),which = input$whichIM_Plot)))){ wrn<-paste('Select a palette supporting at least', length(unique(clu(res = mdllng(),which = input$whichIM_Plot))), 'colours!') } else {wrn<-NULL} return(wrn) }) output$WarningNumColoursGraph<-renderText({ wrn<-warningGraph() wrn }) # 8. Plotting with VisNetwork #### output$igraphPlot<-renderVisNetwork({ if(input$PlotSys==3){ ## If the user selected VisNetwork ## Create graph ### Create graph { if(input$directionality){ iGraphDir<-'directed' } else { iGraphDir<-'undirected' } if(input$ValuedMatrix){ iGraphValued<-TRUE } else { iGraphValued<-NULL } dat2<- igraph::graph.adjacency(adjmatrix = GetAdjacencyMatrix(), weighted = iGraphValued, mode = iGraphDir, add.rownames = TRUE ) ## Add attributes if(input$AddAttr){ AddAttrVal<-read.table(file = input$AddAttrFile$datapath, header = F,quote = "",col.names = F) V(dat2)$Added.Attr<-AddAttrVal[[1]] } ### 8.1 With or without partitions? 
#### if(input$PlotSelector==2){ V(dat2)$cluster<-clu(res = mdllng(),which = input$whichIM_Plot) # Assigns colours to each partition NodesColours <- palette.colors(n = length(unique(V(dat2)$cluster)), palette = input$PlotPaletteVIS) V(dat2)$color <- NodesColours[V(dat2)$cluster] } else if(input$visNetworkAttrVertexColYN){ NodesColours <- palette.colors(n = length(unique(V(dat2)$Added.Attr)), palette = input$visNetworkAttrPalette) for(i in 1:length(unique(V(dat2)$Added.Attr))){ V(dat2)$Added.Attr<-gsub(pattern = unique(V(dat2)$Added.Attr)[i], replacement = i, x = V(dat2)$Added.Attr) } V(dat2)$color <- NodesColours[as.numeric(V(dat2)$Added.Attr)] } else { V(dat2)$color <- input$visNetworkNodeColour } } ## Converts to visNetwork dat3<-toVisNetworkData(dat2) dat3<<-dat3 ## adds correct labels dat3$nodes$label<-dat3$nodes$name visNetwork::visNetwork(nodes = dat3$nodes, edges = dat3$edges, main = input$visTitle, submain = input$visSubtitle, background=input$visBackground)%>% visNetwork::visOptions(nodesIdSelection = T, height = 600,width = 800, manipulation = F)%>% visNetwork::visNodes(shape = input$visNetworkNodeShape, size = input$visNetworkNodeSize, color = list(border = input$visNetworkNodeBorder), shadow = list(enabled = input$visNetworkNodeShadow, size = input$visNetworkNodeShadowSize))%>% visNetwork::visEdges(shadow = input$visNetworkEdgeShadow, color = list(color = input$visNetworkEdgeColour, highlight = input$visNetworkEdgeHighlight))%>% visNetwork::visHierarchicalLayout(enabled = input$visHier, direction = input$visHierDirection, parentCentralization = input$visHierCentralisation) } }) ## 8.4 Warning for short palette warningVIS<-eventReactive(input$PlotPaletteVIS,{ if(length(palette.colors(palette = input$PlotPaletteVIS))<length(unique(clu(res = mdllng(),which = input$whichIM_Plot)))){ wrn<-paste('Select a palette supporting at least', length(unique(clu(res = mdllng(),which = input$whichIM_Plot))), 'colours!') } else {wrn<-NULL} return(wrn) }) output$WarningNumColoursVIS<-renderText({ wrn<-warningVIS() wrn }) # 9. 
Operating blockmodeling #### # "blckmdlng" mdllng <- eventReactive(input$blckmdlngRun, { ## 9.1 Alternative blockmodeling sources #### if(input$Restore_Switch){ ### 9.1.1 Restore from Memory blck<<-Blck$Previous[[input$Restore_Selector]] ## Notification "Reading block-model results from RDS" showNotification(ui = "Restoring blockmodeling's results from memory", type = 'message', duration = 10, closeButton = T) } else if(input$blckmdlngRDS){ ### 9.1.2 Blockmodeling from RDS file #### ## Notification "Reading block-model results from RDS" showNotification(ui = "Reading blockmodeling's results from file", type = 'message', duration = 10, closeButton = T) ## Reading RDS file UploadedResults<-input$blckmdlngFileRDS blck<-readRDS(file = UploadedResults$datapath) } else { ## Loads data M<-GetAdjacencyMatrix() ## 9.2 Checks the parameters for PreSpecM #### ### 9.2.1 M parameter #### ## "paramM" if(input$blckmdlngApproach=="val"){ # For valued blockmodeling ParamM<-input$ParamM usePreSpecM<-T } else if(input$ThresholdSelected==TRUE&&input$blckmdlngApproach=="bin"){ if(any(input$blckmdlngBlockTypes=='den')){ # For binary blockmodeling, if chosen AND when no density block was chosen ParamM<-input$ParamThreshold usePreSpecM<-T } } else { # For all other options, including binary blockmodeling with M not chosen ParamM<-NULL usePreSpecM<-NULL } ### 9.2.2 Average parameter #### ## "ParamAverage" if(any(input$blckmdlngBlockTypes=='avg')){ ParamM<-c(ParamM, input$ParamAverage) usePreSpecM<-T } ### 9.2.3 Density parameter #### ## "ParamAverage" if(any(input$blckmdlngBlockTypes=='den')){ # if(is.null(ParamM)){ # ParamM<-input$ParamDensity # } else { ParamM<-c(ParamM,input$ParamDensity) # } usePreSpecM<-T } ## 9.3 Checks if multi-core was allowed #### if(input$MultiCore){ MultiCore<-0 } else { MultiCore<-1 } ## 9.4 Checks customised blockmodeling #### if(input$blckmdlngPrespecified_Switch|input$EditUploadedArray){ ### Use DT block-model condition<-magrittr::and(is.null(input$PrespecifiedArrayRDS), is.null(input$PrespecifiedArrayRData)) if(condition&input$EditUploadedArray){ showNotification(ui = 'Ignoring the uploaded array was activated, but no array had been uploaded. Please, correct!', duration = 10,type = 'warning') } condition<-magrittr::or(condition, input$EditUploadedArray) if(condition){ ### 9.4.2 Table block-model #### #### Notification "Reading the manually imputed, custom block-model" showNotification(ui = "Reading the manually imputed, custom block-model", type = "message",id = 'ManualCustomBlckmdlng', duration = 10, closeButton = T) #### Loads table from reactive df<-Tbl$Current #### 9.4.2(A) Finds out which is the first dimension of the array #### #### Prepare a shadow matrix num<-matrix(NA,ncol=ncol(df),nrow=nrow(df)) ##### counts the length of the blocktypes in each cell. Block types are #### all three character long and always separated by a comma for(i in 1:nrow(df)){ for(j in 1:ncol(df)){ num[i,j]<-nchar(df[i,j]) } } #### The position in the shadow matrix with the most character #### is the cell in the reactive table with the most block types WhereIsTheLongest<-which(num[,]==max(num[,]))[1] #### Finding out how many block types are in the fullest cell FirstDimension<- length( unlist( strsplit(split = ",", unlist(df)[WhereIsTheLongest] ) ) ) #### 9.4.2(B) Turning into an array #### # Creates an empty array of the right dimension BlockTypes<-array(NA,dim = c(FirstDimension,1,nrow=nrow(df),ncol=ncol(df))) # Fills it by layer, ... for(k in 1:FirstDimension){ # ... then by column,... 
for(i in 1:nrow(df)){
  # ... and finally by row
  for(j in 1:ncol(df)){
    # if the specific cell contains fewer block types than the max,
    # the extra layers of the cell are filled with NAs
    if(length(unlist(strsplit(df[i,j],split = ",")))<k){
      BlockTypes[k,1,i,j]<-NA
    } else {
      BlockTypes[k,1,i,j]<-unlist(strsplit(df[i,j],split = ","))[k]
    }
  }
}
if(dim(BlockTypes)[1]==1){
  BlockTypes<-BlockTypes[1,1,,]
}
removeNotification('ManualCustomBlckmdlng')
}
} else {
  if(input$ArrayInput==".RDS"){
    ### 9.4.3 Block-model from RDS ####
    ### Notification "Reading the custom block-model from RDS"
    showNotification(ui = "Reading the custom block-model from file",
                     type = "message",id = 'FileCustomBlckmdlng',
                     duration = 10, closeButton = T)
    # Reading RDS file
    UploadedFile<-input$PrespecifiedArrayRDS
    BlockTypes<-readRDS(file = UploadedFile$datapath)
  } else if (input$ArrayInput==".RData"){
    ### 9.4.4 Block-model from RData ####
    ### Notification "Reading the custom block-model from RData"
    showNotification(ui = "Reading the custom block-model from RData",
                     type = "message", duration = 10, closeButton = T)
    # Reading RData file
    UploadedFile<-input$PrespecifiedArrayRData
    ImportedArray<-load(file = UploadedFile$datapath)
    BlockTypes<-eval(parse(text = ImportedArray))
  } # else if RData
  removeNotification('FileCustomBlckmdlng')
} # else if DT
NumClusters<-dim(BlockTypes)[length(dim(BlockTypes))]
} else {
  BlockTypes<-input$blckmdlngBlockTypes
  NumClusters<-input$blckmdlngNumClusters
}
## Notification "Executing blockmodeling"
showNotification(ui = 'Blockmodeling started successfully!',
                 type = "default", duration = 10, closeButton = T)
## Modal spinner, show
shinybusy::show_modal_spinner(spin = 'semipolar', color = "#978E83",
                              text = 'Computing clusters, please wait...')
Blck$Custom<<-BlockTypes
## 9.5 Block types' weights
## blockTypeWeights
if(input$blockTypeWeights_Switch){
  blockTypeWeights<-c(com=input$blockTypeWeights_com,
                      nul=input$blockTypeWeights_nul,
                      dnc=input$blockTypeWeights_dnc,
                      rre=input$blockTypeWeights_rre,
                      cre=input$blockTypeWeights_cre,
                      reg=input$blockTypeWeights_reg,
                      # rdo=input$blockTypeWeights_rdo,
                      # cdo=input$blockTypeWeights_cdo,
                      den=input$blockTypeWeights_den,
                      avg=input$blockTypeWeights_avg
  )
} else {blockTypeWeights<-1}
## 9.6 Executes blockmodeling ####
blck<- optRandomParC(M = M, k = NumClusters,
                     approaches = input$blckmdlngApproach,
                     blocks = BlockTypes,
                     rep = input$blckmdlngRepetitions,
                     save.initial.param.opt = input$blckmdlngInitialParams,
                     deleteMs = T,
                     max.iden = input$blckmdlngMaxSavedResults,
                     return.all = input$blckmdlngAll,
                     return.err = T,
                     RandomSeed = input$blckmdlngRandomSeed,
                     printRep = input$blckmdlngPrintRep,
                     usePreSpecM = usePreSpecM,
                     preSpecM = ParamM,
                     nCores = MultiCore,
                     blockTypeWeights = blockTypeWeights
)
## 9.6.1 Modal spinner, remove ####
shinybusy::remove_modal_spinner()
## 9.6.2 Remember that the blockmodel was run ####
Blck$RunAlready<<-TRUE
## 9.6.3 Store result ####
Blck$Count<<-Blck$Count+1
if(Blck$Count>input$Restore_MaxMemory){
  ### 9.6.3 (A) Notification Restore memory reset
  showNotification(ui = paste("Result memory full, emptying"),
                   type = "message", duration = 20, closeButton = T)
  ### 9.6.3 (B) Reset full memory
  Blck$Count<<-1
  Blck$Previous<<-list(); length(Blck$Previous)<<-input$Restore_MaxMemory
}
### 9.6.3 (C) Store result
Blck$Previous[[Blck$Count]]<<-blck
### 9.6.3 (D) Notification Memory slot used
showNotification(ui = paste("Result stored in slot:",Blck$Count),
                 type = "message", duration = 20, closeButton = T)
} # /else of RDS input
## Notification "Blockmodeling
completed" showNotification(ui = "Blockmodeling completed. Result stored in slot", type = "message", duration = 2, closeButton = T) blck<<-blck blck }) ## 10. Outputs blockmodeling #### ### 10.1 Blockmodeling output in a table #### TableBlockmdllng<-eventReactive(mdllng(),{ ValueFromName<- function(Var.Name,collapse=F,sep=","){ x<-eval(parse(text = Var.Name)) if(collapse){ paste(x,collapse = sep) } } blck<-mdllng() tbl<-matrix(data=NA,byrow = F,ncol=5, nrow = length(blck$best)) colnames(tbl)<-c("Network size","Approaches", "Blocks", "Clusters size", "Error") tbl[1,1]<-nrow(blck$initial.param$M) tbl[1,2]<-paste(blck$initial.param$approaches,collapse = ",") tbl[1,3]<-paste(blck$initial.param$blocks,collapse = ",") if(nrow(tbl)!=1){ tbl[2:nrow(tbl),1:3]<-"" for(i in 1:nrow(tbl)){ Var.Name<-paste0("blck$best$best",i,"$resC$nUnitsRowClu") tbl[i,4]<-ValueFromName(Var.Name = Var.Name,collapse = T) Var.Name<-paste0("blck$best$best",i,"$err") tbl[i,5]<-ValueFromName(Var.Name = Var.Name,collapse = T) } } else { tbl[1,4]<-paste(blck$best$best1$resC$nUnitsRowClu,collapse = ',') tbl[1,5]<-paste(blck$best$best1$err,collapse = ',') } tbl }) ### 10.2 Renders blockmodeling output in a table #### output$Tableblckmdlng<- renderTable({ TableBlockmdllng() },colnames = T,rownames = F,striped = F,hover = T,bordered = T, spacing = "s",width = "auto",align = "c",digits = 0,quoted = F) ### 10.3 Renders blockmodeling output as summary #### output$Summaryblckmdlng <- renderPrint({ blck<-mdllng() blck }) ### 10.4 Image matrix (IM) #### #### 10.4.1 Disassembles image matrix as tables #### IM<-eventReactive(c(input$whichIM,mdllng()),{ Disassemble.Array<- function(array){ for(i in 1:dim(array)[3]){ list[[i]]<-array[,,i] } } list<-list() for(i in 1:length(mdllng()$best)){ list[[i]]<- blockmodeling::IM(res = mdllng(), drop = input$dropIM, which = i) } matrix<-list[[input$whichIM]] matrix<-as.data.frame(matrix) colnames(matrix)<-1:ncol(matrix) return(matrix) }) #### 10.4.2 Renders image matrix as tables #### output$TableIM<- renderTable({ IM() },colnames = T,rownames = T,striped = T,hover = T,bordered = T, spacing = "s",width = "auto",align = "c",digits = 0,quoted = F) ### 10.5 Renders error matrix as a table #### output$TableEM<- renderTable({ EM_Table<-EM(res = mdllng(), which = input$whichIM) colnames(EM_Table)<-1:ncol(EM_Table) formatC(x = EM_Table,format = 'f',digits = input$DigitsEM) },colnames = T,rownames = T,striped = T,hover = T,bordered = T, spacing = "s",width = "auto",align = "c",digits = 0,quoted = F) ### 10.6 Rendex mean matrix as a table #### output$TableMean<- renderTable({ Mean_Table<-blockmodeling::funByBlocks(x = mdllng(), which=input$whichIM, FUN='mean',na.rm=T) colnames(Mean_Table)<-1:ncol(Mean_Table) formatC(x = Mean_Table,format = 'f',digits = input$DigitsMean) },colnames = T,rownames = T,striped = T,hover = T,bordered = T, spacing = "s",width = "auto",align = "c",digits = 0,quoted = F) ## 11. Download blockmodeling results to file #### output$DownloadBlckRDS <- downloadHandler( filename = "Blockmodeling results.RDS", content = function(file) { saveRDS(object = mdllng(),file = file) } ) ## 11. Download clusters to file #### output$DownloadClu <- downloadHandler( filename = "partitions.clu", content = function(file) { blockmodeling::savevector(v = clu(res = mdllng(),which = input$whichIM), filename = file) } ) ## 12. 
Download image matrix #### ### 12.1 As plain text #### ## dropIM, whichIM output$DownloadIMtext <- downloadHandler( filename = "image.txt", content = function(file) { IM<-blockmodeling::IM(res = mdllng(), drop = input$dropIM, which = input$whichIM ) write.table(x = IM,file = file,append = F,quote = F) } ) ### 12.2 As RDS #### ## dropIM, whichIM output$DownloadIMrds <- downloadHandler( filename = "image.RDS", content = function(file) { IM<-blockmodeling::IM(res = mdllng(), drop = input$dropIM, which = input$whichIM ) saveRDS(object = IM,file = file,compress = F) } ) ## 13. Block-model from file/sample #### observeEvent(input$UploadArray,{ ## Prepares condition conditionArray<- magrittr::or(!is.null(input$PrespecifiedArrayRDS), !is.null(input$PrespecifiedArrayRData)) ## Checks condition if(conditionArray&!input$EditUploadedArray){ if(input$ArrayInput==".RDS"){ ### 13.1 Reading RDS file #### UploadedFile<-input$PrespecifiedArrayRDS Layers<-readRDS(file = UploadedFile$datapath) Layers<<-Layers } else if(input$ArrayInput==".RData"){ ### 13.2 Reading RData file #### UploadedFile<-input$PrespecifiedArrayRData load(file = UploadedFile$datapath) ImportedArray<-load(file = UploadedFile$datapath) Layers<-eval(parse(text = ImportedArray)) } ### 13.3 Unmaking array #### if(length(dim(Layers))==4){ ## Preparing data frame UnmakingArray<-matrix(NA,nrow = dim(Layers)[3],ncol = dim(Layers)[4]) colnames(UnmakingArray)<-1:ncol(UnmakingArray) # Filling by column ... for(i in 1:nrow(UnmakingArray)){ # ... then by row for(j in 1:ncol(UnmakingArray)){ # if the specific cell contains less block types than the max # the cell, the extra layers are filled with NAs UnmakingArray[i,j]<-paste(unlist(strsplit(Layers[,1,i,j],split = ",")),collapse = ",") } } Tbl$Current<<-UnmakingArray } else if(length(dim(Layers))==2){ ## Preparing data frame UnmakingArray<-matrix(NA,nrow = dim(Layers)[1],ncol = dim(Layers)[2]) colnames(UnmakingArray)<-1:ncol(UnmakingArray) # Filling by column ... for(i in 1:nrow(UnmakingArray)){ # ... then by row for(j in 1:ncol(UnmakingArray)){ # if the specific cell contains less block types than the max # the cell, the extra layers are filled with NAs UnmakingArray[i,j]<-paste(unlist(strsplit(Layers[i,j],split = ",")),collapse = ",") } } Tbl$Current<<-UnmakingArray } } }) ## 14. Cells' selection #### observeEvent(input$CustomBlockModel_cell_clicked,{ Tbl$Rows<<-c(Tbl$Rows,input$CustomBlockModel_cell_clicked$row) Tbl$Cols<<-c(Tbl$Cols,input$CustomBlockModel_cell_clicked$col) ### 14.1 Checks for de-selection #### if(length(Tbl$Rows!=1)){ pairs<-rep(0,length(Tbl$Rows)) for(i in 1:length(pairs)){ pairs[i]<-paste(Tbl$Rows[i],Tbl$Cols[i],sep = '_') ### Comment # Uses text with separator to avoid confusing combinations like: # Row 1, cell 22 and Row 12, cell 2 # as a single number both are '122' and could be deleted # as text they are different: '1_22' and and '12_2' } hit<-which( grepl(pattern = pairs[length(pairs)], x = pairs[-length(pairs)]) ) if(length(hit)>0){ hit<-c(hit,length(pairs)) Tbl$Rows<-Tbl$Rows[-hit] Tbl$Cols<-Tbl$Cols[-hit] } } }) ## 15. 
Reset and select All #### proxy=dataTableProxy(outputId = 'CustomBlockModel') ### 15.1 Reset #### observeEvent(input$ResetSelectionDT,{ reloadData(proxy = proxy,Tbl$Current,clearSelection = 'all') Tbl$Cols<<-Tbl$Rows<<-NULL }) ## 15.2 Select all #### observeEvent(input$SelectAllDT,{ selectAll<-matrix(NA,ncol = 2,nrow = nrow(Tbl$Current)*ncol(Tbl$Current)) Tbl$Rows<<-selectAll[,1]<-rep(1:nrow(Tbl$Current),each=ncol(Tbl$Current)) Tbl$Cols<<-selectAll[,2]<-rep(1:ncol(Tbl$Current),nrow(Tbl$Current)) selectCells(proxy = proxy,selected = selectAll) }) ## 16. (Re)Initialise TblCurrent if empty #### observeEvent(c(input$LoadBlocksIntoDT,input$UploadArray,input$SetSizeDT),{ TblCurrent<<-Tbl$Current if(is.null(TblCurrent)){ TblCurrent<-matrix(NA,nrow = input$CustoomBlockModel_NumberCluster, ncol = input$CustoomBlockModel_NumberCluster) colnames(TblCurrent)<-1:ncol(TblCurrent) rownames(TblCurrent)<-NULL Tbl$Current<<-TblCurrent } else { Tbl$Current<<-TblCurrent } }) ## 17. Change block-model size #### observeEvent(input$SetSizeDT,{ if(ncol(TblCurrent)!=input$CustoomBlockModel_NumberCluster){ if(ncol(TblCurrent)<input$CustoomBlockModel_NumberCluster){ ### Add columns and rows AddCols<-input$CustoomBlockModel_NumberCluster-ncol(TblCurrent) EmptyData<-rep(NA,nrow(TblCurrent)) for(i in 1:AddCols){ TblCurrent<-cbind(TblCurrent,EmptyData) } EmptyData<-rep(NA,ncol(TblCurrent)) for(i in 1:AddCols){ TblCurrent<-rbind(TblCurrent,EmptyData) } } else if(ncol(TblCurrent)>input$CustoomBlockModel_NumberCluster){ ### Remove columns and rows DelCols<-(input$CustoomBlockModel_NumberCluster+1):ncol(TblCurrent) TblCurrent<-TblCurrent[-DelCols,-DelCols] } colnames(TblCurrent)<-1:ncol(TblCurrent) rownames(TblCurrent)<-NULL Tbl$Current<<-TblCurrent } }) ## 18. Loading imputed data into table #### observeEvent(input$LoadBlocksIntoDT,{ HitRows<<-Tbl$Rows; HitCols<<-Tbl$Cols if(!is.null(Tbl$Rows)){ for(i in 1:length(HitRows)){ TblCurrent[HitRows[i],HitCols[i]]<-paste(input$TowardsDT,collapse = ",") Tbl$Rows<<-NULL Tbl$Cols<<-NULL } colnames(TblCurrent)<-1:ncol(TblCurrent) rownames(TblCurrent)<-NULL Tbl$Current<<-TblCurrent } }) ## 19. Visualise table #### output$CustomBlockModel<- DT::renderDataTable({ ## Read from reactive TblCurrent<<-Tbl$Current ## Converts to data frame in order to show row numbers TblCurrent<-as.data.frame(TblCurrent) ## Outputs TblCurrent },selection = list(mode="multiple",target='cell',selectable=matrix(c(-1:-nrow(Tbl$Current),rep(0,nrow(Tbl$Current))),ncol = 2)), options = list(paging =FALSE, searching=FALSE,ordering=FALSE),style='bootstrap4') ## 20. Download adjacency matrix #### output$downloadAdj<- downloadHandler( filename = "Adjacency Matrix.txt", content = function(file) { write.table(x = GetAdjacencyMatrix(), file = file) }, contentType = 'text/csv' ) ## 21. Download custom blockmodel #### output$downloadCustomBlck<- downloadHandler( filename = "Pre-specified Blockmodel.RDS", content = function(file) { saveRDS(object = Blck$Custom,file = file) } ) ## 22. 
Update interface #### ### 22.1 Block-type selectors #### observe({ if(input$blckmdlngApproach=='val'){ choices <- c("null or empty block"="nul", "complete block"="com", # "row-dominant"="rdo", # "column-dominant"="cdo", "(f-)regular block"="reg", "row (f-)regular"="rre", "column (f-)regular"= "cre", "row dominant"="rfn", "column dominant"= "cfn", "average block"="avg", "do not care block (the error is always zero)"="dnc") } else if(input$blckmdlngApproach=='bin'){ choices <- c("null or empty block"="nul", "complete block"="com", # "row-dominant"="rdo", # "column-dominant"="cdo", "(f-)regular block"="reg", "row (f-)regular"="rre", "column (f-)regular"= "cre", "row dominant"="rfn", "column dominant"= "cfn", "density block"="den", "do not care block (the error is always zero)"="dnc") } else if(input$blckmdlngApproach!='bin'&input$blckmdlngApproach!='val'){ choices <- c("null or empty block"="nul", "complete block"="com", "(f-)regular block"="reg", "row (f-)regular"="rre", "column (f-)regular"= "cre", "do not care block (the error is always zero)"="dnc") } updateSelectInput(inputId = 'blckmdlngBlockTypes',choices = choices, selected=c('nul','com')) updateSelectInput(inputId = 'TowardsDT',choices = choices) }) ### 22.2 Parameter density and average #### observe({ if(any(input$blckmdlngBlockTypes=='den')){ shinyjs::show(id = 'ParamDensity',anim = T,animType = 'slide') shinyjs::hide(id = 'ParamAverage',anim = T,animType = 'fade') dat<-NW() updateNumericInput(inputId = 'ParamDensity', value = network::network.density(x = dat)) } else if(any(input$blckmdlngBlockTypes=='avg')){ shinyjs::hide(id = 'ParamDensity',anim = T,animType = 'fade') shinyjs::show(id = 'ParamAverage',anim = T,animType = 'slide') M<-GetAdjacencyMatrix() updateNumericInput(inputId = 'ParamAverage', value = mean(x = M),) } else { shinyjs::hide(id = 'ParamDensity',anim = T,animType = 'fade') shinyjs::hide(id = 'ParamAverage',anim = T,animType = 'fade') } }) ### 22.3 Plot selector #### observe({ if(Blck$RunAlready){ updateRadioButtons(inputId = 'adjSelector', choiceNames = c("original","partitioned"), choiceValues = c(1,2), selected = 2, inline = T) updateRadioButtons(inputId = 'PlotSelector',, choiceNames = c("original","partitioned"), choiceValues = c(1,2), selected = 2, inline = T) shinyjs::hide(id = 'BlckNotRunYet_Plot') } else { updateRadioButtons(inputId = 'adjSelector', choiceNames = c("original"), choiceValues = c(1), selected = 1) updateRadioButtons(inputId = 'PlotSelector', choiceNames = c("original"), choiceValues = c(1), selected = 1) shinyjs::show(id = 'BlckNotRunYet_Plot') } }) ## 23 Info #### ### 23.1 Text to display #### output$Info<- renderUI( HTML(paste('<h3>To cite this app in any publication </h3> Please cite the app and the package "blockmodeling" as follows, plus <b>(<u>at least</u>) one</b> of the articles below:<br/>', '<b>1. This app/package</b>: <ul><li>Telarico, Fabio Ashtar, and Aleš Žiberna. <i>GUI for the Generalised Blockmodeling of Valued Networks</i> (version 1.8.3). R. Ljubljana (Slovenia): Faculty of Social Sciences (FDV) at the University of Ljubljana, 2022. <a href="https://doi.org/10.5281/zenodo.6554608">https://doi.org/10.5281/zenodo.6554608</a>.</li></ul>', '<b>2. Package "blockmodeling"</b> by Aleš Žiberna:<ul>', '<li>Žiberna, Aleš. <i>Blockmodeling: Generalized and Classical Blockmodeling of Valued Networks</i> (version 1.0.5), 2021. 
<a href="https://CRAN.R-project.org/package=blockmodeling">https://CRAN.R-project.org/package=blockmodeling</a>.</li>', '<li>Matjašič, Miha, Marjan Cugmas, and Aleš Žiberna. ‘Blockmodeling: An R Package for Generalized Blockmodeling’. Advances in Methodology and Statistics 17, no. 2 (1 July 2020): 49–66. <a href="https://doi.org/10.51936/uhir1119">https://doi.org/10.51936/uhir1119</a>.</li></ul>', '<b>3. Methods</b>:<ul>', '<li>Doreian, Patrick, Vladimir Batagelj, and Anuska Ferligoj. <i>Generalized Blockmodeling</i>. Cambridge University Press, 2005.</li><li>Žiberna, Aleš. ‘Generalized Blockmodeling of Sparse Networks’. <i>Advances in Methodology and Statistics</i> 10, no. 2 (1 July 2013). <a href="https://doi.org/10.51936/orxk5673">https://doi.org/10.51936/orxk5673</a>.</li><li>Žiberna, Aleš. ‘Generalized Blockmodeling of Valued Networks’. <i>Social Networks</i> 29, no. 1 (January 2007): 105–26. <a href="https://doi.org/10.1016/j.socnet.2006.04.002">https://doi.org/10.1016/j.socnet.2006.04.002</a>.</li></ul><br/>', 'The development of this package is financially supported by the Slovenian Research Agency (', a(href="www.arrs.gov.si", "www.arrs.gov.si"), ') within the research project', a(href="fdv.uni-lj.si/en/research/institute-of-social-science/national-research-projects/P5438", "J5-2557 (Comparison and evaluation of different approaches to blockmodeling dynamic networks by simulations with application to Slovenian co-authorship networks)"), '.', sep = '<br/>'))) ### 23.2 Download RIS file #### output$CitationRIS <- downloadHandler( filename = 'GUI Citations.ris', content = function(file) { writeLines(text = readRDS(file = './RIS_Citation.RDS'), con = file) }) ### 23.3 Download Bib file #### output$CitationBIB <- downloadHandler( filename = 'GUI Citations.bib', content = function(file) { writeLines(text = readRDS(file = './Bib_Citation.RDS'), con = file) }) } args = commandArgs(trailingOnly=TRUE) # Run the application shinyApp(ui = ui, server = server) if(length(args)==0){ shinyApp(ui = ui, server = server) } else { shinyApp(ui = ui, server = server,options=list(launch.browser=as.logical(args[1]))) }
/scratch/gouwar.j/cran-all/cranData/BlockmodelingGUI/inst/apps/BlockmodelingGUI.R
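# Added usage sketch (not part of the original app): the trailing command-line
# argument above is forwarded to launch.browser, so the app can be started as
#   Rscript app.R TRUE
# From an interactive session an equivalent call is shown below;
# "path/to/BlockmodelingGUI" is a hypothetical app directory.
shiny::runApp("path/to/BlockmodelingGUI", launch.browser = TRUE)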
#' @importFrom stats dbinom qbeta g <- function(m,k,n,p) { sum(dbinom(m:k,size=n,prob=p)) } g.prime <- function(m,k,n,p) { # i.e., dg/dp #n*p^(m-1)*(1-p)^(n-m)*(choose(n-1,m-1)-choose(n-1,k)*(p/(1-p))^(k-m+1)) q <- (1-p) out <- exp(lchoose(n-1,m-1)+(m-1)*log(p)+(n-m)*log(q)) out <- out - exp(lchoose(n-1,k)+k*log(p)+(n-k-1)*log(q)) out <- n*out out } evaluate.g.maximum <- function(m,k,n) { if(m==0 || k==n) { max.value <- 1 } else { a <- (lchoose(n-1,m-1)-lchoose(n-1,k))/(k-m+1) best.p <- 1-1/(1+exp(a)) max.value <- g(m,k,n,best.p) } return(max.value) } where.g.maximum <- function(m,k,n) { if(m==0 && k==n) { return(NULL) # because best.p is non-unique } else if(m==0) { return(0) } else if(k==n) { return(1) } else { a <- (lchoose(n-1,m-1)-lchoose(n-1,k))/(k-m+1) best.p <- 1-1/(1+exp(a)) return(best.p) } } find.endpoints.given.confidence.level <- function(m,k,n,alpha=0.05,digits=4) { left.endpt <- NULL right.endpt <- NULL interval.length <- 10^(-digits) if(alpha==0) { if(m!=0 || k!=n) { return(NULL) } else { left.endpt <- 0 right.endpt <- 1 } } determine.next.x <- function(x0,a,b) { # Newton-Raphson method g0 <- g(m,k,n,x0) g.prime0 <- g.prime(m,k,n,x0) if(is.na(g.prime0) || g.prime0==0 || is.infinite(g.prime0)) { return((a+b)/2) } else { f0 <- g0-(1-alpha) # search for root for g(x)-(1-alpha) x1 <- x0-f0/g.prime0 if(x1>=a && x1<=b) { return(x1) } else { return((a+b)/2) } } } search.for.right.endpoint <- function() { while((b-a)>=interval.length) { x1 <- determine.next.x(x0,a,b) # update a and b g1 <- g(m,k,n,x1) if(g1<=(1-alpha)) { b <- x1 if(a<(x1-interval.length*0.9)) { # avoid getting stuck with b <- x1 <- x1 <- ... a1 <- x1-interval.length*0.9 ga1 <- g(m,k,n,a1) if(ga1>(1-alpha)) { a <- a1 } else { b <- a1 x1 <- a1 } } } else { a <- x1 if(b>(x1+interval.length*0.9)) { # avoid getting stuck with a <- x1 <- x1 <- ... b1 <- x1+interval.length*0.9 gb1 <- g(m,k,n,b1) if(gb1<=(1-alpha)) { b <- b1 } else { a <- b1 x1 <- b1 } } } x0 <- x1 } bb <- floor(b*10^digits)/10^digits # bb<=b if(bb<=a) { right.endpt <<- bb } else { gbb <- g(m,k,n,bb) if(gbb<(1-alpha)) { aa <- floor(a*10^digits)/10^digits # aa<=a right.endpt <<- aa } else { right.endpt <<- bb } } } search.for.left.endpoint <- function() { while((b-a)>=interval.length) { x1 <- determine.next.x(x0,a,b) # update a and b g1 <- g(m,k,n,x1) if(g1<=(1-alpha)) { a <- x1 if(b>(x1+interval.length*0.9)) { # avoid getting stuck with a <- x1 <- x1 <- ... b1 <- x1+interval.length*0.9 gb1 <- g(m,k,n,b1) if(gb1>(1-alpha)) { b <- b1 } else { a <- b1 x1 <- b1 } } } else { b <- x1 if(a<(x1-interval.length*0.9)) { # avoid getting stuck with b <- x1 <- x1 <- ... 
a1 <- x1-interval.length*0.9 ga1 <- g(m,k,n,a1) if(ga1<=(1-alpha)) { a <- a1 } else { b <- a1 x1 <- a1 } } } x0 <- x1 } aa <- ceiling(a*10^digits)/10^digits # aa>=a if(aa>=b) { left.endpt <<- aa } else { gaa <- g(m,k,n,aa) if(gaa<(1-alpha)) { bb <- ceiling(b*10^digits)/10^digits # bb>=b left.endpt <<- bb } else { left.endpt <<- aa } } } ## -- if(m==0 && k==n) { left.endpt <- 0 right.endpt <- 1 } else if(m==0) { # m==0 && k<n left.endpt <- 0 # search for right endpoint a <- 0 b <- 1 x0 <- 1/2 g0 <- g(m,k,n,x0) if(g0<=(1-alpha)) { b <- x0 } else { a <- x0 } search.for.right.endpoint() } else if(k==n) { # m>0 && k==n right.endpt <- 1 # search for left endpoint a <- 0 b <- 1 x0 <- 1/2 g0 <- g(m,k,n,x0) if(g0<=(1-alpha)) { a <- x0 } else { b <- x0 } search.for.left.endpoint() } else { # m>0 and k<n max.coverage <- evaluate.g.maximum(m,k,n) if(max.coverage<(1-alpha)) { return(NULL) } else { g.max <- where.g.maximum(m,k,n) ## search for left endpoint a <- 0 b <- g.max x0 <- (a+b)/2 g0 <- g(m,k,n,x0) if(g0<=(1-alpha)) { a <- x0 } else { b <- x0 } search.for.left.endpoint() ## search for right endpoint a <- g.max b <- 1 x0 <- (a+b)/2 g0 <- g(m,k,n,x0) if(g0<=(1-alpha)) { b <- x0 } else { a <- x0 } search.for.right.endpoint() if(left.endpt>right.endpt) { return(NULL) } } } return(c(left.endpt,right.endpt)) } evaluate.coverage <- function(L.vec, U.vec=NULL, digits=10) { n <- length(L.vec)-1 stopifnot(n>0) L.vec <- round(L.vec*10^digits)/10^digits if(is.null(U.vec)) { U.vec <- rev(1-L.vec) } U.vec <- round(U.vec*10^digits)/10^digits stopifnot(length(U.vec)==(n+1)) coverage.at.left.endpt.vec <- rep(NA, n+1) coverage.at.right.endpt.vec <- rep(NA, n+1) for(i in 1:length(L.vec)) { # evaluate coverage at all left endpoints left.endpt <- L.vec[i] if(left.endpt==0) { coverage.at.left.endpt.vec[i] <- ifelse(L.vec[1]==0,1,0) } else { X.range <- range(which(L.vec<left.endpt & left.endpt<=U.vec)) m <- X.range[1]-1 k <- X.range[2]-1 coverage.at.left.endpt.vec[i] <- g(m,k,n,p=left.endpt) } } for(i in 1:length(U.vec)) { # evaluate coverage at all right endpoints right.endpt <- U.vec[i] if(right.endpt==1) { coverage.at.right.endpt.vec[i] <- ifelse(U.vec[n+1]==1,1,0) } else { X.range <- range(which(U.vec>right.endpt & L.vec<=right.endpt)) m <- X.range[1]-1 k <- X.range[2]-1 coverage.at.right.endpt.vec[i] <- g(m,k,n,p=right.endpt) } } output <- cbind(coverage.at.left.endpt.vec,coverage.at.right.endpt.vec) rownames(output) <- paste("X=", c(0:n)) return(output) } # -- # main function # -- # #' Blyth-Still-Casella exact binomial confidence intervals #' #' computes the Blyth-Still-Casella exact binomial confidence intervals based on a refining procedure proposed by George Casella (1986). #' #' @param n number of trials #' @param X number of successes (optional) #' @param alpha confidence level = 1 - alpha #' @param digits number of significant digits after the decimal point #' @param CIs.init initial confidence intervals from which the refinement procedure begins #' (default starts from Clopper-Pearson confidence intervals) #' @param additional.info additional information about the types of interval endpoints and their possible range is provided if TRUE (default = FALSE) #' @return If \code{X} is specified, the corresponding confidence interval will be returned, otherwise a list of n + 1 confidence intervals will be returned. #' @return If \code{additional.info = FALSE}, only a list of confidence interval(s) will be returned. For any coincidental endpoint, midpoint of its range will be displayed.
#' @return If \code{additional.info = TRUE}, the following lists will be returned: #' @return \tabular{ll}{ #' \code{CI} \tab a list of confidence intervals \cr #' \code{coinc.index} \tab indices of coincidental lower endpoints (L.Index) and their corresponding upper endpoints (U.index)\cr #' \code{endpoint.type} \tab whether the endpoint is coincidental (C) or non-coincidental (NC)\cr #' \code{range} \tab range for each endpoint\cr #' } #' @examples #' # to obtain 95% CIs for n = 30 and X = 0 to 30 #' blyth.still.casella(n = 30, alpha = 0.05, digits = 4) #' #' # to obtain 95% CIs, endpoint types, indices of coincidental endpoints (if any), #' # and range of each endpoint for n = 30 and X = 23 #' blyth.still.casella(n = 30, X = 23, alpha = 0.05, digits = 4, additional.info = TRUE) #' #' # use initial confidence intervals defined by the user instead of Clopper-Pearson CIs #' # CIs.input needs to be a (n + 1) x 2 matrix with sufficient coverage #' CIs.input <- matrix(c(0,1), nrow = 11, ncol = 2, byrow = TRUE) # start with [0,1] intervals #' blyth.still.casella(n = 10, alpha = 0.05, digits = 4, CIs.init = CIs.input, additional.info = TRUE) #' #' # use summary function to see the range for each endpoint #' output <- blyth.still.casella(n = 5, alpha = 0.1, digits = 4, additional.info = TRUE) #' summary(output) #' @export blyth.still.casella <- function(n, X=NULL, alpha=0.05, digits=2, CIs.init=NULL, additional.info=FALSE) { stopifnot(alpha>=0 && alpha<1) stopifnot(n==floor(n) && n>0) if(!is.null(X)) { stopifnot(X==floor(X) && X>=0 && X<=n) } if(alpha==0) { CI.mat <- matrix(NA, nrow=n+1, ncol=2) CI.mat[,1] <- 0 CI.mat[,2] <- 1 rownames(CI.mat) <- paste0("X=",0:n) colnames(CI.mat) <- c("L","U") Endpoint.Type.mat <- matrix("NC",nrow=n+1,ncol=2) rownames(Endpoint.Type.mat) <- paste0("X=",0:n) colnames(Endpoint.Type.mat) <- c("L","U") Range.mat <- matrix(NA,nrow=n+1,ncol=4) Range.mat[,1:2] <- 0 Range.mat[,3:4] <- 1 rownames(Range.mat) <- paste0("X=",0:n) colnames(Range.mat) <- c("L.min","L.max","U.min","U.max") if(!is.null(X)) { CI.mat <- CI.mat[X+1,,drop=FALSE] Endpoint.Type.mat <- Endpoint.Type.mat[X+1,,drop=FALSE] Range.mat <- Range.mat[X+1,,drop=FALSE] } if(!additional.info) { result <- CI.mat class(result) <- "customclass" return(result) } else { result <- list(CI=CI.mat,Endpoint.Type=Endpoint.Type.mat,Range=Range.mat) class(result) <- "customclass" return(result) } } if(n==1) { CI.mat <- matrix(NA, nrow=2, ncol=2) CI.mat[1,] <- c( 0,max(1-alpha,0.5)) CI.mat[2,] <- c(min(alpha,0.5), 1) CI.mat[1,2] <- ceiling(CI.mat[1,2]*10^digits)/10^digits CI.mat[2,1] <- floor(CI.mat[2,1]*10^digits)/10^digits #CI.mat[1,2] <- ceiling((1-alpha)*10^digits)/10^digits #CI.mat[2,1] <- floor(alpha*10^digits)/10^digits rownames(CI.mat) <- c("X=0","X=1") colnames(CI.mat) <- c("L","U") Endpoint.Type.mat <- matrix("NC",nrow=2,ncol=2) rownames(Endpoint.Type.mat) <- c("X=0","X=1") colnames(Endpoint.Type.mat) <- c("L","U") Range.mat <- cbind(CI.mat[,1],CI.mat[,1],CI.mat[,2],CI.mat[,2]) rownames(Range.mat) <- c("X=0","X=1") colnames(Range.mat) <- c("L.min","L.max","U.min","U.max") if(!is.null(X)) { CI.mat <- CI.mat[X+1,,drop=FALSE] Endpoint.Type.mat <- Endpoint.Type.mat[X+1,,drop=FALSE] Range.mat <- Range.mat[X+1,,drop=FALSE] } if(!additional.info) { result <- CI.mat class(result) <- "customclass" return(result) } else { result <- list(CI=CI.mat,Endpoint.Type=Endpoint.Type.mat,Range=Range.mat) class(result) <- "customclass" return(result) } } digits.save <- digits if(!is.null(CIs.init)) { stopifnot(ncol(CIs.init)==2 &&
nrow(CIs.init)==(n+1)) L.vec <- floor(CIs.init[,1]*10^digits)/10^digits U.vec <- ceiling(CIs.init[,2]*10^digits)/10^digits stopifnot(all(diff(L.vec)>=0)) stopifnot(all(diff(U.vec)>=0)) min.coverage <- min(evaluate.coverage(L.vec,U.vec,digits)[[1]], evaluate.coverage(L.vec,U.vec,digits)[[2]]) if(min.coverage<(1-alpha)) stop("Initial set of confidence intervals does not have sufficient coverage probability"); } else { # Clopper-Pearson exact confidence intervals L.vec <- qbeta(alpha/2,0:n,(n+1):1) U.vec <- qbeta(1-alpha/2,1:(n+1),n:0) L.vec <- floor(L.vec*10^digits)/10^digits U.vec <- ceiling(U.vec*10^digits)/10^digits } move.noncoincidental.endpoint <- function() { # move a noncoincidental lower endpoint (indexed by i) to the right L.at.i <- L.vec[i+1] j <- which(U.vec[0:(i-1)+1]>L.at.i) if(length(j)==0) { i <<- i-1 return(invisible(NULL)) }# if occurs, the endpoint is not movable j <- j[1]-1 # determine first.touch # L.at.i can possibly touch one of the following endpoints: # (1) L.at.(i+1), non-moving # (2) U.at.i , non-moving # pathological interval # (3) U.at.j , non-moving # (4) 0.5 , if i==(n/2) or (i+j)==n first.touch.vers <- rep(Inf,4) if(i<n) { # L.at.(i+1) exists first.touch.vers[1] <- L.vec[i+2] } if(i!=(n/2)) { # U.at.i not moving first.touch.vers[2] <- U.vec[i+1] } if((i+j)!=n) { # U.at.j not moving first.touch.vers[3] <- U.vec[j+1] } if(i==(n/2) || (i+j)==n) { # U.at.i moving or U.at.j moving first.touch.vers[4] <- 0.5 } first.touch <- min(first.touch.vers) if(first.touch==Inf) { i <<- i-1 return(invisible(NULL)) } if(g(j,i-1,n,first.touch)<(1-alpha)) { # insufficient coverage endpts <- find.endpoints.given.confidence.level(j,i-1,n,alpha,digits) if(is.null(endpts)) { i <<- (i-1) } else { right.endpt <- endpts[2] if(L.at.i < right.endpt) { L.vec[i+1] <<- right.endpt U.vec[n-i+1] <<- round((1-right.endpt)*10^digits)/10^digits coincidental.endpoint.idx[i+1] <<- NA i <<- (i-1) } else { # i.e., L.vec[i+1]>=right.endpt and cannot be moved coincidental.endpoint.idx[i+1] <<- NA i <<- (i-1) } } } else { # sufficient coverage if(L.at.i < first.touch) { # then move to first.touch L.vec[i+1] <<- first.touch U.vec[n-i+1] <<- round((1-first.touch)*10^digits)/10^digits # reset endpoint type to be non-coincidental coincidental.endpoint.idx[i+1] <<- NA } else { i <<- i-1 } } } move.coincidental.endpoint <- function() { # move a coincidental lower endpoint (indexed by i) to the right L.at.i <- L.vec[i+1] j.equal <- which(U.vec[0:(i-1)+1]==L.at.i)-1 # to remove index that have been assigned as coincidental endpoints from previous iteration if(i<n) { j.not.taken <- setdiff(j.equal, coincidental.endpoint.idx[(i+2):(n+1)]) if(!all(is.na(coincidental.endpoint.idx[(i+2):(n+1)]))) { # if not all elements are NA # force index to be smaller than already assigned index j.not.taken <- j.not.taken[j.not.taken < min(coincidental.endpoint.idx[(i+2):(n+1)], na.rm = T)] } }else { j.not.taken <- j.equal } if(length(j.not.taken)>0) { coincidental.endpoint.idx[i+1] <<- j.not.taken[length(j.not.taken)] j <- j.not.taken[length(j.not.taken)] }else{ stop("No matching j is found") } if(j==(n-i)) { i <<- i-1 return(invisible(NULL)) # although coincidental, not movable } if(g(j,i-1,n,L.at.i)<(1-alpha) || g(j+1,i,n,L.at.i)<(1-alpha)) { # j is not the index for coincidental point, since coverage is not met # to find the index whose coverage is sufficient new.j <- j k <- length(j.not.taken) # move to the second most large j.equal while (g(new.j,i-1,n,L.at.i)<(1-alpha) || g(new.j+1,i,n,L.at.i)<(1-alpha)) { if(k<1) { 
stop("Error: None of the endpoints satisfy the coverage") } else{ k <- k-1 new.j <- j.not.taken[k] } } coincidental.endpoint.idx[i+1] <<- new.j i <<- i-1 return(invisible(NULL)) # although coincidental, not movable } # determine first.touch # L.at.i can possibly touch one of the following endpoints: # (1) L.at.(i+1), non-moving # (2) U.at.i , non-moving # pathological interval # (3) 0.5 , if i+j<n # U.at.j can possibly touch one of the following endpoints: # (4) U.at.(j+1), non-moving # (5) 0.5 , if i+j<n # when either case (3) or (5) occurs: i.e., L.at.i=L.at.(n-j)=U.at.j=U.at.(n-i)=0.5 # # Case (6): l_i & u_j are actually separable first.touch.vers <- rep(Inf,6) if(i<n && (i+1)!=(n-j)) { # L.at.(i+1) not moving first.touch.vers[1] <- L.vec[i+2] } if(i!=(n-i)) { # U.at.i not moving first.touch.vers[2] <- U.vec[i+1] } if((i+j)<n) { # i<(n-j) first.touch.vers[3] <- 0.5 } if((j+1)!=(n-i)) { # U.at.(j+1) not moving first.touch.vers[4] <- U.vec[j+2] } if((i+j)<n) { # j<(n-i) # this case is repetitive first.touch.vers[5] <- 0.5 } # Case (6) if(j<(i-1)) { endpts.sep.set <- find.endpoints.given.confidence.level(j+1,i-1,n,alpha,digits) # defines the range in which l_i & u_j are separable if(!is.null(endpts.sep.set)) { if(endpts.sep.set[2]>L.at.i) { g.max <- where.g.maximum(j+1,i-1,n) first.choice <- round(g.max*10^digits)/10^digits second.choice <- floor(endpts.sep.set[2]*10^digits)/10^digits if(first.choice>endpts.sep.set[2] || first.choice<=L.at.i) { first.choice <- NULL } if(second.choice<=L.at.i) { second.choice <- NULL } if(!is.null(first.choice)) { first.touch.vers[6] <- first.choice } else if(!is.null(second.choice)) { first.touch.vers[6] <- second.choice } } } } first.touch <- min(first.touch.vers) if(first.touch==Inf) { i <<- i-1 return(invisible(NULL)) } if(g(j,i-1,n,first.touch)<(1-alpha) || g(j+1,i,n,first.touch)<(1-alpha)) { # insufficient coverage endpts.set1 <- find.endpoints.given.confidence.level(j,i-1,n,alpha,digits) endpts.set2 <- find.endpoints.given.confidence.level(j+1,i,n,alpha,digits) if(is.null(endpts.set1) || is.null(endpts.set2)) { i <<- (i-1) } else { right.endpt <- endpts.set1[2] # set 1's right endpoint if(right.endpt < endpts.set2[1] || right.endpt <= L.at.i) { i <<- (i-1) } else { L.vec[i+1] <<- right.endpt U.vec[n-i+1] <<- round((1-right.endpt)*10^digits)/10^digits U.vec[j+1] <<- right.endpt L.vec[n-j+1] <<- round((1-right.endpt)*10^digits)/10^digits i <<- (i-1) } } } else { # sufficient coverage if(L.at.i < first.touch) { L.vec[i+1] <<- first.touch U.vec[n-i+1] <<- round((1-first.touch)*10^digits)/10^digits U.vec[j+1] <<- first.touch L.vec[n-j+1] <<- round((1-first.touch)*10^digits)/10^digits # i stays at i } else { i <<- (i-1) } } } L.no.longer.changes <- FALSE iter <- 0 while(!L.no.longer.changes) { iter <- iter + 1 L.old <- L.vec i <- n coincidental.endpoint.idx <- rep(NA, n+1) # a matrix recording coincidental points while(i>0) { L.at.i <- L.vec[i+1] j.equal <- which(U.vec[0:(i-1)+1]==L.at.i)-1 # to remove index that have been assigned as coincidental endpoints from previous iteration if(i<n){ j.not.taken <- setdiff(j.equal, coincidental.endpoint.idx[(i+2):(n+1)]) if(!all(is.na(coincidental.endpoint.idx[(i+2):(n+1)]))) { # if not all elements are NA # force index to be smaller than the smallest index that has been assigned j.not.taken <- j.not.taken[j.not.taken < min(coincidental.endpoint.idx[(i+2):(n+1)], na.rm = T)] } }else { j.not.taken <- j.equal } j <- j.not.taken if(length(j)>0) { j <- j[length(j)] # last element # determine if separable separable <- 
TRUE if(j==(i-1)) { separable <- FALSE } else { if(g(j+1,i-1,n,L.at.i)<(1-alpha)) { separable <- FALSE } } if(separable) { move.noncoincidental.endpoint() } else { move.coincidental.endpoint() } } else { # L.at.i does not equal to any U.at.(0..(i-1)) move.noncoincidental.endpoint() } } L.new <- L.vec if(all(L.new==L.old)) L.no.longer.changes <- TRUE } # determine range for pairs of coincidental endpoints coincidental.endpt.mat <- cbind(c(0:n), coincidental.endpoint.idx) colnames(coincidental.endpt.mat) <- NULL # coincidental.endpt.mat.reduced: only include index for coincidental endpoints keep.ind <- which(apply(coincidental.endpt.mat,1,sum)!=n & !is.na(coincidental.endpt.mat[,2])) if(length(keep.ind)==0) { coincidental.endpt.mat.reduced <- c() } else { coincidental.endpt.mat.reduced <- coincidental.endpt.mat[keep.ind,,drop=FALSE] } if(!is.null(coincidental.endpt.mat.reduced)) { colnames(coincidental.endpt.mat.reduced) <- c("L.index", "U.index") } # find range for coinc. endpoints and store them in range.mat Range.mat <- cbind(L.vec,L.vec,U.vec,U.vec) if(!is.null(coincidental.endpt.mat.reduced)) { for(k in 1:nrow(coincidental.endpt.mat.reduced)) { # note that it is 'k', instead of 'i' i <- coincidental.endpt.mat.reduced[k,1] j <- coincidental.endpt.mat.reduced[k,2] endpts.set1 <- find.endpoints.given.confidence.level(j,i-1,n,alpha,digits) endpts.set2 <- find.endpoints.given.confidence.level(j+1,i,n,alpha,digits) if(is.null(endpts.set1) || is.null(endpts.set2)) { stop("Error in deciding allowable range for coincidental endpoints. Please consider increasing the argument 'digits'.") } allowable.range <- c(endpts.set2[1],endpts.set1[2]) Range.mat[i+1,1:2] <- allowable.range Range.mat[j+1,3:4] <- allowable.range Range.mat[n-i+1,3:4] <- round(rev(1-allowable.range)*10^digits)/10^digits Range.mat[n-j+1,1:2] <- round(rev(1-allowable.range)*10^digits)/10^digits } } # enforce monotonicity of endpoints in Range.mat while(TRUE) { no.more.adjustment <- TRUE if(!is.null(coincidental.endpt.mat.reduced)) { for(k in 1:nrow(coincidental.endpt.mat.reduced)) { # note that it is 'k', instead of 'i' i <- coincidental.endpt.mat.reduced[k,1] j <- coincidental.endpt.mat.reduced[k,2] range.lower.limit <- Range.mat[i+1,1] range.upper.limit <- Range.mat[i+1,2] # aliases rll <- range.lower.limit rul <- range.upper.limit rll.candidates.vec <- c() rul.candidates.vec <- c() if(i+j<n) { rul.candidates.vec <- c(rul.candidates.vec, 0.5) } if(i+j>n) { rll.candidates.vec <- c(rll.candidates.vec, 0.5) } if(i>0) { rll.candidates.vec <- c(rll.candidates.vec, Range.mat[i,1]) } if(j>0) { rll.candidates.vec <- c(rll.candidates.vec, Range.mat[j,3]) } if(i<n) { rul.candidates.vec <- c(rul.candidates.vec, Range.mat[i+2,2]) } if(j<n) { rul.candidates.vec <- c(rul.candidates.vec, Range.mat[j+2,4]) } if(rll<max(rll.candidates.vec)) { no.more.adjustment <- FALSE rll <- max(rll.candidates.vec) Range.mat[ i+1,1] <- rll Range.mat[ j+1,3] <- rll Range.mat[n-i+1,4] <- 1-rll Range.mat[n-j+1,2] <- 1-rll } if(rul>min(rul.candidates.vec)) { no.more.adjustment <- FALSE rul <- min(rul.candidates.vec) Range.mat[ i+1,2] <- rul Range.mat[ j+1,4] <- rul Range.mat[n-i+1,3] <- 1-rul Range.mat[n-j+1,1] <- 1-rul } } } if(no.more.adjustment) break; } rownames(Range.mat) <- paste0("X=",0:n) colnames(Range.mat) <- c("L.min","L.max","U.min","U.max") # update L.vec & U.vec using mid-ranges L.vec <- round((Range.mat[,1]+Range.mat[,2])/2*10^digits)/10^digits U.vec <- rev(round((1-L.vec)*10^digits)/10^digits) if(!is.null(coincidental.endpt.mat.reduced)) { # realign 
coincidental endpoints after rounding for(k in 1:nrow(coincidental.endpt.mat.reduced)) { i <- coincidental.endpt.mat.reduced[k,1] j <- coincidental.endpt.mat.reduced[k,2] realigned.endpt <- max(L.vec[i+1],U.vec[j+1]) L.vec[i+1] <- U.vec[j+1] <- realigned.endpt } } CI.mat <- cbind(L.vec,U.vec) rownames(CI.mat) <- paste0("X=",0:n) colnames(CI.mat) <- c("L","U") Endpoint.Type.mat <- matrix("NC",nrow=n+1,ncol=2) half.range.mat <- matrix(NA, nrow = n + 1, ncol = 2) if(!is.null(coincidental.endpt.mat.reduced)) { coincidental.Ls <- coincidental.endpt.mat.reduced[,1] coincidental.Us <- coincidental.endpt.mat.reduced[,2] Endpoint.Type.mat[coincidental.Ls+1,1] <- "C" Endpoint.Type.mat[coincidental.Us+1,2] <- "C" half.range.mat[coincidental.Ls+1,1] <- (Range.mat[coincidental.Ls+1,1] + Range.mat[coincidental.Ls+1,2])/2 half.range.mat[coincidental.Us+1,2] <- (Range.mat[coincidental.Us+1,1] + Range.mat[coincidental.Us+1,2])/2 } rownames(Endpoint.Type.mat) <- paste0("X=",0:n) colnames(Endpoint.Type.mat) <- c("L","U") rownames(half.range.mat) <- paste0("X=",0:n) colnames(half.range.mat) <- c("L","U") if(digits.save!=digits) { # digits.save < digits (which =4) digits <- digits.save # restore original precision CI.mat[,1] <- floor(CI.mat[,1]*10^digits)/10^digits CI.mat[,2] <- ceiling(CI.mat[,2]*10^digits)/10^digits Range.mat[,1] <- pmin(ceiling(Range.mat[,1]*10^digits)/10^digits, CI.mat[,1]) Range.mat[,2] <- pmax( floor(Range.mat[,2]*10^digits)/10^digits, CI.mat[,1]) Range.mat[,3] <- pmin(ceiling(Range.mat[,3]*10^digits)/10^digits, CI.mat[,2]) Range.mat[,4] <- pmax( floor(Range.mat[,4]*10^digits)/10^digits, CI.mat[,2]) } # if user specified the value of X if(!is.null(X)) { CI.mat <- CI.mat[X+1,,drop=FALSE] if(length(c(which(coincidental.endpt.mat.reduced[,1] == X), which(coincidental.endpt.mat.reduced[,2] == X))) == 0){ coincidental.endpt.mat.reduced <- NULL }else{ coincidental.endpt.mat.reduced <- coincidental.endpt.mat.reduced[c(which(coincidental.endpt.mat.reduced[,1] == X), which(coincidental.endpt.mat.reduced[,2] == X)),,drop=FALSE] } Endpoint.Type.mat <- Endpoint.Type.mat[X+1,,drop=FALSE] Range.mat <- Range.mat[X+1,,drop=FALSE] } if(!additional.info) { result <- CI.mat class(result) <- "customclass" return(result) } else { result <- list(CI=CI.mat, coinc.index = coincidental.endpt.mat.reduced, endpoint.type=Endpoint.Type.mat, range=Range.mat) class(result) <- "customclass" return(result) } } # to customize the summary function add.sign.to.number <- function(x, digits) { x.c <- lapply(x, function(x) {if(!is.na(x)) {x <- formatC(x, format = 'f', digits = digits)} else {x<- ""}}) x.plus.sign <- lapply(x.c, function(x) {if(x != "") x <- paste(c(bquote("\U00B1"), x), collapse = " ")}) x.plus.sign[sapply(x.plus.sign, is.null)] <- "" x.plus.sign <- unlist(x.plus.sign) return(x.plus.sign) } #' @export summary.customclass <- function(object, ...) { arguments <- list(...) 
if (!is.null(names(object))) { # check if x has Range matrix ls <- object$CI[,1] us <- object$CI[,2] # to figure out the number of digits used for calculation lus <- c(ls, us) if(all(lus %in% c(0,1))){ # if alpha = 0 l.for.digits <- 0 } else { # if alpha != 0 l.for.digits <- max(lus[!lus %in% c(0,1)]) } digits <- nchar(sub("^.+[.]","",l.for.digits)) # calculate ranges L.range <- floor(apply(object$range, 1, function(x) if(x[1] != x[2]) {1/2*(x[2] - x[1])} else NA)*10^digits)/10^digits L.range.plus.sign <- add.sign.to.number(L.range, digits) U.range <- floor(apply(object$range, 1, function(x) if(x[3] != x[4]) {1/2*(x[4] - x[3])} else NA)*10^digits)/10^digits U.range.plus.sign <- add.sign.to.number(U.range, digits) output <- data.frame(L = ls, L.range = L.range.plus.sign, U = us, U.range = U.range.plus.sign) return(output) } else { output <- object class(output) <- "matrix" return(summary(output)) } }
/scratch/gouwar.j/cran-all/cranData/BlythStillCasellaCI/R/blyth.still.casella.R
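# Added usage sketch: with the functions above sourced, compute 95%
# Blyth-Still-Casella intervals for n = 10 and check that the realized
# coverage at every interval endpoint stays at or above 1 - alpha.
cis <- blyth.still.casella(n = 10, alpha = 0.05, digits = 4)
cov <- evaluate.coverage(L.vec = cis[, "L"], U.vec = cis[, "U"])
min(cov) >= 0.95  # expected TRUE, as endpoints are rounded outward at 'digits'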
draw_space <- function(size){ par(mar=c(1,1,1,1)) plot(NULL,xlim=c(0,size),ylim=c(0,size),bty="n",main="",xlab="",ylab="",xaxt="n",yaxt="n") } horiz <- function(pos1,pos2,N,b,ratio,def,size){ x <- size/2 y <- size/2 r <- size/2 phi <- seq(pi/2, (2 * pi)+pi/2, length = def) V1 <- phi[ceiling((min(c(pos1,pos2))/N)*def):ceiling((max(c(pos1,pos2))/N)*def)] lines(x + (r-(r*b)*ratio)* (-cos(V1)), y + (r-(r*b)*ratio)* sin(V1), type = "l",lwd=1) } edge <- function(pos,N,b1,b2,ratio,def,size){ x <- size/2 y <- size/2 r <- size/2 phi <- seq(pi/2, (2 * pi)+pi/2, length = def) cx1 <- x + (r-(r*b1)*ratio) * (-cos(phi[ceiling(pos*def/N)])) cx2 <- x + (r-(r*b2)*ratio) * (-cos(phi[ceiling(pos*def/N)])) cy1 <- y + (r-(r*b1)*ratio) * (sin(phi[ceiling(pos*def/N)])) cy2 <- y + (r-(r*b2)*ratio) * (sin(phi[ceiling(pos*def/N)])) lines(c(cx1,cx2),c(cy1,cy2)) return(c(cx1,cx2,cy1,cy2)) } get_root <- function(arbre){ depth <- node.depth(arbre) (1:length(depth))[depth==max(depth)] } get_descent <- function(arbre,nb=FALSE){ out <- list() all <- mrca(arbre) tip <- length(arbre$tip.label) for (i in 1:arbre$Nnode){ if(!nb) out[[i]] <- rownames(all)[apply(all==(tip+i),1,sum)>0] if(nb) out[[i]] <- match(rownames(all)[apply(all==(tip+i),1,sum)>0],arbre$tip.label) } out } get_branch <- function(arbre){ brl <- arbre$edge.length brd <- arbre$edge desc <- get_descent(arbre) nodeorder <- unlist(lapply(desc,length)) names(nodeorder) <- (length(arbre$tip.label)+1):(length(arbre$tip.label)+arbre$Nnode) nodeorder <- sort(nodeorder,TRUE) supernode <- c(as.numeric(names(nodeorder)),1:length(arbre$tip.label)) out <- data.frame(node1=brd[,1],node2=brd[,2],brl1=rep(0,length(brd[,1])),brl2=brl) for(i in 2:length(supernode)){ a <- match(supernode[i],brd[,2]) if(!is.na(a)) out$brl1[a] <- out$brl2[a] + out$brl1[a] a2 <- (1:length(brd[,1]))[brd[,1]==supernode[i]] if(!is.na(a)) out$brl1[a2] <- out$brl1[a2] + out$brl1[a] } out$b1 <- out[,3]/max(out[,3]) out$b2 <- (out[,3] - out[,4])/max(out[,3]) out } desc_nb <- function(arbre){ out <- c(rep(1,length(arbre$tip.label)),unlist(lapply(get_descent(arbre),length))) names(out) <- 1:length(out) out } circular_tree <- function(phy,ratio=0.5,def=1000,pos_out=FALSE,tip_labels=TRUE,cex_tips=0.5){ size <- 300 branch <- get_branch(phy) dn <- desc_nb(phy) N <- length(phy$tip.label) pos_vector <- unique(unlist(get_descent(phy,nb=TRUE))) descnb <- get_descent(phy,nb=T) tipnb <- list() for(i in 1:length(pos_vector)) tipnb[i] <- pos_vector[i] pos_vector <- c(tipnb,descnb) draw_space(size) out <- list() for(i in 1:length(branch[,1])){ out[[i]] <- c(branch[i,1:2],edge(mean(c(min(pos_vector[[branch[i,2]]]),max(pos_vector[[branch[i,2]]]))),N,branch$b2[i],branch$b1[i],ratio,def,size)) } internal_node <- unique(branch[,1]) for(i in 1:length(internal_node)){ sub <- branch[branch[,1]==internal_node[i],] subtip <- pos_vector[sub[,2]] s1 <- (1:length(subtip))[unlist(lapply(subtip,min))==min(unlist(lapply(subtip,min)))] s2 <- (1:length(subtip))[unlist(lapply(subtip,max))==max(unlist(lapply(subtip,max)))] horiz(mean(c(min(subtip[[s1]]),max(subtip[[s1]]))),mean(c(min(subtip[[s2]]),max(subtip[[s2]]))),N,sub$b2[1],ratio,def,size) } out <- do.call(rbind,out) colnames(out) <- c("node1","node2","x1","x2","y1","y2") if(tip_labels){ postips <- out[out[,2]%in%(1:length(phy$t)),c(4,6)] text(postips[,1],postips[,2],phy$t,cex_tips) } if(pos_out){ return(out) } }
/scratch/gouwar.j/cran-all/cranData/BoSSA/R/circular_tree.r
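# Added usage sketch: draw a random tree with the circular layout implemented
# above. Assumes 'ape' is attached, since the helpers rely on node.depth(),
# mrca() and the "phylo" tree structure.
library(ape)
set.seed(42)
tr <- rtree(12)  # random 12-tip tree with branch lengths
circular_tree(tr, ratio = 0.5, tip_labels = TRUE, cex_tips = 0.6)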
get_edge <- function(){ lastPP <- get("last_plot.phylo", envir = .PlotPhyloEnv) subedge <- lastPP$edge XX <- lastPP$xx[subedge[, 1]] YY <- lastPP$yy[subedge[, 2]] cbind(XX,YY) }
/scratch/gouwar.j/cran-all/cranData/BoSSA/R/get_edge.R
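# Added usage sketch: get_edge() reads the coordinates cached by the most
# recent plot.phylo() call, so a tree must be plotted first (assumes 'ape').
library(ape)
tr <- rtree(5)
plot(tr)
xy <- get_edge()  # one row per edge: x of the parent node, y of the child node
head(xy)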
plot.pplace <- function(x,type="precise",simplify=FALSE,main="",N=NULL,transfo=NULL,legend=TRUE,stl=FALSE,asb=FALSE,edge.width=1,max_width=10,cex.number=0.5,cex.text=0.8,transp=80,add=FALSE,color=NULL,discrete_col=FALSE,pch=16,run_id=NULL,...){ if(!is.null(run_id)){ x <- sub_pplace(x,run_id=run_id) } if(is.null(N)){ x$multiclass$N <- 1 } if(!is.null(N)){ x$multiclass$N <- N } if(simplify){ x$placement_positions <- x$placement_positions[order(x$placement_positions$ml_ratio,decreasing=TRUE),] x$placement_positions <- x$placement_positions[match(unique(x$placement_positions$placement_id),x$placement_positions$placement_id),] x$placement_positions$ml_ratio <- 1 } if(is.null(color)) color <- c("blue","green","yellow","red") if(type!="precise"){ placement_N <- aggregate(x$multiclass$N,list(x$multiclass$placement_id),sum) br_sum <- aggregate(placement_N[match(x$placement_positions$placement_id,placement_N[,1]),2]*x$placement_positions$ml_ratio,list(branch=x$placement_positions$location),sum) if(type=="number"){ plot(x$arbre,edge.width=edge.width,show.tip.label=stl,no.margin=TRUE,...) text(0,0,main,cex=cex.text,pos=4) edgelabels(round(br_sum[,2]),br_sum[,1],cex=cex.number) if(asb) add.scale.bar() } if(type=="fattree"){ vwidth <- rep(0.1,nrow(x$arbre$edge)) vwidth[br_sum[,1]] <- ceiling(max_width*br_sum[,2]/max(br_sum[,2])) plot(x$arbre,edge.width=vwidth,show.tip.label=stl,no.margin=TRUE,...) text(0,0,main,cex=cex.text,pos=4) if(asb) add.scale.bar() } if(type=="color"){ col_palette=rgb(colorRamp(color)(seq(0,1,length=100)), maxColorValue = 255) vcol <- rep(paste0("#000000",transp),nrow(x$arbre$edge)) coln <- ceiling(100*br_sum[,2]/max(br_sum[,2])) coln[coln>100] <- 100 vcol[br_sum[,1]] <- col_palette[coln] if(!legend){ plot(x$arbre,edge.color=vcol,edge.width=edge.width,show.tip.label=stl,no.margin=TRUE,...) text(0,0,main,cex=cex.text,pos=4) if(asb) add.scale.bar() } if(legend){ layout(matrix(c(rep(1,6),2),ncol=1)) plot(x$arbre,edge.color=vcol,edge.width=edge.width,show.tip.label=stl,no.margin=TRUE,...) 
text(0,0,main,cex=cex.text,pos=4) if(asb) add.scale.bar() par(mar=c(3,15,3,15)) image(1:length(col_palette),1:1,matrix(1:length(col_palette),ncol=1),col=col_palette,xaxt="n",yaxt="n",xlab="",ylab="") axis(3,at=c(1,length(col_palette)),labels=c(0,round(max(br_sum[,2]),0))) text(0,0,main,cex=cex.text,pos=4) } } } if(type=="precise"){ col_palette <- rgb(colorRamp(color)(seq(0,1,length=1000)), maxColorValue = 255) if(discrete_col){ col_palette <- rep(NA,1000) color <- col2rgb(color) color <- apply(color,2,function(X){rgb(X[1],X[2],X[3], maxColorValue=255)}) pos_col <- round(seq(1,1000,length.out=length(color)+1)) for(i in 1:length(color)){ col_palette[pos_col[i]:pos_col[i+1]] <- color[i] } } vcol <- col_palette[ceiling((x$placement_positions$pendant_bl/max(x$placement_positions$pendant_bl))*1000)] placement_N <- aggregate(x$multiclass$N,list(x$multiclass$placement_id),sum) cex_placement <- placement_N[match(x$placement_positions$placement_id,placement_N[,1]),2]*x$placement_positions$ml_ratio cex_placement[is.na(cex_placement)] <- 0 if(!is.null(transfo)) cex_placement <- transfo(cex_placement) if(legend & !add){ layout(matrix(c(rep(3,6),1,rep(3,6),2),ncol=2)) par(mar=c(3,7,3,3.5)) image(1:length(col_palette),1:1,matrix(1:length(col_palette),ncol=1),col=col_palette,xaxt="n",yaxt="n",xlab="",ylab="",main="pendant branch length",cex.main=1,font.main=1) axis(3,at=c(1,length(col_palette)),labels=c(0,round(max(x$placement_positions$pendant_bl),2))) par(mar=c(1,3.5,1,7)) plot.new() cex_legend <- seq(0,ceiling(max(cex_placement)),length.out=5) points(seq(0.1,0.9,by=0.2),y=rep(0.3,5),pch=16,cex=cex_legend) text(seq(0.1,0.9,by=0.2),y=rep(0.5,5),cex_legend,pos=3) text(0.5,0.9,"placement size",cex=1) } if(!add){ plot(x$arbre,edge.color="black",edge.width=edge.width,show.tip.label=stl,no.margin=TRUE,...) text(0,0,main,cex=cex.text,pos=4) } if(asb) add.scale.bar() pos_phylo <- get_edge() xpos <- pos_phylo[x$placement_positions$location,1] + x$placement_positions$distal_bl ypos <- pos_phylo[x$placement_positions$location,2] order_ploting <- order(cex_placement,decreasing=TRUE) points(xpos[order_ploting],ypos[order_ploting],col=paste(vcol[order_ploting],transp,sep=""),cex=cex_placement[order_ploting],pch=pch) if(add) warning("Placement dots color may not be accurate. It is highly recommended to use a single color when using the 'add' option. Placement dot size will be comparable between plots only if the same value is used in the 'transfo' option.") } } plot.jplace <- function(x,...){ if(ncol(x$placement_positions)==7) colnames(x$placement_positions) <- c("placement_id","location","ml_ratio","log_like","distal_bl","pendant_bl","tax_id") if(ncol(x$placement_positions)==6) colnames(x$placement_positions) <- c("placement_id","location","ml_ratio","log_like","distal_bl","pendant_bl") plot.pplace(x,...) }
/scratch/gouwar.j/cran-all/cranData/BoSSA/R/plot.pplace.R
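# Added usage sketch (mirrors the package vignette): plot the example
# placements shipped with BoSSA. Assumes the BoSSA package and its
# dependencies are installed.
library("BoSSA")
sqlite_file <- system.file("extdata", "example.sqlite", package = "BoSSA")
jplace_file <- system.file("extdata", "example.jplace", package = "BoSSA")
pplace <- read_sqlite(sqlite_file, jplace_file)
plot(pplace, type = "number")   # per-branch placement counts
plot(pplace, type = "precise")  # one dot per placement position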
pplace_to_matrix <- function(pplace,sample_info,N=NULL,tax_name=FALSE,run_id=NULL,round_type=NULL){ if(class(pplace)!="pplace"){ stop("ERROR: the input is not an object of class pplace") } if(!is.null(run_id)){ pplace <- sub_pplace(pplace,run_id=run_id) } if(!is.null(N) & length(N)!=nrow(pplace$multiclass)) stop("N should have a number of entry equal to the number of line of the \"multiclass\" table") if(!is.null(sample_info) & length(sample_info)!=nrow(pplace$multiclass)) stop("sample_info should have a number of entry equal to the number of line of the \"multiclass\" table") out <- NULL if(nrow(pplace$multiclass)>0){ agglk <- aggregate(pplace$multiclass$likelihood,list(pplace$multiclass$name),sum) lk_rescale <- pplace$multiclass$likelihood/agglk$x[match(pplace$multiclass$name,agglk[,1])] sample_id <- unique(unlist(sample_info)) tax_id <- unique(pplace$multiclass$tax_id) out <- matrix(0,ncol=length(tax_id),nrow=length(sample_id),dimnames=list(sample_id,tax_id)) if(class(sample_info)!="list"){ if(is.null(N)){ N <- rep(1,length(sample_info)) } for(i in 1:nrow(pplace$multiclass)){ if(is.null(round_type)) out[sample_info[i],pplace$multiclass$tax_id[i]] <- out[sample_info[i],pplace$multiclass$tax_id[i]] + N[i]*lk_rescale[i] if(!is.null(round_type)) out[sample_info[i],pplace$multiclass$tax_id[i]] <- out[sample_info[i],pplace$multiclass$tax_id[i]] + get(round_type)(N[i]*lk_rescale[i]) } } if(class(sample_info)=="list"){ if(!is.null(N) & class(N)!="list"){ stop("Sample_info and N should be list objects") } for(i in 1:length(sample_info)){ for(j in 1:length(sample_info[[i]])){ if(is.null(N)){ Nij <- rep(1,length(sample_info)) } if(!is.null(N)){ Nij <- N[[i]][j] } if(is.null(round_type)) out[sample_info[[i]][j],pplace$multiclass$tax_id[i]] <- out[sample_info[[i]][j],pplace$multiclass$tax_id[i]] + Nij*lk_rescale[i] if(!is.null(round_type)) out[sample_info[[i]][j],pplace$multiclass$tax_id[i]] <- out[sample_info[[i]][j],pplace$multiclass$tax_id[i]] + get(round_type)(Nij*lk_rescale[i]) } } } if(tax_name) colnames(out) <- pplace$taxa[match(tax_id,pplace$taxa[,1]),2] } return(out) }
/scratch/gouwar.j/cran-all/cranData/BoSSA/R/pplace_to_matrix.R
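# Added usage sketch (mirrors the package vignette): build a sample-by-taxon
# contingency matrix, assigning the first 50 sequences to "sample1" and the
# next 50 to "sample2". Assumes 'pplace' was read with read_sqlite() as above.
example_contingency <- pplace_to_matrix(pplace, c(rep("sample1", 50), rep("sample2", 50)), tax_name = TRUE)
example_contingency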
pplace_to_table <- function(pplace,type="full",run_id=NULL){ if(class(pplace)!="pplace"){ stop("ERROR: the input is not an object of class pplace") } if(!is.null(run_id)){ pplace <- sub_pplace(pplace,run_id=run_id) } out <- NULL if(nrow(pplace$multiclass)>0){ out <- merge(pplace$multiclass,pplace$placement_positions,by="placement_id") if(type=="best"){ out <- out[order(out$ml_ratio,decreasing=TRUE),] out <- out[match(unique(out$placement_id),out[,1]),] } out <- out[order(out$placement_id),] rownames(out) <- NULL colnames(out)[c(5,12)] <- c("tax_id_multilcass","tax_id_placement") } return(out) }
/scratch/gouwar.j/cran-all/cranData/BoSSA/R/pplace_to_table.R
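# Added usage sketch (mirrors the package vignette): merge the multiclass and
# placement tables, keeping only the best placement per sequence.
# Assumes 'pplace' was read with read_sqlite() as above.
pplace_table <- pplace_to_table(pplace, type = "best")
head(pplace_table, n = 3)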
pplace_to_taxonomy <- function(pplace,taxonomy,rank=c("phylum","class","order","family","genus","species"),type="all",tax_name=TRUE,run_id=NULL){ if(class(pplace)!="pplace"){ stop("ERROR: the input is not an object of class pplace") } if(sum(colnames(taxonomy)%in%rank)==0){ stop("ERROR: none of the ranks provided is available in the taxonomy") } if(!is.null(run_id)){ pplace <- sub_pplace(pplace,run_id=run_id) } out <- pplace$multiclass[,c("name","tax_id")] if(type=="best"){ out <- out[order(pplace$multiclass$likelihood,decreasing=TRUE),] out <- out[match(unique(out[,1]),out[,1]),] } out <- out[order(out[,1]),] if(sum(is.na(match(out$tax_id,taxonomy$tax_id)))/length(out$tax_id)>0.5) warning("the taxonomy doesn't seem to match the pplace object") out2 <- as.matrix(taxonomy[match(out$tax_id,taxonomy$tax_id),colnames(taxonomy)%in%rank]) rownames(out2) <- out[,1] if(tax_name){ out3 <- out2 tax_id <- unique(as.vector(out3)) tax_id <- tax_id[tax_id!=""] for(i in 1:length(tax_id)){ if(!is.na(tax_id[i])){ out2[out3==tax_id[i]] <- taxonomy$tax_name[taxonomy$tax_id==tax_id[i]] } } } out2[!is.na(out2) & out2==""] <- "Unclassified" return(out2) }
/scratch/gouwar.j/cran-all/cranData/BoSSA/R/pplace_to_taxonomy.R
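# Added usage sketch (mirrors the package vignette): derive a per-sequence
# taxonomy table from the example reference package shipped with BoSSA.
# Assumes 'pplace' was read with read_sqlite() as above.
refpkg_path <- paste(find.package("BoSSA"), "/extdata/example.refpkg", sep = "")
taxo <- refpkg(refpkg_path, type = "taxonomy")
example_taxo <- pplace_to_taxonomy(pplace, taxo, tax_name = TRUE, rank = c("order", "family", "genus", "species"))
head(example_taxo)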
print.jplace <- function(x,...){ cat("jplace object\n") cat(paste("call run: ",x$run,"\n",sep="")) cat(paste("Placement on a phylogenetic tree with ",length(x$arbre$tip.label)," tips and ",x$arbre$Nnode," internal nodes.\n",sep="")) cat(paste("sequence nb: ",length(unique(x$multiclass$name)),"\n",sep="")) cat(paste("placement nb: ",length(unique(x$placement_positions$placement_id)),"\n",sep="")) }
/scratch/gouwar.j/cran-all/cranData/BoSSA/R/print.jplace.R
print.protdb <- function(x,...){ cat("protdb object\n") cat(paste(gsub(" +"," ",x$header),"\n",sep="")) }
/scratch/gouwar.j/cran-all/cranData/BoSSA/R/print.pdb.R
print.pplace <- function(x,...){ cat("pplace object\n") cat(paste("run: ",nrow(x$run),"\n",sep="")) cat(paste("call run 1: ",x$run[1,2],"\n",sep="")) cat(paste("Placement on a phylogenetic tree with ",length(x$arbre$tip.label)," tips and ",x$arbre$Nnode," internal nodes.\n",sep="")) cat(paste("sequence nb: ",nrow(x$placement_names),"\n",sep="")) cat(paste("placement nb: ",length(unique(x$placement_positions$placement_id)),"\n",sep="")) }
/scratch/gouwar.j/cran-all/cranData/BoSSA/R/print.pplace.R
read_jplace <- function(jplace_file,full=TRUE){ json_data <- fromJSON(jplace_file) arbre <- json_data$tree arbre2 <- gsub("}","",gsub("{","#",arbre,fixed=TRUE),fixed=TRUE) arbre3 <- read.tree(text=gsub("#","",gsub(":[0-9].[0-9]+#",":",gsub("e-","",arbre2)))) edge_key <- rbind(1:nrow(arbre3$edge),arbre3$edge.l) arbre <- gsub("}","#",gsub("{","#",arbre,fixed=TRUE),fixed=TRUE) arbre <- read.tree(text=gsub("#[0-9]+#","",arbre)) out <- list(arbre,edge_key,json_data$tree) names(out) <- c("arbre","edge_key","original_tree") if(full){ a <- json_data$placements[,1] for(i in 1:length(a)){ a[[i]] <- cbind(rep(i,nrow(a[[i]])),a[[i]]) } a <- as.data.frame(do.call(rbind,a),stringsAsFactors=FALSE) colnames(a) <- c("placement_id",json_data$fields) col_num = c(1,3:ncol(a)) a[,col_num] <- apply(a[,col_num],2,as.numeric) out$placement_positions <- a b <- json_data$placements[,2] if(class(b[[1]])=="matrix"){ for(i in 1:length(b)){ b[[i]] <- cbind(rep(i,nrow(b[[i]])),b[[i]]) } b2 <- as.data.frame(do.call(rbind,b),stringsAsFactors=FALSE) colnames(b2) <- c("placement_id","name","nm") b2[,1] <- as.numeric(b2[,1]) b2[,3] <- as.numeric(b2[,3]) } if(class(b[[1]])!="matrix"){ for(i in 1:length(b)){ b[[i]] <- cbind(rep(i,length(b[[i]])),b[[i]]) } b2 <- as.data.frame(do.call(rbind,b),stringsAsFactors=FALSE) colnames(b2) <- c("placement_id","name") b2[,1] <- as.numeric(b2[,1]) } out$multiclass <- b2 pplacer_branch_id <- out$placement_positions$edge_num out$placement_positions$edge_num <- out$edge_key[1,match(pplacer_branch_id,out$edge_key[2,])] if(ncol(out$placement_positions)==7) out$placement_positions <- out$placement_positions[,c(1,4,5,6,3,7,2)] if(ncol(out$placement_positions)==6) out$placement_positions <- out$placement_positions[,c(1,2,4,3,5,6)] #out$edge_key <- NULL out$run <- as.character(json_data$metadata$invocation[1]) class(out) <- "jplace" } out }
/scratch/gouwar.j/cran-all/cranData/BoSSA/R/read_jplace.R
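# Added usage sketch: read the jplace file shipped with BoSSA; with the
# default full = TRUE the placements and the name table are parsed as well.
jplace_file <- system.file("extdata", "example.jplace", package = "BoSSA")
jp <- read_jplace(jplace_file)
jp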
read_protdb <- function(X){ pdb <- scan(X,sep="\n",what="complex") category <- gsub(" ","",unlist(lapply(pdb,substring,1,6))) header <- unlist(lapply(pdb[category=="HEADER"],substring,11)) compound <- unlist(lapply(pdb[category=="COMPND"],substring,11)) id <- as.numeric(gsub(";","",gsub("MOL_ID:","",compound[grep("MOL_ID: ",compound)]))) ch <- gsub(" ","",(gsub(";","",gsub("CHAIN:","",compound[grep("CHAIN: ",compound)])))) mol <- gsub(",","",gsub("MOLECULE:","",compound[grep("MOLECULE: ",compound)])) compound <- data.frame(id=id,chain=ch,molecule=mol) atomt <- unlist(lapply(pdb[category=="ATOM"],substring,14,16)) aminoa <- as.character(unlist(lapply(pdb[category=="ATOM"],substring,18,20))) chain <- unlist(lapply(pdb[category=="ATOM"],substring,22,22)) namino <- as.numeric(unlist(lapply(pdb[category=="ATOM"],substring,23,26))) Xcoor <- as.numeric(unlist(lapply(pdb[category=="ATOM"],substring,32,38))) Ycoor <- as.numeric(unlist(lapply(pdb[category=="ATOM"],substring,39,46))) Zcoor <- as.numeric(unlist(lapply(pdb[category=="ATOM"],substring,47,54))) atom <- data.frame(atom=atomt,aa=aminoa,chain=chain,naa=namino,X=Xcoor,Y=Ycoor,Z=Zcoor) sequence <- list(NULL) uch <- unique(atom$chain) for(i in 1:length(uch)){ j <- i*2 subatom <- atom[atom$chain==uch[i],] uaa <- unique(subatom$naa) sequence[[j-1]] <- uaa sequence[[j]] <- as.character(subatom$aa[unlist(lapply(uaa,FUN <- function(X){grep(X,subatom$naa)[1]}))]) names(sequence)[j-1] <- paste("ref_",uch[i],sep="") names(sequence)[j] <- paste("chain_",uch[i],sep="") } out <- list(header) out[[2]] <- compound out[[3]] <- atom out[[4]] <- sequence names(out) <- c("header","compound","atom","sequence") class(out) <- "protdb" out }
/scratch/gouwar.j/cran-all/cranData/BoSSA/R/read_pdb.R
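# Added usage sketch: parse a PDB structure file into a 'protdb' object.
# "my_structure.pdb" is a hypothetical placeholder path, not a file shipped
# with the package.
pdb <- read_protdb("my_structure.pdb")
pdb$compound    # chain/molecule table parsed from the COMPND records
head(pdb$atom)  # one row per ATOM record: atom, residue, chain, coordinates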
read_sqlite <- function(sqlite_file,jplace_file=gsub("sqlite$","jplace",sqlite_file),rank="species"){ out <- list() db <- dbConnect(SQLite(),dbname=sqlite_file) if(is.null(rank)) out$multiclass <- dbGetQuery(db,"select * from multiclass") if(!is.null(rank)) out$multiclass <- dbGetQuery(db,paste0("select * from multiclass where want_rank=\'",rank,"\'")) out$placement_classifications <- dbGetQuery(db,"select * from placement_classifications") out$placement_evidence <- dbGetQuery(db,"select * from placement_evidence") out$placement_median_identities <- dbGetQuery(db,"select * from placement_median_identities") out$placement_names <- dbGetQuery(db,"select * from placement_names") out$placement_nbc <- dbGetQuery(db,"select * from placement_nbc") out$placement_positions <- dbGetQuery(db,"select * from placement_positions") out$placements <- dbGetQuery(db,"select * from placements") out$ranks <- dbGetQuery(db,"select * from ranks") out$runs <- dbGetQuery(db,"select * from runs") out$sqlite_sequence <- dbGetQuery(db,"select * from sqlite_sequence") out$taxa <- dbGetQuery(db,"select * from taxa") dbDisconnect(db) out <- c(out,read_jplace(jplace_file,full=FALSE)) if(nrow(out$multiclass)>0){ pplacer_branch_id <- out$placement_positions$location out$placement_positions$location <- out$edge_key[1,match(pplacer_branch_id,out$edge_key[2,])] } class(out) <- "pplace" out }
/scratch/gouwar.j/cran-all/cranData/BoSSA/R/read_sqlite.R
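# Added usage sketch: pair the guppy sqlite output with its jplace file; the
# 'rank' argument restricts the multiclass table to a single taxonomic rank.
sqlite_file <- system.file("extdata", "example.sqlite", package = "BoSSA")
jplace_file <- system.file("extdata", "example.jplace", package = "BoSSA")
pplace <- read_sqlite(sqlite_file, jplace_file, rank = "species")
str(pplace, max.level = 1)  # the 15 components, including the 'arbre' tree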
refpkg <- function (refpkg_path, type = "summary", rank_tree = "species", rank_pie = c("phylum", "class", "order", "family", "genus"), scale_pie = TRUE, alpha_order = TRUE, cex.text = 0.7, cex.legend = 1, asb = TRUE, rotate_label = TRUE, out_krona="for_krona.txt",text2krona=NULL) { here <- getwd() setwd(refpkg_path) content <- fromJSON("CONTENTS.json") taxo <- read.csv(content$files$taxonomy, colClasses = "character") tree <- read.tree(content$files$tree) info <- read.csv(content$file$seq_info, colClasses = "character") info <- info[match(tree$t, info$seqname), ] setwd(here) taxid <- unique(unlist(taxo[taxo$tax_id%in%info$tax_id,-(1:4)])) taxid <- taxid[taxid!=""] taxo <- taxo[taxo$tax_id %in% taxid, , drop = FALSE] if (type == "summary") { cat("### Reference package summary\n\n") cat(paste("Path:", refpkg_path, "\n\n", sep = "")) cat(paste("Tree with ", length(tree$t), " tips ", tree$Nnode, " nodes\n\n", sep = "")) trank <- table(taxo$rank) pos <- match(colnames(taxo),names(trank)) trank <- trank[pos[!is.na(pos)]] cat("Classification:\n") for (i in 1:length(trank)) { cat(paste(names(trank)[i], trank[i], "\n")) } } if (type == "taxonomy") { return(taxo) } if (type == "info") { return(info) } if (type == "tree") { desc <- Descendants(tree, (length(tree$t) + 1):(length(tree$t) + tree$Nnode), type = "tips") info$classif <- taxo[, rank_tree][match(info$tax_id, taxo$tax_id)] info$classif[info$classif == ""] <- "not available" desc2 <- c(info$classif[match(tree$t, info[, 1])], sapply(desc, function(X, tree, info) { a <- unique(info$classif[match(tree$t[X], info[, 1])]) out <- "multiple" if (length(a) == 1) out <- a out }, tree, info)) pos <- match(desc2, taxo[, 1]) tax_name <- taxo$tax_name[pos[!is.na(pos)]] desc2[!is.na(pos)] <- tax_name colv <- c(rainbow(length(unique(tax_name))), "black", "grey") if (alpha_order) { names(colv) <- c(sort(unique(tax_name)), "multiple", "not available") } if (!alpha_order) { names(colv) <- c(unique(tax_name), "multiple", "not available") } layout(matrix(c(rep(1, 70), rep(2, 30)), ncol = 10)) plot(tree, edge.color = colv[desc2[tree$edge[, 2]]], tip.color = colv[desc2], cex = cex.text, no.margin = TRUE) if (asb) add.scale.bar() plot.new() names_id <- names(colv) pos <- match(names(colv), taxo[, 1]) names_id[!is.na(pos)] <- taxo$tax_name[pos[!is.na(pos)]] if (alpha_order) { text(0, seq(1, 0, length.out = length(colv)), names_id, col = colv, cex = cex.legend, pos = 4) } if (!alpha_order) { text(0, seq(0, 1, length.out = length(colv)), names_id, col = colv, cex = cex.legend, pos = 4) } } if (type == "pie" | type=="krona") { if(type=="pie") taxo2 <- cbind(taxo[,1,drop=FALSE],taxo[, colnames(taxo) %in% rank_pie, drop = FALSE]) if(type=="krona") taxo2 <- taxo[,-(2:4),drop=FALSE] taxo3 <- taxo2[match(info$tax_id,taxo2$tax_id),-1,drop=FALSE] taxid <- unique(unlist(taxo3)) for (i in 1:length(taxid)) { if (taxid[i] != "" & taxid[i]%in%taxo$tax_id) taxo3[taxo3 == taxid[i]] <- taxo$tax_name[taxo$tax_id == taxid[i]] } taxo3[taxo3==""] <- "NotAvailable" taxo4 <- apply(taxo3,1,paste,collapse="_") N <- table(taxo4) taxo5 <- taxo3[match(names(N),taxo4),] taxo5 <- as.data.frame(taxo5) taxo5$N <- as.numeric(N) if(type=="krona"){ write.table(taxo5[,c(ncol(taxo5),1:(ncol(taxo5)-1))],out_krona,col.names=FALSE,row.names=FALSE,quote=FALSE,sep="\t") if(!is.null(text2krona)){ system(paste("perl",text2krona,out_krona)) system(paste("rm",out_krona)) } } if(type=="pie"){ if(!scale_pie) taxo5$N <- 1 if (ncol(taxo5) > 2) taxo5 <- taxo5[do.call(order, taxo5[, -ncol(taxo5)]), ] if (ncol(taxo5) == 
2) taxo5 <- taxo5[order(taxo5[, 1]), ] par(mar = c(0, 0, 0, 0)) plot.new() rset <- seq(0.15, 0.45, length.out = ncol(taxo5) - 1) rset2 <- rset - 2/3 * (rset[2] - rset[1]) rset2[1] <- 0.05 if (ncol(taxo5) == 2) { rset <- 0.45 rset2 <- 0.25 } if (length(table(taxo5[, 1])) == 1) rset2[1] <- 0 for (i in (ncol(taxo5) - 1):1) { aggtaxo <- aggregate(taxo5$N, list(taxo5[, i]), sum) aggtaxo2 <- aggtaxo$x names(aggtaxo2) <- aggtaxo[, 1] aggtaxo2 <- aggtaxo2[order(match(names(aggtaxo2), taxo5[, i]))] bisect.angles <- floating.pie(0.5, 0.5, as.numeric(aggtaxo2), radius = rset[i], startpos = pi/2) for (j in 1:length(bisect.angles)) { if (rotate_label) { srti <- bisect.angles[j] * 180/pi pie.labels(0.5, 0.5, bisect.angles[j], names(aggtaxo2)[j], radius = rset2[i], cex = cex.text, srt = srti, pos = 4) } if (!rotate_label) pie.labels(0.5, 0.5, bisect.angles[j], names(aggtaxo2)[j], radius = rset2[i], cex = cex.text) } } } } }
/scratch/gouwar.j/cran-all/cranData/BoSSA/R/refpkg.R
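# Added usage sketch (mirrors the package vignette): summarize the example
# reference package shipped with BoSSA.
refpkg_path <- paste(find.package("BoSSA"), "/extdata/example.refpkg", sep = "")
refpkg(refpkg_path)                                # textual summary
refpkg(refpkg_path, type = "pie", cex.text = 0.5)  # nested taxonomy pie chart
refpkg(refpkg_path, type = "tree", rank_tree = "class", cex.text = 0.5)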
sub_pplace <- function(x,placement_id=NULL,ech_id=NULL,ech_regexp=NULL,run_id=NULL){ if(class(x)!="pplace" & class(x)!="jplace"){ stop("ERROR: the input is not an object of class pplace or jplace") } if(sum(!is.null(placement_id),!is.null(ech_id),!is.null(ech_regexp),!is.null(run_id))>1) stop("Subset can only be performed using a single criterion at a time, e.g. ech_id or run_id, not both in the same call") if(class(x)=="jplace"){ if(!is.null(placement_id)){ x$multiclass <- x$multiclass[x$multiclass$placement_id%in%placement_id,] x$placement_positions <- x$placement_positions[x$placement_positions$placement_id%in%placement_id,] } if(!is.null(ech_id)){ x$multiclass <- x$multiclass[x$multiclass$name%in%ech_id,] x$placement_positions <- x$placement_positions[x$placement_positions$placement_id%in%x$multiclass$placement_id,] } if(!is.null(ech_regexp)){ x$multiclass <- x$multiclass[grep(ech_regexp,x$multiclass$name),] x$placement_positions <- x$placement_positions[x$placement_positions$placement_id%in%x$multiclass$placement_id,] } } if(class(x)=="pplace"){ if(!is.null(run_id)){ pid <- x$placements$placement_id[x$placements$run_id%in%run_id] x$run <- x$run[x$run$run_id%in%run_id,] } if(!is.null(placement_id)){ pid <- unique(placement_id) } if(!is.null(ech_id)){ pid <- unique(x$placement_names$placement_id[x$placement_names$name%in%ech_id]) x$multiclass <- x$multiclass[x$multiclass[,2]%in%ech_id,] x$placement_names <- x$placement_names[x$placement_names[,2]%in%ech_id,] } if(!is.null(ech_regexp)){ pid <- unique(x$placement_names$placement_id[grep(ech_regexp,x$placement_names$name)]) x$multiclass <- x$multiclass[grep(ech_regexp,x$multiclass$name),] x$placement_names <- x$placement_names[grep(ech_regexp,x$placement_names[,2]),] } x$multiclass <- x$multiclass[x$multiclass[,1]%in%pid,] x$placement_positions <- x$placement_positions[x$placement_positions$placement_id%in%pid,] x$placement_classifications <- x$placement_classifications[x$placement_classifications$placement_id%in%pid,] x$placement_evidence <- x$placement_evidence[x$placement_evidence$placement_id%in%pid,] x$placement_names <- x$placement_names[x$placement_names$placement_id%in%pid,] x$placements <- x$placements[x$placements$placement_id%in%pid,] if(nrow(x$placement_median_identities)>0) x$placement_median_identities <- x$placement_median_identities[x$placement_median_identities$placement_id%in%pid,] if(nrow(x$placement_nbc)>0) x$placement_nbc <- x$placement_nbc[x$placement_nbc$placement_id%in%pid,] x$sqlite_sequence[x$sqlite_sequence[,1]=="runs",2] <- length(unique(x$run$run_id)) x$sqlite_sequence[x$sqlite_sequence[,1]=="placements",2] <- length(pid) } return(x) }
/scratch/gouwar.j/cran-all/cranData/BoSSA/R/sub_pplace.R
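# Added usage sketch (mirrors the package vignette): subset a pplace object
# either by placement ids or by sequence names, one criterion per call.
# Assumes 'pplace' was read with read_sqlite() as above.
sub1 <- sub_pplace(pplace, placement_id = 1:100)
ids <- sample(pplace$multiclass$name, 50)
sub2 <- sub_pplace(pplace, ech_id = ids)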
write_jplace <- function(x,outfile){ if(class(x)!="pplace" & class(x)!="jplace"){ stop("ERROR: the input is not an object of class pplace or jplace") } if(class(x)=="pplace"){ if(nrow(x$run)>1){ x <- sub_pplace(x,run_id=x$run[1,1]) warning("Only the placements from the first run were exported") } } if(nrow(x$placement_positions)>0){ if(class(x)=="pplace"){ bid <- x$placement_positions$location x$placement_positions$location <- x$edge_key[2,match(bid,x$edge_key[1,])] } if(class(x)=="jplace"){ bid <- x$placement_positions$edge_num x$placement_positions$edge_num <- x$edge_key[2,match(bid,x$edge_key[1,])] } } write("{\"tree\":",outfile) tree_string <- x$original_tree write(paste("\"",tree_string,"\",",sep=""),outfile,append=TRUE) write("\"placements\":\n\t[",outfile,append=TRUE) pid <- unique(x$placement_positions[,1]) for(i in 1:length(pid)){ nmi <- NULL placei <- x$placement_positions[x$placement_positions[,1]==pid[i],] if(class(x)=="jplace" & !is.null(x$multiclass$nm)) nmi <- x$multiclass$nm[x$multiclass$placement_id==pid[i]] if(class(x)=="jplace") namei <- x$multiclass$name[x$multiclass$placement_id==pid[i]] if(class(x)=="pplace") namei <- x$placement_names$name[x$placement_names$placement_id==pid[i]] if(class(x)=="pplace") nmi <- x$placement_names$mass[x$placement_names$placement_id==pid[i]] if(class(x)=="pplace") colnames(placei)[2:7] <- c("edge_num","like_weight_ratio","likelihood","distal_length","pendant_length","classification") write_placement(placei,namei,nmi,outfile) if(i!=length(pid)){ write(",",outfile,append=TRUE) } } if(class(x)=="jplace") runinfo <- x$run[1] if(class(x)=="pplace") runinfo <- x$run$params[1] if(is.null(x$placement_positions$tax_id) & is.null(x$placement_positions$classification)) write(paste("],\n\"metadata\":\n{\"invocation\":\"",runinfo,"\"},\n\"version\": 3,\n\"fields\":[\"distal_length\",\"edge_num\",\"like_weight_ratio\",\"likelihood\",\"pendant_length\"]\n}",sep=""),outfile,append=TRUE) if(!is.null(x$placement_positions$tax_id) | !is.null(x$placement_positions$classification)) write(paste("],\n\"metadata\":\n{\"invocation\":\"",runinfo,"\"},\n\"version\": 3,\n\"fields\":[\"classification\",\"distal_length\",\"edge_num\",\"like_weight_ratio\",\"likelihood\",\"pendant_length\"]\n}",sep=""),outfile,append=TRUE) } write_placement <- function(placei,namei,nmi,outfile){ write("{\"p\":[",outfile,append=TRUE) for(i in 1:nrow(placei)){ comma <- "," if(i==nrow(placei)) comma <- "" if(is.null(placei$classification)) write(paste("\t[",placei$distal_length[i],",",placei$edge_num[i],",",placei$like_weight_ratio[i],",",placei$likelihood[i],",",placei$pendant_length[i],"]",comma,sep=""),outfile,append=TRUE) if(!is.null(placei$classification)) write(paste("\t[\"",placei$classification[i],"\",",placei$distal_length[i],",",placei$edge_num[i],",",placei$like_weight_ratio[i],",",placei$likelihood[i],",",placei$pendant_length[i],"]",comma,sep=""),outfile,append=TRUE) } if(is.null(nmi) & length(namei)==1) write(paste("],\n\"n\":[\"",namei,"\"]\n}",sep=""),outfile,append=TRUE) if(!is.null(nmi) & length(namei)==1) write(paste("],\n\"nm\":[\n\t[\"",namei,"\",",nmi,"]\n]\n}",sep=""),outfile,append=TRUE) if(is.null(nmi) & length(namei)>1){ write("],\n\"n\":[",outfile,append=TRUE) for(i in 1:length(namei)){ comma <- "," if(i==length(namei)) comma <- "" write(paste("[\"",namei[i],"\"]",comma,sep=""),outfile,append=TRUE) } write("]\n}",outfile,append=TRUE) } if(!is.null(nmi) & length(namei)>1){ write("],\n\"nm\":[",outfile,append=TRUE) for(i in 1:length(namei)){ comma <- "," 
if(i==length(namei)) comma <- "" write(paste("[\"",namei[i],"\",",nmi[i],"]",comma,sep=""),outfile,append=TRUE) } write("]\n}",outfile,append=TRUE) } }
/scratch/gouwar.j/cran-all/cranData/BoSSA/R/write_jplace.R
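# Added usage sketch: export a placement object back to a jplace file; for a
# pplace object holding several runs, only the first run is written (with a
# warning). The output file name is a hypothetical example.
write_jplace(pplace, "example_out.jplace")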
## ----style, echo = FALSE, results = 'asis'------------------------------------ ## ----load-packages, message=FALSE, warning=FALSE------------------------------ library("BoSSA") ## ----------------------------------------------------------------------------- refpkg_path <- paste(find.package("BoSSA"),"/extdata/example.refpkg",sep="") refpkg(refpkg_path) ## ----------------------------------------------------------------------------- taxo <- refpkg(refpkg_path,type="taxonomy") head(taxo) ## ----pie1, fig.width=5, fig.height=5------------------------------------------ refpkg(refpkg_path,type="pie",cex.text=0.5) ## ----pie2, fig.width=5, fig.height=5------------------------------------------ refpkg(refpkg_path,type="pie",rank_pie=c("class","order","family"),cex.text=0.6) ## ----refseqtree, fig.width=8, fig.height=8------------------------------------ refpkg(refpkg_path,type="tree",rank_tree="class",cex.text=0.5) ## ----------------------------------------------------------------------------- sqlite_file <- system.file("extdata", "example.sqlite", package = "BoSSA") jplace_file <- system.file("extdata", "example.jplace", package = "BoSSA") ## ----------------------------------------------------------------------------- pplace <- read_sqlite(sqlite_file,jplace_file) pplace ## ----------------------------------------------------------------------------- str(pplace) ## ----test1, fig.width=9, fig.height=9----------------------------------------- plot(pplace,type="number",main="number",cex.number=1.5) ## ----test2, fig.width=9,fig.height=9------------------------------------------ plot(pplace,type="color",main="color",edge.width=2) ## ----testfat, fig.width=9,fig.height=9---------------------------------------- plot(pplace,type="fattree",main="fattree") ## ----test3, fig.width=9,fig.height=9------------------------------------------ plot(pplace,type="precise",main="precise") ## ----test4, fig.width=9,fig.height=9------------------------------------------ plot(pplace,type="precise",main="precise",transfo=function(X){X*2}) ## ----------------------------------------------------------------------------- sub1 <- sub_pplace(pplace,placement_id=1:100) sub1 ## ----------------------------------------------------------------------------- ids <- sample(pplace$multiclass$name,50) sub2 <- sub_pplace(pplace,ech_id=ids) sub2 ## ----------------------------------------------------------------------------- pplace_table <- pplace_to_table(pplace,type="best") head(pplace_table,n=3) ## ----------------------------------------------------------------------------- example_contingency <- pplace_to_matrix(pplace,c(rep("sample1",50),rep("sample2",50)),tax_name=TRUE) example_contingency ## ----------------------------------------------------------------------------- example_taxo <- pplace_to_taxonomy(pplace,taxo,tax_name=TRUE,rank=c("order","family","genus","species")) head(example_taxo) ## ----------------------------------------------------------------------------- example_OTU <- matrix(sample(1:100, 500, replace = TRUE), nrow = 100, ncol = 5,dimnames=list(pplace$multiclass$name,paste("sample",1:5,sep="_"))) head(example_OTU) ## ----------------------------------------------------------------------------- #library(phyloseq) #example_phyloseq <- phyloseq(otu_table(example_OTU,taxa_are_rows=TRUE),tax_table(example_taxo)) #example_phyloseq ## ----------------------------------------------------------------------------- citation("BoSSA")
/scratch/gouwar.j/cran-all/cranData/BoSSA/inst/doc/bossa-analysis.R
--- title: "Example of placement analysis using BoSSA" output: prettydoc::html_pretty: theme: architect highlight: github vignette: > %\VignetteIndexEntry{Example of placement analysis using BoSSA} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- <!-- %% \VignetteEngine{knitr::knitr} --> ```{r style, echo = FALSE, results = 'asis'} ``` Please report comments or bugs to Pierre Lefeuvre - <[email protected]> [BoSSA CRAN page](https://cran.r-project.org/package=BoSSA) # Summary A phylogenetic placement corresponds to the position of a query sequence in a reference tree. Different tools exits to infer phylogenetic placements, such as [pplacer](https://matsen.fhcrc.org/pplacer/), [EPA](http://sco.h-its.org/exelixis/web/software/epa/index.html) or [RAPPAS](https://github.com/phylo42/RAPPAS). Importantly, these three programs produce placements under a common [file format](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0031009). Placements can later be analysed using the [guppy](https://matsen.github.io/pplacer/generated_rst/guppy.html) software from the pplacer suite to obtain statistically based taxonomic classification of sequences. The BoSSA package implements functions to reads, plots and summarizes phylogentic placements. This vignette is intended to provide examples of placements analyses using BoSSA. # Important note - The placement mass (potentially) available in the jplace and sqlite files are imported in R (within the jplace and pplace objects) but aren't use in the analysis. You should use the "N" parameter (available in several fucntions for the package) to use different weight for each placement. - The reference packages shiped with BoSSA are incomplete (they lack the alignment file) in order to reduce the package size. Whereas, the information available is sufficient to draw summary statistics, it won't be enough to perform actual phylogenetic placement. - When the jplace or sqlite files are import into R, the node numbering available in the original file is converted to the class "phylo" numbering. # How to obtain phylogentic placement file suitable for analysis with BoSSA ? The process to obtain placement files is dependent of the program you use. Assuming you are using pplacer, the process would be (1) build a reference package that contains an align set of reference sequences and a reference phylogenetic tree, (2) align query sequences to the reference alignment, (3) use pplacer to infer placements (jplace file output, format describe [here](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0031009)) and optionally (4) infer the classification of each sequences using guppy (sqlite file output). - The construction of the reference package could be a bit tricky. The [taxtastic](https://github.com/fhcrc/taxtastic) tool is extremely helpfull to this end. A tutorial can be find [here](http://fhcrc.github.io/taxtastic/quickstart.html). A BoSSA vignette on refpkg construction is also available. - [HMMER](http://hmmer.org/) and [MAFFT](https://mafft.cbrc.jp/alignment/software/addsequences.html) can be use to align sequences to a reference alignment. - For phylogenetic placement, a detailed tutorial is available [here](http://fhcrc.github.io/microbiome-demo/). Let's say you have obtained a reference package (refpkg), a placement file (jplace file) and a guppy classification output (sqlite file). 
# How to obtain phylogenetic placement files suitable for analysis with BoSSA ?

The process to obtain placement files depends on the program you use. Assuming you are using pplacer, the process would be: (1) build a reference package that contains an aligned set of reference sequences and a reference phylogenetic tree, (2) align query sequences to the reference alignment, (3) use pplacer to infer placements (jplace file output, format described [here](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0031009)) and optionally (4) infer the classification of each sequence using guppy (sqlite file output).

- The construction of the reference package could be a bit tricky. The [taxtastic](https://github.com/fhcrc/taxtastic) tool is extremely helpful to this end. A tutorial can be found [here](http://fhcrc.github.io/taxtastic/quickstart.html). A BoSSA vignette on refpkg construction is also available.
- [HMMER](http://hmmer.org/) and [MAFFT](https://mafft.cbrc.jp/alignment/software/addsequences.html) can be used to align sequences to a reference alignment.
- For phylogenetic placement, a detailed tutorial is available [here](http://fhcrc.github.io/microbiome-demo/).

Let's say you have obtained a reference package (refpkg), a placement file (jplace file) and a guppy classification output (sqlite file). The example files presented here are derived from the [reference packages and jplace files](https://github.com/fhcrc/microbiome-demo/zipball/master) from the [Matsen group pplacer tutorials](https://fhcrc.github.io/microbiome-demo/). The sqlite file was obtained using the following command:

```
guppy classify --multiclass-min 0 --cutoff 0.5 -c example.refpkg --sqlite example.sqlite example.jplace
```

# Exploration of a reference package

Let's start by loading the `BoSSA` package:

```{r load-packages, message=FALSE, warning=FALSE}
library("BoSSA")
```

A good practice is to inspect the refpkg content first.

```{r }
refpkg_path <- paste(find.package("BoSSA"),"/extdata/example.refpkg",sep="")
refpkg(refpkg_path)
```

It is possible to extract the taxonomy of the sequences included in the refpkg...

```{r }
taxo <- refpkg(refpkg_path,type="taxonomy")
head(taxo)
```

...or display a pie chart that summarizes the taxonomy...

```{r pie1, fig.width=5, fig.height=5}
refpkg(refpkg_path,type="pie",cex.text=0.5)
```

... or a subset of the taxonomy levels. Here, an example with the "class", "order" and "family" levels. Note there is a slight misalignment between the text labels and the slices... this will need a fix in a future package update.

```{r pie2, fig.width=5, fig.height=5}
refpkg(refpkg_path,type="pie",rank_pie=c("class","order","family"),cex.text=0.6)
```

Finally, a tree display with branches colored according to a given taxonomic level is available. Here tips are colored according to the "class" classification.

```{r refseqtree, fig.width=8, fig.height=8}
refpkg(refpkg_path,type="tree",rank_tree="class",cex.text=0.5)
```

# Loading the example data

The BoSSA package comes along with examples of phylogenetic placements from the Matsen group.

```{r}
sqlite_file <- system.file("extdata", "example.sqlite", package = "BoSSA")
jplace_file <- system.file("extdata", "example.jplace", package = "BoSSA")
```

To read the data, use the `read_sqlite` function.

```{r}
pplace <- read_sqlite(sqlite_file,jplace_file)
pplace
```

A summary of the object is printed with the number of runs, the command line, a short description of the phylogenetic tree, the number of placements and the number of sequences being placed. Pplace objects are stored in a list of 15 components, with 12 components being outputs from a [guppy classify](https://matsen.github.io/pplacer/generated_rst/guppy_classify.html#guppy-classify) run and 3 components corresponding to the phylogenetic tree used for placement:

```{r }
str(pplace)
```

Among these:

- the `run` element contains the run id and the command line summary
- the `taxa` element is a data frame with the whole taxonomy available in the reference package
- the `multiclass` element is a data frame with the taxonomic assignation of each placement
- the `placement_positions` element is a data frame with the position of each placement over the reference phylogenetic tree
- the `arbre` element is the class `phylo` object of the reference phylogenetic tree
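For a quick look at these components, one could inspect them directly (a short illustrative sketch added here, not part of the original vignette):

```{r, eval=F}
head(pplace$multiclass)           # per-sequence taxonomic assignations
head(pplace$placement_positions)  # placement coordinates on the reference tree
pplace$arbre                      # the reference tree as a "phylo" object
```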
# Some plots

Four different plots are available to display placements on a phylogenetic tree:

- the `number` plot. The number of placements associated with each branch is indicated. Note that this representation may be hard to read due to overlaps between number boxes. Placement numbers are obtained by multiplying the placement weights with the ML ratio of the placement probabilities. Placement sizes are then rounded. A zero indicates a size greater than 0 but smaller than 1.

```{r test1, fig.width=9, fig.height=9}
plot(pplace,type="number",main="number",cex.number=1.5)
```

- the `color` plot is the best option. Branches with placements are colored according to the number of sequences they bear.

```{r test2, fig.width=9,fig.height=9}
plot(pplace,type="color",main="color",edge.width=2)
```

- in the `fattree` plot, branch width is proportional to the number of sequences the branches bear.

```{r testfat, fig.width=9,fig.height=9}
plot(pplace,type="fattree",main="fattree")
```

- in the `precise` plot, dots are drawn at the exact placement positions. Whereas the color of the dots depends on the pendant branch length, their size depends on the placement sizes. Note that placements are drawn one above the other.

```{r test3, fig.width=9,fig.height=9}
plot(pplace,type="precise",main="precise")
```

Note that it is possible to apply a function to modify the dot size using the `transfo` option. In the following example, the dot size is multiplied by 2. In some other cases `log` or `log10` transformations could be useful. Beware that when using the `transfo` option, the legend no longer corresponds to the placement size but to the transformed dot size (*i.e.* the transformation function applied to the dot size).

```{r test4, fig.width=9,fig.height=9}
plot(pplace,type="precise",main="precise",transfo=function(X){X*2})
```

# Subsetting the pplace object

Placement objects can be subsetted. This could be done using placement ids...

```{r }
sub1 <- sub_pplace(pplace,placement_id=1:100)
sub1
```

...or using placement names.

```{r }
ids <- sample(pplace$multiclass$name,50)
sub2 <- sub_pplace(pplace,ech_id=ids)
sub2
```

# Conversion

### To a table

The `pplace_to_table` function produces a table that contains the placement information along with the classification of each sequence. The output can be limited to the "best" placement (as in the example, i.e. the placement with the highest likelihood for each sequence).

```{r }
pplace_table <- pplace_to_table(pplace,type="best")
head(pplace_table,n=3)
```

### To a contingency matrix

The `pplace_to_matrix` function produces a contingency table. Let's say the first 50 sequences in the multiclass table correspond to sequences from "sample1" and the following 50 correspond to "sample2"; the function then outputs a contingency table for these two samples. You can either have the taxonomic names (tax_name=TRUE, as in the example) or keep the taxonomic ids (tax_name=FALSE).

```{r}
example_contingency <- pplace_to_matrix(pplace,c(rep("sample1",50),rep("sample2",50)),tax_name=TRUE)
example_contingency
```

### To a taxonomy

Using the `pplace_to_taxonomy` function, a taxonomy table is obtained for each sequence with the taxonomy levels defined in the reference package. The taxonomy can be limited to a set of levels using the `rank` option.

```{r}
example_taxo <- pplace_to_taxonomy(pplace,taxo,tax_name=TRUE,rank=c("order","family","genus","species"))
head(example_taxo)
```

### Make a phyloseq object

Assuming the sequences in the pplace object represent centroids of sequence clusters obtained from multiple samples, you can create a phyloseq object using the taxonomy table and an appropriate OTU file.

```{r}
example_OTU <- matrix(sample(1:100, 500, replace = TRUE), nrow = 100, ncol = 5,dimnames=list(pplace$multiclass$name,paste("sample",1:5,sep="_")))
head(example_OTU)
```

The example below is not run (it is commented out) due to errors/warnings triggered by the use of Bioconductor packages (i.e. phyloseq) in CRAN vignettes on some platforms.
Just uncomment the code if you would like to give it a try.

```{r}
#library(phyloseq)
#example_phyloseq <- phyloseq(otu_table(example_OTU,taxa_are_rows=TRUE),tax_table(example_taxo))
#example_phyloseq
```

# Citation

If you find BoSSA and/or its tutorials useful, you may cite:

```{r}
citation("BoSSA")
```

# Other resources

### On phylogenetic placements

[pplacer website](https://matsen.fhcrc.org/pplacer/) and [documentation](http://matsen.github.io/pplacer/)

[taxtastic](https://github.com/fhcrc/taxtastic)

### Other R packages with a related topic

[ggtree](https://bioconductor.org/packages/release/bioc/html/ggtree.html) and [clstutils](https://bioconductor.org/packages/release/bioc/html/clstutils.html)

### R packages used by BoSSA

[RSQLite](https://cran.r-project.org/package=RSQLite) and [jsonlite](https://cran.r-project.org/package=jsonlite) to read files, [ape](https://cran.r-project.org/package=ape) and [phangorn](https://cran.r-project.org/package=phangorn) to manipulate phylogenetic trees and [plotrix](https://cran.r-project.org/package=plotrix) for pie charts.
/scratch/gouwar.j/cran-all/cranData/BoSSA/inst/doc/bossa-analysis.Rmd
## ----style, echo = FALSE, results = 'asis'------------------------------------

## -----------------------------------------------------------------------------
library("rentrez")
library("httr")
library("XML")
library("ape")
library("BoSSA")
set_config(config(http_version = 0))

## ---- eval=F------------------------------------------------------------------
# r_search <- entrez_search(db="taxonomy", term="polerovirus")
# r_search$ids

## ---- eval=F------------------------------------------------------------------
# r_search <- entrez_search(db="nucleotide", term="txid119164[orgn] NOT srcdb_refseq[PROP]")
# r_search$count

## ---- eval=F------------------------------------------------------------------
# r_search <- entrez_search(db="nucleotide", term="txid119164[orgn] NOT srcdb_refseq[PROP] AND 5000:6500[slen]")
# r_search$count

## ---- eval=F------------------------------------------------------------------
# r_search <- entrez_search(db="nucleotide", term="txid119164[orgn] NOT srcdb_refseq[PROP] AND 5000:6500[slen]",retmax=1000)
# gi <- r_search$ids
# gi[1:10]

## ---- eval=F------------------------------------------------------------------
# r_search <- entrez_search(db="nucleotide", term="txid119164[orgn] NOT srcdb_refseq[PROP] AND 5000:6500[slen]",use_history=TRUE)
# all_recs <- NULL
# for(seq_start in seq(1,ceiling(r_search$count/200)*200,200)){
#   all_recs <- c(all_recs,entrez_fetch(db="nuccore", web_history=r_search$web_history,rettype="gbc",retmode="xml",retmax=200,retstart=seq_start))
# }
# rec_list <- do.call(c,lapply(all_recs,xmlToList))

## ---- eval=F------------------------------------------------------------------
# write(sapply(rec_list,function(X){paste(">",X$INSDSeq_locus,"\n",X$INSDSeq_sequence,sep="")}),"polerovirus_from_genbank.fasta")

## ---- eval=F------------------------------------------------------------------
# source_name <- t(sapply(rec_list,function(X){c(X$INSDSeq_locus,X$INSDSeq_organism)}))

## -----------------------------------------------------------------------------
tree <- read.tree(paste(find.package("BoSSA"),"/extdata/polerovirus_from_genbank_MAFFT.tre",sep=""))
plot(tree,cex=0.4,no.margin=TRUE)
nodelabels(cex=0.8)

## -----------------------------------------------------------------------------
root_tree <- root(tree,node=231,resolve=TRUE)
plot(root_tree,cex=0.4,no.margin=TRUE)

## -----------------------------------------------------------------------------
write.tree(root_tree,"polerovirus_ROOTED.tre")

## ---- eval=F------------------------------------------------------------------
# r_search <- entrez_search(db="taxonomy", term="txid119164[orgn]",retmax=1000)
# tax_ids <- r_search$ids
# write(tax_ids,"taxonomy_polerovirus.id")

## ---- eval=F------------------------------------------------------------------
# taxo <- read.csv(paste(find.package("BoSSA"),"/extdata/polerovirus_taxonomy.csv",sep=""))
# tax_id <- taxo$tax_id[match(source_name[,2],taxo$tax_name)]
# info <- data.frame(seqname=source_name[,1],accession=source_name[,1],tax_id=tax_id,species=source_name[,2],is_type=rep("no",nrow(source_name)))
# write.table(info,"polerovirus_info.csv",sep=",",row.names=FALSE)

## -----------------------------------------------------------------------------
refpkg_path <- paste(find.package("BoSSA"),"/extdata/polerovirus.refpkg",sep="")
refpkg(refpkg_path)

## ----tree1, fig.width=5, fig.height=5-----------------------------------------
refpkg(refpkg_path,type="tree",cex.text=0.3,rank_tree="species")

## ----pie1, fig.width=5, fig.height=5------------------------------------------
refpkg(refpkg_path,type="pie",rank_pie="species",cex.text=0.6)

## -----------------------------------------------------------------------------
refpkg(refpkg_path,type="krona")

## ---- eval=F------------------------------------------------------------------
# d <- cophenetic.phylo(root_tree)
#
# ids <- NULL
# for(i in 1:length(unique(info$species))){
#   usp <- unique(info$species)[i]
#   sub <- info[as.character(info$species)==usp,]
#   acc <- as.character(sub[,1])
#
#   # the following code line prevents the code from crashing
#   # i.e. new sequences not available in the example may be uploaded
#   # when you run the code
#   acc <- acc[!is.na(match(acc,colnames(d)))]
#
#   if(length(acc)<=10){
#     ids <- c(ids,acc)
#   }
#   if(length(acc)>10){
#     h <- hclust(as.dist(d[acc,acc]))
#     grp <- cutree(h,k=10)
#     ids <- c(ids,names(grp)[match(1:10,grp)])
#   }
# }
#
# to_remove <- root_tree$tip.label[!root_tree$tip.label%in%ids]

## ---- eval=F------------------------------------------------------------------
# root_tree2 <- drop.tip(root_tree,to_remove)
# write.tree(root_tree2,"polerovirus_ROOTED_SUBSAMPLED.tre")

## -----------------------------------------------------------------------------
citation("BoSSA")
/scratch/gouwar.j/cran-all/cranData/BoSSA/inst/doc/bossa-refpkg.R
--- title: "Reference package construction from scratch" output: prettydoc::html_pretty: theme: architect highlight: github vignette: > %\VignetteIndexEntry{Reference package construction from scratch} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r style, echo = FALSE, results = 'asis'} ``` Please report comments or bugs to Pierre Lefeuvre - <[email protected]> [BoSSA CRAN page](https://cran.r-project.org/package=BoSSA) # Important notice Note that most of the R code included in the vignette is not executed during vignette build. Some of the rentrez (very good) package code triggers HTTP failure during the CRAN check and is thus not evaluated during the build. It should run smoothly on you desktop computer. # Summary A phylogenetic placement corresponds to the position of a query sequence in a reference tree. Phylogenetic placement inference using [pplacer](https://matsen.fhcrc.org/pplacer/) requires the construction of "reference packages", *i.e.* informations on the phylogeny and taxonomy of a given group of organisms. This vignette presents example of reference package construction using [taxtastic](https://github.com/fhcrc/taxtastic), [rppr](http://matsen.github.io/pplacer/generated_rst/rppr.html) and R. # The polerovirus example Imagine you obtained some viral contigs that were assigned to the *Polerovirus* genus using a BLAST search. You want to obtain phylogenetic placements of these sequences using pplacer and now have to build a reference package for this genus. A search on the [ICTV website](https://talk.ictvonline.org/ictv-reports/ictv_9th_report/positive-sense-rna-viruses-2011/w/posrna_viruses/265/luteoviridae) informs us that polerovirus belongs to the Luteovidiae family and that their "genome size is fairly uniform ranging from 5.6 kb to 6.0 kb". ### Loading the required packages We will use the [rentrez package](https://cran.r-project.org/package=rentrez) to query NCBI from R. Along with a vignette available at the [rentrez CRAN webpage](https://cran.r-project.org/package=rentrez), examples of rentrez use are available [here](https://docs.ropensci.org/rentrez/). ```{r } library("rentrez") library("httr") library("XML") library("ape") library("BoSSA") set_config(config(http_version = 0)) ``` ### How many polerovirus sequences are available on NCBI ? We first search for the taxonomic id associated to the *Polerovirus* genus in NCBI... ```{r, eval=F} r_search <- entrez_search(db="taxonomy", term="polerovirus") r_search$ids ``` ... and use this taxonomic id (i.e. 119164) to query the nucleotide database. Note that we remove the reference sequence set from the search (using "NOT srcdb_refseq[PROP]") as these are duplicates from records already available in the nucleotide database. ```{r, eval=F} r_search <- entrez_search(db="nucleotide", term="txid119164[orgn] NOT srcdb_refseq[PROP]") r_search$count ``` This number represents the total number of sequences from the genus polerovirus. We will now search for full genomes using the "AND 5000:6500[slen]" query. ```{r, eval=F} r_search <- entrez_search(db="nucleotide", term="txid119164[orgn] NOT srcdb_refseq[PROP] AND 5000:6500[slen]") r_search$count ``` To recover all the GenBank identifiers (gi), we have to increase the retmax parameter (default is 20) to something superior to r_search$count. 
```{r, eval=F}
r_search <- entrez_search(db="nucleotide", term="txid119164[orgn] NOT srcdb_refseq[PROP] AND 5000:6500[slen]",retmax=1000)
gi <- r_search$ids
gi[1:10]
```

### Download all these sequences

#### Get the GenBank information in XML format

We now use these gi to download the information relative to the sequences in XML format. XML is a handy format as it allows one to directly extract specific fields from the GenBank file. Because the number of sequences may be a bit large, we will use the history option in our rentrez query. Sequences will then be obtained in batches of 200.

```{r, eval=F}
r_search <- entrez_search(db="nucleotide", term="txid119164[orgn] NOT srcdb_refseq[PROP] AND 5000:6500[slen]",use_history=TRUE)
all_recs <- NULL
for(seq_start in seq(1,ceiling(r_search$count/200)*200,200)){
  all_recs <- c(all_recs,entrez_fetch(db="nuccore", web_history=r_search$web_history,rettype="gbc",retmode="xml",retmax=200,retstart=seq_start))
}
rec_list <- do.call(c,lapply(all_recs,xmlToList))
```

#### Extract the fasta from the XML

```{r, eval=F}
write(sapply(rec_list,function(X){paste(">",X$INSDSeq_locus,"\n",X$INSDSeq_sequence,sep="")}),"polerovirus_from_genbank.fasta")
```

#### Extract the taxonomy from the XML

Also, we need to extract the polerovirus classification of each sequence.

```{r, eval=F}
source_name <- t(sapply(rec_list,function(X){c(X$INSDSeq_locus,X$INSDSeq_organism)}))
```

# Alignment and phylogeny

Now that we have the sequences on disk, we can align them (here using MAFFT) and construct a phylogenetic tree (here using FastTree)...

```
### in bash
mafft --adjustdirection --reorder polerovirus_from_genbank.fasta > polerovirus_from_genbank_MAFFT.fasta
FastTree -nt -gamma -gtr -log polerovirus_from_genbank_MAFFT.log polerovirus_from_genbank_MAFFT.fasta > polerovirus_from_genbank_MAFFT.tre
```

...and import the tree in R for control.

```{r }
tree <- read.tree(paste(find.package("BoSSA"),"/extdata/polerovirus_from_genbank_MAFFT.tre",sep=""))
plot(tree,cex=0.4,no.margin=TRUE)
nodelabels(cex=0.8)
```

Note that with the "adjustdirection" option, MAFFT generates reverse complement sequences as needed and aligns them together with the remaining sequences. The best orientation is conserved for each sequence. When the reverse complement is aligned, the program adds the "\_R\_" prefix to the name of the sequence. In this case, and in order to keep the correspondence between information files and sequence names, you'll have to edit the sequence names.
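For example, the prefix could be stripped from the tree tip labels directly in R (a hedged sketch, not part of the original tutorial; it assumes the affected names only need the leading "\_R\_" removed):

```{r, eval=F}
tree$tip.label <- sub("^_R_", "", tree$tip.label)
```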
After plotting the tree and the node labels, it is apparent that node 231 may be a good rooting point (this may change depending on the sequence set you download, *i.e.* as more polerovirus sequences are uploaded to NCBI, the tree structure and node numbering will change).

```{r }
root_tree <- root(tree,node=231,resolve=TRUE)
plot(root_tree,cex=0.4,no.margin=TRUE)
```

The rooted tree is then written to the disk.

```{r}
write.tree(root_tree,"polerovirus_ROOTED.tre")
```

# The Taxonomy

### Download the polerovirus taxonomy IDs

We need to obtain the complete taxonomy information for the poleroviruses. Taxtastic has a function for that. We first download the full taxonomy from NCBI using taxit and subset it for the polerovirus tax ids.

```{r, eval=F}
r_search <- entrez_search(db="taxonomy", term="txid119164[orgn]",retmax=1000)
tax_ids <- r_search$ids
write(tax_ids,"taxonomy_polerovirus.id")
```

```
### in bash
taxit new_database
taxit taxtable -f taxonomy_polerovirus.id -o polerovirus_taxonomy.csv ncbi_taxonomy.db
```

### The information file

We can now create and write the information file for the sequences from our phylogenetic tree.

```{r, eval=F}
taxo <- read.csv(paste(find.package("BoSSA"),"/extdata/polerovirus_taxonomy.csv",sep=""))
tax_id <- taxo$tax_id[match(source_name[,2],taxo$tax_name)]
info <- data.frame(seqname=source_name[,1],accession=source_name[,1],tax_id=tax_id,species=source_name[,2],is_type=rep("no",nrow(source_name)))
write.table(info,"polerovirus_info.csv",sep=",",row.names=FALSE)
```

# Create the refpkg using taxit

The reference package is created using the taxit create command.

```
### in bash
taxit create -l polerovirus -P polerovirus.refpkg \
--taxonomy polerovirus_taxonomy.csv \
--aln-fasta polerovirus_from_genbank_MAFFT.fasta \
--seq-info polerovirus_info.csv \
--tree-stats polerovirus_from_genbank_MAFFT.log \
--tree-file polerovirus_ROOTED.tre --no-reroot
```

### Check the refpkg using rppr

Rppr offers the possibility to check the reference package using the following commands.

```
### in bash
rppr check -c polerovirus.refpkg
rppr info -c polerovirus.refpkg
```

### Refpkg stats using BoSSA

Using BoSSA, we can extract some summary statistics and draw some plots.

- A reference package summary

```{r}
refpkg_path <- paste(find.package("BoSSA"),"/extdata/polerovirus.refpkg",sep="")
refpkg(refpkg_path)
```

- A tree with tips colored according to a taxonomic level, here the species

```{r tree1, fig.width=5, fig.height=5}
refpkg(refpkg_path,type="tree",cex.text=0.3,rank_tree="species")
```

- A pie chart summarizing the taxonomy, here again at the species level. Note there is a slight misalignment between the text labels and the slices...

```{r pie1, fig.width=5, fig.height=5}
refpkg(refpkg_path,type="pie",rank_pie="species",cex.text=0.6)
```

- Alternatively, an input file for KronaTools can be generated to produce a krona chart.

```{r}
refpkg(refpkg_path,type="krona")
```

A file (default is for_krona.txt) is generated and can be used to produce a krona chart with the ImportText.pl script from the [Krona software](https://github.com/marbl/Krona/wiki/KronaTools):

```
ImportText.pl for_krona.txt
```

# Subsample the refpkg

It could be interesting to reduce the size of the reference package. Whereas there is still a decent number of sequences in this polerovirus dataset, this number could be much higher and represent a computational burden for the rest of the analysis. Here is a code example to subsample each species down to a maximum of 10 sequences. [T-Coffee](http://tcoffee.readthedocs.io/en/latest/tcoffee_main_documentation.html#extracting-removing-sequences-with-the-identity) could be useful if you would like to subsample based on diversity.

```{r, eval=F}
d <- cophenetic.phylo(root_tree)

ids <- NULL
for(i in 1:length(unique(info$species))){
  usp <- unique(info$species)[i]
  sub <- info[as.character(info$species)==usp,]
  acc <- as.character(sub[,1])

  # the following code line prevents the code from crashing
  # i.e. new sequences not available in the example may be uploaded
  # when you run the code
  acc <- acc[!is.na(match(acc,colnames(d)))]

  if(length(acc)<=10){
    ids <- c(ids,acc)
  }
  if(length(acc)>10){
    h <- hclust(as.dist(d[acc,acc]))
    grp <- cutree(h,k=10)
    ids <- c(ids,names(grp)[match(1:10,grp)])
  }
}

to_remove <- root_tree$tip.label[!root_tree$tip.label%in%ids]
```

Note that while the highly recommended practice is to subset the alignment, re-align and compute a new phylogenetic tree, one can also directly subset the already available phylogeny (in a "quick and dirty" way).

```{r, eval=F}
root_tree2 <- drop.tip(root_tree,to_remove)
write.tree(root_tree2,"polerovirus_ROOTED_SUBSAMPLED.tre")
```

Using these new files, you can create another, smaller refpkg using the "taxit create" command as described above.

# Citation

If you find BoSSA and/or its tutorials useful, you may cite:

```{r}
citation("BoSSA")
```

# Other resources

### On phylogenetic placements

[pplacer website](https://matsen.fhcrc.org/pplacer/) and [documentation](http://matsen.github.io/pplacer/)

[taxtastic](https://github.com/fhcrc/taxtastic)

### Other R packages with a related topic

[ggtree](https://bioconductor.org/packages/release/bioc/html/ggtree.html) and [clstutils](https://bioconductor.org/packages/release/bioc/html/clstutils.html)

### On krona charts

[krona](https://github.com/marbl/Krona/wiki/KronaTools)

### R packages used by BoSSA

[RSQLite](https://cran.r-project.org/package=RSQLite) and [jsonlite](https://cran.r-project.org/package=jsonlite) to read files, [ape](https://cran.r-project.org/package=ape) and [phangorn](https://cran.r-project.org/package=phangorn) to manipulate phylogenetic trees and [plotrix](https://cran.r-project.org/package=plotrix) for pie charts.
/scratch/gouwar.j/cran-all/cranData/BoSSA/inst/doc/bossa-refpkg.Rmd
## ----style, echo = FALSE, results = 'asis'------------------------------------

## -----------------------------------------------------------------------------
library("ape")
library("BoSSA")

## -----------------------------------------------------------------------------
test_tree <- rtree(20)

## ----tree1, fig.width=5,fig.height=5------------------------------------------
circular_tree(test_tree)

## ----tree3, fig.width=5,fig.height=5------------------------------------------
coord <- circular_tree(test_tree,pos_out=TRUE,tip_labels=FALSE)
# lines between tips 1 and tips 6, 9 and 12
lines(c(coord[coord[,2]==1,4],coord[coord[,2]==6,4]),c(coord[coord[,2]==1,6],coord[coord[,2]==6,6]),col="red")
lines(c(coord[coord[,2]==1,4],coord[coord[,2]==9,4]),c(coord[coord[,2]==1,6],coord[coord[,2]==9,6]),col="red")
lines(c(coord[coord[,2]==1,4],coord[coord[,2]==12,4]),c(coord[coord[,2]==1,6],coord[coord[,2]==12,6]),col="red")
# lines between tip 19 and node 5
lines(c(coord[coord[,2]==19,4],coord[coord[,2]==length(test_tree$t)+5,4]),c(coord[coord[,2]==19,6],coord[coord[,2]==length(test_tree$t)+5,6]),col="blue")

## -----------------------------------------------------------------------------
citation("BoSSA")
/scratch/gouwar.j/cran-all/cranData/BoSSA/inst/doc/bossa-tree.R
--- title: "Circular tree plot" output: prettydoc::html_pretty: theme: architect highlight: github vignette: > %\VignetteIndexEntry{Inside out circular tree plot} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- <!-- %% \VignetteEngine{knitr::knitr} --> ```{r style, echo = FALSE, results = 'asis'} ``` Please report comments or bugs to Pierre Lefeuvre - <[email protected]> [BoSSA CRAN page](https://cran.r-project.org/package=BoSSA) # Cicular tree The "inside-out" circular tree is made to facilitate the visualisation of interactions between individuals in a tree. It is very important to notice that the phylogenetic tree must not be re-rooted or transformed after import in R using the ape read_tree function (i.e. node and tip numbering must be that obtained from the ape read_tree function). ### Loading of the required packages ```{r } library("ape") library("BoSSA") ``` ### Let's use a random tree ```{r} test_tree <- rtree(20) ``` ### Here is the "inside-out" circular tree plot ```{r tree1, fig.width=5,fig.height=5} circular_tree(test_tree) ``` ### Plotting interactions This plot was actually design to offer a conveniant way to display tip to tip, tip to node and node to node interactions. The coordinates of the tips and nodes can be obtained after drawing the tree using the pos_out=TRUE option and use to plot lines (or whatever else) over the tree. ```{r tree3, fig.width=5,fig.height=5} coord <- circular_tree(test_tree,pos_out=TRUE,tip_labels=FALSE) # lines between tips 1 and tips 6, 9 and 12 lines(c(coord[coord[,2]==1,4],coord[coord[,2]==6,4]),c(coord[coord[,2]==1,6],coord[coord[,2]==6,6]),col="red") lines(c(coord[coord[,2]==1,4],coord[coord[,2]==9,4]),c(coord[coord[,2]==1,6],coord[coord[,2]==9,6]),col="red") lines(c(coord[coord[,2]==1,4],coord[coord[,2]==12,4]),c(coord[coord[,2]==1,6],coord[coord[,2]==12,6]),col="red") # lines between tip 19 and node 5 lines(c(coord[coord[,2]==19,4],coord[coord[,2]==length(test_tree$t)+5,4]),c(coord[coord[,2]==19,6],coord[coord[,2]==length(test_tree$t)+5,6]),col="blue") ``` ### Citation If you find BoSSA and/or its tutorials useful, you may cite: ```{r} citation("BoSSA") ```
/scratch/gouwar.j/cran-all/cranData/BoSSA/inst/doc/bossa-tree.Rmd
--- title: "Example of placement analysis using BoSSA" output: prettydoc::html_pretty: theme: architect highlight: github vignette: > %\VignetteIndexEntry{Example of placement analysis using BoSSA} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- <!-- %% \VignetteEngine{knitr::knitr} --> ```{r style, echo = FALSE, results = 'asis'} ``` Please report comments or bugs to Pierre Lefeuvre - <[email protected]> [BoSSA CRAN page](https://cran.r-project.org/package=BoSSA) # Summary A phylogenetic placement corresponds to the position of a query sequence in a reference tree. Different tools exits to infer phylogenetic placements, such as [pplacer](https://matsen.fhcrc.org/pplacer/), [EPA](http://sco.h-its.org/exelixis/web/software/epa/index.html) or [RAPPAS](https://github.com/phylo42/RAPPAS). Importantly, these three programs produce placements under a common [file format](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0031009). Placements can later be analysed using the [guppy](https://matsen.github.io/pplacer/generated_rst/guppy.html) software from the pplacer suite to obtain statistically based taxonomic classification of sequences. The BoSSA package implements functions to reads, plots and summarizes phylogentic placements. This vignette is intended to provide examples of placements analyses using BoSSA. # Important note - The placement mass (potentially) available in the jplace and sqlite files are imported in R (within the jplace and pplace objects) but aren't use in the analysis. You should use the "N" parameter (available in several fucntions for the package) to use different weight for each placement. - The reference packages shiped with BoSSA are incomplete (they lack the alignment file) in order to reduce the package size. Whereas, the information available is sufficient to draw summary statistics, it won't be enough to perform actual phylogenetic placement. - When the jplace or sqlite files are import into R, the node numbering available in the original file is converted to the class "phylo" numbering. # How to obtain phylogentic placement file suitable for analysis with BoSSA ? The process to obtain placement files is dependent of the program you use. Assuming you are using pplacer, the process would be (1) build a reference package that contains an align set of reference sequences and a reference phylogenetic tree, (2) align query sequences to the reference alignment, (3) use pplacer to infer placements (jplace file output, format describe [here](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0031009)) and optionally (4) infer the classification of each sequences using guppy (sqlite file output). - The construction of the reference package could be a bit tricky. The [taxtastic](https://github.com/fhcrc/taxtastic) tool is extremely helpfull to this end. A tutorial can be find [here](http://fhcrc.github.io/taxtastic/quickstart.html). A BoSSA vignette on refpkg construction is also available. - [HMMER](http://hmmer.org/) and [MAFFT](https://mafft.cbrc.jp/alignment/software/addsequences.html) can be use to align sequences to a reference alignment. - For phylogenetic placement, a detailed tutorial is available [here](http://fhcrc.github.io/microbiome-demo/). Let's say you have obtained a reference package (refpkg), a placement file (jplace file) and a guppy classification output (sqlite file). 
The example files presented here are derived from other [reference packages and jplaces files](https://github.com/fhcrc/microbiome-demo/zipball/master) from the [Matsen group pplacer tutorials](https://fhcrc.github.io/microbiome-demo/). The sqlite file was obtained using the following command: ``` guppy classify --multiclass-min 0 --cutoff 0.5 -c example.refpkg --sqlite example.sqlite example.jplace ``` # Exploration of a reference package Let's start by loading the `BoSSA-package: ```{r load-packages, message=FALSE, warning=FALSE} library("BoSSA") ``` A good practice would be to inspect the refpkg content. ```{r } refpkg_path <- paste(find.package("BoSSA"),"/extdata/example.refpkg",sep="") refpkg(refpkg_path) ``` It is possible to extract the taxonomy of the sequences included in the refpkg. ```{r } taxo <- refpkg(refpkg_path,type="taxonomy") head(taxo) ``` or display a pie chart that summarize the taxonomy... ```{r pie1, fig.width=5, fig.height=5} refpkg(refpkg_path,type="pie",cex.text=0.5) ``` ... or a subset of the taxonomy levels. Here, an example with the "class", "order" and "family" levels. Note there is a slight decay between the text labels and slices... this will need a fix in a future package update. ```{r pie2, fig.width=5, fig.height=5} refpkg(refpkg_path,type="pie",rank_pie=c("class","order","family"),cex.text=0.6) ``` Finally, a tree display with branch colored according to a given taxonomic level is available. Here tips are colored according to the "order" classification. ```{r refseqtree, fig.width=8, fig.height=8} refpkg(refpkg_path,type="tree",rank_tree="class",cex.text=0.5) ``` # Loading the example data The BoSSA package comes along with examples of phylogenetic placements from the Masten group. ```{r} sqlite_file <- system.file("extdata", "example.sqlite", package = "BoSSA") jplace_file <- system.file("extdata", "example.jplace", package = "BoSSA") ``` To read the data, use the `read_sqlite` function. ```{r} pplace <- read_sqlite(sqlite_file,jplace_file) pplace ``` A summary of the object is printed with the number of runs, the command line, a short description of the phylogenetic tree, the number of placements and the number of sequences being placed. Pplace objects are stored in a list of 15 components, with 12 components being outputs from a [guppy classify](https://matsen.github.io/pplacer/generated_rst/guppy_classify.html#guppy-classify) run and 3 components corresponding to the phylogenetic tree used for placement: ```{r } str(pplace) ``` Among these: - the `run` element contains the run id and the command line summary - the `taxa` element is a data frame with the whole taxonomy available in the reference package - the `multiclass` element is a data frame with the taxonomic assignation of each placement - the `placement_positions` element is a data frame with the position of each placement over the reference phylogenetic tree - the `arbre` element is the class `phylo` object of the reference phylogenetic tree # Some plots Four different plots are available to display placements on a phylogenetic tree: - the `number` plot. Placement number associated to each branch is indicated. Note that this representation may be hard to read due to overlaps between number boxes. Placement numbers are obtained after the multiplication of their weights with the ML ratio of the placement probabilities. Placement sizes are later round. A zero indicates a size superior to 0 but inferior to 1. 
```{r test1, fig.width=9, fig.height=9} plot(pplace,type="number",main="number",cex.number=1.5) ``` - the `color` plot is the best option. Branches with placement are colored according to the number of sequences they bear. ```{r test2, fig.width=9,fig.height=9} plot(pplace,type="color",main="color",edge.width=2) ``` - in the `fattree` plot, branch wicth is proportionnal to the number of sequences they bear. ```{r testfat, fig.width=9,fig.height=9} plot(pplace,type="fattree",main="fattree") ``` - in the `precise` plot dots are drawn at the exact placement positions. Whereas the color of the dots depend of the pendant branch length, their sizes depend on the placement sizes. Note that placements are drawn one above the other. ```{r test3, fig.width=9,fig.height=9} plot(pplace,type="precise",main="precise") ``` Note that it is possible to apply a function to modify the dot size using the `transfo` option. In the following example, the dot size is multiplied by 2. In some other cases `log` or `log10` transformations could be usefull. Beware that when using the `transfo` option, the legend does not anymore correspond to the placement size but to the transform dot size (*i.e.* the transform function applied to the dot size). ```{r test4, fig.width=9,fig.height=9} plot(pplace,type="precise",main="precise",transfo=function(X){X*2}) ``` # Subsetting the pplace object Placement object can be subseted. This could be done using placements ids... ```{r } sub1 <- sub_pplace(pplace,placement_id=1:100) sub1 ``` ...or using placements names. ```{r } ids <- sample(pplace$multiclass$name,50) sub2 <- sub_pplace(pplace,ech_id=ids) sub2 ``` # Conversion ### To a table Using the `pplace_to_table` function produces a table that contains the placement information along with the classification for each sequence. The output can be limited to the "best" placement (as in the example, i.e. the placements with the highest likelihood for each sequence). ```{r } pplace_table <- pplace_to_table(pplace,type="best") head(pplace_table,n=3) ``` ### To a contingency matrix The `pplace_to_matrix` produces a contingency table. Let say the first 50 sequences in the multiclass table correspond to sequence from "sample 1" and the following 50 correspond to "sample 2", the function output a contingency table for these two samples. You can either have the taxonomic names (tax_name=TRUE, in the example) or keep the taxonomic ids (tax_name=FALSE). ```{r} example_contingency <- pplace_to_matrix(pplace,c(rep("sample1",50),rep("sample2",50)),tax_name=TRUE) example_contingency ``` ### To a taxonomy Using the `pplace_to_taxonomy` function, a taxonomy table is obtained for each sequences with the taxonomy levels defined in the reference package. The taxonomy levels can be limited to a set of levels using the `rank` option. ```{r} example_taxo <- pplace_to_taxonomy(pplace,taxo,tax_name=TRUE,rank=c("order","family","genus","species")) head(example_taxo) ``` ### Make a phyloseq object Assuming the sequences in the pplace object represent centroids of sequence cluster obtained from multiple samples, using the taxonomy table and an appropriate OTU file, you can create a phyloseq object. ```{r} example_OTU <- matrix(sample(1:100, 500, replace = TRUE), nrow = 100, ncol = 5,dimnames=list(pplace$multiclass$name,paste("sample",1:5,sep="_"))) head(example_OTU) ``` The exemple below is not run (commented) due to errors/warnings triggered by the used of Bioconductor packages (i.e. phyloseq) in CRAN vignette on some platform. 
Just uncomment the code if you like to have a try. ```{r} #library(phyloseq) #example_phyloseq <- phyloseq(otu_table(example_OTU,taxa_are_rows=TRUE),tax_table(example_taxo)) #example_phyloseq ``` # Citation If you find BoSSA and/or its tutorials useful, you may cite: ```{r} citation("BoSSA") ``` # Other resources ### On phylogenetic placements [pplacer website](https://matsen.fhcrc.org/pplacer/) and [documentation](http://matsen.github.io/pplacer/) [taxtastic](https://github.com/fhcrc/taxtastic) ### Other R package with a related topic [ggtree](https://bioconductor.org/packages/release/bioc/html/ggtree.html) and [clstutils](https://bioconductor.org/packages/release/bioc/html/clstutils.html) ### R packages used by BoSSA [RSQLite](https://cran.r-project.org/package=RSQLite) and [jsonlite](https://cran.r-project.org/package=jsonlite) to read files, [ape](https://cran.r-project.org/package=ape) and [phangorn](https://cran.r-project.org/package=phangorn) to manipulate phylogenetic trees and [plotrix](https://cran.r-project.org/package=plotrix) for pie charts.
/scratch/gouwar.j/cran-all/cranData/BoSSA/vignettes/bossa-analysis.Rmd
--- title: "Reference package construction from scratch" output: prettydoc::html_pretty: theme: architect highlight: github vignette: > %\VignetteIndexEntry{Reference package construction from scratch} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r style, echo = FALSE, results = 'asis'} ``` Please report comments or bugs to Pierre Lefeuvre - <[email protected]> [BoSSA CRAN page](https://cran.r-project.org/package=BoSSA) # Important notice Note that most of the R code included in the vignette is not executed during vignette build. Some of the rentrez (very good) package code triggers HTTP failure during the CRAN check and is thus not evaluated during the build. It should run smoothly on you desktop computer. # Summary A phylogenetic placement corresponds to the position of a query sequence in a reference tree. Phylogenetic placement inference using [pplacer](https://matsen.fhcrc.org/pplacer/) requires the construction of "reference packages", *i.e.* informations on the phylogeny and taxonomy of a given group of organisms. This vignette presents example of reference package construction using [taxtastic](https://github.com/fhcrc/taxtastic), [rppr](http://matsen.github.io/pplacer/generated_rst/rppr.html) and R. # The polerovirus example Imagine you obtained some viral contigs that were assigned to the *Polerovirus* genus using a BLAST search. You want to obtain phylogenetic placements of these sequences using pplacer and now have to build a reference package for this genus. A search on the [ICTV website](https://talk.ictvonline.org/ictv-reports/ictv_9th_report/positive-sense-rna-viruses-2011/w/posrna_viruses/265/luteoviridae) informs us that polerovirus belongs to the Luteovidiae family and that their "genome size is fairly uniform ranging from 5.6 kb to 6.0 kb". ### Loading the required packages We will use the [rentrez package](https://cran.r-project.org/package=rentrez) to query NCBI from R. Along with a vignette available at the [rentrez CRAN webpage](https://cran.r-project.org/package=rentrez), examples of rentrez use are available [here](https://docs.ropensci.org/rentrez/). ```{r } library("rentrez") library("httr") library("XML") library("ape") library("BoSSA") set_config(config(http_version = 0)) ``` ### How many polerovirus sequences are available on NCBI ? We first search for the taxonomic id associated to the *Polerovirus* genus in NCBI... ```{r, eval=F} r_search <- entrez_search(db="taxonomy", term="polerovirus") r_search$ids ``` ... and use this taxonomic id (i.e. 119164) to query the nucleotide database. Note that we remove the reference sequence set from the search (using "NOT srcdb_refseq[PROP]") as these are duplicates from records already available in the nucleotide database. ```{r, eval=F} r_search <- entrez_search(db="nucleotide", term="txid119164[orgn] NOT srcdb_refseq[PROP]") r_search$count ``` This number represents the total number of sequences from the genus polerovirus. We will now search for full genomes using the "AND 5000:6500[slen]" query. ```{r, eval=F} r_search <- entrez_search(db="nucleotide", term="txid119164[orgn] NOT srcdb_refseq[PROP] AND 5000:6500[slen]") r_search$count ``` To recover all the GenBank identifiers (gi), we have to increase the retmax parameter (default is 20) to something superior to r_search$count. 
```{r, eval=F} r_search <- entrez_search(db="nucleotide", term="txid119164[orgn] NOT srcdb_refseq[PROP] AND 5000:6500[slen]",retmax=1000) gi <- r_search$ids gi[1:10] ``` ### Download all these sequences #### Get the genbank informations in XML format We now use these gi to download the information relative to the sequences in XML format. XML is a handy format as it allow to directly extract specific fields form the genbank file. Because the number of sequence may be a bit large, we will use the history option in our rentrez query. Sequences will then be obtained by batch of 200. ```{r, eval=F} r_search <- entrez_search(db="nucleotide", term="txid119164[orgn] NOT srcdb_refseq[PROP] AND 5000:6500[slen]",use_history=TRUE) all_recs <- NULL for(seq_start in seq(1,ceiling(r_search$count/200)*200,200)){ all_recs <- c(all_recs,entrez_fetch(db="nuccore", web_history=r_search$web_history,rettype="gbc",retmode="xml",retmax=200,retstart=seq_start)) } rec_list <- do.call(c,lapply(all_recs,xmlToList)) ``` #### Extract the fasta from the XML ```{r, eval=F} write(sapply(rec_list,function(X){paste(">",X$INSDSeq_locus,"\n",X$INSDSeq_sequence,sep="")}),"polerovirus_from_genbank.fasta") ``` #### Extract the taxonomy from the XML Also, we need to extract the polerovirus classification of each sequence. ```{r, eval=F} source_name <- t(sapply(rec_list,function(X){c(X$INSDSeq_locus,X$INSDSeq_organism)})) ``` # Alignement and phylogeny Now we have the sequences on the disk we can align the sequences (here using MAFFT) and construct a phylogenetic tree (here using FastTree)... ``` ### in bash mafft --adjustdirection --reorder polerovirus_from_genbank.fasta > polerovirus_from_genbank_MAFFT.fasta FastTree -nt -gamma -gtr -log polerovirus_from_genbank_MAFFT.log polerovirus_from_genbank_MAFFT.fasta > polerovirus_from_genbank_MAFFT.tre ``` ...and import the tree in R for control. ```{r } tree <- read.tree(paste(find.package("BoSSA"),"/extdata/polerovirus_from_genbank_MAFFT.tre",sep="")) plot(tree,cex=0.4,no.margin=TRUE) nodelabels(cex=0.8) ``` Note that using the "adjustdirection" during alignment, MAFFT generates reverse complement sequences and align them together with the remaining sequences. The best orientation is conserved for each sequence. When the reverse complement is aligned, the program add the "\_R\_" prefix in the name of the sequence. In this case and in order to keep the correspondance between informations files and sequences names, you'll have to edit the sequences names. After plotting the tree, and the node labels, it is apparent that the node 231 may be a good rooting point (may change depending on the sequence set you download *i.e.* as more poleroviruses sequences are upload to NCBI, the tree structure and node numbering will change). ```{r } root_tree <- root(tree,node=231,resolve=TRUE) plot(root_tree,cex=0.4,no.margin=TRUE) ``` The rooted tree is then written to the disk. ```{r} write.tree(root_tree,"polerovirus_ROOTED.tre") ``` # The Taxonomy ### Download the polerovirus taxonomy IDs We need to obtain the complete taxonomy information for the polerovirus. Taxastic has a function for that. We first download the full taxonomy from NCBI using taxit and subset it for the poleroviruses tax ids. 
```{r, eval=F} r_search <- entrez_search(db="taxonomy", term="txid119164[orgn]",retmax=1000) tax_ids <- r_search$ids write(tax_ids,"taxonomy_polerovirus.id") ``` ``` ### in bash taxit new_database taxit taxtable -f taxonomy_polerovirus.id -o polerovirus_taxonomy.csv ncbi_taxonomy.db ``` ### The information file We can now create and write the information file for the sequences from our phylogenetic tree. ```{r, eval=F} taxo <- read.csv(paste(find.package("BoSSA"),"/extdata/polerovirus_taxonomy.csv",sep="")) tax_id <- taxo$tax_id[match(source_name[,2],taxo$tax_name)] info <- data.frame(seqname=source_name[,1],accession=source_name[,1],tax_id=tax_id,species=source_name[,2],is_type=rep("no",nrow(source_name))) write.table(info,"polerovirus_info.csv",sep=",",row.names=FALSE) ``` # Create the refpkg using taxit The reference package is created using the taxit create command. ``` ### in bash taxit create -l polerovirus -P polerovirus.refpkg \ --taxonomy polerovirus_taxonomy.csv \ --aln-fasta polerovirus_from_genbank_MAFFT.fasta \ --seq-info polerovirus_info.csv \ --tree-stats polerovirus_from_genbank_MAFFT.log \ --tree-file polerovirus_ROOTED.tre --no-reroot ``` ### Check the refpkg using rppr Rppr offers the possibility to check the reference package using the following commands. ``` ### in bash rppr check -c polerovirus.refpkg rppr info -c polerovirus.refpk ``` ### Refpkg stats using BoSSA Using BoSSA, we can extract some summary statistics and draw some plots. - A reference package summary ```{r} refpkg_path <- paste(find.package("BoSSA"),"/extdata/polerovirus.refpkg",sep="") refpkg(refpkg_path) ``` - A tree with tips colored according to a taxonomic level, here the species ```{r tree1, fig.width=5, fig.height=5} refpkg(refpkg_path,type="tree",cex.text=0.3,rank_tree="species") ``` - A pie chart summarizing the taxonomy with here again, the species level. Note there is a slight decay between the text labels and slices... ```{r pie1, fig.width=5, fig.height=5} refpkg(refpkg_path,type="pie",rank_pie="species",cex.text=0.6) ``` - Alternatively, a input file for KronaTools to generate a krona chart could be generated. ```{r} refpkg(refpkg_path,type="krona") ``` A file (default is for_krona.txt) is generated and can be used to generate a krona chart with the ImportText.pl scrit from the [Krona software](https://github.com/marbl/Krona/wiki/KronaTools): ``` ImportText.pl for_krona.txt ``` # Subsample the refpkg It could be interesting to reduce the size of the reference package. Whereas, there is still a decent number of sequences in this polerovirus dataset, this number could be way higher and represent a computationnal burden for the rest of the analysis. Here is some code example to subset each species to a maximum of 10 sequences. [T-Coffee](http://tcoffee.readthedocs.io/en/latest/tcoffee_main_documentation.html#extracting-removing-sequences-with-the-identity) could be usefull if you would like to subsample based on diversity. ```{r, eval=F} d <- cophenetic.phylo(root_tree) ids <- NULL for(i in 1:length(unique(info$species))){ usp <- unique(info$species)[i] sub <- info[as.character(info$species)==usp,] acc <- as.character(sub[,1]) # the following code line prevent the code from crashing # i.e. 
new sequences not available in the example may be uploaded # when you will run the code acc <- acc[!is.na(match(acc,colnames(d)))] if(length(acc)<=10){ ids <- c(ids,acc) } if(length(acc)>10){ h <- hclust(as.dist(d[acc,acc])) grp <- cutree(h,k=10) ids <- c(ids,names(grp)[match(1:10,grp)]) } } to_remove <- root_tree$tip.label[!root_tree$tip.label%in%ids] ``` Note that the highly recommanded practice is to subset the alignement, re-align and compute a new phylogenetic tree, one can also directly subset the already available phylogeny (in a "quick and dirty" way). ```{r, eval=F} root_tree2 <- drop.tip(root_tree,to_remove) write.tree(root_tree2,"polerovirus_ROOTED_SUBSAMPLED.tre") ``` Using these new files, you can create another smaller refpkg using the "taxit create" command as described above. # Citation If you find BoSSA and/or its tutorials useful, you may cite: ```{r} citation("BoSSA") ``` # Other resources ### On phylogenetic placements [pplacer website](https://matsen.fhcrc.org/pplacer/) and [documentation](http://matsen.github.io/pplacer/) [taxtastic](https://github.com/fhcrc/taxtastic) ### Other R package with a related topic [ggtree](https://bioconductor.org/packages/release/bioc/html/ggtree.html) and [clstutils](https://bioconductor.org/packages/release/bioc/html/clstutils.html) ### On krona charts [krona](https://github.com/marbl/Krona/wiki/KronaTools) ### R packages used by BoSSA [RSQLite](https://cran.r-project.org/package=RSQLite) and [jsonlite](https://cran.r-project.org/package=jsonlite) to read files, [ape](https://cran.r-project.org/package=ape) and [phangorn](https://cran.r-project.org/package=phangorn) to manipulate phylogenetic trees and [plotrix](https://cran.r-project.org/package=plotrix) for pie charts.
/scratch/gouwar.j/cran-all/cranData/BoSSA/vignettes/bossa-refpkg.Rmd
--- title: "Circular tree plot" output: prettydoc::html_pretty: theme: architect highlight: github vignette: > %\VignetteIndexEntry{Inside out circular tree plot} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- <!-- %% \VignetteEngine{knitr::knitr} --> ```{r style, echo = FALSE, results = 'asis'} ``` Please report comments or bugs to Pierre Lefeuvre - <[email protected]> [BoSSA CRAN page](https://cran.r-project.org/package=BoSSA) # Cicular tree The "inside-out" circular tree is made to facilitate the visualisation of interactions between individuals in a tree. It is very important to notice that the phylogenetic tree must not be re-rooted or transformed after import in R using the ape read_tree function (i.e. node and tip numbering must be that obtained from the ape read_tree function). ### Loading of the required packages ```{r } library("ape") library("BoSSA") ``` ### Let's use a random tree ```{r} test_tree <- rtree(20) ``` ### Here is the "inside-out" circular tree plot ```{r tree1, fig.width=5,fig.height=5} circular_tree(test_tree) ``` ### Plotting interactions This plot was actually design to offer a conveniant way to display tip to tip, tip to node and node to node interactions. The coordinates of the tips and nodes can be obtained after drawing the tree using the pos_out=TRUE option and use to plot lines (or whatever else) over the tree. ```{r tree3, fig.width=5,fig.height=5} coord <- circular_tree(test_tree,pos_out=TRUE,tip_labels=FALSE) # lines between tips 1 and tips 6, 9 and 12 lines(c(coord[coord[,2]==1,4],coord[coord[,2]==6,4]),c(coord[coord[,2]==1,6],coord[coord[,2]==6,6]),col="red") lines(c(coord[coord[,2]==1,4],coord[coord[,2]==9,4]),c(coord[coord[,2]==1,6],coord[coord[,2]==9,6]),col="red") lines(c(coord[coord[,2]==1,4],coord[coord[,2]==12,4]),c(coord[coord[,2]==1,6],coord[coord[,2]==12,6]),col="red") # lines between tip 19 and node 5 lines(c(coord[coord[,2]==19,4],coord[coord[,2]==length(test_tree$t)+5,4]),c(coord[coord[,2]==19,6],coord[coord[,2]==length(test_tree$t)+5,6]),col="blue") ``` ### Citation If you find BoSSA and/or its tutorials useful, you may cite: ```{r} citation("BoSSA") ```
/scratch/gouwar.j/cran-all/cranData/BoSSA/vignettes/bossa-tree.Rmd
#' Play some Ultimate Tic-Tac-Toe? #' #' This function allows one to play the Ultimate version of Tic-Tac-Toe. #' In the Regular version of Tic-Tac-Toe, players take turns placing their marks, with the objective of achieving three marks in a row in any direction. #' 9x9 Tic-Tac-Toe or more commonly known as Ultimate Tic-Tac-Toe, adds a twist on the regular version of #' Tic-Tac-Toe that most of us have come to know. Perceive the board as a big Tic-Tac-Toe board, with #' the goal being to achieve 3 big marks in any direction. Big marks are achieved by winning the corresponding #' small Tic-Tac-Toe blocks. The player to move first may play anywhere on the board. However, following moves #' must correspond to the same big Tic-Tac-Toe block of the small Tic-Tac-Toe board where the last move was played. #' @keywords tictactoe tic tac toe ultimate connect row dataframe matrix game fun #' @importFrom graphics rect segments abline locator plot points #' @importFrom grDevices adjustcolor dev.new dev.next #' @export UltimateTicTacToe = function(){ if(names(dev.next())=="null device"){ dev.new() dev.new() }else{ dev.new() } n=18 plot(1:n, type = "n", xlim = c(1, n), axes = FALSE, xlab = "", ylab = "", bty = "o", lab = c(n, n, 1)) segments(0,c(6,12),n,c(6,12),lwd=3) segments(c(6,12),0,c(6,12),n,lwd=3) segments(0,c(2,4,8,10,14,16),n,c(2,4,8,10,14,16),lwd=1) segments(c(2,4,8,10,14,16),0,c(2,4,8,10,14,16),n,lwd=1) red_fade = adjustcolor("red" , alpha.f = 0.2) blue_fade = adjustcolor("blue", alpha.f = 0.2) played_moves=NULL turn=1 next_move = c(1:9) board99 = rep(list(rep(0,9)),9) current_status = rep(0,9) bg_ind = 1:9 bs_ind = 1:9 big_ind = list(c(3,15), c(9,15), c(15,15), c(3,9), c(9,9), c(15,9), c(3,3), c(9,3), c(15,3)) CS = list(c(0,12,6,18), c(6,12,12,18), c(12,12,18,18), c(0,6,6,12), c(6,6,12,12), c(12,6,18,12), c(0,0,6,6), c(6,0,12,6), c(12,0,18,6)) winning_seq = list(c(1,2,3), c(4,5,6), c(7,8,9), c(1,4,7), c(2,5,8), c(3,6,9), c(1,5,9), c(3,5,7)) repeat { for (j in 1:2) { repeat { P = locator(1) P$x = round(P$x/2-0.5)+0.5 P$y = round(P$y/2-0.5)+0.5 xy = paste(P, collapse = ":") V = get_square(c(P$x,P$y)) if (!is.element(xy, played_moves) && is.element(V[[1]],next_move)) break } played_moves = c(played_moves, xy) points(P$x*2,P$y*2,col=c("red","blue")[turn%%2+1],pch=c(1,4)[turn%%2+1],cex=4,lwd=3) segments(0,c(6,12),n,c(6,12),lwd=3) segments(c(6,12),0,c(6,12),n,lwd=3) abline(h=18,lwd=3,col="white") abline(h=0,lwd=3,col="white") abline(v=18,lwd=3,col="white") abline(v=0,lwd=3,col="white") next_move = conv_to_ind(V[[2]]) I = CS[[next_move]] rect(I[1],I[2],I[3],I[4],border="green",lwd=3) board99[[V[[1]]]][next_move] = turn%%2 + 1 for(i in bg_ind){ A = check_win(board99[[i]],winning_seq) if(A != 0){ current_status[i] = A bg_ind = bg_ind[-which(bg_ind==i)] } } for(i in bs_ind){ CSB = current_status[i] if(CSB!=0){ points(big_ind[[i]][1],big_ind[[i]][2],col=list(red_fade,blue_fade)[[CSB]],pch=c(1,4)[CSB],cex=20,lwd=12) bs_ind=bs_ind[-which(bs_ind==i)] } } turn = turn + 1 if (check_win(current_status,winning_seq)==1 || check_win(current_status,winning_seq)==2) break } if (check_win(current_status,winning_seq)==1 || check_win(current_status,winning_seq)==2) break } }
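## Usage sketch: the game is fully interactive. It opens a graphics device and
## reads each move with locator(), so it only makes sense in an interactive R
## session (hence no executable example). The green rectangle highlights the
## block in which the next move must be played; faded big marks show blocks
## that have already been won.
# UltimateTicTacToe()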
/scratch/gouwar.j/cran-all/cranData/BoardGames/R/TicTacToe.R
check_win=function(board,WS){ W1=W2=c(rep(0,9)) for(i in 1:8){ W1[i]=all(board[WS[[i]]]==1) W2[i]=all(board[WS[[i]]]==2) } if(sum(W1)<1 && sum(W2)<1){ win = 0 }else{ if(sum(W1)>=1){ win = 1 } if(sum(W2)>=1){ win = 2 } } return(win) } get_square = function(V){ #Square 2 if(3<V[1]&V[1]<=6 && 9>V[2]&V[2]>=6){ P = list(2,c(ceiling(9-V[2]),ceiling(V[1]-3))) } #Square 1 if(0<=V[1]&V[1]<=3 && 9>V[2]&V[2]>=6){ P = list(1,c(ceiling(9-V[2]),ceiling(V[1]))) } #Square 3 if(6<V[1]&V[1]<=9 && 9>V[2]&V[2]>=6){ P = list(3,c(ceiling(9-V[2]),ceiling(V[1]-6))) } #Square 4 if(V[1]<=3 && 3 <= V[2]&V[2] <6){ P = list(4,c(ceiling(6-V[2]),ceiling(V[1]))) } #Square 5 if(3<V[1] &V[1]<=6 && 3 <= V[2]&V[2] < 6){ P = list(5,c(ceiling(6-V[2]),ceiling(V[1]-3))) } #Square 6 if(6<V[1] &V[1]<=9 && 3<=V[2]&V[2] <6){ P = list(6,c(ceiling(6-V[2]),ceiling(V[1]-6))) } #Square 7 if(V[1]<=3 && V[2] <=3){ P = list(7,c(ceiling(3-V[2]),ceiling(V[1]))) } #Square 8 if(3<V[1]&V[1]<=6 && V[2] <=3){ P = list(8,c(ceiling(3-V[2]),ceiling(V[1]-3))) } #Square 9 if(6<V[1]&V[1]<=9 && V[2] <=3){ P = list(9,c(ceiling(3-V[2]),ceiling(V[1]-6))) } return(P) } conv_to_ind = function(move){ if(move[1]==1){ return(move[2]) } if(move[1]==2){ return(move[2]+3) } if(move[1]==3){ return(move[2]+6) } } legal_move = function(board,move){ P = get_square(move) I = conv_to_ind(P[[2]]) if(!all(board[[I]]==0)){ return(list(TRUE,I)) }else{ return(list(FALSE,I)) } }
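## Illustrative checks for the helpers above, on a hand-built position
## (the winning_seq list mirrors the one defined in UltimateTicTacToe):
# winning_seq = list(c(1,2,3), c(4,5,6), c(7,8,9), c(1,4,7),
#                    c(2,5,8), c(3,6,9), c(1,5,9), c(3,5,7))
# board = c(1,1,1, 0,2,2, 0,0,2)    # player 1 holds the top row
# check_win(board, winning_seq)     # 1: player 1 has won this block
# get_square(c(4, 7))               # big square 2, small cell (2,1)
# conv_to_ind(c(2, 1))              # row 2, col 1 -> index 4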
/scratch/gouwar.j/cran-all/cranData/BoardGames/R/TicTacToe_Functions.R
#' Get surrounding elements of an element in a matrix.
#'
#' This function extracts all surrounding elements of a specified element in a matrix and returns the result as a vector.
#' @param data Matrix.
#' @param index Index position of element. Input as a vector of row then column positions.
#' @param type Takes values of "direct" and "all". "direct" returns only the elements directly in contact with the specified element, whereas "all" returns every surrounding element including diagonals. Defaults to "all".
#' @keywords elements surrounding vector matrix
#' @export
#' @examples
#' M = matrix(1:20,4,5)
#' get_surround(data = M, index = c(2,3))
get_surround = function(data, index, type="all"){
  row_height = dim(data)[1]
  col_height = dim(data)[2]
  if(type!="all" && type!="direct"){
    # an invalid type is an error; stopping here avoids a confusing
    # failure below when the index vectors are undefined
    stop("type has to take values of either 'all' or 'direct'")
  }
  if(type=="all"){
    index1 = c(index[1]-1,index[1]-1,index[1]-1,index[1]  ,index[1]+1,index[1]+1,index[1]+1,index[1]  )
    index2 = c(index[2]-1,index[2]  ,index[2]+1,index[2]+1,index[2]+1,index[2]  ,index[2]-1,index[2]-1)
  }
  if(type=="direct"){
    index1 = c(index[1]-1,index[1]+1,index[1]  ,index[1]  )
    index2 = c(index[2]  ,index[2]  ,index[2]-1,index[2]+1)
  }
  adj_ind1 = index1[which(index1>=1 & index1<=row_height)[which(index1>=1 & index1<=row_height) %in% which(index2>=1 & index2<=col_height)]]
  adj_ind2 = index2[which(index1>=1 & index1<=row_height)[which(index1>=1 & index1<=row_height) %in% which(index2>=1 & index2<=col_height)]]
  empty_ind = c()
  for(i in 1:length(adj_ind1)){
    empty_ind = c(data[adj_ind1[i],adj_ind2[i]], empty_ind)
  }
  return(empty_ind)
}

#' Get all diagonals vectors of a matrix.
#'
#' This function extracts all diagonal vectors of a matrix and returns the result as a list.
#' @param data Matrix from which to extract diagonal elements
#' @param direction Which side to begin on? Takes values of one of "left", "right" or "both". Defaults to "right".
#' @keywords diagonal vectors matrix
#' @export
#' @examples
#' M = matrix(rnorm(9),3,3)
#' get_diags(M)
get_diags = function(data, direction="right"){
  if(direction=="right"){
    I = row(data) - col(data)
    return(split(data,I))
  }
  if(direction=="left"){
    I = row(data) + col(data)
    return(split(data,I))
  }
  if(direction == "both"){
    I1 = row(data) - col(data)
    I2 = row(data) + col(data)
    return(c(split(data,I1),split(data,I2)))
  }
}

#' Get all column vectors of a matrix.
#'
#' This function extracts all column vectors of a matrix and returns the result as a list.
#' @param data Matrix from which to extract column vectors.
#' @keywords extract column vectors matrix
#' @export
#' @examples
#' M = matrix(rnorm(9),3,3)
#' get_cols(M)
get_cols = function(data){
  return(split(data,col(data)))
}

#' Get all row vectors of a matrix.
#'
#' This function extracts all row vectors of a matrix and returns the result as a list.
#' @param data Matrix from which to extract row vectors.
#' @keywords extract row vectors matrix
#' @export
#' @examples
#' M = matrix(rnorm(9),3,3)
#' get_rows(M)
get_rows = function(data){
  return(split(data,row(data)))
}
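## Quick comparison of the two neighbourhood types (illustrative):
# M = matrix(1:20, 4, 5)
# get_surround(M, c(2, 3), type = "all")     # 8 neighbours, diagonals included
# get_surround(M, c(2, 3), type = "direct")  # 4 neighbours sharing an edge
# get_surround(M, c(1, 1), type = "all")     # corners return only 3 values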
/scratch/gouwar.j/cran-all/cranData/BoardGames/R/get_surround.R
#' Converts a set of x,y coordinates into a matrix index.
#'
#' This function converts a set of unit x,y coordinates into a matrix index.
#' @param data Matrix or data frame.
#' @param x x-coordinate
#' @param y y-coordinate
#' @keywords element coordinate convert matrix index
#' @export
#' @examples
#' M = matrix(1:20,4,5)
#' xy2index(data=M, x=3, y=2)
xy2index = function(data, x, y){
  # y is measured from the bottom of the plot, so the row index counts
  # down from nrow(data); this is the inverse of index2xy() below
  return(c(dim(data)[1]-y+1,x))
}

#' Converts a matrix index into a set of x,y coordinates.
#'
#' This function converts a matrix index into unit x,y plotting coordinates.
#' @param data Matrix or data frame.
#' @param index A vector of index values.
#' @keywords element coordinate convert matrix index
#' @export
#' @examples
#' M = matrix(1:20,4,5)
#' index2xy(data = M, index = c(3,4))
index2xy = function(data, index){
  return(c(index[2],-(index[1]-dim(data)[1]-1)))
}

#' Palindrome checker.
#'
#' This function checks if the supplied vector is a palindrome (reads the same forwards and backwards).
#' @param x Numeric or character vector.
#' @param case.sensitive Does upper or lower casing matter? Defaults to FALSE.
#' @keywords palindrome check vector case sensitive
#' @export
#' @examples
#' test1 = 123
#' test2 = "12321"
#' test3 = c("a",1,2,3,2,1,"a")
#' is_palindrome(test1)
#' is_palindrome(test2)
#' is_palindrome(test3)
is_palindrome = function(x, case.sensitive = FALSE){
  if(case.sensitive == TRUE){
    return(all(rev(unlist(strsplit(as.character(x),split=""))) == unlist(strsplit(as.character(x),split=""))))
  }
  if(case.sensitive == FALSE){
    return(all(rev(unlist(strsplit(tolower(as.character(x)),split=""))) == unlist(strsplit(tolower(as.character(x)),split=""))))
  }
}

#' Detects if a certain sequence is present in a matrix.
#'
#' This function allows for the detection of a particular sequence in a matrix.
#' @param data A matrix.
#' @param sequence The desired sequence to search for.
#' @param reps Number of repetitions of the sequence.
#' @param diag Do you want to search diagonals? Defaults to TRUE.
#' @keywords sequence vector matrix detect search
#' @export
#' @examples
#' M = matrix(sample(c(1,2),25,replace=TRUE),5,5)
#' detect_seq(data = M, sequence = "2", reps = 5)
#' #or equivalently
#' detect_seq(data = M, sequence = "22222", reps = 1)
detect_seq = function(data, sequence, reps, diag=TRUE){
  R = get_rows(data)
  C = get_cols(data)
  D = get_diags(data,direction = "both")
  if(diag==TRUE){
    check_list = c(R,C,D)
  }else{
    check_list = c(R,C)
  }
  return(sum(grepl(paste(rep(as.character(sequence) ,reps), collapse = ""), lapply(check_list,function(x){paste(x,collapse = "")})))>=1)
}
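## Round-trip sanity check (illustrative): index2xy() inverts xy2index().
# M = matrix(1:20, 4, 5)
# idx = xy2index(M, x = 3, y = 2)   # c(3, 3): third row, third column
# index2xy(M, idx)                  # c(3, 2): recovers x = 3, y = 2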
/scratch/gouwar.j/cran-all/cranData/BoardGames/R/xy2index.R
#' Bodi: Boosting Diversity Algorithm
#'
#' We provide an implementation of the boosting diversity algorithm. This is a
#' gradient-boosting-based algorithm that incorporates a diversity term to
#' guide the gradient boosting iterations. The idea is to trade off some individual
#' optimality for global enhancement. The improvement is obtained with predictors
#' generated progressively by boosting diversity. See Bourel et al. (2021) <https://hal.archives-ouvertes.fr/hal-03041309v1>
#'
#' @name Bodi-package
#' @aliases bodi-package bodi
#' @docType package
#' @author Yannig Goude [aut, cre], Mathias Bourel [aut], Jairo Cugliari [aut], Jean-Michel Poggi [aut]\cr
#'
#' Maintainer: Yannig Goude <[email protected]>
#'
#' @references \itemize{ \item Mathias Bourel, Jairo Cugliari, Yannig Goude, Jean-Michel Poggi. Boosting Diversity in Regression Ensembles.
#' https://hal.archives-ouvertes.fr/hal-03041309v1 (2021). }
#' @keywords Boosting Regression Ensemble
NULL
/scratch/gouwar.j/cran-all/cranData/Bodi/R/Bodi.-package.R
#' Diversity Boosting Algorithm
#'
#' Train a set of initial learners by promoting diversity among them. For this, a gradient descent strategy is adopted where a specialized loss function induces diversity, which yields a reduction of the mean-square-error of the aggregated learner.
#'
#' @param target name of the target variable
#' @param cov the model equation, a character string provided in the formula syntax. For example, for a linear model including covariates $X_1$ and $X_2$ it will be "X1+X2" and for a GAM with smooth effects it will be "s(X1)+s(X2)"
#' @param data0 the learning set
#' @param data1 the test set
#' @param sample_size the size of the bootstrap sample as a proportion of the learning set size. sample_size=0.5 means that the resamples are of size n/2 where n is the number of rows of data0.
#' @param grad_step step of the gradient descent
#' @param diversity_weight the weight of the diversity encouraging penalty (kappa in the paper)
#' @param Nstep the number of iterations of the diversity boosting algorithm ($N$ in the paper)
#' @param model the type of base learner used in the algorithm if using a single base learner (model_list=NULL). Currently it could be either
#' "gam" for an additive model, "rf" for a random forest, "gbm" for gradient boosting machines, "rpart" for single CART trees.
#' @param sampling the type of sampling procedure used in the resampling step. Could be either \code{"random"} for uniform random sampling with
#' replacement or \code{"blocks"} for uniform sampling with replacement of blocks of consecutive data points. Default is "random".
#' @param Nblock number of blocks for the block sampling. Equal to 10 by default.
#' @param aggregation_type type of aggregation used for the ensemble method, default is uniform weights but it could also be "MLpol", an aggregation algorithm
#' from the opera package
#' @param param a list containing the parameters of the model chosen. It could be e.g. the number of trees for "rf", the depth of the tree for "rpart"...
#' @param theorical_dw set to TRUE if one wants to use the theoretical upper bound of the diversity weight kappa
#' @param model_list a list of models among the possible ones (see the description of the model argument). In that case the weak learner is sampled at each step from the list.
#' Still "experimental", be careful.
#' @param w_list the prior weights of each model in the model_list #' @param param_list list of parameters of each model in the model_list #' @param cov_list list of covariates of each model in the model_list #' @return a list including the boosted models, the ensemble forecast #' \item{fitted_ensemble}{Fitted values (in-sample predictions) for the ensemble method (matrix).} #' \item{forecast_ensemble}{Forecast (out-sample predictions) for the ensemble method (matrix).} #' \item{fitted}{Fitted values of the last boosting iteration (vector).} #' \item{forecast}{Forecast of the last boosting iteration (vector).} #' \item{err_oob}{Estimated out-of-bag errors by iteration (vector).} #' \item{diversiy_oob}{Estimated out-of-bag diversity (vector).} #' #' @importFrom stats as.formula predict #' @examples #' all <- na.omit(airquality) #' smp <- sample(nrow(all), floor(.8 * nrow(all))) #' boosting_diversity("Ozone", "Solar.R+Wind+Temp+Month+Day", #' data0 = all[smp, ], data1 = all[-smp, ]) #' @author Yannig Goude <[email protected]> #' @export boosting_diversity <- function(target, cov, data0, data1, sample_size = 0.5, grad_step = 1, diversity_weight = 1, Nstep = 10, model = "gam", sampling = "random", Nblock = 10, aggregation_type = "uniform", param = list(), theorical_dw = FALSE, model_list = NULL, w_list = NULL, param_list = NULL, cov_list = NULL) { n <- nrow(data0) ####here the hypothesis about the relationship between y and covariates has to be specified in cov equation <- paste0(target, "~", cov) stopifnot(sampling %in% c('random', 'blocks')) if(sampling=="random") { subset1 <- sample(c(1:n), floor(n*sample_size), replace=TRUE) #subset2 <- sample(c(1:n), floor(n*sample_size), replace=T) if we don't want that I2=I-I1 subset2 <- c(1:n)[-subset1] } else {# if(sampling=="blocks") blocks <- buildBlock(Nblock, data0) s <- sample(c(1:Nblock), Nblock, replace=TRUE) ind <- unlist(blocks[s]) subset1 <- ind subset2 <- c(1:n)[-ind] } if(!is.null(model_list)) { model <- sample(model_list, 1, prob=w_list) param <- param_list[[model]] cov <- cov_list[[model]] equation <- paste0(target, "~", cov) } mod_out <- model if(model == "gam") { param$data <- data0[subset1,] param$formula <- as.formula(equation) g <- do.call(mgcv::gam, args=param) #g <- mgcv::gam(equation%>%as.formula, data=data0[subset1,]) forecast0 <- matrix(predict(g, newdata=data0), ncol=1, nrow=n) forecast1 <- matrix(predict(g, newdata=data1), ncol=1, nrow=nrow(data1)) } if(model=="rf") { param$data <- data0[subset1,] param$formula <- as.formula(equation) rf <- do.call(ranger::ranger, args=param) #rf <- ranger::ranger(equation%>%as.formula, data=data0[subset1,]) forecast0 <- matrix(predict(rf, data=data0)$predictions, ncol=1, nrow=n) forecast1 <- matrix(predict(rf, data=data1)$predictions, ncol=1, nrow=nrow(data1)) } if(model=="gbm") { param$data <- data0[subset1,] param$formula <- as.formula(equation) gb <- do.call(gbm::gbm, args=param) best.iter <- gbm::gbm.perf(gb,method="OOB", plot.it = FALSE) forecast0 <- matrix(predict(gb, newdata=data0, n.trees=best.iter), ncol=1, nrow=n) forecast1 <- matrix(predict(gb, newdata=data1, n.trees=best.iter), ncol=1, nrow=nrow(data1)) } if(model=="rpart") { param$data <- data0[subset1,] param$formula <- as.formula(equation) rp <- do.call(rpart::rpart, args=param) forecast0 <- matrix(predict(rp, newdata=data0), ncol=1, nrow=n) forecast1 <- matrix(predict(rp, newdata=data1), ncol=1, nrow=nrow(data1)) } err_oob <- mean((forecast0[subset2]-data0[subset2, target])^2) diversiy_oob <- 0 if(Nstep>1) { for(i in 
c(2:Nstep)) { last_forecast0 <- forecast0[, ncol(forecast0)] last_forecast1 <- forecast1[, ncol(forecast1)] if(ncol(forecast0)<=1) { gradient <- (last_forecast0-data0[, target]) #####initialize with only a classical gradient boosting step } else { if(aggregation_type=="uniform") { div <- last_forecast0- rowMeans(forecast0) } if(aggregation_type=="MLpol") { agg <- opera::mixture(Y = data0[, target] , experts = forecast0 , model = aggregation_type, loss.type = "square", loss.gradient = TRUE) div <- last_forecast0 - agg$prediction } if(theorical_dw==FALSE) { gradient <- (last_forecast0-data0[, target]) - diversity_weight*div } if(theorical_dw==TRUE) { gradient <- (last_forecast0-data0[, target]) - (i/(i-1))*div } } data0$gradient <- gradient equation <- paste0("gradient", "~", cov) if(!is.null(model_list)) { model <- sample(model_list, 1, prob=w_list) param <- param_list[[model]] cov <- cov_list[[model]] # print(model) # print(cov) equation <- paste0("gradient", "~", cov) } if(model=="gam") { param$data <- data0[subset2,] param$formula <- as.formula(equation) g <- do.call(mgcv::gam, args=param) boost_forecast0 <- last_forecast0 -grad_step*mgcv::predict.gam(g, newdata=data0) boost_forecast1 <- last_forecast1 -grad_step*mgcv::predict.gam(g, newdata=data1) } if(model=="rf") { param$data <- data0[subset2,] param$formula <- as.formula(equation) rf <- do.call(ranger::ranger, args=param) boost_forecast0 <- last_forecast0 -grad_step*predict(rf, data=data0)$predictions boost_forecast1 <- last_forecast1 -grad_step*predict(rf, data=data1)$predictions } if(model=="gbm") { param$data <- data0[subset2,] param$formula <- as.formula(equation) gb <- do.call(gbm::gbm, args=param) best.iter <- gbm::gbm.perf(gb,method="OOB", plot.it = FALSE) boost_forecast0 <- last_forecast0 -grad_step*predict(gb, newdata=data0, n.trees=best.iter) boost_forecast1 <- last_forecast1 -grad_step*predict(gb, newdata=data1, n.trees=best.iter) } if(model=="rpart") { param$data <- data0[subset2,] param$formula <- as.formula(equation) rp <- do.call(rpart::rpart, args=param) boost_forecast0 <- last_forecast0 -grad_step*predict(rp, newdata=data0) boost_forecast1 <- last_forecast1 -grad_step*predict(rp, newdata=data1) } forecast0 <- cbind(forecast0, boost_forecast0) forecast1 <- cbind(forecast1, boost_forecast1) err_oob <- c(err_oob, mean((boost_forecast0[subset1]-data0[subset1, target])^2)) diversiy_oob <- c(diversiy_oob, mean(rowMeans((forecast0[subset1,]-rowMeans(forecast0[subset1,]))^2))) if(sampling=="random") { subset1 <- sample(c(1:n), floor(n*sample_size), replace=TRUE) subset2 <- c(1:n)[-subset1] } if(sampling=="blocks") { blocks <- buildBlock(Nblock, data0) s <- sample(c(1:Nblock), Nblock, replace=TRUE) ind <- unlist(blocks[s]) subset1 <- ind subset2 <- c(1:n)[-ind] } mod_out <- c( mod_out, model) } } res <- list() res$fitted_ensemble <- forecast0 res$forecast_ensemble <- forecast1 colnames(res$fitted_ensemble) <- paste0("boosting", c(1:ncol(res$fitted_ensemble))) colnames(res$forecast_ensemble) <- paste0("boosting", c(1:ncol(res$forecast_ensemble))) res$fitted <- forecast0[, ncol(forecast0)] res$forecast<- forecast1[, ncol(forecast1)] res$err_oob <- err_oob res$diversiy_oob <- diversiy_oob # res$mod_out <- mod_out return(res) }
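## Illustrative follow-up to the airquality example in the help page above:
## inspect how the out-of-bag error and the diversity evolve over the boosting
## iterations (field names as returned by boosting_diversity; note the
## "diversiy_oob" spelling matches the returned object).
# fit <- boosting_diversity("Ozone", "Solar.R+Wind+Temp+Month+Day",
#                           data0 = all[smp, ], data1 = all[-smp, ], Nstep = 20)
# plot(fit$err_oob, type = "b", xlab = "iteration", ylab = "OOB error")
# plot(fit$diversiy_oob, type = "b", xlab = "iteration", ylab = "OOB diversity")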
/scratch/gouwar.j/cran-all/cranData/Bodi/R/boosting_diversity6.R
#' buildBlock #' #' Compute blocks of consecutive data for blockwise CV or sampling. #' #' @param Nblock number of blocks #' @param data0 the learning set #' @return A list of vectors containing the indices of each block. #' @examples #' buildBlock(4, data.frame(id = 1:15)) #' @author Yannig Goude <[email protected]> #' @export buildBlock <- function(Nblock, data0) { borne_block<-floor(seq(1, nrow(data0), length=Nblock+1)) block_list<-list() l<-length(borne_block) for(i in c(2:(l-1))) { block_list[[i-1]] <- c(borne_block[i-1]:(borne_block[i]-1)) } block_list[[l-1]]<-c(borne_block[l-1]:(borne_block[l])) return(block_list) } ## Faster version #Nblock <- 4 #nx <- 15 #nrow(data0) #fuzz <- min((nx - 1L)/1000, 0.4 * nx/Nblock) #breaks <- seq(1 - fuzz, nx + fuzz, length.out = Nblock + 1L) #structure(split(seq(nx), cut(seq(nx), breaks)), names = NULL)
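## Sketch of how boosting_diversity() uses these blocks for resampling
## (illustrative; data0 is any data frame):
# blocks <- buildBlock(10, data0)
# s <- sample(1:10, 10, replace = TRUE)
# in_bag <- unlist(blocks[s])
# out_bag <- seq_len(nrow(data0))[-in_bag]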
/scratch/gouwar.j/cran-all/cranData/Bodi/R/buildBlock.R
#' bears
#'
#' Body measurements for 143 wild bears.
#'
#' Wild bears were anesthetized, and their bodies were measured and weighed.
#' One goal of the study was to make a table (or perhaps a set of tables) for
#' hunters, so they could estimate the weight of a bear based on other
#' measurements. This would be used because in the forest it is easier to
#' measure the length of a bear, for example, than it is to weigh it.
#'
#' @name bears
#' @docType data
#' @format A data frame with 143 observations on the following 12 variables.
#' \itemize{ \item{ID. Identification number}
#' \item{Age. Bear's age, in months. Note, wild bears are always born
#' in January, so an expert can estimate the bear's age without directly asking
#' it how old it is. } \item{Month. Month when the measurement was
#' made. 1 = Jan., 12 = Dec. Since bears hibernate in the winter, their body
#' shape probably depends on the season. } \item{Sex. 1 = male, 2 =
#' female } \item{Head.L. Length of the head, in inches }
#' \item{Head.W. Width of the head, in inches }
#' \item{Neck.G. Girth (distance around) the neck, in inches }
#' \item{Length. Body length, in inches } \item{Chest.G. Girth
#' (distance around) the chest, in inches } \item{Weight. Weight of the
#' bear, in pounds } \item{Obs.No. Observation number for this bear.
#' For example, the bear with ID = 41 (Bertha) was measured on four occasions,
#' in the months coded 7, 8, 11, and 5. The value of Obs.No goes from 1 to 4
#' for these observations. } \item{Name. The names of the bears given
#' to them by the researchers} }
#' @references This data set was supplied by Gary Alt. Entertaining references
#' are in Reader's Digest April, 1979, and Sports Afield September, 1981.
#' @source This data is in the example data set Bears.MTW distributed with
#' Minitab
#' @keywords datasets
#' @examples
#'
#' data(bears)
#' boxplot(Weight~Sex, data = bears)
NULL

#' Bolstad Functions
#'
#' A set of R functions and data sets for the book Introduction to Bayesian
#' Statistics, Bolstad, W.M. (2007), John Wiley & Sons ISBN 0-471-27020-2. Most
#' of the package functions replicate the Minitab macros that are provided with
#' the book. Some additional functions are provided to simplify inference about
#' the posterior distribution of the parameters of interest.
#'
#' \tabular{ll}{ Package: \tab Bolstad\cr Type: \tab Package\cr Version: \tab
#' 0.2-26\cr Date: \tab 2015-05-01\cr License: \tab GPL 2\cr }
#'
#' @name Bolstad-package
#' @aliases Bolstad-package Bolstad
#' @docType package
#' @author James Curran Maintainer: James Curran <j.curran@@auckland.ac.nz>
#' @references Bolstad, W.M. (2007), Introduction to Bayesian Statistics, John
#' Wiley & Sons.
#' @keywords package
NULL

#' Moisture data
#'
#' Moisture level at two stages in a food manufacturing process, in-process and final.
#' These data are given in Example 14.1
#'
#'
#' @name moisture.df
#' @docType data
#' @format A data frame with 25 observations on the following 6 variables.
#' \itemize{ \item{batch. the batch number of the measurement}
#' \item{proc.level. the in-process moisture level} \item{final.level. the
#' final moisture level of the batch} \item{ls.fit. the least squares fitted value
#' of final.level given proc.level} \item{residiual. the least squares residual}
#' \item{residiual.sq. the squared least squares residual}}
#' @keywords datasets
#' @examples
#'
#' data(moisture.df)
#' plot(final.level~proc.level, data = moisture.df)
#'
NULL

#' Slug data
#'
#' Lengths and weights of 100 slugs from the species Limax maximus collected
#' around Hamilton, New Zealand.
#'
#'
#' @name slug
#' @docType data
#' @format A data frame with 100 observations on the following 4 variables.
#' \itemize{ \item{length. length (mm) of the slug}
#' \item{weight. weight (g) of the slug} \item{log.len. natural
#' logarithm of the \code{length}} \item{log.wt. natural logarithm of
#' the \code{weight}} }
#' @references Barker, G. and McGhie, R. (1984). The Biology of Introduced
#' Slugs (Pulmonata) in New Zealand: Introduction and Notes on Limax Maximus,
#' NZ Entomologist 8, 106--111.
#' @keywords datasets
#' @examples
#'
#' data(slug)
#' plot(weight~length, data = slug)
#' plot(log.wt~log.len, data = slug)
#'
NULL

#' Data for simple random sampling, stratified sampling, and cluster sampling
#' experiments
#'
#' A simulated population made up of 100 individuals. The individuals come from
#' three ethnic groups with population proportions of 40\%, 40\%, and 20\%,
#' respectively. There are twenty neighborhoods, and five individuals live in
#' each one. Now, the income distribution may be different for the three
#' ethnic groups. Also, individuals in the same neighborhood tend to be more
#' similar than individuals in different neighborhoods.
#'
#'
#' @name sscsample.data
#' @docType data
#' @format A data frame with 100 observations on the following 3 variables.
#' \itemize{ \item{income. Simulated income in $10,000}
#' \item{ethnicity. A numerical vector indicating the ethnic group of
#' the observation} \item{neighborhood. A numeric vector indicating the
#' neighborhood of the observation} }
#' @keywords datasets
#' @examples
#'
#' data(sscsample.data)
#' plot(income~ethnicity, data = sscsample.data)
NULL
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/Bolstad-package.R
#' Control Bolstad functions #' @param plot if \code{TRUE} then draw a plot (for functions that actually have plots) #' @param quiet if \code{TRUE} then suppress the function output #' @param ... additional parameters #' #' @return an invisible list of options and their values #' @export Bolstad.control = function(plot = TRUE, quiet = FALSE, ...){ invisible(list( plot = plot, quiet = quiet)) }
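## Illustrative use: Bolstad functions pass their ... arguments through
## Bolstad.control() to decide whether to plot and/or print.
# ctrl <- Bolstad.control(plot = FALSE, quiet = TRUE)
# ctrl$plot   # FALSE
# ctrl$quiet  # TRUE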
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/Bolstad.control.R
#' Interquartile Range generic #' #' Compute the interquartile range. #' #' @param x an object. #' @param \dots any additional arguments. These are primarily used in \code{IQR.default} #' which calls \code{stats::IQR}. #' @details If \code{x} is an object of class \code{Bolstad} then the posterior #' IQR of the parameter of interest will be calculated. #' @author James Curran #' @export IQR = function(x, ...){ UseMethod("IQR") } #' @export IQR.default = function(x, ...){ stats::IQR(x, ...) } #' @export IQR.Bolstad = function(x, ...){ return(diff(quantile(x, probs = c(0.25, 0.75), ...))) }
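## Dispatch sketch (illustrative): plain numeric input falls through to
## stats::IQR, while "Bolstad" objects use posterior quantiles via the
## package's quantile method (hypothetical fit object shown).
# IQR(rnorm(100))      # default method, identical to stats::IQR
# # fit = some Bolstad fit, e.g. the slope object from bayes.lin.reg(); then
# # IQR(fit)           # posterior interquartile range of the parameter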
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/IQR.R
#' as.data.frame.Bolstad
#' @param x an object of class \code{Bolstad}
#' @param \dots any extra arguments needed.
#' @import stats
#' @import graphics
#' @export
as.data.frame.Bolstad = function(x, ...){
  result = data.frame(param.x = x$param.x, prior = x$prior,
                      likelihood = x$likelihood, posterior = x$posterior, ...)
  names(result)[1] = x$name
  return(result)
}
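## Illustrative: extract the posterior grid of a fit as a data frame.
## bayes.lin.reg() (later in this package) returns Bolstad objects for
## the slope and intercept.
# fit <- bayes.lin.reg(y, x)
# head(as.data.frame(fit$slope))  # columns: beta, prior, likelihood, posterior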
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/as.data.frame.Bolstad.R
#' Bayesian inference for simple linear regression
#'
#' This function is used to find the posterior distribution of the simple
#' linear regression slope variable \eqn{\beta}{beta} when we have a random
#' sample of ordered pairs \eqn{(x_{i}, y_{i})} from the simple linear
#' regression model:
#' \deqn{y_{i} = \alpha_{\bar{x}} + \beta x_{i} + \epsilon_{i}}{y_i = alpha_xbar + beta*x_i + epsilon_i}
#' where the observation errors, \eqn{\epsilon_i}{epsilon_i}, are independent
#' \eqn{normal(0,\sigma^{2})}{normal(0,sigma^2)} with known variance.
#'
#'
#' @param y the vector of responses.
#' @param x the value of the explanatory variable associated with each response.
#' @param slope.prior use a ``flat'' prior or a ``normal'' prior for
#' \eqn{\beta}{beta}
#' @param intcpt.prior use a ``flat'' prior or a ``normal'' prior for
#' \eqn{\alpha_{\bar{x}}}{alpha_xbar}
#' @param mb0 the prior mean of the simple linear regression slope variable
#' \eqn{\beta}{beta}. This argument is ignored for a flat prior.
#' @param sb0 the prior std. deviation of the simple linear regression slope
#' variable \eqn{\beta}{beta} - must be greater than zero. This argument is
#' ignored for a flat prior.
#' @param ma0 the prior mean of the simple linear regression intercept variable
#' \eqn{\alpha_{\bar{x}}}{alpha_xbar}. This argument is ignored for a flat
#' prior.
#' @param sa0 the prior std. deviation of the simple linear regression variable
#' \eqn{\alpha_{\bar{x}}}{alpha_xbar} - must be greater than zero. This
#' argument is ignored for a flat prior.
#' @param sigma the value of the std. deviation of the residuals. By default,
#' this is assumed to be unknown and the sample value is used instead. This
#' affects the prediction intervals.
#' @param alpha controls the width of the credible interval.
#' @param plot.data if true the data are plotted, and the posterior regression
#' line superimposed on the data.
#' @param pred.x a vector of x values for which the predicted y values are
#' obtained and the std. errors of prediction
#' @return A list will be returned with the following components:
#' \item{post.coef}{the posterior mean of the intercept and the slope}
#' \item{post.coef.sd}{the posterior standard deviation of the intercept and the
#' slope} \item{pred.x}{the vector of values for which predictions have been
#' requested. If pred.x is NULL then this is not returned} \item{pred.y}{the
#' vector of predicted values corresponding to pred.x. If pred.x is NULL then this
#' is not returned} \item{pred.se}{The standard errors of the predicted values
#' in pred.y. If pred.x is NULL then this is not returned}
#' @param \dots additional arguments that are passed to \code{Bolstad.control}
#' @keywords misc
#' @examples
#'
#' ## generate some data from a known model, where the true value of the
#' ## intercept alpha is 2, the true value of the slope beta is 3, and the
#' ## errors come from a normal(0,1) distribution
#' set.seed(123)
#' x = rnorm(50)
#' y = 2 + 3*x + rnorm(50)
#'
#' ## use the function with a flat prior for the slope beta and a
#' ## flat prior for the intercept, alpha_xbar.
#'
#' bayes.lin.reg(y,x)
#'
#' ## use the function with a normal(0,3) prior for the slope beta and a
#' ## normal(30,10) prior for the intercept, alpha_xbar.
#'
#' bayes.lin.reg(y,x,"n","n",0,3,30,10)
#'
#' ## use the same data but plot it and the credible interval
#'
#' bayes.lin.reg(y,x,"n","n",0,3,30,10, plot.data = TRUE)
#'
#' ## The heart rate vs. O2 uptake example 14.1
#' O2 = c(0.47,0.75,0.83,0.98,1.18,1.29,1.40,1.60,1.75,1.90,2.23)
#' HR = c(94,96,94,95,104,106,108,113,115,121,131)
#' plot(HR,O2,xlab="Heart Rate",ylab="Oxygen uptake (Percent)")
#'
#' bayes.lin.reg(O2,HR,"n","f",0,1,sigma=0.13)
#'
#' ## Repeat the example but obtain predictions for HR = 100 and 110
#'
#' bayes.lin.reg(O2,HR,"n","f",0,1,sigma=0.13,pred.x=c(100,110))
#'
#' @export bayes.lin.reg
bayes.lin.reg = function(y, x, slope.prior = c("flat", "normal"),
                         intcpt.prior = c("flat", "normal"),
                         mb0 = 0, sb0 = 0, ma0 = 0, sa0 = 0, sigma = NULL,
                         alpha = 0.05, plot.data = FALSE, pred.x = NULL, ...) {

  if(sum(is.na(y)) > 0 || sum(is.na(x)) > 0)
    stop("Error: x and y may not contain missing values")

  if(length(y) != length(x))
    stop("Error: x and y are unequal lengths")

  if(!is.null(sigma) && sigma <= 0){
    stop("Error: the std. deviation of the residuals, sigma, must be greater than zero")
  }

  intcpt.prior = match.arg(intcpt.prior, c("flat", "normal"))
  slope.prior = match.arg(slope.prior, c("flat", "normal"))

  if(!grepl("^(flat|normal)$", slope.prior))
    stop("The slope prior must be one of \"normal\" or \"flat\"")

  if(!grepl("^(flat|normal)$", intcpt.prior))
    stop("The intercept prior must be one of \"normal\" or \"flat\"")

  if(slope.prior == "normal" && sb0 <= 0)
    stop("Error: the prior std. deviation sb0 must be greater than zero")

  if(intcpt.prior == "normal" && sa0 <= 0)
    stop("Error: the prior std. deviation sa0 must be greater than zero")

  if(alpha <= 0 || alpha > 0.5)
    stop("Error: alpha must be in the range (0, 0.5]")

  if(length(y) <= 2)
    stop("Error: you really should have more than 2 points for a regression!")

  n = length(y)
  x.bar = mean(x)
  y.bar = mean(y)
  x2.bar = mean(x^2)
  xy.bar = mean(x * y)
  y2.bar = mean(y^2)

  b.ls = (xy.bar - x.bar * y.bar) / (x2.bar - x.bar^2)
  fitted = y.bar + b.ls * (x - x.bar)
  residuals = y - fitted

  A0 = y.bar - b.ls * x.bar
  Ax.bar = y.bar

  quiet = Bolstad.control(...)$quiet
  drawPlot = Bolstad.control(...)$plot

  sigma.known = TRUE

  if(is.null(sigma)){
    sigma.known = FALSE
    sigma = sqrt(sum((y - (Ax.bar + b.ls * (x - x.bar)))^2) / (n - 2))
    if(!quiet){cat(paste("Standard deviation of residuals: ", signif(sigma, 3), "\n"))}
  } else {
    if(!quiet){
      cat(paste("Known standard deviation: ", signif(sigma, 3), "\n"))
    }
  }

  SSx = n * (x2.bar - x.bar^2)

  lb = 0
  ub = 0

  prior.b = rep(0, 1001)
  beta = prior.b
  likelihood.b = prior.b
  posterior.b = prior.b

  d = as.data.frame(cbind(y, x))

  if (slope.prior == "flat") {
    prior.prec.b = 0
    mb0 = 0
    bnd.mult.b = 4
  } else {
    prior.prec.b = 1 / sb0^2
    bnd.mult.b = 3
  }

  if (intcpt.prior == "flat") {
    prior.prec.a = 0
    ma0 = 0
    bnd.mult.a = 4
  } else {
    prior.prec.a = 1 / sa0^2
    bnd.mult.a = 3
  }

  ################
  # SLOPE
  ################

  prec.ls = SSx / sigma^2
  sd.ls = sqrt(1 / prec.ls)

  post.prec.b = prior.prec.b + prec.ls
  post.var.b = 1 / post.prec.b
  post.sd.b = sqrt(post.var.b)
  post.mean.b = (prior.prec.b * mb0 + SSx / sigma^2 * b.ls) / post.prec.b

  lb = post.mean.b - bnd.mult.b * post.sd.b
  ub = post.mean.b + bnd.mult.b * post.sd.b

  beta = seq(lb, ub, length = 1001)

  if (slope.prior == "flat") {
    prior.b = rep(1, 1001)
    norm.const = 0.5 * (2 * sum(prior.b) - prior.b[1] - prior.b[1001] * ((ub - lb) * 0.001))
    prior.b = prior.b / norm.const
  } else {
    prior.b = dnorm(beta, mb0, sb0)
  }

  likelihood.b = dnorm(beta, b.ls, sd.ls)
  posterior.b = dnorm(beta, post.mean.b, post.sd.b)
  if(drawPlot){
    old.par = par(mfrow = c(2, 2))
    y.max = max(c(prior.b, likelihood.b, posterior.b))
    plot(beta, prior.b, type = "l", col = "black", lty = 1,
         ylim = c(0, 1.1 * y.max), xlab = expression(beta), ylab = "",
         main = expression(paste("Prior, likelihood and posterior for ", beta, sep = "")),
         sub = "(slope)")
    lines(beta, likelihood.b, lty = 2, col = "red")
    lines(beta, posterior.b, lty = 3, col = "blue")
    legend("topleft", bty = "n", cex = 0.7, lty = 1:3,
           col = c("black", "red", "blue"),
           legend = c("Prior", "Likelihood", "Posterior"))
  }

  ####################################################################################

  alpha.xbar = rep(0, 1001)
  prior.a = alpha.xbar
  likelihood.a = alpha.xbar
  posterior.a = alpha.xbar

  prec.ls = n / (sigma^2)
  sd.ls = sqrt(1 / prec.ls)

  post.prec.a = prior.prec.a + prec.ls
  post.var.a = 1 / post.prec.a
  post.sd.a = sqrt(post.var.a)
  post.mean.a = (prior.prec.a * ma0 + n / sigma^2 * Ax.bar) / post.prec.a

  lb = post.mean.a - bnd.mult.a * post.sd.a
  ub = post.mean.a + bnd.mult.a * post.sd.a

  alpha.xbar = seq(lb, ub, length = 1001)

  if(intcpt.prior == "flat") {
    prior.a = rep(1, 1001)
    norm.const = (2 * sum(prior.a) - prior.a[1] - prior.a[1001] * ((ub - lb) / 1000)) / 2
    prior.a = prior.a / norm.const
  } else {
    prior.a = dnorm(alpha.xbar, ma0, sa0)
  }

  likelihood.a = dnorm(alpha.xbar, y.bar, sd.ls)
  posterior.a = dnorm(alpha.xbar, post.mean.a, post.sd.a)

  if(!quiet){
    cat(sprintf("%-11s %-14s %-24s\n", " ", "Posterior Mean", "Posterior Std. Deviation"))
    cat(sprintf("%-11s %-14s %-24s\n", " ", "--------------", "------------------------"))
    cat(sprintf("Intercept: %-14.6g %-24.6g\n", signif(post.mean.a, 4), signif(post.sd.a, 5)))
    cat(sprintf("Slope:     %-14.6g %-24.6g\n", signif(post.mean.b, 4), signif(post.sd.b, 5)))
  }

  y.max = max(c(prior.a, likelihood.a, posterior.a))

  if(drawPlot){
    plot(alpha.xbar, prior.a, type = "l", col = "black", lty = 1,
         ylim = c(0, 1.1 * y.max), xlab = expression(alpha), ylab = "",
         main = expression(paste("Prior, likelihood and posterior for ", alpha[bar(x)], sep = "")),
         sub = "(intercept)")
    lines(alpha.xbar, likelihood.a, lty = 2, col = "red")
    lines(alpha.xbar, posterior.a, lty = 3, col = "blue")
    legend("topleft", cex = 0.7, lty = 1:3, col = c("black", "red", "blue"),
           legend = c("Prior", "Likelihood", "Posterior"), bty = "n")
  }

  if(sigma.known){
    s.e = sqrt(x2.bar - x.bar^2)
    x.lwr = x.bar - 3 * s.e
    x.upr = x.bar + 3 * s.e
    x.values = seq(x.lwr, x.upr, length = 1001)
    pred.y = post.mean.b * (x.values - x.bar) + post.mean.a
    se.pred = sqrt(post.var.a + (x.values - x.bar)^2 * post.var.b + sigma^2)
    t.crit = qt(1 - alpha * .5, n - 2)
    pred.lb = pred.y - t.crit * se.pred
    pred.ub = pred.y + t.crit * se.pred
  } else{
    s.e = sqrt(x2.bar - x.bar^2)
    x.lwr = x.bar - 3 * s.e
    x.upr = x.bar + 3 * s.e
    x.values = seq(x.lwr, x.upr, length = 1001)
    pred.y = post.mean.b * (x.values - x.bar) + post.mean.a
    se.pred = sqrt(post.var.a + (x.values - x.bar)^2 * post.var.b + sigma^2)
    z.crit = qnorm(1 - alpha * 0.5)
    pred.lb = pred.y - z.crit * se.pred
    pred.ub = pred.y + z.crit * se.pred
  }

  y.max = max(pred.ub)
  y.min = min(pred.lb)

  if(drawPlot){
    if(plot.data){
      plot(y~x, main = paste("Predictions with ", round(100 * (1 - alpha)), "% bounds", sep = ""),
           xlab = "x", ylab = "y", ylim = 1.1 * c(y.min, y.max))
      lines(x.values, pred.y, lty = 1, col = "black")
    } else if (drawPlot && !plot.data){
      plot(x.values, pred.y, type = "l", lty = 1, col = "black",
           main = paste("Predictions with ", round(100 * (1 - alpha)), "% bounds", sep = ""),
           xlab = "x", ylab = "y", ylim = 1.1 * c(y.min, y.max))
    }
lines(x.values, pred.lb, lty = 2, col = "red") lines(x.values, pred.ub, lty = 3, col = "blue") legend("topleft", lty = 1:3, col = c("black", "red", "blue"), legend = c("Predicted value", paste(round(100 * (1 - alpha)), "% lower bound", sep = ""), paste(round(100 * (1 - alpha)), "% upper bound", sep = "")), cex = 0.7, bty = "n") } pred.y = NULL pred.se = NULL if(!is.null(pred.x)){ pred.y = post.mean.a + post.mean.b * (pred.x - x.bar) pred.se = sqrt(post.var.a + (pred.x - x.bar)^2 * post.var.b + sigma^2) predicted.values = cbind(pred.x, pred.y, pred.se) fmt = "%-8.4g %-12.4g %-11.5g\n" fmtS = "%-6s %-12s %-11s\n" if(!quiet){ cat(sprintf(fmtS, "x", "Predicted y", "SE")) cat(sprintf(fmtS, "------", "-----------", "-----------")) n.pred.x = length(pred.x) for(i in 1:n.pred.x){ cat(sprintf(fmt, signif(predicted.values[i, 1], 4), signif(predicted.values[i, 2], 4), signif(predicted.values[i, 3], 5))) } } } if(drawPlot){ par(old.par) } interceptResults = list(name = 'alpha[0]', param.x = alpha.xbar, prior = prior.a, likelihood = likelihood.a, posterior = posterior.a, mean = post.mean.a, var = post.var.a) slopeResults = list(name = 'beta', param.x = beta, prior = prior.b, likelihood = likelihood.b, posterior = posterior.b, mean = post.mean.b, var = post.var.b) class(interceptResults) = "Bolstad" class(slopeResults) = "Bolstad" if(!is.null(pred.x)){ invisible(list(intercept = interceptResults, slope = slopeResults, post.coef = c(post.mean.a, post.mean.b), post.coef.sd = c(post.sd.a, post.sd.b), pred.x = pred.x, pred.y = pred.y, pred.se = pred.se)) } else{ invisible(list(intercept = interceptResults, slope = slopeResults, post.coef = c(post.mean.a, post.mean.b), post.coef.sd = c(post.sd.a, post.sd.b))) } }
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/bayes.lin.reg.r
#' Bayesian inference for multiple linear regression
#'
#' bayes.lm is used to fit linear models in the Bayesian paradigm. It can be used to carry out regression,
#' single stratum analysis of variance and analysis of covariance (although these are not tested). This
#' documentation is shamelessly adapted from the lm documentation
#'
#'
#' @param formula an object of class \code{\link[stats]{formula}} (or one that can be coerced to that class): a symbolic
#' description of the model to be fitted. The details of model specification are given under `Details'.
#' @param data an optional data frame, list or environment (or object coercible by \code{\link[base]{as.data.frame}} to a
#' data frame) containing the variables in the model. If not found in data, the variables are taken
#' from \code{environment(formula)}, typically the environment from which \code{bayes.lm} is called.
#' @param subset an optional vector specifying a subset of observations to be used in the fitting process.
#' @param na.action a function which indicates what should happen when the data contain \code{NA}s. The
#' default is set by the \code{\link[stats]{na.action}} setting of options, and is \code{\link[stats]{na.fail}}
#' if that is unset. The `factory-fresh' default is \code{\link[stats]{na.omit}}. Another possible value
#' is \code{NULL}, no action. Value \code{\link[stats]{na.exclude}} can be useful.
#' @param model,x,y logicals. If \code{TRUE} the corresponding components of the fit (the model frame, the model matrix, the response)
#' are returned.
#' @param center logical or numeric. If \code{TRUE} then the covariates will be centered on their means to make them
#' orthogonal to the intercept. This probably makes no sense for models with factors, and if the argument
#' is numeric then it contains a vector of covariate indices to be centered (not implemented yet).
#' @param prior A list containing b0 (A vector of prior coefficients) and V0 (A prior covariance matrix)
#' @param sigma the population standard deviation of the errors. If \code{FALSE} then this is estimated from the residual sum of squares from the ML fit.
#'
#' @details Models for \code{bayes.lm} are specified symbolically. A typical model has the form
#' \code{response ~ terms} where \code{response} is the (numeric) response vector and \code{terms} is a
#' series of terms which specifies a linear predictor for \code{response}. A terms specification of the
#' form \code{first + second} indicates all the terms in \code{first} together with all the terms in
#' \code{second} with duplicates removed. A specification of the form \code{first:second} indicates the
#' set of terms obtained by taking the interactions of all terms in \code{first} with all terms in
#' \code{second}. The specification \code{first*second} indicates the cross of \code{first} and \code{second}.
#' This is the same as \code{first + second + first:second}.
#'
#' See \code{\link[stats]{model.matrix}} for some further details. The terms in the formula will be
#' re-ordered so that main effects come first, followed by the interactions, all second-order,
#' all third-order and so on: to avoid this pass a \code{terms} object as the formula
#' (see \code{\link[stats]{aov}} and \code{demo(glm.vr)} for an example).
#'
#' A formula has an implied intercept term. To remove this use either \code{y ~ x - 1} or
#' \code{y ~ 0 + x}. See \code{\link[stats]{formula}} for more details of allowed formulae.
#' #' \code{bayes.lm} calls the lower level function \code{lm.fit} to get the maximum likelihood estimates #' see below, for the actual numerical computations. For programming only, you may consider doing #' likewise. #' #' \code{subset} is evaluated in the same way as variables in formula, that is first in data and #' then in the environment of formula. #' #' @return \code{bayes.lm} returns an object of class \code{Bolstad}. #' The \code{summary} function is used to obtain and print a summary of the results much like the usual #' summary from a linear regression using \code{\link[stats]{lm}}. #' The generic accessor functions \code{coef, fitted.values and residuals} #' extract various useful features of the value returned by \code{bayes.lm}. Note that the residuals #' are computed at the posterior mean values of the coefficients. #' #' An object of class "Bolstad" from this function is a list containing at least the following components: #' \item{coefficients}{a named vector of coefficients which contains the posterior mean} #' \item{post.var}{a matrix containing the posterior variance-covariance matrix of the coefficients} #' \item{post.sd}{sigma} #' \item{residuals}{the residuals, that is response minus fitted values (computed at the posterior mean)} #' \item{fitted.values}{the fitted mean values (computed at the posterior mean)} #' \item{df.residual}{the residual degrees of freedom} #' \item{call}{the matched call} #' \item{terms}{the \code{\link[stats]{terms}} object used} #' \item{y}{if requested, the response used} #' \item{x}{if requested, the model matrix used} #' \item{model}{if requested (the default), the model frame used} #' \item{na.action}{(where relevant) information returned by \code{model.frame} on the special #' handling of \code{NA}s} #' #' @keywords misc #' @examples #' data(bears) #' bears = subset(bears, Obs.No==1) #' bears = bears[,-c(1,2,3,11,12)] #' bears = bears[ ,c(7, 1:6)] #' bears$Sex = bears$Sex - 1 #' log.bears = data.frame(log.Weight = log(bears$Weight), bears[,2:7]) #' #' b0 = rep(0, 7) #' V0 = diag(rep(1e6,7)) #' #' fit = bayes.lm(log(Weight)~Sex+Head.L+Head.W+Neck.G+Length+Chest.G, data = bears, #' prior = list(b0 = b0, V0 = V0)) #' summary(fit) #' print(fit) #' #' #' ## Dobson (1990) Page 9: Plant Weight Data: #' ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14) #' trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69) #' group <- gl(2, 10, 20, labels = c("Ctl","Trt")) #' weight <- c(ctl, trt) #' #' lm.D9 <- lm(weight ~ group) #' bayes.D9 <- bayes.lm(weight ~ group) #' #' summary(lm.D9) #' summary(bayes.D9) #' #' @export bayes.lm bayes.lm = function(formula, data, subset, na.action, model = TRUE, x = FALSE, y = FALSE, center = TRUE, prior = NULL, sigma = FALSE){ ret.x = x ret.y = y cl = match.call() mf = match.call(expand.dots = FALSE) m = match(c("formula", "data", "subset", "na.action"), names(mf), 0L) mf = mf[c(1L, m)] mf$drop.unused.levels = TRUE mf[[1L]] = quote(stats::model.frame) mf = eval(mf, parent.frame()) mt = attr(mf, "terms") y = model.response(mf, "numeric") if (is.empty.model(mt)) { # x = NULL # z = list(coefficients = if (is.matrix(y)) matrix(, 0, # 3) else numeric(), residuals = y, fitted.values = 0 * # y, rank = 0L, df.residual = if (!is.null(w)) sum(w != # 0) else if (is.matrix(y)) nrow(y) else length(y)) } else { x = model.matrix(mt, mf, contrasts) if(center){ if(is.logical(center)){ np = ncol(x) x[,2:np] = scale(x[,2:np], scale = FALSE, center = TRUE) } } z = z.ls = lm.fit(x, y) p1 = 1:z$rank z$cov.unscaled = 
chol2inv(z$qr$qr[p1, p1, drop = FALSE]) z$prior = prior if(!is.null(prior)){ prior.prec = solve(prior$V0) resVar = if(is.logical(sigma) && !sigma){ sum(z$residuals^2) / z$df.residual }else{ sigma^2 } ls.prec = solve(resVar * z$cov.unscaled) post.prec = prior.prec + ls.prec V1 = solve(post.prec) b1 = V1 %*% prior.prec %*% prior$b0 + V1 %*% ls.prec %*% coef(z.ls) z$post.mean = z$coefficients = as.vector(b1) z$post.var = V1 z$post.sd = sqrt(resVar) }else{ resVar = if(is.logical(sigma) && !sigma){ sum(z$residuals^2) / z$df.residual }else{ sigma^2 } z$post.mean = z$coefficients z$post.var = resVar * z$cov.unscaled z$post.sd = sqrt(resVar) } z$fitted.values = x %*% z$post.mean z$residuals = y - z$fitted.values z$df.residual = nrow(x) - ncol(x) } class(z) = c("Bolstad", "lm") z$na.action = attr(mf, "na.action") z$call = cl z$terms = mt if (model) z$model = mf if (ret.x) z$x = x if (ret.y) z$y = y z }
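## Illustrative: posterior standard errors of the coefficients are the square
## roots of the diagonal of the posterior covariance matrix (using the
## plant-weight example from the help page above):
# fit <- bayes.lm(weight ~ group)
# cbind(mean = coef(fit), se = sqrt(diag(fit$post.var)))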
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/bayes.lm.R
#' @importFrom utils tail bayes.t.gibbs = function(x, y, nIter = 10000, nBurn = 1000, sigmaPrior = c("chisq", "gamma")){ sigmaPrior = match.arg(sigmaPrior) nx = length(x) ny = length(y) xbar = mean(x) ybar = mean(y) Sx = nx * xbar Sy = ny * ybar SSx = sum((x - xbar)^2) SSy = sum((y - ybar)^2) if(sigmaPrior == "chisq"){ ## prior mean m0x = median(x) m0y = median(y) ## prior sd s0x = sd(x) s0y = sd(y) ## S0x = s0x^2 * qchisq(0.5, 1) S0y = s0y^2 * qchisq(0.5, 1) ## kappa = 1 kappa1x = kappa + nx kappa1y = kappa + ny N = nIter + nBurn sigma.sq.x = sigma.sq.y = mu.x = mu.y = rep(0, N) ## draw the initial values sigma.sq.x[1] = S0x / rchisq(1, kappa1x) sigma.sq.y[1] = S0y / rchisq(1, kappa1y) mu.x[1] = rnorm(1, m0x, s0x) mu.y[1] = rnorm(1, m0y, s0y) for(i in 2:N){ S1x = S0x + sum((x - mu.x[i - 1])^2) S1y = S0y + sum((y - mu.y[i - 1])^2) sigma.sq.x[i] = S1x / rchisq(1, kappa1x) sigma.sq.y[i] = S1y / rchisq(1, kappa1y) s1x = 1 / sqrt(1 / s0x^2 + nx / sigma.sq.x[i]) s1y = 1 / sqrt(1 / s0y^2 + ny / sigma.sq.y[i]) m1x = m0x * s1x^2 / s0x^2 + Sx * s1x^2 / sigma.sq.x[i] m1y= m0y * s1y^2 / s0y^2 + Sy * s1y^2 / sigma.sq.y[i] mu.x[i] = rnorm(1, m1x, s1x) mu.y[i] = rnorm(1, m1y, s1y) } res = data.frame(mu.x = tail(mu.x, nIter), mu.y = tail(mu.y, nIter), mu.diff = tail(mu.x - mu.y, nIter), sigma.sq.x = tail(sigma.sq.x, nIter), sigma.sq.y = tail(sigma.sq.y, nIter), tstat = tail((mu.x - mu.y) / sqrt(sigma.sq.x / nx + sigma.sq.y / ny), nIter)) }else{ ## prior means and sds ## prior mean m0x = median(x) m0y = median(y) ## prior sd s0x = sd(x) s0y = sd(y) alpha0x = beta0x = alpha0y = beta0y = 0.001 alpha1x = nx / 2 + alpha0x beta1x = beta0x + SSx / 2 alpha1y = ny / 2 + alpha0y beta1y = beta0y + SSy / 2 N = nIter + nBurn sigma.sq.x = sigma.sq.y = mu.x = mu.y = rep(0, N) ## draw the initial values sigma.sq.x[1] = 1/rgamma(1, alpha1x, beta1x) sigma.sq.y[1] = 1/rgamma(1, alpha1y, beta1y) mu.x[1] = rnorm(1, m0x, s0x) mu.y[1] = rnorm(1, m0y, s0y) for(i in 2:N){ beta1x = beta0x + sum((x - mu.x[i - 1])^2) * 0.5 sigma.sq.x[i] = 1 / rgamma(1, alpha1x, beta1x) beta1y = beta0y + sum((y - mu.y[i - 1])^2) * 0.5 sigma.sq.y[i] = 1 / rgamma(1, alpha1y, beta1y) s1x = 1 / sqrt(1 / s0x^2 + nx / sigma.sq.x[i]) s1y = 1 / sqrt(1 / s0y^2 + ny / sigma.sq.y[i]) m1x = m0x * s1x^2 / s0x^2 + Sx * s1x^2 / sigma.sq.x[i] m1y= m0y * s1y^2 / s0y^2 + Sy * s1y^2 / sigma.sq.y[i] mu.x[i] = rnorm(1, m1x, s1x) mu.y[i] = rnorm(1, m1y, s1y) } res = data.frame(mu.x = tail(mu.x, nIter), mu.y = tail(mu.y, nIter), mu.diff = tail(mu.x - mu.y, nIter), sigma.sq.x = tail(sigma.sq.x, nIter), sigma.sq.y = tail(sigma.sq.y, nIter), tstat = tail((mu.x - mu.y) / sqrt(sigma.sq.x / nx + sigma.sq.y / ny), nIter)) } return(res) }
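## Illustrative summary of the Gibbs output (bayes.t.gibbs is internal; it is
## driven by bayes.t.test below when var.equal = FALSE):
# res <- bayes.t.gibbs(x, y, nIter = 10000, nBurn = 1000, sigmaPrior = "chisq")
# mean(res$mu.diff)                       # posterior mean difference
# quantile(res$mu.diff, c(0.025, 0.975))  # 95% credible interval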
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/bayes.t.gibbs.R
#' Bayesian t-test
#'
#' @description Performs one and two sample t-tests (in the Bayesian hypothesis testing framework) on vectors of data
#' @param x a (non-empty) numeric vector of data values.
#' @param y an optional (non-empty) numeric vector of data values.
#' @param alternative a character string specifying the alternative hypothesis, must be one of
#' \code{"two.sided"} (default), \code{"greater"} or \code{"less"}. You can specify just the initial
#' letter.
#' @param mu a number indicating the true value of the mean (or difference in means if you are performing a two sample test).
#' @param paired a logical indicating whether you want a paired t-test.
#' @param var.equal a logical variable indicating whether to treat the two variances as being equal.
#' If \code{TRUE} (default) then the pooled variance is used to estimate the variance, otherwise the
#' Welch (or Satterthwaite) approximation to the degrees of freedom is used. The unequal variance case is
#' implemented using Gibbs sampling.
#' @param conf.level confidence level of interval.
#' @param prior a character string indicating which prior should be used for the means, must be one of
#' \code{"jeffreys"} (default) for independent Jeffreys' priors on the unknown mean(s) and variance(s),
#' or \code{"joint.conj"} for a joint conjugate prior.
#' @param m if the joint conjugate prior is used then the user must specify a prior mean in the one-sample
#' or paired case, or two prior means in the two-sample case. Note that if the hypothesis is that there is no difference
#' between the means in the two-sample case, then the values of the prior means should usually be equal, and if so,
#' then their actual values are irrelevant. This parameter is not used if the user chooses a Jeffreys' prior.
#' @param n0 if the joint conjugate prior is used then the user must specify the prior precision
#' or precisions in the two sample case that represent our level of uncertainty
#' about the true mean(s). This parameter is not used if the user chooses a Jeffreys' prior.
#' @param sig.med if the joint conjugate prior is used then the user must specify the prior median
#' for the unknown standard deviation. This parameter is not used if the user chooses a Jeffreys' prior.
#' @param kappa if the joint conjugate prior is used then the user must specify the degrees of freedom
#' for the inverse chi-squared distribution used for the unknown standard deviation. Usually the default
#' of 1 will be sufficient. This parameter is not used if the user chooses a Jeffreys' prior.
#' @param sigmaPrior If a two-sample t-test with unequal variances is desired then the user must choose between
#' using a chi-squared prior ("chisq") or a gamma prior ("gamma") for the unknown population standard deviations.
#' This parameter is only used if \code{var.equal} is set to \code{FALSE}.
#' @param nIter Gibbs sampling is used when a two-sample t-test with unequal variances is desired.
#' This parameter controls the sample size from the posterior distribution.
#' @param nBurn Gibbs sampling is used when a two-sample t-test with unequal variances is desired.
#' This parameter controls the number of iterations used to burn in the chains before the procedure
#' starts sampling in order to reduce correlation with the starting values.
#' @param formula a formula of the form \code{lhs ~ rhs} where lhs is a numeric variable giving the data values and rhs a factor with two
#' levels giving the corresponding groups.
#' @param data an optional matrix or data frame (or similar: see \code{\link{model.frame}}) containing
#' the variables in the formula \code{formula}. By default the variables are taken from \code{environment(formula)}.
#' @param subset currently ignored.
#' @param na.action currently ignored.
#' @param ... any additional arguments
#' @return A list with class "htest" containing the following components:
#' \item{statistic}{the value of the t-statistic.}
#' \item{parameter}{the degrees of freedom for the t-statistic.}
#' \item{p.value}{the p-value for the test.}
#' \item{conf.int}{a confidence interval for the mean appropriate to the specified alternative hypothesis.}
#' \item{estimate}{the estimated mean or difference in means depending on whether it was a one-sample test or a two-sample test.}
#' \item{null.value}{the specified hypothesized value of the mean or mean difference depending on whether it was a one-sample test or a two-sample test.}
#' \item{alternative}{a character string describing the alternative hypothesis.}
#' \item{method}{a character string indicating what type of t-test was performed.}
#' \item{data.name}{a character string giving the name(s) of the data.}
#' \item{result}{an object of class \code{Bolstad}}
#' @examples
#' bayes.t.test(1:10, y = c(7:20)) # P = 3.691e-01
#'
#' ## Same example but using the joint conjugate prior
#' ## We set the prior means equal (and it doesn't matter what the value is)
#' ## the prior precision is 0.01, which is a prior standard deviation of 10
#' ## we're saying the true difference of the means is between [-25.7, 25.7]
#' ## with probability equal to 0.99. The median value for the prior on sigma is 2
#' ## and we're using a scaled inverse chi-squared prior with 1 degree of freedom
#' bayes.t.test(1:10, y = c(7:20), var.equal = TRUE, prior = "joint.conj",
#'              m = c(0,0), n0 = rep(0.01, 2), sig.med = 2)
#'
#' ## Same example but with a large outlier. Note the assumption of equal variances isn't sensible
Note the assumption of equal variances isn't sensible #' bayes.t.test(1:10, y = c(7:20, 200)) # P = .1979 -- NOT significant anymore #' #' ## Classical example: Student's sleep data #' plot(extra ~ group, data = sleep) #' #' ## Traditional interface #' with(sleep, bayes.t.test(extra[group == 1], extra[group == 2])) #' #' ## Formula interface #' bayes.t.test(extra ~ group, data = sleep) #' @author R Core with Bayesian internals added by James Curran #' @export bayes.t.test = function(x, ...){ UseMethod("bayes.t.test") } #' @describeIn bayes.t.test Bayesian t-test #' @export bayes.t.test.default = function(x, y = NULL, alternative = c("two.sided", "less", "greater"), mu = 0, paired = FALSE, var.equal = TRUE, conf.level = 0.95, prior = c("jeffreys", "joint.conj"), m = NULL, n0 = NULL, sig.med = NULL, kappa = 1, sigmaPrior = "chisq", nIter = 10000, nBurn = 1000, ...){ prior = match.arg(prior) if(prior == "joint.conj" & (is.null(m) | is.null(n0) | is.null(sig.med) | kappa < 1)){ m1 = "If you are using the joint conjugate prior, you need so specify:" m2 = "the prior mean(s), the prior precision(s), the prior median standard deviation," m3 = "and the degrees of freedom associated with the prior for the standard deviation" stop(paste(m1, m2, m3, sep = "\n")) } ## Shamelessly copied from t.test.default alternative = match.arg(alternative) if (!missing(mu) && (length(mu) != 1 || is.na(mu))) stop("'mu' must be a single number") if (!missing(conf.level) && (length(conf.level) != 1 || !is.finite(conf.level) || conf.level < 0 || conf.level > 1)) stop("'conf.level' must be a single number between 0 and 1") param.x = NULL tstat = 0 df = 0 pval = 0 cint = 0 estimate = 0 method = NULL if (!is.null(y)) { dname = paste(deparse(substitute(x)), "and", deparse(substitute(y))) if (paired) xok = yok = complete.cases(x, y) else { yok = !is.na(y) xok = !is.na(x) } y = y[yok] } else { dname = deparse(substitute(x)) if (paired) stop("'y' is missing for paired test") xok = !is.na(x) yok = NULL } x = x[xok] if (paired) { x = x - y y = NULL } nx = length(x) mx = mean(x) SSx = sum((x-mx)^2) vx = var(x) bolstadResult = NULL if (is.null(y)) { ## one sample or paired if (nx < 2) stop("not enough 'x' observations") stderr = sqrt(vx/nx) if (stderr < 10 * .Machine$double.eps * abs(mx)) stop("data are essentially constant") name = 'mu' name = if(!paired) 'mu' else 'mu[d]' if(prior == "jeffreys"){ S1 = SSx kappa1 = nx - 1 npost = nx mpost = mx se.post = sqrt(S1 / kappa1 / nx) df = kappa1 param.x = seq(mx - 4 * sqrt(vx), mx + 4 * sqrt(vx), length = 200) prior = 1 / diff(range(x)) likelihood = dnorm(mx, param.x, se.post) std.x = (param.x - mpost) / se.post posterior = dt(std.x, df = df) bolstadResult = list(name = name, param.x = param.x, prior = prior, likelihood = likelihood, posterior = posterior, mean = mpost, var = se.post^2, cdf = function(x)pt((x - mpost) / se.post, df = df), quantileFun = function(probs, ...){se.post * qt(probs, df = df, ...) 
+ mpost}) class(bolstadResult) = 'Bolstad' tstat = (mpost - mu) / se.post estimate = mpost }else{ S0 = qchisq(0.5, kappa) * sig.med^2 S1 = SSx + S0 kappa1 = nx + kappa npost = n0 + nx sigma.sq.B = (S1 + (n0 * nx / kappa1) * (mx - m)^2)/npost mpost = (nx * mx + n0 * m) / npost se.post = sqrt(sigma.sq.B / kappa1) df = kappa1 estimate = mpost tstat = (mpost - mu) / se.post lb = min(mpost - 4 * se.post, m - 4 * sqrt(1 / n0)) ub = max(mpost + 4 * se.post, m + 4 * sqrt(1 / n0)) param.x = seq(lb, ub, length = 200) prior = dnorm(param.x, m, sqrt(1 / n0)) likelihood = dnorm(mx, param.x, se.post) std.x = (param.x - mpost) / se.post posterior = dt(std.x, df = df) } method = if (paired) "Paired t-test" else "One Sample t-test" } else { ## two sample ny = length(y) if (nx < 1 || (!var.equal && nx < 2)) stop("not enough 'x' observations") if (ny < 1 || (!var.equal && ny < 2)) stop("not enough 'y' observations") if (var.equal && nx + ny < 3) stop("not enough observations") name = 'mu[d]' my = mean(y) vy = var(y) stderr = sqrt((sum((x - mx)^2) + sum((y - my)^2) / (nx + ny - 2)) * (1 / nx + 1 / ny)) if (stderr < 10 * .Machine$double.eps * max(abs(mx), abs(my))) stop("data are essentially constant") SSp = sum((x - mx)^2) + sum((y - my)^2) method = paste(if (!var.equal) "Gibbs", "Two Sample t-test") estimate = c(mx, my) ## this may get changed elsewhere names(estimate) = c("posterior mean of x", "posterior mean of y") lb = mx - my - 4 * sqrt(vx/nx + vy/ny) ub = mx - my + 4 * sqrt(vx/nx + vy/ny) param.x = seq(lb, ub, length = 1000) name = 'mu[1]-mu[2]' if (var.equal) { if(prior == "jeffreys"){ kappa1 = nx + ny -2 kappa1 = nx + ny - 2 sigma.sq.Pooled = SSp / kappa1 mpost = mx - my se.post = sqrt(sigma.sq.Pooled * (1/nx + 1/ny)) df = kappa1 prior = 1 / diff(range(param.x)) likelihood = dnorm(mx - my, param.x, se.post) posterior = dt((param.x - mpost)/se.post, df) bolstadResult = list(name = name, param.x = param.x, prior = prior, likelihood = likelihood, posterior = posterior, mean = mpost, var = se.post^2, cdf = function(x)pt((x - mpost) / se.post, df = df), quantileFun = function(probs, ...){se.post * qt(probs, df = df, ...) + mpost}) class(bolstadResult) = 'Bolstad' estimate = c(mx, my) names(estimate) = c("posterior mean of x", "posterior mean of y") tstat = (mpost - mu) / se.post }else{ kappa1 = kappa + nx + ny n1post = nx + n0[1] n2post = ny + n0[2] S = qchisq(0.5, 1) * sig.med^2 S1 = S + SSp m1post = (nx * mx + n0[1] * m[1]) / n1post m2post = (ny * my + n0[2] * m[2]) / n2post sigma.sq.B = S1 / kappa1 mpost = m1post - m2post se.post = sqrt(sigma.sq.B * (1/n1post + 1/n2post)) df = kappa1 prior = dnorm(param.x, m[1] - m[2], sqrt(sum(1/n0))) likelihood = dnorm(mx - my, param.x, se.post) posterior = dt((param.x - mpost)/se.post, df) bolstadResult = list(name = name, param.x = param.x, prior = prior, likelihood = likelihood, posterior = posterior, mean = mpost, var = se.post^2, cdf = function(x)pt((x - mpost) / se.post, df = df), quantileFun = function(probs, ...){se.post * qt(probs, df = df, ...) 
+ mpost}) class(bolstadResult) = 'Bolstad' estimate = c(m1post, m2post) names(estimate) = c("posterior mean of x", "posterior mean of y") tstat = (mpost - mu)/se.post } }else { res = bayes.t.gibbs(x, y, nIter, nBurn, sigmaPrior) se.post = sd(res$mu.diff) d = density(res$mu.diff, from = param.x[1], to = param.x[length(param.x)]) param.x = d$x likelihood = dnorm(mx - my, param.x, se.post) posterior = d$y mpost = mean(res$mu.diff) vpost = var(res$mu.diff) bolstadResult = list(name = name, param.x = d$x, prior = NULL, likelihood = likelihood, posterior = posterior, mean = mpost, var = vpost, cdf = function(x){r = sintegral(param.x, posterior); Fx = splinefun(r$x, r$y); return(Fx(x))}, quantileFun = function(probs, ...){quantile(res$mu.diff, probs = probs, ...)}) class(bolstadResult) = 'Bolstad' estimate = c(mean(res$mu.x), mean(res$mu.y)) names(estimate) = c("posterior mean of x", "posterior mean of y") tstat = mean(res$tstat) se.post = sd(res$mu.diff) snx = mean(res$sigma.sq.x / nx) sny = mean(res$sigma.sq.y / ny) df = (snx + sny)^2 / (snx^2 / (nx - 1) + sny^2 / (ny - 1)) } } if (alternative == "less") { pval = pt(tstat, df) cint = c(-Inf, tstat + qt(conf.level, df)) } else if (alternative == "greater") { pval = pt(tstat, df, lower.tail = FALSE) cint = c(tstat - qt(conf.level, df), Inf) } else { pval = 2 * pt(-abs(tstat), df) alpha = 1 - conf.level cint = qt(1 - alpha/2, df) cint = tstat + c(-cint, cint) } cint = mu + cint * se.post names(tstat) = "t" names(df) = "df" names(mu) = if (paired || !is.null(y)) "difference in means" else "mean" attr(cint, "conf.level") = conf.level rval = list(statistic = tstat, parameter = df, p.value = pval, conf.int = cint, estimate = estimate, null.value = mu, alternative = alternative, method = method, data.name = dname, result = bolstadResult) class(rval) = "htest" return(rval) } #' @describeIn bayes.t.test Bayesian t-test #' @export bayes.t.test.formula = function(formula, data, subset, na.action, ...){ ## shamelessly hacked from t.test.formula if (missing(formula) || (length(formula) != 3L) || (length(attr(terms(formula[-2L]), "term.labels")) != 1L)) stop("'formula' missing or incorrect") m = match.call(expand.dots = FALSE) if (is.matrix(eval(m$data, parent.frame()))) m$data = as.data.frame(data) m[[1L]] = quote(stats::model.frame) m$... = NULL mf = eval(m, parent.frame()) DNAME = paste(names(mf), collapse = " by ") names(mf) = NULL response = attr(attr(mf, "terms"), "response") g = factor(mf[[-response]]) if (nlevels(g) != 2L) stop("grouping factor must have exactly 2 levels") DATA = setNames(split(mf[[response]], g), c("x", "y")) y = do.call("bayes.t.test", c(DATA, list(...))) y$data.name = DNAME if (length(y$estimate) == 2L) names(y$estimate) = paste("mean in group", levels(g)) y }
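## Usage sketch (not run at load): the unequal-variance case documented above is
## handled by Gibbs sampling via the internal sampler. This is a minimal,
## hedged illustration using only the exported interface; the data, seed, and
## default chain lengths are illustrative assumptions, not package defaults
## beyond those documented above.
if (FALSE) {
  set.seed(123)                       # the Gibbs sampler is stochastic
  x = rnorm(15, mean = 10, sd = 1)
  y = rnorm(20, mean = 12, sd = 4)    # deliberately unequal spread
  fit = bayes.t.test(x, y, var.equal = FALSE, sigmaPrior = "chisq",
                     nIter = 10000, nBurn = 1000)
  fit$parameter   # Welch-style df computed from the posterior draws
  fit$estimate    # posterior means of the two group means
  fit$conf.int    # interval for the difference in means
}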
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/bayes.t.test.R
#' Binomial sampling with a beta prior
#'
#' Evaluates and plots the posterior density for \eqn{\pi}{pi}, the probability
#' of a success in a Bernoulli trial, with binomial sampling and a continuous
#' \eqn{beta(a,b)} prior.
#'
#' @param x the number of observed successes in the binomial experiment.
#' @param n the number of trials in the binomial experiment.
#' @param a parameter for the beta prior - must be greater than zero
#' @param b parameter for the beta prior - must be greater than zero
#' @param pi A range of values for the prior to be calculated over.
#' @param \dots additional arguments that are passed to \code{Bolstad.control}
#' @return An object of class 'Bolstad' is returned. This is a list with the
#' following components: \item{prior}{the prior density of \eqn{\pi}{pi}, i.e.
#' the \eqn{beta(a,b)} density} \item{likelihood}{the likelihood of \eqn{x}
#' given \eqn{\pi}{pi} and \eqn{n}, i.e. the
#' \eqn{binomial(n,\pi)}{binomial(n,pi)} density} \item{posterior}{the
#' posterior density of \eqn{\pi}{pi} given \eqn{x} and \eqn{n} - i.e. the
#' \eqn{beta(a+x,b+n-x)} density} \item{pi}{the values of \eqn{\pi}{pi} for
#' which the posterior density was evaluated} \item{mean}{the posterior mean}
#' \item{var}{the posterior variance} \item{sd}{the posterior std. deviation}
#' \item{quantiles}{a set of quantiles from the posterior} \item{cdf}{a
#' cumulative distribution function for the posterior} \item{quantileFun}{a
#' quantile function for the posterior}
#' @seealso \code{\link{binodp}} \code{\link{binogcp}}
#' @keywords misc
#' @examples
#'
#' ## simplest call with 6 successes observed in 8 trials and a beta(1,1) uniform
#' ## prior
#' binobp(6,8)
#'
#' ## 6 successes observed in 8 trials and a non-uniform beta(0.5,6) prior
#' binobp(6,8,0.5,6)
#'
#' ## 4 successes observed in 12 trials with a non uniform beta(3,3) prior
#' ## plot the stored prior, likelihood and posterior
#' results = binobp(4, 12, 3, 3)
#' decomp(results)
#'
#' @export binobp
binobp = function(x, n, a = 1, b = 1, pi = seq(0, 1, by = 0.001), ...){
  ## n - the number of trials in the binomial
  ## x - the number of observed successes
  ## a,b - the parameters of the Beta prior density (must be > 0)
  ## the prior, likelihood, posterior, mean, variance and
  ## std. deviation are returned as a list

  if(x > n)
    stop("The number of observed successes (x) cannot be greater than the number of trials (n)")
  if(a <= 0 || b <= 0)
    stop("The parameters of the prior must be greater than zero")

  prior = dbeta(pi, a, b)
  likelihood = dbinom(x, n, prob = pi)
  posterior = dbeta(pi, a + x, b + n - x)

  if(Bolstad.control(...)$plot){
    finite = is.finite(posterior)
    ymax = 1.1 * max(posterior[is.finite(posterior)], prior[is.finite(prior)])

    plot(posterior[finite] ~ pi[finite], ylim = c(0, ymax), type = "l",
         lty = 1, xlab = expression(pi), ylab = "Density", col = "blue")
    finite = is.finite(prior)
    lines(prior[finite] ~ pi[finite], lty = 2, col = "red")

    ## left = min(pi) + diff(range(pi)) * 0.05
    legend("topleft", bty = "n", lty = 1:2, legend = c("Posterior", "Prior"),
           col = c("blue", "red"), cex = 0.7)
  }

  m1 = (a + x) / (a + b + n)
  v1 = m1 * (1 - m1) / (a + b + n + 1)
  s1 = sqrt(v1)

  quiet = Bolstad.control(...)$quiet
  if(!quiet){
    cat(paste("Posterior Mean           : ", round(m1, 7), "\n"))
    cat(paste("Posterior Variance       : ", round(v1, 7), "\n"))
    cat(paste("Posterior Std. Deviation : ", round(s1, 7), "\n"))
  }

  probs = c(0.005, 0.01, 0.025, 0.05, 0.5, 0.95, 0.975, 0.99, 0.995)
  qtls = qbeta(probs, a + x, b + n - x)
  names(qtls) = probs

  if(!quiet){
    cat("\nProb.\tQuantile \n")
    cat("------\t---------\n")
    for(i in 1:length(probs)){
      cat(sprintf("%5.3f\t%9.7f\n", round(probs[i], 3), round(qtls[i], 7)))
    }
  }

  results = list(name = 'pi', param.x = pi,
                 prior = prior, likelihood = likelihood, posterior = posterior,
                 pi = pi, # for backwards compat. only
                 mean = m1, var = v1, sd = s1,
                 quantiles = qtls,
                 cdf = function(y, ...){pbeta(y, shape1 = a + x, shape2 = b + n - x, ...)},
                 quantileFun = function(probs, ...){qbeta(probs, shape1 = a + x, shape2 = b + n - x, ...)})
  class(results) = 'Bolstad'
  invisible(results)
}
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/binobp.r
#' Binomial sampling with a discrete prior
#'
#' Evaluates and plots the posterior density for \eqn{\pi}{pi}, the probability
#' of a success in a Bernoulli trial, with binomial sampling and a discrete
#' prior on \eqn{\pi}{pi}
#'
#' @param x the number of observed successes in the binomial experiment.
#' @param n the number of trials in the binomial experiment.
#' @param pi a vector of possibilities for the probability of success in a
#' single trial. if \code{pi} is \code{NULL} then a discrete uniform prior for
#' \eqn{\pi}{pi} will be used.
#' @param pi.prior the associated prior probability mass.
#' @param n.pi the number of possible \eqn{\pi}{pi} values in the prior
#' @param \dots additional arguments that are passed to \code{Bolstad.control}
#' @return A list will be returned with the following components: \item{pi}{the
#' vector of possible \eqn{\pi}{pi} values used in the prior}
#' \item{pi.prior}{the associated probability mass for the values in
#' \eqn{\pi}{pi}} \item{likelihood}{the scaled likelihood function for
#' \eqn{\pi}{pi} given \eqn{x} and \eqn{n}} \item{posterior}{the posterior
#' probability of \eqn{\pi}{pi} given \eqn{x} and \eqn{n}} \item{f.cond}{the
#' conditional distribution of \eqn{x} given \eqn{\pi}{pi} and \eqn{n}}
#' \item{f.joint}{the joint distribution of \eqn{x} and \eqn{\pi}{pi} given
#' \eqn{n}} \item{f.marg}{the marginal distribution of \eqn{x}}
#' @seealso \code{\link{binobp}} \code{\link{binogcp}}
#' @keywords misc
#' @examples
#'
#' ## simplest call with 6 successes observed in 8 trials and a uniform prior
#' binodp(6,8)
#'
#' ## same as previous example but with more possibilities for pi
#' binodp(6, 8, n.pi = 100)
#'
#' ## 6 successes, 8 trials and a non-uniform discrete prior
#' pi = seq(0, 1, by = 0.01)
#' pi.prior = runif(101)
#' pi.prior = sort(pi.prior / sum(pi.prior))
#' binodp(6, 8, pi, pi.prior)
#'
#' ## 5 successes, 6 trials, non-uniform prior
#' pi = c(0.3, 0.4, 0.5)
#' pi.prior = c(0.2, 0.3, 0.5)
#' results = binodp(5, 6, pi, pi.prior)
#'
#' ## plot the results from the previous example using a side-by-side barplot
#' results.matrix = rbind(results$pi.prior,results$posterior)
#' colnames(results.matrix) = pi
#' barplot(results.matrix, col = c("red", "blue"), beside = TRUE,
#'         xlab = expression(pi), ylab = expression(Probability(pi)))
#' box()
#' legend("topleft", bty = "n", cex = 0.7,
#'        legend = c("Prior", "Posterior"), fill = c("red", "blue"))
#'
#' @export binodp
binodp = function(x, n, pi = NULL, pi.prior = NULL, n.pi = 10, ...){
  ## n - the number of trials in the binomial
  ## x - the number of observed successes
  ## pi - the probability of success
  ## pi.prior - the associated prior probability mass

  if(x > n)
    stop("The number of observed successes (x) cannot be greater than the number of trials (n)")
  if(n.pi < 3)
    stop("Number of prior values of pi must be greater than 2")

  if(is.null(pi) || is.null(pi.prior)){
    pi = seq(0, 1, length = n.pi)
    pi.prior = rep(1 / n.pi, n.pi)
  }

  if(any(pi < 0) || any(pi > 1)) ## check that probabilities lie on [0,1]
    stop("Values of pi must be between 0 and 1 inclusive")

  if(any(pi.prior < 0) || any(pi.prior > 1))
    stop("Prior probabilities must be between 0 and 1 inclusive")

  if(round(sum(pi.prior), 7) != 1){
    warning("The prior probabilities did not sum to 1, therefore the prior has been normalized")
    pi.prior = pi.prior / sum(pi.prior)
  }

  ## make sure possible values are in ascending order
  o = order(pi)
  pi = pi[o]
  pi.prior = pi.prior[o]
  n.pi = length(pi)

  likelihood = dbinom(x, n, pi)
  lp = likelihood * pi.prior
  posterior = lp / sum(lp)

  if(Bolstad.control(...)$plot) {
    plot(pi, posterior, ylim = c(0, 1.1 * max(posterior, pi.prior)), pch = 20,
         col = "blue", xlab = expression(pi), ylab = expression(Probability(pi)))
    points(pi, pi.prior, pch = 20, col = "red")

    legend("topleft", bty = "n", fill = c("blue", "red"),
           legend = c("Posterior", "Prior"), cex = 0.7)
  }

  ## calculate the conditional distribution of x given pi and n
  f.cond = matrix(0, nrow = n.pi, ncol = n + 1)
  rownames(f.cond) = as.character(round(pi, 3))
  colnames(f.cond) = as.character(0:n)

  for(i in 1:n.pi)
    f.cond[i, ] = dbinom(0:n, n, pi[i])

  ## calculate the joint distribution of pi and x given n
  f.joint = diag(pi.prior) %*% f.cond

  ## calculate the marginal distribution of x
  f.marg = matrix(1, nrow = 1, ncol = n.pi) %*% f.joint

  quiet = Bolstad.control(...)$quiet
  if(!quiet){
    cat("Conditional distribution of x given pi and n:\n\n")
    print(round(f.cond, 4))

    cat("\nJoint distribution:\n\n")
    print(round(f.joint, 4))

    cat("\nMarginal distribution of x:\n\n")
    print(round(f.marg, 4))
    cat("\n\n")

    ## finally display the prior, likelihood, and posterior
    results = cbind(pi.prior, likelihood, posterior)
    rownames(results) = as.character(round(pi, 3))
    colnames(results) = c("Prior", "Likelihood", "Posterior")
    print(results)
  }

  mx = sum(pi * posterior)
  vx = sum((pi - mx)^2 * posterior)

  results = list(name = 'pi', param.x = pi,
                 prior = pi.prior, likelihood = likelihood, posterior = posterior,
                 mean = mx, var = vx,
                 cdf = function(X, ...){cumDistFun(X, pi, posterior)},
                 quantileFun = function(probs, ...){qFun(probs, pi, posterior)},
                 pi = pi, pi.prior = pi.prior, ## this duplication is for backward compatibility
                 f.cond = f.cond, f.joint = f.joint, f.marg = f.marg)
  class(results) = 'Bolstad'
  invisible(results)
}
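## Numerical check (not run at load): the posterior returned above is Bayes'
## theorem applied column-wise -- the column of the joint distribution for the
## observed x, renormalised by the marginal P(x). A minimal sketch using the
## stored f.joint and f.marg components; plot/quiet passed through ... to
## Bolstad.control() are assumed to suppress output.
if (FALSE) {
  res = binodp(5, 6, pi = c(0.3, 0.4, 0.5), pi.prior = c(0.2, 0.3, 0.5),
               plot = FALSE, quiet = TRUE)
  byBayes = res$f.joint[, "5"] / res$f.marg[1, "5"]   # columns are named "0".."n"
  all.equal(unname(byBayes), unname(res$posterior))   # should be TRUE
}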
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/binodp.r
#' Binomial sampling with a general continuous prior
#'
#' Evaluates and plots the posterior density for \eqn{\pi}{pi}, the probability
#' of a success in a Bernoulli trial, with binomial sampling and a general
#' continuous prior on \eqn{\pi}{pi}
#'
#' @param x the number of observed successes in the binomial experiment.
#' @param n the number of trials in the binomial experiment.
#' @param density may be one of "beta", "exp", "normal", "uniform"
#' or "user"
#' @param params if density is one of the parametric forms then a vector
#' of parameters must be supplied. beta: a, b; exp: rate; normal: mean, sd;
#' uniform: min, max
#' @param n.pi the number of possible \eqn{\pi}{pi} values in the prior
#' @param pi a vector of possibilities for the probability of success in a
#' single trial. This must be set if density = "user".
#' @param pi.prior the associated prior probability mass. This must be set if
#' density = "user".
#' @param \dots additional arguments that are passed to \code{Bolstad.control}
#' @return A list will be returned with the following components:
#' \item{likelihood}{the scaled likelihood function for \eqn{\pi}{pi} given
#' \eqn{x} and \eqn{n}} \item{posterior}{the posterior probability of
#' \eqn{\pi}{pi} given \eqn{x} and \eqn{n}} \item{pi}{the vector of possible
#' \eqn{\pi}{pi} values used in the prior} \item{pi.prior}{the associated
#' probability mass for the values in \eqn{\pi}{pi}}
#' @seealso \code{\link{binobp}} \code{\link{binodp}}
#' @keywords misc
#' @examples
#'
#' ## simplest call with 6 successes observed in 8 trials and a continuous
#' ## uniform prior
#' binogcp(6, 8)
#'
#' ## 6 successes, 8 trials and a Beta(2, 2) prior
#' binogcp(6, 8, density = "beta", params = c(2, 2))
#'
#' ## 5 successes, 10 trials and a N(0.5, 0.25) prior
#' binogcp(5, 10, density = "normal", params = c(0.5, 0.25))
#'
#' ## 4 successes, 12 trials with a user specified triangular continuous prior
#' pi = seq(0, 1, by = 0.001)
#' pi.prior = rep(0, length(pi))
#' priorFun = createPrior(x = c(0, 0.5, 1), wt = c(0, 2, 0))
#' pi.prior = priorFun(pi)
#' results = binogcp(4, 12, "user", pi = pi, pi.prior = pi.prior)
#'
#' ## find the posterior CDF using the previous example and Simpson's rule
#' myCdf = cdf(results)
#' plot(myCdf, type = "l", xlab = expression(pi[0]),
#'      ylab = expression(Pr(pi <= pi[0])))
#'
#' ## use the quantile function to find the 95% credible region.
#' qtls = quantile(results, probs = c(0.025, 0.975))
#' cat(paste("Approximate 95% credible interval : ["
#'           , round(qtls[1], 4), " ", round(qtls[2], 4), "]\n", sep = ""))
#'
#' ## find the posterior mean, variance and std. deviation
#' ## using the output from the previous example
#' post.mean = mean(results)
#' post.var = var(results)
#' post.sd = sd(results)
#'
#' # calculate an approximate 95% credible region using the posterior mean and
#' # std. deviation
#' lb = post.mean - qnorm(0.975) * post.sd
#' ub = post.mean + qnorm(0.975) * post.sd
#'
#' cat(paste("Approximate 95% credible interval : ["
#'           , round(lb, 4), " ", round(ub, 4), "]\n", sep = ""))
#'
#' @export binogcp
binogcp = function(x, n, density = c("uniform", "beta", "exp", "normal", "user"),
                   params = c(0, 1), n.pi = 1000, pi = NULL, pi.prior = NULL, ...){
  ## n - the number of trials in the binomial
  ## x - the number of observed successes
  ## density - may be one of "exp", "normal", "uniform" or "user"
  ## params - if the density is not "user" then a vector of parameters
  ##          must be supplied:
  ##            exp: rate
  ##            normal: mean, sd
  ##            uniform: min, max
  ## n.pi - the number of points to divide the [0, 1] interval into
  ## pi and pi.prior are only specified if density == "user"
  ## pi - the probability of success
  ## pi.prior - the associated prior probability mass

  if(x > n)
    stop("The number of observed successes (x) cannot be greater than the number of trials (n)")
  if(n.pi < 100)
    stop("Number of prior values of pi must be at least 100")

  if(is.null(pi) || is.null(pi.prior))
    pi = ppoints(n.pi)
  else{
    if(length(pi) != length(pi.prior))
      stop("pi and pi.prior must have same length")
    if(any(pi < 0)) ## check that the values of pi are non-negative
      stop("Values of pi must be >= 0")
  }

  density = match.arg(density)

  if(density == "user" || !is.null(pi.prior)){
    if(density != "user"){
      warning("The density is 'user' because you have specified values or a function for pi.prior")
    }
    if(is.function(pi.prior)){
      auc = integrate(pi.prior, 0, 1)
      normConst = if(abs(auc$value - 1) > 3 * auc$abs.error){
        auc$value
      }else{
        1
      }
      pi.prior = pi.prior(pi) / normConst
      if(normConst != 1){
        warning("Prior didn't integrate to one so it has been normalised.")
      }
    }
  }else if(density == "beta"){
    if(length(params) < 2){
      warning("Beta prior requires two shape parameters. Default value Beta(1, 1) = Uniform is being used")
      a = 1
      b = 1
    }else{
      if(params[1] <= 0 || params[2] <= 0)
        stop("Beta prior shape parameters must be greater than zero")
      a = params[1]
      b = params[2]
    }
    pi.prior = dbeta(pi, a, b)
  }else if(density == "exp"){
    if(params[1] <= 0){
      stop("Parameter for exponential density must be greater than zero")
    }else{
      rate = params[1]
      pi.prior = dexp(pi, rate)
    }
  }else if(density == "normal"){
    if(length(params) < 2)
      stop("Normal prior requires a mean and std. deviation")
    else{
      mx = params[1]
      sx = params[2]
      if(sx <= 0)
        stop("Std. deviation for normal prior must be greater than zero")
      pi.prior = dnorm(pi, mx, sx)
    }
  }else if(density == "uniform"){
    if(length(params) < 2)
      stop("Uniform prior requires a minimum and a maximum")
    else{
      minx = params[1]
      maxx = params[2]
      if(maxx <= minx)
        stop("Maximum must be greater than minimum for a uniform prior")
      pi.prior = dunif(pi, minx, maxx)
    }
  }else{
    stop(paste("Unrecognized density :", density))
  }

  likelihood = (pi^x) * ((1 - pi)^(n - x))

  ## Numerically integrate the denominator.
  ## First calculate the height of the function to be integrated
  f.x.pi = likelihood * pi.prior

  ## Now get a linear approximation so that we don't have to worry about
  ## the number of points specified by the user, and apply a composite
  ## Simpson's rule over the 513-point grid
  ap = approx(pi, f.x.pi, n = 513)
  integral = sum(ap$y[2 * (1:256) - 1] + 4 * ap$y[2 * (1:256)] + ap$y[2 * (1:256) + 1])
  integral = (ap$x[2] - ap$x[1]) * integral / 3

  posterior = likelihood * pi.prior / integral

  if(Bolstad.control(...)$plot){
    plot(pi, posterior, ylim = c(0, 1.1 * max(posterior, pi.prior)), lty = 1,
         type = "l", col = "blue", xlab = expression(pi), ylab = "Density")
    lines(pi, pi.prior, lty = 2, col = "red")

    legend("topleft", bty = "n", lty = 1:2, col = c("blue", "red"),
           legend = c("Posterior", "Prior"), cex = 0.7)
  }

  results = list(name = 'pi', param.x = pi,
                 prior = pi.prior, likelihood = likelihood, posterior = posterior,
                 pi = pi, pi.prior = pi.prior)
  class(results) = 'Bolstad'
  invisible(results)
}
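## Numerical check (not run at load): because the denominator above is evaluated
## with a composite Simpson's rule, the returned posterior density should
## integrate to approximately 1 over [0, 1]. A minimal sketch using the cdf()
## generic defined in this package; plot = FALSE passed through ... to
## Bolstad.control() is assumed to suppress the graph.
if (FALSE) {
  res = binogcp(6, 8, density = "beta", params = c(2, 2), plot = FALSE)
  Fpi = cdf(res)        # splinefun built from sintegral() of the posterior
  Fpi(1) - Fpi(0)       # approximately 1
}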
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/binogcp.r
#' Binomial sampling with a beta mixture prior
#'
#' Evaluates and plots the posterior density for \eqn{\pi}{pi}, the probability
#' of a success in a Bernoulli trial, with binomial sampling when the prior
#' density for \eqn{\pi}{pi} is a mixture of two beta distributions,
#' \eqn{beta(a_0,b_0)} and \eqn{beta(a_1,b_1)}.
#'
#' @param x the number of observed successes in the binomial experiment.
#' @param n the number of trials in the binomial experiment.
#' @param alpha0 a vector of length two containing the parameters,
#' \eqn{a_0}{a0} and \eqn{b_0}{b0}, for the first component beta prior - must
#' be greater than zero. By default the elements of alpha0 are set to 1.
#' @param alpha1 a vector of length two containing the parameters,
#' \eqn{a_1}{a1} and \eqn{b_1}{b1}, for the second component beta prior - must
#' be greater than zero. By default the elements of alpha1 are set to 1.
#' @param p The prior mixing proportion for the two component beta priors. That
#' is the prior is \eqn{p \times beta(a_0,b_0) + (1-p) \times
#' beta(a_1,b_1)}{p*beta(a0,b0)+(1-p)*beta(a1,b1)}. \eqn{p} is set to 0.5 by
#' default
#' @param \dots additional arguments that are passed to \code{Bolstad.control}
#' @return A list will be returned with the following components: \item{pi}{the
#' values of \eqn{\pi}{pi} for which the posterior density was evaluated}
#' \item{posterior}{the posterior density of \eqn{\pi}{pi} given \eqn{n} and
#' \eqn{x}} \item{likelihood}{the likelihood function for \eqn{\pi}{pi} given
#' \eqn{x} and \eqn{n}, i.e. the \eqn{binomial(n,\pi)}{binomial(n,pi)} density}
#' \item{prior}{the prior density of \eqn{\pi}{pi}}
#' @seealso \code{\link{binodp}} \code{\link{binogcp}} \code{\link{normmixp}}
#' @keywords misc
#' @examples
#'
#' ## simplest call with 6 successes observed in 8 trials and a 50:50 mix
#' ## of two beta(1,1) uniform priors
#' binomixp(6,8)
#'
#' ## 6 successes observed in 8 trials and a 20:80 mix of a non-uniform
#' ## beta(0.5,6) prior and a uniform beta(1,1) prior
#' binomixp(6,8,alpha0=c(0.5,6),alpha1=c(1,1),p=0.2)
#'
#' ## 4 successes observed in 12 trials with a 90:10 non uniform beta(3,3) prior
#' ## and a non uniform beta(4,12).
#' ## Plot the stored prior, likelihood and posterior
#' results = binomixp(4, 12, c(3, 3), c(4, 12), 0.9)$mix
#'
#' par(mfrow = c(3,1))
#' y.lims = c(0, 1.1 * max(results$posterior, results$prior))
#'
#' plot(results$pi, results$prior, ylim = y.lims, type = 'l',
#'      xlab = expression(pi), ylab = 'Density', main = 'Prior')
#' polygon(results$pi, results$prior, col = 'red')
#'
#' plot(results$pi, results$likelihood, type = 'l',
#'      xlab = expression(pi), ylab = 'Density', main = 'Likelihood')
#' polygon(results$pi, results$likelihood, col = 'green')
#'
#' plot(results$pi, results$posterior, ylim = y.lims, type = 'l',
#'      xlab = expression(pi), ylab = 'Density', main = 'Posterior')
#' polygon(results$pi, results$posterior, col = 'blue')
#'
#' @export binomixp
binomixp = function(x, n, alpha0 = c(1, 1), alpha1 = c(1, 1), p = 0.5, ...){

  if (n < x)
    stop("Error: n must be greater than or equal to x")

  if (length(alpha0) != 2 || length(alpha1) != 2)
    stop("Error: the parameters for the beta priors, alpha0 and alpha1, must have two elements each")

  if (any(alpha0 <= 0) || any(alpha1 <= 0))
    stop("Error: the parameters for the beta priors, alpha0 and alpha1, must be greater than zero")

  if (p <= 0 || p >= 1)
    stop("Error: the mixing proportion p must be in the interval (0,1) exclusive")

  ## log prior probability of the data under each component
  i = 1:x
  log0 = sum(log(alpha0[1] + i - 1) - log(alpha0[1] + alpha0[2] + i - 1) + log(n - i + 1) - log(i))
  log1 = sum(log(alpha1[1] + i - 1) - log(alpha1[1] + alpha1[2] + i - 1) + log(n - i + 1) - log(i))

  i = (x + 1):n
  log0 = log0 + sum(log(alpha0[2] + i - x - 1) - log(alpha0[1] + alpha0[2] + i - 1))
  log1 = log1 + sum(log(alpha1[2] + i - x - 1) - log(alpha1[1] + alpha1[2] + i - 1))

  f0 = exp(log0)
  f1 = exp(log1)

  quiet = Bolstad.control(...)$quiet
  if(!quiet){
    cat("Prior probability of the data under component 0\n")
    cat("----------------------------\n")
    cat(paste("Log prob.:", signif(log0, 3), "\nProbability: ", signif(f0, 5), "\n\n"))

    cat("Prior probability of the data under component 1\n")
    cat("----------------------------\n")
    cat(paste("Log prob.:", signif(log1, 3), "\nProbability: ", signif(f1, 5), "\n\n"))
  }

  q0 = p
  q1 = 1 - q0
  qp0 = q0 * f0 / (q0 * f0 + q1 * f1)
  qp1 = 1 - qp0

  if(!quiet){
    cat(paste("Post. mixing proportion for component 0:", signif(qp0, 3), "\n"))
    cat(paste("Post. mixing proportion for component 1:", signif(qp1, 3), "\n"))
  }

  pi = seq(0, 1, by = 0.001)
  prior.0 = dbeta(pi, alpha0[1], alpha0[2])
  prior.1 = dbeta(pi, alpha1[1], alpha1[2])
  prior = q0 * prior.0 + q1 * prior.1

  alpha0.post = alpha0 + c(x, n - x)
  alpha1.post = alpha1 + c(x, n - x)
  posterior.0 = dbeta(pi, alpha0.post[1], alpha0.post[2])
  posterior.1 = dbeta(pi, alpha1.post[1], alpha1.post[2])
  posterior = qp0 * posterior.0 + qp1 * posterior.1

  loglik = x * log(pi) + (n - x) * log(1 - pi)
  loglik = loglik - max(loglik)
  likelihood = exp(loglik)
  normalizing.factor = sum(likelihood) / length(likelihood)
  likelihood = likelihood / normalizing.factor

  if (Bolstad.control(...)$plot) {
    o.par = par(mfrow = c(2, 2))

    ## plot the priors and the mixture prior
    y.max = max(prior.0[is.finite(prior.0)], prior.1[is.finite(prior.1)],
                prior[is.finite(prior)])
    plot(pi[is.finite(prior.0)], prior.0[is.finite(prior.0)],
         ylim = c(0, y.max * 1.1), xlab = expression(pi), ylab = "Density",
         main = "Mixture prior and its components",
         type = "l", lty = 2, col = "black")
    lines(pi[is.finite(prior.1)], prior.1[is.finite(prior.1)], lty = 3, col = "red")
    lines(pi[is.finite(prior)], prior[is.finite(prior)], lty = 1, col = "green")
    legend("topleft", cex = 0.7, bty = "n",
           legend = c(expression(prior[0]), expression(prior[1]), expression(prior[mix])),
           lty = c(2, 3, 1), col = c("black", "red", "green"))

    ## plot the posteriors and the mixture posterior
    y.max = max(posterior.0, posterior.1, posterior)
    plot(pi, posterior.0, ylim = c(0, y.max * 1.1), xlab = expression(pi),
         ylab = "Density", main = "Mixture posterior and its components",
         type = "l", lty = 2, col = "black")
    lines(pi, posterior.1, lty = 3, col = "red")
    lines(pi, posterior, lty = 1, col = "green")
    legend("topleft", bty = "n",
           legend = c(expression(posterior[0]), expression(posterior[1]), expression(posterior[mix])),
           lty = c(2, 3, 1), col = c("black", "red", "green"))

    ## plot the mixture prior, likelihood and mixture posterior
    y.max = max(prior[is.finite(prior)], posterior, likelihood)
    plot(pi, prior, ylim = c(0, y.max * 1.1), xlab = expression(pi), ylab = "Density",
         main = "Mixture prior, likelihood and mixture posterior",
         type = "l", lty = 2, col = "black")
    lines(pi, likelihood, lty = 3, col = "red")
    lines(pi, posterior, lty = 1, col = "green")
    legend("topleft", bty = "n",
           legend = c(expression(prior[mix]), expression(likelihood), expression(posterior[mix])),
           lty = c(2, 3, 1), col = c("black", "red", "green"))

    par(o.par)
  }

  results.comp1 = list(name = "pi", param.x = pi, prior = prior.0,
                       likelihood = likelihood, posterior = posterior.0)
  class(results.comp1) = "Bolstad"

  results.comp2 = list(name = "pi", param.x = pi, prior = prior.1,
                       likelihood = likelihood, posterior = posterior.1)
  class(results.comp2) = "Bolstad"

  results.mix = list(name = "pi", param.x = pi, prior = prior,
                     likelihood = likelihood, posterior = posterior,
                     pi = pi # for backwards compat. only
                     )
  class(results.mix) = "Bolstad"

  invisible(list(comp1 = results.comp1, comp2 = results.comp2, mix = results.mix))
}
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/binomixp.r
#' Cumulative distribution function generic
#'
#' This function returns the cumulative distribution function (cdf) of the
#' posterior distribution of the parameter of interest over the range of values for
#' which the posterior is specified.
#'
#' @param x An object for which we want to compute the cdf
#' @param \dots Any other parameters. Not currently used.
#' @return either the exact cdf of the posterior if a conjugate prior has been
#' used, or a \code{stats::splinefun} which will compute the lower
#' tail probability of the parameter for any valid input.
#' @author James Curran
#' @export
cdf = function(x, ...){
  UseMethod("cdf")
}

#' @describeIn cdf Cumulative distribution function for posterior density
#' @export
cdf.Bolstad = function(x, ...){
  if(!inherits(x, "Bolstad"))
    stop("x must be an object of class Bolstad")

  ## if the object already carries an exact cdf (conjugate case), return it
  if(any(grepl("cdf", names(x))))
    return(x$cdf)

  ## otherwise integrate the posterior numerically and interpolate
  res = sintegral(x$param.x, x$posterior)$cdf
  return(splinefun(res$x, res$y))
}
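## Usage sketch (not run at load): the returned function can be evaluated at
## any point in the support. For a binobp() result the exact conjugate cdf is
## returned, so this requires no numerical integration; plot/quiet passed
## through ... to Bolstad.control() are assumed to suppress output.
if (FALSE) {
  res = binobp(6, 8, plot = FALSE, quiet = TRUE)
  Fpi = cdf(res)
  Fpi(0.5)                  # Pr(pi <= 0.5 | x, n)
  Fpi(0.975) - Fpi(0.025)   # posterior probability that pi lies in [0.025, 0.975]
}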
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/cdf.R
#' Create prior generic
#'
#' @param x a vector of x values at which the prior is to be specified (the support of the prior).
#' @param \dots optional extra arguments. Not currently used.
#' @return a linear interpolation function where the weights have been scaled so
#' that the function (numerically) integrates to 1.
#' @export
createPrior = function(x, ...){
  UseMethod("createPrior")
}
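## Usage sketch (not run at load): a user-specified triangular prior built by
## linear interpolation, matching the call used in the binogcp() examples.
## This assumes a default method for createPrior (taking support points x and
## weights wt) is defined elsewhere in the package.
if (FALSE) {
  priorFun = createPrior(x = c(0, 0.5, 1), wt = c(0, 2, 0))
  pi = seq(0, 1, by = 0.001)
  plot(pi, priorFun(pi), type = "l", xlab = expression(pi),
       ylab = "Prior density")   # triangle peaking at pi = 0.5, area ~ 1
}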
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/createPrior.R