##
## wdnet: Weighted directed network
## Copyright (C) 2024 Yelie Yuan, Tiandong Wang, Jun Yan and Panpan Zhang
## Yelie Yuan <[email protected]>
##
## This file is part of the R package wdnet.
##
## The R package wdnet is free software: You can redistribute it and/or
## modify it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or (at your
## option) any later version. See the GNU General Public License at
## <https://www.gnu.org/licenses/> for details.
##
## The R package wdnet is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
##

# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

#' Degree preserving rewiring process for directed networks.
#'
#' @param iteration Integer, number of iterations for rewiring attempts.
#' @param nattempts Integer, number of rewiring attempts per iteration.
#' @param tnode Vector, target node sequence minus 1 (zero-based for C++).
#' @param sout Vector, source nodes' out-degree.
#' @param sin Vector, source nodes' in-degree.
#' @param tout Vector, target nodes' out-degree.
#' @param tin Vector, target nodes' in-degree.
#' @param index_s Index of source nodes' out- and in-degree.
#'   \code{index_s}/\code{index_t} bridges the indices of source/target nodes
#'   and the target structure eta.
#' @param index_t Index of target nodes' out- and in-degree.
#' @param eta Matrix, target structure eta generated by
#'   \code{wdnet::get_eta_directed()}.
#' @param rewire_history Logical, whether the rewiring history should be
#'   returned.
#' @return Returns the target node sequence, four directed assortativity
#'   coefficients after each iteration, and the rewiring history.
#'
#' @keywords internal
#'
dprewire_directed_cpp <- function(iteration, nattempts, tnode, sout, sin, tout, tin, index_s, index_t, eta, rewire_history) {
    .Call('_wdnet_dprewire_directed_cpp', PACKAGE = 'wdnet', iteration, nattempts, tnode, sout, sin, tout, tin, index_s, index_t, eta, rewire_history)
}

#' Degree preserving rewiring process for undirected networks.
#'
#' @param iteration Integer, number of iterations for rewiring attempts.
#' @param nattempts Integer, number of rewiring attempts per iteration.
#' @param node1 Vector, first column of edgelist.
#' @param node2 Vector, second column of edgelist.
#' @param degree1 Vector, degree of node1 and node2.
#' @param degree2 Vector, degree of node2 and node1. degree1 and degree2 are
#'   used to calculate the assortativity coefficient, i.e., degree
#'   correlation.
#' @param index1 Index of the first column of edgelist. \code{index1} and
#'   \code{index2} bridge the nodes' degree and the structure \code{e}.
#' @param index2 Index of the second column of edgelist.
#' @param e Matrix, target structure (eta) generated by
#'   \code{wdnet::get_eta_undirected()}.
#' @param rewire_history Logical, whether the rewiring history should be
#'   returned.
#' @return Returns node sequences, the assortativity coefficient after each
#'   iteration, and the rewiring history.
#'
#' @keywords internal
#'
dprewire_undirected_cpp <- function(iteration, nattempts, node1, node2, degree1, degree2, index1, index2, e, rewire_history) {
    .Call('_wdnet_dprewire_undirected_cpp', PACKAGE = 'wdnet', iteration, nattempts, node1, node2, degree1, degree2, index1, index2, e, rewire_history)
}

#' Preferential attachment algorithm for simple situations, i.e., edge weight
#' equals 1, each step adds one new edge.
#'
#' @param snode Source nodes.
#' @param tnode Target nodes.
#' @param scenario Sequence of alpha, beta, gamma, xi, rho scenarios.
#' @param nnode Number of nodes in the seed network.
#' @param nedge Number of edges in the seed network.
#' @param delta_out Tuning parameter.
#' @param delta_in Tuning parameter.
#' @param directed Whether the network is directed.
#' @return Returns a list that includes the total number of nodes and the
#'   sequences of source and target nodes.
#'
#' @keywords internal
#'
rpanet_bag_cpp <- function(snode, tnode, scenario, nnode, nedge, delta_out, delta_in, directed) {
    .Call('_wdnet_rpanet_bag_cpp', PACKAGE = 'wdnet', snode, tnode, scenario, nnode, nedge, delta_out, delta_in, directed)
}

#' Preferential attachment network generation.
#'
#' @param nstep Number of steps.
#' @param m Number of new edges in each step.
#' @param new_node_id New node ID.
#' @param new_edge_id New edge ID.
#' @param source_node Sequence of source nodes.
#' @param target_node Sequence of target nodes.
#' @param outs Sequence of out-strength.
#' @param ins Sequence of in-strength.
#' @param edgeweight Weight of existing and new edges.
#' @param scenario Scenario of existing and new edges.
#' @param sample_recip Logical, whether reciprocal edges will be added.
#' @param node_group Sequence of node group.
#' @param spref_vec Sequence of node source preference.
#' @param tpref_vec Sequence of node target preference.
#' @param control List of controlling arguments.
#' @return Sampled network.
#'
#' @keywords internal
#'
rpanet_binary_directed <- function(nstep, m, new_node_id, new_edge_id, source_node, target_node, outs, ins, edgeweight, scenario, sample_recip, node_group, spref_vec, tpref_vec, control) {
    .Call('_wdnet_rpanet_binary_directed', PACKAGE = 'wdnet', nstep, m, new_node_id, new_edge_id, source_node, target_node, outs, ins, edgeweight, scenario, sample_recip, node_group, spref_vec, tpref_vec, control)
}

#' Preferential attachment network generation.
#'
#' @param nstep Number of steps.
#' @param m Number of new edges in each step.
#' @param new_node_id New node ID.
#' @param new_edge_id New edge ID.
#' @param node_vec1 Sequence of nodes in the first column of edgelist.
#' @param node_vec2 Sequence of nodes in the second column of edgelist.
#' @param s Sequence of node strength.
#' @param edgeweight Weight of existing and new edges.
#' @param scenario Scenario of existing and new edges.
#' @param pref_vec Sequence of node preference.
#' @param control List of controlling arguments.
#' @return Sampled network.
#'
#' @keywords internal
#'
rpanet_binary_undirected_cpp <- function(nstep, m, new_node_id, new_edge_id, node_vec1, node_vec2, s, edgeweight, scenario, pref_vec, control) {
    .Call('_wdnet_rpanet_binary_undirected_cpp', PACKAGE = 'wdnet', nstep, m, new_node_id, new_edge_id, node_vec1, node_vec2, s, edgeweight, scenario, pref_vec, control)
}

#' Preferential attachment network generation.
#'
#' @param nstep Number of steps.
#' @param m Number of new edges in each step.
#' @param new_node_id New node ID.
#' @param new_edge_id New edge ID.
#' @param source_node Sequence of source nodes.
#' @param target_node Sequence of target nodes.
#' @param outs Sequence of out-strength.
#' @param ins Sequence of in-strength.
#' @param edgeweight Weight of existing and new edges.
#' @param scenario Scenario of existing and new edges.
#' @param sample_recip Logical, whether reciprocal edges will be added.
#' @param node_group Sequence of node group.
#' @param spref_vec Sequence of node source preference.
#' @param tpref_vec Sequence of node target preference.
#' @param control List of controlling arguments.
#' @return Sampled network.
#'
#' @keywords internal
#'
rpanet_linear_directed_cpp <- function(nstep, m, new_node_id, new_edge_id, source_node, target_node, outs, ins, edgeweight, scenario, sample_recip, node_group, spref_vec, tpref_vec, control) {
    .Call('_wdnet_rpanet_linear_directed_cpp', PACKAGE = 'wdnet', nstep, m, new_node_id, new_edge_id, source_node, target_node, outs, ins, edgeweight, scenario, sample_recip, node_group, spref_vec, tpref_vec, control)
}

#' Preferential attachment network generation.
#'
#' @param nstep Number of steps.
#' @param m Number of new edges in each step.
#' @param new_node_id New node ID.
#' @param new_edge_id New edge ID.
#' @param node_vec1 Sequence of nodes in the first column of edgelist.
#' @param node_vec2 Sequence of nodes in the second column of edgelist.
#' @param s Sequence of node strength.
#' @param edgeweight Weight of existing and new edges.
#' @param scenario Scenario of existing and new edges.
#' @param pref_vec Sequence of node preference.
#' @param control List of controlling arguments.
#' @return Sampled network.
#'
#' @keywords internal
#'
rpanet_linear_undirected_cpp <- function(nstep, m, new_node_id, new_edge_id, node_vec1, node_vec2, s, edgeweight, scenario, pref_vec, control) {
    .Call('_wdnet_rpanet_linear_undirected_cpp', PACKAGE = 'wdnet', nstep, m, new_node_id, new_edge_id, node_vec1, node_vec2, s, edgeweight, scenario, pref_vec, control)
}

#' Fill missing nodes in the node sequence. Defined for \code{wdnet::rpanet}.
#'
#' @param nodes Source/target nodes, missing nodes are denoted as 0.
#' @param edges Sampled edges according to preferential attachment.
#' @return Source/target nodes.
#'
#' @keywords internal
#'
find_node_cpp <- function(nodes, edges) {
    .Call('_wdnet_find_node_cpp', PACKAGE = 'wdnet', nodes, edges)
}

#' Fill missing values in the node sequence. Defined for \code{wdnet::rpanet}.
#'
#' @param node1 Nodes in the first column of edgelist, i.e., \code{edgelist[, 1]}.
#' @param node2 Nodes in the second column of edgelist, i.e., \code{edgelist[, 2]}.
#' @param start_edge Index of sampled edges; corresponds to the missing nodes
#'   in node1 and node2.
#' @param end_edge Index of sampled edges; corresponds to the missing nodes in
#'   node1 and node2.
#' @return Node sequence.
#'
#' @keywords internal
#'
find_node_undirected_cpp <- function(node1, node2, start_edge, end_edge) {
    .Call('_wdnet_find_node_undirected_cpp', PACKAGE = 'wdnet', node1, node2, start_edge, end_edge)
}

#' Aggregate edgeweight into nodes' strength.
#'
#' @param snode Source nodes.
#' @param tnode Target nodes.
#' @param weight Edgeweight.
#' @param nnode Number of nodes.
#' @param weighted Logical, TRUE if the edges are weighted, FALSE if not.
#' @return Out-strength and in-strength.
#'
#' @keywords internal
#'
node_strength_cpp <- function(snode, tnode, weight, nnode, weighted = TRUE) {
    .Call('_wdnet_node_strength_cpp', PACKAGE = 'wdnet', snode, tnode, weight, nnode, weighted)
}

#' Uniformly draw a node from existing nodes for each time step. Defined for
#' \code{wdnet::rpanet()}.
#'
#' @param total_node Number of existing nodes at each time step.
#' @return Sampled nodes.
#'
#' @keywords internal
#'
sample_node_cpp <- function(total_node) {
    .Call('_wdnet_sample_node_cpp', PACKAGE = 'wdnet', total_node)
}

#' Fill edgeweight into the adjacency matrix. Defined for function
#' \code{edgelist_to_adj}.
#'
#' @param adj An adjacency matrix.
#' @param edgelist A two-column matrix representing the edgelist.
#' @param edgeweight A vector representing the weight of edges.
#' @return Adjacency matrix with edge weight.
#'
#' @keywords internal
#'
fill_weight_cpp <- function(adj, edgelist, edgeweight) {
    .Call('_wdnet_fill_weight_cpp', PACKAGE = 'wdnet', adj, edgelist, edgeweight)
}
## File: R/RcppExports.R
##
## wdnet: Weighted directed network
## Copyright (C) 2024 Yelie Yuan, Tiandong Wang, Jun Yan and Panpan Zhang
## Jun Yan <[email protected]>
##
## This file is part of the R package wdnet.
##
## The R package wdnet is free software: You can redistribute it and/or
## modify it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or (at your
## option) any later version. See the GNU General Public License at
## <https://www.gnu.org/licenses/> for details.
##
## The R package wdnet is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
##

#' @importFrom stats weighted.mean
#' @importFrom wdm wdm
NULL

## Directed assortativity coefficient

#' Compute the assortativity coefficient of a weighted and directed network.
#'
#' @param adj is an adjacency matrix of a weighted and directed network.
#' @param type which type of assortativity coefficient to compute: "outin"
#'   (default), "inin", "outout" or "inout"?
#'
#' @return a scalar of assortativity coefficient
#'
#' @references \itemize{
#'   \item Foster, J.G., Foster, D.V., Grassberger, P. and Paczuski, M.
#'   (2010). Edge direction and the structure of networks. \emph{Proceedings
#'   of the National Academy of Sciences of the United States of America},
#'   107(24), 10815--10820.
#'   \item Yuan, Y., Zhang, P. and Yan, J. (2021). Assortativity coefficients
#'   for weighted and directed networks. \emph{Journal of Complex Networks},
#'   9(2), cnab017.
#' }
#'
#' @note When the adjacency matrix is binary (i.e., directed but unweighted
#'   networks), \code{dw_assort} returns the assortativity coefficient
#'   proposed in Foster et al. (2010).
#'
#' @keywords internal
#'
dw_assort <- function(adj, type = c("outin", "inin", "outout", "inout")) {
  stopifnot(dim(adj)[1] == dim(adj)[2])
  ## determine the location of edges in the network
  in_str <- colSums(adj)
  out_str <- rowSums(adj)
  vert_from <- unlist(apply(adj, 2, function(x) {
    which(x > 0)
  }))
  number_to <- apply(adj, 2, function(x) {
    sum(x > 0)
  })
  temp_to <- cbind(seq_len(dim(adj)[1]), number_to)
  vert_to <- rep(temp_to[, 1], temp_to[, 2])
  weight <- adj[which(adj > 0)]
  type <- match.arg(type)
  ## map the type onto its source and target degree components; the types
  ## carry no separator, so split them explicitly
  .type <- switch(type,
    "outin" = c("out", "in"),
    "inin" = c("in", "in"),
    "outout" = c("out", "out"),
    "inout" = c("in", "out")
  )
  x <- switch(.type[1],
    "out" = out_str,
    "in" = in_str
  )[vert_from]
  y <- switch(.type[2],
    "out" = out_str,
    "in" = in_str
  )[vert_to]
  weighted.cor <- function(x, y, w) {
    mean_x <- stats::weighted.mean(x, w)
    mean_y <- stats::weighted.mean(y, w)
    var_x <- sum((x - mean_x)^2 * w)
    var_y <- sum((y - mean_y)^2 * w)
    return(sum(w * (x - mean_x) * (y - mean_y)) / sqrt(var_x * var_y))
  }
  return(weighted.cor(x, y, weight))
}
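## A minimal sketch of calling the internal helper directly (assumes the toy
## 3-node matrix below; guarded with `if (FALSE)` so it never runs at load):
if (FALSE) {
  adj <- matrix(c(
    0, 2, 0,
    0, 0, 1,
    3, 0, 0
  ), nrow = 3, byrow = TRUE)
  ## weighted correlation of source out-strength with target in-strength;
  ## a directed weighted cycle is perfectly assortative here, so this is 1
  dw_assort(adj, type = "outin")
}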
#' Compute the assortativity coefficient(s) for a network.
#'
#' @param netwk A \code{wdnet} object that represents the network. If
#'   \code{NULL}, the function will compute the coefficient using either
#'   \code{edgelist} and \code{edgeweight}, or \code{adj}.
#' @param edgelist A two-column matrix representing edges.
#' @param edgeweight A numeric vector of edge weights with the same length as
#'   the number of rows in edgelist. If \code{NULL}, all edges will be
#'   assigned weight 1.
#' @param adj The adjacency matrix of a network.
#' @param directed Logical. Indicates whether the edges in \code{edgelist} or
#'   \code{adj} are directed. It will be omitted if \code{netwk} is provided.
#' @param f1 A vector representing the first feature of existing nodes. The
#'   number of nodes should be equal to the length of both \code{f1} and
#'   \code{f2}. Defined for directed networks. If \code{NULL}, out-strength
#'   will be used.
#' @param f2 A vector representing the second feature of existing nodes.
#'   Defined for directed networks. If \code{NULL}, in-strength will be used.
#'
#' @return Assortativity coefficient for undirected networks, or a list of
#'   four assortativity coefficients for directed networks.
#'
#' @references \itemize{
#'   \item Foster, J.G., Foster, D.V., Grassberger, P. and Paczuski, M.
#'   (2010). Edge direction and the structure of networks. \emph{Proceedings
#'   of the National Academy of Sciences of the United States of America},
#'   107(24), 10815--10820.
#'   \item Yuan, Y., Zhang, P. and Yan, J. (2021). Assortativity coefficients
#'   for weighted and directed networks. \emph{Journal of Complex Networks},
#'   9(2), cnab017.
#' }
#'
#' @note When the adjacency matrix is binary (i.e., directed but unweighted
#'   networks), \code{assortcoef} returns the assortativity coefficient
#'   proposed in Foster et al. (2010).
#'
#' @export
#'
#' @examples
#' set.seed(123)
#' control <- rpa_control_edgeweight(
#'   sampler = function(n) rgamma(n, shape = 5, scale = 0.2)
#' )
#' netwk <- rpanet(nstep = 10^4, control = control)
#' ret <- assortcoef(netwk)
#' ret <- assortcoef(
#'   edgelist = netwk$edgelist,
#'   edgeweight = netwk$edge.attr$weight,
#'   directed = TRUE
#' )
#'
assortcoef <- function(
    netwk, edgelist, edgeweight,
    adj, directed, f1, f2) {
  netwk <- create_wdnet(
    netwk = netwk,
    edgelist = edgelist,
    edgeweight = edgeweight,
    directed = directed,
    adj = adj,
    weighted = TRUE
  )
  edgelist <- netwk$edgelist
  edgeweight <- netwk$edge.attr$weight
  directed <- netwk$directed
  nnode <- max(edgelist)
  if ((!missing(f1)) || (!missing(f2))) {
    if (!directed) {
      stop("Node feature based assortativity coefficients are defined for directed networks.")
    }
    return(dw_feature_assort(
      netwk,
      f1 = f1, f2 = f2
    ))
  }
  if (!directed) {
    ## mirror each edge so that both orientations contribute
    edgelist <- rbind(edgelist, edgelist[, c(2, 1)])
    edgeweight <- c(edgeweight, edgeweight)
  }
  snode <- edgelist[, 1]
  tnode <- edgelist[, 2]
  temp <- node_strength_cpp(
    snode = snode,
    tnode = tnode,
    nnode = nnode,
    weight = edgeweight,
    weighted = TRUE
  )
  outs <- temp$outs
  ins <- temp$ins
  rm(temp)
  sout <- outs[snode]
  tin <- ins[tnode]
  if (!directed) {
    return(wdm::wdm(
      x = sout, y = tin,
      weights = edgeweight, method = "pearson"
    ))
  }
  sin <- ins[snode]
  tout <- outs[tnode]
  return(list(
    "outout" = wdm::wdm(
      x = sout, y = tout,
      weights = edgeweight, method = "pearson"
    ),
    "outin" = wdm::wdm(
      x = sout, y = tin,
      weights = edgeweight, method = "pearson"
    ),
    "inout" = wdm::wdm(
      x = sin, y = tout,
      weights = edgeweight, method = "pearson"
    ),
    "inin" = wdm::wdm(
      x = sin, y = tin,
      weights = edgeweight, method = "pearson"
    )
  ))
}
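## A sketch of the edge-mirroring step above for undirected input (assumes
## the toy triangle below; each edge appears once per orientation, which
## makes the weighted correlation symmetric in its two arguments):
if (FALSE) {
  el <- matrix(c(1, 2, 2, 3, 3, 1), ncol = 2, byrow = TRUE)
  rbind(el, el[, c(2, 1)]) # 6 rows: both orientations of every edge
  assortcoef(edgelist = el, directed = FALSE)
}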
#' Feature based assortativity coefficient
#'
#' Node feature based assortativity coefficients for weighted and directed
#' networks.
#'
#' @param netwk A \code{wdnet} object that represents the network.
#' @param f1 A vector representing the first feature of existing nodes.
#'   Number of nodes \code{= length(f1) = length(f2)}. Defined for directed
#'   networks. If \code{NULL}, out-strength will be used.
#' @param f2 A vector representing the second feature of existing nodes.
#'   Defined for directed networks. If \code{NULL}, in-strength will be used.
#'
#' @return Directed weighted assortativity coefficients between source nodes'
#'   \code{f1} (or \code{f2}) and target nodes' \code{f2} (or \code{f1}).
#'
#' @examples
#' set.seed(123)
#' adj <- matrix(rbinom(400, 1, 0.2) * sample(1:3, 400, replace = TRUE), 20, 20)
#' f1 <- runif(20)
#' f2 <- abs(rnorm(20))
#' ret <- assortcoef(adj = adj, f1 = f1, f2 = f2)
#'
#' @keywords internal
#'
dw_feature_assort <- function(netwk, f1, f2) {
  nnode <- max(netwk$edgelist)
  snode <- netwk$edgelist[, 1]
  tnode <- netwk$edgelist[, 2]
  edgeweight <- netwk$edge.attr$weight
  if (is.null(f1)) {
    f1 <- netwk$node.attr$outs
  }
  if (is.null(f2)) {
    f2 <- netwk$node.attr$ins
  }
  stopifnot(
    'Length of "f1" must equal number of nodes.' =
      length(f1) == nnode
  )
  stopifnot(
    'Length of "f2" must equal number of nodes.' =
      length(f2) == nnode
  )
  sf1 <- f1[snode]
  sf2 <- f2[snode]
  tf1 <- f1[tnode]
  tf2 <- f2[tnode]
  ret <- list()
  ret$"f1-f1" <- wdm::wdm(
    x = sf1, y = tf1,
    weights = edgeweight, method = "pearson"
  )
  ret$"f1-f2" <- wdm::wdm(
    x = sf1, y = tf2,
    weights = edgeweight, method = "pearson"
  )
  ret$"f2-f1" <- wdm::wdm(
    x = sf2, y = tf1,
    weights = edgeweight, method = "pearson"
  )
  ret$"f2-f2" <- wdm::wdm(
    x = sf2, y = tf2,
    weights = edgeweight, method = "pearson"
  )
  return(ret)
}
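## A hand-computed counterpart (assumes the hypothetical toy objects below):
## the "f1-f2" entry is just a weighted Pearson correlation of source-node f1
## against target-node f2, so wdm::wdm() on indexed features reproduces it.
if (FALSE) {
  el <- matrix(c(1, 2, 2, 3, 3, 1, 1, 3), ncol = 2, byrow = TRUE)
  w <- c(1, 2, 3, 1)
  f1 <- runif(3)
  f2 <- runif(3)
  wdm::wdm(x = f1[el[, 1]], y = f2[el[, 2]], weights = w, method = "pearson")
}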
## File: R/assortativity.R
##
## wdnet: Weighted directed network
## Copyright (C) 2024 Yelie Yuan, Tiandong Wang, Jun Yan and Panpan Zhang
## Jun Yan <[email protected]>
##
## This file is part of the R package wdnet.
##
## The R package wdnet is free software: You can redistribute it and/or
## modify it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or (at your
## option) any later version. See the GNU General Public License at
## <https://www.gnu.org/licenses/> for details.
##
## The R package wdnet is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
##

#' @importFrom igraph distances graph_from_adjacency_matrix
#' @importFrom rARPACK eigs
#' @importFrom utils modifyList
NULL

#' Degree-based centrality
#'
#' Compute the degree centrality measures of the vertices in a weighted and
#' directed network represented through its adjacency matrix.
#'
#' @param adj is an adjacency matrix of a weighted and directed network
#' @param alpha is a tuning parameter. The value of alpha must be nonnegative.
#'   By convention, alpha takes a value from 0 to 1 (default).
#' @param mode which mode to compute: "out" (default) or "in"? For undirected
#'   networks, this setting is irrelevant.
#'
#' @return a list of node names and associated degree centrality measures
#'
#' @references
#' \itemize{
#'   \item Opsahl, T., Agneessens, F., Skvoretz, J. (2010). Node centrality
#'   in weighted networks: Generalizing degree and shortest paths.
#'   \emph{Social Networks}, 32, 245--251.
#'   \item Zhang, P., Zhao, J. and Yan, J. (2020+) Centrality measures of
#'   networks with application to world input-output tables
#' }
#'
#' @note Function \code{degree_c} is an extension of function \code{strength}
#'   in package \code{igraph} and an alternative of function \code{degree_w}
#'   in package \code{tnet}. Function \code{degree_c} uses the adjacency
#'   matrix as input.
#'
#' @keywords internal
#'
degree_c <- function(adj, alpha = 1, mode = "out") {
  if (alpha < 0) {
    stop("The tuning parameter alpha must be nonnegative!")
  }
  if (dim(adj)[1] != dim(adj)[2]) {
    stop("The adjacency matrix must be a square matrix!")
  } else {
    if (isSymmetric(adj) == TRUE) {
      warning("The analyzed network is undirected!")
    }
    deg_c_output <- matrix(NA_real_, nrow = dim(adj)[1], ncol = 2)
    adj_name <- colnames(adj)
    if (is.null(adj_name) == FALSE) {
      deg_c_output[, 1] <- adj_name
    } else {
      deg_c_output[, 1] <- c(1:dim(adj)[1])
    }
    colnames(deg_c_output) <- c("name", "degree")
    ## binarize the adjacency matrix to obtain degrees
    adj_deg <- adj
    adj_deg[which(adj_deg > 0)] <- 1
    if (mode == "in") {
      deg_c_output[, 2] <- colSums(adj)^alpha + colSums(adj_deg)^(1 - alpha)
    }
    if (mode == "out") {
      deg_c_output[, 2] <- rowSums(adj)^alpha + rowSums(adj_deg)^(1 - alpha)
    }
    return(deg_c_output)
  }
}
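## A quick sketch of the alpha interpolation (assumes the toy matrix below;
## alpha = 1 puts the weight on strength, alpha = 0 on degree, up to the
## additive form used here). Guarded so it never runs at load time.
if (FALSE) {
  adj <- matrix(c(
    0, 2, 0,
    1, 0, 3,
    0, 1, 0
  ), nrow = 3, byrow = TRUE)
  degree_c(adj, alpha = 1, mode = "out")
  degree_c(adj, alpha = 0, mode = "in")
}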
#' Closeness centrality
#'
#' Compute the closeness centrality measures of the vertices in a weighted
#' and directed network represented through its adjacency matrix.
#'
#' @param adj is an adjacency matrix of a weighted and directed network
#' @param alpha is a tuning parameter. The value of alpha must be nonnegative.
#'   By convention, alpha takes a value from 0 to 1 (default).
#' @param mode which mode to compute: "out" (default) or "in"? For undirected
#'   networks, this setting is irrelevant.
#' @param method which method to use: "harmonic" (default) or "standard"?
#' @param distance whether to consider the entries in the adjacency matrix as
#'   distances or strong connections. The default setting is \code{FALSE}.
#'
#' @return a list of node names and associated closeness centrality measures
#'
#' @references
#' \itemize{
#'   \item Dijkstra, E.W. (1959). A note on two problems in connexion with
#'   graphs. \emph{Numerische Mathematik}, 1, 269--271.
#'   \item Newman, M.E.J. (2003). The structure and function of complex
#'   networks. \emph{SIAM Review}, 45(2), 167--256.
#'   \item Opsahl, T., Agneessens, F., Skvoretz, J. (2010). Node centrality
#'   in weighted networks: Generalizing degree and shortest paths.
#'   \emph{Social Networks}, 32, 245--251.
#'   \item Zhang, P., Zhao, J. and Yan, J. (2020+) Centrality measures of
#'   networks with application to world input-output tables
#' }
#'
#' @note Function \code{closeness_c} is an extension of function
#'   \code{closeness} in package \code{igraph} and function
#'   \code{closeness_w} in package \code{tnet}. The method of computing
#'   distances between vertices is \emph{Dijkstra's algorithm}.
#'
#' @keywords internal
#'
closeness_c <- function(adj, alpha = 1, mode = "out",
                        method = "harmonic", distance = FALSE) {
  if (alpha < 0) {
    stop("The tuning parameter alpha must be nonnegative!")
  }
  if (dim(adj)[1] != dim(adj)[2]) {
    stop("The adjacency matrix must be a square matrix!")
  } else {
    closeness_c_output <- matrix(NA_real_, nrow = dim(adj)[1], ncol = 2)
    adj_name <- colnames(adj)
    if (is.null(adj_name) == FALSE) {
      closeness_c_output[, 1] <- adj_name
    } else {
      closeness_c_output[, 1] <- c(1:dim(adj)[1])
    }
    colnames(closeness_c_output) <- c("name", "closeness")
    ## interpret large entries as strong connections (short distances)
    ## unless they are already distances
    if (distance == FALSE) {
      adj <- (1 / adj)^alpha
    } else if (distance == TRUE) {
      adj <- adj^alpha
    }
    temp_g <- igraph::graph_from_adjacency_matrix(adj,
      mode = "directed", weighted = TRUE
    )
    ## igraph::distances() already accounts for "mode", so the row sums
    ## below apply to both the "in" and "out" cases
    if (method == "harmonic") {
      temp_d <- 1 / igraph::distances(temp_g, mode = mode, algorithm = "dijkstra")
      ## do not consider the distance of a vertex to itself
      diag(temp_d) <- NA
      closeness_c_output[, 2] <- rowSums(temp_d, na.rm = TRUE)
    }
    if (method == "standard") {
      temp_d <- igraph::distances(temp_g, mode = mode, algorithm = "dijkstra")
      diag(temp_d) <- NA
      closeness_c_output[, 2] <- 1 / rowSums(temp_d, na.rm = TRUE)
    }
    return(closeness_c_output)
  }
}
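## A minimal sketch contrasting the two methods (assumes the toy matrix
## below; harmonic closeness sums reciprocal distances, standard closeness
## takes the reciprocal of the summed distances). Not run at load time.
if (FALSE) {
  adj <- matrix(c(
    0, 2, 1,
    1, 0, 3,
    2, 1, 0
  ), nrow = 3, byrow = TRUE)
  closeness_c(adj, method = "harmonic")
  closeness_c(adj, method = "standard")
}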
#' Weighted PageRank centrality
#'
#' Compute the weighted PageRank centrality measures of the vertices in a
#' weighted and directed network represented through its adjacency matrix.
#'
#' @param adj is an adjacency matrix of a weighted and directed network
#' @param gamma is the damping factor; it takes 0.85 (default) if not given.
#' @param theta is a tuning parameter leveraging node degree and strength;
#'   theta = 0 does not consider edge weight; theta = 1 (default) fully
#'   considers edge weight.
#' @param prior.info vertex-specific prior information for restarting when
#'   arriving at a sink. When it is not given (\code{NULL}), a random restart
#'   is implemented.
#'
#' @return a list of node names with corresponding weighted PageRank scores
#'
#' @references
#' \itemize{
#'   \item Zhang, P., Wang, T. and Yan, J. (2022) PageRank centrality and
#'   algorithms for weighted, directed networks with applications to World
#'   Input-Output Tables. \emph{Physica A: Statistical Mechanics and its
#'   Applications}, 586, 126438.
#' }
#'
#' @note Function \code{wpr} is an extension of function \code{page_rank} in
#'   package \code{igraph}.
#'
#' @keywords internal
#'
wpr <- function(adj, gamma = 0.85, theta = 1, prior.info) {
  ## regularity conditions
  if (dim(adj)[1] != dim(adj)[2]) {
    stop("The adjacency matrix is not a square matrix!")
  }
  if ((gamma < 0) || (gamma > 1)) {
    stop("The damping factor is not between 0 and 1!")
  }
  if ((theta < 0) || (theta > 1)) {
    stop("The tuning parameter is not between 0 and 1!")
  }
  if (missing(prior.info)) {
    prior.info <- rep(1 / dim(adj)[1], dim(adj)[1])
    warning("No prior information is given; a uniform prior is in use!")
  }
  if (length(prior.info) != dim(adj)[1]) {
    stop("The dimension of the prior information is incorrect!")
  }
  if ((sum(prior.info) == 0) || any(prior.info < 0)) {
    stop("The prior information is invalid!")
  }
  if (abs(sum(prior.info) - 1) > 1e-10) {
    prior.info <- prior.info / sum(prior.info)
    warning("The prior information is not normalized!")
  }
  ## get the unweighted adjacency matrix
  unweight.adj <- adj
  unweight.adj[unweight.adj > 0] <- 1
  ## construct the M and M.star matrices
  n <- dim(adj)[1]
  sink.node <- which(rowSums(adj) == 0)
  M <- theta * t(adj / rowSums(adj)) +
    (1 - theta) * t(unweight.adj / (rowSums(unweight.adj)))
  M[, sink.node] <- prior.info
  B <- matrix(rep(prior.info, n), nrow = n, ncol = n)
  M.star <- gamma * M + (1 - gamma) * B
  ## rARPACK cannot solve 2-by-2 matrices
  if (dim(adj)[1] == 2) {
    eig_sol <- eigen(M.star)
    eigen_v <- eig_sol$vectors[, 1]
    eigen_vstd <- abs(eigen_v) / sum(abs(eigen_v))
    name_v <- c(1:n)
    myres <- cbind(name_v, eigen_vstd)
    colnames(myres) <- c("name", "wpr")
    return(myres)
  }
  ## use rARPACK to solve a large-scale matrix
  if (dim(adj)[1] > 2) {
    eig_sol <- rARPACK::eigs(M.star, k = 1, which = "LM", mattype = "matrix")
    eigen_v <- Re(eig_sol$vectors)
    eigen_vstd <- abs(eigen_v) / sum(abs(eigen_v))
    name_v <- c(1:n)
    myres <- cbind(name_v, eigen_vstd)
    colnames(myres) <- c("name", "wpr")
    return(myres)
  }
}
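## A minimal sketch (assumes the toy matrix below): the leading eigenvector
## of M.star is normalized, so the returned scores sum to one; calling wpr()
## without prior.info warns and falls back to a uniform prior. Guarded so it
## never runs at load.
if (FALSE) {
  adj <- matrix(c(
    0, 1, 2,
    0, 0, 3,
    1, 0, 0
  ), nrow = 3, byrow = TRUE)
  res <- wpr(adj, gamma = 0.85, theta = 1)
  sum(res[, "wpr"]) # 1
}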
#' Centrality measures
#'
#' Computes the centrality measures of the nodes in a weighted and directed
#' network.
#'
#' @param netwk A \code{wdnet} object that represents the network. If
#'   \code{NULL}, the function will compute the coefficient using either
#'   \code{edgelist} and \code{edgeweight}, or \code{adj}.
#' @param edgelist A two-column matrix representing edges of a directed
#'   network.
#' @param edgeweight A vector representing the weight of edges.
#' @param adj An adjacency matrix of a weighted and directed network.
#' @param directed Logical. Indicates whether the edges in \code{edgelist} or
#'   \code{adj} are directed.
#' @param measure Which measure to use: "degree" (degree-based centrality),
#'   "closeness" (closeness centrality), or "wpr" (weighted PageRank
#'   centrality)?
#' @param degree.control A list of parameters passed to the degree centrality
#'   measure:
#'   \itemize{
#'     \item `alpha` A tuning parameter. The value of alpha must be
#'     nonnegative. By convention, alpha takes a value from 0 to 1 (default).
#'     \item `mode` Which mode to compute: "out" (default) or "in"? For
#'     undirected networks, this setting is irrelevant.
#'   }
#' @param closeness.control A list of parameters passed to the closeness
#'   centrality measure:
#'   \itemize{
#'     \item `alpha` A tuning parameter. The value of alpha must be
#'     nonnegative. By convention, alpha takes a value from 0 to 1 (default).
#'     \item `mode` Which mode to compute: "out" (default) or "in"? For
#'     undirected networks, this setting is irrelevant.
#'     \item `method` Which method to use: "harmonic" (default) or
#'     "standard"?
#'     \item `distance` Whether to consider the entries in the adjacency
#'     matrix as distances or strong connections. The default setting is
#'     \code{FALSE}.
#'   }
#' @param wpr.control A list of parameters passed to the weighted PageRank
#'   centrality measure:
#'   \itemize{
#'     \item `gamma` The damping factor; it takes 0.85 (default) if not
#'     given.
#'     \item `theta` A tuning parameter leveraging node degree and strength;
#'     theta = 0 does not consider edge weight; theta = 1 (default) fully
#'     considers edge weight.
#'     \item `prior.info` Vertex-specific prior information for restarting
#'     when arriving at a sink. When it is not given (\code{NULL}), a random
#'     restart is implemented.
#'   }
#'
#' @return A list of node names and associated centrality measures
#'
#' @references
#' \itemize{
#'   \item Dijkstra, E.W. (1959). A note on two problems in connexion with
#'   graphs. \emph{Numerische Mathematik}, 1, 269--271.
#'   \item Newman, M.E.J. (2003). The structure and function of complex
#'   networks. \emph{SIAM Review}, 45(2), 167--256.
#'   \item Opsahl, T., Agneessens, F., Skvoretz, J. (2010). Node centrality
#'   in weighted networks: Generalizing degree and shortest paths.
#'   \emph{Social Networks}, 32, 245--251.
#'   \item Zhang, P., Wang, T. and Yan, J. (2022) PageRank centrality and
#'   algorithms for weighted, directed networks with applications to World
#'   Input-Output Tables. \emph{Physica A: Statistical Mechanics and its
#'   Applications}, 586, 126438.
#'   \item Zhang, P., Zhao, J. and Yan, J. (2020+) Centrality measures of
#'   networks with application to world input-output tables
#' }
#'
#' @note The degree-based centrality measure is an extension of function
#'   \code{strength} in package \code{igraph} and an alternative of function
#'   \code{degree_w} in package \code{tnet}.
#'
#'   The closeness centrality measure is an extension of function
#'   \code{closeness} in package \code{igraph} and function
#'   \code{closeness_w} in package \code{tnet}. The method of computing
#'   distances between vertices is \emph{Dijkstra's algorithm}.
#'
#'   The weighted PageRank centrality measure is an extension of function
#'   \code{page_rank} in package \code{igraph}.
#'
#' @examples
#' ## Generate a network according to the Erd\"{o}s-Renyi model of order 20
#' ## and parameter p = 0.3
#' edge_ER <- rbinom(400, 1, 0.3)
#' weight_ER <- sapply(edge_ER, function(x) x * sample(3, 1))
#' adj_ER <- matrix(weight_ER, 20, 20)
#' mydegree <- centrality(
#'   adj = adj_ER,
#'   measure = "degree", degree.control =
#'     list(alpha = 0.8, mode = "in")
#' )
#' myclose <- centrality(
#'   adj = adj_ER,
#'   measure = "closeness", closeness.control =
#'     list(alpha = 0.8, mode = "out", method = "harmonic", distance = FALSE)
#' )
#' mywpr <- centrality(
#'   adj = adj_ER,
#'   measure = "wpr", wpr.control =
#'     list(gamma = 0.85, theta = 0.75)
#' )
#'
#' @export
#'
centrality <- function(
    netwk, adj, edgelist, edgeweight,
    directed = TRUE,
    measure = c("degree", "closeness", "wpr"),
    degree.control = list(alpha = 1, mode = "out"),
    closeness.control = list(
      alpha = 1, mode = "out",
      method = "harmonic", distance = FALSE
    ),
    wpr.control = list(
      gamma = 0.85, theta = 1, prior.info = NULL
    )) {
  if (missing(adj)) {
    netwk <- create_wdnet(
      netwk = netwk,
      edgelist = edgelist,
      edgeweight = edgeweight,
      directed = directed
    )
    # stopifnot(
    #   "Network must be directed." = netwk$directed
    # )
    adj <- edgelist_to_adj(
      edgelist = netwk$edgelist,
      edgeweight = netwk$edge.attr$weight,
      directed = netwk$directed
    )
  }
  measure <- match.arg(measure)
  if (measure == "degree") {
    degree.control <- utils::modifyList(
      list(alpha = 1, mode = "out"),
      degree.control,
      keep.null = TRUE
    )
    return(degree_c(
      adj = adj,
      alpha = degree.control$alpha,
      mode = degree.control$mode
    ))
  }
  if (measure == "closeness") {
    closeness.control <- utils::modifyList(
      list(
        alpha = 1, mode = "out",
        method = "harmonic", distance = FALSE
      ),
      closeness.control,
      keep.null = TRUE
    )
    return(closeness_c(adj,
      alpha = closeness.control$alpha,
      mode = closeness.control$mode,
      method = closeness.control$method,
      distance = closeness.control$distance
    ))
  }
  wpr.control <- utils::modifyList(
    list(gamma = 0.85, theta = 1, prior.info = NULL),
    wpr.control,
    keep.null = TRUE
  )
  if (is.null(wpr.control$prior.info)) {
    return(wpr(adj, gamma = wpr.control$gamma, theta = wpr.control$theta))
  }
  return(wpr(adj,
    gamma = wpr.control$gamma,
    theta = wpr.control$theta,
    prior.info = wpr.control$prior.info
  ))
}
## File: R/centrality.R
##
## wdnet: Weighted directed network
## Copyright (C) 2024 Yelie Yuan, Tiandong Wang, Jun Yan and Panpan Zhang
## Jun Yan <[email protected]>
##
## This file is part of the R package wdnet.
##
## The R package wdnet is free software: You can redistribute it and/or
## modify it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or (at your
## option) any later version. See the GNU General Public License at
## <https://www.gnu.org/licenses/> for details.
##
## The R package wdnet is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
##

#' @importFrom igraph indent_print
#' @importFrom utils capture.output
NULL

#' Checks whether the input is a \code{rpacontrol} object
#'
#' @param control A \code{rpacontrol} object.
#' @return Logical, \code{TRUE} if the input is a \code{rpacontrol} object.
#' @keywords internal
#'
is_rpacontrol <- function(control) {
  return(inherits(control, "rpacontrol"))
}

#' Prints \code{rpa_control_scenario()} in the terminal
#'
#' @param control A list of control parameters for
#'   \code{rpa_control_scenario()}.
#'
#' @return Returns \code{NULL} invisibly.
#' @keywords internal
#'
print_control_scenario <- function(control) {
  cat("Edge scenarios:\n")
  cat(" - alpha: ", control$alpha, "\n", sep = "")
  cat(" - beta: ", control$beta, "\n", sep = "")
  cat(" - gamma: ", control$gamma, "\n", sep = "")
  cat(" - xi: ", control$xi, "\n", sep = "")
  cat(" - rho: ", control$rho, "\n", sep = "")
  cat(" - beta.loop: ", control$beta.loop, "\n", sep = "")
  cat(" - source.first: ", control$source.first, "\n", sep = "")
  invisible(NULL)
}

#' Prints \code{rpa_control_edgeweight()} in the terminal
#'
#' @param control A list of control parameters for
#'   \code{rpa_control_edgeweight()}.
#'
#' @return Returns \code{NULL} invisibly.
#' @keywords internal
#'
print_control_edgeweight <- function(control) {
  cat("Edge weights:\n")
  cat(" - sampler: ")
  if (is.null(control$sampler)) {
    cat("NULL; all new edges have weight 1\n")
  } else {
    cat("\n")
    igraph::indent_print(control$sampler, .indent = " ")
  }
  invisible(NULL)
}

#' Prints \code{rpa_control_newedge()} in the terminal
#'
#' @param control A list of control parameters for
#'   \code{rpa_control_newedge()}.
#'
#' @return Returns \code{NULL} invisibly.
#' @keywords internal
#'
print_control_newedge <- function(control) {
  cat("New edges in each step:\n")
  cat(" - sampler: ")
  if (is.null(control$sampler)) {
    cat("NULL; add one new edge at each step\n")
  } else {
    cat("\n")
    igraph::indent_print(control$sampler, .indent = " ")
  }
  cat(" - snode.replace: ", control$snode.replace, "\n", sep = "")
  cat(" - tnode.replace: ", control$tnode.replace, "\n", sep = "")
  cat(" - node.replace: ", control$node.replace, "\n", sep = "")
  invisible(NULL)
}

#' Prints \code{rpa_control_reciprocal()} in the terminal
#'
#' @param control A list of control parameters for
#'   \code{rpa_control_reciprocal()}.
#' @return Returns \code{NULL} invisibly.
#' @keywords internal
#'
print_control_reciprocal <- function(control) {
  cat("Reciprocal edges:\n")
  cat(" - group.prob: ")
  if (is.null(control$group.prob)) {
    cat("NULL\n")
  } else {
    cat(control$group.prob, "\n", sep = " ")
  }
  cat(" - recip.prob: ")
  if (is.null(control$recip.prob)) {
    cat("NULL; no immediate reciprocal edges\n")
  } else {
    cat("\n")
    igraph::indent_print(control$recip.prob, .indent = " ")
  }
  invisible(NULL)
}

#' Prints \code{rpa_control_preference()} in the terminal
#'
#' @param control A list of control parameters for
#'   \code{rpa_control_preference()}.
#' @param directed Logical, whether to print preference functions for
#'   directed networks only. If missing, print preference functions for both
#'   directed and undirected networks.
#'
#' @return Returns \code{NULL} invisibly.
#' @keywords internal
#'
print_control_preference <- function(control, directed) {
  cat("Preference functions:\n")
  cat(" - ftype: ", control$ftype, "\n", sep = "")
  if (control$ftype == "default") {
    spref <- paste0(
      " - sparams: ",
      paste(control$sparams, collapse = " ")
    )
    tpref <- paste0(
      " - tparams: ",
      paste(control$tparams, collapse = " ")
    )
    pref <- paste0(
      " - params: ",
      paste(control$params, collapse = " ")
    )
  } else if (control$ftype == "customized") {
    my_print <- function(pref, type) {
      if (inherits(pref, "XPtr")) {
        tmp <- utils::capture.output(pref)
        ## any() guards against the vector output of capture.output()
        if (any(grepl("pointer:\\ \\(nil\\)", tmp))) {
          pref <- paste0(
            " - ", type,
            ": XPtr; not valid, please recompile the C++ code.",
            sep = ""
          )
        } else {
          pref <- paste0(" - ", type, ": XPtr;", tmp, sep = "")
        }
      } else {
        pref <- paste0(" - ", type, ": ", pref)
      }
    }
    spref <- my_print(control$spref, "spref")
    tpref <- my_print(control$tpref, "tpref")
    pref <- my_print(control$pref, "pref")
  } else {
    stop("Preference function type is not valid.")
  }
  if (missing(directed)) {
    cat(spref, tpref, pref, sep = "\n")
  } else if (directed) {
    cat(spref, tpref, sep = "\n")
  } else {
    cat(pref, "\n")
  }
  invisible(NULL)
}

#' Prints \code{rpacontrol} in the terminal
#'
#' @param x An object of class \code{rpacontrol}.
#' @param control_name A string, the name of the control component.
#'
#' @return Returns \code{NULL} invisibly.
#' @keywords internal
#'
print_control_details <- function(x, control_name) {
  switch(control_name,
    "scenario" = print_control_scenario(control = x$scenario),
    "edgeweight" = print_control_edgeweight(control = x$edgeweight),
    "newedge" = print_control_newedge(control = x$newedge),
    "reciprocal" = print_control_reciprocal(control = x$reciprocal),
    "preference" = print_control_preference(control = x$preference)
  )
  cat("\n")
  invisible(NULL)
}

#' Prints \code{rpacontrol} objects
#'
#' These functions print \code{rpacontrol} objects in the terminal.
#' \code{print.rpacontrol()} shows only the current controls, whereas
#' \code{summary.rpacontrol()} includes both the specified controls and the
#' unspecified controls that use default values.
#'
#' @param x An object of class \code{rpacontrol}.
#' @param object An object of class \code{rpacontrol}.
#' @param ... Additional arguments.
#' @return Returns the controls invisibly.
#' @rdname print.rpacontrol
#' @method print rpacontrol
#' @export
#' @examples
#'
#' control <- rpa_control_scenario()
#' print(control)
#'
print.rpacontrol <- function(x, ...) {
  control_names <- names(x)
  for (each in control_names) {
    print_control_details(x, each)
  }
  invisible(x)
}

#' @rdname print.rpacontrol
#' @method summary rpacontrol
#' @export
#'
summary.rpacontrol <- function(object, ...) {
  control_default <- rpa_control_default()
  object <- control_default + object
  control_names <- names(rpa_control_default())
  count <- 0
  cat("Specified control(s):\n")
  cat("--------------------\n")
  for (each in control_names) {
    if (!identical(control_default[[each]], object[[each]])) {
      print_control_details(object, each)
      count <- 1
    }
  }
  if (count == 0) cat("None\n\n")
  count <- 0
  cat("\nDefault (unspecified) controls:\n")
  cat("------------------------------\n")
  for (each in control_names) {
    if (identical(control_default[[each]], object[[each]])) {
      print_control_details(object, each)
      count <- 1
    }
  }
  if (count == 0) cat("None\n")
  invisible(object)
}
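## A minimal usage sketch (assumes rpa_control_scenario() accepts the alpha
## and beta weights shown; guarded so it never runs at load): summary()
## splits the output into specified and default controls.
if (FALSE) {
  ctrl <- rpa_control_scenario(alpha = 0.5, beta = 0.5)
  print(ctrl) # current controls only
  summary(ctrl) # specified vs. default controls
}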
## File: R/cls_rpacontrol.R
##
## wdnet: Weighted directed network
## Copyright (C) 2024 Yelie Yuan, Tiandong Wang, Jun Yan and Panpan Zhang
## Jun Yan <[email protected]>
##
## This file is part of the R package wdnet.
##
## The R package wdnet is free software: You can redistribute it and/or
## modify it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or (at your
## option) any later version. See the GNU General Public License at
## <https://www.gnu.org/licenses/> for details.
##
## The R package wdnet is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
##

#' @importFrom igraph graph_from_edgelist E V plot.igraph is.igraph as_edgelist
#' @importFrom igraph is.directed vertex.attributes edge.attributes
#' @importFrom utils head
NULL

#' Creates a \code{wdnet} object using \code{edgelist}.
#'
#' @param edgelist A two-column matrix representing the edges.
#' @param edgeweight A numeric vector of edge weights with the same length as
#'   the number of rows in edgelist. If \code{NULL}, all edges will be
#'   assigned weight 1.
#' @param directed Logical, whether the network is directed (TRUE) or
#'   undirected (FALSE).
#' @param nodegroup A numeric vector of node groups.
#' @param ... Additional components to be added to the \code{wdnet} object.
#' @return A \code{wdnet} object with the specified \code{edgelist},
#'   \code{edgeweight} and \code{directed}.
#' @examples
#' edgelist <- matrix(c(1, 2, 2, 3, 3, 1), ncol = 2, byrow = TRUE)
#' edgeweight <- c(1, 2, 3)
#' nodegroup <- c(1, 1, 2)
#' netwk <- edgelist_to_wdnet(
#'   edgelist = edgelist,
#'   edgeweight = edgeweight,
#'   directed = TRUE,
#'   nodegroup = nodegroup
#' )
#'
#' @export
#'
edgelist_to_wdnet <- function(
    edgelist, edgeweight, directed,
    nodegroup, ...) {
  if (missing(directed) || is.null(directed)) {
    # cat("Assume the network is directed.\n\n")
    directed <- TRUE
  }
  stopifnot(is.logical(directed))
  if (missing(edgelist) || is.null(edgelist)) {
    stop('Please provide "edgelist".')
  }
  if (missing(edgeweight) || is.null(edgeweight)) {
    edgeweight <- rep(1, nrow(edgelist))
  }
  if (nrow(edgelist) != length(edgeweight)) {
    stop("Number of rows in 'edgelist' must match the length of 'edgeweight'.")
  }
  if (ncol(edgelist) != 2) {
    stop('"edgelist" must have exactly 2 columns.')
  }
  if (missing(nodegroup)) {
    nodegroup <- NULL
  }
  max_node <- max(edgelist)
  if (!is.null(nodegroup)) {
    if (length(nodegroup) != max_node) {
      stop('Length of "nodegroup" must match the number of nodes in "edgelist".')
    }
  }
  mode(edgelist) <- "integer"
  weighted <- any(edgeweight != 1)
  netwk <- structure(
    list(
      "edgelist" = edgelist,
      "directed" = directed,
      "weighted" = weighted,
      "edge.attr" = data.frame("weight" = edgeweight)
    ),
    class = "wdnet"
  )
  tmp <- node_strength_cpp(
    snode = edgelist[, 1],
    tnode = edgelist[, 2],
    weight = edgeweight,
    nnode = max_node,
    weighted = weighted
  )
  if (directed) {
    netwk$node.attr <- data.frame(
      "outs" = tmp$outs,
      "ins" = tmp$ins
    )
  } else {
    tmp$s <- tmp$outs + tmp$ins
    netwk$node.attr <- data.frame(
      "s" = tmp$s
    )
  }
  netwk$node.attr$group <- nodegroup
  additional_components <- list(...)
  if (length(additional_components) > 0) {
    netwk <- c(netwk, additional_components)
    class(netwk) <- "wdnet"
  }
  if (!is_wdnet(netwk)) {
    stop('Failed to create a valid "wdnet" object.')
  }
  return(netwk)
}
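## A small sketch of the strength bookkeeping above (assumes the toy triangle
## below; out-/in-strength in node.attr are the row/column sums of the
## implied weighted adjacency matrix). Guarded so it never runs at load.
if (FALSE) {
  el <- matrix(c(1, 2, 2, 3, 3, 1), ncol = 2, byrow = TRUE)
  nw <- edgelist_to_wdnet(el, edgeweight = c(1, 2, 3), directed = TRUE)
  nw$node.attr # outs = c(1, 2, 3), ins = c(3, 1, 2)
}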
#' Creates a \code{wdnet} object using an adjacency matrix
#'
#' @param adj An adjacency matrix used to extract \code{edgelist} and
#'   \code{edgeweight} using \code{igraph}.
#' @param directed Logical, whether the network is directed (TRUE) or
#'   undirected (FALSE). If \code{adj} is asymmetric, the network is
#'   directed.
#' @param weighted Logical, whether the network is weighted (TRUE) or
#'   unweighted (FALSE).
#' @param nodegroup A numeric vector of node groups.
#' @param ... Additional components to be added to the \code{wdnet} object.
#' @return A \code{wdnet} object with the specified \code{adj}.
#' @export
#' @examples
#' adj <- matrix(c(0, 1, 2, 0), nrow = 2, ncol = 2, byrow = TRUE)
#' adj_to_wdnet(adj = adj, directed = TRUE, weighted = FALSE)
#'
adj_to_wdnet <- function(
    adj, directed = TRUE, weighted = TRUE,
    nodegroup, ...) {
  if (missing(adj) || is.null(adj)) {
    stop('Please provide "adj".')
  }
  if (missing(directed) || is.null(directed)) {
    # cat("Assume the network is directed.\n\n")
    directed <- TRUE
  } else if (!directed) {
    if (!isSymmetric(adj)) {
      directed <- TRUE
      cat('Network is directed because "adj" is asymmetric.\n\n')
    }
  }
  if (missing(weighted) || is.null(weighted)) {
    weighted <- any(adj[adj > 0] != 1)
    if (weighted) {
      cat("Assume the network is weighted.\n")
    }
  }
  stopifnot(is.logical(directed))
  stopifnot(is.logical(weighted))
  if (missing(nodegroup)) {
    nodegroup <- NULL
  }
  if (!is.null(nodegroup)) {
    if (length(nodegroup) != nrow(adj)) {
      stop('Length of "nodegroup" must match the number of nodes in "adj".')
    }
  }
  tmp <- adj_to_edgelist(
    adj = adj,
    directed = directed,
    weighted = weighted
  )
  edgelist <- tmp$edgelist
  edgeweight <- tmp$edgeweight
  directed <- tmp$directed
  rm(tmp)
  edgelist_to_wdnet(
    edgelist = edgelist,
    edgeweight = edgeweight,
    directed = directed,
    nodegroup = nodegroup,
    ...
  )
}

#' Creates a \code{wdnet} object from input data.
#'
#' This function creates a \code{wdnet} object from \code{edgelist} and
#' \code{edgeweight} or \code{adj}, or returns the existing \code{wdnet}
#' object. For internal usage.
#'
#' @param netwk A \code{wdnet} object. If \code{NULL}, the function will use
#'   the provided \code{edgelist} and \code{edgeweight}, or \code{adj}
#'   parameters to create a new \code{wdnet} object.
#' @param edgelist A two-column matrix representing edges.
#' @param edgeweight A vector representing the weights of the edges.
#' @param nodegroup A numeric vector of node groups.
#' @param directed A logical value indicating whether the network is
#'   directed. Required if \code{netwk} is \code{NULL}.
#' @param adj An adjacency matrix.
#' @param weighted A logical value indicating whether the network is
#'   weighted.
#' @param ... Additional components to be added to the wdnet list.
#'
#' @return A \code{wdnet} object.
#'
#' @keywords internal
#'
create_wdnet <- function(
    netwk, edgelist, edgeweight, nodegroup,
    directed, adj, weighted, ...) {
  if (missing(netwk) || is.null(netwk) || !is_wdnet(netwk)) {
    if (missing(edgelist) || is.null(edgelist)) {
      if (missing(adj) || is.null(adj)) {
        stop('Please provide either "edgelist" or "adj".')
      } else {
        netwk <- adj_to_wdnet(
          adj = adj,
          directed = directed,
          weighted = weighted,
          nodegroup = nodegroup,
          ...
        )
      }
    } else {
      colnames(edgelist) <- NULL
      netwk <- edgelist_to_wdnet(
        edgelist = edgelist,
        edgeweight = edgeweight,
        directed = directed,
        nodegroup = nodegroup,
        ...
      )
    }
  } else {
    if (!missing(directed)) {
      if (directed != netwk$directed) {
        cat(
          'The "directed" argument is omitted since "netwk" is ',
          ifelse(
            netwk$directed,
            "directed.",
            "undirected."
          ),
          "\n"
        )
      }
    }
    additional_components <- list(...)
    if (length(additional_components) > 0) {
      netwk <- c(netwk, additional_components)
      class(netwk) <- "wdnet"
    }
    if (!is.null(netwk$nodegroup)) {
      netwk$node.attr$group <- netwk$nodegroup
      netwk$nodegroup <- NULL
    }
  }
  invisible(netwk)
}

#' Checks if the input is a \code{wdnet} object
#'
#' @param netwk A \code{wdnet} object.
#' @return Logical, \code{TRUE} if argument netwk is a \code{wdnet} object.
#' @export
#' @examples
#' netwk <- rpanet(nstep = 1e3)
#' is_wdnet(netwk)
#'
is_wdnet <- function(netwk) {
  valid_attrs <- c("edgelist", "directed", "weighted", "edge.attr", "node.attr")
  ## use if/else rather than ifelse(): ifelse() is vectorized and would
  ## return only the first element of c("outs", "ins")
  valid_cols <- if (netwk$directed) c("outs", "ins") else "s"
  max_node <- max(netwk$edgelist)
  return(all(
    is.logical(netwk$directed),
    is.logical(netwk$weighted),
    if (netwk$weighted) {
      all(netwk$edge.attr$weight > 0)
    } else {
      all(netwk$edge.attr$weight == 1)
    },
    inherits(netwk, "wdnet"),
    "weight" %in% names(netwk$edge.attr),
    all(valid_attrs %in% names(netwk)),
    all(valid_cols %in% colnames(netwk$node.attr)),
    nrow(netwk$edgelist) == nrow(netwk$edge.attr),
    is.integer(netwk$edgelist),
    max_node == nrow(netwk$node.attr)
  ))
}

#' Converts a \code{wdnet} object to an \code{igraph} object
#'
#' @param netwk A \code{wdnet} object.
#'
#' @return An \code{igraph} object.
#'
#' @export
#' @examples
#' netwk <- rpanet(nstep = 1e3)
#' g <- wdnet_to_igraph(netwk)
#'
wdnet_to_igraph <- function(netwk) {
  stopifnot(is_wdnet(netwk))
  g <- igraph::graph_from_edgelist(
    netwk$edgelist,
    directed = netwk$directed
  )
  if (is.data.frame(netwk$edge.attr)) {
    for (each in colnames(netwk$edge.attr)) {
      g <- igraph::set_edge_attr(
        g, each,
        value = netwk$edge.attr[[each]]
      )
    }
  }
  if (is.data.frame(netwk$node.attr)) {
    for (each in colnames(netwk$node.attr)) {
      g <- igraph::set_vertex_attr(
        g, each,
        value = netwk$node.attr[[each]]
      )
    }
  }
  return(g)
}

#' Converts an \code{igraph} object to a \code{wdnet} object
#'
#' @param g An \code{igraph} object.
#'
#' @return A \code{wdnet} object.
#'
#' @export
#' @examples
#' g <- igraph::sample_pa(50)
#' netwk <- igraph_to_wdnet(g)
#'
igraph_to_wdnet <- function(g) {
  stopifnot(igraph::is.igraph(g))
  edgelist <- igraph::as_edgelist(g, names = FALSE)
  mode(edgelist) <- "integer"
  edgeweight <- igraph::E(g)$weight
  directed <- igraph::is.directed(g)
  netwk <- create_wdnet(
    edgelist = edgelist,
    edgeweight = edgeweight,
    directed = directed
  )
  nattr <- igraph::vertex.attributes(g)
  if (length(nattr) > 0) {
    nattr <- as.data.frame(nattr)
    for (each in colnames(nattr)) {
      if (each %in% colnames(netwk$node.attr)) {
        next
      }
      netwk$node.attr[[each]] <- nattr[[each]]
    }
  }
  eattr <- igraph::edge.attributes(g)
  if (length(eattr) > 0) {
    eattr <- as.data.frame(eattr)
    for (each in colnames(eattr)) {
      if (each %in% colnames(netwk$edge.attr)) {
        next
      }
      netwk$edge.attr[[each]] <- eattr[[each]]
    }
  }
  is_wdnet(netwk)
  netwk
}
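## A round-trip sketch (assumes an unweighted igraph graph; a missing edge
## weight attribute defaults to 1 inside edgelist_to_wdnet()). Guarded so it
## never runs at load.
if (FALSE) {
  g <- igraph::sample_gnp(10, 0.3, directed = TRUE)
  nw <- igraph_to_wdnet(g)
  nrow(nw$edgelist) == igraph::ecount(g) # TRUE
  g2 <- wdnet_to_igraph(nw)
}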
#' Plots the input network
#'
#' Plots the input network via \code{igraph::plot.igraph()}.
#'
#' @param x A \code{wdnet} object.
#' @param ... Additional parameters passed to \code{igraph::plot.igraph()}.
#' @return Returns \code{NULL}, invisibly.
#' @method plot wdnet
#' @export
#'
plot.wdnet <- function(x, ...) {
  stopifnot(is_wdnet(x))
  igraph::plot.igraph(wdnet_to_igraph(x), ...)
  invisible(NULL)
}

#' Prints the input network
#'
#' These functions print a network to the terminal.
#'
#' \code{summary.wdnet} prints the number of nodes and edges, preference
#' functions, and whether the network is directed and weighted.
#' \code{print.wdnet} prints the same information, and also lists some edges
#' and node attributes, if available. Edge scenarios are 0: from initial
#' network; 1: \code{alpha}; 2: \code{beta}; 3: \code{gamma}; 4: \code{xi};
#' 5: \code{rho}; 6: reciprocal.
#'
#' @param x A \code{wdnet} object.
#' @param node.attrs Logical, whether to print node attributes, if available.
#' @param edge.attrs Logical, whether to print edge attributes, if available.
#' @param max.lines Integer, the maximum number of lines of edgelist and node
#'   attributes to print. The rest of the output will be truncated.
#' @param object The graph of which the summary will be printed.
#' @param ... Additional arguments.
#' @rdname print.wdnet
#' @method print wdnet
#' @export
#'
print.wdnet <- function(
    x, node.attrs = TRUE, edge.attrs = TRUE,
    max.lines = 5, ...) {
  summary.wdnet(x)
  nedge <- nrow(x$edgelist)
  nnode <- nrow(x$node.attr)
  n <- min(max.lines, nedge)
  cat("\nEdges:\n")
  tmp <- data.frame(x$edgelist[seq_len(n), ])
  if (x$directed) {
    colnames(tmp) <- c("source", "target")
  } else {
    colnames(tmp) <- c("i", "j")
  }
  if (edge.attrs) {
    if (is.null(x$edge.attr)) {
      cat("No edge attributes\n")
    } else {
      tmp <- cbind(tmp, x$edge.attr[seq_len(n), ])
      colnames(tmp)[3:ncol(tmp)] <- colnames(x$edge.attr)
    }
  }
  print(tmp)
  if (n < nedge) cat("...omitted remaining edges\n")
  n <- min(max.lines, nnode)
  cat("\nNode attributes:\n")
  if (node.attrs) {
    if (is.null(x$node.attr)) {
      cat("No available node attributes to print.")
    } else {
      print(utils::head(x$node.attr, n))
      if (n < nnode) cat("...omitted remaining nodes\n")
    }
  }
  invisible(x)
}

#' @rdname print.wdnet
#' @method summary wdnet
#' @export
#'
summary.wdnet <- function(object, ...) {
  stopifnot(is_wdnet(object))
  cat(
    "Weighted: ", object$weighted, "\n",
    "Directed: ", object$directed, "\n",
    "Number of edges: ", nrow(object$edge.attr), "\n",
    "Number of nodes: ", nrow(object$node.attr), "\n",
    sep = ""
  )
  invisible(object)
}
## File: R/cls_wdnet.R
##
## wdnet: Weighted directed network
## Copyright (C) 2024 Yelie Yuan, Tiandong Wang, Jun Yan and Panpan Zhang
## Jun Yan <[email protected]>
##
## This file is part of the R package wdnet.
##
## The R package wdnet is free software: You can redistribute it and/or
## modify it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or any later
## version (at your option). See the GNU General Public License at
## <https://www.gnu.org/licenses/> for details.
##
## The R package wdnet is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
##

#' @importFrom Matrix Matrix t colSums rowSums diag
NULL

#' Directed clustering coefficient
#'
#' Compute the clustering coefficient of a weighted and directed network.
#'
#' @param netwk A \code{wdnet} object that represents the network. If
#'   \code{NULL}, the function will compute the coefficient using either
#'   \code{edgelist}, \code{edgeweight}, or \code{adj}.
#' @param edgelist A two-column matrix, each row represents a directed edge of
#'   the network.
#' @param edgeweight A vector representing the weight of edges.
#' @param adj An adjacency matrix of a weighted and directed network.
#' @param directed Logical. Indicates whether the edges in \code{edgelist} or
#'   \code{adj} are directed.
#' @param method Which method used to compute clustering coefficients: Clemente
#'   and Grassi (2018) or Fagiolo (2007).
#' @param isolates Binary, defines how to treat vertices with degree zero and
#'   one. If 0, their clustering coefficient is returned as 0 and they are
#'   included in the averaging. Otherwise, their clustering coefficient is
#'   \code{NaN} and they are excluded from the averaging. Default value is 0.
#'
#' @return Lists of local clustering coefficients (in terms of a vector),
#'   global clustering coefficient (in terms of a scalar) and number of
#'   weighted directed triangles (in terms of a vector) based on \code{total},
#'   \code{in}, \code{out}, middleman (\code{middle}), or \code{cycle}
#'   triplets.
#'
#' @references
#' \itemize{
#'   \item Barrat, A., Barthelemy, M., Pastor-Satorras, R. and Vespignani, A.
#'   (2004). The architecture of complex weighted networks. \emph{Proceedings
#'   of National Academy of Sciences of the United States of America},
#'   101(11), 3747--3752.
#'   \item Clemente, G.P. and Grassi, R. (2018). Directed clustering in
#'   weighted networks: A new perspective. \emph{Chaos, Solitons & Fractals},
#'   107, 26--38.
#'   \item Fagiolo, G. (2007). Clustering in complex directed networks.
#'   \emph{Physical Review E}, 76, 026107.
#' }
#'
#' @note Self-loops (if they exist) are removed prior to the computation of
#'   clustering coefficient. When the adjacency matrix is symmetric (i.e.,
#'   undirected but possibly unweighted networks), \code{clustcoef} returns
#'   local and global clustering coefficients proposed by Barrat et al.
#'   (2004).
#'
#' @examples
#' ## Generate a network according to the Erd\"{o}s-Renyi model of order 20
#' ## and parameter p = 0.3
#' edge_ER <- rbinom(400, 1, 0.3)
#' weight_ER <- sapply(edge_ER, function(x) x * sample(3, 1))
#' adj_ER <- matrix(weight_ER, 20, 20)
#' mycc <- clustcoef(adj = adj_ER, method = "Clemente")
#' system.time(clustcoef(adj = adj_ER, method = "Clemente"))
#'
#' @export
#'
clustcoef <- function(
    netwk,
    edgelist,
    edgeweight,
    adj,
    directed = TRUE,
    method = c("Clemente", "Fagiolo"),
    isolates = 0) {
  if (missing(adj)) {
    netwk <- create_wdnet(
      netwk = netwk,
      edgelist = edgelist,
      edgeweight = edgeweight,
      directed = directed
    )
    # stopifnot(
    #   "Network must be directed." = netwk$directed
    # )
    adj <- edgelist_to_adj(
      edgelist = netwk$edgelist,
      edgeweight = netwk$edge.attr$weight,
      directed = netwk$directed
    )
  }
  stopifnot(dim(adj)[1] == dim(adj)[2])
  method <- match.arg(method)

  ## Force to remove self-loops.
  diag(adj) <- 0

  ## Extract the unweighted adjacency matrix
  adj <- Matrix::Matrix(adj, sparse = TRUE)
  A <- Matrix::Matrix(adj > 0, sparse = TRUE)

  ## Compute strength vector
  s_in <- Matrix::colSums(adj)
  s_out <- Matrix::rowSums(adj)
  s_tot <- s_in + s_out
  s_bil <- (Matrix::colSums(Matrix::t(adj) * A) +
    Matrix::colSums(Matrix::t(A) * adj)) / 2

  ## Compute degree vector
  d_in <- Matrix::colSums(A)
  d_out <- Matrix::rowSums(A)
  d_tot <- d_in + d_out
  A_A <- A %*% A
  d_bil <- Matrix::diag(A_A)

  if (method == "Clemente") {
    A_At <- A %*% Matrix::t(A)
    At_A <- Matrix::t(A) %*% A
    W_A_A <- Matrix::colSums(Matrix::t(adj) * A_A)
    W_A_At <- Matrix::colSums(Matrix::t(adj) * A_At)
    W_At_At <- Matrix::colSums(Matrix::t(adj) * Matrix::t(A_A))
    Wt_A_A <- Matrix::colSums(adj * A_A)
    Wt_A_At <- Matrix::colSums(adj * A_At)
    Wt_At_At <- Matrix::colSums(adj * Matrix::t(A_A))
    Wt_At_A <- Matrix::colSums(adj * At_A)
    W_At_A <- Matrix::colSums(Matrix::t(adj) * At_A)
    denomTotal <- s_tot * (d_tot - 1) - 2 * s_bil
    denomIn <- s_in * (d_in - 1)
    denomOut <- s_out * (d_out - 1)
    denomMiddle <- (s_in * d_out + s_out * d_in) / 2 - s_bil
    numTriangles <- list(
      "total" = (W_A_A + W_A_At + W_At_A + W_At_At +
        Wt_A_A + Wt_A_At + Wt_At_A + Wt_At_At) / 2,
      "in" = (Wt_A_A + Wt_At_A) / 2,
      "out" = (W_A_At + W_At_At) / 2,
      "middle" = (Wt_A_At + W_At_A) / 2,
      "cycle" = (W_A_A + Wt_At_At) / 2
    )
  }
  if (method == "Fagiolo") {
    # W is adjhat
    W <- (adj / max(adj))^(1 / 3)
    W_W <- W %*% W
    W_W_W <- Matrix::colSums(Matrix::t(W) * W_W)
    W_W_Wt <- Matrix::rowSums(W_W * W)
    W_Wt_W <- Matrix::colSums(Matrix::t(W) * (Matrix::t(W) %*% W))
    Wt_W_W <- Matrix::colSums(W * W_W)
    denomTotal <- d_tot * (d_tot - 1) - 2 * d_bil
    denomIn <- d_in * (d_in - 1)
    denomOut <- d_out * (d_out - 1)
    denomMiddle <- d_in * d_out - d_bil
    numTriangles <- list(
      "total" = (W_W_W + W_W_Wt + W_Wt_W + Wt_W_W),
      "in" = Wt_W_W,
      "out" = W_W_Wt,
      "middle" = W_Wt_W,
      "cycle" = W_W_W
    )
  }
  localcc <- list(
    "total" = numTriangles$"total" / denomTotal,
    "in" = numTriangles$"in" / denomIn,
    "out" = numTriangles$"out" / denomOut,
    "middle" = numTriangles$"middle" / denomMiddle,
    "cycle" = numTriangles$"cycle" / denomMiddle
  )
  if (isolates == "zero") {
    cat('Argument "isolates" has been revised; use "isolates = 0" instead.\n')
    isolates <- 0
  }
  if (isolates == 0) {
    localcc <- rapply(localcc, function(i) ifelse(is.na(i), 0, i),
      how = "replace"
    )
  }
  globalcc <- list(
    "total" = mean(localcc$"total", na.rm = TRUE),
    "in" = mean(localcc$"in", na.rm = TRUE),
    "out" = mean(localcc$"out", na.rm = TRUE),
    "middle" = mean(localcc$"middle", na.rm = TRUE),
    "cycle" = mean(localcc$"cycle", na.rm = TRUE)
  )
  return(list(
    "total" = list(
      "localcc" = localcc$"total",
      "globalcc" = globalcc$"total",
      "numtriangles" = numTriangles$"total"
    ),
    "out" = list(
      "localcc" = localcc$"out",
      "globalcc" = globalcc$"out",
      "numtriangles" = numTriangles$"out"
    ),
    "in" = list(
      "localcc" = localcc$"in",
      "globalcc" = globalcc$"in",
      "numtriangles" = numTriangles$"in"
    ),
    "middle" = list(
      "localcc" = localcc$"middle",
      "globalcc" = globalcc$"middle",
      "numtriangles" = numTriangles$"middle"
    ),
    "cycle" = list(
      "localcc" = localcc$"cycle",
      "globalcc" = globalcc$"cycle",
      "numtriangles" = numTriangles$"cycle"
    )
  ))
}
## File: R/clustercoef.R
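## Usage sketch (illustrative, not package source): comparing the two methods
## above on a small weighted directed adjacency matrix; only the exported
## clustcoef() is assumed.
set.seed(42)
adj_demo <- matrix(rpois(36, lambda = 1), 6, 6)
cc_cl <- clustcoef(adj = adj_demo, method = "Clemente")
cc_fa <- clustcoef(adj = adj_demo, method = "Fagiolo")
## The methods normalize triangle weights differently (node strengths vs.
## rescaled weights), so the global coefficients generally differ.
c(Clemente = cc_cl$total$globalcc, Fagiolo = cc_fa$total$globalcc)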
##
## wdnet: Weighted directed network
## Copyright (C) 2024 Yelie Yuan, Tiandong Wang, Jun Yan and Panpan Zhang
## Jun Yan <[email protected]>
##
## This file is part of the R package wdnet.
##
## The R package wdnet is free software: You can redistribute it and/or
## modify it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or any later
## version (at your option). See the GNU General Public License at
## <https://www.gnu.org/licenses/> for details.
##
## The R package wdnet is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
##

#' @importFrom RcppXPtrUtils checkXPtr cppXPtr
NULL

#' Compile preference functions via \code{RcppXPtrUtils}.
#'
#' @param preference A list for defining the preference functions.
#' @param directed Logical, whether to compile the preference functions for
#'   directed networks. If missing, the preference functions for both directed
#'   and undirected networks will be compiled.
#'
#' @return Returns the input list and their corresponding external pointers.
#'
#' @keywords internal
#'
compile_pref_func <- function(preference, directed) {
  if (missing(directed) || directed) {
    if (inherits(preference$spref, "character")) {
      tmp <- paste("double spref(double outs, double ins) { return ",
        preference$spref, ";}",
        sep = ""
      )
      preference$spref.pointer <- RcppXPtrUtils::cppXPtr(code = tmp)
      rm(tmp)
    } else if (inherits(preference$spref, "XPtr")) {
      tryCatch(
        {
          RcppXPtrUtils::checkXPtr(
            ptr = preference$spref,
            type = "double",
            args = c("double", "double")
          )
        },
        ## The handler must accept the condition object passed by tryCatch.
        error = function(e) {
          stop('Incorrect argument or return type for "spref"; all should be "double".')
        }
      )
      preference$spref.pointer <- preference$spref
      tmp <- utils::capture.output(preference$spref.pointer)
      if (grepl("pointer:\\ \\(nil\\)", tmp)) {
        stop('"XPtr" for "spref" is not valid, please recompile the C++ code.')
      }
      rm(tmp)
    } else {
      stop('The class of "spref" must be either "XPtr" or "character".')
    }
    if (inherits(preference$tpref, "character")) {
      tmp <- paste("double tpref(double outs, double ins) { return ",
        preference$tpref, ";}",
        sep = ""
      )
      preference$tpref.pointer <- RcppXPtrUtils::cppXPtr(code = tmp)
      rm(tmp)
    } else if (inherits(preference$tpref, "XPtr")) {
      tryCatch(
        {
          RcppXPtrUtils::checkXPtr(
            ptr = preference$tpref,
            type = "double",
            args = c("double", "double")
          )
        },
        error = function(e) {
          stop('Incorrect argument or return type for "tpref"; all should be "double".')
        }
      )
      preference$tpref.pointer <- preference$tpref
      tmp <- utils::capture.output(preference$tpref.pointer)
      if (grepl("pointer:\\ \\(nil\\)", tmp)) {
        stop('"XPtr" for "tpref" is not valid, please recompile the C++ code.')
      }
      rm(tmp)
    } else {
      stop('The class of "tpref" must be either "XPtr" or "character".')
    }
  }
  if (missing(directed) || !directed) {
    if (inherits(preference$pref, "character")) {
      tmp <- paste("double pref(double s) { return ",
        preference$pref, ";}",
        sep = ""
      )
      preference$pref.pointer <- RcppXPtrUtils::cppXPtr(code = tmp)
      rm(tmp)
    } else if (inherits(preference$pref, "XPtr")) {
      tryCatch(
        {
          RcppXPtrUtils::checkXPtr(
            ptr = preference$pref,
            type = "double",
            args = "double"
          )
        },
        error = function(e) {
          stop('Incorrect argument or return type for "pref"; all should be "double".')
        }
      )
      preference$pref.pointer <- preference$pref
      tmp <- utils::capture.output(preference$pref.pointer)
      if (grepl("pointer:\\ \\(nil\\)", tmp)) {
        stop('"XPtr" for "pref" is not valid, please recompile the C++ code.')
      }
      rm(tmp)
    } else {
      stop('The class of "pref" must be either "XPtr" or "character".')
    }
  }
  return(preference)
}
## File: R/compile_pref.R
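## Illustrative sketch of the two accepted preference forms handled above.
## compile_pref_func() is internal, hence the ':::'; compiling the character
## form requires a working C++ toolchain for RcppXPtrUtils::cppXPtr().
pref <- list(spref = "outs + 1", tpref = "pow(ins, 2) + 1")
compiled <- wdnet:::compile_pref_func(pref, directed = TRUE)
## The returned list gains "*.pointer" entries, ready for the C++ samplers.
class(compiled$spref.pointer)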
##
## wdnet: Weighted directed network
## Copyright (C) 2024 Yelie Yuan, Tiandong Wang, Jun Yan and Panpan Zhang
## Jun Yan <[email protected]>
##
## This file is part of the R package wdnet.
##
## The R package wdnet is free software: You can redistribute it and/or
## modify it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or any later
## version (at your option). See the GNU General Public License at
## <https://www.gnu.org/licenses/> for details.
##
## The R package wdnet is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
##

#' @importFrom CVXR Variable sum_entries Minimize Maximize Problem solve
NULL

#' Get the node-level joint distributions and some empirical distributions
#' with given edgelist.
#'
#' @param edgelist A two-column matrix representing the directed edges of a
#'   network.
#' @param directed Logical, whether the network is directed.
#' @param joint_dist Logical, whether to return edge-level distributions.
#'
#' @return A list of distributions and degree vectors.
#'
#' @keywords internal
#'
get_dist <- function(edgelist, directed = TRUE, joint_dist = FALSE) {
  if (!directed) edgelist <- rbind(edgelist, edgelist[, c(2, 1)])
  edgelist <- as.matrix(edgelist)
  temp <- node_strength_cpp(
    snode = edgelist[, 1],
    tnode = edgelist[, 2],
    nnode = max(edgelist),
    weight = 1,
    weighted = FALSE
  )
  outd <- temp$outs
  ind <- temp$ins
  nedge <- nrow(edgelist)
  nu <- data.frame("outdegree" = outd, "indegree" = ind)
  nu <- table(nu) / length(outd)
  d_out <- as.numeric(rownames(nu))
  d_in <- as.numeric(colnames(nu))
  p_out <- as.numeric(rowSums(nu))
  p_in <- as.numeric(colSums(nu))
  t1 <- nu * d_out
  t1 <- t1 / sum(t1)
  t2 <- t(t(nu) * d_in)
  t2 <- t2 / sum(t2)
  # source-out
  q_s_out <- rowSums(t1)
  # target-in
  q_t_in <- colSums(t2)
  # source-in
  q_s_in <- colSums(t1)
  # target-out
  q_t_out <- rowSums(t2)
  e <- eta <- NA
  # other joint distributions
  if (joint_dist) {
    e <- list(
      "outout" = table(data.frame(
        "source" = outd[edgelist[, 1]],
        "target" = outd[edgelist[, 2]]
      )) / nedge,
      "outin" = table(data.frame(
        "source" = outd[edgelist[, 1]],
        "target" = ind[edgelist[, 2]]
      )) / nedge,
      "inout" = table(data.frame(
        "source" = ind[edgelist[, 1]],
        "target" = outd[edgelist[, 2]]
      )) / nedge,
      "inin" = table(data.frame(
        "source" = ind[edgelist[, 1]],
        "target" = ind[edgelist[, 2]]
      )) / nedge
    )
    eta <- table(data.frame(
      "source" = paste(outd[edgelist[, 1]], ind[edgelist[, 1]], sep = "-"),
      "target" = paste(outd[edgelist[, 2]], ind[edgelist[, 2]], sep = "-")
    )) / nedge
  }
  list(
    nu = nu,
    e = e,
    eta = eta,
    d_out = d_out,
    d_in = d_in,
    p_out = p_out,
    p_in = p_in,
    q_s_out = q_s_out,
    q_s_in = q_s_in,
    q_t_out = q_t_out,
    q_t_in = q_t_in
  )
}

#' Get the constraints for the optimization problem. This function is defined
#' for \code{get_eta_directed()}.
#'
#' @param constrs A list of constraints.
#' @param target.assortcoef A list of target assortativity levels.
#' @param rho A list of variable objects.
#'
#' @return A list of updated constraints.
#'
#' @keywords internal
#'
get_constr <- function(constrs, target.assortcoef, rho) {
  for (type in names(target.assortcoef)) {
    if (!is.null(target.assortcoef[[type]])) {
      if (length(target.assortcoef[[type]]) == 1) {
        constrs[[type]] <- rho[[type]] == target.assortcoef[[type]]
      } else {
        constrs[[paste0(type, "_max")]] <-
          rho[[type]] <= max(target.assortcoef[[type]])
        constrs[[paste0(type, "_min")]] <-
          rho[[type]] >= min(target.assortcoef[[type]])
      }
    }
  }
  return(constrs)
}

#' Get the value of an object from the optimization problem. This function is
#' defined for \code{get_eta_directed()}.
#'
#' @param object An object from the optimization problem.
#' @param result A list returned from \code{CVXR::solve()}.
#' @param mydist A list returned from \code{get_dist()}.
#'
#' @return Returns the value of the object.
#'
#' @keywords internal
#'
get_values <- function(object, result, mydist) {
  outout <- result$getValue(object[["outout"]])
  outin <- result$getValue(object[["outin"]])
  inout <- result$getValue(object[["inout"]])
  inin <- result$getValue(object[["inin"]])
  if (deparse(substitute(object)) == "e" &&
    !any(is.na(outout), is.na(outin), is.na(inout), is.na(inin))) {
    rownames(outout) <- rownames(outin) <- mydist$d_out
    colnames(inout) <- colnames(outout) <- mydist$d_out
    rownames(inout) <- rownames(inin) <- mydist$d_in
    colnames(outin) <- colnames(inin) <- mydist$d_in
  }
  list(
    "outout" = outout,
    "outin" = outin,
    "inout" = inout,
    "inin" = inin
  )
}

#' Parameters passed to CVXR::solve().
#'
#' Defined for the convex optimization problems for solving \code{eta}.
#'
#' @param solver (Optional) A string indicating the solver to use. Defaults to
#'   "ECOS".
#' @param ignore_dcp (Optional) A logical value indicating whether to override
#'   the DCP check for a problem.
#' @param warm_start (Optional) A logical value indicating whether the previous
#'   solver result should be used to warm start.
#' @param verbose (Optional) A logical value indicating whether to print
#'   additional solver output.
#' @param parallel (Optional) A logical value indicating whether to solve in
#'   parallel if the problem is separable.
#' @param gp (Optional) A logical value indicating whether the problem is a
#'   geometric program. Defaults to FALSE.
#' @param feastol The feasible tolerance on the primal and dual residual.
#'   Defaults to 1e-5.
#' @param reltol The relative tolerance on the duality gap. Defaults to 1e-5.
#' @param abstol The absolute tolerance on the duality gap. Defaults to 1e-5.
#' @param num_iter The maximum number of iterations.
#' @param ... Additional options that will be passed to the specific solver.
#'   In general, these options will override any default settings imposed by
#'   CVXR.
#'
#' @return A list containing the parameters.
#' @export
#'
#' @examples
#' control <- cvxr_control(solver = "OSQP", abstol = 1e-5)
cvxr_control <- function(
    solver = "ECOS",
    ignore_dcp = FALSE,
    warm_start = FALSE,
    verbose = FALSE,
    parallel = FALSE,
    gp = FALSE,
    feastol = 1e-5,
    reltol = 1e-5,
    abstol = 1e-5,
    num_iter = NULL,
    ...) {
  return(list(
    solver = solver,
    ignore_dcp = ignore_dcp,
    warm_start = warm_start,
    verbose = verbose,
    parallel = parallel,
    gp = gp,
    feastol = feastol,
    reltol = reltol,
    abstol = abstol,
    num_iter = num_iter,
    ...
  ))
}

#' Compute edge-level distributions for directed networks with respect to
#' desired assortativity level(s).
#'
#' @param edgelist A two-column matrix representing the directed edges of a
#'   network.
#' @param target.assortcoef A list representing the predetermined value or
#'   range of assortativity coefficients.
#' @param eta.obj A convex function of \code{eta} to be minimized when
#'   \code{which.range} is \code{NULL}. Defaults to 0.
#' @param which.range Character, "outout", "outin", "inout" or "inin",
#'   representing the assortativity coefficient of interest.
#' @param control A list of parameters passed to \code{CVXR::solve()} when
#'   solving for \code{eta} or computing the range of assortativity
#'   coefficient.
#' @return Assortativity coefficients and joint distributions. If
#'   \code{which.range} is specified, the range of the coefficient of interest
#'   and the corresponding solver results will be returned, provided the
#'   predetermined \code{target.assortcoef} is satisfied.
#'
#' @keywords internal
#'
get_eta_directed <- function(
    edgelist,
    target.assortcoef = list(
      "outout" = NULL, "outin" = NULL,
      "inout" = NULL, "inin" = NULL
    ),
    eta.obj = function(x) 0,
    which.range,
    control = cvxr_control()) {
  stopifnot(all(names(target.assortcoef) %in% c(
    "outout", "outin", "inout", "inin"
  )))
  mydist <- get_dist(edgelist = edgelist, directed = TRUE)
  m <- length(mydist$d_out)
  n <- length(mydist$d_in)
  s_outin <- c(t(mydist$nu * mydist$d_out))
  s_outin <- s_outin / sum(s_outin)
  t_outin <- c(t(mydist$nu) * mydist$d_in)
  t_outin <- t_outin / sum(t_outin)
  index_s <- s_outin != 0
  index_t <- t_outin != 0
  eMat <- CVXR::Variable(sum(index_s), sum(index_t), nonneg = TRUE)
  constrs <- list(
    "rowSum" = CVXR::sum_entries(eMat, 1) == s_outin[index_s],
    "colSum" = CVXR::sum_entries(eMat, 2) == t_outin[index_t]
  )
  rm(s_outin, t_outin)
  mat1 <- kronecker(diag(rep(1, m)), t(rep(1, n)))
  mat2 <- kronecker(rep(1, m), diag(rep(1, n)))
  e <- list(
    "outout" = mat1[, index_s] %*% eMat %*% t(mat1[, index_t]),
    "outin" = mat1[, index_s] %*% eMat %*% mat2[index_t, ],
    "inout" = t(mat2[index_s, ]) %*% eMat %*% t(mat1[, index_t]),
    "inin" = t(mat2[index_s, ]) %*% eMat %*% mat2[index_t, ]
  )
  rm(mat1, mat2, m, n)
  my_sigma <- function(j, q) {
    (sum(j^2 * q) - sum(j * q)^2)^0.5
  }
  sig <- list(
    s_out = my_sigma(mydist$d_out, mydist$q_s_out),
    s_in = my_sigma(mydist$d_in, mydist$q_s_in),
    t_out = my_sigma(mydist$d_out, mydist$q_t_out),
    t_in = my_sigma(mydist$d_in, mydist$q_t_in)
  )
  rho <- list(
    "outout" = t(mydist$d_out) %*%
      (e$"outout" - mydist$q_s_out %*% t(mydist$q_t_out)) %*%
      mydist$d_out / sig$s_out / sig$t_out,
    "outin" = t(mydist$d_out) %*%
      (e$"outin" - mydist$q_s_out %*% t(mydist$q_t_in)) %*%
      mydist$d_in / sig$s_out / sig$t_in,
    "inout" = t(mydist$d_in) %*%
      (e$"inout" - mydist$q_s_in %*% t(mydist$q_t_out)) %*%
      mydist$d_out / sig$s_in / sig$t_out,
    "inin" = t(mydist$d_in) %*%
      (e$"inin" - mydist$q_s_in %*% t(mydist$q_t_in)) %*%
      mydist$d_in / sig$s_in / sig$t_in
  )
  name_eMat <- function(eMat, a = mydist$d_out, b = mydist$d_in,
                        index_a = index_s, index_b = index_t) {
    temp <- paste0(rep(a, each = length(b)), "-", rep(b, length(a)))
    colnames(eMat) <- temp[index_b]
    rownames(eMat) <- temp[index_a]
    names(attributes(eMat)$dimnames) <- c("source", "target")
    eMat
  }
  constrs <- get_constr(constrs, target.assortcoef, rho)
  retitems <- c(
    "value", "status", "solver",
    "solve_time", "setup_time", "num_iters"
  )
  if (missing(which.range)) {
    problem <- CVXR::Problem(
      CVXR::Minimize(do.call(eta.obj, list(eMat))), constrs
    )
    result <- do.call(CVXR::solve, c(list(problem), control))
    ret <- result[retitems]
    if (result$status == "solver_error" || result$status == "infeasible") {
      warning(paste0("Solver status: ", result$status))
      return(ret)
    }
    ret$assortcoef <- get_values(rho, result, mydist)
    # ret$e <- get_values(e, result, mydist)
    ret$eta <- name_eMat(result$getValue(eMat))
    return(ret)
  } else {
    tempRho <- rho
    stopifnot("'which.range' is not valid." = which.range %in% names(tempRho))
    problem1 <- CVXR::Problem(CVXR::Minimize(tempRho[[which.range]]), constrs)
    result1 <- do.call(CVXR::solve, c(list(problem1), control))
    if (result1$status == "solver_error" || result1$status == "infeasible") {
      warning(paste0("Lower bound solver status: ", result1$status))
    }
    problem2 <- CVXR::Problem(CVXR::Maximize(tempRho[[which.range]]), constrs)
    result2 <- do.call(CVXR::solve, c(list(problem2), control))
    if (result2$status == "solver_error" || result2$status == "infeasible") {
      warning(paste0("Upper bound solver status: ", result2$status))
    }
    return(list(
      "range" = c(
        result1$getValue(tempRho[[which.range]]),
        result2$getValue(tempRho[[which.range]])
      ),
      "lbound.solver.result" = result1[retitems],
      "ubound.solver.result" = result2[retitems]
    ))
  }
}

#' Compute edge-level distribution for undirected networks with respect to
#' desired assortativity level.
#'
#' @param edgelist A two-column matrix representing the undirected edges of a
#'   network.
#' @param target.assortcoef Numeric, represents the predetermined
#'   assortativity coefficient. If \code{NULL}, the range of assortativity
#'   coefficient and corresponding joint distribution are returned.
#' @param eta.obj A convex function of \code{eta} to be minimized when
#'   \code{target.assortcoef} is not \code{NULL}. Defaults to 0.
#' @param control A list of parameters passed to \code{CVXR::solve()} when
#'   solving for \code{eta} or computing the range of assortativity
#'   coefficient.
#'
#' @return Assortativity level and corresponding edge-level distribution.
#'
#' @keywords internal
#'
get_eta_undirected <- function(
    edgelist,
    target.assortcoef = NULL,
    eta.obj = function(x) 0,
    control = cvxr_control()) {
  stopifnot((target.assortcoef <= 1 & target.assortcoef >= -1) ||
    is.null(target.assortcoef))
  mydist <- get_dist(edgelist = edgelist, directed = FALSE)
  k <- mydist$d_out
  q_k <- mydist$q_s_out
  rm(mydist)
  name_eMat <- function(eMat, k) {
    colnames(eMat) <- rownames(eMat) <- k
    eMat
  }
  if (!is.null(target.assortcoef)) {
    if (target.assortcoef == 0) {
      return(list(
        "assortcoef" = 0,
        "eta" = name_eMat(q_k %*% t(q_k), k)
      ))
    }
  }
  n <- length(k)
  sig2 <- sum(k^2 * q_k) - (sum(k * q_k))^2
  eMat <- CVXR::Variable(n, n, nonneg = TRUE)
  rho <- t(k) %*% (eMat - q_k %*% t(q_k)) %*% k / sig2
  constrs <- list(
    CVXR::sum_entries(eMat, 1) == q_k,
    eMat == t(eMat)
  )
  retitems <- c(
    "value", "status", "solver",
    "solve_time", "setup_time", "num_iters"
  )
  if (!is.null(target.assortcoef)) {
    constrs$"rho" <- rho == target.assortcoef
    problem <- CVXR::Problem(
      CVXR::Minimize(do.call(eta.obj, list(eMat))),
      constrs
    )
    result <- do.call(CVXR::solve, c(list(problem), control))
    ret <- result[retitems]
    if (result$status == "solver_error" || result$status == "infeasible") {
      warning(paste0("Solver status: ", result$status))
      return(ret)
    }
    ret$assortcoef <- result$getValue(rho)
    ret$eta <- name_eMat(result$getValue(eMat), k)
    return(ret)
  } else {
    # constrs$"rho" <- rho <= 1
    problem1 <- CVXR::Problem(CVXR::Minimize(rho), constrs)
    result1 <- do.call(CVXR::solve, c(list(problem1), control))
    if (result1$status == "solver_error" || result1$status == "infeasible") {
      warning(paste0("Lower bound solver status: ", result1$status))
    }
    problem2 <- CVXR::Problem(CVXR::Maximize(rho), constrs)
    result2 <- do.call(CVXR::solve, c(list(problem2), control))
    if (result2$status == "solver_error" || result2$status == "infeasible") {
      warning(paste0("Upper bound solver status: ", result2$status))
    }
    return(list(
      "range" = c(result1$getValue(rho), result2$getValue(rho)),
      "lbound.solver.result" = result1[retitems],
      "ubound.solver.result" = result2[retitems]
    ))
  }
}
## File: R/joint_dist.R
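## Illustrative sketch of the internal get_eta_undirected() (the exported
## interface is dprewire()/dprewire.range()). A target assortativity of 0
## takes the closed-form shortcut above, so no CVXR solve is required.
el <- matrix(c(1, 2, 2, 3, 3, 4, 4, 1, 1, 3), ncol = 2, byrow = TRUE)
sol <- wdnet:::get_eta_undirected(edgelist = el, target.assortcoef = 0)
sol$eta  # the outer product q_k %*% t(q_k) of the endpoint degree distribution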
##
## wdnet: Weighted directed network
## Copyright (C) 2024 Yelie Yuan, Tiandong Wang, Jun Yan and Panpan Zhang
## Jun Yan <[email protected]>
##
## This file is part of the R package wdnet.
##
## The R package wdnet is free software: You can redistribute it and/or
## modify it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or any later
## version (at your option). See the GNU General Public License at
## <https://www.gnu.org/licenses/> for details.
##
## The R package wdnet is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
##

#' @importFrom stats cor
#' @importFrom CVXR norm2
#' @importFrom utils modifyList
NULL

#' Degree preserving rewiring for directed networks
#'
#' Degree preserving rewiring towards the target structure \code{eta}.
#'
#' @param edgelist A two-column matrix, each row represents a directed edge
#'   from the first column to the second column.
#' @param eta A matrix generated by \code{wdnet::get_eta_directed()}.
#' @param iteration An integer, the number of rewiring iterations, with each
#'   iteration consisting of \code{nattempts} rewiring attempts.
#' @param nattempts An integer, the number of rewiring attempts for each
#'   iteration. Default value equals the number of rows in \code{edgelist}.
#' @param rewire.history Logical, whether the rewiring history should be
#'   returned.
#'
#' @return Rewired edgelist, degree based assortativity coefficients after
#'   each iteration, rewiring history (including the index of sampled edges
#'   and rewiring result). For each rewiring attempt, two rows are sampled
#'   from the edgelist, for example Edge1:(v_1, v_2) and Edge2:(v_3, v_4). If
#'   the rewiring attempt is accepted, the sampled edges are replaced as
#'   (v_1, v_4), (v_3, v_2).
#'
#' @keywords internal
#'
dprewire_directed <- function(
    edgelist, eta, iteration = 200, nattempts,
    rewire.history = FALSE) {
  if (missing(nattempts) || is.null(nattempts)) nattempts <- nrow(edgelist)
  edgelist <- as.matrix(edgelist)
  snode <- edgelist[, 1]
  tnode <- edgelist[, 2]
  temp <- node_strength_cpp(
    snode = snode,
    tnode = tnode,
    nnode = max(edgelist),
    weight = 1,
    weighted = FALSE
  )
  outd <- temp$outs
  ind <- temp$ins
  sout <- outd[snode]
  sin <- ind[snode]
  tout <- outd[tnode]
  tin <- ind[tnode]
  df_s <- data.frame(
    type = rownames(eta),
    index = seq_len(nrow(eta)) - 1
  )
  df_t <- data.frame(
    type = colnames(eta),
    index = seq_len(ncol(eta)) - 1
  )
  type_s <- paste0(sout, "-", sin)
  type_t <- paste0(tout, "-", tin)
  index_s <- df_s[match(type_s, df_s$type), "index"]
  index_t <- df_t[match(type_t, df_t$type), "index"]
  rm(df_s, df_t, type_s, type_t, temp, outd, ind)
  ret <- dprewire_directed_cpp(
    iteration, nattempts, tnode,
    sout, sin, tout, tin,
    index_s, index_t, eta, rewire.history
  )
  rho <- data.frame(
    "Iteration" = c(0:iteration),
    "outout" = NA_real_,
    "outin" = NA_real_,
    "inout" = NA_real_,
    "inin" = NA_real_
  )
  rho[1, 2:5] <- c(
    "outout" = stats::cor(sout, tout),
    "outin" = stats::cor(sout, tin),
    "inout" = stats::cor(sin, tout),
    "inin" = stats::cor(sin, tin)
  )
  rho[2:(iteration + 1), 2] <- ret$outout
  rho[2:(iteration + 1), 3] <- ret$outin
  rho[2:(iteration + 1), 4] <- ret$inout
  rho[2:(iteration + 1), 5] <- ret$inin
  colnames(rho) <- c("Iteration", "outout", "outin", "inout", "inin")
  edgelist[, 2] <- ret$tnode
  result <- list(
    "assortcoef" = rho,
    "netwk" = create_wdnet(edgelist = edgelist, directed = TRUE),
    "iteration" = iteration,
    "nattempts" = nattempts
  )
  if (rewire.history) {
    colnames(ret$history) <- c("Attempt", "Edge1", "Edge2", "Accepted")
    ret$history[, 1:3] <- ret$history[, 1:3] + 1
    result$history <- ret$history
  }
  return(result)
}

#' Degree preserving rewiring for undirected networks
#'
#' Degree preserving rewiring towards the target structure \code{eta}.
#'
#' @param edgelist A two-column matrix, each row represents an undirected
#'   edge.
#' @param iteration An integer, number of rewiring iterations, each iteration
#'   consists of \code{nattempts} rewiring attempts.
#' @param nattempts An integer, number of rewiring attempts for each
#'   iteration. The default value equals the number of rows in
#'   \code{edgelist}.
#' @param eta A matrix generated by \code{wdnet::get_eta_undirected()}.
#' @param rewire.history Logical, whether the rewiring history should be
#'   returned.
#' @return Rewired edgelist, assortativity coefficient after each iteration,
#'   and rewiring history (including the index of sampled edges and rewiring
#'   result). For each rewiring attempt, two rows are sampled from the
#'   \code{edgelist}, for example Edge1:\{v_1, v_2\} and Edge2:\{v_3, v_4\};
#'   the function tries to rewire the sampled edges as \{v_1, v_4\},
#'   \{v_3, v_2\} (rewire type 1) or \{v_1, v_3\}, \{v_2, v_4\} (rewire type
#'   2) with probability 1/2.
#'
#' @keywords internal
#'
dprewire_undirected <- function(
    edgelist, eta, iteration = 200, nattempts,
    rewire.history = FALSE) {
  if (missing(nattempts) || is.null(nattempts)) nattempts <- nrow(edgelist)
  edgelist <- as.matrix(edgelist)
  degree <- data.frame(table(c(edgelist)))$Freq
  d_df <- data.frame(type = rownames(eta), index = seq_len(nrow(eta)) - 1)
  node1 <- edgelist[, 1]
  node2 <- edgelist[, 2]
  index1 <- d_df[match(degree[node1], d_df$type), "index"]
  index2 <- d_df[match(degree[node2], d_df$type), "index"]
  rm(d_df)
  degree1 <- degree[c(node1, node2)]
  degree2 <- degree[c(node2, node1)]
  ret <- dprewire_undirected_cpp(
    iteration, nattempts,
    node1, node2, degree1, degree2,
    index1, index2, eta, rewire.history
  )
  rm(node1, node2, degree1, degree2, index1, index2)
  rho <- data.frame("Iteration" = c(0:iteration), "Value" = NA_real_)
  rho[1, 2] <- assortcoef(edgelist = edgelist, directed = FALSE)
  rho[2:(iteration + 1), 2] <- ret$rho
  colnames(rho) <- c("Iteration", "Value")
  edgelist <- cbind(ret$node1, ret$node2)
  result <- list(
    "assortcoef" = rho,
    "netwk" = create_wdnet(edgelist = edgelist, directed = FALSE),
    "iteration" = iteration,
    "nattempts" = nattempts
  )
  if (rewire.history) {
    colnames(ret$history) <- c(
      "Attempt", "Edge1", "Edge2", "RewireType", "Accepted"
    )
    ret$history[, 1:4] <- ret$history[, 1:4] + 1
    result$history <- ret$history
  }
  return(result)
}

#' Degree preserving rewiring.
#'
#' Rewire a given network to have predetermined assortativity coefficient(s)
#' while preserving node degree.
#'
#' The algorithm first solves for an appropriate \code{eta} using
#' \code{target.assortcoef}, \code{eta.obj}, and \code{cvxr_control}, then
#' proceeds to the rewiring process and rewires the network towards the
#' solved \code{eta}. If \code{eta} is given, the algorithm will skip the
#' first step. This function only works for unweighted networks.
#'
#' Each rewiring attempt samples two rows from \code{edgelist}, for instance
#' Edge 1:(v_1, v_2) and Edge 2:(v_3, v_4). For directed networks, if the
#' rewiring attempt is accepted, the sampled edges are rewired as (v_1, v_4),
#' (v_3, v_2); for undirected networks, the algorithm tries to rewire the
#' sampled edges as \{v_1, v_4\}, \{v_3, v_2\} (type 1) or \{v_1, v_3\},
#' \{v_2, v_4\} (type 2), each with probability 1/2.
#'
#' @param netwk A \code{wdnet} object representing an unweighted network. If
#'   \code{NULL}, the function will construct a network using either
#'   \code{edgelist} or \code{adj}.
#' @param edgelist A two-column matrix, each row represents an edge of the
#'   network.
#' @param adj An adjacency matrix of an unweighted network.
#' @param directed Logical, whether the network is directed or not. It will
#'   be ignored if \code{netwk} is provided.
#' @param target.assortcoef For directed networks, it is a list representing
#'   the predetermined value or range of assortativity coefficients. For
#'   undirected networks, it is a constant between -1 and 1. It will be
#'   ignored if \code{eta} is provided.
#' @param control A list of parameters for controlling the rewiring process
#'   and the process for solving \code{eta}. \itemize{
#'   \item `iteration` An integer representing the number of rewiring
#'   iterations. Each iteration consists of \code{nattempts} rewiring
#'   attempts. The assortativity coefficient(s) of the network will be
#'   recorded after each iteration.
#'   \item `nattempts` An integer representing the number of rewiring
#'   attempts for each iteration. Default value equals the number of rows of
#'   \code{edgelist}.
#'   \item `history` Logical, whether the rewiring attempts should be
#'   recorded and returned.
#'   \item `eta.obj` A convex function of \code{eta} to be minimized when
#'   solving for \code{eta} with given \code{target.assortcoef}. Defaults to
#'   0. It will be ignored if \code{eta} is provided.
#'   \item `cvxr_control` A list of parameters passed to
#'   \code{CVXR::solve()} for solving \code{eta} with given
#'   \code{target.assortcoef}. It will be ignored if \code{eta} is
#'   provided.}
#' @param eta A matrix representing the target network structure. If
#'   specified, \code{target.assortcoef} will be ignored. For directed
#'   networks, the element at row "i-j" and column "k-l" represents the
#'   proportion of directed edges linking a source node with out-degree i and
#'   in-degree j to a target node with out-degree k and in-degree l. For
#'   undirected networks, \code{eta} is symmetric; the summation of the
#'   elements at row "i", column "j" and row "j", column "i" represents the
#'   proportion of edges linking a node with degree i and a node with degree
#'   j.
#'
#' @return Rewired network; assortativity coefficient(s) after each
#'   iteration; rewiring history (including the index of sampled edges and
#'   rewiring result) and solver results.
#'
#' @export
#'
#' @examples
#' \donttest{
#' set.seed(123)
#' netwk1 <- rpanet(1e4, control = rpa_control_scenario(
#'   alpha = 0.4, beta = 0.3, gamma = 0.3
#' ))
#' ## rewire a directed network
#' target.assortcoef <- list("outout" = -0.2, "outin" = 0.2)
#' ret1 <- dprewire(
#'   netwk = netwk1,
#'   target.assortcoef = target.assortcoef,
#'   control = list(iteration = 200)
#' )
#' plot(ret1$assortcoef$Iteration, ret1$assortcoef$"outout")
#' plot(ret1$assortcoef$Iteration, ret1$assortcoef$"outin")
#'
#' ## rewire an undirected network
#' netwk2 <- rpanet(1e4,
#'   control = rpa_control_scenario(
#'     alpha = 0.3, beta = 0.1, gamma = 0.3, xi = 0.3
#'   ),
#'   initial.network = list(
#'     directed = FALSE
#'   )
#' )
#' ret2 <- dprewire(
#'   netwk = netwk2,
#'   target.assortcoef = 0.3,
#'   control = list(
#'     iteration = 300, eta.obj = CVXR::norm2,
#'     history = TRUE
#'   )
#' )
#' plot(ret2$assortcoef$Iteration, ret2$assortcoef$Value)
#' }
#'
dprewire <- function(
    netwk,
    edgelist,
    directed,
    adj,
    target.assortcoef = list(
      "outout" = NULL, "outin" = NULL,
      "inout" = NULL, "inin" = NULL
    ),
    control = list(
      "iteration" = 200,
      "nattempts" = NULL,
      "history" = FALSE,
      "cvxr_control" = cvxr_control(),
      "eta.obj" = function(x) 0
    ),
    eta) {
  netwk <- create_wdnet(
    netwk = netwk,
    edgelist = edgelist,
    edgeweight = NULL,
    directed = directed,
    adj = adj,
    weighted = FALSE
  )
  if (netwk$weighted) {
    warning("Edge weights are omitted")
  }
  control <- utils::modifyList(
    list(
      "iteration" = 200,
      "nattempts" = NULL,
      "history" = FALSE,
      "cvxr_control" = cvxr_control(),
      "eta.obj" = function(x) 0
    ),
    control,
    keep.null = TRUE
  )
  solver.result <- NULL
  if (missing(eta)) {
    if (netwk$directed) {
      solver.result <- get_eta_directed(
        edgelist = netwk$edgelist,
        target.assortcoef = target.assortcoef,
        eta.obj = control$eta.obj,
        control = control$cvxr_control
      )
    } else {
      stopifnot(
        is.numeric(target.assortcoef) &&
          target.assortcoef >= -1 &&
          target.assortcoef <= 1
      )
      solver.result <- get_eta_undirected(
        edgelist = netwk$edgelist,
        target.assortcoef = target.assortcoef,
        eta.obj = control$eta.obj,
        control = control$cvxr_control
      )
    }
    if (solver.result$status == "solver_error" ||
      solver.result$status == "infeasible") {
      return(list("solver.result" = solver.result))
    }
    eta <- solver.result$eta
  }
  if (netwk$directed) {
    ret <- dprewire_directed(
      edgelist = netwk$edgelist,
      eta = eta,
      iteration = control$iteration,
      nattempts = control$nattempts,
      rewire.history = control$history
    )
  } else {
    ret <- dprewire_undirected(
      edgelist = netwk$edgelist,
      eta = eta,
      iteration = control$iteration,
      nattempts = control$nattempts,
      rewire.history = control$history
    )
  }
  ret$"solver.result" <- solver.result
  ret
}

#' Range of assortativity coefficients.
#'
#' The assortativity coefficient of a given network may not reach all the
#' values between -1 and 1 via degree preserving rewiring. This function
#' calculates the range of assortativity coefficients achievable through
#' degree preserving rewiring. The algorithm is designed for unweighted
#' networks.
#'
#' The ranges are computed using convex optimization. The optimization
#' problems are defined and solved via the \code{R} package \code{CVXR}. For
#' undirected networks, the function returns the range of the assortativity
#' coefficient. For directed networks, the function computes the range of
#' \code{which.range} while other assortativity coefficients are restricted
#' through \code{target.assortcoef}.
#'
#' @param netwk A \code{wdnet} object representing an unweighted network. If
#'   \code{NULL}, the function will construct a network using either
#'   \code{edgelist} or \code{adj}.
#' @param edgelist A two-column matrix, where each row represents an edge of
#'   the network.
#' @param adj An adjacency matrix of an unweighted network.
#' @param directed Logical, whether the network is directed or not. It will
#'   be ignored if \code{netwk} is provided.
#' @param which.range The type of assortativity coefficient of interest. For
#'   directed networks, it takes one of the values: "outout", "outin",
#'   "inout" and "inin". It will be ignored if the network is undirected.
#' @param target.assortcoef A list of constraints; it contains the
#'   predetermined value or range imposed on assortativity coefficients other
#'   than \code{which.range}. It will be ignored if the network is
#'   undirected.
#' @param control A list of parameters passed to \code{CVXR::solve()} for
#'   solving an appropriate \code{eta}, given the constraints
#'   \code{target.assortcoef}.
#'
#' @return Returns the range of the selected assortativity coefficient and
#'   the results from the solver.
#'
#' @export
#'
#' @examples
#' \donttest{
#' set.seed(123)
#' netwk <- rpanet(5e3,
#'   control =
#'     rpa_control_scenario(alpha = 0.5, beta = 0.5)
#' )
#' ret1 <- dprewire.range(
#'   netwk = netwk, which.range = "outin",
#'   target.assortcoef = list("outout" = c(-0.3, 0.3), "inout" = 0.1)
#' )
#' ret1$range
#' }
#'
dprewire.range <- function(
    netwk,
    edgelist,
    adj,
    directed,
    which.range = c("outout", "outin", "inout", "inin"),
    control = cvxr_control(),
    target.assortcoef = list(
      "outout" = NULL, "outin" = NULL,
      "inout" = NULL, "inin" = NULL
    )) {
  netwk <- create_wdnet(
    netwk = netwk,
    edgelist = edgelist,
    edgeweight = NULL,
    directed = directed,
    adj = adj,
    weighted = FALSE
  )
  if (netwk$weighted) {
    warning("Edge weights are ignored.")
  }
  if (netwk$directed) {
    which.range <- match.arg(which.range)
    result <- get_eta_directed(
      edgelist = netwk$edgelist,
      target.assortcoef = target.assortcoef,
      which.range = which.range,
      control = control
    )
  } else {
    result <- get_eta_undirected(
      edgelist = netwk$edgelist,
      control = control
    )
  }
  result
}
## File: R/rewire.R
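## Usage sketch: the attainable assortativity range of a small undirected PA
## network under degree-preserving rewiring. Assumes CVXR and its default
## ECOS solver are installed; runtime grows with the number of distinct
## degrees.
set.seed(1)
netwk_u <- rpanet(
  500,
  control = rpa_control_scenario(alpha = 0.5, gamma = 0.5),
  initial.network = list(
    edgelist = matrix(c(1, 2), nrow = 1),
    directed = FALSE
  )
)
rng <- dprewire.range(netwk = netwk_u)
rng$range  # lower and upper bounds on the achievable coefficient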
##
## wdnet: Weighted directed network
## Copyright (C) 2024 Yelie Yuan, Tiandong Wang, Jun Yan and Panpan Zhang
## Jun Yan <[email protected]>
##
## This file is part of the R package wdnet.
##
## The R package wdnet is free software: You can redistribute it and/or
## modify it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or any later
## version (at your option). See the GNU General Public License at
## <https://www.gnu.org/licenses/> for details.
##
## The R package wdnet is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
##

#' @importFrom utils modifyList
NULL

#' rpacontrol: Controls the Preferential Attachment (PA) Network Generation
#' Process
#'
#' The \code{rpacontrol} object is designed to control the Preferential
#' Attachment (PA) network generation process within the \code{rpanet()}
#' function. It can have the following components:
#' \itemize{
#'   \item \code{scenario}: controls the edge scenarios at each step. For
#'   more information, please refer to \code{rpa_control_scenario()}.
#'   \item \code{edgeweight}: controls the weight of the edges; see
#'   \code{rpa_control_edgeweight()} for details.
#'   \item \code{newedge}: controls the creation of new edges at each step;
#'   see \code{rpa_control_newedge()} for details.
#'   \item \code{preference}: sets preference functions; see
#'   \code{rpa_control_preference()} for details.
#'   \item \code{reciprocal}: controls the creation of reciprocal edges; see
#'   \code{rpa_control_reciprocal()} for details.
#' }
#' @name rpacontrol
#'
NULL

#' Add components to the control list
#'
#' `+` is used to combine components to control the PA network generation
#' process. Available components are \code{rpa_control_scenario()},
#' \code{rpa_control_edgeweight()}, \code{rpa_control_newedge()},
#' \code{rpa_control_preference()} and \code{rpa_control_reciprocal()}.
#'
#' @param e1 A list of class \code{rpacontrol}.
#' @param e2 A list of class \code{rpacontrol}.
#'
#' @return A list of class \code{rpacontrol} with components from \code{e1}
#'   and \code{e2}.
#' @export
#'
#' @examples
#' \donttest{
#' control <- rpa_control_scenario(alpha = 0.5, beta = 0.5) +
#'   rpa_control_preference(
#'     ftype = "customized",
#'     spref = "pow(outs, 2) + 1",
#'     tpref = "pow(ins, 2) + 1"
#'   )
#' }
#'
#' control <- rpa_control_scenario(alpha = 1) +
#'   rpa_control_edgeweight(
#'     sampler = function(n) rgamma(n, shape = 5, scale = 0.2)
#'   )
"+.rpacontrol" <- function(e1, e2) {
  e1 <- structure(
    utils::modifyList(e1, e2, keep.null = TRUE),
    class = "rpacontrol"
  )
  if (is.list(e2$edgeweight$dparams)) {
    e1$edgeweight$dparams <- e2$edgeweight$dparams
  }
  if (is.list(e2$newedge$dparams)) {
    e1$newedge$dparams <- e2$newedge$dparams
  }
  e1
}

#' Control edge scenarios. Defined for \code{rpanet}.
#'
#' @param alpha Probability of adding an edge from a new node to an existing
#'   node.
#' @param beta Probability of adding an edge between existing nodes.
#' @param gamma Probability of adding an edge from an existing node to a new
#'   node.
#' @param xi Probability of adding an edge between two new nodes.
#' @param rho Probability of adding a new node with a self-loop.
#' @param beta.loop Logical. Determines whether self-loops are allowed under
#'   the beta scenario. Default value is \code{TRUE}.
#' @param source.first Logical. Defined for \code{beta} scenario edges of
#'   directed networks. If \code{TRUE}, the source node of a new edge is
#'   sampled from existing nodes before the target node is sampled; if
#'   \code{FALSE}, the target node is sampled from existing nodes before the
#'   source node is sampled. Default value is \code{TRUE}.
#'
#' @return A list of class \code{rpacontrol} with components \code{alpha},
#'   \code{beta}, \code{gamma}, \code{xi}, \code{rho}, \code{beta.loop} and
#'   \code{source.first} with meanings as explained under 'Arguments'.
#'
#' @export
#'
#' @examples
#' control <- rpa_control_scenario(alpha = 0.5, beta = 0.5, beta.loop = FALSE)
#'
rpa_control_scenario <- function(
    alpha = 1, beta = 0, gamma = 0, xi = 0, rho = 0,
    beta.loop = TRUE, source.first = TRUE) {
  stopifnot(
    '"alpha + beta + gamma + xi + rho" must be equal to 1.' =
      round(alpha + beta + gamma + xi + rho, 10) == 1
  )
  scenario <- list(
    "alpha" = alpha,
    "beta" = beta,
    "gamma" = gamma,
    "xi" = xi,
    "rho" = rho,
    "beta.loop" = beta.loop,
    "source.first" = source.first
  )
  structure(list("scenario" = scenario),
    class = "rpacontrol"
  )
}

#' Control weight of new edges. Defined for \code{rpanet}.
#'
#' @param sampler A function used for sampling edge weights. If \code{NULL},
#'   all new edges will default to a weight of 1. If a function is provided,
#'   it must accept a single argument, \code{n}, and return a vector of
#'   length \code{n} that represents the sampled edge weights.
#'
#' @return A list of class \code{rpacontrol} containing the \code{sampler}
#'   function.
#'
#' @export
#'
#' @examples
#' # Sample edge weights from Gamma(5, 0.2).
#' my_gamma <- function(n) rgamma(n, shape = 5, scale = 0.2)
#' control <- rpa_control_edgeweight(
#'   sampler = my_gamma
#' )
#'
rpa_control_edgeweight <- function(
    sampler = NULL) {
  if (!is.null(sampler)) {
    tryCatch(do.call(sampler, list(5)),
      error = function(e) {
        message('Invalid "sampler" for rpa_control_edgeweight().')
        stop(e)
      }
    )
  }
  edgeweight <- list(
    "sampler" = sampler
  )
  structure(list("edgeweight" = edgeweight),
    class = "rpacontrol"
  )
}

#' Control new edges in each step. Defined for \code{rpanet}.
#'
#' @param sampler A function used for sampling the number of new edges to be
#'   added at each step. If \code{NULL}, one new edge will be added at each
#'   step. If a function is provided, it must accept a single argument,
#'   \code{n}, and return a vector of length \code{n} that represents the
#'   sampled number of new edges.
#' @param snode.replace Logical. Determines whether the source nodes in the
#'   same step should be sampled with replacement. Defined for directed
#'   networks.
#' @param tnode.replace Logical. Determines whether the target nodes in the
#'   same step should be sampled with replacement. Defined for directed
#'   networks.
#' @param node.replace Logical. Determines whether the nodes in the same step
#'   should be sampled with replacement. Defined for undirected networks. If
#'   FALSE, self-loops will not be allowed under beta scenario.
#'
#' @return A list of class \code{rpacontrol} with components \code{sampler},
#'   \code{snode.replace}, \code{tnode.replace} and \code{node.replace} with
#'   meanings as explained under 'Arguments'.
#'
#' @export
#'
#' @examples
#' my_rpois <- function(n) rpois(n, lambda = 2) + 1
#' control <- rpa_control_newedge(
#'   sampler = my_rpois,
#'   node.replace = FALSE
#' )
rpa_control_newedge <- function(
    sampler = NULL,
    snode.replace = TRUE,
    tnode.replace = TRUE,
    node.replace = TRUE) {
  if (!is.null(sampler)) {
    tryCatch(do.call(sampler, list(5)),
      error = function(e) {
        message('Invalid "sampler" for rpa_control_newedge().')
        stop(e)
      }
    )
  }
  newedge <- list(
    "sampler" = sampler,
    "snode.replace" = snode.replace,
    "tnode.replace" = tnode.replace,
    "node.replace" = node.replace
  )
  structure(list("newedge" = newedge), class = "rpacontrol")
}

#' Set preference function(s). Defined for \code{rpanet}.
#'
#' @param ftype Preference function type. Either "default" or "customized".
#'   "customized" preference functions require "binary" or "linear"
#'   generation methods. If using default preference functions,
#'   \code{sparams}, \code{tparams} and \code{params} must be specified. If
#'   using customized preference functions, \code{spref}, \code{tpref} and
#'   \code{pref} must be specified.
#' @param sparams A numerical vector of length 5 giving the parameters of the
#'   default source preference function. Defined for directed networks.
#'   Probability of choosing an existing node as the source node is
#'   proportional to \code{sparams[1] * out-strength^sparams[2] + sparams[3]
#'   * in-strength^sparams[4] + sparams[5]}.
#' @param tparams A numerical vector of length 5 giving the parameters of the
#'   default target preference function. Defined for directed networks.
#'   Probability of choosing an existing node as the target node is
#'   proportional to \code{tparams[1] * out-strength^tparams[2] + tparams[3]
#'   * in-strength^tparams[4] + tparams[5]}.
#' @param params A numerical vector of length 2 giving the parameters of the
#'   default preference function. Defined for undirected networks.
#'   Probability of choosing an existing node is proportional to
#'   \code{strength^params[1] + params[2].}
#' @param spref Character expression or an object of class \code{XPtr} giving
#'   the customized source preference function. Defined for directed
#'   networks. Default value is \code{"outs + 1"}, i.e., node out-strength +
#'   1. See Details and Examples for more information.
#' @param tpref Character expression or an object of class \code{XPtr} giving
#'   the customized target preference function. Defined for directed
#'   networks. Default value is \code{"ins + 1"}, i.e., node in-strength + 1.
#' @param pref Character expression or an object of class \code{XPtr} giving
#'   the customized preference function. Defined for undirected networks.
#'   Default value is \code{"s + 1"}, i.e., node strength + 1.
#'
#' @details If choosing customized preference functions, \code{spref},
#'   \code{tpref} and \code{pref} will be used and the network generation
#'   method must be "binary" or "linear". \code{spref} (\code{tpref}) defines
#'   the source (target) preference function; it can be a character
#'   expression or an object of class \code{XPtr}. \itemize{
#'   \item Character expression; it must be a one-line \code{C++} style
#'   expression of \code{outs} (node out-strength) and \code{ins} (node
#'   in-strength). For example, \code{"pow(outs, 2) + 1"},
#'   \code{"pow(outs, 2) + pow(ins, 2) + 1"}, etc. The expression will be
#'   used to define an \code{XPtr} via \code{RcppXPtrUtils::cppXPtr}. The
#'   \code{XPtr} will be passed to the network generation function. The
#'   expression must not have variables other than \code{outs} and
#'   \code{ins}.
#'   \item `XPtr` an external pointer wrapped in an object of class
#'   \code{XPtr} defined via \code{RcppXPtrUtils::cppXPtr}. An example for
#'   defining an \code{XPtr} with \code{C++} source code is included in
#'   Examples. For more information about passing function pointers, see
#'   \url{https://gallery.rcpp.org/articles/passing-cpp-function-pointers-rcppxptrutils/}.
#'   Please note the supplied \code{C++} function accepts two \code{double}
#'   arguments and returns a \code{double}. The first and second arguments
#'   represent node out- and in-strength, respectively. Note that the
#'   \code{XPtr} will be invalid and cannot be used to control network
#'   generation in another separate R session. Therefore, we recommend
#'   preserving the source code of your preference function for future use.}
#'
#'   \code{pref} is defined analogously. If using character expression, it
#'   must be a one-line \code{C++} style expression of \code{s} (node
#'   strength). If using \code{XPtr}, the supplied \code{C++} function
#'   accepts only one \code{double} argument and returns a \code{double}.
#'
#' @return A list of class \code{rpacontrol} with components \code{ftype},
#'   \code{sparams}, \code{tparams}, \code{params} or \code{ftype},
#'   \code{spref}, \code{tpref}, \code{pref} with function pointers
#'   \code{spref.pointer}, \code{tpref.pointer}, \code{pref.pointer}.
#'
#' @export
#'
#' @examples
#' \donttest{
#' # Set source preference as out-strength^2 + in-strength + 1,
#' # target preference as out-strength + in-strength^2 + 1.
#' # 1. use default preference functions
#' ctr1 <- rpa_control_preference(
#'   ftype = "default",
#'   sparams = c(1, 2, 1, 1, 1), tparams = c(1, 1, 1, 2, 1)
#' )
#' # 2. use character expressions
#' ctr2 <- rpa_control_preference(
#'   ftype = "customized",
#'   spref = "pow(outs, 2) + ins + 1", tpref = "outs + pow(ins, 2) + 1"
#' )
#' # 3. define XPtr's with C++ source code
#' spref.pointer <- RcppXPtrUtils::cppXPtr(
#'   code =
#'     "double spref(double outs, double ins) {return pow(outs, 2) + ins + 1;}"
#' )
#' tpref.pointer <- RcppXPtrUtils::cppXPtr(
#'   code =
#'     "double tpref(double outs, double ins) {return outs + pow(ins, 2) + 1;}"
#' )
#' ctr3 <- rpa_control_preference(
#'   ftype = "customized",
#'   spref = spref.pointer,
#'   tpref = tpref.pointer
#' )
#' ret <- rpanet(1e5, control = ctr3)
#' }
rpa_control_preference <- function(
    ftype = c("default", "customized"),
    sparams = c(1, 1, 0, 0, 1),
    tparams = c(0, 0, 1, 1, 1),
    params = c(1, 1),
    spref = "outs + 1",
    tpref = "ins + 1",
    pref = "s + 1") {
  ftype <- match.arg(ftype)
  if (ftype == "default") {
    stopifnot(
      "Length or type of parameter is not valid" = all(
        length(sparams) == 5,
        length(tparams) == 5,
        length(params) == 2,
        is.numeric(sparams),
        is.numeric(tparams),
        is.numeric(params)
      )
    )
    preference <- list(
      "ftype" = ftype,
      "sparams" = sparams,
      "tparams" = tparams,
      "params" = params
    )
  } else {
    preference <- list(
      "ftype" = ftype,
      "spref" = spref,
      "tpref" = tpref,
      "pref" = pref
    )
    preference <- compile_pref_func(preference)
  }
  structure(list("preference" = preference),
    class = "rpacontrol"
  )
}

#' Control reciprocal edges. Defined for \code{rpanet}.
#'
#' @param group.prob A vector of probability weights for sampling the group
#'   of new nodes. Defined for directed networks. Groups are from 1 to
#'   \code{length(group.prob)}. Its length must equal the number of rows of
#'   \code{recip.prob}.
#' @param recip.prob A square matrix giving the probability of adding a
#'   reciprocal edge after a new edge is introduced. Defined for directed
#'   networks. Its element \code{p_{ij}} represents the probability of adding
#'   a reciprocal edge from node \code{A}, which belongs to group \code{i},
#'   to node \code{B}, which belongs to group \code{j}, immediately after a
#'   directed edge from \code{B} to \code{A} is added.
#' @param selfloop.recip Logical, whether reciprocal edges of self-loops are
#'   allowed.
#'
#' @return A list of class \code{rpacontrol} with components
#'   \code{group.prob}, \code{recip.prob}, and \code{selfloop.recip} with
#'   meanings as explained under 'Arguments'.
#'
#' @export
#'
#' @examples
#' control <- rpa_control_reciprocal(
#'   group.prob = c(0.4, 0.6),
#'   recip.prob = matrix(runif(4), ncol = 2)
#' )
rpa_control_reciprocal <- function(
    group.prob = NULL,
    recip.prob = NULL,
    selfloop.recip = FALSE) {
  if (!is.null(group.prob)) {
    stopifnot(
      '"group.prob" must sum to 1.' =
        round(sum(group.prob), 10) == 1
    )
    if (!is.null(recip.prob)) {
      recip.prob <- as.matrix(recip.prob)
      stopifnot(
        '"recip.prob" or "group.prob" is not valid.' =
          length(group.prob) == nrow(recip.prob) &
            nrow(recip.prob) == ncol(recip.prob)
      )
      stopifnot(
        '"recip.prob" is not valid.' =
          all(recip.prob >= 0) & all(recip.prob <= 1)
      )
      stopifnot(
        '"group.prob" is not valid.' =
          round(sum(group.prob), 10) == 1 & all(group.prob >= 0)
      )
    } else {
      stop('"recip.prob" cannot be NULL when "group.prob" is specified.')
    }
  }
  if (is.null(group.prob)) {
    if (!is.null(recip.prob)) {
      stop('"group.prob" is not valid.')
    }
  }
  reciprocal <- list(
    "group.prob" = group.prob,
    "recip.prob" = recip.prob,
    "selfloop.recip" = selfloop.recip
  )
  structure(list("reciprocal" = reciprocal),
    class = "rpacontrol"
  )
}

#' Default controls for \code{rpanet}
#'
#' @return Returns a list of default controls.
#' @keywords internal
#'
rpa_control_default <- function() {
  rpa_control_scenario() +
    rpa_control_edgeweight() +
    rpa_control_newedge() +
    rpa_control_reciprocal() +
    rpa_control_preference()
}
## File: R/rpa_control.R
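## Sketch of how "+.rpacontrol" above merges components: utils::modifyList()
## overwrites field by field, so a later component of the same kind replaces
## an earlier one.
ctrl <- rpa_control_scenario(alpha = 0.6, beta = 0.4) +
  rpa_control_newedge(sampler = function(n) rpois(n, lambda = 1) + 1) +
  rpa_control_scenario(alpha = 0.3, beta = 0.7)
ctrl$scenario$alpha  # 0.3: the last rpa_control_scenario() takes precedence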
## ## wdnet: Weighted directed network ## Copyright (C) 2024 Yelie Yuan, Tiandong Wang, Jun Yan and Panpan Zhang ## Jun Yan <[email protected]> ## ## This file is part of the R package wdnet. ## ## The R package wdnet is free software: You can redistribute it and/or ## modify it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 3 of the License, or any later ## version (at your option). See the GNU General Public License at ## <https://www.gnu.org/licenses/> for details. ## ## The R package wdnet is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. ## #' @importFrom stats rgamma rpois NULL #' Generate PA networks. #' #' Generate preferential attachment (PA) networks with linear or non-linear #' preference functions. #' #' @param nstep Number of steps. #' @param initial.network A \code{wdnet} object or a list representing the #' initial network. By default, \code{initial.network} has one directed edge #' from node 1 to node 2 with weight 1. It can contain the following components: #' a two-column matrix \code{edgelist} representing the edges; a vector #' \code{edgeweight} representing the weight of edges; a logical argument #' \code{directed} indicating whether the initial network is directed. If #' \code{edgeweight} is not specified, all edges from the initial network are #' assumed to have weight 1. In addition, an integer vector #' \code{nodegroup} can be added to the list for specifing node groups; #' \code{nodegroup} is defined for directed networks, if \code{NULL}, all #' nodes from the seed network are assumed to be in group 1. #' @param control An \code{rpacontrol} object controlling the PA network #' generation process. If not specified, all the control parameters will be #' set to default. For more details, see \code{rpa_control_scenario()}, #' \code{rpa_control_newedge()}, \code{rpa_control_edgeweight()}, #' \code{rpa_control_preference} and \code{rpa_control_reciprocal()}. Under #' the default setup, at each step, a new edge of weight 1 is added from a new #' node \code{A} to an existing node \code{B} (\code{alpha} scenario), where #' \code{B} is chosen with probability proportional to its in-strength + 1. #' @param method Which method to use: \code{binary}, \code{linear}, \code{bagx} #' or \code{bag}. For \code{bag} and \code{bagx} methods, \code{beta.loop} #' must be \code{TRUE}, default preference functions must be used, and #' \code{sparams} should be set to \code{c(1, 1, 0, 0, a)}, \code{tparams} to #' \code{c(0, 0, 1, 1, b)}, and \code{param} to \code{c(1, c)}, where #' \code{a}, \code{b}, and \code{c} are non-negative constants; furthermore, #' reciprocal edges and sampling without replacement are not considered, i.e., #' option \code{rpa_control_reciprocal()} must be set as default, #' \code{snode.replace}, \code{tnode.replace} and \code{node.replace} must be #' \code{TRUE}. In addition, \code{bag} method only works for unweighted #' networks and does not consider multiple edges, i.e., #' \code{rpa_control_edgeweight()} and \code{rpa_control_newedge()} must be #' set as default. #' #' #' @return Returns a \code{wdnet} object that includes the following components: #' \itemize{ #' \item \code{directed}: Logical, whether the network is directed. #' \item \code{weighted}: Logical, whether the network is weighted. #' \item \code{edgelist}: A two-column matrix representing the edges. 
#' \item \code{edge.attr}: A data frame including edge weights and edge #' scenarios (0: from initial network; 1: \code{alpha}; 2: \code{beta}; #' 3: \code{gamma}; 4: \code{xi}; 5; \code{rho}; 6: reciprocal edge). #' \item \code{node.attr}: A data frame including node out- and #' in-strength, node source and target preference scores (for directed #' networks), node strength and preference scores (for undirected #' networks), and node group (if applicable). #' \item \code{newedge}: The number of new edges at each step, including #' reciprocal edges. #' \item \code{control}: An \code{rpacontrol} object that is used to #' generate the network. #' } #' #' @note The \code{binary} method implements binary search algorithm; #' \code{linear} represents linear search algorithm; \code{bag} method #' implements the algorithm from Wan et al. (2017); \code{bagx} puts all the #' edges into a bag, then samples edges and find the source/target node of the #' sampled edge. #' #' @references \itemize{\item Wan P, Wang T, Davis RA, Resnick SI (2017). #' Fitting the Linear Preferential Attachment Model. Electronic Journal of #' Statistics, 11(2), 3738–3780.} #' #' @export #' #' @examples #' # Control edge scenario and edge weight through rpa_control_scenario() #' # and rpa_control_edgeweight(), respectively, #' # while keeping rpa_control_newedge(), #' # rpa_control_preference() and rpa_control_reciprocal() as default. #' set.seed(123) #' control <- rpa_control_scenario(alpha = 0.5, beta = 0.5) + #' rpa_control_edgeweight( #' sampler = function(n) rgamma(n, shape = 5, scale = 0.2) #' ) #' ret1 <- rpanet(nstep = 1e3, control = control) #' #' # In addition, set node groups and probability of creating reciprocal edges. #' control <- control + rpa_control_reciprocal( #' group.prob = c(0.4, 0.6), #' recip.prob = matrix(runif(4), ncol = 2) #' ) #' ret2 <- rpanet(nstep = 1e3, control = control) #' #' # Further, set the number of new edges in each step as Poisson(2) + 1 and use #' # ret2 as a seed network. #' control <- control + rpa_control_newedge( #' sampler = function(n) rpois(n, lambda = 2) + 1 #' ) #' ret3 <- rpanet(nstep = 1e3, initial.network = ret2, control = control) #' rpanet <- function( nstep, initial.network = list( edgelist = matrix(c(1, 2), nrow = 1), edgeweight = 1, directed = TRUE ), control, method = c("binary", "linear", "bagx", "bag")) { method <- match.arg(method) stopifnot('"nstep" must be greater than 0.' = nstep > 0) if (is.null(initial.network$edgelist)) { cat("Assume the initial network has only one edge between nodes 1 and 2.\n") initial.network$edgelist <- matrix(c(1, 2), nrow = 1) } if (is.null(initial.network$directed)) { cat("Assume the initial network is directed.\n") initial.network$directed <- TRUE } initial.network <- create_wdnet( netwk = initial.network, edgelist = initial.network$edgelist, edgeweight = initial.network$edgeweight, nodegroup = initial.network$nodegroup, directed = initial.network$directed ) if (missing(control) || is.null(control)) { control <- rpa_control_default() } else { is_rpacontrol(control) control <- rpa_control_default() + control } nnode <- max(initial.network$edgelist) nedge <- nrow(initial.network$edgelist) if (is.null(initial.network$node.attr$group)) { initial.network$node.attr$group <- rep(1, nnode) } else { initial.network$node.attr$group <- as.integer(initial.network$node.attr$group) stopifnot( '"nodegroup" of initial network is not valid.' 
= all(initial.network$node.attr$group > 0) & length(initial.network$node.attr$group) == nnode ) } if (length(control$reciprocal$group.prob) > 0) { stopifnot( 'Length of "group.prob" is not valid.' = max(initial.network$node.attr$group) <= length(control$reciprocal$group.prob) ) } if (control$preference$ftype == "customized") { control$preference <- compile_pref_func( control$preference, directed = initial.network$directed ) } if (is.null(control$newedge$sampler)) { m <- rep(1L, nstep) } else { tryCatch(do.call(control$newedge$sampler, list(5)), error = function(e) { message('Invalid "sampler" for rpa_control_newedge().') stop(e) }) m <- do.call(control$newedge$sampler, list(nstep)) } stopifnot( 'Invalid "sampler" for rpa_control_newedge().' = all(length(m) == nstep, m %% 1 == 0, m > 0) ) sum_m <- sum(m) sample.recip <- TRUE if (is.null(control$reciprocal$recip.prob)) { sample.recip <- FALSE } if (is.null(control$edgeweight$sampler)) { w <- rep(1L, sum_m * (1 + sample.recip)) } else { tryCatch(do.call(control$edgeweight$sampler, list(5)), error = function(e) { message('Invalid "sampler" for rpa_control_edgeweight().') stop(e) }) w <- do.call(control$edgeweight$sampler, list(sum_m * (1 + sample.recip))) } stopifnot( 'Invalid "sampler" for rpa_control_edgeweight().' = all(length(w) == sum_m * (1 + sample.recip), w > 0) ) if ((!initial.network$directed) && ((!control$newedge$snode.replace) || (!control$newedge$tnode.replace))) { cat('"snode.replace" and "tnode.replace" are ignored for undirected networks.\n') control$newedge$snode.replace <- control$newedge$tnode.replace <- TRUE } if (initial.network$directed && (!control$newedge$node.replace)) { cat('"node.replace" is ignored for directed networks.\n') control$newedge$node.replace <- TRUE } if (method == "bag" || method == "bagx") { stopifnot( '"bag" and "bagx" methods require "default" preference functions.' = control$preference$ftype == "default" ) if (initial.network$directed) { stopifnot( 'Source preference must be out-degree plus a non-negative constant for "bag" and "bagx" methods.' = all( control$preference$sparams[1:2] == 1, control$preference$sparams[3:4] == 0, control$preference$sparams[5] >= 0 ) ) stopifnot( 'Target preference must be in-degree plus a non-negative constant for "bag" and "bagx" methods.' = all( control$preference$tparams[1:2] == 0, control$preference$tparams[3:4] == 1, control$preference$tparams[5] >= 0 ) ) } else { stopifnot( 'Preference must be degree plus a non-negative constant for "bag" and "bagx" methods.' = control$preference$params[1] == 1 & control$preference$params[2] >= 0 ) } stopifnot( '"rpa_control_reciprocal" must be set as default for "bag" and "bagx" methods.' = identical(control$reciprocal, rpa_control_reciprocal()$reciprocal) ) stopifnot( '"beta.loop" must be TRUE for "bag" and "bagx" methods.' = control$scenario$beta.loop ) if (method == "bag") { stopifnot( '"rpa_control_edgeweight" must be set as default for "bag" method.' = identical(control$edgeweight, rpa_control_edgeweight()$edgeweight) ) stopifnot( 'Weight of existing edges must be 1 for "bag" method.' = all(initial.network$edge.attr$weight == 1) ) stopifnot( '"rpa_control_newedge" must be set as default for "bag" method.' = identical(control$newedge, rpa_control_newedge()$newedge) ) } if (method == "bagx") { if (initial.network$directed) { stopifnot( '"snode.replace" must be TRUE for "bagx" method.' = control$newedge$snode.replace ) stopifnot( '"tnode.replace" must be TRUE for "bagx" method.'
= control$newedge$tnode.replace ) } else { stopifnot( '"node.replace" must be TRUE for "bagx" method.' = control$newedge$node.replace ) } } return(rpanet_simple( nstep = nstep, initial.network = initial.network, control = control, m = m, sum_m = sum_m, w = w, ex_node = nnode, ex_edge = nedge, method = method )) } if ((!control$newedge$node.replace) && control$scenario$beta.loop) { control$scenario$beta.loop <- FALSE cat('"beta.loop" is set to FALSE since "node.replace" is FALSE.\n') } return(rpanet_general( nstep = nstep, initial.network = initial.network, control = control, m = m, sum_m = sum_m, w = w, nnode = nnode, nedge = nedge, method = method, sample.recip = sample.recip )) }
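## Illustrative usage sketch (not part of the package source): generate a
## small PA network from a custom undirected seed network. A minimal sketch,
## assuming the wdnet package is installed; the arguments follow the
## documentation above.
library(wdnet)
seed <- list(
  edgelist = matrix(c(1, 2, 2, 3), ncol = 2, byrow = TRUE),
  edgeweight = c(1, 1),
  directed = FALSE
)
net <- rpanet(nstep = 100, initial.network = seed, method = "linear")
head(net$edgelist)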
/scratch/gouwar.j/cran-all/cranData/wdnet/R/rpanet.R
## ## wdnet: Weighted directed network ## Copyright (C) 2024 Yelie Yuan, Tiandong Wang, Jun Yan and Panpan Zhang ## Jun Yan <[email protected]> ## ## This file is part of the R package wdnet. ## ## The R package wdnet is free software: You can redistribute it and/or ## modify it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 3 of the License, or any later ## version (at your option). See the GNU General Public License at ## <https://www.gnu.org/licenses/> for details. ## ## The R package wdnet is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. ## #' @importFrom utils modifyList #' @importFrom stats runif NULL #' Internal functions for generating PA networks #' #' These functions generate a PA network with linear (\code{rpanet_simple}) or #' non-linear (\code{rpanet_general}) preference functions. #' #' @param nstep Number of steps when generating a network. #' @param initial.network A \code{wdnet} object or a list that represents the #' initial network. By default, \code{initial.network} has one directed edge from node 1 #' to node 2 with weight 1. It may have the following components: a two-column #' matrix \code{edgelist} representing the edges; a vector \code{edgeweight} #' representing the weight of edges; a logical argument \code{directed} indicating #' whether the initial network is directed; #' an integer vector \code{nodegroup} #' representing the group of nodes. \code{nodegroup} is defined for directed #' networks; if \code{NULL}, all nodes from the seed network are assumed to be #' in group 1. #' @param control A list of parameters that controls the PA generation process. #' The default value is \code{wdnet:::rpa_control_default()}. By default, in #' each step, a new edge of weight 1 is added from a new node \code{A} to an #' existing node \code{B} (\code{alpha} scenario), where \code{B} is chosen #' with probability proportional to its in-strength + 1. #' @param m Integer vector, number of new edges in each step. #' @param sum_m Integer, summation of \code{m}. #' @param w Vector, weight of new edges. #' @param nnode Integer, number of nodes in \code{initial.network}. #' @param nedge Integer, number of edges in \code{initial.network}. #' @param method Which method to use when generating PA networks: "binary" or #' "linear". #' @param sample.recip Whether reciprocal edges will be added. #' #' @return Returns a \code{wdnet} object that includes the following components: #' \itemize{ #' \item \code{directed}: Logical, whether the network is directed. #' \item \code{weighted}: Logical, whether the network is weighted. #' \item \code{edgelist}: A two-column matrix representing the edges. #' \item \code{edge.attr}: A data frame including edge weights and edge #' scenarios (0: from initial network; 1: \code{alpha}; 2: \code{beta}; #' 3: \code{gamma}; 4: \code{xi}; 5: \code{rho}; 6: reciprocal edge). #' \item \code{node.attr}: A data frame including node out- and #' in-strength, node source and target preference scores (for directed #' networks), node strength and preference scores (for undirected #' networks), and node group (if applicable). #' \item \code{newedge}: The number of new edges at each step, including #' reciprocal edges. #' \item \code{control}: An \code{rpacontrol} object that is used to #' generate the network.
#' } #' #' @rdname rpanet.internal #' @keywords internal #' rpanet_general <- function( nstep, initial.network, control, m, sum_m, w, nnode, nedge, method, sample.recip) { edgeweight <- c(initial.network$edge.attr$weight, w) node_vec_length <- sum_m * 2 + max(nedge, nnode) node_vec1 <- integer(node_vec_length) node_vec2 <- integer(node_vec_length) scenario <- integer(node_vec_length) node_vec1[1:nedge] <- initial.network$edgelist[, 1] node_vec2[1:nedge] <- initial.network$edgelist[, 2] scenario[1:nedge] <- 0 control$preference$ftype.temp <- ifelse( control$preference$ftype == "default", 1, 2 ) if (initial.network$directed) { outs <- double(node_vec_length) ins <- double(node_vec_length) outs[1:nnode] <- initial.network$node.attr$outs ins[1:nnode] <- initial.network$node.attr$ins spref <- double(node_vec_length) tpref <- double(node_vec_length) if (!sample.recip) { control$reciprocal$group.prob <- 1 control$reciprocal$recip.prob <- matrix(0) } nodegroup <- integer(node_vec_length) nodegroup[1:nnode] <- initial.network$node.attr$group if (method == "binary") { ret_c <- rpanet_binary_directed( nstep, m, nnode, nedge, node_vec1, node_vec2, outs, ins, edgeweight, scenario, sample.recip, nodegroup, spref, tpref, control ) } else { ret_c <- rpanet_linear_directed_cpp( nstep, m, nnode, nedge, node_vec1, node_vec2, outs, ins, edgeweight, scenario, sample.recip, nodegroup, spref, tpref, control ) } } else { sample.recip <- FALSE s <- double(node_vec_length) s[1:nnode] <- initial.network$node.attr$s pref <- double(node_vec_length) if (method == "binary") { ret_c <- rpanet_binary_undirected_cpp( nstep, m, nnode, nedge, node_vec1, node_vec2, s, edgeweight, scenario, pref, control ) } else { ret_c <- rpanet_linear_undirected_cpp( nstep, m, nnode, nedge, node_vec1, node_vec2, s, edgeweight, scenario, pref, control ) } } control$preference$ftype.temp <- NULL control$preference$spref.pointer <- NULL control$preference$tpref.pointer <- NULL control$preference$pref.pointer <- NULL nnode <- ret_c$nnode nedge <- ret_c$nedge ret <- structure( list( "edgelist" = cbind( ret_c$node_vec1[1:nedge], ret_c$node_vec2[1:nedge] ), "newedge" = ret_c$m, "control" = control, "directed" = initial.network$directed, "edge.attr" = data.frame( "weight" = edgeweight[1:nedge], "scenario" = ret_c$scenario[1:nedge] ) ), class = "wdnet" ) ret$weighted <- any(ret$edge.attr$weight != 1) if (initial.network$directed) { ret$node.attr <- data.frame( "outs" = ret_c$outs[1:nnode], "ins" = ret_c$ins[1:nnode], "spref" = ret_c$spref[1:nnode], "tpref" = ret_c$tpref[1:nnode] ) } else { ret$node.attr <- data.frame( "s" = ret_c$s[1:nnode], "pref" = ret_c$pref[1:nnode] ) } if (sample.recip) { ret$node.attr$group <- ret_c$nodegroup[1:nnode] } else { ret$control$reciprocal$group.prob <- NULL ret$control$reciprocal$recip.prob <- NULL } is_wdnet(ret) return(ret) } #' @rdname rpanet.internal #' @keywords internal #' rpanet_simple <- function( nstep, initial.network, control, m, sum_m, w, ex_node, ex_edge, method) { delta <- control$preference$params[2] delta_out <- control$preference$sparams[5] delta_in <- control$preference$tparams[5] ex_weight <- sum(initial.network$edge.attr$weight) edgeweight <- c(initial.network$edge.attr$weight, w) scenario <- sample(1:5, size = sum_m, replace = TRUE, prob = c( control$scenario$alpha, control$scenario$beta, control$scenario$gamma, control$scenario$xi, control$scenario$rho ) ) if (!initial.network$directed) { delta_out <- delta_in <- delta / 2 } if (method == "bag") { snode <- c(initial.network$edgelist[, 1], 
rep(0, sum_m)) tnode <- c(initial.network$edgelist[, 2], rep(0, sum_m)) ret <- rpanet_bag_cpp( snode, tnode, scenario, ex_node, ex_edge, delta_out, delta_in, initial.network$directed ) snode <- ret$snode tnode <- ret$tnode } else { scenario1 <- scenario == 1 scenario4 <- scenario == 4 no_new_start <- !((scenario > 3) | scenario1) no_new_end <- scenario < 3 total_node <- tnode <- cumsum(c((scenario != 2) + scenario4)) + ex_node snode <- total_node - scenario4 tnode[no_new_end] <- 0 snode[no_new_start] <- 0 weight_intv <- cumsum(c(0, edgeweight)) temp_m <- cumsum(m[-nstep]) temp <- c(ex_weight, weight_intv[temp_m + ex_edge + 1]) total_weight <- rep(temp, m) temp <- c(ex_node, total_node[temp_m]) rm(temp_m) total_node <- rep(temp, m) rm(temp) rand_out <- runif(sum(no_new_start)) * (total_weight + delta_out * total_node)[no_new_start] rand_in <- runif(sum(no_new_end)) * (total_weight + delta_in * total_node)[no_new_end] temp_out <- rand_out <= total_weight[no_new_start] if (!all(temp_out)) { snode[no_new_start][!temp_out] <- sample_node_cpp( total_node[no_new_start][!temp_out] ) } temp_in <- rand_in <= total_weight[no_new_end] if (!all(temp_in)) { tnode[no_new_end][!temp_in] <- sample_node_cpp( total_node[no_new_end][!temp_in] ) } snode <- c(initial.network$edgelist[, 1], snode) tnode <- c(initial.network$edgelist[, 2], tnode) start_edge <- findInterval( rand_out[temp_out], weight_intv, left.open = TRUE ) end_edge <- findInterval(rand_in[temp_in], weight_intv, left.open = TRUE) if (initial.network$directed) { snode <- find_node_cpp(snode, start_edge) tnode <- find_node_cpp(tnode, end_edge) } else { ret <- find_node_undirected_cpp( snode, tnode, start_edge, end_edge ) snode <- ret$node1 tnode <- ret$node2 } } edgelist <- cbind(snode, tnode) ret <- create_wdnet( edgelist = edgelist, edgeweight = edgeweight, newedge = m, control = control, directed = initial.network$directed, weighted = any(edgeweight != 1) ) ret$edge.attr$scenario <- c(rep(0, ex_edge), scenario) if (initial.network$directed) { ret$node.attr$spref <- ret$node.attr$outs + control$preference$sparams[5] ret$node.attr$tpref <- ret$node.attr$ins + control$preference$tparams[5] } else { ret$node.attr$pref <- ret$node.attr$s + control$preference$params[2] } # is_wdnet(ret) return(ret) }
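## Standalone sketch (base R only) of the scenario draw performed inside
## rpanet_simple(): each new edge is assigned one of the five PA scenarios
## (alpha, beta, gamma, xi, rho) with the control probabilities. The
## probabilities below are illustrative, not package defaults.
set.seed(42)
scenario <- sample(
  1:5,
  size = 10, replace = TRUE,
  prob = c(0.5, 0.2, 0.2, 0.05, 0.05) # alpha, beta, gamma, xi, rho
)
table(scenario)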
/scratch/gouwar.j/cran-all/cranData/wdnet/R/rpanet_internal.R
## ## wdnet: Weighted directed network ## Copyright (C) 2024 Yelie Yuan, Tiandong Wang, Jun Yan and Panpan Zhang ## Jun Yan <[email protected]> ## ## This file is part of the R package wdnet. ## ## The R package wdnet is free software: You can redistribute it and/or ## modify it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 3 of the License, or any later ## version (at your option). See the GNU General Public License at ## <https://www.gnu.org/licenses/> for details. ## ## The R package wdnet is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. ## #' @importFrom igraph graph_from_adjacency_matrix as_edgelist E NULL #' Converts an adjacency matrix to edgelist and edgeweight using the #' \code{igraph} package. #' #' @param adj Adjacency matrix of a network. #' @param directed Logical, whether the network is directed. This value is #' passed to \code{igraph::graph_from_adjacency_matrix()}. #' @param weighted Logical, whether the network is weighted. #' #' @return A list of edgelist, edgeweight and directed. #' #' @keywords internal #' adj_to_edgelist <- function(adj, directed = TRUE, weighted = TRUE) { if (!directed) { if (!isSymmetric(adj)) { directed <- TRUE cat('Returned network is directed because "adj" is asymmetric.\n\n') } } if (!weighted) { weighted <- NULL } g <- igraph::graph_from_adjacency_matrix(adj, mode = ifelse(directed, "directed", "undirected"), weighted = weighted, diag = TRUE ) edgelist <- igraph::as_edgelist(g) edgeweight <- igraph::E(g)$weight list( "edgelist" = edgelist, "edgeweight" = edgeweight, "directed" = directed ) } #' Convert edgelist and edgeweight to adjacency matrix. #' #' @param edgelist A two-column matrix representing the edges. #' @param edgeweight A vector representing the weight of edges. If not #' specified, all edges are considered to have a weight of 1. #' @param directed Logical, whether the network is directed. #' #' @return Returns an adjacency matrix. #' #' @keywords internal #' edgelist_to_adj <- function(edgelist, edgeweight, directed = TRUE) { nnode <- max(edgelist) adj <- matrix(0, nrow = nnode, ncol = nnode) if (missing(edgeweight)) { edgeweight <- rep(1, nrow(edgelist)) } if (length(edgelist) == 2) { edgelist <- matrix(edgelist, ncol = 2) edgeweight <- c(edgeweight, 0) } adj <- fill_weight_cpp(adj, edgelist - 1, edgeweight) if (!directed) { adj <- adj + t(adj) diag(adj) <- diag(adj) / 2 } return(adj) }
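## Minimal sketch of the igraph round trip that adj_to_edgelist() wraps,
## assuming the igraph package is installed:
library(igraph)
adj <- matrix(c(0, 2, 1, 0), nrow = 2, byrow = TRUE)
g <- igraph::graph_from_adjacency_matrix(
  adj, mode = "directed", weighted = TRUE, diag = TRUE
)
igraph::as_edgelist(g) # two-column matrix: 1 -> 2, 2 -> 1
igraph::E(g)$weight    # c(2, 1)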
/scratch/gouwar.j/cran-all/cranData/wdnet/R/utils.R
## ## wdnet: Weighted directed network ## Copyright (C) 2024 Yelie Yuan, Tiandong Wang, Jun Yan and Panpan Zhang ## Yelie Yuan <[email protected]> ## ## This file is part of the R package wdnet. ## ## The R package wdnet is free software: You can redistribute it and/or ## modify it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 3 of the License, or any later ## version (at your option). See the GNU General Public License at ## <https://www.gnu.org/licenses/> for details. ## ## The R package wdnet is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. ## #' wdnet: Weighted and Directed Networks #' #' This package provides functions to conduct network analysis #' \itemize{ #' \item Assortativity, centrality, clustering coefficient #' for weighted and directed networks #' \item Rewire an unweighted network with given assortativity coefficient(s) #' \item Preferential attachment (PA) network generation #' } #' #' @section wdnet networks: wdnet networks have a class \code{wdnet}. It is a #' list containing the following components: #' \itemize{ #' \item A logical value \code{directed} indicating if the network is directed. #' \item A logical value \code{weighted} indicating if the network is weighted. #' \item A two-column matrix \code{edgelist} representing the edges. #' \item A data frame \code{node.attr} that includes node attributes, #' such as node strengths. #' \item A data frame \code{edge.attr} that includes edge attributes, #' such as edge weights. #' } #' #' @section Creating a \code{wdnet} Object: #' \itemize{ #' \item To generate a preferential attachment (PA) network, #' use \code{rpanet()}. #' \item To create a \code{wdnet} object from an edge list #' and edge weights, use \code{edgelist_to_wdnet()}. #' \item To create a \code{wdnet} object from an adjacency #' matrix, use \code{adj_to_wdnet()}. #' \item To convert an \code{igraph} object to a \code{wdnet} #' object, use \code{igraph_to_wdnet()}. #' } #' #' @section Further information: The development version of this package is #' available on Gitlab (\url{https://gitlab.com/wdnetwork/wdnet}). #' #' @useDynLib wdnet #' @keywords internal "_PACKAGE"
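## Quick sketch of inspecting a wdnet object produced by rpanet(), the
## generator documented in this package; assumes wdnet is installed. The
## components match the list above.
net <- rpanet(nstep = 50)
net$directed        # TRUE under the default control
head(net$node.attr) # out-/in-strength and preference scores
head(net$edge.attr) # edge weights and scenarios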
/scratch/gouwar.j/cran-all/cranData/wdnet/R/wdnet-package.R
#' Various example data and lookup tables #' #' These datasets are used for functions and examples throughout the package. #' #' @format A data.frame #' @name wi_example_data #' "service_urls" #' @rdname wi_example_data "watershed_lookup"
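## Sketch: peek at the bundled lookup table referenced throughout the
## get_*_layer() functions (assumes wdnr.gis is installed).
head(wdnr.gis::watershed_lookup)
unique(wdnr.gis::watershed_lookup$huc_level) # "HUC_8", "HUC_10", "HUC_12"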
/scratch/gouwar.j/cran-all/cranData/wdnr.gis/R/data.R
#' Retrieve county polygon layer #' #' Return specific county polygon layer from \code{wi_counties} sf object #' #' @param ... Any Wisconsin counties provided as character strings, separated #' by commas #' #' @return An sf data.frame with the appropriate counties #' @export #' #' @examples #' \dontrun{ #' plot(filter_county_poly("door")) #' plot_layer(filter_county_poly("portage")) #' } filter_county_poly <- function(...) { input <- unlist(list(...)) counties <- input %>% standardize_county_names() %>% lapply(function(x) { if (x %in% wdnr.gis::wi_counties$county) { wdnr.gis::wi_counties[wdnr.gis::wi_counties$county == x, ] } else { return(NULL) } }) county_chk <- unlist(lapply(counties, is.null)) if (any(county_chk)) { if (any(!county_chk)) { warning("One or more counties could not be matched and were omitted.") return(do.call("rbind", counties)) } else { stop("Are you sure that's a county in Wisconsin?") } } else { return(do.call("rbind", counties)) } }
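## Usage sketch (assumes wdnr.gis is installed): input county names are
## standardized internally, so capitalization is forgiving and several
## counties can be requested at once.
counties <- filter_county_poly("Door", "Portage")
arcpullr::plot_layer(counties)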
/scratch/gouwar.j/cran-all/cranData/wdnr.gis/R/filter_layers.R
#' Retrieve a watershed polygon #' #' This function will retrieve a watershed boundary from WDNR's ArcGIS Rest #' Services. A subbasin (HUC 8), watershed (HUC 10), or subwatershed (HUC 12) can #' be retrieved by passing the HUC code or name as a character string. #' See watershed_lookup for a full list of HUC codes and names. #' Use filter_huc() to see watersheds by county or #' classification level. #' #' A function to retrieve a watershed boundary from WDNR's #' subbasin (HUC 8), watershed (HUC 10), or subwatershed (HUC 12) spatial layers. #' Use 'watershed_lookup' to see a full list of available HUC codes and names. #' #' @inheritParams get_hydro_layer #' @param huc_level "HUC_8", "HUC_10", or "HUC_12" #' @param ... Additional parameters that are passed to #' \code{\link[arcpullr]{get_spatial_layer}} #' #' @return An sf polygon object #' @export #' #' @examples #' \dontrun{ #' get_watershed_layer(watershed_code = "07070006") #' get_watershed_layer(watershed_name = "Kickapoo") #' get_watershed_layer(county = "forest", huc_level = "HUC_12") #' } get_watershed_layer <- function(watershed_code = NULL, watershed_name = NULL, county = NULL, sf_object = NULL, huc_level = NULL, where = NULL, ...) { #error catching check_layer_args(watershed_code, watershed_name, county, sf_object, where) #check that the user did not specify both a huc_code and watershed_name avoid_duplicate_sf_args(watershed_name, watershed_code, county, sf_object) #setting parameters based on watershed_name or huc_code #identify which HUC url to use within spatial query if (!is.null(watershed_code)) { huc_level <- wdnr.gis::watershed_lookup %>% dplyr::filter(.data$huc_codes %in% watershed_code) %>% dplyr::pull(huc_level) %>% unique() if (length(huc_level) == 0) { stop("There are no watershed codes that match those entered.") } else if (length(huc_level) > 1) { stop( "You've specified watersheds across different HUC levels.\n", "Please specify watersheds with similar HUC values", "(i.e. HUC8, HUC12, etc.)" ) } } else if (!is.null(watershed_name)) { huc_level <- wdnr.gis::watershed_lookup %>% dplyr::filter(.data$huc_names %in% watershed_name) %>% dplyr::pull(huc_level) %>% unique() if (length(huc_level) == 0) { stop("There are no watershed names that match those entered.") } else if (length(huc_level) > 1) { stop( "You've specified watersheds across different HUC levels.\n", "Please specify watersheds with similar HUC values", "(i.e. HUC8, HUC12, etc.)" ) } } # user did not specify huc_level when it is required if (is.null(huc_level)) { stop("You did not specify a huc_level (i.e.
HUC_8, HUC_10, HUC_12)") } #find the correct polygon for a spatial query if (!is.null(sf_object)) { input_geometry <- sf_object } else if (!is.null(county)) { input_geometry <- filter_county_poly(county) } else { input_geometry <- "" } url <- switch( huc_level, "HUC_8" = list_urls(layers = "8-digit HUCs (Subbasins)"), "HUC_10" = list_urls(layers = "10-digit HUCs (Watersheds)"), "HUC_12" = list_urls(layers = "12-digit HUCs (Subwatersheds)") ) #format premade where statements if (any(!is.null(where), !is.null(watershed_name), !is.null(watershed_code))) { if (!is.null(where)) { query <- where } else if (!is.null(watershed_name)) { query <- switch( huc_level, "HUC_8" = arcpullr::sql_where(HUC8_NAME = watershed_name), "HUC_10" = arcpullr::sql_where(HUC10_NAME = watershed_name), "HUC_12" = arcpullr::sql_where(HUC12_NAME = watershed_name) ) } else if (!is.null(watershed_code)) { query <- switch( huc_level, "HUC_8" = arcpullr::sql_where(HUC8_CODE = watershed_code), "HUC_10" = arcpullr::sql_where(HUC10_CODE = watershed_code), "HUC_12" = arcpullr::sql_where(HUC12_CODE = watershed_code) ) } } else { query <- "" } # pull the data using the appropriate function (spatial query or not) out <- find_layer_query(url, query, input_geometry) return(out) } #' Match a watershed's name based on one or more regex #' #' This function will match the names of a HUC_8 or a HUC_12 watershed found in #' the \code{watershed_lookup} data set. #' #' @param ... One or more regex passed as character string #' @param pull Logical. Pull the unique values or #' #' @return A character string with full watershed names if pull = TRUE, or a #' data.frame with the number of rows equal to the number of matches otherwise #' @export #' #' @examples #' match_watershed_name("rainbow") match_watershed_name <- function(..., pull = TRUE) { args <- list(...) out <- lapply(args, function(x) { return(dplyr::filter(wdnr.gis::watershed_lookup, grepl(x, .data$huc_names, ignore.case = TRUE))) }) %>% do.call("rbind", .) if (nrow(out) == 0) { stop("No watershed names matched this string") } if (pull) { return(out$huc_names) } else { return(out) } } #' Retrieve WDNR's HYDRO spatial layer #' #' A function that can be used to retrieve WDNR's 24k Hydrography (HYDRO) layer. #' Either the"24K Hydrography Streams and Rivers" or the #' "24K Hydrography Lakes and Open Water" can be queried by setting 'layer_type' #' to 'lines' or 'polygons' respectively. #' A spatial query can be performed to limit the output of the function by #' supplying a county name, watershed code, watershed name, or custom sf polygon #' object. Use the 'watershed_lookup' to find valid watershed codes and names. #' WBIC's can also be provided in order to return features for #' specific waterbodies. The 'where' arguement can be used to run #' custom SQL queries. #' #' This function will retrieve WDNR's hydro layer. A county, watershed code, #' watershed_name, or custom sf polygon can be specifie to filter the layer. #' The layer type can be specified to query either the polylines or polygons #' hydro spatial layers. #' #' #' @param county A character object specifying a county name #' @param watershed_code A character object specifying the HUC code for a #' watershed #' @param watershed_name A character object specifying the HUC name for a #' watershed #' @param sf_object Any sf polygon object #' @param wbic A character object or string of WBIC's #' @param where SQL statement #' @param layer_type "lines", "polygons", or "flowlines" #' @param ... 
Additional parameters to pass to #' \code{\link[arcpullr]{get_spatial_layer}} #' #' @return An sf object of class polylines or polygons #' @export #' #' @examples #' \dontrun{ #' get_hydro_layer(county = "milwaukee", layer_type = "lines") #' get_hydro_layer(watershed_code = "07070006", layer_type = "polygons") #' get_hydro_layer(wbic = c("549400", "15000"), layer_type = "polygons") #' get_hydro_layer(county = "milwaukee", where = "HYDROTYPE = '508'") #' } get_hydro_layer <- function (county = NULL, watershed_code = NULL, watershed_name = NULL, sf_object = NULL, wbic = NULL, where = NULL, layer_type = "polygons", ...) { # error catching line_types <- c("line", "lines", "polyline", "polylines") poly_types <- c("poly", "polygon", "polygons") flow_types <- c("flow", "flowline", "flowlines") accepted_layer_types <- c( line_types, poly_types, flow_types ) if (is.null(layer_type) || !layer_type %in% accepted_layer_types) { stop( paste( "'layer_type' must be one of the following:", paste0("* ", paste0(line_types, collapse = ", ")), paste0("* ", paste0(poly_types, collapse = ", ")), paste0("* ", paste0(flow_types, collapse = ", ")), sep = "\n" ) ) } check_layer_args( county, watershed_code, watershed_name, sf_object, wbic, where ) avoid_duplicate_sf_args( county, watershed_name, watershed_code, sf_object ) #format premade where statements if (!is.null(where) && !is.null(wbic)) { stop("You cannot specify both wbic and a where statement.") } else if (!is.null(where)) { query <- where } else if (!is.null(wbic)) { if (layer_type %in% c(line_types, flow_types)) { query <- arcpullr::sql_where(RIVER_SYS_WBIC = wbic) } else if (layer_type %in% poly_types) { query <- arcpullr::sql_where(WATERBODY_WBIC = wbic) } } else { query <- "" } # find the correct polygon for a spatial query if (!is.null(sf_object)) { input_geometry <- sf_object } else if (!is.null(county)) { input_geometry <- filter_county_poly(county) } else if (!is.null(watershed_code)) { input_geometry <- get_watershed_layer(watershed_code = watershed_code) } else if (!is.null(watershed_name)) { input_geometry <- get_watershed_layer(watershed_name = watershed_name) } else { input_geometry <- "" } # get the appropriate url based on what is desired if (layer_type %in% line_types) { query_url <- list_urls(layers = "24K Hydrography Streams and Rivers") } else if (layer_type %in% flow_types) { query_url <- list_urls(layers = "24K Flowlines") } else if (layer_type %in% poly_types) { query_url <- list_urls(layers = "24K Hydrography Lakes and Open Water") } # pull the data using the appropriate function (spatial query or not) out <- find_layer_query(query_url, query, input_geometry, ...) return(out) } #' Retrieve WDNR's FMDB Site spatial layer #' #' A function that can be used to retrieve the WDNR's Fish Management Database's #' (FMDB) monitoring site spatial layer. #' A spatial query can be performed to limit the output of the function by #' supplying a county name, watershed code, watershed name, or custom sf polygon #' object. Use the 'watershed_lookup' to find valid watershed codes and names. #' FMDB site sequence numbers (site_seq) or SWIMS (swims_site_seq) site sequence #' numbers can be provided to return specific sites. The 'where' #' argument can be used to run custom SQL queries. #' #' #' @inheritParams get_hydro_layer #' @param site_seq A character object or string #' @param swims_site_seq A character object or string #' @param layer_type Character. Retrieve point stations, polygon stations, or #' both. #' @param ...
Additional parameters to pass to #' \code{\link[arcpullr]{get_spatial_layer}} #' #' @return An sf object of class multipoints #' @export #' #' @examples #' \dontrun{ #' get_fmdb_site_layer(county = "milwaukee") #' get_fmdb_site_layer(watershed_code = "07070006") #' get_fmdb_site_layer(site_seq = c(7511,10175131,128290)) #' get_fmdb_site_layer(county = "waukesha", #' where = "STATION_TYPE_CODE = 'LAKE'") #' } get_fmdb_site_layer <- function (county = NULL, watershed_code = NULL, watershed_name = NULL, sf_object = NULL, site_seq = NULL, swims_site_seq = NULL, where = NULL, layer_type = "points", ...) { # error catching # add both back in here once you fix both query below if (is.null(layer_type) || !layer_type %in% c("points", "polygons")) { stop("layer_type must be one of the following:\n", paste(" *", c("points", "polygons"), collapse = "\n")) } check_layer_args( county, watershed_code, watershed_name, sf_object, site_seq, swims_site_seq, where ) avoid_duplicate_sf_args(county, watershed_name, watershed_code, sf_object) #format premade where statements if (!is.null(where)) { query <- where } else if (!is.null(site_seq)) { query <- arcpullr::sql_where(FMDB_SITE_SEQ_NO = site_seq) } else if (!is.null(swims_site_seq)) { query <- arcpullr::sql_where(SWIMS_STATION_ID = swims_site_seq) } else { query <- "" } # find the correct polygon for a spatial query if (!is.null(sf_object)) { input_geometry <- sf_object } else if (!is.null(county)) { input_geometry <- filter_county_poly(county) } else if (!is.null(watershed_code)) { input_geometry <- get_watershed_layer(watershed_code = watershed_code) } else if (!is.null(watershed_name)) { input_geometry <- get_watershed_layer(watershed_name = watershed_name) } else { input_geometry <- "" } fmdb_poly_url <- list_urls(layers = "FMDB Polygon Stations") fmdb_point_url <- list_urls(layers = "FMDB Point Stations") # # need to figure out a better way to combine both points and polygons in # # single query # if (layer_type == "both") { # fmdb_poly <- find_layer_query(fmdb_poly_url, query, input_geometry, ...) # fmdb_points <- find_layer_query(fmdb_point_url, query, input_geometry, ...) # fmdb_poly <- # fmdb_poly %>% # sf::st_transform(crs = 3071) %>% # sf::st_centroid() %>% # sf::st_transform(crs = 4326) %>% # dplyr::mutate(shape.AREA = NA) %>% # dplyr::select(-.data$shape.AREA) %>% # suppressWarnings() # out <- rbind(fmdb_points, fmdb_poly) # } else if (layer_type == "points") { out <- find_layer_query(fmdb_point_url, query, input_geometry, ...) } else if (layer_type == "polygons") { out <- find_layer_query(fmdb_poly_url, query, input_geometry, ...) } else { stop("layer_type must be specified as 'points', 'polygons', or 'both'") } return(out) } #' Retrieve WDNR's roads spatial layer #' #' A function to retrieve WDNR's roads spatial layers. "layer_type" can be set #' to "major_roads" or "minor_roads" to query the Major Roads or County and #' Local Roads respectively. #' A spatial query can be performed to limit the output of the function by #' supplying a county name, watershed code, watershed name, or custom sf polygon #' object. Use the 'watershed_lookup' to find valid watershed codes and names. #' The "where" argument can be used to run custom SQL queries. #' #' #' @inheritParams get_hydro_layer #' @param layer_type "major_roads" or "minor_roads" #' @param ...
Additional parameters to pass to #' \code{\link[arcpullr]{get_spatial_layer}} #' #' @return An sf object of class polylines #' @export #' #' @examples #' \dontrun{ #' get_roads_layer(county = "washington", layer_type = "major_roads") #' get_roads_layer(watershed_code = "07070006", layer_type = "minor_roads") #' get_roads_layer(where = "HWY_NUM = '43'", layer_type = "major_roads") #' } get_roads_layer <- function (county = NULL, watershed_code = NULL, watershed_name = NULL, sf_object = NULL, where = NULL, layer_type = "all", ...) { #error catching #Stop the function if the layer_type is not properly specified accept_layer_type <- c("all", "all_roads", "major_roads", "minor_roads") if (!(layer_type %in% accept_layer_type)) { stop("layer_type must be one of the following:\n", paste(" *", c("major_roads", "minor_roads", "all"), collapse = "\n")) } check_layer_args( county, watershed_code, watershed_name, sf_object, where ) avoid_duplicate_sf_args(county, watershed_name, watershed_code, sf_object) #format premade where statements if (!is.null(where)) { query <- where } else { query <- "" } # find the correct polygon for a spatial query if (!is.null(sf_object)) { input_geometry <- sf_object } else if (!is.null(county)) { input_geometry <- filter_county_poly(county) } else if (!is.null(watershed_code)) { input_geometry <- get_watershed_layer(watershed_code = watershed_code) } else if (!is.null(watershed_name)) { input_geometry <- get_watershed_layer(watershed_name = watershed_name) } else { input_geometry <- "" } #Query the REST API and return the object #Run the query on the major road layer major_url <- list_urls(layers = "Major Roads")[1] minor_url <- list_urls(layers = "County and Local Roads")[1] if (layer_type %in% c("all_roads", "all")) { majors <- find_layer_query(major_url, query, input_geometry, ...) minors <- find_layer_query(minor_url, query, input_geometry, ...) out <- rbind(majors, minors) } else if (layer_type == "major_roads") { out <- find_layer_query(major_url, query, input_geometry, ...) } else if (layer_type == "minor_roads") { out <- find_layer_query(minor_url, query, input_geometry, ...) } else { stop("layer_type must be one of the following:\n", paste(" *", c("major_roads", "minor_roads", "all"), collapse = "\n")) } return(out) } #' Get WDNR Image and Map Layers #' #' Functions to pull layers from the ImageServer and MapServer sections of the #' \href{https://dnrmaps.wi.gov/arcgis_image/rest/services}{ #' Wisconsin Department of Natural Resources ArcGIS REST API #' }. These are raster layers representing various maps and images throughout #' the state of Wisconsin. Arguments to these functions can be used to specify #' the spatial extent of the output. If no argument is provided, the full #' raster will be queried. #' #' For a full list of available services use the following search options. #' \describe{ #' \item{ #' get_wis_landcover #' }{ #' -- \code{list_services(section = "DW_Land_Cover")} #' } #' \item{ #' get_wis_imagery #' }{ #' -- \code{list_services(section = "DW_Image")} #' } #' } #' #' @inheritParams get_hydro_layer #' @param service A string describing the service to be pulled. #' @param ...
Additional arguments to be passed to \code{\link{get_map_layer}} #' #' @return A "RasterLayer" object #' @export #' #' @name get_wis_raster_layer #' #' @examples #' \dontrun{ #' mke_forest <- get_wis_landcover(county = c("Milwaukee","Forest")) #' plot_layer(mke_forest, outline_poly = wi_poly, legend = FALSE) #' #' #' } get_wis_landcover <- function(service = "EN_Land_Cover2_Lev2", county = NULL, watershed_code = NULL, watershed_name = NULL, sf_object = NULL, ...) { return( get_wis_rasters( service, county = county, watershed_code = watershed_code, watershed_name = watershed_name, sf_object = sf_object, get_raster_function = arcpullr::get_map_layer, ... ) ) } #' @rdname get_wis_raster_layer #' @export get_wis_imagery <- function(service = "EN_Image_Basemap_Leaf_Off", county = NULL, watershed_code = NULL, watershed_name = NULL, sf_object = NULL, ...) { return( get_wis_rasters( service, county = county, watershed_code = watershed_code, watershed_name = watershed_name, sf_object = sf_object, get_raster_function = arcpullr::get_image_layer, ... ) ) } #' General function to pull Raster layers from a MapServer or ImageServer #' #' This is a non-exported function that is used as the engine for #' \code{\link{get_wis_landcover}} and \code{\link{get_wis_imagery}}. It #' converts watersheds, counties, etc. to the appropriate sf_object and #' queries the desired service using the function specified in #' \code{get_raster_function} #' #' @param service Text string describing which service to pull. Will get matched #' by \code{match_services(service)}. #' @param get_raster_function The \code{arcpullr} function to use: either #' \code{\link[arcpullr]{get_map_layer}} or #' \code{\link[arcpullr]{get_image_layer}} #' @inheritParams get_hydro_layer #' @param ... Additional arguments to pass to the \code{get_raster_function} #' #' @return A Raster* object dependent on \code{get_raster_function} get_wis_rasters<- function(service, get_raster_function, county = NULL, watershed_code = NULL, watershed_name = NULL, sf_object = NULL, ...) { avoid_duplicate_sf_args(county, watershed_name, watershed_code, sf_object) #filtering parameters if (!is.null(watershed_name)) { sf_poly <- get_watershed_layer(watershed_name = watershed_name) } else if (!is.null(watershed_code)) { sf_poly <- get_watershed_layer(watershed_code = watershed_code) } else if (!is.null(county)) { sf_poly <- filter_county_poly(county) } else if (!is.null(sf_object)) { sf_poly <- sf_object } else { sf_poly <- wdnr.gis::wi_poly } layer_url <- list_urls( services = match_services(service, exact = TRUE) ) return(get_raster_function(layer_url, sf_poly, ...)) }
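## End-to-end sketch tying the getters above together (requires network
## access to the WDNR ArcGIS REST API); the HUC code is taken from the
## examples in this file.
shed <- get_watershed_layer(watershed_code = "07070006")
streams <- get_hydro_layer(sf_object = shed, layer_type = "lines")
roads <- get_roads_layer(sf_object = shed, layer_type = "major_roads")
plot_layer(shed)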
/scratch/gouwar.j/cran-all/cranData/wdnr.gis/R/get_layer_functions.R
#' @importFrom dplyr %>% NULL #' @importFrom rlang .data NULL #' @import arcpullr sf NULL
/scratch/gouwar.j/cran-all/cranData/wdnr.gis/R/imports.R
#' Various example sf polygons #' #' These are sf polygons that are used for functions and examples #' throughout the package #' #' @format An object of class sf and data.frame: #' @name wi_example_polys #' #' @source \code{\link[ggplot2]{map_data}} "wi_counties" #' @rdname wi_example_polys "wi_poly"
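## Sketch: the bundled polygons are ordinary sf objects and can be plotted
## directly (assumes sf and wdnr.gis are installed).
plot(sf::st_geometry(wdnr.gis::wi_counties))
plot(sf::st_geometry(wdnr.gis::wi_poly), add = TRUE, border = "red")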
/scratch/gouwar.j/cran-all/cranData/wdnr.gis/R/sf_polygon_data.R
wi_landcover_url <- paste0( "https://dnrmaps.wi.gov/arcgis_image/rest/services/", "DW_Land_Cover/EN_Land_Cover2_Lev2/MapServer" ) wi_leaf_off_url <- paste0( "https://dnrmaps.wi.gov/arcgis_image/rest/services/", "DW_Image/EN_Image_Basemap_Leaf_Off/ImageServer" )
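## Sketch: these prebuilt URLs point at the same services that
## get_wis_landcover() and get_wis_imagery() resolve through list_urls();
## they can be passed straight to the arcpullr raster getters (network
## access required).
lc <- arcpullr::get_map_layer(wi_landcover_url, wdnr.gis::wi_poly)
arcpullr::plot_layer(lc)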
/scratch/gouwar.j/cran-all/cranData/wdnr.gis/R/url_objects.R
#' List available sections, services, layers, and URLs in the WDNR GIS REST API #' #' These functions can take sections, services, and layers specified as #' character strings and return either the section, service, layer or url as #' available in the WDNR GIS REST API. #' #' @return A vector of matching sections, services, layers, or URLs depending #' on the function called #' @export #' #' @name list_funs #' #' @examples #' list_sections() #' list_services(sections = "WT_TMDL") #' list_layers(services = match_services("Invasive")) #' list_urls(sections = match_sections("WT"), #' services = match_services("inland")) list_sections <- function() { return(unique(wdnr.gis::service_urls$section)) } #' @rdname list_funs #' @param sections A character vector of available sections to subset by #' @param pull Logical. Pull unique values (TRUE, default) or show the matching #' rows in the service_urls data.frame #' @export list_services <- function(sections = NULL, pull = TRUE) { if (!is.null(sections)) { sections <- match_sections(sections) out <- wdnr.gis::service_urls %>% dplyr::filter(.data$section %in% sections) if (pull) { out <- out %>% dplyr::pull(.data$service) %>% unique() } else { out <- out %>% dplyr::select(.data$section, .data$service) %>% dplyr::distinct() } } else { out <- unique(wdnr.gis::service_urls$service) } return(out) } #' @rdname list_funs #' @param services A character vector of available services to subset by #' @export list_layers <- function(sections = NULL, services = NULL, pull = TRUE) { return( list_layer_url( type = "layer", sections = sections, services = services, pull = pull ) ) } #' @rdname list_funs #' @param layers A character vector of available layers to subset by #' @export list_urls <- function(layers = NULL, sections = NULL, services = NULL, pull = TRUE) { if (!is.null(layers)) { out <- wdnr.gis::service_urls %>% dplyr::filter(.data$layer %in% layers) if (pull) { out <- out %>% dplyr::pull(.data$url) %>% unique() } return(out) } else { return( list_layer_url( type = "url", sections = sections, services = services, pull = pull ) ) } } #' Helper function to re-create \code{\link{list_layers}} and #' \code{\link{list_urls}} #' #' @param type Character. The column of data to retrieve from service_urls #' @param sections See \code{\link{list_funs}} #' @param services See \code{\link{list_funs}} #' @param pull See \code{\link{list_funs}} #' #' @return A vector of available layers or URLs, depending on \code{type} list_layer_url <- function(type = "layer", sections = NULL, services = NULL, pull = TRUE) { if (!is.null(sections) & !is.null(services)) { out <- wdnr.gis::service_urls %>% dplyr::filter(.data$section %in% sections, .data$service %in% services) } else if (!is.null(sections) & is.null(services)) { out <- wdnr.gis::service_urls %>% dplyr::filter(.data$section %in% sections) } else if (is.null(sections) & !is.null(services)) { out <- wdnr.gis::service_urls %>% dplyr::filter(.data$service %in% services) if (nrow(out) > 20 & length(services) > 1) { warning( paste( sprintf( "There are %s %s available.", nrow(out), paste0(type, "s") ), "You may want to specify a section as well."
) ) } } else { out <- wdnr.gis::service_urls warning( paste( sprintf( "There are %s %s available.", nrow(wdnr.gis::service_urls), paste0(type, "s") ), "Please specify a section and/or service" ) ) } if (pull) { out <- unique(out[, type]) } else { if (type == "layer") { out <- dplyr::select(out, .data$section, .data$service, .data$layer) } } return(out) } #' Find available sections, services, or layers using a regular expression #' #' These functions allow you to search for sections, services, or layers that #' are available in the WDNR ArcGIS REST API using a regular expression. This #' is useful when you don't know the full name of a section, service, or #' layer but want to search based on keywords. #' #' @param ... Character vector or regular expression to match on #' @param exact Logical stating whether to match objects in \code{...} exactly #' or loosely #' #' @return A character vector of all matching sections, services, or layers #' appropriate to the called function #' @export #' #' @name match_funs #' #' @examples #' match_sections("WT") #' match_services("Fish", sections = match_sections("WT")) #' match_layers("Fish", sections = match_sections("WT")) match_sections <- function(..., exact = FALSE) { x <- unlist(list(...)) if (exact) { x <- paste0("^", x, "$") } out <- lapply(x, function(y) { grep(y, unique(wdnr.gis::service_urls$section), ignore.case = TRUE, value = TRUE) }) return(unlist(out)) } #' @rdname match_funs #' @param sections A character vector of available sections to subset by #' @param pull Logical. Pull unique values (TRUE, default) or show the matching #' rows in the service_urls data.frame #' @export match_services <- function(..., sections = NULL, pull = TRUE, exact = FALSE) { x <- unlist(list(...)) if (exact) { x <- paste0("^", x, "$") } out <- lapply(x, function(y) { if (!is.null(sections)) { sections <- match_sections(sections) tmp <- wdnr.gis::service_urls %>% dplyr::filter(.data$section %in% sections) %>% dplyr::pull(.data$service) %>% unique() } else { tmp <- unique(wdnr.gis::service_urls$service) } return(grep(y, tmp, ignore.case = TRUE, value = TRUE)) }) if (!pull) { tmp <- unlist(out) out <- wdnr.gis::service_urls %>% dplyr::filter(.data$service %in% tmp) %>% dplyr::select(.data$section, .data$service, .data$layer, .data$layer_type) return(out) } else { return(unlist(out)) } } #' @rdname match_funs #' @param services A character vector of available services to subset by #' @export match_layers <- function(..., sections = NULL, services = NULL, pull = TRUE, exact = FALSE) { x <- unlist(list(...)) if (exact) { x <- paste0("^", x, "$") } out <- lapply(x, function(y) { if (!is.null(sections) & !is.null(services)) { sections <- match_sections(sections) services <- match_services(services) tmp <- wdnr.gis::service_urls %>% dplyr::filter(.data$section %in% sections, .data$service %in% services) %>% dplyr::pull(.data$layer) %>% unique() } else if (!is.null(sections) & is.null(services)) { sections <- match_sections(sections) tmp <- wdnr.gis::service_urls %>% dplyr::filter(.data$section %in% sections) %>% dplyr::pull(.data$layer) %>% unique() } else if (is.null(sections) & !is.null(services)) { services <- match_services(services) tmp <- wdnr.gis::service_urls %>% dplyr::filter(.data$service %in% services) %>% dplyr::pull(.data$layer) %>% unique() } else { tmp <- unique(wdnr.gis::service_urls$layer) } return(grep(y, tmp, ignore.case = TRUE, value = TRUE)) }) if (!pull) { tmp <- unlist(out) out <- wdnr.gis::service_urls %>% dplyr::filter(.data$layer %in% tmp) %>%
dplyr::select(.data$section, .data$service, .data$layer) return(out) } else { return(unlist(out)) } } #' Helper functions to aid in checking arguments to get_*_layer functions #' #' \code{check_layer_args} simply looks at the arguments that are passed to it #' and checks to make sure that at least one is not NULL. #' \code{avoid_duplicate_sf_args} ensures the presence of only one argument that #' would result in a downstream spatial query (i.e. only a single sf object #' can be used in a spatial query -- this function ensures that only one will #' be). #' \code{deparse_arg_names} is just a helper for the above two functions to #' format argument names in a useful way. #' #' @param ... Any number of objects to be checked #' #' @return If any of \code{...} are not NULL, returns nothing. Otherwise stops #' function execution. #' #' @name check_args #' #' @examples #' \dontrun{ #' a <- NULL #' b <- NULL #' check_layer_args(a, b) #' } check_layer_args <- function(...) { chk <- unlist(lapply(list(...), is.null)) if (all(chk)) { arg_names <- deparse_arg_names(...) msg <- paste0( "Please provide one of the following arguments:\n", paste(" *", unlist(arg_names), collapse = "\n") ) return(stop(msg)) } } #' @rdname check_args avoid_duplicate_sf_args <- function(...) { null_poly_list <- lapply(list(...), is.null) if (length(null_poly_list[!unlist(null_poly_list)]) > 1) { arg_names <- deparse_arg_names(...) msg <- paste0( "Please provide one and only one of the following arguments:\n", paste(" *", unlist(arg_names), collapse = "\n") ) return(stop(msg)) } } # find_sf_query_object <- function(...) { # args <- list(...) # if (!is.null(args$watershed_name)) { # sf_poly <- get_watershed_layer(watershed_name = args$watershed_name) # } else if (!is.null(args$watershed_code)) { # sf_poly <- get_watershed_layer(watershed_code = args$watershed_code) # } else if (!is.null(args$county)) { # sf_poly <- filter_county_poly(args$county) # } else if (!is.null(args$sf_object)) { # sf_poly <- sf_object # } else { # sf_poly <- wdnr.gis::wi_poly # } # return(sf_poly) # } #' @rdname check_args deparse_arg_names <- function(...) { arg_names <- gsub("list\\(|\\)", "", deparse(substitute(list(...)))) arg_names <- trimws(strsplit(paste(arg_names, collapse = ""), split = ",")[[1]]) return(arg_names) } find_layer_query <- function(url, query, input_geometry, ...) { # first check the input geometry type if (inherits(input_geometry, "sf")) { input_geom_type <- sf::st_geometry_type(input_geometry) } else if (inherits(input_geometry, "bbox")) { input_geom_type <- "bbox" } else { input_geom_type <- NULL } if (is.null(input_geom_type)) { out <- arcpullr::get_spatial_layer( url = url, where = query, ... ) } else if (all(grepl("POLYGON", input_geom_type))) { out <- arcpullr::get_layer_by_poly( url = url, geometry = input_geometry, where = query, ... ) } else if (all(grepl("LINE", input_geom_type))) { out <- arcpullr::get_layer_by_line( url = url, geometry = input_geometry, where = query, ... ) } else if (all(input_geom_type == "POINT")) { out <- arcpullr::get_layer_by_point( url = url, geometry = input_geometry, where = query, ... ) } else if (all(input_geom_type == "MULTIPOINT")) { out <- arcpullr::get_layer_by_point( url = url, geometry = input_geometry, where = query, ... ) } else if (all(input_geom_type == "bbox")) { out <- arcpullr::get_layer_by_envelope( url = url, geometry = input_geometry, where = query, ... ) } else { stop("Sorry, something went wrong with your query.
", "Check your arguments to make sure you didn't miss something.") } return(out) } #' Standardize county names #' #' This function alters string text of county names to a standardized format of #' lower-cased, no punctuation (i.e. st instead of st.), and underscore instead #' of spaces #' #' #' @param ... One or more county names in quotations, or a character vector of #' county names #' #' @return A character vector the same length as \code{name}, but tidied up #' for easier and standard viewing standardize_county_names <- function(...) { names <- unlist(list(...)) out <- names %>% tolower() %>% trimws() %>% gsub("\\s+", "_", .) %>% gsub("\\.", "", .) return(out) }
/scratch/gouwar.j/cran-all/cranData/wdnr.gis/R/utilities.R
#' wdnr.gis #' #' A package to pull spatial layers from the Wisconsin DNR ArcGIS #' REST API #' #' \if{html}{\figure{logo.png}{options: alt='logo' width='15\%'}} #' \if{latex}{\figure{logo.png}{options: width=0.5in}} #' #' The wdnr.gis package provides shortcut functions for working with various #' spatial layers on the WDNR ArcGIS REST API. Currently, these include: #' get_hydro_layer, get_watershed_layer, get_roads_layer, get_fmdb_site_layer #' #' @section get_*_layer functions: #' These functions retrieve spatial layers that are noted by the middle term in #' the function name. For example, the get_hydro_layer function retrieves #' spatial data from Wisconsin's 24K Rivers and Streams Hydrography layer (or #' lakes if specified). These functions generally have the same arguments and #' allow queries by county, sf_object, watershed, or a SQL where statement. #' #' @aliases wdnr.gis #' #' @docType package #' @name wdnr.gis-package NULL ## quiets concerns of R CMD check re: the .'s that appear in pipelines if(getRversion() >= "2.15.1") utils::globalVariables(c("."))
/scratch/gouwar.j/cran-all/cranData/wdnr.gis/R/wdnr.gis-package.R
## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>", eval = FALSE ) ## ----setup, eval = TRUE------------------------------------------------------- library(wdnr.gis) ## ---- warning = FALSE--------------------------------------------------------- # mke_cty_streams <- get_hydro_layer(county = "milwaukee") ## ---- warning = FALSE--------------------------------------------------------- # pt <- sf_point(c(-90.8, 43.4)) # watershed <- get_watershed_layer(sf_object = pt, huc_level = "HUC_8") # streams <- get_hydro_layer(sf_object = watershed) ## ----------------------------------------------------------------------------- # portage_lc <- get_wis_landcover(county = "portage") # plot_layer(portage_lc) ## ----------------------------------------------------------------------------- # portage_imagery <- get_wis_imagery(county = "portage") # plot_layer(portage_imagery) ## ---- eval = TRUE------------------------------------------------------------- match_sections("trout") match_services("trout") match_layers("trout stream") ## ---- eval = TRUE------------------------------------------------------------- list_services("FM_Trout") ## ---- eval = TRUE------------------------------------------------------------- list_layers(services = match_services("trout.*stream")) list_urls(layers = match_layers("trout.*stream"))
/scratch/gouwar.j/cran-all/cranData/wdnr.gis/inst/doc/wdnr.gis-intro.R
--- title: "wdnr.gis-Introduction" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{wdnr_gis-intro} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>", eval = FALSE ) ``` <img src='../man/figures/logo.png' width="160" height="180" style="border: none; float: right"/> \ \ wdnr.gis is a package for pulling data from the Wisconsin Department of Natural Resources (WDNR) ArcGIS REST API sites, which are located [here](https://dnrmaps.wi.gov/arcgis/rest/services) and [here](https://dnrmaps.wi.gov/arcgis2/rest/services). There are a wide variety of services and layers on these sites, and this package helps to make sense of and use these data. \ \ ```{r setup, eval = TRUE} library(wdnr.gis) ``` Notice that dependency packages arcpullr and sf are loaded along with wdnr.gis. These are essential packages that are used to pull data from an ArcGIS REST API and convert them to an sf object in R. All other functions in wdnr.gis are just wrappers around functions within arcpullr and sf that are specific to WDNR's ArcGIS REST API. \ \ # Pulling Specific Feature Layers There are a handful of functions that are written specifically to query certain commonly used layers by using either a spatial or a SQL query. The querying is mostly built in to these functions, but advanced querying functionality is also available. ```{r, warning = FALSE} mke_cty_streams <- get_hydro_layer(county = "milwaukee") ``` You can also pass an sf object to these get_*_layer functions. This example just shows how to query the watershed that a particular point falls in and the streams within that watershed ```{r, warning = FALSE} pt <- sf_point(c(-90.8, 43.4)) watershed <- get_watershed_layer(sf_object = pt, huc_level = "HUC_8") streams <- get_hydro_layer(sf_object = watershed) ``` Current functions that query specific WDNR layers are `get_hydro_layer`, `get_watershed_layer`, `get_fmdb_site_layer`, and `get_roads_layer`. Others are available upon request. If you have a layer that you use a lot, email us with a request for a layer specific function and we can add it in (especially if it will be useful to many others as well). \ \ # Pulling Specific Map and Image Layers These functions work similarly to the other specific feature layer functions, but they query the WDNR's map and image services instead and return a raster. To get landcover data use the `get_wis_landcover` function. For aerial imagery use the `get_wis_imagery` function. ```{r} portage_lc <- get_wis_landcover(county = "portage") plot_layer(portage_lc) ``` The specific landcover service to be queried can be altered using the `service` argument. A full list of available services can be found by running `list_services(section = "DW_Land_Cover")`. ```{r} portage_imagery <- get_wis_imagery(county = "portage") plot_layer(portage_imagery) ``` Similar to `get_wis_landcover` the service to be queried can be altered using the `service` argument. A full list of available image services can be found by running `list_services(section = "DW_Image")`. \ \ # Finding Sections, Services, and Layers ArcGIS REST APIs are hierarchical in nature. There are one or more folders, each of which contain one or more services. Within services are one or more layers and perhaps sub-layers. 
The layers contain the data of interest, but we've built in functions to be able to find any of the folders, services, or layers using the following functions: ```{r, eval = TRUE} match_sections("trout") match_services("trout") match_layers("trout stream") ``` There are also functions to find specific sections, services, and layers when the name is known. These are most useful when trying to obtain a URL for a specific layer. ```{r, eval = TRUE} list_services("FM_Trout") ``` These functions are most useful when combined for finding specific layers and the associated URLs. ```{r, eval = TRUE} list_layers(services = match_services("trout.*stream")) list_urls(layers = match_layers("trout.*stream")) ``` Notice the use of regular expressions in the `list_urls` function.
/scratch/gouwar.j/cran-all/cranData/wdnr.gis/inst/doc/wdnr.gis-intro.Rmd
--- title: "wdnr.gis-Introduction" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{wdnr_gis-intro} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>", eval = FALSE ) ``` <img src='../man/figures/logo.png' width="160" height="180" style="border: none; float: right"/> \ \ wdnr.gis is a package for pulling data from the Wisconsin Department of Natural Resources (WDNR) ArcGIS REST API sites, which are located [here](https://dnrmaps.wi.gov/arcgis/rest/services) and [here](https://dnrmaps.wi.gov/arcgis2/rest/services). There are a wide variety of services and layers on these sites, and this package helps to make sense of and use these data. \ \ ```{r setup, eval = TRUE} library(wdnr.gis) ``` Notice that dependency packages arcpullr and sf are loaded along with wdnr.gis. These are essential packages that are used to pull data from an ArcGIS REST API and convert them to an sf object in R. All other functions in wdnr.gis are just wrappers around functions within arcpullr and sf that are specific to WDNR's ArcGIS REST API. \ \ # Pulling Specific Feature Layers There are a handful of functions that are written specifically to query certain commonly used layers by using either a spatial or a SQL query. The querying is mostly built in to these functions, but advanced querying functionality is also available. ```{r, warning = FALSE} mke_cty_streams <- get_hydro_layer(county = "milwaukee") ``` You can also pass an sf object to these get_*_layer functions. This example just shows how to query the watershed that a particular point falls in and the streams within that watershed ```{r, warning = FALSE} pt <- sf_point(c(-90.8, 43.4)) watershed <- get_watershed_layer(sf_object = pt, huc_level = "HUC_8") streams <- get_hydro_layer(sf_object = watershed) ``` Current functions that query specific WDNR layers are `get_hydro_layer`, `get_watershed_layer`, `get_fmdb_site_layer`, and `get_roads_layer`. Others are available upon request. If you have a layer that you use a lot, email us with a request for a layer specific function and we can add it in (especially if it will be useful to many others as well). \ \ # Pulling Specific Map and Image Layers These functions work similarly to the other specific feature layer functions, but they query the WDNR's map and image services instead and return a raster. To get landcover data use the `get_wis_landcover` function. For aerial imagery use the `get_wis_imagery` function. ```{r} portage_lc <- get_wis_landcover(county = "portage") plot_layer(portage_lc) ``` The specific landcover service to be queried can be altered using the `service` argument. A full list of available services can be found by running `list_services(section = "DW_Land_Cover")`. ```{r} portage_imagery <- get_wis_imagery(county = "portage") plot_layer(portage_imagery) ``` Similar to `get_wis_landcover` the service to be queried can be altered using the `service` argument. A full list of available image services can be found by running `list_services(section = "DW_Image")`. \ \ # Finding Sections, Services, and Layers ArcGIS REST APIs are hierarchical in nature. There are one or more folders, each of which contain one or more services. Within services are one or more layers and perhaps sub-layers. 
The layers contain the data of interest, but we've built in functions to be able to find any of the folders, services, or layers using the following functions: ```{r, eval = TRUE} match_sections("trout") match_services("trout") match_layers("trout stream") ``` There are also functions to find specific sections, services, and layers when the name is known. These are most useful when trying to obtain a URL for a specific layer. ```{r, eval = TRUE} list_services("FM_Trout") ``` These functions are most useful when combined for finding specific layers and the associated URLs. ```{r, eval = TRUE} list_layers(services = match_services("trout.*stream")) list_urls(layers = match_layers("trout.*stream")) ``` Notice the use of regular expressions in the `list_urls` function.
/scratch/gouwar.j/cran-all/cranData/wdnr.gis/vignettes/wdnr.gis-intro.Rmd
#' Country code
#'
#' Find ISO-3 country code for a country.
#'
#' @param x `character` name of country or its ISO-3 code.
#'
#' @return `character` ISO-3 country code.
#'
#' @seealso [countrycode::countrycode()].
#'
#' @noRd
country_code <- function(x) {
  # validate argument
  assertthat::assert_that(assertthat::is.string(x))
  # processing
  if (nchar(x) == 3) {
    # check that x is valid ISO-3 code
    name <- suppressWarnings(
      countrycode::countrycode(x, "iso3c", "country.name.en"))
    if (is.na(name[[1]]))
      stop("argument to x is not a valid iso3 code")
    country_code <- toupper(x)
  } else {
    # check that x is valid country name
    country_code <- suppressWarnings(
      countrycode::countrycode(x, "country.name.en", "iso3c"))
    if (is.na(country_code[[1]]))
      stop("argument to x is not a valid country name")
    country_code <- toupper(country_code)
  }
  return(country_code)
}

#' Find most recent version of WDPA dataset in folder
#'
#' Find the file in a folder that contains the most recent version of the
#' WDPA dataset.
#'
#' @inheritParams wdpa_fetch
#'
#' @return `character` file path.
#'
#' @noRd
wdpa_file <- function(x, download_dir = tempfile()) {
  # validate arguments
  assertthat::assert_that(assertthat::is.string(x),
                          assertthat::is.dir(download_dir))
  # convert x to country ISO3 code
  if (x != "global") {
    x <- country_code(x)
  }
  # set file and date search patterns
  if (x == "global") {
    file_pattern <- "WDPA\\_.*\\_Public.zip"
  } else {
    file_pattern <- paste0("WDPA\\_.*\\_", x, "\\-shapefile.zip")
  }
  ## search for file
  file_paths <- dir(download_dir, file_pattern, full.names = TRUE)
  if (length(file_paths) == 0)
    stop("data not found in \"download_dir\" folder")
  # parse date-times
  file_versions <- vapply(file_paths, wdpa_version, character(1))
  file_dates <- convert_wdpa_version_to_POSIXct(file_versions)
  # set file_path as latest version
  file_path <- file_paths[which.max(file_dates)]
  # return file path
  return(file_path)
}

#' Extract polygons and points
#'
#' Extract polygons and points from a [sf::sf()] object.
#'
#' @param x [sf::sf()] object.
#'
#' @return [sf::sf()] object.
#'
#' @noRd
extract_polygons_and_points <- function(x) {
  # find point indices
  ind <- vapply(sf::st_geometry(x), inherits, logical(1),
                c("POINT", "MULTIPOINT"))
  # extract polygons from geometries
  if (inherits(sf::st_geometry(x),
               c("sfc_GEOMETRY", "sfc_GEOMETRYCOLLECTION"))) {
    o <- suppressWarnings(sf::st_collection_extract(x, "POLYGON"))
  } else {
    o <- x[!ind, ]
  }
  # extract points
  rbind(o, x[ind, , drop = FALSE])
}

#' Empty WDPA dataset
#'
#' This function creates an empty WDPA dataset.
#'
#' @return [sf::sf()] object.
#'
#' @examples
#' empty_wdpa_dataset(sf::st_crs(4326))
#'
#' @noRd
empty_wdpa_dataset <- function(crs) {
  sf::st_sf(tibble::tibble(
    WDPAID = numeric(0),
    WDPA_PID = character(0),
    PA_DEF = character(0),
    NAME = character(0),
    ORIG_NAME = character(0),
    DESIG = character(0),
    DESIG_ENG = character(0),
    DESIG_TYPE = character(0),
    IUCN_CAT = character(0),
    INT_CRIT = character(0),
    MARINE = character(0),
    REP_M_AREA = numeric(0),
    REP_AREA = numeric(0),
    NO_TAKE = character(0),
    NO_TK_AREA = numeric(0),
    STATUS = character(0),
    STATUS_YR = numeric(0),
    GOV_TYPE = character(0),
    OWN_TYPE = character(0),
    MANG_AUTH = character(0),
    MANG_PLAN = character(0),
    VERIF = character(0),
    METADATAID = integer(0),
    SUB_LOC = character(0),
    PARENT_ISO = character(0),
    ISO3 = character(0),
    SUPP_INFO = character(0),
    CONS_OBJ = character(0),
    GEOMETRY_TYPE = character(0),
    AREA_KM2 = numeric(0)),
    geometry = sf::st_sfc(crs = crs))
}

#' Dataset version
#'
#' Determine the version of protected area data.
#'
#' @param x `character` file name.
#'
#' @return `character` version.
#'
#' @noRd
wdpa_version <- function(x) {
  # verify argument is valid
  assertthat::assert_that(assertthat::is.string(x), assertthat::noNA(x))
  x <- gsub("WDOECM_", "", x, fixed = TRUE)
  strsplit(basename(x), "_", fixed = TRUE)[[1]][[2]]
}

#' Convert dataset version to POSIXct
#'
#' Coerce a dataset version to `POSIXct` format.
#'
#' @param x `character` version (e.g. `"Nov2011"`).
#'
#' @return `POSIXct` object.
#'
#' @noRd
convert_wdpa_version_to_POSIXct <- function(x) {
  # verify argument is valid
  assertthat::assert_that(is.character(x), assertthat::noNA(x))
  # parse date-times
  # note that date-times cannot easily be parsed using base::strptime due to
  # a bug where date-times with the month before the year (e.g. Nov2011)
  # will return NA; see this post on Stack Overflow
  # https://stackoverflow.com/questions/26997864/strptime-not-recognizing-b-b
  # therefore we must---unsatisfyingly---extract the months and years
  # separately, reorder them, and then parse using base::strptime
  month <- gsub("[[:digit:]]", "", x)
  year <- gsub("[[:alpha:]]", "", x)
  out <- try(
    as.POSIXct(strptime(paste0("01/", month, "/", year), "%d/%b/%Y")),
    silent = TRUE)
  # verify valid date
  if (inherits(out, "try-error"))
    stop("version not recognized") #nocov
  assertthat::assert_that(
    all(!is.na(out)), all(nchar(year) == 4), all(nchar(month) == 3),
    msg = "version not recognized")
  # return result
  out
}

#' Download file
#'
#' @param url `character` URL for downloading file.
#'
#' @param path `character` path to save data.
#'
#' @param quiet `logical` should downloading information be suppressed?
#'
#' @return Invisible `logical` indicating success.
#'
#' @noRd
download_file <- function(url, path, quiet = TRUE) {
  curl::curl_download(url, path, quiet = quiet)
  invisible(TRUE)
}

#' Is online?
#'
#' Check if an active internet connection is available.
#'
#' @details
#' This function uses both [pingr::is_online()] and [curl::has_internet()]
#' to check for an internet connection, because either one of these
#' functions can occasionally return a false-negative.
#'
#' @return `logical` value indicating success.
#'
#' @examples
#' # check if online
#' print(is_online())
#'
#' @noRd
is_online <- function() {
  isTRUE(try(curl::has_internet(), silent = TRUE)) ||
    isTRUE(try(pingr::is_online(), silent = TRUE))
}

assertthat::on_failure(is_online) <- function(call, env) {
  "could not establish an active internet connection"
}
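# Illustration (not run) of the version helpers above, assuming a typical
# Protected Planet file name; note that parsing month abbreviations with
# "%b" depends on an English locale:
#
#   wdpa_version("WDPA_Nov2011_LIE-shapefile.zip")
#   #> [1] "Nov2011"
#   convert_wdpa_version_to_POSIXct("Nov2011")
#   #> 2011-11-01 (a POSIXct object, in the local time zone)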
/scratch/gouwar.j/cran-all/cranData/wdpar/R/internal.R
#' wdpar: Interface to the World Database on Protected Areas #' #' The \pkg{wdpar} R package provides an interface to data provided by #' [Protected Planet](https://www.protectedplanet.net/en). #' Specifically, it can be used to automatically obtain data from #' the World Database on Protected Areas #' (WDPA) and the World Database on Other Effective Area-Based Conservation #' Measures (WDOECM). #' It also provides methods for cleaning data from these databases following #' best practices #' (outlined in Butchart *et al.* 2015; Protected Planet 2021; Runge *et al.* #' 2015). The main functions are [wdpa_fetch()] #' for downloading data and [wdpa_clean()] for cleaning data. For #' more information, please see the package vignette. #' To cite this package, please see `citation("wdpar")`. #' #' @references #' Butchart SH, Clarke M, Smith RJ, Sykes RE, Scharlemann JP, #' Harfoot M, ... & Brooks TM (2015) Shortfalls and solutions for #' meeting national and global conservation area targets. #' *Conservation Letters*, **8**: 329--337. #' #' Protected Planet (2021) Calculating protected and OECM area coverage. #' Available at: #' <https://www.protectedplanet.net/en/resources/calculating-protected-area-coverage>. #' #' Runge CA, Watson JEM, Butchart HM, Hanson JO, Possingham HP & Fuller RA #' (2015) Protected areas and global conservation of migratory birds. #' *Science*, **350**: 1255--1258. #' #' @name wdpar #' @aliases wdpar-package #' @docType package NULL #' @import sf NULL # avoid false positive NOTEs during CRAN checks #' @importFrom rappdirs user_data_dir NULL
/scratch/gouwar.j/cran-all/cranData/wdpar/R/package.R
#' Read spatial data #' #' Import spatial data. If desired, only a subset of the available data #' are imported. #' #' @param dsn `character` data source name. #' #' @param layer `character` layer name. Defaults to `NULL`. #' #' @param n `integer` number of records to import. #' Defaults to `NULL` such that all data are imported. #' #' @return [sf::sf()] object. #' #' @noRd read_sf_n <- function(dsn, layer = NULL, n = NULL) { # validate arguments assertthat::assert_that(assertthat::is.string(dsn), inherits(layer, c("character", "NULL")), inherits(n, c("numeric", "NULL"))) if (!is.null(n)) { assertthat::assert_that(assertthat::is.count(n), assertthat::noNA(n)) } if (is.null(layer)) { layer <- sf::st_layers(dsn)$name[[1]] } assertthat::assert_that(assertthat::is.string(layer), assertthat::noNA(layer)) # construct query if (!is.null(n)) { query <- paste0("SELECT * FROM \"", layer, "\" LIMIT ", n) } else { query <- paste0("SELECT * FROM \"", layer, "\"") } # import data out <- sf::read_sf(dsn = dsn, query = query) if (!is.null(n)) { if (nrow(out) > n) { out <- out[seq_len(n), ] } } # force sf_geometry column to be called "geometry" if (!"geometry" %in% names(out)) { old_name <- attr(out, "sf_column") names(out)[names(out) == old_name] <- "geometry" attr(out, "sf_column") <- "geometry" } # return result out }
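# Illustrative usage sketch (not run); the file and layer names below are
# hypothetical:
#
#   # import the first 10 records from the first layer of "pa_data.gpkg"
#   x <- read_sf_n("pa_data.gpkg", n = 10)
#
#   # import all records from a specific layer
#   y <- read_sf_n("pa_data.gpkg", layer = "WDPA_polygons")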
/scratch/gouwar.j/cran-all/cranData/wdpar/R/read_sf_n.R
#' @include internal.R
NULL

#' Erase overlaps
#'
#' Erase overlapping geometries in a [sf::sf()] object.
#'
#' @param x [sf::sf()] object.
#'
#' @param verbose `logical` should progress be reported? Defaults to
#' `FALSE`.
#'
#' @details This is a more robust -- albeit slower -- implementation for
#' [sf::st_difference()] when `y` is missing.
#'
#' @return [sf::sf()] object.
#'
#' @seealso [sf::st_difference()], [wdpa_dissolve()].
#'
#' @examples
#' # create data
#' pl1 <- sf::st_polygon(list(matrix(c(0, 0, 2, 0, 1, 1, 0, 0), byrow = TRUE,
#'                                   ncol = 2))) * 100
#' pl2 <- sf::st_polygon(list(matrix(c(0, 0.5, 2, 0.5, 1, 1.5, 0, 0.5),
#'                                   byrow = TRUE, ncol = 2))) * 100
#' pl3 <- sf::st_polygon(list(matrix(c(0, 1.25, 2, 1.25, 1, 2.5, 0, 1.25),
#'                                   byrow = TRUE, ncol = 2))) * 100
#' x <- sf::st_sf(order = c("A", "B", "C"),
#'                geometry = sf::st_sfc(list(pl1, pl2, pl3), crs = 3395))
#'
#' # erase overlaps
#' y <- st_erase_overlaps(x)
#'
#' # plot data for visual comparison
#' par(mfrow = c(1, 2))
#' plot(sf::st_geometry(x), xlim = c(0, 200), ylim = c(0, 250),
#'      main = "original", col = "transparent")
#' plot(sf::st_geometry(y), xlim = c(0, 200), ylim = c(0, 250),
#'      main = "no overlaps", col = "transparent")
#' @export
st_erase_overlaps <- function(x, verbose = FALSE) {
  # validate arguments
  assertthat::assert_that(inherits(x, "sf"), assertthat::is.flag(verbose))
  # extract precision
  precision <- sf::st_precision(x)
  # processing
  g <- sf::st_geometry(x)
  o <- g[1]
  # initialize progress bar
  if (verbose) {
    pb <- progress::progress_bar$new(
      format = "[:bar] :current/:total (:percent) eta: :eta",
      total = length(g) - 1, clear = FALSE, width = 60)
  }
  for (i in seq(2, length(g))) {
    ## find overlapping geometries
    ovr <- sf::st_intersects(g[i], o)[[1]]
    ## if overlapping geometries then calculate difference
    if (length(ovr) > 0) {
      ## create union
      ### run union
      u <- sf::st_union(
        sf::st_set_precision(
          suppressWarnings(
            sf::st_collection_extract(sf::st_buffer(o[ovr], 0))),
          precision
        )
      )
      ### buffer the union to fix any geometry issues
      u <- sf::st_buffer(u, dist = 0)
      ### repair the geometry if there are any issues
      if (!all(sf::st_is_valid(u))) {
        #nocov start
        u <- suppressWarnings(
          sf::st_collection_extract(
            sf::st_make_valid(sf::st_set_precision(u, precision)),
            "POLYGON"
          )
        )
        #nocov end
      }
      ## calculate difference
      ### run difference
      d <- sf::st_difference(
        suppressWarnings(
          sf::st_collection_extract(
            sf::st_make_valid(sf::st_set_precision(g[i], precision)),
            "POLYGON"
          )
        ),
        suppressWarnings(
          sf::st_collection_extract(
            sf::st_make_valid(sf::st_set_precision(u, precision)),
            "POLYGON"
          )
        )
      )
      if (length(d) == 0L) {
        d <- sf::st_sfc(sf::st_geometrycollection(), crs = sf::st_crs(d)) #nocov
      }
      d <- suppressWarnings(sf::st_collection_extract(d, "POLYGON"))
      ### repair the geometry if there are any issues
      if (!all(sf::st_is_valid(d))) {
        #nocov start
        d <- suppressWarnings(
          sf::st_collection_extract(
            sf::st_make_valid(sf::st_set_precision(d, precision)),
            "POLYGON"
          )
        )
        #nocov end
      }
    } else {
      d <- g[i]
    }
    ## find empty geometries
    empty <- sf::st_is_empty(d)
    ## process geometry if it's not empty
    if (!all(empty)) {
      ### remove slivers (areas less than 1 m^2)
      d <- d[!empty]
      d <- sf::st_cast(d, "POLYGON")
      d <- d[as.numeric(sf::st_area(d)) > 1]
      d <- sf::st_cast(d, "MULTIPOLYGON")
      if (length(d) == 0) {
        d <- sf::st_sfc(sf::st_geometrycollection(), crs = sf::st_crs(d)) #nocov
      }
      d <- suppressWarnings(sf::st_collection_extract(d, "POLYGON"))
    }
    ## if d contains multiple geometries, then union them
    if (length(d) > 1) {
      d <- sf::st_union(d)
      d <- suppressWarnings(
        sf::st_collection_extract(
          sf::st_make_valid(sf::st_set_precision(d, precision)),
          "POLYGON"
        )
      )
    }
    ## create empty geometry if empty
    if (length(d) == 0) {
      d <- sf::st_sfc(sf::st_polygon()) #nocov
    }
    ## store geometry
    o[i] <- d[[1]]
    ## increment progress bar
    if (verbose) pb$tick()
  }
  x <- sf::st_set_geometry(x, o)
  x <- sf::st_set_precision(x, precision)
  # return output
  x
}
/scratch/gouwar.j/cran-all/cranData/wdpar/R/st_erase_overlaps.R
#' Repair geometry
#'
#' Repair the geometry of a [sf::st_sf()] object.
#'
#' @param x [sf::sf()] object.
#'
#' @inheritParams wdpa_clean
#'
#' @details
#' This function works by first using the [sf::st_make_valid()] function
#' to attempt to fix geometry issues. Since the [sf::st_make_valid()]
#' function sometimes produces incorrect geometries in rare cases
#' (e.g. when fixing invalid geometries that cross the dateline),
#' this function then uses the `st_prepair()` function from the \pkg{prepr}
#' package to fix those geometries instead
#' (see <https://github.com/dickoa/prepr> for details).
#'
#' @section Installation:
#' This function uses the \pkg{prepr} package to help repair geometries
#' in certain cases. Because the \pkg{prepr} package is not available on
#' the Comprehensive R Archive Network (CRAN), it must be installed from
#' its online code repository. To achieve this, please
#' use the following code:
#' ```
#' if (!require(remotes)) install.packages("remotes")
#' remotes::install_github("dickoa/prepr")
#' ```
#'
#' Note that the \pkg{prepr} package has system dependencies that need to be
#' installed before the package itself can be installed
#' (see package README file for platform-specific instructions).
#'
#' @examples
#' # create sf object
#' p1 <- st_sf(
#'   id = 1,
#'   geometry = st_as_sfc("POLYGON((0 0, 0 10, 10 0, 10 10, 0 0))", crs = 3857)
#' )
#'
#' # repair geometry
#' p2 <- st_repair_geometry(p1)
#'
#' # print object
#' print(p2)
#' @export
st_repair_geometry <- function(x, geometry_precision = 1500) {
  # assert arguments are valid
  assertthat::assert_that(
    inherits(x, "sf"),
    !assertthat::has_name(x, "_repair_id"),
    assertthat::is.count(geometry_precision),
    assertthat::noNA(geometry_precision)
  )
  # add in identifier column to keep track of geometries
  x[["_repair_id"]] <- seq_len(nrow(x))
  # set precision
  x <- sf::st_set_precision(x, geometry_precision)
  # apply first pass for fixing geometry
  x2 <- sf::st_make_valid(x)
  # remove empty geometries
  x2 <- x2[!sf::st_is_empty(x2), ]
  # extract polygons and points (if needed)
  x2 <- extract_polygons_and_points(x2)
  # dissolve by repair id
  if (!identical(anyDuplicated(x2[["_repair_id"]]), 0L)) {
    x2 <- split(x2, x2[["_repair_id"]])
    x2_df <- tibble::tibble(`_repair_id` = as.integer(names(x2)))
    x2 <- lapply(x2, sf::st_geometry)
    x2 <- lapply(x2, sf::st_union)
    x2 <- do.call(c, x2)
    x_df <- match(x2_df[["_repair_id"]], x[["_repair_id"]])
    x_df <- sf::st_drop_geometry(x)[x_df, , drop = FALSE]
    x_df <- x_df[, setdiff(names(x_df), "_repair_id"), drop = FALSE]
    x2_df <- tibble::as_tibble(cbind(x2_df, x_df))
    x2_df <- x2_df[, names(sf::st_drop_geometry(x)), drop = FALSE]
    x2_df$geometry <- x2
    x2 <- sf::st_sf(x2_df)
    rm(x_df, x2_df)
  }
  # detect if any invalid geometries persist
  ## subset repaired polygons
  x_sub <- x[match(x2[["_repair_id"]], x[["_repair_id"]]), , drop = FALSE]
  ## detect invalid polygons based on changes in area
  area_threshold <- ifelse(sf::st_is_longlat(x), 1, 1e+4)
  invalid_idx <- which(
    abs(
      as.numeric(sf::st_area(sf::st_set_crs(x_sub, NA))) -
      as.numeric(sf::st_area(sf::st_set_crs(x2, NA)))
    ) >= area_threshold
  )
  ## refine detections to only include polygons that span width of planet
  ## note this only works if x has a defined CRS
  if (sf::st_crs(x) != sf::st_crs(NA)) {
    ## compute global extent in coordinate system of x (if crs defined)
    global_bbox <- sf::st_as_sfc(
      "POLYGON((-180 -90, 180 -90, 180 90, -180 90, -180 -90))",
      crs = 4326
    )
    if (sf::st_crs(x) != sf::st_crs(4326)) {
      global_bbox <- sf::st_transform(global_bbox, sf::st_crs(x))
    }
    global_bbox <- sf::st_bbox(global_bbox)
    ## compute distance threshold for invalid outputs from st_make_valid()
    dist_threshold <- unname(global_bbox$xmax - global_bbox$xmin) * 0.7
    ## detect invalid polygons based on total width across planet
    invalid_bbox_idx <- which(
      vapply(sf::st_geometry(x2), FUN.VALUE = logical(1), function(y) {
        b <- sf::st_bbox(y)
        (b$xmax - b$xmin) > dist_threshold
      })
    )
    ## subset geometries
    invalid_idx <- intersect(invalid_idx, invalid_bbox_idx)
  }
  # manually fix geometries if needed
  if (length(invalid_idx) > 0) {
    ### verify that prepr package is installed
    assertthat::assert_that(
      requireNamespace("prepr", quietly = TRUE),
      msg = paste(
        "the \"prepr\" package needs to be installed, use: \n",
        "remotes::install_github(\"dickoa/prepr\")"
      )
    )
    ### find geometries to repair
    invalid_ids <- x_sub[["_repair_id"]][invalid_idx]
    rm(x_sub)
    ### fix geometries
    x2 <- rbind(
      x2[!x2[["_repair_id"]] %in% invalid_ids, , drop = FALSE],
      prepr::st_prepair(
        x[x[["_repair_id"]] %in% invalid_ids, , drop = FALSE]
      )
    )
  }
  # remove custom id column
  geom_col <- attr(x2, "sf_column")
  x2 <- x2[, setdiff(names(x2), c("_repair_id", geom_col)), drop = FALSE]
  # return result
  x2
}
/scratch/gouwar.j/cran-all/cranData/wdpar/R/st_repair_geometry.R
#' @include internal.R st_erase_overlaps.R
NULL

#' Clean data
#'
#' Clean data obtained from
#' [Protected Planet](https://www.protectedplanet.net/en).
#' Specifically, this function is designed to clean data obtained from
#' the World Database on Protected Areas
#' (WDPA) and the World Database on Other Effective Area-Based Conservation
#' Measures (WDOECM).
#' For recommended practices on cleaning large datasets
#' (e.g. datasets that span multiple countries or a large geographic area),
#' please see below.
#'
#' @param x [sf::sf()] object containing protected area data.
#'
#' @param crs `character` or `integer` object representing a
#' coordinate reference system. Defaults to World Behrmann
#' (*ESRI:54017*).
#'
#' @param exclude_unesco `logical` should UNESCO Biosphere Reserves be
#' excluded? Defaults to `TRUE`.
#'
#' @param retain_status `character` vector containing the statuses for
#' protected areas that should be retained during the cleaning process.
#' Available statuses include:
#' `"Proposed"`, `"Inscribed"`, `"Adopted"`, `"Designated"`, and
#' `"Established"`.
#' Additionally, a `NULL` argument can be specified to ensure that no
#' protected areas are excluded according to their status.
#' The default argument is a `character` vector containing `"Designated"`,
#' `"Inscribed"`, and `"Established"`.
#' This default argument ensures that protected areas that are not currently
#' implemented are excluded.
#'
#' @param snap_tolerance `numeric` tolerance for snapping geometry to a
#' grid for resolving invalid geometries. Defaults to 1 meter.
#'
#' @param simplify_tolerance `numeric` simplification tolerance.
#' Defaults to 0 meters.
#'
#' @param geometry_precision `numeric` level of precision for processing
#' the spatial data (used with [sf::st_set_precision()]). The
#' default argument is 1500 (higher values indicate higher precision).
#' This level of precision is generally suitable for analyses at the
#' national-scale. For analyses at finer-scale resolutions, please
#' consider using a greater value (e.g. 10000).
#'
#' @param erase_overlaps `logical` should overlapping boundaries be
#' erased? This is useful for making comparisons between individual
#' protected areas and understanding their "effective" geographic coverage.
#' On the other hand, this processing step may not be needed
#' (e.g. if the protected area boundaries are going to be rasterized), and
#' so processing time can be substantially reduced by skipping this step and
#' setting the argument to `FALSE`. Defaults to `TRUE`.
#'
#' @param verbose `logical` should progress on data cleaning be reported?
#' Defaults to `TRUE` in an interactive session, otherwise `FALSE`.
#'
#' @details This function cleans data following best practices
#' (Butchart *et al.* 2015; Protected Planet 2021; Runge *et al.* 2015).
#' To obtain accurate protected area coverage statistics for a country,
#' please note that you will need to manually clip the cleaned data to
#' the countries' coastline and its Exclusive Economic Zone (EEZ).
#'
#' \enumerate{
#'
#' \item Exclude protected areas according to their status (i.e.
#' `"STATUS"` field). Specifically, protected areas that have
#' a status not specified in the argument to `retain_status` are excluded.
#' By default, only protected areas that have a
#' `"Designated"`, `"Inscribed"`, or `"Established"` status are retained.
#' This means that the default behavior is to exclude protected areas that
#' are not currently implemented.
#'
#' \item Exclude United Nations Educational, Scientific and Cultural
#' Organization (UNESCO) Biosphere Reserves (Coetzer *et al.* 2014).
#' This step is only performed if the argument to `exclude_unesco` is
#' `TRUE`.
#'
#' \item Create a field (`"GEOMETRY_TYPE"`) indicating if areas are
#' represented as point localities (`"POINT"`) or as polygons
#' (`"POLYGON"`).
#'
#' \item Exclude areas represented as point localities that do not
#' have a reported spatial extent (i.e. missing data for the field
#' `"REP_AREA"`).
#'
#' \item Geometries are wrapped to the dateline (using
#' [sf::st_wrap_dateline()] with the options
#' `"WRAPDATELINE=YES"` and `"DATELINEOFFSET=180"`).
#'
#' \item Reproject data to coordinate system specified in argument to
#' `crs` (using [sf::st_transform()]).
#'
#' \item Repair any invalid geometries that have manifested
#' (using [st_repair_geometry()]).
#'
#' \item Buffer areas represented as point localities to circular areas
#' using their reported spatial extent (using data in the field
#' `"REP_AREA"` and [sf::st_buffer()]; see Visconti
#' *et al.* 2013).
#'
#' \item Snap the geometries to a grid to fix any remaining
#' geometry issues (using argument to `snap_tolerance` and
#' [lwgeom::st_snap_to_grid()]).
#'
#' \item Repair any invalid geometries that have manifested
#' (using [st_repair_geometry()]).
#'
#' \item Simplify the protected area geometries to reduce computational
#' burden (using argument to `simplify_tolerance` and
#' [sf::st_simplify()]).
#'
#' \item Repair any invalid geometries that have manifested
#' (using [st_repair_geometry()]).
#'
#' \item The `"MARINE"` field is converted from integer codes
#' to descriptive names (i.e. `0` = `"terrestrial"`,
#' `1` = `"partial"`, `2` = `"marine"`).
#'
#' \item The `"PA_DEF"` field is converted from integer codes
#' to descriptive names (i.e. `0` = `"OECM"`, and `1` = `"PA"`).
#'
#' \item Zeros in the `"STATUS_YR"` field are replaced with
#' missing values (i.e. `NA_real_` values).
#'
#' \item Zeros in the `"NO_TK_AREA"` field are replaced with `NA`
#' values for areas where such data are not reported or applicable
#' (i.e. areas with the values `"Not Applicable"`
#' or `"Not Reported"` in the `"NO_TK_AREA"` field).
#'
#' \item Overlapping geometries are erased from the protected area data
#' (discussed in Deguignet *et al.* 2017). Geometries are erased such
#' that areas associated with more effective management
#' categories (`"IUCN_CAT"`) or have historical precedence are retained
#' (using [sf::st_difference()]).
#'
#' \item Slivers are removed (geometries with areas less than 0.1 square
#' meters).
#'
#' \item The size of areas are calculated in square kilometers and stored in
#' the field `"AREA_KM2"`.
#'
#' }
#'
#' @section Recommended practices for large datasets:
#' This function can be used to clean large datasets assuming that
#' sufficient computational resources and time are available.
#' Indeed, it can clean data spanning large countries, multiple
#' countries, and even the full global dataset.
#' When processing the full global dataset, it is recommended to use a
#' computer system with at least 32 GB RAM available and to allow for at
#' least one full day for the data cleaning procedures to complete.
#' It is also recommended to avoid using the computer system for any other
#' tasks while the data cleaning procedures are being completed,
#' because they are very computationally intensive.
#' Additionally, when processing large datasets -- and especially
#' for the global dataset -- it is strongly recommended to disable the
#' procedure for erasing overlapping areas.
#' This is because the built-in procedure for erasing overlaps processes
#' the geometries sequentially, so that information on each protected area
#' (e.g. IUCN category, year established) can be retained in the output;
#' as a result, it is very time consuming when processing many protected
#' areas.
#' Instead, when cleaning large datasets, it is recommended to run
#' the data cleaning procedures with the procedure for erasing
#' overlapping areas disabled (i.e. with `erase_overlaps = FALSE`).
#' After the data cleaning procedures have completed,
#' the protected area data can be manually dissolved
#' to remove overlapping areas (e.g. using [wdpa_dissolve()]).
#' For an example of processing a large protected area dataset,
#' please see the vignette.
#'
#' @return [sf::sf()] object.
#'
#' @seealso [wdpa_fetch()], [wdpa_dissolve()].
#'
#' @references
#' Butchart SH, Clarke M, Smith RJ, Sykes RE, Scharlemann JP,
#' Harfoot M, ... & Brooks TM (2015) Shortfalls and solutions for
#' meeting national and global conservation area targets.
#' *Conservation Letters*, **8**: 329--337.
#'
#' Coetzer KL, Witkowski ET, & Erasmus BF (2014) Reviewing
#' Biosphere Reserves globally: Effective conservation action or bureaucratic
#' label? *Biological Reviews*, **89**: 82--104.
#'
#' Deguignet M, Arnell A, Juffe-Bignoli D, Shi Y, Bingham H, MacSharry B &
#' Kingston N (2017) Measuring the extent of overlaps in protected area
#' designations. *PloS One*, **12**: e0188681.
#'
#' Runge CA, Watson JEM, Butchart HM, Hanson JO, Possingham HP & Fuller RA
#' (2015) Protected areas and global conservation of migratory birds.
#' *Science*, **350**: 1255--1258.
#'
#' Protected Planet (2021) Calculating protected and OECM area coverage.
#' Available at:
#' <https://www.protectedplanet.net/en/resources/calculating-protected-area-coverage>.
#'
#' Visconti P, Di Marco M, Alvarez-Romero JG, Januchowski-Hartley SR,
#' Pressey RL, Weeks R & Rondinini C (2013) Effects of errors and gaps in
#' spatial data sets on assessment of conservation progress.
#' *Conservation Biology*, **27**: 1000--1010.
#'
#' @examples
#' \dontrun{
#' # fetch data for Liechtenstein
#' lie_raw_data <- wdpa_fetch("LIE", wait = TRUE)
#'
#' # clean data
#' lie_data <- wdpa_clean(lie_raw_data)
#'
#' # plot cleaned dataset
#' plot(lie_data)
#'
#' }
#' @export
wdpa_clean <- function(x,
                       crs = paste("+proj=cea +lon_0=0 +lat_ts=30 +x_0=0",
                                   "+y_0=0 +datum=WGS84 +ellps=WGS84",
                                   "+units=m +no_defs"),
                       exclude_unesco = TRUE,
                       retain_status = c("Designated", "Inscribed",
                                         "Established"),
                       snap_tolerance = 1,
                       simplify_tolerance = 0,
                       geometry_precision = 1500,
                       erase_overlaps = TRUE,
                       verbose = interactive()) {
  # check arguments are valid
  ## display message
  if (isTRUE(verbose)) {
    cli::cli_progress_step("initializing")
  }
  ## simple arguments
  assertthat::assert_that(
    inherits(x, "sf"), nrow(x) > 0,
    all(assertthat::has_name(x, c("ISO3", "STATUS", "DESIG_ENG", "REP_AREA",
                                  "MARINE", "PA_DEF"))),
    assertthat::is.string(crs) || assertthat::is.count(crs),
    assertthat::is.number(snap_tolerance), isTRUE(snap_tolerance >= 0),
    assertthat::is.number(simplify_tolerance),
    isTRUE(simplify_tolerance >= 0),
    assertthat::is.count(geometry_precision),
    assertthat::is.flag(erase_overlaps),
    assertthat::is.flag(exclude_unesco),
    assertthat::is.flag(verbose))
  ## retain status
  assertthat::assert_that(inherits(retain_status, c("character", "NULL")))
  if (is.character(retain_status)) {
    assertthat::assert_that(
      assertthat::noNA(retain_status),
      all(retain_status %in% c(
        "Proposed", "Inscribed", "Adopted", "Designated", "Established")))
  }
  ## check that x is in wgs1984
  assertthat::assert_that(sf::st_crs(x) == sf::st_crs(4326),
    msg = "argument to x is not longitude/latitude (i.e. EPSG:4326)")
  # clean data
  ## exclude areas based on status
  if (is.null(retain_status)) {
    if (verbose) {
      cli::cli_progress_step("retaining areas regardless of status")
    }
  } else {
    if (verbose) {
      cli::cli_progress_step("retaining only areas with specified statuses")
    }
    x <- x[which(x$STATUS %in% retain_status), ]
  }
  ## remove UNESCO sites if needed
  if (exclude_unesco) {
    if (verbose) {
      cli::cli_progress_step("removing UNESCO Biosphere Reserves")
    }
    x <- x[x$DESIG_ENG != "UNESCO-MAB Biosphere Reserve", ]
  } else {
    if (verbose) {
      cli::cli_progress_step("retaining UNESCO Biosphere Reserves")
    }
  }
  ## assign column indicating geometry type
  is_point <- vapply(sf::st_geometry(x), inherits, logical(1),
                     c("POINT", "MULTIPOINT"))
  x$GEOMETRY_TYPE <- "POLYGON"
  x$GEOMETRY_TYPE[is_point] <- "POINT"
  ## remove protected areas represented as points that do not have
  ## a reported area
  if (verbose) {
    cli::cli_progress_step("removing points with no reported area")
  }
  x <- x[!(x$GEOMETRY_TYPE == "POINT" & !is.finite(x$REP_AREA)), ]
  ## wrap dateline issues
  if (verbose) {
    cli::cli_progress_step("wrapping dateline")
  }
  x <- sf::st_set_precision(x, geometry_precision)
  x <- suppressWarnings(sf::st_wrap_dateline(x,
    options = c("WRAPDATELINE=YES", "DATELINEOFFSET=180")))
  x <- x[!sf::st_is_empty(x), ]
  x <- extract_polygons_and_points(x)
  x <- sf::st_set_precision(x, geometry_precision)
  ## repair geometry
  if (verbose) {
    cli::cli_progress_step("repairing geometry")
  }
  x <- st_repair_geometry(x, geometry_precision)
  ## reproject data
  if (verbose) {
    cli::cli_progress_step("reprojecting data")
  }
  x <- sf::st_set_precision(x, geometry_precision)
  x <- sf::st_transform(x, crs)
  x <- sf::st_set_precision(x, geometry_precision)
  ## repair geometry again
  if (verbose) {
    cli::cli_progress_step("repairing geometry")
  }
  x <- st_repair_geometry(x, geometry_precision)
  ## buffer polygons by zero to fix any remaining issues
  x_polygons_pos <- which(x$GEOMETRY_TYPE == "POLYGON")
  if (length(x_polygons_pos) > 0) {
    if (verbose) {
      cli::cli_progress_step("further geometry fixes (i.e. buffering by zero)")
    }
    x_polygons_data <- x[x_polygons_pos, ]
    x_polygons_data <- sf::st_set_precision(x_polygons_data,
                                            geometry_precision)
    x_polygons_data <- sf::st_buffer(x_polygons_data, 0)
    x <- rbind(x[which(x$GEOMETRY_TYPE == "POINT"), ], x_polygons_data)
    x <- sf::st_set_precision(x, geometry_precision)
  }
  ## buffer areas represented as points
  x_points_pos <- which(x$GEOMETRY_TYPE == "POINT")
  if (length(x_points_pos) > 0) {
    if (verbose) {
      cli::cli_progress_step("buffering points to reported area")
    }
    x_points_data <- x[x_points_pos, ]
    x_points_data <- sf::st_buffer(x_points_data,
                                   sqrt((x_points_data$REP_AREA * 1e6) / pi))
    if (any(x$GEOMETRY_TYPE == "POLYGON")) {
      x <- rbind(x[which(x$GEOMETRY_TYPE == "POLYGON"), ], x_points_data)
    } else {
      x <- x_points_data
    }
    x <- sf::st_set_precision(x, geometry_precision)
  }
  ## return empty dataset if no valid non-empty geometries remain
  if (all(sf::st_is_empty(x))) {
    if (verbose) {
      cli::cli_alert_warning(
        "no valid non-empty geometries remain, returning empty dataset")
    }
    return(empty_wdpa_dataset(sf::st_crs(x)))
  }
  ## simplify geometries
  if (simplify_tolerance > 0) {
    if (verbose) {
      cli::cli_progress_step("simplifying geometry")
    }
    x <- sf::st_set_precision(x, geometry_precision)
    x <- sf::st_simplify(x, TRUE, simplify_tolerance)
    x <- sf::st_set_precision(x, geometry_precision)
    x <- x[!sf::st_is_empty(x), ]
    x <- suppressWarnings(sf::st_collection_extract(x, "POLYGON"))
    x <- sf::st_set_precision(x, geometry_precision)
  }
  ## repair geometry again
  if (verbose) {
    cli::cli_progress_step("repairing geometry")
  }
  x <- st_repair_geometry(x, geometry_precision)
  ## snap geometry to grid
  if (snap_tolerance > 0) {
    if (verbose) {
      cli::cli_progress_step("snapping geometry to tolerance")
    }
    x <- sf::st_set_precision(x, geometry_precision)
    x <- lwgeom::st_snap_to_grid(x, snap_tolerance)
    x <- sf::st_set_precision(x, geometry_precision)
  }
  ## repair geometry again
  if (verbose) {
    cli::cli_progress_step("repairing geometry")
  }
  x <- st_repair_geometry(x, geometry_precision)
  ## format columns
  if (verbose) {
    cli::cli_progress_step("formatting attribute data")
  }
  ### MARINE field
  x$MARINE[x$MARINE == "0"] <- "terrestrial"
  x$MARINE[x$MARINE == "1"] <- "partial"
  x$MARINE[x$MARINE == "2"] <- "marine"
  ### STATUS_YR field
  x$STATUS_YR[x$STATUS_YR == 0] <- NA_real_
  ### NO_TK_AREA field
  x$NO_TK_AREA[x$NO_TAKE %in% c("Not Reported", "Not Applicable")] <- NA_real_
  ### PA_DEF field
  x$PA_DEF <- as.character(x$PA_DEF)
  x$PA_DEF[x$PA_DEF == "0"] <- "OECM"
  x$PA_DEF[x$PA_DEF == "1"] <- "PA"
  if (verbose) {
    cli::cli_progress_done()
  }
  ## remove overlaps in data
  if (erase_overlaps && isTRUE(nrow(x) > 1)) {
    x$IUCN_CAT <- factor(as.character(x$IUCN_CAT),
                         levels = c("Ia", "Ib", "II", "III", "IV", "V", "VI",
                                    "Not Reported", "Not Applicable",
                                    "Not Assigned"))
    x <- sf::st_set_precision(x, geometry_precision)
    x <- st_erase_overlaps(x[order(x$IUCN_CAT, x$STATUS_YR), ], verbose)
    x$IUCN_CAT <- as.character(x$IUCN_CAT)
    x <- x[!sf::st_is_empty(x), ]
    x <- suppressWarnings(sf::st_collection_extract(x, "POLYGON"))
    x <- sf::st_set_precision(x, geometry_precision)
  }
  ## remove slivers
  if (verbose) {
    cli::cli_progress_step("removing slivers")
  }
  x <- x[as.numeric(sf::st_area(x)) > 0.1, ]
  ## calculate area in square kilometers
  if (verbose) {
    cli::cli_progress_step("calculating spatial statistics")
  }
  areas <- as.numeric(sf::st_area(x)) * 1e-6
  x$AREA_KM2 <- as.numeric(areas)
  ## move geometry to last column
  if ((!"geometry" %in% names(x))) {
    geom_col <- attr(x, "sf_column")
    attr(x, "sf_column") <- "geometry"
    names(x)[names(x) == geom_col] <- "geometry"
  }
  x <- x[, c(setdiff(names(x), "geometry"), "geometry")]
  # return cleaned data
  x
}
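# A minimal sketch (not run) of the workflow recommended above for cleaning
# very large extracts, assuming sufficient memory and processing time are
# available:
#
#   x <- wdpa_fetch("global", wait = TRUE)
#   x <- wdpa_clean(x, erase_overlaps = FALSE)
#   x <- wdpa_dissolve(x)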
/scratch/gouwar.j/cran-all/cranData/wdpar/R/wdpa_clean.R
#' @include internal.R
NULL

#' Dissolve data
#'
#' Create a dataset of spatial boundaries that contains no
#' overlapping geometries.
#'
#' @inheritParams st_erase_overlaps
#' @inheritParams wdpa_clean
#'
#' @details
#' This function is basically a wrapper for [sf::st_union()].
#' It also contains additional parameters to assist with processing
#' large and complex geometry data.
#'
#' @inherit st_erase_overlaps return
#'
#' @seealso [sf::st_union()], [st_erase_overlaps()].
#'
#' @examples
#' # create data
#' pl1 <- sf::st_polygon(list(matrix(c(0, 0, 2, 0, 1, 1, 0, 0), byrow = TRUE,
#'                                   ncol = 2))) * 100
#' pl2 <- sf::st_polygon(list(matrix(c(0, 0.5, 2, 0.5, 1, 1.5, 0, 0.5),
#'                                   byrow = TRUE, ncol = 2))) * 100
#' pl3 <- sf::st_polygon(list(matrix(c(0, 1.25, 2, 1.25, 1, 2.5, 0, 1.25),
#'                                   byrow = TRUE, ncol = 2))) * 100
#' x <- sf::st_sf(order = c("A", "B", "C"),
#'                geometry = sf::st_sfc(list(pl1, pl2, pl3), crs = 3395))
#'
#' # dissolve data
#' y <- wdpa_dissolve(x)
#'
#' # plot data for visual comparison
#' par(mfrow = c(1, 2))
#' plot(sf::st_geometry(x), xlim = c(0, 200), ylim = c(0, 250),
#'      main = "original", col = "transparent")
#' plot(sf::st_geometry(y), xlim = c(0, 200), ylim = c(0, 250),
#'      main = "dissolved", col = "transparent")
#' @export
wdpa_dissolve <- function(x, geometry_precision = 1500) {
  # assert valid arguments
  assertthat::assert_that(
    inherits(x, "sf"),
    assertthat::is.count(geometry_precision),
    assertthat::noNA(geometry_precision)
  )
  # repair geometry
  x <- sf::st_set_precision(x, geometry_precision)
  x <- sf::st_make_valid(x)
  x <- x[!sf::st_is_empty(x), ]
  x <- extract_polygons_and_points(x)
  # dissolve geometry
  x <- sf::st_set_precision(x, geometry_precision)
  x <- sf::st_union(x)
  x <- sf::st_sf(id = 1, geometry = x)
  # return result
  x
}
/scratch/gouwar.j/cran-all/cranData/wdpar/R/wdpa_dissolve.R
#' @include internal.R wdpa_url.R wdpa_latest_version.R
NULL

#' Fetch data
#'
#' Fetch data from [Protected Planet](https://www.protectedplanet.net/en).
#' Specifically, data are downloaded from the
#' World Database on Protected Areas
#' (WDPA) and the World Database on Other Effective Area-Based Conservation
#' Measures (WDOECM).
#' **Note that data are downloaded assuming non-commercial use.**
#'
#' @inheritParams wdpa_read
#' @inheritParams wdpa_url
#'
#' @param x `character` country for which to download data. This argument
#' can be the name of the country (e.g. `"Liechtenstein"`) or the
#' ISO-3 code for the country (e.g. `"LIE"`). This argument can also
#' be set to `"global"` to download all of the protected areas available
#' in the database (approximately 1.1 GB).
#'
#' @param wait `logical` if data is not immediately available for download
#' should the session be paused until it is ready for download? If argument
#' to `wait` is `FALSE` and the data is not ready then `NA`
#' will be returned. Defaults to `FALSE`.
#'
#' @param download_dir `character` folder path to download the data.
#' Defaults to a temporary directory. To avoid downloading the
#' same dataset multiple times, it is recommended to use a persistent
#' directory (e.g. `rappdirs::user_data_dir("wdpar")`; see Examples below).
#'
#' @param force_download `logical` if the data has previously been
#' downloaded and is available at argument to `download_dir`, should a
#' fresh copy be downloaded? Defaults to `FALSE`.
#'
#' @param check_version `logical` if the data are being imported from
#' the argument to `download_dir`, should the data be checked to see
#' if the version number matches the latest version available online?
#' Defaults to `TRUE`.
#'
#' @param verbose `logical` should progress on downloading data be
#' reported? Defaults to `TRUE` in an interactive session, otherwise
#' `FALSE`.
#'
#' @details
#' This function obtains and imports data from Protected Planet.
#' By default (per `force_download = FALSE`), it will check to see if the
#' data have already been downloaded and, if so, simply import the
#' previously downloaded data.
#' It will also check to see if a newer version of the dataset is available
#' on Protected Planet (per `check_version = TRUE`) and, if so, provide an
#' alert.
#' If the latest version is not required, this alert can be safely ignored.
#' However, if the latest version of the data is required,
#' then using `force_download = TRUE` will ensure that the latest version
#' is always obtained.
#' After importing the data, it is strongly recommended to clean the data
#' prior to analysis (see [wdpa_clean()]).
#'
#' @section Data source:
#' The `PA_DEF` column indicates the data source for individual
#' areas and sites that comprise the imported dataset.
#' Specifically, data obtained through the World Database on Protected Areas
#' (WDPA) are indicated with a value of `1` in the `PA_DEF` column.
#' Additionally, data obtained through the World Database on Other Effective
#' Area-Based Conservation Measures (WDOECM) are indicated with a value of
#' `0` in the `PA_DEF` column.
#' For more details on data conventions, please consult the official manual
#' (UNEP-WCMC 2019).
#'
#' @section Troubleshooting:
#' This function will sometimes return the error message
#' `PhantomJS signals port = 4567 is already in use`.
#' This error message can occur when you have previously run the function
#' and it threw an error, or it terminated early.
#' It can also occur when attempting to run the function in multiple
#' sessions on the same computer.
#' To address this issue, you will need to restart your computer.
#'
#' @return [sf::sf()] object.
#'
#' @seealso [wdpa_clean()], [wdpa_read()],
#' [wdpa_url()], [countrycode::countrycode()].
#'
#' @references
#' UNEP-WCMC (2019). User Manual for the World Database on Protected Areas
#' and world database on other effective area-based conservation measures:
#' 1.6. UNEP-WCMC: Cambridge, UK. Available at:
#' <https://wcmc.io/WDPA_Manual>.
#'
#' @examples
#' \dontrun{
#' # fetch data for Liechtenstein
#' lie_raw_data <- wdpa_fetch("Liechtenstein", wait = TRUE)
#'
#' # print data
#' print(lie_raw_data)
#'
#' # plot data
#' plot(lie_raw_data)
#'
#' # fetch data for Liechtenstein using the ISO3 code
#' lie_raw_data <- wdpa_fetch("LIE", wait = TRUE)
#'
#' # since data are saved in a temporary directory by default,
#' # a persistent directory can be specified to avoid having to download the
#' # same dataset every time the R session is restarted
#' lie_raw_data <- wdpa_fetch("LIE", wait = TRUE,
#'                            download_dir = rappdirs::user_data_dir("wdpar"))
#'
#' # data for multiple countries can be downloaded separately and combined,
#' # this is useful to avoid having to download the global dataset
#' ## load packages to easily merge datasets
#' library(dplyr)
#' library(tibble)
#'
#' ## define country names to download
#' country_codes <- c("LIE", "MHL")
#'
#' ## download data for each country
#' mult_data <- lapply(country_codes, wdpa_fetch, wait = TRUE)
#'
#' ## merge datasets together
#' mult_dat <- st_as_sf(as_tibble(bind_rows(mult_data)))
#'
#' ## print data
#' print(mult_dat)
#' }
#' @export
wdpa_fetch <- function(x, wait = FALSE,
                       download_dir = tempdir(),
                       force_download = FALSE,
                       check_version = TRUE,
                       n = NULL,
                       page_wait = 2,
                       verbose = interactive()) {
  # check that arguments are valid
  ## check that classes are correct
  dir.create(download_dir, showWarnings = FALSE, recursive = TRUE)
  assertthat::assert_that(
    assertthat::is.string(x),
    assertthat::is.dir(download_dir),
    assertthat::is.flag(force_download),
    assertthat::is.flag(verbose),
    assertthat::is.flag(check_version),
    identical(x, "global") || assertthat::is.string(country_code(x)))
  # try to find locally on system
  file_path <- try(wdpa_file(x, download_dir = download_dir), silent = TRUE)
  # fetch data
  if (force_download || inherits(file_path, "try-error")) {
    ## check for internet connection
    if (!is_online()) {
      #nocov start
      stop(paste0("data not found in download_dir, and no internet ",
                  "connection to download it."))
      #nocov end
    }
    ## find latest version of the dataset
    current_month_year <- wdpa_latest_version()
    ## find the download link and set file path to save the data
    download_url <- wdpa_url(x, wait = wait, page_wait = page_wait)
    ## note that file name conventions on protectedplanet.net have changed
    ## (detected on 8th Oct 2020) and so file names are manually changed
    ## to follow the previous convention
    if (!identical(x, "global")) {
      file_name <- paste0("WDPA_", current_month_year, "_",
                          country_code(x), "-shapefile.zip")
    } else {
      file_name <- paste0("WDPA_", current_month_year, "_Public.gdb.zip")
    }
    file_path <- file.path(download_dir, file_name)
    ## download the data
    if (!file.exists(file_path) || force_download) {
      download_file(download_url, file_path, quiet = !verbose)
      if (verbose) message("\n")
    }
    ## verify that the file exists
    if (!file.exists(file_path))
      stop("downloading data failed") #nocov
  } else {
    # check version of available data
    if (isTRUE(check_version)) {
      ## if internet available...
      if (is_online()) {
        ### parse month-year from input file
        input_version <- wdpa_version(file_path)
        input_file_date <- convert_wdpa_version_to_POSIXct(input_version)
        ### parse month-year from latest release
        current_version <- wdpa_latest_version()
        current_file_date <- convert_wdpa_version_to_POSIXct(current_version)
        ### throw warning if out of date
        if (input_file_date < current_file_date) {
          #nocov start
          cli::cli_alert_warning(
            paste0(
              "importing local data (version ",
              format(input_file_date, "%b %Y"),
              "); use \"force_download=TRUE\" if you need the latest version."
            )
          )
          #nocov end
        }
      } else {
        ## if internet not available
        # nocov start
        cli::cli_alert_warning(
          "cannot verify if version on disk is up to date."
        )
        # nocov end
      }
    }
  }
  # import the data
  wdpa_read(file_path, n)
}
/scratch/gouwar.j/cran-all/cranData/wdpar/R/wdpa_fetch.R
#' @include internal.R wdpa_url.R
NULL

#' Query latest version
#'
#' Find the latest version of the combined
#' World Database on Protected Areas
#' (WDPA) and World Database on Other Effective Area-Based Conservation
#' Measures (WDOECM) dataset.
#' This is a character identifier representing the month and year (e.g.
#' `Sep2020`) when the data were released.
#'
#' @details The version number is determined using a web address where the
#' global dataset is available. For specific details, please refer to
#' the source code for this function.
#'
#' @return `character` version of the dataset.
#'
#' @examples
#' \dontrun{
#' # find the latest version
#' wdpa_latest_version()
#' }
#'
#' @export
wdpa_latest_version <- function() {
  assertthat::assert_that(is_online())
  download_url <- "http://wcmc.io/wdpa_current_release"
  file_name <- basename(httr::HEAD(download_url)$url)[[1]]
  wdpa_version(file_name)
}
/scratch/gouwar.j/cran-all/cranData/wdpar/R/wdpa_latest_version.R
#' @include internal.R
NULL

#' Read data
#'
#' Read data obtained from
#' [Protected Planet](https://www.protectedplanet.net/en).
#' Specifically, this function is designed to import data obtained from
#' the World Database on Protected Areas
#' (WDPA) and the World Database on Other Effective Area-Based Conservation
#' Measures (WDOECM).
#'
#' @param x `character` file name for a zip archive file downloaded from
#' <https://www.protectedplanet.net/en>.
#'
#' @param n `integer` number of records to import per data source.
#' Defaults to `NULL` such that all data are imported.
#'
#' @details
#' This function assumes that the data have previously been downloaded to
#' your computer, and imports the data from the zip archive.
#' After importing the data, it is strongly recommended to clean the data
#' prior to analysis (see [wdpa_clean()]).
#'
#' @inheritSection wdpa_fetch Data source
#'
#' @return [sf::sf()] object.
#'
#' @seealso [wdpa_fetch()], [wdpa_clean()].
#'
#' @inherit wdpa_fetch references
#'
#' @examples
#' \dontrun{
#' # find url for Liechtenstein dataset
#' download_url <- wdpa_url("LIE", wait = TRUE)
#'
#' # path to save zipfile with data
#' path <- tempfile(pattern = "WDPA_", fileext = ".zip")
#'
#' # download zipfile
#' result <- httr::GET(download_url, httr::write_disk(path))
#'
#' # load data
#' lie_raw_data <- wdpa_read(path)
#'
#' # plot data
#' plot(lie_raw_data)
#' }
#' @export
wdpa_read <- function(x, n = NULL) {
  # validate arguments
  assertthat::assert_that(assertthat::is.string(x),
                          assertthat::is.readable(x),
                          assertthat::has_extension(x, "zip"),
                          startsWith(basename(x), "WDPA_"),
                          file.exists(x),
                          inherits(n, c("numeric", "NULL")))
  if (!is.null(n)) {
    assertthat::assert_that(assertthat::is.count(n), assertthat::noNA(n))
  }
  # unzip the folder
  tdir <- file.path(tempdir(), basename(tempfile()))
  dir.create(tdir, showWarnings = FALSE, recursive = TRUE)
  utils::unzip(x, exdir = tdir)
  # determine version
  month_year <- strsplit(basename(x), "_", fixed = TRUE)[[1]][[2]]
  # load data
  if (grepl("Public", basename(x))) {
    ## load global data
    ### find geodatabase(s)
    gdb_paths <- dir(tdir, "^.*\\.gdb$", recursive = TRUE,
                     full.names = TRUE, include.dirs = TRUE)
    ### import data from geodatabase(s)
    if (length(gdb_paths) == 1) {
      wdpa_lyrs <- sf::st_layers(gdb_paths)
      point_path <- grep("point", wdpa_lyrs$name, value = TRUE,
                         ignore.case = TRUE)
      polygon_path <- grep("poly", wdpa_lyrs$name, value = TRUE,
                           ignore.case = TRUE)
      assertthat::assert_that(
        length(point_path) == 1,
        length(polygon_path) == 1,
        !identical(polygon_path, point_path),
        msg = "global data format not recognized.")
      wdpa_point_data <- read_sf_n(gdb_paths, point_path, n)
      wdpa_polygon_data <- read_sf_n(gdb_paths, polygon_path, n)
    } else if (length(gdb_paths) == 2) {
      ### WDPA <= Dec2020
      #nocov start
      point_path <- grep("point", gdb_paths, value = TRUE,
                         ignore.case = TRUE)
      polygon_path <- grep("poly", gdb_paths, value = TRUE,
                           ignore.case = TRUE)
      assertthat::assert_that(
        length(point_path) == 1,
        length(polygon_path) == 1,
        !identical(polygon_path, point_path),
        msg = "global data format not recognized.")
      wdpa_point_data <- read_sf_n(point_path,
                                   "WDPA_WDOECM_wdpa_gdb_points", n)
      wdpa_polygon_data <- read_sf_n(polygon_path,
                                     "WDPA_WDOECM_wdpa_gdb_polygons", n)
      #nocov end
    } else {
      stop("global data format not recognized.") #nocov
    }
    ## merge point and polygon data together
    polygon_matching_cols <- which(names(wdpa_polygon_data) %in%
                                   names(wdpa_point_data))
    point_matching_cols <- which(names(wdpa_point_data) %in%
                                 names(wdpa_polygon_data))
    wdpa_polygon_data <- wdpa_polygon_data[, polygon_matching_cols]
    wdpa_point_data <- wdpa_point_data[, point_matching_cols]
    wdpa_data <- rbind(wdpa_polygon_data, wdpa_point_data)
  } else {
    ## extract any data stored in zip files
    zip_path <- dir(tdir, "^.*\\.zip$", recursive = TRUE, full.names = TRUE)
    if (length(zip_path) > 0)
      result <- Map(utils::unzip, zip_path,
                    exdir = gsub(".zip", "", zip_path, fixed = TRUE))
    ## import shapefile data
    shapefile_path <- dir(tdir, "^.*\\.shp$", recursive = TRUE,
                          full.names = TRUE)
    wdpa_data <- lapply(shapefile_path, read_sf_n, n = n)
    ## merge shapefile data together
    if (length(wdpa_data) > 1) {
      col_names <- Reduce(base::intersect, lapply(wdpa_data, names))
      wdpa_data <- lapply(wdpa_data, function(x) x[, col_names])
      wdpa_data <- do.call(rbind, wdpa_data)
    } else {
      wdpa_data <- wdpa_data[[1]]
    }
  }
  # cleanup
  unlink(tdir)
  # return data
  return(wdpa_data)
}
/scratch/gouwar.j/cran-all/cranData/wdpar/R/wdpa_read.R
#' @include internal.R
NULL

#' Download URL
#'
#' Obtain a URL to download data from
#' [Protected Planet](https://www.protectedplanet.net/en).
#' Specifically, the URL provides access to data available through
#' the World Database on Protected Areas
#' (WDPA) and the World Database on Other Effective Area-Based Conservation
#' Measures (WDOECM).
#' **Note that data are accessed assuming non-commercial use.**
#'
#' @param x `character` country for desired data. This argument
#' can be the name of the country (e.g. `"Liechtenstein"`) or the
#' ISO-3 code for the country (e.g. `"LIE"`). This argument can also
#' be set to `"global"` to obtain the URL for the global dataset.
#'
#' @param wait `logical` if data is not immediately available for download
#' should the session be paused until it is ready for download? If argument
#' to `wait` is `FALSE` and the data is not ready then an error
#' will be thrown. Defaults to `FALSE`.
#'
#' @param page_wait `numeric` number of seconds to wait for web pages
#' to load when finding the download URL on
#' [Protected Planet](https://www.protectedplanet.net/en).
#' Defaults to 2.
#' Since the process of finding a download URL requires
#' navigating through multiple web pages,
#' the default argument means that the function will take at least 8
#' seconds to complete.
#' Users on slow internet connections may experience issues
#' with the default argument (e.g. resulting in an error
#' containing the message `Error: Summary: NoSuchElement`).
#' To avoid this, users can try specifying a greater value (e.g. 5 seconds).
#'
#' @return `character` URL to download the data.
#'
#' @seealso [wdpa_fetch()], [countrycode::countrycode()].
#'
#' @examples
#' \dontrun{
#' # obtain url for New Zealand data
#' nzl_url <- wdpa_url("New Zealand", wait = TRUE)
#' print(nzl_url)
#'
#' # obtain url for New Zealand data using its ISO3 code
#' nzl_url <- wdpa_url("NZL", wait = TRUE)
#' print(nzl_url)
#'
#' # obtain url for global data
#' global_url <- wdpa_url("global")
#' print(global_url)
#' }
#' @export
wdpa_url <- function(x, wait = FALSE, page_wait = 2) {
  # validate arguments
  assertthat::assert_that(
    assertthat::is.string(x),
    assertthat::is.flag(wait),
    assertthat::is.count(page_wait),
    assertthat::noNA(page_wait),
    is_online()
  )
  assertthat::assert_that(
    has_phantomjs(),
    msg = paste0(
      "cannot find PhantomJS; please install it using: ",
      "webdriver::install_phantomjs()"
    )
  )
  # declare hidden function
  try_and_find_url <- function(x) {
    ## initialize web driver
    result <- suppressMessages(tryCatch({
      ## initialize URL
      url <- character(0)
      ## initialize driver
      pjs <- start_phantomjs()
      rd <- webdriver::Session$new(port = pjs$port)
      ## navigate to download web page
      rd$go(paste0("https://www.protectedplanet.net/country/", x))
      Sys.sleep(page_wait) # wait for page to load
      elem <- rd$findElement(css = ".download__trigger")
      elem$click()
      Sys.sleep(page_wait) # wait for page to load
      elem <- rd$findElement(css = "li:nth-child(2) .popup__link")
      elem$click()
      Sys.sleep(page_wait) # wait for dialog to open
      elem <- rd$findElement(css = ".modal__link-button")
      elem$click()
      Sys.sleep(page_wait) # wait for dialog to open
      ## extract html for modal
      src <- xml2::read_html(rd$getSource()[[1]][[1]], encoding = "UTF-8")
      divs <- xml2::xml_find_all(src, ".//div")
      divs <- divs[which(xml2::xml_attr(divs, "class") == "modal__content")]
      ## parse download link
      attrs <- xml2::xml_attr(xml2::xml_find_all(divs, ".//a"), "href")
      url <- grep("shp.zip", attrs, fixed = TRUE, value = TRUE)
    }, finally = {
      ## clean up web driver
      try(rd$delete(), silent = TRUE)
      try(rd$delete(), silent = TRUE)
      try(stop_phantomjs(pjs), silent = TRUE)
      try(stop_phantomjs(pjs), silent = TRUE)
    }))
    ## prepare output
    if (length(url) == 0)
      return(NA_character_) #nocov
    return(url)
  }
  # find url
  if (x == "global") {
    out <- "http://wcmc.io/wdpa_current_release"
  } else {
    ## convert x to country ISO3 code
    x <- country_code(x)
    ## check if data is ready for download
    attempted_url <- try_and_find_url(x)
    ## throw error if not ready and not waiting
    #nocov start
    if (is.na(attempted_url) && !wait) {
      stop(paste("data is not yet available for download; try again later",
                 "or use wait=TRUE"))
    } else {
      ## otherwise check for url in 5 minute increments
      while (is.na(attempted_url)) {
        Sys.sleep(60 * 5)
        attempted_url <- try_and_find_url(x)
      }
      ## now that data is available, store the url
      out <- attempted_url
    }
    #nocov end
  }
  # return url
  return(out)
}

start_phantomjs <- function() {
  # initialize phantomjs
  if (
    identical(.Platform$OS.type, "unix") &&
    identical(Sys.getenv("OPENSSL_CONF"), "")
  ) {
    withr::with_envvar(
      list("OPENSSL_CONF" = "/etc/ssl"),
      pjs <- webdriver::run_phantomjs()
    )
  } else {
    pjs <- suppressMessages(webdriver::run_phantomjs())
  }
  # return object
  pjs
}

stop_phantomjs <- function(pjs) {
  try(pjs$process$kill(), silent = TRUE)
  try(pjs$process$kill(), silent = TRUE)
}

has_phantomjs <- function() {
  pjs <- suppressMessages(try(start_phantomjs(), silent = TRUE))
  on.exit(suppressMessages(stop_phantomjs(pjs)))
  !inherits(pjs, "try-error")
}
/scratch/gouwar.j/cran-all/cranData/wdpar/R/wdpa_url.R
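The link-scraping logic in `wdpa_url()` ultimately reduces to one xml2 step: find the modal's `<div>`, then keep the anchor whose `href` contains `shp.zip`. A minimal sketch of that step on a hand-written HTML snippet (the snippet and URL below are invented for illustration):

```r
library(xml2)
# hypothetical stand-in for the page source returned by rd$getSource()
src <- read_html(paste0(
  '<div class="modal__content">',
  '<a href="https://example.org/WDPA_MLT_shp.zip">SHP</a>',
  '<a href="https://example.org/WDPA_MLT_csv.zip">CSV</a>',
  '</div>'))
# keep only the modal content division
divs <- xml_find_all(src, ".//div")
divs <- divs[which(xml_attr(divs, "class") == "modal__content")]
# extract anchor hrefs and keep the shapefile link
attrs <- xml_attr(xml_find_all(divs, ".//a"), "href")
grep("shp.zip", attrs, fixed = TRUE, value = TRUE)
#> [1] "https://example.org/WDPA_MLT_shp.zip"
```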
.onLoad <- function(libname, pkgname) { # import the codelist dataset from countrycode package utils::data("codelist", package = "countrycode") codelist <- codelist # return success invisible() }
/scratch/gouwar.j/cran-all/cranData/wdpar/R/zzz.R
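The `.onLoad()` hook above caches `countrycode::codelist` so that country names can later be matched to ISO-3 codes (the internal `country_code()` helper used by `wdpa_url()` presumably relies on it). The public `countrycode()` function demonstrates the same lookup:

```r
library(countrycode)
# resolve a country name to the ISO-3 code used in Protected Planet URLs
countrycode("Malta", origin = "country.name", destination = "iso3c")
#> [1] "MLT"
```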
--- title: "wdpar: Interface to the World Database on Protected Areas" author: "Jeffrey O. Hanson" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: false fig_caption: true self_contained: true fontsize: 11pt documentclass: article vignette: > %\VignetteIndexEntry{wdpar: Interface to the World Database on Protected Areas} %\VignetteEngine{knitr::rmarkdown_notangle} --- ```{r, include = FALSE} h = 3.5 w = 3.5 is_check <- ("CheckExEnv" %in% search()) || any(c("_R_CHECK_TIMINGS_", "_R_CHECK_LICENSE_") %in% names(Sys.getenv())) knitr::opts_chunk$set(fig.align = "center", eval = !is_check, purl = !is_check, root.dir = normalizePath("../.."), fig.height = 4.5, fig.width = 4.5) ``` ```{r, include = FALSE} devtools::load_all() ``` ### Introduction [Protected Planet](https://www.protectedplanet.net/en) provides the most comprehensive data for conservation areas worldwide. Specifically, it provides the World Database on Protected Areas (WDPA) and the World Database on Other Effective Area-Based Conservation Measures (WDOECM). These databases are used to monitor the performance of existing protected areas, and identify priority areas for future conservation efforts. Additionally, these databases receive monthly updates from government agencies and non-governmental organizations. However, they are associated with [several issues that need to be addressed prior to analysis](https://www.protectedplanet.net/en/resources/calculating-protected-area-coverage) and the dynamic nature of these databases means that the entire data cleaning process needs to be repeated after obtaining a new version. The _wdpar R_ package provides an interface to data available on [Protected Planet](https://www.protectedplanet.net/en). Specifically, it can be used to automatically obtain data from the [World Database on Protected Areas (WDPA)](https://www.protectedplanet.net/en/thematic-areas/wdpa?tab=WDPA) and the [World Database on Other Effective Area-Based Conservation Measures (WDOECM)](https://www.protectedplanet.net/en/thematic-areas/oecms). It also provides methods for cleaning data from these databases following best practices (outlined in Butchart *et al.* 2015; [Protected Planet 2021](https://www.protectedplanet.net/en/resources/calculating-protected-area-coverage); Runge *et al.* 2015). In this vignette, we provide a tutorial and recommendations for using the package. ### Tutorial Here we will provide a short introduction to the _wdpar R_ package. First, we will load the _wdpar R_ package. We will also load the _dplyr_ and _ggmap R_ packages to help explore the data. ```{r, message = FALSE, warning = FALSE} # load packages library(wdpar) library(dplyr) library(ggmap) ``` Now we will download protected area data for Malta from [Protected Planet](https://www.protectedplanet.net/en). We can achieve this by specifying Malta's country name (i.e. `"Malta"`) or Malta's [ISO3 code](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3) (i.e. `"MLT"`). Since data are downloaded to a temporary directory by default, we will specify that the data should be downloaded to a persistent directory. This means that R won't have to re-download the same dataset every time we restart our R session, and R can simply re-load previously downloaded datasets as needed. ```{r} # download protected area data for Malta # (excluding areas represented as point localities) mlt_raw_pa_data <- wdpa_fetch( "Malta", wait = TRUE, download_dir = rappdirs::user_data_dir("wdpar") ) ``` Next, we will clean the data set. 
Briefly, the cleaning steps include: excluding protected areas that are not yet implemented, excluding protected areas with limited conservation value, replacing missing data codes (e.g. `"0"`) with missing data values (i.e. `NA`), replacing protected areas represented as points with circular protected areas that correspond to their reported extent, repairing any topological issues with the geometries, and erasing overlapping areas. Please note that, by default, spatial data processing is performed at a scale suitable for national scale analyses (see below for recommendations for local scale analyses). For more information on the data cleaning procedures, see `wdpa_clean()`. ```{r} # clean Malta data mlt_pa_data <- wdpa_clean(mlt_raw_pa_data) ``` After cleaning the data set, we will perform an additional step that involves clipping the terrestrial protected areas to Malta's coastline. Ideally, we would also clip the marine protected areas to Malta's Exclusive Economic Zone (EEZ) but such data are not as easy to obtain on a per country basis (but see https://www.marineregions.org/eez.php). ```{r} # download Malta boundary from Global Administrative Areas dataset file_path <- tempfile(fileext = ".rds") download.file( "https://biogeo.ucdavis.edu/data/gadm3.6/Rsf/gadm36_MLT_0_sf.rds", file_path ) # import Malta's boundary mlt_boundary_data <- readRDS(file_path) # repair any geometry issues, dissolve the border, reproject to same # coordinate system as the protected area data, and repair the geometry again mlt_boundary_data <- mlt_boundary_data %>% st_set_precision(1000) %>% sf::st_make_valid() %>% st_set_precision(1000) %>% st_combine() %>% st_union() %>% st_set_precision(1000) %>% sf::st_make_valid() %>% st_transform(st_crs(mlt_pa_data)) %>% sf::st_make_valid() # clip Malta's protected areas to the coastline mlt_pa_data <- mlt_pa_data %>% filter(MARINE == "terrestrial") %>% st_intersection(mlt_boundary_data) %>% rbind(mlt_pa_data %>% filter(MARINE == "marine") %>% st_difference(mlt_boundary_data)) %>% rbind(mlt_pa_data %>% filter(!MARINE %in% c("terrestrial", "marine"))) # recalculate the area of each protected area mlt_pa_data <- mlt_pa_data %>% mutate(AREA_KM2 = as.numeric(st_area(.)) * 1e-6) ``` Now that we have finished cleaning the data, let's preview the data. For more information on what these columns mean, please refer to the [official manual](https://www.protectedplanet.net/en/resources/wdpa-manual) (available in English, French, Spanish, and Russian). ```{r} # print first six rows of the data head(mlt_pa_data) ``` We will now reproject the data to longitude/latitude coordinates ([EPSG:4326](https://spatialreference.org/ref/epsg/wgs-84/)) for visualization purposes. ```{r} # reproject data mlt_pa_data <- st_transform(mlt_pa_data, 4326) ``` Next, we can plot a map showing the boundaries of Malta's protected area system. ```{r, message = FALSE, warning = FALSE} # download basemap for making the map bg <- get_stamenmap( unname(st_bbox(mlt_pa_data)), zoom = 8, maptype = "watercolor", force = TRUE ) # print map ggmap(bg) + geom_sf(data = mlt_pa_data, fill = "#31A35480", inherit.aes = FALSE) + theme(axis.title = element_blank()) ``` We can also create a histogram showing the year when each protected area was established. ```{r} hist( mlt_pa_data$STATUS_YR, main = "Malta's protected areas", xlab = "Year established" ) ``` Now let's calculate some statistics. We can calculate the total amount of land and ocean inside Malta's protected area system (km^2^).
```{r} # calculate total amount of area inside protected areas (km^2) statistic <- mlt_pa_data %>% as.data.frame() %>% select(-geometry) %>% group_by(MARINE) %>% summarize(area_km = sum(AREA_KM2)) %>% ungroup() %>% arrange(desc(area_km)) # print statistic print(statistic) ``` We can also calculate the percentage of land inside its protected area system that is managed under different categories (i.e. [using the protected area management categories defined by The International Union for Conservation of Nature](https://www.iucn.org/)). ```{r} # calculate percentage of land inside protected areas, by management category statistic <- mlt_pa_data %>% as.data.frame() %>% select(-geometry) %>% group_by(IUCN_CAT) %>% summarize(area_km = sum(AREA_KM2)) %>% ungroup() %>% mutate(percentage = (area_km / sum(area_km)) * 100) %>% arrange(desc(area_km)) # print statistic print(statistic) ``` We can also plot a map showing Malta's protected areas and color each area according to its management category. ```{r, message = FALSE, warning = FALSE} ggmap(bg) + geom_sf(aes(fill = IUCN_CAT), data = mlt_pa_data, inherit.aes = FALSE) + theme(axis.title = element_blank(), legend.position = "bottom") ``` ### Recommended practices for large datasets The _wdpar R_ package can be used to clean large datasets assuming that sufficient computational resources and time are available. Indeed, it can clean data spanning large countries, multiple countries, and even the full global dataset. When processing the full global dataset, it is recommended to use a computer system with at least 32 GB RAM available and to allow for at least one full day for the data cleaning procedures to complete. It is also recommended to avoid using the computer system for any other tasks while the data cleaning procedures are being completed, because they are very computationally intensive. Additionally, when processing large datasets -- and especially for the global dataset -- it is strongly recommended to disable the procedure for erasing overlapping areas. This is because the built-in procedure for erasing overlaps retains information on each protected area (e.g. IUCN category, year established) and is therefore very time consuming when processing many protected areas. Instead, when cleaning large datasets, it is recommended to run the data cleaning procedures with the procedure for erasing overlapping areas disabled (i.e. with `erase_overlaps = FALSE`). After the data cleaning procedures have completed, the protected area data can be manually dissolved to remove overlapping areas (e.g. using `wdpa_dissolve()`). For an example of these procedures, please see below. ```{r, warning = FALSE} # download protected area data for multiple countries ## (i.e.
Portugal, Spain, France) raw_pa_data <- c("PRT", "ESP", "FRA") %>% lapply(wdpa_fetch, wait = TRUE, download_dir = rappdirs::user_data_dir("wdpar")) %>% bind_rows() # clean protected area data (with procedure for erasing overlaps disabled) full_pa_data <- wdpa_clean(raw_pa_data, erase_overlaps = FALSE) # at this stage, the data could be filtered based on extra criteria (if needed) ## for example, we could subset the data to only include protected areas ## classified as IUCN category Ia or Ib sub_pa_data <- full_pa_data %>% filter(IUCN_CAT %in% c("Ia", "Ib")) # dissolve all geometries together (removing spatial overlaps) pa_data <- wdpa_dissolve(sub_pa_data) # preview data print(pa_data) # calculate total area print(st_area(pa_data)) ``` ### Recommended practices for local scale analyses The default parameters for the data cleaning procedures are well suited for national-scale analyses. Although these parameters reduce memory requirements and the time needed to complete the data cleaning procedures, they can produce protected area boundaries that appear overly "blocky" -- lacking smooth edges -- when viewed at finer scales. As such, it is strongly recommended to increase the level of spatial precision when cleaning data for local scale analyses (via the `geometry_precision` parameter of the `wdpa_clean()` function). Here we will explore the consequences of using the default parameters for the data cleaning procedures when working at a local scale. This will help illustrate why it can be important to adjust the spatial precision of the data cleaning procedures. To begin with, we will obtain data for a small protected area. Specifically, we will extract a protected area from the Malta dataset we downloaded earlier. ```{r} # find id for smallest reserve in cleaned dataset mlt_reserve_id <- mlt_pa_data$WDPAID[which.min(mlt_pa_data$AREA_KM2)] # extract the smallest reserve from the raw dataset mlt_raw_reserve_data <- mlt_raw_pa_data %>% filter(WDPAID == mlt_reserve_id) # preview data print(mlt_raw_reserve_data) # visualize data plot(mlt_raw_reserve_data[, 1]) ``` ```{r, include = FALSE} assertthat::assert_that( nrow(mlt_raw_reserve_data) == 1, msg = "failed to extract small reserve from Malta data" ) ``` We can see that the boundary for this protected area has a high level of detail. This suggests that the protected area data is available at a resolution that is sufficient to permit local scale analyses. To help understand the consequences of cleaning data with the default parameters, we will clean this dataset using the default parameters. ```{r} # clean the data with default parameters mlt_default_cleaned_reserve_data <- wdpa_clean(mlt_raw_reserve_data) # preview data print(mlt_default_cleaned_reserve_data) # visualize data plot(mlt_default_cleaned_reserve_data[, 1]) ``` After cleaning the data with the default parameters, we can see that the boundary of the protected area is no longer highly detailed. For example, the smooth edges of the raw protected area data have been replaced with sharp, blocky edges. As such, subsequent analysis performed at the local scale -- such as calculating the spatial extent of land cover types within this single protected area -- might not be sufficiently precise. Now, let's clean the data using parameters that are well suited for local scale analysis. 
```{r} # clean the data with a greater level of spatial precision mlt_fixed_cleaned_reserve_data <- wdpa_clean( mlt_raw_reserve_data, geometry_precision = 10000 ) # preview data print(mlt_fixed_cleaned_reserve_data) # visualize data plot(mlt_fixed_cleaned_reserve_data[, 1]) ``` Here, we specified that the spatial data processing should be performed at a much greater level of precision (using the `geometry_precision` parameter). As a consequence, we can see -- after applying the data cleaning procedures -- that the protected area boundary still retains a high level of detail. This means that the cleaned protected area data is more suitable for local scale analysis. If a greater level of detail is required, the level of precision could be increased further. Note that the maximum level of detail that can be achieved in the cleaned data is limited by the level of detail in the raw data. This means that increasing the level of precision beyond a certain point will have no impact on the cleaned data, because the raw data do not provide sufficient detail for the increased precision to alter the spatial data processing. ### Additional datasets Although the [World Database on Protected Areas (WDPA)](https://www.protectedplanet.net/en) is the most comprehensive global dataset, many datasets are available for specific countries or regions that do not require such extensive data cleaning procedures. As a consequence, it is often worth looking for alternative data sets when working at smaller geographic scales before considering the [World Database on Protected Areas (WDPA)](https://www.protectedplanet.net/en). The list below outlines several alternative protected area datasets and information on where they can be obtained. If you know of any such datasets that are missing, [please create an issue on the GitHub repository](https://github.com/prioritizr/wdpar/issues) and we can add them to the list. * Arctic + [Arctic Protected Areas - 2017](http://geo.abds.is/geonetwork/srv/eng/catalog.search#/metadata/2e56ee1f-50a9-4983-88f4-edaa8588950d) * Australia + [Collaborative Australian Protected Area Database (CAPAD)](http://www.environment.gov.au/fed/catalog/search/resource/details.page?uuid=%7B4448CACD-9DA8-43D1-A48F-48149FD5FCFD%7D) * Canada + [Conservation Areas Reporting and Tracking System (CARTS)](https://ccea-ccae.org) * The United States of America + [Protected Areas Database of the US (PAD-US)](https://doi.org/10.5066/P9Q9LQ4B) ### Citation Please cite the _wdpar R_ package and the relevant databases in publications. To see citation details, use the code: ```{r, eval = FALSE} citation("wdpar") ```
/scratch/gouwar.j/cran-all/cranData/wdpar/inst/doc/wdpar.Rmd
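A natural follow-on to the tutorial's statistics is protected-area coverage. A sketch, assuming the `mlt_pa_data` and `mlt_boundary_data` objects created in the vignette are still available:

```r
# percentage of Malta's land area covered by terrestrial protected areas
library(sf)
library(dplyr)
# total land area (km^2) from the country boundary
land_km2 <- as.numeric(st_area(mlt_boundary_data)) * 1e-6
# merge overlapping terrestrial geometries before measuring
pa_area <- mlt_pa_data %>%
  filter(MARINE == "terrestrial") %>%
  st_union() %>%
  st_area() %>%
  as.numeric()
(pa_area * 1e-6 / land_km2) * 100
```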
# System command to execute: # R CMD BATCH --no-restore --no-save global-example-script.R # Initialization ## define countries for processing data country_names <- "global" ## define file path to save data path <- paste0( "~/wdpa-data/global-", format(Sys.time(), "%Y-%m-%d"), ".gpkg" ) ## load packages library(sf) library(wdpar) # Preliminary processing ## prepare folder if needed export_dir <- suppressWarnings(normalizePath(dirname(path))) if (!file.exists(export_dir)) { dir.create(export_dir, showWarnings = FALSE, recursive = TRUE) } ## prepare user data directory data_dir <- rappdirs::user_data_dir("wdpar") if (!file.exists(data_dir)) { dir.create(data_dir, showWarnings = FALSE, recursive = TRUE) } # Main processing ## download data raw_data <- wdpa_fetch( country_names, wait = TRUE, download_dir = data_dir, verbose = TRUE ) ## clean data result_data <- wdpa_clean(raw_data, erase_overlaps = FALSE, verbose = TRUE) # Exports ## save result sf::write_sf(result_data, path, overwrite = TRUE)
/scratch/gouwar.j/cran-all/cranData/wdpar/inst/scripts/global-example-script.R
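Once the script has exported the cleaned global data, a later session can read it back without repeating the cleaning. A sketch, assuming the same `path` used by the script above:

```r
# import the previously cleaned global data from the GeoPackage
library(sf)
result_data <- sf::read_sf(path)
# quick sanity checks on the cleaned data
nrow(result_data)
table(result_data$MARINE)
```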
--- title: "wdpar: Interface to the World Database on Protected Areas" author: "Jeffrey O. Hanson" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: false fig_caption: true self_contained: true fontsize: 11pt documentclass: article vignette: > %\VignetteIndexEntry{wdpar: Interface to the World Database on Protected Areas} %\VignetteEngine{knitr::rmarkdown_notangle} --- ```{r, include = FALSE} h = 3.5 w = 3.5 is_check <- ("CheckExEnv" %in% search()) || any(c("_R_CHECK_TIMINGS_", "_R_CHECK_LICENSE_") %in% names(Sys.getenv())) knitr::opts_chunk$set(fig.align = "center", eval = !is_check, purl = !is_check, root.dir = normalizePath("../.."), fig.height = 4.5, fig.width = 4.5) ``` ```{r, include = FALSE} devtools::load_all() ``` ### Introduction [Protected Planet](https://www.protectedplanet.net/en) provides the most comprehensive data for conservation areas worldwide. Specifically, it provides the World Database on Protected Areas (WDPA) and the World Database on Other Effective Area-Based Conservation Measures (WDOECM). These databases are used to monitor the performance of existing protected areas, and identify priority areas for future conservation efforts. Additionally, these databases receive monthly updates from government agencies and non-governmental organizations. However, they are associated with [several issues that need to be addressed prior to analysis](https://www.protectedplanet.net/en/resources/calculating-protected-area-coverage) and the dynamic nature of these databases means that the entire data cleaning process needs to be repeated after obtaining a new version. The _wdpar R_ package provides an interface to data available on [Protected Planet](https://www.protectedplanet.net/en). Specifically, it can be used to automatically obtain data from the [World Database on Protected Areas (WDPA)](https://www.protectedplanet.net/en/thematic-areas/wdpa?tab=WDPA) and the [World Database on Other Effective Area-Based Conservation Measures (WDOECM)](https://www.protectedplanet.net/en/thematic-areas/oecms). It also provides methods for cleaning data from these databases following best practices (outlined in Butchart *et al.* 2015; [Protected Planet 2021](https://www.protectedplanet.net/en/resources/calculating-protected-area-coverage); Runge *et al.* 2015). In this vignette, we provide a tutorial and recommendations for using the package. ### Tutorial Here we will provide a short introduction to the _wdpar R_ package. First, we will load the _wdpar R_ package. We will also load the _dplyr_ and _ggmap R_ packages to help explore the data. ```{r, message = FALSE, warning = FALSE} # load packages library(wdpar) library(dplyr) library(ggmap) ``` Now we will download protected area data for Malta from [Protected Planet](https://www.protectedplanet.net/en). We can achieve this by specifying Malta's country name (i.e. `"Malta"`) or Malta's [ISO3 code](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3) (i.e. `"MLT"`). Since data are downloaded to a temporary directory by default, we will specify that the data should be downloaded to a persistent directory. This means that R won't have to re-download the same dataset every time we restart our R session, and R can simply re-load previously downloaded datasets as needed. ```{r} # download protected area data for Malta # (excluding areas represented as point localities) mlt_raw_pa_data <- wdpa_fetch( "Malta", wait = TRUE, download_dir = rappdirs::user_data_dir("wdpar") ) ``` Next, we will clean the data set. 
Briefly, the cleaning steps include: excluding protected areas that are not yet implemented, excluding protected areas with limited conservation value, replacing missing data codes (e.g. `"0"`) with missing data values (i.e. `NA`), replacing protected areas represented as points with circular protected areas that correspond to their reported extent, repairing any topological issues with the geometries, and erasing overlapping areas. Please note that, by default, spatial data processing is performed at a scale suitable for national scale analyses (see below for recommendations for local scale analyses). For more information on the data cleaning procedures, see `wdpa_clean()`. ```{r} # clean Malta data mlt_pa_data <- wdpa_clean(mlt_raw_pa_data) ``` After cleaning the data set, we will perform an additional step that involves clipping the terrestrial protected areas to Malta's coastline. Ideally, we would also clip the marine protected areas to Malta's Exclusive Economic Zone (EEZ) but such data are not as easy to obtain on a per country basis (but see https://www.marineregions.org/eez.php)). ```{r} # download Malta boundary from Global Administrative Areas dataset file_path <- tempfile(fileext = "rds") download.file( "https://biogeo.ucdavis.edu/data/gadm3.6/Rsf/gadm36_MLT_0_sf.rds", file_path ) # import Malta's boundary mlt_boundary_data <- readRDS(file_path) # repair any geometry issues, dissolve the border, reproject to same # coordinate system as the protected area data, and repair the geometry again mlt_boundary_data <- mlt_boundary_data %>% st_set_precision(1000) %>% sf::st_make_valid() %>% st_set_precision(1000) %>% st_combine() %>% st_union() %>% st_set_precision(1000) %>% sf::st_make_valid() %>% st_transform(st_crs(mlt_pa_data)) %>% sf::st_make_valid() # clip Malta's protected areas to the coastline mlt_pa_data <- mlt_pa_data %>% filter(MARINE == "terrestrial") %>% st_intersection(mlt_boundary_data) %>% rbind(mlt_pa_data %>% filter(MARINE == "marine") %>% st_difference(mlt_boundary_data)) %>% rbind(mlt_pa_data %>% filter(!MARINE %in% c("terrestrial", "marine"))) # recalculate the area of each protected area mlt_pa_data <- mlt_pa_data %>% mutate(AREA_KM2 = as.numeric(st_area(.)) * 1e-6) ``` Now that we have finished cleaning the data, let's preview the data. For more information on what these columns mean, please refer to the [official manual](https://www.protectedplanet.net/en/resources/wdpa-manual) (available in English, French, Spanish, and Russian). ```{r} # print first six rows of the data head(mlt_pa_data) ``` We will now reproject the data to longitude/latitude coordinates ([EPSG:4326](https://spatialreference.org/ref/epsg/wgs-84/)) for visualization purposes. ```{r} # reproject data mlt_pa_data <- st_transform(mlt_pa_data, 4326) ``` Next, we can plot a map showing the boundaries of Malta's protected area system. ```{r, message = FALSE, warning = FALSE} # download basemap for making the map bg <- get_stamenmap( unname(st_bbox(mlt_pa_data)), zoom = 8, maptype = "watercolor", force = TRUE ) # print map ggmap(bg) + geom_sf(data = mlt_pa_data, fill = "#31A35480", inherit.aes = FALSE) + theme(axis.title = element_blank()) ``` We can also create a histogram showing the year when each protected area was established. ```{r} hist( mlt_pa_data$STATUS_YR, main = "Malta's protected areas", xlab = "Year established" ) ``` Now let's calculate some statistics. We can calculate the total amount of land and ocean inside Malta's protected area system (km^2^). 
```{r} # calculate total amount of area inside protected areas (km^2) statistic <- mlt_pa_data %>% as.data.frame() %>% select(-geometry) %>% group_by(MARINE) %>% summarize(area_km = sum(AREA_KM2)) %>% ungroup() %>% arrange(desc(area_km)) # print statistic print(statistic) ``` We can also calculate the percentage of land inside its protected area system that are managed under different categories (i.e. [using the protected area management categories defined by The International Union for Conservation of Nature](https://www.iucn.org/)). ```{r} # calculate percentage of land inside protected areas (km^2) statistic <- mlt_pa_data %>% as.data.frame() %>% select(-geometry) %>% group_by(IUCN_CAT) %>% summarize(area_km = sum(AREA_KM2)) %>% ungroup() %>% mutate(percentage = (area_km / sum(area_km)) * 100) %>% arrange(desc(area_km)) # print statistic print(statistic) ``` We can also plot a map showing Malta's protected areas and color each area according to it's management category. ```{r, message = FALSE, warning = FALSE} ggmap(bg) + geom_sf(aes(fill = IUCN_CAT), data = mlt_pa_data, inherit.aes = FALSE) + theme(axis.title = element_blank(), legend.position = "bottom") ``` ### Recommended practices for large datasets The _wdpar R_ package can be used to clean large datasets assuming that sufficient computational resources and time are available. Indeed, it can clean data spanning large countries, multiple countries, and even the full global datatset. When processing the full global dataset, it is recommended to use a computer system with at least 32 GB RAM available and to allow for at least one full day for the data cleaning procedures to complete. It is also recommended to avoid using the computer system for any other tasks while the data cleaning procedures are being completed, because they are very computationally intensive. Additionally, when processing large datasets -- and especially for the global dataset -- it is strongly recommended to disable the procedure for erasing overlapping areas. This is because the built-in procedure for erasing overlaps is very time consuming when processing many protected areas, so that information on each protected area can be output (e.g. IUCN category, year established). Instead, when cleaning large datasets, it is recommended to run the data cleaning procedures with the procedure for erasing overlapping areas disabled (i.e. with `erase_overlaps = FALSE`). After the data cleaning procedures have completed, the protected area data can be manually dissolved to remove overlapping areas (e.g. using `wdpa_dissolve()`). For an example of these procedures, please see below. ```{r, warning = FALSE} # download protected area data for multiple of countries ## (i.e. 
Portugal, Spain, France) raw_pa_data <- c("PRT", "ESP", "FRA") %>% lapply(wdpa_fetch, wait = TRUE, download_dir = rappdirs::user_data_dir("wdpar")) %>% bind_rows() # clean protected area data (with procedure for erasing overlaps disabled) full_pa_data <- wdpa_clean(raw_pa_data, erase_overlaps = FALSE) # at this stage, the data could be filtered based on extra criteria (if needed) ## for example, we could subset the data to only include protected areas ## classified as IUCN category Ia or Ib sub_pa_data <- full_pa_data %>% filter(IUCN_CAT %in% c("Ia", "Ib")) # dissolve all geometries together (removing spatial overlaps) pa_data <- wdpa_dissolve(sub_pa_data) # preview data print(pa_data) # calculate total area print(st_area(pa_data)) ``` ### Recommended practices for local scale analyses The default parameters for the data cleaning procedures are well suited for national-scale analyses. Although these parameters reduce memory requirements and the time needed to complete the data cleaning procedures, they can produce protected area boundaries that appear overly "blocky" -- lacking smooth edges -- when viewed at finer scales. As such, it is strongly recommended to increase the level of spatial precision when cleaning data for local scale analyses (via the `geometry_precision` parameter of the `wdpa_clean()` function). Here we will explore the consequences of using the default parameters for the data cleaning procedures when working at a local scale. This will help illustrate why it can be important to adjust the spatial precision of the data cleaning procedures. To begin with, we will obtain data for a small protected area. Specifically, we will extract a protected area from the Malta dataset we downloaded earlier. ```{r} # find id for smallest reserve in cleaned dataset mlt_reserve_id <- mlt_pa_data$WDPAID[which.min(mlt_pa_data$AREA_KM2)] # extract the smallest reserve from the raw dataset mlt_raw_reserve_data <- mlt_raw_pa_data %>% filter(WDPAID == mlt_reserve_id) # preview data print(mlt_raw_reserve_data) # visualize data plot(mlt_raw_reserve_data[, 1]) ``` ```{r, include = FALSE} assertthat::assert_that( nrow(mlt_raw_reserve_data) == 1, msg = "failed to extract small reserve from Malta data" ) ``` We can see that the boundary for this protected area has a high level of detail. This suggests that the protected area data is available at a resolution that is sufficient to permit local scale analyses. To help understand the consequences of cleaning data with the default parameters, we will clean this dataset using the default parameters. ```{r} # clean the data with default parameters mlt_default_cleaned_reserve_data <- wdpa_clean(mlt_raw_reserve_data) # preview data print(mlt_default_cleaned_reserve_data) # visualize data plot(mlt_default_cleaned_reserve_data[, 1]) ``` After cleaning the data with the default parameters, we can see that the boundary of the protected area is no longer highly detailed. For example, the smooth edges of the raw protected area data have been replaced with sharp, blocky edges. As such, subsequent analysis performed at the local scale -- such as calculating the spatial extent of land cover types within this single protected area -- might not be sufficiently precise. Now, let's clean the data using parameters that are well suited for local scale analysis. 
```{r} # clean the data with default parameters mlt_fixed_cleaned_reserve_data <- wdpa_clean( mlt_raw_reserve_data, geometry_precision = 10000 ) # preview data print(mlt_fixed_cleaned_reserve_data) # visualize data plot(mlt_fixed_cleaned_reserve_data[, 1]) ``` Here, we specified that the spatial data processing should be performed at a much greater level of precision (using the `geometry_precision` parameter). As a consequence, we can see -- after applying the data cleaning procedures -- that the protected area boundary still retains a high level of detail. This means that the cleaned protected area data is more suitable for local scale analysis. If a greater level of detail is required, the level of precision could be increased further. Note that the maximum level of detail that can be achieved in the cleaned data is limited by the level of detail in the raw data. This means that increasing the level of precision beyond a certain point will have no impact on the cleaned data, because the raw data do not provide sufficient detail for the increased precision to alter the spatial data processing. ### Additional datasets Although the [World Database on Protected Areas (WDPA)](https://www.protectedplanet.net/en) is the most comprehensive global dataset, many datasets are available for specific countries or regions that do not require such extensive data cleaning procedures. As a consequence, it is often worth looking for alternative data sets when working at smaller geographic scales before considering the [World Database on Protected Areas (WDPA)](https://www.protectedplanet.net/en). The list below outlines several alternative protected area datasets and information on where they can be obtained. If you know of any such datasets that are missing, [please create an issue on the GitHub repository](https://github.com/prioritizr/wdpar/issues) and we can add them to the list. * Arctic + [Arctic Protected Areas - 2017](http://geo.abds.is/geonetwork/srv/eng/catalog.search#/metadata/2e56ee1f-50a9-4983-88f4-edaa8588950d) * Australia + [Collaborative Australian Protected Area Database (CAPAD)](http://www.environment.gov.au/fed/catalog/search/resource/details.page?uuid=%7B4448CACD-9DA8-43D1-A48F-48149FD5FCFD%7D) + [Conservation Areas Reporting and Tracking System (CARTS)](https://ccea-ccae.org) * The United States of America + [Protected Areas Database of the US (PAD-US)](https://doi.org/10.5066/P9Q9LQ4B) ### Citation Please cite the _wdpar R_ package and the relevant databases in publications. To see citation details, use the code: ```{r, eval = FALSE} citation("wdpar") ```
/scratch/gouwar.j/cran-all/cranData/wdpar/vignettes/wdpar.Rmd
# * Author: Bangyou Zheng ([email protected]) # * Created: 12/16/2010 # * #' Define the class of WeaAnaSite #' #' @docType class #' @slot name Name of weather station #' @slot number Station number of weather station #' @slot latitude Latitude of weather station #' @slot longitude Longitude of weather station #' @slot tav Annual average ambient temperature #' @slot amp Annual amplitude in mean monthly temperature #' @slot marker The extra marker for this site #' @slot year A vector of year of weather station #' @slot day A vector of day of weather station #' @slot radn A vector of radiation of weather station #' @slot maxt A vector of maximum temperature of weather station #' @slot mint A vector of minimum temperature of weather station #' @slot evap A vector of evaporation of weather station #' @slot rain A vector of rainfall of weather station #' @slot vp A vector of vapour pressure of weather station #' @slot code The 6 digit code indicating the source of the 6 data columns #' @slot extra A list of extra variables to store #' @slot res All statistics results are stored in this slot #' @slot figures A list to store all plotted figures. #' @slot file.path The file path for this site. #' @slot data.format The data format for this site. #' @slot load.later Whether records are loaded later. #' @exportClass WeaAnaSite setClass( Class="WeaAnaSite", representation = representation( name = "character", number = "character", latitude = "numeric", longitude = "numeric", tav = "numeric", amp = "numeric", marker = "list", year = "numeric", day = "numeric", radn = "numeric", maxt = "numeric", mint = "numeric", rain = "numeric", evap = "numeric", vp = "numeric", code = "character", extra = "list", res = "list", figures = "list", file.path = "character", data.format = "character", load.later = "logical" ) ) #' Define the class for multiple sites #' #' @docType class #' @slot num total number of weather station #' @slot records A pointer vector to weather records of each site #' @slot result A pointer for all results name and type. #' @exportClass WeaAna setClass( Class="WeaAna", representation = representation( num = "numeric", records = "vector", result = "vector" ) ) #' Define the class for statistics results #' #' @docType class #' @slot name Name of result #' @slot type Type of result #' @exportClass result setClass( Class="result", representation = representation( name = "character", type = "character" ) )
/scratch/gouwar.j/cran-all/cranData/weaana/R/1.weaanaClass.R
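For illustration, a `WeaAnaSite` object can be built directly with `methods::new()`; all values below are invented, and `convert2Records()` (a later file) is the usual route:

```r
# a minimal sketch of constructing a WeaAnaSite by hand (hypothetical values)
site <- methods::new(
  "WeaAnaSite",
  name = "Demo", number = "000000",
  latitude = -27.5, longitude = 151.9,
  year = rep(2000, 2), day = 1:2,
  radn = c(20, 22), maxt = c(30, 31), mint = c(15, 16),
  rain = c(0, 5), evap = c(6, 7), vp = c(10, 11),
  code = rep("------", 2),             # 6-character source codes
  file.path = NA_character_, data.format = NA_character_,
  load.later = FALSE
)
site@name
#> [1] "Demo"
```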
# * Author: Bangyou Zheng ([email protected]) # * Created: 10:43 PM Wednesday, 2 January 2013 # * Copyright: AS IS # * #' Change weather records #' #' @param object A WeaAna object. #' @param ... Not used #' @docType methods #' @rdname changeWeatherRecords-methods setGeneric('changeWeatherRecords', function(object, ...) { standardGeneric('changeWeatherRecords') } ) #' Change weather records #' #' @docType methods #' @param object A WeaAna object. #' @param ... New weather records #' @return The WeaAna object updated in place (records are modified through internal pointers) #' @rdname changeWeatherRecords-methods #' @aliases changeWeatherRecords,WeaAna,WeaAna-method setMethod(f = 'changeWeatherRecords', signature = c(object = 'WeaAna'), definition = function(object, ...) { new_values <- list(...) if (object@num > 1) { stop('There is more than one met file') } records <- getWeaAnaSiteByPos(object, 1) new_cols <- names(new_values) records_row <- length(records$value@year) var_cols <- c('year', 'day', 'radn', 'maxt', 'mint', 'rain', 'evap', 'vp', 'code') if (!all(new_cols %in% var_cols)) { stop(sprintf('Check new values for %s', paste(new_cols[!(new_cols %in% var_cols)], collapse = ', '))) } for (i in seq(along = new_values)) { methods::slot(records$value, new_cols[i]) <- rep(new_values[[i]], length.out = records_row) } } )
/scratch/gouwar.j/cran-all/cranData/weaana/R/changeWeatherRecords.R
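A usage sketch with the bundled demo data (the method only accepts a single-site `WeaAna` object, and new values are recycled over all records):

```r
library(weaana)
data("WeatherRecordsDemo")
met <- records[1]                     # a single-site WeaAna object
changeWeatherRecords(met, maxt = 30)  # set every day's maxt to 30
head(getWeatherRecords(met)$maxt)
```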
# * Author: Bangyou Zheng ([email protected]) # * Created: 18/03/2011 # * #' Convert a data frame to weaana class #' @param infor A list or data frame of site information #' @param records A data frame will convert to records #' @return A new WeaAna object #' @export convert2Records <- function( infor, records ) { d.names <- names( records ) n.vars <- c( waGetPara( "records.index" ), waGetPara( "records.vars" ) ) if( !identical( n.vars %in% d.names, rep( TRUE, length( n.vars ) ) ) ) { stop( paste( "Records columns needed: ", paste( n.vars, collapse = ", ") ) ) } i.vars <- c( "Name", "Number", "Latitude", "Longitude" ) if( !identical( i.vars %in% names( infor ), rep( TRUE, length( i.vars ) ) ) ) { stop( paste( "Infor columns needed: ", paste( i.vars, collapse = ", ") ) ) } a <- NULL for ( i in seq ( along = n.vars ) ) { a[[n.vars[i]]] <- records[[n.vars[i]]] } extra <- NULL extra$avgt <- ( a$maxt + a$mint ) / 2 extra$vpd <- vpd.apsim( a$maxt, a$mint ) record <- methods::new( "WeaAnaSite", name = infor$Name, number = infor$Number, latitude = as.numeric( infor$Latitude ), longitude = as.numeric( infor$Longitude ), year = a$year, day = a$day, radn = a$radn, maxt = a$maxt, mint = a$mint, rain = a$rain, evap = a$evap, vp = a$vp, code = as.character( a$code ), extra = extra, file.path = as.character( NA ), data.format = as.character( NA ), load.later = FALSE ) result <- c(NULL, newPointer( methods::new( "result", name = as.character( NULL ), type = as.character( NULL ) ) ) ) records <- methods::new( "WeaAna", num = 1, records = c( NULL, newPointer( record ) ), result = result ) return( records ) }
/scratch/gouwar.j/cran-all/cranData/weaana/R/convert2Records.R
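A sketch of converting a plain data frame into a `WeaAna` object (values are invented; the required column names follow the `records.index` and `records.vars` parameters):

```r
library(weaana)
# site information with the required fields
infor <- list(Name = "Demo", Number = "000000",
              Latitude = -27.5, Longitude = 151.9)
# ten days of made-up weather records
met_df <- data.frame(
  year = 2000, day = 1:10, radn = 20, maxt = 30, mint = 15,
  rain = 0, evap = 6, vp = 10, code = "------"
)
wa <- convert2Records(infor, met_df)
siteInfor(wa)
```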
# * Author: Bangyou Zheng ([email protected]) # * Created: 15:16 Tuesday, 16 August 2011 # * # Calculate day of year according to date # # @param Date a string or date vector. # @param format date format when Date is a string vector # @export DOY <- function(Date, format = NULL) { if (is.null(format)) { format <- '%d/%m/%Y' } if (mode(Date) == 'character') { Date <- as.Date(Date, format = format) } if (mode(Date) == 'numeric' & 'Date' %in% class(Date)) { pos <- !is.na(Date) last_year <- rep(NA, length(Date)) last_year[pos] <- as.Date(paste(as.numeric(format(Date[pos],"%Y"))-1, "-12-31",sep="")) return(as.numeric(Date - last_year)) } else if (mode(Date) %in% c("numeric", 'list') & "POSIXt" %in% class(Date)) { pos <- !is.na(Date) last_year <- as.POSIXlt(rep(NA, length(Date))) last_year[pos] <- as.POSIXlt(paste(as.numeric(format(Date[pos], "%Y")) - 1, "-12-31", sep = "")) return(floor(as.numeric(Date - last_year))) } return (NULL) }
/scratch/gouwar.j/cran-all/cranData/weaana/R/date.R
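A sketch of `DOY()` (an internal helper, so it may need the `weaana:::` prefix when the package is installed rather than loaded with `devtools::load_all()`):

```r
DOY("15/03/2020")                            # default format "%d/%m/%Y"
#> [1] 75
DOY(as.Date(c("2021-01-01", "2021-12-31")))  # Date input
#> [1]   1 365
```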
# * Author: Bangyou Zheng ([email protected]) # * Created: 27/01/2011 # * # initialize the parameters # Improvements - consider reading these from a CSV file or similar WA_OPTIONS <- settings::options_manager( data.format = "APSIM", # data format load.later = FALSE, # whether to load all weather records when reading files. yrange = 1799:as.numeric( format(Sys.time(), "%Y") ), # The year range to calculate records.index = c( "year", "day" ), records.vars = c( "radn", "maxt", "mint", "rain", "evap", "vp" ), base.temperature = 0, # the base temperature to calculate the degree days key.degree.days = 400, # the key degree days to calculate the extreme temperature in this period day.begin = 1, # days start to calculate day.end = 366, # days end to calculate extreme = list( maxt = list( hot.day = list( value = 35, more.or.less = 1, label = "Hot days" ), very.hot.day = list( value = 40, more.or.less = 1, label = "Very hot days" ) ), mint = list( frost.night = list( value = 0, more.or.less = 0, label = "Frost nights" ) ) ), # trim.incomplete.year = TRUE, # Trim the incomplete data (year) from original data # evap.year = 1967, mov.window = 10, # moving window # # wea.vars.lables = c( "VPD", "evaporation perday", "rainfall", "radiation", "minimum temperature", "maximum temperature" ), # wea.vars.units = c( "(hPa)", "(mm)", "(mm total)", "(MJ/m2)", "(oC)", "(oC)" ), # wea.vars.strips = c( "VPD (hPa)", # "Evaporation per day (mm)", # "Rainfall (mm total)", # expression( paste( "Radiation", ~"("*MJ^2*")" ) ), # expression( paste( "Minimum Temperature", ~"("*degree*"C)" ) ), # expression( paste( "Maximum Temperature", ~"("*degree*"C)" ) ) ), # daily.yrange = c( as.numeric( format(Sys.time(), "%Y") ) - 10, # as.numeric( format(Sys.time(), "%Y") ) ), # # extreme indeces for extreme maximum temperature # extreme.indices.maxt = list( var = "maxt", # names = c( "very.hot.days", "hot.days", "cold.days", "very.cold.days" ), # lables = c( "very hot days", "hot days", "cold days", "very cold days" ), # values = c( 40, 35, 15, 10 ), # more.or.less = c( 1, 1, 0, 0 ) # ), # # extreme indeces for extreme minimum temperature # extreme.indices.mint = list( var = "mint", # names = c( "very.hot.nights","hot.nights", "cold.nights", "frost.nights" ), # lables = c( "very hot nights","hot nights", "cold nights", "frost nights" ), # values = c( 25, 20, 5, 0 ), # more.or.less = c( 1, 1, 0, 0 ) # ), # # define the stress conditions. # stress.temp.indices = c( "mint<5", "30<maxt<35" ), shift = "begin", # The shift method when calculating the moving extreme temperature. # if extreme.temp.shift = "centre", then values are shifted to centre. # if extreme.temp.shift = "begin", then values are at begin of period. # if extreme.temp.shift = "end", then values are at end of period. # countPeriod = "month", # the period at which we count the number of occurrences of a specific condition.
It could be "year", "month", "week" or any number of days numdays = 5, # # is.out.fig.each.site = TRUE, # Whether output figures to files # is.out.fig.each.type = TRUE, # Whether output figures to files # is.out.fig.using.map = TRUE, # Whether output figures to files # # output.format = "pdf", # output.prefix = NA, # plot.days.range = c( 1, 365 ), # The day range to show in the plots # # plot.year.range = c( as.numeric( format(Sys.time(), "%Y") ) - 10, as.numeric( format(Sys.time(), "%Y") ) ),# The year range to show in the plots # plot.maxt.keypoints = c(20, 35), # The key points for maximum temperature # plot.mint.keypoints = c(10, 20), # The key points for minimum temperature # plot.maxt.colors = c( "blue", "red", "blue", "red", "blue" ), # the colors for maximum temperature # plot.mint.colors = c( "black", "green", "black", "green", "black" ), # the colores of minumum temperature # # placeholder = NA # a placeholder to conveniently add new parameters ) # Get the all/a default parameter value # # Get the all/a default parameter value # @param para.name The paramer with this name will be returned # @return The all default parameter will be returned if the para.name is not specified, or # just the default paramter whose name is para.name # @export defaultPara <- function( para.name = NULL ) { if ( !is.null( para.name ) ) { return( WA_OPTIONS()[[para.name]] ) } else { return( WA_OPTIONS() ) } }
/scratch/gouwar.j/cran-all/cranData/weaana/R/defaultPara.r
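A sketch of querying parameters through the options manager (`defaultPara()` and `waGetPara()` are defined by the package; prefix with `weaana:::` if they are not exported):

```r
defaultPara("base.temperature")              # one parameter
#> [1] 0
waGetPara("extreme$mint$frost.night$value")  # nested access, as used elsewhere in the package
#> [1] 0
names(defaultPara())[1:4]                    # first few managed options
```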
#' @name records #' @rdname records #' @title Demo weather records #' NULL #' @rdname records "records"
/scratch/gouwar.j/cran-all/cranData/weaana/R/demo_data.R
# * Author: Bangyou Zheng ([email protected]) # * Created: 1:47 PM Monday, 13 August 2012 # * Copyright: AS IS # * #' Calculate the diurnal variation in air temperature with Parton and Logan, 1981 #' #' @description #' Calculate the diurnal variation in air temperature. #' Parton WJ, Logan JA (1981) A model for diurnal variation in soil and air temperature. #' Agricultural Meteorology, 23, 205-216. #' Codes copied from APSIM Utilities.cpp #' @param maxt maximum daily temperature #' @param mint minimum daily temperature #' @param doy day of year #' @param hour hour from 1 to 24 #' @param latitude latitude in radials #' @param A is the time lag in temperature after noon #' @param B is coef that controls temperature decrease at night #' @param C is the time lag for min temperature after sunrise #' @return A vector with diurnal air temperature #' @examples #' diurnalT(maxt = 20, mint = 10, doy = 1, #' hour = seq(from = 1, to = 23.99, by = 0.1), #' latitude = -10, A = 1.5, B = 4, C = 1) #' @export diurnalT <- function(maxt, mint, doy, hour, latitude, A = 1.5, B = 4, C = 1) { doy <- rep(doy, length = length(hour)) # hour <- (as.numeric(datetime) - # as.numeric(as.POSIXlt(format(datetime, '%Y-%m-%d')))) / 3600 aDelt <- 0.4014 * sin(2 * pi * (doy - 77.0) / 365.0) temp1 <- 1.0 - (-tan(latitude) * (aDelt))^2.0 temp1 <- sqrt(temp1) temp2 <- (-tan(latitude) * tan(aDelt)) aHou <- atan2(temp1, temp2) ady <- (aHou / pi) * 24.0 ani <- (24.0 - ady) bb <- 12.0 - ady / 2.0 + C be <- 12.0 + ady / 2.0 temperature <- NULL for (i in seq(along = hour)) { if(hour[i] >= bb[i] & hour[i] < be[i]) { bbd <- hour[i] - bb[i] temperature <- c(temperature, (maxt - mint) * sin((pi * bbd) / (ady[i] + 2 * A)) + mint) }else { if(hour[i] > be[i]) bbd <- hour[i] - be[i] if(hour[i] < bb[i]) bbd <- (24.0 - be[i]) + hour[i] ddy <- ady[i] - C tsn <- (maxt - mint) * sin((pi * ddy) / (ady[i] + 2 * A)) + mint temperature <- c(temperature, mint + (tsn - mint) * exp(-B * bbd / ani[i])) } } return (temperature) }
/scratch/gouwar.j/cran-all/cranData/weaana/R/diurnal.R
# * Author: Bangyou Zheng ([email protected]) # * Created: 04/05/2010 # * # Get all file path from a vector of files and folders # # @param dataFiles A vector of weather data files # @param dataFolders A vector of weather data folders # @param dataFormat The format for weather data files. # "dataFormat" should be one of "APSIM", "RDATA" or "GHCN". # @return A vector of all file path fileList <- function( dataFiles = NULL, dataFolders = NULL, dataFormat = "APSIM" ) { file.suffix <- NULL if ( dataFormat == "APSIM" ) { file.suffix <- c( "MET", "met" ) } else if ( dataFormat == "RDATA" ) { file.suffix <- c( "RData" ) } else if (dataFormat == 'GHCN') { file.suffix <- c('dly') }else { stop( paste( "Data format \"", dataFormat, "\" is not supported. ", "Please use \"APSIM\", \"RDATA\" or \"GHCN\" as data format.", sep = "" ) ) } if ( !is.null( dataFolders ) ) { for ( i in 1:length( dataFolders ) ) { dataFiles <- c( dataFiles, list.files( dataFolders[i], full.names = TRUE ) ) } } fileLists <- NULL if ( !is.null( dataFiles ) ) { for ( i in 1:length( dataFiles ) ) { if ( !file.exists( dataFiles[i] ) ) { warning( paste( "File \"", dataFiles[i], "\" does not exist.", sep = "" ), call. = FALSE ) } if ( right( dataFiles[i], nchar( file.suffix[1] ) ) %in% file.suffix ) { fileLists <- c( fileLists, dataFiles[i] ) } } } if ( is.null( fileLists ) ) { stop( paste( "No file found with data format \"", dataFormat, "\" in the specified file and folder lists.", sep = "" ) ) } return( unique( fileLists ) ) } # Get all file name from a vector of files # # @param fileLists A vector of files # @return A vector of file name siteList <- function( fileLists = NULL ) { siteLists <- NULL if ( !is.null( fileLists ) ) { sites <- basename( fileLists ) for ( i in 1:length( sites ) ) { siteLists[i] <- left( sites[i], len( sites[i] ) - 4 ) } } return( unique( siteLists ) ) }
/scratch/gouwar.j/cran-all/cranData/weaana/R/fileList.R
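A sketch of the file helpers (internal functions; the folder path below is invented):

```r
# list all APSIM *.met files under a folder, then derive site names
mets <- fileList(dataFolders = "~/met-files", dataFormat = "APSIM")
siteList(mets)  # file names with the 4-character suffix (".met") stripped
```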
# * Author: Bangyou Zheng ([email protected]) # * Created: 4:15 PM Wednesday, 22 August 2012 # * Copyright: AS IS # * # Calculate the first frost event # # @param object A WeaAna object. # @param ... Not used setGeneric("firstFrostDay", function(object, ...) { standardGeneric("firstFrostDay") } ) # Calculate the first frost day # # @docType methods # @param object A WeaAna object. # @param yrange The year range for statistics # @param stress The stress temperature for frost # @export setMethod(f = "firstFrostDay", signature = c(object = "WeaAna"), definition = function(object, stress = waGetPara("extreme$mint$frost.night$value"), yrange = waGetPara("yrange") ) { res <- wapply(object, vars = "mint", period = "year", FUN = firstFrostDay, ARGS = list(firstFrostDay = list(stress = stress)), site.ARGS = list(latitude = siteInfor(object)$Latitude), yrange = yrange, res.name = c("firstFrostDay")) return(res) } ) # Calculate the first frost day # # @docType methods # @param object A numeric object. # @param stress The stress temperature for frost # @param latitude Latitude of site. # @export setMethod(f = "firstFrostDay", signature = c(object = "numeric"), definition = function(object, stress = waGetPara("extreme$mint$frost.night$value"), latitude = -20 ) { min.value <- -1000 res <- NULL if (length(object) < 365) { warning("Some years don't have complete data; NA returned.") return(NA) } if (latitude < 0) { if (min(object) > stress) { return(NA) } pos <- rep(FALSE, length(object)) pos[object < stress] <- TRUE max_pos <- which.max(pos) if (max_pos == 1) { return(NA) } return(max_pos) } else { stop('Not implemented') } return (NA) } )
/scratch/gouwar.j/cran-all/cranData/weaana/R/firstFrostDay.R
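A sketch of the numeric method on a synthetic year of minimum temperatures (southern hemisphere, so the whole year is scanned):

```r
mint <- rep(10, 365)
mint[180:185] <- -1                # a frost spell starting on day 180
firstFrostDay(mint, stress = 0, latitude = -27)
#> [1] 180
```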
# * Author: Bangyou Zheng ([email protected]) # * Created: 20/01/2011 # * # Calculate the first heat day # # @param object A WeaAna object. # @param ... Not used setGeneric("firstHeatDay", function(object, ...) { standardGeneric("firstHeatDay") } ) # Calculate the first heat day # # @docType methods # @param object A WeaAna object. # @param yrange The year range for statistics # @param stress The stress temperature for heat # @param conse Number of consecutive days # @param offset Offset in accumulated degree days applied to the detected day # @export setMethod(f = "firstHeatDay", signature = c(object = "WeaAna"), definition = function(object, stress = waGetPara("extreme$maxt$hot.day$value"), yrange = waGetPara("yrange"), conse = 1, offset = 0) { res <- wapply(object, vars = "maxt", period = "year", FUN = firstHeatDay, ARGS = list(firstHeatDay = list(stress = stress)), site.ARGS = list(latitude = siteInfor(object)$Latitude, conse = conse, offset = offset), yrange = yrange, res.name = c("firstHeatDay")) return(res) } ) # Calculate the first heat day # # @docType methods # @param object A numeric object. # @param stress The stress temperature for heat # @param latitude Latitude of site. # @param conse Number of consecutive days # @param offset Offset in accumulated degree days applied to the detected day # @export setMethod(f = "firstHeatDay", signature = c(object = "numeric"), definition = function(object, stress = waGetPara("extreme$maxt$hot.day$value"), latitude = -20, conse = 1, offset = 0) { max.value <- 1000 if (length(object) < 365) { warning("Some years don't have complete data; NA returned.") return(NA_integer_) } if (latitude < 0) { object_n <- object[181:length(object)] if (max(object_n) < stress) { return(NA_integer_) } pos <- rep(FALSE, length(object_n)) pos[ object_n > stress] <- TRUE if (conse > 1) { pos <- mov.sum(pos, conse, shift = "begin") } if (max(pos, na.rm = TRUE) < conse) { return(NA_integer_) } max.pos <- which.max(pos) + 180 if (offset != 0) { bt <- waGetPara("base.temperature") tt <- cumsum(ifelse(object > bt, object - bt, 0)) max.pos <- which.min(abs(tt - (tt[max.pos] + offset))) } return(max.pos) } else { if (max(object) < stress) { return(NA_integer_) } pos <- rep(FALSE, length(object)) pos[ object > stress] <- TRUE if (conse > 1) { pos <- mov.sum(pos, conse, shift = "begin") } if (max(pos, na.rm = TRUE) < conse) { return(NA_integer_) } return(which.max(pos)) } } )
/scratch/gouwar.j/cran-all/cranData/weaana/R/firstHeatDay.R
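A sketch of the numeric method; for southern-hemisphere sites the search starts at day 181, so heat before mid-year is ignored:

```r
maxt <- rep(25, 365)
maxt[300] <- 38                    # one hot day in spring
firstHeatDay(maxt, stress = 35, latitude = -27)
#> [1] 300
```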
# * Author: Bangyou Zheng ([email protected]) # * Created: 07/01/2011 # * #' Getter to access the weather data at a specific position. #' #' Getter to access the weather data at a specific position. #' @param x A WeaAna object. #' @param i the specific position to access. #' @param j Unused parameter. #' @param drop Unused parameter. #' @return A WeaAnaSite object at the position i. #' @examples #' library(weaana) #' data( "WeatherRecordsDemo" ) #' records[1] #' records[1:2] #' records[2:2] #' #' @rdname getter setMethod( f = "[", signature = signature( "WeaAna" ), definition = function( x, i, j, drop ) { sites.records <- NULL num <- 0 i.len <- length( i ) for ( k in 1:i.len ) { if ( i[k] <= x@num && i[k] >= 1 ) { site.record <- x@records[i[k]][[1]] site.record <- site.record$value sites.records <- c( sites.records, newPointer( site.record ) ) num <- num + 1 } else { warning( paste( "\"", i[k], "\" out of bounds, skipping it.", sep = "" ) ) } } records <- methods::new( "WeaAna", num = num, records = sites.records, result = x@result ) return( records ) } ) # Get WeaAnaSite object by position # # @param object A WeaAna object. # @param ... Not used # @docType methods # @rdname getWeaAnaSiteByPos-methods setGeneric( "getWeaAnaSiteByPos", function( object, ... ) { standardGeneric( "getWeaAnaSiteByPos" ) } ) # Get WeaAnaSite object by position # # @docType methods # @param object A WeaAna object. # @param pos The position to return. # @examples # library(weaana) # data( "WeatherRecordsDemo" ) # getWeaAnaSiteByPos( records, 1 ) # @return The WeaAnaSite pointer at pos. # @export # @rdname getWeaAnaSiteByPos-methods # @aliases getWeaAnaSiteByPos,WeaAna,WeaAna-method setMethod( f = "getWeaAnaSiteByPos", signature = c( object = "WeaAna" ), definition = function( object, pos ) { if ( length( pos ) > 1 ) { warning( "Only one element is supported. Others will be omitted." ) } pos <- pos[1] if ( pos > object@num ) { stop( "Subscript out of bounds." ) } records <- object@records[pos][[1]] return( records ) } ) # Get results by name # # @param object A WeaAna object. # @param ... Not used # @docType methods # @rdname getResults-methods setGeneric( "getResults", function( object, ... ) { standardGeneric( "getResults" ) } ) # Get results by name # # @docType methods # @param object A WeaAna object. # @param name The results with this name are returned. # @examples # library(weaana) # data( "WeatherRecordsDemo" ) # getResults( records, "result" ) # @return Results for weaana object # @export # @rdname getResults-methods # @aliases getResults,WeaAna,WeaAna-method setMethod( f = "getResults", signature = c( object = "WeaAna" ), definition = function( object, name = NULL ) { if ( is.null( name ) ) { stop( "name argument must be specified." ) } if ( length( name ) > 1 ) { warning( "Only one element for name is supported. Others will be omitted." ) name <- name[1] } type <- findResType( object, name ) res <- NULL if ( is.null( type ) ) { warning( paste( "Result \"", name, "\" does not exist.", sep = "" ) ) return( NULL ) } if ( type == "data.frame" ) { for ( i in 1:object@num ) { records <- getWeaAnaSiteByPos( object, i ) site.res <- records$value@res[[name]] res <- rbind( res, site.res ) } } # TODO: For other data format res <- as.data.frame( res, stringsAsFactors = FALSE ) row.names( res ) <- 1:length( res[[1]] ) return( res ) } ) #' Get site information #' #' @param object A WeaAna object. #' @param ... Not used #' @docType methods #' @rdname siteInfor-methods setGeneric( "siteInfor", function( object, ...
) { standardGeneric( "siteInfor" ) } ) #' Get site information #' #' @docType methods #' @param object A WeaAna object. #' @param load.now Whether load site information #' @examples #' library(weaana) #' data( "WeatherRecordsDemo" ) #' siteInfor( records ) #' siteInfor( records, load.now = TRUE ) #' @export #' @return Site information in the WeaAna object #' @rdname siteInfor-methods #' @aliases siteInfor,WeaAna,WeaAna-method setMethod( f = "siteInfor", signature = c( object = "WeaAna" ), definition = function( object, load.now = FALSE ) { res <- NULL if ( object@num < 1 ) { return( res ) } for ( i in 1:object@num ) { records <- getWeaAnaSiteByPos( object, i ) record <- records$value site.res <- siteInfor( record, load.now ) if ( load.now ) { records$value@name <- site.res$Name records$value@number <- site.res$Number records$value@latitude <- site.res$Latitude records$value@longitude <- site.res$Longitude } res <- rbind( res, site.res ) } if ( !is.null( res ) ) res <- as.data.frame( res, stringsAsFactors = FALSE ) return( res ) } ) #' Get site information #' #' @docType methods #' @param object A WeaAnaSite object. #' @param load.now Whether load site information #' @export #' @return Site information in the WeaAnaSite object #' @rdname siteInfor-methods #' @aliases siteInfor,WeaAnaSite,WeaAnaSite-method setMethod( f = "siteInfor", signature = c( object = "WeaAnaSite" ), definition = function( object, load.now = FALSE ) { if ( load.now & [email protected] ) { object <- readSite( [email protected], dataFormat = [email protected] ) } res <- NULL if ( length( object@name ) ) { res$Name <- object@name } else if ( [email protected] ) { res$Name <- "Load later" } if ( length( object@number ) ) { res$Number <- object@number } else { res$Number <- as.character( NA ) } if ( length( object@latitude ) ) { res$Latitude <- object@latitude } else { res$Latitude <- as.numeric( NA ) } if ( length( object@longitude ) ) { res$Longitude <- object@longitude } else { res$Longitude <- as.numeric( NA ) } res <- c( res, object@marker ) res <- as.data.frame( res, stringsAsFactors = FALSE ) return( res ) } ) # Get name and type of results # # @param object A WeaAna object. # @param ... Not used # @docType methods # @rdname resultsInfor-methods setGeneric( "resultsInfor", function( object, ... ) { standardGeneric( "resultsInfor" ) } ) # Get name and type of results # # @docType methods # @param object A WeaAna object. # @examples # library(weaana) # data( "WeatherRecordsDemo" ) # resultsInfor( records ) # # @export # @return Information for results # @rdname resultsInfor-methods # @aliases resultsInfor,WeaAna,WeaAna-method setMethod( f = "resultsInfor", signature = c( object = "WeaAna" ), definition = function( object ) { if ( is.null( object@result ) ) { return( NULL ) } name <- object@result[[1]]$value@name type <- object@result[[1]]$value@type res <- cbind( name = name, type = type ) res <- as.data.frame( res, stringsAsFactors = FALSE ) return( res ) } ) # Get result type by name # # @param object A WeaAna object. # @param name Result name. # @docType methods # @rdname findResType-methods setGeneric( "findResType", function( object, name ) { standardGeneric( "findResType" ) } ) # Get result type by name # # @param object A WeaAna object. # @param name Result name. 
# @export
# @rdname findResType-methods
# @aliases resultsInfor,WeaAna,WeaAna-method
setMethod( f = "findResType",
        signature = c( object = "WeaAna", name = "character" ),
        definition = function( object, name )
{
    if ( is.null( object@result ) )
    {
        return( NULL )
    }
    if ( length( name ) > 1 )
    {
        warning( "Only one element for name is supported. Others will be omitted." )
        name <- name[1]
    }
    old.name <- object@result[[1]]$value@name
    old.type <- object@result[[1]]$value@type
    res <- old.type[old.name %in% name]
    if ( length( res ) == 0 )
    {
        return( NULL )
    }
    return ( res )
} )

#' Get all weather records by year range
#'
#' @param object A WeaAna object.
#' @param ... Not used
#' @docType methods
#' @rdname getWeatherRecords-methods
setGeneric( "getWeatherRecords",
        function( object, ... )
        {
            standardGeneric( "getWeatherRecords" )
        } )

#' Get all weather records by year range
#'
#' @param object A WeaAna object.
#' @param yrange Year range.
#' @param vars Variables to return; "all" returns every available variable.
#' @param ... Other arguments
#' @examples
#' library(weaana)
#' data( "WeatherRecordsDemo" )
#' getWeatherRecords( records, yrange = c( 2008, 2009 ) )
#' getWeatherRecords( records, yrange = c( 2008, 2009 ), length = 10 )
#'
#' @export
#' @return A data frame with all weather records
#' @rdname getWeatherRecords-methods
#' @aliases getWeatherRecords,WeaAna,WeaAna-method
setMethod( f = "getWeatherRecords",
        signature = c( object = "WeaAna" ),
        definition = function( object, yrange = NULL, vars = "all", ... )
{
    vars <- c( vars, waGetPara( "records.index" ) )
    fixed.vars <- c( waGetPara( "records.index" ), waGetPara( "records.vars" ) )
    res <- NULL
    if ( object@num == 0 )
    {
        return( NULL )
    }
    for ( i in 1:object@num )
    {
        s.records <- getWeaAnaSiteByPos( object, i )
        s.record <- s.records$value
        s.extra <- as.list( NULL )
        site.res <- NULL
        if ( s.record@load.later )
        {
            s.extra <- s.record@extra
            s.record <- readSite( s.record@file.path,
                    dataFormat = s.record@data.format )
            s.records$value@name <- s.record@name
            s.records$value@number <- s.record@number
            s.records$value@latitude <- s.record@latitude
            s.records$value@longitude <- s.record@longitude
        }
        if ( object@num > 1 )
        {
            site.res$name <- s.record@name
            site.res$number <- s.record@number
            site.res$latitude <- s.record@latitude
            site.res$longitude <- s.record@longitude
        }
        for ( j in seq( along = fixed.vars ) )
        {
            if ( "all" %in% vars | fixed.vars[j] %in% vars )
            {
                if ( length( methods::slot( s.record, fixed.vars[j] ) ) > 0 )
                {
                    site.res[[fixed.vars[j]]] <- methods::slot( s.record, fixed.vars[j] )
                }
            }
        }
        # extra variables calculated by the read functions
        extra.names <- names( s.record@extra )
        for ( j in seq( along = extra.names ) )
        {
            if ( "all" %in% vars | extra.names[j] %in% vars )
            {
                site.res[[extra.names[j]]] <- s.record@extra[[extra.names[j]]]
            }
        }
        # extra variables to calculate in load-later mode
        ns.extra <- names( s.extra )
        for ( j in seq( along = s.extra ) )
        {
            # string mode
            if ( !is.null( s.extra[[j]]$fun.str ) )
            {
                site.res[[ns.extra[j]]] <- wcalStr( site.res,
                        str = s.extra[[j]]$fun.str,
                        len = length( site.res$day ) )
            } else # function mode
            {
                site.res[[ns.extra[j]]] <- wcalFun( site.res,
                        s.extra[[j]]$fun.name,
                        s.extra[[j]]$var.args,
                        s.extra[[j]]$other.args,
                        len = length( site.res$day ) )
            }
        }
        site.res <- as.data.frame( site.res, stringsAsFactors = FALSE )
        if ( !is.null( yrange ) )
        {
            site.res <- ( site.res[ site.res$year %in% yrange, ] )
        }
        other.args <- list( ... )
        if ( !is.null( other.args$length ) )
        {
            if ( other.args$length < 1 )
            {
                warning( "length must be more than 0 and less than total records." )
                other.args$length <- 1
            } else if ( other.args$length > nrow( site.res ) )
            {
                warning( "length must be more than 0 and less than total records." )
                other.args$length <- nrow( site.res )
            }
            site.res <- site.res[seq( other.args$length ),]
        }
        res <- rbind( res, site.res )
        rm( s.records, s.record, site.res )
    }
    res <- as.data.frame( res, stringsAsFactors = FALSE, seq( along = res[[1]] ) )
    return( res )
} )

#TODO: Change this function for load later
# Get a string which contains the basic information of a weather station.
#
# @param object A WeaAna object.
# @param ... Not used
setGeneric( "getStationInforByString",
        function( object, ... )
        {
            standardGeneric( "getStationInforByString" )
        } )

# Get a string which contains the basic information of a weather station.
#
# @param object A WeaAnaSite object.
# @param yrange Year range.
# @export
setMethod( f = "getStationInforByString",
        signature = c( object = "WeaAnaSite" ),
        definition = function( object, yrange = NULL )
{
    if ( is.null( yrange ) )
    {
        yrange <- range( object@year )
    }
    sub.title = paste( "Site No: ", object@number,
            " Site Name: ", object@name,
            "\n (", object@latitude, " ", object@longitude, ", ",
            paste( yrange, collapse = "-" ), ")", sep = "" )
    return( sub.title )
} )

#------------------------------------------------------------------------------
# Get available variables of weather records
#
# @param object A WeaAna object.
# @param ... Not used
setGeneric( "getAvaiVars",
        function( object, ... )
        {
            standardGeneric( "getAvaiVars" )
        } )

# Get available variables of weather records
#
# @param object A WeaAna object.
# @examples
# library(weaana)
# data( "WeatherRecordsDemo" )
# getAvaiVars( records )
# @export
setMethod( f = "getAvaiVars",
        signature = c( object = "WeaAna" ),
        definition = function( object )
{
    if ( object@num )
    {
        fixed.vars <- waGetPara( "records.vars" )
        records <- getWeaAnaSiteByPos( object, 1 )
        extra.names <- names( records$value@extra )
        return( c( fixed.vars, extra.names ) )
    } else
    {
        return( NULL )
    }
} )

# Get available variables of weather records
#
# @param object A WeaAnaSite object.
setMethod( f = "getAvaiVars",
        signature = c( object = "WeaAnaSite" ),
        definition = function( object )
{
    fixed.vars <- waGetPara( "records.vars" )
    extra.names <- names( object@extra )
    return( c( fixed.vars, extra.names ) )
} )

#------------------------------------------------------------------------------
# Add a marker for each site
#
# @param object A WeaAna object.
# @param ... Not used
setGeneric( "addMarkers",
        function( object, ... )
        {
            standardGeneric( "addMarkers" )
        } )

# Add a marker for each site
#
# @param object A WeaAna object.
# @param ... marker arguments
# @examples
# library(weaana)
# data( "WeatherRecordsDemo" )
# addMarkers( records, model = c( "SILO", "A2" ) )
#
# @export
setMethod( f = "addMarkers",
        signature = c( object = "WeaAna" ),
        definition = function( object, ... )
{
    c.args <- list( ... )
    name.args <- attr( c.args, "name")
    if ( is.null( name.args ) )
    {
        stop( "No arguments are specified." )
    }
    for ( i in 1:length( c.args ) )
    {
        if ( !nchar( name.args[i] ) )
        {
            warning( paste( "No name for ", c.args[i], ". Skip it.", sep = "" ) )
            next()
        }
        c.value <- c.args[[i]]
        if ( length( c.value ) != object@num )
        {
            warning( paste( "The length of ", name.args[i], " is not equal to station number.
Replicated it.", sep = "") ) c.value <- rep( c.value, length = object@num ) } for ( j in 1:object@num ) { s.records <- getWeaAnaSiteByPos( object, j ) s.records$value@marker[[name.args[i]]] <- c.value[j] } } } ) #------------------------------------------------------------------------------ # Delete marker for each site # # @param object A WeaAna object. # @param name A vector of name to remove setGeneric( "delMarkers", function( object, name ) { standardGeneric( "delMarkers" ) } ) # Delete marker for each site # # @param object A WeaAna object. # @param name A vector of name to remove # @examples # library(weaana) # data( "WeatherRecordsDemo" ) # addMarkers( records, model = c( "SILO", "A2" ) ) # delMarkers( records, name = "model" ) # # @export setMethod( f = "delMarkers", signature = c( object = "WeaAna" ), definition = function( object, name = NULL ) { for ( i in 1:object@num ) { s.records <- getWeaAnaSiteByPos( object, i ) for ( j in seq( name ) ) { s.records$value@marker[[name[j]]] <- NULL } } } ) #------------------------------------------------------------------------------ # Site number # # @param object A WeaAna object. # @param ... Not used setGeneric( "siteNum", function( object, ... ) { standardGeneric( "siteNum" ) } ) # Site number # # @param object A WeaAna object. # @param ... marker arguments # @examples # library(weaana) # data( "WeatherRecordsDemo" ) # siteNum( records ) # @export setMethod( f = "siteNum", signature = c( object = "WeaAna" ), definition = function( object, ... ) { return( object@num ) } ) #------------------------------------------------------------------------------ # Get parameters # @param name Variable name which will return. All parameters will return if NULL # @examples # library(weaana) # data("records") # waGetPara( "yrange" ) # # for lower level parameters # waGetPara( "extreme$maxt$hot.day$value" ) # @export waGetPara <- function( name = NULL ) { weaana.glb.para <- defaultPara() if ( is.null( name ) ) { return( weaana.glb.para ) } s.name <- strsplit( name, "\\$" )[[1]] c.para <- weaana.glb.para for ( i in seq( s.name ) ) { c.para <- c.para[[s.name[i]]] if ( is.null( c.para ) ) { warning( "Can not found parameters. All parameters are returned." ) return( weaana.glb.para ) } } return( c.para ) } #------------------------------------------------------------------------------ # Set parameters # # @param ... Parameter arguments # @export setPara <- function( ... ) { settings::stop_if_reserved(...) WA_OPTIONS(...) } #------------------------------------------------------------------------------ # Obtain version of package weaana # # @export weaanaVersion <- function( ) { infor <- utils::sessionInfo() if ( is.null( infor$otherPkgs ) ) { stop( "weaana package is not loaded" ) } weaana.infor <- infor$otherPkgs$weaana if ( is.null( weaana.infor ) ) { stop( "weaana package is not loaded" ) } res <- NULL res$Version <- weaana.infor$Version vers <- as.numeric( strsplit( res$Version, ".", fixed = TRUE )[[1]] ) res$Major <- vers[1] res$Minor <- vers[2] res$Build <- vers[3] res$Revision <- vers[4] return( res ) } # Get filename # # @param object A WeaAna object. setGeneric("getFilenames", function(object) { standardGeneric("getFilenames") } ) # Get filename # # @docType methods # @param object A WeaAna object. 
# @export setMethod(f = "getFilenames", signature = c(object = "WeaAna"), definition = function(object) { res <- NULL if (object@num < 1) { return(res) } for (i in 1:object@num) { records <- getWeaAnaSiteByPos(object, i) record <- records$value res <- c(res, [email protected]) } return( res ) } )
/scratch/gouwar.j/cran-all/cranData/weaana/R/getter.R
# * Author: Bangyou Zheng ([email protected])
# * Created: 1:08 PM Wednesday, 12 September 2012
# * Copyright: AS IS
# *
# Convert hourly weather records to the daily APSIM format
#
# @param records Hourly weather records with a `date` (POSIXct) column and
#   an `airt` (air temperature) column
# @return A data frame with daily year, day, maxt and mint
# @export
hourly2Daily <- function(records)
{
    names(records) <- tolower(names(records))
    records$year <- format(records$date, '%Y')
    records$day <- DOY(records$date)
    records$dmy <- format(records$date, '%Y-%m-%d')
    res <- NULL
    # Carry year and day through as the first value of each day, then
    # reduce the hourly air temperature to daily extremes
    res$year <- as.numeric(as.character(tapply(records$year, records$dmy,
        function(x) x[1])))
    res$day <- as.numeric(as.character(tapply(records$day, records$dmy,
        function(x) x[1])))
    res$maxt <- as.numeric(as.character(tapply(records$airt, records$dmy,
        max, na.rm = TRUE)))
    res$mint <- as.numeric(as.character(tapply(records$airt, records$dmy,
        min, na.rm = TRUE)))
    res <- as.data.frame(res)
    return(res)
}
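# A minimal, self-contained sketch of the aggregation used above, with a
# hypothetical hourly data frame (`date` and `airt` columns are assumed, as
# hourly2Daily() expects after lower-casing the names). Wrapped in
# `if (FALSE)` so the package file stays inert when sourced.
if (FALSE)
{
    hourly <- data.frame(
        date = seq(as.POSIXct("2020-01-01 00:00", tz = "UTC"),
            by = "1 hour", length.out = 48),
        airt = 20 + 8 * sin(seq(0, 4 * pi, length.out = 48)))
    dmy <- format(hourly$date, "%Y-%m-%d")
    # Daily extremes, equivalent to the tapply() calls in hourly2Daily()
    data.frame(
        maxt = as.numeric(tapply(hourly$airt, dmy, max, na.rm = TRUE)),
        mint = as.numeric(tapply(hourly$airt, dmy, min, na.rm = TRUE)))
}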
/scratch/gouwar.j/cran-all/cranData/weaana/R/hourly2Daily.R
#' @importFrom magrittr %>% #' @importFrom rlang .data NULL
/scratch/gouwar.j/cran-all/cranData/weaana/R/import_packages.R
# * Author: Bangyou Zheng ([email protected])
# * Created: 06/01/2011
# *
# Calculate the day of year of the last frost
#
# @param object A WeaAna object.
# @param ... Not used
setGeneric("lastFrostDay", function(object, ...)
{
    standardGeneric("lastFrostDay")
})

# Calculate the day of year of the last frost
#
# @docType methods
# @param object A WeaAna object.
# @param yrange The year range for statistics
# @param stress The stress temperature for frost
# @param offset Thermal time offset (degree days) applied after the last frost day
# @export
setMethod(f = "lastFrostDay",
    signature = c(object = "WeaAna"),
    definition = function(object,
        stress = waGetPara("extreme$mint$frost.night$value"),
        yrange = waGetPara("yrange"),
        offset = 0)
    {
        res <- wapply(object, vars = "mint", period = "year",
            FUN = lastFrostDay,
            ARGS = list(lastFrostDay = list(stress = stress)),
            site.ARGS = list(latitude = siteInfor(object)$Latitude,
                offset = offset),
            yrange = yrange,
            res.name = c("lastFrostDay"))
        return(res)
    }
)

# Calculate the day of year of the last frost
#
# @docType methods
# @param object A numeric object.
# @param stress The stress temperature for frost
# @param latitude Latitude of site.
# @param offset Thermal time offset (degree days) applied after the last frost day
# @export
setMethod(f = "lastFrostDay",
    signature = c(object = "numeric"),
    definition = function(object,
        stress = waGetPara("extreme$mint$frost.night$value"),
        latitude = -20,
        offset = 0)
    {
        min.value <- -1000
        res <- NULL
        if (length(object) < 365)
        {
            warning("Some years do not have complete data; returning NA.")
            return(NA_integer_)
        }
        if (latitude < 0)
        {
            # Southern hemisphere: scan backwards from the end of the year
            # for the last day with mint below the stress temperature
            object_n <- rev(object)
            object_n[ object_n < stress] <- min.value
            min.pos <- which.min(object_n)
            if (min(object_n) > min.value)
            {
                return(NA_integer_)
            }
            min.pos <- length(object_n) + 1 - min.pos
            if (offset != 0)
            {
                # Shift the result by `offset` degree days of cumulative thermal time
                bt <- waGetPara("base.temperature")
                tt <- cumsum(ifelse(object > bt, object - bt, 0))
                min.pos <- which.min(abs(tt - (tt[min.pos] + offset)))
            }
            return(min.pos)
        } else
        {
            stop('Not implemented')
            # object <- rev(object[1:182])
            # object[ object < stress] <- min.value
            # min.pos <- which.min(object)
            # if (min(object) > min.value)
            # {
            #     min.pos <- NA
            # } else
            # {
            #     min.pos <- 183 - min.pos
            # }
            # return(min.pos)
        }
        return (NA_integer_)
    }
)
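# A hedged sketch for the numeric method above. The generic is not exported,
# so interactive use would go through weaana:::lastFrostDay. The series below
# is an assumed, synthetic year of daily minimum temperatures.
if (FALSE)
{
    set.seed(1)
    mint <- 10 + 8 * sin(2 * pi * (1:365) / 365) + rnorm(365, sd = 3)
    # Day of year of the last frost night (mint below 0) at a southern site
    weaana:::lastFrostDay(mint, stress = 0, latitude = -27)
}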
/scratch/gouwar.j/cran-all/cranData/weaana/R/lastFrostDay.R
# * Author: Bangyou Zheng ([email protected])
# * Created: 15/04/2010
# *
# Determine whether a year is a leap year
# @param year A numeric vector of years
# @return TRUE for a leap year, FALSE for a common year
leapYear <- function( year )
{
    return ( year %% 400 == 0 | ( year %% 4 == 0 & year %% 100 != 0 ) )
}
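# Quick checks of the Gregorian rule implemented above: century years are
# leap years only when they are divisible by 400.
if (FALSE)
{
    leapYear( c( 1900, 2000, 2004, 2023 ) )  # FALSE TRUE TRUE FALSE
}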
/scratch/gouwar.j/cran-all/cranData/weaana/R/leapYear.r
# * Author: Bangyou Zheng ([email protected])
# * Created: 02/12/2010
# *
# Convert met files to RData files
#
# @param dataFiles A character vector to specify the path of weather data files.
# @param dataFolders A character vector to specify the path of weather data folders.
# @param outputFolder All RData files would be put into this folder. The
# default position is the original folder.
# @param dataFormat The format of weather data file.
# @export
Met2RData <- function( dataFiles = NULL, dataFolders = NULL,
        outputFolder = NULL, dataFormat = "APSIM" )
{
    station.list <- fileList( dataFiles, dataFolders, dataFormat )
    if ( !is.null( outputFolder ) )
    {
        dir.create( outputFolder , showWarnings = FALSE )
    }
    for ( i in 1:length( station.list ) )
    {
        # Read each station file, then save it next to the source file
        # or into outputFolder
        records <- readWeatherRecords( station.list[i], dataFormat = dataFormat )
        file.name <- basename( station.list[i] )
        file.name <- paste( substr( file.name, 1, nchar( file.name ) - 3 ),
                "RData", sep = "" )
        res.file.path <- NULL
        if ( is.null( outputFolder ) )
        {
            res.file.path <- file.path( dirname( station.list[i] ), file.name )
        } else
        {
            res.file.path <- file.path( outputFolder, file.name )
        }
        save( records, file = res.file.path )
    }
}
/scratch/gouwar.j/cran-all/cranData/weaana/R/met2RData.R
# * Author: Bangyou Zheng ([email protected])
# * Created: 08/04/2010
# *
# Calculate the moving average. For compatibility only.
#
# Note that for an odd k the values can be centred within the window; for an
# even k the values are computed at the end of the window and then shifted.
# @param x A vector to calculate moving average
# @param k The moving window width
# @param shift if shift = "centre", then values are shifted to centre.
# if shift = "begin", then values are at begin of period.
# if shift = "end", then values are at end of period.
# The default value (centre) will be used for any other value of shift.
# @return The moving average of vector x with moving window k
# @export
mov.avg <- function( x, k = 10, shift = "centre" )
{
    return( mov( x, k, shift, "mean" ) )
}

# Calculate the moving maximum. For compatibility only.
#
# @param x A vector to calculate moving maximum
# @param k The moving window width
# @param shift if shift = "centre", then values are shifted to centre.
# if shift = "begin", then values are at begin of period.
# if shift = "end", then values are at end of period.
# The default value (centre) will be used for any other value of shift.
# @return The moving maximum of vector x with moving window k
# @export
mov.max = function( x, k, shift = "centre" )
{
    return( mov( x, k, shift, "max" ) )
}

# Calculate the moving minimum. For compatibility only.
#
# @param x A vector to calculate moving minimum
# @param k The moving window width
# @param shift if shift = "centre", then values are shifted to centre.
# if shift = "begin", then values are at begin of period.
# if shift = "end", then values are at end of period.
# The default value (centre) will be used for any other value of shift.
# @return The moving minimum of vector x with moving window k
# @export
mov.min = function( x, k, shift = "centre" )
{
    return( mov( x, k, shift, "min" ) )
}

# Calculate the moving sum. For compatibility only.
#
# @param x A vector to calculate moving sum
# @param k The moving window width
# @param shift if shift = "centre", then values are shifted to centre.
# if shift = "begin", then values are at begin of period.
# if shift = "end", then values are at end of period.
# The default value (centre) will be used for any other value of shift.
# @return The moving sum of vector x with moving window k
# @export
mov.sum = function( x, k, shift = "centre" )
{
    return( mov( x, k, shift, "sum" ) )
}

# Calculate the moving values
#
# @param x A vector to calculate moving values
# @param k The moving window width
# @param shift if shift = "centre", then values are shifted to centre.
# if shift = "begin", then values are at begin of period.
# if shift = "end", then values are at end of period.
# The default value (centre) will be used for any other value of shift.
# @param fun The method to calculate moving values. Currently, only
# "mean", "max", "min", and "sum" are supported. NULL will be returned
# for any other value
# @return The moving value of vector x with moving window k. NULL will be
# returned for any unsupported fun
# @export
mov <- function( x, k = 10, shift = "centre", fun = "mean" )
{
    y <- NULL
    if ( sum( fun == c( "mean", "max", "min", "sum" ) ) )
    {
        # stats::embed builds the k-wide windows; apply reduces each window
        y <- apply( stats::embed( x, k ), 1, fun )
        if ( shift == "begin" )
        {
            y <- c( y, rep( NA, k - 1 ) )
        } else if ( shift == "end" )
        {
            y <- c( rep( NA, k - 1 ), y )
        } else
        {
            shiftnum <- ceiling( k / 2 ) - 1
            y <- c( rep( NA, shiftnum ), y, rep( NA, k - shiftnum - 1 ) )
        }
    }
    return( as.numeric( y ) )
}
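# A small sketch of the window alignment implemented by mov(): with k = 3,
# "centre" pads one NA on each side, "begin" pads at the end and "end" pads
# at the start. The helpers are internal, so source this file (or use
# weaana:::) before running.
if (FALSE)
{
    x <- 1:6
    mov.avg( x, k = 3, shift = "centre" )  # NA 2 3 4 5 NA
    mov.avg( x, k = 3, shift = "begin" )   # 2 3 4 5 NA NA
    mov.avg( x, k = 3, shift = "end" )     # NA NA 2 3 4 5
}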
/scratch/gouwar.j/cran-all/cranData/weaana/R/mov.r
# * Author: Bangyou Zheng ([email protected]) # * Created: 14:40 Wednesday, 8 June 2011 # * # Calculate the moving extreme temperature by days # # @param object A WeaAna object. # @param ... Not used setGeneric( "movExtremeByDay", function( object, ... ) { standardGeneric( "movExtremeByDay" ) } ) # Calculate the moving extreme temperature by days # # @docType methods # @param object A WeaAna object. # @param indices The data will be classify according the classes # @param period The periods for moving window # @param shift The method for calculating percentage of moving extreme temperature. # @param numdays The maximum days for statistics # @param yrange The year range for statistics # @param res.name The name for result # @return The probability occurring a certain days extreme temperature # @export setMethod( f = "movExtremeByDay", signature = c( object = "WeaAna" ), definition = function( object, indices = waGetPara( "extreme" ), period = waGetPara( "mov.window" ), shift = waGetPara( "shift" ), yrange = waGetPara( "yrange" ), numdays = waGetPara( "numdays" ), res.name = "movExtremeByDay" ) { # Check parameters if ( numdays == 0 ) { return( NULL ) } res <- NULL for ( i in 1:object@num ) { records <- getWeaAnaSiteByPos( object, i ) record <- records$value w_data <- getWeatherRecords( object[i], yrange = yrange ) year <- w_data$year day <- w_data$day day[day == 366] <- NA var_names <- names( indices ) site_res <- NULL for ( j in seq( length = length( indices ) ) ) { value <- w_data[[var_names[j]]] var_indices <- indices[[j]] var_in_names <- names( var_indices ) for ( k in seq( length = length( var_indices ) ) ) { this_index <- var_indices[[k]] this_index_value <- rep( 0, length( day ) ) if ( is.null( this_index$more.or.less ) ) { this_index$more.or.less <- 1 } if ( this_index$more.or.less ) { this_index_value[ value > this_index$value] <- 1 } else { this_index_value[ value < this_index$value] <- 1 } this_index_movday <- mov.sum( this_index_value, period, shift = shift ) year_num <- tapply( this_index_movday, day, FUN = function( x ) sum( !is.na( x ) ) ) year_num[year_num==0] <- 1 mov_percent <- NULL mov_percent$Name = record@name mov_percent$Number = record@number mov_percent$Latitude = record@latitude mov_percent$Longitude = record@longitude mov_percent$Day <- seq( 1, 365 ) mov_percent$Extreme <- var_in_names[k] for ( m in seq( length = numdays ) ) { index_name = paste( "per", as.character(m), "d", sep = "" ) mov_percent[[index_name]] <- as.numeric( tapply( this_index_movday, day, FUN = function(x) sum( x >= m, na.rm = TRUE ) ) / year_num * 100 ) } mov_percent[[paste( "daysMean", sep = "" )]] <- as.numeric( tapply( this_index_movday, day, FUN = function(x) sum( x, na.rm = TRUE ) ) / year_num ) mov_percent[[paste( "daysSD", sep = "" )]] <- as.numeric( tapply( this_index_movday, day, FUN = function(x) stats::sd( x, na.rm = TRUE ) ) / year_num ) mov_percent <- as.data.frame( mov_percent, stringsAsFactors = FALSE ) site_res <- rbind( site_res, mov_percent ) } } records$value@res[[res.name[j]]] <- site_res res <- rbind( res, site_res ) } registerRes( object, res.name, "data.frame" ) return( res ) } ) # Calculate the moving extreme temperature by commulated temperature # # @param object A WeaAna object. # @param ... Not used setGeneric( "movExtremeByDegreeDay", function( object, ... 
) { standardGeneric( "movExtremeByDegreeDay" ) } ) # Calculate the moving extreme temperature by commulated temperature # # @docType methods # @param object WeaAna object # @param indices The data will be classify according the classes # @param period The periods for moving window # @param shift The method for calculating percentage of moving extreme temperature. # @param numdays The maximum days for statistics # @param yrange The year range for statistics # @param res.name Name of results # @param ... Other argument to calculate thermal time # @export setMethod( f = "movExtremeByDegreeDay", signature = c( object = "WeaAna" ), definition = function( object, indices = waGetPara( "extreme" ), period = 400, shift = waGetPara( "shift" ), yrange = waGetPara( "yrange" ), numdays = waGetPara( "numdays" ), res.name = "movExtremeByDegreeDay", ... ) { # Check parameters if ( numdays == 0 ) { return( NULL ) } res <- NULL for ( i in 1:object@num ) { records <- getWeaAnaSiteByPos( object, i ) record <- records$value w_data <- getWeatherRecords( object[i], yrange = yrange ) year <- w_data$year day <- w_data$day day[day == 366] <- NA # Calculate the commulated temperature degree_days <- thermalTimeDaily( w_data$maxt, w_data$mint, ... ) # numdays just for reducing computing time max_days <- maximumDays( degree_days, period ) y <- stats::embed( degree_days, max_days ) z <- NULL if ( shift == "centre" ) { midPoint <- ceiling( max_days / 2 ) z <- cbind( z, y[,midPoint] ) for ( j in (midPoint+1):max_days ) { z <- cbind( z, z[,1] + y[,j] ) z <- cbind( z[,ncol(z)] + y[,midPoint - ( j - midPoint ) ], z ) } } else if ( shift == "end" ) { z <- cbind( z, y[,1] ) for ( j in 2:max_days ) { z <- cbind( z, z[, j - 1] + y[, j] ) } } else { z <- cbind( z, y[,max_days] ) for ( j in (max_days - 1):1 ) { z <- cbind( z[, 1] + y[, j], z ) } } z[ z <= period ] <- 1 z[ z > period ] <- 0 var_names <- names( indices ) site_res <- NULL for ( j in seq( length = length( indices ) ) ) { value <- w_data[[var_names[j]]] var_indices <- indices[[j]] var_in_names <- names( var_indices ) for ( k in seq( length = length( var_indices ) ) ) { this_index <- var_indices[[k]] this_index_value <- rep( 0, length( day ) ) if ( is.null( this_index$more.or.less ) ) { this_index$more.or.less <- 1 } if ( this_index$more.or.less ) { this_index_value[ value > this_index$value] <- 1 } else { this_index_value[ value < this_index$value] <- 1 } mm <- stats::embed( this_index_value, max_days ) mm <- mm * z this_index_movday <- NULL if ( shift == "centre" ) { shiftnum <- ceiling( max_days / 2 ) - 1 this_index_movday <- c( rep( NA, shiftnum ), apply( mm, 1, FUN = sum ), rep( NA, max_days - shiftnum - 1 ) ) } else if ( shift == "end" ) { this_index_movday <- c( rep( NA, max_days - 1 ), apply( mm, 1, FUN = sum ) ) } else { this_index_movday <- c( apply( mm, 1, FUN = sum ), rep( NA, max_days - 1 ) ) } mov_percent <- NULL mov_percent$Name = record@name mov_percent$Number = record@number mov_percent$Latitude = record@latitude mov_percent$Longitude = record@longitude mov_percent$Day <- seq( 1, 365 ) mov_percent$Extreme <- var_in_names[k] year_num <- tapply( this_index_movday, day, FUN = function( x ) sum( !is.na( x ) ) ) year_num[year_num==0] <- 1 for ( m in seq( length = numdays ) ) { index_name = paste( "per", as.character(m), "d", sep = "" ) mov_percent[[index_name]] <- as.numeric( tapply( this_index_movday, day, FUN = function(x) sum( x >= m, na.rm = TRUE ) ) / year_num * 100 ) } mov_percent[[paste( "daysMean", sep = "" )]] <- as.numeric( tapply( 
this_index_movday, day, FUN = function(x) sum( x, na.rm = TRUE ) ) / year_num )
                mov_percent[[paste( "daysSD", sep = "" )]] <- as.numeric( tapply(
                        this_index_movday, day,
                        FUN = function(x) stats::sd( x, na.rm = TRUE ) ) / year_num )
                mov_percent <- as.data.frame( mov_percent, stringsAsFactors = FALSE )
                site_res <- rbind( site_res, mov_percent )
            }
        }
        records$value@res[[res.name[j]]] <- site_res
        res <- rbind( res, site_res )
    }
    registerRes( object, res.name, "data.frame" )
    return( res )
} )

# Calculate the maximum number of days over which the accumulated degree
# days exceed "key.degree.day"
# @param degree.day A vector of daily degree days
# @param key.degree.day The degree-day width of the moving window
# @return The maximum window width in days
maximumDays <- function( degree.day, key.degree.day = 400 )
{
    # Grow the window outwards from the coolest day until the accumulated
    # degree days exceed the threshold
    pos <- which.min( degree.day )
    t <- degree.day[pos]
    pre <- pos
    nex <- pos
    total.num <- length( degree.day )
    for ( i in 1:365 )
    {
        pre <- pre - 1
        nex <- nex + 1
        if ( pre > 0 )
        {
            t <- t + degree.day[pos-i]
        }
        if ( nex < total.num )
        {
            t <- t + degree.day[pos+i]
        }
        if ( t > key.degree.day )
        {
            break
        }
    }
    max_days <- nex - pre
    # Pad the window, keeping an odd width
    if ( max_days %% 2 == 1 )
    {
        max_days <- max_days + 4
    } else
    {
        max_days <- max_days + 5
    }
    return ( max_days )
}
/scratch/gouwar.j/cran-all/cranData/weaana/R/movExtreme.r
# * Author: Bangyou Zheng ([email protected]) # * Created: 01/02/2011 # * # Calculate number of frost day # # @param object A WeaAna object. # @param ... Not used setGeneric( "numberFrostDay", function( object, ... ) { standardGeneric( "numberFrostDay" ) } ) # Calculate number of frost day # # @docType methods # @param object A WeaAna object. # @param yrange The year range for statistics # @param stress The stress temperature for frost # @export setMethod( f = "numberFrostDay", signature = c( object = "WeaAna" ), definition = function( object, stress = waGetPara( "extreme$mint$frost.night$value" ), yrange = waGetPara( "yrange" ) ) { res <- wapply( object, vars = "mint", period = "year", FUN = numberFrostDay, ARGS = list( numberFrostDay = list( stress = stress ) ), yrange = yrange, res.name = c( "numberFrostDay" ) ) return( res ) } ) # Calculate number of frost day # # @docType methods # @param object A numeric object. # @param stress The stress temperature for frost # @export setMethod( f = "numberFrostDay", signature = c( object = "numeric" ), definition = function( object, stress = waGetPara( "extreme$mint$frost.night$value" ) ) { return( sum( object < stress ) ) } )
/scratch/gouwar.j/cran-all/cranData/weaana/R/numberFrostDay.R
# * Author: Bangyou Zheng ([email protected]) # * Created: 01/02/2011 # * # Calculate number of heat day # # @param object A WeaAna object. # @param ... Not used setGeneric( "numberHeatDay", function( object, ... ) { standardGeneric( "numberHeatDay" ) } ) # Calculate number of heat day # # @docType methods # @param object A WeaAna object. # @param yrange The year range for statistics # @param stress The stress temperature for heat # @export setMethod( f = "numberHeatDay", signature = c( object = "WeaAna" ), definition = function( object, stress = waGetPara( "extreme$maxt$hot.day$value" ), yrange = waGetPara( "yrange" ) ) { res <- wapply( object, vars = "maxt", period = "year", FUN = numberHeatDay, ARGS = list( numberHeatDay = list( stress = stress ) ), yrange = yrange, res.name = c( "numberHeatDay" ) ) return( res ) } ) # Calculate number of heat day # # @docType methods # @param object A numeric object. # @param stress The stress temperature for heat # @export setMethod( f = "numberHeatDay", signature = c( object = "numeric" ), definition = function( object, stress = waGetPara( "extreme$maxt$hot.day$value" ) ) { return( sum( object > stress ) ) } )
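# The numeric methods above reduce to simple threshold counts. A hedged
# sketch on assumed, synthetic temperatures (the generics are not exported,
# hence the ::: access):
if (FALSE)
{
    set.seed(2)
    maxt <- rnorm( 365, mean = 28, sd = 6 )
    mint <- maxt - 10
    weaana:::numberHeatDay( maxt, stress = 35 )   # days with maxt > 35
    weaana:::numberFrostDay( mint, stress = 0 )   # days with mint < 0
}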
/scratch/gouwar.j/cran-all/cranData/weaana/R/numberHeatDay.R
# * Author: Bangyou Zheng ([email protected])
# * Created: 15/04/2010
# *
# Calculate the index according to year and day of year for a specific period
#
# @param year A vector of years
# @param day A vector of days of year
# @param period The period to count by ("year", "month", "week", "day",
# "doy", or a block length in days)
# @return Generated index according to period
periodIndex <- function( year, day, period )
{
    if ( length( period ) == length( year ) )
    {
        return( period )
    }
    keys <- NULL
    if ( period == "year" )
    {
        keys <- year
    } else if ( period == "month" )
    {
        keys <- as.numeric( format(
                as.Date( day - 1, origin = paste( year, "-01-01", sep = "" ) ),
                "%m" ) )
    } else if ( period == "week" )
    {
        keys <- ceiling( day / 7 )
        # We assume each year has only 52 weeks; the last day (two days in
        # leap years) is counted into the 52nd week.
        keys[keys>=53] <- 52
    } else if ( period == "day" )
    {
        keys <- as.numeric( format(
                as.Date( day - 1, origin = paste( year, "-01-01", sep = "" ) ),
                "%d" ) )
    } else if ( period == "doy" )
    {
        keys <- day
    } else
    {
        keys <- ceiling( day / period )
    }
    return(keys)
}
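# A self-contained sketch of the keys periodIndex() generates for one
# non-leap year (the function is internal, so source this file first).
if (FALSE)
{
    year <- rep( 2009, 365 )
    day <- 1:365
    table( periodIndex( year, day, "month" ) )  # days per calendar month
    head( periodIndex( year, day, "week" ) )    # 1 1 1 1 1 1
    head( periodIndex( year, day, 10 ) )        # 10-day blocks: 1 1 1 1 1 1
}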
/scratch/gouwar.j/cran-all/cranData/weaana/R/periodIndex.r
# * Author: Bangyou Zheng ([email protected])
# * Created: 12/16/2010
# *
# Create a new pointer object
#
# @param value The value for this pointer
# @return A new pointer to value
#
# @rdname readWeatherRecords
newPointer <- function(value)
{
    object <- new.env(parent = globalenv())
    object$value <- value
    class(object) <- 'pointer'
    return(object)
}

#' Read weather records from a file list and/or a folder list
#'
#' @param dataFiles A character vector to specify the path of weather data files.
#' @param dataFolders A character vector to specify the path of weather data folders.
#' @param dataFormat The format of weather data file; one of "APSIM",
#' "RDATA" or "GHCN".
#' @param dataWeather A data.frame for existing data.
#' @param load.later Whether to load weather records now or later.
#' @param ... Other arguments
#' @return A WeaAna class which contains all weather data.
#' @export
#'
#' @rdname readWeatherRecords
readWeatherRecords <- function(dataFiles = NULL, dataFolders = NULL,
    dataFormat = "APSIM", dataWeather = NULL, load.later = FALSE, ...)
{
    fileLists <- fileList(dataFiles, dataFolders, dataFormat)
    records <- NULL
    if (load.later)
    {
        # Defer reading: store file paths only and read on demand
        sites.records <- NULL
        num <- 0
        for (i in seq(along = fileLists))
        {
            site.record <- methods::new("WeaAnaSite",
                file.path = fileLists[i],
                data.format = dataFormat,
                load.later = TRUE)
            sites.records <- c(sites.records, newPointer(site.record))
            num <- num + 1
        }
        result <- c(NULL, newPointer(methods::new("result",
            name = as.character(NULL), type = as.character(NULL))))
        records <- methods::new("WeaAna",
            num = num,
            records = sites.records,
            result = result)
    } else
    {
        sites.records <- NULL
        num <- 0
        for (i in 1:length(fileLists))
        {
            site.record <- readSite(fileLists[i], dataFormat = dataFormat, ...)
            sites.records <- c(sites.records, newPointer(site.record))
            num <- num + 1
        }
        result <- c(NULL, newPointer(methods::new("result",
            name = as.character(NULL), type = as.character(NULL))))
        records <- methods::new("WeaAna",
            num = num,
            records = sites.records,
            result = result)
        # Derive a calendar date from year and day of year
        yearDay2Date <- function(day, year)
        {
            return (as.Date(day, origin = as.Date(paste(year - 1, '-12-31',
                sep = ''), format = '%Y-%m-%d')))
        }
        wcal(records, FUN = yearDay2Date,
            var.args = c("day", "year"), var.name = "date")
    }
    if (is.null(records))
    {
        stop("No weather records are found.")
    }
    setPara(data.format = dataFormat)
    setPara(load.later = load.later)
    return(records)
}

# Read weather records from a weather data file
#
# @param filename The file name of weather data file.
# @param dataFormat The format of weather data file.
# @param ... Other arguments
# @return A WeaAnaSite class which contains all weather data.
# @export
# @rdname readWeatherRecords
readSite <- function(filename, dataFormat = "APSIM", ...)
{
    record <- NULL
    if (dataFormat == "APSIM")
    {
        record <- readSiteAPSIM(filename)
    } else if (dataFormat == "RDATA")
    {
        record <- readSiteRDATA(filename)
    } else if (dataFormat == 'GHCN')
    {
        record <- readSiteGHCN(filename, ...)
    } else
    {
        stop ("Wrong data format!")
    }
    return(record)
}

# Read weather records from a weather data file with APSIM format
#
# @param filename The file name of weather data file.
# @return A WeaAnaSite class which contains all weather data.
# # @rdname readWeatherRecords readSiteAPSIM <- function(filename) { a <- NULL station.number <- as.character(NA) station.name <- as.character(NA) latitude <- as.numeric(NA) longitude <- as.numeric(NA) temp <- readLines(filename, n = 100) sta.num.str <- temp[grep("!station number", temp)] if (length((sta.num.str)) > 0) { station.number <- omitBlankSE(substr(sta.num.str, 19, 1000)) } sta.name.str <- temp[grep("!station name", temp)] if (length((sta.name.str)) > 0) { station.name <- omitBlankSE(substr(sta.name.str, 16, 1000)) } lat.str <- temp[grep("latitude", tolower(temp))] if (length((lat.str)) > 0) { latitude <- gsub("^latitude( |\t)*=( |\t)*(-?\\d*\\.{0,1}\\d*).*$", "\\3", tolower(lat.str)) latitude <- as.numeric(latitude) } lon.str <- temp[grep("longitude", tolower(temp))] if (length((lon.str)) > 0) { longitude <- gsub("^longitude( |\t)*=( |\t)*(-?\\d*\\.{0,1}\\d*).*$", "\\3", tolower(lon.str)) longitude <- as.numeric(longitude) } tav.str <- temp[grep("^tav", temp)] tav <- -999 if (length((tav.str)) > 0) { tav <- as.numeric( gsub('(^tav += +)(\\d+\\.?\\d*)( .*$)', '\\2', tav.str)) } amp.str <- temp[grep("^amp", temp)] amp <- -999 if (length((amp.str)) > 0) { amp <- as.numeric( gsub('(^amp += +)(\\d+\\.?\\d*)( .*$)', '\\2', amp.str)) } # for year start.line <- grep("^.*(year|Year|date|Date)", temp) if (length(start.line) == 0) { stop("Keywords year or date ae not found.") } a <- utils::read.table(filename, head = FALSE, sep = "", skip = start.line + 1, col.names = scan(filename, "", sep = "", skip = start.line - 1, nlines = 1, quiet = TRUE), as.is = TRUE) names(a) <- tolower(names(a)) # Convert date if (!is.null(a$date)) { date_format <- scan(filename, "", sep = "", skip = start.line, nlines = 1, quiet = TRUE) date_format <- date_format[which(names(a) == "date")] if (nchar(date_format) == 0) { stop("Date format is not found") } date_format <- gsub("(\\(|\\))", "", date_format) date_format <- "%d/%m/%Y" a$date <- as.Date(a$date, format = date_format) if (sum(is.na(a$date)) > 0) { stop("NA values are found for date columns.") } a$year <- format(a$date, "%Y") a$day <- format(a$date, "%j") } a$year <- as.numeric(a$year) a$day <- as.numeric(a$day) if (!is.null(a$pan)) { a$evap <- a$pan } extra <- NULL a$maxt <- as.numeric(a$maxt) a$mint <- as.numeric(a$mint) a$radn <- as.numeric(a$radn) a$rain <- as.numeric(a$rain) if (!is.null(a$evap)) { a$evap <- as.numeric(a$evap) } if (is.null(a$avgt)) { extra$avgt <- (a$maxt + a$mint) / 2 } else { extra$avgt <- a$avgt } if (is.null(a$vpd)) { extra$vpd <- vpd.apsim(a$maxt, a$mint) } else { extra$vpd <- a$vpd } records <- methods::new("WeaAnaSite", name = station.name, number = station.number, latitude = latitude, longitude = longitude, tav = tav, amp = amp, year = as.numeric(a$year), day = as.numeric(a$day), radn = as.numeric(a$radn), maxt = as.numeric(a$maxt), mint = as.numeric(a$mint), rain = as.numeric(a$rain), evap = as.numeric(a$evap), vp = as.numeric(a$vp), code = as.character(a$code), extra = extra, file.path = filename, data.format = "APSIM", load.later = FALSE) rm(a, temp) gc() return(records) } # Read weather records from a weather data file with RDATA format # # @param filename The file name of weather data file. # @return A WeaAnaSite class which contains all weather data. # # @rdname readWeatherRecords readSiteRDATA <- function(filename) { temp.env <- new.env() vars <- load(filename, temp.env) if (length(vars) > 1) { warning("Only first variable used. 
") } record <- NULL temp.records <- get(vars[1], envir = temp.env) if (class(temp.records) == "WeaAna") { if (temp.records@num > 1) { warning(paste("There are more than one weather station in ", filename, ". Only first station is used.", sep = "")) } site.record <- getWeaAnaSiteByPos(temp.records, 1) record <- methods::new("WeaAnaSite", name = site.record$value@name, number = site.record$value@number, latitude = site.record$value@latitude, longitude = site.record$value@longitude, year = site.record$value@year, day = site.record$value@day, radn = site.record$value@radn, maxt = site.record$value@maxt, mint = site.record$value@mint, rain = site.record$value@rain, evap = site.record$value@evap, vp = site.record$value@vp, code = site.record$value@code, extra = site.record$value@extra, file.path = filename, data.format = "RDATA", load.later = FALSE) } rm(list = vars, envir = temp.env) rm(site.record ) rm(temp.env) gc() return(record) } # Read weather records from a weather data file with GHCN format # # @param filename The file name of weather data file. # @param ... Other arguments for site information # @return A WeaAnaSite class which contains all weather data. # # @rdname readWeatherRecords readSiteGHCN <- function(filename, ...) { others <- list(...) temp <- readLines(filename) out <- strsplit(temp, '') out <- as.matrix(do.call(rbind, out)) pos_matrix <- matrix(c(12, 16, 18, seq(0, 30) * 8 + 22, 15, 17, 21, seq(0, 30) * 8 + 26), ncol = 2) traits_map <- data.frame(name = c('PRCP', 'TMAX', 'TMIN'), apsim = c('rain', 'maxt', 'mint'), ratio = c(0.1, 0.1, 0.1), stringsAsFactors = FALSE) out <- apply(pos_matrix, 1, function(x) { substr(temp, x[1], x[2]) }) year <- as.numeric(out[,1]) month <- as.numeric(out[,2]) trait <- out[,3] pos <- match(trait, traits_map$name) trait <- traits_map$apsim[pos] value <- t(out[,-(1:3)]) value <- matrix(as.numeric(value), nrow = nrow(value)) value[value == -9999] <- NA ratio <- matrix(rep(traits_map$ratio[pos], each = nrow(value)), nrow = nrow(value)) value <- value * ratio value <- reshape2::melt(value) names(value) <- c('day', 'pos', 'value') value$year <- year[value$pos] month <- month[value$pos] value$trait <- trait[value$pos] date <- ISOdate(value$year, month, value$day) value$day <- lubridate::yday(date) value <- value[!is.na(value$day),] weather <- reshape2::dcast(value, year + day ~ trait, value.var = 'value') records <- methods::new("WeaAnaSite", name = others$Name, number = others$Number, latitude = others$Latitude, longitude = others$Longitude, tav = -999, amp = -999, year = as.numeric(weather$year), day = as.numeric(weather$day), radn = as.numeric(weather$radn), maxt = as.numeric(weather$maxt), mint = as.numeric(weather$mint), rain = as.numeric(weather$rain), evap = as.numeric(weather$evap), vp = as.numeric(weather$vp), code = as.character(weather$code), extra = weather[,!(names(weather) %in% c('year', 'day', 'radn', 'maxt', 'mint', 'rain', 'evap', 'vp'))], file.path = filename, data.format = "GHCN", load.later = FALSE) rm(weather, temp) gc() return(records) } #' create WeaAna class #' #' @param mets A list contained information of weather records. 
#' @return A new WeaAna class #' @export createWeaAna <- function(mets) { if (!is.null(mets$Records)) { tmp <- mets mets <- as.list(NULL) mets[[1]] <- tmp } tmp <- mets site_record <- NULL result <- NULL for (i in seq(along = tmp)) { mets <- tmp[[i]] extra <- NULL if (is.null(mets$Records$avgt)) { extra$avgt <- (mets$Records$maxt + mets$Records$mint) / 2 } else { extra$Records$avgt <- mets$Records$avgt } if (is.null(mets$Records$vpd)) { extra$Records$vpd <- vpd.apsim(mets$Records$maxt, mets$Records$mint) } else { extra$Records$vpd <- mets$Records$vpd } if (is.null(mets$Records$evap) & !is.null(mets$Records$pan)) { mets$Records$evap <- mets$Records$pan } if (is.null(mets$tav) | is.null(mets$amp)) { records <- mets$Records records$date <- as.Date(records$day, origin = as.Date(paste( records$year - 1, '-12-31', sep = ''), format = '%Y-%m-%d')) records$ym <- format(records$date, format = '%Y-%m') records$m <- format(records$date, format = '%m') records$avgt <- (records$maxt + records$mint) / 2 amp <- tapply(records$avgt, records$ym, FUN = mean) month <- tapply(records$m, records$ym, FUN = function(x) as.numeric(as.character(x[1]))) year <- tapply(records$year, records$ym, FUN = function(x) as.numeric(as.character(x[1]))) amp_max <- tapply(amp, year, FUN = max) amp_min <- tapply(amp, year, FUN = min) tav <- round(mean(amp), 2) amp <-round(mean(amp_max - amp_min), 2) mets$tav <- tav mets$amp <- amp } site_rd <- methods::new("WeaAnaSite", name = as.character(mets$Name), number = as.character(mets$Number), latitude = mets$Latitude, longitude = mets$Longitude, tav = as.numeric(mets$tav), amp = as.numeric(mets$amp), year = as.numeric(mets$Records$year), day = as.numeric(mets$Records$day), radn = as.numeric(mets$Records$radn), maxt = as.numeric(mets$Records$maxt), mint = as.numeric(mets$Records$mint), rain = as.numeric(mets$Records$rain), evap = as.numeric(mets$Records$evap), vp = as.numeric(mets$Records$vp), code = as.character(mets$Records$code), extra = extra, file.path = as.character(NA), data.format = "APSIM", load.later = FALSE) site_record <- c(site_record, newPointer(site_rd)) result <- c(result, newPointer(methods::new("result", name = as.character(NULL), type = as.character(NULL)))) } records <- methods::new("WeaAna", num = length(tmp), records = site_record, result = result) yearDay2Date <- function(day, year) { return (as.Date(day, origin = as.Date(paste(year - 1, '-12-31', sep = ''), format = '%Y-%m-%d'))) } wcal(records, FUN = yearDay2Date, var.args = c("day", "year"), var.name = "date") return(records) }
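# A hedged usage sketch for the readers above, using the demo met file that
# ships with the package (the same file referenced in the thermalTime()
# examples). Wrapped in `if (FALSE)` so the package file stays inert.
if (FALSE)
{
    met_file <- system.file("extdata/WeatherRecordsDemo1.met", package = "weaana")
    records <- readWeatherRecords(met_file)                  # read immediately
    lazy <- readWeatherRecords(met_file, load.later = TRUE)  # defer reading
    siteInfor(lazy, load.now = TRUE)
}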
/scratch/gouwar.j/cran-all/cranData/weaana/R/readWeatherRecords.r
# * Author: Bangyou Zheng ([email protected])
# * Created: 21/01/2011
# *
# Register the name and type of a result on the shared result pointer
setGeneric( "registerRes",
        function( object, name, type )
        {
            standardGeneric( "registerRes" )
        } )

setMethod( f = "registerRes",
        signature = c( object = "WeaAna", name = "character", type = "character" ),
        definition = function( object, name, type )
{
    old.name <- object@result[[1]]$value@name
    old.type <- object@result[[1]]$value@type
    if ( is.null( name ) || is.na( name ) )
    {
        # A bare `return` does not exit an R function; return NULL explicitly
        return( NULL )
    }
    if ( !( name %in% old.name ) )
    {
        object@result[[1]]$value@name <- c( old.name, name )
        object@result[[1]]$value@type <- c( old.type, type )
    }
} )

# Merge another result object, keeping only names not already registered
setGeneric( "addResult",
        function( object, an.res )
        {
            standardGeneric( "addResult" )
        } )

setMethod( f = "addResult",
        signature = c( object = "result", an.res = "result" ),
        definition = function( object, an.res )
{
    old.name <- object@name
    old.type <- object@type
    an.name <- an.res@name
    an.type <- an.res@type
    pos <- ! ( an.name %in% old.name )
    object@name <- c( old.name, an.name[pos] )
    object@type <- c( old.type, an.type[pos] )
    return( object )
} )
/scratch/gouwar.j/cran-all/cranData/weaana/R/registerRes.R
# * Author: Bangyou Zheng ([email protected]) # * Created: 12/16/2010 # * #' Show basic information of class WeaAna #' #' Show the name, number, latitude, longitude of all weather stations. #' @docType methods #' @param object WeaAna objects #' @examples #' library(weaana) #' data( "WeatherRecordsDemo" ) #' show( records ) #' records setMethod( "show", signature = c( object = "WeaAna" ), definition = function( object ) { infor <- siteInfor( object ) if ( !( is.null( infor ) & waGetPara( "load.later" ) ) ) { print( infor ) } else { msg <- paste( "There are ", object@num, " weather stations,", " but they will be loaded later.", sep = "" ) print( msg ) } } )
/scratch/gouwar.j/cran-all/cranData/weaana/R/showWeaAna.R
# * Author: Bangyou Zheng ([email protected])
# * Created: 15:13 Tuesday, 3 May 2011
# *
#' Calculate the great-circle distance between two points (haversine formula)
#'
#' @param lat1 Latitude of the first point (deg)
#' @param lon1 Longitude of the first point (deg)
#' @param lat2 Latitude of the second point (deg)
#' @param lon2 Longitude of the second point (deg)
#' @return Distance in km
#' @export
sphericalDistance <- function( lat1, lon1, lat2, lon2 )
{
    # Convert degrees to radians
    lon1 <- lon1 * pi /180
    lat1 <- lat1 * pi /180
    lon2 <- lon2 * pi /180
    lat2 <- lat2 * pi /180
    dLat <- lat2 - lat1
    # The sign of dLon is irrelevant: only sin(dLon / 2) ^ 2 is used
    dLon <- lon1 - lon2
    a <- sin(dLat/2) * sin(dLat/2) +
            cos(lat1) * cos(lat2) * sin(dLon/2) * sin(dLon/2)
    c <- 2 * atan2(sqrt(a), sqrt(1-a))
    # Mean Earth radius of 6371 km
    d <- 6371 * c
    return( d )
}
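# Hedged example: great-circle distance between two assumed coordinates
# (approximately Brisbane and Canberra); expect roughly 940 km.
if (FALSE)
{
    sphericalDistance( lat1 = -27.47, lon1 = 153.03,
            lat2 = -35.28, lon2 = 149.13 )
}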
/scratch/gouwar.j/cran-all/cranData/weaana/R/sphericalDistance.R
# * Author: Bangyou Zheng ([email protected])
# * Created: 06/04/2010
# *
# String functions in this package
#
# Get the length of a string
# @rdname string
# @param str Input string
# @return The character number of the string
len <- function( str )
{
    return( length( unlist( strsplit( str, "" ) ) ) )
}

# String functions in this package
#
# Get the first characters of a string from the left
# @rdname string
# @param str Input string
# @param num The number of characters to return from the left
# @return The characters of the string from the left
left <- function( str, num = 1 )
{
    return( substr( str, 1, num ) )
}

# String functions in this package
#
# Get the last characters of a string from the right
# @rdname string
# @param str Input string
# @param num The number of characters to return from the right
# @return The characters of the string from the right
right <- function( str, num = 1 )
{
    len = len( str )
    return( substr( str, len - num + 1, len ) )
}

# String functions in this package
#
# Remove all blanks from a string
# @rdname string
# @param str Input string
# @return The string without any blanks
omitBlank <- function( str )
{
    str <- unlist( strsplit( str, "" ) )
    str[str==" "] <- ""
    res <- NULL
    for ( i in 1:length(str) )
    {
        res <- paste( res, str[i], sep = "" )
    }
    return( res )
}

# String functions in this package
#
# Search for a character in a string
# @rdname string
# @param str Input string
# @param search The character to search for
# @param start The start position of the search
# @return The position where the character first appears
searchChar <- function( str, search, start = 1 )
{
    str <- unlist( strsplit( str, "" ) )
    pos <- NULL
    for ( i in start:length(str) )
    {
        if ( str[i] == search )
        {
            pos <- i
            break
        }
    }
    return( pos )
}

# String functions in this package
#
# Omit the leading and trailing blank characters
# @rdname string
# @param str Input string
# @return The string without leading and trailing blanks
omitBlankSE <- function( str )
{
    str <- unlist( strsplit( str, "" ) )
    for ( i in 1:length(str) )
    {
        if ( str[i] != " " )
        {
            str <- str[ i:length( str ) ]
            break
        }
    }
    for ( i in length(str):1 )
    {
        if ( str[i] != " " )
        {
            str <- str[ 1:i ]
            break
        }
    }
    res <- NULL
    for ( i in 1:length(str) )
    {
        res <- paste( res, str[i], sep = "" )
    }
    return( res )
}

# Convert a vector to a string with a separator
#
# @rdname string
# @param vector The input vector to convert
# @param sep The separator, with default value ", "
# @return The string which connects all members of the vector
vector2string <- function( vector, sep = ", " )
{
    s <- as.character( vector[1] )
    if ( length( vector ) > 1 )
    {
        for ( i in 2:length( vector ) )
        {
            s <- paste( s, vector[i], sep = sep )
        }
    }
    return( s )
}
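# Quick checks of the internal string helpers above (source this file first;
# they are not exported).
if (FALSE)
{
    len( "weaana" )                     # 6
    left( "weather", 3 )                # "wea"
    right( "weather", 3 )               # "her"
    omitBlankSE( "  station name  " )   # "station name"
    vector2string( 1:3, sep = "-" )     # "1-2-3"
}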
/scratch/gouwar.j/cran-all/cranData/weaana/R/string.r
# * Author: Bangyou Zheng ([email protected])
# * Created: 21:35 Tuesday, 7 June 2011
# *
#' Calculate thermal time using cardinal temperatures
#'
#' @param weather WeaAna object
#' @param x_temp The cardinal temperatures
#' @param y_temp The effective thermal time
#' @param method The method to calculate thermal time.
#' The default method is ( maxt + mint ) / 2 - base.
#' The three-hour temperature method will be used if method = '3hr'
#' @return A data.frame with three columns: year, day and thermalTime.
#' @export
#' @examples
#' met_file <- system.file("extdata/WeatherRecordsDemo1.met", package = "weaana")
#' records <- readWeatherRecords(met_file)
#' x_temp <- c(0, 26, 34)
#' y_temp <- c(0, 26, 0)
#' res <- thermalTime(records, x_temp, y_temp)
#' head(res)
#' res <- thermalTime(records, x_temp, y_temp, method = "3hr")
#' head(res)
thermalTime <- function(weather, x_temp, y_temp, method = NULL)
{
    if (!inherits(weather, "WeaAna"))
    {
        stop("WeaAna class is required.")
    }
    res <- NULL
    for ( i in 1:weather@num )
    {
        w_data <- getWeatherRecords( weather[i])
        w_data <- w_data %>%
            dplyr::mutate(thermalTime = thermalTimeDaily(
                .data$mint, .data$maxt,
                x_temp = x_temp, y_temp = y_temp,
                method = method) )
        res[[i]] <- w_data %>%
            dplyr::select(dplyr::all_of(c('year', 'day', 'thermalTime')))
    }
    # Stack the per-site results row-wise so the documented three columns
    # (year, day, thermalTime) are preserved
    res <- dplyr::bind_rows(res)
    return( res )
}

#' Calculate thermal time using cardinal temperatures
#'
#' @param maxt The maximum temperature
#' @param mint The minimum temperature
#' @param x_temp The cardinal temperatures
#' @param y_temp The effective thermal time
#' @param method The method to calculate thermal time.
#' The default method is ( maxt + mint ) / 2 - base.
#' The three-hour temperature method will be used if method = '3hr'
#' @return The thermal time.
#' @export
#' @examples
#' mint <- c(0, 10)
#' maxt <- c(30, 40)
#' x_temp <- c(0, 20, 35)
#' y_temp <- c(0, 20, 0)
#' thermalTimeDaily(mint, maxt, x_temp, y_temp)
#' thermalTimeDaily(mint, maxt, x_temp, y_temp, method = '3hr')
thermalTimeDaily <- function(mint, maxt, x_temp, y_temp, method = NULL)
{
    if (!is.numeric(maxt) | !is.numeric(mint))
    {
        stop("Numeric vector is required for mint and maxt.")
    }
    if (length(mint) != length(maxt))
    {
        stop("mint and maxt require the same length.")
    }
    if (is.null(method))
    {
        meant <- (maxt + mint) / 2
        tt <- interpolationFunction(x = x_temp, y = y_temp, values = meant)
        return (tt)
    } else if (method == "3hr")
    {
        pos <- mint > maxt
        if (sum(pos) > 0)
        {
            stop("Minimum temperature is more than maximum temperature.")
        }
        # Calculate the eight three-hour temperatures of the day
        hour <- seq(1, 8)
        frac <- 0.92105 + 0.1140 * hour -
            0.0703 * hour * hour + 0.0053 * hour * hour * hour
        mint2 <- matrix(rep(mint, times = 8), ncol = 8)
        maxt2 <- matrix(rep(maxt, times = 8), ncol = 8)
        frac2 <- matrix(rep(frac, each = length(mint)), ncol = 8)
        temp <- mint2 + (maxt2 - mint2) * frac2
        tt <- matrix(interpolationFunction(x = x_temp, y = y_temp, temp), ncol = 8)
        res <- apply(tt, 1, mean)
        return(res)
    } else
    {
        stop("Not implemented for method ", method)
    }
}

#' Calculate thermal time using hourly temperature (rather than daily temperature)
#'
#' @param timestamp The timestamp of weather records
#' @param temperature The temperature
#' @param x_temp The cardinal temperatures
#' @param y_temp The effective thermal time
#'
#' @return A data frame with daily thermal time
#' @export
#' @examples
#' met_file <- system.file("extdata/WeatherHourly.csv", package = "weaana")
#' hourly <- read.csv(met_file, as.is = TRUE)
#'
#' hourly$timestamp <- as.POSIXct(hourly$timestamp, format =
"%Y-%m-%dT%H:%M:%SZ") #' x_temp <- c(0, 20, 35) #' y_temp <- c(0, 20, 0) #' thermalTimeHourly(hourly$timestamp, hourly$temperature, x_temp, y_temp) thermalTimeHourly <- function(timestamp, temperature, x_temp, y_temp) { if (!("POSIXct" %in% class(timestamp))) { stop("POSIXct class is required for timestamp") } if (length(timestamp) != length(temperature)) { stop("Require the same length of timestamp and temperature") } if (sum(is.na(timestamp)) > 0) { stop("Missing values are found in the timestamp") } if (sum(is.na(temperature)) > 0) { stop("Missing values are found in the temperature") } # Calculate the time difference diff <- as.numeric(timestamp) - dplyr::lag(as.numeric(timestamp)) # Assign the second to the first value diff[1] <- diff[2] res <- tibble::tibble(timestamp = timestamp, temperature = temperature, diff = diff) %>% dplyr::mutate(date = as.Date(.data$timestamp)) %>% dplyr::rowwise() %>% dplyr::mutate(tt = interpolationFunction( x = x_temp, y = y_temp, values = .data$temperature)) %>% dplyr::group_by(.data$date) %>% dplyr::summarise(value = sum(.data$tt * .data$diff) / sum(.data$diff), .groups = "drop") res }
/scratch/gouwar.j/cran-all/cranData/weaana/R/thermalTime.R
# * Author: Bangyou Zheng ([email protected]) # * Created: 4:29 PM Friday, 15 February 2013 # * Copyright: AS IS # * # Transfer of sign - from FORTRAN. # The result is of the same type and kind as a. Its value is the abs(a) of a, # if b is greater than or equal positive zero; and -abs(a), if b is less than # or equal to negative zero. # Example a = sign_apsim (30,-2) ! a is assigned the value -30 # # @param a value 1 # @param b value 2 sign_apsim <- function( a, b ) { if ( b >= 0 ) { return( abs( a ) ) } else { return( -abs(a) ) } } # Some utility functions for weather analysis #' Significantly t-test with auto-correlation for time serial data #' #' Method is presented by Santer et al. 2000 #' @param y A vector of time serial data #' @param slope Whether export slope #' @return p values of t-test #' @export ttest_ts <- function(y, slope = NULL) { if(sum(is.na(y)) == 0) { y <- as.numeric(y) num <- length(y) x <- seq(along = y) if (is.null(slope)) { slope <- stats::cor(x, y) * stats::sd(y)/stats::sd(x) } sb_m <- sqrt(sum((x - mean(x)) ^ 2)) inercept <- (sum(y) - slope * sum(x)) / num et_x <- y - (inercept + slope * x) ne_x <- stats::cor(et_x[-1], et_x[-(num)]) ne_x <- num * (1 - ne_x) / (1 + ne_x) se_x <- sqrt((1 / (ne_x - 2)) * sum(et_x * et_x, na.rm = TRUE)) sb_x <- se_x / sb_m tb_x <- abs(slope / sb_x) p_x <- (1 - stats::pt(tb_x, df = ne_x - 2)) * 2 return (p_x) } else { return (NA) } } # Calculate the spatial slope and aspect # # Burrough, P. A., and McDonell, R. A., 1998. Principles of Geographical Information Systems (Oxford University Press, New York) # @param x A matrix for spatial data with row for longitude and column for latitude. # dimnames must be specified for values of longitude and latitude # @param slope Logical, whether return slope # @param aspect Logical, whether return aspect spatial <- function(x, slope = TRUE, aspect = TRUE) { x_dim <- dim(x) x_template <- array(rep(NA, prod(x_dim)), dim = x_dim) f_a <- x_template f_a[seq(2, x_dim[1]), seq(2, x_dim[2])] <- x[-x_dim[1],-x_dim[2]] f_b <- x_template f_b[seq(2, x_dim[1]),] <- x[-x_dim[1],] f_c <- x_template f_c[seq(2, x_dim[1]), seq(1, x_dim[2] - 1)] <- x[-x_dim[1],-1] f_d <- x_template f_d[,seq(2, x_dim[2])] <- x[,-x_dim[2]] f_e <- x f_f <- x_template f_f[,seq(1, x_dim[2] - 1)] <- x[,-1] f_g <- x_template f_g[seq(1, x_dim[1] - 1), seq(2, x_dim[2])] <- x[-1,-x_dim[2]] f_h <- x_template f_h[seq(1, x_dim[1] - 1),] <- x[-1,] f_i <- x_template f_i[seq(1, x_dim[1] - 1), seq(1, x_dim[2] - 1)] <- x[-1,-1] x_dimnames <- dimnames(x) x_cellsize <- array(rep(111.325 * cos(as.numeric(x_dimnames[[2]]) * pi / 180) * 0.05, times = length(x_dimnames[[1]])), dim = x_dim) y_cellsize <- array(rep(111.325, prod(x_dim)), dim = x_dim) * 0.05 dz_dx <- ((f_c + 2 * f_f + f_i) - (f_a + 2 * f_d + f_g)) / (8 * x_cellsize) dz_dy <- ((f_g + 2 * f_h + f_i) - (f_a + 2 * f_b + f_c)) / (8 * y_cellsize) slope_v <- sqrt(dz_dx * dz_dx + dz_dy * dz_dy) dimnames(slope_v) <- x_dimnames aspect_v <- (180 / pi) * atan2(dz_dy, -dz_dx) dimnames(aspect_v) <- x_dimnames if (slope & aspect) { return(list(slope = slope_v, aspect = aspect_v)) } else if (slope) { return (slope_v) } else if (aspect) { return (aspect_v) } return (NULL) } #' The time elapsed in hours between the specified sun angle #' from 90 degree in am and pm. +ve above the horizon, -ve below the horizon. #' @param doy day of year number #' @param lat latitude of site (deg) #' @param angle angle to measure time between, such as twilight (deg). 
#' angular distance between 90 deg and end of twilight - altitude of sun. +ve up, -ve down.
#' @return day length in hours
#' @export
dayLength <- function( doy, lat, angle = -6 )
{
    # Constant Values
    aeqnox <- 82.25
    dg2rdn <- ( 2.0 * pi ) / 360.0
    decsol <- 23.45116 * dg2rdn
    dy2rdn <- ( 2.0 * pi ) / 365.25
    rdn2hr <- 24.0 / ( 2.0 * pi )
    sun_alt <- angle * dg2rdn
    dec <- decsol * sin( dy2rdn * ( doy - aeqnox ) )
    if ( ( abs( lat ) == 90.0 ) )
    {
        coshra <- rep( sign_apsim( 1.0, -dec ) * sign_apsim( 1.0, lat ), times = length( doy ) )
    } else
    {
        latrn <- lat * dg2rdn
        slsd <- sin( latrn ) * sin( dec )
        clcd <- cos( latrn ) * cos( dec )
        # pmin/pmax keep the clamping elementwise so that a vector doy works,
        # matching the vectorized branch above for abs(lat) == 90
        altmn <- asin( pmin( pmax( slsd - clcd, -1.0 ), 1.0 ) )
        altmx <- asin( pmin( pmax( slsd + clcd, -1.0 ), 1.0 ) )
        alt <- pmin( pmax( sun_alt, altmn ), altmx )
        coshra <- ( sin( alt ) - slsd ) / clcd
        coshra[coshra < -1] <- -1
        coshra[coshra > 1] <- 1
    }
    hrangl <- acos( coshra )
    hrlt <- hrangl * rdn2hr * 2.0
    return( hrlt )
}

#' Return a y value from a linear interpolation function
#'
#' @param x Vector of x coordinates of the break points, or a single string to be split
#' @param y Vector of y coordinates of the break points, or a single string to be split
#' @param values Values at which to interpolate
#' @param split Regular expression used to split \code{x} and \code{y} when they are given as strings
#' @return The interpolated values
#' @export
interpolationFunction <- function( x, y, values, split = '\\s+' )
{
    if (is.character(x) & length(x) == 1)
    {
        x <- as.numeric(strsplit(x, split)[[1]])
    }
    if (is.character(y) & length(y) == 1)
    {
        y <- as.numeric(strsplit(y, split)[[1]])
    }
    res <- rep(NA, length(values))
    pos <- values < x[1]
    res[pos] <- y[1]
    for (i in seq(length = length(x) - 1))
    {
        pos <- values >= x[i] & values < x[i + 1]
        slope <- (y[i+1] - y[i] ) / (x[i+1] - x[i])
        res[pos] <- y[i] + slope * (values[pos] - x[i])
    }
    pos <- values >= x[length(x)]
    res[pos] <- y[length(y)]
    return ( res )
}

# Calculate weather variables through a string formula.
#
# @param x A data frame containing all weather records
# @param str A string formula
# @param len The length of the result
wcalStr <- function( x, str = NULL, len = length( x[[1]] ) )
{
    temp.env <- new.env()
    x.names <- names( x )
    for ( j in seq( along = x.names ) )
    {
        assign( x.names[j], x[[x.names[j]]], envir = temp.env )
    }
    res <- eval( parse( text = as.character( str ) ), envir = temp.env )
    if ( length( res ) != len )
    {
        stop( "The result length is not equal to the original length" )
    }
    return( res )
}

# Calculate weather variables through a function.
#
# @param x A data frame containing all weather records
# @param FUN A function to be used, whose result should have the same length as the original records.
# @param var.args Arguments of weather variables passed to \code{FUN}.
# @param other.args Optional arguments to \code{FUN}
# @param len The length of the result
wcalFun <- function( x, FUN, var.args, other.args = NULL, len = length( x[[1]] ) )
{
    temp.env <- new.env()
    x.names <- names( x )
    for ( j in seq( along = x.names ) )
    {
        assign( x.names[j], x[[x.names[j]]], envir = temp.env )
    }
    fun.args <- as.list( NULL )
    for ( j in seq( along = var.args ) )
    {
        if ( var.args[j] %in% x.names )
        {
            fun.args[[j]] <- x[[var.args[j]]]
        } else
        {
            stop( paste( "Can not find variable ", var.args[j], sep = "" ) )
        }
    }
    fun.args <- c( fun.args, other.args )
    res <- do.call( FUN, fun.args )
    if ( length( res ) != len )
    {
        stop( "The result length is not equal to the original length" )
    }
    return( res )
}
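# Illustrative checks (not part of the package), assuming this file has been
# sourced. interpolationFunction() is piecewise linear between break points
# and flat outside them, so the cardinal-temperature curve from the thermal
# time example evaluates as follows:
interpolationFunction(x = c(0, 20, 35), y = c(0, 20, 0),
                      values = c(-5, 10, 27.5, 40))
# returns 0, 10, 10, 0
# Day length at 45 deg latitude around the June solstice, using the default
# civil twilight angle of -6 degrees:
dayLength(doy = 172, lat = 45)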
/scratch/gouwar.j/cran-all/cranData/weaana/R/utilities.R
# * Author: Scott Chapman ([email protected])
# * Created: 09/04/2010
# *

# Calculate the vpd using the APSIM method
#
# @param maxt The maximum temperature
# @param mint The minimum temperature
# @return vpd calculated with the APSIM method
vpd.apsim <- function( maxt, mint )
{
    vpd <- 0.1 * 0.75 * ( 6.1078 * exp( ( 17.269 * maxt ) / ( 273.3 + maxt ) ) -
            6.1078 * exp( ( 17.269 * mint ) / ( 273.3 + mint ) ) )
    vpd[ vpd < 0.01 ] <- 0.01
    return( vpd )
}
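# Illustrative sketch (not part of the package), assuming this file has been
# sourced: daily VPD from two days of example temperature extremes. The
# temperature values below are arbitrary inputs chosen for the demo.
vpd.apsim(maxt = c(28, 31), mint = c(12, 15))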
/scratch/gouwar.j/cran-all/cranData/weaana/R/vpd.apsim.r
# * Author: Bangyou Zheng ([email protected])
# * Created: 09/02/2011
# *

# Summarize weather records by a certain period and function
#
# @param object A WeaAna object.
# @param ... Not used
setGeneric( "wapply",
    function( object, ... )
    {
        standardGeneric( "wapply" )
    }
)

# Summarize weather records by a certain period and function
#
# \code{wapply} is a powerful and flexible function to summarize weather records.
# It could be used to calculate multiple variables with different functions
# and different parameters for all levels in a certain period.
#
# \code{vars} could be a vector listing all variables which need to be calculated.
# Available variables could be obtained through function \code{\link{getAvaiVars}}.
#
# \code{period} could be one of "year", "month", "week", "day", or any number of days.
# It will be replicated to the same length as \code{vars}.
#
# \code{FUN} could be a vector. \code{FUN} will be replicated to the same length as \code{vars}.
# Each \code{FUN} will be applied to each \code{vars} in the sequence of \code{vars}.
#
# The optional argument \code{args} is a list which has the same length as \code{FUN}.
# Each element of \code{args} contains the optional arguments for the corresponding \code{FUN}.
# Each argument will be replicated to the same length as the number of levels, which
# is determined by the period. Then each level will be passed a different parameter.
#
# @docType methods
# @param object A WeaAna object.
# @param period A period to apply FUN. It could be one of "year", "month", "week", "day", or
# any number of days.
# @param vars A variable vector to apply \code{FUN}.
# @param FUN A function vector to be applied.
# @param ARGS Optional arguments to \code{FUN}.
# @param site.ARGS Arguments for each site
# @param res.name A result name to store and return.
# @param yrange Year range to summarize.
# @param as.data.frame Logical, if TRUE, a data frame will be returned, but only one period is supported.
# if FALSE, a list will be returned.
# @param extra A list or data.frame whose elements have the same length as the number of sites
# @examples
# library(weaana)
# data( "records" )
# # calculate yearly mean temperature
# wapply( records, vars = "avgt", period = "year", FUN = mean )
# # yearly total rainfall
# wapply( records, vars = "rain", period = "year", FUN = sum )
# # yearly mean temperature
# wapply( records, vars = "avgt", period = "year", FUN = mean )
# # monthly mean temperature
# wapply( records, vars = "avgt", period = "month", FUN = mean )
# # 10 days mean temperature
# wapply( records, vars = "avgt", period = 10, FUN = mean )
# # yearly mean temperature, total rainfall and mean radiation.
# wapply( records, vars = c( "avgt", "rain", "radn" ), # period = "year", # FUN = c( mean, sum ), # res.name = c( "mean.avgt", "total.rain", "mean.radn" ) ) # # calculate thermal time from last frost day to first heat day for year range 1960 to 2009 # last.frost <- lastFrostDay( records, yrange = c( 1960, 2009 ) ) # first.heat <- firstHeatDay( records, yrange = c( 1960, 2009 ) ) # wapply( records, vars = "avgt", period = "year", FUN = function ( x, start, end, base = 0 ) # { # if ( is.na( start ) | is.na( end ) ) # { # return( NA ) # } else # { # x <- x[ seq( start, end )] # x <- x[ x > base ] - base # return( sum( x, na.rm = TRUE ) ) # } # }, ARGS = list( list( start = last.frost[,6], end = first.heat[,6] ) ), # res.name = "tt", yrange = c( 1960, 2009 ) ) setMethod( f = "wapply", signature = c( object = "WeaAna" ), definition = function( object, vars, period, FUN, ARGS = NULL, site.ARGS = NULL, res.name = "result", yrange = waGetPara( "yrange" ), as.data.frame = FALSE, extra = NULL) { # check parameters res <- NULL if ( as.data.frame == TRUE ) { if ( length( period ) > 1 ) { stop( "Only one period supported for data frame results" ) } } else { res <- as.list( NULL ) for ( i in 1:length( vars ) ) { res[[i]] <- as.list( NULL ) } } # res.name for ( i in seq( along = vars ) ) { if ( is.na( res.name[i] ) ) { res.name[i] <- paste( "result", i, sep = "" ) } } # Check extra if (!is.null(extra)) { extra <- as.data.frame(extra) } # convert FUN to a list # FUN if ( is.null( FUN ) ) { stop( "FUN can not be NULL." ) } t.fun <- as.list( NULL ) if ( length( FUN ) > 1 ) { for ( i in seq( along = FUN ) ) { t.fun[[i]] <- FUN[[i]] } } else { t.fun[[1]] <- FUN } # make sure the length of FUN is same as vars t.fun <- rep( t.fun, length.out = length( vars ) ) # period if ( length( period ) > 1 ) { old.period <- period period <- NULL period[[1]] <- old.period } period <- rep( period, length.out = length( vars ) ) period <- as.list( period ) # ARGS and site.SRGS if ( !is.null( ARGS ) ) ARGS <- rep( ARGS, length.out = length( vars ) ) if ( !is.null( site.ARGS ) ) { for ( i in seq( site.ARGS ) ) { site.ARGS[[i]] <- rep( site.ARGS[[i]], length.out = object@num ) } } # check weather station number if ( object@num == 0 ) { warning( "No weather records in this object." ) return( NULL ) } # for each weather station used.args <- 0 for ( i in 1:object@num ) { # obtain all weather records records <- getWeaAnaSiteByPos( object, i ) record <- records$value w.data <- getWeatherRecords( object[i], yrange = yrange, vars = vars ) # Check variables, skip variables don't exist. n.vars <- names( w.data ) for ( j in seq( along = vars ) ) { if ( !( vars[j] %in% n.vars ) ) { warning( paste( "Variable(s) not exist, skip it:", paste( vars[j], collapse = ", " ) ) ) next() } # generate index according period. key <- periodIndex( w.data$year, w.data$day, period[[j]] ) w.levels <- as.numeric( levels( as.factor( key ) ) ) # check level number. go to next station when no levels need to calculate if ( length( w.levels ) == 0 ) { warning( "No any levels which need to calculate. Skip this site." ) next() } # generate basic information for results. 
site.res <- NULL site.res$Name = records$value@name site.res$Number = records$value@number site.res$Latitude = records$value@latitude site.res$Longitude = records$value@longitude site.res[[as.character( period[[j]][1] )]] = w.levels w.nlevels <- length( w.levels ) # for each levels w.res <- NULL w.args <- ARGS[[j]] n.w.args <- names( w.args ) for ( m in seq( along = w.args ) ) { w.args[[m]] <- rep( w.args[[m]], length = used.args + w.nlevels ) } for ( k in 1:w.nlevels ) { # get weather data of this level. l.data <- as.numeric( w.data[ key == w.levels[k], vars[j] ] ) l.args <- as.list( NULL ) l.args[[1]] <- l.data for ( m in seq( along = w.args ) ) { l.args[[n.w.args[m]]] <- w.args[[m]][k + used.args] } n.site.args <- names( site.ARGS ) for ( m in seq( along = site.ARGS ) ) { l.args[[n.site.args[m]]] <- site.ARGS[[m]][i] } # call function to obtain results l.res <- do.call( as.function( t.fun[[j]] ), l.args ) # Check results if ( length( l.res ) > 1 ) { warning( "Only first result is used." ) } w.res <- c( w.res, l.res[1] ) } used.args <- used.args + w.nlevels site.res[[res.name[j]]] <- w.res site.res <- as.data.frame( site.res, stringsAsFactors = FALSE ) if (!is.null(extra)) { names_site_res <- c(names(site.res), names(extra)) site.res <- cbind(site.res, extra[i,]) names(site.res) <- names_site_res } row.names( site.res ) <- seq( along = site.res[[1]] ) records$value@res[[res.name[j]]] <- site.res if ( as.data.frame == TRUE ) { if ( is.null( res ) ) { res <- site.res } else { res <- cbind( res, site.res[[6]] ) } } else { res[[j]] <- rbind( res[[j]], site.res ) } } } if ( as.data.frame == TRUE ) { res <- as.data.frame( res, stringsAsFactors = FALSE ) names( res ) <- c( "Name", "Number", "Latitude", "Longitude", period[[1]][1], res.name ) row.names( res ) <- seq( along = res[[1]] ) return( res ) } else { # save results n.res <- NULL for ( i in 1:length( vars ) ) { temp <- as.data.frame( res[[i]], stringsAsFactors = FALSE ) row.names( temp ) <- seq( along = temp[[1]] ) registerRes( object, res.name[i], "data.frame" ) n.res[[res.name[i]]] <- temp } rm( res ) gc() if ( length( vars ) == 1 ) { return( n.res[[1]] ) } return( n.res ) } } )
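# Illustrative sketch (assumes the weaana package and its bundled "records"
# data set are available, and that wapply is exported as the commented
# examples above suggest): the as.data.frame = TRUE form returns a single
# data frame instead of a list; only one period is supported in that case.
library(weaana)
data("records")
wapply(records, vars = "rain", period = "year", FUN = sum,
       res.name = "total.rain", as.data.frame = TRUE)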
/scratch/gouwar.j/cran-all/cranData/weaana/R/wapply.R
# * Author: Bangyou Zheng ([email protected]) # * Created: 09/02/2011 # * # Calculate weather variables through function or a string formula. # # @docType methods # @param object A WeaAna objects. # @param ... Not used # @rdname wcal-methods setGeneric( "wcal", function( object, ... ) { standardGeneric( "wcal" ) } ) # Calculate weather variables through function or a string formula. # # There are two modes to use \code{wcal}, function mode if \code{FUN} is not null, # and string formula mode if \code{FUN} is NULL. # # @param object A WeaAna objects. # @param FUN A function to be used which results should have the same length as original records. # @param ... Optional arguments to \code{FUN} in function mode. # String formulas if \code{FUN} is NULL. # @param var.args Arguments of weather variable pass to \code{FUN}. # @param var.name Variable name is used if \code{FUN} is not NULL. # @rdname wcal-methods # @aliases wcal,WeaAna,WeaAna-method # @examples # library(weaana) # data( "records" ) # # Daily mean temperature # wcal( records, avgt2 = "( maxt + mint ) / 2" ) # # Moving average temperature # wcal( records, FUN = mov.avg, var.args = "avgt", k = 5, shift = "begin", var.name = "mov.avg" ) setMethod( f = "wcal", signature = c( object = "WeaAna" ), definition = function( object, FUN = NULL, ..., var.args = NULL, var.name = NULL ) { load.later <- waGetPara( "load.later" ) # check arguments funs <- NULL n.funs <- NULL if ( is.null( FUN ) )# for string mode { # check functions funs <- list( ... ) n.funs <- names( funs ) if ( is.null( n.funs ) ) { stop( "NO function is defined." ) } for ( i in seq( along = n.funs ) ) { if ( nchar( n.funs[i]) == 0 ) { stop( paste( "NO result name is defined for ", funs[i], sep = "" ) ) } } } else # for function mode { if ( is.null( var.args ) ) { stop( "var.args must be specified." ) } if ( is.null( var.name ) ) { stop( "var.name must be specified." ) } if ( !is.character( as.character( var.name ) ) ) { stop( "var.name must be character" ) } } # check weather station number if ( object@num == 0 ) { warning( "No weather records in this object." ) return( NULL ) } # for each weather station for ( i in 1:object@num ) { # obtain all weather records records <- getWeaAnaSiteByPos( object, i ) record <- records$value w.data <- NULL if ( !load.later ) { w.data <- getWeatherRecords( object[i] ) } # for string mode if ( is.null( FUN ) ) { for ( j in seq( along = n.funs ) ) { if ( load.later ) { if ( n.funs[j] %in% getAvaiVars( record ) ) { warning( paste( n.funs[j], " existed, overwriting.", sep = "" ) ) } records$value@extra[[n.funs[j]]] <- list( fun.str = funs[j] ) } else { res <- wcalStr( w.data, funs[j] ) if ( n.funs[j] %in% getAvaiVars( record ) ) { warning( paste( n.funs[j], " existed, overwriting.", sep = "" ) ) } records$value@extra[[n.funs[j]]] <- res } } } else # for fun mode { if ( load.later ) { if ( var.name %in% getAvaiVars( record ) ) { warning( paste( var.name, " existed, overwriting.", sep = "" ) ) } records$value@extra[[var.name]] <- list( fun.name = FUN, var.args = var.args, other.args = list( ... ) ) # print( names( records$value@extra ) ) } else { fun.args <- as.list( NULL ) res <- wcalFun( w.data, FUN, var.args, list( ... ) ) if ( var.name %in% getAvaiVars( record ) ) { warning( paste( var.name, " existed, overwriting.", sep = "" ) ) } records$value@extra[[var.name]] <- res } } } } )
/scratch/gouwar.j/cran-all/cranData/weaana/R/wcal.R
# * Author: Bangyou Zheng ([email protected]) # * Created: 12:58 Tuesday, 6 September 2011 # * #' Write weather records into file #' #' @param object A WeaAna object. #' @param ... Not used #' @docType methods #' @rdname writeWeatherRecords-methods setGeneric('writeWeatherRecords', function(object, ...) { standardGeneric('writeWeatherRecords') } ) #' Write weather records into file #' #' @param object A WeaAna object. #' @param file Path of output file. #' @param cols Columns to export. All columns exported if NULL #' @return No return values #' @export #' @rdname writeWeatherRecords-methods #' @aliases writeWeatherRecords,WeaAna,WeaAna-method setMethod(f = 'writeWeatherRecords', signature = c(object = 'WeaAna'), definition = function(object, file, cols = NULL) { if (object@num != length(file)) { stop(sprintf('The output files are not equal to %s', object@num)) } var_name <- c('year', 'day', 'radn','maxt', 'mint', 'rain', 'evap', 'vp', 'code') var_unit <- c('()', '()', '(mj/m2)', '(oC)', '(oC)', '(mm)', '(mm)', '(hPa)', '()') var_width <- c(4, 5, 6, 6, 6, 6, 6, 6, 7) nsmall <- c(0, 0, 0, 0, 0, 0, 0, 0, 0) var_cols <- c('year', 'day', 'radn', 'maxt', 'mint', 'rain', 'evap', 'vp', 'code') if (!is.null(cols)) { cols <- unique(c('year', 'day', cols)) pos <- var_cols %in% cols var_cols <- var_cols[pos] var_name <- var_name[pos] var_unit <- var_unit[pos] var_width <- var_width[pos] nsmall <- nsmall[pos] } for (i in seq(length = object@num)) { records <- getWeaAnaSiteByPos(object, i) records <- records$value res_str <- NULL res_str <- c(res_str, sprintf('!station number = %s', records@number)) res_str <- c(res_str, sprintf('!station name = %s', records@name)) res_str <- c(res_str, sprintf('latitude = %s (DECIMAL DEGREES)', format(records@latitude, nsmall = 2))) res_str <- c(res_str, sprintf('longitude = %s (DECIMAL DEGREES)', format(records@longitude, nsmall = 2))) res_str <- c(res_str, sprintf('tav = %s (oC) ! Annual average ambient temperature', format(records@tav, nsmall = 2))) res_str <- c(res_str, sprintf('amp = %s (oC) ! Annual amplitude in mean monthly temperature', format(records@amp, nsmall = 2))) res_str <- c(res_str, '') values <- NULL pos <- NULL for (j in seq(along = var_cols)) { if (length(methods::slot(records, var_cols[j])) > 0) { values[[var_cols[j]]] <- format( methods::slot(records, var_cols[j]), width = var_width[j], justify = 'right', nsmall = nsmall[j]) pos <- c(pos, j) } } values <- as.data.frame(values) values <- apply(values, 1, FUN = function(x) { return(paste(x, collapse = ' ')) }) var_name <- paste(format(var_name[pos], width = var_width[pos], justify = 'right', nsmall = nsmall[pos]), collapse = ' ') var_unit <- paste(format(var_unit[pos], width = var_width[pos], justify = 'right', nsmall = nsmall[pos]), collapse = ' ') res_str <- c(res_str, var_name, var_unit) res_str <- c(res_str, values) writeLines(res_str, file[i]) } } )
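# Illustrative sketch (assumes the weaana package and its bundled "records"
# data set): writeWeatherRecords() requires one output path per site, so
# create as many temporary files as the object has sites before writing.
# The ".met" extension is an assumption for the demo.
library(weaana)
data("records")
out_files <- replicate(records@num, tempfile(fileext = ".met"))
writeWeatherRecords(records, file = out_files)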
/scratch/gouwar.j/cran-all/cranData/weaana/R/writeWeatherRecords.R
#' Computation of autocovariance and autocorrelation for ARMA residuals.
#'
#' @description Computes the empirical autocovariance and autocorrelation
#' of the residuals of an ARMA process at only one given lag.
#'
#' @param ar Vector of AR coefficients. If \code{NULL}, it is a MA process.
#' @param ma Vector of MA coefficients. If \code{NULL}, it is an AR process.
#' @param y Univariate time series.
#' @param h Given lag to compute autocovariance and autocorrelation, with h an integer.
#' @param e Vector of residuals of the time series. If \code{NULL}, the function will compute it.
#'
#' @return A list with:
#' \describe{
#'     \item{\code{autocov}}{Value of the autocovariance.}
#'     \item{\code{autocor}}{Value of the autocorrelation.}
#' }
#'
#' @export
#'
#' @examples
#' param.estim <- estimation(p = 1, q = 1, y = CAC40return.sq)
#' \donttest{acf.univ(ar = param.estim$ar, ma = param.estim$ma, y = CAC40return.sq, h = 20)}
#'
#'
#' @seealso \code{\link{acf.gamma_m}} for autocorrelation and autocovariance at all lags up to h.
acf.univ <- function (ar = NULL, ma = NULL, y, h, e = NULL)
{
    n <- length(y)
    if (is.null(e)){
        e <- residual(ar = ar, ma = ma, y = y)
    }
    h <- floor(abs(h))
    centre <- e - mean(e)
    autocov <- sum(centre[1:(n - h)] * centre[(h + 1):n]) / n
    autocor <- autocov / (sum(centre[1:n] * centre[1:n]) / n)
    list(autocov = autocov, autocor = autocor)
}

#' Computation of autocovariance and autocorrelation for ARMA residuals.
#'
#' @description Computes the empirical autocovariance and autocorrelation
#' functions of the residuals of an ARMA process from lag 1 up to a given maximum lag.
#'
#' @param ar Vector of AR coefficients. If \code{NULL}, it is a MA process.
#' @param ma Vector of MA coefficients. If \code{NULL}, it is an AR process.
#' @param y Univariate time series.
#' @param h Computes autocovariances and autocorrelations from lag 1 to lag h, with h an integer.
#' @param e Vector of residuals. If \code{NULL}, the function will compute it.
#'
#' @return A list with:
#' \describe{
#'     \item{\code{gamma_m}}{Vector of the autocovariances.}
#'     \item{\code{rho_m}}{Vector of the autocorrelations.}
#' }
#'
#'
#' @export
#'
#' @examples
#' param.estim <- estimation(p = 1, q = 1, y = CAC40return.sq)
#' \donttest{acf.gamma_m(ar = param.estim$ar, ma = param.estim$ma, y = CAC40return.sq, h = 20)}
#'
#' @seealso \code{\link{acf.univ}} for autocorrelation and autocovariance at only one given lag h.
#'
acf.gamma_m <- function (ar = NULL, ma = NULL, y, h, e = NULL)
{
    n <- length(y)
    if (is.null(e)){
        e <- residual(ar = ar, ma = ma, y = y)
    }
    h <- abs(h)
    hmax <- min(n, h)
    autocov <- rep(0, hmax)
    autocor <- rep(0, hmax)
    centre <- e - mean(e)
    for (i in 1:hmax)
        autocov[i] <- sum(centre[1:(n - i)]*centre[(i + 1):n]) / n
    for (i in 1:hmax)
        autocor[i] <- autocov[i] / (sum(centre[1:n] * centre[1:n]) / n)
    list(gamma_m = autocov, rho_m = autocor)
}

#' Autocorrelogram
#'
#' @description Plots the autocorrelogram for a nonlinear process.
#'
#' @param ar Vector of AR coefficients. If \code{NULL}, we consider a MA process.
#' @param ma Vector of MA coefficients. If \code{NULL}, we consider an AR process.
#' @param y Univariate time series.
#' @param main Character string representing the title for the plot.
#' @param nlag Maximum lag at which to calculate the acf. If \code{NULL}, it is
#' determined by \eqn{nlag = min(10\log_{10}(n), n - 1)} where n is the number of
#' observations.
#' @param conflevel Value of the confidence level, 5\% by default.
#' @param z Zoom on the graph.
#' @param aff Specify the method between SN, M and both (see Details).
#'
#' @importFrom stats qnorm acf
#' @importFrom graphics lines
#'
#' @export
#'
#' @examples
#' est<-estimation(p = 1, q = 1, y = CAC40return.sq)
#' \donttest{nl.acf(ar = est$ar, ma = est$ma, y = CAC40return.sq, main = "Autocorrelation of an ARMA(1,1)
#' residuals of the CAC40 return square", nlag = 20)}
#'
#' @note The only values available for the argument \code{conflevel} are
#' 0.1, 0.05, 0.025, 0.01 or 0.005.
#'
#' @details For the argument \code{aff} you have the choice between:
#' \code{SN}, \code{M} and \code{both}.
#' \code{SN} prints the self-normalized method (see Boubacar Maïnassara and Saussereau) in green,
#' \code{M} prints the modified method introduced by Francq, Roy and Zakoïan (see also Boubacar Maïnassara) in red
#' and \code{both} prints both of the methods.
#'
#' @return An autocorrelogram with all the autocorrelations from lag 1 up to a maximum lag, with
#' the bands of the methods you choose to print.
#'
#' @references Boubacar Maïnassara, Y. 2011, Multivariate portmanteau test for structural {VARMA} models
#' with uncorrelated but non-independent error terms, \emph{Journal of Statistical Planning and Inference},
#' vol. 141, no. 8, pp. 2961-2975.
#' @references Boubacar Maïnassara, Y. and Saussereau, B. 2018, Diagnostic checking in multivariate {ARMA} models with
#' dependent errors using normalized residual autocorrelations,
#' \emph{Journal of the American Statistical Association}, vol. 113, no. 524, pp. 1813-1827.
#' @references Francq, C., Roy, R. and Zakoïan, J.M. 2005, Diagnostic Checking in ARMA
#' Models with Uncorrelated Errors, \emph{Journal of the American Statistical
#' Association}, vol. 100, no. 470, pp. 532-544.
#' @references Lobato, I.N. 2001, Testing that a dependent process is
#' uncorrelated. J. Amer. Statist. Assoc. 96, vol. 455, pp. 1066-1076.
#' #' nl.acf <- function(ar = NULL, ma = NULL, y, main = NULL, nlag = NULL, conflevel = 0.05, z=1.2, aff="both") { if (conflevel!=0.1 & conflevel!=0.05 & conflevel!=0.025 & conflevel!=0.01 & conflevel!=0.005){ stop("Choose a confidence level equal to 0.1, 0.05, 0.025, 0.01 or 0.005") } n <- length(y) if (is.null(nlag)) nlag <- as.integer(min(10*log10(n), n - 1)) coef.critique.SN <- 0 coef.critique.ST <- 0 auto <- acf.gamma_m(ar = ar, ma = ma, y = y, h = nlag) rho_m <- auto$rho_m gamma_m <- auto$gamma_m if (is.null(ar) & is.null(ma)) res <- y #else res <- resultat.h(ar = ar, ma = ma, y = y, m = 1)$eps else res <- residual(ar = ar, ma = ma, y = y) var <- rep(0, nlag) var1 <- rep(0,nlag) for (h in 1:nlag) { mat <- resultat.h(ar = ar, ma = ma, y = y, m = h, eps=res) var[h] <- mat$Sigma_rho.h var1[h] <- mat$matC_m.h } band <- sqrt(var / n) band1 <- sqrt(var1 / n) if (aff!="M"){ if (conflevel == 0.1) {coef.critique.SN <- 28.31 } if (conflevel == 0.05) {coef.critique.SN <- 45.4 } if (conflevel == 0.025) {coef.critique.SN <- 66.13 } if (conflevel == 0.01) {coef.critique.SN <- 99.76 } if (conflevel == 0.005) {coef.critique.SN <- 128.1 } } if (aff!="SN"){ coef.critique.ST<-abs(qnorm(conflevel/2)) } minval <- z * min(rho_m, - sqrt(coef.critique.SN) * band1, - coef.critique.ST * band, - coef.critique.ST / sqrt(n)) maxval <- z * max(rho_m, sqrt(coef.critique.SN) * band1, coef.critique.ST * band, coef.critique.ST / sqrt(n)) acf(res, lag.max = nlag, xlab = 'Lag', ylab = 'ACF', ylim = c(minval, maxval), main = main, ci = (1-conflevel)) if (aff == "M"){ lines(c(1:nlag), -coef.critique.ST * band, lty = 1, col = 'red') lines(c(1:nlag), coef.critique.ST * band, lty = 1, col = 'red') } if (aff == "SN"){ lines(c(1:nlag), -sqrt(coef.critique.SN) * band1, lty = 1, col = 'green') lines(c(1:nlag), sqrt(coef.critique.SN) * band1, lty = 1, col = 'green') } if (aff == "both"){ lines(c(1:nlag), -coef.critique.ST * band, lty = 1, col = 'red') lines(c(1:nlag), coef.critique.ST * band, lty = 1, col = 'red') lines(c(1:nlag), -sqrt(coef.critique.SN) * band1, lty = 1, col = 'green') lines(c(1:nlag), sqrt(coef.critique.SN) * band1, lty = 1, col = 'green') } }
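# Illustrative sketch (assumes the weakARMA package is attached, mirroring
# the roxygen example above): draw the autocorrelogram with only the
# self-normalized bands at the 1% level, over the first 10 lags.
library(weakARMA)
est <- estimation(p = 1, q = 1, y = CAC40return.sq)
nl.acf(ar = est$ar, ma = est$ma, y = CAC40return.sq,
       main = "ACF of ARMA(1,1) residuals", nlag = 10,
       conflevel = 0.01, aff = "SN")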
/scratch/gouwar.j/cran-all/cranData/weakARMA/R/Acf.R
#' @title Paris stock exchange return #' @docType data #' @description #' This data set considers CAC40 return at the closure of the market from #' March 2, 1990 to June 14, 2021. #' #' @format A numerical vector with 7935 observations. #' #' We computed every value from the dataset \code{\link{CAC40}} #' with the following code: #' #' \preformatted{ #' cac<-CAC40; #' n<-length(cac); #' rend<-rep(0,n); #' rend[2:n]<-(log(cac[2:n]/cac[1:(n-1)])*100); #' CAC40return<-rend[2:n] #' } #' #' @seealso \code{\link{CAC40}} and \code{\link{CAC40return.sq}} #' "CAC40return"
/scratch/gouwar.j/cran-all/cranData/weakARMA/R/CAC40return.R
#' @title Paris stock exchange square return #' @docType data #' @description #' This data set considers CAC40 square return at the closure of the market #' from March 2, 1990 to June 14, 2021. #' #' @format A numerical vector with 7935 observations. #' #' We computed every value from the dataset \code{\link{CAC40}} #' with the following code: #' #' \preformatted{ #' cac<-CAC40; #' n<-length(cac); #' rend<-rep(0,n); #' rend[2:n]<-(log(cac[2:n]/cac[1:(n-1)])*100); #' CAC40return.sq<-rend[2:n]^2 #' } #' #' @seealso \code{\link{CAC40}} and \code{\link{CAC40return}} #' "CAC40return.sq"
/scratch/gouwar.j/cran-all/cranData/weakARMA/R/CAC40returnsq.R
AICb <- function(n, p = NULL, q = NULL, sigma2, I, J.inv, c = 2) { if (is.null(p)) p = 0 if (is.null(q)) q = 0 return(n*log(sigma2) + 2*(p + q)) } AICc <- function(n, p = NULL, q = NULL, sigma2, I, J.inv, c = 2) { if (is.null(p)) p = 0 if (is.null(q)) q = 0 return(n*log(sigma2) + n + (n / (n-(p + q))) * 2*(p + q)) } AICcm <- function(n, p = NULL, q = NULL, sigma2, I, J.inv, c = 2) { if (is.null(p)) p = 0 if (is.null(q)) q = 0 return(n*log(sigma2) + (n^2 / (n-(p + q ))) + (n / (2*(n-(p + q ))))*sum(diag((I%*%J.inv))/sigma2)) } AICm <- function(n, p = NULL, q = NULL, sigma2, I, J.inv, c = 2) { if (is.null(p)) p = 0 if (is.null(q)) q = 0 return(n*log(sigma2) + sum(diag((I%*%J.inv))/sigma2)) } BICb <- function(n, p = NULL, q = NULL, sigma2, I, J.inv, c = 2) { if (is.null(p)) p = 0 if (is.null(q)) q = 0 return(n*log(sigma2) + (p + q)*log(n)) } BICm <- function(n, p = NULL, q = NULL, sigma2, I, J.inv, c = 2) { if (is.null(p)) p = 0 if (is.null(q)) q = 0 return(n*log(sigma2) + 0.5*sum(diag((I%*%J.inv))/sigma2)*log(n)) } HQ <- function(n, p = NULL, q = NULL, sigma2, I, J.inv, c = 2) { if (is.null(p)) p = 0 if (is.null(q)) q = 0 return(n*log(sigma2) + c*2*(p + q)*log(log(n))) } HQm <- function(n, p = NULL, q = NULL, sigma2, I, J.inv, c = 2) { return(n*log(sigma2) + c*sum(diag((I%*%J.inv))/sigma2)*log(log(n))) }
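# Illustrative sketch (these criterion functions are internal, so this
# assumes the file has been sourced): every criterion above shares the same
# signature so that critere.ARMA() can call them interchangeably; the plain
# AIC ignores the information matrices, so placeholders may be passed.
AICb(n = 500, p = 1, q = 1, sigma2 = 2.3, I = NULL, J.inv = NULL)
# equals 500 * log(2.3) + 2 * (1 + 1)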
/scratch/gouwar.j/cran-all/cranData/weakARMA/R/CriteriaFunction.R
#' @title Paris stock exchange
#' @docType data
#' @description
#' This data set considers the market index at the closure of the market from
#' March 1, 1990 to June 14, 2021.
#'
#' @format A vector with the variable \code{Close}.
#'
#' There are 7936 observations. We removed all \code{NULL} values.
#'
#' @source Data pulled from Yahoo Finance: \samp{https://fr.finance.yahoo.com/quote/\%5EFCHI/history?p=\%5EFCHI}
#'
#' @seealso \code{\link{CAC40return}} and \code{\link{CAC40return.sq}}
#'
"CAC40"
/scratch/gouwar.j/cran-all/cranData/weakARMA/R/Data.R
#' Function that optim will minimize
#'
#' @param x One point in \eqn{\rm I\!R^{(p+q)}}.
#' @param dim.ar Length of AR vector.
#' @param dim.ma Length of MA vector.
#' @param y Vector of a time series.
#'
#' @return
#' \describe{
#'     \item{\code{ms}}{Mean square at the point \code{x}.}
#' }
#'
#'
#' @description Computes the mean square of the residuals of the time series at the point \code{x};
#' it is minimized with the \code{\link[stats]{optim}} function in our function \code{\link{estimation}}.
#'
#' @export
#'
meansq <- function(x, dim.ar = NULL, dim.ma = NULL, y)
{
    n <- length(y)
    e <- y
    if (is.null(dim.ar) || dim.ar == 0)
    {
        ma <- x
        ar <- 0
        dim.ar <- 0
        for (t in 2:n){
            e[t] <- y[t] + sum(ma[1:min((t-1),dim.ma)]*e[(t-1):max(1,(t - dim.ma))])
        }
    } else {
        if (is.null(dim.ma) || dim.ma == 0)
        {
            ar <- x
            ma <- 0
            dim.ma <- 0
            for (t in 2:n){
                e[t] <- y[t] - sum(ar[1:min((t-1),dim.ar)]*y[(t-1):max(1,(t - dim.ar))])
            }
        } else {
            ar <- x[1:dim.ar]
            ma <- x[(dim.ar + 1):length(x)]
            for (t in 2:n){
                e[t] <- y[t] - sum(ar[1:min((t-1),dim.ar)]*y[(t-1):max(1,(t - dim.ar))]) +
                    sum(ma[1:min((t-1),dim.ma)]*e[(t-1):max(1,(t - dim.ma))])
            }
        }
    }
    ms <- mean(e^2)
    return(ms)
}

#' Parameters estimation of a time series.
#'
#' @description Estimates the parameters of a time series for given orders \code{p} and \code{q}
#'
#' @param p Order of AR, if \code{NULL}, MA is computed.
#' @param q Order of MA, if \code{NULL}, AR is computed.
#' @param y Univariate time series.
#' @param meanparam Logical argument indicating whether the mean parameter has to be computed or not.
#'   If FALSE, \eqn{\mu} is not computed.
#'
#' @importFrom stats optim
#'
#' @return List of estimated coefficients:
#' \describe{
#'     \item{\code{mu}}{Mean parameter.}
#'     \item{\code{ar}}{Vector of AR coefficients whose length is equal to \code{p}.}
#'     \item{\code{ma}}{Vector of MA coefficients whose length is equal to \code{q}.}
#'     \item{\code{sigma.carre}}{Mean square of the residuals.}
#' }
#' @export
#'
#' @details This function uses the BFGS algorithm in the function optim to minimize our objective function \code{\link{meansq}}.
#'
#' @references Francq, C. and Zakoïan, J. 1998, Estimating linear representations of nonlinear processes,
#' \emph{Journal of Statistical Planning and Inference}, vol. 68, no. 1, pp. 145-165.
#'
#' @examples
#' y<-sim.ARMA(1000,ar = c(0.9,-0.3), ma = 0.2, method = "product")
#' estimation(p = 2, q = 1, y = y)
#'
#' estimation(p = 1, q = 1, y = CAC40return.sq, meanparam = TRUE)
#'
estimation <- function(p = NULL, q = NULL, y, meanparam = FALSE)
{
    mu <- 0
    if (meanparam == TRUE){
        mu <- mean(y)
        y <- y - mu
    }
    if (is.null(p)) {p1 <- 0; q1 <- q}
    else {
        if (is.null(q)) {q1 <- 0; p1 <- p}
        else {p1 <- p; q1 <- q}
    } # p and q cannot both be equal to 0 at the same time
    para <- numeric(p1 + q1)
    res <- optim(par = para, fn = meansq, dim.ar = p1, dim.ma = q1, y = y,
                 method = "BFGS", control = list(trace = 0))
    if (is.null(p) || p == 0)
    {
        return(list(mu = mu, ma = res$par, sigma.carre = res$value))
    } else {
        if (is.null(q) || q == 0){
            return(list(mu = mu, ar = res$par, sigma.carre = res$value))
        } else {
            return(list(mu = mu, ar = res$par[1:p], ma = res$par[(p+1):length(res$par)], sigma.carre = res$value))
        }
    }
}
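# Illustrative check (assumes the weakARMA package is attached): meansq()
# is the objective that estimation() minimizes, so evaluating it at the
# fitted parameters reproduces sigma.carre.
library(weakARMA)
y <- sim.ARMA(1000, ar = c(0.9, -0.3), ma = 0.2, method = "product")
fit <- estimation(p = 2, q = 1, y = y)
meansq(x = c(fit$ar, fit$ma), dim.ar = 2, dim.ma = 1, y = y)  # == fit$sigma.carre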
/scratch/gouwar.j/cran-all/cranData/weakARMA/R/Estimation.R
#' Computation of the gradient of the residuals of an ARMA model
#'
#' @description Computes the gradient of the residuals of an ARMA model.
#'
#' @param ar Vector of \code{ar} coefficients.
#' @param ma Vector of \code{ma} coefficients.
#' @param y Univariate time series.
#'
#'
#'
#' @return A list containing:
#' \describe{
#'     \item{\code{gradient}}{Matrix of the gradient.}
#'     \item{\code{eps}}{Vector of residuals.}
#' }
#' @export
#'
#' @examples
#' est<-estimation(p = 1, q = 1, y = CAC40return.sq)
#' gradient(ar = est$ar, ma = est$ma, y = CAC40return.sq)
#'
gradient <- function(ar = NULL, ma = NULL, y)
{
    grand = 1 / sqrt(.Machine$double.eps)
    n <- length(y)
    eps <- residual(ar = ar, ma = ma, y = y)
    C <- matrix(0, nrow = 0, ncol = n)
    if (is.null(ar))
    {
        p <- 0
        q <- length(ma)
        der.eps <- matrix(0, nrow = q, ncol = n)
        # eps(t) = y(t) + b(1)eps(t-1) + ... + b(q)eps(t-q), as in residual()
        # derivative of eps w.r.t. theta = (eps(t-1); eps(t-2); ...; eps(t-q)),
        # then corrected recursively for the MA part
        for (t in 2:n){
            for (i in 1:min(q,(t-1))){
                der.eps[i, t] <- eps[t - i]
            }
        }
        C <- der.eps
        for(t in (q + 1):n)
        {
            for(i in 1:q){
                der.eps[ ,t] <- C[ ,t] + ma[i]*der.eps[ ,t - i]
            }
        }
    } else {
        if (is.null(ma))
        {
            q <- 0
            p <- length(ar)
            der.eps <- matrix(0, nrow = p, ncol = n)
            # eps(t) = y(t) - a(1)y(t-1) - a(2)y(t-2) - ... - a(p)y(t-p)
            # derivative of eps w.r.t. theta = -(y(t-1); y(t-2); ...; y(t-p))
            for (t in 2:n){
                for (i in 1:min(p,(t-1))){
                    der.eps[i, t] <- -y[t - i]
                }
            }
        } else {
            p <- length(ar)
            q <- length(ma)
            der.eps.y <- matrix(0, nrow = p, ncol = n)
            der.eps.e <- matrix(0, nrow = q, ncol = n)
            der.eps <- matrix(0, nrow = (p + q), ncol = n)
            # eps(t) = y(t) - a(1)y(t-1) - ... - a(p)y(t-p) + b(1)eps(t-1) + ... + b(q)eps(t-q)
            # derivative of eps w.r.t. theta stacks -(y(t-1); ...; y(t-p)) and
            # (eps(t-1); ...; eps(t-q)), then is corrected recursively for the MA part
            for (t in 2:n){
                for (i in 1:min(q,(t-1))){
                    der.eps.e[i, t] <- eps[t - i]
                }
                for (i in 1:min(p,(t-1))){
                    der.eps.y[i, t] <- -y[t - i]
                }
            }
            C <- rbind(der.eps.y, der.eps.e)
            for(t in (q+1):n)
            {
                for(i in 1:(p+q)){
                    der.eps[i,t] <- C[i,t] + sum(ma*der.eps[i,(t-1):(t-q)])
                }
            }
        }
    }
    return(list(gradient = der.eps, eps = eps))
}
/scratch/gouwar.j/cran-all/cranData/weakARMA/R/Gradient.R
#' Estimation of the Fisher information matrix I
#'
#' @description Uses a consistent estimator of the matrix I based on an autoregressive spectral estimator.
#'
#' @param data Matrix of dimension (p+q,n).
#' @param p Dimension of the AR estimate coefficients.
#' @param q Dimension of the MA estimate coefficients.
#'
#' @importFrom stats resid
#'
#' @return Estimated Fisher information matrix \eqn{I =
#'  \sum_{h=-\infty}^{+\infty} cov(2e_t \nabla e_t, 2e_{t-h} \nabla e_{t-h})} where \eqn{\nabla e_t}
#'  denotes the gradient of the residuals.
#'
#' @export
#'
#' @references Berk, Kenneth N. 1974, Consistent autoregressive spectral estimates,
#' \emph{The Annals of Statistics}, vol. 2, pp. 489-502.
#' @references Boubacar Maïnassara, Y. and Francq, C. 2011, Estimating structural VARMA models with uncorrelated but
#' non-independent error terms, \emph{Journal of Multivariate Analysis}, vol. 102, no. 3, pp. 496-505.
#' @references Boubacar Maïnassara, Y., Carbon, M. and Francq, C. 2012, Computing and estimating information matrices
#' of weak ARMA models, \emph{Computational Statistics & Data Analysis}, vol. 56, no. 2, pp. 345-361.
#'
#'
matXi <- function (data, p = 0, q = 0)
{
    if (length(data[,1]) != (p+q)){
        stop("Matrix data has not the right dimensions. Please use this code to obtain the right matrix:
            grad <- gradient(ar=ar, ma=ma, y=y)
            eps <- grad$eps
            der.eps <- grad$gradient
            data <- as.matrix(sapply(1:n, function(i) as.vector(2 * t(der.eps[, i]) * eps[i])))
            if ((length(ar)+length(ma))==1){
                data <- t(data)
            }
            with y your time series and the correct ar and ma arguments.")
    }
    grand = 1 / sqrt(.Machine$double.eps)
    if ((p+q) <= 1) {
        datab <- as.vector(data)
        n <- length(datab)
        selec <- floor(n^((1 / 3) - .Machine$double.eps))
        selec <- min(selec, 5)
        coef <- estimation(p = selec, y = datab)
        phi <- 1 - sum(coef$ar)
        if (kappa(phi) < grand) phi.inv <- solve(phi) else phi.inv <- phi
        matXi <- (phi.inv^2) * coef$sigma.carre
    } else {
        data <- as.data.frame(t(data))
        n <- length(data[,1])
        selec <- floor(n^((1 / 3) - .Machine$double.eps))
        selec <- min(selec, 5)
        mod <- VARest(x = data, p = selec)
        p1 <- mod$p
        d <- mod$k
        phi <- diag(1, d)
        Ac <- mod$ac
        for (i in 1:p1) phi <- phi - Ac[,,i]
        if (kappa(phi) < grand) phi.inv <- solve(phi) else phi.inv <- ginv(phi)
        res <- mod$res
        sigmaU <- (t(res) %*% res) / (n - p1)
        matXi <- phi.inv %*% sigmaU %*% t(phi.inv)
    }
    return(matXi)
}
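# Illustrative sketch (assumes the weakARMA package is attached), following
# the recipe printed in the error message above: build the (p+q) x n score
# matrix 2 * eps_t * grad(eps_t) and hand it to matXi().
library(weakARMA)
est <- estimation(p = 1, q = 1, y = CAC40return.sq)
grad <- gradient(ar = est$ar, ma = est$ma, y = CAC40return.sq)
n <- length(CAC40return.sq)
scores <- as.matrix(sapply(1:n, function(i) as.vector(2 * t(grad$gradient[, i]) * grad$eps[i])))
matXi(scores, p = 1, q = 1)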
/scratch/gouwar.j/cran-all/cranData/weakARMA/R/MatXi.R
#' Computation of the Fisher information matrices
#'
#' @description Computes the Fisher information matrices, such as \eqn{I} and \eqn{J}.
#'
#'
#' @param ar Vector of AR coefficients. If \code{NULL}, the simulation is a MA process.
#' @param ma Vector of MA coefficients. If \code{NULL}, the simulation is an AR process.
#' @param y Univariate time series.
#'
#' @return A list of matrices containing:
#' \describe{
#'     \item{\code{matI}}{Matrix \code{I} computed in function \code{\link{matXi}}.}
#'     \item{\code{matJ}}{Matrix \code{J} computed as \eqn{\frac{2}{n} H(e) H(e)^t} where \eqn{e} is the residuals vector.}
#'     \item{\code{matJ.inv}}{Inverse of the matrix \code{J}.}
#'     \item{\code{matOmega}}{Variance-covariance matrix in the weak case, computed as \eqn{J^{-1}IJ^{-1}}.}
#'     \item{\code{matvar.strong}}{Variance-covariance matrix in the strong case, computed as
#'       \eqn{2\sigma^2J^{-1}}.}
#'     \item{\code{standard.dev.Omega}}{Standard deviation of the matrix \code{matOmega}.}
#'     \item{\code{standard.dev.strong}}{Standard deviation of the matrix \code{matvar.strong}.}
#'     \item{\code{sig2}}{Innovation variance estimate.}
#' }
#' @import vars
#'
#' @export
#'
#' @examples
#' y <- sim.ARMA(n = 1000, ar = c(0.95,-0.8), ma = -0.6)
#' \donttest{est<-estimation(p = 2, q = 1, y = y)}
#' \donttest{omega(ar = est$ar, ma = est$ma, y = y)}
#'
#' estCAC<-estimation(p = 1, q = 1, y = CAC40return.sq, meanparam = TRUE)
#' \donttest{omega(ar = estCAC$ar, ma = estCAC$ma, y = CAC40return.sq)}
omega <- function(ar = NULL, ma = NULL, y)
{
    grand = 1 / sqrt(.Machine$double.eps)
    n <- length(y)
    if (is.null(ma)) {p <- length(ar) ; q <- 0}
    else {
        if (is.null(ar)) {q <- length(ma) ; p <- 0}
        else {p <- length(ar) ; q <- length(ma)}
    }
    grad <- gradient(ar = ar, ma = ma, y = y)
    eps <- grad$eps
    der.eps <- grad$gradient
    J <- matrix(0, nrow = (p + q), ncol = (p + q))
    Upsilon <- matrix(0, nrow = p + q, ncol = n)
    sig2 <- mean(eps^2)
    J <- (2 / n) * der.eps %*% t(der.eps)
    if (kappa(J) < grand) matJ.inv <- solve(J) else {matJ.inv <- ginv(J)}
    Upsilon <- as.matrix(sapply(1:n, function(i) (2 * t(der.eps[, i]) * eps[i])))
    if (p + q == 1){
        Upsilon <- t(Upsilon)
    }
    matI <- matXi(data = Upsilon, p = p, q = q)
    matOmega <- matJ.inv %*% matI %*% matJ.inv
    matvar.strong <- 2 * sig2 * matJ.inv
    ecart.type.Omega <- sqrt(diag(matOmega) / n)
    ecart.type.strong <- sqrt(diag(matvar.strong) / n)
    list(matJ = J, matI = matI, matJ.inv = matJ.inv, matOmega = matOmega,
         matvar.strong = matvar.strong, standard.dev.Omega = ecart.type.Omega,
         standard.dev.strong = ecart.type.strong, sig2 = sig2)
}
/scratch/gouwar.j/cran-all/cranData/weakARMA/R/Omega.R
#' Portmanteau tests for one lag.
#'
#' @description Computes the Box-Pierce and Ljung-Box statistics for the standard, modified and
#' self-normalized test procedures.
#'
#' @param ar Vector of AR coefficients. If \code{NULL}, it is a MA process.
#' @param ma Vector of MA coefficients. If \code{NULL}, it is an AR process.
#' @param y Univariate time series.
#' @param h Integer for the chosen lag.
#' @param grad Gradient of the series from the function \link{gradient}. If \code{NULL} the gradient will be computed.
#'
#' @importFrom matrixStats rowCumsums
#' @importFrom stats pchisq
#' @importFrom CompQuadForm imhof
#' @importFrom MASS ginv
#'
#' @details Portmanteau statistics are generally used to test the null hypothesis
#' H0 : \eqn{X_t} satisfies an ARMA(p,q) representation.
#'
#' The Box-Pierce (BP) and Ljung-Box (LB) statistics, defined as follows, are
#' based on the residual empirical autocorrelation. \deqn{Q_{m}^{BP} =
#' n\sum_{h=1}^{m} \rho^{2}(h)} \deqn{Q_{m}^{LB} = n(n+2) \sum_{h=1}^{m}
#' \frac{\rho^{2}(h)}{(n-h)}}
#'
#' The standard test procedure consists in rejecting the null hypothesis of an
#' ARMA(p,q) model if the statistic \eqn{Q_m > \chi^{2}(1-\alpha)} where
#' \eqn{\chi^{2}(1-\alpha)} denotes the \eqn{(1-\alpha)}-quantile of a
#' chi-squared distribution with m-(p+q) (where m > p + q) degrees of freedom. The
#' two statistics have the same asymptotic distribution, but the LB statistic
#' has the reputation of doing better for small or medium sized samples.
#'
#' But the significance limits of the residual autocorrelation can be very
#' different for ARMA models with iid noise and ARMA models with noise that is
#' only uncorrelated but dependent. The standard test is obtained under the
#' stronger assumption that \eqn{\epsilon_{t}} is iid. So we give another
#' way to obtain the exact asymptotic distribution of the standard portmanteau
#' statistics under the weak dependence assumptions.
#'
#' Under H0, the statistics \eqn{Q_{m}^{BP}} and \eqn{Q_{m}^{LB}} converge in
#' distribution as \eqn{n \rightarrow \infty}, to \deqn{Z_m(\xi_m) :=
#' \sum_{i=1}^{m}\xi_{i,m} Z^{2}_i} where \eqn{\xi_m =
#' (\xi_{1,m},...,\xi_{m,m})'} is the vector of eigenvalues of the asymptotic
#' covariance matrix of the residual autocorrelations vector and
#' \eqn{Z_{1},...,Z_{m}} are independent \eqn{\mathcal{N}(0,1)} variables.
#'
#'
#' So when the error process is a weak white noise, the asymptotic distribution of the
#' \eqn{Q_{m}^{BP}} and \eqn{Q_{m}^{LB}} statistics is a weighted sum of
#' chi-squared. The distribution of the quadratic form \eqn{Z_{m}(\xi_m)} can
#' be computed using the algorithm by Imhof available here :
#' \code{\link[CompQuadForm]{imhof}}
#'
#' We propose an alternative method where we do not estimate an asymptotic
#' covariance matrix. It is based on a self-normalization based approach to
#' construct a new test-statistic which is asymptotically distribution-free
#' under the null hypothesis.
#'
#' The sample autocorrelation at lag \code{h} takes the form \eqn{\hat{\rho}(h) =
#' \frac{\hat{\Gamma}(h)}{\hat{\Gamma}(0)}},
#' where \eqn{\hat{\Gamma}(h) = \frac{1}{n} \sum_{t=h+1}^n \hat{e}_t\hat{e}_{t-h}}
#' and \eqn{\hat{\Gamma}_m = (\hat{\Gamma}(1),...,\hat{\Gamma}(m))}.
#' The vector of the first m sample autocorrelations is written \eqn{\hat{\rho}_m = (\hat{\rho}(1),...,\hat{\rho}(m))'}.
#'
#' The normalization matrix is defined by \eqn{\hat{C}_{m} =
#' \frac{1}{n^{2}}\sum_{t=1}^{n} \hat{S}_t \hat{S}_t'} where \eqn{\hat{S}_t = \sum_{j=1}^{t} (\hat{\Lambda} \hat{U}_{j} -
#' \hat{\Gamma}_m)}.
#'
#' The sample autocorrelations satisfy \eqn{Q_{m}^{SN}=n\hat{\sigma}^{4}\hat{\rho}_m '
#' \hat{C}_m^{-1}\hat{\rho}_m \rightarrow U_{m}} and
#' \eqn{\tilde{Q}_{m}^{SN} =
#' n\hat{\sigma}^{4}\hat{\rho}_{m}' D_{n,m}^{1/2}\hat{C}_{m}^{-1} D_{n,m}^{1/2}\hat{\rho}_{m} \rightarrow U_{m}},
#' representing respectively the modified versions of the Box-Pierce (BP) and
#' Ljung-Box (LB) statistics, where \eqn{D_{n,m} = \left(\begin{array}{ccc} \frac{n}{n-1} & & 0 \\
#'  & \ddots & \\
#' 0 & & \frac{n}{n-m}
#' \end{array}\right)}.
#' The critical values for \eqn{U_{m}} have been tabulated by Lobato.
#'
#'
#'
#' @return A list including statistics and p-values:
#'
#' \describe{
#'     \item{\code{Pm.BP}}{Standard portmanteau Box-Pierce statistic.}
#'     \item{\code{PvalBP}}{p-value of the standard test, where the
#'       asymptotic distribution is approximated by a chi-squared distribution.}
#'     \item{\code{PvalBP.Imhof}}{p-value based on the exact asymptotic distribution
#'       of the standard portmanteau Box-Pierce statistic.}
#'     \item{\code{Pm.LB}}{Standard portmanteau Ljung-Box statistic.}
#'     \item{\code{PvalLB}}{p-value of the standard test, where the
#'       asymptotic distribution is approximated by a chi-squared distribution.}
#'     \item{\code{PvalLB.Imhof}}{p-value based on the exact asymptotic distribution
#'       of the standard portmanteau Ljung-Box statistic.}
#'     \item{\code{LB.modSN}}{Ljung-Box statistic with the self-normalization method.}
#'     \item{\code{BP.modSN}}{Box-Pierce statistic with the self-normalization method.} }
#'
#' @references Boubacar Maïnassara, Y. 2011, Multivariate portmanteau test for structural {VARMA} models
#' with uncorrelated but non-independent error terms, \emph{Journal of Statistical Planning and Inference},
#' vol. 141, no. 8, pp. 2961-2975.
#' @references Boubacar Maïnassara, Y. and Saussereau, B. 2018, Diagnostic checking in multivariate {ARMA} models with
#' dependent errors using normalized residual autocorrelations,
#' \emph{Journal of the American Statistical Association}, vol. 113, no. 524, pp. 1813-1827.
#' @references Francq, C., Roy, R. and Zakoïan, J.M. 2005, Diagnostic Checking in ARMA
#' Models with Uncorrelated Errors, \emph{Journal of the American Statistical
#' Association}, vol. 100, no. 470, pp. 532-544.
#' @references Lobato, I.N. 2001, Testing that a dependent process is
#' uncorrelated. J. Amer. Statist. Assoc. 96, vol. 455, pp. 1066-1076.
#'
#' @seealso \code{\link{portmanteauTest}} to obtain the statistics for all m
#' lags.
portmanteauTest.h <- function(ar = NULL, ma = NULL, y, h, grad = NULL)
{
    grand <- 1 / sqrt(.Machine$double.eps) # a large number
    n <- length(y) # number of observations
    h.max <- as.integer(min(10*log10(n), n-1))
    if (is.null(h) || (h > h.max)) h <- as.integer(min(10*log10(n), n-1))
    h <- abs(floor(h)) # we want a positive integer
    if (h >= n) return(NA)
    if (is.null(ar)) {
        if (is.null(ma)){ p <- 0 ; q <- 0 }
        else { p <- 0 ; q <- length(ma) }
    } else {
        if (is.null(ma)){ q <- 0 ; p <- length(ar) }
        else { q <- length(ma) ; p <- length(ar) }
    } # values of p and q according to ar and ma
    eps <- y # when p+q == 0, eps = y; otherwise eps is replaced below
    der.eps <- matrix(0, nrow = (p + q), ncol = n)
    upsilon <- matrix(0, nrow = (p + q), ncol = n)
    upsilon2 <- matrix(0, nrow = h, ncol = n)
    evec <- matrix(0, nrow = h, ncol = n)
    Prod <- array(0, dim = c(h, (p + q), n))
    J <- matrix(0, nrow = (p + q), ncol = (p + q))
    Phim <- array(0, dim = c(h, (p + q)))
    Upsilon <- array(0, dim = c(h + p + q, n))
    GAMMA <- matrix(0, nrow = h, ncol = (h + p + q))
    S_t <- matrix(0, nrow = h, ncol = n)
    matC_h <- matrix(0, nrow = h, ncol = h)
    Upsilon.reduit <- matrix(0, nrow = h, ncol = n)
    Upsilon_centre <- matrix(0, nrow = h, ncol = n)
    selec <- floor(n ^ ((1 / 3) - .Machine$double.eps))
    if ((p+q==0)){ # when p+q == 0 our gradient function cannot be used
        for (t in 2:n){
            for (j in 1:min(h,(t-1))){ # avoid useless iterations
                evec[j, t] <- eps[t - j]
            }
        }
        upsilon2 <- sapply(1:n, function(i) evec[,i]*eps[i]) # compute upsilon2
    } else {
        if (is.null(grad)){
            grad <- gradient(ar = ar, ma = ma, y = y)
        }
        eps <- grad$eps
        der.eps <- grad$gradient
        upsilon <- sapply(1:n, function(i) eps[i]*der.eps[,i]) # compute upsilon
        J <- 2 * der.eps %*% t(der.eps) / n # compute J
        if (kappa(J) < grand) matJ.inv <- solve(J) else matJ.inv <- ginv(J) # inverse of J
    }
    sig2 <- mean(eps^2)
    if (h==1){ # case lag = 1
        if ((p+q==0)){ # white-noise case
            Upsilon <- sapply(1:n, function(i) as.vector(upsilon2[i]))
            data <- as.vector(Upsilon) # estimation needs a vector
            coef <- estimation(p = selec, y = data)
            phi <- 1 - sum(coef$ar)
            if (kappa(phi) < grand) phi.inv <- solve(phi) else phi.inv <- ginv(phi) # inverse of phi
            Sigma_Gamma <- (phi.inv^2) * coef$sigma.carre # compute Sigma_Gamma
        } else {
            if ((p+q==1)){ # AR(1) or MA(1) case
                for (t in 2:n){
                    for (j in 1:min(h,(t-1))) {
                        evec[j,t] <- eps[t - j]
                    }
                    Prod[ , ,t] <- evec[ ,t] %*% t(der.eps[ ,t])
                }
                Phih <- mean(sapply(1:n, function(i) as.vector(Prod[ , ,i])))
                GAMMA <- c(1, Phih)
            } else { # ARMA case, or AR/MA of order > 1
                for (t in 2:n){
                    for (j in 1:min(h,(t-1))) {
                        evec[j,t] <- eps[t - j]
                    }
                    Prod[ , ,t] <- evec[ ,t] %*% t(der.eps[ ,t])
                }
                Phih <- matrix(rowMeans(sapply(1:n, function(i) as.vector(Prod[ , ,i]))), nrow = h, ncol = (p + q))
                GAMMA <- cbind(diag(1, h), Phih)
            }
        }
    } else {
        if ((p+q==0)){
            Upsilon <- sapply(1:n, function(i) as.vector(upsilon2[ ,i]))
            # p = h so that the if (p+q <= 1) branch of matXi is not taken
            # and an [h x h] matrix is obtained
            Sigma_Gamma <- matXi(Upsilon, p = h)
        } else {
            for (t in 2:n){
                for (j in 1:min(h,(t-1))) {
                    evec[j,t] <- eps[t - j]
                }
                Prod[ , ,t] <- evec[ ,t] %*% t(der.eps[ ,t])
            }
            Phih <- matrix(rowMeans(sapply(1:n, function(i) as.vector(Prod[ , ,i]))), nrow = h, ncol = (p + q))
            GAMMA <- cbind(diag(1, h), Phih)
        }
    }
    upsilon2 <- sapply(1:n, function(i) evec[,i]*eps[i])
    if ((p+q==0)){
        if (h==1){
            # Upsilon.reduit harmonises the output of the if/else branches
            Upsilon.reduit <- sapply(1:n, function(i) as.vector((upsilon2[i])))
        } else {
            Upsilon.reduit <- sapply(1:n, function(i) as.vector((upsilon2[ ,i])))
        }
    } else {
        if ((p+q==1)){
            if (h != 1) {
                Upsilon <- sapply(1:n, function(i) as.vector(c(upsilon2[ ,i], ((-2) * matJ.inv) %*% upsilon[i])))
            } else {
                Upsilon <- sapply(1:n, function(i) as.vector(c(upsilon2[i], ((-2) * matJ.inv) %*% upsilon[i])))
            }
        } else {
            if (h != 1) {
                Upsilon <- sapply(1:n, function(i) as.vector(c(upsilon2[ ,i], ((-2) * matJ.inv) %*% upsilon[ ,i])))
            } else {
                Upsilon <- sapply(1:n, function(i) as.vector(c(upsilon2[i], ((-2) * matJ.inv) %*% upsilon[ ,i])))
            }
        }
        Upsilon.reduit <- GAMMA %*% Upsilon
    }
    auto <- acf.gamma_m(ar = ar, ma = ma, y = y, h = h, e = eps)
    rho_h <- auto$rho_m
    gamma_h <- auto$gamma_m
    Upsilon.centre <- Upsilon.reduit - (matrix(t(gamma_h), nrow = h, ncol = n))
    S_t <- (rowCumsums(Upsilon.centre))
    for (i in 1:n){
        matC_h <- matC_h + (S_t[ ,i] %*% (t(S_t[ ,i]))) / (n^2)
    }
    if (kappa(matC_h) < grand) matC_h.inv <- solve(matC_h) else matC_h.inv <- ginv(matC_h)
    T_n <- matrix(0, nrow = h, ncol = h)
    for (i in 1:h){
        T_n[i, i] <- sqrt((n + 2)*(1 / (n-i)))
    }
    LB.modSN <- as.numeric( n * ((sig2)^2) * ( (t(rho_h) %*% T_n) %*% matC_h.inv %*% (T_n %*% rho_h)) )
    BP.modSN <- as.numeric( n * ((sig2)^2) * (t(rho_h) %*% matC_h.inv %*% (rho_h)) )
    if ((p+q!=0)){
        # We estimate a matrix for the parameters, not matI itself; p+h avoids
        # the if (p+q==1) branch of matXi in the AR(1) or MA(1) case,
        # because a matrix of dimension (p+q+h) x (p+q+h) is wanted
        matI <- matXi(Upsilon, p = (p+h), q = q)
        Sigma_gam <- matI[1:h, 1:h]
        Sigma_theta <- matI[(h + 1):(h + p + q), (h + 1):(h + p + q)]
        Sigma_gam.theta <- matI[1:h, (h + 1):(h + p + q)]
        Sigma_theta.gam <- matI[(h + 1):(h + p + q), 1:h]
        Sigma_Gamma <- Sigma_gam + Phih %*% Sigma_theta %*% t(Phih) + Phih %*% Sigma_theta.gam + Sigma_gam.theta %*% t(Phih)
    }
    Sigma_rho <- as.matrix(Sigma_Gamma / ((sig2)^2))
    Lambda <- eigen(Sigma_rho, symmetric = TRUE)$values
    Pm.LB <- as.numeric(n * (t(T_n %*% rho_h) %*% (T_n %*% rho_h)) )
    Pm.BP <- as.numeric(n * (t(rho_h) %*% ((rho_h))) )
    if (h - (p + q) <= 0) {
        PvalLB <- NA
        PvalBP <- NA
    } else {
        PvalLB <- as.numeric( 1 - pchisq(Pm.LB, df = (h - (p + q))) )
        PvalBP <- as.numeric( 1 - pchisq(Pm.BP, df = (h - (p + q))) )
    }
    PvalLB.Imhof <- imhof(Pm.LB, Lambda)$Qq
    PvalBP.Imhof <- imhof(Pm.BP, Lambda)$Qq
    list(Omegah = Sigma_rho, Pm.BP = Pm.BP, PvalBP = PvalBP, PvalBP.Imhof = PvalBP.Imhof,
         Pm.LB = Pm.LB, PvalLB = PvalLB, PvalLB.Imhof = PvalLB.Imhof,
         LB.modSN = LB.modSN, BP.modSN = BP.modSN)
}

#' Portmanteau tests
#'
#' @description Performs portmanteau tests for the first m lags; this function uses \code{\link{portmanteauTest.h}}
#' for h in 1:m.
#'
#' @param ar Vector of AR coefficients. If \code{NULL}, it is a MA process.
#' @param ma Vector of MA coefficients. If \code{NULL}, it is an AR process.
#' @param y Univariate time series.
#' @param m Integer for the lag.
#'
#' @return A list of vectors of length \code{m}, corresponding to the statistics and p-values for each lag,
#' for the standard, modified and self-normalized Ljung-Box and Box-Pierce methods.
#'
#'
#' @export
#'
#' @examples
#' est<-estimation(p = 1, q = 1, y = CAC40return.sq)
#' \donttest{portmanteauTest(ar = est$ar, ma = est$ma, y = CAC40return.sq, m = 20)}
#'
#' @references Boubacar Maïnassara, Y. 2011, Multivariate portmanteau test for structural {VARMA} models
#' with uncorrelated but non-independent error terms, \emph{Journal of Statistical Planning and Inference},
#' vol. 141, no. 8, pp. 2961-2975.
#' @references Boubacar Maïnassara, Y. and Saussereau, B.
#' 2018, Diagnostic checking in multivariate {ARMA} models with
#' dependent errors using normalized residual autocorrelations,
#' \emph{Journal of the American Statistical Association}, vol. 113, no. 524, pp. 1813-1827.
#' @references Francq, C., Roy, R. and Zakoïan, J.M. 2005, Diagnostic Checking in ARMA
#' Models with Uncorrelated Errors, \emph{Journal of the American Statistical
#' Association}, vol. 100, no. 470, pp. 532-544.
#'
#'
#' @seealso \code{\link{portmanteauTest.h}} to obtain the statistics for only one lag h.
portmanteauTest <- function(ar = NULL, ma = NULL, y, m = NULL)
{
    n <- length(y)
    m.max <- as.integer(min(10*log10(n), n-1))
    if (is.null(m) || m > m.max) m <- m.max
    Pm.BP <- rep(0, m)
    names(Pm.BP) <- paste("m", 1:m, sep = " = ")
    PvalBP <- rep(0, m)
    names(PvalBP) <- paste("m", 1:m, sep = " = ")
    PvalBP.Imhof <- rep(0, m)
    names(PvalBP.Imhof) <- paste("m", 1:m, sep = " = ")
    Pm.LB <- rep(0, m)
    names(Pm.LB) <- paste("m", 1:m, sep = " = ")
    PvalLB <- rep(0, m)
    names(PvalLB) <- paste("m", 1:m, sep = " = ")
    PvalLB.Imhof <- rep(0, m)
    names(PvalLB.Imhof) <- paste("m", 1:m, sep = " = ")
    LB.modSN <- rep(0, m)
    names(LB.modSN) <- paste("m", 1:m, sep = " = ")
    BP.modSN <- rep(0, m)
    names(BP.modSN) <- paste("m", 1:m, sep = " = ")
    if (is.null(ar) & is.null(ma)){
        grad <- NULL
    } else {
        grad <- gradient(ar = ar, ma = ma, y = y)
    }
    for(i in 1:m){
        res <- portmanteauTest.h(ar = ar, ma = ma, y = y, h = i, grad = grad)
        Pm.BP[i] <- round( res$Pm.BP, digits = 6)
        PvalBP[i] <- round(res$PvalBP, digits = 6)
        PvalBP.Imhof[i] <- round(res$PvalBP.Imhof, digits = 6)
        Pm.LB[i] <- round(res$Pm.LB, digits = 6)
        PvalLB[i] <- round(res$PvalLB, digits = 6)
        PvalLB.Imhof[i] <- round(res$PvalLB.Imhof, digits = 6)
        LB.modSN[i] <- round(res$LB.modSN, digits = 6)
        BP.modSN[i] <- round(res$BP.modSN, digits = 6)
    }
    list(Pm.BP = Pm.BP, PvalBP = PvalBP, PvalBP.Imhof = PvalBP.Imhof,
         Pm.LB = Pm.LB, PvalLB = PvalLB, PvalLB.Imhof = PvalLB.Imhof,
         LB.modSN = LB.modSN, BP.modSN = BP.modSN)
}

resultat.h <- function (ar = NULL, ma = NULL, y, m = NULL, eps)
{
    grand <- 1 / sqrt(.Machine$double.eps)
    n <- length(y)
    m <- abs(m)
    selec <- min(floor(n ^ ((1 / 3) - .Machine$double.eps)), 5)
    if (m >= n) return(NA)
    auto <- acf.univ(ar = ar, ma = ma, y = y, h = m, e = eps)
    rho_m <- auto$autocor
    gamma_m <- auto$autocov
    ee.h <- matrix(0, nrow = 1, ncol = n)
    upsilon.h <- matrix(0, nrow = 1, ncol = n)
    Upsilon.reduit.h <- matrix(0, nrow = 1, ncol = n)
    Upsilon_centre.h <- matrix(0, nrow = 1, ncol = n)
    if (is.null(ar) & is.null(ma))
    {
        eps <- y
        sig2 <- mean(eps^2)
        for (t in (m+1):n){
            upsilon.h[ ,t] <- eps[t - m] * eps[t]
        }
        Upsilon.h <- sapply(1:n, function(i) as.vector(upsilon.h[1,i]))
        Upsilon.reduit.h <- Upsilon.h
        Upsilon_centre.h <- as.vector(Upsilon.reduit.h - (array(t(gamma_m), dim = c(1, n))))
        S_t.h <- cumsum(Upsilon_centre.h) # vector of the cumulative sum
        matC_m.h <- 0
        for (i in 1:n) matC_m.h <- matC_m.h + (S_t.h[i] * (t(S_t.h[i]))) / (n ^ 2)
        matC_m.h <- matC_m.h / (sig2^2)
        if (kappa(matC_m.h) < grand) matC_m.inv.h <- solve(matC_m.h) else matC_m.inv.h <- matC_m.h
        data.h <- Upsilon.h[1:n]
        coef <- estimation(p = selec, y = data.h)
        phi <- 1 - sum(coef$ar)
        if (kappa(phi) < grand) phi.inv <- solve(phi) else phi.inv <- phi
        matI.h <- as.numeric((phi.inv^2)) * coef$sigma.carre
        Sigma_rho.h <- as.numeric(matI.h / ((sig2)^2))
        return(list(Sigma_rho.h = Sigma_rho.h, eps = eps, matC_m.inv.h = matC_m.inv.h, matC_m.h = matC_m.h))
    } else {
        if (is.null(ma)) { q <- 0 ; p <- length(ar) }
        else {
            if (is.null(ar)) { p <- 0 ; q <- length(ma) }
            else { q <- length(ma) ; p <- length(ar) }
        }
        der.eps <-
            matrix(0, nrow = (p + q), ncol = n)
        upsilon <- matrix(0, nrow = (p + q), ncol = n)
        Prod.h <- array(0, dim = c(1, (p + q), n))
        J <- matrix(0, nrow = (p + q), ncol = (p + q))
        Phim.h <- array(0, dim = c(1, (p + q)))
        Upsilon.h <- matrix(0, nrow = (1 + p + q ), ncol = n)
        GAMMA.h <- matrix(0, nrow = 1, ncol = (p + q + 1))
        S_t.h <- array(0, dim = c(1, n))
        grad <- gradient(ar = ar, ma = ma, y = y)
        eps <- grad$eps
        der.eps <- grad$gradient
        sig2 <- mean(eps^2)
        J <- 2*der.eps %*% t(der.eps) / n
        if (kappa(J) < grand) matJ.inv <- solve(J) else {matJ.inv <- ginv(J)}
        upsilon <- sapply(1:n, function (i) eps[i]*der.eps[ ,i])
        for (t in (m+1):n){
            upsilon.h[t] <- eps[t - m]*eps[t]
            if ((p+q==1)) Prod.h[1, ,t] <- eps[t-m] * t(der.eps[ ,t])
            else Prod.h[ , ,t] <- eps[t-m] %*% t(der.eps[ ,t])
        }
        if ((p+q)==1)
        {
            Phim.h <- mean(sapply(1:n, function(i) Prod.h[ , ,i]))
            Upsilon.h <- sapply(1:n, function(i) c(upsilon.h[1,i], ((- 2) * matJ.inv) * upsilon[i]))
            GAMMA.h <- cbind(1, Phim.h)
        } else {
            Phim.h <- matrix(rowMeans(sapply(1:n, function(i) Prod.h[ , ,i])), nrow = 1, ncol = (p + q))
            Upsilon.h <- as.matrix( sapply(1:n, function(i) c(upsilon.h[1,i], ((-2) * matJ.inv) %*% upsilon[ ,i])))
            GAMMA.h <- cbind(1, Phim.h)
        }
        Upsilon.reduit.h <- GAMMA.h %*% Upsilon.h
        Upsilon_centre.h <- as.vector(Upsilon.reduit.h - (array(t(gamma_m), dim = c(1, n))))
        S_t.h <- cumsum(Upsilon_centre.h)
        matC_m.h <- 0
        for (i in 1:n) matC_m.h <- matC_m.h + (S_t.h[i]*(t(S_t.h[i]))) / (n^2)
        matC_m.h <- matC_m.h / (sig2^2)
        if (kappa(matC_m.h) < grand) matC_m.inv.h <- solve(matC_m.h) else {matC_m.inv.h <- ginv(matC_m.h)}
        matXI.h <- matXi(Upsilon.h, (p+1), q)
        Sigma_gam.h <- matXI.h[1, 1]
        Sigma_theta.h <- matXI.h[2:(p + q + 1), 2:(p + q + 1)]
        Sigma_gam.theta.h <- matXI.h[1, 2:(p + q + 1)]
        Sigma_theta.gam.h <- matXI.h[2:(p + q + 1), 1]
        Sigma_Gamma.h <- Sigma_gam.h + Phim.h %*% Sigma_theta.h %*% t(Phim.h) +
            Phim.h %*% Sigma_theta.gam.h + Sigma_gam.theta.h %*% t(Phim.h)
        Sigma_rho.h <- as.numeric(Sigma_Gamma.h / ((sig2)^2))
        return(list(Sigma_rho.h = Sigma_rho.h, eps = eps, matC_m.inv.h = matC_m.inv.h, matC_m.h = matC_m.h))
    }
}
/scratch/gouwar.j/cran-all/cranData/weakARMA/R/PortmanteauTest.R
residual <- function(ar = NULL, ma = NULL, y, centre = TRUE) {
  n <- length(y)
  e <- y
  mu <- 0
  if (centre == FALSE) {
    mu <- mean(y)
  }
  ybis <- y - mu
  if (is.null(ar) && is.null(ma)) {
    e <- y
  } else if (is.null(ma)) {
    for (t in 2:n) {
      e[t] <- ybis[t] -
        sum(ar[1:min((t - 1), length(ar))] * ybis[(t - 1):max(1, (t - length(ar)))])
    }
  } else if (is.null(ar)) {
    for (t in 2:n) {
      e[t] <- ybis[t] +
        sum(ma[1:min((t - 1), length(ma))] * e[(t - 1):max(1, (t - length(ma)))])
    }
  } else {
    for (t in 2:n) {
      e[t] <- ybis[t] -
        sum(ar[1:min((t - 1), length(ar))] * ybis[(t - 1):max(1, (t - length(ar)))]) +
        sum(ma[1:min((t - 1), length(ma))] * e[(t - 1):max(1, (t - length(ma)))])
    }
  }
  return(e)
}
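# Sanity-check sketch for residual() (the helper name is illustrative, not
# exported): with the true AR coefficient plugged in, the recovered
# residuals of a strong AR(1) should be close to white noise.
example_residual <- function() {
  y <- sim.ARMA(n = 300, ar = 0.5, method = "strong")
  e <- residual(ar = 0.5, y = y)
  stats::acf(e, plot = FALSE)  # sample autocorrelations should be near zero
}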
/scratch/gouwar.j/cran-all/cranData/weakARMA/R/Residual.R
#' Selection of ARMA models
#'
#' @description Identifies the orders p and q of an ARMA model according to
#'   several information criteria.
#'
#' @param data Univariate time series.
#' @param P Integer for the maximum lag order of the autoregressive component.
#' @param Q Integer for the maximum lag order of the moving-average component.
#' @param c Real number >1 needed to compute the Hannan-Quinn information criterion.
#'
#' @return A list of the different criteria; each item contains the matrix of
#'   the computed values for the different models and the orders selected by
#'   this criterion (corresponding to the minimum value in that matrix).
#'
#' @details The favored fitted model is the one corresponding to the minimum
#'   value of the criterion. The most popular criterion is the Akaike
#'   information criterion (\code{AIC}), designed as an approximately unbiased
#'   estimator of the expected Kullback-Leibler discrepancy of a fitted model.
#'   For small samples, or when the number of fitted parameters is large, it is
#'   more appropriate to use the corrected version (\code{AICc}), which is more
#'   nearly unbiased. These two criteria are, however, inconsistent for model
#'   order selection. If a consistent criterion is wanted, one can use the
#'   Bayesian information criterion (\code{BIC}) or the Hannan-Quinn
#'   information criterion (\code{HQ}).
#'
#' For weak ARMA models, i.e., under the assumption that the errors are
#' uncorrelated but not necessarily independent, modified criteria have been
#' adapted: \code{AICm}, \code{AICcm}, \code{BICm}, \code{HQm}.
#'
#' The criteria are defined as follows:
#'
#' \deqn{AIC = n\log(\sigma^{2}) + 2(p + q)}
#'
#' \deqn{AICm = n\log(\sigma^{2}) + \frac{Tr(IJ^{-1})}{\sigma^2}}
#'
#' \deqn{AICc = n\log(\sigma^{2}) + n + \frac{n}{(n-(p + q + 1))} 2(p + q)}
#'
#' \deqn{AICcm = n\log(\sigma^{2}) + \frac{n^{2}}{(n-(p + q + 1))} + \frac{n}{(2(n-(p + q + 1)))} \frac{Tr(IJ^{-1})}{\sigma^2}}
#'
#' \deqn{BIC = n\log(\sigma^{2}) + (p + q)\log(n)}
#'
#' \deqn{BICm = n\log(\sigma^{2}) + \frac{1}{2} \frac{Tr(IJ^{-1})}{\sigma^2}\log(n)}
#'
#' \deqn{HQ = n\log(\sigma^{2}) + 2c(p + q)\log(\log(n))}
#'
#' \deqn{HQm = n\log(\sigma^{2}) + c\frac{Tr(IJ^{-1})}{\sigma^2}\log(\log(n))}
#'
#' @export
#'
#' @examples \donttest{ARMA.selec(CAC40return.sq, P = 3, Q = 3)}
#'
#' @references Boubacar Maïnassara, Y. 2012, Selection of weak VARMA models by
#'   modified Akaike's information criteria, \emph{Journal of Time Series
#'   Analysis}, vol. 33, no. 1, pp. 121-130
#' @references Boubacar Maïnassara, Y. and Kokonendji, C. C. 2016, Modified
#'   Schwarz and Hannan-Quinn information criteria for weak VARMA models,
#'   \emph{Stat Inference Stoch Process}, vol. 19, no. 2, pp. 199-217
ARMA.selec <- function(data, P, Q, c = 2) {
  I <- array(0, c(P + Q + 1, P + Q + 1, (P + 1) * (Q + 1)))
  J.inv <- array(0, c(P + Q + 1, P + Q + 1, (P + 1) * (Q + 1)))
  sigma <- rep(0, (P + 1) * (Q + 1))
  i <- 1
  for (p in 0:P) {
    for (q in 0:Q) {
      if ((p + q) != 0) {
        para.estim <- estimation(p = p, q = q, y = data)
        if (p != 0) {
          estimateur.ar <- as.vector(para.estim$ar)
        } else {
          estimateur.ar <- NULL
        }
        if (q != 0) {
          estimateur.ma <- as.vector(para.estim$ma)
        } else {
          estimateur.ma <- NULL
        }
        omeg <- omega(ar = estimateur.ar, ma = estimateur.ma, y = data)
        I[1:(p + q), 1:(p + q), i] <- as.matrix(omeg$matI)
        J.inv[1:(p + q), 1:(p + q), i] <- omeg$matJ.inv
        sigma[i] <- omeg$sig2
      }
      i <- i + 1
    }
  }
  AIC <- critere.ARMA(data = data, P = P, Q = Q, func = AICb, c = c, I = I, J.inv = J.inv, sigma = sigma, critere.bruit = 2)
  AICc <- critere.ARMA(data = data, P = P, Q = Q, func = AICc, c = c, I = I, J.inv = J.inv, sigma = sigma, critere.bruit = 1)
  AICcm <- critere.ARMA(data = data, P = P, Q = Q, func = AICcm, c = c, I = I, J.inv = J.inv, sigma = sigma, critere.bruit = 1)
  AICm <- critere.ARMA(data = data, P = P, Q = Q, func = AICm, c = c, I = I, J.inv = J.inv, sigma = sigma, critere.bruit = 2)
  BIC <- critere.ARMA(data = data, P = P, Q = Q, func = BICb, c = c, I = I, J.inv = J.inv, sigma = sigma, critere.bruit = 2)
  BICm <- critere.ARMA(data = data, P = P, Q = Q, func = BICm, c = c, I = I, J.inv = J.inv, sigma = sigma, critere.bruit = 2)
  HQ <- critere.ARMA(data = data, P = P, Q = Q, func = HQ, c = c, I = I, J.inv = J.inv, sigma = sigma, critere.bruit = 2)
  HQm <- critere.ARMA(data = data, P = P, Q = Q, func = HQm, c = c, I = I, J.inv = J.inv, sigma = sigma, critere.bruit = 2)
  res <- list(AIC = AIC, AICm = AICm, AICc = AICc, AICcm = AICcm, BIC = BIC, BICm = BICm, HQ = HQ, HQm = HQm)
  return(res)
}

critere.ARMA <- function(data, P, Q, func, c = 2, I, J.inv, sigma, critere.bruit) {
  n <- length(data)
  critere <- matrix(0, nrow = (P + 1), ncol = (Q + 1))
  i <- 1
  for (p in 0:P) {
    for (q in 0:Q) {
      if ((p + q) == 0) {
        # Criterion value of the pure-noise model (p = q = 0)
        switch(critere.bruit,
               critere[1, 1] <- n * log(mean(data^2)) + n,
               critere[1, 1] <- n * log(mean(data^2)))
      } else {
        critere[(p + 1), (q + 1)] <- func(n = n, p = p, q = q, sigma = sigma[i],
                                          I = I[1:(p + q), 1:(p + q), i],
                                          J.inv = J.inv[1:(p + q), 1:(p + q), i], c = c)
      }
      i <- i + 1
    }
  }
  rownames(critere) <- (seq(1:(P + 1)) - 1)
  colnames(critere) <- (seq(1:(Q + 1)) - 1)
  min.ind <- which(critere == min(critere), arr.ind = TRUE)
  p <- (min.ind[, 1] - 1)
  q <- (min.ind[, 2] - 1)
  return(list(critere = critere, p = p, q = q))
}
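# Sketch of how the selection output is read (the helper name is
# illustrative): every criterion in the returned list carries the full
# (P+1) x (Q+1) matrix of values plus the minimising orders p and q.
example_selection <- function(y) {
  sel <- ARMA.selec(y, P = 2, Q = 2)
  list(orders_AICm = c(p = sel$AICm$p, q = sel$AICm$q),
       values_AICm = sel$AICm$critere)
}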
/scratch/gouwar.j/cran-all/cranData/weakARMA/R/Selection.R
#' Computes the parameter significance
#'
#' @description Computes a matrix with the estimated coefficients and their significance.
#'
#' @param ar Vector of AR coefficients; if \code{NULL}, MA process.
#' @param ma Vector of MA coefficients; if \code{NULL}, AR process.
#' @param p Order of AR; if \code{NULL}, MA process.
#' @param q Order of MA; if \code{NULL}, AR process.
#' @param y Univariate time series.
#' @param sd.strong Standard errors of the estimated coefficients in the strong case,
#'   computed in \code{\link[weakARMA]{omega}}; if not provided the function will compute them.
#' @param sd.weak Standard errors of the estimated coefficients in the weak case,
#'   computed in \code{\link[weakARMA]{omega}}; if not provided the function will compute them.
#' @param meanparam If \eqn{\mu} of the time series needs to be computed.
#' @param mu Value of \eqn{\mu}, if it is known and \code{meanparam} is \code{TRUE}.
#'   If not known the function will compute it.
#'
#' @return Matrix of the estimated coefficients with their significance.
#' \describe{
#'   \item{\code{coef}}{Estimate of each coefficient.}
#'   \item{\code{sd}}{Standard deviation in each case.}
#'   \item{\code{t-ratio}}{t-ratio corresponding to each coefficient.}
#'   \item{\code{signif}}{p-value of each parameter; it must be small for the
#'     parameter to be significant.}
#' }
#'
#' @importFrom stats pt sd
#'
#' @export
#'
#' @details The function needs at least one pair among: ar and/or ma, or p and/or q.
#'   It is faster when all parameters are provided.
#'
#' @examples
#' \donttest{signifparam(p = 1, q = 2, y = CAC40return.sq)} # The last parameter is not significant.
#' \donttest{signifparam(p = 1, q = 1, y = CAC40return.sq)} # All the parameters are significant.
#'
signifparam <- function(ar = NULL, ma = NULL, p = NULL, q = NULL, y,
                        sd.strong = NULL, sd.weak = NULL,
                        meanparam = TRUE, mu = NULL) {
  if (is.null(p) & is.null(q) & is.null(ar) & is.null(ma)) {
    stop("Impossible to compute without vectors ar and/or ma, or without order p and/or q")
  }
  if (is.null(p)) {
    p <- length(ar)
  }
  if (is.null(q)) {
    q <- length(ma)
  }
  n <- length(y)
  pValWeak <- numeric(p + q)
  pValStr <- numeric(p + q)
  names <- vector(length = (p + q))
  if ((is.null(ar) & is.null(ma)) || (meanparam == TRUE & is.null(mu))) {
    est <- estimation(p = p, q = q, y = y, meanparam = meanparam)
    mu <- est$mu
    ar <- est$ar
    ma <- est$ma
    sigma <- est$sigma.carre
  }
  if (is.null(sd.strong) || is.null(sd.weak)) {
    om <- omega(ar = ar, ma = ma, y = y)
    sd.strong <- om$standard.dev.strong
    sd.weak <- om$standard.dev.Omega
  }
  tWeak <- c(ar, ma) / sd.weak
  tStr <- c(ar, ma) / sd.strong
  for (i in 1:(p + q)) {
    pValWeak[i] <- round(2 * (1 - pt(abs(tWeak[i]), df = n - (p + q))), 4)
    pValStr[i] <- round(2 * (1 - pt(abs(tStr[i]), df = n - (p + q))), 4)
  }
  if (meanparam == TRUE) {
    tMuW <- sqrt(n) * (mu / sqrt((((1 - sum(ma)) / (1 - sum(ar)))^2) * (sigma)))
    pValMuW <- round(2 * (1 - pt(abs(tMuW), df = (n))), 4)
    # Column order matches the colnames below: coef, sd, t-ratio, signif
    strg <- matrix(data = c(mu, t(c(ar, ma)),
                            sqrt((((1 - sum(ma)) / (1 - sum(ar)))^2) * (sigma / n)), t(sd.strong),
                            mu / sqrt((((1 - sum(ma)) / (1 - sum(ar)))^2) * (sigma / n)), t(tStr),
                            pValMuW, t(pValStr)),
                   nrow = (p + q + 1), ncol = 4)
    weak <- matrix(data = c(mu, t(c(ar, ma)),
                            sqrt((((1 - sum(ma)) / (1 - sum(ar)))^2) * (sigma / n)), t(sd.weak),
                            mu / sqrt((((1 - sum(ma)) / (1 - sum(ar)))^2) * (sigma / n)), t(tWeak),
                            pValMuW, t(pValWeak)),
                   nrow = (p + q + 1), ncol = 4)
    names[1] <- "mu"
    for (i in 1:p) {
      names[i + 1] <- paste("alpha", i, sep = " ")
    }
    for (i in 1:q) {
      names[i + p + 1] <- paste("beta", i, sep = " ")
    }
  } else {
    # Column order matches the colnames below: coef, sd, t-ratio, signif
    strg <- matrix(data = c(t(c(ar, ma)), t(sd.strong), t(tStr), t(pValStr)),
                   nrow = (p + q), ncol = 4)
    weak <- matrix(data = c(t(c(ar, ma)), t(sd.weak), t(tWeak), t(pValWeak)),
                   nrow = (p + q), ncol = 4)
    for (i in 1:p) {
      names[i] <- paste("alpha", i, sep = " ")
    }
    for (i in 1:q) {
      names[i + p] <- paste("beta", i, sep = " ")
    }
  }
  colnames(strg) <- c("coef", "sd", "t-ratio", "signif")
  colnames(weak) <- c("coef", "sd", "t-ratio", "signif")
  rownames(strg) <- names
  rownames(weak) <- names
  ret <- list(weak = weak, strong = strg)
  return(ret)
}
/scratch/gouwar.j/cran-all/cranData/weakARMA/R/Signif_param.R
#'Simulation of ARMA(p,q) model. #' @description Simulates an ARMA, AR or MA process according to the arguments #' given. #' #' @param n Number of observations. #' @param ar Vector of AR coefficients. If \code{NULL}, the simulation is a MA process. #' @param ma Vector of MA coefficients. If \code{NULL}, the simulation is a AR process. #' @param sigma Standard deviation. #' @param eta Vector of white noise sequence. Allows the user to use his own #' white noise. #' @param method Defines the kind of noise used for the simulation. By default, #' the noise used is strong. See 'Details'. #' @param k Integer used in the creation of the noise. See 'Details'. #' @param mu Integer for the mean of the series. #' @param ... Arguments needed to simulate GARCH noise. See 'Details'. #' #' @details ARMA model is of the following form : \deqn{ X_{t}-\mu = e_{t} + a_{1} (X_{t-1}-\mu) #' + a_{2} (X_{t-2}-\mu) + ... + a_{p} (X_{t-p}-\mu) - b_1 e_{t-1} - b_2 e_{t-2} - ... - b_{q} e_{t-q}} #' where \eqn{e_t} is a sequence of uncorrelated random variables with zero #' mean and common variance \eqn{\sigma^{2} > 0} . \eqn{ar = (a_{1}, a_{2}, ..., a_{p})} are #' autoregressive coefficients and \eqn{ma = (b_{1}, b_{2}, ... , b_{q})} are moving #' average coefficients. Characteristic polynomials of ar and ma must #' constitute a stationary process. #' #' Method "\code{strong}" realise a simulation with gaussian white noise. #' #' Method "\code{product}", "\code{ratio}" and "\code{product.square}" #' realise a simulation with a weak white noise. These methods employ #' respectively the functions \code{\link{wnPT}}, \code{\link{wnRT}} and #' \code{\link{wnPT_SQ}} to simulate nonlinear ARMA model. So, the #' paramater \code{k} is an argument of these functions. See \code{\link{wnPT}}, \code{\link{wnRT}} #' or \code{\link{wnPT_SQ}}. #' #' Method "\code{GARCH}" gives an ARMA process with a GARCH noise. See #' \code{\link{simGARCH}}. #' #' @return Returns a vector containing the \code{n} simulated observations of the #' time series. #' #' @importFrom stats rnorm #' #' @export #' #' @examples #' y <- sim.ARMA(n = 100, ar = 0.95, ma = -0.6, method = "strong" ) #' y2 <- sim.ARMA(n = 100, ar = 0.95, ma = -0.6, method = "ratio") #' y3 <- sim.ARMA(n = 100, ar = 0.95, ma = -0.6, method = "GARCH", c = 1, A = 0.1, B = 0.88) #' y4 <- sim.ARMA(n = 100, ar = 0.95, ma = -0.6, method = "product") #' y5 <- sim.ARMA(n = 100, ar = 0.95, ma = -0.6, method = "product.square") #' #' @references Francq, C. and Zakoïan, J.M. 1998, Estimating linear representations #' of nonlinear processes, \emph{Journal of Statistical Planning and #' Inference}, vol. 68, no. 1, pp. 145-165 #' #' @seealso \code{\link[stats]{arima.sim}} #' sim.ARMA <- function (n, ar = NULL, ma = NULL, sigma = 1, eta = NULL, method = "strong", k = 1, mu=0, ...) 
{
  p <- length(ar)
  q <- length(ma)
  if (is.null(eta)) {
    switch(method,
           "strong" = {eta <- rnorm(n, sd = sqrt(sigma))},  # sigma is treated as a variance here
           "product" = {eta <- wnPT(n, sigma, k = k)},
           "product.square" = {eta <- wnPT_SQ(n, sigma, k = k)},
           "ratio" = {eta <- wnRT(n, sigma, k = k)},
           "GARCH" = {eta <- simGARCH(n = n, ...)}
    )
  }
  x <- eta
  xbis <- x - mu
  if (!is.null(ma))
    if (min(abs(polyroot(c(1, -1 * ma)))) < 1) warning("Polynomial MA not invertible")
  if (!is.null(ar))
    if (min(abs(polyroot(c(1, -1 * ar)))) < 1) warning("Polynomial AR not stationary")
  if (is.null(ma)) {ma <- 0; q <- 0}
  if (is.null(ar)) {ar <- 0; p <- 0}
  for (t in 2:n) {
    xbis[t] <- eta[t] +
      sum(ar[1:min((t - 1), p)] * xbis[(t - 1):max(1, (t - p))]) -
      sum(ma[1:min((t - 1), q)] * eta[(t - 1):max(1, (t - q))])
    # consistency with the article -> -sum(ar * x[(t - 1):(t - p)])
  }
  x <- xbis + mu
  return(x)
}
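# Sketch illustrating what "weak" noise means here (the helper name is
# illustrative): the wnPT() noise from WhiteNoise.R is uncorrelated, yet
# its squares are correlated, so the observations are dependent.
example_weak_noise <- function() {
  e <- wnPT(1000, k = 2)
  list(acf_level = stats::acf(e, plot = FALSE),    # approximately zero
       acf_square = stats::acf(e^2, plot = FALSE)) # clearly non-zero
}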
/scratch/gouwar.j/cran-all/cranData/weakARMA/R/SimARMA.R
#' Estimation of VAR(p) model #' #' @param x Matrix of dimension (n,p+q). #' @param p Integer for the lag order. #' #' #' @return A list containing: #' \describe{ #' \item{\code{ac}}{Coefficients data matrix.} #' \item{\code{p}}{Integer of the lag order.} #' \item{\code{k}}{Dimension of the VAR.} #' \item{\code{res}}{Matrix of residuals.} #' } #' #' @description Estimates the coefficients of a VAR(p) model. Used in \code{\link{matXi}}. #' #' #' @export #' VARest <- function (x, p) { n <- nrow(x) k <- ncol(x) y <- t(x[(p + 1):n, ]) z0 <- t(x[n:1, ]) z <- matrix(1, nrow = k * p, ncol = 1) ac <- array(1,dim = c(k,k,p)) for (i in n:(p + 1)) { m <- t(t(as.vector(matrix(z0[, (i - p + 1):i])))) z <- cbind(z, m) } z <- z[, 2:(n - p + 1), drop = FALSE] if (kappa(z)<(1 / sqrt(.Machine$double.eps))){ b <- tcrossprod(y, z) %*% solve(tcrossprod(z)) }else { b <- tcrossprod(y, z) %*% ginv(tcrossprod(z)) } e <- y - b %*% z for (i in 1:p){ ac[,,i]<-b[,(1+(k*(i-1))):(k*i)] } return(list(ac=ac, p=p, k=k, res=t(e))) }
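# Sketch of VARest() on simulated data (the helper name is illustrative): a
# bivariate VAR(1) with coefficient matrix 0.5 * I should be recovered,
# approximately, in the first slice of the returned ac array.
example_varest <- function() {
  n <- 500
  x <- matrix(0, n, 2)
  for (t in 2:n) x[t, ] <- 0.5 * x[t - 1, ] + stats::rnorm(2)
  VARest(x, p = 1)$ac[, , 1]
}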
/scratch/gouwar.j/cran-all/cranData/weakARMA/R/VARest.R
#' Weak white noise
#' @description Simulates an uncorrelated but dependent noise process.
#' @param n Number of observations.
#' @param sigma Standard deviation.
#' @param k Integer \eqn{\neq 0} to prevent a zero denominator.
#' @param ninit Length of 'burn-in' period.
#'
#' @importFrom stats rnorm
#'
#' @export
#'
#' @return Vector of size \code{n} containing a nonlinear sequence \eqn{X_{i}} such that
#' \eqn{X_i = \frac{Z_{i+1}}{|Z_{i}| + k}}, where \eqn{(Z_{i})} is a sequence of iid
#' mean-zero random variables with variance \eqn{\sigma^2}.
#' @seealso \code{\link{wnPT}}, \code{\link{wnPT_SQ}}, \code{\link{simGARCH}}
#'
#' @references Romano, J. and Thombs, L. 1996, Inference for autocorrelation under weak assumptions,
#' \emph{Journal of the American Statistical Association}, vol. 91, no. 434, pp. 590-600
#'
#' @examples
#' wnRT(100)
#' wnRT(100, sigma = 1)
wnRT <- function(n, sigma = 1, k = 1, ninit = 100) {
  eps <- rep(0, (n + ninit))
  eta <- rnorm((n + ninit + 1), sd = sigma)
  for (t in 1:(n + ninit)) {
    eps[t] <- eta[t + 1] / (abs(eta[t]) + k)
  }
  return(eps[(ninit + 1):(n + ninit)])
}

#' Weak white noise
#' @description Simulates an uncorrelated but dependent noise process.
#' @param n Number of observations.
#' @param sigma Standard deviation.
#' @param k Integer giving the number of past observations used.
#' @param ninit Length of 'burn-in' period.
#'
#' @importFrom stats rnorm
#'
#' @export
#'
#' @return Vector of size \code{n} containing a nonlinear sequence \eqn{X_{i}} such that
#' \eqn{X_{i} = Z_{i}Z_{i-1}...Z_{i-k}}, where \eqn{(Z_{i})} is a sequence of iid
#' mean-zero random variables with variance \eqn{\sigma^2}.
#' @seealso \code{\link{wnRT}}, \code{\link{wnPT_SQ}}, \code{\link{simGARCH}}
#'
#' @references Romano, J. and Thombs, L. 1996, Inference for autocorrelation under weak assumptions,
#' \emph{Journal of the American Statistical Association}, vol. 91, no. 434, pp. 590-600
#'
#' @examples
#' wnPT(100)
#' wnPT(100, sigma = 1, k = 1)
#' wnPT(100, k = 0) # strong noise
wnPT <- function(n, sigma = 1, k = 1, ninit = 100) {
  eps <- rep(0, (n + ninit))
  eta <- rnorm((n + ninit), sd = sigma)
  for (t in (k + 1):(n + ninit)) eps[t] <- prod(eta[t:(t - k)])
  return(eps[(ninit + 1):(n + ninit)])
}

#' Weak white noise
#' @description Simulates an uncorrelated but dependent noise process.
#' @param n Number of observations.
#' @param sigma Standard deviation.
#' @param k Integer giving the number of past observations used.
#' @param ninit Length of 'burn-in' period.
#'
#' @importFrom stats rnorm
#' @export
#' @return Vector of size \code{n} containing a nonlinear sequence \eqn{X_{i}} such that
#' \eqn{X_{i} = Z^{2}_{i}Z_{i-1}...Z_{i-k}}, where \eqn{(Z_{i})} is a sequence of iid
#' mean-zero random variables with variance \eqn{\sigma^2}.
#' @seealso \code{\link{wnRT}}, \code{\link{wnPT}}, \code{\link{simGARCH}}
#'
#' @references Romano, J. and Thombs, L. 1996, Inference for autocorrelation under weak assumptions,
#' \emph{Journal of the American Statistical Association}, vol. 91, no. 434, pp. 590-600
#'
#' @examples
#' wnPT_SQ(100)
#' wnPT_SQ(100, sigma = 1, k = 1)
wnPT_SQ <- function(n, sigma = 1, k = 1, ninit = 100) {
  eps <- rep(0, (n + ninit))
  eta <- rnorm(n + ninit, sd = sigma)
  for (t in (k + 1):(n + ninit)) eps[t] <- eta[t] * prod(eta[t:(t - k)])
  return(eps[(ninit + 1):(n + ninit)])
}

#' GARCH process
#' @description Simulates a GARCH process, which is an example of a weak white noise.
#'
#' @param n Number of observations.
#' @param c Positive number.
#' @param A Vector of ARCH coefficients >= 0.
#' @param B Vector of GARCH coefficients >= 0. If \code{NULL}, the
#'   simulation is an ARCH process.
#' @param ninit Length of 'burn-in' period.
#'
#' @importFrom stats rnorm
#' @export
#'
#' @return Vector of size \code{n} containing a nonlinear sequence \eqn{\epsilon_t} such that
#' \deqn{\epsilon_{t} = H_{t}^{1/2} \eta_{t}} where
#' \deqn{H_{t} = c + a_{1}\epsilon_{t-1}^{2} + ... + a_{q}\epsilon_{t-q}^{2} + b_{1}H_{t-1} + ... + b_{p}H_{t-p}}
#'
#' @references Francq C. and Zakoïan J.M., 2010, \emph{GARCH models: structure, statistical inference and financial applications}
#'
#' @seealso \code{\link{wnRT}}, \code{\link{wnPT}}, \code{\link{wnPT_SQ}}
#'
#' @examples
#' simGARCH(100, c = 1, A = 0.25)
#' simGARCH(100, c = 1, A = 0.1, B = 0.88)
simGARCH <- function(n, c, A, B = NULL, ninit = 100) {
  q <- length(A)
  p <- length(B)
  # is.null() rather than missing(): B defaults to NULL and may also be
  # passed explicitly as NULL.
  if (is.null(B)) {
    B <- 0
    p <- 0
  }
  eps <- rep(0, n + ninit)
  eta <- rnorm(n + ninit, sd = 1)
  H <- rep(0, n + ninit)
  H[1] <- c
  # Initialise with the same sqrt(H) scaling as the recursion below.
  eps[1] <- sqrt(H[1]) * eta[1]
  for (t in 2:(n + ninit)) {
    H[t] <- c + sum(A[1:min((t - 1), q)] * (eps[(t - 1):max(1, (t - q))])^2) +
      sum(B[1:min((t - 1), p)] * H[(t - 1):max(1, (t - p))])
    eps[t] <- sqrt(H[t]) * eta[t]
  }
  eps <- eps[(1 + ninit):(n + ninit)]
  return(eps)
}
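# Sketch (the helper name is illustrative): when sum(A) + sum(B) < 1 the
# GARCH noise is second-order stationary with unconditional variance
# c / (1 - sum(A) - sum(B)), which the simulated series should match on
# average over long samples.
example_garch_variance <- function() {
  eps <- simGARCH(10000, c = 1, A = 0.1, B = 0.88)
  c(empirical = stats::var(eps), theoretical = 1 / (1 - 0.1 - 0.88))
}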
/scratch/gouwar.j/cran-all/cranData/weakARMA/R/WhiteNoise.R
#' Aggregate E4 data into 1min timesteps
#' @param x An object read by \code{\link{read_e4}}.
#' @export
aggregate_e4_data <- function(x){

  datetime_1min <- EDA <- y <- z <- a <- TEMP <- HR <- NULL

  x$EDA <- padr::thicken(x$EDA, interval = "1 min", colname = "datetime_1min") %>%
    dplyr::group_by(datetime_1min) %>%
    dplyr::summarize(EDA = mean(EDA)) %>%
    dplyr::rename(DateTime = datetime_1min)

  x$ACC <- padr::thicken(x$ACC, interval = "1 min", colname = "datetime_1min") %>%
    dplyr::group_by(datetime_1min) %>%
    dplyr::summarize(x = mean(x), y = mean(y), z = mean(z), a = mean(a)) %>%
    dplyr::rename(DateTime = datetime_1min)

  x$TEMP <- padr::thicken(x$TEMP, interval = "1 min", colname = "datetime_1min") %>%
    dplyr::group_by(datetime_1min) %>%
    dplyr::summarize(TEMP = mean(TEMP)) %>%
    dplyr::rename(DateTime = datetime_1min)

  x$HR <- padr::thicken(x$HR, interval = "1 min", colname = "datetime_1min") %>%
    dplyr::group_by(datetime_1min) %>%
    dplyr::summarize(HR = mean(HR)) %>%
    dplyr::rename(DateTime = datetime_1min)

  x$BVP <- NULL
  x$IBI <- NULL

  return(x)
}
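# Usage sketch ("session.zip" is a placeholder path; read_e4() is defined
# elsewhere in this package):
# e4 <- read_e4("session.zip")
# e4_1min <- aggregate_e4_data(e4)  # EDA, ACC, TEMP, HR averaged per minute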
/scratch/gouwar.j/cran-all/cranData/wearables/R/aggregate_e4_data.R
max <- function(...) suppressWarnings(base::max(... , na.rm = TRUE)) min <- function(...) suppressWarnings(base::min(... , na.rm = TRUE)) mean <- function(...) base::mean(... , na.rm = TRUE) sum <- function(...) base::sum(... , na.rm = TRUE) median <- function(...) stats::median(... , na.rm = TRUE) sd <- function(...) stats::sd(... , na.rm = TRUE) #' Configuration of the SVM algorithm for binary classification #' #' @author Sara Taylor \email{sataylor@@mit.edu} #' @references \url{https://eda-explorer.media.mit.edu/} "binary_classifier_config" #' Configuration of the SVM algorithm for ternary classification #' #' @author Sara Taylor \email{sataylor@@mit.edu} #' @references \url{https://eda-explorer.media.mit.edu/} "multiclass_classifier_config" #' First derivative #' #' Get the first derivative. #' #' @param values vector of numbers get_derivative <- function(values){ end <- length(values) if(end < 3){ list(NaN) } else { list((values[2:(end-1)] + values[3:end]) / 2 - (values[2:(end-1)] + values[1:(end-2)]) / 2) } } #' Second derivative #' #' Get the second derivative. #' #' @param values vector of numbers get_second_derivative <- function(values){ end <- length(values) if(end < 3){ list(NaN) } else { list(values[3:end] - 2 * values[2:(end-1)] + values[1:(end-2)]) } } #' Derivative features #' #' Compute derivative features. #' #' @param derivative vector of derivatives #' @param feature_name name of feature compute_derivative_features <- function(derivative, feature_name){ features <- list() features[paste0(feature_name, "_max")] <- max(derivative) features[paste0(feature_name, "_min")] <- min(derivative) features[paste0(feature_name, "_abs_max")] <- max(abs(derivative)) features[paste0(feature_name, "_abs_avg")] <- mean(abs(derivative)) as.data.frame(features) } #' Amplitude features #' #' Compute amplitude features. #' #' @param data vector of amplitude values #' @importFrom dplyr across .data compute_amplitude_features <- function(data){ data %>% group_by(.data$group) %>% mutate(raw_derivative = get_derivative(.data$EDA), raw_second_derivative = get_second_derivative(.data$EDA), filtered_derivative = get_derivative(.data$filtered_eda), filtered_second_derivative = get_second_derivative(.data$filtered_eda)) %>% summarize(raw_mean = mean(.data$EDA), filtered_mean = mean(.data$filtered_eda), across(c(.data$raw_derivative, .data$raw_second_derivative, .data$filtered_derivative, .data$filtered_second_derivative), list(max = ~max(unlist(.x)), min = ~min(unlist(.x)), abs_max = ~max(abs(unlist(.x))), abs_avg = ~mean(abs(unlist(.x))))), .groups = "drop") %>% select(-.data$group) } #' Max value per segment of length n #' #' Give the maximum value of a vector of values per segment of length n. #' #' @param values array of numbers #' @param n length of each segment #' @param output_length argument to adjust for final segment not being full max_per_n <- function(values, n, output_length){ if (n == 1) { abs(values[1:output_length]) } else { matrix <- matrix(values[1:(n * output_length)], nrow = n, byrow = FALSE, dimnames = list(1:n, 1:output_length)) as.double(apply(abs(matrix), 2, max)) } } #' Wavelet decomposition #' #' Compute wavelet decomposition. 
#' #' @param data vector of values #' @importFrom waveslim dwt compute_wavelet_decomposition <- function(data){ output_length <- (length(data) %/% 8) * 8 decompostion <- waveslim::dwt(data[1:output_length], "haar", 3, "periodic") list(level1 = decompostion$d1, level2 = decompostion$d2, level3 = decompostion$d3) } #' Wavelet coefficients #' #' Compute wavelet coefficients. #' #' @param data data with an EDA element compute_wavelet_coefficients <- function(data){ wavelets <- compute_wavelet_decomposition(data$EDA) one_second_feature_length <- ceiling(nrow(data) / 8) one_second_level_1_features <- max_per_n(wavelets$level1, 4, one_second_feature_length) one_second_level_2_features <- max_per_n(wavelets$level2, 2, one_second_feature_length) one_second_level_3_features <- max_per_n(wavelets$level3, 1, one_second_feature_length) half_second_feature_length <- ceiling(nrow(data) / 4) half_second_level_1_features <- max_per_n(wavelets$level1, 2, half_second_feature_length) half_second_level_2_features <- max_per_n(wavelets$level2, 1, half_second_feature_length) list(one_second_features = data.frame(one_second_level_1 = one_second_level_1_features, one_second_level_2 = one_second_level_2_features, one_second_level_3 = one_second_level_3_features), half_second_features = data.frame(half_second_level_1 = half_second_level_1_features, half_second_level_2 = half_second_level_2_features)) } #' Addition of chunk groups #' #' partition data into chunks of a fixed number of rows in order to calculate #' aggregated features per chunk #' #' @param data df to partition into chunks #' @param rows_per_chunk size of a chunk #' @importFrom magrittr "%>%" #' @importFrom dplyr arrange bind_rows mutate .data add_chunk_group <- function(data, rows_per_chunk){ old_part <- data %>% dplyr::mutate(group = rep(seq(1, by=rows_per_chunk, length.out = nrow(data)/rows_per_chunk), each=rows_per_chunk, length.out = nrow(data))) new_part <- old_part[tail(unique(old_part$group), -1), ] %>% dplyr::mutate(group = .data$group - rows_per_chunk) dplyr::bind_rows(old_part, new_part) %>% dplyr::arrange(.data$group) } #' Features computation #' #' Compute features for SVM #' #' @param data df with eda, filtered eda and timestamp columns #' @export #' @importFrom magrittr "%>%" #' @importFrom dplyr group_by summarize select mutate across .data compute_features2 <- function(data){ sec_per_chunk <- 5 coefficients <- compute_wavelet_coefficients(data) fun_lis <- list( max = max, mean = mean, std = sd, median = median, positive = ~sum(.x > 0) ) out_1sec <- coefficients$one_second_features %>% add_chunk_group(sec_per_chunk) %>% dplyr::group_by(.data$group) %>% dplyr::summarize(across(.fns = fun_lis), .groups = "drop") %>% dplyr::select(-.data$group) out_05sec <- coefficients$half_second_features %>% add_chunk_group(2 * sec_per_chunk) %>% dplyr::group_by(.data$group) %>% dplyr::summarize(across(.fns = fun_lis), .groups = "drop") %>% dplyr::select(-.data$group) amplitude_features <- data %>% add_chunk_group(8 * sec_per_chunk) %>% compute_amplitude_features() timestamps <- data$DateTime[1] + sec_per_chunk * (1:nrow(amplitude_features) - 1) as.data.frame(cbind(id = timestamps, out_1sec, out_05sec, amplitude_features)) } #' SVM kernel #' #' Generate kernel needed for SVM #' #' @param kernel_transformation Data matrix used to transform EDA features #' into kernel values #' @param sigma The inverse kernel width used by the kernel #' @param columns Features computed from EDA signal get_kernel <- function(kernel_transformation, sigma, columns){ 
kernlab::kernelMatrix(kernlab::rbfdot(sigma = sigma), kernel_transformation, as.matrix(columns)) } #' Binary classifiers #' #' Generate classifiers (artifact, no artifact) #' #' @param data features from EDA signal #' @export predict_binary_classifier <- function(data){ relevant_columns <- data[c("raw_mean", "raw_derivative_abs_max", "raw_second_derivative_max", "raw_second_derivative_abs_avg", "filtered_mean", "filtered_second_derivative_min", "filtered_second_derivative_abs_max", "one_second_level_1_max", "one_second_level_1_mean", "one_second_level_1_std", "one_second_level_2_std", "one_second_level_3_std", "one_second_level_3_median")] config <- wearables::binary_classifier_config kernel <- unname(as.data.frame(get_kernel(config$kernel_tranformation, config$sigma, relevant_columns))) labels <- sapply(kernel, function(value){ as.integer(sign(sum(config$coefficients * value) + config$intercept)) }) data.frame(id = data$id, label = labels) } #' Choice between two classes #' #' Make choice between two classes based on kernel values #' #' @param class_a Number by which class a is indicated #' @param class_b Number by which class b is indicated #' @param kernels Kernel values from SVM #' @export choose_between_classes <- function(class_a, class_b, kernels){ config <- wearables::multiclass_classifier_config coef_a <- config$coeffcients[[paste0(class_a, "_constrasted_with_", class_b)]] coef_b <- config$coeffcients[[paste0(class_b, "_constrasted_with_", class_a)]] intercept_a_b <- config$intercept[paste0(class_a, "_and_", class_b)] kernel_a <- kernels[[paste0("class_", class_a)]] kernel_b <- kernels[[paste0("class_", class_b)]] sapply(seq_len(ncol(kernel_a)), function(index) { prediction_value <- sum(coef_a * kernel_a[,index]) + sum(coef_b * kernel_b[,index]) + intercept_a_b if (prediction_value > 0) { as.integer(class_a) } else { as.integer(class_b) } }) } #' Ternary classifiers #' #' Generate classifiers (artifact, unclear, no artifact) #' #' @param data features from EDA signal #' @export predict_multiclass_classifier <- function(data){ relevant_columns <- data[c("filtered_second_derivative_abs_max", "filtered_second_derivative_min", "one_second_level_1_std", "raw_second_derivative_max", "raw_mean", "one_second_level_1_max", "raw_second_derivative_abs_max", "raw_second_derivative_abs_avg", "filtered_second_derivative_max", "filtered_mean")] config <- wearables::multiclass_classifier_config kernels <- lapply(config$kernel_tranformation, get_kernel, config$sigma, relevant_columns) label_predictions <- cbind(`class -1 and 0` = choose_between_classes(-1, 0, kernels), `class -1 and 1` = choose_between_classes(-1, 1, kernels), `class 0 and 1` = choose_between_classes(0, 1, kernels)) label_majority_votes <- apply(label_predictions, 1, function(values) { out <- values[duplicated(values)] if(length(out) == 0)out <- 0 out }) data.frame(id = data$id, label = label_majority_votes) } #' Artifact plots #' #' Plot artifacts after eda_data is classified #' #' @param labels labels with artifact classification #' @param eda_data data upon which the labels are plotted #' @export #' @importFrom ggplot2 ggplot aes geom_vline geom_line #' @importFrom dplyr .data plot_artifacts <- function(labels, eda_data){ binaries <- labels %>% dplyr::filter(.data$label == -1) %>% mutate(min = as.numeric(lubridate::force_tz(.data$id, "CEST") - eda_data$DateTime[1], units = "mins")) %>% dplyr::pull(.data$min) eda_data %>% mutate(min = as.numeric(.data$DateTime - .data$DateTime[1], units = "mins")) %>% ggplot(aes(.data$min, 
.data$EDA)) + geom_vline(xintercept = binaries, colour = "red", size = 4) + geom_line(size = 1) }
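# End-to-end sketch of the artifact pipeline in this file (the helper name
# is illustrative): eda_data is assumed to be the $EDA element of a
# read_e4() object run through process_eda(), so that a filtered_eda
# column is present.
detect_artifacts_sketch <- function(eda_data) {
  features <- compute_features2(eda_data)
  labels <- predict_binary_classifier(features)  # -1 = artifact, 1 = clean
  plot_artifacts(labels, eda_data)
}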
/scratch/gouwar.j/cran-all/cranData/wearables/R/artifact_detection.R
#' Convert an E4 data stream to a timeseries #' @description Creates an xts object indexed by time #' @param data A dataframe, subelements of list as output by read_e4 function #' @param index Which column (integer) to use as the data in the timeseries. Default: 2. #' @param name_col Column name to give to the timeseries data. #' #' @export #' @importFrom xts xts as_timeseries <- function(data, index = 2, name_col = "V1"){ tdata <- pad_e4(data) panel <- xts(tdata[[index]], order.by = tdata[[1]]) colnames(panel) <- name_col return(panel) }
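# Usage sketch ("session.zip" is a placeholder path): convert the EDA
# stream of a read_e4() object to an xts series; xts ships its own plot
# method.
# e4 <- read_e4("session.zip")
# eda_ts <- as_timeseries(e4$EDA, index = 2, name_col = "EDA")
# plot(eda_ts)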
/scratch/gouwar.j/cran-all/cranData/wearables/R/as_timeseries.R
#' Batch analysis
#'
#' Read and process all ZIP files in a directory
#'
#' @param path_in input path
#' @param path_out output path
#' @export
#' @importFrom futile.logger flog.info
batch_analysis <- function(path_in = NULL, path_out = "."){

  # If no input path is given, ask for one on Windows; otherwise require it.
  if(is.null(path_in)){
    if(.Platform$OS.type == "windows"){
      path_in <- utils::choose.dir()
    } else {
      stop("Provide an input directory (argument path_in)")
    }
  }

  zips <- list.files(path_in, pattern = "[.]zip$",
                     recursive = TRUE, full.names = TRUE)

  for(i in seq_along(zips)){
    flog.info(paste("----- ", zips[i], " -----"))
    out <- read_and_process_e4(zips[i])
    fn_root <- basename(tools::file_path_sans_ext(zips[i]))
    out_file <- file.path(path_out, paste0(fn_root, ".rds"))
    saveRDS(out, out_file)
    flog.info(paste("----- ", i, "/", length(zips), " complete -----"))
  }
}
/scratch/gouwar.j/cran-all/cranData/wearables/R/batch_analysis.R
#' Force character datetime variable ("yyyy-mm-dd hh:mm:ss") to system timezone #' @param time Datetime variable ("yyyy-mm-dd hh:mm:ss") #' @export char_clock_systime <- function (time){ lubridate::force_tz(lubridate::ymd_hms(time), tz = Sys.timezone()) } #' Filter datasets for a Datetime start + end #' @description A function to determine how many intervals should be #' created. The question is at what time do you want the filecut to start, what should be #' the period that you want separate files for, and what should the interval be? #' @param time_start User input start time in the character format #' "yyyy-mm-dd hh:mm:ss" / e.g., "2019-11-27 08:32:00". Where do you want the file cut to start? #' @param time_end User input end time (same format as time_start) #' @param interval # Interval: User input interval (in minutes/ e.g., 5) #' What is the duration of the interval you want to divide the period into? #' For example, the paper by de Looff et al. (2019) uses 5 minute intervals over #' a 30 minute period preceding aggressive behavior. The 5 minute interval is #' chosen as for the calculation of some of the heart rate variability parameters #' one needs at least 5 minutes of data, but shorter intervals are possible as well, see for instance: #' Shaffer, Fred, en J. P. Ginsberg. ‘An Overview of Heart Rate Variability Metrics and Norms’. #' Frontiers in Public Health 5 (28 september 2017). https://doi.org/10.3389/fpubh.2017.00258. #' @export #' @importFrom lubridate ymd_hms minutes e4_filecut_intervals <- function(time_start, time_end, interval){ # This should also be put in a separate box in the app user_inp_time <- lubridate::ymd_hms(time_start, tz = Sys.timezone()) n_mins <- as.numeric(difftime(time_end, time_start, units = "mins")) n_intervals <- floor(n_mins / interval) vec_interval <- time_start + lubridate::minutes(seq(from = 0, by = interval, length.out = n_intervals)) return(list(vec_interval = vec_interval, interval = interval, time_start = time_start, time_end = time_end)) } #' Function to filter the data object based on the time period and intervals that #' are needed for the files to be cut. #' The function also creates identical Empatica E4 zipfiles in the same directory as #' where the original zipfile is located. #' @param data Object read with \code{\link{read_e4}} #' @param time_start User input start time in the character format #' "yyyy-mm-dd hh:mm:ss" / e.g., "2019-11-27 08:32:00". Where do you want the file cut to start? #' @param time_end User input end time (same format as time_start) #' @param interval # Interval: User input interval (in minutes/ e.g., 5) #' What is the duration of the interval you want to divide the period into? #' For example, the paper by de Looff et al. (2019) uses 5 minute intervals over #' a 30 minute period preceding aggressive behavior. The 5 minute interval is #' chosen as for the calculation of some of the heart rate variability parameters #' one needs at least 5 minutes of data. #' @param out_path The directory where to write the cut files; defaults to the input folder. #' @param fn_name The directory where to write the cut files without the extension. 
#' @return out_path fn_name #' @importFrom utils zip write.table #' @export filter_createdir_zip <- function(data, time_start, time_end, interval, out_path = NULL, fn_name = NULL){ # Create an out object with the start times of the intervals needed out <- e4_filecut_intervals(time_start, time_end, interval) if(is.null(out_path)){ out_path <- dirname(attributes(data)$zipfile) dir.create(out_path, showWarnings = FALSE) } if(is.null(fn_name)){ fn_base <- basename(attributes(data)$zipfile) } else { fn_base <- tools::file_path_sans_ext(fn_name) } # Iterate over the intervals needed to be split for (i in seq_along(out$vec_interval)){ # Create a directory that uses the name of the original file and # then adds the interval number out_fn <- file.path(out_path, paste0(fn_base,"_", i)) dir.create(out_fn, showWarnings = FALSE) # What are the start and end times of the intervals needed? # charclock could be entered here start <- lubridate::ymd_hms(out$vec_interval[i], tz = Sys.timezone()) end <- lubridate::ymd_hms(out$vec_interval[i], tz = Sys.timezone()) + lubridate::minutes(interval) # Filter the data data_filtered <- structure( list( EDA = dplyr::filter(data$EDA, .data$DateTime >= start, .data$DateTime <= end), ACC = dplyr::filter(data$ACC, .data$DateTime >= start, .data$DateTime <= end), TEMP = dplyr::filter(data$TEMP, .data$DateTime >= start, .data$DateTime <= end), HR = dplyr::filter(data$HR, .data$DateTime >= start, .data$DateTime <= end), BVP = dplyr::filter(data$BVP, .data$DateTime >= start, .data$DateTime <= end), IBI = dplyr::filter(data$IBI, .data$DateTime >= start, .data$DateTime <= end) ), class = "e4data", zipfile = attributes(data)$zipfile, tz = attributes(data)$tz ) # And write it as unix (for the zip files) unix_e4 <- as.numeric(lubridate::with_tz(lubridate::ymd_hms(out$vec_interval[i], tz = Sys.timezone()), tzone = "UTC")) # IBI has a different structure than the other files # IBI.csv has a column with the number of seconds since the start of the original recording # Therefore we use the unix time of the EDA file to ensure that the proper DateTime intervals # are selected and saved. 
unix_ibi <- as.numeric(lubridate::with_tz(lubridate::ymd_hms(data$EDA$DateTime[1], tz = Sys.timezone()), tzone = "UTC")) # Hz from the datafiles acc_hz <- 32 bvp_hz <- 64 eda_hz <- 4 hr_hz <- 1 temp_hz <- 4 # Write the file to the created directory # EDA utils::write.table(c(unix_e4, eda_hz, data_filtered$EDA$EDA), file = paste0(out_fn, "/", "EDA.csv"), quote=F, dec=".", row.names=FALSE, col.names=FALSE) # ACC utils::write.table(data.frame(x = c(unix_e4, acc_hz, data_filtered$ACC$x), y = c(unix_e4, acc_hz, data_filtered$ACC$y), z = c(unix_e4, acc_hz, data_filtered$ACC$z)), file = paste0(out_fn, "/", "ACC.csv"), quote=F, sep = ",", dec=".", row.names=FALSE, col.names=FALSE) # TEMP utils::write.table(c(unix_e4, temp_hz, data_filtered$TEMP$TEMP), file = paste0(out_fn, "/", "TEMP.csv"), quote=F, dec=".", row.names=FALSE, col.names=FALSE) # HR utils::write.table(c(unix_e4, hr_hz, data_filtered$HR$HR), file = paste0(out_fn, "/", "HR.csv"), quote=F, dec=".", row.names=FALSE, col.names=FALSE) # BVP utils::write.table(c(unix_e4, bvp_hz, data_filtered$BVP$BVP), file = paste0(out_fn, "/", "BVP.csv"), quote=F, dec=".", row.names=FALSE, col.names=FALSE) # IBI utils::write.table(data.frame(time = c(unix_ibi, data_filtered$IBI$seconds), ibi = c("IBI", data_filtered$IBI$IBI)), file = paste0(out_fn, "/", "IBI.csv"), quote=F, sep = ",", dec=".", row.names=FALSE, col.names=FALSE) # Zip and clean before end of sequence files2zip <- dir(out_fn, full.names = TRUE) utils::zip(zipfile = out_fn, files = files2zip, extras = '-j') unlink(out_fn, recursive = TRUE) } }
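# Worked example of the interval arithmetic above (the "2019-11-27 ..."
# times are placeholders): a 30-minute window cut into 5-minute pieces
# yields floor(30 / 5) = 6 interval start times (08:30, 08:35, ..., 08:55).
# start <- char_clock_systime("2019-11-27 08:30:00")
# end <- char_clock_systime("2019-11-27 09:00:00")
# out <- e4_filecut_intervals(start, end, interval = 5)
# length(out$vec_interval)  # 6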
/scratch/gouwar.j/cran-all/cranData/wearables/R/cut_e4_data_intervals.R
#' Show class of object #' @description Returns 'object of class' #' @param x An e4 data list #' @param \dots Further arguments currently ignored. #' @export print.e4data <- function(x, ...){ cat("This is an object of class 'e4data'.\n") cat("Datasets included:", paste(names(x), collapse=", "), "\n") } # # summary.e4data <- function(object, ...){ # # } # plot.e4data <- function(x, ...){ # # }
/scratch/gouwar.j/cran-all/cranData/wearables/R/e4_methods.R
#' Filter all four datasets for a Datetime start + end #' @param data Object read with \code{\link{read_e4}} #' @param start Start Datetime (posixct) #' @param end End Datetime (posixct) #' @export #' @importFrom dplyr .data filter_e4data_datetime <- function(data, start, end){ data$IBI$datetime <- lubridate::force_tz(data$IBI$DateTime, "UTC") data$EDA$datetime <- lubridate::force_tz(data$EDA$DateTime, "UTC") data$ACC$datetime <- lubridate::force_tz(data$ACC$DateTime, "UTC") data$TEMP$datetime <- lubridate::force_tz(data$TEMP$DateTime, "UTC") data$HR$datetime <- lubridate::force_tz(data$HR$DateTime, "UTC") data$IBI <- dplyr::filter(data$IBI, .data$datetime >= start, .data$datetime <= end) data$EDA <- dplyr::filter(data$EDA, .data$datetime >= start, .data$datetime <= end) data$ACC <- dplyr::filter(data$ACC, .data$datetime >= start, .data$datetime <= end) data$TEMP <- dplyr::filter(data$TEMP, .data$datetime >= start, .data$datetime <= end) data$HR <- dplyr::filter(data$HR, .data$datetime >= start, .data$datetime <= end) return(data) }
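# Usage sketch (e4 stands for a read_e4() object): restrict a session to a
# ten-minute window; times are given in UTC to match the force_tz() calls
# above.
# start <- as.POSIXct("2019-11-27 08:30:00", tz = "UTC")
# e4_win <- filter_e4data_datetime(e4, start, start + 10 * 60)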
/scratch/gouwar.j/cran-all/cranData/wearables/R/filter_e4data_datetime.R
#' IBI analysis #' #' Analysis of interbeat interval (IBI) #' #' @param IBI IBI data, component of object (the number of seconds since the start of the recording) read with \code{\link{read_e4}} #' @export #' @importFrom RHRV CreateHRVData SetVerbose BuildNIHR FilterNIHR InterpolateNIHR #' @importFrom RHRV CreateTimeAnalysis CreateFreqAnalysis CalculatePowerBand ibi_analysis <- function(IBI){ # Select the heart beat positions in time. Use the amount of seconds since the start e4_hrv_data <- RHRV::CreateHRVData() e4_hrv_data <- RHRV::SetVerbose(e4_hrv_data, TRUE ) e4_hrv_data$datetime <- as.POSIXlt(IBI$DateTime)[1] # There is no 0 added to the Empatica E4 seconds column, therefore, slight deviations # with RHRV are possible. To match RHRV outcome, add 0 to the dataframe. # Reason for not adding the 0 is that Empatica does not contain a valid first # RR interval from the start of the study. e4_hrv_data$Beat <- data.frame(Time = IBI$seconds) n_beats_original <- nrow(e4_hrv_data$Beat) # Then build the non interpolated heart rate series e4_hrv_data <- RHRV::BuildNIHR(e4_hrv_data) # Pay attention that we don't need the inter-beat-intervals as RHRV does not know how to handle these # as there are so much missing values in there. # Remove too short RR intervals or missed beats # This also provides the number of accepted beats e4_hrv_data <- RHRV::FilterNIHR(e4_hrv_data) n_beats_accepted <- nrow(e4_hrv_data$Beat) # Note that it is not necessary to specify freqhr since it matches with # the default value: 4 Hz suppressWarnings({ e4_hrv_data <- RHRV::InterpolateNIHR(e4_hrv_data, freqhr = 4) e4_hrv_data <- RHRV::CreateTimeAnalysis(e4_hrv_data, size = 300, interval = 7.8125) }) # We typically have a lot of missing beats with wristbands, so frequency analysis is difficult. e4_hrv_data <- RHRV::CreateFreqAnalysis(e4_hrv_data) e4_hrv_data <- RHRV::CalculatePowerBand(e4_hrv_data , indexFreqAnalysis = 1, size = 300, shift = 30, type = "fourier", ULFmin = 0, ULFmax = 0.03, VLFmin = 0.03, VLFmax = 0.05, LFmin = 0.05, LFmax = 0.15, HFmin = 0.15, HFmax = 0.4 ) time <- e4_hrv_data$TimeAnalysis[[1]] freq <- e4_hrv_data$FreqAnalysis[[1]] list( time_analysis = time, #freq_analysis = freq, summary = list( time = list( SDNN = time$SDNN, pNN50 = time$pNN50, SDSD = time$SDSD, rMSSD = time$rMSSD, HRVi = time$HRVi, SDANN = time$SDANN, TINN = time$TINN ), frequency = list( HF = mean(freq$HF), LF = mean(freq$LF), HFLF = mean(freq$HFLF), VLF = mean(freq$VLF), ULF = mean(freq$ULF) ), beats = list( beats_original = n_beats_original, beats_accepted = n_beats_accepted ) ) ) }
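# Usage sketch (e4 stands for a read_e4() object): run the analysis on the
# IBI element and pull two widely reported time-domain HRV summaries.
# res <- ibi_analysis(e4$IBI)
# res$summary$time$SDNN   # overall variability
# res$summary$time$rMSSD  # short-term variability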
/scratch/gouwar.j/cran-all/cranData/wearables/R/ibi_analysis.R
#' RMSSD calculation
#'
#' Calculation of RMSSD over 1 minute time periods for plotting
#'
#' @param IBIdata Uses the IBI data frame as created by \code{\link{read_e4}}
#' @export
#' @importFrom varian rmssd
#' @importFrom dplyr .data
calculate_RMSSD <- function(IBIdata){

  # Heart rate variability from inter-beat intervals
  # https://www.sciencedirect.com/science/article/pii/S0735109797005548
  # https://www.hrv4training.com/blog/heart-rate-variability-normal-values

  # Successive RR interval differences (in milliseconds)
  IBI <- IBIdata %>%
    dplyr::mutate(IBI_ms = (IBI * 1000))

  # Calculates the root mean square of successive differences (RMSSD)
  RMSSD <- IBI %>%
    # group per 1-minute interval
    dplyr::group_by(bin = cut(.data$DateTime, "1 min")) %>%
    # keep bins with more than one data point
    dplyr::filter(dplyr::n() > 1) %>%
    # RMSSD: square root of the mean squared difference of successive IBIs
    dplyr::summarize(RMSSD = rmssd(.data$IBI_ms)) %>%
    # cast the grouping factor back to a timestamp
    dplyr::mutate(time = as.POSIXct(as.character(.data$bin)))

  return(RMSSD)
}
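# Plotting sketch for the per-minute RMSSD (e4 stands for a read_e4()
# object; ggplot2 is used elsewhere in this package):
# rmssd_df <- calculate_RMSSD(e4$IBI)
# ggplot2::ggplot(rmssd_df, ggplot2::aes(time, RMSSD)) + ggplot2::geom_line()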
/scratch/gouwar.j/cran-all/cranData/wearables/R/ibi_analysis_rmssd_time_period.R
################### # Code to process EDA data ################### #' Process EDA data #' @param eda_data Data read with \code{\link{read_e4}} #' @export process_eda <- function(eda_data){ # Make sure data has a sample rate of 8Hz eda_data <- upsample_data_to_8Hz(eda_data) # Get the filtered data using a low-pass butterworth filter (cutoff:1hz, sampling frequency:8hz, order:6) eda_data$filtered_eda <- as.numeric(butter_lowpass_filter(eda_data$EDA, 1.0, 8, 6)) return(eda_data) } ###################### # code to bring signal to 8Hz (currently only supports upsampling) ###################### #' Upsample EDA data to 8 Hz #' @param eda_data Data read with \code{\link{read_e4}} #' @importFrom stats approx #' @export upsample_data_to_8Hz <- function(eda_data){ # Upsample start <- eda_data$DateTime[[1]] end <- eda_data$DateTime[length(eda_data$DateTime)] time_sequence_8Hz <- seq(from = start, to = end, units = "seconds", by = .125) #python: data = data.resample("125L").mean() # Interpolate all empty values interpolated <- approx(eda_data$DateTime, eda_data$EDA, xout = time_sequence_8Hz) data <- data.frame(DateTime = interpolated[1], EDA = interpolated[2]) names(data)[1] <- "DateTime" names(data)[2] <- "EDA" #python: data = interpolateEmptyValues(data) return(data) } ###################### # code to filter signal #https://stackoverflow.com/questions/7105962/how-do-i-run-a-high-pass-or-low-pass-filter-on-data-points-in-r ###################### #' @importFrom signal butter butter_lowpass_filter <- function(data, cutoff, sampling_frequency, order = 5){ # Filtering Helper functions nyquist_frequency <- 0.5 * sampling_frequency normal_cutoff <- cutoff / nyquist_frequency butter_filter <- signal::butter(order, normal_cutoff, type = "low", plane = "z") #"z" for a digital filter #python: c(b, a) = scisig.butter(order, normal_cutoff, btype='low', analog=False) # Apply filter y <- signal::filter(butter_filter, data) #python: y = scisig.lfilter(b, a, data) return (y) }
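# Sketch of the low-pass filter in isolation (the helper name is
# illustrative): at an 8 Hz sampling rate the 1 Hz cutoff keeps a slow
# component and damps a fast one (the Nyquist frequency is 4 Hz).
example_lowpass <- function() {
  t <- seq(0, 10, by = 1 / 8)                       # 8 Hz time grid
  x <- sin(2 * pi * 0.1 * t) + sin(2 * pi * 3 * t)  # slow + fast component
  as.numeric(butter_lowpass_filter(x, cutoff = 1.0, sampling_frequency = 8, order = 6))
}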
/scratch/gouwar.j/cran-all/cranData/wearables/R/load_files.R
#' Electrodermal activity signal derivative #' #' Finds the first derivatives of the eda signal #' #' @param eda eda vector get_eda_deriv <- function(eda){ eda[2:length(eda)] - eda[1:(length(eda)-1)] } #' Get the eda apex of the signal #' #' finds the apex of electrodermal activity eda signal #' within an optional time window #' #' @param eda_deriv uses the eda derivative to find the apex #' @param offset minimum number of downward measurements after the apex, #' in order to be considered a peak (default 1 means no restrictions) get_apex <- function(eda_deriv, offset = 1){ peak_has_drop <- function(i){ length_drop <- rle(peak_sign[i:(i+offset-1)])$lengths[1] length_drop >= offset } peak_sign <- sign(eda_deriv) apex <- integer(length(peak_sign) + 1) apex[c(FALSE, diff(peak_sign) == -2)] <- 1 i_apex <- which(apex == 1) has_drops <- sapply(i_apex, peak_has_drop) apex[i_apex[!has_drops]] <- 0 apex } #' Rise time of peaks #' #' Calculates the rise time of all peaks #' #' @param eda_deriv first derivative of signal #' @param apices apex status per measurement (0 or 1) #' @param sample_rate sample rate of the signal #' @param start_WT window within which to look for rise time (in seconds) get_rise_time <- function(eda_deriv, apices, sample_rate, start_WT){ get_rise_events_for_apex <- function(i){ lookback_i <- max(1, i - max_lookback) r <- rle(rev(peak_sign[lookback_i:i - 1])) r$lengths[1] } peak_sign <- sign(eda_deriv) max_lookback <- sample_rate * start_WT rise_time <- numeric(length(apices)) i_apex <- which(apices == 1) rise_time[i_apex] <- sapply(i_apex, get_rise_events_for_apex) / sample_rate rise_time } #' Start of peaks #' #' Provide info for each measurement whether it is the start of a peak (0 or 1) #' #' @param data df with peak info #' @param sample_rate sample rate of the signal get_peak_start <- function(data, sample_rate){ i_apex <- which(data$peaks == 1) peak_start <- integer(nrow(data)) length_rise_events <- sample_rate * data$rise_time[i_apex] peak_start[i_apex - length_rise_events] <- 1 peak_start } #' Small peaks removal #' #' Remove peaks with a small rise from start to apex are removed #' #' @param data df with info on peaks #' @param thres threshold of amplitude difference in order to be removed #' (default 0 means no removals) remove_small_peaks <- function(data, thres = 0){ if(thres > 0){ i_apex <- which(data$peaks == 1) i_peak_start <- which(data$peak_start == 1) i_to_remove <- data$filtered_eda[i_apex] - data$filtered_eda[i_peak_start] < thres data$peaks[i_apex][i_to_remove] <- 0 data$rise_time[i_apex][i_to_remove] <- 0 data$peak_start[i_peak_start][i_to_remove] <- 0 } data } #' Peak start times #' #' Get the start times of the peaks #' #' @param data df with peak info get_peak_start_times <- function(data){ i_apex <- which(data$peaks == 1) i_peak_start <- which(data$peak_start == 1) peak_start_times <- as.POSIXct(rep(NA, nrow(data))) peak_start_times[i_apex] <- data$DateTime[i_peak_start] peak_start_times } #' Maximum derivative #' #' Get the largest slope before apex, interpolated to seconds #' #' @param data df with info on the peaks #' @param eda_deriv derivative of the signal #' @param sample_rate sample rate of the signal get_max_deriv <- function(data, eda_deriv, sample_rate){ get_max_deriv_for_event <- function(i){ max(eda_deriv[max(1, i - sample_rate):i]) } max_deriv <- numeric(nrow(data)) i_apex <- which(data$peaks == 1) if(length(i_apex) > 0){ max_deriv[i_apex] <- sapply(i_apex, get_max_deriv_for_event) * sample_rate } max_deriv } #' Peak amplitude #' #' Get 
the amplitude of the peaks #' #' @param data df with peak info get_amp <- function(data){ i_apex <- which(data$peaks == 1) i_peak_start <- which(data$peak_start == 1) amp <- numeric(nrow(data)) apex_amp <- data$filtered_eda[i_apex] start_amp <- data$filtered_eda[i_peak_start] amp[i_apex] <- apex_amp - start_amp amp } #' Half peak amp #' #' Get the amplitude value halfway between peak start and apex #' #' @param data df with peak info #' @param i apex index get_half_amp <- function(data, i){ apex_amp <- data$filtered_eda[i] amp_diff <- data$amp[i] half_amp <- apex_amp - .5 * amp_diff half_amp } #' Peak end #' #' Find the end of the peaks, with some restrictions on the search #' #' @param data df with peak info #' @param max_lookahead max distance from apex to search for end #' @importFrom utils tail get_peak_end <- function(data, max_lookahead){ get_i_end_per_apex <- function(i, i_max_peak_end){ half_amp <- get_half_amp(data, i) i_lookahead <- min(i_max_peak_end, i + max_lookahead) amps_ahead <- data$filtered_eda[(i + 1):(i_lookahead)] length_peak_end <- which(amps_ahead < half_amp)[1] if(is.na(length_peak_end)){ i + which.min(amps_ahead) } else { i + length_peak_end } } i_apex <- which(data$peaks == 1) peak_end <- integer(nrow(data)) if(length(i_apex) > 0){ i_peak_start <- which(data$peak_start == 1) i_next_peak_start <- tail(i_peak_start, -1) i_max_peak_end <- c(i_next_peak_start - 1, nrow(data)) i_peak_end <- mapply(get_i_end_per_apex, i_apex, i_max_peak_end) peak_end[i_peak_end] <- 1 } peak_end } #' Peak end times #' #' Get the end timstamp of the peaks #' #' @param data df with peak info get_peak_end_times <- function(data){ peak_end_times <- as.POSIXct(rep(NA, nrow(data))) i_apex <- which(data$peaks == 1) i_peak_end <- which(data$peak_end == 1) peak_end_times[i_apex] <- data$DateTime[i_peak_end] peak_end_times } #' Decaying peaks #' #' Identify peaks with a decent decay (at least half the amplitude of rise) #' #' @param data df with peak info get_i_apex_with_decay <- function(data){ i_apex <- which(data$peaks == 1) i_peak_end <- which(data$peak_end == 1) half_amp <- get_half_amp(data, i_apex) has_decay <- data$filtered_eda[i_peak_end] < half_amp i_apex[has_decay] } #' Decay time #' #' Get the time (in seconds) it takes to decay for each peak #' #' @param data df with peak info #' @param i_apex_with_decay indexes of relevant peaks get_decay_time <- function(data, i_apex_with_decay){ decay_time <- numeric(nrow(data)) decay_time[i_apex_with_decay] <- as.numeric(difftime( data$peak_end_times[i_apex_with_decay], data$DateTime[i_apex_with_decay], units="secs" )) decay_time } #' Half rise time #' #' Get the time (in seconds) it takes to get to halfway the rise in a peak #' #' @param data df with peak info #' @param i_apex_with_decay relevant apices get_half_rise <- function(data, i_apex_with_decay){ get_i_half_rise <- function(i_peak_start, i_apex){ half_amp <- data$filtered_eda[i_apex] - .5 * data$amp[i_apex] is_below_amp <- data$filtered_eda[(i_apex - 1):i_peak_start] < half_amp i_apex - which(is_below_amp)[1] } i_apex <- which(data$peaks == 1) half_rise <- as.POSIXct(rep(NA, nrow(data))) if(length(i_apex) > 0){ has_decay <- i_apex %in% i_apex_with_decay i_peak_start_with_decay <- which(data$peak_start == 1)[has_decay] i_half_rise <- mapply(get_i_half_rise, i_peak_start_with_decay, i_apex_with_decay) half_rise[i_apex_with_decay] <- data$DateTime[i_half_rise] } half_rise } #' Peak width #' #' Get the width of the peak (in seconds, from halfway the rise until the end) #' #' @param data df 
with peak info #' @param i_apex_with_decay relevant apices get_SCR_width <- function(data, i_apex_with_decay){ SCR_width <- numeric(nrow(data)) SCR_width[i_apex_with_decay] <- as.numeric(difftime( data$peak_end_times[i_apex_with_decay], data$half_rise[i_apex_with_decay], units="secs" )) SCR_width } #' Function to find peaks of an EDA datafile #' @description This function finds the peaks of an EDA signal and adds #' basic properties to the datafile. #' @details Also, peak_end is assumed to be no later than the start of the next peak. #' Is that OK? #' @param data DataFrame with EDA as one of the columns and indexed by a datetimeIndex #' @param offset the number of rising seconds and falling seconds after a peak needed to be counted as a peak #' @param start_WT maximum number of seconds before the apex of a peak that is the "start" of the peak #' @param end_WT maximum number of seconds after the apex of a peak that is the "end" of the peak 50 percent of amp #' @param thres the minimum microsecond change required to register as a peak, defaults as .005 #' @param sample_rate number of samples per second, default=8 #' @return data frame with several columns #' peaks 1 if apex #' peak_start 1 if start of peak #' peak_end 1 if end of preak #' peak_start_times if apex then corresponding start timestamp #' peak_end_times if apex then corresponding end timestamp #' half_rise if sharp decaying apex then time to halfway point in rise #' amp if apex then value of EDA at apex - value of EDA at start #' max_deriv if apex then max derivative within 1 second of apex #' rise_time if apex then time from start to apex #' decay_time if sharp decaying apex then time from apex to end #' SCR_width if sharp decaying apex then time from half rise to end #' @export find_peaks <- function(data, offset = 1, start_WT = 4, end_WT = 4, thres = .005, sample_rate = getOption("SAMPLE_RATE", 8)){ offset <- offset * sample_rate old_col_names <- names(data) eda_deriv <- get_eda_deriv(data$filtered_eda) data$peaks <- get_apex(eda_deriv, offset) data$rise_time <- get_rise_time(eda_deriv, data$peaks, sample_rate, start_WT) data$peak_start <- get_peak_start(data, sample_rate) data <- remove_small_peaks(data, thres) data$peak_start_times <- get_peak_start_times(data) data$max_deriv <- get_max_deriv(data, eda_deriv, sample_rate) data$amp <- get_amp(data) data$peak_end <- get_peak_end(data, end_WT * sample_rate) data$peak_end_times <- get_peak_end_times(data) i_apex_with_decay <- get_i_apex_with_decay(data) data$decay_time <- get_decay_time(data, i_apex_with_decay) data$half_rise <- get_half_rise(data, i_apex_with_decay) data$SCR_width <- get_SCR_width(data, i_apex_with_decay) new_col_names_ordered <- c('peaks', 'peak_start', 'peak_end', 'peak_start_times', 'peak_end_times', 'half_rise', 'amp', 'max_deriv', 'rise_time', 'decay_time', 'SCR_width') data <- data[, c(old_col_names, new_col_names_ordered)] # Remove rows without peaks featureData <- data[data$peaks==1,][c('DateTime', 'EDA','rise_time','max_deriv','amp','decay_time','SCR_width')] # Replace 0s with NA, this is where the 50 percent of the peak was not found, too close to the next peak featureData[, c('SCR_width','decay_time')][featureData[, c('SCR_width','decay_time')] == 0] <- NA featureData['AUC'] <- featureData['amp'] * featureData['SCR_width'] featureData } # ############ MAIN CODE ###################### # # fullOutputPath = "features.csv" # # #for testing # #offset <- 1 # #thres <- 0.02 # #start_WT <- 4 # #end_WT <- 4 # # #settings Peter de Looff (also used in the 
default) # offset = 1 # thres = 0.005 # start_WT = 4 # end_WT = 4 # # print(paste("Finding peaks in file", filepath, "using the following parameters")) # print(paste0("Offset: ", offset, "; Minimum peak amplitude: ", thres, "; Max rise time (s): ", start_WT,"; Max decay time (s): ", end_WT)) # # data_with_peaks <- find_peaks(eda_data, offset*SAMPLE_RATE, start_WT, end_WT, thres, SAMPLE_RATE) # # write_peak_features(data_with_peaks, fullOutputPath) # print(paste0("Features computed and saved to ", fullOutputPath)) # # ########### PLOTTING ######################### # # plot(peakData$DateTime, eda_data$filtered_eda, type='l', col='black') # #peaks only # foundPeaks <- peakData[peakData$peaks==1,] # abline(v=foundPeaks$DateTime, lwd = 2, col = rgb(0, 1, 0, alpha=0.5))
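# Compact runnable sketch tying the pieces together ("session.zip" is a
# placeholder; read_e4() and process_eda() come from this package):
# eda <- process_eda(read_e4("session.zip")$EDA)
# peaks <- find_peaks(eda)  # defaults match the settings in the demo above
# head(peaks[, c("DateTime", "amp", "rise_time", "SCR_width", "AUC")])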
/scratch/gouwar.j/cran-all/cranData/wearables/R/peak_detection.R
#' Row-bind E4 datasets #' @param data An object read in by read_e4 #' @importFrom dplyr bind_rows #' @export rbind_e4 <- function(data){ out <- list() nms <- names(data[[1]]) for(name in nms){ # retrieve data dat <- lapply(data, "[[", name) out[[name]] <- bind_rows(dat) } out }
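# Usage sketch ("part1.zip"/"part2.zip" are placeholders): concatenate two
# recordings of the same subject before further analysis.
# sessions <- list(read_e4("part1.zip"), read_e4("part2.zip"))
# combined <- rbind_e4(sessions)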
/scratch/gouwar.j/cran-all/cranData/wearables/R/rbind_e4.R