```{r child="weights-optim-exponential.Rmd", eval=needs_optimization} ``` ```{r} # remove duplicates W <- W %>% round(digits = 3) %>% {./rowSums(.)} %>% unique # extract EQRs of interest V <- as.matrix(d_ind[, colnames(W)]) ``` ```{r} to_log("INFO", "Entering subsection 'summary stats for all combinations of weights'...") ``` ```{r, eval=has_pressure} d <- apply( X = W, MARGIN = 1L, FUN = function(x) { d <- data.frame(MMI_STAR = drop(V %*% x), P = d_ind$PRESSURE) %>% na.omit m <- try( (MMI_STAR ~ b0 + b1*exp(b2*P)) %>% nls( data = d, start = list(b0 = 0.1, b1 = 0.9, b2 = -1), control = nls.control(maxiter = 1000) ), silent = TRUE ) list(d = d, m = m) } ) # statistics # (deviance should not be used, as this statistic is dependent on the response) S <- cbind(W, do.call( what = "rbind", args = lapply( X = d, FUN = function(x) { if (inherits(x$m, "try-error")) { res <- rep.int(NA_real_, times = 6) names(res) <- c("b1", "b2", "b3", "deviance", "pseudo_r2", "converged") res <- as.data.frame(res) res$converged <- FALSE } else { y <- x$d$MMI_STAR n <- length(x$d$MMI_STAR) SS_res <- sum(residuals(x$m)^2) SS_tot <- var(y) * (n-1) pseudo_r2 <- 1 - SS_res / SS_tot b <- as.numeric(coefficients(x$m)) res <- data.frame( b0 = b[1], b1 = b[2], b2 = b[3], pseudo_r2 = pseudo_r2, converged = x$m$convInfo$isConv ) } res } ) ) ) %>% as.data.frame %>% arrange(desc(pseudo_r2)) optimization_failed <- all(!S$converged) ``` ```{r child="weights-stats-table-exponential.Rmd", eval=has_pressure && !optimization_failed} ``` ```{r child="plot-eqr-pressure-exponential.Rmd", eval=has_pressure && !optimization_failed} ``` ```{r} if (needs_optimization) { weights <- as.matrix(S)[1, 1:number_of_indicators] weights <- weights[weights > 0] weights_text <- round(weights, 3) names(weights_text) <- sub(pattern = "_STAR$", replacement = "*", x = names(weights_text)) } else { weights <- settings$weights names(weights) <- indicators_EQR weights_text <- attr(settings$weights, "character") if (is.null(weights_text)) { weights_text <- round(settings$weights, 3) } names(weights_text) <- paste0(indicators, "*") } ``` `r if(needs_optimization){paste("The optimized", title_text, "is:\n")}else{paste("The", title_text, "based on user-specified weights is:\n")}` MMI* = `r paste(weights_text, names(weights_text), collapse = " + ", sep = " &times; ")`. ```{r} d_ind$MMI_STAR <- drop(V[, names(weights), drop = FALSE] %*% weights) ``` `r if(has_pressure && !optimization_failed){"Summary statistics of this model are given below:\n"}else{"\n"}` ```{r, eval=has_pressure && !optimization_failed} (MMI_STAR ~ b0 + b1*exp(b2*PRESSURE)) %>% nls( data = d_ind, start = list(b0 = 0.1, b1 = 0.9, b2 = -1), control = nls.control(maxiter = 1000)) %>% summary ``` <br> <br>
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/model-exponential.Rmd
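The pseudo r² reported by the exponential model file above is computed as 1 − SS_res/SS_tot, with SS_tot taken as (n − 1) times the variance of the response. A minimal stand-alone sketch of that calculation on simulated pressure data (the values and parameters below are invented for illustration only):

```r
# Illustration only: simulated pressure-response data, not BENMMI output
set.seed(1)
P <- runif(100, 0, 10)
MMI_STAR <- 0.1 + 0.9 * exp(-0.5 * P) + rnorm(100, sd = 0.05)
d <- data.frame(MMI_STAR = MMI_STAR, P = P)

m <- nls(
  MMI_STAR ~ b0 + b1 * exp(b2 * P),
  data = d,
  start = list(b0 = 0.1, b1 = 0.9, b2 = -1),
  control = nls.control(maxiter = 1000)
)

# pseudo r-squared as defined in the report code: 1 - SS_res / SS_tot
SS_res <- sum(residuals(m)^2)
SS_tot <- var(d$MMI_STAR) * (nrow(d) - 1)
1 - SS_res / SS_tot
```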
```{r child="weights-optim-linear.Rmd", eval=needs_optimization} ``` ```{r} # remove duplicates W <- W %>% round(digits = 3) %>% {./rowSums(.)} %>% unique # extract EQRs of interest V <- as.matrix(d_ind[, colnames(W)]) ``` ```{r} to_log("INFO", "Entering subsection 'summary stats for all combinations of weights'...") ``` ```{r, eval=has_pressure} d <- apply( X = W, MARGIN = 1L, FUN = function(x) { (MMI_STAR ~ P) %>% lm(data = data.frame(MMI_STAR = drop(V %*% x), P = d_ind$PRESSURE)) %>% summary } ) S <- cbind(W, do.call( what = "rbind", args = lapply( X = d, FUN = function(x) { b <- coefficients(x) c( b0 = b["(Intercept)", "Estimate"], b1 = if (nrow(b) == 1L) {NA_real_} else {b["P", "Estimate"]}, r2_adj = x$adj.r.squared, r_adj = suppressWarnings(sqrt(x$adj.r.squared)), p_value = if (is.null(x$fstatistic)) { NA_real_ } else { pf( # see stats:::print.summary.lm q = x$fstatistic[1L], df1 = x$fstatistic[2L], df2 = x$fstatistic[3L], lower.tail = FALSE ) %>% as.numeric } ) } ) ) ) %>% as.data.frame %>% arrange(desc(r2_adj)) # check if slopes are available optimization_failed <- all(is.na(S$b1)) if (needs_optimization && optimization_failed) { to_log("ERROR", sprintf( "Optimization failed.\nCheck PRESSURE column in %s", sQuote(basename(settings$files$benthos)) ) ) } ``` ```{r child="weights-stats-table-linear.Rmd", eval=has_pressure && !optimization_failed} ``` ```{r child="plot-eqr-pressure-linear.Rmd", eval=has_pressure && !optimization_failed} ``` ```{r} if (needs_optimization) { weights <- as.matrix(S)[1, 1:number_of_indicators] weights <- weights[weights > 0] weights_text <- round(weights, 3) names(weights_text) <- sub(pattern = "_STAR$", replacement = "*", x = names(weights_text)) } else { weights <- settings$weights names(weights) <- indicators_EQR weights_text <- attr(settings$weights, "character") if (is.null(weights_text)) { weights_text <- round(settings$weights, 3) } names(weights_text) <- paste0(indicators, "*") } ``` `r if(needs_optimization){paste("The optimized", title_text, "is:\n")}else{paste("The", title_text, "based on user-specified weights is:\n")}` MMI* = `r paste(weights_text, names(weights_text), collapse = " + ", sep = " &times; ") ` ```{r} d_ind$MMI_STAR <- drop(V[, names(weights), drop = FALSE] %*% weights) ``` `r if(has_pressure && !optimization_failed){"Summary statistics of this model are given below:\n"}else{"\n"}` ```{r, eval=has_pressure && !optimization_failed} (MMI_STAR ~ PRESSURE) %>% lm(data = d_ind) %>% summary ``` <br> <br>
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/model-linear.Rmd
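The p-value stored in the statistics table of the linear model file above is derived from the F statistic of the fit, following the approach of `stats:::print.summary.lm`. A small sketch on toy data showing that this reproduces the p-value printed by `summary()`:

```r
# Toy data, purely illustrative
set.seed(1)
d <- data.frame(PRESSURE = runif(50, 0, 10))
d$MMI_STAR <- 0.8 - 0.05 * d$PRESSURE + rnorm(50, sd = 0.1)

s <- summary(lm(MMI_STAR ~ PRESSURE, data = d))

# upper-tail F probability, as in the report code above
f <- s$fstatistic
pf(q = f[1L], df1 = f[2L], df2 = f[3L], lower.tail = FALSE)
```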
- Total number of non-endofauna records that have been removed: 0
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/no-groups-to-exclude.Rmd
Data will not be pooled but taken as is.

```{r}
# in case of no pooling, assign the sample id to the POOL_ID column
d_mmi <- d_mmi %>%
    mutate(
        POOL_RUN = 1L,
        POOL_ID = ID
    )
```
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/no-pooling.Rmd
### Box-whisker plots of Ecological Quality Ratio's ```{r} to_log("INFO", "Entering subsection 'Box-whisker plots...'...") ``` The figures below show box-whisker plots of the `r title_text_plur` computed above for each OBJECTID. The upper/lower hinge (_i.e._, the top and the bottom of the box respectively) corresponds to the third/first quartile. The upper/lower whisker extends from the third/first quartile hinge to the largest/smallest value no further than 1.5 times the inter-quartile range from that hinge. Extreme data values are plotted as points. ```{r} fig_height <- ((nrow(W)-1L) %/% 3L + 1L) * 2.8 ``` <figure> ```{r,echo=FALSE, fig.retina=NULL, fig.width=6, fig.height=fig_height, out.width=900, dpi=600, warning=FALSE, message=FALSE, eval=has_pressure} d <- d_ind %>% select(OBJECTID, PRESSURE) for (i in 1:nrow(W)) { w <- W[i, ] mmi_name <- paste0(names(w)[w > 0], collapse = "+") mmi_name <- gsub(pattern = "_EQR", replacement = "", x = mmi_name) d[[mmi_name]] <- drop(V %*% w) } d <- d %>% gather(key = "EQR", value = "VALUE", -OBJECTID, -PRESSURE) d$EQR <- sub(pattern = "_STAR$", replacement = "*", x = d$EQR) g <- ggplot(data = d, mapping = aes(x = OBJECTID, y = VALUE)) + geom_boxplot() + facet_wrap(~EQR) + scale_x_discrete(name = "") + scale_y_continuous(name = "") + theme( axis.text = element_text(colour = "black"), axis.text.x = element_text(angle = 45, hjust = 1) ) suppressWarnings(print(g)) ``` <figcaption>Box-whisker plots of the `r title_text_plur` for each OBJECTID</figcaption> </figure> <br> <br>
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/plot-box-whisker.Rmd
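The hinge and whisker definitions described in the box-whisker text above (quartile hinges plus the 1.5 × IQR rule, the default behaviour of `geom_boxplot`) can be reproduced directly. A minimal sketch on arbitrary values; `quantile()` is used here as a close stand-in for the hinge computation:

```r
# Illustration of the hinge/whisker rule described above (values are arbitrary)
x <- c(0.2, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.62, 0.9)

hinges <- quantile(x, probs = c(0.25, 0.75))   # lower/upper hinge
iqr    <- diff(hinges)                         # inter-quartile range

# whiskers: most extreme values no further than 1.5 * IQR from the hinges
lower_whisker <- min(x[x >= hinges[1] - 1.5 * iqr])
upper_whisker <- max(x[x <= hinges[2] + 1.5 * iqr])

# everything beyond the whiskers is plotted as individual points
outliers <- x[x < lower_whisker | x > upper_whisker]
```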
A plot of the `r title_text_plur` as function of `r settings$pressure$name` is given below, showing the sensitivity of each index. In addition, the `r settings$confidencelevel`-confidence interval of the mean `r title_text` is shown, indicating its precision. <figure> ```{r,echo=FALSE, fig.retina=NULL, fig.width=5, fig.height=5, out.width=900, dpi=600, warning=FALSE, message=FALSE} d <- d_ind %>% select(OBJECTID, PRESSURE) for (i in 1:nrow(W)) { w <- W[i, ] mmi_name <- paste0(names(w)[w > 0], collapse = "+") d[[mmi_name]] <- drop(V %*% w) } d <- d %>% gather(key = "EQR", value = "VALUE", -OBJECTID, -PRESSURE) d$EQR <- gsub(pattern = "_STAR", replacement = "*", x = d$EQR) f <- function(x) { m <- try( nls( VALUE ~ b0 + b1*exp(b2*PRESSURE), data = x, start = list(b0 = 0.1, b1 = 0.9, b2 = -1), control = nls.control(maxiter = 1000)), silent = TRUE ) if (inherits(m, "try-error")) { return(NULL) } p <- seq(from = min(x$PRESSURE), to = max(x$PRESSURE), length.out = 101) res <- data.frame( PRESSURE = p ) res$VALUE <- predict(m, newdata = res) res } d_m <- d %>% group_by(EQR) %>% do( f(.) ) g <- ggplot() + geom_path( data = d_m, mapping = aes(x = PRESSURE, y = VALUE, colour = EQR) ) + scale_x_continuous( name = sprintf("%s (%s)", settings$pressure$name, settings$pressure$unit)) + scale_y_continuous(name = title_text) + scale_linetype_discrete(name = "") + scale_colour_discrete(name = "") + theme(axis.text = element_text(colour = "black")) suppressWarnings(print(g)) ``` <figcaption>Nonlinear regression of `r title_text` as function of `r settings$pressure$name`. Data points are not shown. Note: plotting is restricted to the data range (no extrapolation).</figcaption> </figure> <br> <br> The plot below shows the same results, including the data points. <figure> ```{r,echo=FALSE, fig.retina=NULL, fig.width=5, fig.height=5, out.width=700, dpi=600, warning=FALSE, message=FALSE} for (eqr in unique(d$EQR)) { g <- ggplot( data = d %>% filter(EQR == eqr), mapping = aes(x = PRESSURE, y = VALUE) ) + geom_point(alpha = 0.25) + geom_path( data = d_m %>% filter(EQR == eqr), mapping = aes(x = PRESSURE, y = VALUE), colour = "red" ) + scale_x_continuous( name = sprintf("%s (%s)", settings$pressure$name, settings$pressure$unit)) + scale_y_continuous( name = eqr, limits = c(min(0, min(d$VALUE)), max(1, max(d$VALUE))), breaks = 0:5/5 ) + theme(axis.text = element_text(colour = "black")) print(g) } ``` <figcaption>`r Title_text` as function of `r settings$pressure$name`, including data points.</figcaption> </figure> <br> <br> Over-plotting may obscure relations in the data. Therefore, the plot below summarizes the dots in the plot above as (minimalistic) box plots: - the blue outer (thinner) vertical line segment runs from the 5% to the 95% percentile; - the blue inner (thicker) vertical line segment runs from the 25% to the 75% percentile; - the dot is the median; - the red line is the linear regression model fitted to the data by ordinary least squares. 
<figure> ```{r,echo=FALSE, fig.retina=NULL, fig.width=5, fig.height=5, out.width=700, dpi=600, warning=FALSE, message=FALSE} s <- d %>% group_by(OBJECTID, PRESSURE, EQR) %>% summarise( min = min(VALUE, na.rm = TRUE), q05 = quantile(VALUE, probs = 0.05, na.rm = TRUE), q25 = quantile(VALUE, probs = 0.25, na.rm = TRUE), q50 = median(VALUE, na.rm = TRUE), q75 = quantile(VALUE, probs = 0.75, na.rm = TRUE), q95 = quantile(VALUE, probs = 0.95, na.rm = TRUE), max = max(VALUE, na.rm = TRUE), n = sum(!is.na(VALUE)) ) for (eqr in unique(d$EQR)) { g <- ggplot( data = s %>% filter(EQR == eqr) ) + geom_path( data = d_m %>% filter(EQR == eqr), mapping = aes(x = PRESSURE, y = VALUE), colour = "red" ) + geom_linerange( mapping = aes(x = PRESSURE, ymin = q05, ymax = q95), size = 0.25, colour = "blue", alpha = 0.5 ) + geom_linerange( mapping = aes(x = PRESSURE, ymin = q25, ymax = q75), size = 0.50, colour = "blue", alpha = 0.5 ) + geom_point( mapping = aes(x = PRESSURE, y = q50) ) + scale_x_continuous( name = sprintf("%s (%s)", settings$pressure$name, settings$pressure$unit)) + scale_y_continuous( name = eqr, limits = c(min(0, min(d$VALUE)), max(1, max(d$VALUE))), breaks = 0:5/5 ) + theme(axis.text = element_text(colour = "black")) print(g) } ``` <figcaption>`r Title_text` as function of `r settings$pressure$name`. See text for explanation.</figcaption> </figure> <br> <br>
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/plot-eqr-pressure-exponential.Rmd
A plot of the `r title_text_plur` as function of `r settings$pressure$name` is given below, showing the sensitivity of each index. In addition, the `r settings$confidencelevel`-confidence interval of the mean `r title_text` is shown, indicating its precision. <figure> ```{r,echo=FALSE, fig.retina=NULL, fig.width=5, fig.height=5, out.width=900, dpi=600, warning=FALSE, message=FALSE} d <- d_ind %>% select(OBJECTID, PRESSURE) for (i in 1:nrow(W)) { w <- W[i, ] mmi_name <- paste0(names(w)[w > 0], collapse = "+") d[[mmi_name]] <- drop(V %*% w) } d <- d %>% gather(key = "EQR", value = "VALUE", -OBJECTID, -PRESSURE) d$EQR <- gsub(pattern = "_STAR", replacement = "*", x = d$EQR) g <- ggplot( data = d, mapping = aes(x = PRESSURE, y = VALUE, colour = EQR, linetype = EQR)) + stat_smooth( method = "lm", formula = y ~ x, level = settings$confidencelevel, size = 1 ) + scale_x_continuous( name = sprintf("%s (%s)", settings$pressure$name, settings$pressure$unit)) + scale_y_continuous(name = title_text) + scale_linetype_discrete(name = "") + scale_colour_discrete(name = "") + theme(axis.text = element_text(colour = "black")) suppressWarnings(print(g)) ``` <figcaption>Linear regression of `r title_text` as function of `r settings$pressure$name`. Data points are not shown. In addition, the `r settings$confidencelevel`-confidence interval of the `r title_text` is shown, indicating its precision. Note: plotting is restricted to the data range (no extrapolation).</figcaption> </figure> <br> <br> The plot below shows the same results, including the data points. <figure> ```{r,echo=FALSE, fig.retina=NULL, fig.width=5, fig.height=5, out.width=700, dpi=600, warning=FALSE, message=FALSE} for (eqr in unique(d$EQR)) { g <- ggplot( data = d %>% filter(EQR == eqr), mapping = aes(x = PRESSURE, y = VALUE) ) + geom_point(alpha = 0.25) + stat_smooth( method = "lm", formula = y ~ x, level = settings$confidencelevel, colour = "red" ) + scale_x_continuous( name = sprintf("%s (%s)", settings$pressure$name, settings$pressure$unit)) + scale_y_continuous( name = eqr, limits = c(min(0, min(d$VALUE)), max(1, max(d$VALUE))), breaks = 0:5/5 ) + theme(axis.text = element_text(colour = "black")) print(g) } ``` <figcaption>`r Title_text` as function of `r settings$pressure$name`, including data points.</figcaption> </figure> <br> <br> Over-plotting may obscure relations in the data. Therefore, the plot below summarizes the dots in the plot above as (minimalistic) box plots: - the blue outer (thinner) vertical line segment runs from the 5% to the 95% percentile; - the blue inner (thicker) vertical line segment runs from the 25% to the 75% percentile; - the dot is the median; - the red line is the linear regression model fitted to the data by ordinary least squares. 
<figure> ```{r,echo=FALSE, fig.retina=NULL, fig.width=5, fig.height=5, out.width=700, dpi=600, warning=FALSE, message=FALSE} s <- d %>% group_by(OBJECTID, PRESSURE, EQR) %>% summarise( min = min(VALUE, na.rm = TRUE), q05 = quantile(VALUE, probs = 0.05, na.rm = TRUE), q25 = quantile(VALUE, probs = 0.25, na.rm = TRUE), q50 = median(VALUE, na.rm = TRUE), q75 = quantile(VALUE, probs = 0.75, na.rm = TRUE), q95 = quantile(VALUE, probs = 0.95, na.rm = TRUE), max = max(VALUE, na.rm = TRUE), n = sum(!is.na(VALUE)) ) for (eqr in unique(d$EQR)) { g <- ggplot( data = s %>% filter(EQR == eqr) ) + stat_smooth( data = d %>% filter(EQR == eqr), mapping = aes(x = PRESSURE, y = VALUE), method = "lm", formula = y ~ x, level = settings$confidencelevel, size = 0.5, colour = "red" ) + geom_linerange( mapping = aes(x = PRESSURE, ymin = q05, ymax = q95), size = 0.25, colour = "blue", alpha = 0.5 ) + geom_linerange( mapping = aes(x = PRESSURE, ymin = q25, ymax = q75), size = 0.50, colour = "blue", alpha = 0.5 ) + geom_point( mapping = aes(x = PRESSURE, y = q50) ) + scale_x_continuous( name = sprintf("%s (%s)", settings$pressure$name, settings$pressure$unit)) + scale_y_continuous( name = eqr, limits = c(min(0, min(d$VALUE)), max(1, max(d$VALUE))), breaks = 0:5/5 ) + theme(axis.text = element_text(colour = "black")) print(g) } ``` <figcaption>`r Title_text` as function of `r settings$pressure$name`. See text for explanation.</figcaption> </figure> <br> <br>
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/plot-eqr-pressure-linear.Rmd
The table and figures below give linear fits of _S_-1 versus log(_N_). The table lists the number of samples, the intercept (b0), the slope (b1), and r^2^, for each fit for combinations of OBJECTID, HABITAT, and YEAR with at least `r min_samples` samples. In addition, the p-value of the fit is given. The figures also give the 99%-confidence and 99%-prediction intervals. See figure caption for more information. ```{r results='asis'} # add points with confidence and prediction intervals d$g <- d %>% select(-n) %>% pmap(function(OBJECTID, HABITAT, YEAR, x_logN, model, g) { g + geom_ribbon( data = data.frame( logN = x_logN, predict(model, newdata = data.frame(logN = x_logN), interval = "prediction", level = 0.99) ), mapping = aes(x = logN, ymin = lwr, ymax = upr), fill = "blue", alpha = 0.1) + geom_ribbon( data = data.frame( logN = x_logN, predict(model, newdata = data.frame(logN = x_logN), interval = "confidence", level = 0.99) ), mapping = aes(x = logN, ymin = lwr, ymax = upr), fill = "blue", alpha = 0.1) + geom_point(mapping = aes(x = logN, y = S-1)) + geom_path( data = data.frame( logN = x_logN, y = predict(model, newdata = data.frame(logN = x_logN)) ), mapping = aes(x = logN, y = y), colour = "blue" ) + scale_x_continuous(name = expression(ln(N)), limits = c(-0.026, NA)) + scale_y_continuous(name = "S-1") + ggtitle(sprintf("%s - %s - %s", OBJECTID, HABITAT, YEAR)) } ) # add r2 d$r2 <- d$model %>% map_chr({. %>% summary %>% getElement("r.squared") %>% formatC(format = "f", digits = 2) }) # add fstats d$f <- d$model %>% map({. %>% summary %>% getElement("fstatistic") }) # add p-value d$p <- d$f %>% map_chr(function(x) { if (is.null(x)) { return(NA_character_) } pf( # see stats:::print.summary.lm q = x[1L], df1 = x[2L], df2 = x[3L], lower.tail = FALSE ) %>% formatC(format = "f", digits = 5) }) d$g <- pmap( list(x = d$g, y = d$r2, z = d$p), function(x, y, z) { x + annotate("text", x = -Inf, y = Inf, label = paste0("r^2==", y), parse = TRUE, hjust = -0.1, vjust = 1.1) + annotate("text", x = -Inf, y = Inf, label = paste0("p = ", z), hjust = -0.1, vjust = 3.1) } ) d$model %>% map_df(function(x) { x %>% coefficients %>% t %>% as_data_frame }) %>% set_names(c("b0", "b1")) %>% prepend(d) %>% as_data_frame %>% select(OBJECTID, HABITAT, YEAR, `# samples` = n, b0, b1, r2, p) %>% arrange(OBJECTID, HABITAT, YEAR) %>% xtable %>% print(type = "html") ``` <br> <br> <br> <figure> ```{r,echo=FALSE, fig.retina=NULL, warning=FALSE, fig.width=4, fig.height=4, out.width=350, dpi=300, eval=isTRUE(nrow(d) > 0)} d$g %>% walk(print) ``` <figcaption>Fits (blue line) of S-1 as function of N. The inner band is the 99%-confidence interval (represents our uncertainty about the fit), the outer band is the 99%-prediction interval (represents our uncertainty about individual points (future responses)).</figcaption> </figure> <br> <br>
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/plot-qc-margalef-fits.Rmd
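The two ribbons in the Margalef QC figures above come from `predict()` with `interval = "confidence"` and `interval = "prediction"` at the 99% level. A stand-alone sketch of the same construction on simulated richness/abundance values (not package data):

```r
set.seed(1)
# simulated counts: richness roughly linear in log(abundance)
N <- round(exp(runif(30, 2, 6)))
S <- pmax(1, round(1 + 3 * log(N) + rnorm(30, sd = 2)))
d <- data.frame(logN = log(N), S = S)

fit <- lm(I(S - 1) ~ logN, data = d)

new <- data.frame(logN = seq(min(d$logN), max(d$logN), length.out = 100))
ci <- predict(fit, newdata = new, interval = "confidence", level = 0.99)  # uncertainty about the fit
pi <- predict(fit, newdata = new, interval = "prediction", level = 0.99)  # uncertainty about new points
head(cbind(new, fit = ci[, "fit"], ci_lwr = ci[, "lwr"], pi_lwr = pi[, "lwr"]))
```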
It is not possible to create quality control plots (fits of _S_-1 as function of log(_N_)) because the number of samples is smaller than `r min_samples`.

```{r}
to_log("INFO", sprintf("The number of samples is smaller than %s...", min_samples))
to_log("INFO", "...therefore no quality control plots will be created.")
```
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/plot-qc-margalef-no-fits.Rmd
<h2 style="counter-reset: h2">Quality control plot based on Margalef D</h2> ```{r} to_log("INFO", "Entering subsection 'Quality control plot based on Margalef D'...") ``` ```{r} # set minimum number of samples to analyse # increased from 10 to 20 (advise Willem 2016-09-12) min_samples <- 20L # estimate S and N for each pool d <- d_mmi %>% group_by(OBJECTID, HABITAT, YEAR, POOL_RUN, POOL_ID) %>% summarise( S = species_richness(taxon = TAXON, count = VALUE), N = total_abundance(count = VALUE) ) %>% mutate(logN = log(N)) # model (S-1) ~ log(N) d <- d %>% group_by(OBJECTID, HABITAT, YEAR) %>% do( x_logN = seq(from = min(.$logN), to = max(.$logN), length.out = 100), n = nrow(.), model = lm(I(S-1) ~ logN, data = .), g = ggplot(data = .) ) %>% ungroup # unroll n d$n <- d$n %>% flatten_int # only create plots for at least a minimum number of samples d <- d %>% filter(n >= min_samples) ``` ```{r child="plot-qc-margalef-fits.Rmd", eval=isTRUE(nrow(d) > 0L)} ``` ```{r child="plot-qc-margalef-no-fits.Rmd", eval=isTRUE(nrow(d) == 0L)} ```
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/plot-qc-margalef.Rmd
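The regression of S − 1 on log(N) used in this QC section is directly related to the usual definition of Margalef's diversity, D = (S − 1)/ln(N): if S − 1 were exactly proportional to ln(N), D would equal the slope of that line. A small sketch of the index for a single made-up sample (taxon names and counts are invented; for a sample without zero counts or duplicated taxa this matches what `species_richness()` and `total_abundance()` would return):

```r
# toy sample: taxa and counts (illustrative values only)
counts <- c(Abra_alba = 12, Nephtys_cirrosa = 5, Spiophanes_bombyx = 30, Urothoe_poseidonis = 3)

S <- length(counts)       # species richness
N <- sum(counts)          # total abundance
D <- (S - 1) / log(N)     # Margalef diversity
D
```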
### Correlations ```{r} to_log("INFO", "Entering subsection 'Correlation'...") ``` The relation between all pairs of indices is given below. <figure> ```{r,echo=FALSE, fig.retina=NULL, fig.width=5.5, fig.height=4.5, out.width=900, dpi=300} d <- d_ind %>% select_(.dots = c("OBJECTID", "HABITAT", indicators)) %>% mutate(id_chr = paste(OBJECTID, HABITAT, sep = "-")) %>% select_(.dots = c("id_chr", indicators)) %>% mutate(id_num = id_chr %>% match(sort(unique(id_chr)))) %>% mutate(id_num = factor(x = id_num, levels = sort(unique(id_num)), ordered = TRUE)) lut <- d %>% select(id_num, id_chr) %>% distinct %>% arrange(id_num) # create combinations of color and shape col_shp <- expand.grid( col = c("red", "deepskyblue", "orange", "green", "purple"), shp = c(16, 15, 17, 3, 4), stringsAsFactors = FALSE ) g <- ggplot(data = d) + geom_point( mapping = aes_string( x = names(d)[2], y = names(d)[3], colour = "id_num", shape = "id_num" ) ) + scale_color_manual( name = "", values = col_shp$col ) + scale_shape_manual( name = "", values = col_shp$shp ) print(g) if (number_of_indicators == 3L) { g <- ggplot(data = d) + geom_point( mapping = aes_string( x = names(d)[2], y = names(d)[4], colour = "id_num", shape = "id_num" ) ) + scale_color_manual( name = "", values = col_shp$col ) + scale_shape_manual( name = "", values = col_shp$shp ) print(g) g <- ggplot(data = d) + geom_point( mapping = aes_string( x = names(d)[3], y = names(d)[4], colour = "id_num", shape = "id_num" ) ) + scale_color_manual( name = "", values = col_shp$col ) + scale_shape_manual( name = "", values = col_shp$shp ) print(g) } ``` <figcaption>Scatter plots of all pairs of indices. The meaning of the numbers in the legend is: `r paste(lut$id_num, lut$id_chr, sep = " = ", collapse = "; ")`</figcaption> </figure> <br> <br>
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/plot-scatter.Rmd
```{r}
sel <- which(d$total < settings$pooling$targetarea[1])
tooSmall <- NULL
if (length(sel) > 0L) {
    tooSmall <- sQuote(
        apply(
            X = d[sel, c("YEAR", "OBJECTID", "ECOTOPE"), drop = FALSE],
            MARGIN = 1,
            paste,
            collapse = "-"
        )
    )
}
```

The total sample area is smaller than the minimum target pool area (`r settings$pooling$targetarea[1]`) for `r length(tooSmall)` combinations of waterbody-habitat-year.

```{r eval=(length(tooSmall)>0L)}
tooSmall
```
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/pooling-area.Rmd
```{r} # number of pooling runs n_runs <- 10L # pooling d <- replicate(n_runs, { d_mmi %>% group_by(OBJECTID, HABITAT, YEAR) %>% mutate( POOL_ID = pool( sample_id = ID, area = AREA, target_area = settings$pooling$targetarea ) ) %>% ungroup %>% select(POOL_ID) } ) # add names to each pool run names(d) <- paste( "POOL_RUN", formatC(x = 1:n_runs, width = nchar(n_runs), flag = "0"), sep = "" ) # add pools to d_mmi data set d_mmi <- d_mmi %>% bind_cols(d %>% as_data_frame) %>% as_data_frame # store table with pooling information tmp <- d_mmi %>% select(OBJECTID, SAMPLEID, DATE, starts_with("POOL_RUN")) %>% distinct to_log("INFO", "storing pooling results...") write.csv(x = tmp, file = settings$files$pooling, row.names = FALSE, na = "") to_log("INFO", "pooling results have been stored.") tmp <- tmp %>% select(starts_with("POOL_RUN")) %>% as.matrix ``` The samples in the MMI-input file have been pooled. An average of `r round(100 * sum(is.na(tmp))/ length(tmp), 2)` percent of the samples could not be pooled in each run. These samples have been removed. Each sample has been pooled for at least `r min(apply(X = tmp, MARGIN = 1, FUN = function(x) {sum(!is.na(x))}))` out of 10 times. The results have been written to `r basename(settings$files$pooling)`. ```{r} # convert data to 'long'-format and remove samples that could not be pooled d_mmi <- d_mmi %>% gather(key = "POOL_RUN", value = "POOL_ID", starts_with("POOL_RUN")) %>% mutate(POOL_RUN = parse_number(POOL_RUN) %>% as.integer) %>% filter(!is.na(POOL_ID)) ```
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/pooling.Rmd
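The last chunk in the pooling file above reshapes the ten POOL_RUN columns to 'long' format and recovers the run number from the column name with `readr::parse_number()`. A minimal sketch of that reshaping step on toy pool identifiers (two samples, three runs; NA means the sample could not be pooled in that run):

```r
library(tidyr)
library(dplyr)
library(readr)

d <- data.frame(
  ID = c("S1", "S2"),
  POOL_RUN1 = c(1L, NA),
  POOL_RUN2 = c(1L, 2L),
  POOL_RUN3 = c(NA, 2L)
)

d %>%
  gather(key = "POOL_RUN", value = "POOL_ID", starts_with("POOL_RUN")) %>%
  mutate(POOL_RUN = parse_number(POOL_RUN) %>% as.integer) %>%
  filter(!is.na(POOL_ID))
```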
## Species sensitivity values ```{r} to_log("INFO", "Entering section 'Species sensitivity values'...") ``` ```{r child="ambi.Rmd", eval=("ambi" %in% settings$indicators)} ``` ```{r child="iti.Rmd", eval=("iti" %in% settings$indicators)} ```
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/species-sensitivity-values.Rmd
### Study area ```{r} to_log("INFO", "Entering subsection 'Map'...") ``` The plot below gives the spatial distribution of pressure values (`r sprintf("%s, %s", settings$pressure$name, settings$pressure$unit)`). <figure> ```{r, fig.width=6, fig.height=5, out.width=600, dpi=600, warning=FALSE} g <- ggplot(data = d_mmi %>% select(LONG, LAT, PRESSURE) %>% distinct) + geom_point(mapping = aes(x = LONG, y = LAT, size = PRESSURE), colour = "black", alpha = 0.3) + scale_size_area(name = "pressure") + coord_equal() + theme_bw() suppressWarnings(print(g)) ``` <figcaption>Map of the pressure values.</figcaption> </figure> <br> <br>
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/study-area-pressure.Rmd
### Study area ```{r} to_log("INFO", "Entering subsection 'Map'...") ``` The map below shows the location of the samples. <figure> ```{r, fig.width=5, fig.height=5, out.width=600, dpi=600, warning=FALSE} g <- ggplot(data = d_mmi %>% select(LONG, LAT) %>% distinct) + geom_point(mapping = aes(x = LONG, y = LAT), colour = "black", alpha = 0.3) + coord_equal() + theme_bw() suppressWarnings(print(g)) ``` <figcaption>Map of sampling locations.</figcaption> </figure> <br> <br>
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/study-area.Rmd
The following AMBI-file has been provided: `r sQuote(basename(settings$files$ambi))`. It contains species that are not present in the standard AMBI-list, or for which, according to Dutch North Sea benthos specialists, a different species sensitivity classification is necessary (Van Loon et al., 2015). The standard AMBI-list (www.azti.es, version November 2014, by courtesy of Angel Borja) will be used for taxa not available in `r sQuote(basename(settings$files$ambi))`.
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/user-ambi.Rmd
The following ITI-file has been provided by the user: `r sQuote(basename(settings$files$iti))`. The internal ITI-list will be used for taxa not available in `r sQuote(basename(settings$files$iti))`. Note: A copy of this internal ITI-list is available in the 'REF-FILES'-directory, and can also be obtained by calling the function `benthos::get_iti()`.
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/user-iti.Rmd
### Which MMI correlates best with pressure? ```{r} to_log("INFO", "Entering subsection 'Which MMI correlates best with pressure?'...") ``` ```{r} number_of_combinations <- c(1L, 3L, 7L)[number_of_indicators] ``` Multimetric indices are constructed by fitting all `r number_of_combinations` possible weighted linear combinations of the set of indices `r toString(paste0(indicators, "*"))` to the pressure. Our general model is MMI^* = b<sub>0</sub> + b<sub>1</sub> &times; exp(b<sub>2</sub> &times; PRESSURE) where MMI^* is the `r if (tolower(settings$legendtext) == "eqr") {"EQR of the"} else {"normalized"}` multimetric index, PRESSURE are the values in the PRESSURE column of the input file, and b<sub>0</sub>, b<sub>1</sub> and b<sub>2</sub> are coefficients. MMI^* is a weighted linear combination of `r if (tolower(settings$legendtext) == "eqr") {"the ecological quality ratios of the"} else {"normalized values"}` of the selected indices: `r paste(paste0("w<sub>", 1:number_of_indicators, "</sub>"), paste0(indicators, "*"), collapse = " + ", sep = " &times; ")`. where weights `r toString(paste0("w<sub>", 1:number_of_indicators, "</sub>"))` are non-negative and sum to one. For each of the `r number_of_combinations` possible combinations of `r title_text_plur`, the weights are optimized mathematically in order to maximize the precision of the model mentioned above. Duplicated models (with approximately the same set of weights) have been removed. ```{r} # initialize weight matrix and add single metric weights W <- diag(number_of_indicators) dimnames(W) <- list(NULL, indicators_EQR) W <- rbind(W, matrix( data = 0, nrow = number_of_combinations - number_of_indicators, ncol = number_of_indicators ) ) # bimetric indicators i <- number_of_indicators if (number_of_indicators >= 2L) { f_obj <- function(w, id) { (sprintf( "I(%e * %s + %e * %s) ~ b0 + b1*exp(b2*PRESSURE)", w, indicators_EQR[id[1]], 1 - w, indicators_EQR[id[2]] ) %>% as.formula %>% nls( data = d_ind, start = list(b0 = 0.1, b1 = 0.9, b2 = -1), control = nls.control(maxiter = 1000)) ) %>% AIC } opt <- optimize(f = f_obj, interval = c(0, 1), maximum = FALSE, id = c(1, 2)) i <- i + 1L W[i, 1] <- opt$minimum W[i, 2] <- 1 - opt$minimum } # trimetric indicators if (number_of_indicators == 3L) { opt <- optimize(f = f_obj, interval = c(0, 1), maximum = FALSE, id = c(1, 3)) i <- i + 1L W[i, 1] <- opt$minimum W[i, 3] <- 1 - opt$minimum opt <- optimize(f = f_obj, interval = c(0, 1), maximum = FALSE, id = c(2, 3)) i <- i + 1L W[i, 2] <- opt$minimum W[i, 3] <- 1 - opt$minimum } if (number_of_indicators == 3L) { f_obj <- function(w) { if (any(w < 0)) { return(Inf) } w <- w / sum(w) (sprintf( "I(%e * %s + %e * %s + %e * %s) ~ b0 + b1*exp(b2*PRESSURE)", w[1], indicators_EQR[1], w[2], indicators_EQR[2], w[3], indicators_EQR[3] ) %>% as.formula %>% nls( data = d_ind, start = list(b0 = 0.1, b1 = 0.9, b2 = -1), control = nls.control(maxiter = 1000)) ) %>% AIC } opt <- optim( par = c(1, 1, 1), fn = f_obj, method = "Nelder-Mead", control = list(maxit = 1000, fnscale = 1) # minimization ) has_converged <- opt$convergence == 0L if (!has_converged) { stop("Can't find an optimum set of weights", call. = FALSE) } i <- i + 1L W[i, ] <- opt$par / sum(opt$par) } ```
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/weights-optim-exponential.Rmd
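The hard-coded counts 1, 3 and 7 in the optimization files correspond to the 2^d − 1 non-empty subsets of d = 1, 2, 3 indices (each subset yields one candidate MMI). A quick sanity check, together with a sketch of how an unconstrained Nelder-Mead proposal is mapped back to weights that are non-negative and sum to one, mirroring the `w / sum(w)` step in the trimetric objective above:

```r
d <- 1:3
2^d - 1            # 1 3 7: number of candidate index combinations

# normalisation used in the trimetric objective: any positive vector
# proposed by the optimizer is rescaled so the weights sum to one
w_raw <- c(2.0, 0.5, 1.5)   # hypothetical optimizer proposal
w <- w_raw / sum(w_raw)
w
sum(w)             # 1
```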
### Which MMI correlates best with pressure? ```{r} to_log("INFO", "Entering subsection 'Which MMI correlates best with pressure?'...") ``` ```{r} number_of_combinations <- c(1L, 3L, 7L)[number_of_indicators] ``` Multimetric indices are constructed by fitting all `r number_of_combinations` possible weighted linear combinations of the set of indices `r toString(paste0(indicators, "*"))` to the pressure. Our general model is MMI^* = b<sub>0</sub> + b<sub>1</sub> &times; PRESSURE where MMI^* is the `r if (tolower(settings$legendtext) == "eqr") {"EQR of the"} else {"normalized"}` multimetric index, PRESSURE are the values in the PRESSURE column of the input file, and b<sub>0</sub> and b<sub>1</sub> are the intercept and the slope respectively. MMI^* is a weighted linear combination of `r if (tolower(settings$legendtext) == "eqr") {"the ecological quality ratios of the"} else {"normalized values"}` of the selected indices: `r paste(paste0("w<sub>", 1:number_of_indicators, "</sub>"), paste0(indicators, "*"), collapse = " + ", sep = " &times; ")`. where weights `r toString(paste0("w<sub>", 1:number_of_indicators, "</sub>"))` are non-negative and sum to one. For each of the `r number_of_combinations` possible combinations of `r title_text_plur`, the weights are optimized mathematically in order to maximize the precision of the model mentioned above. Duplicated models (with approximately the same set of weights) have been removed. ```{r} # initialize weight matrix and add single metric weights W <- diag(number_of_indicators) dimnames(W) <- list(NULL, indicators_EQR) W <- rbind(W, matrix( data = 0, nrow = number_of_combinations - number_of_indicators, ncol = number_of_indicators ) ) # bimetric indicators i <- number_of_indicators if (number_of_indicators >= 2L) { f_obj <- function(w, id) { (sprintf( "I(%e * %s + %e * %s) ~ PRESSURE", w, indicators_EQR[id[1]], 1 - w, indicators_EQR[id[2]] ) %>% as.formula %>% lm(data = d_ind) %>% summary)$adj.r.squared # maximum needed } opt <- optimize(f = f_obj, interval = c(0, 1), maximum = TRUE, id = c(1, 2)) i <- i + 1L W[i, 1] <- opt$maximum W[i, 2] <- 1 - opt$maximum } # trimetric indicators if (number_of_indicators == 3L) { opt <- optimize(f = f_obj, interval = c(0, 1), maximum = TRUE, id = c(1, 3)) i <- i + 1L W[i, 1] <- opt$maximum W[i, 3] <- 1 - opt$maximum opt <- optimize(f = f_obj, interval = c(0, 1), maximum = TRUE, id = c(2, 3)) i <- i + 1L W[i, 2] <- opt$maximum W[i, 3] <- 1 - opt$maximum } if (number_of_indicators == 3L) { f_obj <- function(w) { if (any(w < 0)) { return(Inf) } w <- w / sum(w) (sprintf( "I(%e * %s + %e * %s + %e * %s) ~ PRESSURE", w[1], indicators_EQR[1], w[2], indicators_EQR[2], w[3], indicators_EQR[3] ) %>% as.formula %>% lm(data = d_ind) %>% summary)$adj.r.squared # maximum needed } opt <- optim( par = c(1, 1, 1), fn = f_obj, method = "Nelder-Mead", control = list(maxit = 1000, fnscale = -1) # maximization ) has_converged <- opt$convergence == 0L if (!has_converged) { stop("Can't find an optimum set of weights", call. = FALSE) } i <- i + 1L W[i, ] <- opt$par / sum(opt$par) } ```
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/weights-optim-linear.Rmd
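For the bimetric case, the linear optimization file above searches a single weight w in [0, 1] with `optimize()`, maximizing the adjusted r² of the linear fit against the pressure. A self-contained sketch of the same idea on simulated index values (index names and data are invented for illustration):

```r
set.seed(1)
n <- 200
PRESSURE <- runif(n, 0, 10)
# two fictitious normalized indices, both weakly related to the pressure
IND1 <- pmin(1, pmax(0, 0.9 - 0.04 * PRESSURE + rnorm(n, sd = 0.10)))
IND2 <- pmin(1, pmax(0, 0.8 - 0.06 * PRESSURE + rnorm(n, sd = 0.05)))
d <- data.frame(PRESSURE, IND1, IND2)

f_obj <- function(w) {
  mmi <- w * d$IND1 + (1 - w) * d$IND2
  summary(lm(mmi ~ PRESSURE, data = d))$adj.r.squared
}

opt <- optimize(f = f_obj, interval = c(0, 1), maximum = TRUE)
c(w1 = opt$maximum, w2 = 1 - opt$maximum, adj_r2 = opt$objective)
```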
The weights for each combination of `r title_text_plur` are given in the first `r number_of_indicators` columns of the table below. The remaining columns give the coefficients b<sub>0</sub>, b<sub>1</sub>, and b<sub>2</sub>, the pseudo r^2, _i.e._, a rough estimate of the amount of variation in the data explained by the model, and a flag indicating whether the model converged. The table has been sorted by pseudo r^2 (best models at the top).

<br>

```{r results='asis'}
d <- S
names(d)[1:number_of_indicators] <- paste0(
    "w", 1:number_of_indicators, " (",
    sub(pattern = "_STAR$", replacement = "*", x = names(d)[1:number_of_indicators]), ")")
d %>%
    xtable(digits = c(0L, rep.int(x = 3L, times = ncol(d) - 1L), 0L)) %>%
    print(type = "html", include.rownames = FALSE)
```

<br>

The slope of each curve, _i.e._ the derivative of b<sub>0</sub> + b<sub>1</sub> &times; exp(b<sub>2</sub> &times; PRESSURE) with respect to PRESSURE, is given by: b<sub>1</sub> &times; b<sub>2</sub> &times; exp(b<sub>2</sub> &times; PRESSURE)
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/weights-stats-table-exponential.Rmd
The weights for each combination of `r title_text_plur` are given in the first `r number_of_indicators` columns of the table below. The remaining columns give the intercept b<sub>0</sub>, the slope b<sub>1</sub>, the adjusted r^2, the adjusted Pearson product-moment correlation coefficient (adjusted r, _i.e._, the square root of the adjusted r^2), and the p-value of the fit. The rows are sorted by adjusted r^2 (best models at the top).

<br>

```{r results='asis'}
d <- S
names(d)[1:number_of_indicators] <- paste0(
    "w", 1:number_of_indicators, " (",
    sub(pattern = "_STAR$", replacement = "*", x = names(d)[1:number_of_indicators]), ")")
d %>%
    xtable(digits = c(0L, rep.int(x = 3L, times = ncol(d) - 1L), 4L)) %>%
    print(type = "html", include.rownames = FALSE)
```

<br>
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/weights-stats-table-linear.Rmd
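The adjusted r² reported in this table corrects the ordinary r² for the number of fitted parameters; for a straight-line fit with n observations and a single predictor it equals 1 − (1 − r²)(n − 1)/(n − 2). A quick check against `summary.lm` on toy data:

```r
set.seed(1)
d <- data.frame(x = runif(40))
d$y <- 0.5 + 0.3 * d$x + rnorm(40, sd = 0.1)

s  <- summary(lm(y ~ x, data = d))
r2 <- s$r.squared
n  <- nrow(d)

c(manual = 1 - (1 - r2) * (n - 1) / (n - 2), from_summary = s$adj.r.squared)
```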
```{r} to_log("INFO", "Entering subsection 'user defined weights'...") ``` ```{r} # W <- diag(number_of_indicators) # dimnames(W) <- list(NULL, indicators_EQR) # W <- rbind(W, settings$weights) W <- matrix( data = settings$weights, nrow = 1, dimnames = list(NULL, indicators_EQR) ) ```
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/Rmd/weights-user.Rmd
## ----ini, echo=FALSE, results='hide', message=FALSE--------------------------- library(BENMMI) library(benthos) library(knitr) library(xtable) library(ggplot2) library(DEoptim) library(readr) library(dplyr) ## ----echo=FALSE--------------------------------------------------------------- opts_chunk$set( echo = FALSE, comment = NA, quiet = TRUE, progress = FALSE, tidy = FALSE, cache = FALSE, message = FALSE, error = TRUE, warning = TRUE ) ## ----eval=FALSE, echo=TRUE---------------------------------------------------- # library(BENMMI) ## ----eval=FALSE, echo=TRUE---------------------------------------------------- # BENMMIdir() ## ----eval=FALSE, echo=TRUE---------------------------------------------------- # BENMMIdir(path = "c:/myprojects/BENMMI/BENMMI_FILES") ## ----eval=FALSE, echo=TRUE---------------------------------------------------- # BENMMI() ## ----eval=FALSE, echo=TRUE---------------------------------------------------- # BENMMI(filename = "c:/myprojects/BENMMI/BENMMI_FILES/settings-S-D-lin.json") ## ----echo=FALSE, results='asis'----------------------------------------------- cat( paste( readLines(system.file("extdata", "settings-S-D-lin.json", package = "BENMMI")), collapse = "\n" ) ) ## ----echo=FALSE, results='asis'----------------------------------------------- d <- scan( file = "./tables/tab-benmmi-input.csv", what = character(), sep = ",", quiet = TRUE ) h <- d[1:3] d <- as.data.frame(matrix(data = d[-(1:3)], ncol = 3, byrow = TRUE)) colnames(d) <- h print( xtable(x = d, align = "llp{50mm}p{50mm}"), include.rownames = FALSE, size = "footnotesize", add.to.row = list(list(-1), "\\rowcolor{blue!15}") ) ## ----echo=FALSE, results='asis'----------------------------------------------- d <- read_ambi( filename = system.file( "extdata", "REF-FILES", "AMBI-NL+.csv", package = "BENMMI" ) ) print( xtable(x = d[sample.int(n = nrow(d), size = 25), ], align = "llr"), include.rownames = FALSE, size = "footnotesize", add.to.row = list(list(-1), "\\rowcolor{blue!15}") ) ## ----echo=FALSE, results='asis'----------------------------------------------- d <- read_iti( filename = system.file( "extdata", "REF-FILES", "ITI+carnivores-2015-10-23.csv", package = "BENMMI" ) ) print( xtable(x = d[sample.int(n = nrow(d), size = 25), ], align = "llrr"), include.rownames = FALSE, size = "footnotesize", add.to.row = list(list(-1), "\\rowcolor{blue!15}") ) ## ----echo=FALSE, results='asis'----------------------------------------------- filename <- system.file( "extdata", "REF-FILES", "AREAS-HABITATS-SNS-2016-11-27.csv", package = "BENMMI" ) d <- read_ref(file = filename, indicators = c("D", "S", "AMBI", "ITI")) print( xtable(x = d), include.rownames = FALSE, size = "footnotesize", add.to.row = list(list(-1), "\\rowcolor{blue!15}"), sanitize.text.function = function(x) { gsub(pattern="_", replacement = "\\\\_", x=x) }, rotate.colnames = TRUE ) ## ----echo=FALSE, results='asis'----------------------------------------------- filename <- system.file( "extdata", "REF-FILES", "TAXONOMIC-GROUPS-EXCLUDED.csv", package = "BENMMI" ) d <- read_csv( file = filename, col_types = cols( GROUP = col_character(), DESCRIPTION = col_character() )) print( xtable(x = d, align = "lll"), include.rownames = FALSE, size = "footnotesize", add.to.row = list(list(-1), "\\rowcolor{blue!15}") ) ## ----echo=FALSE, results='asis'----------------------------------------------- filename <- system.file( "extdata", "REF-FILES", "TAXA-BE-DE-NL-UK-2017-01-06.csv", package = "BENMMI" ) d <- read_csv( file = filename, col_types = cols( group 
= col_character(), provided = col_character(), accepted = col_character(), level = col_character(), quality_code = col_integer() ) ) print( xtable(x = d %>% slice(1:10), align = "lllllr"), include.rownames = FALSE, size = "footnotesize", add.to.row = list(list(-1), "\\rowcolor{blue!15}") ) ## ----echo=FALSE, results='asis'----------------------------------------------- # https://cran.r-project.org/web/packages/xtable/vignettes/xtableGallery.pdf d <- read.csv(file = "./tables/tab-GROUP.csv") print(xtable(d[1:20, 1:11]), floating.environment = 'sidewaystable', size = 'footnotesize', include.rownames = FALSE) ## ----echo=FALSE, results='asis'----------------------------------------------- # https://cran.r-project.org/web/packages/xtable/vignettes/xtableGallery.pdf d <- read.csv(file = "./tables/tab-ITI.csv") names(d)[ncol(d)] <- "NA" print(xtable(d[1:20, ]), floating.environment = 'sidewaystable', size = 'footnotesize', include.rownames = FALSE) ## ----echo=FALSE, message=FALSE------------------------------------------------ set.seed(314) ## ----eval=FALSE, echo=FALSE--------------------------------------------------- # #Calculated with BENMMI 2016-08-18 # d <- read_csv("saltkallefjord-indices.csv") %>% # mutate( # date = SAMPLEID %>% # substr(start = 7, stop = 16) %>% # as.Date # ) %>% # select(date, D) # d %>% # arrange(date) %>% # write_csv("saltkallefjord-D.csv") ## ----echo=FALSE--------------------------------------------------------------- plot_fit <- function(d, theta = NULL) { g <- ggplot() + geom_point( data = d, mapping = aes(x = date, y = D), colour = "blue" ) + scale_x_date( name = "", limits = as.Date(c("1965-01-01", "1980-01-01")) ) + scale_y_continuous(name = "Margalef diversity D") if (is.null(theta)) { return(g) } d_fit <- data.frame( date = seq( from = min(d$date), to = max(d$date), by = 0.1 ) ) d_fit$ndate <- as.numeric(d_fit$date) d_fit$D <- f_gl(x = d_fit$ndate, theta) g + geom_path( data = d_fit, mapping = aes(x = date, y = D) ) } ## ----echo=TRUE---------------------------------------------------------------- f_gl <- function(x, theta) { A <- theta[1] K <- theta[2] B <- theta[3] M <- theta[4] A + (K - A) / (1 + exp(-B*(x-M))) } ## ----echo=TRUE---------------------------------------------------------------- # read data d_obs <- read.csv("./data/saltkallefjord-D.csv", as.is = TRUE) # coercion from character to Date-object d_obs$date <- as.Date(d_obs$date) # print head of these data head(d_obs) ## ----fig.width=4, fig.height=3, out.width="0.7\\textwidth", echo=FALSE-------- plot_fit(d_obs) ## ----echo=TRUE---------------------------------------------------------------- d_obs$ndate <- as.numeric(d_obs$date) ## ----echo=TRUE---------------------------------------------------------------- f_obj <- function(theta, data) { # constraint: lower asymptote should be nonnegative if (theta[1] < 0) { return(Inf) } # predict Margalef diversity by means of the generalised logistic function D_hat <- f_gl(data$ndate, theta) # difference between Margalef diversity based on # observations and the generalised logistic function error <- d_obs$D - D_hat # our objective to minimize: the sum of squared errors sum(error * error) } ## ----echo=TRUE---------------------------------------------------------------- # define lower and upper bounds for each parameter lower_bounds <- c(0, 5, 0.0001, -1000) upper_bounds <- c(5, 9, 1.0000, 1000) # DE-optimization opt <- DEoptim( fn = f_obj, lower = lower_bounds, upper = upper_bounds, data = d_obs, control = DEoptim.control(NP = 100, itermax = 100, 
trace=FALSE) ) ## ----echo=FALSE--------------------------------------------------------------- # check results theta <- opt$optim$bestmem in_bounds <- all(abs(theta - lower_bounds) > 1.0e-3) & all(abs(theta - upper_bounds) > 1.0e-3) if (!in_bounds) { cat("parameters pressed against bounds: ", toString(p), "\n") print(lower_bounds) print(upper_bounds) } ## ----fig.width=4, fig.height=3, out.width="0.7\\textwidth", echo=FALSE-------- plot_fit(d_obs, theta) ## ----echo=FALSE, results='asis'----------------------------------------------- # https://cran.r-project.org/web/packages/xtable/vignettes/xtableGallery.pdf d <- read.csv(file = "./tables/tab-checks.csv") print( xtable(d, align = "llp{55mm}lp{30mm}"), hline.after = c(-1), add.to.row = list( pos = list(0), command = paste0( "\\hline\n\\endhead\n", "\\hline\n", "\\multicolumn{", ncol(d) + 1, "}{l}", "{\\footnotesize Continued on next page}\n", "\\endfoot\n", "\\endlastfoot\n")), floating = FALSE, tabular.environment = "longtable" )
/scratch/gouwar.j/cran-all/cranData/BENMMI/inst/doc/BENMMI_User_Manual.R
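The four parameters of `f_gl()` in the vignette code above follow the usual generalised-logistic interpretation: A is the lower asymptote, K the upper asymptote, B the growth rate, and M the location of the inflection point. This reading is an assumption based on the standard form of the curve, not stated explicitly in the source; a short sketch evaluating the function at its asymptotes and inflection point:

```r
f_gl <- function(x, theta) {
  A <- theta[1]; K <- theta[2]; B <- theta[3]; M <- theta[4]
  A + (K - A) / (1 + exp(-B * (x - M)))
}

theta <- c(A = 1, K = 7, B = 0.01, M = 0)   # hypothetical parameter values
f_gl(-1e6, theta)   # ~ A  (lower asymptote)
f_gl( 1e6, theta)   # ~ K  (upper asymptote)
f_gl(0, theta)      # (A + K) / 2 at the inflection point x = M
```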
#' A fitted BEST object, generated for simulation and example purposes
#'
#'
#' @format A typical list produced by the BEST function:
#' \describe{
#'   \item{1}{Tree structure indicating the splitting variables, the impurity of each region and the split points}
#'   \item{2}{List of splitting values}
#'   \item{3}{Observation numbers in the respective regions}
#'   ...
#' }
"Fit"
/scratch/gouwar.j/cran-all/cranData/BESTree/R/Fit.R
################################################################################### ### Branch-Exclusive Splits Trees ################################################################################### ### Cedric Beaulac (2018) ### Fully Re-Coded BEST Algorithm (Classification Tree only) ################################################################################### ### Can only manage simple gating structure ( January 23rd, 2018 ) ################################################################################### ### Bagged Tree, Random Forest and Variable Importances ( February 27th, 2018 ) ################################################################################### ### R Packaging ( April 5th, 2019 ) ################################################################################### ################################################################################### ## This file contains callable functions related to simplest version of the BEST ## algorithm. ################################################################################### ################################################################################### ## BEST ## ## Parameters ## Data - Data set (Data Frame) : Can take on both numerical and categorical ## predictors. Last Column is Response Variable (Categorical only) ## Integer needed for factor levels ## Size - Minimal Number of Observation within a leaf needed for partitionning ## VA - Variable Availability structure ## ## Returns ## Tree - Matrix representing a Tree including Region Names, No Obs, Split ## Variables, Split points, prediction for each region, etc... ## Regions - Row Names for every Regions ## Split Points - Necessar for categorical predictors ################################################################################### #' Main function of the package. #' It produces Classification Trees with Branch-Exclusive variables. #' @param Data A data set (Data Frame): Can take on both numerical and categorical predictors. 
Last column of the data set must be the Repsonse Variable (Categorical Variables only) #' @param Size Minimal Number of Observation within a leaf needed for partitionning #' @param VA Variable Availability structure #' @return A BEST object with is a list containing the resulting tree, row numbers for each regions and the split points #' @examples #' n <- 1000 #' Data <- BESTree::Data[1:n,] #' d <- ncol(Data)-1 #' VA <- ForgeVA(d,1,0,0,0) #' Size <- 50 #' Fit <- BESTree::BEST(Data,Size,VA) #' @export BEST <- function( Data, Size, VA ) { # Establishing basic data information n <- nrow(Data) d <- (ncol(Data) - 1) X <- data.frame(Data[,1:d]) Y <- Data[,(d+1)] #Determine if a input is factor or not CatInd <- rep(0,d) for ( i in 1:d ) { if (is.factor(X[,i])) { CatInd[i] <- 1 } } #Set up list of index for categorical predictors CatNo <- CatInd*(1:d) if ( sum(CatNo) > 0 ) { CatList <- CatIndex(data.frame(Data[,CatNo]),setdiff(CatNo,0)) } # Innteger indication the current iteration Ite <- 1 # Cond is a binary variable to check if we continue splitting or not Cond <- TRUE #Variable avaible at any iteration Variables <- list() Variables[[Ite]] <- VA[[1]] #Regions Regions <- list() Regions[[Ite]] <- seq(1:n) #Tree object Tree <- matrix( data = c(0,0,0,0,0,0,0,0,1,1), nrow=1 ) #Split Points, this is mostly for Categorical variables purposes, since we can't include #vectors in the Tree matrix SPList <- list() while ( Ite <= length(Regions) ) { n <- length(Regions[[Ite]]) X <- data.frame(Data[Regions[[Ite]],1:d]) Y <- Data[Regions[[Ite]],(d+1)] Response <- plyr::count(Y) Prediction <- Response$x[which.max(Response$freq)] Impurity <- n*ImpFctCM(Response) # Preparing Variable Available CatVA <- Variables[[Ite]]*CatInd NumVA <- (Variables[[Ite]]-CatVA) CatVANo <- which(CatVA > 0) NumVANo <- which(NumVA > 0) # Making sure splitting IS possible if ( sum(CatVA)>0 && n > 1) { CatCond <- FALSE for ( i in 1:length(CatVANo)) { CatCond <- CatCond || length(unique(X[,CatVANo[i]])) > 1 } } else { CatCond<- FALSE } if ( sum(NumVA)>0 && n > 1) { NumCond <- FALSE for ( i in 1:length(NumVANo)) { NumCond <- NumCond || length(unique(X[,NumVANo[i]])) > 1 } } else { NumCond <- FALSE } # Check Conditions Cond <- ( (n > Size) && nrow(Response) > 1 && (NumCond || CatCond)) # If one of the conditions is false, we stop the splitting process on that branch. 
if ( !Cond ) { Tree <- rbind(Tree , c(Ite,0,0,0,0,Prediction,n,0,1,Impurity)) SPList[[Ite]] <- 0 #Tree[[Ite]] <- c(Ite,0,0,0,0,Pred,n,0,1) } else { #Proceed to split on Numerical predictors if possible if ( NumCond ) { BN <- BestNum(data.frame(Data[Regions[[Ite]],c(NumVANo,(d+1))])) } else { BN <- c(0,0,0) } #Proceed to split on Categorical predictors if possible if ( CatCond ) { BC <- BestCat(data.frame(Data[,c(CatVANo,(d+1))]),Regions[[Ite]],CatList, CatVANo) } else { BC <- list(0,0,0) } #Select the type of split producing best decreasin in impurity #If Numerical split produce best impurity reduction if ( BN[3] == 0 && BC[[3]] == 0 ) { Tree <- rbind(Tree, c(Ite,0,0,0,0,Prediction,n,0,1,Impurity)) #Tree[[Ite]] <- c(Ite,0,0,0,0,Pred,n,0,1) SPList[[Ite]] <- 0 } else if (BN[3] >= BC[[3]] ) { #Keep track of Split variable and point select #Also, names of children leaves and Impurity drop produced SV <- NumVANo[BN[1]] SP <- BN[2] LB <- length(Regions)+1 BB <- length(Regions)+2 Regions[[LB]] <- intersect(which(Data[,SV]<=SP),Regions[[Ite]]) Regions[[BB]] <- setdiff(Regions[[Ite]],Regions[[LB]]) Tree <- rbind(Tree,c(Ite,SV,SP,LB,BB,Prediction,n,BN[3],0,Impurity) ) #Tree[[Ite]] <- c(Ite,SV,SP,LB,BB,Pred,n,BN[3],0) SPList[[Ite]] <- SP #BEST : Update the list of predictors available for futur splits #If SP if greater then threshold only top data get new variables if ( SP > VA[[SV+1]][[1]] ) { Variables[[LB]] <- Variables[[Ite]] Variables[[BB]] <- Variables[[Ite]]+VA[[SV+1]][[3]] #If SP is smaller than threshold only lower data get new variables } else if (SP < VA[[SV+1]][[1]] ) { Variables[[LB]] <- Variables[[Ite]]+VA[[SV+1]][[2]] Variables[[BB]] <- Variables[[Ite]] } else { Variables[[LB]] <- Variables[[Ite]]+VA[[SV+1]][[2]] Variables[[BB]] <- Variables[[Ite]]+VA[[SV+1]][[3]] } } else if (BC[[3]] > BN[3] ) { #Keep track of Split variable and point select #Also, names of children leaves and Impurity drop produced SV <- CatVANo[BC[[1]]] SP <- BC[[2]] LB <- length(Regions)+1 BB <- length(Regions)+2 Regions[[LB]] <- intersect( Regions[[Ite]], unlist(CatList[[SV]][as.integer(SP)]) ) Regions[[BB]] <- setdiff(Regions[[Ite]],Regions[[LB]]) Tree <- rbind(Tree,c(Ite,SV,-1,LB,BB,Prediction,n,BC[[3]],0,Impurity) ) #Tree[[Ite]] <- c(Ite,SV,-1,LB,BB,Pred,n,BN[[3]],0) #BEST : Update the list of predictors available for futur splits #Not coded yet, Gating variable MUST BE numerical Variables[[LB]] <- Variables[[Ite]] Variables[[BB]] <- Variables[[Ite]] SPList[[Ite]] <- SP } } Ite <- Ite+1 } colnames(Tree) <- c('RegNo','SV','SP','LLeaf','RLeaf','Pred','NoObs','ImpRed','Leaf/Node','Imp') ToReturn <- list() ToReturn[[1]] <- Tree[2:nrow(Tree),] ToReturn[[2]] <- SPList ToReturn[[3]] <- Regions ToReturn[[4]] <- CatInd return(ToReturn) } #' Classify a new observation point #' @param Point A new observation #' @param Fit A BEST object #' @return The predicted class #' @examples #' n <- 500 #' Data <- BESTree::Data[1:n,] #' NewPoint <- BESTree::Data[n+1,] #' d <- ncol(Data)-1 #' VA <- ForgeVA(d,1,0,0,0) #' Size <- 50 #' Fit <- BESTree::BEST(Data,Size,VA) #' BESTree::Predict(NewPoint[1:d],Fit) #' @export Predict <- function(Point,Fit) { Tree <- Fit[[1]] SPList <- Fit[[2]] CatInd <- Fit[[4]] Reg <- 1 Prediction <- 0 LN <- Tree[Reg,9] while ( LN == 0 ) { SV <- Tree[Reg,2] #Check if SV is Categorical if ( CatInd[SV] == 1 ) { if ( is.element(Point[SV],SPList[[Reg]]) ) { Reg <- Tree[Reg,4] } else { Reg <- Tree[Reg,5] } # Else the SV is numerical } else { if ( Point[SV] <= Tree[Reg,3] ) { Reg <- Tree[Reg,4] } else { Reg <- 
Tree[Reg,5] } } LN <- Tree[Reg,9] } Prediction <- Tree[Reg,6] return(Prediction) } #' Classify a set of new observation points #' @param M A matrix of new observations where one row is one observation #' @param Fit A BEST object #' @return The predicted class #' @examples #' n <- 500 #' Data <- BESTree::Data[1:n,] #' d <- ncol(Data)-1 #' NewPoints <- BESTree::Data[(n+1):(n+11),1:d] #' VA <- ForgeVA(d,1,0,0,0) #' Size <- 50 #' Fit <- BESTree::BEST(Data,Size,VA) #' Predictions <- BESTree::MPredict(NewPoints,Fit) #' @export MPredict <- function(M,Fit) { n <- nrow(M) Predictions <- rep(0,n) for ( i in 1:n) { Predictions[i] <- Predict(M[i,],Fit) } return(Predictions) } #' Uses a Validation Set to select the best trees within the list of pruned trees. #' @param Fit A BEST object #' @param VSet A Validation Set (Can also be used in CV loop) #' @return The shallower trees among trees wiht Highest accuracy. This replaces the first element in the BEST object list. #' @examples #' nv <- 50 #' ValData <- BESTree::Data[(1000+1):nv,] #' Fit <- BESTree::Fit #' Fit[[1]] <- BESTree::TreePruning(Fit,ValData) #' @export TreePruning <- function(Fit,VSet){ #List of possible pruned trees TreeList <- ListOfTrees(Fit) #Number of trees to test NTree <- length(TreeList) d <- ncol(VSet)-1 Accuracy <- rep(0,NTree) Pred <- MPredict(VSet[,(1:d)],Fit) Accuracy[1] <- Acc(VSet[,(d+1)],Pred) #Set accuracy of full tree as Benchmark BestAccuracy <- Accuracy[1] Index <- 1 for ( i in 2:NTree) { Fit[[1]] <- TreeList[[i]] Pred <- MPredict(VSet[,(1:d)],Fit) Accuracy[i] <- Acc(VSet[,(d+1)],Pred) #If one tree beat benchmark then it is the new best tree #We use greater or EQUAL since we want trees as shallow as possible if ( Accuracy[i] >= BestAccuracy ) { BestAccuracy <- Accuracy[i] Index <- i } } return(TreeList[[Index]]) }
/scratch/gouwar.j/cran-all/cranData/BESTree/R/best.R
################################################################################### ### Branch-Exclusive Splits Trees (Non-Callables) ################################################################################### ### Cedric Beaulac (2018) ### Fully Re-Coded BEST Algorithm (Classification Tree only) ################################################################################### ### Can only manage simple gating structure ( January 23rd, 2018 ) ################################################################################### ### Bagged Tree, Random Forest and Variable Importances ( February 27th, 2018 ) ################################################################################### ### R Packaging ( April 5th, 2019 ) ################################################################################### ################################################################################### ## This file contains all callable functions related to Forest and Tree Bagging ################################################################################### #' Performs Bootstrap Aggregating of BEST trees #' @param Data A data set (Data Frame): Can take on both numerical and categorical predictors. Last column of the data set must be the Repsonse Variable (Categorical Variables only) #' @param VA Variable Availability structure #' @param Size Minimal Number of Observation within a leaf needed for partitionning (default is 50) #' @param NoT Number of Trees in the bag #' @return A list of BEST Objects #' @examples #' n <- 500 #' Data <- BESTree::Data[1:n,] #' d <- ncol(Data)-1 #' VA <- ForgeVA(d,1,0,0,0) #' Size <- 50 #' NoT <- 10 #' Fit <- BESTree::BaggedBEST(Data,VA,NoT,Size) #' @export BaggedBEST <- function(Data,VA,NoT=50,Size=50) { #To be returned is a list of BEST objects ListofFit <- list() n <- nrow(Data) for ( i in 1:NoT ) { #Bootstrap sample samp <- sample((1:n),size=n,replace=TRUE) BData <- Data[samp,] #Fit BEST Trees, 50 is abitrary BFit <- BEST(BData,Size,VA) ListofFit[[i]] <- BFit } return(ListofFit) } # Construct a BEST tree where predictors are randomly selected (for RF purposes) # Input : A training Set, the size of trees and Variable availability structure # Ouptut : A BEST Objects RBEST <- function( Data, Size, VA ) { # Establishing basic data information n <- nrow(Data) d <- (ncol(Data) - 1) X <- data.frame(Data[,1:d]) Y <- Data[,(d+1)] #Determine if a input is factor or not CatInd <- rep(0,d) for ( i in 1:d ) { if (is.factor(X[,i])) { CatInd[i] <- 1 } } #Set up list of index for categorical predictors CatNo <- CatInd*(1:d) if ( sum(CatNo) > 0 ) { CatList <- CatIndex(data.frame(Data[,CatNo]),setdiff(CatNo,0)) } # Innteger indication the current iteration Ite <- 1 # Cond is a binary variable to check if we continue splitting or not Cond <- TRUE #Variable avaible at any iteration Variables <- list() Variables[[Ite]] <- VA[[1]] #Regions Regions <- list() Regions[[Ite]] <- seq(1:n) #Tree object Tree <- matrix( data = c(0,0,0,0,0,0,0,0,1,1), nrow=1 ) #Split Points, this is mostly for Categorical variables purposes, since we can't include #vectors in the Tree matrix SPList <- list() while ( Ite <= length(Regions) ) { n <- length(Regions[[Ite]]) X <- data.frame(Data[Regions[[Ite]],1:d]) Y <- Data[Regions[[Ite]],(d+1)] Response <- plyr::count(Y) Prediction <- Response$x[which.max(Response$freq)] Impurity <- n*ImpFctCM(Response) #Randomly picking variables ( For random forest purposes) dIte <- sum(Variables[[Ite]]) ToDraw <- round(sqrt(dIte)) From <- which(Variables[[Ite]] > 0) Var 
<- sample(From,size=ToDraw,replace=FALSE) # Preparing Variable Available CatVA <- Variables[[Ite]]*CatInd NumVA <- (Variables[[Ite]]-CatVA) CatVANo <- which(CatVA > 0) NumVANo <- which(NumVA > 0) CatVANo <- intersect(CatVANo,Var) NumVANo <- intersect(NumVANo,Var) # Making sure splitting IS possible if ( length(CatVANo)>0 && n > 1) { CatCond <- FALSE for ( i in 1:length(CatVANo)) { CatCond <- CatCond || length(unique(X[,CatVANo[i]])) > 1 } } else { CatCond<- FALSE } if ( length(NumVANo)>0 && n > 1) { NumCond <- FALSE for ( i in 1:length(NumVANo)) { NumCond <- NumCond || length(unique(X[,NumVANo[i]])) > 1 } } else { NumCond <- FALSE } # Check Conditions Cond <- ( (n > Size) && nrow(Response) > 1 && (NumCond || CatCond)) # If one of the conditions is false, we stop the splitting process on that branch. if ( !Cond ) { Tree <- rbind(Tree , c(Ite,0,0,0,0,Prediction,n,0,1,Impurity)) SPList[[Ite]] <- 0 #Tree[[Ite]] <- c(Ite,0,0,0,0,Pred,n,0,1) } else { #Proceed to split on Numerical predictors if possible if ( NumCond ) { BN <- BestNum(Data[Regions[[Ite]],c(NumVANo,(d+1))]) } else { BN <- c(0,0,0) } #Proceed to split on Categorical predictors if possible if ( CatCond ) { BC <- BestCat(data.frame(Data[,c(CatVANo,(d+1))]),Regions[[Ite]],CatList,CatVANo) } else { BC <- list(0,0,0) } #Select the type of split producing best decreasin in impurity #If Numerical split produce best impurity reduction if ( BN[3] == 0 && BC[[3]] == 0 ) { Tree <- rbind(Tree, c(Ite,0,0,0,0,Prediction,n,0,1,Impurity)) #Tree[[Ite]] <- c(Ite,0,0,0,0,Pred,n,0,1) SPList[[Ite]] <- 0 } else if (BN[3] >= BC[[3]] ) { #Keep track of Split variable and point select #Also, names of children leaves and Impurity drop produced SV <- NumVANo[BN[1]] SP <- BN[2] LB <- length(Regions)+1 BB <- length(Regions)+2 Regions[[LB]] <- intersect(which(Data[,SV]<=SP),Regions[[Ite]]) Regions[[BB]] <- setdiff(Regions[[Ite]],Regions[[LB]]) Tree <- rbind(Tree,c(Ite,SV,SP,LB,BB,Prediction,n,BN[3],0,Impurity) ) #Tree[[Ite]] <- c(Ite,SV,SP,LB,BB,Pred,n,BN[3],0) SPList[[Ite]] <- SP #BEST : Update the list of predictors available for futur splits #If SP if greater then threshold only top data get new variables if ( SP > VA[[SV+1]][[1]] ) { Variables[[LB]] <- Variables[[Ite]] Variables[[BB]] <- Variables[[Ite]]+VA[[SV+1]][[3]] #If SP is smaller than threshold only lower data get new variables } else if (SP < VA[[SV+1]][[1]] ) { Variables[[LB]] <- Variables[[Ite]]+VA[[SV+1]][[2]] Variables[[BB]] <- Variables[[Ite]] } else { Variables[[LB]] <- Variables[[Ite]]+VA[[SV+1]][[2]] Variables[[BB]] <- Variables[[Ite]]+VA[[SV+1]][[3]] } } else if (BC[[3]] > BN[3] ) { #Keep track of Split variable and point select #Also, names of children leaves and Impurity drop produced SV <- CatVANo[BC[[1]]] SP <- BC[[2]] LB <- length(Regions)+1 BB <- length(Regions)+2 Regions[[LB]] <- intersect( Regions[[Ite]], unlist(CatList[[ SV ]][as.integer(SP)]) ) Regions[[BB]] <- setdiff(Regions[[Ite]],Regions[[LB]]) Tree <- rbind(Tree,c(Ite,SV,-1,LB,BB,Prediction,n,BN[[3]],0,Impurity) ) #Tree[[Ite]] <- c(Ite,SV,-1,LB,BB,Pred,n,BN[[3]],0) #BEST : Update the list of predictors available for futur splits #Not coded yet, Gating variable MUST BE numerical Variables[[LB]] <- Variables[[Ite]] Variables[[BB]] <- Variables[[Ite]] SPList[[Ite]] <- SP } } Ite <- Ite+1 } colnames(Tree) <- c('RegNo','SV','SP','LLeaf','RLeaf','Pred','NoObs','ImpRed','Leaf/Node','Imp') ToReturn <- list() ToReturn[[1]] <- Tree[2:nrow(Tree),] ToReturn[[2]] <- SPList ToReturn[[3]] <- Regions ToReturn[[4]] <- CatInd return(ToReturn) 
} #' Generates a random forest of BEST trees #' @param Data A data set (Data Frame): Can take on both numerical and categorical predictors. Last column of the data set must be the Repsonse Variable (Categorical Variables only) #' @param VA Variable Availability structure #' @param Size Minimal Number of Observation within a leaf needed for partitionning (default is 50) #' @param NoT Number of Trees in the bag #' @return A list of BEST Objects (Random Forest) #' @examples #' n <- 500 #' Data <- BESTree::Data[1:n,] #' d <- ncol(Data)-1 #' VA <- ForgeVA(d,1,0,0,0) #' Size <- 50 #' NoT <- 10 #' Fit <- BESTree::BESTForest(Data,VA,NoT,Size) #' @export BESTForest <- function (Data,VA,NoT=50,Size=50) { ListofFit <- list() n <- nrow(Data) for ( i in 1:NoT ) { samp <- sample((1:n),size=n,replace=TRUE) BData <- Data[samp,] BFit <- RBEST(BData,Size,VA) ListofFit[[i]] <- BFit } return(ListofFit) } #' Emits prediction from a forest of BEST's #' @param M A matrix of new observations where one row is one observation #' @param LFit A list of BEST Objects (Usually produced by RBEST or BESTForest) #' @return A vector of predictions #' @examples #' n <- 500 #' Data <- BESTree::Data[1:n,] #' d <- ncol(Data)-1 #' NewPoints <- BESTree::Data[(n+1):(n+11),1:d] #' VA <- ForgeVA(d,1,0,0,0) #' Size <- 50 #' NoT <- 10 #' Fit <- BESTree::BaggedBEST(Data,VA,NoT,Size) #' Predictions <- BESTree::FPredict(NewPoints,Fit) #' @export FPredict <- function(M,LFit) { NoT <- length(LFit) n <- nrow(M) Predictions <- rep(0,n) Pred <- matrix(rep(0,n*NoT),nrow=n) for ( i in 1:n) { for ( j in 1:NoT) { #Matrix of predictions base of every trees Pred[i,j] <- Predict(M[i,],LFit[[j]]) } #Aggregating the predictions (Majority of votes) Predictions[i] <- plyr::count(Pred[i,])$x[which.max(plyr::count(Pred[i,])$freq)] } return(Predictions) } #' Produces a variable important analysis using the mean decrease in node impurity #' @param Forest A list of BEST Objects (Usually produced by RBEST or BESTForest) #' @return A vector of importance (size d) #' @examples #' n <- 500 #' Data <- BESTree::Data[1:n,] #' d <- ncol(Data)-1 #' NewPoints <- BESTree::Data[(n+1):(n+11),1:d] #' VA <- ForgeVA(d,1,0,0,0) #' Size <- 50 #' NoT <- 10 #' Fit <- BESTree::BaggedBEST(Data,VA,NoT,Size) #' VI <- BESTree::VI(Fit) #' @export VI <- function(Forest){ d <- length(Forest[[1]][[4]]) NT <- length(Forest) TImp <- rep(0,d) for ( i in 1:NT) { ImpT <- rep(0,d) Tree <- Forest[[i]][[1]] DTree <- data.frame(Tree) #ddplyRes <- plyr::ddply(DTree,"SV",plyr::summarize,MeanImpRed = sum(DTree$ImpRed)) ddplyRes <- stats::aggregate(DTree, list(SV = DTree$SV), sum)[,c(1,9)] for ( j in 2:nrow(ddplyRes)){ ImpT[ddplyRes[j,1]] <- ddplyRes[j,2] } TImp <- TImp+ImpT } Imp <- TImp/NT return(Imp) }
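## A minimal end-to-end sketch of the forest helpers above, chaining ForgeVA(),
## BESTForest(), FPredict(), VI() and Acc() on the bundled BESTree::Data set.
## Sample sizes and the number of trees are arbitrary, illustrative choices.
n <- 500
Train <- BESTree::Data[1:n, ]
Test  <- BESTree::Data[(n + 1):(n + 100), ]
d <- ncol(Train) - 1
VA <- ForgeVA(d, 1, 0, 0, 0)                       # no gating structure
Forest <- BESTree::BESTForest(Train, VA, NoT = 10, Size = 50)
preds  <- BESTree::FPredict(Test[, 1:d], Forest)   # majority vote over the trees
BESTree::Acc(preds, Test[, d + 1])                 # accuracy on the held-out rows
BESTree::VI(Forest)                                # mean decrease in node impurity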
/scratch/gouwar.j/cran-all/cranData/BESTree/R/bestforest.R
#' Data generated according to a decision tree, for simulation purposes
#'
#'
#' @format A data frame with 10000 rows and 5 variables:
#' \describe{
#'   \item{X_1}{Binary predictor}
#'   \item{X_2}{Binary predictor}
#'   \item{X_3}{Continuous predictor between 0 and 1}
#'   \item{X_4}{Continuous predictor between 0 and 1}
#'   \item{Y}{The response variable}
#'   ...
#' }
"Data"
/scratch/gouwar.j/cran-all/cranData/BESTree/R/data.R
#utils::globalVariables(c("summarize", "ImpRed"))
/scratch/gouwar.j/cran-all/cranData/BESTree/R/globals.R
###################################################################################
### Branch-Exclusive Splits Trees
###################################################################################
### Cedric Beaulac (2018)
### Fully Re-Coded BEST Algorithm (Classification Tree only)
###################################################################################
### Can only manage simple gating structure ( January 23rd, 2018 )
###################################################################################
### Bagged Tree, Random Forest and Variable Importances ( February 27th, 2018 )
###################################################################################
### R Packaging ( April 5th, 2019 )
###################################################################################

###################################################################################
## This file contains two special callable functions.
## These are meant to make your life easier.
###################################################################################

#' Computes the proportion of matching terms in two vectors of the same length.
#' Used to compute the accuracy for prediction on a test set.
#' @param Vec1 A vector of labels
#' @param Vec2 Another vector of labels
#' @return Percentage of identical labels (accuracy)
#' @examples
#' Vec1 <- c(1,1,2,3,1)
#' Vec2 <- c(1,2,2,3,1)
#' Acc(Vec1,Vec2)
#' @export
Acc <- function(Vec1,Vec2) {

  Transformed <- c(Vec1,Vec2)

  Vec1I <- as.integer(Transformed[1:length(Vec1)])
  Vec2I <- as.integer(Transformed[(length(Vec1)+1):(2*length(Vec1))])

  acc <- sum((Vec1I-Vec2I)==0)/length(Vec1)

  return(acc)
}

#' Quickly build the Available Variable list necessary for BEST
#' This list contains details as to which variables are available for the partitioning.
#' It also contains which variables are gating variables.
#' @param d Number of predictors
#' @param GV Gating variables
#' @param BEV Branch-Exclusive Variables
#' @param Thresh Threshold for Gates
#' @param Direc Direction of Gates ( 1 means add variable if bigger than thresh)
#' @return The list containing the Variable Availability structure
#' @examples
#' #This function can be used to set up the variable availability structure.
#' #Suppose we want to fit a regular decision tree on a data set containing d predictors
#' d <- 10
#' VA <- ForgeVA(d,1,0,0,0)
#' #Suppose now that predictor x5 is a binary gating variable for x4
#' #such that x4 is available if x5 = 1
#' GV <- 5 #The gating variable
#' BEV <- 4 #The Branch-Exclusive variable
#' Thresh = 0.5 #Value between 0 and 1
#' Direc = 1 #X4 is available if X5 is bigger than Thresh
#' VA <- ForgeVA(d,GV,BEV,Thresh,Direc)
#' @export
ForgeVA <- function(d,GV,BEV,Thresh=0.5,Direc=0) {

  VA <- list()
  VA[[1]] <- rep(1,d)
  VA[[1]][BEV] <- 0

  for ( i in 1:d) {

    VA[[i+1]] <- list()
    VA[[i+1]][[1]] <- 0
    VA[[i+1]][[2]] <- rep(0,d)
    VA[[i+1]][[3]] <- rep(0,d)
  }

  for ( j in 1:length(GV) ) {

    VA[[GV[j]+1]][[1]] <- Thresh[j]

    if ( Direc[j] == 1 ) {

      VA[[GV[j]+1]][[3]][BEV[j]] <- 1

    } else {

      VA[[GV[j]+1]][[2]][BEV[j]] <- 1
    }
  }

  return(VA)
}
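## A hedged illustration of the vectorised interface of ForgeVA(): one entry per
## gate, with made-up values. Here x5 gates x3 (x3 opens on the branch where x5
## is above the threshold) and x6 gates x4 (x4 opens on the other branch).
d      <- 6
GV     <- c(5, 6)        # gating variables
BEV    <- c(3, 4)        # branch-exclusive variables they control
Thresh <- c(0.5, 0.5)    # gate thresholds
Direc  <- c(1, 0)        # 1: open the BEV on the "greater than threshold" branch
VA <- ForgeVA(d, GV, BEV, Thresh, Direc)
str(VA, max.level = 2)   # VA[[1]]: initial availability; VA[[j + 1]]: gate of x_j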
/scratch/gouwar.j/cran-all/cranData/BESTree/R/simplify.R
################################################################################### ### Branch-Exclusive Splits Trees (Non-Callables) ################################################################################### ### Cedric Beaulac (2018) ### Fully Re-Coded BEST Algorithm (Classification Tree only) ################################################################################### ### Can only manage simple gating structure ( January 23rd, 2018 ) ################################################################################### ### Bagged Tree, Random Forest and Variable Importances ( February 27th, 2018 ) ################################################################################### ### R Packaging ( April 5th, 2019 ) ################################################################################### ################################################################################### ## This file contains all function that should not be called by users directly. ## These functions are used by callable functions. ################################################################################### # ImpFct provides the vector Impurity (compile only one of the following) # Input : a vector of categorical variable # Output : Impurity value for the vector ImpFctCM <- function(CM) { #return(((CM[,2])/length(Y))%*%(1-(CM[,2])/length(Y))) return(1-sum(((CM[,2])/sum(CM[,2]))^2)) } #ImpFct <- compiler::cmpfun(ImpFctCM) #Deviance (without count) #ImpFctD <- function(CM) { # return( -(log((CM[,2])/length(Y)) %*% (CM[,2])/length(Y)) ) #} # BestNum finds the Best Split within numerical predictors # BestNum2 Is an attempt at removing counting from loops # Input : Data (Data frame, last column is response) # Output : Information about best split among numerical predictors (Vector) BestNum <- function(Data) { n <- nrow(Data) d <- ncol(Data)-1 X <- data.frame(Data[,1:d]) Y <- as.integer(factor(Data[,(d+1)])) CM <- plyr::count(Y) Impurity <- n*ImpFctCM(CM) NImpurity <- 0 #Value to be returned SV <- 0 SP <- 0 ImpRed <- 0 for ( j in 1:(d)) { #Ordering is really heavy computation wise :( Data <- Data[order(Data[,j]),] X <- data.frame(Data[,1:d]) Y <- as.integer(factor(Data[,(d+1)])) XV <- Data[1,j] Index <- 1 CM1 <- matrix(data=rep(0,nrow(CM)*2),ncol=2) CM1[as.integer(Y[1]),2] <- 1 CM2 <- CM CM2[as.integer(Y[1]),2] <- CM2[as.integer(Y[1]),2] - 1 while ( Index < n-6) { if ( round(Data[(Index+1),j],10) != round(XV,10) && Index > 5 ) { NImpurity <- (Index*ImpFctCM(CM1)+(n-Index)*ImpFctCM(CM2)) if ( NImpurity < Impurity ) { SV <- j SP <- (XV + Data[(Index+1),j])/2 ImpRed <- ImpRed + (Impurity - NImpurity) ind <- Index Impurity <- NImpurity } XV <- Data[(Index+1),j] } Index <- Index+1 CM1[as.integer(Y[Index]),2] <- CM1[as.integer(Y[Index]),2] +1 CM2[as.integer(Y[Index]),2] <- CM2[as.integer(Y[Index]),2] - 1 } } BNum <- c(SV,SP,ImpRed) return(BNum) } # OrderOnce will contain a matrix, containing the ordered index for numerical predictors # Input : Data is a data frame containing the numerical predictors # Ouput : Matrix? 
containg order based upon every numerical predictors OrderOnce <- function(Data) { d <- ncol(Data) n <- nrow(Data) Order <- matrix(data=rep(0,d*n),nrow=d,ncol=n) for ( j in 1:d ) { Order[j,] <- order(Data[,j]) } return(Order) } # BestCat finds the Best Split within categorical predictors # Input : Data (Data frame, last column is response) # Output : Information about best split among categorical predictors (List) BestCat <- function(Data,RegionIndex,CatList, CatVANo) { n <- nrow(Data[RegionIndex,]) d <- ncol(Data)-1 X <- data.frame(Data[,1:d]) Y <- Data[,(d+1)] CM <- plyr::count(Data[RegionIndex,(d+1)]) Impurity <- n*ImpFctCM(CM) ImpRed <- 0 SV <- 0 LCat <- 0 for ( j in 1:d) { NoLvl <- length(levels(factor(X[RegionIndex,j]))) NoSplit <- 2^(NoLvl-1)-1 if ( NoSplit > 0 ) { #First split ( These set operators are SUPER SLOW) IndexL <- intersect( RegionIndex, CatList[[CatVANo[j]]][[as.integer(levels(factor(X[RegionIndex,j]))[NoLvl])]] ) IndexR <- setdiff( RegionIndex, IndexL ) NImpurity <- length(IndexL)*ImpFctCM(plyr::count(Data[IndexL,(d+1)]))+length(IndexR)*ImpFctCM(plyr::count(Data[IndexR,(d+1)])) if ( NImpurity < Impurity ) { SV <- j LCat <- levels(factor(X[RegionIndex,j]))[NoLvl] ImpRed <- ImpRed + (Impurity - NImpurity) Impurity <- NImpurity } if ( NoLvl > 2 ) { for ( s in 1:(NoLvl-2)) { ToAdd <- utils::combn((NoLvl-1),s) for ( i in 1:ncol(ToAdd)) { NIndexL <- IndexL # Add new category in Left leaf ( These set operators are SUPER SLOW) for ( k in 1:s) { NIndexL <- c(NIndexL,intersect( RegionIndex, CatList[[CatVANo[j]]][[ToAdd[k,i]]] )) } NIndexR <- setdiff( RegionIndex, NIndexL ) NImpurity <- length(NIndexL)*ImpFctCM(plyr::count(Y[NIndexL]))+length(NIndexR)*ImpFctCM(plyr::count(Y[NIndexR])) if ( NImpurity < Impurity ) { SV <- j LCat <- c(NoLvl,ToAdd[,i]) ImpRed <- ImpRed + (Impurity - NImpurity) Impurity <- NImpurity } } } } } } ToReturn <- list() ToReturn[[1]] <- SV ToReturn[[2]] <- LCat ToReturn[[3]] <- ImpRed return(ToReturn) } # CatIndex returns a list containing index for categorical values # Input : Data is a data frame containing the categorical predictors # Output : List where first increment is categorical variable number # and 2nd sub list is level value CatIndex <- function(Data,CatNo) { d <- ncol(Data) n <- nrow(Data) Index <- list() for ( j in 1:d ) { Index[[CatNo[j]]] <- list() for ( i in 1:length(levels(Data[,j])) ) { Index[[CatNo[j]]][[i]] <- (Data[,j]==levels(Data[,j])[i])*(1:n) Index[[CatNo[j]]][[i]] <- Index[[CatNo[j]]][[i]][ !is.na(Index[[CatNo[j]]][[i]]) ] Index[[CatNo[j]]][[i]] <- Index[[CatNo[j]]][[i]][ Index[[CatNo[j]]][[i]] >0 ] } } return(Index) } # ListOfTree Builds all possible pruned trees that will be used in the tree pruning process # Input : A BEST Object # Output : a list of Trees (Matrix form) ListOfTrees <- function(Fit){ Tree <- data.frame(Fit[[1]]) Regions <- Fit[[3]] TreeList <- list() TreeList[[1]] <- Tree NoLeaves <- sum(Tree[,9]==1) while( NoLeaves > 1 ) { PossibleC <- Tree[Tree[,9]==0,] CInd <- rep(0,nrow(PossibleC)) for ( i in 1:nrow(PossibleC)) { if ( Tree[PossibleC[i,4],9] ==1 && Tree[PossibleC[i,5],9] ==1 ) { CInd[i] <- 1 } } RPC <- PossibleC[CInd==1,] ToC <- RPC[which.min(RPC[,8]),] #2 stands for 'collapsed leaves' Tree[ToC[1,4],9] <- 2 Tree[ToC[1,5],9] <- 2 Tree[ToC[1,1],9] <- 1 TreeList[[length(TreeList)+1]] <- Tree NoLeaves <- NoLeaves-1 } return(TreeList) }
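## A small worked example of the impurity helper: ImpFctCM() takes the frequency
## table returned by plyr::count() and computes the Gini impurity 1 - sum(p_k^2).
## With 30 observations of class 1 and 10 of class 2, p = (0.75, 0.25), so the
## impurity is 1 - (0.5625 + 0.0625) = 0.375. (ImpFctCM() is internal; use
## BESTree:::ImpFctCM() from outside the package namespace.)
Y  <- c(rep(1, 30), rep(2, 10))
CM <- plyr::count(Y)     # data frame with columns x (class label) and freq (count)
ImpFctCM(CM)             # 0.375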
/scratch/gouwar.j/cran-all/cranData/BESTree/R/utility_functions.R
## ----setup, include = FALSE---------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) library(BESTree) ## ----include = TRUE------------------------------------------------------ set.seed(100) n=1000 X1 <- rnorm(n,0,sd=1) X2 <- rnorm(n,2,sd=2) X3 <- runif(n,0,1) X4 <- runif(n,-2,2) Y <- 1*(X1<0)*(X4<0.5)+0*(X1>0)*(X4<0.5)+1*(X3>0.5)*(X4>0.5)+0*(X3<0.5)*(X4>0.5) #Add some randomized Y RY <- sample(1000,150) Y[RY] <- 1-Y[RY] ## ----include = TRUE------------------------------------------------------ X3[X3>0.5] <- NA Data <- cbind(X1,X2,X3,X4,as.factor(Y)) ## ----include = TRUE------------------------------------------------------ X5 <- is.na(X3)*1 NewData <- cbind(Data[,1:4],X5,Data[,ncol(Data)]) Training <- NewData[1:800,] Valid <- NewData[801:900,] Testing <- NewData[901:1000,] d = ncol(NewData)-1 #number of predictor VA <- BESTree::ForgeVA(d,5,3) ## ----include = TRUE------------------------------------------------------ VA ## ----include = TRUE------------------------------------------------------ Fit <- BESTree::BEST(Training,10,VA) PTree <- BESTree::TreePruning(Fit,Valid) Fit[[1]] <- PTree preds <- BESTree::MPredict(Testing[,1:d],Fit) BESTree::Acc(preds,Testing[,d+1])
/scratch/gouwar.j/cran-all/cranData/BESTree/inst/doc/my-vignette.R
--- title: "How to use BEST ?" author: "Cedric Beaulac" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{How to use BEST ?} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- # How to use BEST ? ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) library(BESTree) ``` BEST is Decision Tree algorithm that permits the user to define a precise ordering in the partitionning process. As a statistician I believe the data should speak for it self as much as possible but sometimes guiding the algorithm can be helpfull if the data set contains few observations or if we would like to utilize some expert external knowledge about the structure of the data. Here we will show how to utilize this feature to produces a Decision Tree on a data set containing missing values. To begin let us generate a simple data set : ```{r include = TRUE} set.seed(100) n=1000 X1 <- rnorm(n,0,sd=1) X2 <- rnorm(n,2,sd=2) X3 <- runif(n,0,1) X4 <- runif(n,-2,2) Y <- 1*(X1<0)*(X4<0.5)+0*(X1>0)*(X4<0.5)+1*(X3>0.5)*(X4>0.5)+0*(X3<0.5)*(X4>0.5) #Add some randomized Y RY <- sample(1000,150) Y[RY] <- 1-Y[RY] ``` Now, let us make one important predictor missing : ```{r include = TRUE} X3[X3>0.5] <- NA Data <- cbind(X1,X2,X3,X4,as.factor(Y)) ``` Now that we have our data set with missing values, let us use BEST. To begin, let's create a dummy variable indicating if $X_3$ is missing. Then let us use the ForgeVA function to build the list that will guide BETS through the data partitionning process: ```{r include = TRUE} X5 <- is.na(X3)*1 NewData <- cbind(Data[,1:4],X5,Data[,ncol(Data)]) Training <- NewData[1:800,] Valid <- NewData[801:900,] Testing <- NewData[901:1000,] d = ncol(NewData)-1 #number of predictor VA <- BESTree::ForgeVA(d,5,3) ``` Let us quickly examine what ForgeVA does, it might be the most confusing part of this package. The first input is the number of predictor, the second the location of the gating variable and the third is the location of the variable with missing value. The list looks like : ```{r include = TRUE} VA ``` Where the first element ([1]) is the variable usable when begining, every variables except the ones with missing values. Then the elements at location [d+1] in the list represent the gating abilities of individual predictor. Note in [5+1] that for the branch $X_5 < 0.5$ we will add the predictor $X_3$ (the threshold value 0.5 is included in [[6]][[1]] and the variable added on $X_5 < 0.5$ is included in [[6]][[2]]). Finally, let's run BEST on the training set, prune it according to the validations et and check it's accuracy on the test set : ```{r include = TRUE} Fit <- BESTree::BEST(Training,10,VA) PTree <- BESTree::TreePruning(Fit,Valid) Fit[[1]] <- PTree preds <- BESTree::MPredict(Testing[,1:d],Fit) BESTree::Acc(preds,Testing[,d+1]) ```
/scratch/gouwar.j/cran-all/cranData/BESTree/inst/doc/my-vignette.Rmd
--- title: "How to use BEST ?" author: "Cedric Beaulac" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{How to use BEST ?} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- # How to use BEST ? ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) library(BESTree) ``` BEST is Decision Tree algorithm that permits the user to define a precise ordering in the partitionning process. As a statistician I believe the data should speak for it self as much as possible but sometimes guiding the algorithm can be helpfull if the data set contains few observations or if we would like to utilize some expert external knowledge about the structure of the data. Here we will show how to utilize this feature to produces a Decision Tree on a data set containing missing values. To begin let us generate a simple data set : ```{r include = TRUE} set.seed(100) n=1000 X1 <- rnorm(n,0,sd=1) X2 <- rnorm(n,2,sd=2) X3 <- runif(n,0,1) X4 <- runif(n,-2,2) Y <- 1*(X1<0)*(X4<0.5)+0*(X1>0)*(X4<0.5)+1*(X3>0.5)*(X4>0.5)+0*(X3<0.5)*(X4>0.5) #Add some randomized Y RY <- sample(1000,150) Y[RY] <- 1-Y[RY] ``` Now, let us make one important predictor missing : ```{r include = TRUE} X3[X3>0.5] <- NA Data <- cbind(X1,X2,X3,X4,as.factor(Y)) ``` Now that we have our data set with missing values, let us use BEST. To begin, let's create a dummy variable indicating if $X_3$ is missing. Then let us use the ForgeVA function to build the list that will guide BETS through the data partitionning process: ```{r include = TRUE} X5 <- is.na(X3)*1 NewData <- cbind(Data[,1:4],X5,Data[,ncol(Data)]) Training <- NewData[1:800,] Valid <- NewData[801:900,] Testing <- NewData[901:1000,] d = ncol(NewData)-1 #number of predictor VA <- BESTree::ForgeVA(d,5,3) ``` Let us quickly examine what ForgeVA does, it might be the most confusing part of this package. The first input is the number of predictor, the second the location of the gating variable and the third is the location of the variable with missing value. The list looks like : ```{r include = TRUE} VA ``` Where the first element ([1]) is the variable usable when begining, every variables except the ones with missing values. Then the elements at location [d+1] in the list represent the gating abilities of individual predictor. Note in [5+1] that for the branch $X_5 < 0.5$ we will add the predictor $X_3$ (the threshold value 0.5 is included in [[6]][[1]] and the variable added on $X_5 < 0.5$ is included in [[6]][[2]]). Finally, let's run BEST on the training set, prune it according to the validations et and check it's accuracy on the test set : ```{r include = TRUE} Fit <- BESTree::BEST(Training,10,VA) PTree <- BESTree::TreePruning(Fit,Valid) Fit[[1]] <- PTree preds <- BESTree::MPredict(Testing[,1:d],Fit) BESTree::Acc(preds,Testing[,d+1]) ```
/scratch/gouwar.j/cran-all/cranData/BESTree/vignettes/my-vignette.Rmd
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

symmCpp <- function(X_R, d, unif) {
    .Call('_BET_symmCpp', PACKAGE = 'BET', X_R, d, unif)
}

colorCpp <- function(X_R, d, unif) {
    .Call('_BET_colorCpp', PACKAGE = 'BET', X_R, d, unif)
}

cellCpp <- function(X_R, d, unif) {
    .Call('_BET_cellCpp', PACKAGE = 'BET', X_R, d, unif)
}

BETCpp <- function(X_R, d, unif, asymptotic, test_uniformity, test_independence, independence_index) {
    .Call('_BET_BETCpp', PACKAGE = 'BET', X_R, d, unif, asymptotic, test_uniformity, test_independence, independence_index)
}

BeastCpp <- function(X_R, d, m, B, unif, lambda, test_uniformity, test_independence, independence_index, method, numPerm) {
    .Call('_BET_BeastCpp', PACKAGE = 'BET', X_R, d, m, B, unif, lambda, test_uniformity, test_independence, independence_index, method, numPerm)
}

nullCpp <- function(n, p, d, m, B, lambda, test_uniformity, test_independence, independence_index, method, numPerm) {
    .Call('_BET_nullCpp', PACKAGE = 'BET', n, p, d, m, B, lambda, test_uniformity, test_independence, independence_index, method, numPerm)
}
/scratch/gouwar.j/cran-all/cranData/BET/R/RcppExports.R
uni <- function(x){ Fx <- ecdf(x) Fx(x) } frac.2 <- function(a,d){ a0 <- a b <- rep(NA,d) for (i in 1:d){ temp <- 2^i*a0 if (temp>1){ b[i] <- 1 a0 <- a0-1/2^i }else{ b[i] <- 0 } } b } frac2 <- function(a,d){ if (d==1){ t(t(sapply(a,function(x){frac.2(x,d)}))) }else{ t(sapply(a,function(x){frac.2(x,d)})) } } bex.centers <- function(depth){ # depth >=1 cbind(rep((1:2^depth)/2^depth,2^depth),rep( (1:2^depth)/2^depth, rep(2^depth,2^depth) ))-1/2^(depth+1) } plot_bid <- function(depth, be.ind1, be.ind2){ xyc <- bex.centers(depth) BEx <- frac2(xyc[,1], depth) BEy <- frac2(xyc[,2], depth) RDx <- 2*BEx-1 RDy <- 2*BEy-1 be.ind1.num <- as.numeric(unlist(strsplit(be.ind1,""))) be.ind1.num <- which(be.ind1.num == 1) x.prod <- apply(RDx[,be.ind1.num,drop=F],1,prod) #1:row be.ind2.num <- as.numeric(unlist(strsplit(be.ind2,""))) be.ind2.num <- which(be.ind2.num == 1) y.prod <- apply(RDy[,be.ind2.num,drop=F],1,prod) col.ind <- x.prod*y.prod for (i.col in 1: nrow(xyc)){ if (col.ind[i.col]<0){ xycc <- xyc[i.col,] xp <- c(xycc[1]-1/2^(depth+1),xycc[1]+1/2^(depth+1),xycc[1]+1/2^(depth+1),xycc[1]-1/2^(depth+1)) yp <- c(xycc[2]-1/2^(depth+1),xycc[2]-1/2^(depth+1),xycc[2]+1/2^(depth+1),xycc[2]+1/2^(depth+1)) polygon(xp,yp,border=NA,col=rgb(0,0,1,1/4)) } } } bet <- function(X, dep, unif.margin = FALSE, cex=0.5, index = list(c(1:ncol(X))), ...) UseMethod("bet") bet.plot <- function(X, dep, unif.margin = FALSE, cex=0.5, index = list(c(1:ncol(X))), ...){ if(ncol(X) != 2) stop("X does not have two columns.") p = 2 if(identical(index, list(c(1:p)))){ # c(1:p):uniformity unif.margin = TRUE if(sum(X > 1 | X < 0) > 0) stop("Data out of range [0, 1]") test.uniformity = TRUE test.independence = FALSE }else{ test.uniformity = FALSE test.independence = TRUE # test index cover all 1:p for only 1 time v = c() for (i in 1:length(index)) { v = c(v, index[[i]]) } if(length(v) != p){ stop("index out of range of 1:p") }else if(!all.equal(sort(v), c(1:p))){ stop("index should be a list of disjoint subsets of 1:p") } } bet.res <- BETCpp(X, dep, unif.margin, asymptotic = T, test.uniformity, test.independence, index) be.ind1 <- unlist(strsplit(bet.res$Interaction, "-"))[1] be.ind2 <- unlist(strsplit(bet.res$Interaction, "-"))[2] # be.ind1 <- unlist(strsplit(i1, " "))[1] # be.ind2 <- unlist(strsplit(i2, " "))[1] if(unif.margin){ x <- X[,1] y <- X[,2] }else{ x <- uni(X[,1]) y <- uni(X[,2]) } # par(mgp = c(1.8, 0.5, 0),mar=c(3,3,3,1)) plot(c(0,1), c(0,1), xlab=expression(U[x]),ylab=expression(U[y]),type = "n") points(x,y,mgp = c(1.8, 0.5, 0),xlim=c(0,1),ylim=c(0,1),cex=cex,col=2, pch=16) plot_bid(dep, be.ind1, be.ind2) } MaxBET <- function(X, dep, unif.margin = FALSE, asymptotic = TRUE, plot = FALSE, index = list(c(1:ncol(X)))){ if(is.vector(X)){ X = as.matrix(X, ncol = 1) } n <- nrow(X) p <- ncol(X) if (p == 1){ for (i in 1:n){ if(sum(X > 1 | X < 0) > 0) stop("Data out of range [0, 1]") } } # independent index # mutual.idx = list() # for(i in 1:p){ # mutual.idx[length(mutual.idx) + 1] = c(i) # } if(identical(index, list(c(1:p)))){ # c(1:p):uniformity unif.margin = TRUE if(sum(X > 1 | X < 0) > 0) stop("Data out of range [0, 1]") test.uniformity = TRUE test.independence = FALSE }else{ test.uniformity = FALSE test.independence = TRUE # test index cover all 1:p for only 1 time v = c() for (i in 1:length(index)) { v = c(v, index[[i]]) } if(length(v) != p){ stop("index out of range of 1:p") }else if(!all.equal(sort(v), c(1:p))){ stop("index should be a list of disjoint subsets of 1:p") } } if (plot && (p == 2)) bet.plot(X, dep, unif.margin, 
index = index) if (plot && (p != 2)) warning("plot not available: X does not have two columns.") BETCpp(X, dep, unif.margin, asymptotic, test.uniformity, test.independence, index) } symm <- function(X, dep, unif.margin = FALSE, print.sample.size = TRUE){ if(is.vector(X)){ X = as.matrix(X, ncol = 1) } n <- nrow(X) p <- ncol(X) if (p == 1){ for (i in 1:n){ if (X[i][1] > 1 || X[i][1] < 0) stop("Data out of range [0, 1]") } } res = symmCpp(X, dep, unif.margin)[-1,] res = res[order(res$BinaryIndex),] rownames(res) = 1:nrow(res) if(print.sample.size){ cat("Sample size: ", n, "\n") } return(res) } get.signs <- function(X, dep, unif.margin = FALSE){ if(is.vector(X)){ X = as.matrix(X, ncol = 1) } n <- nrow(X) p <- ncol(X) if (p == 1){ for (i in 1:n){ if (X[i][1] > 1 || X[i][1] < 0) stop("Data out of range [0, 1]") } } res = colorCpp(X, dep, unif.margin) res = res[,order(colnames(res))] return(res) } cell.counts <- function(X, dep, unif.margin = FALSE){ if(is.vector(X)){ X = as.matrix(X, ncol = 1) } n <- nrow(X) p <- ncol(X) if (p == 1){ for (i in 1:n){ if (X[i][1] > 1 || X[i][1] < 0) stop("Data out of range [0, 1]") } } res = cellCpp(X, dep, unif.margin) return(res) } MaxBETs <- function(X, d.max=4, unif.margin = FALSE, asymptotic = TRUE, plot = FALSE, index = list(c(1:ncol(X)))){ if(is.vector(X)){ X = as.matrix(X, ncol = 1) } n <- nrow(X) p <- ncol(X) # independent index # mutual.idx = list() # for(i in 1:p){ # mutual.idx[length(mutual.idx) + 1] = c(i) # } if(identical(index, list(c(1:p)))){ # c(1:p):uniformity unif.margin = TRUE if(sum(X > 1 | X < 0) > 0) stop("Data out of range [0, 1]") test.uniformity = TRUE test.independence = FALSE }else{ test.uniformity = FALSE test.independence = TRUE # test index cover all 1:p for only 1 time v = c() for (i in 1:length(index)) { v = c(v, index[[i]]) } if(length(v) != p){ stop("index out of range of 1:p") }else if(!all.equal(sort(v), c(1:p))){ stop("index should be a list of disjoint subsets of 1:p") } } temp <- MaxBET(X, 1, unif.margin, asymptotic, FALSE, index) #BET bet.adj.pvalues <- rep(NA,d.max) bet.extreme.asymmetry <- rep(NA,d.max) max.abs.count.interaction <- abs(temp$Extreme.Asymmetry) bet.extreme.asymmetry[1] <- temp$Extreme.Asymmetry # table22 <- matrix(c(max.abs.count.interaction/2+n/4, -max.abs.count.interaction/2+n/4, -max.abs.count.interaction/2+n/4, max.abs.count.interaction/2+n/4), 2, 2) # FE22 <- fisher.test(table22,conf.int=FALSE)$p.value- dhyper(table22[1,1],n/2,n/2,n/2)/2 FE.pvalue0 <- min(temp$p.value.bonf, 1) bet.adj.pvalues[1] <- FE.pvalue0 bet.s.interaction <- temp$Interaction if (d.max==1){ return(list(bet.s.pvalue=temp$p.value.bonf,bet.s.extreme.asymmetry=temp$Extreme.Asymmetry, bet.s.index=temp$Interaction, bet.s.zstatistic=temp$z.statistic)) }else{ for (id in 2:d.max){ tempa <- MaxBET(X, id, unif.margin, asymptotic, FALSE, index) #BET max.abs.count.interaction <- abs(tempa$Extreme.Asymmetry) bet.extreme.asymmetry[id] <- tempa$Extreme.Asymmetry if (p == 1){ FE.pvalue <- min(tempa$p.value.bonf/(2^id-1) * ((2^id-1) - (2^(id-1)-1)), 1) }else{ FE.pvalue <- min((tempa$p.value.bonf/(2^(p*id)-p*(2^id-1)-1)) * ((2^(p*id)-p*(2^id-1)-1) - (2^(p*(id-1))-p*(2^(id-1)-1)-1)), 1) } bet.adj.pvalues[id] <- FE.pvalue if (FE.pvalue < FE.pvalue0){ bet.s.interaction <- tempa$Interaction FE.pvalue0 <- FE.pvalue } } bet.s.pvalue <- min(min(bet.adj.pvalues)*d.max,1) dp = which(bet.adj.pvalues==min(bet.adj.pvalues),arr.ind=TRUE)[1] if (plot && p == 2) bet.plot(X, dp, unif.margin, index = index) if (plot && p != 2) warning('plot not available: X does not 
have two columns.') bet.s.extreme.asymmetry <- bet.extreme.asymmetry[which(bet.adj.pvalues==min(bet.adj.pvalues))] bet.s.zstat <- abs(bet.s.extreme.asymmetry)/sqrt(n) return(list(bet.s.pvalue.bonf=bet.s.pvalue, bet.s.extreme.asymmetry=bet.s.extreme.asymmetry, bet.s.index=bet.s.interaction, bet.s.zstatistic=bet.s.zstat)) } } BEAST <- function(X, dep, subsample.percent = 1/2, B = 100, unif.margin = FALSE, lambda = NULL, index = list(c(1:ncol(X))), method = "p", num = NULL){ if(is.vector(X)){ X = as.matrix(X, ncol = 1) } n <- nrow(X) p <- ncol(X) if (p == 1){ if(sum(X > 1 | X < 0) > 0) stop("Data out of range [0, 1]") } if(is.null(lambda)){ lambda <- sqrt(log(2^(p * dep)) / (8*n)) } # independent index # mutual.idx = list() # for(i in 1:p){ # mutual.idx[length(mutual.idx) + 1] = c(i) # } if(identical(index, list(c(1:p)))){ # c(1:p):uniformity unif.margin = TRUE if(sum(X > 1 | X < 0) > 0) stop("Data out of range [0, 1]") test.uniformity = TRUE test.independence = FALSE }else{ test.uniformity = FALSE test.independence = TRUE # test index cover all 1:p for only 1 time v = c() for (i in 1:length(index)) { v = c(v, index[[i]]) } if(length(v) != p){ stop("index out of range of 1:p") }else if(!all.equal(sort(v), c(1:p))){ stop("index should be a list of disjoint subsets of 1:p") } } if(is.null(num)){ if(!method %in% c("p", "s")){ method <- "NA" num <- 1 }else if(method == "p"){ num <- 100 }else if(method == "s"){ num <- 1000 } } m <- n * subsample.percent L = BeastCpp(X, dep, m, B, unif.margin, lambda, test.uniformity, test.independence, index, method, num) L$Interaction = matrix(as.numeric(unlist(strsplit((unlist(strsplit(L$Interaction, "-"))), ""))), nrow = p, byrow = TRUE) return(L) } BEAST.null.simu <- function(n, p, dep, subsample.percent = 1/2, B = 100, lambda = NULL, index = list(c(1:p)), method = "p", num = NULL){ if(is.null(lambda)){ lambda <- sqrt(log(2^(p * dep)) / (8*n)) } # independent index # mutual.idx = list() # for(i in 1:p){ # mutual.idx[length(mutual.idx) + 1] = c(i) # } if(identical(index, list(c(1:p)))){ # c(1:p):uniformity test.uniformity = TRUE test.independence = FALSE }else{ test.uniformity = FALSE test.independence = TRUE # test index cover all 1:p for only 1 time v = c() for (i in 1:length(index)) { v = c(v, index[[i]]) } if(length(v) != p){ stop("index out of range of 1:p") }else if(!all.equal(sort(v), c(1:p))){ stop("index should be a list of disjoint subsets of 1:p") } } if(!method %in% c("p", "s")){ stop("Select a method from permutation or simulation to generate a null distribution.") } if(is.null(num)){ if(method == "p"){ num = 100 }else if(method == "s"){ num = 1000 } } m <- n * subsample.percent nullCpp(n, p, dep, m, B, lambda, test.uniformity, test.independence, index, method, num) }
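## A hedged usage sketch of the functions above on simulated data. frac2() gives
## the first `dep` binary digits used by the binary expansion; MaxBET() then runs
## the test on 100 iid uniform points, first for uniformity on [0,1]^2 (the
## default index) and then for independence between the two margins. The grouping
## index = list(1, 2) is assumed to be the way to request the independence test;
## frac2() is internal, so prefix it with BET::: outside the package namespace.
set.seed(1)
frac2(0.7, 3)                           # first three binary digits of 0.7: 1 0 1
X <- matrix(runif(200), ncol = 2)       # 100 iid points on the unit square
MaxBET(X, dep = 3)                      # test of bivariate uniformity
MaxBET(X, dep = 3, index = list(1, 2))  # test of independence of the two columns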
/scratch/gouwar.j/cran-all/cranData/BET/R/plot.R
#' Coordinates of Brightest Stars in the Night Sky
#'
#' This data set collects the galactic coordinates of the 256 brightest stars in the night sky (Perryman et al. 1997).
#' We consider the longitude (\code{x}) and sine latitude (\code{y}) here.
#'
#' @docType data
#'
#' @usage data(star)
#'
#' @keywords datasets
#'
#'
#' @examples
#' data(star)
#' MaxBETs(cbind(star$x.raw, star$y.raw))
"star"
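## A possible follow-up to the example above (kept commented, and only a sketch):
## feeding the same raw coordinate columns to MaxBET() from plot.R to look for
## dependence between the two coordinates. The depth and the grouping
## index = list(1, 2) are assumptions made purely for illustration.
# data(star)
# MaxBET(cbind(star$x.raw, star$y.raw), dep = 3, plot = TRUE, index = list(1, 2))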
/scratch/gouwar.j/cran-all/cranData/BET/R/star-data.R
#' @title BETS: A package for obtaining and analysing thousands of Brazilian economic time series.
#'
#' @description The Brazilian Economic Time Series (BETS) package provides access and
#' information about the most important Brazilian economic time series.
#'
#' These series are created by three influential centers: the Central Bank of
#' Brazil (BCB), the Brazilian Institute of Geography and Statistics (IBGE)
#' and the Brazilian Institute of Economics, from the Getulio Vargas Foundation
#' (FGV-IBRE). Currently, there are more than 18,640 available time series, most
#' of them free of charge. Besides providing access to this vast database, the
#' package allows the user to interact with the data in an easy and friendly way.
#'
#' For instance, the user can search for a time series using keywords. More
#' importantly, it installs several well-established packages for time series
#' analysis, giving the user the option to perform a complete analysis without
#' having to worry about installing and loading other packages. In the near
#' future, the authors will publish a series of R exercises to be solved with
#' BETS and its statistical/econometric tools, therefore helping the user to
#' understand the behavior of Brazilian time series.
#'
#' @note The authors would like to thank the Getulio
#' Vargas Foundation (FGV) for its support and make it clear that all
#' data in the package is in the public domain. The rights of all centers from
#' which the series are taken are maintained. We reaffirm that BETS
#' is mainly intended for academic usage.
#'
#'
#' @author Pedro Costa Ferreira \email{[email protected]},
#' Jonatha Costa \email{[email protected]},
#' Talitha Speranza \email{[email protected]},
#' Fernando Teixeira \email{[email protected]}
#'
#' @docType package
#' @name BETS
#'
NULL
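## A hedged quick-start sketch of the workflow described above: search the
## metadata by keyword, then download one of the matching series. The calls are
## left commented, like the package's own examples, because they need a live
## database connection; the series code 433 (the monthly IPCA) is an assumed
## illustration and should be confirmed with BETSsearch() first.
# results <- BETSsearch(description = "ipca", periodicity = "M", view = FALSE)
# head(results)
# ipca <- BETSget(433)   # a ts object by default
# plot(ipca)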
/scratch/gouwar.j/cran-all/cranData/BETS/R/BETS.R
#' BETS search #' #' An interface for searching time series with possibility #' to extract the data in different extensions. #' #' @import miniUI #' @import rstudioapi #' @import shiny #' @importFrom utils write.csv write.csv2 #' #' @export BETS.addin_en = function(){ ui<- miniUI::miniPage( gadgetTitleBar("BETS Search Addin"), miniTabstripPanel( # Tab 1 - choose any colourweb. miniTabPanel( "Search", icon = icon("search"), miniContentPanel( fluidRow( column(4, textInput("description", "Description:", c("Search") ) ), column(2, selectInput("periodicity", "Periodicity:", c("All","Monthly","Anual","Quarterly","Weekly","Daily") ) ), column(3, textInput("source", "Source:", c("All") ) ), # Create a new row for the table. DT::dataTableOutput("table") ) ) ), # Tab 2 - choose an R colour similar to a colour you choose miniTabPanel( "Visualization", icon = icon("eye"), miniContentPanel( DT::dataTableOutput("table2") ) ), miniTabPanel( "Export", icon = icon("save"), miniContentPanel( fluidRow( column(2, textInput("code","TS code") ), column(3, textInput("name","file name","Dados") ), column(7, textInput("local","Save file",c(getwd())) ) ), br(), fluidRow( column( 4, div(style = "text-align: center; font-size: 120%; font-weight:bold; background-color:#3D8B37; color:#FFF; border-color:#3D8B37; border-style:solid; border-width:1px; padding: 30px 0px 30px 0px", actionLink("action_csv", "Excel - CSV",style = "color:#FFF"), div("Click to export", style = "font-size:75%; font-weight:normal")) ), column( 4, div(style = "text-align: center; font-size: 120%; font-weight:bold; background-color:#2E8B57; color:#FFF; border-color:#2E8B57; border-style:solid; border-width:1px; padding: 30px 0px 30px 0px", actionLink("action_csv2", "Excel - CSV2",style = "color:#FFF"), div("Click to export", style = "font-size:75%; font-weight:normal")) ), column( 4, div(style = "text-align: center; font-size: 120%; font-weight:bold; background-color:#3c97ac; color:#FFF; border-color:#3c97ac; border-style:solid; border-width:1px; padding: 30px 0px 30px 0px", actionLink("action_rds", "R - RDS",style = "color:#FFF"), div("Click to export", style = "font-size:75%; font-weight:normal")) ) ), br(), fluidRow( column( 4, div(style = "text-align: center; font-size: 120%; font-weight:bold; background-color:#1D5F6D; color:#FFF; border-color:#1D5F6D; border-style:solid; border-width:1px; padding: 30px 0px 30px 0px", actionLink("action_stata", "STATA",style = "color:#FFF"), div("Click to export", style = "font-size:75%; font-weight:normal")) ), column( 4, div(style = "text-align: center; font-size: 120%; font-weight:bold; background-color:#4782ba; color:#FFF; border-color:#4782ba; border-style:solid; border-width:1px; padding: 30px 0px 30px 0px", actionLink("action_sas", "SAS",style = "color:#FFF"), div("Click to export", style = "font-size:75%; font-weight:normal")) ), column( 4, div(style = "text-align: center; font-size: 120%; font-weight:bold; background-color:#e1004b; color:#FFF; border-color:#e1004b; border-style:solid; border-width:1px; padding: 30px 0px 30px 0px", actionLink("action_spss", "SPSS",style = "color:#FFF"), div("Click to export", style = "font-size:75%; font-weight:normal")) ) ), fluidRow( br(), span(verbatimTextOutput('done_export')) ) ) ) ) ) server <- function(input, output, session) { output$table <- DT::renderDataTable(DT::datatable({ req(input$description) # tratamento para o input da descricao req(input$source) # tratamento para o input da fonte #nomes = c("Code","Description","Unit","Periodicity","Start","Last 
Value","Source") data.addin <- BETSsearch(description ="*",view=F) #names(data.addin) = nomes if(input$description != "Search"){ print(input$description) req(input$description) # tratamento para o input da descricao data.addin <- BETSsearch(description = input$description,view=F) if(is.character(data.addin)){ data.addin = BETSsearch(description="*",view=F) }else{ data.addin } }else{ data.addin <- BETSsearch(description ="*",view=F) } if(input$periodicity!= "All"){ req(input$periodicity) if(input$periodicity == "Monthly"){ data.addin <- data.addin[data.addin$periodicity == "M",] } if(input$periodicity == "Anual"){ data.addin <- data.addin[data.addin$periodicity == "A",] } if(input$periodicity == "Quarterly"){ data.addin <- data.addin[data.addin$periodicity == "Q",] } if(input$periodicity == "Weekly"){ data.addin <- data.addin[data.addin$periodicity == "W",] } if(input$periodicity == "Daily"){ data.addin <- data.addin[data.addin$periodicity == "D",] } # tratamento para o input da fonte #data.addin <- search(description = input$description,view=F) if(is.character(data.addin)){ data.addin = BETSsearch(description="*",view=F) }else{ data.addin } } if(input$source!= "All"){ req(input$source) # tratamento para o input da fonte #data.addin <- search(description = input$description,view=F) data.addin <- data.addin[data.addin$source == input$source,] if(is.character(data.addin)){ data.addin = BETSsearch(description="*",view=F) }else{ data.addin } } print(data.addin) if(is.character(data.addin)){ data.addin = BETSsearch(description="*",view=F) }else{ data.addin } #names(data.addin) = nomes data.addin },options = list(pageLength = 5, dom = 'tip'),selection = 'single')) output$table2<- DT::renderDataTable(DT::datatable({ req(input$description) # tratamento para o input da descricao req(input$source) # tratamento para o input da fonte #nomes = c("Code","Description","Unit","Periodicity","Start","Last Value","Source") data.addin <- BETSsearch(description ="*",view=F) #names(data.addin) = nomes if(input$description != "Search"){ print(input$description) req(input$description) # tratamento para o input da descricao data.addin <- BETSsearch(description = input$description,view=F) if(is.character(data.addin)){ data.addin = BETSsearch(description="*",view=F) }else{ data.addin } }else{ data.addin <- BETSsearch(description ="*",view=F) } if(input$periodicity!= "All"){ req(input$periodicity) # tratamento para o input da fonte #data.addin <- search(description = input$description,view=F) data.addin <- data.addin[data.addin$periodicity == input$periodicity,] if(is.character(data.addin)){ data.addin = BETSsearch(description="*",view=F) }else{ data.addin } } if(input$source!= "All"){ req(input$source) # tratamento para o input da fonte #data.addin <- search(description = input$description,view=F) data.addin <- data.addin[data.addin$source == input$source,] if(is.character(data.addin)){ data.addin = BETSsearch(description="*",view=F) }else{ data.addin } } if(is.character(data.addin)){ data.addin = BETSsearch(description="*",view=F) }else{ data.addin } #names(data.addin) = nomes data.addin },options = list(pageLength = 50),selection = 'single')) # observeEvent(input$table_cell_clicked,{ # output$code = renderPrint(input$table_cell_clicked$value) # # }) # output$code = req(renderPrint(input$table_cell_clicked$value)) # code_ts = as.numeric(input$table_cell_clicked$value) # output$code = renderPrint(input$table2_cell_clicked$value) observeEvent(input$action_csv2,{ dados = BETSget(code = input$code,data.frame = T) local 
= paste0(input$local,"/",input$name) write.csv2(dados,file =paste0(local,".csv") ) output$done_export = renderPrint("The file was successfully exported!") }) observeEvent(input$action_csv,{ dados = BETSget(code = input$code,data.frame = T) local = paste0(input$local,"/",input$name) write.csv(dados,file =paste0(local,".csv") ) output$done_export = renderPrint("The file was successfully exported!") }) observeEvent(input$action_rds,{ dados = BETSget(code = input$code,data.frame = T) local = paste0(input$local,"/",input$name) saveRDS(dados,file = paste0(local,".rds")) output$done_export = renderPrint("The file was successfully exported!") }) observeEvent(input$action_sas,{ saveSas(code = as.numeric(input$code,file.name = input$name)) output$done_export = renderPrint("The file was successfully exported!") }) observeEvent(input$action_stata,{ saveStata(code = as.numeric(input$code),file.name = paste0(input$name,".dta")) output$done_export = renderPrint("The file was successfully exported!") }) observeEvent(input$action_spss,{ saveSpss(code = input$code,file.name = input$name) output$done_export = renderPrint("The file was successfully exported!") }) observeEvent(input$done, { invisible(stopApp()) }) } viewer <- dialogViewer("BETS search", width = 1000, height = 800) runGadget(ui, server, viewer = viewer) }
/scratch/gouwar.j/cran-all/cranData/BETS/R/BETS.addin_en.R
#' BETS search #' #' An interface for searching time series with possibility #' to extract the data in different extensions. #' #' @import miniUI #' @import rstudioapi #' @import shiny #' @importFrom utils write.csv write.csv2 #' #' #' @export BETS.addin_pt = function(){ ui<- miniUI::miniPage( gadgetTitleBar("BETS Search Addin"), miniTabstripPanel( # Tab 1 - choose any colourweb. miniTabPanel( "Pesquisa", icon = icon("search"), miniContentPanel( fluidRow( column(4, textInput("description", "Descri\u00E7\u00E3o:", c("Search") ) ), column(2, selectInput("periodicity", "Periodicidade:", c("All","Mensal","Anual","Semanal","Trimestral","Di\u00E1ria") ) ), column(3, textInput("source", "Fonte:", c("All") ) ), # Create a new row for the table. DT::dataTableOutput("table") ) ) ), # Tab 2 - choose an R colour similar to a colour you choose miniTabPanel( "Visualiza\u00E7\u00E3o", icon = icon("eye"), miniContentPanel( DT::dataTableOutput("table2") ) ), miniTabPanel( "Exporta\u00E7\u00E3o", icon = icon("save"), miniContentPanel( fluidRow( column(2, textInput("code","C\u00F3digo da s\u00E9rie") ), column(3, textInput("name","Nome do arquivo","Dados") ), column(7, textInput("local","Salvar no diret\u00F3rio",c(getwd())) ) ), br(), fluidRow( column( 4, div(style = "text-align: center; font-size: 120%; font-weight:bold; background-color:#3D8B37; color:#FFF; border-color:#3D8B37; border-style:solid; border-width:1px; padding: 30px 0px 30px 0px", actionLink("action_csv", "Excel - CSV",style = "color:#FFF"), div("Clique no formato para exportar", style = "font-size:75%; font-weight:normal")) ), column( 4, div(style = "text-align: center; font-size: 120%; font-weight:bold; background-color:#2E8B57; color:#FFF; border-color:#2E8B57; border-style:solid; border-width:1px; padding: 30px 0px 30px 0px", actionLink("action_csv2", "Excel - CSV2",style = "color:#FFF"), div("Clique no formato para exportar", style = "font-size:75%; font-weight:normal")) ), column( 4, div(style = "text-align: center; font-size: 120%; font-weight:bold; background-color:#3c97ac; color:#FFF; border-color:#3c97ac; border-style:solid; border-width:1px; padding: 30px 0px 30px 0px", actionLink("action_rds", "R - RDS",style = "color:#FFF"), div("Clique no formato para exportar", style = "font-size:75%; font-weight:normal")) ) ), br(), fluidRow( column( 4, div(style = "text-align: center; font-size: 120%; font-weight:bold; background-color:#1D5F6D; color:#FFF; border-color:#1D5F6D; border-style:solid; border-width:1px; padding: 30px 0px 30px 0px", actionLink("action_stata", "STATA",style = "color:#FFF"), div("Clique no formato para exportar", style = "font-size:75%; font-weight:normal")) ), column( 4, div(style = "text-align: center; font-size: 120%; font-weight:bold; background-color:#4782ba; color:#FFF; border-color:#4782ba; border-style:solid; border-width:1px; padding: 30px 0px 30px 0px", actionLink("action_sas", "SAS",style = "color:#FFF"), div("Clique no formato para exportar", style = "font-size:75%; font-weight:normal")) ), column( 4, div(style = "text-align: center; font-size: 120%; font-weight:bold; background-color:#e1004b; color:#FFF; border-color:#e1004b; border-style:solid; border-width:1px; padding: 30px 0px 30px 0px", actionLink("action_spss", "SPSS",style = "color:#FFF"), div("Clique no formato para exportar", style = "font-size:75%; font-weight:normal")) ) ), fluidRow( br(), span(verbatimTextOutput('done_export')) ) ) ) ) ) server <- function(input, output, session) { output$table <- DT::renderDataTable(DT::datatable({ 
req(input$description) # tratamento para o input da descricao req(input$source) # tratamento para o input da fonte #nomes = c("Codigo","Descricao","Unidade","Periodicidade","Inicio","Ultimo Valor","Fonte") data.addin <- BETSsearch(description ="*",lang="pt",view=F) #names(data.addin) = nomes if(input$description != "Search"){ req(input$description) # tratamento para o input da descricao data.addin <- BETSsearch(description = input$description,lang="pt",view=F) if(is.character(data.addin)){ data.addin = BETSsearch(description="*",lang="pt",view=F) }else{ data.addin } }else{ data.addin <- BETSsearch(description ="*",lang="pt",view=F) } if(input$periodicity!= "All"){ req(input$periodicity) # tratamento para o input da fonte #data.addin <- search(description = input$description,view=F) data.addin <- data.addin[data.addin$periodicity == input$periodicity,] if(is.character(data.addin)){ data.addin = BETSsearch(description="*",lang="pt",view=F) }else{ data.addin } } if(input$source!= "All"){ req(input$source) # tratamento para o input da fonte #data.addin <- search(description = input$description,view=F) data.addin <- data.addin[data.addin$source == input$source,] if(is.character(data.addin)){ data.addin = BETSsearch(description="*",lang="pt",view=F) }else{ data.addin } } if(is.character(data.addin)){ data.addin = BETSsearch(description="*",lang="pt",view=F) }else{ data.addin } #names(data.addin) = nomes data.addin },options = list(pageLength = 5, dom = 'tip'),selection = 'single')) output$table2<- DT::renderDataTable(DT::datatable({ req(input$description) # tratamento para o input da descricao req(input$source) # tratamento para o input da fonte #nomes = c("Code","Description","Unit","Periodicity","Start","Last Value","Source") data.addin <- BETSsearch(description ="*",view=F) #names(data.addin) = nomes if(input$description != "Search"){ req(input$description) # tratamento para o input da descricao data.addin <- BETSsearch(description = input$description,view=F,lang="pt") if(is.character(data.addin)){ data.addin = BETSsearch(description="*",view=F,lang="pt") }else{ data.addin } }else{ data.addin <- BETSsearch(description ="*",view=F,lang="pt") } if(input$periodicity!= "All"){ req(input$periodicity) # tratamento para o input da fonte #data.addin <- search(description = input$description,view=F) data.addin <- data.addin[data.addin$periodicity == input$periodicity,] if(is.character(data.addin)){ data.addin = BETSsearch(description="*",view=F,lang="pt") }else{ data.addin } } if(input$source!= "All"){ req(input$source) # tratamento para o input da fonte #data.addin <- search(description = input$description,view=F) data.addin <- data.addin[data.addin$source == input$source,] if(is.character(data.addin)){ data.addin = BETSsearch(description="*",view=F,lang="pt") }else{ data.addin } } if(is.character(data.addin)){ data.addin = BETSsearch(description="*",view=F,lang="pt") }else{ data.addin } #names(data.addin) = nomes data.addin },options = list(pageLength = 50),selection = 'single')) observeEvent(input$action_csv2,{ dados = BETSget(code = input$code,data.frame = T) local = paste0(input$local,"/",input$name) write.csv2(dados,file =paste0(local,".csv") ) output$done_export = renderPrint("O arquivo foi exportado com \u00EAxito!") }) observeEvent(input$action_csv,{ dados = BETSget(code = input$code,data.frame = T) local = paste0(input$local,"/",input$name) write.csv(dados,file =paste0(local,".csv") ) output$done_export = renderPrint("O arquivo foi exportado com \u00EAxito!") }) 
observeEvent(input$action_rds,{ dados = BETSget(code = input$code,data.frame = T) local = paste0(input$local,"/",input$name) saveRDS(dados,file = paste0(local,".rds")) output$done_export = renderPrint("O arquivo foi exportado com \u00EAxito!") }) observeEvent(input$action_sas,{ saveSas(code = as.numeric(input$code,file.name = input$name)) output$done_export = renderPrint("O arquivo foi exportado com \u00EAxito!") }) observeEvent(input$action_stata,{ saveStata(code = as.numeric(input$code),file.name = paste0(input$name,".dta")) output$done_export = renderPrint("O arquivo foi exportado com \u00EAxito!") }) observeEvent(input$action_spss,{ saveSpss(code = input$code,file.name = input$name) output$done_export = renderPrint("O arquivo foi exportado com \u00EAxito!") }) observeEvent(input$done, { invisible(stopApp()) }) } viewer <- dialogViewer("BETS search", width = 1000, height = 800) runGadget(ui, server, viewer = viewer) }
/scratch/gouwar.j/cran-all/cranData/BETS/R/BETS.addin_pt.R
#' @title Get a complete time series from a BETS database #' #' @description Extracts a complete time series from either the Central Bank of Brazil (BCB), the Brazilian Institute of Geography and Statistics (IBGE) or the Brazilian Institute of Economics (FGV/IBRE). #' #' @param code A \code{character} or an \code{integer}. The unique code that references the time series. This code can be obtained by using the \code{\link{search}} function. More than one code can be provided at once, through a vector. In this case, be careful with the dates, i.e, parameters \code{from} and \code{to}. They must either be the same length as \code{code}, containing the date limits in order, or an isolated date, but nothing in between. See the examples section. #' @param data.frame A \code{boolean}. True if you want the output to be a data frame. True to \code{ts} output. #' @param from A \code{character} or a \code{Data} object. Starting date of the time series (format YYYY-MM-DD). Can be a vector of dates/characters if the length of the parameter \code{code} is greater than 1. #' @param to A \code{character} or a \code{Data} object. Ending date of the time series (format YYYY-MM-DD). Can be a vector of dates/characters if the length of the parameter \code{code} is greater than 1. #' @param frequency An \code{integer}. The frequency of the time series. It is not needed. It is going to be used only if the metadata for the series is corrupted. #' #' @return A \code{\link[stats]{ts}} (time series) object containing the desired series. #' #' @note Due to the significant size of the databases, it could take a while to retrieve the values. However, it shouldn't take more than 90 seconds. #' #' @examples #' #' # Anual series: GDP at constant prices, in R$ (brazilian reais) #' #BETSget(1208) #' #' # International reserves - Cash concept #' #int.reserves <- get("3543") #' #plot(int.reserves) #' #' # Exchange rate - Free - United States dollar (purchase) #' #us.brl <- get(3691) #' #' # Multiple requests #' # BETSget(code = c(10777,4447),from = "2001-01-01", to = "2016-10-31") #' # BETSget(code = c(10777,4447),from = c("2001-10-31",""),to = c("2016-10-31","")) #' #' # f <- c("2001-10-31","1998-09-01") #' # t <- c("2014-10-31","2015-01-01") #' # BETSget(code = c(10777,4447), from = f, to = t) #' #' # BETSget(code = c(10777,4447),from = "2001-10-31", to = c("2014-10-31","2015-01-01")) #' # BETSget(code = c(10777,4447),from = c("2002-10-31","1997-01-01"), to = "2015-01-01") #' #' #' @seealso \code{\link[stats]{ts}}, \code{\link[BETS]{BETSsearch}} and \code{\link[seasonal]{seas}} #' #' @keywords get #' @import RMySQL #' @import DBI #' @export BETSget = function(code,from = "", to = "",data.frame = FALSE, frequency = NULL){ n = length(code) f = length(from) t = length(to) if(n > 1){ ts = list() nms = c() if(f == 1 && t == 1){ for(i in 1:n){ ts[[i]] = get.series(code[i], from = from, to = to, data.frame = data.frame, frequency = frequency) } } else if(f == n && t == 1){ for(i in 1:n){ ts[[i]] = get.series(code[i], from = from[i], to = to, data.frame = data.frame, frequency = frequency) } } else if(f == 1 && t == n){ for(i in 1:n){ ts[[i]] = get.series(code[i], from = from, to = to[i], data.frame = data.frame, frequency = frequency) } } else if(f == n && t == n){ for(i in 1:n){ ts[[i]] = get.series(code[i], from = from[i], to = to[i], data.frame = data.frame, frequency = frequency) } } for(i in 1:n){ nms[i] = paste0("ts_",code[i]) } names(ts)<- nms return(ts) } else{ return(get.series(code, from, to, data.frame = data.frame, 
frequency = frequency)) } }
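# --- Usage sketch (illustrative, not part of the package sources) ---
# BETSget() accepts a vector of codes; 'from'/'to' may then be a single date or
# one date per code. The calls below reuse codes and dates from the documented
# examples above and require a working connection to the BETS database.
# gdp <- BETSget(1208)  # annual GDP at constant prices
# two <- BETSget(code = c(10777, 4447),
#                from = c("2001-10-31", "1998-09-01"),
#                to   = c("2014-10-31", "2015-01-01"))
# str(two)  # a named list with elements ts_10777 and ts_4447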
/scratch/gouwar.j/cran-all/cranData/BETS/R/BETSget.R
#' @title Search for a Brazilian Economic Time Series #' #' @description Searches the BETS databases for a time series by its description, source, periodicity, code, data, unit of measurement and database name. #' #' @param description A \code{character}. A search string to look for matching series descriptions. Check the syntax rules under the 'Details' section for better performance. #' @param src A \code{character}. The source of the series. See the 'Details' section for a list of the available sources. #' @param periodicity A \code{character}. The periodicity of the series. See the 'Details' section for a list of possible values. #' @param unit A \code{character}. The unit of measurement of the data. See the 'Details' section for a list of possible values. #' @param code An \code{integer}. The index of the series within the database. #' @param view A \code{boolean}. The default is \code{TRUE}. If set to \code{FALSE}, the output's \code{head} will be printed in your console as a \code{data.frame}. #' @param start A \code{date}. Starting date of the series. #' @param lang A \code{character}. The search language. The default is "en" for english, but "pt" for portuguese is also possible. #' #' @return A \code{list} that can be interpreted as a \code{data.frame}. The fields are described below. #' #' \tabular{ll}{ #' code \tab The code/index of the series within the database \cr #' description \tab The description of the series \cr #' periodicity \tab The periodicity of the series \cr #' start \tab Starting date of the series \cr #' source \tab The source of the series \cr #' unit \tab The unit of measurement of the data #'} #' #' @details #' #' \itemize{ #' #' \item{ Syntax rules for the parameter \code{description}, the search string to look for matching series descriptions: #' \enumerate{ #' \item{To search for alternative words, separate them by white spaces. #' Example: \code{description = "ipca core"} means that the series description must contain 'ipca' AND'core' #' } #' \item{To search for whole expressions, surround them with \code{' '}. #' Example: \code{description = "'core ipca' index"} means that the series description must contain 'core ipca' AND 'index' #' } #' \item{To exclude words from the search, insert a \code{~} before each of them. #' Example: \code{description = "ipca ~ core"} means that the series description must contain 'ipca' AND must NOT contain 'core' #' } #' \item{To exclude whole expressions from the search, surround them with code{' '} and insert a \code{~} before each of them. #' Example: \code{description = "~ 'ipca core' index"} means that the series description must contain 'index' AND must NOT contain 'core ipca' #' } #' \item{It is possible to search for multiple words or expressions and to negate multiple words or expressions, as long as the preceeding rules are observed. #' } #' \item{The white space after the negation sign (\code{~}) is not required. But the white spaces AFTER expressions or words ARE required. 
#' } #' } #' } #' #' \item{ Possible values for the parameter \code{src}: #' \tabular{ll}{ #' IBGE \tab Brazilian Institute of Geography and Statistics \cr #' BCB \tab Central Bank of Brazil \cr #' FGV \tab Getulio Vargas Foundation \cr #' FGv-IBRE \tab Getulio Vargas Foundation - Brazilian Institute of Economics \cr #' BCB e FGV \tab Central Bank of Brazil and Getulio Vargas Foundation \cr #' BCB-Deban \tab Cetral Bank of Brazil - Department of Banking and Payments \cr #' BCB-Depin \tab Central Bank of Brazil - Department of International Reserves \cr #' BCB-Derin \tab Central Bank of Brazil - Department of International Affairs \cr #' BCB-Desig \tab Central Bank of Brazil - Department of Financial Monitoring \cr #' BCB-Secre \tab Central Bank of Brazil - Executive Secretariat \cr #' BCB-Demab \tab Central Bank of Brazil - Department of Open Market Operations \cr #' BCB-Denor \tab Central Bank of Brazil - Department of Financial System Regulation \cr #' BCB-Depec \tab Central Bank of Brazil - Department of Economics \cr #' Sisbacen \tab Central Bank of Brazil Information System \cr #' Abecip \tab Brazilian Association of Real Estate Loans and Savings Companies #' } #' } #' #' \item{ Possible values for the parameter \code{periodicity}: #' \tabular{ll}{ #' A \tab anual data \cr #' M \tab monthly data \cr #' Q \tab quaterly data \cr #' W \tab weekly data \cr #' D \tab daily data #' } #' } #' #' \item{ Possible values for the parameter \code{unit}: #' \tabular{ll}{ #' R$ \tab brazilian reais \cr #' $ \tab US dolars \cr #' \% \tab percentage #' } #' } #'} #' #' #' #' #' @examples #' #not run #' #BETSsearch(description="sales",view = FALSE) #' #' #' #BETSsearch(src="Denor", view = FALSE) #' #' #' #BETSsearch(periodicity="A", view = FALSE) #' #' #' @references #' #' Central Bank of Brazil #' #' @keywords search #' #' @import RMySQL #' @import DBI #' @import sqldf #' @importFrom stringr str_split #' @importFrom utils View #' @importFrom dplyr as_tibble #' @export BETSsearch = function(description="*",src,periodicity,unit,code,start,view=FALSE,lang="en"){ conn = connection() if(lang == "en"){ tb = "metadata_en" } else { tb = "metadata_pt" } if(description == "*" && missing(src) && missing(periodicity) && missing(unit) && missing(code)){ query <- paste0("select * from ", tb) } else { if(missing(description) && missing(src) && missing(periodicity) && missing(unit) && missing(code)){ invisible(dbDisconnect(conn)) return(msg("No search parameters. 
Please set the values of one or more parameters.")) } params = vector(mode = "character") if(!missing(description)){ ## Break description parameters and_params = vector(mode = "character") or_params = vector(mode = "character") # Workaround description = paste0(description, " ") # Do not match whole expressions exprs = regmatches(description,gregexpr("~ ?'(.*?)'",description))[[1]] if(length(exprs) != 0){ for(i in 1:length(exprs)){ description = gsub(exprs[i], "", description) exprs[i] = gsub("~", "", exprs[i]) exprs[i] = gsub("'", "", exprs[i]) exprs[i] = trimws(exprs[i]) and_params = c(and_params, paste0("description not like " ,"\'%", exprs[i] ,"%\'")) } } # Match whole expressions exprs = regmatches(description,gregexpr("'(.*?)'",description))[[1]] if(length(exprs) != 0){ for(i in 1:length(exprs)){ description = gsub(exprs[i], "", description) exprs[i] = gsub("'", "", exprs[i]) exprs[i] = trimws(exprs[i]) or_params = c(or_params, paste0("description like " ,"\'%", exprs[i] ,"%\'")) } } # Do not match words words = regmatches(description,gregexpr("~ ?(.*?) ",description))[[1]] if(length(words) != 0){ for(i in 1:length(words)){ description = gsub(words[i], "", description) words[i] = gsub("~", "", words[i]) words[i] = trimws(words[i]) and_params = c(and_params, paste0("description not like " ,"\'%", words[i] ,"%\'")) } } # Match words words = str_split(description, " ")[[1]] words = words[words != ""] if(length(words) != 0){ for(i in 1:length(words)){ or_params = c(or_params, paste0("description like " ,"\'%", words[i] ,"%\'")) } } if(length(and_params) > length(or_params)){ desc = and_params[1] and_params = and_params[-1] } else { desc = or_params[1] or_params = or_params[-1] } if(length(or_params) != 0){ for(i in 1:length(or_params)){ desc = paste(desc, "and", or_params[i]) } } if(length(and_params) != 0){ for(i in 1:length(and_params)){ desc = paste(desc, "and", and_params[i]) } } params = c(params, desc) } if(!missing(src)){ params = c(params, paste0("source like " ,"\'%", src ,"%\'")) } if(!missing(periodicity)){ params = c(params, paste0("periodicity like " ,"\'%", periodicity ,"%\'")) } if(!missing(unit)){ params = c(params, paste0("unit like " ,"\'%", unit ,"%\'")) } if(!missing(code)){ params = c(params, paste0("code like " ,"\'", code ,"\'")) } if(!missing(start)){ params = c(params, paste0("start like " ,"\'", start ,"\'")) } query = paste0("select * from ", tb, " where") query = paste(query, params[1]) if(length(params) != 1) { for(i in 2:length(params)){ query = paste(query, "and", params[i]) } } } results = tryCatch({ dbGetQuery(conn, query) }, error = function(e){ metadata_pt <- readRDS(file.path(system.file(package="BETS"),"/metadata_pt.rds")) metadata_en <- readRDS(file.path(system.file(package="BETS"),"/metadata_en.rds")) return(sqldf(query)) }) if(Sys.info()[["sysname"]] == "Linux"){ encod = "latin1" } else { encod = "UTF-8" } results$description = iconv(results$description, from = encod) results$unit = iconv(results$unit, from = encod) if(!is.null(conn)){ count = dbGetQuery(conn,paste0("select count(*) from ", tb)) invisible(dbDisconnect(conn)) } else { count = nrow(results) } if(nrow(results) > 0){ msg(paste("Found", nrow(results),"out of", count ,"time series.",sep=" ")) results = as_tibble(results) if(view==T){ return(utils::View(results,"Metadata")) } else{ return(results) } } else{ msg("No series found. Try using another combination of search terms.") } }
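# --- Usage sketch (illustrative, not part of the package sources) ---
# The 'description' syntax combines quoted expressions and ~ negations, as
# documented above. These calls need access to the BETS metadata tables.
# BETSsearch(description = "'ipca core' ~ smoothed", view = FALSE)
# BETSsearch(description = "gdp", src = "IBGE", periodicity = "Q", view = FALSE)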
/scratch/gouwar.j/cran-all/cranData/BETS/R/BETSsearch.R
#' @title Display a list of sources available in the BETS package
#'
#' @description Displays, in the console, a list of the data sources available in the BETS package.
#' The number of sources will increase with new versions of the package.
#'
#' @export
BETSsources <- function(){
    return(message("The sources available at BETS, to date, are: \n > Banco Central, IBGE, Sidra, FGV"))
}
/scratch/gouwar.j/cran-all/cranData/BETS/R/BETSsources.R
#' @title Perform an ARCH test #' #' @description Performs an ARCH test and show the results. Formerly, this function was part of FinTS, now an obsoleted package. #' #' @param x A \code{ts} object. The time series #' @param lags An \code{integer}. Maximum number of lags #' @param demean A \code{boolean}. Should the series be demeaned? #' @param alpha A \code{numeric} value. Significance level #' #' @return A \code{list} with the results of the ARCH test #' #' @importFrom stats embed lm pchisq resid #' @export #' #' @author Spencer Graves \email{[email protected]}, Talitha Speranza \email{[email protected]} arch_test <- function (x, lags = 12, demean = FALSE, alpha = 0.5) { # Capture name of x for documentation in the output xName <- deparse(substitute(x)) # x <- as.vector(x) if(demean) x <- scale(x, center = TRUE, scale = FALSE) # lags <- lags + 1 mat <- embed(x^2, lags) arch.lm <- summary(lm(mat[, 1] ~ mat[, -1])) STATISTIC <- arch.lm$r.squared * length(resid(arch.lm)) #names(STATISTIC) <- "Chi-squared" PARAMETER <- lags - 1 #names(PARAMETER) <- "df" PVAL <- 1 - pchisq(STATISTIC, df = PARAMETER) METHOD <- "ARCH LM-test; Null hypothesis: no ARCH effects" # result <- list(statistic = STATISTIC, parameter = PARAMETER, # p.value = PVAL, method = METHOD, data.name = # xName) # class(result) <- "htest" if(PVAL >= alpha){ htk = TRUE } else { htk = FALSE } result <- data.frame("statistic" = STATISTIC, "p.value" = PVAL, "htk" = htk) return(result) }
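# --- Usage sketch (illustrative, not part of the package sources) ---
# arch_test() only needs a numeric series, so it can be tried offline on
# simulated white noise; for white noise the null of no ARCH effects should
# usually not be rejected (htk = TRUE at the chosen alpha).
set.seed(123)
x <- ts(rnorm(300))
arch_test(x, lags = 12, alpha = 0.05)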
/scratch/gouwar.j/cran-all/cranData/BETS/R/arch_test.R
as.date <- function(dates){ not_formatted = grepl("/",dates) if(TRUE %in% not_formatted){ for(i in 1:length(dates)){ if(not_formatted[i]){ dt = strsplit(dates[i],"/")[[1]] if(nchar(dt[1]) != 4){ year = dt[3] dt[3] = dt[1] dt[1] = year } dates[i] = paste(dt,collapse = "-") } } } return(as.Date(dates)) }
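# --- Usage sketch (illustrative, not part of the package sources) ---
# as.date() is an internal helper: it accepts dates written as "dd/mm/yyyy" or
# "yyyy/mm/dd" (or already in ISO form) and returns Date objects.
as.date(c("31/10/2001", "2014/10/31", "2015-01-01"))
# expected: "2001-10-31" "2014-10-31" "2015-01-01"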
/scratch/gouwar.j/cran-all/cranData/BETS/R/as.date.R
#' @title bcbExpectA
#'
#' @description Market expectations with annual reference.
#'
#' @param variables Possible options: "Media", "Mediana", "DesvioPadrao",
#' "CoeficienteVariacao", "Minimo", "Maximo".
#' @param start Initial date at which the data was projected, in ISO format.
#' @param end Final date at which the data was projected, in ISO format.
#' @param indicator A string. One of the available indicators (see the 'Note' section).
#' @param limit An integer. The maximum number of records to request; the upper bound is 10000.
#'
#' @import rjson
#' @return A data.frame.
#' @export
#'
#' @note The available indicators are: Balanca comercial, Balanco de pagamentos, Fiscal, IGP-DI,
#' IGP-M, INPC, IPA-DI, IPA-M, IPCA, IPCA-15, IPC-FIPE, Precos administrados por contrato e
#' monitorado, Producao industrial, PIB Industrial, PIB Servicos, PIB Total, Meta para taxa
#' over-selic and Taxa de cambio.
#'
#' In collaboration with Angelo Salton <https://github.com/angelosalton>.
#'
#' @examples
#' # bcbExpectA()
bcbExpectA <- function(indicator = 'IPCA', limit = 100,
                       variables = c("Media","Mediana","DesvioPadrao","CoeficienteVariacao",
                                     "Minimo","Maximo","numeroRespondentes","baseCalculo"),
                       start, end){

    indicator = str_replace_all(indicator," ","%20")

    if(limit > 10000 | limit < 0) stop("You need to provide a limit between 0 and 10000!")

    # query components: indicator filter, row limit and the $select clause
    variaveis_a <- paste("filter=Indicador%20eq%20'",indicator,"'",sep="")
    variaveis_b <- paste("top=",limit,sep="")
    variaveis_c <- paste("Indicador", "IndicadorDetalhe", "Data", "DataReferencia",
                         paste(variables, collapse = ","), sep = ",")

    # date filter: only use the bounds that were actually supplied
    if(missing(start) & missing(end)){
        timespan <- ""
    } else if(!missing(start) & missing(end)){
        timespan <- paste0("%20and%20Data%20gt%20'", start,"'")
    } else if(!missing(start) & !missing(end)){
        timespan <- paste0("%20and%20Data%20gt%20'", start, "'%20and%20",
                           "Data%20lt%20'", end,"'")
    } else {
        timespan <- paste0("%20and%20Data%20lt%20'", end,"'")
    }

    baseurl <- "https://olinda.bcb.gov.br/olinda/servico/Expectativas/versao/v1/odata/"

    query_url <- paste(baseurl, "ExpectativasMercadoAnuais",
                       "?$",variaveis_b,"&$",variaveis_a,timespan,
                       "&$select=",variaveis_c,
                       sep = "", collapse = "")

    data <- fromJSON(file = query_url)$value
    data <- do.call("rbind", lapply(data, as.data.frame))

    return(data)
}
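# --- Usage sketch (illustrative, not part of the package sources) ---
# Queries the BCB Olinda expectations API, so it needs internet access. The
# indicator name, variables and date bounds follow the arguments documented above.
# ipca_exp <- bcbExpectA(indicator = "IPCA", limit = 500,
#                        variables = c("Media", "Mediana", "DesvioPadrao"),
#                        start = "2023-01-01", end = "2023-12-31")
# head(ipca_exp)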
/scratch/gouwar.j/cran-all/cranData/BETS/R/bcbExpectA.R
#' @title bcbExpectATop5
#'
#' @description Annual market expectations (Top 5).
#'
#' @param variables Possible options: "Media", "Mediana", "DesvioPadrao",
#' "CoeficienteVariacao", "Minimo", "Maximo".
#' @param start Initial date at which the data was projected, in ISO format.
#' @param end Final date at which the data was projected, in ISO format.
#' @param indicator A string. One of the available indicators (see the 'Note' section).
#' @param limit An integer. The maximum number of records to request; the upper bound is 10000.
#'
#' @import rjson
#' @return A data.frame.
#' @export
#'
#' @note The available indicators are: IGP-DI, IGP-M, IPCA, Meta para taxa over-selic and Taxa de cambio.
#'
#' @examples
#' # bcbExpectATop5()
bcbExpectATop5 <- function(indicator = 'IGP-DI', limit = 100,
                           variables = c("tipoCalculo","Media","Mediana","DesvioPadrao",
                                         "CoeficienteVariacao","Minimo","Maximo"),
                           start, end){

    indicator = str_replace_all(indicator," ","%20")

    if(limit > 10000 | limit < 0) stop("You need to provide a limit between 0 and 10000!")

    # query components: indicator filter, row limit and the $select clause
    variaveis_a <- paste("filter=Indicador%20eq%20'",indicator,"'",sep="")
    variaveis_b <- paste("top=",limit,sep="")
    variaveis_c <- paste("Indicador", "IndicadorDetalhe", "Data", "DataReferencia",
                         paste(variables, collapse = ","), sep = ",")

    # date filter: only use the bounds that were actually supplied
    if(missing(start) & missing(end)){
        timespan <- ""
    } else if(!missing(start) & missing(end)){
        timespan <- paste0("%20and%20Data%20gt%20'", start,"'")
    } else if(!missing(start) & !missing(end)){
        timespan <- paste0("%20and%20Data%20gt%20'", start, "'%20and%20",
                           "Data%20lt%20'", end,"'")
    } else {
        timespan <- paste0("%20and%20Data%20lt%20'", end,"'")
    }

    baseurl <- "https://olinda.bcb.gov.br/olinda/servico/Expectativas/versao/v1/odata/"

    query_url <- paste(baseurl, "ExpectativasMercadoTop5Anuais",
                       "?$",variaveis_b,"&$",variaveis_a,timespan,
                       "&$select=",variaveis_c,
                       sep = "", collapse = "")

    data <- fromJSON(file = query_url)$value
    data <- do.call("rbind", lapply(data, as.data.frame))

    return(data)
}
/scratch/gouwar.j/cran-all/cranData/BETS/R/bcbExpectAtop5.R
#' @title bcbExpectInf12
#'
#' @description Market expectations for inflation over the next 12 months.
#'
#' @param variables Possible options: "Media", "Mediana", "DesvioPadrao",
#' "CoeficienteVariacao", "Minimo", "Maximo".
#' @param start Initial date at which the data was projected, in ISO format.
#' @param end Final date at which the data was projected, in ISO format.
#' @param indicator A string. One of the available indicators (see the 'Note' section).
#' @param limit An integer. The maximum number of records to request; the upper bound is 10000.
#'
#' @import rjson
#' @return A data.frame.
#' @export
#'
#' @note The available indicators are: IGP-DI, IGP-M, INPC, IPA-DI, IPA-M, IPCA, IPCA-15, IPC-FIPE.
#'
#' @examples
#' # bcbExpectInf12()
bcbExpectInf12 <- function(indicator = 'IPC-FIPE', limit = 100,
                           variables = c("Media","Mediana","DesvioPadrao","CoeficienteVariacao",
                                         "Minimo","Maximo","numeroRespondentes","baseCalculo"),
                           start, end){

    indicator = str_replace_all(indicator," ","%20")

    if(limit > 10000 | limit < 0) stop("You need to provide a limit between 0 and 10000!")

    # query components: indicator filter, row limit and the $select clause
    variaveis_a <- paste("filter=Indicador%20eq%20'",indicator,"'",sep="")
    variaveis_b <- paste("top=",limit,sep="")
    variaveis_c <- paste("Indicador", "Data", "Suavizada",
                         paste(variables, collapse = ","), sep = ",")

    # date filter: only use the bounds that were actually supplied
    if(missing(start) & missing(end)){
        timespan <- ""
    } else if(!missing(start) & missing(end)){
        timespan <- paste0("%20and%20Data%20gt%20'", start,"'")
    } else if(!missing(start) & !missing(end)){
        timespan <- paste0("%20and%20Data%20gt%20'", start, "'%20and%20",
                           "Data%20lt%20'", end,"'")
    } else {
        timespan <- paste0("%20and%20Data%20lt%20'", end,"'")
    }

    baseurl <- "https://olinda.bcb.gov.br/olinda/servico/Expectativas/versao/v1/odata/"

    query_url <- paste(baseurl, "ExpectativasMercadoInflacao12Meses",
                       "?$",variaveis_b,"&$",variaveis_a,timespan,
                       "&$select=",variaveis_c,
                       sep = "", collapse = "")

    data <- fromJSON(file = query_url)$value
    data <- do.call("rbind", lapply(data, as.data.frame))

    return(data)
}
/scratch/gouwar.j/cran-all/cranData/BETS/R/bcbExpectInf12.R
#' @title bcbExpectM
#'
#' @description Market expectations with monthly reference.
#'
#' @param variables Possible options: "Media", "Mediana", "DesvioPadrao",
#' "CoeficienteVariacao", "Minimo", "Maximo".
#' @param start Initial date at which the data was projected, in ISO format.
#' @param end Final date at which the data was projected, in ISO format.
#' @param indicator A string. One of the available indicators (see the 'Note' section).
#' @param limit An integer. The maximum number of records to request; the upper bound is 10000.
#'
#' @import rjson stringr dplyr
#' @return A data.frame.
#' @export
#'
#' @note The available indicators are: IGP-DI, IGP-M, INPC, IPA-DI, IPA-M, IPCA, IPCA-15, IPC-FIPE,
#' Producao industrial, Meta para taxa over-selic and Taxa de cambio.
#'
#' @examples
#' # bcbExpectM()
bcbExpectM <- function(indicator = 'IPCA-15', limit = 100,
                       variables = c("Media","Mediana","DesvioPadrao","CoeficienteVariacao",
                                     "Minimo","Maximo","numeroRespondentes","baseCalculo"),
                       start, end){

    indicator = str_replace_all(indicator," ","%20")

    if(limit > 10000 | limit < 0) stop("You need to provide a limit between 0 and 10000!")

    # query components: indicator filter, row limit and the $select clause
    variaveis_a <- paste("filter=Indicador%20eq%20'",indicator,"'",sep="")
    variaveis_b <- paste("top=",limit,sep="")

    k = paste(variables, collapse = ",")
    variaveis_c <- paste("Indicador,Data,DataReferencia", k, sep = ",")

    # date filter: only use the bounds that were actually supplied
    if(missing(start) & missing(end)){
        timespan <- ""
    } else if(!missing(start) & missing(end)){
        timespan <- paste0("%20and%20Data%20gt%20'", start,"'")
    } else if(!missing(start) & !missing(end)){
        timespan <- paste0("%20and%20Data%20gt%20'", start, "'%20and%20",
                           "Data%20lt%20'", end,"'")
    } else {
        timespan <- paste0("%20and%20Data%20lt%20'", end,"'")
    }

    baseurl <- "https://olinda.bcb.gov.br/olinda/servico/Expectativas/versao/v1/odata/"

    query_url <- paste(baseurl, "ExpectativaMercadoMensais",
                       "?$",variaveis_b,"&$",variaveis_a,timespan,
                       "&$select=",variaveis_c,
                       sep = "", collapse = "")

    data <- fromJSON(file = query_url)$value
    data <- do.call("rbind", lapply(data, as.data.frame))

    # Post-processing left commented out in the original sources:
    # data$Data = as.Date(data$Data)
    # data$DataReferencia = as.Date(as.character(data$DataReferencia), format = "%m/%Y")
    # data = data %>% arrange(Data)
    # data = data %>% arrange(DataReferencia)
    # f = as.character(unique(data$DataReferencia))
    # f = (f[(str_detect(f,"2019"))])
    # if(trat){
    #     df = setNames(data.frame(matrix(ncol = length(f), nrow = 1)), f)
    # }

    return(data)
}
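# --- Usage sketch (illustrative, not part of the package sources) ---
# Monthly expectations for a given indicator; needs internet access to the
# BCB Olinda API.
# ipca15_m <- bcbExpectM(indicator = "IPCA-15", limit = 1000,
#                        variables = c("Mediana", "numeroRespondentes"),
#                        start = "2023-01-01", end = "2023-12-31")
# head(ipca15_m)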
/scratch/gouwar.j/cran-all/cranData/BETS/R/bcbExpectM.R
#' @title bcbExpectMTop5
#'
#' @description Monthly market expectations (Top 5).
#'
#' @param variables Possible options: "Media", "Mediana", "DesvioPadrao",
#' "CoeficienteVariacao", "Minimo", "Maximo".
#' @param start Initial date at which the data was projected, in ISO format.
#' @param end Final date at which the data was projected, in ISO format.
#' @param indicator A string. One of the available indicators (see the 'Note' section).
#' @param limit An integer. The maximum number of records to request; the upper bound is 10000.
#'
#' @import rjson
#' @return A data.frame.
#' @export
#'
#' @note The available indicators are: IGP-DI, IGP-M, IPCA, Meta para taxa over-selic and Taxa de cambio.
#'
#' @examples
#' # bcbExpectMTop5()
bcbExpectMTop5 <- function(indicator = 'IGP-DI', limit = 100,
                           variables = c("tipoCalculo","Media","Mediana","DesvioPadrao",
                                         "CoeficienteVariacao","Minimo","Maximo"),
                           start, end){

    indicator = str_replace_all(indicator," ","%20")

    if(limit > 10000 | limit < 0) stop("You need to provide a limit between 0 and 10000!")

    # query components: indicator filter, row limit and the $select clause
    variaveis_a <- paste("filter=Indicador%20eq%20'",indicator,"'",sep="")
    variaveis_b <- paste("top=",limit,sep="")
    variaveis_c <- paste("Indicador", "IndicadorDetalhe", "Data", "DataReferencia",
                         paste(variables, collapse = ","), sep = ",")

    # date filter: only use the bounds that were actually supplied
    if(missing(start) & missing(end)){
        timespan <- ""
    } else if(!missing(start) & missing(end)){
        timespan <- paste0("%20and%20Data%20gt%20'", start,"'")
    } else if(!missing(start) & !missing(end)){
        timespan <- paste0("%20and%20Data%20gt%20'", start, "'%20and%20",
                           "Data%20lt%20'", end,"'")
    } else {
        timespan <- paste0("%20and%20Data%20lt%20'", end,"'")
    }

    baseurl <- "https://olinda.bcb.gov.br/olinda/servico/Expectativas/versao/v1/odata/"

    query_url <- paste(baseurl, "ExpectativasMercadoTop5Mensais",
                       "?$",variaveis_b,"&$",variaveis_a,timespan,
                       "&$select=",variaveis_c,
                       sep = "", collapse = "")

    data <- fromJSON(file = query_url)$value
    data <- do.call("rbind", lapply(data, as.data.frame))

    return(data)
}
/scratch/gouwar.j/cran-all/cranData/BETS/R/bcbExpectMtop5.R
#' @title bcbExpectT
#'
#' @description Quarterly market expectations.
#'
#' @param variables Possible options: "Media", "Mediana", "DesvioPadrao",
#' "CoeficienteVariacao", "Minimo", "Maximo".
#' @param start Initial date at which the data was projected, in ISO format.
#' @param end Final date at which the data was projected, in ISO format.
#' @param indicator A string. One of the available indicators (see the 'Note' section).
#' @param limit An integer. The maximum number of records to request; the upper bound is 10000.
#'
#' @import rjson
#' @return A data.frame.
#' @export
#'
#' @note The available indicators are: PIB Agropecuario, PIB Industrial, PIB Serviços and PIB Total.
#'
#' @examples
#' # bcbExpectT()
bcbExpectT <- function(indicator = 'PIB Total', limit = 100,
                       variables = c("Media","Mediana","DesvioPadrao","CoeficienteVariacao",
                                     "Minimo","Maximo","numeroRespondentes"),
                       start, end){

    indicator = str_replace_all(indicator," ","%20")

    if(limit > 10000 | limit < 0) stop("You need to provide a limit between 0 and 10000!")

    # query components: indicator filter, row limit and the $select clause
    variaveis_a <- paste("filter=Indicador%20eq%20'",indicator,"'",sep="")
    variaveis_b <- paste("top=",limit,sep="")
    variaveis_c <- paste("Indicador", "IndicadorDetalhe", "Data", "DataReferencia",
                         paste(variables, collapse = ","), sep = ",")

    # date filter: only use the bounds that were actually supplied
    if(missing(start) & missing(end)){
        timespan <- ""
    } else if(!missing(start) & missing(end)){
        timespan <- paste0("%20and%20Data%20gt%20'", start,"'")
    } else if(!missing(start) & !missing(end)){
        timespan <- paste0("%20and%20Data%20gt%20'", start, "'%20and%20",
                           "Data%20lt%20'", end,"'")
    } else {
        timespan <- paste0("%20and%20Data%20lt%20'", end,"'")
    }

    baseurl <- "https://olinda.bcb.gov.br/olinda/servico/Expectativas/versao/v1/odata/"

    query_url <- paste(baseurl, "ExpectativasMercadoTrimestrais",
                       "?$",variaveis_b,"&$",variaveis_a,timespan,
                       "&$select=",variaveis_c,
                       sep = "", collapse = "")

    data <- fromJSON(file = query_url)$value
    data <- do.call("rbind", lapply(data, as.data.frame))

    return(data)
}
/scratch/gouwar.j/cran-all/cranData/BETS/R/bcbExpectT.R
#' @title Create a chart with BETS aesthetics #' #' @description Create a professional looking chart, using a pre-defined BETS series or a custom series. #' #' @param ts A \code{character} or a \code{ts} object. A custom time series or the name of a pre-defined series. A complete list of names is under the 'Details' section. #' @param style A \code{character}. Should the chart be made with Plotly (style = "plotly") or with R standard library (style = "normal")? #' @param lang A \code{character}. The language. For now, only 'en' (english) is available. #' @param file A \code{character}. The whole path, including a custom name, for the output (an image file). The default value is NULL. If left to NULL, the chart will be rendered in the standard R plotting area. #' @param open A \code{boolean}. Whether to open the file containing the chart. #' @param params A \code{list}. Parameters for drawing custom charts. See the 'details' section. #' #' @details #' #' \bold{Names of pre-defined charts:} #' #' \bold{1. Business Cycle Dashboard ('plotly' style)} #' #' \tabular{lll}{ #' \emph{VALUE} \tab \emph{DESCRIPTION} \tab \emph{CODE} \cr #' \tab \tab \cr #' \emph{'iie_br'} \tab Uncertainty Index \tab ST_100.0 \cr #' \emph{'sent_ind'} \tab Economic Sentiment Index (average between several confidence indexes) \tab (*) \cr #' \emph{'gdp_mon'} \tab GDP Monthly and Interanual Variation (last values) - GDP Monitor (FGV/IBRE) \tab (*) \cr #' \emph{'ei_vars'} \tab Economic Indicators (Leading and Coincident) monthly variation \tab (*) \cr #' \emph{'ei_comps'} \tab Economic Indicators (Leading and Coincident) components variation \tab (*) \cr #' \emph{'lei'} \tab Leading Economic Indicator (LEI - FGV/IBRE with The Conference Board) \tab (*) \cr #' \emph{'cei'} \tab Coincident Economic Indicator (CEI - FGV/IBRE with the Conference Board) \tab (*) \cr #' \emph{'gdp_vars'} \tab GDP components variation (whole series) - GDP Monitor (FGV/IBRE) \tab (*) \cr #' \emph{'misery_index} \tab Misery Index \tab 13522 plus 24369 \cr #' \emph{'gdp_comps'} \tab GDP components variation (last values) - GDP Monitor (FGV/IBRE) \tab (*) \cr #' \emph{'gdp_unemp'} \tab GDP monthly levels versus Unemployement Rate \tab 22109 and 24369 \cr #' \emph{'conf_lvl'} \tab Enterprises Confidence Index versus Consumers Confidence Index \tab (*) \cr #' \emph{'inst_cap'} \tab Installed Capacity Index \tab (*) \cr #' \emph{'lab_lead'} \tab Labor Leading Indicator \tab (*) \cr #' \emph{'lab_coin'} \tab Labor Coincident Indicator \tab (*) \cr #' \emph{'transf_ind'} \tab Transformation Industry Confidence Index (Expectations versus Present Situation) \tab (*) \cr #' \emph{'servc'} \tab Services Confidence Index (Expectations versus Present Situation) \tab (*) \cr #' \emph{'constr'} \tab Construction Confidence Index (Expectations versus Present Situation) \tab (*) \cr #' \emph{'retail'} \tab Retail Sellers Confidence Index (Expectations versus Present Situation) \tab (*) \cr #' \emph{'consm'} \tab Consumer Confidence Index (Expectations versus Present Situation) \tab (*) #'} #' #' \bold{2. 
Macro Situation Dashboard ('normal' style)} #' #' \tabular{lll}{ #' \emph{VALUE} \tab \emph{DESCRIPTION} \tab \emph{CODE} \cr #' \tab \tab \cr #' \emph{'ipca_with_core'} \tab National consumer price index (IPCA) - in 12 months and Broad national consumer price index - Core IPCA trimmed means smoothed \tab 13522 and 4466 \cr #' \emph{'ulc'} \tab Unit labor cost - ULC-US$ - June/1994=100 \tab 11777 \cr #' \emph{'eap'} \tab Economically active population \tab 10810 \cr #' \emph{'cdb'} \tab Time deposits (CDB/RDB-preset) - Daily return (percentage) \tab 14 \cr #' \emph{'indprod'} \tab Prodcution Indicators (2012=100) - General \tab 21859 \cr #' \emph{'selic'} \tab Interest rate - Selic accumulated in the month in annual terms (basis 252) \tab 4189 \cr #' \emph{'unemp'} \tab Unemployment rate - by metropolitan region (PNAD-C) \tab 10777\cr #' \emph{'vargdp'} \tab GDP - real percentage change in the year \tab 7326 #'} #' #' (*) Not available on BETS databases yet. But you can find it in .csv files saved under your BETS installation directory. #' #' \bold{3. Custom Charts} #' #' None of these parameters is required. Please note that some parameters only work for a certain type of chart. #' #' \tabular{lll}{ #' \emph{PARAMETER} \tab \emph{DESCRIPTION} \tab \emph{WORKS FOR} \cr #' \tab \tab \cr #' \code{type} \tab A \code{character}. Either 'bar' or 'lines'. Whether to plot bars or lines. Works for main series, only. \tab Both\cr #' \code{trend}\tab A \code{boolean}. Default is \code{FALSE}. Set it to \code{TRUE} if the trend of the main series (parameter \code{ts}) is to be drawn. \tab Both \cr #' \code{title}\tab A \code{character}. Plot's title. \tab Both \cr #' \code{subtitle}\tab A \code{character}. Plot's subtitle. \tab Both \cr #' \code{xlim}\tab A \code{numeric} vector. X axis limits \tab Both \cr #' \code{ylim}\tab A \code{numeric} vector. Y axis limits \tab Both \cr #' \code{arr.ort}\tab A \code{character}. Orientation of the arrow pointing to the last value of the main series. Valid values are 'h' (horizontal) and 'v' (vertical). \tab \emph{'normal'} \cr #' \code{arr.len}\tab A \code{numeric} value. Length of the arrow pointing to the last value of the main series. \tab \emph{'normal'} \cr #' \code{extra}\tab A \code{ts} object. A second series to be plotted. \tab Both \cr #' \code{extra.y2}\tab A \code{boolean}. Default is \code{FALSE}. Does the extra series require a second y axis? \tab \emph{'plotly'} \cr #' \code{extra.arr.ort}\tab A \code{character}. Orientation of the arrow pointing to the last value of the extra series. Valid values are 'h' (horizontal) and 'v' (vertical). \tab \emph{'normal'} \cr #' \code{extra.arr.len}\tab A \code{numeric} value. Length of the arrow pointing to the last value of the extra series. \tab \emph{'normal'} \cr #' \code{colors}\tab A \code{character} or \code{integer} vector. A vector of colors, one for each series. Trends will always be drawn in gray, its color can't be set. \tab Both \cr #' \code{legend}\tab A \code{character} vector. Names of the series. Default is \code{NULL} (no legends). \tab Both \cr #' \code{legend.pos}\tab A \code{character}. Legend position. If \code{type} is set to \emph{'normal'}, possibile values are 'top' and 'bottom'; if \code{type} is set to \emph{'plotly'}, either 'h' (horizontal) and 'v' (vertical). \tab Both \cr #' \code{codace}\tab A \code{boolean}. Default is \code{FALSE}. Include shaded areas for recessions, as dated by CODACE(**)? 
\tab \emph{'plotly'} \cr #'} #' #' (**) Business Cycle Dating Committee (FGV/IBRE) #' #' @return If parameter \code{file} is not set by the user, the chart will be shown at the standard R ploting area. Otherwise, it is going to be saved on your computer. #' #' @examples #' #' # chart(ts = "sent_ind", file = "animal_spirits", open = T) #' # chart(ts = "gdp_mon", file = "gdp_mon.png", open = F) #' # chart(ts = "misery_index") #' # chart(ts = "transf_ind", file = "transf_ind.png", open = F) #' #' @author Talitha Speranza \email{[email protected]} #' #' @importFrom plotly export #' @import webshot #' @export chart = function(ts, style = "normal", file = NULL, open = TRUE, lang = "en", params = NULL){ if(lang == "en"){ Sys.setlocale(category = "LC_ALL", locale = "English_United States.1252") } else if(lang == "pt"){ Sys.setlocale(category = "LC_ALL", locale = "Portuguese_Brazil.1252") } else { return(invisible(msg(.MSG_LANG_NOT_AVAILABLE))) } not.set <- F if(!is.null(file)){ dir.create("graphs", showWarnings = F) file = paste0("graphs","\\",file) if(!grepl("\\.png$", file) && !grepl("\\.pdf$",file)) { not.set <- T file <- paste(file,".png",sep="") } } if(class(ts) == "character"){ if(ts == "iie_br"){ p = draw.iie_br() } else if(ts == "sent_ind"){ p = draw.sent_ind() } else if(ts == "gdp_mon"){ p = draw.gdp_mon() } else if(ts == "lab_lead"){ p = draw.lab_lead() } else if(ts == "lab_coin"){ p = draw.lab_coin() } else if(ts == "gdp_vars"){ p = draw.gdp_vars() } else if(ts == "lei"){ p = draw.lei() } else if(ts == "cei"){ p = draw.cei() } else if(ts == "gdp_comps"){ p = draw.gdp_comps() } else if(ts == "misery_index"){ p = draw.misery_index() } else if(ts == "gdp_unemp"){ p = draw.gdp_unemp() } else if(ts == "ei_vars"){ p = draw.ei_vars() } else if(ts == "ei_comps"){ p = draw.ei_comps() } else if(ts == "conf_lvl"){ p = draw.conf_lvl() } else if(ts == "cap_utl"){ p = draw.cap_utl() } else if(ts %in% c("transf_ind","servc","retail","constr","consm")){ p = draw.survey(ts) } else { if(!is.null(file)){ if(not.set){ file <- sub("\\.png","\\.pdf",file) } if(grepl("\\.png", file)){ png(file,width=728,height=478, pointsize = 15) } else { pdf(file, width = 7, height = 4.5) } } if(ts == "ipca_with_core"){ draw.ipca() } else if(ts == "ulc"){ draw.ulc() } else if(ts == "eap"){ draw.eap() } else if(ts == "cdb"){ draw.cdb() } else if(ts == "indprod"){ draw.indprod() } else if(ts == "selic"){ draw.selic() } else if(ts == "unemp"){ draw.unemp() } else if(ts == "vargdp"){ draw.vargdp() } else { msg(paste("Plot was not created.",.MSG_PARAMETER_NOT_VALID)) } if(!is.null(file)){ dev.off() } } } else { if(style == "normal" && !is.null(file)){ if(not.set){ file <- sub("\\.png","\\.pdf",file) } if(grepl("\\.png", file)){ png(file,width=728,height=478, pointsize = 15) } else { pdf(file, width = 7, height = 4.5) } } p = suppressWarnings(draw.generic(ts, style, params)) if(style == "normal" && !is.null(file)){ dev.off() } } if(!is.null(file)){ tryCatch({ export(p, file = file, zoom = 4, cliprect = c(20,20,740,500))}, message = function(e){ install_phantomjs() export(p, file = file, zoom = 4, cliprect = c(20,20,740,500)) }, error = function(e){ # do nothing }) if(open){ file.show(file) } } else { p } }
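# --- Usage sketch (illustrative, not part of the package sources) ---
# A custom 'plotly' chart built from a user series, using parameters from the
# 'Custom Charts' table above. Fetching the series requires the BETS database;
# code 4189 (SELIC) is taken from the documented table.
# selic <- BETSget(4189)
# chart(ts = selic, style = "plotly",
#       params = list(type = "lines", trend = TRUE, codace = TRUE,
#                     title = "Base Interest Rate (SELIC)",
#                     legend = "SELIC"))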
/scratch/gouwar.j/cran-all/cranData/BETS/R/chart.R
#' @title Create a chart of the Unitary Labor Cost time series #' #' @description Creates a plot of series 11777 #' #' @param ylim A \code{numeric vector}. Y axis limits. #' @param xlim A \code{numeric vector}. x axis limits. #' @param type A \code{character}. The type of of plot (lines). #' @param title A \code{character}. The plot title. #' @param subtitle A \code{character}. The plot subtitle. #' @param ts A \code{ts}. the ts object. #' @param col A \code{character}. Color. #' @param arr.size A \code{vector}. #' @param arr.pos A \code{vector}. #' @param leg.pos A \code{vector}. #' @param trend A \code{boolean}. #' @return An image file is saved in the 'graphs' folder, under the BETS installation directory. #' #' @importFrom grDevices dev.new dev.off pdf png #' @importFrom utils read.csv2 #' @importFrom zoo as.Date #' @importFrom stats ts plot.ts #' @importFrom graphics axis text points mtext arrows #' @importFrom graphics strheight strwidth #' @importFrom utils tail #' @author Talitha Speranza \email{[email protected]} chart.add_basic = function(ts, xlim = NULL, ylim = NULL, type = "lines", title = "", subtitle = "", col = "firebrick4", arr.size = NULL, arr.pos = "v", leg.pos = "top", trend = FALSE){ freq = 0 if(class(ts) != "data.frame"){ freq = frequency(ts) series = ts dt = as.Date(ts)[length(ts)] labs = NULL } else { series = as.ts(ts[,"value"]) dates = as.Date(ts[,"date"]) dt = tail(dates,1) s = seq(1,nrow(ts),by = floor(nrow(ts)/8)) labs = dates[s] } if(trend){ requireNamespace("mFilter") hp = fitted(mFilter::hpfilter(series)) } last = vector(mode = "numeric") last[1] = as.integer(format(dt, "%Y")) last[2] = as.integer(format(dt, "%m")) last[3] = as.integer(format(dt, "%d")) if(freq == 12){ aval = paste0("Last available data: ",format(dt, "%b"),"/", format(dt,"%Y")) } else if(freq == 1) { aval = paste0("Last available data: ", format(dt,"%Y")) } else { aval = paste0("Last available data: ", last[1], "/", last[2], "/", last[3]) } if(frequency(ts) != 1){ m = c(7.1,4.1,3.1,2.1) } else { m = c(3.1,4.1,3.1,2.1) } x0 = last[1] + last[2]/12 + last[3]/30 val = round(series[length(series)],2) d = 0 par(font.lab = 2, cex.axis = 1.2, bty = "n", las = 1, mar= m) if(type == "lines"){ if(is.null(labs)){ plot(series, lwd = 2.5, lty = 1, xlab = "", ylab = "", main = title, col = col, ylim = ylim, xlim = xlim) } else { plot.ts(x = dates, y = series, type = "l", xaxt = "n", ylim = ylim, lwd = 2.5, lty = 1, xlab = "", ylab = "", main = title, col = col) axis(1, at = labs, labels = labs, las=1, cex.axis = 0.75) } } else { xbar = barplot(as.vector(series), names.arg = as.vector(time(series)), xlab = "", ylab = "", main = title, col = col, ylim = ylim, xpd = FALSE) if(trend == F){ x0 = xbar[nrow(xbar),1] s = 0 } else { s = strwidth(val)/2 } } mtext(subtitle) if(trend){ if(type == "lines"){ lines(hp, lty = 6, col = "darkgray", lwd = 2) } else { par(new = TRUE) plot(hp, lty = 6, col = "darkgray", lwd = 2, xaxt="n",yaxt = "n",xlab = "",ylab = "", ylim = ylim) } } if(nchar(val) >= 4 && type != "bar"){ d = strwidth(val)/nchar(val) } if(is.null(xlim)){ xlim = par("usr")[1:2] } if(is.null(ylim)){ ylim = par("usr")[3:4] } x.spam = xlim[2] - xlim[1] y.spam = ylim[2] - ylim[1] l = 0 if(trend){ l = strheight(aval) if(leg.pos == "top"){ legend("topleft", "Trend (HP Filter)", lty = 6, lwd = 2, col="darkgrey", bty = "n", cex = 0.9) } else if(leg.pos == "bottom") { legend("bottomleft", "Trend (HP Filter)", lty = 6, lwd = 2, col="darkgrey", bty = "n", cex = 0.9) } } if(leg.pos == "top"){ text(xlim[1] + 0.2*x.spam, 
ylim[2] - 0.06*y.spam - l, aval, cex = 0.9) } else if (leg.pos == "bottom"){ text(xlim[1] + 0.2*x.spam, ylim[1] + 0.06*y.spam + l, aval, cex = 0.9) } if(type == "lines"){ points(x0, val, pch = 21, cex = 1.25, lwd = 2, bg = col, col = "darkgray") if(arr.pos == "v"){ if(is.null(arr.size)){ arr.size = y.spam/2 } if(val > (ylim[1] + ylim[2])/2){ x1 = x0 y0 = val - arr.size y1 = val - 0.02*y.spam h = -strheight(val) } else { x1 = x0 y0 = val + arr.size y1 = val + 0.02*y.spam h = strheight(val) } text(x1 - d, y0 + h, as.character(val), cex = 1.1, font = 2) } else { if(is.null(arr.size)){ arr.size = y.spam/12 } y0 = val y1 = val x1 = x0 - 0.02*x.spam x0 = x0 - arr.size text(x0 - strwidth(val) + d, y0, as.character(val), cex = 1.1, font = 2) } arrows(x0 = x0, x1 = x1, y0 = y0, y1 = y1, length = c(0.01*x.spam, 0.00006*y.spam), lwd = 2) } else { # text(x0 - strwidth(val)/2, val - sign(val)*0.7*strheight(val), as.character(val), cex = 0.9, font = 2) text(x0 - s, val/2, as.character(val), cex = 0.9, font = 2) } return(c(xlim,ylim)) }
/scratch/gouwar.j/cran-all/cranData/BETS/R/chart.add_basic.R
#' @title Create a chart of the Unitary Labor Cost time series #' #' @description Creates a plot of series 11777 #' #' @param ylim A \code{numeric vector}. Y axis limits. #' @param ts A \code{ts}. the ts object. #' @param xlim A \code{numeric vector}. x axis limits. #' @param col A \code{character}. Color. #' @param arr.size A \code{}. #' @param arr.pos A \code{}. #' @param leg.pos A \code{}. #' @param leg.text A \code{}. #' @param main.type A \code{}. #' #' @return An image file is saved in the 'graphs' folder, under the BETS installation directory. #' #' @importFrom grDevices dev.new dev.off pdf png #' @importFrom utils read.csv2 #' @importFrom zoo as.Date #' @importFrom stats ts plot.ts #' @importFrom graphics axis text points mtext arrows #' @importFrom graphics strheight strwidth #' @importFrom utils tail #' @author Talitha Speranza \email{[email protected]} chart.add_extra= function(ts, ylim = NULL, xlim = NULL, col = "firebrick3", arr.size = NULL, arr.pos = "v", leg.pos = "top", leg.text = "", main.type = "lines"){ freq = 0 if(class(ts) != "data.frame"){ freq = frequency(ts) series = ts dt = as.Date(ts)[length(ts)] } else { series = as.ts(ts[,2]) dt = as.Date(ts[nrow(ts),1]) } last = vector(mode = "numeric") last[1] = as.integer(format(dt, "%Y")) last[2] = as.integer(format(dt, "%m")) last[3] = as.integer(format(dt, "%d")) if(main.type == "bar"){ xbar <- barplot(as.vector(series), plot = F) lines(x = xbar, y = as.vector(series),lwd = 2.5, lty = 1, col = col, xpd = T) x0 = xbar[nrow(xbar),1] } else { lines(series, lwd = 2.5, lty = 2, col = col, xpd = T) x0 = last[1] + last[2]/12 + last[3]/30 } val = round(series[length(series)],2) d = 0 if(nchar(val) >= 4){ d = strwidth(val)/nchar(val) } if(is.null(xlim)){ xlim = par("usr")[1:2] } if(is.null(ylim)){ ylim = par("usr")[3:4] } x.spam = xlim[2] - xlim[1] y.spam = ylim[2] - ylim[1] l = strheight("a") if(leg.pos == "top"){ legend("topleft", leg.text, lty = 6, lwd = 2, col= col, bty = "n", cex = 0.9) text(xlim[1] + 0.2*x.spam, ylim[2] - 0.06*y.spam - l, cex = 0.9) } else if(leg.pos == "bottom") { legend("bottomleft", leg.text, lty = 6, lwd = 2, col= col, bty = "n", cex = 0.9) text(xlim[1] + 0.2*x.spam, ylim[1] + 0.06*y.spam + l, cex = 0.9) } if(arr.pos == "v" || arr.pos == "h"){ points(x0, val, pch = 21, cex = 1.25, lwd = 2, bg = col, col = "darkgray") } if(arr.pos == "v"){ if(is.null(arr.size)){ arr.size = y.spam/2 } if(val > (ylim[1] + ylim[2])/2){ x1 = x0 y0 = val - arr.size y1 = val - 0.02*y.spam h = -strheight(val) } else { x1 = x0 y0 = val + arr.size y1 = val + 0.02*y.spam h = strheight(val) } text(x1 - d, y0 + h, as.character(val), cex = 1.1, font = 2) arrows(x0 = x0, x1 = x1, y0 = y0, y1 = y1, length = c(0.01*x.spam, 0.00006*y.spam), lwd = 2) } else if(arr.pos == "h") { if(is.null(arr.size)){ arr.size = y.spam/12 } y0 = val y1 = val x1 = x0 - 0.02*x.spam x0 = x0 - arr.size text(x0 - strwidth(val) + d, y0, as.character(val), cex = 1.1, font = 2) arrows(x0 = x0, x1 = x1, y0 = y0, y1 = y1, length = c(0.01*x.spam, 0.00006*y.spam), lwd = 2) } return(c(xlim,ylim)) }
/scratch/gouwar.j/cran-all/cranData/BETS/R/chart.add_extra.R
#' @title Add notes #' #' @description Add notes #' #' @param series.list A \code{ts object} #' @param xlim A \code{vector} #' @param ylim A \code{vector} #' @param names A \code{character} #' @param dec An \code{integer} #' #' #' @importFrom graphics par points text #' @importFrom stats frequency #' @importFrom graphics strheight strwidth #' #' #' @author Talitha Speranza \email{[email protected]} chart.add_notes = function(series.list, xlim, ylim, names = NULL, dec = 2){ par(xpd=NA) if(class(series.list) == "ts"){ series.list = list(series.list) } x.spam = xlim[2] - xlim[1] y.spam = ylim[2] - ylim[1] if(length(series.list) == 1){ mg = 5.8*strheight("A") divs = 3 } else{ if(is.null(names)){ names = names(series.list) } mg = 9*strheight("A") divs = length(series.list)*2 + 2 } x.coords = vector(mode = "numeric") dist = x.spam/divs for(i in 1:(divs-1)){ x.coords[i] = xlim[1] + i*dist - strwidth("AAAAA") } y.coord = ylim[1] - mg j = 1 for(i in 1:length(series.list)){ series = series.list[[i]] len = length(series) freq = frequency(series) last.period.val = paste0(round((series[len]/series[len-1] - 1)*100,2),"%") last.year.val = paste0(round((series[len]/series[len-freq] - 1)*100,2),"%") if(freq != 365){ dt.lp = as.Date(series)[len-1] dt.ly = as.Date(series)[len-freq] last.period.comp = paste0(format(dt.lp,"%b"),"/", format(dt.lp,"%Y"),": ", round(series[len-1],dec)) last.year.comp = paste0(format(dt.ly,"%b"),"/", format(dt.ly,"%Y"), ": ", round(series[len-freq],dec)) } else { last.period.comp = paste0("A day before: ", round(series[len-1],dec)) last.year.comp = paste0("A month before: ", round(series[len-30],dec)) } d.ly = 0 if(nchar(last.year.val) == 4){ d.ly = 0.05*x.spam } if(nchar(last.year.val) == 5){ d.ly = 0.015*x.spam } x.coords[j] = x.coords[j] - 0.01*x.spam x.coords[j + 1] = x.coords[j + 1] + 0.04*x.spam if(last.period.val > 0){ points(x = x.coords[j] + d.ly, y = y.coord - 0.022*y.spam, pch = 24, col = "blue", bg = "blue", cex = 1.3) } else if(last.period.val < 0){ points(x = x.coords[j] + d.ly, y = y.coord, pch = 25, col = "red", bg = "red", cex = 1.3) } else { points(x = x.coords[j] + d.ly, y = y.coord, pch = "-", col = "green", bg = "green", cex = 1.3) } text(last.period.val, x = x.coords[j] + 0.075*x.spam, y = y.coord - 0.01*y.spam, cex = 1.1, font = 2) text(last.period.comp, x = x.coords[j] + 0.075*x.spam, y = y.coord - 0.1*y.spam, cex = 0.9) if(last.year.val > 0){ points(x = x.coords[j+1] + d.ly, y = y.coord - 0.022*y.spam, pch = 24, col = "blue", bg = "blue", cex = 1.3) } else if(last.year.val < 0){ points(x = x.coords[j+1] + d.ly, y = y.coord, pch = 25, col = "red", bg = "red", cex = 1.3) } else { points(x = x.coords[j+1]+ d.ly, y = y.coord, pch = "-", col = "green", bg = "green", cex = 1.3) } text(last.year.val, x = x.coords[j+1] + 0.075*x.spam, y = y.coord - 0.01*y.spam, cex = 1.1, font = 2) text(last.year.comp, x = x.coords[j+1] + 0.075*x.spam, y = y.coord - 0.1*y.spam, cex = 0.9) if(!is.null(names)){ title.x = 0.5*x.coords[j] + 0.5*x.coords[j+1] + 0.04*x.spam title.y = y.coord + 2*strheight("A") text(names[i], x = title.x, y = title.y, cex = 0.9, font = 2) } j = j + 3 } }
/scratch/gouwar.j/cran-all/cranData/BETS/R/chart.add_notes.R
#' @title Check series #' #' @description Check series in BETS dataset #' #' @param ts A \code{ts object} #' @param message A \code{character} #' #' @importFrom stats start end #' @author Talitha Speranza \email{[email protected]} check.series = function(ts,message = NULL){ if(is.list(ts)){ s = sum(sapply(ts, function(x){anyNA(x)})) if(s != 0){ msg(paste("There is at least one series with NAs.",message)) return(FALSE) } l = length(ts[[1]]) st = start(ts[[1]]) e = end(ts[[1]]) for(i in 2:length(ts)){ if(l != length(ts[[i]])){ msg(paste("Not all series have the same length.",message)) return(FALSE) } exp = all.equal(st,start(ts[[i]])) if(!isTRUE(exp)){ msg(paste("Not all series have the same starting period.",message)) return(FALSE) } exp = all.equal(e,end(ts[[i]])) if(!isTRUE(exp)){ msg(paste("Not all series have the same ending period.",message)) return(FALSE) } } } else if(class(ts) == "ts") { if(anyNA(ts)){ msg(paste("This series contains NAs.",message)) return(FALSE) } } else{ msg(paste("Argument is not a time series or a list of time series.",message)) return(FALSE) } return(TRUE) }
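# --- Usage sketch (illustrative, not part of the package sources) ---
# check.series() validates a single ts or a list of ts: no NAs, equal lengths
# and matching start/end periods. It returns TRUE/FALSE and, for failures,
# prints a message through the package's internal msg() helper.
check.series(list(ts(1:24, start = c(2000, 1), frequency = 12),
                  ts(25:48, start = c(2000, 1), frequency = 12)))
# expected: TRUE
check.series(ts(c(1, NA, 3)), message = "Please revise the input.")
# expected: FALSE (the series contains NAs)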
/scratch/gouwar.j/cran-all/cranData/BETS/R/check.series.R
#' @title Connection with the server #' @description Make the connection with the server #' @import DBI RMySQL digest #' @importFrom utils install.packages remove.packages connection = function(){ key <- readRDS(paste0(system.file(package="BETS"),"/key.rds")) dat <- readBin(paste0(system.file(package="BETS"),"/credentials.txt"),"raw",n=1000) aes <- AES(key,mode="ECB") raw <- aes$decrypt(dat, raw=TRUE) txt <- rawToChar(raw[raw>0]) credentials <- read.csv(text=txt, stringsAsFactors = F) rm(key) conn = tryCatch({ dbConnect(MySQL(),db=credentials$bd,user=credentials$login,password=as.character(credentials$password),host=credentials$host,port=credentials$port) }, error = function(e){ return(NULL) }) return(conn) }
/scratch/gouwar.j/cran-all/cranData/BETS/R/connection.R
# Messages
.MSG_NOT_AVAILABLE = "Sorry. This series is not yet available."
.MSG_NOT_FOUND = "Sorry. Series was not found."
.MSG_PARAMETER_NOT_VALID = "Parameter is not valid. Check the help files for a list of possible values."
.MSG_LANG_NOT_AVAILABLE = "Language option is not available. BETS reports are only available in English or Portuguese."
.MSG_OUT_NOT_AVAILABLE = "Output option is not available. BETS charts can only be saved as .png or .pdf"
.MSG_BCB_DATA_OFFLINE = "The BCB data center is offline, please wait and try again"

# Warnings
.WARN_SOFT = "Don't worry, this is not a critical problem. We are working on a solution."
/scratch/gouwar.j/cran-all/cranData/BETS/R/constants.R
#' @title Plot the ACF or the PACF of a time series #' #' @description Plot correlograms using plot.ly and several other options that differ theses plots from \link[forecast]{forecast}s ACF and PACF. #' #' @param ts An object of type \code{ts} or \code{xts}. The time series for which the plot must be constructed. #' @param lag.max A \code{numeric} value. The number of lags to be shown in the plot. #' @param type A \code{character}. Can be either 'correlation' (for the ACF) or 'partial' (for the PACF). #' @param style A \code{character}. Set this parameter to 'normal' if you want it made with ggplot2 or to 'plotly' if you want to be a \link[plotly]{plotly} object. #' @param ci A \code{numeric} value. The confidence interval to be shown in the plot. #' @param mode A \code{character}. Set this parameter to 'bartlett' if you want the variance to be calculated according to \href{https://en.wikipedia.org/wiki/Correlogram#Statistical_inference_with_correlograms}{Bartlett's formula}. Otherwise, it is going to be simply equal to \code{1/sqrt(N)}. #' @param knit A \code{boolean}. If you're using this function to exhibit correlograms on a R dynamic report, set this parameter to true. #' #' @return A plot and a \code{vector} containing the correlations. #' #' @author Talitha Speranza \email{[email protected]} #' #' @export #' @importFrom plotly plotly_build #' @import forecast #' @importFrom ggplot2 ggplot geom_segment scale_x_continuous geom_point geom_step labs aes corrgram = function(ts, lag.max = 12, type = "correlation", mode = "simple", ci = 0.95, style = "plotly", knit = F){ ## Validation if(!is.numeric(ci) || ci <= 0 || ci >= 1){ stop("Parameter 'ci' (confidence interval) must be a real number between 0+ and 1-") } if(type != "correlation" && type != "partial"){ stop("Unknown value for parameter 'type'") } if(mode != "simple" && mode != "bartlett"){ stop("Unknown value for parameter 'mode'") } if(!is.integer(lag.max)){ if(is.numeric(lag.max)){ lag.max = round(lag.max) } else{ stop("Parameter 'lag.max' must be an integer") } } alpha = (1 - ci)/2 z = -qnorm(alpha, 0, 1) if(type == "correlation"){ out <- forecast::Acf(ts, plot=F, lag.max = lag.max) yaxis = "Correlation" corrs <- out$acf[-1, , ] lags = out$lag[-1, , ] } else { out <- forecast::Pacf(ts, plot=F, lag.max = lag.max) yaxis = "Partial Correlation" corrs <- out$acf[ , , ] lags = out$lag[ , , ] + 1 } step = frequency(ts) lim = lag.max - lag.max%%step ticks = seq(0,lim,step) ticks[1] = 1 var <- vector(mode = "numeric") N = length(ts) var[1] = 1/sqrt(N) sum = 0 if(mode == "bartlett"){ for(i in 2:length(lags)){ for(j in 1:(i-1)){ sum = sum + (var[j])^2 } var[i] = sqrt((1 + 2*sum)/N) } } else { for(i in 2:length(lags)){ var[i] = var[1] } } var = z*var data <- as.data.frame(cbind(lags,corrs,var)) gp <- ggplot(data, aes(x = lags, y= corrs)) + geom_segment(aes(x=lags, xend=lags, y=0, yend=corrs), data=data, size = 0.5) + scale_x_continuous(breaks = ticks) + geom_point(size = 0.5) + geom_step(data=data, mapping=aes(x=lags, y=var), color="red", linetype="dashed", size=0.3) + geom_step(data=data, mapping=aes(x=lags, y=-var), color="red", linetype="dashed", size=0.3) + labs(list(x="Lag", y= yaxis)) if(style == "plotly"){ p <- plotly_build(gp) p$x$data[[1]]$text <- paste("Lag:", lags) p$x$data[[2]]$text <- paste0(yaxis, ": ", round(corrs,3), " <br> Lag: ", lags) p$x$data[[3]]$text <- paste("CI upper bound:", round(var,3)) p$x$data[[4]]$text <- paste("CI lower bound:", -round(var,3)) if(knit){ return(p) } else{ print(p) } } else { p <- gp if(knit){ 
return(p) } else{ par(cex.axis = 0.75, cex.lab = 0.8, mar = c(5.1, 4.1, 0.5, 2.1)) plot(p) } } return(invisible(corrs)) }
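# --- Usage sketch (illustrative, not part of the package sources) ---
# Correlograms of a classic seasonal series; 'bartlett' widens the confidence
# band with the lag, as described above. Needs the forecast/ggplot2/plotly
# dependencies already imported by the package.
corrgram(AirPassengers, lag.max = 36, type = "correlation",
         mode = "bartlett", ci = 0.95, style = "plotly")
corrgram(diff(log(AirPassengers)), lag.max = 24, type = "partial",
         style = "normal")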
/scratch/gouwar.j/cran-all/cranData/BETS/R/corrgram.R
#' @title Create a BETS custom dashboard #' #' @description Generate thematic dashboards using a selection of BETS time series and charts. For now, themes and charts are pre-defined. #' #' @param type A \code{character}. The theme of the dashboard. The only three options, for the time being, is 'business_cycle', 'macro_situation' and 'custom'. Custom dashboards can be rendered with any given set of charts. #' @param saveas A \code{character}. A path and a name for the dashboard file (a .pdf file). If this parameter is not provided, the dashboard will be saved inside the 'dashboards' folder, under the BETS installation directory. #' @param charts A \code{character} and/or \code{ts} object list. The charts to be added to a custom dashboard. Up to 16 charts are allowed, including pre-defined charts, identified by their codes (see \code{\link{chart}}). This will only work if parameter 'type' is set to 'custom'. #' @param parameters A \code{list}. A list of parameters. See the 'Details' section for a description of these parameters for each type of dashboard. #' #' @details #' #' \bold{Macro Situation and Custom Dashboard Parameters} #' #' \tabular{ll}{ #' \code{text}\tab The text to be printed in the dashboard. Separate paragraphs with two backslashes 'n' and pages with '##'. There are no other syntax rules.\cr #' \code{author}\tab The author's name.\cr #' \code{email}\tab The author's email.\cr #' \code{url}\tab The author's webpage.\cr #' \code{logo}\tab The author's business logo.\cr #' } #' #' #' \bold{Additional Custom Dashboard Parameters} #' #' \tabular{ll}{ #' \code{style} \tab A \code{character}. The style of the charts. As in \code{chart}, can be either \code{'plotly'} or \code{'normal'}.\cr #' \code{charts.opts} \tab A \code{list} of parameters lists, one for each chart. Parameters are specified in \code{\link{chart}} \cr #' } #' #' @return A .pdf file (the dashboard) #' #' @author Talitha Speranza \email{[email protected]} #' #' @examples #' #' # dashboard() #' # dashboard(saveas = "survey.pdf") #' # dashboard(type = "macro_situation") #' #' @export #' @import rmarkdown dashboard = function(type = "business_cycle", charts = "all", saveas = NA, parameters = NULL){ rmd = paste0(type, "_dashboard.Rmd") file = system.file(package="BETS", rmd) if(!is.null(parameters$text)){ if(is.null(parameters$author)){ msg("You've provided an analysis to be printed together with the dashboard, but the argument 'author' is missing. Dashboard will not be printed.") } } if(type == "custom"){ if(is.null(parameters)){ parameters = list() } parameters$charts = charts } if(is.null(parameters)){ rmarkdown::render(file) } else { rmarkdown::render(file, params = parameters) } if(is.na(saveas)){ dir = paste0(system.file(package="BETS"),"//dashboards") dir.create(dir) saveas = paste0(dir, "//", type, "_dashboard.pdf") } file = gsub(".Rmd", ".pdf", file) file.copy(file, saveas, overwrite = T) file.remove(file) system2("open", saveas) }
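# --- Usage sketch (illustrative, not part of the package sources) ---
# A custom dashboard mixing a pre-defined chart code ('sent_ind') and a user
# series, with the metadata parameters described above. Rendering needs
# rmarkdown (and a LaTeX installation) plus access to the underlying data;
# the author name and output file below are placeholders.
# dashboard(type = "custom",
#           charts = list("sent_ind", BETSget(4189)),
#           saveas = "my_dashboard.pdf",
#           parameters = list(style = "normal",
#                             author = "Jane Analyst",
#                             text = "A short analysis paragraph."))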
/scratch/gouwar.j/cran-all/cranData/BETS/R/dashboard.R
#' @title Deflate a time series #' #' @description Deflate a time series using a deflator series. The deflator can be an index, a percentage or a point percentage series. #' #' @param ts A \code{ts} object. The time series to be deflated. #' @param deflator A \code{ts} object. The deflator series. #' @param type A \code{character}. Can be either \code{'index'}, \code{'point.perc'} (for point percentage) or \code{'perc'} (for percentage). #' #' @return The deflated series. #' #' @author Talitha Speranza \email{[email protected]} #' #' @export deflate = function(ts, deflator, type = "index"){ freq_ts = frequency(ts) freq_def = frequency(deflator) if(freq_ts != freq_def){ return("ERROR") } if(length(ts) == length(deflator)){ if(type == "index"){ deflator = 100/deflator } else if(type == "point.perc"){ deflator= 1/deflator } else { deflator = 1/(deflator/100 + 1) } s = as.numeric(ts) def = as.numeric(deflator) } else{ start_ts = start(ts) start_def = start(deflator) end_ts = end(ts) end_def = end(deflator) if(start_ts[1] > start_def[1]){ deflator = window(deflator, start = start_ts, frequency = freq_ts) } else if(start_ts[1] < start_def[1]){ ts = window(ts, start = start_def, frequency = freq_ts) } else { if(start_ts[2] > start_def[2]){ deflator = window(deflator, start = start_ts, frequency = freq_ts) } else if(start_ts[2] < start_def[2]){ ts = window(ts, start = start_def, frequency = freq_ts) } } if(end_ts[1] > end_def[1]){ ts = window(ts, end = end_def, frequency = freq_ts) } else if(end_ts[1] < end_def[1]){ deflator = window(deflator, end = end_ts, frequency = freq_ts) } else { if(end_ts[2] > end_def[2]){ ts = window(ts, end = end_def, frequency = freq_ts) } else if(end_ts[2] < end_def[2]){ deflator = window(deflator, end = end_ts, frequency = freq_ts) } } if(type == "index"){ deflator = 100/deflator } else if(type == "point.perc"){ deflator= 1/deflator } else { deflator = 1/(deflator/100 + 1) } s = as.numeric(ts) def = as.numeric(deflator) } return(ts(s*def, start = start(ts), frequency = freq_ts)) }
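# --- Usage sketch (illustrative, not part of the package sources) ---
# Deflating a monthly nominal series with a percentage deflator (e.g. monthly
# inflation in %): each observation is divided by (1 + rate/100). Synthetic
# numbers are used here just to show the call; the windows are aligned
# automatically when the two series do not span the same period.
nominal <- ts(rep(100, 12), start = c(2020, 1), frequency = 12)
inflation <- ts(rep(0.5, 12), start = c(2020, 1), frequency = 12)  # 0.5% a month
real <- deflate(nominal, inflation, type = "perc")
round(real, 2)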
/scratch/gouwar.j/cran-all/cranData/BETS/R/deflate.R
#' @title Create a chart of the Base Interest Rate (SELIC) time series #' #' @description Creates a plot of series 4189 #' #' @return An image file is saved in the 'graphs' folder, under the BETS installation directory. #' @importFrom zoo as.Date as.yearqtr #' @importFrom grDevices rgb #' @import plotly #' @author Talitha Speranza \email{[email protected]} draw.cap_utl = function(){ file = file.path(system.file(package="BETS"), "/sondagens_completo_fgv.csv") sond = read.csv2(file, stringsAsFactors = F) sond[,-1] = suppressWarnings( data.frame(lapply(sond[,-1], function(x){as.numeric(gsub(",",".",x))})) ) inx = ts(sond[148:nrow(sond),c(5,9,13)],start = c(2013,4),frequency = 12) inx = ts(round((inx[,1] + inx[,2] + inx[,3])/3,2), start = c(2013,4), frequency = 12) t = "<b>CAPACITY UTILIZATION</b>" t = paste0(t,"<br><span style = 'font-size:15px'>Seasonally Adjusted</span>") len = length(inx) t <- list( x = 0.5, y = 1.17, text = t, xref = "paper", yref = "paper", showarrow = F, font = list(size = 19) ) a <- list( x = as.Date(inx)[len], y = inx[len], text = paste0("<b>",inx[len],"</b>"), xref = "x", yref = "y", showarrow = TRUE, arrowhead = 6, ay = -50, ax = 0, font = list(size = 22) ) m <- list( t = 60, pad = 1, b = 60, r = 15 ) dates = as.Date(inx) rg = rgb(162,7,7, maxColorValue = 255) p = plot_ly(width = 700, height = 450) %>% add_lines(x = dates, y = inx, name = "Coincident") %>% layout(title = t, yaxis = list(tickfont = list(size = 22)), xaxis = list(tickfont = list(size = 17)), margin = m, titlefont = list(size = 19), annotations = list(a,t), shapes = list(type = "rect", fillcolor = rg, line = list(color = rg), opacity = 0.2, x0 = "2014-07-01", x1 = as.Date(inx)[len], xref = "x", y0 = 70, y1 = 85, yref = "y")) return(p) }
/scratch/gouwar.j/cran-all/cranData/BETS/R/draw.cap_utl.R
#' @title Create a chart of the Time Deposits time series #' #' @description Creates a plot of series 14 #' #' @importFrom grDevices dev.new dev.off pdf png #' @importFrom utils read.csv2 #' @importFrom stats ts plot.ts #' @importFrom graphics axis text points mtext arrows #' #' @return An image file is saved in the 'graphs' folder, under the BETS installation directory. #' @author Talitha Speranza \email{[email protected]} draw.cdb= function(){ cdb = BETSget(code = 14, data.frame = TRUE) if(!is.null(start)){ # if(start[2] < 9){ # start[2] = paste0("0",start[2]) # } # if(start[3] < 9){ # start[3] = paste0("0",start[3]) # } # # init = as.Date(paste0(start[1],"-",start[2],"-",start[3]), format = "%Y-%m-%d") init = as.Date("2006-01-01") i = which(cdb[,"date"] >= init) if(length(i) != 0){ cdb = cdb[i,] } } lims = chart.add_basic(ts = cdb, title = "Time Deposits (CDB/RDB-Preset)", subtitle = "Daily Returns (%)", col = "darkolivegreen", leg.pos = "bottom") chart.add_notes(ts(cdb[,"value"], frequency = 365), ylim = lims[3:4], xlim = lims[1:2],dec = 4) }
/scratch/gouwar.j/cran-all/cranData/BETS/R/draw.cdb.R
#' @title Create a chart of the Base Interest Rate (SELIC) time series #' #' @description Creates a plot of series 4189 #' #' @return An image file is saved in the 'graphs' folder, under the BETS installation directory. #' @importFrom zoo as.Date as.yearqtr #' @importFrom grDevices rgb #' @import plotly #' @author Talitha Speranza \email{[email protected]} draw.cei = function(){ file = file.path(system.file(package="BETS"), "/cei.csv") lei= read.csv2(file, stringsAsFactors = F)[,2] lei = window(ts(as.numeric(lei), start = c(1996,1),frequency = 12),start = c(2000,6)) t = "<b>COINCIDENT ECONOMIC INDICATOR</b>" t = paste0(t,"<br><span style = 'font-size:15px'>The Conference Board with FGV/IBRE</span>") t <- list( x = 0.5, y = 1.17, text = t, xref = "paper", yref = "paper", showarrow = F, font = list(size = 19) ) l = length(lei) a <- list( x = as.Date(lei)[l], y = lei[l], text = paste0("<b>",lei[l],"</b>"), xref = "x", yref = "y", showarrow = TRUE, arrowhead = 6, ay = -50, ax = 0, font = list(size = 22) ) m <- list( t = 60, pad = 1 ) dates = as.Date(lei) rg = rgb(162,7,7, maxColorValue = 255) p = plot_ly(width = 700, height = 450) %>% add_lines(x = dates, y = lei) %>% layout(title = t, yaxis = list(tickfont = list(size = 22),range = c(60,115)), xaxis = list(tickfont = list(size = 17)), margin = m, titlefont = list(size = 19), annotations = list(a,t), shapes = list( list(type = "rect", fillcolor = rg, line = list(color = rg), opacity = 0.2, x0 = "2001-01-01", x1 = "2002-01-01", xref = "x", y0 = 60, y1 = 115, yref = "y"), list(type = "rect", fillcolor = rg, line = list(color = rg), opacity = 0.2, x0 = "2003-01-01", x1 = "2003-07-01", xref = "x", y0 = 60, y1 = 115, yref = "y"), list(type = "rect", fillcolor = rg, line = list(color = rg), opacity = 0.2, x0 = "2008-10-01", x1 = "2009-04-01", xref = "x", y0 = 60, y1 = 115, yref = "y"), list(type = "rect", fillcolor = rg, line = list(color = rg), opacity = 0.2, x0 = "2014-07-01", x1 = as.Date(lei)[l], xref = "x", y0 = 60, y1 = 115, yref = "y"))) return(p) }
/scratch/gouwar.j/cran-all/cranData/BETS/R/draw.cei.R
#' @title Create a chart of the Base Interest Rate (SELIC) time series #' #' @description Creates a plot of series 4189 #' #' @return An image file is saved in the 'graphs' folder, under the BETS installation directory. #' @importFrom zoo as.Date as.yearqtr #' @importFrom grDevices rgb #' @import plotly #' @author Talitha Speranza \email{[email protected]} draw.conf_lvl = function(){ file = file.path(system.file(package="BETS"), "/sondagens_completo_fgv.csv") sond = read.csv2(file, stringsAsFactors = F) sond[,-1] = data.frame(lapply(sond[,-1], function(x){as.numeric(gsub(",",".",x))})) emp = (sond[,2] + sond[,6] + sond[,10] + sond[,16])/4 emp = round(emp[!is.na(emp)],2) emp = ts(emp, start = c(2010,7), frequency = 12) cons = ts(sond[115:nrow(sond),17],start = c(2010,7),frequency = 12) last_val = emp[length(emp)] last_date = as.Date(emp)[length(emp)] a <- list( x = last_date, y = last_val, text = paste0("<b>",last_val,"</b>"), xref = "x", yref = "y", showarrow = TRUE, arrowhead = 6, ay = 60, ax = 0, font = list(size = 22) ) last_val = cons[length(cons)] last_date = as.Date(cons)[length(cons)] b <- list( x = last_date, y = last_val, text = paste0("<b>",last_val,"</b>"), xref = "x", yref = "y2", showarrow = TRUE, arrowhead = 6, ay = -60, ax = 0, font = list(size = 22) ) m <- list( t = 50, pad = 1 ) rg = rgb(162,7,7, maxColorValue = 255) p = plot_ly(mode = "lines", type = "scatter", x = as.Date(emp), y = emp, name = "Enterprises", width = 700, height = 450) %>% add_lines(x = as.Date(cons), y = cons, name = "Consumers") %>% layout(title = "<b>CONFIDENCE INDEX - ENTERPRISES AND CONSUMERS</b>", yaxis = list(tickfont = list(size = 22)), xaxis = list(tickfont = list(size = 15)), margin = m, titlefont = list(size = 19), annotations = list(a,b), legend = list(orientation = 'h', x = 0.17, y = -0.33), shapes = list(type = "rect", fillcolor = rg, line = list(color = rg), opacity = 0.2, x0 = "2014-07-01", x1 = as.Date(emp)[length(emp)], xref = "x", y0 = 60, y1 = 115, yref = "y") ) return(p) }
/scratch/gouwar.j/cran-all/cranData/BETS/R/draw.conf_lvl.R
#' @title Create a chart of the Economically Active Population time series #' #' @description Creates a plot of series 10810 #' #' @importFrom grDevices dev.new dev.off pdf png #' @importFrom utils read.csv2 #' @importFrom stats ts plot.ts #' @importFrom graphics axis text points mtext arrows #' #' @return An image file is saved in the 'graphs' folder, under the BETS installation directory. #' @author Talitha Speranza \email{[email protected]} draw.eap = function(){ eap = (BETSget(10810)/BETSget(10800))*100 start = c(2006,1) if(!is.null(start)){ eap = window(eap, start = start) } lims = chart.add_basic(ts = eap, title = "Economically Active Population", subtitle = "Percentage of Population in Active Age", col = "royalblue", arr.pos = "h", leg.pos = "bottom") chart.add_notes(eap, ylim = lims[3:4], xlim = lims[1:2]) }
/scratch/gouwar.j/cran-all/cranData/BETS/R/draw.eap.R
#' @title Create a chart of the Base Interest Rate (SELIC) time series #' #' @description Creates a plot of series 4189 #' #' @return An image file is saved in the 'graphs' folder, under the BETS installation directory. #' @importFrom zoo as.Date #' @import plotly draw.ei_comps = function(){ file.cei = file.path(system.file(package="BETS"), "/cei_comps.csv") cei <- read.csv2(file.cei, stringsAsFactors = F) file.lei = file.path(system.file(package="BETS"), "/lei_comps.csv") lei <- read.csv2(file.lei, stringsAsFactors = F) lei.labs = c("Swap<br>Rate","Manufacturing<br>Expec.", "Services<br>Expec.", "Consumers<br>Expec.", "Stock<br>Prices", "Terms of<br>Trade", "Consumer<br>Durable Goods<br>Production Exp.", "Exports<br>Volume") cei.labs = c("Industrial<br>Prod.","Ind. Electric<br>Energy Cons.","Shipments of<br>Corrugated<br>Paper", "Volume of<br>Sales (Ret.)", "Employement", 'Avg. Real<br>Income (Workers)') cei$Value = as.numeric(cei$Value) cei = cbind(cei, cei.labs)[,2:3] cei = cei[order(cei$Value, decreasing = T),] lei$Value = as.numeric(lei$Value) lei = cbind(lei, lei.labs)[,2:3] lei = lei[order(lei$Value, decreasing = T),] lei.x <- t(lei[,1]) lei.y <- "LEI" data.lei = data.frame(lei.y, lei.x) cei.x <- t(cei[,1]) cei.y <- "CEI" data.cei = data.frame(cei.y, cei.x) m <- list( t = 80, pad = 1 ) t <- list( x = 0.5, y = 1.175, text = "<b>LEI AND CEI COMPONENTS VARIATION</b>", xref = "paper", yref = "paper", showarrow = F, font = list(size = 19) ) cols = c('rgba(38, 24, 74, 1)', 'rgba(38, 24, 74, 0.9)', 'rgba(38, 24, 74, 0.8)', 'rgba(71, 58, 131, 0.8)', 'rgba(71, 58, 134, 0.7)', 'rgba(164, 163, 204, 0.85)', 'rgba(190, 192, 213, 1)','rgba(122, 120, 168, 0.8)') p1 <- plot_ly(data.lei, type = 'bar', orientation = 'h', width = 700, height = 450) %>% layout(xaxis = list(title = "", showgrid = FALSE, showline = FALSE, showticklabels = F, zeroline = TRUE, zerolinecolor = '#969696', zerolinewidth = 3), yaxis = list(showgrid = FALSE, showline = FALSE, showticklabels = F, zeroline = FALSE, domain = c(0, 0.75)), barmode = 'relative', annotations = t, showlegend = FALSE) %>% add_annotations(xref = 'paper', yref = 'paper', xanchor = 'right', x = 0.0, y = 0.45, text = paste0("<b>",data.lei[1,1],"</b>"), showarrow = F,font = list(size = 18)) p2 <- plot_ly(data.cei, type = 'bar', orientation = 'h', width = 700, height = 450) %>% layout(xaxis = list(title = "", showgrid = FALSE, showline = FALSE, showticklabels = F, zeroline = TRUE, zerolinecolor = '#969696', zerolinewidth = 3), yaxis = list(showgrid = FALSE, showline = FALSE, showticklabels = F, zeroline = FALSE, domain = c(0,0.75)), barmode = 'relative', #paper_bgcolor = 'rgb(248, 248, 255)', plot_bgcolor = 'rgb(248, 248, 255)', showlegend = FALSE) %>% add_annotations(xref = 'paper', yref = 'paper', xanchor = 'right', x = 0, y = 0.3, text = paste0("<b>",data.cei[1,1],"</b>"), showarrow = F,font = list(size = 18)) val.pos = 0 val.neg = 0 for(i in 1:nrow(lei)){ val = as.numeric(lei[i,1]) if(abs(val) >= 0.1){ sig = "" if(val > 0){ sig = "+" pos = val.pos + val/2 val.pos = val.pos + val } else{ pos = val.neg + val/2 val.neg = val.neg + val } p1 = p1 %>% add_trace(x = data.lei[1,i+1], name = lei.labs[1], marker = list(color = cols[i])) %>% add_annotations(xref = 'x', yref = 'paper', x = pos, y = 0.45, text = paste0("<b>", sig, val,"%</b>"), showarrow = F,font = list(size = 12, color = "#FFFFFF")) %>% add_annotations(xref = 'x', yref = 'paper', x = pos, y = 0.93, text = lei[i,2], showarrow = F,font = list(size = 12)) } } lei.vals = lei[abs(lei$Value) >= 0.1,1] 
lei.pos = lei.vals[lei.vals > 0] lei.neg = lei.vals[lei.vals < 0] cei.vals = cei[abs(cei$Value) > 0.03,1] cei.pos = cei.vals[cei.vals > 0] cei.neg = cei.vals[cei.vals < 0] fac.pos = sum(lei.pos)/sum(cei.pos) fac.neg = sum(abs(lei.neg))/sum(abs(cei.neg)) val.pos = 0 val.neg = 0 for(i in 1:nrow(cei)){ val = as.numeric(cei[i,1]) if(abs(val) > 0.03){ sig = "" if(val > 0){ sig = "+" pos = fac.pos*(val.pos + val/2) val.pos = val.pos + val } else{ pos = fac.neg*(val.neg + val/2) val.neg = val.neg + val } p2 = p2 %>% add_trace(x = data.cei[1,i+1], name = cei.labs[1], marker = list(color = cols[i])) %>% add_annotations(xref = 'x', yref = 'paper', x = pos, y = 0.33, text = paste0("<b>", sig, val,"%</b>"), showarrow = F,font = list(size = 12, color = "#FFFFFF")) %>% add_annotations(xref = 'x', yref = 'paper', x = pos, y = 0.85, text = cei[i,2], showarrow = F,font = list(size = 12)) } } s = subplot(p1,p2, nrows = 2) return(s) }
/scratch/gouwar.j/cran-all/cranData/BETS/R/draw.ei_comps.R
#' @title Create a chart of the Base Interest Rate (SELIC) time series #' #' @description Creates a plot of series 4189 #' #' @return An image file is saved in the 'graphs' folder, under the BETS installation directory. #' @importFrom zoo as.Date #' @import plotly #' @author Talitha Speranza \email{[email protected]} draw.ei_vars = function(){ ei = file.path(system.file(package="BETS"), "/ei_vars.csv") data <- read.csv2(ei, stringsAsFactors = F) lei = ts(data[,2], start = c(2016,8),frequency = 12) lei_vars = ts(data[,3], start = c(2016,8), frequency = 12) cei = ts(data[,4], start = c(2016,8),frequency = 12) cei_vars = ts(data[,5], start = c(2016,8), frequency = 12) e = length(lei) s = e - 2 lei = ts(data[s:e,2],end = end(lei), frequency = 12) lei_vars = ts(data[s:e,3],end = end(lei), frequency = 12) cei = ts(data[s:e,4],end = end(lei), frequency = 12) cei_vars = ts(data[s:e,5],end = end(lei), frequency = 12) m <- list( t = 60, l = 100, pad = 1 ) t <- list( x = 0.5, y = 1.2, text = "<b>ECONOMIC INDICATORS - LEADING AND COINCIDENT</b><br><span style = 'font-size:16px'>The Conference Board with FGV/IBRE</span>", xref = "paper", yref = "paper", showarrow = F, font = list(size = 19) ) dates = as.Date(lei) ym = as.yearmon(dates) p = plot_ly(x = as.numeric(lei), y = as.Date(lei), name = "LEI", type = "bar", orientation = 'h', width = 700, height = 450, marker = list(color = 'rgb(171,104,87)')) %>% add_trace(x = as.numeric(cei), y = as.Date(cei), name = "CEI", type = "bar", orientation = 'h', marker = list(color = 'rgb(114,147,203)')) %>% layout(title = "", yaxis = list(tickfont = list(size = 20), tickvals = dates, ticktext=as.character(ym), showline = F, zeroline = F), xaxis = list(title = "", showgrid = FALSE, showline = FALSE, showticklabels = FALSE, zeroline = FALSE), margin = m, annotations = t, titlefont = list(size = 19), showlegend = T, legend = list(orientation = 'h', x = 0.35)) for(i in 1:3){ text = paste0("<b>",lei[i],"</b>") y = dates[i] - 7 x = as.numeric(lei[i])/2 p = p %>% add_annotations(text = text, y = y, x = x, showarrow = F, font = list(size = 18, color = "#FFFFFF")) sig = "" if(cei[i] > 0) sig = "+" text = paste0("<b>",cei[i],"</b>") y = dates[i] + 7 x = as.numeric(cei[i])/2 p = p %>% add_annotations(text = text, y = y, x = x, showarrow = F, font = list(size = 18, color = "#FFFFFF")) sig = "" if(lei_vars[i] > 0) sig = "+" text = paste0("<b>",sig, lei_vars[i],"%</b>") y = dates[i] - 7 x = as.numeric(lei[i]) + 6.5 p = p %>% add_annotations(text = text, y = y, x = x, showarrow = F, font = list(size = 18)) sig = "" if(cei_vars[i] > 0) sig = "+" text = paste0("<b>",sig,cei_vars[i],"%</b>") y = dates[i] + 7 x = as.numeric(cei[i]) + 6.5 p = p %>% add_annotations(text = text, y = y, x = x, showarrow = F, font = list(size = 18)) } return(p) }
/scratch/gouwar.j/cran-all/cranData/BETS/R/draw.ei_vars.R
#' @title Create a chart of the Base Interest Rate (SELIC) time series #' #' @description Creates a plot of series 4189 #' #' @return An image file is saved in the 'graphs' folder, under the BETS installation directory. #' @importFrom zoo as.Date as.yearqtr #' @importFrom forecast ma #' @importFrom utils read.csv #' @importFrom stats aggregate #' @import plotly #' @importFrom seasonal seas #' @author Talitha Speranza \email{[email protected]} draw.gdp_comps = function(){ gdp_comp = file.path(system.file(package="BETS"), "/mon_pib_comps.csv") data <- ts(read.csv2(gdp_comp, stringsAsFactors = F)[,-1],start = c(2000,1), frequency = 12) data <- aggregate(data) year2 = end(data)[1] year1 = end(data)[1]-1 data <- window(data, start = year1) data[,5] = data[,5] - data[,6] data = data[,c(-6,-1)] data = t(data) rownames(data) = c("Hous.<br>Exp.", "Gov.<br>Exp.","GFFK","NX") #s = apply(data[,-1], 1, function(x){sum(x)}) # cbind(data[,1],s) colors <- c('rgb(211,94,96)', 'rgb(128,133,133)', 'rgb(144,103,167)', 'rgb(171,104,87)', 'rgb(114,147,203)') a <- list( x = 0.18, y = 0.5, text = paste0("<b>", year1,"</b>"), xref = "paper", yref = "paper", showarrow = F, font = list(size = 18) ) b <- list( x = 0.82, y = 0.5, text = paste0("<b>", year2,"</b>"), xref = "paper", yref = "paper", showarrow = F, font = list(size = 18) ) m <- list( t = 50, pad = 1 ) p <- plot_ly(width = 700, height = 450) %>% add_pie(labels = rownames(data), values = data[,1], textposition = 'inside', textinfo = "label+percent", insidetextfont = list(color = '#FFFFFF', size = 16), marker = list(colors = colors, line = list(color = '#FFFFFF', width = 1)), showlegend = F, hole = 0.4, domain = list(x = c(0, 0.45), y = c(0, 1))) %>% add_pie(labels = rownames(data), values = data[,2], textposition = 'inside', textinfo = "label+percent", insidetextfont = list(color = '#FFFFFF', size = 16), marker = list(colors = colors, line = list(color = '#FFFFFF', width = 1)), showlegend = F, hole = 0.4, domain = list(x = c(0.55, 1), y = c(0, 1))) %>% layout(title = '<b>GDP COMPONENTS</b><br><span style = "font-size:17">Nominal Yearly GDP - GDP Monitor (FGV/IBRE)</span>', annotations = list(a,b), titlefont = list(size = 19), margin = m, xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE), yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE)) return(p) }
/scratch/gouwar.j/cran-all/cranData/BETS/R/draw.gdp_comps.R
#' @title Create a chart of the Base Interest Rate (SELIC) time series #' #' @description Creates a plot of series 4189 #' #' @return An image file is saved in the 'graphs' folder, under the BETS installation directory. #' @importFrom zoo as.Date #' @import plotly #' @author Talitha Speranza \email{[email protected]} draw.gdp_mon = function(){ gdp_comp = file.path(system.file(package="BETS"), "/mon_pib_comps.csv") data <- read.csv2(gdp_comp, stringsAsFactors = F) gdp = window(ts(as.numeric(data[,2]), start = c(2000,1), frequency = 12),start = c(2013,1)) last_year = end(gdp)[1] last_month = end(gdp)[2] years = (last_year-2):last_year dates = vector(mode = "numeric") ia_vars = vector(mode = "numeric") lm_vars = vector(mode = "numeric") last = length(gdp) for(i in 0:2){ ia_vars[i+1] = round(((gdp[last-i]-gdp[last-i-1])/gdp[last-i-1])*100,2) lm_vars[i+1] = round(((gdp[last-12*i]-gdp[last-(i+1)*12])/gdp[last-12*i])*100,2) dates[i+1] = paste0(years[i+1],"-01-",last_month) } ia_vars = ts(ia_vars, start = last_year-2, frequency = 1) lm_vars = ts(lm_vars, start = last_year-2, frequency = 1) m <- list( t = 60, l = 100, pad = 1 ) t <- list( x = 0.5, y = 1.2, text = "<b>GDP VARIATION</b><br><span style = 'font-size:16px'>GDP Monitor (FGV/IBRE)</span>", xref = "paper", yref = "paper", showarrow = F, font = list(size = 19) ) ym = as.yearmon(dates) p = plot_ly(x = as.numeric(ia_vars), y = years, name = "Interanual Variation", type = "bar", orientation = 'h', width = 700, height = 450, marker = list(color = 'rgb(171,104,87)')) %>% add_trace(x = as.numeric(lm_vars), y = years, name = "Monthly Variation", type = "bar", orientation = 'h', marker = list(color = 'rgb(114,147,203)')) %>% layout(title = "", yaxis = list(tickfont = list(size = 20), tickvals = ym, ticktext = as.character(ym), showline = F, zeroline = F), xaxis = list(title = "", showgrid = FALSE, showline = FALSE, showticklabels = FALSE, zeroline = FALSE), margin = m, annotations = t, titlefont = list(size = 19), showlegend = T, legend = list(orientation = 'h', x = 0.2)) for(i in 1:3){ sig = "" if(ia_vars[i] > 0) sig = "+" text = paste0("<b>",sig, ia_vars[i],"%</b>") y = years[i] - 0.2 x = as.numeric(ia_vars[i])/2 p = p %>% add_annotations(text = text, y = y, x = x, showarrow = F, font = list(size = 18)) sig = "" if(lm_vars[i] > 0) sig = "+" text = paste0("<b>",sig, lm_vars[i],"%</b>") y = years[i] + 0.2 x = as.numeric(lm_vars[i])/2 p = p %>% add_annotations(text = text, y = y, x = x, showarrow = F, font = list(size = 18)) } return(p) }
/scratch/gouwar.j/cran-all/cranData/BETS/R/draw.gdp_mon.R
#' @title Create a chart of the Base Interest Rate (SELIC) time series #' #' @description Creates a plot of series 4189 #' #' @return An image file is saved in the 'graphs' folder, under the BETS installation directory. #' @importFrom zoo as.Date as.yearqtr #' @importFrom grDevices rgb #' @import plotly #' @author Talitha Speranza \email{[email protected]} draw.gdp_unemp = function(){ gdp = window(BETSget(22109), start = c(2012,2)) #ipca = window(aggregate(BETSget(433), nfrequency = 4), start = c(2012,2)) #gdp = deflate(ts = gdp, deflator = ipca, type = "perc") # unemp = suppressWarnings(ts(BETSget(24369)[,2], start = c(2012,6),frequency = 12)) # unemp = BETSget(24369) ind = grepl("-03-|-06-|-09-|-12-", as.Date(unemp)) unemp = ts(unemp[ind],start = c(2012,2), frequency = 4) unemp = window(unemp, end = end(gdp)) last_val = gdp[length(gdp)] last_date = as.Date(gdp)[length(gdp)] a <- list( x = last_date, y = last_val, text = paste0("<b>",last_val,"</b>"), xref = "x", yref = "y", showarrow = TRUE, arrowhead = 6, ay = 40, ax = 0, font = list(size = 22) ) last_val = unemp[length(unemp)] last_date = as.Date(unemp)[length(unemp)] b <- list( x = last_date, y = last_val, text = paste0("<b>",last_val,"</b>"), xref = "x", yref = "y2", showarrow = TRUE, arrowhead = 6, ay = 40, ax = 0, font = list(size = 22) ) ay <- list( overlaying = "y", side = "right", zeroline = FALSE, showgrid = FALSE, tickfont = list(size = 22) ) m <- list( t = 50, pad = 1, r = 60 ) dates = as.Date(gdp) quarters = as.yearqtr(dates) p = plot_ly(mode = "lines", type = "scatter", x = as.Date(gdp), y = gdp, name = "Quaterly GDP (Index)", width = 700, height = 450) %>% add_lines(yaxis = "y2", x = as.Date(unemp), y = unemp, name = "Unemployment Rate") %>% layout(title = "<b>GDP x UNEMPLOYMENT RATE</b>", yaxis = list(tickfont = list(size = 22)), xaxis = list(tickfont = list(size = 15), tickangle = 60, tickvals = dates, ticktext=as.character(quarters)), yaxis2 = ay, margin = m, titlefont = list(size = 19), annotations = list(a,b), legend = list(orientation = 'h', x = 0.3, y = -0.33)) return(p) }
/scratch/gouwar.j/cran-all/cranData/BETS/R/draw.gdp_unemp.R
#' @title Create a chart of the Base Interest Rate (SELIC) time series #' #' @description Creates a plot of series 4189 #' #' @return An image file is saved in the 'graphs' folder, under the BETS installation directory. #' @importFrom zoo as.Date as.yearqtr #' @importFrom forecast ma #' @import plotly #' @importFrom seasonal seas #' @author Talitha Speranza \email{[email protected]} draw.gdp_vars = function(){ gdp_comp = file.path(system.file(package="BETS"), "/mon_pib_vars.csv") data <- read.csv2(gdp_comp, stringsAsFactors = F) gdp = window(ts(as.numeric(data[,2]), start = c(2000,12), frequency = 12),start = c(2013,1)) gdp_cons = window(ts(as.numeric(data[,3]), start = c(2000,12), frequency = 12),start = c(2013,1)) gdp_gov = window(ts(as.numeric(data[,4]), start = c(2000,12), frequency = 12),start = c(2013,1)) gdp_bffk = window(ts(as.numeric(data[,5]), start = c(2000,12), frequency = 12),start = c(2013,1)) m <- list( t = 60, pad = 1 ) t <- list( x = 0.5, y = 1.25, text = "<b>GDP COMPONENTS VARIATION</b><br><span style = 'font-size:13'>Accumulated variation 12 Months - GDP Monitor (FGV/IBRE)</span>", xref = "paper", yref = "paper", showarrow = F, font = list(size = 19) ) if(gdp[length(gdp)] < 0){ y0 = 0 } else { y0 = gdp[length(gdp)] } a <- list( x = as.Date(gdp)[length(gdp)], y = y0, text = paste0("<b>",gdp[length(gdp)],"</b>"), xref = "x", yref = "y", showarrow = TRUE, arrowhead = 6, ay = -40, ax = 0, font = list(size = 22) ) #dates = as.Date(gdp) #quarters = as.yearqtr(dates) p = plot_ly(type = "bar", x = as.Date(gdp), y = gdp, name = "GDP", width = 700, height = 450) %>% add_trace(y = gdp_gov, x = as.Date(gdp), name = "Gov. Exp.", type = "scatter", mode = "lines") %>% add_trace(y = gdp_cons, x = as.Date(gdp), name = "Hous. Exp.", type = "scatter", mode = "lines") %>% add_trace(y = gdp_bffk, x = as.Date(gdp), name = "GFFK", type = "scatter", mode = "lines", line = list(color = "#908989")) %>% layout(title = '', yaxis = list(tickfont = list(size = 20)), xaxis = list(tickfont = list(size = 15),tickangle = 60), #xaxis = list(tickfont = list(size = 15), tickangle = 60, tickvals = dates, ticktext=as.character(quarters), showgrid = T), margin = m, titlefont = list(size = 19), annotations = list(a,t), legend = list(orientation = 'h', x = 0.15, y = -0.33) ) return(p) }
/scratch/gouwar.j/cran-all/cranData/BETS/R/draw.gdp_vars.R
#' @title Create a chart of the Base Interest Rate (SELIC) time series #' #' @description Creates a plot of series 4189 #' #' @param ts aaaa #' @param style aaa #' @param params aaa #' #' @return An image file is saved in the 'graphs' folder, under the BETS installation directory. #' @importFrom zoo as.Date as.yearqtr #' @importFrom grDevices rgb #' @import plotly #' @author Talitha Speranza \email{[email protected]} draw.generic <- function(ts, style, params){ no.extra = F no.legend = F if(is.null(params$extra)){ no.extra = T } if(is.null(params$legend)){ no.legend = T leg = paste0("Series ", 1:length(ts)) } else { leg = params$legend } if(is.null(params$trend)){ params$trend = F } if(is.null(params$type)){ params$type = 'lines' } if(is.null(params$title)){ params$title = '' } if(is.null(params$subtitle)){ params$subtitle = '' } if(is.null(params$arr.ort)){ params$arr.ort = 'v' } if(is.null(params$extra.arr.ort)){ params$extra.arr.ort = 'h' } if(is.null(params$xlim) && !no.extra){ rgs = c(range(time(ts)),range(time(params$extra))) params$xlim = c(min(rgs), max(rgs)) } if(is.null(params$ylim) && !no.extra){ rgs = c(range(ts),range(params$extra)) params$ylim = c(min(rgs), max(rgs)) } if(style == "normal"){ series = ts leg.pos = params$legend.pos if(is.null(leg.pos)){ params$legend.pos = 'topleft' leg.pos = "none" } if(is.null(params$colors)){ params$colors = c("firebrick4", "firebrick3") } if(!no.extra){ leg.pos = 'none' } lims = chart.add_basic(ts = series, type = params$type, title = params$title, subtitle = params$subtitle, xlim = params$xlim, ylim = params$ylim, col = params$colors[1], leg.pos = leg.pos, arr.pos = params$arr.ort, arr.size = params$arr.len, trend = params$trend) if(is.null(params$xlim)){ params$xlim = lims[1:2] } if(is.null(params$ylim)){ params$ylim = lims[3:4] } if(!no.extra){ series = list(series, params$extra) chart.add_extra(params$extra, ylim = params$ylim, xlim = params$xlim, col = params$colors[2], leg.pos = leg.pos, arr.pos = params$extra.arr.ort, arr.size = params$extra.arr.len, main.type = params$type) } if(!no.legend){ t2 = 2 if(params$type == "bar"){ t2 = 1 } legend(params$legend.pos, leg, lty=c(1,t2), lwd=c(2.5,2.5), col= params$colors, bty = "n", cex = 0.9) } if(frequency(ts) != 1){ nms = leg if(no.legend){ nms = NULL } chart.add_notes(series, names = nms, ylim = lims[3:4], xlim = lims[1:2]) } } else { subtitle <- NULL if(params$subtitle != ""){ subtitle <- paste0("<br><span style = 'font-size:16px'>", params$subtitle, "</span>") } if(is.null(params$colors)){ params$colors = c("#8B1A1A", "#CD2626") } m <- list( t = 70, l = 60, r = 60, pad = 1 ) last_val = ts[length(ts)] last_date = as.Date(ts)[length(ts)] if(is.null(params$arr.len)){ hlen = 80 vlen = 80 } else { hlen = params$arr.len vlen = params$arr.len } if(params$arr.ort == 'h'){ ay = 0 ax = hlen } else { ay = vlen ax = 0 } a1 <- list( x = last_date, y = last_val, text = paste0("<b>",last_val,"</b>"), xref = "x", yref = "y", showarrow = TRUE, arrowhead = 6, ay = ay, ax = -ax, font = list(size = 22) ) a2 <- NULL p = plot_ly(width = 700, height = 450) if(params$type == "lines"){ p <- p %>% add_lines(x = as.Date(ts), y = ts, name = leg[1], line = list(color = params$colors[1])) } else { p <- p %>% add_trace(type = "bar", x = as.Date(ts), y = ts, name = leg[1], marker = list(color = params$colors[1])) } if(params$trend){ requireNamespace("mFilter") tr <- fitted(mFilter::hpfilter(ts)) p <- p %>% add_trace(y = tr, x = as.Date(tr), name = "Trend", line = list(color = "#bd081c", dash = "dash")) } yaxis2 <- 
NULL if(!no.extra){ if(is.null(params$extra.y2)){ y2 <- "y" yaxis2 <- NULL } else { y2 <- "y2" yaxis2 <- list( overlaying = "y", side = "right", zeroline = FALSE, showgrid = FALSE, tickfont = list(size = 22) ) } extra <- params$extra last_val = extra[length(extra)] last_date = as.Date(extra)[length(extra)] if(is.null(params$extra.arr.len)){ hlen = 80 vlen = 80 } else { hlen = params$arr.len vlen = params$arr.len } if(params$extra.arr.ort == 'h'){ ay = 0 ax = hlen } else { ay = vlen ax = 0 } a2 <- list( x = last_date, y = last_val, text = paste0("<b>",last_val,"</b>"), xref = "x", yref = y2, showarrow = TRUE, arrowhead = 6, ay = ay, ax = -ax, font = list(size = 22) ) p <- p %>% add_lines(yaxis = y2, x = as.Date(extra), y = extra, name = leg[2], line = list(color = params$colors[2])) } if(!no.legend){ pos = params$legend.pos if(is.null(pos)){ pos = 'h' } legend.list = list(orientation = pos) } else { legend.list = NULL } p <- p %>% layout(title = paste0("<b>",params$title,"</b>", subtitle), yaxis = list(tickfont = list(size = 22), range = params$ylim), xaxis = list(tickfont = list(size = 15)), yaxis2 = yaxis2, margin = m, titlefont = list(size = 19), annotations = list(a1,a2), legend = legend.list) return(p) } return(NULL) }
/scratch/gouwar.j/cran-all/cranData/BETS/R/draw.generic.R
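# Usage sketch for draw.generic() above (internal: from outside the package it
# would need BETS:::draw.generic). 'style' selects base graphics ("normal") or a
# plotly chart (any other value); the 'params' fields shown here (title,
# subtitle, legend, type, trend) are among those read by the function. The
# series is toy data.
# y <- ts(100 + cumsum(rnorm(48)), start = c(2014, 1), frequency = 12)
# p <- draw.generic(y, style = "plotly",
#                   params = list(title = "Toy Series",
#                                 subtitle = "Simulated data",
#                                 legend = "Series A",
#                                 type = "lines",
#                                 trend = FALSE))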
#' @title Create a chart of the Uncertainty Index (IIE-Br) time series
#'
#' @description Creates a plot of the FGV/IBRE Brazilian Economic Uncertainty Index (IIE-Br), together with its 6-period moving average
#'
#' @return An image file is saved in the 'graphs' folder, under the BETS installation directory.
#' @importFrom zoo as.Date
#' @importFrom forecast ma
#' @import plotly
#' @author Talitha Speranza \email{[email protected]}


draw.iie_br = function(){
    
    #iiebr = paste0(system.file(package="BETS"), "/incerteza_fgv.csv")
    iiebr = BETSget("ST_iiebr")
    #data <- read.csv2(iiebr, stringsAsFactors = F)
    iiebr = ts(iiebr[,2], start = c(2000,1), frequency = 12)
    iiebr.ma = ma(iiebr,6)
    
    m <- list(
        t = 50,
        pad = 1
    )
    
    a <- list(
        x = as.Date(iiebr)[length(iiebr)],
        y = iiebr[length(iiebr)],
        text = paste0("<b>",iiebr[length(iiebr)],"</b>"),
        xref = "x",
        yref = "y",
        showarrow = TRUE,
        arrowhead = 6,
        ay = 50,
        ax = 0,
        font = list(size = 22)
    )
    
    p = plot_ly(mode = "lines", type = "scatter", x = as.Date(iiebr), y = iiebr, name = "IIE-Br", line = list(color = "#908989"), width = 700, height = 450) %>%
        add_trace(y = iiebr.ma, x = as.Date(iiebr.ma), name = "MA 6 periods", line = list(color = "#bd081c", dash = "dash")) %>%
        layout(title = "<b>UNCERTAINTY INDEX</b><br>IIE-Br (FGV/IBRE)",
               yaxis = list(tickfont = list(size = 22), titlefont = list(size = 22)),
               xaxis = list(tickfont = list(size = 22)),
               margin = m,
               titlefont = list(size = 19),
               annotations = a,
               legend = list(orientation = 'h', x = 0.3))
    
    return(p)
}
/scratch/gouwar.j/cran-all/cranData/BETS/R/draw.iie_br.R
#' @title Create a chart of the Production Indicators time series #' #' @description Creates a plot of series 21859 #' #' @return An image file is saved in the 'graphs' folder, under the BETS installation directory. #' #' @importFrom grDevices dev.new dev.off pdf png #' @importFrom utils read.csv2 #' @importFrom stats ts plot.ts #' @importFrom graphics axis text points mtext arrows #' @importFrom seasonal seas #' @author Talitha Speranza \email{[email protected]} draw.indprod = function(){ indprod = seasonal::final(seas(BETSget(21859))) start = c(2006,1) if(!is.null(start)){ indprod = window(indprod, start = start) } lims = chart.add_basic(ts = indprod, title = "Industrial Production", subtitle = "Seasonally Adjusted. Index (2012 = 100)", col = "chocolate1") chart.add_notes(indprod, ylim = lims[3:4], xlim = lims[1:2]) }
/scratch/gouwar.j/cran-all/cranData/BETS/R/draw.indprod.R
#' @title Create a chart of the National Consumer Price Index time series #' #' @description Creates a plot of series 13522 (NCPI), along with series 4466 (NCPI core) #' #' @return An image file is saved in the 'graphs' folder, under the BETS installation directory. #' #' @importFrom zoo as.Date #' #' @importFrom grDevices dev.new dev.off pdf png #' @importFrom utils read.csv2 #' @importFrom stats ts plot.ts #' @importFrom graphics axis text points mtext arrows #' @importFrom graphics strwidth #' @author Talitha Speranza \email{[email protected]} draw.ipca = function(){ ipca = BETSget(13522) core = BETSget(4466) start = c(2006,1) if(is.null(start)){ start = vector(mode = "logical") if(start(ipca) > start(core)){ start = start(ipca) } else if(start(ipca) == start(core)) { start[1] = start(ipca)[1] start[2] = max(start(ipca)[2],start(core)[2]) } else { start = start(core) } } ipca = window(ipca, start = start) core_acc = vector(mode = "numeric") for(i in 12:length(core)){ sum = 0 for(j in 1:11){ sum = sum + core[i-j] } core_acc[i-11] = sum } par(mar = c(7,4,4,2)) core_acc = ts(core_acc, start = c(1996,12), frequency = 12) core = window(core_acc, start = start) lims = chart.add_basic(ts = ipca, title = "National Consumer Price Index (IPCA)", subtitle = "Cumulative 12-Month Percentage", arr.pos = "h", leg.pos = "none") chart.add_extra(core, ylim = lims[3:4], xlim = lims[1:2], leg.pos = "none") abline(a = 4.5, b = 0, lty = 3, lwd = 3, col = "darkgray") legend("topleft", c("IPCA", "Core"), lty=c(1,2), lwd=c(2.5,2.5),col=c("firebrick4", "firebrick3"), bty = "n", cex = 0.9) text(lims[2] - 3*strwidth("Target"), 4.1, "Target", cex = 0.9) chart.add_notes(list(ipca = ipca, core = core), names = c("IPCA","Core"), ylim = lims[3:4], xlim = lims[1:2]) }
/scratch/gouwar.j/cran-all/cranData/BETS/R/draw.ipca.R
#' @title Create a chart of the Base Interest Rate (SELIC) time series #' #' @description Creates a plot of series 4189 #' #' @return An image file is saved in the 'graphs' folder, under the BETS installation directory. #' @importFrom zoo as.Date as.yearqtr #' @importFrom grDevices rgb #' @import plotly #' @author Talitha Speranza \email{[email protected]} draw.lab_coin = function(){ file = file.path(system.file(package="BETS"), "/sondagens_completo_fgv.csv") sond = read.csv2(file, stringsAsFactors = F) sond[,-1] = suppressWarnings( data.frame(lapply(sond[,-1], function(x){as.numeric(gsub(",",".",x))})) ) inx = ts(sond[90:nrow(sond),21:22],start = c(2008,6),frequency = 12) t = "<b>LABOR COINCIDENT INDICATOR</b>" t = paste0(t,"<br><span style = 'font-size:15px'>Seasonally Adjusted</span>") len = nrow(inx) if(is.na(inx[len,1])){ len = len - 1 } t <- list( x = 0.5, y = 1.17, text = t, xref = "paper", yref = "paper", showarrow = F, font = list(size = 19) ) a <- list( x = as.Date(inx)[len], y = inx[len,1], text = paste0("<b>",inx[len,1],"</b>"), xref = "x", yref = "y", showarrow = TRUE, arrowhead = 6, ay = -50, ax = 0, font = list(size = 22) ) m <- list( t = 60, pad = 1 ) dates = as.Date(inx) rg = rgb(162,7,7, maxColorValue = 255) p = plot_ly(width = 700, height = 450) %>% add_lines(x = dates, y = inx[,1], name = "Coincident") %>% layout(title = t, yaxis = list(tickfont = list(size = 22)), xaxis = list(tickfont = list(size = 17)), margin = m, titlefont = list(size = 19), annotations = list(a,t), legend = list(orientation = 'h', x = 0.27), shapes = list( list(type = "rect", fillcolor = rg, line = list(color = rg), opacity = 0.2, x0 = "2008-10-01", x1 = "2009-03-31", xref = "x", y0 = 55, y1 = 115, yref = "y"), list(type = "rect", fillcolor = rg, line = list(color = rg), opacity = 0.2, x0 = "2014-07-01", x1 = as.Date(inx)[nrow(inx)], xref = "x", y0 = 55, y1 = 113, yref = "y"))) return(p) }
/scratch/gouwar.j/cran-all/cranData/BETS/R/draw.lab_coin.R
#' @title Create a chart of the Base Interest Rate (SELIC) time series #' #' @description Creates a plot of series 4189 #' #' @return An image file is saved in the 'graphs' folder, under the BETS installation directory. #' @importFrom zoo as.Date as.yearqtr #' @importFrom grDevices rgb #' @import plotly #' @author Talitha Speranza \email{[email protected]} draw.lab_lead = function(){ file = file.path(system.file(package="BETS"), "/sondagens_completo_fgv.csv") sond = read.csv2(file, stringsAsFactors = F) sond[,-1] = suppressWarnings( data.frame(lapply(sond[,-1], function(x){as.numeric(gsub(",",".",x))})) ) inx = ts(sond[90:nrow(sond),21:22],start = c(2008,6),frequency = 12) t = "<b>LABOR LEADING INDICATOR</b>" t = paste0(t,"<br><span style = 'font-size:15px'>Seasonally Adjusted</span>") len = nrow(inx) if(is.na(inx[len,1])){ len = len - 1 } t <- list( x = 0.5, y = 1.17, text = t, xref = "paper", yref = "paper", showarrow = F, font = list(size = 19) ) b <- list( x = as.Date(inx)[len], y = inx[len,2], text = paste0("<b>",inx[len,2],"</b>"), xref = "x", yref = "y", showarrow = TRUE, arrowhead = 6, ay = 50, ax = 0, font = list(size = 22) ) m <- list( t = 60, pad = 1 ) dates = as.Date(inx) rg = rgb(162,7,7, maxColorValue = 255) p = plot_ly(width = 700, height = 450) %>% add_lines(x = dates, y = inx[,2], name = "Leading") %>% layout(title = t, yaxis = list(tickfont = list(size = 22)), xaxis = list(tickfont = list(size = 17)), margin = m, titlefont = list(size = 19), annotations = list(b,t), legend = list(orientation = 'h', x = 0.27), shapes = list( list(type = "rect", fillcolor = rg, line = list(color = rg), opacity = 0.2, x0 = "2008-10-01", x1 = "2009-03-31", xref = "x", y0 = 55, y1 = 115, yref = "y"), list(type = "rect", fillcolor = rg, line = list(color = rg), opacity = 0.2, x0 = "2014-07-01", x1 = as.Date(inx)[nrow(inx)], xref = "x", y0 = 55, y1 = 113, yref = "y"))) return(p) }
/scratch/gouwar.j/cran-all/cranData/BETS/R/draw.lab_lead.R
#' @title Create a chart of the Base Interest Rate (SELIC) time series #' #' @description Creates a plot of series 4189 #' #' @return An image file is saved in the 'graphs' folder, under the BETS installation directory. #' @importFrom zoo as.Date as.yearqtr #' @importFrom grDevices rgb #' @import plotly #' @author Talitha Speranza \email{[email protected]} draw.lei = function(){ file = file.path(system.file(package="BETS"), "/lei.csv") lei= read.csv2(file, stringsAsFactors = F)[,2] lei = window(ts(as.numeric(lei), start = c(1996,1),frequency = 12),start = c(2000,6)) t = "<b>LEADING ECONOMIC INDICATOR</b>" t = paste0(t,"<br><span style = 'font-size:15px'>The Conference Board with FGV/IBRE</span>") t <- list( x = 0.5, y = 1.17, text = t, xref = "paper", yref = "paper", showarrow = F, font = list(size = 19) ) l = length(lei) a <- list( x = as.Date(lei)[l], y = lei[l], text = paste0("<b>",lei[l],"</b>"), xref = "x", yref = "y", showarrow = TRUE, arrowhead = 6, ay = -50, ax = 0, font = list(size = 22) ) m <- list( t = 60, pad = 1 ) dates = as.Date(lei) rg = rgb(162,7,7, maxColorValue = 255) p = plot_ly(width = 700, height = 450) %>% add_lines(x = dates, y = lei) %>% layout(title = t, yaxis = list(tickfont = list(size = 22),range = c(60,115)), xaxis = list(tickfont = list(size = 17)), margin = m, titlefont = list(size = 19), annotations = list(a,t), shapes = list( list(type = "rect", fillcolor = rg, line = list(color = rg), opacity = 0.2, x0 = "2001-01-01", x1 = "2002-01-01", xref = "x", y0 = 60, y1 = 115, yref = "y"), list(type = "rect", fillcolor = rg, line = list(color = rg), opacity = 0.2, x0 = "2003-01-01", x1 = "2003-07-01", xref = "x", y0 = 60, y1 = 115, yref = "y"), list(type = "rect", fillcolor = rg, line = list(color = rg), opacity = 0.2, x0 = "2008-10-01", x1 = "2009-04-01", xref = "x", y0 = 60, y1 = 115, yref = "y"), list(type = "rect", fillcolor = rg, line = list(color = rg), opacity = 0.2, x0 = "2014-07-01", x1 = as.Date(lei)[l], xref = "x", y0 = 60, y1 = 115, yref = "y"))) return(p) }
/scratch/gouwar.j/cran-all/cranData/BETS/R/draw.lei.R
#' @title Create a chart of the Base Interest Rate (SELIC) time series #' #' @description Creates a plot of series 4189 #' #' @return An image file is saved in the 'graphs' folder, under the BETS installation directory. #' @importFrom zoo as.Date #' @importFrom grDevices rgb #' @import plotly #' @author Talitha Speranza \email{[email protected]} draw.misery_index = function(){ ipca = suppressWarnings(BETSget(13522)) #ipca = ts(ipca[,"value"], start = c(1980,12), frequency = 12) ipca = window(ipca, start = c(2012,3)) unemp = BETSget(24369) misery = ipca + unemp # Consumer Confidence file = file.path(system.file(package="BETS"), "/sondagens_completo_fgv.csv") sond = read.csv2(file, stringsAsFactors = F) sond[,-1] = data.frame(lapply(sond[,-1], function(x){as.numeric(gsub(",",".",x))})) cons = window(ts(sond[115:nrow(sond),17],start = c(2010,7),frequency = 12),start = c(2012,3)) a <- list( x = as.Date(misery)[length(misery)], y = misery[length(misery)], text = paste0("<b>",misery[length(misery)],"</b>"), xref = "x", yref = "y", showarrow = TRUE, arrowhead = 6, ay = -40, ax = 0, font = list(size = 22) ) b <- list( x = as.Date(cons)[length(cons)], y = cons[length(cons)], text = paste0("<b>",cons[length(cons)],"</b>"), xref = "x", yref = "y2", showarrow = TRUE, arrowhead = 6, ay = 40, ax = 0, font = list(size = 22) ) t <- list( x = 0.5, y = 1.18, text = "<b>MISERY INDEX x CONSUMER CONFIDENCE</b>", xref = "paper", yref = "paper", showarrow = F, font = list(size = 19) ) ay <- list( overlaying = "y", side = "right", zeroline = FALSE, showgrid = FALSE, tickfont = list(size = 22) ) m <- list( t = 100, pad = 1, r = 60 ) rg = rgb(162,7,7, maxColorValue = 255) p = plot_ly(width = 700, height = 450) %>% add_lines(x = as.Date(misery), y = misery, name = "Misery Index") %>% add_lines(yaxis = "y2", x = as.Date(cons), y = cons, name = "Consumer Confidence") %>% layout(title = "", yaxis = list(tickfont = list(size = 22)), yaxis2 = ay, xaxis = list(title = "", tickfont = list(size = 22)), margin = m, annotations = list(t,a,b), legend = list(orientation = 'h', x = 0.17, y = -0.33), shapes = list( list(type = "rect", fillcolor = rg, line = list(color = rg), opacity = 0.2, x0 = "2014-07-01", x1 = as.Date(misery)[length(misery)], xref = "x", y0 = 10, y1 = 22, yref = "y") ) ) return(p) }
/scratch/gouwar.j/cran-all/cranData/BETS/R/draw.misery_index.R
#' @title Create a chart of the Base Interest Rate (SELIC) time series #' #' @description Creates a plot of series 4189 #' #' @return An image file is saved in the 'graphs' folder, under the BETS installation directory. #' #' @importFrom zoo zooreg #' #' @importFrom grDevices dev.new dev.off pdf png #' @importFrom utils read.csv2 #' @importFrom stats ts plot.ts #' @importFrom graphics axis text points mtext arrows #' @author Talitha Speranza \email{[email protected]} draw.selic = function(){ selic = BETSget(4189) target = BETSget(432) start = c(2006,1) if(!is.null(start)){ selic = window(selic, start = start) } else{ start = start(selic) } # target = get.series.bacen(432)[[1]] # target[,1] = as.Date(target[,1], format = "%d/%m/%Y") inx = grep("-15$",target[,1]) first = target[1,1] target = ts(target[inx,2], start = as.numeric(c(format(first,"%Y"),format(first,"%m"))), frequency = 12) target = window(target, start = start, frequency = 12) lims = chart.add_basic(ts = selic, title = "Base Interest Rate (SELIC)", subtitle = "Accumulated in the Month, in Annual Terms", col = "darkolivegreen", arr.pos = "h", leg.pos = "none") chart.add_extra(target, ylim = lims[3:4], xlim = lims[1:2], arr.pos = "none", leg.pos = "none", col = "darkgray") legend("bottomleft", c("SELIC", "Target"), lty=c(1,2), lwd=c(2.5,2.5),col=c("darkolivegreen", "darkgray"), bty = "n", cex = 0.9) chart.add_notes(selic, ylim = lims[3:4], xlim = lims[1:2]) }
/scratch/gouwar.j/cran-all/cranData/BETS/R/draw.selic.R
#' @title Create a chart of the Base Interest Rate (SELIC) time series #' #' @description Creates a plot of series 4189 #' #' @return An image file is saved in the 'graphs' folder, under the BETS installation directory. #' @importFrom zoo as.Date as.yearqtr #' @importFrom grDevices rgb #' @import plotly #' @author Talitha Speranza \email{[email protected]} draw.sent_ind = function(){ file = file.path(system.file(package="BETS"), "/sondagens_completo_fgv.csv") sond = read.csv2(file, stringsAsFactors = F) sond[,-1] = data.frame(lapply(sond[,-1], function(x){as.numeric(gsub(",",".",x))})) emp = (sond[,2] + sond[,6] + sond[,10] + sond[,16])/4 emp = round(emp[!is.na(emp)],2) emp = ts(emp, start = c(2010,7), frequency = 12) cons = ts(sond[115:nrow(sond),17],start = c(2010,7),frequency = 12) tot = (cons + emp)/2 last_val = emp[length(emp)] last_date = as.Date(emp)[length(emp)] a <- list( x = last_date, y = last_val, text = paste0("<b>",last_val,"</b>"), xref = "x", yref = "y", showarrow = TRUE, arrowhead = 6, ay = 60, ax = 0, font = list(size = 22) ) last_val = cons[length(tot)] last_date = as.Date(cons)[length(tot)] m <- list( t = 50, pad = 1 ) rg = rgb(162,7,7, maxColorValue = 255) p = plot_ly(mode = "lines", type = "scatter", x = as.Date(tot), y = tot, width = 700, height = 450) %>% layout(title = "<b>ECONOMIC SENTIMENT INDICATOR</b>", yaxis = list(tickfont = list(size = 22)), xaxis = list(tickfont = list(size = 15)), margin = m, titlefont = list(size = 19), annotations = a, shapes = list(type = "rect", fillcolor = rg, line = list(color = rg), opacity = 0.2, x0 = "2014-07-01", x1 = as.Date(emp)[length(emp)], xref = "x", y0 = 60, y1 = 115, yref = "y") ) return(p) }
/scratch/gouwar.j/cran-all/cranData/BETS/R/draw.sent_ind.R
#' @title Create a chart of the Base Interest Rate (SELIC) time series #' #' @description Creates a plot of series 4189 #' #' @param survey xxx #' #' @return An image file is saved in the 'graphs' folder, under the BETS installation directory. #' @importFrom zoo as.Date as.yearqtr #' @importFrom grDevices rgb #' @importFrom utils tail #' @import plotly #' @author Talitha Speranza \email{[email protected]} draw.survey = function(survey){ file = file.path(system.file(package="BETS"), "/sondagens_completo_fgv.csv") sond = read.csv2(file, stringsAsFactors = F) sond[,-1] = suppressWarnings( data.frame(lapply(sond[,-1], function(x){as.numeric(gsub(",",".",x))})) ) #head(na.omit(sond[57:nrow(sond),c(1,10:12)]),1) if(survey == "consm"){ inx = ts(sond[57:nrow(sond),17:19],start = c(2005,9),frequency = 12) inx = window(inx, start = c(2015,1)) t = "<b>CONSUMERS SURVEY</b>" } else if(survey == "constr"){ inx = ts(sond[115:nrow(sond),14:16],start = c(2010,7),frequency = 12) inx = window(inx, start = c(2015,1)) temp = inx[,3] inx[,3] = inx[,1] inx[,1] = temp t = '<b>CONSTRUCTION INDUSTRY SURVEY</b>' } else if(survey == 'retail'){ inx = ts(sond[111:nrow(sond),10:12],start = c(2010,3),frequency = 12) inx = window(inx, start = c(2015,1)) t = "<b>RETAILERS SURVEY</b>" } else if(survey == 'servc'){ inx = ts(sond[90:nrow(sond),6:8],start = c(2008,6),frequency = 12) inx = window(inx, start = c(2015,1)) t = "<b>SERVICES SURVEY</b>" } else if(survey == 'transf_ind'){ inx = ts(sond[57:nrow(sond),2:4],start = c(2005,9),frequency = 12) inx = window(inx, start = c(2015,1)) t = "<b>MANUFACTURING INDUSTRY SURVEY</b>" } t = paste0(t,"<br><span style = 'font-size:16px'>Index</span>") max = max(inx[,1],inx[,2],inx[,3]) min = min(inx[,1],inx[,2],inx[,3]) if(inx[nrow(inx),1] < 0){ y0 = 0 } else { y0 = inx[nrow(inx),1] } a <- list( x = tail(as.Date(inx),1), y = y0, text = paste0("<b>",inx[nrow(inx),1],"</b>"), xref = "x", yref = "y", showarrow = TRUE, arrowhead = 6, ay = -50, ax = 0, font = list(size = 22) ) m <- list( t = 50, pad = 1 ) dates = as.Date(inx) months = as.yearmon(dates) p = plot_ly(width = 700, height = 450) %>% add_lines(x = dates, y = inx[,3], name = "Expectations") %>% add_lines(x = dates, y = inx[,2], name = "Present Situation") %>% add_trace(type = "bar", x = dates, y = inx[,1], name = "Confidence Index", marker = list(color = "#908989")) %>% layout(title = t, yaxis = list(tickfont = list(size = 22), range = c(min-10,max+10)), xaxis = list(tickfont = list(size = 15), tickangle = 60, tickvals = dates, ticktext=as.character(months)), margin = m, titlefont = list(size = 19), annotations = a, legend = list(orientation = 'h', x = 0.17, y = -0.33)) return(p) }
/scratch/gouwar.j/cran-all/cranData/BETS/R/draw.survey.R
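# Usage sketch for draw.survey() above (internal; it reads the survey CSV shipped
# with the package). The 'survey' argument selects the FGV survey to chart; the
# values handled by the function are "consm" (consumers), "constr" (construction
# industry), "retail" (retailers), "servc" (services) and "transf_ind"
# (manufacturing industry).
# p <- draw.survey(survey = "transf_ind")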
#' @title Create a chart of the Unitary Labor Cost time series #' #' @description Creates a plot of series 11777 #' #' @return An image file is saved in the 'graphs' folder, under the BETS installation directory. #' #' @importFrom grDevices dev.new dev.off pdf png #' @importFrom utils read.csv2 #' @importFrom zoo as.Date #' @importFrom stats ts plot.ts #' @importFrom graphics axis text points mtext arrows draw.ulc = function(){ cut = BETSget(11777) start = c(2006,1) if(!is.null(start)){ cut = window(cut, start = start) } lims = chart.add_basic(ts = cut, title = "Unitary Labor Cost", subtitle = "US$ - June 1994 = 100", col = "firebrick4", arr.size = 25) chart.add_notes(cut, ylim = lims[3:4], xlim = lims[1:2]) }
/scratch/gouwar.j/cran-all/cranData/BETS/R/draw.ulc.R
#' @title Create a chart of the Unemployment Rate time series
#'
#' @description Creates a plot of series 24369 (unemployment rate, Continuous PNAD)
#'
#' @return An image file is saved in the 'graphs' folder, under the BETS installation directory.
#'
#' @importFrom grDevices dev.new dev.off pdf png
#' @importFrom zoo as.Date
#' @importFrom utils read.csv2
#' @importFrom stats ts plot.ts
#' @importFrom graphics axis text points mtext arrows


draw.unemp = function(){
    
    unemp = BETSget(24369)
    
    # start = c(2006,1)
    # 
    # if(!is.null(start)){
    #     unemp = window(unemp, start = start)
    # }
    
    lims = chart.add_basic(ts = unemp, ylim = c(4,14), title = "Unemployment Rate (PNAD-C)", subtitle = "Metropolitan Regions", col = "royalblue", trend = TRUE, leg.pos = "bottom")
    chart.add_notes(unemp, ylim = lims[3:4], xlim = lims[1:2])
}
/scratch/gouwar.j/cran-all/cranData/BETS/R/draw.unemp.R
#' @title Create a chart of the Real Percentage Change of GDP in the Year time series #' #' @description Creates a plot of series 7326 #' #' @return An image file is saved in the 'graphs' folder, under the BETS installation directory. #' #' @importFrom grDevices dev.new dev.off pdf png #' @importFrom utils read.csv2 #' @importFrom stats ts plot.ts #' @importFrom graphics axis text points mtext arrows draw.vargdp = function(){ vargdp = BETSget(7326) start = c(2006,1) if(!is.null(start)){ vargdp = window(vargdp, start = start) } chart.add_basic(ts = vargdp, type = "bar", ylim = c(-5.5,10), title = "Real GDP", subtitle = "Percentage Change in the Year", col = "chocolate1", trend = T) }
/scratch/gouwar.j/cran-all/cranData/BETS/R/draw.vargdp.R
#' @title Create a monthly or quarterly dummy
#' 
#' @description Returns a monthly or quarterly dummy (a time series with only 0s and 1s).
#' 
#' @param start An \code{integer vector}. The period of the first observation. The first element of the \code{vector} specifies the year of the first observation, whereas the second, the month (for monthly dummies) or quarter (for quarterly dummies)
#' @param end An \code{integer vector}. The period of the last observation. The first element of the \code{vector} specifies the year of the last observation, whereas the second, the month (for monthly dummies) or quarter (for quarterly dummies)
#' @param frequency An \code{integer}. The frequency of the dummy, that is, the number of observations per unit of time. The default is 12 (a monthly dummy).
#' @param year An \code{integer}, a \code{seq} or a \code{vector}. The years for which the dummy must be set to 1. All periods of these years will be set to 1.
#' @param month An \code{integer}, a \code{seq} or a \code{vector}. The months for which the dummy must be set to 1. These months will be set to 1 for all years.
#' @param quarter An \code{integer}, a \code{seq} or a \code{vector}. The quarters for which the dummy must be set to 1. These quarters will be set to 1 for all years.
#' @param date A \code{list}. The periods for which the dummy must be set to 1. Periods must be represented as \code{integer vectors}, as described for \code{start} and \code{end}.
#' @param from An \code{integer vector}. The starting period of a sequence of periods for which the dummy must be set to 1. Periods must be represented as \code{integer vectors}, as described for \code{start} and \code{end}.
#' @param to An \code{integer vector}. The ending period of a sequence of periods for which the dummy must be set to 1. Periods must be represented as \code{integer vectors}, as described for \code{start} and \code{end}.
#' 
#' @return A monthly or a quarterly \code{ts} object.
#' #' @examples #' #' #1 from a specific date to another specific date #' dummy(start = c(2000,1),end = c(2012,5),frequency = 12,from = c(2005,1),to = c(2006,12)) #' #' #' #Other options that may be helpful: #' #' #over a month equal to 1 #' dummy(start = c(2000,1), end = c(2012,5), frequency = 12, month = c(5,12)) #' #' #Months equal to 1 only for some year #' dummy(start = c(2000,1), end = c(2012,5), frequency = 12, month = 5, year = 2010) #' dummy(start = c(2000,1), end = c(2012,5), frequency = 12, month = 8, year = 2002) #' #' #Months equal to 1 only for some years #' dummy(start = c(2000,1), end = c(2012,5), frequency = 12, month = 5, year = 2005:2007) #' dummy(start = c(2000,1), end = c(2012,5), frequency = 12, month = 3, year = c(2005,2007)) #' dummy(start = c(2000,1), end = c(2012,5), frequency = 12, month = 5:6, year = c(2005,2007)) #' #' #specific dates #' dummy(start = c(2000,1), end = c(2012,5), frequency = 12, date = list(c(2010,1))) #' dummy(start = c(2000,1), end = c(2012,5), #' freq = 12, date = list(c(2010,9), c(2011,1), c(2000,1)) ) #' #' #' @seealso \code{\link[stats]{ts}}, \code{\link[BETS]{dummy}} #' #' #' @importFrom zoo as.Date #' @export dummy <- function(start = NULL, end = NULL, frequency = 12, year = NULL, month = NULL, quarter = NULL, date = NULL, from = NULL, to = NULL){ if(is.null(frequency) | !(frequency %in% c(4,12))){ stop("Set freq as 12 for monthly series or 4 for quarterly series") } if(is.null(start)){ stop("Set start")} if(is.null(end)){ stop("Set end")} ts <- ts(0, start = start, end = end, freq = frequency) years <- as.numeric(substr(as.Date(ts),1,4)) if(frequency == 12){ months <- as.numeric(substr(as.Date(ts),6,7)) if(is.null(date) & is.null(from)){ if(is.null(year)){ ts[months %in% month] <- 1 }else{ ts[months %in% month & years %in% year] <- 1 } }else if(!is.null(date)){ n <- length(date) months0 <- unlist(date)[seq(2, 2*n, by = 2)] years0 <- unlist(date)[seq(1, 2*n, by = 2)] for(i in 1:n){ ts[months0[i] == months & years0[i] == years] <- 1 } }else if(!is.null(from)){ months0 <- from[2] years0 <- from[1] if(!is.null(to)){ months1 <- to[2] years1 <- to[1] }else{ months1 <- months[length(months)] years1 <- years[length(years)] } ts[which(months0 == months & years0 == years): which(months1 == months & years1 == years)] <- 1 } }else if(frequency == 4){ quarters <- as.numeric(substr(as.Date(ts),6,7)) quarters[quarters == 4] <- 2 quarters[quarters == 7] <- 3 quarters[quarters == 10] <- 4 if(is.null(date) & is.null(from)){ if(is.null(year)){ ts[quarters %in% quarter] <- 1 }else{ ts[quarters %in% quarter & years %in% year] <- 1 } }else if(!is.null(date)){ n <- length(date) quarters0 <- unlist(date)[seq(2, 2*n, by = 2)] years0 <- unlist(date)[seq(1, 2*n, by = 2)] for(i in 1:n){ ts[quarters0[i] == quarters & years0[i] == years] <- 1 } }else if(!is.null(from)){ quarters0 <- from[2] years0 <- from[1] if(!is.null(to)){ quarters1 <- to[2] years1 <- to[1] }else{ quarters1 <- quarters[length(quarters)] years1 <- years[length(years)] } ts[which(quarters0 == quarters & years0 == years): which(quarters1 == quarters & years1 == years)] <- 1 } } ts }
/scratch/gouwar.j/cran-all/cranData/BETS/R/dummy.R
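# Additional usage sketch for dummy() above, covering the quarterly branch (the
# roxygen examples only show monthly dummies): a dummy equal to 1 in the second
# quarter of every year, and one equal to 1 over a fixed window of quarters.
d1 <- dummy(start = c(2000, 1), end = c(2010, 4), frequency = 4, quarter = 2)
d2 <- dummy(start = c(2000, 1), end = c(2010, 4), frequency = 4,
            from = c(2008, 3), to = c(2009, 2))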
# Internal helper: converts the date of a series' first observation into the
# 'start' vector expected by stats::ts(), given the series frequency.
get.period <- function(start, frequency){
    
    if(!inherits(start, "Date")){
        return(msg("Error: Argument 'start' must be a Date object"))
    }
    
    starting_year <- as.numeric(substr(start, 1, 4))
    
    if(frequency == 1){
        return(starting_year)
    }
    
    starting_month <- as.numeric(substr(start, 6, 7))
    
    if(frequency == 12){
        return(c(starting_year, starting_month))
    }
    
    if(frequency == 4){
        starting_quarter = ceiling(starting_month/3)
        return(c(starting_year, starting_quarter))
    }
    
    # Weekly and daily series are simply indexed from their first observation
    if(frequency == 52 || frequency == 365){
        return(1)
    }
}
/scratch/gouwar.j/cran-all/cranData/BETS/R/get.period.R
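# Usage sketch for get.period() above (an internal helper, not exported): it maps
# the date of a series' first observation to the 'start' vector expected by
# stats::ts(), given the frequency.
# get.period(as.Date("2010-04-01"), frequency = 4)   # c(2010, 2) -- second quarter
# get.period(as.Date("2010-04-01"), frequency = 12)  # c(2010, 4) -- April
# get.period(as.Date("2010-04-01"), frequency = 1)   # 2010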
#' @title Get a complete time series from a BETS database #' #' @description Extracts a complete time series from either the Central Bank of Brazil (BCB), the Brazilian Institute of Geography and Statistics (IBGE) or the Brazilian Institute of Economics (FGV/IBRE). #' #' @param code A \code{character}. The unique code that references the time series. This code can be obtained by using the \code{\link{BETSsearch}} function. #' @param data.frame A \code{boolean}. True if you want the output to be a data frame. True to \code{ts} output. #' @param from A \code{character} or a \code{Data} object. Starting date of the time series (format YYYY-MM-DD). #' @param to A \code{character} or a \code{Data} object. Ending date of the time series (format YYYY-MM-DD). #' @param frequency An \code{integer}. The frequency of the time series. It is not needed. It is going to be used only if the metadata for the series is corrupted. #' #' #' @keywords get #' @import RMySQL #' @import DBI get.series = function(code, from = "", to = "", data.frame = FALSE, frequency = NULL){ if(!grepl("ST_",code)){ if(from != ""){ from = format(as.Date(from), "%d/%m/%Y") } if(to != ""){ to = format(as.Date(to), "%d/%m/%Y") } code = as.numeric(code) aux = tryCatch({ get.series.bacen(code, from = from, to = to)[[1]] }, error = function(e){ data.frame() }) if(nrow(aux) == 0){ examples <- readRDS(file.path(system.file(package="BETS"),"/examples.rds")) examples <- examples[examples$code == code,] if(nrow(examples) != 0){ aux <- examples[,c(1,2)] } else { return(invisible(msg(paste(.MSG_NOT_AVAILABLE,"Series is empty in the BACEN databases")))) } } sch = suppressMessages(BETSsearch(code = code, view = F)) freq = NA if("data.frame" %in% class(sch)){ freq = trimws(sch[1,4]) } no.meta = F if(is.na(freq)){ msg(paste("There is no corresponding entry in the metadata table.\n\n", .WARN_SOFT), warn = TRUE) no.meta = T freq = "" } if(freq == "A"){ freq = 1 } else if(freq == "Q" || freq == "T"){ freq = 4 } else if(freq == "M"){ freq = 12 } else if(freq == "W" || freq == "S"){ freq = 52 } else if(freq == "D"){ freq = 365 } else { if(!no.meta){ msg(paste("Malformed metadata. The value", freq, "is not valid for 'periodicity'\n\n", .WARN_SOFT), warn = TRUE) } if(is.null(frequency)){ data.frame = T } else { freq = frequency } } } else { freq = 365 conn = connection() aux = DBI::dbGetQuery(conn,paste0("select date, value from bets.IPC where code = '",code,"' order by date asc")) invisible(dbDisconnect(conn)) if(nrow(aux) == 0){ return(invisible(msg(paste(.MSG_NOT_AVAILABLE,"Series is empty in the FGV database")))) } #- Falta filtrar por datas! } aux1 = as.numeric(aux[,2]) try = FALSE if(grepl("-",aux[1,1])){ try = tryCatch({ aux2 = as.Date(aux[,1], format = "%Y-%m-%d") FALSE }, error = function(err) { return(TRUE) } )}else{ try = tryCatch({ aux2 = as.Date(aux[,1], format = "%d/%m/%Y") FALSE }, error = function(err) { return(TRUE) }) } suppressWarnings(if(try){ return(invisible(msg(paste(.MSG_NOT_AVAILABLE,"Date formatting is inadequate.")))) }) if(freq != 365 &&!data.frame){ #year = as.numeric(format(k,"%Y")) #month = as.numeric(format(k,"%m")) #day = as.numeric(format(k,"%d")) start = get.period(aux2[1],freq) #start = get.period(c(year,month,day),freq) #print(start) ts <- ts(aux1, start = start, freq = freq) }else { ts = data.frame(date = aux2, value = aux1) } return(ts) }
/scratch/gouwar.j/cran-all/cranData/BETS/R/get.series.R
#' A function to extract BACEN series using their API
#' @param x Bacen series numbers. Either an integer or a numeric vector.
#' @param from A string specifying where the series shall start.
#' @param to A string specifying where the series shall end.
#' @param save A string specifying if data should be saved in csv or xlsx format (currently ignored).
#' Defaults to not saving.
#' @keywords bacen
#' @author Fernando Teixeira \email{[email protected]} and Jonatha Azevedo \email{[email protected]}
#' @importFrom httr GET content

get.series.bacen <- function(x, from = "", to = "", save = ""){

    if (missing(x)){
        stop("Need to specify at least one series.")
    }

    if (!is.numeric(x)){
        stop("Argument x must be numeric.")
    }

    if (from == ""){
        data_init = "01/01/1980"
    } else {
        data_init = from
    }

    if (to == ""){
        data_end = format(Sys.Date(), "%d/%m/%Y")
    } else {
        data_end = to
    }

    inputs = as.character(x)
    len = seq_along(inputs)
    serie = mapply(paste0, "serie_", inputs, USE.NAMES = FALSE)

    for(i in len){

        texto = tryCatch({
            # query the SGS web service and parse the returned CSV by hand
            k = paste0('http://api.bcb.gov.br/dados/serie/bcdata.sgs.', inputs[i],
                       '/dados?formato=csv&dataInicial=', data_init, '&dataFinal=', data_end)
            dados = httr::GET(k)
            aux = httr::content(dados, 'raw')
            aux2 = base::rawToChar(aux)

            DF <- data.frame(do.call(cbind, strsplit(aux2, "\r\n", fixed = TRUE)))
            names(DF) <- "mist"
            DF$mist <- as.character(DF$mist)
            DF$mist <- gsub(x = DF$mist, pattern = '"', replacement = "")
            DF$data <- gsub(x = DF$mist, pattern = ";.*", replacement = "")
            DF$valor <- gsub(x = DF$mist, pattern = ".*;", replacement = "")
            DF$valor <- gsub(x = DF$valor, pattern = ",", replacement = ".")
            DF <- DF[-1, -1]
        })

        # store each series under its own name (serie_<code>) inside the loop, so
        # that every requested series, not only the last one, ends up in the result
        assign(serie[i], DF)
        rm(DF, texto)
    }

    lista = list()
    ls_df = ls()[grepl('data.frame', sapply(ls(), function(x) class(get(x))))]

    for (obj in ls_df) {
        lista[obj] = list(get(obj))
    }

    return(invisible(lista))
}
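## Usage sketch (editor's addition, not part of the original file): the function
## returns, invisibly, a named list of data frames (one per requested code), so the
## result must be assigned. Dates use the dd/mm/yyyy format expected by the SGS API,
## and internet access to api.bcb.gov.br is required.
# out <- get.series.bacen(c(3691, 21864), from = "01/01/2010", to = "31/12/2015")
# names(out)            # "serie_3691" "serie_21864"
# head(out$serie_3691)  # columns 'data' (date) and 'valor' (value)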
/scratch/gouwar.j/cran-all/cranData/BETS/R/get.series.bacen.R
#' @title Test a set of General Regression Neural Networks #' #' @description Given new values of the independent variables, tests a list of trained GRNNs and picks the best net, based on an accuracy measure between the forecasted and the actual values. #' #' @param results The object returned by \link[BETS]{grnn.train}. #' @param test.set A \code{ts list}. The first element must be the actual values of the dependent variable. The others, the new values of the regressors. #' #' @return A \code{list} object representing the best network (according to forecasting MAPE). Its fields are: #' #' \itemize{ #' \item{\code{mape}: The forecasting MAPE } #' \item{\code{model}: The network object} #' \item{\code{sigma}: The sigma parameter} #' \item{\code{id}: The id number of the network, as given by \link[BETS]{grnn.train}} #' \item{\code{mean}: The predicted values } #' \item{\code{x}: The original series } #' \item{\code{fitted}: The fitted values } #' \item{\code{actual}: The actual values (to be compared with the predicted values)} #' \item{\code{residuals}: Difference between the fitted values and the series original values } #' \item{\code{regressors}: The regressors used to train the network } #' } #' #' @author Talitha Speranza \email{[email protected]} #' @importFrom utils head combn #' @export #' @import grnn forecast grnn.test = function(results, test.set){ select = TRUE if(length(test.set) < 2 || !check.series(test.set, "Series list: test.")){ return(NULL) } test.n_elem = length(test.set[[1]]) test.n_series = length(test.set) actual = test.set[[1]] test_mt = matrix(nrow = test.n_elem, ncol = test.n_series) for(i in 1:test.n_series){ test_mt[,i] = test.set[[i]] } res = vector(mode = "list") res$mape = 1.797693e+308 if(select){ for(i in 1:length(results)){ regs = results[[i]]$regressors sub_test = as.matrix(test_mt[,regs]) preds = vector(mode = "numeric") for(r in 1:nrow(sub_test)){ preds[r] = guess(results[[i]]$net, t(as.matrix(sub_test[r,]))) } if(!any(is.nan(preds))){ acc = accuracy(preds,actual)[5] } else{ acc = res$mape } if(acc < res$mape){ res$model = results[[i]]$net res$mape = acc res$id = results[[i]]$id res$sigma = results[[i]]$sigma res$mean = ts(preds,start = start(actual), end=end(actual), frequency = frequency(actual)) res$x = results[[i]]$series res$fitted = results[[i]]$fitted res$actual = actual res$residuals = results[[i]]$residuals res$regressors = results[[i]]$regressors } } } return(res) }
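## Usage sketch (editor's addition, not part of the original file): 'nets' stands for
## the list returned by grnn.train(), and the test set mirrors the training set layout
## (dependent variable first, then the regressors); 'y.test', 'x1.test' and 'x2.test'
## are hypothetical hold-out windows. A complete, self-contained example is sketched
## after grnn.train(), further below.
# best <- grnn.test(nets, list(y.test, x1.test, x2.test))
# best$mape        # forecasting MAPE of the winning network
# best$regressors  # regressors used by that network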
/scratch/gouwar.j/cran-all/cranData/BETS/R/grnn.test.R
#' @title Train a General Regression Neural Network
#'
#' @description Creates a set of probabilistic neural networks as proposed by \href{http://www.inf.ufrgs.br/~engel/data/media/file/cmp121/GRNN.pdf}{Specht [1991]}. The user provides a set of regressors and the function chooses the best subset, based on an accuracy measure (by default, the MAPE) between fitted and actual values. These networks have only one parameter, the \code{sigma}, which is the standard deviation of each activation function (gaussian) of the pattern layer. Sigma can also be automatically chosen. This function builds on \link[grnn]{grnn-package}.
#'
#' @param train.set A \code{ts list} (a list of \code{ts} objects). The first element must be the dependent variable. The other elements, the regressors.
#' @param sigma A \code{numeric} or a \code{numeric vector}. The sigma parameter, that is, the standard deviation of the activation functions (gaussians) of the pattern layer. Can be either a fixed value or a range (a vector containing the minimum and the maximum values).
#' @param step A \code{numeric} value. If \code{sigma} is a range, the user must provide a step value to vary sigma. The function is going to select the best sigma based on MAPE.
#' @param select A \code{boolean}. Must be set to \code{FALSE} if the regressors should not be chosen. The default is \code{TRUE}.
#' @param names A \code{character vector}. Optional. The names of the regressors. If not provided, indexes will be used and reported.
#'
#' @return A \code{list} of result objects, each representing a network. These objects are ordered by MAPE (at most the 20 best MAPEs) and their fields are:
#'
#' \itemize{
#' \item{\code{mape}: A \code{numeric} value. Accuracy measure between the fitted and the actual series values. By default, the MAPE. In future versions, it will be possible to change it.}
#' \item{\code{fitted}: The fitted values, that is, one step ahead predictions calculated by the trained net.}
#' \item{\code{net}: An object returned by the grnn function. Represents a trained net. }
#' \item{\code{sigma}: A \code{numeric}. The sigma that was chosen, either by the user or by the function itself (in case \code{select} was set to \code{TRUE})}
#' \item{\code{regressors}: A \code{character vector}. Regressors that were chosen, either by the user or by the function itself (in case \code{select} was set to \code{TRUE})}
#' \item{\code{sigma.mape}: A \code{data.frame}. Sigma versus accuracy value of the corresponding trained network. Those networks were trained using the best set of regressors.}
#' \item{\code{series}: The original series (the dependent variable).}
#' \item{\code{residuals}: A \code{numeric vector}. Fitted values subtracted from the actual values.}
#' \item{\code{id}: An \code{integer}. The id number of the network, as referenced by \link[BETS]{grnn.test}.}
#' }
#'
#' grnn.train also returns a diagnostic of training rounds and a \code{sigma} versus MAPE plot.
#'
#' @author Talitha Speranza \email{[email protected]}
#'
#' @export
#' @import grnn forecast

grnn.train = function(train.set, sigma, step = 0.1, select = TRUE, names = NA){

    if(length(train.set) < 2 || !check.series(train.set, "Series list: train.")){
        return(NULL)
    }

    if(!all(is.na(names)) && length(train.set) != length(names)){
        msg("The 'names' vector must contain one name per series in 'train.set'")
        return(NULL)
    }

    train.n_elem = length(train.set[[1]])
    train.n_series = length(train.set)
    series = train.set[[1]]

    # sigma can be a single value or a range; only a range needs to be expanded
    if(length(sigma) > 1){
        sigma = seq(sigma[1], sigma[2], step)
    }

    train_mt = matrix(nrow = train.n_elem, ncol = train.n_series)

    for(i in 1:train.n_series){
        train_mt[,i] = train.set[[i]]
    }

    results.list = vector(mode = "list")
    id = 1

    if(select){
        # try every subset of regressors, from size 1 up to all of them
        sizes = 1:(train.n_series - 1)
    } else {
        # with select = FALSE, only the full set of regressors is tried
        sizes = train.n_series - 1
    }

    for(i in sizes){

        trial = combn(2:train.n_series, i)

        for(j in 1:ncol(trial)){

            sub_train = matrix(nrow = train.n_elem, ncol = nrow(trial) + 1)
            sub_train[,1] = train_mt[,1]

            for(k in 1:nrow(trial)){
                ind = trial[k,j]
                sub_train[,k+1] = train_mt[,ind]
            }

            result = vector(mode = "list")
            result$mape = 1.797693e+308

            vec.sigmas = vector(mode = "numeric")
            vec.mapes = vector(mode = "numeric")

            for(s in sigma){

                nn = smooth(learn(sub_train), s)
                fitted = vector(mode = "numeric")
                sub_train_fit = as.matrix(sub_train[,-1])

                for(r in 1:nrow(sub_train)){
                    fitted[r] = guess(nn, t(as.matrix(sub_train_fit[r,])))
                }

                acc = accuracy(fitted, sub_train[,1])[5]
                vec.sigmas = c(vec.sigmas, s)
                vec.mapes = c(vec.mapes, acc)

                if(acc < result$mape){

                    result$mape = acc
                    result$fitted = fitted
                    result$net = nn
                    result$sigma = s

                    regs = trial[,j]

                    if(!all(is.na(names))){
                        result$regressors = names[regs]
                    } else {
                        result$regressors = regs
                    }
                }
            }

            result$sigma.mape = cbind(sigma = vec.sigmas, mape = vec.mapes)
            result$series = series
            result$residuals = result$series - result$fitted
            result$id = id

            results.list[[id]] = result
            id = id + 1
        }
    }

    len = length(results.list)
    rankm = data.frame(matrix(nrow = len, ncol = 4))
    names(rankm) = c("id","mape","regs","sigma")

    for(i in 1:len){
        rankm[i,"id"] = i
        rankm[i,"mape"] = results.list[[i]]$mape
        rankm[i,"regs"] = paste(results.list[[i]]$regressors, collapse = ",")
        rankm[i,"sigma"] = results.list[[i]]$sigma
    }

    # keep, at most, the 20 best models
    rankm = head(rankm[order(rankm[,2]),], 20)

    print("General Regression Neural Network")
    print(rankm)

    results = vector(mode = "list")

    for(i in 1:nrow(rankm)){
        results[[i]] = results.list[[rankm[i,"id"]]]
    }

    plot(results[[1]]$sigma.mape, col = "royalblue", type = "b")

    return(results)
}
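## Illustrative end-to-end sketch (editor's addition, not part of the original source).
## All series here are simulated; grnn.train() and grnn.test() are the functions defined
## in this package and the 'grnn' package must be installed. This is a usage sketch,
## not a recommended modelling strategy.
# set.seed(1)
# y  <- ts(cumsum(rnorm(72)), start = c(2010, 1), frequency = 12)
# x1 <- stats::lag(y, -1)                                  # first lag of y
# x2 <- ts(rnorm(72), start = c(2010, 1), frequency = 12)  # unrelated regressor
# # align the three series on a common window and normalize them
# full  <- lapply(list(y = y, x1 = x1, x2 = x2), window, start = c(2010, 2), end = c(2015, 12))
# full  <- normalize(full, mode = "scale")
# train <- lapply(full, window, end = c(2014, 12))
# test  <- lapply(full, window, start = c(2015, 1))
# # train candidate networks over a sigma grid, then pick the best one on the hold-out set
# nets <- grnn.train(train, sigma = c(0.2, 2), step = 0.2, names = c("y", "x1", "x2"))
# best <- grnn.test(nets, test)
# best$mape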
/scratch/gouwar.j/cran-all/cranData/BETS/R/grnn.train.R
#' @title Format and show a console message.
#'
#' @description Customizes a message and shows it in the console.
#'
#' @param ... Arguments to be passed to \code{\link[base]{message}}
#' @param skip_before A \code{boolean}. Indicates if a line should be skipped before the message.
#' @param skip_after A \code{boolean}. Indicates if a line should be skipped after the message.
#' @param warn A \code{boolean}. Indicates whether a warning should be thrown.
#'
#' @return None
#'
#' @importFrom stringr str_c
#' @author Talitha Speranza \email{[email protected]}, Jonatha Azevedo \email{[email protected]}

msg <- function(..., skip_before = TRUE, skip_after = FALSE, warn = FALSE) {

    m <- str_c("BETS-package: ", ...)

    # build the final string from m so that both flags can be honored together
    # (and so that k is always defined, even when both flags are FALSE)
    k <- m
    if(skip_before) k <- paste0("\n", k)
    if(skip_after) k <- paste0(k, "\n")

    Encoding(k) <- "UTF-8"

    if(warn){
        warning(k, call. = FALSE)
    } else {
        message(k)
    }

    invisible(m)
}
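## Usage sketch (editor's addition, not part of the original file): every console
## message in the package is routed through this internal helper, which prefixes
## the text with "BETS-package: ".
# msg("Series not found. Please check the code and try again.")
# msg("This argument is deprecated.", warn = TRUE)   # emitted as a warning instead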
/scratch/gouwar.j/cran-all/cranData/BETS/R/msg.R
#' @title Normalize a time series
#'
#' @description Normalizes a time series, either by standardization or by mapping to values between 0 and 1.
#'
#' @param series A \code{ts} object or a \code{ts list}. The series to be normalized.
#' @param mode A \code{character}. The normalization method. Set this parameter to 'maxmin' to map series values to values between 0 and 1. Alternatively, set this parameter to 'scale' to standardize (subtract the mean and divide by the standard deviation).
#'
#' @return A \code{ts} object or a \code{ts list}. The normalized series.
#'
#' @author Talitha Speranza \email{[email protected]}
#'
#' @importFrom stats as.ts end fitted frequency na.omit plot.ts qnorm qt sd start time ts uniroot window
#'
#' @export

normalize = function(series, mode = "scale"){

    if(mode == "maxmin"){

        if(is.list(series)){
            return(lapply(series, function(x){(x - min(x))/(max(x) - min(x))}))
        } else{
            return((series - min(series))/(max(series) - min(series)))
        }

    } else if(mode == "scale"){

        if(is.list(series)){
            return(lapply(series, function(x){(x - mean(x))/sd(x)}))
        } else {
            return((series - mean(series))/sd(series))
        }
    }

    return("ERROR")
}
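## Usage sketch (editor's addition, not part of the original file): the two modes
## applied to an arbitrary simulated series.
# x  <- ts(rnorm(48, mean = 100, sd = 10), start = c(2010, 1), frequency = 12)
# z  <- normalize(x, mode = "scale")    # zero mean, unit standard deviation
# mm <- normalize(x, mode = "maxmin")   # rescaled to the [0, 1] interval
# c(mean(z), sd(z), range(mm))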
/scratch/gouwar.j/cran-all/cranData/BETS/R/normalize.R
#' @title Get the predicted values of a model and visualize it
#'
#' @description This function is built upon \link[forecast]{forecast}. Besides the model predictions, it returns an accuracy measure table (calculated by the \link[forecast]{accuracy} function) and a graph showing the original series, the predicted values and the actual values.
#'
#' @param ... arguments passed on to \link[forecast]{forecast}. If the model is a neural network, these arguments will be passed on to \link[BETS]{grnn.test}.
#' @param actual A \code{numeric vector}. The actual values (to be compared with predicted values).
#' @param main A \code{character}. The name of the prediction plot.
#' @param ylab A \code{character}. The Y axis label.
#' @param xlim A \code{numeric vector}. The limits of the X axis.
#' @param style A \code{character}. Can be either 'dygraphs' (the \link[dygraphs]{dygraph} function will be used to make the plot, which is going to be HTML based) or 'normal' (standard R functions will be used to make the plot)
#' @param unnorm A \code{numeric vector}. If predictions must be unnormalized, set the first element of this vector to the mean and the second, to the standard deviation.
#' @param legend.pos A \code{character}. The position of the legend. Possible values are standard R plot values, i.e., 'topright', 'bottomleft', etc.
#' @param knit A \code{boolean}. Set this parameter to \code{TRUE} if the function is being called from inside an R Markdown chunk (e.g. via knitr), so that the 'dygraphs' plot is returned instead of printed.
#'
#' @return Besides the prediction plot, this function returns an object whose fields are:
#'
#' \itemize{
#' \item{\code{accuracy}: An object returned by \link[forecast]{accuracy}. It is a table containing several accuracy measures}
#' \item{\code{predictions}: A \code{numeric vector} containing the predicted values. }
#' }
#'
#' @author Talitha Speranza \email{[email protected]}
#'
#' @importFrom zoo as.Date
#' @importFrom stats as.ts end fitted frequency na.omit plot.ts qnorm qt sd start time ts uniroot window
#' @importFrom graphics abline arrows axis barplot legend lines mtext par points text
#' @export
#' @import forecast dygraphs

predict = function(..., actual = NULL, main = "", ylab = "", xlim = NULL, style = "dygraphs", unnorm = NULL, legend.pos = "topright", knit = F){

    l = list(...)

    if(is.null(l$object)){
        model = l[[1]]
    } else{
        model = l$object
    }

    if(class(model)[1] == "arima" || class(model)[1] == "Arima" || class(model)[1] == "ARIMA" || class(model)[1] == "HoltWinters"){
        preds = forecast(...)
    } else {
        preds = grnn.test(...)
} if(!is.null(unnorm)){ preds$x = preds$x*unnorm[2] + unnorm[1] preds$mean = preds$mean*unnorm[2] + unnorm[1] preds$fitted = preds$fitted*unnorm[2] + unnorm[1] if(!is.null(actual)){ actual = actual*unnorm[2] + unnorm[1] } if(!is.null(preds$lower)){ preds$lower = preds$lower*unnorm[2] + unnorm[1] preds$upper = preds$upper*unnorm[2] + unnorm[1] } } if(style == "dygraphs"){ dt = as.ts(cbind(fit = preds$mean, upr = preds$upper[,2], lwr = preds$lower[,2])) if(is.null(actual)){ dt = cbind(hist = preds$x, md = dt) p = dygraph(dt, main = main) %>% dySeries("hist", label = "Actual") %>% dySeries(c("md.lwr", "md.fit", "md.upr"), label = "Predicted") %>% dyRangeSelector(strokeColor = "gray", fillColor = "gray") %>% dyAxis("y", label = ylab) } else { dt = cbind(hist = preds$x, md = dt, act = actual) p = dygraph(dt, main = main) %>% dySeries("hist", label = "Series") %>% dySeries("act", label = "Actual") %>% dySeries(c("md.lwr", "md.fit", "md.upr"), label = "Predicted") %>% dyRangeSelector(strokeColor = "gray", fillColor = "gray") %>% dyAxis("y", label = ylab) } if(knit == F){ print(p) } else { return(p) } } else if(style == "normal") { max = max(c(preds$x,preds$mean)) max = ceiling(max + 0.1*max) min = min(c(preds$x, preds$mean)) min = floor(min - 0.1*min) step = floor((max - min)/4) y_last = preds$x[length(preds$x)] x_last = as.Date(preds$x)[length(preds$x)] if(!is.null(preds$lower)){ series = preds } else{ series = preds$x } plot(series, main = main, ylab = ylab, xlim = xlim, yaxt = "n", xaxp = c(1900, 2500, 600)) abline(v = seq(1900,2500,1), col = "gray60", lty = 3) axis(side = 2, at = seq(min,max,step)) par(new = TRUE) if(!is.null(actual)){ lines(actual, col = "firebrick3", lwd = 2) y_ac = actual[1] x_ac = as.Date(actual)[1] } y_pr = preds$mean[1] #lines(x = c(x_last, x_ac), y = c(y_last, y_ac), col = "firebrick3", lw = 2) #lines(x = c(x_last, x_ac), y = c(y_last, y_pr), col = "royalblue", lw = 2) if(is.null(preds$lower)){ lines(preds$mean, col = "royalblue", lwd = 2) } if(!is.null(actual)){ legend(legend.pos,col=c("firebrick3","royalblue"), lty=1,legend=c("Actual","Predicted"), cex = 0.7) } } if(!is.null(actual)){ results = vector(mode = "list") acc = accuracy(preds$mean, actual) results$accuracy = acc results$predictions = preds results$forecasting.errors = preds$mean - actual } else { results = preds } return(invisible(results)) }
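## Usage sketch (editor's addition, not part of the original file): wrapping a SARIMA
## fit with this predict() to obtain forecasts, an accuracy table and a plot. Series
## 21864 is the code used in the report() examples; any monthly ts split into a fitting
## window and a hold-out window would do. Requires the BETS data sources to be reachable.
# y      <- window(BETSget(21864), end = c(2014, 12))
# actual <- window(BETSget(21864), start = c(2015, 1), end = c(2015, 12))
# fit    <- stats::arima(y, order = c(1, 1, 1), seasonal = c(0, 1, 1))
# out    <- predict(fit, h = 12, actual = actual, main = "12-step forecast", style = "normal")
# out$accuracy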
/scratch/gouwar.j/cran-all/cranData/BETS/R/predict.R
#' @title Create dynamic reports with a full analysis of a set of time series
#'
#' @description Generate automatic reports with a complete analysis of a set of time series. For now, SARIMA (Box & Jenkins approach), Holt-Winters and GRNN analysis are possible. Soon, Multilayer Perceptron, Fuzzy Logic and Box-Cox analysis will become available.
#'
#' @param mode A \code{character}. The type of the analysis. So far, 'SARIMA', 'GRNN' and 'HOLT-WINTERS' are available.
#' @param ts An \code{integer}, a \code{ts} object or a \code{list} of \code{integer}s and \code{ts} objects. Either the ID of the series in the BETS database or a time series object (any series, not just BETS's). If a \code{list} is provided, a report is generated for each series in this list, which can be mixed with IDs and time series objects.
#' @param parameters A \code{list}. The parameters of the report. See the 'details' section for more information.
#' @param report.file A \code{character}. A path and a name for the report file (an .html file). If there is more than one series, this name will be used as a prefix. If this parameter is not provided, the report will be saved inside the 'reports' folder, under the BETS installation directory.
#' @param series.saveas A \code{character}. The format of the file on which the series and the predictions should be written. Possible values are 'none' (default), 'sas', 'dta', 'spss', 'csv', 'csv2'. It is saved in the same directory as the report file.
#'
#' @details
#'
#' \bold{SARIMA Report Parameters}
#'
#' \itemize{
#' \item{\code{cf.lags}: An \code{integer}. Maximum number of lags to show on the ACFs and PACFs}
#' \item{\code{n.ahead}: An \code{integer}. Forecast horizon (number of steps ahead)}
#' \item{\code{inf.crit}: A \code{character}. Information criterion to be used in model selection.}
#' \item{\code{dummy}: A \code{ts} object. A dummy regressor. Must also cover the forecasting period.}
#' \item{\code{ur.test}: A \code{list}. Parameters of \code{\link[BETS]{ur_test}} }
#' \item{\code{arch.test}: A \code{list}. Parameters of \code{\link[BETS]{arch_test}} }
#' \item{\code{box.test}: A \code{list}. Parameters of \code{\link[stats]{Box.test}} }
#' }
#'
#' \bold{GRNN Report Parameters}
#'
#' \itemize{
#' \item{\code{auto.reg}: A \code{boolean}. Is the dependent variable auto-regressive?}
#' \item{\code{present.regs}: A \code{boolean}. Include non-lagged series among regressors? }
#' \item{\code{lag.max}: An \code{integer}. Regressors' maximum lag}
#' \item{\code{regs}: A \code{list}. Regressor codes or time series}
#' \item{\code{start.train}: Training set starting period }
#' \item{\code{end.train}: Training set ending period}
#' \item{\code{start.test}: Testing set starting period }
#' \item{\code{end.test}: Testing set ending period }
#' \item{\code{sigma.interval}: A \code{numeric} vector. Sigma interval}
#' \item{\code{sigma.step}: A \code{numeric} value. Sigma step}
#' \item{\code{var.names}: A \code{character} vector. Variable names}
#' }
#'
#' \bold{HOLT-WINTERS Report Parameters}
#'
#' \itemize{
#' \item{\code{alpha}: Smooth factor of the level component. If numeric, it must be within the half-open unit interval (0, 1]. A small value means that older values in x are weighted more heavily. Values near 1.0 mean that the latest value has more weight. NULL means that the HoltWinters function should find the optimal value of alpha. It must not be FALSE or 0.}
#' \item{\code{beta}: Smooth factor of the trend component. If numeric, it must be within the unit interval [0, 1].
A small value means that older values in x are weighted more heavily. Values near 1.0 mean that the latest value has more weight. NULL means that the HoltWinters function should find the optimal value of beta. The trend component is omitted if beta is FALSE or 0.} #' \item{\code{gamma}: Smooth factors of the seasonal component. If numeric, it must be within the unit interval [0, 1]. A small value means that older values in x are weighted more heavily. Values near 1.0 mean that the latest value has more weight. NULL means that the HoltWinters function should find the optimal value of gamma. The seasonal component will be omitted if gamma is FALSE or 0. This must be specified as FALSE if frequency(x) is not an integer greater than 1.} #' \item{\code{additive}: A single character string specifying how the seasonal component interacts with the other components. "additive", the default, means that x is modeled as level + trend + seasonal and "multiplicative" means the model is (level + trend) * seasonal. Abbreviations of "additive" and "multiplicative" are accepted.} #' \item{\code{l.start}: The starting value of the level component.} #' \item{\code{b.start}: The starting value of the trend component} #' \item{\code{s.start}: The starting values of seasonal component, a vector of length frequency(x)} #' \item{\code{n.ahead}: Prevision horizon (number of steps ahead)} #' } #' #' For more information about these parameters, see also \code{\link{HoltWinters}}. Most parameters are the same and we just reproduced their documentation here. #' #' @return One or more .html files (the reports) and, optionally, data files (series plus predictions). #' #' @author Talitha Speranza \email{[email protected]} #' #' @examples #' ##-- SARIMA #' #' # parameters = list(lag.max = 48, n.ahead = 12 ) #' # report(ts = 21864, parameters = parameters) #' #' # report(ts = 4447, series.saveas = "csv") #' #' # series = list(BETSget(4447), BETSget(21864)) #' # parameters = list(lag.max = 20, n.ahead = 15 ) #' # report(ts = series, parameters = parameters) #' #' # series = list(4447, 21864) #' # report(ts = series, parameters = parameters) #' #' # parameters = list( #' # cf.lags = 25, #' # n.ahead = 15, #' # dummy = dum, #' # arch.test = list(lags = 12, alpha = 0.01), #' # box.test = list(type = "Box-Pierce") #' # ) # #' # report(ts = window(BETSget(21864), start= c(2002,1) , end = c(2015,10)), #' #parameters = parameters) #' #' # dum <- dummy(start= c(2002,1) , end = c(2017,1) , #' #from = c(2008,9) , to = c(2008,11)) #' #' # parameters = list( #' # cf.lags = 25, #' # n.ahead = 15, #' # dummy = dum #' # ) #' #' # report(ts = window(BETSget(21864), start= c(2002,1) , end = c(2015,10)), #' #parameters = parameters) #' #' #' ##-- GRNN #' #' # params = list(regs = 4382) #' # report(mode = "GRNN", ts = 13522, parameters = params) #' #' ##-- HOLT-WINTERS #' #' # params = list(alpha = 0.5, gamma = TRUE) #' # report(mode = "HOLT-WINTERS", ts = 21864, series.saveas = "csv", parameters = params) #' #' # params = list(gamma = T, beta = TRUE) #' # report(mode = "HOLT-WINTERS", ts = 21864, series.saveas = "csv", parameters = params) #' #' @export #' @import rmarkdown report <- function(mode = "SARIMA", ts = 21864, parameters = NULL, report.file= NA, series.saveas = "none"){ if(class(ts) == "list" || class(ts) == "numeric" || class(ts) == "integer"){ vec = ts } else if(class(ts) == "ts"){ vec = list(ts) } else { return(msg("ts - ", .MSG_PARAMETER_NOT_VALID)) } if(is.na(report.file)){ dir = paste0(system.file(package="BETS"),"/reports") 
dir.create(dir) report.file = paste0(dir,"/analysis") } i = 1 for(ts in vec){ name = paste0("analysis_",mode,".Rmd") file = system.file(package="BETS", name) if(class(ts) == 'ts'){ id = paste0("custom_", i) i = i + 1 } else{ id = ts } rep.file = paste0(report.file, "_", mode, "_", id) if(series.saveas != "none"){ series.file = paste0(rep.file,".",series.saveas) } else{ series.file = NA } rep.file = paste0(rep.file,".html") if(!(ts == 21864 && is.null(parameters))){ parameters$ts = ts } parameters$series.file = series.file rmarkdown::render(file, params = parameters) file = gsub(".Rmd", ".html", file) file.copy(file, rep.file, overwrite = T) file.remove(file) cmd = "open" if(Sys.info()[["sysname"]] == "Linux"){ cmd = paste0("xdg-",cmd) } system2(cmd, rep.file) } }
/scratch/gouwar.j/cran-all/cranData/BETS/R/report.R
#' @title Prepare a time series to be exported #' #' @description To be used with saveSpss, saveSas and others. #' #' @param code An \code{integer}. The unique identifier of the series within the BETS database. #' @param data A \code{data.frame} or a \code{ts}. Contains the data to be written. If \code{data} is supplied, the BETS database will not be searched. #' @param file.name A \code{character}. The name of the output file. The default is 'series.spss'. #' @param type A \code{character}. The type of the file (e.g. 'spss' or 'sas'). #' #' @return A list with the data frame to be saved and the file name #' #' @importFrom zoo as.Date save = function(code = NULL, data = NULL, file.name="series", type = ""){ path = FALSE if(grepl("\\\\",file.name) || grepl("/",file.name)){ path = TRUE } if(!path){ local= paste0(getwd(),"/") } else { local = "" } if(is.null(data) && !is.null(code)){ y = BETSget(code, data.frame = TRUE) if(file.name == "series"){ file.name = paste0(file.name, "_", code) } } else if(is.data.frame(data) || is.numeric(data)){ y = data.frame(data) } else if(class(data) == 'ts'){ y = data.frame(date = as.Date(data), value = data) } else { return(msg('The parameter "data" must be either a data.frame, a numeric vector or a ts object')) } file.name = paste0(local,file.name,".",type) return(list(data = y, file = file.name)) }
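## Usage sketch (editor's addition, not part of the original file): save() only prepares
## the data frame and the output path; the saveSas()/saveSpss()/saveStata() wrappers that
## follow do the actual writing. Note that, inside the package, this save() masks base::save().
# prep <- save(data = ts(rnorm(12), start = c(2016, 1), frequency = 12),
#              file.name = "my_series", type = "csv")
# str(prep)   # a list with fields 'data' (a data.frame) and 'file' (the full output path)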
/scratch/gouwar.j/cran-all/cranData/BETS/R/save.R
#' @title Export a time series to SAS
#'
#' @description Writes a time series to a .sas (SAS) file.
#'
#' @param code An \code{integer}. The unique identifier of the series within the BETS database.
#' @param data A \code{data.frame} or a \code{ts}. Contains the data to be written. If \code{data} is supplied, the BETS database will not be searched.
#' @param file.name A \code{character}. The name of the output file. The default is 'series.sas'.
#'
#' @return None
#'
#' @examples
#'
#' #Exchange rate - Free - United States dollar (purchase)
#' #us.brl <- BETSget(3691)
#' #require(seasonal)
#' #us.brl.seasonally_adjusted <- seas(us.brl)
#' #saveSas(data = us.brl.seasonally_adjusted,file.name="us.brl.seasonally_adjusted")
#' # Or
#' #saveSas(code=3691,file.name="us.brl")
#'
#' @importFrom foreign write.foreign
#' @export

saveSas = function(code = NULL, data = NULL, file.name = "series"){

    ret = save(code, data, file.name, "sas")

    if(class(ret) == "list"){
        write.foreign(ret$data, datafile = ret$file, codefile = tempfile(), package = "SAS")
    }
}
/scratch/gouwar.j/cran-all/cranData/BETS/R/saveSas.R
#' @title Export a time series to SPSS
#'
#' @description Writes a time series to a .spss (SPSS) file.
#'
#' @param code An \code{integer}. The unique identifier of the series within the BETS database.
#' @param data A \code{data.frame} or a \code{ts}. Contains the data to be written. If \code{data} is supplied, the BETS database will not be searched.
#' @param file.name A \code{character}. The name of the output file. The default is 'series.spss'.
#'
#' @return None
#'
#' @examples
#'
#' #Exchange rate - Free - United States dollar (purchase)
#' #us.brl <- BETSget(3691)
#' #require(seasonal)
#' #us.brl.seasonally_adjusted <- seas(us.brl)
#' #saveSpss(data = us.brl.seasonally_adjusted,file.name="us.brl.seasonally_adjusted")
#' # Or
#' #saveSpss(code=3691,file.name="us.brl")
#'
#' @importFrom foreign write.foreign
#' @export

saveSpss = function(code = NULL, data = NULL, file.name = "series"){

    ret = save(code, data, file.name, "spss")

    if(class(ret) == "list"){
        write.foreign(ret$data, datafile = ret$file, codefile = tempfile(), package = "SPSS")
    }
}
/scratch/gouwar.j/cran-all/cranData/BETS/R/saveSpss.R
#' @title Export a time series to STATA
#'
#' @description Writes a time series to a .dta (STATA) file.
#'
#' @param code An \code{integer}. The unique identifier of the series within the BETS database.
#' @param data A \code{data.frame} or a \code{ts}. Contains the data to be written. If \code{data} is supplied, the BETS database will not be searched.
#' @param file.name A \code{character}. The name of the output file. The default is 'series.dta'.
#'
#' @return None
#'
#' @examples
#'
#' #Exchange rate - Free - United States dollar (purchase)
#' #us.brl <- BETSget(3691)
#' #require(seasonal)
#' #us.brl.seasonally_adjusted <- seas(us.brl)
#' #saveStata(data = us.brl.seasonally_adjusted,file.name="us.brl.seasonally_adjusted")
#' # Or
#' #saveStata(code=3691,file.name="us.brl")
#'
#' @importFrom foreign write.dta
#' @export

saveStata = function(code = NULL, data = NULL, file.name = "series"){

    ret = save(code, data, file.name, "dta")

    if(class(ret) == "list"){
        write.dta(ret$data, ret$file)
    }
}
/scratch/gouwar.j/cran-all/cranData/BETS/R/saveStata.R