test_qae <- function() {
v <- wrapr::qae(a = 1, b := 2, c %:=% 3)
expect_equal(c(a = "1", b = "2", c = "3"), v)
v2 <- qae(a = 1, b := 2, c %:=% 3)
expect_equal(c(a = "1", b = "2", c = "3"), v2)
invisible(NULL)
}
test_qae()
| /scratch/gouwar.j/cran-all/cranData/wrapr/inst/tinytest/test_qae.R |
test_qc <- function() {
a <- "x"
expect_equal(qc(a), "a")
expect_equal(qc(.(a)), "x")
expect_equal(qc(.(a) := a), c("x" = "a"))
expect_equal(qc("a"), "a")
expect_equal(qc(sin(x)), "sin(x)")
expect_equal(qc(a, qc(b, c)), c("a", "b", "c"))
expect_equal(qc(a, c("b", "c")), c("a", "b", "c"))
expect_equal(qc(x=a, qc(y=b, z=c)), c(x="a", y="b", z="c"))
expect_equal(qc('x'='a', wrapr::qc('y'='b', 'z'='c')), c(x="a", y="b", z="c"))
#c(a = c(a="1", b="2")) # returns c(a.a = "1", a.b = "2")
expect_equal(qc(a = c(a=1, b=2)), c(a.a = "1", a.b = "2"))
expect_equal(qc(a := c(a=1, b=2)), c(a.a = "1", a.b = "2"))
expect_equal(qc(c("a", "b") := c("d", "e")), c(a = "d", b = "e"))
expect_equal(qc(x = a, qc(b, z = c)), c("x" = "a", qc("b", "z" = "c")))
expect_equal(qc(x := a, qc(b, z := c)), c("x" = "a", qc("b", "z" = "c")))
invisible(NULL)
}
test_qc()
| /scratch/gouwar.j/cran-all/cranData/wrapr/inst/tinytest/test_qc.R |
test_qchar_frame <- function() {
block_record <- wrapr::qchar_frame(
"baker" , "order", "score", "guess" |
. , 1 , score_1, guess_1 |
. , 2 , score_2, guess_2 |
. , 3 , score_3, guess_3 )
dims <- dim(block_record)
expect_equal(c(3, 4), dims)
b2 <- wrapr::build_frame(
"baker" , "order", "score" , "guess" |
"." , "1" , "score_1", "guess_1" |
"." , "2" , "score_2", "guess_2" |
"." , "3" , "score_3", "guess_3" )
expect_equal(block_record, b2)
invisible(NULL)
}
test_qchar_frame()
| /scratch/gouwar.j/cran-all/cranData/wrapr/inst/tinytest/test_qchar_frame.R |
test_slots <- function() {
# from help("slot")
setClass("track", slots = c(x="numeric", y="numeric"))
myTrack <- new("track", x = -4:4, y = exp(-4:4))
expect <- myTrack@x
v1 <- myTrack %.>% .@x
expect_equal(expect, v1)
let(
c(X = 'x'),
v2 <- myTrack@X
)
expect_equal(expect, v2)
invisible(NULL)
}
test_slots()
| /scratch/gouwar.j/cran-all/cranData/wrapr/inst/tinytest/test_slots.R |
test_split_braces <- function() {
expect_equal(
split_at_brace_pairs("{x} + y"),
c("{x}", " + y"))
expect_equal(
split_at_brace_pairs("{x} + y + {z}"),
c("{x}", " + y + ", "{z}"))
expect_equal(
split_at_brace_pairs(list("{x} + y + {z}")),
list(c("{x}", " + y + ", "{z}")))
expect_equal(
split_at_brace_pairs("x + {y} + z"),
c("x + ", "{y}", " + z"))
expect_equal(
split_at_brace_pairs("x + y"),
"x + y")
expect_equal(
split_at_brace_pairs(""),
"")
expect_equal(
split_at_brace_pairs(c("{x} + y",
"{x} + y + {z}")),
list(c("{x}", " + y"),
c("{x}", " + y + ", "{z}")))
expect_equal(
split_at_brace_pairs(list("{x} + y",
"{x} + y + {z}")),
list(c("{x}", " + y"),
c("{x}", " + y + ", "{z}")))
expect_equal(
split_at_brace_pairs("-<hi>- <hi> -hi-", open_symbol = "-<", close_symbol = ">-"),
c("-<hi>-", " <hi> -hi-"))
expect_equal(
split_at_brace_pairs("x + .[y]+z", open_symbol = ".[", close_symbol = "]"),
c("x + ", ".[y]", "+z"))
expect_equal(
wrapr::split_at_brace_pairs(list(nm = "sqrt(.[v1])"), open_symbol = ".[", close_symbol = "]"),
list(nm = c("sqrt(", ".[v1]", ")" )))
invisible(NULL)
}
test_split_braces()
| /scratch/gouwar.j/cran-all/cranData/wrapr/inst/tinytest/test_split_braces.R |
test_str_stuff <- function() {
v0 <- split_at_brace_pairs("{x} + y + {z}")
expect_equal(
v0,
c("{x}", " + y + ", "{z}"))
v1 <- strsplit_capture("x is .(x) and x+1 is .(x+1)", "\\.\\([^)]+\\)")
x <- 7
v2 <- sinterp("x is .(x), x+1 is .(x+1)\n.(x) is odd is .(x%%2 == 1)")
expect_equal(
v2,
"x is 7, x+1 is 8\n7 is odd is TRUE")
v3 <- sinterp("x is .(x), x+1 is .(x+1)\n.(x) is odd is .(x%%2 == 1)",
envir = list(x = 10))
expect_equal(
v3,
"x is 10, x+1 is 11\n10 is odd is FALSE")
d <- data.frame(x = 1:2, y = 3:4)
v4 <- sinterp("x is .(x), and y is .(y)", envir = d)
expect_equal(
v4,
"x is 1:2, and y is 3:4")
invisible(NULL)
}
test_str_stuff()
| /scratch/gouwar.j/cran-all/cranData/wrapr/inst/tinytest/test_str_stuff.R |
test_strict <- function() {
# some strictness from wrapr pipe
expect_error(
5 %.>% sin()
)
badf <- function(x) {
x %.>% return(.)
return(7)
}
expect_error(
badf(7)
)
expect_equal(sin(5), 5 %.>% sin(.))
expect_equal(sin(5), 5 %.>% (sin(.)))
expect_equal(sin(5), 5 %.>% {sin(.)})
invisible(NULL)
}
test_strict()
| /scratch/gouwar.j/cran-all/cranData/wrapr/inst/tinytest/test_strict.R |
expect_equal(
sx('1 2 "c", d'), c("1", "2", "c", "d")
)
expect_equal(
sx('1 2 3'), c("1", "2", "3")
)
expect_equal(
sx('1 2 "3"'), c("1", "2", "3")
)
expect_equal(
sx('1,2|3.4'), c("1", "2", "3.4")
)
expect_equal(
sx('01 02'), c("01", "02")
)
expect_equal(
sx('0x3 0z3'), c("0x3", "0z3")
)
| /scratch/gouwar.j/cran-all/cranData/wrapr/inst/tinytest/test_sx.R |
test_unpack_unpack <- function() {
# named unpacking
# looks like assignment: DESTINATION = NAME_VALUE_USING
d <- data.frame(x = 1:2,
g=c('test', 'train'),
stringsAsFactors = FALSE)
unpack[train_set = train, test_set = test] <- split(d, d$g)
# train_set and test_set now correctly split
expect_true(test_set$g[[1]] == 'test')
expect_true(train_set$g[[1]] == 'train')
rm(list = c('train_set', 'test_set'))
# again with self in local environment
# named unpacking
# looks like assignment: DESTINATION = NAME_VALUE_USING
d <- data.frame(x = 1:2,
g=c('test', 'train'),
stringsAsFactors = FALSE)
unpack[train_set = train, test_set = test] <- split(d, d$g)
# train_set and test_set now correctly split
expect_true(test_set$g[[1]] == 'test')
expect_true(train_set$g[[1]] == 'train')
rm(list = c('train_set', 'test_set'))
split(d, d$g) %.>% unpack[train_set = train, test_set = test]
# train_set and test_set now correctly split
expect_true(test_set$g[[1]] == 'test')
expect_true(train_set$g[[1]] == 'train')
rm(list = c('train_set', 'test_set'))
# named unpacking NEWNAME = OLDNAME implicit form
# values are matched by name, not index
unpack[train, test] <- split(d, d$g)
expect_true(test$g[[1]] == 'test')
expect_true(train$g[[1]] == 'train')
rm(list = c('train', 'test'))
# function version
unpack(split(d, d$g), train, test)
expect_true(test$g[[1]] == 'test')
expect_true(train$g[[1]] == 'train')
rm(list = c('train', 'test'))
# pipe version
split(d, d$g) %.>% unpack(., train, test)
expect_true(test$g[[1]] == 'test')
expect_true(train$g[[1]] == 'train')
rm(list = c('train', 'test'))
}
test_unpack_unpack()
test_unpack_to <- function() {
# named unpacking
# looks like assignment: DESTINATION = NAME_VALUE_USING
d <- data.frame(x = 1:2,
g=c('test', 'train'),
stringsAsFactors = FALSE)
to[train_set = train, test_set = test] <- split(d, d$g)
# train_set and test_set now correctly split
expect_true(test_set$g[[1]] == 'test')
expect_true(train_set$g[[1]] == 'train')
rm(list = c('train_set', 'test_set'))
# again with self in local environment
# named unpacking
# looks like assignment: DESTINATION = NAME_VALUE_USING
d <- data.frame(x = 1:2,
g=c('test', 'train'),
stringsAsFactors = FALSE)
to[train_set = train, test_set = test] <- split(d, d$g)
# train_set and test_set now correctly split
expect_true(test_set$g[[1]] == 'test')
expect_true(train_set$g[[1]] == 'train')
rm(list = c('train_set', 'test_set'))
split(d, d$g) %.>% to[train_set = train, test_set = test]
# train_set and test_set now correctly split
expect_true(test_set$g[[1]] == 'test')
expect_true(train_set$g[[1]] == 'train')
rm(list = c('train_set', 'test_set'))
# named unpacking NEWNAME = OLDNAME implicit form
# values are matched by name, not index
to[train, test] <- split(d, d$g)
expect_true(test$g[[1]] == 'test')
expect_true(train$g[[1]] == 'train')
rm(list = c('train', 'test'))
# pipe version (no dot)
split(d, d$g) %.>% to(train, test)
expect_true(test$g[[1]] == 'test')
expect_true(train$g[[1]] == 'train')
rm(list = c('train', 'test'))
}
test_unpack_to()
test_partial_unpack_specification <- function() {
list(a = 1, b = 2) -> to[e = a, b]
expect_equal(e, 1)
expect_equal(b, 2)
invisible(NULL)
}
test_partial_unpack_specification()
test_grab_rewrite <- function() {
f <- function(...) {
unpack_environment <- parent.frame(n = 1)
args <- substitute(list(...))
grab_assignments_from_dots(args)
}
v <- f(a, c = d, e := f, g <- h, i -> j)
expect_true(identical(v, c('a', 'c' = 'd', 'e' = 'f', 'g' = 'h', 'j' = 'i')))
}
test_grab_rewrite()
test_partial_unpack_specification2 <- function() {
list(a = 1, b = 2) -> to[e <- a, b]
expect_equal(e, 1)
expect_equal(b, 2)
invisible(NULL)
}
test_partial_unpack_specification2()
test_unpack_bquote_position <- function() {
aname <- 'a'
bname <- 'b'
# allowed
unpack(data.frame(a = 1, b = 2), a, b = b)
# allowed
unpack(data.frame(a = 1, b = 2), a = .(aname), b)
# not allowed
expect_error(unpack(data.frame(a = 1, b = 2), .(aname), b))
# not allowed
expect_error(unpack(data.frame(a = 1, b = 2), .(aname) := a, b))
# not allowed
expect_error(unpack(data.frame(a = 1, b = 2), x = .(aname) := a, b))
}
test_unpack_bquote_position()
| /scratch/gouwar.j/cran-all/cranData/wrapr/inst/tinytest/test_unpack.R |
---
title: "Corner Cases"
author: "John Mount"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Corner Cases}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
`wrapr::let()` is designed to get several important corner cases correct,
including substitutions that are disjoint from the expression, and symbol swaps.
```{r exdisj}
library("wrapr")
X <- 1
Y <- 2
let(
c(),
debugPrint = TRUE,
X + Y
)
let(
c(),
debugPrint = TRUE,
subsMethod = 'langsubs',
X + Y
)
let(
c(),
debugPrint = TRUE,
subsMethod = 'stringsubs',
X + Y
)
let(
c(),
debugPrint = TRUE,
subsMethod = 'subsubs',
X + Y
)
```
```{r exswap}
library("wrapr")
X <- 1
Y <- 2
let(
c(X='Y', Y='X'),
debugPrint = TRUE,
X + Y
)
let(
c(X='Y', Y='X'),
debugPrint = TRUE,
subsMethod = 'langsubs',
X + Y
)
let(
c(X='Y', Y='X'),
debugPrint = TRUE,
subsMethod = 'stringsubs',
X + Y
)
let(
c(X='Y', Y='X'),
debugPrint = TRUE,
subsMethod = 'subsubs',
X + Y
)
```
| /scratch/gouwar.j/cran-all/cranData/wrapr/vignettes/CornerCases.Rmd |
---
title: "Debug Vignette"
author: "John Mount, Nina Zumel"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Debug Vignette}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
This vignette demonstrates debugging a user-created function with the `DebugFnW` call. For our example, we will use a simple function that takes an argument `i` and returns the `i`th index of a ten-element vector:
```{r setup}
# load package
library("wrapr")
# user function
f <- function(i) { (1:10)[[i]] }
```
Let's imagine that we are calling this function deep within another process; perhaps we are calling it repeatedly, on a long sequence of (possibly unknown to us) inputs.
```{r unwrapped}
inputs = c(4,5,2,9,0,8)
tryCatch(
for(x in inputs) {
f(x)
},
error = function(e) { print(e) })
```
Oops! We've crashed, and if this loop were deep in another process, we wouldn't know why, or where. If we suspect that the function `f` is the cause, then we can wrap `f` using `wrapr::DebugFnW`.
`DebugFnW(saveDest, fn)` wraps its function argument `fn`, captures any arguments that cause it to fail, and saves those arguments and other state to a specified destination `saveDest`.
The state data is written to:
* a random temp file (if `saveDest` is `NULL`)
* a user-chosen file (if `saveDest` is a character string)
* a `globalenv()` variable (if `saveDest` is a name, as produced by `as.name()` or `quote()`)
* a user function, which is called with the state (if `saveDest` is a function).
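For example, a function destination receives the captured state directly, letting you route it yourself (a minimal sketch; the `collector()` function and file name here are our own illustration, not part of `wrapr`):
```{r fnDest, eval=FALSE}
# a user function as saveDest: DebugFnW calls it with the captured state
collector <- function(state) {
  # the state carries fields such as fn_name and args (examined below)
  message("captured a failing call to: ", state$fn_name)
  saveRDS(state, "debug_state.RDS")
}
df <- DebugFnW(collector, f)
```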
Here, we wrap `f` and save error state into the global variable `lastError`.
```{r writeBackVersion2}
# wrap function with writeBack
df <- DebugFnW(as.name('lastError'), f)
```
Now we run the same loop as above, with the wrapped function `df` (note that the `tryCatch` is not strictly needed, this is just for running this example in a vignette).
```{r writeBackVersion3}
# capture error (Note: tryCatch not needed for user code!)
tryCatch(
for(x in inputs) {
df(x)
},
error = function(e) { print(e) })
```
We can then examine the error. Note in particular that `lastError$fn_name` records the name of the function that crashed, and `lastError$args` records the arguments that the function was called with. Also in these examples we are wrapping our code with a `tryCatch` block to capture exceptions; this is only to allow the `knitr` sheet to continue and *not* needed to use the debugging wrappers effectively.
```{r writeBackVersion4}
# examine error
str(lastError)
lastError$args
```
In many situations, just knowing the arguments is enough information ("Oops, we tried to index the vector from zero!"). In more complicated cases, we can set a debug point on the offending function, and then call it again with the failing arguments in order to track down the bug.
```{r writeBackVersion5}
# redo call, perhaps debugging
tryCatch(
do.call(lastError$fn_name, lastError$args),
error = function(e) { print(e) })
# clean up
rm(list='lastError')
```
In many cases you may prefer to save the failing state into an external file rather than into the current runtime environment. Below we show example code for saving state to an RDS file.
```{r FileVersion, eval=FALSE}
saveDest <- paste0(tempfile('debug'),'.RDS')
# wrap function with saveDest
df <- DebugFnW(saveDest,f)
# capture error (Note: tryCatch not needed for user code!)
tryCatch(
for(x in inputs) {
df(x)
},
error = function(e) { print(e) })
```
We can later read that file back into R, for debugging.
```{r FileVersion2, eval=FALSE}
# load data
lastError <- readRDS(saveDest)
# examine error
str(lastError)
# redo call, perhaps debugging
tryCatch(
do.call(lastError$fn_name, lastError$args),
error = function(e) { print(e) })
# clean up
file.remove(saveDest)
```
For more practice, please view [our video on wrapper debugging](https://youtu.be/zFEC9-1XSN8?list=PLAKBwakacHbQT51nPHex1on3YNCCmggZA).
Note: `wrapr` debug functionality rehashes some of the capabilities of `dump.frames` (see `help(dump.frames)`). Roughly `dump.frames` catches the exception (so trying to step or continue re-throws, and arguments may have moved from their starting values) and `wrapr` catches the call causing the exception in a state *prior* to starting the calculation (so arguments should be at their starting values). We have found some cases where `wrapr` is a bit more convenient in how it interacts with the `RStudio` visual debugger (please see this [screencast](https://youtu.be/2NCj4Hacm8E?list=PLAKBwakacHbQT51nPHex1on3YNCCmggZA) for some comparison). Also, please see [this article](https://win-vector.com/2012/10/09/error-handling-in-r/) for use of <code>tryCatch</code> and
<code>withRestarts</code>.
| /scratch/gouwar.j/cran-all/cranData/wrapr/vignettes/DebugFnW.Rmd |
---
title: "Frame Tools"
author: "John Mount, Win-Vector LLC"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Frame Tools}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
[`wrapr`](https://winvector.github.io/wrapr/) supplies a few tools for creating example
`data.frame`s. An important use case is: building the control table for `cdata::rowrecs_to_blocks()` and `cdata::blocks_to_rowrecs()` (example [here](https://winvector.github.io/cdata/articles/cdata.html)).
Let's see how to create an example data frame. The idea is similar to that found in [`tibble::tribble()`](https://tibble.tidyverse.org/reference/tribble.html): for small tables a
row-oriented constructor can be quite legible, and avoids the cognitive load of taking a transpose.
For example we can create a typical `data.frame` as follows:
```{r}
d <- data.frame(
names = c("a", "b", "c", "d"),
x = c(1, 2, 3, 4 ),
y = c(1, 4, 9, 16 ),
stringsAsFactors = FALSE)
print(d)
```
Notice how the table is specified by columns (which is close to how `data.frame`s
are implemented), but printed by rows. `utils::str()` and `tibble::glimpse()` both
print by columns.
```{r}
str(d)
```
`wrapr` supplies the method [`draw_frame`](https://winvector.github.io/wrapr/articles/FrameTools.html) which at first glance appears to be a mere pretty-printer:
```{r}
library("wrapr")
```
```{r, comment=''}
cat(draw_frame(d))
```
However, the above rendering is actually executable `R` code. If we run it, we re-create
the original `data.frame()`.
```{r}
d2 <- build_frame(
"names", "x", "y" |
"a" , 1 , 1 |
"b" , 2 , 4 |
"c" , 3 , 9 |
"d" , 4 , 16 )
print(d2)
```
The merit is: the above input is how it looks when printed.
The technique is intended for typing small examples (or [`cdata`](https://github.com/WinVector/cdata) control tables) and only builds `data.frame`s with atomic types (characters, numerics, and logicals; no times, factors, or list columns). The specification rule is: the first appearance of an infix 2-argument function call (in this case the infix "or symbol" "<code>|</code>") is taken to mean the earlier arguments are part of the header or column names, and later arguments are values. The other appearances of "<code>|</code>" are ignored. This means we could also write the frame as follows:
```{r}
build_frame(
"names", "x", "y" |
"a" , 1 , 1 ,
"b" , 2 , 4 ,
"c" , 3 , 9 ,
"d" , 4 , 16 )
```
This is more limited than `base::dump()`, but also more legible.
```{r, comment=""}
cat(dump("d", ""))
```
One can use the combination of `build_frame()` and `draw_frame()` to neaten up by-hand examples for later use (via copy and paste):
```{r, comment=""}
cat(draw_frame(build_frame(
"names", "x", "y" |
"a", 1, 1,
"b", 2, 4,
"c", 3, 9,
"d", 4, 16)))
```
`build_frame()` allows for simple substitutions of values. In contrast the method `qchar_frame()`
builds `data.frame`s containing only `character` types and doesn't require quoting (though it does allow it).
```{r}
qchar_frame(
col_1, col_2, col_3 |
a , b , c |
d , e , "f g" )
```
`build_frame()` is intended to capture typed-in examples, and is only compatible with very limited in-place calculation and substitution, and that _must_ be in parentheses:
```{r}
build_frame(
"names", "x" , "y" |
"a" , 1 , 1 |
"b" , cos(2) , 4 |
"c" , (3+2) , 9 |
"d" , 4 , 16 )
```
Expressions not in parentheses (such as "<code>3 + 2</code>") will confuse the language transform `build_frame()` uses to detect cell boundaries.
| /scratch/gouwar.j/cran-all/cranData/wrapr/vignettes/FrameTools.Rmd |
---
title: "Named Arguments"
author: "John Mount, Win-Vector LLC"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Named Arguments}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
R's named function argument binding is a great aid in writing correct programs. It is a good idea, if practical, to force optional arguments to only be usable by name. To do this declare the additional arguments after "<code>...</code>" and enforce that none got lost in the "<code>...</code> trap" by using a checker such as <a href="https://winvector.github.io/wrapr/reference/stop_if_dot_args.html">wrapr::stop_if_dot_args()</a>.
Example:
```{r}
#' Increment x by inc.
#'
#' @param x item to add to
#' @param ... not used for values, forces later arguments to bind by name
#' @param inc (optional) value to add
#' @return x+inc
#'
#' @examples
#'
#' f(7) # returns 8
#'
f <- function(x, ..., inc = 1) {
wrapr::stop_if_dot_args(substitute(list(...)), "f")
x + inc
}
f(7)
f(7, inc = 2)
tryCatch(
f(7, q = mtcars),
error = function(e) { print(e) })
tryCatch(
f(7, 2),
error = function(e) { print(e) })
```
By R function evaluation rules, any unexpected/undeclared arguments are captured by the "<code>...</code>" argument. Then "wrapr::stop_if_dot_args()" inspects for such values and reports an error if any are present. The "f" string is included as part of the error message; I chose the name of the function in this case. The "substitute(list(...))" part is R's way of making the contents of "..." available for inspection.
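A minimal sketch of that capture idiom on its own (the helper `count_dots()` is our own illustration, not part of `wrapr`):
```{r dotsSketch}
count_dots <- function(...) {
  captured <- substitute(list(...))  # unevaluated contents of "..."
  length(captured) - 1               # subtract 1 for the list() call itself
}
count_dots()            # 0: nothing fell into the dots
count_dots(7, q = 1:3)  # 2: two arguments were caught
```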
You can also use the technique on required arguments. <a href="https://winvector.github.io/wrapr/reference/stop_if_dot_args.html">wrapr::stop_if_dot_args()</a> is a simple low-dependency helper function intended to make writing code such as the above easier. This is under the rubric that hidden errors are worse than thrown exceptions. It is best to find and signal problems early, and near the cause.
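A sketch of that variation: below `inc` has no default (so it is required), but because it is declared after "<code>...</code>" it can only be passed by name (the function `h()` is our own illustration):
```{r requiredByName}
h <- function(x, ..., inc) {
  wrapr::stop_if_dot_args(substitute(list(...)), "h")
  x + inc
}
h(7, inc = 2)
tryCatch(
  h(7, 2),  # the 2 falls into "..." and is caught
  error = function(e) { print(e) })
```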
The idea is that you should not expect a user to remember the positions of more than 1 to 3 arguments, the rest should only be referable by name. Do not make your users count along large sequences of arguments, <a href="https://en.wikipedia.org/wiki/Subitizing">the human brain may have special cases for small sequences</a>.
<blockquote>
If you have a procedure with 10 parameters, you probably missed some.
<p/>Alan Perlis, "Epigrams on Programming", ACM SIGPLAN Notices 17 (9), September 1982, pp. 7–13.
</blockquote>
Note that the "<code>substitute(list(...))</code>" part is the R idiom for capturing the unevaluated contents of "<code>...</code>"; I felt it best to use standard R as much as possible in favor of introducing any <em>additional</em> magic invocations. | /scratch/gouwar.j/cran-all/cranData/wrapr/vignettes/Named_Arguments.Rmd |
---
title: "Quoting Concatinate"
author: "John Mount"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Quoting Concatinate}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
In [`R`](https://www.r-project.org) data analysis, code-capturing interfaces (or non-standard-evaluation/NSE interfaces) are considered a fun convenience. These allow users to type column names in directly when working. However, in functions and packages there can be a large interface price or code-safety risk to pay for supplying the user the minor convenience of eliding a few quotation marks.
Our package [`wrapr`](https://CRAN.R-project.org/package=wrapr) has the sensible design principle: NSE can be convenient to the user, but we are going to isolate it to a few methods (for safe and simple code). Users then pass the objects that those methods create as *values* to other methods. Computation is meant to be over values, so this is a good trade-off.
The primary place code capturing shows up in our `wrapr` `R` package is in the [`qc()`](https://winvector.github.io/wrapr/reference/qc.html) and [`qchar_frame()`](https://winvector.github.io/wrapr/reference/qchar_frame.html) methods.
`qc()` stands for "quoting concatenate"; it is much like `R`'s `c()` (combine), but it quotes its arguments before concatenating vectors or lists.
It lets you replace this:
```{r}
c("Petal.Width", "Petal.Length")
```
with this:
```{r}
library("wrapr")
qc(Petal.Width, Petal.Length)
```
This, in turn, lets you replace this:
```
library("dplyr")
iris %>%
select(., Petal.Width, Petal.Length) %>%
head(.)
```
with this:
```{r}
iris[, qc(Petal.Width, Petal.Length)] %.>%
head(.)
```
We still skipped the quotes, and the NSE stuff is safely isolated from the rest of the system.
`wrapr` now incorporates `bquote()` based quasiquotation in a few of its interfaces.
Quasiquotation was introduced into R by Thomas Lumley in 2003, and allows users to signal they want to turn off quotation for portions of their code. The user indicates they do not wish a portion of their code to be quoted (but instead want it evaluated for its value) by surrounding that portion with the function-notation "`.()`".
An example of this is given here.
```{r}
OTHER_SYMBOL <- "Petal.Length"
qc(Petal.Width, OTHER_SYMBOL)
qc(Petal.Width, .(OTHER_SYMBOL))
```
This should be familiar to `data.table` users, as `data.table` has supported related notations for quite some time.
Also, `qc()` is designed to have a simple "mutually recursive" relationship with `c()` (i.e. they call each other when they see one another). This means `c()` is also a quasiquotation escape-notation for `qc()`:
```{r}
qc(Petal.Width, c(OTHER_SYMBOL))
```
This escape notation arises as a natural consequence of a design of `qc()` that calls `c()` instead of quoting it (i.e. delegates `c()`-expressions to `c()`). The `bquote()`-style "`.()`" should be the preferred notation for regularity, and to match any other `bquote()` quasiquotation interface (such as `qchar_frame()` or [even a variation of `dplyr`](https://win-vector.com/2018/10/16/quasiquotation-in-r-via-bquote/)).
`qc()` also takes some trouble to work with named vectors:
```{r}
qc(a = b)
```
And we can even re-map the left-hand sides (or names) if we use the alternate "`:=`" assignment notation.
```{r}
LEFT_NAME = "a"
qc(.(LEFT_NAME) := b)
```
Notice that, syntactically, `qc()` fills a general niche much like the one the specific function `ggplot2::aes()` fills in the `ggplot2` package.
The `qc()` notation is very powerful and clearly indicates which quoting rules are in effect where. We strongly suggest users look to it for code-capturing, and that package developers recommend it to their users.
| /scratch/gouwar.j/cran-all/cranData/wrapr/vignettes/QuotingConcatinate.Rmd |
---
title: "Substitution Modes"
author: "John Mount"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Substitution Modes}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
## The substitution modes
`wrapr::let()` now has three substitution implementations:
* Language substitution (`subsMethod='langsubs'`, the new default). In this mode user code is captured as an abstract syntax tree (or parse tree) and substitution is performed only on nodes known to be symbols or behaving in a symbol-role (`"X"` in `d$"X"` is one such example).
* Substitute substitution (`subsMethod='subsubs'`). In this mode substitution is performed by `R`'s own `base::substitute()`.
* String substitution (`subsMethod='stringsubs'`, the previous default now deprecated). In this mode user code is captured as text and then string replacement on word-boundaries is used to substitute in variable re-mappings.
The semantics of the three methods can be illustrated by showing the effects of substituting the variable name "`y`" for "`X`" and the function "`sin`" for "`F`" in the somewhat complicated block of statements:
```r
{
d <- data.frame("X" = "X", X2 = "XX", d = X*X, .X = X_)
X <- list(X = d$X, X2 = d$"X", v1 = `X`, v2 = ` X`, F(1:2))
X$a
"X"$a
X = function(X, ...) { X + 1 }
}
```
This block contains a lot of different examples and corner cases.
#### Language substitution (`subsMethod='langsubs'`)
```{r exlang}
library("wrapr")
let(
c(X = 'y', F = 'sin'),
{
d <- data.frame("X" = "X", X2 = "XX", d = X*X, .X = X_)
X <- list(X = d$X, X2 = d$"X", v1 = `X`, v2 = ` X`, F(1:2))
X$a
"X"$a
X = function(X, ...) { X + 1 }
},
eval = FALSE, subsMethod = 'langsubs')
```
Notice the substitution replaced all symbol-like uses of "`X`", and only these (including correctly working with some that were quoted!).
#### String substitution (`subsMethod='stringsubs'`)
```{r exstr}
let(
c(X = 'y', F = 'sin'),
{
d <- data.frame("X" = "X", X2 = "XX", d = X*X, .X = X_)
X <- list(X = d$X, X2 = d$"X", v1 = `X`, v2 = ` X`, F(1:2))
X$a
"X"$a
X = function(X, ...) { X + 1 }
},
eval = FALSE, subsMethod = 'stringsubs')
```
Notice string substitution has a few flaws: it went after variable names that appeared to start with a word-boundary (the cases where the variable name started with a dot or a space). Substitution also occurred in some string constants (which as we have seen could be considered a good thing).
These situations are all avoidable as both the code inside the `let`-block and the substitution targets are chosen by the programmer, so they can be chosen to be simple and mutually consistent. We suggest "`ALL_CAPS`" style substitution targets as they jump out as being macro targets. But, of course, it is better to have stricter control on substitution.
Think of the language substitution implementation as a lower-bound on a perfect implementation (cautious, with a few corner cases to get coverage) and string substitution as an upper bound on a perfect implementation (aggressive, with a few over-reaches).
#### Substitute substitution (`subsMethod='subsubs'`)
```{r exsubs}
let(c(X = 'y', F = 'sin'),
{
d <- data.frame("X" = "X", X2 = "XX", d = X*X, .X = X_)
X <- list(X = d$X, X2 = d$"X", v1 = `X`, v2 = ` X`, F(1:2))
X$a
"X"$a
X = function(X, ...) { X + 1 }
},
eval = FALSE, subsMethod = 'subsubs')
```
Notice `base::substitute()` doesn't re-write left-hand-sides of argument bindings. This is why I originally didn't consider using this implementation. Re-writing left-hand-sides of assignments is critical in expressions such as `dplyr::mutate( RESULTCOL = INPUTCOL + 1)`. Also `base::substitute()` doesn't special case the `d$"X"` situation (but that really isn't very important).
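We can verify this behavior of `base::substitute()` directly (a minimal sketch):
```{r subslhs}
# INPUTCOL in value position is replaced, but the argument
# name RESULTCOL on the left of "=" is left alone
substitute(
  f(RESULTCOL = INPUTCOL + 1),
  list(RESULTCOL = as.name('x'), INPUTCOL = as.name('y')))
```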
## Conclusion
`wrapr::let()` when used prudently is a safe and powerful tool.
| /scratch/gouwar.j/cran-all/cranData/wrapr/vignettes/SubstitutionModes.Rmd |
---
title: "bquote"
author: "John Mount"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{bquote}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
It would be nice if [<code>R</code>](https://www.r-project.org) [string-interpolation](https://en.wikipedia.org/wiki/String_interpolation) and [quasi-quotation](https://en.wikipedia.org/wiki/Grave_accent#Use_in_programming) both used the same notation. They are related concepts. So some commonality of notation would actually be clarifying, and help teach the concepts. We will define both of the above terms, and demonstrate the relation between the two concepts.
## String-interpolation
[String-interpolation](https://en.wikipedia.org/wiki/String_interpolation) is the name for substituting value into a string.
For example:
```{r}
library("wrapr")
variable <- "angle"
sinterp(
'variable name is .(variable)'
)
```
Notice the "<code>.(variable)</code>" portion was replaced with the actual variable name "<code>angle</code>".
For string interpolation we are intentionally using the "<code>.()</code>" notation that Thomas Lumley picked in 2003 when he introduced quasi-quotation into <code>R</code> (a different concept than string-interpolation, but the topic of our next section).
String interpolation is a common need, and there are many [<code>R</code>](https://www.r-project.org) packages that supply variations of such functionality:
* <code>base::sprintf</code>
* [<code>R.utils::gstring()</code>](https://CRAN.R-project.org/package=R.utils)
* [<code>rprintf::rprintf()</code>](https://CRAN.R-project.org/package=rprintf)
* [<code>stringr::str_interp()</code>](https://CRAN.R-project.org/package=stringr)
* [<code>glue::glue()</code>](https://CRAN.R-project.org/package=glue)
* [<code>wrapr::sinterp()</code>](https://winvector.github.io/wrapr/reference/sinterp.html).
## Quasi-quotation
A related idea is ["quasi-quotation"](https://en.wikipedia.org/wiki/Grave_accent#Use_in_programming) which substitutes a value into a general expression. For example:
```{r}
angle = 1:10
variable_name <- as.name("angle")
if(requireNamespace("graphics", quietly = TRUE)) {
evalb(
plot(x = .(-variable_name),
y = sin(.(-variable_name)))
)
}
```
Notice how in the above plot the actual variable name "<code>angle</code>" was substituted into the <code>graphics::plot()</code> arguments, allowing this name to appear on the axis labels.
We can also use strings in place of names by using the `.(-)` notation, which strips quotes to convert strings to names.
```{r}
angle = 1:10
variable_string <- "angle"
if(requireNamespace("graphics", quietly = TRUE)) {
evalb(
plot(x = .(-variable_string),
y = sin(.(-variable_string)))
)
}
```
<code>evalb()</code> is a very simple function built on top of <code>base::bquote()</code>.
All <code>evalb()</code> does is call <code>bquote()</code>, with the extension that `.(-x)` is shorthand for `.(as.name(x))`.
And we see the un-executed code with the substitutions performed.
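For instance, here is the un-executed expression that gets built (a minimal sketch using plain `base::bquote()`; `evalb()` would additionally evaluate it):
```{r bquoteSketch}
variable_string <- "angle"
# .(-variable_string) in evalb() stands for .(as.name(variable_string)):
bquote(plot(x = .(as.name(variable_string)),
            y = sin(.(as.name(variable_string)))))
```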
There are many <code>R</code> quasi-quotation systems including:
* <code>base::bquote()</code>
* [<code>gtools::strmacro()</code>](https://CRAN.R-project.org/package=gtools)
* [<code>lazyeval</code>](https://CRAN.R-project.org/package=lazyeval)
* [<code>wrapr::let()</code>](https://winvector.github.io/wrapr/reference/let.html)
* <code>rlang::as_quosure()</code>
* [<code>nseval</code>](https://CRAN.R-project.org/package=nseval)
If you don't want to wrap your <code>plot()</code> call in <code>evalb()</code> you can instead pre-adapt the function. Below we create a new function <code>plotb()</code> that is intended as shorthand for <code>eval(bquote(plot(...)))</code>.
```{r}
plotb <- bquote_function(graphics::plot)
if(requireNamespace("graphics", quietly = TRUE)) {
plotb(x = .(-variable),
y = sin(.(-variable)))
}
```
The `wrapr` [dot arrow pipe](https://winvector.github.io/wrapr/reference/dot_arrow.html) also uses the `bquote`-style escape to specify "extra execution". For example.
```{r}
f <- function() {
sin
}
# pipe 5 to the value of f()
# the .() says to evaluate f() before the
# piping
5 %.>% .(f())
# evaluate "f()"" with . = 5
# not interesting as "f()"" is "dot free"
5 %.>% f()
```
We can annotate any function as "eager eval" as follows.
```{r}
attr(f, 'dotpipe_eager_eval_function') <- TRUE
# now evaluates the pipe on the f() result.
5 %.>% f()
```
## Conclusion
When string-interpolation and quasi-quotation use the same notation we can teach them quickly as simple related concepts.
| /scratch/gouwar.j/cran-all/cranData/wrapr/vignettes/bquote.Rmd |
---
title: "Dot Pipe"
author: "John Mount"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Dot Pipe}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
[`%.>%` dot arrow pipe](https://winvector.github.io/wrapr/reference/dot_arrow.html) is a strict pipe with intended semantics:
> "`a %.>% b`" is to be treated
> as if the user had written "`{ . <- a; b };`"
> with "`%.>%`" being treated as left-associative.
That is: `%.>%` does not alter any function arguments that are not explicitly named. `%.>%` is designed to be explicit and simple.
The following two expressions should be equivalent:
```{r pipe1s}
library("wrapr")
cos(exp(sin(4)))
4 %.>% sin(.) %.>% exp(.) %.>% cos(.)
```
The notation is quite powerful as it treats pipe stages as expression parameterized over the variable
"`.`". This means you do not need to introduce functions to express stages. The following is
a valid dot-pipe:
```{r pipe1}
1:4 %.>% .^2
```
The notation is also very regular in that many variations of expression work as expected. Example:
```{r pipe2}
5 %.>% sin(.)
5 %.>% base::sin(.)
```
Regularity can be a *big* advantage in teaching and comprehension. Please see ["In Praise of Syntactic Sugar"](https://win-vector.com/2017/07/07/in-praise-of-syntactic-sugar/) for discussion.
The dot arrow pipe has S3/S4 dispatch (please see ["Dot-Pipe: an S3 Extensible Pipe for R"](https://journal.r-project.org/archive/2018/RJ-2018-042/index.html)).
However, as the right-hand side of the pipe is normally held unevaluated (unless it is a name), we don't always know its class, so we can't always use the class of the right-hand side to dispatch. To force the evaluation of a pipe term, wrap it in the `.()` notation, which in the context of the pipe means "evaluate early." An example is given below:
```{r peager}
f <- function() { sin }
# returns f() ignoring dot, not what we want
5 %.>% f()
# evaluates f() early then evaluates result with .-substitution rules
5 %.>% .(f())
```
| /scratch/gouwar.j/cran-all/cranData/wrapr/vignettes/dot_pipe.Rmd |
---
title: "lambda Function Builder"
author: "John Mount"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{lambda Function Builder}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
The [`CRAN`](https://cran.r-project.org) version of the [`R`](https://www.r-project.org) package [`wrapr`](https://CRAN.R-project.org/package=wrapr) now includes a concise anonymous function constructor: `l()`.
To use it please do the following: attach `wrapr` and ask it to place a definition for `l()` in your environment:
```{r wrapri}
library("wrapr")
wrapr::defineLambda(name = "l")
ls()
```
Note: throughout this document we are using the letter "`l`" as a stand-in for the Greek letter lambda, as this non-ASCII character can cause formatting problems in some situations.
You can use `l()` to define functions. The syntax is: `l(arg [, arg]*, body [, env=env])`. That
is, we write an `l()`-call (which you can do by cutting and pasting) and list the desired function arguments and then the function body. For example, the function that squares numbers is:
```{r fsq1}
l(x, x^2)
```
We can use such a function to square the first four positive integers as follows:
```{r fsq2}
sapply(1:4, l(x, x^2))
```
Dot-pipe style notation does not need the `l()` factory as it treats pipe stages
as expressions parameterized over the variable "`.`":
```{r fsqp}
1:4 %.>% { .^2 }
```
And we can also build functions that take more than one argument as follows:
```{r ft}
l(x, y, x + 3*y)
```
| /scratch/gouwar.j/cran-all/cranData/wrapr/vignettes/lambda.Rmd |
---
title: "Let"
author: "Nina Zumel, John Mount"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Let}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
This vignette demonstrates the use of `let` to standardize calls to functions that use non-standard evaluation. For a more formal description please see [here](https://github.com/WinVector/wrapr/blob/master/extras/wrapr_let.pdf).
For the purposes of this discussion, *standard evaluation* of variables preserves referential transparency: that is, values and references to values behave the same.
```{r}
x = 5
print(5 + 1)
print(x + 1)
```
Some functions in R use *non-standard evaluation* (NSE) of variables, in order to snoop variable names (for example, `plot`), or to delay or even avoid argument evaluation (for example `library(foobar)` versus `library("foobar")`).
In the case of `plot`, NSE lets `plot` use the variable names as the axis labels.
```{r}
set.seed(1234)
xvar = runif(100) - 0.5
yvar = dnorm(xvar)
plot(xvar, yvar)
```
In the case of `library`, non-standard evaluation saves typing a couple of quotes. The dollar-sign notation for accessing data frame columns also uses non-standard evaluation.
```{r}
d <- data.frame(x=c(1,NA))
d$x
```
Issues arise when you want to use functions that use non-standard evaluation -- for brevity, I'll call these *NSE expressions* -- but you don't know the name of the variable, as might happen when you are calling these expression from within another function. Generally in these situations, you are taking the name of the desired variable from a string. But how do you pass it to the NSE expression?
For this discussion, we will demonstrate `let` to standardize calling `plot` with unknown variables.
`let` takes two arguments:
* A list of assignments *symname=varname*, where *symname* is the name used in the NSE expression, and *varname* is the name (as a string) of the desired variable.
* The NSE expression. Enclose a block of multiple expressions in braces (`{ }`).
Here's the `plot` example again.
```{r}
library("wrapr")
xvariable = "xvar"
yvariable = "yvar"
let(
c(XVARIABLE=xvariable, YVARIABLE=yvariable),
{ # since we have the names as strings, we can create a title
title = paste(yvariable, "vs", xvariable)
plot(XVARIABLE, YVARIABLE, main=title)
}
)
```
In the above `let()` block we are using the `alias`-convention that we specify substitution target names (in this case `XVARIABLE` and `YVARIABLE`) as upper-case analogues of the substitution name values (in this case `xvariable` and `yvariable`). This convention is very legible and makes it easy to both use value interfaces (as we did in the title `paste()`) and name-capturing interfaces (`plot()` itself).
## Implementation details
Roughly `wrapr::let(A, B)` behaves like a syntactic sugar for `eval(substitute(B, A))`.
```{r}
a <- 1
b <- 2
let(c(z=quote(a)), z+b)
eval(substitute(z+b, c(z=quote(a))))
```
However, `wrapr::let()` is actually implemented in terms of a de-parse and safe language token substitution.
`wrapr::let()` was inspired by `gtools::strmacro()` and `base::bquote()`, please
see [here](https://github.com/WinVector/wrapr/blob/master/extras/bquote.md) for some notes on macro methods in `R`.
## More
For more discussion please see:
* [our video on non-standard evaluation and `let`](https://youtu.be/iKLGxzzm9Hk?list=PLAKBwakacHbQp_Z66asDnjn-0qttTO-o9).
* [Standard nonstandard evaluation rules](https://developer.r-project.org/nonstandard-eval.pdf).
* [technical article on let](https://github.com/WinVector/wrapr/blob/master/extras/wrapr_let.pdf).
| /scratch/gouwar.j/cran-all/cranData/wrapr/vignettes/let.Rmd |
---
title: "Multiple Assignment"
author: "John Mount"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Multiple Assignment}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
[`wrapr`](https://github.com/WinVector/wrapr) now supplies a name-based multiple assignment notation for [`R`](https://www.r-project.org).
In `R` there are many functions that return named lists or other structures keyed by names. Let's start with a simple example: `base::split()`.
First some example data.
```{r}
d <- data.frame(
x = 1:9,
group = c('train', 'calibrate', 'test'),
stringsAsFactors = FALSE)
knitr::kable(d)
```
One way to use `base::split()` is to call it on a `data.frame` and then unpack the desired portions from the returned value.
```{r}
parts <- split(d, d$group)
train_data <- parts$train
calibrate_data <- parts$calibrate
test_data <- parts$test
```
```{r}
knitr::kable(train_data)
knitr::kable(calibrate_data)
knitr::kable(test_data)
```
If we use a multiple assignment notation we can collect some steps together, and avoid leaving a possibly large temporary variable such as `parts` in our environment.
Let's clear out our earlier results.
```{r}
rm(list = c('train_data', 'calibrate_data', 'test_data', 'parts'))
```
And now let's apply `split()` and unpack the results in one step.
```{r}
library(wrapr)
to[
train_data <- train,
calibrate_data <- calibrate,
test_data <- test
] <- split(d, d$group)
```
```{r}
knitr::kable(train_data)
knitr::kable(calibrate_data)
knitr::kable(test_data)
```
The semantics of `[]<-` imply that an object named "`to`" is left in our workspace as a side effect. However, this object is small, and if there is already an object named `to` in the workspace that is not of class `Unpacker` the unpacking is aborted prior to overwriting anything. The unpacker has two modes: `unpack` (a function that needs a dot in pipes) and `to` (an eager function factory that does not require a dot in pipes). The side-effect can be avoided by using `:=` for assignment.
```{r}
rm(list = c('train_data', 'calibrate_data', 'test_data', 'to'))
to[
train_data <- train,
calibrate_data <- calibrate,
test_data <- test
] := split(d, d$group)
ls()
```
Also the side-effect can be avoided by using alternate non-array update notations.
We will demonstrate a few of these. First is pipe to array notation.
```{r}
rm(list = c('train_data', 'calibrate_data', 'test_data'))
```
```{r}
split(d, d$group) %.>% to[
train_data <- train,
calibrate_data <- calibrate,
test_data <- test
]
ls()
```
Note the above is the [`wrapr` dot arrow pipe](https://journal.r-project.org/archive/2018/RJ-2018-042/index.html) (which requires explicit dots to denote pipe targets). In this case it is dispatching on the class of the right-hand side argument to get the effect. This is a common feature of the wrapr dot arrow pipe. We could get a similar effect by using right-assignment "`->`" instead of the pipe, as in the sketch below.
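Here is that right-assignment variant (a minimal sketch, equivalent to the `to[...] <- split(...)` form shown earlier):
```{r}
rm(list = c('train_data', 'calibrate_data', 'test_data'))
split(d, d$group) -> to[
  train_data <- train,
  calibrate_data <- calibrate,
  test_data <- test
]
ls()
# clean up the side-effect object, as with the earlier []<- form
rm(list = 'to')
```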
We can also use a pipe function notation.
```{r}
rm(list = c('train_data', 'calibrate_data', 'test_data'))
```
```{r}
split(d, d$group) %.>% to(
train_data <- train,
calibrate_data <- calibrate,
test_data <- test
)
ls()
```
Notice piping to `to()` is like piping to `to[]`, no dot is needed.
We can not currently use the `magrittr` pipe in the above as in that case the unpacked results are lost in a temporary intermediate environment `magrittr` uses during execution.
A more conventional functional form is given in `unpack()`. `unpack()` requires a dot in `wrapr` pipelines.
```{r}
rm(list = c('train_data', 'calibrate_data', 'test_data'))
```
```{r}
split(d, d$group) %.>% unpack(
.,
train_data <- train,
calibrate_data <- calibrate,
test_data <- test
)
ls()
```
Unpack also supports the pipe-to-array and assign-to-array notations. In addition, with `unpack()` we could also use the conventional function notation.
```{r}
rm(list = c('train_data', 'calibrate_data', 'test_data'))
```
```{r}
unpack(
split(d, d$group),
train_data <- train,
calibrate_data <- calibrate,
test_data <- test
)
ls()
```
`to()` can not be directly used as a function. It is *strongly* suggested that the objects returned by `to[]`, `to()`, and `unpack[]` *not ever* be stored in variables, but instead only produced, used, and discarded. The issue is that these are objects of class `"UnpackTarget"` and have the unpack destination names already bound in. This means if one of these is used in code: a user reading the code can not tell where the side-effects are going without examining the contents of the object.
The assignments in the unpacking block can be any of `<-`, `=`, `:=`, or even `->` (though the last one assigns left to right).
```{r}
rm(list = c('train_data', 'calibrate_data', 'test_data'))
```
```{r}
unpack(
split(d, d$group),
train_data = train,
calibrate_data = calibrate,
test_data = test
)
ls()
```
```{r}
rm(list = c('train_data', 'calibrate_data', 'test_data'))
```
```{r}
unpack(
split(d, d$group),
train -> train_data,
calibrate -> calibrate_data,
test -> test_data
)
ls()
```
It is a caught and signaled error to attempt to unpack an item that is not there.
```{r}
rm(list = c('train_data', 'calibrate_data', 'test_data'))
```
```{r, error=TRUE}
unpack(
split(d, d$group),
train_data <- train,
calibrate_data <- calibrate_misspelled,
test_data <- test
)
```
```{r}
ls()
```
The unpack attempts to be atomic: preferring to unpack all values or no values.
Also, one does not have to unpack all slots.
```{r}
unpack(
split(d, d$group),
train_data <- train,
test_data <- test
)
ls()
```
We can use a name alone as shorthand for `name <- name` (i.e. unpacking to the same name as in the incoming object).
```{r}
rm(list = c('train_data', 'test_data'))
```
```{r}
split(d, d$group) %.>%
to[
train,
test
]
ls()
```
We can also use `bquote` `.()` notation to use variables to specify where data is coming from.
```{r}
rm(list = c('train', 'test'))
```
```{r}
train_source <- 'train'
split(d, d$group) %.>%
to[
train_result = .(train_source),
test
]
ls()
```
In all cases the user explicitly documents the intended data sources and data destinations at the place of assignment. This means a later reader of the source code can see what the operation does, without having to know the values of additional variables.
Related work includes:
<ul>
<li>
The <a href="https://CRAN.R-project.org/package=zeallot"><code>zeallot::%<-%</code></a> arrow already supplies excellent positional or ordered unpacking. But we feel that style may be more appropriate in the Python world where many functions return un-named tuples of results. Python functions tend to have positional tuple return values <em>because</em> the Python language has had positional tuple unpacking as a core language feature for a very long time (thus positional structures have become "Pythonic"). R has not emphasized positional unpacking, so R functions tend to return named lists or named structures. For named lists or named structures it may not be safe to rely on value positions. So I feel it is more "R-like" to use named unpacking.</li>
<li>
<a href="https://github.com/crowding/vadr/blob/master/R/bind.R"><code>vadr::bind</code></a> supplies named unpacking, but appears to use a "<code>SOURCE = DESTINATION</code>" notation. That is the reverse of a "<code>DESTINATION = SOURCE</code>" which is how both R assignments and argument binding are already written.</li>
<li><code>base::attach</code>. <code>base::attach</code> adds items to the search path with names controlled by the object being attached (instead of by the user).</li>
<li><code>base::with()</code>. <code>unpack(list(a = 1, b = 2), x <- a, y <- b)
</code> works a lot like <code>
with(list(a = 1, b = 2), { x <<- a; y <<- b })</code>.
</li>
<li>
<a href="https://CRAN.R-project.org/package=tidytidbits"><code>tidytidbits</code></a> supplies positional unpacking with a <code>%=%</code> notation.
</li>
<li><a href="https://winvector.github.io/wrapr/articles/let.html"><code>wrapr::let()</code></a>. <code>wrapr::let()</code> re-maps names during code execution using a "<code>TARGET = NEWNAME</code>" target replacement scheme, where <code>TARGET</code> acts as if it had the name stored in <code>NEWNAME</code> for the duration of the let-block.
</li>
</ul>
| /scratch/gouwar.j/cran-all/cranData/wrapr/vignettes/multi_assign.Rmd |
---
title: "Named Map Builder"
author: "John Mount"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Named Map Builder}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
"named map builder" is an operator written as "`:=`". Named map builder is a *very* simple bit of code that performs a very simple task: it adds names to vectors or lists (making them work more like maps).
Here are some examples:
```{r ex1}
library("wrapr")
'a' := 5
c('a' := 5, 'b' := 6)
c('a', 'b') := c(5, 6)
```
The left-side argument of the `:=` operator is called "the names", and the right-side argument is called "the values". The `:=` operator returns the values with the names attached as names.
`:=` is a left-over assignment operator in `R`. It is part of the syntax, but by default not defined.
`data.table` has long used `:=` to denote "in-place assignment" as in the following.
```
library("data.table")
data.table(x = 1)[, y := x + 1][]
# x y
# 1: 1 2
```
`dplyr` later adopted the `:=` notation as this allows for substitution on the left-hand sides of assignments. [`wrapr::qc()`](https://winvector.github.io/wrapr/articles/QuotingConcatinate.html) uses the `:=` for the same purpose.
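For example, with `qc()` a variable can supply the name on the left (as also shown in the quoting-concatenate vignette):
```{r qcsub}
NEWNAME <- 'a'
qc(.(NEWNAME) := b)
```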
A key use of the named map builder is the following:
```{r key1}
`:=` <- wrapr::`:=` # in case data.table's "catch calls" definition is active
key = 'keycode'
key := 'value'
```
Notice the value inside the variable `key` was used as the name; this differs from
what is easily done with `R`'s native `c(key = 'value')` style notation.
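For comparison, the native notation takes the literal symbol as the name:
```{r natc}
c(key = 'value')  # the name is "key", not "keycode"
```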
```{r print, eval=FALSE}
help(`:=`, package = 'wrapr')
```
| /scratch/gouwar.j/cran-all/cranData/wrapr/vignettes/named_map_builder.Rmd |
---
title: "Multiple Assignment with unpack"
author: "Nina Zumel and John Mount"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Multiple Assignment with unpack}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
In `R` there are many functions that return named lists or other structures keyed by names. Often, you want to unpack the elements of such a list into separate variables, for ease of use. One example is the use of `split()` to partition a larger data frame into a named list of smaller data frames, each corresponding to some grouping.
```{r}
library(wrapr)
# example data
d <- data.frame(
x = 1:9,
group = c('train', 'calibrate', 'test'),
stringsAsFactors = FALSE)
knitr::kable(d)
# split the d by group
(parts <- split(d, d$group))
train_data <- parts$train
calibrate_data <- parts$calibrate
test_data <- parts$test
knitr::kable(train_data)
knitr::kable(calibrate_data)
knitr::kable(test_data)
```
A multiple assignment notation allows us to assign all the smaller data frames to variables in one step, and avoid leaving a possibly large temporary variable such as `parts` in our environment. One such notation is `unpack()`.
## Basic `unpack()` example
```{r}
# clear out the earlier results
rm(list = c('train_data', 'calibrate_data', 'test_data', 'parts'))
# split d and unpack the smaller data frames into separate variables
unpack(split(d, d$group),
train_data = train,
test_data = test,
calibrate_data = calibrate)
knitr::kable(train_data)
knitr::kable(calibrate_data)
knitr::kable(test_data)
```
You can also use `unpack` with an assignment notation similar to the notation used with the
<a href="https://CRAN.R-project.org/package=zeallot"><code>zeallot::%<-%</code></a> pipe:
```{r}
# split d and unpack the smaller data frames into separate variables
unpack[traind = train, testd = test, cald = calibrate] := split(d, d$group)
knitr::kable(traind)
knitr::kable(cald)
knitr::kable(testd)
```
### Reusing the list names as variables
If you are willing to assign the elements of the list into variables with the same names, you can just use the names:
```{r}
unpack(split(d, d$group), train, test, calibrate)
knitr::kable(train)
knitr::kable(calibrate)
knitr::kable(test)
# try the unpack[] assignment notation
rm(list = c('train', 'test', 'calibrate'))
unpack[test, train, calibrate] := split(d, d$group)
knitr::kable(train)
knitr::kable(calibrate)
knitr::kable(test)
```
Mixed notation is allowed:
```{r}
rm(list = c('train', 'test', 'calibrate'))
unpack(split(d, d$group), train, holdout=test, calibrate)
knitr::kable(train)
knitr::kable(calibrate)
knitr::kable(holdout)
```
### Unpacking only parts of a list
You can also unpack only a subset of the list's elements:
```{r error=TRUE}
rm(list = c('train', 'holdout', 'calibrate'))
unpack(split(d, d$group), train, test)
knitr::kable(train)
knitr::kable(test)
# we didn't unpack the calibrate set
calibrate
```
### `unpack` checks for unknown elements
If `unpack` is asked to unpack an element it doesn't recognize, it throws an error. In this case, none of the elements are unpacked, as `unpack` is deliberately an atomic operation.
```{r error=TRUE}
# the split call will not return an element called "holdout"
unpack(split(d, d$group), training = train, testing = holdout)
# train was not unpacked either
training
```
## Other multiple assignment packages
### `zeallot`
The [`zeallot`](https://CRAN.R-project.org/package=zeallot) package already supplies excellent positional or ordered unpacking.
The primary difference between `zeallot`'s <a href="https://CRAN.R-project.org/package=zeallot"><code>%<-%</code></a> pipe and `unpack` is that `%<-%` is a *positional* unpacker: you must unpack the list based on the *order* of the elements in the list. This style may be more appropriate in the Python world where many functions return un-named tuples of results.
`unpack` is a *named* unpacker: assignments are based on the *names* of elements in the list, and the assignments can be in any order. We feel this is more appropriate for R, as R has not emphasized positional unpacking; R functions tend to return named lists or named structures. For named lists or named structures it may not be safe to rely on value positions.
For unpacking named lists, we recommend `unpack`. For unpacking unnamed lists, use `%<-%`.
### `vadr`
<a href="https://github.com/crowding/vadr/blob/master/R/bind.R"><code>vadr::bind</code></a> supplies named unpacking, but appears to use a "<code>SOURCE = DESTINATION</code>" notation. That is the reverse of a "<code>DESTINATION = SOURCE</code>" which is how both R assignments and argument binding are already written.
### `tidytidbits`
<a href="https://CRAN.R-project.org/package=tidytidbits"><code>tidytidbits</a> supplies positional unpacking with a <code>%=%</code> notation.
| /scratch/gouwar.j/cran-all/cranData/wrapr/vignettes/unpack_multiple_assignment.Rmd |
---
title: "wrapr Eager Evaluation"
author: "John Mount, Win-Vector LLC"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{wrapr Eager Evaluation}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
[`wrapr`](https://github.com/WinVector/wrapr) dot arrow piping is designed to emphasize a `a %.>% b` "is nearly" `{. <- a; b}` semantics. In many cases this makes a piped expression of the form `a %.>% b(.)` look very much like `b(a)`. This leads to the observation that "wrapr explicit dot notation" appears to need one more dot than the common "[`magrittr`]( https://CRAN.R-project.org/package=magrittr) dot is a new implicit first argument notation."
There are some special rules around things like names. For example `5 %.>% sin` is *not* valued as `sin`, which would be the strict interpretation of `{. <- 5; sin}`. Instead it is expanded to something closer to `{. <- 5; sin(.)}`, which intentionally looks very much like `sin(5)`. In more complicated cases the user can signal they wish for an eager evaluation of this style by writing an outer `.()` container.
And `wrapr` now also exposes an "eager" annotation such that function evaluations or array indexing operations so-annotated are evaluated eagerly: `a %.>% f(...)` is interpreted roughly as `{. <- a; _f <- eval(f(...)); _f(.)}`, where `_f` is a notional temporary variable (not visible or produced as a side effect). This effect is used in `wrapr`'s "pipe to array" variation of the `unpack` notation (example [here](https://win-vector.com/2020/01/21/using-unpack-to-manage-your-r-environment/)).
This eager effect can be gotten by setting the appropriate attribute as we see below.
For array notation:
```{r}
library(wrapr)
```
```{r}
lst <- list(sin)
# without the attribute, the function is returned
4 %.>% lst[[1]]
```
```{r}
# an outer .() signals for eager eval from the pipeline
4 %.>% .(lst[[1]])
```
```{r}
# with the attribute, the array is always de-referenced
# before the pipe execution allowing the function
# to be evaluated using the piped-in value.
attr(lst, 'dotpipe_eager_eval_bracket') <- TRUE
4 %.>% lst[[1]]
```
For functions:
```{r}
# without the attribute the result is sin
f <- function(...) { sin }
4 %.>% f()
```
```{r}
# an outer .() signals for eager eval from the pipeline
4 %.>% .(f())
```
```{r}
# with the attribute the result is sin(4)
attr(f, 'dotpipe_eager_eval_function') <- TRUE
4 %.>% f()
```
Essentially objects with this attribute have an implicit `.()` "eager eval" on them.
| /scratch/gouwar.j/cran-all/cranData/wrapr/vignettes/wrapr_Eager.Rmd |
---
title: "wrapr_applicable"
author: "John Mount, Win-Vector LLC"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{wrapr_applicable}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
`wrapr` includes de-referencing, function evaluation, and a new concept called `"wrapr_applicable"`: a scheme for dispatching on the type of the right-hand side argument of a pipe.
## Basic `wrapr`
The `wrapr` pipe operators (`%.>%` and `%>.%`) are roughly defined as: `a %>.% b ~ { . <- a; b };`. This works under the assumption that `b` is an expression with free-instances of "`.`". A typical use is:
```{r use1}
library("wrapr")
5 %.>% sin(.)
```
The above is performed by standard `S3` dispatch through exported generic functions called `apply_left()` and `apply_right()`. A formal description of `wrapr` piping can be found [here](https://github.com/WinVector/wrapr/blob/master/extras/wrapr_pipe.pdf).
## Dereferencing and function evaluation
`wrapr` works primarily over expressions and "`.`". `wrapr` tries to de-reference names found in the right-hand side of pipe stages, and also dispatches functions. One can also write the following.
```{r nofn}
5 %.>% sin
5 %.>% base::sin
```
## `"wrapr_applicable"`
Arbitrary objects can ask `wrapr` to treat them as special expressions by overriding one or more of `apply_left()` and `apply_right()` for the `S3` class they wish to have managed.
For example:
```{r sinfn}
function_reference <- list(f = sin)
class(function_reference) <- c("wrapr_applicable", "ourclass")
apply_right.ourclass <- function(pipe_left_arg,
pipe_right_arg,
pipe_environment,
left_arg_name,
pipe_string,
right_arg_name) {
pipe_right_arg$f(pipe_left_arg)
}
function_reference
5 %.>% function_reference
function_reference$f <- sqrt
5 %.>% function_reference
```
The signature arguments work as follows:
* `pipe_left_arg`: The value moving down the pipeline.
* `pipe_right_arg`: The right pipeline operator (essentially "`self`" or "`this`" in object oriented terms, used for `S3` dispatch).
* `pipe_environment`: The environment the pipeline is working in (not usually needed).
* `left_arg_name`: If the left argument was passed in by name, what that name was.
* `pipe_string`: The name of the pipe operator (not usually needed).
* `right_arg_name`: If the right argument was passed in by name, what that name was.
This functionality allows arbitrary objects to directly specify their intended pipeline behavior.
Let's use a debugging function to see the values of all of the arguments.
```{r debug}
apply_right.ourclass <- function(pipe_left_arg,
pipe_right_arg,
pipe_environment,
left_arg_name,
pipe_string,
right_arg_name) {
print("pipe_left_arg")
print(pipe_left_arg)
print("pipe_right_arg")
print(pipe_right_arg)
print("pipe_environment")
print(pipe_environment)
print("left_arg_name")
print(left_arg_name)
print("pipe_string")
print(pipe_string)
print("right_arg_name")
print(right_arg_name)
pipe_right_arg$f(pipe_left_arg)
}
5 %.>% function_reference
a <- 5
a %.>% function_reference
```
## Conclusion
`wrapr` values (left-hand sides of pipe expressions) are completely general.
`wrapr` operators (right-hand sides of pipe expressions) are primarily intended to be expressions that have "`.`" as a free-reference. `wrapr` can also be used with right-hand sides that are function references or with arbitrary annotated objects.
| /scratch/gouwar.j/cran-all/cranData/wrapr/vignettes/wrapr_applicable.Rmd |
##' read.AsspDataObj creates an object of class dobj from a signal or parameter
##' file readable by the ASSP Library (WAVE, SSFF, AU, ...)
##'
##' @title read.AsspDataObj from a signal/parameter file
##' @param fname filename of the signal or parameter file
##' @param begin begin time (default is in seconds) of segment to retrieve
##' @param end end time (default is in seconds) of segment to retrieve
##' @param samples (BOOL) if set to TRUE the begin/end values are interpreted as sample numbers rather than seconds
##' @return list object containing file data
##' @author Lasse Bombien
##' @aliases getAsspDataObj
##' @useDynLib wrassp, .registration = TRUE
##' @export
'read.AsspDataObj' <- 'getAsspDataObj' <- function(fname, begin=0, end=0, samples=FALSE) {
fname <- prepareFiles(fname)
# type cast begin/end if integer
if(inherits(begin, "integer")){
begin = as.numeric(begin)
}
if(inherits(end, "integer")){
end = as.numeric(end)
}
.External("getDObj2", fname, begin=begin, end=end, samples=samples, PACKAGE="wrassp")
}
##' Prints an overview of ASSP Data Objects
##'
##' @title print a summary of an AsspDataObj
##' @param x an object of class AsspDataObj
##'
##' @param ... other arguments that might be passed on to other functions
##' @author Lasse Bombien
##' @method print AsspDataObj
##' @seealso \code{\link{read.AsspDataObj}}
##' @useDynLib wrassp, .registration = TRUE
##' @aliases summary.AsspDataObj
##' @export
"print.AsspDataObj" <- summary.AsspDataObj <- function(x, ...)
{
temp <- attr(x, "filePath")
if (is.null(temp)) {
cat("In-memory Assp Data Object\n")
}
else {
cat(paste("Assp Data Object of file ", temp, ".\n", sep=""))
}
cat(sprintf("Format: %s (%s)\n", AsspFileFormat(x), AsspDataFormat(x)))
cat(paste(as.integer(numRecs.AsspDataObj(x)),
"records at", attr(x, 'sampleRate'), "Hz\n"))
cat(sprintf("Duration: %f s\n", dur.AsspDataObj(x)))
cat(paste("Number of tracks:", length(names(x)), "\n"))
for (track in names(x)) {
cat('\t', track)
cat(paste(" (", ncol(x[[track]]), " fields)\n", sep=''))
}
genVars <- attr(x, 'genericVars')
if (!is.null(genVars)) {
cat("\nGeneric variables:\n")
for (var in names(genVars)) {
cat(sprintf(" %s:", var))
if (genVars[[var]]$Type %in% c("CHAR", "BYTE")) {
cat(sprintf("\t%s\n", genVars[[var]]$Value))
} else {
cat(sprintf("\t%f\n", genVars[[var]]$Value))
}
cat(sprintf(" (%s)\n", genVars[[var]]$Type))
}
}
}
##' Writes an object of class AsspDataObj to a file given the meta information
##' contained in the object.
##'
##' @title write.AsspDataObj to file
##' @param dobj an object of class AsspDataObj
##' @param file file name as a character string, defaults to the
##' \code{filePath} attribute of the AsspDataObj
##' @return NULL
##' @author Lasse Bombien
##' @useDynLib wrassp, .registration = TRUE
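##' @examples
##' \dontrun{
##' # a minimal sketch (the paths are hypothetical): read a signal file,
##' # then write it back out under a new name
##' obj <- read.AsspDataObj('/path/to/file.wav')
##' write.AsspDataObj(obj, file = '/path/to/copy.wav')
##' }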
##' @export
"write.AsspDataObj" <- function (dobj, file=attr(dobj, 'filePath'))
{
if (is.null(file))
stop('File path not set internally. Please specify!')
file <- path.expand(file)
.Call("writeDObj_", dobj, file, PACKAGE="wrassp")
}
##' Checks whether x is a valid AsspDataObj
##'
##' @title Checks whether x is a valid AsspDataObj
##' @param x an object of class AsspDataObj
##' @param ... optional other arguments passed to further functions
##' @return TRUE or FALSE
##' @author Lasse Bombien
##' @useDynLib wrassp, .registration = TRUE
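##' @examples
##' # check an object read from the audio file shipped with the package
##' path2wav <- list.files(system.file("extdata", package = "wrassp"),
##'                        pattern = glob2rx("*.wav"),
##'                        full.names = TRUE)[1]
##' obj <- read.AsspDataObj(path2wav)
##' is.AsspDataObj(obj) # TRUE
##' is.AsspDataObj(42)  # FALSE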
##' @export
is.AsspDataObj <- function (x, ...)
{
if (!inherits(x, "AsspDataObj"))
return (FALSE)
return (TRUE)
}
##' Remove a track from an
##' AsspDataObj object
##'
##' @title Remove track from an AsspDataObj
##' @param dobj An object of class AsspDataObj
##' @param trackname the name of a track in this object
##' @return The object without the track named trackname
##' @author Lasse Bombien
##' @useDynLib wrassp, .registration = TRUE
##' @export
delTrack <- function (dobj, trackname)
{
if (!is.AsspDataObj (dobj))
stop ('First argument must be a AsspDataObj.')
w <- which (names (dobj) == trackname)
if (length (w) != 1)
stop ('Invalid trackname')
## remove track
dobj[[trackname]] <- NULL
## remove
attr(dobj, 'trackFormats') <- attr(dobj, 'trackFormats')[-w]
return (dobj)
}
##' Add a track to an AsspDataObj
##'
##' The specified data object is extended by a new track named \code{trackname}.
##' If there already is a track with the same name and \code{deleteExisting}
##' is \code{FALSE} the function does nothing but throw an error. If
##' \code{deleteExisting} is \code{TRUE} the existing track will be removed
##' (see \code{\link{delTrack}}).
##' \code{data} to be added is a numeric matrix (or will be coerced to one).
##' It must have the same number of rows as the tracks that already exist
##' in the object (if any). The \code{format} argument specifies how the
##' track's values are encoded when the object is written to file.
##' @title Add a track to an AsspDataObj
##' @param dobj The data object to which the data is to be added
##' @param trackname The name of the new track
##' @param data a matrix with values
##' @param format format for binary writing to file (defaults to 'INT16')
##' @param deleteExisting Delete existing track with the same name (default: FALSE)
##' @return the object including the new track
##' @author Lasse Bombien
##' @seealso \code{\link{delTrack}}
##' @useDynLib wrassp, .registration = TRUE
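##' @examples
##' \dontrun{
##' # sketch: assuming 'obj' is an AsspDataObj (e.g. from read.AsspDataObj),
##' # add a track of zeros with one row per existing record
##' obj <- addTrack(obj, 'zeros',
##'                 matrix(0, nrow = numRecs.AsspDataObj(obj), ncol = 1))
##' tracks.AsspDataObj(obj)
##' }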
##' @export
addTrack <- function (dobj, trackname, data, format = 'INT16',
deleteExisting=FALSE) {
if (!is.AsspDataObj(dobj))
stop('dobj must be an AsspDataObj.')
if (!is.numeric(data))
stop('data must be a numeric matrix')
  if (!is.character(trackname) || length(trackname) != 1)
stop('trackname must be an atomic string.')
data <- as.matrix(data)
tracks <- names(dobj)
w <- tracks == trackname
  if (any(w) && !deleteExisting)
stop(paste('Track', trackname,
'exists and will not be deleted',
'("deleteExisting" argument)'))
  if (length(tracks) == 1 && any(w)) {
## this is fine: the only track will be replaced
} else if (length(tracks) > 0) {
if (nrow(data) != nrow(dobj[[1]]))
stop(paste("number of rows in data must match number of rows in",
"existing tracks."))
}
dobj[[trackname]] <- data
if (any(w))
attr(dobj, 'trackFormats')[w] <- format
else
    attr(dobj, 'trackFormats') <- append(attr(dobj, 'trackFormats'), format)
return(dobj)
}
##' List the tracks of an AsspDataObj
##'
##' AsspDataObj objects contain tracks (at least one). This function lists the names
##' of these tracks. This function is equivalent to calling \code{names(x)}.
##' @title tracks.AsspDataObj
##' @param x an object of class AsspDataObj
##' @return a character vector containing the names of the tracks
##' @author Lasse Bombien
##' @export
##' @useDynLib wrassp, .registration = TRUE
tracks.AsspDataObj <- function(x) {
names(x)
}
##' Function to get or set the file format of an AsspDataObj.
##'
##' \code{libassp} handles a number of file formats common in speech research.
##' This function enables the user to determine the file format of an object
##' read from file and to set it for subsequent writing. This allows for file
##' format conversion to some degree. Note that many conversions are not
##' reasonable/possible: conversions are therefore discouraged unless the user
##' knows what they are doing. Format specifiers can be found in
##' \code{\link{AsspFileFormats}} and exist in two forms: a code name and a
##' code number. Both are suitable for setting the format.
##' @title Get and set AsspFileFormat
##' @param x an object of class AsspDataObj
##' @return for \code{AsspFileFormat} the code name of the object's
##' currently set file format
##' @author Lasse Bombien
##' @seealso \code{\link{AsspFileFormats}}, \code{\link{AsspDataFormat}}
##' @examples
##' \dontrun{
##' obj <- read.AsspDataObj('/path/to/file.wav')
##' AsspFileFormat(obj)
##' AsspFileFormat(obj) <- 'SSFF' ## or
##' AsspFileFormat(obj) <- 20
##' }
##' @useDynLib wrassp, .registration = TRUE
##' @export
AsspFileFormat <- function(x) {
## file format is in the first element (of two) in the fileInfo attribute
xx <- x
if (!is.AsspDataObj(xx))
stop('Argument must be an object of class AsspDataObj')
curFormat <- attr(xx, 'fileInfo')[1]
ind <- match(curFormat, AsspFileFormats)
if (is.na(ind))
stop('Invalid file format. This AsspDataObj has been messed with!')
return(names(AsspFileFormats)[ind])
}
##' @rdname AsspFileFormat
##' @param value an integer or a string indicating the new file format
##' @usage AsspFileFormat(x) <- value
##' @return for \code{AsspFileFormat<-}, the updated object
##' @export
"AsspFileFormat<-" <- function(x, value) {
value <- value[1]
if (!is.AsspDataObj(x))
stop('Argument must be an object of class AsspDataObj')
fi <- attr(x, 'fileInfo')
if (is.numeric(value)) {
ind <- match(value, AsspFileFormats)
} else if (is.character(value)) {
ind <- match(value, names(AsspFileFormats))
} else {
stop ('format must be an integer or a string.')
}
if (is.na(ind))
stop('format does not specify a valid file format.')
fi[1] <- AsspFileFormats[ind]
attr(x, 'fileInfo') <- as.integer(fi)
x
}
##' Function to get or set the data format of an AsspDataObj.
##'
##' \code{libassp} can store data in binary and ASCII format.
##' This function enables the user to determine the data format of an object
##' read from file and to set it for subsequent writing.
##' Valid values are
##' \code{'ascii'} (or \code{1}) for ASCII format or \code{'binary'} (or \code{2}) for binary IO.
##' Use is discouraged unless the user knows what they are doing.
##' @title Get/set data format of an AsspDataObj
##' @param x an object of class AsspDataObj
##' @return a string representing the current data format
##' @useDynLib wrassp, .registration = TRUE
##' @seealso \code{\link{AsspFileFormat}}
##' @export
##' @author Lasse Bombien
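##' @examples
##' \dontrun{
##' obj <- read.AsspDataObj('/path/to/file.wav')
##' AsspDataFormat(obj)
##' AsspDataFormat(obj) <- 'ascii' ## or
##' AsspDataFormat(obj) <- 1
##' }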
AsspDataFormat <- function(x) {
f <- attr(x, 'fileInfo')[2]
if (f==1)
return('ascii')
else if (f==2)
return('binary')
else
stop('Invalid data format. This AsspDataObj has been messed with!')
}
##' @rdname AsspDataFormat
##' @param value an integer or a string indicating the new data format
##' @usage AsspDataFormat(x) <- value
##' @return for \code{AsspDataFormat<-}, the updated object
##' @export
##'
"AsspDataFormat<-" <- function(x, value) {
value <- value[1]
fi <- attr(x, 'fileInfo')
if (is.numeric(value)) {
if (value %in% c(1,2))
fi[2] <- value
else
stop('Invalid data format specified')
} else if (is.character(value)) {
formats <- c('ascii', 'binary')
ind <- charmatch(tolower(value), formats)
if (is.na(ind))
stop('Invalid data format specified')
fi[2] <- ind
} else
stop('New value must be an integer or a string.')
attr(x, 'fileInfo') <- as.integer(fi)
x
}
##' Various information on AsspDataObj
##'
##' Some utility functions to retrieve the duration, number of records, sample rate and so on.
##' @title Timing information on AsspDataObj
##' @param x an object of class AsspDataObj
##' @return dur: the duration of the AsspDataObj in seconds
##' @author Lasse Bombien
##' @export
##' @useDynLib wrassp, .registration = TRUE
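##' @examples
##' # timing information for the audio file shipped with the package
##' path2wav <- list.files(system.file("extdata", package = "wrassp"),
##'                        pattern = glob2rx("*.wav"),
##'                        full.names = TRUE)[1]
##' obj <- read.AsspDataObj(path2wav)
##' dur.AsspDataObj(obj)      # duration in seconds
##' numRecs.AsspDataObj(obj)  # number of records
##' rate.AsspDataObj(obj)     # sample rate in Hz
##' startTime.AsspDataObj(obj)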
dur.AsspDataObj <- function(x) {
if (!is.AsspDataObj(x))
stop('Argument must be of class AsspDataObj.')
numRecs.AsspDataObj(x) / attr(x, 'sampleRate')
}
##' @rdname dur.AsspDataObj
##' @return numRecs: the number of records stored in the AsspDataObj
##' @export
numRecs.AsspDataObj <- function(x) {
attr(x, 'endRecord') - attr(x, 'startRecord') + 1
}
##' @rdname dur.AsspDataObj
##' @return rate: the data/sample rate of the AsspDataObj in Hz
##' @export
rate.AsspDataObj <- function(x) {
attr(x, 'sampleRate')
}
##' @rdname dur.AsspDataObj
##' @return startTime: start time of the first sample of the AsspDataObj
##' @export
startTime.AsspDataObj <- function(x) {
attr(x, 'startTime')
}
##' @importFrom tibble as_tibble
##' @export
"as_tibble.AsspDataObj" <- function(x, ...){
frame_time = seq(from = startTime.AsspDataObj(x),
by = 1/rate.AsspDataObj(x),
length.out = numRecs.AsspDataObj(x)) * 1000
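  # column-bind all tracks and name the columns <trackname><fieldIndex>;
  # note: this assumes every track has as many fields as the first track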
all_tracks = do.call(cbind, x)
colnames(all_tracks) = paste0(rep(names(x), each = ncol(x[[1]])), rep(seq(1, to = ncol(x[[1]])), length(x)))
return(as_tibble(cbind(frame_time, all_tracks)))
}
| /scratch/gouwar.j/cran-all/cranData/wrassp/R/AsspDataObj.R |
##' returns all valid AsspWindowTypes according to the assp library
##'
##' wrapper function for AsspWindowTypes of wrassp
##' @title AsspWindowTypes
##' @return vector containing window types
##' @author Raphael Winkelmann
##' @useDynLib wrassp, .registration = TRUE
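##' @examples
##' # list the analysis window types supported by the underlying assp library
##' AsspWindowTypes()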
##' @export
'AsspWindowTypes' <- function(){
return(.Call("AsspWindowTypes_", PACKAGE = "wrassp"))
}
##' returns all valid AsspLpTypes according to the assp library
##'
##' wrapper function for AsspLpTypes of wrassp
##' @title AsspLpTypes
##' @return vector containing lp types
##' @author Raphael Winkelmann
##' @useDynLib wrassp, .registration = TRUE
##' @export
'AsspLpTypes' <- function(){
return(.Call("AsspLpTypes_", PACKAGE = "wrassp"))
}
##' returns all valid AsspSpectTypes according to the assp library
##'
##' wrapper function for AsspSpectTypes of wrassp
##' @title AsspSpectTypes
##' @return vector containing spectrogram types
##' @author Raphael Winkelmann
##' @useDynLib wrassp, .registration = TRUE
##' @export
'AsspSpectTypes' <- function(){
return(.Call("AsspSpectTypes_", PACKAGE = "wrassp"))
}
| /scratch/gouwar.j/cran-all/cranData/wrassp/R/AsspTypes.R |
##' acfana function adapted from libassp
##'
##' Analysis of short-term autocorrelation function of
##' the signals in <listOFFiles>.
##' Analysis results will be written to a file with the
##' base name of the input file and extension '.acf'.
##' Default output is in SSFF binary format (track 'acf').
##' @title acfana
##' @param listOfFiles vector of file paths to be processed by function
##' @param optLogFilePath path to option log file
##' @param beginTime = <time>: set begin of analysis interval to <time> seconds (default: 0 = beginning of file)
##' @param centerTime = <time>: set single-frame analysis with the analysis window centred at <time> seconds;
##' overrules BeginTime, EndTime and WindowShift options
##' @param endTime = <time>: set end of analysis interval to <time> seconds (default: 0 = end of file)
##' @param windowShift = <dur>: set analysis window shift to <dur> ms (default: 5.0)
##' @param windowSize = <dur>: set analysis window size to <dur> ms (default: 20.0); overrules the effectiveLength parameter
##' @param effectiveLength make window size effective rather than exact
##' @param window = <type>: set analysis window function to <type> (default: BLACKMAN)
##' @param analysisOrder = <num>: set analysis order to <num> (default: 0 = sample rate in kHz + 3)
##' @param energyNormalization calculate energy-normalized autocorrelation
##' @param lengthNormalization calculate length-normalized autocorrelation
##' @param toFile write results to file (default extension is .acf)
##' @param explicitExt set if you wish to override the default extension
##' @param outputDirectory directory in which output files are stored. Defaults to NULL, i.e.
##' the directory of the input files
##' @param forceToLog is set by the global package variable useWrasspLogger. This is set
##' to FALSE by default and should be set to TRUE if logging is desired.
##' @param verbose display infos & show progress bar
##' @return nrOfProcessedFiles or if only one file to process return AsspDataObj of that file
##' @author Raphael Winkelmann
##' @author Lasse Bombien
##' @useDynLib wrassp, .registration = TRUE
##' @examples
##' # get path to audio file
##' path2wav <- list.files(system.file("extdata", package = "wrassp"),
##' pattern = glob2rx("*.wav"),
##' full.names = TRUE)[1]
##'
##' # calculate short-term autocorrelation
##' res <- acfana(path2wav, toFile=FALSE)
##'
##' # plot short-term autocorrelation values
##' matplot(seq(0,numRecs.AsspDataObj(res) - 1) / rate.AsspDataObj(res) +
##' attr(res, 'startTime'),
##' res$acf,
##' type='l',
##' xlab='time (s)',
##' ylab='short-term autocorrelation values')
##'
##' @export
'acfana' <- function(listOfFiles = NULL, optLogFilePath = NULL,
beginTime = 0.0, centerTime = FALSE,
endTime = 0.0, windowShift = 5.0,
windowSize = 20.0, effectiveLength = TRUE,
window = "BLACKMAN", analysisOrder = 0,
energyNormalization = FALSE, lengthNormalization = FALSE,
toFile = TRUE, explicitExt = NULL, outputDirectory = NULL,
forceToLog = useWrasspLogger, verbose = TRUE){
###########################
# a few parameter checks and expand paths
if (is.null(listOfFiles)) {
stop(paste("listOfFiles is NULL! It has to be a string or vector of file",
"paths (min length = 1) pointing to valid file(s) to perform",
"the given analysis function."))
}
if (is.null(optLogFilePath) && forceToLog){
stop("optLogFilePath is NULL! -> not logging!")
}else{
if(forceToLog){
optLogFilePath = path.expand(optLogFilePath)
}
}
if(!isAsspWindowType(window)){
stop("WindowFunction of type '", window,"' is not supported!")
}
if (!is.null(outputDirectory)) {
outputDirectory = normalizePath(path.expand(outputDirectory))
finfo <- file.info(outputDirectory)
if (is.na(finfo$isdir))
if (!dir.create(outputDirectory, recursive=TRUE))
stop('Unable to create output directory.')
else if (!finfo$isdir)
stop(paste(outputDirectory, 'exists but is not a directory.'))
}
###########################
# Pre-process file list
listOfFiles <- prepareFiles(listOfFiles)
###########################
# perform analysis
if(length(listOfFiles)==1 | !verbose){
pb <- NULL
}else{
if(toFile==FALSE){
stop("length(listOfFiles) is > 1 and toFile=FALSE! toFile=FALSE only permitted for single files.")
}
cat('\n INFO: applying acfana to', length(listOfFiles), 'files\n')
pb <- utils::txtProgressBar(min = 0, max = length(listOfFiles), style = 3)
}
externalRes = invisible(.External("performAssp", listOfFiles,
fname = "acfana", beginTime = beginTime,
centerTime = centerTime, endTime = endTime,
windowShift = windowShift, windowSize = windowSize,
effectiveLength = effectiveLength, window = window,
analysisOrder = as.integer(analysisOrder), energyNormalization = energyNormalization,
lengthNormalization = lengthNormalization, toFile = toFile,
explicitExt = explicitExt, progressBar = pb,
outputDirectory = outputDirectory, PACKAGE = "wrassp"))
############################
# write options to options log file
if (forceToLog){
optionsGivenAsArgs = as.list(match.call(expand.dots = TRUE))
wrassp.logger(optionsGivenAsArgs[[1]], optionsGivenAsArgs[-1],
optLogFilePath, listOfFiles)
}
#############################
# return dataObj if length only one file
if(!is.null(pb)){
close(pb)
}else{
return(externalRes)
}
}
| /scratch/gouwar.j/cran-all/cranData/wrassp/R/acfana.R |
##' afdiff function adapted from libassp
##'
##' Computes the first difference of the signal in the audio-
##' formatted file(s) <listOfFiles>. The differentiated signal will
##' be written to a file with the base name of the input file
##' and an extension consisting of '.d', followed by the
##' extension of the input file. The format of the output file
##' will be the same as that of the input file.
##' Differentiation can improve results on F0 analysis of e.g.
##' EGG signals because it removes a DC offset, attenuates
##' very low frequency components - and in the case of central
##' differentiation also very high ones - and enhances the
##' moment of glottal closure.
##' @title afdiff
##' @param listOfFiles vector of file paths to be processed by function
##' @param optLogFilePath path to option log file
##' @param computeBackwardDifference compute backward difference (s'[n] = s[n] - s[n-1]) (default: forward difference s'[n] = s[n+1] - s[n])
##' @param computeCentralDifference compute central/interpolated/3-point difference
##' @param channel = <num>: for multi-channel input files: extract and differentiate channel <num> (1 <= <num> <= 8; default: channel 1)
##' @param toFile write results to file (default extension is .d+(extensionsOfAudioFile))
##' @param explicitExt set if you wish to override the default extension
##' @param outputDirectory directory in which output files are stored. Defaults to NULL, i.e.
##' the directory of the input files
##' @param forceToLog is set by the global package variable useWrasspLogger. This is set
##' to FALSE by default and should be set to TRUE if logging is desired.
##' @param verbose display infos & show progress bar
##' @return nrOfProcessedFiles or if only one file to process return AsspDataObj of that file
##' @author Raphael Winkelmann
##' @author Lasse Bombien
##' @useDynLib wrassp, .registration = TRUE
##' @examples
##' # get path to audio file
##' path2wav <- list.files(system.file("extdata", package = "wrassp"),
##' pattern = glob2rx("*.wav"),
##' full.names = TRUE)[1]
##'
##' # compute the first forward difference of the signal
##' res <- afdiff(path2wav, toFile=FALSE)
##'
##' # plot samples
##' # (only plot every 10th element to accelerate plotting)
##' plot(seq(0,numRecs.AsspDataObj(res) - 1, 10) / rate.AsspDataObj(res),
##' res$audio[c(TRUE, rep(FALSE,9))],
##' type='l',
##' xlab='time (s)',
##' ylab='Audio samples')
##'
##' @export
'afdiff' <- function(listOfFiles = NULL, optLogFilePath = NULL,
computeBackwardDifference = FALSE, computeCentralDifference = FALSE,
channel = 1, toFile = TRUE,
explicitExt=NULL, outputDirectory = NULL,
forceToLog = useWrasspLogger, verbose = TRUE){
###########################
# a few parameter checks and expand paths
if (is.null(listOfFiles)) {
stop(paste("listOfFiles is NULL! It has to be a string or vector of file",
"paths (min length = 1) pointing to valid file(s) to perform",
"the given analysis function."))
}
if (is.null(optLogFilePath) && forceToLog){
stop("optLogFilePath is NULL! -> not logging!")
}else{
if(forceToLog){
optLogFilePath = path.expand(optLogFilePath)
}
}
if (!is.null(outputDirectory)) {
outputDirectory = normalizePath(path.expand(outputDirectory))
finfo <- file.info(outputDirectory)
if (is.na(finfo$isdir))
if (!dir.create(outputDirectory, recursive=TRUE))
stop('Unable to create output directory.')
else if (!finfo$isdir)
stop(paste(outputDirectory, 'exists but is not a directory.'))
}
###########################
# Pre-process file list
listOfFiles <- prepareFiles(listOfFiles)
###########################
# perform analysis
if(length(listOfFiles)==1 | !verbose){
pb <- NULL
}else{
if(toFile==FALSE){
stop("length(listOfFiles) is > 1 and toFile=FALSE! toFile=FALSE only permitted for single files.")
}
cat('\n INFO: applying afdiff to', length(listOfFiles), 'files\n')
pb <- utils::txtProgressBar(min = 0, max = length(listOfFiles), style = 3)
}
  externalRes = invisible(.External("performAssp", listOfFiles,
                                    fname = "afdiff", computeBackwardDifference = computeBackwardDifference,
                                    computeCentralDifference = computeCentralDifference,
                                    channel = as.integer(channel), toFile = toFile,
                                    explicitExt = explicitExt, progressBar = pb,
                                    outputDirectory = outputDirectory, PACKAGE = "wrassp"))
############################
# write options to options log file
if (forceToLog){
optionsGivenAsArgs = as.list(match.call(expand.dots = TRUE))
wrassp.logger(optionsGivenAsArgs[[1]], optionsGivenAsArgs[-1],
optLogFilePath, listOfFiles)
}
#############################
# return dataObj if length only one file
if(!is.null(pb)){
close(pb)
}else{
return(externalRes)
}
}
| /scratch/gouwar.j/cran-all/cranData/wrassp/R/afdiff.R |
##' affilter function adapted from libassp
##'
##' Filters the audio signal in <listOfFiles>.
##' By specifying the high-pass and/or low-pass cut-off
##' frequency one of four filter characteristics may be
##' selected as shown in the table below.
##'
##' \tabular{ccll}{
##' \strong{hp} \tab \strong{lp} \tab \strong{filter characteristic} \tab \strong{extension}\cr
##' > 0 \tab 0 \tab high-pass from hp \tab '.hpf'\cr
##' 0 \tab > 0 \tab low-pass up to lp \tab '.lpf'\cr
##' > 0 \tab > hp \tab band-pass from hp to lp \tab '.bpf'\cr
##' > lp \tab > 0 \tab band-stop between lp and hp \tab '.bsf'\cr
##' }
##'
##' Please note: by default a high-pass filter with a cut-off frequency of 4000 Hz is applied.
##'
##' The Kaiser-window design method is used to compute the
##' coefficients of a linear-phase FIR filter with unity gain
##' in the pass-band. The cut-off frequencies (-6 dB points)
##' of the filters are in the middle of the transition band.
##' The filtered signal will be written to a file with the
##' base name of the input file and an extension corresponding
##' to the filter characteristic (see table). The format of
##' the output file will be the same as that of the input file.
##' @title affilter
##' @param listOfFiles vector of file paths to be processed by function
##' @param optLogFilePath path to option log file
##' @param highPass = <num>: set the high-pass cut-off frequency to <num> Hz (default: 4000, high-pass filtering is applied)
##' @param lowPass = <num>: set the low-pass cut-off frequency to <num> Hz (default: 0, no low-pass filtering)
##' @param stopBand = <num>: set the stop-band attenuation to <num> dB (default: 96.0 dB, minimum: 21.0 dB)
##' @param transition = <num>: set the width of the transition band to <num> Hz (default: 250.0 Hz)
##' @param useIIR switch from the default FIR to IIR filter
##' @param numIIRsections = <num>: set the number of 2nd order sections to <num> (default: 4) where each section
##' adds 12dB/oct to the slope of the filter
##' @param toFile write results to file (for default extension see details section))
##' @param explicitExt set if you wish to override the default extension
##' @param outputDirectory directory in which output files are stored. Defaults to NULL, i.e.
##' the directory of the input files
##' @param forceToLog is set by the global package variable useWrasspLogger. This is set
##' to FALSE by default and should be set to TRUE if logging is desired.
##' @param verbose display infos & show progress bar
##' @return nrOfProcessedFiles or if only one file to process return AsspDataObj of that file
##' @author Raphael Winkelmann
##' @author Lasse Bombien
##' @useDynLib wrassp, .registration = TRUE
##' @examples
##' # get path to audio file
##' path2wav <- list.files(system.file("extdata", package = "wrassp"),
##' pattern = glob2rx("*.wav"),
##' full.names = TRUE)[1]
##'
##' # band-pass filter signal between 4000 and 5000 Hz
##' res <- affilter(path2wav, highPass=4000, lowPass=5000, toFile=FALSE)
##'
##' # plot samples
##' # (only plot every 10th element to accelerate plotting)
##' plot(seq(0,numRecs.AsspDataObj(res) - 1, 10) / rate.AsspDataObj(res),
##' res$audio[c(TRUE, rep(FALSE,9))],
##' type='l',
##' xlab='time (s)',
##' ylab='Audio samples')
##'
##' @export
'affilter' <- function(listOfFiles = NULL, optLogFilePath = NULL,
highPass = 4000, lowPass = 0,
stopBand = 96, transition = 250,
useIIR = FALSE, numIIRsections = 4,
toFile = TRUE, explicitExt = NULL,
outputDirectory = NULL, forceToLog = useWrasspLogger,
verbose = TRUE){
###########################
### a few parameter checks and expand paths
if (is.null(listOfFiles)) {
stop(paste("listOfFiles is NULL! It has to be a string or vector of file",
"paths (min length = 1) pointing to valid file(s) to perform",
"the given analysis function."))
}
if (is.null(optLogFilePath) && forceToLog){
stop("optLogFilePath is NULL! -> not logging!")
}else{
if(forceToLog){
optLogFilePath = path.expand(optLogFilePath)
}
}
if (!is.null(outputDirectory)) {
outputDirectory = normalizePath(path.expand(outputDirectory))
finfo <- file.info(outputDirectory)
if (is.na(finfo$isdir))
if (!dir.create(outputDirectory, recursive=TRUE))
stop('Unable to create output directory.')
else if (!finfo$isdir)
stop(paste(outputDirectory, 'exists but is not a directory.'))
}
###########################
# Pre-process file list
listOfFiles <- prepareFiles(listOfFiles)
###########################
### perform analysis
if(length(listOfFiles)==1 | !verbose){
pb <- NULL
}else{
if(toFile==FALSE){
stop("length(listOfFiles) is > 1 and ToFile=FALSE! ToFile=FALSE only permitted for single files.")
}
cat('\n INFO: applying affilter to', length(listOfFiles), 'files\n')
pb <- utils::txtProgressBar(min = 0, max = length(listOfFiles), style = 3)
}
externalRes = invisible(.External("performAssp", listOfFiles,
fname = "affilter", highPass = highPass,
lowPass = lowPass, stopBand = stopBand, transition = transition,
useIIR = useIIR, numIIRsections = as.integer(numIIRsections),
toFile = toFile, explicitExt = explicitExt,
progressBar = pb, outputDirectory = outputDirectory,
PACKAGE = "wrassp"))
############################
# write options to options log file
if (forceToLog){
optionsGivenAsArgs = as.list(match.call(expand.dots = TRUE))
wrassp.logger(optionsGivenAsArgs[[1]], optionsGivenAsArgs[-1],
optLogFilePath, listOfFiles)
}
#############################
# return dataObj if length only one file
if(!is.null(pb)){
close(pb)
}else{
return(externalRes)
}
}
| /scratch/gouwar.j/cran-all/cranData/wrassp/R/affilter.R |
##' checks if given string is a valid AsspWindowType according to the assp library
##'
##' @title isAsspWindowType
##' @param windowName name of window
##' @return (BOOL) true if windowName is valid; false otherwise
##' @author Raphael Winkelmann
##' @useDynLib wrassp, .registration = TRUE
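##' @examples
##' isAsspWindowType("BLACKMAN") # TRUE
##' isAsspWindowType("FOO")      # FALSE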
##' @export
"isAsspWindowType" <- function(windowName = NULL) {
if (is.null(windowName)) {
stop("No windowName given!")
}
winTypes = AsspWindowTypes()
isValidWindow = FALSE
for (type in winTypes) {
if (windowName == type) {
isValidWindow = TRUE
break
}
}
return(isValidWindow)
}
##' checks if given string is a valid AsspLpType according to the assp library
##'
##' @title isAsspLpType
##' @param lpName name of lp type
##' @return (BOOL) true if lpName is valid; false otherwise
##' @author Raphael Winkelmann
##' @useDynLib wrassp, .registration = TRUE
##' @export
"isAsspLpType" <- function(lpName = NULL) {
if (is.null(lpName)) {
stop("No lpName given!")
}
lpTypes = AsspLpTypes()
isValidLp = FALSE
for (type in lpTypes) {
if (lpName == type) {
isValidLp = TRUE
break
}
}
return(isValidLp)
}
##' checks if given string is a valid AsspSpectType according to the assp library
##'
##' @title isAsspSpectType
##' @param spectName name of lp type
##' @return (BOOL) true if spectName is valid; false otherwise
##' @author Raphael Winkelmann
##' @useDynLib wrassp, .registration = TRUE
##' @export
"isAsspSpectType" <- function(spectName = NULL) {
if (is.null(spectName)) {
stop("No lpName given!")
}
spectTypes = AsspSpectTypes()
isValidSpect = FALSE
for (type in spectTypes) {
if (spectName == type) {
isValidSpect = TRUE
break
}
}
return(isValidSpect)
}
| /scratch/gouwar.j/cran-all/cranData/wrassp/R/asspChecks.R |
##' calculate cepstral coefficients using libassp
##'
##' Short-term cepstral analysis of the signal in <listOfFiles>
##' using the Fast Fourier Transform. The number of
##' coefficients per output record will also equal the
##' FFT length / 2 + 1 (i.e. be non-mirrored).
##' Analysis results will be written to a file with the
##' base name of the input file and the extension '.cep'.
##' Default output is in SSFF format with
##' 'cep' as track name.
##' @title cepstrum
##' @param listOfFiles vector of file paths to be processed by function
##' @param optLogFilePath path to option log file
##' @param beginTime = <time>: set begin of analysis interval to <time> seconds
##' (default: begin of data)
##' @param centerTime = <time>: set single-frame analysis with the analysis
##' window centred at <time> seconds; overrules beginTime, endTime and
##' windowShift options
##' @param endTime = <time>: set end of analysis interval to <time> seconds
##' (default: end of data)
##' @param resolution = <freq>: set FFT length to the smallest value which
##' results in a frequency resolution of <freq> Hz or better (default: 40.0)
##' @param fftLength = <num>: set FFT length to <num> points (overrules default
##' and 'resolution' option)
##' @param windowShift = <dur>: set analysis window shift to <dur> ms
##' (default: 5.0)
##' @param window = <type>: set analysis window function to <type> (default:
##' BLACKMAN)
##' @param toFile write results to file (default extension is .cep)
##' @param explicitExt set if you wish to override the default extension
##' @param outputDirectory directory in which output files are stored. Defaults to NULL, i.e.
##' the directory of the input files
##' @param forceToLog is set by the global package variable useWrasspLogger. This is set
##' to FALSE by default and should be set to TRUE is logging is desired.
##' @param verbose display infos & show progress bar
##' @return nrOfProcessedFiles or if only one file to process return
##' AsspDataObj of that file
##' @author Raphael Winkelmann
##' @author Lasse Bombien
##' @seealso \code{\link{dftSpectrum}}, \code{\link{cssSpectrum}}, \code{\link{lpsSpectrum}};
##' all derived from libassp's spectrum function
##' @useDynLib wrassp, .registration = TRUE
##' @examples
##' # get path to audio file
##' path2wav <- list.files(system.file("extdata", package = "wrassp"),
##' pattern = glob2rx("*.wav"),
##' full.names = TRUE)[1]
##'
##' # calulate cepstrum
##' res <- cepstrum(path2wav, toFile=FALSE)
##'
##' # plot cepstral values at midpoint of signal
##' plot(res$cep[dim(res$cep)[1]/2,],
##' type='l',
##' xlab='cepstral value index',
##' ylab='cepstral value')
##'
##' @export
'cepstrum' <- function(listOfFiles = NULL, optLogFilePath = NULL,
beginTime = 0.0, centerTime = FALSE,
endTime = 0.0, resolution = 40.0,
fftLength = 0, windowShift = 5.0,
window = 'BLACKMAN', toFile = TRUE,
explicitExt = NULL, outputDirectory = NULL,
forceToLog = useWrasspLogger, verbose = TRUE){
## ########################
## a few parameter checks and expand paths
if (is.null(listOfFiles)) {
stop(paste("listOfFiles is NULL! It has to be a string or vector of file",
"paths (min length = 1) pointing to valid file(s) to perform",
"the given analysis function."))
}
if (is.null(optLogFilePath) && forceToLog){
stop("optLogFilePath is NULL! -> not logging!")
}else{
if(forceToLog){
optLogFilePath = path.expand(optLogFilePath)
}
}
if(!isAsspWindowType(window)){
stop("WindowFunction of type '", window,"' is not supported!")
}
if (!is.null(outputDirectory)) {
outputDirectory = normalizePath(path.expand(outputDirectory))
finfo <- file.info(outputDirectory)
if (is.na(finfo$isdir))
if (!dir.create(outputDirectory, recursive=TRUE))
stop('Unable to create output directory.')
else if (!finfo$isdir)
stop(paste(outputDirectory, 'exists but is not a directory.'))
}
###########################
# Pre-process file list
listOfFiles <- prepareFiles(listOfFiles)
## #######################
## perform analysis
if(length(listOfFiles)==1 | !verbose){
pb <- NULL
}else{
if(toFile==FALSE){
stop("length(listOfFiles) is > 1 and toFile=FALSE! toFile=FALSE only permitted for single files.")
}
cat('\n INFO: applying cepstrum to', length(listOfFiles), 'files\n')
pb <- utils::txtProgressBar(min = 0, max = length(listOfFiles), style = 3)
}
externalRes = invisible(.External("performAssp", listOfFiles,
fname = "spectrum", beginTime = beginTime,
centerTime = centerTime, endTime = endTime,
spectrumType = 'CEP',
resolution = resolution,
fftLength = as.integer(fftLength),
windowShift = windowShift, window = window,
toFile = toFile, explicitExt = explicitExt,
progressBar = pb, outputDirectory = outputDirectory,
PACKAGE = "wrassp"))
## #########################
## write options to options log file
if (forceToLog){
optionsGivenAsArgs = as.list(match.call(expand.dots = TRUE))
wrassp.logger(optionsGivenAsArgs[[1]], optionsGivenAsArgs[-1],
optLogFilePath, listOfFiles)
}
## #########################
## return dataObj if length only one file
if(!is.null(pb)){
close(pb)
}else{
return(externalRes)
}
}
| /scratch/gouwar.j/cran-all/cranData/wrassp/R/cepstrum.R |
##' calculate cepstrally smoothed spectrum using libassp
##'
##' Short-term spectral analysis of the signal in <listOfFiles>
##' using the Fast Fourier Transform and cepstral smoothing.
##' Analysis results will be written to a file with the
##' base name of the input file and '.css' as extension.
##' Default output is in SSFF format with
##' 'css' in lower case as track name.
##' @title cssSpectrum
##' @param listOfFiles vector of file paths to be processed by function
##' @param optLogFilePath path to option log file
##' @param beginTime = <time>: set begin of analysis interval to <time> seconds
##' (default: begin of data)
##' @param centerTime = <time>: set single-frame analysis with the analysis
##' window centred at <time> seconds; overrules beginTime, endTime and
##' windowShift options
##' @param endTime = <time>: set end of analysis interval to <time> seconds
##' (default: end of data)
##' @param resolution = <freq>: set FFT length to the smallest value which
##' results in a frequency resolution of <freq> Hz or better (default: 40.0)
##' @param fftLength = <num>: set FFT length to <num> points (overrules default
##' and 'resolution' option)
##' @param windowShift = <dur>: set analysis window shift to <dur> ms
##' (default: 5.0)
##' @param window = <type>: set analysis window function to <type> (default:
##' BLACKMAN)
##' @param numCeps = <num>: set number of cepstral coefficients used to <num>
##' (default: sampling rate in kHz + 1; minimum: 2)
##' @param toFile write results to file (default extension is .css)
##' @param explicitExt set if you wish to override the default extension
##' @param outputDirectory directory in which output files are stored. Defaults to NULL, i.e.
##' the directory of the input files
##' @param forceToLog is set by the global package variable useWrasspLogger. This is set
##' to FALSE by default and should be set to TRUE if logging is desired.
##' @param verbose display infos & show progress bar
##' @return nrOfProcessedFiles or if only one file to process return
##' AsspDataObj of that file
##' @author Raphael Winkelmann
##' @author Lasse Bombien
##' @seealso \code{\link{dftSpectrum}}, \code{\link{lpsSpectrum}}, \code{\link{cepstrum}};
##' all derived from libassp's spectrum function.
##' @useDynLib wrassp, .registration = TRUE
##' @examples
##' # get path to audio file
##' path2wav <- list.files(system.file("extdata", package = "wrassp"),
##' pattern = glob2rx("*.wav"),
##' full.names = TRUE)[1]
##'
##' # calculate cepstrally smoothed spectrum
##' res <- cssSpectrum(path2wav, toFile=FALSE)
##'
##' # plot spectral values at midpoint of signal
##' plot(res$css[dim(res$css)[1]/2,],
##' type='l',
##' xlab='spectral value index',
##' ylab='spectral value')
##'
##' @export
'cssSpectrum' <- function(listOfFiles = NULL, optLogFilePath = NULL,
beginTime = 0.0, centerTime = FALSE,
endTime = 0.0, resolution = 40.0,
fftLength = 0, windowShift = 5.0,
window = 'BLACKMAN', numCeps = 0,
toFile = TRUE, explicitExt = NULL,
outputDirectory = NULL, forceToLog = useWrasspLogger,
verbose = TRUE){
## ########################
## a few parameter checks and expand paths
if (is.null(listOfFiles)) {
stop(paste("listOfFiles is NULL! It has to be a string or vector of file",
"paths (min length = 1) pointing to valid file(s) to perform",
"the given analysis function."))
}
if (is.null(optLogFilePath) && forceToLog){
stop("optLogFilePath is NULL! -> not logging!")
}else{
if(forceToLog){
optLogFilePath = path.expand(optLogFilePath)
}
}
if(!isAsspWindowType(window)){
stop("WindowFunction of type '", window,"' is not supported!")
}
if (!is.null(outputDirectory)) {
outputDirectory = normalizePath(path.expand(outputDirectory))
finfo <- file.info(outputDirectory)
if (is.na(finfo$isdir))
if (!dir.create(outputDirectory, recursive=TRUE))
stop('Unable to create output directory.')
else if (!finfo$isdir)
stop(paste(outputDirectory, 'exists but is not a directory.'))
}
###########################
# Pre-process file list
listOfFiles <- prepareFiles(listOfFiles)
## #######################
## perform analysis
if(length(listOfFiles)==1 | !verbose){
pb <- NULL
}else{
if(toFile==FALSE){
stop("length(listOfFiles) is > 1 and ToFile=FALSE! ToFile=FALSE only permitted for single files.")
}
cat('\n INFO: applying cssSpectrum to', length(listOfFiles), 'files\n')
pb <- utils::txtProgressBar(min = 0, max = length(listOfFiles), style = 3)
}
externalRes = invisible(.External("performAssp", listOfFiles,
fname = "spectrum", beginTime = beginTime,
centerTime = centerTime, endTime = endTime,
spectrumType = 'CSS',
resolution = resolution,
fftLength = as.integer(fftLength),
windowShift = windowShift, window = window,
numCeps = as.integer(numCeps),
toFile = toFile, explicitExt = explicitExt,
progressBar = pb, outputDirectory = outputDirectory,
PACKAGE = "wrassp"))
## #########################
## write options to options log file
if (forceToLog){
optionsGivenAsArgs = as.list(match.call(expand.dots = TRUE))
wrassp.logger(optionsGivenAsArgs[[1]], optionsGivenAsArgs[-1],
optLogFilePath, listOfFiles)
}
## #########################
## return dataObj if length only one file
if(!is.null(pb)){
close(pb)
}else{
return(externalRes)
}
}
| /scratch/gouwar.j/cran-all/cranData/wrassp/R/cssSpectrum.R |
##' DFT spectrum function adapted from libassp
##'
##' Short-term spectral analysis of the signal in <listOfFiles>
##' using the Fast Fourier Transform. The default is to
##' calculate an unsmoothed narrow-band spectrum with the
##' size of the analysis window equal to the length of the
##' FFT. The output from the FFT will be converted to a
##' power spectrum in dB from 0 Hz up to and including the
##' Nyquist rate.
##' Analysis results will be written to a file with the
##' base name of the input file and the spectrum type in
##' lower case as extension (e.g. '.dft').
##' Default output is in SSFF format with the
##' spectrum type in lower case as track name.
##' @title dftSpectrum
##' @param listOfFiles vector of file paths to be processed by function
##' @param optLogFilePath path to option log file
##' @param beginTime = <time>: set begin of analysis interval to <time> seconds
##' (default: begin of data)
##' @param centerTime = <time>: set single-frame analysis with the analysis
##' window centred at <time> seconds; overrules beginTime, endTime and
##' windowShift options
##' @param endTime = <time>: set end of analysis interval to <time> seconds
##' (default: end of data)
##' @param resolution = <freq>: set FFT length to the smallest value which
##' results in a frequency resolution of <freq> Hz or better (default: 40.0)
##' @param fftLength = <num>: set FFT length to <num> points (overrules default
##' and 'resolution' option)
##' @param windowShift = <dur>: set analysis window shift to <dur> ms
##' (default: 5.0)
##' @param window = <type>: set analysis window function to <type> (default:
##' BLACKMAN)
##' @param bandwidth = <freq>: set the effective analysis bandwidth to <freq>
##' Hz (default: 0, yielding the smallest possible value given the length of
##' the FFT)
##' @param toFile write results to file (default extension is the spectrum type in lower case, e.g. .dft)
##' @param explicitExt set if you wish to override the default extension
##' @param outputDirectory directory in which output files are stored. Defaults to NULL, i.e.
##' the directory of the input files
##' @param forceToLog is set by the global package variable useWrasspLogger. This is set
##' to FALSE by default and should be set to TRUE if logging is desired.
##' @param verbose display infos & show progress bar
##' @return nrOfProcessedFiles or if only one file to process return
##' AsspDataObj of that file
##' @author Raphael Winkelmann
##' @author Lasse Bombien
##' @seealso \code{\link{cssSpectrum}}, \code{\link{lpsSpectrum}}, \code{\link{cepstrum}};
##' all derived from libassp's spectrum function.
##' @useDynLib wrassp, .registration = TRUE
##' @examples
##' # get path to audio file
##' path2wav <- list.files(system.file("extdata", package = "wrassp"),
##' pattern = glob2rx("*.wav"),
##' full.names = TRUE)[1]
##'
##' # calculate dft spectrum
##' res <- dftSpectrum(path2wav, toFile=FALSE)
##'
##' # plot spectral values at midpoint of signal
##' plot(res$dft[dim(res$dft)[1]/2,],
##' type='l',
##' xlab='spectral value index',
##' ylab='spectral value')
##'
##' @export
'dftSpectrum' <- function(listOfFiles = NULL, optLogFilePath = NULL,
beginTime = 0.0, centerTime = FALSE,
endTime = 0.0, resolution = 40.0,
fftLength = 0, windowShift = 5.0,
window = 'BLACKMAN', bandwidth = 0.0, ## DFT specific
toFile = TRUE, explicitExt = NULL,
outputDirectory = NULL, forceToLog = useWrasspLogger,
verbose = TRUE) {
## ########################
## a few parameter checks and expand paths
if (is.null(listOfFiles)) {
stop(paste("listOfFiles is NULL! It has to be a string or vector of file",
"paths (min length = 1) pointing to valid file(s) to perform",
"the given analysis function."))
}
if (is.null(optLogFilePath) && forceToLog){
stop("optLogFilePath is NULL! -> not logging!")
}else{
if(forceToLog){
optLogFilePath = path.expand(optLogFilePath)
}
}
if(!isAsspWindowType(window)){
stop("WindowFunction of type '", window,"' is not supported!")
}
if (!is.null(outputDirectory)) {
outputDirectory = normalizePath(path.expand(outputDirectory))
finfo <- file.info(outputDirectory)
if (is.na(finfo$isdir))
if (!dir.create(outputDirectory, recursive=TRUE))
stop('Unable to create output directory.')
else if (!finfo$isdir)
stop(paste(outputDirectory, 'exists but is not a directory.'))
}
###########################
# Pre-process file list
listOfFiles <- prepareFiles(listOfFiles)
## #######################
## perform analysis
if(length(listOfFiles)==1 | !verbose){
pb <- NULL
}else{
if(toFile==FALSE){
stop("length(listOfFiles) is > 1 and toFile=FALSE! toFile=FALSE only permitted for single files.")
}
cat('\n INFO: applying dftSpectrum to', length(listOfFiles), 'files\n')
pb <- utils::txtProgressBar(min = 0, max = length(listOfFiles), style = 3)
}
  externalRes = invisible(.External("performAssp", listOfFiles,
                                    fname = "spectrum", beginTime = beginTime,
                                    centerTime = centerTime, endTime = endTime,
                                    spectrumType = 'DFT',
                                    resolution = resolution,
fftLength = as.integer(fftLength),
windowShift = windowShift, window = window,
bandwidth = bandwidth,
toFile = toFile, explicitExt = explicitExt,
progressBar = pb, outputDirectory = outputDirectory,
PACKAGE = "wrassp"))
## #########################
## write options to options log file
if (forceToLog){
optionsGivenAsArgs = as.list(match.call(expand.dots = TRUE))
wrassp.logger(optionsGivenAsArgs[[1]], optionsGivenAsArgs[-1],
optLogFilePath, listOfFiles)
}
## #########################
## return dataObj if length only one file
if(!is.null(pb)){
close(pb)
}else{
return(externalRes)
}
}
| /scratch/gouwar.j/cran-all/cranData/wrassp/R/dftSpectrum.R |
##' Normalise a list of filenames so that they can be passed to a signal processing function
##'
##' @param listOfFiles The list of file names to process
##' @return A normalised list of filenames
##' @author Raphael Winkelmann
##' @examples
##' # normalize paths and strip any 'file://' prefix, e.g.:
##' # listOfFiles <- prepareFiles(c("file:///tmp/a.wav", "~/b.wav"))
prepareFiles <- function(listOfFiles) {
listOfFiles = gsub("^file://","", listOfFiles)
listOfFiles = normalizePath(path.expand(listOfFiles))
return(listOfFiles)
}
| /scratch/gouwar.j/cran-all/cranData/wrassp/R/fileHelper.R |
##' forest function adapted from libassp
##'
##' Formant estimation of the signal(s) in <listOfFiles>.
##' Raw resonance frequency and bandwidth values are
##' obtained by root-solving of the Linear Prediction
##' polynomial from the autocorrelation method and the
##' Split-Levinson-Algorithm (SLA). Resonances are then
##' classified as formants using the so-called Pisarenko
##' frequencies (by-product of the SLA) and a formant
##' frequency range table derived from the nominal F1
##' frequency. The latter may have to be increased by
##' about 12\% for female voices (see NominalF1 and Gender options).
##' Formant estimates will be written to a file with the
##' base name of the input file and extension '.fms'.
##' Default output is in SSFF binary format (tracks 'fm'
##' and 'bw')
##' @title forest
##' @param listOfFiles vector of file paths to be processed by function
##' @param optLogFilePath path to option log file
##' @param beginTime = <time>: set begin of analysis interval to <time> seconds (default = 0: begin of data)
##' @param endTime = <time>: set end of analysis interval to <time> seconds (default = 0: end of data)
##' @param windowShift = <dur>: set analysis window shift to <dur> ms (default: 5.0)
##' @param windowSize = <dur>: set analysis window size to <dur> ms (default: 20.0)
##' @param effectiveLength make window size effective rather than exact
##' @param nominalF1 = <freq>: set nominal F1 frequency to <freq> Hz (default: 500.0 Hz)
##' @param gender = <code>: set gender specific parameters where
##' <code> = f[emale], m[ale] or u[nknown] (when <code>=f: eff. window length = 12.5 ms, nominal F1 = 560.0 Hz)
##' @param estimate insert rough frequency estimates of missing formants (default: frequency set to zero)
##' @param order decrease default order by 2 (one resonance less)
##' @param incrOrder increase default order by 2 (one resonance more)
##' @param numFormants = <num>: set number of formants to <num> (default: 4; maximum: 8 or half the LP order)
##' @param window = <type>: set analysis window function to <type> (default: BLACKMAN)
##' @param preemphasis = <val>: set pre-emphasis factor to <val> (-1 <= val <= 0)
##' (default: dependent on sample rate and nominal F1)
##' @param toFile write results to file (default extension is .fms)
##' @param explicitExt set if you wish to override the default extension
##' @param outputDirectory directory in which output files are stored. Defaults to NULL, i.e.
##' the directory of the input files
##' @param forceToLog is set by the global package variable useWrasspLogger. This is set
##' to FALSE by default and should be set to TRUE if logging is desired.
##' @param verbose display infos & show progress bar
##' @return nrOfProcessedFiles or if only one file to process return AsspDataObj of that file
##' @author Raphael Winkelmann
##' @author Lasse Bombien
##' @useDynLib wrassp, .registration = TRUE
##' @examples
##' # get path to audio file
##' path2wav <- list.files(system.file("extdata", package = "wrassp"),
##' pattern = glob2rx("*.wav"),
##' full.names = TRUE)[1]
##'
##' # calculate formant values
##' res <- forest(path2wav, toFile=FALSE)
##'
##' # plot formant values
##' matplot(seq(0,numRecs.AsspDataObj(res) - 1) / rate.AsspDataObj(res) +
##' attr(res, 'startTime'),
##' res$fm,
##' type='l',
##' xlab='time (s)',
##' ylab='Formant frequency (Hz)')
##'
##' @export
'forest' <- function(listOfFiles = NULL, optLogFilePath = NULL,
beginTime = 0.0, endTime = 0.0,
windowShift = 5.0, windowSize = 20.0,
effectiveLength = TRUE, nominalF1 = 500,
gender = 'm', estimate = FALSE,
order = 0, incrOrder = 0,
numFormants = 4, window = 'BLACKMAN',
preemphasis = -0.8, toFile = TRUE,
explicitExt = NULL, outputDirectory = NULL,
forceToLog = useWrasspLogger, verbose = TRUE){
###########################
# a few parameter checks and expand paths
if (is.null(listOfFiles)) {
stop(paste("listOfFiles is NULL! It has to be a string or vector of file",
"paths (min length = 1) pointing to valid file(s) to perform",
"the given analysis function."))
}
if (is.null(optLogFilePath) && forceToLog){
stop("optLogFilePath is NULL! -> not logging!")
}else{
if(forceToLog){
optLogFilePath = path.expand(optLogFilePath)
}
}
if(!isAsspWindowType(window)){
stop("WindowFunction of type '", window,"' is not supported!")
}
if (!is.null(outputDirectory)) {
    outputDirectory = normalizePath(path.expand(outputDirectory))
finfo <- file.info(outputDirectory)
if (is.na(finfo$isdir))
if (!dir.create(outputDirectory, recursive=TRUE))
stop('Unable to create output directory.')
else if (!finfo$isdir)
stop(paste(outputDirectory, 'exists but is not a directory.'))
}
###########################
# Pre-process file list
listOfFiles <- prepareFiles(listOfFiles)
###########################
#perform analysis
if(length(listOfFiles)==1 | !verbose){
pb <- NULL
}else{
if(toFile==FALSE){
stop("length(listOfFiles) is > 1 and toFile=FALSE! toFile=FALSE only permitted for single files.")
}
cat('\n INFO: applying forest to', length(listOfFiles), 'files\n')
pb <- utils::txtProgressBar(min = 0, max = length(listOfFiles), style = 3)
}
externalRes = invisible(.External("performAssp", listOfFiles,
fname = "forest", beginTime = beginTime,
endTime = endTime, windowShift = windowShift,
windowSize = windowSize, effectiveLength = effectiveLength,
nominalF1 = nominalF1, gender = gender,
estimate = estimate, order = as.integer(order),
incrOrder = as.integer(incrOrder), numFormants = as.integer(numFormants),
window = window, preemphasis = preemphasis,
toFile = toFile, explicitExt = explicitExt,
progressBar = pb, outputDirectory = outputDirectory,
PACKAGE = "wrassp"))
############################
# write options to options log file
if (forceToLog){
optionsGivenAsArgs = as.list(match.call(expand.dots = TRUE))
wrassp.logger(optionsGivenAsArgs[[1]], optionsGivenAsArgs[-1],
optLogFilePath, listOfFiles)
}
#############################
# return dataObj if length only one file
if(!is.null(pb)){
close(pb)
}else{
return(externalRes)
}
}
| /scratch/gouwar.j/cran-all/cranData/wrassp/R/forest.R |
##' ksvF0 function adapted from libassp
##'
##' F0 analysis of the signal in <listOfFiles> using the
##' K. Schaefer-Vincent periodicity detection algorithm.
##' Analysis results will be written to a file with the
##' base name of the input file and extension '.f0'.
##' Default output is in SSFF binary format (track 'F0').
##' Optionally, location and type of the signal extrema on
##' which the F0 data are based, may be stored in a label
##' file. The name of this file will consist of the base
##' name of the F0 file and the extension '.prd'.
##' @title ksvF0
##' @param listOfFiles vector of file paths to be processed by function
##' @param optLogFilePath path to option log file
##' @param beginTime = <time>: set begin of analysis interval to <time> seconds (default = 0: begin of data)
##' @param endTime set end of analysis interval to <time> seconds (default = 0: end of data)
##' @param windowShift = <dur>: set frame shift to <dur> ms (default: 5.0)
##' @param gender = <code> set gender-specific F0 ranges; <code> may be:
##' "f[emale]" (80.0 - 640.0 Hz)
##' "m[ale]" (50.0 - 400.0 Hz)
##' "u[nknown]" (default; 50.0 - 600.0 Hz)
##' @param maxF = <freq>: set maximum F0 value to <freq> Hz (default: 600.0)
##' @param minF = <freq>: set minimum F0 value to <freq> Hz (default: 50.0)
##' @param minAmp = <amp>: set amplitude threshold for voiced samples to <amp> (default: 50)
##' @param maxZCR maximum zero crossing rate in Hz (for voicing detection)
##' @param toFile write results to file (default extension is .f0)
##' @param explicitExt set if you wish to override the default extension
##' @param outputDirectory directory in which output files are stored. Defaults to NULL, i.e.
##' the directory of the input files
##' @param forceToLog is set by the global package variable useWrasspLogger. This is set
##' to FALSE by default and should be set to TRUE if logging is desired.
##' @param verbose display info messages & show progress bar
##' @return nrOfProcessedFiles or if only one file to process return AsspDataObj of that file
##' @author Raphael Winkelmann
##' @author Lasse Bombien
##' @references Schaefer-Vincent K (1983) Pitch period detection and chaining: method and evaluation. Phonetica 1983, Vol 40, pp. 177-202
##' @aliases f0ana f0_ksv
##' @seealso \code{\link{mhsF0}} for an alternative pitch tracker
##' @useDynLib wrassp, .registration = TRUE
##' @examples
##' # get path to audio file
##' path2wav <- list.files(system.file("extdata", package = "wrassp"),
##' pattern = glob2rx("*.wav"),
##' full.names = TRUE)[1]
##'
##' # calculate fundamental frequency contour
##' res <- ksvF0(path2wav, toFile=FALSE)
##'
##' # plot the fundamental frequency contour
##' plot(seq(0,numRecs.AsspDataObj(res) - 1) / rate.AsspDataObj(res) +
##' attr(res, 'startTime'),
##' res$F0,
##' type='l',
##' xlab='time (s)',
##' ylab='F0 frequency (Hz)')
##'
##' @export
'ksvF0' <- 'f0ana' <- 'f0_ksv' <- function(listOfFiles = NULL, optLogFilePath = NULL,
beginTime = 0.0, endTime = 0.0,
windowShift = 5.0, gender = 'u',
maxF = 600, minF = 50,
minAmp = 50, maxZCR = 3000.0,
toFile = TRUE, explicitExt = NULL,
outputDirectory = NULL, forceToLog = useWrasspLogger,
verbose = TRUE) {
###########################
# a few parameter checks and expand paths
if (is.null(listOfFiles)) {
stop(paste("listOfFiles is NULL! It has to be a string or vector of file",
"paths (min length = 1) pointing to valid file(s) to perform",
"the given analysis function."))
}
if (is.null(optLogFilePath) && forceToLog){
stop("optLogFilePath is NULL! -> not logging!")
}else{
if(forceToLog){
optLogFilePath = path.expand(optLogFilePath)
}
}
  if (!is.null(outputDirectory)) {
    outputDirectory = normalizePath(path.expand(outputDirectory))
    finfo <- file.info(outputDirectory)
    if (is.na(finfo$isdir)) {
      if (!dir.create(outputDirectory, recursive=TRUE))
        stop('Unable to create output directory.')
    } else if (!finfo$isdir) {
      stop(paste(outputDirectory, 'exists but is not a directory.'))
    }
  }
###########################
# Pre-process file list
listOfFiles <- prepareFiles(listOfFiles)
###########################
# perform analysis
if(length(listOfFiles) == 1 || !verbose){
pb <- NULL
}else{
if(toFile==FALSE){
stop("length(listOfFiles) is > 1 and toFile=FALSE! toFile=FALSE only permitted for single files.")
}
cat('\n INFO: applying f0ana to', length(listOfFiles), 'files\n')
pb <- utils::txtProgressBar(min = 0, max = length(listOfFiles), style = 3)
}
externalRes = invisible(.External("performAssp", listOfFiles,
fname = "f0ana", beginTime = beginTime,
endTime = endTime, windowShift = windowShift,
gender = gender, maxF = maxF,
minF = minF, minAmp = minAmp,
maxZCR = maxZCR, explicitExt = explicitExt,
toFile = toFile, progressBar = pb,
outputDirectory = outputDirectory, PACKAGE = "wrassp"))
############################
# write options to options log file
if (forceToLog){
optionsGivenAsArgs = as.list(match.call(expand.dots = TRUE))
wrassp.logger(optionsGivenAsArgs[[1]], optionsGivenAsArgs[-1],
optLogFilePath, listOfFiles)
}
#############################
# return dataObj if length only one file
if(!is.null(pb)){
close(pb)
}else{
return(externalRes)
}
}
| /scratch/gouwar.j/cran-all/cranData/wrassp/R/ksvF0.R |
##' Calculate Linear Prediction smoothed spectrum using libassp
##'
##' Short-term spectral analysis of the signal in <listOfFiles>
##' using the Fast Fourier Transform and linear predictive smoothing.
##' Analysis results will be written to a file with the
##' base name of the input file and the spectrum type in
##' lower case as extension (i.e. '.lps').
##' Default output is in SSFF format with the
##' spectrum type in lower case as track name.
##' @title lpsSpectrum
##' @param listOfFiles vector of file paths to be processed by function
##' @param optLogFilePath path to option log file
##' @param beginTime = <time>: set begin of analysis interval to <time> seconds
##' (default: begin of data)
##' @param centerTime = <time>: set single-frame analysis with the analysis
##' window centred at <time> seconds; overrules beginTime, endTime and
##' windowShift options
##' @param endTime = <time>: set end of analysis interval to <time> seconds
##' (default: end of data)
##' @param resolution = <freq>: set FFT length to the smallest value which
##' results in a frequency resolution of <freq> Hz or better (default: 40.0)
##' @param fftLength = <num>: set FFT length to <num> points (overrules default
##' and 'resolution' option)
##' @param windowShift = <dur>: set analysis window shift to <dur> ms
##' (default: 5.0)
##' @param window = <type>: set analysis window function to <type> (default:
##' BLACKMAN)
##' @param windowSize = <dur>: set effective analysis window size to <dur> ms
##' @param order = <num>: set prediction order to <num> (default: sampling
##' rate in kHz + 3)
##' @param preemphasis = <val>: set pre-emphasis factor to <val> (default:
##' -0.95)
##' @param deemphasize undo spectral tilt due to pre-emphasis
##' used in LP analysis (default: TRUE)
##' @param toFile write results to file (default extension is .lps)
##' @param explicitExt set if you wish to override the default extension
##' @param outputDirectory directory in which output files are stored. Defaults to NULL, i.e.
##' the directory of the input files
##' @param forceToLog is set by the global package variable useWrasspLogger. This is set
##' to FALSE by default and should be set to TRUE if logging is desired.
##' @param verbose display info messages & show progress bar
##' @return nrOfProcessedFiles or if only one file to process return
##' AsspDataObj of that file
##' @author Raphael Winkelmann
##' @author Lasse Bombien
##' @seealso \code{\link{dftSpectrum}}, \code{\link{cssSpectrum}}, \code{\link{cepstrum}};
##' all derived from libassp's spectrum function.
##' @useDynLib wrassp, .registration = TRUE
##' @examples
##' # get path to audio file
##' path2wav <- list.files(system.file("extdata", package = "wrassp"),
##' pattern = glob2rx("*.wav"),
##' full.names = TRUE)[1]
##'
##' # calculate lps spectrum
##' res <- lpsSpectrum(path2wav, toFile=FALSE)
##'
##' # plot spectral values at midpoint of signal
##' plot(res$lps[dim(res$lps)[1]/2,],
##' type='l',
##' xlab='spectral value index',
##' ylab='spectral value')
##'
##' @export
'lpsSpectrum' <- function(listOfFiles = NULL, optLogFilePath = NULL,
beginTime = 0.0, centerTime = FALSE,
endTime = 0.0, resolution = 40.0,
fftLength = 0, windowSize = 20.0,
windowShift = 5.0, window = 'BLACKMAN',
order = 0, preemphasis = -0.95,
deemphasize = TRUE, toFile = TRUE,
explicitExt = NULL, outputDirectory = NULL,
forceToLog = useWrasspLogger, verbose = TRUE){
## ########################
## a few parameter checks and expand paths
if (is.null(listOfFiles)) {
stop(paste("listOfFiles is NULL! It has to be a string or vector of file",
"paths (min length = 1) pointing to valid file(s) to perform",
"the given analysis function."))
}
if (is.null(optLogFilePath) && forceToLog){
stop("optLogFilePath is NULL! -> not logging!")
}else{
if(forceToLog){
optLogFilePath = path.expand(optLogFilePath)
}
}
if(!isAsspWindowType(window)){
stop("WindowFunction of type '", window,"' is not supported!")
}
  if (!is.null(outputDirectory)) {
    outputDirectory = normalizePath(path.expand(outputDirectory))
    finfo <- file.info(outputDirectory)
    if (is.na(finfo$isdir)) {
      if (!dir.create(outputDirectory, recursive=TRUE))
        stop('Unable to create output directory.')
    } else if (!finfo$isdir) {
      stop(paste(outputDirectory, 'exists but is not a directory.'))
    }
  }
###########################
# Pre-process file list
listOfFiles <- prepareFiles(listOfFiles)
## #######################
## perform analysis
if(length(listOfFiles) == 1 || !verbose){
pb <- NULL
}else{
if(toFile==FALSE){
stop("length(listOfFiles) is > 1 and toFile=FALSE! toFile=FALSE only permitted for single files.")
}
cat('\n INFO: applying lpsSpectrum to', length(listOfFiles), 'files\n')
pb <- utils::txtProgressBar(min = 0, max = length(listOfFiles), style = 3)
}
externalRes = invisible(.External("performAssp", listOfFiles,
fname = "spectrum", beginTime = beginTime,
centerTime = centerTime, endTime = endTime,
spectrumType = 'LPS',
resolution = resolution,
fftLength = as.integer(fftLength), windowSize = windowSize,
windowShift = windowShift, window = window,
effectiveLength = TRUE,
order = as.integer(order), preemphasis = preemphasis,
deemphasize = deemphasize,
toFile = toFile, explicitExt = explicitExt,
progressBar = pb, outputDirectory = outputDirectory,
PACKAGE = "wrassp"))
## #########################
## write options to options log file
if (forceToLog){
optionsGivenAsArgs = as.list(match.call(expand.dots = TRUE))
wrassp.logger(optionsGivenAsArgs[[1]], optionsGivenAsArgs[-1],
optLogFilePath, listOfFiles)
}
## #########################
## return dataObj if length only one file
if(!is.null(pb)){
close(pb)
}else{
return(externalRes)
}
}
| /scratch/gouwar.j/cran-all/cranData/wrassp/R/lpsSpectrum.R |
##' mhsF0 function adapted from libassp
##'
##' Pitch analysis of the speech signal in <listOfFiles> using
##' Michel's/Modified Harmonic Sieve algorithm.
##' Analysis results will be written to a file with the
##' base name of the input file and extension '.pit'.
##' Default output is in SSFF binary format (track 'pitch').
##' @title mhsF0
##' @param listOfFiles vector of file paths to be processed by function
##' @param optLogFilePath path to option log file
##' @param beginTime = <time>: set begin of analysis interval to <time> seconds (default = 0: begin of file)
##' @param centerTime = <time>: set single-frame analysis with the analysis
##' window centred at <time> seconds; overrules beginTime, endTime and windowShift options
##' @param endTime = <time>: set end of analysis interval to <time> seconds (default = 0: end of file)
##' @param windowShift = <dur>: set analysis window shift to <dur> ms (default: 5.0)
##' @param gender = <code> set gender-specific pitch ranges; <code> may be:
##' "f[emale]" (80.0 - 600.0 Hz)
##' "m[ale]" (50.0 - 375.0 Hz)
##' "u[nknown]" (default; 50.0 - 600.0 Hz)
##' @param maxF = <freq>: set maximum pitch value to <freq> Hz (default: 600.0)
##' @param minF = <freq>: set minimum pitch value to <freq> Hz (default: 50.0 minimum: 25.0)
##' @param minAmp = <amp>: minimum signal amplitude (default: 50)
##' @param minAC1 = <freq>: minimum 1st correlation coefficient (default: 0.250)
##' @param minRMS = <num>: minimum RMS amplitude in dB (default: 18.0)
##' @param maxZCR = <freq>: maximum zero crossing rate in Hz (default: 3000)
##' @param minProb = <num>: minimum quality value of F0 fit (default: 0.520)
##' @param plainSpectrum use plain rather than masked power spectrum
##' @param toFile write results to file (default extension is .pit)
##' @param explicitExt set if you wish to override the default extension
##' @param outputDirectory directory in which output files are stored. Defaults to NULL, i.e.
##' the directory of the input files
##' @param forceToLog is set by the global package variable useWrasspLogger. This is set
##' to FALSE by default and should be set to TRUE if logging is desired.
##' @param verbose display info messages & show progress bar
##' @return nrOfProcessedFiles or if only one file to process return AsspDataObj of that file
##' @author Raphael Winkelmann
##' @author Lasse Bombien
##' @aliases mhspitch f0_mhs
##' @seealso \code{\link{ksvF0}} for an alternative fundamental frequency tracker
##' @useDynLib wrassp, .registration = TRUE
##' @examples
##' # get path to audio file
##' path2wav <- list.files(system.file("extdata", package = "wrassp"),
##' pattern = glob2rx("*.wav"),
##' full.names = TRUE)[1]
##'
##' # calculate fundamental frequency contour
##' res <- mhsF0(path2wav, toFile=FALSE)
##'
##' # plot fundamental frequency contour
##' plot(seq(0,numRecs.AsspDataObj(res) - 1) / rate.AsspDataObj(res) +
##' attr(res, 'startTime'),
##' res$pitch,
##' type='l',
##' xlab='time (s)',
##' ylab='F0 frequency (Hz)')
##'
##' @export
'mhsF0' <- 'mhspitch' <- 'f0_mhs' <-function(listOfFiles = NULL, optLogFilePath = NULL,
beginTime = 0.0, centerTime = FALSE,
endTime = 0.0, windowShift = 5.0,
gender = 'u', maxF = 600.0,
minF = 50.0, minAmp = 50.0,
minAC1 = 0.25, minRMS = 18.0,
maxZCR = 3000.0, minProb = 0.52,
plainSpectrum = FALSE, toFile = TRUE,
explicitExt = NULL, outputDirectory = NULL,
forceToLog = useWrasspLogger, verbose = TRUE){
###########################
# a few parameter checks and expand paths
if (is.null(listOfFiles)) {
stop(paste("listOfFiles is NULL! It has to be a string or vector of file",
"paths (min length = 1) pointing to valid file(s) to perform",
"the given analysis function."))
}
if (is.null(optLogFilePath) && forceToLog){
stop("optLogFilePath is NULL! -> not logging!")
}else{
if(forceToLog){
optLogFilePath = path.expand(optLogFilePath)
}
}
  if (!is.null(outputDirectory)) {
    outputDirectory = normalizePath(path.expand(outputDirectory))
    finfo <- file.info(outputDirectory)
    if (is.na(finfo$isdir)) {
      if (!dir.create(outputDirectory, recursive=TRUE))
        stop('Unable to create output directory.')
    } else if (!finfo$isdir) {
      stop(paste(outputDirectory, 'exists but is not a directory.'))
    }
  }
###########################
# Pre-process file list
listOfFiles <- prepareFiles(listOfFiles)
###########################
# perform analysis
if(length(listOfFiles) == 1 || !verbose){
pb <- NULL
}else{
if(toFile==FALSE){
stop("length(listOfFiles) is > 1 and toFile=FALSE! toFile=FALSE only permitted for single files.")
}
cat('\n INFO: applying mhspitch to', length(listOfFiles), 'files\n')
pb <- utils::txtProgressBar(min = 0, max = length(listOfFiles), style = 3)
}
externalRes = invisible(.External("performAssp", listOfFiles,
fname = "mhspitch", beginTime = beginTime,
centerTime = centerTime, endTime = endTime,
windowShift = windowShift, gender = gender,
maxF = maxF, minF = minF,
minAmp = minAmp, minAC1 = minAC1,
minRMS = minRMS, maxZCR = maxZCR,
minProb = minProb, plainSpectrum = plainSpectrum,
toFile = toFile, explicitExt = explicitExt,
progressBar = pb, outputDirectory = outputDirectory,
PACKAGE = "wrassp"))
############################
# write options to options log file
if (forceToLog){
optionsGivenAsArgs = as.list(match.call(expand.dots = TRUE))
wrassp.logger(optionsGivenAsArgs[[1]], optionsGivenAsArgs[-1],
optLogFilePath, listOfFiles)
}
#############################
# return dataObj if length only one file
if(!is.null(pb)){
close(pb)
}else{
return(externalRes)
}
}
| /scratch/gouwar.j/cran-all/cranData/wrassp/R/mhsF0.R |
##' package variable to force the usage of the logger
##' set to FALSE by default
##' @author Raphael Winkelmann
##' @export
useWrasspLogger <- FALSE
##' list of default output extensions,
##' track names and output type
##' for each signal processing function in wrassp
##' @author Raphael Winkelmann
##' @export
wrasspOutputInfos = list("acfana" = list("ext"= c("acf"), "tracks"=c("acf"), "outputType"="SSFF"),
"afdiff" = list("ext"= c("dwav"), "tracks"=c(""), "outputType"="wav"),
"affilter" = list("ext"= c("hpf", "lpf", "bpf", "bsf"), "tracks"=c(""), "outputType"="wav"),
"cepstrum" = list("ext"= c("cep"), "tracks"=c("cep"), "outputType"="SSFF"),
"cssSpectrum" = list("ext"= c("css"), "tracks"=c("css"), "outputType"="SSFF"),
"dftSpectrum" = list("ext"= c("dft"), "tracks"=c("dft"), "outputType"="SSFF"),
"ksvF0" = list("ext"= c("f0"), "tracks"=c("F0"), "outputType"="SSFF"),
"mhsF0" = list("ext"= c("pit"), "tracks"=c("pitch"), "outputType"="SSFF"),
"forest" = list("ext"= c("fms"), "tracks"=c("fm", "bw"), "outputType"="SSFF"),
"lpsSpectrum" = list("ext"= c("lps"), "tracks"=c("lps"), "outputType"="SSFF"),
"rfcana" = list("ext"= c("rfc", "arf", "lar", "lpc"), "tracks"=c("rms", "gain", "arf|lar|lpc|rfc"), "outputType"="SSFF"),
"rmsana" = list("ext"= c("rms"), "tracks"=c("rms"), "outputType"="SSFF"),
"zcrana" = list("ext"= c("zcr"), "tracks"=c("zcr"), "outputType"="SSFF")
)
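## Illustrative use of wrasspOutputInfos (kept as a comment so that nothing is
## evaluated at package load time): derive the default output file name that
## rmsana() would produce for a given input file, e.g.
## paste0(tools::file_path_sans_ext("input.wav"), ".", wrasspOutputInfos$rmsana$ext)
## yields "input.rms"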
##' list of possibly useful file formats for AsspDataObj corresponding to the
##' first element of the fileInfo attribute
##' @author Lasse Bombien
##' @docType data
##' @seealso \code{\link{AsspFileFormat}}
##' @format
##' \tabular{rll}{
##' Code Name \tab code number \tab description\cr
##' RAW \tab 1\tab headerless or unsupported format \cr
##' ASP_A \tab 2\tab ASP with ASCII data \cr
##' ASP_B \tab 3\tab ASP with binary data \cr
##' XASSP \tab 4\tab xassp ASCII \cr
##' IPDS_M \tab 5\tab labels in IPdS `MIX' format \cr
##' IPDS_S \tab 6\tab labels in IPdS `SAMPA' format \cr
##' AIFF \tab 7\tab Apple Audio Interchange File Format \cr
##' AIFC \tab 8\tab AIFF extended for compressed data \cr
##' CSL \tab 9\tab Kay Elemetrics Computerized Speech Lab \cr
##' CSRE \tab 10\tab Computerized Speech Research Environment \cr
##' ESPS \tab 11\tab Entropic Signal Processing System \cr
##' ILS \tab 12\tab \cr
##' KTH \tab 13\tab Kungliga Tekniska Hoegskolan Stockholm \cr
##' SWELL \tab 13\tab commercial version of KTH \cr
##' SNACK \tab 13\tab as Tcl extension \cr
##' SFS \tab 14\tab University College London Speech Filing System \cr
##' SND \tab 15\tab NeXT version of `SND' format \cr
##' AU \tab 15\tab Sun version of `SND' format \cr
##' NIST \tab 16\tab National Institute of Standards and Technology \cr
##' SPHERE \tab 16\tab SPeech HEader REsources \cr
##' PRAAT_S \tab 17\tab UvA praat 'short' text file \cr
##' PRAAT_L \tab 18\tab UvA praat 'long' text file \cr
##' PRAAT_B \tab 19\tab UvA praat binary file \cr
##' SSFF \tab 20\tab Simple Signal File Format \cr
##' WAVE \tab 21\tab IBM/Microsoft RIFF-WAVE \cr
##' WAVE_X \tab 22\tab RIFF-WAVE extended format (Revision 3) \cr
##' XLABEL \tab 24\tab ESPS xlabel \cr
##' YORK \tab 25\tab University of York (Klatt'80 parameters) \cr
##' UWM \tab 26\tab University of Wisconsin at Madison (microbeam data)\cr
##' }
##' @export
AsspFileFormats <- c(
RAW = 1, ## headerless or unsupported format
ASP_A = 2, ## ASP with ASCII data
ASP_B = 3, ## ASP with binary data
XASSP = 4, ## xassp ASCII
IPDS_M = 5, ## labels in IPdS `MIX' format
IPDS_S = 6, ## labels in IPdS `SAMPA' format
AIFF = 7, ## Apple Audio Interchange File Format
AIFC = 8, ## AIFF extended for compressed data
CSL = 9, ## Kay Elemetrics Computerized Speech Lab
CSRE = 10, ## Computerized Speech Research Environment
ESPS = 11, ## Entropic Signal Processing System
ILS = 12, ##
KTH = 13, ## Kungliga Tekniska Hoegskolan Stockholm
SWELL = 13, ## commercial version of KTH
SNACK = 13, ## as Tcl extension
SFS = 14, ## University College London Speech Filing System
SND = 15, ## NeXT version of `SND' format
AU = 15, ## Sun version of `SND' format
NIST = 16, ## National Institute of Standards and Technology
SPHERE = 16, ## SPeech HEader REsources
PRAAT_S = 17, ## UvA praat 'short' text file
PRAAT_L = 18, ## UvA praat 'long' text file
PRAAT_B = 19, ## UvA praat binary file
SSFF = 20, ## Simple Signal File Format
WAVE = 21, ## IBM/Microsoft RIFF-WAVE
WAVE_X = 22, ## RIFF-WAVE extended format (Revision 3)
XLABEL = 24, ## ESPS xlabel
YORK = 25, ## University of York (Klatt'80 parameters)
UWM = 26 ## University of Wisconsin at Madison (microbeam data)
)
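## Illustrative reverse lookup (kept as a comment; not evaluated at load time):
## map the format code stored as the first element of an AsspDataObj's fileInfo
## attribute back to its name, e.g.
## names(AsspFileFormats)[AsspFileFormats == 20]
## yields "SSFF"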
| /scratch/gouwar.j/cran-all/cranData/wrassp/R/packageVars.R |
##' rfcana function adapted from libassp
##'
##' Linear Prediction analysis of <listOfFiles> using the
##' autocorrelation method and the Durbin recursion.
##' This function calculates the RMS amplitudes of the input
##' and residual signal in dB and, per default, reflection
##' coefficients (see the 'lpType' option).
##' Analysis results will be written to a file with the
##' base name of the input file and the parameter type in
##' lower case as extension (e.g. '.rfc').
##' Default output is in SSFF binary format (tracks 'rms',
##' 'gain' and the LP type in lower case).
##' @title rfcana
##' @param listOfFiles vector of file paths to be processed by function
##' @param optLogFilePath path to option log file
##' @param beginTime = <time>: set begin of analysis interval to <time> seconds (default = 0: begin of file)
##' @param centerTime set single-frame analysis with the analysis window centred at <time> seconds;
##' overrules beginTime, endTime and windowShift options
##' @param endTime = <time>: set end of analysis interval to <time> seconds (default = 0: end of file)
##' @param windowShift = <dur>: set analysis window shift to <dur> ms (default: 5.0)
##' @param windowSize = <dur>: set analysis window size to <dur> ms; overrules effectiveLength option
##' @param effectiveLength make window size effective rather than exact
##' @param window = <type>: set analysis window function to <type> (default: BLACKMAN)
##' @param order = <num>: set prediction order to <num> (default: sample rate in kHz + 3)
##' @param preemphasis = <val>: set pre-emphasis factor to <val> (default: -0.95)
##' @param lpType = <type>: calculate <type> LP parameters; <type> may be:
##' "ARF": area function
##' "LAR": log area ratios
##' "LPC": linear prediction filter coefficients
##' "RFC": reflection coefficients (default)
##' @param toFile write results to file (default extension dependent on LpType .arf/.lar/.lpc/.rfc)
##' @param explicitExt set if you wish to override the default extension
##' @param outputDirectory directory in which output files are stored. Defaults to NULL, i.e.
##' the directory of the input files
##' @param forceToLog is set by the global package variable useWrasspLogger. This is set
##' to FALSE by default and should be set to TRUE if logging is desired.
##' @param verbose display info messages & show progress bar
##' @return nrOfProcessedFiles or if only one file to process return AsspDataObj of that file
##' @author Raphael Winkelmann
##' @author Lasse Bombien
##' @useDynLib wrassp, .registration = TRUE
##' @examples
##' # get path to audio file
##' path2wav <- list.files(system.file("extdata", package = "wrassp"),
##' pattern = glob2rx("*.wav"),
##' full.names = TRUE)[1]
##'
##' # perform linear prediction analysis
##' res <- rfcana(path2wav, toFile=FALSE)
##'
##' # plot reflection coefficients
##' matplot(seq(0,numRecs.AsspDataObj(res) - 1) / rate.AsspDataObj(res) +
##' attr(res, 'startTime'),
##' res$rfc,
##' type='l',
##' xlab='time (s)',
##' ylab='reflection coefficient values')
##'
##' @export
'rfcana' <- function(listOfFiles = NULL, optLogFilePath = NULL,
beginTime = 0.0, centerTime = FALSE,
endTime = 0.0, windowShift = 5.0,
windowSize = 20.0, effectiveLength = TRUE,
window = 'BLACKMAN', order = 0,
preemphasis = -0.95, lpType = 'RFC',
toFile = TRUE, explicitExt = NULL,
outputDirectory = NULL, forceToLog = useWrasspLogger,
verbose = TRUE){
###########################
# a few parameter checks and expand paths
if (is.null(listOfFiles)) {
stop(paste("listOfFiles is NULL! It has to be a string or vector of file",
"paths (min length = 1) pointing to valid file(s) to perform",
"the given analysis function."))
}
if (is.null(optLogFilePath) && forceToLog){
stop("optLogFilePath is NULL! -> not logging!")
}else{
if(forceToLog){
optLogFilePath = path.expand(optLogFilePath)
}
}
if(!isAsspWindowType(window)){
stop("WindowFunction of type '", window,"' is not supported!")
}
if(!isAsspLpType(lpType)){
stop("LpType of type '", lpType,"' is not supported!")
}
  if (!is.null(outputDirectory)) {
    outputDirectory = normalizePath(path.expand(outputDirectory))
    finfo <- file.info(outputDirectory)
    if (is.na(finfo$isdir)) {
      if (!dir.create(outputDirectory, recursive=TRUE))
        stop('Unable to create output directory.')
    } else if (!finfo$isdir) {
      stop(paste(outputDirectory, 'exists but is not a directory.'))
    }
  }
###########################
# Pre-process file list
listOfFiles <- prepareFiles(listOfFiles)
###########################
# perform analysis
if(length(listOfFiles) == 1 || !verbose){
pb <- NULL
}else{
if(toFile==FALSE){
stop("length(listOfFiles) is > 1 and toFile=FALSE! toFile=FALSE only permitted for single files.")
}
cat('\n INFO: applying rfcana to', length(listOfFiles), 'files\n')
pb <- utils::txtProgressBar(min = 0, max = length(listOfFiles), style = 3)
}
externalRes = invisible(.External("performAssp", listOfFiles,
fname = "rfcana", beginTime = beginTime,
centerTime = centerTime, endTime = endTime,
windowShift = windowShift, windowSize = windowSize,
effectiveLength = effectiveLength, window = window,
order = as.integer(order),
preemphasis = preemphasis, lpType = lpType,
toFile = toFile, explicitExt = explicitExt,
progressBar = pb, outputDirectory = outputDirectory,
PACKAGE = "wrassp"))
############################
# write options to options log file
if (forceToLog){
optionsGivenAsArgs = as.list(match.call(expand.dots = TRUE))
wrassp.logger(optionsGivenAsArgs[[1]], optionsGivenAsArgs[-1],
optLogFilePath, listOfFiles)
}
#############################
# return dataObj if length only one file
if(!is.null(pb)){
close(pb)
}else{
return(externalRes)
}
}
| /scratch/gouwar.j/cran-all/cranData/wrassp/R/rfcana.R |
##' rmsana function adapted from libassp
##'
##' Analysis of short-term Root Mean Square amplitude of
##' the signal in <listOfFiles>. Per default, the RMS values are
##' expressed in decibel (dB) so that they correspond to
##' the short-term power of the signal.
##' Analysis results will be written to a file with the
##' base name of the input file and extension '.rms'.
##' Default output is in SSFF binary format (track 'rms').
##' @title rmsana
##' @param listOfFiles vector of file paths to be processed by function
##' @param optLogFilePath path to option log file
##' @param beginTime = <time>: set begin of analysis interval to <time> seconds (default = 0: begin of file)
##' @param centerTime = <time>: set single-frame analysis with the analysis window centred at <time> seconds;
##' overrules beginTime, endTime and windowShift options
##' @param endTime = <time>: set end of analysis interval to <time> seconds (default: end of file)
##' @param windowShift = <dur>: set analysis window shift to <dur> ms (default: 5.0)
##' @param windowSize = <dur>: set analysis window size to <dur> ms; overrules effectiveLength option
##' @param effectiveLength make window size effective rather than exact
##' @param linear calculate linear RMS values (default: values in dB)
##' @param window = <type>: set analysis window function to <type> (default: HAMMING)
##' @param toFile write results to file (default extension is .rms)
##' @param explicitExt set if you wish to override the default extension
##' @param outputDirectory directory in which output files are stored. Defaults to NULL, i.e.
##' the directory of the input files
##' @param forceToLog is set by the global package variable useWrasspLogger. This is set
##' to FALSE by default and should be set to TRUE if logging is desired.
##' @param verbose display info messages & show progress bar
##' @return nrOfProcessedFiles or if only one file to process return AsspDataObj of that file
##' @author Raphael Winkelmann
##' @author Lasse Bombien
##' @useDynLib wrassp, .registration = TRUE
##' @examples
##' # get path to audio file
##' path2wav <- list.files(system.file("extdata", package = "wrassp"),
##' pattern = glob2rx("*.wav"),
##' full.names = TRUE)[1]
##'
##' # calculate rms values
##' res <- rmsana(path2wav, toFile=FALSE)
##'
##' # plot rms values
##' plot(seq(0,numRecs.AsspDataObj(res) - 1) / rate.AsspDataObj(res) +
##' attr(res, 'startTime'),
##' res$rms,
##' type='l',
##' xlab='time (s)',
##' ylab='RMS energy (dB)')
##'
##' @export
'rmsana' <- function(listOfFiles = NULL, optLogFilePath = NULL,
beginTime = 0.0, centerTime = FALSE,
endTime = 0.0, windowShift = 5.0,
windowSize = 20.0, effectiveLength = TRUE,
linear = FALSE, window = 'HAMMING',
toFile = TRUE, explicitExt = NULL,
outputDirectory = NULL, forceToLog = useWrasspLogger,
verbose = TRUE){
###########################
# a few parameter checks and expand paths
if (is.null(listOfFiles)) {
stop(paste("listOfFiles is NULL! It has to be a string or vector of file",
"paths (min length = 1) pointing to valid file(s) to perform",
"the given analysis function."))
}
if (is.null(optLogFilePath) && forceToLog){
stop("optLogFilePath is NULL! -> not logging!")
}else{
if(forceToLog){
optLogFilePath = path.expand(optLogFilePath)
}
}
if(!isAsspWindowType(window)){
stop("WindowFunction of type '", window,"' is not supported!")
}
  if (!is.null(outputDirectory)) {
    outputDirectory = normalizePath(path.expand(outputDirectory))
    finfo <- file.info(outputDirectory)
    if (is.na(finfo$isdir)) {
      if (!dir.create(outputDirectory, recursive=TRUE))
        stop('Unable to create output directory.')
    } else if (!finfo$isdir) {
      stop(paste(outputDirectory, 'exists but is not a directory.'))
    }
  }
###########################
# Pre-process file list
listOfFiles <- prepareFiles(listOfFiles)
###########################
# perform analysis
if(length(listOfFiles) == 1 || !verbose){
pb <- NULL
}else{
if(toFile==FALSE){
stop("length(listOfFiles) is > 1 and toFile=FALSE! toFile=FALSE only permitted for single files.")
}
cat('\n INFO: applying rmsana to', length(listOfFiles), 'files\n')
pb <- utils::txtProgressBar(min = 0, max = length(listOfFiles), style = 3)
}
externalRes = invisible(.External("performAssp", listOfFiles,
fname = "rmsana", beginTime = beginTime,
centerTime = centerTime, endTime = endTime,
windowShift = windowShift, windowSize = windowSize,
effectiveLength = effectiveLength, linear = linear,
window = window, toFile = toFile,
explicitExt = explicitExt,
progressBar = pb, outputDirectory = outputDirectory,
PACKAGE = "wrassp"))
############################
# write options to options log file
if (forceToLog){
optionsGivenAsArgs = as.list(match.call(expand.dots = TRUE))
wrassp.logger(optionsGivenAsArgs[[1]], optionsGivenAsArgs[-1],
optLogFilePath, listOfFiles)
}
#############################
# return dataObj if length only one file
if(!is.null(pb)){
close(pb)
}else{
return(externalRes)
}
}
| /scratch/gouwar.j/cran-all/cranData/wrassp/R/rmsana.R |
##' wrassp - Interface to the ASSP Library
##'
##' wrassp is a wrapper for R around Michel Scheffers's libassp (Advanced Speech Signal Processor).
##' The libassp library aims at providing functionality for handling speech signal files in most
##' common audio formats and for performing analyses common in phonetic science/speech science.
##' This includes the calculation of formants, fundamental frequency, root mean square,
##' auto correlation, a variety of spectral analyses, zero crossing rate, filtering etc.
##' This wrapper provides R with a large subset of libassp's signal processing functions and
##' makes them available to the user in a (hopefully) user-friendly manner.
##'
##' This package is part of the next iteration of the EMU Speech Database Management System
##' which aims to be as close to an all-in-one solution for generating, manipulating, querying,
##' analyzing and managing speech databases as possible.
##' For an overview of the system please visit this URL: \url{http://ips-lmu.github.io/EMU.html}.
##'
##' Available signal processing functions:
##'
##' \enumerate{
##' \item \code{\link{acfana}}: Analysis of short-term autocorrelation function
##' \item \code{\link{afdiff}}: Computes the first difference of the signal
##' \item \code{\link{affilter}}: Filters the audio signal (see docs for types)
##' \item \code{\link{cepstrum}}: Short-term cepstral analysis
##' \item \code{\link{cssSpectrum}}: Cepstral smoothed version of \code{\link{dftSpectrum}}
##' \item \code{\link{dftSpectrum}}: Short-term DFT spectral analysis
##' \item \code{\link{forest}}: Formant estimation
##' \item \code{\link{ksvF0}}: F0 analysis of the signal
##' \item \code{\link{lpsSpectrum}}: Linear Predictive smoothed version of \code{\link{dftSpectrum}}
##' \item \code{\link{mhsF0}}: Pitch analysis of the speech signal using Michel's (M)odified (H)armonic (S)ieve algorithm
##' \item \code{\link{rfcana}}: Linear Prediction analysis
##' \item \code{\link{rmsana}}: Analysis of short-term Root Mean Square amplitude
##' \item \code{\link{zcrana}}: Analysis of the averages of the short-term positive and negative zero-crossing rates
##' }
##'
##' Available file handling functions:
##'
##' \enumerate{
##' \item \code{\link{read.AsspDataObj}}: read an existing SSFF file into a \code{AsspDataObj} which is its in-memory equivalent.
##' \item \code{\link{write.AsspDataObj}}: write a \code{AsspDataObj} out to a SSFF file.
##' }
##'
##' @name wrassp-package
##' @aliases wrassp wrassp-package
##'
##'
##' @keywords package
##' @docType package
NULL
| /scratch/gouwar.j/cran-all/cranData/wrassp/R/wrassp-packageDocs.R |
##' Designated logger for the wrassp signal processing functions
##'
##' Function logs the call to a signal processing function (spf) of wrassp.
##' It is called by default if the forceToLog option of the spf is not set to
##' FALSE. It tries to format the output in an easily readable fashion.
##'
##' @title wrassp.logger
##' @param fName the name of the function calling the logger
##' @param fOpts the function options given by the user, acquired via match.call
##' @param optLogFilePath path to option log file
##' @param listOfFiles vector of file paths that the spf calling the logger processed
##' @author Raphael Winkelmann
##' @seealso \code{\link{match.call}}
wrassp.logger <- function(fName, fOpts,
optLogFilePath, listOfFiles){
fid = file(optLogFilePath, open="a")
cat("\n##################################\n", file = fid, append = TRUE)
cat("##################################\n", file = fid, append = TRUE)
cat(paste("########", fName, "performed ########\n"), file = fid, append = TRUE)
cat("Timestamp: ", paste(Sys.time()), '\n', file = fid, append = TRUE)
for (opt in names(fOpts)){
  if(opt != "listOfFiles" && opt != "optLogFilePath"){
    cat(paste(opt, ":", fOpts[opt][[1]], "\n"), file = fid, append = TRUE)
  }
}
cat(" => on files:\n\t", file = fid, append = TRUE)
cat(paste(listOfFiles, collapse="\n\t"), file = fid, append = TRUE)
close(fid)
}
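## Typical (internal) invocation, as used by the signal processing functions
## themselves (reproduced here as a comment for illustration; it is not run):
## optionsGivenAsArgs = as.list(match.call(expand.dots = TRUE))
## wrassp.logger(optionsGivenAsArgs[[1]], optionsGivenAsArgs[-1],
##               optLogFilePath, listOfFiles)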
| /scratch/gouwar.j/cran-all/cranData/wrassp/R/wrassp.logger.R |
##' zcrana function adapted from libassp
##'
##' Analysis of the averages of the short-term positive and
##' negative zero-crossing rates of the signal in <listOfFiles>.
##' Analysis results will be written to a file with the
##' base name of the input file and extension '.zcr'.
##' Default output is in SSFF binary format (track 'zcr').
##' @title zcrana
##' @param listOfFiles vector of file paths to be processed by function
##' @param optLogFilePath path to option log file
##' @param beginTime = <time>: set begin of analysis interval to <time> seconds (default: begin of file)
##' @param centerTime = <time> set single-frame analysis with the analysis window centred at <time> seconds;
##' overrules beginTime, endTime and windowShift options
##' @param endTime = <time>: set end of analysis interval to <time> seconds (default: end of file)
##' @param windowShift = <dur>: set analysis window shift to <dur> ms (default: 5.0)
##' @param windowSize = <dur>: set analysis window size to <dur> ms (default: 25.0)
##' @param toFile write results to file (default extension is .zcr)
##' @param explicitExt set if you wish to override the default extension
##' @param outputDirectory directory in which output files are stored. Defaults to NULL, i.e.
##' the directory of the input files
##' @param forceToLog is set by the global package variable useWrasspLogger. This is set
##' to FALSE by default and should be set to TRUE if logging is desired.
##' @param verbose display info messages & show progress bar
##' @return nrOfProcessedFiles or if only one file to process return AsspDataObj of that file
##' @author Raphael Winkelmann
##' @author Lasse Bombien
##' @useDynLib wrassp, .registration = TRUE
##' @examples
##' # get path to audio file
##' path2wav <- list.files(system.file("extdata", package = "wrassp"),
##' pattern = glob2rx("*.wav"),
##' full.names = TRUE)[1]
##'
##' # calculate zcr values
##' res <- zcrana(path2wav, toFile=FALSE)
##'
##' # plot zcr values
##' plot(seq(0,numRecs.AsspDataObj(res) - 1) / rate.AsspDataObj(res) +
##' attr(res, 'startTime'),
##' res$zcr,
##' type='l',
##' xlab='time (s)',
##' ylab='ZCR values')
##'
##' @export
'zcrana' <- function(listOfFiles = NULL, optLogFilePath = NULL,
beginTime = 0.0, centerTime = FALSE,
endTime = 0.0, windowShift = 5.0,
windowSize = 25.0, toFile = TRUE,
explicitExt = NULL, outputDirectory = NULL,
forceToLog = useWrasspLogger, verbose = TRUE){
###########################
# a few parameter checks and expand paths
if (is.null(listOfFiles)) {
stop(paste("listOfFiles is NULL! It has to be a string or vector of file",
"paths (min length = 1) pointing to valid file(s) to perform",
"the given analysis function."))
}
if (is.null(optLogFilePath) && forceToLog){
stop("optLogFilePath is NULL! -> not logging!")
}else{
if(forceToLog){
optLogFilePath = path.expand(optLogFilePath)
}
}
  if (!is.null(outputDirectory)) {
    outputDirectory = normalizePath(path.expand(outputDirectory))
    finfo <- file.info(outputDirectory)
    if (is.na(finfo$isdir)) {
      if (!dir.create(outputDirectory, recursive=TRUE))
        stop('Unable to create output directory.')
    } else if (!finfo$isdir) {
      stop(paste(outputDirectory, 'exists but is not a directory.'))
    }
  }
###########################
# Pre-process file list
listOfFiles <- prepareFiles(listOfFiles)
###########################
# perform analysis
if(length(listOfFiles) == 1 || !verbose){
pb <- NULL
}else{
if(toFile==FALSE){
stop("length(listOfFiles) is > 1 and toFile=FALSE! toFile=FALSE only permitted for single files.")
}
cat('\n INFO: applying zcrana to', length(listOfFiles), 'files\n')
pb <- utils::txtProgressBar(min = 0, max = length(listOfFiles), style = 3)
}
externalRes = invisible(.External("performAssp", PACKAGE = "wrassp",
listOfFiles, fname = "zcrana",
beginTime = beginTime, centerTime = centerTime,
endTime = endTime, windowShift = windowShift,
windowSize = windowSize,
toFile = toFile, explicitExt = explicitExt,
outputDirectory = outputDirectory, progressBar = pb))
############################
# write options to options log file
if (forceToLog){
optionsGivenAsArgs = as.list(match.call(expand.dots = TRUE))
wrassp.logger(optionsGivenAsArgs[[1]], optionsGivenAsArgs[-1],
optLogFilePath, listOfFiles)
}
#############################
# return dataObj if length only one file
if(!is.null(pb)){
close(pb)
}else{
return(externalRes)
}
}
| /scratch/gouwar.j/cran-all/cranData/wrassp/R/zcrana.R |
## -----------------------------------------------------------------------------
# load the package
library(wrassp)
# get the path to the data that comes with the package
wavPath = system.file('extdata', package='wrassp')
# now list the .wav files so we have some audio files to play with
wavFiles = list.files(wavPath, pattern=glob2rx('*.wav'), full.names=TRUE)
## -----------------------------------------------------------------------------
# load an audio file, e.g. the first one in the list above
au = read.AsspDataObj(wavFiles[1])
# show class
class(au)
# print object description
print(au)
## -----------------------------------------------------------------------------
# extract duration
dur.AsspDataObj(au)
# extract sampling rate
rate.AsspDataObj(au)
# extract number of records/samples
numRecs.AsspDataObj(au)
# extract additional attributes
attributes(au)
## -----------------------------------------------------------------------------
# extract track names
tracks.AsspDataObj(au)
# or an alternative way to extract track names
names(au)
# show head of samples
head(au$audio)
# and we can of course also plot these samples
# (only plot every 10th element to accelerate plotting)
plot(seq(0,numRecs.AsspDataObj(au) - 1, 10) / rate.AsspDataObj(au),
au$audio[c(TRUE, rep(FALSE,9))],
type='l',
xlab='time (s)',
ylab='Audio samples')
## -----------------------------------------------------------------------------
# manipulate the audio
au$audio = au$audio * 0.5
# write file to tempdir
dir = tempdir()
writeres = write.AsspDataObj(au, file.path(dir, 'newau.wav'))
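## -----------------------------------------------------------------------------
# read the manipulated file back in and look at the first few samples
# (illustrative round-trip check)
au2 = read.AsspDataObj(file.path(dir, 'newau.wav'))
head(au2$audio)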
## -----------------------------------------------------------------------------
# calculate formants and corresponding bandwidth values
fmBwVals = forest(wavFiles[1], toFile=F)
# due to toFile=F this returns an object of the type AsspDataObj and
# prevents the result being saved to disc as an SSFF file
class(fmBwVals)
# extract track names
# this time the object contains multiple tracks (formants + their bandwidths)
tracks.AsspDataObj(fmBwVals)
# with more than one field (in this case 250 F1/F2/F3/F4 values)
dim(fmBwVals$fm)
# plot the formant values
matplot(seq(0,numRecs.AsspDataObj(fmBwVals) - 1) / rate.AsspDataObj(fmBwVals) +
attr(fmBwVals, 'startTime'),
fmBwVals$fm,
type='l',
xlab='time (s)',
ylab='Formant frequency (Hz)')
## -----------------------------------------------------------------------------
# calculate the fundamental frequency contour
f0vals = ksvF0(wavFiles[1], toFile=F)
# plot the fundamental frequency contour
plot(seq(0,numRecs.AsspDataObj(f0vals) - 1) / rate.AsspDataObj(f0vals) +
attr(f0vals, 'startTime'),
f0vals$F0,
type='l',
xlab='time (s)',
ylab='F0 frequency (Hz)')
## -----------------------------------------------------------------------------
# calculate the RMS-energy contour for all wavFiles
rmsana(wavFiles, outputDirectory = tempdir())
# list new files using wrasspOutputInfos$rmsana$ext (see below)
rmsFilePaths = list.files(tempdir(),
pattern = paste0('*.',wrasspOutputInfos$rmsana$ext),
full.names = T)
# read first rms file
rmsvals = read.AsspDataObj(rmsFilePaths[1])
# plot the RMS energy contour
plot(seq(0,numRecs.AsspDataObj(rmsvals) - 1) / rate.AsspDataObj(rmsvals) +
attr(rmsvals, 'startTime'),
rmsvals$rms,
type='l',
xlab='time (s)',
ylab='RMS energy (dB)')
## -----------------------------------------------------------------------------
# show all function names
names(wrasspOutputInfos)
## -----------------------------------------------------------------------------
# show output infos of function forest
wrasspOutputInfos$forest
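## -----------------------------------------------------------------------------
# collect the first default extension of every signal processing function
sapply(wrasspOutputInfos, function(info) info$ext[1])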
## ----eval=FALSE---------------------------------------------------------------
# # open wrassp package documentation
# ?wrassp
| /scratch/gouwar.j/cran-all/cranData/wrassp/inst/doc/wrassp_intro.R |
---
title: "An introduction to the wrassp package"
author: "Lasse Bombien & Raphael Winkelmann"
affiliation: "Institute Of Phonetic And Speech Processing (LMU Munich)"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{An introduction to the wrassp package}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
# DEPRECATION WARNING
This vignette is considered deprecated! Its content has been moved to
the [EMU-SDMS manual](https://ips-lmu.github.io/The-EMU-SDMS-Manual/) (expanded and updated). Specifically, see
the [R package wrassp](https://ips-lmu.github.io/The-EMU-SDMS-Manual/chap-wrassp.html) as well as
the [wrassp implementation](https://ips-lmu.github.io/The-EMU-SDMS-Manual/chap-wrassp-impl.html) chapters.
# Introduction
This document is meant as an introduction to the `wrassp`
package. `wrassp` is a **w**rapper for **R** around Michel Scheffers's [libassp](https://libassp.sourceforge.net/)
(**A**dvanced **S**peech **S**ignal **P**rocessor). The libassp library aims at
providing functionality for handling speech signal files in most
common audio formats and for performing analyses common in phonetic
science/speech science. This includes the calculation of formants,
fundamental frequency, root mean square, auto correlation, a variety
of spectral analyses, zero crossing rate, filtering etc. This wrapper
provides R with a large subset of libassp's signal processing
functions and makes them available to the user in a (hopefully) user-friendly manner.
# File I/O and the AsspDataObj
Let's get started by locating some example material distributed with
the package.
```{r}
# load the package
library(wrassp)
# get the path to the data that comes with the package
wavPath = system.file('extdata', package='wrassp')
# now list the .wav files so we have some audio files to play with
wavFiles = list.files(wavPath, pattern=glob2rx('*.wav'), full.names=TRUE)
```
One of the aims of `wrassp` is to provide mechanisms to handle
speech-related files such as sound files and parametric data
files. `wrassp` therefore comes with a class called
`AsspDataObj` which does just that.
```{r}
# load an audio file, e.g. the first one in the list above
au = read.AsspDataObj(wavFiles[1])
# show class
class(au)
# print object description
print(au)
```
`au` is an object of the class `AsspDataObj` and, using `print`,
we can get some information about the object, such as its sampling
rate, its duration and what kind of data are stored in what form. Since
the file we loaded is audio only, the object contains exactly one track.
And since it's a mono file, this track only has one field. We will later
encounter different types of data with more than one track and more
fields per track.
Here are some more ways of extracting attributes from the object, such as
duration, sampling rate and the number of records:
```{r}
# extract duration
dur.AsspDataObj(au)
# extract sampling rate
rate.AsspDataObj(au)
# extract number of records/samples
numRecs.AsspDataObj(au)
# extract additional attributes
attributes(au)
```
An important property of `AsspDataObj` is of course that it
contains data tracks, or at least one data track. As mentioned above,
the currently loaded object contains a single mono audio
track. Accessing the data is easy: `AsspDataObj` stores data in
simple matrices, one matrix for each track. Broadly speaking,
`AsspDataObj` is nothing but a list of at least one matrix. All
of them have the same number of rows (number of records) but each can
have a different number of columns (number of fields). Each track has
a name and we can access the track using that name.
```{r}
# extract track names
tracks.AsspDataObj(au)
# or an alternative way to extract track names
names(au)
# show head of samples
head(au$audio)
# and we can of course also plot these samples
# (only plot every 10th element to accelerate plotting)
plot(seq(0,numRecs.AsspDataObj(au) - 1, 10) / rate.AsspDataObj(au),
au$audio[c(TRUE, rep(FALSE,9))],
type='l',
xlab='time (s)',
ylab='Audio samples')
```
Now, purely to give us something different from the original `au` object to write
to disc, let's manipulate the audio data by simply multiplying
all the sample values by a factor of `0.5`. The resulting
`AsspDataObj` will then be saved to a temporary directory provided by `R`.
```{r}
# manipulate the audio
au$audio = au$audio * 0.5
# write file to tempdir
dir = tempdir()
writeres = write.AsspDataObj(au, file.path(dir, 'newau.wav'))
```
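To verify the round trip, we can read the freshly written file back in and
inspect the first few samples (a small illustrative check using the same
functions introduced above):
```{r}
# read the manipulated file back in and look at the first few samples
au2 = read.AsspDataObj(file.path(dir, 'newau.wav'))
head(au2$audio)
```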
# Signal processing
`wrassp` is of course capable of more than just the mere reading and writing
of specific signal file formats. We will now use `wrassp` to calculate the formant values,
their corresponding bandwidths, the fundamental frequency contour and
the RMS energy contour of the audio file `wavFiles[1]`.
## Formants and their bandwidths
```{r}
# calculate formants and corresponding bandwidth values
fmBwVals = forest(wavFiles[1], toFile=F)
# due to toFile=F this returns an object of the type AsspDataObj and
# prevents the result being saved to disc as an SSFF file
class(fmBwVals)
# extract track names
# this time the object contains multiple tracks (formants + their bandwidths)
tracks.AsspDataObj(fmBwVals)
# with more than one field (in this case 250 F1/F2/F3/F4 values)
dim(fmBwVals$fm)
# plot the formant values
matplot(seq(0,numRecs.AsspDataObj(fmBwVals) - 1) / rate.AsspDataObj(fmBwVals) +
attr(fmBwVals, 'startTime'),
fmBwVals$fm,
type='l',
xlab='time (s)',
ylab='Formant frequency (Hz)')
```
## Fundamental frequency contour
```{r}
# calculate the fundamental frequency contour
f0vals = ksvF0(wavFiles[1], toFile=F)
# plot the fundamental frequency contour
plot(seq(0,numRecs.AsspDataObj(f0vals) - 1) / rate.AsspDataObj(f0vals) +
attr(f0vals, 'startTime'),
f0vals$F0,
type='l',
xlab='time (s)',
ylab='F0 frequency (Hz)')
```
## RMS energy contour
Seeing as one might want to reuse some of the computed signals at a later stage,
`wrassp` allows the user to write the result out to file by leaving the
`toFile` parameter set to `TRUE`. This also allows users to process more than one file at
once.
```{r}
# calculate the RMS-energy contour for all wavFiles
rmsana(wavFiles, outputDirectory = tempdir())
# list new files using wrasspOutputInfos$rmsana$ext (see below)
rmsFilePaths = list.files(tempdir(),
pattern = paste0('*.',wrasspOutputInfos$rmsana$ext),
full.names = T)
# read first rms file
rmsvals = read.AsspDataObj(rmsFilePaths[1])
# plot the RMS energy contour
plot(seq(0,numRecs.AsspDataObj(rmsvals) - 1) / rate.AsspDataObj(rmsvals) +
attr(rmsvals, 'startTime'),
rmsvals$rms,
type='l',
xlab='time (s)',
ylab='RMS energy (dB)')
```
# The wrasspOutputInfos object
`wrasspOutputInfos` stores meta information associated with the different signal
processing functions `wrassp` provides.
```{r}
# show all function names
names(wrasspOutputInfos)
```
This object can be useful to get additional information about a specific
`wrassp` function. It contains information about the default file extension (`$ext`),
the tracks produced (`$tracks`) and the output file type (`$outputType`) of
any given `wrassp` function.
```{r}
# show output infos of function forest
wrasspOutputInfos$forest
```
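Since `wrasspOutputInfos` is an ordinary named list, it can also be queried
programmatically. As a small illustrative snippet, the following collects the
(first) default extension of every signal processing function:
```{r}
# collect the first default extension of every signal processing function
sapply(wrasspOutputInfos, function(info) info$ext[1])
```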
For a list of the available signal processing functions provided by `wrassp` simply
open the package documentation:
```{r eval=FALSE}
# open wrassp package documentation
?wrassp
```
# Conclusion
We hope this document gives you a rough idea of how to use the `wrassp` package and what
it is capable of. For more information about the individual functions please consult the
respective R documentation (e.g. `?dftSpectrum`).
To find questions that might already have been answered, or to report an issue
or a bug, please use our [GitHub issue tracker](https://github.com/IPS-LMU/wrassp/issues).
| /scratch/gouwar.j/cran-all/cranData/wrassp/inst/doc/wrassp_intro.Rmd |
---
title: "An introduction to the wrassp package"
author: "Lasse Bombien & Raphael Winkelmann"
affiliation: "Institute Of Phonetic And Speech Processing (LMU Munich)"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{An introduction to the wrassp package}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
# DEPRECATION WARNING
This vignette is considered deprecated! Its content has been moved to
the [EMU-SDMS manual](https://ips-lmu.github.io/The-EMU-SDMS-Manual/) (expanded and updated). Specifically, see
the [R package wrassp](https://ips-lmu.github.io/The-EMU-SDMS-Manual/chap-wrassp.html) as well as
the [wrassp implementation](https://ips-lmu.github.io/The-EMU-SDMS-Manual/chap-wrassp-impl.html) chapters.
# Introduction
This document is meant as an introduction to the `wrassp`
package. `wrassp` is a **w**rapper for **R** around Michel Scheffers's [libassp](https://libassp.sourceforge.net/)
(**A**dvanced **S**peech **S**ignal **P**rocessor). The libassp library aims at
providing functionality for handling speech signal files in most
common audio formats and for performing analyses common in phonetic
science/speech science. This includes the calculation of formants,
fundamental frequency, root mean square, auto correlation, a variety
of spectral analyses, zero crossing rate, filtering etc. This wrapper
provides R with a large subset of libassp's signal processing
functions and makes them available to the user in a (hopefully) user-friendly manner.
# File I/O and the AsspDataObj
Let's get started by locating some example material distributed with
the package.
```{r}
# load the package
library(wrassp)
# get the path to the data that comes with the package
wavPath = system.file('extdata', package='wrassp')
# now list the .wav files so we have some audio files to play with
wavFiles = list.files(wavPath, pattern=glob2rx('*.wav'), full.names=TRUE)
```
One of the aims of `wrassp` is to provide mechanisms to handle
speech-related files such as sound files and parametric data
files. `wrassp` therefore comes with a class called
`AsspDataObj` which does just that.
```{r}
# load an audio file, e.g. the first one in the list above
au = read.AsspDataObj(wavFiles[1])
# show class
class(au)
# print object description
print(au)
```
`au` is an object of the class `AsspDataObj` and, using `print`,
we can get some information about the object, such as its sampling
rate, its duration and what kind of data are stored in what form. Since
the file we loaded is audio only, the object contains exactly one track.
And since it's a mono file, this track only has one field. We will later
encounter different types of data with more than one track and more
fields per track.
Here are some more ways of extracting attributes from the object, such as
duration, sampling rate and the number of records:
```{r}
# extract duration
dur.AsspDataObj(au)
# extract sampling rate
rate.AsspDataObj(au)
# extract number of records/samples
numRecs.AsspDataObj(au)
# extract additional attributes
attributes(au)
```
An important property of `AsspDataObj` is of course that it
contains data tracks, or at least one data track. As mentioned above,
the currently loaded object contains a single mono audio
track. Accessing the data is easy: `AsspDataObj` stores data in
simple matrices, one matrix for each track. Broadly speaking,
`AsspDataObj` is nothing but a list of at least one matrix. All
of them have the same number of rows (number of records) but each can
have a different number of columns (number of fields). Each track has
a name and we can access the track using that name.
```{r}
# extract track names
tracks.AsspDataObj(au)
# or an alternative way to extract track names
names(au)
# show head of samples
head(au$audio)
# and we can of course also plot these samples
# (only plot every 10th element to accelerate plotting)
plot(seq(0,numRecs.AsspDataObj(au) - 1, 10) / rate.AsspDataObj(au),
au$audio[c(TRUE, rep(FALSE,9))],
type='l',
xlab='time (s)',
ylab='Audio samples')
```
Now, purely to give us something different from the original `au` object to write
to disc, let's manipulate the audio data by simply multiplying
all the sample values by a factor of `0.5`. The resulting
`AsspDataObj` will then be saved to a temporary directory provided by `R`.
```{r}
# manipulate the audio
au$audio = au$audio * 0.5
# write file to tempdir
dir = tempdir()
writeres = write.AsspDataObj(au, file.path(dir, 'newau.wav'))
```
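As a quick sanity check, we can read the newly written file back in and
compare it to the original object (the duration, for example, should be
unchanged by the amplitude scaling):
```{r}
# read the manipulated file back in
newau = read.AsspDataObj(file.path(dir, 'newau.wav'))
# the duration should be identical to that of the original object
dur.AsspDataObj(newau) == dur.AsspDataObj(au)
```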
# Signal processing
`wrassp` is of course capable of more than just the mere reading and writing
of specific signal file formats. We will now use `wrassp` to calculate the formant values,
their corresponding bandwidths, the fundamental frequency contour and
the RMS energy contour of the audio file `wavFiles[1]`.
## Formants and their bandwidths
```{r}
# calculate formants and corresponding bandwidth values
fmBwVals = forest(wavFiles[1], toFile=F)
# due to toFile=F this returns an object of the type AsspDataObj and
# prevents the result being saved to disc as an SSFF file
class(fmBwVals)
# extract track names
# this time the object contains multiple tracks (formants + their bandwidths)
tracks.AsspDataObj(fmBwVals)
# with more than one field (in this case 250 F1/F2/F3/F4 values)
dim(fmBwVals$fm)
# plot the formant values
matplot(seq(0,numRecs.AsspDataObj(fmBwVals) - 1) / rate.AsspDataObj(fmBwVals) +
attr(fmBwVals, 'startTime'),
fmBwVals$fm,
type='l',
xlab='time (s)',
ylab='Formant frequency (Hz)')
```
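The corresponding bandwidth values are stored in the object's other track
(listed by `tracks.AsspDataObj()` above; we assume here that it is called
`bw`) and can be plotted the same way:
```{r eval=FALSE}
# plot the formant bandwidth values (same time axis as above)
matplot(seq(0,numRecs.AsspDataObj(fmBwVals) - 1) / rate.AsspDataObj(fmBwVals) +
          attr(fmBwVals, 'startTime'),
        fmBwVals$bw,
        type='l',
        xlab='time (s)',
        ylab='Formant bandwidth (Hz)')
```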
## Fundamental frequency contour
```{r}
# calculate the fundamental frequency contour
f0vals = ksvF0(wavFiles[1], toFile=F)
# plot the fundamental frequency contour
plot(seq(0,numRecs.AsspDataObj(f0vals) - 1) / rate.AsspDataObj(f0vals) +
attr(f0vals, 'startTime'),
f0vals$F0,
type='l',
xlab='time (s)',
ylab='F0 frequency (Hz)')
```
## RMS energy contour
Seeing as one might want to reuse some of the computed signals at a later stage,
`wrassp` allows the user to write the result out to file by leaving the
`toFile` parameter set to `TRUE`. This also allows users to process more than one file at
once.
```{r}
# calculate the RMS-energy contour for all wavFiles
rmsana(wavFiles, outputDirectory = tempdir())
# list new files using wrasspOutputInfos$rmsana$ext (see below)
rmsFilePaths = list.files(tempdir(),
pattern = paste0('*.',wrasspOutputInfos$rmsana$ext),
full.names = T)
# read first rms file
rmsvals = read.AsspDataObj(rmsFilePaths[1])
# plot the RMS energy contour
plot(seq(0,numRecs.AsspDataObj(rmsvals) - 1) / rate.AsspDataObj(rmsvals) +
attr(rmsvals, 'startTime'),
rmsvals$rms,
type='l',
xlab='time (s)',
ylab='RMS energy (dB)')
```
# The wrasspOutputInfos object
`wrasspOutputInfos` stores meta information associated with the different signal
processing functions `wrassp` provides.
```{r}
# show all function names
names(wrasspOutputInfos)
```
This object can be useful to get additional information about a specific
`wrassp` function. It contains information about the default file extension (`$ext`),
the tracks produced (`$tracks`) and the output file type (`$outputType`) of
any given `wrassp` function.
```{r}
# show output infos of function forest
wrasspOutputInfos$forest
```
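Because `$tracks` lists the names of the tracks a function produces, this
metadata can also be used to access a result object generically, without
hard-coding any track names (an illustrative sketch):
```{r eval=FALSE}
# access the first track produced by forest via its metadata entry
head(fmBwVals[[wrasspOutputInfos$forest$tracks[1]]])
```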
For a list of the available signal processing functions provided by `wrassp`, simply
open the package documentation:
```{r eval=FALSE}
# open wrassp package documentation
?wrassp
```
# Conclusion
We hope this document gives you a rough idea of how to use the `wrassp` package and what
it is capable of. For more information about the individual functions please consult the
respective R documentations (e.g. `?dftSpectrum`).
To see whether your question has already been answered, or to report an
issue or a bug, please use our [GitHub issue tracker](https://github.com/IPS-LMU/wrassp/issues).
| /scratch/gouwar.j/cran-all/cranData/wrassp/vignettes/wrassp_intro.Rmd |
#################################################
# write.snns.R (v0.0-4.2) 2007/04/03 #
# Authors: #
# Manuel Castejon Limas. #
# mail: [email protected] #
# Joaquin Ordieres Mere. #
# mail: [email protected] #
# Francisco Javier de Cos Juez #
# mail: [email protected] #
# Francisco Javier Martinez de Pison #
# mail: [email protected] #
#################################################
# This function creates a SNNS pattern file
# from a data.frame or matrix.
write.snns <- function(x,file="", noutputs=1)
{
file.create(file)
cat(" SNNS pattern definition file v3.2\n", file=file, append=TRUE)
cat(paste(" generated at ",date(),"\n\n\n\n\n\n\n"), file=file, append=TRUE)
cat(paste(" No. of patterns :",nrow(x),"\n"), file=file, append=TRUE)
cat(paste(" No. of input units :",ncol(x)-noutputs,"\n"), file=file, append=TRUE)
cat(paste(" No. of output units :",noutputs,"\n\n"), file=file, append=TRUE)
for (i in 1:nrow(x))
{
cat(paste("\n#",i,"\n"), file=file, append=TRUE)
    # write each pattern in chunks of at most 10 values per line,
    # advancing an offset so successive lines cover successive columns
    numcol <- ncol(x)
    offset <- 0
    while (numcol > 10)
    {
      line <- as.character(x[i, offset + 1])
      for (j in 2:10)
        line <- paste(line, x[i, offset + j])
      cat(line, file=file, append=TRUE)
      cat("\n", file=file, append=TRUE)
      numcol <- numcol - 10
      offset <- offset + 10
    }
    # remaining (at most 10) values of this pattern
    line <- as.character(x[i, offset + 1])
    if (numcol > 1)
      for (j in 2:numcol)
        line <- paste(line, x[i, offset + j])
    cat(line, file=file, append=TRUE)
    cat("\n", file=file, append=TRUE)
}
}
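# Illustrative usage (a sketch; the matrix below is made-up example data):
# m <- matrix(runif(30), nrow = 5) # 5 patterns, 5 input columns + 1 output
# write.snns(m, file = "example.pat", noutputs = 1)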
| /scratch/gouwar.j/cran-all/cranData/write.snns/R/write.snns.R |
#' Excel Types
#'
#' Create special column types to write to a spreadsheet
#'
#' @family writexl
#' @param x character vector to be interpreted as formula
#' @export
#' @rdname xl_formula
#' @examples
#' df <- data.frame(
#' name = c("UCLA", "Berkeley", "Jeroen"),
#' founded = c(1919, 1868, 2030),
#' website = xl_hyperlink(c("http://www.ucla.edu", "http://www.berkeley.edu", NA), "homepage")
#' )
#' df$age <- xl_formula('=(YEAR(TODAY()) - INDIRECT("B" & ROW()))')
#' write_xlsx(df, 'universities.xlsx')
#'
#' # cleanup
#' unlink('universities.xlsx')
xl_formula <- function(x){
if(is.factor(x))
x <- as.character(x)
stopifnot(is.character(x))
if(!all(grepl("^=",x) | is.na(x)))
stop("Formulas must start with '='")
structure(x, class = c('xl_formula', 'xl_object'))
}
#' @rdname xl_formula
#' @export
#' @param url character vector of URLs
#' @param name character vector of friendly names
xl_hyperlink <- function(url, name = NULL){
if(is.factor(url))
url <- as.character(url)
stopifnot(is.character(url))
hyperlink <- dubquote(url)
if(length(name)){
hyperlink <- paste(hyperlink, dubquote(name), sep = ",")
}
out <- xl_formula(sprintf("=HYPERLINK(%s)", hyperlink))
out[is.na(url)] <- NA
structure(out, class = c('xl_hyperlink', 'xl_formula', 'xl_object'))
}
#' @export
print.xl_formula <- function(x, max = 10, ...){
cat(sprintf(" [:%s:]\n", class(x)[1]))
if(length(x) > max)
x <- c(x[1:max], "...", sprintf("(total: %s)", length(x)))
cat(x, sep = "\n")
}
#' @export
rep.xl_object <- function(x, ...){
structure(rep(unclass(x), ...), class = class(x))
}
#' @export
`[.xl_object` <- function(x, ...){
structure(`[`(unclass(x), ...), class = class(x))
}
#' @export
`[[.xl_object` <- function(x, ...){
structure(`[[`(unclass(x), ...), class = class(x))
}
#' @export
c.xl_object <- function(x, ...){
structure(c(unclass(x), ...), class = class(x))
}
#' @export
as.data.frame.xl_object <- function(x, ..., stringsAsFactors = FALSE){
as.data.frame.character(x, ..., stringsAsFactors = FALSE)
}
dubquote <- function(x){
paste0('"', x, '"')
}
| /scratch/gouwar.j/cran-all/cranData/writexl/R/excel_types.R |
#' Version
#'
#' Shows version of bundled libxlsxwriter.
#'
#' @export
#' @rdname writexl
#' @useDynLib writexl C_lxw_version
lxw_version <- function(){
version <- .Call(C_lxw_version)
as.numeric_version(version)
}
#' @useDynLib writexl C_set_tempdir
.onLoad <- function(lib, pkg){
.Call(C_set_tempdir, tempdir())
}
| /scratch/gouwar.j/cran-all/cranData/writexl/R/version.R |
#' Export to xlsx
#'
#' Writes a data frame to an xlsx file. To create an xlsx with (multiple) named
#' sheets, simply set \code{x} to a named list of data frames.
#'
#' Currently supports strings, numbers, booleans and dates. Formatting options
#' may be added in future versions.
#'
#' \if{html}{
#' \out{
#' <link rel="stylesheet" type="text/css" href="https://jeroen.github.io/clippy/clippy.min.css" media="all">
#' <script src="https://jeroen.github.io/clippy/bundle.js"></script>
#' }}
#'
#' @export
#' @aliases writexl
#' @useDynLib writexl C_write_data_frame_list
#' @param x data frame or named list of data frames that will be sheets in the xlsx
#' @param path a file name to write to
#' @param col_names write column names at the top of the file?
#' @param format_headers make the \code{col_names} in the xlsx centered and bold
#' @param use_zip64 use \href{https://en.wikipedia.org/wiki/Zip_(file_format)#ZIP64}{zip64}
#' to enable support for 4GB+ xlsx files. Not all platforms can read this.
#' @examples # Roundtrip example with single excel sheet named 'mysheet'
#' tmp <- write_xlsx(list(mysheet = iris))
#' readxl::read_xlsx(tmp)
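#'
#' # Multiple sheets: pass a named list of data frames (one per sheet)
#' tmp2 <- write_xlsx(list(iris = iris, cars = mtcars))
#' readxl::excel_sheets(tmp2)
#'
#' # cleanup
#' unlink(tmp2)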
write_xlsx <- function(x, path = tempfile(fileext = ".xlsx"), col_names = TRUE,
format_headers = TRUE, use_zip64 = FALSE){
if(is.data.frame(x))
x <- list(x)
if(!is.list(x) || !all(vapply(x, is.data.frame, logical(1))))
stop("Argument x must be a data frame or list of data frames")
x <- lapply(x, normalize_df)
if(any(nchar(names(x)) > 31)){
warning("Truncating sheet name(s) to 31 characters")
names(x) <- substring(names(x), 1, 29)
}
nm <- names(x)
if(length(unique(nm)) < length(nm)){
warning("Deduplicating sheet names")
names(x) <- make.unique(substring(names(x), 1, 28), sep = "_")
}
stopifnot(is.character(path) && length(path))
path <- normalizePath(path, mustWork = FALSE)
ret <- .Call(C_write_data_frame_list, x, path, col_names, format_headers, use_zip64)
invisible(ret)
}
normalize_df <- function(df){
if(nrow(df) > 1024^2){
stop("the xlsx format does not support tables with 1M+ rows")
}
# Types to coerce to strings
for(i in which(vapply(df, inherits, logical(1), c("factor", "hms")))){
df[[i]] <- as.character(df[[i]])
}
for(i in which(vapply(df, function(x){is.integer(x) && inherits(x, "POSIXct")}, logical(1)))){
df[[i]] <- as.POSIXct(as.double(df[[i]]))
}
for(i in which(vapply(df, inherits, logical(1), "POSIXlt"))){
df[[i]] <- as.POSIXct(df[[i]])
}
for(i in which(vapply(df, inherits, logical(1), "integer64"))){
warning(sprintf("Coercing columnn %s from int64 to double", names(df)[i]), call. = FALSE)
df[[i]] <- bit64::as.double.integer64(df[[i]])
}
df
}
| /scratch/gouwar.j/cran-all/cranData/writexl/R/write_xlsx.R |
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' @rdname sample_int
#' @export
sample_int_crank <- function(n, size, prob) {
.Call(`_wrswoR_sample_int_crank`, n, size, prob)
}
#' @rdname sample_int
#' @export
sample_int_ccrank <- function(n, size, prob) {
.Call(`_wrswoR_sample_int_ccrank`, n, size, prob)
}
#' @rdname sample_int
#' @export
sample_int_expj <- function(n, size, prob) {
.Call(`_wrswoR_sample_int_expj`, n, size, prob)
}
#' @rdname sample_int
#' @export
sample_int_expjs <- function(n, size, prob) {
.Call(`_wrswoR_sample_int_expjs`, n, size, prob)
}
| /scratch/gouwar.j/cran-all/cranData/wrswoR/R/RcppExports.R |
#' Weighted sampling without replacement
#'
#' These functions implement weighted sampling without replacement using various
#' algorithms, i.e., they take a sample of the specified
#' `size` from the elements of `1:n` without replacement, using the
#' weights defined by `prob`. The call
#' `sample_int_*(n, size, prob)` is equivalent
#' to `sample.int(n, size, replace = F, prob)`. (The results will
#' most probably be different for the same random seed, but the
#' returned samples are distributed identically for both calls.)
#' Except for `sample_int_R()` (which
#' has quadratic complexity as of this writing), all functions have complexity
#' \eqn{O(n \log n)}{O(n log n)} or better and
#' often run faster than R's implementation, especially when `n` and
#' `size` are large.
#'
#' @details
#' `sample_int_R()` is a simple wrapper for [base::sample.int()].
#'
#' @inheritParams base::sample.int
#' @return An integer vector of length `size` with elements from
#' `1:n`.
#' @seealso [base::sample.int()]
#' @references <https://stackoverflow.com/q/15113650/946850>
#' @name sample_int
#' @export
#' @examples
#' # Base R implementation
#' s <- sample_int_R(2000, 1000, runif(2000))
#' stopifnot(unique(s) == s)
#' p <- c(995, rep(1, 5))
#' n <- 1000
#' set.seed(42)
#' tbl <- table(replicate(sample_int_R(6, 3, p),
#' n = n)) / n
#' stopifnot(abs(tbl - c(1, rep(0.4, 5))) < 0.04)
#'
sample_int_R <- function(n, size, prob) {
sample.int(n, size, replace = FALSE, prob)
}
| /scratch/gouwar.j/cran-all/cranData/wrswoR/R/sample_int_R.R |
#' @name sample_int_ccrank
#' @rdname sample_int
#' @importFrom Rcpp evalCpp
#' @examples
#' ## Algorithm A, Rcpp version using std::vector
#' s <- sample_int_ccrank(20000, 10000, runif(20000))
#' stopifnot(unique(s) == s)
#' p <- c(995, rep(1, 5))
#' n <- 1000
#' set.seed(42)
#' tbl <- table(replicate(sample_int_ccrank(6, 3, p),
#' n = n)) / n
#' stopifnot(abs(tbl - c(1, rep(0.4, 5))) < 0.04)
#'
NULL
| /scratch/gouwar.j/cran-all/cranData/wrswoR/R/sample_int_ccrank.R |
#' @name sample_int_crank
#' @rdname sample_int
#' @importFrom Rcpp evalCpp
#' @examples
#' ## Algorithm A, Rcpp version using R vectors
#' s <- sample_int_crank(20000, 10000, runif(20000))
#' stopifnot(unique(s) == s)
#' p <- c(995, rep(1, 5))
#' n <- 1000
#' set.seed(42)
#' tbl <- table(replicate(sample_int_crank(6, 3, p),
#' n = n)) / n
#' stopifnot(abs(tbl - c(1, rep(0.4, 5))) < 0.04)
#'
NULL
| /scratch/gouwar.j/cran-all/cranData/wrswoR/R/sample_int_crank.R |
#' @name sample_int_expj
#' @rdname sample_int
#' @details `sample_int_expj()` and `sample_int_expjs()`
#' implement one-pass random sampling with a reservoir with exponential jumps
#' (Efraimidis and Spirakis, 2006, Algorithm A-ExpJ). Both functions are
#' implemented in `Rcpp`; `*_expj()` uses log-transformed keys,
#' `*_expjs()` implements the algorithm in the paper verbatim
#' (at the cost of numerical stability).
#' @examples
#' ## Algorithm A-ExpJ (with log-transformed keys)
#' s <- sample_int_expj(20000, 10000, runif(20000))
#' stopifnot(unique(s) == s)
#' p <- c(995, rep(1, 5))
#' n <- 1000
#' set.seed(42)
#' tbl <- table(replicate(sample_int_expj(6, 3, p),
#' n = n)) / n
#' stopifnot(abs(tbl - c(1, rep(0.4, 5))) < 0.04)
#'
NULL
| /scratch/gouwar.j/cran-all/cranData/wrswoR/R/sample_int_expj.R |
#' @name sample_int_expjs
#' @rdname sample_int
#' @examples
#' ## Algorithm A-ExpJ (paper version)
#' s <- sample_int_expjs(20000, 10000, runif(20000))
#' stopifnot(unique(s) == s)
#' p <- c(995, rep(1, 5))
#' n <- 1000
#' set.seed(42)
#' tbl <- table(replicate(sample_int_expjs(6, 3, p),
#' n = n)) / n
#' stopifnot(abs(tbl - c(1, rep(0.4, 5))) < 0.04)
#'
NULL
| /scratch/gouwar.j/cran-all/cranData/wrswoR/R/sample_int_expjs.R |
#' @rdname sample_int
#' @details `sample_int_rank()`, `sample_int_crank()` and
#' `sample_int_ccrank()` implement one-pass random sampling
#' (Efraimidis and Spirakis, 2006, Algorithm A). The first function is
#' implemented purely in R, the other two are optimized `Rcpp`
#' implementations (`*_crank()` uses R vectors internally, while
#' `*_ccrank()` uses `std::vector`; surprisingly, `*_crank()` seems
#' to be faster on most inputs). It can be
#' shown that the order statistic of \eqn{U^{(1/w_i)}} has the same
#' distribution as random sampling without replacement (\eqn{U=\mbox{uniform}(0,1)}{U=uniform(0,1)}
#' distribution). To increase numerical stability, \eqn{\log(U) /
#' w_i}{log(U) / w_i} is computed instead; the log transform does not
#' change the order statistic.
#' @author Dinre (for `*_rank()`), Kirill Müller
#' (for all other functions)
#' @references Efraimidis, Pavlos S., and Paul G. Spirakis. "Weighted
#' random sampling with a reservoir." *Information Processing
#' Letters* 97, no. 5 (2006): 181-185.
#' @export
#' @examples
#' ## Algorithm A
#' s <- sample_int_rank(20000, 10000, runif(20000))
#' stopifnot(unique(s) == s)
#' p <- c(995, rep(1, 5))
#' n <- 1000
#' set.seed(42)
#' tbl <- table(replicate(sample_int_rank(6, 3, p),
#' n = n)) / n
#' stopifnot(abs(tbl - c(1, rep(0.4, 5))) < 0.04)
#'
#' @importFrom stats rexp
#' @importFrom utils head
sample_int_rank <- function(n, size, prob) {
.check_args(n, size, prob)
head(order(rexp(n) / prob), size)
}
.check_args <- function(n, size, prob) {
if (n < size) {
stop("cannot take a sample larger than the population", call. = FALSE)
}
if (length(prob) != n) {
stop("incorrect number of probabilities", call. = FALSE)
}
}
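# Illustrative check of the identity used above (a sketch): log() is
# monotone, so the log-transformed keys induce the same ordering as the
# U^(1/w) keys from the reference:
#   set.seed(1); w <- runif(5); u <- runif(5)
#   identical(order(u^(1 / w), decreasing = TRUE),
#             order(log(u) / w, decreasing = TRUE)) # TRUE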
| /scratch/gouwar.j/cran-all/cranData/wrswoR/R/sample_int_rank.R |
#' @rdname sample_int
#'
#' @details `sample_int_rej()` uses repeated weighted sampling with
#' replacement and a variant of rejection sampling. It is implemented purely
#' in R.
#' This function simulates weighted sampling without replacement using
#' somewhat more draws *with* replacement, and then discarding
#' duplicate values (rejection sampling). If too few items are
#' sampled, the routine calls itself recursively on a (hopefully) much
#' smaller problem. See also
#' <http://stats.stackexchange.com/q/20590/6432>.
#' @export
#' @examples
#' ## Rejection sampling
#' s <- sample_int_rej(20000, 10000, runif(20000))
#' stopifnot(unique(s) == s)
#' p <- c(995, rep(1, 5))
#' n <- 1000
#' set.seed(42)
#' tbl <- table(replicate(sample_int_rej(6, 3, p),
#' n = n)) / n
#' stopifnot(abs(tbl - c(1, rep(0.4, 5))) < 0.04)
#'
sample_int_rej <- function(n, size, prob) {
.check_args(n, size, prob)
.sample_int_rej(n, size, prob, 2, 1)
}
# Euler-Mascheroni constant
.EM <- 0.57721566490153286060651209008240243104215933593992
# Computes the harmonic numbers. Exact for the first
# length(.harmonic.series) values (through table lookup), otherwise using
# the approximation ln(a) + \gamma + 1 / (2a). Source:
# http://en.wikipedia.org/wiki/Harmonic_number
.harmonic <- function(a) {
stopifnot(a >= 0)
if (a < length(.harmonic.series)) {
.harmonic.series[a + 1]
} else {
log(a) + .EM + .5 / a
}
}
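# Illustrative use of .harmonic() (a sketch): under uniform weights, the
# expected number of draws *with* replacement needed to observe `size`
# distinct values out of `n` is n * (.harmonic(n) - .harmonic(n - size)),
# e.g. n = 100, size = 50 gives about 100 * (H_100 - H_50) ~ 68.8 draws.
# This is exactly the quantity used to choose wr.size below.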
#' @importFrom logging logdebug
# Workhorse
.sample_int_rej <- function(
n, size, prob, MAX_OVERSHOOT, BIAS) {
logdebug('.sample_int_rej: parameters: %s, %s, %s', n, size, length(prob))
# How many draws *with replacement* are required on average, assuming
# *uniform* weights? (With non-uniform weights, this number can only
# increase.) The result is a general case of the coupon collector
# problem, see http://math.stackexchange.com/q/247569/16420 for an
# analysis. BIAS can be supplied to correct the estimate by a factor,
# at most n * MAX_OVERSHOOT samples will be drawn. Both are tuning
# parameters, ideal values are still to be found through simulation.
wr.size <- ceiling(n * min(BIAS * (.harmonic(n) - .harmonic(n - size)),
MAX_OVERSHOOT))
logdebug('.sample_int_rej: wr.size=%s', wr.size)
# Do the sampling with replacement...
wr.sample <- sample.int(n, size=wr.size, replace=T, prob)
# ...but keep only unique values.
wr.sample <- unique(wr.sample)
wr.sample.len <- length(wr.sample)
logdebug('.sample_int_rej: wr.sample.len=%s', wr.sample.len)
# How much still left to do?
rem.size <- size - wr.sample.len
# Done? Great!
if (rem.size <= 0)
return (head(wr.sample, size))
# Not yet: Find out which indexes haven't been sampled yet. Recall
# that negative indexes in a vector subscription mean "all but
# the selected".
rem.indexes <- (1:n)[-wr.sample]
rem.n <- length(rem.indexes)
stopifnot(rem.n == n - wr.sample.len)
# Recursive call to sample without replacement from the remaining
# weights
rem.sample <- .sample_int_rej(rem.n, rem.size,
prob[rem.indexes],
MAX_OVERSHOOT, BIAS)
# Combine the results, substitute the indexes from 1:rem.n obtained
# from the recursive call using the rem.indexes map
c(wr.sample, rem.indexes[rem.sample])
}
| /scratch/gouwar.j/cran-all/cranData/wrswoR/R/sample_int_rej.R |
#' @useDynLib wrswoR, .registration = TRUE
#' @title Faster weighted sampling without replacement
#' @description \R's default sampling without replacement using
#' [base::sample.int()] seems to require quadratic run time,
#' e.g., when using weights drawn from a uniform distribution. For large
#' sample sizes, this is too slow. This package contains several
#' alternative implementations.
#' @details Implementations are adapted from
#' <https://stackoverflow.com/q/15113650/946850>.
#'
#' @name wrswoR-package
#' @aliases wrswoR-package wrswoR
#' @docType package
#' @author Kirill Müller
#' @references Efraimidis, Pavlos S., and Paul G. Spirakis. "Weighted
#' random sampling with a reservoir." *Information Processing Letters* 97,
#' no. 5 (2006): 181-185.
#'
#' Wong, Chak-Kuen, and Malcolm C. Easton. "An efficient method for
#' weighted sampling without replacement." *SIAM Journal on Computing* 9,
#' no. 1 (1980): 111-113.
#'
#'
#' @keywords package
#' @examples
#' sample_int_rej(100, 50, 1:100)
NULL
| /scratch/gouwar.j/cran-all/cranData/wrswoR/R/wrswoR-package.R |
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' Collapsed Gibbs sampler for hWRU. Internal function
#'
#' @param last_name Integer vector of last name identifiers for each record (zero-indexed, as are all identifiers that follow). Must match column numbers in M_rs.
#' @param first_name See last_name
#' @param mid_name See last_name
#' @param geo Integer vector of geographic units for each record. Must match column number in N_rg
#' @param N_rg Integer matrix of race | geography counts in census (geographies in columns).
#' @param pi_s Numeric matrix of race | surname prior probabilities.
#' @param pi_f Same as `pi_s`, but for first names.
#' @param pi_m Same as `pi_s`, but for middle names.
#' @param pi_nr Matrix of marginal probability distribution over missing names; non-keyword names default to this distribution.
#' @param which_names Integer; 0 = surname only; 1 = surname + first name; 2 = surname, first, and middle names.
#' @param samples Integer number of samples to take after (in total)
#' @param burnin Integer number of samples to discard as burn-in of Markov chain
#' @param race_init Integer vector of initial race assignments
#' @param verbose Boolean; should informative messages be printed?
#'
#' @keywords internal
sample_me <- function(last_name, first_name, mid_name, geo, N_rg, pi_s, pi_f, pi_m, pi_nr, which_names, samples, burnin, race_init, verbose) {
.Call(`_wru_sample_me`, last_name, first_name, mid_name, geo, N_rg, pi_s, pi_f, pi_m, pi_nr, which_names, samples, burnin, race_init, verbose)
}
| /scratch/gouwar.j/cran-all/cranData/wru/R/RcppExports.R |
#' Preflight census data
#'
#' @inheritParams predict_race
#' @keywords internal
census_data_preflight <- function(census.data, census.geo, year) {
vars_ <- unlist(census_geo_api_names(year = year))
legacy_vars <- unlist(census_geo_api_names_legacy(year = year))
test <- lapply(census.data, function(x) {
nms_to_test <- names(x[[census.geo]])
all(vars_ %in% nms_to_test) || all(legacy_vars %in% nms_to_test)
})
  missings <- names(test)[!unlist(test)]
  if (length(missings) > 0) {
    stop(
      paste0(
        "Missing one or more of the required columns (",
        paste0(vars_, collapse = ", "),
        ") in census.data for: ",
        paste0(missings, collapse = ", "),
        ". Please update your census.data by",
        " running `get_census_data` again."
      )
    )
  }
} | /scratch/gouwar.j/cran-all/cranData/wru/R/census_data_preflight.R |
#' Census Data download function.
#'
#' \code{census_geo_api} retrieves U.S. Census geographic data for a given state.
#'
#' This function allows users to download U.S. Census geographic data (2010 or 2020),
#' at either the county, tract, block, or place level, for a particular state.
#'
#' @inheritParams get_census_data
#' @param state A required character object specifying which state to extract Census data for,
#' e.g., \code{"NJ"}.
#' @param geo A character object specifying what aggregation level to use.
#' Use `"block"`, `"block_group"`, `"county"`, `"place"`, `"tract"`, or `"zcta"`.
#' Default is \code{"tract"}. Warning: extracting block-level data takes very long.
#' @param age A \code{TRUE}/\code{FALSE} object indicating whether to condition on
#' age or not. If \code{FALSE} (default), function will return Pr(Geolocation | Race).
#' If \code{TRUE}, function will return Pr(Geolocation, Age | Race).
#' If \code{\var{sex}} is also \code{TRUE}, function will return Pr(Geolocation, Age, Sex | Race).
#' @param sex A \code{TRUE}/\code{FALSE} object indicating whether to condition on
#' sex or not. If \code{FALSE} (default), function will return Pr(Geolocation | Race).
#' If \code{TRUE}, function will return Pr(Geolocation, Sex | Race).
#' If \code{\var{age}} is also \code{TRUE}, function will return Pr(Geolocation, Age, Sex | Race).
#' @param year A character object specifying the year of U.S. Census data to be downloaded.
#' Use \code{"2010"}, or \code{"2020"}. Default is \code{"2020"}.
#' Warning: 2020 U.S. Census data is downloaded only when \code{\var{age}} and
#' \code{\var{sex}} are both \code{FALSE}.
#' @param retry The number of retries at the census website if network interruption occurs.
#' @param save_temp File indicating where to save the temporary outputs.
#' Defaults to NULL. If specified, the function will look for an .RData file
#' with the same format as the expected output.
#' @param counties A vector of counties contained in your data. If \code{NULL}, all counties are pulled.
#' Useful for smaller predictions where only a few counties are considered. Must be zero padded.
#' @return Output will be an object of class \code{list}, indexed by state names. It will
#' consist of the original user-input data with additional columns of Census geographic data.
#'
#' @examples
#' \dontshow{data(voters)}
#' \dontrun{census_geo_api(state = "NJ", geo = "block")}
#' \dontrun{census_geo_api(state = "FL", geo = "tract", age = TRUE, sex = TRUE)}
#' \dontrun{census_geo_api(state = "MA", geo = "place", age = FALSE, sex = FALSE,
#' year = "2020")}
#'
#' @references
#' Relies on `get_census_api()`, `get_census_api_2()`, and `vec_to_chunk()` functions authored by Nicholas Nagle,
#' available [here](https://rstudio-pubs-static.s3.amazonaws.com/19337_2e7f827190514c569ea136db788ce850.html).
#'
#' @importFrom furrr future_map_dfr
#' @importFrom purrr map_dfr
#' @keywords internal
census_geo_api <- function(
key = Sys.getenv("CENSUS_API_KEY"),
state,
geo = c("tract", "block", "block_group", "county", "place", "zcta"),
age = FALSE,
sex = FALSE,
year = c("2020", "2010"),
retry = 3,
save_temp = NULL,
counties = NULL
) {
key <- validate_key(key)
geo <- tolower(geo)
geo <- rlang::arg_match(geo)
year <- as.character(year)
year <- rlang::arg_match(year)
census <- NULL
state <- as_state_abbreviation(state)
df.out <- NULL
state.fips <- as_fips_code(state)
vars <- census_geo_api_names(year = year, age = age, sex = sex)
census_data_url <- census_geo_api_url(year = year)
if (geo == "place") {
region <- paste("for=place:*&in=state:", state.fips, sep = "")
census <- get_census_api(census_data_url, key = key, var.names = unlist(vars), region = region, retry)
}
if (geo == "county") {
if (is.null(counties)) {
region <- paste("for=county:*&in=state:", state.fips, sep = "")
} else {
counties_paste <- paste0(counties, collapse = ",")
region <- paste("for=county:",counties_paste,"&in=state:", state.fips, sep = "")
}
census <- get_census_api(census_data_url, key = key, var.names = unlist(vars), region = region, retry)
}
if (geo == "tract") {
if (is.null(counties)) {
region_county <- paste("for=county:*&in=state:", state.fips, sep = "")
} else {
counties_paste <- paste0(counties, collapse = ",")
region_county <- paste("for=county:",counties_paste,"&in=state:", state.fips, sep = "")
}
county_df <- get_census_api(census_data_url, key = key, var.names = unlist(vars), region = region_county, retry)
if(is.null(counties)) {
county_list <- county_df$county
} else {
county_list <- intersect(counties, county_df$county)
}
if(length(county_list) > 0) {
census <- furrr::future_map_dfr(seq_along(county_list), function(county) {
message(paste("County ", county, " of ", length(county_list), ": ", county_list[county], sep = ""))
region_county <- paste("for=tract:*&in=state:", state.fips, "+county:", county_list[county], sep = "")
get_census_api(data_url = census_data_url, key = key, var.names = unlist(vars), region = region_county, retry)
}, .progress = TRUE)
} else {
message('There were no intersecting counties in your voter.file data (tract)')
}
}
if (geo == "block_group") {
if (is.null(counties)) {
region_county <- paste("for=county:*&in=state:", state.fips, sep = "")
} else {
counties_paste <- paste0(counties, collapse = ",")
region_county <- paste("for=county:",counties_paste,"&in=state:", state.fips, sep = "")
}
county_df <- get_census_api(census_data_url, key = key, var.names = unlist(vars), region = region_county, retry)
if(is.null(counties)) {
county_list <- county_df$county
} else {
county_list <- intersect(counties, county_df$county)
}
if(length(county_list) > 0) {
message('Running block_group by county...')
census <- purrr::map_dfr(
1:length(county_list),
function(county) {
          message(paste("County ", county, " of ", length(county_list), ": ", county_list[county], sep = ""))
blockgroup <- paste("for=block+group:*&in=state:", state.fips, "+county:", county_list[county], sep = "")
blockgroup_df <- get_census_api(census_data_url, key = key, var.names = unlist(vars), region = blockgroup, retry)
names(blockgroup_df)[4] <- "block_group" # Fix name, it comes in with a space from api.
blockgroup_df
}
)
message("\n") # new line for progress bar
} else {
message('There were no intersecting counties in your voter.file data (block)')
}
}
if (geo == "block") {
if (is.null(counties)) {
region_county <- paste("for=county:*&in=state:", state.fips, sep = "")
} else {
counties_paste <- paste0(counties, collapse = ",")
region_county <- paste("for=county:",counties_paste,"&in=state:", state.fips, sep = "")
}
county_df <- get_census_api(census_data_url, key = key, var.names = unlist(vars), region = region_county, retry)
if(is.null(counties)) {
county_list <- county_df$county
} else {
county_list <- intersect(counties, county_df$county)
}
if(length(county_list) > 0) {
message('Running block by county...')
census <- purrr::map_dfr(
1:length(county_list),
function(county) {
          message(paste("County ", county, " of ", length(county_list), ": ", county_list[county], sep = ""))
region_tract <- paste("for=tract:*&in=state:", state.fips, "+county:", county_list[county], sep = "")
tract_df <- get_census_api(census_data_url, key = key, var.names = unlist(vars), region = region_tract, retry)
tract_list <- tract_df$tract
furrr::future_map_dfr(1:length(tract_list), function(tract) {
message(paste("Tract ", tract, " of ", length(tract_list), ": ", tract_list[tract], sep = ""))
region_block <- paste("for=block:*&in=state:", state.fips, "+county:", county_list[county], "+tract:", tract_list[tract], sep = "")
get_census_api(census_data_url, key = key, var.names = unlist(vars), region = region_block, retry)
}, .progress = TRUE)
}
)
message("\n") # new line for progress bar
} else {
message('There were no intersecting counties in your voter.file data (block)')
}
}
if (geo == "zcta") {
census <- census_geo_api_zcta(
census_data_url = census_data_url,
key = key,
vars = vars,
state = state,
counties = counties,
retry = retry
)
}
census <- dplyr::mutate(census, state = as_state_abbreviation(state))
  r_columns <- purrr::map(vars, function(v) rowSums(census[v]))
census <- dplyr::bind_cols(census, r_columns)
census <- dplyr::group_by(census, dplyr::across(dplyr::any_of("state")))
census <- dplyr::mutate(
census,
dplyr::across(
# Divide all r_columns by the total population of the corresponding race
dplyr::all_of(names(r_columns)),
function(x) {
x / sum(
dplyr::pick(
sub("^.+_(.{3})$", "r_\\1", dplyr::cur_column(), perl = TRUE)
)
)
}
)
)
census <- dplyr::ungroup(census)
census
}
| /scratch/gouwar.j/cran-all/cranData/wru/R/census_geo_api.R |
# @staticimports pkg:stringstatic
# str_pad
#' Census geo API helper functions
#'
#' @inheritParams census_geo_api
#'
#' @return
#' \describe{
#' \item{`census_geo_api_names()`}{
#' A named list of [character] vectors whose values correspond to columns
#' of a Census API table and whose names represent the new columns they are
#' used to calculate in [census_geo_api()].
#' }
#' \item{`census_geo_api_url()`}{
#' A [character] string containing the base of the URL to a
#' Census API table.
#' }
#' }
#' @keywords internal
census_geo_api_names <- function(
year = c("2020", "2010", "2000"),
age = FALSE,
sex = FALSE
) {
year <- as.character(year)
year <- rlang::arg_match(year)
assert_boolean(age)
assert_boolean(sex)
if (year == "2020") {
prefix <- "P12"
separator <- "_"
suffix <- "N"
} else if (year %in% c("2010", "2000")) {
prefix <- "P012"
separator <- ""
suffix <- ""
}
race_codes <- list(
"_whi" = "I",
"_bla" = "B",
"_his" = "H",
"_asi" = c("D", "E"),
"_oth" = c("C", "F", "G")
)
sex_codes <- c("_mal" = 2, "_fem" = 26)
age_codes <- 1:23
names(age_codes) <- paste0("_", age_codes)
numeric_codes <- if (age && sex) {
age_sex_codes <- unlist(
purrr::map(sex_codes, function(x) x + age_codes)
)
names(age_sex_codes) <- sub(".", "", names(age_sex_codes), fixed = TRUE)
age_sex_codes[] <- str_pad(age_sex_codes, 3, "left", pad = "0")
as.list(age_sex_codes)
} else if (age) {
purrr::map(
age_codes,
function(x) str_pad(x + sex_codes, 3, "left", pad = "0")
)
} else if (sex) {
sex_codes[] <- str_pad(sex_codes, 3, "left", pad = "0")
as.list(sex_codes)
}
numeric_codes <- c("001", numeric_codes)
combinations <- expand.grid(
prefix = prefix,
race_codes = race_codes,
separator = separator,
numeric_codes = numeric_codes,
suffix = suffix,
KEEP.OUT.ATTRS = FALSE,
stringsAsFactors = FALSE
)
vars <- purrr::pmap(
combinations,
function(prefix, race_codes, separator, numeric_codes, suffix) {
inner_combinations <- expand.grid(
prefix = prefix,
race_codes = race_codes,
separator = separator,
numeric_codes = numeric_codes,
suffix = suffix,
KEEP.OUT.ATTRS = FALSE,
stringsAsFactors = FALSE
)
apply(inner_combinations, 1, paste, collapse = "")
}
)
names(vars) <- paste0(
"r",
names(combinations$numeric_codes),
names(combinations$race_codes)
)
vars
}
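# Illustrative output (2020, age = sex = FALSE): the returned list maps
# each race label to the total-population column(s) of the corresponding
# 2020 DHC P12 table, e.g.
#   r_whi -> "P12I_001N"
#   r_asi -> c("P12D_001N", "P12E_001N")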
census_geo_api_names_legacy <- function(year) {
if (year == 2020) {
return(
list(
r_whi = 'P2_005N',
r_bla = 'P2_006N',
r_his = 'P2_002N',
r_asi = c('P2_008N', 'P2_009N'),
r_oth = c('P2_007N', 'P2_010N', 'P2_011N')
)
)
}
list(
r_whi = 'P005003',
r_bla = 'P005004',
r_his = 'P005010',
r_asi = c('P005006', 'P005007'),
r_oth = c('P005005', 'P005008', 'P005009')
)
}
#' @rdname census_geo_api_names
census_geo_api_url <- function(year = c("2020", "2010", "2000")) {
year <- as.character(year)
year <- rlang::arg_match(year)
if (year == "2020") return("https://api.census.gov/data/2020/dec/dhc?")
paste0("https://api.census.gov/data/", year, "/dec/sf1?")
} | /scratch/gouwar.j/cran-all/cranData/wru/R/census_geo_api_names.R |
census_geo_api_zcta <- function(
census_data_url,
key,
vars,
state,
counties,
retry
) {
if (!is.null(counties)) {
cli::cli_abort(
'{.arg counties} must be {.code NULL} when {.code geo = "zcta"},
because ZCTA-level census data split by county is not available.'
)
}
region <- paste0(
"for=zip%20code%20tabulation%20area%20(or%20part):*&in=state:",
paste(as_fips_code(state), collapse = ",")
)
census <- get_census_api(
census_data_url,
key = key,
var.names = unlist(vars),
region = region,
retry
)
names(census)[[2]] <- "zcta"
census
}
| /scratch/gouwar.j/cran-all/cranData/wru/R/census_geo_api_zcta.R |
#' Census helper function.
#'
#' \code{census_helper} links user-input dataset with Census geographic data.
#'
#' This function allows users to link their geocoded dataset (e.g., voter file)
#' with U.S. Census data (2010 or 2020). The function extracts Census Summary File data
#' at the county, tract, block, or place level. Census data calculated are
#' Pr(Geolocation | Race) where geolocation is county, tract, block, or place.
#'
#' @inheritParams get_census_data
#' @param voter.file An object of class \code{data.frame}. Must contain field(s) named
#' \code{\var{county}}, \code{\var{tract}}, \code{\var{block}}, and/or \code{\var{place}}
#' specifying geolocation. These should be character variables that match up with
#' U.S. Census categories. County should be three characters (e.g., "031" not "31"),
#' tract should be six characters, and block should be four characters.
#' Place should be five characters if it is included.
#' @param states A character vector specifying which states to extract
#' Census data for, e.g. \code{c("NJ", "NY")}. Default is \code{"all"}, which extracts
#' Census data for all states contained in user-input data.
#' @param geo A character object specifying what aggregation level to use.
#' Use \code{"county"}, \code{"tract"}, \code{"block"} or \code{"place"}. Default is \code{"tract"}.
#' Warning: extracting block-level data takes very long.
#' @param age A \code{TRUE}/\code{FALSE} object indicating whether to condition on
#' age or not. If \code{FALSE} (default), function will return Pr(Geolocation | Race).
#' If \code{TRUE}, function will return Pr(Geolocation, Age | Race).
#' If \code{\var{sex}} is also \code{TRUE}, function will return Pr(Geolocation, Age, Sex | Race).
#' @param sex A \code{TRUE}/\code{FALSE} object indicating whether to condition on
#' sex or not. If \code{FALSE} (default), function will return Pr(Geolocation | Race).
#' If \code{TRUE}, function will return Pr(Geolocation, Sex | Race).
#' If \code{\var{age}} is also \code{TRUE}, function will return Pr(Geolocation, Age, Sex | Race).
#' @param year A character object specifying the year of U.S. Census data to be downloaded.
#' Use \code{"2010"}, or \code{"2020"}. Default is \code{"2020"}.
#' Warning: 2020 U.S. Census data is downloaded only when \code{\var{age}} and
#' \code{\var{sex}} are both \code{FALSE}.
#' @param census.data A optional census object of class \code{list} containing
#' pre-saved Census geographic data. Can be created using \code{get_census_data} function.
#' If \code{\var{census.data}} is provided, the \code{\var{age}} element must have the same value
#' as the \code{\var{age}} option specified in this function (i.e., \code{TRUE} in both or
#' \code{FALSE} in both). Similarly, the \code{\var{sex}} element in the object provided in
#' \code{\var{census.data}} must have the same value as the \code{\var{sex}} option here.
#' Moreover, the \code{\var{year}} element in the object provided in \code{\var{census.data}}
#' must have the same value as the \code{\var{year}} option in the function (i.e., \code{"2010"}
#' in both or \code{"2020"} in both).
#' If \code{\var{census.data}} is missing, Census geographic data will be obtained via Census API.
#' @param retry The number of retries at the census website if network interruption occurs.
#' @param use.counties A logical, defaulting to FALSE. Should census data be filtered by counties available in \var{census.data}?
#' @return Output will be an object of class \code{data.frame}. It will
#' consist of the original user-input data with additional columns of
#' Census data.
#'
#' @examples
#' \dontshow{
#' data(voters)
#' }
#' \dontrun{
#' census_helper(voter.file = voters, states = "nj", geo = "block")
#' }
#' \dontrun{
#' census_helper(
#' voter.file = voters, states = "all", geo = "tract",
#' age = TRUE, sex = TRUE
#' )
#' }
#' \dontrun{
#' census_helper(
#' voter.file = voters, states = "all", geo = "county",
#' age = FALSE, sex = FALSE, year = "2020"
#' )
#' }
#'
#' @keywords internal
census_helper <- function(
key = Sys.getenv("CENSUS_API_KEY"),
voter.file,
states = "all",
geo = "tract",
age = FALSE,
sex = FALSE,
year = "2020",
census.data = NULL,
retry = 3,
use.counties = FALSE
) {
if (is.null(census.data) || (typeof(census.data) != "list")) {
toDownload <- TRUE
} else {
toDownload <- FALSE
}
if (toDownload) {
key <- validate_key(key)
}
states <- toupper(states)
if (states == "ALL") {
states <- toupper(as.character(unique(voter.file$state)))
}
df.out <- NULL
for (s in 1:length(states)) {
message(paste("State ", s, " of ", length(states), ": ", states[s], sep = ""))
state <- toupper(states[s])
if (geo == "place") {
geo.merge <- c("place")
if ((toDownload) || (is.null(census.data[[state]])) || (census.data[[state]]$age != age) || (census.data[[state]]$sex != sex) || (census.data[[state]]$year != year)) {
census <- census_geo_api(key, state, geo = "place", age, sex, year, retry)
} else {
census <- census.data[[toupper(state)]]$place
}
}
if (geo == "county") {
geo.merge <- c("county")
if ((toDownload) || (is.null(census.data[[state]])) || (census.data[[state]]$age != age) || (census.data[[state]]$sex != sex) || (census.data[[state]]$year != year)) {
census <- census_geo_api(key, state, geo = "county", age, sex, year, retry)
} else {
census <- census.data[[toupper(state)]]$county
}
}
if (geo == "tract") {
geo.merge <- c("county", "tract")
if ((toDownload) || (is.null(census.data[[state]])) || (census.data[[state]]$age != age) || (census.data[[state]]$sex != sex) || (census.data[[state]]$year != year)) {
if (use.counties) {
census <- census_geo_api(key, state, geo = "tract", age, sex, year, retry,
# Only those counties within the target state
counties = unique(voter.file$county[voter.file$state == state]))
} else {
census <- census_geo_api(key, state, geo = "tract", age, sex, year, retry)
}
} else {
census <- census.data[[toupper(state)]]$tract
}
}
if (geo == "block") {
geo.merge <- c("county", "tract", "block")
if ((toDownload) || (is.null(census.data[[state]])) || (census.data[[state]]$age != age) || (census.data[[state]]$sex != sex) || (census.data[[state]]$year != year)) {
if (use.counties) {
census <- census_geo_api(key, state, geo = "block", age, sex, year, retry,
# Only those counties within the target state
counties = unique(voter.file$county[voter.file$state == state]))
} else {
census <- census_geo_api(key, state, geo = "block", age, sex, year, retry)
}
} else {
census <- census.data[[toupper(state)]]$block
}
}
if (is.null(census) & use.counties) {
message("No intersecting counties in counties supplied")
return(NULL)
}
census$state <- state
if (age == T) {
## Add Census Age Categories
voter.file$agecat <- NA
voter.file$agecat <- ifelse(voter.file$age <= 4, 1, voter.file$agecat)
voter.file$agecat <- ifelse(voter.file$age >= 5 & voter.file$age <= 9, 2, voter.file$agecat)
voter.file$agecat <- ifelse(voter.file$age >= 10 & voter.file$age <= 14, 3, voter.file$agecat)
voter.file$agecat <- ifelse(voter.file$age >= 15 & voter.file$age <= 17, 4, voter.file$agecat)
voter.file$agecat <- ifelse(voter.file$age >= 18 & voter.file$age <= 19, 5, voter.file$agecat)
voter.file$agecat <- ifelse(voter.file$age == 20, 6, voter.file$agecat)
voter.file$agecat <- ifelse(voter.file$age == 21, 7, voter.file$agecat)
voter.file$agecat <- ifelse(voter.file$age >= 22 & voter.file$age <= 24, 8, voter.file$agecat)
voter.file$agecat <- ifelse(voter.file$age >= 25 & voter.file$age <= 29, 9, voter.file$agecat)
voter.file$agecat <- ifelse(voter.file$age >= 30 & voter.file$age <= 34, 10, voter.file$agecat)
voter.file$agecat <- ifelse(voter.file$age >= 35 & voter.file$age <= 39, 11, voter.file$agecat)
voter.file$agecat <- ifelse(voter.file$age >= 40 & voter.file$age <= 44, 12, voter.file$agecat)
voter.file$agecat <- ifelse(voter.file$age >= 45 & voter.file$age <= 49, 13, voter.file$agecat)
voter.file$agecat <- ifelse(voter.file$age >= 50 & voter.file$age <= 54, 14, voter.file$agecat)
voter.file$agecat <- ifelse(voter.file$age >= 55 & voter.file$age <= 59, 15, voter.file$agecat)
voter.file$agecat <- ifelse(voter.file$age >= 60 & voter.file$age <= 61, 16, voter.file$agecat)
voter.file$agecat <- ifelse(voter.file$age >= 62 & voter.file$age <= 64, 17, voter.file$agecat)
voter.file$agecat <- ifelse(voter.file$age >= 65 & voter.file$age <= 66, 18, voter.file$agecat)
voter.file$agecat <- ifelse(voter.file$age >= 67 & voter.file$age <= 69, 19, voter.file$agecat)
voter.file$agecat <- ifelse(voter.file$age >= 70 & voter.file$age <= 74, 20, voter.file$agecat)
voter.file$agecat <- ifelse(voter.file$age >= 75 & voter.file$age <= 79, 21, voter.file$agecat)
voter.file$agecat <- ifelse(voter.file$age >= 80 & voter.file$age <= 84, 22, voter.file$agecat)
voter.file$agecat <- ifelse(voter.file$age >= 85, 23, voter.file$agecat)
}
if (age == F & sex == F) {
## Calculate Pr(Geolocation | Race)
if (year == "2010") {
geoPopulations <- rowSums(census[, grepl("P00", names(census))])
vars <- c(
pop_white = "P005003", pop_black = "P005004",
pop_aian = "P005005", pop_asian = "P005006",
pop_nhpi = "P005007", pop_other = "P005008",
pop_two = "P005009", pop_hisp = "P005010"
)
drop <- c(grep("state", names(census)), grep("P005", names(census)))
} else if (year == "2020") {
geoPopulations <- rowSums(census[, grepl("P2_", names(census))])
vars <- c(
pop_white = "P2_005N", pop_black = "P2_006N",
pop_aian = "P2_007N", pop_asian = "P2_008N",
pop_nhpi = "P2_009N", pop_other = "P2_010N",
pop_two = "P2_011N", pop_hisp = "P2_002N"
)
drop <- c(grep("state", names(census)), grep("P2_", names(census)))
}
census$r_whi <- census[, vars["pop_white"]] / sum(census[, vars["pop_white"]]) # Pr(Geo | White)
census$r_bla <- census[, vars["pop_black"]] / sum(census[, vars["pop_black"]]) # Pr(Geo | Black)
census$r_his <- census[, vars["pop_hisp"]] / sum(census[, vars["pop_hisp"]]) # Pr(Geo | Latino)
census$r_asi <- (census[, vars["pop_asian"]] + census[, vars["pop_nhpi"]]) / (sum(census[, vars["pop_asian"]]) + sum(census[, vars["pop_nhpi"]])) # Pr(Geo | Asian or NH/PI)
census$r_oth <- (census[, vars["pop_aian"]] + census[, vars["pop_other"]] + census[, vars["pop_two"]]) / (sum(census[, vars["pop_aian"]]) + sum(census[, vars["pop_other"]]) + sum(census[, vars["pop_two"]])) # Pr(Geo | AI/AN, Other, or Mixed)
voters.census <- merge(voter.file[toupper(voter.file$state) == toupper(states[s]), ], census[, -drop], by = geo.merge, all.x = T)
}
if (age == F & sex == T) {
## Calculate Pr(Geolocation, Sex | Race)
eth.cen <- c("whi", "bla", "his", "asi", "oth")
eth.let <- c("I", "B", "H", "D", "F")
for (i in 1:length(eth.cen)) {
if (i != 4 & i != 5) {
census[paste("r_mal", eth.cen[i], sep = "_")] <- census[paste("P012", eth.let[i], "002", sep = "")] / sum(census[paste("P012", eth.let[i], "001", sep = "")])
census[paste("r_fem", eth.cen[i], sep = "_")] <- census[paste("P012", eth.let[i], "026", sep = "")] / sum(census[paste("P012", eth.let[i], "001", sep = "")])
}
if (i == 4) {
## Combine Asian and Native Hawaiian/Pacific Islander
census[paste("r_mal", eth.cen[i], sep = "_")] <- (census$P012D002 + census$P012E002) / sum(census$P012D001 + census$P012E001)
census[paste("r_fem", eth.cen[i], sep = "_")] <- (census$P012D026 + census$P012E026) / sum(census$P012D001 + census$P012E001)
}
if (i == 5) {
## Combine American India/Alaska Native and Other
census[paste("r_mal", eth.cen[i], sep = "_")] <- (census$P012C002 + census$P012F002) / sum(census$P012C001 + census$P012F001)
census[paste("r_fem", eth.cen[i], sep = "_")] <- (census$P012C026 + census$P012F026) / sum(census$P012C001 + census$P012F001)
}
}
voters.census <- merge(voter.file[toupper(voter.file$state) == toupper(states[s]), ], census[names(census) != "state"], by = geo.merge, all.x = T)
for (i in 1:length(eth.cen)) {
voters.census[voters.census$sex == 0, paste("r", eth.cen[i], sep = "_")] <-
voters.census[voters.census$sex == 0, paste("r_mal", eth.cen[i], sep = "_")]
voters.census[voters.census$sex == 1, paste("r", eth.cen[i], sep = "_")] <-
voters.census[voters.census$sex == 1, paste("r_fem", eth.cen[i], sep = "_")]
}
}
if (age == T & sex == F) {
## Calculate Pr(Geolocation, Age Category | Race)
eth.cen <- c("whi", "bla", "his", "asi", "oth")
eth.let <- c("I", "B", "H", "D", "F")
age.cat <- c(seq(1, 23), seq(1, 23))
age.cen <- as.character(c(c("03", "04", "05", "06", "07", "08", "09"), seq(10, 25), seq(27, 49)))
for (i in 1:length(eth.cen)) {
for (j in 1:23) {
if (i != 4 & i != 5) {
census[paste("r", age.cat[j], eth.cen[i], sep = "_")] <- (census[paste("P012", eth.let[i], "0", age.cen[j], sep = "")] + census[paste("P012", eth.let[i], "0", age.cen[j + 23], sep = "")]) / sum(census[paste("P012", eth.let[i], "001", sep = "")])
}
if (i == 4) {
## Combine Asian and Native Hawaiian/Pacific Islander
census[paste("r", age.cat[j], eth.cen[i], sep = "_")] <- (census[paste("P012D0", age.cen[j], sep = "")] + census[paste("P012D0", age.cen[j + 23], sep = "")] + census[paste("P012E0", age.cen[j], sep = "")] + census[paste("P012E0", age.cen[j + 23], sep = "")]) / sum(census$P012D001 + census$P012E001)
}
if (i == 5) {
## Combine American India/Alaska Native and Other
census[paste("r", age.cat[j], eth.cen[i], sep = "_")] <- (census[paste("P012C0", age.cen[j], sep = "")] + census[paste("P012C0", age.cen[j + 23], sep = "")] + census[paste("P012F0", age.cen[j], sep = "")] + census[paste("P012F0", age.cen[j + 23], sep = "")]) / sum(census$P012C001 + census$P012F001)
}
}
}
voters.census <- merge(voter.file[toupper(voter.file$state) == toupper(states[s]), ], census[names(census) != "state"], by = geo.merge, all.x = T)
for (i in 1:length(eth.cen)) {
for (j in 1:23) {
voters.census[voters.census$agecat == j, paste("r", eth.cen[i], sep = "_")] <-
voters.census[voters.census$agecat == j, paste("r", j, eth.cen[i], sep = "_")]
}
}
}
if (age == T & sex == T) {
## Calculate Pr(Tract, Sex, Age Category | Race)
eth.cen <- c("whi", "bla", "his", "asi", "oth")
eth.let <- c("I", "B", "H", "D", "F")
sex.let <- c("mal", "fem")
age.cat <- c(seq(1, 23), seq(1, 23))
age.cen <- as.character(c(c("03", "04", "05", "06", "07", "08", "09"), seq(10, 25), seq(27, 49)))
for (i in 1:length(eth.cen)) {
for (k in 1:length(sex.let)) {
for (j in 1:23) {
if (k == 2) {
j <- j + 23
}
if (i != 4 & i != 5) {
census[paste("r", sex.let[k], age.cat[j], eth.cen[i], sep = "_")] <- census[paste("P012", eth.let[i], "0", age.cen[j], sep = "")] / sum(census[paste("P012", eth.let[i], "001", sep = "")])
}
if (i == 4) {
## Combine Asian and Native Hawaiian/Pacific Islander
census[paste("r", sex.let[k], age.cat[j], eth.cen[i], sep = "_")] <- (census[paste("P012D0", age.cen[j], sep = "")] + census[paste("P012E0", age.cen[j], sep = "")]) / sum(census$P012D001 + census$P012E001)
}
if (i == 5) {
## Combine American India/Alaska Native and Other
census[paste("r", sex.let[k], age.cat[j], eth.cen[i], sep = "_")] <- (census[paste("P012C0", age.cen[j], sep = "")] + census[paste("P012F0", age.cen[j], sep = "")]) / sum(census$P012C001 + census$P012F001)
}
}
}
}
voters.census <- merge(voter.file[toupper(voter.file$state) == toupper(states[s]), ], census[names(census) != "state"], by = geo.merge, all.x = T)
for (i in 1:length(eth.cen)) {
for (j in 1:23) {
voters.census[
voters.census$sex == 0 & voters.census$agecat == j,
paste("r", eth.cen[i], sep = "_")
] <-
voters.census[
voters.census$sex == 0 & voters.census$agecat == j,
paste("r_mal", j, eth.cen[i], sep = "_")
]
voters.census[
voters.census$sex == 1 & voters.census$agecat == j,
paste("r", eth.cen[i], sep = "_")
] <-
voters.census[
voters.census$sex == 1 & voters.census$agecat == j,
paste("r_fem", j, eth.cen[i], sep = "_")
]
}
}
}
keep.vars <- c(
names(voter.file)[names(voter.file) != "agecat"],
paste("r", c("whi", "bla", "his", "asi", "oth"), sep = "_")
)
df.out <- as.data.frame(rbind(df.out, voters.census[keep.vars]))
}
return(df.out)
}
| /scratch/gouwar.j/cran-all/cranData/wru/R/census_helper.R |
#' Census helper function.
#'
#' \code{census_helper_new} links user-input dataset with Census geographic data.
#'
#' This function allows users to link their geocoded dataset (e.g., voter file)
#' with U.S. Census data (2010 or 2020). The function extracts Census Summary File data
#' at the county, tract, block, or place level. Census data calculated are
#' Pr(Geolocation | Race) where geolocation is county, tract, block, or place.
#'
#' @inheritParams get_census_data
#' @param voter.file An object of class \code{data.frame}. Must contain field(s) named
#' \code{\var{county}}, \code{\var{tract}}, \code{\var{block}}, and/or \code{\var{place}}
#' specifying geolocation. These should be character variables that match up with
#' U.S. Census categories. County should be three characters (e.g., "031" not "31"),
#' tract should be six characters, and block should be four characters.
#' Place should be five characters if it is included.
#' @param states A character vector specifying which states to extract
#' Census data for, e.g. \code{c("NJ", "NY")}. Default is \code{"all"}, which extracts
#' Census data for all states contained in user-input data.
#' @param geo A character object specifying what aggregation level to use.
#' Use \code{"county"}, \code{"tract"}, \code{"block"}, or \code{"place"}.
#' Default is \code{"tract"}. Warning: extracting block-level data takes very long.
#' @param age A \code{TRUE}/\code{FALSE} object indicating whether to condition on
#' age or not. If \code{FALSE} (default), function will return Pr(Geolocation | Race).
#' If \code{TRUE}, function will return Pr(Geolocation, Age | Race).
#' If \code{\var{sex}} is also \code{TRUE}, function will return Pr(Geolocation, Age, Sex | Race).
#' @param sex A \code{TRUE}/\code{FALSE} object indicating whether to condition on
#' sex or not. If \code{FALSE} (default), function will return Pr(Geolocation | Race).
#' If \code{TRUE}, function will return Pr(Geolocation, Sex | Race).
#' If \code{\var{age}} is also \code{TRUE}, function will return Pr(Geolocation, Age, Sex | Race).
#' @param year A character object specifying the year of U.S. Census data to be downloaded.
#' Use \code{"2010"}, or \code{"2020"}. Default is \code{"2020"}.
#' @param census.data A optional census object of class \code{list} containing
#' pre-saved Census geographic data. Can be created using \code{get_census_data} function.
#' If \code{\var{census.data}} is provided, the \code{\var{year}} element must
#' have the same value as the \code{\var{year}} option specified in this function
#' (i.e., \code{"2010"} in both or \code{"2020"} in both).
#' If \code{\var{census.data}} is provided, the \code{\var{age}} and the \code{\var{sex}}
#' elements must be \code{FALSE}. This corresponds to the defaults of \code{census_geo_api}.
#' If \code{\var{census.data}} is missing, Census geographic data will be obtained via Census API.
#' @param retry The number of retries at the census website if network interruption occurs.
#' @param use.counties A logical, defaulting to FALSE. Should census data be filtered by counties
#' available in \var{census.data}?
#' @param skip_bad_geos Logical. Option to have the function skip any geolocations that are not present
#' in the census data, returning a partial data set. Default is set to \code{FALSE}, which case it will
#' break and provide error message with a list of offending geolocations.
#' @return Output will be an object of class \code{data.frame}. It will
#' consist of the original user-input data with additional columns of
#' Census data.
#'
#' @examples
#' \dontshow{data(voters)}
#' \dontrun{census_helper_new(voter.file = voters, states = "nj", geo = "block")}
#' \dontrun{census_helper_new(voter.file = voters, states = "all", geo = "tract")}
#' \dontrun{census_helper_new(voter.file = voters, states = "all", geo = "place",
#' year = "2020")}
#'
#' @keywords internal
census_helper_new <- function(
key = Sys.getenv("CENSUS_API_KEY"),
voter.file,
states = "all",
geo = c("tract", "block", "block_group", "county", "place", "zcta"),
age = FALSE,
sex = FALSE,
year = "2020",
census.data = NULL,
retry = 3,
use.counties = FALSE,
skip_bad_geos = FALSE
) {
if ("precinct" %in% geo) {
stop("Error: census_helper_new function does not currently support precinct-level data.")
}
geo <- tolower(geo)
geo <- rlang::arg_match(geo)
if(!(year %in% c("2000","2010","2020"))){
stop("Interface only implemented for census years '2000', '2010', or '2020'.")
}
if (is.null(census.data) || (typeof(census.data) != "list")) {
toDownload = TRUE
} else {
toDownload = FALSE
}
if (toDownload) {
key <- validate_key(key)
}
if (toupper(states) == "ALL") {
states <- toupper(as.character(unique(voter.file$state)))
}
states <- as_state_abbreviation(states)
df.out <- NULL
for (s in 1:length(states)) {
message(paste("State ", s, " of ", length(states), ": ", states[s], sep = ""))
state <- toupper(states[s])
if (geo == "tract") {
geo.merge <- c("county", "tract")
      if ((toDownload) || (is.null(census.data[[state]])) || (census.data[[state]]$year != year) || (census.data[[state]]$age != FALSE) || (census.data[[state]]$sex != FALSE)) {
        if(use.counties) {
          census <- census_geo_api(key, state, geo = "tract", age, sex, year, retry,
                                   # Only those counties within the target state
                                   counties = unique(voter.file$county[voter.file$state == state]))
} else {
census <- census_geo_api(key, state, geo = "tract", age, sex, year, retry)
}
} else {
census <- census.data[[toupper(state)]]$tract
}
} else if (geo == "block_group") {
geo.merge <- c("county", "tract", "block_group")
if ((toDownload) || (is.null(census.data[[state]])) || (census.data[[state]]$year != year) || (census.data[[state]]$age != FALSE) || (census.data[[state]]$sex != FALSE)) {#} || (census.data[[state]]$age != age) || (census.data[[state]]$sex != sex)) {
if(use.counties) {
census <- census_geo_api(key, state, geo = "block_group", age, sex, retry,
# Only those counties within the target state
counties = unique(voter.file$county[voter.file$state == state]))
} else {
census <- census_geo_api(key, state, geo = "block_group", age, sex, year, retry)
}
} else {
census <- census.data[[toupper(state)]]$block_group
}
} else if (geo == "block") {
if(any(names(census.data) == "block_group")) {
geo.merge <- c("county", "tract", "block_group", "block")
} else {
geo.merge <- c("county", "tract", "block")
}
if ((toDownload) || (is.null(census.data[[state]])) || (census.data[[state]]$year != year) || (census.data[[state]]$age != FALSE) || (census.data[[state]]$sex != FALSE)) {#} || (census.data[[state]]$age != age) || (census.data[[state]]$sex != sex)) {
if(use.counties) {
census <- census_geo_api(key, state, geo = "block", age, sex, retry,
# Only those counties within the target state
counties = unique(voter.file$county[voter.file$state == state]))
} else {
census <- census_geo_api(key, state, geo = "block", age, sex, year, retry)
}
} else {
census <- census.data[[toupper(state)]]$block
}
} else {
geo.merge <- geo
state_must_be_downloaded <- toDownload ||
is.null(census.data[[state]]) ||
census.data[[state]]$year != year ||
# TODO: Why do we always redownload if sex or age == TRUE?
census.data[[state]]$age != FALSE ||
census.data[[state]]$sex != FALSE
if (state_must_be_downloaded) {
census <- census_geo_api(key, state, geo = geo, age, sex, year, retry)
} else {
census <- census.data[[state]][[geo]]
}
}
census$state <- state
    ## Calculate Pr(Race | Geolocation)
if (any(c("P2_005N", "P005003") %in% names(census))) {
message(sprintf("NOTE: Legacy column names detected, loading Race values from Census Redistricting table for %s. Age, Sex, and ZCTA predictions will be unavailable.", year))
# TODO: Add test that we get the same ratios with legacy and new tables for 2020
# Old table: Redistricting (Pl-some numbers) (does not have age, sex, or ZCTAs)
# New table: DHC (does have age, sex, and ZCTA)
vars_ <- census_geo_api_names_legacy(year = year)
} else {
vars_ <- census_geo_api_names(year)
}
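    # Indices of the raw count columns (plus state) to drop after the
    # conditional probabilities are computed below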
drop <- match(c("state", unlist(vars_)), names(census))
    geoPopulations <- rowSums(census[, names(census) %in% unlist(vars_)])
census$r_whi <- rowSums(census[, vars_[["r_whi"]], drop = FALSE]) / (geoPopulations) #Pr(White | Geo)
census$r_bla <- rowSums(census[, vars_[["r_bla"]], drop = FALSE]) / (geoPopulations) #Pr(Black | Geo)
census$r_his <- rowSums(census[, vars_[["r_his"]], drop = FALSE]) / (geoPopulations) #Pr(Latino | Geo)
census$r_asi <- rowSums(census[, vars_[["r_asi"]], drop = FALSE]) / (geoPopulations) #Pr(Asian or NH/PI | Geo)
census$r_oth <- rowSums(census[, vars_[["r_oth"]], drop = FALSE]) / (geoPopulations) #Pr(AI/AN, Other, or Mixed | Geo)
# check locations with zero people
# get average without places with zero people, and assign that to zero locs.
    zero_ind <- which(geoPopulations < .Machine$double.eps)
if (length(zero_ind)) {
for (rcat in c("r_whi","r_bla","r_his","r_asi","r_oth")) {
census[[rcat]][zero_ind] <- mean(census[[rcat]], na.rm = TRUE)
}
}
voters.census <- merge(
voter.file[toupper(voter.file$state) == toupper(states[s]), ],
census[, -drop], by = geo.merge, all.x = TRUE)
#Check if geolocation missing from census object
if(any(is.na(voters.census$r_whi))){
miss_ind <- which(is.na(voters.census$r_whi))
message("The following locations in the voter.file are not available in the census data.",
paste0("(listed as ", paste0(c("state",geo.merge), collapse="-"),"):\n"),
paste(do.call(paste, c(unique(voters.census[miss_ind, c("state",geo.merge)]),
sep="-")),
collapse = ", "))
if(skip_bad_geos == TRUE) {
message("NOTE: Skipping unavailable geolocations. Returning partial data set.")
voters.census <- voters.census[!is.na(voters.census$r_whi),]
}
else {
stop("Stopping predictions. Please revise census data and/or verify the correct year is being supplied. To skip these rows use 'skip_bad_geos = TRUE'")
}
}
keep.vars <- c(names(voter.file)[names(voter.file) != "agecat"],
paste("r", c("whi", "bla", "his", "asi", "oth"), sep = "_"))
df.out <- as.data.frame(rbind(df.out, voters.census[keep.vars]))
}
return(df.out)
}
| /scratch/gouwar.j/cran-all/cranData/wru/R/census_helper_v2.R |
#' Legacy data formatting function.
#'
#' \code{format_legacy_data} formats legacy data from the U.S. census to allow
#' for Bayesian name geocoding.
#'
#' This function allows users to construct datasets for analysis using the census legacy data format.
#' These data are available for the 2020 census at
#' https://www2.census.gov/programs-surveys/decennial/2020/data/01-Redistricting_File--PL_94-171/.
#' This function returns data structured analogously to data from the Census API, which is not yet
#' available for the 2020 Census as of September 2021.
#'
#' @param legacyFilePath A character vector giving the location of a legacy census data folder,
#' sourced from https://www2.census.gov/programs-surveys/decennial/2020/data/01-Redistricting_File--PL_94-171/.
#' These file names should end in ".pl".
#' @param state The two-letter state postal code.
#' @param outFile An optional character vector giving a file path. If provided, the formatted
#' data will be saved there as an RData object. The filepath should end in ".RData".
#'
#' @import PL94171
#'
#' @examples
#' \dontrun{
#' gaCensusData <- format_legacy_data(PL94171::pl_url('GA', 2020), state = 'GA')
#' predict_race_new(ga.voter.file, namesToUse = 'last, first, mid', census.geo = 'block',
#' census.data = gaCensusData)
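#' # Hypothetical output path; also saves the formatted object for reuse:
#' format_legacy_data(PL94171::pl_url('GA', 2020), state = 'GA',
#'                    outFile = 'gaCensusData.RData')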
#'}
#'
#' @export
format_legacy_data <- function(legacyFilePath, state, outFile = NULL) {
# aggregation levels to convert (county, tract, block group, and block)
summaryLevels <- c('050', '140', '150', '750')
# read in the data
pl <- pl_read(legacyFilePath)
pl <- pl_select_standard(pl)
# iterate through the levels
censusData.2020 <- lapply(summaryLevels, FUN = function(level) {
levelData <- pl[pl$summary_level == level,]
# construct the base data frame
df <- data.frame(state = toupper(state),
county = levelData$county,
P005003 = levelData$pop_white,
P005004 = levelData$pop_black,
P005010 = levelData$pop_hisp,
P005006 = levelData$pop_asian,
P005007 = levelData$pop_nhpi,
P005005 = levelData$pop_aian,
P005008 = levelData$pop_other,
P005009 = levelData$pop_two)
# add geographic levels
if(level != '050') {
df$tract <- substr(levelData$GEOID, nchar(levelData$GEOID) - 5, nchar(levelData$GEOID))
if(level != '140') {
df$blockGroup <- substr(levelData$GEOID, nchar(levelData$GEOID), nchar(levelData$GEOID))
if(level != '150')
          df$block <- substr(levelData$GEOID, nchar(levelData$GEOID) - 3, nchar(levelData$GEOID)) # census blocks are four digits
}
}
df
})
# format and optionally save the file
names(censusData.2020) <- c('county', 'tract', 'blockGroup', 'block')
if(!is.null(outFile))
save(censusData.2020, file = outFile)
# return the object
return(censusData.2020)
}
| /scratch/gouwar.j/cran-all/cranData/wru/R/format_legacy_data.R |
#' Census API function.
#'
#' \code{get_census_api} obtains U.S. Census data via the public API.
#'
#' This function obtains U.S. Census data via the public API. User
#' can specify the variables and region(s) for which to obtain data.
#'
#' @inheritParams get_census_data
#' @param data_url URL root of the API,
#' e.g., \code{"https://api.census.gov/data/2020/dec/pl"}.
#' @param var.names A character vector of variables to get,
#' e.g., \code{c("P2_005N", "P2_006N", "P2_007N", "P2_008N")}.
#' If there are more than 50 variables, then function will automatically
#' split variables into separate queries.
#' @param region Character object specifying which region to obtain data for.
#' Must contain "for" and possibly "in",
#' e.g., \code{"for=block:1213&in=state:47+county:015+tract:*"}.
#' @param retry The number of retries at the census website if network interruption occurs.
#' @return If successful, output will be an object of class \code{data.frame}.
#' If unsuccessful, function prints the URL query that caused the error.
#'
#' @examples
#' \dontrun{
#' get_census_api(
#' data_url = "https://api.census.gov/data/2020/dec/pl",
#' var.names = c("P2_005N", "P2_006N", "P2_007N", "P2_008N"), region = "for=county:*&in=state:34"
#' )
#' }
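#'
#' # Sketch: with more than 50 variables, the function splits the query into
#' # chunks automatically (these variable names are illustrative only):
#' \dontrun{
#' get_census_api(
#'   data_url = "https://api.census.gov/data/2020/dec/pl",
#'   var.names = sprintf("P1_%03dN", 1:60), region = "for=county:*&in=state:34"
#' )
#' }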
#'
#' @references
#' Based on code authored by Nicholas Nagle, which is available
#' \href{https://rstudio-pubs-static.s3.amazonaws.com/19337_2e7f827190514c569ea136db788ce850.html}{here}.
#'
#' @keywords internal
get_census_api <- function(
data_url,
key = Sys.getenv("CENSUS_API_KEY"),
var.names,
region,
retry = 0
) {
  if (length(var.names) > 50) {
    var.names <- vec_to_chunk(var.names) # Split variables into chunks of at most 50 per query
    data <- lapply(
      var.names,
      function(x) get_census_api_2(data_url, key, x, region, retry)
    )
  } else {
    get <- paste(var.names, sep = "", collapse = ",")
    data <- list(get_census_api_2(data_url, key, get, region, retry))
  }
  ## Format output. If there were no errors, then paste the data together. If there was an error, just return the unformatted list.
if (all(sapply(data, is.data.frame))) {
colnames <- unlist(lapply(data, names))
data <- do.call(cbind, data)
names(data) <- colnames
## Prettify the output and remove any non-unique columns
data <- data[, unique(colnames, fromLast = TRUE)]
## Reorder columns so that numeric fields follow non-numeric fields
data <- data[, c(which(sapply(data, class) != "numeric"), which(sapply(data, class) == "numeric"))]
return(data)
} else {
message("Unable to create single data.frame in get_census_api")
return(data)
}
}
| /scratch/gouwar.j/cran-all/cranData/wru/R/get_census_api.R |
#' Census API URL assembler.
#'
#' \code{get_census_api_2} assembles URL components for \code{get_census_api}.
#'
#' This function assembles the URL components and sends the request to the Census server.
#' It is used by the \code{get_census_api} function. The user should not need to call this
#' function directly.
#'
#' @inheritParams get_census_data
#' @param data_url URL root of the API,
#' e.g., \code{"https://api.census.gov/data/2020/dec/pl"}.
#' @param get A character vector of variables to get,
#' e.g., \code{c("P2_005N", "P2_006N", "P2_007N", "P2_008N")}.
#' If there are more than 50 variables, then function will automatically
#' split variables into separate queries.
#' @param region Character object specifying which region to obtain data for.
#' Must contain "for" and possibly "in",
#' e.g., \code{"for=block:1213&in=state:47+county:015+tract:*"}.
#' @param retry The number of retries at the census website if network interruption occurs.
#' @return If successful, output will be an object of class \code{data.frame}.
#' If unsuccessful, function prints the URL query that was constructed.
#'
#' @examples
#' \dontrun{try(get_census_api_2(data_url = "https://api.census.gov/data/2020/dec/pl",
#' get = c("P2_005N", "P2_006N", "P2_007N", "P2_008N"), region = "for=county:*&in=state:34"))}
#'
#' @references
#' Based on code authored by Nicholas Nagle, which is available
#' \href{https://rstudio-pubs-static.s3.amazonaws.com/19337_2e7f827190514c569ea136db788ce850.html}{here}.
#'
#' @keywords internal
get_census_api_2 <- function(
data_url,
key = Sys.getenv("CENSUS_API_KEY"),
get,
region,
retry = 3
){
if(length(get) > 1) {
get <- paste(get, collapse=',', sep='')
}
api_call <- paste(data_url, 'key=', key, '&get=', get, '&', region, sep='')
dat_raw <- try(readLines(api_call, warn="F"))
while (inherits(dat_raw, "try-error") && (retry > 0)) {
message(paste("Try census server again:", data_url))
Sys.sleep(1)
retry <- retry - 1
dat_raw <- try(readLines(api_call, warn="F"))
}
if (inherits(dat_raw, "try-error")) {
message("Data access failure at the census website, please try again by re-run the previous command")
stop(message(api_call))
return()
}
  if (any(grepl("Invalid Key", dat_raw))) {
    stop('Invalid Key:
       A valid key must be included with each data API request.
       You included a key with this request, however, it is not valid.
       Please check your key and try again.'
    )
  }
  ## Split the datastream into a list with each row as an element.
  tmp <- strsplit(gsub("[^[:alnum:], _]", '', dat_raw), "\\,")
  dat_df <- as.data.frame(do.call(rbind, tmp[-1]), stringsAsFactors = FALSE)
  names(dat_df) <- tmp[[1]]
## Convert to numeric
value_cols <- grep("[0-9]", names(dat_df), value=TRUE)
for(col in value_cols) dat_df[,col] <- as.numeric(as.character(dat_df[,col]))
return(dat_df)
}
| /scratch/gouwar.j/cran-all/cranData/wru/R/get_census_api_2.R |
#' Multilevel Census data download function.
#'
#' \code{get_census_data} returns county-, tract-, and block-level Census data
#' for specified state(s). Using this function to download Census data in advance
#' can save considerable time when running \code{predict_race} and \code{census_helper}.
#'
#' @param key A character string containing a valid Census API key,
#' which can be requested from the
#' [U.S. Census API key signup page](https://api.census.gov/data/key_signup.html).
#'
#' By default, attempts to find a census key stored in an
#' [environment variable][Sys.getenv] named `CENSUS_API_KEY`.
#'
#' @param states which states to extract Census data for, e.g., \code{c("NJ", "NY")}.
#' @param age A \code{TRUE}/\code{FALSE} object indicating whether to condition on
#' age or not. If \code{FALSE} (default), function will return Pr(Geolocation | Race).
#' If \code{TRUE}, function will return Pr(Geolocation, Age | Race).
#' If \code{\var{sex}} is also \code{TRUE}, function will return Pr(Geolocation, Age, Sex | Race).
#' @param sex A \code{TRUE}/\code{FALSE} object indicating whether to condition on
#' sex or not. If \code{FALSE} (default), function will return Pr(Geolocation | Race).
#' If \code{TRUE}, function will return Pr(Geolocation, Sex | Race).
#' If \code{\var{age}} is also \code{TRUE}, function will return Pr(Geolocation, Age, Sex | Race).
#' @param year A character object specifying the year of U.S. Census data to be downloaded.
#' Use \code{"2010"}, or \code{"2020"}. Default is \code{"2020"}.
#' Warning: 2020 U.S. Census data is downloaded only when \code{\var{age}} and
#' \code{\var{sex}} are both \code{FALSE}.
#' @param census.geo An optional character vector specifying what level of
#' geography to use to merge in U.S. Census geographic data. Currently
#' \code{"county"}, \code{"tract"}, \code{"block_group"}, \code{"block"},
#' \code{"place"}, and \code{"zcta"} are supported.
#' @param retry The number of retries at the census website if network interruption occurs.
#' @param county.list A named list of character vectors of counties present in your \var{voter.file}, per state.
#' @return Output will be an object of class \code{list} indexed by state.
#' Output will contain a subset of the following elements:
#' \code{state}, \code{age}, \code{sex},
#' \code{county}, \code{tract}, \code{block_group}, \code{block}, and \code{place}.
#'
#' @export
#'
#' @examples
#' \dontrun{get_census_data(states = c("NJ", "NY"), age = TRUE, sex = FALSE)}
#' \dontrun{get_census_data(states = "MN", age = FALSE, sex = FALSE, year = "2020")}
get_census_data <- function(
key = Sys.getenv("CENSUS_API_KEY"),
states,
age = FALSE,
sex = FALSE,
year = "2020",
census.geo = c("tract", "block", "block_group", "county", "place", "zcta"),
retry = 3,
county.list = NULL
) {
key <- validate_key(key)
census.geo <- tolower(census.geo)
census.geo <- rlang::arg_match(census.geo)
states <- toupper(states)
message("Collecting ", year, " Census data...")
CensusObj <- NULL
for (s in states) {
CensusObj[[s]] <- list(state = s, age = age, sex = sex, year = year)
if (census.geo == "place") {
place <- census_geo_api(key, s, geo = "place", age, sex, year, retry)
CensusObj[[s]]$place <- place
}
if (census.geo == "block") {
block <- census_geo_api(key, s, geo = "block", age, sex, year, retry, counties = county.list[[s]])
CensusObj[[s]]$block <- block
}
if (census.geo == "block_group") {
block_group <- census_geo_api(key, s, geo = "block_group", age, sex, year, retry, counties = county.list[[s]])
CensusObj[[s]]$block_group <- block_group
}
if ((census.geo == "block") || (census.geo == "tract") || (census.geo == "block_group")) {
tract <- census_geo_api(key, s, geo = "tract", age, sex, year, retry, counties = county.list[[s]])
CensusObj[[s]]$tract <- tract
}
if ((census.geo == "block") || (census.geo == "tract") || (census.geo == "county") || (census.geo == "block_group")) {
county <- census_geo_api(key, s, geo = "county", age, sex, year, retry)
CensusObj[[s]]$county <- county
}
if (census.geo == "zcta") {
if (!is.null(county.list)) {
cli::cli_abort(c(
"The {.arg county.list} argument must be set to {.code NULL}
when {.arg census_geo} is {.val zcta},
because the Census Bureau does release data that divides ZCTAs by county."
))
}
CensusObj[[s]]$zcta <- census_geo_api(key, s, geo = "zcta", age, sex, year, retry)
}
}
return(CensusObj)
} | /scratch/gouwar.j/cran-all/cranData/wru/R/get_census_data.R |
#' Surname probability merging function.
#'
#' \code{merge_names} merges names in a user-input dataset with corresponding
#' race/ethnicity probabilities derived from both the U.S. Census Surname List
#' and Spanish Surname List and voter files from states in the Southern U.S.
#'
#' This function allows users to match names in their dataset with database entries
#' estimating P(name | ethnicity) for each of the five major racial groups for each
#' name. The database probabilities are derived from both the U.S. Census Surname List
#' and Spanish Surname List and voter files from states in the Southern U.S.
#'
#' By default, the function matches names as follows:
#' 1) Search raw surnames in the database;
#' 2) Remove any punctuation and search again;
#' 3) Remove any spaces and search again;
#' 4) Remove suffixes (e.g., "Jr") and search again (last names only)
#' 5) Split double-barreled names into two parts and search first part of name;
#' 6) Split double-barreled names into two parts and search second part of name;
#'
#' Each step only applies to names not matched in a previous step.
#' Steps 2 through 6 are not applied if \code{clean.surname} is FALSE.
#'
#' Note: Any name appearing only on the Spanish Surname List is assigned a
#' probability of 1 for Hispanics/Latinos and 0 for all other racial groups.
#'
#' @param voter.file An object of class \code{data.frame}. Must contain a row for each individual being predicted,
#' as well as a field named \code{\var{last}} containing each individual's surname.
#' If first name is also being used for prediction, the file must also contain a field
#' named \code{\var{first}}. If middle name is also being used for prediction, the field
#' must also contain a field named \code{\var{middle}}.
#' @param namesToUse A character vector identifying which names to use for the prediction.
#' The default value is \code{"last"}, indicating that only the last name will be used.
#' Other options are \code{"last, first"}, indicating that both last and first names will be
#' used, and \code{"last, first, middle"}, indicating that last, first, and middle names will all
#' be used.
#' @param census.surname A \code{TRUE}/\code{FALSE} object. If \code{TRUE},
#' function will call \code{merge_surnames} to merge in Pr(Race | Surname)
#' from U.S. Census Surname List (2000, 2010, or 2020) and Spanish Surname List.
#' If \code{FALSE}, user must provide a \code{name.dictionary} (see below).
#' Default is \code{TRUE}.
#' @param table.surnames An object of class \code{data.frame} provided by the
#' users as an alternative surname dictionary. It will consist of a list of
#' U.S. surnames, along with the associated probabilities P(name | ethnicity)
#' for ethnicities: white, Black, Hispanic, Asian, and other. Default is \code{NULL}.
#' (\code{\var{last_name}} for U.S. surnames, \code{\var{p_whi_last}} for White,
#' \code{\var{p_bla_last}} for Black, \code{\var{p_his_last}} for Hispanic,
#' \code{\var{p_asi_last}} for Asian, \code{\var{p_oth_last}} for other).
#' @param table.first See \code{\var{table.surnames}}.
#' @param table.middle See \code{\var{table.surnames}}.
#' @param impute.missing See \code{predict_race}.
#' @param model See \code{predict_race}.
#' @param clean.names A \code{TRUE}/\code{FALSE} object. If \code{TRUE},
#' any surnames in \code{\var{voter.file}} that cannot initially be matched
#' to the database will be cleaned, according to U.S. Census specifications,
#' in order to increase the chance of finding a match. Default is \code{TRUE}.
#' @return Output will be an object of class \code{data.frame}. It will
#' consist of the original user-input data with additional columns that
#' specify the part of each name matched with the name dictionaries
#' (\code{\var{lastname.match}}, plus \code{\var{firstname.match}} and
#' \code{\var{middlename.match}} when those names are used), and the
#' name-race association values for each racial group
#' (\code{\var{c_whi_last}} for White, \code{\var{c_bla_last}} for Black,
#' \code{\var{c_his_last}} for Hispanic/Latino,
#' \code{\var{c_asi_last}} for Asian and Pacific Islander, and
#' \code{\var{c_oth_last}} for Other/Mixed, with analogous
#' \code{_first} and \code{_middle} columns when applicable).
#' @importFrom dplyr coalesce
#' @examples
#' data(voters)
#' \dontrun{try(merge_names(voters, namesToUse = "surname", census.surname = TRUE))}
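#' # Sketch with a minimal, made-up voter file using first names as well;
#' # assumes the name dictionaries can be downloaded (see wru_data_preflight):
#' \dontrun{
#' vf <- data.frame(surname = c("SMITH", "GARCIA"), first = c("JOHN", "MARIA"))
#' try(merge_names(vf, namesToUse = "surname, first", census.surname = TRUE))
#' }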
#' @keywords internal
merge_names <- function(voter.file, namesToUse, census.surname, table.surnames = NULL, table.first = NULL, table.middle = NULL, clean.names = TRUE, impute.missing = FALSE, model = "BISG") {
# check the names
if (namesToUse == "surname") {
if (!("surname" %in% names(voter.file))) {
stop("Voter data frame needs to have a column named 'surname'.")
}
} else if (namesToUse == "surname, first") {
if (!("surname" %in% names(voter.file)) || !("first" %in% names(voter.file))) {
stop("Voter data frame needs to have a column named 'surname' and a column called 'first'.")
}
} else if (namesToUse == "surname, first, middle") {
if (!("surname" %in% names(voter.file)) || !("first" %in% names(voter.file)) ||
!("middle" %in% names(voter.file))) {
stop("Voter data frame needs to have a column named 'surname', a column called 'first', and a column called 'middle'.")
}
}
wru_data_preflight()
path <- ifelse(getOption("wru_data_wd", default = FALSE), getwd(), tempdir())
first_c <- readRDS(paste0(path, "/wru-data-first_c.rds"))
mid_c <- readRDS(paste0(path, "/wru-data-mid_c.rds"))
if(census.surname){
last_c <- readRDS(paste0(path, "/wru-data-census_last_c.rds"))
} else {
last_c <- readRDS(paste0(path, "/wru-data-last_c.rds"))
}
p_eth <- c("c_whi", "c_bla", "c_his", "c_asi", "c_oth")
if (is.null(table.surnames)) {
lastNameDict <- last_c
} else {
lastNameDict <- table.surnames
names(lastNameDict) <- names(last_c)
lastNameDict[is.na(lastNameDict)] <- 0
}
if (is.null(table.first)) {
firstNameDict <- first_c
} else {
firstNameDict <- table.first
firstNameDict[is.na(firstNameDict)] <- 0
names(firstNameDict) <- names(first_c)
}
if (is.null(table.middle)) {
middleNameDict <- mid_c
} else {
middleNameDict <- table.middle
middleNameDict[is.na(middleNameDict)] <- 0
names(middleNameDict) <- names(mid_c)
}
nameDict <- list(
"first" = firstNameDict,
"middle" = middleNameDict,
"last" = lastNameDict
)
## Convert names in voter file to upper case
df <- voter.file
df$lastname.match <- df$lastname.upper <- toupper(as.character(df$surname))
if (grepl("first", namesToUse)) {
df$firstname.match <- df$firstname.upper <- toupper(as.character(df$first))
}
if (grepl("middle", namesToUse)) {
df$middlename.match <- df$middlename.upper <- toupper(as.character(df$middle))
df$middlename.match[is.na(df$middlename.match)] <- ""
}
## Merge Surnames with Census List (No Cleaning Yet)
df <- merge(df, lastNameDict, by.x = "lastname.match", by.y = "last_name", all.x = TRUE, sort = FALSE)
if (grepl("first", namesToUse)) {
df <- merge(df, firstNameDict, by.x = "firstname.match", by.y = "first_name", all.x = TRUE, sort = FALSE)
}
if (grepl("middle", namesToUse)) {
df <- merge(df, middleNameDict, by.x = "middlename.match", by.y = "middle_name", all.x = TRUE, sort = FALSE)
}
if (namesToUse == "surname" && sum(!(df$lastname.upper %in% lastNameDict$last_name)) == 0) {
return(df[, c(names(voter.file), "lastname.match", paste0(p_eth, "_last"))])
}
if (namesToUse == "surname, first" && sum(!(df$lastname.match %in% lastNameDict$last_name)) == 0 &&
sum(!(df$firstname.upper %in% firstNameDict$first_name)) == 0) {
return(df[, c(names(voter.file), "lastname.match", "firstname.match", paste0(p_eth, "_last"), paste0(p_eth, "_first"))])
}
if (namesToUse == "surname, first, middle" && sum(!(df$lastname.match %in% lastNameDict$last_name)) == 0 &&
sum(!(df$firstname.upper %in% firstNameDict$first_name)) == 0 && sum(!(df$middlename.upper %in% middleNameDict$middle_name)) == 0) {
return(df[, c(names(voter.file), "lastname.match", "firstname.match", "middlename.match", paste0(p_eth, "_last"), paste0(p_eth, "_first"), paste0(p_eth, "_middle"))])
}
## Clean names (if specified by user)
if (clean.names) {
for (nameType in strsplit(namesToUse, ", ")[[1]]) {
if(nameType=="surname"){
nameType <- "last"
}
df1 <- df[!is.na(df[, paste("c_whi_", nameType, sep = "")]), ] # Matched names
df2 <- df[is.na(df[, paste("c_whi_", nameType, sep = "")]), ] # Unmatched names
## Remove All Punctuation and Try Merge Again
if (nrow(df2) > 0) {
df2[, paste(nameType, "name.match", sep = "")] <- gsub("[^[:alnum:] ]", "", df2[, paste(nameType, "name.upper", sep = "")])
df2 <- merge(df2[, !grepl(paste("_", nameType, sep = ""), names(df2))], nameDict[[nameType]],
all.x = TRUE,
by.x = paste(nameType, "name.match", sep = ""), by.y = paste(nameType, "name", sep = "_"),
sort = FALSE
)
df2 <- df2[, names(df1)] # reorder the columns
if (sum(!is.na(df2[, paste("c_whi_", nameType, sep = ""), ])) > 0) {
df1 <- rbind(df1, df2[!is.na(df2[, paste("c_whi_", nameType, sep = ""), ]), ])
df2 <- df2[is.na(df2[, paste("c_whi_", nameType, sep = "")]), ]
}
}
## Remove All Spaces and Try Merge Again
if (nrow(df2) > 0) {
df2[, paste(nameType, "name.match", sep = "")] <- gsub(" ", "", df2[, paste(nameType, "name.match", sep = "")])
df2 <- merge(df2[, !grepl(paste("_", nameType, sep = ""), names(df2))], nameDict[[nameType]],
all.x = TRUE,
by.x = paste(nameType, "name.match", sep = ""), by.y = paste(nameType, "name", sep = "_"),
sort = FALSE
)
df2 <- df2[, names(df1)] # reorder the columns
if (sum(!is.na(df2[, paste("c_whi_", nameType, sep = ""), ])) > 0) {
df1 <- rbind(df1, df2[!is.na(df2[, paste("c_whi_", nameType, sep = ""), ]), ])
df2 <- df2[is.na(df2[, paste("c_whi_", nameType, sep = "")]), ]
}
}
# Edits specific to common issues with last names
if (nameType == "last" & nrow(df2) > 0) {
## Remove Jr/Sr/III Suffixes for last names
suffix <- c("JUNIOR", "SENIOR", "THIRD", "III", "JR", " II", " J R", " S R", " IV")
for (i in 1:length(suffix)) {
df2$lastname.match <- ifelse(substr(df2$lastname.match, nchar(df2$lastname.match) - (nchar(suffix)[i] - 1), nchar(df2$lastname.match)) == suffix[i],
substr(df2$lastname.match, 1, nchar(df2$lastname.match) - nchar(suffix)[i]),
df2$lastname.match
)
}
df2$lastname.match <- ifelse(nchar(df2$lastname.match) >= 7,
ifelse(substr(df2$lastname.match, nchar(df2$lastname.match) - 1, nchar(df2$lastname.match)) == "SR",
substr(df2$lastname.match, 1, nchar(df2$lastname.match) - 2),
df2$lastname.match
),
df2$lastname.match
) # Remove "SR" only if name has at least 7 characters
df2 <- merge(
df2[, !grepl(paste("_", nameType, sep = ""), names(df2))],
lastNameDict, by.x = "lastname.match", by.y = "last_name",
all.x = TRUE, sort = FALSE)
df2 <- df2[, names(df1)] # reorder the columns
if (sum(!is.na(df2[, paste("c_whi_", nameType, sep = ""), ])) > 0) {
df1 <- rbind(df1, df2[!is.na(df2[, paste("c_whi_", nameType, sep = ""), ]), ])
df2 <- df2[is.na(df2[, paste("c_whi_", nameType, sep = "")]), ]
}
}
## Names with Hyphens or Spaces, e.g. Double-Barreled Names
if (nrow(df2) > 0) {
df2$name2 <- df2$name1 <- NA
df2$name1[grep("-", df2[, paste(nameType, "name.upper", sep = "")])] <- sapply(strsplit(grep("-", df2[, paste(nameType, "name.upper", sep = "")], value = T), "-"), "[", 1)
df2$name2[grep("-", df2[, paste(nameType, "name.upper", sep = "")])] <- sapply(strsplit(grep("-", df2[, paste(nameType, "name.upper", sep = "")], value = T), "-"), "[", 2)
df2$name1[grep(" ", df2[, paste(nameType, "name.upper", sep = "")])] <- sapply(strsplit(grep(" ", df2[, paste(nameType, "name.upper", sep = "")], value = T), " "), "[", 1)
df2$name2[grep(" ", df2[, paste(nameType, "name.upper", sep = "")])] <- sapply(strsplit(grep(" ", df2[, paste(nameType, "name.upper", sep = "")], value = T), " "), "[", 2)
## Use first half of name to merge in priors
df2[, paste(nameType, "name.match", sep = "")] <- as.character(df2$name1)
df2 <- merge(df2[, !grepl(paste("_", nameType, sep = ""), names(df2))], nameDict[[nameType]],
all.x = TRUE,
by.x = paste(nameType, "name.match", sep = ""), by.y = paste(nameType, "name", sep = "_"),
sort = FALSE
)
df2 <- df2[, c(names(df1), "name1", "name2")] # reorder the columns
if (sum(!is.na(df2[, paste("c_whi_", nameType, sep = ""), ])) > 0) {
df1 <- rbind(df1, df2[!is.na(df2[, paste("c_whi_", nameType, sep = "")]), !(names(df2) %in% c("name1", "name2"))])
df2 <- df2[is.na(df2[, paste("c_whi_", nameType, sep = "")]), ]
}
}
## Use second half of name to merge in priors for rest
if (nrow(df2) > 0) {
df2[, paste(nameType, "name.match", sep = "")] <- as.character(df2$name2)
df2 <- merge(df2[, !grepl(paste("_", nameType, sep = ""), names(df2))], nameDict[[nameType]],
all.x = TRUE,
by.x = paste(nameType, "name.match", sep = ""), by.y = paste(nameType, "name", sep = "_"),
sort = FALSE
)
df2 <- df2[, c(names(df1), "name1", "name2")] # reorder the columns
if (sum(!is.na(df2[, paste("c_whi_", nameType, sep = ""), ])) > 0) {
df1 <- rbind(df1, df2[!is.na(df2[, paste("c_whi_", nameType, sep = "")]), !(names(df2) %in% c("name1", "name2"))])
df2 <- df2[is.na(df2[, paste("c_whi_", nameType, sep = "")]), ]
}
}
if (nrow(df2) > 0) {
df <- rbind(df1, df2[, !(names(df2) %in% c("name1", "name2"))])
} else {
df <- df1
}
}
}
  ## For unmatched names, fill with the column mean if impute.missing is TRUE, or with a constant (1) otherwise
c_miss_last <- mean(is.na(df$c_whi_last))
if (c_miss_last > 0) {
message(paste(paste(sum(is.na(df$c_whi_last)), " (", round(100 * mean(is.na(df$c_whi_last)), 1), "%) individuals' last names were not matched.", sep = "")))
}
if (grepl("first", namesToUse)) {
c_miss_first <- mean(is.na(df$c_whi_first))
if (c_miss_first > 0) {
message(paste(paste(sum(is.na(df$c_whi_first)), " (", round(100 * mean(is.na(df$c_whi_first)), 1), "%) individuals' first names were not matched.", sep = "")))
}
}
if (grepl("middle", namesToUse)) {
c_miss_mid <- mean(is.na(df$c_whi_middle))
if (c_miss_mid > 0) {
message(paste(paste(sum(is.na(df$c_whi_middle)), " (", round(100 * mean(is.na(df$c_whi_middle)), 1), "%) individuals' middle names were not matched.", sep = "")))
}
}
if (impute.missing) {
impute.vec <- colMeans(df[, grep("c_", names(df), value = TRUE)], na.rm = TRUE)
for (i in grep("c_", names(df), value = TRUE)) {
df[, i] <- dplyr::coalesce(df[, i], impute.vec[i])
}
} else {
for (i in grep("c_", names(df), value = TRUE)) {
df[, i] <- dplyr::coalesce(df[, i], 1)
}
}
# return the data
if (namesToUse == "surname") {
return(df[, c(names(voter.file), "lastname.match", paste(p_eth, "last", sep = "_"))])
} else if (namesToUse == "surname, first") {
return(df[, c(
names(voter.file), "lastname.match", "firstname.match",
paste(p_eth, "last", sep = "_"), paste(p_eth, "first", sep = "_")
)])
} else if (namesToUse == "surname, first, middle") {
return(df[, c(
names(voter.file), "lastname.match", "firstname.match", "middlename.match",
paste(p_eth, "last", sep = "_"), paste(p_eth, "first", sep = "_"), paste(p_eth, "middle", sep = "_")
)])
}
}
#' Preflight for name data
#'
#' Checks if namedata is available in the current working directory, if not
#' downloads it from github using piggyback. By default, wru will download the
#' data to a temporary directory that lasts as long as your session does.
#' However, you may wish to set the \code{wru_data_wd} option to save the
#' downloaded data to your current working directory for more permanence.
#'
#' @importFrom piggyback pb_download
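#'
#' @examples
#' \dontrun{
#' # Sketch: persist the downloaded dictionaries in the current working
#' # directory (instead of a session-temporary folder) before fetching.
#' options(wru_data_wd = TRUE)
#' wru_data_preflight()
#' }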
wru_data_preflight <- function() {
dest <- ifelse(getOption("wru_data_wd", default = FALSE), getwd(), tempdir())
tryCatch(
    # piggyback ignores the token when it is the empty string ""
piggyback::pb_download(repo = "kosukeimai/wru", dest = dest, .token = "", tag = "v2.0.0"),
error = function(e) message("There was an error retrieving data: ", e$message)
)
}
| /scratch/gouwar.j/cran-all/cranData/wru/R/merge_names.R |
#' Surname probability merging function.
#'
#' \code{merge_surnames} merges surnames in user-input dataset with corresponding
#' race/ethnicity probabilities from U.S. Census Surname List and Spanish Surname List.
#'
#' This function allows users to match surnames in their dataset with the U.S.
#' Census Surname List (from 2000 or 2010) and Spanish Surname List to obtain
#' Pr(Race | Surname) for each of the five major racial groups.
#'
#' By default, the function matches surnames to the Census list as follows:
#' 1) Search raw surnames in Census surname list;
#' 2) Remove any punctuation and search again;
#' 3) Remove any spaces and search again;
#' 4) Remove suffixes (e.g., Jr) and search again;
#' 5) Split double-barreled surnames into two parts and search first part of name;
#' 6) Split double-barreled surnames into two parts and search second part of name;
#' 7) For any remaining names, impute probabilities using distribution
#' for all names not appearing on Census list.
#'
#' Each step only applies to surnames not matched in a previous step.
#' Steps 2 through 7 are not applied if \code{clean.surname} is FALSE.
#'
#' Note: Any name appearing only on the Spanish Surname List is assigned a
#' probability of 1 for Hispanics/Latinos and 0 for all other racial groups.
#'
#' @param voter.file An object of class \code{data.frame}. Must contain a field
#' named 'surname' containing list of surnames to be merged with Census lists.
#' @param surname.year An object of class \code{numeric} indicating which year
#' Census Surname List is from. Accepted values are \code{2000}, \code{2010}, and
#' \code{2020}. Default is \code{2020}; for any value other than \code{2000} or
#' \code{2010}, the user-supplied \code{\var{name.data}} is used as the surname list.
#' @param name.data An object of class \code{data.frame}. Must contain a leading
#' column of surnames, and 5 subsequent columns, with Pr(Race | Surname) for each
#' of the five major racial categories.
#' @param clean.surname A \code{TRUE}/\code{FALSE} object. If \code{TRUE},
#' any surnames in \code{\var{voter.file}} that cannot initially be matched
#' to surname lists will be cleaned, according to U.S. Census specifications,
#' in order to increase the chance of finding a match. Default is \code{TRUE}.
#' @param impute.missing A \code{TRUE}/\code{FALSE} object. If \code{TRUE},
#' race/ethnicity probabilities will be imputed for unmatched names using
#' race/ethnicity distribution for all other names (i.e., not on Census List).
#' Default is \code{TRUE}.
#' @return Output will be an object of class \code{data.frame}. It will
#' consist of the original user-input data with additional columns that
#' specify the part of the name matched with Census data (\code{\var{surname.match}}),
#' and the probabilities Pr(Race | Surname) for each racial group
#' (\code{\var{p_whi}} for White, \code{\var{p_bla}} for Black,
#' \code{\var{p_his}} for Hispanic/Latino,
#' \code{\var{p_asi}} for Asian and Pacific Islander, and
#' \code{\var{p_oth}} for Other/Mixed).
#'
#' @examples
#' data(voters)
#' \dontrun{try(merge_surnames(voters))}
#'
#' @keywords internal
merge_surnames <- function(voter.file, surname.year = 2020, name.data, clean.surname = TRUE, impute.missing = TRUE) {
if ("surname" %in% names(voter.file) == FALSE) {
stop('Data does not contain surname field.')
}
## Census Surname List
if (surname.year == 2000) {
surnames2000$surname <- as.character(surnames2000$surname)
surnames <- surnames2000
} else if (surname.year == 2010) {
surnames2010$surname <- as.character(surnames2010$surname)
surnames <- surnames2010
} else {
surnames <- name.data
colnames(surnames) <- colnames(surnames2010)
surnames$surname <- as.character(surnames$surname)
}
p_eth <- c("p_whi", "p_bla", "p_his", "p_asi", "p_oth")
## Convert Surnames in Voter File to Upper Case
df <- voter.file
df$surname.match <- df$surname.upper <- toupper(as.character(df$surname))
## Merge Surnames with Census List (No Cleaning Yet)
df <- merge(df[names(df) %in% p_eth == FALSE], surnames[c("surname", p_eth)], by.x = "surname.match", by.y = "surname", all.x = TRUE)
if (nrow(df[df$surname.upper %in% surnames$surname == F, ]) == 0) {
return(df[, c(names(voter.file), "surname.match", p_eth)])
}
df[df$surname.upper %in% surnames$surname == F, ]$surname.match <- ""
df1 <- df[df$surname.upper %in% surnames$surname, ] #Matched surnames
df2 <- df[df$surname.upper %in% surnames$surname == F, ] #Unmatched surnames
## Clean Surnames (if Specified by User)
if (clean.surname) {
## Remove All Punctuation and Try Merge Again
df2$surname.match <- gsub("[^[:alnum:] ]", "", df2$surname.upper)
df2 <- merge(df2[names(df2) %in% p_eth == FALSE], surnames[c("surname", p_eth)], by.x = "surname.match", by.y = "surname", all.x = TRUE)
if (nrow(df2[df2$surname.match %in% surnames$surname, ]) > 0) {
df1 <- rbind(df1, df2[df2$surname.match %in% surnames$surname, ])
df2 <- df2[df2$surname.match %in% surnames$surname == F, ]
if (nrow(df2[df2$surname.match %in% surnames$surname, ]) > 0) {df2$surname.match <- ""}
}
## Remove All Spaces and Try Merge Again
df2$surname.match <- gsub(" ", "", df2$surname.match)
df2 <- merge(df2[names(df2) %in% p_eth == FALSE], surnames[c("surname", p_eth)], by.x = "surname.match", by.y = "surname", all.x = TRUE)
if (nrow(df2[df2$surname.match %in% surnames$surname, ]) > 0) {
df1 <- rbind(df1, df2[df2$surname.match %in% surnames$surname, ])
df2 <- df2[df2$surname.match %in% surnames$surname == FALSE, ]
if (nrow(df2[df2$surname.match %in% surnames$surname, ]) > 0) {df2$surname.match <- ""}
}
## Remove Jr/Sr/III Suffixes
suffix <- c("JUNIOR", "SENIOR", "THIRD", "III", "JR", " II", " J R", " S R", " IV")
for (i in 1:length(suffix)) {
df2$surname.match <- ifelse(substr(df2$surname.match, nchar(df2$surname.match) - (nchar(suffix)[i] - 1), nchar(df2$surname.match)) == suffix[i],
substr(df2$surname.match, 1, nchar(df2$surname.match) - nchar(suffix)[i]),
df2$surname.match)
}
df2$surname.match <- ifelse(nchar(df2$surname.match) >= 7,
ifelse(substr(df2$surname.match, nchar(df2$surname.match) - 1, nchar(df2$surname.match)) == "SR",
substr(df2$surname.match, 1, nchar(df2$surname.match) - 2),
df2$surname.match),
df2$surname.match) #Remove "SR" only if name has at least 7 characters
df2 <- merge(df2[names(df2) %in% p_eth == FALSE], surnames[c("surname", p_eth)], by.x = "surname.match", by.y = "surname", all.x = TRUE)
if (nrow(df2[df2$surname.match %in% surnames$surname, ]) > 0) {
df1 <- rbind(df1, df2[df2$surname.match %in% surnames$surname, ])
df2 <- df2[df2$surname.match %in% surnames$surname == FALSE, ]
if (nrow(df2[df2$surname.match %in% surnames$surname, ]) > 0) {df2$surname.match <- ""}
}
## Names with Hyphens or Spaces, e.g. Double-Barreled Names
df2$surname2 <- df2$surname1 <- NA
df2$surname1[grep("-", df2$surname.upper)] <- sapply(strsplit(grep("-", df2$surname.upper, value = TRUE), "-"), "[", 1)
df2$surname2[grep("-", df2$surname.upper)] <- sapply(strsplit(grep("-", df2$surname.upper, value = TRUE), "-"), "[", 2)
df2$surname1[grep(" ", df2$surname.upper)] <- sapply(strsplit(grep(" ", df2$surname.upper, value = TRUE), " "), "[", 1)
df2$surname2[grep(" ", df2$surname.upper)] <- sapply(strsplit(grep(" ", df2$surname.upper, value = TRUE), " "), "[", 2)
## Use first half of name to merge in priors
df2$surname.match <- as.character(df2$surname1)
df2 <- merge(df2[names(df2) %in% c(p_eth) == F], surnames[c("surname", p_eth)], by.x = "surname.match", by.y = "surname", all.x = TRUE)[names(df2)]
if (nrow(df2[df2$surname.match %in% surnames$surname, ]) > 0) {
df1 <- rbind(df1, df2[df2$surname.match %in% surnames$surname, names(df2) %in% names(df1)])
df2 <- df2[df2$surname.match %in% surnames$surname == F, ]
if (nrow(df2[df2$surname.match %in% surnames$surname, ]) > 0) {df2$surname.match <- ""}
}
## Use second half of name to merge in priors for rest
df2$surname.match <- as.character(df2$surname2)
df2 <- merge(df2[names(df2) %in% c(p_eth, "surname1", "surname2") == F], surnames[c("surname", p_eth)], by.x = "surname.match", by.y = "surname", all.x = TRUE)[names(df2) %in% c("surname1", "surname2") == F]
if (nrow(df2[df2$surname.match %in% surnames$surname, ]) > 0) {
df1 <- rbind(df1, df2[df2$surname.match %in% surnames$surname, names(df2) %in% names(df1)])
df2 <- df2[df2$surname.match %in% surnames$surname == F, ]
if (nrow(df2[df2$surname.match %in% surnames$surname, ]) > 0) {df2$surname.match <- ""}
}
}
## Impute priors for names not on Census lists
if (impute.missing) {
if (nrow(df2) > 0) {
df2$surname.match <- ""
df2$p_whi <- .6665; df2$p_bla <- .0853; df2$p_his <- .1367; df2$p_asi <- .0797; df2$p_oth <- .0318
message(paste("Probabilities were imputed for", nrow(df2), ifelse(nrow(df2) == 1, "surname", "surnames"), "that could not be matched to Census list."))
}
} else message(paste(nrow(df2), ifelse(nrow(df2) == 1, "surname was", "surnames were"), "not matched."))
df <- rbind(df1, df2)
return(df[, c(names(voter.file), "surname.match", p_eth)])
}
| /scratch/gouwar.j/cran-all/cranData/wru/R/merge_surnames.R |
#' Pre-process vector of names to match census style. Internal function
#'
#' @param voter_names Character vector to be pre-processed.
#' @param target_names Character vector of census names to be matched.
#'
#' @return A character vector of pre-processed names
#' @keywords internal
#'
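#' @examples
#' \dontrun{
#' # Illustrative only: punctuation, spaces, and suffixes are stripped until
#' # each name matches the toy target list below.
#' .name_preproc(c("o'brien", "smith junior"),
#'               target_names = c("OBRIEN", "SMITH"))
#' # Expected: c("OBRIEN", "SMITH")
#' }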
.name_preproc <- function(voter_names, target_names){
post_names <- as.character(voter_names)
post_names <- toupper(post_names)
## 1) Raw match
match_tmp <- post_names %in% target_names
## 2) remove punctuation
post_names[!match_tmp] <- gsub("[^[:alnum:] ]", "",
post_names[!match_tmp])
match_tmp <- post_names %in% target_names
## 3) Remove spaces
post_names[!match_tmp] <- gsub(" ", "",
post_names[!match_tmp])
match_tmp <- post_names %in% target_names
## 4) Remove suffixes
suffix <- c("JUNIOR", "SENIOR", "THIRD", "III", "JR", " II", " J R", " S R", " IV")
for (i in 1:length(suffix)) {
post_names[!match_tmp] <- ifelse(substr(post_names[!match_tmp],
nchar(post_names[!match_tmp]) - (nchar(suffix)[i] - 1),
nchar(post_names[!match_tmp])) == suffix[i],
substr(post_names[!match_tmp], 1, nchar(post_names[!match_tmp]) - nchar(suffix)[i]),
post_names[!match_tmp])
}
post_names[!match_tmp] <- ifelse(nchar(post_names[!match_tmp]) >= 7,
ifelse(substr(post_names[!match_tmp],
nchar(post_names[!match_tmp]) - 1,
nchar(post_names[!match_tmp])) == "SR",
substr(post_names[!match_tmp], 1, nchar(post_names[!match_tmp]) - 2),
post_names[!match_tmp]),
post_names[!match_tmp]) #Remove "SR" only if name has at least 7 characters
match_tmp <- post_names %in% target_names
  ## 5) Split double-barreled names, match on first part
  post_names_tmp <- post_names
  unmatched <- which(!match_tmp)
  has_sep <- grepl("[-, ]", post_names[unmatched])
  if (any(has_sep)) {
    post_names_tmp[unmatched[has_sep]] <- sapply(
      strsplit(post_names[unmatched[has_sep]], "[-, ]"), "[", 1
    )
  }
  match_tmp_2 <- post_names_tmp %in% target_names
  post_names[match_tmp_2 & !match_tmp] <- post_names_tmp[match_tmp_2 & !match_tmp]
  match_tmp <- post_names %in% target_names
  ## 6) Split double-barreled names, match on second part
  post_names_tmp <- post_names
  unmatched <- which(!match_tmp)
  has_sep <- grepl("[-, ]", post_names[unmatched])
  if (any(has_sep)) {
    post_names_tmp[unmatched[has_sep]] <- sapply(
      strsplit(post_names[unmatched[has_sep]], "[-, ]"), "[", 2
    )
  }
  match_tmp_2 <- post_names_tmp %in% target_names
  post_names[match_tmp_2 & !match_tmp] <- post_names_tmp[match_tmp_2 & !match_tmp]
  return(unlist(post_names))
} | /scratch/gouwar.j/cran-all/cranData/wru/R/name_preproc.R |
#' Race prediction function.
#'
#' \code{predict_race} makes probabilistic estimates of individual-level race/ethnicity.
#'
#' This function implements the Bayesian race prediction methods outlined in
#' Imai and Khanna (2015). The function produces probabilistic estimates of
#' individual-level race/ethnicity, based on surname, geolocation, and party.
#' @param voter.file An object of class \code{data.frame}.
#' Must contain a row for each individual being predicted,
#' as well as a field named \code{\var{surname}} containing each individual's surname.
#' If using geolocation in predictions, \code{\var{voter.file}} must contain a field named
#' \code{\var{state}}, which contains the two-character abbreviation for each individual's
#' state of residence (e.g., \code{"nj"} for New Jersey).
#' If using Census geographic data in race/ethnicity predictions,
#' \code{\var{voter.file}} must also contain at least one of the following fields:
#' \code{\var{county}}, \code{\var{tract}}, \code{\var{block_group}}, \code{\var{block}},
#' and/or \code{\var{place}}.
#' These fields should contain character strings matching U.S. Census categories.
#' County is three characters (e.g., \code{"031"} not \code{"31"}),
#' tract is six characters, block group is usually a single character, and block
#' is four characters. Place is five characters.
#' See below for other optional fields.
#' @param census.surname A \code{TRUE}/\code{FALSE} object. If \code{TRUE},
#' function will call \code{merge_surnames} to merge in Pr(Race | Surname)
#' from U.S. Census Surname List (2000, 2010, or 2020) and Spanish Surname List.
#' If \code{FALSE}, user must provide a \code{name.dictionary} (see below).
#' Default is \code{TRUE}.
#' @param surname.only A \code{TRUE}/\code{FALSE} object. If \code{TRUE}, race predictions will
#' only use surname data and calculate Pr(Race | Surname). Default is \code{FALSE}.
#' @param census.geo An optional character vector specifying what level of
#' geography to use to merge in U.S. Census geographic data. Currently
#' \code{"county"}, \code{"tract"}, \code{"block_group"}, \code{"block"}, and \code{"place"}
#' are supported.
#' Note: sufficient information must be in user-defined \code{\var{voter.file}} object.
#' If \code{\var{census.geo} = "county"}, then \code{\var{voter.file}}
#' must have column named \code{county}.
#' If \code{\var{census.geo} = "tract"}, then \code{\var{voter.file}}
#' must have columns named \code{county} and \code{tract}.
#' And if \code{\var{census.geo} = "block"}, then \code{\var{voter.file}}
#' must have columns named \code{county}, \code{tract}, and \code{block}.
#' If \code{\var{census.geo} = "place"}, then \code{\var{voter.file}}
#' must have column named \code{place}.
#' If `census.geo = "zcta"`, then `voter.file` must have column named `zcta`.
#' Specifying \code{\var{census.geo}} will call \code{census_helper} function
#' to merge Census geographic data at specified level of geography.
#'
#' @param census.key A character object specifying user's Census API key.
#' Required if `census.geo` is specified, because a valid Census API key is
#' required to download Census geographic data.
#'
#' If [`NULL`], the default, attempts to find a census key stored in an
#' [environment variable][Sys.getenv] named `CENSUS_API_KEY`.
#'
#' @param census.data A list indexed by two-letter state abbreviations,
#' which contains pre-saved Census geographic data.
#' Can be generated using \code{get_census_data} function.
#' @param age An optional \code{TRUE}/\code{FALSE} object specifying whether to
#' condition race predictions on age (in addition to surname and geolocation).
#' Default is \code{FALSE}. Must be same as \code{\var{age}} in \code{\var{census.data}} object.
#' May only be set to \code{TRUE} if \code{census.geo} option is specified.
#' If \code{TRUE}, \code{\var{voter.file}} should include a numerical variable \code{\var{age}}.
#' @param sex optional \code{TRUE}/\code{FALSE} object specifying whether to
#' condition race predictions on sex (in addition to surname and geolocation).
#' Default is \code{FALSE}. Must be same as \code{\var{sex}} in \code{\var{census.data}} object.
#' May only be set to \code{TRUE} if \code{census.geo} option is specified.
#' If \code{TRUE}, \code{\var{voter.file}} should include a numerical variable \code{\var{sex}},
#' where \code{\var{sex}} is coded as 0 for males and 1 for females.
#' @param year An optional character vector specifying the year of U.S. Census geographic
#' data to be downloaded. Use \code{"2010"} or \code{"2020"}. Default is \code{"2020"}.
#' @param party An optional character object specifying party registration field
#' in \code{\var{voter.file}}, e.g., \code{\var{party} = "PartyReg"}.
#' If specified, race/ethnicity predictions will be conditioned
#' on individual's party registration (in addition to geolocation).
#' Whatever the name of the party registration field in \code{\var{voter.file}},
#' it should be coded as 1 for Democrat, 2 for Republican, and 0 for Other.
#' @param retry The number of retries at the census website if network interruption occurs.
#' @param impute.missing Logical, defaults to TRUE. Should missing be imputed?
#' @param skip_bad_geos Logical. Option to have the function skip any geolocations that are not present
#' in the census data, returning a partial data set. Default is set to \code{FALSE}, in which case it
#' will break and provide error message with a list of offending geolocations.
#' @param use.counties A logical, defaulting to FALSE. Should census data be filtered by counties
#' available in \var{census.data}?
#' @param model Character string, either "BISG" (default) or "fBISG" (for error-correction,
#' fully-Bayesian model).
#' @param name.dictionaries Optional named list of \code{data.frame}'s
#' containing counts of names by race. Any of the following named elements
#' are allowed: "surname", "first", "middle". When present, the objects must
#' follow the same structure as \code{last_c}, \code{first_c},
#'\code{mid_c}, respectively.
#' @param names.to.use One of 'surname', 'surname, first', or 'surname, first,
#' middle'. Defaults to 'surname'.
#' @param race.init Vector of initial race for each observation in voter.file.
#' Must be an integer vector, with 1=white, 2=black, 3=hispanic, 4=asian, and
#' 5=other. Defaults to predictions from a preliminary run with \code{model = "BISG"}.
#' @param control List of control arguments only used when \code{model="fBISG"}, including
#' \describe{
#'   \item{iter}{Number of MCMC iterations. Defaults to 1000.}
#'   \item{burnin}{Number of iterations discarded as burnin. Defaults to half of \code{iter}.}
#'   \item{thin}{Thinning interval for the MCMC chain. Defaults to 1.}
#'   \item{verbose}{Print progress information. Defaults to \code{TRUE}.}
#'   \item{me.correct}{Boolean. Should the model correct measurement error for \code{races|geo}? Defaults to \code{TRUE}.}
#'   \item{seed}{RNG seed. If \code{NULL}, a seed is generated and returned as an attribute for reproducibility.}
#' }
#'
#' @return Output will be an object of class \code{data.frame}. It will
#' consist of the original user-input \code{voter.file} with additional columns with
#' predicted probabilities for each of the five major racial categories:
#' \code{\var{pred.whi}} for White,
#' \code{\var{pred.bla}} for Black,
#' \code{\var{pred.his}} for Hispanic/Latino,
#' \code{\var{pred.asi}} for Asian/Pacific Islander, and
#' \code{\var{pred.oth}} for Other/Mixed.
#'
#' @examples
#' \donttest{
#' data(voters)
#' try(predict_race(voter.file = voters, surname.only = TRUE))
#' \dontrun{
#' try(predict_race(voter.file = voters, census.geo = "tract"))
#' }
#' \dontrun{
#' try(predict_race(
#' voter.file = voters, census.geo = "place", year = "2020"))
#' }
#' \dontrun{
#' CensusObj <- try(get_census_data(states = c("NY", "DC", "NJ")))
#' try(predict_race(
#' voter.file = voters, census.geo = "tract", census.data = CensusObj, party = "PID")
#' )
#' }
#' \dontrun{
#' CensusObj2 <- try(get_census_data(states = c("NY", "DC", "NJ"), age = T, sex = T))
#' try(predict_race(
#' voter.file = voters, census.geo = "tract", census.data = CensusObj2, age = T, sex = T))
#' }
#' \dontrun{
#' CensusObj3 <- try(get_census_data(states = c("NY", "DC", "NJ"), census.geo = "place"))
#' try(predict_race(voter.file = voters, census.geo = "place", census.data = CensusObj3))
#' }
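#' \dontrun{
#' # Sketch: the fBISG error-correction model with a shortened chain via the
#' # control list; assumes a valid CENSUS_API_KEY.
#' try(predict_race(voter.file = voters, census.geo = "tract",
#'   model = "fBISG", control = list(iter = 500, verbose = FALSE)))
#' }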
#' }
#' @export
predict_race <- function(
voter.file,
census.surname = TRUE,
surname.only = FALSE,
census.geo = c("tract", "block", "block_group", "county", "place", "zcta"),
census.key = Sys.getenv("CENSUS_API_KEY"),
census.data = NULL,
age = FALSE,
sex = FALSE,
year = "2020",
party = NULL,
retry = 3,
impute.missing = TRUE,
skip_bad_geos = FALSE,
use.counties = FALSE,
model = "BISG",
race.init = NULL,
name.dictionaries = NULL,
names.to.use = "surname",
control = NULL
) {
message("Predicting race for ", year)
## Check model type
if (!(model %in% c("BISG", "fBISG"))) {
stop(
paste0(
"'model' must be one of 'BISG' (for standard BISG results, or results",
" with all name data without error correction) or 'fBISG' (for the",
" fully Bayesian/error correction model that accommodates all name data)."
)
)
}
if (any(unique(voter.file$state) %in% c("AS","GU","MP","PR","VI"))) {
stop(
paste0(
"The wru package does not support US territories",
" please filter these from your voter.file data")
)
}
census.geo <- tolower(census.geo)
census.geo <- rlang::arg_match(census.geo)
# block_group is missing, pull from block
if((surname.only == FALSE) && !(missing(census.geo)) && (census.geo == "block_group") && !("block_group" %in% names(voter.file))) {
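    # A Census block group is the first digit of the four-digit block code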
voter.file$block_group <- substring(voter.file$block, 1, 1)
}
# Adjust voter.file with caseid for ordering at the end
voter.file$caseid <- 1:nrow(voter.file)
if (surname.only == FALSE && is.null(census.data)) {
# Otherwise predict_race_new and predict_race_me will both
# attempt to pull census_data
census.key <- validate_key(census.key)
voter.file$state <- toupper(voter.file$state)
states <- unique(voter.file$state)
county.list <- split(voter.file$county, voter.file$state)
    county.list <- lapply(county.list, unique)
census.data <- get_census_data(
census.key, states, age,
sex, year, census.geo,
retry, county.list
)
}
if((model == "BISG") | (surname.only==TRUE)){
if((surname.only==TRUE) & (model == "fBISG")){
warning("Surname-only model only available with model = BISG.")
}
preds <- predict_race_new(voter.file = voter.file,
names.to.use = names.to.use,
year = year,
age = age, sex = sex, # not implemented, default to F
census.geo = census.geo,
census.key = census.key,
name.dictionaries = name.dictionaries,
surname.only=surname.only,
census.data = census.data,
retry = retry,
impute.missing = impute.missing,
skip_bad_geos = skip_bad_geos,
census.surname = census.surname,
use.counties = use.counties)
} else {
ctrl <- list(
iter = 1000,
thin = 1,
verbose = TRUE,
seed = sample(1:1000, 1)
)
ctrl$burnin <- floor(ctrl$iter / 2)
ctrl[names(control)] <- control
ctrl$usr_seed <- ifelse(is.null(control$seed), FALSE, TRUE)
if (is.null(race.init)) {
if(ctrl$verbose){
message("Using `predict_race` to obtain initial race prediction priors with BISG model")
}
race.init <- predict_race(voter.file = voter.file,
names.to.use = names.to.use,
year = year,
age = age, sex = sex, # not implemented, default to F
census.geo = census.geo,
census.key = census.key,
name.dictionaries = name.dictionaries,
surname.only=surname.only,
census.data = census.data,
retry = retry,
impute.missing = TRUE,
skip_bad_geos = skip_bad_geos,
census.surname = census.surname,
use.counties = use.counties,
model = "BISG",
control = list(verbose=FALSE))
race.init <- max.col(
race.init[, paste0("pred.", c("whi", "bla", "his", "asi", "oth"))],
ties.method = "random"
)
}
    if (any(is.na(race.init))) {
      stop(
        "Some initial race values are NA.\n",
        "If you didn't provide initial values, check the results of calling predict_race() on your voter.file.\n",
        "The most likely reason for a missing race prediction is a geolocation that does not match locations in the census data.\n",
        "If this problem persists, try impute.missing = TRUE."
      )
    }
preds <- predict_race_me(voter.file = voter.file,
names.to.use = names.to.use,
                             year = year, age = age, sex = sex,
census.geo = census.geo,
census.key = census.key,
name.dictionaries = name.dictionaries,
surname.only = surname.only,
census.data = census.data, retry = retry,
impute.missing = impute.missing,
census.surname = census.surname,
use.counties = use.counties, race.init = race.init,
ctrl = ctrl)
}
seed_attr <- attr(preds, "RNGseed")
preds <- preds[order(preds$caseid),setdiff(names(preds), "caseid")]
attr(preds, "RNGseed") <- seed_attr
preds
}
| /scratch/gouwar.j/cran-all/cranData/wru/R/predict_race.R |
#' Internal model fitting functions
#'
#' These functions are intended for internal use only. Users should use the
#' [predict_race()] interface rather any of these functions directly.
#'
#' These functions fit different versions of WRU. \code{.predict_race_old} fits
#' the original WRU model, also known as BISG with census-based surname dictionary.
#' \code{predict_race_new} fits a newer version of BISG that uses an augmented
#' surname dictionary, and can also accommodate the use of first and middle
#' name information. Finally, \code{predict_race_me} fits a fully Bayesian Improved
#' Surname Geocoding model (fBISG), which applies a measurement-error
#' correction for erroneous zeros in census tables, in addition to accommodating
#' the augmented surname dictionary and the first and middle name
#' dictionaries when making predictions.
#'
#' @inheritParams predict_race
#' @param voter.file See documentation in \code{predict_race}.
#' @param census.surname See documentation in \code{predict_race}.
#' @param surname.only See documentation in \code{predict_race}.
#' @param surname.year See documentation in \code{predict_race}.
#' @param census.geo See documentation in \code{predict_race}.
#' @param census.data See documentation in \code{predict_race}.
#' @param age See documentation in \code{predict_race}.
#' @param sex See documentation in \code{predict_race}.
#' @param year See documentation in \code{predict_race}.
#' @param party See documentation in \code{predict_race}.
#' @param retry See documentation in \code{predict_race}.
#' @param impute.missing See documentation in \code{predict_race}.
#' @param skip_bad_geos See documentation in \code{predict_race}.
#' @param names.to.use See documentation in \code{predict_race}.
#' @param race.init See documentation in \code{predict_race}.
#' @param name.dictionaries See documentation in \code{predict_race}.
#' @param ctrl See `control` in documentation for [predict_race()].
#' @param use.counties A logical, defaulting to FALSE. Should census data be filtered by counties available in \var{census.data}?
#'
#' @inherit predict_race return
#'
#' @name modfuns
NULL
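# Illustrative sketch (not run): these fitters are reached through the public
# predict_race() interface rather than called directly. A hypothetical call
# that dispatches to the fBISG fitter, with `voters` and `census_data`
# standing in for user-supplied objects:
#
#   preds <- predict_race(voter.file = voters, census.geo = "tract",
#                         census.data = census_data, model = "fBISG")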
#' @section .predict_race_old:
#' Original WRU race prediction function, implementing classical BISG with census-based
#' surname dictionary.
#' @importFrom stats rmultinom
#' @importFrom utils txtProgressBar setTxtProgressBar
#' @rdname modfuns
#' @keywords internal
.predict_race_old <- function(
voter.file,
census.surname = TRUE,
surname.only = FALSE,
surname.year = 2020,
name.dictionaries = NULL,
census.geo,
census.key = Sys.getenv("CENSUS_API_KEY"),
census.data = NULL,
age = FALSE,
sex = FALSE,
year = "2020",
party,
retry = 3,
impute.missing = TRUE,
use.counties = FALSE
) {
  # 2020 census data only supports prediction when both age and sex are FALSE
  if ((sex == TRUE || age == TRUE) && (year == "2020")) {
    stop("Only predictions with both age and sex equal to FALSE are supported when using 2020 census data.")
}
  if (!missing(census.geo) && (census.geo == "precinct")) {
    stop("census_helper function does not currently support merging precinct-level data.")
}
vars.orig <- names(voter.file)
if (surname.only == TRUE) {
message("Proceeding with surname-only predictions...")
if (!("surname" %in% names(voter.file))) {
stop("Voter data frame needs to have a column named surname")
}
} else {
if (missing(census.geo) || is.null(census.geo) || all(is.na(census.geo)) || census.geo %in% c("county", "tract", "block", "place") == FALSE) {
stop("census.geo must be either 'county', 'tract', 'block', or 'place'")
} else {
message(paste("Proceeding with Census geographic data at", census.geo, "level..."))
}
if (missing(census.data) || is.null(census.data) || all(is.na(census.data))) {
census.key <- validate_key(census.key)
message("Downloading Census geographic data using provided API key...")
} else {
if (!("state" %in% names(voter.file))) {
stop("voter.file object needs to have a column named state.")
}
if (sum(toupper(unique(as.character(voter.file$state))) %in% toupper(names(census.data)) == FALSE) > 0) {
message("census.data object does not include all states in voter.file object.")
census.key <- validate_key(census.key)
message("Downloading Census geographic data for states not included in census.data object...")
} else {
message("Using Census geographic data from provided census.data object...")
}
}
}
eth <- c("whi", "bla", "his", "asi", "oth")
## Merge in Pr(Race | Surname) if necessary
if (census.surname) {
if (!(surname.year %in% c(2000, 2010, 2020))) {
stop(paste(surname.year, "is not a valid surname.year. It should be 2000, 2010 or 2020 (default)."))
}
voter.file <- merge_surnames(voter.file, surname.year = surname.year, name.data = NULL, impute.missing = impute.missing)
} else {
# Check if voter.file has the necessary data
if (is.null(name.dictionaries) | !("surname" %in% names(name.dictionaries))) {
stop("User must provide a 'name.dictionaries', with named element 'surname'.")
}
for (k in 1:length(eth)) {
if ((paste("c", eth[k], sep = "_") %in% names(name.dictionaries[["surname"]])) == FALSE) {
stop(paste("name.dictionaries element 'surname' needs to have columns named ", paste(paste("c", eth, sep = "_"), collapse = " and "), ".", sep = ""))
}
}
name.dictionaries[["surname"]] <- apply(name.dictionaries[["surname"]], 1, function(x) x / sum(x, na.rm = TRUE))
name.dictionaries[["surname"]][is.na(name.dictionaries[["surname"]])] <- 0
voter.file <- merge_surnames(voter.file, surname.year = surname.year, name.data = name.dictionaries[["surname"]], impute.missing = impute.missing)
}
## Surname-Only Predictions
if (surname.only) {
for (k in 1:length(eth)) {
voter.file[paste("pred", eth[k], sep = ".")] <- voter.file[paste("p", eth[k], sep = "_")] / apply(voter.file[paste("p", eth, sep = "_")], 1, sum)
}
pred <- paste("pred", eth, sep = ".")
return(voter.file[c(vars.orig, pred)])
}
## Merge in Pr(Party | Race) if necessary
if (missing(party) == FALSE) {
voter.file$PID <- voter.file[, party]
voter.file <- merge(voter.file, get("pid")[names(get("pid")) %in% "party" == F], by = "PID", all.x = TRUE)
}
if (census.geo == "place") {
if (!("place" %in% names(voter.file))) {
stop("voter.file object needs to have a column named place.")
}
voter.file <- census_helper(
key = census.key,
voter.file = voter.file,
states = "all",
geo = "place",
age = age,
sex = sex,
year = year,
census.data = census.data,
retry = retry
)
}
if (census.geo == "block_group") {
if (!("block_group" %in% names(voter.file)) || !("county" %in% names(voter.file)) || !("tract" %in% names(voter.file))) {
stop("voter.file object needs to have columns named block, tract, and county.")
}
voter.file <- census_helper(
key = census.key,
voter.file = voter.file,
states = "all",
geo = "block_group",
age = age,
sex = sex,
year = year,
census.data = census.data,
retry = retry,
use.counties = use.counties
)
}
if (census.geo == "block") {
if (!("tract" %in% names(voter.file)) || !("county" %in% names(voter.file)) || !("block" %in% names(voter.file))) {
stop("voter.file object needs to have columns named block, tract, and county.")
}
voter.file <- census_helper(
key = census.key,
voter.file = voter.file,
states = "all",
geo = "block",
age = age,
sex = sex,
year = year,
census.data = census.data,
retry = retry,
use.counties = use.counties
)
}
if (census.geo == "precinct") {
geo <- "precinct"
stop("Error: census_helper function does not currently support precinct-level data.")
}
if (census.geo == "tract") {
if (!("tract" %in% names(voter.file)) || !("county" %in% names(voter.file))) {
stop("voter.file object needs to have columns named tract and county.")
}
voter.file <- census_helper(
key = census.key,
voter.file = voter.file,
states = "all",
geo = "tract",
age = age,
sex = sex,
year = year,
census.data = census.data,
retry = retry,
use.counties = use.counties
)
}
if (census.geo == "county") {
if (!("county" %in% names(voter.file))) {
stop("voter.file object needs to have a column named county.")
}
voter.file <- census_helper(
key = census.key,
voter.file = voter.file,
states = "all",
geo = "county",
age = age,
sex = sex,
year = year,
census.data = census.data,
retry = retry
)
}
## Pr(Race | Surname, Geolocation)
if (missing(party)) {
for (k in 1:length(eth)) {
voter.file[paste("u", eth[k], sep = "_")] <- voter.file[paste("p", eth[k], sep = "_")] * voter.file[paste("r", eth[k], sep = "_")]
}
voter.file$u_tot <- apply(voter.file[paste("u", eth, sep = "_")], 1, sum, na.rm = TRUE)
for (k in 1:length(eth)) {
voter.file[paste("q", eth[k], sep = "_")] <- voter.file[paste("u", eth[k], sep = "_")] / voter.file$u_tot
}
}
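  ## Under BISG's conditional-independence assumption, multiplying the
  ## surname term by the geolocation term and renormalizing across the five
  ## race categories yields the posterior probabilities.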
## Pr(Race | Surname, Geolocation, Party)
if (missing(party) == FALSE) {
for (k in 1:length(eth)) {
voter.file[paste("u", eth[k], sep = "_")] <- voter.file[paste("p", eth[k], sep = "_")] * voter.file[paste("r", eth[k], sep = "_")] * voter.file[paste("r_pid", eth[k], sep = "_")]
}
voter.file$u_tot <- apply(voter.file[paste("u", eth, sep = "_")], 1, sum, na.rm = TRUE)
for (k in 1:length(eth)) {
voter.file[paste("q", eth[k], sep = "_")] <- voter.file[paste("u", eth[k], sep = "_")] / voter.file$u_tot
}
}
for (k in 1:length(eth)) {
voter.file[paste("pred", eth[k], sep = ".")] <- voter.file[paste("q", eth[k], sep = "_")]
}
pred <- paste("pred", eth, sep = ".")
return(voter.file[c(vars.orig, pred)])
}
#' @section predict_race_new:
#' New race prediction function, implementing classical BISG with augmented
#' surname dictionary, as well as first and middle name information.
#' @rdname modfuns
predict_race_new <- function(
voter.file,
names.to.use,
year = "2020",
age = FALSE,
sex = FALSE,
census.geo = c("tract", "block", "block_group", "county", "place", "zcta"),
census.key = Sys.getenv("CENSUS_API_KEY"),
name.dictionaries,
surname.only=FALSE,
census.data = NULL,
retry = 0,
impute.missing = TRUE,
skip_bad_geos = FALSE,
census.surname = FALSE,
use.counties = FALSE
) {
# Check years
if (!(year %in% c("2000", "2010", "2020"))){
stop("Year should be one of 2000, 2010, or 2020 (default).")
}
# Define 2020 race marginal
race.margin <- c(r_whi=0.5783619, r_bla=0.1205021, r_his=0.1872988,
r_asi=0.06106737, r_oth=0.05276981)
census.geo <- tolower(census.geo)
census.geo <- rlang::arg_match(census.geo)
vars.orig <- names(voter.file)
# check the names
if (names.to.use == "surname") {
message("Proceeding with last name predictions...")
if (!("surname" %in% names(voter.file))) {
stop("Voter data frame needs to have a column named 'surname'.")
}
} else if (names.to.use == "surname, first") {
message("Proceeding with first and last name-only predictions...")
if (!("surname" %in% names(voter.file)) || !("first" %in% names(voter.file))) {
stop("Voter data frame needs to have a column named 'surname' and a column called 'first'.")
}
} else if (names.to.use == "surname, first, middle") {
message("Proceeding with first, last, and middle name predictions...")
if (!("surname" %in% names(voter.file)) || !("first" %in% names(voter.file)) ||
!("middle" %in% names(voter.file))) {
stop("Voter data frame needs to have a column named 'surname', a column called 'first', and a column called 'middle'.")
}
}
## Preliminary Data quality checks
wru_data_preflight()
path <- ifelse(getOption("wru_data_wd", default = FALSE), getwd(), tempdir())
first_c <- readRDS(paste0(path, "/wru-data-first_c.rds"))
mid_c <- readRDS(paste0(path, "/wru-data-mid_c.rds"))
if(census.surname){
last_c <- readRDS(paste0(path, "/wru-data-census_last_c.rds"))
} else {
last_c <- readRDS(paste0(path, "/wru-data-last_c.rds"))
}
  if (!is.null(name.dictionaries)) {
if (!is.null(name.dictionaries[["surname"]])) {
stopifnot(identical(names(name.dictionaries[["surname"]]), names(last_c)))
}
if (!is.null(name.dictionaries[["first"]])) {
stopifnot(identical(names(name.dictionaries[["first"]]), names(first_c)))
}
if (!is.null(name.dictionaries[["middle"]])) {
stopifnot(identical(names(name.dictionaries[["middle"]]), names(mid_c)))
}
}
# check the geographies
if (surname.only == FALSE) {
message("Proceeding with Census geographic data at ", census.geo, " level...")
if (is.null(census.data)) {
census.key <- validate_key(census.key)
message("Downloading Census geographic data using provided API key...")
} else {
if (!("state" %in% names(voter.file))) {
stop("voter.file object needs to have a column named state.")
}
census_data_preflight(census.data, census.geo, year)
if (sum(toupper(unique(as.character(voter.file$state))) %in% toupper(names(census.data)) == FALSE) > 0) {
message("census.data object does not include all states in voter.file object.")
census.key <- validate_key(census.key)
message("Downloading Census geographic data for states not included in census.data object...")
} else {
message("Using Census geographic data from provided census.data object...")
}
}
geo_id_names <- determine_geo_id_names(census.geo)
if (!all(geo_id_names %in% names(voter.file))) {
stop(message("To use",census.geo,"as census.geo, voter.file needs to include the following column(s):",
paste(geo_id_names, collapse=", ")))
}
voter.file <- census_helper_new(
key = census.key,
voter.file = voter.file,
states = "all",
geo = census.geo,
age = age,
sex = sex,
year = year,
census.data = census.data,
retry = retry,
use.counties = use.counties,
skip_bad_geos = skip_bad_geos
)
}
eth <- c("whi", "bla", "his", "asi", "oth")
## Merge in Pr(Name | Race)
voter.file <- merge_names(voter.file = voter.file,
namesToUse = names.to.use,
census.surname = census.surname,
table.surnames=name.dictionaries[["surname"]],
table.first=name.dictionaries[["first"]],
table.middle=name.dictionaries[["middle"]],
clean.names = TRUE,
impute.missing = impute.missing,
model = 'BISG')
if (surname.only == TRUE) {
# Pr(Race | Surname)
preds <- voter.file[, grep("_last$", names(voter.file))] *
matrix(race.margin, nrow=nrow(voter.file), ncol=length(race.margin), byrow = TRUE)
} else {
# Pr(Race | Surname, Geolocation)
preds <- voter.file[, grep("_last$", names(voter.file))] * voter.file[, grep("^r_", names(voter.file))]
if (grepl("first", names.to.use)) {
preds <- preds * voter.file[, grep("_first$", names(voter.file))]
}
if (grepl("middle", names.to.use)) {
preds <- preds * voter.file[, grep("_middle$", names(voter.file))]
}
}
## Normalize (recycle marginal)
preds <- preds/rowSums(preds)
## Revert to Pr(Race|Surname) for missing predictions
if(impute.missing){
miss_ind <- !is.finite(preds$c_whi_last)
if(any(miss_ind)){
preds[miss_ind,] <- voter.file[miss_ind, grep("_last$", names(voter.file))] *
matrix(race.margin, nrow=nrow(voter.file[miss_ind,]), ncol=length(race.margin), byrow = TRUE)
}
}
colnames(preds) <- paste("pred", eth, sep = ".")
return(data.frame(cbind(voter.file[c(vars.orig)], preds)))
}
#' @section predict_race_me:
#' New race prediction function, implementing fBISG (i.e. measurement
#' error correction, fully Bayesian model) with augmented
#' surname dictionary, as well as first and middle name information.
#' @importFrom dplyr pull
#' @rdname modfuns
predict_race_me <- function(
voter.file,
names.to.use,
year = "2020",
age = FALSE,
sex = FALSE,
census.geo = c("tract", "block", "block_group", "county", "place", "zcta"),
census.key = Sys.getenv("CENSUS_API_KEY"),
name.dictionaries,
surname.only = FALSE,
census.data = NULL,
retry = 0,
impute.missing = TRUE,
census.surname = FALSE,
use.counties = FALSE,
race.init,
ctrl
) {
census.geo <- tolower(census.geo)
census.geo <- rlang::arg_match(census.geo)
if(!is.null(census.data)) {
census_data_preflight(census.data, census.geo, year)
}
n_race <- 5
  if (!(names.to.use %in% c("surname", "surname, first", "surname, first, middle"))) {
stop("'names.to.use' must be one of 'surname', 'surname, first', or 'surname, first, middle'")
}
name_types <- gsub(" ", "", strsplit(names.to.use, ",")[[1]])
if (!all(name_types %in% names(voter.file))) {
stop("When used, 'surname', 'first', and 'middle' must be variable names in voter.file.")
}
## Preliminary Data quality checks
wru_data_preflight()
path <- ifelse(getOption("wru_data_wd", default = FALSE), getwd(), tempdir())
if(census.surname){
last_c <- readRDS(paste0(path, "/wru-data-census_last_c.rds"))
} else {
last_c <- readRDS(paste0(path, "/wru-data-last_c.rds"))
}
if (!is.null(name.dictionaries[["surname"]])) {
stopifnot(identical(names(name.dictionaries[["surname"]]), names(last_c)))
last_c <- name.dictionaries[["surname"]]
}
if("first" %in% name_types){
first_c <- readRDS(paste0(path, "/wru-data-first_c.rds"))
if (!is.null(name.dictionaries[["first"]])){
stopifnot(identical(names(name.dictionaries[["first"]]), names(first_c)))
first_c <- name.dictionaries[["first"]]
}
}
if("middle" %in% name_types){
mid_c <- readRDS(paste0(path, "/wru-data-mid_c.rds"))
if (!is.null(name.dictionaries[["middle"]])){
stopifnot(identical(names(name.dictionaries[["middle"]]), names(mid_c)))
mid_c <- name.dictionaries[["middle"]]
}
}
## Other quick checks...
stopifnot(
all(!is.na(voter.file$surname))
)
orig.names <- names(voter.file)
orig.state <- voter.file$state
voter.file$state <- toupper(voter.file$state)
voter.file$rec_id_ <- 1:nrow(voter.file)
## Set RNG seed
set.seed(ctrl$seed)
if(!(ctrl$usr_seed) & (ctrl$verbose)){
message("fBISG relies on MCMC; for reproducibility, I am setting RNG seed and returning it as attribute 'RNGseed'.\n",
"To silence this message, you can set a seed explicitly by defining the 'seed' element in the control list.")
}
## Initial race
race_pred_args <- list(
census.data = NULL,
names.to.use = names.to.use,
census.geo = census.geo,
census.key = NULL,
model = "BISG",
name.dictionaries = name.dictionaries,
retry = 0
)
## level of geo estimation
geo_id_names <- c("state", determine_geo_id_names(census.geo))
#race_pred_args[names(args_usr)] <- args_usr
all_states <- unique(voter.file$state)
census.data <- census.data[all_states]
race.suff <- c("whi", "bla", "his", "asi", "oth")
geo_id <- do.call(paste, voter.file[, geo_id_names])
if (ctrl$verbose) {
message("Forming Pr(race | location) tables from census data...\n")
}
vars_ <- census_geo_api_names(year = year)
N_rg <- purrr::map(
census.data,
function(x) {
all_names <- names(x[[census.geo]])
if (any(c("P2_005N", "P005003") %in% all_names)) {
vars_ <- census_geo_api_names_legacy(year = year)
}
totals <- x[[census.geo]][, match(c(geo_id_names, unlist(vars_)), all_names)]
totals$r_whi <- rowSums(totals[, vars_[["r_whi"]], drop = FALSE]) # White population
totals$r_bla <- rowSums(totals[, vars_[["r_bla"]], drop = FALSE]) # Black population
totals$r_his <- rowSums(totals[, vars_[["r_his"]], drop = FALSE]) # Latino population
totals$r_asi <- rowSums(totals[, vars_[["r_asi"]], drop = FALSE]) # Asian + NH/PI population
totals$r_oth <- rowSums(totals[, vars_[["r_oth"]], drop = FALSE]) # AI/AN + Other + Mixed population
totals <- totals[, -match(unlist(vars_), names(totals))]
totals
}
)
N_rg <- dplyr::bind_rows(N_rg)
N_rg_geo <- do.call(paste, N_rg[, geo_id_names])
## Subset to geo's in vf
N_rg <- N_rg[N_rg_geo %in% geo_id, ]
rm(race_pred_args)
if (nrow(N_rg) != length(unique(geo_id))) {
stop("Some records in voter.file have geographic locations that I wasn't able to find in the census.data.\n
Records may have mis-matched geographic units that do not exist in the census.")
}
## Split data by geographic cluster
voter.file$state_cluster <- geo_id
N_rg$state_cluster <- do.call(paste, N_rg[, geo_id_names])
N_rg <- split(N_rg, N_rg$state_cluster)
geo_id <- split(geo_id, voter.file$state_cluster)
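  # For each cluster, map every record to its row in the census table and
  # keep the race-by-geography population counts with races in rows.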
r_g_t <- mapply(function(tot_, gid_, g_n_) {
Nrg_geo_new <- do.call(paste, tot_[, g_n_])
geo_ <- match(gid_, Nrg_geo_new)
tot_ <- t(tot_[, grep("^r_", colnames(tot_))]) ## Races in rows
return(list(
geo_ = geo_,
#alpha_ = as.matrix(tab_),
N_rg_ = tot_
))
}, N_rg, geo_id,
MoreArgs = list(g_n_ = geo_id_names),
SIMPLIFY = FALSE
)
orig_ord <- split(voter.file$rec_id_, voter.file$state_cluster)
n_groups <- length(orig_ord)
  ## Create name indices
name_data <- vector("list", 3)
names(name_data) <- c("surname", "first", "middle")
if (ctrl$verbose) {
message("Pre-processing names...\n")
}
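  # For each name type used: standardize the names, match them against the
  # dictionary, and re-index records so dictionary matches come first; pi_
  # stores the dictionary's race-specific name probabilities for matched
  # names (transposed, races in rows).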
for (ntype in c("surname", "first", "middle")) {
if (ntype %in% name_types) {
ntab <- switch(ntype,
surname = last_c,
first = first_c,
middle = mid_c)
kw_names <- toupper(dplyr::pull(ntab, 1))
proc_names_vf <- .name_preproc(voter.file[[ntype]], c(kw_names))
u_vf_names <- unique(proc_names_vf)
kw_in_vf <- kw_names %in% proc_names_vf
u_kw <- kw_names[kw_in_vf]
n_u_kw <- length(u_kw)
reord <- order(match(u_vf_names, u_kw))
u_vf_names <- u_vf_names[reord]
w_names <- match(proc_names_vf, u_vf_names)
w_names <- split(w_names, voter.file$state_cluster)
pi_ <- as.matrix(ntab[which(kw_in_vf), -1])
#pi_ <- apply(M_, 2, function(x) x / sum(x, na.rm = TRUE))
if (impute.missing) {
pi_miss <- colMeans(pi_, na.rm = TRUE)
} else {
pi_miss <- rep(1, n_race)
}
pi_[is.na(pi_)] <- 0
name_data[[ntype]] <- list(
record_name_id = w_names,
pi_ = t(pi_),
pi_miss = pi_miss
)
} else {
name_data[[ntype]] <- list(
record_name_id = replicate(n_groups, vector("integer"), simplify = FALSE),
pi_ = matrix(NA, 0, 0),
pi_miss = array(NA, 5)
)
}
}
## Build missing distribution
pi.miss <- do.call(cbind, lapply(name_data, function(x) {
x$pi_miss
}))
## Split inits by cluster
race.init <- split(race.init, voter.file$state_cluster)
## Name selector
which.names <- switch(names.to.use,
"surname" = 0L,
"surname, first" = 1L,
"surname, first, middle" = 2L
)
## Run Gibbs sampler
if (ctrl$verbose) {
message("Sampling races...\n")
pb <- txtProgressBar(min = 0, max = n_groups, style = 3)
}
race_samples <- lapply(seq.int(n_groups),
function(cluster) {
tmp <- sample_me(
name_data[["surname"]]$record_name_id[[cluster]] - 1L,
name_data[["first"]]$record_name_id[[cluster]] - 1L,
name_data[["middle"]]$record_name_id[[cluster]] - 1L,
r_g_t[[cluster]]$geo_ - 1L,
r_g_t[[cluster]]$N_rg_,
name_data[["surname"]]$pi_,
name_data[["first"]]$pi_,
name_data[["middle"]]$pi_,
pi.miss,
which.names,
ctrl$iter,
ctrl$burnin,
race.init[[cluster]] - 1L,
0
)
if (ctrl$verbose) {
setTxtProgressBar(pb, cluster)
}
return(cbind(orig_ord[[cluster]], tmp))
}
)
if (ctrl$verbose) {
close(pb)
}
if (ctrl$verbose) {
message("Post-processing results and wrapping up.\n")
}
## Get posterior race probabilities and append to voter.file
race_samples <- do.call(rbind, race_samples)
race_samples <- race_samples[order(race_samples[, 1]), -1]
race_probs <- proportions(race_samples, 1)
colnames(race_probs) <- paste0("pred.", race.suff)
voter.file <- cbind(voter.file[, orig.names], race_probs)
voter.file$state <- orig.state
attr(voter.file, "RNGseed") <- ctrl$seed
  ## Return expanded voter.file with RNG seed attribute
return(voter.file)
}
| /scratch/gouwar.j/cran-all/cranData/wru/R/race_prediction_funs.R |
# Generated by staticimports; do not edit by hand.
# ======================================================================
# Imported from pkg:stringstatic
# ======================================================================
#' Duplicate and concatenate strings within a character vector
#'
#' Dependency-free drop-in alternative for `stringr::str_pad()`.
#'
#' @author Eli Pousson \email{[email protected]}
#' ([ORCID](https://orcid.org/0000-0001-8280-1706))
#'
#' Alexander Rossell Hayes \email{[email protected]}
#' ([ORCID](https://orcid.org/0000-0001-9412-0457))
#'
#' @source Adapted from the [stringr](https://stringr.tidyverse.org/) package.
#'
#' @param string Input vector.
#' Either a character vector, or something coercible to one.
#' @param width Minimum width of padded strings.
#' @param side Side on which padding character is added (left, right or both).
#' @param pad Single padding character (default is a space).
#' @param use_width If `FALSE`,
#' use the length of the string instead of the width;
#' see [str_width()]/[str_length()] for the difference.
#'
#' @return A character vector.
#' @noRd
str_pad <- function(
string, width, side = c("left", "right", "both"), pad = " ", use_width = TRUE
) {
if (!is.numeric(width)) {
return(string[NA])
}
if (any(nchar(pad, type = "width") != 1)) {
stop("each string in `pad` should consist of code points of total width 1")
}
side <- match.arg(side)
nchar_type <- if (isTRUE(use_width)) "width" else "chars"
string_width <- nchar(string, nchar_type)
pad_width <- width - string_width
pad_width[pad_width < 0] <- 0
switch(
side,
"left" = paste0(strrep(pad, pad_width), string),
"right" = paste0(string, strrep(pad, pad_width)),
"both" = paste0(
strrep(pad, floor(pad_width / 2)),
string,
strrep(pad, ceiling(pad_width / 2))
)
)
}
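# Example (comment only): str_pad("7", width = 2, pad = "0") returns "07";
# with side = "right" it would instead return "70".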
| /scratch/gouwar.j/cran-all/cranData/wru/R/staticimports.R |
#' Census Surname List (2000).
#'
#' Census Surname List from 2000 with race/ethnicity probabilities by surname.
#'
#' @format A data frame with 157,728 rows and 6 variables:
#' \describe{
#' \item{surname}{Surname}
#' \item{p_whi}{Pr(White | Surname)}
#' \item{p_bla}{Pr(Black | Surname)}
#' \item{p_his}{Pr(Hispanic/Latino | Surname)}
#' \item{p_asi}{Pr(Asian/Pacific Islander | Surname)}
#' \item{p_oth}{Pr(Other | Surname)}
#' }
#'
#' @docType data
#' @keywords datasets
#' @name surnames2000
#' @examples
#' data(surnames2000)
"surnames2000"
| /scratch/gouwar.j/cran-all/cranData/wru/R/surnames2000.R |
#' Census Surname List (2010).
#'
#' Census Surname List from 2010 with race/ethnicity probabilities by surname.
#'
#' @format A data frame with 167,613 rows and 6 variables:
#' \describe{
#' \item{surname}{Surname}
#' \item{p_whi}{Pr(White | Surname)}
#' \item{p_bla}{Pr(Black | Surname)}
#' \item{p_his}{Pr(Hispanic/Latino | Surname)}
#' \item{p_asi}{Pr(Asian/Pacific Islander | Surname)}
#' \item{p_oth}{Pr(Other | Surname)}
#' }
#'
#' @docType data
#' @keywords datasets
#' @name surnames2010
#' @examples
#' data(surnames2010)
"surnames2010"
| /scratch/gouwar.j/cran-all/cranData/wru/R/surnames2010.R |
assert_boolean <- function(
x,
argument_name = rlang::caller_arg(x),
call = rlang::caller_call()
) {
if (length(x) != 1) {
cli::cli_abort(
c(
"{.arg {argument_name}} must be a {.code TRUE} or {.code FALSE} value of length {.val {1}}.",
x = "{.arg {argument_name}} has a length of {.val {length(x)}}."
),
call = call
)
}
if (!inherits(x, "logical")) {
cli::cli_abort(
c(
"{.arg {argument_name}} must be a {.class logical} {.code TRUE} or {.code FALSE} value.",
x = "{.arg {argument_name}} is an object of class {.cls {class(x)}}."
),
call = call
)
}
if (!x %in% c(TRUE, FALSE)) {
cli::cli_abort(
c(
"{.arg {argument_name}} must be {.code TRUE} or {.code FALSE}.",
x = "{.arg {argument_name}} is {.val {x}}."
),
call = call
)
}
x
}
| /scratch/gouwar.j/cran-all/cranData/wru/R/utils_assert.R |
determine_geo_id_names <- function(census.geo) {
switch(
census.geo,
"tract" = c("county", "tract"),
"block_group" = c("county", "tract", "block_group"),
"block" = c("county", "tract", "block"),
# Return `census.geo` unchanged for county, place, and zcta
census.geo
)
}
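# Example (comment only): determine_geo_id_names("block") returns
# c("county", "tract", "block"), while "county", "place", and "zcta" are
# returned unchanged.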
| /scratch/gouwar.j/cran-all/cranData/wru/R/utils_determine_geo_id_names.R |
#' Dataset with FIPS codes for US states
#'
#' Dataset including FIPS codes and postal abbreviations for each U.S. state,
#' district, and territory.
#'
#' @format
#' A tibble with 57 rows and 3 columns:
#' \describe{
#' \item{`state`}{Two-letter postal abbreviation}
#' \item{`state_code`}{Two-digit FIPS code}
#' \item{`state_name`}{English name}
#' }
#' @source Derived from [tidycensus::fips_codes()]
"state_fips"
#' Convert between state names, postal abbreviations, and FIPS codes
#'
#' @param x A [numeric] or [character] vector of state names,
#' postal abbreviations, or FIPS codes.
#' Matches for state names and abbreviations are not case sensitive.
#' FIPS codes may be matched from numeric or character vectors,
#' with or without leading zeroes.
#'
#' @return
#' \describe{
#' \item{`as_state_fips_code()`}{
#' A [character] vector of two-digit FIPS codes.
#' One-digit FIPS codes are prefixed with a leading zero,
#' e.g., `"06"` for California.
#' }
#' \item{`as_state_abbreviation()`}{
#' A [character] vector of two-letter postal abbreviations,
#' e.g., `"CA"` for California.
#' }
#' }
#'
#' @examples
#' as_fips_code("california")
#' as_state_abbreviation("california")
#'
#' # Character vector matches ignore case
#' as_fips_code(c("DC", "Md", "va"))
#' as_state_abbreviation(c("district of columbia", "Maryland", "VIRGINIA"))
#'
#' # Note that `3` and `7` are standardized to `NA`,
#' # because no state is assigned those FIPS codes
#' as_fips_code(1:10)
#' as_state_abbreviation(1:10)
#'
#' # You can even mix methods in the same vector
#' as_fips_code(c("utah", "NM", 8, "04"))
#' as_state_abbreviation(c("utah", "NM", 8, "04"))
#'
#' @keywords internal
#' @export
as_fips_code <- function(x) {
state_fips <- wru::state_fips
state_fips$state_code[
dplyr::coalesce(
match(toupper(x), state_fips$state),
match(tolower(x), tolower(state_fips$state_name)),
match(suppressWarnings(as.numeric(x)), as.numeric(state_fips$state_code))
)
]
}
#' @rdname as_fips_code
#' @export
as_state_abbreviation <- function(x) {
state_fips <- wru::state_fips
state_fips$state[
dplyr::coalesce(
match(toupper(x), state_fips$state),
match(tolower(x), tolower(state_fips$state_name)),
match(suppressWarnings(as.numeric(x)), as.numeric(state_fips$state_code))
)
]
} | /scratch/gouwar.j/cran-all/cranData/wru/R/utils_state_fips.R |
#' @importFrom rlang %||%
validate_key <- function(
key,
argument_name = rlang::caller_arg(key),
call = rlang::caller_call()
) {
key <- key %||% Sys.getenv("CENSUS_API_KEY")
if (length(key) != 1) {
cli::cli_abort(
c(
"{.arg {argument_name}} must be a {.cls character} string of length {.val {1}}.",
x = "{.arg {argument_name}} has a length of {.val {length(key)}}."
),
call = call
)
}
if (!inherits(key, "character")) {
cli::cli_abort(
c(
"{.arg {argument_name}} must be a {.cls character} string of length {.val {1}}.",
x = "{.arg {argument_name}} is an object of class {.cls {class(key)}}."
),
call = call
)
}
if (!nzchar(key)) {
cli::cli_abort(
c(
"{.arg {argument_name}} must not be an empty string.",
"*" = "Have you set the {.envvar CENSUS_API_KEY} environment variable?
See {.help wru::get_census_data} for more information."
),
call = call
)
}
if (is.na(key)) {
cli::cli_abort(
"{.arg {argument_name}} must not be {.val {NA_character_}}.",
call = call
)
}
key
}
| /scratch/gouwar.j/cran-all/cranData/wru/R/utils_validate_key.R |
#' Variable vector into chunks.
#'
#' \code{vec_to_chunk} takes a list of variables and collects them into 50-variable chunks.
#'
#' This function takes a list of variable names and collects them into chunks with no more than
#' 50 variables each. This helps to get around requests with more than 50 variables,because the
#' API only allows queries of 50 variables at a time.
#' The user should not need to call this function directly.
#'
#' @param x Character vector of variable names.
#' @return Object of class \code{list}.
#'
#' @examples
#' \dontrun{
#' vec_to_chunk(x = c(paste("P012F0", seq(10:49), sep = ""),
#' paste("P012I0", seq(10, 49), sep = "")))
#' }
#'
#' @references
#' Based on code authored by Nicholas Nagle, which is available
#' \href{https://rstudio-pubs-static.s3.amazonaws.com/19337_2e7f827190514c569ea136db788ce850.html}{here}.
#'
#' @keywords internal
vec_to_chunk <- function(x){
s <- seq_along(x)
x1 <- split(x, ceiling(s/50))
return(x1)
}
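# Example (comment only): for a vector of 120 variable names, vec_to_chunk()
# returns a list of three chunks of lengths 50, 50, and 20.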
| /scratch/gouwar.j/cran-all/cranData/wru/R/vec_to_chunk.R |
#' Example voter file.
#'
#' An example dataset containing voter file information.
#'
#' @format A data frame with 10 rows and 12 variables:
#' \describe{
#' \item{VoterID}{Voter identifier (numeric)}
#' \item{surname}{Surname}
#' \item{state}{State of residence}
#' \item{CD}{Congressional district}
#' \item{county}{Census county (three-digit code)}
#' \item{first}{First name}
#' \item{last}{Last name or surname}
#' \item{tract}{Census tract (six-digit code)}
#' \item{block}{Census block (four-digit code)}
#' \item{precinct}{Voting precinct}
#' \item{place}{Voting place}
#' \item{age}{Age in years}
#' \item{sex}{0=male, 1=female}
#' \item{party}{Party registration (character)}
#' \item{PID}{Party registration (numeric)}
#' }
#' @docType data
#' @keywords datasets
#' @name voters
#' @examples
#' data(voters)
"voters"
| /scratch/gouwar.j/cran-all/cranData/wru/R/voters.R |
.onAttach <- function(libname, pkgname) {
packageStartupMessage(
"\n",
"Please cite as:", "\n\n",
format(utils::citation("wru"), style = "text"), "\n\n",
"Note that wru 2.0.0 uses 2020 census data by default.", "\n",
'Use the argument `year = "2010"`, to replicate analyses produced with earlier package versions.',
"\n"
)
}
| /scratch/gouwar.j/cran-all/cranData/wru/R/wru-internal.R |
#' @useDynLib wru, .registration=TRUE
#' @importFrom Rcpp evalCpp
NULL
| /scratch/gouwar.j/cran-all/cranData/wru/R/wru.R |
.onUnload <- function (libpath) {
library.dynam.unload("wru", libpath)
} | /scratch/gouwar.j/cran-all/cranData/wru/R/zzz.R |
calculate.CV <- function(formula, data, offset = NULL, weights = NULL, kernel = c("Gaussian", "Epanechnikov"), kbin = 25, family = c("gaussian", "binomial", "poisson"), KfoldCV = 5) {
family <- match.arg(family)
kernel <- match.arg(kernel)
n <- nrow(data)
if(is.null(weights)) {
weights <- rep(1, n)
}
ECM <- vector(length = 0)
random <- runif(n, min = 0, max = 1)
  breaks <- c(0:KfoldCV)/KfoldCV
  groups <- cut(random, breaks)
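  # Randomly assign each observation to one of KfoldCV folds; each fold is
  # then held out once as the test set while the model is fit on the rest.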
#for (k in 1:KfoldCV) {
for (x in levels(groups)) {
#ii <- sample(n, size = 0.70*n)
#train <- data[ii,]
#test <- data[-ii,]
#wtrain <- weights[ii]
#wtest <- weights[-ii]
train <- data[-which(groups == x),]
test <- data[which(groups == x),]
wtrain <- weights[-which(groups == x)]
wtest <- weights[which(groups == x)]
offtrain <- offset[-which(groups == x)]
offtest <- offset[which(groups == x)]
mod <- sback.fit(formula = formula, data = train, offset = offtrain, weights = wtrain, kernel = kernel, kbin = kbin, family = family, newdata = test, newoffset = offtest, pred = TRUE)
if(mod$fit$err == 0) {
response <- as.character(attr(terms(formula), "variables")[2]) # Response variable
ECM <- append(ECM, dev(test[,response], mod$pfitted.values, wtest, family = family))
} else {
ECM <- append(ECM, NA)
}
}
ECM
} | /scratch/gouwar.j/cran-all/cranData/wsbackfit/R/calculate.CV.R |
construct.fixed.part <- function(formula, data) {
#env <- environment(formula)
if(inherits(formula, "character"))
formula <- as.formula(formula)
mf <- model.frame(formula, data, drop.unused.levels = TRUE)
mt <- terms(mf)
X <- model.matrix(mt, mf)
dim <- table(attr(X,"assign"))[-1]
names(dim) <- attr(mt, "term.labels")
attr(mt, "contrast") <- attr(X,"contrast")
attr(mt, "xlev") <- .getXlevels(mt, mf)
res <- list(X = X[,-1, drop = FALSE], dim = dim, terms = mt)
res
}
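# Example (comment only): with a data frame holding a factor `f` and a
# numeric `x`, construct.fixed.part(~ f + x, data) returns the model matrix
# without its intercept column, the number of columns contributed by each
# term in `dim`, and the terms object with contrasts and factor levels
# recorded for later prediction.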
| /scratch/gouwar.j/cran-all/cranData/wsbackfit/R/construct.fixed.matrix.R |
construct.fixed.prediction.matrix <- function(object, newdata) {
#if(!is.null(object$terms$fixed)) {
mfp <- model.frame(object$terms, newdata, xlev = attr(object$terms, "xlev"))
Xp <- model.matrix(object$terms, data = mfp, contrasts.arg = attr(object$terms, "contrast"))
Xp <- Xp[,-1,drop = FALSE]
#} else {
# Xp <- NULL
#}
Xp
}
| /scratch/gouwar.j/cran-all/cranData/wsbackfit/R/construct.fixed.prediction.matrix.R |
create.formula <- function(formula, data, h0 = 0) {
A <- interpret.sbformula(formula)
aux <- vector()
for (i in 1:A$npartial) {
if (A$II[1,i] == "ONE") {
if (A$h[i] == -1) {
#range <- max(data[, A$II[2,i]], na.rm = TRUE) - min(data[, A$II[2,i]], na.rm = TRUE)
sd <- sd(data[, A$II[2,i]], na.rm = TRUE)
#aux[i] <- paste ("sb(", A$II[2,i], ", h = ", h0*range/sd, ")",sep="")
aux[i] <- paste ("sb(", A$II[2,i], ", h = ", round(h0*sd, 4), ")",sep="")
} else if (A$h[i]>0) {
aux[i] <- paste ("sb(", A$II[2,i], ", h = ", A$h[i], ")",sep="")
} else {
aux[i] <- A$II[2,i]
}
} else {
if (A$h[i] == -1) {
#range <- max(data[, A$II[2,i]], na.rm = TRUE) - min(data[, A$II[2,i]], na.rm = TRUE)
sd <- sd(data[, A$II[2,i]], na.rm = TRUE)
#aux[i] <- paste ("sb(", A$II[2,i],", by = ", A$II[1,i], ", h = ", h0*range/sd, ")",sep="")
aux[i] <- paste ("sb(", A$II[2,i],", by = ", A$II[1,i], ", h = ", round(h0*sd, 4), ")",sep="")
} else {
aux[i] <- paste ("sb(", A$II[2,i], ", h = ", A$h[i],", by = ", A$II[1,i], ")",sep="")}
}
}
res <- as.formula(paste(A$response, "~", paste(aux, collapse = "+", sep = ""), sep = ""))
res
} | /scratch/gouwar.j/cran-all/cranData/wsbackfit/R/create.formula.R |
create.formula.alpha <- function(formula, data, alpha0 = 0.5) {
A <- interpret.sbformula(formula)
aux <- vector()
for (i in 1:A$npartial) {
if (A$II[1,i] == "ONE") {
if (A$h[i] == -1) {
stop("For the alpha correction the user needs to specify bandwidth parameters for all nonparametric functions.")
} else if (A$h[i]>0) {
aux[i] <- paste ("sb(", A$II[2,i], ", h = ", round(A$h[i]*alpha0, 4), ")",sep="")
} else {
aux[i] <- A$II[2,i]
}
} else {
if (A$h[i] == -1) {
stop("For the alpha correction the user needs to specify bandwidth parameters for all nonparametric functions.")
} else {
aux[i] <- paste("sb(", A$II[2,i], ", h = ", round(A$h[i]*alpha0, 4),", by = ", A$II[1,i], ")",sep="")}
}
}
res <- as.formula(paste(A$response, "~", paste(aux, collapse = "+", sep = ""), sep = ""))
res
} | /scratch/gouwar.j/cran-all/cranData/wsbackfit/R/create.formula.alpha.R |
dev <- function(y, mu, w, family = c("gaussian", "binomial", "poisson")) {
family <- match.arg(family)
if (family == "gaussian") {
err <- sum(w*(y-mu)^2)
} else if (family == "binomial") {
mu[mu > 0.9999] = 0.999
mu[mu < 0.0001] = 0.001
err <- sum(-2*w*(y*log(mu)+(1-y)*log(1-mu)))
} else if (family == "poisson") {
r <- mu*w
p <- which(y > 0)
r[p] <- (w*(y*log(y/mu) - (y - mu)))[p]
err <- sum(2*r)
}
err <- err/sum(w)
err
}
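# Worked example (comment only): with family = "binomial", y = c(1, 0),
# mu = c(0.8, 0.2), w = c(1, 1), each observation contributes
# -2 * log(0.8) ~ 0.446, so dev() returns (0.446 + 0.446) / 2 ~ 0.446.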
dev.residuals <- function(y, mu, w, family = c("gaussian", "binomial", "poisson")) {
family <- match.arg(family)
if (family == "gaussian") {
dev.residuals <- w*(y-mu)^2
} else if (family == "binomial") {
mu[mu > 0.9999] = 0.999
mu[mu < 0.0001] = 0.001
dev.residuals <- -2*w*(y*log(mu)+(1-y)*log(1-mu))
} else if (family == "poisson") {
r <- mu*w
p <- which(y > 0)
r[p] <- (w*(y*log(y/mu) - (y - mu)))[p]
dev.residuals <- 2*r
}
s <- sign(y - mu)
dev.residuals <- sqrt(pmax(dev.residuals, 0))*s
dev.residuals
}
dev.pearson <- function(y, mu, w, family = c("gaussian", "binomial", "poisson")) {
family <- match.arg(family)
if (family == "gaussian") {
dev.pearson <- (y - mu) * sqrt(w)
} else if (family == "binomial") {
mu[mu > 0.9999] = 0.999
mu[mu < 0.0001] = 0.001
dev.pearson <- (y - mu) * sqrt(w)/sqrt(mu*(1-mu))
} else if (family == "poisson") {
dev.pearson <- (y - mu) * sqrt(w)/sqrt(mu)
}
dev.pearson
}
dev.working <- function(y, mu, w, family = c("gaussian", "binomial", "poisson")) {
family <- match.arg(family)
if (family == "gaussian") {
dev.working <- (y - mu)
} else if (family == "binomial") {
mu[mu > 0.9999] = 0.999
mu[mu < 0.0001] = 0.001
dev.working <- (y - mu)/(mu*(1-mu))
} else if (family == "poisson") {
dev.working <- (y - mu)/pmax(mu, .Machine$double.eps)
}
dev.working
}
residuals.sback <- function (object, type = c("deviance", "pearson", "working", "response"), ...) {
type <- match.arg(type)
fsb <- interpret.sbformula(object$formula)
y <- object$data[,fsb$response]
mu <- object$fitted.values
w <- object$weights
family <- object$family
res <- switch(type,
deviance = dev.residuals(y, mu, w, family),
pearson = dev.pearson(y, mu, w, family),
working = dev.working(y, mu, w, family),
response = y - mu)
res
} | /scratch/gouwar.j/cran-all/cranData/wsbackfit/R/dev.R |
interpret.sbformula <-
function(formula) {
env <- environment(formula)
if(inherits(formula, "character"))
formula <- as.formula(formula)
tf <- terms.formula(formula, specials = c("sb"))
terms <- attr(tf, "term.labels")
if(length(grep(":",terms)) != 0) stop("Symbol '*' is not allowed")
nt <- length(terms)
if(attr(tf, "response") > 0) {
ns <- attr(tf, "specials")$sb - 1 # -1 for the response
response <- as.character(attr(tf, "variables")[2])
} else {
ns <- attr(tf, "specials")$sb
response <- NULL
}
II <- list()
h <- list()
partial <- vector()
partial.s <- vector()
partial.p <- vector()
n.s <- n.p <- 0
k <- 0
if(nt) {
for (i in 1:nt) {
if (i %in% ns) {
k <- k + 1
n.s <- n.s + 1
st <- eval(parse(text = terms[i]), envir = env)
II[[k]] <- st$cov
h[[k]] <- st$h
partial[k] <- terms[i]
partial.s[n.s] <- terms[i]
} else {
k <- k + 1
n.p <- n.p + 1
II[[k]]<- c("ONE", terms[i])
h[[k]] <- 0
partial[k] <- terms[i]
partial.p[n.p] <- terms[i]
}
}
}
II <- if(length(II)) {
matrix(unlist(II), nrow = 2)
} else {
matrix(0, nrow = 2)
}
res <- list(response = response, II = II, h = unlist(h), npartial = k, partial = partial, partial.s = partial.s, partial.p = partial.p)
res
}
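# Example (comment only):
#   interpret.sbformula(y ~ x1 + sb(x2, h = 0.1) + sb(x3, by = z))
# gives response = "y", npartial = 3, bandwidths h = c(0, 0.1, -1), and a
# 2 x 3 matrix II whose columns are (by-variable, covariate) pairs:
# ("ONE", "x1"), ("ONE", "x2"), ("z", "x3").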
| /scratch/gouwar.j/cran-all/cranData/wsbackfit/R/interpret.sbformula.R |
plot.sback <-
function(x, composed = TRUE, ask = TRUE, select = NULL, ...) {
dots <- list(...)
grph.opt <- names(dots)
p.functions <- colnames(x$effects)[x$h != 0]
if(is.null(select)) {
ind <- 1:length(p.functions)
} else if(!is.numeric(select) | !all(select%in%(1:length(p.functions)))) {
stop("The model terms selected for printing do not exists")
} else {
ind <- select
}
j <- 0
for(i in ind) {
j <- j + 1
if(j > 1 & length(ind) > 1) {
if(ask) readline("Press return for next page ...")
}
# Partial effect
int.f <- interpret.sbformula(as.formula(paste("~", p.functions[i])))
var <- int.f$II[2,]
var.lin.1 <- ifelse(int.f$II[1,] == "ONE", int.f$II[2,], paste(int.f$II[1,], ":", int.f$II[2,], sep = ""))
var.lin.2 <- ifelse(int.f$II[1,] == "ONE", int.f$II[2,], paste(int.f$II[2,], ":", int.f$II[1,], sep = ""))
var.lin <- ifelse(var.lin.1 %in% names(x$coeff), var.lin.1, var.lin.2)
ord <- order(x$data[,var])
x.data <- x$data[ord,var]
y.data.nl <- x$effects[ord,i]
y.data.l <- x$coeff[var.lin]*x.data
if(composed) {
y.composed <- y.data.nl + y.data.l
range <- max(y.composed) - min(y.composed)
min.ylim <- min(y.composed) - 0.1*range
max.ylim <- max(y.composed) + 0.1*range
} else {
range <- max(c(y.data.nl, y.data.l)) - min(c(y.data.nl, y.data.l))
min.ylim <- min(c(y.data.nl, y.data.l)) - 0.1*range
max.ylim <- max(c(y.data.nl, y.data.l)) + 0.1*range
}
main.aux <- if(int.f$II[1,] == "ONE") {
paste(", main = \"Additive effect of ", var, "\"", sep = "")
} else {
paste(", main = \"Varying coefficient \n as function of ", var, "\"", sep = "")
}
stub <- paste(ifelse("xlab" %in% grph.opt, "", paste(", xlab = \"", var, "\"",sep = "")),
ifelse("ylab" %in% grph.opt, "", paste(", ylab = \"", p.functions[i], "\"", sep = "")),
ifelse("main" %in% grph.opt, "", main.aux),
ifelse("type" %in% grph.opt, "", ", type = \"l\""),
ifelse("ylim" %in% grph.opt, "", paste(", ylim = c(", min.ylim,",", max.ylim,")", sep = "")), ",...)", sep = "")
if(composed) {
plot <- paste("plot(x.data, y.composed", stub, sep = "")
eval(parse(text = plot))
rug(x$data[,var])
} else {
plot <- paste("plot(x.data, y.data.nl", stub, sep = "")
eval(parse(text = plot))
stub <- paste(ifelse("lty" %in% grph.opt, "", ",lty = 2"), ",...)", sep = "")
lines <- paste("lines(x.data, y.data.l", stub, sep = "")
eval(parse(text = lines))
rug(x$data[,var])
stub <- paste(ifelse("lty" %in% grph.opt, "", "lty = 1:2"), ", legend = c(\"Non linear\",\"Linear\"), bty = \"n\"", ", cex = ", dots$cex,")", sep = "")
legend <- paste("legend(\"topleft\",", stub, sep = "")
eval(parse(text = legend))
}
# Interaction surface
if(int.f$II[1,] != "ONE") {
x.data <- seq(min(x$data[,var]), max(x$data[,var]), l = 50)
y.data <- seq(min(x$data[,int.f$II[1,]]), max(x$data[,int.f$II[1,]]), l = 50)
z.data <- suppressWarnings(outer(approxfun(x$data[ord,var], x$effects[ord,i] + x$coeff[var.lin]*x$data[ord,var])(x.data), y.data, '*'))
if(ask) readline("Press return for next page....")
persp(x.data, y.data, z.data, xlab = var, ylab = int.f$II[1,], zlab = p.functions[i],
main = paste("Estimated surface for ", var, " and ", int.f$II[1,], sep = ""),
theta = ifelse(is.null(dots$theta), 45, dots$theta),
phi = ifelse(is.null(dots$phi), 45, dots$phi),
ticktype = "detailed",
shade = ifelse(is.null(dots$shade), 0.5, dots$shade),
cex.main = dots$cex.main, cex.axis = dots$cex.axis, cex.lab = dots$cex.lab, cex.sub = dots$cex.sub, cex = dots$cex)
}
}
}
| /scratch/gouwar.j/cran-all/cranData/wsbackfit/R/plot.sback.R |
predict.sback <- function(object, newdata, newoffset = NULL, ...) {
if(missing(newdata)) {
newdata <- object$data
}
n0 <- nrow(newdata)
if(is.null(newoffset)) {
newoffset <- rep(0, n0)
}
fsb <- interpret.sbformula(object$formula)
z.varnames <- fsb$II[1,]
x.varnames <- fsb$II[2,]
newdata[,"ONE"] <- 1.0
if(any(is.na(match(c(x.varnames, z.varnames), names(object$data))))) {
stop("Not all needed variables are supplied in newdata")
}
formula <- create.formula(formula = object$formula, h0 = object$h, data = object$data)
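  # Refit the model on the original data with the stored bandwidths, and
  # evaluate it at newdata (pred = TRUE) to obtain predicted effects/values.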
fit <- sback.fit(formula = formula, data = object$data, offset = object$offset, weights = object$weights, kernel = object$kernel, kbin = object$kbin, family = object$family, newdata = newdata, newoffset = newoffset, call = NULL, pred = TRUE)
out <- list()
out$newdata <- newdata
out$newoffset <- newoffset
out$coeff <- fit$coeff
out$peffects <- fit$peffects
out$pfitted.values <- fit$pfitted.values
out
} | /scratch/gouwar.j/cran-all/cranData/wsbackfit/R/predict.sback.R |
print.sback <-
function(x, ...){
foo0 <- cbind(colnames(x$effects), round(x$h, 4))
colnames(foo0) <- c("Effect", "h")
rownames(foo0) <- rep("", dim(foo0)[1])
cat("Generalized Smooth Backfitting/wsbackfit:\n\n")
cat("Call: "); print(x$call)
cat("\nSample size:", length(x$fitted.values), "\n\nBandwidths used in model:\n")
print(foo0, quote = FALSE)
cat("\nLinear/Parametric components:\n")
print(x$coeff, quote = FALSE)
cat("\n")
}
| /scratch/gouwar.j/cran-all/cranData/wsbackfit/R/print.sback.R |
print.summary.sback <-
function(x, ...) {
print.sback(x)
}
| /scratch/gouwar.j/cran-all/cranData/wsbackfit/R/print.summary.sback.R |
sb <-
function(x1 = NULL, by = NULL, h = -1) {
args <- match.call()
if(!is.null(args$x1) & is.null(args$by)) {
cov = c("ONE", deparse(args$x1, backtick = TRUE, width.cutoff = 500))
} else if (!is.null(args$x1) & !is.null(args$by)) {
cov = c(deparse(args$by, backtick = TRUE, width.cutoff = 500), deparse(args$x1, backtick = TRUE, width.cutoff = 500))
} else {
stop("Invalid expression")
}
res <- list(cov = cov, h = h)
res
}
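# Example (comment only):
#   sb(x1, h = 0.5)          # cov = c("ONE", "x1"), h = 0.5
#   sb(x1, by = z, h = 0.5)  # cov = c("z", "x1"),   h = 0.5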
| /scratch/gouwar.j/cran-all/cranData/wsbackfit/R/sb.R |