'.syn' <- function(new,kind=0,...) { ## function names synonyms
if (!FALSE) {
res <- .parentFunc()
old <- res[length(res)]
res <- res[-c(1,length(res))]
parent <- paste(paste0(res,"()"),collapse=" -> ")
msg <- paste0("In ",parent,": '",old,"()' is obsolete. Use '",new,"()'")
if (kind==2)
{
op <- options(warn=0)
warning(msg,call.=!TRUE)
on.exit(options(op))
}
else if (kind==1)
message(msg)
}
do.call(new,list(...))
}
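## Hypothetical usage sketch (not from this file): an obsolete public name can
## delegate to its replacement while reporting the rename, e.g.
##   'oldName' <- function(...) .syn("newName",kind=2,...)
## kind=2 raises a warning, kind=1 emits a message, kind=0 is silent.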
'.parentFunc' <- function() {
op <- options(warn=0)
n <- sys.parent()-3:1+1
n <- n[n>0]
res <- as.character(sys.call(which=0))[1]
for (i in n) {
res <- c(res,as.character(sys.call(which=i))[1])
}
options(op)
# class(res) <- "parentFunc"
res
}
# '.print.ParentFunc' <- function(x,...) message(paste(x[-1],collapse=" -> "))
## file: /scratch/gouwar.j/cran-all/cranData/ursa/R/yyy.syno.R
## "http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames", license?
# https://gist.github.com/Yago/05d479de169a21ba9fff
# http://b.sm.mapstack.stamen.com/(toner-background,$fff[difference],$fff[@23],$fff[hsl-saturation@20],toner-lines[destination-in])/9/273/172.png
# https://pogoda1.ru/map/precipitation/7/77/40.png
'.deg2numYa' <- function(lat,lon,zoom,verbose=FALSE) {
lat_rad <- lat*pi/180
lon_rad <- lon*pi/180
# n <- 2^zoom
a <- 6378137; ## a*pi==20037508.342789
   k <- 0.0818191908426; ## WGS84 first eccentricity
   b <- 53.5865938 ## pixels per metre at zoom 23: 256*2^23/(2*pi*a)
z1 <- tan(pi/4+lat_rad/2)/'^'(tan(pi/4 + asin(k*sin(lat_rad))/2),k)
ytile = floor((a*pi-a*log(z1))*b/'^'(2,23-zoom)/256)
xtile = floor((a*pi+a*lon_rad)*b/'^'(2,23-zoom)/256)
# xtile <- floor((lon+180)/360*2^zoom)
if (verbose)
print(data.frame(lon=lon,lat=lat,zoom=zoom,x=xtile,y=ytile))
c(xtile,ytile)
}
'.deg2num' <- function(lat,lon,zoom,verbose=FALSE) {
lat_rad <- lat*pi/180
n <- 2^zoom
xtile <- floor((lon+180)/360*n)
ytile <- floor((1-log(tan(lat_rad)+(1/cos(lat_rad)))/pi)/2*n)
if (verbose)
print(data.frame(lon=lon,lat=lat,zoom=zoom,x=xtile,y=ytile))
if (TRUE)
return(c(xtile,ytile))
osm <- paste0("https://",letters[sample(seq(3),1)],".tile.openstreetmap.org")
tile <- paste0(paste(osm,zoom,xtile,ytile,sep="/"),".png")
message(tile)
# fname <- "tile.png"
# download.file(tile,fname,mode="wb",quiet=!verbose)
fname <- .ursaCacheDownload(tile,mode="wb",quiet=!verbose)
return(tile)
}
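## Worked example of the slippy-map arithmetic (not run):
##   .deg2num(lat=0,lon=0,zoom=1)  # -> c(1,1): column and row of the tile
##                                 #    containing (0,0) among the 2x2 tiles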
# https://www.esri.com/arcgis-blog/products/product/mapping/web-map-zoom-levels-updated/
'.webResolution' <- function(zoom) {
s <- 2*6378137*pi/(2^(1:21+8))
if (missing(zoom))
return(s)
s[zoom]
}
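## Expected values (not run): ground resolution of 256-pixel web-mercator tiles
## halves with each zoom level, e.g.
##   .webResolution(1)  # ~78271.52 m/pixel
##   .webResolution(2)  # ~39135.76 m/pixel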
# https://leaflet-extras.github.io/leaflet-providers/preview/
# https://leaflet-extras.github.io/leaflet-providers/leaflet-providers.js
'.tileService' <- function(server="",providers=FALSE) {
language <- if (.lgrep("Russian",ctype <- Sys.getlocale("LC_TIME"))) "ru"
else Sys.getenv("LANGUAGE")
osmCr <- "\uA9 OpenStreetMap contributors"
optHERE <- getOption("HEREapp")
TFkey <- getOption("ThunderforestApiKey")
BingKey <- getOption("BingMapsKey")
StadiaKey <- getOption("stadiamaps_api_key")
mapsurferKey <- getOption("openrouteserviceToken")
googleCr <- "Google: TERMS OF USE ARE VIOLATED"
yandexCr <- "Yandex: TERMS OF USE ARE VIOLATED"
StadiaCr <- paste0("\uA9 Stadia Maps, \uA9 OpenMapTiles ",osmCr)
StamenCr <- paste0("Map tiles by Stamen Design, CC BY 3.0 - Map data ",osmCr)
s <- list()
s$mapnik <- c("https://{abc}.tile.openstreetmap.org/{z}/{x}/{y}.png"
,osmCr) ## # http://{abc}.tile.osm.org/{z}/{x}/{y}.png
s$osmbw <- c("http://{abc}.tiles.wmflabs.org/bw-mapnik/{z}/{x}/{y}.png"
,osmCr)
s$cycle <- c("http://{abc}.tile.opencyclemap.org/cycle/{z}/{x}/{y}.png"
,paste(osmCr,"(Cycle)"))
s$osmfr <- c("http://{abc}.tile.openstreetmap.fr/osmfr/{z}/{x}/{y}.png"
,paste("\uA9 Openstreetmap France",osmCr))
s$transport <- c("http://{abc}.tile2.opencyclemap.org/transport/{z}/{x}/{y}.png"
,osmCr)
# copyright["transport"] <- paste0("Maps \uA9 Thunderforest, Data ",osmCr)
# s$mapsurfer <- c("http://korona.geog.uni-heidelberg.de/tiles/roads/x={x}&y={y}&z={z}"
# ,paste0(osmCr,", GIScience Research Group @ Heidelberg University")
# ,"png")
s$mapsurfer <- c(paste0("https://api.openrouteservice.org/mapsurfer/{z}/{x}/{y}.png?api_key="
,mapsurferKey)
,paste0(osmCr,", powered by MapSurfer.NET")
,"png")
s$mapsurfer.grayscale <- c("http://korona.geog.uni-heidelberg.de/tiles/roadsg/x={x}&y={y}&z={z}"
,paste0(osmCr,", GIScience Research Group @ Heidelberg University")
,"png")
# s$sputnik <- "http://tiles.maps.sputnik.ru/tiles/kmt2/{z}/{x}/{y}.png"
s$sputnik <- c("https://tilessputnik.ru/{z}/{x}/{y}.png"
,paste0(osmCr,", \u0421\u043F\u0443\u0442\u043D\u0438\u043A \uA9 \u0420\u043E\u0441\u0442\u0435\u043B\u0435\u043A\u043E\u043C"))
# http://cartodb-basemaps-c.global.ssl.fastly.net/light_all/6/37/21.png
# http://a.basemaps.cartocdn.com/light_only_labels/6/39/18.png
s$'internal.CartoDB' <- c("https://{abcd}.basemaps.cartocdn.com/light_all/{z}/{x}/{y}{r}.png"
,paste0(osmCr,", \uA9 CartoDB"))
s$'Positron' <- c("https://{abcd}.basemaps.cartocdn.com/light_all/{z}/{x}/{y}{r}.png"
,paste0(osmCr,", \uA9 CartoDB"))
s$'Dark Matter' <- c("https://{abcd}.basemaps.cartocdn.com/dark_all/{z}/{x}/{y}{r}.png"
,paste0(osmCr,", \uA9 CartoDB"))
s$'Voyager' <- c("https://{abcd}.basemaps.cartocdn.com/rastertiles/voyager/{z}/{x}/{y}{r}.png"
,paste0(osmCr,", \uA9 CartoDB"))
s$kosmosnimki <- c("http://{abcd}.tile.osm.kosmosnimki.ru/kosmo/{z}/{x}/{y}.png"
,paste0(osmCr,", \uA9 ScanEx"))
s$Esri.Ocean <- c("https://services.arcgisonline.com/ArcGIS/rest/services/Ocean/World_Ocean_Base/MapServer/tile/{z}/{y}/{x}.jpg"
,"\uA9 Esri: GEBCO, NOAA, CHS, OSU, UNH, CSUMB, National Geographic, DeLorme, NAVTEQ, and Esri")
s$Esri.Topo <- c("http://server.arcgisonline.com/ArcGIS/rest/services/World_Topo_Map/MapServer/tile/{z}/{y}/{x}.jpg"
,"\uA9 Esri - contributors to Esri World Topo Map")
s$Esri.Street <- c("http://server.arcgisonline.com/ArcGIS/rest/services/World_Street_Map/MapServer/tile/{z}/{y}/{x}.jpg"
,"\uA9 Esri - contributors to Esri Street Topo Map")
s$Esri.Terrain <- c("http://server.arcgisonline.com/ArcGIS/rest/services/World_Terrain_Base/MapServer/tile/{z}/{y}/{x}.jpg"
,"\uA9 Esri: USGS, Esri, TANA, DeLorme, and NPS")
s$Esri.Light <- c("https://server.arcgisonline.com/ArcGIS/rest/services/Canvas/World_Light_Gray_Base/MapServer/tile/{z}/{y}/{x}.jpg"
,"\uA9 Esri: Esri, HERE, Garmin, NGA, USGS")
s$Esri.Dark <- c("https://server.arcgisonline.com/ArcGIS/rest/services/Canvas/World_Dark_Gray_Base/MapServer/tile/{z}/{y}/{x}.jpg"
,"\uA9 Esri: Esri, HERE, Garmin, NGA, USGS")
s$Esri.Hillshade <- c("https://server.arcgisonline.com/ArcGIS/rest/services/Elevation/World_Hillshade/MapServer/tile/{z}/{y}/{x}.jpg"
,"\uA9 ESRI World Hillshade")
s$Esri.Satellite <- c("https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}.jpg"
,"\uA9 ESRI Satellite")
s$internal.Esri.WorldImagery <- c("https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}.jpg"
,"Tiles \uA9 Esri - Source: Esri, i-cubed, USDA, USGS, AEX, GeoEye, Getmapping, Aerogrid, IGN, IGP, UPR-EGP, and the GIS User Community")
s$Esri.Clarity <- c("https://clarity.maptiles.arcgis.com/arcgis/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}"
,"ESRI.Clarity")
s$HERE.Aerial <- c(url=paste0("https://{1234}.aerial.maps.cit.api.here.com/maptile"
,"/2.1/maptile/newest/satellite.day/{z}/{x}/{y}/256/png8?"
,"app_id=",optHERE$id,"&app_code=",optHERE$code,"&lg=eng")
,cite="Map \uA9 1987-2014 HERE"
,ext="png")
s$'2gis' <- c("https://tile{0123}.maps.2gis.com/tiles?x={x}&y={y}&z={z}" #&v=1.2"
,paste0(osmCr,", API 2GIS")
,"png")
s$TF.Outdoors <- c(paste0("https://{abc}.tile.thunderforest.com/outdoors/{z}/{x}/{y}.png?apikey=",TFkey)
,paste0("Maps \uA9 Thunderforest, Data ",osmCr))
s$TF.Landscape <- c(paste0("https://{abc}.tile.thunderforest.com/landscape/{z}/{x}/{y}.png?apikey=",TFkey)
,paste0("Maps \uA9 Thunderforest, Data ",osmCr))
s$Bing.Map <- c(url=paste0("https://t{0123}.ssl.ak.dynamic.tiles.virtualearth.net/comp/ch/{q}"
,"?mkt=en-us&it=G,L&shading=hill&og=80&n=z&key=",BingKey)
,cite=paste0("Bing \uA9 Microsoft and its suppliers")
,ext="jpg")
s$Bing.Satellite <- c(paste0("http://ecn.t{0123}.tiles.virtualearth.net/tiles/a{q}.jpeg?g=0&dir=dir_n'&n=z&key=",BingKey)
,paste0("Bing \uA9 Microsoft and its suppliers")
,"jpg")
s$opentopomap <- c("http://{abc}.tile.opentopomap.org/{z}/{x}/{y}.png"
,paste0(osmCr,", \uA9 OpenTopoMap"))
s$wikimedia <- c("https://maps.wikimedia.org/osm-intl/{z}/{x}/{y}{r}.png"
,paste0("Wikimedia | ",osmCr))
s$ArcticConnect <- c("https://{abc}.tiles.arcticconnect.ca/osm_{l}/{z}/{x}/{y}.png"
,paste0("Map \uA9 ArcticConnect. Data ",osmCr))
s$ArcticSDI <- c(paste0("http://basemap.arctic-sdi.org/mapcache?"
,"&service=WMS"
,"&request=GetMap"
,"&layers=arctic_cascading"
,"&styles="
,"&format=image/jpeg"
,"&transparent=true"
,"&version=1.1.1"
,"&width=256&height=256"
,"&srs=EPSG:{l}"
,"&bbox={minx},{miny},{maxx},{maxy}"
)
,paste0("ArcticSDI")
,"jpg")
# s$polarmap <- c("https://{abc}.tiles.arcticconnect.ca/osm_{l}/{z}/{x}/{y}.png"
# ,paste0("Map \uA9 ArcticConnect. Data ",osmCr))
s$polarmap <- if (T) s$ArcticSDI else s$ArcticConnect
s$google.h <- c(paste0("https://mt{0123}.google.com/vt/lyrs=h" ## roads only
,"&x={x}&y={y}&z={z}&hl=",language),googleCr)
s$google <- c(paste0("https://mt{0123}.google.com/vt/lyrs=m" ## standard roadmap
,"&x={x}&y={y}&z={z}&hl=",language),googleCr)
s$google.ru <- c(paste0("https://mt{0123}.google.com/vt/lyrs=m" ## standard roadmap
,"&x={x}&y={y}&z={z}&hl=","ru"),googleCr)
s$google.m <- c(paste0("https://mt{0123}.google.com/vt/lyrs=m" ## standard roadmap
,"&x={x}&y={y}&z={z}&hl=",language),googleCr)
s$google.r <- c(paste0("https://mt{0123}.google.com/vt/lyrs=r" ## somehow altered roadmap
,"&x={x}&y={y}&z={z}&hl=",language),googleCr)
s$google.s <- c(paste0("https://mt{0123}.google.com/vt/lyrs=s" ## satellite only
,"&x={x}&y={y}&z={z}&hl=",language),googleCr)
s$google.y <- c(paste0("https://mt{0123}.google.com/vt/lyrs=y" ## hybrid
,"&x={x}&y={y}&z={z}&hl=",language),googleCr)
s$google.t <- c(paste0("https://mt{0123}.google.com/vt/lyrs=t" ## terrain only
,"&x={x}&y={y}&z={z}&hl=",language),googleCr)
s$google.p <- c(paste0("https://mt{0123}.google.com/vt/lyrs=p" ## terrain
,"&x={x}&y={y}&z={z}&hl=",language),googleCr)
s$'Yandex' <- c(paste0("https://vec0{1234}.maps.yandex.net/tiles?l=map"
,"&x={x}&y={y}&z={z}&scale={r}&lang="
,switch(language,ru="ru_RU","en_US")),yandexCr)
s$'Yandex.Map' <- c(paste0("https://vec0{1234}.maps.yandex.net/tiles?l=map"
,"&x={x}&y={y}&z={z}&scale={r}&lang="
,switch(language,ru="ru_RU","en_US")),yandexCr)
s$'Yandex.Satellite' <- c(paste0("https://vec0{1234}.maps.yandex.net/tiles?l=sat"
,"&x={x}&y={y}&z={z}&scale={r}&lang="
,switch(language,ru="ru_RU","en_US")),yandexCr)
# '\u044f\u043d\u0434\u0435\u043a\u0441'
s$'Yandex.ru' <- c(paste0("https://vec0{1234}.maps.yandex.net/"
,"tiles?l=map&x={x}&y={y}&z={z}&scale={r}&lang=ru_RU"),yandexCr)
s$mapy <- c("https://mapserver.mapy.cz/base-m/{r}/{z}-{x}-{y}","mapy.cz")
s$'internal.Stadia.AlidateSmooth' <- c(paste0("https://tiles.stadiamaps.com/tiles/alidade_smooth"
,"/{z}/{x}/{y}{r}.png","?api_key=",StadiaKey)
,StadiaCr)
s$'internal.Stadia.AlidateSmoothDark' <- c(paste0("https://tiles.stadiamaps.com/tiles/alidade_smooth_dark"
,"/{z}/{x}/{y}{r}.png","?api_key=",StadiaKey)
,StadiaCr)
s$'internal.zzzStadia.OSMBright' <- c(paste0("https://tiles.stadiamaps.com/tiles/osm_bright"
,"/{z}/{x}/{y}{r}.png","?api_key=",StadiaKey)
,StadiaCr)
s$'internal.Stadia.Outdoors' <- c(paste0("https://tiles.stadiamaps.com/tiles/outdoors"
,"/{z}/{x}/{y}{r}.png","?api_key=",StadiaKey)
,StadiaCr)
s$'Stamen.Terrain' <- c(paste0("https://stamen-tiles-{abcd}.a.ssl.fastly.net/terrain"
,"/{z}/{x}/{y}{r}.png")
,StamenCr)
s$'Stamen.TonerLite' <- c(paste0("https://stamen-tiles-{abcd}.a.ssl.fastly.net/toner-lite"
,"/{z}/{x}/{y}{r}.png")
,StamenCr)
s$'rumap' <- c("https://{abcd}tilecart.kosmosnimki.ru/rw/{z}/{x}/{y}.png"
,"\u0420\u435\u043b\u044c\u0435\u0444 \u0420\u0443\u043c\u0430\u043f Scanex")
s$'wikimapia' <- c("https://{s}.wikimapia.org/?x={x}&y={y}&zoom={z}&type=map&lng=1"
,"Wikimapia CC-BY-SA")
# http://a.maps.owm.io/map/precipitation_new/6/37/19?appid=b1b15e88fa797225412429c1c50c122a1
if (!sum(nchar(server))) {
val1 <- .grep(".*zzz(google|yandex).*",names(s),value=TRUE,invert=TRUE)
if ((providers)&&(requireNamespace("leaflet",quietly=.isPackageInUse()))&&
(requireNamespace("leaflet.providers",quietly=.isPackageInUse()))) {
cname <- file.path(.ursaCacheDir(),"leaflet_providers.rds")
if (!file.exists(cname)) {
val2 <- try(leaflet.providers::get_providers())
if (!inherits(val2,"try-error"))
saveRDS(val2,cname)
}
if (file.exists(cname)) {
val2 <- readRDS(cname)$providers
val1 <- unique(c(val1,val2))
}
}
# print(.grep(".*zzz(google|yandex).*",names(s),value=TRUE,invert=TRUE))
return(val1)
}
if (!(server[1] %in% names(s))) {
for (i in seq_along(s)) {
if (.lgrep("http",server))
ind <- 0L
else
ind <- .lgrep(server[1],s[[i]])
if (ind>0)
break
}
if (!ind) {
if (TRUE)
style <- server
else {
ret <- names(s)
# attr(ret,"copyright") <- copyright
return(ret)
}
}
else
style <- s[[ind[1]]]
}
else
style <- s[[server]]
if ((.lgrep("HERE",server))&&(is.null(optHERE)))
message("'options(HEREapp=list(id=<app_id>,code=<app_code>))' is required")
if ((.lgrep("^TF\\.",server))&&(is.null(TFkey)))
message("'options(ThunderforestApiKey=<api_key>)' is required")
if ((.lgrep("^Bing\\.",server))&&(is.null(BingKey)))
message("'options(BingMapsKey=<api_key>)' is required")
if ((.lgrep("mapsurfer",server))&&(is.null(mapsurferKey)))
message("'options(openrouteserviceToken=<api_key>)' is required")
if ((.lgrep("^Stadia\\.",server))&&(is.null(StadiaKey)))
message("'options(Stadiamaps_api_key=<api_key>)' is required")
# if (length(server)==1)
# style <- unlist(strsplit(server,split="\\s+"))
tile <- list(name="custom",url="",copyright=" ",fileext="___")
if (server[1] %in% names(s))
tile$name <- server
indUrl <- .grep("^http(s)*://",style)
if (!length(indUrl)) {
patt <- dirname(style)
indUrl <- which(patt!=".")
if (!length(indUrl)) {
patt <- file.path(getOption("SAS_Planet_cache"),style)
indUrl <- which(dir.exists(patt))
if (length(indUrl)==1)
style[indUrl] <- patt[indUrl]
}
else
if (!dir.exists(patt[indUrl]))
indUrl <- integer()
if (length(style)==1)
style <- c(style,"from SAS Planet cache")
}
if (!length(indUrl))
return(names(s))
indUrl <- indUrl[1]
indExt <- .grep("(png|jpg|jpeg)",style)
indCite <- seq(style)[-unique(c(indUrl,indExt))]
if (!length(indCite)) {
opW <- options(warn=1)
warning("Cannot identify citation/copyright/attribution for tile service")
options(opW)
}
tile$url <- style[indUrl]
if (length(indExt)>1)
pattExt <- paste0(".",style[indExt[indExt!=indUrl]])
else if ((length(indExt)==1)&&(indExt==indUrl))
pattExt <- style[indExt]
else
pattExt <- paste0(".",style[indExt])
if (.lgrep("(\\.|image/)(jpg|jpeg)",pattExt))
tile$fileext <- "jpg"
else if (.lgrep("(\\.|image/)png",pattExt))
tile$fileext <- "png"
else {
# cat(paste("Unable to detect either 'png' or 'jpg' format in url:"
# ,tile$url,"\n"))
# stop()
}
if (length(indCite))
tile$copyright <- style[indCite]
if (tile$name=="custom") {
if (.is.wms(tile$url)) {
wurl <- .grep("^(request=|service=WMS)",tile$url,value=TRUE,invert=TRUE)
tile$url <- paste0(paste(wurl,collapse="&")
,"&width=256&height=256"
,"&service=WMS&request=GetMap")
}
attr(tile$url,"credentials") <- attr(style,"credentials")
}
# print(tile);q()
tile
}
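## Illustrative result (not run), assuming the default "mapnik" entry above:
##   .tileService("mapnik")
##   # -> list(name="mapnik",
##   #         url="https://{abc}.tile.openstreetmap.org/{z}/{x}/{y}.png",
##   #         copyright="\uA9 OpenStreetMap contributors", fileext="png")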
'.tileGet' <- function(z=4,x=10,y=3,minx=-2e7,miny=-2e7,maxx=2e7,maxy=2e7
,w=256,h=256,retina=NA,url,fileext,ursa=FALSE,cache=TRUE
,verbose=FALSE) {
if (is.na(retina))
retina <- getOption("ursaRetina")
if (isFALSE(is.numeric(retina)))
retina <- 1
isRetina <- retina>1
tile <- .gsub("{z}",z,.gsub("{y}",y,.gsub("{x}",x,url)))
tile <- .gsub("{h}",h,.gsub("{w}",w,tile))
tile <- .gsub("{maxy}",maxy,.gsub("{maxx}",maxx
,.gsub("{miny}",miny,.gsub("{minx}",minx,tile))))
if (.lgrep("maps.+yandex",tile)>0)
tile <- .gsub("{r}",ifelse(isRetina,"2","1"),tile)
else if (.lgrep("mapy\\.cz",tile)>0)
tile <- .gsub("{r}",ifelse(isRetina,"retina",""),tile)
else
tile <- .gsub("{r}",ifelse(isRetina,"@2x",""),tile)
if (.lgrep("{q}",tile)) {
b1 <- b2 <- rep(0,z)
for (i in seq(z)) {
b1[i] <- x%%2
b2[i] <- y%%2
x <- x%/%2
y <- y%/%2
}
b5 <- apply(matrix(c(matrix(rbind(rev(b2),rev(b1)),ncol=2)),nrow=2)
,2,function(x) strtoi(paste(x,collapse=""),base=2L))
tile <- .gsub("{q}",paste(b5,collapse=""),tile)
}
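   ## The block above interleaves the bits of y and x from the most significant
   ## zoom level down, producing a Bing-style quadkey; e.g. z=3, x=3, y=5
   ## yields "213".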
if (grepl("\\{s\\}\\.wikimapia\\.org",tile)) {
s <- paste0("i",(x %% 4)+(y %% 4)*4L)
tile <- gsub("\\{s\\}",s,tile)
}
if ((!FALSE)&&(.lgrep("\\{..+}",tile))) {
dom <- unlist(strsplit(.gsub2("\\{(.+)\\}","\\1",gsub("\\{.\\}","",tile)),""))
##~ print(tile)
##~ print(dom)
tile <- .gsub("{.+}",sample(dom,1),tile)
}
# fname <- tempfile(fileext=".tile")
if (!.isPackageInUse()) {
tile2prn <- tile
attr(tile2prn,"credentials") <- NULL
print(tile2prn)
}
if ((dir.exists(tile))&&(requireNamespace("RSQLite",quietly=!.isPackageInUse()))) {
## try SAS Planet
list1 <- dir(path=file.path(tile,paste0("z",z+1L)),pattern="\\.sqlitedb$"
,recursive=TRUE,full.names=TRUE)
if (!length(list1))
return("missed in cache")
for (i in seq_along(list1)) {
mydb <- RSQLite::dbConnect(RSQLite::SQLite(),list1[i])
a <- RSQLite::dbGetQuery(mydb
,paste0("SELECT * FROM t where t.x=",x," and ","t.y=",y))
RSQLite::dbDisconnect(mydb)
if (nrow(a))
break
}
if (!nrow(a))
return("missed in cache")
fname <- a$b[[1]]
d <- rawToChar(fname[1:8],multiple=TRUE) |> paste(collapse="")# |> as.character()
Encoding(d) <- "latin1" # |> print()
if (grepl("PNG",d))
fileext <- "png"
else if (grepl("JFIF",d))
fileext <- "jpg"
else
fileext <- "jpg"
}
else if (file.exists(tile)) {
fname <- tile
}
else
fname <- .ursaCacheDownload(tile,mode="wb",cache=cache,quiet=!verbose)
if (inherits(fname,"try-error")) {
return(fname)
# message(a)
# stop()
}
# message(tile)
# download.file(tile,fname,method="curl",mode="wb",quiet=FALSE
# ,extra="-H Accept-Language:de")
isPNG <- FALSE
isJPEG <- FALSE
isGIF <- FALSE
if (isPNG <- fileext %in% c("png"))
a <- try(255*png::readPNG(fname),silent=!verbose)
else if (isJPEG <- fileext %in% c("jpg","jpeg")) {
if (!requireNamespace("jpeg",quietly=.isPackageInUse()))
stop("Suggested package 'jpeg' missed, but is required here.")
a <- try(255*jpeg::readJPEG(fname),silent=!verbose)
}
else {
a <- try(255*png::readPNG(fname),silent=!verbose)
if (inherits(a,"try-error")) {
a <- try(255*jpeg::readJPEG(fname),silent=!verbose)
isJPEG <- !inherits(a,"try-error")
if (inherits(a,"try-error")) {
print("Cannot read either 'png' or 'jpg/jpeg' file.")
}
}
else
isPNG <- !inherits(a,"try-error")
}
if (inherits(a,"try-error")) {
if (!FALSE) { ## erroneous file extension
isPNG <- FALSE
isJPEG <- FALSE
a <- try(255*png::readPNG(fname),silent=!verbose)
if (inherits(a,"try-error"))
a <- try(255*jpeg::readJPEG(fname),silent=!verbose)
if (inherits(a,"try-error")) {
# if (requireNamespace("miss_caTools",quietly=.isPackageInUse())) {
# stop("caTools")
# }
g0 <- session_grid()
a <- read_gdal(fname)
session_grid(g0)
if (inherits(a,"try-error"))
cat(geterrmessage())
if (ursa_blank(a,NA))
ursa_value(a) <- 0
a <- as.array(a)
}
}
else
cat(geterrmessage())
return(a)
}
# file.remove(fname)
dima <- dim(a)
dimb <- c(h,w)*ifelse(isRetina,2,1)
reduce <- (TRUE)&&((dima[1]!=dimb[1])||(dima[2]!=dimb[2]))
# print(dima)
# print(dimb)
if (reduce) {
mul <- mean(dima[1:2]/dimb[1:2])
# print(mul)
# .elapsedTime("firstrun 0205a")
a <- as.array(regrid(as.ursa(a)
,res=c(dima[1]/dimb[1],dima[2]/dimb[2])
,resample=ifelse(mul>1,1,0.75)
,cover=1e-6,verbose=0L))
dima <- dim(a)
if (isPNG)
png::writePNG(a/256,fname)
else if (isJPEG)
jpeg::writeJPEG(a/256,fname)
else
stop("unable to update file")
a <- .round(a)
# .elapsedTime("firstrun 0205b")
}
a <- as.integer(c(a))
dim(a) <- dima
if (!ursa)
return(a)
epsg3857 <- paste("+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0"
,"+lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m"
,"+nadgrids=@null +wktext +no_defs")
n <- 2^z
lon <- (x+c(0,1))/n*360-180
lat <- atan(sinh(pi*(1-2*(y+c(0,1))/n)))*180/pi
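   ## inverse of '.deg2num': west/east longitudes and north/south latitudes of
   ## the tile edges, projected below to web-mercator metres for the raster grid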
xy <- .project(cbind(lon,rev(lat)),epsg3857)
dima <- dim(a)
g1 <- regrid(ursa_grid(),setbound=c(xy)[c(1,3,2,4)]
,columns=dima[2],rows=dima[1],crs=epsg3857)
b <- as.integer(255/255*as.ursa(a,aperm=TRUE,flip=TRUE))
ursa(b,"grid") <- g1
attr(b,"copyright") <- "For personal use only"
# session_grid(b)
# display(b,scale=1,coast=FALSE)
b
}
## file: /scratch/gouwar.j/cran-all/cranData/ursa/R/yyy.tile.R
'.elapsedTime' <- function(message="",reset=FALSE,toPrint=FALSE)
{
startTime <- getOption("ursaTimeStart")
deltaTime <- getOption("ursaTimeDelta")
if (message=="")
message <- paste(as.character(Sys.time()),"***")
else
message <- paste(message,":",sep="")
mytext <- sprintf("*** %s: %s %.2f(%.2f) seconds ***"
# ,as.character(Sys.time())
,.argv0()
,message,(proc.time()-startTime)[3]
,(proc.time()-deltaTime)[3])
if (reset)
options(ursaTimeStart=proc.time())
options(ursaTimeDelta=proc.time())
if (toPrint)
print(mytext)
return (message(mytext))
}
'.round' <- function(x,digits=0,eps=.Machine$double.eps*9)
{
round(x+sign(x)*eps,digits=digits)
}
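## Motivating example (not run): base round() uses round-half-to-even,
##   round(0.5)   # -> 0
##   .round(0.5)  # -> 1 (the epsilon nudge resolves half-way cases upward)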
'.try' <- function(...)
{
a <- try(...)
# if ((is.character(a))&&(class(a)=="try-error"))
if (inherits(a,"try-error"))
return(FALSE)
return(TRUE)
}
'.dir' <- function(pattern=NULL,path=".",all.files=FALSE,full.names=FALSE
,recursive=FALSE,ignore.case=TRUE,include.dirs=FALSE)
{
a <- dir(path=path,pattern=NULL,all.files=all.files,full.names=full.names
,recursive=recursive,ignore.case=ignore.case,include.dirs=include.dirs)
if (is.null(pattern))
return (a)
if (!.try(b <- basename(a)))
{
b <- a
for (i in seq(along=a))
if (!.try(b[i] <- basename(a[i])))
b[i] <- NA
}
a[grep(pattern,b,perl=TRUE,ignore.case=ignore.case)]
}
'.grep' <- function(pattern,x,ignore.case=TRUE,perl=TRUE
,value=FALSE,fixed=FALSE,useBytes=FALSE,invert=FALSE)
{
grep(pattern,x,ignore.case=ignore.case,perl=perl
,value=value,fixed=fixed,useBytes=useBytes,invert=invert)
}
'.gsub' <- function(pattern,replacement,x,ignore.case=TRUE
,perl=TRUE,fixed=FALSE,useBytes=FALSE)
{
gsub(pattern,replacement,x,ignore.case=ignore.case
,perl=perl,fixed=fixed,useBytes=useBytes)
}
'.gsub2' <- function(pattern,replacement,x,ignore.case=TRUE
,perl=TRUE,fixed=FALSE,useBytes=FALSE)
{
mypattern <- sprintf("^.*%s.*$",pattern)
gsub(mypattern,replacement,x,ignore.case=ignore.case
,perl=perl,fixed=fixed,useBytes=useBytes)
}
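## Illustrative extraction (not run): the pattern is anchored to the whole
## string, so a capture group pulls out a substring, e.g.
##   .gsub2("v(\\d+)","\\1","file_v12.png")  # -> "12"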
'.lgrep' <- function(pattern,x,ignore.case=TRUE,perl=TRUE
,value=FALSE,fixed=FALSE,useBytes=FALSE,invert=FALSE)
{
length(grep(pattern,x,ignore.case=ignore.case,perl=perl
,value=value,fixed=fixed,useBytes=useBytes,invert=invert))
}
'.dirname' <- function(x)
{
a <- gregexpr("(/|\\\\)",x,ignore.case=TRUE,perl=TRUE)
.gsub("^$",".",substr(x,1,max(a[[1]]-1)))
}
'.basename' <- function(x)
{
a <- gregexpr("(/|\\\\)",x,ignore.case=TRUE,perl=TRUE)
substr(x,max(a[[1]])+1,nchar(x))
}
'.expand.grid' <- function(...,KEEP.OUT.ATTRS=FALSE,stringsAsFactors=FALSE)
expand.grid(...,KEEP.OUT.ATTRS=KEEP.OUT.ATTRS,stringsAsFactors=stringsAsFactors)
'.gc' <- function(verbose=FALSE)
{
if (verbose)
a1 <- gc()
a2 <- gc(reset=TRUE)
if (verbose)
{
print(a1)
print(a2)
}
invisible(NULL)
}
# '.paste' <- function(...,sep="",collapse=NULL) paste(...,sep=sep,collapse=collapse)
'.maketmp' <- function(n=1,ext="",prefix="")
{
if (!nchar(prefix)) {
prefix <- basename(tempfile("","."))
k <- nchar(prefix)
prefix <- substr(prefix,k-3,k)
}
tcount <- getOption("ursaTempFileCount")
if (is.null(tcount))
tcount <- 0L
list1 <- vector("character",length=n)
for (i in seq(along=list1))
{
list1[i] <- sprintf("ursa%02d_%s",tcount+i,prefix)
# list1[i] <- sprintf("tmp%s_%02d",prefix,tcount+i)
}
if (nchar(ext))
{
ext <- .gsub("^\\.","",ext)
list1 <- paste(list1,ext,sep=".")
}
options(ursaTempFileCount=tcount+n)
res <- paste0("___",list1)
if ((TRUE)||(!.isRscript()))
res <- file.path(getOption("ursaTempDir"),res)
res
}
'.args2list' <- function(args) {
isCMD <- missing(args)
if (isCMD)
args <- commandArgs(TRUE)
else {
args <- unlist(strsplit(args,split="\\s+"))
}
if (!length(args))
return(NULL)
if (FALSE)
a <- strsplit(args,"=")
else {
a <- vector("list",length(args))
for (i in seq_along(args)) {
ind <- .grep("=",unlist(strsplit(args[i],"")))
if (length(ind))
a[[i]] <- c(substr(args[i],1,ind[1]-1)
,substr(args[i],ind[1]+1,nchar(args[i])))
else
a[[i]] <- args[i]
}
}
variant <- c(1,-1)[1]
aname <- sapply(a,function(x) {if (length(x)==variant) "" else x[1]})
opE <- options(warn=-1,show.error.messages=FALSE)
for (i in seq_along(a)) {
x <- a[[i]]
n <- length(x)
y <- x[n]
if (TRUE) {
# if (n>1) ## only if named?
if (y=="NULL")
y <- NULL
else if (.try(z <- eval(parse(text=y)))) {
if (!is.null(z))
y <- z
}
if (n==-variant) {
if (!length(grep("(\\s|\\.)",y))) {
y <- length(grep("^[-!]\\S",y))==0
if (!y)
aname[i] <- gsub("^[-!]","",aname[i])
}
else
aname[i] <- ""
}
}
else {
z <- as.logical(y)
if (!is.na(z))
return(z)
if (.lgrep("^(-)*\\d+$",y))
return(as.integer(y))
if (.lgrep("^(-)*\\d+\\.\\d+$",y))
return(as.numeric(y))
}
a[[i]] <- y
}
options(opE)
names(a) <- aname
a
}
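## Illustrative parsing (not run): "key=value" tokens are split on "=" and the
## values evaluated where possible, e.g.
##   .args2list("zoom=8 verbose=TRUE name=test")
##   # -> list(zoom=8, verbose=TRUE, name="test")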
'.is.integer' <- function(x,tolerance=1e-11) {
if (inherits(x,c("Date","POSIXt","list")))
return(FALSE)
hasNA <- anyNA(x)
if (hasNA)
x <- x[!is.na(x)]
if (is.ursa(x))
x <- c(x$value)
else if (inherits(x,"units"))
x <- unclass(x)
else if ((is.character(x))||(is.factor(x))) {
ch <- grep("^\\s*(\\-)*\\d+(\\.\\d+)*((\\+|\\-)[eE]\\d+)*\\s*$",x,invert=TRUE)
if (length(ch))
return(FALSE)
      x <- as.numeric(as.character(x))  ## avoid abs() on character/factor below
   }
if (any(abs(x)>1e9))
return(FALSE)
y <- abs(x-round(x)) ## ++ 20180531
# y <- abs(x-as.integer(round(x))) ## -- 20180531
if (all(x>100)) {
y <- y/x
}
res <- all(y<tolerance)
res
}
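## Expected behaviour (not run):
##   .is.integer(c(1,2,3))  # TRUE  -- whole numbers within tolerance
##   .is.integer(2.5)       # FALSE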
'.is.rgb' <- function(obj) {
if (.is.colortable(obj))
return(FALSE)
if (storage.mode(obj$value)!="integer")
return(FALSE)
if (!(nband(obj) %in% c(3,4)))
return(FALSE)
minv <- min(obj$value,na.rm=TRUE)
maxv <- max(obj$value,na.rm=TRUE)
if ((minv>=0)&&(maxv>=200)&&(maxv<=255))
return(TRUE)
FALSE
}
'.ursaOptions' <- function() {
op <- options()
op <- op[.grep("^ursa(Png|[A-Z]).+",names(op))]
indPng <- .grep("^ursaPng.+",names(op))
if (length(indPng))
return(str(op[indPng]))
str(op)
}
'.skipPlot' <- function(onPanel=TRUE) {
toPlot <- getOption("ursaPngPlot")
if ((!is.logical(toPlot))||(!toPlot))
return(TRUE)
if (!onPanel)
return(FALSE)
getOption("ursaPngSkip")
}
'.dist2' <- function(src,dst,summarize=!FALSE,positive=FALSE,spherical=NA
,verbose=!.isPackageInUse())
{
if (identical(src,dst))
positive <- TRUE
'.modal2' <- function(x,...) {
if (length(x)==1)
return(x)
z <- density(x,...)
y <- z$x[match(max(z$y),z$y)]
y
}
'.modal3' <- function(x) {
if (length(x)==1)
return(x)
res <- NA
## 'locfit' is not in 'suggests', 'depends'
if (requireNamespace("locfit",quietly=.isPackageInUse()))
try(res <- x[which.max(predict(locfit::locfit(~x),newdata=x))])
res
}
isLonLat <- .lgrep("(\\+proj=longlat|epsg:4326)",spatial_crs(src))>0
isUrsa <- FALSE
if ((is_spatial(src))&&((is_spatial(dst)))) {
if (!identical(spatial_crs(src),spatial_crs(dst)))
dst <- spatial_transform(dst,src)
}
if (is_spatial(src)) {
src <- spatial_coordinates(src)
if (is.list(src)) {
while(all(sapply(src,is.list)))
            src <- unlist(src,recursive=FALSE)
src <- do.call(rbind,src)
}
}
else if (is_ursa(src)) {
crsS <- ursa_crs(src)
src <- as.data.frame(src,na.rm=T)[,1:2]
isUrsa <- TRUE
}
else if ((is.null(dim(src)))&&(length(src)==2)) {
src <- rbind(src)
}
if (is_spatial(dst)) {
if (isUrsa) {
crsD <- spatial_crs(dst)
if (!identical(crsS,crsD))
dst <- spatial_transform(dst,crsS)
}
##~ dst <- switch(spatial_geotype(dst)
##~ ,POINT=spatial_coordinates(dst)
##~ ,stop("'dst': unimplemented for ",spatial_geotype(dst)))
dst <- spatial_coordinates(dst)
if (is.list(dst)) {
while(all(sapply(dst,is.list)))
dst <- unlist(dst,recursive=FALSE)
dst <- do.call(rbind,dst)
}
}
else if (is_ursa(dst)) {
dst <- as.data.frame(dst,na.rm=T)[,1:2]
}
else if ((is.null(dim(dst)))&&(length(dst)==2)) {
dst <- rbind(dst)
}
d1 <- dim(src)
d2 <- dim(dst)
if ((is.null(colnames(src)))&&(d1[2]>=2)) {
colnames(src) <- .maketmp(d1[2])
colnames(src)[1:2] <- c("x","y")
}
if ((is.null(colnames(dst)))&&(d2[2]>=2)) {
colnames(dst) <- .maketmp(d2[2])
colnames(dst)[1:2] <- c("x","y")
}
if ((length(d1)<2)||(d1[2]<2)||(length(d2)<2)||(d2[2]<2))
return(NULL)
if ((anyNA(dst[,"x"]))||(anyNA(dst[,"y"]))||
(anyNA(src[,"x"]))||(anyNA(src[,"y"])))
stop("NA values are not applicable")
if (is.na(spherical))
spherical <- isLonLat
b1 <- .Cursa(C_dist2dist,x1=as.numeric(dst[,"x"]),y1=as.numeric(dst[,"y"])
,x2=as.numeric(src[,"x"]),y2=as.numeric(src[,"y"])
,nxy=nrow(dst),ndf=nrow(src),positive=as.integer(positive)
,spherical=as.integer(spherical)
,verb=as.integer(verbose)
,dist=numeric(nrow(src)),ind=integer(nrow(src)))
b1 <- data.frame(ind=b1$ind+1L,dist=b1$dist)
if (summarize) {
d <- b1$dist
if (!.try(m <- .modal3(d)))
m <- NA
if (verbose)
print(c(avg=mean(d),median=median(d),mode2=.modal2(d),mode3=m))
}
if (isUrsa) {
return(as.ursa(cbind(src,b1)))
}
b1
}
'.is.eq' <- function(x,value) { ## isTRUE(all.equal(a,b)) https://stackoverflow.com/questions/9508518/why-are-these-numbers-not-equal
if (isAll <- missing(value)) {
      value <- mean(x,na.rm=TRUE)
}
if (abs(value)<1e-16)
res <- abs(x-value)<1e-27
else if (abs(value)<1e-6)
res <- abs(x-value)<1e-11
else
res <- abs(x/value-1)<1e-6
if (isAll)
return(all(res))
res
}
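## Motivating example (not run): exact comparison fails on accumulated
## floating-point error, the relative test does not:
##   (0.1+0.2)==0.3       # FALSE
##   .is.eq(0.1+0.2,0.3)  # TRUE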
'.is.ge' <- function(x,value) x>value | .is.eq(x,value)
'.is.le' <- function(x,value) x<value | .is.eq(x,value)
'.is.gt' <- function(x,value) x>value
'.is.lt' <- function(x,value) x<value
'.is.near' <- function(x1,x2,verbose=FALSE) {
# https://stackoverflow.com/questions/9508518/why-are-these-numbers-not-equal
m1 <- match(x1,x2)
if (all(!is.na(m1))) { ## 20161222 add 'all', removed 'any'
if (verbose)
message(".is.near: exact matching")
return(m1)
}
n1 <- length(x1)
n2 <- length(x2)
b1 <- .Cursa(C_isNear,x1=as.numeric(x1),x2=as.numeric(x2),n1=n1,n2=n2
,res=integer(n1),NAOK=FALSE)$res
b1[b1==0] <- NA
if (verbose)
message(".is.near: fuzzy matching")
b1
}
'.getMajorSemiAxis' <- function(proj4) {
ell <- .gsub(".*\\+ellps=(\\S+)\\s.*","\\1",proj4)
if (ell=="WGS84")
B <- 6378137
else if (ell==proj4) {
B <- .gsub(".*\\+a=(\\S+)\\s.*","\\1",proj4)
if (B!=proj4)
B <- as.numeric(B)
else {
opW <- options(warn=-1)
warning("Supposed that this projection is not supported yet")
options(opW)
B <- 6378137
}
}
else {
opW <- options(warn=-1)
warning("Supposed that this projection is not supported yet")
options(opW)
B <- 6378137
}
B
}
'.degminsec' <- function(x,suffix=c("A","B"),unique=FALSE) {
s <- sign(x)
x <- abs(x)
y <- rep("",length(x))
x1 <- floor(x)
x2 <- floor((x-x1)*60)
x3a <- (x-x1-x2/60)*3600
x3 <- .round(x3a)
ind2 <- which(x3==60)
if (length(ind2)) {
x2[ind2] <- x2[ind2]+1
x3[ind2] <- 0
}
ind2 <- which(x2==60)
if (length(ind2)) {
x1[ind2] <- x1[ind2]+1
x2[ind2] <- 0
}
x1a <- abs(x1)
if (all(c(x2,x3)==0))
y <- sprintf("%.0f\uB0",x1a)
else if (all(x3==0))
y <- sprintf("%.0f\uB0%02.0f'",x1a,x2)
else if ((!unique)||(length(unique(x3))==length(x3)))
y <- sprintf("%.0f\uB0%02.0f'%02.0f\"",x1a,x2,x3)
else {
for (digit in seq(1,6)) {
x3b <- .round(x3a,digit)
if (length(unique(x3b))==length(x3a))
break
}
y <- sprintf(paste0("%.0f\uB0%02.0f'%0",3+digit,".",digit,"f\""),x1a,x2,x3a)
}
if (length(ind2 <- s>=0))
y[ind2] <- paste0(y[ind2],suffix[1])
if (length(ind2 <- s<0))
y[ind2] <- paste0(y[ind2],suffix[2])
if ((length(unique(y))==1)&&(length(unique(x))!=1))
return(paste0(as.character(x),"\uB0",suffix[1]))
y
}
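## Expected formatting (not run): decimal degrees become degree/minute(/second)
## strings with a hemisphere suffix, e.g.
##   .degminsec(45.5,suffix=c("N","S"))    # -> "45\uB030'N" (45 deg 30 min N)
##   .degminsec(-12.25,suffix=c("E","W"))  # -> "12\uB015'W"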
'.isRscript' <- function() .lgrep("^(--file=|-f$|-e$|--hiddenslave$)",commandArgs(FALSE))>=1
#'.isPackageInUse.deprecated' <- function() "ursa" %in% loadedNamespaces()
'.isPackageInUse' <- function(verbose=FALSE) {
if (is.logical(piu <- getOption("ursaPackageInUse")))
return(piu)
cond2 <- "ursa" %in% loadedNamespaces()
cond3 <- !("plEnviron" %in% search())
# ret <- (cond1)&&(cond2)
ret <- (cond2)&&(cond3)
if (verbose) {
print(search())
print(loadedNamespaces())
cond1 <- "package:ursa" %in% search()
print(c(cond1=cond1,cond2=cond2,cond3=cond3,ret=ret))
}
ret
}
'.argv0path' <- function() {
arglist <- commandArgs(FALSE)
if (length(ind <- .grep("^--file=.+",arglist,ignore.case=FALSE))==1)
return(strsplit(arglist[ind],"=")[[1]][2])
if (length(ind <- .grep("^-f$",arglist,ignore.case=FALSE))==1)
return(arglist[ind+1L])
""
}
'.argv0' <- function() basename(.argv0path())
'.argv0dir' <- function() dirname(.argv0path())
'.argv0name' <- function() .gsub("^(.+)(\\.)(.+)*$","\\1",.argv0())
'.argv0ext' <- function() .gsub("^(.+)(\\.)(.+)*$","\\2\\3",.argv0())
'.argv0png' <- function() Fout <- sprintf("%s%%02d.png",.argv0name())
'.argv0pdf' <- function() paste0(.argv0name(),".pdf")
'.dQuote' <- function(ch) paste0("\"",ch,"\"")
'.sQuote' <- function(ch) paste0("'",ch,"'")
'.require' <- function(pkg,quietly=TRUE) do.call("require",list(pkg,quietly=quietly))
'.tryE' <- function(...) {
opE <- options(show.error.messages=TRUE)
ret <- try(...)
options(opE)
ret
}
'.loaded' <- function() gsub("^package:","",grep("^package:",search(),value=TRUE))
'.in.memory' <- function(obj) {
if (!is.ursa(obj))
return(NA)
!is.null(dim(obj$value))
}
'.normalizePath' <- function(path) normalizePath(path,winslash=.Platform$file.sep,mustWork=FALSE)
'.isKnitr' <- '.isKnit' <- function() {
# cond1 <- requireNamespace("knitr",quietly=.isPackageInUse())
# if (!cond1)
# return(FALSE)
# is.character(knitr::current_input())
if (.isShiny())
return(TRUE)
ret <- ("knitr" %in% loadedNamespaces())&&(is.character(knitr::current_input()))
if (ret)
comment(ret) <- rmarkdown::all_output_formats(knitr::current_input())
ret
}
'.isJupyter' <- function() {
"jupyter:irkernel" %in% search()
# "IRkernel" %in% loadedNamespaces()
}
'.isReveal' <- function() {
res <- knitr::opts_knit$get("rmarkdown.pandoc.to")
if (!is.character(res))
return(FALSE)
res=="revealjs"
}
'.isRemark' <- function() {
if (!all(c("knitr","rmarkdown") %in% loadedNamespaces()))
return(FALSE)
oname <- names(rmarkdown::metadata$output)
if (is.null(oname))
return(FALSE)
grepl("moon.*reader",oname[1])
# length(grep("moon.*reader"
# ,rmarkdown::all_output_formats(knitr::current_input())[1]))>0
}
'.isDashboard' <- function() {
if (!all(c("knitr","rmarkdown") %in% loadedNamespaces()))
return(FALSE)
oname <- names(rmarkdown::metadata$output)
if (is.null(oname))
return(FALSE)
grepl("flex.*dashboard",oname[1])
# length(grep("flex.*dashboard"
# ,rmarkdown::all_output_formats(knitr::current_input())[1]))>0
}
'.isPaged' <- function() {
if (!all(c("knitr","rmarkdown") %in% loadedNamespaces()))
return(FALSE)
oname <- names(rmarkdown::metadata$output)
if (is.null(oname))
return(FALSE)
grepl("(thesis|html).*paged",oname[1])
# length(grep("(thesis|html).*paged"
# ,rmarkdown::all_output_formats(knitr::current_input())[1]))>0
}
'.isVignette' <- function() {
if (!all(c("knitr","rmarkdown") %in% loadedNamespaces()))
return(FALSE)
oname <- names(rmarkdown::metadata$output)
if (is.null(oname))
return(FALSE)
grepl("(vignette|html_document)",oname[1])
# length(grep("(vignette|html_document)"
# ,rmarkdown::all_output_formats(knitr::current_input())[1]))>0
}
'.isShiny' <- function() {
(("shiny" %in% loadedNamespaces())&&(length(shiny::shinyOptions())>0))
}
'.open.canceled' <- function(...) {
arglist <- lapply(list(...), function(x) {
if (!file.exists(x)) {
if (.lgrep("\\%(\\d)*d",x))
x <- sprintf(x,1L)
else
x <- NULL
}
x
})
ret <- system2("R",c("CMD","open",arglist))
# browseURL("R",c("CMD","open",arglist))
if (length(ret)==1)
ret <- ret[[1]]
invisible(ret)
}
'.isSF' <- function(obj) inherits(obj,c("sf","sfc"))
'.isSP' <- function(obj) {
((inherits(obj,"Spatial"))||
(.lgrep("Spatial(Points|Lines|Polygons)DataFrame",class(obj))))
}
'.is.numeric' <- function(obj) {
opW <- options(warn=-1)
res <- as.numeric(na.omit(obj))
options(opW)
!anyNA(res)
}
'.is.equal.crs' <- function(obj1,obj2=NULL) {
oprj <- spatial_crs(obj1)
sprj <- if (is.null(obj2)) session_crs() else spatial_crs(obj2)
if (nchar(sprj)<3)
return(FALSE)
oprj2 <- .gsub("\\+wktext\\s","",oprj)
sprj2 <- .gsub("\\+wktext\\s","",sprj)
oprj2 <- .gsub("(^\\s|\\s$)","",oprj2)
sprj2 <- .gsub("(^\\s|\\s$)","",sprj2)
ret <- identical(oprj2,sprj2)
ret
}
'.sample' <- function(x,n) {
if (length(x)<=1)
return(x)
if (missing(n))
return(sample(x))
if (n>=length(x))
return(sample(x))
sample(x,n)
}
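## Why the wrapper (not run): base sample() treats a single number as a range,
##   sample(5)   # a permutation of 1:5
##   .sample(5)  # -> 5 (length-one input is returned as-is)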
'.system2.patch' <- function(...) {
## in 3.5.0 failure for 'interactive()' & 'system2(...,wait=TRUE)'
if (FALSE) #(!interactive())
return(system2(...))
arglist <- list(...)
str(arglist)
aname <- names(arglist)
if (is.null(aname))
return(system2(...))
na <- length(arglist)
str(aname)
# a <- which(sapply(aname,function(x)
# !inherits(try(match.arg(x,"stdout")),"try-error")))
ind <- which(!nchar(aname))
cmd1 <- unname(unlist(arglist[ind]))
print(cmd1)
ind <- seq(na)[-ind]
str(ind)
isCon <- FALSE
arg1 <- list(command=NULL)
for (a in aname[ind]) {
print(a)
ind2 <- try(match.arg(a,"args"))
if (!inherits(ind2,"try-error")) {
cmd1 <- c(cmd1,arglist[[a]])
next
}
if (!isCon) {
ind2 <- try(match.arg(a,"stdout"))
if (!inherits(ind2,"try-error")) {
cmd1 <- c(cmd1,"1>",arglist[[a]])
arg1$show.output.on.console <- FALSE
isCon <- TRUE
next
}
}
if (!isCon) {
ind2 <- try(match.arg(a,"stderr"))
if (!inherits(ind2,"try-error")) {
cmd1 <- c(cmd1,"2>",arglist[[a]])
isCon <- TRUE
next
}
}
ind2 <- try(match.arg(a,"wait"))
if (!inherits(ind2,"try-error")) {
arg1$wait <- arglist[[a]]
next
}
# print(ind2)
}
arg1$command <- paste(cmd1,collapse=" ")
cat("--------\n")
str(cmd1)
str(arg1)
# return(do.call("system",arg1))
NULL
}
'.origin' <- function () {
t3 <- Sys.time()
as.Date(as.POSIXlt(t3-as.numeric(t3),tz="UTC")) ## "1970-01-01"
}
'.evaluate' <- function(arglist,ref,verbose=F & .isPackageInUse()) {
if (F & !.isPackageInUse())
return(arglist)
verbal <- paste0("args evaluating (",paste(ref,collapse=", "),") --")
if (verbose)
.elapsedTime(paste(verbal,"started"))
argname <- character()
for (fun in ref)
argname <- c(argname,names(as.list(do.call("args",list(fun)))))
argname <- unique(argname)
rname <- names(arglist)
depth <- 1L+.isKnitr()
# print(c('as.character(arglist[[1]])'=as.character(arglist[[1]])))
# print(c(isPackageInUse=.isPackageInUse()))
# print(c('arglist[[1]]'=arglist[[1]]))
# try(print(c(a=head(names(as.list(args(arglist[[1]]))))),quote=FALSE))
# try(print(c(b=head(names(as.list(args(as.character(arglist[[1]])))))),quote=FALSE))
# try(print(c(c=head(names(as.list(args(colorize))))),quote=FALSE))
# try(print(c(d=head(names(as.list(args(ursa::colorize))))),quote=FALSE))
j <- integer()
for (i in seq_along(arglist)[-1]) {
if (rname[i]=="obj")
next
if (!is.language(arglist[[i]]))
next
if (inherits(try(match.arg(rname[i],argname),silent=TRUE),"try-error"))
next
if (isTRUE(getOption("ursaNoticeMatchCall")))
message('.evaluate: try `mget(names(match.call())[-1])` instead of `as.list(match.call())`')
res <- try(eval.parent(arglist[[i]],n=depth),silent=TRUE)
if (inherits(res,"try-error")) {
next
}
if (is.null(res))
j <- c(j,i)
else if (is.language(res)) {
res <- eval.parent(res,n=depth)
if (!is.language(res)) {
assign(rname[i],res)
arglist[[i]] <- res
}
else
stop("unable to evaluate agrument ",.sQuote(rname[i]))
}
else
arglist[[i]] <- res
}
if (length(j))
arglist <- arglist[-j]
if (verbose)
.elapsedTime(paste(verbal,"finished"))
arglist
}
'.isColor' <- function(x) !inherits(try(col2rgb(x),silent=TRUE),"try-error")
'.isWeb' <- function(grid) {
if (missing(grid))
grid <- session_grid()
crs <- ursa(grid,"crs")
v1 <- ursa(grid,"cellsize")
v2 <- 2*6378137*pi/(2^(1:21+8))
cond1 <- grepl("\\+proj=merc",crs)>0
# print(format(v2,sci=FALSE),quote=FALSE)
cond2 <- !is.na(.is.near(v1,v2))
cond1 & cond2
}
## file: /scratch/gouwar.j/cran-all/cranData/ursa/R/yyy.util.R
'.areaIncrement' <- function(x,dist=NA,mul=1,verbose=FALSE)
{
if (!is.ursa(x))
return(NULL)
sparse <- attr(x$value,"sparse")
if ((!is.null(sparse))&&(any(na.omit(sparse)!=0)))
stop("TODO: expand compression")
if (!is.na(x$con$posZ))
{
nb <- length(x$con$posZ)
bn <- x$name[x$con$posZ]
}
else
{
nb <- x$dim[2]
bn <- x$name
}
if (any(is.na(dist)))
dist <- with(x$grid,c(resx,resy))
else if (length(dist)==1)
dist <- rep(dist,2)
else if (length(dist)!=2)
stop("unrecognized argument 'dist'")
dimx <- with(x$grid,c(columns,rows,nb))
x$value <- (.Cursa(C_areaIncrement,x=as.numeric(x$value),dim=as.integer(dimx)
,res=as.numeric(dist),out=numeric(prod(dimx))
,NAOK=TRUE)$out-1)*mul
dim(x$value) <- with(x$grid,c(columns*rows,nb))
x
}
## file: /scratch/gouwar.j/cran-all/cranData/ursa/R/yyy.xtra.R
# utils::globalVariables("wbttools")
'.ursaToolSetDummyFunction' <- function() NULL
#try(Sys.setenv(R_PLASTER_TEMPLATE=
# file.path(chartr("\\","/",Sys.getenv("R_USER")),"template.idr")))
# try(Sys.setenv(R_PLASTER_TEMPLATE=system.file("inst","template",package="ursa")
.onLoad.blank <- function(lib, pkg) {
invisible(0L)
}
.onLoad <- function(lib, pkg) {
compiler::enableJIT(0) ## speed up if 'ByteCompile: no' in "DESCRIPTION"
# print("ursa -- .onLoad")
p <- proc.time()
options(ursaTimeStart=p,ursaTimeDelta=p) # ,ursaForceSF=TRUE
rm(p)
# session_pngviewer()
fpath <- getOption("ursaCacheDir") ## e.g., from ~/.Rprofile
if (is.null(fpath))
try(options(ursaCacheDir=tempdir()))
else
if (!file.exists(fpath))
dir.create(fpath)
## ursaCacheDir=file.path(dirname(tempdir()),"RtmpUrsaCache") ## out of CRAN policy
.ursaCacheDirClear()
session_tempdir()
# if ((FALSE)&&(interactive()))
# print(data.frame(pngviewer=session_pngviewer()
# ,tempdir=session_tempdir()
# ,row.names="session"))
# welcome2 <- .elapsedTime("ursa -- onload 1111",toPrint=FALSE)
# fpath <- file.path(chartr("\\","/",Sys.getenv("R_USER")),"template.idr")
fpath0 <- system.file("requisite",package="ursa")
fpath <- getOption("ursaRequisite") ## e.g., from ~/.Rprofile
if ((!is.null(fpath))&&(file.exists(fpath))) {
# ok <- try(Sys.setenv(R_RMAP_TEMPLATE=fpath))
ok <- try(options(ursaRequisite=fpath))
if (!inherits(ok,"try-error")) {
sapply(.dir(path=fpath0),function(x)
file.copy(file.path(fpath0,x),file.path(fpath,x)
,overwrite=FALSE,copy.date=TRUE))
# if (("plutil" %in% loadedNamespaces())&&(.isPackageInUse())) {
# NULL
# }
# spatialize <<- ursa:::spatialize
# assign("spatialize",ursa:::spatialize,envir=.GlobalEnv) ## OK
# assign("spatialize",get("spatialize"),envir=.GlobalEnv) ## OK
# assign("spatialize",get("ursa:::spatialize"),envir=.GlobalEnv) ## FAIL
return(invisible(0L))
}
}
# try(Sys.setenv(R_RMAP_TEMPLATE=fpath))
try(options(ursaRequisite=fpath0))
options(ursaNoticeMatchCall=FALSE & !.isPackageInUse())
invisible(0L)
}
.onAttach <- function(lib, pkg) { ## FAILED for 'Rscript -e "ursa::display()"'
# print("ursa -- .onAttach")
# welcome <- .elapsedTime("ursa -- attach 2222",toPrint=FALSE)
# packageStartupMessage(welcome,appendLF=FALSE)
invisible(0L)
}
.Last.hide <- function() {
message("ursa -- last")
if (!FALSE)
{
delafter <- getOption("ursaPngDelafter")
fileout <- getOption("ursaPngFileout")
if ((is.logical(delafter))&&(is.character(fileout))&&(delafter)&&(file.exists(fileout)))
{
# dev.off()
graphics.off()
if (!file.remove(fileout))
message(sprintf("'ursa' package message: Unable to remove file '%s'.",fileout))
}
}
con <- showConnections(all=!FALSE)
ind <- which(!is.na(match(con[,"class"],"file")))
if ((!FALSE)&&(length(ind)))
{
con <- con[ind,,drop=FALSE]
for (i in seq(nrow(con)))
{
con2 <- con[i,,drop=FALSE]
# close(getConnection(as.integer(rownames(con2)))) ## del
fname <- con2[,"description"]
if (length(grep("\\.unpacked(.*)\\~$",fname)))
{
close(getConnection(as.integer(rownames(con2)))) ## ins
if (!file.remove(fname))
message(sprintf("'ursa' package message: Unable to remove file '%s'."
,fname))
}
}
}
}
.noGenerics <- TRUE
.onUnload <- function(libpath) {
# message("ursa -- unload")
library.dynam.unload("ursa",libpath)
}
.onDetach <- function(libpath) {
# message("ursa -- detach")
}
## file: /scratch/gouwar.j/cran-all/cranData/ursa/R/zzz.R
decodetable <- local({
con <- textConnection(
"\"colname\" \"colvalue\" \"description\"
\"STUSAB\" \"State/U.S. Abbreviation (USPS)\"
\"SUMLEV\" \"Summary level\"
\"SUMLEV\" \"10\" \"United States\"
\"SUMLEV\" \"20\" \"Region\"
\"SUMLEV\" \"30\" \"Division\"
\"SUMLEV\" \"40\" \"State\"
\"SUMLEV\" \"50\" \"State-County\"
\"SUMLEV\" \"60\" \"State-County-County Subdivision\"
\"SUMLEV\" \"70\" \"State-County-County Subdivision-Place/Remainder\"
\"SUMLEV\" \"160\" \"State-Place\"
\"SUMLEV\" \"155\" \"State-Place-County\"
\"SUMLEV\" \"170\" \"State-Consolidated City\"
\"SUMLEV\" \"172\" \"State-Consolidated City-Place Within Consolidated City\"
\"SUMLEV\" \"500\" \"State Congressional District\"
\"SUMLEV\" \"610\" \"State-State Legislative District (Upper Chamber)\"
\"SUMLEV\" \"620\" \"State-State Legislative District (Lower Chamber)\"
\"GEOCOMP\" \"Geographic Component\"
\"GEOCOMP\" \"0\" \"Not a geographic component\"
\"GEOCOMP\" \"1\" \"Urban\"
\"GEOCOMP\" \"4\" \"Urban--in urbanized area\"
\"GEOCOMP\" \"5\" \"Urban--in urbanized area of 5,000,000 or more population\"
\"GEOCOMP\" \"6\" \"Urban--in urbanized area of 2,500,000 to 4,999,999 population\"
\"GEOCOMP\" \"7\" \"Urban--in urbanized area of 1,000,000 to 2,499,999 population\"
\"GEOCOMP\" \"8\" \"Urban--in urbanized area of 500,000 to 999,999 population\"
\"GEOCOMP\" \"9\" \"Urban--in urbanized area of 250,000 to 499,999 population\"
\"GEOCOMP\" \"10\" \"Urban--in urbanized area of 100,000 to 249,999 population\"
\"GEOCOMP\" \"11\" \"Urban--in urbanized area of 50,000 to 99,999 population\"
\"GEOCOMP\" \"28\" \"Urban--in urban cluster\"
\"GEOCOMP\" \"29\" \"Urban--in urban cluster of 25,000 to 49,999 population\"
\"GEOCOMP\" \"30\" \"Urban--in urban cluster of 10,000 to 24,999 population\"
\"GEOCOMP\" \"31\" \"Urban--in urban cluster of 5,000 to 9,999 population\"
\"GEOCOMP\" \"32\" \"Urban--in urban cluster of 2,500 to 4,999 population\"
\"GEOCOMP\" \"43\" \"Rural\"
\"GEOCOMP\" \"44\" \"Rural-place\"
\"GEOCOMP\" \"45\" \"Rural--place of 2,500 or more population\"
\"GEOCOMP\" \"46\" \"Rural--place of 1,000 to 2,499 population\"
\"GEOCOMP\" \"47\" \"Rural--place of less than 1,000 population\"
\"GEOCOMP\" \"48\" \"Rural--not in place\"
\"GEOCOMP\" \"49\" \"Rural--farm\"
\"GEOCOMP\" \"50\" \"Urban portion of extended place\"
\"GEOCOMP\" \"51\" \"Rural portion of extended place\"
\"GEOCOMP\" \"89\" \"American Indian Reservation and Trust Land--Federal\"
\"GEOCOMP\" \"90\" \"American Indian Reservation and Trust Land--State\"
\"GEOCOMP\" \"91\" \"Oklahoma Tribal Statistical Area\"
\"GEOCOMP\" \"92\" \"Tribal Designated Statistical Area\"
\"GEOCOMP\" \"93\" \"Alaska Native Village Statistical Area\"
\"GEOCOMP\" \"94\" \"State Designated Tribal Statistical Area\"
\"GEOCOMP\" \"95\" \"Hawaiian Home Land\"
\"GEOCOMP\" \"A0\" \"In metropolitan or micropolitan statistical area\"
\"GEOCOMP\" \"A1\" \"In metropolitan or micropolitan statistical area--in principal city\"
\"GEOCOMP\" \"A2\" \"In metropolitan or micropolitan statistical area--not in principal city\"
\"GEOCOMP\" \"A3\" \"In metropolitan or micropolitan statistical area--urban\"
\"GEOCOMP\" \"A4\" \"In metropolitan or micropolitan statistical area--urban--in urbanized area\"
\"GEOCOMP\" \"A5\" \"In metropolitan or micropolitan statistical area--urban--in urban cluster\"
\"GEOCOMP\" \"A6\" \"In metropolitan or micropolitan statistical area--rural\"
\"GEOCOMP\" \"A7\" \"In metropolitan or micropolitan statistical area of 5,000,000 or more population\"
\"GEOCOMP\" \"A8\" \"In metropolitan or micropolitan statistical area of 2,500,000 to 4,999,999 population\"
\"GEOCOMP\" \"A9\" \"In metropolitan or micropolitan statistical area of 1,000,000 to 2,499,999 population\"
\"GEOCOMP\" \"AA\" \"In metropolitan or micropolitan statistical area of 500,000 to 999,999 population\"
\"GEOCOMP\" \"AB\" \"In metropolitan or micropolitan statistical area of 250,000 to 499,999 population\"
\"GEOCOMP\" \"AC\" \"In metropolitan or micropolitan statistical area of 100,000 to 249,999 population\"
\"GEOCOMP\" \"AD\" \"In metropolitan or micropolitan statistical area of 50,000 to 99,999 population\"
\"GEOCOMP\" \"AE\" \"In metropolitan or micropolitan statistical area of 25,000 to 49,999 population\"
\"GEOCOMP\" \"AF\" \"In metropolitan or micropolitan statistical area of less than 25,000 population\"
\"GEOCOMP\" \"C0\" \"In metropolitan statistical area\"
\"GEOCOMP\" \"C1\" \"In metropolitan statistical area--in principal city\"
\"GEOCOMP\" \"C2\" \"In metropolitan statistical area--not in principal city\"
\"GEOCOMP\" \"C3\" \"In metropolitan statistical area--urban\"
\"GEOCOMP\" \"C4\" \"In metropolitan statistical area--urban--in urbanized area\"
\"GEOCOMP\" \"C5\" \"In metropolitan statistical area--urban--in urban cluster\"
\"GEOCOMP\" \"C6\" \"In metropolitan statistical area--rural\"
\"GEOCOMP\" \"C7\" \"In metropolitan statistical area of 5,000,000 or more population\"
\"GEOCOMP\" \"C8\" \"In metropolitan statistical area of 2,500,000 to 4,999,999 population\"
\"GEOCOMP\" \"C9\" \"In metropolitan statistical area of 1,000,000 to 2,499,999 population\"
\"GEOCOMP\" \"CA\" \"In metropolitan statistical area of 500,000 to 999,999 population\"
\"GEOCOMP\" \"CB\" \"In metropolitan statistical area of 250,000 to 499,999 population\"
\"GEOCOMP\" \"CC\" \"In metropolitan statistical area of 100,000 to 249,999 population\"
\"GEOCOMP\" \"CD\" \"In metropolitan statistical area of less than 100,000 population\"
\"GEOCOMP\" \"CE\" \"In metropolitan statistical area of 5,000,000 or more population--in principal city\"
\"GEOCOMP\" \"CF\" \"In metropolitan statistical area of 5,000,000 or more population--not in principal city\"
\"GEOCOMP\" \"CG\" \"In metropolitan statistical area of 2,500,000 to 4,999,999 population--in principal city\"
\"GEOCOMP\" \"CH\" \"In metropolitan statistical area of 2,500,000 to 4,999,999 population--not in principal city\"
\"GEOCOMP\" \"CJ\" \"In metropolitan statistical area of 1,000,000 to 2,499,999 population--in principal city\"
\"GEOCOMP\" \"CK\" \"In metropolitan statistical area of 1,000,000 to 2,499,999 population--not in principal city\"
\"GEOCOMP\" \"CL\" \"In metropolitan statistical area of 500,000 to 999,999 population--in principal city\"
\"GEOCOMP\" \"CM\" \"In metropolitan statistical area of 500,000 to 999,999 population--not in principal city\"
\"GEOCOMP\" \"CN\" \"In metropolitan statistical area of 250,000 to 499,999 population--in principal city\"
\"GEOCOMP\" \"CP\" \"In metropolitan statistical area of 250,000 to 499,999 population--not in principal city\"
\"GEOCOMP\" \"CQ\" \"In metropolitan statistical area of 100,000 to 249,999 population--in principal city\"
\"GEOCOMP\" \"CR\" \"In metropolitan statistical area of 100,000 to 249,999 population--not in principal city\"
\"GEOCOMP\" \"CS\" \"In metropolitan statistical area of less than 100,000 population--in principal city\"
\"GEOCOMP\" \"CT\" \"In metropolitan statistical area of less than 100,000 population--not in principal city\"
\"GEOCOMP\" \"E0\" \"In micropolitan statistical area\"
\"GEOCOMP\" \"E1\" \"In micropolitan statistical area--in principal city\"
\"GEOCOMP\" \"E2\" \"In micropolitan statistical area--not in principal city\"
\"GEOCOMP\" \"E3\" \"In micropolitan statistical area--urban\"
\"GEOCOMP\" \"E4\" \"In micropolitan statistical area--urban--in urbanized area\"
\"GEOCOMP\" \"E5\" \"In micropolitan statistical area--urban--in urban cluster\"
\"GEOCOMP\" \"E6\" \"In micropolitan statistical area--rural\"
\"GEOCOMP\" \"E7\" \"In micropolitan statistical area of 100,000 or more population\"
\"GEOCOMP\" \"E8\" \"In micropolitan statistical area of 50,000 to 99,999 population\"
\"GEOCOMP\" \"E9\" \"In micropolitan statistical area of 25,000 to 49,999 population\"
\"GEOCOMP\" \"EA\" \"In micropolitan statistical area of less than 25,000 population\"
\"GEOCOMP\" \"EB\" \"In micropolitan statistical area of 100,000 or more population--in principal city\"
\"GEOCOMP\" \"EC\" \"In micropolitan statistical area of 100,000 or more population--not in principal city\"
\"GEOCOMP\" \"ED\" \"In micropolitan statistical area of 50,000 to 99,999 population--in principal city\"
\"GEOCOMP\" \"EE\" \"In micropolitan statistical area of 50,000 to 99,999 population--not in principal city\"
\"GEOCOMP\" \"EF\" \"In micropolitan statistical area of 25,000 to 49,999 population--in principal city\"
\"GEOCOMP\" \"EG\" \"In micropolitan statistical area of 25,000 to 49,999 population--not in principal city\"
\"GEOCOMP\" \"EH\" \"In micropolitan statistical area of less than 25,000 population--in principal city\"
\"GEOCOMP\" \"EJ\" \"In micropolitan statistical area of less than 25,000 population--not in principal city\"
\"GEOCOMP\" \"G0\" \"Not in metropolitan or micropolitan statistical area\"
\"GEOCOMP\" \"G1\" \"Not in metropolitan or micropolitan statistical area--urban\"
\"GEOCOMP\" \"G2\" \"Not in metropolitan or micropolitan statistical area--urban--in urbanized area\"
\"GEOCOMP\" \"G3\" \"Not in metropolitan or micropolitan statistical area--urban--in urban cluster\"
\"GEOCOMP\" \"G4\" \"Not in metropolitan or micropolitan statistical area--rural\"
\"GEOCOMP\" \"H0\" \"Not in metropolitan statistical area\"
\"GEOCOMP\" \"H1\" \"Not in metropolitan statistical area--urban\"
\"GEOCOMP\" \"H2\" \"Not in metropolitan statistical area--urban--in urbanized area\"
\"GEOCOMP\" \"H3\" \"Not in metropolitan statistical area--urban--in urban cluster\"
\"GEOCOMP\" \"H4\" \"Not in metropolitan statistical area--rural\"
\"GEOCOMP\" \"J0\" \"In combined statistical area\"
\"GEOCOMP\" \"L0\" \"Not in combined statistical area\"
\"GEOCOMP\" \"M0\" \"In New England city and town area\"
\"GEOCOMP\" \"M1\" \"In New England city and town area--in principal city\"
\"GEOCOMP\" \"M2\" \"In New England city and town area--not in principal city\"
\"GEOCOMP\" \"M3\" \"In New England city and town area--urban\"
\"GEOCOMP\" \"M4\" \"In New England city and town area--urban--in urbanized area\"
\"GEOCOMP\" \"M5\" \"In New England city and town area--urban--in urban cluster\"
\"GEOCOMP\" \"M6\" \"In New England city and town area--rural\"
\"GEOCOMP\" \"P0\" \"In combined New England city and town area\"
\"CHARITER\" \"Characteristic Iteration\"
\"CHARITER\" \"0\" \"Not a characteristic iteration\"
\"CHARITER\" \"1\" \"Total Population\"
\"REGION\" \"Region (Geographic Area Code)\"
\"REGION\" \"1\" \"Northeast\"
\"REGION\" \"2\" \"Midwest\"
\"REGION\" \"3\" \"South\"
\"REGION\" \"4\" \"West\"
\"REGION\" \"9\" \"Not in a region (Puerto Rico)\"
\"DIVISION\" \"Division (Geographic Area Code)\"
\"DIVISION\" \"0\" \"Not in a division (Puerto Rico)\"
\"DIVISION\" \"1\" \"New England\"
\"DIVISION\" \"2\" \"Middle Atlantic\"
\"DIVISION\" \"3\" \"East North Central\"
\"DIVISION\" \"4\" \"West North Central\"
\"DIVISION\" \"5\" \"South Atlantic\"
\"DIVISION\" \"6\" \"East South Central\"
\"DIVISION\" \"7\" \"West South Central\"
\"DIVISION\" \"8\" \"Mountain\"
\"DIVISION\" \"9\" \"Pacific\"
\"STATE\" \"State (FIPS)\"
\"STATE\" \"1\" \"Alabama\"
\"STATE\" \"2\" \"Alaska\"
\"STATE\" \"4\" \"Arizona\"
\"STATE\" \"5\" \"Arkansas\"
\"STATE\" \"6\" \"California\"
\"STATE\" \"8\" \"Colorado\"
\"STATE\" \"9\" \"Connecticut\"
\"STATE\" \"10\" \"Delaware\"
\"STATE\" \"11\" \"District of Columbia\"
\"STATE\" \"12\" \"Florida\"
\"STATE\" \"13\" \"Georgia\"
\"STATE\" \"15\" \"Hawaii\"
\"STATE\" \"16\" \"Idaho\"
\"STATE\" \"17\" \"Illinois\"
\"STATE\" \"18\" \"Indiana\"
\"STATE\" \"19\" \"Iowa\"
\"STATE\" \"20\" \"Kansas\"
\"STATE\" \"21\" \"Kentucky\"
\"STATE\" \"22\" \"Louisiana\"
\"STATE\" \"23\" \"Maine\"
\"STATE\" \"24\" \"Maryland\"
\"STATE\" \"25\" \"Massachusetts\"
\"STATE\" \"26\" \"Michigan\"
\"STATE\" \"27\" \"Minnesota\"
\"STATE\" \"28\" \"Mississippi\"
\"STATE\" \"29\" \"Missouri\"
\"STATE\" \"30\" \"Montana\"
\"STATE\" \"31\" \"Nebraska\"
\"STATE\" \"32\" \"Nevada\"
\"STATE\" \"33\" \"New Hampshire\"
\"STATE\" \"34\" \"New Jersey\"
\"STATE\" \"35\" \"New Mexico\"
\"STATE\" \"36\" \"New York\"
\"STATE\" \"37\" \"North Carolina\"
\"STATE\" \"38\" \"North Dakota\"
\"STATE\" \"39\" \"Ohio\"
\"STATE\" \"40\" \"Oklahoma\"
\"STATE\" \"41\" \"Oregon\"
\"STATE\" \"42\" \"Pennsylvania\"
\"STATE\" \"44\" \"Rhode Island\"
\"STATE\" \"45\" \"South Carolina\"
\"STATE\" \"46\" \"South Dakota\"
\"STATE\" \"47\" \"Tennessee\"
\"STATE\" \"48\" \"Texas\"
\"STATE\" \"49\" \"Utah\"
\"STATE\" \"50\" \"Vermont\"
\"STATE\" \"51\" \"Virginia\"
\"STATE\" \"53\" \"Washington\"
\"STATE\" \"54\" \"West Virginia\"
\"STATE\" \"55\" \"Wisconsin\"
\"STATE\" \"56\" \"Wyoming\"
\"STATE\" \"72\" \"Puerto Rico\"
\"COUNTY\" \"County (FIPS)\"
\"AREALAND\" \"Land area measurement in square meters\"
\"AREAWATR\" \"Water area measurement in square meters\""
)
res <- utils::read.table(
con,
header = TRUE,
row.names = NULL,
sep = "\t",
as.is = TRUE
)
close(con)
res
})
dfcolnames <- local({
con <- textConnection(
"\"fileid\"
\"stusab\"
\"sumlev\"
\"geocomp\"
\"chariter\"
\"cifsn\"
\"logrecno\"
\"region\"
\"division\"
\"state\"
\"county\"
\"countycc\"
\"countysc\"
\"cousub\"
\"cousubcc\"
\"cousubsc\"
\"place\"
\"placecc\"
\"placesc\"
\"tract\"
\"blkgrp\"
\"block\"
\"iuc\"
\"concit\"
\"concitcc\"
\"concitsc\"
\"aianhh\"
\"aianhhfp\"
\"aianhcc\"
\"aihhtli\"
\"aitsce\"
\"aits\"
\"aitscc\"
\"ttract\"
\"tblkgrp\"
\"anrc\"
\"anrccc\"
\"cbsa\"
\"cbsasc\"
\"metdiv\"
\"csa\"
\"necta\"
\"nectasc\"
\"nectadiv\"
\"cnecta\"
\"cbsapci\"
\"nectapci\"
\"ua\"
\"uasc\"
\"uatype\"
\"ur\"
\"cd\"
\"sldu\"
\"sldl\"
\"vtd\"
\"vtdi\"
\"reserve2\"
\"zcta5\"
\"submcd\"
\"submcdcc\"
\"sdelm\"
\"sdsec\"
\"sduni\"
\"arealand\"
\"areawatr\"
\"name\"
\"funcstat\"
\"gcuni\"
\"pop100\"
\"hu100\"
\"intplat\"
\"intptlon\"
\"lsadc\"
\"partflag\"
\"reserve3\"
\"uga\"
\"statens\"
\"countyns\"
\"cousubns\"
\"placens\"
\"concitns\"
\"aianhhns\"
\"aitsns\"
\"anrcns\"
\"submcdns\"
\"cd113\"
\"cd114\"
\"cd115\"
\"sldu2\"
\"sldu3\"
\"sldu4\"
\"sldl2\"
\"sldl3\"
\"sldl4\"
\"aianhhsc\"
\"csasc\"
\"cnectasc\"
\"memi\"
\"nmemi\"
\"puma\"
\"reserved\""
)
res <- utils::read.table(
con,
header = FALSE,
row.names = NULL,
sep = "\t",
as.is = TRUE
)
close(con)
res
})
maybetibble <- function(x) {
"create as a tibble/dataframe, if tibble is installed"
## https://stackoverflow.com/a/4090208
if (!is.null(tryCatch(loadNamespace("tibble"),
warning=function(x)NULL,
error=function(x)NULL))) {
    x <- tibble::as_tibble(x) # convert (rather than wrap) the data frame
}
x
}
#' (internal) Make Sure a Dataframe Exists
#'
#' ensure() returns the dataframe (or tibble), reading it in if necessary
#'
#' takes the name of the data set, locates its backing store (the packaged
#' .csv.gz file, or a local .csv/.csv.gz file), and reads it in.
#'
#' @param name Name of the dataframe
#'
#' Stops in the case of an error
#'
#' @return returns the (possibly retrieved) dataframe.
ensure <- function(name) {
csvname <- paste(name, ".csv", sep="")
csvgzname <- paste(name, ".csv.gz", sep="")
## if we exist as a package, take our "private" version; else,
## '.csv', else '.csv.gz', else error
filenames <- c(tryCatch(system.file("private",
csvgzname,
package="us.census.geoheader",
mustWork=TRUE),
error=function(e) NULL),
csvname, csvgzname)
whiches <- file.exists(filenames)
stopifnot(any(whiches)) # if nothing, we're in trouble...
x <- utils::read.csv(filenames[which(whiches)[[1]]], stringsAsFactors=FALSE)
maybetibble(x)
}
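# Usage sketch (internal helper): get.data() below calls
# ensure("us2010sf2_101_col_usrdsc"), which reads the packaged
# private/us2010sf2_101_col_usrdsc.csv.gz, falling back to a local .csv or
# .csv.gz of the same name in the working directory.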
#' (internal) Return the Decode Table
#'
#' @return the decode table
get.decode <- function() {
decodetable
}
#' (internal) Return the Database
#'
#' @return the database (as a tibble and/or dataframe)
get.data <- function() {
ensure("us2010sf2_101_col_usrdsc") # get the main dataset
}
#' Return text describing the meaning of a value in a column
#'
#' uscgh.2010.decode() returns a textual description of a cell
#'
#' takes as input the name of a column in the Geographic Header file
#' from the 2010 US census, and a value found in that column, and
#' attempts to return a description of the meaning of that value.
#'
#' @param colname Name of the column to be described.
#' @param colvalue Value whose description is wanted (`""` -- the
#' default -- to describe, generically, `colname` itself).
#' @param warnings Whether a `(colname,colvalue)` tuple that is
#' not successfully decoded should produce a warning message. (In
#' any event, the result will be NULL.)
#'
#' @return A character string describing the `colname` or
#' `(colname,colvalue)` tuple, if found. If not found, `NULL` is
#' returned (and, if `warnings` is `TRUE`, a warning is generated).
#'
#' @seealso 'RShowDoc("a-tour", package="us.census.geoheader")' for a
#' short tour of a few columns of the database
#'
#' @examples
#' uscgh.2010.decode('SUMLEV', 40)
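#' # Coded values such as regions decode the same way
#' uscgh.2010.decode('REGION', 2)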
#'
#' @export
uscgh.2010.decode <- function(colname, colvalue="", warnings=TRUE) {
"what does COLVALUE in column COLNAME mean?"
decode <- get.decode()
stopifnot(any(grepl(colname, unlist(dfcolnames), ignore.case=TRUE)))
rows <- decode[grepl(colname, decode$colname, ignore.case=TRUE),]
if (nrow(rows) == 0) {
if (warnings) {
warning(sprintf("uscgh.decode: column name \"%s\" not in decode table -- returning NULL\n", colname))
}
return(NULL)
}
if (is.null(colvalue)) {
rows <- rows[rows$colvalue=="",]
if (nrow(rows) == 0) {
## here, no warning
return(NULL)
}
stopifnot(nrow(rows) == 1) # should only be one!
}
rows <- rows[grepl(colvalue, rows$colvalue, ignore.case=TRUE),]
if (nrow(rows) == 0) {
## your guess is as good as mine
return(NULL)
}
return(rows$description[[1]])
}
#' Return the 2010 Census SF2 Geographic Header Data
#'
#' Returns the 2010 Census SF2 geographic header dataset
#'
#' `uscgh.2010.sf2.geoheader()` returns the SF2 geographic header data from
#' the 2010 US census
#'
#' @return a dataframe (tibble, if the tibble package is available)
#'
#' @seealso 'RShowDoc("a-tour", package="us.census.geoheader")' for a
#' short tour of a few columns of the database
#'
#' @examples
#' x <- uscgh.2010.sf2.geoheader()
#'
#' @export
uscgh.2010.sf2.geoheader <- function() {
get.data()
}
|
/scratch/gouwar.j/cran-all/cranData/us.census.geoheader/R/uscgh.R
|
#' @importFrom tibble tibble
NULL
# objects from states.R ---------------------------------------------------
#' US State and Territories
#'
#' The 50 states, District of Columbia, and Puerto Rico.
#'
#' @format A tibble with 52 rows and 8 variables:
#' \describe{
#' \item{abb}{2-letter abbreviation}
#' \item{name}{Full legal name}
#' \item{fips}{Federal Information Processing Standard Publication 5-2 code}
#' \item{region}{Census Bureau region}
#' \item{division}{Census Bureau division}
#' \item{area}{Area in square miles}
#' \item{lat}{Center latitudinal coordinate}
#' \item{long}{Center longitudinal coordinate}
#' }
"states"
#' US Territories
#'
#' The 6 non-state territories and federal district.
#'
#' @format A tibble with 7 rows and 6 variables:
#' \describe{
#' \item{abb}{2-letter abbreviation}
#' \item{name}{Full legal name}
#' \item{fips}{Federal Information Processing Standard Publication 5-2 code}
#' \item{area}{Area in square miles}
#' \item{lat}{Center latitudinal coordinate}
#' \item{long}{Center longitudinal coordinate}
#' }
"territory"
#' US State Abbreviations
#'
#' The 2-letter abbreviations for the US state names.
#'
#' @format A character vector of length 52.
#' @source \url{https://www2.census.gov/geo/docs/reference/state.txt}
"state.abb"
#' US Territory Abbreviations
#'
#' The 2-letter abbreviations for the US territory names.
#'
#' @format A character vector of length 7.
#' @source \url{https://www2.census.gov/geo/docs/reference/state.txt}
"territory.abb"
#' US State Areas
#'
#' The area in square miles of the US states.
#'
#' @format A numeric vector of length 52.
#' @source \url{https://tigerweb.geo.census.gov/tigerwebmain/Files/acs19/tigerweb_acs19_state_us.html}
"state.area"
#' US Territory Areas
#'
#' The area in square miles of the US territories.
#'
#' @format A numeric vector of length 7.
#' @source \url{https://tigerweb.geo.census.gov/tigerwebmain/Files/acs19/tigerweb_acs19_state_us.html}
"territory.area"
#' US State Centers
#'
#' A list with components named `x` and `y` giving the approximate geographic
#' center of each state in negative longitude and latitude.
#'
#' @format A list of length two, each element a numeric vector of length 52.
#' \describe{
#' \item{x}{Center longitudinal coordinate}
#' \item{y}{Center latitudinal coordinate}
#' }
#' @source \url{https://tigerweb.geo.census.gov/tigerwebmain/Files/acs19/tigerweb_acs19_state_us.html}
"state.center"
#' US Territory Centers
#'
#' A list with components named `x` and `y` giving the approximate geographic
#' center of each territory in negative longitude and latitude.
#'
#' @format A list of length two, each element a numeric vector of length 5.
#' \describe{
#' \item{x}{Center longitudinal coordinate}
#' \item{y}{Center latitudinal coordinate}
#' }
#' @source \url{https://tigerweb.geo.census.gov/tigerwebmain/Files/acs19/tigerweb_acs19_state_us.html}
"territory.center"
#' US State Divisions
#'
#' The Census division to which each state belongs, one of nine:
#' 1. New England
#' 2. Middle Atlantic
#' 3. East North Central
#' 4. West North Central
#' 5. South Atlantic
#' 6. East South Central
#' 7. West South Central
#' 8. Mountain
#' 9. Pacific
#'
#' @format A factor vector of length 52.
#' @source \url{https://www2.census.gov/programs-surveys/popest/geographies/2018/state-geocodes-v2018.xlsx}
"state.division"
#' US State Names
#'
#' The full names for the US states.
#'
#' @format A character vector of length 52.
#' @source \url{https://tigerweb.geo.census.gov/tigerwebmain/Files/acs19/tigerweb_acs19_state_us.html}
"state.name"
#' US Territory Names
#'
#' The full names for the US territories.
#'
#' @format A character vector of length 7.
#' @source \url{https://tigerweb.geo.census.gov/tigerwebmain/Files/acs19/tigerweb_acs19_state_us.html}
"territory.name"
#' US State Regions
#'
#' The Census region to which each state belongs, one of four:
#' 1. Northeast
#' 2. Midwest
#' 3. South
#' 4. West
#'
#' @format A factor vector of length 52.
#' @source \url{https://www2.census.gov/programs-surveys/popest/geographies/2018/state-geocodes-v2018.xlsx}
"state.region"
# objects from info.R -----------------------------------------------------
#' US State Facts
#'
#' Updated version of the [datasets::state.x77] matrix, which provides eight
#' statistics from the 1970s. This version is in a modern data frame format
#' with updated (and alternative) statistics.
#'
#' @format A tibble with 52 rows and 9 variables:
#' \describe{
#' \item{name}{Full state name}
#' \item{population}{Population estimate (September 26, 2019)}
#' \item{votes}{Votes in the Electoral College (following the 2010 Census)}
#' \item{admission}{The date on which the state was admitted to the union}
#' \item{income}{Per capita income (2018)}
#' \item{life_exp}{Life expectancy in years (2017-18)}
#' \item{murder}{Murder rate per 100,000 population (2018)}
#' \item{college}{Percent of the adult population with at least a bachelor's degree (2019)}
#' \item{heat}{Mean number of degree days (temperature requires heating) per year from 1981-2010}
#' }
#' @source
#' * Population: \url{https://www2.census.gov/programs-surveys/popest/datasets/2010-2018/state/detail/SCPRC-EST2018-18+POP-RES.csv}
#' * Electoral College: \url{https://www.archives.gov/electoral-college/allocation}
#' * Income: \url{https://data.census.gov/cedsci/table?tid=ACSST1Y2018.S1903}
#' * GDP: \url{https://www.bea.gov/system/files/2019-11/qgdpstate1119.xlsx}
#' * Literacy: \url{https://nces.ed.gov/naal/estimates/StateEstimates.aspx}
#' * Life Expectancy: \url{https://web.archive.org/web/20231129160338/https://usa.mortality.org/}
#' * Murder: \url{https://ucr.fbi.gov/crime-in-the-u.s/2018/crime-in-the-u.s.-2018/tables/table-4/table-4.xls/output.xls}
#' * Education: \url{https://data.census.gov/cedsci/table?q=S1501}
#' * Temperature: \url{ftp://ftp.ncdc.noaa.gov/pub/data/normals/1981-2010/products/temperature/ann-cldd-normal.txt}
"facts"
#' US State and Territory Statistics
#'
#' A matrix version of the [facts] tibble, used to more closely align with the
#' [datasets::state.x77] matrix included with R.
#'
#' @format A tibble with 52 rows and 9 variables:
#' \describe{
#' \item{abb}{2-letter abbreviation}
#' \item{population}{Population estimate as of September 26, 2019}
#' \item{votes}{Votes in the Electoral College (following the 2010 Census)}
#' \item{income}{Per capita income (2017)}
#' \item{life_exp}{Life expectancy in years (2017-18)}
#' \item{murder}{Murder rate per 100,000 population (2018)}
#' \item{high}{Percent of population with at least a high school degree (2019)}
#' \item{bach}{Percent of population with at least a bachelor's degree (2019)}
#' \item{heat}{Mean number of "degree days" per year from 1981-2010}
#' }
"state.x19"
# objects from people.R ---------------------------------------------------
#' Synthetic Sample of US population
#'
#' A statistically representative synthetic sample of 20,000 Americans. Each
#' record is a simulated survey respondent.
#'
#' @details
#' This dataset was originally produced by the Pew Research center for their
#' paper entitled [_For Weighting Online Opt-In Samples, What Matters Most?_][1]
#' The synthetic population dataset was created to serve as a reference for
#' making online opt-in surveys more representative of the overall population.
#'
#' See [Appendix B: Synthetic population dataset][2] for a more detailed
#' description of the method for and rationale behind creating this dataset.
#'
#' In short, the dataset was created to overcome the limitations of using large,
#' federal benchmark survey datasets such as the American Community Survey (ACS)
#' or Current Population Survey (CPS). These surveys often do not contain the
#' exact questions asked in online-opt in surveys, keeping them from being used
#' for proper adjustment.
#'
#' This _synthetic_ dataset was created by combining nine separate benchmark
#' datasets. Each had a set of common demographic variables but many added
#' unique variables such as gun ownership or voter registration. The surveys
#' were combined, stratified, sampled, combined, and imputed to fill missing
#' values from each. From this large dataset, the original 20,000 surveys from
#' the ACS were kept to ensure accurate demographic distribution.
#'
#' The names were _RANDOMLY_ assigned to respondents to better simulate a
#' synthetic sample of the population. First names were taken from the
#' `babynames` dataset which contains the Social Security Administration's
#' record of baby names from 1880 to 2017 along with gender and proportion.
#' First names were proportionally randomly assigned by birth year and sex. Last
#' names were taken from the Census Bureau, who provides the 162,254 most common
#' last names in the 2010 Census, covering over 90% of the population. For a
#' given surname, the proportion of that name belonging to members of each race
#' and ethnicity is provided. The last names were proportionally randomly
#' assigned by race.
#'
#' [1]: https://www.pewresearch.org/methods/2018/01/26/for-weighting-online-opt-in-samples-what-matters-most/
#' [2]: https://www.pewresearch.org/methods/2018/01/26/appendix-b-synthetic-population-dataset/
#'
#' @format A tibble with 20,000 rows and 40 variables:
#' \describe{
#' \item{id}{Sequential unique ID}
#' \item{fname}{Random first name, see details}
#' \item{lname}{Random last name, see details}
#' \item{gender}{Biological sex}
#' \item{age}{Age capped at 85}
#' \item{race}{Race and Ethnicity}
#' \item{edu}{Educational attainment}
#' \item{div}{Census regional division}
#' \item{married}{Marital status}
#' \item{house_size}{Household size}
#' \item{children}{Has children}
#' \item{us_citizen}{Is a US citizen}
#' \item{us_born}{Was born in the US}
#' \item{house_income}{Family income}
#' \item{emp_status}{Employment status}
#' \item{emp_sector}{Employment sector}
#' \item{hours_work}{Hours worked per week}
#' \item{hours_vary}{Hours vary week to week}
#' \item{mil}{Has served in the military}
#' \item{house_own}{Home ownership}
#' \item{metro}{Lives in metropolitan area}
#' \item{internet}{Household has internet access}
#' \item{foodstamp}{Receives food stamps}
#' \item{house_moved}{Moved in the last year}
#' \item{pub_contact}{Contacted or visited a public official}
#' \item{boycott}{}
#' \item{hood_group}{Participated in a community association}
#' \item{hood_talks}{Talked with neighbors}
#' \item{hood_trust}{Trusts neighbors}
#' \item{tablet}{Uses a tablet or e-reader}
#' \item{texting}{Uses text messaging}
#' \item{social}{Uses social media}
#' \item{volunteer}{Volunteered}
#' \item{register}{Is registered to vote}
#' \item{vote}{Voted in the 2014 midterm elections}
#' \item{party}{Political party}
#' \item{religion}{Religious (evangelical) affiliation}
#' \item{ideology}{Political ideology}
#' \item{govt}{Follows government and public affairs}
#' \item{guns}{Owns a gun}
#' }
#' @source “For Weighting Online Opt-In Samples, What Matters Most?” Pew
#' Research Center, Washington, D.C. (January 26, 2018)
#' \url{https://www.pewresearch.org/methods/2018/01/26/for-weighting-online-opt-in-samples-what-matters-most/}
"people"
# objects from zipcodes.R -------------------------------------------------
#' US ZIP Code Locations
#'
#' This tibble contains city, state, latitude, and longitude for U.S. ZIP codes
#' from the CivicSpace Database (August 2004) augmented by Daniel Coven's [web
#' site](http://federalgovernmentzipcodes.us/) (updated on January 22, 2012).
#' The data was originally contained in the
#' [`zipcode`](https://CRAN.R-project.org/package=zipcode) CRAN package, which
#' was archived on January 1, 2020.
#'
#' @format A tibble with 44,336 rows and 5 variables:
#' \describe{
#' \item{zip}{5 digit ZIP code or military postal code (FPO/APO)}
#' \item{city}{USPS official city name}
#' \item{state}{USPS official state, territory abbreviation code}
#' \item{latitude}{Decimal Latitude}
#' \item{longitude}{Decimal Longitude}
#' }
#' @source Daniel Coven's [web site](http://federalgovernmentzipcodes.us/) and
#' the CivicSpace US ZIP Code Database written by Schuyler Erle
#' <[email protected]>, 5 August 2004.
"zipcodes"
#' US ZIP Codes
#'
#' The United States Postal Service's 5-digit codes used to identify a
#' particular postal delivery area.
#'
#' @format A character vector of length 44336.
#' @source Daniel Coven's [web site](http://federalgovernmentzipcodes.us/) and
#' the CivicSpace US ZIP Code Database written by Schuyler Erle
#' <[email protected]>, 5 August 2004.
"zip.code"
#' US ZIP Centers
#'
#' A list with components named `x` and `y` giving the approximate geographic
#' center of each ZIP code in negative longitude and latitude.
#'
#' @format A list of length two, each element a numeric vector of length 44336.
#' \describe{
#' \item{x}{Center longitudinal coordinate}
#' \item{y}{Center latitudinal coordinate}
#' }
#' @source Daniel Coven's [web site](http://federalgovernmentzipcodes.us/) and
#' the CivicSpace US ZIP Code Database written by Schuyler Erle
#' <[email protected]>, 5 August 2004.
"zip.center"
#' US ZIP Cities
#'
#' The United States Postal Service's official names for the cities in which
#' ZIP codes are contained. This vector contains unique values, sorted
#' alphabetically; because of this, it does not line up with the other vectors in the
#' way [zip.code] and [zip.center] do.
#'
#' @format A character vector of length 19108.
#' @source Daniel Coven's [web site](http://federalgovernmentzipcodes.us/) and
#' the CivicSpace US ZIP Code Database written by Schuyler Erle
#' <[email protected]>, 5 August 2004.
"city.name"
#' US Counties
#'
#' The county subdivisions of the US states and territories.
#'
#' @format A tibble with 3,232 rows and 3 variables:
#' \describe{
#' \item{fips}{Federal Information Processing Standard Publication 5-2 code}
#' \item{name}{Census county names}
#' \item{state}{USPS official state, territory abbreviation code}
#' }
#' @source \url{https://web.archive.org/web/20240106151642/https://transition.fcc.gov/oet/info/maps/census/fips/fips.txt}
"counties"
#' US County Names
#'
#' The names of the distinct US counties.
#'
#' @format A character vector of length 19108.
#' @source \url{https://web.archive.org/web/20240106151642/https://transition.fcc.gov/oet/info/maps/census/fips/fips.txt}
"county.name"
|
/scratch/gouwar.j/cran-all/cranData/usa/R/data.R
|
#' Convert state identifiers
#'
#' Take a vector of state identifiers and convert to a common format.
#'
#' @param x A character vector of: state names, abbreviations, or FIPS codes.
#' @param to The format returned: "abb", "names", or "fips" (defaults to "abb").
#' @examples
#' state_convert(c("AL", "Vermont", "06"))
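#' # Convert mixed identifiers to FIPS codes ("to" accepts "abb", "names", or "fips")
#' state_convert(c("NC", "Vermont", "06"), to = "fips")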
#' @return A character vector of single format state identifiers.
#' @export
state_convert <- function(x, to = NULL) {
to <- match.arg(to, c("abb", "names", "fips"), several.ok = FALSE)
abbs <- grep("^[A-Z]{2}$", x)
full <- grep("^[a-zA-Z ]{3,}$", x)
fips <- grep("^\\d+$", x)
x[fips] <- sprintf("%02d", as.numeric(x[fips]))
match2 <- function(x, table) {
match(tolower(x), tolower(table))
}
if (to == "abb") {
x[abbs] <- usa::state.abb[match2(x[abbs], usa::state.abb)]
x[full] <- usa::state.abb[match2(x[full], usa::state.name)]
x[fips] <- usa::state.abb[match2(x[fips], usa::states$fips)]
} else if (to == "names") {
x[abbs] <- usa::state.name[match2(x[abbs], usa::state.abb)]
x[full] <- usa::state.name[match2(x[full], usa::state.name)]
x[fips] <- usa::state.name[match2(x[fips], usa::states$fips)]
} else if (to == "fips") {
x[abbs] <- usa::states$fips[match2(x[abbs], usa::state.abb)]
x[full] <- usa::states$fips[match2(x[full], usa::state.name)]
x[fips] <- usa::states$fips[match2(x[fips], usa::states$fips)]
}
return(x)
}
|
/scratch/gouwar.j/cran-all/cranData/usa/R/state-convert.R
|
# This function cleans an AUDIT_LOG string: it strips field-code tokens, digits,
# and separator characters, and joins multi-word status phrases with underscores
# so the log can later be split on single spaces.
clean_str = function(str){
  str = gsub("\\*", " ", str)
  # Remove tokens of the form ?<letter><digit><word-characters> for letters A-Z
  # and digits 1-5 (the leading "?" is kept exactly as in the original patterns).
  # The original code spelled these out as one gsub() per pattern; it removed
  # "?C4\\w+" twice and never "?C5\\w+", which looked like a typo and is fixed here.
  for (digit in 1:5) {
    for (letter in LETTERS) {
      str = gsub(paste0("?", letter, digit, "\\w+"), "", str)
    }
  }
  # Strip remaining digits and separator characters
  str = gsub("[0-9]", "", str)
  str = gsub("/", "", str)
  str = gsub(":", "", str)
  str = gsub("-", "", str)
  # Collapse runs of spaces (two passes, as in the original)
  str = gsub("  ", " ", str)
  str = gsub("  ", " ", str)
str = gsub("Open for Correction", "Open_for_Correction", str)
str = gsub("Open for Cancellation", "Open_for_Cancellation", str)
str = gsub("Pending Payment", "Pending_Payment", str)
str = gsub("Contact Last Name", "Contact_Last_Name", str)
str = gsub("Contact First Name", "Contact_First_Name", str)
str = gsub("Group Unpublished Works", "Group_Unpublished_Works", str)
str = gsub("Special Handling", "Special_Handling", str)
str = gsub("Error sending to CentralPrint", "Error_sending_to_CentralPrint", str)
str = gsub("Short Online Literary Works", "Short_Online_Literary_Works", str)
str = gsub("Musical works from an album", "Musical_works_from_an_album", str)
str = gsub("Sound recordings from an album", "Sound_recordings_from_an_album", str)
str = gsub("Single Serial Issue","Single_Serial_Issue", str)
str = gsub("Published Photographs","Published_Photographs", str)
str = gsub("Unpublished Photographs","Unpublished_Photographs", str)
str = gsub("Daily Newsletters","Daily_Newsletters", str)
str = gsub("Daily Newspapers","Daily_Newspapers", str)
str = stringr::str_trim(str, side = "both")
return(str)
}
#setwd("/Users/frederickliu/Desktop")
clean_data_to_excel = function(filename){
d <- readxl::read_excel(filename) #read the file
d[2] = apply(d[2], 2, clean_str) #clean the AUDIT_LOG strings
#create new vectors to store the new data
SR_NUM <- c()
AUDIT_LOG <- c()
OPERATION_DT <- c()
DIVISION <- c()
TEAM <- c()
LOGIN <- c()
OWNERSHIP_DATE <- c()
RECEIPT_DATE = c()
WAIT_ON_CUST = c()
REGISTRATION_DECISION_DATE = c()
REGISTRATION_DECISION = c()
count = 1 # to count the number of instances *important*
#Run time for this is O(n^2)
#Iterate through each row, then for each row, iterate through
# each Audit variable and then append them into the new vectors created above
for (i in 1:nrow(d[1])){
for (j in 1:length(stringr::str_split(d[[2]][i], " ")[[1]])){
SR_NUM[count] = d[[1]][i]
AUDIT_LOG[count] = stringr::str_split(d[[2]][i], " ")[[1]][j]
OPERATION_DT[count] = as.character(d[[3]][i])
DIVISION[count] = d[[4]][i]
TEAM[count] = gsub(" ", "-", d[[5]][i])
LOGIN[count] = d[[6]][i]
OWNERSHIP_DATE[count] = as.character(d[[7]][i])
RECEIPT_DATE[count] = as.character(d[[8]][i])
WAIT_ON_CUST[count] = d[[9]][i]
      REGISTRATION_DECISION_DATE[count] = as.character(d[[10]][i]) # keep dates as text, consistent with clean_data_to_dataframe()
REGISTRATION_DECISION[count] = d[[11]][i]
count <- count + 1
}
}
#store cleaned data to new data frame
data_cleaned <- data.frame(SR_NUM, AUDIT_LOG, OPERATION_DT, DIVISION, TEAM,
LOGIN, OWNERSHIP_DATE, RECEIPT_DATE, WAIT_ON_CUST,
REGISTRATION_DECISION_DATE, REGISTRATION_DECISION)
#export data into a new .xlsx file
openxlsx::write.xlsx(x = data_cleaned, file = "cleaned_data.xlsx", sheetName = "AuditData", append = FALSE, rowNames = FALSE)
print("******************* success! *******************")
#return(data_cleaned) #don't really need to return it... but needed for the ultimate cleaning function
}
clean_data_to_dataframe = function(filename){
d <- readxl::read_excel(filename) #read the file
d[2] = apply(d[2], 2, clean_str) #clean the AUDIT_LOG strings
#create new vectors to store the new data
SR_NUM <- c()
AUDIT_LOG <- c()
OPERATION_DT <- c()
DIVISION <- c()
TEAM <- c()
LOGIN <- c()
OWNERSHIP_DATE <- c()
RECEIPT_DATE = c()
WAIT_ON_CUST = c()
REGISTRATION_DECISION_DATE = c()
REGISTRATION_DECISION = c()
count = 1 # to count the number of instances *important*
#Run time for this is O(n^2)
#Iterate through each row, then for each row, iterate through
# each Audit variable and then append them into the new vectors created above
for (i in 1:nrow(d[1])){
for (j in 1:length(stringr::str_split(d[[2]][i], " ")[[1]])){
SR_NUM[count] = d[[1]][i]
AUDIT_LOG[count] = stringr::str_split(d[[2]][i], " ")[[1]][j]
OPERATION_DT[count] = as.character(d[[3]][i])
DIVISION[count] = d[[4]][i]
TEAM[count] = gsub(" ", "-", d[[5]][i])
LOGIN[count] = d[[6]][i]
OWNERSHIP_DATE[count] = as.character(d[[7]][i])
RECEIPT_DATE[count] = as.character(d[[8]][i])
WAIT_ON_CUST[count] = d[[9]][i]
REGISTRATION_DECISION_DATE[count] = as.character(d[[10]][i])
REGISTRATION_DECISION[count] = d[[11]][i]
count <- count + 1
}
}
#store cleaned data to new data frame
data_cleaned <- data.frame(SR_NUM, AUDIT_LOG, OPERATION_DT, DIVISION, TEAM,
LOGIN, OWNERSHIP_DATE, RECEIPT_DATE, WAIT_ON_CUST,
REGISTRATION_DECISION_DATE, REGISTRATION_DECISION)
#export data into a new .xlsx file
#openxlsx::write.xlsx(x = data_cleaned, file = "cleaned_data.xlsx", sheetName = "AuditData", append = FALSE, rowNames = FALSE)
#print("******************* success! *******************")
return(data_cleaned) #don't really need to return it... but needed for the ultimate cleaning function
}
format_from_excel <- function(filename){
#*Note* the whole process will take about 1 hour and 50 mins given the size of cleaned_data.xlsx
# the returned data frame is a large list of 196540 element
#------- My assumption on the values in the variable AUDIT_LOG -------
# all the values in AUDIT_LOG that start with "X_" or "SR_" are field values (in addition to 'Owner')
#------- My assumption on the values in the variable AUDIT_LOG -------
d <- readxl::read_excel(filename)
d <- as.data.frame(d) #read the cleaned_data.xlsx as a dataframe
FIELD = c() #create the FIELD dictionary for variables New Value/Old Value/Others
#basically what it will looks like:
#$'Owner' -> 'XXXX' 'YYYY' where 'XXXX' is the old value and 'YYYY' is the new value
value_count = 1 # count the number of New Value/Old Value/Others in each Field
other_dummy = "" # to temporarily store the field value key on each loop through
sr_stat_id_dummy = "" # to temporarily store the field value SR_STAT_ID such that its non-field values will be redistributed to it's dictionary field.
sr_stat_id_count = 1
x_sr_status_internal_dummy = "" # to temporarily store the field value X_SR_STATUS_INTERNAL such that its non-field values will be redistributed to it's dictionary field
x_sr_status_internal_count = 1
for (i in 1:nrow(d[1])){
AUDIT_LOG = d[[2]][i] #each value in variable AUDIT_LOG
firstchar = substr(AUDIT_LOG, 1, 2) #get the first two characters in the value
    firstthree = substr(AUDIT_LOG, 1, 3) #get the first three characters in the value
    if (identical(firstchar, "X_") || identical(AUDIT_LOG, "Owner") || identical(firstthree, "SR_") ){ #--> this could be adjusted later after deeper pattern research
#if the first two character is X_, then it is possibly a Field Value
#or if it's 'Owner' then it is also possibly a Field Value
temp_PASTED_value = paste("<", i, ">",d[[1]][i], AUDIT_LOG, d[[3]][i], d[[4]][i], d[[5]][i], d[[6]][i], d[[7]][i], d[[8]][i], d[[9]][i], d[[10]][i],d[[11]][i]) #paste the SR_NUM and the value together
#such that each key in the FIELD dictionary will be unique
if (identical(AUDIT_LOG, "SR_STAT_I") || identical(AUDIT_LOG, "SR_STAT_ID")){ #To redistribute specific values to its field dictionary value
sr_stat_id_count = 1
sr_stat_id_dummy = temp_PASTED_value
FIELD[[temp_PASTED_value]] = c()
} else if (identical(AUDIT_LOG, "X_SR_STATUS_INTERNA")){
x_sr_status_internal_count = 1
x_sr_status_internal_dummy = temp_PASTED_value
FIELD[[temp_PASTED_value]] = c()
}else {
value_count = 1 #reset the value count to 1 *important*
FIELD[[temp_PASTED_value]] = c("") #initiate the dictionary as an empty vector
#in the vector, we will be storing the possible New/Old/Others values
other_dummy = temp_PASTED_value # set the dummy key equal to the field value
}
} else {
if (identical(AUDIT_LOG, "Open") || identical(AUDIT_LOG, "Closed")){ #To redistribute specific values to its field dictionary value SR_STAT_I
FIELD[[sr_stat_id_dummy]][sr_stat_id_count] = AUDIT_LOG
sr_stat_id_count = sr_stat_id_count + 1
} else if (identical(AUDIT_LOG, "Open_for_Correction")||identical(AUDIT_LOG, "RESCANNED")||identical(AUDIT_LOG, "T")){ #To redistribute specific values to its field dictionary value X_SR_STATUS_INTERNA
FIELD[[x_sr_status_internal_dummy]][x_sr_status_internal_count] = AUDIT_LOG
x_sr_status_internal_count = x_sr_status_internal_count + 1
}
else {
FIELD[[other_dummy]][value_count] = AUDIT_LOG #if it's not those two possibilities of 'X_'
#or 'Owner' we know that the value might not be a field value. As such, we store
#it in the field value key that correspond to it...
value_count = value_count + 1 # we increment the vector counter in case if there are more
#New/Old/Others non-Field values
}
}
}
#export it to txt file:
#options(max.print=999999)
#sink("test_data_formatted.txt")
#print(FIELD)
#sink()
return(FIELD)
}
format_from_dataframe <- function(dataframedata){
#*Note* the whole process will take about 1 hour and 50 mins given the size of cleaned_data.xlsx
# the returned data frame is a large list of 196540 element
#------- My assumption on the values in the variable AUDIT_LOG -------
# all the values in AUDIT_LOG that start with "X_" or "SR_" are field values (in addition to 'Owner')
#------- My assumption on the values in the variable AUDIT_LOG -------
d <- as.data.frame(dataframedata)
FIELD = c() #create the FIELD dictionary for variables New Value/Old Value/Others
#basically what it will looks like:
#$'Owner' -> 'XXXX' 'YYYY' where 'XXXX' is the old value and 'YYYY' is the new value
value_count = 1 # count the number of New Value/Old Value/Others in each Field
other_dummy = "" # to temporarily store the field value key on each loop through
sr_stat_id_dummy = "" # to temporarily store the field value SR_STAT_ID such that its non-field values will be redistributed to it's dictionary field.
sr_stat_id_count = 1
x_sr_status_internal_dummy = "" # to temporarily store the field value X_SR_STATUS_INTERNAL such that its non-field values will be redistributed to it's dictionary field
x_sr_status_internal_count = 1
for (i in 1:nrow(d[1])){
AUDIT_LOG = d[[2]][i] #each value in variable AUDIT_LOG
firstchar = substr(AUDIT_LOG, 1, 2) #get the first two characters in the value
    firstthree = substr(AUDIT_LOG, 1, 3) #get the first three characters in the value
    if (identical(firstchar, "X_") || identical(AUDIT_LOG, "Owner") || identical(firstthree, "SR_") ){ #--> this could be adjusted later after deeper pattern research
#if the first two character is X_, then it is possibly a Field Value
#or if it's 'Owner' then it is also possibly a Field Value
temp_PASTED_value = paste("<", i, ">",d[[1]][i], AUDIT_LOG, d[[3]][i], d[[4]][i], d[[5]][i], d[[6]][i], d[[7]][i], d[[8]][i], d[[9]][i], d[[10]][i], d[[11]][i]) #paste the SR_NUM and the value together
#such that each key in the FIELD dictionary will be unique
if (identical(AUDIT_LOG, "SR_STAT_I") || identical(AUDIT_LOG, "SR_STAT_ID")){ #To redistribute specific values to its field dictionary value
sr_stat_id_count = 1
sr_stat_id_dummy = temp_PASTED_value
FIELD[[temp_PASTED_value]] = c()
} else if (identical(AUDIT_LOG, "X_SR_STATUS_INTERNA")){
x_sr_status_internal_count = 1
x_sr_status_internal_dummy = temp_PASTED_value
FIELD[[temp_PASTED_value]] = c()
}else {
value_count = 1 #reset the value count to 1 *important*
FIELD[[temp_PASTED_value]] = c("") #initiate the dictionary as an empty vector
#in the vector, we will be storing the possible New/Old/Others values
other_dummy = temp_PASTED_value # set the dummy key equal to the field value
}
} else {
if (identical(AUDIT_LOG, "Open") || identical(AUDIT_LOG, "Closed")){ #To redistribute specific values to its field dictionary value SR_STAT_I
FIELD[[sr_stat_id_dummy]][sr_stat_id_count] = AUDIT_LOG
sr_stat_id_count = sr_stat_id_count + 1
} else if (identical(AUDIT_LOG, "Open_for_Correction")||identical(AUDIT_LOG, "RESCANNED")||identical(AUDIT_LOG, "T")){ #To redistribute specific values to its field dictionary value X_SR_STATUS_INTERNA
FIELD[[x_sr_status_internal_dummy]][x_sr_status_internal_count] = AUDIT_LOG
x_sr_status_internal_count = x_sr_status_internal_count + 1
}
else {
FIELD[[other_dummy]][value_count] = AUDIT_LOG #if it's not those two possibilities of 'X_'
#or 'Owner' we know that the value might not be a field value. As such, we store
#it in the field value key that correspond to it...
value_count = value_count + 1 # we increment the vector counter in case if there are more
#New/Old/Others non-Field values
}
}
}
#export it to txt file:
old <- options()
options(max.print=999999)
sink("audit_data_formatted.txt")
print(FIELD)
sink()
on.exit(options(old))
return(FIELD)
}
support_function <- function(data){
#take in a data frame and then format them into a new readable dataframe and then output in .xlsx file
#this new data frame include variables
#1. SR_NUM
#2. FIELD
#3. OLD_VALUE
#4. NEW_VALUE
#5. NONDETERMINISTIC_VALUE
list_of_fields_keys = names(data)
SR_NUM = c()
FIELD = c()
NEW_VALUE = c()
OLD_VALUE = c()
NONDETERMINISTIC_VALUE = c()
OPERATION_DT <- c()
DIVISION <- c()
TEAM <- c()
LOGIN <- c()
OWNERSHIP_DATE <- c()
RECEIPT_DATE = c()
WAIT_ON_CUST = c()
REGISTRATION_DECISION_DATE = c()
REGISTRATION_DECISION = c()
count = 1
for (keys in list_of_fields_keys){
splited_keys = stringr::str_split(keys, " ")
SR_NUM_VALUE = splited_keys[[1]][4]
FIELD_VALUE = splited_keys[[1]][5]
OP_DT_VALUE = splited_keys[[1]][6]
DIVISION_VALUE = splited_keys[[1]][7]
TEAM_VALUE = splited_keys[[1]][8]
LOGIN_VALUE = splited_keys[[1]][9]
OWNERSHIP_DATE_VALUE = splited_keys[[1]][10]
REICEPT_DATE_VALUE = splited_keys[[1]][11]
WAIT_ON_CUST_VALUE = splited_keys[[1]][12]
REG_DEC_DATE_VALUE = splited_keys[[1]][13]
REG_DEC_VALUE = splited_keys[[1]][14]
value = data[[keys]]
if (length(value) == 2){
NEW_VALUE[count] = value[1]
OLD_VALUE[count] = value[2]
NONDETERMINISTIC_VALUE[count] = ""
} else if (length(value) == 1){
      NEW_VALUE[count] = value[1] #if the length of the value list is 1, then we just assume that it is a new value...
OLD_VALUE[count] = ""
NONDETERMINISTIC_VALUE[count] = ""
} else {
NEW_VALUE[count] = ""
OLD_VALUE[count] = ""
NONDETERMINISTIC_VALUE[count] = paste(value, collapse = ", ")
#if the length of the value is list is more than 2,
#then it's hard to determine whether they are new/old value, therefore, we put it
#all at the non-deterministic value bracket
}
SR_NUM[count] = SR_NUM_VALUE
FIELD[count] = FIELD_VALUE
OPERATION_DT[count] = OP_DT_VALUE
DIVISION[count] = DIVISION_VALUE
TEAM[count] = TEAM_VALUE
LOGIN[count] = LOGIN_VALUE
OWNERSHIP_DATE[count] = OWNERSHIP_DATE_VALUE
RECEIPT_DATE[count] =REICEPT_DATE_VALUE
WAIT_ON_CUST[count] =WAIT_ON_CUST_VALUE
REGISTRATION_DECISION_DATE[count] =REG_DEC_DATE_VALUE
REGISTRATION_DECISION[count] =REG_DEC_VALUE
count = count + 1
}
  formatted_data = data.frame(SR_NUM, FIELD, NEW_VALUE, OLD_VALUE, NONDETERMINISTIC_VALUE, OPERATION_DT, DIVISION, TEAM, LOGIN, OWNERSHIP_DATE, RECEIPT_DATE, WAIT_ON_CUST, REGISTRATION_DECISION_DATE, REGISTRATION_DECISION)
  openxlsx::write.xlsx(x = formatted_data, file = "audit_data_formatted.xlsx", sheetName = "AuditData_FORMATTED", append = FALSE, rowNames = FALSE)
}
clean_format_all <- function(excelfile){
cleaned_data= clean_data_to_dataframe(excelfile)
formatted_data = format_from_dataframe(cleaned_data)
support_function(formatted_data)
}
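# Example usage (the input file name below is hypothetical): this reads the raw
# audit export, then writes "audit_data_formatted.txt" and
# "audit_data_formatted.xlsx" via the helpers above.
# clean_format_all("audit_log_export.xlsx")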
# DELETE THESE LATER... JUST TESTING
#install_github("pakabuka/uscoauditlog/uscoauditlog")
#data <- readxl::read_excel("cleaned_data.xlsx")
#attach(data)
#tab = table(AUDIT_LOG)
#sorted <- tab[order(tab, decreasing = TRUE)]
#sorted
|
/scratch/gouwar.j/cran-all/cranData/uscoauditlog/R/uscoauditlog.R
|
utils::globalVariables(c("month", "value", "jan", 'total'))
|
/scratch/gouwar.j/cran-all/cranData/usdampr/R/globals.R
|
#' Request current and historical USDA-AMS MPR data
#'
#' This is the primary function in the \code{usdampr} package to request data from the United States
#' Department of Agriculture - Agricultural Marketing Service (USDA-AMS) mandatory price reporting, commonly known as MPR.
#' This function allow users to access data documented in the Livestock Mandatory Price Reporting (LMPR),
#' Dairy Products Mandatory Reporting Program (DPMRP), and Federal Milk Marketing Orders (FMMOS) market reports.
#' LMPR contains data for cattle, hogs, sheep, beef, pork, and lamb.
#'
#' This function is built on the web service provided by USDA-AMS. Alternatives to this package include text files to be
#' directly downloaded via \url{https://mpr.datamart.ams.usda.gov} (known as DATAMART),
#' or direct coding of the API. Starting in April 2020, text files were made permanently unavailable.
#'
#' The \code{mpr_request} function provides flexible ways to request data. Specifically, users can download data from a single report or
#' multiple reports for a pre-specified report time. Users can also specify slug IDs or the legacy slug IDs to request data.
#'
#' The data request takes two necessary inputs. The first input is slug ID or legacy slug ID. Slug ID should be a 4-digit number
#' (numbers in characters are fine). Examples for slug ID include 2461 (Report name: National Weekly Boxed Beef Cutout & Boxed Beef Cuts),
#' 2472 (Report name: Weekly Direct Slaughter Cattle). If you do not know the slug ID, you can use the legacy slug IDs,
#' such as LM_XB459 (Report name: National Weekly Boxed Beef Cutout & Boxed Beef Cuts). When legacy slug IDs are provided, the
#' \code{mpr_request} function performs an internal search for their corresponding slug IDs and then makes the data requests. The returned
#' data are labelled by slug IDs for consistency. Users should provide either slug IDs or legacy slug IDs, not both. The provided slug IDs
#' or legacy slug IDs must be valid. Use data(slugInfo) to get a list of valid slug IDs and the report information.
#'
#' The second input is report time. For LMPR and DPMRP, the report time should be a specific date with year, month and day,
#' formatted as %m/%d/%Y, such as "06/05/2020" for June 5th, 2020. An error message could appear if the report time is not appropriately formatted.
#' For FMMOS, the report time should be a year instead, such as 2020. Users can request data for a range of dates, such as "06/01/2020:06/05/2020" for data
#' from June 1st 2020 to June 5th 2020.
#'
#'
#'
#' @param slugIDs Valid slug IDs. Should be a 4-digit number, either a numerical value or a character.
#' Users can provide either one slug ID or multiple slug IDs. See details.
#' @param slugIDs_legacy Valid legacy slug IDs. Examples: LM_XB401, LM_XB403.
#' @param report_time A valid date (e.g.,'01/31/2020') or period of time (e.g., '01/31/2020:03/25/2020').
#' For FMMOS, it should be a year (e.g., 2019). The default is the current system date.
#' @param message A logical indicator for whether to display request messages and warnings. Default is TRUE.
#'
#' @return
#' The function returns a list with the requested data. The requested are either daily, weekly, monthly, or yearly, depending on the report data being requested.
#' Report sections associated with the slug ID are located in sub-lists. Empty data could be returned if there are no data associated with the request.
#'
#' @export
#'
#' @examples
#' \donttest{
#' # Load all available slug IDs, report date, report sections, and report frequency
#' # If you already know the slug IDs, you can skip this code
#' data(slugInfo)
#' # Example 1: One slug ID, single date
#' test1a <- mpr_request(slugIDs = 2461, report_time = '01/31/2020')
#' # Now use legacy slug ID
#' test1a_legacy <- mpr_request(slugIDs_legacy = 'LM_XB459', report_time = '01/31/2020')
#'
#' # Example 2: One slug ID, multiple dates
#' test1b <- mpr_request(slugIDs = 2461, report_time = '01/31/2020:03/25/2020')
#' # Multiple slug IDs, single date
#' test1c <- mpr_request(slugIDs = c(2461, 2463), report_time = '01/31/2020')
#' # Now use legacy slug ID
#' test1c_legacy <- mpr_request(slugIDs_legacy = c('LM_XB459', 'LM_XB461'), report_time = '01/31/2020')
#'
#' # Multiple slug IDs, multiple dates
#' test1d <- mpr_request(slugIDs = c(2461, 2463), report_time = '01/25/2020:03/25/2020')
#'
#' # Get Livestock Mandatory Price Reporting (LMPR) data.
#' test2a <- mpr_request(slugIDs = 2463, report_time = '01/25/2020:03/25/2020')
#' # Get Dairy Products Mandatory Reporting Program (DPMRP) data.
#' test2b <- mpr_request(slugIDs = 2991, report_time = '01/25/2020:03/25/2020')
#' # Get Federal Milk Marketing Orders (FMMOS) data. NAs are returned if the data do not exist.
#' test2c <- mpr_request(slugIDs = 3346, report_time = '2018:2019')
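#'
#' # Set message = FALSE to silence the per-request console output
#' test2d <- mpr_request(slugIDs = 2461, report_time = '01/31/2020', message = FALSE)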
#'
#'}
mpr_request <- function(slugIDs = NULL, slugIDs_legacy = NULL, report_time = NULL, message = TRUE){
if(is.null(report_time)){
report_time = format(Sys.Date(), format="%m/%d/%Y")
}
if(is.null(slugIDs) & is.null(slugIDs_legacy))
stop('slugIDs or slugIDs_legacy must be provided.')
if(!is.null(slugIDs) & !is.null(slugIDs_legacy))
stop("Please provide slugIDs or slugIDs_legacy, not both. Hint: use data('slugInfo') to check with the ID information")
if(!is.null(slugIDs_legacy)){
ehagdhjdsdcsd <- new.env()
utils::data("slugInfo", envir = ehagdhjdsdcsd)
slugIDs <- ehagdhjdsdcsd$slugInfo$slug_id[which(ehagdhjdsdcsd$slugInfo$legacy_slugid %in% slugIDs_legacy)]
}
if(length(slugIDs) == 1){
out <- mpr_request_single(slugIDs, report_time, message = message)
}else{
out <- lapply(slugIDs, function(i) mpr_request_single(i, report_time, message = message))
names(out) <- slugIDs
}
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/usdampr/R/mpr_request.R
|
#' A data set listing all available report information as supplied by USDA-AMS
#'
#' The listed reports describes slug ID, legacy slug ID, report title, report frequency, market type, office, and section names.
#'
#' @docType data
#'
#' @usage data(slugInfo)
#'
#' @keywords datasets
#'
#' @examples
#' data(slugInfo)
"slugInfo"
|
/scratch/gouwar.j/cran-all/cranData/usdampr/R/slugInfo.R
|
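# reshape_func(): pivots a wide FMMOS table (monthly columns jan through total)
# into long format, strips thousands separators from the values, coerces them to
# numeric, and adds a numeric report_month derived from the month abbreviation.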
reshape_func <- function(data) {
data_out <- tidyr::gather(data, month, value, jan:total)
data_out <- dplyr::mutate(data_out, value = as.numeric(gsub(',', '', value)),
report_month = match(month, tolower(month.abb)))
data_out
}
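# check_numeric(): heuristic used by convert_date() below. A column is treated as
# numeric only if stripping commas and coercing to numeric gives a non-zero sum,
# so all-zero or all-NA columns are left as character.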
check_numeric <- function(dat){
dat_numeric <- suppressWarnings(as.numeric(gsub(',', '', dat)))
if(sum(dat_numeric, na.rm = T) == 0){
FALSE
}else{
TRUE
}
}
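# convert_date(): parses the published_date and report_date columns, coerces
# slug_id to numeric, and converts the remaining comma-formatted numeric-looking
# columns via check_numeric() above.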
convert_date <- function(data){
out <- data
if('published_date' %in% colnames(out)){
out[, 'published_date'] <- as.POSIXct(out[, 'published_date'], format = "%m/%d/%Y %H:%M:%S")
}
if('report_date' %in% colnames(out)){
out[, 'report_date'] <- as.Date(out[, 'report_date'], format = "%m/%d/%Y")
}
if('slug_id' %in% colnames(out)){
out[, 'slug_id'] <- as.numeric(out[, 'slug_id'])
}
out <- dplyr::mutate_if(out, check_numeric, function(i) as.numeric(gsub(',', '', i)))
return(out)
}
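# mpr_request_single(): builds the DATAMART request URL (week_ending_date for
# DPMRP slugs 2901-3000, report_date for LMPR slugs up to 2900, report_year for
# FMMOS slugs above 3000), fetches and parses the JSON response, and returns a
# named list of report sections; FMMOS sections are additionally reshaped to
# long format.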
mpr_request_single <- function(slug, report_time, message){
# validIDs <- c(2451, 2453, 2455:2464, 2466:2472, 2474:2489, 2498:2524, 2648, 2649, 2656, 2659:2681, 2685:2696, 2701:2703, 2989, 2991,
# 2993, 3345:3359)
# # Check slug id
# if(!as.numeric(slug) %in% validIDs) stop('Invalid slug ID. Please check with the slugInfo data set. Use data("slugInfo").')
slug <- as.numeric(slug)
# request_url <- NA_character_
if(slug > 2900 & slug <= 3000){# These slug ids are for dairy prices (starting from weekly).
request_url <- paste0('https://mpr.datamart.ams.usda.gov/services/v1.1/reports/', slug, '?q=week_ending_date=', report_time, '&allSections=true')
}
if(slug <= 2900){ # Livestock data
request_url <- paste0('https://mpr.datamart.ams.usda.gov/services/v1.1/reports/', slug, '?q=report_date=', report_time, '&allSections=true')
}
if(slug > 3000) {# dairy data
#if(nchar(report_time) != 4) stop('Dairy FMMOS request can only take a four-digit year as the report_time')
request_url <- paste0('https://mpr.datamart.ams.usda.gov/services/v1.1/reports/', slug, '?q=report_year=', report_time, '&allSections=true')
}
response <- httr::GET(request_url)
  if(response$status_code == 500) stop('Internal server error. Possibly due to an invalid slug id. Consider revising your request.')
data <- jsonlite::fromJSON(httr::content(response, as = "text", encoding = 'UTF-8')) #lapply(data, read_data_func2)
data_out <- data[['results']]
if(!is.null(data_out)) {
if(slug > 3000) {# dairy data
for(i in 2:length(data_out)){
data_out[[i]] <- reshape_func(data_out[[i]])
}
}
names(data_out) <- data$reportSection
}
# Remove sections with NULL values.
data_out <- data_out[!sapply(data_out, is.null)]
# Clean the dates and convert to numerical values.
data_out <- lapply(data_out, convert_date)
if(isTRUE(message)){
if(sum(grepl('No Results Found', data$message)) >= 1){
warning('There is warning message with the request. Possibly due to inappropriate format for report_time (see instructions from the help file).\n The warning message is:\n',
paste0(data$message, collapse = '..'))
}else{
cat('Successfully requested data for slug:', slug, '\nMultiple sections are included in the data list:\n',
paste0(1:length(data_out), '-',names(data_out), '.\n'))
}
}
return(data_out)
}
|
/scratch/gouwar.j/cran-all/cranData/usdampr/R/utils.R
|
.onAttach <- function( lib, pkg ) {
packageStartupMessage(
paste0( "\nPlease cite the 'usdampr' package as:\n",
"Elliott J. Dennis and Bowen Chen (2020). ",
"Introduction to the R-Package: usdampr. ",
"Farm and Ranch Management News. University of Nebraska at Lincoln. ",
"DOI: 10.13014/frm00016.\n\n",
"If you have questions, suggestions, or comments ",
"regarding the 'usdampr' package, ",
"please send an email to Dr. Dennis ([email protected]) or Dr. Chen ([email protected])."),
domain = NULL, appendLF = TRUE )
}
|
/scratch/gouwar.j/cran-all/cranData/usdampr/R/zzz.R
|
# https://lbusettspatialr.blogspot.com/2017/08/building-website-with-pkgdown-short.html
# require("devtools")
# use_readme_rmd()
# use_news_md()
# use_vignette("usdarnass") #substitute with the name of your package
#
# use_github_links()
# use_travis()
# use_cran_badge()
devtools::install_github("hadley/pkgdown")
library("desc")
library("pkgdown")
build_site()
desc_add_author("Robert", "Dinterman", "[email protected]",
role = "cre", comment = c(ORCID = "0000-0002-9055-6082"))
desc_add_author("Robert", "Dinterman", "[email protected]",
role = "aut", comment = c(ORCID = "0000-0002-9055-6082"))
desc_add_author("Jonathan", "Eyer", "[email protected]", role = "aut")
|
/scratch/gouwar.j/cran-all/cranData/usdarnass/R/ignore/building_pkgdown.R
|
library("pkgdown")
build_site()
source_desc = NULL
sector_desc = NULL
group_desc = NULL
commodity_desc = NULL
short_desc = NULL
domain_desc = NULL
domaincat_desc = NULL
agg_level_desc = NULL
statisticcat_desc = NULL
state_name = NULL
asd_desc = NULL
county_name = NULL
region_desc = NULL
zip_5 = NULL
watershed_desc = NULL
year = NULL
freq_desc = NULL
reference_period_desc = NULL
token = NULL
format = "CSV"
numeric_vals = F
token = "21E12D8F-E59B-3C68-9113-EC288AA44D4D"
agg_level_desc = "COUNTY"
statisticcat_desc = "AREA PLANTED"
commodity_desc = "WHEAT"
commodity_desc = c("CORN", "WHEAT")
# __LE = <=
# __LT = <
# __GT = >
# __GE = >=
# __LIKE = like
# __NOT_LIKE = not like
# __NE = not equal
#year = ">1999"
agg_level_desc = "COUNTY"
statisticcat_desc = "AREA PLANTED"
commodity_desc = "WHEAT"
year = ">2010"
how_many <- nass_count(agg_level_desc = "COUNTY",
statisticcat_desc = "AREA PLANTED",
commodity_desc = "WHEAT", year = ">2010")
what_of <- nass_param(param = "commodity_desc", agg_level_desc = "COUNTY",
statisticcat_desc = "AREA PLANTED",
year = ">2010")
what <- nass_data(agg_level_desc = "COUNTY",
statisticcat_desc = "AREA PLANTED",
commodity_desc = "WHEAT", year = ">2010")
|
/scratch/gouwar.j/cran-all/cranData/usdarnass/R/ignore/ignore.R
|
#' @title Get number of observations from Quick Stats query
#' @description Checks the number of observations that will be returned in a
#' data request. All queries to the Quick Stats are limited to 50,000
#' observations. This is a helpful function in determining how to structure a
#' data request to fit within the 50,000 limit.
#' @param source_desc "Program" - Source of data ("CENSUS" or "SURVEY"). Census
#' program includes the Census of Ag as well as follow up projects. Survey
#' program includes national, state, and county surveys.
#' @param sector_desc "Sector" - Five high level, broad categories useful to
#' narrow down choices. ("ANIMALS & PRODUCTS", "CROPS", "DEMOGRAPHICS",
#' "ECONOMICS", or "ENVIRONMENTAL")
#' @param group_desc "Group" - Subsets within sector (e.g., under sector_desc =
#' "CROPS", the groups are "FIELD CROPS", "FRUIT & TREE NUTS", "HORTICULTURE",
#' and "VEGETABLES").
#' @param commodity_desc "Commodity" - The primary subject of interest (e.g.,
#' "CORN", "CATTLE", "LABOR", "TRACTORS", "OPERATORS").
#' @param short_desc "Data Item" - A concatenation of six columns:
#' commodity_desc, class_desc, prodn_practice_desc, util_practice_desc,
#' statisticcat_desc, and unit_desc.
#' @param domain_desc "Domain" - Generally another characteristic of operations
#' that produce a particular commodity (e.g., "ECONOMIC CLASS", "AREA
#' OPERATED", "NAICS CLASSIFICATION", "SALES"). For chemical usage data, the
#' domain describes the type of chemical applied to the commodity. The
#' domain_desc = "TOTAL" will have no further breakouts; i.e., the data value
#' pertains completely to the short_desc.
#' @param domaincat_desc "Domain Category" - Categories or partitions within a
#' domain (e.g., under domain_desc = "SALES", domain categories include $1,000
#' TO $9,999, $10,000 TO $19,999, etc).
#' @param agg_level_desc "Geographic Level" - Aggregation level or geographic
#' granularity of the data. ("AGRICULTURAL DISTRICT", "COUNTY",
#' "INTERNATIONAL", "NATIONAL", "REGION : MULTI-STATE", "REGION : SUB-STATE",
#' "STATE", "WATERSHED", or "ZIP CODE")
#' @param statisticcat_desc "Category" - The aspect of a commodity being
#' measured (e.g., "AREA HARVESTED", "PRICE RECEIVED", "INVENTORY", "SALES").
#' @param state_name "State" - State full name.
#' @param asd_desc "Ag District" - Ag statistics district name.
#' @param county_name "County" - County name.
#' @param region_desc "Region" - NASS defined geographic entities not readily
#' defined by other standard geographic levels. A region can be a less than a
#' state (SUB-STATE) or a group of states (MULTI-STATE), and may be specific
#' to a commodity.
#' @param zip_5 "Zip Code" - US Postal Service 5-digit zip code.
#' @param watershed_desc "Watershed" - Name assigned to the HUC.
#' @param year "Year" - The numeric year of the data and can be either a
#' character or numeric vector. Conditional values are also possible, for
#'   example a character value of ">=1999" or "1999<=" will give years greater
#'   than or equal to 1999. Right now the supported comparisons are greater than,
#'   less than, greater than or equal to, and less than or equal to, with the
#'   operator placed at either the beginning or the end of the year string.
#' @param freq_desc "Period Type" - Length of time covered ("ANNUAL", "SEASON",
#' "MONTHLY", "WEEKLY", "POINT IN TIME"). "MONTHLY" often covers more than one
#' month. "POINT IN TIME" is as of a particular day.
#' @param reference_period_desc "Period" - The specific time frame, within a
#' freq_desc.
#' @param token API key, default is to use the value stored in \code{.Renviron}
#' which is stored from the \code{\link{nass_set_key}} function. If there is
#' no API key stored in the environment, a character string can be provided.
#' @param \\dots Not used.
#' @return Number of observations.
#' @export nass_count
#' @examples
#'
#' \dontrun{
#' # Determine all the observations in NASS
#' nass_count()
#' }
#'
#' \dontrun{
#' # Find the number of observations for Wake County in North Carolina
#' nass_count(state_name = "NORTH CAROLINA", county_name = "WAKE")
#' }
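#'
#' \dontrun{
#' # Conditional year values are also accepted, e.g. county-level wheat
#' # area planted observations for years after 2010
#' nass_count(agg_level_desc = "COUNTY",
#'            statisticcat_desc = "AREA PLANTED",
#'            commodity_desc = "WHEAT", year = ">2010")
#' }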
nass_count <- function(source_desc = NULL,
sector_desc = NULL,
group_desc = NULL,
commodity_desc = NULL,
short_desc = NULL,
domain_desc = NULL,
domaincat_desc = NULL,
agg_level_desc = NULL,
statisticcat_desc = NULL,
state_name = NULL,
asd_desc = NULL,
county_name = NULL,
region_desc = NULL,
zip_5 = NULL,
watershed_desc = NULL,
year = NULL,
freq_desc = NULL,
reference_period_desc = NULL,
token = NULL, ...){
match.call(expand.dots = T)
token <- check_key(token)
# Check to see if year used a logical operator
year <- trimws(year)
punct <- grepl("[[:punct:]]", year)
if (length(punct) == 0) punct <- FALSE
punct_year <- as.numeric(gsub("[[:punct:]]", "", year))
args <- list(source_desc = source_desc,
sector_desc = sector_desc,
group_desc = group_desc,
commodity_desc = commodity_desc,
short_desc = short_desc,
domain_desc = domain_desc,
domaincat_desc = domaincat_desc,
agg_level_desc = agg_level_desc,
statisticcat_desc = statisticcat_desc,
state_name = state_name,
asd_desc = asd_desc,
county_name = county_name,
region_desc = region_desc,
zip_5 = zip_5,
watershed_desc = watershed_desc,
freq_desc = freq_desc,
reference_period_desc = reference_period_desc)
# Arguments to upper case
args <- lapply(args, function(x) if (!is.null(x)) toupper(x))
# Conditional year values
if (!punct) {
args <- append(args, list(year = year))
} else if (punct) {
# __LE = <=
# __LT = <
# __GT = >
# __GE = >=
# __LIKE = like
# __NOT_LIKE = not like
# __NE = not equal
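    # A leading operator applies to the year as written (e.g. ">=1999" means
    # year >= 1999); a trailing operator is read year-first (e.g. "1999<="
    # also means 1999 <= year, i.e. year >= 1999).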
if (grepl("^=<|^<=", year) | grepl("=>$|>=$", year)) {
args <- append(args, list(year__LE = punct_year))
}
if ((grepl("^<", year) | grepl(">$", year)) & !grepl("=", year)) {
args <- append(args, list(year__LT = punct_year))
}
if (grepl("^=>|^>=", year) | grepl("=<$|<=$", year)) {
args <- append(args, list(year__GE = punct_year))
}
if ((grepl("^>", year) | grepl("<$", year)) & !grepl("=", year)) {
args <- append(args, list(year__GT = punct_year))
}
}
base_url <- paste0("http://quickstats.nass.usda.gov/api/get_counts/?key=",
token, "&")
full_url <- httr::modify_url(base_url, query = args)
temp <- httr::GET(full_url)
tt <- check_response(temp)
if (names(tt) == "count"){
count_data <- as.numeric(tt[["count"]])
} else {
stop("Parameter entered is not valid")
}
return(count_data)
}
|
/scratch/gouwar.j/cran-all/cranData/usdarnass/R/nass_count.R
|
#' @title Get data from the Quick Stats query
#' @description Sends query to Quick Stats API from given parameter values. Data
#' request is limited to 50,000 records per the API. Use
#' \code{\link{nass_count}} to determine number of records in query.
#' @inheritParams nass_count
#' @param format Output format from the API call. Defaults to CSV as it is
#'   typically the smallest download. Other options are JSON and XML, but these
#'   are not recommended; XML is currently not supported.
#' @param numeric_vals Optional conversion of the year, Value, and coefficient
#'   of variation (CV \%) columns to numeric instead of the default character
#'   values. Defaults to FALSE because some Values contain a suppression code;
#'   converting to numeric coerces suppressed values to NA.
#' @return A data frame containing query to API.
#' @export nass_data
#' @examples
#'
#' \dontrun{
#' # Get state values in 2012 for all of the values of agricultural land
#' nass_data(agg_level_desc = "STATE", year = "2012",
#' commodity_desc = "AG LAND", domain_desc = "VALUE")
#' }
#'
#' \dontrun{
#' # Get county level values in 2012 for the specific data item
#' nass_data(year = 2012, agg_level_desc = "COUNTY",
#' short_desc = "AG LAND, INCL BUILDINGS - ASSET VALUE, MEASURED IN $")
#' }
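#'
#' \dontrun{
#' # The same county-level query with numeric values; note that suppressed
#' # entries are coerced to NA (a sketch)
#' nass_data(year = 2012, agg_level_desc = "COUNTY",
#'           short_desc = "AG LAND, INCL BUILDINGS - ASSET VALUE, MEASURED IN $",
#'           numeric_vals = TRUE)
#' }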
nass_data <- function(source_desc = NULL,
sector_desc = NULL,
group_desc = NULL,
commodity_desc = NULL,
short_desc = NULL,
domain_desc = NULL,
domaincat_desc = NULL,
agg_level_desc = NULL,
statisticcat_desc = NULL,
state_name = NULL,
asd_desc = NULL,
county_name = NULL,
region_desc = NULL,
zip_5 = NULL,
watershed_desc = NULL,
year = NULL,
freq_desc = NULL,
reference_period_desc = NULL,
token = NULL,
format = c("CSV", "JSON", "XML"),
numeric_vals = FALSE){
format = match.arg(format)
if (format == "XML") stop("XML not supported yet.")
token <- check_key(token)
# Check to see if year used a logical operator
year <- trimws(year)
punct <- grepl("[[:punct:]]", year)
if (length(punct) == 0) punct <- FALSE
punct_year <- as.numeric(gsub("[[:punct:]]", "", year))
args <- list(source_desc = source_desc,
sector_desc = sector_desc,
group_desc = group_desc,
commodity_desc = commodity_desc,
short_desc = short_desc,
domain_desc = domain_desc,
domaincat_desc = domaincat_desc,
agg_level_desc = agg_level_desc,
statisticcat_desc = statisticcat_desc,
state_name = state_name,
asd_desc = asd_desc,
county_name = county_name,
region_desc = region_desc,
zip_5 = zip_5,
watershed_desc = watershed_desc,
freq_desc = freq_desc,
reference_period_desc = reference_period_desc,
format = format)
# Arguments to upper case
args <- lapply(args, function(x) if (!is.null(x)) toupper(x))
count <- do.call(nass_count, append(args, list(year = year)))
if (count > 50000) stop(paste0("Query returns ",
prettyNum(count, big.mark = ","),
" records. The limit is 50,000. Subset the ",
"query to fit within limit. See nass_count()"))
# Conditional year values
if (!punct) {
args <- append(args, list(year = year))
} else if (punct) {
# __LE = <=
# __LT = <
# __GT = >
# __GE = >=
# __LIKE = like
# __NOT_LIKE = not like
# __NE = not equal
if (grepl("^=<|^<=", year) | grepl("=>$|>=$", year)) {
args <- append(args, list(year__LE = punct_year))
}
if ((grepl("^<", year) | grepl(">$", year)) & !grepl("=", year)) {
args <- append(args, list(year__LT = punct_year))
}
if (grepl("^=>|^>=", year) | grepl("=<$|<=$", year)) {
args <- append(args, list(year__GE = punct_year))
}
if ((grepl("^>", year) | grepl("<$", year)) & !grepl("=", year)) {
args <- append(args, list(year__GT = punct_year))
}
}
base_url <- paste0("http://quickstats.nass.usda.gov/api/api_GET/?key=",
token, "&")
full_url <- httr::modify_url(base_url, query = args)
temp <- httr::GET(full_url)
tt <- check_response(temp)
if (methods::is(tt, "data.frame")) {
nass <- tt
if (numeric_vals) {
nass$year <- as.numeric(nass$year)
nass$Value <- suppressWarnings(as.numeric(gsub(",", "", nass$Value)))
nass$`CV (%)` <- suppressWarnings(as.numeric(gsub(",", "", nass$`CV (%)`)))
}
} else {
stop("Parameter entered is not valid")
}
return(nass)
}
|
/scratch/gouwar.j/cran-all/cranData/usdarnass/R/nass_data.R
|
#' @title Get all possible values of a parameter
#' @description All possible values of a parameter for a given query. Helps to
#' understand the columns of data.frame from \code{\link{nass_data}}.
#' @param param A valid parameter value. Available names are: source_desc,
#' sector_desc, group_desc, commodity_desc, short_desc, domain_desc,
#' domaincat_desc, agg_level_desc, statisticcat_desc, state_name, asd_desc,
#' county_name, region_desc, zip_5, watershed_desc, year, freq_desc, and
#' reference_period_desc.
#' @inheritParams nass_count
#' @return Character vector of all possible parameter values.
#' @export nass_param
#' @examples
#'
#' \dontrun{
#' # Return the program sources for data
#' nass_param("source_desc")
#' }
#'
#' \dontrun{
#' # Return the group categories available in the CROPS sector
#' nass_param("group_desc", sector_desc = "CROPS")
#' }
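#'
#' \dontrun{
#' # A sketch restricting the query with a conditional year value
#' nass_param("commodity_desc", group_desc = "DAIRY", state_name = "OHIO",
#'            agg_level_desc = "COUNTY", year = ">=2000")
#' }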
nass_param <- function(param = NULL,
source_desc = NULL,
sector_desc = NULL,
group_desc = NULL,
commodity_desc = NULL,
short_desc = NULL,
domain_desc = NULL,
domaincat_desc = NULL,
agg_level_desc = NULL,
statisticcat_desc = NULL,
state_name = NULL,
asd_desc = NULL,
county_name = NULL,
region_desc = NULL,
zip_5 = NULL,
watershed_desc = NULL,
year = NULL,
freq_desc = NULL,
reference_period_desc = NULL,
token = NULL){
token <- check_key(token)
# Check to see if year used a logical operator
year <- trimws(year)
punct <- grepl("[[:punct:]]", year)
if (length(punct) == 0) punct <- FALSE
punct_year <- as.numeric(gsub("[[:punct:]]", "", year))
args <- list(source_desc = source_desc,
sector_desc = sector_desc,
group_desc = group_desc,
commodity_desc = commodity_desc,
short_desc = short_desc,
domain_desc = domain_desc,
domaincat_desc = domaincat_desc,
agg_level_desc = agg_level_desc,
statisticcat_desc = statisticcat_desc,
state_name = state_name,
asd_desc = asd_desc,
county_name = county_name,
region_desc = region_desc,
zip_5 = zip_5,
watershed_desc = watershed_desc,
freq_desc = freq_desc,
reference_period_desc = reference_period_desc)
# Arguments to upper case
args <- lapply(args, function(x) if (!is.null(x)) toupper(x))
# Conditional year values
if (!punct) {
args <- append(args, list(year = year))
} else if (punct) {
# __LE = <=
# __LT = <
# __GT = >
# __GE = >=
# __LIKE = like
# __NOT_LIKE = not like
# __NE = not equal
if (grepl("^=<|^<=", year) | grepl("=>$|>=$", year)) {
args <- append(args, list(year__LE = punct_year))
}
if ((grepl("^<", year) | grepl(">$", year)) & !grepl("=", year)) {
args <- append(args, list(year__LT = punct_year))
}
if (grepl("^=>|^>=", year) | grepl("=<$|<=$", year)) {
args <- append(args, list(year__GE = punct_year))
}
if ((grepl("^>", year) | grepl("<$", year)) & !grepl("=", year)) {
args <- append(args, list(year__GT = punct_year))
}
}
base_url <- paste0("http://quickstats.nass.usda.gov/api/get_param_values/",
"?key=", token, "&")
temp_url <- httr::modify_url(base_url, query = args)
if (!is.null(param)) {
    full_url <- paste0(temp_url, "&param=", tolower(param))
temp <- httr::GET(full_url)
tt <- check_response(temp)
if (names(tt) == param) {
param_data <- as.character(unlist(tt))
} else {
stop("Parameter entered is not valid")
}
} else{
stop("Please enter a parameter category")
}
return(param_data)
}
|
/scratch/gouwar.j/cran-all/cranData/usdarnass/R/nass_param.R
|
#' @title Set a Quick Stats API key
#' @description This function will add your Quick Stats API key to your
#' \code{.Renviron} file so it can be called securely without being stored in
#' your code. After you have installed your key, it can be called any time by
#' typing \code{Sys.getenv("NASS_KEY")} and can be used in package functions
#' by simply typing NASS_KEY. If you do not have an \code{.Renviron} file, the
#'   function will create one for you. If you already have an \code{.Renviron}
#' file, the function will append the key to your existing file, while making
#' a backup of your original file for disaster recovery purposes.
#' @param key The API key provided to you from NASS formatted in quotes. A key
#' can be acquired at \url{https://quickstats.nass.usda.gov/api}
#' @param overwrite If this is set to TRUE, it will overwrite an existing
#' NASS_KEY that you already have in your \code{.Renviron} file.
#' @export nass_set_key
#' @examples
#'
#' \dontrun{
#' nass_set_key("abcd012345678901234567890123456789")
#' # First time, reload your environment so you can use the key without
#' # restarting R.
#' readRenviron("~/.Renviron")
#' # You can check it with:
#' Sys.getenv("NASS_KEY")
#' }
#'
#' \dontrun{
#' # If you need to overwrite an existing key:
#' nass_set_key("abcd012345678901234567890123456789", overwrite = TRUE)
#' # First time, reload your environment so you can use the key without
#' # restarting R.
#' readRenviron("~/.Renviron")
#' # You can check it with:
#' Sys.getenv("NASS_KEY")
#' }
nass_set_key <- function(key = NULL, overwrite = FALSE) {
key_env <- Sys.getenv("NASS_KEY")
if (isFALSE(overwrite)) {
if (is.null(key)) {
if (key_env == "") {
stop("need an API key to query Quick Stats, see ?nass_set_key")
} else {
return(key_env)
}
} else if (nchar(key) == 36) {
# Key must be 36 characters in length, if not then it will not be valid
Sys.setenv(NASS_KEY = key)
} else {
stop(paste0("please enter a valid API key to query Quick Stats,",
" see ?nass_set_key for details"))
}
} else if (isTRUE(overwrite)) {
if (is.null(key)) {
if (key_env == "") {
stop("need an API key to query Quick Stats, see ?nass_set_key")
} else {
return(key_env)
}
} else if (nchar(key) == 36) {
# Install into the main R profile
if (file.exists(paste0(Sys.getenv("HOME"),"/.Renviron"))) {
# Backup original .Renviron just in case
file.copy(paste0(Sys.getenv("HOME"),"/.Renviron"),
paste0(Sys.getenv("HOME"),"/.Renviron_backup"))
} else if (!file.exists(paste0(Sys.getenv("HOME"),"/.Renviron"))) {
file.create(paste0(Sys.getenv("HOME"),"/.Renviron"))
}
message("Your original .Renviron will be backed up and stored in your ",
"R HOME directory if needed.")
oldenv <- utils::read.table(paste0(Sys.getenv("HOME"),"/.Renviron"),
stringsAsFactors = FALSE)
newenv <- oldenv[-grep("NASS_KEY", oldenv),]
upenv <- append(newenv, paste("NASS_KEY=", "'", key, "'", sep = ""))
utils::write.table(upenv, paste0(Sys.getenv("HOME"),"/.Renviron"),
quote = FALSE, sep = "\n",
col.names = FALSE, row.names = FALSE)
message(paste0('Your API key has been stored in your .Renviron and can',
' be accessed by Sys.getenv("NASS_KEY")'))
Sys.setenv(NASS_KEY = key)
} else {
stop("please enter a valid API key to query Quick Stats, see ?nass_set_key")
}
}
return(key)
}
|
/scratch/gouwar.j/cran-all/cranData/usdarnass/R/nass_set_key.R
|
# CHECK IF THERE IS AN API TOKEN AVAILABLE
check_key <- function(x){
tmp <- if (is.null(x)) Sys.getenv("NASS_KEY") else x
if (tmp == "") getOption("nasskey",
stop("need an API key to query Quick Stats, see ?nass_set_key")) else tmp
}
# CHECK GET RESPONSE FROM PAGE. DECIDE IF COUNT (NUMBER IF COUNT) OR DATA
check_response <- function(x){
if (x[["status_code"]] == 400) {
warning(paste0("Bad request - invalid query: error ", x[["status_code"]],
". There is an error in the query string, such as wrong ",
"parameter name or value."))
} else if (x[["status_code"]] == 413) {
warning(paste0("Exceeds limit = 50,000: error ", x[["status_code"]],
". The request would return more than 50,000 records."))
} else if (x[["status_code"]] == 415) {
    warning(paste0("Bad request - unsupported media type: error ", x[["status_code"]],
". The request format parameter is not JSON or CSV or XML."))
} else if (x[["status_code"]] == 401) {
warning(paste0("Unauthorized: error ", x[["status_code"]],
". There is no key or invalid key parameter."))
} else if (x[["status_code"]] == 200) {
# NO ERRORS
if (x[["headers"]][["content-type"]] == "application/json") {
# JSON COUNT OR PARAM
if (names(httr::content(x)) != "data") {
res <- httr::content(x, as = "text", encoding = "UTF-8")
out <- jsonlite::fromJSON(res, simplifyVector = FALSE)
} else if (names(httr::content(x)) == "data") {
# JSON DATA
res <- httr::content(x, as = "text", encoding = "UTF-8")
out <- jsonlite::fromJSON(res)
out <- out[["data"]]
} else {
warning("Unknown JSON error.")
}
} else if (x[["headers"]][["content-type"]] == "text/xml; charset=UTF-8") {
# XML DATA, not yet supported
out <- "XML"
# out <- names(httr::content(x))
} else if (x[["headers"]][["content-type"]] == "text/csv; charset=UTF-8") {
# CSV DATA
out <- httr::content(x, col_types = readr::cols(.default = "c"))
out <- as.data.frame(out)
} else {
warning("Unknown data request error.")
}
return(out)
}
}
# # Argument List as defaulted null ....
# args_list <- function(...){
# match.call(expand.dots = T)
# args <- list(source_desc = source_desc,
# sector_desc = sector_desc,
# group_desc = group_desc,
# commodity_desc = commodity_desc,
# short_desc = short_desc,
# domain_desc = domain_desc,
# domaincat_desc = domaincat_desc,
# agg_level_desc = agg_level_desc,
# statisticcat_desc = statisticcat_desc,
# state_name = state_name,
# asd_desc = asd_desc,
# county_name = county_name,
# region_desc = region_desc,
# zip_5 = zip_5,
# watershed_desc = watershed_desc,
# freq_desc = freq_desc,
# reference_period_desc = reference_period_desc)
# return(args)
# }
|
/scratch/gouwar.j/cran-all/cranData/usdarnass/R/utils.R
|
## ----setup, include = FALSE----------------------------------------------
library("knitr")
opts_chunk$set(
collapse = TRUE,
eval = !(Sys.getenv("NASS_KEY") == ""),
comment = "#>"
)
## ----installation_github, eval = FALSE-----------------------------------
# # install.packages("devtools")
# devtools::install_github("rdinter/usdarnass")
## ----load----------------------------------------------------------------
library("usdarnass")
## ----key-install, eval = FALSE-------------------------------------------
# nass_set_key("YOUR_KEY_IN_QUOTATIONS")
# # First time, reload your environment so you can use the key without restarting R.
# readRenviron("~/.Renviron")
# # You can check it with:
# Sys.getenv("NASS_KEY")
## ----get_data------------------------------------------------------------
nass_data(year = 2012,
short_desc = "AG LAND, INCL BUILDINGS - ASSET VALUE, MEASURED IN $",
county_name = "WAKE",
state_name = "NORTH CAROLINA")
## ----source--------------------------------------------------------------
nass_param("source_desc")
## ----ohio_group----------------------------------------------------------
nass_param("group_desc",
state_name = "OHIO",
agg_level_desc = "COUNTY",
year = 2000)
## ----ohio_commodity------------------------------------------------------
nass_param("commodity_desc",
group_desc = "dairy",
state_name = "OHIO",
agg_level_desc = "COUNTY",
year = ">2000")
## ----count_all-----------------------------------------------------------
nass_count()
## ----count_agland--------------------------------------------------------
nass_count(commodity_desc = "AG LAND",
agg_level_desc = "COUNTY")
## ----data_agland_error, error = TRUE-------------------------------------
nass_data(commodity_desc = "AG LAND",
agg_level_desc = "COUNTY")
## ----count_agland_years--------------------------------------------------
years <- 2000:2017
sapply(years, function(x) nass_count(year = x,
commodity_desc = "AG LAND",
agg_level_desc = "COUNTY"))
## ----agland_params-------------------------------------------------------
agland_params <- nass_param("short_desc",
commodity_desc = "AG LAND",
agg_level_desc = "COUNTY")
agland_params
## ----count_agland_short_desc---------------------------------------------
sapply(agland_params, function(x) nass_count(short_desc = x,
commodity_desc = "AG LAND",
agg_level_desc = "COUNTY"))
## ----agland_domain-------------------------------------------------------
agland_domain <- nass_param("domain_desc",
short_desc = "AG LAND - TREATED, MEASURED IN ACRES",
commodity_desc = "AG LAND",
agg_level_desc = "COUNTY")
sapply(agland_domain, function(x) nass_count(domain_desc = x,
short_desc = "AG LAND - TREATED, MEASURED IN ACRES",
commodity_desc = "AG LAND",
agg_level_desc = "COUNTY"))
|
/scratch/gouwar.j/cran-all/cranData/usdarnass/inst/doc/usdarnass.R
|
---
title: "Getting started with usdarnass"
author: "Robert Dinterman"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Getting started with usdarnass}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
library("knitr")
opts_chunk$set(
collapse = TRUE,
eval = !(Sys.getenv("NASS_KEY") == ""),
comment = "#>"
)
```
# Introduction
`usdarnass` provides an alternative for downloading various USDA data from [https://quickstats.nass.usda.gov/](https://quickstats.nass.usda.gov/) through R. You must sign up for an [API key](https://quickstats.nass.usda.gov/api) from the mentioned website in order for this package to work.
The USDA's documentation on Quick Stats can be found throughout [https://www.nass.usda.gov/Quick_Stats/index.php](https://www.nass.usda.gov/Quick_Stats/index.php). A short description of what the data entail can be summarised from the Quick Stats description on [data.gov](https://catalog.data.gov/dataset/quick-stats-agricultural-database):
> Quick Stats is the National Agricultural Statistics Service's (NASS) online, self-service tool to access complete results from the 1997, 2002, 2007, and 2012 Censuses of Agriculture as well as the best source of NASS survey published estimates. The census collects data on all commodities produced on U.S. farms and ranches, as well as detailed information on expenses, income, and operator characteristics. The surveys that NASS conducts collect information on virtually every facet of U.S. agricultural production.
There are two main USDA sources within Quick Stats: censuses and surveys. The census values in Quick Stats start in 1997 while the survey values can range all the way back to 1850 and then annually since 1866. Although Agricultural Censuses occur once every 5 years (1997, 2002, 2007, 2012, and eventually 2017), USDA administers other censuses as well, which explains the extra years available. At this time there is no support for documenting the various kinds of data that can be extracted from Quick Stats; this package only serves as an R interface for downloading Quick Stats data, and it is up to the user to understand the source of the data they download.
<!-- The core functions are -->
<!-- - `nass_set_key()` - set the required USDA Quickstats API key for the session. -->
<!-- - `nass_param()` - extract all of the valid values for a particular parameter. -->
<!-- - `nass_count()` - number of observations in a particular query, helpful in determining if a query fits within the 50,000 limit. -->
<!-- - `nass_data()` - return a data.frame with the observations that fit within a query, however any query that returns more than 50,000 observations returns an error and it will need to be subset. -->
# Package Install
Development version (needs devtools installed):
```{r installation_github, eval = FALSE}
# install.packages("devtools")
devtools::install_github("rdinter/usdarnass")
```
Load package:
```{r load}
library("usdarnass")
```
# Usage
If a query works on the [https://quickstats.nass.usda.gov/](https://quickstats.nass.usda.gov/) interface, then it will work with the `usdarnass` package. Keep in mind that there is a 50,000 observation limit for both the web interface and data queries with this package.
There are three main functions for this package with the first as the workhorse:
1. `nass_data()` this will return a data.frame to the specifications of the query from all of the arguments set in the function call. This mimics the simple "GET DATA" command off of [https://quickstats.nass.usda.gov/](https://quickstats.nass.usda.gov/) and requires an API key. There is a 50,000 limit for each call.
2. `nass_param()` returns all of the possible values for a parameter in a query. Helpful to understand how to subset a query if it runs into the 50,000 limit.
3. `nass_count()` returns the number of records for a query. Very useful in conjunction with `nass_param()` to determine what queries can return data with a `nass_data()` call.
All of these functions require an API Key for each query, which can be set with another function.
## Key Install
```{r key-install, eval = FALSE}
nass_set_key("YOUR_KEY_IN_QUOTATIONS")
# First time, reload your environment so you can use the key without restarting R.
readRenviron("~/.Renviron")
# You can check it with:
Sys.getenv("NASS_KEY")
```
The above script will add a line to your `.Renviron` file to be re-used whenever you are using the package. If you are not comfortable with that, you can add the following line to your `.Renviron` file manually to produce the same result.
`NASS_KEY = 'YOUR_KEY_IN_QUOTATIONS'`
If you are not comfortable with either of these options, then to use the package you need to ensure that the `token` parameter is set to your API key in each of your function calls that queries USDA Quick Stats.
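For example, a hypothetical placeholder key can be supplied directly to any of the query functions through the `token` argument (a sketch, not run):
```{r token_direct, eval = FALSE}
# "YOUR_KEY_IN_QUOTATIONS" is a placeholder for your actual API key
nass_count(sector_desc = "CROPS", token = "YOUR_KEY_IN_QUOTATIONS")
```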
# Get Data
As previously stated, the workhorse function is the `nass_data()` function which will make query calls and return a data.frame as long as the query will return 50,000 or fewer observations. By way of example, we can put in a query to Quick Stats for the value of agricultural land (and buildings) in Wake County North Carolina for 2012:
```{r get_data}
nass_data(year = 2012,
short_desc = "AG LAND, INCL BUILDINGS - ASSET VALUE, MEASURED IN $",
county_name = "WAKE",
state_name = "NORTH CAROLINA")
```
The output of this query has a lot to digest, but the main focus is on the `Value` variable in the resulting data.frame. Please note that the returned `Value` for the query is of class character. The parameter `numeric_vals` can be set to `TRUE` to return a numeric value; however, the default is a character type because some values carry a suppression code, and those are coerced to `NA` when `numeric_vals` is set to `TRUE`.
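For instance, the same query can be re-run with `numeric_vals = TRUE` to get a numeric `Value`; keep in mind that any suppressed values are coerced to `NA` (a sketch, not run):
```{r get_data_numeric, eval = FALSE}
nass_data(year = 2012,
          short_desc = "AG LAND, INCL BUILDINGS - ASSET VALUE, MEASURED IN $",
          county_name = "WAKE",
          state_name = "NORTH CAROLINA",
          numeric_vals = TRUE)
```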
Aside from the output, this particular query used four parameters: `year`, `short_desc`, `county_name`, and `state_name`. Each of these parameters has a particular set of valid values, which can be discovered with the `nass_param()` function. In querying the Quick Stats API you usually do not need to subset many parameters to get under the 50,000 record limit, but if the query does not fit within that limit the call will produce an error.
There are 18 parameters for each query, although most of those will be `NULL` values and not required to specify an output.
## Parameters
There are 18 parameters which can be included in each query to Quick Stats; however, some of them are concatenations of other parameter values. If a name in quotations follows a particular parameter, that is the drop-down menu in the [Quick Stats web interface](https://www.nass.usda.gov/Quick_Stats/index.php) that supplies its values. Not all parameters have a drop-down menu though:
1. `source_desc` "Program" - Source of data ("CENSUS" or "SURVEY"). Census program includes the Census of Ag as well as follow up projects. Survey program includes national, state, and county surveys.
2. `sector_desc` "Sector" - Five high level, broad categories useful to narrow down choices. ("ANIMALS & PRODUCTS", "CROPS", "DEMOGRAPHICS", "ECONOMICS", or "ENVIRONMENTAL")
3. `group_desc` "Group" - Subsets within sector (e.g., under sector_desc = "CROPS", the groups are "FIELD CROPS", "FRUIT & TREE NUTS", "HORTICULTURE", and "VEGETABLES").
4. `commodity_desc` "Commodity" - The primary subject of interest (e.g., "CORN", "CATTLE", "LABOR", "TRACTORS", "OPERATORS").
5. `short_desc` "Data Item" - A concatenation of six columns: commodity_desc, class_desc, prodn_practice_desc, util_practice_desc, statisticcat_desc, and unit_desc.
6. `domain_desc` "Domain" - Generally another characteristic of operations that produce a particular commodity (e.g., "ECONOMIC CLASS", "AREA OPERATED", "NAICS CLASSIFICATION", "SALES"). For chemical usage data, the domain describes the type of chemical applied to the commodity. The domain_desc = "TOTAL" will have no further breakouts; i.e., the data value pertains completely to the short_desc.
7. `domaincat_desc` "Domain Category" - Categories or partitions within a domain (e.g., under domain_desc = "SALES", domain categories include \$1,000 TO \$9,999, \$10,000 TO \$19,999, etc).
8. `agg_level_desc` "Geographic Level" - Aggregation level or geographic granularity of the data. ("AGRICULTURAL DISTRICT", "COUNTY", "INTERNATIONAL", "NATIONAL", "REGION : MULTI-STATE", "REGION : SUB-STATE", "STATE", "WATERSHED", or "ZIP CODE")
9. `statisticcat_desc` "Category" - The aspect of a commodity being measured (e.g., "AREA HARVESTED", "PRICE RECEIVED", "INVENTORY", "SALES").
10. `state_name` "State" - State full name.
11. `asd_desc` "Ag District" - Ag statistics district name.
12. `county_name` "County" - County name.
13. `region_desc` "Region" - NASS defined geographic entities not readily defined by other standard geographic levels. A region can be a less than a state (SUB-STATE) or a group of states (MULTI-STATE), and may be specific to a commodity.
14. `zip_5` "Zip Code" - US Postal Service 5-digit zip code.
15. `watershed_desc` "Watershed" - Name assigned to the HUC.
16. `year` "Year" - The numeric year of the data and can be either a character or numeric vector. Conditional values are also possible, for example a character vector of ">=1999" or "1999<=" will give years greater than or equal to 1999. Right now the logical values can either be greater/less than or equal to with the logical at either the beginning or end of a string with the year.
17. `freq_desc` "Period Type" - Length of time covered ("ANNUAL", "SEASON", "MONTHLY", "WEEKLY", "POINT IN TIME"). "MONTHLY" often covers more than one month. "POINT IN TIME" is as of a particular day.
18. `reference_period_desc` "Period" - The specific time frame, within a freq_desc.
The descriptions of the parameters here are intentionally minimal because many of the parameters have a large number of possible values. This is where the `nass_param()` function comes into play, giving the full set of values for each parameter.
# Get Parameter
The `nass_param()` function will return a vector of all the possible values for a parameter conditional on the other parameter subsets given above. By way of example, we can see that there are only two sources of datasets for the Quick Stats queries by asking what the values for the `source_desc` parameter are:
```{r source}
nass_param("source_desc")
```
The first argument in `nass_param()` is the parameter of interest, which can take on any of the 18 values from the parameters section. This argument must be passed in a character format, so make sure to use quotations in your calls. Parameter values are not case sensitive in the calls, as they are converted to upper case internally. The `year` parameter is the only parameter that does not need to be a character vector but can be numeric instead.
This function is most helpful in determining what variables are available for a certain subset. For example, if I were interested in what county level variables in Ohio are available in 2000 I might start by determining what "Group" is available at that level:
```{r ohio_group}
nass_param("group_desc",
state_name = "OHIO",
agg_level_desc = "COUNTY",
year = 2000)
```
Now, if I want to further figure out what commodities are available for the "DAIRY" subset of this data but only after 2000, I would make a call of:
```{r ohio_commodity}
nass_param("commodity_desc",
group_desc = "dairy",
state_name = "OHIO",
agg_level_desc = "COUNTY",
year = ">2000")
```
While the `year` parameter does not need to be a character vector, it does accept relational operators which can modify the subsets even further for queries.
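For example, the following two calls are intended to be equivalent ways of restricting the query to the year 2000 and later, with the relational operator placed at either end of the string (a sketch, not run):
```{r year_operators, eval = FALSE}
# Leading operator: read as "year >= 2000"
nass_param("commodity_desc", group_desc = "DAIRY", state_name = "OHIO",
           agg_level_desc = "COUNTY", year = ">=2000")
# Trailing operator: read as "2000 <= year", i.e. the same subset
nass_param("commodity_desc", group_desc = "DAIRY", state_name = "OHIO",
           agg_level_desc = "COUNTY", year = "2000<=")
```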
# Get Count
Every query with `nass_data()` has a 50,000 observation limit. To determine the number of observations in a query, the `nass_count()` function accepts all of the same parameters as `nass_data()` but returns the number of observations in the query as a numeric:
```{r count_all}
nass_count()
```
Here we see how many observations are currently in Quick Stats as of `r Sys.Date()`. Clearly, the entire dataset cannot be downloaded with a query of their API. If you are concerned about downloading all of the Quick Stats data, then it would be more efficient to use their ftp site [ftp://ftp.nass.usda.gov/quickstats/](ftp://ftp.nass.usda.gov/quickstats/).
By way of another example, we can look at how many observations are available related to agricultural land at the county level:
```{r count_agland}
nass_count(commodity_desc = "AG LAND",
agg_level_desc = "COUNTY")
```
This particular query could not be run with `nass_data()` because the number of observations greatly exceeds 50,000; indeed, that query returns an error:
```{r data_agland_error, error = TRUE}
nass_data(commodity_desc = "AG LAND",
agg_level_desc = "COUNTY")
```
At this point a bit of understanding of the data and of the user's goals is needed. If there is only one state of interest for the study, then subsetting the data further to that state is likely the best strategy. However, it is more likely that the user wants all of the county level data related to agricultural land. My strategy would be to look at the number of observations for each year of interest:
```{r count_agland_years}
years <- 2000:2017
sapply(years, function(x) nass_count(year = x,
commodity_desc = "AG LAND",
agg_level_desc = "COUNTY"))
```
The observations here are only for 2002, 2007, and 2012, which are agricultural census years, and it is highly likely that the category contains many variables that would not be useful. It is then best to look at the descriptions of the variables to figure out what data would be most useful:
```{r agland_params}
agland_params <- nass_param("short_desc",
commodity_desc = "AG LAND",
agg_level_desc = "COUNTY")
agland_params
```
We can use the output for the parameter values to see the number of observations within each of these categories:
```{r count_agland_short_desc}
sapply(agland_params, function(x) nass_count(short_desc = x,
commodity_desc = "AG LAND",
agg_level_desc = "COUNTY"))
```
While most of these data items fit within the 50,000 limit, not all do. The "AG LAND - TREATED, MEASURED IN ACRES" category, for example, exceeds the limit and cannot be downloaded directly. This is because the treated category actually has multiple domains, which can be seen by combining `nass_param()` and `nass_count()`:
```{r agland_domain}
agland_domain <- nass_param("domain_desc",
short_desc = "AG LAND - TREATED, MEASURED IN ACRES",
commodity_desc = "AG LAND",
agg_level_desc = "COUNTY")
sapply(agland_domain, function(x) nass_count(domain_desc = x,
short_desc = "AG LAND - TREATED, MEASURED IN ACRES",
commodity_desc = "AG LAND",
agg_level_desc = "COUNTY"))
```
|
/scratch/gouwar.j/cran-all/cranData/usdarnass/inst/doc/usdarnass.Rmd
|
## ----setup, include = FALSE----------------------------------------------
library("knitr")
opts_chunk$set(
collapse = TRUE,
eval = !(Sys.getenv("NASS_KEY") == ""),
comment = "#>"
)
## ----start, warning=FALSE, message=FALSE---------------------------------
library("usdarnass")
library("dplyr") # Helpful package
ohio_rent <- nass_data(commodity_desc = "RENT", agg_level_desc = "COUNTY",
state_name = "OHIO")
glimpse(ohio_rent)
## ----rent_nass_param-----------------------------------------------------
nass_param("short_desc", commodity_desc = "RENT", agg_level_desc = "COUNTY", state_name = "OHIO")
## ----rent_nass_param_alt-------------------------------------------------
table(ohio_rent$short_desc)
## ----non_irrigated-------------------------------------------------------
non_irrigated <- ohio_rent %>%
filter(grepl("NON-IRRIGATED", short_desc))
table(non_irrigated$year) # Observation per year
## ----counties------------------------------------------------------------
table(non_irrigated$county_name)
# nass_param("county_name", state_name = "OHIO")
## ----asd-----------------------------------------------------------------
non_irrigated %>%
filter(county_name == "OTHER (COMBINED) COUNTIES") %>%
pull(asd_code) %>%
table()
## ----ag_census-----------------------------------------------------------
farms <- nass_data(source_desc = "CENSUS", year = 2012, state_name = "OHIO", agg_level_desc = "COUNTY", domain_desc = "TOTAL", short_desc = "FARM OPERATIONS - NUMBER OF OPERATIONS")
## ----combined------------------------------------------------------------
library("tidyr")
base_rent <- farms %>%
select(state_fips_code, county_code, county_name, asd_code, asd_desc) %>%
expand(year = unique(non_irrigated$year), nesting(state_fips_code, county_code, county_name, asd_code)) %>%
full_join(non_irrigated)
# Correct for missing values in the "other"
base_rent <- base_rent %>%
arrange(year, asd_code, county_code) %>%
group_by(year, asd_code) %>%
mutate(Value = ifelse(is.na(Value), Value[county_code == "998"], Value)) %>%
filter(county_code != "998")
# Finally, select only the relevant variables and rename
base_rent <- base_rent %>%
select(year, state_fips_code, county_code, county_name, asd_code, rent = Value) %>%
mutate(rent = as.numeric(rent),
fips = as.numeric(paste0(state_fips_code, county_code)))
glimpse(base_rent)
|
/scratch/gouwar.j/cran-all/cranData/usdarnass/inst/doc/usdarnass_output.R
|
---
title: "Output of usdarnass"
author: "Robert Dinterman"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Output of usdarnass}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
library("knitr")
opts_chunk$set(
collapse = TRUE,
eval = !(Sys.getenv("NASS_KEY") == ""),
comment = "#>"
)
```
# Introduction
Each successful call of the `nass_data()` command will return a data.frame object with 39 variables, although a handful of these variables will have the same value for each observation in the data.frame due to the nature of setting parameters for a query. The resulting data.frame is of the [long variety](http://vita.had.co.nz/papers/tidy-data.html) with the `Value` variable as the numerical variable of interest for the query.
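Because the output is long, a common first step is to spread the `Value` column across the `short_desc` data items. A minimal sketch (not run), assuming `df` is a data.frame returned by `nass_data()` and that dplyr and tidyr are available:
```{r long_to_wide, eval = FALSE}
library("dplyr")
library("tidyr")
# Keep identifying columns, then spread the data items into columns.
# Value is character; suppressed codes become NA when coerced to numeric.
df %>%
  select(year, state_fips_code, county_code, short_desc, Value) %>%
  mutate(Value = suppressWarnings(as.numeric(gsub(",", "", Value)))) %>%
  spread(short_desc, Value)
```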
## Official USDA Objects
The official documentation for each of these variables from the USDA are as follows:
- `week_ending` - Week ending date, used when freq_desc = WEEKLY.
- `state_name` - State full name.
- `country_code` - US Census Bureau, Foreign Trade Division 4-digit country code, as of April, 2007.
- `location_desc` - Full description for the location dimension.
- `begin_code` - If applicable, a 2-digit code corresponding to the beginning of the reference period (e.g., for freq_desc = MONTHLY, begin_code ranges from 01 (January) to 12 (December)).
- `zip_5` - US Postal Service 5-digit zip code.
- `county_ansi` - ANSI standard 3-digit county codes.
- `state_alpha` - State abbreviation, 2-character alpha code.
- `util_practice_desc` - Utilizations (e.g., GRAIN, FROZEN, SLAUGHTER) or marketing channels (e.g., FRESH MARKET, PROCESSING, RETAIL).
- `domain_desc` - Generally another characteristic of operations that produce a particular commodity (e.g., ECONOMIC CLASS, AREA OPERATED, NAICS CLASSIFICATION, SALES). For chemical usage data, the domain describes the type of chemical applied to the commodity. The domain = TOTAL will have no further breakouts; i.e., the data value pertains completely to the short_desc.
- `asd_desc` - Ag statistics district name.
- `freq_desc` - Length of time covered (ANNUAL, SEASON, MONTHLY, WEEKLY, POINT IN TIME). MONTHLY often covers more than one month. POINT IN TIME is as of a particular day.
- `prodn_practice_desc` - A method of production or action taken on the commodity (e.g., IRRIGATED, ORGANIC, ON FEED).
- `end_code` - If applicable, a 2-digit code corresponding to the end of the reference period (e.g., the reference period of JAN THRU MAR will have begin_code = 01 and end_code = 03).
- `sector_desc` - Five high level, broad categories useful to narrow down choices (CROPS, ANIMALS & PRODUCTS, ECONOMICS, DEMOGRAPHICS, and ENVIRONMENTAL).
- `short_desc` - A concatenation of six columns: commodity_desc, class_desc, prodn_practice_desc, util_practice_desc, statisticcat_desc, and unit_desc.
- `country_name` - Country name.
- `Value` - Published data value or suppression reason code.
- `reference_period_desc` - The specific time frame, within a freq_desc.
- `CV (%)` - Coefficient of variation. Available for the 2012 Census of Agriculture only. County-level CVs are generalized.
- `class_desc` - Generally a physical attribute (e.g., variety, size, color, gender) of the commodity.
- `asd_code` - NASS defined county groups, unique within a state, 2-digit ag statistics district code.
- `agg_level_desc` - Aggregation level or geographic granularity of the data (e.g., STATE, AG DISTRICT, COUNTY, REGION, ZIP CODE).
- `county_name` - County name.
- `region_desc` - NASS defined geographic entities not readily defined by other standard geographic levels. A region can be a less than a state (SUB-STATE) or a group of states (MULTI-STATE), and may be specific to a commodity.
- `watershed_desc` - Name assigned to the HUC.
- `state_ansi` - American National Standards Institute (ANSI) standard 2-digit state codes.
- `congr_district_code` - US Congressional District 2-digit code.
- `domaincat_desc` - Categories or partitions within a domain (e.g., under domain = SALES, domain categories include \$1,000 TO \$9,999, \$10,000 TO \$19,999, etc).
- `state_fips_code` - NASS 2-digit state codes; include 99 and 98 for US TOTAL and OTHER STATES, respectively; otherwise match ANSI codes.
- `group_desc` - Subsets within sector (e.g., under sector = CROPS, the groups are FIELD CROPS, FRUIT & TREE NUTS, HORTICULTURE, and VEGETABLES).
- `watershed_code` - US Geological Survey (USGS) 8-digit Hydrologic Unit Code (HUC) for watersheds.
- `unit_desc` - The unit associated with the statistic category (e.g., ACRES, $ / LB, HEAD, $, OPERATIONS).
- `source_desc` - Source of data (CENSUS or SURVEY). Census program includes the Census of Ag as well as follow up projects. Survey program includes national, state, and county surveys.
- `load_time` - Date and time indicating when record was inserted into Quick Stats database.
- `county_code` - NASS 3-digit county codes; includes 998 for OTHER (COMBINED) COUNTIES and Alaska county codes; otherwise match ANSI codes.
- `statisticcat_desc` - The aspect of a commodity being measured (e.g., AREA HARVESTED, PRICE RECEIVED, INVENTORY, SALES).
- `commodity_desc` - The primary subject of interest (e.g., CORN, CATTLE, LABOR, TRACTORS, OPERATORS).
- `year` - The numeric year of the data.
I learn best through examples, so I'll cover a few different levels of analysis and subtleties related to the data.
# County Level Example
We can set a query that returns all data at the county level in Ohio related to rent, which is equivalent to setting "Geographic Level" to COUNTY, "State" to OHIO, and "Commodity" to RENT on [https://quickstats.nass.usda.gov/](https://quickstats.nass.usda.gov/):
```{r start, warning=FALSE, message=FALSE}
library("usdarnass")
library("dplyr") # Helpful package
ohio_rent <- nass_data(commodity_desc = "RENT", agg_level_desc = "COUNTY",
state_name = "OHIO")
glimpse(ohio_rent)
```
The `agg_level_desc`, `commodity_desc`, and `state_name` variables are all the same because the query parameters were set on those values. It turns out a few other variables will be identical for the whole data.frame because they do not vary based on county level observations in Ohio: `country_code`, `state_alpha`, `state_ansi`, and `state_fips_code`.
There are a fair number of other variables which are also constant for the entire data.frame, but these are constant not because of the regional aggregation variables but because we have subset the data with `commodity_desc` equal to "RENT".
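A quick way to see which columns are constant across the whole query result is a base R check on the `ohio_rent` object from above (a sketch, not run):
```{r constant_columns, eval = FALSE}
# Names of columns that take on a single value across the entire result
names(ohio_rent)[sapply(ohio_rent, function(x) length(unique(x)) == 1)]
```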
The other values we could have set in this query come from the `short_desc` parameter, and we can use the `nass_param()` function to view the options for this data item:
```{r rent_nass_param}
nass_param("short_desc", commodity_desc = "RENT", agg_level_desc = "COUNTY", state_name = "OHIO")
```
Alternatively, in the previously returned query with the `nass_data()` function, we could view the frequency of the different `short_desc` variables to get to the same outcome but with the additional benefit of knowing the number of observations:
```{r rent_nass_param_alt}
table(ohio_rent$short_desc)
```
## Dominant form of rent in Ohio
In Ohio, the dominant form of cash rent is for non-irrigated cropland, and most counties in the state are surveyed and have a usable cash rent value in a given year. But this is not exactly right, as we can see by subsetting our original query to only non-irrigated cropland and looking at the number of counties in each year's observations.
```{r non_irrigated}
non_irrigated <- ohio_rent %>%
filter(grepl("NON-IRRIGATED", short_desc))
table(non_irrigated$year) # Observation per year
```
The cash rent values begin in 2008 with a small subset of counties in Ohio and then cover the vast majority of the state from 2009 onward. As it turns out, the entire state is surveyed, but some counties do not have enough observations to form a statistically relevant sample and are thus combined at the agricultural reporting district level. This can be seen with a listing of all of the counties in Ohio with rent data available:
```{r counties}
table(non_irrigated$county_name)
# nass_param("county_name", state_name = "OHIO")
```
The "OTHER (COMBINED) COUNTIES" value has by far the most observations over this period and, if one knows all of the counties in Ohio, there are a few missing counties whose values we need to impute. Each of the "OTHER (COMBINED) COUNTIES" values is for a specific agricultural reporting district:
```{r asd}
non_irrigated %>%
filter(county_name == "OTHER (COMBINED) COUNTIES") %>%
pull(asd_code) %>%
table()
```
There are nine reporting districts in Ohio and eight of the nine have observations which combine counties -- which implies some counties are missing official observations for the statistic in question. The counties are not necessarily missing entirely; rather, their values are suppressed. One method for correcting for these missing values is to assign the missing counties the "OTHER (COMBINED) COUNTIES" value for average rent.
In order to do this, we first need a full set of all of the counties in Ohio along with their corresponding agricultural district number. There are many ways to accomplish this, but I will go about this by leveraging the 2012 Agricultural Census data which is in Quick Stats and contains a category for the number of farms in each Ohio county. The number of farms is not helpful for our data concerns but it does serve as a way to return a data.frame with 88 observations in Ohio with each uniquely corresponding to a county in Ohio.
```{r ag_census}
farms <- nass_data(source_desc = "CENSUS", year = 2012, state_name = "OHIO", agg_level_desc = "COUNTY", domain_desc = "TOTAL", short_desc = "FARM OPERATIONS - NUMBER OF OPERATIONS")
```
At this point, we want to make sure that each year of our non_irrigated rent data has all 88 counties, with an NA value where an observation does not exist. Then we want to impute the "OTHER (COMBINED) COUNTIES" value for all of the NAs. To do this we will create the backbone of our desired observations from the farms data and then fully merge this with the current data from Quick Stats. The resulting data.frame will have missing "Value" entries for the counties without observations, and we will impute the value from "OTHER (COMBINED) COUNTIES". Last of all, the only important aspects of these observations are a county identifier, locational variables, and the year in question. Most of the other variables in the dataset are not of importance to us, so we will keep only the relevant variables and convert them to numeric.
```{r combined}
library("tidyr")
base_rent <- farms %>%
select(state_fips_code, county_code, county_name, asd_code, asd_desc) %>%
expand(year = unique(non_irrigated$year), nesting(state_fips_code, county_code, county_name, asd_code)) %>%
full_join(non_irrigated)
# Correct for missing values in the "other"
base_rent <- base_rent %>%
arrange(year, asd_code, county_code) %>%
group_by(year, asd_code) %>%
mutate(Value = ifelse(is.na(Value), Value[county_code == "998"], Value)) %>%
filter(county_code != "998")
# Finally, select only the relevant variables and rename
base_rent <- base_rent %>%
select(year, state_fips_code, county_code, county_name, asd_code, rent = Value) %>%
mutate(rent = as.numeric(rent),
fips = as.numeric(paste0(state_fips_code, county_code)))
glimpse(base_rent)
```
|
/scratch/gouwar.j/cran-all/cranData/usdarnass/inst/doc/usdarnass_output.Rmd
|
# County level Beginning Farmers and Ranchers Data (plus young!)
# NASS API for their Quickstats web interface:
# https://quickstats.nass.usda.gov/
devtools::install_github("rdinter/rnass")
library(rnass)
library(tidyverse)
source("0-data/0-api_keys.R")
# Create a directory for the data
local_dir <- "0-data/NASS/BFR"
data_source <- paste0(local_dir, "/raw")
if (!file.exists(local_dir)) dir.create(local_dir)
if (!file.exists(data_source)) dir.create(data_source)
# Find all of the available data items for number of principal operators at
# the county level
exp_params <- nass_param(param = "short_desc", agg_level_desc = "COUNTY",
commodity_desc = "OPERATORS, PRINCIPAL",
statisticcat_desc = "OPERATORS",
token = api_nass_key)
# Download their data
experience_data <- map(exp_params$unlist.tt., function(x) {
temp <- nass_data(short_desc = x,
agg_level_desc = "COUNTY",
commodity_desc = "OPERATORS, PRINCIPAL",
statisticcat_desc = "OPERATORS",
token = api_nass_key)
return(temp)
})
exp_data <- experience_data %>%
bind_rows() %>%
mutate(year = parse_number(year),
fips = 1000*parse_number(state_ansi) + parse_number(county_code)) %>%
select(year, val = Value, short_desc, fips, county_name, state_name) %>%
spread(short_desc, val)
write_csv(exp_data, "county_level_operators.csv")
|
/scratch/gouwar.j/cran-all/cranData/usdarnass/vignettes/0-NASS-BFR.R
|
# NASS API for their Quickstats web interface:
# https://quickstats.nass.usda.gov/
# devtools::install_github("rdinter/rnass")
#source("0-data/0-api_keys.R")
# devtools::install_github("rdinter/nassR")
library("nassR")
library("tidyverse")
# Create a directory for the data
local_dir <- "0-data/OHIO"
data_source <- paste0(local_dir, "/raw")
if (!file.exists(local_dir)) dir.create(local_dir)
if (!file.exists(data_source)) dir.create(data_source)
# NOTE: a major issue in USDA county level data is that sometimes there are
# multiple counties combined together as "OTHER (COMBINED) COUNTIES" which
# should be where we simply impute downward at this level. This can be done
# by recognizing that the "asd_desc" is the level which counties are combined
# at. Therefore, group by "asd_desc" and check if values are missing. If they
# are missing, then replace them with the "OTHER (COMBINED) COUNTIES" which
# also has the county_code of "998
# Can I create a function for this?
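# One possible helper (a sketch, not used below; the function name and the
# `var` argument are hypothetical): within each year and district, impute the
# "OTHER (COMBINED) COUNTIES" value (county_code == "998") into counties with
# a missing value, then drop the "998" rows. Assumes columns year, asd_desc,
# county_code and a value column named by `var` (a character scalar).
impute_other_counties <- function(df, var) {
  df %>%
    group_by(year, asd_desc) %>%
    mutate(!!var := ifelse(is.na(.data[[var]]),
                           .data[[var]][county_code == "998"],
                           .data[[var]])) %>%
    ungroup() %>%
    filter(county_code != "998")
}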
# ---- financial ----------------------------------------------------------
ohio_rent <- nass_data(commodity_desc = "RENT", agg_level_desc = "COUNTY",
state_name = "OHIO", numeric_vals = T)
from_rent <- c("RENT, CASH, CROPLAND, IRRIGATED - EXPENSE, MEASURED IN $ / ACRE",
"RENT, CASH, CROPLAND, NON-IRRIGATED - EXPENSE, MEASURED IN $ / ACRE",
"RENT, CASH, LAND & BUILDINGS - EXPENSE, MEASURED IN $",
"RENT, CASH, LAND & BUILDINGS - OPERATIONS WITH EXPENSE",
"RENT, CASH, PASTURELAND - EXPENSE, MEASURED IN $ / ACRE",
"RENT, PER HEAD OR ANIMAL UNIT MONTH - OPERATIONS WITH EXPENSE")
to_rent <- c("rent_irrigated", "rent_nonirrigated", "rent_expense",
"operations_with_rent", "rent_pasture", "operations_head")
ohio <- ohio_rent %>%
mutate(year = as.numeric(year),
#location_desc = gsub("OHIO, ", "", location_desc),
short_desc = plyr::mapvalues(short_desc,
from = from_rent,
to = to_rent)) %>%
select(year, val = Value, short_desc,
county_code, county_name, asd_desc) %>%
spread(short_desc, val)
# Add on acreage from Census on rented land: these are only for part-owners
ohio_rent <- map(c("AG LAND, OWNED, IN FARMS - ACRES",
"AG LAND, RENTED FROM OTHERS, IN FARMS - ACRES",
"AG LAND, CROPLAND - ACRES",
"AG LAND, PASTURELAND, (EXCL CROPLAND & WOODLAND) - ACRES",
"AG LAND, WOODLAND - ACRES",
"AG LAND, WOODLAND, PASTURED - ACRES"),
function(x){
nass_data(commodity_desc = "AG LAND",
agg_level_desc = "COUNTY",
state_name = "OHIO", #domain_desc = "TOTAL",
#source_desc = "SURVEY",
short_desc = x, numeric_vals = T)
})
ohio_rent <- ohio_rent %>%
bind_rows() %>%
filter(domain_desc != "IRRIGATION STATUS") %>%
mutate(year = as.numeric(year),
short_desc = plyr::mapvalues(short_desc,
from = c("AG LAND, OWNED, IN FARMS - ACRES",
"AG LAND, CROPLAND - ACRES",
"AG LAND, PASTURELAND, (EXCL CROPLAND & WOODLAND) - ACRES",
"AG LAND, WOODLAND - ACRES",
"AG LAND, WOODLAND, PASTURED - ACRES",
"AG LAND, RENTED FROM OTHERS, IN FARMS - ACRES"),
to = c("acres_part_owned", "cropland_acres", "pasture_acres",
"woodland_acres", "woodland_pastured_acres", "acres_part_rented"))) %>%
select(year, val = Value, short_desc,
county_code, county_name, asd_desc) %>%
spread(short_desc, val)
ohio <- left_join(ohio, ohio_rent)
# Now for the other general all categories:
ohio_rent <- nass_data(commodity_desc = "FARM OPERATIONS",
agg_level_desc = "COUNTY", state_name = "OHIO",
source_desc = "CENSUS", domain_desc = "TENURE",
short_desc = "FARM OPERATIONS - ACRES OPERATED",
numeric_vals = T)
ohio_rent <- ohio_rent %>%
bind_rows() %>%
#filter(domain_desc != "IRRIGATION STATUS") %>%
mutate(year = as.numeric(year),
domaincat_desc = plyr::mapvalues(domaincat_desc,
from = c("TENURE: (FULL OWNER)",
"TENURE: (PART OWNER)",
"TENURE: (TENANT)"),
to = c("acres_owned", "acres_part",
"acres_tenant_rented"))) %>%
select(year, val = Value, domaincat_desc,
county_code, county_name, asd_desc) %>%
spread(domaincat_desc, val)
ohio <- left_join(ohio, ohio_rent)
####
# NOW ADD IN THOSE RENTED AND OWNED ACRES
#####
ohio$owned_acres <- ohio$acres_owned + ohio$acres_part_owned
ohio$rented_acres <- ohio$acres_part_rented + ohio$acres_tenant_rented
# Correct for missing values in the "other"
ohio <- ohio %>%
expand(year, nesting(county_code, county_name, asd_desc)) %>%
left_join(ohio) %>%
group_by(year, asd_desc) %>%
mutate_at(vars(rent_irrigated, rent_nonirrigated, rent_pasture),
funs(ifelse(is.na(.), .[county_code == "998"], .))) %>%
filter(county_code != "998")
ohio_tax <- nass_data(commodity_desc = "TAXES", agg_level_desc = "COUNTY",
state_name = "OHIO", numeric_vals = T)
from_tax <- c("TAXES, PROPERTY, REAL ESTATE & NON-REAL ESTATE, (EXCL PAID BY LANDLORD) - EXPENSE, MEASURED IN $",
"TAXES, PROPERTY, REAL ESTATE & NON-REAL ESTATE, (EXCL PAID BY LANDLORD) - OPERATIONS WITH EXPENSE")
ohio_tax <- ohio_tax %>%
mutate(year = as.numeric(year),
short_desc = plyr::mapvalues(short_desc,
from = from_tax,
to = c("taxes", "taxes_operations"))) %>%
select(year, val = Value, short_desc,
county_code, county_name, asd_desc) %>%
spread(short_desc, val)
ohio_income <- map(c("INCOME, FARM-RELATED - OPERATIONS WITH RECEIPTS",
"INCOME, FARM-RELATED - RECEIPTS, MEASURED IN $"),
function(x){
nass_data(commodity_desc = "INCOME, FARM-RELATED",
agg_level_desc = "COUNTY",
state_name = "OHIO", domain_desc = "TOTAL",
#source_desc = "SURVEY",
short_desc = x, numeric_vals = T)
})
ohio_income <- ohio_income %>%
bind_rows() %>%
mutate(year = as.numeric(year),
short_desc = plyr::mapvalues(short_desc,
from = c("INCOME, FARM-RELATED - OPERATIONS WITH RECEIPTS",
"INCOME, FARM-RELATED - RECEIPTS, MEASURED IN $"),
to = c("receipt_operations", "receipts"))) %>%
select(year, val = Value, short_desc,
county_code, county_name, asd_desc) %>%
spread(short_desc, val)
ohio_land <- nass_data(commodity_desc = "AG LAND", agg_level_desc = "COUNTY",
state_name = "OHIO", domain_desc = "TOTAL",
statisticcat_desc = "ASSET VALUE", numeric_vals = T)
ohio_land <- ohio_land %>%
mutate(year = as.numeric(year),
short_desc = plyr::mapvalues(short_desc,
from = c("AG LAND, INCL BUILDINGS - ASSET VALUE, MEASURED IN $",
"AG LAND, INCL BUILDINGS - ASSET VALUE, MEASURED IN $ / ACRE",
"AG LAND, INCL BUILDINGS - ASSET VALUE, MEASURED IN $ / OPERATION",
"AG LAND, INCL BUILDINGS - OPERATIONS WITH ASSET VALUE"),
to = c("agland", "agland_per_acre",
"agland_per_operation", "agland_operations"))) %>%
select(year, val = Value, short_desc,
county_code, county_name, asd_desc) %>%
spread(short_desc, val)
ohio_farms <- map(c("FARM OPERATIONS - NUMBER OF OPERATIONS",
"FARM OPERATIONS - ACRES OPERATED"), function(x){
nass_data(commodity_desc = "FARM OPERATIONS",
agg_level_desc = "COUNTY",
state_name = "OHIO", domain_desc = "TOTAL",
#source_desc = "SURVEY",
short_desc = x, numeric_vals = T)
})
ohio_farms <- ohio_farms %>%
bind_rows() %>%
mutate(year = as.numeric(year),
short_desc = plyr::mapvalues(short_desc,
from = c("FARM OPERATIONS - NUMBER OF OPERATIONS",
"FARM OPERATIONS - ACRES OPERATED"),
to = c("farms", "acres"))) %>%
filter(!(year %in% c(1997, 2002, 2007) & source_desc == "SURVEY")) %>%
select(year, val = Value, short_desc,
county_code, county_name, asd_desc) %>%
spread(short_desc, val)
ohio <- ohio_farms %>%
full_join(ohio) %>%
full_join(ohio_tax) %>%
full_join(ohio_income) %>%
full_join(ohio_land)
ohio$fips <- 39000 + as.numeric(ohio$county_code)
write.csv(ohio, paste0(local_dir, "/ohio_nass.csv"), row.names = F)
write_rds(ohio, paste0(local_dir, "/ohio_nass.rds"))
# ---- prices -------------------------------------------------------------
prices <- map(c("CORN", "HAY", "SOYBEANS", "WHEAT"), function(x){
nass_data(commodity_desc = x, statisticcat_desc = "PRICE RECEIVED",
state_name = "OHIO", numeric_vals = T)})
from_price <- c("CORN, GRAIN - PRICE RECEIVED, MEASURED IN $ / BU",
"HAY, ALFALFA - PRICE RECEIVED, MEASURED IN $ / TON",
"HAY, (EXCL ALFALFA) - PRICE RECEIVED, MEASURED IN $ / TON",
"HAY - PRICE RECEIVED, MEASURED IN $ / TON",
"SOYBEANS - PRICE RECEIVED, MEASURED IN $ / BU",
"WHEAT - PRICE RECEIVED, MEASURED IN $ / BU",
"WHEAT, WINTER - PRICE RECEIVED, MEASURED IN $ / BU")
to_price <- c("corn_price", "hay_alfa_price", "hay_nonalfa_price", "hay_price",
"soy_price", "wheat_price", "wheat_winter_price")
prices_data <- prices %>%
bind_rows() %>%
rename(time = reference_period_desc) %>%
mutate(year = as.numeric(year),
time = case_when(.$time == "JAN" ~ 1, .$time == "FEB" ~ 2,
.$time == "MAR" ~ 3, .$time == "APR" ~ 4,
.$time == "MAY" ~ 5, .$time == "JUN" ~ 6,
.$time == "JUL" ~ 7, .$time == "AUG" ~ 8,
.$time == "SEP" ~ 9, .$time == "OCT" ~ 10,
.$time == "NOV" ~ 11, .$time == "DEC" ~ 12,
TRUE ~ 13),
short_desc = plyr::mapvalues(short_desc,
from = from_price,
to = to_price),
date = as.Date(paste0(year, "-", time, "-1"), format="%Y-%m-%d")) %>%
select(year, val = Value, short_desc, time, date) %>%
spread(short_desc, val)
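# Split prices into monthly observations (time 1-12) and annual values (time 13)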
monthly <- prices_data %>%
filter(time != 13) %>%
select(-time)
annual <- prices_data %>%
filter(time == 13) %>%
select(-date, -time)
# Add in a blank holder for next year's prices
annual <- data.frame(year = max(annual$year) + 1) %>%
bind_rows(annual) %>%
arrange(year)
# Sales
sales <- map(c("CORN", "HAY", "SOYBEANS", "WHEAT"), function(x){
nass_data(commodity_desc = x, statisticcat_desc = "SALES",
freq_desc = "MONTHLY", state_name = "OHIO", numeric_vals = T)})
from_sales <- c("CORN, GRAIN - SALES, MEASURED IN PCT OF MKTG YEAR",
"HAY - SALES, MEASURED IN PCT OF MKTG YEAR",
"SOYBEANS - SALES, MEASURED IN PCT OF MKTG YEAR",
"WHEAT - SALES, MEASURED IN PCT OF MKTG YEAR")
to_sales <- c("corn_sales", "hay_sales",
"soy_sales", "wheat_sales")
sales_data <- sales %>%
bind_rows() %>%
rename(time = reference_period_desc) %>%
mutate(year = as.numeric(year),
time = case_when(.$time == "JAN" ~ 1, .$time == "FEB" ~ 2,
.$time == "MAR" ~ 3, .$time == "APR" ~ 4,
.$time == "MAY" ~ 5, .$time == "JUN" ~ 6,
.$time == "JUL" ~ 7, .$time == "AUG" ~ 8,
.$time == "SEP" ~ 9, .$time == "OCT" ~ 10,
.$time == "NOV" ~ 11, .$time == "DEC" ~ 12,
TRUE ~ 13),
short_desc = plyr::mapvalues(short_desc,
from = from_sales,
to = to_sales),
date = as.Date(paste0(year, "-", time, "-1"), format="%Y-%m-%d")) %>%
select(year, val = Value, short_desc, time, date) %>%
spread(short_desc, val)
monthly <- sales_data %>%
select(-time) %>%
right_join(monthly)
write.csv(annual, paste0(local_dir, "/ohio_prices_annual.csv"), row.names = F)
write_rds(annual, paste0(local_dir, "/ohio_prices_annual.rds"))
write.csv(monthly, paste0(local_dir, "/ohio_prices_monthly.csv"), row.names=F)
write_rds(monthly, paste0(local_dir, "/ohio_prices_monthly.rds"))
# ---- crops --------------------------------------------------------------
# statisticcat_desc - category
# short_desc - data item
# category - area harvested, area planted, production, sales, yields
corn_vals <- c("CORN - ACRES PLANTED",
"CORN, GRAIN - ACRES HARVESTED",
"CORN, SILAGE - ACRES HARVESTED",
# "CORN, GRAIN, IRRIGATED - ACRES HARVESTED",
# "CORN, SILAGE, IRRIGATED - ACRES HARVESTED",
"CORN, GRAIN - PRODUCTION, MEASURED IN BU",
"CORN, SILAGE - PRODUCTION, MEASURED IN TONS",
"CORN, GRAIN - YIELD, MEASURED IN BU / ACRE",
"CORN, SILAGE - YIELD, MEASURED IN TONS / ACRE")#, "CORN - SALES, MEASURED IN $")
corn_names <- c("corn_acres_planted",
"corn_grain_acres_harvest",
"corn_silage_acres_harvest",
# "corn_grain_irr_acres_harvest",
# "corn_silage_irr_acres_harvest",
"corn_grain_prod_bu",
"corn_silage_prod_tons",
"corn_grain_yield",
"corn_silage_yield")#, "corn_sales")
hay_vals <- c("HAY - ACRES HARVESTED", "HAY - PRODUCTION, MEASURED IN TONS",
"HAY - YIELD, MEASURED IN TONS / ACRE")
hay_names <- c("hay_acres_harvest", "hay_prod_tons", "hay_yield")
soy_vals <- c("SOYBEANS - ACRES HARVESTED", "SOYBEANS - ACRES PLANTED",
"SOYBEANS - PRODUCTION, MEASURED IN BU",
"SOYBEANS - YIELD, MEASURED IN BU / ACRE")
soy_names <- c("soy_acres_harvest", "soy_acres_planted",
"soy_prod_bu", "soy_yield")
wheat_vals <- c("WHEAT - ACRES HARVESTED", "WHEAT - ACRES PLANTED",
"WHEAT - PRODUCTION, MEASURED IN BU", "WHEAT, WINTER - ACRES HARVESTED",
"WHEAT, WINTER - ACRES PLANTED",
"WHEAT, WINTER - PRODUCTION, MEASURED IN BU",
"WHEAT, WINTER - YIELD, MEASURED IN BU / ACRE",
"WHEAT - YIELD, MEASURED IN BU / ACRE")
wheat_names <- c("wheat_acres_harvest", "wheat_acres_planted", "wheat_prod_bu",
"wheat_winter_acres_harvest", "wheat_winter_acres_planted",
"wheat_winter_prod_bu", "wheat_winter_yield", "wheat_yield")
ohio_crops <- map(c(corn_vals, soy_vals, wheat_vals), function(x){
nass_data(short_desc = x, agg_level_desc = "COUNTY", state_name = "OHIO",
sector = "CROPS", source_desc = "SURVEY", numeric_vals = T)
})
crops <- ohio_crops %>%
bind_rows() %>%
mutate(county = tolower(county_name),
year = as.numeric(year),
short_desc = plyr::mapvalues(short_desc,
from = c(corn_vals, soy_vals, wheat_vals),
to = c(corn_names, soy_names, wheat_names))) %>%
select(year, county, county_code, asd_desc, val = Value, short_desc) %>%
spread(short_desc, val)
# Livestock
cattle <- c("CATTLE, COWS, MILK - INVENTORY",
"CATTLE, INCL CALVES - INVENTORY")
cattle_names <- c("cattle_milk_inventory", "cattle_inventory")
ohio_livestock <- map(cattle, function(x){
nass_data(short_desc = x, agg_level_desc = "COUNTY", state_name = "OHIO",
source_desc = "SURVEY", numeric_vals = T)
})
livestock <- ohio_livestock %>%
bind_rows() %>%
mutate(county = tolower(county_name),
year = as.numeric(year),
short_desc = plyr::mapvalues(short_desc,
from = cattle,
to = cattle_names)) %>%
select(year, county, county_code, asd_desc, val = Value, short_desc) %>%
spread(short_desc, val)
crops <- crops %>%
full_join(livestock) %>%
expand(nesting(county, county_code, asd_desc), year) %>%
left_join(crops) %>%
left_join(livestock)
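# Fill suppressed county values with the district-level "OTHER (COMBINED) COUNTIES"
# (code 998) value, then drop those district rows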
crops <- crops %>%
group_by(year, asd_desc) %>%
mutate_at(vars(corn_acres_planted:cattle_milk_inventory),
funs(ifelse(is.na(.), .[county_code == "998"], .))) %>%
filter(county_code != "998")
# Drop out winter wheat, just have wheat
crops <- crops %>%
mutate(wheat_acres_harvest = ifelse(is.na(wheat_acres_harvest),
wheat_winter_acres_harvest,
wheat_acres_harvest),
wheat_acres_planted = ifelse(is.na(wheat_acres_planted),
wheat_winter_acres_planted,
wheat_acres_planted),
wheat_prod_bu = ifelse(is.na(wheat_prod_bu),
wheat_winter_prod_bu, wheat_prod_bu),
wheat_yield = ifelse(is.na(wheat_yield),
wheat_winter_yield, wheat_yield)) %>%
select(-contains("winter"))
# # Add in a blank holder for next year's prices
# annual <- data.frame(year = max(annual$year) + 1) %>%
# bind_rows(annual) %>%
# arrange(year)
write.csv(crops, paste0(local_dir, "/ohio_crops.csv"), row.names = F)
write_rds(crops, paste0(local_dir, "/ohio_crops.rds"))
# ---- state --------------------------------------------------------------
state_crops <- map(c(corn_vals, soy_vals, wheat_vals), function(x){
nass_data(short_desc = x, agg_level_desc = "STATE", state_name = "OHIO",
sector = "CROPS", source_desc = "SURVEY", numeric_vals = T)
})
crops <- state_crops %>%
bind_rows() %>%
filter(reference_period_desc == "YEAR") %>%
mutate(year = as.numeric(year),
short_desc = plyr::mapvalues(short_desc,
from = c(corn_vals, soy_vals, wheat_vals),
to = c(corn_names, soy_names, wheat_names))) %>%
select(year, asd_desc, val = Value, short_desc) %>%
spread(short_desc, val)
crops <- crops %>%
mutate(wheat_acres_harvest = ifelse(is.na(wheat_acres_harvest),
wheat_winter_acres_harvest,
wheat_acres_harvest),
wheat_acres_planted = ifelse(is.na(wheat_acres_planted),
wheat_winter_acres_planted,
wheat_acres_planted),
wheat_prod_bu = ifelse(is.na(wheat_prod_bu),
wheat_winter_prod_bu, wheat_prod_bu),
wheat_yield = ifelse(is.na(wheat_yield),
wheat_winter_yield, wheat_yield)) %>%
select(-contains("winter"))
write.csv(crops, paste0(local_dir, "/ohio_state_crops.csv"), row.names = F)
write_rds(crops, paste0(local_dir, "/ohio_state_crops.rds"))
|
/scratch/gouwar.j/cran-all/cranData/usdarnass/vignettes/0-NASS-OHIO.R
|
---
title: "Getting started with usdarnass"
author: "Robert Dinterman"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Getting started with usdarnass}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
library("knitr")
opts_chunk$set(
collapse = TRUE,
eval = !(Sys.getenv("NASS_KEY") == ""),
comment = "#>"
)
```
# Introduction
`usdarnass` provides an alternative to the Quick Stats web interface for downloading various USDA data from [https://quickstats.nass.usda.gov/](https://quickstats.nass.usda.gov/) through R. You must sign up for an [API key](https://quickstats.nass.usda.gov/api) from that website in order for this package to work.
The USDA's documentation on Quick Stats can be found at [https://www.nass.usda.gov/Quick_Stats/index.php](https://www.nass.usda.gov/Quick_Stats/index.php). A short description of what the data entail comes from the Quick Stats description on [data.gov](https://catalog.data.gov/dataset/quick-stats-agricultural-database):
> Quick Stats is the National Agricultural Statistics Service's (NASS) online, self-service tool to access complete results from the 1997, 2002, 2007, and 2012 Censuses of Agriculture as well as the best source of NASS survey published estimates. The census collects data on all commodities produced on U.S. farms and ranches, as well as detailed information on expenses, income, and operator characteristics. The surveys that NASS conducts collect information on virtually every facet of U.S. agricultural production.
There are two main USDA sources within Quick Stats: censuses and surveys. The census values in Quick Stats start in 1997 while the survey values range back to 1850 and then annually since 1866. Although Agricultural Censuses occur once every 5 years (1997, 2002, 2007, 2012, and eventually 2017), USDA administers other censuses as well, which explains the extra years available. At this time there is no support for documenting the various kinds of data which can be extracted from Quick Stats; this package only serves as an R interface for downloading Quick Stats data, and it is up to the user to understand the source of the data they download.
<!-- The core functions are -->
<!-- - `nass_set_key()` - set the required USDA Quickstats API key for the session. -->
<!-- - `nass_param()` - extract all of the valid values for a particular parameter. -->
<!-- - `nass_count()` - number of observations in a particular query, helpful in determining if a query fits within the 50,000 limit. -->
<!-- - `nass_data()` - return a data.frame with the observations that fit within a query, however any query that returns more than 50,000 observations returns an error and it will need to be subset. -->
# Package Install
Development version (needs devtools installed):
```{r installation_github, eval = FALSE}
# install.packages("devtools")
devtools::install_github("rdinter/usdarnass")
```
Load package:
```{r load}
library("usdarnass")
```
# Usage
If a query works on the [https://quickstats.nass.usda.gov/](https://quickstats.nass.usda.gov/) interface, then it will work with the `usdarnass` package. Keep in mind that there is a 50,000 observation limit for both the web interface and data queries with this package.
There are three main functions for this package with the first as the workhorse:
1. `nass_data()` returns a data.frame built to the specifications of the query from all of the arguments set in the function call. This mimics the simple "GET DATA" command on [https://quickstats.nass.usda.gov/](https://quickstats.nass.usda.gov/) and requires an API key. There is a 50,000 observation limit for each call.
2. `nass_param()` returns all of the possible values for a parameter in a query. Helpful to understand how to subset a query if it runs into the 50,000 limit.
3. `nass_count()` returns the number of records for a query. Very useful in conjunction with `nass_param()` to determine what queries can return data with a `nass_data()` call.
All of these functions require an API Key for each query, which can be set with another function.
## Key Install
```{r key-install, eval = FALSE}
nass_set_key("YOUR_KEY_IN_QUOTATIONS")
# First time, reload your environment so you can use the key without restarting R.
readRenviron("~/.Renviron")
# You can check it with:
Sys.getenv("NASS_KEY")
```
The above script will add a line to your `.Renviron` file to be re-used whenever you are using the package. If you are not comfortable with that, you can add the following line to your `.Renviron` file manually to produce the same result.
`NASS_KEY = 'YOUR_KEY_IN_QUOTATIONS'`
If you are not comfortable with either of these options, then to use the package you need to ensure that the `token` parameter is set to your API key in each of your function calls that query USDA Quick Stats.
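As a minimal sketch (with a placeholder key; the same `token` argument applies to the other query functions), such a call might look like:

```{r token_example, eval = FALSE}
# Pass the API key directly in the call rather than relying on the NASS_KEY
# environment variable
nass_count(commodity_desc = "RENT", state_name = "OHIO",
           token = "YOUR_KEY_IN_QUOTATIONS")
```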
# Get Data
As previously stated, the workhorse function is `nass_data()`, which will make query calls and return a data.frame as long as the query returns 50,000 or fewer observations. By way of example, we can put in a query to Quick Stats for the value of agricultural land (and buildings) in Wake County, North Carolina for 2012:
```{r get_data}
nass_data(year = 2012,
short_desc = "AG LAND, INCL BUILDINGS - ASSET VALUE, MEASURED IN $",
county_name = "WAKE",
state_name = "NORTH CAROLINA")
```
The output of this query has a lot to digest, but the main focus is on the `Value` variable in the resulting data.frame. Please note that the returned `Value` for the query is of class character. The parameter `numeric_vals` can be set to `TRUE` to have this return a numeric value; however, the default is a character type because some suppressed values will be coerced to `NA` when `numeric_vals` is set to `TRUE`.
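As a short illustration (a sketch, not evaluated here), the same query can be rerun with `numeric_vals = TRUE` so that `Value` comes back as a numeric column:

```{r get_data_numeric, eval = FALSE}
wake_land <- nass_data(year = 2012,
                       short_desc = "AG LAND, INCL BUILDINGS - ASSET VALUE, MEASURED IN $",
                       county_name = "WAKE",
                       state_name = "NORTH CAROLINA",
                       numeric_vals = TRUE)
# Value is now numeric; suppressed entries are coerced to NA
class(wake_land$Value)
```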
Aside from the output, this particular query used four parameters: `year`, `short_desc`, `county_name`, and `state_name`. Each of these parameters has a particular set of valid values, which can be discovered using the `nass_param()` function. In querying the Quick Stats API, you usually do not need to subset many parameters to get under the 50,000 limit, but if a query does not fit within the limit the call will produce an error.
There are 18 parameters for each query, although most of those will be `NULL` values and not required to specify an output.
## Parameters
There are 18 parameters which can be included in each query to Quick Stats; however, some of them are concatenations of other parameter values. If a name in quotations follows a particular parameter, that is the corresponding drop-down menu in the [Quick Stats web interface](https://www.nass.usda.gov/Quick_Stats/index.php). Not all parameters have a drop-down menu though:
1. `source_desc` "Program" - Source of data ("CENSUS" or "SURVEY"). Census program includes the Census of Ag as well as follow up projects. Survey program includes national, state, and county surveys.
2. `sector_desc` "Sector" - Five high level, broad categories useful to narrow down choices. ("ANIMALS & PRODUCTS", "CROPS", "DEMOGRAPHICS", "ECONOMICS", or "ENVIRONMENTAL")
3. `group_desc` "Group" - Subsets within sector (e.g., under sector_desc = "CROPS", the groups are "FIELD CROPS", "FRUIT & TREE NUTS", "HORTICULTURE", and "VEGETABLES").
4. `commodity_desc` "Commodity" - The primary subject of interest (e.g., "CORN", "CATTLE", "LABOR", "TRACTORS", "OPERATORS").
5. `short_desc` "Data Item" - A concatenation of six columns: commodity_desc, class_desc, prodn_practice_desc, util_practice_desc, statisticcat_desc, and unit_desc.
6. `domain_desc` "Domain" - Generally another characteristic of operations that produce a particular commodity (e.g., "ECONOMIC CLASS", "AREA OPERATED", "NAICS CLASSIFICATION", "SALES"). For chemical usage data, the domain describes the type of chemical applied to the commodity. The domain_desc = "TOTAL" will have no further breakouts; i.e., the data value pertains completely to the short_desc.
7. `domaincat_desc` "Domain Category" - Categories or partitions within a domain (e.g., under domain_desc = "SALES", domain categories include \$1,000 TO \$9,999, \$10,000 TO \$19,999, etc).
8. `agg_level_desc` "Geographic Level" - Aggregation level or geographic granularity of the data. ("AGRICULTURAL DISTRICT", "COUNTY", "INTERNATIONAL", "NATIONAL", "REGION : MULTI-STATE", "REGION : SUB-STATE", "STATE", "WATERSHED", or "ZIP CODE")
9. `statisticcat_desc` "Category" - The aspect of a commodity being measured (e.g., "AREA HARVESTED", "PRICE RECEIVED", "INVENTORY", "SALES").
10. `state_name` "State" - State full name.
11. `asd_desc` "Ag District" - Ag statistics district name.
12. `county_name` "County" - County name.
13. `region_desc` "Region" - NASS defined geographic entities not readily defined by other standard geographic levels. A region can be less than a state (SUB-STATE) or a group of states (MULTI-STATE), and may be specific to a commodity.
14. `zip_5` "Zip Code" - US Postal Service 5-digit zip code.
15. `watershed_desc` "Watershed" - Name assigned to the HUC.
16. `year` "Year" - The numeric year of the data, which can be either a character or numeric vector. Conditional values are also possible; for example, a character vector of ">=1999" or "1999<=" will give years greater than or equal to 1999. Right now the relational operators can only be greater/less than or equal to, placed at either the beginning or end of a string containing the year.
17. `freq_desc` "Period Type" - Length of time covered ("ANNUAL", "SEASON", "MONTHLY", "WEEKLY", "POINT IN TIME"). "MONTHLY" often covers more than one month. "POINT IN TIME" is as of a particular day.
18. `reference_period_desc` "Period" - The specific time frame, within a freq_desc.
The parameter descriptions here are intentionally minimal because many of the parameters have a large number of possible values. This is where the `nass_param()` function comes into play, in order to give the full set of values for each parameter.
# Get Parameter
The `nass_param()` function will return a vector of all the possible values for a parameter conditional on the other parameter subsets given above. By way of example, we can see that there are only two sources of datasets for the Quick Stats queries by asking what the values for the `source_desc` parameter are:
```{r source}
nass_param("source_desc")
```
The first argument in `nass_param()` is the parameter of interest, which can be any of the 18 parameters listed above. This argument must be passed as a character, so make sure to use quotations in your calls. Values are not case sensitive in the calls. The `year` parameter is the only parameter that does not need to be a character vector and can be numeric instead.
This function is most helpful in determining what variables are available for a certain subset. For example, if I were interested in what county-level variables in Ohio are available in 2000, I might start by determining which "Group" values are available at that level:
```{r ohio_group}
nass_param("group_desc",
state_name = "OHIO",
agg_level_desc = "COUNTY",
year = 2000)
```
Now, if I want to further figure out what commodities are available for the "DAIRY" subset of this data, but only after 2000, I would make a call of:
```{r ohio_commodity}
nass_param("commodity_desc",
group_desc = "dairy",
state_name = "OHIO",
agg_level_desc = "COUNTY",
year = ">2000")
```
While the `year` parameter does not need to be a character vector, it does accept relational operators which can modify the subsets even further for queries.
# Get Count
Every query with `nass_data()` has a 50,000 observation limit. In order to determine the number of observations in a query, the `nass_count()` function accepts all the same parameters as `nass_data()`, but its output is simply the number of observations the query would return:
```{r count_all}
nass_count()
```
Here we see how many observations are currently in Quick Stats as of `r Sys.Date()`. Clearly, the entire dataset cannot be downloaded with a query of their API. If you want to download all of the Quick Stats data, then it is more efficient to use their ftp site [ftp://ftp.nass.usda.gov/quickstats/](ftp://ftp.nass.usda.gov/quickstats/).
By way of another example, we can look at how many observations are available related to agricultural land at the county level:
```{r count_agland}
nass_count(commodity_desc = "AG LAND",
agg_level_desc = "COUNTY")
```
This particular query cannot be run with `nass_data()` because the number of observations greatly exceeds 50,000, and indeed that query returns an error:
```{r data_agland_error, error = TRUE}
nass_data(commodity_desc = "AG LAND",
agg_level_desc = "COUNTY")
```
At this point some understanding of the data and of the user's goals is needed. If there is only one state of interest for the study, then subsetting the data further to that state is likely the best strategy. However, it is more likely that the user wants all of the county-level data related to agricultural land. My strategy would be to look at the number of observations for each year of interest:
```{r count_agland_years}
years <- 2000:2017
sapply(years, function(x) nass_count(year = x,
commodity_desc = "AG LAND",
agg_level_desc = "COUNTY"))
```
The observations here relate only to 2002, 2007, and 2012, which are agricultural census years, and it is highly likely that the category contains many variables that would not be useful. It is then best to look at the descriptions of the variables to figure out what data would be most useful:
```{r agland_params}
agland_params <- nass_param("short_desc",
commodity_desc = "AG LAND",
agg_level_desc = "COUNTY")
agland_params
```
We can use the output for the parameter values to see the number of observations within each of these categories:
```{r count_agland_short_desc}
sapply(agland_params, function(x) nass_count(short_desc = x,
commodity_desc = "AG LAND",
agg_level_desc = "COUNTY"))
```
While most of these data items fit within the 50,000 limit, not all do. For example, the "AG LAND - TREATED, MEASURED IN ACRES" category exceeds the limit and cannot be downloaded in a single call. This is because the treated category actually spans multiple domains, which can be seen by combining `nass_param()` and `nass_count()`:
```{r agland_domain}
agland_domain <- nass_param("domain_desc",
short_desc = "AG LAND - TREATED, MEASURED IN ACRES",
commodity_desc = "AG LAND",
agg_level_desc = "COUNTY")
sapply(agland_domain, function(x) nass_count(domain_desc = x,
short_desc = "AG LAND - TREATED, MEASURED IN ACRES",
commodity_desc = "AG LAND",
agg_level_desc = "COUNTY"))
```
|
/scratch/gouwar.j/cran-all/cranData/usdarnass/vignettes/usdarnass.Rmd
|
---
title: "Output of usdarnass"
author: "Robert Dinterman"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Output of usdarnass}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
library("knitr")
opts_chunk$set(
collapse = TRUE,
eval = !(Sys.getenv("NASS_KEY") == ""),
comment = "#>"
)
```
# Introduction
Each successful call of the `nass_data()` command will return a data.frame object with 39 variables, although a handful of these variables will have the same value for each observation in the data.frame due to the nature of setting parameters for a query. The resulting data.frame is of the [long variety](http://vita.had.co.nz/papers/tidy-data.html) with the `Value` variable as the numerical variable of interest for the query.
## Official USDA Objects
The official documentation for each of these variables from the USDA are as follows:
- `week_ending` - Week ending date, used when freq_desc = WEEKLY.
- `state_name` - State full name.
- `country_code` - US Census Bureau, Foreign Trade Division 4-digit country code, as of April, 2007.
- `location_desc` - Full description for the location dimension.
- `begin_code` - If applicable, a 2-digit code corresponding to the beginning of the reference period (e.g., for freq_desc = MONTHLY, begin_code ranges from 01 (January) to 12 (December)).
- `zip_5` - US Postal Service 5-digit zip code.
- `county_ansi` - ANSI standard 3-digit county codes.
- `state_alpha` - State abbreviation, 2-character alpha code.
- `util_practice_desc` - Utilizations (e.g., GRAIN, FROZEN, SLAUGHTER) or marketing channels (e.g., FRESH MARKET, PROCESSING, RETAIL).
- `domain_desc` - Generally another characteristic of operations that produce a particular commodity (e.g., ECONOMIC CLASS, AREA OPERATED, NAICS CLASSIFICATION, SALES). For chemical usage data, the domain describes the type of chemical applied to the commodity. The domain = TOTAL will have no further breakouts; i.e., the data value pertains completely to the short_desc.
- `asd_desc` - Ag statistics district name.
- `freq_desc` - Length of time covered (ANNUAL, SEASON, MONTHLY, WEEKLY, POINT IN TIME). MONTHLY often covers more than one month. POINT IN TIME is as of a particular day.
- `prodn_practice_desc` - A method of production or action taken on the commodity (e.g., IRRIGATED, ORGANIC, ON FEED).
- `end_code` - If applicable, a 2-digit code corresponding to the end of the reference period (e.g., the reference period of JAN THRU MAR will have begin_code = 01 and end_code = 03).
- `sector_desc` - Five high level, broad categories useful to narrow down choices (CROPS, ANIMALS & PRODUCTS, ECONOMICS, DEMOGRAPHICS, and ENVIRONMENTAL).
- `short_desc` - A concatenation of six columns: commodity_desc, class_desc, prodn_practice_desc, util_practice_desc, statisticcat_desc, and unit_desc.
- `country_name` - Country name.
- `Value` - Published data value or suppression reason code.
- `reference_period_desc` - The specific time frame, within a freq_desc.
- `CV (%)` - Coefficient of variation. Available for the 2012 Census of Agriculture only. County-level CVs are generalized.
- `class_desc` - Generally a physical attribute (e.g., variety, size, color, gender) of the commodity.
- `asd_code` - NASS defined county groups, unique within a state, 2-digit ag statistics district code.
- `agg_level_desc` - Aggregation level or geographic granularity of the data (e.g., STATE, AG DISTRICT, COUNTY, REGION, ZIP CODE).
- `county_name` - County name.
- `region_desc` - NASS defined geographic entities not readily defined by other standard geographic levels. A region can be less than a state (SUB-STATE) or a group of states (MULTI-STATE), and may be specific to a commodity.
- `watershed_desc` - Name assigned to the HUC.
- `state_ansi` - American National Standards Institute (ANSI) standard 2-digit state codes.
- `congr_district_code` - US Congressional District 2-digit code.
- `domaincat_desc` - Categories or partitions within a domain (e.g., under domain = SALES, domain categories include \$1,000 TO \$9,999, \$10,000 TO \$19,999, etc).
- `state_fips_code` - NASS 2-digit state codes; include 99 and 98 for US TOTAL and OTHER STATES, respectively; otherwise match ANSI codes.
- `group_desc` - Subsets within sector (e.g., under sector = CROPS, the groups are FIELD CROPS, FRUIT & TREE NUTS, HORTICULTURE, and VEGETABLES).
- `watershed_code` - US Geological Survey (USGS) 8-digit Hydrologic Unit Code (HUC) for watersheds.
- `unit_desc` - The unit associated with the statistic category (e.g., ACRES, $ / LB, HEAD, $, OPERATIONS).
- `source_desc` - Source of data (CENSUS or SURVEY). Census program includes the Census of Ag as well as follow up projects. Survey program includes national, state, and county surveys.
- `load_time` - Date and time indicating when record was inserted into Quick Stats database.
- `county_code` - NASS 3-digit county codes; includes 998 for OTHER (COMBINED) COUNTIES and Alaska county codes; otherwise match ANSI codes.
- `statisticcat_desc` - The aspect of a commodity being measured (e.g., AREA HARVESTED, PRICE RECEIVED, INVENTORY, SALES).
- `commodity_desc` - The primary subject of interest (e.g., CORN, CATTLE, LABOR, TRACTORS, OPERATORS).
- `year` - The numeric year of the data.
I learn best through examples, so I'll cover a few different levels of analysis and subtleties related to the data.
# County Level Example
We can set a query that returns all data at the county level in Ohio related to rent, which is equivalent to setting "Geographic Level" to COUNTY, "State" to OHIO, and "Commodity" to RENT on [https://quickstats.nass.usda.gov/](https://quickstats.nass.usda.gov/):
```{r start, warning=FALSE, message=FALSE}
library("usdarnass")
library("dplyr") # Helpful package
ohio_rent <- nass_data(commodity_desc = "RENT", agg_level_desc = "COUNTY",
state_name = "OHIO")
glimpse(ohio_rent)
```
The `agg_level_desc`, `commodity_desc`, and `state_name` variables are all the same because the query parameters were set on those values. It turns out a few other variables will be identical for the whole data.frame because they do not vary based on county level observations in Ohio: `country_code`, `state_alpha`, `state_ansi`, and `state_fips_code`.
A fair number of other variables are also identical for the entire data.frame, not because of the regional aggregation but because we have subset the data with `commodity_desc` set to "RENT".
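As a quick check (a small sketch using `dplyr`, which is already loaded), we can count the distinct values in each column; columns with a single distinct value are the ones pinned down by the query:

```{r constant_columns}
sapply(ohio_rent, dplyr::n_distinct)
```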
The other values we could have set in this query come from the `short_desc` parameter, and we can use the `nass_param()` function to view the options for this data item:
```{r rent_nass_param}
nass_param("short_desc", commodity_desc = "RENT", agg_level_desc = "COUNTY", state_name = "OHIO")
```
Alternatively, from the query already returned with the `nass_data()` function, we could view the frequency of the different `short_desc` values to get the same outcome, with the additional benefit of knowing the number of observations:
```{r rent_nass_param_alt}
table(ohio_rent$short_desc)
```
## Dominant form of rent in Ohio
In Ohio, the dominant form of cash rent is for non-irrigated cropland, and most counties in the state are surveyed and have a usable cash rent value in a given year. But this is not exactly right, as we can see by subsetting our original query to only non-irrigated cropland and looking at the number of counties observed in each year.
```{r non_irrigated}
non_irrigated <- ohio_rent %>%
filter(grepl("NON-IRRIGATED", short_desc))
table(non_irrigated$year) # Observation per year
```
The cash rent values begin in 2008 with a small subset of counties in Ohio and then cover the vast majority of the state from 2009 onward. As it turns out, the entire state is surveyed, but some counties do not have enough observations for a statistically relevant sample and are thus combined at the agricultural reporting district level. This can be seen with a listing of all of the counties in Ohio with rent data available:
```{r counties}
table(non_irrigated$county_name)
# nass_param("county_name", state_name = "OHIO")
```
The "OTHER (COMBINED) COUNTIES" value has by far and away the most observations over this time and, if one knows all of the counties in Ohio, there are a few missing counties that we need to input their values for. Each of the "OTHER (COMBINED) COUNTIES" values is for a specific agricultural reporting district:
```{r asd}
non_irrigated %>%
filter(county_name == "OTHER (COMBINED) COUNTIES") %>%
pull(asd_code) %>%
table()
```
There are nine reporting districts in Ohio and eight of the nine have observations which combine counties, which implies some counties are missing official observations for the statistic in question. It is not necessarily the case that the counties are completely missing; rather, their values are suppressed. One method for correcting these missing values is to replace the missing counties with the "OTHER (COMBINED) COUNTIES" category for average rent.
In order to do this, we first need a full set of all of the counties in Ohio along with their corresponding agricultural district number. There are many ways to accomplish this, but I will go about this by leveraging the 2012 Agricultural Census data which is in Quick Stats and contains a category for the number of farms in each Ohio county. The number of farms is not helpful for our data concerns but it does serve as a way to return a data.frame with 88 observations in Ohio with each uniquely corresponding to a county in Ohio.
```{r ag_census}
farms <- nass_data(source_desc = "CENSUS", year = 2012, state_name = "OHIO", agg_level_desc = "COUNTY", domain_desc = "TOTAL", short_desc = "FARM OPERATIONS - NUMBER OF OPERATIONS")
```
At this point, we want to make sure that each year of our non_irrigated rent data has all 88 counties, with an NA value where an observation does not exist. Then, we want to impute the "OTHER (COMBINED) COUNTIES" value for all of the NAs. To do this we will create the backbone of our desired observations from the farms data and then fully merge this with the current data from Quick Stats. The resulting data.frame will have missing "Value" entries for the counties without observations, and we will impute the value from "OTHER (COMBINED) COUNTIES". Last of all, the only important aspects of these observations are a county identifier, locational variables, and the year in question. Most of the other variables in the dataset are not of importance to us, so we will keep only the relevant variables and convert them to numeric.
```{r combined}
library("tidyr")
base_rent <- farms %>%
select(state_fips_code, county_code, county_name, asd_code, asd_desc) %>%
expand(year = unique(non_irrigated$year), nesting(state_fips_code, county_code, county_name, asd_code)) %>%
full_join(non_irrigated)
# Correct for missing values in the "other"
base_rent <- base_rent %>%
arrange(year, asd_code, county_code) %>%
group_by(year, asd_code) %>%
mutate(Value = ifelse(is.na(Value), Value[county_code == "998"], Value)) %>%
filter(county_code != "998")
# Finally, select only the relevant variables are rename
base_rent <- base_rent %>%
select(year, state_fips_code, county_code, county_name, asd_code, rent = Value) %>%
mutate(rent = as.numeric(rent),
fips = as.numeric(paste0(state_fips_code, county_code)))
glimpse(base_rent)
```
|
/scratch/gouwar.j/cran-all/cranData/usdarnass/vignettes/usdarnass_output.Rmd
|
#' Convert state abbreviations to names
#'
#' Two utility functions. One converts state names to the state abbreviations,
#' and the second does the opposite.
#'
#'
#' @param abbr A vector of state abbreviations.
#' @return Returns a vector of the same length with the corresponding state
#' names or abbreviations.
#' @author David Diez
#' @seealso \code{\link{state2abbr}}, \code{\link{county}}, \code{\link{county_complete}}
#' @keywords State Abbreviation
#' @export
#' @examples
#'
#' abbr2state("MN")
#'
abbr2state <- function(abbr){
ab <- tolower(c("AL",
"AK", "AZ", "KS", "UT", "CO", "CT",
"DE", "FL", "GA", "HI", "ID", "IL",
"IN", "IA", "AR", "KY", "LA", "ME",
"MD", "MA", "MI", "MN", "MS", "MO",
"MT", "NE", "NV", "NH", "NJ", "NM",
"NY", "NC", "ND", "OH", "OK", "OR",
"PA", "RI", "SC", "SD", "TN", "TX",
"CA", "VT", "VA", "WA", "WV", "WI",
"WY", "DC"))
st <- c("Alabama",
"Alaska", "Arizona", "Kansas",
"Utah", "Colorado", "Connecticut",
"Delaware", "Florida", "Georgia",
"Hawaii", "Idaho", "Illinois",
"Indiana", "Iowa", "Arkansas",
"Kentucky", "Louisiana", "Maine",
"Maryland", "Massachusetts", "Michigan",
"Minnesota", "Mississippi", "Missouri",
"Montana", "Nebraska", "Nevada",
"New Hampshire", "New Jersey", "New Mexico",
"New York", "North Carolina", "North Dakota",
"Ohio", "Oklahoma", "Oregon",
"Pennsylvania", "Rhode Island", "South Carolina",
"South Dakota", "Tennessee", "Texas",
"California", "Vermont", "Virginia",
"Washington", "West Virginia", "Wisconsin",
"Wyoming", "District of Columbia")
st[match(tolower(abbr), ab)]
}
|
/scratch/gouwar.j/cran-all/cranData/usdata/R/abbr2state.R
|
#' United States Counties
#'
#' Data for 3142 counties in the United States. See the
#' \code{\link{county_complete}} data set for additional variables.
#'
#'
#' @name county
#' @docType data
#' @format A data frame with 3142 observations on the following 14 variables.
#' \describe{
#' \item{name}{County names.}
#' \item{state}{State names.}
#' \item{pop2000}{Population in 2000.}
#' \item{pop2010}{Population in 2010.}
#' \item{pop2017}{Population in 2017.}
#' \item{pop_change}{Population change from 2010 to 2017.}
#' \item{poverty}{Percent of population in poverty in 2017.}
#' \item{homeownership}{Home ownership rate, 2006-2010.}
#' \item{multi_unit}{Percent of housing units in multi-unit structures,
#' 2006-2010.}
#' \item{unemployment_rate}{Unemployment rate in 2017.}
#' \item{metro}{Whether the county contains a metropolitan area.}
#' \item{median_edu}{Median education level (2013-2017).}
#' \item{per_capita_income}{Per capita (per person) income
#' (2013-2017).}
#' \item{median_hh_income}{Median household income.}
#'   \item{smoking_ban}{Describes the type of county-level
#'   smoking ban in place in 2010, taking one of the values \code{"none"},
#'   \code{"partial"}, or \code{"comprehensive"}.}
#' }
#' @seealso \code{\link{county_complete}}
#' @source These data were collected from Census Quick Facts (no longer
#' available as of 2020) and its accompanying pages. Smoking ban data were from
#' a variety of sources.
#' @keywords datasets
#' @examples
#'
#' library(ggplot2)
#'
#' ggplot(county, aes(x = median_edu, y = median_hh_income)) +
#' geom_boxplot()
#'
"county"
|
/scratch/gouwar.j/cran-all/cranData/usdata/R/data-county.R
|
#' American Community Survey 2019
#'
#' Data for 3142 counties in the United States with many variables
#' of the 2019 American Community Survey.
#'
#'
#' @name county_2019
#' @docType data
#' @format A data frame with 3142 observations on the following 95 variables.
#' \describe{
#' \item{state}{State.}
#' \item{name}{County name.}
#' \item{fips}{FIPS code.}
#' \item{median_individual_income}{Median individual income (2019).}
#' \item{median_individual_income_moe}{Margin of error for \code{median_individual_income}.}
#' \item{pop}{2019 population.}
#' \item{pop_moe}{Margin of error for \code{pop}.}
#' \item{white}{Percent of population that is white alone (2015-2019).}
#' \item{white_moe}{Margin of error for \code{white}.}
#' \item{black}{Percent of population that is black alone (2015-2019).}
#' \item{black_moe}{Margin of error for \code{black}.}
#' \item{native}{Percent of population that is Native American alone (2015-2019).}
#' \item{native_moe}{Margin of error for \code{native}.}
#' \item{asian}{Percent of population that is Asian alone (2015-2019).}
#' \item{asian_moe}{Margin of error for \code{asian}.}
#' \item{pac_isl}{Percent of population that is Native Hawaiian or other Pacific Islander alone (2015-2019).}
#' \item{pac_isl_moe}{Margin of error for \code{pac_isl}.}
#' \item{other_single_race}{Percent of population that is some other race alone (2015-2019).}
#' \item{other_single_race_moe}{Margin of error for \code{other_single_race}.}
#' \item{two_plus_races}{Percent of population that is two or more races (2015-2019).}
#' \item{two_plus_races_moe}{Margin of error for \code{two_plus_races}.}
#' \item{hispanic}{Percent of population that identifies as Hispanic or Latino (2015-2019).}
#' \item{hispanic_moe}{Margin of error for \code{hispanic}.}
#' \item{white_not_hispanic}{Percent of population that is white alone, not Hispanic or Latino (2015-2019).}
#' \item{white_not_hispanic_moe}{Margin of error for \code{white_not_hispanic}.}
#' \item{median_age}{Median age (2015-2019).}
#' \item{median_age_moe}{Margin of error for \code{median_age}.}
#' \item{age_under_5}{Percent of population under 5 (2015-2019).}
#' \item{age_under_5_moe}{Margin of error for \code{age_under_5}.}
#' \item{age_over_85}{Percent of population 85 and over (2015-2019).}
#' \item{age_over_85_moe}{Margin of error for \code{age_over_85}.}
#' \item{age_over_18}{Percent of population 18 and over (2015-2019).}
#' \item{age_over_18_moe}{Margin of error for \code{age_over_18}.}
#' \item{age_over_65}{Percent of population 65 and over (2015-2019).}
#' \item{age_over_65_moe}{Margin of error for \code{age_over_65}.}
#' \item{mean_work_travel}{Mean travel time to work (2015-2019).}
#' \item{mean_work_travel_moe}{Margin of error for \code{mean_work_travel}.}
#' \item{persons_per_household}{Persons per household (2015-2019)}
#' \item{persons_per_household_moe}{Margin of error for \code{persons_per_household}.}
#' \item{avg_family_size}{Average family size (2015-2019).}
#' \item{avg_family_size_moe}{Margin of error for \code{avg_family_size}.}
#' \item{housing_one_unit_structures}{Percent of housing units in 1-unit structures (2015-2019).}
#' \item{housing_one_unit_structures_moe}{Margin of error for \code{housing_one_unit_structures}.}
#' \item{housing_two_unit_structures}{Percent of housing units in multi-unit structures (2015-2019).}
#' \item{housing_two_unit_structures_moe}{Margin of error for \code{housing_two_unit_structures}.}
#' \item{housing_mobile_homes}{Percent of housing units in mobile homes and other types of units (2015-2019).}
#' \item{housing_mobile_homes_moe}{Margin of error for \code{housing_mobile_homes}.}
#' \item{median_individual_income_age_25plus}{Median individual income (2019 dollars, 2015-2019).}
#' \item{median_individual_income_age_25plus_moe}{Margin of error for \code{median_individual_income_age_25plus}.}
#' \item{hs_grad}{Percent of population 25 and older that is a high school graduate (2015-2019).}
#' \item{hs_grad_moe}{Margin of error for \code{hs_grad}.}
#' \item{bachelors}{Percent of population 25 and older that earned a Bachelor's degree or higher (2015-2019).}
#' \item{bachelors_moe}{Margin of error for \code{bachelors}.}
#' \item{households}{Total households (2015-2019).}
#' \item{households_moe}{Margin of error for \code{households}.}
#' \item{households_speak_spanish}{Percent of households speaking Spanish (2015-2019).}
#' \item{households_speak_spanish_moe}{Margin of error for \code{households_speak_spanish}.}
#' \item{households_speak_other_indo_euro_lang}{Percent of households speaking other Indo-European language (2015-2019).}
#' \item{households_speak_other_indo_euro_lang_moe}{Margin of error for \code{households_speak_other_indo_euro_lang}.}
#' \item{households_speak_asian_or_pac_isl}{Percent of households speaking Asian and Pacific Island language (2015-2019).}
#' \item{households_speak_asian_or_pac_isl_moe}{Margin of error for \code{households_speak_asian_or_pac_isl}.}
#' \item{households_speak_other}{Percent of households speaking non European or Asian/Pacific Island language (2015-2019).}
#' \item{households_speak_other_moe}{Margin of error for \code{households_speak_other}.}
#' \item{households_speak_limited_english}{Percent of limited English-speaking households (2015-2019).}
#' \item{households_speak_limited_english_moe}{Margin of error for \code{households_speak_limited_english}.}
#' \item{poverty}{Percent of population below the poverty level (2015-2019).}
#' \item{poverty_moe}{Margin of error for \code{poverty}.}
#' \item{poverty_under_18}{Percent of population under 18 below the poverty level (2015-2019).}
#' \item{poverty_under_18_moe}{Margin of error for \code{poverty_under_18}.}
#' \item{poverty_65_and_over}{Percent of population 65 and over below the poverty level (2015-2019).}
#' \item{poverty_65_and_over_moe}{Margin of error for \code{poverty_65_and_over}.}
#' \item{mean_household_income}{Mean household income (2019 dollars, 2015-2019).}
#' \item{mean_household_income_moe}{Margin of error for \code{mean_household_income}.}
#' \item{per_capita_income}{Per capita money income in past 12 months (2019 dollars, 2015-2019).}
#' \item{per_capita_income_moe}{Margin of error for \code{per_capita_income}.}
#' \item{median_household_income}{Median household income (2015-2019).}
#' \item{median_household_income_moe}{Margin of error for \code{median_household_income}.}
#' \item{veterans}{Percent among civilian population 18 and over that are veterans (2015-2019).}
#' \item{veterans_moe}{Margin of error for \code{veterans}.}
#' \item{unemployment_rate}{Unemployment rate among those ages 20-64 (2015-2019).}
#' \item{unemployment_rate_moe}{Margin of error for \code{unemployment_rate}.}
#' \item{uninsured}{Percent of civilian noninstitutionalized population that is uninsured (2015-2019).}
#' \item{uninsured_moe}{Margin of error for \code{uninsured}.}
#' \item{uninsured_under_6}{Percent of population under 6 years that is uninsured (2015-2019).}
#' \item{uninsured_under_6_moe}{Margin of error for \code{uninsured_under_6}.}
#' \item{uninsured_under_19}{Percent of population under 19 that is uninsured (2015-2019).}
#' \item{uninsured_under_19_moe}{Margin of error for \code{uninsured_under_19}.}
#' \item{uninsured_65_and_older}{Percent of population 65 and older that is uninsured (2015-2019).}
#' \item{uninsured_65_and_older_moe}{Margin of error for \code{uninsured_65_and_older}.}
#' \item{household_has_computer}{Percent of households that have desktop or laptop computer (2015-2019).}
#' \item{household_has_computer_moe}{Margin of error for \code{household_has_computer}.}
#' \item{household_has_smartphone}{Percent of households that have smartphone (2015-2019).}
#' \item{household_has_smartphone_moe}{Margin of error for \code{household_has_smartphone}.}
#' \item{household_has_broadband}{Percent of households that have broadband internet subscription (2015-2019).}
#' \item{household_has_broadband_moe}{Margin of error for \code{household_has_broadband}.}
#' }
#' @source The data were downloaded via the \code{tidycensus} R package.
#' @keywords datasets
#' @seealso \code{\link{county}}, \code{\link{county_complete}}
#' @examples
#'
#' library(ggplot2)
#'
#' ggplot(county_2019,
#' aes(x = hs_grad, y = median_individual_income,
#' size = sqrt(pop) / 1000)) +
#' geom_point(alpha = 0.5) +
#' scale_color_discrete(na.translate = FALSE) +
#' guides(size = FALSE) +
#' labs(
#' x = "Percentage of population graduated from high school",
#' y = "Median individual income"
#' )
"county_2019"
|
/scratch/gouwar.j/cran-all/cranData/usdata/R/data-county_2019.R
|
#' United States Counties
#'
#' Data for 3142 counties in the United States.
#'
#'
#' @name county_complete
#' @docType data
#' @format A data frame with 3142 observations on the following 188 variables.
#' \describe{
#' \item{state}{State.}
#' \item{name}{County name.}
#' \item{fips}{FIPS code.}
#' \item{pop2000}{2000 population.}
#' \item{pop2010}{2010 population.}
#'   \item{pop2011}{2011 population.}
#' \item{pop2012}{2012 population.}
#' \item{pop2013}{2013 population.}
#' \item{pop2014}{2014 population.}
#' \item{pop2015}{2015 population.}
#' \item{pop2016}{2016 population.}
#' \item{pop2017}{2017 population.}
#' \item{age_under_5_2010}{Percent of population under 5 (2010).}
#' \item{age_under_5_2017}{Percent of population under 5 (2017).}
#' \item{age_under_18_2010}{Percent of population under 18 (2010).}
#' \item{age_over_65_2010}{Percent of population over 65 (2010).}
#' \item{age_over_65_2017}{Percent of population over 65 (2017).}
#' \item{median_age_2017}{Median age (2017).}
#' \item{female_2010}{Percent of population that is female (2010).}
#' \item{white_2010}{Percent of population that is white (2010).}
#' \item{black_2010}{Percent of population that is black (2010).}
#' \item{black_2017}{Percent of population that is black (2017).}
#'   \item{native_2010}{Percent of population that is Native American (2010).}
#'   \item{native_2017}{Percent of population that is Native American (2017).}
#'   \item{asian_2010}{Percent of population that is Asian (2010).}
#'   \item{asian_2017}{Percent of population that is Asian (2017).}
#'   \item{pac_isl_2010}{Percent of population that is Native Hawaiian or Pacific Islander (2010).}
#'   \item{pac_isl_2017}{Percent of population that is Native Hawaiian or Pacific Islander (2017).}
#' \item{other_single_race_2017}{Percent of population that identifies as another single race (2017).}
#' \item{two_plus_races_2010}{Percent of population that identifies as two or more races (2010).}
#' \item{two_plus_races_2017}{Percent of population that identifies as two or more races (2017).}
#' \item{hispanic_2010}{Percent of population that is Hispanic (2010).}
#' \item{hispanic_2017}{Percent of population that is Hispanic (2017).}
#' \item{white_not_hispanic_2010}{Percent of population that is white and not Hispanic (2010).}
#' \item{white_not_hispanic_2017}{Percent of population that is white and not Hispanic (2017).}
#' \item{speak_english_only_2017}{Percent of population that speaks English only (2017).}
#' \item{no_move_in_one_plus_year_2010}{Percent of population that has not moved in at least one year (2006-2010).}
#' \item{foreign_born_2010}{Percent of population that is foreign-born (2006-2010).}
#' \item{foreign_spoken_at_home_2010}{Percent of population that speaks a foreign language at home (2006-2010).}
#' \item{women_16_to_50_birth_rate_2017}{Birth rate for women ages 16 to 50 (2017).}
#' \item{hs_grad_2010}{Percent of population that is a high school graduate (2006-2010).}
#' \item{hs_grad_2016}{Percent of population that is a high school graduate (2012-2016).}
#' \item{hs_grad_2017}{Percent of population that is a high school graduate (2017).}
#' \item{some_college_2016}{Percent of population with some college education (2012-2016).}
#' \item{some_college_2017}{Percent of population with some college education (2017).}
#' \item{bachelors_2010}{Percent of population that earned a bachelor's degree (2006-2010).}
#' \item{bachelors_2016}{Percent of population that earned a bachelor's degree (2012-2016).}
#' \item{bachelors_2017}{Percent of population that earned a bachelor's degree (2017).}
#' \item{veterans_2010}{Percent of population that are veterans (2006-2010).}
#' \item{veterans_2017}{Percent of population that are veterans (2017).}
#' \item{mean_work_travel_2010}{Mean travel time to work (2006-2010).}
#' \item{mean_work_travel_2017}{Mean travel time to work (2017).}
#' \item{broadband_2017}{Percent of population who has access to broadband (2017).}
#' \item{computer_2017}{Percent of population who has access to a computer (2017).}
#' \item{housing_units_2010}{Number of housing units (2010).}
#' \item{homeownership_2010}{Home ownership rate (2006-2010).}
#' \item{housing_multi_unit_2010}{Housing units in multi-unit structures (2006-2010).}
#' \item{median_val_owner_occupied_2010}{Median value of owner-occupied housing units (2006-2010).}
#' \item{households_2010}{Households (2006-2010).}
#' \item{households_2017}{Households (2017).}
#' \item{persons_per_household_2010}{Persons per household (2006-2010).}
#' \item{persons_per_household_2017}{Persons per household (2017).}
#' \item{per_capita_income_2010}{Per capita money income in past 12 months (2010 dollars, 2006-2010)}
#' \item{per_capita_income_2017}{Per capita money income in past 12 months (2017 dollars, 2017)}
#' \item{metro_2013}{Whether the county contained a metropolitan area in 2013.}
#' \item{median_household_income_2010}{Median household income (2006-2010).}
#' \item{median_household_income_2016}{Median household income (2012-2016).}
#' \item{median_household_income_2017}{Median household income (2017).}
#' \item{private_nonfarm_establishments_2009}{Private nonfarm establishments (2009).}
#' \item{private_nonfarm_employment_2009}{Private nonfarm employment (2009).}
#' \item{percent_change_private_nonfarm_employment_2009}{Private nonfarm employment, percent change from 2000 to 2009.}
#' \item{nonemployment_establishments_2009}{Nonemployer establishments (2009).}
#' \item{firms_2007}{Total number of firms (2007).}
#' \item{black_owned_firms_2007}{Black-owned firms, percent (2007).}
#' \item{native_owned_firms_2007}{Native American-owned firms, percent (2007).}
#' \item{asian_owned_firms_2007}{Asian-owned firms, percent (2007).}
#' \item{pac_isl_owned_firms_2007}{Native Hawaiian and other Pacific Islander-owned firms, percent (2007).}
#' \item{hispanic_owned_firms_2007}{Hispanic-owned firms, percent (2007).}
#' \item{women_owned_firms_2007}{Women-owned firms, percent (2007).}
#' \item{manufacturer_shipments_2007}{Manufacturer shipments, 2007 ($1000).}
#'   \item{mercent_whole_sales_2007}{Merchant wholesaler sales, 2007 ($1000).}
#' \item{sales_2007}{Retail sales, 2007 ($1000).}
#' \item{sales_per_capita_2007}{Retail sales per capita, 2007.}
#' \item{accommodation_food_service_2007}{Accommodation and food services sales, 2007 ($1000).}
#' \item{building_permits_2010}{Building permits (2010).}
#' \item{fed_spending_2009}{Federal spending, in thousands of dollars (2009).}
#' \item{area_2010}{Land area in square miles (2010).}
#' \item{density_2010}{Persons per square mile (2010).}
#'   \item{smoking_ban_2010}{Describes the type of county-level smoking ban in place in 2010, taking one of the values \code{"none"}, \code{"partial"}, or \code{"comprehensive"}.}
#' \item{poverty_2010}{Percent of population below poverty level (2006-2010).}
#' \item{poverty_2016}{Percent of population below poverty level (2012-2016).}
#' \item{poverty_2017}{Percent of population below poverty level (2017).}
#' \item{poverty_age_under_5_2017}{Percent of population under age 5 below poverty level (2017).}
#' \item{poverty_age_under_18_2017}{Percent of population under age 18 below poverty level (2017).}
#' \item{civilian_labor_force_2007}{Civilian labor force in 2007.}
#' \item{employed_2007}{Number of civilians employed in 2007.}
#' \item{unemployed_2007}{Number of civilians unemployed in 2007.}
#' \item{unemployment_rate_2007}{Unemployment rate in 2007.}
#' \item{civilian_labor_force_2008}{Civilian labor force in 2008.}
#' \item{employed_2008}{Number of civilians employed in 2008.}
#' \item{unemployed_2008}{Number of civilians unemployed in 2008.}
#' \item{unemployment_rate_2008}{Unemployment rate in 2008.}
#' \item{civilian_labor_force_2009}{Civilian labor force in 2009.}
#' \item{employed_2009}{Number of civilians employed in 2009.}
#' \item{unemployed_2009}{Number of civilians unemployed in 2009.}
#' \item{unemployment_rate_2009}{Unemployment rate in 2009.}
#' \item{civilian_labor_force_2010}{Civilian labor force in 2010.}
#' \item{employed_2010}{Number of civilians employed in 2010.}
#' \item{unemployed_2010}{Number of civilians unemployed in 2010.}
#' \item{unemployment_rate_2010}{Unemployment rate in 2010.}
#' \item{civilian_labor_force_2011}{Civilian labor force in 2011.}
#' \item{employed_2011}{Number of civilians employed in 2011.}
#' \item{unemployed_2011}{Number of civilians unemployed in 2011.}
#' \item{unemployment_rate_2011}{Unemployment rate in 2011.}
#' \item{civilian_labor_force_2012}{Civilian labor force in 2012.}
#' \item{employed_2012}{Number of civilians employed in 2012.}
#' \item{unemployed_2012}{Number of civilians unemployed in 2012.}
#' \item{unemployment_rate_2012}{Unemployment rate in 2012.}
#' \item{civilian_labor_force_2013}{Civilian labor force in 2013.}
#' \item{employed_2013}{Number of civilians employed in 2013.}
#' \item{unemployed_2013}{Number of civilians unemployed in 2013.}
#' \item{unemployment_rate_2013}{Unemployment rate in 2013.}
#' \item{civilian_labor_force_2014}{Civilian labor force in 2014.}
#' \item{employed_2014}{Number of civilians employed in 2014.}
#' \item{unemployed_2014}{Number of civilians unemployed in 2014.}
#' \item{unemployment_rate_2014}{Unemployment rate in 2014.}
#' \item{civilian_labor_force_2015}{Civilian labor force in 2015.}
#' \item{employed_2015}{Number of civilians employed in 2015.}
#' \item{unemployed_2015}{Number of civilians unemployed in 2015.}
#' \item{unemployment_rate_2015}{Unemployment rate in 2015.}
#' \item{civilian_labor_force_2016}{Civilian labor force in 2016.}
#' \item{employed_2016}{Number of civilians employed in 2016.}
#' \item{unemployed_2016}{Number of civilians unemployed in 2016.}
#' \item{unemployment_rate_2016}{Unemployment rate in 2016.}
#' \item{uninsured_2017}{Percent of population who are uninsured (2017).}
#' \item{uninsured_age_under_6_2017}{Percent of population under 6 who are uninsured (2017).}
#' \item{uninsured_age_under_19_2017}{Percent of population under 19 who are uninsured (2017).}
#'   \item{uninsured_age_over_74_2017}{Percent of population over 74 who are uninsured (2017).}
#' \item{civilian_labor_force_2017}{Civilian labor force in 2017.}
#' \item{employed_2017}{Number of civilians employed in 2017.}
#' \item{unemployed_2017}{Number of civilians unemployed in 2017.}
#' \item{unemployment_rate_2017}{Unemployment rate in 2017.}
#' \item{median_individual_income_2019}{Median individual income (2019).}
#' \item{pop_2019}{2019 population.}
#' \item{white_2019}{Percent of population that is white alone (2015-2019).}
#' \item{black_2019}{Percent of population that is black alone (2015-2019).}
#' \item{native_2019}{Percent of population that is Native American alone (2015-2019).}
#' \item{asian_2019}{Percent of population that is Asian alone (2015-2019).}
#' \item{pac_isl_2019}{Percent of population that is Native Hawaiian or other Pacific Islander alone (2015-2019).}
#' \item{other_single_race_2019}{Percent of population that is some other race alone (2015-2019).}
#' \item{two_plus_races_2019}{Percent of population that is two or more races (2015-2019).}
#' \item{hispanic_2019}{Percent of population that identifies as Hispanic or Latino (2015-2019).}
#' \item{white_not_hispanic_2019}{Percent of population that is white alone, not Hispanic or Latino (2015-2019).}
#' \item{median_age_2019}{Median age (2015-2019).}
#' \item{age_under_5_2019}{Percent of population under 5 (2015-2019).}
#' \item{age_over_85_2019}{Percent of population 85 and over (2015-2019).}
#' \item{age_over_18_2019}{Percent of population 18 and over (2015-2019).}
#' \item{age_over_65_2019}{Percent of population 65 and over (2015-2019).}
#' \item{mean_work_travel_2019}{Mean travel time to work (2015-2019).}
#' \item{persons_per_household_2019}{Persons per household (2015-2019)}
#' \item{avg_family_size_2019}{Average family size (2015-2019).}
#' \item{housing_one_unit_structures_2019}{Percent of housing units in 1-unit structures (2015-2019).}
#' \item{housing_two_unit_structures_2019}{Percent of housing units in multi-unit structures (2015-2019).}
#' \item{housing_mobile_homes_2019}{Percent of housing units in mobile homes and other types of units (2015-2019).}
#' \item{median_individual_income_age_25plus_2019}{Median individual income (2019 dollars, 2015-2019).}
#' \item{hs_grad_2019}{Percent of population 25 and older that is a high school graduate (2015-2019).}
#' \item{bachelors_2019}{Percent of population 25 and older that earned a Bachelor's degree or higher (2015-2019).}
#' \item{households_2019}{Total households (2015-2019).}
#' \item{households_speak_spanish_2019}{Percent of households speaking Spanish (2015-2019).}
#' \item{households_speak_other_indo_euro_lang_2019}{Percent of households speaking other Indo-European language (2015-2019).}
#' \item{households_speak_asian_or_pac_isl_2019}{Percent of households speaking Asian and Pacific Island language (2015-2019).}
#' \item{households_speak_other_2019}{Percent of households speaking a language other than European or Asian/Pacific Island languages (2015-2019).}
#' \item{households_speak_limited_english_2019}{Percent of limited English-speaking households (2015-2019).}
#' \item{poverty_2019}{Percent of population below the poverty level (2015-2019).}
#' \item{poverty_under_18_2019}{Percent of population under 18 below the poverty level (2015-2019).}
#' \item{poverty_65_and_over_2019}{Percent of population 65 and over below the poverty level (2015-2019).}
#' \item{mean_household_income_2019}{Mean household income (2019 dollars, 2015-2019).}
#' \item{per_capita_income_2019}{Per capita money income in past 12 months (2019 dollars, 2015-2019).}
#' \item{median_household_income_2019}{Median household income (2015-2019).}
#' \item{veterans_2019}{Percent among civilian population 18 and over that are veterans (2015-2019).}
#' \item{unemployment_rate_2019}{Unemployment rate among those ages 20-64 (2015-2019).}
#' \item{uninsured_2019}{Percent of civilian noninstitutionalized population that is uninsured (2015-2019).}
#' \item{uninsured_under_6_2019}{Percent of population under 6 years that is uninsured (2015-2019).}
#' \item{uninsured_under_19_2019}{Percent of population under 19 that is uninsured (2015-2019).}
#' \item{uninsured_65_and_older_2019}{Percent of population 65 and older that is uninsured (2015-2019).}
#' \item{household_has_computer_2019}{Percent of households that have desktop or laptop computer (2015-2019).}
#' \item{household_has_smartphone_2019}{Percent of households that have smartphone (2015-2019).}
#' \item{household_has_broadband_2019}{Percent of households that have broadband internet subscription (2015-2019).}
#' }
#' @source The data prior to 2011 was from \url{http://census.gov},
#' though the exact page it came from is no longer available.
#'
#' More recent data comes from the following sources.
#' \itemize{
#' \item Downloaded via the \code{tidycensus} R package.
#' \item Download links for spreadsheets were found on
#' \url{https://www.ers.usda.gov/data-products/county-level-data-sets/download-data}
#' \item Unemployment - Bureau of Labor Statistics - LAUS data - \url{https://www.bls.gov/lau/}.
#' \item Median Household Income - Census Bureau - Small Area Income and Poverty Estimates (SAIPE) data.
#' \item The original data table was prepared by USDA, Economic Research Service.
#' \item Census Bureau.
#' \item 2012-16 American Community Survey 5-yr average.
#' \item The original data table was prepared by USDA, Economic Research Service.
#' \item Tim Parker (tparker at ers.usda.gov) is the contact for much of the new data incorporated into this data set.
#' }
#' @keywords datasets
#' @seealso \code{\link{county}}
#' @examples
#'
#' library(dplyr)
#' library(ggplot2)
#'
#' county_complete %>%
#' mutate(
#' pop_change = 100 * ((pop2017 / pop2013) - 1),
#' metro_area = if_else(metro_2013 == 1, TRUE, FALSE)
#' ) %>%
#' ggplot(aes(x = poverty_2016,
#' y = pop_change,
#' color = metro_area,
#' size = sqrt(pop2017) / 1e3)) +
#' geom_point(alpha = 0.5) +
#' scale_color_discrete(na.translate = FALSE) +
#' guides(size = FALSE) +
#' labs(
#' x = "Percentage of population in poverty (2016)",
#' y = "Percentage population change between 2013 to 2017",
#' color = "Metropolitan area",
#' title = "Population change and poverty"
#' )
#'
#' # Counties with high population change
#' county_complete %>%
#' mutate(pop_change = 100 * ((pop2017 / pop2013) - 1)) %>%
#' filter(pop_change < -10 | pop_change > 25) %>%
#' select(state, name, fips, pop_change)
#'
#' # Population by metro area
#' county_complete %>%
#' mutate(metro_area = if_else(metro_2013 == 1, TRUE, FALSE)) %>%
#' filter(!is.na(metro_area)) %>%
#' ggplot(aes(x = metro_area, y = log(pop2017))) +
#' geom_violin() +
#' labs(
#' x = "Metro area",
#' y = "Log of population in 2017",
#' title = "Population by metro area"
#' )
#'
#' # Poverty and median household income
#' county_complete %>%
#' mutate(metro_area = if_else(metro_2013 == 1, TRUE, FALSE)) %>%
#' ggplot(aes(x = poverty_2016,
#' y = median_household_income_2016,
#' color = metro_area,
#' size = sqrt(pop2017) / 1e3)) +
#' geom_point(alpha = 0.5) +
#' scale_color_discrete(na.translate = FALSE) +
#' guides(size = FALSE) +
#' labs(
#' x = "Percentage of population in poverty (2016)",
#' y = "Median household income (2016)",
#' color = "Metropolitan area",
#' title = "Poverty and median household income"
#' )
#'
#' # Unemployment rate and poverty
#' county_complete %>%
#' mutate(metro_area = if_else(metro_2013 == 1, TRUE, FALSE)) %>%
#' ggplot(aes(x = unemployment_rate_2017,
#' y = poverty_2016,
#' color = metro_area,
#' size = sqrt(pop2017) / 1e3)) +
#' geom_point(alpha = 0.5) +
#' scale_color_discrete(na.translate = FALSE) +
#' guides(size = FALSE) +
#' labs(
#' x = "Unemployment rate (2017)",
#' y = "Percentage of population in poverty (2016)",
#' color = "Metropolitan area",
#' title = "Unemployment rate and poverty"
#' )
"county_complete"
|
/scratch/gouwar.j/cran-all/cranData/usdata/R/data-county_complete.R
|
#' Election results for 2010 Governor races in the U.S.
#'
#' Election results for 2010 Governor races in the U.S.
#'
#'
#' @name govrace10
#' @docType data
#' @format A data frame with 37 observations on the following 23 variables.
#' \describe{
#' \item{id}{Unique identifier for the race, which does not overlap with other
#' 2010 races (see \code{\link{houserace10}} and \code{\link{senaterace10}})}
#' \item{state}{State name}
#' \item{abbr}{State name abbreviation}
#' \item{name1}{Name of the winning candidate}
#' \item{perc1}{Percentage of vote for winning candidate (if more than one candidate)}
#' \item{party1}{Party of winning candidate}
#' \item{votes1}{Number of votes for winning candidate}
#' \item{name2}{Name of candidate with second most votes}
#' \item{perc2}{Percentage of vote for candidate who came in second}
#' \item{party2}{Party of candidate with second most votes}
#' \item{votes2}{Number of votes for candidate who came in second}
#' \item{name3}{Name of candidate with third most votes}
#' \item{perc3}{Percentage of vote for candidate who came in third}
#' \item{party3}{Party of candidate with third most votes}
#' \item{votes3}{Number of votes for candidate who came in third}
#' \item{name4}{Name of candidate with fourth most votes}
#' \item{perc4}{Percentage of vote for candidate who came in fourth}
#' \item{party4}{Party of candidate with fourth most votes}
#' \item{votes4}{Number of votes for candidate who came in fourth}
#' \item{name5}{Name of candidate with fifth most votes}
#' \item{perc5}{Percentage of vote for candidate who came in fifth}
#' \item{party5}{Party of candidate with fifth most votes}
#' \item{votes5}{Number of votes for candidate who came in fifth}
#' }
#' @source MSNBC.com, retrieved 2010-11-09.
#' @keywords datasets
#' @examples
#'
#' table(govrace10$party1, govrace10$party2)
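#'
#' # A hedged extra sketch: winning margins (assumes perc1 and perc2 are numeric percentages)
#' hist(govrace10$perc1 - govrace10$perc2,
#'   main = "Winning margin in 2010 governor races",
#'   xlab = "Margin (percentage points)")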
#'
"govrace10"
|
/scratch/gouwar.j/cran-all/cranData/usdata/R/data-govrace10.R
|
#' Election results for the 2010 U.S. House of Representatives races
#'
#' Election results for the 2010 U.S. House of Representatives races
#'
#' The analysis in the Examples section was inspired by and is similar to
#' Nate Silver's district-level analysis on the FiveThirtyEight blog in the
#' New York Times: \url{https://fivethirtyeight.com/features/2010-an-aligning-election/}
#'
#' @name houserace10
#' @docType data
#' @format A data frame with 435 observations on the following 24 variables.
#' \describe{
#' \item{id}{Unique identifier for the race, which does not
#' overlap with other 2010 races (see \code{\link{govrace10}} and
#' \code{\link{senaterace10}})}
#' \item{state}{State name}
#' \item{abbr}{State name abbreviation}
#' \item{num}{District number for the state}
#' \item{name1}{Name of the winning candidate}
#' \item{perc1}{Percentage of vote for winning candidate (if more than
#' one candidate)}
#' \item{party1}{Party of winning candidate}
#' \item{votes1}{Number of votes for winning candidate}
#' \item{name2}{Name of candidate with second most votes}
#' \item{perc2}{Percentage of vote for candidate who came in second}
#' \item{party2}{Party of candidate with second most votes}
#' \item{votes2}{Number of votes for candidate who came in second}
#' \item{name3}{Name of candidate with third most votes}
#' \item{perc3}{Percentage of vote for candidate who came in third}
#' \item{party3}{Party of candidate with third most votes}
#' \item{votes3}{Number of votes for candidate who came in third}
#' \item{name4}{Name of candidate with fourth most votes}
#' \item{perc4}{Percentage of vote for candidate who came in fourth}
#' \item{party4}{Party of candidate with fourth most votes}
#' \item{votes4}{Number of votes for candidate who came in fourth}
#' \item{name5}{Name of candidate with fifth most votes}
#' \item{perc5}{Percentage of vote for candidate who came in fifth}
#' \item{party5}{Party of candidate with fifth most votes}
#' \item{votes5}{Number of votes for candidate who came in fifth}
#' }
#' @source MSNBC.com, retrieved 2010-11-09.
#' @keywords datasets
#' @examples
#'
#' hr <- table(houserace10[,c("abbr", "party1")])
#' nr <- apply(hr, 1, sum)
#'
#' pr <- prrace08[prrace08$state != "DC",c("state", "p_obama")]
#' hr <- hr[as.character(pr$state),]
#' (fit <- glm(hr ~ pr$p_obama, family=binomial))
#'
#' x1 <- pr$p_obama[match(houserace10$abbr, pr$state)]
#' y1 <- (houserace10$party1 == "Democrat")+0
#' g <- glm(y1 ~ x1, family=binomial)
#'
#'
#' x <- pr$p_obama[pr$state != "DC"]
#' nr <- apply(hr, 1, sum)
#' plot(x, hr[,"Democrat"] / nr,
#' pch = 19, cex = sqrt(nr), col = "#22558844",
#' xlim = c(20, 80), ylim = c(0, 1),
#' xlab = "Percent vote for Obama in 2008",
#' ylab = "Probability of Democrat winning House seat")
#' X <- seq(0, 100, 0.1)
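#' # The intercept and slope below are hard-coded; they appear to be the
#' # (rounded) coefficients of the logistic fit g above, i.e. coef(g).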
#' lo <- -5.6079 + 0.1009*X
#' p <- exp(lo)/(1+exp(lo))
#' lines(X, p)
#' abline(h=0:1, lty=2, col="#888888")
#'
"houserace10"
|
/scratch/gouwar.j/cran-all/cranData/usdata/R/data-houserace10.R
|
#' Election results for the 2008 U.S. Presidential race
#'
#' Election results for the 2008 U.S. Presidential race
#'
#' In Nebraska, 4 electoral votes went to McCain and 1 to Obama. Otherwise the
#' electoral votes were a winner-take-all.
#'
#' @name prrace08
#' @docType data
#' @format A data frame with 51 observations on the following 7 variables.
#' \describe{
#' \item{state}{State name abbreviation}
#' \item{state_full}{Full state name}
#' \item{n_obama}{Number of votes for Barack Obama}
#' \item{p_obama}{Proportion of votes for Barack Obama}
#' \item{n_mc_cain}{Number of votes for John McCain}
#' \item{p_mc_cain}{Proportion of votes for John McCain}
#' \item{el_votes}{Number of electoral votes for a state}
#' }
#' @source \href{https://www.infoplease.com/us/government/elections/presidential-election-of-2008-electoral-and-popular-vote-summary}{Presidential Election of 2008, Electoral and Popular Vote Summary},
#' retrieved 2011-04-21.
#' @keywords datasets election 2008 president United States
#' @examples
#'
#' #===> Obtain 2010 US House Election Data <===#
#' hr <- table(houserace10[,c("abbr", "party1")])
#' nr <- apply(hr, 1, sum)
#'
#' #===> Obtain 2008 President Election Data <===#
#' pr <- prrace08[prrace08$state != "DC",c("state", "p_obama")]
#' hr <- hr[as.character(pr$state),]
#' (fit <- glm(hr ~ pr$p_obama, family=binomial))
#'
#' #===> Visualizing Binomial outcomes <===#
#' x <- pr$p_obama[pr$state != "DC"]
#' nr <- apply(hr, 1, sum)
#' plot(x, hr[,"Democrat"]/nr, pch=19, cex=sqrt(nr), col="#22558844",
#' xlim=c(20, 80), ylim=c(0, 1), xlab="Percent vote for Obama in 2008",
#' ylab="Probability of Democrat winning House seat")
#'
#' #===> Logistic Regression <===#
#' x1 <- pr$p_obama[match(houserace10$abbr, pr$state)]
#' y1 <- (houserace10$party1 == "Democrat")+0
#' g <- glm(y1 ~ x1, family=binomial)
#' X <- seq(0, 100, 0.1)
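#' # The intercept and slope below are hard-coded; they appear to be the
#' # (rounded) coefficients of the logistic fit g above, i.e. coef(g).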
#' lo <- -5.6079 + 0.1009*X
#' p <- exp(lo)/(1+exp(lo))
#' lines(X, p)
#' abline(h=0:1, lty=2, col="#888888")
#'
"prrace08"
|
/scratch/gouwar.j/cran-all/cranData/usdata/R/data-prrace08.R
|
#' Election results for the 2010 U.S. Senate races
#'
#' Election results for the 2010 U.S. Senate races
#'
#'
#' @name senaterace10
#' @docType data
#' @format A data frame with 38 observations on the following 23 variables.
#' \describe{
#' \item{id}{Unique identifier for the race, which does not overlap with other
#' 2010 races (see \code{\link{govrace10}} and \code{\link{houserace10}})}
#' \item{state}{State name}
#' \item{abbr}{State name abbreviation}
#' \item{name1}{Name of the winning candidate}
#' \item{perc1}{Percentage of vote for winning candidate (if more than one candidate)}
#' \item{party1}{Party of winning candidate}
#' \item{votes1}{Number of votes for winning candidate}
#' \item{name2}{Name of candidate with second most votes}
#' \item{perc2}{Percentage of vote for candidate who came in second}
#' \item{party2}{Party of candidate with second most votes}
#' \item{votes2}{Number of votes for candidate who came in second}
#' \item{name3}{Name of candidate with third most votes}
#' \item{perc3}{Percentage of vote for candidate who came in third}
#' \item{party3}{Party of candidate with third most votes}
#' \item{votes3}{Number of votes for candidate who came in third}
#' \item{name4}{Name of candidate with fourth most votes}
#' \item{perc4}{Percentage of vote for candidate who came in fourth}
#' \item{party4}{Party of candidate with fourth most votes}
#' \item{votes4}{Number of votes for candidate who came in fourth}
#' \item{name5}{Name of candidate with fifth most votes}
#' \item{perc5}{Percentage of vote for candidate who came in fifth}
#' \item{party5}{Party of candidate with fifth most votes}
#' \item{votes5}{Number of votes for candidate who came in fifth}
#' }
#' @source MSNBC.com, retrieved 2010-11-09.
#' @keywords datasets
#' @examples
#'
#' library(ggplot2)
#'
#' ggplot(senaterace10, aes(x = perc1)) +
#' geom_histogram(binwidth = 5) +
#' labs(x = "Winning candidate vote percentage")
#'
"senaterace10"
|
/scratch/gouwar.j/cran-all/cranData/usdata/R/data-senaterace10.R
|
#' State-level data
#'
#' Information about each state collected from both the official US Census
#' website and from various other sources.
#'
#'
#' @name state_stats
#' @docType data
#' @format A data frame with 51 observations on the following 24 variables.
#' \describe{
#' \item{state}{State name.}
#' \item{abbr}{State abbreviation (e.g. \code{"MN"}).}
#' \item{fips}{FIPS code.}
#' \item{pop2010}{Population in 2010.}
#' \item{pop2000}{Population in 2000.}
#' \item{homeownership}{Home ownership rate.}
#' \item{multiunit}{Percent of living units that are in multi-unit
#' structures.}
#' \item{income}{Average income per capita.}
#' \item{med_income}{Median household income.}
#'   \item{poverty}{Poverty rate.}
#'   \item{fed_spend}{Federal spending per capita.}
#' \item{land_area}{Land area.}
#' \item{smoke}{Percent of population that smokes.}
#' \item{murder}{Murders per 100,000 people.}
#' \item{robbery}{Robberies per 100,000.}
#' \item{agg_assault}{Aggravated assaults per 100,000.}
#' \item{larceny}{Larcenies per 100,000.}
#' \item{motor_theft}{Vehicle theft per 100,000.}
#' \item{soc_sec}{Percent of individuals collecting social security.}
#' \item{nuclear}{Percent of power coming from nuclear sources.}
#' \item{coal}{Percent of power coming from coal sources.}
#' \item{tr_deaths}{Traffic deaths per 100,000.}
#' \item{tr_deaths_no_alc}{Traffic deaths per 100,000 where alcohol was
#' not a factor.}
#' \item{unempl}{Unemployment rate (February 2012, preliminary).}
#' }
#' @source Census Quick Facts (no longer available as of 2020),
#' InfoChimps (also no longer available as of 2020),
#' \href{https://www-fars.nhtsa.dot.gov/Main/index.aspx}{National Highway Traffic Safety Administration},
#' (\code{tr_deaths}, \code{tr_deaths_no_alc}),
#' \href{https://www.bls.gov/web/laus/laumstrk.htm}{Bureau of Labor Statistics}
#' (\code{unempl}).
#' @keywords datasets state United States crime energy
#' @examples
#'
#' library(ggplot2)
#' library(dplyr)
#' library(maps)
#'
#' states_selected <- state_stats %>%
#' mutate(region = tolower(state)) %>%
#' select(region, unempl, murder, nuclear)
#'
#' states_map <- map_data("state") %>%
#' inner_join(states_selected)
#'
#' # Unemployment map
#' ggplot(states_map, aes(map_id = region)) +
#' geom_map(aes(fill = unempl), map = states_map) +
#' expand_limits(x = states_map$long, y = states_map$lat) +
#' scale_fill_viridis_c() +
#' labs(x = "", y = "", fill = "Unemployment\n(%)")
#'
#' # Murder rate map
#' states_map %>%
#' filter(region != "district of columbia") %>%
#' ggplot(aes(map_id = region)) +
#' geom_map(aes(fill = murder), map = states_map) +
#' expand_limits(x = states_map$long, y = states_map$lat) +
#' scale_fill_viridis_c() +
#' labs(x = "", y = "", fill = "Murders\nper 100k")
#'
#' # Nuclear energy map
#' ggplot(states_map, aes(map_id = region)) +
#' geom_map(aes(fill = nuclear), map = states_map) +
#' expand_limits(x = states_map$long, y = states_map$lat) +
#' scale_fill_viridis_c() +
#' labs(x = "", y = "", fill = "Nuclear energy\n(%)")
#'
#'
"state_stats"
|
/scratch/gouwar.j/cran-all/cranData/usdata/R/data-state_stats.R
|
#' Summary of many state-level variables
#'
#' Census data for the 50 states plus DC and Puerto Rico.
#'
#'
#' @name urban_owner
#' @aliases urban.owner urban_owner
#' @docType data
#' @format A data frame with 52 observations on the following 28 variables.
#' \describe{
#' \item{state}{State}
#' \item{total_housing_units_2000}{Total housing units available in 2000.}
#' \item{total_housing_units_2010}{Total housing units available in 2010.}
#' \item{pct_vacant}{a numeric vector}
#' \item{occupied}{Occupied.}
#' \item{pct_owner_occupied}{a numeric vector}
#' \item{pop_st}{a numeric vector}
#' \item{area_st}{a numeric vector}
#' \item{pop_urban}{a numeric vector}
#' \item{poppct_urban}{a numeric vector}
#' \item{area_urban}{a numeric vector}
#' \item{areapct_urban}{a numeric vector}
#' \item{popden_urban}{a numeric vector}
#' \item{pop_ua}{a numeric vector}
#' \item{poppct_urban.1}{a numeric vector}
#' \item{area_ua}{a numeric vector}
#' \item{areapct_ua}{a numeric vector}
#' \item{popden_ua}{a numeric vector}
#' \item{pop_uc}{a numeric vector}
#' \item{poppct_uc}{a numeric vector}
#' \item{area_uc}{a numeric vector}
#' \item{areapct_uc}{a numeric vector}
#' \item{popden_uc}{a numeric vector}
#' \item{pop_rural}{a numeric vector}
#' \item{poppct_rural}{a numeric vector}
#' \item{area_rural}{a numeric vector}
#' \item{areapct_rural}{a numeric vector}
#' \item{popden_rural}{a numeric vector}
#' }
#' @source US Census.
#' @keywords datasets
#' @examples
#'
#' urban_owner
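#'
#' # A hedged sketch: change in total housing units between the 2000 and 2010
#' # censuses (assumes the housing-unit counts are numeric)
#' head(data.frame(
#'   state = urban_owner$state,
#'   change = urban_owner$total_housing_units_2010 - urban_owner$total_housing_units_2000
#' ))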
#'
"urban_owner"
|
/scratch/gouwar.j/cran-all/cranData/usdata/R/data-urban_owner.R
|
#' State summary info
#'
#' Census info for the 50 US states plus DC.
#'
#'
#' @name urban_rural_pop
#' @docType data
#' @format A data frame with 51 observations on the following 5 variables.
#' \describe{
#' \item{state}{US state.}
#' \item{urban_in}{a numeric vector}
#' \item{urban_out}{a numeric vector}
#' \item{rural_farm}{a numeric vector}
#' \item{rural_nonfarm}{a numeric vector}
#' }
#' @source US Census.
#' @keywords datasets
#' @examples
#'
#' urban_rural_pop
#'
"urban_rural_pop"
|
/scratch/gouwar.j/cran-all/cranData/usdata/R/data-urban_rural_pop.R
|
#' Predicting who would vote for NSA Mass Surveillance
#'
#' In 2013, the House of Representatives voted to not stop the National
#' Security Agency's (NSA's) mass surveillance of phone behaviors. We look at
#' two predictors for how a representative voted: their party and how much
#' money they have received from the private defense industry.
#'
#'
#' @name vote_nsa
#' @docType data
#' @format A data frame with 434 observations on the following 5 variables.
#' \describe{
#' \item{name}{Name of the Congressional representative.}
#' \item{party}{The party of the representative: \code{D} for Democrat and \code{R} for Republican.}
#' \item{state}{State for the representative.}
#' \item{money}{Money received from the defense industry for their campaigns.}
#' \item{phone_spy_vote}{Voting to rein in the phone dragnet or continue allowing mass surveillance.}
#' }
#' @references Kravets, D., 2020. Lawmakers Who Upheld NSA Phone Spying Received
#' Double The Defense Industry Cash. WIRED.
#' Available at \url{https://www.wired.com/2013/07/money-nsa-vote/}.
#' @source \href{https://www.maplight.org/}{MapLight}.
#' Available at \url{http://s3.documentcloud.org/documents/741074/amash-amendment-vote-maplight.pdf}.
#' @keywords datasets
#' @examples
#'
#' table(vote_nsa$party, vote_nsa$phone_spy_vote)
#' boxplot(vote_nsa$money / 1000 ~ vote_nsa$phone_spy_vote,
#' ylab = "$1000s Received from Defense Industry")
#'
"vote_nsa"
|
/scratch/gouwar.j/cran-all/cranData/usdata/R/data-vote_nsa.R
|
#' US Voter Turnout Data.
#'
#' State-level data on federal elections held in November
#' between 1980 and 2014.
#'
#' @format A data frame with 936 rows and 7 variables.
#' \describe{
#' \item{year}{Year election was held.}
#' \item{region}{Specifies if data is state or national total.}
#' \item{voting_eligible_population}{Number of citizens eligible to vote; does not count felons.}
#' \item{total_ballots_counted}{Number of ballots cast.}
#' \item{highest_office}{Number of ballots that contained a vote for the highest office of that election.}
#' \item{percent_total_ballots_counted}{Overall voter turnout percentage.}
#' \item{percent_highest_office}{Highest office voter turnout percentage.}
#' }
#'
#' @examples
#'
#' library(ggplot2)
#'
#' ggplot(voter_count, aes(x = percent_highest_office, y = percent_total_ballots_counted)) +
#' geom_point() +
#' labs(
#' title = "Total Ballots V Highest Office",
#' x = "Highest Office",
#' y = "Total Ballots"
#' )
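#'
#' # A hedged extra sketch: turnout distribution by year
#' # (assumes the percentage columns are numeric)
#' ggplot(voter_count, aes(x = factor(year), y = percent_highest_office)) +
#'   geom_boxplot() +
#'   labs(x = "Year", y = "Highest office turnout (%)")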
#'
#' @source \href{http://www.electproject.org/home/voter-turnout/voter-turnout-data}{United States Election Project}
#'
"voter_count"
|
/scratch/gouwar.j/cran-all/cranData/usdata/R/data-voter_count.R
|
#' Convert state names to abbreviations
#'
#' Two utility functions. One converts state names to the state abbreviations,
#' and the second does the opposite.
#'
#'
#' @param state A vector of state names; a small amount of fuzzy matching is allowed.
#' @return Returns a vector of the same length with the corresponding state
#' names or abbreviations.
#' @author David Diez
#' @seealso \code{\link{abbr2state}}, \code{\link{county}}, \code{\link{county_complete}}
#' @keywords State Abbreviation
#' @examples
#'
#' state2abbr("Minnesota")
#'
#' # Some spelling/capitalization errors okay
#' state2abbr("mINnesta")
#'
#' @export
state2abbr <- function(state){
ab <- tolower(c("AL",
"AK", "AZ", "KS", "UT", "CO", "CT",
"DE", "FL", "GA", "HI", "ID", "IL",
"IN", "IA", "AR", "KY", "LA", "ME",
"MD", "MA", "MI", "MN", "MS", "MO",
"MT", "NE", "NV", "NH", "NJ", "NM",
"NY", "NC", "ND", "OH", "OK", "OR",
"PA", "RI", "SC", "SD", "TN", "TX",
"CA", "VT", "VA", "WA", "WV", "WI",
"WY", "DC"))
st <- tolower(c("Alabama",
"Alaska56789", "Arizona", "Kansas",
"Utah", "Colorado", "Connecticut",
"Delaware", "Florida", "Georgia",
"Hawaii", "Idaho", "Illinois",
"Indiana", "Iowa", "9899Arkansas",
"Kentucky", "Louisiana", "Maine",
"Maryland", "Massachusetts", "Michigan",
"Minnesota", "Mississippi", "Missouri",
"Montana", "Nebraska", "Nevada",
"New Hampshire", "New Jersey", "New Mexico",
"New York", "North123498 Carolina",
"North123498 Dakota1234",
"Ohio", "Oklahoma", "Oregon",
"Pennsylvania", "Rhode Island", "South Carolina",
"South Dakota1234", "Tennessee", "Texas",
"California", "Vermont", "Virginia",
"Washington", "West Virginia", "Wisconsin",
"Wyoming", "District of Columbia"))
state <- tolower(as.character(state))
state <- gsub("north", "north123498", state)
state <- gsub("dakota", "dakota1234", state)
state <- gsub("arkansas", "9899arkansas", state)
state <- gsub("alaska", "alaska56789", state)
ST <- rep(NA, length(state))
for (i in 1:length(st)) {
ST[agrep(st[i], state, 0.2)] <- i
}
toupper(ab[ST])
}
|
/scratch/gouwar.j/cran-all/cranData/usdata/R/state2abbr.R
|
#' @keywords internal
#' @importFrom tibble tibble
"_PACKAGE"
# The following block is used by usethis to automatically manage
# roxygen namespace tags. Modify with care!
## usethis namespace: start
## usethis namespace: end
NULL
|
/scratch/gouwar.j/cran-all/cranData/usdata/R/usdata-package.R
|
# Author: Babak Naimi, [email protected]
# Date : Sep 2012
# Version 1.0
# Licence GPL v3
#setClassUnion("spoint", c("SpatVector","SpatialPoints"))
setClass("speciesLISA",
representation(species="SpatVector",
data="data.frame",
LISAs="matrix",
weights="numeric",
statistic="character",
LISA="numeric")
)
setClass("VIF",
representation(variables="character",
excluded="character",
corMatrix="matrix",
results="data.frame")
)
setClass("RasterVariogram",
representation(lag="numeric",
nlags="numeric",
variogramCloud="matrix",
variogram="data.frame")
)
|
/scratch/gouwar.j/cran-all/cranData/usdm/R/AAAClasses.R
|
# Author: Babak Naimi, [email protected]
# Date : Oct. 2012
# Version 1.0
# Licence GPL v3
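# .getFilterLag returns the ring filter that selects cells in the n-th lag band,
# i.e. at distances between (n-1)*lag and n*lag from the target cell.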
.getFilterLag<-function(r,lag,n) {
d1 <- (n -1) * lag
d2 <- d1 + lag
.filter(r=r,d1=d1,d2=d2)
}
if (!isGeneric("Variogram")) {
setGeneric("Variogram", function(x,lag,cutoff,cells,size=100)
standardGeneric("Variogram"))
}
setMethod ('Variogram' ,signature(x='RasterLayer'),
function (x,lag,cutoff,cells,size=100) {
if (missing(cutoff)) cutoff<- sqrt((xmin(x)-xmax(x))^2+(ymin(x)-ymax(x))^2) / 3
if (missing(lag)) lag <- res(x)[1]
else if (lag < res(x)[1]) lag <- res(x)[1]
if (cutoff < lag) stop("cutoff should be greater than lag size")
nlag <- ceiling(cutoff / lag)
re <- res(x)[1]
if (missing(cells)) {
cells <-c(1:ncell(x))[which(!is.na(x[1:ncell(x)]))]
if (length(cells) > size) cells <- cells[sample(1:length(cells),size)]
}
n <- length(cells)
x <- as(x,"matrix")
tbl <- matrix(nrow=n,ncol=nlag)
for (nl in 1:nlag) {
filter <- .getFilterLag(re,lag,nl)
nf <- ncol(filter)
out <- rep(NA,n)
for (c in 1:n) {
xi <- t(x)[cells[c]]
if (!is.na(xi)) {
xn <- .neighborRowCol(x,cells[c],nf)
xn[,1] <- xn[,1] * as(filter,"vector")
xn <- xn[!is.na(xn[,1]),]
xn <- unlist(lapply(1:nrow(xn),function(r) {x[xn[r,1],xn[r,2]]}))
xn <- xn[!is.na(xn)]
out[c] <- mean((xi - xn)^2)/2
} else out[c] <- NA
}
tbl[,nl] <- out
}
v <- new("RasterVariogram")
v@lag <- lag
v@nlags <- nlag
v@variogramCloud <- tbl
v@variogram <- data.frame(distance=seq(lag,lag*nlag,lag) - (lag/2),gamma=apply(tbl,2,mean,na.rm=TRUE))
v
}
)
#------
setMethod ('Variogram' ,signature(x='SpatRaster'),
function (x,lag,cutoff,cells,size=100) {
if (missing(cutoff)) cutoff<- sqrt((xmin(x)-xmax(x))^2+(ymin(x)-ymax(x))^2) / 3
if (missing(lag)) lag <- res(x)[1]
else if (lag < res(x)[1]) lag <- res(x)[1]
if (cutoff < lag) stop("cutoff should be greater than lag size")
if (nlyr(x) > 1) {
warning('The raster has multiple layers; the first layer is considered!')
x <- x[[1]]
}
nlag <- ceiling(cutoff / lag)
re <- res(x)[1]
if (missing(cells)) {
cells <-c(1:ncell(x))[which(!is.na(x[1:ncell(x)]))]
if (length(cells) > size) cells <- cells[sample(1:length(cells),size)]
}
n <- length(cells)
x <- as.matrix(x,wide=TRUE)
.xt <- t(x)
tbl <- matrix(nrow=n,ncol=nlag)
for (nl in 1:nlag) {
filter <- .getFilterLag(re,lag,nl)
nf <- ncol(filter)
out <- rep(NA,n)
for (c in 1:n) {
xi <- .xt[cells[c]]
if (!is.na(xi)) {
xn <- .neighborRowCol(x,cells[c],nf)
xn[,1] <- xn[,1] * as(filter,"vector")
xn <- xn[!is.na(xn[,1]),]
xn <- unlist(lapply(1:nrow(xn),function(r) {x[xn[r,1],xn[r,2]]}))
xn <- xn[!is.na(xn)]
out[c] <- mean((xi - xn)^2)/2
} else out[c] <- NA
}
tbl[,nl] <- out
}
rm(.xt,x,out); gc()
v <- new("RasterVariogram")
v@lag <- lag
v@nlags <- nlag
v@variogramCloud <- tbl
v@variogram <- data.frame(distance=seq(lag,lag*nlag,lag) - (lag/2),gamma=apply(tbl,2,mean,na.rm=TRUE))
v
}
)
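# A minimal usage sketch (not part of the package code; assumes 'r' is a
# single-layer SpatRaster with numeric values):
# v <- Variogram(r, size = 50)
# plot(v@variogram$distance, v@variogram$gamma, type = "b",
#      xlab = "Lag distance", ylab = "Semivariance")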
|
/scratch/gouwar.j/cran-all/cranData/usdm/R/Variogram.R
|
# Author: Babak Naimi, [email protected]
# Date : July. 2015
# Last update: June 2023
# Version 1.6
# Licence GPL v3
if (!isGeneric("exclude")) {
setGeneric("exclude", function(x,vif,...)
standardGeneric("exclude"))
}
setMethod ('exclude' ,signature(x='RasterStack', vif='VIF'),
function (x,vif,...) {
n <- names(x)
#for (i in 1:length(vif@results[,1])) if (!as.character(vif@results[i,1]) %in% n) stop("One or all variables in VIF are not in the Raster object")
if (length(vif@excluded) > 0) {
.ex <- vif@excluded %in% n
if (all(!.ex)) {
stop('None of the variables in the Raster object are among those variables specified in the VIF object to be excluded!')
} else if (any(!.ex)) {
warning(paste0('The variables (',paste(vif@excluded[!.ex],collapse = ', '),') are not available in the Raster object!'))
}
j <- which(n %in% vif@excluded)
x <- x[[-j]]
} else {
warning('No variable to exclude!')
}
x
}
)
setMethod ('exclude' ,signature(x='RasterBrick', vif='VIF'),
function (x,vif,...) {
n <- names(x)
if (length(vif@excluded) > 0) {
.ex <- vif@excluded %in% n
if (all(!.ex)) {
stop('None of the variables in the Raster object are among those variables specified in the VIF object to be excluded!')
} else if (any(!.ex)) {
warning(paste0('The variables (',paste(vif@excluded[!.ex],collapse = ', '),') are not available in the Raster object!'))
}
j <- which(n %in% vif@excluded)
x <- x[[-j]]
} else {
warning('No variable to exclude!')
}
x
}
)
#-----
setMethod ('exclude' ,signature(x='SpatRaster', vif='VIF'),
function (x,vif,...) {
n <- names(x)
if (length(vif@excluded) > 0) {
.ex <- vif@excluded %in% n
if (all(!.ex)) {
stop('None of the variables in the Raster object are among those variables specified in the VIF object to be excluded!')
} else if (any(!.ex)) {
warning(paste0('The variables (',paste(vif@excluded[!.ex],collapse = ', '),') are not available in the Raster object!'))
}
j <- which(n %in% vif@excluded)
x <- x[[-j]]
} else {
warning('No variable to exclude!')
}
x
}
)
setMethod ('exclude' ,signature(x='data.frame', vif='VIF'),
function (x,vif, ...) {
n <- colnames(x)
if (length(vif@excluded) > 0) {
.ex <- vif@excluded %in% n
if (all(!.ex)) {
          stop('None of the variables in the data.frame are among those variables specified in the VIF object to be excluded!')
} else if (any(!.ex)) {
          warning(paste0('The variables (',paste(vif@excluded[!.ex],collapse = ', '),') are not available in the data.frame!'))
}
j <- which(n %in% vif@excluded)
x <- x[,-j,drop=FALSE]
} else {
warning('No variable to exclude!')
}
x
}
)
setMethod ('exclude' ,signature(x='matrix', vif='VIF'),
function (x,vif, ...) {
n <- colnames(x)
if (length(vif@excluded) > 0) {
.ex <- vif@excluded %in% n
if (all(!.ex)) {
          stop('None of the variables in the matrix are among those variables specified in the VIF object to be excluded!')
} else if (any(!.ex)) {
          warning(paste0('The variables (',paste(vif@excluded[!.ex],collapse = ', '),') are not available in the matrix!'))
}
j <- which(n %in% vif@excluded)
x <- x[,-j,drop=FALSE]
} else {
warning('No variable to exclude!')
}
x
}
)
setMethod ('exclude' ,signature(x='RasterStack', vif='missing'),
function (x,vif,th,...) {
n <- names(x)
if(missing(th)) th <- 10
vif <- vifstep(x,...)
print(vif)
# if (length(vif@excluded) > 0) x[[as.character(vif@results[,1])]]
# else x
exclude(x,vif)
}
)
setMethod ('exclude' ,signature(x='RasterBrick', vif='missing'),
function (x,vif, th,...) {
n <- names(x)
if(missing(th)) th <- 10
vif <- vifstep(x,th=th,...)
print(vif)
exclude(x,vif)
}
)
setMethod ('exclude' ,signature(x='SpatRaster', vif='missing'),
function (x,vif, th,...) {
n <- names(x)
if(missing(th)) th <- 10
vif <- vifstep(x,th=th,...)
print(vif)
exclude(x,vif)
}
)
setMethod ('exclude' ,signature(x='data.frame', vif='missing'),
function (x,vif, th,...) {
n <- colnames(x)
if(missing(th)) th <- 10
vif <- vifstep(x,th=th,...)
print(vif)
exclude(x,vif)
}
)
setMethod ('exclude' ,signature(x='matrix', vif='missing'),
function (x,vif, th,...) {
n <- colnames(x)
if(missing(th)) th <- 10
vif <- vifstep(x,th=th,...)
print(vif)
exclude(x,vif)
}
)
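# A minimal usage sketch (not part of the package code; assumes 'preds' is a
# multi-layer SpatRaster of predictor variables):
# v <- vifstep(preds, th = 10)   # identify collinear variables
# preds2 <- exclude(preds, v)    # drop the layers flagged in v@excluded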
|
/scratch/gouwar.j/cran-all/cranData/usdm/R/exclude.R
|
# Author: Babak Naimi, [email protected]
# Date : March 2013
# Last Update: June 2023
# Version 1.3
# Licence GPL v3
#----------
.neighborRowCol <- function(x,cell,w) {
nc <- ncol(x)
nr <- nrow(x)
row <- trunc((cell-1)/nc) + 1
col <- cell - ((row-1) * nc)
mr <- matrix(ncol=w,nrow=w)
tr <- trunc(w/2)
for (i in 1:w) {
mr[i,] <-tr
tr <- tr - 1
}
i <- trunc(w/2)+1
mr[i,i] <- NA
row <- row + as(mr,"vector")
col <- col + as(-t(mr),"vector")
row[row < 1 | row > nr] <- NA
col[col < 1 | col > nc] <- NA
rc <- cbind(row, col)
rc
}
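# .filter builds an annulus (ring) weight matrix for a raster with resolution r:
# cells whose centre lies at a distance in (d1, d2] get weight 1, all other cells
# NA; the central cell itself is also set to 1.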
.filter<-function(r,d1=0,d2) {
c<- d2%/%r
x<- y<- seq(-c,c,1)
eg<- expand.grid(x,y)
eg[1]<- -eg[1]
eg[3]<- sqrt(eg[1]^2+eg[2]^2)
ndim<- c*2+1
m<-matrix(eg[,3],ncol=ndim,nrow=ndim)*r
mw<- which(m > d1 & m <= d2)
m[mw] = 1
m[-mw] = NA
mw <- trunc(length(m)/2)+1
m[mw]<- 1
return (m)
}
.localK<-function(x,d1=0,d2,cell) {
filter<-.filter(r=res(x)[1],d1=d1,d2=d2)
if (length(filter) < 3) stop("d2 is less than resolution!")
xv <- x[1:ncell(x)]
w <- which(!is.na(xv))
xv <- xv[w]
ras <- FALSE
if (missing(cell)) {
cell <-c(1:ncell(x))[w]
ras <- TRUE
xx <- raster(x)
}
nf <- ncol(filter)
n <- length(xv)
n1 <- n-1
n2 <- n-2
x <- as(x,"matrix")
out <- rep(NA,length(cell))
for (c in 1:length(cell)) {
xi <- t(x)[cell[c]]
if (!is.na(xi)) {
xn <- .neighborRowCol(x,cell[c],nf)
xn[,1] <- xn[,1] * as(filter,"vector")
xn <- na.omit(xn)
xn <- unlist(lapply(1:nrow(xn),function(r) {x[xn[r,1],xn[r,2]]}))
xn <- xn[!is.na(xn)]
zj <- xv - xi
Di <- sum(abs(zj))/n1
Wi <- length(xn)
Ki <- sum(abs(xi-xn))
Fi <- sum(zj*zj)/n1
VKi <- Wi*(n1-Wi) * (Fi-Di*Di) / n2
out[c] <- (Ki - Wi*Di) / sqrt(VKi)
} else out[c] <- NA
}
if (ras) {
xx[cell] <- out
return(xx)
} else return(out)
}
#----
# terra
.localK.t<-function(x,d1=0,d2,cell) {
filter<-.filter(r=res(x)[1],d1=d1,d2=d2)
if (length(filter) < 3) stop("d2 is less than resolution!")
xv <- x[1:ncell(x)][,1]
w <- which(!is.na(xv))
xv <- xv[w]
ras <- FALSE
if (missing(cell)) {
cell <-c(1:ncell(x))[w]
ras <- TRUE
xx <- rast(x)
}
nf <- ncol(filter)
n <- length(xv)
n1 <- n-1
n2 <- n-2
x <- as.matrix(x,wide=TRUE)
.xt <- t(x)
out <- rep(NA,length(cell))
for (c in 1:length(cell)) {
xi <- .xt[cell[c]]
if (!is.na(xi)) {
xn <- .neighborRowCol(x,cell[c],nf)
xn[,1] <- xn[,1] * as(filter,"vector")
xn <- na.omit(xn)
xn <- unlist(lapply(1:nrow(xn),function(r) {x[xn[r,1],xn[r,2]]}))
xn <- xn[!is.na(xn)]
zj <- xv - xi
Di <- sum(abs(zj))/n1
Wi <- length(xn)
Ki <- sum(abs(xi-xn))
Fi <- sum(zj*zj)/n1
VKi <- Wi*(n1-Wi) * (Fi-Di*Di) / n2
out[c] <- (Ki - Wi*Di) / sqrt(VKi)
} else out[c] <- NA
}
if (ras) {
xx[cell] <- out
return(xx)
} else return(out)
}
.localGeary <- function(x,d1=0,d2,cell) {
filter<-.filter(r=res(x)[1],d1=d1,d2=d2)
if (length(filter) < 3) stop("d2 is less than resolution!")
sigmaX <- cellStats(x,sum)
i <- trunc(length(filter)/2)+1
ras <- FALSE
if (missing(cell)) {
w <- which(!is.na(x[1:ncell(x)]))
n <- length(w)
cell <-c(1:ncell(x))[w]
rm(w)
ras <- TRUE
xx <- raster(x)
} else n <- ncell(x) - cellStats(x,"countNA")
nf <- ncol(filter)
n1 <- n-1
s2 <- cellStats(x,var)
s2 <- (s2 * n1)/n
x <- as(x,"matrix")
out <- rep(NA,length(cell))
for (c in 1:length(cell)) {
xi <- t(x)[cell[c]]
if (!is.na(xi)) {
xn <- .neighborRowCol(x,cell[c],nf)
xn[,1] <- xn[,1] * as(filter,"vector")
xn <- na.omit(xn)
xn <- unlist(lapply(1:nrow(xn),function(r) {x[xn[r,1],xn[r,2]]}))
xn <- xn[!is.na(xn)]
Eij <- sum((xi - xn)^2)
out[c] <- Eij / s2
} else out[c] <- NA
}
if (ras) {
xx[cell] <- out
return(xx)
} else return(out)
}
#---
.localGeary.t <- function(x,d1=0,d2,cell) {
filter<-.filter(r=res(x)[1],d1=d1,d2=d2)
if (length(filter) < 3) stop("d2 is less than resolution!")
sigmaX <- global(x,"sum")[1,1]
i <- trunc(length(filter)/2)+1
ras <- FALSE
if (missing(cell)) {
w <- which(!is.na(x[1:ncell(x)]))
n <- length(w)
cell <-c(1:ncell(x))[w]
rm(w)
ras <- TRUE
xx <- rast(x)
} else n <- ncell(x) - global(x,"isNA")[1,1]
nf <- ncol(filter)
n1 <- n-1
s2 <- global(x,"sd")[1,1]
s2 <- (s2 * s2 * n1)/n
x <- as.matrix(x,wide=TRUE)
.xt <- t(x)
out <- rep(NA,length(cell))
for (c in 1:length(cell)) {
xi <- .xt[cell[c]]
if (!is.na(xi)) {
xn <- .neighborRowCol(x,cell[c],nf)
xn[,1] <- xn[,1] * as(filter,"vector")
xn <- na.omit(xn)
xn <- unlist(lapply(1:nrow(xn),function(r) {x[xn[r,1],xn[r,2]]}))
xn <- xn[!is.na(xn)]
Eij <- sum((xi - xn)^2)
out[c] <- Eij / s2
} else out[c] <- NA
}
if (ras) {
xx[cell] <- out
return(xx)
} else return(out)
}
#---
.localG <- function(x,d1=0,d2,cell) {
filter<-.filter(r=res(x)[1],d1=d1,d2=d2)
if (length(filter) < 3) stop("d2 is less than resolution!")
sigmaX <- cellStats(x,sum)
sigmaX2 <- cellStats(x^2,sum)
i <- trunc(length(filter)/2)+1
ras <- FALSE
if (missing(cell)) {
w <- which(!is.na(x[1:ncell(x)]))
n <- length(w)
cell <-c(1:ncell(x))[w]
rm(w)
ras <- TRUE
xx <- raster(x)
} else n <- ncell(x) - cellStats(x,"countNA")
nf <- ncol(filter)
n1 <- n - 1
n2 <- n - 2
s2 <- cellStats(x,var)
s2 <- (s2 * n1)/n
x <- as(x,"matrix")
out <- rep(NA,length(cell))
for (c in 1:length(cell)) {
xi <- t(x)[cell[c]]
if (!is.na(xi)) {
xn <- .neighborRowCol(x,cell[c],nf)
xn[,1] <- xn[,1] * as(filter,"vector")
xn <- na.omit(xn)
xn <- unlist(lapply(1:nrow(xn),function(r) {x[xn[r,1],xn[r,2]]}))
xn <- xn[!is.na(xn)]
s.xi <- sum(xn)
xbar.i <- (sigmaX - xi)/n1
Wi <- length(xn)
si <- sqrt(((sigmaX2 - xi^2) / n1) - xbar.i^2)
G <- (s.xi - Wi * xbar.i) / (si * sqrt(((n1*Wi) - Wi^2) / n2))
out[c] <- G
} else out[c] <- NA
}
if (ras) {
xx[cell] <- out
return(xx)
} else return(out)
}
#---
.localG.t <- function(x,d1=0,d2,cell) {
filter<-.filter(r=res(x)[1],d1=d1,d2=d2)
if (length(filter) < 3) stop("d2 is less than resolution!")
sigmaX <- global(x,"sum")[1,1]
sigmaX2 <- global(x^2,"sum")[1,1]
i <- trunc(length(filter)/2)+1
ras <- FALSE
if (missing(cell)) {
w <- which(!is.na(x[1:ncell(x)]))
n <- length(w)
cell <-c(1:ncell(x))[w]
rm(w)
ras <- TRUE
xx <- rast(x)
} else n <- ncell(x) - global(x,"isNA")[1,1]
nf <- ncol(filter)
n1 <- n - 1
n2 <- n - 2
s2 <- global(x,'sd')[1,1]
s2 <- (s2 * s2 * n1)/n
x <- as.matrix(x,wide=TRUE)
.xt <- t(x)
out <- rep(NA,length(cell))
for (c in 1:length(cell)) {
xi <- .xt[cell[c]]
if (!is.na(xi)) {
xn <- .neighborRowCol(x,cell[c],nf)
xn[,1] <- xn[,1] * as(filter,"vector")
xn <- na.omit(xn)
xn <- unlist(lapply(1:nrow(xn),function(r) {x[xn[r,1],xn[r,2]]}))
xn <- xn[!is.na(xn)]
s.xi <- sum(xn)
xbar.i <- (sigmaX - xi)/n1
Wi <- length(xn)
si <- sqrt(((sigmaX2 - xi^2) / n1) - xbar.i^2)
G <- (s.xi - Wi * xbar.i) / (si * sqrt(((n1*Wi) - Wi^2) / n2))
out[c] <- G
} else out[c] <- NA
}
if (ras) {
xx[cell] <- out
return(xx)
} else return(out)
}
#---
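# .localMoran returns the standardized local Moran's I (a z-score) for each target
# cell, using the ring neighbourhood defined by d1/d2 and moments estimated from
# all non-NA cells of the raster.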
.localMoran <- function(x,d1=0,d2,cell) {
filter<-.filter(r=res(x)[1],d1=d1,d2=d2)
if (length(filter) < 3) stop("d2 is less than resolution!")
xv <- x[1:ncell(x)]
w <- which(!is.na(xv))
xv <- xv[w]
i <- trunc(length(filter)/2)+1
ras <- FALSE
if (missing(cell)) {
cell <-c(1:ncell(x))[w]
ras <- TRUE
xx <- raster(x)
}
nf <- ncol(filter)
n <- length(xv)
n1 <- n-1
n2 <- n-2
sigmaX <- sum(xv)
sigmaX2 <- sum(xv * xv)
s2 <- var(xv)
s2 <- (s2 * n1)/n
s4 <- sum((xv - mean(xv))^4)/n
rm(xv,w)
b2 <- s4 / (s2 ^ 2)
x <- as(x,"matrix")
out <- rep(NA,length(cell))
for (c in 1:length(cell)) {
xi <- t(x)[cell[c]]
if (!is.na(xi)) {
xn <- .neighborRowCol(x,cell[c],nf)
xn[,1] <- xn[,1] * as(filter,"vector")
xn <- na.omit(xn)
xn <- unlist(lapply(1:nrow(xn),function(r) {x[xn[r,1],xn[r,2]]}))
xn <- xn[!is.na(xn)]
s.x <- (sigmaX-xi)
xbar= s.x / n1
s <- ((sigmaX2 - xi * xi) - (s.x * s.x / n1)) / n1
z <- xn- xbar
lz <- sum(z)
Ii <- ((xi-xbar) / s) * lz
Wi <- length(xn)
EI <- -Wi/n1
VarI <- (Wi * (n - b2) / n1) + (Wi^2 * ((2*b2 - n) / (n1*n2))) - (Wi / n1^2)
out[c] <- (Ii - EI) / sqrt(VarI)
} else out[c] <- NA
}
if (ras) {
xx[cell] <- out
return(xx)
} else return(out)
}
#---
.localMoran.t <- function(x,d1=0,d2,cell) {
filter<-.filter(r=res(x)[1],d1=d1,d2=d2)
if (length(filter) < 3) stop("d2 is less than resolution!")
xv <- x[1:ncell(x)][,1]
w <- which(!is.na(xv))
xv <- xv[w]
i <- trunc(length(filter)/2)+1
ras <- FALSE
if (missing(cell)) {
cell <-c(1:ncell(x))[w]
ras <- TRUE
xx <- rast(x)
}
nf <- ncol(filter)
n <- length(xv)
n1 <- n-1
n2 <- n-2
sigmaX <- sum(xv)
sigmaX2 <- sum(xv * xv)
s2 <- var(xv)
s2 <- (s2 * n1)/n
s4 <- sum((xv - mean(xv))^4)/n
rm(xv,w)
b2 <- s4 / (s2 ^ 2)
x <- as.matrix(x,wide=TRUE)
.xt <- t(x)
out <- rep(NA,length(cell))
for (c in 1:length(cell)) {
xi <- .xt[cell[c]]
if (!is.na(xi)) {
xn <- .neighborRowCol(x,cell[c],nf)
xn[,1] <- xn[,1] * as(filter,"vector")
xn <- na.omit(xn)
xn <- unlist(lapply(1:nrow(xn),function(r) {x[xn[r,1],xn[r,2]]}))
xn <- xn[!is.na(xn)]
s.x <- (sigmaX-xi)
xbar= s.x / n1
s <- ((sigmaX2 - xi * xi) - (s.x * s.x / n1)) / n1
z <- xn- xbar
lz <- sum(z)
Ii <- ((xi-xbar) / s) * lz
Wi <- length(xn)
EI <- -Wi/n1
VarI <- (Wi * (n - b2) / n1) + (Wi^2 * ((2*b2 - n) / (n1*n2))) - (Wi / n1^2)
out[c] <- (Ii - EI) / sqrt(VarI)
} else out[c] <- NA
}
if (ras) {
xx[cell] <- out
return(xx)
} else return(out)
}
#---
.localG2 <- function(x,d1=0,d2,cell) {
filter<-.filter(r=res(x)[1],d1=d1,d2=d2)
if (length(filter) < 3) stop("d2 is less than resolution!")
sigmaX <- cellStats(x,sum)
sigmaX2 <- cellStats(x^2,sum)
i <- trunc(length(filter)/2)+1
ras <- FALSE
if (missing(cell)) {
w <- which(!is.na(x[1:ncell(x)]))
n <- length(w)
cell <-c(1:ncell(x))[w]
rm(w)
ras <- TRUE
xx <- raster(x)
} else n <- ncell(x) - cellStats(x,"countNA")
nf <- ncol(filter)
n1 <- n - 1
n2 <- n - 2
xbar <- sigmaX / n
s2 <- (sigmaX2 / n) - (xbar ^ 2)
si <- sqrt(s2)
x <- as(x,"matrix")
out <- rep(NA,length(cell))
for (c in 1:length(cell)) {
xi <- t(x)[cell[c]]
if (!is.na(xi)) {
xn <- .neighborRowCol(x,cell[c],nf)
xn[,1] <- xn[,1] * as(filter,"vector")
xn <- na.omit(xn)
xn <- unlist(lapply(1:nrow(xn),function(r) {x[xn[r,1],xn[r,2]]}))
xn <- xn[!is.na(xn)]
s.xi <- sum(xn) + xi
Wi <- length(xn)+1
G <- (s.xi - Wi * xbar) / (si * sqrt(((n*Wi) - Wi^2) / n1))
out[c] <- G
} else out[c] <- NA
}
if (ras) {
xx[cell] <- out
return(xx)
} else return(out)
}
#---
.localG2.t <- function(x,d1=0,d2,cell) {
filter<-.filter(r=res(x)[1],d1=d1,d2=d2)
if (length(filter) < 3) stop("d2 is less than resolution!")
sigmaX <- global(x,"sum")[1,1]
sigmaX2 <- global(x^2,"sum")[1,1]
i <- trunc(length(filter)/2)+1
ras <- FALSE
if (missing(cell)) {
w <- which(!is.na(x[1:ncell(x)]))
n <- length(w)
cell <-c(1:ncell(x))[w]
rm(w)
ras <- TRUE
xx <- rast(x)
} else n <- ncell(x) - global(x,"isNA")[1,1]
nf <- ncol(filter)
n1 <- n - 1
n2 <- n - 2
xbar <- sigmaX / n
s2 <- (sigmaX2 / n) - (xbar ^ 2)
si <- sqrt(s2)
x <- as.matrix(x,wide=TRUE)
.xt <- t(x)
out <- rep(NA,length(cell))
for (c in 1:length(cell)) {
xi <- .xt[cell[c]]
if (!is.na(xi)) {
xn <- .neighborRowCol(x,cell[c],nf)
xn[,1] <- xn[,1] * as(filter,"vector")
xn <- na.omit(xn)
xn <- unlist(lapply(1:nrow(xn),function(r) {x[xn[r,1],xn[r,2]]}))
xn <- xn[!is.na(xn)]
s.xi <- sum(xn) + xi
Wi <- length(xn)+1
G <- (s.xi - Wi * xbar) / (si * sqrt(((n*Wi) - Wi^2) / n1))
out[c] <- G
} else out[c] <- NA
}
if (ras) {
xx[cell] <- out
return(xx)
} else return(out)
}
if (!isGeneric("lisa")) {
setGeneric("lisa", function(x, y, d1=0, d2, cell, statistic="I")
standardGeneric("lisa"))
}
setMethod('lisa', signature(x='Raster',y='missing'),
function(x, y, d1=0, d2, cell, statistic="I") {
if (!statistic %in% c("K1","I","G","G*","C","k1","i","c","g","g*")) stop("statistic should be one of K1, I, G, G*, and C")
if (nlayers(x) == 1) {
if (missing(cell)) {
if (statistic == "K1" | statistic == "k1") return(.localK(x,d1=d1,d2=d2))
if (statistic == "I" | statistic == "i") return(.localMoran(x,d1=d1,d2=d2))
if (statistic == "G" | statistic == "g") return(.localG(x,d1=d1,d2=d2))
if (statistic == "G*" | statistic == "g*") return(.localG2(x,d1=d1,d2=d2))
if (statistic == "C" | statistic == "c") return(.localGeary(x,d1=d1,d2=d2))
} else {
if (statistic == "K1" | statistic == "k1") return(.localK(x,d1=d1,d2=d2,cell=cell))
if (statistic == "I" | statistic == "i") return(.localMoran(x,d1=d1,d2=d2,cell=cell))
if (statistic == "G" | statistic == "g") return(.localG(x,d1=d1,d2=d2,cell=cell))
if (statistic == "G*" | statistic == "g*") return(.localG2(x,d1=d1,d2=d2,cell=cell))
if (statistic == "C" | statistic == "c") return(.localGeary(x,d1=d1,d2=d2,cell=cell))
}
} else {
if (missing(cell)) {
out <- raster(x[[1]])
if (statistic == "K1" | statistic == "k1") {
for (i in 1:nlayers(x)) out <- addLayer(out,.localK(x[[i]],d1=d1,d2=d2))
names(out) <- paste(statistic," _statistic_for_",names(x),sep="")
out <- brick(out)
}
if (statistic == "I" | statistic == "i") {
for (i in 1:nlayers(x)) out <- addLayer(out,.localMoran(x[[i]],d1=d1,d2=d2))
names(out) <- paste(statistic," _statistic_for_",names(x),sep="")
out <- brick(out)
}
if (statistic == "G" | statistic == "g") {
for (i in 1:nlayers(x)) out <- addLayer(out,.localG(x[[i]],d1=d1,d2=d2))
names(out) <- paste(statistic," _statistic_for_",names(x),sep="")
out <- brick(out)
}
if (statistic == "G*" | statistic == "g*") {
for (i in 1:nlayers(x)) out <- addLayer(out,.localG2(x[[i]],d1=d1,d2=d2))
names(out) <- paste(statistic," _statistic_for_",names(x),sep="")
out <- brick(out)
}
if (statistic == "C" | statistic == "c") {
for (i in 1:nlayers(x)) out <- addLayer(out,.localGeary(x[[i]],d1=d1,d2=d2))
names(out) <- paste(statistic," _statistic_for_",names(x),sep="")
out <- brick(out)
}
} else {
out <- matrix(nrow=length(cell),ncol=nlayers(x))
colnames(out) <- names(x)
rownames(out) <- cell
if (statistic == "K1" | statistic == "k1") for (i in 1:nlayers(x)) out[,i] <- .localK(x[[i]],d1=d1,d2=d2,cell=cell)
if (statistic == "I" | statistic == "i") for (i in 1:nlayers(x)) out[,i] <- .localMoran(x[[i]],d1=d1,d2=d2,cell=cell)
if (statistic == "G" | statistic == "g") for (i in 1:nlayers(x)) out[,i] <- .localG(x[[i]],d1=d1,d2=d2,cell=cell)
if (statistic == "G*" | statistic == "g*") for (i in 1:nlayers(x)) out[,i] <- .localG2(x[[i]],d1=d1,d2=d2,cell=cell)
if (statistic == "C" | statistic == "c") for (i in 1:nlayers(x)) out[,i] <- .localGeary(x[[i]],d1=d1,d2=d2,cell=cell)
}
out
}
}
)
setMethod('lisa', signature(x='Raster',y='SpatialPoints'),
function(x, y, d1=0, d2, cell, statistic="I") {
if (!statistic %in% c("K1","I","G","G*","C")) stop("statistic should be one of K1, I, G, G*, and C")
xy <- coordinates(y)
cell <- cellFromXY(x,xy)
if (nlayers(x) == 1) {
if (statistic == "K1" | statistic == "k1") return(.localK(x,d1=d1,d2=d2,cell=cell))
if (statistic == "I" | statistic == "i") return(.localMoran(x,d1=d1,d2=d2,cell=cell))
if (statistic == "G" | statistic == "g") return(.localG(x,d1=d1,d2=d2,cell=cell))
if (statistic == "G*" | statistic == "g*") return(.localG2(x,d1=d1,d2=d2,cell=cell))
if (statistic == "C" | statistic == "c") return(.localGeary(x,d1=d1,d2=d2,cell=cell))
} else {
out <- matrix(nrow=length(cell),ncol=nlayers(x))
colnames(out) <- names(x)
rownames(out) <- cell
if (statistic == "K1" | statistic == "k1") for (i in 1:nlayers(x)) out[,i] <- .localK(x[[i]],d1=d1,d2=d2,cell=cell)
if (statistic == "I" | statistic == "i") for (i in 1:nlayers(x)) out[,i] <- .localMoran(x[[i]],d1=d1,d2=d2,cell=cell)
if (statistic == "G" | statistic == "g") for (i in 1:nlayers(x)) out[,i] <- .localG(x[[i]],d1=d1,d2=d2,cell=cell)
if (statistic == "G*" | statistic == "g*") for (i in 1:nlayers(x)) out[,i] <- .localG2(x[[i]],d1=d1,d2=d2,cell=cell)
if (statistic == "C" | statistic == "c") for (i in 1:nlayers(x)) out[,i] <- .localGeary(x[[i]],d1=d1,d2=d2,cell=cell)
out
}
}
)
setMethod('lisa', signature(x='Raster',y='SpatialPointsDataFrame'),
function(x, y, d1=0, d2, cell, statistic="I") {
if (!statistic %in% c("K1","I","G","G*","C")) stop("statistic should be one of K1, I, G, G*, and C")
xy <- coordinates(y)
cell <- cellFromXY(x,xy)
if (nlayers(x) == 1) {
if (statistic == "K1" | statistic == "k1") return(.localK(x,d1=d1,d2=d2,cell=cell))
if (statistic == "I" | statistic == "i") return(.localMoran(x,d1=d1,d2=d2,cell=cell))
if (statistic == "G" | statistic == "g") return(.localG(x,d1=d1,d2=d2,cell=cell))
if (statistic == "G*" | statistic == "g*") return(.localG2(x,d1=d1,d2=d2,cell=cell))
if (statistic == "C" | statistic == "c") return(.localGeary(x,d1=d1,d2=d2,cell=cell))
} else {
out <- matrix(nrow=length(cell),ncol=nlayers(x))
colnames(out) <- names(x)
rownames(out) <- cell
if (statistic == "K1" | statistic == "k1") for (i in 1:nlayers(x)) out[,i] <- .localK(x[[i]],d1=d1,d2=d2,cell=cell)
if (statistic == "I" | statistic == "i") for (i in 1:nlayers(x)) out[,i] <- .localMoran(x[[i]],d1=d1,d2=d2,cell=cell)
if (statistic == "G" | statistic == "g") for (i in 1:nlayers(x)) out[,i] <- .localG(x[[i]],d1=d1,d2=d2,cell=cell)
if (statistic == "G*" | statistic == "g*") for (i in 1:nlayers(x)) out[,i] <- .localG2(x[[i]],d1=d1,d2=d2,cell=cell)
if (statistic == "C" | statistic == "c") for (i in 1:nlayers(x)) out[,i] <- .localGeary(x[[i]],d1=d1,d2=d2,cell=cell)
out
}
}
)
#--------
setMethod('lisa', signature(x='SpatRaster',y='missing'),
function(x, y, d1=0, d2, cell, statistic="I") {
if (!statistic %in% c("K1","I","G","G*","C","k1","i","c","g","g*")) stop("statistic should be one of K1, I, G, G*, and C")
if (nlyr(x) == 1) {
if (missing(cell)) {
if (statistic == "K1" | statistic == "k1") return(.localK.t(x,d1=d1,d2=d2))
if (statistic == "I" | statistic == "i") return(.localMoran.t(x,d1=d1,d2=d2))
if (statistic == "G" | statistic == "g") return(.localG.t(x,d1=d1,d2=d2))
if (statistic == "G*" | statistic == "g*") return(.localG2.t(x,d1=d1,d2=d2))
if (statistic == "C" | statistic == "c") return(.localGeary.t(x,d1=d1,d2=d2))
} else {
if (statistic == "K1" | statistic == "k1") return(.localK.t(x,d1=d1,d2=d2,cell=cell))
if (statistic == "I" | statistic == "i") return(.localMoran.t(x,d1=d1,d2=d2,cell=cell))
if (statistic == "G" | statistic == "g") return(.localG.t(x,d1=d1,d2=d2,cell=cell))
if (statistic == "G*" | statistic == "g*") return(.localG2.t(x,d1=d1,d2=d2,cell=cell))
if (statistic == "C" | statistic == "c") return(.localGeary.t(x,d1=d1,d2=d2,cell=cell))
}
} else {
if (missing(cell)) {
if (statistic == "K1" | statistic == "k1") {
out <- .localK.t(x[[1]],d1=d1,d2=d2)
for (i in 2:nlyr(x)) out <- c(out,.localK.t(x[[i]],d1=d1,d2=d2))
names(out) <- paste(statistic," _statistic_for_",names(x),sep="")
out <- brick(out)
}
if (statistic == "I" | statistic == "i") {
out <- .localMoran.t(x[[1]],d1=d1,d2=d2)
for (i in 2:nlyr(x)) out <- c(out,.localMoran.t(x[[i]],d1=d1,d2=d2))
names(out) <- paste(statistic," _statistic_for_",names(x),sep="")
out <- brick(out)
}
if (statistic == "G" | statistic == "g") {
out <- .localG.t(x[[1]],d1=d1,d2=d2)
for (i in 2:nlyr(x)) out <- c(out,.localG.t(x[[i]],d1=d1,d2=d2))
names(out) <- paste(statistic," _statistic_for_",names(x),sep="")
out <- brick(out)
}
if (statistic == "G*" | statistic == "g*") {
out <- .localG2.t(x[[1]],d1=d1,d2=d2)
for (i in 2:nlyr(x)) out <- c(out,.localG2.t(x[[i]],d1=d1,d2=d2))
names(out) <- paste(statistic," _statistic_for_",names(x),sep="")
out <- brick(out)
}
if (statistic == "C" | statistic == "c") {
out <- .localGeary.t(x[[1]],d1=d1,d2=d2)
              for (i in 2:nlyr(x)) out <- c(out,.localGeary.t(x[[i]],d1=d1,d2=d2))
names(out) <- paste(statistic," _statistic_for_",names(x),sep="")
out <- brick(out)
}
} else {
out <- matrix(nrow=length(cell),ncol=nlyr(x))
colnames(out) <- names(x)
rownames(out) <- cell
if (statistic == "K1" | statistic == "k1") for (i in 1:nlyr(x)) out[,i] <- .localK.t(x[[i]],d1=d1,d2=d2,cell=cell)
if (statistic == "I" | statistic == "i") for (i in 1:nlyr(x)) out[,i] <- .localMoran.t(x[[i]],d1=d1,d2=d2,cell=cell)
if (statistic == "G" | statistic == "g") for (i in 1:nlyr(x)) out[,i] <- .localG.t(x[[i]],d1=d1,d2=d2,cell=cell)
if (statistic == "G*" | statistic == "g*") for (i in 1:nlyr(x)) out[,i] <- .localG2.t(x[[i]],d1=d1,d2=d2,cell=cell)
if (statistic == "C" | statistic == "c") for (i in 1:nlyr(x)) out[,i] <- .localGeary.t(x[[i]],d1=d1,d2=d2,cell=cell)
}
out
}
}
)
#-----
setMethod('lisa', signature(x='SpatRaster',y='SpatVector'),
function(x, y, d1=0, d2, cell, statistic="I") {
if (!statistic %in% c("K1","I","G","G*","C")) stop("statistic should be one of K1, I, G, G*, and C")
xy <- geom(y)[,c('x','y')]
cell <- cellFromXY(x,xy)
if (nlyr(x) == 1) {
if (statistic == "K1" | statistic == "k1") return(.localK.t(x,d1=d1,d2=d2,cell=cell))
if (statistic == "I" | statistic == "i") return(.localMoran.t(x,d1=d1,d2=d2,cell=cell))
if (statistic == "G" | statistic == "g") return(.localG.t(x,d1=d1,d2=d2,cell=cell))
if (statistic == "G*" | statistic == "g*") return(.localG2.t(x,d1=d1,d2=d2,cell=cell))
if (statistic == "C" | statistic == "c") return(.localGeary.t(x,d1=d1,d2=d2,cell=cell))
} else {
out <- matrix(nrow=length(cell),ncol=nlyr(x))
colnames(out) <- names(x)
rownames(out) <- cell
if (statistic == "K1" | statistic == "k1") for (i in 1:nlyr(x)) out[,i] <- .localK.t(x[[i]],d1=d1,d2=d2,cell=cell)
if (statistic == "I" | statistic == "i") for (i in 1:nlyr(x)) out[,i] <- .localMoran.t(x[[i]],d1=d1,d2=d2,cell=cell)
if (statistic == "G" | statistic == "g") for (i in 1:nlyr(x)) out[,i] <- .localG.t(x[[i]],d1=d1,d2=d2,cell=cell)
if (statistic == "G*" | statistic == "g*") for (i in 1:nlyr(x)) out[,i] <- .localG2.t(x[[i]],d1=d1,d2=d2,cell=cell)
if (statistic == "C" | statistic == "c") for (i in 1:nlyr(x)) out[,i] <- .localGeary.t(x[[i]],d1=d1,d2=d2,cell=cell)
out
}
}
)
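# A minimal usage sketch (not part of the package code; assumes 'r' is a
# single-layer SpatRaster and 'pts' a SpatVector of points; d2 is in map units):
# li <- lisa(r, d2 = 2000, statistic = "I")            # local Moran's I as a raster
# li.pts <- lisa(r, pts, d2 = 2000, statistic = "G*")  # G* at the point locations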
#--------
|
/scratch/gouwar.j/cran-all/cranData/usdm/R/lisa.R
|
# Author: Babak Naimi, [email protected]
# Date : Sep. 2012
# Last Update : July 2023
# Version 1.4
# Licence GPL v3
.level <- function(levels) {
if (length(levels) == 1) {
if (levels == 0) levels <- c(-6,-3,0,3,6) # default
else levels <- c(-abs(levels),0,abs(levels))
} else {
if (length(levels[levels > 0]) > 0 & length(levels[levels < 0]) > 0) {
if(length(levels[levels > 0]) == length(levels[levels < 0])) levels <- sort(c(levels[levels < 0],0,levels[levels > 0]))
else if(length(levels[levels > 0]) > length(levels[levels < 0])) levels <- sort(c(-levels[levels > 0],0,levels[levels > 0]))
else levels <- sort(c(levels[levels < 0],0,-levels[levels < 0]))
}
else if (length(levels[levels > 0]) > 0) levels <- sort(c(-levels[levels > 0],0,levels[levels > 0]))
else levels <- sort(c(levels[levels < 0],0,-levels[levels < 0]))
}
levels
}
#====
.bbox <- function(v) {
t(matrix(ext(v),2))
}
#----
.coordinates <- function(x) {
geom(x)[,c('x','y')]
}
.plot.specisLISA <- function(x, y, cex=2,levels, xyLegend, xlab="X Coordinates",ylab="Y Coordinates", main, ...) {
op <- par(mar = par()$mar)
par(mar=par()$mar + c(0,2,0,2))
if (!missing(y)) .bx <- .bbox(y)
else .bx <- .bbox(x@species)
orig.cex <- cex
dx <- ((as.vector(.bx[1,2]) - as.vector(.bx[1,1])) * 0.04) /2
limx <-c(as.vector(.bx[1,1]) - dx,as.vector(.bx[1,2])+dx)
dy <- ((as.vector(.bx[2,2]) - as.vector(.bx[2,1])) * 0.04) /2
limy <-c(as.vector(.bx[2,1]) - dy, as.vector(.bx[2,2])+dy)
plot(0,0,xlim=limx,ylim=limy,xlab=xlab,ylab=ylab,main=main)
# if (!missing(y)) {
# plot(0,0,xlim=limx,ylim=limy,xlab=xlab,ylab=ylab,main=main)
# } else {
# plot(0,0,xlim=limx,ylim=limy,xlab="X Coordinate",ylab="Y Coordinate",main=main)
# }
cx <- rep(NA,length(x@LISA))
cx <- ifelse(x@LISA <= levels[1] | x@LISA >= levels[length(levels)],cex,cx)
cx.d <- (cex - 0.5) / trunc(length(levels)/2)
for(i in 2:(trunc(length(levels)/2)+1)) {
cex <- cex - cx.d
cx <- ifelse((x@LISA > levels[i-1] & x@LISA <= levels[i]) | (x@LISA >= levels[length(levels)+1-i] & x@LISA < levels[length(levels)+2-i]),cex,cx)
}
pch <- ifelse(x@LISA >= 0,16,1)
xy <- .coordinates(x@species)
points(xy[,1],xy[,2],cex=cx,pch=pch)
#plot(x@species,cex=cx,pch=pch,xlim=limx,ylim=limy,xlab=xlab,ylab=ylab,main=main)
if (!missing(y)) plot(y,add=TRUE)
txt <- paste("< ",levels[1],sep="")
for (i in 2:length(levels)) txt <- c(txt,paste(levels[i-1]," : ",levels[i],sep=''))
txt <- c(txt,paste("> ",levels[length(levels)],sep=''))
cex <- orig.cex + cx.d
cx <- c()
  # compare with a small tolerance rather than exact equality to avoid an
  # infinite loop caused by floating-point rounding of cx.d
  while (abs(cex - 0.5) > 1e-8) {
    cex <- cex - cx.d
    cx <- c(cx,cex)
  }
  while (abs(cex - (orig.cex+cx.d)) > 1e-8) {
    cx <- c(cx,cex)
    cex <- cex + cx.d
  }
pch <- c(rep(1,length(txt)/2),rep(16,length(txt)/2))
legend(xyLegend[1],xyLegend[2],legend=txt,pt.cex=cx,pch=pch,title='LISA')
par(op)
}
#====
.plot.specisLISA.t <- function(x, y, cex=2,levels, xyLegend, xlab="X Coordinates",ylab="Y Coordinates", main, ...) {
op <- par(mar = par()$mar)
par(mar=par()$mar + c(0,2,0,2))
if (!missing(y)) .bx <- .bbox(y)
else .bx <- .bbox(x@species)
orig.cex <- cex
dx <- ((as.vector(.bx[1,2]) - as.vector(.bx[1,1])) * 0.2)
limx <-c(as.vector(.bx[1,1]) - (dx/2),as.vector(.bx[1,2])+dx)
dy <- ((as.vector(.bx[2,2]) - as.vector(.bx[2,1])) * 0.2) /2
limy <-c(as.vector(.bx[2,1]) - dy, as.vector(.bx[2,2]))
#plot(0,0,xlim=limx,ylim=limy,xlab=xlab,ylab=ylab,main=main)
# if (!missing(y)) {
# plot(0,0,xlim=limx,ylim=limy,xlab=xlab,ylab=ylab,main=main)
# } else {
# plot(0,0,xlim=limx,ylim=limy,xlab="X Coordinate",ylab="Y Coordinate",main=main)
# }
cx <- rep(NA,length(x@LISA))
cx <- ifelse(x@LISA <= levels[1] | x@LISA >= levels[length(levels)],cex,cx)
cx.d <- (cex - 0.5) / trunc(length(levels)/2)
for(i in 2:(trunc(length(levels)/2)+1)) {
cex <- cex - cx.d
cx <- ifelse((x@LISA > levels[i-1] & x@LISA <= levels[i]) | (x@LISA >= levels[length(levels)+1-i] & x@LISA < levels[length(levels)+2-i]),cex,cx)
}
pch <- ifelse(x@LISA >= 0,16,1)
#xy <- .coordinates(x@species)
#points(xy[,1],xy[,2],cex=cx,pch=pch)
plot(x@species,cex=cx,pch=pch,xlim=limx,ylim=limy,xlab=xlab,ylab=ylab,main=main)
if (!missing(y)) plot(y,add=TRUE)
txt <- paste("< ",levels[1],sep="")
for (i in 2:length(levels)) txt <- c(txt,paste(levels[i-1]," : ",levels[i],sep=''))
txt <- c(txt,paste("> ",levels[length(levels)],sep=''))
cex <- orig.cex + cx.d
cx <- c()
  # compare with a small tolerance rather than exact equality to avoid an
  # infinite loop caused by floating-point rounding of cx.d
  while (abs(cex - 0.5) > 1e-8) {
    cex <- cex - cx.d
    cx <- c(cx,cex)
  }
  while (abs(cex - (orig.cex+cx.d)) > 1e-8) {
    cx <- c(cx,cex)
    cex <- cex + cx.d
  }
pch <- c(rep(1,length(txt)/2),rep(16,length(txt)/2))
legend(xyLegend[1],xyLegend[2],legend=txt,pt.cex=cx,pch=pch,title='LISA')
par(op)
}
#====
# .plot.specisLISA.t <- function(x, y, cex=2,levels, xyLegend, xlab="X Coordinates",ylab="Y Coordinates", main, ...) {
# op <- par(mar = par()$mar)
#
# par(mar=par()$mar + c(0,2,0,2))
#
# orig.cex <- cex
# if (!missing(y)) {
# dx <- ((as.vector(.bbox(y)[1,2]) - as.vector(.bbox(y)[1,1])) * 0.04) /2
# limx <-c(as.vector(.bbox(y)[1,1]) - dx,as.vector(.bbox(y)[1,2])+dx)
# dy <- ((as.vector(.bbox(y)[2,2]) - as.vector(.bbox(y)[2,1])) * 0.04) /2
# limy <-c(as.vector(.bbox(y)[2,1]) - dy, as.vector(.bbox(y)[2,2])+dy)
#
# plot(0,0,xlim=limx,ylim=limy,xlab=xlab,ylab=ylab,main=main)
# }
# else {
# dx <- ((as.vector(.bbox(x@species)[1,2]) - as.vector(.bbox(x@species)[1,1])) * 0.1) /2
# limx <-c(as.vector(.bbox(x@species)[1,1]) - dx,as.vector(.bbox(x@species)[1,2])+dx)
# dy <- ((as.vector(.bbox(x@species)[2,2]) - as.vector(.bbox(x@species)[2,1])) * 0.1) /2
# limy <-c(as.vector(.bbox(x@species)[2,1]) - dy,as.vector(.bbox(x@species)[2,2])+dy)
#
# plot(0,0,xlim=limx,ylim=limy,xlab="X Coordinate",ylab="Y Coordinate",main=main)
# }
# cx <- rep(NA,length(x@LISA))
# cx <- ifelse(x@LISA <= levels[1] | x@LISA >= levels[length(levels)],cex,cx)
# cx.d <- (cex - 0.5) / trunc(length(levels)/2)
# for(i in 2:(trunc(length(levels)/2)+1)) {
# cex <- cex - cx.d
# cx <- ifelse((x@LISA > levels[i-1] & x@LISA <= levels[i]) | (x@LISA >= levels[length(levels)+1-i] & x@LISA < levels[length(levels)+2-i]),cex,cx)
# }
# pch <- ifelse(x@LISA >= 0,16,1)
# xy <- .coordinates(x@species)
# points(xy[,1],xy[,2],cex=cx,pch=pch)
# if (!missing(y)) plot(y,add=TRUE)
#
# txt <- paste("< ",levels[1],sep="")
# for (i in 2:length(levels)) txt <- c(txt,paste(levels[i-1]," : ",levels[i],sep=''))
# txt <- c(txt,paste("> ",levels[length(levels)],sep=''))
#
# cex <- orig.cex + cx.d
# cx <- c()
# while (cex != 0.5) {
# cex <- cex - cx.d
# cx <- c(cx,cex)
# }
# while (cex != (orig.cex+cx.d)) {
# cx <- c(cx,cex)
# cex <- cex + cx.d
# }
# pch <- c(rep(1,length(txt)/2),rep(16,length(txt)/2))
# legend(xyLegend[1],xyLegend[2],legend=txt,pt.cex=cx,pch=pch,title='LISA')
# par(op)
# }
if (!isGeneric("plot")) {
setGeneric("plot", function(x,y,...)
standardGeneric("plot"))
}
setMethod("plot", signature(x='speciesLISA',y="SpatialPolygons"),
function(x,y,cex=2,levels=c(0,3,6), xyLegend, xlab="X Coordinates",ylab="Y Coordinates", main, ...) {
if (missing(xyLegend)) xyLegend <- c(.bbox(y)[1,2] - (.bbox(y)[1,2]-.bbox(y)[1,1]) * 0.16,.bbox(y)[2,1] + (.bbox(y)[2,2]-.bbox(y)[2,1]) * 0.25)
else if(length(xyLegend) != 2 || !inherits(xyLegend,'numeric')) xyLegend <- c(.bbox(y)[1,2] - (.bbox(y)[1,2]-.bbox(y)[1,1]) * 0.16,.bbox(y)[2,1] + (.bbox(y)[2,2]-.bbox(y)[2,1]) * 0.25)
if (missing(main)) main <- "Impact of positional uncertainty based on LISA"
levels <- .level(levels)
.plot.specisLISA(x=x,y=y,levels=levels,xyLegend=xyLegend,xlab=xlab,ylab=ylab, main=main, ...)
}
)
setMethod("plot", signature(x='speciesLISA',y="SpatialPolygonsDataFrame"),
function(x,y,cex=2,levels=c(0,3,6), xyLegend, xlab="X Coordinates",ylab="Y Coordinates", main, ...) {
if (missing(xyLegend)) xyLegend <- c(.bbox(y)[1,2] - (.bbox(y)[1,2]-.bbox(y)[1,1]) * 0.16,.bbox(y)[2,1] + (.bbox(y)[2,2]-.bbox(y)[2,1]) * 0.25)
else if(length(xyLegend) != 2 || !inherits(xyLegend,'numeric')) xyLegend <- c(.bbox(y)[1,2] - (.bbox(y)[1,2]-.bbox(y)[1,1]) * 0.16,.bbox(y)[2,1] + (.bbox(y)[2,2]-.bbox(y)[2,1]) * 0.25)
if (missing(main)) main <- "Impact of positional uncertainty based on LISA"
levels <- .level(levels)
.plot.specisLISA(x=x,y=y,levels=levels,xyLegend=xyLegend,xlab=xlab,ylab=ylab, main=main, ...)
}
)
setMethod("plot", signature(x='speciesLISA',y="missing"),
function(x,y,cex=2,levels=c(0,3,6), xyLegend, xlab="X Coordinates",ylab="Y Coordinates", main, ...) {
if (missing(levels)) levels=c(0,3,6)
.lc <- (length(levels) / 10)*1.5
if (missing(xyLegend)) xyLegend <- c(.bbox(x@species)[1,2] - (.bbox(x@species)[1,2]-.bbox(x@species)[1,1]) * 0.16,.bbox(x@species)[2,1] + (.bbox(x@species)[2,2]-.bbox(x@species)[2,1]) * .lc)
else if(length(xyLegend) != 2 || !inherits(xyLegend, 'numeric')) xyLegend <- c(.bbox(x@species)[1,2] - (.bbox(x@species)[1,2]-.bbox(x@species)[1,1]) * 0.16,.bbox(x@species)[2,1] + (.bbox(x@species)[2,2]-.bbox(x@species)[2,1]) * .lc)
if (missing(main)) main <- "Impact of positional uncertainty based on LISA"
levels <- .level(levels)
.plot.specisLISA.t(x=x,levels=levels,xyLegend=xyLegend,xlab=xlab,ylab=ylab, main=main, ...)
}
)
#----
setMethod("plot", signature(x='speciesLISA',y="SpatVector"),
function(x,y,cex=2,levels=c(0,3,6), xyLegend, xlab="X Coordinates",ylab="Y Coordinates", main, ...) {
if (missing(levels)) levels=c(0,3,6)
.lc <- (length(levels) / 10)*1.5
if (missing(xyLegend)) xyLegend <- c(.bbox(y)[1,2] - (.bbox(y)[1,2]-.bbox(y)[1,1]) * 0.1,.bbox(y)[2,1] + (.bbox(y)[2,2]-.bbox(y)[2,1]) * .lc)
else if(length(xyLegend) != 2 || !inherits(xyLegend,'numeric')) xyLegend <- c(.bbox(y)[1,2] - (.bbox(y)[1,2]-.bbox(y)[1,1]) * 0.16,.bbox(y)[2,1] + (.bbox(y)[2,2]-.bbox(y)[2,1]) * .lc)
if (missing(main)) main <- "Impact of positional uncertainty based on LISA"
levels <- .level(levels)
.plot.specisLISA.t(x=x,y=y,levels=levels,xyLegend=xyLegend,xlab=xlab,ylab=ylab, main=main, ...)
}
)
#----
setMethod("plot", signature(x='RasterVariogram'),
function(x,xlim,ylim,xlab,ylab,pch,col,main,cloud=FALSE,box=FALSE,...) {
if (missing(xlim)) xlim <- c(0,x@lag*x@nlags)
if (missing(ylim)) {
if (cloud | box) ylim <- c(0,quantile(x@variogramCloud,prob=0.99,na.rm=TRUE))
else ylim <- c(0,max(x@variogram$gamma,na.rm=TRUE))
}
if (missing(xlab)) xlab <- "Lag"
if (missing(ylab)) ylab <- "Semivariance"
if (missing(pch)) pch <- 16
if (missing(col)) {
if (box) col <- 0
else col <- 'blue'
}
if (missing(main)) {
if (cloud) main <- "Variogram Cloud"
else if (box) main <- "Box plot of variogram Cloud"
else main <- "Variogram"
}
if (cloud) {
plot(x@variogram$distance,x@variogramCloud[1,],xlim=xlim,ylim=ylim,xlab=xlab,ylab=ylab,main=main,pch=pch,col=col,...)
for (i in 2:x@nlags) points(x@variogram$distance,x@variogramCloud[i,],col=col,pch=pch,...)
} else if (box) boxplot(x@variogramCloud,names=x@variogram$distance,xlab=xlab,ylab=ylab,ylim=ylim,col=col,main=main,...)
else plot(x@variogram$distance,x@variogram$gamma,xlim=xlim,ylim=ylim,xlab=xlab,ylab=ylab,main=main,pch=pch,col=col,...)
}
)
|
/scratch/gouwar.j/cran-all/cranData/usdm/R/plot.R
|
# Author: Babak Naimi, [email protected]
# Date : Oct. 2012
# Version 1.1
# Licence GPL v3
setMethod ('show' , 'VIF',
function ( object ) {
if (length(object@excluded) > 0) {
               cat (length(object@excluded),'variables from the',length(object@variables), 'input variables have a collinearity problem:','\n','\n')
               cat (object@excluded,'\n')
             } else cat ('No variable from the',length(object@variables), 'input variables has a collinearity problem.','\n')
cat('\n')
             if (length(object@excluded) > 0) cat('After excluding the collinear variables, the linear correlation coefficients range between:','\n')
             else cat('The linear correlation coefficients range between:','\n')
mx <- .minCor(object@corMatrix)
cat ('min correlation (',mx[1],'~',mx[2],'): ',object@corMatrix[mx[1],mx[2]], '\n')
mx <- .maxCor(object@corMatrix)
cat ('max correlation (',mx[1],'~',mx[2],'): ',object@corMatrix[mx[1],mx[2]], '\n')
cat ('\n')
             cat('---------- VIFs of the remaining variables --------','\n')
print(object@results)
}
)
setMethod ('show' , 'speciesLISA',
function(object) {
cat('class :' , class(object), '\n')
cat('LISA statistic :' , object@statistic, '\n\n')
cat('number of species observations : ' , nrow(object@LISAs), '\n')
cat('number of predictor variables : ' , ncol(object@LISAs), '\n')
cat('min, mean, max of aggregated LISA : ' , min(object@LISA),',' ,mean(object@LISA),',',max(object@LISA), '\n')
}
)
setMethod ('show' , 'RasterVariogram',
function(object) {
cat('class :' , class(object), '\n')
cat('Lag size :' , object@lag, '\n')
cat('Number of lags : ' , object@nlags, '\n')
cat ('\n')
cat('------ Variogram data ------','\n')
print(object@variogram)
}
)
|
/scratch/gouwar.j/cran-all/cranData/usdm/R/show.R
|
# Author: Babak Naimi, [email protected]
# Date : Sep 2012
# Version 1.0
# Licence GPL v3
if (!isGeneric("speciesLisa")) {
setGeneric("speciesLisa", function(x, y, uncertainty, statistic="K1",weights)
standardGeneric("speciesLisa"))
}
setMethod('speciesLisa', signature(x='Raster',y='SpatialPoints'),
function(x, y, uncertainty, statistic="K1",weights) {
if (nlayers(x) > 1) {
if (missing(weights)) weights <- rep(1/nlayers(x),nlayers(x))
else if (length(weights) != nlayers(x)) stop("the length of weights should be equal to the number of layers in the raster object")
weights <- weights/sum(weights)
o <- lisa(x=x,y=y,d1=0,d2=uncertainty,statistic=statistic)
n <- new("speciesLISA")
n@species <- y
n@data <- NA
n@LISAs <- o
n@weights <- weights
n@statistic <- statistic
for (i in 1:length(weights)) o[,i] <- o[,i] * weights[i]
n@LISA <- apply(o,1,sum)
} else {
o <- lisa(x=x,y=y,d1=0,d2=uncertainty,statistic=statistic)
n <- new("speciesLISA")
n@species <- vect(y)
n@LISAs <- o
n@weights <- NA
n@statistic <- statistic
n@LISA <- o
}
n
}
)
setMethod('speciesLisa', signature(x='Raster',y='SpatialPointsDataFrame'),
function(x, y, uncertainty, statistic="K1",weights) {
if (nlayers(x) > 1) {
if (missing(weights)) weights <- rep(1/nlayers(x),nlayers(x))
else if (length(weights) != nlayers(x)) stop("the length of weights should be equal to the number of layers in the raster object")
weights <- weights/sum(weights)
o <- lisa(x=x,y=y,d1=0,d2=uncertainty,statistic=statistic)
n <- new("speciesLISA")
n@species <- as(y,"SpatialPoints")
n@data <- as(y,'data.frame')
n@LISAs <- o
n@weights <- weights
n@statistic <- statistic
for (i in 1:length(weights)) o[,i] <- o[,i] * weights[i]
n@LISA <- apply(o,1,sum)
} else {
o <- lisa(x=x,y=y,d1=0,d2=uncertainty,statistic=statistic)
n <- new("speciesLISA")
n@species <- vect(y)
n@data <- as(y,'data.frame')
n@LISAs <- o
n@statistic <- statistic
n@weights <- NA
n@LISA <- o
}
n
}
)
#----
setMethod('speciesLisa', signature(x='SpatRaster',y='SpatVector'),
function(x, y, uncertainty, statistic="K1",weights) {
if (nlyr(x) > 1) {
if (missing(weights)) weights <- rep(1/nlyr(x),nlyr(x))
else if (length(weights) != nlyr(x)) stop("the length of weights should be equal to the number of layers in the raster object")
weights <- weights/sum(weights)
o <- lisa(x=x,y=y,d1=0,d2=uncertainty,statistic=statistic)
n <- new("speciesLISA")
n@species <- y
n@data <- as(y,'data.frame')
n@LISAs <- o
n@weights <- weights
n@statistic <- statistic
for (i in 1:length(weights)) o[,i] <- o[,i] * weights[i]
n@LISA <- apply(o,1,sum)
} else {
o <- lisa(x=x,y=y,d1=0,d2=uncertainty,statistic=statistic)
n <- new("speciesLISA")
n@species <- y
n@data <- as(y,'data.frame')
n@LISAs <- o
n@statistic <- statistic
n@weights <- NA
n@LISA <- o
}
n
}
)
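#----
# Illustrative usage sketch (kept as a comment, not executed with the package;
# the file names are hypothetical): LISA statistics at species locations,
# aggregated over the predictor layers with equal weights, for a positional
# uncertainty of 5000 map units.
#
#   r <- terra::rast("predictors.tif")
#   sp <- terra::vect("species.shp")
#   sl <- speciesLisa(x = r, y = sp, uncertainty = 5000, statistic = "K1")
#   plot(sl)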
|
/scratch/gouwar.j/cran-all/cranData/usdm/R/speciesLisa.R
|
# Author: Babak Naimi, [email protected]
# Date : Oct. 2019
# Last update: September 2023
# Version 1.7
# Licence GPL v3
.vif <- function(.dd) {
z<-rep(NA,ncol(.dd))
names(z) <- colnames(.dd)
for (i in 1:ncol(.dd)) {
z[i] <- 1 / (1 - summary(lm(.dd[,i]~.,data=.dd[-i]))$r.squared)
}
return(z)
}
.vif2 <- function(y,w) {
z<-rep(NA,length(w))
names(z) <- colnames(y)[w]
for (i in 1:length(w)) {
z[i] <- 1/(1-summary(lm(as.formula(paste(colnames(y)[w[i]],"~.",sep='')),data=y))$r.squared)
}
return(z)
}
.maxCor <- function(k){
k <- abs(k)
n <- nrow(k)
for (i in 1:n) k[i:n,i] <- NA
w <- which.max(k)
c(rownames(k)[((w%/%nrow(k))+1)],colnames(k)[w%%nrow(k)])
}
#---
# Checks the correlations and, if both variables in the most correlated pair are
# listed in 'keep', moves on to the next most correlated pair until a pair with
# at least one variable not in 'keep' is found.
.maxCor2 <- function(k,keep){
k <- abs(k)
n <- nrow(k)
for (i in 1:n) k[i:n,i] <- NA
#w <- which.max(k)
o <- order(k,decreasing = TRUE)
w <- o[1]
vn <- c(rownames(k)[((w%/%nrow(k))+1)],colnames(k)[w%%nrow(k)])
if (all(vn %in% keep)) {
LOOP <-TRUE
j <- 2
while(LOOP) {
w <- o[j]
vn <- c(rownames(k)[((w%/%nrow(k))+1)],colnames(k)[w%%nrow(k)])
if (all(vn %in% keep) && j < length(o)) j <- j+1
else LOOP <- FALSE
}
return (list(variables=vn,warning_level=j))
} else {
return(vn)
}
}
.minCor <- function(k){
k <- abs(k)
rr<-c();cc<-c();co<-c()
for (c in 1:(ncol(k)-1)) {
for (r in (c+1):nrow(k)){
rr<-c(rr,rownames(k)[r]);cc<-c(cc,colnames(k)[c])
co <- c(co,k[r,c])
}
}
w <- which.min(co)
c(rr[w],cc[w])
}
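# For example (illustrative, not run): with a named correlation matrix 'k',
# .maxCor(k) returns the names of the most strongly (absolutely) correlated
# pair, .minCor(k) the least correlated pair, and .maxCor2(k, keep) skips any
# pair whose members are both listed in 'keep' (returning the next pair and
# its rank as a list).
#
#   k <- cor(data.frame(a = 1:10, b = (1:10)^2, c = sin(1:10)))
#   .maxCor(k)                        # the most correlated pair ("a" and "b")
#   .minCor(k)                        # the least correlated pair
#   .maxCor2(k, keep = c("a", "b"))   # skips the a-b pair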
#-----------------
if (!isGeneric("vif")) {
setGeneric("vif", function(x,size, ...)
standardGeneric("vif"))
}
setMethod('vif', signature(x='RasterStackBrick'),
function(x, size) {
if (nlayers(x) == 1) stop("The Raster object should have at least two layers")
if (missing(size)) {
if (ncell(x) < 7000) size <- NULL
else size <- 5000
}
if (is.null(size)) x <- as.data.frame(x,na.rm=TRUE)
else x <- sampleRandom(x,size,na.rm=TRUE)
x <- na.omit(x)
v <- .vif(x)
data.frame(Variables=names(v),VIF=as.vector(v))
}
)
#-----
setMethod('vif', signature(x='SpatRaster'),
function(x, size) {
if (nlyr(x) == 1) stop("The Raster object should have at least two layers")
if (missing(size)) {
if (ncell(x) < 7000) size <- NULL
else size <- 5000
}
if (is.null(size)) x <- as.data.frame(x,na.rm=TRUE)
else x <- spatSample(x,size,na.rm=TRUE)
x <- na.omit(x)
v <- .vif(x)
data.frame(Variables=names(v),VIF=as.vector(v))
}
)
setMethod('vif', signature(x='data.frame'),
function(x, size) {
if (ncol(x) == 1) stop("At least two variables are needed to quantify vif")
x <- na.omit(x)
if (missing(size)) {
if (nrow(x) < 7000) size <- NULL
else size <- 5000
}
if (!is.null(size)) {
if(nrow(x) > size) x <- x[sample(1:nrow(x),size),]
}
v <- .vif(x)
data.frame(Variables=names(v),VIF=as.vector(v))
}
)
setMethod('vif', signature(x='matrix'),
function(x, size) {
if (ncol(x) == 1) stop("At least two variables are needed to quantify vif")
x <- na.omit(x)
if (missing(size)) {
if (nrow(x) < 7000) size <- NULL
else size <- 5000
}
if (!is.null(size)) {
if(nrow(x) > size) x <- x[sample(1:nrow(x),size),]
}
v <- .vif(x)
data.frame(Variables=names(v),VIF=as.vector(v))
}
)
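#-----
# Illustrative usage sketch (kept as a comment, not executed with the package;
# the data are made up): VIFs for a small data.frame in which one column is
# nearly a copy of another.
#
#   dat <- data.frame(a = rnorm(100), b = rnorm(100))
#   dat$c <- dat$a + rnorm(100, sd = 0.01)   # almost identical to 'a'
#   vif(dat)                                 # 'a' and 'c' get very large VIFs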
if (!isGeneric("vifcor")) {
setGeneric("vifcor", function(x, th= 0.9,keep=NULL, size, method='pearson',...)
standardGeneric("vifcor"))
}
setMethod('vifcor', signature(x='RasterStackBrick'),
function(x, th=0.9, keep=NULL, size, method='pearson') {
if (nlayers(x) == 1) stop("The Raster object should have at least two layers")
if (missing(method) || !method %in% c('pearson','kendall','spearman')) method <- 'pearson'
if (missing(size)) {
if (ncell(x) < 7000) size <- NULL
else size <- 5000
}
if (missing(keep)) keep <- NULL
#-----------
if (is.null(size)) x <- as.data.frame(x,na.rm=TRUE)
else x <- sampleRandom(x,size,na.rm=TRUE)
x <- na.omit(x)
vn <- colnames(x)
if (!is.null(keep)) {
if (is.numeric(keep)) {
.w <- keep %in% c(1:length(vn))
if (all(!.w)) stop(paste0('the values in keep are out of range; should be between 1 and ',ncol(x),' for your dataset...!'))
if (any(!.w)) {
keep <- keep[.w]
          warning('some of the layer numbers in keep are out of range (they should be between 1 and the number of layers in x), so they are ignored!')
}
keep <- vn[keep]
} else if (is.character(keep)) {
.w <- keep %in% colnames(x)
if (all(!.w)) stop('None of the variable names in keep are available in x dataset!')
if (any(!.w)) {
keep <- keep[.w]
          warning('some of the variable names in keep are not available in x (check for typos), so they are ignored!')
}
}
}
#----------------
.warn <- FALSE
LOOP <- TRUE
n <- new("VIF")
n@variables <- colnames(x)
exc <- c()
if (is.null(keep)) {
while (LOOP) {
xcor <- abs(cor(x, method=method))
mx <- .maxCor(xcor)
if (xcor[mx[1],mx[2]] >= th) {
w1 <- which(colnames(xcor) == mx[1])
w2 <- which(rownames(xcor) == mx[2])
v <- .vif2(x,c(w1,w2))
ex <- mx[which.max(v[mx])]
exc <- c(exc,ex)
x <- x[,-which(colnames(x) == ex)]
} else LOOP <- FALSE
}
} else {
while (LOOP) {
xcor <- abs(cor(x, method=method))
mx <- .maxCor2(xcor,keep=keep)
if (is.list(mx)) {
.warn <- TRUE
mx <- mx[[1]]
}
if (xcor[mx[1],mx[2]] >= th) {
w1 <- which(colnames(xcor) == mx[1])
w2 <- which(rownames(xcor) == mx[2])
if (any(mx %in% keep)) {
ex <- mx[!mx %in% keep]
} else {
v <- .vif2(x,c(w1,w2))
ex <- mx[which.max(v[mx])]
}
exc <- c(exc,ex)
x <- x[,-which(colnames(x) == ex)]
} else LOOP <- FALSE
}
}
if (.warn) {
        if (length(keep) > 2) warning('At least two variables specified in "keep" are strongly correlated (i.e., are subject to the collinearity issue)!')
        else warning('The two variables specified in "keep" are strongly correlated (i.e., are subject to the collinearity issue)!')
}
#---
if (length(exc) > 0) n@excluded <- exc
v <- .vif(x)
n@corMatrix <- cor(x, method=method)
n@results <- data.frame(Variables=names(v),VIF=as.vector(v))
n
}
)
#-------
setMethod('vifcor', signature(x='SpatRaster'),
function(x, th=0.9, keep=NULL, size, method='pearson') {
if (nlyr(x) == 1) stop("The Raster object should have at least two layers")
if (missing(method) || !method %in% c('pearson','kendall','spearman')) method <- 'pearson'
if (missing(size)) {
if (ncell(x) < 7000) size <- NULL
else size <- 5000
}
if (missing(keep)) keep <- NULL
#-----------
if (is.null(size)) x <- as.data.frame(x,na.rm=TRUE)
else x <- spatSample(x,size,na.rm=TRUE)
x <- na.omit(x)
vn <- colnames(x)
if (!is.null(keep)) {
if (is.numeric(keep)) {
.w <- keep %in% c(1:length(vn))
if (all(!.w)) stop(paste0('the values in keep are out of range; should be between 1 and ',ncol(x),' for your dataset...!'))
if (any(!.w)) {
keep <- keep[.w]
          warning('some of the layer numbers in keep are out of range (should be between 1 and nlyr(x)), so they are ignored!')
}
keep <- vn[keep]
} else if (is.character(keep)) {
.w <- keep %in% colnames(x)
if (all(!.w)) stop('None of the variable names in keep are available in x dataset!')
if (any(!.w)) {
keep <- keep[.w]
          warning('some of the variable names in keep are not available in x (check for typos), so they are ignored!')
}
}
}
#----------------
.warn <- FALSE
LOOP <- TRUE
n <- new("VIF")
n@variables <- colnames(x)
exc <- c()
if (is.null(keep)) {
while (LOOP) {
xcor <- abs(cor(x, method=method))
mx <- .maxCor(xcor)
if (xcor[mx[1],mx[2]] >= th) {
w1 <- which(colnames(xcor) == mx[1])
w2 <- which(rownames(xcor) == mx[2])
v <- .vif2(x,c(w1,w2))
ex <- mx[which.max(v[mx])]
exc <- c(exc,ex)
x <- x[,-which(colnames(x) == ex)]
} else LOOP <- FALSE
}
} else {
while (LOOP) {
xcor <- abs(cor(x, method=method))
mx <- .maxCor2(xcor,keep=keep)
if (is.list(mx)) {
.warn <- TRUE
mx <- mx[[1]]
}
if (xcor[mx[1],mx[2]] >= th) {
w1 <- which(colnames(xcor) == mx[1])
w2 <- which(rownames(xcor) == mx[2])
if (any(mx %in% keep)) {
ex <- mx[!mx %in% keep]
} else {
v <- .vif2(x,c(w1,w2))
ex <- mx[which.max(v[mx])]
}
exc <- c(exc,ex)
x <- x[,-which(colnames(x) == ex)]
} else LOOP <- FALSE
}
}
if (.warn) {
        if (length(keep) > 2) warning('At least two variables specified in "keep" are strongly correlated (i.e., are subject to the collinearity issue)!')
        else warning('The two variables specified in "keep" are strongly correlated (i.e., are subject to the collinearity issue)!')
}
#---
if (length(exc) > 0) n@excluded <- exc
v <- .vif(x)
n@corMatrix <- cor(x, method=method)
n@results <- data.frame(Variables=names(v),VIF=as.vector(v))
n
}
)
#----------
setMethod('vifcor', signature(x='data.frame'),
function(x, th=0.9, keep=NULL, size, method='pearson') {
if (ncol(x) == 1) stop("The data.frame should have at least two columns")
if (missing(method) || !method %in% c('pearson','kendall','spearman')) method <- 'pearson'
x <- na.omit(x)
if (missing(size)) {
if (nrow(x) < 7000) size <- NULL
else size <- 5000
}
if (missing(keep)) keep <- NULL
#-----------
if (!is.null(size)) {
if(nrow(x) > size) x <- x[sample(1:nrow(x),size),]
}
#----
vn <- colnames(x)
if (!is.null(keep)) {
if (is.numeric(keep)) {
.w <- keep %in% c(1:length(vn))
if (all(!.w)) stop(paste0('the values in keep are out of range; should be between 1 and ',ncol(x),' for your dataset...!'))
if (any(!.w)) {
keep <- keep[.w]
          warning('some of the column numbers in keep are out of range (they should be between 1 and ncol(x)), so they are ignored!')
}
keep <- vn[keep]
} else if (is.character(keep)) {
.w <- keep %in% colnames(x)
if (all(!.w)) stop('None of the variable names in keep are available in x!')
if (any(!.w)) {
keep <- keep[.w]
          warning('some of the variable names in keep are not available in x (check for typos), so they are ignored!')
}
}
}
#----------------
.warn <- FALSE
LOOP <- TRUE
n <- new("VIF")
n@variables <- colnames(x)
exc <- c()
if (is.null(keep)) {
while (LOOP) {
xcor <- abs(cor(x, method=method))
mx <- .maxCor(xcor)
if (xcor[mx[1],mx[2]] >= th) {
w1 <- which(colnames(xcor) == mx[1])
w2 <- which(rownames(xcor) == mx[2])
v <- .vif2(x,c(w1,w2))
ex <- mx[which.max(v[mx])]
exc <- c(exc,ex)
x <- x[,-which(colnames(x) == ex)]
} else LOOP <- FALSE
}
} else {
while (LOOP) {
xcor <- abs(cor(x, method=method))
mx <- .maxCor2(xcor,keep=keep)
if (is.list(mx)) {
.warn <- TRUE
mx <- mx[[1]]
}
if (xcor[mx[1],mx[2]] >= th) {
w1 <- which(colnames(xcor) == mx[1])
w2 <- which(rownames(xcor) == mx[2])
if (any(mx %in% keep)) {
ex <- mx[!mx %in% keep]
} else {
v <- .vif2(x,c(w1,w2))
ex <- mx[which.max(v[mx])]
}
exc <- c(exc,ex)
x <- x[,-which(colnames(x) == ex)]
} else LOOP <- FALSE
}
}
if (.warn) {
        if (length(keep) > 2) warning('At least two variables specified in "keep" are strongly correlated (i.e., are subject to the collinearity issue)!')
        else warning('The two variables specified in "keep" are strongly correlated (i.e., are subject to the collinearity issue)!')
}
#---
if (length(exc) > 0) n@excluded <- exc
v <- .vif(x)
n@corMatrix <- cor(x, method=method)
n@results <- data.frame(Variables=names(v),VIF=as.vector(v))
n
}
)
#--------
setMethod('vifcor', signature(x='matrix'),
function(x, th=0.9, keep=NULL, size, method='pearson') {
if (ncol(x) == 1) stop("The matrix should have at least two columns")
if (missing(method) || !method %in% c('pearson','kendall','spearman')) method <- 'pearson'
x <- as.data.frame(x)
x <- na.omit(x)
if (missing(size)) {
if (nrow(x) < 7000) size <- NULL
else size <- 5000
}
if (missing(keep)) keep <- NULL
#-----------
if (!is.null(size)) {
if(nrow(x) > size) x <- x[sample(1:nrow(x),size),]
}
#----
vn <- colnames(x)
if (!is.null(keep)) {
if (is.numeric(keep)) {
.w <- keep %in% c(1:length(vn))
if (all(!.w)) stop(paste0('the values in keep are out of range; should be between 1 and ',ncol(x),' for your dataset...!'))
if (any(!.w)) {
keep <- keep[.w]
          warning('some of the column numbers in keep are out of range (they should be between 1 and ncol(x)), so they are ignored!')
}
keep <- vn[keep]
} else if (is.character(keep)) {
.w <- keep %in% colnames(x)
if (all(!.w)) stop('None of the variable names in keep are available in x!')
if (any(!.w)) {
keep <- keep[.w]
          warning('some of the variable names in keep are not available in x (check for typos), so they are ignored!')
}
}
}
#----------------
.warn <- FALSE
LOOP <- TRUE
n <- new("VIF")
n@variables <- colnames(x)
exc <- c()
if (is.null(keep)) {
while (LOOP) {
xcor <- abs(cor(x, method=method))
mx <- .maxCor(xcor)
if (xcor[mx[1],mx[2]] >= th) {
w1 <- which(colnames(xcor) == mx[1])
w2 <- which(rownames(xcor) == mx[2])
v <- .vif2(x,c(w1,w2))
ex <- mx[which.max(v[mx])]
exc <- c(exc,ex)
x <- x[,-which(colnames(x) == ex)]
} else LOOP <- FALSE
}
} else {
while (LOOP) {
xcor <- abs(cor(x, method=method))
mx <- .maxCor2(xcor,keep=keep)
if (is.list(mx)) {
.warn <- TRUE
mx <- mx[[1]]
}
if (xcor[mx[1],mx[2]] >= th) {
w1 <- which(colnames(xcor) == mx[1])
w2 <- which(rownames(xcor) == mx[2])
if (any(mx %in% keep)) {
ex <- mx[!mx %in% keep]
} else {
v <- .vif2(x,c(w1,w2))
ex <- mx[which.max(v[mx])]
}
exc <- c(exc,ex)
x <- x[,-which(colnames(x) == ex)]
} else LOOP <- FALSE
}
}
if (.warn) {
        if (length(keep) > 2) warning('At least two variables specified in "keep" are strongly correlated (i.e., are subject to the collinearity issue)!')
        else warning('The two variables specified in "keep" are strongly correlated (i.e., are subject to the collinearity issue)!')
}
#---
if (length(exc) > 0) n@excluded <- exc
v <- .vif(x)
n@corMatrix <- cor(x, method=method)
n@results <- data.frame(Variables=names(v),VIF=as.vector(v))
n
}
)
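#--------
# Illustrative usage sketch (kept as a comment, not executed with the package;
# the data are made up as in the vif() sketch above): exclude one variable of
# each highly correlated pair, optionally forcing some variables to be kept.
#
#   dat <- data.frame(a = rnorm(100), b = rnorm(100))
#   dat$c <- dat$a + rnorm(100, sd = 0.01)
#   v1 <- vifcor(dat, th = 0.7)               # drops 'a' or 'c'
#   v2 <- vifcor(dat, th = 0.7, keep = "c")   # forces 'c' to be retained
#   v2@results                                # VIFs of the remaining variables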
########################################
if (!isGeneric("vifstep")) {
setGeneric("vifstep", function(x, th= 10,keep=NULL, size, method='pearson',...)
standardGeneric("vifstep"))
}
setMethod('vifstep', signature(x='RasterStackBrick'),
function(x, th=10, keep=NULL, size, method='pearson') {
if (nlayers(x) == 1) stop("The Raster object should have at least two layers!")
if (missing(method) || !method %in% c('pearson','kendall','spearman')) method <- 'pearson'
if (missing(size)) {
if (ncell(x) < 7000) size <- NULL
else size <- 5000
}
if (missing(keep)) keep <- NULL
#-----------
if (is.null(size)) x <- as.data.frame(x,na.rm=TRUE)
else x <- sampleRandom(x,size,na.rm=TRUE)
x <- na.omit(x)
vn <- colnames(x)
if (!is.null(keep)) {
if (is.numeric(keep)) {
.w <- keep %in% c(1:length(vn))
if (all(!.w)) stop(paste0('the values in keep are out of range; should be between 1 and ',ncol(x),' for your dataset...!'))
if (any(!.w)) {
keep <- keep[.w]
          warning('some of the layer numbers in keep are out of range (they should be between 1 and the number of layers in x), so they are ignored!')
}
keep <- vn[keep]
} else if (is.character(keep)) {
.w <- keep %in% colnames(x)
if (all(!.w)) stop('None of the variable names in keep are available in x dataset!')
if (any(!.w)) {
keep <- keep[.w]
          warning('some of the variable names in keep are not available in x (check for typos), so they are ignored!')
}
}
}
#----------------
LOOP <- TRUE
n <- new("VIF")
n@variables <- colnames(x)
exc <- c()
if (is.null(keep)) {
while (LOOP) {
v <- .vif(x)
if (v[which.max(v)] >= th) {
ex <- names(v[which.max(v)])
exc <- c(exc,ex)
x <- x[,-which(colnames(x) == ex)]
} else LOOP=FALSE
}
} else {
while (LOOP) {
v <- .vif(x)
v <- v[!names(v) %in% keep]
if (v[which.max(v)] >= th) {
ex <- names(v[which.max(v)])
exc <- c(exc,ex)
x <- x[,-which(colnames(x) == ex)]
} else LOOP=FALSE
}
}
#---
if (length(exc) > 0) n@excluded <- exc
v <- .vif(x)
n@corMatrix <- cor(x, method=method)
n@results <- data.frame(Variables=names(v),VIF=as.vector(v))
n
}
)
setMethod('vifstep', signature(x='data.frame'),
function(x, th=10, keep=NULL, size, method='pearson') {
if (ncol(x) == 1) stop("The data.frame should have at least two variables!")
if (missing(method) || !method %in% c('pearson','kendall','spearman')) method <- 'pearson'
x <- na.omit(x)
if (missing(size)) {
if (nrow(x) < 6000) size <- NULL
else size <- 5000
}
if (missing(keep)) keep <- NULL
#-----------
if (!is.null(size)) {
if(nrow(x) > size) x <- x[sample(1:nrow(x),size),]
}
#----
vn <- colnames(x)
if (!is.null(keep)) {
if (is.numeric(keep)) {
.w <- keep %in% c(1:length(vn))
if (all(!.w)) stop(paste0('the values in keep are out of range; should be between 1 and ',ncol(x),' for your dataset...!'))
if (any(!.w)) {
keep <- keep[.w]
          warning('some of the column numbers in keep are out of range (they should be between 1 and ncol(x)), so they are ignored!')
}
keep <- vn[keep]
} else if (is.character(keep)) {
.w <- keep %in% colnames(x)
if (all(!.w)) stop('None of the variable names in keep are available in x!')
if (any(!.w)) {
keep <- keep[.w]
          warning('some of the variable names in keep are not available in x (check for typos), so they are ignored!')
}
}
}
#----------------
LOOP <- TRUE
n <- new("VIF")
n@variables <- colnames(x)
exc <- c()
if (is.null(keep)) {
while (LOOP) {
v <- .vif(x)
if (v[which.max(v)] >= th) {
ex <- names(v[which.max(v)])
exc <- c(exc,ex)
x <- x[,-which(colnames(x) == ex)]
} else LOOP=FALSE
}
} else {
while (LOOP) {
v <- .vif(x)
v <- v[!names(v) %in% keep]
if (v[which.max(v)] >= th) {
ex <- names(v[which.max(v)])
exc <- c(exc,ex)
x <- x[,-which(colnames(x) == ex)]
} else LOOP=FALSE
}
}
if (length(exc) > 0) n@excluded <- exc
v <- .vif(x)
n@corMatrix <- cor(x, method=method)
n@results <- data.frame(Variables=names(v),VIF=as.vector(v))
n
}
)
setMethod('vifstep', signature(x='matrix'),
function(x, th=10, keep=NULL, size, method='pearson') {
if (ncol(x) == 1) stop("The matrix should have at least two columns (variables)!")
if (missing(method) || !method %in% c('pearson','kendall','spearman')) method <- 'pearson'
x <- as.data.frame(x)
x <- na.omit(x)
if (missing(size)) {
if (nrow(x) < 6000) size <- NULL
else size <- 5000
}
if (missing(keep)) keep <- NULL
#-----------
if (!is.null(size)) {
if(nrow(x) > size) x <- x[sample(1:nrow(x),size),]
}
#----
vn <- colnames(x)
if (!is.null(keep)) {
if (is.numeric(keep)) {
.w <- keep %in% c(1:length(vn))
if (all(!.w)) stop(paste0('the values in keep are out of range; should be between 1 and ',ncol(x),' for your dataset...!'))
if (any(!.w)) {
keep <- keep[.w]
          warning('some of the column numbers in keep are out of range (they should be between 1 and ncol(x)), so they are ignored!')
}
keep <- vn[keep]
} else if (is.character(keep)) {
.w <- keep %in% colnames(x)
if (all(!.w)) stop('None of the variable names in keep are available in x!')
if (any(!.w)) {
keep <- keep[.w]
          warning('some of the variable names in keep are not available in x (check for typos), so they are ignored!')
}
}
}
#----------------
LOOP <- TRUE
n <- new("VIF")
n@variables <- colnames(x)
exc <- c()
if (is.null(keep)) {
while (LOOP) {
v <- .vif(x)
if (v[which.max(v)] >= th) {
ex <- names(v[which.max(v)])
exc <- c(exc,ex)
x <- x[,-which(colnames(x) == ex)]
} else LOOP=FALSE
}
} else {
while (LOOP) {
v <- .vif(x)
v <- v[!names(v) %in% keep]
if (v[which.max(v)] >= th) {
ex <- names(v[which.max(v)])
exc <- c(exc,ex)
x <- x[,-which(colnames(x) == ex)]
} else LOOP=FALSE
}
}
if (length(exc) > 0) n@excluded <- exc
v <- .vif(x)
n@corMatrix <- cor(x, method=method)
n@results <- data.frame(Variables=names(v),VIF=as.vector(v))
n
}
)
#------------
setMethod('vifstep', signature(x='SpatRaster'),
function(x, th=10, keep=NULL, size, method='pearson') {
if (nlyr(x) == 1) stop("The Raster object should have at least two layers!")
if (missing(method) || !method %in% c('pearson','kendall','spearman')) method <- 'pearson'
if (missing(size)) {
if (ncell(x) < 7000) size <- NULL
else size <- 5000
}
if (missing(keep)) keep <- NULL
#-----------
if (is.null(size)) x <- as.data.frame(x,na.rm=TRUE)
else x <- spatSample(x,size,na.rm=TRUE)
x <- na.omit(x)
vn <- colnames(x)
if (!is.null(keep)) {
if (is.numeric(keep)) {
.w <- keep %in% c(1:length(vn))
if (all(!.w)) stop(paste0('the values in keep are out of range; should be between 1 and ',ncol(x),' for your dataset...!'))
if (any(!.w)) {
keep <- keep[.w]
          warning('some of the layer numbers in keep are out of range (should be between 1 and nlyr(x)), so they are ignored!')
}
keep <- vn[keep]
} else if (is.character(keep)) {
.w <- keep %in% colnames(x)
if (all(!.w)) stop('None of the variable names in keep are available in x dataset!')
if (any(!.w)) {
keep <- keep[.w]
          warning('some of the variable names in keep are not available in x (check for typos), so they are ignored!')
}
}
}
#----------------
LOOP <- TRUE
n <- new("VIF")
n@variables <- colnames(x)
exc <- c()
if (is.null(keep)) {
while (LOOP) {
v <- .vif(x)
if (v[which.max(v)] >= th) {
ex <- names(v[which.max(v)])
exc <- c(exc,ex)
x <- x[,-which(colnames(x) == ex)]
} else LOOP=FALSE
}
} else {
while (LOOP) {
v <- .vif(x)
v <- v[!names(v) %in% keep]
if (v[which.max(v)] >= th) {
ex <- names(v[which.max(v)])
exc <- c(exc,ex)
x <- x[,-which(colnames(x) == ex)]
} else LOOP=FALSE
}
}
if (length(exc) > 0) n@excluded <- exc
v <- .vif(x)
n@corMatrix <- cor(x, method=method)
n@results <- data.frame(Variables=names(v),VIF=as.vector(v))
n
}
)
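#------------
# Illustrative usage sketch (kept as a comment, not executed with the package;
# the file and layer names are hypothetical): stepwise exclusion of layers
# whose VIF exceeds 10 from a multi-layer SpatRaster.
#
#   r <- terra::rast("predictors.tif")
#   vifstep(r, th = 10)
#   vifstep(r, th = 10, keep = "bio1")   # never exclude the layer 'bio1'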
|
/scratch/gouwar.j/cran-all/cranData/usdm/R/vif.R
|
#' Compute distances from each item to group centroids
#'
#' @param d A distance matrix object of class \code{dist}.
#' @param g A factor representing the groups of items in \code{d}.
#' @param squared If \code{TRUE}, return the squared distance to group
#' centroids.
#' @return A data frame with distances to the group centroids:
#'
#' \describe{
#' \item{Item}{
#' A character vector of item labels from the dist object, or an integer
#' vector of item locations if labels are not present.}
#' \item{CentroidGroup}{
#' The group for which the centroid distance is given. The column type
#' should match that of the argument g (the \code{unique} function is used
#' to generate this column).}
#' \item{CentroidDistance}{
#' Inferred distance from the item to the centroid position of the
#' indicated group.}}
#'
#' @details
#' This function computes the distance from each item to the centroid positions
#' of groups defined in the argument \code{g}. This is accomplished without
#' determining the centroid positions directly; see the documentation for
#' \code{\link{dist_between_centroids}} for details on this procedure.
#'
#' If the distance can't be represented in a Euclidean space, the
#' \code{CentroidDistance} is set to \code{NaN}. See the documentation for
#' \code{\link{dist_between_centroids}} for further details.
#'
#' @export
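#' @examples
#' # A minimal, self-contained sketch with made-up data: six points in two
#' # dimensions, split into two groups of three
#' m6 <- matrix(sin(1:12), nrow = 6, dimnames = list(paste0("p", 1:6)))
#' g6 <- rep(c("A", "B"), each = 3)
#' dist_to_centroids(dist(m6), g6)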
dist_to_centroids <- function (d, g, squared = FALSE) {
d <- stats::as.dist(d)
d2 <- d ** 2
items <- attr(d, "Labels")
# Use numeric index for items if the distance matrix has no labels
items <- if (is.null(items)) 1:attr(d, "Size") else items
group_items <- tapply(items, g, c)
group_sizes <- lapply(group_items, length)
group_d2s <- lapply(group_items, function (x) dist_subset(d2, x))
within_group_sums <- lapply(group_d2s, sum)
df <- expand.grid(Item=items, CentroidGroup=unique(g), stringsAsFactors = F)
dist_to_group_centroid <- function (idx2, group) {
idx1 <- group_items[[group]]
n1 <- group_sizes[[group]]
sum1 <- within_group_sums[[group]]
sum12 <- sum(as.matrix(d2)[idx1, idx2])
term1 <- sum1 / (n1 ** 2)
term12 <- sum12 / n1
result_squared <- term12 - term1
if (squared) {
result_squared
} else {
is_negative <- result_squared < 0
if (any(is_negative)) {
msg <- paste0(
"When computing distance to centroids, negative values were ",
"produced before taking a square root. ",
"This happens because the distances cannot be represented in a ",
"Euclidean coordinate system. ",
"These distances are being returned as NaN. ",
"Alternately, you may set `squared = TRUE` to return the squared ",
"distances. In this case, you will never get NaN, but you might ",
"receive negative numbers for the squared distance.")
warning(msg)
result <- numeric(length(result_squared))
result[!is_negative] <- sqrt(result_squared[!is_negative])
result[is_negative] <- NaN
result
} else {
sqrt(result_squared)
}
}
}
df$CentroidDistance <- mapply(
dist_to_group_centroid, df$Item, df$CentroidGroup)
df
}
#' Compute the distance between group centroids
#'
#' @param d A distance matrix object of class \code{dist}.
#' @param idx1 A vector of items in group 1.
#' @param idx2 A vector of items in group 2.
#' @param squared If \code{TRUE}, return the squared distance between centroids.
#' @return The distance between group centroids (see details).
#'
#' @details
#' If you have a distance matrix, and the objects are partitioned into groups,
#' you might like to know the distance between the group centroids. The
#' centroid of each group is simply the center of mass for the group.
#'
#' It is possible to infer the distance between group centroids directly from
#' the distances between items in each group. The \code{adonis} test in the
#' ecology package \code{vegan} takes advantage of this approach to carry out
#' an ANOVA-like test on distances.
#'
#' The approach rests on the assumption that the objects occupy some
#' high-dimensional Euclidean space. However, we do not have to actually
#' create the space to find the distance between centroids. Based on the
#' assumption that such a space exists, we can use an algebraic formula to
#' perform the computation.
#'
#' The formulas for this were presented by Apostol and Mnatsakanian in 2003,
#' though we need to re-arrange equation 28 in their paper to get the value
#' we want:
#'
#' \deqn{| c_1 - c_2 | = \sqrt{
#' \frac{1}{n_1 n_2} \sum_{(1,2)} -
#' \frac{1}{n_1^2} \sum_{(1)} -
#' \frac{1}{n_2^2} \sum_{(2)}},}
#'
#' where \eqn{n_1} is the number of samples in group 1, \eqn{\sum_{(1)}} is the
#' sum of squared distances between items in group 1, and \eqn{\sum_{(1,2)}} is
#' the sum of squared distances between items in group 1 and those in group 2.
#'
#' Sometimes, the distance between centroids is not a real number, because it
#' is not possible to create a space where this distance exists. Mathematically,
#' we get a negative number underneath the square root in the equation above.
#' If this happens, the function returns \code{NaN}. If you'd like to have
#' access to this value, you can set \code{squared = TRUE} to return the
#' squared distance between centroids. In this case, you will never get
#' \code{NaN}, but you might receive negative numbers in your result.
#'
#' @references Apostol, T.M. and Mnatsakanian, M.A. Sums of squares of distances
#' in m-space. Math. Assoc. Am. Monthly 110, 516 (2003).
#'
#' @export
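#' @examples
#' # A minimal sketch with made-up data: distance between the centroids of
#' # rows 1-3 and rows 4-6
#' m6 <- matrix(sin(1:12), nrow = 6)
#' dist_between_centroids(dist(m6), 1:3, 4:6)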
dist_between_centroids <- function (d, idx1, idx2, squared = FALSE) {
if (is.logical(idx1)) {
n1 <- sum(idx1)
} else {
n1 <- length(idx1)
}
if (is.logical(idx2)) {
n2 <- sum(idx2)
} else {
n2 <- length(idx2)
}
d2 <- d ** 2
sum1 <- sum(dist_subset(d2, idx1))
sum2 <- sum(dist_subset(d2, idx2))
sum12 <- sum(as.matrix(d2)[idx1, idx2])
term1 <- sum1 / (n1 ** 2)
term2 <- sum2 / (n2 ** 2)
term12 <- sum12 / (n1 * n2)
result_squared <- term12 - term1 - term2
if (squared) {
result_squared
} else {
is_negative <- result_squared < 0
if (any(is_negative)) {
msg <- paste0(
"When computing distance between centroids, negative values were ",
"produced before taking a square root. ",
"This happens because the distances cannot be represented in a ",
"Euclidean coordinate system. ",
"These distances are being returned as NaN. ",
"Alternately, you may set `squared = TRUE` to return the squared ",
"distances. In this case, you will never get NaN, but you might ",
"receive negative numbers for the squared distance.")
warning(msg)
result <- numeric(length(result_squared))
result[!is_negative] <- sqrt(result_squared[!is_negative])
result[is_negative] <- NaN
result
} else {
sqrt(result_squared)
}
}
}
#' Make a new distance matrix of centroid distances between multiple groups
#' @param d A distance matrix object of class \code{dist}.
#' @param g A factor representing the groups of items in \code{d}.
#' @param squared If \code{TRUE}, return the squared distance between centroids.
#' @return A distance matrix of distances between the group centroids.
#' @export
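#' @examples
#' # A minimal sketch with made-up data: centroid-to-centroid distances for
#' # three groups of three points each
#' m9 <- matrix(sin(1:18), nrow = 9)
#' g9 <- rep(c("A", "B", "C"), each = 3)
#' dist_multi_centroids(dist(m9), g9)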
dist_multi_centroids <- function (d, g, squared = FALSE) {
group_idxs <- tapply(seq_along(g), g, c, simplify = FALSE)
centroid_distance_from_groups <- function (gg) {
g1 <- gg[1]
g2 <- gg[2]
idx1 <- group_idxs[[g1]]
idx2 <- group_idxs[[g2]]
dist_between_centroids(d, idx1, idx2, squared = squared)
}
dc <- utils::combn(names(group_idxs), 2, centroid_distance_from_groups)
attr(dc, "Size") <- length(names(group_idxs))
attr(dc, "Labels") <- names(group_idxs)
attr(dc, "Diag") <- FALSE
attr(dc, "Upper") <- FALSE
class(dc) <- "dist"
dc
}
|
/scratch/gouwar.j/cran-all/cranData/usedist/R/centroid.R
|
#' usedist: a package for working with distance matrices in R
#'
#' In usedist, we provide a number of functions to help with distance matrix
#' objects, such as those produced by the \code{dist} function. Some functions
#' are geared towards making or altering distance matrix objects. Others
#' relate to groups of items in the distance matrix. They provide access to
#' within- or between-group distances, or use these distances to infer the
#' distance to group centroids.
#'
#' @docType package
#' @name usedist
NULL
#' Set the names/labels of a \code{dist} object.
#'
#' @param d A distance matrix object of class \code{dist}.
#' @param nm New labels for the rows/columns.
#' @return A distance matrix with new row/column labels.
#' @export
#' @examples
#' m4 <- matrix(1:16, nrow=4, dimnames=list(LETTERS[1:4]))
#' dm4 <- dist(m4)
#' dist_setNames(dm4, LETTERS[9:12])
dist_setNames <- function (d, nm) {
# Convert to matrix so errors are generated on assignment
# if nm does not contain the same number of elements as d
dm <- as.matrix(d)
dimnames(dm) <- list(nm, nm)
stats::as.dist(dm)
}
#' Retrieve distances from a \code{dist} object.
#'
#' @param d A distance matrix object of class \code{dist}.
#' @param idx1,idx2 Indices specifying the distances to extract.
#' @return A vector of distances.
#' @export
#' @examples
#' m4 <- matrix(1:16, nrow=4, dimnames=list(LETTERS[1:4]))
#' dm4 <- dist(m4)
#' dist_get(dm4, "A", "C")
#' dist_get(dm4, "A", c("A", "B", "C", "D"))
#' dist_get(dm4, c("A", "B", "C"), c("B", "D", "B"))
dist_get <- function (d, idx1, idx2) {
d <- stats::as.dist(d)
if (is.character(idx1)) {
idx1 <- match(idx1, attr(d, "Labels"))
}
if (is.character(idx2)) {
idx2 <- match(idx2, attr(d, "Labels"))
}
n <- attr(d, "Size")
if (any(is.na(idx1) | (idx1 < 1) | (idx1 > n))) {
stop("idx1 out of range")
}
if (any(is.na(idx2) | (idx2 < 1) | (idx2 > n))) {
stop("idx2 out of range")
}
i <- pmin(idx1, idx2)
j <- pmax(idx1, idx2)
# Zeros are eliminated from index vectors
# Need to fill with NA if i and j are equal
idx <- ifelse(i == j, NA, n*(i-1) - i*(i-1)/2 + j-i)
ifelse(i == j, 0, d[idx])
}
#' Extract parts of a \code{dist} object.
#'
#' Extract a subset of values from a distance matrix. This function also works
#' to re-arrange the rows of a distance matrix, if they are provided in the
#' desired order.
#'
#' @param d A distance matrix object of class \code{dist}.
#' @param idx Indices specifying the subset of distances to extract.
#' @return A distance matrix.
#' @export
#' @examples
#' m4 <- matrix(1:16, nrow=4, dimnames=list(LETTERS[1:4]))
#' dm4 <- dist(m4)
#' dist_subset(dm4, c("A", "B", "C"))
#' dist_subset(dm4, c("D", "C", "B", "A"))
dist_subset <- function (d, idx) {
stats::as.dist(as.matrix(d)[idx, idx])
}
#' Create a data frame of distances between groups of items.
#'
#' @param d A distance matrix object of class \code{dist}.
#' @param g A factor representing the groups of objects in \code{d}.
#' @return A data frame with 6 columns:
#' \describe{
#' \item{Item1, Item2}{The items being compared.}
#' \item{Group1, Group2}{The groups to which the items belong.}
#' \item{Label}{A convenient label for plotting or comparison.}
#' \item{Distance}{The distance between Item1 and Item2.}}
#' @export
#' @examples
#' m4 <- matrix(1:16, nrow=4, dimnames=list(LETTERS[1:4]))
#' dm4 <- dist(m4)
#' g4 <- rep(c("Control", "Treatment"), each=2)
#' dist_groups(dm4, g4)
dist_groups <- function(d, g) {
d <- stats::as.dist(d)
g <- as.factor(g)
dsize <- attr(d, "Size")
if (length(g) != dsize) {
stop(
"Length of grouping vector (g) must equal number of observations in ",
"dist object (d)")
}
dlabels <- attr(d, "Labels")
idxs <- utils::combn(dsize, 2)
idx1 <- idxs[1,]
idx2 <- idxs[2,]
# For the between group labels, we need to keep the groups in factor order.
# Here, we record the level of the group to use for the first and second
# parts of the label.
level1 <- levels(g)[pmin(as.numeric(g[idx1]), as.numeric(g[idx2]))]
level2 <- levels(g)[pmax(as.numeric(g[idx1]), as.numeric(g[idx2]))]
data.frame(
Item1 = if (is.null(dlabels)) idx1 else dlabels[idx1],
Item2 = if (is.null(dlabels)) idx2 else dlabels[idx2],
Group1 = g[idx1],
Group2 = g[idx2],
Label = factor(ifelse(
level1 == level2,
paste("Within", level1),
paste("Between", level1, "and", level2))),
Distance = dist_get(d, idx1, idx2),
stringsAsFactors = FALSE)
}
#' Make a distance matrix using a custom distance function
#'
#' @param x A matrix of observations, one per row
#' @param distance_fcn A function used to compute the distance between two
#' rows of the data matrix. The two rows will be passed as the first and
#' second arguments to \code{distance_fcn}.
#' @param ... Additional arguments passed to \code{distance_fcn}.
#' @return A \code{dist} object containing the distances between rows of the
#' data matrix.
#' @details We do not set the \code{call} or \code{method} attributes of the
#' \code{dist} object.
#' @export
#' @examples
#' x <- matrix(sin(1:30), nrow=5)
#' rownames(x) <- LETTERS[1:5]
#' manhattan_distance <- function (v1, v2) sum(abs(v1 - v2))
#' dist_make(x, manhattan_distance)
dist_make <- function (x, distance_fcn, ...) {
distance_from_idxs <- function (idxs) {
i1 <- idxs[1]
i2 <- idxs[2]
distance_fcn(x[i1,], x[i2,], ...)
}
size <- nrow(x)
d <- apply(utils::combn(size, 2), 2, distance_from_idxs)
attr(d, "Size") <- size
xnames <- rownames(x)
if (!is.null(xnames)) {
attr(d, "Labels") <- xnames
}
attr(d, "Diag") <- FALSE
attr(d, "Upper") <- FALSE
class(d) <- "dist"
d
}
|
/scratch/gouwar.j/cran-all/cranData/usedist/R/dist.R
|
check_tidyverse <- function () {
pkg_names <- c("dplyr", "tidyr", "tibble")
is_installed <- sapply(pkg_names, requireNamespace, quietly = TRUE)
pkgs_not_installed <- pkg_names[!is_installed]
if (length(pkgs_not_installed) == 1) {
msg <- paste0(
"Package \"", pkgs_not_installed, "\" is not installed, but is ",
"needed by \"usedist::pivot_to_numeric_matrix\". Please install the ",
"missing package to use this function."
)
stop(msg, call. = FALSE)
}
if (length(pkgs_not_installed) > 1) {
pkgs_not_installed <- paste(pkgs_not_installed, collapse = "\", \"")
msg <- paste0(
"Packages \"", pkgs_not_installed, "\" are not installed, but are ",
"needed by \"usedist::pivot_to_numeric_matrix\". Please install the ",
"missing packages to use this function."
)
stop(msg, call. = FALSE)
}
}
#' Convert a data frame in long format to a numeric matrix
#'
#' @param data A data frame with numerical values in long format.
#' @param obs_col The column listing the observation, or row of the matrix.
#' @param feature_col The column listing the feature, or column of the matrix.
#' @param value_col The column listing the value, to be placed inside the
#' matrix.
#'
#' @details
#' The parameters \code{obs_col}, \code{feature_col}, and \code{value_col}
#' should be provided as bare column names. If any combination of row and
#' column does not appear in the data frame, a zero will be entered in the
#' resultant matrix.
#'
#' This function requires the packages \code{dplyr}, \code{tibble}, and
#' \code{tidyr} to be installed. If they are not installed, the function will
#' generate an error, with a message to install the appropriate packages.
#'
#' @export
#' @examples
#' longdata <- data.frame(
#' SampleID = paste0("Sample", c(1, 1, 1, 2, 2, 3, 3)),
#' FeatureID = paste0("Feature", c(1, 2, 3, 1, 2, 2, 3)),
#' Value = c(132, 41, 7, 56, 11, 929, 83))
#' longdata
#' pivot_to_numeric_matrix(longdata, SampleID, FeatureID, Value)
pivot_to_numeric_matrix <- function (data, obs_col, feature_col, value_col) {
check_tidyverse()
obs_col <- dplyr::enquo(obs_col)
feature_col <- dplyr::enquo(feature_col)
value_col <- dplyr::enquo(value_col)
value_fill <- list(0)
names(value_fill) <- dplyr::as_label(value_col)
# The function pivot_wider is not in older versions of tidyr.
# Fall back to spread if pivot_wider is not found
if (exists("pivot_wider", where=asNamespace("tidyr"), mode="function")) {
data_wide <- tidyr::pivot_wider(
data,
id_cols = !!obs_col,
names_from = !!feature_col,
values_from = !!value_col,
values_fill = value_fill)
} else {
data <- dplyr::select(data, !!obs_col, !!feature_col, !!value_col)
data_wide <- tidyr::spread(data, !!feature_col, !!value_col)
}
data_wide <- tibble::column_to_rownames(data_wide, dplyr::as_label(obs_col))
as.matrix(as.data.frame(data_wide))
}
|
/scratch/gouwar.j/cran-all/cranData/usedist/R/long_format.R
|
utils::globalVariables(
c(
"value",
"name",
"base",
"pckg_origin",
"n",
"pckg",
"func",
"total",
"count",
"desc",
"Function",
"Package",
"packfun",
"pckg_preferred",
"pckgx"
)
)
|
/scratch/gouwar.j/cran-all/cranData/usedthese/R/globals.R
|
#' Summarise function usage in a single document
#'
#' Consistent with knitr syntax highlighting, [used_here()] adds a
#' summary table of R package & function usage to a knitted Quarto or R Markdown document
#'
#' @details If the rendered summary includes rows where the package name is multiple packages
#' separated by a comma, this will be due to an unresolved conflict. The recommended approach
#' is to use the 'conflicted' package.
#'
#' @param fil If the usage summary is required in the document you are currently knitting,
#' then no argument need be specified.
#'
#' If you want to create a summary by running just the code chunk, then it is necessary to
#' specify the quoted name of the saved file. You should first load and attach the packages
#' used in a fresh R session.
#'
#' @return A printed kable table with the css class "usedthese"
#'
#' @export
#'
#' @examples
#' # Simple example which mimics a two-line script and creates
#' # an html table with a CSS class "usedthese"
#' usedthese::used_here("mean(c(1, 2, 3))\nsum(c(1, 2, 3))")
#'
used_here <- \(fil = knitr::current_input()) {
if (is.null(fil)) {
rlang::abort(
"If you are knitting the current document, i.e. you clicked the Render button, then leave fil unspecified. If you are running the code chunks, then ensure you library the packages first in a fresh R session and specify the saved filename quoted.",
fil = fil
)
}
old <- options(knitr.duplicate.label = "allow")
withr::defer(options(old))
if (stringr::str_ends(fil, "Rmd|qmd|rmarkdown")) {
purrr::walk(fil, knitr::purl, quiet = TRUE, documentation = 0)
fil <- stringr::str_replace(fil, "Rmd|qmd|rmarkdown", "R")
}
pckg_loaded <- .packages() |>
rlang::set_names()
funs_loaded <- pckg_loaded |>
purrr::map(\(x) base::ls(stringr::str_c("package:", x))) |>
tibble::enframe("pckg_loaded", "func") |>
tidyr::unnest(func)
get_mode <- \(x) {
ux <- unique(x)
ux[which.max(tabulate(match(x, ux)))]
}
funs_origin <- pckg_loaded |>
purrr::map(getNamespaceImports) |>
purrr::list_flatten() |>
tibble::enframe() |>
dplyr::filter(value != "TRUE") |>
tidyr::unnest(value) |>
tidyr::separate_wider_delim(name, "_", names = c("pckg_loaded", "pckg_origin")) |>
dplyr::rename(func = value) |>
dplyr::mutate(pckg_origin = get_mode(pckg_origin), .by = func) |>
dplyr::distinct()
funs_scouted <- conflicted::conflict_scout() |>
unlist() |>
dplyr::bind_rows()
if (nrow(funs_scouted) > 0) {
funs_scouted <- funs_scouted |>
tidyr::pivot_longer(tidyselect::everything(), names_to = "func") |>
dplyr::mutate(func = stringr::str_remove(func, "\\d$")) |>
dplyr::summarise(pckg_preferred = stringr::str_flatten_comma(value, na.rm = TRUE), .by = func)
} else {
funs_scouted <- tibble::tibble(pckg_preferred = "zzz", func = "zzz")
}
funs_augmented <- funs_loaded |>
dplyr::left_join(funs_origin, dplyr::join_by(pckg_loaded, func)) |>
dplyr::left_join(funs_scouted, dplyr::join_by(func)) |>
dplyr::group_by(func) |>
tidyr::fill(pckg_origin, .direction = "updown") |>
dplyr::mutate(
pckg_loaded = dplyr::coalesce(pckg_origin, pckg_loaded),
pckg_loaded = dplyr::coalesce(pckg_preferred, pckg_loaded)
) |>
dplyr::select(pckgx = pckg_loaded, func) |>
dplyr::arrange(func, pckgx) |>
dplyr::distinct(func, .keep_all = TRUE)
funs_coded <- fil |>
readr::read_lines() |>
highr::hi_latex(fallback = TRUE) |>
stringr::str_extract_all("([a-zA-Z_]+::)?\\\\hlkwd\\{([^\\{\\}]*(?=\\}))") |>
purrr::list_c() |>
tibble::as_tibble() |>
tidyr::separate_wider_regex(value, c(pckg = ".*?", "\\\\hlkwd\\{", func = ".*")) |>
dplyr::mutate(pckg = stringr::str_remove(pckg, "::") |> dplyr::na_if(""))
funs_used <-
funs_coded |>
dplyr::left_join(funs_augmented, dplyr::join_by(func)) |>
dplyr::mutate(pckg = dplyr::coalesce(pckg, pckgx)) |>
dplyr::count(pckg, func) |>
dplyr::mutate(func = stringr::str_c(func, "[", n, "]")) |>
dplyr::summarise(func = stringr::str_c(func, collapse = ", "), .by = pckg) |>
tidyr::drop_na()
funs_used |>
knitr::kable(
format = "html",
table.attr = "class = 'usedthese'",
col.names = c("Package", "Function")
) |>
kableExtra::kable_styling("striped")
}
|
/scratch/gouwar.j/cran-all/cranData/usedthese/R/used_here.R
|
#' Scrape the summaries for site-wide analysis
#'
#' Harvests and consolidates function usage tables from pages of a Quarto website
#' by searching for tables with the CSS class "usedthese"
#'
#' @param url The url to the website listing page of posts containing usage tables created with
#' [used_here()]
#'
#' @param num_links The number of links returned from the listing page may be restricted using
#'   this argument. Defaults to 30.
#'
#' @return A tibble summarising package & function usage
#'
#' @export
#'
#' @examples
#' # Uses a Quarto listing url to scrape & consolidate usage
#' \donttest{used_there("https://www.quantumjitter.com/project/", 1)}
#'
used_there <- \(url, num_links = 30) {
html <- rlang::try_fetch(url |> rvest::read_html(),
error = \(cnd) rlang::abort("URL currently unavailable. Please try later.", parent = cnd)
)
urls <- html |>
rvest::html_elements(".quarto-grid-link ,
.quarto-default-link ,
.quarto-table-link") |>
rvest::html_attr("href") |>
stringr::str_replace("^",
stringr::str_c(
httr::parse_url(url)$scheme, "://",
httr::parse_url(url)$hostname
)
) |>
utils::tail(num_links)
purrr::map(urls, \(x) {
x |>
rvest::read_html() |>
rvest::html_element(".usedthese") |>
rvest::html_table() |>
dplyr::mutate(url = x)
}) |>
purrr::list_flatten() |>
purrr::list_rbind() |>
tidyr::separate_longer_delim(Function, delim = ",") |>
tidyr::extract(Function, c("Function", "n"),
"([^ ]+)\\[(.+)\\]",
convert = TRUE)
}
|
/scratch/gouwar.j/cran-all/cranData/usedthese/R/used_there.R
|
#' @keywords internal
"_PACKAGE"
## usethis namespace: start
#' @importFrom lifecycle deprecated
## usethis namespace: end
NULL
|
/scratch/gouwar.j/cran-all/cranData/usedthese/R/usedthese-package.R
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup--------------------------------------------------------------------
library(usedthese)
## -----------------------------------------------------------------------------
used_there("https://www.quantumjitter.com/project/")
|
/scratch/gouwar.j/cran-all/cranData/usedthese/inst/doc/multipage.R
|
---
title: "Site-wide usage"
description: |
Harvest the summary tables of package & function usage created by used_here() ready for site-wide analysis.
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Site-wide usage}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup}
library(usedthese)
```
Having added `used_here()` to several of your Quarto website pages, you may want to make an overall site analysis of your package and function usage. `used_there()` scrapes and consolidates the tables into a `tibble` ready for analysis:
```{r}
used_there("https://www.quantumjitter.com/project/")
```
[Favourite Things](https://www.quantumjitter.com/project/box/) shows an example analysis which takes the tibble output from `used_there()`, augments these data with a category, and plots the most-used packages, the most-used functions and a word cloud.
|
/scratch/gouwar.j/cran-all/cranData/usedthese/inst/doc/multipage.Rmd
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup--------------------------------------------------------------------
options(tidyverse.quiet = TRUE)
options(xts.warn_dplyr_breaks_lag = FALSE)
library(conflicted)
library(dplyr)
library(tibble)
conflicts_prefer(dplyr::filter, dplyr::last)
library(usedthese)
library(xts, exclude = "first")
conflict_scout()
## -----------------------------------------------------------------------------
tribble(~group, ~a1, ~a2, ~b1,
"x", 1, 2, 3,
"x", 4, 5, 6,
"y", 7, 8, 9) |>
select(-starts_with("b")) |>
filter(group == "x") |>
mutate(first_a1 = first(a1),
last_a2 = last(a2))
## ----warning=FALSE------------------------------------------------------------
used_here()
|
/scratch/gouwar.j/cran-all/cranData/usedthese/inst/doc/usedthese.R
|
---
title: "Getting Started"
description: |
Add a summary of R package & function usage to a Quarto document.
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Getting Started}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
To add a summary table of package & function usage to the foot of a Quarto document, add `used_here()` to the end of the code. A separate code chunk with an appropriate heading is suggested but not essential.
The package author recommends using the [conflicted package](https://conflicted.r-lib.org) to resolve conflicts. In the example below, the dplyr package has been preferred over the stats package for `filter()` and over the xts package for `last()`.
An alternative approach to using conflicted is to use the `exclude` or `include.only` argument in `library()`. This is also shown below with the xts version of `first()` excluded and hence the dplyr version preferred.
## Some code
```{r setup}
options(tidyverse.quiet = TRUE)
options(xts.warn_dplyr_breaks_lag = FALSE)
library(conflicted)
library(dplyr)
library(tibble)
conflicts_prefer(dplyr::filter, dplyr::last)
library(usedthese)
library(xts, exclude = "first")
conflict_scout()
```
## More code
```{r}
tribble(~group, ~a1, ~a2, ~b1,
"x", 1, 2, 3,
"x", 4, 5, 6,
"y", 7, 8, 9) |>
select(-starts_with("b")) |>
filter(group == "x") |>
mutate(first_a1 = first(a1),
last_a2 = last(a2))
```
## Summary of usage
In the example below, `tribble()` is counted once against the (originating) tibble package even though it is also loaded by dplyr. And had we not used the conflicted package, `filter()` for example would have shown against the package name "dplyr, stats".
The rendered table is assigned the CSS class `.usedthese` to help other `used_*` functions find and aggregate multiple tables across one or more websites.
```{r warning=FALSE}
used_here()
```
|
/scratch/gouwar.j/cran-all/cranData/usedthese/inst/doc/usedthese.Rmd
|
---
title: "Site-wide usage"
description: |
Harvest the summary tables of package & function usage created by used_here() ready for site-wide analysis.
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Site-wide usage}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup}
library(usedthese)
```
Having added `used_here()` to several of your Quarto website pages, you may want to make an overall site analysis of your package and function usage. `used_there()` scrapes and consolidates the tables into a `tibble` ready for analysis:
```{r}
used_there("https://www.quantumjitter.com/project/")
```
[Favourite Things](https://www.quantumjitter.com/project/box/) shows an example analysis which takes the tibble output from `used_there()`, augments these data with a category, and plots the most-used packages, the most-used functions and a word cloud.
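As a minimal, not-evaluated sketch, the consolidated tibble can also be summarised directly; the `Package` and `n` column names below assume the default table produced by `used_here()`:
```{r, eval=FALSE}
usage <- used_there("https://www.quantumjitter.com/project/")
# rank packages by total function calls across all scraped pages
usage |>
  dplyr::count(Package, wt = n, sort = TRUE)
```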
|
/scratch/gouwar.j/cran-all/cranData/usedthese/vignettes/multipage.Rmd
|
---
title: "Getting Started"
description: |
Add a summary of R package & function usage to a Quarto document.
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Getting Started}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
To add a summary table of package & function usage to the foot of a Quarto document, add `used_here()` to the end of the code. A separate code chunk with an appropriate heading is suggested but not essential.
The package author recommends using the [conflicted package](https://conflicted.r-lib.org) to resolve conflicts. In the example below, the dplyr package has been preferred over the stats package for `filter()` and over the xts package for `last()`.
An alternative approach to using conflicted is to use the `exclude` or `include.only` argument in `library()`. This is also shown below with the xts version of `first()` excluded and hence the dplyr version preferred.
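As a minimal, not-evaluated sketch of the `library()` approach (the package and function choices here are purely illustrative):
```{r, eval=FALSE}
# load only the dplyr verbs you intend to use
library(dplyr, include.only = c("filter", "mutate"))
# or load xts while dropping its first() so the dplyr version wins
library(xts, exclude = "first")
```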
## Some code
```{r setup}
options(tidyverse.quiet = TRUE)
options(xts.warn_dplyr_breaks_lag = FALSE)
library(conflicted)
library(dplyr)
library(tibble)
conflicts_prefer(dplyr::filter, dplyr::last)
library(usedthese)
library(xts, exclude = "first")
conflict_scout()
```
## More code
```{r}
tribble(~group, ~a1, ~a2, ~b1,
"x", 1, 2, 3,
"x", 4, 5, 6,
"y", 7, 8, 9) |>
select(-starts_with("b")) |>
filter(group == "x") |>
mutate(first_a1 = first(a1),
last_a2 = last(a2))
```
## Summary of usage
In the example below, `tribble()` is counted once against the (originating) tibble package even though it is also loaded by dplyr. And had we not used the conflicted package, `filter()` for example would have shown against the package name "dplyr, stats".
The rendered table is assigned the CSS class `.usedthese` to help other `used_*` functions find and aggregate multiple tables across one or more websites.
```{r warning=FALSE}
used_here()
```
|
/scratch/gouwar.j/cran-all/cranData/usedthese/vignettes/usedthese.Rmd
|
#' @title colsToFront
#' @description Moves column names to the front or back of the names
#' @details Moves column names to the front or back of the names
#' @author Jared P. Lander
#' @export colsToFront
#' @param data data.frame or tbl
#' @param cols Columns that should be moved
#' @return Character vector of column names
#' @examples
#' theDF <- data.frame(A=1:10, B=11:20, C=1:10, D=11:20)
#' colsToFront(theDF, c('B', 'C'))
#' colsToFront(theDF, c('C', 'B'))
#' colsToFront(theDF, c('C', 'C'))
#' colsToBack(theDF, c('C', 'C'))
#' colsToBack(theDF, c('C', 'B'))
#' colsToBack(theDF, c('C', 'C'))
#'
colsToFront <- function(data, cols=names(data))
{
allCols <- names(data)
# get the columns that are not in cols
back <- allCols[!allCols %in% cols]
# return the new order
c(cols, back)
}
#' @title colsToBack
#' @rdname colsToFront
#' @export colsToBack
#' @inheritParams colsToFront
#'
colsToBack <- function(data, cols=names(data))
{
allCols <- names(data)
# get the columns that are not in cols
back <- allCols[!allCols %in% cols]
# return the new order
c(back, cols)
}
#' @title moveToFront
#' @description Rearranges column order by moving specified columns to the front or back.
#' @details Rearranges column order by moving specified columns to the front or back.
#' @export moveToFront
#' @author Jared P. Lander
#' @importFrom dplyr select_
#' @importFrom magrittr "%>%"
#' @param data data.frame
#' @param cols Character vector specifying the columns to be moved to the front or back
#' @return A data.frame with the columns in the right order
#' @examples
#' theDF <- data.frame(A=1:10, B=11:20, C=1:10, D=11:20)
#' moveToFront(theDF, c('B', 'C'))
#' moveToFront(theDF, c('C', 'B'))
#' moveToFront(theDF, c('C', 'C'))
#' moveToBack(theDF, c('C', 'C'))
#' moveToBack(theDF, c('C', 'B'))
#' moveToBack(theDF, c('C', 'C'))
#'
moveToFront <- function(data, cols)
{
colOrder <- colsToFront(data, cols)
data %>% select_(.dots=colOrder)
}
#' @title moveToBack
#' @rdname moveToFront
#' @export moveToBack
#' @inheritParams moveToFront
#'
moveToBack <- function(data, cols)
{
colOrder <- colsToBack(data, cols)
data %>% select_(.dots=colOrder)
}
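# For comparison, a sketch of the same reordering with newer dplyr verbs
# (relocate()/all_of()); shown only as an illustration, not part of the
# exported API, and wrapped in `if (FALSE)` so it never runs on source.
if (FALSE) {
  theDF <- data.frame(A=1:10, B=11:20, C=1:10, D=11:20)
  # equivalent of moveToFront(theDF, c('B', 'C'))
  dplyr::relocate(theDF, dplyr::all_of(c('B', 'C')))
  # equivalent of moveToBack(theDF, c('B', 'C'))
  dplyr::relocate(theDF, dplyr::all_of(c('B', 'C')), .after = dplyr::last_col())
}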
|
/scratch/gouwar.j/cran-all/cranData/useful/R/ColumnReorder.r
|
## maps a given set of numbers to the specified interval
## @nums: (vector) the numbers to be mapped
## @start (numeric) the beginning of the mapping
## @stop (numeric) the end of the mapping
#' Map numbers to interval
#'
#' Maps a range of numbers to a given interval
#'
#' formula: a + (x - min(x)) * (b - a) / (max(x) - min(x))
#'
#' @aliases MapToInterval mapping
#' @param nums The vector of numbers to be mapped
#' @param start The start of the interval
#' @param stop The end of the interval
#' @return The original numbers mapped to the given interval
#' @author Jared P. Lander
#' www.jaredlander.com
#' @export MapToInterval mapping
#' @seealso \code{\link{mapping}}
#' @keywords numbers mapping interval
#' @examples
#'
#' MapToInterval(1:10, start=0, stop=1)
#' mapping(1:10, start=0, stop=1)
#'
MapToInterval <- function(nums, start=1, stop=10)
{
#do the mapping: a + (x - min(x)) * (b - a) / (max(x) - min(x))
mapped <- start + (nums - min(nums)) * (stop - start) / diff(range(nums))
return(mapped)
}
# just a better name for the function
mapping <- function(nums, start=1, stop=10)
{
return(MapToInterval(nums=nums, start=start, stop=stop))
}
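# Worked example of the formula a + (x - min(x)) * (b - a) / (max(x) - min(x)):
# MapToInterval(c(2, 6, 10), start=0, stop=1)
# gives 0 + (c(2, 6, 10) - 2) * (1 - 0) / 8, i.e. c(0, 0.5, 1)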
|
/scratch/gouwar.j/cran-all/cranData/useful/R/MapToInterval.r
|
# flip binary variable
#' binary.flip
#'
#' Flip binary numbers
#'
#' @export binary.flip
#' @author Jared P. Lander
#' @aliases binary.flip
#' @param x A vector of 0/1 numbers.
#' @return X with 0's flipped to 1's and 1's flipped to 0's
#' @examples
#'
#' binary.flip(c(1,1,0,1,0,0,1))
#'
binary.flip <- function(x)
{
x*-1 + 1
}
|
/scratch/gouwar.j/cran-all/cranData/useful/R/binary.flip.r
|
#' Formula Builder
#'
#' Formula Builder
#'
#' Builds a formula easily given the left and right hand sides. Right now it only handles additive formulas and not interactions unless that is specified in the character.
#'
#' @param lhs Character vector for left side of formula
#' @param rhs Character vector for right side of formula
#' @author Jared P. Lander www.jaredlander.com
#' @aliases build.formula
#' @export build.formula
#' @importFrom stats as.formula
#' @seealso formula as.formula
#' @return A formula object
#' @examples
#'
#' build.formula("Y", "X")
#' build.formula(c("Y", "Z"), "X")
#' build.formula("Z", c("X", "Q"))
#' build.formula(c("Y", "Z"), c("X", "Q"))
#'
build.formula <- function(lhs, rhs)
{
if(is.null(lhs) && is.null(rhs))
{
return(NULL)
}
# build a formula for aggregation
theFormula <- as.formula(
sprintf("%s ~ %s",
paste(lhs, collapse=" + "),
paste(sprintf("`%s`", rhs), collapse=" + "))
)
return(theFormula)
}
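# Worked example of the sprintf() template above:
# build.formula(c("Y", "Z"), c("X", "Q")) pastes together the string
# "Y + Z ~ `X` + `Q`" (only the right-hand-side terms are backtick-quoted)
# and then converts it with as.formula()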
|
/scratch/gouwar.j/cran-all/cranData/useful/R/buildFormula.r
|
#' @title build.x
#'
#' @description Build the x matrix for a glmnet model
#'
#' @details Given a formula and a data.frame build the predictor matrix
#' @author Jared P. Lander
#' @aliases build.x
#' @export build.x
#' @param formula A formula
#' @param data A data.frame
#' @param contrasts Logical indicating whether a factor's base level is removed. Can be either one single value applied to every factor or a value for each factor. Values will be recycled if necessary.
#' @param sparse Logical indicating if result should be sparse.
#' @return A matrix of the predictor variables specified in the formula
#' @examples
#' require(ggplot2)
#' head(mpg)
#' head(build.x(hwy ~ class + cyl + year, data=mpg))
#'
#' testFrame <- data.frame(First=sample(1:10, 20, replace=TRUE),
#' Second=sample(1:20, 20, replace=TRUE),
#' Third=sample(1:10, 20, replace=TRUE),
#' Fourth=factor(rep(c("Alice","Bob","Charlie","David"), 5)),
#' Fifth=ordered(rep(c("Edward","Frank","Georgia","Hank","Isaac"), 4)),
#' Sixth=factor(rep(c("a", "b"), 10)), stringsAsFactors=F)
#' head(build.x(First ~ Second + Fourth + Sixth, testFrame,
#' contrasts=c("Fourth"=TRUE, "Fifth"=FALSE, "Sixth"=TRUE)))
#' head(build.x(First ~ Second + Fourth + Fifth + Sixth, testFrame,
#' contrasts=c(Fourth=TRUE, Fifth=FALSE, Sixth=TRUE)))
#' head(build.x(First ~ Second + Fourth + Fifth + Sixth, testFrame, contrasts=TRUE))
#' head(build.x(First ~ Second + Fourth + Fifth + Sixth, testFrame,
#' contrasts=FALSE))
#' head(build.x(First ~ Second + Fourth + Fifth + Sixth - 1, testFrame,
#' contrasts=TRUE))
#' build.x(First ~ Second + Fourth + Fifth + Sixth - 1, testFrame,
#' contrasts=TRUE, sparse=TRUE)
#' head(build.x(First ~ Second + Fourth + Fifth + Fourth*Sixth, testFrame, contrasts=TRUE))
#' head(build.x(First ~ Second + Fourth + Fifth + Third*Sixth, testFrame, contrasts=TRUE))
#' #' head(build.x(First ~ Second + Fourth + Fifth + Fourth*Sixth, testFrame, contrasts=FALSE))
#' head(build.x(First ~ Second + Fourth + Fifth + Third*Sixth, testFrame, contrasts=FALSE))
#' build.x(First ~ Second + Fourth + Fifth + Third*Sixth, testFrame, contrasts=FALSE, sparse=TRUE)
#'
#' ## if contrasts is a list then you can specify just certain factors
build.x <- function(formula, data, contrasts=TRUE, sparse=FALSE)
{
# ensure data is a data.frame
data <- ForceDataFrame(data)
if(sparse)
{
matFun <- Matrix::sparse.model.matrix
} else
{
matFun <- stats::model.matrix
}
if(length(contrasts) == 1 && contrasts)
{
return(matFun(formula, data=data))
}
# make index of factor or character columns
catIndex <- which(sapply(data, function(x) is.factor(x) | is.character(x)))
# only keep those that also appear in the factors attr of the terms of formula
theTerms <- rownames(attr(stats::terms(formula, data=data), "factors"))
# new cat index only keeping those variables that are necessary
catIndex <- catIndex[which(names(data)[catIndex] %in% theTerms)]
# also cut down contrasts argument
# save for another time
if(length(catIndex) == 0)
{
return(matFun(formula, data=data))
}
# if any of these identified columns is still a character, they need to be changed into a factor
# find out which columns are characters
#print(sapply(data[, catIndex], is.character))
charIndex <- catIndex[sapply(data[, catIndex, drop=FALSE], is.character)]
if(length(charIndex))
{
# convert to factor
# data[, charIndex] <- plyr::catcolwise(as.factor)(data[, charIndex, drop=FALSE])
if(utils::packageVersion('dplyr') <= '0.5.0')
{
data <- dplyr::mutate_at(data, .cols=charIndex, as.factor)
} else if(utils::packageVersion('dplyr') >= '0.6.0')
{
data <- dplyr::mutate_at(data, .vars=charIndex, as.factor)
}
## now all factors or characters are at least factors (and nothing extraneous was done) and only the appropriate columns will be put into the contrasts argument
}
# if multiple contrasts are given they must be named
contrNames <- names(contrasts)
if(length(contrasts) > 1 && is.null(contrNames))
{
stop("If specifying more than one contrast then it must be a named vector or list.")
}else if(!is.null(contrNames))
{
# get names of contrasts and use as the catIndex, factor/ordered columns not specified will be left to the default
catIndex <- contrNames
}else if(length(contrasts) == 1)
{
# make as many contrasts as necessary
contrasts <- rep(contrasts, times=length(catIndex))
}
# only non sparse is allowed for now
# sparse <- FALSE
# build contrast argument list
#contrArgs <- lapply(data[, catIndex, drop=FALSE], contrasts, contrasts=contrasts, sparse=sparse)
# contrArgs <- mapply(contrasts, data[, catIndex, drop=F], contrasts, MoreArgs=list(sparse=sparse))
contrArgs <- purrr::map2(.x=data[, catIndex, drop=FALSE], .y=contrasts, .f=stats::contrasts, sparse=FALSE)
# build model.matrix
matFun(formula, data=data, contrasts.arg=contrArgs)
#model.matrix(formula, data=data)[, -1]
}
#mapply(function(input, contrasts, sparse=FALSE){ contrasts(x=input, contrasts=contrasts, sparse=sparse) }, testFrame[, 4:5, drop=F], c(T), MoreArgs=list(sparse=F))
#head(model.matrix(~ ., data=testFrame, ))
#' ForceDataFrame
#'
#' Force matrix and arrays to data.frame
#'
#' This is a helper function for build.x and build.y to convert arrays and matrices--which are not accepted in model.frame--into data.frames
#'
#' @author Jared P. Lander
#' @aliases ForceDataFrame
#' @return a data.frame of the data
#' @param data matrix, data.frame, array, list, etc. . .
#'
ForceDataFrame <- function(data)
{
if(any(class(data) %in% c("matrix", "array")))
{
return(as.data.frame(data))
}
return(data)
}
#' build.y
#'
#' Build the y object from a formula and data
#'
#' Given a formula and a data.frame build the y object
#' @author Jared P. Lander
#' @aliases build.y
#' @export build.y
#' @importFrom stats model.frame
#' @param formula A formula
#' @param data A data.frame
#' @return The y object from a formula and data
#' @examples
#' require(ggplot2)
#' head(mpg)
#' head(build.y(hwy ~ class + cyl + year, data=mpg))
#'
build.y <- function(formula, data)
{
# build a model frame
theFrame <- model.frame(formula, data=ForceDataFrame(data))
# extract the response
theFrame[[1]]
}
|
/scratch/gouwar.j/cran-all/cranData/useful/R/buildMatrix.r
|
#' @title upper.case
#' @description Checks if strings are all upper case
#' @details Checks if strings are all upper case. This is a wrapper for \code{find.case('text', 'upper')}. If string is all numbers it returns TRUE.
#' @export upper.case
#' @aliases upper.case
#' @author Jared P. Lander
#' @param string Character vector of strings to check cases
#' @return A vector of TRUE and FALSE
#' @seealso find.case lower.case mixed.case numeric.case
#' @examples
#' toCheck <- c('BIG', 'little', 'Mixed', 'BIG WITH SPACE', 'little with space', 'MIXED with SPACE')
#' upper.case(toCheck)
upper.case <- function(string)
{
find.case(string, 'upper')
}
#' @title lower.case
#' @description Checks if strings are all lower case
#' @details Checks if strings are all lower case. This is a wrapper for \code{find.case('text', 'lower')}. If string is all numbers it returns TRUE.
#' @export lower.case
#' @aliases lower.case
#' @author Jared P. Lander
#' @param string Character vector of strings to check cases
#' @return A vector of TRUE and FALSE
#' @seealso find.case upper.case mixed.case numeric.case
#' @examples
#' toCheck <- c('BIG', 'little', 'Mixed', 'BIG WITH SPACE', 'little with space', 'MIXED with SPACE')
#' lower.case(toCheck)
lower.case <- function(string)
{
find.case(string, 'lower')
}
#' @title mixed.case
#' @description Checks if strings are a mix of upper and lower case
#' @details Checks if strings are a mix of upper and lower case. This is a wrapper for \code{find.case('text', 'mixed')}. If string is all numbers it returns FALSE.
#' @export mixed.case
#' @aliases mixed.case
#' @author Jared P. Lander
#' @param string Character vector of strings to check cases
#' @return A vector of TRUE and FALSE
#' @seealso find.case upper.case lower.case numeric.case
#' @examples
#' toCheck <- c('BIG', 'little', 'Mixed', 'BIG WITH SPACE', 'little with space', 'MIXED with SPACE')
#' mixed.case(toCheck)
mixed.case <- function(string)
{
find.case(string, 'mixed')
}
#' @title numeric.case
#' @description Checks if strings are all numbers or spaces
#' @details Checks if strings are all numbers and spaces. This is a wrapper for \code{find.case('text', 'numeric')}.
#' @export numeric.case
#' @aliases numeric.case
#' @author Jared P. Lander
#' @param string Character vector of strings to check cases
#' @return A vector of TRUE and FALSE
#' @seealso find.case upper.case lower.case mixed.case
#' @examples
#' toCheck <- c('BIG', 'little', 'Mixed', 'BIG WITH SPACE',
#' 'little with space', 'MIXED with SPACE', '17')
#' numeric.case(toCheck)
numeric.case <- function(string)
{
find.case(string, 'numeric')
}
#' @title find.case
#' @description Checks whether strings are all upper case, all lower case, mixed case or all numeric
#' @details Checks whether strings are all upper case, all lower case, mixed case or all numeric (digits and spaces). Strings made up entirely of numbers return TRUE for the upper, lower and numeric checks.
#' @export find.case
#' @aliases find.case
#' @author Jared P. Lander
#' @param string Character vector of strings to check cases
#' @param case Whether checking for upper or lower case
#' @return A vector of TRUE and FALSE
#' @seealso upper.case lower.case numeric.case mixed.case
#' @examples
#' toCheck <- c('BIG', 'little', 'Mixed', 'BIG WITH SPACE', 'little with space', 'MIXED with SPACE')
#' find.case(toCheck, 'upper')
#' find.case(toCheck, 'lower')
find.case <- function(string, case=c('upper', 'lower', 'mixed', 'numeric'))
{
# find which case
case <- match.arg(case)
# ensure that string is a character
string <- as.character(string)
# build patterns
# the entire item must be lower or upper, or mixed
# for mixed we check if it is all upper or lower and then negate the answer
patterns <- c(upper='^[A-Z0-9 ]+$',
lower='^[a-z0-9 ]+$',
mixed='(^[A-Z0-9 ]+$)|(^[a-z0-9 ]+$)|(^[0-9 ]+$)',
numeric='^[0-9 ]+$'
)
# find answer
answer <- grepl(pattern=patterns[case], x=string, perl=TRUE)
if(case == 'mixed')
return(!answer)
return(answer)
}
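# Worked examples of the patterns above:
# find.case('BIG WITH SPACE', 'upper') -> TRUE (only A-Z, digits and spaces)
# find.case('MIXED with SPACE', 'mixed') -> TRUE (matches none of the all-upper,
# all-lower or all-numeric patterns, so the negated answer is TRUE)
# find.case('17', 'mixed') -> FALSE (all numeric, so not considered mixed)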
|
/scratch/gouwar.j/cran-all/cranData/useful/R/checkCase.r
|
#' @title classdf
#' @description Get class information for each column in a \code{\link{data.frame}}.
#' @details Get class information for each column in a \code{\link{data.frame}}.
#' @aliases classdf
#' @export classdf
#' @author Jared P. Lander
#' @param data \code{link{data.frame}} that is to be inspected.
#' @param cols The columns (named or numeric) to be included in the check.
#' @return A vector detailing the class of each column.
#' @examples
#' classdf(CO2)
#' classdf(iris)
#' classdf(mtcars)
classdf <- function(data, cols)
{
# stop if it is not a data.frame
if(!is.data.frame(data))
{
stop("data must be a data.frame")
}
# if cols is not supplied check all columns
if(missing(cols))
{
cols <- 1:ncol(data)
}
# check the class of the columns
sapply(data[, cols], class)
}
|
/scratch/gouwar.j/cran-all/cranData/useful/R/classCheck.r
|
#' List Comparison
#'
#' List Comparison
#'
#' Compare elements of two equal length lists.
#'
#' @export compare.list
#' @aliases compare.list
#' @param a A List
#' @param b A List
#' @return A vector with a logical indicator for equality of each element
#' @author Jared P. Lander www.jaredlander.com
#' @keywords list
#' @examples
#'
#' vect <- c(mean, mode, mean)
#' vect2 <- c(mean, mode, max)
#' vect3 <- c(mean, mean)
#' compare.list(vect, vect)
#' compare.list(vect, vect2)
#' tryCatch(compare.list(vect, vect3), error=function(e) print("Caught error"))
#'
compare.list <- function(a, b)
{
# get length of lists
a.length <- length(a)
b.length <- length(b)
## make sure lists are the same length
if(a.length != b.length)
{
stop("a and b must be the same length", call.=FALSE)
}
# build a vector to hold results
result <- rep(FALSE, a.length)
# check identical equality
for(i in 1:a.length)
{
result[i] <- identical(a[[i]], b[[i]])
}
rm(a, b, a.length, b.length)
# return results
return(result)
}
|
/scratch/gouwar.j/cran-all/cranData/useful/R/comparisons.r
|
#' @title pol2cart
#' @description Converts polar coordinates to cartesian coordinates
#' @details Converts polar coordinates to cartesian coordinates using a simple conversion. The angle, \code{theta} must be in radians.
#'
#' Somewhat inspired by http://www.r-bloggers.com/convert-polar-coordinates-to-cartesian/ and https://www.mathsisfun.com/polar-cartesian-coordinates.html
#' @export pol2cart
#' @importFrom dplyr data_frame
#' @aliases pol2cart
#' @author Jared P. Lander
#' @param r The radius of the point
#' @param theta The angle of the point, in radians
#' @param degrees Logical indicating if theta is specified in degrees
#' @return A data.frame holding the (x,y) coordinates and original polar coordinates
#' @examples
#'
#' polarRadPosTop <- data.frame(r=c(3, 5, 3, 5, 4, 6, 4, 6, 2),
#' theta=c(0, pi/6, pi/4, pi/3, pi/2, 2*pi/3, 3*pi/4, 5*pi/6, pi))
#' polarRadPosBottom <- data.frame(r=c(3, 5, 3, 5, 4, 6, 4, 6, 2),
#' theta=c(pi, 7*pi/6, 5*pi/4, 4*pi/3, 3*pi/2, 5*pi/3, 7*pi/4, 9*pi/6, 2*pi))
#' polarRadNegTop <- data.frame(r=c(3, 5, 3, 5, 4, 6, 4, 6, 2),
#' theta=-1*c(0, pi/6, pi/4, pi/3, pi/2, 2*pi/3, 3*pi/4, 5*pi/6, pi))
#' polarRadNegBottom <- data.frame(r=c(3, 5, 3, 5, 4, 6, 4, 6, 2),
#' theta=-1*c(pi, 7*pi/6, 5*pi/4, 4*pi/3, 3*pi/2, 5*pi/3, 7*pi/4, 9*pi/6, 2*pi))
#'
#' pol2cart(polarRadPosTop$r, polarRadPosTop$theta)
#' pol2cart(polarRadPosBottom$r, polarRadPosBottom$theta)
#' pol2cart(polarRadNegTop$r, polarRadNegTop$theta)
#' pol2cart(polarRadNegBottom$r, polarRadNegBottom$theta)
#'
pol2cart <- function(r, theta, degrees=FALSE)
{
# convert degrees to radians if so requested
origTheta <- theta
if(degrees)
{
theta <- theta*pi/180
}
# compute x
x <- r*cos(theta)
# compute y
y <- r*sin(theta)
data_frame(x=x, y=y, r=r, theta=origTheta)
}
#' @title cart2pol
#' @description Converts cartesian coordinates to polar coordinates
#' @details Converts cartesian coordinates to polar coordinates using a simple conversion. The returned angle, \code{theta}, is in radians unless \code{degrees=TRUE}.
#'
#' Somewhat inspired by http://www.r-bloggers.com/convert-polar-coordinates-to-cartesian/ and https://www.mathsisfun.com/polar-cartesian-coordinates.html
#' @export cart2pol
#' @importFrom magrittr "%<>%"
#' @importFrom dplyr mutate data_frame
#' @aliases cart2pol
#' @author Jared P. Lander
#' @param x The x-coordinate of the point
#' @param y The y-coordinate of the point
#' @param degrees Logical indicating if theta should be returned in degrees
#' @return A data.frame holding the polar coordinates and the original (x,y) coordinates
#' @examples
#'
#' library(dplyr)
#' x1 <- c(1, sqrt(3)/2, sqrt(2)/2, 1/2, 0)
#' y1 <- c(0, 1/2, sqrt(2)/2, sqrt(3)/2, 1)
#' d1 <- data_frame(x=x1, y=y1, Q='I')
#'
#' x2 <- c(0, -1/2, -sqrt(2)/2, -sqrt(3)/2, -1)
#' y2 <- c(1, sqrt(3)/2, sqrt(2)/2, 1/2, 0)
#' d2 <- data_frame(x=x2, y=y2, Q='II')
#'
#' x3 <- c(-1, -sqrt(3)/2, -sqrt(2)/2, -1/2, 0)
#' y3 <- c(0, -1/2, -sqrt(2)/2, -sqrt(3)/2, -1)
#' d3 <- data_frame(x=x3, y=y3, Q='III')
#'
#' x4 <- c(0, 1/2, sqrt(2)/2, sqrt(3)/2, 1)
#' y4 <- c(-1, -sqrt(3)/2, -sqrt(2)/2, -1/2, 0)
#' d4 <- data_frame(x=x4, y=y4, Q='IV')
#'
#' dAll <- bind_rows(d1, d2, d3, d4)
#'
#' cart2pol(dAll$x, dAll$y)
#' cart2pol(dAll$x, dAll$y, degrees=TRUE)
#'
cart2pol <- function(x, y, degrees=FALSE)
{
# calculate r with sqrt of x and y squared
r <- sqrt(x^2 + y^2)
# calculate theta with arctan
theta <- atan2(y, x)
result <- data_frame(r=r, theta=theta, x=x, y=y)
## adjust angle for appropriate quadrant
## quadrants I and II need no adjustment
## quadrants III and IV, add 2*pi (360 degrees)
result %<>% mutate(theta=theta + (y < 0)*2*pi)
# return as degrees if requested
if(degrees)
{
result %<>% mutate(theta=theta*180/pi)
}
return(result)
}
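# Worked example of the quadrant adjustment above:
# cart2pol(0, -1): r = 1 and atan2(-1, 0) = -pi/2; since y < 0 we add 2*pi,
# giving theta = 3*pi/2 (or 270 when degrees=TRUE)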
|
/scratch/gouwar.j/cran-all/cranData/useful/R/coordinates.r
|
## Functions to grab the corner of data similar to head or tail
#' @title WhichCorner
#' @description Function to build the right row selection depending on the desired corner.
#' @details Function to build the right row selection depending on the desired corner. Helper function for getting the indexing for data.frame's, matrices
#' @author Jared P. Lander
#' @aliases WhichCorner
#' @param corner (character) which corner to display c("topleft", "bottomleft", "topright", "bottomright")
#' @param r (numeric) the number of rows to show
#' @param c (numeric) the number of columns to show
#' @param object The name of the object that is being subsetted
#' @return An expression that is evaluated to return the proper portion of the data
#' @examples
#' \dontrun{
#' WhichCorner('topleft')
#' WhichCorner('bottomleft')
#' WhichCorner('topright')
#' WhichCorner('bottomright')
#'
#' WhichCorner('topleft', r=6)
#' WhichCorner('bottomleft', r=6)
#' WhichCorner('topright', r=6)
#' WhichCorner('bottomright', r=6)
#'
#' WhichCorner('topleft', c=7)
#' WhichCorner('bottomleft', c=7)
#' WhichCorner('topright', c=7)
#' WhichCorner('bottomright', c=7)
#'
#' WhichCorner('topleft', r=8, c=3)
#' WhichCorner('bottomleft', r=8, c=3)
#' WhichCorner('topright', r=8, c=3)
#' WhichCorner('bottomright', r=8, c=3)
#' }
#'
WhichCorner <- function(corner=c("topleft", 'bottomleft', 'topright', 'bottomright'), r=5L, c=5L, object="x")
{
corner <- match.arg(corner)
theCorners <- list(
topleft=sprintf("list(rows=1:%s, cols=1:%s)", r, c),
bottomleft=sprintf("list(rows=(nrow(%s)-%s+1):nrow(%s), cols=1:%s)", object, r, object, c),
topright=sprintf("list(rows=1:%s, cols=(ncol(%s)-%s+1):ncol(%s))", r, object, c, object),
bottomright=sprintf("list(rows=(nrow(%s)-%s+1):nrow(%s), cols=(ncol(%s)-%s+1):ncol(%s))", object, r, object, object, c, object)
)
return(parse(text=theCorners[[corner]]))
}
# eval(WhichCorner(corner="topleft", 5, 5, "testFrame"))
# eval(WhichCorner(corner="topright", 5, 5, "testFrame"))
# eval(WhichCorner(corner="bottomleft", 5, 5, "testFrame"))
# eval(WhichCorner(corner="bottomright", 5, 5, "testFrame"))
## S3 generic function for getting the corner of data
#' Grabs a corner of a data set
#'
#' Display a corner section of a rectangular data set
#'
#' Displays a
#' corner of a rectangular data set such as a data.frame, matrix or table. If showing
#' the right side or bottom, the order of the data is preserved.
#'
#' The default method reverts to simply calling \code{\link{head}}
#'
## @aliases corner corner.default corner.data.frame corner.matrix
#' @title corner
#' @description Display a corner section of a rectangular data set
#' @details Displays a corner of a rectangular data set such as a data.frame, matrix or table. If showing the right side or bottom, the order of the data is preserved.
#' @author Jared P. Lander
#' @rdname corner
#' @param x The data
#' @param r Number of rows to display
#' @param c Number of columns to show
#' @param corner Which corner to grab. Possible values are c("topleft", "bottomleft", "topright", "bottomright")
#' @param \dots Arguments passed on to other functions
#' @return ... The part of the data set that was requested. The size depends on r and c and the position depends on corner.
#' @seealso \code{\link{head}} \code{\link{tail}} \code{\link{topleft}} \code{\link{topright}} \code{\link{bottomleft}} \code{\link{bottomright}} \code{\link{left}} \code{\link{right}}
#' @export corner
#' @keywords corner head tail display subsection view
#' @examples
#'
#' data(diamonds)
#' head(diamonds) # displays all columns
#' corner(diamonds) # displays first 5 rows and only the first 5 columns
#' corner(diamonds, corner="bottomleft") # displays the last 5 rows and the first 5 columns
#' corner(diamonds, corner="topright") # displays the first 5 rows and the last 5 columns
#'
corner <- function(x, ...)
{
UseMethod("corner")
}
## gets the corner for a data.frame
## @x (data.frame) the data
## @r (numeric) the number of rows to show
## @c (numeric) the number of columns to show
## @corner (character) which corner to return, c("topleft", "bottomleft", "topright", "bottomright")
#' @rdname corner
#' @export
#'
corner.data.frame <- function(x, r=5L, c=5L, corner="topleft", ...)
{
r <- if(nrow(x) < r) nrow(x) else r
c <- if(ncol(x) < c) ncol(x) else c
seqs <- eval(WhichCorner(corner=corner, r=r, c=c, object="x"))
return(x[seqs$rows, seqs$cols, drop=FALSE])
}
## gets the corner for a matrix
## @x (matrix) the data
## @r (numeric) the number of rows to show
## @c (numeric) the number of columns to show
## @corner (character) which corner to return, c("topleft", "bottomleft", "topright", "bottomright")
#' @rdname corner
#' @export
corner.matrix <- function(x, r=5L, c=5L, corner="topleft", ...)
{
r <- if(nrow(x) < r) nrow(x) else r
c <- if(ncol(x) < c) ncol(x) else c
seqs <- eval(WhichCorner(corner=corner, r=r, c=c, object="x"))
return(x[seqs$rows, seqs$cols, drop=FALSE])
}
#' @rdname corner
#' @export
corner.table <- function(x, r=5L, c=5L, corner="topleft", ...)
{
r <- if(nrow(x) < r) nrow(x) else r
c <- if(ncol(x) < c) ncol(x) else c
seqs <- eval(WhichCorner(corner=corner, r=r, c=c, object="x"))
return(x[seqs$rows, seqs$cols, drop=FALSE])
}
## gets the corner for default
## @x (data) the data
## @r (numeric) the number of rows to show
#' @rdname corner
#' @importFrom utils head
#' @export
corner.default <- function(x, r=5L, ...)
{
head(x, n=r, ...)
}
#' Grabs the top left corner of a data set
#'
#' Display the top left corner of a rectangular data set
#'
#' Displays the top left corner of a rectangular data set.
#'
#' This is a wrapper function for \code{\link{corner}}
#'
#' @aliases topleft
#' @rdname topleft
#' @param x The data
#' @param r Number of rows to display
#' @param c Number of columns to show
#' @param \dots Arguments passed on to other functions
#' @return ... The top left corner of the data set that was requested. The size depends on r and c.
#' @author Jared P. Lander www.jaredlander.com
#' @seealso \code{\link{head}} \code{\link{tail}} \code{\link{corner}} \code{\link{topright}} \code{\link{bottomleft}} \code{\link{bottomright}} \code{\link{left}} \code{\link{right}}
#' @export topleft
#' @keywords corner head tail display subsection view
#' @examples
#'
#' data(diamonds)
#' head(diamonds) # displays all columns
#' topleft(diamonds) # displays first 5 rows and only the first 5 columns
#'
topleft <- function(x, r=5L, c=5L, ...)
{
corner(x, r=r, c=c, corner="topleft", ...)
}
#' Grabs the top right corner of a data set
#'
#' Display the top right corner of a rectangular data set
#'
#' Displays the top right corner of a rectangular data set.
#'
#' This is a wrapper function for \code{\link{corner}}
#'
#' @aliases topright
#' @rdname topright
#' @param x The data
#' @param r Number of rows to display
#' @param c Number of columns to show
#' @param \dots Arguments passed on to other functions
#' @return ... The top right corner of the data set that was requested. The size depends on r and c.
#' @author Jared P. Lander www.jaredlander.com
#' @seealso \code{\link{head}} \code{\link{tail}} \code{\link{corner}} \code{\link{topleft}} \code{\link{bottomleft}} \code{\link{bottomright}} \code{\link{left}} \code{\link{right}}
#' @export topright
#' @keywords corner head tail display subsection view
#' @examples
#'
#' data(diamonds)
#' head(diamonds) # displays all columns
#' topright(diamonds) # displays first 5 rows and only the last 5 columns
#'
topright <- function(x, r=5L, c=5L, ...)
{
corner(x, r=r, c=c, corner="topright", ...)
}
#' Grabs the bottom left corner of a data set
#'
#' Display the bottom left corner of a rectangular data set
#'
#' Displays the bottom left corner of a rectangular data set.
#'
#' This is a wrapper function for \code{\link{corner}}
#'
#' @aliases bottomleft
#' @rdname bottomleft
#' @param x The data
#' @param r Number of rows to display
#' @param c Number of columns to show
#' @param \dots Arguments passed on to other functions
#' @return ... The bottom left corner of the data set that was requested. The size depends on r and c.
#' @author Jared P. Lander www.jaredlander.com
#' @seealso \code{\link{head}} \code{\link{tail}} \code{\link{corner}} \code{\link{topright}} \code{\link{topleft}} \code{\link{bottomright}} \code{\link{left}} \code{\link{right}}
#' @export bottomleft
#' @keywords corner head tail display subsection view
#' @examples
#'
#' data(diamonds)
#' head(diamonds) # displays all columns
#' bottomleft(diamonds) # displays last 5 rows and only the first 5 columns
#'
bottomleft <- function(x, r=5L, c=5L, ...)
{
corner(x, r=r, c=c, corner="bottomleft", ...)
}
#' Grabs the bottom right corner of a data set
#'
#' Display the bottom right corner of a rectangular data set
#'
#' Displays the bottom right corner of a rectangular data set.
#'
#' This is a wrapper function for \code{\link{corner}}
#'
#' @aliases bottomright
#' @rdname bottomright
#' @param x The data
#' @param r Number of rows to display
#' @param c Number of columns to show
#' @param \dots Arguments passed on to other functions
#' @return ... The bottom right corner of the data set that was requested. The size depends on r and c.
#' @author Jared P. Lander www.jaredlander.com
#' @seealso \code{\link{head}} \code{\link{tail}} \code{\link{corner}} \code{\link{topright}} \code{\link{bottomleft}} \code{\link{topleft}} \code{\link{left}} \code{\link{right}}
#' @export bottomright
#' @keywords corner head tail display subsection view
#' @examples
#'
#' data(diamonds)
#' head(diamonds) # displays all columns
#' bottomright(diamonds) # displays last 5 rows and only the last 5 columns
#'
bottomright <- function(x, r=5L, c=5L, ...)
{
corner(x, r=r, c=c, corner="bottomright", ...)
}
#' Grabs the left side of a data set
#'
#' Display the left side of a rectangular data set
#'
#' Displays the left side of a rectangular data set.
#'
#' This is a wrapper function for \code{\link{corner}}
#'
#' @aliases left
#' @rdname left
#' @param x The data
#' @param c Number of columns to show
#' @param \dots Arguments passed on to other functions
#' @return ... The left side of the data set that was requested. The size depends on c.
#' @author Jared P. Lander www.jaredlander.com
#' @seealso \code{\link{head}} \code{\link{tail}} \code{\link{corner}} \code{\link{topright}} \code{\link{bottomleft}} \code{\link{bottomright}} \code{\link{topleft}} \code{\link{right}}
#' @export left
#' @keywords corner head tail display subsection view
#' @examples
#'
#' data(diamonds)
#' head(diamonds) # displays all columns
#' left(diamonds) # displays all rows and only the first 5 columns
#'
left <- function(x, c=5L, ...)
{
corner(x, r=nrow(x), c=c, corner="topleft", ...)
}
#' Grabs the right side of a data set
#'
#' Display the right side of a rectangular data set
#'
#' Displays the right side of a rectangular data set.
#'
#' This is a wrapper function for \code{\link{corner}}
#'
#' @aliases right
#' @rdname right
#' @param x The data
#' @param c Number of columns to show
#' @param \dots Arguments passed on to other functions
#' @return ... The right side of the data set that was requested. The size depends on c.
#' @author Jared P. Lander www.jaredlander.com
#' @seealso \code{\link{head}} \code{\link{tail}} \code{\link{corner}} \code{\link{topright}} \code{\link{bottomleft}} \code{\link{bottomright}} \code{\link{topleft}} \code{\link{left}}
#' @export right
#' @keywords corner head tail display subsection view
#' @examples
#'
#' data(diamonds)
#' head(diamonds) # displays all columns
#' right(diamonds) # displays all rows and only the last 5 columns
#'
right <- function(x, c=5L, ...)
{
corner(x, r=nrow(x), c=c, corner="topright", ...)
}
|
/scratch/gouwar.j/cran-all/cranData/useful/R/corner.r
|
## formatting functions
#' multiple
#'
#' Order of Magnitude Formatter
#'
#' This divides the number by the appropriate amount and adds on the corresponding symbol at the end of the number.
#'
#' @author Jared P. Lander
#' @aliases multiple
#' @export multiple
#' @param x Vector of numbers to be formatted.
#' @param multiple The multiple to display numbers in. This symbol will be added to the end of the numbers.
#' @param big.mark Character specifying the thousands separator
#' @param extra DEPRECATED, use `big.mark` and `prefix` instead: Function for perform any further formatting.
#' @param digits Number of decimal places for rounding.
#' @param prefix Symbol to put in front of the numbers such as a dollar sign.
#' @param scientific Logical (default: `FALSE`) indicating if the numbers should be returned in scientific notation.
#' @return Character vector of formatted numbers.
#' @md
#' @examples
#'
#' require(scales)
#' vect <- c(1000, 1500, 23450, 21784, 875003780)
#' multiple(vect)
#' multiple(vect, extra=dollar)
#' multiple(vect, extra=identity)
#'
#' require(ggplot2)
#' data(diamonds)
#' ggplot(diamonds, aes(x=x, y=y, color=price*100)) + geom_point() +
#' scale_color_gradient2(labels=multiple)
#'
multiple <- function(x,
multiple=c("K", "M", "B", "T", "H", "k", "m", "b", "t", "h"),
big.mark=',',
extra, digits=0,
prefix='',
scientific=FALSE)
{
assertthat::assert_that(is.numeric(x))
if(!missing(extra) && is.function(extra))
{
big.mark <- dplyr::case_when(
identical(extra, scales::comma) ~ ',',
identical(extra, identity) ~ '',
TRUE ~ big.mark
)
prefix <- dplyr::case_when(
identical(extra, scales::dollar) ~ '$',
TRUE ~ prefix
)
}
# get the multiple
multiple=match.arg(multiple)
# set up a vector for dividing
dividers <- c("K"=1000, "M"=1000000, "B"=1000000000, "T"=1000000000000, "H"=100)
# get what we're dividing by
divider <- dividers[toupper(multiple)]
x <- purrr::map_dbl(x, ~ round(.x / divider, digits=digits))
# x <- format(x, digits=digits, big.mark=big.mark, scientific=scientific, trim=TRUE)
x <- purrr::map_chr(x, ~ format(.x, big.mark=big.mark, scientific=scientific, trim=TRUE))
sprintf("%s%s%s", prefix, x, multiple)
}
#' multiple_format
#'
#' Multiple Style Formatting
#'
#' Since ggplot requires a function for formatting this allows the user to specify the function's arguments, which will return a function that can be used by ggplot.
#'
#' @param \dots Arguments to be passed onto \code{\link{multiple}}
#' @return The function \code{\link{multiple}}.
#' @author Jared P. Lander
#' @export multiple_format
#' @aliases multiple_format
#' @examples
#'
#' library(scales)
#' vect <- c(1000, 1500, 23450, 21784, 875003780)
#' multiple_format()(vect)
#' multiple_format(extra=dollar)(vect)
#' multiple_format(extra=identity)(vect)
#'
#' require(ggplot2)
#' data(diamonds)
#' ggplot(diamonds, aes(x=x, y=y, color=price*100)) + geom_point() +
#' scale_color_gradient2(labels=multiple_format(extra=dollar))
#'
multiple_format <- function(...)
{
function(x) multiple(x, ...)
}
#' multiple.dollar
#'
#' Order of Magnitude Formatter
#'
#' Simply a wrapper for multiple that prespecifies the extra dollar.
#'
#' @author Jared P. Lander
#' @aliases multiple.dollar
#' @export multiple.dollar
#' @param x Vector of numbers to be formatted.
#' @param \dots Further arguments to be passed on to \code{\link{multiple}}
#' @return Character vector of dollar formatted numbers.
#' @examples
#'
#' require(scales)
#' vect <- c(1000, 1500, 23450, 21784, 875003780)
#' multiple.dollar(vect)
#' multiple.dollar(vect, multiple="k")
#' multiple.dollar(vect, multiple="h")
#'
#' require(ggplot2)
#' data(diamonds)
#' ggplot(diamonds, aes(x=x, y=y, color=price*100)) + geom_point() +
#' scale_color_gradient2(labels=multiple.dollar)
#'
multiple.dollar <- function(x, ...)
{
multiple(x=x, extra=scales::dollar, ...)
}
#' multiple.comma
#'
#' Order of Magnitude Formatter
#'
#' Simply a wrapper for multiple that prespecifies the extra comma.
#'
#' @author Jared P. Lander
#' @aliases multiple.comma
#' @export multiple.comma
#' @param x Vector of numbers to be formatted.
#' @param \dots Further arguments to be passed on to \code{link{multiple}}
#' @return Character vector of comma formatted numbers.
#' @examples
#'
#' require(scales)
#' vect <- c(1000, 1500, 23450, 21784, 875003780)
#' multiple.comma(vect)
#' multiple.comma(vect, multiple="k")
#' multiple.comma(vect, multiple="h")
#'
#' require(ggplot2)
#' data(diamonds)
#' ggplot(diamonds, aes(x=x, y=y, color=price*100)) + geom_point() +
#' scale_color_gradient2(labels=multiple.comma)
#'
multiple.comma <- function(x, ...)
{
multiple(x=x, extra=scales::comma, ...)
}
#' multiple.identity
#'
#' Order of Magnitude Formatter
#'
#' Simply a wrapper for multiple that prespecifies the extra identity.
#'
#' @author Jared P. Lander
#' @aliases multiple.identity
#' @export multiple.identity
#' @param x Vector of numbers to be formatted.
#' @param \dots Further arguments to be passed on to \code{link{multiple}}
#' @return Character vector of formatted numbers.
#' @examples
#'
#' vect <- c(1000, 1500, 23450, 21784, 875003780)
#' multiple.identity(vect)
#' multiple.identity(vect, multiple="k")
#' multiple.identity(vect, multiple="h")
#'
#' require(ggplot2)
#' data(diamonds)
#' ggplot(diamonds, aes(x=x, y=y, color=price*100)) + geom_point() +
#' scale_color_gradient2(labels=multiple.identity)
#'
multiple.identity <- function(x, ...)
{
multiple(x=x, extra=identity, ...)
}
|
/scratch/gouwar.j/cran-all/cranData/useful/R/formatters.r
|
## Functions for determining ideal number of clusters for kmeans
## Plots the results from the Hartigan's Rule run
## prints and returns the ggplot object
## @hartigan (data.frame) the results from fitting Hartigan's Rule
## @title (character) the title of the plot
## @linecolor (numeric) the color of the line indicating 10
## @linetype (numeric) the style of the line indicating 10
## @linesize (numeric) the size of the line indicating 10
## @minor (logical) whether the minor grid lines should be displayed
#' Plot a series of Hartigan's Numbers
#'
#' After fitting a series of Hartigan's Numbers (see \code{\link{FitKMeans}}) this will plot the results so it is easy to visualize
#'
#' Displays a graphical representation of the results of \code{\link{FitKMeans}}
#'
#' @param hartigan The results from
#' \code{\link{FitKMeans}}
#' @param title Title to be used in the plot
#' @param smooth logical; if true a smoothed line will be fit to the points, otherwise it will be a piecewise line
#' @param linecolor Color of the horizontal line denoting 10
#' @param linetype Type of the horizontal line denoting 10
#' @param linesize Size of the horizontal line denoting 10
#' @param minor logical; if true minor grid
#' lines will be plotted
#' @return a ggplot object
#' @author Jared P. Lander
#' www.jaredlander.com
#' @import ggplot2
#' @export PlotHartigan
#' @seealso \code{\link{kmeans}} \code{\link{FitKMeans}}
#' @references http://www.stat.columbia.edu/~madigan/DM08/descriptive.ppt.pdf
#' @keywords cluster kmeans hartigan clustering
#' @examples
#'
#' data(iris)
#' hartiganResults <- FitKMeans(iris[, -ncol(iris)])
#' PlotHartigan(hartiganResults)
#'
PlotHartigan <- function(hartigan, title="Hartigan's Rule", smooth=FALSE, linecolor="grey", linetype=2L, linesize=1L, minor=TRUE)
{
thePlot <- ggplot(data=hartigan, aes_string(x="Clusters", y="Hartigan")) +
geom_hline(aes(yintercept=10), linetype=linetype, colour=linecolor, size=linesize) +
#geom_line() +
geom_point(aes_string(colour="AddCluster")) +
scale_colour_discrete(name="Add Cluster") +
ggtitle(label=title) + if(minor) scale_x_continuous(minor_breaks=(1:(max(hartigan$Clusters)+1)))
if(smooth)
{
thePlot <- thePlot + geom_smooth(method="lm", formula=y~log(x))
}else
{
thePlot <- thePlot + geom_line()
}
return(thePlot)
}
## Compute Hartigan's Rule given a kmeans cluster WSS and a k+1means cluster WSS and the number of rows in the data
## returns the number
#' Compute Hartigan's Number
#'
#' Runs the computation found in http://www.stat.columbia.edu/~madigan/DM08/descriptive.ppt.pdf
#'
#' Not exported, only used by \code{\link{FitKMeans}}
#'
#' @aliases ComputeHartigan
#' @param FitActualWSS the WSS from a kmeans fit
#' @param FitPlus1WSS the WSS from a kmeans fit
#' @param nrow the number of rows in the original dataset
#' @return The computed Hartigan Number
#' @references http://www.stat.columbia.edu/~madigan/DM08/descriptive.ppt.pdf
#' @author Jared P. Lander
#' www.jaredlander.com
#' @seealso \code{\link{kmeans}} \code{\link{FitKMeans}}
#' @keywords cluster kmeans hartigan clustering
#' @examples
#' data(iris)
#' hartiganResults <- FitKMeans(iris[, -ncol(iris)])
#' PlotHartigan(hartiganResults)
#'
ComputeHartigan <- function(FitActualWSS, FitPlus1WSS, nrow)
{
return((sum(FitActualWSS) / sum(FitPlus1WSS) - 1) * (nrow - length(FitActualWSS) - 1))
}
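# In formula form the computation above is
# H(k) = (sum(W_k) / sum(W_{k+1}) - 1) * (n - k - 1)
# where W_k is the vector of within-cluster sums of squares for the k-cluster
# fit, n is the number of rows and k = length(FitActualWSS); FitKMeans (below)
# flags that the (k+1)-th cluster is worth adding when H(k) > 10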
## this function fits a series of kmeans and returns a data.frame listing the number of clusters and the result of applying Hartigan's Rule
## returns the data.frame of Hartigan results
## @x (data.frame or matrix) the data to fit kmeans on
## @max.clusters (numeric) the number of clusters to try
## @spectral (logical) whether it is fitting using spectral methods
## @nstart (numeric) the number of random starts for kmeans to use
## @iter.max (numeric) the maximum number of iterations for kmeans before giving up on convergence
## @seed (numeric) the random seed to be set
#' Fit a series of kmeans clusterings and compute Hartigan's Number
#'
#' Given a numeric dataset this function fits a series of kmeans clusterings with increasing number of centers. k-means is compared to k+1-means using Hartigan's Number to determine if the k+1st cluster should be added.
#'
#' A consecutive series of kmeans is computed with increasing k (number of centers). The results for k and k+1 clusters are compared using Hartigan's Number. If the number is greater than 10, it is noted that having k+1 clusters is of value.
#'
#' @param x The data, numeric, either a matrix or data.frame
#' @param max.clusters The maximum number of clusters that should be tried
#' @param spectral logical; If the data being fit are eigenvectors for spectral clustering
#' @param nstart The number of random starts for the kmeans algorithm to use
#' @param iter.max Maximum number of tries before the kmeans algorithm gives up on convergence
#' @param algorithm The desired algorithm to be used for kmeans. Options are c("Hartigan-Wong", "Lloyd", "Forgy", "MacQueen"). See \code{\link{kmeans}}
#' @param seed If not null, the random seed will be reset before each application of the kmeans algorithm
#' @return A data.frame consisting of columns, for the number of clusters, the Hartigan Number and whether that cluster should be added, based on Hartigan's Number.
#' @author Jared P. Lander
#' www.jaredlander.com
#' @export FitKMeans
#' @importFrom stats kmeans
#' @seealso \code{\link{kmeans}} \code{\link{PlotHartigan}}
#' @references http://www.stat.columbia.edu/~madigan/DM08/descriptive.ppt.pdf
#' @keywords cluster kmeans hartigan clustering
#' @examples
#'
#' data(iris)
#' hartiganResults <- FitKMeans(iris[, -ncol(iris)])
#' PlotHartigan(hartiganResults)
#'
FitKMeans <- function(x, max.clusters=12L, spectral=FALSE, nstart=1L, iter.max=10L, algorithm=c("Hartigan-Wong", "Lloyd", "Forgy", "MacQueen"), seed=NULL)
{
# get algorithm choice
algorithm <- match.arg(algorithm)
# data.frame for keeping track of Hartigan number
hartigan <- data.frame(Clusters=2:(max.clusters), Hartigan=NA, AddCluster=NA)
# compute the number of rows and columns just once
nRowX <- nrow(x)
nColX <- ncol(x)
## new algorithm
# in each loop build one partition
# compare to old partition
# make new partition into old partition
# first compute partition for 1 cluster
if(!is.null(seed))
{
set.seed(seed=seed)
}
FitActual <- kmeans(x[, 1:(nColX - (nColX-(2-1))*spectral)], centers=2-1, nstart=nstart, iter.max=iter.max, algorithm=algorithm)
## now build loop
for(i in 2:(max.clusters))
{
# calculate FitPlus1, which in this case will just be i
if(!is.null(seed))
{
set.seed(seed=seed)
}
FitPlus1 <- kmeans(x[, 1:(nColX - (nColX-(i+0))*spectral)], centers=i, nstart=nstart, iter.max=iter.max, algorithm=algorithm)
# calculate Hartigan and record in table
hartigan[i-1, "Hartigan"] <- ComputeHartigan(FitActualWSS=FitActual$withinss, FitPlus1WSS=FitPlus1$withinss, nrow=nRowX)
# now turn FitPlus1 into FitActual for use in the next iteration
FitActual <- FitPlus1
rm(FitPlus1); gc() # housekeeping
}
## could be made more efficient by fitting kmeans for each value of i one time, then comparing the consecutive pairwise results; would take more memory though
# compute kmeans repeatedly
# for(i in 2:(max.clusters))
# {
# # for k
# if(!is.null(seed))
# {
# set.seed(seed=seed)
# }
# FitActual <- kmeans(x[, 1:(nColX - (nColX-(i-1))*spectral)], centers=i-1, nstart=nstart, iter.max=iter.max, algorithm=algorithm)
#
# # for k+1
# if(!is.null(seed))
# {
# set.seed(seed=seed)
# }
# FitPlus1 <- kmeans(x[, 1:(nColX - (nColX-(i+0))*spectral)], centers=i, nstart=nstart, iter.max=iter.max, algorithm=algorithm)
#
# # calculate Hartigan
# hartigan[i-1, "Hartigan"] <- ComputeHartigan(FitActualWSS=FitActual$withinss, FitPlus1WSS=FitPlus1$withinss, nrow=nRowX)
# }
# if Hartigan is greater than 10 then the cluster should be added
hartigan$AddCluster <- ifelse(hartigan$Hartigan > 10, TRUE, FALSE)
return(hartigan)
}
# make compiled versions
# saving for a future version when compiler is more common
# ComputeHartigan <- cmpfun(ComputeHartigan)
# PlotHartigan <- cmpfun(PlotHartigan)
# FitKMeans <- cmpfun(FitKMeans)
|
/scratch/gouwar.j/cran-all/cranData/useful/R/hartigan.r
|
#' @title simple.impute
#' @description Generic function for simple imputation.
#' @details Provides the ability to simply impute data based on a simple measure such as mean or median. For more robust imputation see the packages Amelia, mice or mi.
#' @aliases simple.impute
#' @export simple.impute
#' @importFrom stats median
#' @author Jared P. Lander
#' @param x An object to be imputed
#' @param fun The function with which to fill in missing values
#' @param \dots Further arguments
#' @return An object with the missing values imputed.
#' @examples
#' theDF <- data.frame(A=1:10, B=1:10, C=1:10)
#' theDF[c(1, 4, 6), c(1)] <- NA
#' theDF[c(3, 4, 8), c(3)] <- NA
#'
#' simple.impute(theDF$A)
#' simple.impute(theDF$A, mean)
#' simple.impute(theDF$A, constant(4))
#' simple.impute(theDF)
#' simple.impute(theDF, mean)
#' simple.impute(theDF, constant(4))
#'
simple.impute <- function(x, fun=median, ...)
{
UseMethod('simple.impute')
}
#' @title simple.impute.default
#' @description Function for imputing a vector with missing data.
#' @details Provides the ability to simply impute data based on a simple measure such as mean or median. For more robust imputation see the packages Amelia, mice or mi.
#' @aliases simple.impute.default
#' @export
#' @export simple.impute.default
#' @importFrom stats median
#' @author Jared P. Lander
#' @param x A numeric or integer vector
#' @param fun The function with which to fill in missing values
#' @param \dots Further arguments
#' @return An object with the missing values imputed.
#' @examples
#' theDF <- data.frame(A=1:10, B=1:10, C=1:10)
#' theDF[c(1, 4, 6), c(1)] <- NA
#' theDF[c(3, 4, 8), c(3)] <- NA
#'
#' simple.impute.default(theDF$A)
#' simple.impute.default(theDF$A, mean)
#' simple.impute.default(theDF$A, constant(4))
#'
simple.impute.default <- function(x, fun=median, ...)
{
# find missing values
theNA <- is.na(x)
# replace with the constructed value
x[theNA] <- fun(x[!theNA])
return(x)
}
#' @title simple.impute.data.frame
#' @description Function for imputing a data.frame with missing data.
#' @details Provides the ability to simply impute data based on a simple measure such as mean or median. For more robust imputation see the packages Amelia, mice or mi.
#'
#' Each column is imputed independently.
#' @aliases simple.impute.data.frame
#' @export
#' @export simple.impute.data.frame
#' @author Jared P. Lander
#' @param x A data.frame
#' @param fun The function with which to fill in missing values
#' @param \dots Further arguments
#' @return A data.frame with the missing values imputed.
#' @examples
#' theDF <- data.frame(A=1:10, B=1:10, C=1:10)
#' theDF[c(1, 4, 6), c(1)] <- NA
#' theDF[c(3, 4, 8), c(3)] <- NA
#'
#' simple.impute.data.frame(theDF)
#' simple.impute.data.frame(theDF, mean)
#' simple.impute.data.frame(theDF, constant(4))
#'
simple.impute.data.frame <- function(x, fun=stats::median, ...)
{
. <- NULL
dplyr::mutate_at(x, .cols=names(x), .funs=dplyr::funs(simple.impute(., fun=fun)))
}
#' @title simple.impute.tbl_df
#' @description Function for imputing a tbl_df with missing data.
#' @details Provides the ability to simply impute data based on a simple measure such as mean or median. For more robust imputation see the packages Amelia, mice or mi.
#'
#' Each column is imputed independently.
#' @aliases simple.impute.tbl_df
#' @export
#' @importFrom stats median
#' @author Jared P. Lander
#' @param x A data.frame
#' @param fun The function with which to fill in missing values
#' @param \dots Further arguments
#' @return A data.frame with the missing values imputed.
#' @examples
#' theDF <- data.frame(A=1:10, B=1:10, C=1:10)
#' theDF[c(1, 4, 6), c(1)] <- NA
#' theDF[c(3, 4, 8), c(3)] <- NA
#'
#' simple.impute.data.frame(theDF)
#' simple.impute.data.frame(theDF, mean)
#' simple.impute.data.frame(theDF, constant(4))
#'
simple.impute.tbl_df <- function(x, fun=median, ...)
{
simple.impute.data.frame(x=x, fun=fun, ...)
}
#' @title constant
#' @description Helper function for imputing constants
#' @details Returns a function that always returns the value of n.
#' @export constant
#' @aliases constant
#' @author Jared P. Lander
#' @param n The value to return
#' @return A function that when used simply returns n.
#' @examples
#' constant(4)(1:10)
#'
#' theDF <- data.frame(A=1:10, B=1:10, C=1:10)
#' theDF[c(1, 4, 6), c(1)] <- NA
#' theDF[c(3, 4, 8), c(3)] <- NA
#' simple.impute(theDF, constant(4))
#'
constant <- function(n=1)
{
function(x, ...) n
}
|
/scratch/gouwar.j/cran-all/cranData/useful/R/impute.r
|
#' @title indexToPosition
#' @description Given a long matrix index convert to row and column positions
#' @details Using \code{\link{which}} on a matrix returns a number that iterates down rows then across columns. This function returns the (row, column) position of that index.
#' @author Jared P. Lander
#' @aliases indexToPosition
#' @export indexToPosition
#' @param x Position of indices
#' @param nrow The number of rows in the matrix
#' @return A matrix with \code{row} and \code{col} columns and a row for each value of \code{x}
#' @examples
#' indexToPosition(3, 2)
#' indexToPosition(c(1, 4, 5, 7, 9), 3)
#' indexToPosition(1:16, 4)
#' indexToPosition(c(1, 3, 5, 6, 8, 10, 11, 13, 15), 5)
#'
indexToPosition <- function(x, nrow=1)
{
# first find the column
theCol <- ceiling(x / nrow)
# get the row
theRow <- x - (theCol - 1)*nrow
return(cbind(row=theRow, col=theCol))
}
#' @title positionToIndex
#' @description Given row and column positions calculate the index.
#' @details With row and column positions this computes the index, starting at (1,1) working down rows then across columns.
#' @author Jared P. Lander
#' @aliases positionToIndex
#' @export positionToIndex
#' @param row Vector specifying row positions
#' @param col Vector specifying column positions
#' @param nrow The number of rows in the matrix
#' @return A vector of indices
#' @examples
#' positionToIndex(1, 2, 2)
#' positionToIndex(row=c(1, 1, 2, 1, 3), col=c(1, 2, 2, 3, 3), nrow=3)
#' positionToIndex(rep(1:4, 4), rep(1:4, each=4), nrow=4)
#' positionToIndex(rep(c(1, 3, 5), 3), rep(1:3, each=3), nrow=5)
#'
positionToIndex <- function(row, col, nrow=max(row))
{
(col - 1)*nrow + row
}
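# Added illustrative sketch (not part of the original package source): for a
# fixed number of rows the two helpers above are inverses, so a round trip
# should recover the original indices. Kept as a comment so nothing is
# evaluated when the package is built.
# pos <- indexToPosition(1:12, nrow = 4)
# all(positionToIndex(pos[, "row"], pos[, "col"], nrow = 4) == 1:12) # TRUE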
|
/scratch/gouwar.j/cran-all/cranData/useful/R/indices.r
|
#' interval.check
#'
#' Check which interval a number belongs to
#'
#' This function compares a specified column of a data.frame against a vector of cut points (times) and reports, for each row, the first cut point that satisfies the comparison
#'
#' @author Jared P. Lander
#' @aliases interval.check
#' @export interval.check
#' @param data data.frame
#' @param input character name of column we wish to compare
#' @param times vector in ascending order where the differences between sequential elements are the intervals
#' @param fun character containing comparator
#' @return Vector indicating which element of \code{times} each row belongs to. If a row's value is beyond every element, NA is returned in its spot.
#' @examples
#'
#' head(cars)
#' interval.check(cars, input="speed", times=seq(min(cars$speed), max(cars$speed), length=10))
interval.check <- function(data, input="Stop", times, fun="<=")
{
    # do an outer product seeing which elements of input meet the requirement in relation to each of the times
equalityMat <- outer(data[, input], times, FUN=fun)
## now we are going to see the first of the columns in each row to hold TRUE
## the best way is to do a cumsum, by row, of the negation of each cell
    equalityMat <- t(apply(!equalityMat, 1, cumsum)) # transpose to get it back in the right shape
# now take the max of each row, this will tell you the first column that it worked for
# add one to account that the first column is 0
indices <- apply(equalityMat, 1, max) + 1
    # now determine which upper bound it belongs to, NA means it didn't happen within the allotted time
belongsTo <- times[indices]
return(belongsTo)
}
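# Added illustrative walk-through (not from the original source) of the trick
# used above: the outer comparison marks, per row, which cut points satisfy
# `fun`, and the row-wise cumsum of its negation counts how many cut points
# were missed before the first hit, giving the index of the first TRUE.
# Kept as a comment so nothing is evaluated when the package is built.
# speeds <- data.frame(speed = c(4, 12, 25))
# interval.check(speeds, input = "speed", times = c(5, 10, 15, 20, 25))
# # returns 5 15 25: the first cut point each speed is less than or equal to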
|
/scratch/gouwar.j/cran-all/cranData/useful/R/intervals.r
|
# k-means plotting
#' fortify.kmeans
#'
#' Fortify a kmeans model with its data
#'
#' Prepares a kmeans object to be plotted using \code{\link{cmdscale}} to compute the projected x/y coordinates. If \code{data} is not provided, then just the center points are calculated.
#'
#' @aliases fortify.kmeans
#' @export
#' @export fortify.kmeans
#' @importFrom stats cmdscale dist
#' @author Jared P. Lander
#' @seealso kmeans fortify ggplot plot.kmeans
#' @param model \code{\link{kmeans}} model
#' @param data Data used to fit the model
#' @param \dots Not Used
#' @return The original data with extra columns:
#' \item{.x}{The projected x position.}
#' \item{.y}{The projected y position.}
#' \item{.Cluster}{The cluster that point belongs to.}
#' @examples
#'
#' k1 <- kmeans(x=iris[, 1:4], centers=3)
#' hold <- fortify(k1, data=iris)
#' head(hold)
#' hold2 <- fortify(k1)
#' head(hold2)
#'
fortify.kmeans <- function(model, data=NULL, ...)
{
# get the names of columns used
usedCols <- colnames(model$centers)
if(is.null(data))
{
# get 2 dimensional scaling of the centers
centerPoints <- data.frame(cmdscale(d=dist(model$centers), k=2))
names(centerPoints) <- c(".x", ".y")
centerPoints$.Cluster <- as.factor(rownames(centerPoints))
return(centerPoints)
}
# make a 2 dimensional scaling of the data
points <- data.frame(cmdscale(d=dist(data[, usedCols]), k=2))
names(points) <- c(".x", ".y")
# tack centers onto the points
points$.Cluster <- as.factor(model$cluster)
data <- cbind(data, points)
return(data)
}
#' @title plot.kmeans
#' @description Plot the results from a k-means object
#' @details Plots the results of k-means with color-coding for the cluster membership. If \code{data} is not provided, then just the center points are calculated.
#' @aliases plot.kmeans
#' @method plot kmeans
#' @export plot.kmeans
#' @export
#' @author Jared P. Lander
#' @seealso kmeans fortify ggplot plot.kmeans
#' @param x A \code{\link{kmeans}} object.
#' @param data The data used to fit the \code{\link{kmeans}} object.
#' @param class Character name of the "true" classes of the data.
#' @param size Numeric size of points
#' @param legend.position Character indicating where the legend should be placed.
#' @param title Title for the plot.
#' @param xlab Label for the x-axis.
#' @param ylab Label for the y-axis.
#' @param \dots Not Used.
#' @return A ggplot object
#' @examples
#'
#' k1 <- kmeans(x=iris[, 1:4], centers=3)
#' plot(k1)
#' plot(k1, data=iris)
#'
plot.kmeans <-
function(x, data=NULL, class=NULL, size=2,
legend.position=c("right", "bottom", "left", "top", "none"),
title="K-Means Results",
xlab="Principal Component 1", ylab="Principal Component 2", ...)
{
# fortify the model and data so it is convenient to plot in ggplot
toPlot <- fortify(model=x, data=data)
# get the legend position
legend.position <- match.arg(legend.position)
# convert class to factor just in case it is not already
if(!is.null(class)) toPlot[, class] <- factor(toPlot[, class])
ggplot(toPlot, aes_string(x='.x', y='.y', colour='.Cluster')) +
geom_point(aes_string(shape=class), size=size) +
scale_color_discrete("Cluster") +
theme(legend.position=legend.position) +
labs(title=title, x=xlab, y=ylab)
}
|
/scratch/gouwar.j/cran-all/cranData/useful/R/kmeansPlotting.r
|
# #' knitVector
# #'
# #' Wrapper for knitr::knit
# #'
# #' This function is merely a vectorized wrapper for knitr::knit. It knits Rnw files into tex files.
# #'
# #' @import knitr
# #' @aliases knitVector
# #' @export knitVector
# #' @author Jared P. Lander
# #' @param input Character vector of filenames (with path) to be knited.
# #' @param output Character vector of filenames (with path) for the output TeX.
# #' @param tangle Whether to tangle the R code from the input file (like Stangle)
# #' @param text A character vector as an alternative way to provide the input file
# #' @return The compiled documents are written into the output files, and the paths of the output files are returned.
# #' @examples
# #' # none here
# #'
# knitVector <- Vectorize(FUN=knit, vectorize.args=c("input", "output", "tangle", "text"))
#
#
# #' do.knit
# #'
# #' Wrapper for knitr::knit
# #'
# #' This function is merely an advanced wrapper for knitr::knit. It knits the Rnw files into tex only if the Rnw files are newer than the tex files. As in LaTeX, file names should not be provided with extensions as it will add them automatically.
# #'
# #' @import knitr
# #' @aliases do.knit
# #' @export do.knit
# #' @author Jared P. Lander
# #' @param files Name of files to be knitted, without extensions.
# #' @return The compiled documents are written into the output files, and the path of the output files are returned.
# #' @examples
# #' # none here
# do.knit <- function(files)
# {
# # create full names of files
# rnw <- sprintf("%s.Rnw", files)
# tex <- sprintf("%s.tex", files)
#
# # get vector of files that need updating based on. . .
# # if the rnw files exists and either the tex file doesn't or the tex file is old
# toRun <- ifelse(file.exists(rnw) & (!file.exists(tex) | file.info(tex)$mtime < file.info(rnw)$mtime), TRUE, FALSE)
#
# knitVector(input=rnw[toRun], output=tex[toRun])
# }
|
/scratch/gouwar.j/cran-all/cranData/useful/R/knitting.r
|
#' @title load_packages
#' @description Loads multiple packages
#' @details Allows the user to load multiple packages with one line of code. Delivers a message saying which packages have been loaded. If any requested packages are not installed, an error is thrown.
#' @author Jared P. Lander
#' @param packages A `character` vector of packages to be loaded
#' @return Nothing, loads packages
#' @export
#' @examples
#'
#' load_packages('ggplot2')
#' load_packages(c('ggplot2', 'dplyr'))
load_packages <- function(packages)
{
# be sure it is a character vector
assertthat::assert_that(is.character(packages))
## check that the packages are installed
# get list of installed packages
installedPackages <- rownames(utils::installed.packages())
# see which of our packages are installed
installedIndex <- packages %in% installedPackages
# get the installed ones
installedPackages <- packages[installedIndex]
# get the not installed ones
notInstalledPackages <- packages[!installedIndex]
    # error if any requested packages are not installed
if(length(notInstalledPackages))
{
stop(
sprintf(
'The following packages are not installed: {%s}',
paste(notInstalledPackages, collapse=', ')
)
)
}
purrr::walk(installedPackages, .f=library, character.only=TRUE)
message(
sprintf(
'The following packages were loaded: {%s}',
paste(installedPackages, collapse=', ')
)
)
}
|
/scratch/gouwar.j/cran-all/cranData/useful/R/loadPackages.r
|
#' @title reclass
#' @description Adds a class to an object.
#' @details Adds a class to an object by putting the new class at the front of the object's vector of classes.
#' @aliases reclass
#' @export reclass
#' @rdname reclass
#' @author Jared P. Lander
#' @param x The object getting the new class
#' @param value The new class
#' @return The original object with the class containing \code{value} in addition to the previous class(es)
#' @examples
#' theDF <- data.frame(A=1:10, B=1:10)
#' reclass(theDF) <- 'newclass'
#' class(theDF)
#' theDF <- reclass(theDF, 'another')
#' class(theDF)
#'
reclass <- function(x, value)
{
class(x) <- c(value, class(x))
return(x)
}
#' @title `reclass<-`
#' @rdname reclass
#' @export reclass<-
`reclass<-` <- function(x, value)
{
reclass(x=x, value=value)
}
|
/scratch/gouwar.j/cran-all/cranData/useful/R/reclass.r
|
## Written by Jared P. Lander
## See LISCENSE for copyright information
## make getting regex results easier
## looks like Hadley's stringr pretty much does this already, but ok
## takes the result of a regex and extracts the desired text
## @results () the return of either regexpr or gregexpr
## @text (character) the text being substr'd
ProcessRegex <- function(results, text)
{
# find the start and stop positions
theFrame <- data.frame(Start=results, Stop=results + attr(results, "match.length") - 1, Text=text, stringsAsFactors=FALSE)
# just keep the text where the pattern was found
theFrame <- theFrame[theFrame$Start != -1, ]
# extract the desired text and return it
return(with(theFrame, substr(Text, Start, Stop)))
}
## @pattern (character) the regular expression pattern to search for
## @text (character)
## @ignore.case (logical) If FALSE, the pattern matching is case sensitive and if TRUE, case is ignored during matching.
## @perl (logical) Should perl-compatible regexps be used? Has priority over extended.
## @fixed (logical) If TRUE, pattern is a string to be matched as is. Overrides all conflicting arguments.
## @useBytes (logical) If TRUE the matching is done byte-by-byte rather than character-by-character.
## See ?regexpr for details
regex <- function(pattern, text, ignore.case=FALSE, perl=FALSE, fixed=FALSE, useBytes=FALSE)
{
# run the regex
theResult <- regexpr(pattern=pattern, text=text, ignore.case=ignore.case, perl=perl, fixed=fixed, useBytes=useBytes)
# extract the text
return(ProcessRegex(results=theResult, text=text))
}
## @pattern (character) the regular expression pattern to search for
## @text (character)
## @ignore.case (logical) If FALSE, the pattern matching is case sensitive and if TRUE, case is ignored during matching.
## @perl (logical) Should perl-compatible regexps be used? Has priority over extended.
## @fixed (logical) If TRUE, pattern is a string to be matched as is. Overrides all conflicting arguments.
## @useBytes (logical) If TRUE the matching is done byte-by-byte rather than character-by-character.
## See ?regexpr for details
OneRegex <- function(text, pattern, ignore.case=FALSE, perl=FALSE, fixed=FALSE, useBytes=FALSE)
{
# get the gregex for a single text entry
theResult <- gregexpr(pattern=pattern, text=text, ignore.case=ignore.case, perl=perl, fixed=fixed, useBytes=useBytes)[[1]]
# extract the text
return(ProcessRegex(results=theResult, text=text))
}
## same as regex but for global pattern recognition
## does one gregexpr for each element of text
## results are a list
## @pattern (character) the regular expression pattern to search for
## @text (character)
## @ignore.case (logical) If FALSE, the pattern matching is case sensitive and if TRUE, case is ignored during matching.
## @perl (logical) Should perl-compatible regexps be used? Has priority over extended.
## @fixed (logical) If TRUE, pattern is a string to be matched as is. Overrides all conflicting arguments.
## @useBytes (logical) If TRUE the matching is done byte-by-byte rather than character-by-character.
## See ?regexpr for details
#' @importFrom plyr alply
gregex <- function(pattern, text, ignore.case=FALSE, perl=FALSE, fixed=FALSE, useBytes=FALSE)
{
# get the results from the gregexpr's
theText <- alply(text, 1, OneRegex, pattern=pattern, ignore.case=ignore.case, perl=perl, fixed=fixed, useBytes=useBytes)
# give the list elements the names of the text
names(theText) <- text
# get rid of empty returns
theText <- theText[which(llply(theText, length) != 0)]
return(theText)
}
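# Added illustrative sketch (not part of the original package source) of what
# these internal helpers return: `regex` extracts the first match per element,
# while `gregex` returns every match as a list named by the input text.
# Kept as a comment so nothing is evaluated when the package is built.
# regex("[0-9]+", c("abc123", "no digits", "x9y")) # "123" "9"
# gregex("[0-9]+", c("a1b22", "c333")) # list(a1b22 = c("1", "22"), c333 = "333")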
|
/scratch/gouwar.j/cran-all/cranData/useful/R/regex.r
|
#' shift.column
#'
#' Shift a column of data
#'
#' Shifts a column of data up or down a certain number of rows
#'
#' @author Jared P. Lander
#' @aliases shift.column
#' @export shift.column
#' @return \code{\link{data.frame}} with the specified columns shifted.
#' @param data \code{\link{data.frame}}
#' @param columns Character vector specifying which columns to shift.
#' @param newNames Character vector specifying new names for the columns that will be created by the shift. Must be same length as \code{columns}.
#' @param len Integer specifying how many rows to shift the data.
#' @param up logical indicating if rows should be shifted up or down.
#' @examples
#'
#' myData <- data.frame(Upper=LETTERS, lower=letters)
#' shift.column(data=myData, columns="lower")
#' shift.column(data=myData, columns="lower", len=2)
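#' # added illustration: shift downward instead of upward
#' shift.column(data=myData, columns="lower", up=FALSE)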
#'
shift.column <- function(data, columns, newNames=sprintf("%s.Shifted", columns), len=1L, up=TRUE)
{
if(length(columns) != length(newNames))
{
stop("columns and newNames must be the same length")
}
    # get the rows to keep based on how much to shift it by and whether to shift up or down
rowsToKeep <- seq(from=1 + len*up, length.out=NROW(data) - len)
    # for the original data it needs to be shifted the other way
dataRowsToKeep <- seq(from=1 + len*!up, length.out=NROW(data) - len)
    # create a df of the shifted rows
shiftedDF <- data[rowsToKeep, columns]
# give the right names to these new columns
names(shiftedDF) <- newNames
# data names
dataNames <- names(data)
# get rid of excess rows in data
data <- data[dataRowsToKeep, ]
# tack shifted data onto the end of the original (and cutoff) data
data <- cbind(data, shiftedDF)
names(data) <- c(dataNames, newNames)
# return the data
return(data)
}
|
/scratch/gouwar.j/cran-all/cranData/useful/R/shiftColumn.r
|
#' @title subVector
#' @rdname subMultiple
#' @export subVector
#' @param toSub Named vector where the elements are the pattern and the names are the replacement values
#' @examples
#' theText <- c('Hi Bob & Cooper how is life today',
#' 'Anything happening now?',
#' 'Sally & Dave are playing with Jess & Julio | with their kids')
#' subVector(theText, toSub=c("and"='&', 'or'='\\|'))
#' subVector(theText)
#'
subVector <- function(x, toSub)
{
if(missing(toSub) || is.null(toSub))
{
return(x)
}
subMultiple(x=x, pattern=toSub, replacement=names(toSub))
}
#' @title subMultiple
#' @description Substitutes multiple patterns and corresponding replacements
#' @details Given a vector of text, replaces all patterns in each element
#' @author Jared P. Lander
#' @rdname subMultiple
#' @export subMultiple
#' @param x Vector of text to search
#' @param pattern Vector of patterns to find in each element of x
#' @param replacement Vector of replacement values corresponding to each value of pattern
#' @return The text in x with substitutions made
#' @examples
#' theText <- c('Hi Bob & Cooper how is life today',
#' 'Anything happening now?',
#' 'Sally & Dave are playing with Jess & Julio | with their kids')
#' subMultiple(theText, pattern=c('&', '\\|'), replacement=c('and', 'or'))
#'
subMultiple <- function(x, pattern, replacement)
{
# loop through the special characters and sub in the replacements
for(i in 1:length(pattern))
{
x <- gsub(pattern=pattern[i], replacement=replacement[i], x=x) # do the subbing
}
return(x)
}
|
/scratch/gouwar.j/cran-all/cranData/useful/R/subMultiple.r
|
## subSpecials
## Written by Jared P. Lander
## See LISCENSE for copyright information
## Converts special characters into escaped special characters
## Meant to help out when doing regular expressions
## loops through all the special characters, subbing them out one by one with their escaped equivalent
## @toAlter: vector of words to have their special characters subbed out
## @specialChars: the characters to be replaced
## returns the modified vector
#' Sub special characters out of a character vector.
#'
#' Converts each of the special characters to their escaped equivalents in each element of a single vector.
#'
#' Each element in the specialChar vector is subbed for its escaped equivalent in each of the elements of toAlter
#'
#' @param toAlter Character vector that will be altered by subbing the special characters with their escaped equivalents
#' @param specialChars The characters to be subbed out
#' @return toAlter is returned with any of the defined specialChars subbed out for their escaped equivalents
#' @author Jared P. Lander
#' www.jaredlander.com
#' @export subOut
#' @seealso \code{\link{sub}} \code{\link{subSpecials}}
#' @keywords string text
#' @examples
#'
#' subOut(c("Hello", "(parens)", "Excited! Mark"))
#' subOut(c("Hello", "(parens)", "Excited! Mark"), specialChars=c("!", "("))
#'
subOut <- function(toAlter, specialChars=c("!", "(", ")", "-", "=", "*", "."))
{
# put slashes in front of the characters
specialChars <- paste("\\", specialChars, sep="")
# put double slashes for the replacements
modChars <- paste("\\", specialChars, sep="")
# loop through the special characters and sub in the replacements
for(i in 1:length(specialChars))
{
toAlter <- gsub(specialChars[i], modChars[i], toAlter) # do the subbing
}
return(toAlter)
}
## Converts special characters into escaped special characters
## Meant to help out when doing regular expressions
## @...: 1 to n vectors to be subbed on
## @specialChars: the characters to be replaced
## calls .subOut to do the actual work
## returns list of the modified vectors
#' Sub special characters out of character vectors.
#'
#' Converts each of the special characters to their escaped equivalents in each element of each vector.
#'
#' Each element in the specialChar vector is subbed for its escaped equivalent in each of the elements of each vector passed in
#'
#' @param \dots Character vectors that will be altered by subbing the special characters with their escaped equivalents
#' @param specialChars The characters to be subbed out
#' @return The provided vectors are returned with any of the defined specialChars subbed out for their escaped equivalents. Each vector is returned as an element of a list.
#' @author Jared P. Lander
#' www.jaredlander.com
#' @export subSpecials
#' @importFrom plyr llply
#' @seealso \code{\link{sub}} \code{\link{subOut}}
#' @keywords string text
#' @examples
#'
#' subSpecials(c("Hello", "(parens)", "Excited! Mark"))
#' subSpecials(c("Hello", "(parens)", "Excited! Mark"), specialChars=c("!", "("))
#' subSpecials(c("Hello", "(parens)", "Excited! Mark"),
#' c("This is a period. And this is an asterisk *"), specialChars=c("!", "("))
#' subSpecials(c("Hello", "(parens)", "Excited! Mark"),
#' c("This is a period. And this is an asterisk *"), specialChars=c("!", "(", "*"))
#'
subSpecials <- function(..., specialChars=c("!", "(", ")", "-", "=", "*", "."))
{
result <- llply(list(...), subOut, specialChars=specialChars) # run .subOut on each vector, returning the resulting list
return(result)
}
|
/scratch/gouwar.j/cran-all/cranData/useful/R/subspecials.r
|
# time single
#' timeSingle
#'
#' Convenience function that takes in a time object and calculates a difference
#' with a user specified prompt
#'
#' @export
#' @author Daniel Y. Chen
#' @aliases timeSingle
#' @param string string of what was timed
#' @param startTime "POSIXct" "POSIXt" object, usually from \code{\link{Sys.time}}
#' @param endTime "POSIXct" "POSIXt" object, usually from \code{\link{Sys.time}}
#' @param sep string, usually character that is used as the separator between user prompt and time difference
#' @return A string combining the user prompt with the formatted time difference
#' @examples
#'
#' x <- 3.14
#' strt <- Sys.time()
#' sq <- x ** 2
#' timeSingle('Squaring value', strt)
#'
timeSingle <- function(string='Time difference', startTime,
endTime=Sys.time(), sep=':')
{
assertthat::assert_that(assertthat::is.time(startTime))
assertthat::assert_that(assertthat::is.time(endTime))
diffTime <- endTime - startTime
parse_time <- unclass(diffTime)[1]
parse_units <- attr(unclass(diffTime), 'units')
prompt_string <- sprintf('%s %s %s %s', string, sep, parse_time, parse_units)
return(prompt_string)
}
|
/scratch/gouwar.j/cran-all/cranData/useful/R/time.single.R
|
#' vplayout
#'
#' Viewport
#'
#' Creates viewport for pushing ggplot objects to parts of a console.
#'
#' @author Jared P. Lander
#' @aliases vplayout
#' @export vplayout
#' @return An R object of class viewport.
#' @param x The x cell of the viewport to push into.
#' @param y The y cell of the viewport to push into.
#' @examples
#'
#' library(ggplot2)
#' library(grid)
#'
#' grid.newpage()
#' pushViewport(viewport(layout = grid.layout(nrow = 1, ncol = 2)))
#' print(ggplot(mtcars, aes(mpg, wt)) + geom_point(), vp = vplayout(1, 1))
#' print(ggplot(mtcars, aes(mpg, hp)) + geom_point(), vp = vplayout(1, 2))
#'
vplayout <- function(x, y)
{
grid::viewport(layout.pos.row=x, layout.pos.col=y)
}
#' fortify.ts
#'
#' Fortify a ts object.
#'
#' Prepares a ts object for plotting with ggplot.
#'
#' @author Jared P. Lander
#' @aliases fortify.ts
#' @export
#' @return \code{\link{data.frame}} for plotting with ggplot.
#' @param model A \code{\link{ts}} object.
#' @param data A vector of the same length of \code{x} that specifies the time component of each element of \code{x}.
#' @param name Character specifying the name of x if it is to be different from the variable being passed in.
#' @param \dots Further arguments.
#' @examples
#'
#' fortify(sunspot.year)
#'
fortify.ts <- function(model, data=NULL, name=as.character(m[[2]]), ...)
{
m <- match.call()
# if time is provided use that as the x values
if(!is.null(data))
{
theX <- data
}else
# otherwise use the built in attributes
{
theTime <- attr(model, which="tsp")
theX <- seq(from=theTime[1], to=theTime[2], by=1/theTime[3])
rm(theTime)
}
data <- data.frame(theX, model)
names(data) <- c("Time", name)
return(data)
}
#' ts.plotter
#'
#' Plot a ts object
#'
#' Fortifies, then plots a \code{\link{ts}} object.
#'
#' @export ts.plotter
#' @aliases ts.plotter
#' @author Jared P. Lander
#' @import ggplot2
#' @return A ggplot object
#' @param data A \code{\link{ts}} object to be plotted.
#' @param time A vector of the same length of \code{data} that specifies the time component of each element of \code{data}.
#' @param title Title of plot.
#' @param xlab X-axis label.
#' @param ylab Y-axis label.
#' @examples
#'
#' ts.plotter(sunspot.year)
#'
ts.plotter <- function(data, time=NULL,
title="Series Plot", xlab="Time", ylab="Rate")
{
# grab the name of the ts that was provided
# fortify the ts so it is usable in ggplot
data <- fortify.ts(data, data=time, name=as.character(match.call()[[2]]))
# figure out the names returned by fortifying
x <- names(data)[1]
y <- names(data)[2]
# build the plot
ggplot(data, aes_string(x=x, y=y)) + geom_line(aes(group=1)) +
labs(title=title, x=xlab, y=ylab)
}
#' fortify.acf
#'
#' Fortify an acf/pacf object
#'
#' Prepares acf (and pacf) objects for plotting with ggplot.
#'
#' @author Jared P. Lander
#' @aliases fortify.acf
#' @export
#' @return \code{\link{data.frame}} for plotting with ggplot.
#' @param model An \code{\link{acf}} object.
#' @param data Not used. Just for consistency with the fortify method.
#' @param \dots Other arguments
#' @examples
#'
#' fortify(acf(sunspot.year, plot=FALSE))
#' fortify(pacf(sunspot.year, plot=FALSE))
#'
fortify.acf <- function(model, data=NULL, ...)
{
    # the different types of acf objects
theNames <- c(correlation="ACF", covariance="ACF", partial="Partial.ACF")
# build a data.frame consisting the lag number and the acf value
data <- data.frame(model$lag, model$acf)
# name the data "Lag" and the appropriate type of acf
names(data) <- c("Lag", theNames[model$type])
return(data)
}
#' @title plot.acf
#' @description Plotting an ACF object
#' @details This function has been deprecated in favor of autoplot
#' @author Jared P. Lander
#' @param x An ACF object
#' @param \dots Arguments passed on to autoplot
#' @return A ggplot2 object
#'
plot.acf <- function(x, ...)
{
.Deprecated(new='autoplot.acf', package='useful', msg='Please use autoplot(x) instead')
autoplot(x, ...)
}
#' @title autoplot.acf
#' @description Plot acf objects
#' @details Plot acf (and pacf) objects.
#' @author Jared P. Lander
#' @export
#' @return A ggplot object.
#' @param object An \code{\link{acf}} object.
#' @param xlab X-axis label.
#' @param ylab y-axis label.
#' @param title Graph title.
#' @param \dots Further arguments.
#' @examples
#'
#' autoplot(acf(sunspot.year, plot=FALSE))
#' autoplot(pacf(sunspot.year, plot=FALSE))
#'
autoplot.acf <- function(object,
xlab=x, ylab=y,
#xlab=x, ylab=sub("\\.", " ", y),
title=sprintf("%s Plot", y), ...
#title=sprintf("%s Plot", sub("\\.", " ", y))
)
{
# fortify the acf object
data <- fortify.acf(object)
# get the names we are using
x <- names(data)[1]
y <- names(data)[2]
# build plot
ggplot(data, aes_string(x=x)) +
geom_linerange(aes_string(ymin=pmin(y, 0), ymax=pmax(y, 0))) +
labs(title=title, x=xlab, y=ylab)
}
# # @title plot
# # @description Overwritten plot generic so that plot.acf can be defined in this package
# # @details Overwritten plot generic so that plot.acf can be defined in this package
# # @author Jared P. Lander
# # @export
# # @param x Object to be plotted
# # @param \dots Further arguments
# # @return A plot
# #
# plot <- function(x, ...)
# {
# UseMethod('plot')
# }
#' plotTimesSeries
#'
#' Plot ts object
#'
#' Plot a ts object and, if desired, its acf and pacf.
#'
#' @aliases plot.times.series
#' @author Jared P. Lander
#' @export plotTimesSeries
#' @importFrom stats na.fail
# @S3method plot ts
# @method plot ts
#' @return A ggplot object if \code{acf} is \code{FALSE}, otherwise \code{TRUE} indicating success.
#' @param x a \code{\link{ts}} object.
#' @param time A vector of the same length of \code{x} that specifies the time component of each element of \code{x}.
#' @param acf Logical indicating if the acf and pacf should be plotted.
#' @param lag.max maximum lag at which to calculate the acf. Default is 10*log10(N/m) where N is the number of observations and m the number of series. Will be automatically limited to one less than the number of observations in the series.
#' @param na.action function to be called to handle missing values. na.pass can be used.
#' @param demean logical. Should the covariances be about the sample means?
#' @param xlab X-axis label.
#' @param ylab Y-axis label.
#' @param title Graph title.
#' @param \dots Further arguments.
#' @seealso ts.plotter plot.acf fortify.ts
#' @examples
#'
#' plotTimesSeries(sunspot.year)
#' plotTimesSeries(sunspot.year, acf=TRUE)
#'
plotTimesSeries <- function(x, time=NULL, acf=FALSE,
lag.max=NULL, na.action=na.fail, demean=TRUE,
title=sprintf("%s Plot", name), xlab="Time", ylab=name, ...)
{
# get real name of x
name <- as.character(match.call()[[2]])
# build ts plot
tsPlot <- ts.plotter(data=x, time=time, title=title, xlab=xlab, ylab=ylab)
# if we're not doing acf just return the ts plot
if(!acf)
{
return(tsPlot)
}
# calculate the acf/pacf
theAcf <- acf(x=x, lag.max=lag.max, na.action=na.action, demean=demean, type="correlation", plot=FALSE)
thePacf <- acf(x=x, lag.max=lag.max, na.action=na.action, demean=demean, type="partial", plot=FALSE)
# build plots for acf/pacf
acfPlot <- plot.acf(theAcf, title=NULL)
pacfPlot <- plot.acf(thePacf, title=NULL)
grid::grid.newpage()
grid::pushViewport(grid::viewport(layout=grid::grid.layout(nrow=2, ncol=2)))
print(tsPlot, vp=vplayout(1, 1:2))
print(acfPlot, vp=vplayout(2, 1))
print(pacfPlot, vp=vplayout(2, 2))
#list(tsPlot, acfPlot, pacfPlot)
invisible(TRUE)
}
|
/scratch/gouwar.j/cran-all/cranData/useful/R/tsPlot.r
|
#' @title uniqueBidirection
#' @description Find unique rows of a data.frame regardless of the order they appear
#' @details Sorts individual rows to get uniques regardless of order of appearance.
#' @author Jared P. Lander
#' @rdname uniqueBidirection
#' @export uniqueBidirection
#' @param x a data.frame
#' @return A data.frame that is unique regardless of direction
#' @examples
#'
#' ex <- data.frame(One=c('a', 'c', 'a', 'd', 'd', 'c', 'b'),
#' Two=c('b', 'd', 'b', 'e', 'c', 'd', 'a'),
#' stringsAsFactors=FALSE)
#'
#' # make a bigger version
#' exBig <- ex
#' for(i in 1:1000)
#' {
#' exBig <- rbind(exBig, ex)
#' }
#'
#' dim(exBig)
#'
#' uniqueBidirection(ex)
#' uniqueBidirection(exBig)
#'
#' ex3 <- dplyr::bind_cols(ex, dplyr::data_frame(Three=rep('a', nrow(ex))))
#' uniqueBidirection(ex3)
#'
uniqueBidirection <- function(x)
{
# make sure x is a two-column data.frame
if(!is.data.frame(x))
{
stop('x must be a data.frame')
}
# if(ncol(x) != 2)
# {
# stop('x must have exactly two columns')
# }
# get original names and class to use later
theNames <- names(x)
theClass <- class(x)
res <- unique(t(apply(unique(x), 1, sort)))
res <- as.data.frame(res, stringsAsFactors=FALSE)
if('tbl' %in% theClass)
{
res <- dplyr::as.tbl(res)
}
# set names and return
stats::setNames(res, theNames)
}
# dplyr way but slower
# sortCols <- function(x)
# {
# # get original names to use later
# theNames <- names(x)
#
# # sort them col-wise
# x <- sort(as.data.frame(x, stringsAsFactors=FALSE))
#
# # set original names and return object
# setNames(x, nm=theNames)
# }
#
# uniqueBidirection <- function(x)
# {
# x %>% unique %>% rowwise %>% do(sortCols(.)) %>% ungroup %>% unique
# }
|
/scratch/gouwar.j/cran-all/cranData/useful/R/unique.r
|
#' Helper functions
#'
#' A collection of handy, helper functions
#'
#' @import ggplot2
#' @docType package
#' @name useful
#' @aliases useful-package useful
NULL
|
/scratch/gouwar.j/cran-all/cranData/useful/R/useful-package.r
|
bibentries = c(
davis2006 = bibentry("article",
title = "The relationship between precision-recall and ROC curves",
author = "J. Davis and M. Goadrich",
year = 2006,
journal = "Proceedings of the 23rd International Conference on Machine Learning",
publisher = "ACM Press",
volume = 148,
number = 4,
pages = "233--240",
doi = "10.1145/1143844.1143874"
)
)
|
/scratch/gouwar.j/cran-all/cranData/usefun/R/bibentries.R
|
#' Get the common names of two vectors
#'
#' This function prints and returns the common \code{names} of two vectors. The
#' two vectors don't have to be the same length.
#'
#' @param vec1 vector with \code{names} attribute
#' @param vec2 vector with \code{names} attribute
#' @param vector.names.str string. Used for printing, it tells us what the
#' \code{names} of the two vectors represent (use plural form). Default value: "nodes".
#' @param with.gt logical. Determines if the ">" sign will be appended for nice
#' printing in an R notebook (use with the chuck option \emph{results = 'asis'}).
#' Default value: TRUE.
#'
#' @return the character vector of the common names. If there is only one name
#' in common, the \code{vector.names.str} gets the last character stripped for
#' readability. If there are no common names, it returns FALSE.
#'
#' @seealso
#' \code{\link{pretty_print_vector_values}}, \code{\link{pretty_print_string}}
#'
#' @examples
#' vec1 = c(1,1,1)
#' vec2 = c(1,2)
#' names(vec1) = c("a","b","c")
#' names(vec2) = c("c","b")
#'
#' common.names = get_common_names(vec1, vec2)
#'
#' @export
get_common_names = function(vec1, vec2, vector.names.str = "nodes",
with.gt = TRUE) {
common.names = intersect(names(vec1), names(vec2))
if (is_empty(common.names)) {
str = paste0("No common ", vector.names.str)
pretty_print_string(str, with.gt = with.gt)
return(FALSE)
}
else {
pretty_print_vector_values(common.names, vector.values.str = vector.names.str,
with.gt = with.gt)
return(common.names)
}
}
#' Get the common values of two vectors
#'
#' This function prints and returns the common values of two vectors. The two
#' vectors don't have to be the same length.
#'
#' @param vec1 vector
#' @param vec2 vector
#' @param vector.values.str string. Used for printing, it tells us what the
#' values of the two vectors represent (use plural form). Default value: "nodes".
#' @param with.gt logical. Determines if the ">" sign will be appended for nice
#' printing in an R notebook (use with the chuck option \emph{results = 'asis'}).
#' Default value: TRUE.
#'
#' @return the vector of the common values. If there is only one value
#' in common, the \code{vector.values.str} gets the last character stripped for
#' readability. If there are no common values, it returns NULL.
#'
#' @seealso
#' \code{\link{pretty_print_vector_values}}, \code{\link{pretty_print_string}}
#'
#' @examples
#' vec1 = c(1,2,3)
#' vec2 = c(3,4,1)
#'
#' common.names = get_common_values(vec1, vec2)
#'
#' @export
get_common_values = function(vec1, vec2, vector.values.str = "nodes",
with.gt = TRUE) {
common.values = intersect(vec1, vec2)
if (is_empty(common.values)) {
str = paste0("No common ", vector.values.str)
pretty_print_string(str, with.gt = with.gt)
return(NULL)
}
else {
pretty_print_vector_values(common.values, with.gt = with.gt)
return(common.values)
}
}
#' Get stats for unique values
#'
#' Use this function on two vectors with same \code{names} attribute (column
#' names), to find for each unique (numeric) value of the first vector, the
#' average and standard deviation values of the second vector's values (matching
#' is done by column name)
#'
#' @param vec1 vector with \code{names} attribute
#' @param vec2 vector with \code{names} attribute
#'
#' @return A \code{data.frame} consisting of 3 column vectors. The \code{data.frame}
#' size is \code{nx3}, where n is the number of unique values of \code{vec1} (rows).
#' The columns vectors are:
#' \enumerate{
#' \item the first input vector pruned to its unique values
#' \item a vector with the average values for each unique value of the
#' first vector (the matching is done by column name)
#' \item a vector with the standard deviation values for each unique value
#' of the first vector (the matching is done by column name)
#' }
#'
#' @examples
#' vec1 = c(1, 2, 3, 2)
#' vec2 = c(20, 2, 2.5, 8)
#' names.vec = c(seq(1,4))
#' names(vec1) = names.vec
#' names(vec2) = names.vec
#'
#' res = get_stats_for_unique_values(vec1, vec2)
#'
#' @importFrom stats sd
#' @export
get_stats_for_unique_values = function(vec1, vec2) {
stopifnot(names(vec1) == names(vec2))
vec1.sorted = sort(vec1)
vec1.sorted.unique = sort(unique(vec1))
vec2.avg.values = numeric(length = length(vec1.sorted.unique))
sd.values = numeric(length = length(vec1.sorted.unique))
index = 0
for (value in vec1.sorted.unique) {
index = index + 1
vec2.avg.values[index] = mean(vec2[
(names(vec1.sorted[vec1.sorted == value]))
])
sd.values[index] = sd(vec2[
(names(vec1.sorted[vec1.sorted == value]))
])
}
# In case of NA elements in sd calculation
# (one element vectors), replace with 0
sd.values[is.na(sd.values)] = 0
res = cbind(vec1.sorted.unique, vec2.avg.values, sd.values)
colnames(res) = c("vec1.unique", "vec2.mean", "vec2.sd")
return(as.data.frame(res))
}
#' Get percentage of matches between two vectors
#'
#' Use this function on two numeric vectors with the same \code{names} attribute
#' (columns) and same length, in order to find the percentage of common elements
#' (value matches between the two vectors). The same \code{names} for the two
#' vectors ensures that their values are logically matched one-to-one.
#'
#' @param vec1 numeric vector with \code{names} attribute
#' @param vec2 numeric vector with \code{names} attribute
#'
#' @return the percentage of common values (exact matches) between the two
#' vectors. Can only be a value between 0 (no common elements) and 1 (perfect
#' element match). Note that \emph{NaN} and \emph{NA} values are allowed in
#' the input vectors, but they will always count as a mismatch.
#'
#' @examples
#' vec1 = c(1, 2, 3, 2)
#' vec2 = c(20, 2, 2.5, 8)
#' vec3 = c(1, 2, 333, 222)
#' names.vec = c(seq(1,4))
#' names(vec1) = names.vec
#' names(vec2) = names.vec
#' names(vec3) = names.vec
#'
#' match.1.2 = get_percentage_of_matches(vec1, vec2)
#' match.1.3 = get_percentage_of_matches(vec1, vec3)
#'
#' @export
get_percentage_of_matches = function(vec1, vec2) {
stopifnot(is.numeric(vec1) && is.numeric(vec2))
stopifnot(length(vec1) == length(vec2))
stopifnot(names(vec1) == names(vec2))
total = length(vec1)
diff = vec1 - vec2
matches = sum(diff == 0, na.rm = TRUE)
matches.percentage = matches / total
return(matches.percentage)
}
#' Prune single-value columns from a data frame
#'
#' Given a \code{data.frame} and an integer value, it checks whether there are
#' columns all of whose values match the given one. If so, it prunes those
#' single-valued columns from the \code{data.frame}
#'
#' @param df \code{data.frame}
#' @param value an integer value
#'
#' @return the column-pruned \code{data.frame}
#'
#' @examples
#' df = data.frame(c(0,0,0), c(0,1,0), c(1,0,0))
#' prune_columns_from_df(df, value = 0)
#'
#' @export
prune_columns_from_df = function(df, value) {
if (length(df) == 0) return(df)
return(df[, colSums(df != value) > 0])
}
#' Prune single-value rows from a data frame
#'
#' Given a \code{data.frame} and an integer value, it checks whether there are
#' rows all of whose values match the given one. If so, it prunes those
#' single-valued rows from the \code{data.frame}
#'
#' @param df \code{data.frame}
#' @param value an integer value
#'
#' @return the row-pruned \code{data.frame}
#'
#' @examples
#' df = data.frame(c(0,0,0), c(0,1,0), c(1,0,0))
#' prune_rows_from_df(df, value = 0)
#'
#' @export
prune_rows_from_df = function(df, value) {
if (length(df) == 0) return(df)
return(df[rowSums(df != value) > 0, ])
}
#' Add vector to a (n x 2) data frame
#'
#' Given a vector, adds each value and its corresponding name to a data frame
#' of 2 columns as new rows, where the name fills in the 1st column and the
#' value the 2nd column.
#'
#' @param df \code{data.frame}, with n rows and 2 columns
#' @param vec a vector
#'
#' @return a \code{data.frame} with additional rows and each element as a
#' character.
#'
#' @examples
#' df = data.frame(c(0,0,1), c(0,0,2))
#' vec = 1:3
#' names(vec) = c("a","b","c")
#'
#' add_vector_to_df(df, vec)
#'
#' @export
add_vector_to_df = function(df, vec) {
stopifnot(ncol(df) == 2)
if (length(vec) == 0) return(df)
for (i in 1:length(vec)) {
value = vec[i]
name = names(vec)[i]
df = rbind(df, c(name, value))
}
return(df)
}
#' Prune and reorder vector elements
#'
#' Given two vectors, the first one's elements are pruned and reordered according
#' to the common values of the second vector and the elements' \emph{\code{names}
#' (attribute) of the first}. If there are no such common values, an empty vector is
#' returned.
#'
#' @param vec a vector with \code{names} attribute
#' @param filter.vec a character vector whose values will be used to filter the
#' \code{vec} elements
#'
#' @return the pruned and re-arranged vector.
#'
#' @examples
#' vec = c(1,2,3)
#' names(vec) = c("a","b","c")
#'
#' filter.vec1 = c("a")
#' prune_and_reorder_vector(vec, filter.vec1)
#'
#' filter.vec2 = c("c", "ert", "b")
#' prune_and_reorder_vector(vec, filter.vec2)
#'
#' @export
prune_and_reorder_vector = function(vec, filter.vec) {
pruned.vec = vec[names(vec) %in% filter.vec]
reordered.vec = pruned.vec[order(match(names(pruned.vec), filter.vec))]
return(reordered.vec)
}
#' Get ternary class id
#'
#' Helper function that checks if a \emph{value} surpasses the given
#' \emph{threshold} either positively, negatively or not at all and returns
#' a value indicating in which class (i.e. interval) it belongs.
#'
#' @param value numeric
#' @param threshold numeric
#'
#' @return an integer. There are 3 cases:
#' \itemize{
#' \item 1: when \eqn{value > threshold}
#' \item -1: when \eqn{value < -threshold}
#' \item 0: otherwise
#' }
#'
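#' @examples
#' # illustrative calls (added): one value per class
#' get_ternary_class_id(3, threshold = 2) # 1 (active)
#' get_ternary_class_id(-0.1, threshold = 2) # 0 (no biomarker)
#' get_ternary_class_id(-5, threshold = 2) # -1 (inhibited)
#'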
#' @export
get_ternary_class_id = function(value, threshold) {
if (value > threshold) return(1) # active
if (value < -threshold) return(-1) # inhibited
return(0) # no biomarker
}
#' Add a row to a 3-valued (ternary) \code{data.frame}
#'
#' Use this function on a \code{data.frame} object (with values only
#' in the 3-element set \{-1,0,1\} ideally - specifying either a positive,
#' negative or none/absent condition/state/result about something) and add an
#' extra \strong{first or last row vector} with zero values, where \emph{1}
#' and \emph{-1} will be filled when the column names of the given
#' \code{data.frame} match the values in the \emph{values.pos} or
#' \emph{values.neg} vector parameters respectively.
#'
#' @param df a \code{data.frame} object with values only in the
#' the 3-element set \{-1,0,1\}. The column names should be node names
#' (gene, protein names, etc.).
#' @param values.pos a character vector whose elements are indicators of a
#' positive state/condition and will be assigned a value of \emph{1}.
#' These elements \strong{must be a subset of the column names} of the given \code{df} parameter.
#' If empty, no values equal to \emph{1} will be added to the new row.
#' @param values.neg a character vector whose elements are indicators of a
#' negative state/condition and will be assigned a value of \emph{-1}.
#' If empty, no values equal to \emph{-1} will be added to the new row.
#' These elements \strong{must be a subset of the column names} of the given \code{df} parameter.
#' @param pos string. The position where we should put the new row that will be generated.
#' Two possible values: "first" (default) or "last".
#' @param row.name string. The name of the new row that we will added. Default
#' value: NULL.
#'
#' @return the \code{df} with one extra row, having elements from the \{-1,0,1\}
#' set depending on values of \code{values.pos} and \code{values.neg} vectors.
#'
#' @examples
#' df = data.frame(c(0,-1,0), c(0,1,-1), c(1,0,0))
#' colnames(df) = c("A","B","C")
#' df.new = add_row_to_ternary_df(df, values.pos = c("A"), values.neg = c("C"), row.name = "Hello!")
#'
#' @export
add_row_to_ternary_df =
function(df, values.pos, values.neg, pos = "first", row.name = NULL) {
# some checks
stopifnot(pos %in% c("first", "last"))
col.names = colnames(df)
stopifnot(values.pos %in% col.names, values.neg %in% col.names)
# initialize a 'row' data.frame
row = as.data.frame(matrix(0, ncol = length(col.names), nrow = 1))
colnames(row) = col.names
rownames(row) = row.name
# add 'positive' and 'negative' meta-values
row[colnames(row) %in% values.pos] = 1
row[colnames(row) %in% values.neg] = -1
if (pos == 'first')
res = rbind(row, df)
else
res = rbind(df, row)
return(res)
}
#' Rearrange a list of data frames by rownames
#'
#' @param list_df a (non-empty) list of \code{data.frame} objects. The data
#' frames must have the same \code{colnames} attribute.
#'
#' @return a rearranged list of data frames, where the names of the elements of
#' the \code{list_df} (the 'ids' of the data frames) and the \code{rownames} of
#' the data frames have switched places: the unique row names of the original list's
#' combined data frames serve as \code{names} for the returned list of data
#' frames, while the data frame 'ids' (\code{names} of the original list's
#' elements) now serve as \code{rownames} for the data frames in the new list.
#'
#' E.g. if in the given \code{list} there was a \code{data.frame} with id 'A':
#' \code{a = list_df[["A"]]} and \code{rownames(a) = c("row1", "row2")}, then
#' in the rearranged \code{list} there would be two data frames with ids
#' "row1" and "row2", each of them having a row with name "A" where also these
#' data rows would be the same as before: \code{list_df[["A"]]["row1", ] == returned_list[["row1"]]["A",]}
#' and \code{list_df[["A"]]["row2", ] == returned_list[["row2"]]["A",]} respectively.
#'
#' @examples
#' df.1 = data.frame(matrix(data = 0, nrow = 3, ncol = 3,
#' dimnames = list(c("row1", "row2", "row3"), c("C.1", "C.2", "C.3"))))
#' df.2 = data.frame(matrix(data = 1, nrow = 3, ncol = 3,
#' dimnames = list(c("row1", "row2", "row4"), c("C.1", "C.2", "C.3"))))
#' list_df = list(df.1, df.2)
#' names(list_df) = c("zeros", "ones")
#' res_list_df = ldf_arrange_by_rownames(list_df)
#'
#' @export
ldf_arrange_by_rownames = function(list_df) {
# some checks
stopifnot(is.list(list_df), length(list_df) > 0)
stopifnot(all(sapply(list_df, function(df) { is.data.frame(df) })))
column_names_mat = sapply(list_df, function(df) { colnames(df) })
stopifnot(all(duplicated(column_names_mat, MARGIN = 2)[-1])) # same column names
column_names = colnames(list_df[[1]])
unique_row_names = unique(unlist(sapply(list_df, function(df) { rownames(df) })))
# initialize `res` list
res = list()
for (row_name in unique_row_names) {
df = as.data.frame(matrix(data = NA, nrow = 0, ncol = length(column_names)))
colnames(df) = column_names
res[[row_name]] = df
}
# fill in `res` list
for (df_name in names(list_df)) {
df = list_df[[df_name]]
for (row_name in rownames(df)) {
res[[row_name]][df_name,] = df[row_name, ]
}
}
return(res)
}
#' Binarize matrix to given threshold
#'
#' Simple function that checks every element of a given matrix (or data.frame)
#' if it surpasses the given threshold either positively or negatively and it
#' outputs 1 for that element, otherwise 0.
#'
#' @param mat a matrix or data.frame object
#' @param thres a positive numerical value
#'
#' @return a binarized matrix (values either 0 or 1): elements that have 1
#' correspond to values of \code{mat} that they were either larger than the
#' threshold or smaller than it's negative.
#'
#' @examples
#'
#' mat = matrix(data = -4:4, nrow = 3, ncol = 3)
#' binarize_to_thres(mat, thres = 0.5)
#' binarize_to_thres(mat, thres = 2.5)
#'
#' @export
binarize_to_thres = function(mat, thres) {
stopifnot(is.data.frame(mat) | is.matrix(mat))
stopifnot(thres > 0)
apply(mat, c(1,2), function(x) {
if (x >= thres | x <= -thres) 1 else 0
})
}
#' Convert decimal number to its binary representation
#'
#' Get the binary representation of any decimal number from 0 to (2^31) - 1.
#' Doesn't work for larger numbers.
#'
#' @param decimal_num decimal number between 0 and (2^31) - 1
#' @param bits number of bits to keep in the result counting from the right.
#' \strong{Default value is 32}.
#'
#' @return a binary string representation of the given decimal number.
#'
#' @examples
#'
#' # representing 0
#' dec_to_bin(0,1)
#' dec_to_bin(0,10)
#' dec_to_bin(0,32)
#' dec_to_bin(0)
#'
#' # representing 24
#' dec_to_bin(24,6)
#' dec_to_bin(24,21)
#' dec_to_bin(24)
#' dec_to_bin(24,3) # note that this will cut the returned result so be careful!
#'
#' @export
dec_to_bin = function(decimal_num, bits = 32) {
stopifnot(decimal_num >= 0, decimal_num <= 2^31 - 1)
stopifnot(bits > 0, bits <= 32)
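  # intToBits() returns the 32 bits least-significant first as raw values
  # ("00"/"01"); reverse to most-significant first, keep the second character
  # of each ("0"/"1") and paste everything into one 32-character string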
n = paste(sapply(strsplit(paste(rev(intToBits(decimal_num))),""),`[[`,2),collapse="")
return(substr(n, nchar(n)-bits+1, nchar(n)))
}
#' Get partial permutation of a vector
#'
#' @param x a vector with at least 2 elements
#' @param exp_sim a value between 0 and 1 indicating the level of \emph{expected
#' similarity} between the input and output vector. Default value is \strong{0}
#' (random permutation).
#'
#' @return a partially (random) permutated vector. If \code{exp_sim = 0} then
#' the result is equal to \code{sample(x)} (a random permutation). If
#' \code{exp_sim = 1} then the result is always the same as the input vector.
#' For \code{exp_sim} values between \emph{0} and \emph{1} we randomly sample
#' a subset of the input vector inversely proportionate to the \code{exp_sim}
#' value (e.g. \code{exp_sim = 0.8 => 20\%} of the elements) and randomly
#' permutate these elements only.
#'
#' @examples
#' set.seed(42)
#' partial_permut(x = LETTERS, exp_sim = 0)
#' partial_permut(x = LETTERS, exp_sim = 0.5)
#' partial_permut(x = LETTERS, exp_sim = 0.9)
#'
#' @export
partial_permut = function(x, exp_sim = 0) {
stopifnot(length(x) > 1)
stopifnot(exp_sim >= 0, exp_sim <= 1)
indexes = which(x %in% sample(x, size = round((1 - exp_sim) * length(x))))
if (length(indexes) == 1)
permut_indexes = indexes
else
permut_indexes = sample(indexes)
permut_x = x
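  # move the value originally at position indexes[k] to position
  # permut_indexes[k]; all non-selected positions stay untouched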
x_bk = permut_x[indexes]
permut_x[indexes] = permut_x[permut_indexes]
permut_x[permut_indexes] = x_bk
return(permut_x)
}
|
/scratch/gouwar.j/cran-all/cranData/usefun/R/operations.R
|
#' Range normalization
#'
#' Normalize a vector, matrix or data.frame of numeric values in a specified
#' range.
#'
#' @param x vector, matrix or data.frame with at least two different elements
#' @param range vector of two elements specifying the desired normalized range.
#' Default value is c(0,1)
#' @return the normalized data
#'
#' @examples
#' vec = 1:10
#' normalize_to_range(vec)
#' normalize_to_range(vec, range = c(-1,1))
#'
#' mat = matrix(c(0,2,1), ncol = 3, nrow = 4)
#' normalize_to_range(mat, range = c(-5,5))
#'
#' @export
normalize_to_range = function(x, range = c(0,1)) {
stopifnot(length(unique(x)) >= 2)
x.max = max(x)
x.min = min(x)
a = range[1]
b = range[2]
res = a + (x - x.min)*(b-a)/(x.max - x.min)
return(res)
}
#' Specify decimal
#'
#' Use this function to transform a given decimal number to the desired
#' precision by choosing the number of digits after the decimal point.
#'
#' @param number numeric
#' @param digits.to.keep numeric. Refers to the digits to keep after decimal
#' point '.'. This value should be 15 or less.
#'
#' @return the pruned number in string format
#'
#' @examples
#' # 0.123
#' specify_decimal(0.1233213, 3)
#'
#' @export
specify_decimal = function(number, digits.to.keep) {
stopifnot(digits.to.keep <= 15)
trimws(format(round(number, digits.to.keep), nsmall = digits.to.keep))
}
#' Remove commented and empty lines
#'
#' Removes empty or commented lines from a character vector (each element being
#' a line)
#'
#' @param lines a character vector, usually the result from using the
#' \code{\link{readLines}} function
#'
#' @return a character vector of the pruned lines
#'
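#' @examples
#' # illustrative call (added): comments and blank/whitespace-only lines are dropped
#' lines = c("# a comment", "", "x = 1", "   ", "y = 2")
#' remove_commented_and_empty_lines(lines)
#'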
#' @export
remove_commented_and_empty_lines = function(lines) {
commented.or.empty.lines = character(0)
for (line in lines) {
if (startsWith(line, "#") || trimws(line) == "") {
commented.or.empty.lines = c(commented.or.empty.lines, line)
}
}
pruned.lines = lines[!lines %in% commented.or.empty.lines]
return(pruned.lines)
}
#' Retrieve the parent directory
#'
#' Use this function to retrieve the parent directory from a string representing
#' the full path of a file or a directory.
#'
#' @param pathStr string. The name of the directory, can be a full path filename.
#'
#' @return a string representing the parent directory. When the input has no
#' parent (e.g. it is a single path component or sits directly under the root),
#' the root ("/") directory is returned.
#'
#' @examples
#' get_parent_dir("/home/john")
#' get_parent_dir("/home/john/a.txt")
#' get_parent_dir("/home")
#'
#' @export
get_parent_dir = function(pathStr) {
parts = unlist(strsplit(pathStr, "/"))
parent.dir = do.call(file.path, as.list(parts[1:length(parts) - 1]))
if (parent.dir == "" || is_empty(parent.dir)) return("/")
else return(parent.dir)
}
#' Matrix equality
#'
#' Check if two matrices are equal. Equality is defined by both of them being
#' matrices in the first place, having the same dimensions as well as the same
#' elements.
#'
#' @param x,y matrices
#'
#' @return a logical specifying if the two matrices are equal or not.
#'
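#' @examples
#' # illustrative calls (added)
#' mat_equal(matrix(1:4, nrow = 2), matrix(1:4, nrow = 2)) # TRUE
#' mat_equal(matrix(1:4, nrow = 2), matrix(1:4, nrow = 1)) # FALSE (different dims)
#' mat_equal(matrix(1:4, nrow = 2), 1:4) # FALSE (not a matrix)
#'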
#' @export
mat_equal = function(x, y) {
is.matrix(x) && is.matrix(y) && all(dim(x) == dim(y)) && all(x == y)
}
#' Is object empty?
#'
#' A function to test whether an object is \strong{empty}. It checks the length
#' of the object, so it has different behaviour than \code{\link{is.null}}.
#'
#' @param obj a general object
#'
#' @return a logical specifying if the object is empty (has zero length) or not.
#'
#' @examples
#' # TRUE
#' is_empty(NULL)
#' is_empty(c())
#'
#' # FALSE
#' is_empty("")
#' is_empty(NA)
#' is_empty(NaN)
#'
#' @export
is_empty = function(obj) length(obj) == 0
#' Outersect
#'
#' Performs set \emph{outersection} on two vectors. The opposite operation from
#' \code{intersect}!
#'
#' @param x,y vectors
#'
#' @return a vector of the non-common elements of x and y.
#'
#' @examples
#' x = 1:10
#' y = 2:11
#'
#' # c(1,11)
#' outersect(x,y)
#'
#' @seealso \code{\link{intersect}}
#'
#' @export
outersect = function(x, y) {
sort(c(setdiff(x, y), setdiff(y, x)))
}
#' Is value between two others?
#'
#' This function checks if a given value is inside an interval specified by
#' two boundary values.
#'
#' @param value numeric
#' @param low.thres numeric. Lower boundary of the interval.
#' @param high.thres numeric. Upper boundary of the interval.
#' @param include.high.value logical. Whether the upper bound is included in the
#' interval or not. Default value: FALSE.
#'
#' @return a logical specifying if the \code{value} is inside the interval
#' \code{[low.thres,high.thres)} (default behaviour) or inside the interval
#' \code{[low.thres,high.thres]} if \code{include.high.value} is TRUE.
#'
#' @examples
#' is_between(3,2,4)
#' is_between(4,2,4)
#' is_between(4,2,4,include.high.value=TRUE)
#'
#' @export
is_between = function(value, low.thres, high.thres, include.high.value = FALSE) {
if (include.high.value) return(value >= low.thres & value <= high.thres)
else return(value >= low.thres & value < high.thres)
}
|
/scratch/gouwar.j/cran-all/cranData/usefun/R/others.R
|
#' @title 100 distinct colors
#' @description
#' 100 as-much-as-possible distinct colors!
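#' @examples
#' # illustrative use (added): preview the first ten colors
#' make_color_bar_plot(colors.100[1:10], 1:10, title = "First 10 of colors.100")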
#' @export
colors.100 = c("#000000", "#0089A3", "#1CE6FF", "#FF34FF", "#FF4A46", "#008941",
"#006FA6", "#A30059", "#FFDBE5", "#7A4900", "#0000A6", "#63FFAC",
"#B79762", "#004D43", "#8FB0FF", "#997D87", "#5A0007", "#809693",
"#FEFFE6", "#1B4400", "#4FC601", "#3B5DFF", "#4A3B53", "#FF2F80",
"#61615A", "#BA0900", "#6B7900", "#00C2A0", "#FFAA92", "#FF90C9",
"#B903AA", "#D16100", "#DDEFFF", "#000035", "#7B4F4B", "#A1C299",
"#300018", "#0AA6D8", "#013349", "#00846F", "#372101", "#FFB500",
"#C2FFED", "#A079BF", "#CC0744", "#C0B9B2", "#C2FF99", "#001E09",
"#00489C", "#6F0062", "#0CBD66", "#EEC3FF", "#456D75", "#B77B68",
"#7A87A1", "#788D66", "#885578", "#FAD09F", "#FF8A9A", "#D157A0",
"#BEC459", "#456648", "#0086ED", "#886F4C", "#34362D", "#B4A8BD",
"#00A6AA", "#452C2C", "#636375", "#A3C8C9", "#FF913F", "#938A81",
"#575329", "#00FECF", "#B05B6F", "#8CD0FF", "#3B9700", "#04F757",
"#C8A1A1", "#1E6E00", "#7900D7", "#A77500", "#6367A9", "#A05837",
"#6B002C", "#772600", "#D790FF", "#9B9700", "#549E79", "#FFF69F",
"#201625", "#72418F", "#BC23FF", "#99ADC0", "#3A2465", "#922329",
"#5B4534", "#FDE8DC", "#404E55", "#FFFF00")
#' Make a color bar plot
#'
#' Use this function when you want to visualize some numbers and their
#' respective color values. Note that more than 42 colors won't be nice
#' to see (too thin bars)!
#'
#' @param color.vector vector of color values
#' @param number.vector vector of numeric values (same length with
#' \code{color.vector})
#' @param title string. The title of the barplot
#' @param x.axis.label string. The x-axis label. Default value: empty string
#'
#' @examples
#' color.vector = rainbow(10)
#' number.vector = 1:10
#' title = "First 10 rainbow() colors"
#' make_color_bar_plot(color.vector, number.vector, title)
#'
#' @importFrom graphics barplot axis
#' @export
make_color_bar_plot = function(color.vector, number.vector, title,
x.axis.label = "") {
bp = barplot(rep(1,length(color.vector)), col = color.vector,
axes = FALSE, xlab = x.axis.label, main = title, border = NA)
axis(1, bp, number.vector)
}
#' Multiple densities plot
#'
#' Combine many density distributions to one common plot.
#'
#' @param densities a list, each element holding the results from executing
#' the \code{\link{density}} function to a (different) vector. Note that you
#' need to provide a name for each list element for the legend (see example).
#'
#' @param legend.title string. The legend title.
#' @param title string. The plot title.
#' @param x.axis.label string. The x-axis label.
#' @param legend.size numeric. Default value: 1.
#'
#' @examples
#' mat = matrix(rnorm(60), ncol=20)
#' densities = apply(mat, 1, density)
#' names(densities) = c("1st", "2nd", "3rd")
#' make_multiple_density_plot(densities, legend.title = "Samples",
#' x.axis.label = "", title = "3 Normal Distribution Samples")
#'
#' @importFrom graphics legend lines plot
#' @export
make_multiple_density_plot =
function(densities, legend.title, title, x.axis.label, legend.size = 1) {
stopifnot(length(densities) <= 100)
  # take colors from the 100 distinct color set
color.palette = colors.100[1:length(densities)]
plot(NA, xlim = range(sapply(densities, "[", "x")),
ylim = range(sapply(densities, "[", "y")),
main = title, xlab = x.axis.label, ylab = "Density")
mapply(lines, densities, col = color.palette)
legend("topright", legend = names(densities), fill = color.palette,
title = legend.title, cex = legend.size)
}
|
/scratch/gouwar.j/cran-all/cranData/usefun/R/plot.R
|
#' @title Compare two Precision-Recall curves
#'
#' @description
#' Test the hypothesis that the true difference in PR AUCs is equal to 0.
#' We implement the same bootstrap method based on the idea from [pROC::roc.test()].
#' The PR AUC is calculated using [PRROC::pr.curve()] with the interpolation
#' method of `r mlr3misc::cite_bib("davis2006")`.
#'
#' @param labels `numeric()`\cr
#' Vector of responses/labels (only two classes/values allowed: cases/positive
#' class = 1 and controls/negative class = 0)
#' @param pred1 `numeric()`\cr
#' Vector of prediction values. Higher values denote positive class.
#' @param pred2 `numeric()`\cr
#' Vector of prediction values. Higher values denote positive class.
#' Must have the same length as `pred1`.
#' @param boot.n `numeric(1)`\cr
#' Number of bootstrap resamples. Default: 10000
#' @param boot.stratified `logical(1)`\cr
#' Whether the bootstrap resampling is stratified (same number of cases/controls
#' in each replicate as in the original sample) or not.
#' Advised to use especially when classes from `labels` are imbalanced.
#' Default: TRUE.
#' @param alternative `character(1)` \cr
#' Specifies the alternative hypothesis. Either "two.sided", "less" or "greater".
#' Default: "two.sided".
#'
#' @return a list with the AUCs of the two original prediction vectors and the
#' p-value of the bootstrap-based test.
#'
#' @references
#' `r mlr3misc::format_bib("davis2006")`
#'
#' @examples
#' set.seed(42)
#' # imbalanced labels
#' labels = sample(c(0,1), 20, replace = TRUE, prob = c(0.8,0.2))
#' # predictions
#' pred1 = rnorm(20)
#' pred2 = rnorm(20)
#' pr.test(labels, pred1, pred2, boot.n = 1000, boot.stratified = FALSE)
#' pr.test(labels, pred1, pred2, boot.n = 1000, boot.stratified = TRUE)
#'
#' @importFrom PRROC pr.curve
#' @importFrom stats pnorm sd
#' @export
pr.test = function(labels, pred1, pred2, boot.n = 10000, boot.stratified = TRUE,
alternative = "two.sided") {
  alternative = match.arg(alternative, c("two.sided", "less", "greater"))
stopifnot(all(sort(unique(labels)) == c(0,1))) # 2 classes only (0 => neg, 1 => pos)
stopifnot(length(pred1) == length(pred2))
diffs = sapply(1:boot.n, function(i) {
if (boot.stratified) {
# get the two classes values
cl1 = unique(labels)[1]
cl2 = unique(labels)[2]
# find indexes of those
cl1_indxs = which(labels == cl1)
cl2_indxs = which(labels == cl2)
# resample with replacement each class on its own
indx1 = sample(cl1_indxs, replace = TRUE)
indx2 = sample(cl2_indxs, replace = TRUE)
# combine to indx
indx = c(indx1, indx2)
} else {
indx = sample(1:length(labels), replace = TRUE)
}
# resampled labels and prediction values
rsmp_labels = labels[indx]
rsmp_pred1 = pred1[indx]
rsmp_pred2 = pred2[indx]
# calculate the two PR AUCs: AUC1, AUC2
auc1 = PRROC::pr.curve(scores.class0 = rsmp_pred1,
weights.class0 = rsmp_labels)$auc.davis.goadrich
auc2 = PRROC::pr.curve(scores.class0 = rsmp_pred2,
weights.class0 = rsmp_labels)$auc.davis.goadrich
# AUC diff
auc1 - auc2
})
# remove NA values if they exist
diffs = diffs[!is.na(diffs)]
# AUC1 and AUC2 are the PR AUCs on the original data
auc1 = PRROC::pr.curve(scores.class0 = pred1,
weights.class0 = labels)$auc.davis.goadrich
auc2 = PRROC::pr.curve(scores.class0 = pred2,
weights.class0 = labels)$auc.davis.goadrich
# AUC difference
obs_diff = auc1 - auc2
# Calculate statistic
stat = obs_diff / sd(diffs)
# compare stat with normal distribution, according to the value of `alternative`
# Alternative hypothesis: true difference in PR AUC is not equal to 0
if (alternative == "two.sided")
pval = 2 * pnorm(-abs(stat))
else if (alternative == "greater")
pval = pnorm(-stat)
else # less
pval = pnorm(stat)
# return results
list(
auc1 = auc1,
auc2 = auc2,
p.value = pval
)
}
|
/scratch/gouwar.j/cran-all/cranData/usefun/R/pr.R
|
#' Pretty print a string
#'
#' Nice printing of a string in an R notebook (default behaviour). Otherwise,
#' it prints the string to the standard R output.
#'
#' @param string a string
#' @param with.gt logical. Determines if the ">" sign will be appended for nice
#' printing in an R notebook (use with the chunk option \emph{results = 'asis'}).
#' Default value: TRUE.
#'
#' @seealso \code{\link{cat}}
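#'
#' @examples
#' # minimal usage sketch: notebook-style output with the ">" prefix
#' pretty_print_string("hello")
#' # plain output to the standard R console
#' pretty_print_string("hello", with.gt = FALSE)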
#'
#' @export
pretty_print_string = function(string, with.gt = TRUE) {
if (with.gt)
cat(paste0("> ", string))
else
cat(string)
}
#' Pretty print a bold string
#'
#' Prints a bold string only when `html.output` is enabled. Otherwise, it prints
#' a normal string. The ">" sign can be appended if nice output in an R
#' notebook is desired.
#'
#' @param string a string
#' @param with.gt logical. Determines if the ">" sign will be appended for nice
#' printing in an R notebook (use with the chunk option \emph{results = 'asis'}). Default value: TRUE.
#' @param html.output logical. If TRUE, it encapsulates the string with the bold
#' tags for an HTML document. Default value: TRUE.
#'
#' @seealso \code{\link{pretty_print_string}}
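#'
#' @examples
#' # illustrative sketch: bold (HTML) output vs plain output
#' pretty_print_bold_string("an important note")
#' pretty_print_bold_string("an important note", html.output = FALSE)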
#'
#' @export
pretty_print_bold_string =
function(string, with.gt = TRUE, html.output = TRUE) {
if (html.output) {
bold.string = paste0("<b>", string, "</b>")
if (with.gt)
cat(paste0("> ", bold.string))
else
cat(bold.string)
} else {
pretty_print_string(string, with.gt = with.gt)
}
}
#' Print an empty line
#'
#' @param html.output logical. If TRUE, it outputs an empty line for an HTML
#' document, else an empty line for the standard R output. Default value:
#' FALSE.
#'
#' @seealso \code{\link{cat}}
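#'
#' @examples
#' # illustrative sketch: a newline for the console, a <br/> tag for HTML output
#' print_empty_line()
#' print_empty_line(html.output = TRUE)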
#'
#' @export
print_empty_line = function(html.output = FALSE) {
if (html.output)
cat("<br/>")
else
cat("\n")
}
#' Pretty printing of a vector's names attribute
#'
#' @param vec vector
#' @param vector.names.str string. It tells us what the names of the vector
#' represent (use plural form) in order to fill the print message. Default value:
#' "nodes".
#' @param sep string. The separator character to use to distinguish between
#' the names values. Default value: ", ".
#' @param with.gt logical. Determines if the ">" sign will be appended for nice
#' printing in an R notebook (use with the chunk option \emph{results = 'asis'}).
#' Default value: TRUE.
#'
#' @seealso \code{\link{pretty_print_string}}
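#'
#' @examples
#' # illustrative sketch with a small named vector
#' vec = c(a = 1, b = 2, c = 3)
#' pretty_print_vector_names(vec, vector.names.str = "letters")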
#'
#' @export
pretty_print_vector_names = function(vec, vector.names.str = "nodes",
sep = ", ", with.gt = TRUE) {
if (length(vec) == 1) {
vector.names.str = substr(vector.names.str, start = 1,
stop = nchar(vector.names.str) - 1)
}
pretty_print_string(paste0(length(vec), " ", vector.names.str, ": ",
paste0(names(vec), collapse = sep)), with.gt)
}
#' Pretty printing of a vector's values
#'
#' @param vec vector
#' @param vector.values.str string. It tells us what the values of the vector
#' represent (use plural form) in order to fill the print message. Default value:
#' "nodes".
#' @param sep string. The separator character to use to distinguish between
#' the vector values. Default value: ", ".
#' @param with.gt logical. Determines if the ">" sign will be appended for nice
#' printing in an R notebook (use with the chunk option \emph{results = 'asis'}).
#' Default value: TRUE.
#'
#' @seealso \code{\link{pretty_print_string}}
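#'
#' @examples
#' # illustrative sketch
#' pretty_print_vector_values(c("A1", "B2", "C3"), vector.values.str = "samples")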
#'
#' @export
pretty_print_vector_values = function(vec, vector.values.str = "nodes",
sep = ", ", with.gt = TRUE) {
if (length(vec) == 1) {
vector.values.str = substr(vector.values.str, start = 1,
stop = nchar(vector.values.str) - 1)
}
pretty_print_string(paste0(length(vec), " ", vector.values.str, ": ",
paste0(vec, collapse = sep)), with.gt)
}
#' Pretty printing of a vector's names and values
#'
#' It outputs a vector's names and values in this format: \emph{name1: value1,
#' name2: value2,...}. You can choose how many elements to show in this format.
#' Use with the chunk option \emph{results = 'asis'} to get a nice printing in
#' an R notebook.
#'
#' @param vec vector with \code{names} attribute
#' @param n the number of elements that you want to print in a nice way. Default
#' value: -1 (pretty print all elements). For any n < 1, all elements are
#' printed.
#'
#' @seealso \code{\link{pretty_print_name_and_value}}
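#'
#' @examples
#' # illustrative sketch: print only the first 2 (name: value) pairs
#' vec = c(a = 1, b = 2, c = 3)
#' pretty_print_vector_names_and_values(vec, n = 2)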
#'
#' @export
pretty_print_vector_names_and_values = function(vec, n = -1) {
len = length(vec)
stopifnot(len > 0)
# print all elements by default
if (n == -1) n = len
vec.names = names(vec)
if (len == 1) {
pretty_print_name_and_value(vec.names, vec, with.gt = TRUE, with.comma = FALSE)
} else {
# limit elements to show
if (n >= 1 & n < len)
last.index = n
else
last.index = len
for (index in 1:last.index) {
name = vec.names[index]
value = vec[index]
if (index == 1 & index != last.index)
pretty_print_name_and_value(name, value, with.gt = TRUE, with.comma = TRUE)
if (index == 1 & index == last.index)
pretty_print_name_and_value(name, value, with.gt = TRUE, with.comma = FALSE)
if (index != 1 & index != last.index)
pretty_print_name_and_value(name, value)
if (index != 1 & index == last.index)
pretty_print_name_and_value(name, value, with.comma = FALSE)
}
}
}
#' Pretty print a name and value
#'
#' @param name string
#' @param value string
#' @param with.gt logical. Determines if the ">" sign will be appended for nice
#' printing in an R notebook (use with the chunk option \emph{results = 'asis'}).
#' Default value: FALSE.
#' @param with.comma logical. Determines if the comma (,) character will be
#' appended to the end of the output. Default value: TRUE.
#'
#' @examples
#' pretty_print_name_and_value("aName", "aValue", with.gt = TRUE)
#' pretty_print_name_and_value("aName", "aValue", with.comma = FALSE)
#' @export
pretty_print_name_and_value =
function(name, value, with.gt = FALSE, with.comma = TRUE) {
if (with.comma) {
pretty_print_string(string = paste0(name, ": ", value, ", "),
with.gt = with.gt)
}
else
pretty_print_string(string = paste0(name, ": ", value), with.gt = with.gt)
}
|
/scratch/gouwar.j/cran-all/cranData/usefun/R/print.R
|
#' Generate ROC statistics
#'
#' Use this function to generate the most useful statistics related to the
#' generation of a basic ROC (Receiver Operating Characteristic) curve.
#'
#' @param df a \code{data.frame} with (at least) two columns. See next two
#' parameters for what values these two columns should have (which should match
#' one to one).
#' @param pred_col string. The name of the column of the \code{df} data.frame
#' that has the prediction values. The values can be any numeric, negative,
#' positive or zero. What matters is the \strong{ranking} of these values which
#' is clarified with the \code{direction} parameter.
#' @param label_col string. The name of the column of the \code{df} data.frame
#' that has the true positive labelings/observed classes for the
#' prediction values. This column must have either \emph{1} or \emph{0}
#' elements representing either a \emph{positive} or \emph{negative} classification
#' label for the corresponding values.
#' @param direction string. Can be either \emph{>} or \emph{<} (default value)
#' and indicates the direction/ranking of the prediction values with respect to
#' the positive class labeling (for a specific threshold). If \strong{smaller}
#' prediction values indicate the positive class/label use \strong{<} whereas
#' if \strong{larger} prediction values indicate the positive class/label
#' (e.g. probability of positive class), use \strong{>}.
#'
#' @return A list with two elements:
#' \itemize{
#' \item \code{roc_stats}: a \code{tibble} which includes the \strong{thresholds}
#' for the ROC curve and the \strong{confusion matrix stats} for each threshold as
#' follows: \emph{TP} (#True Positives), \emph{FN} (#False Negatives),
#' \emph{TN} (#True Negatives), \emph{FP} (#False Positives),
#' \emph{FPR} (False Positive Rate - the x-axis values for the ROC curve) and
#' \emph{TPR} (True Positive Rate - the y-axis values for the ROC curve).
#' Also included are the \emph{dist-from-chance} (the vertical distance of
#' the corresponding (FPR,TPR) point to the chance line or positive diagonal)
#' and the \emph{dist-from-0-1} (the squared Euclidean distance of the
#' corresponding (FPR,TPR) point from (0,1)).
#' \item \code{AUC}: a number representing the Area Under the (ROC) Curve.
#' }
#'
#' The returned results provide an easy way to compute two optimal \emph{cutpoints}
#' (thresholds) that dichotomize the predictions into positive and negative.
#' The first is the \emph{Youden index}, which is the maximum vertical distance from the
#' ROC curve to the chance line or positive diagonal. The second is the point
#' of the ROC curve closest to (0,1), the point of perfect differentiation.
#' See examples below.
#'
#' @examples
#' # load libraries
#' library(readr)
#' library(dplyr)
#'
#' # load test tibble
#' test_file = system.file("extdata", "test_df.tsv", package = "usefun", mustWork = TRUE)
#' test_df = readr::read_tsv(test_file, col_types = "di")
#'
#' # get ROC stats
#' res = get_roc_stats(df = test_df, pred_col = "score", label_col = "observed")
#'
#' # Plot ROC with a legend showing the AUC value
#' plot(x = res$roc_stats$FPR, y = res$roc_stats$TPR,
#' type = 'l', lwd = 2, col = '#377EB8', main = 'ROC curve',
#' xlab = 'False Positive Rate (FPR)', ylab = 'True Positive Rate (TPR)')
#' legend('bottomright', legend = round(res$AUC, digits = 3),
#' title = 'AUC', col = '#377EB8', pch = 19)
#' grid()
#' abline(a = 0, b = 1, col = '#FF726F', lty = 2)
#'
#' # Get two possible cutoffs
#' youden_index_df = res$roc_stats %>%
#' filter(dist_from_chance == max(dist_from_chance))
#' min_classification_df = res$roc_stats %>%
#' filter(dist_from_0_1 == min(dist_from_0_1))
#'
#' @importFrom dplyr %>% pull as_tibble
#' @importFrom utils head tail
#' @export
get_roc_stats = function(df, pred_col, label_col, direction = "<") {
# checks
stopifnot(ncol(df) >= 2, pred_col %in% colnames(df),
label_col %in% colnames(df), direction %in% c("<", ">"))
predictions = df %>% pull(pred_col)
observed = df %>% pull(label_col)
stopifnot(all(observed %in% c(0,1)))
if (direction == "<")
thresholds = c(-Inf, sort(unique(predictions)))
else
thresholds = c(sort(unique(predictions)), Inf)
stats = list()
index = 1
for(thres in thresholds) {
stats[[index]] = c(thres, get_conf_mat_for_thres(predictions, observed, thres, direction))
index = index + 1
}
roc_stats = as.data.frame(do.call(rbind, stats))
colnames(roc_stats)[1] = 'threshold'
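  # AUC via the trapezoidal rule: FPR and TPR are both monotone in the threshold,
  # so sorting each vector independently keeps the (FPR, TPR) points correctly paired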
x = sort(roc_stats$FPR)
y = sort(roc_stats$TPR)
AUC = sum(diff(x) * (head(y,-1)+tail(y,-1)))/2
res_list = list()
res_list$roc_stats = as_tibble(roc_stats)
res_list$AUC = AUC
return(res_list)
}
# get the confusion matrix values (TP, FN, TN, FP) + TPR, FPR by comparing the
# values from the `predictions` vector to the `thres` value, given the true class
# labelings from `observed` and the `direction` where the positive class values are
get_conf_mat_for_thres = function(predictions, observed, thres, direction) {
tp = 0
fn = 0
tn = 0
fp = 0
index = 1
for(prediction in predictions) {
obs = observed[index]
if (direction == "<") { # positive => smaller
if (prediction <= thres & obs == 1) {
tp = tp + 1
} else if (prediction <= thres & obs == 0) {
fp = fp + 1
} else if (prediction > thres & obs == 1) {
fn = fn + 1
} else if (prediction > thres & obs == 0) {
tn = tn + 1
}
} else { # positive => larger
if (prediction < thres & obs == 1) {
fn = fn + 1
} else if (prediction < thres & obs == 0) {
tn = tn + 1
} else if (prediction >= thres & obs == 1) {
tp = tp + 1
} else if (prediction >= thres & obs == 0) {
fp = fp + 1
}
}
index = index + 1
}
tpr = tp / (tp + fn)
fpr = fp / (fp + tn)
dist.from.chance = tpr - fpr
dist.from.0.1 = (fpr - 0)^2 + (tpr - 1)^2
res = c(tp, fn, tn, fp, fpr, tpr, dist.from.chance, dist.from.0.1)
names(res) = c('TP', 'FN', 'TN', 'FP', 'FPR', 'TPR', 'dist_from_chance', 'dist_from_0_1')
return(res)
}
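# illustrative call (not run): get_conf_mat_for_thres(predictions = c(0.1, 0.4, 0.8),
#   observed = c(1, 0, 1), thres = 0.4, direction = ">")
# returns TP = 1, FN = 1, TN = 0, FP = 1 (so TPR = 0.5, FPR = 1)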
|
/scratch/gouwar.j/cran-all/cranData/usefun/R/roc.R
|
#' Save vector to a specified file
#'
#' Function for saving a \code{vector} with or without its row names to a
#' specified file.
#' By default the \emph{tab} is used as a delimiter.
#'
#' @param vector vector
#' @param file string. The name of the file, can be a full path.
#' @param with.row.names logical. If TRUE, then the \code{names(vector)} will be
#' included in the output file. Default value: FALSE.
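#'
#' @examples
#' # illustrative sketch, writing to a temporary file
#' vec = c(first = 1, second = 2)
#' save_vector_to_file(vec, file = tempfile(fileext = ".tsv"), with.row.names = TRUE)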
#'
#' @importFrom utils write.table
#' @export
save_vector_to_file = function(vector, file, with.row.names = FALSE) {
write.table(vector, file = file, quote = FALSE, col.names = FALSE,
row.names = with.row.names, sep = "\t")
}
#' Save data frame to a specified file
#'
#' Function for saving a \code{data.frame} to a specified file.
#' Column and row names are written by default and the \emph{tab} is used
#' as a delimiter.
#'
#' @param df data.frame
#' @param file string. The name of the file, can be a full path.
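#'
#' @examples
#' # illustrative sketch, writing to a temporary file
#' df = data.frame(x = 1:3, y = c("a", "b", "c"))
#' save_df_to_file(df, file = tempfile(fileext = ".tsv"))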
#'
#' @importFrom utils write.table
#' @export
save_df_to_file = function(df, file) {
write.table(df, file = file, quote = FALSE, col.names = TRUE,
row.names = TRUE, sep = "\t")
}
#' Save matrix to a specified file
#'
#' Function for saving a \code{matrix} to a specified file. Uses the
#' \code{\link{save_df_to_file}} function.
#'
#' @param mat matrix
#' @param file string. The name of the file, can be a full path.
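#'
#' @examples
#' # illustrative sketch, writing to a temporary file
#' mat = matrix(1:4, ncol = 2, dimnames = list(c("r1", "r2"), c("c1", "c2")))
#' save_mat_to_file(mat, file = tempfile(fileext = ".tsv"))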
#'
#' @export
save_mat_to_file = function(mat, file) {
save_df_to_file(mat, file)
}
|
/scratch/gouwar.j/cran-all/cranData/usefun/R/save.R
|