between <- function(x, left, right) {
    x >= left & x <= right
}

#' @importFrom httr2 last_response resp_body_json
#' @importFrom utils capture.output
#' @noRd
LastError <- function() {
    # Sentinel Hub API uses boom - HTTP-friendly error objects
    # https://hapi.dev/module/boom/
    resp <- httr2::last_response()
    err <- as.data.frame(httr2::resp_body_json(resp)[[1]])
    msg <- c(" \n", paste0(capture.output(err), "\n"))
    return(msg)
}

SafeNull <- function(x) {
    ifelse(is.null(x), NA, x)
}

CheckLengthIs2 <- function(x) {
    # if only one value is provided, replicate it twice (use it for both X and Y)
    if (length(x) == 1L) {
        x <- rep(x, 2)
    }
    if (length(x) != 2L) {
        msg <- sprintf("%s must be a scalar or a vector of length 2", deparse(substitute(x)))
        stop(msg)
    }
    return(x)
}

CheckBbox <- function(bbox) {
    # try converting to numeric, just in case it is not
    bbox <- try(as.numeric(bbox), silent = TRUE)
    if (inherits(bbox, "try-error")) {
        stop("Invalid value of bbox argument, must be a numeric vector of length 4.")
    }
    # check class and length
    if (length(class(bbox)) > 1L || !inherits(bbox, "numeric") || (length(bbox) != 4L)) {
        stop("Invalid value of bbox argument, must be a numeric vector of length 4.")
    }
    # convert to matrix for easier range checks:
    # row 1 is the lower-left corner, row 2 the upper-right corner,
    # column 1 holds longitudes (range +/-180), column 2 holds latitudes (range +/-90)
    mat <- matrix(bbox, ncol = 2, byrow = TRUE)
    if (any(mat[, 1] > 180 | mat[, 1] < -180) || any(mat[, 2] > 90 | mat[, 2] < -90) ||
        any(mat[1, ] > mat[2, ])) {
        stop("Invalid values in bbox, must be longitude/latitude of lower-left/upper-right corner.")
    }
    return()
}

DegLength <- function(latitude) {
    # Computes the length (in meters) of one degree of longitude (X) and one degree
    # of latitude (Y) at a given "latitude" using the WGS 84 ellipsoid parameters.
    #
    # WGS 84 ellipsoid parameters
    # Equatorial radius "a" (called semi-major axis)
    a <- 6378137.0
    # Polar radius "b" (called semi-minor axis)
    b <- 6356752.3142
    # eccentricity "e" of the ellipsoid is related to the major and minor axes
    # (the equatorial and polar radii, respectively)
    e2 <- (a^2 - b^2) / a^2  # e-square
    # convert degrees to radians
    phi <- (latitude * pi) / 180.0
    # Length of a degree of longitude
    # https://en.wikipedia.org/wiki/Longitude#Length_of_a_degree_of_longitude
    degX <- (pi * a * cos(phi)) / (180.0 * sqrt(1 - e2 * sin(phi)^2))
    # Length of a degree of latitude
    # https://en.wikipedia.org/wiki/Latitude#Length_of_a_degree_of_latitude
    degY <- (pi * a * (1 - e2)) / (180.0 * (1 - e2 * sin(phi)^2)^(3/2))
    if (length(latitude) > 1L) {
        out <- data.frame(X = degX, Y = degY)
    } else {
        out <- c(degX, degY)
        names(out) <- c("X", "Y")
    }
    return(out)
}

MakeTimeRange <- function(period) {
    if (any(c(inherits(period, "character"), inherits(period, "Date")))) {
        from <- as.Date(min(period))
        to <- as.Date(max(period))
    }
    if (any(c(inherits(period, "integer"), inherits(period, "numeric")))) {
        from <- min(period)
        to <- max(period)
        from <- as.Date(as.character(from), format = "%Y%m%d")
        to <- as.Date(as.character(to), format = "%Y%m%d")
    }
    return(list(from = sprintf("%sT00:00:00.000Z", from),
                to = sprintf("%sT23:59:59.000Z", to)))
}

#' @importFrom sf st_as_sfc st_polygon
#' @noRd
PolyFromBbox <- function(x) {
    # creates sf geometry (a closed polygon ring) from a bbox vector
    vec <- c(x[1], x[2], x[3], x[2], x[3], x[4], x[1], x[4], x[1], x[2])
    mat <- matrix(vec, ncol = 2, byrow = TRUE)
    out <- sf::st_as_sfc(list(sf::st_polygon(list(mat))), crs = 4326)
    return(out)
}
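# Usage sketch for the helpers above (illustrative only, kept as comments so
# the package file has no top-level code; the values shown are examples):
# DegLength(45)                                  # metres per degree of lon (X) / lat (Y) at 45 deg
# MakeTimeRange(c("2023-07-01", "2023-07-31"))   # API time range in ISO 8601 format
# PolyFromBbox(c(5.7, 49.4, 6.5, 50.2))          # lon/lat bbox -> sf polygon (EPSG:4326)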
/scratch/gouwar.j/cran-all/cranData/CDSE/R/internals.R
.onLoad <- function(libname, pkgname) {
    options(
        CDSE.auth_url = "https://identity.dataspace.copernicus.eu/auth/realms/CDSE/protocol/openid-connect/token",
        CDSE.catalog_url = "https://sh.dataspace.copernicus.eu/api/v1/catalog/1.0.0/",
        CDSE.process_url = "https://sh.dataspace.copernicus.eu/api/v1/process")
}
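# The endpoint URLs set in .onLoad() can be inspected (and, if needed,
# overridden) at run time via the standard options mechanism (illustrative
# only; the replacement URL below is a hypothetical example):
# getOption("CDSE.process_url")
# options(CDSE.process_url = "https://example.org/api/v1/process")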
/scratch/gouwar.j/cran-all/cranData/CDSE/R/zzz.R
## ----label = "knitr options", include = FALSE---------------------------------
knitr::opts_chunk$set(
    fig.width = 7,
    fig.height = 4,
    out.width = "100%",
    fig.align = "center",
    collapse = TRUE,
    comment = "#>"
)

## ----label = "setup", include = FALSE-----------------------------------------
library(CDSE)

## ----label = "GetOAuthClient", eval = FALSE-----------------------------------
# id <- Sys.getenv("CDSE_ID")
# secret <- Sys.getenv("CDSE_SECRET")
# OAuthClient <- GetOAuthClient(id = id, secret = secret)
# class(OAuthClient)
# #> [1] "httr2_oauth_client"
# OAuthClient
# #> <httr2_oauth_client>
# #> name: x9x99xx99x9xx99xx99xx9xx99x99x99
# #> id: xx-9x999x9x-9999-999x-xxxx-x9999x99x99x
# #> secret: <REDACTED>
# #> token_url: https://identity.dataspace.copernicus.eu/auth/realms/CDSE/protocol/openid-connect/token
# #> auth: oauth_client_req_auth_header

## ----label = "Dummy OAuthClient", eval = TRUE---------------------------------
id <- "my_dummy_id"
secret <- "my_dummy_secret"
OAuthClient <- GetOAuthClient(id = id, secret = secret, url = "https://my_dummy_url.org")
class(OAuthClient)
OAuthClient

## ----label = "GetOAuthToken", eval = FALSE------------------------------------
# id <- Sys.getenv("CDSE_ID")
# secret <- Sys.getenv("CDSE_SECRET")
# OAuthToken <- GetOAuthToken(id = id, secret = secret)
# class(OAuthToken)
# #> [1] "character"
# OAuthToken
# #> [1] "xxXxxXxxXxXXXxX9XxXxXxX9xXXxXxXxxXXxxxx9xxXxX9XXXXx9X9xXxX9XxXXXxxX9xXXXxx......"
/scratch/gouwar.j/cran-all/cranData/CDSE/inst/doc/BeforeYouStart.R
--- title: "Before you start" output: pdf_document: df_print: tibble toc: yes urlcolor: blue vignette: > %\VignetteIndexEntry{Before you start} %\VignetteEncoding{UTF-8} %\VignetteEngine{knitr::rmarkdown} --- ```{r label = "knitr options", include = FALSE} knitr::opts_chunk$set( fig.width = 7, fig.height = 4, out.width = "100%", fig.align = "center", collapse = TRUE, comment = "#>" ) ``` ```{r label = "setup", include = FALSE} library(CDSE) ``` # Introduction The `CDSE` package for R was developed to allow access to the '[Copernicus Data Space Ecosystem](https://dataspace.copernicus.eu/)' data and services from R. The `'Copernicus Data Space Ecosystem'`, deployed in 2023, offers access to the EO data collection from the Copernicus missions, with discovery and download capabilities and numerous data processing tools. In particular, the ['Sentinel Hub' API](https://documentation.dataspace.copernicus.eu/APIs/SentinelHub.html) provides access to the multi-spectral and multi-temporal big data satellite imagery service, capable of fully automated, real-time processing and distribution of remote sensing data and related EO products. Users can use APIs to retrieve satellite data over their AOI and specific time range from full archives in a matter of seconds. When working on the application of EO where the area of interest is relatively small compared to the image tiles distributed by Copernicus (100 x 100 km), it allows to retrieve just the portion of the image of interest rather than downloading the huge tile image file and processing it locally. The goal of the `CDSE` package is to provide easy access to this functionality from R. The main functions allow to search the catalog of available imagery from the Sentinel-1, Sentinel-2, Sentinel-3, and Sentinel-5 missions, and to process and download the images of an area of interest and a time range in various formats. Other functions might be added in subsequent releases of the package. # Accessing CDSE data and services Access to the `'Copernicus Data Space Ecosystem'` is free, but you have to register to use the API. You can create a free account as explained in [User registration and authentication](https://documentation.dataspace.copernicus.eu/Registration.html). The free account has some limitations and quotas applied to it, but it should be sufficient for most individual users. The details are provided in [Quotas and Limitations](https://documentation.dataspace.copernicus.eu/Quotas.html). # API authentication Most of the API functions require a specific authentication. The API uses OAuth2 Authentication and requires that you have an access token. In essence, this is a piece of information you add to your requests so the server knows it's you. To be able to request a token, you need to register an OAuth Client in your [account settings](https://shapps.dataspace.copernicus.eu/dashboard/#/account/settings). Here you will obtain your client credentials - client id and client secret. You will use these client credentials to authenticate with the API. **Make sure to copy your personal OAuth secret, as you will not be able to see it again!** You can find more details on the documentation page dedicated to [API authentication](https://documentation.dataspace.copernicus.eu/APIs/SentinelHub/Overview/Authentication.html). # Storing client credentials You should store your client credentials securely. Do not hard-code them (include as clear text) in scripts, particularly in the scripts shared with others. 
Don't save them to a repository (like Git) or to a shared folder.\
You can, of course, provide the credentials every time they are needed, but this is a very
cumbersome approach.\
A simple way to keep them persistently available is to store them as system environment variables.
This can be achieved by defining them in your personal or project-level `.Renviron` file.
We recommend this method for its simplicity and use it in our examples.
You could also set the environment variables with `Sys.setenv()`, but note that this is not
persistent; the values are lost when the R session terminates. Another option is to store them in
the global `options()`, typically in your personal `.Rprofile`. These two options require
name-value pairs, for example, `CDSE_ID = "yourid"` and `CDSE_SECRET = "yoursecret"`.
You can find more information about dealing with sensitive information in R at
[Managing secrets](https://cran.r-project.org/web/packages/httr/vignettes/secrets.html).

# OAuth authentication client

The recommended way to authenticate with the CDSE API is to use the `httr2_oauth_client` object
(from the `httr2` package) returned by the `GetOAuthClient` function, as shown below. You have to
provide your client credentials as arguments to the function. The returned object should be passed
as the `client` argument to the functions requiring the authentication. The underlying services in
the `httr2` package will automatically take care of the authentication lifecycle management
(refreshing the token, etc.).

*The credentials have been obfuscated in the output.*

```{r label = "GetOAuthClient", eval = FALSE}
id <- Sys.getenv("CDSE_ID")
secret <- Sys.getenv("CDSE_SECRET")
OAuthClient <- GetOAuthClient(id = id, secret = secret)
class(OAuthClient)
#> [1] "httr2_oauth_client"
OAuthClient
#> <httr2_oauth_client>
#> name: x9x99xx99x9xx99xx99xx9xx99x99x99
#> id: xx-9x999x9x-9999-999x-xxxx-x9999x99x99x
#> secret: <REDACTED>
#> token_url: https://identity.dataspace.copernicus.eu/auth/realms/CDSE/protocol/openid-connect/token
#> auth: oauth_client_req_auth_header
```

However, it should be noted that the object returned by the `GetOAuthClient` function has not been
validated against the backend. The credentials provided will only be checked the first time this
object is used in a query. Therefore, even if the `GetOAuthClient` function does not raise an
error, you cannot assume that your credentials have been accepted by the backend. To demonstrate
this, you can provide a dummy id, secret, and URL, and no error will be raised at this stage.

```{r label = "Dummy OAuthClient", eval = TRUE}
id <- "my_dummy_id"
secret <- "my_dummy_secret"
OAuthClient <- GetOAuthClient(id = id, secret = secret, url = "https://my_dummy_url.org")
class(OAuthClient)
OAuthClient
```

# OAuth authentication token

To be able to check immediately whether your credentials work correctly, we provide another
authentication function called `GetOAuthToken`. It takes the same arguments as the above-mentioned
`GetOAuthClient` function, but it verifies the credentials immediately. If successful, it returns a
connection token, a (very long) string that can be passed as the `token` argument to the functions
requiring the authentication. If your credentials are refused by the backend, an error is raised.
Please note that in this case you must explicitly take care of the token lifecycle management
yourself.
We therefore recommend that you use this function only to test that your credentials work, and that
you prefer passing the object returned by `GetOAuthClient` as the `client` argument to the
functions from the `CDSE` package that require authentication.

*The token has been obfuscated and shortened in the output.*

```{r label = "GetOAuthToken", eval = FALSE}
id <- Sys.getenv("CDSE_ID")
secret <- Sys.getenv("CDSE_SECRET")
OAuthToken <- GetOAuthToken(id = id, secret = secret)
class(OAuthToken)
#> [1] "character"
OAuthToken
#> [1] "xxXxxXxxXxXXXxX9XxXxXxX9xXXxXxXxxXXxxxx9xxXxX9XXXXx9X9xXxX9XxXXXxxX9xXXXxx......"
```

# Note for Windows users

On some Windows systems, depending on the network and security settings, things might not work out
of the box. If you get an error while connecting to the CDSE API complaining about an SSL/TLS
handshake problem, try setting the environment variable `CURL_SSL_BACKEND` to `openssl` **before**
using the functions from the `CDSE` package. You can restart the R session and type
`Sys.setenv(CURL_SSL_BACKEND = "openssl")` before using the `CDSE` package. Even better, you can
permanently set this environment variable in your `.Renviron` file (by adding the line
`CURL_SSL_BACKEND = "openssl"`) or set it in your Windows system environment variables.
You can find more information about this issue
[here](https://cran.r-project.org/web/packages/curl/vignettes/windows.html).
/scratch/gouwar.j/cran-all/cranData/CDSE/inst/doc/BeforeYouStart.Rmd
## ----label = "knitr options", include = FALSE---------------------------------
knitr::opts_chunk$set(
    fig.width = 7,
    fig.height = 4,
    out.width = "100%",
    fig.align = "center",
    collapse = TRUE,
    comment = "#>"
)

## ----label = "setup", include = FALSE-----------------------------------------
library(CDSE)
options(warn = -1)

## ----label = "GetOAuthClient"--------------------------------------------------
id <- Sys.getenv("CDSE_ID")
secret <- Sys.getenv("CDSE_SECRET")
OAuthClient <- GetOAuthClient(id = id, secret = secret)

## ----label = "get collections"-------------------------------------------------
collections <- GetCollections(as_data_frame = TRUE)
collections

## ----label = "search catalog"--------------------------------------------------
dsn <- system.file("extdata", "luxembourg.geojson", package = "CDSE")
aoi <- sf::read_sf(dsn, as_tibble = FALSE)
images <- SearchCatalog(aoi = aoi, from = "2023-07-01", to = "2023-07-31",
    collection = "sentinel-2-l2a", with_geometry = TRUE, client = OAuthClient)
images

## ----label = "turn off global device", include = FALSE-------------------------
knitr::opts_knit$set(global.device = FALSE)

## ----label = "plot AOI coverage", fig.cap = "Luxembourg image tiles coverage"----
library(maps)
days <- range(as.Date(images$acquisitionDate))
maps::map(database = "world", col = "lightgrey", fill = TRUE, mar = c(0, 0, 4, 0),
    xlim = c(3, 9), ylim = c(47.5, 51.5))
plot(sf::st_geometry(aoi), add = TRUE, col = "red", border = FALSE)
plot(sf::st_geometry(images), add = TRUE)
title(main = sprintf("AOI coverage by image tiles for period %s",
    paste(days, collapse = " / ")), line = 1L)

## ----label = "summary AOI coverage"--------------------------------------------
summary(images$areaCoverage)

## ----label = "by tileNumber"---------------------------------------------------
tileNumber <- substring(images$sourceId, 39, 44)
by(images$areaCoverage, INDICES = tileNumber, FUN = summary)

## ----label = "search catalog to select image"-----------------------------------
dsn <- system.file("extdata", "centralpark.geojson", package = "CDSE")
aoi <- sf::read_sf(dsn, as_tibble = FALSE)
images <- SearchCatalog(aoi = aoi, from = "2021-05-01", to = "2021-05-31",
    collection = "sentinel-2-l2a", with_geometry = TRUE, client = OAuthClient)
images
summary(images$areaCoverage)

## ----label = "retrieve the NDVI image", fig.cap = "Central Park NDVI raster"----
day <- images[order(images$tileCloudCover), ]$acquisitionDate[1]
script_file <- system.file("scripts", "NDVI_float32.js", package = "CDSE")
ras <- GetArchiveImage(aoi = aoi, time_range = day, script = script_file,
    collection = "sentinel-2-l2a", format = "image/tiff", mosaicking_order = "leastCC",
    resolution = 10, mask = TRUE, buffer = 100, client = OAuthClient)
ras
ras[ras < 0] <- 0
terra::plot(ras, main = paste("Central Park NDVI on", day),
    col = colorRampPalette(c("darkred", "yellow", "darkgreen"))(99))

## ----label = "retrieve the RGB image", fig.cap = "Central Park image as PNG file", fig.width = 5, fig.height = 3----
bbox <- as.numeric(sf::st_bbox(aoi))
script_text <- paste(readLines(system.file("scripts", "TrueColor.js", package = "CDSE")),
    collapse = "\n")
cat(script_text, sep = "\n")
png <- tempfile("img", fileext = ".png")
GetArchiveImage(bbox = bbox, time_range = day, script = script_text,
    collection = "sentinel-2-l2a", file = png, format = "image/png",
    mosaicking_order = "leastCC", pixels = c(600, 950), client = OAuthClient)
terra::plotRGB(terra::rast(png))
/scratch/gouwar.j/cran-all/cranData/CDSE/inst/doc/UsingCDSE.R
--- title: "Using 'Copernicus Data Space Ecosystem' API Wrapper" output: pdf_document: df_print: tibble toc: yes toc_depth: 1 fig_caption: yes html_document: toc: yes toc_depth: '1' df_print: paged word_document: toc: yes toc_depth: '1' urlcolor: blue vignette: > %\VignetteIndexEntry{Worked examples using CDSE API wrapper} %\VignetteEncoding{UTF-8} %\VignetteEngine{knitr::rmarkdown} --- ```{r label = "knitr options", include = FALSE} knitr::opts_chunk$set( fig.width = 7, fig.height = 4, out.width = "100%", fig.align = "center", collapse = TRUE, comment = "#>" ) ``` ```{r label = "setup", include = FALSE} library(CDSE) options(warn = -1) ``` ### *Compiled on `r Sys.time()`.* \newpage # Introduction The `CDSE` package for R was developed to allow access to the '[Copernicus Data Space Ecosystem](https://dataspace.copernicus.eu/)' data and services from R. The `'Copernicus Data Space Ecosystem'`, deployed in 2023, offers access to the EO data collection from the Copernicus missions, with discovery and download capabilities and numerous data processing tools. In particular, the ['Sentinel Hub' API](https://documentation.dataspace.copernicus.eu/APIs/SentinelHub.html) provides access to the multi-spectral and multi-temporal big data satellite imagery service, capable of fully automated, real-time processing and distribution of remote sensing data and related EO products. Users can use APIs to retrieve satellite data over their AOI and specific time range from full archives in a matter of seconds. When working on the application of EO where the area of interest is relatively small compared to the image tiles distributed by Copernicus (100 x 100 km), it allows to retrieve just the portion of the image of interest rather than downloading the huge tile image file and processing it locally. The goal of the `CDSE` package is to provide easy access to this functionality from R. The main functions allow to search the catalog of available imagery from the Sentinel-1, Sentinel-2, Sentinel-3, and Sentinel-5 missions, and to process and download the images of an area of interest and a time range in various formats. Other functions might be added in subsequent releases of the package. # API authentication Most of the API functions require OAuth2 authentication. The recommended procedure is to obtain an authentication client object from the `GetOAuthClient` function, and to pass it as the `client` argument to the functions requiring the authentication. For more detailed information, you are invited to consult the "`Before you start`" document. ```{r label = "GetOAuthClient"} id <- Sys.getenv("CDSE_ID") secret <- Sys.getenv("CDSE_SECRET") OAuthClient <- GetOAuthClient(id = id, secret = secret) ``` ## *Note* *In this document, the data.frames are output as tibbles since it renders better in PDF. However, all the functions produce standard data.frames.* # Collections We can get the list of all the imagery collections available in the `'Copernicus Data Space Ecosystem'`. By default, the list is formatted as a data.frame listing the main collection features. It is also possible to obtain the raw list with all information by setting the argument `as_data_frame` to `FALSE`. ```{r label = "get collections"} collections <- GetCollections(as_data_frame = TRUE) collections ``` \newpage # Catalog search The imagery catalog can be searched by spatial and temporal extent for every collection present in the `'Copernicus Data Space Ecosystem'`. 
For the spatial filter, you can provide either an `sf` or `sfc` object from the `sf` package,
typically a (multi)polygon describing the area of interest, or a numeric vector of four elements
describing the bounding box of interest. For the temporal filter, you must specify the time range
by either `Date` or `character` values that can be converted to dates by the `as.Date` function.
Open intervals (one side only) can be obtained by providing the `NA` or `NULL` value for the
corresponding argument.

```{r label = "search catalog"}
dsn <- system.file("extdata", "luxembourg.geojson", package = "CDSE")
aoi <- sf::read_sf(dsn, as_tibble = FALSE)
images <- SearchCatalog(aoi = aoi, from = "2023-07-01", to = "2023-07-31",
    collection = "sentinel-2-l2a", with_geometry = TRUE, client = OAuthClient)
images
```

We can visualize the coverage of the area of interest by the satellite image tiles by plotting the
footprints of the available images and showing the region of interest in red.

```{r label = "turn off global device", include = FALSE}
knitr::opts_knit$set(global.device = FALSE)
```

```{r label = "plot AOI coverage", fig.cap = "Luxembourg image tiles coverage"}
library(maps)
days <- range(as.Date(images$acquisitionDate))
maps::map(database = "world", col = "lightgrey", fill = TRUE, mar = c(0, 0, 4, 0),
    xlim = c(3, 9), ylim = c(47.5, 51.5))
plot(sf::st_geometry(aoi), add = TRUE, col = "red", border = FALSE)
plot(sf::st_geometry(images), add = TRUE)
title(main = sprintf("AOI coverage by image tiles for period %s",
    paste(days, collapse = " / ")), line = 1L)
```

Some tiles cover only a small fraction of the area of interest, while others cover almost the
entire area.

```{r label = "summary AOI coverage"}
summary(images$areaCoverage)
```

The tile number can be obtained from the image attribute `sourceId`, as explained
[here](https://sentinels.copernicus.eu/web/sentinel/user-guides/sentinel-2-msi/naming-convention).
We can therefore summarize the distribution of area coverage by tile number, and see which tiles
provide the best coverage of the AOI.

```{r label = "by tileNumber"}
tileNumber <- substring(images$sourceId, 39, 44)
by(images$areaCoverage, INDICES = tileNumber, FUN = summary)
```

\newpage

# Scripts

As we shall see in the examples below, we have to provide a `script` argument to the
`GetArchiveImage` function. An evalscript (or "custom script") is a piece of JavaScript code that
defines how the satellite data shall be processed by the API and what values the service shall
return. It is a required part of any request involving data processing, such as retrieving an image
of the area of interest. The evaluation scripts can use any JavaScript function or language
structures, along with certain utility functions provided by the API for user convenience.
The Chrome V8 JavaScript engine is used for running the evalscripts.

The evaluation scripts are passed as the `script` argument to the `GetArchiveImage` function.
It has to be either a character string containing the evaluation script or the name of the file
containing the script. The `scripts` folder of this package contains a few examples of evaluation
scripts, as listed below. It is beyond the scope of this document to provide guidance for writing
scripts; we encourage users to consult the API
[Beginners Guide](https://docs.sentinel-hub.com/api/latest/user-guides/beginners-guide/) and
[Evalscript (custom script)](https://docs.sentinel-hub.com/api/latest/evalscript/) documentation.
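As a small illustration, we can list the example evalscripts bundled with the installed package:

```{r label = "list example scripts"}
list.files(system.file("scripts", package = "CDSE"))
```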
\newpage

# Retrieving AOI satellite image as a raster object

One of the most important features of the API is its ability to extract only the part of the images
covering the area of interest. If the AOI is small, as in the example below, this is a significant
gain in efficiency (download, local processing) compared to getting the whole tile image and
processing it locally.

```{r label = "search catalog to select image"}
dsn <- system.file("extdata", "centralpark.geojson", package = "CDSE")
aoi <- sf::read_sf(dsn, as_tibble = FALSE)
images <- SearchCatalog(aoi = aoi, from = "2021-05-01", to = "2021-05-31",
    collection = "sentinel-2-l2a", with_geometry = TRUE, client = OAuthClient)
images
summary(images$areaCoverage)
```

As the area is small, it is systematically fully covered by all available images. We shall select
the date with the least cloud cover, and retrieve the NDVI values as a `SpatRaster` from package
`terra`. This allows further processing of the data, as shown below by replacing all negative
values with zero. The size of the pixels is specified directly by the `resolution` argument. We are
also adding a 100-meter `buffer` around the area of interest and `mask`ing the pixels outside of
the AOI.

```{r label = "retrieve the NDVI image", fig.cap = "Central Park NDVI raster"}
day <- images[order(images$tileCloudCover), ]$acquisitionDate[1]
script_file <- system.file("scripts", "NDVI_float32.js", package = "CDSE")
ras <- GetArchiveImage(aoi = aoi, time_range = day, script = script_file,
    collection = "sentinel-2-l2a", format = "image/tiff", mosaicking_order = "leastCC",
    resolution = 10, mask = TRUE, buffer = 100, client = OAuthClient)
ras
ras[ras < 0] <- 0
terra::plot(ras, main = paste("Central Park NDVI on", day),
    col = colorRampPalette(c("darkred", "yellow", "darkgreen"))(99))
```

\newpage

# Retrieving AOI satellite image as an image file

If we don't want to process the satellite image locally but simply use it as an image file (to
include in a report or a Web page, for example), we can use an appropriate script that renders a
three-band raster for RGB layers (or a single band for a black-and-white image). Here we specify
the area of interest by its bounding box instead of the exact geometry. We also demonstrate that
the evaluation script can be passed as a single character string, and provide the number of pixels
in the output image rather than the size of individual pixels - this makes more sense if the image
is intended for display and not processing.

```{r label = "retrieve the RGB image", fig.cap = "Central Park image as PNG file", fig.width = 5, fig.height = 3}
bbox <- as.numeric(sf::st_bbox(aoi))
script_text <- paste(readLines(system.file("scripts", "TrueColor.js", package = "CDSE")),
    collapse = "\n")
cat(script_text, sep = "\n")
png <- tempfile("img", fileext = ".png")
GetArchiveImage(bbox = bbox, time_range = day, script = script_text,
    collection = "sentinel-2-l2a", file = png, format = "image/png",
    mosaicking_order = "leastCC", pixels = c(600, 950), client = OAuthClient)
terra::plotRGB(terra::rast(png))
```
/scratch/gouwar.j/cran-all/cranData/CDSE/inst/doc/UsingCDSE.Rmd
# init ---------------------------------------------------------------------------------
library(sf)
library(terra)
library(CDSE)

# Getting collections doesn't require authentication ------------------------------------
collections <- GetCollections()
collections

# Authenticate ---------------------------------------------------------------------------
id <- Sys.getenv("CDSE_ID")
secret <- Sys.getenv("CDSE_SECRET")
OAuthClient <- GetOAuthClient(id = id, secret = secret)

# Search catalog --------------------------------------------------------------------------
# search for available Sentinel 2 L2A imagery of New York Central Park in July 2023
# get the New York City Central Park shape as area of interest
dsn <- system.file("extdata", "centralpark.geojson", package = "CDSE")
aoi <- sf::read_sf(dsn, as_tibble = FALSE)
# search by area of interest
images <- SearchCatalog(aoi = aoi, from = "2023-07-01", to = "2023-07-31",
    collection = "sentinel-2-l2a", with_geometry = TRUE, client = OAuthClient)
images

# select the date with the least cloud cover, and retrieve the NDVI values
day <- images[order(images$tileCloudCover), ]$acquisitionDate[1]
script_file <- system.file("scripts", "NDVI_float32.js", package = "CDSE")
ras <- GetArchiveImage(aoi = aoi, time_range = day, script = script_file,
    collection = "sentinel-2-l2a", format = "image/tiff", mosaicking_order = "leastCC",
    resolution = 10, mask = TRUE, buffer = 100, client = OAuthClient)
ras
ras[ras < 0] <- 0
plot(ras, main = paste("Central Park NDVI on", day),
    col = colorRampPalette(c("darkred", "yellow", "darkgreen"))(99))
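# Optionally, persist the NDVI raster for later use (illustrative addition;
# the output path below is just an example)
terra::writeRaster(ras, filename = file.path(tempdir(), "centralpark_ndvi.tif"),
    overwrite = TRUE)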
/scratch/gouwar.j/cran-all/cranData/CDSE/inst/examples/get_archive_image.R
# init ---------------------------------------------------------------------------------
library(sf)
library(CDSE)

# Getting collections doesn't require authentication ------------------------------------
collections <- GetCollections()
collections

# Authenticate ---------------------------------------------------------------------------
id <- Sys.getenv("CDSE_ID")
secret <- Sys.getenv("CDSE_SECRET")
OAuthClient <- GetOAuthClient(id = id, secret = secret)

# Search catalog --------------------------------------------------------------------------
# search for available Sentinel 2 L2A imagery of New York Central Park in July 2023
# get the New York City Central Park shape as area of interest
dsn <- system.file("extdata", "centralpark.geojson", package = "CDSE")
aoi <- sf::read_sf(dsn, as_tibble = FALSE)
# search by area of interest
images <- SearchCatalog(aoi = aoi, from = "2023-07-01", to = "2023-07-31",
    collection = "sentinel-2-l2a", with_geometry = TRUE, client = OAuthClient)
images

# search for available Sentinel 1 GRD imagery of Luxembourg in January 2019
# get the Luxembourg country shape as area of interest
dsn <- system.file("extdata", "luxembourg.geojson", package = "CDSE")
aoi <- sf::read_sf(dsn, as_tibble = FALSE)
# search by bounding box
bbox <- as.numeric(sf::st_bbox(aoi))
images <- SearchCatalog(bbox = bbox, from = "2019-01-01", to = "2019-01-30",
    collection = "sentinel-1-grd", with_geometry = TRUE, client = OAuthClient)
images
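# Illustrative follow-up: summarize how well the image tiles cover the AOI and
# keep the acquisitions with the best coverage (assumes the areaCoverage column
# returned by SearchCatalog can be compared directly)
summary(images$areaCoverage)
images[images$areaCoverage == max(images$areaCoverage), ]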
/scratch/gouwar.j/cran-all/cranData/CDSE/inst/examples/search_catalog.R
#' CDSS: Course dependent skill structures
#'
#' The \code{CDSS} package provides functions for a complete workflow from
#' skill assignment tables to surmise mappings on the sets of skills and
#' learning objects, respectively.
#'
#' @section Suggested workflow for the general case:
#' 1. Read the skill assignment using one of the \code{read_skill_assignments_xxx()} functions.
#' 1. Check the compliance to the definition for skill assignments using [cdss_sa_compliance()].
#' 1. Convert the skill assignment into a skill multi-assignment using [cdss_sa2sma()].
#' 1. Close the skill multi-assignment under completion using [cdss_sma2csma()].
#' 1. Compute the surmise function on skills using [cdss_csma2sf()].
#' 1. Continue with functions from the \code{kstMatrix} package, e.g., to obtain a basis and further on a skill space.
#'
#' @section Suggested workflow for the special case of one LO per skill:
#' 1. Read the skill assignment using one of the \code{read_skill_assignments_xxx()} functions.
#' 1. Check whether the skill assignment allows for the derivation of a surmise relation using [cdss_sa_describes_sr()].
#' 1. If yes, derive an attribution relation from the skill assignment using [cdss_sa2ar_skill()].
#' 1. Close the attribution relation to a surmise relation using [close_ar()].
#' 1. Continue with functions from the \code{kstMatrix} package, e.g., to obtain a basis and further on a skill space.
#'
#' @section Data files:
#' The installation of this package includes several data files as examples in the
#' \code{extdata} subdirectory (see the Examples below for how to access the files there).
#' There are four data sets, \code{KST}, \code{KST-Intro}, \code{SkillAssignment}, and
#' \code{ErroneousSkillAssignment}. The \code{SkillAssignment} data set is available in
#' three formats, ODS, XLSX, and CSV (in CSV format, there are two files each,
#' \code{SkillAssignment-R.csv} and \code{SkillAssignment-T.csv}, for required and taught skills, respectively).
#' The other three data sets are available in ODS format only.
#'
#' \code{SkillAssignment} and \code{ErroneousSkillAssignment} are small example data sets
#' where the latter fails for \code{cdss_sa_compliance()}. \code{KST} contains a skill
#' assignment for the course on knowledge space theory under <https://moodle.qhelp.eu/>.
#' \code{KST-Intro} contains the reduction of \code{KST} to the first chapter of
#' that course.
#'
#' @examples
#' library(readODS)
#' fpath <- system.file("extdata", "SkillAssignment.ods", package="CDSS")
#' sa <- read_skill_assignment_ods(fpath)
#' sa
#' sma <- cdss_sa2sma(sa)
#' sma
#' csma <- cdss_sma2csma(sma)
#' csma
#' sf <- cdss_csma2sf(csma)
#' sf
#'
#' @section References:
#' Hockemeyer, C. (2022). Building Course-Dependent Skill Structures - Applying
#' Competence based Knowledge Space Theory to Itself. Manuscript in preparation.
#'
#' @section Acknowledgements:
#' The creation of this R package was financially supported by the Erasmus+ Programme
#' of the European Commission through the QHELP project (<https://qhelp.eu/>).
#'
#' @aliases CDSS-package
#' @md
#' @name CDSS
NULL
/scratch/gouwar.j/cran-all/cranData/CDSS/R/CDSS-package.R
#' Compute a binary matrix product
#'
#' \code{binary_matrix_product} expects two binary matrices and computes their Boolean product.
#'
#' @param m Binary matrix
#' @param n Binary matrix
#'
#' @return Boolean matrix product of m and n
#'
#' @family Utility functions
#'
#' @export
binary_matrix_product <- function(m, n) {
    # for a matrix product, the number of columns of m must equal the number of rows of n
    if (dim(m)[2] != dim(n)[1]) {
        stop(sprintf("%s and %s do not fit in size!", dQuote("m"), dQuote("n")))
    }
    res <- matrix(rep(0, dim(m)[1] * dim(n)[2]), nrow = dim(m)[1])
    lapply((1:dim(m)[1]), function(x) {
        lapply((1:dim(n)[2]), function(y) {
            # Boolean "dot product" of row x of m and column y of n
            res[x, y] <<- max(m[x, ] & n[, y])
        })
    })
    res
}
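# Usage sketch (illustrative, kept as comments): Boolean product of a small
# relation with itself, i.e. which pairs are connected by a path of length two
# r <- matrix(c(0, 1, 0,
#               0, 0, 1,
#               0, 0, 0), nrow = 3, byrow = TRUE)
# binary_matrix_product(r, r)   # only the pair (1, 3) remains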
/scratch/gouwar.j/cran-all/cranData/CDSS/R/binary_matrix_product.R
#' Vector of learning objects requiring and teaching the same skill
#'
#' \code{cdss_circular_requirements} expects a skill assignment and returns
#' a vector of learning objects which require a skill that they teach.
#'
#' @param sa Skill assignment
#'
#' @return Vector of learning objects
#'
#' @family Functions testing validity of skill assignments
#'
#' @export
cdss_circular_requirements <- function(sa) {
    if (!(inherits(sa, "cdss_sa"))) {
        stop(sprintf("%s must be of class %s.", dQuote("sa"), dQuote("cdss_sa")))
    }
    required <- sa$required
    taught <- sa$taught
    # Check if the matrices fit together
    if (!(all(dim(taught) == dim(required)))) {
        stop(sprintf("%s and %s must have equal size!", dQuote("taught"), dQuote("required")))
    }
    if (!(all(colnames(required) == colnames(taught)))) {
        stop(sprintf("%s and %s must have equal colnames!", dQuote("taught"), dQuote("required")))
    }
    if (!(all(rownames(required) == rownames(taught)))) {
        stop(sprintf("%s and %s must have equal rownames!", dQuote("taught"), dQuote("required")))
    }
    rownames(which(required & taught, arr.ind = TRUE))
}
/scratch/gouwar.j/cran-all/cranData/CDSS/R/cdss_circular_requirements.R
#' Derive a surmise function from a complete skill multi-assignment
#'
#' \code{cdss_csma2sf} expects a complete skill multi-assignment object
#' and returns the corresponding surmise function on the set of skills.
#'
#' @param csma Complete skill multi-assignment
#'
#' @return Object of class \code{cdss_sf}.
#'
#' @export
cdss_csma2sf <- function(csma) {
    if (!(inherits(csma, "cdss_csma"))) {
        stop(sprintf("%s must be of class %s.", dQuote("csma"), dQuote("cdss_csma")))
    }
    # First, we start with an attribution function.
    # This AF will be closed under reflexivity and transitivity because
    # we start this from a CSMA.
    skills <- (dim(csma)[2] - 1) / 2
    tgtcols <- 2:(skills + 1)
    reqcols <- (skills + 2):(2 * skills + 1)
    sf <- data.frame(t(c("xyz", 1:skills)))  # dummy row, deleted below
    colnames(sf) <- 1:(skills + 1)
    lapply(1:skills, function(s) {
        currskill <- colnames(csma)[s + 1]
        clausenums <- which(csma[, s + 1] == 1)
        m1 <- matrix(rep(currskill, length(clausenums)), ncol = 1)
        m2 <- 1 * (csma[clausenums, tgtcols] | csma[clausenums, reqcols])
        hf <- data.frame(cbind(m1, m2))
        colnames(hf) <- 1:(skills + 1)
        sf <<- rbind(sf, hf)
    })
    sf <- sf[-1, ]  # remove the dummy row
    # Now we remove comparable clauses so that only incomparable ones remain
    chgd <- TRUE
    sf[, 2:(skills + 1)] <- sapply(sf[, 2:(skills + 1)], as.numeric)
    while (chgd) {
        chgd <- FALSE
        rownames(sf) <- 1:dim(sf)[1]
        lapply(unique(sf[, 1]), function(s) {
            if (!chgd) {
                sel <- which(sf[, 1] == s)
                lapply(sel, function(x) {
                    if (!chgd) {
                        lapply(sel, function(y) {
                            if (!chgd) {
                                if (x != y) {
                                    if (all((sf[x, 2:(skills + 1)] & sf[y, 2:(skills + 1)]) == sf[x, 2:(skills + 1)])) {
                                        sf <<- sf[-y, ]
                                        chgd <<- TRUE
                                    }
                                }
                            }
                        })
                    }
                })
            }
        })
    }
    colnames(sf) <- c("Skill", colnames(csma)[tgtcols])
    class(sf) <- unique(c("cdss_sf", class(sf)))
    sf
}
/scratch/gouwar.j/cran-all/cranData/CDSS/R/cdss_csma2sf.R
#' Vector of skills without teaching learning objects.
#'
#' \code{cdss_missing_los} expects a skill assignment and returns a vector
#' of skills which are not taught by any learning object.
#'
#' @param sa Skill assignment
#'
#' @return Vector of skills
#'
#' @family Functions testing validity of skill assignments
#'
#' @export
cdss_missing_los <- function(sa) {
    if (!(inherits(sa, "cdss_sa"))) {
        stop(sprintf("%s must be of class %s.", dQuote("sa"), dQuote("cdss_sa")))
    }
    taught <- sa$taught
    required <- sa$required
    # Check if the matrices fit together
    if (!(all(dim(taught) == dim(required)))) {
        stop(sprintf("%s and %s must have equal size!", dQuote("taught"), dQuote("required")))
    }
    if (!(all(colnames(required) == colnames(taught)))) {
        stop(sprintf("%s and %s must have equal colnames!", dQuote("taught"), dQuote("required")))
    }
    if (!(all(rownames(required) == rownames(taught)))) {
        stop(sprintf("%s and %s must have equal rownames!", dQuote("taught"), dQuote("required")))
    }
    names(which(colSums(taught) == 0))
}
/scratch/gouwar.j/cran-all/cranData/CDSS/R/cdss_missing_los.R
#' Vector of learning objects not teaching any skills.
#'
#' \code{cdss_nonteaching_los} expects a skill assignment and returns a vector
#' of learning objects which do not teach any skill.
#'
#' @param sa Skill assignment
#'
#' @return Vector of learning objects
#'
#' @family Functions testing validity of skill assignments
#'
#' @export
cdss_nonteaching_los <- function(sa) {
    if (!(inherits(sa, "cdss_sa"))) {
        stop(sprintf("%s must be of class %s.", dQuote("sa"), dQuote("cdss_sa")))
    }
    taught <- sa$taught
    required <- sa$required
    # Check if the matrices fit together
    if (!(all(dim(taught) == dim(required)))) {
        stop(sprintf("%s and %s must have equal size!", dQuote("taught"), dQuote("required")))
    }
    if (!(all(colnames(required) == colnames(taught)))) {
        stop(sprintf("%s and %s must have equal colnames!", dQuote("taught"), dQuote("required")))
    }
    if (!(all(rownames(required) == rownames(taught)))) {
        stop(sprintf("%s and %s must have equal rownames!", dQuote("taught"), dQuote("required")))
    }
    names(which(rowSums(taught) == 0))
}
/scratch/gouwar.j/cran-all/cranData/CDSS/R/cdss_nonteaching_los.R
#' Create an attribution relation on skills from a skill assignment.
#'
#' \code{cdss_sa2ar_skill} expects a skill assignment and derives an attribution relation
#' on skills if the skill assignment fulfills the necessary condition, i.e. if there
#' is only one teaching LO per skill.
#'
#' @param sa Skill assignment object
#'
#' @return Attribution relation
#'
#' @family functions deriving skill structures from skill assignments
#'
#' @export
cdss_sa2ar_skill <- function(sa) {
    if (!(inherits(sa, "cdss_sa"))) {
        stop(sprintf("%s must be of class %s.", dQuote("sa"), dQuote("cdss_sa")))
    }
    if (!cdss_sa_describes_sr(sa)) {
        stop(sprintf("%s does not describe a surmise relation!", dQuote("sa")))
    }
    # Determine the attribution relation
    skills <- dim(sa$taught)[2]
    ar <- diag(1, skills, skills)
    lapply(rownames(sa$taught), function(x) {
        sapply((1:skills), function(y) {
            if (sa$taught[x, y]) {
                # skills taught together with y by LO x are attributed to y ...
                sapply((1:skills), function(z) {
                    if (sa$taught[x, z]) { ar[y, z] <<- 1 }
                })
                # ... as are the skills required by LO x
                sapply((1:skills), function(z) {
                    if (sa$required[x, z]) { ar[y, z] <<- 1 }
                })
            }
        })
    })
    colnames(ar) <- colnames(sa$taught)
    rownames(ar) <- colnames(sa$taught)
    class(ar) <- unique(c("attribution_relation", class(ar)))
    t(ar)
}
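# Usage sketch (illustrative, kept as comments; assumes `sa` is a skill
# assignment read with one of the read_skill_assignment_xxx() functions),
# following the special-case workflow from the package help page:
# if (cdss_sa_describes_sr(sa)) {
#     ar <- cdss_sa2ar_skill(sa)
#     sr <- close_ar(ar)
# }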
/scratch/gouwar.j/cran-all/cranData/CDSS/R/cdss_sa2ar_skill.R
#' Convert skill assignment matrices to skill multi-assignment
#'
#' \code{cdss_sa2sma} expects a list of two matrices (\code{taught} and \code{required}) of a skill
#' assignment. It returns a skill multi-assignment object.
#'
#' @param sa Skill assignment object
#'
#' @return Object of class \code{cdss_sma}.
#'
#' @family functions building skill (multi) assignment matrices
#'
#' @export
cdss_sa2sma <- function(sa) {
    if (!(inherits(sa, "cdss_sa"))) {
        stop(sprintf("%s must be of class %s.", dQuote("sa"), dQuote("cdss_sa")))
    }
    df <- cbind(data.frame(rownames(sa$taught)), data.frame(sa$taught), data.frame(sa$required))
    colnames(df)[1] <- "LO"
    rownames(df) <- NULL
    class(df) <- unique(c("cdss_sma", class(df)))
    df
}
/scratch/gouwar.j/cran-all/cranData/CDSS/R/cdss_sa2sma.R
#' Check whether a skill assignment is compliant with the CDSS conditions.
#'
#' \code{cdss_sa_compliance} expects a skill assignment and checks whether
#' it is compliant with the conditions for CDSS.
#'
#' @param sa Skill assignment
#' @param warnings Toggles whether warnings should be printed
#'
#' @return Boolean
#'
#' @family Functions testing validity of skill assignments
#'
#' @export
cdss_sa_compliance <- function(sa, warnings = FALSE) {
    if (!(inherits(sa, "cdss_sa"))) {
        stop(sprintf("%s must be of class %s.", dQuote("sa"), dQuote("cdss_sa")))
    }
    taught <- sa$taught
    required <- sa$required
    # Check if the matrices fit together
    if (!(all(dim(taught) == dim(required)))) {
        stop(sprintf("%s and %s must have equal size!", dQuote("taught"), dQuote("required")))
    }
    if (!(all(colnames(required) == colnames(taught)))) {
        stop(sprintf("%s and %s must have equal colnames!", dQuote("taught"), dQuote("required")))
    }
    if (!(all(rownames(required) == rownames(taught)))) {
        stop(sprintf("%s and %s must have equal rownames!", dQuote("taught"), dQuote("required")))
    }
    result <- TRUE
    if (any(colSums(taught) == 0)) {
        if (warnings) warning("Not all skills are taught by learning objects: ",
                              names(which(colSums(taught) == 0)))
        result <- FALSE
    }
    if (any(required & taught)) {
        circular <- rownames(which(required & taught, arr.ind = TRUE))
        if (warnings) warning("Some skills are taught and required for the same learning object: ",
                              circular)
        result <- FALSE
    }
    if (any(rowSums(taught) == 0)) {
        if (warnings) warning("Not all learning objects teach something: ",
                              names(which(rowSums(taught) == 0)))
        result <- FALSE
    }
    result
}
/scratch/gouwar.j/cran-all/cranData/CDSS/R/cdss_sa_compliance.R
#' Check whether a surmise relation can be derived from a given skill assignment.
#'
#' \code{cdss_sa_describes_sr} expects a list of two matrices (\code{taught} and \code{required}) of a skill
#' assignment. It returns TRUE if the skill assignment describes a surmise relation (i.e. there is
#' only one teaching LO per skill) and FALSE otherwise.
#'
#' @param sa Skill assignment object
#' @param verbose Flag, default is FALSE
#'
#' @return Logical value
#'
#' @family functions deriving skill structures from skill assignments
#'
#' @export
cdss_sa_describes_sr <- function(sa, verbose = FALSE) {
    if (!(inherits(sa, "cdss_sa"))) {
        stop(sprintf("%s must be of class %s.", dQuote("sa"), dQuote("cdss_sa")))
    }
    rv <- max(colSums(sa$taught)) == 1
    if ((!rv) && verbose) {
        multi <- colnames(sa$taught)[which(colSums(sa$taught) > 1)]
        msg <- paste("The following skills have more than one teaching LO:", multi)
        message(msg)
    }
    rv
}
/scratch/gouwar.j/cran-all/cranData/CDSS/R/cdss_sa_describes_sr.R
#' Complete a skill multi-assignment
#'
#' \code{cdss_sma2csma} expects a skill multi-assignment object and returns
#' the corresponding complete skill multi-assignment.
#' If this would involve cycles, the function stops by default - except if
#' \code{allowcycles} is set to \code{TRUE}. In that case, the result may
#' be ill-defined!
#'
#' @param sma Skill multi-assignment to be completed
#' @param allowcycles Whether prerequisite cycles should be allowed (default = FALSE)
#'
#' @return Object of class \code{cdss_csma}.
#'
#' @export
cdss_sma2csma <- function(sma, allowcycles = FALSE) {
    if (!(inherits(sma, "cdss_sma"))) {
        stop(sprintf("%s must be of class %s.", dQuote("sma"), dQuote("cdss_sma")))
    }
    skills <- (dim(sma)[2] - 1) / 2
    reqcols <- (skills + 2):((2 * skills) + 1)
    cn <- colnames(sma)
    # We do only/max. one addition per outer loop
    chgd <- TRUE
    foundcycles <- FALSE
    clo <- NULL  # changed LO
    while (chgd && (!foundcycles)) {
        # loop for a transitive extension
        chgd <- FALSE
        clauses <- dim(sma)[1]
        # for each row in the data frame
        lapply((1:clauses), function(cl) {
            if ((!chgd) && (!foundcycles)) {
                if (allowcycles) {
                    cl_h <- 1 * (as.numeric(sma[cl, 2:(skills + 1)]) |
                                 as.numeric(sma[cl, (skills + 2):((2 * skills) + 1)]))
                } else {
                    cl_h <- as.numeric(sma[cl, (skills + 2):((2 * skills) + 1)])
                }
                # for each required skill
                lapply(which(sma[cl, reqcols] == 1), function(s) {
                    if ((!chgd) && (!foundcycles)) {
                        # possible extensions, i.e. rows for LOs teaching skill s
                        poss_rows <- which(sma[, s + 1] == 1)
                        poss <- 1 * (sma[poss_rows, 2:(skills + 1)] |
                                     sma[poss_rows, (skills + 2):((2 * skills) + 1)])
                        # extend only if no possible extension is already included in cl_h
                        if (!any(apply(poss, 1, function(p) { all(1 * (p & cl_h) == p) }))) {
                            chgd <<- TRUE
                            tgt <- 1 * (sma[cl, 2:(skills + 1)])
                            req <- 1 * (sma[cl, (skills + 2):((2 * skills) + 1)])
                            clo <<- unlist(sma[cl, 1])
                            sma <<- sma[-cl, ]
                            apply(poss, 1, function(p) {
                                if ((allowcycles) || (all((tgt & p) == 0))) {
                                    v <- c(clo, tgt, 1 * (req | p))
                                    names(v) <- colnames(sma)
                                    sma <<- data.frame(rbind(sma, t(v)))
                                    sma[, 2:(2 * skills + 1)] <<- sapply(sma[, 2:(2 * skills + 1)], as.numeric)
                                    rownames(sma) <<- 1:dim(sma)[1]
                                } else foundcycles <<- TRUE
                            })
                            if ((!allowcycles) && foundcycles) {
                                stop(sprintf("Cycle(s) around LO %s! Result is undefined", clo))
                            }
                        }
                    }
                })
            }
        })
        if ((!allowcycles) && foundcycles) return()
        # Remove comparable rows
        colnames(sma) <- cn
        rownames(sma) <- 1:(dim(sma)[1])
        if (chgd) {
            ch2 <- TRUE
            while (ch2) {
                rownames(sma) <- 1:(dim(sma)[1])
                ch2 <- FALSE
                sel <- which(sma[, 1] == clo)
                lapply(sel, function(x) {
                    if (!ch2) {
                        lapply(sel, function(y) {
                            if (!ch2) {
                                if (x != y) {
                                    if (all((sma[x, reqcols] & sma[y, reqcols]) == sma[x, reqcols])) {
                                        sma <<- sma[-y, ]
                                        ch2 <<- TRUE
                                    }
                                }
                            }
                        })
                    }
                })
                colnames(sma) <- cn
                rownames(sma) <- 1:(dim(sma)[1])
            }
        }
    }
    # Final touches to the result
    colnames(sma) <- cn
    rownames(sma) <- 1:(dim(sma)[1])
    class(sma) <- unique(c("cdss_csma", "cdss_sma", class(sma)))
    sma
}
/scratch/gouwar.j/cran-all/cranData/CDSS/R/cdss_sma2csma.R
#' Build matrices of taught and required, respectively, skills for learning objects from
#' respective tables.
#'
#' \code{cdss_tables2sa} expects two data frames with two columns each. The first
#' column contains the IDs of learning objects and the second column the IDs of single skills
#' required or taught, respectively, by this learning object.
#' It returns a list of two binary matrices, "taught" and "required". Each matrix has one
#' row per learning object and one column per skill. The cells contain a "1" if the skill
#' is taught or required, respectively, by the learning object and a "0" otherwise.
#'
#' @param taught Data table containing the assignment of taught skills to learning objects
#' @param required Data table containing the assignment of required skills to learning objects
#'
#' @return List of two binary matrices, "taught" and "required".
#'
#' @family functions building skill (multi) assignment matrices
#'
#' @export
cdss_tables2sa <- function(taught, required) {
    # Doing some awful hack here: blowing up the skill and LO names to 128 chars width
    # and at the end trimming them back - but I did not get it to work otherwise
    skills <- trimws(format(unique(c(taught[, 2], required[, 2])), width = 128, justify = "right"), "both")
    los <- trimws(format(unique(c(taught[, 1], required[, 1])), width = 128, justify = "right"), "both")
    req <- matrix(data = rep(0, length(skills) * length(los)), nrow = length(los))
    tgt <- matrix(data = rep(0, length(skills) * length(los)), nrow = length(los))
    colnames(req) <- skills
    rownames(req) <- los
    colnames(tgt) <- skills
    rownames(tgt) <- los
    apply(required, MARGIN = 1, FUN = function(x) {
        req[trimws(format(x[1], width = 35, justify = "right"), "both"),
            trimws(format(x[2], width = 35, justify = "right"), "both")] <<- 1
    })
    apply(taught, MARGIN = 1, FUN = function(x) {
        tgt[trimws(format(x[1], width = 35, justify = "right"), "both"),
            trimws(format(x[2], width = 35, justify = "right"), "both")] <<- 1
    })
    l <- list(taught = tgt, required = req)
    class(l) <- unique(c("cdss_sa", class(l)))
    l
}
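# Usage sketch (illustrative, kept as comments): building a skill assignment
# from two small two-column tables (LO IDs in column 1, skill IDs in column 2)
# taught   <- data.frame(LO = c("lo1", "lo2"), Skill = c("s1", "s2"))
# required <- data.frame(LO = "lo2", Skill = "s1")
# sa <- cdss_tables2sa(taught, required)
# sa$taught
# sa$required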
/scratch/gouwar.j/cran-all/cranData/CDSS/R/cdss_tables2sa.R
#' Close an attribution relation to get a surmise relation.
#'
#' \code{close_ar} expects a quadratic binary matrix and closes it under
#' reflexivity and transitivity.
#'
#' @param ar Attribution relation matrix
#'
#' @return Surmise relation
#'
#' @family Utility functions
#'
#' @export
close_ar <- function(ar) {
    if (!(inherits(ar, "matrix"))) {
        stop(sprintf("%s must be of class %s.", dQuote("ar"), dQuote("matrix")))
    }
    if ((max(ar) > 1) || (min(ar) < 0)) {
        stop(sprintf("%s is not a binary matrix!", dQuote("ar")))
    }
    size <- dim(ar)[1]
    if (size != dim(ar)[2]) {
        stop(sprintf("%s is not a quadratic matrix!", dQuote("ar")))
    }
    # Close ar under reflexivity
    d <- diag(1, size, size)
    ar <- 1 * (ar | d)
    # Close ar under transitivity
    sr <- binary_matrix_product(ar, ar)
    sr.old <- ar
    while (any(sr != sr.old)) {
        sr.old <- sr
        sr <- binary_matrix_product(sr, sr)
    }
    colnames(sr) <- colnames(ar)
    rownames(sr) <- rownames(ar)
    class(sr) <- unique(c("surmise_relation", "attribution_relation", class(sr)))
    sr
}
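# Usage sketch (illustrative, kept as comments): closing the chain
# 1 -> 2 -> 3 adds the diagonal (reflexivity) and the pair (1, 3) (transitivity)
# ar <- matrix(c(0, 1, 0,
#                0, 0, 1,
#                0, 0, 0), nrow = 3, byrow = TRUE)
# close_ar(ar)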
/scratch/gouwar.j/cran-all/cranData/CDSS/R/close_ar.R
#' Read an assignment of taught and required skills for a set of learning objects from CSV-files.
#'
#' \code{read_skill_assignment_csv} expects two CSV-files with two columns each. The first
#' column contains the IDs of learning objects and the second column the IDs of single skills
#' required or taught, respectively, by this learning object.
#' It returns a list of two binary matrices, "taught" and "required". Each matrix has one
#' row per learning object and one column per skill. The cells contain a "1" if the skill
#' is taught or required, respectively, by the learning object and a "0" otherwise.
#'
#' @param taught CSV-file with assignments of taught competencies to learning objects
#' @param required CSV-file with assignments of required competencies to learning objects
#' @param header Boolean specifying whether the CSV-files contain a header line (default = TRUE)
#' @param sep Column separator (default ",")
#' @param dec Decimal point character (default ".")
#' @param warnonly Are non-compliant SAs allowed? (default = FALSE)
#' @param verbose Verbosity of compliance test (default = TRUE)
#'
#' @return List of two binary matrices, "taught" and "required".
#'
#' @importFrom utils read.csv
#'
#' @family functions reading skill assignments
#'
#' @export
read_skill_assignment_csv <- function(
        taught,
        required,
        header = TRUE,
        sep = ",",
        dec = ".",
        warnonly = FALSE,
        verbose = TRUE
) {
    t <- read.csv(taught, header = header, sep = sep, dec = dec, fill = FALSE)
    r <- read.csv(required, header = header, sep = sep, dec = dec, fill = FALSE)
    sa <- cdss_tables2sa(t, r)
    check <- cdss_sa_compliance(sa, verbose)
    if (!check) {
        # with warnonly = TRUE a non-compliant assignment only triggers a warning
        if (warnonly) {
            warning("The assignment tables are not skill assignment compliant!")
        } else {
            stop("The assignment tables are not skill assignment compliant!")
        }
    }
    sa
}
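# Usage sketch (illustrative, kept as comments): reading the example CSV files
# shipped with the package (taught and required skills in separate files)
# t_file <- system.file("extdata", "SkillAssignment-T.csv", package = "CDSS")
# r_file <- system.file("extdata", "SkillAssignment-R.csv", package = "CDSS")
# sa <- read_skill_assignment_csv(taught = t_file, required = r_file)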
/scratch/gouwar.j/cran-all/cranData/CDSS/R/read_skll_assignments_csv.R
#' Read an assignment of taught and required skills for a set of learning objects from an ODS-file.
#'
#' \code{read_skill_assignment_ods} expects an ODS-file with two sheets assigning taught and
#' required, respectively, skills to learning objects with two columns each. The first
#' column contains the IDs of learning objects and the second column the IDs of single skills
#' required or taught, respectively, by this learning object.
#' It returns a list of two binary matrices, "taught" and "required". Each matrix has one
#' row per learning object and one column per skill. The cells contain a "1" if the skill
#' is taught or required, respectively, by the learning object and a "0" otherwise.
#'
#' @param filename Name of the ODS-file
#' @param taughtname Name of the sheet with the taught assignment (default = "Taught")
#' @param requiredname Name of the sheet with the required assignment (default = "Required")
#' @param warnonly Are non-compliant SAs allowed? (default = FALSE)
#' @param verbose Verbosity of compliance test (default = TRUE)
#'
#' @return List of two binary matrices, "taught" and "required".
#'
#' @importFrom readODS read_ods
#'
#' @family functions reading skill assignments
#'
#' @export
read_skill_assignment_ods <- function(
        filename,
        taughtname = "Taught",
        requiredname = "Required",
        warnonly = FALSE,
        verbose = TRUE
) {
    t <- as.data.frame(read_ods(path = filename, sheet = taughtname))
    r <- as.data.frame(read_ods(path = filename, sheet = requiredname))
    sa <- cdss_tables2sa(t, r)
    check <- cdss_sa_compliance(sa, verbose)
    if (!check) {
        # with warnonly = TRUE a non-compliant assignment only triggers a warning
        if (warnonly) {
            warning("The assignment tables are not skill assignment compliant!")
        } else {
            stop("The assignment tables are not skill assignment compliant!")
        }
    }
    sa
}
/scratch/gouwar.j/cran-all/cranData/CDSS/R/read_skll_assignments_ods.R
#' Read an assignment of taught and required skills for a set of learning objects from an XLSX-file.
#'
#' \code{read_skill_assignment_xlsx} expects an XLSX-file with two sheets assigning taught and
#' required, respectively, skills to learning objects with two columns each. The first
#' column contains the IDs of learning objects and the second column the IDs of single skills
#' required or taught, respectively, by this learning object.
#' It returns a list of two binary matrices, "taught" and "required". Each matrix has one
#' row per learning object and one column per skill. The cells contain a "1" if the skill
#' is taught or required, respectively, by the learning object and a "0" otherwise.
#'
#' @param filename Name of the XLSX-file
#' @param taughtname Name of the sheet with the taught assignment (default = "Taught")
#' @param requiredname Name of the sheet with the required assignment (default = "Required")
#' @param warnonly Are non-compliant SAs allowed? (default = FALSE)
#' @param verbose Verbosity of compliance test (default = TRUE)
#'
#' @return List of two binary matrices, "taught" and "required".
#'
#' @importFrom openxlsx read.xlsx
#'
#' @family functions reading skill assignments
#'
#' @export
read_skill_assignment_xlsx <- function(
        filename,
        taughtname = "Taught",
        requiredname = "Required",
        warnonly = FALSE,
        verbose = TRUE
) {
    t <- read.xlsx(xlsxFile = filename, sheet = taughtname)
    r <- read.xlsx(xlsxFile = filename, sheet = requiredname)
    sa <- cdss_tables2sa(t, r)
    check <- cdss_sa_compliance(sa, verbose)
    if (!check) {
        # with warnonly = TRUE a non-compliant assignment only triggers a warning
        if (warnonly) {
            warning("The assignment tables are not skill assignment compliant!")
        } else {
            stop("The assignment tables are not skill assignment compliant!")
        }
    }
    sa
}
/scratch/gouwar.j/cran-all/cranData/CDSS/R/read_skll_assignments_xlsx.R
#' Reduce a surmise function with respect to item equivalence
#'
#' \code{reduce_sf} takes a surmise function and returns its
#' reduction to non-equivalent items.
#'
#' @param sf Surmise function
#' @return Surmise function reduced by equivalences
#'
#' @family Utility functions
#'
#' @export
reduce_sf <- function(sf) {
    if (!inherits(sf, "cdss_sf")) {
        stop(sprintf("%s must be of class %s.", dQuote("sf"), dQuote("cdss_sf")))
    }
    sf <- t(sf)
    sf <- unique(sf)
    sf <- data.frame(t(unique(sf, MARGIN = 2)))
    rem <- colnames(sf)[-1]
    rownames(sf) <- sf[, 1]
    sf <- sf[rem, ]
    skills <- dim(sf)[2] - 1
    sf <- data.frame(apply(sf[, 2:(skills + 1)], 2, as.numeric))
    sf <- cbind(data.frame(rem), sf)
    colnames(sf)[1] <- "Skill"
    sf
}
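# Usage sketch (illustrative, kept as comments; assumes `sf` is a surmise
# function obtained from cdss_csma2sf()):
# sf_reduced <- reduce_sf(sf)
# sf_reduced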
/scratch/gouwar.j/cran-all/cranData/CDSS/R/reduce_sf.R
#' Random dataset from a given vine copula model
#'
#' @description A random dataset simulated from a given 5-dimensional vine copula model.
#'
#' @format
#' \describe{
#'   \item{\code{$data}}{A \code{1000 x 5} data set (format \code{data.frame}) with the uniform
#'   variables \code{(U1,U2,U3,U4,U5)}.}
#'   \item{\code{$vine}}{\code{\link[VineCopula]{RVineMatrix}} object defining the vine copula
#'   model from which \code{$data} was sampled.}
#' }
#'
#' @examples
#'
#' # Load data
#' data(dataset)
#'
#' # Extract data
#' data <- dataset$data
#' plot(data)
#'
#' # Extract the RVineMatrix object from which the dataset was randomly sampled
#' vine <- dataset$vine
#' vine$Matrix
#' vine$family
#' vine$par
#' vine$par2
#' summary(vine)
#'
#' @author Emanuele Bevacqua
"dataset"

#' Simulation from a conditional C- or D-vine
#'
#' @description Simulates from a d-dimensional conditional C- or D-vine of the variables
#' (\strong{Y},\strong{X}), given the fixed conditioning variables \strong{X}. The algorithm works
#' for vines satisfying the requirements discussed in \emph{Bevacqua et al. (2017)}. The algorithm
#' implemented here is a modified version of those from \emph{Aas et al. (2009)} and is shown in
#' \emph{Bevacqua et al. (2017)}.
#'
#' @param RVM An \code{\link[VineCopula]{RVineMatrix}} object containing the information of the
#' d-dimensional C- or D-Vine model (for further details about \code{\link[VineCopula]{RVineMatrix}}
#' objects see the documentation file of the \code{VineCopula} package).
#' If the full copula is 2-dimensional, RVM can be an \code{\link[VineCopula]{RVineMatrix}} object
#' or a data.frame (or list) object where \code{$family}, \code{$par} and \code{$par2} are specified.
#' @param Condition A \code{N x Nx} matrix of the Nx conditioning variables.
#' For D-vine: data corresponding to the conditioning variable whose index is in
#' \code{RVM$Matrix[i,i]} are in the i-th column of \code{Condition}.
#' For C-vine: data corresponding to the conditioning variable whose index is in
#' \code{RVM$Matrix[i,i]} are in the [(d+1)-i]-th column of \code{Condition}. See examples.
#' @param N Number of data to be simulated. By default N is taken from \code{Condition}, which is
#' a \code{N x Nx} matrix. It is necessary to specify \code{N} only when \code{Condition} is not given.
#'
#' @return A \code{N x d} matrix of the simulated variables from the given C- or D-vine copula
#' model. In the first columns are the simulated conditioned variables, and in the last columns
#' the conditioning variables \code{Condition}. For more details about the exact order of the
#' variables in the columns see the examples. The function is built to work easily in combination
#' with \code{\link{CDVineCondFit}}.
#'
#' @examples
#'
#' # Example 1: conditional sampling from a C-Vine
#'
#' # Read data
#' data(dataset)
#' data <- dataset$data[1:400,1:4]
#'
#' # Define the variables Y and X. X are the conditioning variables,
#' # which have to be positioned in the last columns of the data.frame
#' colnames(data) <- c("Y1","Y2","X3","X4")
#'
#' \dontrun{
#' # Select a vine and fit the copula families, specifying that there are 2 conditioning variables
#' RVM <- CDVineCondFit(data,Nx=2,type="CVine")
#'
#' # Set the values of the conditioning variables as those used for the calibration.
#' # Order them with respect to RVM$Matrix, considering that is a C-Vine #' d=dim(RVM$Matrix)[1] #' cond1 <- data[,RVM$Matrix[(d+1)-1,(d+1)-1]] #' cond2 <- data[,RVM$Matrix[(d+1)-2,(d+1)-2]] #' condition <- cbind(cond1,cond2) #' #' # Simulate the variables #' Sim <- CDVineCondSim(RVM,condition) #' #' # Plot the simulated variables over the observed #' Sim <- data.frame(Sim) #' overplot(Sim,data) #' #' #' #' # Example 2: conditional sampling from a D-Vine #' #' # Read data #' data(dataset) #' data <- dataset$data[1:100,1:4] #' #' # Define the variables Y and X. X are the conditioning variables, #' # which have to be positioned in the last columns of the data.frame #' colnames(data) <- c("Y1","Y2","X3","X4") #' #' # Select a vine and fit the copula families, specifying that there are 2 conditioning variables #' RVM <- CDVineCondFit(data,Nx=2,type="DVine") #' summary(RVM) #It is a D-Vine. #' #' # Set the values of the conditioning variables as those used for the calibration. #' # Order them with respect to RVM$Matrix, considering that is a D-Vine. #' cond1 <- data[,RVM$Matrix[1,1]] #' cond2 <- data[,RVM$Matrix[2,2]] #' condition <- cbind(cond1,cond2) #' #' # Simulate the variables #' Sim <- CDVineCondSim(RVM,condition) #' #' # Plot the simulated variables over the observed #' Sim <- data.frame(Sim) #' overplot(Sim,data) #' #' #' #' # Example 3 #' #' # Read data #' data(dataset) #' data <- dataset$data[1:100,1:2] #' colnames(data) <- c("Y1","X2") #' #' # Fit copula #' require(VineCopula) #' BiCop <- BiCopSelect(data$Y1,data$X2) #' BiCop #' #' # Fix conditioning variable to low values and simulate #' condition <- data$X2/10 #' Sim <- CDVineCondSim(BiCop,condition) #' #' # Plot the simulated variables over the observed #' Sim <- data.frame(Sim) #' overplot(Sim,data) #' } #' #' @author Emanuele Bevacqua #' #' @references Bevacqua, E., Maraun, D., Hobaek Haff, I., Widmann, M., and Vrac, M.: Multivariate statistical modelling of compound events via pair-copula constructions: analysis of floods in Ravenna (Italy), #' Hydrol. Earth Syst. Sci., 21, 2701-2723, https://doi.org/10.5194/hess-21-2701-2017, 2017. #' \href{https://www.researchgate.net/publication/317414374_Multivariate_statistical_modelling_of_compound_events_via_pair-copula_constructions_Analysis_of_floods_in_Ravenna_Italy}{[link]} #' \href{https://www.hydrol-earth-syst-sci.net/21/2701/2017/hess-21-2701-2017.html}{[link]} #' #' Aas, K., Czado, C., Frigessi, A. and Bakken, H.: Pair-copula constructions of multiple dependence, Insurance: #' Mathematics and Economics, 44(2), 182-198, <doi:10.1016/j.insmatheco.2007.02.001>, 2009. #' \href{http://www.sciencedirect.com/science/article/pii/S0167668707000194}{[link]} #' #' Ulf Schepsmeier, Jakob Stoeber, Eike Christian Brechmann, Benedikt Graeler, Thomas #' Nagler and Tobias Erhardt (2017). VineCopula: Statistical Inference of Vine Copulas. R #' package version 2.1.1. 
#' \href{https://CRAN.R-project.org/package=VineCopula}{[link]}
#'
#' @seealso \code{\link{CDVineCondFit}}
#' @import VineCopula
#' @export CDVineCondSim
CDVineCondSim <- function(RVM,Condition,N)
{
  d <- dim(as.matrix(RVM$family))[1]
  if(d==1)
  {
    return(DVineCondSim(RVM,Condition,N))
  }

  if(is.matrix(Condition))
  {
    if(dim(Condition)[2]==d)
    {
      stop("Please provide a number of conditioning variables smaller than d")
    }
  }

  if(RVM$Matrix[d,1]!=RVM$Matrix[d,2])
  {
    return(DVineCondSim(RVM,Condition,N))
  }
  else if(RVM$Matrix[d,1]==RVM$Matrix[d,2])
  {
    return(CVineCondSim(RVM,Condition,N))
  }
}

#' Ranking of C- and D- vines allowing for conditional simulation
#'
#' @description Provides a ranking of the C- and D- vines which allow for conditional
#' sampling, under the condition discussed in the description of \code{\link{CDVineCondFit}}.
#'
#' @param data An \code{N x d} data matrix (with uniform margins).
#' Data of the conditioning variable(s) have to occupy the last column(s) of this matrix.
#'
#' @param Nx Number of conditioning variables.
#'
#' @param treecrit Character indicating the criterion used to select the vine. All possible vines are fitted through
#' the function \code{\link[VineCopula]{RVineCopSelect}} of the package \code{VineCopula}. Then the vines are ranked with respect to
#' the Akaike information criterion (\code{treecrit = "AIC"}, default) or Bayesian information criterion (\code{treecrit = "BIC"}).
#' This needs the estimation and model selection for all the pairs of all the possible vines, and therefore could
#' require a long time in case of large datasets, i.e. large \code{N x d}.
#'
#' @param selectioncrit Character indicating the criterion for pair-copula selection.
#' Possible choices are \code{selectioncrit = "AIC"} (default) and \code{"BIC"}.
#'
#' @param familyset "Integer vector of pair-copula families to select from. The vector has to include at least one
#' pair-copula family that allows for positive and one that allows for negative
#' dependence. Not listed copula families might be included to better handle
#' limit cases. If \code{familyset = NA} (default), selection among all
#' possible families is performed. If a vector of negative numbers is provided,
#' selection among all but \code{abs(familyset)} is performed. Coding of bivariate copula families: \cr
#' \code{0} = independence copula \cr
#' \code{1} = Gaussian copula \cr
#' \code{2} = Student t copula (t-copula) \cr
#' \code{3} = Clayton copula \cr
#' \code{4} = Gumbel copula \cr
#' \code{5} = Frank copula \cr
#' \code{6} = Joe copula \cr
#' \code{7} = BB1 copula \cr
#' \code{8} = BB6 copula \cr
#' \code{9} = BB7 copula \cr
#' \code{10} = BB8 copula \cr
#' \code{13} = rotated Clayton copula (180 degrees; ``survival Clayton'') \cr
#' \code{14} = rotated Gumbel copula (180 degrees; ``survival Gumbel'') \cr
#' \code{16} = rotated Joe copula (180 degrees; ``survival Joe'') \cr
#' \code{17} = rotated BB1 copula (180 degrees; ``survival BB1'')\cr
#' \code{18} = rotated BB6 copula (180 degrees; ``survival BB6'')\cr
#' \code{19} = rotated BB7 copula (180 degrees; ``survival BB7'')\cr
#' \code{20} = rotated BB8 copula (180 degrees; ``survival BB8'')\cr
#' \code{23} = rotated Clayton copula (90 degrees) \cr
#' \code{24} = rotated Gumbel copula (90 degrees) \cr
#' \code{26} = rotated Joe copula (90 degrees) \cr
#' \code{27} = rotated BB1 copula (90 degrees) \cr
#' \code{28} = rotated BB6 copula (90 degrees) \cr
#' \code{29} = rotated BB7 copula (90 degrees) \cr
#' \code{30} = rotated BB8 copula (90 degrees) \cr
#' \code{33} = rotated Clayton copula (270 degrees) \cr
#' \code{34} = rotated Gumbel copula (270 degrees) \cr
#' \code{36} = rotated Joe copula (270 degrees) \cr
#' \code{37} = rotated BB1 copula (270 degrees) \cr
#' \code{38} = rotated BB6 copula (270 degrees) \cr
#' \code{39} = rotated BB7 copula (270 degrees) \cr
#' \code{40} = rotated BB8 copula (270 degrees) \cr
#' \code{104} = Tawn type 1 copula \cr
#' \code{114} = rotated Tawn type 1 copula (180 degrees) \cr
#' \code{124} = rotated Tawn type 1 copula (90 degrees) \cr
#' \code{134} = rotated Tawn type 1 copula (270 degrees) \cr
#' \code{204} = Tawn type 2 copula \cr
#' \code{214} = rotated Tawn type 2 copula (180 degrees) \cr
#' \code{224} = rotated Tawn type 2 copula (90 degrees) \cr
#' \code{234} = rotated Tawn type 2 copula (270 degrees)" (VineCopula Documentation, version 2.1.1, pp. 73-74)
#'
#'
#'
#' @param type Type of vine to be fitted: \cr
#' C-Vine: "CVine" or 1; \cr
#' D-Vine: "DVine" or 2; \cr
#' Both C and D-Vine: "CVine-DVine" or "1-2" (default).
#'
#' @param indeptest "Logical; whether a hypothesis test for the independence of
#' \code{u1} and \code{u2} is performed before bivariate copula selection
#' (default: \code{indeptest = FALSE}; see \code{BiCopIndTest}). The
#' independence copula is chosen for a (conditional) pair if the null
#' hypothesis of independence cannot be rejected.
#'
#' @param level numeric; significance level of the independence test (default:
#' \code{level = 0.05}).
#'
#' @param se Logical; whether standard errors are estimated (default: \code{se
#' = FALSE}).
#'
#' @param rotations logical; if \code{TRUE}, all rotations of the families in
#' \code{familyset} are included.
#'
#' @param method indicates the estimation method: either maximum
#' likelihood estimation (\code{method = "mle"}; default) or inversion of
#' Kendall's tau (\code{method = "itau"}). For \code{method = "itau"} only
#' one parameter families and the Student t copula can be used (\code{family =
#' 1,2,3,4,5,6,13,14,16,23,24,26,33,34} or \code{36}). For the t-copula,
#' \code{par2} is found by a crude profile likelihood optimization over the
#' interval (2, 10]." (VineCopula Documentation, version 2.1.1, pp. 74-75)
#'
#' @return \describe{
#' \item{\code{table}}{A table with the ranking of the vines, with vine index \code{i},
#' values of the selected \code{treecrit} and vine \code{type} (1 for C-Vine and 2 for D-Vine).}
#'
#' \item{\code{vines}}{A list where the element \code{[[i]]} is an \code{\link[VineCopula]{RVineMatrix}} object corresponding to
#' the \code{i}-th vine in the ranking shown in \code{table}.
#' Each \code{\link[VineCopula]{RVineMatrix}} object contains the selected families (\code{$family}) as well as sequentially
#' estimated parameters stored in \code{$par} and \code{$par2}. Details about \code{\link[VineCopula]{RVineMatrix}} objects are given in
#' the documentation file of the \code{VineCopula} package.
#' The fit of each model is performed via the function \code{\link[VineCopula]{RVineCopSelect}} of the package \code{VineCopula}.
#' "The object is augmented by the following information about the fit:
#'
#' \describe{
#' \item{\code{se, se2}}{standard errors for the parameter estimates (if \code{se = TRUE}; note that these are only
#' approximate since they do not account for the sequential nature of the estimation)}
#'
#' \item{\code{nobs}}{number of observations}
#'
#' \item{\code{logLik, pair.logLik}}{log likelihood (overall and pairwise)}
#'
#' \item{\code{AIC, pair.AIC}}{Akaike's Information Criterion (overall and pairwise)}
#'
#' \item{\code{BIC, pair.BIC}}{Bayesian Information Criterion (overall and pairwise)}
#'
#' \item{\code{emptau}}{matrix of empirical values of Kendall's tau}
#'
#' \item{\code{p.value.indeptest}}{matrix of p-values of the independence test.}}
#' }}
#'
#' @note For a comprehensive summary of the vine copula model, use
#' \code{summary(object)}; to see all its contents, use \code{str(object)}." (VineCopula Documentation, version 2.1.1, pp. 103)
#'
#' @examples
#'
#' # Read data
#' data(dataset)
#' data <- dataset$data[1:100,1:5]
#'
#' # Define the variables Y and X. X are the conditioning variables,
#' # which have to be positioned in the last columns of the data.frame
#' colnames(data) <- c("Y1","Y2","X3","X4","X5")
#'
#' # Rank the possible D-Vines according to the AIC
#' \dontrun{
#' Ranking <- CDVineCondRank(data,Nx=3,"AIC",type="DVine")
#' Ranking$table
#' #    tree       AIC type
#' # 1     1 -292.8720    2
#' # 2     2 -290.2941    2
#' # 3     3 -288.5719    2
#' # 4     4 -288.2496    2
#' # 5     5 -287.8006    2
#' # 6     6 -285.8503    2
#' # 7     7 -282.2867    2
#' # 8     8 -278.9371    2
#' # 9     9 -275.8339    2
#' # 10   10 -272.9459    2
#' # 11   11 -271.1526    2
#' # 12   12 -270.5269    2
#'
#' Ranking$vines[[1]]$AIC
#' # [1] -292.8720
#' summary(Ranking$vines[[1]])
#' }
#'
#' @author Emanuele Bevacqua
#'
#' @references Bevacqua, E., Maraun, D., Hobaek Haff, I., Widmann, M., and Vrac, M.: Multivariate statistical modelling of compound events via pair-copula constructions: analysis of floods in Ravenna (Italy),
#' Hydrol. Earth Syst. Sci., 21, 2701-2723, https://doi.org/10.5194/hess-21-2701-2017, 2017.
#' \href{https://www.researchgate.net/publication/317414374_Multivariate_statistical_modelling_of_compound_events_via_pair-copula_constructions_Analysis_of_floods_in_Ravenna_Italy}{[link]}
#' \href{https://www.hydrol-earth-syst-sci.net/21/2701/2017/hess-21-2701-2017.html}{[link]}
#'
#' Aas, K., Czado, C., Frigessi, A. and Bakken, H.: Pair-copula constructions of multiple dependence, Insurance:
#' Mathematics and Economics, 44(2), 182-198, <doi:10.1016/j.insmatheco.2007.02.001>, 2009.
#' \href{http://www.sciencedirect.com/science/article/pii/S0167668707000194}{[link]}
#'
#' Ulf Schepsmeier, Jakob Stoeber, Eike Christian Brechmann, Benedikt Graeler, Thomas
#' Nagler and Tobias Erhardt (2017). VineCopula: Statistical Inference of Vine Copulas. R
#' package version 2.1.1. \href{https://CRAN.R-project.org/package=VineCopula}{[link]}
#'
#' @seealso \code{\link{CDVineCondFit}}
#' @import VineCopula combinat
#' @export CDVineCondRank
CDVineCondRank <- function(data,Nx,treecrit="AIC",selectioncrit="AIC",familyset = NA,type="CVine-DVine",
                           indeptest = FALSE, level = 0.05, se = FALSE, rotations = TRUE, method = "mle"){
  if(type==1)
  {
    type="CVine"
  }
  if(type==2)
  {
    type="DVine"
  }
  if(type=="1-2")
  {
    type="CVine-DVine"
  }

  if(type=="CVine-DVine" | type=="CVine")
  {
    RankingC <- RankCVineCond(data,Nx,treecrit,selectioncrit,familyset,indeptest = indeptest, level = level,
                              se = se, rotations = rotations, method = method)
  }
  if(type=="CVine-DVine" | type=="DVine")
  {
    RankingD <- RankDVineCond(data,Nx,treecrit,selectioncrit,familyset,indeptest = indeptest, level = level,
                              se = se, rotations = rotations, method = method)
  }

  if(type=="CVine-DVine")
  {
    # merge the two sorted rankings into a single one
    RankingCD=list()
    RankingCD[[1]]=data.frame(tree=seq(1,(length(RankingD[[1]]$tree)+length(RankingC[[1]]$tree))),
                              AICBIC=rep(-9999,(length(RankingD[[1]]$tree)+length(RankingC[[1]]$tree))),
                              type=rep(-9999,(length(RankingD[[1]]$tree)+length(RankingC[[1]]$tree))))
    RankingCD[[2]]=list()
    indexC=1
    indexD=1
    for(i in 1:(length(RankingD[[1]]$tree)+length(RankingC[[1]]$tree)))
    {
      if(indexC<=length(RankingC[[1]]$tree) & indexD<=length(RankingD[[1]]$tree))
      {
        if(RankingC[[1]][indexC,2]<RankingD[[1]][indexD,2])
        {
          RankingCD[[1]]$AICBIC[i]=RankingC[[1]][indexC,2]
          RankingCD[[1]]$type[i]=1
          RankingCD[[2]][[i]]=RankingC[[2]][[indexC]]
          indexC=indexC+1
        }
        else
        {
          RankingCD[[1]]$AICBIC[i]=RankingD[[1]][indexD,2]
          RankingCD[[1]]$type[i]=2
          RankingCD[[2]][[i]]=RankingD[[2]][[indexD]]
          indexD=indexD+1
        }
      }
      else if(indexC>length(RankingC[[1]]$tree))
      {
        RankingCD[[1]]$AICBIC[i]=RankingD[[1]][indexD,2]
        RankingCD[[1]]$type[i]=2
        RankingCD[[2]][[i]]=RankingD[[2]][[indexD]]
        indexD=indexD+1
      }
      else if(indexD>length(RankingD[[1]]$tree))
      {
        RankingCD[[1]]$AICBIC[i]=RankingC[[1]][indexC,2]
        RankingCD[[1]]$type[i]=1
        RankingCD[[2]][[i]]=RankingC[[2]][[indexC]]
        indexC=indexC+1
      }
    }
    if(treecrit=="AIC")
    {
      colnames(RankingCD[[1]])=c("tree","AIC","type")
    }
    else if(treecrit=="BIC")
    {
      colnames(RankingCD[[1]])=c("tree","BIC","type")
    }

    Rank=list()
    Rank$table=RankingCD[[1]]
    Rank$vines=RankingCD[[2]]
    return(Rank)
  }
  else if(type=="CVine")
  {
    RankingC[[1]]$type=1
    Rank=list()
    Rank$table=RankingC[[1]]
    Rank$vines=RankingC[[2]]
    return(Rank)
  }
  else if(type=="DVine")
  {
    RankingD[[1]]$type=2
    Rank=list()
    Rank$table=RankingD[[1]]
    Rank$vines=RankingD[[2]]
    return(Rank)
  }
}

#' List of the possible C- and D- vines allowing for conditional simulation
#'
#' @description Provides a list of the C- and D- vines which allow for conditional
#' sampling, under the condition discussed in the description of \code{\link{CDVineCondFit}}.
#'
#' @param data An \code{N x d} data matrix.
#' Data of the conditioning variable(s) have to occupy the last column(s) of this matrix.
#'
#' @param Nx Number of conditioning variables.
#'
#' @param type Type of vine to be considered: \cr
#' C-Vine: "CVine" or 1; \cr
#' D-Vine: "DVine" or 2; \cr
#' Both C and D-Vine: "CVine-DVine" or "1-2" (default).
#'
#' @return Lists of matrices describing C- (\code{$CVine}) and D- (\code{$DVine}) Vines.
#' Each matrix corresponds to a vine, according to the same notation used for \code{\link[VineCopula]{RVineMatrix}}
#' objects (for further details about \code{\link[VineCopula]{RVineMatrix}} objects see the documentation file of the \code{VineCopula} package).
#' The index \code{i} in the matrix corresponds to the variable in the i-th column of \code{data}.
#'
#' @examples
#'
#' # Read data
#' data(dataset)
#' data <- dataset$data[1:100,1:5]
#'
#' # Define the variables Y and X. X are the conditioning variables,
#' # which have to be positioned in the last columns of the data.frame
#' colnames(data) <- c("Y1","Y2","X3","X4","X5")
#'
#' # List possible D-Vines:
#' ListVines <- CDVineCondListMatrices(data,Nx=3,"DVine")
#' ListVines$DVine
#'
#' # List possible C-Vines:
#' ListVines <- CDVineCondListMatrices(data,Nx=3,"CVine")
#' ListVines$CVine
#'
#' # List possible C- and D-Vines:
#' ListVines <- CDVineCondListMatrices(data,Nx=3,"CVine-DVine")
#' ListVines
#'
#' @author Emanuele Bevacqua
#'
#' @references Bevacqua, E., Maraun, D., Hobaek Haff, I., Widmann, M., and Vrac, M.: Multivariate statistical modelling of compound events via pair-copula constructions: analysis of floods in Ravenna (Italy),
#' Hydrol. Earth Syst. Sci., 21, 2701-2723, https://doi.org/10.5194/hess-21-2701-2017, 2017.
#' \href{https://www.researchgate.net/publication/317414374_Multivariate_statistical_modelling_of_compound_events_via_pair-copula_constructions_Analysis_of_floods_in_Ravenna_Italy}{[link]}
#' \href{https://www.hydrol-earth-syst-sci.net/21/2701/2017/hess-21-2701-2017.html}{[link]}
#'
#' Aas, K., Czado, C., Frigessi, A. and Bakken, H.: Pair-copula constructions of multiple dependence, Insurance:
#' Mathematics and Economics, 44(2), 182-198, <doi:10.1016/j.insmatheco.2007.02.001>, 2009.
#' \href{http://www.sciencedirect.com/science/article/pii/S0167668707000194}{[link]}
#'
#' @seealso \code{\link{CDVineCondFit}}
#' @import VineCopula combinat
#' @export CDVineCondListMatrices
CDVineCondListMatrices <- function(data,Nx,type="CVine-DVine"){
  if(type==1)
  {
    type="CVine"
  }
  if(type==2)
  {
    type="DVine"
  }
  if(type=="1-2")
  {
    type="CVine-DVine"
  }

  if(type=="CVine-DVine" | type=="CVine")
  {
    MatricesC <- PossibleCVineMatrixCond(data,Nx)
    if(type=="CVine")
    {
      a=list()
      a$CVine=MatricesC
      return(a)
    }
  }
  if(type=="CVine-DVine" | type=="DVine")
  {
    MatricesD <- PossibleDVineMatrixCond(data,Nx)
    if(type=="DVine")
    {
      a=list()
      a$DVine=MatricesD
      return(a)
    }
  }
  if(type=="CVine-DVine")
  {
    a=list()
    a$CVine=MatricesC
    a$DVine=MatricesD
    return(a)
  }
}

#' Selection of a C- or D- vine copula model for conditional sampling
#'
#' @description This function fits either a C- or a D- vine model to a d-dimensional dataset of uniform variables.
#' The fit of the pair-copula families is performed sequentially through the function \code{\link[VineCopula]{RVineCopSelect}} of
#' the package \code{VineCopula}. The vine structure is selected among a group of C- and D-vines which satisfy the requirement
#' discussed in \emph{Bevacqua et al. (2017)}. This group is composed of all C- and D-vines from which the conditioning variables
#' would be sampled first when following the algorithms from \emph{Aas et al. (2009)}. Alternatively, if the
#' vine matrix describing the vine structure is given to the function, the fit of the pair-copulas is performed directly, skipping the vine structure
#' selection procedure.
#'
#' @param data An \code{N x d} data matrix (with uniform margins).
#' The data of the conditioning variable(s) have to occupy the last column(s) of this matrix.
#'
#' @param Nx Number of conditioning variables.
#'
#' @param treecrit Character indicating the criterion used to select the vine. All possible vines are fitted through
#' the function \code{\link[VineCopula]{RVineCopSelect}} of the package \code{VineCopula}. Then the vines are ranked with respect to
#' the Akaike information criterion (\code{treecrit = "AIC"}, default) or Bayesian information criterion (\code{treecrit = "BIC"}).
#' This needs the estimation and model selection for all the pairs of all the possible vines, and therefore could
#' require a long time in case of large datasets, i.e. large \code{N x d}.
#'
#' @param type Type of vine to be fitted: \cr
#' C-Vine: "CVine" or 1; \cr
#' D-Vine: "DVine" or 2; \cr
#' Both C and D-Vine: "CVine-DVine" or "1-2" (default).
#'
#' @param selectioncrit Character indicating the criterion for pair-copula selection.
#' Possible choices are \code{"AIC"} (default) and \code{"BIC"}.
#'
#' @param familyset "Integer vector of pair-copula families to select from. The vector has to include at least one
#' pair-copula family that allows for positive and one that allows for negative
#' dependence. Not listed copula families might be included to better handle
#' limit cases. If \code{familyset = NA} (default), selection among all
#' possible families is performed. If a vector of negative numbers is provided,
#' selection among all but \code{abs(familyset)} is performed. Coding of bivariate copula families: \cr
#' \code{0} = independence copula \cr
#' \code{1} = Gaussian copula \cr
#' \code{2} = Student t copula (t-copula) \cr
#' \code{3} = Clayton copula \cr
#' \code{4} = Gumbel copula \cr
#' \code{5} = Frank copula \cr
#' \code{6} = Joe copula \cr
#' \code{7} = BB1 copula \cr
#' \code{8} = BB6 copula \cr
#' \code{9} = BB7 copula \cr
#' \code{10} = BB8 copula \cr
#' \code{13} = rotated Clayton copula (180 degrees; ``survival Clayton'') \cr
#' \code{14} = rotated Gumbel copula (180 degrees; ``survival Gumbel'') \cr
#' \code{16} = rotated Joe copula (180 degrees; ``survival Joe'') \cr
#' \code{17} = rotated BB1 copula (180 degrees; ``survival BB1'')\cr
#' \code{18} = rotated BB6 copula (180 degrees; ``survival BB6'')\cr
#' \code{19} = rotated BB7 copula (180 degrees; ``survival BB7'')\cr
#' \code{20} = rotated BB8 copula (180 degrees; ``survival BB8'')\cr
#' \code{23} = rotated Clayton copula (90 degrees) \cr
#' \code{24} = rotated Gumbel copula (90 degrees) \cr
#' \code{26} = rotated Joe copula (90 degrees) \cr
#' \code{27} = rotated BB1 copula (90 degrees) \cr
#' \code{28} = rotated BB6 copula (90 degrees) \cr
#' \code{29} = rotated BB7 copula (90 degrees) \cr
#' \code{30} = rotated BB8 copula (90 degrees) \cr
#' \code{33} = rotated Clayton copula (270 degrees) \cr
#' \code{34} = rotated Gumbel copula (270 degrees) \cr
#' \code{36} = rotated Joe copula (270 degrees) \cr
#' \code{37} = rotated BB1 copula (270 degrees) \cr
#' \code{38} = rotated BB6 copula (270 degrees) \cr
#' \code{39} = rotated BB7 copula (270 degrees) \cr
#' \code{40} = rotated BB8 copula (270 degrees) \cr
#' \code{104} = Tawn type 1 copula \cr
#' \code{114} = rotated Tawn type 1 copula (180 degrees) \cr
#' \code{124} = rotated Tawn type 1 copula (90 degrees) \cr
#' \code{134} = rotated Tawn type 1 copula (270 degrees) \cr
#' \code{204} = Tawn type 2 copula \cr
#' \code{214} = rotated Tawn type 2 copula (180 degrees) \cr
#' \code{224} = rotated Tawn type 2 copula (90 degrees) \cr
#' \code{234} = rotated Tawn type 2 copula (270 degrees)
#'
#'
#' @param indeptest Logical; whether a hypothesis test for the independence of
#' \code{u1} and \code{u2} is performed before bivariate copula selection
#' (default: \code{indeptest = FALSE}; see \code{BiCopIndTest}). The
#' independence copula is chosen for a (conditional) pair if the null
#' hypothesis of independence cannot be rejected.
#'
#' @param level numeric; significance level of the independence test (default:
#' \code{level = 0.05}).
#'
#' @param se Logical; whether standard errors are estimated (default: \code{se
#' = FALSE}).
#'
#' @param rotations logical; if \code{TRUE}, all rotations of the families in
#' \code{familyset} are included.
#'
#' @param method indicates the estimation method: either maximum
#' likelihood estimation (\code{method = "mle"}; default) or inversion of
#' Kendall's tau (\code{method = "itau"}). For \code{method = "itau"} only
#' one parameter families and the Student t copula can be used (\code{family =
#' 1,2,3,4,5,6,13,14,16,23,24,26,33,34} or \code{36}). For the t-copula,
#' \code{par2} is found by a crude profile likelihood optimization over the
#' interval (2, 10]." (VineCopula Documentation, version 2.1.1, pp. 73-75)
#'
#' @param Matrix \code{d x d} matrix that defines the vine structure.
#' If \code{Matrix} is not given, the routine finds the best vine structure according to \code{selectioncrit}.
#' If \code{Matrix} is given, the fit is performed only if the structure respects the necessary conditions
#' for the conditional sampling (if the conditions are not respected, an error message is returned).
#'
#' @return An \code{\link[VineCopula]{RVineMatrix}} object describing the selected copula model
#' (for further details about \code{\link[VineCopula]{RVineMatrix}} objects see the documentation file of the \code{VineCopula} package).
#' The selected families are stored
#' in \code{$family}, and the sequentially estimated parameters in \code{$par} and \code{$par2}.
#' The fit of the model is performed via the function \code{\link[VineCopula]{RVineCopSelect}} of the package \code{VineCopula}.
#'
#' "The object \code{\link[VineCopula]{RVineMatrix}} includes the following information about the fit:
#' \describe{
#' \item{\code{se, se2}}{standard errors for the parameter estimates (if \code{se = TRUE}; note that these are only
#' approximate since they do not account for the sequential nature of the estimation),}
#'
#' \item{\code{nobs}}{number of observations,}
#'
#' \item{\code{logLik, pair.logLik}}{log likelihood (overall and pairwise),}
#'
#' \item{\code{AIC, pair.AIC}}{Akaike's Information Criterion (overall and pairwise),}
#'
#' \item{\code{BIC, pair.BIC}}{Bayesian Information Criterion (overall and pairwise),}
#'
#' \item{\code{emptau}}{matrix of empirical values of Kendall's tau,}
#'
#' \item{\code{p.value.indeptest}}{matrix of p-values of the independence test.}
#' }
#'
#' @note For a comprehensive summary of the vine copula model, use
#' \code{summary(object)}; to see all its contents, use \code{str(object)}". (VineCopula Documentation, version 2.1.1, pp. 103)
#'
#' @examples
#'
#' # Example 1
#'
#' # Read data
#' data(dataset)
#' data <- dataset$data[1:100,1:5]
#'
#' # Define the variables Y and X.
#' # X are the conditioning variables,
#' # which have to be positioned in the last columns of the data.frame
#' colnames(data) <- c("Y1","Y2","X3","X4","X5")
#'
#' \dontrun{
#' # Select and fit a C-vine copula model, requiring that the
#' # structure allows for conditional sampling
#' RVM <- CDVineCondFit(data,Nx=3,treecrit="BIC",type="CVine",selectioncrit="AIC")
#' summary(RVM)
#' RVM$Matrix
#' }
#'
#'
#'
#' # Example 2
#'
#' # Read data
#' data(dataset)
#' data <- dataset$data[1:80,1:5]
#'
#' # Define the variables Y and X. X are the conditioning variables,
#' # which have to be positioned in the last columns of the data.frame
#' colnames(data) <- c("Y1","Y2","X3","X4","X5")
#'
#' # Define a VineMatrix which can be used for conditional sampling
#' ListVines <- CDVineCondListMatrices(data,Nx=3)
#' Matrix=ListVines$DVine[[1]]
#' Matrix
#'
#' \dontrun{
#' # Fit copula families for the defined vine:
#' RVM <- CDVineCondFit(data,Nx=3,Matrix=Matrix)
#' summary(RVM)
#' RVM$Matrix
#' RVM$family
#'
#' # check
#' identical(RVM$Matrix,Matrix)
#'
#' # Fit copula families for the defined vine, given a group of families to select from:
#' RVM <- CDVineCondFit(data,Nx=3,Matrix=Matrix,familyset=c(1,2,3,14))
#' summary(RVM)
#' RVM$Matrix
#' RVM$family
#'
#' # Try to fit copula families for a vine which is not among those
#' # that allow for conditional sampling:
#' Matrix
#' Matrix[which(Matrix==4)]=40
#' Matrix[which(Matrix==2)]=20
#' Matrix[which(Matrix==40)]=2
#' Matrix[which(Matrix==20)]=4
#' Matrix
#' RVM <- CDVineCondFit(data,Nx=3,Matrix=Matrix)
#' RVM
#' }
#'
#' @author Emanuele Bevacqua
#'
#' @references Bevacqua, E., Maraun, D., Hobaek Haff, I., Widmann, M., and Vrac, M.: Multivariate statistical modelling of compound events via pair-copula constructions: analysis of floods in Ravenna (Italy),
#' Hydrol. Earth Syst. Sci., 21, 2701-2723, https://doi.org/10.5194/hess-21-2701-2017, 2017.
#' \href{https://www.researchgate.net/publication/317414374_Multivariate_statistical_modelling_of_compound_events_via_pair-copula_constructions_Analysis_of_floods_in_Ravenna_Italy}{[link]}
#' \href{https://www.hydrol-earth-syst-sci.net/21/2701/2017/hess-21-2701-2017.html}{[link]}
#'
#' Aas, K., Czado, C., Frigessi, A. and Bakken, H.: Pair-copula constructions of multiple dependence, Insurance:
#' Mathematics and Economics, 44(2), 182-198, <doi:10.1016/j.insmatheco.2007.02.001>, 2009.
#' \href{http://www.sciencedirect.com/science/article/pii/S0167668707000194}{[link]}
#'
#' Ulf Schepsmeier, Jakob Stoeber, Eike Christian Brechmann, Benedikt Graeler, Thomas
#' Nagler and Tobias Erhardt (2017). VineCopula: Statistical Inference of Vine Copulas. R
#' package version 2.1.1.
#' \href{https://CRAN.R-project.org/package=VineCopula}{[link]}
#'
#' @seealso \code{\link{CDVineCondSim}}, \code{\link{CDVineCondRank}}
#' @export CDVineCondFit
CDVineCondFit <- function(data,Nx,treecrit="AIC",type="CVine-DVine",selectioncrit="AIC",familyset=NA,
                          indeptest = FALSE, level = 0.05, se = FALSE, rotations = TRUE, method = "mle",Matrix=FALSE)
{
  if(!is.numeric(familyset)[1])
  {
    familyset=c(1,2,3,4,5,6,7,8,9,10,13,14,16,17,18,19,20,23,24,26,27,28,29,30,33,34,36,37,38,39,
                40,104,114,124,134, 204, 214, 224, 234 )
  }

  if(!is.null(dim(Matrix)))
  {
    # quality check: the given structure must allow for conditional sampling
    ListVines <- CDVineCondListMatrices(data,Nx,"CVine-DVine")
    quality=FALSE
    for(i in 1:2)
    {
      for(j in 1:length(ListVines[[i]]))
      {
        if(identical(ListVines[[i]][[j]],Matrix))
        {
          quality=TRUE
          break
        }
      }
    }
    if(quality==TRUE)
    {
      vineFIT <- RVineCopSelect(data,familyset,Matrix,selectioncrit,indeptest = indeptest, level = level,
                                se = se, rotations = rotations, method = method)
      return(vineFIT)
    }
    else
    {
      print("The given Matrix cannot be used for the conditional simulation. To fit a generic Vine, please use the function RVineCopSelect from the package VineCopula")
      return(NULL)
    }
  }

  if(type==1)
  {
    type="CVine"
  }
  if(type==2)
  {
    type="DVine"
  }
  if(type=="1-2")
  {
    type="CVine-DVine"
  }

  if(type=="CVine-DVine" | type=="CVine")
  {
    MatrixC <- FirstCVineCond(data,Nx=Nx,treecrit=treecrit,selectioncrit=selectioncrit,familyset=familyset,
                              indeptest = indeptest, level = level, se = se, rotations = rotations, method = method)
  }
  if(type=="CVine-DVine" | type=="DVine")
  {
    MatrixD <- FirstDVineCond(data,Nx=Nx,treecrit=treecrit,selectioncrit=selectioncrit,familyset=familyset,
                              indeptest = indeptest, level = level, se = se, rotations = rotations, method = method)
  }

  if(type=="CVine-DVine")
  {
    if(MatrixC[[2]]<MatrixD[[2]])
    {
      vineFIT <- RVineCopSelect(data,familyset,MatrixC[[1]],selectioncrit,indeptest = indeptest, level = level,
                                se = se, rotations = rotations, method = method)
    }
    else
    {
      vineFIT <- RVineCopSelect(data,familyset,MatrixD[[1]],selectioncrit,indeptest = indeptest, level = level,
                                se = se, rotations = rotations, method = method)
    }
    return(vineFIT)
  }
  else if(type=="CVine")
  {
    vineFIT <- RVineCopSelect(data,familyset,MatrixC[[1]],selectioncrit,indeptest = indeptest, level = level,
                              se = se, rotations = rotations, method = method)
    return(vineFIT)
  }
  else if(type=="DVine")
  {
    vineFIT <- RVineCopSelect(data,familyset,MatrixD[[1]],selectioncrit,indeptest = indeptest, level = level,
                              se = se, rotations = rotations, method = method)
    return(vineFIT)
  }
}

#' overplot
#' @description This function overlays the scatterplot matrices of two multivariate datasets.
#' Moreover, it shows the dependencies among all the pairs for both datasets.
#'
#' @param data1,data2 Two \code{N x d} matrices of data to be plotted.
#' @param col1,col2 Colors used for \code{data1} and \code{data2} during the plot. Default is \code{col1="black"} and \code{col2="grey"}.
#' @param pch1,pch2 Parameter to specify the symbols to use when plotting points of \code{data1} and \code{data2}. Default is \code{pch1=1} and \code{pch2=1}.
#' @param xlim,ylim Two bidimensional vectors indicating the limits of x and y axes for all the scatterplots.
#' If not given, they are automatically
#' computed for each of the scatterplots.
#' @param labels A character vector with the variable names to be printed (if not given, the names of \code{data1}
#' variables are printed).
#' @param method Character indicating the dependence types to be computed between the pairs.
#' Possibilities: "kendall",
#' "spearman" and "pearson" (default)
#' @param cex.cor Number: character dimension of the printed dependencies. Default \code{cex.cor=1}.
#' @param cex.labels Number: character dimension of the printed variable names. Default \code{cex.labels=1}.
#' @param cor.signif Number: number of significant digits of the printed dependencies. Default \code{cor.signif=2}.
#' @param cex.axis Number: dimension of the axis numeric values. Default cex.axis=1.
#'
#' @return A matrix of overlaying scatterplots of the multivariate datasets \code{data1} and \code{data2}, with
#' the dependencies of the pairs.
#'
#' @examples
#'
#' # Example 1
#'
#' # Read and prepare the data for the plot
#' data(dataset)
#' data1 <- dataset$data[1:300,]
#' data2 <- dataset$data[301:600,]
#' overplot(data1,data2,xlim=c(0,1),ylim=c(0,1),method="kendall")
#'
#'
#'
#' \dontrun{
#' # Example 2
#'
#' # Read and prepare the data for the plot
#' data(dataset)
#' data <- dataset$data[1:200,1:5]
#' colnames(data) <- c("Y1","Y2","X3","X4","X5")
#'
#' # Fit copula families for the defined vine:
#' ListVines <- CDVineCondListMatrices(data,Nx=3)
#' Matrix=ListVines$CVine[[1]]
#' RVM <- CDVineCondFit(data,Nx=3,Matrix=Matrix)
#'
#' # Simulate data:
#' d=dim(RVM$Matrix)[1]
#' cond1 <- data[,RVM$Matrix[(d+1)-1,(d+1)-1]]
#' cond2 <- data[,RVM$Matrix[(d+1)-2,(d+1)-2]]
#' cond3 <- data[,RVM$Matrix[(d+1)-3,(d+1)-3]]
#' condition <- cbind(cond1,cond2,cond3)
#' Sim <- CDVineCondSim(RVM,condition)
#'
#' # Plot the simulated variables Sim over the observed
#' Sim <- data.frame(Sim)
#' overplot(data[,1:2],Sim[,1:2],xlim=c(0,1),ylim=c(0,1),method="spearman")
#' overplot(data,Sim,xlim=c(0,1),ylim=c(0,1),method="spearman")
#' }
#'
#' @author Emanuele Bevacqua
#' @importFrom stats cor
#' @importFrom graphics axis box par plot points text
#' @export overplot
overplot <- function(data1,data2,col1="black",col2="grey",xlim=NA,ylim=NA,labels=NA,method="pearson",cex.cor=1,cex.labels=1,
                     cor.signif=2,cex.axis=1,pch1=1,pch2=1){
  par(mfrow=c((length(data1[1,])-1+1),(length(data1[1,]))-1+1),
      oma = c(0,0,0,0) + 2.1,
      mar = c(0,0,1,1) + 0.1)

  if(!is.character(labels[1]))
  {
    labels <- colnames(data1)
  }

  for(i in 1:(length(data1[1,])-1))
  {
    plot(0,0,col="white",xaxt='n',yaxt='n')
    text(0,0,paste(labels[i]),cex=cex.labels)
    for(j in (i+1):(length(data1[1,])))
    {
      if(!is.numeric(xlim)[1])
      {
        vectJ=c(data1[,j],data2[,j])
        xlim=c(min(vectJ),max(vectJ))
      }
      if(!is.numeric(ylim)[1])
      {
        vectI=c(data1[,i],data2[,i])
        ylim=c(min(vectI),max(vectI))
      }
      plot(data1[,j],data1[,i],
           col=col1,
           xlim=xlim,ylim=ylim,
           axes=FALSE,
           xlab="",ylab="",pch=pch1)
      points(data2[,j],data2[,i],col=col2,pch=pch2)

      if(i==1 & j !=(length(data1[1,])))
      {
        axis(3,cex.axis=cex.axis)
      }
      else if(i==1 & j==(length(data1[1,])))
      {
        axis(3,cex.axis=cex.axis)
        axis(4,cex.axis=cex.axis)
      }
      else if(i!=1 & j ==(length(data1[1,])))
      {
        axis(4,cex.axis=cex.axis)
      }
      box()
    }

    if(i !=(length(data1[1,])-1))
    {
      for(k in 1:i)
      {
        plot(0,0,col="white",xaxt='n',yaxt='n')
        #plot(1, type="n", axes=F, xlab="", ylab="")
        text(0,0.2,signif(cor(data1[,k],data1[,i+1],method=method),cor.signif),col=col1,cex=cex.cor)
        text(0,-0.2,signif(cor(data2[,k],data2[,i+1],method=method),cor.signif),col=col2,cex=cex.cor)
      }
    }
  }

  for(k in 1:(length(data1[1,])-1))
  {
    plot(0,0,col="white",xaxt='n',yaxt='n')
    #plot(1, type="n", axes=F, xlab="", ylab="")
    text(0,0.2,signif(cor(data1[,k],data1[,length(data1[1,])],method=method),cor.signif),col=col1,cex=cex.cor)
text(0,-0.2,signif(cor(data2[,k],data2[,length(data2[1,])],method=method),cor.signif),col=col2,cex=cex.cor) } plot(0,0,col="white",xaxt='n',yaxt='n') text(0,0,paste(labels[i+1]),cex=cex.labels) par(mfrow=c(1,1)) } CVineCondSim <- function(RVM,Condition,N) { RVM$family <- as.matrix(RVM$family) RVMFamilySim=RVM$family*0-9999 RVMFamilySim #asymmetric families changed with simmetric for the simulation for(family in c(23,24,26,27,28,29,30)) { RVMFamilySim[which(RVM$family==family)]=family+10 } for(family in c(104,114,124,134)) { RVMFamilySim[which(RVM$family==family)]=family+100 } for(family in (10+c(23,24,26,27,28,29,30))) { RVMFamilySim[which(RVM$family==family)]=family-10 } for(family in (100+c(104,114,124,134))) { RVMFamilySim[which(RVM$family==family)]=family-100 } RVMFamilySim[which(RVMFamilySim==-9999)]=RVM$family[which(RVMFamilySim==-9999)] d <- dim(RVM$family)[1] if(d==1){d <- 2} w <- list() x <- list() v <- list() v[[1]] <- list() #Condition is a matrix of conditional variables if(missing(Condition)) { Ncond=0 } else if(is.vector(Condition)) { cond <- list() cond[[1]] <- Condition Ncond <- 1 N <- length(cond[[1]]) } else { cond <- list() for(i in 1:length(Condition[1,])) { cond[[i]] <- Condition[,i] } Ncond <- length(cond) N <- length(cond[[1]]) } if(Ncond==0) { for(t in 1:d) { w[[t]] <- BiCopSim(N,0,0)[,1]#runif(N,0,1) } } else if(Ncond>=1) { for(t in 1:Ncond) { w[[t]] <- cond[[t]] } for(t in ((Ncond+1):d)) { w[[t]] <- BiCopSim(N,0,0)[,1]#runif(N,0,1) } } x[[1]] <- w[[1]] v[[1]][[1]] <- x[[1]] for(i in 2:d) { v[[i]] <- list() v[[i]][[1]] <- w[[i]] if(i>Ncond) { for(k in (i-1):1) { v[[i]][[1]] <- BiCopHinv2(v[[i]][[1]], v[[k]][[k]], BiCop(RVMFamilySim[d-k+1,d-k+1-(i-k)],### RVM$par[d-k+1,d-k+1-(i-k)], RVM$par2[d-k+1,d-k+1-(i-k)]))#$hinv2 } } x[[i]] <- v[[i]][[1]] if(i == d) {break}#goes out of the for(i in 2:d) else { for(j in 1:(i-1)) { v[[i]][[j+1]] <- BiCopHfunc2( v[[i]][[j]], v[[j]][[j]], RVMFamilySim[d-j+1,d-j+1-(i-j)],### RVM$par[d-j+1,d-j+1-(i-j)], RVM$par2[d-j+1,d-j+1-(i-j)])#$hfunc2 } } } xORDERED <- x matrix0 <- rep(0,d) for(k in 1:d) { matrix0[k] <- RVM$Matrix[k,k] } matrix0 <- rev(matrix0) for(k in 1:d) { xORDERED[[k]] <-x[[which(matrix0==k)]] } x <- xORDERED xxx <- matrix(0,nrow=N,ncol=d) for(k in 1:d) { xxx[,k] <- x[[k]] } return(xxx) } DVineCondSim <- function(RVM,Condition,N) { RVM$family <- as.matrix(RVM$family) RVMFamilySim=RVM$family*0-9999 RVMFamilySim #asymmetric families changed with simmetric for the simulation in BiCopHfunc2 for(family in c(23,24,26,27,28,29,30)) { RVMFamilySim[which(RVM$family==family)]=family+10 } for(family in c(104,114,124,134)) { RVMFamilySim[which(RVM$family==family)]=family+100 } for(family in (10+c(23,24,26,27,28,29,30))) { RVMFamilySim[which(RVM$family==family)]=family-10 } for(family in (100+c(104,114,124,134))) { RVMFamilySim[which(RVM$family==family)]=family-100 } RVMFamilySim[which(RVMFamilySim==-9999)]=RVM$family[which(RVMFamilySim==-9999)] d <- dim(RVM$family)[1] if(d==1){d <- 2} w <- list() x <- list() v <- list() v[[1]] <- list() v[[2]] <- list() #Condition is a matrix of conditional variables if(missing(Condition)) { Ncond=0 } else if(is.vector(Condition)) { cond <- list() cond[[1]] <- Condition Ncond <- 1 N <- length(cond[[1]]) } else { cond <- list() for(i in 1:length(Condition[1,])) { cond[[i]] <- Condition[,i] } Ncond <- length(cond) N <- length(cond[[1]]) } if(Ncond==0) { for(t in 1:d) { w[[t]] <- BiCopSim(N,0,0)[,1] } x[[1]] <- w[[1]] v[[1]][[1]] <- x[[1]] if(d==2) { x[[2]] <- BiCopHinv2(w[[2]], v[[1]][[1]], 
BiCop(RVM$family[1,1],RVM$par,RVM$par2))#$hinv2 } else { x[[2]] <- BiCopHinv2(w[[2]], v[[1]][[1]], BiCop(RVM$family[d+1-(1),1],RVM$par[d+1-(1),1],RVM$par2[d+1-(1),1]))#$hinv2 } v[[2]][[1]] <- x[[2]] } else if(Ncond==1) { w[[1]] <- cond[[1]] for(t in ((Ncond+1):d)) { w[[t]] <- BiCopSim(N,0,0)[,1] } x[[1]] <- w[[1]] v[[1]][[1]] <- x[[1]] if(d==2) { x[[2]] <- BiCopHinv2(w[[2]], v[[1]][[1]], BiCop(RVM$family[1,1],RVM$par,RVM$par2))#$hinv2 } else { x[[2]] <- BiCopHinv2(w[[2]], v[[1]][[1]], BiCop(RVM$family[d+1-(1),1],RVM$par[d+1-(1),1],RVM$par2[d+1-(1),1]))#$hinv2 v[[2]][[1]] <- x[[2]] } } else if(Ncond>=2) { for(t in 1:Ncond) { w[[t]] <- cond[[t]] } for(t in ((Ncond+1):d)) { w[[t]] <- BiCopSim(N,0,0)[,1] } x[[1]] <- w[[1]] v[[1]][[1]] <- x[[1]] x[[2]] <- w[[2]] v[[2]][[1]] <- x[[2]] } if(d==2) { return(cbind(x[[2]],cond[[1]])) } else if(d>2) { v[[2]][[2]] <- BiCopHfunc2( v[[1]][[1]], v[[2]][[1]], RVMFamilySim[d+1-(1),1],### RVM$par[d+1-(1),1], RVM$par2[d+1-(1),1])#$hfunc2 for(i in 3:d) { v[[i]] <- list() v[[i]][[1]] <- w[[i]] if(i>Ncond) { for(k in (i-1):2) { v[[i]][[1]] <- BiCopHinv2(v[[i]][[1]], v[[i-1]][[2*k-2]], BiCop(RVM$family[d+1-(k),(i-k)],RVM$par[d+1-(k),(i-k)],RVM$par2[d+1-(k),(i-k)]))#$hinv2 } v[[i]][[1]] <- BiCopHinv2(v[[i]][[1]], v[[i-1]][[1]], BiCop(RVM$family[d+1-(1),(i-1)],RVM$par[d+1-(1),(i-1)],RVM$par2[d+1-(1),(i-1)]))#$hinv2 } x[[i]] <- v[[i]][[1]] if(i == d) {break} else { v[[i]][[2]] <- BiCopHfunc2( v[[i-1]][[1]], v[[i]][[1]], RVMFamilySim[d+1-(1),i-1],### RVM$par[d+1-(1),i-1], RVM$par2[d+1-(1),i-1])#$hfunc2 v[[i]][[3]] <- BiCopHfunc2( v[[i]][[1]], v[[i-1]][[1]], RVMFamilySim[d+1-(1),i-1],### RVM$par[d+1-(1),i-1], RVM$par2[d+1-(1),i-1])#$hfunc2 if(i>3) { for(j in 2:(i-2)) { v[[i]][[2*j]] <- BiCopHfunc2( v[[i-1]][[2*j-2]], v[[i]][[2*j-1]], RVMFamilySim[d+1-(j),i-j],### RVM$par[d+1-(j),i-j], RVM$par2[d+1-(j),i-j])#$hfunc2 v[[i]][[2*j+1]] <- BiCopHfunc2( v[[i]][[2*j-1]], v[[i-1]][[2*j-2]], RVMFamilySim[d+1-(j),i-j],### RVM$par[d+1-(j),i-j], RVM$par2[d+1-(j),i-j])#$hfunc2 } } v[[i]][[2*i-2]] <- BiCopHfunc2( v[[i-1]][[2*i-4]], v[[i]][[2*i-3]], RVMFamilySim[d+1-(i-1),1],### RVM$par[d+1-(i-1),1], RVM$par2[d+1-(i-1),1])#$hfunc2 } } xORDERED <- x matrix0 <- rep(0,d) for(k in 1:d) { matrix0[k] <- RVM$Matrix[k,k] } for(k in 1:d) { xORDERED[[k]] <-x[[which(matrix0==k)]] } x <- xORDERED xxx <- matrix(0,nrow=N,ncol=d) for(k in 1:d) { xxx[,k] <- x[[k]] } return(xxx) } } PossibleCDVine4Condition <- function(Nx,Ny,type){ #this routine gives back the first level of all the possible vines that mathematically allow foor building a conditionated model x <- seq(1,(Ny+Nx),1) #in x the first variable has to be the x variables, the last the predictors Perm <- permn(x) if(Nx==0) { Nx=Ny Ny=0 } GoodTree <- logical(length = factorial(length(x))) for(i in 1:factorial(length(x))) { j <- 0 repeat { j <- j+1 GoodTree[i] <- TRUE if((which(x==Perm[[i]][j]) < x[Ny+1]) | (j==Nx)) #Perm[[i]][j] is not a predictor | finished) { if((j==Nx) & (which(x==Perm[[i]][j]) >= x[Ny+1])) { GoodTree[i] <- GoodTree[i] } else if(which(x==Perm[[i]][j]) < x[Ny+1]) { GoodTree[i] <- FALSE } break } } } aaa <- which(GoodTree==TRUE) MatrixGoodTree <- matrix(0,nrow=factorial(Ny)*factorial(Nx),ncol=length(x)) for(i in 1:length(aaa)) { MatrixGoodTree[i,] <- Perm[[aaa[i]]] } if(type==1 | type=="CVine") { bbb=MatrixGoodTree[,ncol(MatrixGoodTree):1]#inversion wih respect to Dvine! MatrixGoodTree=bbb if(Ny!=1)# Ny!=1 -> no duplications! 
  {
    aaa=MatrixGoodTree
    d=Nx+Ny
    for(i in 1:length(aaa[,1]))
    {
      if(aaa[i,2]<aaa[i,1])
      {
        MatrixGoodTree[i,2]=aaa[i,1]
        MatrixGoodTree[i,1]=aaa[i,2]
      }
    }
    MatrixGoodTree=unique(MatrixGoodTree)
  }
  return(MatrixGoodTree)
}
if(type==2 | type=="DVine")
{
  if(Ny==0 | Nx==0)# if Ny==0 | Nx==0 there are duplications!
  {
    half=dim(MatrixGoodTree)[1]/2
    MatrixGoodTree=MatrixGoodTree[1:half,]
  }
  return(MatrixGoodTree)
}
}

WriteMatrixDvine <- function(FirstLevelDvine){
  ddd <- FirstLevelDvine
  MatrixDvine <- matrix(0,nrow=length(ddd),ncol=length(ddd))
  for(i in 1:length(ddd))
  {
    MatrixDvine[i,i] <- ddd[i]
    if(i!=1)
    {
      for(j in 1:(i-1))
      {
        MatrixDvine[i,j] <- ddd[length(ddd)+j-i+1]
      }
    }
  }
  return(MatrixDvine)
}

FirstDVineCond <- function(data,Nx,treecrit="AIC",selectioncrit="AIC",familyset = NA,indeptest = FALSE, level = 0.05,
                           se = FALSE, rotations = TRUE, method = "mle"){
  Ny <- dim(data)[2]-Nx
  if(!is.numeric(familyset)[1])
  {
    familyset=c(1,2,3,4,5,6,7,8,9,10,13,14,16,17,18,19,20,23,24,26,27,28,29,30,33,34,36,37,38,39,
                40,104,114,124,134, 204, 214, 224, 234 )
  }
  Dvine <- list()
  PossibleDvineFirstLevel <- PossibleCDVine4Condition(Nx,Ny,"DVine")
  NumbDVine <- length(PossibleDvineFirstLevel[,1])#factorial(Ny)*factorial(Nx)
  for(i in 1:NumbDVine)
  {
    print(paste("Number D-vine analysed: ",i,"/",NumbDVine,sep=""))
    Dvine[[i]] <- WriteMatrixDvine(PossibleDvineFirstLevel[i,])
    aaa <- RVineCopSelect(data,familyset,Dvine[[i]],selectioncrit,indeptest = indeptest, level = level,
                          se = se, rotations = rotations, method = method)
    Dvine[[i]] <- aaa
  }

  TestAIC <- rep(1000000,NumbDVine)
  TestBIC <- rep(1000000,NumbDVine)
  for(i in 1:NumbDVine)
  {
    TestAIC[i] <- RVineAIC(data, Dvine[[i]], par = Dvine[[i]]$par, par2 = Dvine[[i]]$par2)$AIC
    TestBIC[i] <- RVineBIC(data, Dvine[[i]], par = Dvine[[i]]$par, par2 = Dvine[[i]]$par2)$BIC
  }
  TestS <- data.frame(tree=seq(1,NumbDVine,1),AIC=TestAIC,BIC=TestBIC)

  BestAicDVine <- Dvine[[which(TestS$AIC==min(TestS$AIC))[1]]]
  BestBicDVine <- Dvine[[which(TestS$BIC==min(TestS$BIC))[1]]]

  BestDVine <- list()
  if(treecrit=="AIC")
  {
    BestDVine[[1]] <- BestAicDVine$Matrix
    BestDVine[[2]] <- min(TestS$AIC)
    # ties for the best vine are ties at the minimum of the criterion
    if(length(which(TestS$AIC==min(TestS$AIC)))>1)
    {
      print(paste("Attention: there are ", length(which(TestS$AIC==min(TestS$AIC)))," vines (over a total of ",
                  NumbDVine,") equally ranked as the best. Consider using CDVineCondRank to visualize the full ranking",sep=""))
    }
  }
  if(treecrit=="BIC")
  {
    BestDVine[[1]] <- BestBicDVine$Matrix
    BestDVine[[2]] <- min(TestS$BIC)
    if(length(which(TestS$BIC==min(TestS$BIC)))>1)
    {
      print(paste("Attention: there are ", length(which(TestS$BIC==min(TestS$BIC)))," vines (over a total of ",
                  NumbDVine,") equally ranked as the best. Consider using CDVineCondRank to visualize the full ranking",sep=""))
    }
  }
  return(BestDVine)
}

WriteMatrixCvine <- function(FirstLevelCvine){
  d <- FirstLevelCvine
  MatrixCvine <- matrix(0,nrow=length(d),ncol=length(d))
  for(i in 1:length(d))
  {
    MatrixCvine[i,(1:i)] <- d[i]
  }
  return(MatrixCvine)
}

FirstCVineCond <- function(data,Nx,treecrit="AIC",selectioncrit="AIC",familyset = NA,indeptest = FALSE, level = 0.05,
                           se = FALSE, rotations = TRUE, method = "mle"){
  Ny <- dim(data)[2]-Nx
  if(!is.numeric(familyset)[1])
  {
    familyset=c(1,2,3,4,5,6,7,8,9,10,13,14,16,17,18,19,20,23,24,26,27,28,29,30,33,34,36,37,38,39,
                40,104,114,124,134, 204, 214, 224, 234 )
  }
  Cvine <- list()
  PossibleCvineFirstLevel <- PossibleCDVine4Condition(Nx,Ny,"CVine")
  NumbCVine <- length(PossibleCvineFirstLevel[,1])
  for(i in 1:NumbCVine)
  {
    print(paste("Number C-vine analysed: ",i,"/",NumbCVine,sep=""))
    Cvine[[i]] <- WriteMatrixCvine(PossibleCvineFirstLevel[i,])
    aaa <- RVineCopSelect(data,familyset,Cvine[[i]],selectioncrit,indeptest = indeptest, level = level,
                          se = se, rotations = rotations, method = method)
    Cvine[[i]] <- aaa
  }

  TestAIC <- rep(1000000,NumbCVine)
  TestBIC <- rep(1000000,NumbCVine)
  for(i in 1:NumbCVine)
  {
    TestAIC[i] <- RVineAIC(data, Cvine[[i]], par = Cvine[[i]]$par, par2 = Cvine[[i]]$par2)$AIC
    TestBIC[i] <- RVineBIC(data, Cvine[[i]], par = Cvine[[i]]$par, par2 = Cvine[[i]]$par2)$BIC
  }
  TestS <- data.frame(tree=seq(1,NumbCVine,1),AIC=TestAIC,BIC=TestBIC)

  BestAicCVine <- Cvine[[which(TestS$AIC==min(TestS$AIC))[1]]]
  BestBicCVine <- Cvine[[which(TestS$BIC==min(TestS$BIC))[1]]]

  BestCVine <- list()
  if(treecrit=="AIC")
  {
    BestCVine[[1]] <- BestAicCVine$Matrix
    BestCVine[[2]] <- min(TestS$AIC)
    if(length(which(TestS$AIC==min(TestS$AIC)))>1)
    {
      print(paste("Attention: there are ", length(which(TestS$AIC==min(TestS$AIC)))," vines (over a total of ",
                  NumbCVine,") equally ranked as the best. Consider using CDVineCondRank to visualize the full ranking",sep=""))
    }
  }
  if(treecrit=="BIC")
  {
    BestCVine[[1]] <- BestBicCVine$Matrix
    BestCVine[[2]] <- min(TestS$BIC)
    if(length(which(TestS$BIC==min(TestS$BIC)))>1)
    {
      print(paste("Attention: there are ", length(which(TestS$BIC==min(TestS$BIC)))," vines (over a total of ",
                  NumbCVine,") equally ranked as the best. Consider using CDVineCondRank to visualize the full ranking",sep=""))
    }
  }
  return(BestCVine)
}

RankDVineCond <- function(data,Nx,treecrit="AIC",selectioncrit="AIC",familyset = NA,indeptest = FALSE, level = 0.05,
                          se = FALSE, rotations = TRUE, method = "mle"){
  Ny <- dim(data)[2]-Nx
  if(!is.numeric(familyset)[1])
  {
    familyset=c(1,2,3,4,5,6,7,8,9,10,13,14,16,17,18,19,20,23,24,26,27,28,29,30,33,34,36,37,38,39,
                40,104,114,124,134, 204, 214, 224, 234 )
  }
  Dvine <- list()
  PossibleDvineFirstLevel <- PossibleCDVine4Condition(Nx,Ny,"DVine")
  NumbDVine <- length(PossibleDvineFirstLevel[,1])
  for(i in 1:NumbDVine)
  {
    print(paste("Number D-vine analysed: ",i,"/",NumbDVine,sep=""))
    Dvine[[i]] <- WriteMatrixDvine(PossibleDvineFirstLevel[i,])
    aaa <- RVineCopSelect(data,familyset,Dvine[[i]],selectioncrit,indeptest = indeptest, level = level,
                          se = se, rotations = rotations, method = method)
    Dvine[[i]] <- aaa
  }

  TestAIC <- rep(1000000,NumbDVine)
  TestBIC <- rep(1000000,NumbDVine)
  for(i in 1:NumbDVine)
  {
    TestAIC[i] <- RVineAIC(data, Dvine[[i]], par = Dvine[[i]]$par, par2 = Dvine[[i]]$par2)$AIC
    TestBIC[i] <- RVineBIC(data, Dvine[[i]], par = Dvine[[i]]$par, par2 = Dvine[[i]]$par2)$BIC
  }
  if(treecrit=="AIC")
  {
    TestS <- data.frame(tree=seq(1,NumbDVine,1),AIC=TestAIC)
    TestS <- TestS[order(TestS$AIC, decreasing =FALSE),]
  }
  if(treecrit=="BIC")
  {
    TestS <- data.frame(tree=seq(1,NumbDVine,1),BIC=TestBIC)
    TestS <- TestS[order(TestS$BIC, decreasing =FALSE),]
  }

  Ranking <- list()
  Ranking[[1]] <- TestS
  Ranking[[2]] <- list()
  for(i in 1:NumbDVine)
  {
    Ranking[[2]][[i]] = Dvine[[TestS$tree[i]]]
  }
  # after this step "tree" is the rank position, not the original vine index
  Ranking[[1]]$tree=sort(Ranking[[1]]$tree)
  if(treecrit=="AIC")
  {
    Ranking[[1]]=data.frame(tree=Ranking[[1]]$tree,AIC=Ranking[[1]]$AIC)
  }
  if(treecrit=="BIC")
  {
    Ranking[[1]]=data.frame(tree=Ranking[[1]]$tree,BIC=Ranking[[1]]$BIC)
  }
  return(Ranking)
}

RankCVineCond <- function(data,Nx,treecrit="AIC",selectioncrit="AIC",familyset = NA,indeptest = FALSE, level = 0.05,
                          se = FALSE, rotations = TRUE, method = "mle"){
  Ny <- dim(data)[2]-Nx
  if(!is.numeric(familyset)[1])
  {
    familyset=c(1,2,3,4,5,6,7,8,9,10,13,14,16,17,18,19,20,23,24,26,27,28,29,30,33,34,36,37,38,39,
                40,104,114,124,134, 204, 214, 224, 234 )
  }
  Cvine <- list()
  PossibleCvineFirstLevel <- PossibleCDVine4Condition(Nx,Ny,"CVine")
  NumbCVine <- length(PossibleCvineFirstLevel[,1])
  for(i in 1:NumbCVine)
  {
    print(paste("Number C-vine analysed: ",i,"/",NumbCVine,sep=""))
    Cvine[[i]] <- WriteMatrixCvine(PossibleCvineFirstLevel[i,])
    aaa <- RVineCopSelect(data,familyset,Cvine[[i]],selectioncrit,indeptest = indeptest, level = level,
                          se = se, rotations = rotations, method = method)
    Cvine[[i]] <- aaa
  }

  TestAIC <- rep(1000000,NumbCVine)
  TestBIC <- rep(1000000,NumbCVine)
  for(i in 1:NumbCVine)
  {
    TestAIC[i] <- RVineAIC(data, Cvine[[i]], par = Cvine[[i]]$par, par2 = Cvine[[i]]$par2)$AIC
    TestBIC[i] <- RVineBIC(data, Cvine[[i]], par = Cvine[[i]]$par, par2 = Cvine[[i]]$par2)$BIC
  }
  if(treecrit=="AIC")
  {
    TestS <- data.frame(tree=seq(1,NumbCVine,1),AIC=TestAIC)
    TestS <- TestS[order(TestS$AIC, decreasing =FALSE),]
  }
  if(treecrit=="BIC")
  {
    TestS <- data.frame(tree=seq(1,NumbCVine,1),BIC=TestBIC)
    TestS <- TestS[order(TestS$BIC, decreasing =FALSE),]
  }

  Ranking <- list()
  Ranking[[1]] <- TestS
  Ranking[[2]] <- list()
  for(i in 1:NumbCVine)
  {
    Ranking[[2]][[i]] = Cvine[[TestS$tree[i]]]
  }
  # after this step "tree" is the rank position, not the original vine index
  Ranking[[1]]$tree=sort(Ranking[[1]]$tree)
  if(treecrit=="AIC")
  {
    Ranking[[1]]=data.frame(tree=Ranking[[1]]$tree,AIC=Ranking[[1]]$AIC)
  }
  if(treecrit=="BIC")
  {
    Ranking[[1]]=data.frame(tree=Ranking[[1]]$tree,BIC=Ranking[[1]]$BIC)
  }
  return(Ranking)
}
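# Note on the objects returned by RankDVineCond()/RankCVineCond() above -- a
# short usage sketch kept as a comment (the 'Ranking' object is assumed to come
# from one of these internal helpers). After the final sort, Ranking[[1]]$tree
# is the rank position (1 = best) rather than the original vine index, and
# Ranking[[2]][[i]] holds the fitted RVineMatrix object of the i-th best vine:
# Ranking <- RankDVineCond(data, Nx = 2)
# Ranking[[1]]           # ranking table: rank position and AIC (or BIC)
# Ranking[[2]][[1]]      # fitted vine ranked first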
PossibleCVineMatrixCond <- function(data,Nx){ Ny <- dim(data)[2]-Nx Cvine <- list() PossibleCvineFirstLevel <- PossibleCDVine4Condition(Nx,Ny,"CVine") NumbCVine <- length(PossibleCvineFirstLevel[,1]) for(i in 1:NumbCVine) { Cvine[[i]] <- WriteMatrixCvine(PossibleCvineFirstLevel[i,]) } return(Cvine) } PossibleDVineMatrixCond <- function(data,Nx){ Ny <- dim(data)[2]-Nx Dvine <- list() PossibleDvineFirstLevel <- PossibleCDVine4Condition(Nx,Ny,"DVine") NumbDVine <- length(PossibleDvineFirstLevel[,1]) for(i in 1:NumbDVine) { Dvine[[i]] <- WriteMatrixDvine(PossibleDvineFirstLevel[i,]) } return(Dvine) }
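# A brief illustration of the structure-enumeration helpers above, kept as a
# comment so that nothing executes at package load ('combinat' is required for
# permn() inside PossibleCDVine4Condition()). With Ny = 2 variables of interest
# and Nx = 2 conditioning variables, each row of the returned matrix is an
# admissible first-tree order (for the D-vine case, the conditioning-variable
# indices come first), and WriteMatrixDvine()/WriteMatrixCvine() expand one
# order into a vine structure matrix:
# orders <- PossibleCDVine4Condition(Nx = 2, Ny = 2, type = "DVine")
# WriteMatrixDvine(orders[1, ])   # D-vine structure matrix for the first order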
/scratch/gouwar.j/cran-all/cranData/CDVineCopulaConditional/R/CDVineCopulaConditional.r
#' @title The CDatanet package
#' @description The \pkg{CDatanet} package implements the count data model with social interactions and the dyadic linking model developed in Houndetoungan (2022).
#' It also simulates data from the count data model and implements the Spatial Autoregressive Tobit model (LeSage, 2000; Xu and Lee, 2015) for left-censored data and the Spatial Autoregressive Model (Lee, 2004).
#' Network formation models, such as that studied by Yan et al. (2019), are also implemented.
#' To make the computations faster, \pkg{CDatanet} uses \code{C++} through the \pkg{Rcpp} package (Eddelbuettel et al., 2011).
#'
#' @references
#' Eddelbuettel, D., & Francois, R. (2011). \pkg{Rcpp}: Seamless \R and \code{C++} integration. \emph{Journal of Statistical Software}, 40(8), 1-18, \doi{10.18637/jss.v040.i08}.
#' @references
#' Houndetoungan, E. A. (2022). Count Data Models with Social Interactions under Rational Expectations. Available at SSRN 3721250, \doi{10.2139/ssrn.3721250}.
#' @references
#' Lee, L. F. (2004). Asymptotic distributions of quasi-maximum likelihood estimators for spatial autoregressive models. \emph{Econometrica}, 72(6), 1899-1925, \doi{10.1111/j.1468-0262.2004.00558.x}.
#' @references
#' Xu, X., & Lee, L. F. (2015). Maximum likelihood estimation of a spatial autoregressive Tobit model. \emph{Journal of Econometrics}, 188(1), 264-280, \doi{10.1016/j.jeconom.2015.05.004}.
#' @references
#' Yan, T., Jiang, B., Fienberg, S. E., & Leng, C. (2019). Statistical inference in a directed network model with covariates. \emph{Journal of the American Statistical Association}, 114(526), 857-868, \doi{10.1080/01621459.2018.1448829}.
#'
#' @useDynLib CDatanet, .registration = TRUE
"_PACKAGE"
NULL
/scratch/gouwar.j/cran-all/cranData/CDatanet/R/CDatanet.R
#' @title Simulate data from Count Data Model with Social Interactions
#' @param formula an object of class \link[stats]{formula}: a symbolic description of the model. The `formula` should be as for example \code{y ~ x1 + x2 | x1 + x2}
#' where `y` is the endogenous vector, the listed variables before the pipe, `x1`, `x2` are the individual exogenous variables and
#' the listed variables after the pipe, `x1`, `x2` are the contextual observable variables. Other formulas may be
#' \code{y ~ x1 + x2} for the model without contextual effects, \code{y ~ -1 + x1 + x2 | x1 + x2} for the model
#' without intercept or \code{y ~ x1 + x2 | x2 + x3} to allow the contextual variables to be different from the individual variables.
#' @param contextual (optional) logical; if true, this means that all individual variables will be set as contextual variables. Setting the
#' `formula` as `y ~ x1 + x2` and `contextual` as `TRUE` is equivalent to setting the formula as `y ~ x1 + x2 | x1 + x2`.
#' @param Glist the adjacency matrix or list of sub-adjacency matrices.
#' @param theta the true value of the vector \eqn{\theta = (\lambda, \beta', \gamma')'}. The parameter \eqn{\gamma} should be removed if the model
#' does not contain contextual effects (see details).
#' @param deltabar the true value of \eqn{\bar{\delta}}{deltabar}.
#' @param delta the true value of the vector \eqn{\delta = (\delta_2, ..., \delta_{\bar{R}})}{\delta = (\delta_2, ..., \delta_{Rbar})}. If `NULL`, then \eqn{\bar{R}}{Rbar} is set to one and `delta` is empty.
#' @param rho the true value of \eqn{\rho}.
#' @param tol the tolerance value used in the Fixed Point Iteration Method to compute the expectation of `y`. The process stops if the \eqn{L_1}{L1} distance
#' between two consecutive values of the expectation of `y` is less than `tol`.
#' @param maxit the maximal number of iterations in the Fixed Point Iteration Method.
#' @param data an optional data frame, list or environment (or object coercible by \link[base]{as.data.frame} to a data frame) containing the variables
#' in the model. If not found in data, the variables are taken from \code{environment(formula)}, typically the environment from which `simcdnet` is called.
#' @description
#' `simcdnet` is used to simulate count data with rational expectations (see details). The model is presented in Houndetoungan (2022).
#' @details
#' Following Houndetoungan (2022), the count data \eqn{\mathbf{y}}{y} is generated from a latent variable \eqn{\mathbf{y}^*}{ys}.
#' The latent variable is given for all i as
#' \deqn{y_i^* = \lambda \mathbf{g}_i \mathbf{E}(\bar{\mathbf{y}}|\mathbf{X},\mathbf{G}) + \mathbf{x}_i'\beta + \mathbf{g}_i\mathbf{X}\gamma + \epsilon_i,}{ys_i = \lambda g_i*E(ybar|X, G) + x_i'\beta + g_i*X\gamma + \epsilon_i,}
#' where \eqn{\epsilon_i \sim N(0, 1)}{\epsilon_i --> N(0, 1)}.\cr
#' Then, \eqn{y_i = r} iff \eqn{a_r \leq y_i^* \leq a_{r+1}}{a_r \le ys_i \le a_{r + 1}}, where
#' \eqn{a_0 = -\infty}{a_0 = -Inf}, \eqn{a_1 = 0}, \eqn{a_r = \sum_{k = 1}^r\delta_k}{a_r = \delta_1 + ... + \delta_r}.
#' The parameters are subject to the constraints \eqn{\delta_r \geq \lambda}{\delta_r \ge \lambda} if \eqn{1 \leq r \leq \bar{R}}{1 \le r \le Rbar}, and
#' \eqn{\delta_r = (r - \bar{R})^{\rho}\bar{\delta} + \lambda}{\delta_r = deltabar*(r - Rbar)^{\rho} + \lambda} if \eqn{r \geq \bar{R} + 1}{r \ge Rbar + 1}.
#' @seealso \code{\link{cdnet}}, \code{\link{simsart}}, \code{\link{simsar}}.
#' @return A list consisting of:
#' \item{yst}{ys (see details), the latent variable.}
#' \item{y}{the observed count data.}
#' \item{yb}{ybar (see details), the expectation of y.}
#' \item{Gyb}{the average of the expectation of y among friends.}
#' \item{marg.effects}{the marginal effects.}
#' \item{rho}{the value of rho used in the simulation.}
#' \item{Rmax}{infinite sums in the marginal effects are approximated by sums up to Rmax.}
#' \item{iteration}{the number of iterations performed for each sub-network in the Fixed Point Iteration Method.}
#' @references
#' Houndetoungan, E. A. (2022). Count Data Models with Social Interactions under Rational Expectations. Available at SSRN 3721250, \doi{10.2139/ssrn.3721250}.
#' @examples
#' \donttest{
#' # Groups' size
#' M <- 5 # Number of sub-groups
#' nvec <- round(runif(M, 100, 1000))
#' n <- sum(nvec)
#'
#' # Parameters
#' lambda <- 0.4
#' beta <- c(1.5, 2.2, -0.9)
#' gamma <- c(1.5, -1.2)
#' delta <- c(1, 0.87, 0.75, 0.6)
#' delbar <- 0.05
#' theta <- c(lambda, beta, gamma)
#'
#' # X
#' X <- cbind(rnorm(n, 1, 1), rexp(n, 0.4))
#'
#' # Network
#' Glist <- list()
#'
#' for (m in 1:M) {
#'   nm <- nvec[m]
#'   Gm <- matrix(0, nm, nm)
#'   max_d <- 30
#'   for (i in 1:nm) {
#'     tmp <- sample((1:nm)[-i], sample(0:max_d, 1))
#'     Gm[i, tmp] <- 1
#'   }
#'   rs <- rowSums(Gm); rs[rs == 0] <- 1
#'   Gm <- Gm/rs
#'   Glist[[m]] <- Gm
#' }
#'
#'
#' # data
#' data <- data.frame(x1 = X[,1], x2 = X[,2])
#'
#' rm(list = ls()[!(ls() %in% c("Glist", "data", "theta", "delta", "delbar"))])
#'
#' ytmp <- simcdnet(formula = ~ x1 + x2 | x1 + x2, Glist = Glist, theta = theta,
#'                  deltabar = delbar, delta = delta, rho = 0, data = data)
#'
#' y <- ytmp$y
#'
#' # plot histogram
#' hist(y, breaks = max(y))}
#' @importFrom Rcpp sourceCpp
#' @importFrom stats rnorm
#' @export
simcdnet <- function(formula, contextual, Glist, theta, deltabar, delta = NULL, rho = 0,
                     tol = 1e-10, maxit = 500, data) {
  stopifnot(rho >= 0)
  stopifnot(deltabar > 0)
  if (missing(contextual)) {
    contextual <- FALSE
  }
  if (!is.list(Glist)) {
    Glist <- list(Glist)
  }
  M <- length(Glist)
  nvec <- unlist(lapply(Glist, nrow))
  n <- sum(nvec)
  igr <- matrix(c(cumsum(c(0, nvec[-M])), cumsum(nvec) - 1), ncol = 2)
  Rbar <- length(delta) + 1
  lambda <- theta[1]
  if(length(delta) != 0){
    stopifnot(all(delta > 0))
    if(any(delta < abs(lambda))){
      warning("Potential multiple equilibrium issue: abs(lambda) > delta")
    }
  }
  # if((Rbar^rho + deltabar) < abs(lambda)) warning("Potential multiple equilibrium issue: (Rbar^rho + deltabar) < abs(lambda)")
  f.t.data <- formula.to.data(formula = formula, contextual = contextual, Glist = Glist,
                              M = M, igr = igr, data = data, type = "sim", theta0 = 0)
  X <- f.t.data$X
  K <- length(theta)
  if(K != (ncol(X) + 1)) {
    stop("The length of theta is not compatible with the number of explanatory variables.")
  }
  b <- theta[2:K]
  xb <- c(X %*% b)
  eps <- rnorm(n, 0, 1)
  yb <- rep(0, n)
  Gyb <- rep(0, n)
  coln <- c("lambda", colnames(X))
  thetaWI <- theta
  if("(Intercept)" %in% coln) {
    thetaWI <- theta[-2]
    coln <- coln[-2]
  }
  t <- NULL
  yst <- NULL
  meffects <- NULL
  Rmax <- NULL
  if(rho == 0){
    delta <- c(delta, deltabar + lambda)  # deltabar + lambda is appended to delta when rho = 0
    t <- fyb(yb, Gyb, Glist, igr, M, xb, lambda, delta, n, Rbar, tol, maxit)
    Ztlamda <- lambda*Gyb + xb
    yst <- Ztlamda + eps
    y <- c(fy(yst, max(yst), delta, n, Rbar))
    meffects <- fmeffects(n, delta, Rbar, Ztlamda, thetaWI)
  } else{
    t <- fyb2(yb, Gyb, Glist, igr, M, xb, lambda, delta, deltabar, rho, n, Rbar, tol, maxit)
    Ztlamda <- lambda*Gyb + xb
    yst <- Ztlamda + eps
    y <- c(fy2(yst, max(yst), lambda, delta, deltabar, rho, n, Rbar))
    meffects <- fmeffects2(n, lambda, delta, deltabar, rho, Rbar, Ztlamda, thetaWI)
  }
  Rmax <- meffects$Rmax
  meffects <- c(meffects$meffects)
  names(meffects) <- coln
  list("yst" = yst,
       "y" = y,
       "yb" = yb,
       "Gyb" = Gyb,
       "marg.effects" = meffects,
       "rho" = rho,
       "Rmax" = Rmax,
       "iteration" = c(t))
}

# # Marginal effect
# fmeffects <- function(Ztlamda, theta, delta) {
#   # marginal effect
#   maxZTl <- max(Ztlamda) + 10
#   avec <- c(0, cumsum(delta))
#   deltaRB <- tail(delta, 1)
#   cont <- TRUE
#   Rmax <- length(avec)
#   while (cont) {
#     Rmax <- Rmax + 1
#     avec[Rmax] <- tail(avec, 1) + deltaRB
#     cont <- tail(avec, 1) < maxZTl
#   }
#   fir <- sum(apply(dnorm(kronecker(Ztlamda, t(avec), "-")), 2, mean))
#   theta*fir
# }
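# The sketch below is a quick pre-check of the uniqueness condition that
# simcdnet() enforces with a warning: every delta_r must be at least
# abs(lambda), and Rbar is tied to the length of delta. The objects `lambda`
# and `delta` play the same role as in the @examples block above; this is
# illustrative code only, not part of the exported API.
# lambda <- 0.4
# delta  <- c(1, 0.87, 0.75, 0.6)
# Rbar   <- length(delta) + 1   # as computed inside simcdnet()
# all(delta >= abs(lambda))     # TRUE: no multiple-equilibrium warning expected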
/scratch/gouwar.j/cran-all/cranData/CDatanet/R/CDnetsim.R
# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 fL <- function(ZtLambda, delta, Rbar, n) { .Call(`_CDatanet_fL`, ZtLambda, delta, Rbar, n) } fLncond <- function(ZtLambda, delta, Rbar, n, nsimu) { .Call(`_CDatanet_fLncond`, ZtLambda, delta, Rbar, n, nsimu) } fyb <- function(yb, Gyb, G, igroup, ngroup, psi, lambda, delta, n, Rbar, tol, maxit) { .Call(`_CDatanet_fyb`, yb, Gyb, G, igroup, ngroup, psi, lambda, delta, n, Rbar, tol, maxit) } fybncond <- function(yb, Gyb, G, igroup, ngroup, psi, lambda, delta, n, nsimu, Rbar, tol, maxit) { .Call(`_CDatanet_fybncond`, yb, Gyb, G, igroup, ngroup, psi, lambda, delta, n, nsimu, Rbar, tol, maxit) } fy <- function(yst, maxyst, delta, n, Rbar) { .Call(`_CDatanet_fy`, yst, maxyst, delta, n, Rbar) } fmeffects <- function(n, delta, Rbar, ZtLambda, lbeta) { .Call(`_CDatanet_fmeffects`, n, delta, Rbar, ZtLambda, lbeta) } foptimREM <- function(yb, Gyb, theta, X, G, igroup, ngroup, K, n, Rbar, y, maxy, tol = 1e-13, maxit = 1e3L) { .Call(`_CDatanet_foptimREM`, yb, Gyb, theta, X, G, igroup, ngroup, K, n, Rbar, y, maxy, tol, maxit) } foptimREMncond1 <- function(yb, Gyb, theta, X, Simu1, nsimu, G, igroup, ngroup, K, n, Rbar, y, maxy, tol = 1e-13, maxit = 1e3L) { .Call(`_CDatanet_foptimREMncond1`, yb, Gyb, theta, X, Simu1, nsimu, G, igroup, ngroup, K, n, Rbar, y, maxy, tol, maxit) } foptimREM_NPL <- function(Gyb, theta, X, Rbar, maxy, K, n, y) { .Call(`_CDatanet_foptimREM_NPL`, Gyb, theta, X, Rbar, maxy, K, n, y) } fL_NPL <- function(yb, Gyb, G, igroup, ngroup, X, theta, Rbar, K, n) { invisible(.Call(`_CDatanet_fL_NPL`, yb, Gyb, G, igroup, ngroup, X, theta, Rbar, K, n)) } fnewyb <- function(yb, Gyb, G, igroup, ngroup, X, theta, Rbar, K, n, tol, maxit) { invisible(.Call(`_CDatanet_fnewyb`, yb, Gyb, G, igroup, ngroup, X, theta, Rbar, K, n, tol, maxit)) } foptimREM_NPLncond1 <- function(Gyb, theta, X, Simu1, nsimu, Rbar, maxy, K, n, y) { .Call(`_CDatanet_foptimREM_NPLncond1`, Gyb, theta, X, Simu1, nsimu, Rbar, maxy, K, n, y) } fL_NPLncond1 <- function(yb, Gyb, G, igroup, ngroup, X, theta, Simu1, nsimu, Rbar, K, n) { invisible(.Call(`_CDatanet_fL_NPLncond1`, yb, Gyb, G, igroup, ngroup, X, theta, Simu1, nsimu, Rbar, K, n)) } foptimREM_NPLncond2 <- function(Gyb, theta, X, Simu1, Simu2, nsimu, Rbar, maxy, K, n, y) { .Call(`_CDatanet_foptimREM_NPLncond2`, Gyb, theta, X, Simu1, Simu2, nsimu, Rbar, maxy, K, n, y) } fL_NPLncond2 <- function(yb, Gyb, G, igroup, ngroup, X, theta, Simu1, Simu2, nsimu, Rbar, K, n) { invisible(.Call(`_CDatanet_fL_NPLncond2`, yb, Gyb, G, igroup, ngroup, X, theta, Simu1, Simu2, nsimu, Rbar, K, n)) } flogintphi <- function(n, S, a, b, Mean, simu, igroup, ngroup) { .Call(`_CDatanet_flogintphi`, n, S, a, b, Mean, simu, igroup, ngroup) } cdnetLBFGS <- function(par, Gyb, X, Rbar, maxy, K, n, y, maxit = 300L, eps_f = 1e-13, eps_g = 1e-13, print = FALSE) { .Call(`_CDatanet_cdnetLBFGS`, par, Gyb, X, Rbar, maxy, K, n, y, maxit, eps_f, eps_g, print) } fcovCDI <- function(n, Gyb, theta, X, Rbar, K, S, G, igroup, ngroup, ccov) { .Call(`_CDatanet_fcovCDI`, n, Gyb, theta, X, Rbar, K, S, G, igroup, ngroup, ccov) } fL2 <- function(ZtLambda, lambda, delta, bdelta, rho, Rbar, n) { .Call(`_CDatanet_fL2`, ZtLambda, lambda, delta, bdelta, rho, Rbar, n) } fLncond2 <- function(ZtLambda, lambda, delta, bdelta, rho, Rbar, n, nsimu) { .Call(`_CDatanet_fLncond2`, ZtLambda, lambda, delta, bdelta, rho, Rbar, n, nsimu) } fyb2 <- function(yb, Gyb, G, igroup, ngroup, psi, lambda, delta, bdelta, rho, n, 
Rbar, tol, maxit) { .Call(`_CDatanet_fyb2`, yb, Gyb, G, igroup, ngroup, psi, lambda, delta, bdelta, rho, n, Rbar, tol, maxit) } fybncond2 <- function(yb, Gyb, G, igroup, ngroup, psi, lambda, delta, bdelta, rho, n, nsimu, Rbar, tol, maxit) { .Call(`_CDatanet_fybncond2`, yb, Gyb, G, igroup, ngroup, psi, lambda, delta, bdelta, rho, n, nsimu, Rbar, tol, maxit) } fy2 <- function(yst, maxyst, lambda, delta, bdelta, rho, n, Rbar) { .Call(`_CDatanet_fy2`, yst, maxyst, lambda, delta, bdelta, rho, n, Rbar) } fmeffects2 <- function(n, lambda, delta, bdelta, rho, Rbar, ZtLambda, lbeta) { .Call(`_CDatanet_fmeffects2`, n, lambda, delta, bdelta, rho, Rbar, ZtLambda, lbeta) } foptimREM2 <- function(yb, Gyb, theta, X, G, igroup, ngroup, K, n, Rbar, y, maxy, tol = 1e-13, maxit = 1e3L) { .Call(`_CDatanet_foptimREM2`, yb, Gyb, theta, X, G, igroup, ngroup, K, n, Rbar, y, maxy, tol, maxit) } foptimREMncond12 <- function(yb, Gyb, theta, X, Simu1, nsimu, G, igroup, ngroup, K, n, Rbar, y, maxy, tol = 1e-13, maxit = 1e3L) { .Call(`_CDatanet_foptimREMncond12`, yb, Gyb, theta, X, Simu1, nsimu, G, igroup, ngroup, K, n, Rbar, y, maxy, tol, maxit) } foptimREM_NPL2 <- function(Gyb, theta, X, Rbar, maxy, K, n, y) { .Call(`_CDatanet_foptimREM_NPL2`, Gyb, theta, X, Rbar, maxy, K, n, y) } fL_NPL2 <- function(yb, Gyb, G, igroup, ngroup, X, theta, Rbar, K, n) { invisible(.Call(`_CDatanet_fL_NPL2`, yb, Gyb, G, igroup, ngroup, X, theta, Rbar, K, n)) } fnewyb2 <- function(yb, Gyb, G, igroup, ngroup, X, theta, Rbar, K, n, tol, maxit) { invisible(.Call(`_CDatanet_fnewyb2`, yb, Gyb, G, igroup, ngroup, X, theta, Rbar, K, n, tol, maxit)) } foptimREM_NPLncond12 <- function(Gyb, theta, X, Simu1, nsimu, Rbar, maxy, K, n, y) { .Call(`_CDatanet_foptimREM_NPLncond12`, Gyb, theta, X, Simu1, nsimu, Rbar, maxy, K, n, y) } fL_NPLncond12 <- function(yb, Gyb, G, igroup, ngroup, X, theta, Simu1, nsimu, Rbar, K, n) { invisible(.Call(`_CDatanet_fL_NPLncond12`, yb, Gyb, G, igroup, ngroup, X, theta, Simu1, nsimu, Rbar, K, n)) } foptimREM_NPLncond22 <- function(Gyb, theta, X, Simu1, Simu2, nsimu, Rbar, maxy, K, n, y) { .Call(`_CDatanet_foptimREM_NPLncond22`, Gyb, theta, X, Simu1, Simu2, nsimu, Rbar, maxy, K, n, y) } fL_NPLncond22 <- function(yb, Gyb, G, igroup, ngroup, X, theta, Simu1, Simu2, nsimu, Rbar, K, n) { invisible(.Call(`_CDatanet_fL_NPLncond22`, yb, Gyb, G, igroup, ngroup, X, theta, Simu1, Simu2, nsimu, Rbar, K, n)) } cdnetLBFGSrho <- function(par, Gyb, X, Rbar, maxy, K, n, y, maxit = 300L, eps_f = 1e-13, eps_g = 1e-13, print = FALSE) { .Call(`_CDatanet_cdnetLBFGSrho`, par, Gyb, X, Rbar, maxy, K, n, y, maxit, eps_f, eps_g, print) } fcovCDI2 <- function(n, Gyb, theta, X, Rbar, K, S, G, igroup, ngroup, ccov) { .Call(`_CDatanet_fcovCDI2`, n, Gyb, theta, X, Rbar, K, S, G, igroup, ngroup, ccov) } fdummies <- function(out, limit, M, n) { invisible(.Call(`_CDatanet_fdummies`, out, limit, M, n)) } fdatar <- function(X, ftovar, nvar, K) { .Call(`_CDatanet_fdatar`, X, ftovar, nvar, K) } frMtoVbyCOL <- function(u, N, M) { .Call(`_CDatanet_frMtoVbyCOL`, u, N, M) } frMtoVbyCOLsym <- function(u, N, M) { .Call(`_CDatanet_frMtoVbyCOLsym`, u, N, M) } fmusum <- function(mu, nu, index, indexgr, M, N) { .Call(`_CDatanet_fmusum`, mu, nu, index, indexgr, M, N) } fmusumsym <- function(mu, index, indexgr, M, N) { .Call(`_CDatanet_fmusumsym`, mu, index, indexgr, M, N) } bayesmunu <- function(a, dx, invdxdx, beta0, mu0, nu0, smu20, snu20, rho0, index, indexgr, INDEXgr, nfix, N, M, K, Kx, nvec, n, iteration, Print) { .Call(`_CDatanet_bayesmunu`, a, dx, invdxdx, beta0, 
mu0, nu0, smu20, snu20, rho0, index, indexgr, INDEXgr, nfix, N, M, K, Kx, nvec, n, iteration, Print) } bayesmu <- function(a, dx, invdxdx, beta0, mu0, smu20, index, indexgr, INDEXgr, nfix, N, M, K, Kx, nvec, n, iteration, sym, Print) { .Call(`_CDatanet_bayesmu`, a, dx, invdxdx, beta0, mu0, smu20, index, indexgr, INDEXgr, nfix, N, M, K, Kx, nvec, n, iteration, sym, Print) } fhomobeta2f <- function(theta, a, dx, nvec, index, indexgr, M, maxit = 300L, eps_f = 1e-6, eps_g = 1e-5, hasX = TRUE, Print = TRUE) { .Call(`_CDatanet_fhomobeta2f`, theta, a, dx, nvec, index, indexgr, M, maxit, eps_f, eps_g, hasX, Print) } fhomobeta1f <- function(theta, a, dx, nvec, index, indexgr, M, maxit = 300L, eps_f = 1e-6, eps_g = 1e-5, hasX = TRUE, Print = TRUE) { .Call(`_CDatanet_fhomobeta1f`, theta, a, dx, nvec, index, indexgr, M, maxit, eps_f, eps_g, hasX, Print) } fhomobetasym <- function(theta, a, dx, nvec, index, indexgr, M, maxit = 300L, eps_f = 1e-6, eps_g = 1e-5, hasX = TRUE, Print = TRUE) { .Call(`_CDatanet_fhomobetasym`, theta, a, dx, nvec, index, indexgr, M, maxit, eps_f, eps_g, hasX, Print) } hdataF2L <- function(data, nvec, index, M) { .Call(`_CDatanet_hdataF2L`, data, nvec, index, M) } hdataF2U <- function(data, nvec, index, indexgr, M) { .Call(`_CDatanet_hdataF2U`, data, nvec, index, indexgr, M) } hdata2S <- function(data, nvec, index, indexgr, M) { .Call(`_CDatanet_hdata2S`, data, nvec, index, indexgr, M) } fySar <- function(y, Gy, G, eps, igroup, ngroup, psi, lambda) { invisible(.Call(`_CDatanet_fySar`, y, Gy, G, eps, igroup, ngroup, psi, lambda)) } fySarRE <- function(y, Gye, ye, G, eps, igroup, ngroup, psi, lambda) { invisible(.Call(`_CDatanet_fySarRE`, y, Gye, ye, G, eps, igroup, ngroup, psi, lambda)) } foptimSAR <- function(alphatilde, X, invXX, G, I, n, y, Gy, ngroup, FE, print) { .Call(`_CDatanet_foptimSAR`, alphatilde, X, invXX, G, I, n, y, Gy, ngroup, FE, print) } fSARjac <- function(lambda, s2, X, XX, Xbeta, G, I, igroup, ngroup, n, K, FE) { .Call(`_CDatanet_fSARjac`, lambda, s2, X, XX, Xbeta, G, I, igroup, ngroup, n, K, FE) } fybsar <- function(yb, Gyb, G, igroup, ngroup, psi, lambda) { invisible(.Call(`_CDatanet_fybsar`, yb, Gyb, G, igroup, ngroup, psi, lambda)) } foptimSAR_RE <- function(alphatilde, X, G, I, y, Gy, igroup, ngroup, n, K) { .Call(`_CDatanet_foptimSAR_RE`, alphatilde, X, G, I, y, Gy, igroup, ngroup, n, K) } foptimSAR0_RE <- function(alphatilde, X, G, I, y, Gy, igroup, ngroup, n, K) { .Call(`_CDatanet_foptimSAR0_RE`, alphatilde, X, G, I, y, Gy, igroup, ngroup, n, K) } fyTobit <- function(yst, y, Gy, Ztlamda, G, eps, igroup, ngroup, psi, n, lambda, tol, maxit) { .Call(`_CDatanet_fyTobit`, yst, y, Gy, Ztlamda, G, eps, igroup, ngroup, psi, n, lambda, tol, maxit) } foptimTobit <- function(theta, X, logdetA2, alphatilde, G2, I2, K, y, Gy, idpos, idzero, npos, ngroup, I, W, n, igroup) { .Call(`_CDatanet_foptimTobit`, theta, X, logdetA2, alphatilde, G2, I2, K, y, Gy, idpos, idzero, npos, ngroup, I, W, n, igroup) } foptimTobit0 <- function(theta, X, logdetA2, alphatilde, G2, I2, K, y, Gy, idpos, idzero, npos, ngroup, I, W, n, igroup) { .Call(`_CDatanet_foptimTobit0`, theta, X, logdetA2, alphatilde, G2, I2, K, y, Gy, idpos, idzero, npos, ngroup, I, W, n, igroup) } fgradvecTobit <- function(theta, X, logdetA2, alphatilde, G2, I2, K, y, Gy, idpos, idzero, ngroup, I, W, n, indzero, indpos, igroup) { .Call(`_CDatanet_fgradvecTobit`, theta, X, logdetA2, alphatilde, G2, I2, K, y, Gy, idpos, idzero, ngroup, I, W, n, indzero, indpos, igroup) } fcovSTC <- function(theta, X, G2, I, W, K, n, 
y, Gy, indzero, indpos, igroup, ngroup, ccov) { .Call(`_CDatanet_fcovSTC`, theta, X, G2, I, W, K, n, y, Gy, indzero, indpos, igroup, ngroup, ccov) } fybtbit <- function(yb, Gyb, G, igroup, ngroup, psi, lambda, sigma, n, tol, maxit) { .Call(`_CDatanet_fybtbit`, yb, Gyb, G, igroup, ngroup, psi, lambda, sigma, n, tol, maxit) } foptimRE_TBT <- function(yb, Gyb, theta, yidpos, X, G, igroup, ngroup, npos, idpos, idzero, K, n, tol = 1e-13, maxit = 1e3L) { .Call(`_CDatanet_foptimRE_TBT`, yb, Gyb, theta, yidpos, X, G, igroup, ngroup, npos, idpos, idzero, K, n, tol, maxit) } foptimTBT_NPL <- function(yidpos, Gyb, X, theta, npos, idpos, idzero, K) { .Call(`_CDatanet_foptimTBT_NPL`, yidpos, Gyb, X, theta, npos, idpos, idzero, K) } fLTBT_NPL <- function(yb, Gyb, G, X, theta, igroup, ngroup, n, K) { invisible(.Call(`_CDatanet_fLTBT_NPL`, yb, Gyb, G, X, theta, igroup, ngroup, n, K)) } sartLBFGS <- function(par, yidpos, Gyb, X, npos, idpos, idzero, K, maxit = 300L, eps_f = 1e-6, eps_g = 1e-5, print = FALSE) { .Call(`_CDatanet_sartLBFGS`, par, yidpos, Gyb, X, npos, idpos, idzero, K, maxit, eps_f, eps_g, print) } fnewybTBT <- function(yb, Gyb, G, igroup, ngroup, X, theta, K, n, tol, maxit) { invisible(.Call(`_CDatanet_fnewybTBT`, yb, Gyb, G, igroup, ngroup, X, theta, K, n, tol, maxit)) } fcovSTI <- function(n, Gyb, theta, X, K, G, igroup, ngroup, ccov) { .Call(`_CDatanet_fcovSTI`, n, Gyb, theta, X, K, G, igroup, ngroup, ccov) }
/scratch/gouwar.j/cran-all/cranData/CDatanet/R/RcppExports.R
#' @title Estimate sart model
#' @param formula an object of class \link[stats]{formula}: a symbolic description of the model. The `formula` should be specified as, for example, \code{y ~ x1 + x2 | x1 + x2}
#' where `y` is the endogenous vector, the listed variables before the pipe, `x1`, `x2`, are the individual exogenous variables and
#' the listed variables after the pipe, `x1`, `x2`, are the contextual observable variables. Other formulas may be
#' \code{y ~ x1 + x2} for the model without contextual effects, \code{y ~ -1 + x1 + x2 | x1 + x2} for the model
#' without intercept, or \code{y ~ x1 + x2 | x2 + x3} to allow the contextual variables to differ from the individual variables.
#' @param contextual (optional) logical; if true, this means that all individual variables will be set as contextual variables. Setting the
#' `formula` as `y ~ x1 + x2` and `contextual` as `TRUE` is equivalent to setting the formula as `y ~ x1 + x2 | x1 + x2`.
#' @param Glist the adjacency matrix or list of sub-adjacency matrices.
#' @param theta0 (optional) starting value of \eqn{\theta = (\lambda, \beta, \gamma, \sigma)}. The parameter \eqn{\gamma} should be removed if the model
#' does not contain contextual effects (see details).
#' @param yb0 (optional) expectation of y.
#' @param optimizer is either `fastlbfgs` (L-BFGS optimization method of the package \pkg{RcppNumerical}), `nlm` (referring to the function \link[stats]{nlm}), or `optim` (referring to the function \link[stats]{optim}).
#' Other arguments of these functions, such as `control` and `method`, can be defined through the argument `opt.ctr`.
#' @param npl.ctr list of controls for the NPL method (see \code{\link{cdnet}}).
#' @param opt.ctr list of arguments to be passed in `optim_lbfgs` of the package \pkg{RcppNumerical}, \link[stats]{nlm} or \link[stats]{optim} (the solver set in `optimizer`), such as `maxit`, `eps_f`, `eps_g`, `control`, `method`, ...
#' @param print a Boolean indicating if the estimate should be printed at each step.
#' @param cov a Boolean indicating if the covariance should be computed.
#' @param RE a Boolean indicating whether the model is estimated under rational expectations or not.
#' @param data an optional data frame, list or environment (or object coercible by \link[base]{as.data.frame} to a data frame) containing the variables
#' in the model. If not found in data, the variables are taken from \code{environment(formula)}, typically the environment from which `sart` is called.
#' @description
#' `sart` is used to estimate peer effects on censored data (see details). The model is presented in Xu and Lee (2015).
#' @return A list consisting of:
#' \item{info}{list of general information on the model.}
#' \item{estimate}{Maximum Likelihood (ML) estimator.}
#' \item{yb}{ybar (see details), expectation of y.}
#' \item{Gyb}{average of the expectation of y among friends.}
#' \item{cov}{list of covariance matrices.}
#' \item{details}{outputs as returned by the optimizer.}
#' @details
#' ## Model
#' The left-censored variable \eqn{\mathbf{y}}{y} is generated from a latent variable \eqn{\mathbf{y}^*}{ys}.
#' The latent variable is given for all i as
#' \deqn{y_i^* = \lambda \mathbf{g}_i y + \mathbf{x}_i'\beta + \mathbf{g}_i\mathbf{X}\gamma + \epsilon_i,}{ys_i = \lambda g_i*y + x_i'\beta + g_i*X\gamma + \epsilon_i,}
#' where \eqn{\epsilon_i \sim N(0, \sigma^2)}{\epsilon_i --> N(0, \sigma^2)}.\cr
#' The censored variable \eqn{y_i} is then defined such that \eqn{y_i = 0} if
#' \eqn{y_i^* \leq 0}{ys_i \le 0} and \eqn{y_i = y_i^*}{y_i = ys_i} otherwise.
#' @seealso \code{\link{sar}}, \code{\link{cdnet}}, \code{\link{simsart}}.
#' @references
#' Xu, X., & Lee, L. F. (2015). Maximum likelihood estimation of a spatial autoregressive Tobit model. \emph{Journal of Econometrics}, 188(1), 264-280, \doi{10.1016/j.jeconom.2015.05.004}.
#' @examples
#' \donttest{
#' # Groups' size
#' M <- 5 # Number of sub-groups
#' nvec <- round(runif(M, 100, 1000))
#' n <- sum(nvec)
#'
#' # Parameters
#' lambda <- 0.4
#' beta <- c(2, -1.9, 0.8)
#' gamma <- c(1.5, -1.2)
#' sigma <- 1.5
#' theta <- c(lambda, beta, gamma, sigma)
#'
#' # X
#' X <- cbind(rnorm(n, 1, 1), rexp(n, 0.4))
#'
#' # Network
#' Glist <- list()
#'
#' for (m in 1:M) {
#'   nm <- nvec[m]
#'   Gm <- matrix(0, nm, nm)
#'   max_d <- 30
#'   for (i in 1:nm) {
#'     tmp <- sample((1:nm)[-i], sample(0:max_d, 1))
#'     Gm[i, tmp] <- 1
#'   }
#'   rs <- rowSums(Gm); rs[rs == 0] <- 1
#'   Gm <- Gm/rs
#'   Glist[[m]] <- Gm
#' }
#'
#'
#' # data
#' data <- data.frame(x1 = X[,1], x2 = X[,2])
#'
#' rm(list = ls()[!(ls() %in% c("Glist", "data", "theta"))])
#'
#' ytmp <- simsart(formula = ~ x1 + x2 | x1 + x2, Glist = Glist,
#'                 theta = theta, data = data)
#'
#' y <- ytmp$y
#'
#' # plot histogram
#' hist(y)
#'
#' opt.ctr <- list(method = "Nelder-Mead",
#'                 control = list(abstol = 1e-16, reltol = 1e-11, maxit = 5e3))
#' data <- data.frame(yt = y, x1 = data$x1, x2 = data$x2)
#' rm(list = ls()[!(ls() %in% c("Glist", "data"))])
#'
#' out <- sart(formula = yt ~ x1 + x2, optimizer = "nlm",
#'             contextual = TRUE, Glist = Glist, data = data)
#' summary(out)
#' }
#' @importFrom stats dnorm
#' @importFrom stats pnorm
#' @export
sart <- function(formula, contextual, Glist, theta0 = NULL, yb0 = NULL, optimizer = "fastlbfgs",
                 npl.ctr = list(), opt.ctr = list(), print = TRUE, cov = TRUE, RE = FALSE, data) {
  stopifnot(optimizer %in% c("fastlbfgs", "optim", "nlm"))
  if(!RE & optimizer == "fastlbfgs"){
    stop("fastlbfgs is only implemented for the rational expectation model in this version.
Use another solver.") } env.formula <- environment(formula) # controls npl.print <- print npl.tol <- npl.ctr$tol npl.maxit <- npl.ctr$maxit #size if (missing(contextual)) { contextual <- FALSE } if (!is.list(Glist)) { Glist <- list(Glist) } M <- length(Glist) nvec <- unlist(lapply(Glist, nrow)) n <- sum(nvec) igr <- matrix(c(cumsum(c(0, nvec[-M])), cumsum(nvec) - 1), ncol = 2) f.t.data <- formula.to.data(formula, contextual, Glist, M, igr, data, theta0 = NULL) formula <- f.t.data$formula y <- f.t.data$y X <- f.t.data$X coln <- c("lambda", colnames(X)) K <- ncol(X) # variables indpos <- (y > 1e-323) indzero <- !indpos idpos <- which(indpos) - 1 idzero <- which(indzero) - 1 ylist <- lapply(1:M, function(x) y[(igr[x,1]:igr[x,2]) + 1]) idposlis <- lapply(ylist, function(w) which(w > 0)) npos <- unlist(lapply(idposlis, length)) thetat <- NULL if (!is.null(theta0)) { if(length(theta0) != (K + 2)) { stop("Length of theta0 is not suited.") } thetat <- c(log(theta0[1]/(1 -theta0[1])), theta0[2:(K+1)], log(theta0[K+2])) } else { Xtmp <- cbind(f.t.data$Gy, X) b <- solve(t(Xtmp)%*%Xtmp, t(Xtmp)%*%y) s <- sqrt(sum((y - Xtmp%*%b)^2)/n) thetat <- c(log(max(b[1]/(1 - b[1]), 0.01)), b[-1], log(s)) } llh <- NULL resTO <- list() covt <- NULL covm <- NULL var.comp <- NULL t <- NULL ybt <- NULL Gybt <- NULL theta <- NULL Ztlambda <- NULL if(RE){ # yb ybt <- rep(0, n) if (!is.null(yb0)) { ybt <- yb0 } # Gybt Gybt <- unlist(lapply(1:M, function(x) Glist[[x]] %*% ybt[(igr[x,1]:igr[x,2])+1])) if (is.null(npl.print)) { npl.print <- TRUE } if (is.null(npl.tol)) { npl.tol <- 1e-4 } if (is.null(npl.maxit)) { npl.maxit <- 500L } # other variables cont <- TRUE t <- 0 REt <- NULL llh <- 0 par0 <- NULL par1 <- NULL like <- NULL yidpos <- y[indpos] ctr <- c(list("yidpos" = yidpos, "Gyb" = Gybt, "X" = X, "npos" = sum(npos), "idpos" = idpos, "idzero" = idzero, "K" = K), opt.ctr) if (optimizer == "fastlbfgs"){ ctr <- c(ctr, list(par = thetat)); optimizer = "sartLBFGS" par0 <- "par" par1 <- "par" like <- "value" } else if (optimizer == "optim") { ctr <- c(ctr, list(fn = foptimTBT_NPL, par = thetat)) par0 <- "par" par1 <- "par" like <- "value" } else { ctr <- c(ctr, list(f = foptimTBT_NPL, p = thetat)) par0 <- "p" par1 <- "estimate" like <- "minimum" } if(npl.print) { while(cont) { # tryCatch({ ybt0 <- ybt + 0 #copy in different memory # compute theta # print(optimizer) REt <- do.call(get(optimizer), ctr) thetat <- REt[[par1]] llh <- -REt[[like]] theta <- c(1/(1 + exp(-thetat[1])), thetat[2:(K + 1)], exp(thetat[K + 2])) # compute y fLTBT_NPL(ybt, Gybt, Glist, X, thetat, igr, M, n, K) # distance # dist <- max(abs(c(ctr[[par0]]/thetat, ybt0/(ybt + 1e-50)) - 1), na.rm = TRUE) dist <- max(abs(c((ctr[[par0]] - thetat)/thetat, (ybt0 - ybt)/ybt)), na.rm = TRUE) cont <- (dist > npl.tol & t < (npl.maxit - 1)) t <- t + 1 REt$dist <- dist ctr[[par0]] <- thetat resTO[[t]] <- REt cat("---------------\n") cat(paste0("Step : ", t), "\n") cat(paste0("Distance : ", round(dist,3)), "\n") cat(paste0("Likelihood : ", round(llh,3)), "\n") cat("Estimate:", "\n") print(theta) # }, # error = function(e){ # cat("** Non-convergence ** Redefining theta and computing a new yb\n") # thetat[1] <- -4.5 # fnewybTBT(ybt, Gybt, Glist, igr, M, X, thetat, K, n, npl.tol, npl.maxit) # cont <- TRUE # t <- t + 1 # ctr[[par0]] <- thetat # resTO[[t]] <- NULL # }) } } else { while(cont) { # tryCatch({ ybt0 <- ybt + 0 #copy in different memory # compute theta REt <- do.call(get(optimizer), ctr) thetat <- REt[[par1]] # compute y fLTBT_NPL(ybt, Gybt, Glist, X, thetat, igr, 
M, n, K) # distance # dist <- max(abs(c(ctr[[par0]]/thetat, ybt0/(ybt + 1e-50)) - 1), na.rm = TRUE) dist <- max(abs(c((ctr[[par0]] - thetat)/thetat, (ybt0 - ybt)/ybt)), na.rm = TRUE) cont <- (dist > npl.tol & t < (npl.maxit - 1)) t <- t + 1 REt$dist <- dist ctr[[par0]] <- thetat resTO[[t]] <- REt # }, # error = function(e){ # thetat[1] <- -4.5 # fnewybTBT(ybt, Gybt, Glist, igr, M, X, thetat, K, n, npl.tol, npl.maxit) # cont <- TRUE # t <- t + 1 # ctr[[par0]] <- thetat # resTO[[t]] <- NULL # }) } llh <- -REt[[like]] theta <- c(1/(1 + exp(-thetat[1])), thetat[2:(K +1)], exp(thetat[K + 2])) } if (npl.maxit == t) { warning("The maximum number of iterations of the NPL algorithm has been reached.") } covt <- fcovSTI(n = n, Gyb = Gybt, theta = thetat, X = X, K = K, G = Glist, igroup = igr, ngroup = M, ccov = cov) } else{ G2list <- lapply(1:M, function(w) Glist[[w]][idposlis[[w]], idposlis[[w]]]) Gy <- unlist(lapply(1:M, function(w) Glist[[w]] %*% ylist[[w]])) I2list <- lapply(npos, diag) Ilist <- lapply(nvec, diag) Wlist <- lapply(1:M, function(x) (indpos[(igr[x,1]:igr[x,2]) + 1] %*% t(indpos[(igr[x,1]:igr[x,2]) + 1]))*Glist[[x]]) if(exists("alphatde")) rm("alphatde") if(exists("logdetA2")) rm("logdetA2") alphatde <- Inf logdetA2 <- 0 # arguments ctr <- c(list("X" = X, "G2" = G2list, "I2" = I2list, "K" = K, "y" = y, "Gy" = Gy, "idpos" = idpos, "idzero" = idzero, "npos" = sum(npos), "ngroup" = M, "alphatilde" = alphatde, "logdetA2" = logdetA2, "n" = n, "I" = Ilist, "W" = Wlist, "igroup" = igr), opt.ctr) if (optimizer == "optim") { ctr <- c(ctr, list(par = thetat)) par1 <- "par" like <- "value" if (npl.print) { ctr <- c(ctr, list(fn = foptimTobit)) } else { ctr <- c(ctr, list(fn = foptimTobit0)) } } else { ctr <- c(ctr, list(p = thetat)) par1 <- "estimate" like <- "minimum" if (npl.print) { ctr <- c(ctr, list(f = foptimTobit)) } else { ctr <- c(ctr, list(f = foptimTobit0)) } } resTO <- do.call(get(optimizer), ctr) theta <- resTO[[par1]] covt <- fcovSTC(theta = theta, X = X, G2 = G2list, I = Ilist, W = Wlist, K = K, n = n, y = y, Gy = Gy, indzero = indzero, indpos = indpos, igroup = igr, ngroup = M, ccov = cov) theta <- c(1/(1 + exp(-theta[1])), theta[2:(K + 1)], exp(theta[K + 2])) llh <- -resTO[[like]] rm(list = c("alphatde", "logdetA2")) } names(theta) <- c(coln, "sigma") # Marginal effects meffects <- c(covt$meffects) var.comp <- covt$var.comp covm <- covt$covm covt <- covt$covt colnME <- coln if("(Intercept)" %in% coln) { colnME <- coln[-2] meffects <- meffects[-2] covm <- covm[-2, -2] } names(meffects) <- colnME if(!is.null(covt)) { colnames(covt) <- c(coln, "logsigma") rownames(covt) <- c(coln, "logsigma") colnames(covm) <- colnME rownames(covm) <- colnME if(!is.null(var.comp$Sigma)){ rownames(var.comp$Sigma) <- c(coln, "logsigma") rownames(var.comp$Omega) <- c(coln, "logsigma") colnames(var.comp$Sigma) <- c(coln, "logsigma") colnames(var.comp$Omega) <- c(coln, "logsigma") } } INFO <- list("M" = M, "n" = n, "formula" = formula, "nlinks" = unlist(lapply(Glist, function(u) sum(u > 0))), "censured" = sum(indzero), "uncensured" = n - sum(indzero), "log.like" = llh, "npl.iter" = t) out <- list("info" = INFO, "estimate" = list(theta = theta, marg.effects = meffects), "yb" = ybt, "Gyb" = Gybt, "cov" = list(parms = covt, marg.effects = covm, var.comp = var.comp), "details" = resTO) class(out) <- "sart" out } #' @title Summarize sart Model #' @description Summary and print methods for the class `sart` as returned by the function \link{sart}. 
#' @param object an object of class `sart`, output of the function \code{\link{sart}}.
#' @param x an object of class `summary.sart`, output of the function \code{\link{summary.sart}}
#' or class `sart`, output of the function \code{\link{sart}}.
#' @param Glist adjacency matrix or list of sub-adjacency matrices. This is not necessary if the covariance was already computed by \code{\link{sart}}.
#' @param data dataframe containing the explanatory variables. This is not necessary if the covariance was already computed by \code{\link{sart}}.
#' @param ... further arguments passed to or from other methods.
#' @return A list of the same objects in `object`.
#' @export
"summary.sart" <- function(object, Glist, data, ...) {
  stopifnot(class(object) == "sart")
  out <- c(object, list("..." = ...))
  if(is.null(object$cov$parms)){
    env.formula <- environment(object$info$formula)
    thetat <- object$estimate$theta
    thetat <- c(log(thetat[1]/(1 - thetat[1])), thetat[-c(1, length(thetat))], log(thetat[length(thetat)]))
    Gybt <- object$Gyb
    contextual <- FALSE
    formula <- object$info$formula
    if (!is.list(Glist)) {
      Glist <- list(Glist)
    }
    M <- length(Glist)
    nvec <- unlist(lapply(Glist, nrow))
    n <- sum(nvec)
    igr <- matrix(c(cumsum(c(0, nvec[-M])), cumsum(nvec) - 1), ncol = 2)
    f.t.data <- formula.to.data(formula, contextual, Glist, M, igr, data, theta0 = thetat)
    formula <- f.t.data$formula
    y <- f.t.data$y
    X <- f.t.data$X
    K <- ncol(X)
    coln <- c("lambda", colnames(X))
    tmp <- NULL
    if(is.null(Gybt)){
      indpos <- (y > 1e-323)
      indzero <- !indpos
      idpos <- which(indpos) - 1
      idzero <- which(indzero) - 1
      ylist <- lapply(1:M, function(x) y[(igr[x,1]:igr[x,2]) + 1])
      idposlis <- lapply(ylist, function(w) which(w > 0))
      npos <- unlist(lapply(idposlis, length))
      G2list <- lapply(1:M, function(w) Glist[[w]][idposlis[[w]], idposlis[[w]]])
      Gy <- unlist(lapply(1:M, function(w) Glist[[w]] %*% ylist[[w]]))
      I2list <- lapply(npos, diag)
      Ilist <- lapply(nvec, diag)
      Wlist <- lapply(1:M, function(x) (indpos[(igr[x,1]:igr[x,2]) + 1] %*% t(indpos[(igr[x,1]:igr[x,2]) + 1]))*Glist[[x]])
      tmp <- fcovSTC(theta = thetat, X = X, G2 = G2list, I = Ilist, W = Wlist, K = K, n = n,
                     y = y, Gy = Gy, indzero = indzero, indpos = indpos, igroup = igr,
                     ngroup = M, ccov = TRUE)
    } else {
      tmp <- fcovSTI(n = n, Gyb = Gybt, theta = thetat, X = X, K = K, G = Glist,
                     igroup = igr, ngroup = M, ccov = TRUE)
    }
    meffects <- c(tmp$meffects)
    var.comp <- tmp$var.comp
    covt <- tmp$covt
    covm <- tmp$covm
    Rmax <- tmp$Rmax
    colnME <- coln
    if("(Intercept)" %in% coln) {
      colnME <- coln[-2]
      meffects <- meffects[-2]
      covm <- covm[-2, -2]
    }
    names(meffects) <- colnME
    if(!is.null(covt)) {
      colnames(covt) <- c(coln, "logsigma")
      rownames(covt) <- c(coln, "logsigma")
      colnames(covm) <- colnME
      rownames(covm) <- colnME
      if(!is.null(var.comp$Sigma)){
        rownames(var.comp$Sigma) <- c(coln, "logsigma")
        rownames(var.comp$Omega) <- c(coln, "logsigma")
        colnames(var.comp$Sigma) <- c(coln, "logsigma")
        colnames(var.comp$Omega) <- c(coln, "logsigma")
      }
    }
    out$cov <- list(parms = covt, marg.effects = covm, var.comp = var.comp)
  }
  class(out) <- "summary.sart"
  out
}

#' @rdname summary.sart
#' @export
"print.summary.sart" <- function(x, ...)
{ stopifnot(class(x) == "summary.sart") M <- x$info$M n <- x$info$n estimate <- x$estimate$theta iteration <- x$info$npl.iter RE <- !is.null(iteration) formula <- x$info$formula K <- length(estimate) coef <- estimate[-K] meff <- x$estimate$marg.effects std <- sqrt(diag(x$cov$parms)[-K]) std.meff <- sqrt(diag(x$cov$marg.effects)) sigma <- estimate[K] llh <- x$info$log.like censored <- x$info$censured uncensored <- x$info$uncensured tmp <- fcoefficients(coef, std) out_print <- tmp$out_print out <- tmp$out out_print <- c(list(out_print), x[-(1:6)], list(...)) tmp.meff <- fcoefficients(meff, std.meff) out_print.meff <- tmp.meff$out_print out.meff <- tmp.meff$out out_print.meff <- c(list(out_print.meff), x[-(1:6)], list(...)) nfr <- x$info$nlinks cat("sart Model", ifelse(RE, "with Rational Expectation", ""), "\n\n") cat("Call:\n") print(formula) if(RE){ cat("\nMethod: Nested pseudo-likelihood (NPL) \nIteration: ", iteration, "\n\n") } else{ cat("\nMethod: Maximum Likelihood (ML)", "\n\n") } cat("Network:\n") cat("Number of groups : ", M, "\n") cat("Sample size : ", n, "\n") cat("Average number of friends: ", sum(nfr)/n, "\n\n") cat("No. censored : ", paste0(censored, "(", round(censored/n*100, 0), "%)"), "\n") cat("No. uncensored : ", paste0(uncensored, "(", round(uncensored/n*100, 0), "%)"), "\n\n") cat("Coefficients:\n") do.call("print", out_print) cat("\nMarginal Effects:\n") do.call("print", out_print.meff) cat("---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n") cat("sigma: ", sigma, "\n") cat("log likelihood: ", llh, "\n") invisible(x) } #' @rdname summary.sart #' @export "print.sart" <- function(x, ...) { stopifnot(class(x) == "sart") print(summary(x, ...)) } #' @rdname summary.sart #' @importFrom stats cov #' @export "summary.sarts" <- function(object, ...) { stopifnot(class(object) %in% c("list", "sarts", "summary.sarts")) lclass <- unique(unlist(lapply(object, class))) if (!all(lclass %in%c("summary.sart"))) { stop("All the components in `object` should be from `summary.sart` class") } nsim <- length(object) K <- length(object[[1]]$estimate$theta) coef <- do.call("rbind", lapply(object, function(z) t(c(z$estimate$theta)))) meff <- do.call("rbind", lapply(object, function(z) t(z$estimate$marg.effects))) estimate <- colSums(coef)/nsim meffects <- colSums(meff)/nsim RE <- !is.null(object[[1]]$yb) vcoef2 <- Reduce("+", lapply(object, function(z) z$cov$parms))/nsim vmeff2 <- Reduce("+", lapply(object, function(z) z$cov$marg.effects))/nsim vcoef1 <- cov(coef) vmeff1 <- cov(meff) vcoef <- vcoef1 + vcoef2 vmeff <- vmeff1 + vmeff2 llh <- unlist(lapply(object, function(z) z$info$log.like)) llh <- c("min" = min(llh), "mean" = mean(llh), "max" = max(llh)) M <- object[[1]]$info$M n <- object[[1]]$info$n INFO <- list("M" = M, "n" = n, "log.like" = llh, "Rat.Exp" = RE, "simulation" = nsim) out <- list("info" = INFO, "estimate" = list(theta = estimate, marg.effects = meffects), "cov" = list(parms = vcoef, marg.effects = vmeff), ... = ...) class(out) <- "summary.sarts" out } #' @rdname summary.sart #' @importFrom stats cov #' @export "print.summary.sarts" <- function(x, ...) 
{ stopifnot(class(x) %in% c("summary.sarts")) nsim <- x$info$simulation coef <- x$estimate$theta meffects <- x$estimate$marg.effects K <- length(coef) sigma <- tail(coef, 1) coef <- head(coef, K - 1) RE <- x$info$Rat.Exp vcoef <- x$cov$parms vmeff <- x$cov$marg.effects llh <- x$info$log.like M <- x$info$M n <- x$info$n std <- sqrt(head(diag(vcoef), K-1)) std.meff <- sqrt(diag(vmeff)) tmp <- fcoefficients(coef, std) out_print <- tmp$out_print out <- tmp$out tmp.meff <- fcoefficients(meffects, std.meff) out_print.meff <- tmp.meff$out_print out.meff <- tmp.meff$out out_print <- c(list(out_print), x[-c(1:3)], list(...)) out_print.meff <- c(list(out_print.meff), x[-c(1:3)], list(...)) cat("SART Model", ifelse(RE, "with Rational Expectation", ""), "\n\n") cat("Method: Replication of ") if(RE){ cat("Nested pseudo-likelihood (NPL)") } else{ cat("Maximum Likelihood (ML)") } cat("\nReplication: ", nsim, "\n\n") cat("Coefficients:\n") do.call("print", out_print) cat("\nMarginal Effects:\n") do.call("print", out_print.meff) cat("---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n") cat("sigma: ", sigma, "\n") cat("log likelihood: \n") print(llh) invisible(x) } #' @rdname summary.sart #' @importFrom stats cov #' @export "print.sarts" <- function(x, ...) { stopifnot(class(x) %in% c("sarts", "list")) print(summary.sarts(x, ...)) }
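# Intended workflow for the replication classes above (a sketch, assuming the
# objects `Glist`, `data` and `theta` built in the @examples of sart()): each
# replication simulates data with simsart(), estimates the model with sart()
# and stores the summary; summary.sarts() then averages the estimates and adds
# the between-replication covariance to the average within-replication one.
# nsim <- 10L
# sims <- lapply(1:nsim, function(s) {
#   ys  <- simsart(formula = ~ x1 + x2 | x1 + x2, Glist = Glist, theta = theta,
#                  data = data)
#   dat <- data.frame(yt = ys$y, x1 = data$x1, x2 = data$x2)
#   summary(sart(formula = yt ~ x1 + x2, contextual = TRUE, Glist = Glist,
#                optimizer = "nlm", print = FALSE, data = dat))
# })
# class(sims) <- "sarts"
# print(sims)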
/scratch/gouwar.j/cran-all/cranData/CDatanet/R/SARTestim.R
#' @title Simulate data from the Tobit Model with Social Interactions
#' @param formula an object of class \link[stats]{formula}: a symbolic description of the model. The `formula` should be specified as, for example, \code{y ~ x1 + x2 | x1 + x2}
#' where `y` is the endogenous vector, the listed variables before the pipe, `x1`, `x2`, are the individual exogenous variables and
#' the listed variables after the pipe, `x1`, `x2`, are the contextual observable variables. Other formulas may be
#' \code{y ~ x1 + x2} for the model without contextual effects, \code{y ~ -1 + x1 + x2 | x1 + x2} for the model
#' without intercept, or \code{y ~ x1 + x2 | x2 + x3} to allow the contextual variables to differ from the individual variables.
#' @param contextual (optional) logical; if true, this means that all individual variables will be set as contextual variables. Setting the
#' `formula` as `y ~ x1 + x2` and `contextual` as `TRUE` is equivalent to setting the formula as `y ~ x1 + x2 | x1 + x2`.
#' @param Glist the adjacency matrix or list of sub-adjacency matrices.
#' @param theta the parameter value as \eqn{\theta = (\lambda, \beta, \gamma, \sigma)}. The parameter \eqn{\gamma} should be removed if the model
#' does not contain contextual effects (see details).
#' @param tol the tolerance value used in the Fixed Point Iteration Method to compute `y`. The process stops if the \eqn{L_1}{L} distance
#' between two consecutive values of `y` is less than `tol`.
#' @param maxit the maximal number of iterations in the Fixed Point Iteration Method.
#' @param RE a Boolean indicating whether the model is simulated under rational expectations or not.
#' @param data an optional data frame, list or environment (or object coercible by \link[base]{as.data.frame} to a data frame) containing the variables
#' in the model. If not found in data, the variables are taken from \code{environment(formula)}, typically the environment from which `simsart` is called.
#' @description
#' `simsart` is used to simulate censored data with social interactions (see details). The model is presented in Xu and Lee (2015).
#' @details
#' The left-censored variable \eqn{\mathbf{y}}{y} is generated from a latent variable \eqn{\mathbf{y}^*}{ys}.
#' The latent variable is given for all i as
#' \deqn{y_i^* = \lambda \mathbf{g}_i y + \mathbf{x}_i'\beta + \mathbf{g}_i\mathbf{X}\gamma + \epsilon_i,}{ys_i = \lambda g_i*y + x_i'\beta + g_i*X\gamma + \epsilon_i,}
#' where \eqn{\epsilon_i \sim N(0, \sigma^2)}{\epsilon_i --> N(0, \sigma^2)}.\cr
#' The censored variable \eqn{y_i} is then defined such that \eqn{y_i = 0} if
#' \eqn{y_i^* \leq 0}{ys_i \le 0} and \eqn{y_i = y_i^*}{y_i = ys_i} otherwise.
#' @seealso \code{\link{sart}}, \code{\link{simsar}}, \code{\link{simcdnet}}.
#' @return A list consisting of:
#' \item{yst}{ys (see details), the latent variable.}
#' \item{y}{the censored variable.}
#' \item{yb}{expectation of y under rational expectations.}
#' \item{Gy}{the average of y among friends.}
#' \item{Gyb}{average of the expectation of y among friends under rational expectations.}
#' \item{marg.effects}{the marginal effects.}
#' \item{iteration}{the number of iterations performed for each sub-network in the Fixed Point Iteration Method.}
#' @references
#' Xu, X., & Lee, L. F. (2015). Maximum likelihood estimation of a spatial autoregressive Tobit model. \emph{Journal of Econometrics}, 188(1), 264-280, \doi{10.1016/j.jeconom.2015.05.004}.
#' @examples
#' # Groups' size
#' M <- 5 # Number of sub-groups
#' nvec <- round(runif(M, 100, 1000))
#' n <- sum(nvec)
#'
#' # Parameters
#' lambda <- 0.4
#' beta <- c(2, -1.9, 0.8)
#' gamma <- c(1.5, -1.2)
#' sigma <- 1.5
#' theta <- c(lambda, beta, gamma, sigma)
#'
#' # X
#' X <- cbind(rnorm(n, 1, 1), rexp(n, 0.4))
#'
#' # Network
#' Glist <- list()
#'
#' for (m in 1:M) {
#'   nm <- nvec[m]
#'   Gm <- matrix(0, nm, nm)
#'   max_d <- 30
#'   for (i in 1:nm) {
#'     tmp <- sample((1:nm)[-i], sample(0:max_d, 1))
#'     Gm[i, tmp] <- 1
#'   }
#'   rs <- rowSums(Gm); rs[rs == 0] <- 1
#'   Gm <- Gm/rs
#'   Glist[[m]] <- Gm
#' }
#'
#'
#' # data
#' data <- data.frame(x1 = X[,1], x2 = X[,2])
#'
#' rm(list = ls()[!(ls() %in% c("Glist", "data", "theta"))])
#'
#' ytmp <- simsart(formula = ~ x1 + x2 | x1 + x2, Glist = Glist,
#'                 theta = theta, data = data)
#'
#' y <- ytmp$y
#'
#' # plot histogram
#' hist(y)
#'
#' @importFrom Rcpp sourceCpp
#' @export
simsart <- function(formula, contextual, Glist, theta, tol = 1e-15, maxit = 500,
                    RE = FALSE, data) {
  if (missing(contextual)) {
    contextual <- FALSE
  }
  if (!is.list(Glist)) {
    Glist <- list(Glist)
  }
  M <- length(Glist)
  nvec <- unlist(lapply(Glist, nrow))
  n <- sum(nvec)
  igr <- matrix(c(cumsum(c(0, nvec[-M])), cumsum(nvec) - 1), ncol = 2)
  f.t.data <- formula.to.data(formula, contextual, Glist, M, igr, data, "sim", 0)
  X <- f.t.data$X
  K <- length(theta)
  if(K != (ncol(X) + 2)) {
    stop("The length of theta is not compatible with the number of explanatory variables.")
  }
  lambda <- theta[1]
  b <- theta[2:(K - 1)]
  sigma <- theta[K]
  xb <- c(X %*% b)
  eps <- rnorm(n, 0, sigma)
  yst <- numeric(n)
  y <- NULL
  Gy <- NULL
  yb <- NULL
  Gyb <- NULL
  t <- NULL
  Ztl <- rep(0, n)
  if(RE){
    yb <- rep(0, n)
    Gyb <- rep(0, n)
    t <- fybtbit(yb, Gyb, Glist, igr, M, xb, lambda, sigma, n, tol, maxit)
    Ztl <- lambda*Gyb + xb
    yst <- Ztl + eps
    y <- yst*(yst > 0)
  } else {
    y <- rep(0, n)
    Gy <- rep(0, n)
    t <- fyTobit(yst, y, Gy, Ztl, Glist, eps, igr, M, xb, n, lambda, tol, maxit)
  }
  # marginal effects
  coln <- c("lambda", colnames(X))
  thetaWI <- head(theta, K - 1)
  if("(Intercept)" %in% coln) {
    thetaWI <- thetaWI[-2]
    coln <- coln[-2]
  }
  meffects <- thetaWI*mean(pnorm(Ztl/sigma))
  names(meffects) <- coln
  list("yst" = yst,
       "y" = y,
       "yb" = yb,
       "Gy" = Gy,
       "Gyb" = Gyb,
       "marg.effects" = meffects,
       "iteration" = t)
}
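# A minimal consistency check (sketch): by the censoring rule in the details,
# y_i = ys_i * 1{ys_i > 0}, which can be verified directly on the simulator
# output. Objects are as in the @examples block above; illustrative code only.
# ytmp <- simsart(formula = ~ x1 + x2 | x1 + x2, Glist = Glist, theta = theta,
#                 data = data)
# all.equal(ytmp$y, ytmp$yst * (ytmp$yst > 0))  # expected TRUE
# mean(ytmp$y == 0)                             # empirical censoring rate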
/scratch/gouwar.j/cran-all/cranData/CDatanet/R/SARTsim.R
#' @title Estimate SAR model
#' @param formula an object of class \link[stats]{formula}: a symbolic description of the model. The `formula` should be specified as, for example, \code{y ~ x1 + x2 | x1 + x2}
#' where `y` is the endogenous vector, the listed variables before the pipe, `x1`, `x2`, are the individual exogenous variables and
#' the listed variables after the pipe, `x1`, `x2`, are the contextual observable variables. Other formulas may be
#' \code{y ~ x1 + x2} for the model without contextual effects, \code{y ~ -1 + x1 + x2 | x1 + x2} for the model
#' without intercept, or \code{y ~ x1 + x2 | x2 + x3} to allow the contextual variables to differ from the individual variables.
#' @param contextual (optional) logical; if true, this means that all individual variables will be set as contextual variables. Setting the
#' `formula` as `y ~ x1 + x2` and `contextual` as `TRUE` is equivalent to setting the formula as `y ~ x1 + x2 | x1 + x2`.
#' @param Glist the adjacency matrix or list of sub-adjacency matrices.
#' @param lambda0 (optional) starting value of \eqn{\lambda}.
#' @param fixed.effects logical; if true, group heterogeneity is included as fixed effects.
#' @param optimizer is either `nlm` (referring to the function \link[stats]{nlm}) or `optim` (referring to the function \link[stats]{optim}).
#' Other arguments of these functions, such as the control values and the method, can be defined through the argument `opt.ctr`.
#' @param opt.ctr list of arguments of \link[stats]{nlm} or \link[stats]{optim} (the one set in `optimizer`) such as control, method, ...
#' @param print a Boolean indicating if the estimate should be printed at each step.
#' @param cov a Boolean indicating if the covariance should be computed.
#' @param data an optional data frame, list or environment (or object coercible by \link[base]{as.data.frame} to a data frame) containing the variables
#' in the model. If not found in data, the variables are taken from \code{environment(formula)}, typically the environment from which `sar` is called.
#' @description
#' `sar` is used to estimate peer effects on continuous variables (see details). The model is presented in Lee (2004).
#' @details
#' ## Model
#' The variable \eqn{\mathbf{y}}{y} is given for all i as
#' \deqn{y_i = \lambda \mathbf{g}_i y + \mathbf{x}_i'\beta + \mathbf{g}_i\mathbf{X}\gamma + \epsilon_i,}{y_i = \lambda g_i*y + x_i'\beta + g_i*X\gamma + \epsilon_i,}
#' where \eqn{\epsilon_i \sim N(0, \sigma^2)}{\epsilon_i --> N(0, \sigma^2)}.
#' @references
#' Lee, L. F. (2004). Asymptotic distributions of quasi-maximum likelihood estimators for spatial autoregressive models. \emph{Econometrica}, 72(6), 1899-1925, \doi{10.1111/j.1468-0262.2004.00558.x}.
#' @seealso \code{\link{sart}}, \code{\link{cdnet}}, \code{\link{simsar}}.
#' @examples #' \donttest{ #' # Groups' size #' M <- 5 # Number of sub-groups #' nvec <- round(runif(M, 100, 1000)) #' n <- sum(nvec) #' #' # Parameters #' lambda <- 0.4 #' beta <- c(2, -1.9, 0.8) #' gamma <- c(1.5, -1.2) #' sigma <- 1.5 #' theta <- c(lambda, beta, gamma, sigma) #' #' # X #' X <- cbind(rnorm(n, 1, 1), rexp(n, 0.4)) #' #' # Network #' Glist <- list() #' #' for (m in 1:M) { #' nm <- nvec[m] #' Gm <- matrix(0, nm, nm) #' max_d <- 30 #' for (i in 1:nm) { #' tmp <- sample((1:nm)[-i], sample(0:max_d, 1)) #' Gm[i, tmp] <- 1 #' } #' rs <- rowSums(Gm); rs[rs == 0] <- 1 #' Gm <- Gm/rs #' Glist[[m]] <- Gm #' } #' #' #' # data #' data <- data.frame(x1 = X[,1], x2 = X[,2]) #' #' rm(list = ls()[!(ls() %in% c("Glist", "data", "theta"))]) #' #' ytmp <- simsar(formula = ~ x1 + x2 | x1 + x2, Glist = Glist, #' theta = theta, data = data) #' #' y <- ytmp$y #' #' # plot histogram #' hist(y, breaks = max(y)) #' #' data <- data.frame(yt = y, x1 = data$x1, x2 = data$x2) #' rm(list = ls()[!(ls() %in% c("Glist", "data"))]) #' #' out <- sar(formula = yt ~ x1 + x2, contextual = TRUE, #' Glist = Glist, optimizer = "optim", data = data) #' summary(out) #' } #' @return A list consisting of: #' \item{info}{list of general information on the model.} #' \item{estimate}{Maximum Likelihood (ML) estimator.} #' \item{cov}{covariance matrix of the estimate.} #' \item{details}{outputs as returned by the optimizer.} #' @export sar <- function(formula, contextual, Glist, lambda0 = NULL, fixed.effects = FALSE, optimizer = "optim", opt.ctr = list(), print = TRUE, cov = TRUE, # RE = FALSE, data) { stopifnot(optimizer %in% c("optim", "nlm")) # if(RE & fixed.effects) stop("Rational expectations with fixed effects are not allowed in this version.") env.formula <- environment(formula) #size if (missing(contextual)) { contextual <- FALSE } if (!is.list(Glist)) { Glist <- list(Glist) } M <- length(Glist) nvec <- unlist(lapply(Glist, nrow)) n <- sum(nvec) igr <- matrix(c(cumsum(c(0, nvec[-M])), cumsum(nvec) - 1), ncol = 2) f.t.data <- formula.to.data(formula, contextual, Glist, M, igr, data, theta0 = NULL, fixed.effects = fixed.effects) formula <- f.t.data$formula y <- f.t.data$y Gy <- f.t.data$Gy X <- f.t.data$X coln <- c("lambda", colnames(X), "sigma") K <- ncol(X) # variables Nvec <- sapply(Glist, nrow) XX <- t(X)%*%X invXX <- solve(XX) Ilist <- lapply(1:M, function(w) diag(Nvec[w])) lambdat <- NULL if (!is.null(lambda0)) { lambdat <- log(lambda0/(1- lambda0)) } else { Xtmp <- cbind(f.t.data$Gy, X) b <- solve(t(Xtmp)%*%Xtmp, t(Xtmp)%*%y) lambdat <- log(max(b[1]/(1 - b[1]), 0.01)) } # arguments if ((length(opt.ctr) == 0) & optimizer == "optim") { opt.ctr <- list("method" = "Brent", "upper" = 37, "lower" = -710) } ctr <- c(list(X = X,invXX = invXX, G = Glist, I = Ilist, n = n, y = y, Gy = Gy, ngroup = M, FE = fixed.effects, print = print), opt.ctr) if (optimizer == "optim") { ctr <- c(ctr, list(par = lambdat, fn = foptimSAR)) par0 <- "par" par1 <- "par" like <- "value" } else { ctr <- c(ctr, list(p = lambdat, f = foptimSAR)) par0 <- "p" par1 <- "estimate" like <- "minimum" } resSAR <- do.call(get(optimizer), ctr) lambdat <- resSAR[[par1]] llh <- -resSAR[[like]] lambda <- 1/(1 + exp(-lambdat)) hessian <- jacobSAR(lambda, X, invXX, XX, y, n, Glist, Ilist, Gy, M, igr, cov, fixed.effects) beta <- hessian$beta sigma2 <- hessian$sigma2 covout <- NULL if(cov) { covout <- hessian$cov colnames(covout) <- coln rownames(covout) <- coln } theta <- c(lambda, beta, sqrt(sigma2)) names(theta) <- coln environment(formula) <- env.formula 
sdata <- list( "formula" = formula, "Glist" = deparse(substitute(Glist)), "nfriends" = unlist(lapply(Glist, function(u) sum(u > 0))) ) if (!missing(data)) { sdata <- c(sdata, list("data" = deparse(substitute(data)))) } INFO <- list("M" = M, "n" = n, "fixed.effects" = fixed.effects, "nlinks" = unlist(lapply(Glist, function(u) sum(u > 0))), "formula" = formula, "log.like" = llh) out <- list("info" = INFO, "estimate" = theta, "cov" = covout, "details" = resSAR) class(out) <- "sar" out } #' @title Summarize SAR Model #' @description Summary and print methods for the class `sar` as returned by the function \link{sar}. #' @param object an object of class `sar`, output of the function \code{\link{sar}}. #' @param x an object of class `summary.sar`, output of the function \code{\link{summary.sar}} or #' class `sar`, output of the function \code{\link{sar}}. #' @param ... further arguments passed to or from other methods. #' @return A list of the same objects in `object`. #' @param ... further arguments passed to or from other methods. #' @export "summary.sar" <- function(object, ...) { stopifnot(class(object) == "sar") out <- c(object, list("..." = ...)) if(is.null(object$cov)){ stop("Covariance was not computed") } class(out) <- "summary.sar" out } #' @rdname summary.sar #' @export "print.summary.sar" <- function(x, ...) { stopifnot(class(x) == "summary.sar") M <- x$info$M n <- x$info$n estimate <- x$estimate formula <- x$info$formula K <- length(estimate) coef <- estimate[-K] std <- sqrt(diag(x$cov[-K, -K, drop = FALSE])) sigma <- estimate[K] llh <- x$info$log.like tmp <- fcoefficients(coef, std) out_print <- tmp$out_print out <- tmp$out out_print <- c(list(out_print), x[-(1:4)], list(...)) nfr <- x$info$nlinks cat("SAR Model\n\n") cat("Call:\n") cat(paste0(formula, ", fixed.effects = ", x$info$fixed.effects), "\n") # print(formula) cat("\nMethod: Maximum Likelihood (ML)", "\n\n") cat("Network:\n") cat("Number of groups : ", M, "\n") cat("Sample size : ", n, "\n") cat("Average number of friends: ", sum(nfr)/n, "\n\n") cat("Coefficients:\n") do.call("print", out_print) cat("---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n") cat("sigma: ", sigma, "\n") cat("log likelihood: ", llh, "\n") invisible(x) } #' @rdname summary.sar #' @export "print.sar" <- function(x, ...) { stopifnot(class(x) == "sar") print(summary(x, ...)) } #' @rdname summary.sar #' @importFrom stats cov #' @export "summary.sars" <- function(object, ...) 
{
  stopifnot(class(object) %in% c("list", "sars", "summary.sars"))
  lclass <- unique(unlist(lapply(object, class)))
  if (!all(lclass %in% c("summary.sar"))) {
    stop("All the components in `object` should be from `summary.sar` class")
  }
  nsim <- length(object)
  # In `summary.sar` objects, `estimate` is a named vector (lambda, beta, gamma, sigma)
  # and `cov` is its covariance matrix; sar() reports no marginal effects.
  coef <- do.call("rbind", lapply(object, function(z) t(c(z$estimate))))
  estimate <- colSums(coef)/nsim
  vcoef2 <- Reduce("+", lapply(object, function(z) z$cov))/nsim
  vcoef1 <- cov(coef)
  vcoef <- vcoef1 + vcoef2
  llh <- unlist(lapply(object, function(z) z$info$log.like))
  llh <- c("min" = min(llh), "mean" = mean(llh), "max" = max(llh))
  M <- object[[1]]$info$M
  n <- object[[1]]$info$n
  INFO <- list("M" = M, "n" = n, "log.like" = llh, "simulation" = nsim)
  out <- list("info" = INFO, "estimate" = estimate, "cov" = vcoef, ... = ...)
  class(out) <- "summary.sars"
  out
}

#' @rdname summary.sar
#' @importFrom stats cov
#' @export
"print.summary.sars" <- function(x, ...) {
  stopifnot(class(x) %in% c("summary.sars"))
  nsim <- x$info$simulation
  coef <- x$estimate
  K <- length(coef)
  sigma <- tail(coef, 1)
  coef <- head(coef, K - 1)
  vcoef <- x$cov
  llh <- x$info$log.like
  M <- x$info$M
  n <- x$info$n
  std <- sqrt(head(diag(vcoef), K - 1))
  tmp <- fcoefficients(coef, std)
  out_print <- tmp$out_print
  out <- tmp$out
  out_print <- c(list(out_print), x[-c(1:3)], list(...))
  cat("SAR Model\n\n")
  cat("Method: Replication of Maximum Likelihood (ML) \nReplication: ", nsim, "\n\n")
  cat("Coefficients:\n")
  do.call("print", out_print)
  cat("---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n")
  cat("sigma: ", sigma, "\n")
  cat("log likelihood: \n")
  print(llh)
  invisible(x)
}

#' @rdname summary.sar
#' @importFrom stats cov
#' @export
"print.sars" <- function(x, ...) {
  stopifnot(class(x) %in% c("sars", "list"))
  print(summary.sars(x, ...))
}
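# Intended workflow for the replication classes above (a sketch, assuming the
# objects `Glist`, `data` and `theta` built in the @examples of sar()): run the
# estimation on several simulated datasets, store the summaries, and let
# summary.sars() pool them (average estimate, within- plus between-replication
# covariance). Illustrative code only.
# nsim <- 10L
# sims <- lapply(1:nsim, function(s) {
#   ys  <- simsar(formula = ~ x1 + x2 | x1 + x2, Glist = Glist, theta = theta,
#                 data = data)
#   dat <- data.frame(yt = ys$y, x1 = data$x1, x2 = data$x2)
#   summary(sar(formula = yt ~ x1 + x2, contextual = TRUE, Glist = Glist,
#               optimizer = "optim", print = FALSE, data = dat))
# })
# class(sims) <- "sars"
# print(sims)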
/scratch/gouwar.j/cran-all/cranData/CDatanet/R/SARestim.R
#' @title Simulate data from the linear-in-mean Model with Social Interactions
#' @param formula an object of class \link[stats]{formula}: a symbolic description of the model. The `formula` should be specified as, for example, \code{y ~ x1 + x2 | x1 + x2}
#' where `y` is the endogenous vector, the listed variables before the pipe, `x1`, `x2`, are the individual exogenous variables and
#' the listed variables after the pipe, `x1`, `x2`, are the contextual observable variables. Other formulas may be
#' \code{y ~ x1 + x2} for the model without contextual effects, \code{y ~ -1 + x1 + x2 | x1 + x2} for the model
#' without intercept, or \code{y ~ x1 + x2 | x2 + x3} to allow the contextual variables to differ from the individual variables.
#' @param contextual (optional) logical; if true, this means that all individual variables will be set as contextual variables. Setting the
#' `formula` as `y ~ x1 + x2` and `contextual` as `TRUE` is equivalent to setting the formula as `y ~ x1 + x2 | x1 + x2`.
#' @param Glist the adjacency matrix or list of sub-adjacency matrices.
#' @param theta the parameter value as \eqn{\theta = (\lambda, \beta, \gamma, \sigma)}. The parameter \eqn{\gamma} should be removed if the model
#' does not contain contextual effects (see details).
#' @param RE a Boolean indicating whether the model is simulated under rational expectations or not.
#' @param data an optional data frame, list or environment (or object coercible by \link[base]{as.data.frame} to a data frame) containing the variables
#' in the model. If not found in data, the variables are taken from \code{environment(formula)}, typically the environment from which `simsar` is called.
#' @description
#' `simsar` is used to simulate continuous variables with social interactions (see details). The model is presented in Lee (2004).
#' @references
#' Lee, L. F. (2004). Asymptotic distributions of quasi-maximum likelihood estimators for spatial autoregressive models. \emph{Econometrica}, 72(6), 1899-1925, \doi{10.1111/j.1468-0262.2004.00558.x}.
#' @details
#' The variable \eqn{\mathbf{y}}{y} is given for all i as
#' \deqn{y_i = \lambda \mathbf{g}_i y + \mathbf{x}_i'\beta + \mathbf{g}_i\mathbf{X}\gamma + \epsilon_i,}{y_i = \lambda g_i*y + x_i'\beta + g_i*X\gamma + \epsilon_i,}
#' where \eqn{\epsilon_i \sim N(0, \sigma^2)}{\epsilon_i --> N(0, \sigma^2)}.
#' @seealso \code{\link{sar}}, \code{\link{simsart}}, \code{\link{simcdnet}}.
#' @return A list consisting of:
#' \item{y}{the simulated variable.}
#' \item{Gy}{the average of y among friends (under rational expectations, the average of the expectation of y among friends).}
#' @examples
#' # Groups' size
#' M <- 5 # Number of sub-groups
#' nvec <- round(runif(M, 100, 1000))
#' n <- sum(nvec)
#'
#' # Parameters
#' lambda <- 0.4
#' beta <- c(2, -1.9, 0.8)
#' gamma <- c(1.5, -1.2)
#' sigma <- 1.5
#' theta <- c(lambda, beta, gamma, sigma)
#'
#' # X
#' X <- cbind(rnorm(n, 1, 1), rexp(n, 0.4))
#'
#' # Network
#' Glist <- list()
#'
#' for (m in 1:M) {
#'   nm <- nvec[m]
#'   Gm <- matrix(0, nm, nm)
#'   max_d <- 30
#'   for (i in 1:nm) {
#'     tmp <- sample((1:nm)[-i], sample(0:max_d, 1))
#'     Gm[i, tmp] <- 1
#'   }
#'   rs <- rowSums(Gm); rs[rs == 0] <- 1
#'   Gm <- Gm/rs
#'   Glist[[m]] <- Gm
#' }
#'
#'
#' # data
#' data <- data.frame(x1 = X[,1], x2 = X[,2])
#'
#' rm(list = ls()[!(ls() %in% c("Glist", "data", "theta"))])
#'
#' ytmp <- simsar(formula = ~ x1 + x2 | x1 + x2, Glist = Glist,
#'                theta = theta, data = data)
#' y <- ytmp$y
#'
#' # plot histogram
#' hist(y)
#'
#' @importFrom Rcpp sourceCpp
#' @export
simsar <- function(formula, contextual, Glist, theta, RE = FALSE, data) {
  if (missing(contextual)) {
    contextual <- FALSE
  }
  if (!is.list(Glist)) {
    Glist <- list(Glist)
  }
  M <- length(Glist)
  nvec <- unlist(lapply(Glist, nrow))
  n <- sum(nvec)
  igr <- matrix(c(cumsum(c(0, nvec[-M])), cumsum(nvec) - 1), ncol = 2)
  f.t.data <- formula.to.data(formula, contextual, Glist, M, igr, data, "sim", 0)
  X <- f.t.data$X
  K <- length(theta)
  if(K != (ncol(X) + 2)) {
    stop("The length of theta is not compatible with the number of explanatory variables.")
  }
  lambda <- theta[1]
  b <- theta[2:(K - 1)]
  sigma <- theta[K]
  xb <- c(X %*% b)
  eps <- rnorm(n, 0, sigma)
  out <- NULL
  if(RE){
    y <- rep(0, n)
    ye <- rep(0, n)
    Gye <- numeric(n)
    fySarRE(y, Gye, ye, Glist, eps, igr, M, xb, lambda)
    # Gye (not the previously undefined Gy) holds the average of the expectation of y among friends
    out <- list("y" = y, "Gy" = Gye)
  } else {
    y <- rep(0, n)
    Gy <- numeric(n)
    fySar(y, Gy, Glist, eps, igr, M, xb, lambda)
    out <- list("y" = y, "Gy" = Gy)
  }
  out
}
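# A minimal check (sketch) of the output structure: Gy stacks, sub-network by
# sub-network, the network average of y, i.e. Gy = G %*% y within each group.
# Objects are as in the @examples block above; illustrative code only.
# ytmp <- simsar(formula = ~ x1 + x2 | x1 + x2, Glist = Glist, theta = theta,
#                data = data)
# n1 <- nrow(Glist[[1]])
# all.equal(ytmp$Gy[1:n1], c(Glist[[1]] %*% ytmp$y[1:n1]))  # expected TRUE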
/scratch/gouwar.j/cran-all/cranData/CDatanet/R/SARsim.R
#' @title Estimate Count Data Model with Social Interactions using NPL Method #' @param formula an object of class \link[stats]{formula}: a symbolic description of the model. The `formula` should be as for example \code{y ~ x1 + x2 | x1 + x2} #' where `y` is the endogenous vector, the listed variables before the pipe, `x1`, `x2` are the individual exogenous variables and #' the listed variables after the pipe, `x1`, `x2` are the contextual observable variables. Other formulas may be #' \code{y ~ x1 + x2} for the model without contextual effects, \code{y ~ -1 + x1 + x2 | x1 + x2} for the model #' without intercept or \code{ y ~ x1 + x2 | x2 + x3} to allow the contextual variable to be different from the individual variables. #' @param contextual (optional) logical; if true, this means that all individual variables will be set as contextual variables. Set the #' the `formula` as `y ~ x1 + x2` and `contextual` as `TRUE` is equivalent to set the formula as `y ~ x1 + x2 | x1 + x2`. #' @param Glist the adjacency matrix or list sub-adjacency matrix. #' @param Rbar the value of Rbar. If not provided, it is automatically set at \code{quantile(y, 0.9)}. #' @param estim.rho indicates if the parameter \eqn{\rho} should be estimated or set to zero. #' @param starting (optional) starting value of \eqn{\theta = (\lambda, \beta', \gamma')'}, \eqn{\bar{\delta}}{deltabar}, \eqn{\delta = (\delta_2, ..., \delta_{\bar{R}})}{\delta = (\delta_2, ..., \delta_{Rbar})}, and \eqn{\rho}. The parameter \eqn{\gamma} should be removed if the model #' does not contain contextual effects (see details). #' @param yb0 (optional) expectation of y. #' @param optimizer is either `fastlbfgs` (L-BFGS optimization method of the package \pkg{RcppNumerical}), `nlm` (referring to the function \link[stats]{nlm}), or `optim` (referring to the function \link[stats]{optim}). #' Other arguments #' of these functions such as, `control` and `method` can be defined through the argument `opt.ctr`. #' @param npl.ctr list of controls for the NPL method (see details). #' @param opt.ctr list of arguments to be passed in `optim_lbfgs` of the package \pkg{RcppNumerical}, \link[stats]{nlm} or \link[stats]{optim} (the solver set in `optimizer`), such as `maxit`, `eps_f`, `eps_g`, `control`, `method`, ... #' @param cov a Boolean indicating if the covariance should be computed. #' @param data an optional data frame, list or environment (or object coercible by \link[base]{as.data.frame} to a data frame) containing the variables #' in the model. If not found in data, the variables are taken from \code{environment(formula)}, typically the environment from which `cdnet` is called. #' @return A list consisting of: #' \item{info}{list of general information about the model.} #' \item{estimate}{NPL estimator.} #' \item{yb}{ybar (see details), expectation of y.} #' \item{Gyb}{average of the expectation of y among friends.} #' \item{cov}{list of covariance matrices.} #' \item{details}{step-by-step output as returned by the optimizer.} #' @description #' `cdnet` is used to estimate peer effects on counting data with rational expectations (see details). The model is presented in Houndetoungan (2022). #' @details #' ## Model #' Following Houndetoungan (2022), the count data \eqn{\mathbf{y}}{y} is generated from a latent variable \eqn{\mathbf{y}^*}{ys}. 
#' The latent variable is given for all i as
#' \deqn{y_i^* = \lambda \mathbf{g}_i \mathbf{E}(\bar{\mathbf{y}}|\mathbf{X},\mathbf{G}) + \mathbf{x}_i'\beta + \mathbf{g}_i\mathbf{X}\gamma + \epsilon_i,}{ys_i = \lambda g_i*E(ybar|X, G) + x_i'\beta + g_i*X\gamma + \epsilon_i,}
#' where \eqn{\epsilon_i \sim N(0, 1)}{\epsilon_i --> N(0, 1)}.\cr
#' Then, \eqn{y_i = r} iff \eqn{a_r \leq y_i^* \leq a_{r+1}}{a_r \le ys_i \le a_{r + 1}}, where
#' \eqn{a_0 = -\infty}{a_0 = -Inf}, \eqn{a_1 = 0}, \eqn{a_r = \sum_{k = 1}^r\delta_k}{a_r = \delta_1 + ... + \delta_r}.
#' The parameters are subject to the constraints \eqn{\delta_r \geq \lambda}{\delta_r \ge \lambda} if \eqn{1 \leq r \leq \bar{R}}{1 \le r \le Rbar}, and
#' \eqn{\delta_r = (r - \bar{R})^{\rho}\bar{\delta} + \lambda}{\delta_r = deltabar*(r - Rbar)^{\rho} + \lambda} if \eqn{r \geq \bar{R} + 1}{r \ge Rbar + 1}. The unknown parameters to be estimated are
#' \eqn{\lambda}, \eqn{\beta}, \eqn{\gamma}, \eqn{\delta_2}, ..., \eqn{\delta_{\bar{R}}}{\delta_{Rbar}}, \eqn{\bar{\delta}}{deltabar}, and \eqn{\rho}.
#' ## \code{npl.ctr}
#' The model parameters are estimated using the Nested Pseudo-Likelihood (NPL) method. This approach
#' starts with a guess of \eqn{\theta} and \eqn{\bar{y}}{yb} and iteratively constructs a sequence
#' of \eqn{\theta} and \eqn{\bar{y}}{yb}. The solution converges when the \eqn{L_1}{L} distance
#' between two consecutive \eqn{\theta} and \eqn{\bar{y}}{yb} is less than a tolerance. \cr
#' The argument \code{npl.ctr} is an optional list which contains
#' \describe{
#' \item{tol}{ the tolerance of the NPL algorithm (default 1e-4),}
#' \item{maxit}{ the maximal number of iterations allowed (default 500),}
#' \item{print}{ a boolean indicating if the estimate should be printed at each step,}
#' \item{S}{ the number of simulations used to compute the integral in the covariance matrix by importance sampling.}
#' }
#' @references
#' Houndetoungan, E. A. (2022). Count Data Models with Social Interactions under Rational Expectations. Available at SSRN 3721250, \doi{10.2139/ssrn.3721250}.
#' @seealso \code{\link{sart}}, \code{\link{sar}}, \code{\link{simcdnet}}.
#' @examples #' \donttest{ #' set.seed(123) #' # Groups' size #' nvec <- rep(100, 2) #' M <- length(nvec) #' n <- sum(nvec) #' #' # Parameters #' lambda <- 0.4 #' beta <- c(1.5, 2.2, -0.9) #' gamma <- c(1.5, -1.2) #' delta <- c(1, 0.87, 0.75, 0.6) #' delbar <- 0.05 #' rho <- 0.5 #' theta <- c(lambda, beta, gamma) #' #' # X #' X <- cbind(rnorm(n, 1, 1), rexp(n, 0.4)) #' #' # Network #' Glist <- list() #' #' for (m in 1:M) { #' nm <- nvec[m] #' Gm <- matrix(0, nm, nm) #' max_d <- 30 #' for (i in 1:nm) { #' tmp <- sample((1:nm)[-i], sample(0:max_d, 1)) #' Gm[i, tmp] <- 1 #' } #' rs <- rowSums(Gm); rs[rs == 0] <- 1 #' Gm <- Gm/rs #' Glist[[m]] <- Gm #' } #' #' #' # data #' data <- data.frame(x1 = X[,1], x2 = X[,2]) #' #' ytmp <- simcdnet(formula = ~ x1 + x2 | x1 + x2, Glist = Glist, theta = theta, #' deltabar = delbar, delta = delta, rho = rho, data = data) #' #' y <- ytmp$y #' #' # plot histogram #' hist(y, breaks = max(y)) #' #' data <- data.frame(yt = y, x1 = data$x1, x2 = data$x2) #' rm(list = ls()[!(ls() %in% c("Glist", "data"))]) #' #' out <- cdnet(formula = yt ~ x1 + x2, contextual = TRUE, Glist = Glist, #' data = data, Rbar = 5, estim.rho = TRUE, optimizer = "nlm") #' summary(out)} #' @importFrom stats quantile #' @importFrom utils head #' @importFrom utils tail #' @export cdnet <- function(formula, contextual, Glist, Rbar = NULL, estim.rho = FALSE, starting = list(theta = NULL, deltabar = NULL, delta = NULL, rho = NULL), yb0 = NULL, optimizer = "fastlbfgs", npl.ctr = list(), opt.ctr = list(), cov = TRUE, data) { stopifnot(optimizer %in% c("fastlbfgs", "optim", "nlm")) env.formula <- environment(formula) # controls npl.print <- npl.ctr$print npl.tol <- npl.ctr$tol npl.maxit <- npl.ctr$maxit npl.S <- npl.ctr$S npl.incdit <- npl.ctr$incdit if (is.null(npl.print)) { npl.print <- TRUE } if (is.null(npl.tol)) { npl.tol <- 1e-4 } if (is.null(npl.maxit)) { npl.maxit <- 500L } if (is.null(npl.S)) { npl.S <- 1e3L } if (is.null(npl.incdit)) { npl.incdit<- 30L } # starting values thetat <- starting$theta Deltat <- starting$delta Deltabart <- starting$deltabar rhot <- starting$rho tmp <- c(is.null(thetat), is.null(Deltat), is.null(Deltabart)) if(estim.rho){ tmp <- c(tmp, is.null(rhot)) } if(all(tmp) != any(tmp)){ stop("All the parameters in starting should be either NULL or defined. 
Set delta = numeric() if Rbar = 1.") } # data if (missing(contextual)) { contextual <- FALSE } if (!is.list(Glist)) { Glist <- list(Glist) } M <- length(Glist) nvec <- unlist(lapply(Glist, nrow)) n <- sum(nvec) igr <- matrix(c(cumsum(c(0, nvec[-M])), cumsum(nvec) - 1), ncol = 2) f.t.data <- formula.to.data(formula, contextual, Glist, M, igr, data, theta0 = thetat) formula <- f.t.data$formula y <- f.t.data$y X <- f.t.data$X coln <- c("lambda", colnames(X)) maxy <- max(y) K <- ncol(X) # # simulations # Ksimu <- 0 # nsimu <- 0 # if(!is.null(simu1)) { # Ksimu <- 1 # nsimu <- ncol(simu1) # stopifnot(nrow(simu1) == n) # coln <- c(coln, "simu1") # } else{ # if(!is.null(simu2)){ # stop("simu1 = NULL whereas simu2 != NULL") # } # } # if(!is.null(simu2)) { # Ksimu <- 2 # stopifnot(nrow(simu1) == n) # coln <- c(coln, "simu2") # } # yb ybt <- rep(0, n) if (!is.null(yb0)) { ybt <- yb0 } # Gybt Gybt <- unlist(lapply(1:M, function(x) Glist[[x]] %*% ybt[(igr[x,1]:igr[x,2])+1])) # Rbar and delta if(is.null(Rbar)){ if(is.null(Deltat)){ Rbar <- max(quantile(y, 0.95), 1) } else { Rbar <- length(Deltat) + 1 } } else { if(!is.null(Deltat)){ if(Rbar != (length(Deltat) + 1)) stop("Rbar != length(delta) + 1") } } stopifnot(Rbar <= max(y)) stopifnot(Rbar >= 1) # check starting and computed them if not defined lmbd0 <- NULL if (!is.null(thetat)) { if(length(thetat) != (K + 1)) { stop("Length of theta is not suited.") } if(thetat[1] <= 0) stop("Starting lambda is not positive.") lmbd0 <- thetat[1] thetat <- c(log(thetat[1]/(1 -thetat[1])), thetat[-1]) if(Rbar > 1) if(any(Deltat < lmbd0)) stop("Multiple equilibrium issue: Starting lambda is greater than starting delta.") if(Deltabart <= 0) stop("Starting deltabar is not positive.") if(estim.rho){ if(rhot <= 0) stop("Starting rho is not positive.") } } else { Xtmp <- cbind(f.t.data$Gy, X) b <- solve(t(Xtmp)%*%Xtmp, t(Xtmp)%*%y) b <- b/sqrt(sum((y - Xtmp%*%b)^2)/n) b[1] <- max(b[1], 0.01) lmbd0 <- b[1] Deltat <- rep(b[1] + 0.01, Rbar - 1) thetat <- c(log(b[1]/(1 - b[1])), b[-1]) Deltabart <- 0.01 rhot <- 0.01 } lDelta <- log(Deltat - lmbd0) lDeltabart <- log(Deltabart) thetat <- c(thetat, lDelta, lDeltabart) if(estim.rho){ lrho <- log(rhot) thetat <- c(thetat, lrho) } # other variables cont <- TRUE t <- 0 Rmax <- NULL theta <- NULL covout <- NULL REt <- NULL llht <- 0 par0 <- NULL par1 <- NULL like <- NULL var.comp <- NULL steps <- list() # arguments ctr <- c(list(Gyb = Gybt, X = X, Rbar = Rbar, maxy = maxy, K = K, n = n, y = y), opt.ctr) # if(Ksimu == 1) { # ctr <- c(ctr, list(Simu1 = simu1, nsimu = nsimu)) # } # if(Ksimu == 2) { # ctr <- c(ctr, list(Simu1 = simu1, Simu2 = simu2, nsimu = nsimu)) # } # Arguments used in the optimizer if (optimizer == "fastlbfgs"){ ctr <- c(ctr, list(par = thetat)); if(estim.rho){optimizer <- "cdnetLBFGSrho"} else{optimizer <- "cdnetLBFGS"} par0 <- "par" par1 <- "par" like <- "value" } else if (optimizer == "optim") { # ctr <- c(ctr, list(fn = ifelse(Ksimu == 0, foptimREM_NPL, # ifelse(Ksimu == 1, foptimREM_NPLncond1, # foptimREM_NPLncond2)), par = thetat)) if(estim.rho){ ctr <- c(ctr, list(fn = foptimREM_NPL2, par = thetat)) } else{ ctr <- c(ctr, list(fn = foptimREM_NPL, par = thetat)) } par0 <- "par" par1 <- "par" like <- "value" } else { # ctr <- c(ctr, list(f = ifelse(Ksimu == 0, foptimREM_NPL, # ifelse(Ksimu == 1, foptimREM_NPLncond1, # foptimREM_NPLncond2)), p = thetat)) if(estim.rho){ ctr <- c(ctr, list(f = foptimREM_NPL2, p = thetat)) } else{ ctr <- c(ctr, list(f = foptimREM_NPL, p = thetat)) } par0 <- "p" par1 <- "estimate" like 
<- "minimum" } # Fonction used to compute new candidate for E(y) fL_NPLR <- NULL fcovCDIR <- NULL if(estim.rho){ fL_NPLR <- fL_NPL2 fcovCDI <- fcovCDI2 } else{ fL_NPLR <- fL_NPL fcovCDI <- fcovCDI } # NPL algorithm ninc.d <- 0 dist0 <- Inf if(npl.print) { while(cont) { # tryCatch({ ybt0 <- ybt + 0 #copy in different memory # compute theta REt <- do.call(get(optimizer), ctr) thetat <- REt[[par1]] llht <- -REt[[like]] theta <- c(1/(1 + exp(-thetat[1])), thetat[2:(K +1)], exp(thetat[-(1:(K +1))])) if(Rbar > 1){ theta[(K+2):(K+Rbar)] <- theta[(K+2):(K+Rbar)] + theta[1] } # compute y fL_NPLR(ybt, Gybt, Glist, igr, M, X, thetat, Rbar, K, n) # distance # print(theta) var.eps <- abs(c((ctr[[par0]] - thetat)/thetat, (ybt0 - ybt)/ybt)) if(all(!is.finite(var.eps))){ var.eps <- abs(c(ctr[[par0]] - thetat, ybt0 - ybt)) } dist <- max(var.eps, na.rm = TRUE) ninc.d <- (ninc.d + 1)*(dist > dist0) #counts the successive number of times distance increases dist0 <- dist cont <- (dist > npl.tol & t < (npl.maxit - 1)) t <- t + 1 REt$dist <- dist ctr[[par0]] <- thetat steps[[t]] <- REt cat("---------------\n") cat(paste0("Step : ", t), "\n") cat(paste0("Distance : ", round(dist,3)), "\n") cat(paste0("Likelihood : ", round(llht,3)), "\n") cat("Estimate:", "\n") print(theta) if((ninc.d > npl.incdit) | (llht < -1e293)) { cat("** Non-convergence ** Redefining theta and computing a new yb\n") thetat[1] <- -4.5 dist0 <- Inf if(estim.rho){ fnewyb2(ybt, Gybt, Glist, igr, M, X, thetat, Rbar, K, n, npl.tol, npl.maxit) } else{ fnewyb(ybt, Gybt, Glist, igr, M, X, thetat, Rbar, K, n, npl.tol, npl.maxit) } ctr[[par0]] <- thetat } # }, # error = function(e){ # cat("** Non-convergence ** Redefining theta and computing a new yb\n") # thetat[1] <- -4.5 # dist0 <- Inf # if(estim.rho){ # fnewyb2(ybt, Gybt, Glist, igr, M, X, thetat, Rbar, K, n, npl.tol, npl.maxit) # } else{ # fnewyb(ybt, Gybt, Glist, igr, M, X, thetat, Rbar, K, n, npl.tol, npl.maxit) # } # cont <- TRUE # t <- t + 1 # ctr[[par0]] <- thetat # steps[[t]] <- NULL # }) } } else { while(cont) { # tryCatch({ ybt0 <- ybt + 0 #copy in different memory # compute theta REt <- do.call(get(optimizer), ctr) thetat <- REt[[par1]] llht <- -REt[[like]] # compute y fL_NPLR(ybt, Gybt, Glist, igr, M, X, thetat, Rbar, K, n) # distance var.eps <- abs(c((ctr[[par0]] - thetat)/thetat, (ybt0 - ybt)/ybt)) if(all(!is.finite(var.eps))){ var.eps <- abs(c(ctr[[par0]] - thetat, ybt0 - ybt)) } dist <- max(var.eps, na.rm = TRUE) ninc.d <- (ninc.d + 1)*(dist > dist0) #counts the successive number of times distance increases dist0 <- dist cont <- (dist > npl.tol & t < (npl.maxit - 1)) t <- t + 1 REt$dist <- dist ctr[[par0]] <- thetat steps[[t]] <- REt if((ninc.d > npl.incdit) | (llht < -1e293)) { thetat[1] <- -4.5 dist0 <- Inf if(estim.rho){ fnewyb2(ybt, Gybt, Glist, igr, M, X, thetat, Rbar, K, n, npl.tol, npl.maxit) } else{ fnewyb(ybt, Gybt, Glist, igr, M, X, thetat, Rbar, K, n, npl.tol, npl.maxit) } ctr[[par0]] <- thetat } # }, # error = function(e){ # thetat[1] <- -4.5 # dist0 <- Inf # if(estim.rho){ # fnewyb2(ybt, Gybt, Glist, igr, M, X, thetat, Rbar, K, n, npl.tol, npl.maxit) # } else{ # fnewyb(ybt, Gybt, Glist, igr, M, X, thetat, Rbar, K, n, npl.tol, npl.maxit) # } # cont <- TRUE # t <- t + 1 # ctr[[par0]] <- thetat # steps[[t]] <- NULL # }) } theta <- c(1/(1 + exp(-thetat[1])), thetat[2:(K +1)], exp(thetat[-(1:(K +1))])) if(Rbar > 1){ theta[(K+2):(K+Rbar)] <- theta[(K+2):(K+Rbar)] + theta[1] } } # name theta namtheta <- coln if(Rbar > 1){ namtheta <- c(namtheta, paste0("delta", 2:(Rbar))) } 
namtheta <- c(namtheta, "deltabar") if(estim.rho){ namtheta <- c(namtheta, "rho") } names(theta) <- namtheta environment(formula) <- env.formula sdata <- list( "formula" = formula, "Glist" = deparse(substitute(Glist)), "nfriends" = unlist(lapply(Glist, function(u) sum(u > 0))) ) if (!missing(data)) { sdata <- c(sdata, list("data" = deparse(substitute(data)))) } if (npl.maxit == t) { warning("The maximum number of iterations of the NPL algorithm has been reached.") } # covariance and ME tmp <- fcovCDI(n, Gybt, thetat, X, Rbar, K, npl.S, Glist, igr, M, cov) meffects <- c(tmp$meffects) var.comp <- tmp$var.comp covt <- tmp$covt covm <- tmp$covm Rmax <- tmp$Rmax colnME <- coln if("(Intercept)" %in% coln) { colnME <- coln[-2] meffects <- meffects[-2] covm <- covm[-2, -2] } names(meffects) <- colnME if(!is.null(covt)) { namecovt <- coln if(Rbar > 1){ namecovt <- c(namecovt, paste0("log(delta", 2:(Rbar), ")")) } namecovt <- c(namecovt, "log(deltabar)") if(estim.rho){ namecovt <- c(namecovt, "log(rho)") } colnames(covt) <- namecovt rownames(covt) <- namecovt colnames(covm) <- colnME rownames(covm) <- colnME namecovt[1] <- "log(lambda)" rownames(var.comp$Sigma) <- namecovt rownames(var.comp$Omega) <- namecovt colnames(var.comp$Sigma) <- namecovt colnames(var.comp$Omega) <- namecovt } AIC <- 2*length(theta) - 2*llht BIC <- length(theta)*log(n) - 2*llht INFO <- list("M" = M, "n" = n, "Kz" = K, "nlinks" = unlist(lapply(Glist, function(u) sum(u > 0))), "formula" = formula, "estim.rho" = estim.rho, "Rbar" = Rbar, "Rmax" = Rmax, "log.like" = llht, "npl.iter" = t, "AIC" = AIC, "BIC" = BIC) out <- list("info" = INFO, "estimate" = list(parms = theta, marg.effects = meffects), "yb" = ybt, "Gyb" = Gybt, "cov" = list(parms = covt, marg.effects = covm, var.comp = var.comp), "details" = steps) class(out) <- "cdnet" out } #' @title Summarize Count Data Model with Social Interactions #' @description Summary and print methods for the class `cdnet` as returned by the function \link{cdnet}. #' @param object an object of class `cdnet`, output of the function \code{\link{cdnet}}. #' @param x an object of class `summary.cdnet`, output of the function \code{\link{summary.cdnet}}, #' class `summary.cdnets`, list of outputs of the function \code{\link{summary.cdnet}} #' (when the model is estimated many times to control for the endogeneity) #' or class `cdnet` of the function \code{\link{cdnet}}. #' @param Glist adjacency matrix or list sub-adjacency matrix. This is not necessary if the covariance method was computed in \link{cdnet}. #' @param data a `dataframe` containing the explanatory variables. This is not necessary if the covariance method was computed in \link{cdnet}. #' @param S number of simulation to be used to compute integral in the covariance by important sampling. #' @param ... further arguments passed to or from other methods. #' @return A list of the same objects in `object`. #' @export "summary.cdnet" <- function(object, Glist, data, S = 1e3L, ...) { stopifnot(class(object) == "cdnet") out <- c(object, list("..." 
= ...)) if(is.null(object$cov$parms)){ env.formula <- environment(object$info$formula) Rbar <- object$info$Rbar Kz <- object$info$Kz estim.rho <- object$info$estim.rho parms <- object$estimate$parms thetat <- c(log(parms[1]/(1 -parms[1])), parms[-1]) if(Rbar > 1){ thetat[(2 + Kz):(Kz + Rbar)] <- thetat[(2 + Kz):(Kz + Rbar)] - parms[1] } thetat[(2 + Kz):(Kz + Rbar + 1 + estim.rho)] <- log(tail(thetat, Rbar + estim.rho)) if(estim.rho){ fcovCDI <- fcovCDI2 } else{ fcovCDI <- fcovCDI } Gybt <- object$Gyb npl.S <- S if (is.null(npl.S)) { npl.S <- 1e3L } contextual <- FALSE formula <- object$info$formula if (!is.list(Glist)) { Glist <- list(Glist) } M <- length(Glist) nvec <- unlist(lapply(Glist, nrow)) n <- sum(nvec) igr <- matrix(c(cumsum(c(0, nvec[-M])), cumsum(nvec) - 1), ncol = 2) f.t.data <- formula.to.data(formula, contextual, Glist, M, igr, data, theta0 = thetat) formula <- f.t.data$formula y <- f.t.data$y X <- f.t.data$X K <- ncol(X) coln <- c("lambda", colnames(X)) tmp <- fcovCDI(n, Gybt, thetat, X, Rbar, K, npl.S, Glist, igr, M, TRUE) meffects <- c(tmp$meffects) var.comp <- tmp$var.comp covt <- tmp$covt covm <- tmp$covm Rmax <- tmp$Rmax colnME <- coln if("(Intercept)" %in% coln) { colnME <- coln[-2] meffects <- meffects[-2] covm <- covm[-2, -2] } names(meffects) <- colnME if(!is.null(covt)) { namecovt <- coln if(Rbar > 1){ namecovt <- c(namecovt, paste0("log(delta", 2:(Rbar), ")")) } namecovt <- c(namecovt, "log(deltabar)") if(estim.rho){ namecovt <- c(namecovt, "log(rho)") } colnames(covt) <- namecovt rownames(covt) <- namecovt colnames(covm) <- colnME rownames(covm) <- colnME namecovt[1] <- "log(lambda)" rownames(var.comp$Sigma) <- namecovt rownames(var.comp$Omega) <- namecovt colnames(var.comp$Sigma) <- namecovt colnames(var.comp$Omega) <- namecovt } out$cov <- list(parms = covt, marg.effects = covm, var.comp = var.comp) } class(out) <- "summary.cdnet" out } #' @rdname summary.cdnet #' @importFrom stats pchisq #' @export "print.summary.cdnet" <- function(x, ...) { stopifnot(class(x) == "summary.cdnet") M <- x$info$M n <- x$info$n iteration <- x$info$npl.iter Rbar <- x$info$Rbar formula <- x$info$formula Kz <- x$info$Kz estim.rho <- x$info$estim.rho AIC <- x$info$AIC BIC <- x$info$BIC parms <- x$estimate$parms coef <- parms[1:(1 + Kz)] K <- length(coef) meff <- x$estimate$marg.effects std <- sqrt(head(diag(x$cov$parms), K)) std.meff <- sqrt(diag(x$cov$marg.effects)) delta <- tail(parms, Rbar + estim.rho) llh <- x$info$log.like tmp <- fcoefficients(coef, std) out_print <- tmp$out_print out <- tmp$out out_print <- c(list(out_print), x[-(1:6)], list(...)) tmp.meff <- fcoefficients(meff, std.meff) out_print.meff <- tmp.meff$out_print out.meff <- tmp.meff$out out_print.meff <- c(list(out_print.meff), x[-(1:6)], list(...)) nfr <- x$info$nlinks cat("Count data Model with Social Interactions\n\n") cat("Call:\n") print(formula) cat("\nMethod: Nested pseudo-likelihood (NPL) \nIteration: ", iteration, sep = "", "\n\n") cat("Network:\n") cat("Number of groups : ", M, sep = "", "\n") cat("Sample size : ", n, sep = "", "\n") cat("Average number of friends: ", sum(nfr)/n, sep = "", "\n\n") cat("Coefficients:\n") do.call("print", out_print) cat("\nMarginal Effects:\n") do.call("print", out_print.meff) cat("---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n\n") cat("Rbar: ", Rbar, sep = "", "\n") if(Rbar > 1){ cat("delta:", delta[1:(Rbar - 1)], sep = " ", "\n") } cat("deltabar: ", delta[Rbar], sep = "") if(estim.rho){ cat(" -- rho: ", delta[Rbar+1], sep = "", "\n") cat("Wald test H0: rho = 0, Prob = ", 1 - pchisq(1/x$cov$parms[Kz+Rbar+2,Kz+Rbar+2], df = 1), sep = "", "\n") } else{ cat("\n") } cat("log pseudo-likelihood: ", llh, sep = "", "\n") cat("AIC: ", AIC, " -- BIC: ", BIC, sep = "", "\n") invisible(x) } #' @rdname summary.cdnet #' @export "print.cdnet" <- function(x, ...) { stopifnot(class(x) == "cdnet") print(summary(x, ...)) } #' @rdname summary.cdnet #' @importFrom stats cov #' @export "summary.cdnets" <- function(object, ...) { stopifnot(class(object) %in% c("list", "cdnets", "summary.cdnets")) lclass <- unique(unlist(lapply(object, class))) if (!all(lclass %in%c("summary.cdnet"))) { stop("All the components in `object` should be from `summary.cdnet` class") } nsim <- length(object) coef <- do.call("rbind", lapply(object, function(z) t(z$estimate$parms))) meff <- do.call("rbind", lapply(object, function(z) t(z$estimate$marg.effects))) estimate <- colSums(coef)/nsim meffects <- colSums(meff)/nsim vcoef2 <- Reduce("+", lapply(object, function(z) z$cov$parms))/nsim vmeff2 <- Reduce("+", lapply(object, function(z) z$cov$marg.effects))/nsim vcoef1 <- cov(coef) vmeff1 <- cov(meff) vcoef <- vcoef1 + vcoef2 vmeff <- vmeff1 + vmeff2 llh <- unlist(lapply(object, function(z) z$info$log.like)) llh <- c("min" = min(llh), "mean" = mean(llh), "max" = max(llh)) M <- object[[1]]$info$M n <- object[[1]]$info$n Rbar <- object[[1]]$info$Rbar Kz <- object[[1]]$info$Kz estim.rho <- object[[1]]$info$estim.rho INFO <- list("M" = M, "n" = n, "Kz" = Kz, "estim.rho" = estim.rho, "log.like" = llh, "Rbar" = Rbar, "simulation" = nsim) out <- list("info" = INFO, "estimate" = list(parms = coef, marg.effects = meffects), "cov" = list(parms = vcoef, marg.effects = vmeff), ... = ...) class(out) <- "summary.cdnets" out } #' @rdname summary.cdnet #' @importFrom stats cov #' @export "print.summary.cdnets" <- function(x, ...) { stopifnot(class(x) %in% c("summary.cdnets")) nsim <- x$info$simulation Kz <- x$info$Kz estim.rho <- x$info$estim.rho parms <- x$estimate$parms coef <- parms[1:(1 + Kz)] K <- length(coef) meff <- x$estimate$marg.effects std <- sqrt(head(diag(x$cov$parms), K)) std.meff <- sqrt(diag(x$cov$marg.effects)) delta <- tail(parms, Rbar + estim.rho) vcoef <- x$cov$parms vmeff <- x$cov$marg.effects llh <- x$info$log.like M <- x$info$M n <- x$info$n Rbar <- x$info$Rbar std <- sqrt(head(diag(vcoef), K)) std.meff <- sqrt(diag(vmeff)) tmp <- fcoefficients(coef, std) out_print <- tmp$out_print out <- tmp$out tmp.meff <- fcoefficients(meff, std.meff) out_print.meff <- tmp.meff$out_print out.meff <- tmp.meff$out out_print <- c(list(out_print), x[-c(1:3)], list(...)) out_print.meff <- c(list(out_print.meff), x[-c(1:3)], list(...)) cat("Count data Model with Social Interactions\n\n") cat("Method: Replication of Nested pseudo-likelihood (NPL) \nReplication: ", nsim, "\n\n") cat("Coefficients:\n") do.call("print", out_print) cat("\nMarginal Effects:\n") do.call("print", out_print.meff) cat("---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n\n") cat("Rbar: ", Rbar, "\n") if(Rbar > 1){ cat("delta: ", delta[1:(Rbar - 1)], "\n") } cat("deltabar: ", delta[Rbar]) if(estim.rho){ cat(" -- rho: ", delta[Rbar+1], "\n") cat("Wald test H0: rho = 0, Prob = ", 1 - pchisq(1/x$cov$parms[Kz+Rbar+2,Kz+Rbar+2], df = 1), "\n") } else{ cat("\n") } cat("log pseudo-likelihood: ", "\n") print(llh) invisible(x) } #' @rdname summary.cdnet #' @importFrom stats cov #' @export "print.cdnets" <- function(x, ...) { stopifnot(class(x) %in% c("cdnets", "list")) print(summary.cdnets(x, ...)) }
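# Schematic, self-contained sketch of the NPL alternation implemented above:
# maximize the pseudo-likelihood in theta holding yb fixed, then update yb
# holding theta fixed, and stop when successive iterates are close. The
# closures update_theta and update_yb are placeholders for the package
# internals (e.g. foptimREM_NPL and fL_NPL), not exported functions.
npl.sketch <- function(theta, yb, update_theta, update_yb,
                       tol = 1e-4, maxit = 500) {
  dist <- Inf
  iter <- 0
  while (dist > tol && iter < maxit) {
    theta.new <- update_theta(yb)        # step 1: pseudo-likelihood in theta
    yb.new    <- update_yb(theta.new)    # step 2: update the expectation of y
    dist      <- max(abs(c(theta.new - theta, yb.new - yb)))
    theta     <- theta.new
    yb        <- yb.new
    iter      <- iter + 1
  }
  list(theta = theta, yb = yb, iterations = iter, dist = dist)
}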
/scratch/gouwar.j/cran-all/cranData/CDatanet/R/cdnet.R
#' @importFrom stats pnorm
#' @noRd
fcoefficients <- function(coef, std) {
  cnames     <- names(coef)
  tval       <- coef/std
  pval       <- 2*(1 - pnorm(abs(tval)))

  pval_print <- unlist(lapply(pval, function(u) {
    ifelse(u < 2e-16, "<2e-16", format(u, digits = 3))
  }))

  refprob    <- c(0.001, 0.01, 0.05, 0.1)
  refstr     <- c("***", "**", "*", ".", "")
  str        <- unlist(lapply(pval, function(u) refstr[1 + sum(u > refprob)]))

  out_print  <- data.frame("X1" = round(coef, 6),
                           "X2" = round(std, 6),
                           "X3" = round(tval, 2),
                           "X4" = pval_print,
                           "X5" = str)
  out        <- data.frame("X1" = coef,
                           "X2" = std,
                           "X3" = tval,
                           "X4" = pval)

  rownames(out_print) <- cnames
  colnames(out_print) <- c("Estimate", "Std. Error", "t value", "Pr(>|t|)", "")
  rownames(out)       <- cnames
  colnames(out)       <- c("Estimate", "Std. Error", "t value", "Pr(>|t|)")

  list(out_print = out_print, out = out)
}
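# Illustration of the table built by fcoefficients (values computed by hand,
# not package output); note the significance codes matching the legend
# printed by print.summary.cdnet:
# coef.demo <- c("(Intercept)" = 0.52, x1 = -1.20)
# std.demo  <- c(0.30, 0.25)
# fcoefficients(coef.demo, std.demo)$out_print
# #             Estimate Std. Error t value Pr(>|t|)
# # (Intercept)     0.52       0.30    1.73   0.0829 .
# # x1             -1.20       0.25   -4.80 1.59e-06 ***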
/scratch/gouwar.j/cran-all/cranData/CDatanet/R/fcoefficients.R
#' @importFrom Formula as.Formula #' @importFrom formula.tools env #' @importFrom stats model.frame #' @importFrom stats terms #' @importFrom stats update #' @importFrom stats model.response #' @importFrom stats model.matrix #' @importFrom stats delete.response #' @importFrom stats as.formula #' @importFrom Matrix rankMatrix #' @importFrom ddpcr quiet formula.to.data <- function(formula, contextual, Glist, M, igr, data, type = "model", theta0 = NULL, fixed.effects = FALSE) { ## Extract data from the formula if (missing(data)) { data <- env(formula) } formula <- as.Formula(formula) if (type == "model") { stopifnot(length(formula)[1] == 1L, length(formula)[2] %in% 1:2) } else { stopifnot(length(formula)[1] == 0L, length(formula)[2] %in% 1:2) } # try to handle dots in formula has_dot <- function(formula) inherits(try(terms(formula), silent = TRUE), "try-error") if(has_dot(formula)) { f1 <- formula(formula, rhs = 1) f2 <- formula(formula, lhs = 0, rhs = 2) if(!has_dot(f1) & has_dot(f2)) { formula <- as.Formula(f1, update(formula(formula, lhs = 0, rhs = 1), f2)) } } ## call model.frame() mf <- model.frame(formula, data = data) ## extract response, terms, model matrices y <- model.response(mf, "numeric") mtXone <- terms(formula, data = data, rhs = 1) Xone <- model.matrix(mtXone, mf) ## X before pipe cnames <- colnames(Xone) mtX <- NULL X <- NULL if(length(formula)[2] >= 2L) { ## X after pipe mtX <- delete.response(terms(formula, data = data, rhs = 2)) X <- model.matrix(mtX, mf) } if (contextual) { if (!is.null(X)) { stop("contextual cannot be TRUE while contextual variable are declared after the pipe.") } X <- Xone tmpx <- as.character.default(formula(formula, lhs = 1, rhs = 1)) formula <- Formula::as.Formula(paste(c(tmpx[c(2, 1, 3)], "|", tmpx[3]), collapse = " ")) } cnames.x <- colnames(X) intercept <- "(Intercept)" %in% cnames.x if (intercept) { X <- X[,-1, drop = FALSE] } if(("(Intercept)" %in% cnames) & fixed.effects){ Xone <- Xone[, -1, drop = FALSE] cnames <- cnames[-1] } # GX and Gy Gy <- NULL if (is.null(theta0)) { if(!is.null(X)) { GXlist <- list() Gylist <- list() for (m in 1:M) { n1 <- igr[m,1] + 1 n2 <- igr[m,2] + 1 GXlist[[m]] <- Glist[[m]] %*% X[n1:n2,] Gylist[[m]] <- Glist[[m]] %*% y[n1:n2] if(fixed.effects){ y[n1:n2] <- y[n1:n2] - mean(y[n1:n2]) Gylist[[m]] <- Gylist[[m]] - mean(Gylist[[m]]) GXlist[[m]] <- apply(GXlist[[m]], 2, function(x) x - mean(x)) Xone[n1:n2,] <- apply(Xone[n1:n2, ,drop = FALSE], 2, function(x) x - mean(x)) } } GX <- do.call("rbind", GXlist) Gy <- unlist(Gylist) Xone <- cbind(Xone, GX) cnames <- c(cnames, paste0("G: ", colnames(X))) } else { Gylist <- list() for (m in 1:M) { n1 <- igr[m,1] + 1 n2 <- igr[m,2] + 1 Gylist[[m]] <- Glist[[m]] %*% y[n1:n2] if(fixed.effects){ y[n1:n2] <- y[n1:n2] - mean(y[n1:n2]) Gylist[[m]] <- Gylist[[m]] - mean(Gylist[[m]]) Xone[n1:n2,] <- apply(Xone[n1:n2, ,drop = FALSE], 2, function(x) x - mean(x)) } } Gy <- unlist(Gylist) } } else { if(!is.null(X)) { GXlist <- list() for (m in 1:M) { n1 <- igr[m,1] + 1 n2 <- igr[m,2] + 1 GXlist[[m]] <- Glist[[m]] %*% X[n1:n2,] } GX <- do.call("rbind", GXlist) Xone <- cbind(Xone, GX) cnames <- c(cnames, paste0("G: ", colnames(X))) } } if(type != "network") { if(rankMatrix(Xone)[1] != ncol(Xone)) { stop("X or [X, GX] is not a full rank matrix. May be there is an intercept in X and in GX.") } } else { if(rankMatrix(Xone)[1] != ncol(Xone)) { stop("X is not a full rank matrix. 
Maybe there is an intercept in X while the formula also adds an intercept or fixed effects.")
    }
  }
  colnames(Xone) <- cnames
  list("formula" = formula, "X" = Xone, "y" = y, "Gy" = Gy)
}
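# Minimal, self-contained sketch (toy data assumed) of the two-part formula
# handling performed above: rhs 1 yields the individual regressors X, rhs 2
# the contextual regressors whose network averages GX are appended to X.
demo.formula.split <- function() {
  dat <- data.frame(y = rnorm(4), x1 = rnorm(4), x2 = rnorm(4))
  G   <- matrix(1/3, 4, 4); diag(G) <- 0        # toy row-normalized network
  f   <- Formula::as.Formula(y ~ x1 + x2 | x1 + x2)
  mf  <- stats::model.frame(f, data = dat)
  X   <- stats::model.matrix(stats::terms(f, rhs = 1), mf)  # before the pipe
  Xc  <- stats::model.matrix(stats::delete.response(stats::terms(f, rhs = 2)), mf)
  GX  <- G %*% Xc[, -1, drop = FALSE]           # contextual averages, no intercept
  colnames(GX) <- paste0("G: ", colnames(Xc)[-1])
  cbind(X, GX)
}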
/scratch/gouwar.j/cran-all/cranData/CDatanet/R/formula.to.data.R
#' @title Convert data between directed network models and symmetric network models.
#' @param data is the `matrix` or `data.frame` of the explanatory variables of the network formation model. This
#' corresponds to the \code{X} matrix in \code{\link{homophily.fe}} or in \code{\link{homophily.re}}.
#' @param nvec is a vector of the number of individuals in the networks.
#' @param to indicates the direction of the conversion. For a matrix of explanatory variables `X` (`n*(n-1)` rows), one
#' can select lower triangular entries (`to = "lower"`) or upper triangular entries (`to = "upper"`).
#' For a triangular `X` (`n*(n-1)/2` rows), one can convert to a full matrix of `n*(n-1)` rows by using symmetry (`to = "symmetric"`).
#' @description
#' `homophili.data` is used to convert the matrix of explanatory variables between directed network models and symmetric network models.
#' @return the transformed `data.frame`.
#' @export
homophili.data <- function(data, nvec, to = c("lower", "upper", "symmetric")) {
  to      <- tolower(to[1])
  stopifnot(to %in% c("lower", "upper", "symmetric"))
  M       <- length(nvec)
  n       <- sum(nvec)
  tmp1    <- NULL
  if (to == "symmetric") {
    stopifnot(nrow(data) == sum(nvec*(nvec - 1)/2))
    tmp1  <- cumsum(unlist(lapply(nvec, function(x) (x - 1):0))) - 1
  } else {
    stopifnot(nrow(data) == sum(nvec*(nvec - 1)))
    tmp1  <- cumsum(unlist(lapply(nvec, function(x) rep(x - 1, x)))) - 1
  }
  tmp2    <- c(0, tmp1[-n] + 1)
  index   <- cbind(tmp2, tmp1)
  rm(list = c("tmp1", "tmp2"))
  indexgr <- matrix(c(cumsum(c(0, nvec[-M])), cumsum(nvec) - 1), ncol = 2)
  out     <- NULL
  if (to == "symmetric") {
    out   <- hdata2S(as.matrix(data), nvec, index, indexgr, M)
  }
  if (to == "lower") {
    out   <- hdataF2L(as.matrix(data), nvec, index, M)
  }
  if (to == "upper") {
    out   <- hdataF2U(as.matrix(data), nvec, index, indexgr, M)
  }
  colnames(out) <- colnames(data)
  as.data.frame(out)
}
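# Pure-R sketch (not the compiled hdataF2L routine) of the "lower" selection
# for one group of n nodes, assuming the column-by-column dyad ordering
# suggested by the helper name frMtoVbyCOL: for each column i, the rows
# j != i are stacked, and the lower triangle keeps the dyads with j > i.
lower.from.full <- function(data, n) {
  pairs <- do.call(rbind, lapply(1:n, function(i) {
    cbind(j = (1:n)[-i], i = i)                 # dyads of column i
  }))
  data[pairs[, "j"] > pairs[, "i"], , drop = FALSE]
}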
/scratch/gouwar.j/cran-all/cranData/CDatanet/R/homophilidata.R
#' @title Estimate Network Formation Model with Degree Heterogeneity as Fixed Effects #' @param network matrix or list of sub-matrix of social interactions containing 0 and 1, where links are represented by 1 #' @param formula an object of class \link[stats]{formula}: a symbolic description of the model. The `formula` should be as for example \code{~ x1 + x2} #' where `x1`, `x2` are explanatory variable of links formation. If missing, the model is estimated with fixed effects only. #' @param data an optional data frame, list or environment (or object coercible by \link[base]{as.data.frame} to a data frame) containing the variables #' in the model. If not found in data, the variables are taken from \code{environment(formula)}, typically the environment from which `homophily` is called. #' @param symmetry indicates whether the network model is symmetric (see details). #' @param fe.way indicates whether it is a one-way or two-way fixed effect model. The expected value is 1 or 2 (see details). #' @param init (optional) either a list of starting values containing `beta`, an K-dimensional vector of the explanatory variables parameter, #' `mu` an n-dimensional vector, and `nu` an n-dimensional vector, #' where K is the number of explanatory variables and n is the number of individuals; or a vector of starting value for `c(beta, mu, nu)`. #' @param opt.ctr (optional) is a list of `maxit`, `eps_f`, and `eps_g`, which are control parameters used by the solver `optim_lbfgs`, of the package \pkg{RcppNumerical}. #' @param print Boolean indicating if the estimation progression should be printed. #' @description #' `homophily.fe` implements a Logit estimator for network formation model with homophily. The model includes degree heterogeneity as fixed effects (see details). #' @details #' Let \eqn{p_{ij}}{Pij} be a probability for a link to go from the individual \eqn{i} to the individual \eqn{j}. #' This probability is specified for two-way effect models (`fe.way = 2`) as #' \deqn{p_{ij} = F(\mathbf{x}_{ij}'\beta + \mu_j + \nu_j)}{Pij = F(Xij'*\beta + \mu_i + \nu_j),} #' where \eqn{F} is the cumulative of the standard logistic distribution. Unobserved degree heterogeneity is captured by #' \eqn{\mu_i} and \eqn{\nu_j}. The latter are treated as fixed effects (see \code{\link{homophily.re}} for random effect models). #' As shown by Yan et al. (2019), the estimator of #' the parameter \eqn{\beta} is biased. A bias correction is then necessary and is not implemented in this version. However #' the estimator of \eqn{\mu_i} and \eqn{\nu_j} are consistent.\cr #' For one-way fixed effect models (`fe.way = 1`), \eqn{\nu_j = \mu_j}. For symmetric models, the network is not directed and the #' fixed effects need to be one way. #' @seealso \code{\link{homophily.re}}. #' @references #' Yan, T., Jiang, B., Fienberg, S. E., & Leng, C. (2019). Statistical inference in a directed network model with covariates. \emph{Journal of the American Statistical Association}, 114(526), 857-868, \doi{https://doi.org/10.1080/01621459.2018.1448829}. #' @return A list consisting of: #' \item{model.info}{list of model information, such as the type of fixed effects, whether the model is symmetric, #' number of observations, etc.} #' \item{estimate}{maximizer of the log-likelihood.} #' \item{loglike}{maximized log-likelihood.} #' \item{optim}{returned value of the optimization solver, which contains details of the optimization. 
The solver used is `optim_lbfgs` of the #' package \pkg{RcppNumerical}.} #' \item{init}{returned list of starting value.} #' \item{loglike(init)}{log-likelihood at the starting value.} #' @importFrom stats glm #' @importFrom stats binomial #' @importFrom matrixcalc is.symmetric.matrix #' @examples #' \donttest{ #' set.seed(1234) #' M <- 2 # Number of sub-groups #' nvec <- round(runif(M, 20, 50)) #' beta <- c(.1, -.1) #' Glist <- list() #' dX <- matrix(0, 0, 2) #' mu <- list() #' nu <- list() #' Emunu <- runif(M, -1.5, 0) #expectation of mu + nu #' smu2 <- 0.2 #' snu2 <- 0.2 #' for (m in 1:M) { #' n <- nvec[m] #' mum <- rnorm(n, 0.7*Emunu[m], smu2) #' num <- rnorm(n, 0.3*Emunu[m], snu2) #' X1 <- rnorm(n, 0, 1) #' X2 <- rbinom(n, 1, 0.2) #' Z1 <- matrix(0, n, n) #' Z2 <- matrix(0, n, n) #' #' for (i in 1:n) { #' for (j in 1:n) { #' Z1[i, j] <- abs(X1[i] - X1[j]) #' Z2[i, j] <- 1*(X2[i] == X2[j]) #' } #' } #' #' Gm <- 1*((Z1*beta[1] + Z2*beta[2] + #' kronecker(mum, t(num), "+") + rlogis(n^2)) > 0) #' diag(Gm) <- 0 #' diag(Z1) <- NA #' diag(Z2) <- NA #' Z1 <- Z1[!is.na(Z1)] #' Z2 <- Z2[!is.na(Z2)] #' #' dX <- rbind(dX, cbind(Z1, Z2)) #' Glist[[m]] <- Gm #' mu[[m]] <- mum #' nu[[m]] <- num #' } #' #' mu <- unlist(mu) #' nu <- unlist(nu) #' #' out <- homophily.fe(network = Glist, formula = ~ -1 + dX, fe.way = 2) #' muhat <- out$estimate$mu #' nuhat <- out$estimate$nu #' plot(mu, muhat) #' plot(nu, nuhat) #' } #' @export homophily.fe <- function(network, formula, data, symmetry = FALSE, fe.way = 1, init = NULL, opt.ctr = list(maxit = 1e4, eps_f = 1e-9, eps_g = 1e-9), print = TRUE){ t1 <- Sys.time() fe.way <- as.numeric(fe.way[1]) if(symmetry & fe.way == 2) stop("Two side fixed effects are not allowed for symmetric network models.") stopifnot(fe.way %in% (1:2)) stopifnot(is.null(init) || is.vector(init) || is.list(init)) # Data and dimensions if (!is.list(network)) { network <- list(network) } M <- length(network) nvec <- unlist(lapply(network, nrow)) n <- sum(nvec) Nvec <- NULL if(symmetry){ Nvec <- nvec*(nvec- 1)/2 stopifnot(sapply(network, is.symmetric.matrix)) network <- frMtoVbyCOLsym(network, nvec, M) } else { Nvec <- nvec*(nvec- 1) # network <- unlist(lapply(network, function(x){diag(x) = NA; x})) # network <- network[!is.na(network)] network <- frMtoVbyCOL(network, nvec, M) } N <- sum(Nvec) quiet(gc()) if (sum(!((network == 0) | (network == 1))) != 0) { stop("Network should contain only 0 and 1.") } tmp1 <- NULL if(symmetry){ tmp1 <- cumsum(unlist(lapply(nvec, function(x) (x - 1):0))) - 1 } else { tmp1 <- cumsum(unlist(lapply(nvec, function(x) rep(x - 1, x)))) - 1 } tmp2 <- c(0, tmp1[-n] + 1) index <- cbind(tmp2, tmp1) rm(list = c("tmp1", "tmp2")) quiet(gc()) indexgr <- matrix(c(cumsum(c(0, nvec[-M])), cumsum(nvec) - 1), ncol = 2) #start group, end group # INDEXgr <- matrix(c(cumsum(c(0, Nvec[-M])), cumsum(Nvec) - 1), ncol = 2) # Formula to data dX <- matrix(0, 0, 0) hasX <- FALSE if(!missing(formula)){ f.t.data <- formula.to.data(formula, FALSE, NULL, NULL, NULL, data, type = "network", theta0 = NA) if(!missing(data)) { rm("data") quiet(gc()) } formula <- f.t.data$formula dX <- f.t.data$X if(nrow(dX) != N) stop("The number of observations in X does not match the network.") rm("f.t.data") quiet(gc()) hasX <- TRUE } coln <- colnames(dX) if("(Intercept)" %in% coln){stop("Fixed effect model cannot include intercept.")} K <- length(coln) nlinks <- sum(network) out <- list() if(symmetry){ out <- homophily.LogitFESym(network, M, nvec, n, N, Nvec, index, indexgr, formula, dX, coln, K, init, 
nlinks, opt.ctr, hasX, print) } else { out <- homophily.LogitFE(network, fe.way, M, nvec, n, N, Nvec, index, indexgr, formula, dX, coln, K, init, nlinks, opt.ctr, hasX, print) } t2 <- Sys.time() timer <- as.numeric(difftime(t2, t1, units = "secs")) if(print) { cat("\n\n") cat("The program successfully executed \n") cat("\n") cat("********SUMMARY******** \n") cat("n.obs : ", N, "\n") cat("n.links : ", nlinks, "\n") cat("K : ", K, "\n") # Print the processing time nhours <- floor(timer/3600) nminutes <- floor((timer-3600*nhours)/60)%%60 nseconds <- timer-3600*nhours-60*nminutes cat("Elapsed time : ", nhours, " HH ", nminutes, " mm ", round(nseconds), " ss \n \n") } out } homophily.LogitFE <- function(network, fe.way, M, nvec, n, N, Nvec, index, indexgr, formula, dX, coln, K, init, nlinks, opt.ctr, hasX, print){ maxit <- opt.ctr$maxit eps_f <- opt.ctr$eps_f eps_g <- opt.ctr$eps_g if(is.null(maxit)){ maxit <- 500 } if(is.null(eps_f)){ eps_f <- 1e-6 } if(is.null(eps_g)){ eps_g <- 1e-5 } #starting value initllh <- NULL quiet(gc()) if(is.null(init)){ if(print) cat("starting point searching\n") beta <- NULL mu <- NULL mylogit <- NULL if(hasX){ mylogit <- glm(network ~ 1 + dX, family = binomial(link = "logit")) } else { mylogit <- glm(network ~ 1, family = binomial(link = "logit")) } beta <- mylogit$coefficients[-1] mu <- rep(mylogit$coefficients[1], n) names(mu) <- NULL nu <- NULL if(fe.way == 2){ nu <- rep(0, n - M) } init <- c(beta, mu, nu) initllh <- -0.5*mylogit$deviance } else { if(is.list(init)){ beta <- c(init$beta) mu <- c(init$mu) nu <- c(init$nu) if((is.null(beta) || is.null(mu)) & hasX){ if(print) cat("starting point searching\n") mylogit <- glm(network ~ 1 + dX, family = binomial(link = "logit")) initllh <- -0.5*mylogit$deviance if(is.null(mu)){ mu <- rep(mylogit$coefficients[1], n); names(mu) <- NULL } if(is.null(beta)){ beta <- mylogit$coefficients[-1] } } if((is.null(beta) || is.null(mu)) & !hasX){ if(print) cat("starting point searching\n") mylogit <- glm(network ~ 1, family = binomial(link = "logit")) initllh <- -0.5*mylogit$deviance if(is.null(mu)){ mu <- rep(mylogit$coefficients[1], n); names(mu) <- NULL } } if(is.null(nu) & (fe.way == 2)){ nu <- rep(0, n - M) } stopifnot(length(beta) == K) stopifnot(length(mu) == n) if(fe.way == 2){ stopifnot(length(nu) == (n - M)) } init <- c(beta, mu, nu) } else if(is.vector(init)){ if(fe.way == 2){ stopifnot(length(init) == (K + 2*n - M)) } else { stopifnot(length(init) == (K + n)) } } } quiet(gc()) theta <- init estim <- NULL quiet(gc()) if(print) { cat("maximizer searching\n") } estim <- NULL if(fe.way == 2){ estim <- fhomobeta2f(theta, c(network), dX, nvec, index, indexgr, M, maxit, eps_f, eps_g, hasX, print) } else { estim <- fhomobeta1f(theta, c(network), dX, nvec, index, indexgr, M, maxit, eps_f, eps_g, hasX, print) } # export degree theta <- c(estim$estimate) names(theta) <- names(init) beta <- head(theta, K) if(hasX){ names(beta) <- coln } mu <- theta[(K + 1):(K + n)] nu <- NULL if(fe.way == 2){ nu <- tail(theta, n - M) nu <- unlist(lapply(1:M, function(x) c(nu[(indexgr[x, 1] + 2 - x):(indexgr[x, 2] + 1 - x)], 0))) } estim$estimate <- c(estim$estimate) estim$gradient <- c(estim$gradient) out <- list("model.info" = list("model" = "logit", "sym.network" = FALSE, "fe.way" = fe.way, "n" = nvec, "n.obs" = N, "n.links" = nlinks, "K" = K), "estimate" = list(beta = beta, mu = mu, nu = nu), "loglike" = -estim$value, "optim" = estim, "init" = init, "loglike(init)" = initllh) class(out) <- "homophily.fe" out } homophily.LogitFESym <- 
function(network, M, nvec, n, N, Nvec, index, indexgr, formula, dX, coln, K, init, nlinks, opt.ctr, hasX, print){ maxit <- opt.ctr$maxit eps_f <- opt.ctr$eps_f eps_g <- opt.ctr$eps_g if(is.null(maxit)){ maxit <- 500 } if(is.null(eps_f)){ eps_f <- 1e-6 } if(is.null(eps_g)){ eps_g <- 1e-5 } #starting value initllh <- NULL quiet(gc()) if(is.null(init)){ if(print) cat("starting point searching\n") beta <- NULL mu <- NULL mylogit <- NULL if(hasX){ mylogit <- glm(network ~ 1 + dX, family = binomial(link = "logit")) } else { mylogit <- glm(network ~ 1, family = binomial(link = "logit")) } beta <- mylogit$coefficients[-1] mu <- rep(mylogit$coefficients[1], n) names(mu) <- NULL init <- c(beta, mu) initllh <- -0.5*mylogit$deviance } else { if(is.list(init)){ beta <- c(init$beta) mu <- c(init$mu) if((is.null(beta) || is.null(mu)) & hasX){ if(print) cat("starting point searching\n") mylogit <- glm(network ~ 1 + dX, family = binomial(link = "logit")) initllh <- -0.5*mylogit$deviance if(is.null(mu)){ mu <- rep(mylogit$coefficients[1], n); names(mu) <- NULL } if(is.null(beta)){ beta <- mylogit$coefficients[-1] } } if((is.null(beta) || is.null(mu)) & !hasX){ if(print) cat("starting point searching\n") mylogit <- glm(network ~ 1, family = binomial(link = "logit")) initllh <- -0.5*mylogit$deviance if(is.null(mu)){ mu <- rep(mylogit$coefficients[1], n); names(mu) <- NULL } } stopifnot(length(beta) == K) stopifnot(length(mu) == n) init <- c(beta, mu) } else if(is.vector(init)){ stopifnot(length(init) == (K + n)) } } quiet(gc()) theta <- init estim <- NULL quiet(gc()) if(print) { cat("maximizer searching\n") } estim <- fhomobetasym(theta, c(network), dX, nvec, index, indexgr, M, maxit, eps_f, eps_g, hasX, print) # export degree theta <- c(estim$estimate) names(theta) <- names(init) beta <- head(theta, K) if(hasX){ names(beta) <- coln } mu <- tail(theta, n) estim$estimate <- c(estim$estimate) estim$gradient <- c(estim$gradient) out <- list("model.info" = list("model" = "logit", "sym.network" = TRUE, "n" = nvec, "n.obs" = N, "n.links" = nlinks, "K" = K), "estimate" = list(beta = beta, mu = mu), "loglike" = -estim$value, "optim" = estim, "init" = init, "loglike(init)" = initllh) class(out) <- "homophily.fe" out }
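# Hedged sketch of the link probability underlying the fixed-effect logit
# above: p_ij = F(x_ij' beta + mu_i + nu_j), with F the logistic CDF. All
# names are illustrative; the actual likelihood is evaluated in compiled
# code (fhomobeta1f / fhomobeta2f).
phomophily.logit <- function(dX.ij, beta, mu.i, nu.j) {
  stats::plogis(sum(dX.ij * beta) + mu.i + nu.j)
}
# e.g. phomophily.logit(dX.ij = c(0.3, 1), beta = c(0.1, -0.1),
#                       mu.i = -0.5, nu.j = -0.2)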
/scratch/gouwar.j/cran-all/cranData/CDatanet/R/homophily.fe.R
#' @title Estimate Network Formation Model with Degree Heterogeneity as Random Effects #' @param network matrix or list of sub-matrix of social interactions containing 0 and 1, where links are represented by 1. #' @param formula an object of class \link[stats]{formula}: a symbolic description of the model. The `formula` should be as for example \code{~ x1 + x2} #' where `x1`, `x2` are explanatory variable of links formation. #' @param data an optional data frame, list or environment (or object coercible by \link[base]{as.data.frame} to a data frame) containing the variables #' in the model. If not found in data, the variables are taken from \code{environment(formula)}, typically the environment from which `homophily` is called. #' @param symmetry indicates whether the network model is symmetric (see details). #' @param group.fe indicates whether the model includes group fixed effects. #' @param re.way indicates whether it is a one-way or two-way fixed effect model. The expected value is 1 or 2 (see details). #' @param init (optional) list of starting values containing `beta`, an K-dimensional vector of the explanatory variables parameter, #' `mu` an n-dimensional vector, and `nu` an n-dimensional vector, `smu2` the variance of `mu`, #' and `snu2` the variance of `nu`, #' where K is the number of explanatory variables and n is the number of individuals. #' @param iteration the number of iterations to be performed. #' @param print boolean indicating if the estimation progression should be printed. #' @return A list consisting of: #' \item{model.info}{list of model information, such as the type of random effects, whether the model is symmetric, #' number of observations, etc.} #' \item{posterior}{list of simulations from the posterior distribution.} #' \item{init}{returned list of starting values.} #' @description #' `homophily.re` implements a Bayesian Probit estimator for network formation model with homophily. The model includes degree heterogeneity as random effects (see details). #' @details #' Let \eqn{p_{ij}}{Pij} be a probability for a link to go from the individual \eqn{i} to the individual \eqn{j}. #' This probability is specified for two-way effect models (`fe.way = 2`) as #' \deqn{p_{ij} = F(\mathbf{x}_{ij}'\beta + \mu_j + \nu_j)}{Pij = F(Xij'*\beta + \mu_i + \nu_j),} #' where \eqn{F} is the cumulative of the standard normal distribution. Unobserved degree heterogeneity is captured by #' \eqn{\mu_i} and \eqn{\nu_j}. The latter are treated as random effects (see \code{\link{homophily.fe}} for fixed effect models).\cr #' For one-way random effect models (`fe.way = 1`), \eqn{\nu_j = \mu_j}. For symmetric models, the network is not directed and the #' random effects need to be one way. #' @seealso \code{\link{homophily.fe}}. 
#' @importFrom ddpcr quiet #' @importFrom stats lm #' @importFrom stats var #' @importFrom stats cov #' @examples #' \donttest{ #' set.seed(1234) #' library(MASS) #' M <- 4 # Number of sub-groups #' nvec <- round(runif(M, 100, 500)) #' beta <- c(.1, -.1) #' Glist <- list() #' dX <- matrix(0, 0, 2) #' mu <- list() #' nu <- list() #' cst <- runif(M, -1.5, 0) #' smu2 <- 0.2 #' snu2 <- 0.2 #' rho <- 0.8 #' Smunu <- matrix(c(smu2, rho*sqrt(smu2*snu2), rho*sqrt(smu2*snu2), snu2), 2) #' for (m in 1:M) { #' n <- nvec[m] #' tmp <- mvrnorm(n, c(0, 0), Smunu) #' mum <- tmp[,1] - mean(tmp[,1]) #' num <- tmp[,2] - mean(tmp[,2]) #' X1 <- rnorm(n, 0, 1) #' X2 <- rbinom(n, 1, 0.2) #' Z1 <- matrix(0, n, n) #' Z2 <- matrix(0, n, n) #' #' for (i in 1:n) { #' for (j in 1:n) { #' Z1[i, j] <- abs(X1[i] - X1[j]) #' Z2[i, j] <- 1*(X2[i] == X2[j]) #' } #' } #' #' Gm <- 1*((cst[m] + Z1*beta[1] + Z2*beta[2] + #' kronecker(mum, t(num), "+") + rnorm(n^2)) > 0) #' diag(Gm) <- 0 #' diag(Z1) <- NA #' diag(Z2) <- NA #' Z1 <- Z1[!is.na(Z1)] #' Z2 <- Z2[!is.na(Z2)] #' #' dX <- rbind(dX, cbind(Z1, Z2)) #' Glist[[m]] <- Gm #' mu[[m]] <- mum #' nu[[m]] <- num #' } #' #' mu <- unlist(mu) #' nu <- unlist(nu) #' #' out <- homophily.re(network = Glist, formula = ~ dX, group.fe = TRUE, #' re.way = 2, iteration = 1e3) #' #' # plot simulations #' plot(out$posterior$beta[,1], type = "l") #' abline(h = cst[1], col = "red") #' plot(out$posterior$beta[,2], type = "l") #' abline(h = cst[2], col = "red") #' plot(out$posterior$beta[,3], type = "l") #' abline(h = cst[3], col = "red") #' plot(out$posterior$beta[,4], type = "l") #' abline(h = cst[4], col = "red") #' #' plot(out$posterior$beta[,5], type = "l") #' abline(h = beta[1], col = "red") #' plot(out$posterior$beta[,6], type = "l") #' abline(h = beta[2], col = "red") #' #' plot(out$posterior$sigma2_mu, type = "l") #' abline(h = smu2, col = "red") #' plot(out$posterior$sigma2_nu, type = "l") #' abline(h = snu2, col = "red") #' plot(out$posterior$rho, type = "l") #' abline(h = rho, col = "red") #' #' i <- 10 #' plot(out$posterior$mu[,i], type = "l") #' abline(h = mu[i], col = "red") #' plot(out$posterior$nu[,i], type = "l") #' abline(h = nu[i], col = "red") #' } #' @export homophily.re <- function(network, formula, data, symmetry = FALSE, group.fe = FALSE, re.way = 1, init = list(), iteration = 1e3, print = TRUE) { t1 <- Sys.time() re.way <- as.numeric(re.way[1]) if(symmetry & re.way == 2) stop("Two side random effects are not allowed for symmetric network models.") stopifnot(re.way %in% (1:2)) # Data and dimensions if (!is.list(network)) { network <- list(network) } M <- length(network) nvec <- unlist(lapply(network, nrow)) n <- sum(nvec) Nvec <- NULL if(symmetry){ Nvec <- nvec*(nvec- 1)/2 stopifnot(sapply(network, is.symmetric.matrix)) network <- frMtoVbyCOLsym(network, nvec, M) } else { Nvec <- nvec*(nvec- 1) # network <- unlist(lapply(network, function(x){diag(x) = NA; x})) # network <- network[!is.na(network)] network <- frMtoVbyCOL(network, nvec, M) } N <- sum(Nvec) quiet(gc()) if (sum(!((network == 0) | (network == 1))) != 0) { stop("Network should contain only 0 and 1.") } tmp1 <- NULL if(symmetry){ tmp1 <- cumsum(unlist(lapply(nvec, function(x) (x - 1):0))) - 1 } else { tmp1 <- cumsum(unlist(lapply(nvec, function(x) rep(x - 1, x)))) - 1 } tmp2 <- c(0, tmp1[-n] + 1) index <- cbind(tmp2, tmp1) rm(list = c("tmp1", "tmp2")) quiet(gc()) indexgr <- matrix(c(cumsum(c(0, nvec[-M])), cumsum(nvec) - 1), ncol = 2) INDEXgr <- matrix(c(cumsum(c(0, Nvec[-M])), cumsum(Nvec) - 1), ncol = 2) # 
Formula to data f.t.data <- formula.to.data(formula, FALSE, NULL, NULL, NULL, data, type = "network", theta0 = NA) if(!missing(data)) { rm("data") quiet(gc()) } formula <- f.t.data$formula dX <- f.t.data$X if(nrow(dX) != N) stop("The number of observations in X does not match the network.") rm("f.t.data") quiet(gc()) coln <- colnames(dX) nfix <- ifelse("(Intercept)" %in% coln, 1, 0) K <- ncol(dX) if (group.fe) { if(M < 2){ stop("Group fixed effects can be added for only one subnetwork.") } K <- K + M - nfix nfix <- M dX <- dX[,coln != "(Intercept)"] coln <- c(paste0("(Intercept-", 1:M, ")"), coln[coln != "(Intercept)"]) } Kx <- ncol(dX) dXdX <- crossprod(dX) sumnetwork <- NULL invdXdX <- NULL if (nfix >= 2){ sumdX <- do.call(cbind, lapply(1:M, function(m){colSums(dX[(INDEXgr[m,1] + 1):(INDEXgr[m,2] + 1),])})) sumnetwork <- sapply(1:M, function(m){sum(network[(INDEXgr[m,1] + 1):(INDEXgr[m,2] + 1)])}) dXdX <- rbind(cbind(diag(Nvec), t(sumdX)), cbind(sumdX, dXdX)) invdXdX <- solve(as.matrix(dXdX)) rm("sumdX") quiet(gc()) } else{ invdXdX <- solve(as.matrix(dXdX)) } rm("dXdX") quiet(gc()) #starting value beta <- init$beta mu <- init$mu nu <- init$nu smu2 <- init$smu2 snu2 <- init$snu2 rho <- init$rho quiet(gc()) if (is.null(beta)) { # print(dim(invdXdX)) # print(length(sumnetwork)) # print(dim(dX)) # print(length(network)) beta <- c(invdXdX %*% c(sumnetwork, crossprod(dX, network))) } else{ stopifnot(length(beta) == K) } if (is.null(mu)) { mu <- rep(0, n) } else{ stopifnot(length(mu) == n) } if(re.way == 2){ if (is.null(nu)) { nu <- rep(0, n) } else{ stopifnot(length(nu) == n) } } if (is.null(smu2)) { tmp <- var(mu) smu2 <- ifelse(tmp > 0, tmp, 1) } if (is.null(snu2) & re.way == 2) { tmp <- var(nu) snu2 <- ifelse(tmp > 0, tmp, 1) } if (is.null(rho) & re.way == 2) { rho <- cov(mu, nu)/sqrt(smu2*snu2) rho <- (rho >= 1) - (rho <= -1) + rho*((rho >= -1) & (rho <= 1)) } if(re.way == 1){ nu <- NULL snu2 <- NULL rho <- NULL } init <- list(beta = beta, mu = mu, nu = nu, smu2 = smu2, snu2 = snu2, rho = rho) estima <- NULL if(re.way == 1){ estim <- bayesmu(network, dX, invdXdX, beta, mu, smu2, index, indexgr, INDEXgr, nfix, N, M, K, Kx, nvec, n, iteration, symmetry, print) } else{ estim <- bayesmunu(network, dX, invdXdX, beta, mu, nu, smu2, snu2, rho, index, indexgr, INDEXgr, nfix, N, M, K, Kx, nvec, n, iteration, print) } colnames(estim$beta) <- coln t2 <- Sys.time() timer <- as.numeric(difftime(t2, t1, units = "secs")) nlinks <- sum(network) out <- list("model.info" = list("model" = "probit", "sym.network" = symmetry, "re.way" = re.way, "n" = nvec, "n.obs" = N, "n.links" = nlinks, "K" = K, "iteration" = iteration), "posterior" = estim, "init" = init) class(out) <- "homophily.re" if(print) { cat("\n\n") cat("The program successfully executed \n") cat("\n") cat("********SUMMARY******** \n") cat("n.obs : ", N, "\n") cat("n.links : ", nlinks, "\n") cat("K : ", K, "\n") cat("Group FE : ", ifelse(group.fe, "Yes", "No"), "\n") cat("Iteration : ", iteration, "\n\n") # Print the processing time nhours <- floor(timer/3600) nminutes <- floor((timer-3600*nhours)/60)%%60 nseconds <- timer-3600*nhours-60*nminutes cat("Elapsed time : ", nhours, " HH ", nminutes, " mm ", round(nseconds), " ss \n \n") } out }
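# Probit counterpart sketch for the random-effect model above:
# p_ij = Phi(x_ij' beta + mu_i + nu_j), with (mu_i, nu_j) drawn from a
# bivariate normal with variances smu2, snu2 and correlation rho.
# Illustrative only; estimation is done by the compiled Gibbs samplers
# (bayesmu / bayesmunu).
draw.link.prob <- function(dX.ij, beta, smu2, snu2, rho) {
  S  <- matrix(c(smu2, rho * sqrt(smu2 * snu2),
                 rho * sqrt(smu2 * snu2), snu2), 2)
  re <- MASS::mvrnorm(1, c(0, 0), S)            # one draw of (mu_i, nu_j)
  stats::pnorm(sum(dX.ij * beta) + re[1] + re[2])
}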
/scratch/gouwar.j/cran-all/cranData/CDatanet/R/homophily.re.R
jacobSAR <- function(alpha, X, invXX, XX, y, N, G, I, Gy, ngroup, igroup,
                     cov = TRUE, fixed.effects = FALSE) {
  Ay     <- (y - alpha * Gy)
  beta   <- invXX %*% (t(X) %*% Ay)
  Xbeta  <- X %*% beta
  s2     <- sum((Ay - Xbeta)^2) / ifelse(fixed.effects, N - ngroup, N)
  nbeta  <- length(beta)

  covout <- NULL
  if (cov) {
    covout <- fSARjac(alpha, s2, X, XX, Xbeta, G, I, igroup, ngroup, N,
                      nbeta, fixed.effects)
  }

  list("alpha" = alpha, "beta" = beta, "sigma2" = s2, "cov" = covout)
}
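# Minimal sketch of the concentrated estimates computed above for a given
# peer-effect parameter alpha: beta = (X'X)^{-1} X'(y - alpha*Gy) and
# sigma2 = RSS / N (or RSS / (N - ngroup) under fixed effects).
concentrated.sar <- function(alpha, X, y, Gy, fixed.effects = FALSE, ngroup = 1) {
  Ay   <- y - alpha * Gy
  beta <- solve(crossprod(X), crossprod(X, Ay))
  rss  <- sum((Ay - X %*% beta)^2)
  s2   <- rss / ifelse(fixed.effects, length(y) - ngroup, length(y))
  list(beta = c(beta), sigma2 = s2)
}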
/scratch/gouwar.j/cran-all/cranData/CDatanet/R/jacobSAR.R
#' @title Cross-Entropy Clustering
#'
#' @description CEC divides data into Gaussian type clusters. The implementation
#' allows the simultaneous use of various types of Gaussian mixture models,
#' performs the reduction of unnecessary clusters and is able to discover new
#' groups. Based on Spurek, P. and Tabor, J. (2014) <doi:10.1016/j.patcog.2014.03.006>
#' \code{cec}.
#'
#' @name CEC-package
#'
#' @docType package
#'
#' @author Konrad Kamieniecki
#'
#' @seealso \code{\link{cec}}
#'
#' @keywords package multivariate cluster models
#'
NULL

#' @title Four Gaussian Clusters
#'
#' @description Matrix of 2-dimensional points forming four Gaussian clusters.
#'
#' @name fourGaussians
#'
#' @docType data
#'
#' @keywords datasets
#'
#' @examples
#' data(fourGaussians)
#' plot(fourGaussians, cex = 0.5, pch = 19)
#'
NULL

#' @title Three Gaussian Clusters
#'
#' @description Matrix of 2-dimensional points forming three Gaussian clusters.
#'
#' @name threeGaussians
#'
#' @docType data
#'
#' @keywords datasets
#'
#' @examples
#' data(threeGaussians)
#' plot(threeGaussians, cex = 0.5, pch = 19)
#'
NULL

#' @title Mixed Shapes Clusters
#'
#' @description Matrix of 2-dimensional points that form circular and elliptical
#' patterns.
#'
#' @name mixShapes
#'
#' @docType data
#'
#' @keywords datasets
#'
#' @examples
#' data(mixShapes)
#' plot(mixShapes, cex = 0.5, pch = 19)
#'
NULL

#' @title T-Shaped Clusters
#'
#' @description Matrix of 2-dimensional points that form the letter T.
#'
#' @name Tset
#'
#' @docType data
#'
#' @keywords datasets
#'
#' @examples
#' data(Tset)
#' plot(Tset, cex = 0.5, pch = 19)
#'
NULL
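# Hedged sketch of the CEC energy described above for the "all" Gaussian
# family: E = sum_i p(Y_i) * (-ln p(Y_i) + H(Y_i || F_i)), where the
# cross-entropy of a cluster against its maximum-likelihood Gaussian is
# H = d/2 * ln(2*pi*e) + 1/2 * ln det(Sigma_ML). Illustrative only; the
# package evaluates the energy in compiled code.
cec.energy.all <- function(x, cluster) {
  d <- ncol(x)
  sum(sapply(unique(cluster), function(k) {
    xk <- x[cluster == k, , drop = FALSE]              # points of cluster k
    p  <- nrow(xk) / nrow(x)                           # cluster probability p(Y_k)
    S  <- stats::cov(xk) * (nrow(xk) - 1) / nrow(xk)   # ML covariance
    H  <- d / 2 * log(2 * pi * exp(1)) +
      0.5 * as.numeric(determinant(S)$modulus)         # cross-entropy H(Y_k || F_k)
    p * (-log(p) + H)
  }))
}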
/scratch/gouwar.j/cran-all/cranData/CEC/R/CEC-package.R
#' @title Cross-Entropy Clustering #' #' @aliases cec-class #' #' @description \code{cec} performs Cross-Entropy Clustering on a data matrix. #' See \code{Details} for an explanation of Cross-Entropy Clustering. #' #' @param x A numeric matrix of data. Each row corresponds to a distinct #' observation; each column corresponds to a distinct variable/dimension. It #' must not contain \code{NA} values. #' #' @param centers Either a matrix of initial centers or the number of initial #' centers (\code{k}, single number \code{cec(data, 4, ...)}) or a vector for #' variable number of centers (\code{cec(data, 3:10, ...)}). It must not #' contain \code{NA} values. #' #' If \code{centers} is a vector, \code{length(centers)} clusterings will be #' performed for each start (\code{nstart} argument) and the total number of #' clusterings will be \code{length(centers) * nstart}. #' #' If \code{centers} is a number or a vector, initial centers will be generated #' using a method depending on the \code{centers.init} argument. #' #' @param type The type (or types) of clustering (density family). This can be #' either a single value or a vector of length equal to the number of centers. #' Possible values are: "covariance", "fixedr", "spherical", "diagonal", #' "eigenvalues", "all" (default). #' #' Currently, if the \code{centers} argument is a vector, only a single type can #' be used. #' #' @param iter.max The maximum number of iterations of the clustering algorithm. #' #' @param nstart The number of clusterings to perform (with different initial #' centers). Only the best clustering (with the lowest cost) will be returned. #' A value grater than 1 is valid only if the \code{centers} argument is a #' number or a vector. #' #' If the \code{centers} argument is a vector, \code{length(centers)} #' clusterings will be performed for each start and the total number of #' clusterings will be \code{length(centers) * nstart}. #' #' If the split mode is on (\code{split = TRUE}), the whole procedure (initial #' clustering + split) will be performed \code{nstart} times, which may take #' some time. #' #' @param centers.init The method used to automatically initialize the centers. #' Possible values are: "kmeans++" (default) and "random". #' #' @param param The parameter (or parameters) specific to a particular type of #' clustering. Not all types of clustering require parameters. The types that #' require parameter are: "covariance" (matrix parameter), "fixedr" (numeric #' parameter), "eigenvalues" (vector parameter). This can be a vector or a list #' (when one of the parameters is a matrix or a vector). #' #' @param card.min The minimal cluster cardinality. If the number of #' observations in a cluster becomes lower than card.min, the cluster is #' removed. This argument can be either an integer number or a string ending #' with a percent sign (e.g. "5\%"). #' #' @param keep.removed If this parameter is TRUE, the removed clusters will be #' visible in the results as NA in the "centers" matrix (as well as the #' corresponding values in the list of covariances). #' #' @param interactive If \code{TRUE}, the result of clustering will be plotted after #' every iteration. #' #' @param threads The number of threads to use or "auto" to use the default #' number of threads (usually the number of available processing units/cores) #' when performing multiple starts (\code{nstart} parameter). 
#'
#' The execution of a single start is always performed by a single thread, thus
#' for \code{nstart = 1} only one thread will be used regardless of the value
#' of this parameter.
#'
#' @param split If \code{TRUE}, the function will attempt to discover new
#' clusters after the initial clustering, by trying to split single clusters
#' into two and checking whether this lowers the cost function.
#'
#' For each start (\code{nstart}), the initial clustering will be performed and
#' then splitting will be applied to the results. The number of starts in the
#' initial clustering before splitting is driven by the
#' \code{split.initial.starts} parameter.
#'
#' @param split.depth The cluster subdivision depth used in split mode. Usually,
#' a value lower than 10 is sufficient (when after each split, the new clusters
#' have similar sizes). For some data, splitting may often produce clusters
#' that will not be split further; in that case a higher value of
#' \code{split.depth} is required.
#'
#' @param split.tries The number of attempts that are made when trying to split
#' a cluster in split mode.
#'
#' @param split.limit The maximum number of centers to be discovered in split
#' mode.
#'
#' @param split.initial.starts The number of 'standard' starts performed before
#' starting the splitting process.
#'
#' @param readline Used only in the interactive mode. If \code{readline} is
#' TRUE, at each iteration, before plotting it will wait for the user to press
#' <Return> instead of the standard 'before plotting' waiting
#' (\code{graphics::par(ask = TRUE)}).
#'
#' @details Cross-Entropy Clustering (CEC) aims to partition \emph{m} points
#' into \emph{k} clusters so as to minimize the cost function (energy
#' \emph{\strong{E}} of the clustering) by switching the points between
#' clusters. The presented method is based on the Hartigan approach, where we
#' remove clusters whose cardinalities have decreased below some small prefixed
#' level.
#'
#' The energy function \emph{\strong{E}} is given by:
#'
#' \deqn{E(Y_1,\mathcal{F}_1;...;Y_k,\mathcal{F}_k) = \sum\limits_{i=1}^{k}
#' p(Y_i) \cdot (-ln(p(Y_i)) + H^{\times}(Y_i\|\mathcal{F}_i))}{ E(Y1, F1; ...;
#' Yk, Fk) = \sum(p(Yi) * (-ln(p(Yi)) + H(Yi | Fi)))}
#'
#' where \emph{Yi} denotes the \emph{i}-th cluster, \emph{p(Yi)} is the ratio
#' of the number of points in the \emph{i}-th cluster to the total number of
#' points, and \emph{\strong{H}(Yi|Fi)} is the value of cross-entropy, which
#' represents the internal cluster energy function of data \emph{Yi} defined
#' with respect to a certain Gaussian density family \emph{Fi}, which encodes
#' the type of clustering we consider.
#'
#' The value of the internal energy function \emph{\strong{H}} depends on the
#' covariance matrix (computed using maximum likelihood) and the mean (in the
#' case of the \emph{mean} model) of the points in the cluster. Seven
#' implementations of \emph{\strong{H}} have been proposed (expressed as a type
#' - model - of the clustering):
#'
#' \describe{
#' \item{"all": }{All Gaussian densities. Data will form ellipsoids with
#' arbitrary radii.}
#' \item{"covariance": }{Gaussian densities with a fixed given covariance. The
#' shapes of clusters depend on the given covariance matrix (additional
#' parameter).}
#' \item{"fixedr": }{Special case of 'covariance', where the covariance matrix
#' equals \emph{rI} for the given \emph{r} (additional parameter). The
#' clustering will have a tendency to divide data into balls with approximate
#' radius proportional to the square root of \emph{r}.}
#' \item{"spherical": }{Spherical (radial) Gaussian densities (covariance
#' proportional to the identity). Clusters will have a tendency to form balls
#' of arbitrary sizes.}
#' \item{"diagonal": }{Gaussian densities with diagonal covariance. Data will
#' form ellipsoids with radii parallel to the coordinate axes.}
#' \item{"eigenvalues": }{Gaussian densities with a covariance matrix having
#' fixed eigenvalues (additional parameter). The clustering will try to divide
#' the data into fixed-shaped ellipsoids rotated by an arbitrary angle.}
#' \item{"mean": }{Gaussian densities with a fixed mean. Data will be covered
#' with ellipsoids with fixed centers.}
#' }
#'
#' The implementation of the \code{cec} function allows mixing of clustering
#' types.
#'
#' @return An object of class \code{cec} with the following components:
#' \code{data}, \code{cluster}, \code{probability}, \code{centers},
#' \code{cost.function}, \code{cost}, \code{nclusters}, \code{iterations},
#' \code{covariances}, \code{covariances.model}, \code{means.model},
#' \code{time}.
#'
#' @seealso \code{\link{CEC-package}}, \code{\link{plot.cec}},
#' \code{\link{print.cec}}
#'
#' @references Spurek, P. and Tabor, J. (2014) Cross-Entropy Clustering.
#' \emph{Pattern Recognition} \bold{47, 9} 3046--3059.
#'
#' @keywords cluster models multivariate package
#'
#' @examples
#' ## Example of clustering a random data set of 3 Gaussians, with 10 random
#' ## initial centers and a minimal cluster size of 7% of the total data set.
#'
#' m1 <- matrix(rnorm(2000, sd = 1), ncol = 2)
#' m2 <- matrix(rnorm(2000, mean = 3, sd = 1.5), ncol = 2)
#' m3 <- matrix(rnorm(2000, mean = 3, sd = 1), ncol = 2)
#' m3[, 2] <- m3[, 2] - 5
#' m <- rbind(m1, m2, m3)
#'
#' plot(m, cex = 0.5, pch = 19)
#'
#' ## Clustering result:
#' Z <- cec(m, 10, iter.max = 100, card.min = "7%")
#' plot(Z)
#'
#' # Result:
#' Z
#'
#' ## Example of clustering a mouse-like set using spherical Gaussian densities.
#' m <- mouseset(n = 7000, r.head = 2, r.left.ear = 1.1, r.right.ear = 1.1,
#' left.ear.dist = 2.5, right.ear.dist = 2.5, dim = 2)
#' plot(m, cex = 0.5, pch = 19)
#' ## Clustering result:
#' Z <- cec(m, 3, type = 'sp', iter.max = 100, nstart = 4, card.min = '5%')
#' plot(Z)
#' # Result:
#' Z
#'
#' ## Example of clustering data set 'Tset' using the 'eigenvalues' clustering
#' ## type.
#' data(Tset)
#' plot(Tset, cex = 0.5, pch = 19)
#' centers <- init.centers(Tset, 2)
#' ## Clustering result:
#' Z <- cec(Tset, 5, 'eigenvalues', param = c(0.02, 0.002), nstart = 4)
#' plot(Z)
#' # Result:
#' Z
#'
#' ## Example of using the cec split method starting with a single cluster.
#' data(mixShapes)
#' plot(mixShapes, cex = 0.5, pch = 19)
#' ## Clustering result:
#' Z <- cec(mixShapes, 1, split = TRUE)
#' plot(Z)
#' # Result:
#' Z
#'
#' @export
cec <- function(x, centers, type = c("covariance", "fixedr", "spherical",
    "diagonal", "eigenvalues", "mean", "all"), iter.max = 25, nstart = 1,
    param, centers.init = c("kmeans++", "random"), card.min = "5%",
    keep.removed = FALSE, interactive = FALSE, threads = 1, split = FALSE,
    split.depth = 8, split.tries = 5, split.limit = 100,
    split.initial.starts = 1, readline = TRUE)
{
    ### CHECK ARGUMENTS
    if (!methods::hasArg(x))
        stop("Missing required argument: 'x'.")
    if (!methods::hasArg(centers)) {
        centers <- 1
        split <- TRUE
    }
    if (iter.max < 0)
        stop("Illegal argument: 'iter.max' must not be negative.")
    if (!is.matrix(x))
        stop("Illegal argument: 'x' must be a matrix.")
    if (ncol(x) < 1)
        stop("Illegal argument: 'x' must have at least 1 column.")
    if (nrow(x) < 1)
        stop("Illegal argument: 'x' must have at least 1 row.")
    if (!all(stats::complete.cases(x)))
        stop("Illegal argument: 'x' should not contain NA values.")
    if (!all(stats::complete.cases(centers)))
        stop("Illegal argument: 'centers' should not contain NA values.")
    var.centers <- NULL
    centers.mat <- NULL
    if (!is.matrix(centers)) {
        if (length(centers) > 1) {
            var.centers <- centers
        } else {
            var.centers <- c(centers)
        }
        for (i in centers) {
            if (i < 1) {
                stop("Illegal argument: 'centers' must only contain integers greater than 0.")
            }
        }
        centers.initialized <- FALSE
    } else {
        if (ncol(x) != ncol(centers)) {
            stop("Illegal argument: 'x' and 'centers' must have the same number of columns.")
        }
        if (nrow(centers) < 1) {
            stop("Illegal argument: 'centers' must have at least 1 row.")
        }
        var.centers <- c(nrow(centers))
        centers.mat <- centers
        centers.initialized <- TRUE
    }
    if (!(attr(regexpr("[\\.0-9]+%{0,1}", perl = TRUE, text = card.min),
        "match.length") == nchar(card.min))) {
        stop("Illegal argument: 'card.min' in wrong format.")
    }
    if (centers.initialized) {
        init.method.name <- "none"
    } else if (methods::hasArg(centers.init)) {
        init.method.name <- switch(match.arg(centers.init),
            `kmeans++` = "kmeanspp", random = "random")
    } else {
        init.method.name <- "kmeanspp"
    }
    if (!methods::hasArg(type)) {
        type <- "all"
    }
    if (length(type) > 1 && length(var.centers) > length(type)) {
        stop("Illegal argument: when 'type' has length > 1, its length must be greater than or equal to the length of the 'centers' vector.")
    }
    ### INTERACTIVE MODE
    if (interactive) {
        if (split == TRUE) {
            stop("The interactive mode is not available in split mode.")
        }
        if (length(var.centers) > 1) {
            stop("The interactive mode is not available for variable centers.")
        }
        if (nstart > 1) {
            stop("The interactive mode is not available for multiple starts.")
        }
        return(cec.interactive(x, centers, type, iter.max, 1, param,
            centers.init, card.min, keep.removed, readline))
    }
    ### NON-INTERACTIVE MODE
    n <- ncol(x)
    m <- nrow(x)
    if (substr(card.min, nchar(card.min), nchar(card.min)) == "%") {
        card.min <- as.integer(as.double(substr(card.min, 1,
            nchar(card.min) - 1)) * m/100)
    } else {
        card.min <- as.integer(card.min)
    }
    card.min <- max(card.min, n + 1)
    k <- max(var.centers)
    startTime <- proc.time()
    centers.r <- list(init.method = init.method.name,
        var.centers = as.integer(var.centers), mat = centers.mat)
    if (threads == "auto") {
        threads <- 0
    }
    control.r <- list(min.card = as.integer(card.min),
        max.iters = as.integer(iter.max), starts = as.integer(nstart),
        threads = as.integer(threads))
    models.r <- create.cec.params.for.models(k, n, type, param)
    if (split) {
        for (i in 1:k) {
            if (models.r[[i]]$type != models.r[[1]]$type) {
                stop("Mixing model types is currently not supported in split mode.")
            }
        }
        split.r <- list(depth = as.integer(split.depth),
            limit = as.integer(split.limit), tries = as.integer(split.tries),
            initial.starts = as.integer(split.initial.starts))
        Z <- .Call(cec_split_r, x, centers.r, control.r, models.r, split.r)
    } else {
        Z <- .Call(cec_r, x, centers.r, control.r, models.r)
    }
    k.final <- nrow(Z$centers)
    execution.time <- as.vector((proc.time() - startTime))[3]
    Z$centers[is.nan(Z$centers)] <- NA
    tab <- tabulate(Z$cluster)
    probability <- tab / m
    # TODO: change this temporary hack
    model.one <- models.r[[1]]
    if (!keep.removed) {
        cluster.map <- 1:k.final
        na.rows <- which(is.na(Z$centers[, 1]))
        if (length(na.rows) > 0) {
            for (i in 1:length(na.rows)) {
                for (j in na.rows[i]:k.final) {
                    cluster.map[j] <- cluster.map[j] - 1
                }
            }
            Z$cluster <- as.integer(vapply(Z$cluster, function(asgn) {
                as.integer(cluster.map[asgn])
            }, 0))
            Z$centers <- matrix(Z$centers[-na.rows, ], ncol = n)
            Z$covariances <- Z$covariances[-na.rows]
            probability <- probability[-na.rows]
            models.r <- models.r[-na.rows]
        }
    }
    covs <- length(Z$covariances)
    covariances.model <- rep(list(NA), covs)
    means.model <- Z$centers
    # TODO: change this temporary hack
    if (split) {
        models.r <- rep(list(model.one), covs)
    }
    for (i in 1:covs) {
        covariances.model[[i]] <- model.covariance(models.r[[i]]$type,
            Z$covariances[[i]], Z$centers[i, ], models.r[[i]]$params)
        means.model[i, ] <- model.mean(models.r[[i]]$type, Z$centers[i, ],
            models.r[[i]]$params)
    }
    structure(
        list(data = x,
            cluster = Z$cluster,
            centers = Z$centers,
            probability = probability,
            cost.function = Z$energy,
            # both names hold the final energy: print.cec(),
            # cec.interactive() and the package tests read $cost
            cost = Z$energy,
            nclusters = Z$nclusters,
            iterations = Z$iterations,
            time = execution.time,
            covariances = Z$covariances,
            covariances.model = covariances.model,
            means.model = means.model),
        class = "cec")
}

#' @title Interactive Cross-Entropy Clustering
#'
#' @description Internal function to run \code{\link{cec}} interactively.
#'
#' @noRd
cec.interactive <- function(x, centers, type = c("covariance", "fixedr",
    "spherical", "diagonal", "eigenvalues", "mean", "all"), iter.max = 40,
    nstart = 1, param, centers.init = c("kmeans++", "random"),
    card.min = "5%", keep.removed = FALSE, readline = TRUE)
{
    old.ask <- graphics::par()["ask"]
    n <- ncol(x)
    if (n != 2) {
        stop("The interactive mode is available only for 2-dimensional data.")
    }
    i <- 0
    if (!is.matrix(centers)) {
        centers <- init.centers(x, centers, centers.init)
    }
    if (readline) {
        ignore <- readline(prompt = "After each iteration you may:\n - press <Enter> for next iteration \n - write number <n> (may be negative one) and press <Enter> for next <n> iterations \n - write 'q' and abort execution.\n Press <Return>.\n")
        graphics::par(ask = FALSE)
    } else {
        graphics::par(ask = TRUE)
    }
    while (TRUE) {
        Z <- cec(x, centers, type, i, 1, param, centers.init, card.min,
            keep.removed, FALSE)
        if (i > Z$iterations || i >= iter.max) {
            break
        }
        desc <- ""
        if (i == 0) {
            desc <- "(position of center means before first iteration)"
        }
        cat("Iterations:", Z$iterations, desc, "cost function:", Z$cost, " \n ")
        plot(Z, ellipses = TRUE)
        if (readline) {
            line <- readline(prompt = "Press <Enter> OR write number OR write 'q':")
            lineint <- suppressWarnings(as.integer(line))
            if (!is.na(lineint)) {
                i <- i + lineint - 1
                if (i < 0) {
                    i <- -1
                }
            } else if (line == "q" || line == "quit") {
                break
            }
        }
        i <- i + 1
    }
    plot(Z, ellipses = TRUE)
    if (readline) {
        ignore <- readline(prompt = "Press <Enter>:")
    }
    graphics::par(old.ask)
    Z
}
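# ---------------------------------------------------------------------------
# Illustration only (not part of the package API): a minimal pure-R sketch of
# the cost function documented above for the "spherical" model, assuming the
# standard CEC cross-entropy H(Y|F) = (n/2) * log(2 * pi * e * tr(S) / n),
# where S is the MLE covariance of a cluster and n is the dimension. It
# assumes every cluster holds at least two points.
cec.energy.spherical.sketch <- function(x, cluster)
{
    n <- ncol(x)
    m <- nrow(x)
    sum(vapply(split.data.frame(x, cluster), function(Y) {
        p <- nrow(Y) / m                                # p(Y_i)
        S <- stats::cov(Y) * (nrow(Y) - 1) / nrow(Y)    # MLE covariance
        H <- n / 2 * log(2 * pi * exp(1) * sum(diag(S)) / n)
        p * (-log(p) + H)                               # p * (-ln(p) + H)
    }, numeric(1)))
}
# For a converged spherical fit Z <- cec(x, k, type = "sp"), the value of
# cec.energy.spherical.sketch(x, Z$cluster) should be close to Z$cost.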
/scratch/gouwar.j/cran-all/cranData/CEC/R/cec.R
#' @title Resolve Clustering Type
#'
#' @description Internal function to resolve clustering types, possibly
#' abbreviated, to their full names.
#'
#' @noRd
resolve.type <- function(type)
{
    types <- c("covariance", "fixedr", "spherical", "diagonal", "eigenvalues",
        "mean", "all")
    match.arg(type, types)
}

#' @title Parameters for C
#'
#' @description Internal function to prepare the clustering parameters for the
#' C function.
#'
#' @noRd
create.cec.params.for.models <- function(k, n, type.arg, param.arg)
{
    models <- replicate(k, list())
    types <- vapply(type.arg, resolve.type, "")
    params <- NULL
    if (methods::hasArg(param.arg)) {
        params <- param.arg
    }
    if (length(types) == 1) {
        types <- rep(types, k)
        if (methods::hasArg(param.arg)) {
            params <- rep(list(unlist(param.arg)), k)
            params <- params[!params %in% list(NULL, NA)]
        }
    }
    if (k != length(types)) {
        stop("Illegal argument: illegal length of 'type' vector.")
    }
    idx <- 0
    for (i in 1:length(types)) {
        type <- types[i]
        models[[i]]$type <- type
        models[[i]]$params <- list()
        if (type == resolve.type("covariance")) {
            idx <- idx + 1
            if (length(params) < idx) {
                stop("Illegal argument: illegal 'param' length.")
            }
            cov <- params[[idx]]
            if (!is.array(cov)) {
                stop("Illegal argument: illegal parameter for 'covariance' type.")
            }
            if (ncol(cov) != n) {
                stop("Illegal argument: illegal parameter for 'covariance' type.")
            }
            if (nrow(cov) != n) {
                stop("Illegal argument: illegal parameter for 'covariance' type.")
            }
            if (!try.chol(cov))
                stop("Illegal argument: illegal parameter for 'covariance' type - matrix must be positive-definite.")
            cov.inv <- solve(cov)
            models[[i]]$params <- list(cov = cov, cov.inv = cov.inv)
        } else if (type == resolve.type("fixedr")) {
            idx <- idx + 1
            if (length(params) < idx) {
                stop("Illegal argument: illegal 'param' length.")
            }
            r <- params[[idx]]
            if (length(r) != 1) {
                stop("Illegal argument: illegal parameter for 'fixedr' type.")
            }
            if (!is.numeric(r)) {
                stop("Illegal argument: illegal parameter for 'fixedr' type.")
            }
            if (!r > 0) {
                stop("Illegal argument: illegal parameter for 'fixedr' type.")
            }
            models[[i]]$params <- list(r = r)
        } else if (type == resolve.type("eigenvalues")) {
            idx <- idx + 1
            if (length(params) < idx) {
                stop("Illegal argument: illegal 'param' length.")
            }
            evals <- params[[idx]]
            if (length(evals) != n) {
                stop("Illegal argument: illegal parameter for 'eigenvalues' type: invalid length.")
            }
            if (!all(evals > 0)) {
                stop("Illegal argument: illegal parameter for 'eigenvalues' type: all values must be greater than 0.")
            }
            models[[i]]$params <- list(eigenvalues = sort(evals))
        } else if (type == resolve.type("mean")) {
            idx <- idx + 1
            if (length(params) < idx) {
                stop("Illegal argument: illegal 'param' length.")
            }
            mean <- params[[idx]]
            if (length(mean) != n) {
                stop("Illegal argument: illegal parameter for 'mean' type: invalid length.")
            }
            models[[i]]$params <- list(mean = mean)
        }
    }
    models
}

#' @title Cholesky Decomposition Try
#'
#' @description Internal function to check whether the Cholesky decomposition
#' of a matrix succeeds (i.e. whether the matrix is positive-definite).
#'
#' @noRd
try.chol <- function(mat)
{
    !inherits(try(chol(mat), silent = TRUE), "try-error")
}
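# ---------------------------------------------------------------------------
# Illustration only (hypothetical helper, not part of the package): shows how
# 'type' and 'param' line up when mixing models, mirroring the package tests.
# Parameter-taking types ("covariance", "fixedr", "eigenvalues", "mean")
# consume entries of 'param' in order; "all", "spherical" and "diagonal"
# take none.
demo.mixed.params <- function()
{
    create.cec.params.for.models(
        k = 4, n = 3,
        type.arg  = c("all", "eigenvalues", "fixedr", "eigenvalues"),
        param.arg = list(c(0.8, 0.7, 0.6), 1.0, c(0.7, 0.5, 0.3)))
    # the first eigenvalue vector goes to cluster 2, the radius to cluster 3,
    # and the second eigenvalue vector to cluster 4
}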
/scratch/gouwar.j/cran-all/cranData/CEC/R/cec.params.R
#' @title Ellipse #' #' @description Internal function to compute points on an ellipse's perimeter #' for a given mean vector and covariance matrix. #' #' @noRd ellipse <- function(mean, cov, npoints = 250) { E <- eigen(cov, symmetric = TRUE) eve <- E$vec eva <- E$val r <- seq(-pi, pi, len = npoints) Xa <- 2 * sqrt(eva[1]) * cos(r) Ya <- 2 * sqrt(eva[2]) * sin(r) mm <- c(rep(mean[1], npoints), rep(mean[2], npoints)) means.multiplied <- matrix(mm, nrow = length(Ya), ncol = 2) pts <- cbind(Xa,Ya) pts <- pts %*% eve pts[, 1] <- pts[, 1] * -1 pts <- pts + means.multiplied pts }
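# ---------------------------------------------------------------------------
# Illustration only: ellipse() traces a contour at two standard deviations
# along the covariance eigenvectors; e.g. for a sample covariance:
#   S <- cov(matrix(rnorm(400), ncol = 2))
#   pts <- ellipse(mean = c(0, 0), cov = S)
#   plot(pts, type = "l", asp = 1)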
/scratch/gouwar.j/cran-all/cranData/CEC/R/ellipse.R
#' @title Cluster Center Initialization
#'
#' @description \code{init.centers} automatically initializes the centers of the
#' clusters before running the Cross-Entropy Clustering algorithm.
#'
#' @param x A numeric matrix of data. Each row corresponds to a distinct
#' observation; each column corresponds to a distinct variable/dimension. It
#' must not contain \code{NA} values.
#'
#' @param k An integer indicating the number of cluster centers to initialize.
#'
#' @param method A character string indicating the initialization method to use.
#' It can take the following values:
#' \describe{
#'   \item{"kmeans++": }{the centers are selected using the k-means++ algorithm.}
#'   \item{"random": }{the centers are randomly selected among the values in
#'   \code{x}.}
#' }
#'
#' @return A matrix with \code{k} rows and \code{ncol(x)} columns.
#'
#' @references Arthur, D., & Vassilvitskii, S. (2007). k-means++: the advantages
#' of careful seeding. Proceedings of the Eighteenth Annual ACM-SIAM Symposium
#' on Discrete Algorithms, 1027–1035.
#'
#' @examples
#' ## See the examples provided with the cec() function.
#'
#' @export
init.centers <- function(x, k, method = c("kmeans++", "random"))
{
    method <- switch(
        match.arg(method),
        "kmeans++" = "kmeanspp",
        "random" = "random",
        stop("Unknown initialization method."))
    if (!is.matrix(x)) {
        stop("init.centers: 'x' should be a matrix.")
    }
    if (k < 1) {
        stop("init.centers: 'k' should be greater than 0.")
    }
    .Call(cec_init_centers_r, x, as.integer(k), method)
}
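# ---------------------------------------------------------------------------
# Illustration only (not the package's C implementation): a minimal pure-R
# sketch of k-means++ seeding, following Arthur & Vassilvitskii (2007) as
# cited above -- after a uniformly chosen first center, each new center is
# drawn with probability proportional to the squared distance to the nearest
# center chosen so far.
init.centers.sketch <- function(x, k)
{
    centers <- x[sample(nrow(x), 1), , drop = FALSE]
    while (nrow(centers) < k) {
        # squared distance of every point to its nearest chosen center
        d2 <- apply(x, 1, function(p) min(colSums((t(centers) - p)^2)))
        centers <- rbind(centers, x[sample(nrow(x), 1, prob = d2), , drop = FALSE])
    }
    centers
}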
/scratch/gouwar.j/cran-all/cranData/CEC/R/init.centers.R
#' @title Model Mean
#'
#' @description Internal function to extract the model mean from the output of
#' the Cross-Entropy Clustering algorithm.
#'
#' @noRd
model.mean <- function(type, center, param)
{
    if (anyNA(center)) {
        # 'center' is a single row passed as a vector, so return an NA vector
        # of the same length (ncol() of a vector would be NULL)
        rep(NA_real_, length(center))
    } else if (type == resolve.type("mean")) {
        param$mean
    } else {
        center
    }
}

#' @title Model Covariance
#'
#' @description Internal function to extract the model covariance matrix from
#' the output of the Cross-Entropy Clustering algorithm.
#'
#' @noRd
model.covariance <- function(type, cov, mean, param)
{
    if (anyNA(cov)) {
        matrix(NA, nrow(cov), ncol(cov))
    } else if (type == resolve.type("covariance")) {
        param$cov
    } else if (type == resolve.type("fixedr")) {
        diag(ncol(cov)) * param$r
    } else if (type == resolve.type("spherical")) {
        # identity scaled by the mean of the diagonal (trace / dimension)
        diag(ncol(cov)) * sum(diag(ncol(cov)) * cov) / ncol(cov)
    } else if (type == resolve.type("diagonal")) {
        cov * diag(ncol(cov))
    } else if (type == resolve.type("eigenvalues")) {
        V <- eigen(cov, symmetric = TRUE)$vec
        D <- diag(sort(param$eigenvalues, decreasing = TRUE))
        V %*% D %*% t(V)
    } else if (type == resolve.type("mean")) {
        m <- param$mean
        mean_diff <- m - mean
        cov + (mean_diff %*% t(mean_diff))
    } else if (type == resolve.type("all")) {
        cov
    }
}
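# ---------------------------------------------------------------------------
# Illustration only: for the "eigenvalues" model, the model covariance keeps
# the sample eigenvectors V and substitutes the fixed eigenvalues, i.e.
# V %*% D %*% t(V); a quick sanity check:
#   S <- cov(matrix(rnorm(600), ncol = 2))
#   M <- model.covariance("eigenvalues", S, c(0, 0),
#                         param = list(eigenvalues = c(0.1, 0.5)))
#   eigen(M)$values  # 0.5 and 0.1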
/scratch/gouwar.j/cran-all/cranData/CEC/R/model.covariance.R
#' @title Plot CEC Objects
#'
#' @description \code{plot.cec} presents the results from the \code{\link{cec}}
#' function in the form of a plot. The colors of the data points represent the
#' cluster they belong to. Ellipses are drawn to represent the covariance
#' (of either the model or the sample) of each cluster.
#'
#' @param x A \code{\link{cec}} object resulting from the \code{\link{cec}}
#' function.
#'
#' @param col A specification for the default plotting color of the points in
#' the clusters. See \code{\link{par}} for more details.
#'
#' @param cex A numerical value giving the amount by which plotting text and
#' symbols should be magnified relative to the default. See \code{\link{par}}
#' for more details.
#'
#' @param pch Either an integer specifying a symbol or a single character to be
#' used as the default in plotting points. See \code{\link{par}} for more
#' details.
#'
#' @param cex.centers The same as \code{cex}, except that it applies only to the
#' centers' means.
#'
#' @param pch.centers The same as \code{pch}, except that it applies only to the
#' centers' means.
#'
#' @param ellipses If this parameter is TRUE, covariance ellipses will be drawn.
#'
#' @param ellipses.lwd The line width of the covariance ellipses. See \code{lwd}
#' in \code{\link{par}} for more details.
#'
#' @param ellipses.lty The line type of the covariance ellipses. See \code{lty}
#' in \code{\link{par}} for more details.
#'
#' @param model If this parameter is TRUE, the model (expected) covariance will
#' be used for each cluster instead of the sample covariance (MLE) of the
#' points in the cluster, when drawing the covariance ellipses.
#'
#' @param xlab A label for the x axis. See \link{plot} for more details.
#'
#' @param ylab A label for the y axis. See \link{plot} for more details.
#'
#' @param ... Additional arguments passed to \code{plot} when drawing data
#' points.
#'
#' @return This function returns nothing.
#'
#' @seealso \code{\link{cec}}, \code{\link{print.cec}}
#'
#' @keywords hplot
#'
#' @examples
#' ## See the examples provided with the cec() function.
#'
#' @export
plot.cec <- function(x, col, cex = 0.5, pch = 19, cex.centers = 1,
    pch.centers = 8, ellipses = TRUE, ellipses.lwd = 4, ellipses.lty = 2,
    model = TRUE, xlab, ylab, ...)
{
    if (ncol(x$data) != 2) {
        stop("Plotting is available only for 2-dimensional data.")
    }
    if (!methods::hasArg(col)) {
        col <- x$cluster
    }
    if (!is.null(colnames(x$data))) {
        xl <- colnames(x$data)[1]
        yl <- colnames(x$data)[2]
    } else {
        xl <- "x"
        yl <- "y"
    }
    if (methods::hasArg(xlab)) {
        xl <- xlab
    }
    if (methods::hasArg(ylab)) {
        yl <- ylab
    }
    plot(x$data, col = col, cex = cex, pch = pch, xlab = xl, ylab = yl, ...)
    if (model) {
        covs <- x$covariances.model
        means <- x$means.model
    } else {
        covs <- x$covariances
        means <- x$centers
    }
    # plot the centers matching the 'model' choice (model means or sample
    # centers)
    graphics::points(means, cex = cex.centers, pch = pch.centers)
    if (ellipses) {
        for (i in 1:nrow(means))
            if (!is.na(means[i, 1])) {
                cov <- covs[[i]]
                pts <- ellipse(means[i, ], cov)
                graphics::lines(pts, lwd = ellipses.lwd, lty = ellipses.lty)
            }
    }
}
/scratch/gouwar.j/cran-all/cranData/CEC/R/plot.cec.R
#' @title Printing Cross-Entropy Clusters
#'
#' @description Print objects of class \code{\link{cec}}.
#'
#' @param x An object produced by \code{\link{cec}}.
#'
#' @param ... Ignored.
#'
#' @return This function returns nothing.
#'
#' @seealso \code{\link{cec}}, \code{\link{plot.cec}}
#'
#' @keywords print
#'
#' @examples
#' ## See the examples provided with the cec() function.
#'
#' @export
print.cec <- function(x, ...)
{
    cat("CEC clustering result: \n")
    cat("\nProbability vector:\n")
    print(x$probability)
    cat("\nMeans of clusters:\n")
    print(x$centers)
    cat("\nCost function:\n")
    print(x$cost)
    cat("\nNumber of clusters:\n")
    print(x$nclusters)
    cat("\nNumber of iterations:\n")
    print(x$iterations)
    cat("\nComputation time:\n")
    print(x$time)
    cat("\nAvailable components:\n")
    print(c("data", "cluster", "probability", "centers", "cost.function",
        "cost", "nclusters", "iterations", "covariances", "covariances.model",
        "means.model", "time"))
}
/scratch/gouwar.j/cran-all/cranData/CEC/R/print.cec.R
#' @title Tests
#'
#' @description Internal function to run tests on the output of \code{\link{cec}}.
#'
#' @noRd
run.cec.tests <- function()
{
    errors <- 0
    tests <- list.files(system.file("cec_tests", package = "CEC"))
    for (test in tests)
    {
        if (grepl("\\.R$", test, perl = TRUE))
        {
            testenv <- new.env()
            local({
                # Just to trick R CMD check...
                testname <- NULL
                setup <- NULL
            }, testenv)
            source(system.file("cec_tests", test, package = "CEC"), local = testenv)
            errors <- errors + local({
                local.errors <- 0
                cat(paste("Test:", testname, "\n"))
                fs <- utils::lsf.str()
                # Execute the setup function if it exists
                if ("setup" %in% fs) {
                    eval(expr = body(setup), envir = testenv)
                }
                for (fn in fs)
                {
                    # Test cases
                    if (grepl("^test\\.", fn))
                    {
                        cat(paste("---- ", fn))
                        fbody <- body(eval(parse(text = fn)))
                        # Evaluate the test case function and catch (and count) errors
                        local.errors <- local.errors + tryCatch({
                            eval(expr = fbody, envir = testenv)
                            cat(": OK\n")
                            0
                        }, error = function(er) {
                            cat(": FAILED\n")
                            warning(er$message, immediate. = TRUE, call. = FALSE)
                            1
                        })
                    }
                }
                local.errors
            }, envir = testenv)
        }
    }
    if (errors > 0) {
        stop("One or more tests failed.")
    }
}

#' @title Print Message
#'
#' @description Internal function to print messages.
#'
#' @noRd
printmsg <- function(msg)
{
    if (!is.null(msg)) paste(msg, ":") else ""
}

#' @title Check Equality of Numeric Vectors
#'
#' @description Internal function to check whether numeric vectors are equal
#' within a tolerance.
#'
#' @noRd
checkNumericVectorEquals <- function(ex, ac, msg = NULL, tolerance = .Machine$double.eps ^ 0.5)
{
    if (length(ex) != length(ac)) {
        stop(paste(printmsg(msg), "The vectors have different lengths."))
    }
    for (i in seq(1, length(ex))) {
        if (!isTRUE(all.equal.numeric(ex[i], ac[i], tolerance = tolerance))) {
            stop(paste(printmsg(msg), "The vectors differ at index:", i,
                ", expected:", ex[i], ", actual:", ac[i]))
        }
    }
}

#' @title Check Equality of Numeric Values
#'
#' @description Internal function to check whether numeric values are equal
#' within a tolerance.
#'
#' @noRd
checkNumericEquals <- function(ex, ac, msg = NULL, tolerance = .Machine$double.eps ^ 0.5)
{
    if (!is.numeric(ex)) {
        stop(paste(printmsg(msg), "The expression:", ex, "is not of numeric type."))
    }
    if (!is.numeric(ac)) {
        stop(paste(printmsg(msg), "The expression:", ac, "is not of numeric type."))
    }
    if (!isTRUE(all.equal.numeric(ex, ac, tolerance = tolerance))) {
        stop(paste(printmsg(msg), "The numeric values are different: expected:", ex,
            ", actual:", ac, ", difference:", abs(ex - ac)))
    }
}

#' @title Check Equality of Values
#'
#' @description Internal function to check whether values are identical.
#'
#' @noRd
checkEquals <- function(ex, ac, msg = NULL)
{
    if (!isTRUE(identical(ex, ac))) {
        stop(paste(printmsg(msg), "The values are not identical: expected:", ex,
            ", actual:", ac))
    }
}

#' @title Check Truthiness
#'
#' @description Internal function to check whether an expression evaluates to
#' TRUE.
#'
#' @noRd
checkTrue <- function(exp, msg = NULL)
{
    if (!is.logical(exp)) {
        stop(paste(printmsg(msg), "The expression is not of logical type."))
    }
    if (!isTRUE(exp)) {
        stop(paste(printmsg(msg), "The expression is not TRUE."))
    }
}

#' @title Check Equality of Numeric Matrices
#'
#' @description Internal function to check whether numeric matrices are equal
#' within a tolerance.
#' #' @noRd checkNumericMatrixEquals <- function(ex, ac, msg = NULL, tolerance = .Machine$double.eps ^ 0.5) { if (nrow(ex) != nrow(ac)) { stop (paste(printmsg(msg), "The matrices have different dimensions.")) } if (ncol(ex) != ncol(ac)) { stop (paste(printmsg(msg), "The matrices have different dimensions.")) } for (i in seq(1, nrow(ex))){ for (j in seq(1, ncol(ex))) { if (!isTRUE(all.equal.numeric(ex[i, j], ac[i, j], tolerance=tolerance))) { stop (paste(printmsg(msg), "The matrices differ at row:", i, " col:", j, ": expected:", ex[i, j], ", actual:",ac[i, j])) } } } } #' @title Maximum Likelihood of Covariance Matrix #' #' @description Internal function to compute the maximum likelihood estimate of #' a covariance matrix. #' #' @noRd cov.mle <- function(M) { mean <- colMeans(M) mat <- matrix(0, ncol(M), ncol(M)) for (i in seq(1, nrow(M))) { v <- M[i,] mat <- mat + (t(t(v - mean)) %*% t(v - mean)) } mat / nrow(M) }
/scratch/gouwar.j/cran-all/cranData/CEC/R/tests.R
#' @title Ball
#'
#' @description Internal function to generate a cluster of points uniformly
#' distributed inside a ball (a disc when \code{dim = 2}).
#'
#' @noRd
ball <- function(n = 4000, r = 1, dim = 2)
{
    M <- matrix(0, n, dim)
    count <- 0
    rr <- r ^ 2
    while (count < n)
    {
        p <- stats::runif(dim, -r, r)
        if (sum(p ^ 2) <= rr)
        {
            count <- count + 1
            M[count, ] <- p
        }
    }
    M
}

#' @title Volume of a Ball
#'
#' @description Internal function to compute the volume of a ball in \code{n}
#' dimensions.
#'
#' @noRd
nballvolume <- function(r, n)
{
    k <- as.integer(n / 2)
    if (n %% 2 == 0) {
        pi ^ k / factorial(k) * r ^ n
    } else {
        2 * factorial(k) * (4 * pi) ^ k / factorial(n) * r ^ n
    }
}

#' @title Mouse
#'
#' @description \code{mouseset} generates a cluster of points uniformly
#' distributed inside a "mouse head" shape.
#'
#' @param n The number of points (default: 4000).
#'
#' @param r.head The radius of the mouse's head (default: 2).
#'
#' @param r.left.ear,r.right.ear The radii of the left and right ear of the
#' mouse's head (default: 1.1).
#'
#' @param left.ear.dist,right.ear.dist The distance between the center of the
#' mouse's head and the center of the left and right ear (default: 2.5).
#'
#' @param dim The dimensionality of the mouse's head (default: 2).
#'
#' @return A matrix with \code{n} rows and \code{dim} columns.
#'
#' @examples
#' plot(mouseset())
#'
#' @export
mouseset <- function(n = 4000, r.head = 2, r.left.ear = 1.1, r.right.ear = 1.1,
    left.ear.dist = 2.5, right.ear.dist = 2.5, dim = 2)
{
    vh <- nballvolume(r.head, dim)
    vl <- nballvolume(r.left.ear, dim)
    vr <- nballvolume(r.right.ear, dim)
    if (dim < 2) {
        stop("Illegal argument: 'dim' must be strictly greater than 1.")
    }
    pos.h <- rep(0, dim)
    pos.l <- pos.h
    pos.r <- pos.h
    l.offset <- left.ear.dist / sqrt(2)
    r.offset <- right.ear.dist / sqrt(2)
    pos.l[1] <- pos.l[1] - l.offset
    pos.l[2] <- pos.l[2] + l.offset
    pos.r[1] <- pos.r[1] + r.offset
    pos.r[2] <- pos.r[2] + r.offset
    hh <- r.head ^ 2
    ll <- r.left.ear ^ 2
    rr <- r.right.ear ^ 2
    centers <- rbind(pos.h, pos.l, pos.r)
    rs <- c(r.head, r.left.ear, r.right.ear)
    rrs <- c(hh, ll, rr)
    M <- matrix(0, n, dim)
    count <- 0
    while (count < n)
    {
        gen <- min(1000, n - count)
        s <- sample(x = c(1, 2, 3), size = gen, prob = c(vh, vl, vr), replace = TRUE)
        for (i in s)
        {
            r <- rs[i]
            random.p <- stats::runif(dim, -r, +r)
            p <- centers[i, ] + random.p
            if (sum(random.p ^ 2) < rrs[i])
            {
                if (i == 1)
                {
                    count <- count + 1
                    M[count, ] <- p
                } else if (i == 2) {
                    if (sum((p - pos.h) ^ 2) > hh && sum((p - pos.r) ^ 2) > rr)
                    {
                        count <- count + 1
                        M[count, ] <- p
                    }
                } else if (i == 3) {
                    if (sum((p - pos.h) ^ 2) > hh && sum((p - pos.l) ^ 2) > ll)
                    {
                        count <- count + 1
                        M[count, ] <- p
                    }
                }
            }
        }
    }
    M
}
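# ---------------------------------------------------------------------------
# Illustration only: nballvolume() splits the closed-form n-ball volume into
# even and odd cases; both agree with the gamma-function form
# V(r, n) = pi^(n/2) * r^n / gamma(n/2 + 1), e.g.:
#   all.equal(nballvolume(1.3, 5), pi^(5/2) * 1.3^5 / gamma(5/2 + 1))  # TRUE
#   all.equal(nballvolume(2.0, 4), pi^2 * 2^4 / gamma(3))              # TRUE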
/scratch/gouwar.j/cran-all/cranData/CEC/R/utils.R
## usethis namespace: start #' @useDynLib CEC, .registration = TRUE ## usethis namespace: end NULL
/scratch/gouwar.j/cran-all/cranData/CEC/R/zzz.R
testname <- "Clustering" setup <- function() { data("fourGaussians", package = "CEC") centers <- as.matrix(read.table(system.file("cec_tests", "four.gaussians.centers.data", package="CEC"))) expected <- dget(system.file("cec_tests", "four.gaussians.result.dp", package="CEC")) } test.clustering.four.gaussians <- function() { CEC <- cec(fourGaussians, centers) CEC:::checkNumericVectorEquals(expected$cluster, CEC$cluster, msg="Clustering vector") CEC:::checkNumericVectorEquals(expected$cost, CEC$cost, msg="Energy") CEC:::checkNumericMatrixEquals(expected$centers, CEC$centers, msg="Centers") CEC:::checkNumericMatrixEquals(fourGaussians, CEC$data, msg="Data") CEC:::checkNumericVectorEquals(expected$probability, CEC$probability, msg="Probability") CEC:::checkNumericVectorEquals(expected$nclusters, CEC$nclusters, msg="Number of clusters") CEC:::checkNumericVectorEquals(expected$iterations, CEC$iterations, msg="Iterations") }
/scratch/gouwar.j/cran-all/cranData/CEC/inst/cec_tests/clustering.test.R
testname <- "Covariance calculation" setup <- function() { B <- as.matrix(read.table(system.file("cec_tests", "ball1.data", package="CEC"))) centers <- as.matrix(read.table(system.file("cec_tests", "centers2.data", package="CEC"))) } test.covariances.before.first.iteraion <- function() { M <- matrix(c(-1, 102, 141, -1, 104, 2, -1, -1, 12, 4), 5, 2) cov <- cov.mle(M) C <- cec(M, centers=1, iter.max=0) checkNumericMatrixEquals(cov, C$covariances[[1]], msg="Covariances") } test.covariances.after.point.movements.between.clusters <- function() { cov <- cov.mle(B) C <- cec(B, centers=centers, type="sp") checkNumericMatrixEquals(cov, C$covariances[[1]], msg="Covariances") }
/scratch/gouwar.j/cran-all/cranData/CEC/inst/cec_tests/covariance.calculation.test.R
testname <- "Energy calculation (ball1)" setup <- function() { B <- as.matrix(read.table(system.file("cec_tests", "ball1.data", package="CEC"))) C <- as.matrix(read.table(system.file("cec_tests", "centers1.data", package="CEC"))) } test.type.covariance <- function() { given.cov = matrix(c(2,1,1,3), 2,2) expected.energy <- 2.766927173 CE <- cec(B, centers=1, type="cov", param = given.cov, iter.max=0) CEC:::checkNumericVectorEquals(expected.energy, CE$cost, msg="Energy") } test.type.fixedr <- function() { r <- 1.5 expected.energy <- 2.410818718 CE <- cec(B, centers=1, type="fix", param = 1.5, iter.max=0) CEC:::checkNumericVectorEquals(expected.energy, CE$cost, msg="Energy") } test.type.spherical <- function() { expected.energy <- 1.456430201 CE <- cec(B, centers=1, type="sp", iter.max=0) CEC:::checkNumericVectorEquals(expected.energy, CE$cost, msg="Energy") } test.type.diagonal <- function() { cov <- cov.mle(B) expected.energy <- 1.45637452 CE <- cec(B, centers=1, type="diag", iter.max=0) CEC:::checkNumericVectorEquals(expected.energy, CE$cost, msg="Energy") } test.type.eigenvalues <- function() { evals <- c(0.1, 0.22) expected.energy <- 1.734310397 CE <- cec(B, centers=1, type="eigen", param=evals, iter.max=0) CEC:::checkNumericVectorEquals(expected.energy, CE$cost, msg="Energy") } test.type.all <- function() { expected.energy <- 1.455903678 CE <- cec(B, centers=1, type="all", iter.max=0) CEC:::checkNumericVectorEquals(expected.energy, CE$cost, msg="Energy") } test.type.mean <- function() { expected.energy <- 1.455960581 CE <- cec(B, 1, type="mean", param=c(0, 0), iter.max=0) CEC:::checkNumericVectorEquals(expected.energy, CE$cost, msg="Energy") } #################################################################################################################### test.type.spherical.cluster.removing <- function() { expected.energy <- 1.456430201 CE <- cec(B, C, type="sp", iter.max=20) CEC:::checkNumericVectorEquals(expected.energy, CE$cost, msg="Energy") }
/scratch/gouwar.j/cran-all/cranData/CEC/inst/cec_tests/energy.calculation.test.ball1.R
testname <- "Energy calculation (mouseset1)" setup <- function() { B <- as.matrix(read.table(system.file("cec_tests", "mouse1.data", package="CEC"))) } test.type.covariance <- function() { given.cov = matrix(c(2,1,1,3), 2,2) expected.energy <- 3.540174056 CE <- cec(B, centers=1, type="cov", param = given.cov, iter.max=0) CEC:::checkNumericVectorEquals(expected.energy, CE$cost, msg="Energy") } test.type.fixedr <- function() { r <- 1.5 expected.energy <- 3.416637007 CE <- cec(B, centers=1, type="fix", param = 1.5, iter.max=0) CEC:::checkNumericVectorEquals(expected.energy, CE$cost, msg="Energy") } test.type.spherical <- function() { expected.energy <- 3.403158062 CE <- cec(B, centers=1, type="sp", iter.max=0) CEC:::checkNumericVectorEquals(expected.energy, CE$cost, msg="Energy") } test.type.diagonal <- function() { expected.energy <- 3.396500695 CE <- cec(B, centers=1, type="diag", iter.max=0) CEC:::checkNumericVectorEquals(expected.energy, CE$cost, msg="Energy") } test.type.all <- function() { expected.energy <- 3.396472329 CE <- cec(B, centers=1, type="all", iter.max=0) CEC:::checkNumericVectorEquals(expected.energy, CE$cost, msg="Energy") }
/scratch/gouwar.j/cran-all/cranData/CEC/inst/cec_tests/energy.calculation.test.mouseset1.R
testname <- "energy calculation (various data sets)" setup <- function() { two.gausses.4d = as.matrix(read.table(system.file("cec_tests", "two.gausses.4d.data", package="CEC"))) mouse3d <- as.matrix(read.table(system.file("cec_tests", "mouse3d.data", package="CEC"))) C <- as.matrix(read.table(system.file("cec_tests", "centers3d.data", package="CEC"))) C4 <- as.matrix(read.table(system.file("cec_tests", "centers43d.data", package="CEC"))) } test.type.covariance <- function() { given.cov = matrix(c(0.770118878, 0.005481129, -0.005991149, 0.005481129, 0.766972716, 0.008996509, -0.005991149, 0.008996509, 0.821481768), 3, 3) expected.energy <- 4.365855156 CE <- cec(mouse3d, centers=C, type="cov", param = given.cov, iter.max=20) CEC:::checkNumericVectorEquals(expected.energy, CE$cost, msg="Energy") } test.type.fixedr.mixture <- function() { r <- c(0.2, 0.3, 0.4) expected.energy <- 4.853461033 CE <- cec(mouse3d, centers=C, type=c("fi", "fi", "fi"), param = r) CEC:::checkNumericVectorEquals(expected.energy, CE$cost, msg="Energy") } test.type.spherical.one.cluster.removed <- function() { expected.energy <- 4.179257781 expected.number.of.clusters <- 3 CE <- cec(mouse3d, C4, type="sp") CEC:::checkNumericVectorEquals(expected.number.of.clusters, CE$nclusters, msg="Number of clusters") CEC:::checkNumericVectorEquals(expected.energy, CE$cost, msg="Energy") } test.type.diagonal.spherical.mixture <- function() { expected.energy <- 4.177793598 expected.number.of.clusters <- 3 CE <- cec(mouse3d, C, type=c("diag", "diag", "sp")) CEC:::checkNumericVectorEquals(expected.number.of.clusters, CE$nclusters, msg="Number of clusters") CEC:::checkNumericVectorEquals(expected.energy, CE$cost, msg="Energy") } test.type.eigenvalues.all.fixedr.mixture <- function() { evals1 <- c(0.8240634, 0.7739987, 0.7595220) evals2 <- c(0.7240634, 0.5739987, 0.3595220) r <- 1.0 expected.energy <- 4.323007035 expected.number.of.clusters <- 3 CE <- cec(mouse3d, C4, type=c("all", "eigen", "fixedr", "eigen"), param=list(evals1, r, evals2)) CEC:::checkNumericVectorEquals(expected.number.of.clusters, CE$nclusters, msg="Number of clusters") CEC:::checkNumericVectorEquals(expected.energy, CE$cost, msg="Energy") } test.type.mean.two.gaussians <- function() { centers = matrix(c(2, 4, 2, 4, 2, 4, 2, 4), 2, 4) means.param = list(c(0, 0, 0, 0), c(5, 5, 5, 5)) expected.energy = 6.142651451 cec = cec(two.gausses.4d, centers, c("mean", "mean"), param=means.param) CEC:::checkNumericVectorEquals(expected.energy, cec$cost, msg="Energy") }
/scratch/gouwar.j/cran-all/cranData/CEC/inst/cec_tests/energy.calculation.test.various.data.sets.R
testname <- "Split method" setup <- function() { set.seed(12345678) data("threeGaussians", package = "CEC") data("fourGaussians", package = "CEC") data("mixShapes", package = "CEC") mixShapesReduced = mixShapes[seq(1, nrow(mixShapes), 2),] } test.should.split.to.4.cluster <- function() { expected.cost = 2.530237 tolerance = 0.001 C = cec(fourGaussians, nstart = 5) CEC:::checkNumericEquals(4, C$nclusters) CEC:::checkNumericEquals(expected.cost, C$cost, msg = "cost", tolerance = tolerance) } test.should.split.to.7.cluster <- function() { expected.cost = 10.16551 tolerance = 0.001 C = cec(mixShapesReduced, 2, nstart = 2, split = T) CEC:::checkNumericEquals(7, C$nclusters) CEC:::checkNumericEquals(expected.cost, C$cost, msg = "cost", tolerance = tolerance) } test.should.limit.split.to.4.cluster <- function() { C = cec(mixShapesReduced, 1, nstart = 2, split = T, split.limit = 4) CEC:::checkNumericEquals(4, C$nclusters) } test.should.split.to.3.cluster.fixed.mean <- function() { C = cec(threeGaussians,, "mean", param = c(0, 0), nstart = 8) CEC:::checkNumericEquals(3, C$nclusters) CEC:::checkNumericEquals(1.726595, C$cost, tolerance = 0.00001) }
/scratch/gouwar.j/cran-all/cranData/CEC/inst/cec_tests/split.test.R
testname <- "Variable centers number" setup <- function() { set.seed(1234567) X1 = matrix(rnorm(1000), 500, 2) X2 = rbind(X1, X1 + 5) X3 = rbind(X2, X1 + 10) } test.should.use.1.cluster <- function() { C = cec(X1, 1:3, "sp", keep.removed=T, iter.max=0, card.min=4, nstart=10) CEC:::checkNumericEquals(1, nrow(C$centers)) } test.should.use.2.cluster <- function() { C = cec(X2, 1:3, "sp", keep.removed=T, iter.max=0, card.min=4, nstart=10) CEC:::checkNumericEquals(2, nrow(C$centers)) } test.should.use.3.cluster <- function() { C = cec(X3, 1:3, "sp", keep.removed=T, iter.max=0, card.min=4, nstart=5) CEC:::checkNumericEquals(3, nrow(C$centers)) }
/scratch/gouwar.j/cran-all/cranData/CEC/inst/cec_tests/variable.centers.test.R
#' Median normalization of sgRNA counts
#'
#' This function adjusts sgRNA counts by the median ratio method.
#' The normalized sgRNA read counts are calculated as the raw read counts
#' divided by a size factor. The size factor is calculated as the median of
#' all size factors calculated from negative control sgRNAs (e.g., sgRNAs
#' corresponding to non-targeting or non-essential genes).
#'
#' @param data A numeric matrix containing raw read counts of sgRNAs
#' with rows corresponding to sgRNAs and columns corresponding to samples.
#' @param control A numeric matrix containing raw read counts of negative
#' control sgRNAs with rows corresponding to sgRNAs and columns
#' corresponding to samples. Sample ordering is the same as in data.
#' @return A list with two elements: 1) size factors of all samples;
#' 2) normalized counts of sgRNAs.
#' @examples
#' count <- matrix(rnbinom(5000 * 6, mu=500, size=3), ncol = 6)
#' colnames(count) = paste0("sample", 1:6)
#' rownames(count) = paste0("sgRNA", 1:5000)
#' control <- count[1:100,]
#' normalizedcount <- medianNormalization(count, control)
#'
#' @importFrom stats median
#'
#' @export
medianNormalization <- function(data, control) {
  gm <- exp(rowMeans(log(control+1)))
  f <- apply(control, 2, function(u) median((u/gm)[gm > 0]))
  norm <- sweep(data,2,f,FUN="/")
  return(list(f=f, count=norm))
}

#' Modeling CRISPR screen data by R package limma
#'
#' The lmFit function in R package limma is employed for group comparisons.
#'
#' @param data A numeric matrix containing log2 expression levels of sgRNAs
#' with rows corresponding to sgRNAs and columns corresponding to samples.
#' @param design A design matrix with rows corresponding to samples and
#' columns corresponding to coefficients to be estimated.
#' @param contrast.matrix A matrix with columns corresponding to contrasts.
#' @return A data frame with rows corresponding to sgRNAs and columns
#' corresponding to limma results
#' @examples
#' y <- matrix(rnorm(1000*6),1000,6)
#' condition <- gl(2,3,labels=c("Treatment","Baseline"))
#' design <- model.matrix(~ 0 + condition)
#' contrast.matrix <- makeContrasts("conditionTreatment-conditionBaseline",levels=design)
#' limma.fit <- runLimma(y,design,contrast.matrix)
#'
#' @importFrom limma contrasts.fit makeContrasts lmFit eBayes
#'
#' @export
runLimma <- function(data, design, contrast.matrix) {
  lmfit <- limma::lmFit(data,design)
  lmfit.eBayes <- limma::eBayes(contrasts.fit(lmfit, contrast.matrix))
  results <- data.frame(lmfit.eBayes$coef, lmfit.eBayes$stdev.unscaled*lmfit.eBayes$sigma,
                        lmfit.eBayes$p.value)
  names(results) <- c("lfc","se","p")
  return(results)
}

#' Modeling CRISPR data with a permutation test between conditions
#' by R package limma
#'
#' The lmFit function in R package limma is employed for group comparisons
#' under permutations.
#'
#' @param data A numeric matrix containing log2 expression levels of sgRNAs
#' with rows corresponding to sgRNAs and columns corresponding to samples.
#' @param design A design matrix with rows corresponding to samples and
#' columns corresponding to coefficients to be estimated.
#' @param contrast.matrix A matrix with columns corresponding to contrasts.
#' @param nperm Number of permutations
#' @return A numeric matrix containing log2 fold changes with permutations
#' @examples
#' y <- matrix(rnorm(1000*6),1000,6)
#' condition <- gl(2,3,labels=c("Control","Baseline"))
#' design <- model.matrix(~ 0 + condition)
#' contrast.matrix <- makeContrasts("conditionControl-conditionBaseline",levels=design)
#' fit <- permuteLimma(y,design,contrast.matrix,20)
#'
#' @export
permuteLimma <- function(data, design, contrast.matrix, nperm) {
  n.rna <- dim(data)[1]
  beta.null <- matrix(0,n.rna,nperm)
  ns.grp <- dim(design)[1]/2
  for (j in 1:nperm) {
    # shuffle sample labels in a balanced way: half of each permuted group
    # comes from each original group
    n.floor <- floor(ns.grp/2)
    n.ceiling <- ceiling(ns.grp/2)
    col.grp1 <- c(sample(1:ns.grp,n.floor),sample((ns.grp+1):(2*ns.grp),n.ceiling))
    col.grp2 <- setdiff(1:(2*ns.grp),col.grp1)
    col.new <- c(col.grp1,col.grp2)
    limma.fit.null <- runLimma(data[,col.new],design,contrast.matrix)
    beta.null[,j] <- limma.fit.null$lfc
  }
  return(beta.null)
}

#' Fitting multi-component normal mixture models by R package mixtools
#'
#' The function normalmixEM in R package mixtools is employed for
#' fitting multi-component normal mixture models.
#'
#' @param x A numeric vector
#' @param k0 Number of components in the normal mixture model
#' @param mean_constr A constraint on the means of the components
#' @param sd_constr A constraint on the standard deviations of the components
#' @param npara Number of parameters
#' @param d0 Number of times for fitting the mixture model using different
#' starting values
#' @return Normal mixture model fit and BIC value of the log-likelihood
#'
#' @importFrom mixtools normalmixEM
EMFit <- function(x, k0, mean_constr, sd_constr, npara, d0) {
  for (i in 1:d0) {
    EM.fit.temp <- mixtools::normalmixEM(x,k=k0,mean.constr=mean_constr,sd.constr=sd_constr)
    if (i==1) {
      EM.fit <- EM.fit.temp
    }
    else {
      if (EM.fit.temp$loglik > EM.fit$loglik) {
        EM.fit <- EM.fit.temp
      }
    }
  }
  if (k0==3 && (EM.fit$mu[1] > EM.fit$mu[3])) {
    # reorder the components so that the first has the smaller mean; the
    # posterior columns are swapped as well so that they stay aligned with
    # the reordered mu/sigma/lambda
    c1 <- EM.fit$mu[1]
    EM.fit$mu[1] <- EM.fit$mu[3]
    EM.fit$mu[3] <- c1
    c2 <- EM.fit$sigma[1]
    EM.fit$sigma[1] <- EM.fit$sigma[3]
    EM.fit$sigma[3] <- c2
    c3 <- EM.fit$lambda[1]
    EM.fit$lambda[1] <- EM.fit$lambda[3]
    EM.fit$lambda[3] <- c3
    c4 <- EM.fit$posterior[,1]
    EM.fit$posterior[,1] <- EM.fit$posterior[,3]
    EM.fit$posterior[,3] <- c4
  }
  BIC <- -2*EM.fit$loglik + npara*log(length(x))
  return(list(EM.fit=EM.fit,BIC=BIC))
}

#' Performing empirical Bayes modeling on limma results
#'
#' This function performs empirical Bayes modeling on log fold ratios
#' and returns the posterior log fold ratios.
#'
#' @param data A numeric matrix containing limma results and log2 gene
#' expression levels that has a column named 'lfc' and a column
#' named 'exp.level.log2'
#' @param theta0 Standard deviation of log2 fold changes under permutations
#' @param n.b Number of bins, default is 5 bins
#' @param d Number of times for fitting the mixture model using different
#' starting values, default is 10
#' @return A numeric matrix containing limma results, RNA expression levels,
#' posterior log2 fold ratios, log p-values, and estimates of the mixture model
#'
#' @importFrom stats dnorm pnorm
#'
#' @export
normalMM <- function(data, theta0, n.b=5, d=10)
{
  eta <- 0.5
  mu.mat <- matrix(0,n.b,3)
  sigma.mat <- matrix(0,n.b,3)
  lambda.mat <- matrix(0,n.b,3)
  beta.cutoff.mat <- matrix(0,n.b,2)
  xs <- min(data$exp.level.log2)
  xe <- max(data$exp.level.log2)+0.1
  binter <- rep(xe,n.b+1)
  bl <- (xe-xs)/(n.b+1)
  for (b in 1:n.b) {
    binter[b] <- xs + bl*(b-1)
  }
  for(b in 1:n.b) {
    data$exp.level.log2.b[(data$exp.level.log2 >= binter[b]) & (data$exp.level.log2 < binter[b+1])] = b
  }
  for (b in 1:n.b) {
    x <- data$lfc[data$exp.level.log2.b==b]
    EMfit.3mm <- EMFit(x,k0=3,mean_constr=c(NA,0,NA),sd_constr=c(NA,theta0,NA),npara=4,d0=d)
    if ( (max(EMfit.3mm$EM.fit$mu) < eta) & (min(EMfit.3mm$EM.fit$mu) < -eta) ) {
      EMfit.3mm <- EMFit(x,k0=3,mean_constr=c(NA,0,eta),sd_constr=c(NA,theta0,NA),npara=3,d0=d)
      if (min(EMfit.3mm$EM.fit$mu) > -eta) {
        EMfit.3mm <- EMFit(x,k0=3,mean_constr=c(-eta,0,eta),sd_constr=c(NA,theta0,NA),npara=2,d0=d)
      }
    }
    if ( (max(EMfit.3mm$EM.fit$mu) > eta) & (min(EMfit.3mm$EM.fit$mu) > -eta) ) {
      EMfit.3mm <- EMFit(x,k0=3,mean_constr=c(-eta,0,NA),sd_constr=c(NA,theta0,NA),npara=3,d0=d)
      if (max(EMfit.3mm$EM.fit$mu) < eta) {
        EMfit.3mm <- EMFit(x,k0=3,mean_constr=c(-eta,0,eta),sd_constr=c(NA,theta0,NA),npara=2,d0=d)
      }
    }
    if ( (max(EMfit.3mm$EM.fit$mu) < eta) & (min(EMfit.3mm$EM.fit$mu) > -eta) ) {
      EMfit.3mm <- EMFit(x,k0=3,mean_constr=c(-eta,0,eta),sd_constr=c(NA,theta0,NA),npara=2,d0=d)
    }
    EMfit.2mm <- EMFit(x,k0=2,mean_constr=c(NA,0),sd_constr=c(NA,theta0),npara=2,d0=d)
    if ( (EMfit.2mm$EM.fit$mu[1] > 0) & (EMfit.2mm$EM.fit$mu[1] < eta) ) {
      EMfit.2mm <- EMFit(x,k0=2,mean_constr=c(eta,0),sd_constr=c(NA,theta0),npara=1,d0=d)
    }
    if ( (EMfit.2mm$EM.fit$mu[1] < 0) & (EMfit.2mm$EM.fit$mu[1] > -eta) ) {
      EMfit.2mm <- EMFit(x,k0=2,mean_constr=c(-eta,0),sd_constr=c(NA,theta0),npara=1,d0=d)
    }
    BIC.1mm <- -2*sum(dnorm(x,0,theta0,log=TRUE)) + log(length(x))
    BIC.all <- data.frame(EMfit.3mm$BIC, EMfit.2mm$BIC, BIC.1mm)
    if (which(rank(BIC.all)==1)==1) {
      xorder <- sort(EMfit.3mm$EM.fit$x,index.return=TRUE)$ix
      x.ordered <- x[xorder]
      posterior.xorder <- EMfit.3mm$EM.fit$posterior[xorder,]
      n1 <- min(which(posterior.xorder[,1]<posterior.xorder[,2]))
      if (is.na(x.ordered[n1])) {
        x.comp1.cutoff <- -theta0
      }
      else {
        x.comp1.cutoff <- min(-theta0,x.ordered[n1])
      }
      n3 <- max(which(posterior.xorder[,3]<posterior.xorder[,2]))
      if (is.na(x.ordered[n3])) {
        x.comp3.cutoff <- theta0
      }
      else {
        x.comp3.cutoff <- max(theta0,x.ordered[n3])
      }
      x.cutoff <- c(x.comp1.cutoff,x.comp3.cutoff)
      null.posterior <- EMfit.3mm$EM.fit$posterior[,2]
      beta.cutoff.mat[b,] <- x.cutoff
      mu.mat[b,] <- EMfit.3mm$EM.fit$mu
      sigma.mat[b,] <- EMfit.3mm$EM.fit$sigma
      lambda.mat[b,] <- EMfit.3mm$EM.fit$lambda
    }
    if (which(rank(BIC.all)==1)==2) {
      xorder <- sort(EMfit.2mm$EM.fit$x,index.return=TRUE)$ix
      x.ordered <- x[xorder]
      posterior.xorder <- EMfit.2mm$EM.fit$posterior[xorder,]
      if(EMfit.2mm$EM.fit$mu[1]<0) {
        n1 <- min(which(posterior.xorder[,1]<posterior.xorder[,2]))
        if (is.na(x.ordered[n1])) {
          x.comp1.cutoff <- -theta0
        }
        else {
          x.comp1.cutoff <- min(-theta0,x.ordered[n1])
        }
      }
      else {
        n1 <- max(which(posterior.xorder[,1]<posterior.xorder[,2]))
        if (is.na(x.ordered[n1])) {
          x.comp1.cutoff <- theta0
        }
        else {
          x.comp1.cutoff <- max(theta0,x.ordered[n1])
        }
      }
      x.cutoff <- c(x.comp1.cutoff)
      # posterior of the selected two-component fit
      null.posterior <- EMfit.2mm$EM.fit$posterior[,2]
      beta.cutoff.mat[b,1] <- x.cutoff
      mu.mat[b,1:2] <- EMfit.2mm$EM.fit$mu
      sigma.mat[b,1:2] <- EMfit.2mm$EM.fit$sigma
      lambda.mat[b,1:2] <- EMfit.2mm$EM.fit$lambda
    }
    if (which(rank(BIC.all)==1)==3) {
      # fallback for the single-component case so that 'null.posterior' is
      # always defined for this bin: treat every sgRNA as null with prior
      # N(0, theta0), keeping the shrinkage below well-defined
      null.posterior <- rep(1, length(x))
      mu.mat[b,2] <- 0
      sigma.mat[b,2] <- theta0
      lambda.mat[b,2] <- 1
    }
    data.b <- data[data$exp.level.log2.b==b,]
    data.b$null.posterior <- null.posterior
    if (beta.cutoff.mat[b,1]==0 & beta.cutoff.mat[b,2]==0) {
      data.b$lfc_posterior <- (data.b$lfc * (sigma.mat[b,2])^2 + mu.mat[b,2] * (data.b$se)^2)/((data.b$se)^2 + (sigma.mat[b,2])^2)
    }
    if (beta.cutoff.mat[b,1]<0 & beta.cutoff.mat[b,2]>0) {
      data.b.comp2 <- data.b[data.b$lfc < beta.cutoff.mat[b,1],]
      data.b.comp0 <- data.b[data.b$lfc >= beta.cutoff.mat[b,1] & data.b$lfc <= beta.cutoff.mat[b,2],]
      data.b.comp1 <- data.b[data.b$lfc > beta.cutoff.mat[b,2],]
      data.b.comp2$lfc_posterior <- (data.b.comp2$lfc * (sigma.mat[b,1])^2 + mu.mat[b,1] * (data.b.comp2$se)^2)/((data.b.comp2$se)^2 + (sigma.mat[b,1])^2)
      data.b.comp0$lfc_posterior <- (data.b.comp0$lfc * (sigma.mat[b,2])^2 + mu.mat[b,2] * (data.b.comp0$se)^2)/((data.b.comp0$se)^2 + (sigma.mat[b,2])^2)
      data.b.comp1$lfc_posterior <- (data.b.comp1$lfc * (sigma.mat[b,3])^2 + mu.mat[b,3] * (data.b.comp1$se)^2)/((data.b.comp1$se)^2 + (sigma.mat[b,3])^2)
      data.b <- rbind(data.b.comp0,data.b.comp1,data.b.comp2)
    }
    if (beta.cutoff.mat[b,1]<0 & beta.cutoff.mat[b,2]==0) {
      data.b.comp2 <- data.b[data.b$lfc < beta.cutoff.mat[b,1],]
      data.b.comp0 <- data.b[data.b$lfc >= beta.cutoff.mat[b,1],]
      data.b.comp2$lfc_posterior <- (data.b.comp2$lfc * (sigma.mat[b,1])^2 + mu.mat[b,1] * (data.b.comp2$se)^2)/((data.b.comp2$se)^2 + (sigma.mat[b,1])^2)
      data.b.comp0$lfc_posterior <- (data.b.comp0$lfc * (sigma.mat[b,2])^2 + mu.mat[b,2] * (data.b.comp0$se)^2)/((data.b.comp0$se)^2 + (sigma.mat[b,2])^2)
      data.b <- rbind(data.b.comp0,data.b.comp2)
    }
    if (beta.cutoff.mat[b,1]>0 & beta.cutoff.mat[b,2]==0) {
      data.b.comp0 <- data.b[data.b$lfc <= beta.cutoff.mat[b,1],]
      data.b.comp1 <- data.b[data.b$lfc > beta.cutoff.mat[b,1],]
      data.b.comp0$lfc_posterior <- (data.b.comp0$lfc * (sigma.mat[b,2])^2 + mu.mat[b,2] * (data.b.comp0$se)^2)/((data.b.comp0$se)^2 + (sigma.mat[b,2])^2)
      data.b.comp1$lfc_posterior <- (data.b.comp1$lfc * (sigma.mat[b,3])^2 + mu.mat[b,3] * (data.b.comp1$se)^2)/((data.b.comp1$se)^2 + (sigma.mat[b,3])^2)
      data.b <- rbind(data.b.comp0,data.b.comp1)
    }
    if (b==1) data.temp <- data.b
    else data.temp <- rbind(data.temp, data.b)
  }
  data <- data.temp
  data$log_p <- log(2)+pnorm(abs(data$lfc_posterior),mean=0,sd=theta0,lower.tail=FALSE,log.p=TRUE)
  data$log_p_noshrink <- log(2)+pnorm(abs(data$lfc),mean=0,sd=theta0,lower.tail=FALSE,log.p=TRUE)
  return(list(data=data,beta.cutoff.mat=beta.cutoff.mat,mu.mat=mu.mat,sigma.mat=sigma.mat,
              lambda.mat=lambda.mat))
}

#' Scatter plot of log2 fold ratios against gene expression levels
#'
#' This function generates a scatter plot of log2 fold ratios of sgRNAs
#' against the corresponding gene expression levels.
#'
#' @param data A numeric matrix from the output of the normalMM function
#' @param fdr A level of false discovery rate
#' @param ... Other graphical parameters
#'
#' @return No return value
#' @importFrom ggplot2 aes_string aes geom_point geom_vline theme theme_bw element_blank xlab ylab
#' @importFrom stats p.adjust
#'
#' @export
scatterPlot <- function(data, fdr, ...)
{
  p.fdr <- stats::p.adjust(exp(data$log_p),method="BH")
  data.fdr <- data[p.fdr<=fdr,]
  exp.level.log2 <- lfc <- NULL
  ggplot2::ggplot(data, aes(x = exp.level.log2, y = lfc)) +
    geom_point(size=1,alpha=0.2) +
    xlab("log2(gene expression)") + ylab("log2(FC)") +
    geom_point(data=data.fdr,aes(x=exp.level.log2,y=lfc),size=1,alpha=0.2,color="red3") +
    theme_bw() +
    theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
}

#' Calculating a significance score of a gene based on
#' the corresponding sgRNAs' p-values of the gene.
#'
#' Code was adapted from R package gscreend.
#'
#' @param pvec A numeric vector of p-values.
#' @return The minimum over k of the beta CDF values P(Beta(k, n-k+1) <= p_(k)),
#' where p_(k) is the kth smallest p-value and n is the number of p-values in
#' the vector. This minimum is the significance score of the gene.
alphaBeta <- function(pvec) {
  pvec <- sort(pvec)
  n <- length(pvec)
  min(stats::pbeta(pvec, seq_len(n), n - seq_len(n) + 1))
}

#' Generating the null distribution of the significance score
#' of a gene.
#'
#' Code was adapted from R package gscreend.
#'
#' @param n An integer representing the sgRNA number of a gene.
#' @param p A numeric vector which contains the percentiles of the
#' p-values that meet the cut-off (alpha).
#' @param nperm Number of permutation runs.
#' @return A numeric vector which contains all the significance scores
#' (rho) of genes generated by a permutation test where the sgRNAs are
#' randomly assigned to genes.
makeRhoNull <- function(n, p, nperm) {
  rhonull <- lapply(seq_len(nperm), function(x) {
    p_test <- sort(sample(p, n, replace = FALSE))
    min(stats::pbeta(p_test, seq_len(n), n - seq_len(n) + 1))
  })
  unlist(rhonull)
}

#' Calculating gene level p-values using modified robust rank aggregation
#' (alpha-RRA method) on sgRNAs' p-values
#'
#' Code was adapted from R package gscreend. The alpha-RRA method is
#' adapted from MAGeCK.
#'
#' @param pvec A numeric vector containing p-values of sgRNAs.
#' @param genes A character string containing gene names corresponding
#' to sgRNAs.
#' @param alpha A numeric number denoting the alpha cutoff (i.e. 0.05).
#' @param nperm Number of permutations, default is 20
#' @return A list with four elements: 1) a list of genes with their p-values;
#' 2) a numeric matrix of rho null, each column corresponding to a different
#' number of sgRNAs per gene; 3) a numeric vector of rho; 4) a numeric vector
#' of the number of sgRNAs per gene.
#' @export
calculateGenePval <- function(pvec, genes, alpha, nperm=20) {
  cut.pvec <- pvec <= alpha
  score_vals <- rank(pvec)/length(pvec)
  score_vals[!cut.pvec] <- 1
  rho <- unsplit(vapply(split(score_vals, genes),
                        FUN = alphaBeta,
                        FUN.VALUE = numeric(1)), genes)
  guides_per_gene <- sort(unique(table(genes)))
  permutations <- nperm * length(unique(genes))
  rho_nullh <- vapply(guides_per_gene,
                      FUN = makeRhoNull,
                      p = score_vals,
                      nperm = permutations,
                      FUN.VALUE = numeric(permutations))
  pvalue_gene <- lapply(split(rho, genes), function(x) {
    n_sgrnas <- length(x)
    mean(rho_nullh[, guides_per_gene == n_sgrnas] <= x[[1]])
  })
  result <- list(pvalue=pvalue_gene, rho_null=rho_nullh, rho=rho,
                 guides_per_gene=guides_per_gene)
  return(result)
}

#' Calculating gene-level log fold ratios
#'
#' Log fold ratios of all sgRNAs of a gene are averaged to obtain the
#' gene level log fold ratio.
#'
#' @param lfcs A numeric vector containing log fold changes of sgRNAs.
#' @param genes A character string containing gene names corresponding to sgRNAs.
#' @return A numeric vector containing log fold ratios of genes.
#'
#' @export
calculateGeneLFC <- function(lfcs, genes) {
  vapply(split(lfcs, genes), FUN = mean, FUN.VALUE = numeric(1))
}

#' Prepare data for density plot and ridge plot
#'
#' Takes a data frame with one row per gene and columns including gene ID,
#' gene LFC, and gene FDR.
#' This function stratifies genes into five groups based on their FDR levels:
#' <=0.001, (0.001,0.01], (0.01,0.05], (0.05,0.5], (0.5,1]
#'
#' @param data A data frame with one row per gene and at least three columns:
#' gene ID, gene LFC, and gene FDR.
#' @param gene.fdr A numeric variable (column) in the data frame, corresponding to the gene level FDR
#' @return A data frame based on the original data frame, with an additional column "group" indicating which FDR group each gene belongs to.
#' @importFrom dplyr arrange mutate %>%
#' @export
preparePlotData <- function(data, gene.fdr) {
  data2 <- data %>%
    arrange(gene.fdr) %>%
    mutate(group = ifelse(gene.fdr <= 0.001, "<=0.001",
                          ifelse(gene.fdr > 0.001 & gene.fdr <= 0.01, "(0.001,0.01]",
                                 ifelse(gene.fdr > 0.01 & gene.fdr <= 0.05, "(0.01,0.05]",
                                        ifelse(gene.fdr > 0.05 & gene.fdr <= 0.5, "(0.05,0.5]",
                                               "(0.5,1]")))))
  data2$fdr_range = factor(data2$group, levels = c("<=0.001", "(0.001,0.01]", "(0.01,0.05]", "(0.05,0.5]", "(0.5,1]"))
  return(data2)
}

#' 2D density contour plot of gene log2 fold ratios against gene expression levels
#'
#' This function generates a scatter plot with 2D density contours of gene-level
#' log2 fold ratios against the corresponding gene expression levels.
#'
#' @param data A data frame from the output of the preparePlotData function
#' @param ... Other graphical parameters
#'
#' @return No return value
#' @importFrom ggplot2 aes_string xlim ylim aes geom_point stat_density_2d theme theme_classic element_blank xlab ylab
#' @importFrom ggsci scale_color_nejm
#' @export
densityPlot <- function(data, ...) {
  ..level.. <- NULL
  exp.level.log2 <- NULL
  gene_lfc <- NULL
  fdr_range <- NULL
  return(
    ggplot2::ggplot(data, aes(x=exp.level.log2, y=gene_lfc, color=fdr_range)) +
      geom_point() +
      xlim(0,14) + ylim(-10,5) +
      #scale_color_distiller(palette="Spectral", trans = "reverse") +
      scale_color_nejm() +
      theme_classic(base_size = 16) +
      stat_density_2d(aes(fill = ..level..), geom = "polygon") +
      theme(legend.text.align = 0)
  )
}

#' Density ridgeline plot of gene expression levels for different FDR groups.
#'
#' This function generates a density ridgeline plot of gene expression levels for different FDR groups.
#'
#' @param data A data frame from the output of the preparePlotData function
#' @param ... Other graphical parameters
#'
#' @return No return value
#' @importFrom ggplot2 aes_string aes theme element_blank xlab ylab scale_fill_manual
#' @importFrom ggridges stat_density_ridges geom_density_ridges position_points_jitter
#' @importFrom ggprism theme_prism
#' @export
ridgePlot <- function(data, ...) {
  exp.level.log2 <- NULL
  fdr_range <- NULL
  ggplot2::ggplot(data, aes(x=exp.level.log2, y=fdr_range)) +
    geom_density_ridges() +
    stat_density_ridges(quantile_lines = TRUE, quantiles = c(0.5),
                        jittered_points = TRUE,
                        position = position_points_jitter(width = 0.05, height = 0),
                        point_shape = '|', point_size = 3, point_alpha = 1,
                        alpha = 0.7) +
    theme_prism(base_size = 16)
}
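# ---------------------------------------------------------------------------
# Illustration only (not part of the package API): the shrinkage used inside
# normalMM() is the standard normal-normal posterior mean; with component
# prior N(mu, sigma^2) on the true log2 fold change and sampling variance
# se^2, the posterior mean is
#   (lfc * sigma^2 + mu * se^2) / (se^2 + sigma^2),
# which pulls noisy estimates (large se) toward the component mean mu, e.g.:
#   (1.5 * 0.4^2 + 0 * 1.0^2) / (1.0^2 + 0.4^2)  # ~0.21, strong shrinkage
#   (1.5 * 0.4^2 + 0 * 0.1^2) / (0.1^2 + 0.4^2)  # ~1.41, little shrinkage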
/scratch/gouwar.j/cran-all/cranData/CEDA/R/CEDA.R
#' CRISPR screen data of cell line MDA-MB-231.
#'
#' A dataset containing the expression data of sgRNAs
#' in a CRISPR screen experiment of cell line MDA-MB-231.
#'
#' @format A list of two elements:
#' \describe{
#'   \item{sgRNA}{Raw read counts of sgRNAs}
#'   \item{neGene}{A list of non-essential genes}
#' }
#'
"mda231"
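
## Usage sketch (comments only; assumes the CEDA package is installed,
## since 'mda231' ships with it):
# data("mda231")
# str(mda231, max.level = 1)   # a list: sgRNA counts plus non-essential genes
# head(mda231$sgRNA)
# head(mda231$neGene)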
/scratch/gouwar.j/cran-all/cranData/CEDA/R/mda231.R
## ----setup, include = FALSE--------------------------------------------------
knitr::opts_chunk$set(
  warning = FALSE,
  message = FALSE,
  collapse = TRUE,
  comment = "#>"
)

## ----data---------------------------------------------------------------------
library(CEDA)
library(dplyr)
library(ggplot2)
library(ggsci)
library(ggprism)
library(ggridges)
set.seed(1)
data("mda231")
class(mda231)
length(mda231)
names(mda231)

## ----sgRNA----------------------------------------------------------------------
dim(mda231$sgRNA)
length(mda231$neGene$Gene)
head(mda231$sgRNA)

## ----neGene---------------------------------------------------------------------
dim(mda231$neGene)
head(mda231$neGene)

## ----filter---------------------------------------------------------------------
count_df <- mda231$sgRNA
mx.count = apply(count_df[,c(3:8)],1,function(x) sum(x>=1))
table(mx.count)
# keep sgRNAs with non-zero count in at least one sample
count_df2 = count_df[mx.count>=1,]

## ----normalization----------------------------------------------------------------
mda231.ne <- count_df2[count_df2$Gene %in% mda231$neGene$Gene,]
cols <- c(3:8)
mda231.norm <- medianNormalization(count_df2[,cols], mda231.ne[,cols])[[2]]

## ----design-----------------------------------------------------------------------
group <- gl(2, 3, labels=c("Control","Baseline"))
design <- model.matrix(~ 0 + group)
colnames(design) <- sapply(colnames(design), function(x) substr(x, 6, nchar(x)))
contrast.matrix <- makeContrasts("Control-Baseline", levels=design)

## ----limfit-----------------------------------------------------------------------
limma.fit <- runLimma(log2(mda231.norm+1),design,contrast.matrix)

## ----merge------------------------------------------------------------------------
mda231.limma <- data.frame(count_df2, limma.fit)
head(mda231.limma)

## ----betanull---------------------------------------------------------------------
betanull <- permuteLimma(log2(mda231.norm + 1), design, contrast.matrix, 10)
theta0 <- sd(betanull)
theta0

## ----mm, results='hide'-------------------------------------------------------------
nmm.fit <- normalMM(mda231.limma, theta0, n.b=3, d=5)

## ----fig1, fig.cap = "Log fold ratios of sgRNAs vs. gene expression level"----
scatterPlot(nmm.fit$data,fdr=0.05,xlim=c(-0.5,12),ylim=c(-8,5))

## ----pval-------------------------------------------------------------------------
mda231.nmm <- nmm.fit[[1]]
p.gene <- calculateGenePval(exp(mda231.nmm$log_p), mda231.nmm$Gene, 0.05, nperm=10)
gene_fdr <- stats::p.adjust(p.gene$pvalue, method = "fdr")
gene_lfc <- calculateGeneLFC(mda231.nmm$lfc, mda231.nmm$Gene)

gene_summary <- data.frame(gene_pval=unlist(p.gene$pvalue),
                           gene_fdr=as.matrix(gene_fdr),
                           gene_lfc = as.matrix(gene_lfc))
gene_summary$gene <- rownames(gene_summary)
gene_summary <- gene_summary[,c(4,1:3)]

## ----summary----------------------------------------------------------------------
#extract gene expression data
gene.express <- mda231.nmm %>%
  group_by(Gene) %>%
  summarise_at(vars(exp.level.log2), max)

#merge gene summary with gene expression
gdata <- left_join(gene_summary, gene.express, by = c("gene" = "Gene"))
gdata <- gdata %>% filter(is.na(exp.level.log2)==FALSE)

# density plot and ridge plot
gdata$gene.fdr <- gdata$gene_fdr
data <- preparePlotData(gdata, gdata$gene.fdr)

## ----fig2, fig.cap = "2D density plot of gene log fold ratios vs. gene expression level for different FDR groups"----
densityPlot(data)
/scratch/gouwar.j/cran-all/cranData/CEDA/inst/doc/Userguide.R
---
title: "CRISPR Screen and Gene Expression Differential Analysis"
author: "Lianbo Yu, Yue Zhao, Kevin R. Coombes, and Lang Li"
date: "`r Sys.Date()`"
output:
  pdf_document:
    latex_engine: xelatex
    number_sections: yes
    toc: yes
vignette: >
  %\VignetteIndexEntry{CRISPR Screen and Gene Expression Differential Analysis}
  %\VignetteEncoding{UTF-8}
  %\VignetteDepends{CEDA}
  %\VignettePackage{CEDA}
  %\VignetteEngine{knitr::rmarkdown}
---

```{r setup, include = FALSE}
knitr::opts_chunk$set(
  warning = FALSE,
  message = FALSE,
  collapse = TRUE,
  comment = "#>"
)
```

# Introduction

We developed CEDA to analyze read counts of single guide RNAs (sgRNAs) from CRISPR
screening experiments. Except for non-targeting sgRNAs (which can be used as negative
controls), each sgRNA targets one gene, and each gene has multiple sgRNAs targeting it.
CEDA models the sgRNA counts at different levels of gene expression by multi-component
normal mixtures, with the model fit by an EM algorithm. Posterior estimates at the
sgRNA level are then summarized for each gene.

In this document, we use data from an experiment with the MDA231 cell line
to illustrate how to use CEDA to perform CRISPR screen data analysis.

# Overview

CEDA analysis follows a workflow that is typical for most omics level experiments.

1. Put the data into an appropriate format for input to CEDA.
2. Filter out sgRNAs with low read counts (optional).
3. Normalize the raw counts.
4. Fit a linear model to the data.
5. Summarize and view the results.

## Data Format

In our experiment, three samples of MDA231 cells were untreated at time T=0, and
another three samples of MDA231 cells were treated with DMSO at time T=0. We are
interested in detecting sgRNAs that are differentially changed by a treatment.

The sgRNA read counts, along with a list of non-essential genes, are stored in the
dataset `mda231` that we have included in the `CEDA` package. We read that dataset
and explore its structure.

```{r data}
library(CEDA)
library(dplyr)
library(ggplot2)
library(ggsci)
library(ggprism)
library(ggridges)
set.seed(1)
data("mda231")
class(mda231)
length(mda231)
names(mda231)
```

As you can see, this is a list containing two components

1. `sgRNA`, the observed count data of six samples, and
2. `neGene`, the set of non-essential genes.

```{r sgRNA}
dim(mda231$sgRNA)
length(mda231$neGene$Gene)
head(mda231$sgRNA)
```

Notice that the `sgRNA` component includes an extra column, "`exp.level.log2`",
which gives the expression level (in log2 scale) of each gene, computed from gene
expression data in units of FPKM.

The second component of the list `mda231` is a data frame `neGene`, which contains
the gene names of the non-essential genes (Ref 1):

```{r neGene}
dim(mda231$neGene)
head(mda231$neGene)
```

## Filter out sgRNAs with low read counts

Due to the nature of drop-out screens, some sgRNAs targeting essential genes have low
read counts in the end time point samples. Therefore, we must be cautious when applying
strict filtering criteria to remove a large portion of sgRNAs before analysis. A gentle
filtering criterion (i.e., removal of sgRNAs with 0 counts in ≥ 75% of samples) is
recommended.

```{r filter}
count_df <- mda231$sgRNA
mx.count = apply(count_df[,c(3:8)],1,function(x) sum(x>=1))
table(mx.count)
# keep sgRNAs with non-zero count in at least one sample
count_df2 = count_df[mx.count>=1,]
```

## Normalization

The sgRNA read counts need to be normalized across sample replicates before formal
analysis. The non-essential genes are assumed to have no change after DMSO treatment.
So, our recommended procedure is to perform median normalization based on the set of
non-essential genes.

```{r normalization}
mda231.ne <- count_df2[count_df2$Gene %in% mda231$neGene$Gene,]
cols <- c(3:8)
mda231.norm <- medianNormalization(count_df2[,cols], mda231.ne[,cols])[[2]]
```

## Analysis

Our primary goal is to detect essential sgRNAs that have different count levels
between conditions. We rely on the R package `limma` to calculate log2 ratios
(i.e., log fold changes or LFCs) between three untreated and three treated samples.

### Calculating fold ratios

First, we have to go through the usual `limma` steps to describe the design of the
study. There were two groups of replicate samples. We will call these groups "Control"
and "Baseline" (although "Treated" and "Untreated" would work just as well). Our main
interest is determining the differences between the groups, and we have to record this
information in a "contrast matrix" so limma knows what we want to compare.

```{r design}
group <- gl(2, 3, labels=c("Control","Baseline"))
design <- model.matrix(~ 0 + group)
colnames(design) <- sapply(colnames(design), function(x) substr(x, 6, nchar(x)))
contrast.matrix <- makeContrasts("Control-Baseline", levels=design)
```

Finally, we can run the limma algorithm.

```{r limfit}
limma.fit <- runLimma(log2(mda231.norm+1),design,contrast.matrix)
```

We merge the results from our limma analysis with the post-filtering sgRNA count data.

```{r merge}
mda231.limma <- data.frame(count_df2, limma.fit)
head(mda231.limma)
```

### Fold ratios under the null hypotheses

Under the null hypotheses, all sgRNA levels are unchanged between the two conditions.
To obtain fold ratios under the null, samples were permuted between the two conditions,
and log ratios were obtained from the limma analysis under each permutation.

```{r betanull}
betanull <- permuteLimma(log2(mda231.norm + 1), design, contrast.matrix, 10)
theta0 <- sd(betanull)
theta0
```

### Fitting three-component mixture models

A three-component mixture model (unchanged, overexpressed, and underexpressed) is
assumed for the log ratios at different levels of gene expression. An empirical Bayes
method was employed to estimate parameters of the mixtures, and posterior means were
obtained for estimating actual log ratios between the two conditions. P-values of
sgRNAs were then calculated by a permutation method.

```{r mm, results='hide'}
nmm.fit <- normalMM(mda231.limma, theta0, n.b=3, d=5)
```

Results from the mixture model are shown in Figure 1. A false discovery rate of 0.05
was used for declaring significant changes (marked in red) between the two conditions
for sgRNAs. The vertical lines divide sgRNAs into bins according to the gene
expression levels of their targeted genes.

```{r fig1, fig.cap = "Log fold ratios of sgRNAs vs. gene expression level"}
scatterPlot(nmm.fit$data,fdr=0.05,xlim=c(-0.5,12),ylim=c(-8,5))
```

### Gene level summarization

From the p-values of sgRNAs, gene level p-values were obtained by using the modified
robust rank aggregation method (alpha-RRA). Log ratios were also summarized at the
gene level.
```{r pval}
mda231.nmm <- nmm.fit[[1]]
p.gene <- calculateGenePval(exp(mda231.nmm$log_p), mda231.nmm$Gene, 0.05, nperm=10)
gene_fdr <- stats::p.adjust(p.gene$pvalue, method = "fdr")
gene_lfc <- calculateGeneLFC(mda231.nmm$lfc, mda231.nmm$Gene)

gene_summary <- data.frame(gene_pval=unlist(p.gene$pvalue),
                           gene_fdr=as.matrix(gene_fdr),
                           gene_lfc = as.matrix(gene_lfc))
gene_summary$gene <- rownames(gene_summary)
gene_summary <- gene_summary[,c(4,1:3)]
```

### Merging with gene expression data

The gene-level summaries are then merged with the gene expression data, and the genes
are stratified into FDR groups to prepare for plotting.

```{r summary}
#extract gene expression data
gene.express <- mda231.nmm %>%
  group_by(Gene) %>%
  summarise_at(vars(exp.level.log2), max)

#merge gene summary with gene expression
gdata <- left_join(gene_summary, gene.express, by = c("gene" = "Gene"))
gdata <- gdata %>% filter(is.na(exp.level.log2)==FALSE)

# density plot and ridge plot
gdata$gene.fdr <- gdata$gene_fdr
data <- preparePlotData(gdata, gdata$gene.fdr)
```

Results from CEDA are shown in Figure 2. The points in the scatter plot are stratified
into five color groups based on FDR. The 2D contour lines show how the points are
distributed.

```{r fig2, fig.cap = "2D density plot of gene log fold ratios vs. gene expression level for different FDR groups"}
densityPlot(data)
```

The density plot of CEDA results shows that the groups of genes selected by CEDA
(FDR<0.05; yellow, blue, and red) have higher median expression values than the rest
of the genes (purple, green).
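
The same stratified data can also be viewed as a density ridgeline plot of gene
expression levels by FDR group, using the `ridgePlot` function from CEDA, for example:

```{r fig3, fig.cap = "Density ridgeline plot of gene expression levels for different FDR groups"}
ridgePlot(data)
```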
/scratch/gouwar.j/cran-all/cranData/CEDA/inst/doc/Userguide.Rmd
/scratch/gouwar.j/cran-all/cranData/CEDA/vignettes/Userguide.Rmd
#' @title CEEMDAN Decomposition-Based ARIMA-GARCH-ANN Hybrid Modeling #' #' @param Y Univariate time series #' @param ratio Ratio of number of observations in training and testing sets #' @param n_lag Lag of the provided time series data #' @import stats Rlibeemd tseries forecast fGarch aTSA FinTS LSTS earth caret neuralnet e1071 pso #' @return #' \itemize{ #' \item Train_fitted: Train fitted result #' \item Test_predicted: Test predicted result #' \item Accuracy: Accuracy #' } #' @export #' #' @examples #' Y <- rnorm(100, 100, 10) #' result <- carigaan(Y, ratio = 0.8, n_lag = 4) #' @references #' \itemize{ #' \item Garai, S., & Paul, R. K. (2023). Development of MCS based-ensemble models using CEEMDAN decomposition and machine intelligence. Intelligent Systems with Applications, 18, 200202 #' \item Garai, S., Paul, R. K., Rakshit, D., Yeasin, M., Paul, A. K., Roy, H. S., Barman, S. & Manjunatha, B. (2023). An MRA Based MLR Model for Forecasting Indian Annual Rainfall Using Large Scale Climate Indices. International Journal of Environment and Climate Change, 13(5), 137-150. #' } carigaan <- function(Y, ratio = 0.9, n_lag = 4){ optimize_weights<-NULL objective<-NULL all_metrics<-NULL # Y is a vector # ratio is train:test # n_lag is number of lags in the data ###################################### # Data preparation #### # embedding for finding log return diff.1 <- embed(Y, 2) Yt <- diff.1[,1] Yt_1 <- diff.1[,2] y <- log(Yt/Yt_1) # Compute the average of non-zero contents in the data nonzero_data <- y[y != 0 & !is.na(y)] average_nonzero <- mean(nonzero_data) # Replace NaNs with the average of non-zero contents in the data y[is.nan(y)] <- average_nonzero # Check the result y # embedding for finding lag series of actual series # n_lag <- 4 embed_size_y <- n_lag+1 # same lag (n_lag_y-1) for every sub series diff.2 <- embed(Yt,embed_size_y) dim(diff.2) Y_actual <- diff.2[,1] Y_actual_1 <- diff.2[,2] # train-test split # ratio <- 0.8 n <- length(Y_actual) # this is already (original length-embed_y) Y_train <- Y_actual[1:(n*ratio)] Y_test <- Y_actual[(n*ratio+1):n] Y_train_1 <- Y_actual_1[1:(n*ratio)] Y_test_1 <- Y_actual_1[(n*ratio+1):n] # embedding for finding lag series of log return series diff.3 <- embed(y, embed_size_y) y_actual <- diff.3[,1] y_train <- y_actual[1:(n*ratio)] y_test <- y_actual[(n*ratio+1):n] # Data preprocessing #### # ceemdan ceemdan_y <- ceemdan(y,num_imfs = 0, ensemble_size = 250L, noise_strength = sd(y)*0.1^2, S_number = 4L,num_siftings = 50L, rng_seed = 0L,threads = 0L) ceemdan_df <- data.frame(ceemdan_y) # lag matrices for ceemdan decomposed series ceemdan_lag_matrices <- list() for (col_name in colnames(ceemdan_df)) { embedded <- embed(ceemdan_df[[col_name]], embed_size_y) ceemdan_lag_matrices_name <- paste0("embed_", col_name) ceemdan_lag_matrices[[ceemdan_lag_matrices_name]] <- embedded } ceemdan_lag_train <- list() for (names in names(ceemdan_lag_matrices)) { ceemdan_train <- ceemdan_lag_matrices[[names]][1:(n*ratio),] ceemdan_lag_train_name <- names ceemdan_lag_train[[ceemdan_lag_train_name]] <- ceemdan_train } ceemdan_lag_test <- list() for (names in names(ceemdan_lag_matrices)) { ceemdan_test <- ceemdan_lag_matrices[[names]][(n*ratio+1):n,] ceemdan_lag_test_name <- names ceemdan_lag_test[[ceemdan_lag_test_name]] <- ceemdan_test } # model fitting #### # ARIMA arima <- auto.arima(ceemdan_lag_train$embed_Residual[,1]) order <- arimaorder(arima) model_arima<-arima(ceemdan_train[,ncol(ceemdan_train)],order=c(order[1], order[2], order[3])) pred_arima <-arima$fitted 
forecast_arima <- data.frame(predict(arima,n.ahead=((n-(n*ratio))))) forecast_arima <-forecast_arima$pred #GARCH Model ARCH_pvalue <- as.numeric(FinTS::ArchTest(model_arima$residuals)$p.value) #ARCH_pvalue<-1 if(ARCH_pvalue<=0.05){ garch.fit <- fGarch::garchFit(~garch(1, 1), data = y_train, trace = FALSE) pred_V <- garch.fit@fitted forecast_V <- predict(garch.fit, n.ahead=(n-(n*ratio))) forecast_V <- forecast_V$meanForecast Resid_V <- garch.fit@residuals for_resid<-as.ts(y_test-forecast_V) }else { pred_V <- pred_arima forecast_V <- forecast_arima Resid_V <- as.ts(model_arima$residuals) for_resid<-as.vector(y_test-as.vector(forecast_arima)) } names(for_resid)<-"Resid" names(Resid_V)<-"Resid" bl.test <- Box.Ljung.Test(Resid_V) #not white noise if(max(bl.test$data$y)<=0.05){ Resid_ind<-1 } else { Resid_ind<-2 } # mars_data preparation mars_train <- list() for (names in names(ceemdan_lag_train)) { ceemdan_train_data <- cbind(ceemdan_lag_train[[names]][,1]) ceemdan_train_data_name <- names mars_train[[ceemdan_train_data_name]] <- ceemdan_train_data } mars_test <- list() for (names in names(ceemdan_lag_test)) { ceemdan_test_data <- cbind(ceemdan_lag_test[[names]][,1]) ceemdan_test_data_name <- names mars_test[[ceemdan_test_data_name]] <- ceemdan_test_data } # if arima/garch residual is not white noise it will be used as mars predictor # if(Resid_ind==2){ # predictors_train <- cbind(mars_train[-length(mars_train)],Resid=Resid_V) # colnames(predictors_train)<-c(colnames(mars_train[-length(mars_train)]), "Resid") # predictors_test <- cbind(mars_test[-length(mars_test)],Resid=for_resid) # colnames(predictors_test)<-c(colnames(mars_test[-length(mars_test)]), "Resid") # } else { predictors_train<-data.frame(mars_train[-length(mars_train)]) colnames(predictors_train)<-c(names(mars_train[-length(mars_train)])) predictors_test<-data.frame(mars_test[-length(mars_test)]) colnames(predictors_test)<-c(names(mars_test[-length(mars_test)])) # } # mars: for feature selection data_MARS<-cbind(y=y_train,predictors_train) names(data_MARS) MARS <- earth(y~., data = data_MARS) selcted_var<-MARS$namesx # train and test data for ann data_train <- ceemdan_lag_train[selcted_var] data_test <- ceemdan_lag_test[selcted_var] for(i in seq_along(data_train)) { # determine the name of the current element list_name <- names(data_train)[i] # determine the number of columns for the current matrix num_cols <- ncol(data_train[[i]]) # create a vector of column names for the current matrix col_names <- paste(list_name, "col", 1:num_cols, sep = "_") # assign the column names to the current matrix colnames(data_train[[i]]) <- col_names } for(i in seq_along(data_test)) { # determine the name of the current element list_name <- names(data_test)[i] # determine the number of columns for the current matrix num_cols <- ncol(data_test[[i]]) # create a vector of column names for the current matrix col_names <- paste(list_name, "col", 1:num_cols, sep = "_") # assign the column names to the current matrix colnames(data_test[[i]]) <- col_names } # ann # create empty data frames to store predicted values for each element's response train_predicted_df <- data.frame(matrix(ncol = 2, nrow = 0)) colnames(train_predicted_df) <- c("element", "train_predicted") test_predicted_df <- data.frame(matrix(ncol = 2, nrow = 0)) colnames(test_predicted_df) <- c("element", "test_predicted") for (i in seq_along(data_train)) { # extract the predictors and response from the current matrix predictors <- data_train[[i]][, -1] response <- data_train[[i]][, 1] allvars <- 
colnames(predictors) predictorvars <- paste(allvars, collapse="+") form <- as.formula(paste0(names(data_train)[i], "_col_1~", predictorvars)) # train a neural network with one hidden layer nn <- neuralnet(form, data_train[[i]], hidden = c(4)) # predict the response for the current matrix in the training set train_predicted <- predict(nn, predictors) # create a data frame with the element name and predicted values for the training set train_predicted_element_df <- data.frame(element = rep(names(data_train[i]), nrow(train_predicted)), train_predicted = train_predicted) # add the predicted values for the current element in the training set to the overall data frame train_predicted_df <- rbind(train_predicted_df, train_predicted_element_df) # save the model for this element assign(paste0("nn_", names(data_train[i])), nn) } # create empty data frames to store predicted values for each element's response test_predicted_df <- data.frame(matrix(ncol = 2, nrow = 0)) colnames(test_predicted_df) <- c("element", "test_predicted") test_predicted_df <- data.frame(matrix(ncol = 2, nrow = 0)) colnames(test_predicted_df) <- c("element", "test_predicted") for (i in seq_along(data_test)) { # extract the predictors and response from the current matrix predictors <- data_test[[i]][, -1] response <- data_test[[i]][, 1] # predict the response for the current matrix in the testing set nn <- get(paste0("nn_", names(data_test[i]))) test_predicted <- predict(nn, predictors) # create a data frame with the element name and predicted values for the testing set test_predicted_element_df <- data.frame(element = rep(names(data_test[i]), nrow(test_predicted)), test_predicted = test_predicted) # add the predicted values for the current element in the testing set to the overall data frame test_predicted_df <- rbind(test_predicted_df, test_predicted_element_df) } # print the predicted values for each element in the training and test sets # Extract unique category names #train colnames (train_predicted_df) <- c('Category', 'Column2') category_names <- unique(train_predicted_df$Category) # Create a list of data frames split by category train_predicted_df_list <- lapply(category_names, function(cat) { train_predicted_df[train_predicted_df$Category == cat, "Column2", drop = FALSE] }) # Rename each data frame with its corresponding category name names(train_predicted_df_list) <- category_names # Create a named list of data frames train_predicted_df_named_list <- as.list(setNames(train_predicted_df_list, category_names)) # Assign each data frame in the named list to a separate variable for (i in seq_along(train_predicted_df_named_list)) { assign(paste0("train_predicted_df", i), data.frame(train_predicted_df_named_list[[i]])) } extra_df <- data.frame(Column2=pred_V) train_predicted_df_list <- c(train_predicted_df_list, list(extra_df)) names(train_predicted_df_list)[length(train_predicted_df_list)] <- "pred_V" train_predicted_df_named_list <- as.list(setNames(train_predicted_df_list, c(category_names,'pred_V'))) # test colnames (test_predicted_df) <- c('Category', 'Column2') category_names <- unique(test_predicted_df$Category) # Create a list of data frames split by category test_predicted_df_list <- lapply(category_names, function(cat) { test_predicted_df[test_predicted_df$Category == cat, "Column2", drop = FALSE] }) # Rename each data frame with its corresponding category name names(test_predicted_df_list) <- category_names # Create a named list of data frames test_predicted_df_named_list <- as.list(setNames(test_predicted_df_list, 
category_names)) # Assign each data frame in the named list to a separate variable for (i in seq_along(test_predicted_df_named_list)) { assign(paste0("test_predicted_df", i), data.frame(test_predicted_df_named_list[[i]])) } extra_df <- data.frame(Column2=forecast_V) test_predicted_df_list <- c(test_predicted_df_list, list(extra_df)) names(test_predicted_df_list)[length(test_predicted_df_list)] <- "forecast_V" test_predicted_df_named_list <- as.list(setNames(test_predicted_df_list, c(category_names,'forecast_V'))) # pso #### # pso_train y_actual <- y_train df_list <- train_predicted_df_named_list # Function to optimize weights using PSO optimize_weights <- function(df_list, y_actual) { # Flatten the list into a matrix df_mat <- as.matrix(do.call(cbind, df_list)) # Define the objective function for PSO objective <- function(weights) { # Convert weights to numeric vector weights <- as.numeric(weights) # Calculate weighted sum of predicted values y_pred <- df_mat %*% weights # Calculate mean squared error mse <- mean((y_pred - y_actual)^2) # Return the mean squared error return(mse) } # Initialize PSO solver solver <- psoptim( lower = rep(0, ncol(df_mat)), upper = rep(1, ncol(df_mat)), par = runif(ncol(df_mat), 0, 1), fn = objective ) # Extract the optimized weights weights <- solver$par # Calculate optimized prediction y_pred <- df_mat %*% weights # Return the optimized prediction return(y_pred) } # Call the function to obtain optimized prediction y_optimized_train <- optimize_weights(df_list, y_actual) # pso_test y_actual <- y_test df_list <- test_predicted_df_named_list y_optimized_test <- optimize_weights(df_list, y_actual) final_y_optimized_train <- exp(y_optimized_train)*Y_train_1 final_y_optimized_test <- exp(y_optimized_test)*Y_test_1 # all metrics all_metrics <- function(actual, predicted) { # Calculate the residuals residuals <- actual - predicted abs_residuals <- abs(actual - predicted) scaled_abs_residuals <- abs_residuals/actual lag_frame <- data.frame(embed(actual,2)) diff <- lag_frame[,1]-lag_frame[,2] abs_diff <- abs(diff) # Calculate simple metrics mse <- mean(residuals^2) rmse <- sqrt(mse) rrmse <- 100*rmse/mean(actual) mae <- mean(abs_residuals) mape <- 100*mean(scaled_abs_residuals) mase <- mae/mean(abs_diff) # calculate complex matrics nse <- 1- (mse/(mean(actual^2)-(mean(actual))^2)) wi <- 1- (mse/mean((abs(actual-mean(actual))+abs(predicted-mean(actual)))^2)) lme <- 1- mae/mean(abs(actual-mean(actual))) # creating the data frame AllMetrics <- data.frame(cbind(c('RMSE', 'RRMSE', 'MAE', 'MAPE', 'MASE','NSE', 'WI', 'LME'), c(round(rmse,3), round(rrmse,3), round(mae,3), round(mape,3), round(mase,3), round(nse,3), round(wi,3), round(lme,3)))) colnames(AllMetrics) <- c('Metrics','Values') dimnames(AllMetrics) dim(AllMetrics) # returning the table containing all the metrics return(AllMetrics) } # validation_carigaan metrics_carigaan_train <- data.frame(all_metrics( Y_train, final_y_optimized_train)) metrics_carigaan_test <- data.frame(all_metrics( Y_test, final_y_optimized_test)) metrics <- cbind(as.numeric(metrics_carigaan_train$Values), as.numeric(metrics_carigaan_test$Values)) colnames(metrics) <- c('CARIGAAN_Train', 'CARIGAAN_Test') row.names(metrics)<-c('RMSE', 'RRMSE', 'MAE', 'MAPE', 'MASE','NSE', 'WI', 'LME') predict_compare <- data.frame(cbind(train_actual = Y_train, predicted = final_y_optimized_train)) colnames(predict_compare) <- c('train_actual', 'train_predicted') forecast_compare <- data.frame(cbind(test_actual = Y_test, forecast = final_y_optimized_test)) 
colnames(forecast_compare) <- c('test_actual', 'test_predicted') return(list(Train_fitted=predict_compare, Test_predicted=forecast_compare, Accuracy=metrics)) } #' @title CEEMDAN Decomposition-Based ARIMA-GARCH-SVR Hybrid Modeling #' #' @param Y Univariate time series #' @param ratio Ratio of number of observations in training and testing sets #' @param n_lag Lag of the provided time series data #' @import stats Rlibeemd tseries forecast fGarch aTSA FinTS LSTS earth caret neuralnet e1071 pso #' @return #' \itemize{ #' \item Train_fitted: Train fitted result #' \item Test_predicted: Test predicted result #' \item Accuracy: Accuracy #' } #' @export #' #' @examples #' Y <- rnorm(100, 100, 10) #' result <- carigas(Y, ratio = 0.8, n_lag = 4) #' @references #' \itemize{ #' \item Garai, S., & Paul, R. K. (2023). Development of MCS based-ensemble models using CEEMDAN decomposition and machine intelligence. Intelligent Systems with Applications, 18, 200202 #' \item Garai, S., Paul, R. K., Rakshit, D., Yeasin, M., Paul, A. K., Roy, H. S., Barman, S. & Manjunatha, B. (2023). An MRA Based MLR Model for Forecasting Indian Annual Rainfall Using Large Scale Climate Indices. International Journal of Environment and Climate Change, 13(5), 137-150. #' } carigas <- function(Y, ratio = 0.9, n_lag = 4){ optimize_weights<-NULL objective<-NULL all_metrics<-NULL # Y is a vector # ratio is train:test # n_lag is number of lags in the data ###################################### # Data preparation #### # embedding for finding log return diff.1 <- embed(Y, 2) Yt <- diff.1[,1] Yt_1 <- diff.1[,2] y <- log(Yt/Yt_1) # Compute the average of non-zero contents in the data nonzero_data <- y[y != 0 & !is.na(y)] average_nonzero <- mean(nonzero_data) # Replace NaNs with the average of non-zero contents in the data y[is.nan(y)] <- average_nonzero # Check the result y # embedding for finding lag series of actual series # n_lag <- 4 embed_size_y <- n_lag+1 # same lag (n_lag_y-1) for every sub series diff.2 <- embed(Yt,embed_size_y) dim(diff.2) Y_actual <- diff.2[,1] Y_actual_1 <- diff.2[,2] # train-test split # ratio <- 0.8 n <- length(Y_actual) # this is already (original length-embed_y) Y_train <- Y_actual[1:(n*ratio)] Y_test <- Y_actual[(n*ratio+1):n] Y_train_1 <- Y_actual_1[1:(n*ratio)] Y_test_1 <- Y_actual_1[(n*ratio+1):n] # embedding for finding lag series of log return series diff.3 <- embed(y, embed_size_y) y_actual <- diff.3[,1] y_train <- y_actual[1:(n*ratio)] y_test <- y_actual[(n*ratio+1):n] # Data preprocessing #### # ceemdan ceemdan_y <- ceemdan(y,num_imfs = 0, ensemble_size = 250L, noise_strength = sd(y)*0.1^2, S_number = 4L,num_siftings = 50L, rng_seed = 0L,threads = 0L) ceemdan_df <- data.frame(ceemdan_y) # lag matrices for ceemdan decomposed series ceemdan_lag_matrices <- list() for (col_name in colnames(ceemdan_df)) { embedded <- embed(ceemdan_df[[col_name]], embed_size_y) ceemdan_lag_matrices_name <- paste0("embed_", col_name) ceemdan_lag_matrices[[ceemdan_lag_matrices_name]] <- embedded } ceemdan_lag_train <- list() for (names in names(ceemdan_lag_matrices)) { ceemdan_train <- ceemdan_lag_matrices[[names]][1:(n*ratio),] ceemdan_lag_train_name <- names ceemdan_lag_train[[ceemdan_lag_train_name]] <- ceemdan_train } ceemdan_lag_test <- list() for (names in names(ceemdan_lag_matrices)) { ceemdan_test <- ceemdan_lag_matrices[[names]][(n*ratio+1):n,] ceemdan_lag_test_name <- names ceemdan_lag_test[[ceemdan_lag_test_name]] <- ceemdan_test } # model fitting #### # ARIMA arima <- 
auto.arima(ceemdan_lag_train$embed_Residual[,1]) order <- arimaorder(arima) model_arima<-arima(ceemdan_train[,ncol(ceemdan_train)],order=c(order[1], order[2], order[3])) pred_arima <-arima$fitted forecast_arima <- data.frame(predict(arima,n.ahead=((n-(n*ratio))))) forecast_arima <-forecast_arima$pred #GARCH Model #### ARCH_pvalue <- as.numeric(FinTS::ArchTest(model_arima$residuals)$p.value) #ARCH_pvalue<-1 if(ARCH_pvalue<=0.05){ garch.fit <- fGarch::garchFit(~garch(1, 1), data = y_train, trace = FALSE) pred_V <- garch.fit@fitted forecast_V <- predict(garch.fit, n.ahead=(n-(n*ratio))) forecast_V <- forecast_V$meanForecast Resid_V <- garch.fit@residuals for_resid<-as.ts(y_test-forecast_V) }else { pred_V <- pred_arima forecast_V <- forecast_arima Resid_V <- as.ts(model_arima$residuals) for_resid<-as.vector(y_test-as.vector(forecast_arima)) } names(for_resid)<-"Resid" names(Resid_V)<-"Resid" bl.test <- Box.Ljung.Test(Resid_V) #not white noise if(max(bl.test$data$y)<=0.05){ Resid_ind<-1 } else { Resid_ind<-2 } # mars_data preparation mars_train <- list() for (names in names(ceemdan_lag_train)) { ceemdan_train_data <- cbind(ceemdan_lag_train[[names]][,1]) ceemdan_train_data_name <- names mars_train[[ceemdan_train_data_name]] <- ceemdan_train_data } mars_test <- list() for (names in names(ceemdan_lag_test)) { ceemdan_test_data <- cbind(ceemdan_lag_test[[names]][,1]) ceemdan_test_data_name <- names mars_test[[ceemdan_test_data_name]] <- ceemdan_test_data } # if arima/garch residual is not white noise it will be used as mars predictor # if(Resid_ind==2){ # predictors_train <- cbind(mars_train[-length(mars_train)],Resid=Resid_V) # colnames(predictors_train)<-c(colnames(mars_train[-length(mars_train)]), "Resid") # predictors_test <- cbind(mars_test[-length(mars_test)],Resid=for_resid) # colnames(predictors_test)<-c(colnames(mars_test[-length(mars_test)]), "Resid") # } else { predictors_train<-data.frame(mars_train[-length(mars_train)]) colnames(predictors_train)<-c(names(mars_train[-length(mars_train)])) predictors_test<-data.frame(mars_test[-length(mars_test)]) colnames(predictors_test)<-c(names(mars_test[-length(mars_test)])) # } # mars data_MARS<-cbind(y=y_train,predictors_train) names(data_MARS) MARS <- earth(y~., data = data_MARS) selcted_var<-MARS$namesx # train and test data for ann and svr data_train <- ceemdan_lag_train[selcted_var] data_test <- ceemdan_lag_test[selcted_var] for(i in seq_along(data_train)) { # determine the name of the current element list_name <- names(data_train)[i] # determine the number of columns for the current matrix num_cols <- ncol(data_train[[i]]) # create a vector of column names for the current matrix col_names <- paste(list_name, "col", 1:num_cols, sep = "_") # assign the column names to the current matrix colnames(data_train[[i]]) <- col_names } for(i in seq_along(data_test)) { # determine the name of the current element list_name <- names(data_test)[i] # determine the number of columns for the current matrix num_cols <- ncol(data_test[[i]]) # create a vector of column names for the current matrix col_names <- paste(list_name, "col", 1:num_cols, sep = "_") # assign the column names to the current matrix colnames(data_test[[i]]) <- col_names } # svr # create empty data frames to store predicted values for each element's response train_predicted_df <- data.frame(matrix(ncol = 2, nrow = 0)) colnames(train_predicted_df) <- c("element", "train_predicted") test_predicted_df <- data.frame(matrix(ncol = 2, nrow = 0)) colnames(test_predicted_df) <- c("element", 
"test_predicted") for (i in seq_along(data_train)) { # extract the predictors and response from the current matrix predictors <- data_train[[i]][, -1] response <- data_train[[i]][, 1] allvars <- colnames(predictors) predictorvars <- paste(allvars, collapse="+") form <- as.formula(paste0(names(data_train)[i], "_col_1~", predictorvars)) # train a svm model with radial basis function as kernel svmmodel <- svm(form, data_train[[i]], type="eps-regression", kernel = 'radial') # predict the response for the current matrix in the training set train_predicted <- data.frame(predict(svmmodel, predictors)) # create a data frame with the element name and predicted values for the training set train_predicted_element_df <- data.frame(element = rep(names(data_train[i]), nrow(train_predicted)), train_predicted = train_predicted) # add the predicted values for the current element in the training set to the overall data frame train_predicted_df <- rbind(train_predicted_df, train_predicted_element_df) # save the model for this element assign(paste0("svmmodel_", names(data_train[i])), svmmodel) } # create empty data frames to store predicted values for each element's response test_predicted_df <- data.frame(matrix(ncol = 2, nrow = 0)) colnames(test_predicted_df) <- c("element", "test_predicted") test_predicted_df <- data.frame(matrix(ncol = 2, nrow = 0)) colnames(test_predicted_df) <- c("element", "test_predicted") for (i in seq_along(data_test)) { # extract the predictors and response from the current matrix predictors <- data_test[[i]][, -1] response <- data_test[[i]][, 1] # predict the response for the current matrix in the testing set svmmodel <- get(paste0("svmmodel_", names(data_test[i]))) test_predicted <- data.frame(predict(svmmodel, predictors)) # create a data frame with the element name and predicted values for the testing set test_predicted_element_df <- data.frame(element = rep(names(data_test[i]), nrow(test_predicted)), test_predicted = test_predicted) # add the predicted values for the current element in the testing set to the overall data frame test_predicted_df <- rbind(test_predicted_df, test_predicted_element_df) } # print the predicted values for each element in the training and test sets # Extract unique category names #train colnames (train_predicted_df) <- c('Category', 'Column2') category_names <- unique(train_predicted_df$Category) # Create a list of data frames split by category train_predicted_df_list <- lapply(category_names, function(cat) { train_predicted_df[train_predicted_df$Category == cat, "Column2", drop = FALSE] }) # Rename each data frame with its corresponding category name names(train_predicted_df_list) <- category_names # Create a named list of data frames train_predicted_df_named_list <- as.list(setNames(train_predicted_df_list, category_names)) # Assign each data frame in the named list to a separate variable for (i in seq_along(train_predicted_df_named_list)) { assign(paste0("train_predicted_df", i), data.frame(train_predicted_df_named_list[[i]])) } extra_df <- data.frame(Column2=pred_V) train_predicted_df_list <- c(train_predicted_df_list, list(extra_df)) names(train_predicted_df_list)[length(train_predicted_df_list)] <- "pred_V" train_predicted_df_named_list <- as.list(setNames(train_predicted_df_list, c(category_names,'pred_V'))) # test colnames (test_predicted_df) <- c('Category', 'Column2') category_names <- unique(test_predicted_df$Category) # Create a list of data frames split by category test_predicted_df_list <- lapply(category_names, function(cat) { 
test_predicted_df[test_predicted_df$Category == cat, "Column2", drop = FALSE] }) # Rename each data frame with its corresponding category name names(test_predicted_df_list) <- category_names # Create a named list of data frames test_predicted_df_named_list <- as.list(setNames(test_predicted_df_list, category_names)) # Assign each data frame in the named list to a separate variable for (i in seq_along(test_predicted_df_named_list)) { assign(paste0("test_predicted_df", i), data.frame(test_predicted_df_named_list[[i]])) } extra_df <- data.frame(Column2=forecast_V) test_predicted_df_list <- c(test_predicted_df_list, list(extra_df)) names(test_predicted_df_list)[length(test_predicted_df_list)] <- "forecast_V" test_predicted_df_named_list <- as.list(setNames(test_predicted_df_list, c(category_names,'forecast_V'))) # pso_train y_actual <- y_train df_list <- train_predicted_df_named_list # Function to optimize weights using PSO optimize_weights <- function(df_list, y_actual) { # Flatten the list into a matrix df_mat <- as.matrix(do.call(cbind, df_list)) # Define the objective function for PSO objective <- function(weights) { # Convert weights to numeric vector weights <- as.numeric(weights) # Calculate weighted sum of predicted values y_pred <- df_mat %*% weights # Calculate mean squared error mse <- mean((y_pred - y_actual)^2) # Return the mean squared error return(mse) } # Initialize PSO solver solver <- psoptim( lower = rep(0, ncol(df_mat)), upper = rep(1, ncol(df_mat)), par = runif(ncol(df_mat), 0, 1), fn = objective ) # Extract the optimized weights weights <- solver$par # Calculate optimized prediction y_pred <- df_mat %*% weights # Return the optimized prediction return(y_pred) } # Call the function to obtain optimized prediction y_optimized_train <- optimize_weights(df_list, y_actual) # pso_test y_actual <- y_test df_list <- test_predicted_df_named_list y_optimized_test <- optimize_weights(df_list, y_actual) final_y_optimized_train <- exp(y_optimized_train)*Y_train_1 final_y_optimized_test <- exp(y_optimized_test)*Y_test_1 # all metrics all_metrics <- function(actual, predicted) { # Calculate the residuals residuals <- actual - predicted abs_residuals <- abs(actual - predicted) scaled_abs_residuals <- abs_residuals/actual lag_frame <- data.frame(embed(actual,2)) diff <- lag_frame[,1]-lag_frame[,2] abs_diff <- abs(diff) # Calculate simple metrics mse <- mean(residuals^2) rmse <- sqrt(mse) rrmse <- 100*rmse/mean(actual) mae <- mean(abs_residuals) mape <- 100*mean(scaled_abs_residuals) mase <- mae/mean(abs_diff) # calculate complex matrics nse <- 1- (mse/(mean(actual^2)-(mean(actual))^2)) wi <- 1- (mse/mean((abs(actual-mean(actual))+abs(predicted-mean(actual)))^2)) lme <- 1- mae/mean(abs(actual-mean(actual))) # creating the data frame AllMetrics <- data.frame(cbind(c('RMSE', 'RRMSE', 'MAE', 'MAPE', 'MASE','NSE', 'WI', 'LME'), c(round(rmse,3), round(rrmse,3), round(mae,3), round(mape,3), round(mase,3), round(nse,3), round(wi,3), round(lme,3)))) colnames(AllMetrics) <- c('Metrics','Values') dimnames(AllMetrics) dim(AllMetrics) # returning the table containing all the metrics return(AllMetrics) } # validation_carigas metrics_carigas_train <- data.frame(all_metrics( Y_train, final_y_optimized_train)) metrics_carigas_test <- data.frame(all_metrics( Y_test, final_y_optimized_test)) metrics <- cbind(as.numeric(metrics_carigas_train$Values), as.numeric(metrics_carigas_test$Values)) colnames(metrics) <- c('CARIGAS_Train', 'CARIGAS_Test') row.names(metrics)<-c('RMSE', 'RRMSE', 'MAE', 'MAPE', 
'MASE','NSE', 'WI', 'LME') predict_compare <- data.frame(cbind(train_actual = Y_train, predicted = final_y_optimized_train)) colnames(predict_compare) <- c('train_actual', 'train_predicted') forecast_compare <- data.frame(cbind(test_actual = Y_test, forecast = final_y_optimized_test)) colnames(forecast_compare) <- c('test_actual', 'test_predicted') return(list(Train_fitted=predict_compare, Test_predicted=forecast_compare, Accuracy=metrics)) }
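
## ----------------------------------------------------------------------------
## Standalone sketch (comments only) of the PSO ensemble-weighting step shared
## by carigaan() and carigas(): component predictions are combined with weights
## in [0, 1] chosen by pso::psoptim() to minimize the training MSE. All inputs
## below are simulated placeholders, not outputs of the functions above.
# set.seed(1)
# y_actual <- rnorm(50)
# preds <- cbind(y_actual + rnorm(50, sd = 0.3),   # e.g. an ANN/SVR component
#                y_actual + rnorm(50, sd = 0.5))   # e.g. an ARIMA-GARCH component
# mse <- function(w) mean((preds %*% w - y_actual)^2)
# sol <- pso::psoptim(par = runif(ncol(preds)), fn = mse,
#                     lower = rep(0, ncol(preds)), upper = rep(1, ncol(preds)))
# y_ensemble <- preds %*% sol$par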
/scratch/gouwar.j/cran-all/cranData/CEEMDANML/R/CEEMDANML.R
###################################################################################
#' Random Design
#'
#' Create a random initial population or experimental design, given a specified creation function,
#' as well as an optional set of user-specified design members and a maximum design size.
#' Also removes duplicates from the design/population.
#'
#' @param x Optional list of user-specified solutions to be added to the design, defaults to NULL
#' @param cf Creation function, creates random new individuals
#' @param size size of the design
#' @param control not used
#'
#' @return Returns list with experimental design without duplicates
#'
#' @seealso \code{\link{optimRS}}, \code{\link{designMaxMinDist}}
#' @keywords internal
#' @examples
#' # Create a design of 10 permutations, each with 5 elements
#' design <- designRandom(NULL,function()sample(5),10)
#' # Create a design of 20 real valued 2d vectors
#' design <- designRandom(NULL,function()runif(2),20)
#' @export
###################################################################################
designRandom <- function(x=NULL,cf,size,control=list()){
	## initialization
	if(is.null(x)){
		x <- list()
		k=0
	}else{ #given start population
		k=length(x)
	}
	if(k>size){
		x <- x[1:size]
	}else if(k<size){
		## CREATE initial population
		x <- c(x, replicate(size-k , cf(),simplify=FALSE))
	}#else if k==size do nothing.
	## REPLACE duplicates from initial population with unique individuals
	x <- removeDuplicates(x, cf)
}

###################################################################################
#' Max-Min-Distance Design
#'
#' Build a design of experiments in a sequential manner: the first candidate solution is created at random.
#' Afterwards, candidates are added sequentially, maximizing the minimum distance to the existing candidates.
#' Each max-min problem is resolved by random sampling.
#' The aim is to get a rather diverse design.
#'
#' @param x Optional list of user-specified solutions to be added to the design/population, defaults to NULL
#' @param cf Creation function, creates random new individuals
#' @param size size of the design
#' @param control list of controls. \code{control$distanceFunction} requires a distance function to compare two candidates created by cf.
#' \code{control$budget} is the number of candidates for the random sampling, defaults to 100.
#'
#' @return Returns list with experimental design without duplicates
#'
#' @seealso \code{\link{optimMaxMinDist}}, \code{\link{designRandom}}
#' @keywords internal
#' @examples
#' # Create a design of 10 permutations, each with n=5 elements,
#' # and with 50 candidates for each sample.
#' # Note, that in this specific case the number of candidates
#' # should be no larger than factorial(n).
#' # The default (hamming distance) is used.
#' design <- designMaxMinDist(NULL,function()sample(5),10,
#' 		control=list(budget=50))
#' # Create a design of 20 real valued 2d vectors,
#' # with 100 candidates for each sample
#' # using euclidean distance.
#' design <- designMaxMinDist(NULL,function()runif(2),20, #' control=list(budget=100, #' distanceFunction=function(x,y)sqrt(sum((x-y)^2)))) #' # plot the resulting design #' plot(matrix(unlist(design),,2,byrow=TRUE)) #' @export ################################################################################### designMaxMinDist <- function(x=NULL,cf,size,control=list()){ con<-list(budget=100, distanceFunction=distancePermutationHamming ) con[names(control)] <- control control<-con ## initialization if(is.null(x)){ x <- list(cf()) k=1 }else{ #given start population k=length(x) } if(k>size){ x <- x[1:size] }else if(k<size){ ## CREATE initial population for(ki in (k+1):size){ fun <- function(xnew) -min(distanceVector(xnew,x,control$distanceFunction)) res <- optimRS(,fun,control=list(creationFunction=cf,budget=control$budget)) x <- c(x,list(res$xbest)) } }#else if k==size do nothing. ## REPLACE duplicates from initial population with unique individuals x <- removeDuplicates(x, cf) }
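
################################################################################
## Usage sketch (comments only): a diverse initial design of bit strings.
## designMaxMinDist() is combined here with a binary creation function and a
## plain Hamming distance; the string length (8) and budget (50) are arbitrary.
# cf <- function() sample(c(0, 1), 8, replace = TRUE)
# design <- designMaxMinDist(NULL, cf, 10,
#     control = list(budget = 50,
#                    distanceFunction = function(x, y) sum(x != y)))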
/scratch/gouwar.j/cran-all/cranData/CEGO/R/DoE.R
###################################################################################
#' NK-Landscape Benchmark Creation
#'
#' Function that generates an NK-Landscape.
#'
#' @param N length of the bit strings
#' @param K number of neighbours contributing to the fitness of one position
#' @param PI vector, giving relative positions of each neighbour in the bit-string
#' @param g set of fitness functions for each possible combination of string components. Will be randomly determined if not specified. Should have N rows, and 2^(K+1) columns.
#'
#' @return the function of type cost=f(bitstring). Returned fitness values will be negative, for the purpose of minimization.
#'
#' @examples
#' fun <- benchmarkGeneratorNKL(6,2)
#' fun(c(1,0,1,1,0,0))
#' fun(c(1,0,1,1,0,1))
#' fun(c(0,1,0,0,1,1))
#' fun <- benchmarkGeneratorNKL(6,3)
#' fun(c(1,0,1,1,0,0))
#' fun <- benchmarkGeneratorNKL(6,2,c(-1,1))
#' fun(c(1,0,1,1,0,0))
#' fun <- benchmarkGeneratorNKL(6,2,c(-1,1),g=matrix(runif(48),6))
#' fun(c(1,0,1,1,0,0))
#' fun(sample(c(0,1),6,TRUE))
#'
#' @export
###################################################################################
benchmarkGeneratorNKL <- function(N=10,K=1,PI=1:K,g){
	if(missing(g)){ # generate the fitness subfunctions
		g <- matrix(runif(N*2^(K+1)),N)
	}
	bits <- 2^(0:K)
	## force evaluation of the promises, so that the returned closure is
	## self-contained (same trick as in solutionFunctionGeneratorBinary)
	bits
	g
	N
	K
	PI
	function(x){
		usum=0
		for(i in 1:N){
			xx <- x[c(i,((i+PI-1)%%N)+1)] #select current and impacting (neighbouring) bits (circular)
			usum <- usum+ g[i,sum(bits*xx)+1]
		}
		-usum/N #minus for minimization
	}
}

###################################################################################
#' MaxCut Benchmark Creation
#'
#' Generates MaxCut problems, with binary decision variables.
#' The MaxCut problems are transformed to minimization problems by negation.
#'
#' @param N length of the bit strings
#' @param A The adjacency matrix of the graph. Will be created at random if not provided.
#'
#' @return the function of type cost=f(bitstring). Returned fitness values will be negative, for the purpose of minimization.
#'
#' @examples
#' fun <- benchmarkGeneratorMaxCut(N=6)
#' fun(c(1,0,1,1,0,0))
#' fun(c(1,0,1,1,0,1))
#' fun(c(0,1,0,0,1,1))
#' fun <- benchmarkGeneratorMaxCut(A=matrix(c(0,1,0,1,1,0,1,0,0,1,0,1,1,0,1,0),4,4))
#' fun(c(1,0,1,0))
#' fun(c(1,0,1,1))
#' fun(c(0,1,0,1))
#'
#' @export
###################################################################################
benchmarkGeneratorMaxCut <- function(N,A){
	if(missing(N)&missing(A)){
		stop("No arguments provided to the benchmark generation function.")
	}else if(missing(N)){
		N <- nrow(A)
	}else if(missing(A)){
		nedges <- (N*N - N)/2
		weights <- runif(nedges)
		A <- matrix(0,N,N)
		A[upper.tri(A)]<-weights
		A <- A+ t(A)
	}
	## graph laplacian:
	L <- diag(as.numeric(A %*% rep(1,N)),N) - A
	## force evaluation of the promises, so that the returned closure is self-contained
	L
	N
	function(x){
		x <- x*2-1
		#0.25*(x%*%L%*%x) #exact gain
		-(x%*%L%*%x) #minimization, removed constant
	}
}
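
################################################################################
## Usage sketch (comments only): random search on a small NK-landscape, using
## only functions from this file plus base R. The benchmark returns negated
## fitness, so smaller values are better; the sizes below are arbitrary.
# set.seed(1)
# f <- benchmarkGeneratorNKL(N = 10, K = 2)
# candidates <- replicate(100, sample(c(0, 1), 10, replace = TRUE),
#                         simplify = FALSE)
# fitness <- vapply(candidates, f, numeric(1))
# best <- candidates[[which.min(fitness)]]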
/scratch/gouwar.j/cran-all/cranData/CEGO/R/binaryBenchmarkFunctions.R
###################################################################################
#' Binary String Generator Function
#'
#' Returns a function that generates random bit-strings of length N.
#' Can be used to create individuals of NK-Landscapes or other problems with binary representation.
#'
#' @param N length of the bit-strings
#'
#' @return returns a function, without any arguments
#'
#' @export
###################################################################################
solutionFunctionGeneratorBinary <- function(N){
	N #lazy evaluation fix, faster than force()
	function()sample(c(0,1),N,replace=TRUE)
}

###################################################################################
#' Cycle Mutation for Bit-strings
#'
#' Given a population of bit-strings, this function mutates all
#' individuals by cyclically shifting the string to the right or left.
#'
#' @param population List of bit-strings
#' @param parameters list of parameters: parameters$mutationRate => mutation rate, specifying the length of the shift relative to the string length. Should be in the range between zero and one.
#'
#' @return mutated population
#'
#' @export
###################################################################################
mutationBinaryCycle <- function(population, parameters){
	mutationRate <- parameters$mutationRate
	N<-length(population[[1]])
	popsize<- length(population)
	newpop <- list()
	cmp <- max(min(round(mutationRate*N),N-1),1)
	direction <- sample(c(TRUE,FALSE),popsize,replace=T)
	for(i in 1:popsize){
		individual <- population[[i]]
		if(direction[i])
			individual <- c(individual[-(1:cmp)],individual[1:cmp])
		else
			individual <- c(individual[N+1-(cmp:1)],individual[-(N+1-(cmp:1))])
		newpop <- c(newpop, list(individual))
	}
	newpop
}

###################################################################################
#' Block Inversion Mutation for Bit-strings
#'
#' Given a population of bit-strings, this function mutates all
#' individuals by inverting a whole block, randomly selected.
#'
#' @param population List of bit-strings
#' @param parameters list of parameters: parameters$mutationRate => mutation rate, specifying the length of the inverted block relative to the string length. Should be in the range between zero and one.
#'
#' @return mutated population
#'
#' @export
###################################################################################
mutationBinaryBlockInversion <- function(population, parameters){
	mutationRate <- parameters$mutationRate
	N<-length(population[[1]])
	popsize<- length(population)
	newpop <- list()
	cmp <- max(min(round(mutationRate*N),N-1),1)
	startIndex <- N-cmp
	for(i in 1:popsize){
		index1 <- sample(startIndex,1,FALSE,NULL)
		index2 <- index1+cmp
		sel <- index1:index2
		individual <- population[[i]]
		individual[sel] <- as.numeric(!individual[sel])
		newpop <- c(newpop, list(individual))
	}
	newpop
}

###################################################################################
#' Bit-flip Mutation for Bit-strings
#'
#' Given a population of bit-strings, this function mutates all
#' individuals by randomly inverting one or more bits in each individual.
#'
#' @param population List of bit-strings
#' @param parameters list of parameters: parameters$mutationRate => mutation rate, specifying the fraction of bits flipped (at least one bit is always flipped). Should be in the range between zero and one.
#'
#' @return mutated population
#'
#' @export
###################################################################################
mutationBinaryBitFlip <- function(population, parameters){
	mutationRate <- parameters$mutationRate
	N<-length(population[[1]])
	popsize<- length(population)
	newpop <- list()
	cmp <- max(min(round(mutationRate*N),N),1)
	for(i in 1:popsize){
		index<-sample(N,cmp,FALSE,NULL)
		individual <- population[[i]]
		individual[index]=as.numeric(!individual[index])
		newpop <- c(newpop, list(individual))
	}
	newpop
}

###################################################################################
#' Single Bit-flip Mutation for Bit-strings
#'
#' Given a population of bit-strings, this function mutates all
#' individuals by randomly inverting one bit in each individual.
#' Due to the fixed number of flips, this is computationally faster.
#'
#' @param population List of bit-strings
#' @param parameters not used
#'
#' @return mutated population
#'
#' @export
###################################################################################
mutationBinarySingleBitFlip <- function(population, parameters){
	N<-length(population[[1]])
	popsize<- length(population)
	newpop <- list()
	index <- sample(N,popsize,TRUE,NULL)
	for(i in 1:popsize){
		individual <- population[[i]]
		individual[index[i]]=as.numeric(!individual[index[i]])
		newpop <- c(newpop, list(individual))
	}
	newpop
}

###################################################################################
#' Uniform Crossover for Bit Strings
#'
#' Given a population of bit-strings, this function recombines each
#' individual with another individual by randomly picking bits from each parent.
#' Note, that \code{\link{optimEA}} will not pass the whole population
#' to recombination functions, but only the chosen parents.
#'
#' @param population List of bit-strings
#' @param parameters not used
#'
#' @return population of recombined offspring
#'
#' @export
###################################################################################
recombinationBinaryUniform <- function(population, parameters){
	N <- length(population[[1]])
	popsize <- length(population)/2 ## assumes nParents == 2
	newpop <- list()
	for(i in 1:popsize){
		index<-sample(N,N*0.5,FALSE,NULL)
		j <- popsize + i ## second parent
		parent1 <- population[[i]]
		parent1[-index] <- population[[j]][-index] #contribution of second parent
		newpop <- c(newpop, list(parent1))
	}
	newpop
}

###################################################################################
#' Single Point Crossover for Bit Strings
#'
#' Given a population of bit-strings, this function recombines each
#' individual with another individual by randomly specifying a single position.
#' Information before that position is taken from the first parent,
#' the rest from the second.
#' #' @param population List of bit-strings #' @param parameters not used #' #' @return population of recombined offspring #' #' @export ################################################################################### recombinationBinary1Point <- function(population, parameters){ N <- length(population[[1]]) popsize <- length(population)/2 ## assumes nParents == 2 newpop <- list() for(i in 1:popsize){ index <- sample(2:(N-1),1,FALSE,NULL) inds <- 1:index j <- popsize + i ## second parent parent1 <- population[[i]] #first parent parent1[inds] <- population[[j]][inds] #contribution of second parent newpop <- c(newpop, list(parent1)) } newpop } ################################################################################### #' Two Point Crossover for Bit Strings #' #' Given a population of bit-strings, this function recombines each #' individual with another individual by randomly specifying 2 positions. #' Information in-between is taken from one parent, the rest from the other. #' #' @param population List of bit-strings #' @param parameters not used #' #' @return population of recombined offspring #' #' @export ################################################################################### recombinationBinary2Point <- function(population, parameters){ N <- length(population[[1]]) popsize <- length(population)/2 ## assumes nParents == 2 newpop <- list() for(i in 1:popsize){ index1 <- sample(N,1,FALSE,NULL) index2 <- sample(2:(N-1),1,FALSE,NULL) inds <- index1:index2 j <- popsize + i ## second parent parent1 <- population[[i]] #first parent parent1[inds] <- population[[j]][inds] #contribution of second parent newpop <- c(newpop, list(parent1)) } newpop } ################################################################################### #' Arithmetic (AND) Crossover for Bit Strings #' #' Given a population of bit-strings, this function recombines each #' individual with another individual by computing \code{parent1 & parent2} (logical AND). #' #' @param population List of bit-strings #' @param parameters not used #' #' @return population of recombined offspring #' #' @export ################################################################################### recombinationBinaryAnd <- function(population, parameters){ popsize <- length(population)/2 ## assumes nParents == 2 newpop <- list() for(i in 1:popsize){ j <- popsize + i ## second parent parent1 <- population[[i]] * population[[j]] #logical and (in 0/1 encoding) newpop <- c(newpop, list(parent1)) } newpop }
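###################################################################################
## Minimal usage sketch for the operators above (uses only functions from this
## file; variable names are illustrative): create a small random population,
## recombine pairs of parents, then apply bit-flip mutation to the offspring.
##
# gen <- solutionFunctionGeneratorBinary(8)
# parents <- replicate(4,gen(),simplify=FALSE)            # parents i and i+popsize are paired
# offspring <- recombinationBinaryUniform(parents,list()) # 4 parents yield 2 offspring
# offspring <- mutationBinaryBitFlip(offspring,list(mutationRate=0.1))
###################################################################################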
/scratch/gouwar.j/cran-all/cranData/CEGO/R/binaryOperators.R
################################################################################ ## This program is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 3 of the License, or ## (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with this program. If not, see <http://www.gnu.org/licenses/>. ################################################################################ ################################################################################ #' Combinatorial Efficient Global Optimization #' #' Model building, surrogate model #' based optimization and Efficient Global Optimization in combinatorial #' or mixed search spaces. This includes methods for distance calculation, #' modeling and handling of indefinite kernels/distances. #' #' \tabular{ll}{ #' Package: \tab CEGO\cr #' Type: \tab Package\cr #' Version: \tab 2.4.3\cr #' Date: \tab 2024-01-27\cr #' License: \tab GPL (>= 3)\cr #' LazyLoad: \tab yes\cr #' } #' # @name CEGO-package # @aliases CEGO # @docType package #' @title Combinatorial Efficient Global Optimization in R #' @author Martin Zaefferer \email{mzaefferer@@gmail.com} #' @references Zaefferer, Martin; Stork, Joerg; Friese, Martina; Fischbach, Andreas; Naujoks, Boris; Bartz-Beielstein, Thomas. (2014). Efficient global optimization for combinatorial problems. In Proceedings of the 2014 conference on Genetic and evolutionary computation (GECCO '14). ACM, New York, NY, USA, 871-878. DOI=10.1145/2576768.2598282 #' @references Zaefferer, Martin; Stork, Joerg; Bartz-Beielstein, Thomas. (2014). Distance Measures for Permutations in Combinatorial Efficient Global Optimization. In Parallel Problem Solving from Nature - PPSN XIII (p. 373-383). Springer International Publishing. #' @references Zaefferer, Martin and Bartz-Beielstein, Thomas (2016). Efficient Global Optimization with Indefinite Kernels. Parallel Problem Solving from Nature-PPSN XIV. Accepted, in press. Springer. #' @keywords package #' @seealso Interface of main function: \code{\link{optimCEGO}} #' @import MASS #' @import graphics #' @import stats #' @import DEoptim #' @importFrom quadprog solve.QP #' @importFrom Matrix nearPD #' @importFrom methods formalArgs #' @importFrom anticlust balanced_clustering #' @useDynLib CEGO, .registration = TRUE, .fixes = "C_" #' #' @section Acknowledgments: #' This work has been partially supported by the Federal Ministry of Education #' and Research (BMBF) under the grants CIMO (FKZ 17002X11) and #' MCIOP (FKZ 17N0311). #' "_PACKAGE" #ends description ################################################################################
/scratch/gouwar.j/cran-all/cranData/CEGO/R/cegoPackage.R
###################################################################################
#' Build Clustered Model
#'
#' This function builds an ensemble of Gaussian Process models, where
#' each individual model is fitted to a partition of the parameter space.
#' Partitions are generated by balanced clustering of the samples, based on
#' their mutual distances (via \code{\link[anticlust]{balanced_clustering}}).
#'
#' @param x list of samples in input space
#' @param y column vector of observations, one for each sample
#' @param distanceFunction a suitable distance function of type f(x1,x2), returning a scalar distance value
#' @param control list of control parameters: \code{modelControl} (controls passed to the core model, see \code{\link{modelKriging}}) and \code{minsize} (minimum number of observations for each partition to be modeled)
#'
#' @return an object of class \code{modelKrigingClust}, containing the fitted partition models
#'
#' @export
###################################################################################
modelKrigingClust <- function(x, y, distanceFunction,control=list()){
    nsamples <- length(x)
    con <- list(
        modelControl=list(), #controls passed to the core model
        minsize=30 #minimum number of observations for each partition to be modeled
    )
    con[names(control)] <- control
    control <- con
    #control$modelControl$target <- c("y","s") #required for weighting the ensemble predictions
    ## clustering by distance
    d <- distanceMatrix(x,distFun = distanceFunction)
    ## d_dist <- as.dist(distanceMatrix(x,distFun = distanceFunction))
    groups <- anticlust::balanced_clustering(d,max(floor(nsamples/control$minsize),2))
    unique_groups <- na.omit(unique(groups))
    ## balanced_clustering may leave some samples unassigned (NA): assign them to random groups
    nas <- is.na(groups)
    groups[nas] <- sample(unique_groups,sum(nas))
    ## build a model in each partition
    models <- list()
    for(i in 1:length(unique_groups)){
        selected <- groups == unique_groups[i]
        xi <- x[selected]
        yi <- y[selected]
        models[[i]] <- modelKriging(x=xi,y=yi,distanceFunction=distanceFunction,
            control=control$modelControl
        )
    }
    fit <- list(fits=models)
    class(fit) <- "modelKrigingClust"
    return(fit)
}

###################################################################################
#' Clustered Kriging Prediction
#'
#' Predict with a model fit resulting from \code{\link{modelKrigingClust}}.
#'
#' @param object fit of the clustered Kriging model ensemble (settings and parameters), of class \code{modelKrigingClust}.
#' @param newdata list of samples to be predicted
#' @param ... further arguments, currently not used
#'
#' @return list with function value (mean) \code{object$y} and uncertainty estimate \code{object$s} (standard deviation)\cr
#'
#' @seealso \code{\link{predict.modelKriging}}
#' @export
###################################################################################
predict.modelKrigingClust <- function(object,newdata,...){
    ## predict with each model
    predictions <- list()
    for(i in 1:length(object$fits)){
        fiti <- object$fits[[i]]
        fiti$predAll <- TRUE
        predictions[[i]] <- predict(fiti,newdata)
    }
    ## convert predictions of s
    ps <- do.call(rbind,sapply(predictions,'[',"s"))
    ## convert predictions of y
    py <- do.call(rbind,sapply(predictions,'[',"y"))
    ## compute weights based on s
    psnegsquare <- ps^-2
    weights <- t(t(psnegsquare)/colSums(psnegsquare))
    ## compute ensemble prediction for s and y, weighted
    ensembley <- colSums(py*weights)
    ensembles <- sqrt(colSums(ps^2*weights^2))
    ## end
    list(y=ensembley,s=ensembles)
}
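###################################################################################
## Numeric sketch of the inverse-variance weighting used in
## predict.modelKrigingClust (numbers are made up for illustration):
## predictions with smaller uncertainty s receive larger weight.
##
# ps <- matrix(c(0.5,1.0,
#                2.0,0.2),2,byrow=TRUE) # predicted s of 2 models at 2 test points
# py <- matrix(c(1.0,1.2,
#                0.9,2.0),2,byrow=TRUE) # predicted y of 2 models at 2 test points
# w <- t(t(ps^-2)/colSums(ps^-2))       # columns sum to one; smaller s -> larger weight
# colSums(py*w)                         # weighted ensemble mean
# sqrt(colSums(ps^2*w^2))               # propagated ensemble uncertainty
###################################################################################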
/scratch/gouwar.j/cran-all/cranData/CEGO/R/clusterKriging.R
###################################################################################
#' Unimodal Fitness Landscape
#'
#' This function generates uni-modal fitness landscapes based on distance measures.
#' The fitness is the distance to a reference individual or center. Hence, the reference individual
#' is the optimum of the landscape. This function is essentially a wrapper
#' for \code{\link{landscapeGeneratorMUL}}.
#'
#' @param ref reference individual
#' @param distanceFunction Distance function, used to evaluate d(x,ref), where x is an arbitrary new individual
#'
#' @return returns a function. The function requires a list of candidate solutions as its input, where each solution is suitable for use with the distance function. The function returns a numeric vector.
#'
#' @references Moraglio, Alberto, Yong-Hyuk Kim, and Yourim Yoon. "Geometric surrogate-based optimisation for permutation-based problems." Proceedings of the 13th annual conference companion on Genetic and evolutionary computation. ACM, 2011.
#'
#' @seealso \code{\link{landscapeGeneratorMUL}}, \code{\link{landscapeGeneratorGaussian}}
#'
#' @examples
#' fun <- landscapeGeneratorUNI(ref=1:7,distancePermutationCos)
#' ## for single solutions, note that the function still requires list input:
#' x <- 1:7
#' fun(list(x))
#' x <- 7:1
#' fun(list(x))
#' x <- sample(7)
#' fun(list(x))
#' ## multiple solutions at once:
#' x <- replicate(5,sample(7),FALSE)
#' fun(x)
#'
#' @export
###################################################################################
landscapeGeneratorUNI <- function(ref,distanceFunction){
    landscapeGeneratorMUL(list(ref),distanceFunction)
}

###################################################################################
#' Multimodal Fitness Landscape
#'
#' This function generates multi-modal fitness landscapes based on distance measures.
#' The fitness is the minimal distance to several reference individuals or centers. Hence, each reference individual
#' is an optimum of the landscape.
#'
#' @param ref list of reference individuals / centers
#' @param distanceFunction Distance function, used to evaluate d(x,ref[[n]]), where x is an arbitrary new individual
#'
#' @return returns a function. The function requires a list of candidate solutions as its input, where each solution is suitable for use with the distance function. The function returns a numeric vector.
#'
#' @seealso \code{\link{landscapeGeneratorUNI}}, \code{\link{landscapeGeneratorGaussian}}
#'
#' @examples
#' fun <- landscapeGeneratorMUL(ref=list(1:7,c(2,4,1,5,3,7,6)),distancePermutationCos)
#' x <- 1:7
#' fun(list(x))
#' x <- c(2,4,1,5,3,7,6)
#' fun(list(x))
#' x <- 7:1
#' fun(list(x))
#' x <- sample(7)
#' fun(list(x))
#' ## multiple solutions at once:
#' x <- append(list(1:7,c(2,4,1,5,3,7,6)),replicate(5,sample(7),FALSE))
#' fun(x)
#'
#' @export
###################################################################################
landscapeGeneratorMUL <- function(ref,distanceFunction){
    distanceFunction #lazy evaluation fix, faster than force()
    ref #lazy evaluation fix, faster than force()
    function(x){
        if(!is.list(x))x<-list(x)
        k <- length(ref)
        d <- matrix(NA,k,length(x))
        for(i in 1:k)
            d[i,] <- distanceVector(ref[[i]],x,distanceFunction)
        do.call(pmin.int, lapply(1:nrow(d), function(i)d[i,])) #fast minimum over the rows of d, yielding one value per candidate solution
    }
}

###################################################################################
#' Create Gaussian Landscape
#'
#' This function is loosely based on the Gaussian Landscape Generator by Bo Yuan and Marcus Gallagher.
#' It creates a Gaussian Landscape every time it is called. This Landscape can be evaluated like a function.
#' To adapt to combinatorial spaces, the Gaussians are here based on a user-specified distance measure.
#' Due to the expected nature of combinatorial spaces and their lack of direction, the resulting
#' Gaussians are much simplified in comparison to the continuous, vector-valued case (e.g., no rotation).
#' Since the \code{CEGO} package is tailored to minimization, the landscape is inverted.
#'
#' @param nGaussian number of Gaussian components in the landscape. Default is 10.
#' @param theta controls width of Gaussian components as a multiplier. Default is 1.
#' @param ratio minimal function value of the local minima. Default is 0.2. (Note: Global minimum will be at zero, local minima will be in range \code{[ratio;1]})
#' @param seed seed for the random number generator used before creation of the landscape. Generator status will be saved and reset afterwards.
#' @param distanceFunction A function of type \code{f(x,y)}, to evaluate distance between two samples in their given representation.
#' @param creationFunction function to randomly generate the centers of the Gaussians, in form of their given representation.
#'
#' @return returns a function. The function requires a list of candidate solutions as its input, where each solution is suitable for use with the distance function.
#'
#' @references B. Yuan and M. Gallagher (2003) "On Building a Principled Framework for Evaluating and Testing Evolutionary Algorithms: A Continuous Landscape Generator".
#' In Proceedings of the 2003 Congress on Evolutionary Computation, IEEE, pp. 451-458, Canberra, Australia.
#' #' @examples #' #rng seed #' seed=101 #' # distance function #' dF <- function(x,y)(sum((x-y)^2)) #sum of squares #' #dF <- function(x,y)sqrt(sum((x-y)^2)) #euclidean distance #' # creation function #' cF <- function()runif(1) #' # plot pars #' par(mfrow=c(3,1),mar=c(3.5,3.5,0.2,0.2),mgp=c(2,1,0)) #' ## uni modal distance landscape #' # set seed #' set.seed(seed) #' #landscape #' lF <- landscapeGeneratorUNI(cF(),dF) #' x <- as.list(seq(from=0,by=0.001,to=1)) #' plot(x,lF(x),type="l") #' ## multi-modal distance landscape #' # set seed #' set.seed(seed) #' #landscape #' lF <- landscapeGeneratorMUL(replicate(5,cF(),FALSE),dF) #' plot(x,lF(x),type="l") #' ## glg landscape #' #landscape #' lF <- landscapeGeneratorGaussian(nGaussian=20,theta=1, #' ratio=0.3,seed=seed,dF,cF) #' plot(x,lF(x),type="l") #' #' @export ################################################################################### landscapeGeneratorGaussian <- function(nGaussian=10,theta=1,ratio=0.2,seed=1, distanceFunction, creationFunction){ ## save seed status if(exists(as.character(substitute(.Random.seed)))) SAVESEED<-.Random.seed else SAVESEED=NULL ## set seed set.seed(seed) ## create landscape fit <- landscapeGeneratorGaussianBuild(nGaussian,ratio,creationFunction) fit$df <- distanceFunction # Calculate maximum distance between centers, for scaling purposes fit$dmax <- max(distanceMatrix(fit$centers,distanceFunction)) #save width parameter fit$theta <- theta ## load seed status if(!is.null(SAVESEED)) assign(".Random.seed", SAVESEED, envir=globalenv()) fit ## create output function fun <- function(x){ if(!is.list(x))x<-list(x) landscapeGeneratorGaussianEval(x,fit)$value } attributes(fun) <- fit fun } ################################################################################### #' Gaussian Landscape Core function #' #' Core Gaussian landscape function. Should not be called directly, as it does not contain proper seed handling. #' #' @param nGaussian number of Gaussian components in the landscape. Default is 10. #' @param ratio minimal function value of the local minima. Default is 0.2. 
(Note: Global minimum will be at zero, local minima will be in range \code{[ratio;1]})
#' @param creationFunction function to randomly generate the centers of the Gaussians
#'
#' @return returns a list, with the following items:
#' \describe{
#' \item{\code{centers}}{ samples which are the centers of each Gaussian}
#' \item{\code{covinv}}{ inverse of variance of each Gaussian}
#' \item{\code{opt}}{ value at randomly chosen optimum center}
#' \item{\code{nGauss}}{ number of Gaussian components}
#' }
#'
#' @keywords internal
#' @export
#' @seealso \code{\link{landscapeGeneratorGaussian}}
###################################################################################
landscapeGeneratorGaussianBuild <- function(nGaussian=10,ratio=0.2,creationFunction){
    ratio <- 1-ratio
    if (nGaussian<=0|ratio<=0|ratio>=1){
        stop('Incorrect parameter values for gaussian landscape generator')
    }
    variance <- matrix(runif(nGaussian,0.1,0.5),nGaussian,1) # avoid zero variance
    # Generate randomly the centers of the Gaussians
    centers <- replicate(nGaussian,creationFunction(),simplify=FALSE)
    # assign values to components
    optimumValue <- rep(0,nGaussian) #initialize
    optimumValue[1] <- 1 # the first Gaussian is set to be the global optimum
    # values of others are randomly generated within [0,ratio]
    optimumValue[2:nGaussian] <- matrix(runif(1*(nGaussian-1)),1,nGaussian-1)*ratio
    list(centers=centers,covinv= 1/variance,opt=1-optimumValue,nGauss=nGaussian)
}

###################################################################################
#' Gaussian Landscape Evaluation
#'
#' Evaluate a Gaussian landscape. Should not be called directly.
#'
#' @param x list of samples to evaluate
#' @param glg list of values defining the Gaussian Landscape, created by \code{landscapeGeneratorGaussianBuild}.
#'
#' @return returns a list, with the following items:\cr
#' \code{value} value of the combined landscape
#' \code{components} value of each component
#'
#' @keywords internal
#' @export
#' @seealso \code{\link{landscapeGeneratorGaussian}}
###################################################################################
landscapeGeneratorGaussianEval <- function(x,glg){
    covinv <- glg$covinv #the inverse covariance matrix of each component
    theta <- glg$theta #width parameter, multiplier for each gaussian component
    centers <- glg$centers #centers of each Gaussian component
    optimumvalue <- 1-glg$opt #the peak value of each component
    nGaussian <- glg$nGauss #total number of components
    p <- length(x) # p: number of individuals;
    #if(is.null(p))p<-1
    tmp <- matrix(0,nGaussian,p)
    #----------------------------------------------------
    for(i in 1:nGaussian) { # calculate the values generated by each component
        newx <- distanceVector(centers[[i]],x,glg$df)/glg$dmax
        tmp[i,] <- covinv[i]*newx
    }
    f <- exp(-theta*tmp) # f is a nGaussian-by-p matrix
    f <- f*matrix(optimumvalue,length(optimumvalue),p,byrow=FALSE)# multiply the peak value of each component
    # the value of each individual generated by each component
    value <- apply(f,2,max) # choose the maximum values as the fitness values
    list(value=1-value,components=f)
}
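###################################################################################
## Usage sketch: the Gaussian landscape generator combined with a permutation
## distance from this package (assumes distancePermutationHamming is available).
##
# lF <- landscapeGeneratorGaussian(nGaussian=5,theta=1,ratio=0.3,seed=1,
#     distanceFunction=distancePermutationHamming,
#     creationFunction=function() sample(5))
# lF(list(1:5,sample(5),5:1)) # one value per permutation; the global optimum (value 0) is at the first random center
###################################################################################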
/scratch/gouwar.j/cran-all/cranData/CEGO/R/distanceBasedLandscapeGenerators.R
################################################################################### #' Calculate Distance Matrix #' #' Calculate the distance between all samples in a list, and return as matrix. #' #' @param X list of samples, where each list element is a suitable input for \code{distFun} #' @param distFun Distance function of type f(x,y)=r, where r is a scalar and x and y are elements whose distance is evaluated. #' @param ... further arguments passed to distFun #' #' @return The distance matrix #' #' @examples #' x <- list(5:1,c(2,4,5,1,3),c(5,4,3,1,2), sample(5)) #' distanceMatrix(x,distancePermutationHamming) #' #' @export ################################################################################### distanceMatrix <-function(X,distFun,...){ n <- length(X) m <- matrix(0,nrow=n, ncol=n) for(i in seq_len(n - 1)){ ids <- (i+1):n m[ids,i] <- m[i,ids] <- distanceVector(X[[i]],X[ids],distFun,...) } m } ################################################################################### #' Calculate Distance Vector #' #' Calculate the distance between a single sample and all samples in a list. #' #' @param a A single sample which is a suitable input for \code{distFun} #' @param X list of samples, where each list element is a suitable input for \code{distFun} #' @param distFun Distance function of type f(x,y)=r, where r is a scalar and x and y are elements whose distance is evaluated. #' @param ... further arguments passed to distFun #' #' @return A numerical vector of distances #' #' @examples #' x <- 1:5 #' y <- list(5:1,c(2,4,5,1,3),c(5,4,3,1,2)) #' distanceVector(x,y,distancePermutationHamming) #' #' @export ################################################################################### distanceVector <-function(a,X,distFun,...){ unlist(lapply(X,distFun,a,...)) } ################################################################################### #' Update distance matrix #' #' Update an existing distance matrix \code{D_mat} by adding distances #' of all previous candidate solutions to one new candidate solution, \code{d_vec= d(x_i,x_new)}. #' #' @param distanceMat original distance matrix \code{D_mat} #' @param x list of candidate solutions, last in list is the new solution #' @param distanceFunction Distance function of type f(x,y)=r, where r is a scalar and x and y are candidate solutions whose distance is evaluated. #' @param ... further arguments passed to distanceFunction #' #' @return matrix of distances between all solutions x #' #' @examples #' x <- list(5:1,c(2,4,5,1,3),c(5,4,3,1,2)) #' dm <- distanceMatrix(x,distancePermutationHamming) #' x <- append(x,list(1:5)) #' dmUp <- distanceMatrixUpdate(dm,x,distancePermutationHamming) #' #' @export #' @keywords internal ################################################################################### distanceMatrixUpdate <- function(distanceMat,x,distanceFunction,...){ count <- length(x) if(length(distanceFunction)==1){ # in case of a single distance function (all models) newdist = distanceVector(x[[count]],x[-count],distanceFunction,...) distanceMat = cbind(rbind(distanceMat,c(newdist)),c(newdist,0)) }else{ # in case of multiple distance functions (kriging only atm.) for(i in 1:length(distanceFunction)){ newdist = distanceVector(x[[count]],x[-count],distanceFunction[[i]],...) 
distanceMat[[i]] <- cbind(rbind(distanceMat[[i]],c(newdist)),c(newdist,0)) } } distanceMat } ################################################################################### #' Distance Matrix Wrapper #' #' Wrapper to calculate the distance matrix, with one or multiple distance functions. #' #' @param x list of candidate solutions whose distance is evaluated #' @param distanceFunction Distance function of type f(x,y)=r, where r is a scalar and x and y are candidate solutions whose distance is evaluated. #' @param ... further arguments passed to distanceFunction #' #' @return matrix of distances between all solutions in list x #' #' @examples #' x <- list(5:1,c(2,4,5,1,3),c(5,4,3,1,2)) #' dm1 <- distanceMatrix(x,distancePermutationHamming) #' dm2 <- distanceMatrix(x,distancePermutationInsert) #' dmBoth <- distanceMatrixWrapper(x,list(distancePermutationHamming,distancePermutationInsert)) #' #' @export #' @keywords internal ################################################################################### distanceMatrixWrapper <- function(x,distanceFunction,...){ if(length(distanceFunction)==1){ # in case of a single distance function (all models) distances <- distanceMatrix(x,distanceFunction,...) }else{ # in case of multiple distance functions (kriging only atm.) distances <- list() for(i in 1:length(distanceFunction)){ distances[[i]] <- distanceMatrix(x,distanceFunction[[i]],...) } } distances } ################################################################################### #' Euclidean Distance #' #' The Euclidean distance for real vectors. #' #' @param x first real vector #' @param y second real vector #' #' @return numeric distance value \deqn{d(x,y)} #' #' @examples #' x <- runif(5) #' y <- runif(5) #' distanceRealEuclidean(x,y) #' #' @export ################################################################################### distanceRealEuclidean <- function(x,y){ sqrt(sum((x-y)^2)) }
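###################################################################################
## Consistency sketch for the helpers above: updating a distance matrix with one
## new sample should match recomputing the full matrix from scratch (assumes
## distancePermutationHamming from this package).
##
# x <- list(5:1,c(2,4,5,1,3),c(5,4,3,1,2))
# dm <- distanceMatrix(x,distancePermutationHamming)
# x <- append(x,list(1:5))
# dmUp <- distanceMatrixUpdate(dm,x,distancePermutationHamming)
# all.equal(dmUp,distanceMatrix(x,distancePermutationHamming)) # TRUE
###################################################################################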
/scratch/gouwar.j/cran-all/cranData/CEGO/R/distanceCalculation.R
###################################################################################
#' Correcting Conditional Negative Semi-Definiteness
#'
#' Correcting, e.g., a distance matrix with chosen methods so that it becomes a CNSD matrix.
#'
#' @param mat symmetric matrix, which should be at least of size 3x3
#' @param method string that specifies method for correction: spectrum clip \code{"clip"}, spectrum flip \code{"flip"}, nearest definite matrix \code{"near"}, spectrum square \code{"square"}, spectrum diffusion \code{"diffusion"}.
#' @param tol tolerance value. Eigenvalues between \code{-tol} and \code{tol} are assumed to be zero.
#'
#' @return the corrected CNSD matrix
#'
#' @seealso \code{\link{modelKriging}}
#' @export
#' @examples
#' x <- list(c(2,1,4,3),c(2,4,3,1),c(4,2,1,3),c(4,3,2,1),c(1,4,3,2))
#' D <- distanceMatrix(x,distancePermutationInsert)
#' is.CNSD(D) #matrix should not be CNSD
#' D <- correctionCNSD(D)
#' is.CNSD(D) #matrix should now be CNSD
#' D
#' # note: to fix the negative distances, use repairConditionsDistanceMatrix.
#' # Or else, use correctionDistanceMatrix.
#' @references Martin Zaefferer and Thomas Bartz-Beielstein. (2016). Efficient Global Optimization with Indefinite Kernels. Parallel Problem Solving from Nature-PPSN XIV. Accepted, in press. Springer.
###################################################################################
correctionCNSD <- function(mat,method="flip",tol=1e-8){
    n <- nrow(mat)
    v <- cbind(c(rep(1,n-1),1+sqrt(n)))
    Q <- diag(n) - 2 * (1/as.numeric(crossprod(v,v))) * tcrossprod(v,v)
    Fq <- Q %*% -mat %*% Q
    F1 <- Fq[-n,-n]
    Fq[-n,-n] <- correctionDefinite(F1,type="PSD",method=method,tol)$mat #see nearCNSD, or nearest euclidean matrix. just one step.
    -Q %*% Fq %*% Q
}

###################################################################################
#' Correcting Definiteness of a Matrix
#'
#' Correcting a (possibly indefinite) symmetric matrix with chosen approach so that it will have desired definiteness type: positive or negative semi-definite (PSD, NSD).
#'
#' @param mat symmetric matrix
#' @param type string that specifies type of correction: \code{"PSD"},\code{"NSD"} to enforce PSD or NSD matrices respectively.
#' @param method string that specifies method for correction: spectrum clip \code{"clip"}, spectrum flip \code{"flip"}, nearest definite matrix \code{"near"}, spectrum square \code{"square"}, spectrum diffusion \code{"diffusion"}.
#' @param tol tolerance value. Eigenvalues between \code{-tol} and \code{tol} are assumed to be zero.
#'
#' @return list with
#' \describe{
#' \item{\code{mat}}{ corrected matrix}
#' \item{\code{isDefinite}}{ boolean, whether the original matrix already had the requested definiteness}
#' \item{\code{lambda}}{ the eigenvalues of the original matrix}
#' \item{\code{lambdanew}}{ the eigenvalues of the corrected matrix }
#' \item{\code{U}}{ the matrix of eigenvectors}
#' \item{\code{a}}{ the transformation vector}
#' }
#'
#' @seealso \code{\link{modelKriging}}
#' @references Martin Zaefferer and Thomas Bartz-Beielstein. (2016). Efficient Global Optimization with Indefinite Kernels. Parallel Problem Solving from Nature-PPSN XIV. Accepted, in press. Springer.
#' @export
#' @examples
#' x <- list(c(2,1,4,3),c(2,4,3,1),c(4,2,1,3),c(4,3,2,1),c(1,4,3,2))
#' D <- distanceMatrix(x,distancePermutationInsert)
#' is.NSD(D) #matrix should not be NSD
#' D <- correctionDefinite(D,type="NSD")$mat
#' is.NSD(D) #matrix should now be NSD
#' # different example: PSD kernel
#' D <- distanceMatrix(x,distancePermutationInsert)
#' K <- exp(-0.01*D)
#' is.PSD(K)
#' K <- correctionDefinite(K,type="PSD")$mat
#' is.PSD(K)
###################################################################################
correctionDefinite <- function(mat,type='PSD',method="flip",tol=1e-8){
    U <- NA
    isDefinite <- NA
    if(method != "none"){
        if(type=="NSD")
            defSign <- -1
        else if(type=="PSD")
            defSign <- 1
        eig <- eigen(mat,symmetric=T)
        defEig <- defSign * eig$values
        U <- eig$vectors
        a <- rep(1,nrow(U))
        isDefinite <- min(defEig) >= -tol
        if(!isDefinite){ # only adapt if actually needed, else use default MLE algorithm.
            if(method=="clip"){ #or denoise in wu2005
                sel <- defEig>=tol
                m <- sum(sel)
                a <- as.numeric(sel)
                mat <- U[,sel,drop=F] %*% diag(eig$values[sel],m,m) %*% t(U[,sel,drop=F])
            }else if(method=="flip"){ # see chen and wu
                sel <- defEig>=tol | defEig<=-tol
                m <- sum(sel)
                a <- sign(defEig)
                mat <- U[,sel,drop=F] %*% diag(a[sel]*eig$values[sel],m,m)%*% t(U[,sel,drop=F])
            }else if(method=="square"){ # mat(mat^T)
                #mat <- mat %*% t(mat)
                sel <- defEig>=tol | defEig<=-tol
                m <- sum(sel)
                a <- defEig
                mat <- U[,sel,drop=F] %*% diag(a[sel]*eig$values[sel],m,m)%*% t(U[,sel,drop=F])
            }else if(method=="diffusion"){ # expm(mat)
                sel <- defEig>=tol | defEig<=-tol
                m <- sum(sel)
                a <- defSign * exp(defEig) / eig$values
                mat <- U[,sel,drop=F] %*% diag(defSign * exp(defEig[sel]),m,m)%*% t(U[,sel,drop=F])
            }else if(method=="near"){
                pd <- nearPD(defSign * mat, eig.tol = tol, conv.tol = tol,corr=TRUE, do2eigen=FALSE,keepDiag=FALSE,conv.norm.type="F") #corr=T forces diagonal 1; do2eigen should not be used (ruins results). The norm type may affect speed; type "F" is in line with higham2002
                mat <- defSign * as.matrix(pd$mat)
            }
        }
        return(list(a=a,U=U,lambda=eig$values,lambdanew=a*eig$values,isDefinite=isDefinite,mat=mat))
    }else{
        return(NA)
    }
}

###################################################################################
#' Correction of a Distance Matrix
#'
#' Convert (possibly non-euclidean or non-metric) distance matrix with chosen approach so that it becomes a CNSD matrix.
#' Optionally, the resulting matrix is enforced to have positive elements and zero diagonal, with the \code{repair} parameter.
#' Essentially, this is a combination of functions \code{\link{correctionDefinite}} or \code{\link{correctionCNSD}} with \code{\link{repairConditionsDistanceMatrix}}.
#'
#' @param mat symmetric distance matrix
#' @param type string that specifies type of correction: \code{"CNSD"},\code{"NSD"} to enforce CNSD or NSD matrices respectively.
#' @param method string that specifies method for correction: spectrum clip \code{"clip"}, spectrum flip \code{"flip"}, nearest definite matrix \code{"near"}, spectrum square \code{"square"}, spectrum diffusion \code{"diffusion"}, feature embedding \code{"feature"}.
#' @param repair boolean, whether or not to use condition repair, so that elements are positive, and diagonal is zero.
#' @param tol tolerance value. Eigenvalues between \code{-tol} and \code{tol} are assumed to be zero.
#'
#' @return list with corrected distance matrix \code{mat}, \code{isCNSD} (boolean, whether original matrix was CNSD) and transformation matrix \code{A}.
#'
#' @seealso \code{\link{correctionDefinite}},\code{\link{correctionCNSD}},\code{\link{repairConditionsDistanceMatrix}}
#' @references Martin Zaefferer and Thomas Bartz-Beielstein. (2016). Efficient Global Optimization with Indefinite Kernels. Parallel Problem Solving from Nature-PPSN XIV. Accepted, in press. Springer.
#' @export
#' @examples
#' x <- list(c(2,1,4,3),c(2,4,3,1),c(4,2,1,3),c(4,3,2,1),c(1,4,3,2))
#' D <- distanceMatrix(x,distancePermutationInsert)
#' is.CNSD(D) #matrix should not be CNSD
#' D <- correctionDistanceMatrix(D)$mat
#' is.CNSD(D) #matrix should now be CNSD
#' D
###################################################################################
correctionDistanceMatrix <- function(mat,type="NSD",method="flip",repair=TRUE,tol=1e-8){
    isCNSD <- NA
    A <- NA
    matNoRep <- NA
    if((type=="NSD" | type=="CNSD") & any(method==c("clip","flip","near","square","diffusion","feature"))){
        isCNSD <- is.CNSD(mat,tol=tol) # check if definite
        A <- diag(nrow(mat))
        if(!isCNSD){# mat is not CNSD, needs correction
            if(type=="NSD"){
                ret <- correctionDefinite(mat,type="NSD",method=method,tol=tol)
                #resulting, transformed matrix
                mat <- ret$mat
                #transformation matrix for new data (predict):
                A <- ret$U %*% diag(ret$a) %*% t(ret$U)
                if(repair){ # fix diagonal and range of values
                    if(repair>1)
                        matNoRep <- mat # mat before repair. only needed for Nystroem approximated repair.
                    mat <- repairConditionsDistanceMatrix(mat)
                }
            }else if(type=="CNSD"){
                if(method == "near"){
                    mat <- nearCNSD(mat,eig.tol=tol)$mat
                }else if(method=="feature"){
                    x <- split(mat,seq(nrow(mat))) #each distance vector in the distance matrix is now a feature vector
                    mat <- distanceMatrix(x,distanceRealEuclidean) #TODO options for other surrogate distances?
                }else{
                    mat <- correctionCNSD(mat,method=method,tol=tol)
                    if(repair){ # fix diagonal and range of values
                        if(repair>1)
                            matNoRep <- mat # mat before repair. only needed for Nystroem approximated repair.
                        mat <- repairConditionsDistanceMatrix(mat)
                    }
                }
            }
        }
    }
    return(list(mat=mat,isCNSD=isCNSD,A=A,matNoRep=matNoRep))
}

###################################################################################
#' Correction of a Kernel (Correlation) Matrix
#'
#' Convert a non-PSD kernel matrix with chosen approach so that it becomes a PSD matrix.
#' Optionally, the resulting matrix is enforced to have values between -1 and 1 and a diagonal of 1s, with the \code{repair} parameter.
#' That means, it is (optionally) converted to a valid correlation matrix.
#' Essentially, this is a combination of \code{\link{correctionDefinite}} with \code{\link{repairConditionsCorrelationMatrix}}.
#'
#' @param mat symmetric kernel matrix
#' @param method string that specifies method for correction: spectrum clip \code{"clip"}, spectrum flip \code{"flip"}, nearest definite matrix \code{"near"}, spectrum square \code{"square"}, spectrum diffusion \code{"diffusion"}.
#' @param repair boolean, whether or not to use condition repair, so that elements are between -1 and 1, and the diagonal values are 1.
#' @param tol tolerance value. Eigenvalues between \code{-tol} and \code{tol} are assumed to be zero.
#'
#' @return list with corrected kernel matrix \code{mat}, \code{isPSD} (boolean, whether original matrix was PSD), transformation matrix \code{A},
#' the matrix of eigenvectors (\code{U}) and the transformation vector (\code{a})
#'
#' @seealso \code{\link{correctionDefinite}}, \code{\link{repairConditionsCorrelationMatrix}}
#' @references Martin Zaefferer and Thomas Bartz-Beielstein. (2016).
Efficient Global Optimization with Indefinite Kernels. Parallel Problem Solving from Nature-PPSN XIV. Accepted, in press. Springer.
#' @export
#' @examples
#' x <- list(c(2,1,4,3),c(2,4,3,1),c(4,2,1,3),c(4,3,2,1),c(1,4,3,2))
#' D <- distanceMatrix(x,distancePermutationInsert)
#' K <- exp(-0.01*D)
#' is.PSD(K) #matrix should not be PSD
#' K <- correctionKernelMatrix(K)$mat
#' is.PSD(K) #matrix should now be PSD
#' K
###################################################################################
correctionKernelMatrix <- function(mat,method="flip",repair=TRUE,tol=1e-8){
    isPSD <- NA
    A <- diag(nrow(mat))
    a <- NA
    U <- NA
    matNoRep <- NA
    if(any(method==c("clip","flip","near","square","diffusion"))){
        isPSD <- is.PSD(mat,tol=tol) # check if definite
        if(!isPSD){# mat is not PSD, needs correction
            ret <- correctionDefinite(mat,type="PSD",method=method,tol=tol)
            #resulting, transformed matrix
            mat <- ret$mat
            a <- ret$a
            U <- ret$U
            isPSD <- ret$isDefinite
            #transformation matrix for new data (predict):
            A <- ret$U %*% diag(ret$a) %*% t(ret$U)
            if(repair){ # fix diagonal and range of values
                if(repair>1)
                    matNoRep <- mat # mat before repair. only needed for Nystroem approximated repair.
                mat <- repairConditionsCorrelationMatrix(mat)
            }
        }
    }
    return(list(mat=mat,matNoRep=matNoRep,isPSD=isPSD,A=A,a=a,U=U))
}

###################################################################################
#' Repair Conditions of a Distance Matrix
#'
#' This function repairs distance matrices, so that the following two properties are ensured:
#' The distance values should be non-negative and the diagonal should be zero.
#' Other properties (conditional negative semi-definiteness (CNSD), symmetry) are
#' assumed to be given.
#'
#' @param mat symmetric, CNSD distance matrix. If your matrix is not CNSD, use \code{\link{correctionCNSD}} first. Or use \code{\link{correctionDistanceMatrix}}.
#'
#' @return repaired distance matrix
#'
#' @seealso \code{\link{correctionDefinite}}, \code{\link{correctionDistanceMatrix}}, \code{\link{correctionKernelMatrix}}, \code{\link{correctionCNSD}}, \code{\link{repairConditionsCorrelationMatrix}}
#' @references Martin Zaefferer and Thomas Bartz-Beielstein. (2016). Efficient Global Optimization with Indefinite Kernels. Parallel Problem Solving from Nature-PPSN XIV. Accepted, in press. Springer.
#' @export
#' @examples
#' x <- list(c(2,1,4,3),c(2,4,3,1),c(4,2,1,3),c(4,3,2,1),c(1,4,3,2))
#' D <- distanceMatrix(x,distancePermutationInsert)
#' D <- correctionCNSD(D)
#' D
#' D <- repairConditionsDistanceMatrix(D)
#' D
###################################################################################
repairConditionsDistanceMatrix <- function(mat){
    n <- nrow(mat)
    eps <- sqrt(.Machine$double.eps)
    if(sum(abs(diag(mat)))>eps | (min(mat) < -eps)){ #if diagonal values are non-zero, or if negative distances occur
        mat <- -mat #make cpsd kernel. (mat HAS TO be cnsd)
        Kaa <- matrix(diag(mat),n,n)
        mat <- Kaa + t(Kaa) - 2*mat #convert to valid distance (proven, because (c)psd.)
    }
    mat
}

###################################################################################
#' Repair Conditions of a Correlation Matrix
#'
#' This function repairs correlation matrices, so that the following two properties are ensured:
#' The correlation values should be between -1 and 1, and the diagonal values should be one.
#'
#' @param mat symmetric, PSD correlation (kernel) matrix. If your matrix is not PSD, use \code{\link{correctionDefinite}} first. Or use \code{\link{correctionKernelMatrix}}.
#'
#' @return repaired correlation matrix
#'
#' @seealso \code{\link{correctionDefinite}}, \code{\link{correctionDistanceMatrix}}, \code{\link{correctionKernelMatrix}}, \code{\link{correctionCNSD}}, \code{\link{repairConditionsDistanceMatrix}}
#' @references Martin Zaefferer and Thomas Bartz-Beielstein. (2016). Efficient Global Optimization with Indefinite Kernels. Parallel Problem Solving from Nature-PPSN XIV. Accepted, in press. Springer.
#' @export
#' @examples
#' x <- list(c(2,1,4,3),c(2,4,3,1),c(4,2,1,3),c(4,3,2,1),c(1,4,3,2))
#' D <- distanceMatrix(x,distancePermutationInsert)
#' K <- exp(-0.01*D)
#' K <- correctionDefinite(K,type="PSD")$mat
#' K
#' K <- repairConditionsCorrelationMatrix(K)
###################################################################################
repairConditionsCorrelationMatrix <- function(mat){
    # scale rows and columns so that the diagonal becomes one
    s <- diag(1/sqrt(diag(mat)))
    mat <- s %*% mat %*% s
}

###################################################################################
#' Augmented Distance Correction
#'
#' Correct new (test) distances, via correcting the augmented distance matrix. Internal use only.
#'
#' @param d new distance vector
#' @param object a modelKriging fit
#' @param x new samples (belonging to distances d)
#'
#' @return vector of augmented, corrected distances
#'
#' @references Martin Zaefferer and Thomas Bartz-Beielstein. (2016). Efficient Global Optimization with Indefinite Kernels. Parallel Problem Solving from Nature-PPSN XIV. Accepted, in press. Springer.
#' @keywords internal
###################################################################################
correctionAugmentedDistanceVector <- function(d,object,x){
    if(is.vector(d))
        d <- matrix(d,1)
    D <- object$origD
    if(is.list(object$distanceFunction)){ #in case of multiple distance matrices
        dself <- list()
        if(object$useDistanceParameters){
            indices <- rep(1:length(object$distanceFunction),sapply(object$distanceParametersLower,length)) #indices assigning each parameter to a distance function
        }
        for(i in 1:length(object$distanceFunction)){
            if(!object$useDistanceParameters){
                dself[[i]] <- distanceMatrix(x,object$distanceFunction[[i]])
            }else{
                dself[[i]] <- distanceMatrix(x,object$distanceFunction[[i]],object$distanceParameters[indices==i])
            }
            if(object$scaling){
                dself[[i]] <- dself[[i]]/object$maximumDistance[[i]]
            }
        }
        dself <- Reduce("+",mapply("*",dself,object$distanceWeights,SIMPLIFY=FALSE)) #weight each matrix by corresponding theta value, and compute sum of the matrices
    }else{
        if(!object$useDistanceParameters)
            dself <- distanceMatrix(x,object$distanceFunction)
        else
            dself <- distanceMatrix(x,object$distanceFunction,object$distanceParameters)
        if(object$scaling){
            dself <- dself/object$maximumDistance
        }
    }
    daug <- cbind(d,dself)
    Daug <- rbind(D,d)
    Daug <- cbind(Daug,t(daug))
    ## Fix Definiteness (NSDness, CNSDness) of the provided distance matrix
    Daugtransformed <- correctionDistanceMatrix(Daug,object$indefiniteType,object$indefiniteMethod,object$indefiniteRepair)$mat
    ## extract only the new values
    dnewtransformed <- Daugtransformed[(nrow(D)+1):nrow(Daugtransformed),1:ncol(D),drop=FALSE]
    ## return
    dnewtransformed
}

###################################################################################
#' Augmented Kernel Correction
#'
#' Correct new (test) kernel values, via correcting the augmented kernel matrix. Internal use only.
#'
#' @param k new kernel value vector
#' @param object a modelKriging fit
#' @param x new samples (belonging to kernel values k)
#'
#' @return vector of augmented, corrected kernel values
#'
#' @references Martin Zaefferer and Thomas Bartz-Beielstein. (2016). Efficient Global Optimization with Indefinite Kernels. Parallel Problem Solving from Nature-PPSN XIV. Accepted, in press. Springer.
#' @keywords internal
###################################################################################
correctionAugmentedKernelVector <- function(k,object,x){ #todo: tolerances! here and above
    if(is.vector(k))
        k <- matrix(k,1)
    K <- object$origPsi
    if(is.list(object$distanceFunction)){ #in case of multiple distance matrices
        dself <- list()
        if(object$useDistanceParameters){
            indices <- rep(1:length(object$distanceFunction),sapply(object$distanceParametersLower,length)) #indices assigning each parameter to a distance function
        }
        for(i in 1:length(object$distanceFunction)){
            if(!object$useDistanceParameters){
                dself[[i]] <- distanceMatrix(x,object$distanceFunction[[i]])
            }else{
                dself[[i]] <- distanceMatrix(x,object$distanceFunction[[i]],object$distanceParameters[indices==i])
            }
            if(object$scaling){
                dself[[i]] <- dself[[i]]/object$maximumDistance[[i]]
            }
        }
        dself <- Reduce("+",mapply("*",dself,object$distanceWeights,SIMPLIFY=FALSE)) #weight each matrix by corresponding theta value, and compute sum of the matrices
    }else{
        if(!object$useDistanceParameters)
            dself <- distanceMatrix(x,object$distanceFunction)
        else
            dself <- distanceMatrix(x,object$distanceFunction,object$distanceParameters)
        if(object$scaling){
            dself <- dself/object$maximumDistance
        }
    }
    if(is.null(object$theta)) #corr function has no parameters
        kself <- object$corr(dself)
    else
        kself <- object$corr(dself,object$theta)
    kaug <- cbind(k,kself)
    Kaug <- rbind(K,k)
    Kaug <- cbind(Kaug,t(kaug))
    ## Fix Definiteness (PSDness) of the provided kernel matrix
    #Kaugtransformed <- correctionDefinite(Kaug,"PSD",object$indefiniteMethod,object$a)$mat
    #Kaugtransformed <- repairConditionsCorrelationMatrix(Kaugtransformed)
    Kaugtransformed <- correctionKernelMatrix(Kaug,object$indefiniteMethod,object$indefiniteRepair)$mat
    ## The following would only be needed if the whole matrix is of interest
    #Kaugtransformed <- Kaugtransformed + diag(object$lambda,nrow(Kaugtransformed))
    ## extract only the new values
    knewtransformed <- Kaugtransformed[(nrow(K)+1):nrow(Kaugtransformed),1:ncol(K),drop=FALSE]
    #NOTE: lambda is not added to diagonal of Kaugtransformed, because this affects only the diagonal
    # which is not part of the returned vector
    ## return
    knewtransformed
}
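###################################################################################
## Illustration sketch for the correction functions above (assumes the permutation
## distances from this package): different spectrum transformations all yield a
## CNSD matrix after repair, but distort the original matrix to different degrees.
##
# x <- list(c(2,1,4,3),c(2,4,3,1),c(4,2,1,3),c(4,3,2,1),c(1,4,3,2))
# D <- distanceMatrix(x,distancePermutationInsert)
# is.CNSD(D)                                              # FALSE: indefinite
# DcClip <- correctionDistanceMatrix(D,method="clip")$mat # clip negative part of spectrum
# DcFlip <- correctionDistanceMatrix(D,method="flip")$mat # flip negative part of spectrum
# is.CNSD(DcClip); is.CNSD(DcFlip)                        # both TRUE after repair
# norm(D-DcClip,"F"); norm(D-DcFlip,"F")                  # distortion introduced by each method
###################################################################################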
/scratch/gouwar.j/cran-all/cranData/CEGO/R/indefiniteLearning.R
###################################################################################
#' Check for Conditional Negative Semi-Definiteness
#'
#' This function checks whether a symmetric matrix is Conditionally Negative Semi-Definite (CNSD).
#' Note that this function does not check whether the matrix is actually symmetric.
#'
#' @param X a symmetric matrix
#' @param method a string, specifying the method to be used. \code{"alg1"} is based on algorithm 1 in Ikramov and Savel'eva (2000).
#' \code{"alg2"} is based on theorem 3.2 in Ikramov and Savel'eva (2000). \code{"eucl"} is based on Glunt (1990).
#' @param tol tolerance value. Eigenvalues between \code{-tol} and \code{tol} are assumed to be zero.
#'
#' Symmetric, CNSD matrices are, e.g., Euclidean distance matrices, which are required to produce Positive Semi-Definite correlation
#' or kernel matrices. Such matrices are used in models like Kriging or Support Vector Machines.
#'
#' @return boolean, which is TRUE if X is CNSD
#'
#' @seealso \code{\link{is.NSD}}, \code{\link{is.PSD}}
#' @references Ikramov, K. and Savel'eva, N. Conditionally definite matrices, Journal of Mathematical Sciences, Kluwer Academic Publishers-Plenum Publishers, 2000, 98, 1-50
#' @references Glunt, W.; Hayden, T. L.; Hong, S. and Wells, J. An alternating projection algorithm for computing the nearest Euclidean distance matrix, SIAM Journal on Matrix Analysis and Applications, SIAM, 1990, 11, 589-600
#' @examples
#' # The following permutations will produce
#' # a non-CNSD distance matrix with Insert distance
#' # and a CNSD distance matrix with Hamming distance
#' x <- list(c(2,1,4,3),c(2,4,3,1),c(4,2,1,3),c(4,3,2,1),c(1,4,3,2))
#' D <- distanceMatrix(x,distancePermutationInsert)
#' is.CNSD(D,"alg1")
#' is.CNSD(D,"alg2")
#' is.CNSD(D,"eucl")
#' D <- distanceMatrix(x,distancePermutationHamming)
#' is.CNSD(D,"alg1")
#' is.CNSD(D,"alg2")
#' is.CNSD(D,"eucl")
#' @export
###################################################################################
is.CNSD <- function(X,method="alg1",tol=1e-8){
    n <- nrow(X)
    if(method=="alg1"){ #algorithm 1 (ikramov2000)
        P <- (diag(n)-matrix(1,n,n)/n)
        P[n,] <- c(rep(0,n-1),1)
        Xhat <- P %*% X %*% t(P)
        eigs <- eigen(Xhat[-n,-n],T,T)$values
        cnsd <- !eigs[1]>tol
    }else if(method=="eucl"){ #Glunt1990
        P <- (diag(n)-matrix(1,n,n)/n)
        Xhat <- P %*% X %*% P
        eigs <- eigen(Xhat,T,T)$values
        cnsd <- !eigs[1]>tol
    }else if(method=="alg2"){ #algorithm 2, theorem 3.2 (ikramov2000)
        eigs <- eigen(rbind(cbind(X,rep(1,n)),c(rep(1,n),0)),T,T)$values
        cnsd <- !(eigs[1]*eigs[2])>tol
    }
    cnsd
}

###################################################################################
#' Check for Positive Semi-Definiteness
#'
#' This function checks whether a symmetric matrix is Positive Semi-Definite (PSD).
#' That means, it is determined whether all eigenvalues of the matrix are non-negative.
#' Note that this function does not check whether the matrix is actually symmetric.
#'
#' @param X a symmetric matrix
#' @param tol tolerance value. Eigenvalues between \code{-tol} and \code{tol} are assumed to be zero.
#'
#' Symmetric, PSD matrices are, e.g., correlation
#' or kernel matrices. Such matrices are used in models like Kriging or Support Vector regression.
#'
#' @return boolean, which is TRUE if X is PSD
#'
#' @seealso \code{\link{is.CNSD}}, \code{\link{is.NSD}}
#' @examples
#' # The following permutations will produce
#' # a non-PSD kernel matrix with Insert distance
#' # and a PSD kernel matrix with Hamming distance
#' # (for the given theta value of 0.01)
#' x <- list(c(2,1,4,3),c(2,4,3,1),c(4,2,1,3),c(4,3,2,1),c(1,4,3,2))
#' K <- exp(-0.01*distanceMatrix(x,distancePermutationInsert))
#' is.PSD(K)
#' K <- exp(-0.01*distanceMatrix(x,distancePermutationHamming))
#' is.PSD(K)
#' @export
###################################################################################
is.PSD <- function(X,tol=1e-8){
    eigen(X,T,T)$values[nrow(X)] >= -tol
}

###################################################################################
#' Check for Negative Semi-Definiteness
#'
#' This function checks whether a symmetric matrix is Negative Semi-Definite (NSD).
#' That means, it is determined whether all eigenvalues of the matrix are non-positive.
#' Note that this function does not check whether the matrix is actually symmetric.
#'
#' @param X a symmetric matrix
#' @param tol tolerance value. Eigenvalues between \code{-tol} and \code{tol} are assumed to be zero.
#'
#' Symmetric, NSD matrices arise, e.g., as negated correlation
#' or kernel matrices. Such matrices are used in models like Kriging or Support Vector regression.
#'
#' @return boolean, which is TRUE if X is NSD
#'
#' @seealso \code{\link{is.CNSD}}, \code{\link{is.PSD}}
#' @examples
#' # The following permutations will produce
#' # a non-PSD kernel matrix with Insert distance
#' # and a PSD kernel matrix with Hamming distance
#' # (for the given theta value of 0.01).
#' # The respective negated matrices should be (non-) NSD.
#' x <- list(c(2,1,4,3),c(2,4,3,1),c(4,2,1,3),c(4,3,2,1),c(1,4,3,2))
#' K <- exp(-0.01*distanceMatrix(x,distancePermutationInsert))
#' is.NSD(-K)
#' K <- exp(-0.01*distanceMatrix(x,distancePermutationHamming))
#' is.NSD(-K)
#' @export
###################################################################################
is.NSD <- function(X,tol=1e-8){
    eigen(X,T,T)$values[1] <= tol
}
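###################################################################################
## Sketch of the relation behind is.CNSD(..., method="eucl"): D is CNSD exactly
## when the centered matrix -P %*% D %*% P is PSD, where P = I - 11'/n
## (assumes distancePermutationHamming from this package).
##
# x <- list(c(2,1,4,3),c(2,4,3,1),c(4,2,1,3),c(4,3,2,1),c(1,4,3,2))
# D <- distanceMatrix(x,distancePermutationHamming)
# n <- nrow(D)
# P <- diag(n)-matrix(1,n,n)/n
# is.CNSD(D,"eucl")      # TRUE for the Hamming distance on these permutations
# is.PSD(-P %*% D %*% P) # TRUE, consistent with the centering argument
###################################################################################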
/scratch/gouwar.j/cran-all/cranData/CEGO/R/isDefinite.R
###################################################################################
#' Calculate Kernel Matrix
#'
#' Calculate the similarities between all samples in a list, and return as matrix.
#'
#' @param X list of samples, where each list element is a suitable input for \code{kernFun}
#' @param kernFun Kernel function of type f(x,y)=r, where r is a scalar and x and y are elements whose similarity is evaluated.
#' @param ... further arguments passed to kernFun
#'
#' @return The similarity / kernel matrix
#'
#' @examples
#' x <- list(5:1,c(2,4,5,1,3),c(5,4,3,1,2), sample(5))
#' kernFun <- function(x,y){
#' 		exp(-distancePermutationHamming(x,y))
#' }
#' kernelMatrix(x,kernFun)
#'
#' @export
###################################################################################
kernelMatrix <- function(X,kernFun,...){
    n <- length(X)
    m <- matrix(0,nrow=n, ncol=n)
    for(i in seq_len(n - 1))
        m[seq(i+1, n),i] <- m[i,seq(i+1, n)] <- distanceVector(X[[i]],X[seq(i+1, n)],kernFun,...) #note: distanceVector simply applies kernFun pairwise
    m
} #todo: unfinished work; the diagonal (self-similarity) is left at zero
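###################################################################################
## Consistency sketch (assumes distancePermutationHamming from this package):
## off-diagonal entries of the kernel matrix match transforming the distance
## matrix directly; only the diagonal differs, as kernelMatrix leaves it at zero.
##
# x <- list(5:1,c(2,4,5,1,3),c(5,4,3,1,2))
# kernFun <- function(a,b) exp(-distancePermutationHamming(a,b))
# K1 <- kernelMatrix(x,kernFun)
# K2 <- exp(-distanceMatrix(x,distancePermutationHamming))
# all.equal(K1[lower.tri(K1)],K2[lower.tri(K2)]) # TRUE
###################################################################################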
/scratch/gouwar.j/cran-all/cranData/CEGO/R/kernelCalculation.R
##############################################################
#' Negative Logarithm of Expected Improvement
#'
#' This function calculates the Expected Improvement of candidate solutions,
#' based on predicted means, standard deviations (uncertainty) and
#' the best known objective function value so far.
#'
#' @param mean predicted mean values
#' @param sd predicted standard deviation
#' @param min minimum of all observations so far
#'
#' @return Returns the negative logarithm (base 10) of the Expected Improvement.
#' @export
##############################################################
infillExpectedImprovement <- function(mean,sd,min){
    im <- min - mean
    imsd <- im/sd
    termOne <- im*pnorm(imsd)
    termTwo <- sd*dnorm(imsd) # == sd/sqrt(2*pi)*exp(-0.5*(imsd)^2)
    ei <- -log10(termOne+termTwo+(.Machine$double.xmin))
    ei[is.na(ei)] <- Inf
    ei
}

###################################################################################
#' Remove Duplicates
#'
#' Remove duplicates in \code{x}, replace with non-duplicated individuals according to \code{cf}.
#'
#' @param x List of individuals
#' @param cf Creation function, creates random new individuals
#'
#' @return Returns \code{x} without duplicates
#'
#' @keywords internal
#' @export
###################################################################################
removeDuplicates <- function(x,cf){
    while(any(duplicated(x))){
        duplicates <- which(duplicated(x))
        for(i in 1:length(duplicates))
            x[[duplicates[i]]] <- cf()
    }
    x
}

###################################################################################
#' Remove Duplicates from Offsprings
#'
#' Remove duplicates in \code{c(xhist,off)}, replace with non-duplicated individuals according to \code{cf}.
#'
#' @param xhist List of previous individuals
#' @param off List of offspring individuals
#' @param cf Creation function, creates random new individuals
#' @param df Duplicate Function. This function determines which elements in a list/population are duplicates. By default, this is the duplicated function from R-base.
#'
#' @return Returns \code{off} without duplicates
#'
#' @keywords internal
#' @export
###################################################################################
removeDuplicatesOffspring <- function(xhist,off,cf,df=duplicated){
    x <- c(xhist,off)
    while(any(dup <- df(x))){
        duplicates <- which(dup)
        x[duplicates] <- replicate(length(duplicates),cf(),simplify=F)
    }
    x[(length(xhist)+1):length(x)]
}

###################################################################################
#' Tournament Selection
#'
#' Simple Tournament Selection implementation.
#'
#' @param fitness Fitness values of individuals
#' @param tournamentSize Tournament Size
#' @param tournamentProbability Tournament Probability
#' @param selectFromN Number of tournament winners
#'
#' @return index of tournament winners
#'
#' @seealso \code{\link{modelKriging}}
#'
#' @keywords internal
#' @export
###################################################################################
tournamentSelection <- function(fitness, tournamentSize, tournamentProbability, selectFromN){
    N <- length(fitness)
    tournamentSize <- min(tournamentSize, N) #can not select more than in population for each tournament.
    tmp <- seq(0,tournamentSize-1)
    pvec <- tournamentProbability*(1-tournamentProbability)^tmp #probabilities for individual selection
    cump <- cumsum(pvec) #cumulative probability vector
    cump[tournamentSize] <- 1 #make sure that sum is one.
    tf <- function(){
        individuals <- sample(N,tournamentSize,FALSE,NULL) #select TSIZE individuals for a tournament, randomly.
        rnd <- runif(1)
        i <- which(cump>rnd)[1]
        fitnessorderTmp <- order(fitness[individuals])
        individuals[fitnessorderTmp[i]]
    }
    #unlist(lapply(integer(selectFromN),tf))
    replicate(selectFromN,tf())
}
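# Usage sketch (not run): negative log-EI for two candidates, and tournament
# selection on a random fitness vector. All values below are illustrative.
# infillExpectedImprovement(mean=c(0.2,0.5), sd=c(0.1,0.4), min=0.3) #smaller is better
# fitness <- runif(20)
# winners <- tournamentSelection(fitness, tournamentSize=4,
#                                tournamentProbability=0.9, selectFromN=10)
# fitness[winners] #fitter individuals win with higher probability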
/scratch/gouwar.j/cran-all/cranData/CEGO/R/misc.R
###################################################################################
#' Kriging Model
#'
#' Implementation of a distance-based Kriging model, e.g., for mixed or combinatorial input spaces.
#' It is based on employing suitable distance measures for the samples in input space.
#'
#' The basic Kriging implementation is based on the work of Forrester et al. (2008).
#' For adaptation of Kriging to mixed or combinatorial spaces, as well as
#' choosing distance measures with Maximum Likelihood Estimation, see the other two references (Zaefferer et al., 2014).
#'
#' @param x list of samples in input space
#' @param y column vector of observations for each sample
#' @param distanceFunction a suitable distance function of type f(x1,x2), returning a scalar distance value, preferably between 0 and 1.
#' Maximum distances larger than 1 are no problem, but may yield scaling bias when different measures are compared.
#' Should be non-negative and symmetric. It can also be a list of several distance functions. In this case, Maximum Likelihood Estimation (MLE) is used
#' to determine the most suited distance measure.
#' The distance function may have additional parameters. For that case, see distanceParametersLower/Upper in the controls.
#' If distanceFunction is missing, it can also be provided in the control list.
#' @param control (list), with the options for the model building procedure:
#' \describe{
#' \item{\code{lower}}{ lower boundary for theta (log scale), default is \code{-6}}
#' \item{\code{upper}}{ upper boundary for theta (log scale), default is \code{5}}
#' \item{\code{corr}}{ function to be used for correlation modelling, default is \code{fcorrGauss}}
#' \item{\code{algTheta}}{ algorithm used to find theta (as well as p and lambda), default is \code{\link{optimInterface}}.}
#' \item{\code{algThetaControl}}{ list of controls passed to \code{algTheta}.}
#' \item{\code{useLambda}}{ whether or not to use the regularization constant lambda (nugget effect). Default is \code{FALSE}.}
#' \item{\code{lambdaLower}}{ lower boundary for lambda (log scale), default is \code{-6}}
#' \item{\code{lambdaUpper}}{ upper boundary for lambda (log scale), default is \code{0}}
#' \item{\code{distanceParametersLower}}{ lower boundary for parameters of the distance function, default is \code{NA} which means there are no distance function parameters. If several distance functions are supplied, this should be a list of lower boundary vectors for each function.}
#' \item{\code{distanceParametersUpper}}{ upper boundary for parameters of the distance function, default is \code{NA} which means there are no distance function parameters. If several distance functions are supplied, this should be a list of upper boundary vectors for each function.}
#' \item{\code{distances}}{ a distance matrix. If available, this matrix is used for model building, instead of calculating the distance matrix using the parameter \code{distanceFunction}. Default is \code{NULL}.}
#' \item{\code{scaling}}{ If TRUE: distance values are divided by the maximum distance to avoid scale bias.}
#' \item{\code{reinterpolate}}{ If TRUE: reinterpolation is used to generate better uncertainty estimates in the presence of noise.}
#' \item{\code{combineDistances}}{ By default, several distance functions or matrices are subject to a likelihood based decision, choosing one. If this parameter is TRUE, they are instead combined by determining a weighted sum. The weighting parameters are determined by MLE.}
#' \item{\code{userParameters}}{ By default: (\code{NULL}).
#' Else, this vector is used instead of MLE to specify the model parameters, in the following order: kernel parameters, distance weights, lambda, distance parameters.}
#' \item{\code{indefiniteMethod}}{ The specific method used for correction: spectrum \code{"clip"}, spectrum \code{"flip"}, spectrum \code{"square"}, spectrum \code{"diffusion"}, feature embedding "feature", nearest definite matrix "near". Default is no correction: \code{"none"}. See Zaefferer and Bartz-Beielstein (2016).}
#' \item{\code{indefiniteType}}{ The general type of correction for indefiniteness: \code{"NSD"},\code{"CNSD"} or the default \code{"PSD"}. See Zaefferer and Bartz-Beielstein (2016). Note, that feature embedding may not work in case of multiple distance functions.}
#' \item{\code{indefiniteRepair}}{ boolean, whether conditions of the distance matrix (in case of \code{"NSD"},\code{"CNSD"} correction type) or correlation matrix (in case of \code{"PSD"} correction type) are repaired.}
######## \item{\code{conditionalSimulation}}{ boolean, whether a later performed simulation of the fitted model should be conditional on the training data.}
#' }
#'
#' @return an object of class \code{modelKriging} containing the options (see control parameter) and determined parameters for the model:
#' \describe{
#' \item{\code{theta}}{ parameters of the kernel / correlation function determined with MLE.}
#' \item{\code{lambda}}{ regularization constant (nugget) lambda}
#' \item{\code{yMu}}{ vector of observations y, minus MLE of mu}
#' \item{\code{SSQ}}{ Maximum Likelihood Estimate (MLE) of model parameter sigma^2}
#' \item{\code{mu}}{ MLE of model parameter mu}
#' \item{\code{Psi}}{ correlation matrix Psi}
#' \item{\code{Psinv}}{ inverse of Psi}
#' \item{\code{nevals}}{ number of likelihood evaluations during MLE of theta/lambda/p}
#' \item{\code{distanceFunctionIndex}}{ If a list of several distance measures (\code{distanceFunction}) was given, this parameter contains the index value of the measure chosen with MLE.}
#' }
#'
#' @seealso \code{\link{predict.modelKriging}}
#'
#' @references Forrester, Alexander I.J.; Sobester, Andras; Keane, Andy J. (2008). Engineering Design via Surrogate Modelling - A Practical Guide. John Wiley & Sons.
#' @references Zaefferer, Martin; Stork, Joerg; Friese, Martina; Fischbach, Andreas; Naujoks, Boris; Bartz-Beielstein, Thomas. (2014). Efficient global optimization for combinatorial problems. In Proceedings of the 2014 conference on Genetic and evolutionary computation (GECCO '14). ACM, New York, NY, USA, 871-878. DOI=10.1145/2576768.2598282
#' @references Zaefferer, Martin; Stork, Joerg; Bartz-Beielstein, Thomas. (2014). Distance Measures for Permutations in Combinatorial Efficient Global Optimization. In Parallel Problem Solving from Nature - PPSN XIII (p. 373-383). Springer International Publishing.
#' @references Zaefferer, Martin and Bartz-Beielstein, Thomas (2016). Efficient Global Optimization with Indefinite Kernels. Parallel Problem Solving from Nature-PPSN XIV. Accepted, in press. Springer.
#'
#' @examples
#' # Set random number generator seed
#' set.seed(1)
#' # Simple test landscape
#' fn <- landscapeGeneratorUNI(1:5,distancePermutationHamming)
#' # Generate data for training and test
#' x <- unique(replicate(40,sample(5),FALSE))
#' xtest <- x[-(1:15)]
#' x <- x[1:15]
#' # Determine true objective function values
#' y <- fn(x)
#' ytest <- fn(xtest)
#' # Build model
#' fit <- modelKriging(x,y,distancePermutationHamming,
#'     control=list(algThetaControl=list(method="L-BFGS-B"),useLambda=FALSE))
#' # Predicted obj. function values
#' ypred <- predict(fit,xtest)$y
#' # Uncertainty estimate
#' fit$predAll <- TRUE
#' spred <- predict(fit,xtest)$s
#' # Plot
#' plot(ytest,ypred,xlab="true value",ylab="predicted value",
#'     pch=20,xlim=c(0.3,1),ylim=c(min(ypred)-0.1,max(ypred)+0.1))
#' segments(ytest, ypred-spred,ytest, ypred+spred)
#' epsilon = 0.02
#' segments(ytest-epsilon,ypred-spred,ytest+epsilon,ypred-spred)
#' segments(ytest-epsilon,ypred+spred,ytest+epsilon,ypred+spred)
#' abline(0,1,lty=2)
#' # Use a different/custom optimizer (here: SANN) for maximum likelihood estimation:
#' # (Note: Bound constraints are recommended, to avoid Inf values.
#' # This is really just a demonstration. SANN does not respect bound constraints.)
#' optimizer1 <- function(x,fun,lower=NULL,upper=NULL,control=NULL,...){
#'     res <- optim(x,fun,method="SANN",control=list(maxit=100),...)
#'     list(xbest=res$par,ybest=res$value,count=res$counts)
#' }
#' fit <- modelKriging(x,y,distancePermutationHamming,
#'     control=list(algTheta=optimizer1,useLambda=FALSE))
#' # One-dimensional optimizer (Brent). Note, that Brent will not work when
#' # several parameters have to be set, e.g., when using nugget effect (lambda).
#' # However, Brent may be quite efficient otherwise.
#' optimizer2 <- function(x,fun,lower,upper,control=NULL,...){
#'     res <- optim(x,fun,method="Brent",lower=lower,upper=upper,...)
#'     list(xbest=res$par,ybest=res$value,count=res$counts)
#' }
#' fit <- modelKriging(x,y,distancePermutationHamming,
#'     control=list(algTheta=optimizer2,useLambda=FALSE))
#' @export
###################################################################################
modelKriging <- function(x, y, distanceFunction, control=list()){
    #TODO: use of tcrossprod or crossprod for speedup??
    con<-list(lower=-6, upper=5, corr=fcorrGauss,
        algTheta= optimInterface,
        algThetaControl= list(funEvals=200,reltol=1e-4,factr=1e12,restarts=TRUE), #TODO: change reltol and factr defaults?
        combineDistances=FALSE,
        distanceParametersLower= NA, distanceParametersUpper= NA,
        useLambda=FALSE, lambdaLower = -6, lambdaUpper = 0,
        #conditionalSimulation=FALSE,
        simulationReturnAll = FALSE,
        indefiniteMethod= "none", indefiniteType="PSD", indefiniteRepair=TRUE,
        scaling=FALSE, reinterpolate=FALSE) #todo always scale, remove scaling variable?
    con$algThetaControl[names(control$algThetaControl)] <- control$algThetaControl
    control$algThetaControl <- con$algThetaControl
    con[names(control)] <- control
    control <- con
    #
    # the distance function may also be provided via the control list
    if(missing(distanceFunction))
        distanceFunction <- control$distanceFunction
    if(is.null(distanceFunction))
        stop("No distanceFunction passed to modelKriging.")
    if(length(distanceFunction)==1)
        control$combineDistances <- FALSE

    #check whether distance function has parameters
    useDistanceParameters=FALSE
    if(!any(is.na(control$distanceParametersLower))&!any(is.na(control$distanceParametersUpper)))
        useDistanceParameters=TRUE

    algThetaControl <- control$algThetaControl
    useLambda <- control$useLambda
    lambdaLower <- control$lambdaLower
    lambdaUpper <- control$lambdaUpper
    distanceParametersLower <- control$distanceParametersLower
    distanceParametersUpper <- control$distanceParametersUpper
    combineDistances <- control$combineDistances
    indefiniteMethod <- control$indefiniteMethod
    indefiniteType <- control$indefiniteType
    indefiniteRepair <- control$indefiniteRepair
    scaling <- control$scaling
    fcorr <- control$corr

    fit <- control
    fit$useDistanceParameters <- useDistanceParameters

    if(!is.matrix(y)) #TODO why a matrix...
        y <- as.matrix(y)

    if(any(duplicated(x)) & !control$useLambda){ #duplicates HAVE to be removed, but duplicates for noisy problems are okay.
        duplicates <- which(duplicated(x))
        x <- x[-duplicates]
        y <- as.matrix(y[-duplicates])
    }

    fit$x <- x
    fit$y <- y

    n <- length(fit$x) #number of observations
    nd <- length(distanceFunction) # number of distance functions
    ntheta <- length(fit$lower) #number of theta parameters
    if(any(is.na(fit$lower))) #no lower bound means no theta parameter.
        ntheta=0

    #calculate distance matrix
    if(!useDistanceParameters){ #no distance parameters, can compute distance now. else: optimize and compute during MLE.
        ret <- modelKrigingDistanceCalculation(x,distanceFunction=distanceFunction,parameters=NA,
            distances=control$distances,scaling=scaling,combineDistances=combineDistances,indefiniteMethod=indefiniteMethod,
            indefiniteType=indefiniteType,indefiniteRepair=indefiniteRepair,lower=distanceParametersLower)
        fit[names(ret)] <- ret
        D <- fit$D
        fit$D <- NULL
    }

    if(is.null(control$userParameters)){
        # start point for theta and other model parameters + bounds:
        res <- modelKrigingInit(fit$startTheta,fit$lower,fit$upper,
            useLambda,lambdaLower,lambdaUpper,
            combineDistances,nd,useDistanceParameters,
            distanceParametersLower,distanceParametersUpper)
        x0 <- res$x0
        lower <- res$lower
        upper <- res$upper
        # adapt tuning (MLE) budget to dimensionality of parameter space
        algThetaControl$funEvals <- algThetaControl$funEvals*length(x0)
        if(combineDistances | nd==1){
            if(!useDistanceParameters){ # if distance function has no parameters (or default parameters are used:)
                res <- control$algTheta(x=x0,fun=modelKrigingLikelihood,lower=lower,upper=upper,
                    control=algThetaControl,D=D,y=fit$y,useLambda=useLambda,corr=fcorr,
                    indefiniteMethod=indefiniteMethod,indefiniteType=indefiniteType,indefiniteRepair=indefiniteRepair,returnLikelihoodOnly=TRUE,inverter="chol",ntheta=ntheta)
                fit$distanceFunction <- distanceFunction
            }else{ # parameters of the distance function optimized during MLE
                res <- control$algTheta(x=x0,fun=modelKrigingParameterizedLikelihood,lower=lower,upper=upper,
                    control=algThetaControl,xs=fit$x,ys=fit$y,useLambda=useLambda,corr=fcorr,
                    indefiniteMethod=indefiniteMethod,indefiniteType=indefiniteType,indefiniteRepair=indefiniteRepair,returnLikelihoodOnly=TRUE,inverter="chol",
                    distanceFunction=distanceFunction,combineDistances=combineDistances,distanceParametersLower=distanceParametersLower,ntheta=ntheta,scaling=scaling)
                fit$distanceFunction <- distanceFunction #todo?
            }
            nevals <- as.numeric(res$count[[1]])
        }else{
            res <- list()
            minlik=Inf
            minlikindex=1
            nevals <- 0
            for(i in 1:length(distanceFunction)){
                if(!useDistanceParameters){ # if distance function has no parameters (or default parameters are used:)
                    res[[i]] <- control$algTheta(x=x0,fun=modelKrigingLikelihood,lower=lower,upper=upper,
                        control=algThetaControl,D=D[[i]],y=fit$y,useLambda=useLambda,corr=fcorr,
                        indefiniteMethod=indefiniteMethod,indefiniteType=indefiniteType,indefiniteRepair=indefiniteRepair,returnLikelihoodOnly=TRUE,inverter="chol",ntheta=ntheta)
                }else{ # parameters of the distance function optimized during MLE
                    res[[i]] <- control$algTheta(x=x0,fun=modelKrigingParameterizedLikelihood,lower=lower,upper=upper,
                        control=algThetaControl,xs=fit$x,ys=fit$y,useLambda=useLambda,corr=fcorr,
                        indefiniteMethod=indefiniteMethod,indefiniteType=indefiniteType,indefiniteRepair=indefiniteRepair,returnLikelihoodOnly=TRUE,inverter="chol",
                        distanceFunction=distanceFunction[[i]],combineDistances=combineDistances,distanceParametersLower=distanceParametersLower,ntheta=ntheta,scaling=scaling)
                }
                if(res[[i]]$ybest < minlik){
                    minlik <- res[[i]]$ybest
                    minlikindex <- i
                }
                nevals <- nevals + as.numeric(res[[i]]$count[[1]]) #accumulate the evaluation counts of each MLE run
            }
            res <- res[[minlikindex]]
            fit$distanceFunction <- distanceFunction[[minlikindex]]
            fit$maximumDistance <- fit$maximumDistance[[minlikindex]]
            D <- D[[minlikindex]]
            fit$origD <- fit$origD[[minlikindex]]
            fit$isCNSD <- fit$isCNSD[[minlikindex]]
            fit$A <- fit$A[[minlikindex]]
            fit$distanceFunctionIndex <- minlikindex
            nd <- 1
        }
        if(is.null(res$xbest)){
            res$xbest <- x0
        }
        Params <- res$xbest
    }else{
        Params <- control$userParameters
        nevals <- 0
        fit$distanceFunction <- distanceFunction
    }
    # extract model parameters:
    # kernel parameters (theta)
    if(ntheta>0){
        fit$theta <- Params[1:ntheta]
    }
    # weights for each distance matrix (combination)
    if(combineDistances & nd>1){
        fit$distanceWeights <- 10^Params[ntheta+(1:nd)]
        nweights=nd
    }else{
        nweights=0 #number of weight parameters
    }
    # lambda
    if(useLambda){
        fit$lambda <- 10^Params[ntheta+nweights+1]
    }else{
        fit$lambda <- 0
    }
    #distance function parameters
    if(useDistanceParameters){
        fit$distanceParameters <- Params[(ntheta+nweights+useLambda+1):length(Params)]
        res <- modelKrigingParameterizedLikelihood(Params,fit$x,fit$y,useLambda,fcorr,
            indefiniteMethod,indefiniteType,indefiniteRepair,
            returnLikelihoodOnly=FALSE,inverter="chol",
            distanceFunction=fit$distanceFunction,combineDistances=combineDistances,
            distanceParametersLower=distanceParametersLower,ntheta=ntheta,scaling=scaling
            ) #need to also return the correlation matrix and other elements of the model
    }else{
        res <- modelKrigingLikelihood(Params,D,fit$y,useLambda,fcorr,
            indefiniteMethod,indefiniteType,indefiniteRepair,
            returnLikelihoodOnly=FALSE,inverter="chol",ntheta=ntheta) #need to also return the correlation matrix and other elements of the model
    }
    if(is.na(res$Psinv[1])){ #model building failed. no invertible correlation matrix was found. return NA fit
        stop("Building the Kriging model failed, no invertible correlation matrix was found. This may be due to the specific data-set or distance function used.")
    }
    if(useDistanceParameters | nd>1){
        fit$A <- res$A
        #D <- res$D
        if(!is.null(res$maximumDistance))
            fit$maximumDistance <- res$maximumDistance
        fit$origD <- res$origD
        fit$isCNSD <- res$isCNSD
    } #thus, multiple distances are reduced to one: either combined, or one chosen.
    fit$isIndefinite <- res$isIndefinite
    fit$U <- res$U
    fit$a <- res$a
    fit$yMu <- res$yMu
    fit$SSQ <- as.numeric(res$SSQ)
    fit$mu <- res$mu
    fit$Psi <- res$Psi
    fit$origPsi <- res$origPsi
    fit$Psinv <- res$Psinv
    #fit$unrepairedPsi <- res$unrepairedPsi
    ##precompute transformations
    if(indefiniteType=="PSD" & !fit$indefiniteRepair & fit$isIndefinite & any(indefiniteMethod==c("clip","flip","square","diffusion"))){ #RETRANSFORMATION OF THE SOLUTION ONLY
        A <- res$U %*% diag(res$a) %*% t(res$U)
        fit$A <- A
        #
        fit$Psinv <- t(A) %*% fit$Psinv #retransform the result for prediction
        fit$PsinvA <- fit$Psinv %*% A #retransform the result (for variance estimation only)
    }
    if(indefiniteType=="PSD" & (fit$indefiniteRepair %in% c(2,3,4)) & fit$isIndefinite & any(indefiniteMethod==c("clip","flip","square","diffusion"))){
        A <- res$U %*% diag(res$a) %*% t(res$U)
        fit$A <- A
        fit$diagUnrepairedPsi <- diag(res$unrepairedPsi)
        if(fit$indefiniteRepair==2){ # for repair with nystroem only:
            unrepairedPsinv <- try(chol2inv(chol(res$unrepairedPsi)), TRUE)
            if(class(unrepairedPsinv)[1] == "try-error"){
                unrepairedPsinv <- ginv(res$unrepairedPsi)
            }
            fit$unrepairedAPsinvA <- t(A) %*% unrepairedPsinv %*% A
        }
        fit$ADividedSqrtDiagPsi <- t(A) %*% diag(1/sqrt(diag(res$unrepairedPsi))) # divider for repair during prediction, including A
    }
    if(useLambda){
        PsiB <- res$Psi-diag(fit$lambda,n)+diag(.Machine$double.eps,n)
        fit$SSQReint <- as.numeric((t(res$yMu)%*%res$Psinv%*%PsiB%*%res$Psinv%*%res$yMu)/n) #res is used intentionally, needs to be untransformed Psinv
        fit$PsinvReint <- try(chol2inv(chol(PsiB)), TRUE)
        if(class(fit$PsinvReint)[1] == "try-error"){
            fit$PsinvReint <- ginv(PsiB)
        }
        #now apply same transformations as for non-reinterpolating matrices
        if(indefiniteType=="PSD" & fit$isIndefinite & !fit$indefiniteRepair & any(indefiniteMethod==c("clip","flip","square","diffusion"))){ #RETRANSFORMATION OF THE SOLUTION ONLY
            fit$PsinvReint <- t(A)%*%fit$PsinvReint %*% A #retransform
        }
    }
    #
    ##
    fit$nevals <- nevals
    fit$like <- res$NegLnLike
    fit$predAll <- FALSE #todo : should be option
    fit$D <- D
    class(fit) <- "modelKriging"
    return(fit)
}

###################################################################################
#' Gaussian Kernel for Kriging
#'
#' @param D distance matrix
#' @param theta kernel parameter
#'
#' @return matrix (Psi)
#'
#' @seealso \code{\link{modelKriging}}
#'
#' @export
#' @keywords internal
###################################################################################
fcorrGauss <- function(D,theta=0){
    theta <- 10^theta
    exp(-theta * D)
}

###################################################################################
#' Cubic Kernel for Kriging
#'
#' @param D distance matrix
#' @param theta kernel parameter
#'
#' @return matrix (Psi)
#'
#' @seealso \code{\link{modelKriging}}
#'
#' @export
#' @keywords internal
###################################################################################
fcorrCubic <- function(D,theta=0){
    theta <- 10^theta
    Psi <- pmin(D * theta,1)
    1 - Psi^2 * (3 - 2*Psi)
}

###################################################################################
#' Linear Kernel for Kriging
#'
#' @param D distance matrix
#' @param theta kernel parameter
#'
#' @return matrix (Psi)
#'
#' @seealso \code{\link{modelKriging}}
#'
#' @export
#' @keywords internal
###################################################################################
fcorrLinear <- function(D,theta=0){
    theta <- 10^theta
    pmax(1 - D * theta,0)
}

###################################################################################
#' Spherical Kernel for Kriging
#'
#' @param D distance matrix
#' @param theta kernel parameter
#'
#' @return matrix (Psi)
#'
#' @seealso \code{\link{modelKriging}}
#'
#' @export
#' @keywords internal
###################################################################################
fcorrSphere <- function(D,theta=0){
    theta <- 10^theta
    Psi <- pmin(D * theta,1)
    1 - Psi * (1.5 - 0.5*Psi^2)
}

###################################################################################
#' Kriging: Initial guess and bounds
#'
#' Initialize parameter tuning for the Kriging model, setting the initial guess
#' as well as bound constraints.
#'
#' @param startTheta user provided start guess (optional).
#' @param lowerTheta lower boundary for theta values (log scale), the kernel parameters.
#' @param upperTheta upper boundary for theta values (log scale), the kernel parameters.
#' @param useLambda boolean, whether nugget effect (lambda) is used.
#' @param lambdaLower lower boundary for lambda (log scale).
#' @param lambdaUpper upper boundary for lambda (log scale).
#' @param combineDistances boolean, whether multiple distances are combined.
#' @param nd number of distance functions.
#' @param distanceParameters whether the distance function parameters should be optimized
#' @param distanceParametersLower lower boundary for parameters of the distance function, default is \code{NA} which means there are no distance function parameters. If several distance functions are supplied, this should be a list of lower boundary vectors for each function.
#' @param distanceParametersUpper upper boundary for parameters of the distance function, default is \code{NA} which means there are no distance function parameters. If several distance functions are supplied, this should be a list of upper boundary vectors for each function.
#'
#' @return a list with elements \code{x0} (start guess), \code{lower} (lower bound), \code{upper} (upper bound).
#'
#' @seealso \code{\link{modelKriging}}
#'
#' @keywords internal
###################################################################################
modelKrigingInit <- function(startTheta=NULL,lowerTheta=NULL,upperTheta=NULL,useLambda,
                    lambdaLower, lambdaUpper, combineDistances,nd,
                    distanceParameters=F,distanceParametersLower=NA,distanceParametersUpper=NA){
    #ordering of the parameters:
    #first, the kernel function parameters, number: length(lowerTheta)
    #second, the weights for combining several distances (optional), number: 0|nd
    #third, lambda, the regression constant (optional), number: 0|1
    #fourth, the distance function parameters (optional), number: 0|length(distanceParametersLower)
    if(any(is.na(lowerTheta))){ #NA bounds -> no parameter in the correlation function (at least none to be estimated)
        lowerTheta <- NULL
        upperTheta <- NULL
    }
    if(combineDistances){
        lowerTheta <- c(lowerTheta,rep(-8,nd))
        upperTheta <- c(upperTheta,rep(6,nd))
    }
    if(useLambda){ #append regression constant lambda (nugget)
        lowerTheta <- c(lowerTheta,lambdaLower)
        upperTheta <- c(upperTheta,lambdaUpper)
    }
    #parameters of the distance function
    if(distanceParameters){
        if(is.list(distanceParametersLower)){
            distanceParametersLower <- unlist(distanceParametersLower)
            distanceParametersUpper <- unlist(distanceParametersUpper)
        }
        lowerTheta <- c(lowerTheta,distanceParametersLower)
        upperTheta <- c(upperTheta,distanceParametersUpper)
    }
    #start value for theta
    if(is.null(startTheta)){
        x0 <- lowerTheta + (upperTheta - lowerTheta)*0.5
    }else{
        #force the user-provided start guess into bounds
        x0 <- pmin(startTheta,upperTheta)
        x0 <- pmax(x0,lowerTheta)
    }
    list(x0=x0,lower=lowerTheta,upper=upperTheta)
}

###################################################################################
#' Kriging: Distance Matrix Calculation
#'
#' Calculate and scale the distance matrix used in a Kriging model.
#' Includes definiteness correction.
#' Not to be called directly.
#'
#' @param x list of samples in input space
#' @param distanceFunction a suitable distance function of type f(x1,x2), returning a scalar distance value, preferably between 0 and 1.
#' Maximum distances larger than 1 are no problem, but may yield scaling bias when different measures are compared.
#' Should be non-negative and symmetric. It can also be a list of several distance functions. In this case, Maximum Likelihood Estimation (MLE) is used
#' to determine the most suited distance measure.
#' The distance function may have additional parameters.
#' @param parameters parameters passed to the distance function as a vector.
#' @param distances precomputed distances, set to NA if not available.
#' @param scaling boolean, whether to scale the distance matrix.
#' @param combineDistances boolean, whether to combine the distances of different functions.
#' @param indefiniteMethod method for handling non-conditionally-definite matrices.
#' @param indefiniteType type of handling for non-conditionally-definite matrices.
#' @param indefiniteRepair whether to further repair other conditions (beside definiteness).
#' @param lower lower boundary for distance function parameters.
#'
#' @return a list with elements \code{D} (distance matrix), \code{maximumDistance} (maximal distance, for scaling purposes), \code{origD} (unmodified distance matrix), \code{A} (transformation matrix of the definiteness correction), \code{isCNSD} (whether the original matrix is CNSD) and \code{matNoRep} (corrected, but unrepaired matrix).
#'
#' @seealso \code{\link{modelKriging}}
#'
#' @keywords internal
###################################################################################
modelKrigingDistanceCalculation <- function(x,distanceFunction,parameters=NA,
                    distances,scaling,combineDistances,indefiniteMethod,indefiniteType,indefiniteRepair,lower){
    nd <- length(distanceFunction) # number of distance functions
    if(!is.null(distances)){
        nodistances <- is.na(distances[1])
    }else{
        nodistances <- TRUE
    }
    #calculate distance matrix
    if(nd==1){ #one distance function
        if(nodistances){
            if(any(is.na(parameters))) #no parameters given
                D <- distanceMatrix(x,distanceFunction)
            else #parameters are given
                D <- distanceMatrix(x,distanceFunction,parameters)
        }else{
            D <- distances
        }
        maxD <- max(D) #maximum distance
        if(scaling){
            D <- D/maxD
        }
    }else{ #multiple distance functions
        if(nodistances){
            D <- list()
            maxD <- list()
            indices <- rep(1:nd,sapply(lower,length)) #indices assigning each parameter to a distance function
            for(i in 1:nd){
                if(any(is.na(parameters))) #no parameters given
                    D[[i]] <- distanceMatrix(x,distanceFunction[[i]])
                else
                    D[[i]] <- distanceMatrix(x,distanceFunction[[i]],parameters[indices==i])
                maxD[[i]] <- max(D[[i]]) #maximum distance
                if(scaling){
                    D[[i]] <- D[[i]]/maxD[[i]]
                }
            }
        }else{
            D <- distances
            maxD <- list()
            for(i in 1:nd){
                maxD[[i]] <- max(D[[i]]) #maximum distance
                if(scaling){
                    D[[i]] <- D[[i]]/maxD[[i]]
                }
            }
        }
    }
    # Fix Definiteness (NSDness, CNSDness) of the provided distance matrix/matrices
    origD <- D
    A <- NA
    isCNSD <- NA
    matNoRep <- NA
    if(nd==1){ #in case of one distance function
        ret <- correctionDistanceMatrix(D,indefiniteType,indefiniteMethod,indefiniteRepair)
        D <- ret$mat
        isCNSD <- ret$isCNSD
        A <- ret$A
        matNoRep <- ret$matNoRep
    }else if(!combineDistances){ #in case of multiple distances, which are not combined (but chosen from):
        isCNSD <- list()
        A <- list()
        matNoRep <- list()
        for(i in 1:nd){
            ret <- correctionDistanceMatrix(D[[i]],indefiniteType,indefiniteMethod,indefiniteRepair)
            matNoRep[[i]] <- ret$matNoRep
            D[[i]] <- ret$mat
            isCNSD[[i]] <- ret$isCNSD
            A[[i]] <- ret$A
        }
    }
    list(maximumDistance=maxD,D=D,origD=origD,A=A,isCNSD=isCNSD,matNoRep=matNoRep)
}
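# Usage sketch (not run): likelihood-based choice between several distance
# measures, as described for the distanceFunction parameter of modelKriging.
# Assumes distancePermutationHamming and distancePermutationSwap are available
# in this package; data setup mirrors the modelKriging examples above.
# set.seed(1)
# fn <- landscapeGeneratorUNI(1:5, distancePermutationHamming)
# x <- unique(replicate(20, sample(5), FALSE)); y <- fn(x)
# fit <- modelKriging(x, y,
#        list(distancePermutationHamming, distancePermutationSwap),
#        control=list(useLambda=FALSE))
# fit$distanceFunctionIndex # index of the distance measure chosen by MLE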
/scratch/gouwar.j/cran-all/cranData/CEGO/R/modelKriging.R
###################################################################################
#' Calculate negative log-likelihood
#'
#' Used to determine theta/lambda/p values for the Kriging model in \code{\link{modelKriging}}
#' with Maximum Likelihood Estimation (MLE).
#'
#' @param xt vector, containing parameters like theta, p and lambda.
#' @param D matrix (or list of multiple matrices) of distances between training samples. In case of multiple distance matrices, theta (part of xt) has to be a vector, giving a weighting parameter for each matrix.
#' @param y vector of observations at sample locations.
#' @param useLambda whether to use nugget effect, i.e., lambda (FALSE at default).
#' @param corr correlation function (kernel) to be used, \code{fcorrGauss} at default.
#' @param indefiniteMethod The specific method used for correction: spectrum \code{"clip"}, spectrum \code{"flip"}, spectrum \code{"square"}, spectrum \code{"diffusion"}, feature embedding "feature", nearest definite matrix "near". Default is no correction: \code{"none"}. See Zaefferer and Bartz-Beielstein (2016).
#' @param indefiniteType The general type of correction for indefiniteness: \code{"NSD"},\code{"CNSD"} or the default \code{"PSD"}. See Zaefferer and Bartz-Beielstein (2016).
#' @param indefiniteRepair boolean, whether conditions of the distance matrix (in case of \code{"NSD"},\code{"CNSD"} correction type) or correlation matrix (in case of \code{"PSD"} correction type) are repaired.
#' @param returnLikelihoodOnly boolean, whether the function should return only the likelihood, or else a list (see return information below).
#' @param inverter string, defining the inverter to use. Default \code{"chol"} is inversion via \code{chol2inv}. A different string will lead to use of \code{solve}.
#' @param ntheta number of kernel parameters.
#'
#' @return the numeric likelihood value (if \code{returnLikelihoodOnly} is TRUE) or a list with elements:
#' \describe{
#' \item{\code{NegLnLike}}{ concentrated log-likelihood *-1 for minimising }
#' \item{\code{Psi}}{ correlation matrix}
#' \item{\code{Psinv}}{ inverse of correlation matrix (to save computation time in forrRegPredictor)}
#' \item{\code{mu}}{ MLE of model parameter mu }
#' \item{\code{yMu}}{ vector of observations y minus mu}
#' \item{\code{SSQ}}{ MLE of model parameter sigma^2}
#' \item{\code{a}}{ transformation vector for eigenspectrum transformation, see Zaefferer and Bartz-Beielstein (2016)}
#' \item{\code{U}}{ Matrix of eigenvectors for eigenspectrum transformation, see Zaefferer and Bartz-Beielstein (2016)}
#' \item{\code{isIndefinite}}{ whether the uncorrected correlation (kernel) matrix is indefinite}
#' }
#'
#' @references Forrester, Alexander I.J.; Sobester, Andras; Keane, Andy J. (2008). Engineering Design via Surrogate Modelling - A Practical Guide. John Wiley & Sons.
#' @references Zaefferer, Martin; Stork, Joerg; Friese, Martina; Fischbach, Andreas; Naujoks, Boris; Bartz-Beielstein, Thomas. (2014). Efficient global optimization for combinatorial problems. In Proceedings of the 2014 conference on Genetic and evolutionary computation (GECCO '14). ACM, New York, NY, USA, 871-878. DOI=10.1145/2576768.2598282
#' @references Zaefferer, Martin; Stork, Joerg; Bartz-Beielstein, Thomas. (2014). Distance Measures for Permutations in Combinatorial Efficient Global Optimization.
#' In Parallel Problem Solving from Nature - PPSN XIII (p. 373-383). Springer International Publishing.
#' @references Martin Zaefferer and Thomas Bartz-Beielstein. (2016). Efficient Global Optimization with Indefinite Kernels. Parallel Problem Solving from Nature-PPSN XIV. Accepted, in press. Springer.
#'
#' @seealso \code{\link{modelKriging}}
#' @keywords internal
###################################################################################
modelKrigingLikelihood <- function(xt,D,y,useLambda=FALSE,corr=fcorrGauss,
                    indefiniteMethod="none",indefiniteType="PSD",indefiniteRepair=FALSE,
                    returnLikelihoodOnly=TRUE,inverter = "chol",ntheta=1){
    n <- dim(y)[1] #number of observations
    defaultPenalty <- n * log(var(y)) + 1e4
    isIndefinite <- NA
    if(is.list(D)){ #in case of multiple distance matrices
        distanceWeights <- 10^xt[ntheta+(1:length(D))]
        D <- Reduce("+",mapply("*",D,distanceWeights,SIMPLIFY=FALSE)) #weight each matrix by its corresponding weight value, and compute the sum of the matrices
        origD <- D
        # Fix Definiteness (NSDness, CNSDness) of the provided distance matrix
        ret <- correctionDistanceMatrix(D,indefiniteType,indefiniteMethod,indefiniteRepair)
        D <- ret$mat
        isCNSD <- ret$isCNSD
        A <- ret$A
    }
    if(ntheta<1){ #corr function has no parameters
        Psi <- corr(D)
    }else{
        theta <- xt[1:ntheta]
        Psi <- corr(D,theta)
    }
    if(any(is.infinite(Psi))){ # this is required especially if distance matrices are forced to be CNSD/NSD and hence have zero distances
        penalty <- defaultPenalty
        if(returnLikelihoodOnly){
            return(penalty)
        }
        return(list(NegLnLike=penalty,origPsi=NA,Psi=NA,unrepairedPsi=NA,Psinv=NA,mu=NA,yMu=NA,SSQ=NA,a=NA,U=NA,isIndefinite=isIndefinite))
    }
    U <- a <- NA
    origPsi <- NA
    unrepairedPsi <- NA
    if(indefiniteType == "PSD"){
        origPsi <- Psi
        ret <- correctionKernelMatrix(Psi,indefiniteMethod,indefiniteRepair)
        a <- ret$a
        U <- ret$U
        isIndefinite <- !ret$isPSD
        Psi <- ret$mat
        unrepairedPsi <- ret$matNoRep
        #check whether indef-correction somehow yielded malformed values
        if(any(is.na(Psi))){
            #warning("NaN or NA values due to failed indefiniteness-correction (in modelKrigingLikelihood). Returning penalty.")
            penalty <- defaultPenalty
            if(returnLikelihoodOnly){
                return(penalty)
            }
            return(list(NegLnLike=penalty,origPsi=NA,Psi=NA,unrepairedPsi=NA,Psinv=NA,mu=NA,yMu=NA,SSQ=NA,a=NA,U=NA,isIndefinite=isIndefinite))
        }
    }
    if(useLambda){
        lambda <- 10^xt[length(xt)]
        Psi <- Psi + diag(lambda,n)
    }
    if(inverter=="chol"){
        ## cholesky decomposition
        cholPsi <- try(chol(Psi), TRUE)
        ## give penalty if fail
        if(class(cholPsi)[1] == "try-error"){
            #warning("Correlation matrix is not positive semi-definite (in modelKrigingLikelihood). Returning penalty.")
            penalty <- defaultPenalty - min(eigen(Psi,symmetric=TRUE,only.values=TRUE)$values) #the minimal eigenvalue should push the search towards positive eigenvalues
            if(returnLikelihoodOnly){
                return(penalty)
            }
            return(list(NegLnLike=penalty,origPsi=NA,Psi=NA,unrepairedPsi=NA,Psinv=NA,mu=NA,yMu=NA,SSQ=NA,a=NA,U=NA,isIndefinite=isIndefinite))
        }
        #calculate natural log of the determinant of Psi (numerically more reliable and also faster than using det or determinant)
        LnDetPsi <- 2*sum(log(abs(diag(cholPsi))))
        #inverse with cholesky decomposed Psi
        Psinv <- try(chol2inv(cholPsi),TRUE)
        if(class(Psinv)[1] == "try-error"){
            #warning("Correlation matrix is not positive semi-definite (in modelKrigingLikelihood). Returning penalty.")
            penalty <- defaultPenalty - min(eigen(Psi,symmetric=TRUE,only.values=TRUE)$values) #the minimal eigenvalue should push the search towards positive eigenvalues
            if(returnLikelihoodOnly){
                return(penalty)
            }
            return(list(NegLnLike=penalty,origPsi=NA,Psi=NA,unrepairedPsi=NA,Psinv=NA,mu=NA,yMu=NA,SSQ=NA,a=NA,U=NA,isIndefinite=isIndefinite))
        }
    }else{
        Psinv <- try(solve(Psi),TRUE) #inverse with LU decomposition
        if(class(Psinv)[1] == "try-error"){
            #warning("Correlation matrix is not positive semi-definite (in modelKrigingLikelihood). Returning penalty.")
            penalty <- defaultPenalty - min(eigen(Psi,symmetric=TRUE,only.values=TRUE)$values) #the minimal eigenvalue should push the search towards larger eigenvalues
            if(returnLikelihoodOnly){
                return(penalty)
            }
            return(list(NegLnLike=penalty,origPsi=NA,Psi=NA,unrepairedPsi=NA,Psinv=NA,mu=NA,yMu=NA,SSQ=NA,a=NA,U=NA,isIndefinite=isIndefinite))
        }
        #calculate natural log of the determinant of Psi
        LnDetPsi <- determinant(Psi,logarithm=TRUE)$modulus
    }
    ## Check whether Psi is ill-conditioned
    kap <- 1 / ( max(colSums(abs(Psi))) * max(colSums(abs(Psinv)))) # == rcond(Psi)
    if(is.na(kap))
        kap <- 0
    if(kap < 1e-10){
        #warning("Correlation matrix is ill-conditioned (in modelKrigingLikelihood). Returning penalty.")
        penalty <- defaultPenalty
        if(returnLikelihoodOnly){
            return(penalty)
        }
        return(list(NegLnLike=penalty,origPsi=NA,Psi=NA,unrepairedPsi=NA,Psinv=NA,mu=NA,yMu=NA,SSQ=NA,a=NA,U=NA,isIndefinite=isIndefinite))
    }
    psisum <- sum(Psinv) #this sum of all matrix elements may sometimes become zero, which may be caused by inaccuracies. then, the following may help
    if(psisum==0){
        psisum <- as.numeric(rep(1,n) %*% Psinv %*% rep(1,n))
        if(psisum==0){ #if it is still zero, return penalty
            #warning("Sum of elements in inverse correlation matrix is zero (in modelKrigingLikelihood). Returning penalty.")
            penalty <- defaultPenalty
            if(returnLikelihoodOnly){
                return(penalty)
            }
            return(list(NegLnLike=penalty,origPsi=NA,Psi=NA,unrepairedPsi=NA,Psinv=NA,mu=NA,yMu=NA,SSQ=NA,a=NA,U=NA,isIndefinite=isIndefinite))
        }
    }
    mu <- sum(Psinv%*%y)/psisum
    if(is.infinite(mu)|is.na(mu)){
        #warning("MLE estimate of mu is infinite or NaN. Returning penalty.")
        penalty <- defaultPenalty
        if(returnLikelihoodOnly){
            return(penalty)
        }
        return(list(NegLnLike=penalty,origPsi=NA,Psi=NA,unrepairedPsi=NA,Psinv=NA,mu=NA,yMu=NA,SSQ=NA,a=NA,U=NA,isIndefinite=isIndefinite))
    }
    yMu <- y-mu
    #SigmaSqr <- (t(yMu)%*%Psinv%*%yMu)/n
    SigmaSqr <- crossprod(yMu,Psinv)%*%yMu/n
    if(SigmaSqr < 0){
        #warning("Maximum Likelihood Estimate of model parameter sigma^2 is negative (in modelKrigingLikelihood). Returning penalty.")
        penalty <- as.numeric(defaultPenalty-SigmaSqr)
        if(returnLikelihoodOnly){
            return(penalty)
        }
        return(list(NegLnLike=penalty,origPsi=NA,Psi=NA,unrepairedPsi=NA,Psinv=NA,mu=NA,yMu=NA,SSQ=NA,a=NA,U=NA,isIndefinite=isIndefinite))
    }
    NegLnLike <- n*log(SigmaSqr) + LnDetPsi
    if(is.na(NegLnLike)|is.infinite(NegLnLike)){ #this may happen eg if all y are 0
        penalty <- defaultPenalty
        if(returnLikelihoodOnly){
            return(penalty)
        }
        return(list(NegLnLike=penalty,origPsi=NA,Psi=NA,unrepairedPsi=NA,Psinv=NA,mu=NA,yMu=NA,SSQ=NA,a=NA,U=NA,isIndefinite=isIndefinite))
    }
    if(returnLikelihoodOnly){
        return(as.numeric(NegLnLike))
    }
    ret <- list(NegLnLike=NegLnLike,origPsi=origPsi,Psi=Psi,unrepairedPsi=unrepairedPsi,Psinv=Psinv,mu=mu,yMu=yMu,SSQ=SigmaSqr,a=a,U=U,isIndefinite=isIndefinite)
    if(exists("origD")){
        ret$D <- D
        ret$origD <- origD
        ret$isCNSD <- isCNSD
        ret$A <- A
    }
    ret
}

###################################################################################
#' Calculate negative log-likelihood
#'
#' This is a wrapper for the Kriging likelihood function \code{\link{modelKrigingLikelihood}}.
#' It is intended for the case where parameters of the distance function are also optimized
#' during maximum likelihood estimation. Thus, the wrapper receives the data, computes the
#' parameterized distance matrix and passes it to the standard likelihood function.
#'
#' @param xt vector, containing parameters like theta, p and lambda.
#' @param xs training samples, which are the input for the distance function. Should be in list format.
#' @param ys vector of observations at training sample locations.
#' @param useLambda whether to use nugget effect, i.e., lambda (FALSE at default).
#' @param corr correlation function (kernel) to be used, \code{fcorrGauss} at default.
#' @param inverter string specifying the method for inversion of the correlation matrix ("chol", Cholesky decomposition at default, any other string leads to using the solve function).
#' @param indefiniteMethod The specific method used for correction: spectrum \code{"clip"}, spectrum \code{"flip"}, spectrum \code{"square"}, spectrum \code{"diffusion"}, feature embedding "feature", nearest definite matrix "near". Default is no correction: \code{"none"}. See Zaefferer and Bartz-Beielstein (2016).
#' @param indefiniteType The general type of correction for indefiniteness: \code{"NSD"},\code{"CNSD"} or the default \code{"PSD"}. See Zaefferer and Bartz-Beielstein (2016).
#' @param indefiniteRepair boolean, whether conditions of the distance matrix (in case of \code{"NSD"},\code{"CNSD"} correction type) or correlation matrix (in case of \code{"PSD"} correction type) are repaired.
#' @param returnLikelihoodOnly boolean, whether the function should return only the likelihood, or else a list (see return information below).
#' @param distanceFunction the distance function.
#' @param combineDistances boolean, whether to combine several distances provided as a list of distance functions.
#' @param distanceParametersLower lower boundary for the distance function(s) parameters. A vector in case of one distance, a list of vectors in case of several functions. The parameters are passed as a vector to each respective distance function.
#' @param ntheta number of kernel parameters.
#' @param scaling boolean, whether to scale the distance matrix.
#'
#' @return the numeric likelihood value (if \code{returnLikelihoodOnly} is TRUE) or a list with elements:
#' \describe{
#' \item{\code{NegLnLike}}{ concentrated log-likelihood *-1 for minimising}
#' \item{\code{Psi}}{ correlation matrix}
#' \item{\code{Psinv}}{ inverse of correlation matrix (to save computation time in forrRegPredictor)}
#' \item{\code{mu}}{ MLE of model parameter mu }
#' \item{\code{yMu}}{ vector of observations y minus mu}
#' \item{\code{SSQ}}{ MLE of model parameter sigma^2}
#' \item{\code{a}}{ transformation vector for eigenspectrum transformation, see Zaefferer and Bartz-Beielstein (2016)}
#' \item{\code{U}}{ Matrix of eigenvectors for eigenspectrum transformation, see Zaefferer and Bartz-Beielstein (2016)}
#' \item{\code{isIndefinite}}{ whether the uncorrected correlation (kernel) matrix is indefinite}
#' }
#'
#' @seealso \code{\link{modelKrigingLikelihood}}
#' @keywords internal
###################################################################################
modelKrigingParameterizedLikelihood <- function(xt,xs,ys,useLambda=FALSE,corr=fcorrGauss,
                    indefiniteMethod="none",indefiniteType="PSD",indefiniteRepair=FALSE,returnLikelihoodOnly=TRUE,inverter = "chol",
                    distanceFunction,combineDistances,distanceParametersLower,ntheta,scaling){
    #######
    nd <- length(distanceFunction) # number of distance functions
    if(combineDistances & nd>1){
        nweights=nd #number of weight parameters for combining distances
    }else{
        nweights=0 #number of weight parameters for combining distances
    }
    # parameters of the distance function(s)
    if(ntheta > 0 | nweights > 0 | useLambda)
        distanceParameters <- xt[-(1:(ntheta+nweights+useLambda))]
    else
        distanceParameters <- xt
    #calculate distance matrix
    ret <- modelKrigingDistanceCalculation(xs,distanceFunction=distanceFunction,parameters=distanceParameters,
        distances=NULL,scaling=scaling,combineDistances=combineDistances,indefiniteMethod=indefiniteMethod,
        indefiniteType=indefiniteType,indefiniteRepair=indefiniteRepair,distanceParametersLower)
    #Call ordinary likelihood function
    ret2 <- modelKrigingLikelihood(xt=xt[1:(ntheta+nweights+useLambda)],ret$D,ys,useLambda,corr,
        indefiniteMethod,indefiniteType,indefiniteRepair,returnLikelihoodOnly,inverter,ntheta)
    if(returnLikelihoodOnly){
        return(ret2)
    }
    ret2[names(ret)] <- ret
    ret2
} #todo: testing!
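# Usage sketch (not run): evaluating the internal likelihood directly, e.g., to
# inspect the likelihood surface over theta. Parameter layout of xt: ntheta kernel
# parameters first, then distance weights / lambda / distance parameters (if used).
# Assumes distanceMatrix and distancePermutationHamming from this package.
# D <- distanceMatrix(list(1:4, c(2,1,3,4), c(4,3,2,1)), distancePermutationHamming)
# y <- matrix(c(0.1, 0.3, 0.9), ncol=1)
# sapply(seq(-2, 2, by=0.5), function(th)
#   modelKrigingLikelihood(xt=th, D=D, y=y, useLambda=FALSE, ntheta=1))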
/scratch/gouwar.j/cran-all/cranData/CEGO/R/modelKrigingLikelihood.R
###################################################################################
#' Kriging Prediction
#'
#' Predict with a model fit resulting from \code{\link{modelKriging}}.
#'
#' @param object fit of the Kriging model (settings and parameters), of class \code{modelKriging}.
#' @param x list of samples to be predicted
#' @param ... further arguments, not used
#'
#' @return Returned value depends on the setting of \code{object$predAll}\cr
#' TRUE: list with function value (mean) \code{object$y} and uncertainty estimate \code{object$s} (standard deviation)\cr
#' FALSE: \code{object$y} only
#'
#' @seealso \code{\link{modelKriging}}
#' @seealso \code{\link{simulate.modelKriging}}
#' @export
###################################################################################
predict.modelKriging <- function(object,x,...){
    ret <- modelKrigingInternalPredictor(object,x)
    psi <- ret$psi
    ## return value:
    res <- list(y=ret$y)
    ##########################################################################
    if (object$predAll){
        Psinv <- object$Psinv
        lambda <- object$lambda
        SigmaSqr <- object$SSQ
        if(object$indefiniteType=="PSD" & any(object$indefiniteMethod==c("clip","flip","square","diffusion"))){
            if(object$isIndefinite){
                if(!object$indefiniteRepair){
                    Psinv <- object$PsinvA
                }
            }
        }
        if(object$reinterpolate & lambda > 0){
            SigmaSqr <- object$SSQReint
            Psinv <- object$PsinvReint
            lambda <- 0
        }
        # Psinv / PsinvReint has t(A)%*%Psi%*%A included already, if necessary. else, the transformation has already been performed for psi
        SSqr <- SigmaSqr*(1+lambda-diag(psi%*%Psinv%*%t(psi)))
        s <- sqrt(abs(SSqr))
        res$s <- as.numeric(s) #return value
    }
    res
}

###################################################################################
#' Kriging Simulation
#'
#' (Conditional) Simulate at given locations, with a model fit resulting from \code{\link{modelKriging}}.
#' In contrast to prediction or estimation, the goal is to reproduce the covariance
#' structure, rather than the data itself. Note, that the conditional simulation
#' also reproduces the training data, but
#' has a two times larger error than the Kriging predictor.
#'
#' @param object fit of the Kriging model (settings and parameters), of class \code{modelKriging}.
#' @param nsim number of simulations
#' @param seed random number generator seed. Defaults to NA, in which case no seed is set
#' @param xsim list of samples in input space, to be simulated
#' @param conditionalSimulation logical, if set to TRUE (default), the simulation is conditioned with the training data of the Kriging model.
#' Else, the simulation is non-conditional.
#' @param returnAll if set to TRUE, a list with the simulated values (y) and the corresponding covariance matrix (covar)
#' of the simulated samples is returned.
#' @param ... further arguments, not used
#'
#' @return Returned value depends on the setting of \code{object$simulationReturnAll}
#'
#' @references N. A. Cressie. Statistics for Spatial Data. JOHN WILEY & SONS INC, 1993.
#' @references C. Lantuejoul. Geostatistical Simulation - Models and Algorithms. Springer-Verlag Berlin Heidelberg, 2002.
#'
#' @seealso \code{\link{modelKriging}}, \code{\link{predict.modelKriging}}
#' @export
###################################################################################
simulate.modelKriging <- function(object,nsim=1,seed=NA,xsim,conditionalSimulation=TRUE,returnAll=FALSE,...){
    if (!is.na(seed)){
        if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE))
            runif(1)
        R.seed <- get(".Random.seed", envir = .GlobalEnv)
        set.seed(seed)
        on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv))
    }
    #
    len <- length(xsim) #number of simulated samples (points)
    noise <- matrix(rnorm(len*nsim),len, nsim)
    #
    res <- computeCorrelationMatrix(object,xsim)
    covar <- res$psi
    #
    if(conditionalSimulation){
        ret <- modelKrigingInternalPredictor(object,xsim)
        y <- ret$y
        psi <- ret$psi
        covarDifference <- covar - psi %*% object$Psinv %*% t(psi)
        eigv <- eigen(object$SSQ * covarDifference,symmetric=T) #eigen decomposition
        covarDecomposed <- eigv$vectors %*% diag(sqrt(abs(eigv$values))) %*% eigv$vectors
        ysim <- covarDecomposed %*% noise
        #and the following adds the simulation part to the predictor
        y <- matrix(y,len,nsim) + ysim
    }else{
        eigv <- eigen(object$SSQ * covar,symmetric=T) #eigen decomposition
        covarDecomposed <- eigv$vectors %*% diag(sqrt(abs(eigv$values))) %*% eigv$vectors
        y <- object$mu + covarDecomposed %*% noise
    }
    res$y <- y
    if(returnAll)
        return(res)
    else
        return(y)
}

###################################################################################
#' Kriging Prediction (internal)
#'
#' Predict with a model fit resulting from \code{\link{modelKriging}}.
#'
#' @param object fit of the Kriging model (settings and parameters), of class \code{modelKriging}.
#' @param x list of samples to be predicted
#'
#' @return returns a list with:
#' \describe{
#' \item{\code{y}}{predicted values}
#' \item{\code{psi}}{correlations between x and training data}
#' }
#'
#' @seealso \code{\link{simulate.modelKriging}}
#' @seealso \code{\link{predict.modelKriging}}
#' @keywords internal
###################################################################################
modelKrigingInternalPredictor <- function(object,x){
    if(!is.list(x)) x <- list(x)
    xo <- object$x
    Psinv <- object$Psinv
    n <- length(xo)
    #one <- rep(1,n)
    mu <- object$mu
    yMu <- object$yMu
    psi <- matrix(1,length(x),n)
    fundist <- object$distanceFunction
    if(is.list(fundist)){ # multiple distance functions to be combined
        psi <- replicate(length(fundist),psi,simplify=FALSE)
        if(object$useDistanceParameters){
            indices <- rep(1:length(fundist),sapply(object$distanceParametersLower,length)) #indices assigning each parameter to a distance function
        }
        for(j in 1:length(fundist)){
            for (i in 1:n){
                if(!object$useDistanceParameters){
                    psi[[j]][,i] <- distanceVector(xo[[i]],x,fundist[[j]])
                }else{
                    psi[[j]][,i] <- distanceVector(xo[[i]],x,fundist[[j]],object$distanceParameters[indices==j])
                }
            }
            if(object$scaling){
                psi[[j]] <- psi[[j]]/object$maximumDistance[[j]]
            }
        }
        psi <- Reduce("+",mapply("*",psi,object$distanceWeights,SIMPLIFY=FALSE)) #combine result by weighted sum
    }else{ #only one distance function
        for (i in 1:n){
            if(!object$useDistanceParameters){
                psi[,i] <- distanceVector(xo[[i]],x,fundist)
            }else{
                psi[,i] <- distanceVector(xo[[i]],x,fundist,object$distanceParameters)
            }
        }
        if(object$scaling){
            psi <- psi/object$maximumDistance
        }
    }
    #
    ##
    #
    if(object$indefiniteRepair == 3){ #Required distance data for Weighted Sum Repair.
        rd <- 1/psi #reciprocal distances
    }else if(object$indefiniteRepair == 4){ #required indices of nearest neighbours for NN repair
        rd1 <- apply(psi,1,function(x) which(x==min(x))) #which are the nearest neighbors to the predicted points
    }
    #
    ##
    #
    if(any(object$indefiniteMethod==c("clip","flip","near","square","diffusion"))){ #Distance matrix was not CNSD, and a suitable correction method was chosen.
        if(object$indefiniteType=="NSD" & !object$indefiniteRepair){ #no repair, NSD-correction: transformation can be used directly
            if(!object$isCNSD)
                psi <- psi %*% t(object$A)
        }
        if(object$indefiniteType=="CNSD" | (object$indefiniteType=="NSD" & object$indefiniteRepair)){
            if(!object$isCNSD){
                if(object$indefiniteRepair == 1 | object$indefiniteType=="CNSD"){ #1 is augmented repair
                    if(object$indefiniteMethod!="near") #this could be removed, but is currently computationally too expensive
                        psi <- correctionAugmentedDistanceVector(psi,object,x) #in case of repair, CNSD-correction: retransform with augmented distance matrix
                }else if(object$indefiniteRepair == 2){ #nystroem. will not work for CNSD, lacks linear transform.
                    #nystroem approximation instead of correctionAugmented*
                    #so: first compute transformed (not repaired) psi, then inverse of the actual, transformed (but not repaired) Psi matrix,
                    # (this may require saving that matrix separately, because A may already be in it.)
                    #then use product to get self-similarities. use as divider for transformed psi.
                    psi <- psi %*% t(object$A)
                    add <- diag(psi %*% ginv(object$matNoRep) %*% t(psi)) #nystroem. only diagonal needed as divider
                    add <- matrix(add,nrow(psi),nrow(object$matNoRep))
                    add2 <- matrix(diag(object$matNoRep),nrow(psi),nrow(object$matNoRep),byrow=T)
                    psi <- 2*psi - add - add2 #repair
                    #correct lower bound add by
                    rdif <- rowSums(psi<0)>0 # rows with at least one negative value
                    if(any(rdif))
                        psi[rdif,] <- psi[rdif,,drop=FALSE] - apply(psi[rdif,,drop=FALSE],1,min)
                }else if(object$indefiniteRepair == 3){ #weighted sum repair. will not work for CNSD, lacks linear transform.
                    rd1 <- rd / rowSums(rd) #scale by row sum, such that rows sum to 1
                    if(any(is.na(rd1))){ #inf values produce NAs that should be 1
                        nas <- is.na(rd1)
                        naIndex <- which(nas)
                        rd1[naIndex] <- (nas / rowSums(nas))[naIndex]
                    }
                    psi <- psi %*% t(object$A)
                    #rd is a weight vector. need weighted sum of diagonal of unrepaired distance matrix (after correction, before repair.)
                    add <- rd1 %*% diag(object$matNoRep)
                    add <- matrix(add,nrow(psi),nrow(object$matNoRep))
                    add2 <- matrix(diag(object$matNoRep),nrow(psi),nrow(object$matNoRep),byrow=T)
                    #lower bound for add (the estimated repair factor): 2*psi - add2 (add2 is the known repair factor of the observations)
                    psi <- 2*psi - add2 - add #repair procedure
                    #correct lower bound add by
                    rdif <- rowSums(psi<0)>0 # rows with at least one negative value
                    if(any(rdif))
                        psi[rdif,] <- psi[rdif,,drop=FALSE] - apply(psi[rdif,,drop=FALSE],1,min)
                }else if(object$indefiniteRepair == 4){ #nearest neighbour repair. will not work for CNSD, lacks linear transform.
                    psi <- psi %*% t(object$A) #transform psi
                    #need mean of diagonal values indexed by rd1, of unrepaired distance matrix (after correction, before repair.)
                    diagNN <- diag(object$matNoRep)
                    meanidx <- function(x){
                        return(mean(diagNN[x]))
                    }
                    meanDiagNN <- sapply(rd1,meanidx) #get the mean diagonal elements of nearest neighbors
                    add <- matrix(meanDiagNN,nrow(psi),nrow(object$matNoRep)) #the NN elements are the first added factor
                    add2 <- matrix(diag(object$matNoRep),nrow(psi),nrow(object$matNoRep),byrow=T) #the elements of the matNoRep are the second element.
                    psi <- 2*psi - add2 - add #repair procedure
                    ##
                    #lower bound for add (the estimated repair factor): 2*psi - add2 (add2 is the known repair factor of the observations)
                    #correct lower bound add by
                    rdif <- rowSums(psi<0)>0 # rows with at least one negative value
                    if(any(rdif))
                        psi[rdif,] <- psi[rdif,,drop=FALSE] - apply(psi[rdif,,drop=FALSE],1,min)
                }
            }
        }
    }
    if((object$indefiniteType=="CNSD" | object$indefiniteType=="NSD") & object$indefiniteMethod=="feature"){ #distances as features
        if(!object$isCNSD){
            tempx <- split(psi,seq(nrow(psi)))
            for (i in 1:n)
                psi[,i] <- distanceVector(object$origD[i,],tempx,distanceRealEuclidean) #todo choice of distance
        }
    }
    #
    ##
    #
    if(is.null(object$theta)) #corr function has no parameters
        psi <- object$corr(psi)
    else
        psi <- object$corr(psi,object$theta)
    if(object$indefiniteType=="PSD" & any(object$indefiniteMethod==c("clip","flip","near","square","diffusion"))){
        if(object$isIndefinite){
            if(!object$indefiniteRepair){
                #psi <- psi %*% t(object$A) #This is already included in Psinv. do nothing.
            }else{
                if(object$indefiniteRepair == 1){
                    psi <- correctionAugmentedKernelVector(psi,object,x)
                }else{
                    if(object$indefiniteRepair == 2){ # Nystroem repair.
                        #todo: note: this case is not working. zero variances.
                        #psi <- psi %*% t(object$A) #not needed, contained in unrepairedAPsinvA / unrepairedADivier
                        div <- diag(psi %*% object$unrepairedAPsinvA %*% t(psi)) #nystroem. only diagonal needed as divider
                        div <- diag(1/sqrt(div),nrow(psi))
                        psi <- div %*% psi %*% object$ADividedSqrtDiagPsi #repair
                    }else if(object$indefiniteRepair == 3){ #Weighted Sum Repair.
                        rd1 <- rd / rowSums(rd) #scale by row sum, such that rows sum to 1
                        if(any(is.na(rd1))){ #inf values produce NAs that should be 1
                            nas <- is.na(rd1)
                            naIndex <- which(nas)
                            rd1[naIndex] <- (nas / rowSums(nas))[naIndex]
                        }
                        #rd is a weight vector. need weighted sum of diagonal of unrepaired Psi (after correction, before repair.)
                        rd2 <- rd1 %*% object$diagUnrepairedPsi
                        div <- diag(1/sqrt(as.numeric(rd2)),nrow(psi))
                        psi <- div %*% psi %*% object$ADividedSqrtDiagPsi #repair
                    }else if(object$indefiniteRepair == 4){ #nearest neighbour repair. will not work for CNSD, lacks linear transform.
                        #need mean of diagonal values indexed by rd1, of unrepaired Psi (after correction, before repair.)
                        diagNN <- object$diagUnrepairedPsi
                        meanidx <- function(x){
                            return(mean(diagNN[x]))
                        }
                        meanDiagNN <- sapply(rd1,meanidx) #get the mean diagonal elements of nearest neighbors
                        div <- diag(1/sqrt(as.numeric(meanDiagNN)),nrow(psi))
                        psi <- div %*% psi %*% object$ADividedSqrtDiagPsi #repair
                    }
                    psi[psi > 1] <- 1 #make sure that psi stays in bounds. to avoid numerical issues. see "NOTE" above for distance case.
                    psi[psi < -1] <- -1 #no guarantees that approximated repair keeps bounds intact for new data?
                }
            }
        }
    }
    y <- as.numeric(psi%*%Psinv%*%yMu)+mu #todo: Psinv%*%yMu can be precomputed for speedup
    list(y=y,psi=psi)
}

###################################################################################
#' Compute Correlation Matrix
#'
#' Compute the correlation matrix of samples x, given the model object.
#'
#' @param object fit of the Kriging model (settings and parameters), of class \code{modelKriging}.
#' @param x list of samples / data
#'
#' @return the correlation matrix
#'
#' @seealso \code{\link{simulate.modelKriging}}
#' @seealso \code{\link{predict.modelKriging}}
#' @keywords internal
###################################################################################
computeCorrelationMatrix <- function(object,x){
    if(!is.list(x)) x <- list(x)
    if(is.null(object$distanceParameters))
        object$distanceParameters <- NA
    ret <- modelKrigingDistanceCalculation(x,object$distanceFunction,parameters=object$distanceParameters,
        NULL,object$scaling,object$combineDistances,object$indefiniteMethod,object$indefiniteType,object$indefiniteRepair,object$distanceParametersLower)
    psi <- ret$D
    #
    ##
    #
    if(is.null(object$theta)) #corr function has no parameters
        psi <- object$corr(psi)
    else
        psi <- object$corr(psi,object$theta)
    #
    ##
    #
    ret$U <- NA
    ret$a <- NA
    ret$isIndefinite <- NA
    ret$origPsi <- NA
    if(object$indefiniteType=="PSD" & any(object$indefiniteMethod==c("clip","flip","near","square","diffusion"))){
        #psi <- correctionKernelMatrix(psi,object$indefiniteMethod,object$indefiniteRepair)$mat
        ret$origPsi <- psi
        ret2 <- correctionKernelMatrix(psi,object$indefiniteMethod,object$indefiniteRepair)
        ret$a <- ret2$a
        ret$U <- ret2$U
        ret$A <- ret2$A
        ret$isIndefinite <- !ret2$isPSD
        psi <- ret2$mat
    }
    #
    ##
    #
    if(object$useLambda){
        psi <- psi + diag(object$lambda,length(x))
    }
    #
    ##
    #
    ret$psi <- psi
    ret
}
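# Usage sketch (not run): conditional simulation at new locations, reusing a fit
# like the one from the modelKriging examples ("fit" is assumed to exist).
# xsim <- unique(replicate(10, sample(5), FALSE))
# ysim <- simulate(fit, nsim=2, seed=1, xsim=xsim, conditionalSimulation=TRUE)
# dim(ysim) # one column per simulated path; rows correspond to xsim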
/scratch/gouwar.j/cran-all/cranData/CEGO/R/modelKrigingPredict.R
###################################################################################
#' Print Function: modelKriging
#'
#' Print information about a Kriging fit, as produced by \code{\link{modelKriging}}.
#'
#' @rdname print
#' @method print modelKriging
# @S3method print modelKriging
#' @param x fit returned by \code{\link{modelKriging}}.
#' @param ... additional parameters
#' @export
#' @keywords internal
###################################################################################
print.modelKriging <- function(x,...){
  cat("------------------------\n")
  cat("Kriging model fit of class modelKriging\n")
  cat("\n")
  if(!is.null(x$theta)){
    cat("Estimated parameters of the correlation function (kernel):\n")
    cat(paste(round(x$theta,4),sep=" "))
    cat("\n \n")
  }
  if(!is.null(x$distanceParameters)){
    cat("Estimated parameters of the distance function:\n")
    cat(paste(round(x$distanceParameters,4),sep=" "))
    cat("\n \n")
  }
  if(x$combineDistances & is.list(x$distanceFunction)){
    cat("Several distance functions are combined in this model.\n")
    cat("The following weights are used to combine them:\n")
    cat(paste(round(x$distanceWeights,4),sep=" ",collapse=" "))
    cat("\n \n")
  }
  if(x$useLambda){
    cat("Estimated regularization constant (nugget) lambda:\n")
    cat(x$lambda)
    cat("\n \n")
  }else{
    cat("No regularization constant (nugget) was used.\n")
    cat("\n")
  }
  cat("Number of Likelihood evaluations during MLE:\n")
  cat(x$nevals)
  cat("\n")
  cat("Minimal NegLnLikelihood:\n")
  cat(x$like)
  cat("\n")
  cat("------------------------\n")
}
/scratch/gouwar.j/cran-all/cranData/CEGO/R/modelKrigingPrint.R
###################################################################################
#' Distance based Linear Model
#'
#' A simple linear model based on arbitrary distances. Comparable to a k-nearest-neighbor model, but potentially able to extrapolate
#' into regions of improvement. Used as a simple baseline by Zaefferer et al. (2014).
#'
#' @param x list of samples in input space
#' @param y matrix, vector of observations for each sample
#' @param distanceFunction a suitable distance function of type f(x1,x2), returning a scalar distance value, preferably between 0 and 1.
#' Maximum distances larger than 1 are no problem, but may yield scaling bias when different measures are compared.
#' Should be non-negative and symmetric.
#' @param control currently unused, defaults to \code{list()}
#'
#'
#' @return a fit (list, modelLinear), with the options and found parameters for the model which has to be passed to the predictor function:
#' \describe{
#' \item{\code{x}}{ samples in input space (see parameters)}
#' \item{\code{y}}{ observations for each sample (see parameters)}
#' \item{\code{distanceFunction}}{ distance function (see parameters)}
#' }
#'
#' @seealso \code{\link{predict.modelLinear}}
#'
#' @references Zaefferer, Martin; Stork, Joerg; Friese, Martina; Fischbach, Andreas; Naujoks, Boris; Bartz-Beielstein, Thomas. (2014). Efficient global optimization for combinatorial problems. In Proceedings of the 2014 conference on Genetic and evolutionary computation (GECCO '14). ACM, New York, NY, USA, 871-878. DOI=10.1145/2576768.2598282
#'
#' @examples
#' #set random number generator seed
#' set.seed(1)
#' #simple test landscape
#' fn <- landscapeGeneratorUNI(1:5,distancePermutationHamming)
#' #generate data for training and test
#' x <- unique(replicate(40,sample(5),FALSE))
#' xtest <- x[-(1:15)]
#' x <- x[1:15]
#' #determine true objective function values
#' y <- fn(x)
#' ytest <- fn(xtest)
#' #build model
#' fit <- modelLinear(x,y,distancePermutationHamming)
#' #predicted obj. function values
#' ypred <- predict(fit,xtest)$y
#' #plot
#' plot(ytest,ypred,xlab="true value",ylab="predicted value",
#'     pch=20,xlim=c(0.3,1),ylim=c(min(ypred)-0.1,max(ypred)+0.1))
#' abline(0,1,lty=2)
#' @export
###################################################################################
modelLinear <- function(x, y, distanceFunction, control=list()){ #linear distance model
  fit <- list()
  fit$distanceFunction <- distanceFunction
  fit$x <- x
  fit$y <- y
  class(fit) <- "modelLinear"
  fit
}

###################################################################################
#' Linear Distance-Based Model
#'
#' DEPRECATED version of the linear, distance-based model, please use \code{\link{modelLinear}}
#'
#' @param x list of samples in input space
#' @param y column vector of observations for each sample
#' @param distanceFunction a suitable distance function of type f(x1,x2), returning a scalar distance value
#' @param control options for the model building procedure
#'
#' @keywords internal
#' @export
###################################################################################
combinatorialLM <- function(x, y, distanceFunction, control = list()){
  .Deprecated("modelLinear")
  modelLinear(x,y,distanceFunction,control)
}

###################################################################################
#' Predict: Linear Distance-Based Model
#'
#' Predict with a modelLinear fit.
#'
#' @param object fit of the linear distance-based model (settings and parameters), of class \code{modelLinear}.
#' @param x list of samples to be predicted #' @param ... further arguments, not used #' #' @return numeric vector of predictions #' #' @seealso \code{\link{modelLinear}} #' @export ################################################################################### predict.modelLinear <- function(object,x,...){ #approach: sort by distance, build linear model, predict at zero. if(!is.list(x))x<-list(x) pred=NULL for(i in 1:length(x)){ distx <- distanceVector(x[[i]],object$x,object$distanceFunction) fy=object$y dx=distx #global model s2 <- sort(unique(distx))[2] index <- which(distx <= s2) fy=fy[index] dx=dx[index] #local model index2 <- which(dx < s2) fy1 = mean(fy[index2]) fy2 = mean(fy[-index2]) difY = fy2-fy1 difD = max(dx)-min(dx) m=difY/difD pred <- c(pred, fy1-min(dx)*m) } list(y=pred,s=rep(0,length(pred))) }
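# Worked illustration of the prediction rule implemented above (hypothetical
# values): the nearest "distance shell" around the query point and the second
# shell are averaged, and a line through both is extrapolated to distance zero.
# fy1 <- 0.4; fy2 <- 0.7         # mean fitness of nearest / second-nearest shell
# d1 <- 0.2; d2 <- 0.6           # their distances to the query point
# m <- (fy2 - fy1) / (d2 - d1)   # slope of the local linear model
# fy1 - d1 * m                   # extrapolated prediction at distance 0: 0.25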
/scratch/gouwar.j/cran-all/cranData/CEGO/R/modelLm.R
###################################################################################
#' RBFN Model
#'
#' Implementation of a distance-based Radial Basis Function Network (RBFN) model, e.g., for mixed or combinatorial input spaces.
#' It is based on employing suitable distance measures for the samples in input space. For reference, see
#' the paper by Moraglio and Kattan (2011).
#'
#' @param x list of samples in input space
#' @param y column vector of observations for each sample
#' @param distanceFunction a suitable distance function of type f(x1,x2), returning a scalar distance value, preferably between 0 and 1.
#' Maximum distances larger than 1 are no problem, but may yield scaling bias when different measures are compared.
#' Should be non-negative and symmetric.
#' @param control (list), with the options for the model building procedure:
#' \describe{
#' \item{\code{beta}}{ Parameter of the radial basis function: exp(-beta*D), where D is the distance matrix. If beta is not specified, the heuristic in fbeta will be used to determine it, which is default behavior.}
#' \item{\code{fbeta}}{ Function f(x) to calculate the beta parameter, x is the maximum distance observed in the input data. Default function is \code{1/(2*(x^2))}.}
#' \item{\code{distances}}{ a distance matrix. If available, this matrix is used for model building, instead of calculating the distance matrix using the parameters \code{distanceFunction}. Default is \code{NULL}.}
#' }
#'
#' @return a fit (list, modelRBFN), with the options and found parameters for the model which has to be passed to the predictor function:
#' \describe{
#' \item{\code{SSQ}}{ Variance of the observations (y)}
#' \item{\code{centers}}{ Centers of the RBFN model, samples in input space (see parameters)}
#' \item{\code{w}}{ Model parameters (weights) w}
#' \item{\code{Phi}}{ Gram matrix}
#' \item{\code{Phinv}}{ (Pseudo)-Inverse of Gram matrix}
#' \item{\code{w0}}{ Mean of observations (y)}
#' \item{\code{dMax}}{ Maximum observed distance}
#' \item{\code{D}}{ Matrix of distances between all samples}
#' \item{\code{beta}}{ See parameters}
#' \item{\code{fbeta}}{ See parameters}
#' \item{\code{distanceFunction}}{ See parameters}
#' }
#'
#' @seealso \code{\link{predict.modelRBFN}}
#'
#' @references Moraglio, Alberto, and Ahmed Kattan. "Geometric generalisation of surrogate model based optimisation to combinatorial spaces." Evolutionary Computation in Combinatorial Optimization. Springer Berlin Heidelberg, 2011. 142-154.
#'
#' @examples
#' #set random number generator seed
#' set.seed(1)
#' #simple test landscape
#' fn <- landscapeGeneratorUNI(1:5,distancePermutationHamming)
#' #generate data for training and test
#' x <- unique(replicate(40,sample(5),FALSE))
#' xtest <- x[-(1:15)]
#' x <- x[1:15]
#' #determine true objective function values
#' y <- fn(x)
#' ytest <- fn(xtest)
#' #build model
#' fit <- modelRBFN(x,y,distancePermutationHamming)
#' #predicted obj. function values
#' ypred <- predict(fit,xtest)$y
#' #plot
#' plot(ytest,ypred,xlab="true value",ylab="predicted value",
#'     pch=20,xlim=c(0.3,1),ylim=c(min(ypred)-0.1,max(ypred)+0.1))
#' abline(0,1,lty=2)
#' @export
###################################################################################
modelRBFN <- function(x,y,distanceFunction,control=list()){
  #x sample locations
  #y observations
  #distanceFunction function that returns distance matrix
  con<-list( fbeta = function(x) 1/(2*(x^2)) );
  con[names(control)] <- control;
  control<-con;
  #calculate distance matrix?
  if(is.null(control$distances))
    D <- distanceMatrix(x,distanceFunction)
  else
    D <- control$distances
  dMax <- max(D) #maximum distance between samples
  if(is.null(control$beta))
    beta <- control$fbeta(dMax) #force a global model, each center has influence everywhere. beta is spread of each RBF. Each now covers the whole search space.
  else
    beta <- control$beta
  w0 <- mean(y) #all function values out of reach of any center (see spread due to beta) will be set to the average.
  Phi <- exp(-beta*D^2)
  Phinv <- ginv(Phi) #pseudo inverse (because not guaranteed to be positive definite in non-euclidean space)
  w <- Phinv%*%(y-w0) #calculation of wi.
  SSQ <- var(y)
  fit <- list(SSQ=SSQ, centers= x, w=w, Phi=Phi, Phinv=Phinv, beta=beta, fbeta=control$fbeta, w0=w0, dMax=dMax, D=D, predAll=FALSE, distanceFunction=distanceFunction) #todo not all parameters needed in predictor?
  class(fit) <- "modelRBFN"
  return(fit)
}

###################################################################################
#' Radial Basis Function Network
#'
#' DEPRECATED version of the RBFN model, please use \code{\link{modelRBFN}}
#'
#' @param x list of samples in input space
#' @param y column vector of observations for each sample
#' @param distanceFunction a suitable distance function of type f(x1,x2), returning a scalar distance value
#' @param control options for the model building procedure
#'
#' @keywords internal
#' @export
###################################################################################
combinatorialRBFN <- function(x, y, distanceFunction, control = list()){
  .Deprecated("modelRBFN")
  modelRBFN(x,y,distanceFunction,control)
}

###################################################################################
#' Predict: Combinatorial RBFN
#'
#' Predict with a model fit resulting from \code{\link{modelRBFN}}.
#'
#' @param object fit of the RBFN model (settings and parameters), of class \code{modelRBFN}.
#' @param x list of samples to be predicted
#' @param ... further arguments, not used
#'
#' @return Returned value depends on the setting of \code{object$predAll}\cr
#' TRUE: list with function value (mean) \code{$y} and uncertainty estimate \code{$s} (standard deviation)\cr
#' FALSE: \code{$y} only
#'
#' @seealso \code{\link{modelRBFN}}
#' @export
###################################################################################
predict.modelRBFN <- function(object,x,...){
  #x is a new sample, fit is the list of parameters from buildRBFN
  if(!is.list(x))x<-list(x)
  psi <- matrix(unlist(lapply(x,distanceVector,object$centers,object$distanceFunction)),length(object$centers),length(x))
  psi <- exp(-object$beta*psi^2)
  pred <- colSums(apply(psi,2,"*",object$w))+object$w0
  res <- list(y=pred)
  if (object$predAll){
    variance <- object$SSQ*(1-diag(t(psi)%*%(object$Phinv%*%(psi))))
    s <- sqrt(abs(variance))
    res$s <- as.numeric(s)
  }
  res
}
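# Usage note (a sketch, assuming the `fit` and `xtest` objects from the
# modelRBFN example above): predict.modelRBFN only returns the uncertainty
# estimate $s if the predAll flag of the fit is enabled first:
# fit$predAll <- TRUE
# predict(fit, xtest)$s   # standard deviation estimates for xtest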
/scratch/gouwar.j/cran-all/cranData/CEGO/R/modelRBFN.R
###################################################################################
#' Nearest CNSD matrix
#'
#' This function
#' implements the alternating projection algorithm by Glunt et al. (1990) to calculate the nearest conditionally
#' negative semi-definite (CNSD) matrix (or: the nearest Euclidean distance matrix).
#' The function is similar to the \code{\link[Matrix]{nearPD}} function from the \code{Matrix} package,
#' which implements a very similar algorithm for finding the nearest Positive Semi-Definite (PSD) matrix.
#'
#' @param x symmetric matrix, to be turned into a CNSD matrix.
#' @param eig.tol eigenvalue tolerance value. Eigenvalues between \code{-tol} and \code{tol} are assumed to be zero.
#' @param conv.tol convergence tolerance value. The algorithm stops if the norm of the difference between two iterations is below this value.
#' @param maxit maximum number of iterations. The algorithm stops if this value is exceeded, even if not converged.
#' @param conv.norm.type type of norm, by default the F-norm (Frobenius). See \code{\link[base]{norm}} for other choices.
#'
#' @return list with:
#' \describe{
#' \item{\code{mat}}{ nearest CNSD matrix}
#' \item{\code{normF}}{ F-norm between original and resulting matrices}
#' \item{\code{iterations}}{ the number of iterations performed}
#' \item{\code{rel.tol}}{ the relative value used for the tolerance convergence criterion}
#' \item{\code{converged}}{ a boolean that records whether the algorithm converged}
#' }
#'
#' @seealso \code{\link[Matrix]{nearPD}}, \code{\link{correctionCNSD}}, \code{\link{correctionDistanceMatrix}}
#'
#' @export
#' @examples
#' # example using Insert distance with permutations:
#' x <- list(c(2,1,4,3),c(2,4,3,1),c(4,2,1,3),c(4,3,2,1),c(1,4,3,2))
#' D <- distanceMatrix(x,distancePermutationInsert)
#' print(D)
#' is.CNSD(D)
#' nearD <- nearCNSD(D)
#' print(nearD)
#' is.CNSD(nearD$mat)
#' # another example, using the matrix from Glunt et al. (1990):
#' D <- matrix(c(0,1,1,1,0,9,1,9,0),3,3)
#' print(D)
#' is.CNSD(D)
#' nearD <- nearCNSD(D)
#' print(nearD)
#' is.CNSD(nearD$mat)
#' # note that the resulting values given by Glunt et al. (1990) are 19/9 and 76/9
#' @references Glunt, W.; Hayden, T. L.; Hong, S. and Wells, J.
An alternating projection algorithm for computing the nearest Euclidean distance matrix, SIAM Journal on Matrix Analysis and Applications, SIAM, 1990, 11, 589-600 ################################################################################### nearCNSD <- function (x, eig.tol = 1e-8, conv.tol = 1e-8, maxit = 1000, conv.norm.type = "F") { n <- ncol(x) X <- -x iter <- 0 converged <- FALSE conv <- Inf #construct transformation matrix (for Projection 1) v <- cbind(c(rep(1,n-1),1+sqrt(n))) Q <- diag(n) - 2 * (1/as.numeric(crossprod(v,v))) * tcrossprod(v,v) #main loop while (iter < maxit && !converged) { ## store result of last iteration Y <- X ## ### Compute first Projection P1: Projection to CPSD matrix ## #Compute F Fq <- Q %*% X %*% Q #transform #Eigen decomposition of submatrix F1 F1 <- Fq[-n,-n] e <- eigen(F1,symmetric=TRUE) U <- e$vectors lambda <- e$values p <- lambda > eig.tol * lambda[1] if (!any(p)) stop("Matrix seems conditionally positive semi-definite") U <- U[, p, drop = FALSE] F1q <- tcrossprod(U * rep(lambda[p], each = nrow(U)), U) #U %*% diag(pmax(lambda,0)) %*% t(U) Fq[-n,-n] <- F1q #replace NON-PSD F1 in F by PSD F1q P1F <- Q %*% Fq %*% Q #compute D from F, yielding CPSD matrix ## ###Compute second Projection P2: P_2(F) = F - diag(F) ## P2P1F <- P1F diag(P2P1F) <- 0 ## correction X <- Y + (P2P1F - P1F) ## calculate convergence rate based on chosen norm conv <- norm(Y - X, conv.norm.type) converged <- (conv <= conv.tol) # determine whether converged # counter of iterations iter <- iter + 1 } if (!converged) warning(gettextf("'nearCNSD()' did not converge in %d iterations",iter)) list(mat = -P2P1F, normF = norm(x - -P2P1F, "F"), iterations = iter, rel.tol = conv, converged = converged) }
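# Quick sanity check (a sketch, reusing D from the examples above): the
# reported Frobenius norm should match a direct computation, and the repaired
# matrix should pass the CNSD test:
# nearD <- nearCNSD(D)
# all.equal(nearD$normF, norm(D - nearD$mat, "F"))   # TRUE
# is.CNSD(nearD$mat)                                 # TRUE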
/scratch/gouwar.j/cran-all/cranData/CEGO/R/nearCNSD.R
################################################################################### #' Hamming Distance for Vectors #' #' The number of unequal elements of two vectors (which may be of unequal length), divided by the number of elements (of the larger vector). #' #' @param x first vector #' @param y second vector #' #' @return numeric distance value \deqn{d(x,y)}, scaled to values between 0 and 1 #' #' @examples #' #e.g., used for distance between bit strings #' x <- c(0,1,0,1,0) #' y <- c(1,1,0,0,1) #' distanceNumericHamming(x,y) #' p <- replicate(10,sample(c(0,1),5,replace=TRUE),simplify=FALSE) #' distanceMatrix(p,distanceNumericHamming) #' #' @export ################################################################################### distanceNumericHamming <- function(x, y){ sum(x != y)/max(length(x),length(y)) } ################################################################################### #' Levenshtein Distance for Numeric Vectors #' #' Levenshtein distance for two numeric vectors, e.g., bit vectors. #' #' @param x first vector (numeric) #' @param y second vector (numeric) #' #' @return numeric distance value \deqn{d(x,y)}, scaled to values between 0 and 1 #' #' @examples #' #e.g., used for distance between bit strings #' x <- c(0,1,0,1,0) #' y <- c(1,1,0,0,1) #' distanceNumericLevenshtein(x,y) #' p <- replicate(10,sample(c(0,1),5,replace=TRUE),simplify=FALSE) #' distanceMatrix(p,distanceNumericLevenshtein) #' #' @export ################################################################################### distanceNumericLevenshtein <- function(x, y){ #.Call("numericDistanceLevenshtein", as.numeric(x),as.numeric(y), PACKAGE="CEGO")/max(length(x),length(y)) .Call(C_numericDistanceLevenshtein, as.numeric(x),as.numeric(y))/max(length(x),length(y)) } ################################################################################### #' Longest Common Substring for Numeric Vectors #' #' Longest common substring distance for two numeric vectors, e.g., bit vectors. #' #' @param x first vector (numeric) #' @param y second vector (numeric) #' #' @return numeric distance value \deqn{d(x,y)}, scaled to values between 0 and 1 #' #' @examples #' #e.g., used for distance between bit strings #' x <- c(0,1,0,1,0) #' y <- c(1,1,0,0,1) #' distanceNumericLCStr(x,y) #' p <- replicate(10,sample(c(0,1),5,replace=TRUE),simplify=FALSE) #' distanceMatrix(p,distanceNumericLCStr) #' #' @export ################################################################################### distanceNumericLCStr <- function(x, y){ #.Call("numericDistanceLongestCommonSubstring",as.numeric(x),as.numeric(y), PACKAGE="CEGO")/max(length(x),length(y)) .Call(C_numericDistanceLongestCommonSubstring,as.numeric(x),as.numeric(y))/max(length(x),length(y)) }
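# Side-by-side comparison of the three measures (a sketch, reusing the x and y
# from the examples above; only the Hamming value is computed by hand here):
# x <- c(0,1,0,1,0); y <- c(1,1,0,0,1)
# distanceNumericHamming(x, y)      # 0.6: three of five positions differ
# distanceNumericLevenshtein(x, y)  # edit distance, scaled by the larger length
# distanceNumericLCStr(x, y)        # longest-common-substring distance, scaled likewise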
/scratch/gouwar.j/cran-all/cranData/CEGO/R/numericDistances.R
################################################################################### #' Two-Opt #' #' Implementation of a Two-Opt local search. #' #' @param x start solution of the local search #' @param fun function that determines cost or length of a route/permutation #' @param control (list), with the options: #' \describe{ #' \item{\code{archive}}{ Whether to keep all candidate solutions and their fitness in an archive (TRUE) or not (FALSE). Default is TRUE.} #' \item{\code{budget}}{ The limit on number of target function evaluations (stopping criterion) (default: 100)} #' \item{\code{creationFunction}}{ Function to create individuals/solutions in search space. Default is a function that creates random permutations of length 6} #' \item{\code{vectorized}}{ Boolean. Defines whether target function is vectorized (takes a list of solutions #' as argument) or not (takes single solution as argument). Default: FALSE} #' } #' #' @return a list with: #' \describe{ #' \item{\code{xbest}}{ best solution found} #' \item{\code{ybest}}{ fitness of the best solution} #' \item{\code{count}}{ number of performed target function evaluations } #' } #' #' @examples #' seed=0 #' #distance #' dF <- distancePermutationHamming #' #creation #' cF <- function()sample(5) #' #objective function #' lF <- landscapeGeneratorUNI(1:5,dF) #' #start optimization #' set.seed(seed) #' res <- optim2Opt(,lF,list(creationFunction=cF,budget=100, #' vectorized=TRUE)) ##target function is "vectorized", expects list of solutions as input #' res #' #' @references Wikipedia contributors. "2-opt." Wikipedia, The Free Encyclopedia. Wikipedia, The Free Encyclopedia, 13 Jun. 2014. Web. 21 Oct. 2014. #' #' @seealso \code{\link{optimCEGO}}, \code{\link{optimEA}}, \code{\link{optimRS}}, \code{\link{optimMaxMinDist}} #' #' @export ################################################################################### optim2Opt <- function(x=NULL,fun,control=list()){ con<-list(budget=100 , vectorized=FALSE , creationFunction = solutionFunctionGeneratorPermutation(6) , archive =TRUE ) con[names(control)] <- control control <- con archive <- control$archive budget <- control$budget vectorized <- control$vectorized creationFunction <- control$creationFunction if(is.null(x)){ route=creationFunction() }else{ #given start population route=x[[1]] } improvement=TRUE bestRoute=route if (vectorized) bestDist = fun(list(route)) else bestDist = fun(route) if(archive){ fithist <- bestDist xhist <- route } N=length(route) count=1 while(improvement){ improvement=FALSE i=1 while(i<=(N-1)){ for(k in (i+1):N){ newRoute = step2Opt(bestRoute,i,k) if (vectorized) newDist = fun(list(newRoute)) else newDist = fun(newRoute) if(archive){ xhist <- append(xhist,newRoute) fithist <- c(fithist, newDist) } count=count+1 if (newDist < bestDist) { bestRoute = newRoute bestDist = newDist #i=N improvement=TRUE break; } if(count == budget){ improvement=FALSE i=N break; } } i=i+1 } } if(archive) return(list(xbest=bestRoute,ybest=bestDist,x=xhist,y=fithist, count=count)) else return(list(xbest=bestRoute,ybest=bestDist,count=count)) } ################################################################################### #' 2-Opt Step #' #' Helper function: A single 2-opt step for \code{\link{optim2Opt}} #' #' @param route route to be partially reversed #' @param i start of reversal #' @param k end of reversal #' #' @return a new route #' #' @keywords internal ################################################################################### step2Opt <- function(route, i, k) { 
newRoute <- route newRoute[i:k] <- rev(route[i:k]) #reversal newRoute }
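# Quick illustration of a single 2-opt step (hand-checked): reversing the
# segment between positions i=2 and k=4 of the route 1,2,3,4,5:
# step2Opt(c(1,2,3,4,5), 2, 4)   # yields c(1, 4, 3, 2, 5)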
/scratch/gouwar.j/cran-all/cranData/CEGO/R/optim2opt.R
###################################################################################
#' Combinatorial Efficient Global Optimization
#'
#' Model-based optimization for combinatorial or mixed problems. Based on measures of distance or dissimilarity.
#'
#' @param x Optional initial design as a list. If NULL (default), \code{creationFunction} (in \code{control} list) is used to create initial design.
#' If \code{x} has fewer individuals than specified by \code{control$evalInit}, \code{creationFunction} will fill up the design.
#' @param fun target function to be minimized
#' @param control (list), with the options of optimization and model building approaches employed:
#' \describe{
#' \item{\code{evalInit}}{ Number of initial evaluations (i.e., size of the initial design), integer, default is \code{2}}
#' \item{\code{vectorized}}{ Boolean. Defines whether target function is vectorized (takes a list of solutions as argument) or not (takes single solution as argument). Default: FALSE}
#' \item{\code{verbosity}}{ Level of text output during run. Defaults to 0, no output.}
#' \item{\code{plotting}}{ Plot optimization progress during run (TRUE) or not (FALSE). Default is FALSE.}
#' \item{\code{targetY}}{ optimal value to be found, stopping criterion, default is \code{-Inf}}
#' \item{\code{budget}}{ maximum number of target function evaluations, default is \code{100}}
#' \item{\code{creationRetries}}{ When a model does not predict an actually improving solution, a random exploration step is performed. \code{creationRetries} solutions are created randomly.
#' For each, distance to all known solutions is calculated. The minimum distance is recorded for each random solution.
#' The random solution with maximal minimum distance is chosen to be evaluated in the next iteration.}
#' \item{\code{model}}{ Model to be used as a surrogate of the target function. Default is "K" (Kriging). Also
#' available are: "LM" (linear, distance-based model), "RBFN" Radial Basis Function Network.}
#' \item{\code{modelSettings}}{ List of settings for \code{model} building, passed on as the \code{control} argument to the model training functions \code{\link{modelKriging}}, \code{\link{modelLinear}}, \code{\link{modelRBFN}}.}
#' \item{\code{infill}}{ This parameter specifies a function to be used for the infill criterion (e.g., the default is expected improvement \code{infillExpectedImprovement}).
#' To use no specific infill criterion this has to be set to \code{NA}, in which case the prediction of the surrogate model is used. Infill criteria are only used with models that may provide some error estimate with predictions.}
#' \item{\code{optimizer}}{ Optimizer that finds the minimum of the surrogate model. Default is \code{\link{optimEA}}, an Evolutionary Algorithm.}
#' \item{\code{optimizerSettings}}{ List of settings (\code{control}) for the \code{optimizer} function.}
#' \item{\code{initialDesign}}{ Design function that generates the initial design. Default is \code{designMaxMinDist}, which creates a design that maximizes the minimum distance between points.}
#' \item{\code{initialDesignSettings}}{ List of settings (\code{control}) for the \code{initialDesign} function.}
#' \item{\code{creationFunction}}{ Function to create individuals/solutions in search space. Default is a function that creates random permutations of length 6}
#' \item{\code{distanceFunction}}{ a suitable distance function of type f(x1,x2), returning a scalar distance value, preferably between 0 and 1.
#' Maximum distances larger than 1 are not a problem, but may yield scaling bias when different measures are compared.
#' Should be non-negative and symmetric. With the setting \code{control$model="K"} this can also be a list of different distance functions.
#' Default is Hamming distance for permutations: distancePermutationHamming.}
#' }
#'
#' @return a list:
#' \describe{
#' \item{\code{xbest}}{ best solution found}
#' \item{\code{ybest}}{ fitness of the best solution}
#' \item{\code{x}}{ history of all evaluated solutions}
#' \item{\code{y}}{ corresponding target function values f(x)}
#' \item{\code{fit}}{ model-fit created in the last iteration}
#' \item{\code{fpred}}{ prediction function created in the last iteration}
#' \item{\code{count}}{ number of performed target function evaluations}
#' \item{\code{message}}{ message string, giving information on termination reason}
#' \item{\code{convergence}}{ error/status code: \code{-1} for termination due
#' to failed model building, \code{0} for termination due to depleted budget,
#' \code{1} if attained objective value is equal to or below target (\code{control$targetY})}
#' }
#'
#' @examples
#' seed <- 0
#' #distance
#' dF <- distancePermutationHamming
#' #mutation
#' mF <- mutationPermutationSwap
#' #recombination
#' rF <- recombinationPermutationCycleCrossover
#' #creation
#' cF <- function()sample(5)
#' #objective function
#' lF <- landscapeGeneratorUNI(1:5,dF)
#' #start optimization
#' set.seed(seed)
#' res1 <- optimCEGO(,lF,list(
#' creationFunction=cF,
#' distanceFunction=dF,
#' optimizerSettings=list(budget=100,popsize=10,
#' mutationFunction=mF,recombinationFunction=rF),
#' evalInit=5,budget=15,targetY=0,verbosity=1,model=modelKriging,
#' vectorized=TRUE)) ##target function is "vectorized", expects list as input
#' set.seed(seed)
#' res2 <- optimCEGO(,lF,list(
#' creationFunction=cF,
#' distanceFunction=dF,
#' optimizerSettings=list(budget=100,popsize=10,
#' mutationFunction=mF,recombinationFunction=rF),
#' evalInit=5,budget=15,targetY=0,verbosity=1,model=modelRBFN,
#' vectorized=TRUE)) ##target function is "vectorized", expects list as input
#' res1$xbest
#' res2$xbest
#'
#' @seealso \code{\link{modelKriging}}, \code{\link{modelLinear}}, \code{\link{modelRBFN}}, \code{\link{buildModel}}, \code{\link{optimEA}}
#'
#' @references Zaefferer, Martin; Stork, Joerg; Friese, Martina; Fischbach, Andreas; Naujoks, Boris; Bartz-Beielstein, Thomas. (2014). Efficient global optimization for combinatorial problems. In Proceedings of the 2014 conference on Genetic and evolutionary computation (GECCO '14). ACM, New York, NY, USA, 871-878. DOI=10.1145/2576768.2598282
#' @references Zaefferer, Martin; Stork, Joerg; Bartz-Beielstein, Thomas. (2014). Distance Measures for Permutations in Combinatorial Efficient Global Optimization. In Parallel Problem Solving from Nature - PPSN XIII (p. 373-383). Springer International Publishing.
#' #' @export ################################################################################### optimCEGO <- function(x=NULL,fun,control=list()){ ## default settings con<-list(evalInit = 2 , vectorized = FALSE , verbosity = 0 , plotting = FALSE , targetY = -Inf , budget = 100 , creationRetries = 100 , distanceFunction = distancePermutationHamming , creationFunction = solutionFunctionGeneratorPermutation(6) , infill= infillExpectedImprovement , model = modelKriging , modelSettings= list() , optimizer = optimEA , optimizerSettings = list() , initialDesign = designMaxMinDist , archiveModelInfo = NULL #TODO document , initialDesignSettings = list()) con[names(control)] <- control control<-con rm(con) count <- control$evalInit archiveModelInfo <- control$archiveModelInfo vectorized <- control$vectorized verbosity <- control$verbosity plotting <- control$plotting creationFunction <- control$creationFunction distanceFunction <- control$distanceFunction if(is.null(control$initialDesignSettings$distanceFunction)) control$initialDesignSettings$distanceFunction <- distanceFunction ## if target function not vectorized: vectorize with lapply fun #lazy load if(!vectorized) fn <- function(x)unlist(lapply(x,fun)) else fn <- fun ## Create main object of this function, which will also be the return value res <- list(xbest=NA, ybest=NA, x=NA,y=NA,distances=NA,modelArchive=NA,count=count,convergence=0,message="") ## Termination information: msg <- "Termination message:" ## Create initial design of experiment res$x <- control$initialDesign(x,creationFunction,count,control$initialDesignSettings) ## Calculate distances between samples. If distance function has parameters: do not calculate. distanceHasParam <- FALSE if(is.function(distanceFunction)){ if(length(distanceFunction)==1) distanceHasParam <- length(formalArgs(distanceFunction))>2 else distanceHasParam <- any(sapply(sapply(distanceFunction,formalArgs,simplify=FALSE),length) > 2) if(!distanceHasParam) res$distances <- distanceMatrixWrapper(res$x,distanceFunction) } ## Evaluate initial population res$y <- fn(res$x) ## determine best indbest <- which.min(res$y) res$ybest <- res$y[[indbest]] res$xbest <- res$x[[indbest]] ## build initial model model <- buildModel(res,distanceFunction,control) ## archive desired model information if(!is.null(archiveModelInfo)){ res$modelArchive <- list() archiveIndex <- 1 if(identical(model,NA)){ res$modelArchive[[archiveIndex]] <- rep(NA,length(archiveModelInfo)) names(res$modelArchive[[archiveIndex]]) <- archiveModelInfo }else{ #todo! 
res$modelArchive[[archiveIndex]] <- model$fit[archiveModelInfo]
      names(res$modelArchive[[archiveIndex]]) <- archiveModelInfo
    }
  }

  ## check whether EI infill is used
  useEI <- is.function(control$infill)

  ## main loop
  while((res$count < control$budget) & (res$ybest > control$targetY)){
    ## Optimize the surrogate:
    if(!identical(model,NA)){
      optimres <- optimizeModel(res,creationFunction,model,control)
      ## Handle duplicated new candidate solution
      duplicate <- list(optimres$xbest) %in% res$x
      ## Check whether next candidate solution is better than the best observed so far (based on model prediction)
      improved <- optimres$ybest < optimres$fpredbestKnownY
    }else{# model building failed, force termination
      msg <- paste(msg,"Model building failed, optimization stopped prematurely.")
      warning("Model building failed in optimCEGO, optimization stopped prematurely.")
      res$convergence <- -1
      break;
    }
    ## Update evaluation counter
    res$count <- res$count+1
    if(!duplicate && ((improved || useEI))){ #exploitation step
      #for a new individual to be accepted, it has to have a better predicted value than the prediction for the best known individual. One could also use the actual value of the best known individual, but this would deteriorate results in case of an offset.
      res$x[[res$count]] <- optimres$xbest #NOTE: exploitation and exploration steps are the same when EI is used, hence the "||"
    }else{ #exploration step: no promising individual found, or promising individual is a duplicate -> create a new one randomly
      if(!distanceHasParam){
        designSize <- length(res$x)+1
        if(is.list(distanceFunction)) #in case of multiple distances
          dfun <- distanceFunction[[1]]
        else
          dfun <- distanceFunction
        xc <- designMaxMinDist(res$x,creationFunction,designSize,control=list(budget=control$creationRetries,distanceFunction=dfun))
        res$x[[res$count]] <- xc[[designSize]] #this maximizes distance, but may still create a duplicate if max(min(dist))==0, e.g. if all randomly created individuals are duplicates of known solutions
      }else{
        res$x[[res$count]] <- optimres$xbest
      }
    }
    res$x <- removeDuplicates(res$x, creationFunction)
    ## evaluate with real target function
    res$y <- c(res$y,fn(res$x[res$count]))
    ## Logging
    indbest <- which.min(res$y)
    res$ybest <- res$y[[indbest]]
    res$xbest <- res$x[[indbest]]
    ## Plotting and Text output
    if(verbosity > 0)
      print(paste("Evaluations:",res$count," Quality:",res$ybest))
    if(plotting){
      plot(res$y,type="l",xlab="number of evaluations", ylab="y")
      abline(res$ybest,0,lty=2)
    }
    ## Update the distance matrix #TODO what if distance parameters?
    if(!distanceHasParam & is.function(distanceFunction))
      res$distances <- distanceMatrixUpdate(res$distances,res$x,distanceFunction)
    ## Update surrogate model and prediction function:
    model <- buildModel(res,distanceFunction,control)
    ## archive desired model information
    if(!is.null(archiveModelInfo)){
      archiveIndex <- archiveIndex+1
      if(identical(model,NA)){
        res$modelArchive[[archiveIndex]] <- rep(NA,length(archiveModelInfo))
        names(res$modelArchive[[archiveIndex]]) <- archiveModelInfo
      }else{
        res$modelArchive[[archiveIndex]] <- model$fit[archiveModelInfo]
        names(res$modelArchive[[archiveIndex]]) <- archiveModelInfo
      }
    }
  }
  #stopping criteria information for user:
  if(min(res$ybest,na.rm=TRUE) <= control$targetY) {
    msg <- paste(msg,"Successfully achieved target fitness.")
    res$convergence <- 1
  }else if(res$count >= control$budget){ #budget exceeded
    msg <- paste(msg,"Target function evaluation budget depleted.")
  }
  res$message <- msg
  res$distances <- NULL
  res #return
}

###################################################################################
#' Model building
#'
#' Model building support function for optimCEGO. Should not be called directly.
#'
#' @param res list with elements: (x) list of samples in input space and (y)
#' matrix, column vector of observations for each sample
#' @param distanceFunction a suitable distance function of type f(x1,x2), returning a scalar distance value, preferably between 0 and 1.
#' Maximum distances larger than 1 are no problem, but may yield scaling bias when different measures are compared.
#' Should be non-negative and symmetric. In case Kriging is chosen, it can also be a list of several distance functions. In this case, MLE is used
#' to determine the most suited distance measure (see the last reference).
#' @param control list with options:
#' \describe{
#' \item{\code{model}}{ Model to be used as a surrogate of the target function. Default is "K" (Kriging). Also
#' available are: "LM" (linear, distance-based model), "RBFN" Radial Basis Function Network.}
#' \item{\code{modelSettings}}{ List of settings for model building, passed on as the control argument to the model training functions \code{\link{modelKriging}}, \code{\link{modelLinear}}, \code{\link{modelRBFN}}.}
#' \item{\code{infill}}{ This parameter specifies a function to be used for the infill criterion (e.g., the default is expected improvement \code{infillExpectedImprovement}).
#' To use no specific infill criterion this has to be set to \code{NA}.
Infill criteria are only used with models that may provide some error estimate with predictions.} #' } #' #' @return a list: #' \describe{ #' \item{\code{fit}}{ model-fit } #' \item{\code{fpred}}{ prediction function} #' } #' #' @seealso \code{\link{optimCEGO}} #' #' @keywords internal #' @export ################################################################################### buildModel <- function(res,distanceFunction,control){ y <- res$y x <- res$x control$modelSettings$distances <- res$distances y #against lazy evaluation if(identical(control$model,"RBFN")) control$model <- modelRBFN else if(identical(control$model,"LM")) control$model <- modelLinear else if(identical(control$model,"K")) control$model <- modelKriging fit<-try(control$model(x,y,distanceFunction,control$modelSettings),TRUE) #fit <- control$model(x,y,distanceFunction,control$modelSettings) if(class(fit)[1] == "try-error"){ #warning("Model building in optimCEGO failed.") #same warning given in optimCEGO function return(NA) } fit$predAll <- is.function(control$infill) fit if(is.function(control$infill)){ fpred <- function(x){ res=predict(fit,x) control$infill(res$y,res$s,min(y)) } }else{ fpred <- function(x){predict(fit,x)$y} } list(fit=fit,fpred=fpred) } ################################################################################### #' Optimize Surrogate Model #' #' Interface to the optimization of the surrogate model #' #' @param res result state of the optimization process #' @param creationFunction Function to create individuals/solutions in search space. #' @param model result of the buildModel function #' @param control list of settings, from optimCEGO #' #' @return result list of the optimizer #' #' @seealso \code{\link{optimCEGO}} #' #' @keywords internal ################################################################################### optimizeModel <- function(res,creationFunction,model,control){ if(identical(control$optimizer,"EA")){ control$optimizer=optimEA } if(is.null(control$optimizerSettings$creationFunction)) control$optimizerSettings$creationFunction <- creationFunction if(is.null(control$optimizerSettings$vectorized)) control$optimizerSettings$vectorized <- TRUE optimres <- control$optimizer(NULL,model$fpred,control$optimizerSettings) optimres$fpredbestKnownY <- model$fpred(list(res$xbest)) optimres }
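# Usage sketch (hypothetical settings, building on the optimCEGO example
# above): any optimizer with the interface f(x, fun, control) can be plugged
# into the surrogate optimization step, e.g., random search via optimRS
# instead of the default EA:
# ctrl <- list(distanceFunction=dF, creationFunction=cF,
#              optimizer=optimRS, optimizerSettings=list(budget=200))
# res <- optimCEGO(fun=lF, control=ctrl)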
/scratch/gouwar.j/cran-all/cranData/CEGO/R/optimCEGO.R
###################################################################################
#' Evolutionary Algorithm for Combinatorial Optimization
#'
#' A basic implementation of a simple Evolutionary Algorithm for Combinatorial Optimization. Default evolutionary operators
#' aim at permutation optimization problems.
#'
#' @param x Optional start individual(s) as a list. If NULL (default), \code{creationFunction} (in \code{control} list) is used to create initial design.
#' If \code{x} has fewer individuals than the population size, creationFunction will fill up the rest.
#' @param fun target function to be minimized
#' @param control (list), with the options:
#' \describe{
#' \item{\code{budget}}{The limit on number of target function evaluations (stopping criterion) (default: 1000).}
#' \item{\code{popsize}}{Population size (default: 100).}
#' \item{\code{generations}}{Number of generations (stopping criterion) (default: Inf).}
#' \item{\code{targetY}}{Target function value (stopping criterion) (default: -Inf).}
#' \item{\code{vectorized}}{Boolean. Defines whether target function is vectorized (takes a list of solutions as argument) or not (takes single solution as argument). Default: FALSE.}
#' \item{\code{verbosity}}{Level of text output during run. Defaults to 0, no output.}
#' \item{\code{plotting}}{Plot optimization progress during run (TRUE) or not (FALSE). Default is FALSE.}
#' \item{\code{archive}}{Whether to keep all candidate solutions and their fitness in an archive (TRUE) or not (FALSE). Default is TRUE. New solutions that are identical to an archived one will not be evaluated. Instead, their fitness is taken from the archive.}
#' \item{\code{recombinationFunction}}{Function that performs recombination, default: \code{\link{recombinationPermutationCycleCrossover}}, which is cycle crossover for permutations.}
#' \item{\code{recombinationRate}}{Number of offspring, defined by the fraction of the population (popsize) that will be recombined.}
#' \item{\code{mutationFunction}}{Function that performs mutation, default: \code{\link{mutationPermutationSwap}}, which is swap mutation for permutations.}
#' \item{\code{parameters}}{Default parameter list for the algorithm, e.g., mutation rate, etc.}
#' \item{\code{selection}}{Survival selection process: "tournament" (default) or "truncation".}
#' \item{\code{tournamentSize}}{Tournament size (default: 2).}
#' \item{\code{tournamentProbability}}{Tournament probability (default: 0.9).}
#' \item{\code{localSearchFunction}}{If specified, this function is used for a local search step. Default is NULL. }
#' \item{\code{localSearchRate}}{Specifies on what fraction of the population local search is applied. Default is zero. Maximum is 1 (100 percent).}
#' \item{\code{localSearchSettings}}{List of settings passed to the local search function control parameter.}
#' \item{\code{stoppingCriterionFunction}}{Custom additional stopping criterion. Function evaluated on the population, receiving all individuals (list) and their fitness (vector). If the result is FALSE, the algorithm stops.}
#' \item{\code{creationFunction}}{Function to create individuals/solutions in search space. Default is a function that creates random permutations of length 6.}
#' \item{\code{selfAdaption}}{An optional ParamHelpers object that describes parameters of the optimization (see \code{parameters}) which are subject to self-adaption.
An example is given in \link{mutationSelfAdapt}.}
#' \item{\code{selfAdaptTau}}{Positive numeric value that controls the learning rate of numerical/integer self-adaptive parameters.}
#' \item{\code{selfAdaptP}}{Value in [0,1]. A probability of mutation for all categorical, self-adaptive parameters.}
#' }
#'
#' @return a list:
#' \describe{
#' \item{\code{xbest}}{best solution found.}
#' \item{\code{ybest}}{fitness of the best solution.}
#' \item{\code{x}}{history of all evaluated solutions.}
#' \item{\code{y}}{corresponding target function values f(x).}
#' \item{\code{count}}{number of performed target function evaluations.}
#' \item{\code{message}}{Termination message: Which stopping criterion was reached.}
#' \item{\code{population}}{Last population.}
#' \item{\code{fitness}}{Fitness of last population.}
#' }
#'
#' @examples
#' #First example: permutation optimization
#' seed=0
#' #distance
#' dF <- distancePermutationHamming
#' #mutation
#' mF <- mutationPermutationSwap
#' #recombination
#' rF <- recombinationPermutationCycleCrossover
#' #creation
#' cF <- function()sample(5)
#' #objective function
#' lF <- landscapeGeneratorUNI(1:5,dF)
#' #start optimization
#' set.seed(seed)
#' res <- optimEA(,lF,list(creationFunction=cF,mutationFunction=mF,recombinationFunction=rF,
#' 		popsize=6,budget=60,targetY=0,verbosity=1,
#' 		vectorized=TRUE)) ##target function is "vectorized", expects list as input
#' res$xbest
#' #Second example: binary string optimization
#' #number of bits
#' N <- 50
#' #target function (simple example)
#' f <- function(x){
#'  sum(x)
#' }
#' #function to create random Individuals
#' cf <- function(){
#'  sample(c(FALSE,TRUE),N,replace=TRUE)
#' }
#' #control list
#' cntrl <- list(
#' 	budget = 100,
#' 	popsize = 5,
#' 	creationFunction = cf,
#' 	vectorized = FALSE, #set to TRUE if f evaluates a list of individuals
#' 	recombinationFunction = recombinationBinary2Point,
#' 	recombinationRate = 0.1,
#' 	mutationFunction = mutationBinaryBitFlip,
#' 	parameters=list(mutationRate = 1/N),
#' 	archive=FALSE #recommended for larger budgets. do not change.
#' )
#' #start algorithm
#' set.seed(1)
#' res <- optimEA(fun=f,control=cntrl)
#' res$xbest
#' res$ybest
#'
#' @seealso \code{\link{optimCEGO}}, \code{\link{optimRS}}, \code{\link{optim2Opt}}, \code{\link{optimMaxMinDist}}
#'
#' @export
#'
#' @import ParamHelpers
#' @import fastmatch
###################################################################################
optimEA <- function(x=NULL,fun,control=list()){
  #default controls:
  con<-list(budget = 1000
       , popsize = 100
       , generations = Inf
       , targetY = -Inf
       , vectorized=FALSE
       , creationFunction = solutionFunctionGeneratorPermutation(6)
       , recombinationRate=0.5
       , parameters = list() #manually set parameters for recombination and mutation TODO
       , mutationFunction = mutationPermutationSwap
       , recombinationFunction = recombinationPermutationCycleCrossover
       , selection = "tournament" #or "truncation"
       , tournamentSize = 2
       , tournamentProbability = 0.9
       , localSearchFunction = NULL
       , localSearchRate = 0
       , localSearchSettings = list()
       , archive = TRUE
       , stoppingCriterionFunction = NULL
       , verbosity = 0
       , plotting = FALSE
       , selfAdaptTau = 1/sqrt(2)
       , selfAdaptP = 0.5
       );
  con[names(control)] <- control;
  control<-con;

  archive <- control$archive
  creationFunction <- control$creationFunction
  budget <- control$budget
  vectorized <- control$vectorized
  popsize <- control$popsize
  generations <- control$generations
  targetY <- control$targetY
  tournamentSize <- control$tournamentSize
  parameters <- control$parameters
  recombinationRate <- control$recombinationRate
  recombinationFunction <- control$recombinationFunction
  mutationFunction <- control$mutationFunction
  localSearchFunction <- control$localSearchFunction
  localSearchRate <- control$localSearchRate
  localSearchSettings <- control$localSearchSettings
  localSearchSettings$vectorized <- vectorized
  selection <- control$selection
  stoppingCriterionFunction <- control$stoppingCriterionFunction
  verbosity <- control$verbosity
  plotting <- control$plotting
  tournamentProbability <- control$tournamentProbability #probability in the tournament selection
  selfAdaption <- control$selfAdaption
  selfAdaptTau <- control$selfAdaptTau
  selfAdaptP <- control$selfAdaptP
  tournamentSize <- max(tournamentSize,1)

  #for backwards compatibility only:
  if(!is.null( control$mutationParameters))
    parameters[names(control$mutationParameters)] <- control$mutationParameters
  if(!is.null( control$recombinationParameters))
    parameters[names(control$recombinationParameters)] <- control$recombinationParameters

  ## Create initial population
  population <- designRandom(x,creationFunction,popsize)

  ## Create initial parameters (if self-adaptive)
  populationSelfAdapt <- NA
  if(!is.null(selfAdaption)){
    populationSelfAdapt <- t(replicate(popsize,getDefaults(selfAdaption)))
    types <- getParamTypes(selfAdaption)
    lower <- getLower(selfAdaption)
    upper <- getUpper(selfAdaption)
    values <- sapply(getValues(selfAdaption,dict=list()),unlist,simplify=FALSE)
    inum <- types=="numeric"
    icat <- types=="discrete"
    iint <- types=="integer"
    nnum <- sum(inum)
    ncat <- sum(icat)
    nint <- sum(iint)
    nvalues <- sapply(values,length)
  }

  if(vectorized)
    fitness <- fun(population)
  else
    fitness <- unlist(lapply(population,fun))

  count <- popsize
  gen <- 1

  fitbest <- min(fitness,na.rm=TRUE)
  xbest <- population[[which.min(fitness)]]
  if(archive){
    fithist <- fitness
    xhist <- population
  }
  besthist <- fitbest # initialization for plotting
  run <- TRUE
  while((count < budget) & (gen < generations) & (fitbest > targetY) & (run)){
    gen <- gen+1
    ## parent selection
    if(selection == "tournament"){
      #tournament selection
      parents <- tournamentSelection(fitness,tournamentSize,tournamentProbability,max(floor(popsize*recombinationRate)*2,2))
    }else{
      #truncation selection
      parents <- rep(order(fitness),2)[1:max(floor(popsize*recombinationRate)*2,2)]
    }
    ## shuffle parents
    parents <- sample(parents)
    ## self-adapt parameters (recombine, apply learning)
    if(!is.null(selfAdaption)){
      offspringSelfAdapt <- selfAdapt(populationSelfAdapt[parents,],inum,icat,iint,nnum,ncat,nint,lower,upper,values,nvalues,selfAdaptTau,selfAdaptP) #adapt parameters (learn)
      parameters$selfAdapt <- offspringSelfAdapt #get parameters of the offspring individuals
    }
    ## recombine parents
    offspring <- recombinationFunction(population[parents],parameters)
    ## mutate offspring
    offspring <- mutationFunction(offspring,parameters)
    #optional local search
    if(!is.null(localSearchFunction) & localSearchRate>0){
      if(localSearchRate<1){
        subsetsize <- ceiling(length(offspring)*localSearchRate)
        offspringsubset <- sample(length(offspring),subsetsize)
      }else{
        offspringsubset <- 1:length(offspring)
      }
      evaluated <- rep(FALSE,length(offspring))
      tempfitness <- NULL
      for(i in offspringsubset){
        if(localSearchSettings$budget > (budget-count)) #local search budget exceeds remaining budget
          localSearchSettings$budget <- budget-count #set to remaining budget
        if(localSearchSettings$budget < 2) #local search budget too small
          break
        res <- localSearchFunction(x=offspring[i],fun=fun,control=localSearchSettings)
        if(archive){ #archive local search results
          xhist <- append(xhist,res$x)
          fithist <- c(fithist, res$y)
        }
        offspring[[i]] <- res$xbest
        evaluated[i] <- TRUE
        tempfitness <- c(tempfitness,res$ybest)
        count <- count + res$count #add local search counted evaluations to evaluation counter of main loop.
      }
      if(any(evaluated)){ #add only the evaluated individuals to the population, the rest is added later. this is for efficiency reasons only.
        offspringLocal <- offspring[evaluated]
        offspring <- offspring[!evaluated]
        population <- c(population, offspringLocal)
        if(!is.null(selfAdaption)){
          populationSelfAdapt <- rbind(populationSelfAdapt,offspringSelfAdapt[evaluated,])
          offspringSelfAdapt <- offspringSelfAdapt[!evaluated,]
        }
        # remember best
        newbest <- min(tempfitness,na.rm=TRUE)
        if(newbest < fitbest){
          fitbest <- newbest
          xbest <- offspringLocal[[which.min(tempfitness)]]
        }
        fitness <- c(fitness, tempfitness)
      }
    }
    if(length(offspring)>0 & budget > count){
      ## append offspring to population, but remove duplicates first. duplicates are replaced by random, unique solutions.
      offspring <- removeDuplicatesOffspring(population,offspring, creationFunction,duplicated)
      newfit <- rep(NA,length(offspring)) # the vector of new fitness values
      evaluated <- rep(FALSE,length(offspring)) # the vector of indicators, whether the respective offspring is already evaluated
      ## if archive available, take fitness from archive
      if(archive){
        inArchiveIDs <- fmatch(offspring,xhist) #fastmatch package
        inArchive <- !is.na(inArchiveIDs)
        if(any(inArchive)){
          newfit[inArchive] <- fithist[inArchiveIDs[inArchive]]
          evaluated[inArchive] <- TRUE
        }
      }
      ## evaluate the rest with fun
      if(any(!evaluated)){
        ## remove offspring which violate the budget
        evaluateOffspring <- offspring[!evaluated]
        possibleEvaluations <- min(budget-count,length(evaluateOffspring)) #number of possible evaluations, given the budget
        evaluateOffspring <- evaluateOffspring[1:possibleEvaluations] #the non-evaluated ones receive NAs.
        ## evaluate
        if(vectorized)
          newfit[!evaluated][1:possibleEvaluations] <- fun(evaluateOffspring)
        else
          newfit[!evaluated][1:possibleEvaluations] <- unlist(lapply(evaluateOffspring,fun))
        ## save to archive
        if(archive){
          xhist <- append(xhist,evaluateOffspring)
          fithist <- c(fithist, newfit[!evaluated][1:possibleEvaluations])
        }
        ## mark evaluated
        evaluated[!evaluated][1:possibleEvaluations] <- TRUE
        #update count
        count <- count + length(evaluateOffspring)
      }
      ## append results to population
      population <- c(population, offspring[evaluated])
      if(!is.null(selfAdaption)){
        populationSelfAdapt <- rbind(populationSelfAdapt,offspringSelfAdapt[evaluated,])
      }
      fitness <- c(fitness, newfit[evaluated])
      ## remember best
      newbest <- min(newfit,na.rm=TRUE)
      if(newbest < fitbest){
        fitbest <- newbest
        xbest <- offspring[[which.min(newfit)]]
      }
    }
    if(length(population)>popsize){ #survival selection
      if(selection == "tournament"){
        #tournament selection
        popindex <- tournamentSelection(fitness,tournamentSize,tournamentProbability,popsize)
      }else{
        # truncation selection
        popindex <- order(fitness)[1:popsize]
      }
      population <- population[popindex]
      fitness <- fitness[popindex]
      if(!is.null(selfAdaption)){
        populationSelfAdapt <- populationSelfAdapt[popindex,]
      }
    }
    if(!is.null(stoppingCriterionFunction)) # calculation of additional stopping criteria
      run <- stoppingCriterionFunction(population,fitness)
    if(plotting){
      besthist <- c(besthist,fitbest)
      plot(besthist,type="l")
    }
    if(verbosity > 0){
      print(paste("Generations: ",gen," Evaluations: ",count, "Best Fitness: ",fitbest))
    }
  }
  #stopping criteria information for user:
  msg <- "Termination message:"
  if(!run) #custom stopping criterion triggered
    msg=paste(msg,"Custom stopping criterion satisfied.")
  if(fitbest <= targetY)
    msg=paste(msg,"Successfully achieved target fitness.")
  else if(count >= budget) #budget exceeded
    msg=paste(msg,"Target function evaluation budget depleted.")
  else if(gen >= generations) #generation limit exceeded
    msg=paste(msg,"Number of generations limit reached.")
  if(archive)
    return(list(xbest=xbest,ybest=fitbest,x=xhist,y=fithist, count=count, message=msg, population=population, fitness=fitness, populationSelfAdapt= populationSelfAdapt))
  else
    return(list(xbest=xbest,ybest=fitbest,count=count, message=msg, population=population, fitness=fitness , populationSelfAdapt= populationSelfAdapt))
}

###################################################################################
#' Self-adaption of EA parameters.
#'
#' Learning / self-adaption of parameters of the evolutionary algorithm.
#'
#' @param params parameters to be self-adapted
#' @param inum boolean vector, which parameters are numeric
#' @param icat boolean vector, which parameters are discrete, factors
#' @param iint boolean vector, which parameters are integer
#' @param nnum number of numerical parameters
#' @param ncat number of discrete parameters
#' @param nint number of integer parameters
#' @param lower lower bounds (numeric, integer parameters only)
#' @param upper upper bounds (numeric, integer parameters only)
#' @param values values or levels of the discrete parameters
#' @param nvalues number of values for each discrete parameter
#' @param tau learning rate for the log-normal self-adaptation of the numeric and integer parameters
#' @param p mutation probability for the categorical (discrete) parameters
#'
#' @seealso \code{\link{optimEA}}
#'
#' @export
#' @keywords internal
###################################################################################
selfAdapt <- function(params,inum,icat,iint,nnum,ncat,nint,lower,upper,values,nvalues,tau,p){
  noff <- nrow(params)/2 #number of offspring, assumes 2 parents for each offspring
  #tau <- 1/sqrt(2) # global parameter for this.
mutation rate of the numeric/integer parameters
  #p <- 0.5 # global parameter for this. mutation rate of the categorical parameters.
  ainum <- any(inum)
  aiint <- any(iint)
  aicat <- any(icat)

  ## first: recombine
  if(ainum) ## numerical: intermediate xover
    params[1:noff,inum] <- (as.numeric(params[1:noff,inum]) + as.numeric(params[(1:noff)+noff,inum])) / 2
  if(aiint) ## integer: round, intermediate
    params[1:noff,iint] <- round((as.numeric(params[1:noff,iint]) + as.numeric(params[(1:noff)+noff,iint])) / 2)
  if(aicat){ ## discrete: dominant xover
    index <- sample(1:(noff*ncat),noff*ncat / 2) #select 50 % of the discrete individuals and parameters, to be taken from parent 2
    params[1:noff,icat][index] <- params[(1:noff)+noff,icat][index]
  }
  params <- params[1:noff,]

  ## second: mutate the parameters
  if(ainum)
    params[,inum] <- as.numeric(params[,inum]) * matrix(exp(tau*rnorm(nnum*noff,0,1)),noff,nnum)
  if(aiint)
    params[,iint] <- round(as.numeric(params[,iint]) * matrix(exp(tau*rnorm(nint*noff,0,1)),noff,nint))
  if(aicat){
    rand <- matrix(runif(ncat*noff),noff,ncat) #get random number
    ind <- rand < p # if the random number is below p, resample the strategy parameter
    if(any(ind)){
      params[,icat][ind] <- sapply(values,FUN=sample,size=noff,replace=TRUE)[ind]
    }
  }

  ## repair: fix to lower/upper
  if(ainum|aiint){
    params[,inum|iint] <- pmax(lower,params[,inum|iint])
    params[,inum|iint] <- pmin(upper,params[,inum|iint])
  }
  params
}

###################################################################################
#' Self-adaptive mutation operator
#'
#' This mutation function selects an operator and mutationRate (provided in parameters$mutationFunctions)
#' based on self-adaptive parameters chosen for each individual separately.
#'
#' @param population List of permutations
#' @param parameters list, contains the available single mutation functions (\code{mutationFunctions}),
#' and a data.frame that collects the chosen function and mutation rate for each individual (\code{selfAdapt}).
#'
#' @seealso \code{\link{optimEA}}, \code{\link{recombinationSelfAdapt}}
#'
#' @export
#'
#' @examples
#' seed=0
#' N=5
#' require(ParamHelpers)
#' #distance
#' dF <- distancePermutationHamming
#' #mutation
#' mFs <- c(mutationPermutationSwap,mutationPermutationInterchange,
#' mutationPermutationInsert,mutationPermutationReversal)
#' rFs <- c(recombinationPermutationCycleCrossover,recombinationPermutationOrderCrossover1,
#' recombinationPermutationPositionBased,recombinationPermutationAlternatingPosition)
#' mF <- mutationSelfAdapt
#' selfAdaptiveParameters <- makeParamSet(
#' makeNumericParam("mutationRate", lower=1/N,upper=1, default=1/N),
#' makeDiscreteParam("mutationOperator", values=1:4, default=expression(sample(4,1))),
#' #1: swap, 2: interchange, 3: insert, 4: reversal mutation
#' makeDiscreteParam("recombinationOperator", values=1:4, default=expression(sample(4,1)))
#' #1: CycleX, 2: OrderX, 3: PositionX, 4: AlternatingPosition
#' )
#' #recombination
#' rF <- recombinationSelfAdapt
#' #creation
#' cF <- function()sample(N)
#' #objective function
#' lF <- landscapeGeneratorUNI(1:N,dF)
#' #start optimization
#' set.seed(seed)
#' res <- optimEA(,lF,list(parameters=list(mutationFunctions=mFs,recombinationFunctions=rFs),
#' creationFunction=cF,mutationFunction=mF,recombinationFunction=rF,
#' popsize=15,budget=100,targetY=0,verbosity=1,selfAdaption=selfAdaptiveParameters,
#' vectorized=TRUE)) ##target function is "vectorized", expects list as input
#' res$xbest
###################################################################################
mutationSelfAdapt <- function(population,parameters){
	mutfuns <- parameters$mutationFunctions
	mutfunsi <- mutfuns[as.numeric(parameters$selfAdapt[,"mutationOperator"])]
	pars <- parameters$selfAdapt[,"mutationRate"]
	population <- sapply(population,list,simplify=FALSE) #wrap each individual in a list, because the operators expect lists.
	#TODO: more efficient to have separate functions that are able to deal with single non-list input
	population <- mapply(function(f, x, p) unlist(f(x,list(mutationRate=p))),
		mutfunsi, population,pars,SIMPLIFY=FALSE)
	population
}

###################################################################################
#' Self-adaptive recombination operator
#'
#' This recombination function selects an operator (provided in parameters$recombinationFunctions)
#' based on self-adaptive parameters chosen for each individual separately.
#'
#' @param population List of permutations
#' @param parameters list, contains the available single recombination functions (\code{recombinationFunctions}),
#' and a data.frame that collects the chosen function for each individual (\code{selfAdapt}).
#'
#' @seealso \code{\link{optimEA}}, \code{\link{mutationSelfAdapt}}
#'
#' @export
###################################################################################
recombinationSelfAdapt <- function(population,parameters){
	recfuns <- parameters$recombinationFunctions
	recfunsi <- recfuns[as.numeric(parameters$selfAdapt[,"recombinationOperator"])]
	n <- length(population)
	population <- sapply(population,list,simplify=FALSE) #wrap each individual in a list, because the operators expect lists.
	#TODO: more efficient to have separate functions that are able to deal with single non-list input
	population <- mapply(function(f, x1, x2) unlist(f(append(x1,x2))),
		recfunsi, population[1:(n/2)],population[(1+n/2):n],SIMPLIFY=FALSE)
	population
}
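## Commented usage sketch (not run; mirrors the commented test calls used elsewhere
## in this package). It illustrates the shape of parameters$selfAdapt that the two
## operators above index into: a data.frame with one row per individual. The column
## names ("mutationRate", "mutationOperator") are taken from the code above; the
## concrete values below are illustrative assumptions.
# mFs <- c(mutationPermutationSwap,mutationPermutationInsert)
# pop <- replicate(4,sample(5),simplify=FALSE)
# pars <- list(mutationFunctions=mFs,
#              selfAdapt=data.frame(mutationRate=rep(0.5,4),
#                                   mutationOperator=sample(2,4,replace=TRUE)))
# mutationSelfAdapt(pop,pars)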
/scratch/gouwar.j/cran-all/cranData/CEGO/R/optimEA.R
################################################################################################### #' Optimization Interface (continuous, bounded) #' #' This function is an interface fashioned like the \code{\link{optim}} function. #' Unlike optim, it collects a set of bound-constrained optimization algorithms #' with local as well as global approaches. It is, e.g., used in the CEGO package #' to solve the optimization problem that occurs during parameter estimation #' in the Kriging model (based on Maximum Likelihood Estimation). #' Note that this function is NOT applicable to combinatorial optimization problems. #' #' The control list contains: #' \describe{ #' \item{\code{funEvals}}{ stopping criterion, number of evaluations allowed for \code{fun} (defaults to 100)} #' \item{\code{reltol}}{ stopping criterion, relative tolerance (default: 1e-6)} #' \item{\code{factr}}{ stopping criterion, specifying relative tolerance parameter factr for the L-BFGS-B method in the optim function (default: 1e10) } #' \item{\code{popsize}}{ population size or number of particles (default: \code{10*dimension}, where \code{dimension} is derived from the length of the vector \code{lower}). } #' \item{\code{restarts}}{ whether to perform restarts (Default: TRUE). Restarts will only be performed if some of the evaluation budget is left once the algorithm stopped due to some stopping criterion (e.g., reltol).} #' \item{\code{method}}{ will be used to choose the optimization method from the following list: #' "L-BFGS-B" - BFGS quasi-Newton: \code{stats} Package \code{optim} function\cr #' "nlminb" - box-constrained optimization using PORT routines: \code{stats} Package \code{nlminb} function\cr #' "DEoptim" - Differential Evolution implementation: \code{DEoptim} Package\cr #' Additionally to the above methods, several methods from the package \code{nloptr} can be chosen. #' The complete list of suitable nlopt methods (non-gradient, bound constraints) is: \cr #' "NLOPT_GN_DIRECT","NLOPT_GN_DIRECT_L","NLOPT_GN_DIRECT_L_RAND", #' "NLOPT_GN_DIRECT_NOSCAL","NLOPT_GN_DIRECT_L_NOSCAL","NLOPT_GN_DIRECT_L_RAND_NOSCAL", #' "NLOPT_GN_ORIG_DIRECT","NLOPT_GN_ORIG_DIRECT_L","NLOPT_LN_PRAXIS", #' "NLOPT_GN_CRS2_LM","NLOPT_LN_COBYLA", #' "NLOPT_LN_NELDERMEAD","NLOPT_LN_SBPLX","NLOPT_LN_BOBYQA","NLOPT_GN_ISRES"\cr\cr #' All of the above methods use bound constraints. #' For references and details on the specific methods, please check the documentation of the packages that provide them.} #' } #' #' @param x is a point (vector) in the decision space of \code{fun} #' @param fun is the target function of type \code{y = f(x, ...)} #' @param lower is a vector that defines the lower boundary of search space #' @param upper is a vector that defines the upper boundary of search space #' @param control is a list of additional settings. See details. #' @param ... 
additional parameters to be passed on to \code{fun} #' #' @return This function returns a list with: #' \describe{ #' \item{\code{xbest}}{ parameters of the found solution} #' \item{\code{ybest}}{ target function value of the found solution} #' \item{\code{count}}{ number of evaluations of \code{fun}} #' } #' #' @export ################################################################################################### optimInterface<-function(x,fun,lower=-Inf,upper=Inf,control=list(),...){ con<-list(funEvals=100 #CON: Internal List with defaults for control ,method="L-BFGS-B" ,reltol=1e-6 ,factr=1e10 ,popsize=NULL ,ineq_constr=NULL ,verbosity=0 ,restarts=TRUE) con[names(control)] <- control; control<-con; #INITIALIZE dim <- length(lower) if(is.null(control$popsize)) control$popsize <- dim * 10 budget <- control$funEvals sumevals <- 0 ymin <- Inf run <- TRUE method <- control$method if(!is.null(control$ineq_constr) & !(method=="NLOPT_GN_ORIG_DIRECT" | method=="NLOPT_LN_COBYLA" )) warning("Constraint function passed to optimInterface. This is not supported with the chosen method.") if(length(x)==0) x <- runif(length(lower))*(upper-lower)+lower #LOOP OVER RESTARTS while(run){ if (method=="L-BFGS-B"){ res <- optim(par=x, fn=fun, method=method,lower=lower,upper=upper,control=list(maxit=budget,trace=control$verbosity,factr=control$factr),...) resval <- res$value respar <- res$par resevals <- res$counts[[1]] +res$counts[[2]] * 2 * dim }else if (method=="nlminb"){ res <- nlminb(start=x, objective=fun, gradient=NULL, hessian=NULL, control=list(eval.max=budget,iter.max=budget,rel.tol=control$reltol,trace=control$verbosity),lower=lower,upper=upper,...) resval <- res$objective respar <- res$par resevals <- sum(res$evaluations) }else if (method=="DEoptim"){ res <- DEoptim::DEoptim(fn=fun ,lower=lower,upper=upper,control=DEoptim.control(NP=control$popsize,itermax=floor((budget-control$popsize)/control$popsize),reltol=control$reltol,trace=FALSE),...) resval <- res$optim$bestval respar <- res$optim$bestmem resevals <- budget # TODO: this is not correct if reltol hits }else if (any(method==c("NLOPT_GN_DIRECT","NLOPT_GN_DIRECT_L","NLOPT_GN_DIRECT_L_RAND", "NLOPT_GN_DIRECT_NOSCAL","NLOPT_GN_DIRECT_L_NOSCAL","NLOPT_GN_DIRECT_L_RAND_NOSCAL", "NLOPT_GN_ORIG_DIRECT","NLOPT_GN_ORIG_DIRECT_L","NLOPT_LN_PRAXIS", "NLOPT_GN_CRS2_LM","NLOPT_LN_COBYLA", "NLOPT_LN_NELDERMEAD","NLOPT_LN_SBPLX","NLOPT_LN_BOBYQA","NLOPT_GN_ISRES"))){ opts=list(algorithm=method,maxeval=budget, ftol_rel=control$reltol, xtol_rel=-Inf, print_level=control$verbosity) res <- nloptr::nloptr(x,fun,lb = lower,ub = upper, eval_g_ineq=control$ineq_constr,opts = opts,...) resval <- res$objective respar <- res$solution resevals <- res$iterations }else{ stop("The chosen optimization method used in optimInterface does not exist.") } #CONTROL RESTARTS sumevals <- sumevals+resevals if(resval < ymin){ resultpar <- respar ymin <- resval } x <- lower+(upper-lower)*runif(dim) #Stop while loop when limit reached if(sumevals>=control$funEvals | !control$restarts){ run=FALSE } } result <- list() result$xbest <- resultpar result$ybest <- ymin result$count <- sumevals result }
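## Commented usage sketch (not run): minimize a simple quadratic function with the
## default L-BFGS-B method. All argument and control names are as documented above;
## the target function and bounds are illustrative assumptions.
# fn <- function(x) sum(x^2)
# res <- optimInterface(x=c(1,1), fun=fn, lower=c(-5,-5), upper=c(5,5),
#                       control=list(funEvals=200, method="L-BFGS-B", restarts=TRUE))
# res$xbest; res$ybest; res$count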
/scratch/gouwar.j/cran-all/cranData/CEGO/R/optimInterface.R
# R version of a mixed integer evolution strategy by Martin Zaefferer, intended for use in the CEGO package.
###################################################################################
#' Mixed Integer Evolution Strategy (MIES)
#'
#' An optimization algorithm from the family of Evolution Strategies, designed to
#' optimize mixed-integer problems: The search space is composed of continuous (real-valued) parameters,
#' ordinal integers and categorical parameters.
#' Please note that the categorical parameters need to be coded as integers
#' (type should not be a factor or character).
#' It is an implementation (with a slight modification) of MIES as described by Li et al. (2013).
#' Note that this algorithm always uses a separate step size for each solution parameter; unlike Li et al.,
#' we did not include the option of a single step size for all parameters.
#' Dominant recombination is used for solution parameters (the search space parameters),
#' intermediate recombination for strategy parameters (i.e., step sizes).
#' Mutation: Self-adaptive, step sizes sigma are optimized alongside the solution parameters.
#' Real-valued parameters are subject to variation based on independent normal distributed random variables.
#' Ordinal integers are subject to variation based on the difference of geometric distributions.
#' Categorical parameters are changed at random, with a self-adapted probability.
#' Note that a simpler bound constraint method is used: Instead of the transformation Ta,b(x)
#' described by Li et al., optimMIES simply replaces any value that exceeds the bounds by the respective boundary value.
#'
#' The control variables types, lower, upper and levels are especially important.
#'
#' @param x Optional start individual(s) as a list. If NULL (default), \code{creationFunction} (in \code{control} list) is used to create initial design.
#' If \code{x} has fewer individuals than the population size, creationFunction will fill up the rest.
#' @param fun target function to be minimized.
#' @param control (list), with the options:
#' \describe{
#' \item{\code{budget}}{The limit on number of target function evaluations (stopping criterion) (default: 1000).}
#' \item{\code{popsize}}{Population size (default: 100).}
#' \item{\code{generations}}{Number of generations (stopping criterion) (default: Inf).}
#' \item{\code{targetY}}{Target function value (stopping criterion) (default: -Inf).}
#' \item{\code{vectorized}}{Boolean. Defines whether target function is vectorized (takes a list of solutions as argument) or not (takes single solution as argument). Default: FALSE.}
#' \item{\code{verbosity}}{Level of text output during run. Defaults to 0, no output.}
#' \item{\code{plotting}}{Plot optimization progress during run (TRUE) or not (FALSE). Default is FALSE.}
#' \item{\code{archive}}{Whether to keep all candidate solutions and their fitness in an archive (TRUE) or not (FALSE). Default is FALSE.}
#' \item{\code{stoppingCriterionFunction}}{Custom additional stopping criterion. Function evaluated on the population, receiving all individuals (list) and their fitness (vector). If the result is FALSE, the algorithm stops.}
#' \item{\code{types}}{A vector that specifies the data type of each variable: "numeric", "integer" or "factor".}
#' \item{\code{lower}}{Lower bound of each variable. Factor variables can have the lower bound set to NA.}
#' \item{\code{upper}}{Upper bound of each variable.
Factor variables can have the upper bound set to NA.} #' \item{\code{levels}}{List of levels for each variable (only relevant for categorical variables). #' Should be a vector of numerical values, usually integers, but not necessarily a sequence. #' HAS to be given if any factors/categoricals are present. Else, set to NA.} #' } #' #' @return a list: #' \describe{ #' \item{\code{xbest}}{best solution found.} #' \item{\code{ybest}}{fitness of the best solution.} #' \item{\code{x}}{history of all evaluated solutions.} #' \item{\code{y}}{corresponding target function values f(x).} #' \item{\code{count}}{number of performed target function evaluations.} #' \item{\code{message}}{Termination message: Which stopping criterion was reached.} #' \item{\code{population}}{Last population.} #' \item{\code{fitness}}{Fitness of last population.} #' } #' #' @references Rui Li, Michael T. M. Emmerich, Jeroen Eggermont, Thomas Baeck, Martin Schuetz, Jouke Dijkstra, and Johan H. C. Reiber. 2013. Mixed integer evolution strategies for parameter optimization. Evol. Comput. 21, 1 (March 2013), 29-64. #' #' @examples #' set.seed(1) #' controlList <- list(lower=c(-5,-5,1,1,NA,NA),upper=c(10,5,10,10,NA,NA), #' types=c("numeric","numeric","integer","integer","factor","factor"), #' levels=list(NA,NA,NA,NA,c(1,3,5),1:4), #' vectorized = FALSE) #' objFun <- function(x){ #' x[[3]] <- round(x[[3]]) #' x[[4]] <- round(x[[4]]) #' y <- sum(as.numeric(x[1:4])^2) #' if(x[[5]]==1 & x[[6]]==4) #' y <- exp(y) #' else #' y <- y^2 #' if(x[[5]]==3) #' y<-y-1 #' if(x[[5]]==5) #' y<-y-2 #' if(x[[6]]==1) #' y<-y*2 #' if(x[[6]]==2) #' y<-y * 1.54 #' if(x[[6]]==3) #' y<- y +2 #' if(x[[6]]==4) #' y<- y * 0.5 #' if(x[[5]]==1) #' y<- y * 9 #' y #' } #' res <- optimMIES(,objFun,controlList) #' res$xbest #' res$ybest #' #' @seealso \code{\link{optimCEGO}}, \code{\link{optimRS}}, \code{\link{optimEA}}, \code{\link{optim2Opt}}, \code{\link{optimMaxMinDist}} #' #' @export ################################################################################### optimMIES <- function(x=NULL,fun,control=list()){ #TODO: providing x seems to be buggy #default controls: #todo: document con<-list(budget = 1000 #default controls: , popsize = 100 # mu , lambda = 2 # , generations = Inf , targetY = -Inf , vectorized=FALSE , isotropic = FALSE #one step size for all parameters, or separate for each dimension? , strategy = "plus" # mu + lambda strategy ("plus") or mu,lambda strategy ("comma") # , lower = 0, #lower bounds for all parameters. HAS to be given. # , upper = 1, #upper bounds for all parameters. HAS to be given. # , types = NA, # vector with data types for each dimension. HAS to be given. # , levels = NA, # list of levels for each variable (only relevant for categorical variables). HAS to be given if any factors/categoricals are present. 
	, archive = FALSE
	, stoppingCriterionFunction = NULL
	, verbosity = 0
	, plotting = FALSE
	);
	con[names(control)] <- control;
	control<-con;

	archive <- control$archive
	budget <- control$budget
	vectorized <- control$vectorized
	popsize <- control$popsize
	generations <- control$generations
	targetY <- control$targetY
	stoppingCriterionFunction <- control$stoppingCriterionFunction
	verbosity <- control$verbosity
	plotting <- control$plotting
	lambda <- control$lambda
	lower <- control$lower
	upper <- control$upper
	types <- control$types
	levels <- control$levels
	isotropic <- control$isotropic
	sigma0 <- control$sigma0
	strategy <- control$strategy

	if(is.null(types))
		stop("Please provide the vector types, which gives the data type (numeric, integer, factor) for each variable.")
	if(is.null(lower)|is.null(upper))
		stop("Please provide lower and upper bounds for the search space in optimMIES, via control list.")
	npar <- length(lower) #number of parameters
	nreal <- sum(types=="numeric") #number of real parameters
	nint <- sum(types=="integer") #number of integer parameters
	ncat <- sum(types=="factor") #number of categorical parameters

	ireal <- which(types=="numeric")
	iint <- which(types=="integer")
	icat <- which(types=="factor")

	tauReal <- 1/sqrt(2*nreal) #global learning rate
	tauRealDash <- 1/sqrt(2*sqrt(nreal)) #local learning rate
	tauInt <- 1/sqrt(2*nint) #global learning rate
	tauIntDash <- 1/sqrt(2*sqrt(nint)) #local learning rate
	tauCat <- 1/sqrt(2*ncat) #global learning rate
	tauCatDash <- 1/sqrt(2*sqrt(ncat)) #local learning rate

	ranges <- upper-lower
	if(is.null(sigma0)){
		sigma0 <- numeric(npar)
		sigma0[ireal] <- ranges[ireal] * 0.1
		sigma0[iint] <- ranges[iint] * 0.33
		sigma0[icat] <- 0.1
	}

	if(npar != length(upper))
		stop("Lower and upper bounds for the search space in optimMIES have to be of the same length. Check the control list.")
	if(length(sigma0)!=npar)
		stop("Length of initial step size vector (sigma0) should be one, or same length as lower/upper bounds.")

	creationFunction <- function(){
		x <- numeric(npar)
		if(length(ireal)>0)
			x[ireal] <- runif(nreal,lower[ireal],upper[ireal])
		if(length(iint)>0)
			x[iint] <- floor(runif(nint,lower[iint],upper[iint]+ 1-.Machine$double.eps ))
		if(length(icat)>0)
			x[icat] <- sapply(levels[icat], sample,1,simplify=T)
		## append strategy parameters
		x <- c(x,sigma0)
		x
	}
	#creationFunction()

	recombinationFunction <- function(parent1,parent2){
		## dominant recombination for solution parameters
		inds <- sample(1:npar,ceiling(npar/2))
		parent1[inds] <- parent2[inds]
		## intermediate recombination for all strategy parameters (step sizes)
		parent1[(npar+1):(2*npar)] <- (parent1[(npar+1):(2*npar)] + parent2[(npar+1):(2*npar)]) / 2
		parent1
	}
	#Tlu <- function(x,a,b){ #todo!
	#	y <- (x-a) / (b-a)
	#	ydash <- y
	#	inds <- (floor(y) %% 2) == 0
	#	ydash[inds] <- abs(y[inds]-floor(y[inds]))
	#	ydash[!inds] <- 1 - abs(y[!inds]-floor(y[!inds]))
	#	xdash <- a + (b - a) * ydash
	#	xdash
	#}
	#Tlu(1:4,c(2,2,2,2),c(3,3,3,3))
	#Tlu(-1:4,c(1,1,1,1,-1,-1),c(2,3,4,2,10,10))

	sigids <- (npar+1):(npar*2) #TODO: move this out (refactor)
	mutationFunctionReal <- function(individual){
		Nc <- rnorm(1,0,1)
		sigma <- individual[sigids][ireal]
		sigmaDash <- sigma * exp(tauReal * Nc + tauRealDash * rnorm(nreal,0,1))
		newval <- as.numeric(individual[ireal]) + sigmaDash * rnorm(nreal,0,1)
		#individual[ireal] <- Tlu(newval,lower,upper)
		newval <- pmin(upper[ireal],newval) #fix solution parameters to bounds #TODO: Tab(x) implementation
		individual[ireal] <- pmax(lower[ireal],newval)
		individual[sigids][ireal] <- sigmaDash
		individual
	}
	mutationFunctionInt <- function(individual){
		Nc <- rnorm(1,0,1)
		sigma <- individual[sigids][iint]
		sigmaDash <- pmax(1,sigma * exp(tauInt * Nc + tauIntDash * rnorm(nint,0,1)))
		#u1 <- runif(nint)
		#u2 <- runif(nint)
		p <- sigmaDash/nint
		p <- 1-p/(1+sqrt(1+p^2))
		G1 <- rgeom(nint,prob=p) #TODO: may be NA for very small p (~1e-10)
		G2 <- rgeom(nint,prob=p)
		newval <- as.numeric(individual[iint]) + G1-G2
		#individual[iint] <- Tlu(newval,lower[iint],upper[iint]) #fix solution parameters to bounds with transformation
		newval <- pmin(upper[iint],newval) #fix solution parameters to bounds
		individual[iint] <- pmax(lower[iint],newval)
		individual[sigids][iint] <- sigmaDash
		individual
	}
	mutationFunctionCat <- function(individual){
		Nc <- rnorm(1,0,1)
		sigma <- individual[sigids][icat]
		sigmaDash <- 1/(1+((1-sigma)/sigma * exp(-tauCat * Nc - tauCatDash * rnorm(ncat,0,1))))
		#sigmaDash <- Tlu(sigmaDash,rep(1/(3*ncat),ncat),rep(0.5,ncat))
		sigmaDash <- pmin(0.5,sigmaDash)
		sigmaDash <- pmax(1/(3*ncat),sigmaDash)
		u <- runif(ncat)
		inds <- u < sigmaDash
		newval <- sapply(levels[icat], sample,1)
		individual[icat][inds] <- newval[inds]
		individual[sigids][icat] <- sigmaDash
		individual
	}
	#x1 <- creationFunction()
	#x1
	#mutationFunctionCat(x1)

	## Create initial population
	population <- designRandom(x,creationFunction,popsize)

	if(vectorized)
		fitness <- fun(sapply(population,'[',-sigids,simplify=F)) #note: this also first cuts off strategy parameters
	else
		fitness <- unlist(lapply(sapply(population,'[',-sigids,simplify=F),fun)) #note: this also first cuts off strategy parameters

	count <- popsize
	gen <- 1
	fitbest <- min(fitness,na.rm=TRUE)
	xbest <- population[[which.min(fitness)]][-sigids]
	if(archive){
		fithist <- fitness
		xhist <- population
	}
	besthist <- fitbest # initialization for plotting
	run <- TRUE
	while((count < budget) & (gen < generations) & (fitbest > targetY) & (run)){
		gen <- gen+1
		#recombine
		c1 <- sample(popsize,lambda,replace=TRUE) #first parents
		c2 <- sample(popsize,lambda,replace=TRUE) #second parents
		offspring <- mapply(FUN=recombinationFunction,population[c1],population[c2],SIMPLIFY=FALSE)
		#mutate real, integer, factors:
		offspring <- sapply(offspring,mutationFunctionReal,simplify=FALSE) #todo: if any
		offspring <- sapply(offspring,mutationFunctionInt,simplify=FALSE) #todo: if any
		offspring <- sapply(offspring,mutationFunctionCat,simplify=FALSE) #todo: if any
		if(length(offspring)>0 & budget > count){
			## remove offspring which violate the budget
			offspring <- offspring[1:min(budget-count,length(offspring))]
			#evaluate
			if(vectorized)
				newfit <- fun(sapply(offspring,'[',-sigids,simplify=F)) #note: this also first cuts off strategy parameters
			else
				newfit <- unlist(lapply(sapply(offspring,'[',-sigids,simplify=F),fun)) #note: this also
first cuts off strategy parameters ####newfit <- fun(unname(split(offspring[,1:npar],1:nrow(offspring))))#note: this also first cuts off strategy parameters #update count count=count+ length(newfit) # keep archive if(archive){ xhist <- append(xhist,offspring) fithist <- c(fithist, newfit) } # remember best newbest <- min(newfit,na.rm=TRUE) if(newbest < fitbest){ fitbest <- newbest xbest <- offspring[[which.min(newfit)]][-sigids] } if(strategy == "plus"){ population <- c(population, offspring) fitness <- c(fitness, newfit) }else if(strategy == "comma"){ population <- offspring fitness <- newfit }else{ stop("Invalid strategy string for MIES, please use plus or comma.") } } if(length(population)>popsize){ #tournament selection #if(selection == "tournament"){ #tournament selection # popindex <- tournamentSelection(fitness,tournamentSize,tournamentProbability,popsize) #}else{ # truncation selection popindex <- order(fitness)[1:popsize] #MIES seems to use truncation selection only. #} population <- population[popindex] fitness <- fitness[popindex] } if(!is.null(stoppingCriterionFunction)) # calculation of additional stopping criteria run <- stoppingCriterionFunction(population,fitness) if(plotting){ besthist <- c(besthist,fitbest) plot(besthist,type="l") } if(verbosity > 0){ print(paste("Generations: ",gen," Evaluations: ",count, "Best Fitness: ", min(fitness,na.rm=TRUE))) } } #stopping criteria information for user: msg <- "Termination message:" if(!run) #success msg=paste(msg,"Custom stopping criterion satisfied.") if(fitbest <= targetY) msg=paste(msg,"Successfully achieved target fitness.") else if(count >= budget) #budget exceeded msg=paste(msg,"Target function evaluation budget depleted.") else if(gen >= generations) #generation limit exceeded msg=paste(msg,"Number of generations limit reached.") if(archive) return(list(xbest=xbest,ybest=fitbest,x=xhist,y=fithist, count=count, message=msg, population=population, fitness=fitness)) else return(list(xbest=xbest,ybest=fitbest,count=count, message=msg, population=population, fitness=fitness)) }
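## Commented sketch (not run) of the integer mutation used above, in isolation:
## the perturbation is the difference of two geometric random variables G1-G2,
## which is symmetric around zero and spreads with growing step size sigmaDash.
## The concrete numbers below are illustrative assumptions.
# sigmaDash <- 2; nint <- 1
# p <- sigmaDash/nint
# p <- 1-p/(1+sqrt(1+p^2))
# table(rgeom(1e4,prob=p)-rgeom(1e4,prob=p)) # roughly symmetric, integer-valued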
/scratch/gouwar.j/cran-all/cranData/CEGO/R/optimMIES.R
################################################################################### #' Max-Min-Distance Optimizer #' #' One-shot optimizer: Create a design with maximum sum of distances, and evaluate. #' Best candidate is returned. #' #' @param x Optional set of solution(s) as a list, which are added to the randomly generated solutions and are also evaluated with the target function. #' @param fun target function to be minimized #' @param control (list), with the options: #' \describe{ #' \item{\code{budget}}{ The limit on number of target function evaluations (stopping criterion) (default: 100).} #' \item{\code{vectorized}}{ Boolean. Defines whether target function is vectorized (takes a list of solutions as argument) or not (takes single solution as argument). Default: FALSE.} #' \item{\code{creationFunction}}{ Function to create individuals/solutions in search space. Default is a function that creates random permutations of length 6.} #' \item{\code{designBudget}}{ budget of the design function \code{\link{designMaxMinDist}}, which is the number of randomly created candidates in each iteration.} #' } #' #' @return a list: #' \describe{ #' \item{\code{xbest}}{ best solution found} #' \item{\code{ybest}}{ fitness of the best solution} #' \item{\code{x}}{ history of all evaluated solutions} #' \item{\code{y}}{ corresponding target function values f(x)} #' \item{\code{count}}{ number of performed target function evaluations } #' } #' #' @examples #' seed=0 #' #distance #' dF <- distancePermutationHamming #' #creation #' cF <- function()sample(5) #' #objective function #' lF <- landscapeGeneratorUNI(1:5,dF) #' #start optimization #' set.seed(seed) #' res <- optimMaxMinDist(,lF,list(creationFunction=cF,budget=20, #' vectorized=TRUE)) ##target function is "vectorized", expects list as input #' res$xbest #' #' @seealso \code{\link{optimCEGO}}, \code{\link{optimEA}}, \code{\link{optimRS}}, \code{\link{optim2Opt}} #' #' @export ################################################################################### optimMaxMinDist <- function(x=NULL,fun,control=list()){ con<-list( budget=100 ,vectorized=FALSE ,creationFunction = solutionFunctionGeneratorPermutation(6) ,distanceFunction = distancePermutationHamming ,designBudget=100 ) con[names(control)] <- control control<-con budget <- control$budget vectorized <- control$vectorized creationFunction <- control$creationFunction ## Create random solutions without duplicates, filled up with x x <- designMaxMinDist(x,creationFunction,budget,list(budget=control$designBudget,distanceFunction=control$distanceFunction)) #evaluate if(vectorized) y <- fun(x) else y <- unlist(lapply(x,fun)) #best value found: j <- which.min(y) #return list(xbest=x[[j]],ybest=y[j],x=x,y=y, count=budget) }
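## Commented usage sketch (not run): the same optimizer, but with the distance
## function and design budget set explicitly (control entries as defined above).
## The concrete values are illustrative assumptions.
# cF <- function() sample(7)
# lF <- landscapeGeneratorUNI(1:7,distancePermutationHamming)
# res <- optimMaxMinDist(,lF,list(creationFunction=cF,budget=15,
#        distanceFunction=distancePermutationHamming,designBudget=50,vectorized=TRUE))
# res$ybest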
/scratch/gouwar.j/cran-all/cranData/CEGO/R/optimMaxMinDist.R
################################################################################### #' Combinatorial Random Search #' #' Random Search for mixed or combinatorial optimization. Solutions are generated completely at random. #' #' @param x Optional set of solution(s) as a list, which are added to the randomly generated solutions and are also evaluated with the target function. #' @param fun target function to be minimized #' @param control (list), with the options: #' \describe{ #' \item{\code{budget}}{ The limit on number of target function evaluations (stopping criterion) (default: 100)} #' \item{\code{vectorized}}{ Boolean. Defines whether target function is vectorized (takes a list of solutions as argument) or not (takes single solution as argument). Default: FALSE} #' \item{\code{creationFunction}}{ Function to create individuals/solutions in search space. Default is a function that creates random permutations of length 6} #' } #' #' @return a list: #' \describe{ #' \item{\code{xbest}}{ best solution found} #' \item{\code{ybest}}{ fitness of the best solution} #' \item{\code{x}}{ history of all evaluated solutions} #' \item{\code{y}}{ corresponding target function values f(x)} #' \item{\code{count}}{ number of performed target function evaluations } #' } #' #' @examples #' seed=0 #' #distance #' dF <- distancePermutationHamming #' #creation #' cF <- function()sample(5) #' #objective function #' lF <- landscapeGeneratorUNI(1:5,dF) #' #start optimization #' set.seed(seed) #' res <- optimRS(,lF,list(creationFunction=cF,budget=100, #' vectorized=TRUE)) ##target function is "vectorized", expects list as input #' res$xbest #' #' @seealso \code{\link{optimCEGO}}, \code{\link{optimEA}}, \code{\link{optim2Opt}}, \code{\link{optimMaxMinDist}} #' #' @export ################################################################################### optimRS <- function(x=NULL,fun,control=list()){ con<-list( budget=100 ,vectorized=FALSE ,creationFunction = solutionFunctionGeneratorPermutation(6) ) con[names(control)] <- control control<-con ## Create random solutions without duplicates, filled up with x x <- designRandom(x,control$creationFunction,control$budget) #evaluate if(control$vectorized) y <- fun(x) else y <- unlist(lapply(x,fun)) #best value found: j <- which.min(y) #return list(xbest=x[[j]],ybest=y[j],x=x,y=y, count=control$budget) }
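## Commented usage sketch (not run): non-vectorized target functions are supported
## as well; fun then receives a single solution instead of a list. The target
## function below is an illustrative assumption.
# cF <- function() sample(5)
# fn <- function(x) sum(abs(x - 1:5))
# res <- optimRS(,fn,list(creationFunction=cF,budget=50,vectorized=FALSE))
# res$ybest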
/scratch/gouwar.j/cran-all/cranData/CEGO/R/optimRS.R
###################################################################################
#' Create Quadratic Assignment Problem (QAP) Benchmark
#'
#' Creates a benchmark function for the Quadratic Assignment Problem.
#'
#' @param a flow matrix
#' @param b distance matrix
#'
#' @return the function of type cost=f(permutation)
#'
#' @examples
#' set.seed(1)
#' n=5
#' #create a flow matrix
#' A <- matrix(0,n,n)
#' for(i in 1:n){
#' for(j in i:n){
#' if(i!=j){
#' A[i,j] <- sample(100,1)
#' A[j,i] <- A[i,j]
#' }
#' }
#' }
#' #create a distance matrix
#' locations <- matrix(runif(n*2)*10,,2)
#' B <- as.matrix(dist(locations))
#' #create QAP objective function
#' fun <- benchmarkGeneratorQAP(A,B)
#' #evaluate
#' fun(1:n)
#' fun(n:1)
#'
#' @seealso \code{\link{benchmarkGeneratorFSP}}, \code{\link{benchmarkGeneratorTSP}}, \code{\link{benchmarkGeneratorWT}}
#' @export
###################################################################################
benchmarkGeneratorQAP <- function(a, b) { # Generator function.
	a
	b #lazy evaluation fix, faster than force()
	function(x){
		bx<-b[x,x]
		sum(a*bx) # divide by 2 if exact cost required
	}
}

###################################################################################
#' Create Flow shop Scheduling Problem (FSP) Benchmark
#'
#' Creates a benchmark function for the Flow shop Scheduling Problem.
#'
#' @param a matrix of processing times for each step and each machine
#' @param n number of jobs
#' @param m number of machines
#'
#' @return the function of type cost=f(permutation)
#'
#' @examples
#' n=10
#' m=4
#' #create a matrix of processing times
#' A <- matrix(sample(n*m,replace=TRUE),n,m)
#' #create FSP objective function
#' fun <- benchmarkGeneratorFSP(A,n,m)
#' #evaluate
#' fun(1:n)
#' fun(n:1)
#'
#' @seealso \code{\link{benchmarkGeneratorQAP}}, \code{\link{benchmarkGeneratorTSP}}, \code{\link{benchmarkGeneratorWT}}
#' @export
###################################################################################
benchmarkGeneratorFSP <- function(a, n, m) { # Generator function. see Reeves1995
	a
	n #lazy evaluation fix, faster than force()
	m
	function(x){
		C=matrix(NA,n,m)
		ax <- a[x,]
		C[,1]<-as.numeric(cumsum(ax[,1]))
		C[1,]<-as.numeric(cumsum(ax[1,]))
		for(i in 2:n){
			for(j in 2:m){
				C[i,j]=max(C[i-1,j],C[i,j-1])+ax[i,j]
			}
		}
		C[n,m]
	}
}

###################################################################################
#' Create (Asymmetric) Travelling Salesperson Problem (TSP) Benchmark
#'
#' Creates a benchmark function for the (Asymmetric) Travelling Salesperson Problem.
#' Path (Do not return to start of tour. Start and end of tour not fixed.)
#' or Cycle (Return to start of tour). Symmetry depends on supplied distance matrix.
#'
#' @param distanceMatrix Matrix that collects the distances between travelled locations.
#' @param type Can be "Cycle" (return to start, default) or "Path" (no return to start).
#' #' @return the function of type cost=f(permutation) #' #' @examples #' set.seed(1) #' #create 5 random locations to be part of a tour #' n=5 #' cities <- matrix(runif(2*n),,2) #' #calculate distances between cities #' cdist <- as.matrix(dist(cities)) #' #create objective functions (for path or cycle) #' fun1 <- benchmarkGeneratorTSP(cdist, "Path") #' fun2 <- benchmarkGeneratorTSP(cdist, "Cycle") #' #evaluate #' fun1(1:n) #' fun1(n:1) #' fun2(n:1) #' fun2(1:n) #' #' @seealso \code{\link{benchmarkGeneratorQAP}}, \code{\link{benchmarkGeneratorFSP}}, \code{\link{benchmarkGeneratorWT}} #' @export ################################################################################### benchmarkGeneratorTSP <- function(distanceMatrix, type="Cycle") { # Generator function distanceMatrix #lazy evaluation fix, faster than force() if(type=="Path"){ f <- function (x){ x1 <- x[-1] #without return to start point: path. x <- x[-length(x)] sum(distanceMatrix[cbind(x,x1)]) } }else{ f <- function (x){ x1 <- c(x[-1],x[1]) #with return to start point: cycle. sum(distanceMatrix[cbind(x,x1)]) } } return(f) } ################################################################################### #' Create single-machine total Weighted Tardiness (WT) Problem Benchmark #' #' Creates a benchmark function for the single-machine total Weighted Tardiness Problem. #' #' @param p processing times #' @param w weights #' @param d due dates #' #' @return the function of type cost=f(permutation) #' #' @examples #' n=6 #' #processing times #' p <- sample(100,n,replace=TRUE) #' #weights #' w <- sample(10,n,replace=TRUE) #' #due dates #' RDD <- c(0.2, 0.4, 0.6,0.8,1.0) #' TF <- c(0.2, 0.4, 0.6,0.8,1.0) #' i <- 1 #' j <- 1 #' P <- sum(p) #' d <- runif(n,min=P*(1-TF[i]-RDD[j]/2),max=P*(1-TF[i]+RDD[j]/2)) #' #create WT objective function #' fun <- benchmarkGeneratorWT(p,w,d) #' fun(1:n) #' fun(n:1) #' #' @seealso \code{\link{benchmarkGeneratorQAP}}, \code{\link{benchmarkGeneratorTSP}}, \code{\link{benchmarkGeneratorFSP}} #' @export ################################################################################### benchmarkGeneratorWT <- function(p, w, d) { # Generator function p w #lazy evaluation fix, faster than force() d n= length(p) function(x){ px <- p[x] dx <- d[x] wx <- w[x] s=c(0,cumsum(px[-n])) Ti=pmax(s+px-dx,0)*wx return(sum(Ti)) } }
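## Commented sketch (not run): total weighted tardiness computed by hand for a tiny
## 2-job instance, matching the closure above (completion times s+px, tardiness
## pmax(s+px-dx,0)*wx). The instance values are illustrative assumptions.
# p <- c(3,2); w <- c(1,2); d <- c(2,7)
# fun <- benchmarkGeneratorWT(p,w,d)
# fun(c(1,2)) # job 1 completes at 3 (tardiness 1*1), job 2 at 5 (on time): cost 1
# fun(c(2,1)) # job 2 completes at 2 (on time), job 1 at 5 (tardiness 3*1): cost 3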
/scratch/gouwar.j/cran-all/cranData/CEGO/R/permutationBenchmarkFunctions.R
###################################################################################
#' Interchange Distance for Permutations
#'
#' The interchange distance is an edit-distance, counting how many edit operations (here: interchanges, i.e., transpositions of two arbitrary elements) have to be
#' performed to transform permutation x into permutation y.
#'
#' @param x first permutation (integer vector)
#' @param y second permutation (integer vector)
#'
#' @return numeric distance value \deqn{d(x,y)}, scaled to values between 0 and 1 (based on the maximum possible distance between two permutations)
#'
#' @references Schiavinotto, Tommaso, and Thomas Stuetzle. "A review of metrics on permutations for search landscape analysis." Computers & operations research 34.10 (2007): 3143-3153.
#'
#' @examples
#' x <- 1:5
#' y <- c(1,4,3,2,5)
#' distancePermutationInterchange(x,y)
#' p <- replicate(10,sample(1:5),simplify=FALSE)
#' distanceMatrix(p,distancePermutationInterchange)
#'
#' @export
###################################################################################
distancePermutationInterchange <- function(x, y){
	N<-length(x)
	if(N!=length(y)|!is.numeric(x)|!is.numeric(y)) stop("Incorrect input to distance function, only permutations of same length are allowed.")
	#x<-y[order(x)]
	#result <- .Call("permutationDistanceInterchange", as.integer(x),as.integer(y), PACKAGE="CEGO")
	result <- .Call(C_permutationDistanceInterchange, as.integer(x),as.integer(y))
	(N-result) / (N-1)
}

###################################################################################
# Longest Common Subsequence Distance for Permutations
#
# DEPRECATED, see \code{\link{distancePermutationInsert}}.
#
# @param x first permutation (integer vector)
# @param y second permutation (integer vector)
#
# @return numeric distance value \deqn{d(x,y)}, scaled to values between 0 and 1 (based on the maximum possible distance between two permutations)
#
# @export
# @keywords internal
###################################################################################
#distancePermutationLCSeq<- function(x, y){
#	.Deprecated("distancePermutationInsert")
	#N <- length(x)
	#if(N!=length(y)|!is.numeric(x)|!is.numeric(y)) stop("Incorrect input to distance function, only permutations of same length are allowed.")
	#result <- .Call("permutationDistanceLongestCommonSubsequence", as.integer(x),as.integer(y), PACKAGE="CEGO")
	#(N-result)/(N-1) # N-1 is for PERMUTATIONS only, because two permutations have always at least a common string/sequence of length one.
	# this will be different for strings where two strings may have no single character in common. permutations always contain all
	# of N characters.
	### ## insert is identical but faster ###
#	distancePermutationInsert(x,y)
#}

###################################################################################
#' Longest Common Substring Distance for Permutations
#'
#' Distance between permutations. Based on the longest string of adjacent elements that two permutations have in common.
#'
#' @param x first permutation (integer vector)
#' @param y second permutation (integer vector)
#'
#'
#' @return numeric distance value \deqn{d(x,y)}, scaled to values between 0 and 1 (based on the maximum possible distance between two permutations)
#'
#' @references Hirschberg, Daniel S. "A linear space algorithm for computing maximal common subsequences." Communications of the ACM 18.6 (1975): 341-343.
#'
#' @examples
#' x <- 1:5
#' y <- c(5,1,2,3,4)
#' distancePermutationLCStr(x,y)
#' p <- replicate(10,sample(1:5),simplify=FALSE)
#' distanceMatrix(p,distancePermutationLCStr)
#'
#' @export
#'
###################################################################################
distancePermutationLCStr <- function(x, y){
	N <- length(x)
	if(N!=length(y)|!is.numeric(x)|!is.numeric(y)) stop("Incorrect input to distance function, only permutations of same length are allowed.")
	#result <- .Call("permutationDistanceLongestCommonSubstring", as.integer(x),as.integer(y), PACKAGE="CEGO")
	result <- .Call(C_permutationDistanceLongestCommonSubstring, as.integer(x),as.integer(y))
	(N-result)/(N-1)
}

###################################################################################
#' Levenshtein Distance for Permutations
#'
#' Levenshtein Distance, often just called "Edit Distance". The number of insertions, substitutions or deletions to turn one permutation (or string of equal length) into another.
#'
#' @param x first permutation (integer vector)
#' @param y second permutation (integer vector)
#'
#' @return numeric distance value \deqn{d(x,y)}, scaled to values between 0 and 1 (based on the maximum possible distance between two permutations)
#'
#' @references Levenshtein, Vladimir I. "Binary codes capable of correcting deletions, insertions and reversals." Soviet physics doklady. Vol. 10. 1966.
#'
#' @examples
#' x <- 1:5
#' y <- c(1,2,5,4,3)
#' distancePermutationLevenshtein(x,y)
#' p <- replicate(10,sample(1:5),simplify=FALSE)
#' distanceMatrix(p,distancePermutationLevenshtein)
#'
#' @export
#'
###################################################################################
distancePermutationLevenshtein <- function(x, y){
	N <- length(x)
	if(N!=length(y)|!is.numeric(x)|!is.numeric(y)) stop("Incorrect input to distance function, only permutations of same length are allowed.")
	.Call(C_permutationDistanceLevenshtein, as.integer(x),as.integer(y)) / N
}

###################################################################################
#' Swap-Distance for Permutations
#'
#' The swap distance is an edit-distance, counting how many edit operations (here: swaps, i.e., transpositions of two adjacent elements) have to be
#' performed to transform permutation x into permutation y.
#' Note: In v2.4.0 of CEGO and earlier, this function actually computed the swap distance on the inverted permutations
#' (i.e., on the rankings, rather than the ordering).
#' This is now (v2.4.2 and later) corrected by inverting the permutations x and y before computing the distance (i.e., computing the ordering first).
#' The original behavior can be reproduced by \code{\link{distancePermutationSwapInv}}.
#' This issue was kindly reported by Manuel Lopez-Ibanez and the difference in terms of behavior is discussed by Ekhine Irurozki and him (2021).
#'
#' @param x first permutation (integer vector)
#' @param y second permutation (integer vector)
#'
#' @return numeric distance value \deqn{d(x,y)}, scaled to values between 0 and 1 (based on the maximum possible distance between two permutations)
#'
#' @references Schiavinotto, Tommaso, and Thomas Stuetzle. "A review of metrics on permutations for search landscape analysis." Computers & operations research 34.10 (2007): 3143-3153.
#' @references Irurozki, Ekhine and Lopez-Ibanez, Manuel. Unbalanced Mallows Models for Optimizing Expensive Black-Box Permutation Problems. In Proceedings of the Genetic and Evolutionary Computation Conference, GECCO 2021. ACM Press, New York, NY, 2021.
doi: 10.1145/3449639.3459366 #' #' @examples #' x <- 1:5 #' y <- c(1,2,3,5,4) #' distancePermutationSwap(x,y) #' p <- replicate(10,sample(1:5),simplify=FALSE) #' distanceMatrix(p,distancePermutationSwap) #' #' @export #' ################################################################################### distancePermutationSwap <- function(x, y){ N <- length(x) #x <- order(x) #y <- order(y) if(N!=length(y)|!is.numeric(x)|!is.numeric(y)) stop("Incorrect input to distance function, only permutations of same length are allowed.") result <- .Call(C_permutationDistanceSwap, as.integer(x),as.integer(y)) 2*result / (N^2 - N) } ################################################################################### #' Inverse-Swap-Distance for Permutations #' #' The swap distance on the inverse of permutations x and y. #' See \code{\link{distancePermutationSwap}} for non-inversed version. #' #' @param x first permutation (integer vector) #' @param y second permutation (integer vector) #' #' @return numeric distance value \deqn{d(x,y)}, scaled to values between 0 and 1 (based on the maximum possible distance between two permutations) #' #' @examples #' x <- 1:5 #' y <- c(1,2,3,5,4) #' distancePermutationSwapInv(x,y) #' p <- replicate(10,sample(1:5),simplify=FALSE) #' distanceMatrix(p,distancePermutationSwapInv) #' #' @export ################################################################################### distancePermutationSwapInv <- function(x, y){ N <- length(x) if(N!=length(y)|!is.numeric(x)|!is.numeric(y)) stop("Incorrect input to distance function, only permutations of same length are allowed.") result <- .Call(C_permutationDistanceSwapInv, as.integer(x),as.integer(y)) 2*result / (N^2 - N) } ################################################################################### #' R-Distance for Permutations #' #' R distance or unidirectional adjacency distance. Based on count of number of times that a two element sequence in x also occurs in y, in the same order. #' #' @param x first permutation (integer vector) #' @param y second permutation (integer vector) #' #' @return numeric distance value \deqn{d(x,y)}, scaled to values between 0 and 1 (based on the maximum possible distance between two permutations) #' #' @references Sevaux, Marc, and Kenneth Soerensen. "Permutation distance measures for memetic algorithms with population management." Proceedings of 6th Metaheuristics International Conference (MIC'05). 2005. #' @references Reeves, Colin R. "Landscapes, operators and heuristic search." Annals of Operations Research 86 (1999): 473-490. #' #' @examples #' x <- 1:5 #' y <- c(1,2,3,5,4) #' distancePermutationR(x,y) #' p <- replicate(10,sample(1:5),simplify=FALSE) #' distanceMatrix(p,distancePermutationR) #' #' @export #' ################################################################################### distancePermutationR <- function(x, y){ N<-length(x) if(N!=length(y)|!is.numeric(x)|!is.numeric(y)) stop("Incorrect input to distance function, only permutations of same length are allowed.") #result <- .Call("permutationDistanceR", as.integer(x), as.integer(y), PACKAGE="CEGO") result <- .Call(C_permutationDistanceR, as.integer(x), as.integer(y)) result / (N - 1) } ################################################################################### #' Adjacency Distance for Permutations #' #' Bi-directional adjacency distance for permutations, depending on how often two elements are neighbours in both permutations x and y. 
#'
#' @param x first permutation (integer vector)
#' @param y second permutation (integer vector)
#'
#' @return numeric distance value \deqn{d(x,y)}, scaled to values between 0 and 1 (based on the maximum possible distance between two permutations)
#'
#' @references Sevaux, Marc, and Kenneth Soerensen. "Permutation distance measures for memetic algorithms with population management." Proceedings of 6th Metaheuristics International Conference (MIC'05). 2005.
#' @references Reeves, Colin R. "Landscapes, operators and heuristic search." Annals of Operations Research 86 (1999): 473-490.
#'
#' @examples
#' x <- 1:5
#' y <- 5:1
#' distancePermutationAdjacency(x,y)
#' p <- replicate(10,sample(1:5),simplify=FALSE)
#' distanceMatrix(p,distancePermutationAdjacency)
#'
#' @export
#'
###################################################################################
distancePermutationAdjacency <- function(x, y){
	N<-length(x)
	if(N!=length(y)|!is.numeric(x)|!is.numeric(y)) stop("Incorrect input to distance function, only permutations of same length are allowed.")
	#result <- .Call("permutationDistanceAdjacency", as.integer(x), as.integer(y), PACKAGE="CEGO")
	result <- .Call(C_permutationDistanceAdjacency, as.integer(x), as.integer(y))
	(N-result-1) / (N - 1)
}

###################################################################################
#' Position Distance for Permutations
#'
#' Position distance (also known as Spearman's footrule), scaled to values between 0 and 1.
#'
#' @param x first permutation (integer vector)
#' @param y second permutation (integer vector)
#'
#' @return numeric distance value \deqn{d(x,y)}, scaled to values between 0 and 1 (based on the maximum possible distance between two permutations)
#'
#' @references Schiavinotto, Tommaso, and Thomas Stuetzle. "A review of metrics on permutations for search landscape analysis." Computers & operations research 34.10 (2007): 3143-3153.
#' @references Reeves, Colin R. "Landscapes, operators and heuristic search." Annals of Operations Research 86 (1999): 473-490.
#'
#' @examples
#' x <- 1:5
#' y <- c(1,3,5,4,2)
#' distancePermutationPosition(x,y)
#' p <- replicate(10,sample(1:5),simplify=FALSE)
#' distanceMatrix(p,distancePermutationPosition)
#'
#' @export
#'
###################################################################################
distancePermutationPosition <- function(x, y){
	N<-length(x)
	if(N!=length(y)|!is.numeric(x)|!is.numeric(y)) stop("Incorrect input to distance function, only permutations of same length are allowed.")
	#dis <- .Call("permutationDistancePosition", as.integer(x), as.integer(y), PACKAGE="CEGO")
	dis <- .Call(C_permutationDistancePosition, as.integer(x), as.integer(y))
	if(N%%2) #scale to [0;1] in case of odd N
		dis <- dis / ((N^2-1)/ 2)
	else #scale to [0;1] in case of even N
		dis <- dis / (N^2 / 2)
	dis
}

###################################################################################
#' Squared Position Distance for Permutations
#'
#' Squared position distance (related to Spearman's rank correlation coefficient), scaled to values between 0 and 1.
#'
#' @param x first permutation (integer vector)
#' @param y second permutation (integer vector)
#'
#' @return numeric distance value \deqn{d(x,y)}, scaled to values between 0 and 1 (based on the maximum possible distance between two permutations)
#'
#' @references Schiavinotto, Tommaso, and Thomas Stuetzle. "A review of metrics on permutations for search landscape analysis." Computers & operations research 34.10 (2007): 3143-3153.
#' @references Reeves, Colin R.
"Landscapes, operators and heuristic search." Annals of Operations Research 86 (1999): 473-490. #' #' @examples #' x <- 1:5 #' y <- c(1,3,5,4,2) #' distancePermutationPosition2(x,y) #' p <- replicate(10,sample(1:5),simplify=FALSE) #' distanceMatrix(p,distancePermutationPosition2) #' #' @export #' ################################################################################### distancePermutationPosition2 <- function(x, y){ N<-length(x) if(N!=length(y)|!is.numeric(x)|!is.numeric(y)) stop("Incorrect input to distance function, only permutations of same length are allowed.") #result <- .Call("permutationDistancePosition2", as.integer(x), as.integer(y), PACKAGE="CEGO") result <- .Call(C_permutationDistancePosition2, as.integer(x), as.integer(y)) result / ((N^3-N)/3) } ################################################################################### #' Hamming Distance for Permutations #' #' Hamming distance for permutations, scaled to values between 0 and 1. #' That is, the number of unequal elements of two permutations, divided by the permutations length. #' #' @param x first permutation (integer vector) #' @param y second permutation (integer vector) #' #' @return numeric distance value \deqn{d(x,y)}, scaled to values between 0 and 1 (based on the maximum possible distance between two permutations) #' #' @examples #' x <- 1:5 #' y <- c(5,1,2,3,4) #' distancePermutationHamming(x,y) #' p <- replicate(10,sample(1:5),simplify=FALSE) #' distanceMatrix(p,distancePermutationHamming) #' #' @export ################################################################################### distancePermutationHamming <- function(x, y){ N <- length(x) if(N!=length(y)|!is.numeric(x)|!is.numeric(y)) stop("Incorrect input to distance function, only permutations of same length are allowed.") sum(x != y)/N } #distancePermutationHamming <- function(x, y){ # N <- length(x) # if(N!=length(y)|!is.numeric(x)|!is.numeric(y)) stop("Incorrect input to distance function, only permutations of same length are allowed.") # .Call("permutationDistanceHamming", as.integer(x), as.integer(y),PACKAGE="CEGO") / N #this works,but is actually slower than a pure R implementation #} ################################################################################### #' Euclidean Distance for Permutations #' #' Euclidean distance for permutations, scaled to values between 0 and 1: #' \deqn{d(x,y) = \frac{1}{r} \sqrt(\sum_{i=1}^n (x_i - y_i)^2) }{ d(x,y) = 1/r * sqrt(\sum_{i=1}^n (x_i - y_i)^2)} #' where n is the length of the permutations x and y, and scaling factor \eqn{r=sqrt(2*4*c*(c+1)*(2*c+1)/6)} with \eqn{c=(n-1)/2} (if n is odd) #' or \eqn{r=sqrt(2*c*(2*c-1)*(2*c+1)/3)} with \eqn{c=n/2} (if n is even). 
#'
#' @param x first permutation (integer vector)
#' @param y second permutation (integer vector)
#'
#' @return numeric distance value \deqn{d(x,y)}, scaled to values between 0 and 1 (based on the maximum possible distance between two permutations)
#'
#' @examples
#' x <- 1:5
#' y <- c(5,1,2,3,4)
#' distancePermutationEuclidean(x,y)
#' p <- replicate(10,sample(1:5),simplify=FALSE)
#' distanceMatrix(p,distancePermutationEuclidean)
#'
#' @export
###################################################################################
distancePermutationEuclidean <- function(x, y){
	N<-length(x)
	if(N!=length(y)|!is.numeric(x)|!is.numeric(y)) stop("Incorrect input to distance function, only permutations of same length are allowed.")
	#result <- .Call("permutationDistanceEuclidean", as.integer(x), as.integer(y), PACKAGE="CEGO") #this works,but is actually slower than a pure R implementation, as below. at least for permutation length 30
	#mdis <- sqrt(result)
	mdis <- sqrt(sum((x-y)^2))
	if(N%%2){ #scale to [0;1] in case of odd N
		n=(N-1)/2
		dis <- mdis / sqrt(8*n*(n+1)*(2*n+1)/6)
	}else{ #scale to [0;1] in case of even N
		n=N/2
		dis <- mdis / sqrt(2*n*(4*n^2-1)/3)
	}
	dis
}

###################################################################################
#' Manhattan Distance for Permutations
#'
#' Manhattan distance for permutations, scaled to values between 0 and 1:
#' \deqn{d(x,y) = \frac{1}{r} \sum_{i=1}^n |x_i - y_i| }{ d(x,y) = 1/r * \sum_{i=1}^n |x_i - y_i|}
#' where n is the length of the permutations x and y, and scaling factor \eqn{r=(n^2-1)/2} (if n is odd)
#' or \eqn{r=(n^2)/2} (if n is even).
#'
#' @param x first permutation (integer vector)
#' @param y second permutation (integer vector)
#'
#' @return numeric distance value \deqn{d(x,y)}, scaled to values between 0 and 1 (based on the maximum possible distance between two permutations)
#'
#' @examples
#' x <- 1:5
#' y <- c(5,1,2,3,4)
#' distancePermutationManhattan(x,y)
#' p <- replicate(10,sample(1:5),simplify=FALSE)
#' distanceMatrix(p,distancePermutationManhattan)
#'
#' @export
###################################################################################
distancePermutationManhattan <- function(x, y){
	N <- length(x)
	if(N!=length(y)|!is.numeric(x)|!is.numeric(y)) stop("Incorrect input to distance function, only permutations of same length are allowed.")
	mdis<-sum(abs(x-y))
	if(N%%2) #scale to [0;1] in case of odd N
		dis <- mdis / ((N^2-1)/ 2)
	else #scale to [0;1] in case of even N
		dis <- mdis / (N^2 / 2)
	dis
}

###################################################################################
#' Chebyshev Distance for Permutations
#'
#' Chebyshev distance for permutations. Only the scaling to values between 0 and 1 is specific to permutations:
#' \deqn{d(x,y) = \frac{max(|x - y|) }{ (n-1) } }{d(x,y) = max(|x - y|) / (n-1)}
#' where n is the length of the permutations x and y.
#'
#' @param x first permutation (integer vector)
#' @param y second permutation (integer vector)
#'
#' @return numeric distance value \deqn{d(x,y)}, scaled to values between 0 and 1 (based on the maximum possible distance between two permutations)
#'
#' @examples
#' x <- 1:5
#' y <- c(5,1,2,3,4)
#' distancePermutationChebyshev(x,y)
#' p <- replicate(10,sample(1:5),simplify=FALSE)
#' distanceMatrix(p,distancePermutationChebyshev)
#'
#' @export
###################################################################################
distancePermutationChebyshev <- function(x, y){
	N <- length(x)
	if(N!=length(y)|!is.numeric(x)|!is.numeric(y)) stop("Incorrect input to distance function, only permutations of same length are allowed.")
	max(abs(x-y)) / (N-1)
}

###################################################################################
#' Lee Distance for Permutations
#'
#' Usually a string distance, here adapted to permutations with a slightly different definition:
#' \deqn{d(x,y) = \sum_{i=1}^n \min(|x_i - y_i|, n - |x_i - y_i|)}{ d(x,y) = \sum_{i=1}^n min(|x_i - y_i|, n - |x_i - y_i|)}
#' where n is the length of the permutations x and y.
#'
#' @param x first permutation (integer vector)
#' @param y second permutation (integer vector)
#'
#' @return numeric distance value \deqn{d(x,y)}, scaled to values between 0 and 1 (based on the maximum possible distance between two permutations)
#'
#' @references Lee, C., "Some properties of nonbinary error-correcting codes," Information Theory, IRE Transactions on, vol.4, no.2, pp.77,82, June 1958
#'
#' @examples
#' x <- 1:5
#' y <- c(5,1,2,3,4)
#' distancePermutationLee(x,y)
#' p <- replicate(10,sample(1:5),simplify=FALSE)
#' distanceMatrix(p,distancePermutationLee)
#'
#' @export
#'
###################################################################################
distancePermutationLee <- function(x,y){
	N <- length(x)
	if(N!=length(y)|!is.numeric(x)|!is.numeric(y)) stop("Incorrect input to distance function, only permutations of same length are allowed.")
	#mdis <- .Call("permutationDistanceLee", as.integer(x), as.integer(y), PACKAGE="CEGO")
	mdis <- .Call(C_permutationDistanceLee, as.integer(x), as.integer(y)) #this works, but is actually slower than a pure R implementation, as below. at least for permutation length 30
	#mdis <- sum(pmin(abs(x-y),N-abs(x-y)))
	if(N%%2) #scale to [0;1] in case of odd N
		dis <- mdis / ((N^2-1)/ 2)
	else #scale to [0;1] in case of even N
		dis <- mdis / (N^2 / 2)
	dis
}

###################################################################################
#' Insert Distance for Permutations
#'
#' The Insert Distance is an edit distance. It counts the minimum number of delete/insert operations
#' required to transform one permutation into another. A delete/insert operation shifts one element to a new position.
#' All other elements move accordingly to make place for the element. E.g., the following shows a single delete/insert move that
#' sorts the corresponding permutation: 1 4 2 3 5 -> 1 2 3 4 5.
#'
#' @param x first permutation (integer vector)
#' @param y second permutation (integer vector)
#'
#' @return numeric distance value \deqn{d(x,y)}, scaled to values between 0 and 1 (based on the maximum possible distance between two permutations)
#'
#' @references Schiavinotto, Tommaso, and Thomas Stuetzle. "A review of metrics on permutations for search landscape analysis." Computers & operations research 34.10 (2007): 3143-3153.
#' @references Wikipedia contributors, "Longest increasing subsequence", Wikipedia, The Free Encyclopedia, 12 November 2014, 19:38 UTC, [accessed 13 November 2014]
#'
#' @examples
#' x <- 1:5
#' y <- c(5,1,2,3,4)
#' distancePermutationInsert(x,y)
#' p <- replicate(10,sample(1:5),simplify=FALSE)
#' distanceMatrix(p,distancePermutationInsert)
#'
#' @export
#'
###################################################################################
distancePermutationInsert <- function(x, y){
    N <- length(x)
    if(N!=length(y)|!is.numeric(x)|!is.numeric(y)) stop("Incorrect input to distance function, only permutations of same length are allowed.")
    #x<-order(y)[x] #Important note: this line means "p1 * p2^{-1}". Schiavinotto and Stuetzle say it should be "p2^{-1} * p1". That seems to be a typo/error (of course it does not matter which permutation is inverted, but the inverted permutation should be on the right). This is now calculated in C code.
    #result <- .Call("permutationDistanceInsert", as.integer(x), as.integer(y), PACKAGE="CEGO")
    result <- .Call(C_permutationDistanceInsert, as.integer(x), as.integer(y))
    (N-result)/(N-1)
}

###################################################################################
#' Cosine Distance for Permutations
#'
#' The Cosine distance for permutations is derived from the Cosine similarity measure
#' which has been applied in fields like text mining.
#' It is based on the scalar product of two vectors (here: permutations).
#'
#' @param x first permutation (integer vector)
#' @param y second permutation (integer vector)
#'
#' @return numeric distance value \deqn{d(x,y)}, scaled to values between 0 and 1 (based on the maximum possible distance between two permutations)
#'
#' @references Singhal, Amit (2001). "Modern Information Retrieval: A Brief Overview". Bulletin of the IEEE Computer Society Technical Committee on Data Engineering 24 (4): 35-43
#'
#' @examples
#' x <- 1:5
#' y <- c(5,1,2,3,4)
#' distancePermutationCos(x,y)
#' p <- replicate(10,sample(1:5),simplify=FALSE)
#' distanceMatrix(p,distancePermutationCos)
#'
#' @export
###################################################################################
distancePermutationCos <- function(x,y){
    N=length(x)
    if(N!=length(y)|!is.numeric(x)|!is.numeric(y)) stop("Incorrect input to distance function, only permutations of same length are allowed.")
    as.numeric(1-((x%*%y)-(N*(N+1)*(N+2)/6))/((N^3-N)/6)) # |a| = |b| because permutation -> |a| * |a| = sum_{i=1}^n i^2 = (N*(N+1)*(2*N+1)/6)
} # the subtracted term N*(N+1)*(N+2)/6 is a tetrahedral number: the minimal scalar product of two permutations of 1:N

###################################################################################
#' Lexicographic permutation distance
#'
#' This function calculates the lexicographic permutation distance. That is the difference of the positions
#' that both permutations would receive in a lexicographic ordering. Note that this distance
#' measure can quickly become inaccurate if the length of the permutations grows too large, due
#' to being based on the factorial of the length. In general, permutations longer than 100 elements should
#' be avoided.
#' #' @param x first permutation (integer vector) #' @param y second permutation (integer vector) #' #' @return numeric distance value \deqn{d(x,y)}, scaled to values between 0 and 1 (based on the maximum possible distance between two permutations) #' #' @seealso \code{\link{lexicographicPermutationOrderNumber}} #' #' @examples #' x <- 1:5 #' y <- c(1,2,3,5,4) #' distancePermutationLex(x,y) #' p <- replicate(10,sample(1:5),simplify=FALSE) #' distanceMatrix(p,distancePermutationLex) #' #' @export ################################################################################### distancePermutationLex <- function(x,y){ N <- length(x) if(N!=length(y)|!is.numeric(x)|!is.numeric(y)) stop("Incorrect input to distance function, only permutations of same length are allowed.") abs(lexicographicPermutationOrderNumber(x) - lexicographicPermutationOrderNumber(y)) / (factorial(N)-1) } ################################################################################### #' Lexicographic order number #' #' This function returns the position-number that a permutation would receive in a lexicographic ordering. #' It is used in the lexicographic distance measure. #' #' @param x permutation (integer vector) #' #' @return numeric value giving position in lexicographic order. #' #' @seealso \code{\link{distancePermutationLex}} #' #' @examples #' lexicographicPermutationOrderNumber(1:5) #' lexicographicPermutationOrderNumber(c(1,2,3,5,4)) #' lexicographicPermutationOrderNumber(c(1,2,4,3,5)) #' lexicographicPermutationOrderNumber(c(1,2,4,5,3)) #' lexicographicPermutationOrderNumber(c(1,2,5,3,4)) #' lexicographicPermutationOrderNumber(c(1,2,5,4,3)) #' lexicographicPermutationOrderNumber(c(1,3,2,4,5)) #' lexicographicPermutationOrderNumber(5:1) #' lexicographicPermutationOrderNumber(1:7) #' lexicographicPermutationOrderNumber(7:1) #' #' @export #' ################################################################################### lexicographicPermutationOrderNumber <- function(x){ N <- length(x) if(!is.numeric(x)) stop("Incorrect input to lexicographicPermutationOrderNumber, should be permutation (numeric vector).") #result <- .Call("lexPermOrder", as.integer(x), PACKAGE="CEGO") result <- .Call(C_lexPermOrder, as.integer(x)) sum(result*factorial((N-1):0))+1 }
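
# Illustrative sketch (not part of the package): a pure-R version of the
# factorial-number-system ("Lehmer code") computation that the compiled routine
# C_lexPermOrder is assumed to perform. For each position, count how many of the
# remaining elements are smaller than the current one; these counts are the
# digits of the order number in the factorial number system.
lexicographicPermutationOrderNumberR <- function(x){
    N <- length(x)
    digits <- sapply(1:N, function(i) sum(x[i:N] < x[i]))
    sum(digits*factorial((N-1):0))+1
}
#lexicographicPermutationOrderNumberR(c(1,2,3,5,4)) # 2, matching lexicographicPermutationOrderNumber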
# ---- source file: CEGO/R/permutationDistances.r ----
################################################################################### #' Permutation Generator Function #' #' Returns a function that generates random permutations of length N. #' Can be used to generate individual solutions for permutation problems, e.g., Travelling Salesperson Problem #' #' @param N length of the permutations returned #' #' @return returns a function, without any arguments #' #' @examples #' fun <- solutionFunctionGeneratorPermutation(10) #' fun() #' fun() #' fun() #' #' @export ################################################################################### solutionFunctionGeneratorPermutation <- function(N){ N #lazy evaluation fix, faster than force() function()sample(1:N,replace=FALSE) } ################################################################################### #' Interchange Mutation for Permutations #' #' Given a population of permutations, this function mutates all #' individuals by randomly interchanging two arbitrary elements of the permutation. #' #' @param population List of permutations #' @param parameters list of parameters, currently only uses parameters$mutationRate, #' which should be between 0 and 1 (but can be larger than 1). The mutation rate determines the number of interchanges #' performed, relative to the permutation length (N). 0 means none. 1 means N interchanges. #' The default is 1/N. #' #' @return mutated population #' #' @export ################################################################################### mutationPermutationInterchange <- function(population, parameters=list()){ N <- length(population[[1]]) if(is.null(parameters$mutationRate)) parameters$mutationRate <- 1/N mrate <- parameters$mutationRate popsize <- length(population) mutations <- ceiling(N * mrate) if(mutations<=0) return(population) samples <- mutations * popsize index1 <- sample.int(N,samples,TRUE,NULL) index2 <- sample.int(N,samples,TRUE,NULL) mutationPermutationInterchangeCore(population,popsize,mutations,index1,index2) } #mutationPermutationInterchange(list(1:5),list(mutationRate=1)) ################################################################################### #' Interchange of permutation elements #' #' Support function for \code{\link{mutationPermutationInterchange}} and \code{\link{mutationPermutationSwap}}. #' #' @param population List of permutations #' @param popsize population size #' @param mutations number of mutated elements for each individual #' @param index1 vector of first indices, one element for each interchange #' @param index2 vector of second indices, one element for each interchange #' #' @return mutated population #' #' @keywords internal ################################################################################### mutationPermutationInterchangeCore <- function(population,popsize,mutations,index1,index2){ newpop <- list() for(i in 1:popsize){ individual <- population[[i]] if(mutations == 1){ val1 <- individual[index1[i]] individual[index1[i]] <- individual[index2[i]] individual[index2[i]] <- val1 }else{ j <- ((i-1)*mutations+1) : (i*mutations) for(jj in j){ i1 <- index1[jj] i2 <- index2[jj] val1= individual[i1] individual[i1]= individual[i2] individual[i2]= val1 } } newpop <- c(newpop, list(individual)) } newpop } ################################################################################### #' Swap Mutation for Permutations #' #' Given a population of permutations, this function mutates all #' individuals by randomly interchanging two adjacent elements of the permutation. 
#'
#' @param population List of permutations
#' @param parameters list of parameters, currently only uses parameters$mutationRate,
#' which should be between 0 and 1 (but can be larger than 1). The mutation rate determines the number of swaps
#' performed, relative to the permutation length (N). 0 means none. 1 means N swaps.
#' The default is 1/N.
#'
#' @return mutated population
#'
#' @export
###################################################################################
mutationPermutationSwap <- function(population,parameters=list()){
    N <- length(population[[1]])
    if(is.null(parameters$mutationRate)) parameters$mutationRate <- 1/N
    mrate <- parameters$mutationRate
    popsize <- length(population)
    mutations <- ceiling(N * mrate)
    if(mutations<=0) return(population)
    samples <- mutations * popsize
    index1 <- sample.int(N-1,samples,TRUE,NULL)
    index2 <- index1 +1
    mutationPermutationInterchangeCore(population,popsize,mutations,index1,index2)
}
#mutationPermutationSwap(list(1:5),list(mutationRate=1))

###################################################################################
#' Reversal Mutation for Permutations
#'
#' Given a population of permutations, this function mutates all
#' individuals by randomly selecting two indices, and reversing the respective sub-permutation.
#'
#' @param population List of permutations
#' @param parameters list of parameters, currently only uses parameters$mutationRate,
#' which should be between 0 and 1 (but can be larger than 1). The mutation rate determines the number of reversals
#' performed, relative to the permutation length (N). 0 means none. 1 means N reversals.
#' The default is 1/N.
#'
#' @return mutated population
#'
#' @export
###################################################################################
mutationPermutationReversal <- function(population, parameters=list()){
    N <- length(population[[1]])
    if(is.null(parameters$mutationRate)) parameters$mutationRate <- 1/N
    mrate <- parameters$mutationRate
    popsize <- length(population)
    mutations <- ceiling(N * mrate)
    if(mutations<=0) return(population)
    samples <- mutations * popsize
    newpop <- list()
    index1 <- sample.int(N,samples,TRUE,NULL)
    index2 <- sample.int(N,samples,TRUE,NULL)
    for(i in 1:popsize){
        individual <- population[[i]]
        if(mutations == 1){
            individual[index1[i]:index2[i]] <- individual[index2[i]:index1[i]]
        }else{
            j <- ((i-1)*mutations+1) : (i*mutations)
            for(jj in j){
                i1 <- index1[jj]
                i2 <- index2[jj]
                individual[i1:i2] <- individual[i2:i1]
            }
        }
        newpop <- c(newpop, list(individual))
    }
    newpop
}

###################################################################################
#' Insert Mutation for Permutations
#'
#' Given a population of permutations, this function mutates all
#' individuals by randomly selecting two indices.
#' The element at index1 is moved to position index2; all other elements
#' shift accordingly to fill the gap.
#'
#' @param population List of permutations
#' @param parameters list of parameters, currently only uses parameters$mutationRate,
#' which should be between 0 and 1 (but can be larger than 1). The mutation rate determines the number of insertion moves
#' performed, relative to the permutation length (N). 0 means none. 1 means N insertion moves.
#' The default is 1/N.
#' #' @return mutated population #' #' @export ################################################################################### mutationPermutationInsert <- function(population, parameters=list()){ N <- length(population[[1]]) if(is.null(parameters$mutationRate)) parameters$mutationRate <- 1/N mrate <- parameters$mutationRate popsize <- length(population) mutations <- ceiling(N * mrate) if(mutations<=0) return(population) samples <- mutations * popsize newpop <- list() index1 <- sample.int(N,samples,TRUE,NULL) index2 <- sample.int(N,samples,TRUE,NULL) for(i in 1:popsize){ individual <- population[[i]] #if(mutations == 1){ # individual[index1[i]:index2[i]] <- individual[index2[i]:index1[i]] #}else{ j <- ((i-1)*mutations+1) : (i*mutations) for(jj in j){ i1 <- index1[jj] i2 <- index2[jj] #print(i1) #print(i2) ind1 <- individual[i1] #move out individual <- individual[-i1] if(i2==1) individual <- c(ind1,individual) #and insert else if(i2==N) individual <- c(individual,ind1) #and insert else individual <- c(individual[1:(i2-1)],ind1,individual[i2:(N-1)]) #and insert } #} newpop <- c(newpop, list(individual)) } newpop } #mutationPermutationInsert(list(1:5)) ################################################################################### #' Cycle Crossover (CX) for Permutations #' #' Given a population of permutations, this function recombines each #' individual with another individual. #' Note, that \code{\link{optimEA}} will not pass the whole population #' to recombination functions, but only the chosen parents. #' #' @param population List of permutations #' @param parameters not used #' #' @return population of recombined offspring #' #' @export ################################################################################### recombinationPermutationCycleCrossover <- function(population, parameters){ popsize <- length(population)/2 ## assumes nParents == 2 newpop <- list() for(i in 1:popsize){ j <- popsize + i ## second parent parent1 <- population[[i]] parent2 <- population[[j]] e1 <- parent1[1] e2 <- parent2[1] parent2[1] <- e1 while(e1 != e2){ e1 <- e2 rplc <- which(parent1==e1) e2 <- parent2[rplc] parent2[rplc] <- e1 } newpop <- c(newpop, list(parent2)) } newpop } ################################################################################### #' Order Crossover 1 (OX1) for Permutations #' #' Given a population of permutations, this function recombines each #' individual with another individual. #' Note, that \code{\link{optimEA}} will not pass the whole population #' to recombination functions, but only the chosen parents. 
#' #' @param population List of permutations #' @param parameters not used #' #' @return population of recombined offspring #' #' @export ################################################################################### recombinationPermutationOrderCrossover1 <- function(population, parameters){ popsize <- length(population)/2 ## assumes nParents == 2 N <- length(population[[1]]) #number of elements newpop <- list() for(i in 1:popsize){ j <- popsize + i ## second parent parent1 <- population[[i]] parent2 <- population[[j]] idx <- sort(sample(N,2) )## select part from first parent pnew <- setdiff(parent2,parent1[idx[1]:idx[2]])## identify parts from second parent which are not in the selected part parent1[-(idx[1]:idx[2])] <- pnew #insert newpop <- c(newpop, list(parent1)) } newpop } ################################################################################### #' Position Based Crossover (POS) for Permutations #' #' Given a population of permutations, this function recombines each #' individual with another individual. #' Note, that \code{\link{optimEA}} will not pass the whole population #' to recombination functions, but only the chosen parents. #' #' @param population List of permutations #' @param parameters not used #' #' @return population of recombined offspring #' #' @export ################################################################################### recombinationPermutationPositionBased <- function(population, parameters){ popsize <- length(population)/2 ## assumes nParents == 2 N <- length(population[[1]]) #number of elements newpop <- list() for(i in 1:popsize){ j <- popsize + i ## second parent parent1 <- population[[i]] parent2 <- population[[j]] idx <- sample(N,N/2,replace=FALSE) parent1[-idx] <- setdiff(parent2,parent1[idx]) newpop <- c(newpop, list(parent1)) } newpop } ################################################################################### #' Alternating Position Crossover (AP) for Permutations #' #' Given a population of permutations, this function recombines each #' individual with another individual. #' Note, that \code{\link{optimEA}} will not pass the whole population #' to recombination functions, but only the chosen parents. #' #' @param population List of permutations #' @param parameters not used #' #' @return population of recombined offspring #' #' @export ################################################################################### recombinationPermutationAlternatingPosition <- function(population, parameters){ popsize <- length(population)/2 ## assumes nParents == 2 N <- length(population[[1]]) #number of elements newpop <- list() for(i in 1:popsize){ j <- popsize + i ## second parent parent1 <- population[[i]] parent2 <- population[[j]] pnew <- NULL for(i in 1:N){ if(i%%2==1) pnew <- c(pnew,setdiff(parent1,pnew)[1]) if(i%%2==0) pnew <- c(pnew,setdiff(parent2,pnew)[1]) } newpop <- c(newpop, list(pnew)) } newpop }
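
# Minimal usage sketch (illustrative only; all names below are defined in this
# file): draw four random parents, recombine them into two children with OX1,
# then mutate the children. Wrapped in a function so that sourcing the file
# does not execute it.
demoPermutationOperators <- function(seed=1){
    set.seed(seed)
    gen <- solutionFunctionGeneratorPermutation(6)
    parents <- replicate(4, gen(), simplify=FALSE)
    children <- recombinationPermutationOrderCrossover1(parents, list())
    mutationPermutationReversal(children, list(mutationRate=1/6))
}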
# ---- source file: CEGO/R/permutationOperators.R ----
###################################################################################
#' Levenshtein Distance for Sequences of Numbers
#'
#' Levenshtein distance for two sequences of numbers
#'
#' @param x first vector (numeric vector)
#' @param y second vector (numeric vector)
#'
#' @return numeric distance value \deqn{d(x,y)}
#'
#' @examples
#' #e.g., used for distances between integer sequences
#' x <- c(0,1,10,2,4)
#' y <- c(10,1,0,4,-4)
#' distanceSequenceLevenshtein(x,y)
#' p <- replicate(10,sample(1:5,3,replace=TRUE),simplify=FALSE)
#' distanceMatrix(p,distanceSequenceLevenshtein)
#'
#' @export
###################################################################################
distanceSequenceLevenshtein <- function(x, y){
    .Call(C_numericDistanceLevenshtein, as.numeric(x),as.numeric(y))
}
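
# Illustrative sketch (not part of the package): a pure-R dynamic-programming
# version of the edit distance that the compiled routine
# C_numericDistanceLevenshtein is assumed to implement. dp[i+1,j+1] holds the
# distance between the prefixes x[1:i] and y[1:j].
distanceSequenceLevenshteinR <- function(x, y){
    nx <- length(x)
    ny <- length(y)
    dp <- matrix(0L, nx+1, ny+1)
    dp[,1] <- 0:nx
    dp[1,] <- 0:ny
    for(i in seq_len(nx)){
        for(j in seq_len(ny)){
            cost <- as.integer(x[i] != y[j])
            dp[i+1,j+1] <- min(dp[i,j+1] + 1L,  # deletion
                               dp[i+1,j] + 1L,  # insertion
                               dp[i,j] + cost)  # substitution
        }
    }
    dp[nx+1,ny+1]
}
#distanceSequenceLevenshteinR(c(0,1,10,2,4),c(10,1,0,4,-4)) # assumed to match distanceSequenceLevenshtein on the same input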
# ---- source file: CEGO/R/sequenceDistances.R ----
################################################################################### #' Hamming Distance for Strings #' #' Number of unequal letters in two strings. #' #' @param x first string (class: character) #' @param y second string (class: character) #' #' @return numeric distance value \deqn{d(x,y)} #' #' @examples #' distanceStringHamming("ABCD","AACC") #' #' @export #' ################################################################################### distanceStringHamming <- function(x, y){ #.Call("stringDistanceHamming", as.character(x), as.character(y), PACKAGE="CEGO") .Call(C_stringDistanceHamming, as.character(x), as.character(y)) } ################################################################################### #' Levenshtein Distance for Strings #' #' Number of insertions, deletions and substitutions to transform one string into another #' #' @param x first string (class: character) #' @param y second string (class: character) #' #' @return numeric distance value \deqn{d(x,y)} #' #' @examples #' distanceStringLevenshtein("ABCD","AACC") #' #' @export #' ################################################################################### distanceStringLevenshtein <- function(x, y){ #.Call("stringDistanceLevenshtein", as.character(x), as.character(y), PACKAGE="CEGO") .Call(C_stringDistanceLevenshtein, as.character(x), as.character(y)) } ################################################################################### #' Longest Common Substring distance #' #' Distance between strings, based on the longest common substring. #' #' @param x first string (class: character) #' @param y second string (class: character) #' #' @return numeric distance value \deqn{d(x,y)} #' #' @examples #' distanceStringLCStr("ABCD","AACC") #' #' @export #' ################################################################################### distanceStringLCStr <- function(x, y){ #.Call("stringDistanceLongestCommonSubstring", as.character(x), as.character(y), PACKAGE="CEGO") .Call(C_stringDistanceLongestCommonSubstring, as.character(x), as.character(y)) }
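
# Illustrative sketch (not part of the package): a pure-R equivalent of the
# Hamming computation above -- split both strings into characters and count the
# mismatching positions. Like the compiled routine, it presumes equal-length strings.
distanceStringHammingR <- function(x, y){
    a <- strsplit(x, "")[[1]]
    b <- strsplit(y, "")[[1]]
    sum(a != b)
}
#distanceStringHammingR("ABCD","AACC") # 2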
# ---- source file: CEGO/R/stringDistances.R ----
###################################################################################
#' String Generator Function
#'
#' Returns a function that generates random strings of length N, with given letters.
#' Can be used to generate individual solutions for string-based problems.
#'
#' @param N length of the strings returned
#' @param lts letters allowed in the string
#'
#' @return returns a function, without any arguments
#'
#' @examples
#' fun <- solutionFunctionGeneratorString(10,c("A","C","G","T"))
#' fun()
#' fun()
#' fun()
#'
#' @export
###################################################################################
solutionFunctionGeneratorString <- function(N,lts=c("A","C","G","T")){
    N #lazy evaluation fix, faster than force()
    function()paste(sample(lts,N,replace = TRUE),collapse="")
}

###################################################################################
#' Mutation for Strings
#'
#' Given a population of strings, this function mutates all
#' individuals by randomly changing an element of the string.
#'
#' @param population List of strings
#' @param parameters list of parameters, with \code{parameters$mutationRate} and \code{parameters$lts}.
#' \code{parameters$mutationRate} should be between 0 and 1 (but can be larger than 1). The mutation rate determines the number of changes
#' performed, relative to the string length (N). 0 means none. 1 means N changes.
#' The default is 1/N. \code{parameters$lts} are the possible letters in the string.
#'
#' @return mutated population
#'
#' @export
###################################################################################
mutationStringRandomChange <- function(population, parameters=list()){
    N <- nchar(population[[1]])
    if(is.null(parameters$mutationRate)) parameters$mutationRate <- 1/N
    if(is.null(parameters$lts)) parameters$lts <- c("A","C","G","T")
    mrate <- parameters$mutationRate
    popsize <- length(population)
    mutations <- ceiling(N * mrate)
    lts <- parameters$lts
    if(mutations==0) return(population)
    samples <- mutations * popsize
    index <- sample.int(N,samples,TRUE,NULL)
    newLetter <- sample(lts,samples,TRUE,NULL)
    newpop <- list()
    for(i in 1:popsize){
        individual <- population[[i]]
        if(mutations == 1){
            ind <- index[i]
            substr(individual,ind,ind) <- newLetter[i]
        }else{
            j <- ((i-1)*mutations+1) : (i*mutations)
            for(jj in j){
                ind <- index[jj]
                substr(individual,ind,ind) <- newLetter[jj]
            }
        }
        newpop <- c(newpop, list(individual))
    }
    newpop
}

###################################################################################
#' Single Point Crossover for Strings
#'
#' Given a population of strings, this function recombines each
#' individual with another individual.
#' Note that \code{\link{optimEA}} will not pass the whole population
#' to recombination functions, but only the chosen parents.
#' #' @param population List of strings #' @param parameters not used #' #' @return population of recombined offspring #' #' @export ################################################################################### recombinationStringSinglePointCrossover <- function(population, parameters){ N <- nchar(population[[1]]) popsize <- length(population)/2 newpop <- list() index <- sample.int(N-1,popsize,TRUE,NULL) #index for switch between parent 1 and 2 for each operation for(i in 1:popsize){ j <- popsize + i ## second parent ind <- index[i] parent1 <- population[[i]] parent2 <- population[[j]] child <- paste(substr(parent1,1,ind),substr(parent2,ind+1,N),sep="") newpop <- c(newpop, child) } newpop }
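
# Minimal usage sketch (illustrative only; all names below are defined in this
# file): generate four random DNA-like strings, recombine them into two children
# via single point crossover, then apply the random-change mutation. Wrapped in
# a function so that sourcing the file does not execute it.
demoStringOperators <- function(seed=1){
    set.seed(seed)
    gen <- solutionFunctionGeneratorString(8, c("A","C","G","T"))
    parents <- replicate(4, gen(), simplify=FALSE)
    children <- recombinationStringSinglePointCrossover(parents, list())
    mutationStringRandomChange(children, list(mutationRate=1/8, lts=c("A","C","G","T")))
}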
# ---- source file: CEGO/R/stringOperators.R ----
################################################################################### #' Simulation-based Test Function Generator, Object Interface #' #' Generate test functions for assessment of optimization algorithms with #' non-conditional or conditional simulation, based on real-world data. #' For a more streamlined interface, see \code{\link{testFunctionGeneratorSim}}. #' #' @param xsim list of samples in input space, for simulation #' @param fit an object generated by \code{\link{modelKriging}} #' @param nsim the number of simulations, or test functions, to be created #' @param conditionalSimulation whether (TRUE) or not (FALSE) to use conditional simulation #' @param seed a random number generator seed. Defaults to NA; which means no seed is set. For sake of reproducibility, set this to some integer value.\cr #' #' @return a list of functions, where each function is the interpolation of one simulation realization. The length of the list depends on the nsim parameter. #' #' @seealso \code{\link{modelKriging}}, \code{\link{simulate.modelKriging}}, \code{\link{testFunctionGeneratorSim}} #' #' @references N. A. Cressie. Statistics for Spatial Data. JOHN WILEY & SONS INC, 1993. #' @references C. Lantuejoul. Geostatistical Simulation - Models and Algorithms. Springer-Verlag Berlin Heidelberg, 2002. #' @references Zaefferer, M.; Fischbach, A.; Naujoks, B. & Bartz-Beielstein, T. Simulation Based Test Functions for Optimization Algorithms Proceedings of the Genetic and Evolutionary Computation Conference 2017, ACM, 2017, 8. #' #' @examples #' nsim <- 10 #' seed <- 12345 #' n <- 6 #' set.seed(seed) #' #target function: #' fun <- function(x){ #' exp(-20* x) + sin(6*x^2) + x #' } #' # "vectorize" target #' f <- function(x){sapply(x,fun)} #' # distance function #' dF <- function(x,y)(sum((x-y)^2)) #sum of squares #' #start pdf creation #' # plot params #' par(mfrow=c(4,1),mar=c(2.3,2.5,0.2,0.2),mgp=c(1.4,0.5,0)) #' #test samples for plots #' xtest <- as.list(seq(from=-0,by=0.005,to=1)) #' plot(xtest,f(xtest),type="l",xlab="x",ylab="Obj. function") #' #evaluation samples (training) #' xb <- as.list(runif(n)) #' yb <- f(xb) #' # support samples for simulation #' x <- as.list(sort(c(runif(100),unlist(xb)))) #' # fit the model #' fit <- modelKriging(xb,yb,dF,control=list( #' algThetaControl=list(method="NLOPT_GN_DIRECT_L",funEvals=100),useLambda=FALSE)) #' fit #' #predicted obj. 
function values #' ypred <- predict(fit,as.list(xtest))$y #' plot(unlist(xtest),ypred,type="l",xlab="x",ylab="Estimation") #' points(unlist(xb),yb,pch=19) #' ############################## #' # create test function non conditional #' ############################## #' fun <- createSimulatedTestFunction(x,fit,nsim,FALSE,seed=1) #' ynew <- NULL #' for(i in 1:nsim) #' ynew <- cbind(ynew,fun[[i]](xtest)) #' rangeY <- range(ynew) #' plot(unlist(xtest),ynew[,1],type="l",ylim=rangeY,xlab="x",ylab="Simulation") #' for(i in 2:nsim){ #' lines(unlist(xtest),ynew[,i],col=i,type="l") #' } #' ############################## #' # create test function conditional #' ############################## #' fun <- createSimulatedTestFunction(x,fit,nsim,TRUE,seed=1) #' ynew <- NULL #' for(i in 1:nsim) #' ynew <- cbind(ynew,fun[[i]](xtest)) #' rangeY <- range(ynew) #' plot(unlist(xtest),ynew[,1],type="l",ylim=rangeY,xlab="x",ylab="Conditional sim.") #' for(i in 2:nsim){ #' lines(unlist(xtest),ynew[,i],col=i,type="l") #' } #' points(unlist(xb),yb,pch=19) #' dev.off() #' #' @export ################################################################################### createSimulatedTestFunction <- function(xsim, fit, nsim=10, conditionalSimulation=TRUE,seed=NA){ if(!is.list(xsim))xsim<-list(xsim) simfit <- simulate(fit,nsim,seed,xsim,conditionalSimulation,TRUE) ynew <- simfit$y fit$Psi <- simfit$psi fit$origPsi <- simfit$origPsi fit$origD <- simfit$origD fit$D <- simfit$D fit$A <- simfit$A fit$U <- simfit$U fit$a <- simfit$a fit$isIndefinite <- simfit$isIndefinite fit$isCNSD <- simfit$isCNSD fit$maximumDistance <- simfit$maximumDistance fit$Psinv <- MASS::ginv(fit$Psi) #should already be fixed in the simulation function, hence ginv should be fine fit$x <- xsim PsinvSaved <- fit$Psinv n <- length(xsim) ##precompute transformations if(fit$indefiniteType=="PSD" & !fit$indefiniteRepair & fit$isIndefinite & any(fit$indefiniteMethod==c("clip","flip","square","diffusion"))){ #RETRANSFORMATION OF THE SOLUTION ONLY fit$Psinv <- t(fit$A)%*%fit$Psinv #retransform the result for prediction fit$PsinvA <- fit$Psinv %*% fit$A #retransform the result (for variance estimation only) } ## fun <- list() for(i in 1:nsim){ fit$y <- ynew[,i,drop=FALSE] fit$yMu <- ynew[,i,drop=FALSE] - fit$mu ## if(fit$useLambda){ PsiB <- fit$Psi-diag(fit$lambda,n)+diag(.Machine$double.eps,n) fit$SSQReint <- as.numeric((t(fit$yMu)%*%PsinvSaved%*%PsiB%*%PsinvSaved%*%fit$yMu)/n) #PsinvSaved is used intentionally, needs to be untransformed Psinv fit$PsinvReint <- try(chol2inv(chol(PsiB)), TRUE) if(class(fit$PsinvReint)[1] == "try-error"){ fit$PsinvReint <- MASS::ginv(PsiB) } #now apply same transformations as for non-reinterpolating matrices if(fit$indefiniteType=="PSD" & fit$isIndefinite & !fit$indefiniteRepair & any(fit$indefiniteMethod==c("clip","flip","square","diffusion"))){ #RETRANSFORMATION OF THE SOLUTION ONLY fit$PsinvReint <- t(fit$A)%*%fit$PsinvReint %*% fit$A #retransform } } ## ##create the test function #f <- function(x){ # predict(fit,x)$y #} testFun <- NULL assign("testFun", eval(substitute( function(x){ predict(fit,x)$y }, list(fit=fit) ) ), envir=environment()) fun[[i]] <- testFun } return(fun) } ################################################################################### #' Simulation-based Test Function Generator, Data Interface #' #' Generate test functions for assessment of optimization algorithms with #' non-conditional or conditional simulation, based on real-world data. 
#'
#' @param x list of samples in input space, training data
#' @param y column vector of observations for each sample, training data
#' @param xsim list of samples in input space, for simulation
#' @param distanceFunction a suitable distance function of type f(x1,x2), returning a scalar distance value, preferably between 0 and 1.
#' Maximum distances larger than 1 are no problem, but may yield scaling bias when different measures are compared.
#' Should be non-negative and symmetric. It can also be a list of several distance functions. In this case, Maximum Likelihood Estimation (MLE) is used
#' to determine the most suited distance measure.
#' The distance function may have additional parameters. For that case, see distanceParametersLower/Upper in the controls.
#' If distanceFunction is missing, it can also be provided in the control list.
#' @param controlModel (list), with the options for the model building procedure,
#' which will be passed to the \code{\link{modelKriging}} function.
#' @param controlSimulation (list), with the parameters of the simulation:
#' \describe{
#' \item{\code{nsim}}{ the number of simulations, or test functions, to be created.}
#' \item{\code{conditionalSimulation}}{ whether (TRUE) or not (FALSE) to use conditional simulation.}
#' \item{\code{simulationSeed}}{ a random number generator seed. Defaults to NA, which means no seed is set. For the sake of reproducibility, set this to some integer value.}
#' }
#'
#' @return a list with the following elements: \code{fun} is a list of functions, where each function is the interpolation of one simulation realization. The length of the list depends on the nsim parameter.
#' \code{fit} is the result of the modeling procedure, that is, the model fit of class \code{modelKriging}.
#'
#' @seealso \code{\link{modelKriging}}, \code{\link{simulate.modelKriging}}, \code{\link{createSimulatedTestFunction}}
#'
#' @references N. A. Cressie. Statistics for Spatial Data. JOHN WILEY & SONS INC, 1993.
#' @references C. Lantuejoul. Geostatistical Simulation - Models and Algorithms. Springer-Verlag Berlin Heidelberg, 2002.
#' @references Zaefferer, M.; Fischbach, A.; Naujoks, B. & Bartz-Beielstein, T. Simulation Based Test Functions for Optimization Algorithms Proceedings of the Genetic and Evolutionary Computation Conference 2017, ACM, 2017, 8.
#'
#' @examples
#' nsim <- 10
#' seed <- 12345
#' n <- 6
#' set.seed(seed)
#' #target function:
#' fun <- function(x){
#'   exp(-20* x) + sin(6*x^2) + x
#' }
#' # "vectorize" target
#' f <- function(x){sapply(x,fun)}
#' # distance function
#' dF <- function(x,y)(sum((x-y)^2)) #sum of squares
#' # plot params
#' par(mfrow=c(4,1),mar=c(2.3,2.5,0.2,0.2),mgp=c(1.4,0.5,0))
#' #test samples for plots
#' xtest <- as.list(seq(from=-0,by=0.005,to=1))
#' plot(xtest,f(xtest),type="l",xlab="x",ylab="Obj. function")
#' #evaluation samples (training)
#' xb <- as.list(runif(n))
#' yb <- f(xb)
#' # support samples for simulation
#' x <- as.list(sort(c(runif(100),unlist(xb))))
#' # fit the model and simulate:
#' res <- testFunctionGeneratorSim(xb,yb,x,dF,
#'    list(algThetaControl=list(method="NLOPT_GN_DIRECT_L",funEvals=100),
#'    useLambda=FALSE),
#'    list(nsim=nsim,conditionalSimulation=FALSE))
#' fit <- res$fit
#' fun <- res$fun
#' #predicted obj. 
function values #' ypred <- predict(fit,as.list(xtest))$y #' plot(unlist(xtest),ypred,type="l",xlab="x",ylab="Estimation") #' points(unlist(xb),yb,pch=19) #' ############################## #' # plot non-conditional simulation #' ############################## #' ynew <- NULL #' for(i in 1:nsim) #' ynew <- cbind(ynew,fun[[i]](xtest)) #' rangeY <- range(ynew) #' plot(unlist(xtest),ynew[,1],type="l",ylim=rangeY,xlab="x",ylab="Simulation") #' for(i in 2:nsim){ #' lines(unlist(xtest),ynew[,i],col=i,type="l") #' } #' ############################## #' # create and plot test function, conditional #' ############################## #' fun <- testFunctionGeneratorSim(xb,yb,x,dF, #' list(algThetaControl= #' list(method="NLOPT_GN_DIRECT_L",funEvals=100), #' useLambda=FALSE), #' list(nsim=nsim,conditionalSimulation=TRUE))$fun #' ynew <- NULL #' for(i in 1:nsim) #' ynew <- cbind(ynew,fun[[i]](xtest)) #' rangeY <- range(ynew) #' plot(unlist(xtest),ynew[,1],type="l",ylim=rangeY,xlab="x",ylab="Conditional sim.") #' for(i in 2:nsim){ #' lines(unlist(xtest),ynew[,i],col=i,type="l") #' } #' points(unlist(xb),yb,pch=19) #' #' @export ################################################################################### testFunctionGeneratorSim <- function(x,y,xsim,distanceFunction,controlModel=list(),controlSimulation=list()){ con<-list(nsim=1,conditionalSimulation=FALSE,simulationSeed=NA) con[names(controlSimulation)] <- controlSimulation controlSimulation<-con fit <- modelKriging(x,y,distanceFunction,control=controlModel) testFuns <- createSimulatedTestFunction(xsim=xsim,fit=fit, nsim=controlSimulation$nsim, conditionalSimulation=controlSimulation$conditionalSimulation, seed=controlSimulation$simulationSeed) return(list(fit=fit,fun=testFuns)) }
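
# Illustrative check (not part of the package): conditional simulations are
# expected to (approximately) reproduce the training observations, while
# non-conditional ones need not. 'funs' is assumed to be the $fun element and
# x/y the training data passed to testFunctionGeneratorSim(); the tolerance is
# an arbitrary assumption.
checkSimulationInterpolates <- function(funs, x, y, tol=1e-6){
    sapply(funs, function(f) max(abs(f(x) - y)) < tol)
}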
# ---- source file: CEGO/R/testFunctionGeneration.R ----
#' Import datasets / microdata from the "Centre d'Estudis d'Opinio"
#'
#' Easy and convenient access to the datasets / microdata of the "Centre
#' d'Estudis d'Opinio", the Catalan institution for polling and public opinion.
#' The package uses the data stored in the servers of the CEO and returns it in
#' a tidy format (tibble).
#'
#' It works either by specifying the kind of merged barometer (using the
#' \code{kind} argument), or by providing a singular study (using the
#' \code{reo} argument).
#'
#' @encoding UTF-8
#' @param kind Character vector with the sort of microdata required. Defaults to "barometer", which contains the whole set of Barometers from 2014 onwards (face-to-face interviews). "barometer_until_2013" contains the interviews performed by phone until 2013, with a somewhat different questionnaire and structure. For that dataset you need third-party software installed on your computer to uncompress the original RAR file. If a specific study is requested in the \code{reo} argument, the \code{kind} argument no longer applies and only that specific study is retrieved.
#' @param reo Character vector of length one with the specific REO study (Registre d'Estudis d'Opinio, the internal register ID used by the CEO) whose dataset should be downloaded. By default (when \code{reo = NA}) it uses the \code{kind} argument. Not all the studies carried out by the CEO (and therefore listed in the \code{CEOmeta()} function call) have microdata available, only the ones that return TRUE in the column \code{microdata_available} of \code{CEOmeta()}.
#' @param raw Logical value to indicate whether the matrices should be returned raw, that is, as imported by haven::read_spss() without modification. Defaults to FALSE, meaning SPSS labels are transformed into factors. Does not apply to data from singular REOs, only to barometers retrieved using \code{kind}.
#' @param extra_variables Logical value indicating whether to include complementary variables such as date (Data). Defaults to TRUE. Names of such new variables only use upper case in the first letter. Extra variables are added at the end. Does not apply to data from singular REOs, only to barometers retrieved using \code{kind}.
#' @param date_start Character vector with a starting date ("YYYY-MM-DD") for the data. It only applies to the barometers retrieved using \code{kind}, not to other studies.
#' @param date_end Character vector with an end date ("YYYY-MM-DD") for the data. It only applies to the barometers retrieved using \code{kind}, not to other studies.
#' @export
#' @return A tibble with the individuals' responses to the questionnaire retrieved.
#' @examples
#'\dontrun{
#' # Get the merged barometer from 2014, by default (assume kind = "barometer").
#' d <- CEOdata()
#'
#' # Get the number of individuals surveyed and the number of variables recorded.
#' dim(d) #' #' # Get the identifiers of the different Barometers retrieved #' unique(d$BOP_NUM) #' #' # Get a specific study #' d746 <- CEOdata(reo = "746") #'} CEOdata <- function(kind = "barometer", reo = NA, raw = FALSE, extra_variables = TRUE, date_start = NA, date_end = NA) { # Function used later to process SPSS labels into factors is_haven_labelled <- function(x) { ifelse(length(which(class(x) %in% "haven_labelled")) > 0, TRUE, FALSE) } if (is.na(reo)) { # # Define URLs # url.phone.barometer <- "https://ceo.gencat.cat/web/.content/20_barometre/Matrius_BOP/2013_Microdades_anonimitzades_fusio_cine_telf.zip" file.phone.barometer.rar <- "2013_Microdades_anonimitzades_fusio_cine_telf.zip" file.phone.barometer <- "Microdades anonimitzades fusio cine telf.sav" url.presential.barometer <- "https://ceo.gencat.cat/web/.content/20_barometre/Matrius_BOP/Microdades_barometre.zip" file.presential.barometer <- "Microdades anonimitzades fusio presencial.sav" # Process barometer merged from 2014 if (kind == "barometer") { message("Downloading the barometer.") tmp <- tempfile() try({download.value <- download.file(url.presential.barometer, tmp)}, silent = TRUE) if (exists(quote(download.value))) { if (download.value == 0) { # success downloading the file file <- unzip(tmp, file.presential.barometer) message("Converting the original data into R. This may take a while.") d <- haven::read_spss(file) if (file.exists(file)) { unlink(file) } } } else { message("A problem downloading the barometer file has occurred. The server may be temporarily down, or the file name has changed. Please try again later or open an issue at https://github.com/ceopinio/CEOdata indicating 'Problem with barometer'") return(NULL) } } # Process barometer merged until 2013 if (kind == "barometer_until_2013") { message("Downloading the barometer until 2013.") try({download.value <- download.file(url.phone.barometer, file.phone.barometer.rar)}, silent = TRUE) if (exists(quote(download.value))) { if (download.value == 0) { # success downloading the file # This must be fixed because the original file as of 211027 is not a zip file, but a RAR file message("Uncompressing the original downloaded file. This may take a while.") system("unrar e 2013_Microdades_anonimitzades_fusio_cine_telf.zip") file <- file.phone.barometer message("Converting the original data into R. This may take a while.") d <- haven::read_spss(file) names(d) <- toupper(names(d)) # Add variable REO d <- d |> dplyr::mutate(REO = as.numeric(stringr::str_extract(BOP_NUM, "...$"))) if (file.exists(file)) { unlink(file) } if (file.exists(file.phone.barometer.rar)) { unlink(file.phone.barometer.rar) } } } else { message("A problem downloading the barometer until 2013 file has occurred. The server may be temporarily down, or the file name has changed. Please try again later or open an issue at https://github.com/ceopinio/CEOdata indicating 'Problem with barometer until 2013'") return(NULL) } } message("Post-processing the data. This may take a while.") # Arrange the barometer to process # Arrange factors if (!raw) { # Transform SPSS labels into proper R factors d <- d |> dplyr::mutate_if(is_haven_labelled, haven::as_factor, levels = "default") } # Add extra variables (date, ...) 
if (extra_variables) { if (kind == "barometer_until_2013") { d <- d |> dplyr::mutate(Data = as.Date(paste(ANY, sprintf("%02d", MES), 28, sep = "-"))) } else { d <- d |> dplyr::mutate(Data = as.Date(paste(ANY, sprintf("%02d", MES), ifelse(is.na(DIA), 28, DIA), sep = "-"))) } } # # Filter by dates # if (!is.na(date_start)) { d <- d |> dplyr::filter(Data >= date_start) } if (!is.na(date_end)) { d <- d |> dplyr::filter(Data <= date_end) } } else { # # Serve only a single, untreated REO # if (is.character(reo)) { if (length(reo) == 1) { url.reo <- CEOmetadata()$`Enllac matriu de dades`[CEOmetadata()$REO == reo] if (!is.na(url.reo)) { tmp <- tempfile() try({download.value <- download.file(url.reo, tmp)}, silent = TRUE) if (exists(quote(download.value))) { if (download.value == 0) { # success downloading the file files.within <- unzip(tmp, list = TRUE) if (dim(files.within)[1] == 1) { if (!stringr::str_detect(files.within$Name, "\\.sav$")) { warning("This zip file does not contain a .sav file") return(NULL) } else { file <- unzip(tmp, files.within$Name) message("Converting the original data into R. This may take a while.") try({d <- haven::read_spss(file)}, silent = TRUE) if (!exists(quote(d))) { warning("The .sav file can't be processed.") return(NULL) } else { # Arrange factors if (!raw) { # Transform SPSS labels into proper R factors d <- d |> dplyr::mutate_if(is_haven_labelled, haven::as_factor, levels = "default") } } if (file.exists(file)) { unlink(file) } } } else { warning("The zip file does not contain one, and only one, single file") return(NULL) } } } else { message("A problem downloading the specific barometer file has occurred. The server may be temporarily down, or the file name has changed. Please try again later or open an issue at https://github.com/ceopinio/CEOdata indicating 'Problem with barometer'") return(NULL) } } else { message(paste0("There is no dataset available for REO ", reo)) return(NULL) } } else { message("'reo' must pass only a single REO.") return(NULL) } } else { stop("'reo' must be a character vector.") } } # return(d) }
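
# Illustrative sketch (not part of the package): the label handling used by the
# raw = FALSE branch above, demonstrated on a toy haven-labelled column instead
# of downloaded microdata. All names here are local to the sketch.
demoLabelConversion <- function() {
  x <- haven::labelled(c(1, 2, 1), labels = c(Si = 1, No = 2))
  d <- dplyr::tibble(resposta = x)
  is_labelled <- function(col) inherits(col, "haven_labelled")
  dplyr::mutate_if(d, is_labelled, haven::as_factor, levels = "default")
}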
# ---- source file: CEOdata/R/CEOdata.R ----
#' Internal function to get the metadata of CEO surveys into cache #' #' Used when loading the package, it gets the last update of the available #' meta data of CEO surveys, cleans it and makes it ready for the rest #' of the functions in the package. #' @keywords internal #' @encoding UTF-8 the <- new.env(parent = emptyenv()) CEOmetadata <- function() { if (is.null(the$CEOmetadata)) { the$CEOmetadata <- getCEOmetadata() } return(the$CEOmetadata) } getCEOmetadata <- function() { # # Define URL with the main table that contains the register of all surveys # # # CSV, TSV, ... all fail because there are newlines in the data # # Therefore a more structured system is required: JSON url.ceo.table <- "https://analisi.transparenciacatalunya.cat/api/views/m5mb-xt5e/rows.json?accessType=DOWNLOAD&sorting=true" try({ceo.meta <- jsonlite::fromJSON(url.ceo.table)}, silent = TRUE) if (exists(quote(ceo.meta))) { ceo.meta.table <- ceo.meta[[1]] ceo.table <- ceo.meta[[2]] ceo.table <- t(as.data.frame(lapply(ceo.table, read.ceo.json))) ceo.table <- as_tibble(as.data.frame(ceo.table)) names(ceo.table) <- ceo.meta.table[[1]]$columns$name # Manually transform non-ASCII column names so that this can be packaged # and pass through CRAN # But only for variables that are going to be somewhat transformed names(ceo.table)[grep("Enlla.$", names(ceo.table))] <- "Enllac" names(ceo.table)[grep("Enlla. matriu de dades$", names(ceo.table))] <- "Enllac matriu de dades" names(ceo.table)[grep("M.tode de recollida de dades$", names(ceo.table))] <- "Metode de recollida de dades" names(ceo.table)[grep(".mbit territorial", names(ceo.table))] <- "Ambit territorial" names(ceo.table)[grep("T.tol enquesta", names(ceo.table))] <- "Titol enquesta" names(ceo.table)[grep("T.tol estudi", names(ceo.table))] <- "Titol estudi" CEOmeta <- ceo.table |> dplyr::select(-c(sid, id, position, created_at, created_meta, updated_at, updated_meta, meta)) |> dplyr::mutate(REO = factor(REO, levels = rev(REO))) |> dplyr::mutate(`Metodologia enquesta` = factor(`Metodologia enquesta`)) |> dplyr::mutate(`Metode de recollida de dades` = factor(`Metode de recollida de dades`)) |> dplyr::mutate(`Ambit territorial` = factor(`Ambit territorial`)) |> dplyr::mutate(`Dia inici treball de camp` = as.Date(stringr::str_sub(`Dia inici treball de camp`, 1L, 10L), format = "%Y-%m-%d")) |> dplyr::mutate(`Dia final treball de camp` = as.Date(stringr::str_sub(`Dia final treball de camp`, 1L, 10L), format = "%Y-%m-%d")) |> dplyr::mutate(`Any d'entrada al REO` = as.integer(`Any d'entrada al REO`, format = "")) |> dplyr::mutate(`Data d'alta al REO` = as.Date(stringr::str_sub(`Data d'alta al REO`, 1L, 10L), format = "%Y-%m-%d")) |> dplyr::mutate(`Mostra estudis quantitatius` = as.numeric(`Mostra estudis quantitatius`)) |> dplyr::mutate(Cost = as.numeric(Cost)) |> dplyr::mutate(microdata_available = ifelse(is.na(`Enllac matriu de dades`), FALSE, TRUE)) return(CEOmeta) } else { message("A problem downloading the metadata has occurred. The server may be temporarily down, or the file name has changed. Please try again later or open an issue at https://github.com/ceopinio/CEOdata indicating 'Problem with metadata file'") return(NULL) } } #' Import metadata from the "Centre d'Estudis d'Opinio" #' #' Easy and convenient access to the metadata of the "Centre #' d'Estudis d'Opinio", the Catalan institution for polling and public opinion. 
#' It allows to search for specific terms to obtain the details of the datasets available #' #' @encoding UTF-8 #' @param reo Character vector of length one that allows to get the metadata only of a specific REO (Registre d'Estudis d'Opinio, the internal register ID used by the CEO) to download. When not NULL it has precedence with the search, date_start and date_end arguments. #' @param search Character vector with keywords to look for within several columns of the CEO metadata (title, summary, objectives and tags -descriptors-). Each element of the vector is strictly evaluated (all words are considered to be found in the format they appear, like in "AND"), while by using several elements in the vector the search works like an "OR" clause. Lower or upper cases are not considered. #' @param date_start Character vector with a starting date ("YYYY-MM-DD") for the data. #' @param date_end Character vector with an end date ("YYYY-MM-DD") for the data. #' @param browse Logical value. When turned to TRUE, the browser opens the URLs of the required surveys. Only a maximum of 10 entries are opened. #' @param browse_translate When opening the relevant entries in the browser (browse must be TRUE), use automatic translation to the language specified using Google Translate ('oc' for Occitan/Aranese, 'de' to German, 'en' to English, 'eu' to Basque, 'gl' for Galician or 'sp' to Spanish). #' @param browse_force Logical value. When TRUE it overcomes the limitation of only opening a maximum of 10 URLs. Use it with caution. #' @export #' @return A tibble with the metadata of the surveys produced by the CEO. #' @examples #'\dontrun{ #' # Retrieve the metadata of the surveys ever produced by the CEO: #' meta <- CEOmeta() #' dim(meta) #' #' # Search for specific terms in any of the metadata fields #' # in this case, "internet". 
#' CEOmeta(search = "internet") #' #' # now for the combination of "Medi" AND "Ambient" #' CEOmeta(search = "Medi ambient") #' #' # now for the combination of ("Medi" AND "Ambient") OR "Municipi" #' CEOmeta(search = c("Medi ambient", "Municipi")) #' #' # Search for all registers starting in 2020 #' CEOmeta(date_start = "2020-01-01") #' #' # Get the entry for a specific study (REO) and open its description in a browser #' CEOmeta(reo = "746", browse = TRUE) #'} CEOmeta <- function( reo = NULL, search = NULL, date_start = NA, date_end = NA, browse = FALSE, browse_translate = NULL, browse_force = FALSE) { # If search is not empty, return parts according to searched fields # If search is empty, just return all the metadata # If browse, then open the URLs in the browser # Start with the whole cached data, and keep on subsetting d <- CEOmetadata() # # Limit by search # if (!is.null(reo)) { if (!is.character(reo)) { stop("The 'reo' argument must be character.") } d <- d |> filter(REO == reo) } else if (!is.null(search)) { if (!is.character(search)) { stop("The 'search' argument must be character.") } search <- tolower(search) search.strings <- stringr::str_trim(search) message(paste0("Looking for entries with: ", paste(search.strings, collapse = " OR "))) columns.to.search <- c("Titol enquesta", "Titol estudi", "Objectius", "Resum", "Descriptors") # Get the REO values that match the given string of text in any of # the columns considered reo.match <- d |> dplyr::select(REO, columns.to.search) |> dplyr::mutate_at(columns.to.search, tolower) %>% #|> #{function(x) dplyr::filter_all(dplyr::any_vars(stringr::str_detect(x, pattern = paste(search.strings, collapse = "|"))))}() |> dplyr::filter_all(dplyr::any_vars(stringr::str_detect(., pattern = paste(search.strings, collapse = "|")))) |> dplyr::select(REO) |> {function(x) unlist(x, use.names = FALSE)}() if (length(reo.match) < 1) { stop(paste0("There are no entries with the string '", search, "'.\nYou may want to reduce the scope or change the text.")) } d <- d |> dplyr::filter(REO %in% reo.match) } # # Limit by date # if (!is.na(date_start)) { d <- d |> dplyr::filter(`Data d'alta al REO` >= date_start) } if (!is.na(date_end)) { d <- d |> dplyr::filter(`Data d'alta al REO` <= date_end) } # # Open the URLs of the matches # Deal with translations if necessary # if (browse) { if (dim(d)[1] <= 10 | (browse_force)) { for (i in 1:(dim(d)[1])) { url.to.open <- d$`Enllac`[i] if (!is.null(browse_translate)) { if (browse_translate == "oc") { # For occitan, use apertium url.to.open <- paste0("https://www.apertium.org/index.eng.html#webpageTranslation?dir=cat-oci&qW=", url.to.open) } else { # Use google translate url.to.open <- paste0("https://", gsub("\\.", "-", urltools::domain(url.to.open)), ".translate.goog/", sub("http.+//[^/]*", "", url.to.open), "&_x_tr_sl=ca&_x_tr_tl=", browse_translate) } } browseURL(url.to.open) Sys.sleep(0.05) } } } # return(d) } #' Internal function to be able to properly read the JSON from CEO #' #' Used to address the limitations of the JSON format provided #' #' @keywords internal #' @encoding UTF-8 #' @param x JSON data structure read.ceo.json <- function(x) { row <- rep(NA, length(x)) for (i in 1:length(x)) { element <- x[[i]] if (length(element) == 0) { # if there is nothing, return NA row[i] <- NA } else if (length(element) == 1) { # if there is only one element, get it row[i] <- element } else { # if there is more than one element, # only take care of the first element row[i] <- element[1] } } return(row) }
# ---- source file: CEOdata/R/CEOmeta.R ----
#' Search for keywords in the labels of variables and responses of the survey data #' #' Easy and convenient access to the metadata of the "Centre #' d'Estudis d'Opinio", the Catalan institution for polling and public opinion. #' It allows to search for specific terms to obtain the details of the datasets available #' #' @encoding UTF-8 #' @param d Microdata retrieved from the CEO using the CEOdata() function. It is a data frame with variable labels. #' @param keyword The character string defining the word / concept to look for within the microdata. #' @param where A character vector specifying if the function should look amongst variable labels ("variables", default), or amongst value labels ("values"). #' @param translate Logical. When TRUE, it opens a browser with an automatic translation to English of the variable names and labels using Google Translate. Given the specificity of the terms, only the English translation is provided. Defaults to FALSE. #' @export #' @return A tibble with the set of variables that match the keyword ("Variable"). If the variables are requested, the second variable is their labels ("Label"), and if the values are required the second on is the value labels ("Value"). #' @examples #'\dontrun{ #' # Retrieve a dataset to use the function #' d <- CEOdata() #' #' # Get the whole set of variable labels #' CEOsearch(d) #' #' # Get the whole set of value labels #' CEOsearch(d, where = "values") #' #' # Search for specific variable names and variable labels with the string "edat" (age). #' CEOsearch(d, keyword = "edat") #' #' # Search for specific variable names and variable labels with the string "edat" (age), #' # and translate the results to English. #' CEOsearch(d, keyword = "edat", translate = TRUE) #' #' # now for the combination of "valoracio" OR "covid" OR "govern". 
#' CEOsearch(d, keyword = c("valoració", "covid", "govern"))
#'}
CEOsearch <- function(
  d,
  keyword = NULL,
  where = "variables",
  translate = FALSE) {
  # If keyword is empty, just return all the entries
  # If translate, then open the automatic translation in the browser
  #
  # Limit by search
  #
  if (is.null(d) | !inherits(d, "data.frame")) {
    stop("The dataset in which to look for keywords does not exist or is not a data frame.")
  }
  # Get either variable or value labels, all of them
  if (where == "variables") {
    v <- NULL
    for (i in 1:(dim(d)[2])) {
      v <- bind_rows(v, dplyr::as_tibble(data.frame(Variable = names(d)[i],
        Label = ifelse(!is.null(attr(d[[names(d)[i]]], "label")),
                       attr(d[[names(d)[i]]], "label"), NA))))
    }
  } else if (where == "values") {
    v <- NULL
    for (i in 1:(dim(d)[2])) {
      if (!is.null(attr(d[[names(d)[i]]], "levels"))) {
        v <- bind_rows(v, dplyr::as_tibble(expand.grid(Variable = names(d)[i],
          Value = attr(d[[names(d)[i]]], "levels"))))
      }
    }
  }
  #v <- dplyr::mutate_if(v, is.character, as.factor)
  # Search terms or return the whole set of variable/value labels
  if (is.null(keyword)) {
    return(v)
  } else {
    if (!is.character(keyword)) {
      stop("The 'keyword' argument must be character.")
    }
    keyword <- tolower(keyword)
    keyword.strings <- stringr::str_trim(keyword)
    message(paste0("Looking for entries with: ", paste(keyword.strings, collapse = " OR ")))
    # Get the values that match the given string of text in any of
    # the columns
    columns.to.search <- names(v)
    v.match <- v |>
      dplyr::mutate(Original.Variable = Variable) |>
      dplyr::mutate_at(columns.to.search, tolower) %>%
      #|>
      #{function(x) dplyr::filter_all(dplyr::any_vars(stringr::str_detect(x, pattern = paste(search.strings, collapse = "|"))))}() |>
      dplyr::filter_all(dplyr::any_vars(stringr::str_detect(., pattern = paste(keyword.strings, collapse = "|")))) |>
      dplyr::select(Original.Variable) |>
      {function(x) unlist(x, use.names = FALSE)}()
    if (length(v.match) < 1) {
      message(paste0("There are no entries with the string '", keyword, "'.\nYou may want to reduce the scope or change the text."))
      v <- v[0,]
    }
    v <- v |>
      dplyr::filter(Variable %in% v.match)
  }
  #
  # Open the automatic translation in the browser if requested
  #
  if (translate) {
    if (is.null(dim(v))) {
      message("No entries to translate.")
    } else {
      text.translate <- NULL
      if (dim(v)[1] > 1) {
        for (i in 1:(dim(v)[1])) {
          if (i == 1) {
            text.translate <- toString(data.frame(v[i,]))
          } else {
            text.translate <- paste0(text.translate, "%0A", toString(data.frame(v[i,])))
          }
        }
      } else if (dim(v)[1] == 1) {
        text.translate <- toString(data.frame(v[1,]))
      }
      url.to.open <- paste0("https://translate.google.com/?sl=ca&tl=en&text=",
                            text.translate,
                            "&op=translate")
      browseURL(url.to.open)
    }
  }
  #
  return(v)
}
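
# Minimal usage sketch (illustrative only; commented out because it requires
# downloading microdata with CEOdata()): chain the microdata and search helpers
# to find age-related variables and then inspect their value labels.
# d <- CEOdata()
# CEOsearch(d, keyword = "edat")                   # variable names and labels
# CEOsearch(d, keyword = "edat", where = "values") # value labels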
# ---- source file: CEOdata/R/CEOsearch.R ----