# Hack to avoid NOTEs in R CMD check.
# Hadley does not seem to like it:
# http://stackoverflow.com/questions/9439256/how-can-i-handle-r-cmd-check-no-visible-binding-for-global-variable-notes-when
if (base::getRversion() >= "2.15.1") {
  utils::globalVariables(c(
    "ANY", "BOP_NUM", "Data", "DIA", "MES", "REO", ".",
    #"Data alta CEO",
    "sid", "id", "position", "created_at", "created_meta",
    "updated_at", "updated_meta", "meta",
    "Metodologia enquesta", "Metode de recollida de dades",
    "Ambit territorial",
    "Dia inici treball de camp", "Dia final treball de camp",
    "Any d'entrada al REO", "Data d'alta al REO",
    "Mostra estudis quantitatius", "Enllac matriu de dades",
    "Cost", "Variable", "Original.Variable",
    "is_haven_labelled" # from CEOdata
  ))
}
/scratch/gouwar.j/cran-all/cranData/CEOdata/R/globals.R
#' Datasets of the CEO (Centre d'Estudis d'Opinio). Opinion polls in Catalonia.
#'
#' Easy and convenient access to the datasets / microdata of the "Centre
#' d'Estudis d'Opinio", the Catalan institution for polling and public opinion.
#' The package uses the data stored in the servers of the CEO and returns it in
#' a tidy format (tibble).
#'
#' @encoding UTF-8
#' @references \url{http://xavier-fim.net/packages/CEOdata/}.
#' @importFrom haven read_spss as_factor
#' @importFrom dplyr mutate_if %>% mutate filter select as_tibble bind_rows
#' @importFrom utils download.file unzip browseURL
#' @importFrom stringr str_detect str_extract str_sub str_trim
#' @importFrom urltools domain
#' @importFrom jsonlite fromJSON
#' @docType package
#' @name CEOdata
NULL
/scratch/gouwar.j/cran-all/cranData/CEOdata/R/help.R
CEOdataStartupMessage <- function() {
  msg <- c(paste0("CEOdata version ", utils::packageVersion("CEOdata")),
           "\nThis package needs a working Internet connection to effectively run.",
           "\nPlease acknowledge the CEO in your publications.\nType \"vignette('using_CEOdata')\" or \"vignette('cheatsheet')\" for basic help.",
           "\n\nThis package, by default, transforms the data gathered from the CEO\ninto pure-R factors. If you want to keep the SPSS labelled format\nyou can use 'raw = TRUE' when calling its functions.")
  return(msg)
}

.onAttach <- function(lib, pkg) {
  # startup message
  msg <- CEOdataStartupMessage()
  if (!interactive())
    msg[1] <- paste("Package 'CEOdata' version", utils::packageVersion("CEOdata"))
  packageStartupMessage(msg)
  invisible()
  # options(encoding = "UTF-8")
}
/scratch/gouwar.j/cran-all/cranData/CEOdata/R/zzz.R
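The startup message above points to the `raw` argument. As a quick illustration of that option (a minimal sketch, assuming a working internet connection, that the default Barometer download succeeds, and that a `SEXE` column is present as in the vignettes below), the default call converts labelled SPSS variables into factors while `raw = TRUE` keeps the `haven_labelled` columns:

```r
library(CEOdata)

d.factors <- CEOdata()            # labelled SPSS variables converted to factors (default)
d.raw     <- CEOdata(raw = TRUE)  # keep the haven_labelled format instead

# Compare the class of one illustrative variable in both versions
class(d.factors$SEXE)
class(d.raw$SEXE)
```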
## ----echo=FALSE, message=FALSE, warning=FALSE--------------------------------- library(CEOdata) ## ----message = FALSE, echo = TRUE, eval = FALSE------------------------------- # library(CEOdata) # d <- CEOdata() ## ----message = FALSE, echo = FALSE, eval = TRUE------------------------------- library(knitr) library(CEOdata) d <- CEOdata() # If there is an internet problem, do not run the remaining of the chunks. if (is.null(d)) { print("here") knitr::opts_chunk$set(eval = FALSE) } else { knitr::opts_chunk$set(eval = TRUE) } ## ----------------------------------------------------------------------------- dim(d) ## ----------------------------------------------------------------------------- d ## ----------------------------------------------------------------------------- names(d)[1:50] ## ---- eval = FALSE------------------------------------------------------------ # d.raw <- CEOdata(raw = FALSE) ## ----------------------------------------------------------------------------- d746 <- CEOdata(reo = "746") d746 ## ----------------------------------------------------------------------------- b2019 <- CEOdata(date_start = "2019-01-01", date_end = "2019-12-31") b2019 ## ----------------------------------------------------------------------------- tail(names(d)) ## ----------------------------------------------------------------------------- d.lowercase <- d names(d.lowercase) <- tolower(names(d.lowercase)) ## ----------------------------------------------------------------------------- CEOmeta() ## ----------------------------------------------------------------------------- CEOmeta(reo = "746") ## ----------------------------------------------------------------------------- CEOmeta(search = "Medi ambient") ## ----------------------------------------------------------------------------- CEOmeta(search = c("Medi ambient", "Municipi")) ## ----------------------------------------------------------------------------- CEOmeta() |> filter(`Metode de recollida de dades` == "internet") ## ----------------------------------------------------------------------------- CEOmeta() |> filter(`Mostra estudis quantitatius` < 500) ## ----------------------------------------------------------------------------- CEOmeta(date_start = "2019-01-01", date_end = "2019-12-31") ## ---- eval = FALSE------------------------------------------------------------ # CEOmeta(search = "Medi ambient a", browse = TRUE) ## ---- eval = FALSE------------------------------------------------------------ # CEOmeta(reo = "746", browse = TRUE) ## ---- eval = FALSE------------------------------------------------------------ # CEOmeta(search = "Medi ambient a", browse = TRUE, browse_translate = "de") ## ----------------------------------------------------------------------------- CEOsearch(d) # equivalent to CEOsearch(d, where = "variables") ## ----------------------------------------------------------------------------- CEOsearch(d, where = "values") ## ----------------------------------------------------------------------------- CEOsearch(d, keyword = "edat") ## ----------------------------------------------------------------------------- CEOsearch(d, keyword = "edat", translate = TRUE) ## ----------------------------------------------------------------------------- CEOsearch(d) |> left_join(CEOsearch(d, where = "values"))
/scratch/gouwar.j/cran-all/cranData/CEOdata/inst/doc/using_CEOdata.R
--- title: Using the CEOdata package author: Xavier Fernández-i-Marín date: "`r format(Sys.time(), '%d/%m/%Y')` - Version `r packageVersion('CEOdata')`" classoption: a4paper,justified output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Using the CEOdata package} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r echo=FALSE, message=FALSE, warning=FALSE} library(CEOdata) ``` CEOdata is a package that facilitates the incorporation of microdata (individual responses) of public opinion polls in Catalonia into `R`, as performed by the "Centre d'Estudis d'Opinió" (CEO, Opinion Studies Center). It has basically three main functions with a separate purpose: - **`CEOdata()`**: that provides the _data_ of the surveys directly into `R`. - **`CEOmeta()`**: that allows the user to inspect the details of the available surveys (_metadata_) and to search for specific topics and get the survey details. - **`CEOsearch()`**: that allows the user to search for variables, variable labels and value labels within a survey data gathered using the `CEOdata()` function. # `CEOdata()`: Get the survey data The most comprehensive kind of data on Catalan public opinion is the "Barometer", that can be retrieved by default by the main function `CEOdata()`. ```{r message = FALSE, echo = TRUE, eval = FALSE} library(CEOdata) d <- CEOdata() ``` ```{r message = FALSE, echo = FALSE, eval = TRUE} library(knitr) library(CEOdata) d <- CEOdata() # If there is an internet problem, do not run the remaining of the chunks. if (is.null(d)) { print("here") knitr::opts_chunk$set(eval = FALSE) } else { knitr::opts_chunk$set(eval = TRUE) } ``` This provides a cleaned and merged version of all the available Barometers, since 2017, providing easy access to the following number of responses and variables: ```{r} dim(d) ``` ```{r} d ``` ```{r} names(d)[1:50] ``` But default `CEOdata()` transforms the gathered data into pure-R format (labelled SPSS variables are converted into factors). If you want to use `haven_labelled` variables as provided by the raw SPSS files available, you can use the argument `raw = TRUE`. ```{r, eval = FALSE} d.raw <- CEOdata(raw = FALSE) ``` ## Specific studies or time frames `CEOdata()` allows you to select specific Barometers, by providing their internal register in the `reo` argument. The reo is the internal name that the CEO uses, and stands for "Registre d'Estudis d'Opinió" (register of opinion studies), and is the main identifier of the survey, also present in the table of meta data. Although many of them are numbers, some have a number, a slash and another number, and therefore a character vector must be passed. Only a single REO can be passed, as it is not guaranteed that different data matrices share any column, and may refer to very different topics. For instance, to get only the data of the study with register "746" (corresponding to March 2013): ```{r} d746 <- CEOdata(reo = "746") d746 ``` Not all studies carried on by the CEO (and therefore listed in the `CEOmeta()` function --see below--) have microdata available. For convenience, there is a variable in the metadata that returns whether the microdata is available or not (`microdata_available`). When using the `kind` argument (which is the default), the function `CEOdata()` also allows to restrict the whole set of barometers based on specific time frames defined by a date with the arguments `date_start` and `date_end` using the YYYY-MM-DD format. 
Notice that only the barometers are considered when using this arguments, not other studies. ```{r} b2019 <- CEOdata(date_start = "2019-01-01", date_end = "2019-12-31") b2019 ``` ## Extra variables By default `CEOdata()` incorporates new variables to the original matrix. Variables that are created for convenience, such as the date of the survey. The CEO data not always provides a day of the month. In that case, 28 is used. These variables appear at the end of the dataset and can be distinguished from the original CEO variables because only the first letter is capitalized. ```{r} tail(names(d)) ``` In case of desiring all variable names to be lowercase, one can simply convert them with `tolower()`: ```{r} d.lowercase <- d names(d.lowercase) <- tolower(names(d.lowercase)) ``` # `CEOmeta()`: Access to the metadata of studies and surveys The function `CEOmeta` allows to easily retrieve, search and restrict by time the list of all the surveys produced by the CEO, which amounts to more than a thousand as of early 2022. When called alone, the function downloads the latest version of the metadata published by the center, in a transparent way, and caching its content so that any subsequent calls in the same `R` session do not need to download it again. ```{r} CEOmeta() ``` ## Get a specific study In order to get the metadata of a specific study, the `reo` argument can be used: ```{r} CEOmeta(reo = "746") ``` ## Search for specific topics though keywords The first relevant argument for `CEOmeta()` is `search`, which is a built-in simple search engine that goes through the columns of the metadata containing potential descriptive information (title, summary, objectives and tags -descriptors-) and returns the studies that contain such keyword. ```{r} CEOmeta(search = "Medi ambient") ``` It is also possible to pass more than one value to `search`, so that the search includes them (either one of them OR any other). ```{r} CEOmeta(search = c("Medi ambient", "Municipi")) ``` In addition to the built-in argument to search through the columns of the survey title, the study title, the objectives, the summary and the tags (descriptors), it is possible to combine `CEOmeta()` with `dplyr`'s `filter()` to limit the results of studies returned. For example, to get the studies that have been performed using Internet to get the data: ```{r} CEOmeta() |> filter(`Metode de recollida de dades` == "internet") ``` Or to get studies with a specific quantitative sample size limit: ```{r} CEOmeta() |> filter(`Mostra estudis quantitatius` < 500) ``` ## Restrict by time Metadata can be retrieved for a specific period of time, by using the arguments `date_start` and `date_end`, also using the YYYY-MM-DD format. In this case the dates that are taken into account are dates where the study gets into the records, not the fieldwork dates. ```{r} CEOmeta(date_start = "2019-01-01", date_end = "2019-12-31") ``` ## Browse the CEO site In addition, to the search engine and the restriction by time `CEOmeta()` also allows to automatically open the relevant URLs at the CEO domain that contain the details of the studies gathered with the function. This can be done setting the `browse` argument to `TRUE`. However, there is a soft limitation of only 10 URLs to be opened, unless the user forces to really open all of them (proceed with caution, as this may open many tabs in your browser and leave your computer out of RAM in some scenarios of RAM black holes, such as Chrome). 
```{r, eval = FALSE} CEOmeta(search = "Medi ambient a", browse = TRUE) ``` To open a specific REO, a simpler call with its specific identifier can be used: ```{r, eval = FALSE} CEOmeta(reo = "746", browse = TRUE) ``` It is also possible to specify an alternative language, so the default Catalan pages are substituted by the automatic translations provided by Apertium (for Occitan/Aranese) or Google Translate. ```{r, eval = FALSE} CEOmeta(search = "Medi ambient a", browse = TRUE, browse_translate = "de") ``` # `CEOsearch()`: Access to the variable and value labels Contrary to `CEOdata()` and `CEOmeta()`, `CEOsearch()` needs at least one argument: the survey data (microdata) for which we want to extract the variable labels and the value labels. By default it provides the variable labels in a tidy object: ```{r} CEOsearch(d) # equivalent to CEOsearch(d, where = "variables") ``` Equivalently, the use of `where = "values"` provides with a tidy object containing the value labels. Notice that in this case the variable names are repeated to accommodate each of the different value labels. ```{r} CEOsearch(d, where = "values") ``` Just like with the `CEOmeta()`, `CEOsearch()` has a simple built-in search facility that allows to retrieve only the rows that match a specific keyword(s). In the following example, we restrict the variables to those that contain "edat" (age). ```{r} CEOsearch(d, keyword = "edat") ``` Finally, an English translation of the variable labels/values is provided if the argument `translate` is set to `TRUE`, by opening a browser tab with the translations. ```{r} CEOsearch(d, keyword = "edat", translate = TRUE) ``` Of course, variable labels and values can be merged into a single object using a combination of `join` and `CEOsearch()`: ```{r} CEOsearch(d) |> left_join(CEOsearch(d, where = "values")) ``` # Development and acknowledgement The development of `CEOdata` (track changes, propose improvements, report bugs) can be followed at [github](https://github.com/ceopinio/CEOdata/). If using the data and the package, please cite and acknowledge properly the CEO and the package, respectively. <!-- # References -->
/scratch/gouwar.j/cran-all/cranData/CEOdata/inst/doc/using_CEOdata.Rmd
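The vignette above presents `CEOmeta()`, `CEOdata()` and `CEOsearch()` separately. A minimal end-to-end sketch chaining them (assumptions: a working internet connection, that `microdata_available` is a logical metadata column as described in the vignette, and that the illustrative keyword returns at least one study):

```r
library(CEOdata)
library(dplyr)

# 1. Find studies about a topic and keep only those with microdata available.
env.studies <- CEOmeta(search = "Medi ambient") |>
  filter(microdata_available)

# 2. Download the microdata of the first matching study by its REO identifier.
d.env <- CEOdata(reo = env.studies$REO[1])

# 3. Look up the variables of that study that mention age.
CEOsearch(d.env, keyword = "edat")
```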
## ----echo=FALSE, message=FALSE, warning=FALSE--------------------------------- library(CEOdata) ## ----message = FALSE, echo = TRUE, eval = FALSE------------------------------- # library(CEOdata) # d <- CEOdata() ## ----message = FALSE, echo = FALSE, eval = TRUE------------------------------- library(knitr) library(CEOdata) d <- CEOdata() # If there is an internet problem, do not run the remaining of the chunks. if (is.null(d)) { print("here") knitr::opts_chunk$set(eval = FALSE) } else { knitr::opts_chunk$set(eval = TRUE) } ## ---- message = FALSE, warning = FALSE---------------------------------------- library(dplyr) library(tidyr) library(ggplot2) ## ----------------------------------------------------------------------------- d |> count(SEXE) ## ----prop-females, fig.width = 8, fig.height = 4, fig.cap = 'Proportion of females in the different Barometers.'---- d |> group_by(BOP_NUM) |> summarize(propFemales = length(which(SEXE == "Dona")) / n()) |> ggplot(aes(x = BOP_NUM, y = propFemales, group = 1)) + geom_point() + geom_line() + theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5)) + expand_limits(y = c(0, 1)) ## ----tags, fig.width = 6, fig.height = 6, fig.cap = 'Prevalence of topics covered.'---- tags <- CEOmeta() |> separate_rows(Descriptors, sep = ";") |> mutate(tag = factor(stringr::str_trim(Descriptors))) |> select(REO, tag) tags |> group_by(tag) |> count() |> filter(n > 5) |> ggplot(aes(x = n, y = reorder(tag, n))) + geom_point() + ylab("Topic") ## ----fieldwork, fig.width = 8, fig.height = 10, fig.cap = 'Fieldwork periods.'---- CEOmeta() |> filter(`Dia inici treball de camp` > "2018-01-01") |> ggplot(aes(xmin = `Dia inici treball de camp`, xmax = `Dia final treball de camp`, y = reorder(REO, `Dia final treball de camp`), color = microdata_available)) + geom_linerange() + xlab("Date") + ylab("Surveys with fieldwork") + theme(axis.ticks.y = element_blank(), axis.text.y = element_blank()) ## ----------------------------------------------------------------------------- survey.data <- d |> mutate(Female = ifelse(SEXE == "Dona", 1, 0), Age = EDAT, # Pass NA correctly Income = ifelse(INGRESSOS_1_15 %in% c("No ho sap", "No contesta"), NA, INGRESSOS_1_15), Date = Data, # Reorganize factor labels `Place of birth` = factor(case_when( LLOC_NAIX == "Catalunya" ~ "Catalonia", LLOC_NAIX %in% c("No ho sap", "No contesta") ~ as.character(NA), TRUE ~ "Outside Catalonia")), # Convert into numerical (integer) `Interest in politics` = case_when( INTERES_POL == "Gens" ~ 0L, INTERES_POL == "Poc" ~ 1L, INTERES_POL == "Bastant" ~ 2L, INTERES_POL == "Molt" ~ 3L, TRUE ~ as.integer(NA)), # Convert into numeric (double) and properly address missing values `Satisfaction with democracy` = ifelse( SATIS_DEMOCRACIA %in% c("No ho sap", "No contesta"), NA, as.numeric(SATIS_DEMOCRACIA))) |> # Center income to the median mutate(Income = Income - median(Income, na.rm = TRUE)) |> # Pick only specific variables select(Date, Female, Age, Income, `Place of birth`, `Interest in politics`, `Satisfaction with democracy`) ## ----eval = FALSE------------------------------------------------------------- # save(survey.data, file = "my_cleaned_dataset.RData") ## ---- eval = FALSE, echo = TRUE----------------------------------------------- # library(vtable) # st(survey.data) ## ---- eval = TRUE, echo = FALSE----------------------------------------------- if (exists("survey.data")) { if (!is.null(survey.data)) { vtable::st(survey.data, out = "kable") } } ## ---- eval = FALSE, echo = 
TRUE----------------------------------------------- # library(compareGroups) # createTable(compareGroups(Female ~ . -Date, data = survey.data)) ## ---- eval = TRUE, echo = FALSE----------------------------------------------- if (exists("survey.data")) { if (!is.null(survey.data)) { library(compareGroups) createTable(compareGroups(Female ~ . -Date, data = survey.data)) } }
/scratch/gouwar.j/cran-all/cranData/CEOdata/inst/doc/working_with_survey_data_using_the_CEOdata_package.R
--- title: Working with survey data using the CEOdata package author: Xavier Fernández-i-Marín date: "`r format(Sys.time(), '%d/%m/%Y')` - Version `r packageVersion('CEOdata')`" classoption: a4paper,justified output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Working with survey data using the CEOdata package} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r echo=FALSE, message=FALSE, warning=FALSE} library(CEOdata) ``` When working with survey data there are several issues / strategies to clean and prepare the data that are useful and worth being incorporated to the routines and workflow. This vignette uses the `CEOdata` package to present several examples. It uses primarily the data retrieved by default using the `CEOdata()` function in its default form, which retrieves the compiled "Barometers" from 2014 onwards. ```{r message = FALSE, echo = TRUE, eval = FALSE} library(CEOdata) d <- CEOdata() ``` ```{r message = FALSE, echo = FALSE, eval = TRUE} library(knitr) library(CEOdata) d <- CEOdata() # If there is an internet problem, do not run the remaining of the chunks. if (is.null(d)) { print("here") knitr::opts_chunk$set(eval = FALSE) } else { knitr::opts_chunk$set(eval = TRUE) } ``` # Incorporate Tables and Figures Once you have retrieved the data of the surveys, it is easy to accommodate them to your regular workflow. For instance, to get the overall number of males and females surveyed: ```{r, message = FALSE, warning = FALSE} library(dplyr) library(tidyr) library(ggplot2) ``` ```{r} d |> count(SEXE) ``` Or to trace the proportion of females surveyed over time, across barometers: ```{r prop-females, fig.width = 8, fig.height = 4, fig.cap = 'Proportion of females in the different Barometers.'} d |> group_by(BOP_NUM) |> summarize(propFemales = length(which(SEXE == "Dona")) / n()) |> ggplot(aes(x = BOP_NUM, y = propFemales, group = 1)) + geom_point() + geom_line() + theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5)) + expand_limits(y = c(0, 1)) ``` # Topics (Tags) Alternatively, the metadata can also be explored using the different topics (tags, called "Descriptors") covered as reported by the CEO. ```{r tags, fig.width = 6, fig.height = 6, fig.cap = 'Prevalence of topics covered.'} tags <- CEOmeta() |> separate_rows(Descriptors, sep = ";") |> mutate(tag = factor(stringr::str_trim(Descriptors))) |> select(REO, tag) tags |> group_by(tag) |> count() |> filter(n > 5) |> ggplot(aes(x = n, y = reorder(tag, n))) + geom_point() + ylab("Topic") ``` # Fieldwork The metadata also provides the option of examining the time periods where there has been fieldwork in quantitative studies, since 2018. In addition, we can distinguish between studies that provide microdata and surveys that don't. ```{r fieldwork, fig.width = 8, fig.height = 10, fig.cap = 'Fieldwork periods.'} CEOmeta() |> filter(`Dia inici treball de camp` > "2018-01-01") |> ggplot(aes(xmin = `Dia inici treball de camp`, xmax = `Dia final treball de camp`, y = reorder(REO, `Dia final treball de camp`), color = microdata_available)) + geom_linerange() + xlab("Date") + ylab("Surveys with fieldwork") + theme(axis.ticks.y = element_blank(), axis.text.y = element_blank()) ``` # Arrange and store Once a dataset has been retrieved from the CEO servers, it is important to clean it and arrange it to one's individual preferences, and store the result in an R object. 
The following example, for instance, process several variables of the survey, picks them and stores the resulting object in a workspace (RData) format. ```{r} survey.data <- d |> mutate(Female = ifelse(SEXE == "Dona", 1, 0), Age = EDAT, # Pass NA correctly Income = ifelse(INGRESSOS_1_15 %in% c("No ho sap", "No contesta"), NA, INGRESSOS_1_15), Date = Data, # Reorganize factor labels `Place of birth` = factor(case_when( LLOC_NAIX == "Catalunya" ~ "Catalonia", LLOC_NAIX %in% c("No ho sap", "No contesta") ~ as.character(NA), TRUE ~ "Outside Catalonia")), # Convert into numerical (integer) `Interest in politics` = case_when( INTERES_POL == "Gens" ~ 0L, INTERES_POL == "Poc" ~ 1L, INTERES_POL == "Bastant" ~ 2L, INTERES_POL == "Molt" ~ 3L, TRUE ~ as.integer(NA)), # Convert into numeric (double) and properly address missing values `Satisfaction with democracy` = ifelse( SATIS_DEMOCRACIA %in% c("No ho sap", "No contesta"), NA, as.numeric(SATIS_DEMOCRACIA))) |> # Center income to the median mutate(Income = Income - median(Income, na.rm = TRUE)) |> # Pick only specific variables select(Date, Female, Age, Income, `Place of birth`, `Interest in politics`, `Satisfaction with democracy`) ``` Finally, this can be stored for further analysis (hence, without the need to download and arrange the data again) in an R's native format: ```{r eval = FALSE} save(survey.data, file = "my_cleaned_dataset.RData") ``` # Descriptive summary There are several packages that construct convenient tables with the descriptive summary of a dataset. For example, using the `vtable` package to produce a table with descriptive statistics. ```{r, eval = FALSE, echo = TRUE} library(vtable) st(survey.data) ``` ```{r, eval = TRUE, echo = FALSE} if (exists("survey.data")) { if (!is.null(survey.data)) { vtable::st(survey.data, out = "kable") } } ``` Or the `compareGroups` that allows to flexibly produce tables that compare descriptive statistics for different groups of individuals. ```{r, eval = FALSE, echo = TRUE} library(compareGroups) createTable(compareGroups(Female ~ . -Date, data = survey.data)) ``` ```{r, eval = TRUE, echo = FALSE} if (exists("survey.data")) { if (!is.null(survey.data)) { library(compareGroups) createTable(compareGroups(Female ~ . -Date, data = survey.data)) } } ``` # Development and acknowledgement The development of `CEOdata` (track changes, propose improvements, report bugs) can be followed at [github](https://github.com/ceopinio/CEOdata/). If using the data and the package, please cite and acknowledge properly the CEO and the package, respectively. <!-- # References -->
/scratch/gouwar.j/cran-all/cranData/CEOdata/inst/doc/working_with_survey_data_using_the_CEOdata_package.Rmd
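As a small follow-up to the `survey.data` object built in the vignette above (using only the column names created there), one could tabulate satisfaction with democracy by interest in politics; a sketch:

```r
library(dplyr)

survey.data |>
  group_by(`Interest in politics`) |>
  summarize(
    mean.satisfaction = mean(`Satisfaction with democracy`, na.rm = TRUE),
    respondents = n())
```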
#' Fits a Random Forest of Interactions Trees ## usethis namespace: start #' @useDynLib CERFIT, .registration = TRUE ## usethis namespace: end ## usethis namespace: start #' @importFrom Rcpp sourceCpp ## usethis namespace: end #' @importFrom partykit partysplit #' @importFrom partykit party #' @importFrom partykit partynode #' @importFrom partykit kidids_split #' @importFrom partykit nodeids #' @importFrom partykit fitted_node #' @importFrom partykit as.constparty #' @importFrom partykit nodeapply #' @importFrom partykit split_node #' @importFrom partykit info_node #' @importFrom grid depth #' @importFrom stats complete.cases #' @importFrom stats terms #' @description Estimates an observations individualized treatment effect for RCT #' and observational data. Treatment can be an binary, categorical, ordered, or continuous #' variable. Currently if response is binary useRes must be set equal to TRUE. #' @param formula Formula to build CERFIT. Categorical predictors must be listed as a factor. e.g., Y ~ x1 + x2 | treatment #' @param data Data to grow a tree. #' @param ntrees Number of Trees to grow #' @param subset A logical vector that controls what observations are used to grow the forest. #' The default value will use the entire dataframe #' @param search Method to search through candidate splits #' @param method For observational study data, method="observational";for randomized study data, method="RCT". #' @param PropForm Method to estimate propensity score #' @param split Impurity measure splitting statistic #' @param mtry Number of variables to consider at each split #' @param nsplit Number of cut points selected #' @param nsplit.random Logical: indicates if process to select cut points are random #' @param minsplit Number of observations required to continue growing tree #' @param minbucket Number of observations required in each child node #' @param maxdepth Maximum depth of tree #' @param a Sigmoid approximation variable (for "sss" which is still under development) #' @param sampleMethod Method to sample learning sample. Default is bootstrap. Subsample #' takes a subsample of the original data. SubsamplebyID samples by an ID column and #' uses all observations that have that ID. allData uses the entire data set #' for every tree. #' @param useRes Logical indicator if you want to fit the CERFIT model to #' the residuals from a linear model #' @param scale.y Logical, standardize y when creating splits (For "sss" to increase stability) #' @return Returns a fitted CERFIT object which is a list with the following elements #' \itemize{ #' \item RandFor: The Random forest of interaction trees #' \item trt.type: A string containing the treatment type of the data used to fit the model. #' Cant be binary, multiple, ordered or continuous. #' \item response.type: A string representing the response type of the data. Can be #' binary or continuous. #' \item useRes: A logical indicator that is TRUE if the model was fit on the #' residuals of a linear model #' \item data: The data used to fit the model also contains the propensity score if #' method was set to observational} #' @details This function implements Random Forest of Interaction Trees proposed #' in Su (2018). Which is a modification of the Random Forest algorithm where #' instead of a split being chosen to maximize prediction accuracy each split #' is chosen to maximized subgroup treatment heterogeneity. 
It chooses the best #' split by maximizing the test statistic for \eqn{H_0: \beta_3=0} in the #' following linear model #' #' \eqn{Y_i = \beta_0 + \beta_1I(X_{ij} < c) + \beta_2I(Z = 1) + \beta_3I(X_{ij} < c)I(Z = 1) + \varepsilon_i} #' #' Where \eqn{X_{ij}} represents the splitting variable and Z = 1 represents #' treatment. So, by maximizing the test statistic for \eqn{\beta_3} we are #' maximizing the treatment difference between the nodes. #' #' The above equation only works when the data comes from a randomized controlled #' trial. But we can modify it to gives us unbiased estimates of treatment #' effect in observational studies Li et al. (2022). To do that we add propensity score into the #' linear model. #' #'\eqn{Y_i = \beta_0 + \beta_1I(X_{ij} < c) + \beta_2I(Z = 1) + \beta_3I(X_{ij} < c)I(Z = 1) + \beta_4e_i + \varepsilon_i} #' #'Where \eqn{e_i} represents the propensity score. The CERIT function will estimate #'propensity score automatically when the method argument is set to observational. #' #'To control how this function estimates propensity score you can use the #'PropForm argument. Which can take four possible values randomForest, CBPS, #' GBM and HI. randomForest uses the randomForest package to use a random forest #' to estimate propensity score, CBPS uses Covariate balancing propensity score #' to estimate propensity score GBM uses generalized boosted regression models #' to estimate propensity score, and HI is for continuous treatment and #' estimates the general propensity score. Some of these options only work #' for certain treatment types. Full list below #' \itemize{ #' \item binary: GBM, CBPS, randomForest #' \item categorical: GBM, CBPS #' \item ordered: GBM, CBPS #' \item continuous: CBPS, HI #' } #' #' @references #' \itemize{ #' \item Li, Luo, et al. Causal Effect Random Forest of #' Interaction Trees for Learning Individualized Treatment Regimes with #' Multiple Treatments in Observational Studies. Stat, 2022, #' https://doi.org/10.1002/sta4.457. #' \item Su, X., Peña, A., Liu, L., & Levine, R. (2018). Random forests of interaction trees for estimating individualized treatment effects in randomized trials. #' Statistics in Medicine, 37(17), 2547- 2560. #' \item G. W. Imbens, The role of the propensity score in estimating dose-response #' functions., Biometrika, 87 (2000), pp. 706–710. #' \item G. Ridgeway, D. McCarey, and A. Morral, The twang package: Toolkit for #' weighting and analysis of nonequivalent groups, (2006). #' \item A. Liaw and M. Wiener, Classification and regression by randomforest, R #' News, 2 (2002), pp. 18–22} #' @examples #' fit <- CERFIT(Result_of_Treatment ~ sex + age + Number_of_Warts + Area + Time + Type | treatment, #' data = warts, #' ntrees = 30, #' method = "RCT", #' mtry = 2) #' #' @export ### Grows a random forest ### # Res is for fitting the residuals CERFIT <- function( formula, data, ntrees, subset = NULL,search=c("exhaustive","sss"), method=c("RCT","observational"), PropForm=c("randomForest","CBPS","GBM", "HI"), split=c("t.test"), mtry=NULL, nsplit=NULL, nsplit.random=FALSE, minsplit=20, minbucket=round(minsplit/3), maxdepth=30, a=50, sampleMethod=c('bootstrap','subsample','subsampleByID','allData'), useRes=TRUE, scale.y=FALSE)# { sampleMethod <- match.arg(sampleMethod, c('bootstrap','subsample','subsampleByID','allData')) if (missing(formula)) stop("A formula must be supplied.", call. 
= FALSE) if (missing(data)) data <- NULL response <- data[[all.vars(formula)[1]]] response.type = "continous" if(is.factor(response) & length(levels(response)) == 2){ response.type = "binary" #useRes = FALSE # Residual dont work with binary response right now } response_print <- paste0(toupper(substring(response.type,first = 1,last = 1)), substring(response.type,first = 2)) cat(paste(response_print,"Response","\n")) if(useRes){ if (response.type == "binary") { resformula<- stats::as.formula(paste(all.vars(formula)[1], paste(all.vars(formula)[2:(length(all.vars(formula))-1)], collapse=" + "), sep=" ~ ")) reslm <- stats::glm(resformula,data,family = stats::binomial) eres <- (as.numeric(data[[all.vars(formula)[1]]]) - 1) - stats::fitted(reslm) data$yo <- data[[all.vars(formula)[1]]] data[[all.vars(formula)[1]]] <- eres } else { resformula<- stats::as.formula(paste(all.vars(formula)[1], paste(all.vars(formula)[2:(length(all.vars(formula))-1)], collapse=" + "), sep=" ~ ")) reslm <- stats::lm(resformula,data) eres <- stats::resid(reslm) data$yo <- data[[all.vars(formula)[1]]] data[[all.vars(formula)[1]]] <- eres } } else { data$yo <- data[[all.vars(formula)[1]]] } TrT <- data[all.vars(formula)[length(all.vars(formula))]] trt.length<-nrow(unique(TrT)) if (trt.length<2) stop("Only one treatment?", call. = FALSE) trt.type <- ifelse(trt.length==2,"binary","multiple") trt.type <- ifelse(is.ordered(TrT[[1]]),"ordered",trt.type) trt.type <- ifelse(trt.length>10, "continuous", trt.type) trtlevels<-c(1:trt.length) trttype_print <- paste0(toupper(substring(trt.type,first = 1,last = 1)), substring(trt.type,first = 2)) cat("Treatment Levels: ") cat(paste0(trtlevels),"\n") cat(paste(trttype_print,"Treatment","\n")) if(method=="observational"){ propformula <- stats::as.formula(paste(all.vars(formula)[length(all.vars(formula))], paste(all.vars(formula)[2:(length(all.vars(formula))-1)], collapse=" + "), sep=" ~ ")) if(trt.type=="continuous"){ if(PropForm=="CBPS"){ propfun <- CBPS::CBPS(propformula, data = data[,all.vars(formula)[-1]],ATT=FALSE,method = "exact")# prop <- propfun$fitted.values Iptw <- propfun$weights } else if(PropForm=="HI") { propfun <- stats::lm(propformula,data=data[all.vars(formula)[-1]]) prt <- stats::predict(propfun) sigm <- summary(propfun)$sigma prop <- stats::dnorm(TrT,prt,sigm) modhi = stats::lm(TrT~1) ps.num = stats::dnorm((TrT-modhi$fitted)/(summary(modhi))$sigma,0,1) Iptw=ps.num/prop } } else if(trt.type=="binary") { if (PropForm=="GBM") { propfun <- twang::ps(propformula,data=data[,all.vars(formula)[-1]],interaction.depth = 4, stop.method = "es.max",estimand="ATE",verbose=FALSE,n.trees = 10000) prop <- propfun$ps Iptw<- twang::get.weights(propfun,stop.method = "es.max",estimand="ATE") } else if (PropForm=="CBPS") { propfun <- CBPS::CBPS(propformula, data = data[,all.vars(formula)[-1]],ATT=FALSE,method = "exact")# prop <- propfun$fitted.values Iptw <- propfun$weights } else if (PropForm=="randomForest") { propfun<- suppressWarnings(randomForest::randomForest(propformula,data=data[all.vars(formula)[-1]])) prop <- propfun$predicted Iptw <- sum(TrT)/length(TrT)*TrT/prop+sum(1-TrT)/length(TrT)*(1-TrT)/(1-prop) #Iptw <-TrT/prop+(1-TrT)/(1-prop) Iptw <- truncquant(Iptw[[1]],q=0.9) } } else if (trt.type=="multiple"){ if(PropForm=="GBM") { data[,all.vars(formula)[length(all.vars(formula))]]<-as.factor(data[,all.vars(formula)[length(all.vars(formula))]]) propfun <- twang::mnps(propformula,data=data[,all.vars(formula)[-1]],interaction.depth = 4, stop.method = 
"es.max",estimand="ATE",verbose=FALSE,n.trees = 10000) pslist<-propfun$psList prop<-matrix(NA,ncol=trt.length,nrow=nrow(data)) for(i in 1:trt.length){ prop[,i]<-unlist(pslist[[i]]$ps) } colnames(prop)<-levels(data[,all.vars(formula)[length(all.vars(formula))]]) levels(data[,all.vars(formula)[length(all.vars(formula))]])<-c(1:trt.length) Iptw <- twang::get.weights(propfun,stop.method = "es.max",estimand="ATE") } else if (PropForm=="CBPS" & trt.length<5 ) { data[,all.vars(formula)[length(all.vars(formula))]]<-as.factor(data[,all.vars(formula)[length(all.vars(formula))]]) propfun <- CBPS::CBPS(propformula, data = data[,all.vars(formula)[-1]],ATT=FALSE,method = "exact")# prop <- propfun$fitted.values Iptw <- propfun$weights levels(data[,all.vars(formula)[length(all.vars(formula))]])<-c(1:trt.length) } } else if(trt.type == "ordered") { if(PropForm == "GBM") { prop <- matrix(NA,ncol= length(unique(TrT[[1]])),nrow=nrow(data)) propfun <- twang::mnps(propformula,data=data[,all.vars(formula)[-1]],interaction.depth = 4, stop.method = "es.max",estimand="ATE",verbose=FALSE,n.trees = 10000) pslist <- propfun$psList for(i in 1:length(unique(TrT[[1]]))){ prop[,i]<-unlist(pslist[[i]]$ps) } prop <- t(apply(prop, 1, cumsum)) prop <- as.data.frame(prop)[,colSums(is.na(prop)) == 0] prop <- prop / prop[,length(unique(TrT[[1]]))] #names(prop) <- TrT_splits[!is.na(TrT_splits)] Iptw <- twang::get.weights(propfun,stop.method = "es.max",estimand="ATE") #Iptw<- rep(1,nrow(data)) #Iptw <- sum(TrT)/length(TrT)*TrT/prop+sum(1-TrT)/length(TrT)*(1-TrT)/(1-prop) #Iptw <-TrT/prop+(1-TrT)/(1-prop) Iptw <- truncquant(Iptw,q=0.9) } else if (PropForm == "CBPS") { #data[,all.vars(formula)[length(all.vars(formula))]]<-as.factor(data[,all.vars(formula)[length(all.vars(formula))]]) propfun <- CBPS::CBPS(propformula, data = data[,all.vars(formula)[-1]],ATT=FALSE,method = "exact")# prop <- propfun$fitted.values print(prop) Iptw <- propfun$weights levels(data[,all.vars(formula)[length(all.vars(formula))]])<-c(1:trt.length) } else if (PropForm == "old") { prop <- matrix(NA,ncol= length(unique(TrT[[1]])),nrow=nrow(data)) propfun <- twang::mnps(propformula,data=data[,all.vars(formula)[-1]],interaction.depth = 4, stop.method = "es.max",estimand="ATE",verbose=FALSE,n.trees = 10000) pslist <- propfun$psList for(i in 1:length(unique(TrT[[1]]))){ prop[,i]<-unlist(pslist[[i]]$ps) } prop <- t(apply(prop, 1, cumsum)) prop <- as.data.frame(prop)[,colSums(is.na(prop)) == 0] #names(prop) <- TrT_splits[!is.na(TrT_splits)] Iptw <- twang::get.weights(propfun,stop.method = "es.max",estimand="ATE") #Iptw<- rep(1,nrow(data)) #Iptw <- sum(TrT)/length(TrT)*TrT/prop+sum(1-TrT)/length(TrT)*(1-TrT)/(1-prop) #Iptw <-TrT/prop+(1-TrT)/(1-prop) Iptw <- truncquant(Iptw,q=0.9) } } else stop("Please specify a propensity score method: randomForest or CBPS or GBM", call. = FALSE) } else if (method=="RCT") { prop <- rep(1,nrow(data))#rep("none",nrow(data)) # for observational no prop need Iptw<- rep(1,nrow(data))} if(!exists("Iptw")) { stop("Not able to estimate Propenisty Score. 
\n Check that your function arguments are correct") } #data[,all.vars(formula)[length(all.vars(formula))]]<-as.numeric(as.character(data[,all.vars(formula)[length(all.vars(formula))]])) data[,all.vars(formula)[-length(all.vars(formula))]] <- sapply(data[,all.vars(formula)[-length(all.vars(formula))]],as.numeric) data$iptw <- Iptw data$prop <- prop #data <- cbind(data,prop) #return(data) #Construct random forest randFor <- lapply(1:ntrees,function(b){ if(b%%10==0){cat(paste0("Tree Number: ",b,"\n"))} #print(paste0("Tree Number: ",b)) obs.b <- switch(sampleMethod, bootstrap = sample.int(nrow(data), size=nrow(data), replace=TRUE, prob=data$iptw), #inverse weighting in boostrapping subsample = sample.int(nrow(data), size=round(nrow(data)*0.632), replace=FALSE,prob=data$iptw), # stratified sampling #subsampleByID = {nIds <- length(unique(data[[idVar]])) #unlist(lapply(sample(unique(data[[idVar]]), size=round(nIds*0.632), replace=FALSE), # function(x){which(data[[idVar]] == x)}))}, allData = 1:nrow(data)) sample.b <- data[obs.b,] tree.b <- growTree(formula=formula, data=sample.b, subset=subset, search=search, method=method, split=split, mtry=mtry, nsplit=nsplit, nsplit.random=nsplit.random, minsplit=minsplit, minbucket=minbucket, maxdepth=maxdepth, a=a, scale.y=scale.y, useRes=useRes, trtlevels=trtlevels,response.type = response.type)#, useRpart=useRpart, minpvalue=minpvalue, corstr=corstr) list(tree=tree.b,cases=sort(unique(obs.b))) }) trt <- data[[all.vars(formula)[length(all.vars(formula))]]] #print(length(trt)) #print(nrow(data)) #print(length(all.vars(formula))) data[[all.vars(formula)[length(all.vars(formula))]]] <- as.numeric(as.character(trt)) object <- list(randFor = randFor,trt.type = trt.type, response.type = response.type, useRes = useRes, data = data) class(object) <- "CERFIT" return(object) } # Having issues in mutiple treatment where in partition only a single treatment # is present in the data
/scratch/gouwar.j/cran-all/cranData/CERFIT/R/CERFIT.R
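The details section of the roxygen block above describes the split criterion: maximizing the test statistic for the treatment-by-split interaction \eqn{\beta_3}. A standalone illustration of that idea on simulated data (not the package's internal code, which uses the compiled `find_split()` routine shown in RcppExports.R below):

```r
# For a candidate cutpoint c on covariate x, fit Y ~ I(x < c) * Z and take the
# absolute t-statistic of the interaction; the best split maximizes it.
set.seed(1)
n <- 200
x <- runif(n)
z <- rbinom(n, 1, 0.5)                          # randomized treatment
y <- 1 + 0.5 * z + 1.5 * (x < 0.4) * z + rnorm(n)

interaction_t <- function(c) {
  fit <- lm(y ~ I(x < c) * z)
  abs(coef(summary(fit))["I(x < c)TRUE:z", "t value"])
}

cutpts <- seq(0.1, 0.9, by = 0.05)
stats <- vapply(cutpts, interaction_t, numeric(1))
cutpts[which.max(stats)]   # cutpoint with the strongest treatment-heterogeneity signal
```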
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

find_split <- function(y, x, trt, cutpts, method, propensity, minbucket, response_type) {
    .Call(`_CERFIT_find_split`, y, x, trt, cutpts, method, propensity, minbucket, response_type)
}
/scratch/gouwar.j/cran-all/cranData/CERFIT/R/RcppExports.R
#' Observational Educational Dataset
#'
#' A simulated dataset containing the grades and other attributes of 1000
#' simulated students
#'
#' @format A data frame with 1000 rows and 7 variables:
#' \describe{
#'   \item{SAT_MATH}{SAT Math Score}
#'   \item{HSGPA}{High School GPA}
#'   \item{AGE}{Age of Student}
#'   \item{GENDER}{Gender of Student}
#'   \item{URM}{Under-Represented Minority}
#'   \item{A}{Treatment Variable}
#'   \item{Y}{Student's Final Grade}
#' }
#' @source Wilke, Morten C., et al. “Estimating the Optimal Treatment
#' Regime for Student Success Programs.” Behaviormetrika, vol. 48, no. 2, 2021,
#' pp. 309–343., https://doi.org/10.1007/s41237-021-00140-0.
"educational"

#' Randomized Controlled Trial Warts Dataset
#'
#' A dataset comparing immunotherapy to cryotherapy treatments and their
#' effectiveness at removing warts
#'
#' @format A data frame with 180 rows and 8 variables:
#' \describe{
#'   \item{sex}{Patient's Sex}
#'   \item{age}{Patient's Age}
#'   \item{Time}{Time Elapsed Before Treatment}
#'   \item{Number_of_Warts}{Number of Warts}
#'   \item{Type}{Type of Wart}
#'   \item{Area}{Wart Surface Area}
#'   \item{Result_of_Treatment}{Treatment Outcome}
#'   \item{treatment}{0 for immunotherapy and 1 for cryotherapy}
#' }
#' @source Khozeimeh, Fahime, et al. “An Expert System for Selecting Wart
#' Treatment Method.” Computers in Biology and Medicine, vol. 81, 2017,
#' pp. 167–175., https://doi.org/10.1016/j.compbiomed.2017.01.001.
"warts"
/scratch/gouwar.j/cran-all/cranData/CERFIT/R/data.R
### Finds candidate cutpoints ###
findCutpts <- function(x, minbucket) {
  nX <- length(x)
  x_sort <- sort(x, method = "quick")
  # Due to ties, it's possible minbucket cannot be satisfied
  if (x_sort[minbucket] == rev(x_sort)[minbucket]) {
    cutpts <- NULL
  } else {
    # Cutpoints considered must satisfy minbucket
    cutpts <- unique(x_sort[minbucket:(nX - minbucket + 1)])
    if (length(cutpts) == 1) {
      stop(paste0("Only one candidate cutpoint (", cutpts,
                  ") remains after applying minbucket; cannot split on this variable"))
    }
    # Candidate splits are midpoints between consecutive unique values
    cutpts <- (cutpts[1:(length(cutpts) - 1)] + cutpts[2:length(cutpts)]) / 2
  }
  return(cutpts)
}
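# Illustrative sketch (not run) of the helper above: with minbucket = 3 the candidate
# cutpoints are the midpoints between consecutive unique values of x, restricted so that
# at least three observations fall on each side of any split; x is made up for illustration.
if (FALSE) {
  x <- c(1, 2, 2, 3, 5, 8, 8, 9, 10)
  findCutpts(x, minbucket = 3)   # returns 2.5 4.0 6.5
}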
/scratch/gouwar.j/cran-all/cranData/CERFIT/R/findCutpts.R
### Grow tree by using partition() function several times in a recursive loop ### growTemp <- function(id=1L, depth=1L, data, response, treatment, Propensity, subset, search, method, split, mtry, nsplit, nsplit.random, minsplit, minbucket, maxdepth, a, scale.y, trtlevels,response.type){ if (depth > maxdepth) {return(partynode(id=id))} y <- data[[response]] trt<-data[[treatment]] propensity<-data[[Propensity]] varSelected <- sort(sample.int(ncol(data)-4, mtry)) vars <- data[varSelected] colnames(vars) <- varSelected #Have columns represent varid sp <- partition(vars=vars, y=y, subset=subset,trt=trt,propensity=propensity, search=search, method=method, split=split, nsplit=nsplit, nsplit.random=nsplit.random, minsplit=minsplit, minbucket=minbucket, a=a, scale.y=scale.y, trtlevels=trtlevels,response.type = response.type) #useSearch=useSearch, useOptim=useOptim, if (is.null(sp)) {return(partynode(id=id))} # Split the data kidids <- kidids_split(sp, data=data) depth <- depth + 1 #print(max(kidids, na.rm=TRUE)) kids <- vector(mode="list", length=max(kidids, na.rm=TRUE)) for (kidid in seq_along(kids)) { s <- subset # subset is the previous loops s s[kidids != kidid] <- FALSE # Node ID if (kidid > 1) {myid <- max(nodeids(kids[[kidid-1]])) } else {myid <- id} # Start recursion on this daugther node kids[[kidid]] <- growTemp(id=as.integer(myid+1), depth=depth, data=data, response=response, treatment=treatment, Propensity=Propensity, subset=s, search=search, method=method, split=split, mtry=mtry, nsplit=nsplit, nsplit.random=nsplit.random, minsplit=minsplit, minbucket=minbucket, maxdepth=maxdepth, a=a, scale.y=scale.y, trtlevels=trtlevels,response.type = response.type) } #print(sapply(kids, class)) #print(length(kids)) #print(class(kids)) return(partynode(id=as.integer(id), split=sp, kids=kids, #info=list(stats=max(info_split(sp)$stats, na.rm=TRUE)))) info = depth)) }
/scratch/gouwar.j/cran-all/cranData/CERFIT/R/growTemp.R
growTree <- function(formula, data, subset=NULL, search=c("exhaustive","sss"), method=c("RCT","observational"), split=c("t.test", "pvalue"),#, "gini", "entropy", "information"), mtry=NULL, nsplit=NULL, nsplit.random=TRUE, minsplit=20, minbucket=round(minsplit/3), maxdepth=20, a=50, useRes, scale.y=FALSE, trtlevels,response.type) { search <- match.arg(search,c("exhaustive","sss")) method <- match.arg(method,c("RCT","observational")) split <- match.arg(split,c("t.test", "pvalue")) stopifnot(is.logical(nsplit.random), is.logical(scale.y), is.logical(useRes))#, is.logical(useRpart)) if (is.numeric(nsplit) && !nsplit.random && nsplit < 5) {"Selecting <5 ordered splits may yield unexpected results"} response <- all.vars(formula)[1] if(grepl("\\|", as.character(formula)[3])){ treatment <- trimws(strsplit(as.character(formula)[3], "\\|")[[1]][2], which="both") } else {stop("Please specify the treatment in formula")} Propensity <- "prop" Iptw <- "iptw" data <- data[c(all.vars(formula)[-1], response, Iptw,Propensity)] #Rearrange data so that response comes last if (!all(complete.cases(data[-length(data)])) & !is.null(subset)) { paste0("Specifying subset with missing data can yield unexpected results") } data <- data[complete.cases(data[-length(data)]),] # The reason for not checking the last column for NA's is that the last column can be a # data.frame(?) and would through an error. Since the last column is propensity any # NA's are caused by NA is other columns so this should be fine if (is.null(mtry)){mtry <- length(all.vars(formula[[3]]))-1} #defult mtry=p #if(is.factor(data[[response]])){data[[response]]=as.numeric(data[[response]]==levels(data[[response]])[1])} if (is.null(subset)){subset <- rep(TRUE, nrow(data))} # Grow tree nodes <- growTemp(id=1L, depth=1L, data=data, response=response, treatment=treatment, Propensity=Propensity, subset=subset, search=search, method=method, split=split, mtry=mtry, nsplit=nsplit, nsplit.random=nsplit.random, minsplit=minsplit, minbucket=minbucket, maxdepth=maxdepth, a=a, scale.y=scale.y, trtlevels=trtlevels,response.type = response.type) # Compute terminal node number for each observation fitted <- fitted_node(nodes, data=data) thing <- ncol(data[Propensity]) if(thing !=1) { daprop<-cbind(data[[treatment]],data[[Propensity]]) ps<-apply(daprop,1,function(v){x<-v[1];return(v[x+1])}) } ps<- data[[Propensity]] # Return rich constparty object ret <- party(nodes, data = data, fitted = data.frame("(fitted)" = fitted, "(response)" = data[[response]], "(treatment)" = data[[treatment]], "(propensity)"= ps,#data[[Propensity]], "(iptw)"= data[[Iptw]], check.names = FALSE), terms = terms(formula)) as.constparty(ret) #as.simpleparty(ret) }
/scratch/gouwar.j/cran-all/cranData/CERFIT/R/growTree.R
#' Calculate Variable Importance
#'
#' @param cerfit A fitted CERFIT object
#' @return Returns a named vector with the name of each predictor used to fit the CERFIT
#' object and its corresponding average minimal depth across all trees
#' @description Calculates the average minimal depth of each predictor used to fit
#' a CERFIT object. Variable importance is measured by a variable's average minimal
#' depth: variables with a lower average minimal depth are more important.
#' @details The depth of the root node is zero and if a variable does not appear
#' at any split in a tree it is assigned maxdepth + 1 for that tree.
#' @examples
#' fit <- CERFIT(Result_of_Treatment ~ sex + age + Number_of_Warts + Area + Time + Type | treatment,
#' data = warts,
#' ntrees = 30,
#' method = "RCT",
#' mtry = 2)
#' importance <- MinDepth(fit)
#' @export
MinDepth <- function(cerfit){
  # need to given number of levels if observation
  cerfit <- cerfit$randFor
  Term <- cerfit[[1]]$tree$terms
  dataTemp <- all.vars(Term[[3]])
  vars <- dataTemp[-length(dataTemp)]
  mindepth <- rep(0, length(vars))
  for (t in seq_along(cerfit)) {
    intNodes <- nodeids(cerfit[[t]]$tree)[-nodeids(cerfit[[t]]$tree, terminal = TRUE)]
    varsInTree <- vars[unique(unlist(nodeapply(cerfit[[t]]$tree, ids=intNodes,
                                               FUN=function(n){split_node(n)$varid})))]
    varsAtNode <- unlist(nodeapply(cerfit[[t]]$tree, ids=intNodes,
                                   FUN=function(n){split_node(n)$varid}))
    #Root node should be counted as 0
    #depthAtNode <- table(unlist(lapply(intNodes, function(x) intersect(intNodes, nodeids(cerfit[[t]]$tree, from=x)))))-1
    #depthAtNode <- idDepth(cerfit[[t]]$tree)
    depthAtNode <- unlist(nodeapply(cerfit[[t]]$tree, ids = intNodes, info_node)) - 2
    treeDepth <- depth(cerfit[[t]]$tree)
    for (j in seq_along(vars)) {
      if (is.element(vars[j], varsInTree)) { #If variable is in tree
        mindepth[j] = mindepth[j] + min(depthAtNode[varsAtNode==j])
      } else { #If variable not in tree, set mindepth to maximum depth+1
        mindepth[j] = mindepth[j] + treeDepth + 1
      }
    }
  }
  mindepth <- mindepth/length(cerfit)
  names(mindepth) <- vars
  return(mindepth)
}
/scratch/gouwar.j/cran-all/cranData/CERFIT/R/minDepth.R
### Convert factors to numerical value. ### ordinalize <- function(x, y, sortCat=TRUE){ if (is.factor(x)) { x <- factor(x) #Remove factors not listed #One can randomly assign a category a distinct numerical value if (!sortCat) { cutToLvl <- t(sample.int(length(levels(x)))) colnames(cutToLvl)=levels(x) } else { #For binary, sort data by proportion in class 1. For continuous, sort by means if (is.factor(y)) { cutToLvl <- prop.table(table(y,x),2)[1,,drop=FALSE] } else {cutToLvl <- t(vapply(levels(x), function(z){mean(y[x==z])}, numeric(1)))} } #Convert lvls to numerical value. Slow method. Make this faster later. xTemp <- rep(NA,length(x)) for (lvls in levels(x)) { xTemp[x==lvls] <- cutToLvl[colnames(cutToLvl)==lvls] } } else { xTemp <- x cutToLvl <- NULL } return(list(x=xTemp, cutToLvl=cutToLvl)) }
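# Illustrative sketch (not run): with a continuous y and sortCat = TRUE, each factor level
# is replaced by the mean of y within that level, so the factor can be split like an
# ordered variable; x and y below are made up for illustration.
if (FALSE) {
  x <- factor(c("a", "a", "b", "b", "c", "c"))
  y <- c(1, 2, 5, 6, 3, 4)
  ordinalize(x, y)   # "a" -> 1.5, "b" -> 5.5, "c" -> 3.5
}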
/scratch/gouwar.j/cran-all/cranData/CERFIT/R/ordinalize.R
partition<- function(vars, y, trt, propensity, subset, search, method, split, nsplit, nsplit.random, minsplit, minbucket, a, scale.y, useSearch, useOptim,trtlevels,response.type){#, allVars if (sum(subset) < minsplit) {return(NULL)} vars <- vars[subset,,drop=FALSE] y <- y[subset] trt<- trt[subset] if (length(unique(trt)) < 2) {return(NULL)} if(length(trtlevels) > 2 & length(trtlevels<10) & method != "RCT") { #& !is.ordered(trt)) { propensity <- propensity[subset,] } else { propensity <- propensity[subset] } trt.length<-length(trtlevels) if (method != "RCT") { if (is.ordered(trt)) { # Chosses the split point for ordered treatment ran <- sample(1:(length(propensity) - 2),1) # fix this so it chooses right split point # and stuff propensity <- propensity[,ran] trt <- ifelse(trt <= ran,1,0) } else if (trt.length>2 & trt.length < 10) { ## if less than 10 treatments/levels ran<- sample(unique(trt),2) vars<-subset(vars,trt==ran[1] | trt==ran[2]) y<-subset(y,trt==ran[1] | trt==ran[2]) propensity<-subset(propensity,trt==ran[1] | trt==ran[2]) trt<-subset(trt,trt==ran[1] | trt==ran[2]) trt<-ifelse(trt==ran[1],1,0) propensity<-propensity[,ran[1]] # need to make sure trt is levels as propensity nameorders } } else { if (is.ordered(trt)) { # Chooses the split point for ordered treatment trt <- as.numeric(trt) ran <- sample(min(trt):(max(trt) - 2),1) # fix this so it chooses right split point # and stuff trt <- ifelse(trt <= ran,1,0) } else if (trt.length>2 & trt.length < 10) { ## if less than 10 treatments/levels ran<- sample(unique(trt),2) vars<-subset(vars,trt==ran[1] | trt==ran[2]) y<-subset(y,trt==ran[1] | trt==ran[2]) #propensity<-subset(propensity,trt==ran[1] | trt==ran[2]) trt<-subset(trt,trt==ran[1] | trt==ran[2]) trt<-ifelse(trt==ran[1],1,0) #propensity<-propensity[,ran[1]] } } if (NROW(vars) < 2*minbucket) {return(NULL)} if (length(unique(y))==1) {return(NULL)} stats<- cutoff<- breakLeft<-NA findStats<-sapply(vars,function(x){ x_factor_check <- as.numeric(x) if (search=="exhaustive" && !is.null(nsplit) && nsplit.random) { xTemp <- ordinalize(x, y, sortCat=FALSE) } else { xTemp <- ordinalize(x, y, sortCat=TRUE) } x <- xTemp$x #If all x values the same, do not check optimal split if (abs(max(x) - min(x)) > 1e-8) { #The SSS partition deals with problems when there is a very small number of observations #Use exhaustive search in this case (or set minsplit >= 5) if (search=="sss") { #leave sss here for now print("sss not ready") } else if (search=="exhaustive") { #current codes only work exhaustive search cutpts <- findCutpts(x, minbucket) #z <- matrix(x,ncol = length(x))[rep(1, length(cutpts)), ] < cutpts if (is.null(nsplit)) { nsplit<- length(cutpts) } #Take nsplit cutpoints (if applicable) if (!is.null(nsplit) && !is.null(cutpts) && length(cutpts) > 1) { #If nsplit.random is TRUE, take nsplit cutpts randomly. 
Otherwise, take nsplit cutpts equally spread out across cutpts if (!nsplit.random & length(cutpts) > nsplit) { #if not random select nsplit cut cutpts <- unique(cutpts[seq(1, length(cutpts), length.out=nsplit)]) } else { cutpts <- sort(sample(cutpts, min(c(nsplit, length(cutpts))), replace=FALSE)) } } # #It is possible (unlikely) no cutpoint can satisfy minbucket if (!is.null(cutpts)) { #print(list(y,x,trt,cutpts,method,propensity,minbucket,response.type)) mod <- find_split(y=y, x=x, trt=trt,cutpts=cutpts, method=method,propensity = propensity, minbucket=minbucket,response_type = response.type) if (!is.na(mod$stat)) { stats <- mod$stat if (is.factor(x_factor_check)) { cutoff<- "factor" breakLeft <- rep(NA, length(levels(x))) breakLeft[levels(x) %in% colnames(xTemp$cutToLvl)[xTemp$cutToLvl <= mod$cutoff]]=1L breakLeft[levels(x) %in% colnames(xTemp$cutToLvl)[xTemp$cutToLvl > mod$cutoff]]=2L print(breakLeft) if (all(is.na(breakLeft)) & length(unique(breakLeft))<=1) {stop("Did not find correct cutpoints")} } else {cutoff <- mod$cutoff; breakLeft<-NA} } } } else {stop("Unexpected search")} } return(c(stats,cutoff,breakLeft))}) #If randomly picking a subset of categories, do not sort by mean. Would be more likely to select variables when sorted #return(findStats) #If each candidate variable cannot be split (e.g. cannot satisfy minbucket), return null if (all(is.na(findStats[1,]))) {return(NULL)} if (inherits(findStats[2,which.max(findStats[1,])],"factor")) { #Index is used for categorical variable splits print("factor") return(partysplit(varid=as.integer(colnames(findStats)[which.max(findStats[1,])]), index=findStats[3,which.max(findStats[1,])], info=list(stats=findStats[1,]))) } else { #Breaks is used for continuous variable splits #print(as.integer(colnames(findStats)[which.max(findStats[1,])])) return(partysplit(varid=as.integer(colnames(findStats)[which.max(findStats[1,])]), breaks=findStats[2, which.max(findStats[1,])], info=list(stats=findStats[1,]))) } }
/scratch/gouwar.j/cran-all/cranData/CERFIT/R/partition.R
#' Get predictions from a CERFIT object #' #' @param object A fitted CERFIT object #' @param newdata New data to make predictions from. IF not provided will make predictions #' on training data #' @param gridval For continuous treatment. Controls for what values of treatment to predict #' @param prediction Return prediction using all trees ("overall") or using first i trees ("by iter") #' @param type Choose what value you wish to predict. Response will predict the response. #' ITE will predict the Individualized treatment effect. Node will predict the node. And opT #' will predict the optimal treatment for each observation. #' @param alpha For continuous treatment it is the mixing parameter for the elastic #' net regularization in each node. When equal to 0 it is ridge regression and #' when equal to 1 it is lasso regression. #' @param ... Additional Arguments #' @return The return value depends of the type argument. If type is response the function #' will return a matrix with n rows and the number of columns equal to the level of treatment. #' If type is ITE then it returns a matrix with n rows and a number of columns equal to #' one minus the levels of treatment. And if type is opT then it returns a matrix with n #' rows and two columns. With the first column denoting the optimal treatment and #' the second column denoting the optimal response. #' @examples #' fit <- CERFIT(Result_of_Treatment ~ sex + age + Number_of_Warts + Area + Time + Type | treatment, #' data = warts, #' ntrees = 30, #' method = "RCT", #' mtry = 2) #' ite <- predict(fit,type = "ITE") #' @export predict.CERFIT <- function(object,newdata = NULL, gridval=NULL, prediction=c("overall","by iter"), type=c("response","ITE","node","opT"), alpha=0.5,...){ #Return prediction using all trees ("overall") or using first i trees ("by iter")S prediction <- match.arg(prediction, c("overall","by iter")) useRse <- object$useRes data <- object$data if (is.null(newdata)) newdata <- object$data response.type <- object$response.type treatment.type <- object$trt.type object <- object$randFor type <- match.arg(type, c("response","ITE","node","opT")) cumMeanNA <- function(x){ xTemp <- x; xTemp[is.na(xTemp)] <- 0 cumsum(xTemp)/cumsum(!is.na(x)) } #utrt<- sort(unique(c(fitted(x[[1]]$tree)[,3],fitted(x[[2]]$tree)[,3],fitted(x[[3]]$tree)[,3]))) formulaTree <- stats::formula(object[[1]]$tree$terms) treatment <- all.vars(formulaTree)[length(all.vars(formulaTree))] utrt<-sort(unique(data[[treatment]])) LB<-min(data[[treatment]]) UB<-max(data[[treatment]]) qu<-seq(LB,UB,length.out = 6) ## add a statement warning if gridvalue beyond the LB and UB ## should add warnings here if gridbalue beyond min or max utrt ntrt <- length(utrt) # if grival is null, use the 10th quantile if(useRse == TRUE & response.type == "continous"){ resformula <- stats::as.formula(paste("yo", paste(all.vars(formulaTree)[2:(length(all.vars(formulaTree))-1)], collapse=" + "), sep=" ~ ")) reslm <- stats::lm(resformula,data) ylmp <- stats::predict(reslm,newdata) #print("WHAT") } else if(useRse == TRUE & response.type == "binary") { resformula <- stats::as.formula(paste("yo", paste(all.vars(formulaTree)[2:(length(all.vars(formulaTree))-1)], collapse=" + "), sep=" ~ ")) reslm <- stats::glm(resformula,data,family = stats::binomial()) ylmp <- stats::predict(reslm,newdata,type = "response") print(length(ylmp)) } else { ylmp<-rep(0,nrow(newdata)) } if(length(utrt)<=20){ ## if less than 20 unique treatments/levels using unique treatments ntrt=length(utrt) gridval<-utrt } else 
if(is.null(gridval)) { # if more than 20, and gridval is null, use percentiles at 5% increment gridval <- stats::quantile(utrt, prob = seq(0, 1, length = 21)) ntrt<-length(gridval)-1 } else { ntrt<-length(gridval)} print(gridval) if(type!="opT"){ predictMat <- lapply(lapply(object, "[[" , "tree"), predictTree, newdata=newdata,gridval=gridval,ntrt=ntrt,type=type,LB=LB,UB=UB,alpha=alpha) ypre<- do.call(cbind,predictMat) #yp<- lapply(1:ntrt,function(i,k) k[,seq(i, by = ntrt, length = NCOL(ypre) / ntrt)],k=ypre) ypre<- lapply(1:ntrt,function(i,k) k[,seq(i, NCOL(ypre), by = ntrt)], k=ypre) y.pre<- t(matrix(unlist(lapply(ypre,rowMeans,na.rm=TRUE)), ncol=NROW(newdata),byrow = TRUE)) y.pre<-y.pre+ylmp #y.pre: by row observation, each column is the corresponding predition for 1 treatment. } else if (type == "opT" && treatment.type != "continous"){ predictMat<-lapply(lapply(object , "[[" , "tree"), predictTree, newdata=newdata,gridval=gridval,ntrt=ntrt,type="opT", LB=LB,UB=UB,alpha=alpha) #ntrt<-2 ypre<- do.call(cbind,predictMat) ypre<- lapply(1:ntrt,function(i,k) k[,seq(i, NCOL(ypre), by = ntrt)], k=ypre) y.pre<- t(matrix(unlist(lapply(ypre,rowMeans,na.rm=TRUE)), ncol=NROW(newdata),byrow = TRUE)) y.pre<- y.pre + ylmp t.opt <- max.col(y.pre) y.opt <- apply(y.pre, 1, max, na.rm = TRUE) #topt<-as.matrix(ypre[[1]]) #yopt<-as.matrix(ypre[[2]]) #y.opt<-rowMeans(yopt,na.rm = T)+ylmp #t.opt<-rowMeans(topt,na.rm = T) y.pre<- cbind(t.opt,y.opt) } else { predictMat<-lapply(lapply(object , "[[" , "tree"), predictTree, newdata=newdata,gridval=gridval,ntrt=ntrt,type="opT", LB=LB,UB=UB,alpha=alpha) ntrt<-2 ypre<- do.call(cbind,predictMat) ypre<- lapply(1:ntrt,function(i,k) k[,seq(i, NCOL(ypre), by = ntrt)], k=ypre) topt<-as.matrix(ypre[[1]]) yopt<-as.matrix(ypre[[2]]) y.opt<-rowMeans(yopt)+ylmp t.opt<-rowMeans(topt) y.pre<- cbind(t.opt,y.opt) } yname<-NA if (prediction=="overall") { if(type=="response") { resp <- y.pre yname<- paste("y=",gridval,sep="") colnames(resp) <- yname return(resp)} if(type=="ITE") { #using the first level or smallest value as reference group yname<-paste("y",utrt,"-y",utrt[1],sep="") ite<- y.pre-y.pre[,1] colnames(ite) <- c(yname) return(ite[,-1]) } if(type=="opT") { yname<-c("opTreat","opResponse") opTY<-y.pre colnames(opTY) <- c(yname) return(opTY) } } else if(prediction=="by iter"){ Ypre<-as.list(NA) for(i in 1: ntrt){ Ypre[[i]]<-t(apply(ypre[[i]],1,cumMeanNA)) } cumypre<-t(matrix(unlist(Ypre),ncol=NROW(newdata),byrow = TRUE)) ntree<-length(object) cumypre.l<- lapply(seq(1,(ntrt*ntree),by=ntree),function(i,k) k[,i:(i+ntree-1)], k=cumypre) print(cumypre.l) if(type=="response"){ yname<-paste("ycum",utrt,sep="") names(cumypre.l) <- yname return(cumypre.l)} if(type=="ITE") { cumite<-as.list(NA) for(i in 1:ntrt){ cumite[[i]]<- cumypre.l[[i]]-cumypre.l[[1]] } yname<-paste("ycum",utrt,"-ycum",utrt[1],sep="") names(cumite)<-yname print(yname) return(cumite[[-1]]) }} }
/scratch/gouwar.j/cran-all/cranData/CERFIT/R/predict.R
### Return predicted values from a tree. ### predictTree <- function(tree, newdata=tree$data, gridval, LB, UB, ntrt,type="response",alpha ){ da <- stats::fitted(tree) colnames(da)[2:5]<-c("y","Trt","prop","ww") ufit<-sort(unique(da[["(fitted)"]])) nodesNewdata <- stats::predict(tree, newdata=newdata, type="node") Y.min<-ifelse(min(da[,2])<0,2*min(da[,2]),min(da[,2])/2) Y.max<-ifelse(max(da[,2])<0,max(da[,2])/2,2*max(da[,2])) #if (ntrt<=10){ #for binary and multiple trt, ignore gridval if(ntrt<=10){ pred<- lapply(split(da ,list(da[["(fitted)"]],da[,3])), function(da){ ytemp<-try(stats::weighted.mean(da[,2],da[,length(da)],na.rm=T))#This is only using the first propensity (fixed) if(inherits(ytemp,"try-error")) { return(NA) } else { return(ytemp) } }) nodepred<- cbind(ufit,t(matrix(unlist(pred), ncol = length(ufit), byrow = TRUE))) } else { pred<- lapply(split(da ,da[["(fitted)"]]), function(da){ Trt<-da$Trt x<-cbind(Trt,Trt^2,Trt^3) lambdas <- 10^seq(5, -3, by = -.1) fit <- try(glmnet::cv.glmnet(x, da$y, family = "gaussian", alpha = alpha, lambda = lambdas,nfolds =10),silent=TRUE) if (inherits(fit, "try-error")){ fit2<-try(glmnet::cv.glmnet(x, jitter(da$y), family = "gaussian", alpha = alpha, lambda = lambdas, nfolds =10),silent=TRUE) if (inherits(fit2, "try-error")) { return(NA)} else {fit<-fit2} } Trt<-gridval ext<-Trt>max(da[,3])|Trt<min(da[,3]) nd<-cbind(gridval,gridval^2,gridval^3) ytemp <- stats::predict(fit, newx = nd, s=fit$lambda.min) ytemp[!ext]=ifelse(ytemp[!ext]>Y.max,Y.max,ytemp[!ext])##avoid extrem values ytemp[!ext]=ifelse(ytemp[!ext]<Y.min,Y.min,ytemp[!ext])#mean(da[,2]) ytemp[ext]=ifelse(ytemp[ext]>Y.max,NA,ytemp[ext]) ytemp[ext]=ifelse(ytemp[ext]<Y.min,NA,ytemp[ext]) if (type!="opT") { return(ytemp) }else { #top<-gridval[which.max(ytemp)] #yop<-max(ytemp) lengthout<-5 B <- seq(LB, UB, length.out=lengthout) opY<-Y.min; opTrt <- NA pref<-function(Trt){ trtt<-cbind(Trt,Trt^2,Trt^3) yp<- stats::predict(fit, newx = trtt, s=fit$lambda.min) return(yp)} for (b in 1:(lengthout-1)) { fit.tmp <- suppressWarnings(stats::optimize(pref, lower=B[b], upper=B[b+1], maximum=TRUE)) if (is.na(fit.tmp$objective)) { opY<-opY opTrt<-opTrt } else { if (!is.nan(fit.tmp$objective) && fit.tmp$objective > opY && fit.tmp$objective < Y.max ) { opY <- fit.tmp$objective opTrt <- fit.tmp$maximum } } } return(cbind(opTrt,opY))} }) nodepred<- cbind(ufit,matrix(unlist(pred), ncol = length(pred[[1]]), byrow = TRUE)) } if(type=="opT" && ntrt > 10) { ntrt<-2 } predictions<-as.data.frame(cbind(nodesNewdata,matrix(NA,ncol=ntrt,nrow=nrow(newdata)))) predictions[,2:(ntrt+1)] <- nodepred[match(predictions$nodesNewdata,nodepred[,1]),2:(ntrt+1)] return(predictions[,2:(ntrt+1)]) }
/scratch/gouwar.j/cran-all/cranData/CERFIT/R/predictTree.R
#' @export
print.CERFIT <- function(x,...){
  cat(paste("Number of Trees:",length(x$randFor),"\n"))
  cat(paste("Treatment Type:",x$trt.type, "\n"))
  cat(paste("Response Type:",x$response.type))
}
# CapStr <- function(y) {
#   c <- strsplit(y, " ")[[1]]
#   paste(toupper(substring(c, 1,1)), substring(c, 2),
#         sep="", collapse=" ")
# }
/scratch/gouwar.j/cran-all/cranData/CERFIT/R/print.R
# Truncate (winsorise) propensity scores at the q-th and (1 - q)-th quantiles,
# e.g. to avoid extreme inverse-probability weights.
truncquant <- function(prop, q = 0.9){
  qmax <- stats::quantile(prop, q)
  qmin <- stats::quantile(prop, 1 - q)
  prop[prop >= qmax] <- qmax
  prop[prop <= qmin] <- qmin
  return(prop)
}
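# Illustrative sketch (not run): capping simulated propensity scores at their 10th and 90th
# percentiles (q = 0.9); the scores here are made up for illustration.
if (FALSE) {
  prop <- runif(100, 0.01, 0.99)
  summary(truncquant(prop, q = 0.9))
}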
/scratch/gouwar.j/cran-all/cranData/CERFIT/R/truncquant.R
CEoptim <- function(f, f.arg=NULL, maximize=FALSE, continuous=NULL, discrete=NULL, N=100L, rho=0.1, iterThr=1e4L, noImproveThr=5, verbose=FALSE ) { ### Parse and check arguments. ## f should be a function if (!is.function(f)) stop("Argument 1 (f) should be a function.") ## Argument <continuous> should be a list specifying the sampling ## distribution of the continuous optimization ## variables. Incidentally it will define the number of such ## variables. ## The continuous sampling parameters and their defaults. ctsDefaults<- list(mean=NULL, sd=NULL, conMat=NULL, conVec=NULL,smoothMean=1, smoothSd=1, sdThr=0.001) if (!is.null(continuous)) { if (!is.list(continuous)) stop('Argument 2 (continuous) should be a list specifying the sampling distribution of the continuous optimization variables.') nameCon <- names(ctsDefaults) #name inner list nameInput <- names(continuous) if (length(noNms <- nameInput[!nameInput %in% nameCon])!=0) { stop("unknown parameter(s) in continuous: ", paste(noNms, collapse = ", ")) } else ctsDefaults[nameInput]<-continuous } mu0 <- ctsDefaults$mean sigma0 <- ctsDefaults$sd A <- ctsDefaults$conMat b <- ctsDefaults$conVec alpha <- ctsDefaults$smoothMean beta <- ctsDefaults$smoothSd eps <- ctsDefaults$sdThr ## Argument <discrete> should be a list specifying the sampling ## distribution of the discrete optimization variables. Incidentally ## it will define the number of such variables. ## The discrete sampling parameters and their defaults. discDefaults<-list(categories=NULL,probs=NULL, smoothProb=1,probThr=0.001) if (!is.null(discrete)) { if (!is.list(discrete)) stop('Argument 3 (discrete) should be a list specifying the sampling distribution of the discrete optimization variables.') nameDis <-names(discDefaults) nameDinput <-names(discrete) if (length(noNms <- nameDinput[!nameDinput %in% nameDis])!=0) { stop("unknown parameter(s) in discrete: ", paste(noNms, collapse = ", ")) } else discDefaults[nameDinput]<-discrete } categories<-discDefaults$categories tau0<- discDefaults$probs gamma <- discDefaults$smoothProb eta <- discDefaults$probThr r <- NULL unspecMu <- NULL if (!is.null(mu0) || !is.null(sigma0)) { if (is.null(mu0) || is.null(sigma0)) stop("If mu0 is specified so must be sigma0, and vice versa.") # if (!is.matrix(sigma0)) Sigma0 <- diag(sigma0,nrow=length(sigma0))^2 #QB Sigma0 if (length(mu0)!=length(sigma0)) stop("Arguments \"mu\" and \"sigma\" must be of same length.") else r <- length(mu0) ## The user might give NA as distribution parameter for some variables. unspecMu <- is.na(mu0) } ## If A is specified it should be a p-column matrix. if (!is.null(A)) { if (!is.matrix(A)) stop("Argument \"A\" should be a matrix.") p <- dim(A)[2] # problem cts dimension c <- dim(A)[1] # no. cts constraints # Test conformity with mu0 and sigma0. if (!is.null(r) && r!=p) stop('Number of continuous variables implied by "A" argument does not match that implied by mu0 and sigma0.') # Set unspecMu if necessary. if (is.null(unspecMu)) unspecMu <- rep(TRUE,p) # Test b given if (is.null(b)) stop('Argument "A" requires argument "b"') if (length(b)!=c) stop('Argument "b" not same length as no. rows A') } else { p <- r if (is.null(p)) p <- 0L if (!is.null(b)) stop('Argument "b" requires argument "A"') } ## If there are continuous vars then they must have an initial ## distribution of some sort. ## Could test whether the constraints define a bounded search region. 
if (any(unspecMu)) stop("Mu and Sigma must be specified for continuous variables.") ## if categories is specified it should be a vector of integers if (!is.null(categories) && is.null(tau0)) { if (!is.vector(categories) || !identical(typeof(categories),'integer')) stop("Argument \"categories\" should be an integer vector.") q <- length(categories) tau0 <- list(); for (c in categories) { tau0 <- c(tau0,list(rep(1.0/c,c))) } } if (!is.null(tau0)){ if(!is.list(tau0)) stop("\"tau\" in Argument discrete should be a list") if(is.null(categories)) categories<-mapply(length,tau0) } q <- length(categories) ## There should be at least one argument to f. if (p+q==0L) stop('There should be at least one argument discrete or continuous to f.') ## Elite prop. rho should be between 0 and 1. if (!is.numeric(rho) || !is.vector(rho) || length(rho)!=1 || rho>=1.0 || rho<=0.0) stop("Argument \"rho\" should be a number between 0 and 1\n") nElite <- round(N*rho) ## Update smoothing pars. alpha, beta and gamma should be between 0 and 1. if (any(!is.numeric(c(alpha,beta,gamma))) || any(c(alpha,beta,gamma)<0) || any(c(alpha,beta,gamma)>1)) stop('Arguments alpha, beta and gamma should be between 0 and 1.') ## Echo args for debugging. if (verbose) { cat('Number of continuous variables:',p,' \n') cat('Number of discrete variables:',q,'\n') cat('conMat=','\n') print(A) cat('conVec=','\n') print(b) cat('smoothMean:',alpha,'smoothSd:',beta,'smoothProb:',gamma,'\n') cat('N:',N,'rho:',rho,'iterThr:',iterThr,'sdThr:',eps,'probThr',eta,'\n') } ## Create a wrapper for f that takes two arguments, one for the ## continuous optimization variables, one for the discrete. s = (-1)^maximize if (p==0L) ff <- function(XC,XD) { return(s*do.call(f, c(list(XD),f.arg))) } else if (q==0L) ff <- function(XC,XD) { s*do.call(f,c(list(XC),f.arg)) } else ff <- function(XC,XD) { s*do.call(f,c(list(XC,XD),f.arg)) } ## Generate an initial sample. Xc <- matrix(nrow=N,ncol=p) #cts portion of sample if (p>0) { Xc <- rtmvnorm(N,mu0,Sigma0,A,b)$X #QB Sigma0 Variance } Xd <- matrix(nrow=N,ncol=q) if (q>0) { for (i in 1:q) { Xd[,i] <- sample(0:(categories[i]-1),N,replace=T,prob=tau0[[i]]) } } ## Evaluate objective function over initial sample Y <- mapply(ff,lapply(1:N,function(j)Xc[j, ,drop=F]), lapply(1:N,function(k)Xd[k, ,drop=F])) ## Identify elite. IX <- sort(Y,index.return=TRUE,decreasing=F)$ix elite <- IX[1:nElite] ## Estimate sampling distributions. if (p>0) { ## Estimate a normal distribution with smoothing mu <- colMeans(Xc[elite, ,drop=F])*alpha + mu0*(1-alpha) sigma <- apply(Xc[elite, ,drop=F],MARGIN=2,FUN=sd)*beta + sigma0*(1-beta) #QB Smoothing parameter for the standard deviations Sigma <- diag(sigma,nrow=p)^2 } if (q>0) { counts <- lapply(split(Xd[elite, ,drop=F],col(Xd[elite, ,drop=F])), table) tau <- list() for (i in 1:q) { v <- rep(0,categories[i]) v[1+as.numeric(dimnames(counts[[i]])[[1]])] <- as.vector(counts[[i]]) tau[[i]] <- v/nElite } ## Combine tau and tau0. 
tau <- mapply('+',lapply(tau,'*',gamma),lapply(tau0,'*',1-gamma), SIMPLIFY=F) } iter <- 0 ctsOpt <- Xc[elite[1],] disOpt <- Xd[elite[1],] optimum <- Y[elite[1]] gammat <-Y[elite[nElite]] ceprocess <- NULL diffopt <- Inf CEstates<- NULL probst<-list() ### Main loop -- test termination conditions while (iter < iterThr && diffopt!=0 && (( p>0 && max(sigma) > eps) || (q>0 && max(1.0-sapply(tau,max)) > eta))) { CEt<- NULL CEt<- c(iter,optimum*s,gammat*s) if(p>0){ CEt<- c(CEt,mu,max(sigma))} if(q>0){ CEt<- c(CEt,max(1.0-sapply(tau,max))) namet<-paste("probs",iter,sep="") assign(namet,tau) probst[[iter+1]]<-get(namet)} CEstates<- rbind(CEstates,CEt) if (verbose) { cat('iter:',iter, ' opt:',optimum*s) if(p>0){ cat(' maxSd:',max(sigma))} if(q>0){ cat(' maxProbs:', max(1.0-sapply(tau,max))) } cat('\n') } # if (verbose) { # CEt<- NULL # # cat('iter:',iter, # ' opt:',optimum*s) # CEt<- c(iter,optimum*s,gammat*s) # if(p>0){ # cat(' maxSd:',max(sigma)) # CEt<- c(CEt,mu,max(sigma))} # # if(q>0){ # cat(' maxProbs:', max(1.0-sapply(tau,max))) # CEt<- c(CEt,max(1.0-sapply(tau,max))) # # namet<-paste("probs",iter,sep="") # assign(namet,tau) # probst[[iter+1]]<-get(namet) # # } # # CEstates<- rbind(CEstates,CEt) # cat('\n') # # } # ## Generate sample and evaluate objective function if (p>0) { ## Sample truncated normal distributions. Xc <- rtmvnorm(N,mu,Sigma,A,b)$X #QB change sigma to Sigma } if (q>0) { ## sample categorical distributions. ## tjb should use some sort of "apply" instead of for loop. for (i in 1:q) { Xd[,i] <- sample(0:(categories[i]-1),N,replace=T,prob=tau[[i]]) } } Y <- mapply(ff,lapply(1:N,function(j)Xc[j, ,drop=F]), lapply(1:N,function(k)Xd[k, ,drop=F])) ## Identify elite. IX <- sort(Y,index.return=TRUE,decreasing=F)$ix elite <- IX[1:nElite] ## test for new optimum if (Y[elite[1]] < optimum ) { if (p>0) ctsOpt <- Xc[elite[1],] if (q>0) disOpt <- Xd[elite[1],] optimum <- Y[elite[1]] if(Y[elite[nElite]]<gammat) gammat<-Y[elite[nElite]] }# change here #ce process ceprocess <- c(ceprocess,optimum) if (iter > noImproveThr ){ diffopt<-sum(abs(ceprocess[(iter-noImproveThr):(iter-1)]-optimum)) } ## Reestimate sampling distributions. if (p>0) { mu <- colMeans(Xc[elite, ,drop=F])*alpha + mu*(1-alpha) sigma <- apply(Xc[elite, ,drop=F],MARGIN=2,FUN=sd)*beta + sigma*(1-beta) Sigma <- diag(sigma,nrow=p)^2 } if (q>0) { ## Reestimate multivariate categorical distribution with smoothing. tau0 <- tau counts <- lapply(split(Xd[elite, ,drop=F],col(Xd[elite, ,drop=F])), table) tau <- list() for (i in 1:q) { v <- rep(0,categories[i]) v[1+as.numeric(dimnames(counts[[i]])[[1]])] <- as.vector(counts[[i]]) tau[[i]] <- v/nElite } ## Combine tau and tau0. tau <- mapply('+',lapply(tau, '*',gamma),lapply(tau0,'*',1-gamma), SIMPLIFY=F) } iter <- iter+1 } if(iter==iterThr) convergence="Not converged" else if(diffopt==0) convergence=paste("Optimum did not change for",noImproveThr,"iterations") else convergence="Variance converged" if(verbose){ rownames(CEstates)<-c() if(p>0&&q==0) colnames(CEstates)<- c("iter","optimum","gammat",paste("mean",1:p,sep=""),"maxSd") else if(p==0 && q>0) colnames(CEstates)<- c("iter","optimum","gammat","maxProbs") else if(p>0 && q>0) colnames(CEstates)<- c("iter","optimum","gammat",paste("mean",1:p,sep=""),"maxSd","maxProbs") } out<-list(optimizer=list(continuous=ctsOpt,discrete=disOpt), optimum=s*optimum,termination=list(niter=iter, nfe=iter*N, convergence=convergence), states=CEstates,states.probs=probst) class(out)<- "CEoptim" out }
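# Illustrative usage sketch (not run) for CEoptim(): minimising a simple quadratic in two
# continuous variables.  The objective `sphere` and the sampling parameters below are made
# up for illustration; `continuous` uses the list names parsed above (mean, sd, ...).
if (FALSE) {
  sphere <- function(x) sum((x - c(1, -2))^2)
  res <- CEoptim(sphere,
                 continuous = list(mean = c(0, 0), sd = c(5, 5)),
                 maximize = FALSE, N = 200L, rho = 0.1)
  res$optimizer$continuous     # should approach c(1, -2)
  res$optimum                  # should approach 0
  res$termination$convergence
}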
/scratch/gouwar.j/cran-all/cranData/CEoptim/R/CEoptim.R
# Draw n samples from a Dirichlet(a) distribution by normalising independent
# Gamma(a_i, 1) variates; each row of the returned matrix sums to one.
dirichletrnd <- function(a,n) {
  out <- matrix(nrow=n,ncol=length(a))
  for (i in 1:n){
    y <- rgamma(length(a), a, 1)
    out[i,] <- y / sum(y)
  }
  return(out)
}
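# Illustrative sketch (not run): five draws from a symmetric Dirichlet(1, 1, 1); each row
# of the returned matrix is a probability vector summing to one.
if (FALSE) {
  set.seed(1)
  p <- dirichletrnd(c(1, 1, 1), n = 5)
  rowSums(p)   # all equal to 1
}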
/scratch/gouwar.j/cran-all/cranData/CEoptim/R/dirichletrnd.R
print.CEoptim<-function(x,...){ if (!inherits(x, "CEoptim")) stop("The object x is not the result from CEoptim function") #if(class(x)!="CEoptim") ## I changed due to issue for submission in CRAN args <- list(...) OutPut<- list(optimizer=TRUE, optimum=TRUE, termination=TRUE, states=FALSE,states.probs=FALSE) if((!is.null(args$optimizer)&&args$optimizer==TRUE)|| (!is.null(args$optimum)&&args$optimum==TRUE)|| (!is.null(args$termination)&&args$termination==TRUE)){ OutPut<- list(optimizer=FALSE, optimum=FALSE, termination=FALSE, states=FALSE,states.probs=FALSE) } nameOutPut <- names(OutPut) #name inner list nameInput <- names(args) if (length(noNms <- nameInput[!nameInput %in% nameOutPut])!=0) { stop("unknown parameter(s) output: ", paste(noNms, collapse = ", ")) } else OutPut[nameInput]<-args optimizer <- OutPut$optimizer optimum <- OutPut$optimum termination <- OutPut$termination states <- OutPut$states states.probs <- OutPut$states.probs if(optimizer==TRUE){ if(length(x$optimizer$continuous)>0) cat("Optimizer for continuous part:","\n",x$optimizer$continuous,"\n") if(length(x$optimizer$discrete)>0) cat("Optimizer for discrete part:","\n",x$optimizer$discrete,"\n") } if(optimum==TRUE){ cat("Optimum:","\n",x$optimum,"\n") } if(termination==TRUE){ cat("Number of iterations:","\n",x$termination$niter,"\n") cat("Total number of function evaluations:","\n",x$termination$nfe,"\n") cat("Convergence:","\n",x$termination$convergence,"\n") } if(states==TRUE){ cat("states:","\n") print(x$states) } if(states.probs==TRUE){ cat("Categorical sampling probabilities:","\n") print(x$states.probs) } }
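# Illustrative sketch (not run) of the print switches handled above; `res` stands for any
# CEoptim result (e.g. the sphere example sketched in CEoptim.R).
if (FALSE) {
  print(res)                        # default: optimizer, optimum and termination
  print(res, states = TRUE)         # additionally show the per-iteration states matrix
  print(res, states.probs = TRUE)   # categorical sampling probabilities, when present
}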
/scratch/gouwar.j/cran-all/cranData/CEoptim/R/print.CEoptim.R
### Converted from MATLAB by Tim J. Benham, 2014 ## function [X, rho, nar, ngibbs] = rmvnrnd(mu,sigma,N,A,b,rhoThr) ## %RMVNRND Draw from the truncated multivariate normal distribution. ## % X = rmvnrnd(MU,SIG,N,A,B) returns in N-by-P matrix X a ## % random sample drawn from the P-dimensional multivariate normal ## % distribution with mean MU and covariance SIG truncated to a ## % region bounded by the hyperplanes defined by the inequalities Ax<=B. ## % ## % [X,RHO,NAR,NGIBBS] = rmvnrnd(MU,SIG,N,A,B) returns the ## % acceptance rate RHO of the accept-reject portion of the algorithm ## % (see below), the number NAR of returned samples generated by ## % the accept-reject algorithm, and the number NGIBBS returned by ## % the Gibbs sampler portion of the algorithm. ## % ## % rmvnrnd(MU,SIG,N,A,B,RHOTHR) sets the minimum acceptable ## % acceptance rate for the accept-reject portion of the algorithm ## % to RHOTHR. The default is the empirically identified value ## % 2.9e-4. ## % ## % ALGORITHM ## % The function begins by drawing samples from the untruncated MVN ## % distribution and rejecting those which fail to satisfy the ## % constraints. If, after a number of iterations with escalating ## % sample sizes, the acceptance rate is less than RHOTHR it ## % switches to a Gibbs sampler. ## % ## % ACKNOWLEDGEMENT ## % This makes use of TruncatedGaussian by Bruno Luong (File ID: ## % #23832) to generate draws from the one dimensional truncated normal. ## % ## % REFERENCES ## % Robert, C.P, "Simulation of truncated normal variables", ## % Statistics and Computing, pp. 121-125 (1995). ## % Copyright 2011 Tim J. Benham, School of Mathematics and Physics, ## % University of Queensland. rtmvnorm <- function(N, mu, sigma, A, b,..., rhoThr=NULL, maxSample=NULL) { ## Constant parameters defaultRhoThr = 1e-4 # min. acceptance rate to apply accept-reject sampling. defaultMaxSample <- 1e6 # largest sample to draw ## ## Process input arguments. ## if (is.null(rhoThr) || rhoThr<0) rhoThr = defaultRhoThr if (is.null(maxSample) || maxSample<0) maxSample = defaultMaxSample mu <- t(mu) p <- length(mu) #dimensions if (p<1) stop('Problem dimension must be at least 1') if (is.null(A) || is.na(A) || !is.matrix(A) || dim(A)[2]==0) { A <- matrix(rep(0,p),nrow=1) b=c(0); } A <- t(A); b <- t(b); m <- dim(A)[2] #no. constraints if (length(b) != m) stop('A and b not conformable') ## ## initialize return arguments ## X <- matrix(nrow=N, ncol=p) nar <- 0; ngibbs <- 0; rho <- 1 ### ### Approach 1 : Accept/Reject ### if (rhoThr<1) { ## Try accept-reject approach. n <- 0 # no. accepted trials <- 0; passes <- 0; s <- N while (n<N && (rho>rhoThr || s<maxSample)) { # cat('n:',n,a'rho:',rho,'s:',s,'\n') R <- mvrnorm(s,mu,sigma) YY <- R %*% A <= matrix(rep(b,s),nrow=s,byrow=T) YY <- matrix(as.numeric(YY),nrow=s) R <- R[rowSums(YY) == m, ,drop=F] nr <- dim(R)[1] #no. valid proposals if (nr > 0) { X[(n+1):min(N,n+nr),] <- R[1:min(N-n,nr),] nar <- nar + min(N,n+nr) - n } n <- n+nr; trials <- trials+s; rho <- n/trials; if (rho>0) { s <- min(maxSample, ceiling((N-n)/rho), 10*s) } else { s = min(maxSample,10*s) } passes <- passes+1 } } ### ### Approach 2: Gibbs sampler of Robert, 1995. ### if (nar < N) { ## % choose starting point if (nar>0) x <- X[nar,] else x <- mu; ## set up inverse Sigma SigmaInv <- ginv(sigma) n <- nar while (n<N) { ## choose p new components for (i in 1:p) { ## Sigmai_i is the (p-1) vector derived from the i-th ## column of Sigma by removing the i-th row term. 
Sigmai_i = sigma[-i,i]; ## Sigma_i_iInv is the inverse of the (p-1)x(p-1) ## matrix derived from Sigma = (sigma(ij) ) by ## eliminating its i-th row and its i-th column Sigma_i_iInv = SigmaInv[-i,-i,drop=F] - SigmaInv[-i,i,drop=F] %*% t(SigmaInv[-i,i,drop=F]) / SigmaInv[i,i,drop=F]; ## x_i is the (p-1) vector of components not being updated ## at this iteration. /// mu_i x_i = x[-i,drop=F]; mu_i = mu[-i,drop=F]; ## mui is E(xi|x_i) mui = mu[i] + t(Sigmai_i) %*% Sigma_i_iInv %*% as.matrix(x_i - mu_i, ncol=1); s2i = sigma[i,i] - t(Sigmai_i)%*%Sigma_i_iInv%*%Sigmai_i; ## Find points where the line with the (p-1) components x_i ## fixed intersects the bounding polytope. ## A_i is the (p-1) x m matrix derived from A by removing ## the i-th row. A_i = A[-i,,drop=F]; ## Ai is the i-th row of A Ai = A[i,,drop=F] c = (b-x_i %*% A_i)/Ai lb = max(c[Ai<0]) if (is.null(lb) || is.na(lb) || length(lb)==0) lb=-Inf ub = min(c[Ai>0]) if (length(ub)==0) ub=Inf ## % now draw from the 1-d normal truncated to [lb, ub] x[i] <- rtnorm(1,mean=mui,sd=sqrt(s2i),lower=lb,upper=ub) } n = n + 1; X[n,] = x; ngibbs = ngibbs+1; } } return (list(X=X,rho=rho,nar=nar,ngibbs=ngibbs)) }
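# Illustrative sketch (not run): 100 draws from a bivariate standard normal truncated to
# the half-plane x1 + x2 <= 1.  A has one row per linear constraint (A %*% x <= b).  This
# assumes the sampler's dependencies (MASS::mvrnorm, MASS::ginv, msm::rtnorm) are
# available, as when the package is loaded.
if (FALSE) {
  out <- rtmvnorm(100, mu = c(0, 0), sigma = diag(2),
                  A = matrix(c(1, 1), nrow = 1), b = 1)
  colMeans(out$X)
  out$rho        # acceptance rate of the accept-reject phase
  out$ngibbs     # number of draws produced by the Gibbs sampler, if any
}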
/scratch/gouwar.j/cran-all/cranData/CEoptim/R/rtmvnorm.R
#' Colony Formation Assay data on cellular cooperation #' #' Clonogenic survival data from seven cell lines T47D, MDA-MB231, A549, #' HCC1806, SKBR3, SKLU1 and BT20 as presented in #' Figure 2 in Brix et al. (2020). #' #' @docType data #' #' @usage data(CFAdata) #' #' @format \code{data.frame} #' #' @keywords dataset #' #' @references Brix, N., Samaga, D., Hennel, R. et al. #' "The clonogenic assay: robustness of plating efficiency-based analysis is #' strongly compromised by cellular cooperation." Radiat Oncol 15, 248 (2020). #' <doi:10.1186/s13014-020-01697-y> #' #' @examples #' data(CFAdata) #' head(CFAdata) #' cll <- levels(CFAdata$cell.line) "CFAdata"
/scratch/gouwar.j/cran-all/cranData/CFAcoop/R/CFAdata.R
#' @title analyze_survival #' #' @description wrapper function for robust analysis of clonogenic survival data #' from the colony formation assay according to Brix et al. (2020), #' Radiation Oncology. #' Mean values are calculated and used for power regression. #' Resulting coefficients are used for #' calculation of survival fractions and corresponding uncertainty analysis. #' #' @param RD data.frame or matrix containing a table of experiment data #' @param name optional: experiment name (e.g. name of cell line) #' @param xtreat optional: treatment dose of the colonies counted in the #' corresponding columns of RD #' @param C number of colonies counted for which the survival fraction is to be #' calculated (default = 20)) #' #' @return list object containing several experiments and treatments organized #' for convenient plotting with \code{plot_sf} #' #' @examples #' seeded <- rep(10^(seq(1,5,0.5)),each = 3) #' df.1 <- data.frame( #' "seeded" = seeded, #' "counted1" = 0.4 * seeded^1.1 * rnorm(n = length(seeded),1,0.05), #' "counted2" = 0.2 * seeded^1.125 * rnorm(n = length(seeded),1,0.05), #' "counted3" = 0.05 * seeded^1.25 * rnorm(n = length(seeded),1,0.05)) #' df.2 <- data.frame("seeded" = seeded, #' "counted1" = 0.5 * seeded^1.01 * rnorm(n = length(seeded),1,0.05), #' "counted2" = 0.4 * seeded^1.0125 * rnorm(n = length(seeded),1,0.05), #' "counted3" = 0.2 * seeded^1.025 * rnorm(n = length(seeded),1,0.05)) #' SF <- vector("list",2) #' SF[[1]] <- analyze_survival(RD = df.1, #' name = "cell line a", #' xtreat = c(0,1,4), #' C = 20) #' SF[[2]] <- analyze_survival(RD = df.2, #' name = "cell line b", #' xtreat = c(0,1,4)) #' @importFrom stats "aggregate" "quantile" "vcov" #' @export #' analyze_survival <- function(RD, name = "no name", xtreat = NULL, C = 20) { if (!(class(RD)[1] %in% c("data.frame","matrix"))){ stop("error: RD must be of class data.frame or matrix") } result <- list("name" = name) if (is.null(xtreat)) { result$"xtreat" <- 0:(ncol(RD) - 2) } else { if (length(xtreat) != (ncol(RD) - 1)) { stop("error: length of assigned treatments does not match data ") } result$"xtreat" <- xtreat } result$"raw" <- as.data.frame(RD) result$"mean" <- aggregate( x = result$raw, by = list(result$raw[, 1]), FUN = "mean", na.rm = TRUE ) # store fit-summaries result$"fit" <- vector("list", dim(RD)[2] - 1) # store survival fractions result$"SF" <- rep(NA, dim(RD)[2] - 2) # summary of uncertainty analysis udf <- data.frame( "treatment" = result$"xtreat", "C" = C, "SF" = NA, "sd.SF" = NA, "lb.SF" = NA, "ub.SF" = NA, "log10.SF" = NA, "sd.log10.SF" = NA, "lb.log10.SF" = NA, "ub.log10.SF" = NA, "b" = NA, "sd.b" = NA ) P0 <- pwr_reg(seeded = result$"mean"[, 2], counted = result$"mean"[, 3]) result$"fit"[[1]] <- P0 udf$"SF"[1] <- 1 udf$"log10.SF"[1] <- 0 udf$"b"[1] <- P0$coefficients[2,1] udf$"sd.b"[1] <- sqrt(vcov(P0)[2,2]) # calculate SF for treatments with colony numbers overlapping with reference for (i in seq_along(result$"fit")[-1]) { Px <- pwr_reg(seeded = result$"mean"[, 2], counted = result$"mean"[, i + 2]) result$"fit"[[i]] <- Px ran_ref <- range(result$"raw"[, 2],na.rm = TRUE) ran_tre <- range(result$"raw"[, i + 1],na.rm = TRUE) if ( (ran_ref[2] >= ran_tre[1] ) & (ran_ref[1]) <= ran_tre[2] ){ udf$"b"[i] <- Px$coefficients[2,1] udf$"sd.b"[i] <- sqrt(vcov(Px)[2,2]) # calculate survival fraction sf <- calculate_sf( par_ref = P0, par_treat = Px, C = C ) udf$"SF"[i] <- sf if(sf>0) { udf$"log10.SF"[i] <- log10(sf) } result$"SF"[i - 1] <- sf # calculate uncertainty 
(First-Order-Taylor-Series-Approximation) b0 <- P0$coefficients[2,1] z0 <- c(1, (log(C)-P0$coefficients[1,1])/b0) S0 <- vcov(P0) bx <- Px$coefficients[2,1] zx <- c(1, (log(C)-Px$coefficients[1,1])/bx) Sx <- vcov(Px) var_lsf <- (1/b0)^2*(z0%*%S0%*%z0) + (1/bx)^2*(zx%*%Sx%*%zx) # sd(log10(SF)) udf$"sd.log10.SF"[i] <- sqrt(var_lsf)/log(10) # sd(SF) udf$"sd.SF"[i] <- sf * sqrt(var_lsf) } else { warning("warning: SF calculation omitted, range of colonies counted in reference and treated cells do not overlap.") result$"SF"[[i - 1]] <- NaN } } udf$"lb.log10.SF" <- udf$"log10.SF" - 1.96 * udf$"sd.log10.SF" udf$"ub.log10.SF" <- udf$"log10.SF" + 1.96 * udf$"sd.log10.SF" udf$"lb.SF" <- 10^(udf$"lb.log10.SF") udf$"ub.SF" <- 10^(udf$"ub.log10.SF") result$"uncertainty" <- udf return(result) }
/scratch/gouwar.j/cran-all/cranData/CFAcoop/R/analyze_survival.R
#' @title calculate_sf #' #' @description calculates the survival fraction according #' to the procedure presented in Brix et al. (2020), which is robust against #' cellular cooperation. #' #' @param par_ref \code{summary.lm} object or 2-column matrix for the #' treatment-free reference survival #' @param par_treat \code{summary.lm} object or 2-column matrix for the #' clonogenic survival after treatment #' @param C colony number for which the survival fraction is calculated #' (default = 20) #' #' @return survival fractions. #' If par_ref and par_treat are \code{summary.lm} objects, #' a scalar is returned. #' If par_ref and par_treat are matrices, #' a vector of the same length as nrow(par_treat) is returned #' #' @examples #' seeded <- 10^(seq(1, 5, 0.5)) #' counted.ref <- 0.4 * 10^(seq(1, 5, 0.5) + rnorm(n = 9, 0, 0.1))^1.1 #' counted.treat <- 0.01 * 10^(seq(1, 5, 0.5) + rnorm(n = 9, 0, 0.1))^1.2 #' fit_ref <- pwr_reg(seeded = seeded, counted = counted.ref) #' fit_treat <- pwr_reg(seeded = seeded, counted = counted.treat) #' calculate_sf(par_ref = fit_ref, par_treat = fit_treat) #' data("CFAdata") #' D <- subset.data.frame( #' x = CFAdata, #' subset = cell.line == levels(CFAdata$cell.line)[1] #' ) #' fit_ref <- pwr_reg(seeded = D$`Cells seeded`, counted = D$`0 Gy`) #' fit_treat <- pwr_reg(seeded = D$`Cells seeded`, counted = D$`4 Gy`) #' calculate_sf(par_ref = fit_ref, par_treat = fit_treat) #' @export #' calculate_sf <- function(par_ref, par_treat, C = 20) { if (!prod(c(class(par_ref)[1], class(par_treat)[1]) %in% c( "summary.lm", "matrix" ))) { stop("error: par_ref and par_treat must be of class summary.lm or matrix") } if (class(par_ref)[1] != class(par_treat)[1]) { stop("error: class of par_ref and par_treat must be identical ") } if (class(par_ref)[1] == "summary.lm") { # calculate survival fraction from two sfit-objects or two pairs c(a,b) SF <- exp(((log(C) - par_ref$coefficients[1, 1]) / par_ref$coefficients[2, 1]) - ((log(C) - par_treat$coefficients[1, 1]) / par_treat$coefficients[2, 1])) names(SF) <- C } else { if (!identical(dim(par_ref), dim(par_treat))) { stop("error: par_ref and par_treat must be of identical size") } if (nrow(par_ref) > 1) { SF <- exp(((log(C) - par_ref[, 1]) / par_ref[, 2]) - ((log(C[1]) - par_treat[, 1]) / par_treat[, 2])) } else { SF <- exp(((log(C) - par_ref[1]) / par_ref[2]) - ((log(C) - par_treat[1]) / par_treat[2])) names(SF) <- C } } return(SF) }
/scratch/gouwar.j/cran-all/cranData/CFAcoop/R/calculate_sf.R
#' @title export_sf #' #' @description export table with results of clonogenic survival analysis #' from the colony formation assay considering cellular cooperation #' #' @param SF list build of objects returned by \code{analyze_survival} #' @return data.frame containing all estimated coefficients and effects from #' all experiments contained in \code{SF} #' #' @examples #' seeded <- rep(10^(seq(1, 5, 0.5)), each = 3) #' df.1 <- data.frame( #' "seeded" = seeded, #' "counted1" = 0.4 * seeded^1.1 * rnorm(n = length(seeded), 1, 0.05), #' "counted2" = 0.2 * seeded^1.125 * rnorm(n = length(seeded), 1, 0.05), #' "counted3" = 0.05 * seeded^1.25 * rnorm(n = length(seeded), 1, 0.05) #' ) #' df.2 <- data.frame( #' "seeded" = seeded, #' "counted1" = 0.5 * seeded^1.01 * rnorm(n = length(seeded), 1, 0.05), #' "counted2" = 0.4 * seeded^1.0125 * rnorm(n = length(seeded), 1, 0.05), #' "counted3" = 0.2 * seeded^1.025 * rnorm(n = length(seeded), 1, 0.05) #' ) #' SF <- vector("list", 2) #' SF[[1]] <- analyze_survival( #' RD = df.1, name = "cell line a", #' xtreat = c(0, 1, 4) #' ) #' SF[[2]] <- analyze_survival( #' RD = df.2, name = "cell line b", #' xtreat = c(0, 1, 4) #' ) #' export_sf(SF) #' #' data("CFAdata") #' SF <- vector("list", 4) #' ll <- levels(CFAdata$cell.line)[c(1, 3, 5, 7)] #' for (i in seq_along(ll)) { #' cdat <- subset.data.frame( #' x = CFAdata, #' subset = CFAdata$cell.line == ll[i] #' ) #' SF[[i]] <- analyze_survival( #' RD = cdat[, -1], #' name = ll[i], #' xtreat = c(0, 1, 2, 4, 6, 8) #' ) #' } #' export_sf(SF) #' @export #' export_sf <- function(SF) { if (class(SF) != "list") { stop("error: SF must be of class 'list'") } if (!is.list(SF[[1]])) { if (!identical( x = names(SF), y = c( "name", "xtreat", "raw", "mean", "fit", "SF", "uncertainty" ) )) { stop("error: SF object is not of the form as returned by analyze_survival.") } SFinput <- SF SF <- vector("list", 1) SF[[1]] <- SFinput } result <- data.frame( "cell.line" = NA, "treatment" = NA, "ln.a" = NA, "sd.ln.a" = NA, "b" = NA, "sd.b" = NA, "r.ln.a.b" = NA, "SF" = NA, "sd.SF" = NA, "lb.SF" = NA, "ub.SF" = NA, "log10.SF" = NA, "sd.log10.SF" = NA, "lb.log10.SF" = NA, "ub.log10.SF" = NA ) cur.index <- 1 for (i in seq_along(SF)) { cSF <- SF[[i]] for (j in seq_along(cSF$xtreat)) { result[cur.index, 1] <- as.character(cSF$name[1]) cur.exp <- c( cSF$xtreat[j], round(cSF$fit[[j]]$coefficients[1, 1], digits = 3), round(cSF$fit[[j]]$coefficients[1, 2], digits = 3), round(cSF$fit[[j]]$coefficients[2, 1], digits = 3), round(cSF$fit[[j]]$coefficients[2, 2], digits = 3), round(vcov(cSF$fit[[j]])[1, 2] / prod(sqrt(diag(vcov(cSF$fit[[j]])))), digits = 3) ) result[cur.index, 2:7] <- cur.exp result[cur.index, 8:15] <- round(cSF$uncertainty[j,3:10],digits = 4) cur.index <- cur.index + 1 } } return(result) }
/scratch/gouwar.j/cran-all/cranData/CFAcoop/R/export_sf.R
#' @title plot_sf #' #' @description plot cellular cooperativity and clonogenic survival for #' colony formation assay data #' #' @param SF list build of objects returned by \code{analyze_survival} #' @param showUncertainty logical, switches on/off uncertainty bands for #' sf-values. #' @return none #' #' @examples #' seeded <- rep(10^(seq(1, 5, 0.5)), each = 3) #' df.1 <- data.frame( #' "seeded" = seeded, #' "counted1" = 0.4 * seeded^1.1 * rnorm(n = length(seeded), 1, 0.05), #' "counted2" = 0.2 * seeded^1.125 * rnorm(n = length(seeded), 1, 0.05), #' "counted3" = 0.05 * seeded^1.25 * rnorm(n = length(seeded), 1, 0.05) #' ) #' df.2 <- data.frame( #' "seeded" = seeded, #' "counted1" = 0.5 * seeded^1.01 * rnorm(n = length(seeded), 1, 0.05), #' "counted2" = 0.4 * seeded^1.0125 * rnorm(n = length(seeded), 1, 0.05), #' "counted3" = 0.2 * seeded^1.025 * rnorm(n = length(seeded), 1, 0.05) #' ) #' SF <- vector("list", 2) #' SF[[1]] <- analyze_survival( #' RD = df.1, name = "cell line a", #' xtreat = c(0, 1, 4) #' ) #' SF[[2]] <- analyze_survival( #' RD = df.2, name = "cell line b", #' xtreat = c(0, 1, 4) #' ) #' plot_sf(SF) #' #' data("CFAdata") #' SF <- vector("list", 4) #' ll <- levels(CFAdata$cell.line)[c(1, 3, 5, 7)] #' for (i in seq_along(ll)) { #' cdat <- subset.data.frame( #' x = CFAdata, #' subset = CFAdata$cell.line == ll[i] #' ) #' SF[[i]] <- analyze_survival( #' RD = cdat[, -1], #' name = ll[i], #' xtreat = c(0, 1, 2, 4, 6, 8) #' ) #' } #' plot_sf(SF) #' @importFrom grDevices "col2rgb" "colorRampPalette" "rgb" #' @importFrom graphics "abline" "axis" "par" "plot" "title" #' @importFrom Hmisc "errbar" #' @importFrom graphics "polygon" #' @export #' plot_sf <- function(SF, showUncertainty = TRUE) { if (length(SF) > 10) { stop( "error: more than ten experiments were chosen for plotting. Consider separating data set for presentation." ) } else if (length(SF) == 0) { stop( "error: empty SF object cannot be plotted." ) } if ((length(SF) == 7) & (typeof(SF[[1]]) == "character")){ SF <- list("SF" = SF) } else if (typeof(SF[[1]]) == "character") { stop( "error: no SF object - cannot be plotted." 
) } oldpar <- par(no.readonly = TRUE) on.exit(par(oldpar)) par(mfrow = c(2, length(SF))) par( mar = c(2.5, 3.5, 0.5, 0.5), mgp = c(1.5, 0.5, 0) ) collect_sf <- data.frame( "Exp" = NA, "treat" = NA, "sf" = NA, "sf.msd" = NA, "sf.psd" = NA ) alpha_bg <- 2 * 42 for (t in seq_along(SF)) { CurSF <- SF[[t]] CurUM <- CurSF$uncertainty N_treat <- length(CurSF$fit) if (CurSF$name == "no name") { CurSF$name <- paste0("Experiment ", t) } CurSF$plot <- CurSF$raw CurSF$plot[CurSF$plot == 0] <- 0.1 CurSF$plot <- log(CurSF$plot) / log(10) # log_b(x) = log_a(x)/log_a(b) for plotting CurSF$pm <- log(CurSF$mean) / log(10) CurSF$pm[CurSF$pm == -Inf] <- NaN with_data <- apply( X = !is.na(CurSF$raw[, c(2:(dim(CurSF$raw)[2]))]), MARGIN = 1, FUN = "sum", na.rm = TRUE ) >= 1 x_lim <- range(CurSF$plot[with_data, 1]) y_lim <- c(-1, 3) colhex <- colorRampPalette(c("#43E08700", "#00612A00"))(N_treat) # D23264 colors <- col2rgb(colhex) / 255 alpha <- 0.42 plot( x = CurSF$plot[, 1], y = CurSF$plot[, 2], main = "", xlim = x_lim, ylim = y_lim, pch = "+", yaxt = "n", xlab = "cells seeded", ylab = "colonies counted", axes = FALSE, col = rgb( red = colors[1, 1], green = colors[2, 1], blue = colors[3, 1], alpha = alpha, maxColorValue = 1 ) ) xtick <- floor(min(CurSF$plot[, 1], na.rm = TRUE)): ceiling(max(CurSF$plot[, 1], na.rm = TRUE)) ytick <- -1:3 axis( side = 2, at = ytick, las = 1, labels = c("no cols.", "1", "10", "100", "1000") ) axis( side = 1, at = xtick, las = 1, labels = 10^xtick ) par(new = TRUE) polygon( x = c(x_lim * c(0.8, 1.2), rev(x_lim * c(0.8, 1.2))), y = log10(c(5, 5, 100, 100)), border = NA, col = rgb(25, 25, 25, alpha = 42, maxColorValue = 255) ) par(new = TRUE) plot( x = CurSF$pm[, 1], y = CurSF$pm[, 3], xlim = x_lim, ylim = y_lim, pch = 19, ann = FALSE, xaxt = "n", yaxt = "n", axes = FALSE, col = rgb( red = colors[1, 1], green = colors[2, 1], blue = colors[3, 1], alpha = 1, maxColorValue = 1 ) ) for (a.l in 0:-4){ abline( a = a.l, b = 1, lty = 2, col = rgb(25, 25, 25, alpha = alpha_bg, maxColorValue = 255) ) } abline( a = CurSF$fit[[1]]$coefficients[1, 1] / log(10), b = CurSF$fit[[1]]$coefficients[2, 1], col = rgb( red = colors[1, 1], green = colors[2, 1], blue = colors[3, 1], alpha = 1, maxColorValue = 1 ) ) collect_sf <- rbind(collect_sf, c(t, CurSF$xtreat[1], 1, 1, 1)) sf_vec <- NULL q1_vec <- NULL q2_vec <- NULL x_vec <- NULL col_vec <- colhex[1] for (i in 2:N_treat) { par(new = TRUE) plot( x = CurSF$plot[, 1], y = CurSF$plot[, i + 1], xlim = x_lim, ylim = y_lim, pch = "+", ann = FALSE, axes = FALSE, xaxt = "n", yaxt = "n", col = rgb( red = colors[1, i], green = colors[2, i], blue = colors[3, i], alpha = alpha, maxColorValue = 1 ) ) par(new = TRUE) plot( x = CurSF$pm[, 1], y = CurSF$pm[, i + 2], xlim = x_lim, ylim = y_lim, pch = 19, ann = FALSE, xaxt = "n", yaxt = "n", axes = FALSE, col = rgb( red = colors[1, i], green = colors[2, i], blue = colors[3, i], alpha = 1, maxColorValue = 1 ) ) abline( a = CurSF$fit[[i]]$coefficients[1, 1] / log(10), b = CurSF$fit[[i]]$coefficients[2, 1], col = rgb( red = colors[1, i], green = colors[2, i], blue = colors[3, i], alpha = 1, maxColorValue = 1 ) ) sf_vec <- c(sf_vec, CurSF$"SF"[i - 1]) q1_vec <- c(q1_vec, 10^(CurUM$log10.SF[i]-CurUM$sd.log10.SF[i])) q2_vec <- c(q2_vec, 10^(CurUM$log10.SF[i]+CurUM$sd.log10.SF[i])) x_vec <- c(x_vec, CurSF$"xtreat"[i]) col_vec <- c(col_vec, colhex[i]) } keep_sf <- data.frame( "Exp" = rep(t, length(x_vec)), "treat" = x_vec, "sf" = sf_vec, "sf.msd" = q1_vec, "sf.psd" = q2_vec ) collect_sf <- rbind(collect_sf, keep_sf) } collect_sf 
<- collect_sf[-1, ] par(mar = c(2.5, 3.75, 2.5, 0.5), mgp = c(1.5, 0.5, 0)) for (sfi in seq_along(SF)) { if (SF[[sfi]]$name == "no name") { SF[[sfi]]$name <- paste0("Experiment ", sfi) } PD <- subset.data.frame(x = collect_sf, subset = (collect_sf$"Exp" == sfi)) plot( x = PD$treat, log10(PD$sf), main = SF[[sfi]]$name, col.main = rgb( red = 0, green = 148, blue = 64, alpha = 255, maxColorValue = 255 ), las = 1, ylab = "", xaxt = "n", yaxt = "n", xlab = "treatment", ylim = range(log10(collect_sf$sf)), col = col_vec, axes = FALSE, pch = 19 ) ytick <- round(min(log10(collect_sf$sf), na.rm = TRUE)): round(max(log10(collect_sf$sf), na.rm = TRUE)) axis( side = 2, at = ytick, las = 1, labels = paste0(10^ytick * 100, "%") ) title( ylab = "clonogenic survival", line = 2.5 ) axis( side = 1, at = PD$treat, labels = PD$treat ) if (showUncertainty) { with(data = PD, errbar(treat, log10(sf), log10(sf.msd), log10(sf.psd), col = col_vec, add = TRUE, pch = 1, errbar.col = col_vec )) } } }
/scratch/gouwar.j/cran-all/cranData/CFAcoop/R/plot_sf.R
#' @title pwr_reg #' #' @description \code{pwr_reg} performs a power regression #' (log(C) = log(a) + b * log(S) + e)) for clonogenic assay data of #' experiments examining the cellular cooperation. #' #' @param seeded numeric vector with number of cells seeded (S) #' @param counted numeric vector with number of colonies counted (C, same #' length as \code{seeded}) #' #' @return \code{summary.lm} object as returned by \code{\link{summary}} #' #' @examples #' pwr_reg( #' seeded = 10^(seq(1, 5, 0.5)), #' counted = 0.4 * (10^seq(1, 5, 0.5))^1.25 * rnorm(n = 9, 1, 0.05) #' ) #' data(CFAdata) #' D <- subset.data.frame( #' x = CFAdata, #' subset = cell.line == levels(CFAdata$cell.line)[1] #' ) #' pwr_reg(seeded = D$`Cells seeded`, counted = D$`0 Gy`) #' @export #' @importFrom stats "lm" pwr_reg <- function(seeded, counted) { if (!is.numeric(seeded) | !is.numeric(counted)) { stop("error: input must be numeric") } if (length(seeded) != length(counted)) { stop("error: input vectors must be of identical length") } x <- data.frame("S" = seeded, "C" = counted) x <- x[(!is.na(x$S) & !is.na(x$C)), ] if (sum(x <= 0) > 0) { warning( "log(0) = -Inf; power regression not applicable to null-valued variables; non-positive data points removed from analysis, consider regressing mean values." ) x <- x[((x$S > 0) & (x$C > 0)), ] } if (nrow(x) < 3) { stop("error: not enough data for power regression") } x$lnS <- log(x$S) x$lnC <- log(x$C) fit <- lm(formula = "lnC ~ 1 + lnS", data = x) sfit <- summary(fit) return(sfit) }
/scratch/gouwar.j/cran-all/cranData/CFAcoop/R/pwr_reg.R
## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----loadData----------------------------------------------------------------- library(CFAcoop) data("CFAdata") summary(CFAdata) ## ----show1, fig.width=7, fig.height=5----------------------------------------- data("CFAdata") data1 <- subset.data.frame(x = CFAdata, subset = CFAdata$cell.line == "T47D") data2 <- subset.data.frame(x = CFAdata, subset = CFAdata$cell.line == "BT20") SF <- vector("list", 2) SF[[1]] <- analyze_survival( RD = data1[, c("Cells seeded","0 Gy","1 Gy","2 Gy","4 Gy","6 Gy","8 Gy")], name = as.character(data1[1,1]), xtreat = c(0, 1, 2, 4, 6, 8), C = 20) SF[[2]] <- analyze_survival( RD = data2[,-1], name = as.character(data2[1,1]), xtreat = c(0, 1, 2, 4, 6, 8)) plot_sf(SF = SF) ## ----export_sf---------------------------------------------------------------- summary_df <- export_sf(SF) ## ----summary_export_sf-------------------------------------------------------- colnames(summary_df) head(summary_df) summary(summary_df) ## ----sf_details--------------------------------------------------------------- SF[[2]]$fit[1] SF[[1]]$SF ## ----PowerReg, fig.width=5, fig.height=4-------------------------------------- data <- subset.data.frame(x = CFAdata, subset = CFAdata$cell.line == "BT20") data <- aggregate(x = data[, -1], by = list(data$`Cells seeded`), FUN = "mean", na.rm = TRUE) par_0 <- pwr_reg(seeded = data$`Cells seeded`, counted = data$`0 Gy`) par_0$coefficients plot(x = log10(data$`Cells seeded`), y = log10(data$`0 Gy`),xlim = c(2,3.5)) abline(a = log10(exp(1)) * par_0$coefficients[1, 1], b = par_0$coefficients[2, 1]) ## ----TestingCooperation------------------------------------------------------- p_value <- (1 - pt( q = (par_0$coefficients[2, 1] - 1) / par_0$coefficients[2, 2], df = par_0$df[2] )) ## ----calculateSF-------------------------------------------------------------- data <- subset.data.frame(x = CFAdata, subset = CFAdata$cell.line == "BT20") data <- aggregate(x = data[, -1], by = list(data$`Cells seeded`), FUN = "mean", na.rm = TRUE) par_0 <- pwr_reg(seeded = data$`Cells seeded`, counted = data$`0 Gy`) par_4 <- pwr_reg(seeded = data$`Cells seeded`, counted = data$`4 Gy`) calculate_sf(par_ref = par_0, par_treat = par_4, C = 20) ## ----PEfail1a, fig.width=5, fig.height=4-------------------------------------- data(CFAdata) data <- subset.data.frame(x = CFAdata, subset = CFAdata$cell.line == "T47D") data <- aggregate(x = data[, -1], by = list(data$`Cells seeded`), FUN = "mean", na.rm = TRUE) PE_x <- data$`4 Gy` / data$`Cells seeded` PE_0 <- data$`0 Gy` / data$`Cells seeded` plot(x = rep(c(0, 1), each = 18), y = c(PE_0, PE_x), lty = 0, ylim = c(0,0.5),xlim = c(-0.1,1.1), xlab = "treatment", ylab = "C / S", axes = FALSE, main = "T47D") axis(side = 1,at = c(0,1),labels = c("0 Gy","4 Gy")) axis(side = 2,at = seq(0,0.5,0.1)) ## ----PEfail1b, fig.width=5, fig.height=4-------------------------------------- SF_resample <- rep(PE_x, each = length(PE_0)) / rep(PE_0, times = length(PE_x)) hist(SF_resample, breaks = 25,xlim = c(0.12,0.25),xlab = "(C_4/S_4) / (C_0/S_0)", main = "valid PE-based SF'-values") ## ----PEfail2------------------------------------------------------------------ range(SF_resample,na.rm = TRUE) as_nc_0 <- analyze_survival(RD = data[,-1],C = 20) as_nc_0$uncertainty[4,c(3,5,6)] ## ----PEfailCoop, fig.width=5, fig.height=4------------------------------------ data(CFAdata) data <- subset.data.frame(x = CFAdata, subset = 
CFAdata$cell.line == "BT20") data <- aggregate(x = data[, -1], by = list(data$`Cells seeded`), FUN = "mean", na.rm = TRUE) PE_x <- data$`4 Gy` / data$`Cells seeded` PE_0 <- data$`0 Gy` / data$`Cells seeded` plot(x = rep(c(0, 1), each = length(PE_x)), y = c(PE_0, PE_x), lty = 0, ylim = c(0,0.08),xlim = c(-0.1,1.1), xlab = "treatment", ylab = "C / S", axes = FALSE, main = "BT20") axis(side = 1,at = c(0,1),labels = c("0 Gy","4 Gy")) axis(side = 2,at = seq(0,0.08,0.02),las = 1) SF_resample <- rep(PE_x, each = length(PE_0)) / rep(PE_0, times = length(PE_x)) hist(SF_resample, breaks = 100,xlim = c(0,10),xlab = "(C_4/S_4) / (C_0/S_0)", main = "valid PE-based SF'-values") ## ----PEfailCoop2, fig.width=6, fig.height=5----------------------------------- range(SF_resample,na.rm = TRUE) as_c_4 <- analyze_survival(RD = data[,-1],C = 20) as_c_4$uncertainty[4,c(3,5,6)]
/scratch/gouwar.j/cran-all/cranData/CFAcoop/inst/doc/CFAcoop.R
--- title: "CFAcoop" output: rmarkdown::html_vignette: fig_width: 7 fig_heigth: 9 vignette: > %\VignetteIndexEntry{CFAcoop} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` # How to use this package (and why) ## Introduction The **CFAcoop** package equips you with functions to analyze data from the clonogenic assay (also called 'Colony Formation Assay') in presence or absence of cellular cooperation. Thus, this package allows you to robustly extract clonogenic survival information for your cell lines under a given treatment. This vignette is meant to enable you to process your data following the method first presented in Brix et al., Radiation Oncology, 2020: "The clonogenic assay: robustness of plating efficiency-based analysis is strongly compromised by cellular cooperation". Therefore, the data presented in Figure 2 in Brix et al., which is provided within the package, is used to illustrate how to use the **CFAcoop** package. Further, it is shown, how the survival fractions of cooperative cell lines would look like, if cellular cooperation was ignored. ## Cellular Cooperation ## In order to avoid confusion, cellular cooperation should be defined. Understanding this concept comes easy, when starting from the opposite. Clonogenic growth of non-cooperative cells is independent of cell density. Thus, there is a constant relationship between cells seeded $S$ and colonies counted $C$: \[C = a \cdot S,\] where $a$ corresponds just to the conventional plating efficiency ($PE = \frac{C}{S}$). Now, cellular cooperation refers to the benefit cells can have from their surrounding neighbors which results in a non-constant relation of cells seeded and colonies counted (For details, see Brix et al., Radiation Oncology, 2020). The probability of clonogenic growth for a single cell increases with the number of surrounding cells to cooperate with. It has turned out that generalizing the equation above by a parameter $b$ adequately models the colonies counted of cooperative and non-cooperative cell lines: \[C = a \cdot S^b.\] In this model, $b = 1$ gives the non-cooperative case and $b > 1$ corresponds to cooperative growth. In short, a cell line is called cooperative, if $b > 1$. ## Clonogenic Survival ## Conventionally, clonogenic survival at a given treatment $x$ was determined as the ratio of colonies counted $C_x$ and the cells seeded $S_x$ scaled to the plating efficiency of a reference $PE_0 = \frac{C_0}{S_0}$ \[SF'_x = \frac{\frac{C_x}{S_x}}{PE_0}.\] The new method now shifts the focus of the survival fraction directly to the number of cells needed to be seeded under the two conditions (treated and untreated) in order to achieve an __identical__ expectation of the number of colonies formed $C$. Essentially, the new method does not focus on the number of colonies formed after growth in different cell densities, but on the number of seeded single cells with clonogenic potential before growing to identical colony numbers. \[SF_x(C) = \frac{S_0(C)}{S_x(C)} = exp\left( \frac{log\left(\frac{C}{a_0}\right)}{b_0} - \frac{log\left(\frac{C}{a_x}\right)}{b_x}\right)\] Obviously, for $b_x = b_0 = 1$ the equivalence $SF_x(C) \equiv SF'$ holds for all $C$, and thus, the non-cooperative case is well covered by the new method. Importantly, the conventional determination of clonogenic survival is heavily compromised by cellular cooperation, if present. 
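To make the formula above concrete, here is a minimal numerical sketch (not part of the package API; the parameter values are made up for illustration). It also shows that the new definition reduces to the conventional $SF'$ when $b_x = b_0 = 1$:

```{r sfFormulaSketch}
# hypothetical parameters, chosen only for illustration
a_0 <- 0.4; b_0 <- 1.0 # reference condition (0 Gy)
a_x <- 0.1; b_x <- 1.0 # treated condition
C <- 20
# SF_x(C) = exp( log(C/a_0)/b_0 - log(C/a_x)/b_x )
exp(log(C / a_0) / b_0 - log(C / a_x) / b_x)
a_x / a_0 # conventional SF' in the non-cooperative case; identical for b = 1
```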
## Getting Started ## The data as presented in Figure 2 in Brix et al. is included in the package in form of a `` `data.frame` `` *CFAdata*. It can be loaded and summarized by: ```{r loadData} library(CFAcoop) data("CFAdata") summary(CFAdata) ``` # Fast Analysis and Plotting of Results The shortcut to analyze data, is using the wrapper function `` `analyze_survival(RD, name, xtreat)` `` where RD is a `` `data.frame` `` or `` `matrix` `` containing your numbers of seeded cells (first column) and numbers of colonies counted under the treatments (numeric argument, e.g. the dose applied `` `xtreat = c(0,1,2,4,6,8)` ``). The returned objects should be concatenated in a list-object and can be plotted by `` `plot_sf()` ``. ```{r show1, fig.width=7, fig.height=5} data("CFAdata") data1 <- subset.data.frame(x = CFAdata, subset = CFAdata$cell.line == "T47D") data2 <- subset.data.frame(x = CFAdata, subset = CFAdata$cell.line == "BT20") SF <- vector("list", 2) SF[[1]] <- analyze_survival( RD = data1[, c("Cells seeded","0 Gy","1 Gy","2 Gy","4 Gy","6 Gy","8 Gy")], name = as.character(data1[1,1]), xtreat = c(0, 1, 2, 4, 6, 8), C = 20) SF[[2]] <- analyze_survival( RD = data2[,-1], name = as.character(data2[1,1]), xtreat = c(0, 1, 2, 4, 6, 8)) plot_sf(SF = SF) ``` Raw data from single replicates is plotted as '$+$'-symbols. Corresponding regression lines are calculated using the mean values of replicates of identical numbers of cells seeded, which are plotted as dots. The color indicates the treatment (irradiation with 0 to 8 Gy) and links the numbers of colonies counted from the upper plot to the calculated clonogenic survival in the lower plot. Shaded areas indicate the span from $C = 5$ to $C = 100$, which is within the target region of good experimental practice. Dashed lines show regression lines with a slope of $b=1$ (at $log10(a)$ varying from 0 to 4) for orientation, so that any substantially non-linear relation (i.e. $b \neq 1$) between the number of colonies counted ($C$) and the number of cells seeded ($S$) can be spotted easily. The dots in the treatment response curves correspond to the survival fractions at $C = 20$ with error bars indicating the uncertainty of the estimated survival fractions in terms of its standard deviation. This uncertainty is calculated via First-Order-Taylor-Series-Approximation of $SF_x(C)$. All information used for plotting is contained in the objects returned by `` `analyze_survival` ``. A `` `data.frame` `` with a summary of the estimated survival fractions can be generated by ```{r export_sf} summary_df <- export_sf(SF) ``` to export this `` `data.frame` `` in a csv-File, execute: `` ` write.csv(x = summary_df,file = "CFAcoopResult.csv") ` `` The `` `data.frame` `` includes the following columns ```{r summary_export_sf} colnames(summary_df) head(summary_df) summary(summary_df) ``` All information of this `` `data.frame` `` is also accessible directly in the object returned by `` `analyze_survival` ``. For instance, the information about the regression of the 0 Gy reference of the cell line BT20 or the survival fractions of the 5 treatments for T47D (at $C = 20$) can be recalled by: ```{r sf_details} SF[[2]]$fit[1] SF[[1]]$SF ``` # Details for Focussed Analysis ## Assess Cellular Cooperation Key to the robust analysis of clonogenic analysis data is the modeling of the cellular cooperation. 
We assume that the underlying functional dependency of seeded cells and counted colonies is of the form \[C = a \cdot S^{b},\] where $b$ indicates the degree of cellular cooperation ($b = 1$ is implicitly assumed for the PE-based approach). The coefficient $b$ is estimated in a linear regression model \[log(C) = log(a) + b \cdot log(S) + \varepsilon, \varepsilon \sim \mathcal{N}(0,\sigma^2). \] The function `` `pwr_reg(seeded, counted)` `` provides this regression and returns a `` `summary.lm` `` object. Note that the analysis of cellular cooperation is restricted to the range of seeded cells, where at least one colony was observed. Outside this range, the attempt of studying clonogenic survival based on no observed colony counts is not reasonable and thus, `` `pwr_reg` `` will remove those data points from analysis. Thus, it is strongly recommended to use the averaged data for regression. By doing so, the range of the independent variable of the regression is widened. (Removing only those replicates with no colonies at one or few specific cell densities would bias the model fitting.) ```{r PowerReg, fig.width=5, fig.height=4} data <- subset.data.frame(x = CFAdata, subset = CFAdata$cell.line == "BT20") data <- aggregate(x = data[, -1], by = list(data$`Cells seeded`), FUN = "mean", na.rm = TRUE) par_0 <- pwr_reg(seeded = data$`Cells seeded`, counted = data$`0 Gy`) par_0$coefficients plot(x = log10(data$`Cells seeded`), y = log10(data$`0 Gy`),xlim = c(2,3.5)) abline(a = log10(exp(1)) * par_0$coefficients[1, 1], b = par_0$coefficients[2, 1]) ``` With the results of this function, we can also test for cellular cooperation. Note that the _p-value_ in the _coefficients_ table corresponds to the null hypothesis $b = 0$, but we are interested in the null hypothesis of $b = 1$. Thus, we find our p-value of interest by computing ```{r TestingCooperation} p_value <- (1 - pt( q = (par_0$coefficients[2, 1] - 1) / par_0$coefficients[2, 2], df = par_0$df[2] )) ``` Thus, BT20 is highly cooperative ($b = 1.76$, $\hat{\sigma}_b = 0.12$, $p < 0.001$). ## Determine Clonogenic Survival Fractions In this package, the survival fraction $SF(C)$ for clonogenic survival is calculated as the number of cells that need to be seeded without treatment divided by the number of cells needed to be seeded with treatment for obtaining __the same__ expectation of colonies counted __$C$__. \[ SF(C) = \frac{S_0(C)}{S_x(C)} = exp\left( \frac{log\left(\frac{C}{a_0}\right)}{b_0} - \frac{log\left(\frac{C}{a_x}\right)}{b_x} \right)\] Given two parameter sets of clonogenic assay data, the clonogenic survival can be calculated as: ```{r calculateSF} data <- subset.data.frame(x = CFAdata, subset = CFAdata$cell.line == "BT20") data <- aggregate(x = data[, -1], by = list(data$`Cells seeded`), FUN = "mean", na.rm = TRUE) par_0 <- pwr_reg(seeded = data$`Cells seeded`, counted = data$`0 Gy`) par_4 <- pwr_reg(seeded = data$`Cells seeded`, counted = data$`4 Gy`) calculate_sf(par_ref = par_0, par_treat = par_4, C = 20) ``` ### Remark on Ignoring Cellular Cooperation Note that in case of cooperative cell lines, the parameter $a$ does not correspond to a plating efficiency as for non-cooperative cell lines. The concept of a characteristic plating efficiency does not apply to cooperative cell lines.
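As a cross-check (a minimal sketch, not part of the package API), the same survival fraction can be computed directly from the fitted coefficients with the formula above, using the `` `par_0` `` and `` `par_4` `` fits from the previous chunk. The regression works on the natural-log scale, so the intercepts are estimates of $log(a)$:

```{r manualSF}
C <- 20
alpha_0 <- par_0$coefficients[1, 1]; b_0 <- par_0$coefficients[2, 1]
alpha_4 <- par_4$coefficients[1, 1]; b_4 <- par_4$coefficients[2, 1]
# SF_4(C) = exp( (log(C) - alpha_0)/b_0 - (log(C) - alpha_4)/b_4 )
exp((log(C) - alpha_0) / b_0 - (log(C) - alpha_4) / b_4)
```

The value should coincide with the result of `` `calculate_sf` `` above.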
## Determine Uncertainty of Survival Fractions The survival fraction at $C$ for treatment $x$ is calculated by the function \[SF_x(C) = \frac{S_0(C)}{S_x(C)} = exp\left( \frac{log\left(\frac{C}{a_0}\right)}{b_0} - \frac{log\left(\frac{C}{a_x}\right)}{b_x}\right).\] Since the SF-values are solely dependent on the estimated parameters in the power regression (and the chosen $C$), the inherent uncertainty can be assessed via parametric bootstrapping (e.g. using the package **mvtnorm** to generate parameter sets according to the variance-covariance matrix of the fit), or by following the laws of error propagation (First-Order Taylor-Series Approximation). We choose the analytic approximation. In order to build meaningful uncertainty intervals (i.e. respect that survival fractions will never be below zero), we work on the log-scale and transform the boundaries to the linear scale at the end. For the sake of a shorter notation, we write: \[g = log(SF_x(C)) = \frac{d-\alpha_0}{b_0} - \frac{d-\alpha_x}{b_x} \] According to $\Sigma_g \approx J \Sigma_pJ^T$, where $J$ denotes the Jacobian of $g$ and $\Sigma_p$ the variance-covariance matrix of the estimated parameters $\alpha = log(a)$ and $b$ at $0$ and $x$ Gy, we find \[\sigma_g^2 \approx \frac{1}{b_0^2} z_0 \Sigma_0 z_0^T + \frac{1}{b_x^2} z_x \Sigma_x z_x^T \] with \[ z_x = \left(\begin{matrix}1 & \frac{d-\alpha_x}{b_x}\end{matrix}\right) \] and \[\Sigma_x = \left(\begin{matrix} \sigma_{\alpha_x}^2 & \sigma_{\alpha_x b_x}\\ \sigma_{\alpha_x b_x} & \sigma_{b_x}^2 \end{matrix} \right).\] # What's the problem with PE-based analysis? In short: The plating efficiency frequently is not as constant as it needs to be in order to serve as an adequate normalization factor. To illustrate this, we compare the PE-based calculated $SF'$-values with the $SF(C = 20)$-values calculated with the new method for (1) the non-cooperative cell line T47D and (2) the cooperative cell line (BT20). ## (1) The non-cooperative case For calculating the survival fraction, plating efficiencies are required. Plating efficiencies ($C/S$) are calculated easily as: ```{r PEfail1a, fig.width=5, fig.height=4} data(CFAdata) data <- subset.data.frame(x = CFAdata, subset = CFAdata$cell.line == "T47D") data <- aggregate(x = data[, -1], by = list(data$`Cells seeded`), FUN = "mean", na.rm = TRUE) PE_x <- data$`4 Gy` / data$`Cells seeded` PE_0 <- data$`0 Gy` / data$`Cells seeded` plot(x = rep(c(0, 1), each = 18), y = c(PE_0, PE_x), lty = 0, ylim = c(0,0.5),xlim = c(-0.1,1.1), xlab = "treatment", ylab = "C / S", axes = FALSE, main = "T47D") axis(side = 1,at = c(0,1),labels = c("0 Gy","4 Gy")) axis(side = 2,at = seq(0,0.5,0.1)) ``` Now, which values are to be compared? When there is no effect of the cell density, as assumed by the conventional approach (not taking cellular cooperation into account), each combination (of a $C_{0 Gy}/S_{0 Gy}$- and $C_{4 Gy}/S_{4 Gy}$-value) is equally reliable. Thus, the full set of all combinations is: ```{r PEfail1b, fig.width=5, fig.height=4} SF_resample <- rep(PE_x, each = length(PE_0)) / rep(PE_0, times = length(PE_x)) hist(SF_resample, breaks = 25,xlim = c(0.12,0.25),xlab = "(C_4/S_4) / (C_0/S_0)", main = "valid PE-based SF'-values") ``` Without the assessment of cellular cooperation, conventional calculation of an $SF'$-value, corresponds to picking randomly a sample from the distribution shown in the histogram above. 
A comparison of the range of this distribution and the calculated uncertainties of the new method shows, that for non-cooperative cell lines such as T47D, there is no big difference in this variability/uncertainty. ```{r PEfail2} range(SF_resample,na.rm = TRUE) as_nc_0 <- analyze_survival(RD = data[,-1],C = 20) as_nc_0$uncertainty[4,c(3,5,6)] ``` ## (2) The cooperative case Now, making the same comparison as in (1) for the cooperative cell line BT20 shows the disastrous effect of ignoring the coefficient $b$, when it is in fact different from $1$. ```{r PEfailCoop, fig.width=5, fig.height=4} data(CFAdata) data <- subset.data.frame(x = CFAdata, subset = CFAdata$cell.line == "BT20") data <- aggregate(x = data[, -1], by = list(data$`Cells seeded`), FUN = "mean", na.rm = TRUE) PE_x <- data$`4 Gy` / data$`Cells seeded` PE_0 <- data$`0 Gy` / data$`Cells seeded` plot(x = rep(c(0, 1), each = length(PE_x)), y = c(PE_0, PE_x), lty = 0, ylim = c(0,0.08),xlim = c(-0.1,1.1), xlab = "treatment", ylab = "C / S", axes = FALSE, main = "BT20") axis(side = 1,at = c(0,1),labels = c("0 Gy","4 Gy")) axis(side = 2,at = seq(0,0.08,0.02),las = 1) SF_resample <- rep(PE_x, each = length(PE_0)) / rep(PE_0, times = length(PE_x)) hist(SF_resample, breaks = 100,xlim = c(0,10),xlab = "(C_4/S_4) / (C_0/S_0)", main = "valid PE-based SF'-values") ``` The range of the PE-based $SF'$-values does not correspond to the uncertainty of the $SF(20)$-values: ```{r PEfailCoop2, fig.width=6, fig.height=5} range(SF_resample,na.rm = TRUE) as_c_4 <- analyze_survival(RD = data[,-1],C = 20) as_c_4$uncertainty[4,c(3,5,6)] ``` Even though the survival fraction can be accurately estimated under consideration of cellular cooperation the PE-based approach fails in returning a trustworthy estimate of the fraction of cells losing their potential due to the treatment (see histogram). In particular, the average of PE-based $SF'$ calculations does not asymptotically tend to a meaningful value. In case of strong cellular cooperation, the PE-based calculated $SF'$-value is heavily affected by this cellular cooperation and the treatment effect of interest is degraded to a side effect. ## Conclusion from (1) and (2) Before calculating PE-based survival fractions, one must check whether there is cellular cooperation or not. Essentially, to decide, whether you have a cooperativity issue or not, you need to conduct the same analysis and to generate the same data that is necessary to solve this issue anyways. <!-- # Full CFAdata Set --> <!-- ```{r Full, fig.width=13, fig.height=6} --> <!-- data("CFAdata") --> <!-- SF <- vector('list',7) --> <!-- for (i in seq_along(SF)){ --> <!-- SF[[i]] <- analyze_survival( --> <!-- RD = subset.data.frame(x = CFAdata, --> <!-- subset = CFAdata$cell.line==levels(CFAdata$cell.line)[i])[,-1], --> <!-- name = levels(CFAdata$cell.line)[i], --> <!-- xtreat = c(0,1,2,4,6,8), --> <!-- c_range = c(5,100)) --> <!-- } --> <!-- plot_sf(SF) --> <!-- ``` -->
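As a closing illustration (a minimal sketch, not part of the package API), the first-order error propagation described in the section on the uncertainty of survival fractions can be reproduced directly from the `` `summary.lm` `` objects: the parameter covariance matrix is `` `cov.unscaled * sigma^2` ``, and the remaining terms follow the formula for $\sigma_g^2$. Using the BT20 fits `` `par_0` `` and `` `par_4` `` from above at $C = 20$:

```{r errorPropSketch}
d <- log(20)
Sigma_0 <- par_0$cov.unscaled * par_0$sigma^2
Sigma_4 <- par_4$cov.unscaled * par_4$sigma^2
alpha_0 <- par_0$coefficients[1, 1]; b_0 <- par_0$coefficients[2, 1]
alpha_4 <- par_4$coefficients[1, 1]; b_4 <- par_4$coefficients[2, 1]
z_0 <- c(1, (d - alpha_0) / b_0)
z_4 <- c(1, (d - alpha_4) / b_4)
sigma_g <- sqrt((1 / b_0^2) * t(z_0) %*% Sigma_0 %*% z_0 +
                (1 / b_4^2) * t(z_4) %*% Sigma_4 %*% z_4)
sigma_g            # approximate sd of log(SF_4(20)), natural-log scale
sigma_g / log(10)  # the same quantity on the log10 scale
```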
/scratch/gouwar.j/cran-all/cranData/CFAcoop/inst/doc/CFAcoop.Rmd
/scratch/gouwar.j/cran-all/cranData/CFAcoop/vignettes/CFAcoop.Rmd
# This file was generated by Rcpp::compileAttributes # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 cscr_samples_Cpp <- function(func_list, init_list, free_list, arg_list, tout, Nmax, rel_tol, nsmp, ncore) { .Call('CFC_cscr_samples_Cpp', PACKAGE = 'CFC', func_list, init_list, free_list, arg_list, tout, Nmax, rel_tol, nsmp, ncore) }
/scratch/gouwar.j/cran-all/cranData/CFC/R/RcppExports.R
# standard single-interval Simpson rule for numerical integration # implemented as a reference simpson.standard <- function(f, a, b, arg) { c <- (a + b) / 2.0 h3 <- (b - a) / 6.0 return (h3*(f(a, arg) + 4.0*f(c, arg) + f(b, arg))) } # numerical integration "\int_a^b f(t, ...) dg(t, ...)" over a single interval, using generalized Simpson's rule simpson.generalized <- function(f, g = function(x, ...) x, a, b, ...) { # can be abbreviated if f,g are assumed to be vectorized m <- (a + b) / 2 ga <- g(a, ...) gm <- g(m, ...) gb <- g(b, ...) fa <- f(a, ...) fm <- f(m, ...) fb <- f(b, ...) h <- gb - ga d <- 2 * gm - (ga + gb) r <- d / h ret <- (h / 6) * ((fa + 4 * fm + fb) + 2 * r * (fa - fb) - 3 * r^2 * (fa + fb)) / (1 - r^2) return (ret) } plot.summary.cfc <- function( x , which = c(1, 2) , ... ) { tvec <- x$tout ci.q <- x$ci s.q <- x$s quantiles <- x$quantiles dimCI <- dim(ci.q) K <- dimCI[length(dimCI)] if (1 %in% which) { # cumulative incidence function for (k in 1:K) { if (quantiles) { ci.ylim <- range(ci.q[, , k]) plot(tvec, ci.q[2, , k], type = "l" , xlab = "time from index", ylab = "CI" , ylim = ci.ylim, main = paste0("cause ", k, " - cumulative incidence")) lines(tvec, ci.q[1, , k], lty = 2) lines(tvec, ci.q[3, , k], lty = 2) } else { plot(tvec, ci.q[, k], type = "l" , xlab = "time from index", ylab = "CI" , main = paste0("cause ", k, " - cumulative incidence")) } } } if (2 %in% which) { # survival probability for (k in 1:K) { if (quantiles) { s.ylim <- range(s.q[, , k]) plot(tvec, s.q[2, , k], type = "l" , xlab = "time from index", ylab = "survival probability" , ylim = s.ylim, main = paste0("cause ", k, " - survival")) lines(tvec, s.q[1, , k], lty = 2) lines(tvec, s.q[3, , k], lty = 2) } else { plot(tvec, s.q[, k], type = "l" , xlab = "time from index", ylab = "survival probability" , main = paste0("cause ", k, " - survival")) } } } } summary.cfc <- function( object , f.reduce = function(x) x # transformation function applied to values at each time-point and cause , pval = 0.05 , ... # other arguments passed to 'f' ) { # check if f.reduce output is a vector or a scalar # if vector: we treat it as samples and compute quantiles for it # if scalar: just report the single number; no quantile calculation needed isVector <- length(f.reduce(object$ci[1, 1, ], ...)) > 1 s <- NA ci <- NA if (isVector) { ci <- apply(object$ci, MARGIN = c(1, 2), FUN = function(x) { quantile(f.reduce(x, ...), probs = c(pval / 2, 0.5, 1 - pval / 2)) }) s <- apply(object$s, MARGIN = c(1, 2), FUN = function(x) { quantile(f.reduce(x, ...), probs = c(pval / 2, 0.5, 1 - pval / 2)) }) } else { ci <- apply(object$ci, MARGIN = c(1, 2), FUN = f.reduce, ...) s <- apply(object$s, MARGIN = c(1, 2), FUN = f.reduce, ...) } ret <- list(tout = object$tout, ci = ci, s = s, quantiles = isVector) class(ret) <- c("summary.cfc", class(ret)) return (ret) } print.summary.cfc <- function(x, ...) 
{} cfc <- function(f.list, args.list, n, tout, Nmax = 100L, rel.tol = 1e-5, ncores = 1) { if (!is.list(f.list)) stop("f.list must be a list") K <- length(f.list) # number of causes if (K != length(args.list)) stop("length of args.list must match that of f.list") if (is.function(f.list[[1]])) { # R path ret <- cscr.samples.R(f.list, args.list, tout, Nmax, rel.tol, n, ncores) } else { # Cpp path func_list <- list() init_list <- list() free_list <- list() for (k in 1:K) { func_list[[k]] <- f.list[[k]][[1]] init_list[[k]] <- f.list[[k]][[2]] free_list[[k]] <- f.list[[k]][[3]] } ret <- cscr_samples_Cpp(func_list, init_list, free_list, args.list, tout, Nmax, rel.tol, n, ncores) ret$is.maxiter <- as.vector(ret$is.maxiter) # TODO: find better way to covert to vector (from matrix) inside Cpp code } ret$tout <- tout # we use this in plotting inside summary.cfc class(ret) <- c("cfc", class(ret)) return (ret) } cscr.samples.R <- function(f.list, args.list, tout, Nmax = 100L, rel.tol = 1e-6, nsmp, ncores = 1) { trapezoidal.step <- function(fvec, gvec) { h <- gvec[3] - gvec[1] ret <- (h / 2) * (fvec[1] + fvec[3]) return (ret) } simpson.step <- function(fvec, gvec) { gdiff <- diff(gvec) #h <- gvec[3] - gvec[1] #if (abs(h) < .Machine$double.eps) return (0.0) if (abs(gdiff[1]) < .Machine$double.eps) { return (trapezoidal.step(c(fvec[2], NA, fvec[3]), c(gvec[2], NA, gvec[3]))) } else if (abs(gdiff[2]) < .Machine$double.eps) { return (trapezoidal.step(c(fvec[1], NA, fvec[2]), c(gvec[1], NA, gvec[2]))) } #r <- (2* gvec[2] - gvec[1] - gvec[3]) / h h <- gdiff[1] + gdiff[2] r <- (gdiff[1] - gdiff[2]) / h ret <- (h / 6) * ((fvec[1] + 4 * fvec[2] + fvec[3]) + 2 * r * (fvec[1] - fvec[3]) - 3 * r^2 * (fvec[1] + fvec[3])) / (1 - r^2) return (ret) } tmax <- max(tout) nout <- length(tout) K <- length(f.list) I_out_value <- array(NA, dim = c(nout, K, nsmp)) scube <- array(NA, dim = c(nout, K, nsmp)) # unadjusted survival probabilities smat <- array(NA, dim = c(nout, K)) is.maxiter <- rep(F, nsmp) registerDoParallel(ncores) retsink <- foreach(j=1:nsmp) %dopar% { #for (j in 1:nsmp) { N <- 1 xvec.new <- c(0.0, tmax/2, tmax) fmat.new <- array(NA, dim = c(3, K)) for (k in 1:K) fmat.new[, k] <- f.list[[k]](xvec.new, args.list[[k]], j) fprodmat.new <- t(apply(fmat.new, 1, function(x) prod(x)/x)) # TODO: handle division by zero I.trap.int.new <- -sapply(1:K, function(k) { trapezoidal.step(fprodmat.new[, k], fmat.new[, k]) }) I.simp.int.new <- -sapply(1:K, function(k) { simpson.step(fprodmat.new[, k], fmat.new[, k]) }) xvec <- xvec.new f.mat <- fmat.new fprod.mat <- fprodmat.new I.trap.int <- matrix(I.trap.int.new, ncol = K) I.simp.int <- matrix(I.simp.int.new, ncol = K) I.trap.cum <- rbind(0, I.trap.int) I.simp.cum <- rbind(0, I.simp.int) err.abs.int <- abs(I.simp.int - I.trap.int) err.abs.cum <- abs(I.simp.cum - I.trap.cum) err.rel.cum <- err.abs.cum / abs(I.simp.cum) err.abs <- err.abs.cum[N + 1, ] err.rel <- err.rel.cum[N + 1, ] err.rel.max <- max(err.rel) while(max(err.rel) > rel.tol && N < Nmax) { # add Nmin idx <- which.max(apply(err.abs.int, 1, max)) xvec.new <- c(mean(xvec[(2*idx - 1):(2*idx)]), xvec[2*idx], mean(xvec[(2*idx):(2*idx + 1)])) xvec <- c(xvec[1:(2*idx - 1)], xvec.new, xvec[(2*idx + 1):(2*N + 1)]) for (k in 1:K) { fmat.new[, k] <- f.list[[k]](xvec.new, args.list[[k]], j) } fprodmat.new <- t(apply(fmat.new, 1, function(x) prod(x)/x)) # TODO: handle division by zero f.mat <- rbind(f.mat[1:(2*idx - 1), ], fmat.new, f.mat[(2*idx + 1):(2*N + 1), ]) fprod.mat <- rbind(fprod.mat[1:(2*idx - 1), ], fprodmat.new, 
fprod.mat[(2*idx + 1):(2*N + 1), ]) I.trap.int.1 <- -sapply(1:K, function(k) { trapezoidal.step(fprod.mat[2*idx + (-1:1), k], f.mat[2*idx + (-1:1), k]) }) I.trap.int.2 <- -sapply(1:K, function(k) { trapezoidal.step(fprod.mat[2*idx + (1:3), k], f.mat[2*idx + (1:3), k]) }) I.simp.int.1 <- -sapply(1:K, function(k) { simpson.step(fprod.mat[2*idx + (-1:1), k], f.mat[2*idx + (-1:1), k]) }) I.simp.int.2 <- -sapply(1:K, function(k) { simpson.step(fprod.mat[2*idx + (1:3), k], f.mat[2*idx + (1:3), k]) }) if (idx == 1) { I.trap.int <- rbind(I.trap.int.1, I.trap.int.2, I.trap.int[-1, ]) I.simp.int <- rbind(I.simp.int.1, I.simp.int.2, I.simp.int[-1, ]) } else if (idx == N) { I.trap.int <- rbind(I.trap.int[-N, ], I.trap.int.1, I.trap.int.2) I.simp.int <- rbind(I.simp.int[-N, ], I.simp.int.1, I.simp.int.2) } else { I.trap.int <- rbind(I.trap.int[1:(idx - 1), ], I.trap.int.1, I.trap.int.2, I.trap.int[(idx + 1):N, ]) I.simp.int <- rbind(I.simp.int[1:(idx - 1), ], I.simp.int.1, I.simp.int.2, I.simp.int[(idx + 1):N, ]) } I.trap.cum <- rbind(0, apply(I.trap.int, 2, cumsum)) # room for efficiency I.simp.cum <- rbind(0, apply(I.simp.int, 2, cumsum)) # room for efficiency err.abs.cum <- abs(I.simp.cum - I.trap.cum) err.rel.cum <- err.abs.cum / abs(I.simp.cum) err.abs.int <- abs(I.simp.int - I.trap.int) err.rel.int <- err.abs.int / abs(I.simp.int) err.abs <- err.abs.cum[N + 2, ] err.rel <- err.rel.cum[N + 2, ] err.rel.max <- max(err.rel) N <- N + 1 } idx.nodes <- seq(from = 1, to = 2*N + 1, by = 2) I.trap.out <- apply(I.trap.cum, 2, function(x) approx(xvec[idx.nodes], x, tout)$y) # TODO: consider nonlinear interpolations to make the curves look smoother I.simp.out <- apply(I.simp.cum, 2, function(x) approx(xvec[idx.nodes], x, tout)$y) # TODO: see above for (k in 1:K) { smat[, k] <- f.list[[k]](tout, args.list[[k]], j) } return (list(N = N, I.simp.out = I.simp.out, smat = smat)) } stopImplicitCluster() for (j in 1:nsmp) { is.maxiter[j] <- 1*(retsink[[j]]$N == Nmax) I_out_value[, , j] <- retsink[[j]]$I.simp.out scube[, , j] <- retsink[[j]]$smat } n.maxiter <- sum(is.maxiter) if (n.maxiter > 0) warning(paste0(n.maxiter, " of ", nsmp, " integrals did not converge after reaching maximum iterations")) return (list(ci = I_out_value, s = scube, is.maxiter = is.maxiter, n.maxiter = n.maxiter)) }
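# ---------------------------------------------------------------------------
# Minimal usage sketch for cfc() (illustration only; not part of the original
# source). Each cause is described by a survival function with signature
# f(t, args, n) returning S_k(t) for sample n; the two exponential causes and
# their rates below are hypothetical. Wrapped in `if (FALSE)` so that sourcing
# this file does not execute the example.
if (FALSE) {
  f1 <- function(t, args, n) exp(-args$rate[n] * t)
  f2 <- function(t, args, n) exp(-args$rate[n] * t)
  args1 <- list(rate = c(0.5, 1.0))
  args2 <- list(rate = c(0.2, 0.4))
  out <- cfc(f.list = list(f1, f2), args.list = list(args1, args2),
             n = 2, tout = seq(0, 5, by = 0.1))
  dim(out$ci)        # time points x causes x samples (cumulative incidence)
  dim(out$s)         # unadjusted cause-specific survival probabilities
  plot(summary(out)) # quantile bands across samples, per cause
}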
/scratch/gouwar.j/cran-all/cranData/CFC/R/cfc.R
#### SPLIT UP THE CODE FOR TIME BASIS AND QUANTILE BASIS: IT REDUCES CONFUSION AND IT ALSO CREATES MORE FUNCTIONS AND "FELESH"!! cfc.tbasis <- function(p1, p2, unity.tol = 1e-6, diff.tol = 1e-2, diff.tol.policy = c("mean","all"), check = TRUE) { # checks for p1,p2: 1) same dimensions, 2) between 0.0 and 1.0, 3) non-increasing with time, 4) start at 1.0, 5) check for large steps diff.tol.policy <- match.arg(diff.tol.policy) if (is.null(dim(p1))) { nt <- length(p1) if (length(p2) != nt) stop("p1 and p2 have unequal lengths") nother <- 1 dim.p1 <- c(nt, 1) } else { dim.p1 <- dim(p1) if (!identical(dim.p1, dim(p2))) stop("p1 and p2 dimensions do not match") nt <- dim.p1[1] nother <- prod(dim.p1)/nt } if (check) if (any(p1<0.0 | p1>1.0 | p2<0.0 | p2>1.0)) stop("out-of-range probabilities") p1.2d <- array(p1, dim = c(nt, nother)) p2.2d <- array(p2, dim = c(nt, nother)) if (check) if (any(abs(p1.2d[1,] - 1.0) > unity.tol)) stop("p1 probabilities must start at 1.0") if (check) if (any(abs(p2.2d[1,] - 1.0) > unity.tol)) stop("p2 probabilities must start at 1.0") seq.left <- 1:(nt-1) seq.right <- seq.left+1 dp1 <- apply(p1.2d, 2, diff) if (check) { if (any(dp1>0.0)) stop("increasing probabilities with time detected for p1") if (diff.tol.policy == "mean") { if (mean(dp1) < -1.0*diff.tol) stop("average change in p1 exceeds threshold") } else if (diff.tol.policy == "all") { if (any(dp1 < -1.0*diff.tol)) stop("one or more changes in p1 exceed threshold") } } dci1 <- -0.5 * (p2.2d[seq.left,] + p2.2d[seq.right,]) * dp1 ci1 <- rbind(0, apply(dci1, 2, cumsum)) ci1 <- array(ci1, dim = dim.p1) dp2 <- apply(p2.2d, 2, diff) if (check) { if (any(dp2>0.0)) stop("increasing probabilities with time detected for p2") if (diff.tol.policy == "mean") { if (mean(dp2) < -1.0*diff.tol) stop("average change in p2 exceeds threshold") } else if (diff.tol.policy == "all") { if (any(dp2 < -1.0*diff.tol)) stop("one or more changes in p2 exceed threshold") } } dci2 <- -0.5 * (p1.2d[seq.left,] + p1.2d[seq.right,]) * dp2 ci2 <- rbind(0, apply(dci2, 2, cumsum)) ci2 <- array(ci2, dim = dim.p1) if (is.null(dim(p1))) { ci1 <- drop(ci1) ci2 <- drop(ci2) } if (is.null(dim(p1))) { ret <- cbind(ci1, ci2, p1*p2) colnames(ret) <- c("ci1", "ci2", "efp") } else { ret <- list(ci1 = ci1, ci2 = ci2, efp = p1*p2) } class(ret) <- c("cfc.tbasis", class(ret)) return (ret) } summary.cfc.tbasis <- function(object, MARGIN = if (class(object)[2] == "matrix") NULL else 1, ...) { if (class(object)[2] == "matrix") { class(object)[1] <- "summary.cfc.tbasis" attr(object, "popavg") <- FALSE return (object) } MARGIN <- as.integer(MARGIN) if (!(1 %in% MARGIN)) stop("time dimension cannot be aggregated") if (identical(MARGIN, 1:length(dim(object$ci1)))) stop("cannot keep all dimensions during aggregation") #cat("MARGIN:", MARGIN, "\n") ci1 <- apply(object$ci1, MARGIN = MARGIN, mean) ci2 <- apply(object$ci2, MARGIN = MARGIN, mean) efp <- apply(object$efp, MARGIN = MARGIN, mean) if (is.null(dim(ci1))) { ret <- cbind(ci1, ci2, efp) colnames(ret) <- c("ci1", "ci2", "efp") } else { ret <- list(ci1 = ci1, ci2 = ci2, efp = efp) } class(ret) <- c("summary.cfc.tbasis", class(ret)) return (invisible(ret)) } plot.summary.cfc.tbasis <- function(x, t = 1, ci = 0.95, ...) 
{ if (class(x)[2] == "matrix") { nt <- dim(x)[1] if (length(t) == 1) t <- (0:(nt-1))*t else if (length(t) != nt) stop("bad length for t vector") plot(t, x[,"efp"], type = "l", ylim = c(0.0,1.0) , xlab = "Time", ylab = "Probability", col = "black") lines(t, x[,"ci1"], col = "red") lines(t, x[,"ci2"], col = "green") legend("topright", legend = c("Event-Free", "CI - Cause 1", "CI - Cause 2") , col = c("black", "red", "green"), lty = rep(1,3)) } else { dims <- dim(x$ci1) nt <- dims[1] if (length(t) == 1) t <- (0:(nt-1))*t else if (length(t) != nt) stop("bad length for t vector") nother <- prod(dims)/nt ci1.2d <- array(x$ci1, dim = c(nt, nother)) ci2.2d <- array(x$ci2, dim = c(nt, nother)) efp.2d <- array(x$efp, dim = c(nt, nother)) qvec <- c(0.5*(1-ci), 0.5, 0.5*(1+ci)) efp.q <- t(apply(efp.2d, 1, quantile, probs = qvec)) ci1.q <- t(apply(ci1.2d, 1, quantile, probs = qvec)) ci2.q <- t(apply(ci2.2d, 1, quantile, probs = qvec)) plot(t, efp.q[,2], type = "l", ylim = c(0.0, 1.0) , xlab = "Time", ylab = "Population Average", col = "black") lines(t, efp.q[,1], col = "black", lty = 2) lines(t, efp.q[,3], col = "black", lty = 2) lines(t, ci1.q[,2], col = "red") lines(t, ci1.q[,1], col = "red", lty = 2) lines(t, ci1.q[,3], col = "red", lty = 2) lines(t, ci2.q[,2], col = "green") lines(t, ci2.q[,1], col = "green", lty = 2) lines(t, ci2.q[,3], col = "green", lty = 2) legend("topright", legend = c("Event-Free", "CI - Cause 1", "CI - Cause 2") , col = c("black", "red", "green"), lty = rep(1,3)) return (invisible(list(efp = efp.q, ci1 = ci1.q, ci2 = ci2.q))) } } cfc.pbasis <- function(t1, t2, probs, unity.tol = 1e-6, diff.tol = 1e-2, diff.tol.policy = c("all", "mean")) { # TODO: consider allowing unsorted vectors; sort and then check for validity diff.tol.policy <- match.arg(diff.tol.policy) if (abs(probs[1] - 1.0) > unity.tol) stop("probability vector must start at 1.0") if (any(diff(probs) >= 0.0)) stop("probabilities must be decreasing with time") if (diff.tol.policy == "all") { if (any(diff(probs) < -1.0*diff.tol)) stop("one or more changes in probs exceed threshold") } else if (diff.tol.policy == "mean") { if (mean(diff(probs)) < -1.0*diff.tol) stop("average change in probs exceeds threshold") } if (is.null(dim(t1))) { nt <- length(t1) if (!is.null(dim(t2)) || length(t2) != nt) stop("t1 and t2 dimensions do not match") nother <- 1 dim.t1 <- c(nt, 1) } else { dim.t1 <- dim(t1) if (!identical(dim.t1, dim(t2))) stop("t1 and t2 dimensions do not match") nt <- dim.t1[1] nother <- prod(dim.t1)/nt } t1.2d <- array(t1, dim = c(nt, nother)) t2.2d <- array(t2, dim = c(nt, nother)) dt1 <- apply(t1.2d, 2, diff) dt2 <- apply(t2.2d, 2, diff) if (any(dt1 <= 0.0)) stop("non-increasing times detected in t1") if (any(dt2 <= 0.0)) stop("non-increasing times detected in t2") ret <- lapply(1:nother, function(n) { ta <- t1.2d[,n] tb <- t2.2d[,n] tmax <- min(max(ta), max(tb)) tcomb <- sort(unique(c(ta, tb))) tcomb <- tcomb[which(tcomb <= tmax)] pa <- approx(ta, probs, tcomb)$y pb <- approx(tb, probs, tcomb)$y rettmp <- cbind(tcomb, cfc.tbasis(pa, pb, check = FALSE)) colnames(rettmp) <- c("time", "ci1", "ci2", "efp") return (rettmp) }) if (nother == 1) ret <- ret[[1]] class(ret) <- c("cfc.pbasis", class(ret)) return (ret) } summary.cfc.pbasis <- function(object, ...) 
{ if (class(object)[2] == "matrix") { class(object)[1] <- "summary.cfc.pbasis" attr(object, "popavg") <- FALSE return (object) } tmax <- min(sapply(object, function(x) max(x[,"time"]))) tvec <- unique(sort(unlist(sapply(object, function(x) x[,"time"])))) tvec <- tvec[tvec < tmax] ci1 <- rowMeans(sapply(object, function(x) { approx(x[,"time"], x[,"ci1"], tvec)$y })) ci2 <- rowMeans(sapply(object, function(x) { approx(x[,"time"], x[,"ci2"], tvec)$y })) efp <- 1 - (ci1 + ci2) ret <- cbind(tvec, ci1, ci2, efp) colnames(ret) <- c("time", "ci1", "ci2", "efp") attr(ret, "popavg") <- TRUE class(ret) <- c("summary.cfc.pbasis", class(ret)) return (invisible(ret)) } plot.summary.cfc.pbasis <- function(x, ...) { popavg <- attr(x, "popavg") ylim <- c(0.0, 1.0) ylab <- if (popavg) "Population Average" else "Probability" plot(x[,"time"], x[,"efp"], type = "l", col = "black" , xlab = "Time", ylab = ylab, ylim = ylim) lines(x[,"time"], x[,"ci1"], col = "red") lines(x[,"time"], x[,"ci2"], col = "green") legend("topright", col = c("black", "red", "green") , legend = c("Event-Free", "CI - Cause 1", "CI - Cause 2") , lty = rep(1,3)) return (invisible(NULL)) } # print.summary.cfc.pbasis <- function(x, ...) { # if (attr(x, "popavg")) cat("Population averages:\n") # nprint <- 6 # print(head(x, nprint)) # if (nrow(x) > nprint) cat("(", nrow(x)-nprint, " more rows ...)\n", sep = "") # return (invisible(NULL)) # }
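# ---------------------------------------------------------------------------
# Minimal usage sketch for cfc.tbasis() (illustration only; not part of the
# original source). It expects two cause-specific survival curves on a common
# time grid, both starting at 1.0 and non-increasing, and returns the two
# cumulative-incidence curves plus the event-free probability. The exponential
# curves below are hypothetical. Wrapped in `if (FALSE)` so that sourcing this
# file does not execute the example.
if (FALSE) {
  tvec <- seq(0, 5, by = 0.01)
  p1 <- exp(-0.3 * tvec) # survival with respect to cause 1
  p2 <- exp(-0.1 * tvec) # survival with respect to cause 2
  out <- cfc.tbasis(p1, p2)
  head(out)              # columns: ci1, ci2, efp
  plot(summary(out), t = tvec)
}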
/scratch/gouwar.j/cran-all/cranData/CFC/R/cfc_legacy.R
cfc.prepdata <- function(formul, dat) { vars.all <- all.vars(formul) var.time <- vars.all[1] var.status <- vars.all[2] vars.expl <- vars.all[-c(1,2)] causes.plus.censoring <- sort(unique(dat[, var.status])) causes <- setdiff(causes.plus.censoring, 0) K <- length(causes) vars.newstatus <- paste("status", causes, sep = "_") formul.list <- list() for (k in 1:K) { dat[, vars.newstatus[k]] <- 1 * (dat[, var.status] == causes[k]) formul.list[[k]] <- as.formula(paste0("Surv(", var.time, ",", vars.newstatus[k], ") ~ ", paste(vars.expl, collapse = "+"))) } formul.noresp <- as.formula(paste0("~", paste(vars.expl, collapse = "+"))) return (list(K = K, dat = dat, formula.list = formul.list, formula.noresp = formul.noresp, tmax = max(dat[, var.time]))) } cfc.survreg.survprob <- function(t, args, n) { # predicting survival probability at a given time from index using object of class "survreg" dist.name <- args$dist mydist <- unlist(unname(survreg.distributions[dist.name])) if (any(names(mydist) == "dist")) { # derived distribution mydist.base <- unlist(unname(survreg.distributions[mydist$dist])) p <- mydist.base$density((mydist$trans(t) - t(args$coefficients) %*% c(args$x[n, ])) / args$scale)[, 1] } else { # base distribution p <- mydist$density((t - t(args$coefficients) %*% c(args$x[n, ])) / args$scale)[, 1] } return (1 - p) } cfc.survreg <- function(formula, data, newdata = NULL, dist = "weibull", control = survreg.control() , tout, Nmax = 100L, rel.tol = 1e-5) { # TODO: fix this to use newdata, not data during prediction # prepare data for cause-specific survival regression ret <- cfc.prepdata(formul = formula, dat = data) K <- ret$K formula.list <- ret$formula.list formula.noresp <- ret$formula.noresp dat <- ret$dat if (missing(tout)) tout <- seq(from = 0.0, to = ret$tmax, length.out = 100L) # survival regression on each cause if (length(dist) == 1) dist <- rep(dist, K) reg.list <- list() for (k in 1:K) reg.list[[k]] <- survreg(formula.list[[k]], dat, dist = dist[k], control = control, x = TRUE) # preparing new data if (!is.null(newdata)) { for (k in 1:K) { mf <- model.frame(formula.noresp, newdata) mm <- model.matrix(formula.noresp, mf) reg.list[[k]]$x <- mm[, colnames(reg.list[[k]]$x)] } } # calculating cumulative incidence f.list <- list(cfc.survreg.survprob, cfc.survreg.survprob) cscr.out <- cscr.samples.R(f.list, reg.list, tout, Nmax = Nmax, rel.tol = rel.tol, nrow(reg.list[[1]]$x)) class(cscr.out) <- "cfc" ret.final <- list(K = K, formulas = formula.list, regs = reg.list, tout = tout, cfc = cscr.out) class(ret.final) <- c("cfc.survreg", class(ret.final)) return (ret.final) } summary.cfc.survreg <- function(object, obs.idx = "all", ...) { if (obs.idx[1] == "all") obs.idx <- 1:nrow(object$regs[[1]]$x) ci.mean <- apply(X = object$cfc$ci, MARGIN = c(1,2), FUN = mean) s.mean <- apply(X = object$cfc$s, MARGIN = c(1,2), FUN = mean) ret <- list(tout = object$tout, ci = ci.mean, s = s.mean) class(ret) <- "summary.cfc.survreg" return (ret) } plot.summary.cfc.survreg <- function(x, which = c(1, 2), ...) 
{
  K <- ncol(x$ci)
  if (1 %in% which) {
    ylimm <- range(x$ci)
    col.vec <- 1:K
    plot(x$tout, x$ci[, 1], type = "l", ylim = ylimm, col = col.vec[1]
         , xlab = "time from index", ylab = "cumulative incidence")
    for (k in 2:K) {
      lines(x$tout, x$ci[, k], col = col.vec[k])
    }
    legend("topleft", legend = paste("cause", 1:K), col = col.vec, lty = rep(1, K))
  }
  if (2 %in% which) {
    for (k in 1:K) {
      plot(x$tout, x$ci[, k], type = "l", ylim = range(x$ci[, k], 1 - x$s[, k])
           , xlab = "time from index", ylab = "cumulative incidence", main = paste("cause", k))
      lines(x$tout, 1 - x$s[, k], lty = 2)
      legend("topleft", legend = c("competing-risk adjustment", "no adjustment"), col = c(1, 1), lty = c(1,2))
    }
  }
}
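
# --- Illustrative usage sketch (added for documentation; not in the original file) ---
# A minimal, hedged example of fitting cause-specific Weibull models with
# cfc.survreg() and plotting the cumulative-incidence summaries. The simulated
# data below are purely hypothetical; running the sketch assumes the survival
# package and the CFC package (which provides cscr.samples.R) are loaded.
library(survival)
set.seed(1)
n <- 200
dat <- data.frame(
  time   = rexp(n, rate = 0.5),            # follow-up time from index
  status = sample(0:2, n, replace = TRUE), # 0 = censored, 1/2 = competing causes
  z      = rnorm(n)                        # a single covariate
)
fit  <- cfc.survreg(Surv(time, status) ~ z, data = dat, dist = "weibull")
sfit <- summary(fit)   # cumulative incidence and survival, averaged over observations
plot(sfit, which = 1)  # overlaid cause-specific cumulative-incidence curves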
/scratch/gouwar.j/cran-all/cranData/CFC/R/cfc_survreg.R
.onAttach <- function(libname, pkgname) {
  RFver <- read.dcf(file = system.file("DESCRIPTION", package = pkgname), fields = "Version")
  packageStartupMessage(paste0("Package: ", pkgname, ", Version: ", RFver))
  packageStartupMessage("Cause-specific Framework for Competing-risk survival analysis")
  packageStartupMessage("School of Public Health, Imperial College London &")
  packageStartupMessage("Davidson Kempner Capital Management, NY")
}
/scratch/gouwar.j/cran-all/cranData/CFC/R/zzz.R
#' @export
Score_replace <- function(ratings, sim_index, ac){
  # Fill the active user's (column 'ac') missing ratings with ratings taken
  # from the users listed in 'sim_index', in the order given (e.g. most
  # similar first); each item is filled at most once.
  #---------------------------------conditions
  #1 ratings
  if (missing(ratings) || !is.matrix(ratings)){
    stop("No ratings supplied.")
  }
  #2 sim_index
  if (missing(sim_index) || !is.vector(sim_index) || !is.numeric(sim_index)){
    stop("No sim_index supplied.")
  }
  #3 ac
  if (missing(ac) || !is.numeric(ac) || any(ac<=0) || any(ac>dim(ratings)[2]) ){
    stop("No active_user specified.")
  }
  #---------------------------------conditions
  ratings2 <- ratings
  flag1 <- c(rep(NaN, dim(ratings)[1]))
  for (h in 1:dim(ratings)[2]) {
    for (j in 1:dim(ratings)[1]) {
      if(is.na(ratings2[j,ac]) && !is.na(ratings[j,sim_index[h]]) && is.na(flag1[j])) {
        ratings2[j,ac] <- ratings[j,sim_index[h]]
        flag1[j] <- 1
      }
    }
  }
  return(ratings2)
}
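
# --- Illustrative usage sketch (added for documentation; not in the original file) ---
# A small, hypothetical example: rows are items, columns are users, and NA
# marks an item the user has not rated. User 3 is the active user and
# sim_index lists the other users in decreasing order of similarity (chosen
# by hand here).
ratings <- matrix(c(5,  4, NA,
                    3, NA,  2,
                    NA, 1,  4), nrow = 3, byrow = TRUE)
sim_index <- c(1, 2)
filled <- Score_replace(ratings, sim_index, ac = 3)
# filled[1, 3] is now 5, copied from the most similar user that rated item 1.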
/scratch/gouwar.j/cran-all/cranData/CFF/R/Score_replace.R
#' @export
simple_predict <- function(ratings, ratings2, ac){
  # Rank the items for the active user 'ac' by the (imputed) scores in
  # 'ratings2' and return the row indices of the items the user has not yet
  # rated in the original 'ratings' matrix, in decreasing order of score.
  #---------------------------------conditions
  #1 ratings
  if (missing(ratings) || !is.matrix(ratings)){
    stop("No ratings supplied.")
  }
  #2 ratings2
  if (missing(ratings2) || !is.matrix(ratings2)){
    stop("No ratings2 supplied.")
  }
  #3 ac
  if (missing(ac) || !is.numeric(ac) || any(ac<=0) || any(ac>dim(ratings)[2])){
    stop("No active_user specified.")
  }
  #---------------------------------conditions
  sort_ratings_ac <- sort(ratings2[,ac], na.last = TRUE, method = "radix",
                          decreasing = TRUE, index.return = TRUE)
  predictedItems_ix <- sort_ratings_ac$ix
  #predictedItems_x <- sort_ratings_ac$x
  k1 <- dim(ratings)[1]
  for(j in 1:dim(ratings)[1]) {
    if(!is.na(ratings[predictedItems_ix[j],ac])) {
      predictedItems_ix[j] <- NaN
      k1 <- k1-1
    }
  }
  predictedItems <- c(rep(NaN,k1))
  k2 <- 0
  for(j in 1:dim(ratings)[1]) {
    if(!is.na(predictedItems_ix[j])) {
      k2 <- k2+1
      predictedItems[k2] <- predictedItems_ix[j]
    }
  }
  return(predictedItems)
}
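
# --- Illustrative usage sketch (added for documentation; not in the original file) ---
# Hypothetical example: 'ratings' holds the observed scores and 'ratings2' the
# same matrix with the active user's missing entries imputed (for instance by
# Score_replace). The call returns the indices of the items user 3 has not
# rated, ordered by the imputed score.
ratings  <- matrix(c(5,  4, NA,
                     3, NA,  2,
                     NA, 1,  4), nrow = 3, byrow = TRUE)
ratings2 <- ratings
ratings2[1, 3] <- 5                        # imputed score for the unrated item
simple_predict(ratings, ratings2, ac = 3)  # returns 1: recommend item 1 first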
/scratch/gouwar.j/cran-all/cranData/CFF/R/simple_predict.R
#' @export
simple_similarity <- function(ratings, max_score=5, min_score=1, ac){
  # Compute a similarity score between the active user 'ac' and every other
  # user: one minus the mean absolute rating difference over co-rated items,
  # rescaled by the rating range (max_score - min_score). Note that max_score
  # and min_score must be supplied explicitly by the caller; relying on the
  # defaults trips the missing() checks below.
  #---------------------------------conditions
  #1 ratings
  if (missing(ratings) || !is.matrix(ratings)){
    stop("No ratings supplied.")
  }
  #2,3 max_score, min_score
  if (missing(max_score) || missing(min_score) || !is.numeric(max_score) || !is.numeric(min_score) ||
      any(!is.finite(max_score)) || any(!is.finite(min_score)) || any(max_score<=min_score)){
    stop("The score is invalidly specified.")
  }
  #4 ac
  if (missing(ac) || !is.numeric(ac) || any(ac<=0) || any(ac>dim(ratings)[2])){
    stop("No active_user specified.")
  }
  #---------------------------------conditions
  sim_i <- c(rep(NaN, dim(ratings)[2]))
  for (i in 1:dim(ratings)[2]) {
    N_sim <- 0
    sum_sim <- 0
    dif <- c(rep(NaN, dim(ratings)[1]))
    sim_partial <- c(rep(NaN, dim(ratings)[1]))
    for (j in 1:dim(ratings)[1]) {
      if (!is.na(ratings[j,ac]) && !is.na(ratings[j,i])) {
        dif[j] <- abs(ratings[j,ac]-ratings[j,i])
        #if(dif[j]<=(max_score-min_score)/2)
        #{
        sim_partial[j] <- ((-dif[j])/(max_score-min_score))+1
        sum_sim <- sum_sim + sim_partial[j]
        N_sim <- N_sim + 1
        #}
      }
    }#j
    sim_i[i] <- sum_sim/N_sim
  }#i
  sim_i[ac] <- NaN
  sort_sim <- sort(sim_i, na.last = TRUE, method = "radix",
                   decreasing = TRUE, index.return = TRUE)
  t <- length(sort_sim$x)
  for (k in 1:length(sort_sim$x)) {
    if(is.na(sort_sim$x[k])) {
      sort_sim$ix[k] <- NaN
      t <- t-1
    }
  }
  obj_simple_similarity <- list(
    call = match.call(),
    sim_x = sort_sim$x[1:t],
    sim_index = sort_sim$ix[1:t])
  class(obj_simple_similarity) <- "simple_similarity"
  return(obj_simple_similarity)
}
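
# --- Illustrative usage sketch (added for documentation; not in the original file) ---
# A hedged end-to-end sketch of the three CFF steps: compute user similarities,
# impute the active user's missing ratings from the most similar users, then
# rank the unrated items. All data below are hypothetical.
ratings <- matrix(c(5,  4, NA,
                    3, NA,  2,
                    NA, 1,  4,
                    2,  2,  1), nrow = 4, byrow = TRUE)
sim    <- simple_similarity(ratings, max_score = 5, min_score = 1, ac = 3)
filled <- Score_replace(ratings, sim$sim_index, ac = 3)
simple_predict(ratings, filled, ac = 3)   # item indices recommended for user 3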
/scratch/gouwar.j/cran-all/cranData/CFF/R/simple_similarity.R
#' #' Determination of the dose level for next cohort in the calibration-free odds (CFO) design #' #' In the CFO design, the function is used to determine the dose movement based on the toxicity outcomes of the enrolled cohorts. #' #' @usage CFO.next(target, cys, cns, currdose, #' prior.para = list(alp.prior = target, bet.prior = 1 - target), #' cutoff.eli = 0.95, early.stop = 0.95) #' #' @param target the target DLT rate. #' @param cys the cumulative numbers of DLTs observed at the left, current, and right dose levels. #' @param cns the cumulative numbers of patients treated at the left, current, and right dose levels. #' @param currdose the current dose level. #' @param prior.para the prior parameters for a beta distribution, where set as \code{list(alp.prior = target, bet.prior = 1 - target)} #' by default, \code{alp.prior} and \code{bet.prior} represent the parameters of the prior distribution for #' the true DLT rate at any dose level. This prior distribution is specified as Beta(\code{alpha.prior}, \code{beta.prior}). #' @param cutoff.eli the cutoff to eliminate overly toxic doses for safety. We recommend #' the default value of \code{cutoff.eli = 0.95} for general use. #' @param early.stop the threshold value for early stopping. The default value \code{early.stop = 0.95} #' generally works well. #' #' @details The CFO design determines the dose level for the next cohort by assessing evidence from the current #' dose level and its adjacent levels. This evaluation is based on odds ratios denoted as \eqn{O_k}, where #' \eqn{k = L, C, R} represents left, current (central), and right dose levels. Additionally, we define \eqn{\overline{O}_k = 1/O_k}. #' The ratio \eqn{O_C / \overline{O}_{L}} indicates the inclination for de-escalation, while \eqn{\overline{O}_C / O_R} #' quantifies the tendency for escalation. Threshold values \eqn{\gamma_L} and \eqn{\gamma_R} are chosen to #' minimize the probability of making incorrect decisions. The decision process is summarized in Table 1 #' of Jin and Yin (2022). #' The early stopping and dose elimination rules are implemented to ensure patient safety. If the data suggest excessive #' toxicity at the current dose level, we exclude that dose level and those higher levels. If the lowest dose level is overly toxic, #' the trial will be terminated according to the early stopping rule. #' #' @note When the current dose level is the lowest or highest (i.e., at the boundary), the parts in \code{cys} and #' \code{cns} where there is no data are filled with \code{NA}. \cr #' The dose level indicated by \code{overtox} and all the dose levels above experience over-toxicity, and these dose levels will be eliminated. #' #' @return The \code{CFO.next()} function returns a list object comprising the following elements: #' \itemize{ #' \item target: the target DLT rate. #' \item cys: the cumulative counts of DLTs observed at the left, current, and right dose levels. #' \item cns: the cumulative counts of patients treated at the left, current, and right dose levels. #' \item decision: the decision in the CFO design, where \code{left}, \code{stay}, and \code{right} represent the #' movement directions, and \code{stop} indicates stopping the experiment. #' \item currdose: the current dose level. #' \item nextdose: the recommended dose level for the next cohort. \code{nextdose = 99} indicates that the trial is #' terminated due to early stopping. #' \item overtox: the situation regarding which positions experience over-toxicity. 
The dose level indicated #' by \code{overtox} and all the dose levels above experience over-toxicity. \code{overtox = NA} signifies that #' the occurrence of over-toxicity did not happen. #' } #' @author Jialu Fang, Wenliang Wang, and Guosheng Yin #' #' @references Jin H, Yin G (2022). CFO: Calibration-free odds design for phase I/II clinical trials. #' \emph{Statistical Methods in Medical Research}, 31(6), 1051-1066. #' #' @examples #' ## determine the dose level for the next cohort of new patients #' cys <- c(0, 1, 0); cns <- c(3, 6, 0) #' decision <- CFO.next(target=0.2, cys=cys, cns=cns, currdose=3) #' summary(decision) #' #' cys <- c(NA, 3, 0); cns <- c(NA, 3, 0) #' decision <- CFO.next(target=0.2, cys=cys, cns=cns, currdose=1) #' summary(decision) #' #' cys <- c(0, 3, NA); cns <- c(3, 3, NA) #' decision <- CFO.next(target=0.2, cys=cys, cns=cns, currdose=7) #' summary(decision) #' #' @import stats #' @export CFO.next <- function(target, cys, cns, currdose, prior.para=list(alp.prior=target, bet.prior=1-target), cutoff.eli=0.95, early.stop=0.95){ ############################################################################### ###############define the functions used for main function##################### ############################################################################### # posterior probability of pj >= phi given data post.prob.fn <- function(phi, y, n, alp.prior=0.1, bet.prior=0.1){ alp <- alp.prior + y bet <- bet.prior + n - y 1 - pbeta(phi, alp, bet) } overdose.fn <- function(phi, threshold, prior.para=list()){ y <- prior.para$y n <- prior.para$n alp.prior <- prior.para$alp.prior bet.prior <- prior.para$bet.prior pp <- post.prob.fn(phi, y, n, alp.prior, bet.prior) # print(data.frame("prob of overdose" = pp)) if ((pp >= threshold) & (prior.para$n>=3)){ return(TRUE) }else{ return(FALSE) } } prob.int <- function(phi, y1, n1, y2, n2, alp.prior, bet.prior){ alp1 <- alp.prior + y1 alp2 <- alp.prior + y2 bet1 <- bet.prior + n1 - y1 bet2 <- bet.prior + n2 - y2 fn.min <- function(x){ dbeta(x, alp1, bet1)*(1-pbeta(x, alp2, bet2)) } fn.max <- function(x){ pbeta(x, alp1, bet1)*dbeta(x, alp2, bet2) } const.min <- integrate(fn.min, lower=0, upper=1)$value const.max <- integrate(fn.max, lower=0, upper=1)$value p1 <- integrate(fn.min, lower=0, upper=phi)$value/const.min p2 <- integrate(fn.max, lower=0, upper=phi)$value/const.max list(p1=p1, p2=p2) } OR.values <- function(phi, y1, n1, y2, n2, alp.prior, bet.prior, type){ ps <- prob.int(phi, y1, n1, y2, n2, alp.prior, bet.prior) if (type=="L"){ pC <- 1 - ps$p2 pL <- 1 - ps$p1 oddsC <- pC/(1-pC) oddsL <- pL/(1-pL) OR <- oddsC*oddsL }else if (type=="R"){ pC <- 1 - ps$p1 pR <- 1 - ps$p2 oddsC <- pC/(1-pC) oddsR <- pR/(1-pR) OR <- (1/oddsC)/oddsR } return(OR) } All.OR.table <- function(phi, n1, n2, type, alp.prior, bet.prior){ ret.mat <- matrix(rep(0, (n1+1)*(n2+1)), nrow=n1+1) for (y1cur in 0:n1){ for (y2cur in 0:n2){ ret.mat[y1cur+1, y2cur+1] <- OR.values(phi, y1cur, n1, y2cur, n2, alp.prior, bet.prior, type) } } ret.mat } # compute the marginal prob when lower < phiL/phiC/phiR < upper # i.e., Pr(Y=y|lower<phi<upper) margin.phi <- function(y, n, lower, upper){ C <- 1/(upper-lower) fn <- function(phi) { dbinom(y, n, phi)*C } integrate(fn, lower=lower, upper=upper)$value } # Obtain the table of marginal distribution of (y1, y2) # after intergrate out (phi1, phi2) # under H0 and H1 # H0: phi1=phi, phi < phi2 < 2phi # H1: phi2=phi, 0 < phi1 < phi margin.ys.table <- function(n1, n2, phi, hyperthesis){ if (hyperthesis=="H0"){ p.y1s <- dbinom(0:n1, 
n1, phi) p.y2s <- sapply(0:n2, margin.phi, n=n2, lower=phi, upper=2*phi) }else if (hyperthesis=="H1"){ p.y1s <- sapply(0:n1, margin.phi, n=n1, lower=0, upper=phi) p.y2s <- dbinom(0:n2, n2, phi) } p.y1s.mat <- matrix(rep(p.y1s, n2+1), nrow=n1+1) p.y2s.mat <- matrix(rep(p.y2s, n1+1), nrow=n1+1, byrow=TRUE) margin.ys <- p.y1s.mat * p.y2s.mat margin.ys } # Obtain the optimal gamma for the hypothesis test optim.gamma.fn <- function(n1, n2, phi, type, alp.prior, bet.prior){ OR.table <- All.OR.table(phi, n1, n2, type, alp.prior, bet.prior) ys.table.H0 <- margin.ys.table(n1, n2, phi, "H0") ys.table.H1 <- margin.ys.table(n1, n2, phi, "H1") argidx <- order(OR.table) sort.OR.table <- OR.table[argidx] sort.ys.table.H0 <- ys.table.H0[argidx] sort.ys.table.H1 <- ys.table.H1[argidx] n.tol <- length(sort.OR.table) if (type=="L"){ errs <- rep(0, n.tol-1) for (i in 1:(n.tol-1)){ err1 <- sum(sort.ys.table.H0[1:i]) err2 <- sum(sort.ys.table.H1[(i+1):n.tol]) err <- err1 + err2 errs[i] <- err } min.err <- min(errs) if (min.err > 1){ gam <- 0 min.err <- 1 }else { minidx <- which.min(errs) gam <- sort.OR.table[minidx] } }else if (type=='R'){ errs <- rep(0, n.tol-1) for (i in 1:(n.tol-1)){ err1 <- sum(sort.ys.table.H1[1:i]) err2 <- sum(sort.ys.table.H0[(i+1):n.tol]) err <- err1 + err2 errs[i] <- err } min.err <- min(errs) if (min.err > 1){ gam <- 0 min.err <- 1 }else { minidx <- which.min(errs) gam <- sort.OR.table[minidx] } } list(gamma=gam, min.err=min.err) } ############################################################################### ############################MAIN DUNCTION###################################### ############################################################################### if (is.null(prior.para$alp.prior)){ prior.para <- c(prior.para, list(alp.prior=target, bet.prior=1-target)) } alp.prior <- prior.para$alp.prior bet.prior <- prior.para$bet.prior cover.doses <- c(0,0,0) for (i in 1:3){ cy <- cys[i] cn <- cns[i] if (is.na(cn)){ cover.doses[i] <- NA }else{ prior.para <- c(list(y=cy, n=cn),list(alp.prior=alp.prior, bet.prior=bet.prior)) if (overdose.fn(target, cutoff.eli, prior.para)){ cover.doses[i:3] <- 1 break() } } } if (cutoff.eli != early.stop) { cy <- cys[1] cn <- cns[1] if (is.na(cn)){ cover.doses[i] <- NA }else{ prior.para <- c(list(y=cy, n=cn),list(alp.prior=alp.prior, bet.prior=bet.prior)) if (overdose.fn(target, early.stop, prior.para)){ cover.doses[1:3] <- 1 } } } cover.doses <- ifelse(is.na(cys), NA, cover.doses) position <- which(cover.doses == 1)[1] overtox <- c(-1, 0, 1)[position] + currdose prior.para <- c(list(alp.prior=alp.prior, bet.prior=bet.prior)) if ((cover.doses[2] == 1)&(currdose == 1)){ index <- NA decision <- "stop" } else { if (cover.doses[2] == 1){ index <- -1 decision <- "de-escalation" } else{ if (is.na(cys[1]) & (cover.doses[3]==1)){ index <- 0 decision <- "stay" } else if (is.na(cys[1]) & (!(cover.doses[3]==1))){ gam2 <- optim.gamma.fn(cns[2], cns[3], target, "R", alp.prior, bet.prior)$gamma OR.v2 <- OR.values(target, cys[2], cns[2], cys[3], cns[3], alp.prior, bet.prior, type="R") if (OR.v2>gam2){ index <- 1 decision <- "escalation" }else{ index <- 0 decision <- "stay" } } else if (is.na(cys[3]) | (cover.doses[3]==1)){ gam1 <- optim.gamma.fn(cns[1], cns[2], target, "L", alp.prior, bet.prior)$gamma OR.v1 <- OR.values(target, cys[1], cns[1], cys[2], cns[2], alp.prior, bet.prior, type="L") if (OR.v1>gam1){ index <- -1 decision <- "de-escalation" }else{ index <- 0 decision <- "stay" } } else if (!(is.na(cys[1]) | is.na(cys[3]) | cover.doses[3]==1)){ gam1 <- 
optim.gamma.fn(cns[1], cns[2], target, "L", alp.prior, bet.prior)$gamma gam2 <- optim.gamma.fn(cns[2], cns[3], target, "R", alp.prior, bet.prior)$gamma OR.v1 <- OR.values(target, cys[1], cns[1], cys[2], cns[2], alp.prior, bet.prior, type="L") OR.v2 <- OR.values(target, cys[2], cns[2], cys[3], cns[3], alp.prior, bet.prior, type="R") v1 <- OR.v1 > gam1 v2 <- OR.v2 > gam2 if (v1 & !v2){ index <- -1 decision <- "de-escalation" }else if (!v1 & v2){ index <- 1 decision <- "escalation" }else{ index <- 0 decision <- "stay" } } } } if (decision=='stop'){ nextdose <- 99 }else{ nextdose <- currdose+index } out <- list(target=target, cys=cys, cns=cns, decision=decision, currdose = currdose, nextdose=nextdose, overtox=overtox) class(out) <- c("cfo_decision", "cfo") return(out) }
/scratch/gouwar.j/cran-all/cranData/CFO/R/CFO.next.R
#' Generate operating characteristics of sigle-drug trials in multiple simulations #' #' This function is used to perform multiple simulations for single-drug trials and obtain relevant operating characteristics. #' #' @usage CFO.oc(nsimu = 5000, design, target, p.true, init.level = 1, ncohort, cohortsize, #' assess.window = NA, tte.para = NA, accrual.rate = NA, accrual.dist = NA, #' prior.para = list(alp.prior = target, bet.prior = 1 - target), #' cutoff.eli = 0.95, early.stop = 0.95, seeds = NULL) #' #' @param nsimu the total number of trials to be simulated. The default value is 5000. #' @param design option for selecting different designs, which can be set as \code{'CFO'}, \code{'aCFO'}, #' \code{'TITE-CFO'}, \code{'TITE-aCFO'}, \code{'fCFO'}, \code{'f-aCFO'}, \code{'bCFO'}, #' and \code{'b-aCFO'}. Specifically, \code{'bCFO'} refers to the benchmark CFO design, and #' \code{'b-aCFO'} denotes the benchmark aCFO design. #' @param target the target DLT rate. #' @param p.true the true DLT rates under the different dose levels. #' @param init.level the dose level assigned to the first cohort. The default value \code{init.level} is 1. #' @param ncohort the total number of cohorts. #' @param cohortsize the number of patients of each cohort. #' @param assess.window the maximal assessment window size. \code{NA} should be assigned if the design without late-oneset outcomes. #' @param tte.para the parameter related with the distribution of the time to DLT events. The time to DLT is sampled from a Weibull #' distribution, with \code{tte.para} representing the proportion of DLTs occurring within the first half of the #' assessment window. \code{NA} should be assigned if the design without late-oneset outcomes. #' @param accrual.rate the accrual rate, i.e., the number of patients accrued per unit time. \code{NA} should be assigned #' if the design without late-onset outcomes. #' @param accrual.dist the distribution of the arrival times of patients. When \code{accrual.dist = 'fix'}, it corresponds to all #' patients in each cohort arriving simultaneously at a given accrual rate. When \code{accrual.dist = 'unif'}, #' it corresponds to a uniform distribution, and when \code{accrual.dist = 'exp'}, it corresponds to an #' exponential distribution. \code{NA} should be assigned if the design without late-oneset outcomes. #' @param prior.para the prior parameters for a beta distribution, where set as \code{list(alp.prior = target, bet.prior = 1 - target)} #' by default, \code{alp.prior} and \code{bet.prior} represent the parameters of the prior distribution for #' the true DLT rate at any dose level. This prior distribution is specified as Beta(\code{alpha.prior}, \code{beta.prior}). #' @param cutoff.eli the cutoff to eliminate overly toxic doses for safety. We recommend #' the default value of \code{cutoff.eli = 0.95} for general use. #' @param early.stop the threshold value for early stopping. The default value \code{early.stop = 0.95} #' generally works well. #' @param seeds A vector of random seeds for each simulation, for example, \code{seeds = 1:nsimu} (default is NULL). #' #' @note The operating characteristics are generated by simulating multiple single-drug trials under the #' pre-specified true toxicity probabilities of the investigational doses. The choice of which design to execute #' is determined by setting the \code{design} argument. 
Some time-related arguments (\code{assess.window}, \code{accrual.rate}, #' \code{tte.para}, and \code{accrual.dist}) need to be set as values only when running a design that can handle late-onset #' toxicities; otherwise, they default to \code{NA}.\cr #' Additionally, in the example, we set \code{nsimu = 5} for testing time considerations. In reality, \code{nsimu} #' is typically set to 5000 to ensure the accuracy of the results. #' #' @return The \code{CFO.oc()} function returns basic setup of ($simu.setup) and the operating #' characteristics of the design: \cr #' \itemize{ #' \item p.true: the true DLT rates under the different dose levels. #' \item selpercent: the selection percentage at each dose level. #' \item npatients: the averaged number of patients treated at each dose level in one simulation. #' \item ntox: the averaged number of toxicity observed at each dose level in one simulation. #' \item MTDsel: the percentage of correct selection of the MTD. #' \item MTDallo: the percentage of patients allocated to the MTD. #' \item oversel: the percentage of selecting a dose above the MTD. #' \item overallo: the percentage of allocating patients at dose levels above the MTD. #' \item averDLT: the percentage of the patients suffering DLT. #' \item averdur: the average trial duration if trials with late-onset toxicities. #' \item percentstop: the percentage of early stopping without selecting the MTD. #' \item simu.setup: the parameters for the simulation set-up. #' } #' #' @author Jialu Fang, Wenliang Wang, and Guosheng Yin #' #' @references Jin H, Yin G (2022). CFO: Calibration-free odds design for phase I/II clinical trials. #' \emph{Statistical Methods in Medical Research}, 31(6), 1051-1066. \cr #' Jin H, Yin G (2023). Time‐to‐event calibration‐free odds design: A robust efficient design for #' phase I trials with late‐onset outcomes. \emph{Pharmaceutical Statistics}. 22(5), 773–783.\cr #' Yin G, Zheng S, Xu J (2013). Fractional dose-finding methods with late-onset toxicity in #' phase I clinical trials. \emph{Journal of Biopharmaceutical Statistics}, 23(4), 856-870. 
#' #' @importFrom dplyr transmute #' @export #' #' @examples #' ## setting #' nsimu <- 5; target <- 0.2; ncohort <- 12; cohortsize <- 3; init.level <- 1 #' p.true <- c(0.01, 0.07, 0.20, 0.35, 0.50, 0.65, 0.80) #' prior.para = list(alp.prior = target, bet.prior = 1 - target) #' assess.window <- 3; accrual.rate <- 2; tte.para <- 0.5; accrual.dist <- 'unif' #' #' #' ## get the operating characteristics for 5 simulations using the f-aCFO design #' faCFOoc <- CFO.oc (nsimu, design='f-aCFO', target, p.true, init.level, ncohort, cohortsize, #' assess.window, tte.para, accrual.rate, accrual.dist, seeds = 1:nsimu) #' summary(faCFOoc) #' plot(faCFOoc) #' #' \donttest{ #' # This test may take longer than 5 seconds to run #' # It is provided for illustration purposes only #' # Users can run this code directly #' #' ## get the operating characteristics for 5 simulations using the CFO design #' CFOoc <- CFO.oc (nsimu, design = 'CFO', target, p.true, init.level, ncohort, cohortsize, #' assess.window = NA, tte.para = NA, accrual.rate = NA, accrual.dist = NA, seeds = 1:nsimu) #' summary(CFOoc) #' plot(CFOoc) #' #' ## get the operating characteristics for 5 simulations using the aCFO design #' aCFOoc <- CFO.oc (nsimu, design = 'aCFO', target, p.true, init.level, ncohort, cohortsize, #' assess.window = NA, tte.para = NA, accrual.rate = NA, accrual.dist = NA, seeds = 1:nsimu) #' summary(aCFOoc) #' plot(aCFOoc) #' ## get the operating characteristics for 5 simulations using the TITE-CFO design #' TITECFOoc <- CFO.oc (nsimu, design = 'TITE-CFO', target, p.true, init.level, ncohort, cohortsize, #' assess.window, tte.para, accrual.rate, accrual.dist, seeds = 1:nsimu) #' summary(TITECFOoc) #' plot(TITECFOoc) #' ## get the operating characteristics for 5 simulations using the TITE-aCFO design #' TITEaCFOoc <- CFO.oc (nsimu, design = 'TITE-aCFO', target, p.true, init.level, ncohort, cohortsize, #' assess.window, tte.para, accrual.rate, accrual.dist, seeds = 1:nsimu) #' summary(TITEaCFOoc) #' plot(TITEaCFOoc) #' ## get the operating characteristics for 5 simulations using the fCFO design #' fCFOoc <- CFO.oc (nsimu, design = 'fCFO', target, p.true, init.level, ncohort, cohortsize, #' assess.window, tte.para, accrual.rate, accrual.dist, seeds = 1:nsimu) #' summary(fCFOoc) #' plot(fCFOoc) #' ## get the operating characteristics for 5 simulations using the bCFO design #' bCFOoc <- CFO.oc (nsimu, design = 'bCFO', target, p.true, init.level, ncohort, cohortsize, #' assess.window, tte.para, accrual.rate, accrual.dist, seeds = 1:nsimu) #' summary(bCFOoc) #' plot(bCFOoc) #' ## get the operating characteristics for 5 simulations using the b-aCFO design #' baCFOoc <- CFO.oc (nsimu, design = 'b-aCFO', target, p.true, init.level, ncohort, cohortsize, #' assess.window, tte.para, accrual.rate, accrual.dist, seeds = 1:nsimu) #' summary(baCFOoc) #' plot(baCFOoc) #' } CFO.oc <- function(nsimu=5000, design, target, p.true, init.level=1, ncohort, cohortsize, assess.window=NA, tte.para=NA, accrual.rate=NA, accrual.dist=NA, prior.para=list(alp.prior=target, bet.prior=1-target), cutoff.eli=0.95, early.stop=0.95, seeds = NULL){ ############################################################################### ###############define the functions used for main function##################### ############################################################################### MTD.level <- function(phi, p.true){ if (p.true[1]>phi+0.1){ MTD <- 99 return(MTD) } MTD <- which.min(abs(phi - p.true)) return(MTD) } 
############################################################################### ############################MAIN DUNCTION###################################### ############################################################################### run.fn <- function(i){ if (design == 'CFO' || design == 'aCFO'){ res <- CFO.simu(design, target, p.true, init.level, ncohort, cohortsize, prior.para, cutoff.eli, early.stop, seed = seeds[i]) }else{ res <- lateonset.simu(design, target, p.true, init.level, ncohort, cohortsize, assess.window, tte.para, accrual.rate, accrual.dist, prior.para = prior.para, cutoff.eli = cutoff.eli, early.stop = early.stop, seed = seeds[i]) } ress <- list( res=res, paras=list(p.true=p.true, mtd=tmtd, prior.para=prior.para, target=target, ncohort=ncohort, cohortsize=cohortsize) ) return(ress) } tmtd <- MTD.level(target, p.true) results <- lapply(1:nsimu, run.fn) results_nopara <- lapply(1:nsimu, function(i)results[[i]]$res) ndose <- length(results_nopara[[1]]$npatients) Perc <- rep(0, ndose) nPatients <- rep(0, ndose); nTox <- rep(0, ndose) sumPatients <- 0; sumTox <- 0 nonErrStops <- 0 MTDsel <- 0; MTDallo <- 0; oversel <- 0; overallo <- 0 totaltime <- 0 for (res in results_nopara){ if (res$MTD != 99){ nonErrStops <- nonErrStops + 1 Perc[res$MTD] <- Perc[res$MTD] + 1 oversel <- oversel + sum(res$MTD>tmtd) if (tmtd==ndose){ overallo <- overallo }else{ overallo <- overallo + sum(res$npatients[(tmtd+1):ndose]) } } if (!is.null(res$totaltime)){ totaltime <- totaltime + res$totaltime } MTDsel <- MTDsel + sum(res$MTD==tmtd) MTDallo <- MTDallo + res$npatients[tmtd] sumTox <- sumTox + sum(res$ntox) sumPatients <- sumPatients + sum(res$npatients) nPatients <- nPatients + res$npatients nTox <- res$ntox + nTox } selpercent <- Perc/nsimu MTDsel <- MTDsel/nsimu MTDallo <- MTDallo/sumPatients oversel <- oversel/nsimu overallo <- overallo/sumPatients averDLT <- sumTox/sumPatients errStop <- nsimu-nonErrStops if (design == 'CFO' || design == 'aCFO'){ out <- list(p.true=p.true, selpercent=selpercent, npatients=nPatients/nsimu, ntox=nTox/nsimu, MTDsel=MTDsel, MTDallo=MTDallo, oversel=oversel, overallo=overallo, averDLT=averDLT, percentstop=errStop/nsimu, simu.setup = data.frame(target = target,ncohort = ncohort, cohortsize = cohortsize, design = design, nsimu = nsimu)) }else{ out <- list(p.true=p.true, selpercent=selpercent, npatients=nPatients/nsimu, ntox=nTox/nsimu, MTDsel=MTDsel, MTDallo=MTDallo, oversel=oversel, overallo=overallo, averDLT=averDLT, averdur = totaltime/nsimu, percentstop=errStop/nsimu, simu.setup = data.frame(target = target, ncohort = ncohort, cohortsize = cohortsize, design = design, nsimu = nsimu)) } class(out) <- c("cfo_oc","cfo") return(out) }
/scratch/gouwar.j/cran-all/cranData/CFO/R/CFO.oc.R
#' #' Select the maximum tolerated dose (MTD) for the real single-drug trial #' #' Select the maximum tolerated dose (MTD) when the real single-drug trial is completed #' #' @usage CFO.selectmtd(target, npts, ntox, #' prior.para = list(alp.prior = target, bet.prior = 1 - target), #' cutoff.eli = 0.95, early.stop = 0.95, verbose = TRUE) #' #' @param target the target DLT rate. #' @param npts a vector containing the number of patients treated at each dose level. #' @param ntox a vector containing the number of patients who experienced DLT at each dose level. #' @param prior.para the prior parameters for a beta distribution, where set as \code{list(alp.prior = target, bet.prior = 1 - target)} #' by default, \code{alp.prior} and \code{bet.prior} represent the parameters of the prior distribution for #' the true DLT rate at any dose level. This prior distribution is specified as Beta(\code{alpha.prior}, \code{beta.prior}). #' @param cutoff.eli the cutoff to eliminate overly toxic doses for safety. We recommend #' the default value of \code{cutoff.eli = 0.95} for general use. #' @param early.stop the threshold value for early stopping. The default value \code{early.stop = 0.95} #' generally works well. #' @param verbose set \code{verbose=TRUE} to return more details of the results. #' #' @details \code{CFO.selectmtd()} selects the MTD based on isotonic estimates of toxicity #' probabilities. \code{CFO.selectmtd()} selects as the MTD dose \eqn{j^*}, for which the #' isotonic estimate of the DLT rate is closest to the target. If there #' are ties, we select from the ties the highest dose level when the estimate #' of the DLT rate is smaller than the target, or the lowest dose level #' when the estimate of the DLT rate is greater than the target. The #' isotonic estimates are obtained by the pooled-adjacent-violators algorithm #' (PAVA). #' #' #' @return \code{CFO.selectmtd()} returns #' \itemize{ #' \item target: the target DLT rate. #' \item MTD: the selected MTD. \code{MTD = 99} indicates that all tested doses are overly toxic. #' \item p_est: the isotonic estimate of the DLT probablity at each dose and associated \eqn{95\%} credible interval. #' \code{p_est = NA} if all tested doses are overly toxic. #' \item p_overdose: the probability of overdosing defined as \eqn{Pr(toxicity > \code{target}|data)}. #' \code{p_overdose = NA} if all tested doses are overly toxic. #' } #' #' #' @note The MTD selection and dose escalation/de-escalation rule are two independent #' components of the trial design. Isotonic regression is employed to select the MTD after the completion of the trial. #' When appropriate, another dose selection procedure (e.g., based on a fitted logistic model) can be used to select #' the MTD after the completion of the trial using the CFO-type design. #' #' #' @author Jialu Fang, Wenliang Wang, and Guosheng Yin #' #' @references Jin H, Yin G (2022). CFO: Calibration-free odds design for phase I/II clinical trials. #' \emph{Statistical Methods in Medical Research}, 31(6), 1051-1066. \cr #' Bril G, Dykstra R, Pillers C, Robertson T (1984). Algorithm AS 206: Isotonic regression in two independent variables. #' \emph{Journal of the Royal Statistical Society. Series C (Applied Statistics)}, 33(3), 352–357. 
#' #' #' @examples #' #' ### select the MTD for the CFO-type single-drug trial #' n <- c(3,3,27,3,0,0,0) #' y <- c(0,0,4,2,0,0,0) #' selmtd <- CFO.selectmtd(target=0.2, npts=n, ntox=y) #' summary(selmtd) #' plot(selmtd) #' #' @export CFO.selectmtd <- function (target, npts, ntox, prior.para=list(alp.prior=target, bet.prior=1-target), cutoff.eli = 0.95, early.stop = 0.95, verbose = TRUE) { pava <- function(x, wt = rep(1, length(x))) { n <- length(x) if (n <= 1) return(x) if (any(is.na(x)) || any(is.na(wt))) { stop("Missing values in 'x' or 'wt' not allowed") } lvlsets <- (1:n) repeat { viol <- (as.vector(diff(x)) < 0) if (!(any(viol))) break i <- min((1:(n - 1))[viol]) lvl1 <- lvlsets[i] lvl2 <- lvlsets[i + 1] ilvl <- (lvlsets == lvl1 | lvlsets == lvl2) x[ilvl] <- sum(x[ilvl] * wt[ilvl])/sum(wt[ilvl]) lvlsets[ilvl] <- lvl1 } x } if (is.null(prior.para$alp.prior)){ prior.para <- c(prior.para, list(alp.prior=target, bet.prior=1-target)) } alp.prior <- prior.para$alp.prior bet.prior <- prior.para$bet.prior y = ntox n = npts ndose = length(n) elimi = rep(0, ndose) for (i in 1:ndose) { if (n[i] >= 3) { if (1 - pbeta(target, y[i] + alp.prior, n[i] - y[i] + bet.prior) > cutoff.eli) { elimi[i:ndose] = 1 break } } } if (cutoff.eli != early.stop) { if (n[1] >= 3) { if (1 - pbeta(target, y[1] + alp.prior, n[1] - y[1] + bet.prior) > early.stop) { elimi[1:ndose] = 1 } } } if (elimi[1] == 1 || sum(n[elimi == 0]) == 0){ selectdose = 99 }else { adm.set = (n != 0) & (elimi == 0) adm.index = which(adm.set == T) y.adm = y[adm.set] n.adm = n[adm.set] phat = (y.adm + alp.prior)/(n.adm + alp.prior + bet.prior) phat.var = (y.adm + alp.prior) * (n.adm - y.adm + bet.prior)/((n.adm + alp.prior + bet.prior)^2 * (n.adm + alp.prior + bet.prior + 1)) phat = pava(phat, wt = 1/phat.var) phat = phat + (1:length(phat)) * 1e-10 selectd = sort(abs(phat - target), index.return = T)$ix[1] selectdose = adm.index[selectd] } if (verbose == TRUE) { trtd = (n != 0) poverdose = pava(1 - pbeta(target, y[trtd] + alp.prior, n[trtd] - y[trtd] + bet.prior)) phat.all = pava((y[trtd] + alp.prior)/(n[trtd] + alp.prior + bet.prior), wt = 1/((y[trtd] + alp.prior) * (n[trtd] - y[trtd] + bet.prior)/((n[trtd] + alp.prior + bet.prior)^2 * (n[trtd] + alp.prior + bet.prior + 1)))) A1 = A2 = A3 = A4 = NULL k = 1 for (i in 1:ndose) { if (n[i] > 0) { A1 = append(A1, formatC(phat.all[k], digits = 2, format = "f")) A2 = append(A2, formatC(qbeta(0.025, y[i] + alp.prior, n[i] - y[i] + bet.prior), digits = 2, format = "f")) A3 = append(A3, formatC(qbeta(0.975, y[i] + alp.prior, n[i] - y[i] + bet.prior), digits = 2, format = "f")) A4 = append(A4, formatC(poverdose[k], digits = 2, format = "f")) k = k + 1 } else { A1 = append(A1, "----") A2 = append(A2, "----") A3 = append(A3, "----") A4 = append(A4, "----") } } p_est = data.frame(cbind(dose = 1:length(npts), phat = A1, CI = paste("(", A2, ",", A3, ")", sep = ""))) if (selectdose == 99) { message("All tested doses are overly toxic. No MTD is selected! \n") out = list(target = target, MTD = selectdose, p_est = NA, p_overdose = NA) } else { out = list(target = target, MTD = selectdose, p_est = p_est, p_overdose = A4) } } else { if (selectdose == 99) { message("All tested doses are overly toxic. No MTD is selected! \n") } out = list(target = target, MTD = selectdose) } class(out)<-c("cfo_sel","cfo") return(out) }
/scratch/gouwar.j/cran-all/cranData/CFO/R/CFO.selectmtd.R
#' Conduct one simulation using the Calibration-free odds (CFO) or accumulative CFO (aCFO) design. #' #' In the CFO and aCFO designs, the function is used to conduct one single simulation and find the maximum tolerated dose (MTD). #' #' @usage CFO.simu(design, target, p.true, init.level = 1, ncohort, cohortsize, #' prior.para = list(alp.prior = target, bet.prior = 1 - target), #' cutoff.eli = 0.95, early.stop = 0.95, seed = NULL) #' #' @param design option for selecting different designs, which can be set as \code{'CFO'} and \code{'aCFO'}. #' @param target the target DLT rate. #' @param p.true the true DLT rates under the different dose levels. #' @param init.level the dose level assigned to the first cohort. The default value \code{init.level} is 1. #' @param ncohort the total number of cohorts. #' @param cohortsize the number of patients of each cohort. #' @param prior.para the prior parameters for a beta distribution, where set as \code{list(alp.prior = target, bet.prior = 1 - target)} #' by default, \code{alp.prior} and \code{bet.prior} represent the parameters of the prior distribution for #' the true DLT rate at any dose level. This prior distribution is specified as Beta(\code{alpha.prior}, \code{beta.prior}). #' @param cutoff.eli the cutoff to eliminate overly toxic doses for safety. We recommend #' the default value of \code{cutoff.eli = 0.95} for general use. #' @param early.stop the threshold value for early stopping. The default value \code{early.stop = 0.95} #' generally works well. #' @param seed an integer to be set as the seed of the random number generator for reproducible results. The default value is set to \code{NULL}. #' #' @note The \code{CFO.simu()} function is designed to conduct a single CFO or aCFO simulation. If \code{design = 'CFO'}, it corresponds #' to the CFO design. If \code{design = 'aCFO'}, it corresponds to the aCFO design. \cr #' The early stopping and dose elimination rules are incorporated into the CFO or aCFO design #' to ensure patient safety and benefit. If there is substantial evidence indicating that the current dose level #' exhibits excessive toxicity, we exclude the current dose level as well as higher dose levels from the trial. If the lowest dose level is overly toxic, the trial will be terminated #' according to the early stopping rule. Upon the predefined maximum sample size is reached or the lowest dose #' level is over-toxicity, the experiment is concluded, and the MTD is determined using isotonic regression. #' #' @return The \code{CFO.simu} function returns a list object comprising the following components: #' \itemize{ #' \item target: the target DLT rate. #' \item MTD: the selected MTD. \code{MTD = 99} indicates that the simulation is terminated due to early stopping. #' \item correct: a binary indicator of whether the recommended dose level matches the target DLT rate (1 for yes). #' \item npatients: the total number of patients allocated to all dose levels. #' \item ntox: the total number of DLTs observed for all dose levels. #' \item npercent: the percentage of subjects assigned to the target DLT rate. #' \item over.doses: a vector indicating whether each dose is overdosed or not (1 for yes). #' \item cohortdose: a vector including the dose level assigned to each cohort. #' \item ptoxic: the percentage of subjects assigned to dose levels with a DLT rate greater than the target. #' \item patientDLT: a vector including the DLT outcome observed for each patient. #' \item sumDLT: the total number of DLT observed. 
#' \item earlystop: a binary indicator of whether the trial is early stopped (1 for yes). #' } #' #' @author Jialu Fang, Wenliang Wang, and Guosheng Yin #' #' @references Jin H, Yin G (2022). CFO: Calibration-free odds design for phase I/II clinical trials. #' \emph{Statistical Methods in Medical Research}, 31(6), 1051-1066. #' #' @examples #' target <- 0.2; ncohort <- 12; cohortsize <- 3; init.level <- 1 #' p.true <- c(0.01, 0.07, 0.20, 0.35, 0.50, 0.65, 0.80) #' ### find the MTD for a single CFO simulation #' CFOtrial <- CFO.simu(design = 'CFO', target, p.true, init.level, ncohort, cohortsize, seed = 1) #' summary(CFOtrial) #' plot(CFOtrial) #' ### find the MTD for a single aCFO simulation #' aCFOtrial <- CFO.simu(design = 'aCFO', target, p.true, init.level, ncohort, cohortsize, seed = 1) #' summary(aCFOtrial) #' plot(aCFOtrial) #' @export CFO.simu <- function(design, target, p.true, init.level=1, ncohort, cohortsize, prior.para=list(alp.prior=target, bet.prior=1-target), cutoff.eli=0.95, early.stop=0.95, seed=NULL){ ############################################################################### ###############define the functions used for main function##################### ############################################################################### # posterior probability of pj >= phi given data post.prob.fn <- function(phi, y, n, alp.prior=0.1, bet.prior=0.9){ alp <- alp.prior + y bet <- bet.prior + n - y 1 - pbeta(phi, alp, bet) } overdose.fn <- function(phi, threshold, prior.para=list()){ y <- prior.para$y n <- prior.para$n alp.prior <- prior.para$alp.prior bet.prior <- prior.para$bet.prior pp <- post.prob.fn(phi, y, n, alp.prior, bet.prior) # print(data.frame("prob of overdose" = pp)) if ((pp >= threshold) & (prior.para$n>=3)){ return(TRUE) }else{ return(FALSE) } } ############################################################################### ############################MAIN DUNCTION###################################### ############################################################################### if (is.null(prior.para$alp.prior)){ prior.para <- c(prior.para, list(alp.prior=target, bet.prior=1-target)) } alp.prior <- prior.para$alp.prior bet.prior <- prior.para$bet.prior set.seed(seed) earlystop <- 0 ndose <- length(p.true) doselist <- rep(0, ncohort) currdose <- init.level ays <- rep(0, ndose) # number of responses for different doses. ans <- rep(0, ndose) # number of subject for different doses. 
tover.doses <- rep(0, ndose) # Whether each dose is overdosed or not, 1 yes DLTlist <- c() for (i in 1:ncohort){ pc <- p.true[currdose] doselist[i] <- currdose # sample from current dose cres <- rbinom(cohortsize, 1, pc) DLTlist <- c(DLTlist, cres) # update results ays[currdose] <- ays[currdose] + sum(cres) ans[currdose] <- ans[currdose] + cohortsize cy <- ays[currdose] cn <- ans[currdose] prior.para <- c(list(y=cy, n=cn), list(alp.prior=alp.prior, bet.prior=bet.prior)) if (overdose.fn(target, cutoff.eli, prior.para)){ tover.doses[currdose:ndose] <- 1 } if (currdose == 1){ if (cutoff.eli != early.stop) { cy <- ays[1] cn <- ans[1] prior.para <- c(list(y=cy, n=cn), list(alp.prior=alp.prior, bet.prior=bet.prior)) if (overdose.fn(target, early.stop, prior.para)){ tover.doses[1:ndose] <- 1 } } } if (tover.doses[1] == 1){ earlystop <- 1 break() } prior.para <- c(list(alp.prior=alp.prior, bet.prior=bet.prior)) if (design == 'CFO'){ # the results for current 3 dose levels if (currdose!=1){ cys <- ays[(currdose-1):(currdose+1)] cns <- ans[(currdose-1):(currdose+1)] }else{ cys <- c(NA, ays[1:(currdose+1)]) cns <- c(NA, ans[1:(currdose+1)]) } currdose <- CFO.next(target, cys, cns, currdose, prior.para, cutoff.eli, early.stop)$nextdose }else if (design == 'aCFO'){ currdose <- aCFO.next(target, ays, ans, currdose, prior.para, cutoff.eli, early.stop)$nextdose }else{ stop("The input design is invalid; it can only be set as 'CFO' or 'aCFO'.") } } if (earlystop==0){ MTD <- CFO.selectmtd(target, ans, ays, prior.para, cutoff.eli, early.stop, verbose=FALSE)$MTD }else{ MTD <- 99 } correct <- 0 if (MTD == target){ correct <- 1 } npercent <- ans[which(p.true == target)]/(ncohort*cohortsize) ptoxic <- sum(ans[which(p.true > target)])/(ncohort*cohortsize) out<-list(target=target, MTD=MTD, correct=correct, npatients=ans, ntox=ays, npercent=npercent, over.doses=tover.doses, cohortdose=doselist, ptoxic=ptoxic, patientDLT=DLTlist, sumDLT=sum(DLTlist), earlystop=earlystop) class(out) <- c("cfo_trial", "cfo") return(out) }
/scratch/gouwar.j/cran-all/cranData/CFO/R/CFO.simu.R
#' Determinate the dose level for the next cohort in the two-dimensional calibration-free odds (2dCFO) design. #' #' This function is used to determine the next dose level for the next cohort in the 2dCFO design. #' #' @usage CFO2d.next(target, cys, cns, currdose, #' prior.para = list(alp.prior = target, bet.prior = 1 - target), #' cutoff.eli = 0.95, early.stop = 0.95, seed = NULL) #' #' @param target the target DLT rate. #' @param cys a matrix of the number of DLTs observed for each dose combination. #' @param cns a matrix of the number of patients allocated to each dose combination. #' @param currdose a vector of the current dose indices in the horizontal and vertical direction. #' @param prior.para the prior parameters for a beta distribution, where set as \code{list(alp.prior = target, bet.prior = 1 - target)} #' by default, \code{alp.prior} and \code{bet.prior} represent the parameters of the prior distribution for #' the true DLT rate at any dose level. This prior distribution is specified as Beta(\code{alpha.prior}, \code{beta.prior}). #' @param cutoff.eli the cutoff to eliminate overly toxic doses for safety. We recommend #' the default value of \code{cutoff.eli = 0.95} for general use. #' @param early.stop the threshold value for early stopping. The default value \code{early.stop = 0.95} #' generally works well. #' @param seed an integer to be set as the seed of the random number generator for reproducible results. The default value is set to \code{NULL}. #' #' @details In the 2dCFO design, decision-making within the two-dimensional toxicity probability space is conducted by performing two independent one-dimensional #' CFO analyses along both the horizontal and vertical axes (Wang et al. 2023). #' #' @note When the current dose level is the lowest or highest (i.e., at the boundary), the parts in \code{cys} and #' \code{cns} where there is no data are filled with \code{NA}. \cr #' The dose level indicated by \code{overtox} and all the dose levels above experience over-toxicity, and these dose levels will be eliminated. #' #' @return The \code{CFO2d.next()} function returns a list with the following components: #' \itemize{ #' \item target: the target DLT rate. #' \item cys: a 3 by 3 matrix of the number of DLT observed for each dose combination at and around the current dose. #' \item cns: a 3 by 3 matrix of the number of patients allocated to each dose combination at and around the current dose. #' \item decision: a vector of length 2 representing the recommended decisions for vertical and horizontal #' directions, and \code{stop} indicates stopping the experiment. #' \item currdose: the current dose combination. #' \item nextdose: the recommended dose combination for the next cohort. \code{nextdose = (99, 99)} indicates that the trial is #' terminated due to early stopping. #' \item overtox: the situation regarding which positions experience over-toxicity. The dose level indicated #' by \code{overtox} and all the dose levels above experience over-toxicity. \code{overtox = NA} signifies that the #' occurrence of over-toxicity did not happen. #' } #' #' @author Jialu Fang, Wenliang Wang, and Guosheng Yin #' #' @references Jin H, Yin G (2022). CFO: Calibration-free odds design for phase I/II clinical trials. #' \emph{Statistical Methods in Medical Research}, 31(6), 1051-1066. \cr #' Wang W, Jin H, Zhang Y, Yin G (2023). Two-dimensional calibration-free odds (2dCFO) #' design for phase I drug-combination trials. \emph{Frontiers in Oncology}, 13, 1294258. 
#' #' @export #' @examples #' cns <- matrix(c(3, 3, 0, #' 0, 6, 0, #' 0, 0, 0), #' nrow = 3, ncol = 3, byrow = TRUE) #' #' cys <- matrix(c(0, 1, 0, #' 0, 2, 0, #' 0, 0, 0), #' nrow = 3, ncol = 3, byrow = TRUE) #' currdose <- c(2,3) #' decision <- CFO2d.next(target = 0.3, cys, cns, currdose = currdose, seed = 1) #' summary(decision) CFO2d.next <- function(target, cys, cns, currdose, prior.para=list(alp.prior=target, bet.prior=1-target), cutoff.eli=0.95, early.stop=0.95, seed=NULL){ cidx.A <- 0 cidx.B <- 0 alp.prior <- prior.para$alp.prior bet.prior <- prior.para$bet.prior set.seed(seed) cover.doses=matrix(0,3,3) overtox <- NA # posterior probability of pj >= phi given data post.prob.fn <- function(phi, y, n, alp.prior=0.1, bet.prior=0.9){ alp <- alp.prior + y bet <- bet.prior + n - y 1 - pbeta(phi, alp, bet) } prob.int <- function(phi, y1, n1, y2, n2, alp.prior, bet.prior){ alp1 <- alp.prior + y1 alp2 <- alp.prior + y2 bet1 <- alp.prior + n1 - y1 bet2 <- alp.prior + n2 - y2 fn.min <- function(x){ dbeta(x, alp1, bet1)*(1-pbeta(x, alp2, bet2)) } fn.max <- function(x){ pbeta(x, alp1, bet1)*dbeta(x, alp2, bet2) } const.min <- integrate(fn.min, lower=0, upper=1)$value const.max <- integrate(fn.max, lower=0, upper=1)$value p1 <- integrate(fn.min, lower=0, upper=phi)$value/const.min p2 <- integrate(fn.max, lower=0, upper=phi)$value/const.max list(p1=p1, p2=p2) } overdose.fn <- function(phi, threshold, y, n, prior.para=list(alp.prior=phi, bet.prior=1-phi)){ alp.prior <- prior.para$alp.prior bet.prior <- prior.para$bet.prior pp <- post.prob.fn(phi, y, n, alp.prior, bet.prior) if ((pp >= 0.95) & (n>=3)){ return(TRUE) }else{ return(FALSE) } } OR.values <- function(phi, y1, n1, y2, n2, alp.prior, bet.prior, type){ ps <- prob.int(phi, y1, n1, y2, n2, alp.prior, bet.prior) if (type=="L"){ pC <- 1 - ps$p2 pL <- 1 - ps$p1 oddsC <- pC/(1-pC) oddsL <- pL/(1-pL) OR <- oddsC*oddsL }else if (type=="R"){ pC <- 1 - ps$p1 pR <- 1 - ps$p2 oddsC <- pC/(1-pC) oddsR <- pR/(1-pR) OR <- (1/oddsC)/oddsR }else if (type=="D"){ pC <- 1 - ps$p2 pD <- 1 - ps$p1 oddsC <- pC/(1-pC) oddsD <- pD/(1-pD) OR <- oddsC*oddsD }else if (type=="U"){ pC <- 1 - ps$p1 pU <- 1 - ps$p2 oddsC <- pC/(1-pC) oddsU <- pU/(1-pU) OR <- (1/oddsC)/oddsU } return(OR) } All.OR.table <- function(phi, n1, n2, type, alp.prior, bet.prior){ ret.mat <- matrix(rep(0, (n1+1)*(n2+1)), nrow=n1+1) for (y1cur in 0:n1){ for (y2cur in 0:n2){ ret.mat[y1cur+1, y2cur+1] <- OR.values(phi, y1cur, n1, y2cur, n2, alp.prior, bet.prior, type) } } ret.mat } # compute the marginal prob when lower < phiL/phiC/phiR < upper # i.e., Pr(Y=y|lower<phi<upper) margin.phi <- function(y, n, lower, upper){ C <- 1/(upper-lower) fn <- function(phi) { dbinom(y, n, phi)*C } integrate(fn, lower=lower, upper=upper)$value } # Obtain the table of marginal distribution of (y1, y2) # after intergrate out (phi1, phi2) # under H0 and H1 # H0: phi1=phi, phi < phi2 < 2phi # H1: phi2=phi, 0 < phi1 < phi margin.ys.table <- function(n1, n2, phi, hyperthesis){ if (hyperthesis=="H0"){ p.y1s <- dbinom(0:n1, n1, phi) p.y2s <- sapply(0:n2, margin.phi, n=n2, lower=phi, upper=2*phi) }else if (hyperthesis=="H1"){ p.y1s <- sapply(0:n1, margin.phi, n=n1, lower=0, upper=phi) p.y2s <- dbinom(0:n2, n2, phi) } p.y1s.mat <- matrix(rep(p.y1s, n2+1), nrow=n1+1) p.y2s.mat <- matrix(rep(p.y2s, n1+1), nrow=n1+1, byrow=TRUE) margin.ys <- p.y1s.mat * p.y2s.mat margin.ys } # Obtain the optimal gamma for the hypothesis test optim.gamma.fn <- function(n1, n2, phi, type, alp.prior, bet.prior){ OR.table <- All.OR.table(phi, n1, n2, 
type, alp.prior, bet.prior) ys.table.H0 <- margin.ys.table(n1, n2, phi, "H0") ys.table.H1 <- margin.ys.table(n1, n2, phi, "H1") argidx <- order(OR.table) sort.OR.table <- OR.table[argidx] sort.ys.table.H0 <- ys.table.H0[argidx] sort.ys.table.H1 <- ys.table.H1[argidx] n.tol <- length(sort.OR.table) if (type=="L"){ errs <- rep(0, n.tol-1) for (i in 1:(n.tol-1)){ err1 <- sum(sort.ys.table.H0[1:i]) err2 <- sum(sort.ys.table.H1[(i+1):n.tol]) err <- err1 + err2 errs[i] <- err } min.err <- min(errs) if (min.err > 1){ gam <- 0 min.err <- 1 }else { minidx <- which.min(errs) gam <- sort.OR.table[minidx] } }else if (type=='R'){ errs <- rep(0, n.tol-1) for (i in 1:(n.tol-1)){ err1 <- sum(sort.ys.table.H1[1:i]) err2 <- sum(sort.ys.table.H0[(i+1):n.tol]) err <- err1 + err2 errs[i] <- err } min.err <- min(errs) if (min.err > 1){ gam <- 0 min.err <- 1 }else { minidx <- which.min(errs) gam <- sort.OR.table[minidx] } } list(gamma=gam, min.err=min.err) } make.decision.1dCFO.fn <- function(phi, cys, cns, alp.prior, bet.prior, cover.doses, diag=FALSE){ if (cover.doses[2] == 1){ return(1) }else{ if (is.na(cys[1]) & (cover.doses[3]==1)){ return(2) }else if (is.na(cys[1]) & (!(cover.doses[3]==1))){ gam2 <- optim.gamma.fn(cns[2], cns[3], phi, "R", alp.prior, bet.prior)$gamma OR.v2 <- OR.values(phi, cys[2], cns[2], cys[3], cns[3], alp.prior, bet.prior, type="R") if (OR.v2>gam2){ return(3) }else{ return(2) } }else if (is.na(cys[3]) | (cover.doses[3]==1)){ gam1 <- optim.gamma.fn(cns[1], cns[2], phi, "L", alp.prior, bet.prior)$gamma OR.v1 <- OR.values(phi, cys[1], cns[1], cys[2], cns[2], alp.prior, bet.prior, type="L") if (OR.v1>gam1){ return(1) }else{ return(2) } }else if (!(is.na(cys[1]) | is.na(cys[3]) | cover.doses[3]==1)){ gam1 <- optim.gamma.fn(cns[1], cns[2], phi, "L", alp.prior, bet.prior)$gamma gam2 <- optim.gamma.fn(cns[2], cns[3], phi, "R", alp.prior, bet.prior)$gamma OR.v1 <- OR.values(phi, cys[1], cns[1], cys[2], cns[2], alp.prior, bet.prior, type="L") OR.v2 <- OR.values(phi, cys[2], cns[2], cys[3], cns[3], alp.prior, bet.prior, type="R") v1 <- OR.v1 > gam1 v2 <- OR.v2 > gam2 if (v1 & !v2){ return(1) }else if (!v1 & v2){ return(3) }else{ return(2) } } } } if (overdose.fn(target, cutoff.eli, cys[2,2], cns[2,2], prior.para)){ cover.doses[2,2] <- 1 overtox <- currdose } if (!is.na(cns[2,3])){ if (overdose.fn(target, cutoff.eli, cys[2,3], cns[2,3], prior.para)){ cover.doses[2,3] <- 1 cover.doses[3,3] <- 1 overtox <- currdose + c(0,1) } } else { cover.doses[2,3] <- NA cover.doses[3,3] <- NA } if (!is.na(cns[3,2])){ if (overdose.fn(target, cutoff.eli, cys[3,2], cns[3,2], prior.para)){ cover.doses[3,2] <- 1 cover.doses[3,3] <- 1 overtox <- currdose + c(1,0) } } else { cover.doses[3,2] <- NA cover.doses[3,3] <- NA } if (!is.na(cns[2,3])&!is.na(cns[3,2])){ if(overdose.fn(target, cutoff.eli, cys[2,3], cns[2,3], prior.para)&overdose.fn(target, cutoff.eli, cys[3,2], cns[3,2], prior.para)){ overtox <- currdose } } if (cutoff.eli != early.stop) { if (currdose==c(1,1) & overdose.fn(target, early.stop, cys[1,1], cns[1,1], prior.para)){ cover.doses[1,1] <- 1 out <- list(target=target, cys=cys, cns=cns, decision="stop", currdose = currdose, nextdose = c(99,99), overtox = c(1,1)) class(out) <- "cfo" return(out) } } # horizontal direction idx.chg.A <- make.decision.1dCFO.fn(target, cys[2,], cns[2,], alp.prior, bet.prior, cover.doses[2,]) - 2 # vertical direction idx.chg.B <- make.decision.1dCFO.fn(target, cys[,2], cns[,2], alp.prior, bet.prior, cover.doses[,2]) - 2 if (idx.chg.A == 1 & idx.chg.B == 1){ ### horizontal and 
vertical only OR.R <- OR.values(target, cys[2,2], cns[2,2], cys[2,3], cns[2,3], alp.prior, bet.prior, type="R") OR.U <- OR.values(target, cys[2,2], cns[2,2], cys[3,2], cns[3,2], alp.prior, bet.prior, type="R") if (OR.R == OR.U){ rand <- rbinom(1,1,0.5) if(rand == 0){ cidx.A <- 1 } else { cidx.B <- 1 } } else if (OR.R > OR.U){ cidx.B <- 1 } else { cidx.A <- 1 } } else if (idx.chg.A == -1 & idx.chg.B == -1){ if (is.na(cys[2,1]) & is.na(cys[1,2])){ cidx.A <- 0 cidx.B <- 0 } else if (is.na(cys[2,1])){ cidx.A <- -1 } else if (is.na(cys[1,2])){ cidx.B <- -1 } else { OR.L <- OR.values(target, cys[2,2], cns[2,2], cys[2,1], cns[2,1], alp.prior, bet.prior, type="L") OR.D <- OR.values(target, cys[2,2], cns[2,2], cys[1,2], cns[1,2], alp.prior, bet.prior, type="L") if (OR.L == OR.D){ rand <- rbinom(1,1,0.5) if(rand == 0){ cidx.A <- -1 } else { cidx.B <- -1 } } else if (OR.L > OR.D){ cidx.B <- -1 } else { cidx.A <- -1 } } } else if (idx.chg.A == 1 & idx.chg.B == -1){ DCR <- make.decision.1dCFO.fn(target, c(cys[1,2],cys[2,2],cys[2,3]), c(cns[1,2],cns[2,2],cns[2,3]), alp.prior, bet.prior, c(cover.doses[1,2],cover.doses[2,2],cover.doses[2,3])) - 2 if (DCR == 1){ cidx.B <- 1 } else if (DCR == -1){ cidx.A <- -1 } } else if (idx.chg.A == -1 & idx.chg.B == 1){ LCU <- make.decision.1dCFO.fn(target, c(cys[2,1],cys[2,2],cys[3,2]), c(cns[2,1],cns[2,2],cns[3,2]), alp.prior, bet.prior, c(cover.doses[2,1],cover.doses[2,2],cover.doses[3,2])) - 2 if (LCU == 1){ cidx.A <- 1 } else if (LCU == -1){ cidx.B <- -1 } } else if (idx.chg.A == 1 & idx.chg.B == 0){ cidx.B <- 1 } else if (idx.chg.A == 0 & idx.chg.B == 1){ cidx.A <- 1 } else if (idx.chg.A == -1 & idx.chg.B == 0){ cidx.B <- -1 } else if (idx.chg.A == 0 & idx.chg.B == -1){ cidx.A <- -1 } nextdose <- currdose+c(cidx.A, cidx.B) decision_values <- c("de-escalation", "stay", "escalation") decision <- decision_values[match(c(cidx.A, cidx.B), c(-1, 0, 1))] out <- list(target=target, cys=cys, cns=cns, decision=decision, currdose = currdose, nextdose = nextdose, overtox = overtox) class(out) <- c("cfo_decision", "cfo") return(out) }
/scratch/gouwar.j/cran-all/cranData/CFO/R/CFO2d.next.R
#' Generate operating characteristics of drug-combination trials in multiple simulations
#'
#' This function is used to conduct multiple simulations of drug-combination trials and obtain the relevant operating characteristics.
#'
#' @usage CFO2d.oc(nsimu = 1000, target, p.true, init.level = c(1,1), ncohort, cohortsize,
#'        prior.para = list(alp.prior = target, bet.prior = 1 - target),
#'        cutoff.eli = 0.95, early.stop = 0.95, seeds = NULL)
#'
#' @param nsimu the total number of trials to be simulated. The default value is 1000.
#' @param target the target DLT rate.
#' @param p.true a matrix representing the true DLT rates under the different dose levels.
#' @param init.level a numeric vector of length 2 representing the initial dose level (default is \code{c(1,1)}).
#' @param ncohort the total number of cohorts.
#' @param cohortsize the number of patients of each cohort.
#' @param prior.para the prior parameters for a beta distribution, set as \code{list(alp.prior = target, bet.prior = 1 - target)}
#'        by default. \code{alp.prior} and \code{bet.prior} represent the parameters of the prior distribution for
#'        the true DLT rate at any dose level. This prior distribution is specified as Beta(\code{alpha.prior}, \code{beta.prior}).
#' @param cutoff.eli the cutoff to eliminate overly toxic doses for safety. We recommend
#'        the default value of \code{cutoff.eli = 0.95} for general use.
#' @param early.stop the threshold value for early stopping. The default value \code{early.stop = 0.95}
#'        generally works well.
#' @param seeds a vector of random seeds for each simulation, for example, \code{seeds = 1:nsimu} (default is \code{NULL}).
#'
#' @note In the example, we set \code{nsimu = 5} for testing time considerations. In reality, \code{nsimu}
#'       is typically set to 1000 or 5000 to ensure the accuracy of the results.
#'
#' @return The \code{CFO2d.oc()} function returns the operating characteristics of the design, together with the
#'         basic simulation setup (\code{$simu.setup}): \cr
#' \itemize{
#' \item p.true: the matrix of the true DLT rates under the different dose levels.
#' \item selpercent: the matrix of the selection percentage of each dose level.
#' \item npatients: a matrix of the averaged number of patients allocated to different doses in one simulation.
#' \item ntox: a matrix of the averaged number of DLT observed for different doses in one simulation.
#' \item MTDsel: the percentage of the correct selection of the MTD.
#' \item MTDallo: the averaged percentage of patients assigned to the target DLT rate.
#' \item oversel: the percentage of selecting a dose above the MTD.
#' \item overallo: the averaged percentage of patients assigned to dose levels with a DLT rate greater than the target.
#' \item averDLT: the averaged total number of DLTs observed.
#' \item percentstop: the percentage of early stopping without selecting the MTD.
#' \item simu.setup: the parameters for the simulation set-up.
#' }
#'
#' @author Jialu Fang, Wenliang Wang, and Guosheng Yin
#'
#' @references Jin H, Yin G (2022). CFO: Calibration-free odds design for phase I/II clinical trials.
#'             \emph{Statistical Methods in Medical Research}, 31(6), 1051-1066. \cr
#'             Wang W, Jin H, Zhang Y, Yin G (2023). Two-dimensional calibration-free odds (2dCFO)
#'             design for phase I drug-combination trials. \emph{Frontiers in Oncology}, 13, 1294258.
#'
#' @export
#' @examples
#' ## Simulate a two-dimensional dose-finding trial with 12 cohorts of size 3 for 5 replications.
#' p.true <- matrix(c(0.05, 0.10, 0.15, 0.30, 0.45,
#'                    0.10, 0.15, 0.30, 0.45, 0.55,
#'                    0.15, 0.30, 0.45, 0.50, 0.60),
#'                  nrow = 3, ncol = 5, byrow = TRUE)
#' target <- 0.3; ncohort <- 12; cohortsize <- 3
#' CFO2doc <- CFO2d.oc(nsimu = 5, target, p.true, init.level = c(1,1), ncohort, cohortsize,
#'                     seeds = 1:5)
#' summary(CFO2doc)
#' plot(CFO2doc)
CFO2d.oc <- function(nsimu = 1000, target, p.true, init.level = c(1,1), ncohort, cohortsize,
                     prior.para = list(alp.prior = target, bet.prior = 1 - target),
                     cutoff.eli = 0.95, early.stop = 0.95, seeds = NULL){

  # Run the CFO2d.simu function nsimu times using lapply
  results <- lapply(1:nsimu, function(i) {
    CFO2d.simu(target, p.true, init.level, ncohort, cohortsize, prior.para,
               cutoff.eli = cutoff.eli, early.stop = early.stop, seed = seeds[i])
  })

  # Tally how often each dose combination is selected as the MTD; trials that
  # stopped early return MTD = c(99, 99) and are skipped, since indexing
  # selpercent with 99 would otherwise fail.
  selpercent <- matrix(0, dim(p.true)[1], dim(p.true)[2])
  for (i in 1:nsimu) {
    MTD.i <- results[[i]]$MTD
    if (all(MTD.i != 99)) {
      selpercent[MTD.i[1], MTD.i[2]] <- selpercent[MTD.i[1], MTD.i[2]] + 1
    }
  }

  # Compute the average of the results
  avg_results <- list()
  avg_results$p.true <- p.true
  avg_results$selpercent <- selpercent / nsimu
  avg_results$npatients <- Reduce('+', lapply(results, `[[`, "npatients")) / nsimu
  avg_results$ntox <- Reduce('+', lapply(results, `[[`, "ntox")) / nsimu
  avg_results$MTDsel <- mean(sapply(results, `[[`, "correct"))
  avg_results$MTDallo <- mean(sapply(results, `[[`, "npercent"))
  avg_results$oversel <- sum(avg_results$selpercent[p.true > target])
  avg_results$overallo <- mean(sapply(results, `[[`, "ptoxic"))
  avg_results$averDLT <- mean(sapply(results, `[[`, "sumDLT"))
  avg_results$percentstop <- mean(sapply(results, `[[`, "earlystop"))
  avg_results$simu.setup <- data.frame(target = target, ncohort = ncohort, cohortsize = cohortsize,
                                       design = "2dCFO", nsimu = nsimu)

  class(avg_results) <- c("cfo_oc", "cfo")
  return(avg_results)
}
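# Illustrative sketch (not part of the original file): a minimal, fast call to
# CFO2d.oc() on a toy 2 x 3 dose grid, followed by direct access to some of the
# list fields assembled above.  The grid, target, and seeds are made-up values;
# real studies use far more simulations (see the @note above).
p.true.toy <- matrix(c(0.10, 0.30, 0.45,
                       0.30, 0.45, 0.60), nrow = 2, byrow = TRUE)
oc.toy <- CFO2d.oc(nsimu = 2, target = 0.3, p.true = p.true.toy,
                   ncohort = 6, cohortsize = 3, seeds = 1:2)
round(100 * oc.toy$selpercent, 1)   # selection percentage of each dose combination
oc.toy$MTDsel                       # proportion of runs selecting a dose with true rate 0.3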
/scratch/gouwar.j/cran-all/cranData/CFO/R/CFO2d.oc.R
#' Select the maximum tolerated dose (MTD) for the real drug combination trial #' #' Select the maximum tolerated dose (MTD) when the real drug combination trial is completed #' #' @usage CFO2d.selectmtd(target, npts, ntox, #' prior.para = list(alp.prior = target, bet.prior = 1 - target), #' cutoff.eli = 0.95, early.stop = 0.95, verbose = TRUE) #' #' @param target the target DLT rate. #' @param npts a matrix containing the number of patients treated at each dose level. #' @param ntox a matrix containing the number of patients who experienced DLT at each dose level. #' @param prior.para the prior parameters for a beta distribution, where set as \code{list(alp.prior = target, bet.prior = 1 - target)} #' by default, \code{alp.prior} and \code{bet.prior} represent the parameters of the prior distribution for #' the true DLT rate at any dose level. This prior distribution is specified as Beta(\code{alpha.prior}, \code{beta.prior}). #' @param cutoff.eli the cutoff to eliminate overly toxic doses for safety. We recommend #' the default value of \code{cutoff.eli = 0.95} for general use. #' @param early.stop the threshold value for early stopping. The default value \code{early.stop = 0.95} #' generally works well. #' @param verbose set \code{verbose = TRUE} to return more details of the results. #' #' @details \code{CFO2d.selectmtd()} selects the MTD based on isotonic estimates of toxicity #' probabilities. \code{CFO2d.selectmtd()} selects as the MTD dose \eqn{j^*}, for which the #' isotonic estimate of the DLT rate is closest to the target. If there #' are ties, we select from the ties the highest dose level when the estimate #' of the DLT rate is smaller than the target, or the lowest dose level #' when the estimate of the DLT rate is greater than the target. The #' isotonic estimates are obtained by the pooled-adjacent-violators algorithm #' (PAVA). #' #' @note The MTD selection and dose escalation/deescalation rule are two independent #' components of the trial design. Isotonic regression is employed to select the MTD after the completion of the trial. #' When appropriate, another dose selection procedure (e.g., based on a fitted logistic model) can be used to select #' the MTD after the completion of the trial using the 2dCFO design. #' #' @return \code{CFO2d.selectmtd()} returns #' \itemize{ #' \item target: the target DLT rate. #' \item MTD: the selected MTD. \code{MTD = (99, 99)} indicates that all tested doses are overly toxic. #' \item p_est: the isotonic estimate of the DLT probablity at each dose and associated \eqn{95\%} credible interval. #' \code{p_est = NA} if all tested doses are overly toxic. #' \item p_est_CI: the credible interval for the isotonic estimate. #' \code{p_est_CI = NA} if all tested doses are overly toxic. #' } #' #' #' @author Jialu Fang, Wenliang Wang, and Guosheng Yin #' #' @references Jin H, Yin G (2022). CFO: Calibration-free odds design for phase I/II clinical trials. #' \emph{Statistical Methods in Medical Research}, 31(6), 1051-1066. \cr #' Wang W, Jin H, Zhang Y, Yin G (2023). Two-dimensional calibration-free odds (2dCFO) #' design for phase I drug-combination trials. \emph{Frontiers in Oncology}, 13, 1294258. \cr #' Bril G, Dykstra R, Pillers C, Robertson T (1984). Algorithm AS 206: Isotonic regression in two independent variables. #' \emph{Journal of the Royal Statistical Society. Series C (Applied Statistics)}, 33(3), 352–357. 
#' #' @examples #' ntox <- matrix(c(0, 0, 2, 0, 0, #' 0, 2, 7, 0, 0, #' 0, 2, 0, 0, 0), #' nrow = 3, ncol = 5, byrow = TRUE) #' #' npts <- matrix(c(3, 0, 12, 0, 0, #' 3, 12, 24, 0, 0, #' 3, 3, 0, 0, 0), #' nrow = 3, ncol = 5, byrow = TRUE) #' selmtd <- CFO2d.selectmtd(target=0.3, npts=npts, ntox=ntox) #' summary(selmtd) #' plot(selmtd) #' #' @import Iso #' @export CFO2d.selectmtd <- function (target, npts, ntox, prior.para=list(alp.prior=target, bet.prior=1-target), cutoff.eli = 0.95, early.stop = 0.95, verbose = TRUE) { y = ntox n = npts if (is.null(prior.para$alp.prior)){ prior.para <- c(prior.para, list(alp.prior=target, bet.prior=1-target)) } alp.prior <- prior.para$alp.prior bet.prior <- prior.para$bet.prior if (nrow(n) > ncol(n) | nrow(y) > ncol(y)) { stop("npts and ntox should be arranged in a way (i.e., rotated) such that for each of them, the number of rows is less than or equal to the number of columns.") } elimi = matrix(0, dim(n)[1], dim(n)[2]) if (cutoff.eli != early.stop) { if (n[1, 1] >= 3) { if (1 - pbeta(target, y[1,1] + alp.prior, n[1,1] - y[1,1] + bet.prior) > early.stop) { elimi[, ] = 1 } } } for (i in 1:dim(n)[1]) { for (j in 1:dim(n)[2]) { if (n[i, j] >= 3) { if (1 - pbeta(target, y[i,j] + alp.prior, n[i,j] - y[i,j] + bet.prior) > cutoff.eli) { elimi[i:dim(n)[1], j] = 1 elimi[i, j:dim(n)[2]] = 1 break } } } } selectdose=NULL if (elimi[1] == 1) { selectdose = c(99, 99) selectdoses = matrix(selectdose, nrow = 1) }else { phat = (y + alp.prior)/(n + alp.prior + bet.prior) phat = round(Iso::biviso(phat, n + alp.prior + bet.prior, warn = TRUE)[, ],2) # phat.out = phat lower.mat=qbeta(0.025,y+alp.prior,n-y+bet.prior) lower.mat=round(Iso::biviso(lower.mat),2) upper.mat=qbeta(0.975,y+alp.prior,n-y+bet.prior) upper.mat=round(Iso::biviso(upper.mat),2) phat.out<-matrix(paste0(format(phat,digits=1),"(",lower.mat,", ",upper.mat,")"),byrow=FALSE,nrow=dim(phat)[1]) colnames(phat.out)=paste0("B",1:dim(n)[2]) rownames(phat.out)=paste0("A",1:dim(n)[1]) phat.out.noCI=round(phat,2) phat.out[n == 0] = "NA" phat[elimi == 1] = 1.1 phat = phat * (n != 0) + (1e-05) * (matrix(rep(1:dim(n)[1], each = dim(n)[2], len = length(n)), dim(n)[1], byrow = T) + matrix(rep(1:dim(n)[2], each = dim(n)[1], len = length(n)), dim(n)[1])) if(is.null(selectdose)){ phat[n == 0] = 10 selectdose = which(abs(phat - target) == min(abs(phat - target)), arr.ind = TRUE) if (length(selectdose) > 2) selectdose = selectdose[1, ] aa = function(x) as.numeric(as.character(x)) selectdoses = matrix(99, nrow = 1, ncol = 2) selectdoses[1, ] = matrix(selectdose, nrow = 1) selectdoses = matrix(selectdoses[selectdoses[, 2] != 99, ], ncol = 2) } colnames(selectdoses) = c("DoseA", "DoseB") } if (verbose == TRUE) { if (selectdoses[1, 1] == 99 && selectdoses[1, 2] == 99) { message("All tested doses are overly toxic. No MTD is selected! \n") out=list(target = target, MTD = selectdoses, p_est = NA, p_est_CI = NA) }else{ out=list(target = target, MTD = selectdoses, p_est=phat.out.noCI, p_est_CI = phat.out) } } else { if (selectdoses[1, 1] == 99 && selectdoses[1, 2] == 99) { message("All tested doses are overly toxic. No MTD is selected! \n") } out = list(target = target, MTD = selectdoses) } class(out)<-c("cfo_sel","cfo") return(out) }
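# Illustrative sketch (not part of the original file): the bivariate isotonic
# regression step that CFO2d.selectmtd() relies on.  Iso::biviso() smooths the
# posterior-mean DLT rates so that they are non-decreasing in both drugs, and
# the dose whose smoothed rate is closest to the target is then selected.  The
# counts below are toy numbers, combined with a Beta(0.3, 0.7) prior.
library(Iso)
npts.toy <- matrix(c(3, 6, 3,
                     3, 9, 0), nrow = 2, byrow = TRUE)
ntox.toy <- matrix(c(0, 1, 2,
                     1, 3, 0), nrow = 2, byrow = TRUE)
phat.toy <- (ntox.toy + 0.3) / (npts.toy + 1)   # posterior mean DLT rates
round(biviso(phat.toy, npts.toy + 1), 2)        # isotonic estimates used for selection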
/scratch/gouwar.j/cran-all/cranData/CFO/R/CFO2d.selectmtd.R
#' Conduct one simulation using the two-dimensional calibration-free odds (2dCFO) design. #' #' In the 2dCFO design, the function is used to conduct one single simulation and find the maximum tolerated dose (MTD). #' #' @usage CFO2d.simu(target, p.true, init.level = c(1,1), ncohort, cohortsize, #' prior.para = list(alp.prior = target, bet.prior = 1 - target), #' cutoff.eli = 0.95, early.stop = 0.95, seed = NULL) #' #' @param target the target DLT rate. #' @param p.true a matrix representing the true DIL rates under the different dose levels. #' @param init.level the dose level assigned to the first cohort. The default value \code{init.level} is \code{c(1,1)}. #' @param ncohort the total number of cohorts. #' @param cohortsize the number of patients of each cohort. #' @param prior.para the prior parameters for a beta distribution, where set as \code{list(alp.prior = target, bet.prior = 1 - target)} #' by default, \code{alp.prior} and \code{bet.prior} represent the parameters of the prior distribution for #' the true DLT rate at any dose level. This prior distribution is specified as Beta(\code{alpha.prior}, \code{beta.prior}). #' @param cutoff.eli the cutoff to eliminate overly toxic doses for safety. We recommend #' the default value of (\code{cutoff.eli = 0.95}) for general use. #' @param early.stop the threshold value for early stopping. The default value \code{early.stop = 0.95} #' generally works well. #' @param seed an integer to be set as the seed of the random number generator for reproducible results. The default is set to \code{NULL}. #' #' @details The \code{CFO2d.simu()} function simulates the operating characteristics of the 2dCFO design #' in a dose-combination trial. #' The early stopping and dose elimination rules are incorporated into the 2dCFO design #' to ensure patient safety and benefit. #' #' #' @return The \code{CFO2d.simu()} function returns a list with the following components: #' \itemize{ #' \item target: the target DLT rate. #' \item MTD: a vector of length 2 representing the recommended dose level. \code{MTD = (99, 99)} indicates that this trial is terminated due to early stopping. #' \item correct: a binary indicator of whether the recommended dose level matches the target DLT rate (1 for yes). #' \item npatients: a matrix of the number of patients allocated to different doses. #' \item ntox: a matrix of the number of DLT observed for different doses. #' \item npercent: the percentage of patients assigned to the target DLT rate. #' \item over.doses: a matrix indicating whether each dose is overdosed or not (1 for yes). #' \item cohortdose: the dose combination assigned to each cohort. #' \item ptoxic: the percentage of subjects assigned to dose levels with a DLT rate greater than the target. #' \item patientDLT: the DLT observed at each cohort. #' \item sumDLT: the total number of DLT observed. #' \item earlystop: a binary indicator of whether the trial is early stopped (1 for yes). #' } #' #' @author Jialu Fang, Wenliang Wang, and Guosheng Yin #' #' @references Jin H, Yin G (2022). CFO: Calibration-free odds design for phase I/II clinical trials. #' \emph{Statistical Methods in Medical Research}, 31(6), 1051-1066. \cr #' Wang W, Jin H, Zhang Y, Yin G (2023). Two-dimensional calibration-free odds (2dCFO) #' design for phase I drug-combination trials. \emph{Frontiers in Oncology}, 13, 1294258. #' #' @export #' #' @examples #' ## Simulate a two-dimensional dose-finding trial with 20 cohorts of size 3. 
#' p.true <- matrix(c(0.05, 0.10, 0.15, 0.30, 0.45, #' 0.10, 0.15, 0.30, 0.45, 0.55, #' 0.15, 0.30, 0.45, 0.50, 0.60), #' nrow = 3, ncol = 5, byrow = TRUE) #' target <- 0.3; ncohort <- 20; cohortsize <- 3 #' CFO2dtrial <- CFO2d.simu(target, p.true, init.level = c(1,1), ncohort, cohortsize, seed = 1) #' summary(CFO2dtrial) #' plot(CFO2dtrial) CFO2d.simu <- function(target, p.true, init.level=c(1,1), ncohort, cohortsize, prior.para=list(alp.prior=target, bet.prior=1-target), cutoff.eli=0.95, early.stop=0.95, seed=NULL){ # target: Target DIL rate # p.true: True DIL rates under the different dose levels # ncohort: The number of cohorts # cohortsize: The sample size in each cohort # alp.prior, bet.prior: prior parameters if (!is.null(seed)){ set.seed(seed) } earlystop <- 0 ndose.A <- length(p.true[,1]) ndose.B <- length(p.true[1,]) cidx.A <- init.level[1] cidx.B <- init.level[2] obs <- list() ays <- matrix(0, ndose.A, ndose.B) # number of responses for different doses. ans <- matrix(0, ndose.A, ndose.B) # number of subject for different doses. tover.doses <- matrix(0, ndose.A, ndose.B) # Whether each dose is overdosed or not, 1 yes # Initialize vectors to store dose combinations and number of DLTs for each cohort # simu.res.dose <- vector("list", ncohort) # Change to list to store dose pairs simu.res.dose <- matrix(nrow = ncohort, ncol = 2) simu.res.DLT <- matrix(nrow = ncohort, ncol = cohortsize) overdose.2d <- function(phi, threshold, obs, prior.para=list(alp.prior=phi, bet.prior=1-phi)){ y <- obs$y n <- obs$n alp.prior <- prior.para$alp.prior bet.prior <- prior.para$bet.prior pp <- post.prob.fn(phi, y, n, alp.prior, bet.prior) if ((pp >= threshold) & (obs$n>=3)){ return(TRUE) }else{ return(FALSE) } } post.prob.fn <- function(phi, y, n, alp.prior=phi, bet.prior=1-phi){ alp <- alp.prior + y bet <- bet.prior + n - y 1 - pbeta(phi, alp, bet) } for (i in 1:ncohort){ pc <- p.true[cidx.A, cidx.B] if (!is.null(seed)) { iter_seed <- (seed * 100) + i set.seed(iter_seed) } cres <- rbinom(cohortsize, 1, pc) ays[cidx.A, cidx.B] <- ays[cidx.A, cidx.B] + sum(cres) ans[cidx.A, cidx.B] <- ans[cidx.A, cidx.B] + cohortsize simu.res.dose[i, ] <- c(cidx.A, cidx.B) simu.res.DLT[i,] <- cres cy <- ays[cidx.A, cidx.B] cn <- ans[cidx.A, cidx.B] obs <- c(list(y=cy, n=cn, ays=ays, ans=ans, cidx.A=cidx.A, cidx.B=cidx.B), obs) if (overdose.2d(target, cutoff.eli, obs)){ tover.doses[cidx.A:ndose.A, cidx.B:ndose.B] <- 1 } if (cidx.A == 1 & cidx.B == 1) { if (cutoff.eli != early.stop) { if (overdose.2d(target, early.stop, obs)){ tover.doses[1:1] <- 1 } } } if (tover.doses[1,1] == 1){ earlystop <- 1 break() } if (cidx.A!=1 & cidx.B!=1 & cidx.A!=ndose.A & cidx.B!=ndose.B){ # no boundary cys <- ays[(cidx.A-1):(cidx.A+1), (cidx.B-1):(cidx.B+1)] cns <- ans[(cidx.A-1):(cidx.A+1), (cidx.B-1):(cidx.B+1)] cover.doses <- tover.doses[(cidx.A-1):(cidx.A+1), (cidx.B-1):(cidx.B+1)] } else if (cidx.A==1 & cidx.B==1){ # (1, 1) cys <- rbind(c(NA,NA,NA),cbind(c(NA,NA),ays[1:2,1:2])) cns <- rbind(c(NA,NA,NA),cbind(c(NA,NA),ans[1:2,1:2])) cover.doses <- rbind(c(NA,NA,NA),cbind(c(NA,NA),tover.doses[1:2,1:2])) } else if (cidx.A==ndose.A & cidx.B==ndose.B){ # (nA, nB) cys <- rbind(cbind(ays[(cidx.A-1):cidx.A,(cidx.B-1):cidx.B],c(NA,NA)), c(NA,NA,NA)) cns <- rbind(cbind(ans[(cidx.A-1):cidx.A,(cidx.B-1):cidx.B],c(NA,NA)), c(NA,NA,NA)) cover.doses <- rbind(cbind(tover.doses[(cidx.A-1):cidx.A,(cidx.B-1):cidx.B],c(NA,NA)), c(NA,NA,NA)) } else if (cidx.A==1 & cidx.B==ndose.B){ # (1, nB) cys <- 
rbind(c(NA,NA,NA),cbind(ays[1:2,(cidx.B-1):cidx.B],c(NA,NA))) cns <- rbind(c(NA,NA,NA),cbind(ans[1:2,(cidx.B-1):cidx.B],c(NA,NA))) cover.doses <- rbind(c(NA,NA,NA),cbind(tover.doses[1:2,(cidx.B-1):cidx.B],c(NA,NA))) } else if (cidx.A==ndose.A & cidx.B==1){ # (nA, 1) cys <- rbind(cbind(c(NA,NA), ays[(cidx.A-1):cidx.A,1:2]),c(NA,NA,NA)) cns <- rbind(cbind(c(NA,NA), ans[(cidx.A-1):cidx.A,1:2]),c(NA,NA,NA)) cover.doses <- rbind(cbind(c(NA,NA), tover.doses[(cidx.A-1):cidx.A,1:2]),c(NA,NA,NA)) } else if (cidx.A==1 & cidx.B!=1){ # (1, 2:(nB-1)) cys <- rbind(c(NA,NA,NA), ays[1:2, (cidx.B-1):(cidx.B+1)]) cns <- rbind(c(NA,NA,NA), ans[1:2, (cidx.B-1):(cidx.B+1)]) cover.doses <- rbind(c(NA,NA,NA), tover.doses[1:2, (cidx.B-1):(cidx.B+1)]) } else if (cidx.A!=1 & cidx.B==1){ # (2:(nA-1), 1) cys <- cbind(c(NA,NA,NA), ays[(cidx.A-1):(cidx.A+1), 1:2]) cns <- cbind(c(NA,NA,NA), ans[(cidx.A-1):(cidx.A+1), 1:2]) cover.doses <- cbind(c(NA,NA,NA), tover.doses[(cidx.A-1):(cidx.A+1), 1:2]) } else if (cidx.A==ndose.A & cidx.B!=ndose.B){ # (nA, 2:(nB-1)) cys <- rbind(ays[(ndose.A-1):ndose.A, (cidx.B-1):(cidx.B+1)], c(NA,NA,NA)) cns <- rbind(ans[(ndose.A-1):ndose.A, (cidx.B-1):(cidx.B+1)], c(NA,NA,NA)) cover.doses <- rbind(tover.doses[(ndose.A-1):ndose.A, (cidx.B-1):(cidx.B+1)], c(NA,NA,NA)) } else if (cidx.A!=ndose.A & cidx.B==ndose.B){ # (2:(nA-1), nB) cys <- cbind(ays[(cidx.A-1):(cidx.A+1), (cidx.B-1):cidx.B], c(NA,NA,NA)) cns <- cbind(ans[(cidx.A-1):(cidx.A+1), (cidx.B-1):cidx.B], c(NA,NA,NA)) cover.doses <- cbind(tover.doses[(cidx.A-1):(cidx.A+1), (cidx.B-1):cidx.B], c(NA,NA,NA)) } else { message('no such case') } idx <- CFO2d.next(target, cys, cns, c(cidx.A, cidx.B), prior.para=prior.para, cutoff.eli=cutoff.eli, early.stop=early.stop, seed=seed)$nextdose cidx.A <- idx[1] cidx.B <- idx[2] } if (earlystop==0){ MTD <- CFO2d.selectmtd(target, ans, ays)$MTD }else{ MTD <- c(99,99) } correct <- 0 if(MTD[1]>ndose.A | MTD[2]>ndose.B){ correct <- 0 } else if (length(MTD)!=2){ correct <- 0 }else if (p.true[MTD[1],MTD[2]]==target){ correct <- 1 } npercent <- 0 for (j in 1:ndose.A) { for (k in 1:ndose.B) { if (p.true[j,k]==target){ npercent <- npercent + ans[j,k] } } } npercent <- npercent/(ncohort*cohortsize) ptoxic <- 0 for (j in 1:ndose.A) { for (k in 1:ndose.B) { if (p.true[j,k]>target){ ptoxic <- ptoxic + ans[j,k] } } } ptoxic <- ptoxic/(ncohort*cohortsize) # simu.res <- list(dose = simu.res.dose, DLT = simu.res.DLT) out<-list(target=target, MTD=MTD, correct=correct, npatients=ans, ntox=ays, npercent=npercent, over.doses=tover.doses, cohortdose=simu.res.dose, ptoxic=ptoxic, patientDLT = simu.res.DLT, sumDLT=sum(simu.res.DLT), earlystop=earlystop) class(out) <- c("cfo_trial", "cfo") return(out) }
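# Illustrative sketch (not part of the original file): the dose-elimination rule
# applied by overdose.2d() above is the posterior probability Pr(p > target | y, n)
# under a Beta(target, 1 - target) prior.  With 3 DLTs out of 3 patients at the
# lowest dose combination and target = 0.3, this probability exceeds 0.95, so the
# trial would be stopped early (the check also requires at least 3 patients).
phi <- 0.3; y.toy <- 3; n.toy <- 3
1 - pbeta(phi, phi + y.toy, (1 - phi) + n.toy - y.toy)   # approximately 0.99 > 0.95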
/scratch/gouwar.j/cran-all/cranData/CFO/R/CFO2d.simu.R
#' Determination of the dose level for next cohort in the accumulative calibration-free odds (aCFO) design
#'
#' In the aCFO design, the function is used to determine the dose movement based on the toxicity outcomes of the enrolled cohorts.
#'
#' @usage aCFO.next(target, ays, ans, currdose,
#'        prior.para = list(alp.prior = target, bet.prior = 1 - target),
#'        cutoff.eli = 0.95, early.stop = 0.95)
#'
#' @param target the target DLT rate.
#' @param ays the cumulative numbers of DLTs observed in patients for all dose levels.
#' @param ans the cumulative numbers of patients for all dose levels.
#' @param currdose the current dose level.
#' @param prior.para the prior parameters for a beta distribution, set as \code{list(alp.prior = target, bet.prior = 1 - target)}
#'        by default. \code{alp.prior} and \code{bet.prior} represent the parameters of the prior distribution for
#'        the true DLT rate at any dose level. This prior distribution is specified as Beta(\code{alpha.prior}, \code{beta.prior}).
#' @param cutoff.eli the cutoff to eliminate overly toxic doses for safety. We recommend
#'        the default value of \code{cutoff.eli = 0.95} for general use.
#' @param early.stop the threshold value for early stopping. The default value \code{early.stop = 0.95}
#'        generally works well.
#'
#' @details The aCFO design is an extension of the CFO design. It integrates dose information from all positions (ranging
#'          from the lowest to the highest dose levels) into the decision-making process of the trial. Before assigning the
#'          dose level for a new cohort, aCFO compares the evidence from the current dose level with that from all doses to
#'          its left and right. In contrast, the original CFO design makes dose-allocation decisions by examining only one
#'          dose level above and one below the current dose level. Consequently, the aCFO design enhances the utilization of
#'          information while maintaining the characteristics of the CFO design (model-free and calibration-free).
#'          Additionally, the aCFO design preserves the same early stopping and dose elimination criteria as the CFO design.
#'
#' @note The dose level indicated by \code{overtox} and all the dose levels above experience over-toxicity, and these dose
#'       levels will be eliminated.
#'
#' @return The \code{aCFO.next()} function returns a list object comprising the following elements:
#' \itemize{
#' \item target: the target DLT rate.
#' \item ays: the cumulative counts of DLTs observed at all dose levels.
#' \item ans: the cumulative counts of patients treated at all dose levels.
#' \item decision: the decision in the aCFO design, where \code{de-escalation}, \code{stay}, and \code{escalation} represent
#'       the dose movements, and \code{stop} indicates stopping the experiment.
#' \item currdose: the current dose level.
#' \item nextdose: the recommended dose level for the next cohort. \code{nextdose = 99} indicates that the trial is
#'       terminated due to early stopping.
#' \item overtox: the dose level at which over-toxicity first occurs. The dose level indicated by \code{overtox} and all
#'       the dose levels above experience over-toxicity. \code{overtox = NA} signifies that the occurrence of over-toxicity
#'       did not happen.
#' }
#'
#' @author Jialu Fang, Wenliang Wang, and Guosheng Yin
#'
#' @references Jin H, Yin G (2022). CFO: Calibration-free odds design for phase I/II clinical trials.
#'             \emph{Statistical Methods in Medical Research}, 31(6), 1051-1066.
#' #' @examples #' ## determine the dose level for the next cohort of new patients #' ays <- c(0, 0, 1, 0, 0, 0, 0); ans <- c(3, 3, 6, 0, 0, 0, 0) #' decision <- aCFO.next(target = 0.2, ays = ays, ans = ans, currdose = 3, #' prior.para = list(alp.prior = 0.2, bet.prior = 0.8)) #' summary(decision) #' #' ays <- c(3, 0, 0, 0, 0, 0, 0); ans <- c(3, 0, 0, 0, 0, 0, 0) #' decision <- aCFO.next(target = 0.2, ays = ays, ans = ans, currdose = 1, #' prior.para = list(alp.prior = 0.2, bet.prior = 0.8)) #' summary(decision) #' #' ays <- c(0, 0, 0, 0, 0, 0, 3); ans <- c(3, 3, 3, 3, 3, 3, 3) #' decision <- aCFO.next(target = 0.2, ays = ays, ans = ans, currdose = 7, #' prior.para = list(alp.prior = 0.2, bet.prior = 0.8)) #' summary(decision) #' #' @import stats #' @export aCFO.next <- function(target, ays, ans, currdose, prior.para=list(alp.prior=target, bet.prior=1-target), cutoff.eli=0.95, early.stop=0.95){ ############################################################################### ###############define the functions used for main function##################### ############################################################################### # posterior probability of pj >= phi given data post.prob.fn <- function(phi, y, n, alp.prior=0.1, bet.prior=0.1){ alp <- alp.prior + y bet <- bet.prior + n - y 1 - pbeta(phi, alp, bet) } overdose.fn <- function(phi, threshold, prior.para=list()){ y <- prior.para$y n <- prior.para$n alp.prior <- prior.para$alp.prior bet.prior <- prior.para$bet.prior pp <- post.prob.fn(phi, y, n, alp.prior, bet.prior) # print(data.frame("prob of overdose" = pp)) if ((pp >= threshold) & (prior.para$n>=3)){ return(TRUE) }else{ return(FALSE) } } prob.int <- function(phi, y1, n1, y2, n2, alp.prior, bet.prior){ alp1 <- alp.prior + y1 alp2 <- alp.prior + y2 bet1 <- bet.prior + n1 - y1 bet2 <- bet.prior + n2 - y2 fn.min <- function(x){ dbeta(x, alp1, bet1)*(1-pbeta(x, alp2, bet2)) } fn.max <- function(x){ pbeta(x, alp1, bet1)*dbeta(x, alp2, bet2) } const.min <- integrate(fn.min, lower=0, upper=1)$value const.max <- integrate(fn.max, lower=0, upper=1)$value p1 <- integrate(fn.min, lower=0, upper=phi)$value/const.min p2 <- integrate(fn.max, lower=0, upper=phi)$value/const.max list(p1=p1, p2=p2) } OR.values <- function(phi, y1, n1, y2, n2, alp.prior, bet.prior, type){ ps <- prob.int(phi, y1, n1, y2, n2, alp.prior, bet.prior) if (type=="L"){ pC <- 1 - ps$p2 pL <- 1 - ps$p1 oddsC <- pC/(1-pC) oddsL <- pL/(1-pL) OR <- oddsC*oddsL }else if (type=="R"){ pC <- 1 - ps$p1 pR <- 1 - ps$p2 oddsC <- pC/(1-pC) oddsR <- pR/(1-pR) OR <- (1/oddsC)/oddsR } return(OR) } OR.union.values <- function(phi, cans, cays, alp.prior, bet.prior, type){ ndose <- length(cays) if (type=="L"){ OR.list <- rep(0, ndose-1) for (i in 1:(ndose-1)){ OR.list[i] <- OR.values(phi, cays[i], cans[i], cays[ndose], cans[ndose], alp.prior, bet.prior, type) } }else if (type=="R"){ OR.list <- rep(0, ndose-1) for (i in 2:ndose){ OR.list[i-1] <- OR.values(phi, cays[1], cans[1], cays[i], cans[i], alp.prior, bet.prior, type) } } return(sum(OR.list)) } All.OR.table <- function(phi, n1, n2, type, alp.prior, bet.prior){ ret.mat <- matrix(rep(0, (n1+1)*(n2+1)), nrow=n1+1) for (y1cur in 0:n1){ for (y2cur in 0:n2){ ret.mat[y1cur+1, y2cur+1] <- OR.values(phi, y1cur, n1, y2cur, n2, alp.prior, bet.prior, type) } } ret.mat } # compute the marginal prob when lower < phiL/phiC/phiR < upper # i.e., Pr(Y=y|lower<phi<upper) margin.phi <- function(y, n, lower, upper){ C <- 1/(upper-lower) fn <- function(phi) { dbinom(y, n, phi)*C } integrate(fn, 
lower=lower, upper=upper)$value } # Obtain the table of marginal distribution of (y1, y2) # after intergrate out (phi1, phi2) # under H0 and H1 # H0: phi1=phi, phi < phi2 < 2phi # H1: phi2=phi, 0 < phi1 < phi margin.ys.table <- function(n1, n2, phi, hyperthesis){ if (hyperthesis=="H0"){ p.y1s <- dbinom(0:n1, n1, phi) p.y2s <- sapply(0:n2, margin.phi, n=n2, lower=phi, upper=2*phi) }else if (hyperthesis=="H1"){ p.y1s <- sapply(0:n1, margin.phi, n=n1, lower=0, upper=phi) p.y2s <- dbinom(0:n2, n2, phi) } p.y1s.mat <- matrix(rep(p.y1s, n2+1), nrow=n1+1) p.y2s.mat <- matrix(rep(p.y2s, n1+1), nrow=n1+1, byrow=TRUE) margin.ys <- p.y1s.mat * p.y2s.mat margin.ys } # Obtain the optimal gamma for the hypothesis test optim.gamma.fn <- function(n1, n2, phi, type, alp.prior, bet.prior){ OR.table <- All.OR.table(phi, n1, n2, type, alp.prior, bet.prior) ys.table.H0 <- margin.ys.table(n1, n2, phi, "H0") ys.table.H1 <- margin.ys.table(n1, n2, phi, "H1") argidx <- order(OR.table) sort.OR.table <- OR.table[argidx] sort.ys.table.H0 <- ys.table.H0[argidx] sort.ys.table.H1 <- ys.table.H1[argidx] n.tol <- length(sort.OR.table) if (type=="L"){ errs <- rep(0, n.tol-1) for (i in 1:(n.tol-1)){ err1 <- sum(sort.ys.table.H0[1:i]) err2 <- sum(sort.ys.table.H1[(i+1):n.tol]) err <- err1 + err2 errs[i] <- err } min.err <- min(errs) if (min.err > 1){ gam <- 0 min.err <- 1 }else { minidx <- which.min(errs) gam <- sort.OR.table[minidx] } }else if (type=='R'){ errs <- rep(0, n.tol-1) for (i in 1:(n.tol-1)){ err1 <- sum(sort.ys.table.H1[1:i]) err2 <- sum(sort.ys.table.H0[(i+1):n.tol]) err <- err1 + err2 errs[i] <- err } min.err <- min(errs) if (min.err > 1){ gam <- 0 min.err <- 1 }else { minidx <- which.min(errs) gam <- sort.OR.table[minidx] } } list(gamma=gam, min.err=min.err) } optim.gamma.union.fn <- function(cans, phi, type, alp.prior, bet.prior){ ndose <- length(cans) if (type == "L"){ gamma.list <- rep(0, ndose-1) for (i in 1:(ndose-1)){ gamma.list[i] <- optim.gamma.fn(cans[i], cans[ndose], phi, type, alp.prior, bet.prior)$gamma } }else if (type == "R"){ gamma.list <- rep(0, ndose-1) for (i in 2:ndose){ gamma.list[i-1] <- optim.gamma.fn(cans[1], cans[i], phi, type, alp.prior, bet.prior)$gamma } } return(sum(gamma.list)) } ############################################################################### ############################MAIN DUNCTION###################################### ############################################################################### ndose <- length(ays) if (is.null(prior.para$alp.prior)){ prior.para <- c(prior.para, list(alp.prior=target, bet.prior=1-target)) } alp.prior <- prior.para$alp.prior bet.prior <- prior.para$bet.prior tover.doses <- rep(0, ndose) for (i in 1:ndose){ cy <- ays[i] cn <- ans[i] prior.para <- c(list(y=cy, n=cn), list(alp.prior=alp.prior, bet.prior=bet.prior)) if (overdose.fn(target, cutoff.eli, prior.para)){ tover.doses[i:ndose] <- 1 break() } } if (cutoff.eli != early.stop) { cy <- ays[1] cn <- ans[1] prior.para <- c(list(y=cy, n=cn),list(alp.prior=alp.prior, bet.prior=bet.prior)) if (overdose.fn(target, early.stop, prior.para)){ tover.doses[1:ndose] <- 1 } } position <- which(tover.doses == 1)[1] prior.para <- c(list(alp.prior=alp.prior, bet.prior=bet.prior)) if ((tover.doses[1] == 1) & (position == 1)){ index <- NA decision <- "stop" } else { if (currdose!=1){ cys <- ays[(currdose-1):(currdose+1)] cns <- ans[(currdose-1):(currdose+1)] cover.doses <- tover.doses[(currdose-1):(currdose+1)] #cover.doses <- c(0, 0, 0) # No elimination rule }else{ cys <- c(NA, 
ays[1:(currdose+1)]) cns <- c(NA, ans[1:(currdose+1)]) cover.doses <- c(NA, tover.doses[1:(currdose+1)]) #cover.doses <- c(NA, 0, 0) # No elimination rule } if (cover.doses[2] == 1){ index <- -1 decision <- "de-escalation" } else{ if (is.na(cys[1]) & (cover.doses[3]==1)){ index <- 0 decision <- "stay" } else if (is.na(cys[1]) & (!(cover.doses[3]==1))){ OR.v2 <- OR.union.values(target, ans[currdose:ndose], ays[currdose:ndose], alp.prior, bet.prior, type="R") gam2 <- optim.gamma.union.fn(ans[currdose:ndose], target, "R", alp.prior, bet.prior) if (OR.v2>gam2){ index <- 1 decision <- "escalation" }else{ index <- 0 decision <- "stay" } } else if (is.na(cys[3]) | (cover.doses[3]==1)){ gam1 <- optim.gamma.union.fn(ans[1:currdose], target, "L", alp.prior, bet.prior) OR.v1 <- OR.union.values(target, ans[1:currdose], ays[1:currdose], alp.prior, bet.prior, type="L") if (OR.v1>gam1){ index <- -1 decision <- "de-escalation" }else{ index <- 0 decision <- "stay" } } else if (!(is.na(cys[1]) | is.na(cys[3]) | cover.doses[3]==1)){ gam1 <- optim.gamma.union.fn(ans[1:currdose], target, "L", alp.prior, bet.prior) gam2 <- optim.gamma.union.fn(ans[currdose:ndose], target, "R", alp.prior, bet.prior) OR.v1 <- OR.union.values(target, ans[1:currdose], ays[1:currdose], alp.prior, bet.prior, type="L") OR.v2 <- OR.union.values(target, ans[currdose:ndose], ays[currdose:ndose], alp.prior, bet.prior, type="R") v1 <- OR.v1 > gam1 v2 <- OR.v2 > gam2 if (v1 & !v2){ index <- -1 decision <- "de-escalation" }else if (!v1 & v2){ index <- 1 decision <- "escalation" }else{ index <- 0 decision <- "stay" } } } } if (decision=='stop'){ nextdose <- 99 }else{ nextdose <- currdose+index } out <- list(target=target, ays=ays, ans=ans, decision=decision, currdose = currdose, nextdose=nextdose, overtox=position) class(out) <- c("cfo_decision", "cfo") return(out) }
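# Illustrative sketch (not part of the original file): contrasting the local CFO
# rule with the accumulative aCFO rule on the data used in the roxygen example.
# aCFO.next() pools evidence from all dose levels, whereas CFO.next() (the
# one-dimensional rule defined elsewhere in this package, called positionally in
# lateonset.next()) would only use the current dose and its two neighbours.
ays.toy <- c(0, 0, 1, 0, 0, 0, 0)
ans.toy <- c(3, 3, 6, 0, 0, 0, 0)
aCFO.next(target = 0.2, ays = ays.toy, ans = ans.toy, currdose = 3)$decision
# The corresponding local-information call would be (signature assumed from
# the way lateonset.next() invokes it):
# CFO.next(0.2, ays.toy[2:4], ans.toy[2:4], 3)$decision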
/scratch/gouwar.j/cran-all/cranData/CFO/R/aCFO.next.R
#' Determination of the dose level for next cohort in the calibration-free odds type (CFO-type) design with late-onset toxicity #' #' The function is used to determine the next dose level in the CFO-type design with late-onset toxicity, specifically, including #' time-to-event CFO (TITE-CFO) design, fractional CFO (fCFO) design, benchmark CFO design, #' time-to-event accumulative CFO (TITE-aCFO) design, fractional accumulative CFO (f-aCFO) design #' and benchmark aCFO design. #' #' @usage lateonset.next(design, target, p.true, currdose, assess.window, enter.times, dlt.times, #' current.t, doses, prior.para = list(alp.prior = target, bet.prior = 1 - target), #' cutoff.eli = 0.95, early.stop = 0.95) #' #' @param design option for selecting different designs, which can be set as \code{'TITE-CFO'}, \code{'TITE-aCFO'}, #' \code{'fCFO'}, \code{'f-aCFO'}, \code{'bCFO'}, and \code{'b-aCFO'}. Specifically, \code{'bCFO'} refers #' to the benchmark CFO design, and \code{'b-aCFO'} denotes the benchmark aCFO design. #' @param target the target DLT rate. #' @param p.true the true DLT rates under the different dose levels. #' @param currdose the current dose level. #' @param assess.window the maximal assessment window size. #' @param enter.times the time that each participant enters the trial. #' @param dlt.times the time to DLT for each subject in the trial. If no DLT occurs for a subject, #' \code{dlt.times} is set to 0. #' @param current.t the current time. #' @param doses the dose level for each subject in the trial. #' @param prior.para the prior parameters for a beta distribution, where set as \code{list(alp.prior = target, bet.prior = 1 - target)} #' by default, \code{alp.prior} and \code{bet.prior} represent the parameters of the prior distribution for #' the true DLT rate at any dose level. This prior distribution is specified as Beta(\code{alpha.prior}, \code{beta.prior}). #' @param cutoff.eli the cutoff to eliminate overly toxic doses for safety. We recommend #' the default value of \code{cutoff.eli = 0.95} for general use. #' @param early.stop the threshold value for early stopping. The default value \code{early.stop = 0.95} #' generally works well. #' #' @details Late-onset outcomes commonly occur in phase I trials involving targeted agents or immunotherapies. The TITE #' framework and fractional framework serve as two imputation methods to handle pending data #' related to late-onset outcomes. This approach extends the CFO and aCFO designs to integrate time information #' for delayed outcomes, leading to the development of TITE-CFO, fCFO, TITE-aCFO, and f-aCFO designs. \cr #' In the TITE framework context, an assumption about the distribution of time to DLT must be pre-specified, #' whereas the fractional framework does not require justification for a specific distribution of the time to #' DLT. Consequently, fCFO and f-aCFO adapt to a more diverse range of scenarios.\cr #' The function \code{lateonset.next()} also provides the option to execute #' the benchmark CFO and benchmark aCFO design. These two methods await complete observation of toxicity outcomes for #' the previous cohorts before determining the next dose assignment. This enhances precision but comes at the #' expense of a prolonged trial duration. #' #' @return The \code{lateonset.next()} function returns #' \itemize{ #' \item target: the target DLT rate. 
#' \item decision: the decision in the CFO design, where \code{left}, \code{stay}, and \code{right} represent the #' movement directions, and \code{stop} indicates stopping the experiment. #' \item currdose: the current dose level. #' \item nextdose: the recommended dose level for the next cohort. #' \item overtox: the situation regarding which position experiences over-toxicity. The dose level indicated by #' \code{overtox} and all the dose levels above experience over-toxicity. \code{overtox = NA} signifies that the #' occurrence of over-toxicity did not happen. #' \item over.doses: a vector indicating whether the dose level (from the first to last dose level) is over-toxic #' or not (1 for yes). #' } #' #' @author Jialu Fang, Wenliang Wang, and Guosheng Yin #' #' @references Jin H, Yin G (2022). CFO: Calibration-free odds design for phase I/II clinical trials. #' \emph{Statistical Methods in Medical Research}, 31(6), 1051-1066. \cr #' Jin H, Yin G (2023). Time‐to‐event calibration‐free odds design: A robust efficient design for #' phase I trials with late‐onset outcomes. \emph{Pharmaceutical Statistics}, 22(5), 773–783.\cr #' Yin G, Zheng S, Xu J (2013). Fractional dose-finding methods with late-onset toxicity in #' phase I clinical trials. \emph{Journal of Biopharmaceutical Statistics}, 23(4), 856-870. #' @import survival #' @importFrom utils tail #' @export #' #' @examples #' target <- 0.2; p.true <- c(0.01, 0.07, 0.20, 0.35, 0.50, 0.65, 0.80) #' enter.times<- c(0, 0.266, 0.638, 1.54, 2.48, 3.14, 3.32, 4.01, 4.39, 5.38, 5.76, #' 6.54, 6.66, 6.93, 7.32, 7.66, 8.14, 8.74) #' dlt.times<- c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0.610, 0, 2.98, 0, 0, 1.95, 0, 0, 1.48) #' current.t<- 9.41 #' doses<-c(1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 3, 3, 3, 4, 4, 4) #' ## determine the dose level for the next cohort using the TITE-CFO design #' decision <- lateonset.next(design = 'TITE-CFO', target, p.true, currdose = 4, assess.window = 3, #' enter.times, dlt.times, current.t, doses) #' summary(decision) #' ## determine the dose level for the next cohort using the TITE-aCFO design #' decision <- lateonset.next(design = 'TITE-aCFO', target, p.true, currdose = 4, assess.window = 3, #' enter.times, dlt.times, current.t, doses) #' summary(decision) #' ## determine the dose level for the next cohort using the f-CFO design #' decision <- lateonset.next(design = 'fCFO', target, p.true, currdose = 4, assess.window = 3, #' enter.times, dlt.times, current.t, doses) #' summary(decision) #' ## determine the dose level for the next cohort using the f-aCFO design #' decision <- lateonset.next(design = 'f-aCFO', target, p.true, currdose = 4, assess.window = 3, #' enter.times, dlt.times, current.t, doses) #' summary(decision) #' ## determine the dose level for the next cohort using the benchmark CFO design #' decision <- lateonset.next(design = 'bCFO', target, p.true, currdose = 4, assess.window = 3, #' enter.times, dlt.times, current.t, doses) #' summary(decision) #' ## determine the dose level for the next cohort using the benchmark aCFO design #' decision <- lateonset.next(design='b-aCFO', target, p.true, currdose = 4, assess.window = 3, #' enter.times, dlt.times, current.t, doses) #' summary(decision) #' lateonset.next <- function(design, target, p.true, currdose, assess.window, enter.times, dlt.times, current.t, doses, prior.para=list(alp.prior=target, bet.prior=1-target), cutoff.eli=0.95, early.stop=0.95){ ############################################################################### ###############define the functions 
used for main function##################### ############################################################################### # Below functions are to impute missing y #------------------------------------------------------------------------------------------ fracImpute <- function(enter.times, dlt.times, current.time, assess.window){ #args: # enter.times: The enter times of the patients, a vector # dlt.times: The DLT times of the patients, if no DLT, 0, a vector # current.time: Current time point: a value # assess.window: Observing window size #return: # ym: Imputed y for the patient with no DLT and follow-up time < assess.window assesstime = enter.times+assess.window; dlt.times[dlt.times==0]= assess.window+1; yo = (dlt.times<=assess.window)*(assesstime<=current.time)+(dlt.times<=(current.time-enter.times))*(current.time<assesstime); No.impute <- FALSE if (sum(yo)==0) { No.impute <- TRUE ym <- yo #stop("fraction design takes effect when at least one DLT has been observed") } if (sum(yo)!=0){ otime = yo*dlt.times+(1-yo)*((current.time-enter.times)*(current.time<assesstime)+assess.window*(assesstime<=current.time)) kmfit = survival::survfit(survival::Surv(otime,yo)~1) ym = yo for (i in 1:length(yo)){ if (current.time<assesstime[i] & yo[i]==0){ ym[i]=(kmfit$surv[tail(which(kmfit$time<=(current.time-assesstime[i]+assess.window+0.00001)),n=1)]- kmfit$surv[tail(which(kmfit$time<=assess.window),n=1)])/ kmfit$surv[tail(which(kmfit$time<=(current.time-assesstime[i]+assess.window+0.00001)),n=1)] } } } obsIdxs <- current.time >= assesstime obsIdxs[yo==1] <- TRUE res <- list(y.impute=ym, y.raw=yo, obsIdxs=obsIdxs, No.impute=No.impute) res } TITEImpute.one <- function(followup.times, assess.window, y, n, prior.paras){ #args: # followup.times: The follow-up times of the pending patients at the dose level # assess.window: Observing window size # y: Num of Observed DLT at the dose level # n: Num of patients with observed results at the dose level # prior.paras: a vector of 2, prior when estimating ptilde #return: # ym: imputed y p.tilde <- (y+prior.paras[1])/(n+sum(prior.paras)) #ym <- p.tilde * (1-followup.times/assess.window) ym <- p.tilde * (1-followup.times/assess.window) /((1-p.tilde)+p.tilde * (1-followup.times/assess.window)) # ym <- p.tilde * (1-followup.times/assess.window) /(1-p.tilde) # ym[ym >1] <- 1 # trunc the value ym } TITEImpute <- function(enter.times, dlt.times, current.time, assess.window, dose.levels, ndose, prior.paras){ #args: # enter.times: The enter times of the patients, a vector # dlt.times: The DLT times of the patients, if no DLT before assess.window, 0, a vector # current.time: Current time point: a value # assess.window: Observing window size # dose.levels: dose level for each subject # ndose: num of dose levels # prior.paras: a vector of 2, prior when estimating ptilde #return: # ym: Imputed y for the patient with no DLT and follow-up time < assess.window assesstime = enter.times + assess.window; followup.times <- current.time - enter.times dlt.times[dlt.times==0]= assess.window+1; yo <- (dlt.times<=assess.window)*(assesstime<=current.time)+(dlt.times<=followup.times)*(current.time<assesstime); obsIdxs <- current.time >= assesstime obsIdxs[yo==1] <- TRUE ym <- yo for (i in 1:ndose){ doseIdxs <- dose.levels == i if (sum(1-obsIdxs[doseIdxs]!=0)){ y <- sum(yo[doseIdxs]) n <- sum(doseIdxs[obsIdxs==1]) kpidxs <- doseIdxs & (obsIdxs!=1) ym.part <- TITEImpute.one(followup.times[kpidxs], assess.window, y, n, prior.paras) ym[kpidxs] <- ym.part } } res <- list(y.impute=ym, y.raw=yo, 
obsIdxs=obsIdxs) res } # posterior probability of pj >= phi given data post.prob.fn <- function(phi, y, n, alp.prior=0.1, bet.prior=0.9){ alp <- alp.prior + y bet <- bet.prior + n - y 1 - pbeta(phi, alp, bet) } overdose.fn <- function(phi, threshold, prior.para=list()){ y <- prior.para$y n <- prior.para$n alp.prior <- prior.para$alp.prior bet.prior <- prior.para$bet.prior pp <- post.prob.fn(phi, y, n, alp.prior, bet.prior) # print(data.frame("prob of overdose" = pp)) if ((pp >= threshold) & (prior.para$n>=3)){ return(TRUE) }else{ return(FALSE) } } ############################################################################### ############################MAIN DUNCTION###################################### ############################################################################### if (design == 'TITE-CFO'){accumulation = FALSE; impute.method = "TITE" }else if (design == 'fCFO'){accumulation = FALSE; impute.method = "frac" }else if (design == 'bCFO'){accumulation = FALSE; impute.method = "No" }else if (design == 'TITE-aCFO'){accumulation = TRUE; impute.method = "TITE" }else if (design == 'f-aCFO'){accumulation = TRUE; impute.method = "frac" }else if (design == 'b-aCFO'){accumulation = TRUE; impute.method = "No"} ndose <- length(p.true) if (is.null(prior.para$alp.prior)){ prior.para <- c(prior.para, list(alp.prior=target, bet.prior=1-target)) } alp.prior <- prior.para$alp.prior bet.prior <- prior.para$bet.prior ays = NULL ans = NULL ## Obtain effective results if (impute.method == "frac"){ impute.res <- fracImpute(enter.times, dlt.times, current.t, assess.window) y.raw <- impute.res$y.raw y.impute <- impute.res$y.impute if (impute.res$No.impute){ for (i in 1:ndose){ ays <- c(ays, sum(y.raw[doses==i])) ans <- c(ans, sum(doses==i)) } } else{ for (i in 1:ndose){ ays <- c(ays, sum(y.impute[doses==i])) ans <- c(ans, sum(doses==i)) } } }else if(impute.method == "TITE"){ impute.res <- TITEImpute(enter.times, dlt.times, current.t, assess.window, doses, ndose, c(target/2, 1-target/2)) y.raw <- impute.res$y.raw y.impute <- impute.res$y.impute for (i in 1:ndose){ ays <- c(ays, sum(y.impute[doses==i])) ans <- c(ans, sum(doses==i)) } }else if(impute.method == "No"){ assesstime = enter.times+assess.window; dlt.times[dlt.times==0]= assess.window+1; y.impute <- (dlt.times<=assess.window)*(assesstime<=current.t) for (i in 1:ndose){ ays <- c(ays, sum(y.impute[doses==i])) ans <- c(ans, sum(doses==i)) } } over.doses <- rep(0, ndose) for (i in 1:ndose){ cy <- ays[i] cn <- ans[i] prior.para <- c(list(y=cy, n=cn), list(alp.prior=alp.prior, bet.prior=bet.prior)) if (overdose.fn(target, cutoff.eli, prior.para)){ over.doses[i:ndose] <- 1 break() } } if (cutoff.eli != early.stop) { cy <- ays[1] cn <- ans[1] prior.para <- c(list(y=cy, n=cn),list(alp.prior=alp.prior, bet.prior=bet.prior)) if (overdose.fn(target, early.stop, prior.para)){ over.doses[1:ndose] <- 1 } } position <- which(over.doses == 1)[1] prior.para <- c(list(alp.prior=alp.prior, bet.prior=bet.prior)) if (accumulation == FALSE){ if (currdose==1){ cys <- c(NA, ays[1:(currdose+1)]) cns <- c(NA, ans[1:(currdose+1)]) }else if (currdose==ndose){ cys <- c(ays[(currdose-1):ndose], NA) cns <- c(ans[(currdose-1):ndose], NA) }else { cys <- ays[(currdose-1):(currdose+1)] cns <- ans[(currdose-1):(currdose+1)] } res <- CFO.next(target, cys, cns, currdose, prior.para, cutoff.eli, early.stop) }else{ res <- aCFO.next (target, ays, ans, currdose, prior.para, cutoff.eli, early.stop) } nextdose <- res$nextdose decision <- res$decision overtox <- res$overtox out <- 
list(target=target, ays=ays, ans=ans, decision=decision, currdose = currdose, nextdose=nextdose, overtox=overtox, over.doses=over.doses) class(out) <- c("cfo_decision", "cfo") return(out) }
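# Illustrative sketch (not part of the original file): the TITE-style imputation
# weight produced by TITEImpute.one() above.  For a pending patient followed for
# time t out of the full assessment window W, with an estimated DLT rate p.tilde
# at that dose, the pending outcome is imputed as
#   p.tilde * (1 - t/W) / ((1 - p.tilde) + p.tilde * (1 - t/W)).
# The fractional ('frac') scheme instead uses a Kaplan-Meier estimate, see
# fracImpute() above.  Toy values only.
p.tilde <- 0.2; t.follow <- 1; W <- 3
p.tilde * (1 - t.follow / W) / ((1 - p.tilde) + p.tilde * (1 - t.follow / W))   # about 0.14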
/scratch/gouwar.j/cran-all/cranData/CFO/R/lateonset.next.R
#' Conduct one simulation using the calibration-free odds type (CFO-type) design with late-onset toxicity. #' #' The function is used to conduct one single simulation and find the maximum tolerated dose (MTD) for the CFO-type designs with late-onset toxicities, #' specifically, including time-to-event CFO (TITE-CFO) design, fractional CFO (fCFO) design, benchmark CFO design, #' time-to-event accumulative CFO (TITE-aCFO) design, fractional accumulative CFO (f-aCFO) design and benchmark aCFO design. #' #' @usage lateonset.simu(design, target, p.true, init.level = 1, ncohort, cohortsize, #' assess.window, tte.para, accrual.rate, accrual.dist, #' prior.para = list(alp.prior = target, bet.prior = 1 - target), #' cutoff.eli = 0.95, early.stop = 0.95, seed = NULL) #' #' @param design option for selecting different designs, which can be set as \code{'TITE-CFO'}, \code{'TITE-aCFO'}, #' \code{'fCFO'}, \code{'f-aCFO'}, \code{'bCFO'}, and \code{'b-aCFO'}. Specifically, \code{'bCFO'} refers #' to the benchmark CFO design, and \code{'b-aCFO'} denotes the benchmark aCFO design. #' @param target the target DLT rate. #' @param p.true the true DLT rates under the different dose levels. #' @param ncohort the total number of cohorts. #' @param cohortsize the number of patients of each cohort. #' @param assess.window the maximal assessment window size. #' @param tte.para the parameter related with the distribution of the time to DLT events. The time to DLT is sampled from a Weibull #' distribution, with \code{tte.para} representing the proportion of DLTs occurring within the first half of the #' assessment window. #' @param accrual.rate the accrual.rate rate, i.e., the number of patients accrued per unit time. #' @param accrual.dist the distribution of the arrival times of patients. When \code{accrual.dist = 'fix'}, it corresponds to all #' patients in each cohort arriving simultaneously at a given accrual rate. When \code{accrual.dist = 'unif'}, #' it corresponds to a uniform distribution, and when \code{accrual.dist = 'exp'}, it corresponds to an #' exponential distribution. #' @param init.level the dose level assigned to the first cohort. The default value \code{init.level} is 1. #' @param prior.para the prior parameters for a beta distribution, where set as \code{list(alp.prior = target, bet.prior = 1 - target)} #' by default, \code{alp.prior} and \code{bet.prior} represent the parameters of the prior distribution for #' the true DLT rate at any dose level. This prior distribution is specified as Beta(\code{alpha.prior}, \code{beta.prior}). #' @param cutoff.eli the cutoff to eliminate overly toxic doses for safety. We recommend #' the default value of \code{cutoff.eli = 0.95} for general use. #' @param early.stop the threshold value for early stopping. The default value \code{early.stop = 0.95} #' generally works well. #' @param seed an integer to set as the seed of the random number generator for reproducible results. The default value is set to NULL. #' #' @note The early stopping and dose elimination rules are incorporated into the design #' to ensure patient safety and benefit. #' #' @return The \code{lateonset.simu()} function returns a list object comprising the following components: #' \itemize{ #' \item target: the target DLT rate. #' \item MTD: the selected MTD. \code{MTD = 99} indicates that this trial is terminated due to early stopping. #' \item correct: a binary indicator of whether the recommended dose level matches the target DLT rate (1 for yes). 
#' \item npatients: the total number of patients allocated to all dose levels #' \item ntox: the total number of DLTs observed for all dose levels. #' \item npercent: the percentage of subjects assigned to the target DLT rate. #' \item over.doses: a vector indicating whether each dose is overdosed or not (1 for yes). #' \item cohortdose: a vector including the dose level assigned to each cohort. #' \item ptoxic: the percentage of subjects assigned to dose levels with a DLT rate greater than the target. #' \item patientDLT: a vector including the DLT outcome observed for each patient. #' \item sumDLT: the total number of DLT observed. #' \item earlystop: a binary indicator of whether the trial is early stopped (1 for yes). #' \item totaltime: the duration of the trial. #' \item entertimes: the time that each participant enters the trial. #' \item DLT.times: the time to DLT for each subject in the trial. If no DLT occurs for a certain subject, #' \code{DLT.times} is 0. #' } #' #' #' @author Jialu Fang, Wenliang Wang, and Guosheng Yin #' #' @references Jin H, Yin G (2022). CFO: Calibration-free odds design for phase I/II clinical trials. #' \emph{Statistical Methods in Medical Research}, 31(6), 1051-1066. \cr #' Jin H, Yin G (2023). Time‐to‐event calibration‐free odds design: A robust efficient design for #' phase I trials with late‐onset outcomes. \emph{Pharmaceutical Statistics}. 22(5), 773–783.\cr #' Yin G, Zheng S, Xu J (2013). Fractional dose-finding methods with late-onset toxicity in #' phase I clinical trials. \emph{Journal of Biopharmaceutical Statistics}, 23(4), 856-870. #' @export #' #' @examples #' target <- 0.2; ncohort <- 12; cohortsize <- 3; init.level <- 1 #' p.true <- c(0.01, 0.07, 0.20, 0.35, 0.50, 0.65, 0.80) #' assess.window <- 3; accrual.rate <- 2; tte.para <- 0.5; accrual.dist <- 'unif' #' ## find the MTD for a single TITE-CFO simulation #' TITECFOtrial <- lateonset.simu (design = 'TITE-CFO', target, p.true, init.level, #' ncohort, cohortsize, assess.window, tte.para, accrual.rate, accrual.dist, seed = 1) #' summary(TITECFOtrial) #' plot(TITECFOtrial) #' ## find the MTD for a single TITE-aCFO simulation #' TITEaCFOtrial <- lateonset.simu (design = 'TITE-aCFO', target, p.true, init.level, #' ncohort, cohortsize, assess.window, tte.para, accrual.rate, accrual.dist, seed = 1) #' summary(TITEaCFOtrial) #' plot(TITEaCFOtrial) #' ## find the MTD for a single fCFO simulation #' fCFOtrial <- lateonset.simu (design = 'fCFO', target, p.true, init.level, #' ncohort, cohortsize, assess.window, tte.para, accrual.rate, accrual.dist, seed = 1) #' summary(fCFOtrial) #' plot(fCFOtrial) #' ## find the MTD for a single f-aCFO simulation #' faCFOtrial <- lateonset.simu (design = 'f-aCFO', target, p.true, init.level, #' ncohort, cohortsize, assess.window, tte.para, accrual.rate, accrual.dist, seed = 1) #' summary(faCFOtrial) #' plot(faCFOtrial) lateonset.simu <- function(design, target, p.true, init.level=1, ncohort, cohortsize, assess.window, tte.para, accrual.rate, accrual.dist, prior.para=list(alp.prior=target, bet.prior=1-target), cutoff.eli=0.95, early.stop=0.95, seed=NULL){ ############################################################################### ###############define the functions used for main function##################### ############################################################################### # The function is to obtain the DLT results (with TITE) for each subject gen.tite<-function(n, pi, assess.window=1, alpha=0.5){ #args: # n: Num of subjects to generate # pi: 
Target DLT rate, pi=Pr(T<=assess.window) # assess.window: Maximal window size # alpha: Parameter for generate time #Return: # if no DLT, tox.t=0 ############ subroutines ############ weib<-function(n, pi, pihalft) { ## solve parameters for Weibull given pi=1-S(T) and phalft=1-S(T/2) alpha = log(log(1-pi)/log(1-pihalft))/log(2); lambda = -log(1-pi)/(assess.window^alpha); t = (-log(runif(n))/lambda)^(1/alpha); return(t); } ############ end of subroutines ############ tox = rep(0, n); t.tox = rep(0, n); #### Weibull pihalft = alpha*pi; # alpha*100% event in (0, 1/2T) t.tox = weib(n, pi, pihalft); tox[t.tox<=assess.window]=1; ntox.st = sum(tox); t.tox[tox==0]=0; return(list(tox=tox, t.tox=t.tox, ntox.st=ntox.st)); } ############################################################################### ############################MAIN DUNCTION###################################### ############################################################################### if (is.null(prior.para$alp.prior)){ prior.para <- c(prior.para, list(alp.prior=target, bet.prior=1-target)) } alp.prior <- prior.para$alp.prior bet.prior <- prior.para$bet.prior set.seed(seed) ndose <- length(p.true) doselist <- rep(0, ncohort) earlystop <- 0 enter.times <- NULL # enter time of each subject dlt.times <- NULL # dlt time of each subject dlts <- NULL # dlt event for each subject doses <- NULL # dose level for each subject current.t<- 0 currdose <- init.level #current dose level over.doses <- rep(0, ndose) for (i in 1:ncohort){ curP <- p.true[currdose] doselist[i] <- currdose if (accrual.dist=='fix'){ delta.times <- rep(0, cohortsize) }else if (accrual.dist == 'unif'){ delta.times <- cumsum(c(0, runif(cohortsize-1, 0, 2/accrual.rate))) }else if (accrual.dist == 'exp'){ delta.times <- cumsum(c(0, rexp(cohortsize-1, rate=accrual.rate))) } enter.times <- c(enter.times, current.t+delta.times) # obtain the results of the patients obscohort <- gen.tite(cohortsize, curP, alpha=tte.para, assess.window=assess.window); dlt.times <- c(dlt.times, obscohort$t.tox); dlts <- c(dlts, obscohort$tox); doses <- c(doses, rep(currdose, cohortsize)); # Move to next cohort if (i != ncohort){ if (accrual.dist=='fix'){ delta.time <- cohortsize/accrual.rate }else if (accrual.dist == 'unif'){ delta.time <- runif(1, 0, 2/accrual.rate) }else if (accrual.dist == 'exp'){ delta.time <- rexp(1, rate=accrual.rate) } }else{ delta.time <- assess.window } current.t<- enter.times[length(enter.times)] + delta.time; if (design == 'bCFO' || design == 'b-aCFO'){ current.t <- enter.times[length(enter.times)] + assess.window res <- lateonset.next(design, target, p.true, currdose, assess.window, enter.times, dlt.times, current.t, doses, prior.para, cutoff.eli, early.stop) over.doses <- res$over.doses overTox <- res$overTox current.t <- current.t + delta.time }else{ res <- lateonset.next(design, target, p.true, currdose, assess.window, enter.times, dlt.times, current.t, doses, prior.para, cutoff.eli, early.stop) over.doses <- res$over.doses overTox <- res$overTox } if (over.doses[1] == 1){ earlystop <- 1 break() } else{ currdose <- res$nextdose } } ans <- NULL ays <- NULL assess.t <- enter.times + assess.window y.raw <- (dlt.times!=0)*1 for (j in 1:ndose){ ans <- c(ans, sum(doses==j)) ays <- c(ays, sum(y.raw[doses==j])) } if (earlystop==0){ MTD <- CFO.selectmtd(target, ans, ays, prior.para, cutoff.eli, early.stop, verbose=FALSE)$MTD }else{ MTD <- 99 } correct <- 0 if (MTD == target){ correct <- 1 } npercent <- ans[which(p.true == target)]/(ncohort*cohortsize) ptoxic <- 
sum(ans[which(p.true > target)])/(ncohort*cohortsize) out <- list(target=target, MTD=MTD, correct=correct, npatients=ans, ntox=ays, npercent=npercent, over.doses=over.doses, cohortdose=doselist, ptoxic=ptoxic, patientDLT = dlts, sumDLT=sum(dlts), earlystop=earlystop, totaltime=assess.t[length(assess.t)], entertimes=enter.times, DLTtimes=dlt.times) class(out) <- c("cfo_trial", "cfo") return(out) }
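# Illustrative sketch (not part of the original file): how gen.tite() above draws
# times to DLT.  The Weibull parameters are solved so that Pr(T <= assess.window)
# equals the true DLT rate p and Pr(T <= assess.window/2) = tte.para * p,
# mirroring the weib() subroutine; tte.para = 0.5 means half of the DLTs occur in
# the first half of the assessment window.  All values are toy numbers.
p.toy <- 0.2; tte.para.toy <- 0.5; window.toy <- 3
p.half <- tte.para.toy * p.toy
alpha.toy <- log(log(1 - p.toy) / log(1 - p.half)) / log(2)
lambda.toy <- -log(1 - p.toy) / (window.toy^alpha.toy)
set.seed(1)
t.tox.toy <- (-log(runif(1000)) / lambda.toy)^(1 / alpha.toy)
mean(t.tox.toy <= window.toy)   # close to p.toy = 0.2 by construction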
/scratch/gouwar.j/cran-all/cranData/CFO/R/lateonset.simu.R
#' Plot the results by other functions #' #' Plot the objects returned by other functions, including (1) dose allocation of a single trial; #' (2) the estimate of toxicity probability for each dose and corresponding 95% credible interval; #' (3) operating characteristics of multiple simulations, including MTD selection percentage, #' the averaged number of patients allocated to different doses in one simulation and the averaged #' number of DLT observed for different doses in one simulation. #' #' @param x the object returned by other functions #' @param ... ignored arguments #' @param name the name of the object to be plotted. #' User does not need to input this parameter. #' #' @return \code{plot()} returns a figure or a series of figures depending on the object entered. #' #' @note In the example, we set \code{nsimu = 5} for testing time considerations. In reality, \code{nsimu} #' is typically set to 5000 to ensure the accuracy of the results. #' #' #' @author Jialu Fang, Wenliang Wang, and Guosheng Yin #' #' @importFrom grDevices dev.flush dev.hold devAskNewPage #' @importFrom graphics axis barplot mtext par plot #' @importFrom graphics abline arrows legend points #' @import ggplot2 #' @export #' #' @examples #' #' #' ## settings for 1dCFO #' nsimu <- 5; ncohort <- 12; cohortsize <- 3; init.level <- 1 #' p.true <- c(0.02, 0.05, 0.20, 0.28, 0.34, 0.40, 0.44); target <- 0.2 #' assess.window <- 3; accrual.rate <- 2; tte.para <- 0.5; accrual.dist <- 'unif' #' #' ## plot the object returned by CFO.simu() #' CFOtrial <- CFO.simu(design = 'CFO', target, p.true, init.level, ncohort, cohortsize, seed = 1) #' plot(CFOtrial) #' #' ## plot the object returned by lateonset.simu() #' ## f-aCFO design #' faCFOtrial <- lateonset.simu (design = 'f-aCFO', target, p.true, init.level, #' ncohort, cohortsize, assess.window, tte.para, accrual.rate, accrual.dist, seed = 1) #' plot(faCFOtrial) #' #' ## summarize the object returned by CFO.oc() #' faCFOoc <- CFO.oc (nsimu, design = 'f-aCFO', target, p.true, init.level, ncohort, cohortsize, #' assess.window, tte.para, accrual.rate, accrual.dist, seeds = 1:nsimu) #' plot(faCFOoc) #' #' ## plot the object returned by CFO.selectmtd() #' selmtd <- CFO.selectmtd(target=0.2, npts=c(3,3,27,3,0,0,0), ntox=c(0,0,4,2,0,0,0)) #' plot(selmtd) #' #' \donttest{ #' # This test may take longer than 5 seconds to run #' # It is provided for illustration purposes only #' # Users can run this code directly #' ## settings for 2dCFO #' p.true <- matrix(c(0.05, 0.10, 0.15, 0.30, 0.45, #' 0.10, 0.15, 0.30, 0.45, 0.55, #' 0.15, 0.30, 0.45, 0.50, 0.60), #' nrow = 3, ncol = 5, byrow = TRUE) #' target <- 0.3; ncohort <- 12; cohortsize <- 3 #' #' ## plot the single simulation returned by CFO2d.simu() #' CFO2dtrial <- CFO2d.simu(target, p.true, init.level = c(1,1), ncohort, cohortsize, seed = 1) #' plot(CFO2dtrial) #' #' ## plot the multiple simulation returned by CFO2d.oc() #' CFO2doc <- CFO2d.oc(nsimu = 5, target, p.true, init.level = c(1,1), ncohort, cohortsize, #' seeds = 1:5) #' plot(CFO2doc) #' #' ## select a MTD based on the trial data #' ntox <- matrix(c(0, 0, 2, 0, 0, 0, 2, 7, 0, 0, 0, 2, 0, 0, 0), nrow = 3, ncol = 5, byrow = TRUE) #' npts <- matrix(c(3, 0, 12, 0, 0, 3, 12, 24, 0, 0, 3, 3, 0, 0, 0), nrow = 3, ncol = 5, byrow = TRUE) #' selmtd <- CFO2d.selectmtd(target=0.3, npts=npts, ntox=ntox) #' plot(selmtd) #'} #' plot.cfo<- function (x,..., name = deparse(substitute(x))) { new.obj = unlist(strsplit(name, split = "\\$")) strpattern = "none" if (length(new.obj) >= 2){ strpattern 
= new.obj[2] } assign("objectPlot", get(new.obj[1])) if (!is.element(strpattern, c("none", names(objectPlot)))) { warning("Please double check and specify the variable to be plotted...\n") } else { ############################################################################### ############################plot for CFO.oc()############################### ############################################################################### if (!is.null(objectPlot$simu.setup)) { #plot for one-dim multiple simulations oldpar <- par(no.readonly = TRUE) on.exit(par(oldpar)) if(is.null(dim(objectPlot$selpercent))){ attributesToPlot <- c("selpercent", "npatients", "ntox") titles <- c("MTD selection", "Average patients allocation", "Average DLT observed") ylabels <- c("Percentage (%)", "Number of patients", "Number of DLTs") par(mfrow = c(3, 1)) # Loop through each attribute and create a plot for (i in seq_along(attributesToPlot)) { attr <- attributesToPlot[i] # Check if the attribute exists in the objectPlot if (!is.null(objectPlot[[attr]])) { # Extract the vector vectorToPlot <- objectPlot[[attr]] # Convert to percentages only for selPercent if (attr == "selpercent") { vectorToPlot <- vectorToPlot * 100 } # Create the bar plot with horizontal x-axis labels bplot <- barplot(vectorToPlot, ylab = ylabels[i], main = titles[i], xlab = "Dose level", cex.names = 1, xaxt = "n", ylim = c(0, max(vectorToPlot))*1.3, cex.lab = 1.3) axis(1, at = bplot, labels = seq(1, length(objectPlot[[attr]]))) } } } else if(length(dim(objectPlot$selpercent))==2) { attributesToPlot <- c("selpercent", "npatients", "ntox") titles <- c("MTD selection", "Average patients allocation", "Average DLT observed") ylabels <- c("Percentage (%)", "Number of patients", "Number of DLTs") par(mfrow = c(3, 1)) # Loop through each attribute and create a plot for (i in seq_along(attributesToPlot)) { attr <- attributesToPlot[i] # Check if the attribute exists in the objectPlot if (!is.null(objectPlot[[attr]])) { # Extract the matrix matrixToPlot <- objectPlot[[attr]] # Convert the matrix to a vector by column matrixVector <- as.vector(matrixToPlot) # Convert to percentages only for selpercent if (attr == "selpercent") { matrixVector <- matrixVector * 100 } # Create x-axis labels dimMatrix <- dim(matrixToPlot) xLabels <- expand.grid(row = 1:dimMatrix[1], col = 1:dimMatrix[2]) xLabels <- apply(xLabels, 1, function(x) paste("(", x[1], ",", x[2], ")", sep = "")) # Create the bar plot with horizontal x-axis labels barplot(matrixVector, names.arg = xLabels, las = 2, xlab = "Combined dose level", ylab = ylabels[i], main = titles[i]) } } } } ############################################################################### #########################plot for XXX.simu()################################### ############################################################################### else if (!is.null(objectPlot$correct)) { if(length(objectPlot$MTD) == 1){ if (!is.null(objectPlot$totaltime)){ #plot for lateonset.simu() dose <- objectPlot$cohortdose DLT <- objectPlot$patientDLT ncohort <- length(objectPlot$cohortdose) cohortsize <- sum(objectPlot$npatients)/ncohort # Generate y_labels y_labels <- seq(1, max(dose)) # Generate sequences for each patient sequences <- objectPlot$entertimes # Generate dose_levels for each patient dose_levels <- rep(dose, each = cohortsize) # Generate DLT_observed for each patient DLT_observed <- matrix(DLT, nrow = cohortsize, ncol = ncohort) new_seq <- ifelse(objectPlot$DLTtimes!=0, sequences+objectPlot$DLTtimes, NA) new_y <- 
ifelse(objectPlot$DLTtimes!=0, dose_levels, NA) add_noise <- function(vec) { counts <- table(vec) counts <- table(names(counts)) result <- numeric(length(vec)) for (i in seq_along(vec)) { if (!is.na(vec[i])) { result[i] <- vec[i] + 0.1 * counts[as.character(vec[i])] # add 0.05 for unique value counts[as.character(vec[i])] <- counts[as.character(vec[i])] + 1 } } return(result) } new_y <- add_noise(new_y) df <- data.frame(sequence = sequences, dose_levels = dose_levels, DLT_observed = DLT_observed) dfnew <- data.frame(sequence = sequences, dose_levels = dose_levels, new_seq = new_seq, new_y = new_y) dfnew <- na.omit(dfnew) # Create the plot p <- ggplot(df, aes(x = sequence, y = dose_levels)) + geom_point(aes(shape = factor(DLT_observed,levels=c(0,1,2))), color = 'black', size = 2.5) + geom_step(direction = 'hv', color = 'black') + scale_y_continuous(breaks = 1:length(y_labels), labels = y_labels) + labs(x = "Time (in months)", y = "Dose level", fill = 'DLT observed') + theme_minimal() + theme(text = element_text(size = 12), legend.title=element_blank(), legend.position = c(1, 0), legend.justification = c(1, 0)) + scale_shape_manual(values = c(1, 16, 4), labels = c('DLT not observed', 'DLT observed',"DLT time"), drop = FALSE) for (row in 1:(nrow(dfnew))){ xuse=c(dfnew[row,"sequence"],dfnew[row,"new_seq"]) yuse=c(dfnew[row,"dose_levels"],dfnew[row,"new_y"]) dfuse <-data.frame(xuse=xuse, yuse=yuse) p <- p + annotate("point", x = xuse[2], y = yuse[2], shape = 4,size = 2.5) + geom_step(aes(x = xuse, y = yuse), data = dfuse,direction = 'vh', linetype = 2) } print(p) } else{ #plot for CFO.simu() dose <- objectPlot$cohortdose DLT <- objectPlot$patientDLT ncohort <- length(objectPlot$cohortdose) cohortsize <- sum(objectPlot$npatients)/ncohort # Generate y_labels y_labels <- seq(1, max(dose)) # Generate sequences for each patient sequences <- 1:(ncohort * cohortsize) # Generate dose_levels for each patient dose_levels <- rep(dose, each = cohortsize) # Generate DLT_observed for each patient DLT_observed <- matrix(DLT, nrow = cohortsize, ncol = ncohort) df <- data.frame(sequence = sequences, dose_levels = dose_levels, DLT_observed = DLT_observed) # Create the plot p <- ggplot(df, aes(x = sequence, y = dose_levels)) + geom_point(aes(fill = as.factor(DLT_observed)), color = 'black', shape = 21, size = 2.5) + geom_step(direction = 'hv', color = 'black') + scale_y_continuous(breaks = 1:length(y_labels), labels = y_labels) + labs(x = "Sequence of patients treated", y = "Dose level", fill = 'DLT observed') + theme_minimal() + theme(text = element_text(size = 12), legend.title=element_blank(), legend.position = c(1, 0), legend.justification = c(1, 0)) + scale_fill_manual(values = c('white', 'black'), labels = c('DLT not observed', 'DLT observed')) # Display the plot print(p) } } else{ dose <- objectPlot$cohortdose DLT <- objectPlot$patientDLT ###need to change!!!! 
ncohort <- dim(objectPlot$cohortdose)[1] cohortsize <- sum(objectPlot$npatients)/ncohort dim <- dim(objectPlot$ntox) # Generate y_labels y_labels <- expand.grid(1:dim[1], 1:dim[2]) y_labels <- apply(y_labels, 1, function(x) paste('(', x[1], ',', x[2], ')')) # Generate sequences for each patient sequences <- 1:(ncohort * cohortsize) # Generate dose_levels for each patient dose_levels <- rep(match(apply(dose, 1, function(x) paste('(', x[1], ',', x[2], ')')), y_labels), each = cohortsize) # Generate DLT_observed for each patient DLT_observed <- t(DLT) df <- data.frame(sequence = sequences, dose_levels = dose_levels, DLT_observed = DLT_observed) # Create the plot p <- ggplot(df, aes(x = sequence, y = dose_levels)) + geom_point(aes(fill = as.factor(DLT_observed)), color = 'black', shape = 21, size = 2.5) + geom_step(direction = 'hv', color = 'black') + scale_y_continuous(breaks = 1:length(y_labels), labels = y_labels) + labs(x = "Sequence of patients treated", y = "Combined dose level", fill = 'DLT observed') + theme_minimal() + theme(text = element_text(size = 12), legend.title=element_blank(), legend.position = c(1, 0), legend.justification = c(1, 0)) + scale_fill_manual(values = c('white', 'black'), labels = c('DLT not observed', 'DLT observed')) # Display the plot print(p) } } ############################################################################### #########################plot for CFO.selectmtd()################################### ############################################################################### else if (!is.null(objectPlot$p_est)){ if (objectPlot$MTD[1] == 99) { warning("All tested doses are overly toxic. No MTD is selected!\n") } else { if (!is.null(objectPlot$p_est)) { if (length(objectPlot$MTD) >= 2) { p_est.comb=objectPlot$p_est rownames(p_est.comb)=1:dim(p_est.comb)[1] colnames(p_est.comb)=1:dim(p_est.comb)[2] barplot(p_est.comb,beside=TRUE,ylab="DLT rate", ylim=c(0,round(max(p_est.comb,na.rm=TRUE)*1.5,1)),xlab="Drug B",legend.text=rownames(p_est.comb), args.legend=list(title="Drug A",horiz=TRUE,x="top")) } else { p_est = objectPlot$p_est p_hat = p_est[, 2] ci = p_est[, 3] ci = gsub("[\\(\\)]", "", ci) conf.intv = matrix(unlist(strsplit(ci, ",")), byrow = TRUE, ncol = 2) if (p_est[1, 2] == "----") { warning("The trial is stopped since the lowest dose is too toxic.\n") } else { numbs = ifelse(sum(p_hat == "----") == 0, length(p_hat), min(which(p_hat == "----")) - 1) numbs2 = length(p_hat) phatx = as.numeric(as.character(p_hat[1:numbs])) lwr = as.numeric(as.character(conf.intv[1:numbs, 1])) upr = as.numeric(as.character(conf.intv[1:numbs, 2])) plot(1:numbs2, ylim = c(0, 1), xlab = "Dose level", ylab = "DLT rate", pch = "", xaxt = "n", cex.lab = 1.3) axis(1, at = 1:numbs2, labels = 1:numbs2) abline(h = objectPlot$target, lty = 2, col = 2) points(1:numbs, phatx, pch = 19) arrows(x0 = 1:numbs, x1 = 1:numbs, y0 = lwr, y1 = upr, code = 3, angle = 90, length = 0.1) if (numbs < numbs2) { points((numbs + 1):numbs2, seq(min(1, max(phatx, na.rm = T) + 0.05), min(max(phatx, na.rm = T) + 0.2, 1), length = numbs2 - numbs), pch = "*", cex = 1.5) legend("topleft", "* no patient treated") } } } } } } ############################################################################### #########################plot for others################################### ############################################################################### else{ warning("The variable cannot be plotted. \n Please double check and specify the variable to be plotted...\n") } } }
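# Minimal dispatch sketch (kept as comments so nothing runs at package load/build):
# objects returned by CFO.selectmtd(), CFO.simu(), CFO.oc(), etc. are expected to
# carry the S3 class "cfo", so a plain plot() call reaches plot.cfo() above. The
# inputs mirror the @examples block and are illustrative only.
#
#   selmtd <- CFO.selectmtd(target = 0.2,
#                           npts = c(3, 3, 27, 3, 0, 0, 0),
#                           ntox = c(0, 0, 4, 2, 0, 0, 0))
#   class(selmtd)   # includes "cfo", so plot() dispatches to plot.cfo()
#   plot(selmtd)    # dose-level DLT estimates with 95% credible intervals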
/scratch/gouwar.j/cran-all/cranData/CFO/R/plot.cfo.R
#' #' Generate descriptive summary for objects returned by other functions #' #' Generate descriptive summary for objects returned by other functions. #' #' @param x the object returned by other functions #' @param ... ignored arguments #' #' #' @details \code{print()} prints the objects returned by other functions. #' #' @return \code{print()} prints the objects returned by other functions. #' #' @author Jialu Fang, Wenliang Wang, and Guosheng Yin #' #' @note In the example, we set \code{nsimu = 5} for testing time considerations. In reality, \code{nsimu} #' is typically set to 5000 to ensure the accuracy of the results. #' #' @examples #' #' ## settings for 1dCFO #' nsimu <- 5; ncohort <- 12; cohortsize <- 3; init.level <- 1 #' p.true <- c(0.02, 0.05, 0.20, 0.28, 0.34, 0.40, 0.44); target <- 0.2 #' assess.window <- 3; accrual.rate <- 2; tte.para <- 0.5; accrual.dist <- 'unif' #' #' ## summarize the object returned by CFO.next() #' decision <- CFO.next(target = 0.2, cys = c(0, 1, 0), cns = c(3, 6, 0), currdose = 3) #' print(decision) #' #' ## summarize the object returned by lateonset.next() #' enter.times<- c(0, 0.266, 0.638, 1.54, 2.48, 3.14, 3.32, 4.01, 4.39, 5.38, 5.76, #' 6.54, 6.66, 6.93, 7.32, 7.65, 8.14, 8.74) #' dlt.times<- c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0.995, 0, 0, 0, 0, 0, 0, 0, 2.58) #' current.t<- 9.41 #' doses<-c(1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 3, 3, 3, 4, 4, 4) #' decision <- lateonset.next(design = 'f-aCFO', target, p.true, currdose = 4, assess.window, #' enter.times, dlt.times, current.t, doses) #' print(decision) #' #' ## summarize the object returned by CFO.selectmtd() #' selmtd <- CFO.selectmtd(target=0.2, npts=c(3,3,27,3,0,0,0), ntox=c(0,0,4,2,0,0,0)) #' print(selmtd) #' #' ## summarize the object returned by CFO.simu() #' aCFOtrial <- CFO.simu(design = 'aCFO', target, p.true, init.level, ncohort, cohortsize, seed = 1) #' print(aCFOtrial) #' #' ## summarize the object returned by lateonset.simu() #' faCFOtrial <- lateonset.simu (design = 'f-aCFO', target, p.true, init.level, #' ncohort, cohortsize, assess.window, tte.para, accrual.rate, accrual.dist, seed = 1) #' print(faCFOtrial) #' #' ## summarize the object returned by CFO.oc() #' faCFOoc <- CFO.oc (nsimu, design = 'f-aCFO', target, p.true, init.level, ncohort, cohortsize, #' assess.window, tte.para, accrual.rate, accrual.dist, seeds = 1:nsimu) #' print(faCFOoc) #' #' \donttest{ #' # This test may take longer than 5 seconds to run #' # It is provided for illustration purposes only #' # Users can run this code directly #' ## settings for 2dCFO #' p.true <- matrix(c(0.05, 0.10, 0.15, 0.30, 0.45, #' 0.10, 0.15, 0.30, 0.45, 0.55, #' 0.15, 0.30, 0.45, 0.50, 0.60), #' nrow = 3, ncol = 5, byrow = TRUE) #' #' cns <- matrix(c(3, 3, 0, #' 0, 6, 0, #' 0, 0, 0), #' nrow = 3, ncol = 3, byrow = TRUE) #' cys <- matrix(c(0, 1, 0, #' 0, 2, 0, #' 0, 0, 0), #' nrow = 3, ncol = 3, byrow = TRUE) #' currdose <- c(2,3); target <- 0.3; ncohort <- 12; cohortsize <- 3 #' #' ## summarize the object returned by CFO2d.next() #' decision <- CFO2d.next(target, cys, cns, currdose = currdose, seed = 1) #' print(decision) #' #' ## summarize the object returned by CFO2d.selectmtd() #' ntox <- matrix(c(0, 0, 2, 0, 0, 0, 2, 7, 0, 0, 0, 2, 0, 0, 0), nrow = 3, ncol = 5, byrow = TRUE) #' npts <- matrix(c(3, 0, 12, 0, 0, 3, 12, 24, 0, 0, 3, 3, 0, 0, 0), nrow = 3, ncol = 5, byrow = TRUE) #' selmtd <- CFO2d.selectmtd(target=0.3, npts=npts, ntox=ntox) #' print(selmtd) #' #' ## summarize the object returned by CFO2d.simu() #' CFO2dtrial <- 
CFO2d.simu(target, p.true, init.level = c(1,1), ncohort, cohortsize, seed = 1) #' print(CFO2dtrial) #' #' ## summarize the object returned by CFO2d.oc() #' CFO2doc <- CFO2d.oc(nsimu = 5, target, p.true, init.level = c(1,1), ncohort, cohortsize, #' seeds = 1:5) #' print(CFO2doc) #' } #' #' @export print.cfo<-function(x,...){ print.default(x) }
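# Quick comparison sketch (comments only; inputs mirror the @examples above):
# print.cfo() is a thin wrapper around print.default(), so printing shows the raw
# list components of a "cfo" object, while summary() (see summury.cfo.R) produces
# the formatted report.
#
#   decision <- CFO.next(target = 0.2, cys = c(0, 1, 0), cns = c(3, 6, 0), currdose = 3)
#   print(decision)     # raw list: decision, nextdose, overtox, ...
#   summary(decision)   # human-readable dose-movement recommendation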
/scratch/gouwar.j/cran-all/cranData/CFO/R/print.cfo.R
#' Generate descriptive summary for objects returned by other functions #' #' Generate descriptive summary for objects returned by other functions. #' #' @param object the object returned by other functions. #' @param ... ignored arguments #' #' #' @return \code{summary()} prints the objects returned by other functions. #' #' #' @author Jialu Fang, Wenliang Wang, and Guosheng Yin #' #' @note In the example, we set \code{nsimu = 5} for testing time considerations. In reality, \code{nsimu} #' is typically set to 5000 to ensure the accuracy of the results. #' #' @export #' #' @examples #' #' #' ## settings for 1dCFO #' nsimu <- 5; ncohort <- 12; cohortsize <- 3; init.level <- 1 #' p.true <- c(0.02, 0.05, 0.20, 0.28, 0.34, 0.40, 0.44); target <- 0.2 #' assess.window <- 3; accrual.rate <- 2; tte.para <- 0.5; accrual.dist <- 'unif' #' #' ## summarize the object returned by CFO.next() #' decision <- CFO.next(target = 0.2, cys = c(0, 1, 0), cns = c(3, 6, 0), currdose = 3) #' summary(decision) #' #' ## summarize the object returned by lateonset.next() #' enter.times<- c(0, 0.266, 0.638, 1.54, 2.48, 3.14, 3.32, 4.01, 4.39, 5.38, 5.76, #' 6.54, 6.66, 6.93, 7.32, 7.65, 8.14, 8.74) #' dlt.times<- c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0.995, 0, 0, 0, 0, 0, 0, 0, 2.58) #' current.t<- 9.41 #' doses<-c(1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 3, 3, 3, 4, 4, 4) #' decision <- lateonset.next(design = 'f-aCFO', target, p.true, currdose = 4, assess.window, #' enter.times, dlt.times, current.t, doses) #' summary(decision) #' #' ## summarize the object returned by CFO.selectmtd() #' selmtd <- CFO.selectmtd(target=0.2, npts=c(3,3,27,3,0,0,0), ntox=c(0,0,4,2,0,0,0)) #' summary(selmtd) #' #' ## summarize the object returned by CFO.simu() #' aCFOtrial <- CFO.simu(design = 'aCFO', target, p.true, init.level, ncohort, cohortsize, seed = 1) #' summary(aCFOtrial) #' #' ## summarize the object returned by lateonset.simu() #' faCFOtrial <- lateonset.simu (design = 'f-aCFO', target, p.true, init.level, #' ncohort, cohortsize, assess.window, tte.para, accrual.rate, accrual.dist, seed = 1) #' summary(faCFOtrial) #' #' ## summarize the object returned by CFO.oc() #' faCFOoc <- CFO.oc (nsimu, design = 'f-aCFO', target, p.true, init.level, ncohort, cohortsize, #' assess.window, tte.para, accrual.rate, accrual.dist, seeds = 1:nsimu) #' summary(faCFOoc) #' #' \donttest{ #' # This test may take longer than 5 seconds to run #' # It is provided for illustration purposes only #' # Users can run this code directly #' ## settings for 2dCFO #' p.true <- matrix(c(0.05, 0.10, 0.15, 0.30, 0.45, #' 0.10, 0.15, 0.30, 0.45, 0.55, #' 0.15, 0.30, 0.45, 0.50, 0.60), #' nrow = 3, ncol = 5, byrow = TRUE) #' #' cns <- matrix(c(3, 3, 0, #' 0, 6, 0, #' 0, 0, 0), #' nrow = 3, ncol = 3, byrow = TRUE) #' cys <- matrix(c(0, 1, 0, #' 0, 2, 0, #' 0, 0, 0), #' nrow = 3, ncol = 3, byrow = TRUE) #' currdose <- c(2,3); target <- 0.3; ncohort <- 12; cohortsize <- 3 #' #' ## summarize the object returned by CFO2d.next() #' decision <- CFO2d.next(target, cys, cns, currdose = currdose, seed = 1) #' summary(decision) #' #' ## summarize the object returned by CFO2d.selectmtd() #' ntox <- matrix(c(0, 0, 2, 0, 0, 0, 2, 7, 0, 0, 0, 2, 0, 0, 0), nrow = 3, ncol = 5, byrow = TRUE) #' npts <- matrix(c(3, 0, 12, 0, 0, 3, 12, 24, 0, 0, 3, 3, 0, 0, 0), nrow = 3, ncol = 5, byrow = TRUE) #' selmtd <- CFO2d.selectmtd(target=0.3, npts=npts, ntox=ntox) #' summary(selmtd) #' #' ## summarize the object returned by CFO2d.simu() #' CFO2dtrial <- CFO2d.simu(target, p.true, init.level = c(1,1), 
ncohort, cohortsize, seed = 1) #' summary(CFO2dtrial) #' #' ## summarize the object returned by CFO2d.oc() #' CFO2doc <- CFO2d.oc(nsimu = 5, target, p.true, init.level = c(1,1), ncohort, cohortsize, #' seeds = 1:5) #' summary(CFO2doc) #' } #' summary.cfo<- function (object, ...) { ############################################################################### ############################summary for XXX.oc()############################### ############################################################################### if (!is.null(object$simu.setup)) { if(is.null(dim(object$selpercent))){ if (object$percentstop == 0){ cat("No instance of early stopping was observed in", object$simu.setup$nsimu, "simulations. \n") }else{ nstop = object$percentstop*object$simu.setup$nsimu cat("In", object$simu.setup$nsimu, "simulations, early stopping occurred", nstop, "times. \n") cat("Among simulations where early stopping did not occur: \n") } cat("Selection percentage at each dose level:\n") cat(formatC(object$selpercent, digits = 3, format = "f"), sep = " ", "\n") cat("Average number of patients treated at each dose level:\n") cat(formatC(object$npatients, digits = 3, format = "f"), sep = " ", "\n") cat("Average number of toxicities observed at each dose level:\n") cat(formatC(object$ntox, digits = 3, format = "f"), sep = " ", "\n") cat("Percentage of correct selection of the MTD:", formatC(object$MTDsel, digits = 3, format = "f"), "\n") cat("Percentage of patients allocated to the MTD:", formatC(object$MTDallo, digits = 3, format = "f"), "\n") cat("Percentage of selecting a dose above the MTD:", formatC(object$oversel, digits = 3, format = "f")," \n") cat("Percentage of allocating patients at dose levels above the MTD:", formatC(object$overallo, digits = 3, format = "f")," \n") cat("Percentage of the patients suffering DLT:", formatC(object$averDLT, digits = 3, format = "f")," \n") if (!is.null(object$averdur)){ cat("Average trial duration:", formatC(object$averdur, digits = 1, format = "f")," \n") } } else if(length(dim(object$selpercent))==2) { # Summary for 2dCFO multiple trail simulation cat("Selection percentage at each dose combination:\n") print(object$selpercent) cat("Average number of patients treated at each dose combination:\n") print(object$npatients) cat("Average number of toxicities observed at each dose combination:\n") print(object$ntox) cat("Percentage of correct selection of the MTD:", formatC(object$MTDsel, digits = 3, format = "f"), "\n") cat("Percentage of patients allocated to the MTD:", formatC(object$MTDallo, digits = 3, format = "f"), "\n") cat("Percentage of selecting a dose above the MTD:", formatC(object$oversel, digits = 3, format = "f")," \n") cat("Percentage of allocating patients at dose levels above the MTD:", formatC(object$overallo, digits = 3, format = "f")," \n") cat("Percentage of the patients suffering DLT:", formatC(object$averDLT/sum(object$npatients), digits = 3, format = "f")," \n") } } ############################################################################### #########################summary for XXX.simu()############################### ############################################################################### if(!is.null(object$correct)){ ###summary for XXX.simu() if (length(object$MTD) == 1) { ###summary for one-dim XXX.simu() if (object$MTD == 99) { warning("All tested doses are overly toxic. No MTD should be selected! 
\n\n") } else { cat("The selected MTD is dose level", paste0(object$MTD, "."), "\n") cat("For",length(object$cohortdose),"cohorts, the dose level assigned to each cohort is: \n") cat(formatC(object$cohortdose, format = "d"), sep = " ", "\n") cat("Number of toxicities observed at each dose level:\n") cat(formatC(object$ntox, format = "d"), sep = " ", "\n") cat("Number of patients treated at each dose level:\n") cat(formatC(object$npatients, format = "d"), sep = " ", "\n") if (!is.null(object$totaltime)){ cat("The duration of the trial in months:", formatC(object$totaltime, digits = 3, format = "f")," \n") } } } else { ###summary for two-dim XXX.simu() if (object$MTD[1] == 99 | object$MTD[2] == 99) { warning("All tested doses are overly toxic. No MTD should be selected! \n\n") } else { # Summary for 2dCFO single trail simulation cat("The selected MTD is dose level (", object$MTD[1], ",",object$MTD[2], ").\n\n") } # print assgined dosage for each cohort doses <- object$cohortdose cohort_data <- data.frame( cohort = 1:nrow(doses), dose_A = doses[, 1], dose_B = doses[, 2] ) print(cohort_data, row.names = FALSE) cat("\n") cat("Number of toxicity observed at each dose level:\n") print(object$ntox) cat("\n") cat("Number of patients treated at each dose level:\n") print(object$npatients) } } ############################################################################### #########################summary for XXX.next()################################ ############################################################################### if(!is.null(object$decision)){ if(length(object$decision)==2){ ##summary for two dim XXX.next() if (is.na(object$overtox)) { cat("All tested doses are not overly toxic. \n\n") } else { cat("Dose level", object$overtox, "and all levels above exhibit excessive toxicity.", "\n") } cat("The decision regarding the direction of movement for drug A is", paste0(object$decision[1], "."), "\n") cat("The decision regarding the direction of movement for drug B is", paste0(object$decision[2], "."), "\n") cat("The next cohort will be assigned to dose level (", object$nextdose[1],",",object$nextdose[2],").", "\n") } else { ##summary for one dim XXX.next() if (is.na(object$overtox)) { cat("All tested doses are not overly toxic. \n\n") } else { cat("Dose level", object$overtox, "and all levels above exhibit excessive toxicity.", "\n") } if (object$decision == "stop"){ cat("The lowest dose level is overly toxic. We terminate the entire trial for safety.") }else{ cat("The current dose level is", paste0(object$currdose, "."), "\n") cat("The decision regarding the direction of movement is", paste0(object$decision, "."), "\n") cat("The next cohort will be assigned to dose level", paste0(object$nextdose, "."), "\n") } } } ############################################################################### #########################summary for CFO.selectmtd()############################## ############################################################################### if (!is.null(object$p_est)){ ##summary for CFO.selectmtd() if (length(object$MTD) == 1) { ##summary for one dim CFO.selectmtd() if (object$MTD == 99) { warning("All tested doses are overly toxic. No MTD should be selected! 
\n\n") } else { cat("The MTD is dose level ", paste0(object$MTD, "."), "\n\n") cat("Dose Posterior DLT 95% \n", sep = "") cat("Level Estimate Credible Interval Pr(toxicity>", object$target, "|data)\n", sep = "") for (i in 1:nrow(object$p_est)) { cat(" ", i, " ", as.character(object$p_est[i,2]), " ", as.character(object$p_est[i,3]), " ", as.character(object$p_overdose[i]), "\n") } cat("NOTE: no estimate is provided for the doses at which no patient was treated.\n") } } if (length(object$MTD) >= 2) { if (length(object$MTD) == 2) { if (object$MTD[1, 1] == 99 && object$MTD[1, 2] ==99) { warning("All tested doses are overly toxic. No MTD is selected! \n") } else { cat("The MTD is dose combination (", object$MTD[1,1], ", ", object$MTD[1, 2], "). \n\n") cat("Isotonic estimates of toxicity probabilities and 95% credible intervals for dose combinations are \n") # for (i in 1:dim(object$p_est_CI)[1]) { # cat(formatC(object$p_est_CI[i, ], digits = 2, format = "f", # width = 5), sep = " ", "\n") # } print(noquote(object$p_est_CI)) cat("\n") cat("NOTE: no estimate is provided for the doses at which no patient was treated.\n\n") } } } } }
/scratch/gouwar.j/cran-all/cranData/CFO/R/summury.cfo.R
#'A Reference Class to represent a CF object
#'@description A class of objects structured around the following components: the MU - Utility Matrix, the SU1 and SU2 - Matrices of Similarity
#'between Users, the SI1 and SI2 - Matrices of Similarity between Items, and the vectors averages_u, averages_i, n_aval_u and n_aval_i. The class
#'contains methods, general functions whose purpose is to manipulate the data and make recommendations from the structures
#'present in the class. The data manipulation methods comprise addsimilarity, addnewuser, addnewemptyuser, deleteuser, addnewitem, addnewemptyitem, deleteitem, newrating and deleterating,
#'while the recommendation methods recommend, kclosestitems, topkusers and topkitems are built from choices available in the
#'Collaborative Filtering methodology. All objects and methods are accessed through the "$" character. A CF class object is created through
#'the CFbuilder function.
#'
#'@field MU A utility matrix that contains all the users' ratings. The rows comprise users and the columns, items.
#'@field SU1 An upper triangular user similarity matrix that contains the similarities between users, calculated using Cosine similarity.
#'@field SU2 An upper triangular user similarity matrix that contains the similarities between users, calculated using Pearson Correlation.
#'@field SI1 An upper triangular item similarity matrix that contains the similarities between items, calculated using Cosine similarity.
#'@field SI2 An upper triangular item similarity matrix that contains the similarities between items, calculated using Adjusted Cosine similarity.
#'@field averages_u A vector that contains the average rating given by each user.
#'@field averages_i A vector that contains the average rating received by each item.
#'@field n_aval_u A vector that contains the number of ratings given by each user.
#'@field n_aval_i A vector that contains the number of ratings received by each item.
#'
#'@references
#'\itemize{
#'\item Linden, G., Smith, B., & York, J. (2003). Amazon.com recommendations: Item-to-item collaborative filtering. IEEE Internet Computing, 7(1), 76-80.
#'\item Aggarwal, C. C. (2016). Recommender systems (Vol. 1). Cham: Springer International Publishing.
#'\item Leskovec, J., Rajaraman, A., & Ullman, J. D. (2020). Mining of massive datasets. Cambridge University Press.
#'}
#'
#'@seealso \code{\link[CFilt]{CFbuilder}}
#'
#'
#'@author Thiago Lima, Jessica Kubrusly.
#'@import methods #'@export #'@examples #'ratings<-movies[1:1000,] #'objectCF<-CFbuilder(Data = ratings, sim_user="pearson", sim_item="adjcos") #'objectCF$addsimilarity(sim_user="cos",sim_item="cos") #'objectCF$MU #'objectCF$SU1 #'objectCF$SU2 #'objectCF$SI1 #'objectCF$SI2 #'objectCF$averages_u #'objectCF$averages_i #'objectCF$n_aval_u #'objectCF$n_aval_i #'objectCF$addnewuser(Id_u = "Thiago",Ids_i = "The Hunger Games: Catching Fire",r = 5) #'objectCF$addnewemptyuser(Id_u = "Jessica") #'objectCF$deleteuser(Id_u = "Jessica") #'objectCF$addnewitem(Id_i = "Avengers: Endgame",Ids_u = c("1","2"),r = c(5,3)) #'objectCF$addnewemptyitem(Id_i = "Star Wars") #'objectCF$deleteitem(Id_i="Star Wars") #'objectCF$newrating(Id_u = "1", Id_i = "Till Luck Do Us Part 2",r = 2) #'objectCF$recommend(Id_u = "2", Id_i = "Iron Man 3", type = "user") #'objectCF$kclosestitems(Id_i = "Iron Man 3", k = 3) #'objectCF$topkitems(Id_u = "3",k = 3, type = "user") #'objectCF$topkusers(Id_i = "Thor: The Dark World", k = 3,type = "item") #'objectCF$estimaterating(Id_u = "2",Id_i = "Iron Man 3", type = "user") #'objectCF$deleterating("1","Brazilian Western") #'objectCF$changerating("1","Wreck-It Ralph",2) CF = setRefClass("CF",fields = list( MU="matrix", SU1="matrix", SU2="matrix", SI1="matrix", SI2="matrix", averages_u = "numeric", averages_i = "numeric", n_aval_u = "numeric", n_aval_i = "numeric" ), methods =list( addsimilarity=function(sim_user='none',sim_item='none'){ "Adds new methodologies even after the construction and modification of the CF object used. The matrices of similarities representing each requested methodology will be added. sim_user: a methodology used to estimate the rating by users similarity. Can be 'cos','pearson','both' or 'none'. If it equals 'cos' (Cosine Similarity), the SU1 will be built. If it equals 'pearson' (Pearson Similarity), the SU2 will be built. If it equals 'both', the SU1 and SU2 will be built. If it equals 'none', nothing will be built. sim_item: A methodology used to estimate the rating by itens similarity. Can be 'cos','adjcos','both' or 'none'. If it equals 'cos' (Cosine Similarity), the SI1 will be built. If it equals 'adjcos' (Adjusted Cosine Similarity), the SI2 will be built. If it equals 'both', the SI1 and SI2 will be built. If it equals 'none', nothing will be built." 
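      # Overview of the similarity measures computed below (formulas as coded):
      #   SU1 / SI1 (cosine):    sum(x*y, na.rm = TRUE) / (||x|| * ||y||), where x and y
      #                          are two user rows / item columns of MU; NA products drop
      #                          non co-rated entries and each norm runs over that
      #                          vector's observed ratings.
      #   SU2 (Pearson):         the same ratio after centring each user row by that
      #                          user's own mean in averages_u.
      #   SI2 (adjusted cosine): the same ratio after centring each item column by the
      #                          rating user's mean (averages_u).
      # All four matrices are stored as upper triangles with 1 on the diagonal.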
if(sim_user!="cos" && sim_user!="pearson" && sim_user!="both" && sim_user!="none" ){stop("sim_user can be only 'cos', 'pearson','both' or 'none'.")} if(sim_item!="cos" && sim_item!="adjcos" && sim_item!="both" && sim_item!="none" ){stop("sim_user can be only 'cos', 'adjcos','both' or 'none'.")} if((sim_user=="cos"||sim_user=="both") && sum(dim(SU1))!=0){stop("You already have the Cosine similarity in SU1.")} if((sim_user=="pearson"||sim_user=="both") && sum(dim(SU2))!=0){stop("You already have the Pearson similarity in SU2.")} if((sim_item=="cos"||sim_item=="both") && sum(dim(SI1))!=0){stop("You already have the Cosine similarity in SI1.")} if((sim_item=="adjcos"||sim_item=="both") && sum(dim(SI2))!=0){stop("You already have the Adjusted Cosine similarity in SI2.")} if(sim_user=="none" && sim_item =="none"){stop("Nothing will change.")} m=nrow(MU) n=ncol(MU) nome_i=colnames(MU) nome_u = rownames(MU) if(sim_user=="cos" || sim_user=="both"){ message("Building SU1...") pb <- txtProgressBar(min = 0, max = m, style = 3) SU1<<-matrix(NA,m,m,dimnames = list(nome_u,nome_u)) for(i in 1:m){ setTxtProgressBar(pb, i) for(j in i:m){ if(j!=i){ SU1[i,j]<<-sum(MU[i,]*MU[j,],na.rm = T)/(sqrt(sum((MU[i,])^2,na.rm = T))*sqrt(sum((MU[j,])^2,na.rm = T))) } if(j==i){SU1[i,j]<<-1} } } setTxtProgressBar(pb,m) close(pb) } if(sim_item=="cos" || sim_item=="both"){ message("Building SI1...") pb <- txtProgressBar(min = 0, max = n, style = 3) SI1 <<- matrix(NA,n,n,dimnames=list(nome_i,nome_i)) for(i in 1:n){ setTxtProgressBar(pb, i) for(j in i:n){ if(j!=i){ SI1[i,j] <<- sum(MU[,i]*MU[,j],na.rm=T)/(sqrt(sum((MU[,i])^2,na.rm=T))*sqrt(sum((MU[,j])^2,na.rm=T))) } if(j==i){SI1[i,j]<<-1} } } setTxtProgressBar(pb,n) close(pb) } if(sim_item=="adjcos" || sim_item=="both"){ message("Building SI2...") pb <- txtProgressBar(min = 0, max = n, style = 3) SI2<<-matrix(NA,n,n,dimnames = list(nome_i,nome_i)) for(i in 1:n){ setTxtProgressBar(pb, i) for(j in i:n){ if(j!= i){ SI2[i,j]<<-sum((MU[,i]-averages_u)*(MU[,j]-averages_u),na.rm = T)/(sqrt(sum((MU[,i]-averages_u)^2,na.rm = T))*sqrt(sum((MU[,j]-averages_u)^2,na.rm = T))) } if(j==i){SI2[i,j]<<-1} } } setTxtProgressBar(pb,n) close(pb) } if(sim_user=="pearson" || sim_user == "both" ){ message("Building SU2...") pb <- txtProgressBar(min = 0, max = m, style = 3) SU2<<-matrix(NA,m,m,dimnames = list(nome_u,nome_u)) for(i in 1:m){ setTxtProgressBar(pb,i) for(j in i:m){ if(j!= i){ SU2[i,j]<<-sum((MU[i,]-averages_u[i])*(MU[j,]-averages_u[j]),na.rm = T)/(sqrt(sum((MU[i,]-averages_u[i])^2,na.rm = T))*sqrt(sum((MU[j,]-averages_u[j])^2,na.rm = T))) } if(j==i){SU2[i,j]<<-1} } } setTxtProgressBar(pb,m) close(pb) } }, addnewuser=function(Id_u,Ids_i,r){ "Adds a new user who rated one or more items. The object CF matrices and vectors will be updated. Id_u : a character, a user ID; Ids_i : a character vector, item IDs; r : a vector with its respective ratings. If you want to add more users, you can use lists, where Id_u: list of characters; Ids_i: list of vectors of characters; r: list of vectors of ratings. 
" if(is.list(Id_u) && is.list(Ids_i) && is.list(r)){ u=length(Id_u) for(k in 1:u){ if(k==u){ Id_u = as.character(Id_u[[k]]) Ids_i= as.character(Ids_i[[k]]) r= as.numeric(r[[k]]) }else{ addnewuser(Id_u = Id_u[[k]], Ids_i = Ids_i[[k]], r=r[[k]]) } } } if(!is.character(Id_u)){stop("Id_u must be a character.")} if(sum(!is.character(Ids_i))!=0){stop("Ids_i must be a character vector.")} have_SU1=F have_SU2=F have_SI1=F have_SI2=F if(sum(dim(SU1))!=0) have_SU1=TRUE if(sum(dim(SU2))!=0) have_SU2=TRUE if(sum(dim(SI1))!=0) have_SI1=TRUE if(sum(dim(SI2))!=0) have_SI2=TRUE #Verificar se existe um usuário com o mesmo identificador: N = dim(MU)[2] M=dim(MU)[1] k=M + 1 #linha do novo usuario for(p in 1:length(r)){ if(is.na(r[p])){stop("The vector 'r' can't have NA values.")} } for(i in 1:M){ if(rownames(MU)[i]==Id_u){ stop("This user ID already exist. If you want to add a new rating, use $newrating().") } } N = dim(MU)[2] M=dim(MU)[1] k=M + 1 #linha do novo usuario #Achar os indices dos itens indices_v=NULL encontrou_j = F for(j in 1:length(Ids_i)){ for(i in 1:N){ if(colnames(MU)[i]==Ids_i[j]){ encontrou_j = T indices_v=c(indices_v,i) break } } if(!encontrou_j) stop("This is not a valid item.") } message("Progress:") pb <- txtProgressBar(min = 1, max = 5, style = 3) contador = 1 setTxtProgressBar(pb, contador) #Alteracao MU novo_usuario<-matrix(NA,1,N,dimnames=list(Id_u,NULL)) MU<<-rbind(MU,novo_usuario) t=1 for(i in indices_v){ MU[k,i]<<-r[t] t=t+1 } #Alteracao averages_u averages_u<<-c(averages_u,mean(r,na.rm = T)) names(averages_u)[k]<<-Id_u #Alteracao averages_i, n_aval_i e n_aval_u t=1 for(j in indices_v){ averages_i[j] <<- averages_i[j]*n_aval_i[j]/(n_aval_i[j]+1) + r[t]/(n_aval_i[j]+1) n_aval_i[j] <<- n_aval_i[j] + 1 t=t+1 } n_aval_u<<- c(n_aval_u,length(r)) names(n_aval_u)[k]<<-Id_u #Verificar quais usuarios avaliaram os itens Ids_i e criar um vetor com a Uniao dos indices(sem repeticao) uniao_indices_i<-NULL for (i in indices_v){ #Cada item for(u in 1:M){ #percorre a coluna do item i if(!is.na(MU[u,i])){ #Verifica se o usuario avaliou ou nao if(is.null(uniao_indices_i)){uniao_indices_i=c(uniao_indices_i,u)} #Acrescenta o primeiro indice else{ for(p in 1:length(uniao_indices_i)){ #Verifica se o indice ja existe no vetor uniao_indices_i if(uniao_indices_i[p]==u){ ja_incluido=T break} ja_incluido=F } if(ja_incluido==F){uniao_indices_i=c(uniao_indices_i,u)} #Se o indice nao existe no vetor, acrescente o indice } } } } if(have_SU1==TRUE){ #Alteracao SU1 SU1<<-rbind(SU1,NA) rownames(SU1)[k]<<-Id_u SU1<<-cbind(SU1,0) colnames(SU1)[k]<<-Id_u SU1[k,k]<<-1 u=1 for(i in uniao_indices_i){ contador = 1 + (u/length(uniao_indices_i))*1 setTxtProgressBar(pb,contador) u=u+1 if(i<k){ SU1[i,k]<<-sum(MU[i,]*MU[k,],na.rm = T)/(sqrt(sum(MU[i,]^2,na.rm = T))*sqrt(sum(MU[k,]^2,na.rm = T))) } } } if(have_SU2==TRUE){ #Alteracao SU2 SU2<<-rbind(SU2,NA) rownames(SU2)[k]<<-Id_u SU2<<-cbind(SU2,0) colnames(SU2)[k]<<-Id_u SU2[k,k]<<-1 u=2 for(i in uniao_indices_i){ contador = 2 + (u/length(uniao_indices_i))*1 setTxtProgressBar(pb,contador) u=u+1 if(i<k){ SU2[i,k]<<-sum((MU[i,]-averages_u[i])*(MU[k,]-averages_u[k]),na.rm = T)/ (sqrt(sum((MU[i,]-averages_u[i])^2,na.rm = T))*sqrt(sum((MU[k,]-averages_u[k])^2,na.rm = T))) } } } #Alteracao SI2 if(have_SI2==TRUE){ u=3 for(i in indices_v){ for(j in indices_v){ if(j>i){ SI2[i,j]<<-sum((MU[,i]-averages_u)*(MU[,j]-averages_u),na.rm = T)/(sqrt(sum((MU[,i]-averages_u)^2,na.rm = T))*sqrt(sum((MU[,j]-averages_u)^2,na.rm = T))) } } contador = 3 + (u/length(indices_v))*1 
setTxtProgressBar(pb,contador) u=u+1 } } #Alteracao SI1 if(have_SI1==TRUE){ u=4 for(i in indices_v){ for(j in indices_v){ if(j>i){ SI1[i,j]<<-sum(MU[,i]*MU[,j],na.rm = T)/(sqrt(sum(MU[,i]^2,na.rm = T))*sqrt(sum(MU[,j]^2,na.rm = T))) } } contador = 4 + (u/length(indices_v))*1 setTxtProgressBar(pb,contador) u=u+1 } } setTxtProgressBar(pb,5) close(pb) }, deleteuser=function(Id_u){ "Deletes an already registered user. The object CF matrices and vectors will be updated. Id_u : A character, a user ID that will be deleted. If you want to delete more users, you can use lists where Id_u is a list of characters." if(is.list(Id_u)){ u=length(Id_u) for(k in 1:u){ if(k==u){ Id_u = as.character(Id_u[[k]]) }else{ deleteuser(Id_u = Id_u[[k]]) } } } if(!is.character(Id_u)){stop("Id_u must be a character.")} have_SU1=F have_SU2=F have_SI1=F have_SI2=F if(sum(dim(SU1))!=0) have_SU1=TRUE if(sum(dim(SU2))!=0) have_SU2=TRUE if(sum(dim(SI1))!=0) have_SI1=TRUE if(sum(dim(SI2))!=0) have_SI2=TRUE #Verificar se existe um usuário com o mesmo identificador: N = dim(MU)[2] M = dim(MU)[1] #Achar indice i do usuário, e caso não achar, informar que o usuário não existe. for(k in 1:M){ if(rownames(MU)[k]==Id_u){ i=k break } if(k==M){stop("This user ID doesn't exist. If you want to add a new user, use $addnewuser().")} } message("Progress:") pb <- txtProgressBar(min = 1, max = 5, style = 3) contador = 1 setTxtProgressBar(pb, contador) #Vetores "avaliacoes" e "ind_aval" que contém respectivamente, as avaliações dos itens e os indices dos itens avaliados por Id_u. avaliacoes = NULL ind_aval = NULL t=1 for(k in MU[i,]){ if(!is.na(MU[i,t])){ avaliacoes<-c(avaliacoes,k) ind_aval<-c(ind_aval,t)} t=t+1 } #Alteracao MU MU<<-MU[-i,] #Alteracao averages_u e n_aval_u averages_u<<-averages_u[-i] n_aval_u<<-n_aval_u[-i] #Alteracao averages_i, n_aval_i t=1 for(j in ind_aval){ averages_i[j] <<- averages_i[j]*n_aval_i[j]/(n_aval_i[j]-1) - avaliacoes[t]/(n_aval_i[j]-1) n_aval_i[j] <<- n_aval_i[j] - 1 t=t+1 } #Alteracao SU1 if(have_SU1==T){ SU1<<-SU1[-i,-i] } #Alteracao SU2 if(have_SU2==T){ SU2<<-SU2[-i,-i] } #Alteracao SI2 if(have_SI2==T){ u=3 for(k in ind_aval){ for(j in ind_aval){ if(j>k){ SI2[k,j]<<-sum((MU[,k]-averages_u)*(MU[,j]-averages_u),na.rm = T)/(sqrt(sum((MU[,k]-averages_u)^2,na.rm = T))*sqrt(sum((MU[,j]-averages_u)^2,na.rm = T))) } } contador = 3 + (u/length(ind_aval))*1 setTxtProgressBar(pb,contador) u=u+1 } } #Alteracao SI1 if(have_SI1==T){ u=4 for( k in ind_aval){ for(j in ind_aval){ if(j>k){ SI1[k,j]<<-sum(MU[,k]*MU[,j],na.rm = T)/(sqrt(sum(MU[,k]^2,na.rm = T))*sqrt(sum(MU[,j]^2,na.rm = T))) } } contador = 4 + (u/length(ind_aval))*1 setTxtProgressBar(pb,contador) u=u+1 } } setTxtProgressBar(pb,5) close(pb) }, newrating=function(Id_u,Id_i,r){ "Adds a new rating from user Id_u to item Id_i.The object CF matrices and vectors will be updated. Id_u : a character, a user ID; Id_i : a character, an item ID; r : the rating. If you want to add more ratings, you can use lists, where Id_u and Id_i are lists of characters and r is a list of ratings." 
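      # Update strategy used below: the stored means are refreshed incrementally,
      #   new_mean = old_mean * n/(n + 1) + r/(n + 1),
      # and only the affected similarity entries are recomputed: the SU1/SU2 entries
      # linking Id_u to users who also rated item Id_i, and the SI1/SI2 entries
      # linking Id_i to items also rated by user Id_u.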
if(is.list(Id_u) && is.list(Id_i) && is.list(r)){ u=length(Id_u) for(k in 1:u){ if(k==u){ Id_u = as.character(Id_u[[k]]) Id_i = as.character(Id_i[[k]]) r = as.numeric(r[[k]]) }else{ newrating(Id_u = Id_u[[k]], Id_i = Id_i[[k]], r=r[[k]]) } } } if(!is.character(Id_u)){stop("Id_u must be a character.")} if(!is.character(Id_i)){stop("Id_i must be a character.")} have_SU1=F have_SU2=F have_SI1=F have_SI2=F if(sum(dim(SU1))!=0) have_SU1=TRUE if(sum(dim(SU2))!=0) have_SU2=TRUE if(sum(dim(SI1))!=0) have_SI1=TRUE if(sum(dim(SI2))!=0) have_SI2=TRUE M = nrow(MU) N = ncol(MU) encontrou_i = F for(i in 1:M){ if(rownames(MU)[i]==Id_u){ encontrou_i = T break } } if(!encontrou_i) stop("This is not a valid user.") #i guarda o num da linha de MU correspondente ao usuario Id_u encontrou_j = F for(j in 1:N){ if(colnames(MU)[j]==Id_i){ encontrou_j = T break } } if(!encontrou_j) stop("This is not a valid item.") if(!is.na(MU[i,j])){stop("This rating already exist.If you want change a rating, use $changerating()")} #Alteracao MU MU[i,j]<<-r #Alteracao averages_i e n_aval_i averages_i[j] <<- averages_i[j]*n_aval_i[j]/(n_aval_i[j]+1) + r/(n_aval_i[j]+1) n_aval_i[j] <<- n_aval_i[j] + 1 #Alteracao averages_u e n_aval_u averages_u[i] <<- averages_u[i]*n_aval_u[i]/(n_aval_u[i]+1) + r/(n_aval_u[i]+1) n_aval_u[i] <<- n_aval_u[i] + 1 ind_aval_i<-NULL #indices dos usuarios que avaliaram o item j for(k in 1:M){ if(!is.na(MU[k,j])) ind_aval_i=c(ind_aval_i,k) } #Alteracao SU1 if(have_SU1==T){ for(k in ind_aval_i){ #calcular a similaridade de i com k nova_sim_i_k = sum(MU[k,]*MU[i,],na.rm = T)/(sqrt(sum(MU[k,]^2,na.rm = T))*sqrt(sum(MU[i,]^2,na.rm = T))) if(i!=k){ if(i < k){ SU1[i,k] <<- nova_sim_i_k }else{ SU1[k,i] <<- nova_sim_i_k } } } } #Alteracao SU2 if(have_SU2==T){ for(k in ind_aval_i){ #calcular a similaridade de i com k nova_sim_i_k = sum((MU[k,]-averages_u[k])*(MU[i,]-averages_u[i]),na.rm = T)/(sqrt(sum((MU[k,]-averages_u[k])^2,na.rm = T))*sqrt(sum((MU[i,]-averages_u[i])^2,na.rm = T))) if(i!=k){ if(i < k){ SU2[i,k] <<- nova_sim_i_k }else{ SU2[k,i] <<- nova_sim_i_k } } } } ind_aval_u<-NULL #indice dos itens que foram avaliados por i for(k in 1:N){ if(!is.na(MU[i,k])) ind_aval_u=c(ind_aval_u,k) } #Alteracao SI1 if(have_SI1==T){ for(k in ind_aval_u){ nova_sim_j_k = sum(MU[,k]*MU[,j],na.rm = T)/(sqrt(sum(MU[,k]^2,na.rm = T))*sqrt(sum(MU[,j]^2,na.rm = T))) if(j!=k){ if(j < k){ SI1[j,k] <<- nova_sim_j_k }else{ SI1[k,j] <<- nova_sim_j_k } } } } #Alteração SI2 if(have_SI2==T){ for(k in ind_aval_u){ nova_sim_j_k = sum((MU[,k]-averages_u)*(MU[,j]-averages_u),na.rm = T)/(sqrt(sum((MU[,k]-averages_u)^2,na.rm = T))*sqrt(sum((MU[,j]-averages_u)^2,na.rm = T))) if(j!=k){ if(j < k){ SI2[j,k] <<- nova_sim_j_k }else{ SI2[k,j] <<- nova_sim_j_k } } } } }, addnewemptyuser=function(Id_u){ "Adds a new user without ratings. The object CF matrices and vectors will be updated. Id_u : a character, a user ID. If you want to add more users, you can use lists, where Id_u is a list of characters." 
if(is.list(Id_u)){ u=length(Id_u) for(k in 1:u){ if(k==u){ Id_u = as.character(Id_u[[k]]) }else{ addnewemptyuser(Id_u = Id_u[[k]]) } } } if(!is.character(Id_u)){stop("Id_u must be a character.")} have_SU1=F have_SU2=F if(sum(dim(SU1))!=0) have_SU1=TRUE if(sum(dim(SU2))!=0) have_SU2=TRUE N=ncol(MU) M=nrow(MU) k=M+1 for(i in 1:M){ if(rownames(MU)[i]==Id_u){ stop("This user ID already exist.") } } #Alteracao MU novo_usuario<-matrix(NA,1,N,dimnames=list(Id_u,NULL)) MU<<-rbind(MU,novo_usuario) #Alteracao averages_u e n_aval_u averages_u<<-c(averages_u,0) names(averages_u)[k]<<-Id_u n_aval_u<<-c(n_aval_u,0) names(n_aval_u)[k]<<-Id_u #Alteracao SU1 if(have_SU1==T){ SU1<<-rbind(SU1,NA) rownames(SU1)[k]<<-Id_u SU1<<-cbind(SU1,0) colnames(SU1)[k]<<-Id_u SU1[k,k]<<-1 } #Alteracao SU2 if(have_SU2==T){ SU2<<-rbind(SU2,NA) rownames(SU2)[k]<<-Id_u SU2<<-cbind(SU2,0) colnames(SU2)[k]<<-Id_u SU2[k,k]<<-1 } #Alteracao SI(Nao se altera) }, addnewemptyitem=function(Id_i){ "Adds a new item without ratings. The object CF matrices and vectors will be updated. Id_i : a character, an item ID. If you want to add more items, you can use lists where Id_i is a list of characters." if(is.list(Id_i)){ u=length(Id_i) for(k in 1:u){ if(k==u){ Id_i = as.character(Id_i[[k]]) }else{ addnewemptyitem(Id_i = Id_i[[k]]) } } } if(!is.character(Id_i)){stop("Id_i must be a character.")} have_SI1=F have_SI2=F if(sum(dim(SI1))!=0) have_SI1=TRUE if(sum(dim(SI2))!=0) have_SI2=TRUE M=nrow(MU) N=ncol(MU) k=N+1 for(i in 1:N){ if(colnames(MU)[i]==Id_i){ stop("This Item ID already exist.") } } #Alteracao MU novo_item<-matrix(NA,M,1,dimnames=list(NULL,Id_i)) MU<<-cbind(MU,novo_item) #Alteracao averages_i e n_aval_i averages_i<<-c(averages_i,0) names(averages_i)[k]<<-Id_i n_aval_i<<-c(n_aval_i,0) names(n_aval_i)[k]<<-Id_i #Alteracao SI1 if(have_SI1==T){ SI1<<-rbind(SI1,NA) rownames(SI1)[k]<<-Id_i SI1<<-cbind(SI1,0) colnames(SI1)[k]<<-Id_i SI1[k,k]<<-1 } #Alteracao SI2 if(have_SI2==T){ SI2<<-rbind(SI2,NA) rownames(SI2)[k]<<-Id_i SI2<<-cbind(SI2,0) colnames(SI2)[k]<<-Id_i SI2[k,k]<<-1 } #Alteracao SU(NaO SE ALTERA) }, addnewitem=function(Id_i,Ids_u,r){ "Adds a new item that has been rated by one or more users. The object CF matrices and vectors will be updated. Id_i : a character, an item ID; Ids_u : a character vector, a user IDs; r : a vector with its respective ratings. If you want to add more items, you can use lists, where Id_i is a list of characters; Ids_u is a list of vectors of characters; r is a list of vectors of ratings. " if(is.list(Id_i) && is.list(Ids_u) && is.list(r)){ u=length(Id_i) for(k in 1:u){ if(k==u){ Id_i = as.character(Id_i[[k]]) Ids_u= as.character(Ids_u[[k]]) r= as.numeric(r[[k]]) }else{ addnewitem(Id_i = Id_i[[k]], Ids_u = Ids_u[[k]], r=r[[k]]) } } } if(sum(!is.character(Ids_u))!=0){stop("Ids_i must be a character.")} if(!is.character(Id_i)){stop("Id_i must be a character.")} for(p in 1:length(r)){ if(is.na(r[p])){stop("The vector 'r' can't have NA values.")} } have_SU1=F have_SU2=F have_SI1=F have_SI2=F if(sum(dim(SU1))!=0) have_SU1=TRUE if(sum(dim(SU2))!=0) have_SU2=TRUE if(sum(dim(SI1))!=0) have_SI1=TRUE if(sum(dim(SI2))!=0) have_SI2=TRUE M = dim(MU)[1] N=dim(MU)[2] k=N + 1 #linha do novo usuario for(i in 1:N){ if(colnames(MU)[i]==Id_i){ stop("This Item ID already exist. 
If you want to add a new rating, use $newrating().") } } #Achar os indices dos usuarios que avaliaram o item indices_v=NULL encontrou_i = F for(i in 1:length(Ids_u)){ for(j in 1:M){ if(rownames(MU)[j]==Ids_u[i]){ encontrou_i = T indices_v=c(indices_v,j) break } } if(!encontrou_i) stop("This is not a valid user.") } ##Barra de Progresso message("Progress:") pb <- txtProgressBar(min = 1, max = 5, style = 3) contador = 1 setTxtProgressBar(pb, contador) #### #Alteracao MU novo_item<-matrix(NA,M,1,dimnames=list(NULL,Id_i)) MU<<-cbind(MU,novo_item) t=1 for(i in indices_v){ MU[i,k]<<-r[t] t=t+1 } #Alteracao averages_i averages_i<<-c(averages_i,mean(r,na.rm = T)) names(averages_i)[k]<<-Id_i #Alteracao averages_u, n_aval_i e n_aval_u t=1 for(i in indices_v){ averages_u[i] <<- averages_u[i]*n_aval_u[i]/(n_aval_u[i]+1) + r[t]/(n_aval_u[i]+1) n_aval_u[i] <<- n_aval_u[i] + 1 t=t+1 } n_aval_i<<- c(n_aval_i,length(r)) names(n_aval_i)[k]<<-Id_i #Verificar quais itens avaliados pelos usuarios Ids_u e criar um vetor com a Uniao dos indices(sem repeticao) uniao_indices_j<-NULL for (i in indices_v){ #Cada usuario for(u in 1:N){ #percorre a linha do usuario i if(!is.na(MU[i,u])){ #Verifica se o item e avaliado ou nao if(is.null(uniao_indices_j)){uniao_indices_j=c(uniao_indices_j,u)} #Acrescenta o primeiro indice else{ for(p in 1:length(uniao_indices_j)){ #Verifica se o indice ja existe no vetor uniao_indices_j if(uniao_indices_j[p]==u){ ja_incluido=T break} ja_incluido=F } if(ja_incluido==F){uniao_indices_j=c(uniao_indices_j,u)} #Se o indice nao existe no vetor, acrescente o indice } } } } if(have_SI1==T){ #Alteracao SI1 SI1<<-rbind(SI1,NA) rownames(SI1)[k]<<-Id_i SI1<<-cbind(SI1,0) colnames(SI1)[k]<<-Id_i SI1[k,k]<<-1 u=1 for(i in uniao_indices_j){ contador = 1 + (u/length(uniao_indices_j))*1 setTxtProgressBar(pb,contador) u=u+1 if(i<k){ SI1[i,k]<<-sum(MU[,i]*MU[,k],na.rm = T)/(sqrt(sum(MU[,i]^2,na.rm = T))*sqrt(sum(MU[,k]^2,na.rm = T))) } } } if(have_SI2==T){ #Alteracao SI2 SI2<<-rbind(SI2,0) rownames(SI2)[k]<<-Id_i SI2<<-cbind(SI2,0) colnames(SI2)[k]<<-Id_i SI2[k,k]<<-1 u=2 for(i in uniao_indices_j){ contador = 2 + (u/length(uniao_indices_j))*1 setTxtProgressBar(pb,contador) u=u+1 if(i<k){ SI2[i,k]<<-sum((MU[,i]-averages_u)*(MU[,k]-averages_u),na.rm = T)/ (sqrt(sum((MU[,i]-averages_u)^2,na.rm = T))*sqrt(sum((MU[,k]-averages_u)^2,na.rm = T))) } } } if(have_SU1==T){ #Alteracao SU1 u=3 for(i in indices_v){ contador = 3 + (u/length(indices_v))*1 setTxtProgressBar(pb,contador) u=u+1 for(j in indices_v){ if(j>i){ SU1[i,j]<<-sum(MU[i,]*MU[j,],na.rm = T)/(sqrt(sum(MU[i,]^2,na.rm = T))*sqrt(sum(MU[j,]^2,na.rm = T))) } } } } if(have_SU2==T){ #Alteracao SU2 u=4 for(i in indices_v){ contador = 4 + (u/length(indices_v))*1 setTxtProgressBar(pb,contador) u=u+1 for(j in indices_v){ if(j>i){ SU2[i,j]<<-sum((MU[i,]-averages_u[i])*(MU[j,]-averages_u[j]),na.rm = T)/(sqrt(sum((MU[i,]-averages_u[i])^2,na.rm = T))*sqrt(sum((MU[j,]-averages_u[j])^2,na.rm = T))) } } } } setTxtProgressBar(pb,5) close(pb) }, deleteitem=function(Id_i){"Deletes an already registered item. The object CF matrices and vectors will be updated. Id_i : a character, a item ID that will be deleted. If you want to delete more items, you can use lists, where Id_i is a list of characters." 
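      # Update strategy used below: deleting an item undoes the incremental mean
      # update for every user who had rated it,
      #   new_mean = old_mean * n/(n - 1) - r/(n - 1)
      # (this assumes those users have at least one other rating), removes the item's
      # row/column from SI1/SI2, and recomputes the user similarities affected by the
      # removed ratings.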
if(is.list(Id_i)){ u=length(Id_i) for(k in 1:u){ if(k==u){ Id_i = as.character(Id_i[[k]]) }else{ deleteitem(Id_i = Id_i[[k]]) } } } if(!is.character(Id_i)){stop("Id_i must be a character.")} M = dim(MU)[1] N=dim(MU)[2] k=N + 1 #linha do novo usuario for(k in 1:N){ if(colnames(MU)[k]==Id_i){ j=k break } if(k==N){stop("This Item ID doesn't exist. If you want to add a new item, use $addnewitem().")} } have_SU1=F have_SU2=F have_SI1=F have_SI2=F if(sum(dim(SU1))!=0) have_SU1=TRUE if(sum(dim(SU2))!=0) have_SU2=TRUE if(sum(dim(SI1))!=0) have_SI1=TRUE if(sum(dim(SI2))!=0) have_SI2=TRUE #Vetores "avaliacoes" e "ind_aval" que representam respectivamente, as avaliações e os indices dos usuarios que avaliaram o item Id_i avaliacoes = NULL ind_aval = NULL t=1 for(k in MU[,j]){ if(!is.na(MU[t,j])){ avaliacoes<-c(avaliacoes,k) ind_aval<-c(ind_aval,t)} t=t+1 } ##Barra de Progresso message("Progress:") pb <- txtProgressBar(min = 1, max = 5, style = 3) contador = 1 setTxtProgressBar(pb, contador) #### #Alteracao MU MU<<-MU[,-j] #Alteracao averages_i e n_aval_i averages_i<<-averages_i[-j] n_aval_i<<-n_aval_i[-j] #Alteracao averages_u e n_aval_u t=1 for(k in ind_aval){ averages_u[k] <<- averages_u[k]*n_aval_u[k]/(n_aval_u[k]-1) - avaliacoes[t]/(n_aval_u[k]-1) n_aval_u[k] <<- n_aval_u[k] - 1 t=t+1 } #Alteracao SI1 if(have_SI1==T){ SI1<<-SI1[-j,-j] } #Alteracao SI2 if(have_SI2==T){ SI2<<-SI2[-j,-j] } #Alteracao SU1 if(have_SU1==T){ u=3 for(i in ind_aval){ contador = 3 + (u/length(ind_aval))*1 setTxtProgressBar(pb,contador) u=u+1 for(k in ind_aval){ if(k>i){ SU1[i,k]<<-sum(MU[i,]*MU[k,],na.rm = T)/(sqrt(sum(MU[i,]^2,na.rm = T))*sqrt(sum(MU[k,]^2,na.rm = T))) } } } } if(have_SU2==T){ #Alteracao SU2 u=4 for(i in ind_aval){ contador = 4 + (u/length(ind_aval))*1 setTxtProgressBar(pb,contador) u=u+1 for(k in ind_aval){ if(k>i){ SU2[i,k]<<-sum((MU[i,]-averages_u[i])*(MU[k,]-averages_u[k]),na.rm = T)/(sqrt(sum((MU[i,]-averages_u[i])^2,na.rm = T))*sqrt(sum((MU[k,]-averages_u[k])^2,na.rm = T))) } } } } setTxtProgressBar(pb,5) close(pb)}, recommend=function(Id_u,Id_i,type,neighbors=5,cuts=3.5,similarity=ifelse(type=='user','pearson','adjcos')){ "A function that returns True if user Id_u will like item Id_i or returns FALSE, otherwise. The recommendation can be made through similarity between users, when type = 'user', as well as through the similarity between items, when type = 'item'. Id_u : a character, a User ID; Id_i : a character, an Item ID; type: a character string, 'user' or 'item'; neighbors: number of similarities used for to estimates (default = 5); cuts: cut score designated to determine if it is recommended (default=3.5); similarity: the methodology used to estimate the rating. If type = 'user', must be one of 'cos', for cosine similarity, or 'pearson' (default), for pearson similarity. If type='item', must be one of 'cos', for cosine similarity, or 'adjcos' (default), for adjusted cosine similarity. This choice can alter the way the estimate is calculated." 
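      # Prediction rule used below: among the users (type = 'user') or items
      # (type = 'item') with positive similarity that have a rating for the target,
      # up to `neighbors` nearest ones are kept and the rating is estimated as
      #   cos / adjcos : r_hat = sum(s_k * r_k) / sum(s_k)
      #   pearson      : r_hat = mean(Id_u) + sum(s_k * (r_k - mean(k))) / sum(s_k)
      # The function returns TRUE when r_hat >= cuts, and FALSE otherwise (including
      # the case where no usable neighbor exists).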
if(!is.character(similarity)){stop("similarity must be a character.")} if(!is.character(Id_u)){stop("Id_u must be a character.")} if(!is.character(Id_i)){stop("Id_i must be a character.")} if(type!="user" && type!="item"){stop("type should be one of 'user' or 'item'.")} if(type=="user" && similarity == "adjcos"){stop("If type = 'user', similarity can be only 'cos' or 'pearson'.")} if(type=="item" && similarity == "pearson"){stop("If type = 'item', similarity can be only 'cos' or 'adjcos'.")} if(similarity !="cos" && similarity !="adjcos" && similarity !="pearson"){stop("similarity should be one of 'cos', 'adjcos' or 'pearson'.")} if(sum(dim(SU1))==0 && type=='user' && similarity == 'cos'){stop("if you want to make this recommendation, add the methodology of cosine similarity using the function add_cosine()")} if(sum(dim(SU2))==0 && type=='user' && similarity == 'pearson'){stop("if you want to make this recommendation, add the methodology of pearson similarity using the function add_pearson()")} if(sum(dim(SI1))==0 && type=='item' && similarity == 'cos'){stop("if you want to make this recommendation, add the methodology of cosine similarity using the function add_cosine()")} if(sum(dim(SI2))==0 && type=='item' && similarity == 'adjcos'){stop("if you want to make this recommendation, add the methodology of adjusted cosine similarity using the function add_adjcos()")} M = nrow(MU) encontrou_i = F for(i in 1:M){ if(rownames(MU)[i]==Id_u){ encontrou_i = T break } } if(!encontrou_i) stop("This is not a valid user.") #i guarda o num da linha de MU correspondente ao usuario Id_u N = ncol(MU) encontrou_j = F for(j in 1:N){ if(colnames(MU)[j]==Id_i){ encontrou_j = T break } } if(!encontrou_j) stop("This is not a valid item.") if(type=="user"){ #busca os n indices dos usuarios mais proximos de i que avaliaram j #no final v_ind e v_valores sao arrays de tamanho n ou menos, #a primeira posicao guarda um array com indices dos usuarios acima e a #segunda posicao um array com os valores das similaridades entre entre esses #usuarios e o usuario k if(similarity=="cos"){ M = ncol(SU1) if(i==1){copia=c(NA,SU1[i,(i+1):M])}else{if(i==M){copia=c(SU1[1:(i-1),i],NA)}else{copia = c(SU1[1:(i-1),i],NA,SU1[i,(i+1):M])}} #varrer o copia colocando NA nos usuarios que nao avaliaram j for(l in 1:M){ if(is.na(MU[l,j])){ copia[l] = NA } } if(sum(!is.na(copia))==0){return(F)}else{ v_ind = NULL v_valor = NULL for(l in 1:neighbors){ if(sum(!is.na(copia))==0){break} ind = which.max(copia) valor = max(copia,na.rm = T) if(valor <= 0) break v_ind = c(v_ind,ind) v_valor = c(v_valor,valor) copia[ind] = NA } #agora v_ind e v_valor guardam os indices e as similaridades dos usuarios mais semelhantes a i que avaliaram j #cuidado, os arrays podem ser vazios. 
n_ = length(v_ind) if(n_ == 0){ message("It isn't possible to estimate the rating for this user and item.") return(FALSE) } nota = sum(MU[v_ind,j]*v_valor)/sum(v_valor) if(nota>=cuts) return(T) return(F) } } if(similarity=="pearson"){ M = ncol(SU2) if(i==1){copia=c(NA,SU2[i,(i+1):M])}else{if(i==M){copia=c(SU2[1:(i-1),i],NA)}else{copia = c(SU2[1:(i-1),i],NA,SU2[i,(i+1):M])}} #varrer o copia colocando NA nos usuarios que nao avaliaram j for(l in 1:M){ if(is.na(MU[l,j])){ copia[l] = NA } } if(sum(!is.na(copia))==0){return(F)}else{ v_ind = NULL v_valor = NULL for(l in 1:neighbors){ if(sum(!is.na(copia))==0){break} ind = which.max(copia) valor = max(copia,na.rm = T) if(valor <= 0) break v_ind = c(v_ind,ind) v_valor = c(v_valor,valor) copia[ind] = NA } #agora v_ind e v_valor guardam os indices e as similaridades dos usuarios mais semelhantes a i que avaliaram j #cuidado, os arrays podem ser vazios. n_ = length(v_ind) if(n_ == 0){ message("It isn't possible to estimate the rating for this user and item.") return(FALSE) } nota = averages_u[i] + ((sum((MU[v_ind,j]-averages_u[v_ind])*v_valor))/sum(v_valor)) nota = as.numeric(nota) if(nota>=cuts) return(T) return(F) } } } if(type=="item"){ #busca os n indices dos itens mais proximos de j que foram avaliados por i #no final v_ind e v_valores sao arrays de tamanho n ou menos, #a primeira posicao guarda um array com indices dos itens acima e a #segunda posicao um array com os valores das similaridades entre entre esses #itens e o item k if(similarity=="cos"){ N = ncol(SI1) if(j==1){copia=c(NA,SI1[j,(j+1):N])}else{if(j==N){copia=c(SI1[1:(j-1),j],NA)}else{copia = c(SI1[1:(j-1),j],NA,SI1[j,(j+1):N])}} #varrer o copia colocando NA nos usuarios que nao avaliaram j for(l in 1:N){ if(is.na(MU[i,l])){ copia[l] = NA } } if(sum(!is.na(copia))==0){return(F)}else{ v_ind = NULL v_valor = NULL for(l in 1:neighbors){ if(sum(!is.na(copia))==0){break} ind = which.max(copia) valor = max(copia,na.rm = T) if(valor <= 0) break v_ind = c(v_ind,ind) v_valor = c(v_valor,valor) copia[ind] = NA } #agora v_ind e v_valor guardam os indices e as similaridades dos itens mais semelhantes a j que foram avaliados por i #cuidado, os arrays podem ser vazios. n_ = length(v_ind) if(n_ == 0){ message("It isn't possible to estimate the rating for this user and item.") return(FALSE) } nota = sum(MU[i,v_ind]*v_valor)/sum(v_valor) if(nota>=cuts) return(T) return(F) } } if(similarity=="adjcos"){ N = ncol(SI2) if(j==1){copia=c(NA,SI2[j,(j+1):N])}else{if(j==N){copia=c(SI2[1:(j-1),j],NA)}else{copia = c(SI2[1:(j-1),j],NA,SI2[j,(j+1):N])}} #varrer o copia colocando NA nos usuarios que nao avaliaram j for(l in 1:N){ if(is.na(MU[i,l])){ copia[l] = NA } } if(sum(!is.na(copia))==0){return(F)}else{ v_ind = NULL v_valor = NULL for(l in 1:neighbors){ if(sum(!is.na(copia))==0){break} ind = which.max(copia) valor = max(copia,na.rm = T) if(valor <= 0) break v_ind = c(v_ind,ind) v_valor = c(v_valor,valor) copia[ind] = NA } #agora v_ind e v_valor guardam os indices e as similaridades dos itens mais semelhantes a j que foram avaliados por i #cuidado, os arrays podem ser vazios. n_ = length(v_ind) if(n_ == 0){ message("It isn't possible to estimate the rating for this user and item.") return(FALSE) } nota = sum(MU[i,v_ind]*v_valor)/sum(v_valor) if(nota>=cuts){ return(T)} return(F) } } } }, kclosestitems=function(Id_i,k=5,similarity='adjcos'){ "A function that returns the k items most similar to an item. 
Id_i : A Character, a Item ID; k : Number of items most similar to item Id_i (deafult = 5); similarity: the methodology used to estimate the rating. Must be one of 'cos', for cosine similarity, or 'adjcos' (default), for adjusted cosine similarity. This choice can alter the way the estimate is calculated." if(!is.character(Id_i)){stop("Id_i must be a character.")} if(!is.character(similarity)){stop("similarity must be a character.")} if(similarity !="cos" && similarity !="adjcos"){stop("similarity should be one of 'cos' or 'adjcos'.")} if(sum(dim(SI1))==0 && similarity == 'cos'){stop("if you want to make this recommendation, add the methodology of cosine similarity using the function add_cosine()")} if(sum(dim(SI2))==0 && similarity == 'adjcos'){stop("if you want to make this recommendation, add the methodology of adjusted cosine similarity using the function add_adjcos()")} N = ncol(MU) encontrou_j = F for(j in 1:N){ if(colnames(MU)[j]==Id_i){ encontrou_j = T break } } if(!encontrou_j) stop("This is not a valid item.") #j guarda o num da coluna de MU correspondente ao item Id_i if(similarity=="cos"){ #Criaremos o vetor copia com as respectivas similaridades dos itens com o item Id_i if(j==1){copia = c(NA,SI1[j,(j+1):N])}else{if(j==ncol(SI1)){copia=c(SI1[1:(j-1),j],NA)}else{ copia = c(SI1[1:(j-1),j],NA,SI1[j,(j+1):N])}} v_nomes = NULL #vetor com os nomes dos itens for(l in 1:k){ # encontrar os n itens mais similares ao item Id_i, guardando os indices, os valores e os respectivos nomes. ind = which.max(copia) valor = max(copia,na.rm = T) if(valor <= 0) break v_nomes = c(v_nomes,colnames(MU)[ind]) copia[ind] = NA } return(v_nomes) } if(similarity=="adjcos"){ #Criaremos o vetor copia com as respectivas similaridades dos itens com o item Id_i if(j==1){copia = c(NA,SI2[j,(j+1):N])}else{if(j==ncol(SI2)){copia=c(SI2[1:(j-1),j],NA)}else{ copia = c(SI2[1:(j-1),j],NA,SI2[j,(j+1):N])}} v_nomes = NULL #vetor com os nomes dos itens for(l in 1:k){ # encontrar os n itens mais similares ao item Id_i, guardando os indices, os valores e os respectivos nomes. ind = which.max(copia) valor = max(copia,na.rm = T) if(valor <= 0) break v_nomes = c(v_nomes,colnames(MU)[ind]) copia[ind] = NA } return(v_nomes) } }, topkitems=function(Id_u,k=5,type,neighbors=5,cuts=3.5,similarity =ifelse(type=='user','pearson','adjcos')){ "A function that recommends k items for an Id_u user. The recommendation can be made through similarity between users, when type = 'user', as well as through similarity between items, when type = 'item'. Id_u : a character, a User ID; k : number of recommendations (default=5); type: a character string, 'user' or 'item'; neighbors: number of similarities used for the estimates(default=5); cuts: cut score designated to determine if it is recommended (default = 3.5); similarity: the methodology used to estimate the rating. If type = 'user', must be one of 'cos', for cosine similarity, or 'pearson' (default), for pearson similarity. If type='item', must be one of 'cos', for cosine similarity, or 'adjcos' (default), for adjusted cosine similarity. This choice can alter the way the estimate is calculated." 
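  # Illustrative usage note, kept as comments so the method body is unchanged. The object
  # name `objectCF`, the user id "1" and the item id "Iron Man 3" are hypothetical placeholders:
  #   objectCF$kclosestitems(Id_i = "Iron Man 3", k = 3)    # k items most similar to an item
  #   objectCF$topkitems(Id_u = "1", k = 5, type = "user")  # up to k recommendations for a user
  # Both calls require the corresponding similarity matrix (SU1/SU2/SI1/SI2) to have been built
  # by CFbuilder() or added later with add_cosine()/add_pearson()/add_adjcos().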
if(!is.character(similarity)){stop("similarity must be a character.")} if(!is.character(Id_u)){stop("Id_u must be a character.")} if(type!="user" && type!="item"){stop("type should be one of 'user' or 'item'.")} if(type=="user" && similarity == "adjcos"){stop("If type = 'user', similarity can be only 'cos' or 'pearson'.")} if(type=="item" && similarity == "pearson"){stop("If type = 'item', similarity can be only 'cos' or 'adjcos'.")} if(similarity !="cos" && similarity !="adjcos" && similarity !="pearson"){stop("similarity should be one of 'cos', 'adjcos' or 'pearson'.")} if(sum(dim(SU1))==0 && type=='user' && similarity == 'cos'){stop("if you want to make this recommendation, add the methodology of cosine similarity using the function add_cosine()")} if(sum(dim(SU2))==0 && type=='user' && similarity == 'pearson'){stop("if you want to make this recommendation, add the methodology of pearson similarity using the function add_pearson()")} if(sum(dim(SI1))==0 && type=='item' && similarity == 'cos'){stop("if you want to make this recommendation, add the methodology of cosine similarity using the function add_cosine()")} if(sum(dim(SI2))==0 && type=='item' && similarity == 'adjcos'){stop("if you want to make this recommendation, add the methodology of adjusted cosine similarity using the function add_adjcos()")} M = nrow(MU) N = ncol(MU) encontrou_i = F for(i in 1:M){ if(rownames(MU)[i]==Id_u){ encontrou_i = T break } } if(!encontrou_i) stop("This is not a valid user.") #i guarda o num da linha de MU correspondente ao usuario Id_u message("Progress:") pb <- txtProgressBar(min = 1, max = 3, style = 3) if(type=="item"){ if(similarity=="cos"){ v_notas=NULL v_ind = NULL for(j in 1:N){ # Percorrer o vetor das avaliacoes do usuario Id_u, e estimar as avaliacoes dos itens nao avaliados por ele. contador = 1 + (j/N)*1 setTxtProgressBar(pb,contador) if(is.na(MU[i,j])){ v_ind_b = NULL v_valor_b = NULL if(j==1){copia = c(NA,SI1[j,(j+1):N])}else{if(j==ncol(SI1)){copia=c(SI1[1:(j-1),j],NA)}else{ copia = c(SI1[1:(j-1),j],NA,SI1[j,(j+1):N])}} for(l in 1:N){ if(is.na(MU[i,l])){ copia[l] = NA } } if(sum(!is.na(copia))==0){nota=NA}else{ for(s in 1:neighbors){ if(sum(!is.na(copia))==0){break} ind = which.max(copia) valor = max(copia,na.rm = T) if(valor <= 0) break v_ind_b = c(v_ind_b,ind) v_valor_b = c(v_valor_b,valor) copia[ind] = NA } n_=length(v_ind_b) if(n_==0){nota=NA}else{#Caso nao seja possivel estimar, nota=NA nota= sum(MU[i,v_ind_b]*v_valor_b,na.rm=T)/sum(v_valor_b)} } v_notas=c(v_notas,nota) v_ind=c(v_ind,j) } } nomes = NULL for(u in 1:k){ contador = 2 + (u/k) setTxtProgressBar(pb,contador) if(sum(!is.na(v_notas))==0){break} nota<-max(v_notas,na.rm=T) indice<-v_ind[which.max(v_notas)] #Pega o indice da nota estimada referente a MU if(nota<cuts){break} nomes <- c(nomes,colnames(MU)[indice]) v_notas[which.max(v_notas)] = NA } if(length(nomes)==0){ message("Sorry, we don't have any items to recommend to this user.") return(NA) } setTxtProgressBar(pb,3) close(pb) return(nomes) } if(similarity=="adjcos"){ v_notas=NULL v_ind = NULL for(j in 1:N){ # Percorrer o vetor das avaliacoes do usuario Id_u, e estimar as avaliacoes dos itens nao avaliados por ele. 
contador = 1 + (j/N)*1 setTxtProgressBar(pb,contador) if(is.na(MU[i,j])){ v_ind_b = NULL v_valor_b = NULL if(j==1){copia = c(NA,SI2[j,(j+1):N])}else{if(j==ncol(SI2)){copia=c(SI2[1:(j-1),j],NA)}else{ copia = c(SI2[1:(j-1),j],NA,SI2[j,(j+1):N])}} for(l in 1:N){ if(is.na(MU[i,l])){ copia[l] = NA } } if(sum(!is.na(copia))==0){nota=NA}else{ for(s in 1:neighbors){ if(sum(!is.na(copia))==0){break} ind = which.max(copia) valor = max(copia,na.rm = T) if(valor <= 0) break v_ind_b = c(v_ind_b,ind) v_valor_b = c(v_valor_b,valor) copia[ind] = NA } n_=length(v_ind_b) if(n_==0){nota=NA}else{#Caso nao seja possivel estimar, nota=NA nota= sum(MU[i,v_ind_b]*v_valor_b,na.rm=T)/sum(v_valor_b)} } v_notas=c(v_notas,nota) v_ind=c(v_ind,j) } } nomes = NULL for(u in 1:k){ contador = 2 + (u/k)*1 setTxtProgressBar(pb,contador) if(sum(!is.na(v_notas))==0){break} nota<-max(v_notas,na.rm=T) indice<-v_ind[which.max(v_notas)] #Pega o indice da nota estimada referente a MU if(nota<cuts){break} nomes <- c(nomes,colnames(MU)[indice]) v_notas[which.max(v_notas)] = NA } if(length(nomes)==0){ message("Sorry, we don't have any items to recommend to this user.") return(NA) } setTxtProgressBar(pb,3) close(pb) return(nomes) } } if(type=="user"){ if(similarity=="cos"){ v_notas= NULL v_ind = NULL for(j in 1:N){ # Percorrer o vetor das avaliacoes do usuario Id_u, e estimar as avaliacoes dos itens nao avaliados por ele. contador = 1 + (j/N)*1 setTxtProgressBar(pb,contador) if(is.na(MU[i,j])){ v_ind_b = NULL v_valor_b = NULL if(i==1){copia=c(NA,SU1[i,(i+1):M])}else{if(i==M){copia=c(SU1[1:(i-1),i],NA)}else{copia = c(SU1[1:(i-1),i],NA,SU1[i,(i+1):M])}} #varrer o copia colocando NA nos usuarios que nao avaliaram j for(l in 1:M){ if(is.na(MU[l,j])){ copia[l] = NA } } if(sum(!is.na(copia))==0){nota=NA}else{ for(s in 1:neighbors){ if(sum(!is.na(copia))==0){break} ind = which.max(copia) valor = max(copia,na.rm = T) if(valor <= 0) break v_ind_b = c(v_ind_b,ind) v_valor_b = c(v_valor_b,valor) copia[ind] = NA } n_=length(v_ind_b) if(n_==0){nota=NA}else{#Caso nao seja possivel estimar, nota=NA nota= sum(MU[v_ind_b,j]*v_valor_b,na.rm=T)/sum(v_valor_b)} } v_notas=c(v_notas,nota) v_ind=c(v_ind,j) } } nomes = NULL for(u in 1:k){ contador = 2 + (u/k)*1 setTxtProgressBar(pb,contador) if(sum(!is.na(v_notas))==0){break} nota<-max(v_notas,na.rm=T) indice<-v_ind[which.max(v_notas)] #Pega o indice da nota estimada referente a MU if(nota<cuts){break} nomes <- c(nomes,colnames(MU)[indice]) v_notas[which.max(v_notas)] = NA } if(length(nomes)==0){ message("Sorry, we don't have any items to recommend to this user.") return(NA) } setTxtProgressBar(pb,3) close(pb) return(nomes) } #Utilizando a SU 2 - Coeficiente de Pearson (Coseno Ajustado) if(similarity=="pearson"){ v_notas= NULL v_ind = NULL for(j in 1:N){ # Percorrer o vetor das avaliacoes do usuario Id_u, e estimar as avaliacoes dos itens nao avaliados por ele. 
contador = 1 + (j/N)*1 setTxtProgressBar(pb,contador) if(is.na(MU[i,j])){ v_ind_b = NULL v_valor_b = NULL if(i==1){copia=c(NA,SU2[i,(i+1):M])}else{if(i==M){copia=c(SU2[1:(i-1),i],NA)}else{copia = c(SU2[1:(i-1),i],NA,SU2[i,(i+1):M])}} #varrer o copia colocando NA nos usuarios que nao avaliaram j for(l in 1:M){ if(is.na(MU[l,j])){ copia[l] = NA } } if(sum(!is.na(copia))==0){nota=NA}else{ for(s in 1:neighbors){ if(sum(!is.na(copia))==0){break} ind = which.max(copia) valor = max(copia,na.rm = T) if(valor <= 0) break v_ind_b = c(v_ind_b,ind) v_valor_b = c(v_valor_b,valor) copia[ind] = NA } n_=length(v_ind_b) if(n_==0){nota=NA}else{#Caso nao seja possivel estimar, nota=NA nota= averages_u[i] + (sum((MU[v_ind_b,j]-averages_u[v_ind_b])*v_valor_b,na.rm=T)/sum(v_valor_b))} nota = as.numeric(nota) } v_notas=c(v_notas,nota) v_ind=c(v_ind,j) } } nomes = NULL for(u in 1:k){ contador = 2 + (u/k)*1 setTxtProgressBar(pb,contador) if(sum(!is.na(v_notas))==0){break} nota<-max(v_notas,na.rm=T) indice<-v_ind[which.max(v_notas)] #Pega o indice da nota estimada referente a MU if(nota<cuts){break} nomes <- c(nomes,colnames(MU)[indice]) v_notas[which.max(v_notas)] = NA } if(length(nomes)==0){ message("Sorry, we don't have any items to recommend to this user.") return(NA) } setTxtProgressBar(pb,3) close(pb) return(nomes) } } }, topkusers=function(Id_i,k=5,type,neighbors=5,cuts=3.5,similarity =ifelse(type=='user','pearson','adjcos')){ "A function that indicates the k users who will like the item Id_i.The recommendation can be made through similarity between users, when type = 'user', as well as through similarity between items, when type = 'item'. Id_i : A Character, a Item ID; k : Number of recommendations (default=5); type: A character string, 'user' or 'item'; neighbors: Number of similarities used for the estimates (default=5); cuts: Cut score designated to determine if it is recommended (default=3.5); similarity: the methodology used to estimate the rating. If type = 'user', must be one of 'cos', for cosine similarity, or 'pearson' (default), for pearson similarity. If type='item', must be one of 'cos', for cosine similarity, or 'adjcos' (default), for adjusted cosine similarity. This choice can alter the way the estimate is calculated." 
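  # Illustrative usage note (comments only; `objectCF` and the item id are hypothetical):
  #   objectCF$topkusers(Id_i = "Iron Man 3", k = 5, type = "item")
  # returns up to k users whose estimated rating for the item reaches the `cuts` threshold.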
if(!is.character(similarity)){stop("similarity must be a character.")} if(!is.character(Id_i)){stop("Id_i must be a character.")} if(type!="user" && type!="item"){stop("type should be one of 'user' or 'item'.")} if(type=="user" && similarity == "adjcos"){stop("If type = 'user', similarity can be only 'cos' or 'pearson'.")} if(type=="item" && similarity == "pearson"){stop("If type = 'item', similarity can be only 'cos' or 'adjcos'.")} if(similarity !="cos" && similarity !="adjcos" && similarity !="pearson"){stop("similarity should be one of 'cos', 'adjcos' or 'pearson'.")} if(sum(dim(SU1))==0 && type=='user' && similarity == 'cos'){stop("if you want to make this recommendation, add the methodology of cosine similarity using the function add_cosine()")} if(sum(dim(SU2))==0 && type=='user' && similarity == 'pearson'){stop("if you want to make this recommendation, add the methodology of pearson similarity using the function add_pearson()")} if(sum(dim(SI1))==0 && type=='item' && similarity == 'cos'){stop("if you want to make this recommendation, add the methodology of cosine similarity using the function add_cosine()")} if(sum(dim(SI2))==0 && type=='item' && similarity == 'adjcos'){stop("if you want to make this recommendation, add the methodology of adjusted cosine similarity using the function add_adjcos()")} M = nrow(MU) N = ncol(MU) encontrou_j = F for(j in 1:N){ if(colnames(MU)[j]==Id_i){ encontrou_j = T break } } if(!encontrou_j) stop("This is not a valid item.") message("Progress:") pb <- txtProgressBar(min = 1, max = 3, style = 3) if(type=="user"){ ####type = user - VIA SU if(similarity=="cos"){ ### VIA SU1 - Distancia Cosseno v_notas= NULL v_ind = NULL for(i in 1:M){ # Percorrer o vetor das avaliacoes do usuario Id_u, e estimar as avaliacoes dos itens nao avaliados por ele. contador = 1 + (i/M)*1 setTxtProgressBar(pb,contador) if(is.na(MU[i,j])){ v_ind_b = NULL v_valor_b = NULL M = ncol(SU1) if(i==1){copia=c(NA,SU1[i,(i+1):M])}else{if(i==M){copia=c(SU1[1:(i-1),i],NA)}else{copia = c(SU1[1:(i-1),i],NA,SU1[i,(i+1):M])}} #varrer o copia colocando NA nos usuarios que nao avaliaram j for(l in 1:M){ if(is.na(MU[l,j])){ copia[l] = NA } } if(sum(!is.na(copia))==0){nota=NA}else{ for(s in 1:neighbors){ if(sum(!is.na(copia))==0){break} ind = which.max(copia) valor = max(copia,na.rm = T) if(valor <= 0) break v_ind_b = c(v_ind_b,ind) v_valor_b = c(v_valor_b,valor) copia[ind] = NA } n_=length(v_ind_b) if(n_==0){nota=NA}else{#Caso nao seja possivel estimar, nota=NA nota= sum(MU[v_ind_b,j]*v_valor_b,na.rm=T)/sum(v_valor_b)} } v_notas=c(v_notas,nota) v_ind=c(v_ind,i) } } nomes = NULL for(u in 1:k){ contador = 2 + (u/k) setTxtProgressBar(pb,contador) nota<-max(v_notas,na.rm=T) indice<-v_ind[which.max(v_notas)] #Pega o indice da nota estimada referente a MU if(nota<cuts){break} nomes <- c(nomes,rownames(MU)[indice]) v_notas[which.max(v_notas)] = NA } if(length(nomes)==0){ message("Sorry, we don't have any users to recommend to this item.") return(NA)} setTxtProgressBar(pb,3) close(pb) return(nomes) } if(similarity=="pearson"){ v_notas= NULL v_ind = NULL for(i in 1:M){ # Percorrer o vetor das avaliacoes do usuario Id_u, e estimar as avaliacoes dos itens nao avaliados por ele. 
contador = 1 + (i/M)*1 setTxtProgressBar(pb,contador) if(is.na(MU[i,j])){ v_ind_b = NULL v_valor_b = NULL M = ncol(SU2) if(i==1){copia=c(NA,SU2[i,(i+1):M])}else{if(i==M){copia=c(SU2[1:(i-1),i],NA)}else{copia = c(SU2[1:(i-1),i],NA,SU2[i,(i+1):M])}} #varrer o copia colocando NA nos usuarios que nao avaliaram j for(l in 1:M){ if(is.na(MU[l,j])){ copia[l] = NA } } if(sum(!is.na(copia))==0){nota=NA}else{ for(s in 1:neighbors){ if(sum(!is.na(copia))==0){break} ind = which.max(copia) valor = max(copia,na.rm = T) if(valor <= 0) break v_ind_b = c(v_ind_b,ind) v_valor_b = c(v_valor_b,valor) copia[ind] = NA } n_=length(v_ind_b) if(n_==0){nota=NA}else{#Caso nao seja possivel estimar, nota=NA nota = averages_u[i] + (sum((MU[v_ind_b,j]-averages_u[v_ind_b])*v_valor_b,na.rm=T)/sum(v_valor_b))} nota = as.numeric(nota) } v_notas=c(v_notas,nota) v_ind=c(v_ind,i) } } nomes = NULL for(u in 1:k){ contador = 2 + (u/k)*1 setTxtProgressBar(pb,contador) nota<-max(v_notas,na.rm=T) indice<-v_ind[which.max(v_notas)] #Pega o indice da nota estimada referente a MU if(nota<cuts){break} nomes <- c(nomes,rownames(MU)[indice]) v_notas[which.max(v_notas)] = NA } if(length(nomes)==0){ message("Sorry, we don't have any users to recommend to this item.") return(NA) } setTxtProgressBar(pb,3) close(pb) return(nomes) } } if(type=="item"){ ## type = "item" if(similarity=="cos"){ v_notas=NULL v_ind = NULL for(i in 1:M){ # Percorrer o vetor das avaliacoes do usuario Id_u, e estimar as avaliacoes dos itens nao avaliados por ele. contador = 1 + (i/M)*1 setTxtProgressBar(pb,contador) if(is.na(MU[i,j])){ v_ind_b = NULL v_valor_b = NULL if(j==1){copia = c(NA,SI1[j,(j+1):N])}else{if(j==ncol(SI1)){copia=c(SI1[1:(j-1),j],NA)}else{ copia = c(SI1[1:(j-1),j],NA,SI1[j,(j+1):N])}} for(l in 1:N){ if(is.na(MU[i,l])){ copia[l] = NA } } if(sum(!is.na(copia))==0){nota=NA}else{ for(s in 1:neighbors){ if(sum(!is.na(copia))==0){break} ind = which.max(copia) valor = max(copia,na.rm = T) if(valor <= 0) break v_ind_b = c(v_ind_b,ind) v_valor_b = c(v_valor_b,valor) copia[ind] = NA } n_=length(v_ind_b) if(n_==0){nota=NA}else{#Caso nao seja possivel estimar, nota=NA nota= sum(MU[i,v_ind_b]*v_valor_b,na.rm=T)/sum(v_valor_b)} } v_notas=c(v_notas,nota) v_ind=c(v_ind,i) } } nomes = NULL for(u in 1:k){ contador = 2 + (u/k)*1 setTxtProgressBar(pb,contador) nota<-max(v_notas,na.rm=T) indice<-v_ind[which.max(v_notas)] #Pega o indice da nota estimada referente a MU if(nota<cuts){break} nomes <- c(nomes,rownames(MU)[indice]) v_notas[which.max(v_notas)] = NA } if(length(nomes)==0){ message("Sorry, we don't have any users to recommend to this item.") return(NA) } setTxtProgressBar(pb,3) close(pb) return(nomes) } if(similarity=="adjcos"){ v_notas=NULL v_ind = NULL for(i in 1:M){ # Percorrer o vetor das avaliacoes do usuario Id_u, e estimar as avaliacoes dos itens nao avaliados por ele. 
contador = 1 + (i/M)*1 setTxtProgressBar(pb,contador) if(is.na(MU[i,j])){ v_ind_b = NULL v_valor_b = NULL if(j==1){copia = c(NA,SI2[j,(j+1):N])}else{if(j==ncol(SI2)){copia=c(SI2[1:(j-1),j],NA)}else{ copia = c(SI2[1:(j-1),j],NA,SI2[j,(j+1):N])}} for(l in 1:N){ if(is.na(MU[i,l])){ copia[l] = NA } } if(sum(!is.na(copia))==0){nota=NA}else{ for(s in 1:neighbors){ if(sum(!is.na(copia))==0){break} ind = which.max(copia) valor = max(copia,na.rm = T) if(valor <= 0) break v_ind_b = c(v_ind_b,ind) v_valor_b = c(v_valor_b,valor) copia[ind] = NA } n_=length(v_ind_b) if(n_==0){nota=NA}else{#Caso nao seja possivel estimar, nota=NA nota= sum(MU[i,v_ind_b]*v_valor_b,na.rm=T)/sum(v_valor_b)} } v_notas=c(v_notas,nota) v_ind=c(v_ind,i) } } nomes = NULL for(u in 1:k){ contador = 2 + (u/k)*1 setTxtProgressBar(pb,contador) nota<-max(v_notas,na.rm=T) indice<-v_ind[which.max(v_notas)] #Pega o indice da nota estimada referente a MU if(nota<cuts){break} nomes <- c(nomes,rownames(MU)[indice]) v_notas[which.max(v_notas)] = NA } if(length(nomes)==0){ message("Sorry, we don't have any users to recommend to this item.") return(NA) } setTxtProgressBar(pb,3) close(pb) return(nomes) } } }, estimaterating=function(Id_u,Id_i,type,neighbors=5,similarity =ifelse(type=='user','pearson','adjcos')){ "A function that returns the estimated rating for the evaluation of item Id_i by user Id_u. The recommendation can be made through similarity between users, when type = 'user', and also through the similarity between items, when type = 'item'. Id_u: A character, a user ID; Id_i: A character, an item ID; type: A character string, 'user' or 'item'; neighbors: Number of similarities used for the estimates.(default=5); similarity: the methodology used to estimate the rating. If type = 'user', must be one of 'cos', for cosine similarity, or 'pearson' (default), for pearson similarity. If type='item', must be one of 'cos', for cosine similarity, or 'adjcos' (default), for adjusted cosine similarity. This choice can alter the way the estimate is calculated." 
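  # Illustrative note (comments only). With similarity 'cos' or 'adjcos' the estimate below is
  # the similarity-weighted average of the neighbors' ratings,
  #   r_hat = sum_k(sim_k * r_k) / sum_k(sim_k),
  # while with 'pearson' the similarity-weighted deviations of the neighbors from their own
  # mean ratings are added to the mean rating of user Id_u. A hypothetical call:
  #   objectCF$estimaterating(Id_u = "1", Id_i = "Iron Man 3", type = "user")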
if(!is.character(similarity)){stop("similarity must be a character.")} if(!is.character(Id_u)){stop("Id_u must be a character.")} if(!is.character(Id_i)){stop("Id_i must be a character.")} if(type!="user" && type!="item"){stop("type should be one of 'user' or 'item'.")} if(type=="user" && similarity == "adjcos"){stop("If type = 'user', similarity can be only 'cos' or 'pearson'.")} if(type=="item" && similarity == "pearson"){stop("If type = 'item', similarity can be only 'cos' or 'adjcos'.")} if(similarity !="cos" && similarity !="adjcos" && similarity !="pearson"){stop("similarity should be one of 'cos', 'adjcos' or 'pearson'.")} if(sum(dim(SU1))==0 && type=='user' && similarity == 'cos'){stop("if you want to make this recommendation, add the methodology of cosine similarity using the function add_cosine()")} if(sum(dim(SU2))==0 && type=='user' && similarity == 'pearson'){stop("if you want to make this recommendation, add the methodology of pearson similarity using the function add_pearson()")} if(sum(dim(SI1))==0 && type=='item' && similarity == 'cos'){stop("if you want to make this recommendation, add the methodology of cosine similarity using the function add_cosine()")} if(sum(dim(SI2))==0 && type=='item' && similarity == 'adjcos'){stop("if you want to make this recommendation, add the methodology of adjusted cosine similarity using the function add_adjcos()")} M = nrow(MU) encontrou_i = F for(i in 1:M){ if(rownames(MU)[i]==Id_u){ encontrou_i = T break } } if(!encontrou_i) stop("This is not a valid user.") #i guarda o num da linha de MU correspondente ao usuario Id_u N = ncol(MU) encontrou_j = F for(j in 1:N){ if(colnames(MU)[j]==Id_i){ encontrou_j = T break } } if(!encontrou_j) stop("This is not a valid item.") if(type=="user"){ #busca os n indices dos usuarios mais proximos de i que avaliaram j #no final v_ind e v_valores sao arrays de tamanho n ou menos, #a primeira posicao guarda um array com indices dos usuarios acima e a #segunda posicao um array com os valores das similaridades entre entre esses #usuarios e o usuario k if(similarity=="cos"){ M = ncol(SU1) if(i==1){copia=c(NA,SU1[i,(i+1):M])}else{if(i==M){copia=c(SU1[1:(i-1),i],NA)}else{copia = c(SU1[1:(i-1),i],NA,SU1[i,(i+1):M])}} #varrer o copia colocando NA nos usuarios que nao avaliaram j for(l in 1:M){ if(is.na(MU[l,j])){ copia[l] = NA } } if(sum(!is.na(copia))==0){stop("It isn't possible to estimate the rating for this user and item.")}else{ v_ind = NULL v_valor = NULL for(k in 1:neighbors){ if(sum(!is.na(copia))==0){break} ind = which.max(copia) valor = max(copia,na.rm = T) if(valor <= 0) break v_ind = c(v_ind,ind) v_valor = c(v_valor,valor) copia[ind] = NA } #agora v_ind e v_valor guardam os indices e as similaridades dos usuarios mais semelhantes a i que avaliaram j #cuidado, os arrays podem ser vazios. 
n_ = length(v_ind) if(n_ == 0){ stop("It isn't possible to estimate the rating for this user and item.") } nota = sum(MU[v_ind,j]*v_valor)/sum(v_valor) return(nota) } } if(similarity=="pearson"){ M = ncol(SU2) if(i==1){copia=c(NA,SU2[i,(i+1):M])}else{if(i==M){copia=c(SU2[1:(i-1),i],NA)}else{copia = c(SU2[1:(i-1),i],NA,SU2[i,(i+1):M])}} #varrer o copia colocando NA nos usuarios que nao avaliaram j for(l in 1:M){ if(is.na(MU[l,j])){ copia[l] = NA } } if(sum(!is.na(copia))==0){stop("It isn't possible to estimate the rating for this user and item.")}else{ v_ind = NULL v_valor = NULL for(k in 1:neighbors){ if(sum(!is.na(copia))==0){break} ind = which.max(copia) valor = max(copia,na.rm = T) if(valor <= 0) break v_ind = c(v_ind,ind) v_valor = c(v_valor,valor) copia[ind] = NA } #agora v_ind e v_valor guardam os indices e as similaridades dos usuarios mais semelhantes a i que avaliaram j #cuidado, os arrays podem ser vazios. n_ = length(v_ind) if(n_ == 0){ stop("It isn't possible to estimate the rating for this user and item.") } nota = averages_u[i] + (sum((MU[v_ind,j]-averages_u[v_ind])*v_valor)/sum(v_valor)) nota = as.numeric(nota) return(nota) } } } if(type=="item"){ #busca os n indices dos itens mais proximos de j que foram avaliados por i #no final v_ind e v_valores sao arrays de tamanho n ou menos, #a primeira posicao guarda um array com indices dos itens acima e a #segunda posicao um array com os valores das similaridades entre entre esses #itens e o item k if(similarity=="cos"){ N = ncol(SI1) if(j==1){copia=c(NA,SI1[j,(j+1):N])}else{if(j==N){copia=c(SI1[1:(j-1),j],NA)}else{copia = c(SI1[1:(j-1),j],NA,SI1[j,(j+1):N])}} #varrer o copia colocando NA nos usuarios que nao avaliaram j for(l in 1:N){ if(is.na(MU[i,l])){ copia[l] = NA } } if(sum(!is.na(copia))==0){stop("It isn't possible to estimate the rating for this user and item.")}else{ v_ind = NULL v_valor = NULL for(l in 1:neighbors){ if(sum(!is.na(copia))==0){break} ind = which.max(copia) valor = max(copia,na.rm = T) if(valor <= 0) break v_ind = c(v_ind,ind) v_valor = c(v_valor,valor) copia[ind] = NA } #agora v_ind e v_valor guardam os indices e as similaridades dos itens mais semelhantes a j que foram avaliados por i #cuidado, os arrays podem ser vazios. n_ = length(v_ind) if(n_ == 0){ stop("It isn't possible to estimate the rating for this user and item.") } nota = sum(MU[i,v_ind]*v_valor)/sum(v_valor) return(nota) } } if(similarity=="adjcos"){ N = ncol(SI2) if(j==1){copia=c(NA,SI2[j,(j+1):N])}else{if(j==N){copia=c(SI2[1:(j-1),j],NA)}else{copia = c(SI2[1:(j-1),j],NA,SI2[j,(j+1):N])}} #varrer o copia colocando NA nos usuarios que nao avaliaram j for(l in 1:N){ if(is.na(MU[i,l])){ copia[l] = NA } } if(sum(!is.na(copia))==0){stop("It isn't possible to estimate the rating for this user and item.")}else{ v_ind = NULL v_valor = NULL for(k in 1:neighbors){ if(sum(!is.na(copia))==0){break} ind = which.max(copia) valor = max(copia,na.rm = T) if(valor <= 0) break v_ind = c(v_ind,ind) v_valor = c(v_valor,valor) copia[ind] = NA } #agora v_ind e v_valor guardam os indices e as similaridades dos itens mais semelhantes a j que foram avaliados por i #cuidado, os arrays podem ser vazios. n_ = length(v_ind) if(n_ == 0){ stop("It isn't possible to estimate the rating for this user and item.") } nota = sum(MU[i,v_ind]*v_valor)/sum(v_valor) return(nota) } } } }, deleterating=function(Id_u,Id_i){ "Deletes the rating from user Id_u to item Id_i. The object CF matrices and vectors will be updated. Id_u : A character, a user ID; Id_i : A character, an item ID. 
If you want to delete more ratings, you can use lists, where Id_u and Id_i are lists of characters." if(is.list(Id_u) && is.list(Id_i)){ u=length(Id_u) for(k in 1:u){ if(k==u){ Id_u = as.character(Id_u[[k]]) Id_i = as.character(Id_i[[k]]) }else{ deleterating(Id_u = Id_u[[k]], Id_i=Id_i[[k]]) } } } if(!is.character(Id_u)){stop("Id_u must be a character.")} if(!is.character(Id_i)){stop("Id_i must be a character.")} have_SU1=F have_SU2=F have_SI1=F have_SI2=F if(sum(dim(SU1))!=0) have_SU1=TRUE if(sum(dim(SU2))!=0) have_SU2=TRUE if(sum(dim(SI1))!=0) have_SI1=TRUE if(sum(dim(SI2))!=0) have_SI2=TRUE M = nrow(MU) N = ncol(MU) encontrou_i = F for(i in 1:M){ if(rownames(MU)[i]==Id_u){ encontrou_i = T break } } if(!encontrou_i) stop("This is not a valid user.") #i guarda o num da linha de MU correspondente ao usuario Id_u encontrou_j = F for(j in 1:N){ if(colnames(MU)[j]==Id_i){ encontrou_j = T break } } if(!encontrou_j) stop("This is not a valid item.") if(is.na(MU[i,j])){stop("This rating doesnt exist.")} valor = MU[i,j] #Alteracao MU MU[i,j]<<-NA #Alteracao averages_i e n_aval_i averages_i[j] <<- averages_i[j]*n_aval_i[j]/(n_aval_i[j]-1) - valor/(n_aval_i[j]-1) n_aval_i[j] <<- n_aval_i[j] - 1 #Alteracao averages_u e n_aval_u averages_u[i] <<- averages_u[i]*n_aval_u[i]/(n_aval_u[i]-1) - valor/(n_aval_u[i]-1) n_aval_u[i] <<- n_aval_u[i] - 1 #Alteracao SUs ind_aval_i<-NULL #indices dos usuarios que avaliaram o item j for(k in 1:M){ if(!is.na(MU[k,j])) ind_aval_i=c(ind_aval_i,k) } #Alteracao SU1 if(have_SU1==T){ for(k in ind_aval_i){ #calcular a similaridade de i com k nova_sim_i_k = sum(MU[k,]*MU[i,],na.rm = T)/(sqrt(sum(MU[k,]^2,na.rm = T))*sqrt(sum(MU[i,]^2,na.rm = T))) if(i!=k){ if(i < k){ SU1[i,k] <<- nova_sim_i_k }else{ SU1[k,i] <<- nova_sim_i_k } } } } #Alteracao SU2 if(have_SU2==T){ for(k in ind_aval_i){ #calcular a similaridade de i com k nova_sim_i_k = sum((MU[k,]-averages_u[k])*(MU[i,]-averages_u[i]),na.rm = T)/(sqrt(sum((MU[k,]-averages_u[k])^2,na.rm = T))*sqrt(sum((MU[i,]-averages_u[i])^2,na.rm = T))) if(i!=k){ if(i < k){ SU2[i,k] <<- nova_sim_i_k }else{ SU2[k,i] <<- nova_sim_i_k } } } } #Alteracao SIs ind_aval_u<-NULL #indice dos itens que foram avaliados por i for(k in 1:N){ if(!is.na(MU[i,k])) ind_aval_u=c(ind_aval_u,k) } #Alteracao SI1 if(have_SI1==T){ for(k in ind_aval_u){ nova_sim_j_k = sum(MU[,k]*MU[,j],na.rm = T)/(sqrt(sum(MU[,k]^2,na.rm = T))*sqrt(sum(MU[,j]^2,na.rm = T))) if(j!=k){ if(j < k){ SI1[j,k] <<- nova_sim_j_k }else{ SI1[k,j] <<- nova_sim_j_k } } } } #Alteração SI2 if(have_SI2==T){ for(k in ind_aval_u){ nova_sim_j_k = sum((MU[,k]-averages_u)*(MU[,j]-averages_u),na.rm = T)/(sqrt(sum((MU[,k]-averages_u)^2,na.rm = T))*sqrt(sum((MU[,j]-averages_u)^2,na.rm = T))) if(j!=k){ if(j < k){ SI2[j,k] <<- nova_sim_j_k }else{ SI2[k,j] <<- nova_sim_j_k } } } } }, changerating=function(Id_u,Id_i,r){ "Changes the rating from user Id_u to item Id_i. The object CF matrices and vectors will be updated. Id_u : A character, a user ID; Id_i : A character, an item ID; r : The new rating. If you want to change more ratings, you can use lists where Id_u and Id_i are lists of characters and r is a list of ratings." 
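  # Illustrative usage note (comments only; the ids and ratings are hypothetical):
  #   objectCF$changerating(Id_u = "1", Id_i = "Iron Man 3", r = 5)
  #   objectCF$deleterating(Id_u = "2", Id_i = "Frozen")
  #   objectCF$changerating(Id_u = list("1", "2"), Id_i = list("Frozen", "Argo"), r = list(4, 2))
  # Internally a change is carried out as deleterating() followed by newrating().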
if(is.list(Id_u) && is.list(Id_i) && is.list(r)){ u=length(Id_u) for(k in 1:u){ if(k==u){ Id_u=as.character(Id_u[[k]]) Id_i=as.character(Id_i[[k]]) r=as.numeric(r[[k]]) }else{ changerating(Id_u = Id_u[[k]], Id_i = Id_i[[k]], r=r[[k]]) } } } if(!is.character(Id_u)){stop("Id_u must be a character.")} if(!is.character(Id_i)){stop("Id_i must be a character.")} M = nrow(MU) N = ncol(MU) encontrou_i = F for(i in 1:M){ if(rownames(MU)[i]==Id_u){ encontrou_i = T break } } if(!encontrou_i) stop("This is not a valid user.") #i guarda o num da linha de MU correspondente ao usuario Id_u encontrou_j = F for(j in 1:N){ if(colnames(MU)[j]==Id_i){ encontrou_j = T break } } if(!encontrou_j) stop("This is not a valid item.") if(is.na(MU[i,j])){stop("If you want to add a new rating, use $newrating().")} deleterating(Id_u,Id_i) newrating(Id_u,Id_i,r) } ) )
# ---- end of source file: CFilt/R/CF.R ----
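# ---------------------------------------------------------------------------
# Standalone usage sketch (not part of the package source). It builds a CF object with the
# CFbuilder() constructor documented further below and exercises a few of the methods defined
# above. Ids are picked programmatically from the utility matrix, so no specific user or item
# name is assumed; the results depend on the data.
library(CFilt)

ratings <- movies[1:1000, ]              # ratings data shipped with the package
objectCF <- CFbuilder(Data = ratings, sim_user = "pearson", sim_item = "adjcos")

u <- rownames(objectCF$MU)[1]            # a valid user id
i <- colnames(objectCF$MU)[1]            # a valid item id

objectCF$kclosestitems(Id_i = i, k = 3)              # items most similar to item i
objectCF$topkitems(Id_u = u, k = 5, type = "item")   # up to 5 recommendations for user u
try(objectCF$estimaterating(Id_u = u, Id_i = i, type = "user"))  # may stop() if no neighbor rated i
# ---------------------------------------------------------------------------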
#'@name CFbuilder
#'@aliases CFbuilder
#'@title A function to create and build a CF class object
#'@description
#'A CF class object constructor. This function can perform two procedures: if Data is a data frame in the style
#'User Id - Item Id - Ratings, it creates and builds a CF class object from the data frame, containing a Utility
#'Matrix, a User Similarity Matrix, an Item Similarity Matrix, a vector with the number of user ratings, a vector
#'with the number of ratings received by the items, a vector with the average ratings of each user and another
#'vector with the average ratings received by each item. If Data is the Utility Matrix, it also constructs all
#'matrices and vectors. When building the object, the progress percentage is shown. Step 1: Building the MU and
#'vectors. Step 2: Building the SU. Step 3: Building the SI.
#'@usage CFbuilder(Data, sim_user, sim_item)
#'@param Data A data frame in the style User ID - Item ID - Ratings, or a Utility Matrix.
#'@param sim_user The methodology used to estimate ratings through user similarity. Can be 'cos', 'pearson', 'both' or 'none'. If it equals 'cos', the SU1 will be built. If it equals 'pearson', the SU2 will be built. If it equals 'both', the SU1 and SU2 will be built. If it equals 'none', neither will be built.
#'@param sim_item The methodology used to estimate ratings through item similarity. Can be 'cos', 'adjcos', 'both' or 'none'. If it equals 'cos', the SI1 will be built. If it equals 'adjcos', the SI2 will be built. If it equals 'both', the SI1 and SI2 will be built. If it equals 'none', neither will be built.
#'@author Thiago Lima, Jessica Kubrusly.
#'@return A CF class object.
#'@references
#'LINDEN, G.; SMITH, B.; YORK, J. Amazon.com recommendations: Item-to-item collaborative filtering. Internet Computing, IEEE, v. 7, n. 1, p. 76-80, 2003.
#'@seealso \code{\link[CFilt]{CF-class}}
#'@keywords Reference Class Collaborative Filtering
#'
#'@export
#'@examples
#'ratings<-movies[1:1000,]
#'objectCF<-CFbuilder(Data = ratings, sim_user = "pearson", sim_item = "adjcos")
CFbuilder <- function(Data, sim_user = "pearson", sim_item = "adjcos"){
  cal_SU1 = F
  cal_SU2 = F
  cal_SI1 = F
  cal_SI2 = F
  if(sim_user == "both"){
    cal_SU1 = TRUE
    cal_SU2 = TRUE
  }
  if(sim_user == "cos"){cal_SU1 = TRUE}
  if(sim_user == "pearson"){cal_SU2 = TRUE}
  if(sim_user == "none"){
    cal_SU1 = F
    cal_SU2 = F
  }
  if(sim_user != "cos" && sim_user != "pearson" && sim_user != "both" && sim_user != "none"){
    stop("sim_user can be only 'cos', 'pearson', 'both' or 'none'.")
  }
  if(sim_item == "both"){
    cal_SI1 = TRUE
    cal_SI2 = TRUE
  }
  if(sim_item == "cos"){cal_SI1 = TRUE}
  if(sim_item == "adjcos"){cal_SI2 = TRUE}
  if(sim_item == "none"){
    cal_SI1 = F
    cal_SI2 = F
  }
  if(sim_item != "cos" && sim_item != "adjcos" && sim_item != "both" && sim_item != "none"){
    stop("sim_item can be only 'cos', 'adjcos', 'both' or 'none'.")
  }
  if(is.data.frame(Data)){
    obj_FC <- CF()
    Data <- as.data.frame(Data)
    # Expected layout of Data: an object of type matrix or data frame where
    # the 1st column holds the user names (or identifiers),
    # the 2nd column holds the item names (or identifiers), and
    # the 3rd column holds a number (1-5 or 1-10) with the ratings, or 1/0
    # indicating whether or not the user has seen the item.
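    # Illustrative sketch of that layout (not executed): with the `movies` data shipped with
    # the package, the first rows of Data would look like
    #   user id   item id           rating
    #   1         Iron Man 3        4          (the values shown here are made up)
    #   1         Despicable Me 2   3
    # Column names are irrelevant; only the column order is used below.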
#para saber os nomes e o num de usuarios distintos if(is.numeric(Data[,1])){ tab_u = table(as.numeric(as.character(Data[,1]))) nome_u = names(tab_u) m = dim(tab_u) } if(is.character(Data[,1])){ tab_u = table(as.character(Data[,1])) nome_u = names(tab_u) m = dim(tab_u) } #para saber os nomes e o num de itens distintos tab_i = table(as.character(Data[,2])) nome_i = names(tab_i) n = dim(tab_i) #criar a matriz MU = matrix(NA,nrow=m,ncol=n,dimnames = list(nome_u,nome_i)) #Construindo a MU, averages_u, averages_i, n_aval_i e n_aval_u averages_u=rep(0,m) averages_i=rep(0,n) n_aval_u=rep(0,m) n_aval_i=rep(0,n) names(averages_u)<-nome_u names(averages_i)<-nome_i names(n_aval_u)<-nome_u names(n_aval_i)<-nome_i n_linhas = dim(Data)[1] message("Step 1 of 3: Building MU") pb <- txtProgressBar(min = 0, max = n_linhas, style = 3) for(i in 1:n_linhas){ setTxtProgressBar(pb, i) usua = as.character(Data[i,1]) item = as.character(Data[i,2]) nota = as.numeric(Data[i,3]) MU[usua,item]=nota averages_u[usua] <- averages_u[usua]*n_aval_u[usua]/(n_aval_u[usua]+1) + nota/(n_aval_u[usua]+1) averages_i[item] <- averages_i[item]*n_aval_i[item]/(n_aval_i[item]+1) + nota/(n_aval_i[item]+1) n_aval_u[usua]<-n_aval_u[usua]+1 n_aval_i[item]<-n_aval_i[item]+1 } close(pb) obj_FC$MU=MU obj_FC$averages_u=averages_u obj_FC$averages_i=averages_i obj_FC$n_aval_u=n_aval_u obj_FC$n_aval_i=n_aval_i message("Step 2 of 3: Building SU") pb <- txtProgressBar(min = 0, max = 2*m, style = 3) if(cal_SU1 == TRUE){ #Criar SU1 - Coeficiente Cosseno: SU1=matrix(NA,m,m,dimnames = list(nome_u,nome_u)) for(i in 1:m){ setTxtProgressBar(pb, i) for(j in i:m){ if(j!=i){ SU1[i,j]<-sum(MU[i,]*MU[j,],na.rm = T)/(sqrt(sum((MU[i,])^2,na.rm = T))*sqrt(sum((MU[j,])^2,na.rm = T))) } if(j==i){SU1[i,j]<-1} } } obj_FC$SU1=SU1 } #Criar SU2 - Coeficiente de Person: if(cal_SU2==TRUE){ SU2=matrix(NA,m,m,dimnames = list(nome_u,nome_u)) for(i in 1:m){ setTxtProgressBar(pb,(m+i)) for(j in i:m){ if(j!= i){ SU2[i,j]<-sum((MU[i,]-averages_u[i])*(MU[j,]-averages_u[j]),na.rm = T)/(sqrt(sum((MU[i,]-averages_u[i])^2,na.rm = T))*sqrt(sum((MU[j,]-averages_u[j])^2,na.rm = T))) } if(j==i){SU2[i,j]<-1} } } obj_FC$SU2=SU2 } setTxtProgressBar(pb,2*m) close(pb) message("Step 3 of 3: Building SI") pb <- txtProgressBar(min = 0, max = 2*n, style = 3) if(cal_SI2==TRUE){ #Criar SI2 - Cosseno Ajustado SI2=matrix(NA,n,n,dimnames = list(nome_i,nome_i)) for(i in 1:n){ setTxtProgressBar(pb, i) for(j in i:n){ if(j!= i){ SI2[i,j]=sum((MU[,i]-averages_u)*(MU[,j]-averages_u),na.rm = T)/(sqrt(sum((MU[,i]-averages_u)^2,na.rm = T))*sqrt(sum((MU[,j]-averages_u)^2,na.rm = T))) } if(j==i){SI2[i,j]<-1} } } obj_FC$SI2=SI2 } if(cal_SI1==TRUE){ #Criar SI1 - Distancia Cosseno normal SI1 = matrix(NA,n,n,dimnames=list(nome_i,nome_i)) for(i in 1:n){ setTxtProgressBar(pb, (n+i)) for(j in i:n){ if(j!=i){ SI1[i,j] = sum(MU[,i]*MU[,j],na.rm=T)/(sqrt(sum((MU[,i])^2,na.rm=T))*sqrt(sum((MU[,j])^2,na.rm=T))) } if(j==i){SI1[i,j]<-1} } } obj_FC$SI1=SI1 } setTxtProgressBar(pb,2*n) close(pb) return(obj_FC) } if(is.matrix(Data)){ MU=Data obj_FC = CF(MU=MU) M=dim(MU)[1] N=dim(MU)[2] #Cria vetores de medias, para linhas e colunas: averages_u=apply(MU,1,mean,na.rm=T) averages_i=apply(MU,2,mean,na.rm=T) obj_FC$averages_u=averages_u obj_FC$averages_i=averages_i #Criar vetores de Numeros de avaliacoes de usuarios e quantas vzs um item nao avaliado aplica=!apply(MU,1:2,is.na) n_aval_u=apply(aplica,1,sum) n_aval_i=apply(aplica,2,sum) obj_FC$n_aval_u=n_aval_u obj_FC$n_aval_i=n_aval_i message("Step 1 of 2: Building SU") pb <- 
txtProgressBar(min = 0, max = 2*M, style = 3) if(cal_SU1==TRUE){ #Criar SU1 - Similaridade Cosseno : SU1=matrix(NA,M,M,dimnames = list(rownames(Data),rownames(Data))) for(i in 1:M){ setTxtProgressBar(pb, i) for(j in i:M){ if(j!= i){ SU1[i,j]<-sum(MU[i,]*MU[j,],na.rm = T)/(sqrt(sum((MU[i,])^2,na.rm = T))*sqrt(sum((MU[j,])^2,na.rm = T))) } if(j==i){SU1[i,j]<-1} } } obj_FC$SU1=SU1 } if(cal_SU2==TRUE){ #Criar SU2 - Adjusted Cosseno : SU2=matrix(NA,M,M,dimnames = list(rownames(Data),rownames(Data))) for(i in 1:M){ setTxtProgressBar(pb, (M+i)) for(j in i:M){ if(j!= i){ SU2[i,j]<-sum((MU[i,]-averages_u[i])*(MU[j,]-averages_u[j]),na.rm = T)/(sqrt(sum((MU[i,]-averages_u[i])^2,na.rm = T))*sqrt(sum((MU[j,]-averages_u[j])^2,na.rm = T))) } if(j==i){SU2[i,j]<-1} } } obj_FC$SU2=SU2 } setTxtProgressBar(pb, 2*M) close(pb) message("Step 2 of 2: Building SI") n=dim(MU)[2] pb <- txtProgressBar(min = 0, max = 2*n, style = 3) if(cal_SI2==TRUE){ #Criar SI2 - Cosseno Ajustado SI2=matrix(NA,n,n,dimnames = list(colnames(Data),colnames(Data))) for(i in 1:n){ setTxtProgressBar(pb, i) for(j in i:n){ if(j!= i){ SI2[i,j]=sum((MU[,i]-averages_u)*(MU[,j]-averages_u),na.rm = T)/(sqrt(sum((MU[,i]-averages_u)^2,na.rm = T))*sqrt(sum((MU[,j]-averages_u)^2,na.rm = T))) } if(j==i){SI2[i,j]<-1} } } obj_FC$SI2=SI2 } #Criar SI1 - Cosseno if(cal_SI1==TRUE){ SI1=matrix(NA,n,n,dimnames = list(colnames(Data),colnames(Data))) for(i in 1:n){ setTxtProgressBar(pb, (n+i)) for(j in i:n){ if(j!=i){ SI1[i,j] = sum(MU[,i]*MU[,j],na.rm=T)/(sqrt(sum((MU[,i])^2,na.rm=T))*sqrt(sum((MU[,j])^2,na.rm=T))) } if(j==i){SI1[i,j]<-1} } } obj_FC$SI1=SI1 } setTxtProgressBar(pb, 2*n) close(pb) return(obj_FC) } if(!is.data.frame(Data) && !is.matrix(Data)){stop("The data would be a data frame or a matrix.")} }
# ---- end of source file: CFilt/R/CFbuilder.R ----
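# ---------------------------------------------------------------------------
# Standalone sketch (not part of the package source) of the utility-matrix input path of
# CFbuilder() described above. The assumed shape follows the function body: users in rows,
# items in columns, NA for missing ratings; the user/item names and rating values are made up.
UM <- matrix(c(4, NA, 3,
               NA, 5, 1,
               2, 2, NA),
             nrow = 3, byrow = TRUE,
             dimnames = list(c("u1", "u2", "u3"), c("i1", "i2", "i3")))
objectCF_m <- CFbuilder(Data = UM, sim_user = "both", sim_item = "both")
objectCF_m$SU1   # cosine similarity between users
objectCF_m$SI2   # adjusted cosine similarity between items
# ---------------------------------------------------------------------------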
#' CFilt: A package for Collaborative Filtering by Reference Classes (RC) in R.
#'
#'@description
#' The CFilt package provides one builder function, CFbuilder, and one class, CF, whose methods update the object and recommend items or users.
#'
#'@details
#'
#' Two main goals:
#' \itemize{
#' \item Structure the database so that changes can be made in a practical way through object-oriented programming.
#' \item Make recommendations through the Collaborative Filtering methodology in a practical, fast and efficient manner.
#' }
#' @docType package
#' @name CFilt
#'
#' @author
#' Authors:
#' \itemize{
#' \item Jessica Quintanilha Kubrusly - [email protected]
#' \item Thiago Augusto Santos Lima - [email protected]
#' }
#' @importFrom methods setRefClass
#' @importFrom utils setTxtProgressBar
#' @importFrom utils txtProgressBar
#'
NULL
# ---- end of source file: CFilt/R/CFilt.R ----
#' Movie ratings by users #' #' A dataset containing 7276 ratings for 50 movies by 526 users. This database was created by Giglio (2014). #' #' @format A data frame with 7276 rows and 3 variables: #' \describe{ #' \item{Id Users}{Users identifier. Numbers 1 to 526. } #' \item{Id Items}{ #' Movies identifier. Movies list: #' \enumerate{ #' \item Iron Man 3 #' \item Despicable Me 2 #' \item My Mom Is a Character #' \item Fast & Furious 6 #' \item The Wolverine #' \item Thor: The Dark World #' \item Hansel & Gretel: Witch Hunters #' \item Wreck-It Ralph #' \item Monsters University #' \item The Hangover Part III #' \item Vai Que Dá Certo #' \item Meu Passado me Condena #' \item We’re So Young #' \item Brazilian Western #' \item O Concurso #' \item Mato sem Cachorro #' \item Cine Holliudy #' \item Odeio o Dia dos Namorados #' \item Argo #' \item Django Unchained #' \item Life of Pi #' \item Lincoln #' \item Zero Dark Thirty #' \item Les Miserables #' \item Silver Linings Playbook #' \item Beasts of the Southern Wild #' \item Amour #' \item A Royal Affair #' \item American Hustle #' \item Capitain Phillips #' \item 12 Years a Slave #' \item Dallas Buyers Club #' \item Gravity #' \item Her #' \item Philomena #' \item The Wolf of Wall Street #' \item The Hunt #' \item Frozen #' \item Till Luck Do Us Part 2 #' \item Muita Calma Nessa Hora 2 #' \item Paranormal Activity: The Marked Ones #' \item I, Frankenstein, #' \item The Legend of Tarzan #' \item The Book Thief #' \item The Lego Movie, , , #' \item Walking With Dinosaurs #' \item The Hunger Games: Catching Fire #' \item Blue Is The Warmest Color #' \item Reaching for the Moon #' \item The Hobbit: The Desolation of Smaug #' } #' } #' \item{Ratings}{Movie ratings by users. The ratings follows the Likert scale: 1 to 5.} #' } #'@references Giglio , J. C. (2014). Recomendação de Filmes Utilizando Filtragem Colaborativa [Recommending Films Using Collaborative Filtering]. Undergraduate thesis - Universidade Federal Fluminense. "movies"
# ---- end of source file: CFilt/R/movies.R ----
#' CFdatum class #' #' This internal class stores the information to represent date and time values using #' the CF conventions. This class is not supposed to be used by end-users directly. #' An instance is created by the exported `CFtime` class, which also exposes the #' relevant properties of this class. #' #' The following calendars are supported: #' #' \itemize{ #' \item `gregorian` or `standard`, the international standard calendar for civil use. #' \item `proleptic_gregorian`, the standard calendar but extending before 1582-10-15 #' when the Gregorian calendar was adopted. #' \item `noleap` or `365_day`, all years have 365 days. #' \item `all_leap` or `366_day`, all years have 366 days. #' \item `360_day`, all years have 360 days, divided over 12 months of 30 days. #' \item `julian`, every fourth year is a leap year (so including the years 1700, 1800, 1900, 2100, etc). #' } #' #' @slot definition character. The string that defines the time unit and base date/time. #' @slot unit numeric. The unit of time in which offsets are expressed. #' @slot origin data.frame. Data frame with 1 row that defines the origin time. #' @slot calendar character. The CF-calendar for the instance. #' @slot cal_id numeric. The internal identifier of the CF-calendar to use. #' #' @returns An object of class CFdatum #' @noRd setClass("CFdatum", slots = c( definition = "character", unit = "numeric", origin = "data.frame", calendar = "character", cal_id = "numeric" )) #' Create a CFdatum object #' #' This function creates an instance of the `CFdatum` class. After creation the #' instance is read-only. The parameters to the call are typically read from a #' CF-compliant data file with climatological observations or predictions. #' #' @param definition character. An atomic string describing the time coordinate #' of a CF-compliant data file. #' @param calendar character. An atomic string describing the calendar to use #' with the time dimension definition string. #' #' @returns An object of the `CFdatum` class. #' @noRd CFdatum <- function(definition, calendar) { stopifnot(length(definition) == 1L, length(calendar) == 1L) definition <- tolower(definition) calendar <- tolower(calendar) parts <- strsplit(definition, " ")[[1L]] if ((length(parts) < 3L) || !(parts[2L] %in% c("since", "after", "from", "ref", "per"))) stop("Definition string does not appear to be a CF-compliant time coordinate description") u <- which(CFt$CFunits$unit == parts[1L]) if (length(u) == 0L) stop("Unsupported unit: ", parts[1L]) cal <- CFt$calendars$id[which(calendar == CFt$calendars$name)] if (length(cal) == 0L) stop("Invalid calendar specification") nw <- methods::new("CFdatum", definition = definition, unit = CFt$CFunits$id[u], origin = data.frame(), calendar = calendar, cal_id = cal) dt <- .parse_timestamp(nw, paste(parts[3L:length(parts)], collapse = " ")) if (is.na(dt$year[1L])) stop("Definition string does not appear to be a CF-compliant time coordinate description: invalid base date specification") nw@origin <- dt return(nw) } setMethod("show", "CFdatum", function(object) { if (object@origin$tz[1L] == "00:00") tz = "" else tz = object@origin$tz[1L] cat("CF datum of origin:", "\n Origin : ", origin_date(object), " ", origin_time(object), tz, "\n Units : ", CFt$units$name[object@unit], "\n Calendar: ", object@calendar, "\n", sep = "") }) #' Equivalence of CFdatum objects #' #' This function can be used to test if two `CFdatum` objects represent the same datum #' for CF-convention time coordinates. 
Two `CFdatum` objects are considered equivalent #' if they have the same definition string and the same calendar. Calendars #' "standard", "gregorian" and "proleptic_gregorian" are considered equivalent, #' as are the pairs of "365_day" and "no_leap", and "366_day" and "all_leap". #' #' @param e1,e2 CFdatum Instances of the CFdatum class. #' #' @returns `TRUE` if the `CFdatum` objects are equivalent, `FALSE` otherwise. #' @noRd .datum_equivalent <- function(e1, e2) { sum(e1@origin[1L,1L:6L] != e2@origin[1L,1L:6L]) == 0L && # Offset column is NA e1@unit == e2@unit && e1@cal_id == e2@cal_id } #' Compatibility of CFdatum objects #' #' This function can be used to test if two `CFdatum` objects have the same unit #' and calendar for CF-convention time coordinates. Calendars "standard", #' "gregorian" and "proleptic_gregorian" are considered compatible, as are the #' pairs of "365_day" and "no_leap", and "366_day" and "all_leap". #' #' @param e1,e2 CFdatum Instances of the CFdatum class. #' #' @returns `TRUE` if the `CFdatum` objects are compatible, `FALSE` otherwise. #' @noRd .datum_compatible <- function(e1, e2) e1@unit == e2@unit && e1@cal_id == e2@cal_id definition <- function(x) x@definition calendar <- function(x) x@calendar calendar_id <- function(x) x@cal_id unit <- function(x) x@unit origin_date <- function(x) sprintf("%04d-%02d-%02d", x@origin$year[1L], x@origin$month[1L], x@origin$day[1L]) origin_time <- function(x) .format_time(x@origin) timezone <- function(x) x@origin$tz[1L]
# ---- end of source file: CFtime/R/CFdatum.R ----
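# ---------------------------------------------------------------------------
# Standalone sketch (not part of the package source) of how a time definition reaches the
# internal CFdatum class: end users go through the exported CFtime() constructor, as in the
# examples elsewhere in the package, passing a CF definition string and one of the calendars
# listed in the documentation above.
cf_standard <- CFtime("days since 1949-12-01", "standard", 0:364)  # Gregorian calendar
cf_360day   <- CFtime("days since 1949-12-01", "360_day", 0:359)   # every year has 360 days
# ---------------------------------------------------------------------------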
#' Create a factor from the offsets in an CFtime instance #' #' With this function a factor can be generated for the time series, or a part #' thereof, contained in the `CFtime` instance. This is specifically interesting #' for creating factors from the date part of the time series that aggregate the #' time series into longer time periods (such as month) that can then be used to #' process daily CF data sets using, for instance, `tapply()`. #' #' The factor will respect the calendar of the datum that the time series is #' built on. For `period`s longer than a day this will result in a factor where #' the calendar is no longer relevant (because calendars impacts days, not #' dekads, months or seasons). #' #' The factor will be generated in the order of the offsets of the `CFtime` #' instance. While typical CF-compliant data sources use ordered time series #' there is, however, no guarantee that the factor is ordered as multiple `CFtime` #' objects may have been merged out of order. #' #' If the `epoch` parameter is specified, either as a vector of years to include #' in the factor, or as a list of such vectors, the factor will only consider #' those values in the time series that fall within the list of years, inclusive #' of boundary values. Other values in the factor will be set to `NA`. The years #' need not be contiguous, within a single vector or among the list items, or in #' order. #' #' The following periods are supported by this function: #' #' \itemize{ #' \item `year`, the year of each offset is returned as "YYYY". #' \item `season`, the meteorological season of each offset is returned as #' "DJF", "MAM", "JJA" or "SON", preceeded by "YYYY-" if no `epoch` is #' specified. Note that December dates are labeled as belonging to the #' subsequent year, so the date "2020-12-01" yields "2021-DJF". This implies #' that for standard CMIP files having one or more full years of data the #' first season will have data for the first two months (January and #' February), while the final season will have only a single month of data #' (December). #' \item `month`, the month of each offset is returned as "01" to #' "12", preceeded by "YYYY-" if no `epoch` is specified. This is the default #' period. #' \item `dekad`, ten-day periods are returned as #' "Dxx", where xx runs from "01" to "36", preceeded by "YYYY" if no `epoch` #' is specified. Each month is subdivided in dekads as follows: 1- days 01 - #' 10; 2- days 11 - 20; 3- remainder of the month. #' \item `day`, the month and day of each offset are returned as "MM-DD", #' preceeded by "YYYY-" if no `epoch` is specified. #' } #' #' It is not possible to create a factor for a period that is shorter than the #' temporal resolution of the source data set from which the `cf` argument #' derives. As an example, if the source data set has monthly data, a dekad or #' day factor cannot be created. #' #' Creating factors for other periods is not supported by this function. Factors #' based on the timestamp information and not dependent on the calendar can #' trivially be constructed from the output of the [CFtimestamp()] function. #' #' @param cf CFtime. An atomic instance of the `CFtime` class whose offsets will #' be used to construct the factor. #' @param period character. An atomic character string with one of the values #' "year", "season", "month" (the default), "dekad" or "day". #' @param epoch numeric or list, optional. Vector of years for which to construct #' the factor, or a list whose elements are each a vector of years. 
If `epoch` #' is not specified, the factor will use the entire time series for the #' factor. #' #' @returns If `epoch` is a single vector or not specified, a factor with a #' length equal to the number of offsets in `cf`. If `epoch` is a list, a #' list with the same number of elements and names as `epoch`, each containing #' a factor. Elements in the factor will be set to `NA` for time series values #' outside of the range of specified years. #' @export #' #' @examples #' cf <- CFtime("days since 1949-12-01", "360_day", 19830:54029) #' #' # Create a dekad factor for the whole time series #' f <- CFfactor(cf, "dekad") #' #' # Create three monthly factors for early, mid and late 21st century epochs #' ep <- CFfactor(cf, epoch = list(early = 2021:2040, mid = 2041:2060, late = 2061:2080)) CFfactor <- function(cf, period = "month", epoch = NULL) { if (!(methods::is(cf, "CFtime"))) stop("First argument to CFfactor() must be an instance of the `CFtime` class") if (length(cf@offsets) < 10L) stop("Cannot create a factor for very short time series") period <- tolower(period) if (!((length(period) == 1L) && (period %in% CFt$factor_periods))) stop("Period specifier must be an atomic value of a supported period") # No fine-grained period factors for coarse source data timestep <- CFt$units$seconds[unit(cf@datum)] * cf@resolution; if ((period == "year") && (timestep > 86400 * 366) || (period == "season") && (timestep > 86400 * 90) || # Somewhat arbitrary (period == "month") && (timestep > 86400 * 31) || (period == "dekad") && (timestep > 86400) || # Must be constructed from daily or finer data (period == "day") && (timestep > 86400)) # Must be no longer than a day stop("Cannot produce a short period factor from source data with long time interval") time <- .offsets2time(cf@offsets, cf@datum) seasons <- c("DJF", "DJF", "MAM", "MAM", "MAM", "JJA", "JJA", "JJA", "SON", "SON", "SON", "DJF") months <- c("01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12") if (is.null(epoch)) { f <- switch(period, "year" = sprintf("%04d", time$year), "season" = ifelse(time$month == 12L, sprintf("%04d-DJF", time$year + 1L), sprintf("%04d-%s", time$year, seasons[time$month])), "month" = sprintf("%04d-%s", time$year, months[time$month]), "dekad" = sprintf("%04dD%02d", time$year, (time$month - 1L) * 3L + pmin.int((time$day - 1L) %/% 10L + 1L, 3L)), "day" = sprintf("%04d-%02d-%02d", time$year, time$month, time$day) ) out <- as.factor(f) attr(out, "epoch") <- -1L attr(out, "period") <- period return(out) } if (is.numeric(epoch)) ep <- list(epoch) else if ((is.list(epoch) && all(unlist(lapply(epoch, is.numeric))))) ep <- epoch else stop("When specified, the `epoch` parameter must be a numeric vector or a list thereof") out <- lapply(ep, function(years) { f <- switch(period, "year" = ifelse(time$year %in% years, sprintf("%04d", time$year), NA_character_), "season" = ifelse((time$month == 12L) & ((time$year + 1L) %in% years), "DJF", ifelse((time$month < 12L) & (time$year %in% years), seasons[time$month], NA_character_)), "month" = ifelse(time$year %in% years, months[time$month], NA_character_), "dekad" = ifelse(time$year %in% years, sprintf("D%02d", (time$month - 1L) * 3L + pmin.int((time$day - 1L) %/% 10L + 1L, 3L)), NA_character_), "day" = ifelse(time$year %in% years, sprintf("%s-%02d", months[time$month], time$day), NA_character_) ) f <- as.factor(f) attr(f, "epoch") <- length(years) attr(f, "period") <- period f }) if (is.numeric(epoch)) out <- out[[1L]] else names(out) <- names(epoch) return(out) } #' Number 
of base time units in each factor level #' #' Given a factor as returned by [CFfactor()] and the `CFtime` instance from #' which the factor was derived, this function will return a numeric vector with #' the number of time units in each level of the factor. #' #' The result of this function is useful to convert between absolute and #' relative values. Climate change anomalies, for instance, are usually computed #' by differencing average values between a future period and a baseline period. #' Going from average values back to absolute values for an aggregate period #' (which is typical for temperature and precipitation, among other variables) #' is easily done with the result of this function, without having to consider #' the specifics of the calendar of the data set. #' #' If the factor `f` is for an epoch (e.g. spanning multiple years and the #' levels do not indicate the specific year), then the result will indicate the #' number of time units of the period in a regular single year. In other words, #' for an epoch of 2041-2060 and a monthly factor on a standard calendar with a #' `days` unit, the result will be `c(31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)`. #' Leap days are thus only considered for the `366_day` and `all_leap` calendars. #' #' Note that this function gives the number of time units in each level of the #' factor - the actual number of data points in the `cf` instance per factor #' level may be different. Use [CFfactor_coverage()] to determine the actual #' number of data points or the coverage of data points relative to the factor #' level. #' #' @param cf CFtime. An instance of CFtime. #' @param f factor or list. A factor or a list of factors derived from the #' parameter `cf`. The factor or list thereof should generally be generated by #' the function [CFfactor()]. #' #' @returns If `f` is a factor, a numeric vector with a length equal to the #' number of levels in the factor, indicating the number of time units in each #' level of the factor. If `f` is a list of factors, a list with each element #' a numeric vector as above. #' @export #' #' @examples #' cf <- CFtime("days since 2001-01-01", "365_day", 0:364) #' f <- CFfactor(cf, "dekad") #' CFfactor_units(cf, f) CFfactor_units <- function(cf, f) { if (!(methods::is(cf, "CFtime"))) stop("First argument to `CFfactor_units()` must be an instance of the `CFtime` class") if (is.list(f)) factors <- f else factors <- list(f) if (!(all(unlist(lapply(factors, function(x) is.factor(x) && is.numeric(attr(x, "epoch")) && attr(x, "period") %in% CFt$factor_periods))))) stop("Argument `f` must be a factor generated by the function `CFfactor()`") cal <- calendar_id(cf@datum) upd <- CFt$units$per_day[unit(cf@datum)] out <- lapply(factors, function(fac) .factor_units(fac, cal, upd)) if (is.factor(f)) out <- out[[1L]] return(out) } #' Calculate time units in factors #' #' This is an internal function that should not generally be used outside of #' the CFtime package. #' #' @param f factor. Factor as generated by `CFfactor()`. #' @param cal numeric. Calendar id of the `CFtime()` instance. #' @param upd numeric. Number of units per day, from the `CFt` environment. #' #' @returns A vector as long as the number of levels in the factor. 
#' @noRd .factor_units <- function(f, cal, upd) { period <- attr(f, "period") if (cal == 3L) { res <- rep(c(360L, 90L, 30L, 10L, 1L)[which(CFt$factor_periods == period)], nlevels(f)) } else { if (attr(f, "epoch") > 0L) { if (cal %in% c(1L, 2L, 4L)) { res <- switch(period, "year" = rep(365L, nlevels(f)), "season" = ifelse(levels(f) %in% c("MAM", "JJA"), 92L, ifelse(levels(f) == "SON", 91L, 90L)), "month" = c(31L, 28L, 31L, 30L, 31L, 30L, 31L, 31L, 30L, 31L, 30L, 31L)[as.integer(levels(f))], "dekad" = { dk <- as.integer(substr(levels(f), 2L, 3L)) ifelse(dk %% 3L > 0L | dk %in% c(12L, 18L, 27L, 33L), 10L, ifelse(dk %in% c(3L, 9L, 15L, 21L, 24L, 30L, 36L), 11L, 8L)) }, "day" = rep(1L, nlevels(f)) ) } else if (cal == 5L) { res <- switch(period, "year" = rep(366L, nlevels(f)), "season" = ifelse(levels(f) %in% c("MAM", "JJA"), 92L, 91L), "month" = c(31L, 29L, 31L, 30L, 31L, 30L, 31L, 31L, 30L, 31L, 30L, 31L)[as.integer(levels(f))], "dekad" = { dk <- as.integer(substr(levels(f), 2L, 3L)) ifelse(dk %% 3L > 0L | dk %in% c(12L, 18L, 27L, 33L), 10L, ifelse(dk %in% c(3L, 9L, 15L, 21L, 24L, 30L, 36L), 11L, 9L)) }, "day" = rep(1L, nlevels(f)) ) } } else { # not an epoch factor res <- switch(period, "year" = ifelse(.is_leap_year(as.integer(levels(f)), cal), 366L, 365L), "season" = { year <- substr(levels(f), 1L, 4L) season <- substr(levels(f), 6L, 8L) ifelse(season %in% c("MAM", "JJA"), 92L, ifelse(season == "SON", 91L, ifelse(.is_leap_year(year, cal), 91L, 90L))) }, "month" = { year <- as.integer(substr(levels(f), 1L, 4L)) month <- as.integer(substr(levels(f), 6L, 7L)) ifelse(.is_leap_year(year, cal), c(31L, 29L, 31L, 30L, 31L, 30L, 31L, 31L, 30L, 31L, 30L, 31L)[month], c(31L, 28L, 31L, 30L, 31L, 30L, 31L, 31L, 30L, 31L, 30L, 31L)[month]) }, "dekad" = { year <- as.integer(substr(levels(f), 1L, 4L)) dk <- as.integer(substr(levels(f), 6L, 7L)) ifelse(dk %% 3L > 0L | dk %in% c(12L, 18L, 27L, 33L), 10L, ifelse(dk %in% c(3L, 9L, 15L, 21L, 24L, 30L, 36L), 11L, ifelse(.is_leap_year(year, cal), 9L, 8L))) }, "day" = rep(1L, nlevels(f)) ) } } return(res * upd) } #' Coverage of time elements for each factor level #' #' This function calculates the number of time elements, or the relative #' coverage, in each level of a factor generated by [CFfactor()]. #' #' @param cf CFtime. An instance of CFtime. #' @param f factor or list. A factor or a list of factors derived from the #' parameter `cf`. The factor or list thereof should generally be generated by #' the function [CFfactor()]. #' @param coverage "absolute" or "relative". #' #' @returns If `f` is a factor, a numeric vector with a length equal to the #' number of levels in the factor, indicating the number of units from the #' time series in `cf` contained in each level of the factor when #' `coverage = "absolute"` or the proportion of units present relative to the #' maximum number when `coverage = "relative"`. If `f` is a list of factors, a #' list with each element a numeric vector as above. 
#' @export #' #' @examples #' cf <- CFtime("days since 2001-01-01", "365_day", 0:364) #' f <- CFfactor(cf, "dekad") #' CFfactor_coverage(cf, f, "absolute") CFfactor_coverage <- function(cf, f, coverage = "absolute") { if (!(methods::is(cf, "CFtime"))) stop("First argument to `CFfactor_coverage()` must be an instance of the `CFtime` class") if (is.list(f)) factors <- f else factors <- list(f) if (!(all(unlist(lapply(factors, function(x) is.factor(x) && is.numeric(attr(x, "epoch")) && attr(x, "period") %in% CFt$factor_periods))))) stop("Argument `f` must be a factor generated by the function `CFfactor()`") if (!(is.character(coverage) && coverage %in% c("absolute", "relative"))) stop("Argument `coverage` must be an atomic string with a value of \"absolute\" or \"relative\"") if (coverage == "relative") { cal <- calendar_id(cf@datum) upd <- CFt$units$per_day[unit(cf@datum)] out <- lapply(factors, function(fac) { res <- tabulate(fac) / .factor_units(fac, cal, upd) yrs <- attr(fac, "epoch") if (yrs > 0) res <- res / yrs return(res) }) } else { out <- lapply(factors, tabulate) } if (is.factor(f)) out <- out[[1L]] return(out) }
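# Illustrative usage sketch (hypothetical helper, for exposition only): combining
# CFfactor() and CFfactor_coverage() to aggregate a vector of daily data to
# monthly sums, masking out months that are not fully covered by the time series.
.example_monthly_sum <- function(cf, data) {
  f <- CFfactor(cf, "month")                              # one level per year-month
  sums <- tapply(data, f, sum)                            # aggregate data per level
  complete <- CFfactor_coverage(cf, f, "relative") == 1   # fully covered levels only
  sums[!complete] <- NA                                   # mask incomplete months
  sums
}
# e.g. .example_monthly_sum(CFtime("days since 2001-01-01", "365_day", 0:729),
#                           runif(730, 0, 15))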
/scratch/gouwar.j/cran-all/cranData/CFtime/R/CFfactor.R
#' Create a vector that represents CF timestamps #' #' This function generates a vector of character strings or `POSIXct`s that #' represent the date and time in a selectable combination for each offset. #' #' The character strings use the format `YYYY-MM-DDThh:mm:ss±hh:mm`, depending #' on the `format` specifier. The date in the string is not necessarily #' compatible with `POSIXt` - in the `360_day` calendar `2017-02-30` is valid #' and `2017-03-31` is not. #' #' For the "standard", "gregorian" and "proleptic_gregorian" calendars the #' output can also be generated as a vector of `POSIXct` values by specifying #' `asPOSIX = TRUE`. #' #' @param cf CFtime. The `CFtime` instance that contains the offsets to use. #' @param format character. An atomic string with either of the values "date" or #' "timestamp". If the argument is not specified, the format used is #' "timestamp" if there is time information, "date" otherwise. #' @param asPOSIX logical. If `TRUE`, for "standard", "gregorian" and #' "proleptic_gregorian" calendars the output is a vector of `POSIXct` - for #' other calendars the result is `NULL`. Default value is `FALSE`. #' #' @returns A character vector where each element represents a moment in time #' according to the `format` specifier. Time zone information is not #' represented. #' @export #' #' @examples #' cf <- CFtime("hours since 2020-01-01", "standard", seq(0, 24, by = 0.25)) #' CFtimestamp(cf, "timestamp") #' #' cf2 <- CFtime("days since 2002-01-21", "standard", 0:20) #' tail(CFtimestamp(cf2, asPOSIX = TRUE)) #' #' tail(CFtimestamp(cf2)) #' #' tail(CFtimestamp(cf2 + 1.5)) CFtimestamp <- function(cf, format = NULL, asPOSIX = FALSE) { if (!(methods::is(cf, "CFtime"))) stop("First argument to CFtimestamp must be an instance of the `CFtime` class") time <- .offsets2time(cf@offsets, cf@datum) if (nrow(time) == 0L) return() if (is.null(format)) format <- ifelse(unit(cf@datum) < 4L || .has_time(time), "timestamp", "date") else if (!(format %in% c("date", "time", "timestamp"))) stop("Format specifier not recognized") if (asPOSIX) { if (calendar_id(cf@datum) != 1L) stop("Cannot make a POSIX timestamp on a non-standard calendar") if (format == "date") ISOdate(time$year, time$month, time$day, 0L) else ISOdatetime(time$year, time$month, time$day, time$hour, time$minute, time$second, "UTC") } else { if (format == "date") sprintf("%04d-%02d-%02d", time$year, time$month, time$day) else sprintf("%04d-%02d-%02dT%s", time$year, time$month, time$day, .format_time(time)) } } #' Formatting of time strings from time elements #' #' This is an internal function that should not generally be used outside of #' the CFtime package. #' #' @param t data.frame. A data.frame representing timestamps. #' #' @returns A vector of character strings with a properly formatted time. If any #' timestamp has a fractional second part, then all time strings will report #' seconds at milli-second precision. #' @noRd .format_time <- function(t) { fsec <- t$second %% 1L if (any(fsec > 0L)) { paste0(sprintf("%02d:%02d:", t$hour, t$minute), ifelse(t$second < 10, "0", ""), sprintf("%.3f", t$second)) } else { sprintf("%02d:%02d:%02d", t$hour, t$minute, t$second) } } #' Do the time elements have time-of-day information? #' #' If any time information > 0, then `TRUE` otherwise `FALSE` #' #' This is an internal function that should not generally be used outside of #' the CFtime package. #' #' @param t data.frame. A data.frame representing timestamps. 
#' #' @returns `TRUE` if any timestamp has time-of-day information, `FALSE` otherwise. #' @noRd .has_time <- function(t) { any(t$hour > 0) || any(t$minute > 0) || any(t$second > 0) }
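# Illustrative sketch (hypothetical helper, for exposition only): on the "360_day"
# calendar a date such as 2020-02-30 is valid and can only be represented as a
# character timestamp; `asPOSIX = TRUE` is therefore limited to standard calendars.
.example_360day_timestamps <- function() {
  cf360 <- CFtime("days since 2020-01-01", "360_day", 0:59)
  tail(CFtimestamp(cf360, "date"), 3)   # "2020-02-28" "2020-02-29" "2020-02-30"
}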
/scratch/gouwar.j/cran-all/cranData/CFtime/R/CFformat.R
#' Parse series of timestamps in CF format to date-time elements #' #' This function will parse a vector of timestamps in ISO8601 or UDUNITS format #' into a data frame with columns for the elements of the timestamp: year, #' month, day, hour, minute, second, time zone. Those timestamps that could not #' be parsed or which represent an invalid date in the indicated `CFtime` #' instance will have `NA` values for the elements of the offending timestamp #' (which will generate a warning). #' #' The supported formats are the *broken timestamp* format from the UDUNITS #' library and ISO8601 *extended*, both with minor changes, as suggested by the #' CF Metadata Conventions. In general, the format is `YYYY-MM-DD hh:mm:ss.sss #' hh:mm`. The year can be from 1 to 4 digits and is interpreted literally, so #' `79-10-24` is the day Mount Vesuvius erupted and destroyed Pompeii, not #' `1979-10-24`. The year and month are mandatory, all other fields are #' optional. There are defaults for all missing values, following the UDUNITS #' and CF Metadata Conventions. Leading zeros can be omitted in the UDUNITS #' format, but not in the ISO8601 format. The optional fractional part can have #' as many digits as the precision calls for and will be applied to the smallest #' specified time unit. In the result of this function, if the fraction is #' associated with the minute or the hour, it is converted into a regular #' `hh:mm:ss.sss` format, i.e. any fraction in the result is always associated #' with the second, rounded down to milli-second accuracy. The time zone is #' optional and should have at least the hour or `Z` if present, the minute is #' optional. The time zone hour can have an optional sign. The separator between #' the date and the time can be a single whitespace character or a `T`; in the #' UDUNITS format the separator between the time and the time zone must be a #' single whitespace character. #' #' Currently only the extended formats (with separators between the elements) #' are supported. The vector of timestamps may have any combination of ISO8601 #' and UDUNITS formats. #' #' Timestamps that are prior to the datum are not allowed. The corresponding row #' in the result will have `NA` values. #' #' @param cf CFtime. An instance of `CFtime` indicating the CF calendar and #' datum to use when parsing the date. #' @param x character. Vector of character strings representing timestamps in #' ISO8601 extended or UDUNITS broken format. #' #' @returns A data frame with constituent elements of the parsed timestamps in #' numeric format. The columns are year, month, day, hour, minute, second #' (with an optional fraction), time zone (character string), and the #' corresponding offset value from the datum. Invalid input data will appear #' as `NA` - if this is the case, a warning message will be displayed - other #' missing information on input will use default values. #' @export #' @examples #' cf <- CFtime("days since 0001-01-01", "proleptic_gregorian") #' #' # This will have `NA`s on output and generate a warning #' timestamps <- c("2012-01-01T12:21:34Z", "12-1-23", "today", #' "2022-08-16T11:07:34.45-10", "2022-08-16 10.5+04") #' CFparse(cf, timestamps) CFparse <- function(cf, x) { stopifnot(is.character(x), methods::is(cf, "CFtime")) if (cf@datum@unit > 4) stop("Parsing of timestamps on a \"month\" or \"year\" datum is not supported.") out <- .parse_timestamp(cf@datum, x) if (anyNA(out$year)) warning("Some dates could not be parsed. 
Result contains `NA` values.") if (length(unique(out$tz)) > 1) warning("Timestamps have multiple time zones. Some or all may be different from the datum time zone.") else if (out$tz[1] != CFtimezone(cf)) warning("Timestamps have time zone that is different from the datum.") return(out) } #' Parsing a vector of date-time strings, using a CFtime specification #' #' This is an internal function that should not generally be used outside of #' the CFtime package. #' #' @param datum CFdatum. The `CFdatum` instance that is the datum for the dates. #' @param d character. A vector of strings of dates and times. #' #' @returns A data frame with columns year, month, day, hour, minute, second, #' time zone, and offset. Invalid input data will appear as `NA`. #' @noRd .parse_timestamp <- function(datum, d) { # Parsers # UDUNITS broken timestamp definition, with some changes # broken_timestamp {broken_date}({space|T}+{broken_clock})? -- T not in definition but present in lexer code # broken_date {year}-{month}(-{day})? # year [+-]?[0-9]{1,4} # month 0?[1-9]|1[0-2] # day 0?[1-9]|[1-2][0-9]|30|31 # broken_clock {hour}:{minute}(:{second})? # hour [0-1]?[0-9]|2[0-3] -- sign on hour not allowed, but see timezone # minute [0-5]?[0-9] # second {minute}? -- leap second not supported # fractional part (\.[0-9]*)? # timezone [+-]?{hour}(:{minute})? -- added, present in lexer code broken <- paste0( "^", # anchor string at start "([+-]?[0-9]{1,4})", # year, with optional sign "-(0?[1-9]|1[012])", # month "(?:-(0?[1-9]|[12][0-9]|3[01]))?", # day, optional "(?:[T ]", # if a time is following, separate with a single whitespace character or a "T" "([01]?[0-9]|2[0-3])", # hour ":([0-5]?[0-9])", # minute "(?::([0-5]?[0-9]))?", # second, optional "(?:\\.([0-9]*))?", # optional fractional part of the smallest specified unit ")?", # close optional time capture group "(?:\\s", # if a time zone offset is following, separate with a single whitespace character "([+-])?([01]?[0-9]|2[0-3])", # tz hour, with optional sign "(?::(00|15|30|45))?", # optional tz minute, only 4 possible values ")?", # close optional timezone capture group "$" # anchor string at end ) iso8601 <- paste0( "^", "([0-9]{4})", "-(0[1-9]|1[012])", "-(0[1-9]|[12][0-9]|3[01])?", "(?:", "[T ]([01][0-9]|2[0-3])", "(?::([0-5][0-9]))?", "(?::([0-5][0-9]))?", "(?:\\.([0-9]*))?", ")?", "(?:([Z+-])([01][0-9]|2[0-3])?(?::(00|15|30|45))?", ")?$" ) # UDUNITS packed timestamp definition - NOT YET USED # packed_timestamp {packed_date}({space|T}+{packed_clock})? -- T and space only allowed in packed time follows # packed_date {year}({month}{day}?)? -- must be YYYYMMDD or else format is ambiguous, as per lexer code # packed_clock {hour}({minute}{second}?)? -- must be HHMMSS to be unambiguous # timezone [+-]?{hour}({minute})? 
-- added, present in lexer code, must be HHMM # packed <- stringi::stri_join( # "^", # anchor string at start # "([+-]?[0-9]{4})", # year, with optional sign # "(0[1-9]|1[012])?", # month, optional # "(0[1-9]|[12][0-9]|3[01])?", # day, optional # "(?:[T,\\s]", # if a time is following, separate with a single whitespace character or a "T" # "([01][0-9]|2[0-3])?", # hour # "([0-5][0-9])?", # minute, optional # "([0-5]?[0-9](?:\\.[0-9]*)?)?", # second, optional, with optional fractional part # ")?", # close optional time capture group # "(?:\\s", # if a time zone offset is following, separate with a single whitespace character # "([+-]?[01][0-9]|2[0-3])?", # hour, with optional sign # "(00|15|30|45)?", # minute, only 4 possible values # ")?", # close optional timezone capture group # "$" # anchor string at end # ) parse <- data.frame(year = integer(), month = integer(), day = integer(), hour = integer(), minute = integer(), second = numeric(), frac = character(), tz_sign = character(), tz_hour = character(), tz_min = character()) cap <- utils::strcapture(iso8601, d, parse) missing <- which(is.na(cap$year)) if (length(missing) > 0) cap[missing,] <- utils::strcapture(broken, d[missing], parse) # Assign any fraction to the appropriate time part cap$frac[is.na(cap$frac)] <- "0" frac <- as.numeric(paste0("0.", cap$frac)) if (sum(frac) > 0) { ndx <- which(!(is.na(cap$second)) & frac > 0) if (length(ndx) > 0) cap$second[ndx] <- cap$second[ndx] + frac[ndx] ndx <- which(!(is.na(cap$minute)) & is.na(cap$second) & frac > 0) if (length(ndx) > 0) cap$second[ndx] <- 60 * frac[ndx] ndx <- which(!(is.na(cap$hour)) & is.na(cap$minute) & frac > 0) if (length(ndx) > 0) { secs <- 3600 * frac cap$minute[ndx] <- secs[ndx] %/% 60 cap$second[ndx] <- secs[ndx] %% 60 } } cap$frac <- NULL # Convert NA time parts to 0 - in CF default time is 00:00:00 when not specified cap$hour[is.na(cap$hour)] <- 0 cap$minute[is.na(cap$minute)] <- 0 cap$second[is.na(cap$second)] <- 0 # Set timezone to default value where needed cap$tz <- paste0(ifelse(cap$tz_sign == "-", "-", ""), ifelse(cap$tz_hour == "", "00", cap$tz_hour), ":", ifelse(cap$tz_min == "", "00", cap$tz_min)) cap$tz_sign <- cap$tz_hour <- cap$tz_min <- NULL # Set optional date parts to 1 if not specified cap$month[is.na(cap$month)] <- 1 cap$day[is.na(cap$day)] <- 1 # Check date validity invalid <- mapply(function(y, m, d) {!.is_valid_calendar_date(y, m, d, calendar_id(datum))}, cap$year, cap$month, cap$day) if (nrow(datum@origin) > 0) { earlier <- mapply(function(y, m, d, dy, dm, dd) { if (is.na(y)) return(TRUE) if (y < dy) return(TRUE) if (y == dy){ if (m < dm) return(TRUE) if (m == dm && d < dd) return(TRUE) } return(FALSE) }, cap$year, cap$month, cap$day, datum@origin[1, 1], datum@origin[1, 2], datum@origin[1, 3]) invalid <- invalid | earlier } if (sum(invalid) > 0) cap[invalid,] <- rep(NA, 7) # Calculate offsets if (nrow(datum@origin) == 0) { # if there's no datum yet, don't calculate offsets cap$offset <- rep(0, nrow(cap)) # this happens, f.i., when a CFdatum is created } else { days <- switch(calendar_id(datum), .date2offset_standard(cap, datum@origin), .date2offset_julian(cap, datum@origin), .date2offset_360day(cap, datum@origin), .date2offset_365day(cap, datum@origin), .date2offset_366day(cap, datum@origin) ) cap$offset <- round((days * 86400 + (cap$hour - datum@origin$hour[1]) * 3600 + (cap$minute - datum@origin$minute[1]) * 60 + cap$second - datum@origin$second) / CFt$units$seconds[datum@unit], 3) } return(cap) } #' Calculate difference in days between a 
data.frame of time parts and a datum #' #' This is an internal function that should not generally be used outside of #' the CFtime package. #' #' @param x data.frame. Dates to calculate the difference for. #' @param origin data.frame. The origin to calculate the difference against. #' #' @returns Vector of days between `x` and the `origin`, using the `standard` calendar. #' @noRd .date2offset_standard <- function(x, origin) { yd0 <- c(0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334) # days diff of 1st of month to 1 January in normal year datum_year <- origin[1, 1] datum_days_in_year <- yd0[origin[1, 2]] + origin[1, 3] if ((origin[1, 2] <= 2) && ((datum_year %% 4 == 0 && datum_year %% 100 > 0) || datum_year %% 400 == 0)) datum_days_in_year <- datum_days_in_year - 1 mapply(function(y, m, d) { if (is.na(y)) return(NA_integer_) if (m <= 2 && ((y %% 4 == 0 && y %% 100 > 0) || y %% 400 == 0)) days <- -1 else days <- 0 # -1 if in a leap year up to the leap day, 0 otherwise repeat { if (y > datum_year) { days <- days + 365 + as.integer((y %% 4 == 0 && y %% 100 > 0) || y %% 400 == 0) y <- y - 1 } else break } days + yd0[m] + d - datum_days_in_year }, x$year, x$month, x$day) } #' Calculate difference in days between a data.frame of time parts and a datum #' #' This is an internal function that should not generally be used outside of #' the CFtime package. #' #' @param x data.frame. Dates to calculate the difference for. #' @param origin data.frame. The origin to calculate the difference against. #' #' @returns Vector of days between `x` and the `origin`, using the `julian` calendar. #' @noRd .date2offset_julian <- function(x, origin) { yd0 <- c(0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334) # days diff of 1st of month to 1 January in normal year datum_year <- origin[1, 1] datum_days_in_year <- yd0[origin[1, 2]] + origin[1, 3] if (origin[1, 2] <= 2 && datum_year %% 4 == 0) datum_days_in_year <- datum_days_in_year - 1 mapply(function(y, m, d) { if (is.na(y)) return(NA_integer_) if (m <= 2 && y %% 4 == 0) days <- -1 else days <- 0 # -1 if in a leap year up to the leap day, 0 otherwise repeat { if (y > datum_year) { days <- days + 365 + as.integer(y %% 4 == 0) y <- y - 1 } else break } days + yd0[m] + d - datum_days_in_year }, x$year, x$month, x$day) } #' Calculate difference in days between a data.frame of time parts and a datum #' #' This is an internal function that should not generally be used outside of #' the CFtime package. #' #' @param x data.frame. Dates to calculate the difference for. #' @param origin data.frame. The origin to calculate the difference against. #' #' @returns Vector of days between `x` and the `origin`, using the `360_day` calendar. #' @noRd .date2offset_360day <- function(x, origin) { (x$year - origin[1, 1]) * 360 + (x$month - origin[1, 2]) * 30 + x$day - origin[1, 3] } #' Calculate difference in days between a data.frame of time parts and a datum #' #' This is an internal function that should not generally be used outside of #' the CFtime package. #' #' @param x data.frame. Dates to calculate the difference for. #' @param origin data.frame. The origin to calculate the difference against. #' #' @returns Vector of days between `x` and the `origin`, using the `365_day` calendar. 
#' @noRd .date2offset_365day <- function(x, origin) { yd0 <- c(0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334) # days diff of 1st of month to 1 January (x$year - origin[1, 1]) * 365 + yd0[x$month] - yd0[origin[1, 2]] + x$day - origin[1, 3] } #' Calculate difference in days between a data.frame of time parts and a datum #' #' This is an internal function that should not generally be used outside of #' the CFtime package. #' #' @param x data.frame. Dates to calculate the difference for. #' @param origin data.frame. The origin to calculate the difference against. #' #' @returns Vector of days between `x` and the `origin`, using the `366_day` calendar. #' @noRd .date2offset_366day <- function(x, origin) { yd0 <- c(0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335) # days diff of 1st of month to 1 January (x$year - origin[1, 1]) * 366 + yd0[x$month] - yd0[origin[1, 2]] + x$day - origin[1, 3] }
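# Illustrative sketch (hypothetical helper, for exposition only): the offsets
# returned by CFparse() can be appended to the CFtime instance they were parsed
# against, giving a round-trip from timestamps to offsets.
.example_parse_roundtrip <- function() {
  cf <- CFtime("days since 2020-01-01", "standard")
  elements <- CFparse(cf, c("2020-01-11", "2020-02-01T12:00:00"))
  cf + elements$offset   # new instance holding offsets 10 and 31.5
}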
/scratch/gouwar.j/cran-all/cranData/CFtime/R/CFparse.R
#' CFtime: working with CF Metadata Conventions "time" dimensions #' #' Support for all calendars as specified in the Climate and Forecast #' (CF) Metadata Conventions for climate and forecasting data. The CF Metadata #' Conventions is widely used for distributing files with climate observations #' or projections, including the Coupled Model Intercomparison Project (CMIP) #' data used by climate change scientists and the Intergovernmental Panel on #' Climate Change (IPCC). This package specifically allows the user to work #' with any of the CF-compliant calendars (many of which are not compliant with #' POSIXt). The CF time coordinate is formally defined in the #' [CF Metadata Conventions document](https://cfconventions.org/Data/cf-conventions/cf-conventions-1.10/cf-conventions.html#time-coordinate). #' #' The package can create a `CFtime` instance from scratch or, more commonly, it #' can use the dimension attributes and dimension variable values from a NetCDF #' resource. The package does not actually do any of the reading and the user is #' free to use their NetCDF package of preference (with the two main options #' being [RNetCDF](https://cran.r-project.org/package=RNetCDF) and #' [ncdf4](https://cran.r-project.org/package=ncdf4)). #' #' **Create, modify, inquire** #' * [CFtime()]: Create a CFtime instance #' * [`Properties`][CFproperties] of the CFtime instance #' * [CFparse()]: Parse a vector of character timestamps into CFtime elements #' * [`Compare`][CFtime-equivalent] two CFtime instances #' * [`Merge`][CFtime-merge] two CFtime instances #' * [`Append`][CFtime-append] additional time steps to a CFtime instance #' * [CFtimestamp()]: Generate a vector of character or `POSIXct` timestamps from a CFtime instance #' * [CFrange()]: Timestamps of the two endpoints in the time series #' * [CFcomplete()]: Does the CFtime instance have a complete time series between endpoints? #' * [CFmonth_days()]: How many days are there in a month using the CFtime calendar? #' #' **Factors and coverage** #' * [CFfactor()]: Create factors for different time periods #' * [CFfactor_units()]: How many units of time are there in each factor level? #' * [CFfactor_coverage()]: How much data is available for each level of the factor? #' @keywords internal #' @aliases CFtime-package "_PACKAGE" ## usethis namespace: start ## usethis namespace: end NULL
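# Illustrative quick-start sketch (hypothetical helper, for exposition only),
# tying together some of the functions listed above with made-up values.
.example_quick_start <- function() {
  cf <- CFtime("days since 1950-01-01", "noleap", 0:364)
  list(range    = CFrange(cf),                      # "1950-01-01" "1950-12-31"
       complete = CFcomplete(cf),                   # TRUE: no gaps in the series
       months   = nlevels(CFfactor(cf, "month")))   # 12
}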
/scratch/gouwar.j/cran-all/cranData/CFtime/R/CFtime-package.R
#' CF Metadata Conventions time representation #' #' @slot datum CFdatum. The atomic origin upon which the `offsets` are based. #' @slot resolution numeric. The average number of time units between offsets. #' @slot offsets numeric. A vector of offsets from the datum. #' #' @returns An object of class CFtime. #' @export setClass("CFtime", slots = c( datum = "CFdatum", resolution = "numeric", offsets = "numeric" )) #' Create a CFtime object #' #' This function creates an instance of the `CFtime` class. The arguments to #' the call are typically read from a CF-compliant data file with climatological #' observations or climate projections. Specification of arguments can also be #' made manually in a variety of combinations. #' #' @param definition character. An atomic string describing the time coordinate #' of a CF-compliant data file. #' @param calendar character. An atomic string describing the calendar to use #' with the time dimension definition string. Default value is "standard". #' @param offsets numeric or character, optional. When numeric, a vector of #' offsets from the origin in the time series. When a character vector, #' timestamps in ISO8601 or UDUNITS format. When an atomic character string, a #' timestamp in ISO8601 or UDUNITS format and then a time series will be #' generated with a separation between steps equal to the unit of measure in #' the definition, inclusive of the definition timestamp. The unit of measure #' of the offsets is defined by the time series definition. #' #' @returns An instance of the `CFtime` class. #' @export #' #' @examples #' CFtime("days since 1850-01-01", "julian", 0:364) #' #' CFtime("hours since 2023-01-01", "360_day", "2023-01-30T23:00") CFtime <- function(definition, calendar = "standard", offsets = NULL) { if (is.null(calendar)) calendar <- "standard" # This may occur when "calendar" attribute is not defined in the NC file datum <- CFdatum(definition, calendar) if (is.array(offsets)) dim(offsets) <- NULL if (is.null(offsets)) { methods::new("CFtime", datum = datum, resolution = NA_real_, offsets = numeric()) } else if (is.numeric(offsets)) { stopifnot(.validOffsets(offsets, CFt$units$per_day[datum@unit])) if (length(offsets) > 1L) { resolution <- (max(offsets) - min(offsets)) / (length(offsets) - 1L) } else { resolution <- NA_real_ } methods::new("CFtime", datum = datum, resolution = resolution, offsets = offsets) } else if (is.character(offsets)) { time <- .parse_timestamp(datum, offsets) if (anyNA(time$year)) stop("Offset argument contains invalid timestamps") if (length(offsets) == 1L) { off <- seq(0L, time$offset[1L]) resolution <- 1 } else { off <- time$offset resolution <- (max(time$offset) - min(time$offset)) / (length(time$offset) - 1L) } methods::new("CFtime", datum = datum, resolution = resolution, offsets = off) } else stop("Invalid offsets for CFtime object") } #' @aliases CFproperties #' @title Properties of a CFtime object #' #' @description These functions return the properties of an instance of the #' `CFtime` class. The properties are all read-only, but offsets can be added #' using the `+` operator. #' #' @param cf CFtime. An instance of `CFtime`. #' #' @returns `CFcalendar()` and `CFunit()` return an atomic character string. #' `CForigin()` returns a data frame of timestamp elements with a single row #' of data. `CFtimezone()` returns the datum time zone as an atomic character #' string. `CFoffsets()` returns a vector of offsets or `NULL` if no offsets #' have been set. 
#'
#' @examples
#' cf <- CFtime("days since 1850-01-01", "julian", 0:364)
#' CFdefinition(cf)
#' CFcalendar(cf)
#' CFunit(cf)
#' CFtimezone(cf)
#' CForigin(cf)
#' CFoffsets(cf)
#' CFresolution(cf)
#' @describeIn CFproperties The definition string of the CFtime instance
#' @export
CFdefinition <- function(cf) definition(cf@datum)

#' @describeIn CFproperties The calendar of the CFtime instance
#' @export
CFcalendar <- function(cf) calendar(cf@datum)

#' @describeIn CFproperties The unit of the CFtime instance
#' @export
CFunit <- function(cf) CFt$units$name[unit(cf@datum)]

#' @describeIn CFproperties The origin of the CFtime instance in timestamp elements
#' @export
CForigin <- function(cf) cf@datum@origin

#' @describeIn CFproperties The time zone of the datum of the CFtime instance as a character string
#' @export
CFtimezone <- function(cf) timezone(cf@datum)

#' @describeIn CFproperties The offsets of the CFtime instance as a vector
#' @export
CFoffsets <- function(cf) cf@offsets

#' @describeIn CFproperties The average separation between the offsets in the CFtime instance
#' @export
CFresolution <- function(cf) cf@resolution

setMethod("show", "CFtime", function(object) {
  noff <- length(object@offsets)
  if (noff == 0L) {
    el <- " Elements: (no elements)\n"
  } else {
    d <- CFrange(object)
    if (noff > 1L) {
      el <- sprintf(" Elements: [%s .. %s] (average of %f %s between %d elements)\n",
                    d[1L], d[2L], object@resolution, CFt$units$name[unit(object@datum)], noff)
    } else {
      el <- paste(" Elements:", d[1L], "\n")
    }
  }
  cat("CF time series:\n", methods::show(object@datum), el, sep = "")
})

#' @aliases CFrange
#'
#' @title Extreme time series values
#'
#' @description Character representation of the extreme values in the time series
#'
#' @param x An instance of the `CFtime` class
#'
#' @returns character. Vector of two character representations of the extremes of the time series.
#' @export
#' @examples
#' cf <- CFtime("days since 1850-01-01", "julian", 0:364)
#' CFrange(cf)
setGeneric("CFrange", function(x) standardGeneric("CFrange"))

#' @describeIn CFrange Extreme values of the time series
setMethod("CFrange", "CFtime", function(x) .ts_extremes(x))

#' Indicates if the time series is complete
#'
#' This function indicates if the time series is complete, meaning that the time
#' steps are equally spaced and there are thus no gaps in the time series.
#'
#' This function gives exact results for time series where the nominal
#' *unit of separation* between observations in the time series is exact in terms of the
#' datum unit. As an example, for a datum unit of "days" where the observations
#' are spaced a fixed number of days apart the result is exact, but if the same
#' datum unit is used for data that is on a monthly basis, the *assessment* is
#' approximate because the number of days per month is variable and dependent on
#' the calendar (the exception being the `360_day` calendar, where the
#' assessment is exact). The *result* is still correct in most cases (including
#' all CF-compliant data sets that the developers have seen) although
#' there may be esoteric constructions of CFtime and offsets that trip up this
#' implementation.
#'
#' @param x An instance of the `CFtime` class
#'
#' @returns logical. `TRUE` if the time series is complete, with no gaps;
#' `FALSE` otherwise. If no offsets have been added to the CFtime instance,
#' `NA` is returned.
#' @export
#' @examples
#' cf <- CFtime("days since 1850-01-01", "julian", 0:364)
#' CFcomplete(cf)
CFcomplete <- function(x) {
  if (!methods::is(x, "CFtime")) stop("Argument must be an instance of CFtime")
  if (length(x@offsets) == 0L) NA
  else .ts_equidistant(x)
}

#' Which time steps fall within two extreme values
#'
#' Given two extreme character timestamps, return a logical vector of a length
#' equal to the number of time steps in the CFtime instance with values `TRUE`
#' for those time steps that fall between the two extreme values, `FALSE`
#' otherwise. This can be used to select slices from the time series in reading
#' or analysing data.
#'
#' @param x CFtime. The time series to operate on.
#' @param extremes character. Vector of two timestamps that represent the
#' extremes of the time period of interest. The timestamps must be in
#' increasing order. The timestamps need not fall in the range of the time
#' steps in the CFtime instance.
#'
#' @returns A logical vector with a length equal to the number of time steps in
#' `x` with values `TRUE` for those time steps that fall between the two
#' extreme values, `FALSE` otherwise. The earlier timestamp is included, the
#' later timestamp is excluded. A specification of `c("2022-01-01", "2023-01-01")`
#' will thus include all time steps that fall in the year 2022.
#' @export
#'
#' @examples
#' cf <- CFtime("hours since 2023-01-01 00:00:00", "standard", 0:23)
#' CFsubset(cf, c("2022-12-01", "2023-01-01 03:00"))
CFsubset <- function(x, extremes) {
  if (!methods::is(x, "CFtime")) stop("First argument must be an instance of CFtime")
  if (!is.character(extremes) || length(extremes) != 2L)
    stop("Second argument must be a character vector of two timestamps")
  if (extremes[2L] < extremes[1L]) extremes <- c(extremes[2L], extremes[1L])
  .ts_subset(x, extremes)
}

#' Equivalence of CFtime objects
#'
#' This operator can be used to test if two `CFtime` objects represent the same
#' CF-convention time coordinates. Two `CFtime` objects are considered equivalent
#' if they have an equivalent datum and the same offsets.
#'
#' @param e1,e2 CFtime. Instances of the `CFtime` class.
#'
#' @returns `TRUE` if the `CFtime` objects are equivalent, `FALSE` otherwise.
#' @export
#' @aliases CFtime-equivalent
#'
#' @examples
#' e1 <- CFtime("days since 1850-01-01", "gregorian", 0:364)
#' e2 <- CFtime("days since 1850-01-01 00:00:00", "standard", 0:364)
#' e1 == e2
setMethod("==", c("CFtime", "CFtime"), function(e1, e2)
  .datum_equivalent(e1@datum, e2@datum) &&
  length(e1@offsets) == length(e2@offsets) &&
  all(e1@offsets == e2@offsets))

#' Merge two CFtime objects
#'
#' Two `CFtime` instances can be merged into one with this operator, provided
#' that the units and calendars of the datums of the two instances are
#' equivalent.
#'
#' If the origins of the two datums are not identical, the earlier origin is
#' preserved and the offsets of the later origin are updated in the resulting
#' CFtime instance.
#'
#' The order of the two parameters is indirectly significant. The resulting
#' `CFtime` instance will have the offsets of both instances in the order that
#' they are specified. There is no reordering or removal of duplicates. This is
#' because the time series are usually associated with a data set and the
#' correspondence between the data in the files and the CFtime instance is thus
#' preserved. When merging the data sets described by this time series, the
#' order must be identical to the merging here.
#'
#' @param e1,e2 CFtime. Instances of the `CFtime` class.
#' #' @returns A `CFtime` object with a set of offsets composed of the offsets of #' the instances of `CFtime` that the operator operates on. If the datum units #' or calendars of the `CFtime` instances are not equivalent, an error is #' thrown. #' @export #' @aliases CFtime-merge #' #' @examples #' e1 <- CFtime("days since 1850-01-01", "gregorian", 0:364) #' e2 <- CFtime("days since 1850-01-01 00:00:00", "standard", 365:729) #' e1 + e2 setMethod("+", c("CFtime", "CFtime"), function(e1, e2) { if (!.datum_compatible(e1@datum, e2@datum)) stop('Datums not compatible') if (all(e1@datum@origin[1:6] == e2@datum@origin[1:6])) CFtime(definition(e1@datum), calendar(e1@datum), c(e1@offsets, e2@offsets)) else { diff <- .parse_timestamp(e1@datum, paste(origin_date(e2@datum), origin_time(e2@datum)))$offset if (is.na(diff)) { diff <- .parse_timestamp(e2@datum, paste(origin_date(e1@datum), origin_time(e1@datum)))$offset CFtime(definition(e2@datum), calendar(e2@datum), c(e1@offsets + diff, e2@offsets)) } else CFtime(definition(e1@datum), calendar(e1@datum), c(e1@offsets, e2@offsets + diff)) } }) #' Extend a CFtime object with additional offsets #' #' A `CFtime` instance can be extended by adding additional offsets using this #' operator. #' #' The resulting `CFtime` instance will have its offsets in the order that they #' are added, meaning that the offsets from the `CFtime` instance come first and #' those from the numeric vector follow. There is no reordering or removal of #' duplicates. This is because the time series are usually associated with a #' data set and the correspondence between the two is thus preserved, if and #' only if the data sets are merged in the same order. #' #' Note that when adding multiple vectors of offsets to a `CFtime` instance, it #' is more efficient to first concatenate the vectors and then do a final #' addition to the `CFtime` instance. So avoid `CFtime(definition, calendar, e1) + CFtime(definition, calendar, e2) + CFtime(definition, calendar, e3) + ...` #' but rather do `CFtime(definition, calendar, e1) + c(e2, e3, ...)`. It is the #' responsibility of the operator to ensure that the offsets of the different #' data sets are in reference to the same datum. #' #' Negative offsets will generate an error. #' #' @param e1 CFtime. Instance of the `CFtime` class. #' @param e2 numeric. Vector of offsets to be added to the `CFtime` instance. #' #' @returns A `CFtime` object with offsets composed of the `CFtime` instance and #' the numeric vector. #' @export #' @aliases CFtime-append #' #' @examples #' e1 <- CFtime("days since 1850-01-01", "gregorian", 0:364) #' e2 <- 365:729 #' e1 + e2 setMethod("+", c("CFtime", "numeric"), function(e1, e2) { if (is.array(e2)) dim(e2) <- NULL if (.validOffsets(e2, CFt$units$per_day[unit(e1@datum)])) CFtime(definition(e1@datum), calendar(e1@datum), c(e1@offsets, e2)) }) #' Validate offsets passed into a CFtime instance #' #' This is an internal function that should not be used outside the CFtime #' package. #' #' Tests the `offsets` values. Throws an error if the argument contains negative or `NA` values. #' #' @param offsets The offsets to test #' #' @returns logical. `TRUE` if the offsets are valid, throws an error otherwise. 
#' @noRd .validOffsets <- function(offsets, upd) { if (any(is.na(offsets) | (offsets < 0))) stop("Offsets cannot contain negative or `NA` values.") if (any(offsets > 1000000 * upd)) stop("Offset values are outside of reasonable range (year 1 - 2500).") TRUE } #' Return the extremes of the time series as character strings #' #' This function returns the first and last timestamp of the time series as a #' vector. Note that the offsets do not have to be sorted. #' #' This is an internal function that should not be used outside of the CFtime #' package. #' #' @param x CFtime. The time series to operate on. #' #' @returns Vector of two character strings that represent the starting and #' ending timestamps in the time series. If all of the timestamps in the time #' series have a time component of `00:00:00` the date of the timestamp is #' returned, otherwise the full timestamp (without any time zone information). #' #' @noRd .ts_extremes <- function(x) { if (length(x@offsets) == 0L) return(c(NA_character_, NA_character_)) time <- .offsets2time(range(x@offsets), x@datum) if (sum(time$hour, time$minute, time$second) == 0) { # all times are 00:00:00 return(sprintf("%04d-%02d-%02d", time$year, time$month, time$day)) } else { t <- .format_time(time) return(sprintf("%04d-%02d-%02dT%s", time$year, time$month, time$day, t)) } } #' Indicates if the time series has equidistant time steps #' #' This function returns `TRUE` if the time series has uniformly distributed #' time steps between the extreme values, `FALSE` otherwise. First test without #' sorting; this should work for most data sets. If not, only then offsets are #' sorted. For most data sets that will work but for implied resolutions of #' month, season, year, etc based on a "days" or finer datum unit this will fail #' due to the fact that those coarser units have a variable number of days per #' time step, in all calendars except for `360_day`. For now, an approximate #' solution is used that should work in all but the most non-conformal exotic #' arrangements. #' #' This function should only be called after offsets have been added. #' #' This is an internal function that should not be used outside of the CFtime #' package. #' #' @param x CFtime. The time series to operate on. #' #' @returns `TRUE` if all time steps are equidistant, `FALSE` otherwise. #' #' @noRd .ts_equidistant <- function(x) { out <- all(diff(x@offsets) == x@resolution) if (!out) { doff <- diff(sort(x@offsets)) out <- all(doff == x@resolution) if (!out) { # Don't try to make sense of totally non-standard arrangements such as # datum units "years" or "months" describing sub-daily time steps. # Also, 360_day calendar should be well-behaved so we don't want to get here. 
      if (unit(x@datum) > 4L || calendar_id(x@datum) == 3L) return(FALSE)

      # Check if we have monthly or yearly data on a finer-scale datum
      # This is all rather approximate but should be fine in most cases
      # This accommodates middle-of-the-time-period offsets as per the CF Metadata Conventions
      # Please report problems at https://github.com/pvanlaake/CFtime/issues
      ddays <- range(doff) * CFt$units$per_day[unit(x@datum)]
      return((ddays[1] >= 28 && ddays[2] <= 31) ||    # months
             (ddays[1] >= 90 && ddays[2] <= 92) ||    # seasons
             (ddays[1] >= 365 && ddays[2] <= 366))    # years
    }
  }
  out
}

#' Which time steps fall within two extreme values
#'
#' Given two extreme character timestamps, return a logical vector of a length
#' equal to the number of time steps in the CFtime instance with values `TRUE`
#' for those time steps that fall between the two extreme values, `FALSE`
#' otherwise.
#'
#' **NOTE** An unparseable earlier timestamp is silently set to 0, so invalid
#' input will still generate a result; this should be addressed. An unparseable
#' later timestamp is not tolerated.
#'
#' @param x CFtime. The time series to operate on.
#' @param extremes character. Vector of two timestamps that represent the
#' extremes of the time period of interest. The timestamps must be in
#' increasing order.
#'
#' @returns A logical vector with a length equal to the number of time steps in
#' `x` with values `TRUE` for those time steps that fall between the two
#' extreme values, `FALSE` otherwise. The earlier timestamp is included, the
#' later timestamp is excluded. A specification of `c("2022-01-01", "2023-01-01")`
#' will thus include all time steps that fall in the year 2022.
#' @noRd
.ts_subset <- function(x, extremes) {
  ext <- .parse_timestamp(x@datum, extremes)$offset
  if (is.na(ext[1L])) ext[1L] <- 0
  if (ext[1L] > max(x@offsets) || is.na(ext[2L])) rep(FALSE, length(x@offsets))
  else x@offsets >= ext[1L] & x@offsets < ext[2L]
}

#' Decompose a vector of offsets, in units of the datum, to their timestamp
#' values
#'
#' This function adds the offsets to the origin of the datum and decomposes the
#' resulting moments in time into their timestamp elements.
#'
#' This is an internal function that should not be used outside of the CFtime
#' package.
#'
#' This function may introduce inaccuracies where the datum unit is "months" or
#' "years", due to the ambiguous definition of these units.
#'
#' @param offsets numeric. Vector of offsets to add to the datum.
#' @param datum CFdatum. The datum that defines the unit of the offsets and the
#' origin to add the offsets to.
#'
#' @returns A data.frame with columns for the timestamp elements and as many
#' rows as there are offsets.
#' @noRd .offsets2time <- function(offsets, datum) { len <- length(offsets) if(len == 0L) return(data.frame(year = integer(), month = integer(), day = integer(), hour = integer(), minute = integer(), second = numeric(), tz = character(), offset = numeric())) if (unit(datum) <= 4L) { # Days, hours, minutes, seconds # First add time: convert to seconds first, then recompute time parts secs <- offsets * CFt$units$seconds[unit(datum)] secs <- secs + datum@origin$hour[1L] * 3600L + datum@origin$minute[1L] * 60L + datum@origin$second[1L] days <- secs %/% 86400L # overflow days secs <- round(secs %% 86400L, 3L) # drop overflow days from time, round down to milli-seconds avoid errors # Time elements for output hrs <- secs %/% 3600L mins <- (secs %% 3600L) %/% 60L secs <- secs %% 60L # Now add days using the calendar of the datum origin <- unlist(datum@origin[1L,1L:3L]) # origin ymd as a named vector if (any(days > 0)) { switch (calendar_id(datum), out <- .offset2date_standard(days, origin), out <- .offset2date_julian(days, origin), out <- .offset2date_360(days, origin), out <- .offset2date_fixed(days, origin, c(31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31), 365), out <- .offset2date_fixed(days, origin, c(31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31), 366)) } else { out <- data.frame(year = rep(origin[1L], len), month = rep(origin[2L], len), day = rep(origin[3L], len)) } # Put it all back together again out$hour <- hrs out$minute <- mins out$second <- secs out$tz <- rep(datum@origin$tz, len) } else { # Months, years out <- datum@origin[rep(1L, len), ] if (unit(datum) == 5L) { # Offsets are months months <- out$month + offsets - 1L out$month <- months %% 12L + 1L out$year <- out$year + months %/% 12L } else { # Offsets are years out$year <- out$year + offsets } } out$offset <- offsets return(out) } #' 360_day, use integer arithmetic #' This is an internal function that should not be used outside of the CFtime package. #' #' @param x integer. Vector of days to add to the origin. #' @param origin integer. Vector of year, month, day and seconds to add days to. #' #' @returns A data frame with time elements year, month and day in columns and as #' many rows as the length of vector `x`. #' @noRd .offset2date_360 <- function(x, origin) { y <- origin[1L] + x %/% 360L m <- origin[2L] + (x %% 360L) %/% 30L d <- origin[3L] + x %% 30L over <- which(d > 30L) d[over] <- d[over] - 30L m[over] <- m[over] + 1L over <- which(m > 12L) m[over] <- m[over] - 12L y[over] <- y[over] + 1L data.frame(year = y, month = m, day = d, row.names = NULL) } #' Fixed year length, either 365_day or 366_day #' #' This is an internal function that should not be used outside of the CFtime package. #' #' @param x numeric. Vector of days to add to the origin. #' @param origin numeric. Vector of year, month, day and seconds to add days to. #' @param month numeric. Vector of days per month in the year. #' @param ydays numeric. Number of days per year, either 365 or 366. #' #' @returns A data frame with time elements year, month and day in columns and as #' many rows as the length of vector `x`. 
#' @noRd .offset2date_fixed <- function(x, origin, month, ydays) { # First process full years over the vector yr <- origin[1L] + (x %/% ydays) x <- x %% ydays # Remaining portion per datum x <- x + origin[3L] ymd <- mapply(function(y, m, d) { while (d > month[m]) { d <- d - month[m] m <- m + 1L if (m == 13L) { y <- y + 1L m <- 1L } } return(c(y, m, d)) }, yr, origin[2L], x) data.frame(year = ymd[1L,], month = ymd[2L,], day = ymd[3L,], row.names = NULL) } #' Julian calendar offsetting #' #' This is an internal function that should not be used outside of the CFtime package. #' #' @param x numeric. Vector of days to add to the origin. #' @param origin numeric. Vector of year, month, day and seconds to add days to. #' #' @returns A data frame with time elements year, month and day in columns and as #' many rows as the length of vector `x`. #' @noRd .offset2date_julian <- function(x, origin) { common_days <- c(31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31) leap_days <- c(31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31) # Is the leap day to consider ahead in the year from the base date (offset = 0) or in the next year (offset = 1) offset <- as.integer(origin[2L] > 2L) # First process 4-year cycles of 1,461 days over the vector yr <- origin[1L] + (x %/% 1461L) * 4L x <- x %% 1461L # Remaining portion per datum x <- x + origin[3L] ymd <- mapply(function(y, m, d) { repeat { leap <- (y + offset) %% 4L == 0L ydays <- 365L + as.integer(leap) if (d > ydays) { d <- d - ydays y <- y + 1L } else break } if (leap) month <- leap_days else month <- common_days while (d > month[m]) { d <- d - month[m] m <- m + 1L if (m == 13L) { y <- y + 1L m <- 1L } } return(c(y, m, d)) }, yr, origin[2L], x) data.frame(year = ymd[1L,], month = ymd[2L,], day = ymd[3L,], row.names = NULL) } #' Standard calendar offsetting #' #' This is an internal function that should not be used outside of the CFtime package. #' #' @param x numeric. Vector of days to add to the origin. #' @param origin numeric. Vector of year, month, day and seconds to add days to. #' #' @returns A data frame with time elements year, month and day in columns and as #' many rows as the length of vector `x`. #' @noRd .offset2date_standard <- function(x, origin) { common_days <- c(31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31) leap_days <- c(31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31) # Is the leap day to consider ahead in the year from the base date (offset = 0) or in the next year (offset = 1) offset <- as.integer(origin[2L] > 2L) x <- x + origin[3L] ymd <- mapply(function(y, m, d) { repeat { test <- y + offset leap <- (test %% 4L == 0L && test %% 100L > 0L) || test %% 400L == 0L ydays <- 365L + as.integer(leap) if (d > ydays) { d <- d - ydays y <- y + 1L } else break } if (leap) month <- leap_days else month <- common_days while (d > month[m]) { d <- d - month[m] m <- m + 1L if (m == 13L) { y <- y + 1L m <- 1L } } return(c(y, m, d)) }, origin[1L], origin[2L], x) data.frame(year = ymd[1L,], month = ymd[2L,], day = ymd[3L,], row.names = NULL) }
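# Illustrative sketch (hypothetical helper, for exposition only): merging two
# CFtime instances whose datums have different origins; the offsets of the later
# origin are rebased onto the earlier origin, as described for the "+" operator.
.example_merge <- function() {
  h1 <- CFtime("days since 2030-01-01", "365_day", 0:181)   # 2030-01-01 .. 2030-07-01
  h2 <- CFtime("days since 2030-07-02", "365_day", 0:182)   # 2030-07-02 .. 2030-12-31
  CFrange(h1 + h2)   # "2030-01-01" "2030-12-31"
}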
/scratch/gouwar.j/cran-all/cranData/CFtime/R/CFtime.R
#' Return the number of days in a month given a certain CF calendar #' #' Given a vector of dates as strings in ISO 8601 or UDUNITS format and a `CFtime` object, #' this function will return a vector of the same length as the dates, #' indicating the number of days in the month according to the calendar #' specification. If no vector of days is supplied, the function will return an #' integer vector of length 12 with the number of days for each month of the #' calendar (disregarding the leap day for `standard` and `julian` calendars). #' #' @param cf CFtime. The CFtime definition to use. #' @param x character. An optional vector of dates as strings with format #' `YYYY-MM-DD`. Any time part will be silently ingested. #' #' @returns A vector indicating the number of days in each month for the vector #' of dates supplied as a parameter to the function. If no dates are supplied, #' the number of days per month for the calendar as a vector of length 12. #' Invalidly specified dates will result in an `NA` value. #' @export #' @seealso When working with factors generated by [CFfactor()], it is usually #' better to use [CFfactor_units()] as that will consider leap days for #' non-epoch factors. [CFfactor_units()] can also work with other time periods #' and datum units, such as "hours per month", or "days per season". #' @examples #' dates <- c("2021-11-27", "2021-12-10", "2022-01-14", "2022-02-18") #' cf <- CFtime("days since 1850-01-01", "standard") #' CFmonth_days(cf, dates) #' #' cf <- CFtime("days since 1850-01-01", "360_day") #' CFmonth_days(cf, dates) #' #' cf <- CFtime("days since 1850-01-01", "all_leap") #' CFmonth_days(cf, dates) #' #' CFmonth_days(cf) CFmonth_days <- function(cf, x = NULL) { stopifnot(methods::is(cf, "CFtime")) cal_id <- calendar_id(cf@datum) days_in_month <- c(31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31) leapdays_in_month <- c(31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31) # No dates supplied: return standard number of days per month if (is.null(x)) { if (cal_id %in% c(1L, 2L, 4L)) return(days_in_month) if (cal_id == 3L) return(rep(30L, 12L)) return(leapdays_in_month) } # Argument x supplied if (!(is.character(x))) stop("Argument `x` must be a character vector of dates in 'YYYY-MM-DD' format") ymd <- .parse_timestamp(cf@datum, x) if (anyNA(ymd$year)) warning("Some dates could not be parsed. Result contains `NA` values.") if (cal_id == 3L) { # 360_day res <- rep(30L, length(x)) res[which(is.na(ymd$year))] <- NA return(res) } if (cal_id == 4L) return(days_in_month[ymd$month]) if (cal_id == 5L) return(leapdays_in_month[ymd$month]) # Standard and julian calendars ifelse(.is_leap_year(ymd$year, cal_id), leapdays_in_month[ymd$month], days_in_month[ymd$month]) } #' Check if the supplied year, month and day form a valid date in the specified #' calendar. #' #' This is an internal function that should not be used outside of the CFtime package. #' #' @param yr numeric. The year to test, must be in range 1:9999. #' @param mon numeric. The month to test, must be in range 1:12 #' @param day numeric. The day to test, must be in the range permitted by the calendar. #' @param cal_id numeric. Identifier of the calendar to use to test the validity of the date. #' #' @returns boolean. TRUE if the date is valid, FALSE otherwise. 
#' @noRd .is_valid_calendar_date <- function(yr, mon, day, cal_id) { if (is.na(yr) || is.na(mon)) return(FALSE) # Check valid date ranges, no extended syntax if ((yr < 1L) || (yr > 9999L)) return(FALSE) # year out of range if ((mon < 1L) || (mon > 12L)) return(FALSE) # month out of range if (is.na(day)) return(TRUE) # day not specified if ((day >= 1L) && (day <= 28L)) return(TRUE) # day in safe range, 90% of valid cases else if ((day < 1L) || day > 31L) return(FALSE) # day out of range # 360_day calendar: oddball case for month length if (cal_id == 3L) return(day <= 30L) # Now all dates should be in regular-length months, but check for leap years # Day is in range 29:31 because day in range 1:28 already passed if (mon == 2L) { # February if (day > 29L) return(FALSE) if (cal_id == 5L) return(TRUE) # all_leap if (cal_id == 4L) return(FALSE) # no_leap if (cal_id == 2L) return(yr %% 4L == 0L) # julian: every 4th year is a leap year return(((yr %% 4L == 0L) && (yr %% 100L > 0L)) || (yr %% 400L == 0L)) # standard calendar } return(!((mon %in% c(4L, 6L, 9L, 11L)) && (day == 31L))) # months other than February } #' Flag which years are leap years, given a certain CF calendar #' #' This is an internal function that should not be used outside of the CFtime package. #' #' @param yr numeric. Vector of years to test. #' @param cal integer. The id of the calendar. #' #' @returns A logical vector of the same length as argument `yr` which is `TRUE` #' for elements that are leap years for the given calendar, `FALSE` otherwise. #' @noRd .is_leap_year <- function(yr, cal) { switch (cal, ((yr %% 4L == 0L) & (yr %% 100L > 0L)) | (yr %% 400L == 0L), yr %% 4L == 0L, rep(FALSE, length(yr)), rep(FALSE, length(yr)), rep(TRUE, length(yr))) }
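# Illustrative sketch (hypothetical helper, for exposition only): the length of
# February for the same dates under different calendars.
.example_february_lengths <- function() {
  dates <- c("2000-02-15", "2001-02-15", "2100-02-15")
  rbind(standard = CFmonth_days(CFtime("days since 1850-01-01", "standard"), dates), # 29 28 28
        julian   = CFmonth_days(CFtime("days since 1850-01-01", "julian"), dates),   # 29 28 29
        all_leap = CFmonth_days(CFtime("days since 1850-01-01", "all_leap"), dates)) # 29 29 29
}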
/scratch/gouwar.j/cran-all/cranData/CFtime/R/CFutils.R
#nocov start # Create environment for global CFtime variables CFt <- new.env(parent = emptyenv()) .onLoad <- function(libname, pkgname) { assign("calendars", data.frame(name = c("standard", "gregorian", "proleptic_gregorian", "julian", "360_day", "365_day", "366_day", "noleap", "all_leap"), id = c(1L, 1L, 1L, 2L, 3L, 4L, 5L, 4L, 5L)), envir = CFt) assign("CFunits", data.frame(unit = c("years", "year", "yr", "months", "month", "mon", "days", "day", "d", "hours", "hour", "hr", "h", "minutes", "minute", "min", "seconds", "second", "sec", "s"), id = c(6L, 6L, 6L, 5L, 5L, 5L, 4L, 4L, 4L, 3L, 3L, 3L, 3L, 2L, 2L, 2L, 1L, 1L, 1L, 1L)), envir = CFt) assign("units", data.frame(name = c("seconds", "minutes", "hours", "days", "months", "years"), seconds = c(1, 60, 3600, 86400, 86400 * 30, 86400 * 365), per_day = c(86400, 1440, 24, 1, 1/30, 1/365)), envir = CFt) assign("factor_periods", c("year", "season", "month", "dekad", "day"), envir = CFt) } #nocov end
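# Illustrative sketch (hypothetical helper, for exposition only): the lookup
# tables above are only populated in .onLoad(), so this helper can only be
# called after the package has been loaded.
.example_unit_seconds <- function(unit_string) {
  id <- CFt$CFunits$id[CFt$CFunits$unit == unit_string]   # e.g. "hours" -> 3L
  CFt$units$seconds[id]                                    # e.g. 3L -> 3600
}
# .example_unit_seconds("hours")   # 3600, once the package is loaded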
/scratch/gouwar.j/cran-all/cranData/CFtime/R/zzz.R
## ----include = FALSE---------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup, include = FALSE--------------------------------------------------- library(CFtime) library(ncdf4) ## ----------------------------------------------------------------------------- # POSIXt calculations on a standard calendar - INCORRECT as.Date("1949-12-01") + 43289 # CFtime calculation on a "360_day" calendar - CORRECT # See below examples for details on the two functions CFtimestamp(CFtime("days since 1949-12-01", "360_day", 43289)) ## ----------------------------------------------------------------------------- # Create a CF time object from a definition string, a calendar and some offsets cf <- CFtime("days since 1949-12-01", "360_day", 19830:90029) cf ## ----------------------------------------------------------------------------- # Opening a data file that is included with the package and showing some attributes. # Usually you would `list.files()` on a directory of your choice. nc <- nc_open(list.files(path = system.file("extdata", package = "CFtime"), full.names = TRUE)[1]) attrs <- ncatt_get(nc, "") attrs$title # "Conventions" global attribute must have a string like "CF-1.*" for this package to work reliably attrs$Conventions # Create the CFtime instance from the metadata in the file. cf <- CFtime(nc$dim$time$units, nc$dim$time$calendar, nc$dim$time$vals) cf ## ----------------------------------------------------------------------------- library(RNetCDF) nc <- open.nc(list.files(path = system.file("extdata", package = "CFtime"), full.names = TRUE)[1]) att.get.nc(nc, -1, "Conventions") cf <- CFtime(att.get.nc(nc, "time", "units"), att.get.nc(nc, "time", "calendar"), var.get.nc(nc, "time")) cf ## ----------------------------------------------------------------------------- dates <- CFtimestamp(cf, format = "date") dates[1:10] ## ----------------------------------------------------------------------------- CFrange(cf) ## ----------------------------------------------------------------------------- # Create a dekad factor for the whole `cf` time series that was created above f_k <- CFfactor(cf, "dekad") str(f_k) # Create monthly factors for a baseline epoch and early, mid and late 21st century epochs baseline <- CFfactor(cf, epoch = 1991:2020) future <- CFfactor(cf, epoch = list(early = 2021:2040, mid = 2041:2060, late = 2061:2080)) str(future) ## ----------------------------------------------------------------------------- # Is the time series complete? CFcomplete(cf) # How many time units fit in a factor level? CFfactor_units(cf, baseline) # What's the absolute and relative coverage of our time series CFfactor_coverage(cf, baseline, "absolute") CFfactor_coverage(cf, baseline, "relative") ## ----------------------------------------------------------------------------- # 4 years of data on a `365_day` calendar, keep 80% of values n <- 365 * 4 cov <- 0.8 offsets <- sample(0:(n-1), n * cov) cf <- CFtime("days since 2020-01-01", "365_day", offsets) cf # Note that there are about 1.25 days between observations mon <- CFfactor(cf, "month") CFfactor_coverage(cf, mon, "absolute") CFfactor_coverage(cf, mon, "relative") ## ----------------------------------------------------------------------------- # Days in January and February cf <- CFtime("days since 2023-01-01", "360_day", 0:59) cf_days <- CFtimestamp(cf, "date") as.Date(cf_days)
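## -----------------------------------------------------------------------------
# Illustrative extra chunk (not generated from the vignette text): selecting a
# slice of the series with CFsubset(), here all of February on the "360_day"
# calendar of the `cf` object created in the previous chunk.
feb <- CFsubset(cf, c("2023-02-01", "2023-03-01"))
sum(feb)   # 30 days in February on this calendar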
/scratch/gouwar.j/cran-all/cranData/CFtime/inst/doc/CFtime.R
--- title: "Working with CFtime" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Working with CFtime} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup, include = FALSE} library(CFtime) library(ncdf4) ``` ## Climate change models and calendars Around the world, many climate change models are being developed (100+) under the umbrella of the [World Climate Research Programme](https://www.wcrp-climate.org) to assess the rate of climate change. Published data is generally publicly available to download for research and other (non-commercial) purposes through partner organizations in the Earth Systems Grid Federation. The data are all formatted to comply with the [CF Metadata Conventions](http://cfconventions.org), a set of standards to support standardization among research groups and published data sets. These conventions greatly facilitate use and analysis of the climate projections because standard processing work flows (should) work across the various data sets. On the flip side, the CF Metadata Conventions needs to cater to a wide range of modeling requirements and that means that some of the areas covered by the standards are more complex than might be assumed. One of those areas is the temporal dimension of the data sets. The CF Metadata Conventions supports no less than nine different calendar definitions, that, upon analysis, fall into five distinct calendars (from the perspective of computation of climate projections): - `standard` or `gregorian`: The international civil calendar that is in common use in many countries around the world, adopted by edict of Pope Gregory XIII in 1582 and in effect from 15 October of that year. The `proleptic_gregorian` calendar is the same as the `gregorian` calendar, but with validity extended to periods prior to `1582-10-15`. - `julian`: Adopted in the year 45 BCE, every fourth year is a leap year. Originally, the julian calendar did not have a monotonically increasing year assigned to it and there are indeed several julian calendars in use around the world today with different years assigned to them. Common interpretation is currently that the year is the same as that of the standard calendar. The julian calendar is currently 13 days behind the gregorian calendar. - `365_day` or `noleap`: No years have a leap day. - `366_day` or `all_leap`: All years have a leap day. - `360_day`: Every year has 12 months of 30 days each. The three latter calendars are specific to the CF Metadata Conventions to reduce computational complexities of working with dates. These three, and the julian calendar, are not compliant with the standard `POSIXt` date/time facilities in `R` and using standard date/time procedures would quickly lead to problems. In the below code snippet, the date of `1949-12-01` is the *datum* from which other dates are calculated. When adding 43,289 days to this *datum* for a data set that uses the `360_day` calendar, that should yield a date some 120 years after the *datum*: ```{r} # POSIXt calculations on a standard calendar - INCORRECT as.Date("1949-12-01") + 43289 # CFtime calculation on a "360_day" calendar - CORRECT # See below examples for details on the two functions CFtimestamp(CFtime("days since 1949-12-01", "360_day", 43289)) ``` Using standard `POSIXt` calculations gives a result that is about 21 months off from the correct date - obviously an undesirable situation. 
This example is far from artificial: `1949-12-01` is the datum for all CORDEX data, covering the period 1951 - 2005 for historical experiments and the period 2006 - 2100 for RCP experiments (with some deviation between data sets), and several models used in the CORDEX set use the `360_day` calendar. The `365_day` or `noleap` calendar deviates by about 1 day every 4 years (disregarding centurial years), or about 24 days in a century. The `366_day` or `all_leap` calendar deviates by about 3 days every 4 years, or about 76 days in a century. The `CFtime` package deals with the complexity of the different calendars allowed by the CF Metadata Conventions. It properly formats dates and times (even oddball dates like `2070-02-30`) and it can generate calendar-aware factors for further processing of the data. ##### Time zones The character of CF time series - a number of numerical offsets from a base date - implies that there should only be a single time zone associated with the time series. The time zone offset from UTC is stored in the datum and can be retrieved with the `CFtimezone()` function. If a vector of character timestamps with time zone information is parsed with the `CFparse()` function and the time zones are found to be different from the datum time zone, a warning message is generated but the timestamp is interpreted as being in the datum time zone. No correction of timestamp to datum time zone is performed. ## Using CFtime to deal with calendars Data sets that are compliant with the CF Metadata Conventions always include a *datum*, a specific point in time in reference to a specified *calendar*, from which other points in time are calculated by adding a specified *offset* of a certain *unit*. This approach is encapsulated in the `CFtime` package by the S4 class `CFtime`. ```{r} # Create a CF time object from a definition string, a calendar and some offsets cf <- CFtime("days since 1949-12-01", "360_day", 19830:90029) cf ``` The `CFtime()` function takes a *datum* description (which is actually a unit - "days" - in reference to a datum - "1949-12-01"), a calendar description, and a vector of *offsets* from that datum. Once a `CFtime` instance is created its datum and calendar cannot be changed anymore. Offsets may be added. In practice, these parameters will be taken from the data set of interest. CF Metadata Conventions require data sets to be in the NetCDF format, with all metadata describing the data set included in a single file, including the mandatory "Conventions" global attribute which should have a string identifying the version of the CF Metadata Conventions that this file adheres to (among possible others). Not surprisingly, all the pieces of interest are contained in the mandatory `time` dimension of the file. The process then becomes as follows, for a CMIP6 file of daily precipitation: ```{r} # Opening a data file that is included with the package and showing some attributes. # Usually you would `list.files()` on a directory of your choice. nc <- nc_open(list.files(path = system.file("extdata", package = "CFtime"), full.names = TRUE)[1]) attrs <- ncatt_get(nc, "") attrs$title # "Conventions" global attribute must have a string like "CF-1.*" for this package to work reliably attrs$Conventions # Create the CFtime instance from the metadata in the file. cf <- CFtime(nc$dim$time$units, nc$dim$time$calendar, nc$dim$time$vals) cf ``` You can see from the global attribute "Conventions" that the file adheres to the CF Metadata Conventions, among others. 
According to the CF conventions, `units` and `calendar` are required attributes of the `time` dimension in the NetCDF file, and `nc$dim$time$vals` are the offset values, or `dimnames()` in `R` terms, for the `time` dimension of the data. The above example (and others in this vignette) use the `ncdf4` package. If you are using the `RNetCDF` package, checking for CF conventions and then creating a `CFtime` instance goes like this: ```{r} library(RNetCDF) nc <- open.nc(list.files(path = system.file("extdata", package = "CFtime"), full.names = TRUE)[1]) att.get.nc(nc, -1, "Conventions") cf <- CFtime(att.get.nc(nc, "time", "units"), att.get.nc(nc, "time", "calendar"), var.get.nc(nc, "time")) cf ``` The corresponding character representations of the time series can be easily generated: ```{r} dates <- CFtimestamp(cf, format = "date") dates[1:10] ``` ...as well as the full range of the time series: ```{r} CFrange(cf) ``` Note that in this latter case, if any of the timestamps in the time series have a time that is other than `00:00:00` then the time of the extremes of the time series is also displayed. This is a common occurrence because the CF Metadata Conventions prescribe that the middle of the time period (month, day, etc) is recorded, which for months with 31 days would be something like `2005-01-15T12:00:00`. ## Supporting processing of climate projection data When working with high resolution climate projection data, typically at a "day" resolution, one of the processing steps would be to aggregate the data to some lower resolution such as a dekad (10-day period), a month or a meteorological season, and then compute a derivative value such as the dekadal sum of precipitation, monthly minimum/maximum daily temperature, or seasonal average daily short-wave irradiance. It is also possible to create factors for multiple "epochs" in one go. This greatly reduces programming effort if you want to calculate anomalies over multiple future periods. A complete example is provided in the vignette ["Processing climate projection data"](Processing.html). It is easy to generate the factors that you need once you have a `CFtime` instance prepared: ```{r} # Create a dekad factor for the whole `cf` time series that was created above f_k <- CFfactor(cf, "dekad") str(f_k) # Create monthly factors for a baseline epoch and early, mid and late 21st century epochs baseline <- CFfactor(cf, epoch = 1991:2020) future <- CFfactor(cf, epoch = list(early = 2021:2040, mid = 2041:2060, late = 2061:2080)) str(future) ``` For the "epoch" version, there are two interesting things to note here: - The epochs do not have to coincide with the boundaries of the time series. In the example above, the time series starts in 2015, while the baseline epoch is from 1991. Obviously, the number of time steps from the time series that then fall within this epoch will then be reduced. - The factor is always of the same length as the time series, with `NA` values where the time series values are not falling in the epoch. This ensures that the factor is compatible with the data set which the time series describes, such that functions like `tapply()` will not throw an error. There are five periods defined for `CFfactor()`: - `year`, to summarize data to yearly timescales - `season`, the meteorological seasons. Note that the month of December will be added to the months of January and February of the following year, so the date "2020-12-01" yields the factor value "2021-DJF". - `month`, monthly summaries, the default period. 
- `dekad`, 10-day period. Each month is subdivided into dekads as follows: (1) days 01 - 10; (2) days 11 - 20; (3) remainder of the month.
- `day`, to summarize sub-daily data.

##### Incomplete time series

You can test if your time series is complete with the function `CFcomplete()`. A time series is considered complete if the time steps between the two extreme values are equally spaced. There is a "fuzzy" assessment of completeness for time series with a datum unit of "days" or smaller where the time steps are months or years apart - these have different lengths in days in different months or years (e.g. a leap year).

If your time series is incomplete, for instance because it has missing time steps, you should recognize that in your further processing. As an example, you might want to filter out months that have fewer than 90% of daily data from further processing or apply weights based on the actual coverage.

```{r}
# Is the time series complete?
CFcomplete(cf)

# How many time units fit in a factor level?
CFfactor_units(cf, baseline)

# What's the absolute and relative coverage of our time series
CFfactor_coverage(cf, baseline, "absolute")
CFfactor_coverage(cf, baseline, "relative")
```

The time series is complete but coverage of the baseline epoch is only 20%! Recall that the time series starts in 2015 while the baseline period in the factor is for `1991:2020` so that's only 6 years of time series data out of 30 years of the baseline factor.

An artificial example of missing data:

```{r}
# 4 years of data on a `365_day` calendar, keep 80% of values
n <- 365 * 4
cov <- 0.8
offsets <- sample(0:(n-1), n * cov)
cf <- CFtime("days since 2020-01-01", "365_day", offsets)
cf # Note that there are about 1.25 days between observations

mon <- CFfactor(cf, "month")
CFfactor_coverage(cf, mon, "absolute")
CFfactor_coverage(cf, mon, "relative")
```

Keep in mind, though, that there are data sets where the time unit is lower than the intended resolution of the data. Since the CF conventions recommend that the coarsest time unit is "day", many files with monthly data sets have a definition like `days since 2016-01-01` with offset values for the middle of the month like `15, 44, 74, 104, ...`. Even in these scenarios you can verify that your data set is complete with the function `CFcomplete()`.

## CFtime and POSIXt

The CF Metadata Conventions support nine different calendars. Of these, only the `standard`, `gregorian` and `proleptic_gregorian` calendars are fully compatible with POSIXt. The other calendars have varying degrees of discrepancies:

- `julian`: Every fourth year is a leap year. Dates like `2100-02-29` and `2200-02-29` are valid.
- `365_day` or `noleap`: No leap year exists. `2020-02-29` does not occur.
- `366_day` or `all_leap`: All years are leap years.
- `360_day`: All months have 30 days in every year. This means that 31 January, March, May, July, August, October and December never occur, while 29 and 30 February occur in every year.

Converting time series using these incompatible calendars to `Date`s is likely to produce problems. This is most pronounced for the `360_day` calendar:

```{r}
# Days in January and February
cf <- CFtime("days since 2023-01-01", "360_day", 0:59)
cf_days <- CFtimestamp(cf, "date")
as.Date(cf_days)
```

31 January is missing from the vector of `Date`s because the `360_day` calendar does not include it and 29 and 30 February are `NA`s because POSIXt rejects them. This will produce problems later on when processing your data.
The general advice is therefore: **do not convert CFtime objects to Date objects** unless you are sure that the `CFtime` object uses a POSIXt-compatible calendar.

##### So how do I compare climate projection data with different calendars?

One reason to convert the "time" dimension from different climate projection data sets is to be able to compare the data from different models and produce a multi-model ensemble. The correct procedure to do this is to first calculate **for each data set individually** the property of interest (e.g. average daily rainfall per month anomaly for some future period relative to a baseline period), which will typically involve aggregation to a lower resolution (such as from daily data to monthly averages), and only then combine the aggregate data from multiple data sets to compute statistically interesting properties (such as average among models and standard deviation, etc). Once data is aggregated from daily or higher-resolution values, the different calendars no longer matter (although if you do need to convert averaged data to absolute data you should use `CFfactor_units()` to make sure that you use the correct scaling factor).

Otherwise, there really shouldn't be any reason to convert the time series in the data files to `Date`s. Climate projection data is virtually never compared on a day-to-day basis between different models and neither does complex date arithmetic make much sense (such as adding intervals) - `CFtime` can support basic arithmetic by manipulating the offsets of the `CFtime` object. The character representations that are produced are perfectly fine to use for `dimnames()` on an array or as `rownames()` in a `data.frame` and these also support basic logical operations such as `"2023-02-30" < "2023-03-01"` (a short sketch of this is included at the end of this vignette). So ask yourself, do you really need `Date`s when working with unprocessed climate projection data? (If so, [open an issue on GitHub](https://github.com/pvanlaake/CFtime/issues)).

A complete example of creating a multi-model ensemble is provided in the vignette ["Processing climate projection data"](Processing.html).

## Final observations

- This package is intended to facilitate processing of climate projection data. It is not a full implementation of the CF Metadata Conventions "time" component.
- In parsing and deparsing of offsets and timestamps, data is rounded to 3 digits of precision of the unit of the datum. When using a description of time that is very different from the datum unit, this may lead to some loss of precision due to rounding errors. For instance, if millisecond precision is required, use a unit of "seconds". The authors have no knowledge of published climate projection data that requires millisecond precision so for the intended use of the package this issue is marginal.
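##### A short sketch: timestamps as array indices

As mentioned above, the character timestamps produced by `CFtimestamp()` can be used directly, without converting to `Date`. The chunk below is a minimal sketch of that point; the data array and its contents are invented purely for illustration and are not part of the package:

```{r}
cf <- CFtime("days since 2023-01-01", "360_day", 0:89)
ts <- CFtimestamp(cf, "date")

# Plain character comparison gives the correct ordering, even for dates that POSIXt rejects
"2023-02-30" < "2023-03-01"

# Use the timestamps as dimnames on a made-up data array and index by timestamp
pr <- array(runif(90), dim = c(1, 1, 90), dimnames = list("lon", "lat", ts))
pr[, , "2023-02-30"]
```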
/scratch/gouwar.j/cran-all/cranData/CFtime/inst/doc/CFtime.Rmd
## ----include = FALSE---------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup, include = FALSE--------------------------------------------------- library(CFtime) library(ncdf4) ## ----------------------------------------------------------------------------- # Setting up nc <- nc_open(list.files(path = system.file("extdata", package = "CFtime"), full.names = TRUE)[1]) cf <- CFtime(nc$dim$time$units, nc$dim$time$calendar, nc$dim$time$vals) # Create monthly factors for a baseline epoch and early, mid and late 21st century epochs baseline <- CFfactor(cf, epoch = 1991:2020) future <- CFfactor(cf, epoch = list(early = 2021:2040, mid = 2041:2060, late = 2061:2080)) str(baseline) str(future) ## ----------------------------------------------------------------------------- # Read the data from the NetCDF file. # Keep degenerate dimensions so that we have a predictable data structure: 3-dimensional array. # Converts units of kg m-2 s-1 to mm/day. pr <- ncvar_get(nc, "pr", collapse_degen = FALSE) * 86400 # Assign dimnames(), optional. dimnames(pr) <- list(nc$dim$lon$vals, nc$dim$lat$vals, CFtimestamp(cf)) # Get a global attribute from the file experiment <- ncatt_get(nc, "")$experiment_id nc_close(nc) # Calculate the daily average precipitation per month for the baseline period # and the three future epochs. pr_base <- apply(pr, 1:2, tapply, baseline, mean) # an array pr_future <- lapply(future, function(f) apply(pr, 1:2, tapply, f, mean)) # a list of arrays # Calculate the precipitation anomalies for the future epochs against the baseline. # Working with daily averages per month so we can simply subtract and then multiply by days # per month for each of the factor levels using the CF calendar. 
ano <- mapply(function(pr, f) {(pr - pr_base) * CFfactor_units(cf, f)}, pr_future, future, SIMPLIFY = FALSE) # Plot the results plot(1:12, ano$early[,1,1], type = "o", col = "blue", ylim = c(-50, 40), xlim = c(1, 12), main = paste0("Hamilton, New Zealand\nExperiment: ", experiment), xlab = "month", ylab = "Precipitation anomaly (mm)") lines(1:12, ano$mid[,1,1], type = "o", col = "green") lines(1:12, ano$late[,1,1], type = "o", col = "red") ## ----------------------------------------------------------------------------- # Get the list of files that make up the ensemble members, here: # GFDL ESM4 and MRI ESM2 models for experiment SSP2-4.5, precipitation, CMIP6 2015-01-01 to 2099-12-31 lf <- list.files(path = system.file("extdata", package = "CFtime"), full.names = TRUE) # Loop over the files individually # ano is here a list with each element holding the results for a single model ano <- lapply(lf, function(fn) { nc <- nc_open(fn) cf <- CFtime(nc$dim$time$units, nc$dim$time$calendar, nc$dim$time$vals) pr <- ncvar_get(nc, "pr", collapse_degen = FALSE) * 86400 nc_close(nc) baseline <- CFfactor(cf, epoch = 1991:2020) pr_base <- apply(pr, 1:2, tapply, baseline, mean) future <- CFfactor(cf, epoch = list(early = 2021:2040, mid = 2041:2060, late = 2061:2080)) pr_future <- lapply(future, function(f) apply(pr, 1:2, tapply, f, mean)) mapply(function(pr, f) {(pr - pr_base) * CFfactor_units(cf, f)}, pr_future, future, SIMPLIFY = FALSE) }) # Epoch names epochs <- c("early", "mid", "late") dim(epochs) <- 3 # Build the ensemble for each epoch # For each epoch, grab the data for each of the ensemble members, simplify to an array # and take the mean per row (months, in this case) ensemble <- apply(epochs, 1, function(e) { rowMeans(sapply(ano, function(a) a[[e]], simplify = T))}) colnames(ensemble) <- epochs rownames(ensemble) <- rownames(ano[[1]][[1]]) ensemble ## ----eval = FALSE------------------------------------------------------------- # library(ncdf4) # library(abind) # # prepare_CORDEX <- function(fn, var) { # offsets <- vector("list", length(fn)) # data <- vector("list", length(fn)) # for (i in 1:length(fn)) { # nc <- nc_open(fn[i]) # if (i == 1) # # Create an "empty" CFtime object, without elements # cf <- CFtime(nc$dim$time$units, nc$dim$time$calendar) # # # Make lists of all datum offsets and data arrays # offsets[[i]] <- as.vector(nc$dim$time$vals) # data[[i]] <- ncvar_get(nc, var, # start = c(10, 10, 1), count = c(100, 100, -1), # spatial subsetting # collapse_degen = FALSE) # # nc_close(nc) # } # # # Create a list for output with the CFtime instance assigned the offsets and # # the data bound in a single 3-dimensional array # list(CFtime = cf + unlist(offsets), data = abind(data, along = 3)) # }
/scratch/gouwar.j/cran-all/cranData/CFtime/inst/doc/Processing.R
--- title: "Processing climate projection data" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Processing climate projection data} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup, include = FALSE} library(CFtime) library(ncdf4) ``` Climate projection data sets are produced in a variety of formats but all conform to the CF Metadata Conventions. NetCDF data files, in particular, are highly structured and relatively easy to process. That said, it is very important to maintain a proper processing workflow such that the small changes in the climate projections are maintained and revealed through analysis. In this document, the basic workflow with varying calendars is described. ## Processing climate projection data Individual files containing climate projections contain global, regional or local data, typically on a rectangular latitude-longitude grid, for a single parameter such as "near-surface temperature", and for a number of time steps. An analysis workflow then consists of a number of steps: - Download the appropriate data files for your desired combination of model, experiment, realization, geography, time range, parameter, ... (called a "data suite" henceforth). If your analysis involves multiple parameters (such as temperature and precipitation to estimate crop productivity), repeat the process for all parameters. If you want to make a multi-model ensemble to reduce model bias, repeat again for all desired model, experiment and realization combinations ("ensemble member"). You end up having one or more data suites to work with. - Take all files in a data suite and extract the data. Process the data in the data suite. Since the data are (mostly) 3-dimensional arrays, this will involve binding the arrays on the right dimension and then do something like `apply(data, 1:2, tapply, f, fun)` (following the CF Metadata Conventions, dimensions 1 and 2 are "longitude" and "latitude", respectively; the third dimension is "time"). Repeat for the data suite for each ensemble member. - Combine the above results as your workflow requires. Frequently this involves computing "anomalies": ratio the data for one or more future periods to a baseline period. Repeat for each ensemble member. - Construct the multi-model ensemble from the individual ensemble members. Apart from the first step of obtaining the data, the steps lend themselves well to automation. The catch, however, is in the factor `f` to use with `tapply()`. The different models (in your ensemble) use different calendars, meaning that different factors are required. The CFtime package can help out. The `CFfactor()` function produces a factor that respects the calendar of the data files. The function comes in two operating modes: - Plain vanilla mode produces a factor for a time period across the entire time series. The factor level includes the year. This would be useful to calculate mean temperature for every month in every year, for instance. - When one or more "epochs" (periods of interest) are provided, the factor level no longer includes the year and can be used to calculate, for instance, the mean temperature per period of interest in the epoch (e.g. average March temperature in the epoch 2041-2060). 
```{r} # Setting up nc <- nc_open(list.files(path = system.file("extdata", package = "CFtime"), full.names = TRUE)[1]) cf <- CFtime(nc$dim$time$units, nc$dim$time$calendar, nc$dim$time$vals) # Create monthly factors for a baseline epoch and early, mid and late 21st century epochs baseline <- CFfactor(cf, epoch = 1991:2020) future <- CFfactor(cf, epoch = list(early = 2021:2040, mid = 2041:2060, late = 2061:2080)) str(baseline) str(future) ``` Building on the examples above of opening a file, creating a `CFtime` instance and a suitable factor for one data suite, here daily rainfall, the actual processing of the data into precipitation anomalies for 3 periods relative to a baseline period could look like this: ```{r} # Read the data from the NetCDF file. # Keep degenerate dimensions so that we have a predictable data structure: 3-dimensional array. # Converts units of kg m-2 s-1 to mm/day. pr <- ncvar_get(nc, "pr", collapse_degen = FALSE) * 86400 # Assign dimnames(), optional. dimnames(pr) <- list(nc$dim$lon$vals, nc$dim$lat$vals, CFtimestamp(cf)) # Get a global attribute from the file experiment <- ncatt_get(nc, "")$experiment_id nc_close(nc) # Calculate the daily average precipitation per month for the baseline period # and the three future epochs. pr_base <- apply(pr, 1:2, tapply, baseline, mean) # an array pr_future <- lapply(future, function(f) apply(pr, 1:2, tapply, f, mean)) # a list of arrays # Calculate the precipitation anomalies for the future epochs against the baseline. # Working with daily averages per month so we can simply subtract and then multiply by days # per month for each of the factor levels using the CF calendar. ano <- mapply(function(pr, f) {(pr - pr_base) * CFfactor_units(cf, f)}, pr_future, future, SIMPLIFY = FALSE) # Plot the results plot(1:12, ano$early[,1,1], type = "o", col = "blue", ylim = c(-50, 40), xlim = c(1, 12), main = paste0("Hamilton, New Zealand\nExperiment: ", experiment), xlab = "month", ylab = "Precipitation anomaly (mm)") lines(1:12, ano$mid[,1,1], type = "o", col = "green") lines(1:12, ano$late[,1,1], type = "o", col = "red") ``` Looks like Hadley will be needing rubber boots in spring and autumn back home! The interesting feature, working from opening the NetCDF file down to plotting, is that the specifics of the CF calendar that the data suite uses do not have to be considered anywhere in the processing workflow: the `CFtime` package provides the functionality. Data suites using another CF calendar are processed exactly the same. ## Combining data from different models with different calendars Different climate projection data sets can use different calendars. It is absolutely essential to respect the calendar of the different data sets because the underlying solar and atmospheric physics are based on those calendars as well. In a typical situation, a researcher would construct a multi-model ensemble to remove or reduce the bias in any given model. The data sets composing the ensemble might well use different calendars. The correct way of constructing an ensemble is to perform the desired analysis on every ensemble member individually and to combine them only in the final step and to then perform any ensemble operations such as computing confidence intervals. The design of the CFtime package makes it easy to do this, through its heavy use of lists. 
Building on the previous example, let's make a multi-model ensemble of 2 models (not much of an ensemble but such are the limitations of including data with packages - the example easily extends to a larger set of ensemble members).

```{r}
# Get the list of files that make up the ensemble members, here:
# GFDL ESM4 and MRI ESM2 models for experiment SSP2-4.5, precipitation, CMIP6 2015-01-01 to 2099-12-31
lf <- list.files(path = system.file("extdata", package = "CFtime"), full.names = TRUE)

# Loop over the files individually
# ano is here a list with each element holding the results for a single model
ano <- lapply(lf, function(fn) {
  nc <- nc_open(fn)
  cf <- CFtime(nc$dim$time$units, nc$dim$time$calendar, nc$dim$time$vals)
  pr <- ncvar_get(nc, "pr", collapse_degen = FALSE) * 86400
  nc_close(nc)
  baseline <- CFfactor(cf, epoch = 1991:2020)
  pr_base <- apply(pr, 1:2, tapply, baseline, mean)
  future <- CFfactor(cf, epoch = list(early = 2021:2040, mid = 2041:2060, late = 2061:2080))
  pr_future <- lapply(future, function(f) apply(pr, 1:2, tapply, f, mean))
  mapply(function(pr, f) {(pr - pr_base) * CFfactor_units(cf, f)}, pr_future, future, SIMPLIFY = FALSE)
})

# Epoch names
epochs <- c("early", "mid", "late")
dim(epochs) <- 3

# Build the ensemble for each epoch
# For each epoch, grab the data for each of the ensemble members, simplify to an array
# and take the mean per row (months, in this case)
ensemble <- apply(epochs, 1, function(e) {
  rowMeans(sapply(ano, function(a) a[[e]], simplify = T))})
colnames(ensemble) <- epochs
rownames(ensemble) <- rownames(ano[[1]][[1]])
ensemble
```

Here we simply compute the average of the monthly precipitation anomaly over the ensemble members. In a more typical scenario, you would use the values from the individual models and apply a more suitable analysis, such as calculating the confidence interval or model agreement.

One significant advantage of this processing workflow is that it is easily parallelized: the bulk of the work goes into computing the anomalies, `ano`, and that step is [embarrassingly parallel](https://en.wikipedia.org/wiki/Embarrassingly_parallel) because each ensemble member reads its own data and produces an independent output. Use [package future](https://cran.r-project.org/package=future) or something similar to easily make the code run on all available CPU cores. A minimal sketch using the `future.apply` package is included at the end of this vignette.

## Working with multiple files in a single data suite

Due to the large size of typical climate projection data files, it is common to have a data suite that is contained in multiple files. A case in point is the CORDEX data set which breaks up the experiment period of 2006 - 2100 into 19 files of 5 years each, with each file covering a single parameter (temperature, precipitation, etc) over an entire domain (such as Europe, South Asia, Central America and the Caribbean, etc). The CFtime package can streamline processing of such multi-file data suites as well.

Assuming that you have your CORDEX files in a directory on disk, organized by domain and other properties such as the variable, GCM/RCM combination, experiment, etc, the process of preparing the files for processing could be encoded in a function as below. The argument `fn` is a list of file names to process, and `var` is the variable contained in the files. (There are no checks on argument sanity here, which should really be included. This function only makes sense for a single [domain, GCM/RCM, experiment, variable] combination.
Also be aware of data size: CORDEX files are huge and stitching all domain data together will easily exhaust available memory, which may lead to very large swap files and very poor performance - use the `CFsubset()` function to read temporal chunks of data to avoid such problems.)

```{r eval = FALSE}
library(ncdf4)
library(abind)

prepare_CORDEX <- function(fn, var) {
  offsets <- vector("list", length(fn))
  data <- vector("list", length(fn))
  for (i in 1:length(fn)) {
    nc <- nc_open(fn[i])
    if (i == 1)
      # Create an "empty" CFtime object, without elements
      cf <- CFtime(nc$dim$time$units, nc$dim$time$calendar)

    # Make lists of all datum offsets and data arrays
    offsets[[i]] <- as.vector(nc$dim$time$vals)
    data[[i]] <- ncvar_get(nc, var,
                           start = c(10, 10, 1), count = c(100, 100, -1), # spatial subsetting
                           collapse_degen = FALSE)

    nc_close(nc)
  }

  # Create a list for output with the CFtime instance assigned the offsets and
  # the data bound in a single 3-dimensional array
  list(CFtime = cf + unlist(offsets), data = abind(data, along = 3))
}
```

Calling this function like `prepare_CORDEX(list.files(path = "~/CC/CORDEX/CAM", pattern = "^pr.*\\.nc$", full.names = TRUE), "pr")` will read the NetCDF files with precipitation data and yield a list holding the resulting `CFtime` instance, describing the full temporal extent covered by the data files, as well as the data bound on the temporal dimension, ready for further processing.

When working like this it is imperative that the offsets and the data arrays are added to their final structures *in exactly the same order*. It is not necessary that the offsets (and the data) themselves are in order, but the correspondence between offsets and data needs to be maintained. (`list.files()` produces a list in alphabetical order by default, which for most climate projection files produces offsets in chronological order.)

## Acknowledgements

The results presented contain modified data from Copernicus Climate Change Service information, 2023-2024. Neither the European Commission nor ECMWF is responsible for any use that may be made of the Copernicus information or data it contains.

We acknowledge the World Climate Research Programme, which, through its Working Group on Coupled Modelling, coordinated and promoted CMIP6. We thank the climate modeling groups for producing and making available their model output, the Earth System Grid Federation (ESGF) for archiving the data and providing access, and the multiple funding agencies who support CMIP6 and ESGF.

The two datasets used as examples in this vignette carry the following license statements:

- **pr_day_GFDL-ESM4_ssp245_r1i1p1f1_gr1_20150101-20991231_v20180701.nc:** CMIP6 model data produced by NOAA-GFDL is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License (https://creativecommons.org/licenses/). Consult https://pcmdi.llnl.gov/CMIP6/TermsOfUse/ for terms of use governing CMIP6 output, including citation requirements and proper acknowledgment. Further information about this data, including some limitations, can be found via the further_info_url (recorded as a global attribute in this file). The data producers and data providers make no warranty, either express or implied, including, but not limited to, warranties of merchantability and fitness for a particular purpose. All liabilities arising from the supply of the information (including any liability arising in negligence) are excluded to the fullest extent permitted by law.
- **pr_day_MRI-ESM2-0_ssp245_r1i1p1f1_gn_20150101-20991231_v20190603.nc:** CMIP6 model data produced by MRI is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License (https://creativecommons.org/licenses/). Consult https://pcmdi.llnl.gov/CMIP6/TermsOfUse/ for terms of use governing CMIP6 output, including citation requirements and proper acknowledgment. Further information about this data, including some limitations, can be found via the further_info_url (recorded as a global attribute in this file). The data producers and data providers make no warranty, either express or implied, including, but not limited to, warranties of merchantability and fitness for a particular purpose. All liabilities arising from the supply of the information (including any liability arising in negligence) are excluded to the fullest extent permitted by law.
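##### Appendix: a parallelization sketch

As noted in the section on combining models, computing the per-model anomalies is embarrassingly parallel. The chunk below is a minimal, hypothetical sketch of how the `ano <- lapply(...)` call from that section could be spread over all CPU cores with the [future.apply](https://cran.r-project.org/package=future.apply) package; `process_model` stands in for the anonymous per-file function used in that call and is not part of the CFtime package:

```{r eval = FALSE}
library(future)
library(future.apply)

plan(multisession)   # run on all available CPU cores

# Same computation as before, but the files are processed in parallel;
# process_model(fn) is the hypothetical per-file anomaly function shown earlier
ano <- future_lapply(lf, process_model)
```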
/scratch/gouwar.j/cran-all/cranData/CFtime/inst/doc/Processing.Rmd
--- title: "Working with CFtime" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Working with CFtime} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup, include = FALSE} library(CFtime) library(ncdf4) ``` ## Climate change models and calendars Around the world, many climate change models are being developed (100+) under the umbrella of the [World Climate Research Programme](https://www.wcrp-climate.org) to assess the rate of climate change. Published data is generally publicly available to download for research and other (non-commercial) purposes through partner organizations in the Earth Systems Grid Federation. The data are all formatted to comply with the [CF Metadata Conventions](http://cfconventions.org), a set of standards to support standardization among research groups and published data sets. These conventions greatly facilitate use and analysis of the climate projections because standard processing work flows (should) work across the various data sets. On the flip side, the CF Metadata Conventions needs to cater to a wide range of modeling requirements and that means that some of the areas covered by the standards are more complex than might be assumed. One of those areas is the temporal dimension of the data sets. The CF Metadata Conventions supports no less than nine different calendar definitions, that, upon analysis, fall into five distinct calendars (from the perspective of computation of climate projections): - `standard` or `gregorian`: The international civil calendar that is in common use in many countries around the world, adopted by edict of Pope Gregory XIII in 1582 and in effect from 15 October of that year. The `proleptic_gregorian` calendar is the same as the `gregorian` calendar, but with validity extended to periods prior to `1582-10-15`. - `julian`: Adopted in the year 45 BCE, every fourth year is a leap year. Originally, the julian calendar did not have a monotonically increasing year assigned to it and there are indeed several julian calendars in use around the world today with different years assigned to them. Common interpretation is currently that the year is the same as that of the standard calendar. The julian calendar is currently 13 days behind the gregorian calendar. - `365_day` or `noleap`: No years have a leap day. - `366_day` or `all_leap`: All years have a leap day. - `360_day`: Every year has 12 months of 30 days each. The three latter calendars are specific to the CF Metadata Conventions to reduce computational complexities of working with dates. These three, and the julian calendar, are not compliant with the standard `POSIXt` date/time facilities in `R` and using standard date/time procedures would quickly lead to problems. In the below code snippet, the date of `1949-12-01` is the *datum* from which other dates are calculated. When adding 43,289 days to this *datum* for a data set that uses the `360_day` calendar, that should yield a date some 120 years after the *datum*: ```{r} # POSIXt calculations on a standard calendar - INCORRECT as.Date("1949-12-01") + 43289 # CFtime calculation on a "360_day" calendar - CORRECT # See below examples for details on the two functions CFtimestamp(CFtime("days since 1949-12-01", "360_day", 43289)) ``` Using standard `POSIXt` calculations gives a result that is about 21 months off from the correct date - obviously an undesirable situation. 
This example is far from artificial: `1949-12-01` is the datum for all CORDEX data, covering the period 1951 - 2005 for historical experiments and the period 2006 - 2100 for RCP experiments (with some deviation between data sets), and several models used in the CORDEX set use the `360_day` calendar. The `365_day` or `noleap` calendar deviates by about 1 day every 4 years (disregarding centurial years), or about 24 days in a century. The `366_day` or `all_leap` calendar deviates by about 3 days every 4 years, or about 76 days in a century. The `CFtime` package deals with the complexity of the different calendars allowed by the CF Metadata Conventions. It properly formats dates and times (even oddball dates like `2070-02-30`) and it can generate calendar-aware factors for further processing of the data. ##### Time zones The character of CF time series - a number of numerical offsets from a base date - implies that there should only be a single time zone associated with the time series. The time zone offset from UTC is stored in the datum and can be retrieved with the `CFtimezone()` function. If a vector of character timestamps with time zone information is parsed with the `CFparse()` function and the time zones are found to be different from the datum time zone, a warning message is generated but the timestamp is interpreted as being in the datum time zone. No correction of timestamp to datum time zone is performed. ## Using CFtime to deal with calendars Data sets that are compliant with the CF Metadata Conventions always include a *datum*, a specific point in time in reference to a specified *calendar*, from which other points in time are calculated by adding a specified *offset* of a certain *unit*. This approach is encapsulated in the `CFtime` package by the S4 class `CFtime`. ```{r} # Create a CF time object from a definition string, a calendar and some offsets cf <- CFtime("days since 1949-12-01", "360_day", 19830:90029) cf ``` The `CFtime()` function takes a *datum* description (which is actually a unit - "days" - in reference to a datum - "1949-12-01"), a calendar description, and a vector of *offsets* from that datum. Once a `CFtime` instance is created its datum and calendar cannot be changed anymore. Offsets may be added. In practice, these parameters will be taken from the data set of interest. CF Metadata Conventions require data sets to be in the NetCDF format, with all metadata describing the data set included in a single file, including the mandatory "Conventions" global attribute which should have a string identifying the version of the CF Metadata Conventions that this file adheres to (among possible others). Not surprisingly, all the pieces of interest are contained in the mandatory `time` dimension of the file. The process then becomes as follows, for a CMIP6 file of daily precipitation: ```{r} # Opening a data file that is included with the package and showing some attributes. # Usually you would `list.files()` on a directory of your choice. nc <- nc_open(list.files(path = system.file("extdata", package = "CFtime"), full.names = TRUE)[1]) attrs <- ncatt_get(nc, "") attrs$title # "Conventions" global attribute must have a string like "CF-1.*" for this package to work reliably attrs$Conventions # Create the CFtime instance from the metadata in the file. cf <- CFtime(nc$dim$time$units, nc$dim$time$calendar, nc$dim$time$vals) cf ``` You can see from the global attribute "Conventions" that the file adheres to the CF Metadata Conventions, among others. 
According to the CF conventions, `units` and `calendar` are required attributes of the `time` dimension in the NetCDF file, and `nc$dim$time$vals` are the offset values, or `dimnames()` in `R` terms, for the `time` dimension of the data. The above example (and others in this vignette) use the `ncdf4` package. If you are using the `RNetCDF` package, checking for CF conventions and then creating a `CFtime` instance goes like this: ```{r} library(RNetCDF) nc <- open.nc(list.files(path = system.file("extdata", package = "CFtime"), full.names = TRUE)[1]) att.get.nc(nc, -1, "Conventions") cf <- CFtime(att.get.nc(nc, "time", "units"), att.get.nc(nc, "time", "calendar"), var.get.nc(nc, "time")) cf ``` The corresponding character representations of the time series can be easily generated: ```{r} dates <- CFtimestamp(cf, format = "date") dates[1:10] ``` ...as well as the full range of the time series: ```{r} CFrange(cf) ``` Note that in this latter case, if any of the timestamps in the time series have a time that is other than `00:00:00` then the time of the extremes of the time series is also displayed. This is a common occurrence because the CF Metadata Conventions prescribe that the middle of the time period (month, day, etc) is recorded, which for months with 31 days would be something like `2005-01-15T12:00:00`. ## Supporting processing of climate projection data When working with high resolution climate projection data, typically at a "day" resolution, one of the processing steps would be to aggregate the data to some lower resolution such as a dekad (10-day period), a month or a meteorological season, and then compute a derivative value such as the dekadal sum of precipitation, monthly minimum/maximum daily temperature, or seasonal average daily short-wave irradiance. It is also possible to create factors for multiple "epochs" in one go. This greatly reduces programming effort if you want to calculate anomalies over multiple future periods. A complete example is provided in the vignette ["Processing climate projection data"](Processing.html). It is easy to generate the factors that you need once you have a `CFtime` instance prepared: ```{r} # Create a dekad factor for the whole `cf` time series that was created above f_k <- CFfactor(cf, "dekad") str(f_k) # Create monthly factors for a baseline epoch and early, mid and late 21st century epochs baseline <- CFfactor(cf, epoch = 1991:2020) future <- CFfactor(cf, epoch = list(early = 2021:2040, mid = 2041:2060, late = 2061:2080)) str(future) ``` For the "epoch" version, there are two interesting things to note here: - The epochs do not have to coincide with the boundaries of the time series. In the example above, the time series starts in 2015, while the baseline epoch is from 1991. Obviously, the number of time steps from the time series that then fall within this epoch will then be reduced. - The factor is always of the same length as the time series, with `NA` values where the time series values are not falling in the epoch. This ensures that the factor is compatible with the data set which the time series describes, such that functions like `tapply()` will not throw an error. There are five periods defined for `CFfactor()`: - `year`, to summarize data to yearly timescales - `season`, the meteorological seasons. Note that the month of December will be added to the months of January and February of the following year, so the date "2020-12-01" yields the factor value "2021-DJF". - `month`, monthly summaries, the default period. 
- `dekad`, 10-day period. Each month is subdivided in dekads as follows: (1) days 01 - 10; (2) days 11 - 20; (3) remainder of the month. - `day`, to summarize sub-daily data. ##### Incomplete time series You can test if your time series is complete with the function `CFcomplete()`. A time series is considered complete if the time steps between the two extreme values are equally spaced. There is a "fuzzy" assessment of completeness for time series with a datum unit of "days" or smaller where the time steps are months or years apart - these have different lengths in days in different months or years (e.g. a leap year). If your time series is incomplete, for instance because it has missing time steps, you should recognize that in your further processing. As an example, you might want to filter out months that have fewer than 90% of daily data from further processing or apply weights based on the actual coverage. ```{r} # Is the time series complete? CFcomplete(cf) # How many time units fit in a factor level? CFfactor_units(cf, baseline) # What's the absolute and relative coverage of our time series CFfactor_coverage(cf, baseline, "absolute") CFfactor_coverage(cf, baseline, "relative") ``` The time series is complete but coverage of the baseline epoch is only 20%! Recall that the time series starts in 2015 while the baseline period in the factor is for `1991:2020` so that's only 6 years of time series data out of 30 years of the baseline factor. An artificial example of missing data: ```{r} # 4 years of data on a `365_day` calendar, keep 80% of values n <- 365 * 4 cov <- 0.8 offsets <- sample(0:(n-1), n * cov) cf <- CFtime("days since 2020-01-01", "365_day", offsets) cf # Note that there are about 1.25 days between observations mon <- CFfactor(cf, "month") CFfactor_coverage(cf, mon, "absolute") CFfactor_coverage(cf, mon, "relative") ``` Keep in mind, though, that there are data sets where the time unit is lower than the intended resolution of the data. Since the CF conventions recommend that the coarsest time unit is "day", many files with monthly data sets have a definition like `days since 2016-01-01` with offset values for the middle of the month like `15, 44, 74, 104, ...`. Even in these scenarios you can verify that your data set is complete with the function `CFcomplete()`. ## CFtime and POSIXt The CF Metadata Conventions support nine different calendars. Of these, only the `standard`, `gregorian` and `proleptic_gregorian` calendars are fully compatible with POSIXt. The other calendars have varying degrees of discrepancies: - `julian`: Every fouth year is a leap year. Dates like `2100-02-29` and `2200-02-29` are valid. - `365_day` or `noleap`: No leap year exists. `2020-02-29` does not occur. - `366_day` or `all_leap`: All years are leap years. - `360_day`: All months have 30 days in every year. This means that 31 January, March, May, July, August, October and December never occur, while 29 and 30 February occur in every year. Converting time series using these incompatible calendars to `Date`s is likely to produce problems. This is most pronounced for the `360_day` calendar: ```{r} # Days in January and February cf <- CFtime("days since 2023-01-01", "360_day", 0:59) cf_days <- CFtimestamp(cf, "date") as.Date(cf_days) ``` 31 January is missing from the vector of `Date`s because the `360_day` calendar does not include it and 29 and 30 February are `NA`s because POSIXt rejects them. This will produce problems later on when processing your data. 
The general advice is therefore: **do not convert CFtime objects to Date objects** unless you are sure that the `CFtime` object uses a POSIXt-compatible calendar. ##### So how do I compare climate projection data with different calendars? One reason to convert the "time" dimension from different climate projection data sets is to be able to compare the data from different models and produce a multi-model ensemble. The correct procedure to do this is to first calculate **for each data set individually** the property of interest (e.g. average daily rainfall per month anomaly for some future period relative to a baseline period), which will typically involve aggregation to a lower resolution (such as from daily data to monthly averages), and only then combine the aggregate data from multiple data sets to compute statistically interesting properties (such as average among models and standard deviation, etc). Once data is aggregated from daily or higher-resolution values, the different calendars no longer matter (although if you do need to convert averaged data to absolute data you should use `CFfactor_units()` to make sure that you use the correct scaling factor). Otherwise, there really shouldn't be any reason to convert the time series in the data files to `Date`s. Climate projection data is virtually never compared on a day-to-day basis between different models and neither does complex date arithmetic make much sense (such as adding intervals) - `CFtime` can support basic arithmetic by manipulation the offsets of the `CFtime` object. The character representations that are produced are perfectly fine to use for `dimnames()` on an array or as `rownames()` in a `data.frame` and these also support basic logical operations such as `"2023-02-30" < "2023-03-01"`. So ask yourself, do you really need `Date`s when working with unprocessed climate projection data? (If so, [open an issue on GitHub](https://github.com/pvanlaake/CFtime/issues)). A complete example of creating a multi-model ensemble is provided in the vignette ["Processing climate projection data"](Processing.html). ## Final observations - This package is intended to facilitate processing of climate projection data. It is not a full implementation of the CF Metadata Conventions "time" component. - In parsing and deparsing of offsets and timestamps, data is rounded to 3 digits of precision of the unit of the datum. When using a description of time that is very different from the datum unit, this may lead to some loss of precision due to rounding errors. For instance, if milli-second precision is required, use a unit of "seconds". The authors have no knowledge of published climate projection data that requires milli-second precision so for the intended use of the package this issue is marginal.
/scratch/gouwar.j/cran-all/cranData/CFtime/vignettes/CFtime.Rmd
--- title: "Processing climate projection data" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Processing climate projection data} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup, include = FALSE} library(CFtime) library(ncdf4) ``` Climate projection data sets are produced in a variety of formats but all conform to the CF Metadata Conventions. NetCDF data files, in particular, are highly structured and relatively easy to process. That said, it is very important to maintain a proper processing workflow such that the small changes in the climate projections are maintained and revealed through analysis. In this document, the basic workflow with varying calendars is described. ## Processing climate projection data Individual files containing climate projections contain global, regional or local data, typically on a rectangular latitude-longitude grid, for a single parameter such as "near-surface temperature", and for a number of time steps. An analysis workflow then consists of a number of steps: - Download the appropriate data files for your desired combination of model, experiment, realization, geography, time range, parameter, ... (called a "data suite" henceforth). If your analysis involves multiple parameters (such as temperature and precipitation to estimate crop productivity), repeat the process for all parameters. If you want to make a multi-model ensemble to reduce model bias, repeat again for all desired model, experiment and realization combinations ("ensemble member"). You end up having one or more data suites to work with. - Take all files in a data suite and extract the data. Process the data in the data suite. Since the data are (mostly) 3-dimensional arrays, this will involve binding the arrays on the right dimension and then do something like `apply(data, 1:2, tapply, f, fun)` (following the CF Metadata Conventions, dimensions 1 and 2 are "longitude" and "latitude", respectively; the third dimension is "time"). Repeat for the data suite for each ensemble member. - Combine the above results as your workflow requires. Frequently this involves computing "anomalies": ratio the data for one or more future periods to a baseline period. Repeat for each ensemble member. - Construct the multi-model ensemble from the individual ensemble members. Apart from the first step of obtaining the data, the steps lend themselves well to automation. The catch, however, is in the factor `f` to use with `tapply()`. The different models (in your ensemble) use different calendars, meaning that different factors are required. The CFtime package can help out. The `CFfactor()` function produces a factor that respects the calendar of the data files. The function comes in two operating modes: - Plain vanilla mode produces a factor for a time period across the entire time series. The factor level includes the year. This would be useful to calculate mean temperature for every month in every year, for instance. - When one or more "epochs" (periods of interest) are provided, the factor level no longer includes the year and can be used to calculate, for instance, the mean temperature per period of interest in the epoch (e.g. average March temperature in the epoch 2041-2060). 
```{r} # Setting up nc <- nc_open(list.files(path = system.file("extdata", package = "CFtime"), full.names = TRUE)[1]) cf <- CFtime(nc$dim$time$units, nc$dim$time$calendar, nc$dim$time$vals) # Create monthly factors for a baseline epoch and early, mid and late 21st century epochs baseline <- CFfactor(cf, epoch = 1991:2020) future <- CFfactor(cf, epoch = list(early = 2021:2040, mid = 2041:2060, late = 2061:2080)) str(baseline) str(future) ``` Building on the examples above of opening a file, creating a `CFtime` instance and a suitable factor for one data suite, here daily rainfall, the actual processing of the data into precipitation anomalies for 3 periods relative to a baseline period could look like this: ```{r} # Read the data from the NetCDF file. # Keep degenerate dimensions so that we have a predictable data structure: 3-dimensional array. # Converts units of kg m-2 s-1 to mm/day. pr <- ncvar_get(nc, "pr", collapse_degen = FALSE) * 86400 # Assign dimnames(), optional. dimnames(pr) <- list(nc$dim$lon$vals, nc$dim$lat$vals, CFtimestamp(cf)) # Get a global attribute from the file experiment <- ncatt_get(nc, "")$experiment_id nc_close(nc) # Calculate the daily average precipitation per month for the baseline period # and the three future epochs. pr_base <- apply(pr, 1:2, tapply, baseline, mean) # an array pr_future <- lapply(future, function(f) apply(pr, 1:2, tapply, f, mean)) # a list of arrays # Calculate the precipitation anomalies for the future epochs against the baseline. # Working with daily averages per month so we can simply subtract and then multiply by days # per month for each of the factor levels using the CF calendar. ano <- mapply(function(pr, f) {(pr - pr_base) * CFfactor_units(cf, f)}, pr_future, future, SIMPLIFY = FALSE) # Plot the results plot(1:12, ano$early[,1,1], type = "o", col = "blue", ylim = c(-50, 40), xlim = c(1, 12), main = paste0("Hamilton, New Zealand\nExperiment: ", experiment), xlab = "month", ylab = "Precipitation anomaly (mm)") lines(1:12, ano$mid[,1,1], type = "o", col = "green") lines(1:12, ano$late[,1,1], type = "o", col = "red") ``` Looks like Hadley will be needing rubber boots in spring and autumn back home! The interesting feature, working from opening the NetCDF file down to plotting, is that the specifics of the CF calendar that the data suite uses do not have to be considered anywhere in the processing workflow: the `CFtime` package provides the functionality. Data suites using another CF calendar are processed exactly the same. ## Combining data from different models with different calendars Different climate projection data sets can use different calendars. It is absolutely essential to respect the calendar of the different data sets because the underlying solar and atmospheric physics are based on those calendars as well. In a typical situation, a researcher would construct a multi-model ensemble to remove or reduce the bias in any given model. The data sets composing the ensemble might well use different calendars. The correct way of constructing an ensemble is to perform the desired analysis on every ensemble member individually and to combine them only in the final step and to then perform any ensemble operations such as computing confidence intervals. The design of the CFtime package makes it easy to do this, through its heavy use of lists. 
Building on the previous example, let's make a multi-model ensemble of 2 models (not much of an ensemble but such are the limitations of including data with packages - the example easily extends to a larger set of ensemble members).

```{r}
# Get the list of files that make up the ensemble members, here:
# GFDL ESM4 and MRI ESM2 models for experiment SSP2-4.5, precipitation, CMIP6 2015-01-01 to 2099-12-31
lf <- list.files(path = system.file("extdata", package = "CFtime"), full.names = TRUE)

# Loop over the files individually
# ano is here a list with each element holding the results for a single model
ano <- lapply(lf, function(fn) {
  nc <- nc_open(fn)
  cf <- CFtime(nc$dim$time$units, nc$dim$time$calendar, nc$dim$time$vals)
  pr <- ncvar_get(nc, "pr", collapse_degen = FALSE) * 86400
  nc_close(nc)
  baseline <- CFfactor(cf, epoch = 1991:2020)
  pr_base <- apply(pr, 1:2, tapply, baseline, mean)
  future <- CFfactor(cf, epoch = list(early = 2021:2040, mid = 2041:2060, late = 2061:2080))
  pr_future <- lapply(future, function(f) apply(pr, 1:2, tapply, f, mean))
  mapply(function(pr, f) {(pr - pr_base) * CFfactor_units(cf, f)}, pr_future, future, SIMPLIFY = FALSE)
})

# Epoch names
epochs <- c("early", "mid", "late")
dim(epochs) <- 3

# Build the ensemble for each epoch
# For each epoch, grab the data for each of the ensemble members, simplify to an array
# and take the mean per row (months, in this case)
ensemble <- apply(epochs, 1, function(e) {
  rowMeans(sapply(ano, function(a) a[[e]], simplify = T))})
colnames(ensemble) <- epochs
rownames(ensemble) <- rownames(ano[[1]][[1]])
ensemble
```

Here we simply compute the average of the monthly precipitation anomaly over the ensemble members. In a more typical scenario, you would use the values from the individual models to apply a more suitable analysis, such as calculating the confidence interval or model agreement.

One significant advantage of this processing workflow is that it is easily parallelized: the bulk of the work goes into computing the anomalies, `ano`, and this is [embarrassingly parallel](https://en.wikipedia.org/wiki/Embarrassingly_parallel) because each ensemble member reads its own data and produces independent output. Use [package future](https://cran.r-project.org/package=future) or something similar to easily make the code run on all available CPU cores.

## Working with multiple files in a single data suite

Due to the large size of typical climate projection data files, it is common to have a data suite that is contained in multiple files. A case in point is the CORDEX data set, which breaks up the experiment period of 2006 - 2100 into 19 files of 5 years each, with each file covering a single parameter (temperature, precipitation, etc) over an entire domain (such as Europe, South Asia, Central America and the Caribbean, etc).

The CFtime package can streamline processing of such multi-file data suites as well. Assuming that you have your CORDEX files in a directory on disk, organized by domain and other properties such as the variable, GCM/RCM combination, experiment, etc, the process of preparing the files for processing could be encoded in a function as below. The argument `fn` is a list of file names to process, and `var` is the variable contained in the files. (There are no checks on argument sanity here, which should really be included. This function only makes sense for a single [domain, GCM/RCM, experiment, variable] combination.
Also be aware of data size, CORDEX files are huge and stitching all domain data together will easily exhaust available memory and it may thus lead to very large swap files and very poor performance - use the `CFsubset()` function to read temporal chunks of data to avoid such problems.) ```{r eval = FALSE} library(ncdf4) library(abind) prepare_CORDEX <- function(fn, var) { offsets <- vector("list", length(fn)) data <- vector("list", length(fn)) for (i in 1:length(fn)) { nc <- nc_open(fn[i]) if (i == 1) # Create an "empty" CFtime object, without elements cf <- CFtime(nc$dim$time$units, nc$dim$time$calendar) # Make lists of all datum offsets and data arrays offsets[[i]] <- as.vector(nc$dim$time$vals) data[[i]] <- ncvar_get(nc, var, start = c(10, 10, 1), count = c(100, 100, -1), # spatial subsetting collapse_degen = FALSE) nc_close(nc) } # Create a list for output with the CFtime instance assigned the offsets and # the data bound in a single 3-dimensional array list(CFtime = cf + unlist(offsets), data = abind(data, along = 3)) } ``` Calling this function like `prepare_CORDEX(list.files(path = "~/CC/CORDEX/CAM", pattern = "^pr.*\\.nc$", full.names = TRUE), "pr")` will yield a list of NetCDF files with precipitation data, with the resulting `CFtime` instance describing the full temporal extent covered by the data files, as well as the data bound on the temporal dimension, ready for further processing. When working like this it is imperative that the offsets and the data arrays are added to their final structures *in exactly the same order*. It is not necessary that the offsets (and the data) themselves are in order, but the correspondence between offsets and data needs to be maintained. (`list.files()` produces a list in alphabetical order by default, which for most climate projection files produces offsets in chronological order.) ## Acknowledgements The results presented contain modified data from Copernicus Climate Change Service information, 2023-2024. Neither the European Commission nor ECMWF is responsible for any use that may be made of the Copernicus information or data it contains. We acknowledge the World Climate Research Programme, which, through its Working Group on Coupled Modelling, coordinated and promoted CMIP6. We thank the climate modeling groups for producing and making available their model output, the Earth System Grid Federation (ESGF) for archiving the data and providing access, and the multiple funding agencies who support CMIP6 and ESGF. The two datasets used as examples in this vignette carry the following license statements: - **pr_day_GFDL-ESM4_ssp245_r1i1p1f1_gr1_20150101-20991231_v20180701.nc:** CMIP6 model data produced by NOAA-GFDL is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License (https://creativecommons.org/licenses/). Consult https://pcmdi.llnl.gov/CMIP6/TermsOfUse/ for terms of use governing CMIP6 output, including citation requirements and proper acknowledgment. Further information about this data, including some limitations, can be found via the further_info_url (recorded as a global attribute in this file). The data producers and data providers make no warranty, either express or implied, including, but not limited to, warranties of merchantability and fitness for a particular purpose. All liabilities arising from the supply of the information (including any liability arising in negligence) are excluded to the fullest extent permitted by law. 
- **pr_day_MRI-ESM2-0_ssp245_r1i1p1f1_gn_20150101-20991231_v20190603.nc:** CMIP6 model data produced by MRI is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License (https://creativecommons.org/licenses/). Consult https://pcmdi.llnl.gov/CMIP6/TermsOfUse/ for terms of use governing CMIP6 output, including citation requirements and proper acknowledgment. Further information about this data, including some limitations, can be found via the further_info_url (recorded as a global attribute in this file). The data producers and data providers make no warranty, either express or implied, including, but not limited to, warranties of merchantability and fitness for a particular purpose. All liabilities arising from the supply of the information (including any liability arising in negligence) are excluded to the fullest extent permitted by law.
/scratch/gouwar.j/cran-all/cranData/CFtime/vignettes/Processing.Rmd
#' @export
CD_A <- function(alpha, Beta, p) {
  # computing Cobb-Douglas demand structure matrix
  if (!is.matrix(Beta)) Beta <- cbind(Beta)
  if (is.numeric(Beta) && any(abs(colSums(Beta) - 1) > 10^-10)) {
    stop("Li: colSum(Beta)~=1, CD_A")
  }
  A <- dg(1 / p) %*% Beta %*% dg(apply((dg(1 / p) %*% Beta)^(-Beta), 2, prod) %*% dg(1 / alpha))
  A
}
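# A minimal usage sketch with illustrative (hypothetical) values: two
# Cobb-Douglas agents whose share parameters (the columns of Beta) each sum
# to 1, evaluated at prices p = c(1, 2).
#   CD_A(alpha = c(1, 1),
#        Beta = matrix(c(0.5, 0.4,
#                        0.5, 0.6), 2, 2, TRUE),
#        p = c(1, 2))
# Each column of the result holds one agent's demand (input) coefficients per
# unit of output (or utility) at the given prices.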
/scratch/gouwar.j/cran-all/cranData/CGE/R/CD_A.R
#' @export
CD_mA <- function(alpha, Beta, p) {
  # computing Cobb-Douglas demand structure matrix in a monetary economy
  nonnegative_Beta <- Beta
  nonnegative_Beta[Beta < 0] <- 0
  A <- CD_A(alpha, nonnegative_Beta, p)
  tmpA <- A
  Indx <- which(Beta < 0, arr.ind = TRUE)
  for (k in seq_len(nrow(Indx))) {
    A[Indx[k, 1], Indx[k, 2]] <- t(p) %*% tmpA[, Indx[k, 2]] / (-Beta[Indx[k, 1], Indx[k, 2]])
  }
  A
}
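# Note on the convention used above (interpretation inferred from the code,
# not an authoritative statement): a negative entry Beta[i, j] marks commodity
# i as money for agent j. Its demand coefficient is set to the agent's
# expenditure on the remaining inputs divided by abs(Beta[i, j]), so
# abs(Beta[i, j]) acts as an expenditure-to-money (velocity-like) ratio.
# The same convention is used by CES_mA() and Leontief_mA().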
/scratch/gouwar.j/cran-all/cranData/CGE/R/CD_mA.R
#' @export # computing CES demand structure matrix CES_A <- function(sigma, alpha, Beta, p, Theta = NULL) { .CES_A <- function(sigma, alpha, Beta, p) { if (!is.matrix(Beta)) Beta <- cbind(Beta) n <- nrow(Beta) m <- ncol(Beta) A <- matrix(0, n, m) for (cn in 1:m) { if (sigma[cn] == 0) { A[, cn] <- CD_A(alpha[cn], Beta[, cn, drop=F], p) next } e1 <- 1 / (1 - sigma[cn]) e2 <- sigma[cn] / (sigma[cn] - 1) e3 <- -1 / sigma[cn] k <- alpha[cn] beta <- Beta[, cn] for (rn in 1:n) { A[rn, cn] <- 1 / k * (beta[rn] / p[rn])^e1 * (sum(beta^e1 * p^e2))^e3 } } A } # beginning --------------------------------------------------------------- if (!is.matrix(Beta)) Beta <- cbind(Beta) if (is.null(Theta)) { return(.CES_A(sigma, alpha, Beta, p)) } else { if (!is.matrix(Theta)) Theta <- cbind(Theta) result <- matrix(0, nrow(Beta), ncol(Beta)) for (k in 1:ncol(Beta)) { result[, k] <- .CES_A(sigma[k], alpha[k], Beta[, k], p * Theta[, k]) * Theta[, k] } return(result) } }
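# A minimal usage sketch with illustrative (hypothetical) values: a single
# CES agent with two inputs.
#   CES_A(sigma = -1, alpha = 1, Beta = c(0.5, 0.5), p = c(1, 2))
# A sigma of 0 falls back to the Cobb-Douglas case handled by CD_A() above.
# When Theta is supplied, each column k of Beta is evaluated at the rescaled
# prices p * Theta[, k] and the resulting coefficients are multiplied by
# Theta[, k].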
/scratch/gouwar.j/cran-all/cranData/CGE/R/CES_A.R
#' @export
CES_mA <- function(sigma, alpha, Beta, p, Theta = NULL) {
  # computing CES demand structure matrix in a monetary economy
  nonnegative_Beta <- Beta
  nonnegative_Beta[Beta < 0] <- 0
  tmpA <- A <- CES_A(sigma, alpha, nonnegative_Beta, p, Theta)
  Indx <- which(Beta < 0, arr.ind = TRUE)
  for (k in seq_len(nrow(Indx))) {
    A[Indx[k, 1], Indx[k, 2]] <- t(p) %*% tmpA[, Indx[k, 2]] / (-Beta[Indx[k, 1], Indx[k, 2]])
  }
  A
}
/scratch/gouwar.j/cran-all/cranData/CGE/R/CES_mA.R
#' @export ChinaCGE2012 <- list( A = function(state) { alpha <- 2.285721 beta <- c(0.00571921945379653, 0.00341123179518814, 0.805169151687454, 0.000569349978830593, 0.00565630395685361, 0.0861454541796253, 0.041668244206557, 0.00413777246701116, 0.00730057717175716, 0.00303515613905136, 0, 0.0302411226834915, 0.000194272377422948, 0.00078685932401983, 0.000856960229643203, 0.000317602380792532, 0.000313144793248613, 0.0040202829862574, 0.000457294188999536) biaozhongshengchanbumenshu <- 19 teshushangpinshu <- 5 teshubumenshu <- 2 shengchanshuijingeyulaodongbaochouzhibi <- c(-0.0546388292990578, 0.591276558020123, 0.495974367450526, 0.527506584491606, 0.228008738512149, 1.10199418774818, 0.0671396741410387, 0.134584720601729, 0.138364059445131, 0.356622798530461, 1.41472807901058, 0.20434973507668, 0.123481771828289, 0.0219869207799679, 0.126644879324176, 0.00659645886834539, 0.0122636304355387, 0.145571168492461, 7.93984853066929e-06) yingyeyingyuyulaodongbaochouzhibi <- c(0, 0.660896726662984, 0.730380303305365, 0.670387453554885, 0.337269942190754, 1.0763801347756, 0.55693301213863, 0.19318867346451, 1.26488262759778, 1.75275449590008, 1.61101179553438, 0.385875371265639, 0.574471577478554, 0.356904732024932, 0.278234799284953, 0.0349012980118043, 0.066559726290652, 0.395514412900369, 0.0184702026887848) moxingzhongdeshengchanbumenshu <- 36 moxingzhongdebumenshu <- 38 moxingzhongdeshangpinshu <- 41 jinkouliangweilingdebumenbianhao <- c(6, 11) resultA <- matrix(0, biaozhongshengchanbumenshu * 2 + teshushangpinshu, biaozhongshengchanbumenshu * 2 + teshubumenshu) Aguoneibufen <- matrix(c( 0.308606897522948, 4.52328905440544e-05, 0.224836056235319, 0.0099801743204192, 0.110055456486192, 0.0147464966679159, 0.0114590407239647, 0.000843666967246204, 0.000954063416682648, 0.0122631350593382, 1.35582837499922e-05, 0.000315338136350343, 0.00571909072964625, 0.00138838258420625, 0.000847815851113601, 0.000126745130442897, 7.37852148744756e-05, 3.4134494374241e-05, 0.000389072928393729, 0.000439822227844211, 0.113780308212484, 0.332487126927094, 0.0583900359691977, 0.54302626772912, 0.0137891948174749, 0.0228553005491116, 0.00381215991589948, 0.00166127467023042, 0.0341420000152447, 0.000204212474566424, 0.0157396626012977, 0.0108028482557389, 0.000916113295164773, 0.00419661997880964, 0.000476069195103604, 0.000240302847342917, 0.000986085450144495, 0.000406322037998314, 0.0543785554128105, 0.0483597649757245, 0.563176254020048, 0.0255352008192698, 0.260578256740836, 0.0313645689226056, 0.0249029053736303, 0.00353275598636047, 0.00165171823044043, 0.0189178680357974, 0.000464148650645437, 0.0104318776149767, 0.00784276713323058, 0.000668505872063331, 0.00280230253907816, 0.000190506444065411, 0.000179014413136837, 0.000793219279143359, 0.000444095818372272, 9.99114481460523e-05, 0.139407507672303, 0.257067195157043, 0.315457145553038, 0.538398084052383, 0.0112805165219467, 0.0131851344128802, 0.00184929052370108, 0.00305022259059869, 0.0481423933190668, 0.000130965146007583, 0.00342258627212647, 0.00394023197608815, 0.00513925910415559, 0.00208977503156526, 0.000133390679619812, 0.000220147344486412, 0.00140657098600207, 0.000209479291781967, 0.00743495216621813, 0.00397796114597878, 0.493812491668988, 0.0136876097528861, 0.0756287261761975, 0.0196443581132756, 0.0296498682944224, 0.00457818688657874, 0.010888237491029, 0.0276105989936663, 7.75910775854785e-05, 0.00619064800406437, 0.0356702169417439, 0.000127485651670709, 0.00440960231697161, 0.000447764706148967, 0.000177026373152047, 
0.000966401963610176, 0.000326590999169442, 0.000184474376623979, 3.76440824820552e-06, 0.21103531378629, 0.0108467956640052, 0.636176125865137, 0.247115829850921, 0.0323666295666114, 0.00513870395978926, 0.00301031659437095, 0.0390018396154708, 0.0459559477755182, 0.0815802251953086, 0.00383922694163783, 0.000514742838533487, 0.0070074127342273, 0.000598529868087193, 0.00020758072579909, 0.000773935642496554, 0.000423086744187935, 0.0120962728340892, 0.000635356996118575, 0.326914722606468, 0.0214225677308233, 0.283251206969689, 0.0212456146354955, 0.232207477685457, 0.0128738799228021, 0.00902190653785156, 0.082484995797742, 0.00361644008860294, 0.0115524541838556, 0.00145634957424787, 0.000451375182124339, 0.0176150587812642, 0.0005973058905385, 0.000234801391235386, 0.00157434132825178, 0.000491459663397123, 0.104509002385032, 0.000102620985102742, 0.410619722107862, 0.012851605990931, 0.392786335224067, 0.0605557594644899, 0.0181781725554692, 0.00171056956411766, 0.00403877074545181, 0.0137628948987927, 0.0100400006160837, 0.00947893975637372, 3.04234801932044e-05, 0.000264226786680897, 0.00408821246338937, 0.000325862113225546, 4.96744930087043e-05, 0.00186099596656353, 0.000222902780926152, 0.00195305537549074, 0, 0.227654653645615, 0.0123798713335471, 0.142957410656557, 0.0266994681367603, 0.0118596820972211, 0.00624999504379099, 1.03493284709634, 0.0395372533364217, 0.0214237895337946, 0.0417244458344787, 0.0140895371842284, 0.000500404492804831, 0.0031182647538252, 0.000725025484007121, 3.38619578751084e-06, 0.00293302521326622, 0.00176833713082189, 1.02904053368048e-05, 0, 0.0572846411157644, 0.00603633765097633, 0.0474064773654737, 0.0118974886741119, 0.0197531752230253, 0.0332020516869077, 0.0259077265872204, 0.0613600718356408, 0.0631864877612865, 0.0840699866329747, 0.00175842070007264, 0.00106740591867861, 0.00586416300047239, 0.00502232636766738, 0.000275562661085659, 0.00923104771403184, 0.000985389776698051, 0.000210208760134965, 4.90109446808696e-05, 1.81605264125274, 0.00720093840849692, 6.42295463665705, 0.00413727877991674, 0.0056045849293814, 0.0044623553026424, 0.004063417072796, 0.104440396282219, 1.38361187752077, 0.0365836490487986, 0.000220709855590099, 0.000276917742085765, 0.00193544950265887, 0.00034305470926945, 2.06726209724787e-06, 0.00163919989135475, 0.00132771581301924, 0.00662972464756392, 1.89009319455551e-05, 0.405065313548954, 0.00256834224119435, 0.269310142338116, 0.0441440142742641, 0.0484333530160302, 0.0367752305312272, 0.00499119856544842, 0.0674962926868713, 0.00881793647175749, 0.0522019635968334, 0.000524583849786712, 0.00354088879035408, 0.00726507848153323, 0.000417289933251544, 6.50076624359765e-06, 0.00142704513608172, 0.00736672182346903, 0.00793859754999348, 0.000627880354911094, 0.275342628481485, 0.00630596999328125, 0.00832332729211927, 0.0298743751642537, 0.0302053399231361, 0.027971487924325, 0.00446793839208332, 0.0377585543740908, 0.00530758224218346, 0.0203407562955396, 0.270880791869279, 0.000262489060919381, 0.0111298753361386, 0.00163056847775585, 6.0235499318738e-05, 0.00205410761756242, 0.000843201648248776, 0.0646877114157884, 0.00189548798522685, 0.826581668771273, 0.0349751889890287, 2.27980607862018, 0.023549136290638, 0.0353926485019496, 0.0127761512554704, 0.00957988485588807, 0.0652052915844931, 0.00403425050393445, 0.0160794103802415, 0.00333303250805973, 0.0160258363268891, 0.0436518609075299, 0.00277194285757473, 0.00018294185858154, 0.00295456670795164, 0.00139131054231092, 0.00409750314573955, 
0.000790003350392646, 0.293203270777345, 0.0209516546324854, 0.229261406166816, 0.0317745729801672, 0.0244499742068771, 0.0123384928544908, 0.00435341791686851, 0.0213399168807007, 0.0496722237717052, 0.0173321793524548, 7.98483312524197e-05, 0.000968542923306525, 0.0190124364738636, 0.000796675632036532, 0.000212591182254008, 0.00280333696476039, 0.00139801329027901, 0.00509168478804414, 0.000898170160230931, 0.710754883474161, 0.00772059536230375, 2.25208790695806, 0.0106052159961952, 0.0244172029012677, 0.0177513358884109, 0.0136492450581343, 0.0356362914189952, 0.00852384188147665, 0.00515100334909163, 0.00735971841433444, 0.000465465010996321, 0.0112498581446825, 0.0134944258513373, 0.000326355149228551, 0.00335035485408844, 0.00128233313740356, 0.00293286650972334, 0.00110389018871079, 0.503485358512057, 0.00794975809406547, 0.385440571725958, 0.0378673238308601, 0.0136443468842084, 0.00505235558376033, 0.0127093302323898, 0.0138534335883619, 0.00613287360958986, 0.00114113594398853, 0.000785996965382305, 0.000442192977866222, 0.00749162911345829, 0.00231328894611609, 0.00469275964871419, 0.00155635815881185, 0.000568406847645401, 0.00296655345293303, 0.000649902163976158, 0.713947141909208, 0.00852233582375597, 1.82376939927774, 0.0494404144783277, 0.04218853433074, 0.0256828093682994, 0.011760092560816, 0.022008369690241, 0.0135785920638117, 0.0174513673793463, 0.00131051078045701, 0.00137792606162987, 0.0128086339611182, 0.00151400717327619, 0.000335570928802637, 0.0332893043085715, 0.00101029438540035, 1.12310404558296e-05, 0.000625038272079271, 0.50079498325685, 0.009808313857018, 1.37870701517068, 0.0211488730306462, 0.0541138856514974, 0.0371702487088417, 0.0267830923003035, 0.0277118367665275, 0.00986840605170787, 0.0145661015944401, 0.000389375537565625, 0.00183536134048781, 0.019877287962179, 0.00788356529886674, 0.00273081674491942, 0.009430320799954, 0.00542987389625932 ), 19, 19) Ajinkoubufen <- matrix( c( 0.0138324809572758, 2.13619459218014e-05, 0.0260253062985319, 4.14404379314917e-06, 0.000183042975520331, 0, 0.00066906439573971, 4.27913829513443e-05, 2.92744847806188e-05, 9.26964435494837e-05, 0, 2.91098162919453e-05, 5.14262171499666e-05, 5.01486377450464e-05, 7.48439104680718e-06, 7.57980886314357e-07, 3.10067740439762e-07, 6.55824969493723e-06, 7.61278454550443e-07, 2.66682157745066e-05, 0.0428514564500408, 0.0384861728295924, 2.42451542800053e-05, 0.000903155072945508, 0, 0.00133446317363728, 0.000193355436642827, 5.09745570364582e-05, 0.000258077723336303, 0, 0.00145297581866831, 9.71395011024998e-05, 3.30901829908104e-05, 3.70471311134361e-05, 2.84706283539759e-06, 1.0098250851965e-06, 0.00018945628816674, 7.95028876499259e-07, 0.00329719363289765, 0.0228386616857109, 0.0637222259128822, 1.06029200557559e-05, 0.000433390774002447, 0, 0.001454017638765, 0.00017918387249348, 5.06813272088628e-05, 0.000142999247579967, 0, 0.000962998146899207, 7.0522372299396e-05, 2.41465567127571e-05, 2.47383060913204e-05, 1.13929618295161e-06, 7.52272588511365e-07, 0.000152400971241161, 8.68938837966726e-07, 6.05803865475877e-06, 0.0658373940769586, 0.0297561370061763, 0.000130012805495462, 0.000895457530829087, 0, 0.000769846639104034, 9.3797318207543e-05, 9.35930392514808e-05, 0.000363906017755198, 0, 0.00031594928154914, 3.54306715529384e-05, 0.000185630996833323, 1.84482202303063e-05, 7.97723630178059e-07, 9.25125579492333e-07, 0.000270244042250025, 4.09877068976428e-07, 0.000450811478113997, 0.00187865488712605, 0.0571599660878049, 5.6834732959968e-06, 
0.000125784831721091, 0, 0.0017311807935792, 0.000232208864270068, 0.000334094712306653, 0.000208707179575167, 0, 0.000571477424875116, 0.000320747546934966, 4.60480550249817e-06, 3.89273072186289e-05, 2.67779194073251e-06, 7.43918244527458e-07, 0.0001856745060743, 6.39023362905877e-07, 1.11854339531435e-05, 1.77780116323606e-06, 0.0244278376567265, 4.50388889049888e-06, 0.00105808085079327, 0, 0.00188980561067622, 0.000260638685113721, 9.23685635417986e-05, 0.000294813015329916, 0, 0.00753091711638887, 3.45224315755665e-05, 1.85926072792325e-05, 6.18603875599231e-05, 3.57942114472587e-06, 8.72316855305858e-07, 0.000148696012182162, 8.27831491864243e-07, 0.000733446364427617, 0.000300057361554295, 0.0378411538246703, 8.89524130421271e-06, 0.000471100165305861, 0, 0.012992801179574, 0.000652972259473899, 0.00027682820766028, 0.000623500085389863, 0, 0.00106644195626098, 1.3095534411318e-05, 1.6303755717592e-05, 0.00015550309428434, 3.57210132435924e-06, 9.86706306355878e-07, 0.000302477705471034, 9.61613172546995e-07, 0.00633680712237595, 4.84643786377458e-05, 0.0475302058709408, 5.33634146347047e-06, 0.000653277736866776, 0, 0.00106137750353234, 8.67612934070369e-05, 0.000123925653843057, 0.000104033055486243, 0, 0.000875029573473143, 2.73568749445284e-07, 9.54392078850043e-06, 3.60901258430671e-05, 1.94877114833417e-06, 2.08747210818625e-07, 0.000357552571196276, 4.36142101376777e-07, 0.000118421714219491, 0, 0.0263515656279448, 5.14046421562065e-06, 0.000237765129096903, 0, 0.000692456831874918, 0.000317004151811027, 0.031755882629511, 0.000298860181696396, 0, 0.0038517096827625, 0.000126693496052209, 1.80747035587529e-05, 2.75275730861235e-05, 4.3359098455921e-06, 1.42298166144303e-08, 0.000563521214032166, 3.46001188937501e-06, 6.23949251665035e-07, 0, 0.00663083295536406, 2.50645397292323e-06, 7.88459105342244e-05, 0, 0.0011533379244298, 0.00168403145276895, 0.000794952761437133, 0.000463817809033549, 0, 0.00776075451855042, 1.58117660722136e-05, 3.85549007540562e-05, 5.17679505521213e-05, 3.00352950698929e-05, 1.15799746355357e-06, 0.00177355830119415, 1.9280601439722e-06, 1.27458145997942e-05, 2.3146191571141e-05, 0.210212396686915, 2.99002834605074e-06, 0.0106825846338052, 0, 0.00032723753405521, 0.000226333804728198, 0.000124681901826282, 0.00078945989352766, 0, 0.00337714719641892, 1.98462893793193e-06, 1.00023204634006e-05, 1.70858576307822e-05, 2.05158897764142e-06, 8.6872592087842e-09, 0.000314938960851606, 2.59787142320686e-06, 0.000401987249015128, 8.92626319556904e-06, 0.0468872699180816, 1.0664465751393e-06, 0.00044791354618167, 0, 0.00282790094301248, 0.00186526560109645, 0.000153149951969984, 0.000510201205040438, 0, 0.00481892100958688, 4.71707203955577e-06, 0.000127897563152221, 6.41350220924957e-05, 2.49554197743768e-06, 2.73181816127865e-08, 0.00027417773428137, 1.44140756028091e-05, 0.000481349552176759, 0.00029652639983087, 0.031871561720392, 2.61841276228775e-06, 1.38432626824844e-05, 0, 0.00176361337659975, 0.0014187335737413, 0.000137094235218216, 0.000285415082449308, 0, 0.00187771668170368, 0.00243576734186312, 9.48115381007114e-06, 9.82528684837179e-05, 9.75137849988215e-06, 2.53127746524769e-07, 0.000394655052116809, 1.64984814107555e-06, 0.00392227981393952, 0.000895174094532028, 0.0956787868935441, 1.45226636520429e-05, 0.00379174737503662, 0, 0.00206648720028213, 0.000648015392615918, 0.00029394921606879, 0.000492883638746761, 0, 0.00148433896283195, 2.9970717659515e-05, 0.000578856195447047, 0.000385352074420006, 1.65772025848665e-05, 
7.68776899528296e-07, 0.000567659974647754, 2.72230387198347e-06, 0.000248448330051221, 0.00037309154126892, 0.0339389733901278, 8.69970519035075e-06, 0.000381304946584037, 0, 0.00142757213388415, 0.000625817050183102, 0.000133580288608249, 0.000161307397407829, 0, 0.0015999858524199, 7.1799833507859e-07, 3.49839509325132e-05, 0.000167838934759322, 4.76440317324981e-06, 8.93372305428098e-07, 0.000538604251534483, 2.73541878500366e-06, 0.000308729618438999, 0.000424175023100548, 0.082271562023096, 3.20580425385068e-06, 0.00374564682919359, 0, 0.00142565870026319, 0.000900360262270719, 0.000418813476897207, 0.000269372999632617, 0, 0.000475504685055483, 6.61787852702211e-05, 1.68126829628835e-05, 9.93120587039253e-05, 8.07014583626018e-05, 1.37144282732425e-06, 0.000643702626992421, 2.5090735381967e-06, 0.000177831660083424, 0.000521329550935512, 0.0582796233464801, 3.30095894410847e-06, 0.000641060347097182, 0, 0.0007966588934669, 0.000256259034648157, 0.000389973127523987, 0.000104717433052508, 0, 0.000105341707406858, 7.06770578257035e-06, 1.59720927881665e-05, 6.61349770579533e-05, 1.38342893297053e-05, 1.97203922652945e-05, 0.000299022604768488, 1.11217166487997e-06, 0.000179874236864281, 0.000306926546465097, 0.0826410735015808, 3.53870901846987e-06, 0.00303327238980293, 0, 0.0024632817797836, 0.00130265018498058, 0.000360846715920973, 0.000166360199796877, 0, 0.0016109884593585, 1.17841480681183e-05, 4.97709462004241e-05, 0.000113072697584629, 9.05430050902984e-06, 1.41017031431184e-06, 0.00639586359278586, 1.97678967676788e-06, 6.80983795922124e-07, 0.000295184181391402, 0.0579682060353639, 4.07268258604197e-06, 0.00229304972679167, 0, 0.00315957287147331, 0.00188530120135969, 0.000821812485641034, 0.000209472430993478, 0, 0.00134464085572218, 3.50127527159783e-06, 6.62935937416751e-05, 0.000175473713853702, 4.71465198834153e-05, 1.14757160915328e-05, 0.00181184457667344, 1.06243475361128e-05 ), 19, 19 ) laodongtouruxishuxiangliang <- c(0.592658465793436, 0.194754773093445, 0.0794143326736502, 0.0781923621348833, 0.162046744598998, 0.205090310212276, 0.178010129295939, 0.273455508816636, 0.146235203444634, 0.186802664393738, 0.0925385230820084, 0.16064258126527, 0.198966907029883, 0.243614420504346, 0.350288134821033, 0.629645973483973, 0.361176363951541, 0.274216117011671, 0.520063356748265) xiaofeixiangliang <- c(211936918, 1631933, 758891902, 49690402, 0, 125284831, 84081747, 117141065, 58974562, 105244788, 206682390, 22314214, 69525851, 47288829, 77502170, 207404105, 203897570, 45833581, 323858903) shangpinjinkoubili <- c(0.0571677643975567, 0.320774808610785, 0.103743781169251, 0.000415055253847176, 0.00166042695078506, 0, 0.0551664410174583, 0.0482722994733009, 0.0297705241881317, 0.00750224244542503, 0, 0.0845115091224714, 0.00891189084515529, 0.034861000567664, 0.00875060087426412, 0.0059448030594113, 0.00418471634761462, 0.161165088394828, 0.00195282620451706) duinatouziyuxiaofeizhibi <- 0.6578893 duiwaitouziyuxiaofeizhibi <- 0.05387513 resultA[1:(biaozhongshengchanbumenshu * 2), 1:biaozhongshengchanbumenshu] <- rbind(Aguoneibufen, Ajinkoubufen) resultA[1:biaozhongshengchanbumenshu, (biaozhongshengchanbumenshu + 1):(biaozhongshengchanbumenshu * 2)] <- CD_A(matrix(alpha, biaozhongshengchanbumenshu, 1), matrix(beta, biaozhongshengchanbumenshu, biaozhongshengchanbumenshu), state$p[1:biaozhongshengchanbumenshu]) resultA[biaozhongshengchanbumenshu * 2 + 1, 1:biaozhongshengchanbumenshu] <- laodongtouruxishuxiangliang resultA[1:(biaozhongshengchanbumenshu * 2), 
biaozhongshengchanbumenshu * 2 + 1] <- xiaofeixiangliang * c(1 - shangpinjinkoubili, shangpinjinkoubili) resultA <- resultA[-(biaozhongshengchanbumenshu + jinkouliangweilingdebumenbianhao), -(biaozhongshengchanbumenshu + jinkouliangweilingdebumenbianhao)] gongzilv <- state$p[moxingzhongdeshengchanbumenshu + 1] shuipiaojiage <- state$p[moxingzhongdeshengchanbumenshu + 2] gupiaojiage <- state$p[moxingzhongdeshengchanbumenshu + 3] tmplaodongbaochou <- gongzilv * resultA[moxingzhongdeshengchanbumenshu + 1, 1:biaozhongshengchanbumenshu] resultA[moxingzhongdeshengchanbumenshu + 2, 1:biaozhongshengchanbumenshu] <- tmplaodongbaochou * shengchanshuijingeyulaodongbaochouzhibi / shuipiaojiage resultA[moxingzhongdeshengchanbumenshu + 3, 1:biaozhongshengchanbumenshu] <- tmplaodongbaochou * yingyeyingyuyulaodongbaochouzhibi / gupiaojiage resultA[1:biaozhongshengchanbumenshu, moxingzhongdebumenshu] <- CD_A(alpha, matrix(beta, biaozhongshengchanbumenshu, 1), state$p[1:biaozhongshengchanbumenshu]) tmpxiaofeie <- sum(resultA[1:moxingzhongdeshengchanbumenshu, moxingzhongdeshengchanbumenshu + 1] * state$p[1:moxingzhongdeshengchanbumenshu]) resultA[moxingzhongdeshangpinshu - 1, moxingzhongdebumenshu - 1] <- tmpxiaofeie * duinatouziyuxiaofeizhibi / state$p[moxingzhongdeshangpinshu - 1] resultA[moxingzhongdeshangpinshu, moxingzhongdebumenshu - 1] <- tmpxiaofeie * duiwaitouziyuxiaofeizhibi / state$p[moxingzhongdeshangpinshu] resultA }, B = { biaozhongshengchanbumenshu <- 19 teshushangpinshu <- 5 teshubumenshu <- 2 moxingzhongdeshengchanbumenshu <- 36 moxingzhongdebumenshu <- 38 moxingzhongdeshangpinshu <- 41 jinkouliangweilingdebumenbianhao <- c(6, 11) guoneizhaiquangonggeixishuxiangliang <- c(0.401099545759085, 0.131806184389208, 0.053746052069834, 0.0529190465408162, 0.109670036626205, 0.138801071803815, 0.120473740142274, 0.185069288135168, 0.0989690978211274, 0.126424354260435, 0.0626282449601368, 0.108719726611523, 0.134656873455666, 0.164873428870774, 0.237068092115102, 0.426131960530337, 0.244437030568586, 0.185584052740987, 0.351968609574166) Bgudingzichandeguoneibufen <- c(0.0882561716108159, 0, 0.0277568368396929, 0, 0.098803146173774, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.140959999244394, 0, 0.501760755032525, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0670146743091446, 0, 0.238545216798109, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.130929738968232, 0, 0.466057073163076, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0111523645729912, 0, 0.0396979206763483, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.171287441122797, 0, 0.609714218544748, 0.18735074494246, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0636111294980147, 0, 0.226429969753742, 0, 0.0726047670826852, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0995018380321376, 0, 0.354186419166819, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0348206740877757, 0, 0.12394755827667, 0, 0, 0, 0.818211234132971, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.00761364654663068, 0, 0.0271015114945109, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.72900573452845, 0, 6.154563191423, 0, 0, 0, 0, 0, 1.29983788490156, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05999843908289, 0, 0.213570248697353, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.00074901996361455, 0, 0.00266620902732886, 0, 0, 0, 0, 0, 0, 0, 0.11571363635982, 0, 0, 0, 0, 0, 0, 0, 0, 0.617709034447327, 0, 2.19879507077275, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.056949942670452, 0, 0.202718830778665, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0.614522283481341, 0, 2.18745152239473, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0983725323391308, 0, 0.3501665462935, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.492356632704138, 0, 1.75259106906334, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.369273161528009, 0, 1.31446354522388, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) Bgudingzichandejinkoubufen <- c(0.00535133169482011, 0, 0.0032129196389894, 0, 0.000164328261803965, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0163164539424966, 0, 0.00083452274455837, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.00775710735455369, 0, 0.00039674567416243, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0151554275470332, 0, 0.000775140789541581, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0012909126268485, 0, 6.60251272819039e-05, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0198269272062257, 0, 0.00101406970942405, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.00736313897735655, 0, 0.000376595733950761, 0, 0.00423920865508468, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0115175735396416, 0, 0.000589078798299472, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.00403057553948217, 0, 0.000206148160207647, 0, 0, 0, 0.0251059959978786, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.000881297629671268, 0, 4.50749237106306e-05, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.200136510960346, 0, 0.0102361990541315, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.00694496150088906, 0, 0.000355207593083631, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8.67008357253051e-05, 0, 4.43440833651598e-06, 0, 0, 0, 0, 0, 0, 0, 0.00104050012002877, 0, 0, 0, 0, 0, 0, 0, 0, 0.0715012845094402, 0, 0.00365701079404615, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.00659209081719125, 0, 0.000337159639007856, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0711324105335504, 0, 0.0036381443342234, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011386853729584, 0, 0.000582392991748498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.056991446962705, 0, 0.00291488940570576, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0427442435057442, 0, 0.00218620072291436, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) Bcunhuo <- c(0.0804764278026037, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0230444152206456, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0126715335589211, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.00234499532032565, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0242817374213106, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.00968026089863429, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) result <- matrix(0, biaozhongshengchanbumenshu * 2 + teshushangpinshu, biaozhongshengchanbumenshu * 2 + teshubumenshu) 
diag(result[1:(biaozhongshengchanbumenshu * 2), 1:(biaozhongshengchanbumenshu * 2)]) <- 1 result[(biaozhongshengchanbumenshu * 2 + 1):(biaozhongshengchanbumenshu * 2 + 3), biaozhongshengchanbumenshu * 2 + 1] <- 1 result[1:biaozhongshengchanbumenshu, 1:biaozhongshengchanbumenshu] <- result[1:biaozhongshengchanbumenshu, 1:biaozhongshengchanbumenshu] + Bgudingzichandeguoneibufen + Bcunhuo result[(biaozhongshengchanbumenshu + 1):(biaozhongshengchanbumenshu * 2), 1:biaozhongshengchanbumenshu] <- Bgudingzichandejinkoubufen result <- result[-(biaozhongshengchanbumenshu + jinkouliangweilingdebumenbianhao), -(biaozhongshengchanbumenshu + jinkouliangweilingdebumenbianhao)] result[moxingzhongdeshangpinshu - 1, 1:biaozhongshengchanbumenshu] <- guoneizhaiquangonggeixishuxiangliang result[moxingzhongdeshangpinshu, moxingzhongdebumenshu] <- 1 result }, S0Exg = { biaozhongshengchanbumenshu <- 19 teshushangpinshu <- 5 teshubumenshu <- 2 moxingzhongdeshengchanbumenshu <- 36 moxingzhongdebumenshu <- 38 moxingzhongdeshangpinshu <- 41 jinkouliangweilingdebumenbianhao <- c(6, 11) laodongzhebaochou_shengchanshuijinge_yingyeyingyu <- c(2641340939, 736062253, 1273778692) tmp <- matrix(NA, biaozhongshengchanbumenshu * 2 + teshushangpinshu, biaozhongshengchanbumenshu * 2 + teshubumenshu) tmp[(biaozhongshengchanbumenshu * 2 + 1):(biaozhongshengchanbumenshu * 2 + 3), biaozhongshengchanbumenshu * 2 + 1] <- laodongzhebaochou_shengchanshuijinge_yingyeyingyu tmp <- tmp[-(biaozhongshengchanbumenshu + jinkouliangweilingdebumenbianhao), -(biaozhongshengchanbumenshu + jinkouliangweilingdebumenbianhao)] tmp }, z0 = c(894213473, 535980969, 8183479570, 535172923, 1386125871, 721553392, 619666563, 233344906, 250850952, 590140255, 419085368, 344051923, 249336127, 61624086, 157224489, 220302538, 207698068, 70036569, 336382794, 51186806, 248969888, 812578974, 221872, 2284306, 32662160, 11526917, 7389509, 4433607, 27953887, 2245559, 2192016, 1364309, 1315772, 872352, 12415233, 656617, 1, 146388737), subject.names = c(row.names.eng <- c( "Agriculture, Forestry, Fishing and Hunting", "Mining", "Manufacturing", "Utilities", "Construction", "Wholesale and Retail Trade", "Transportation and Warehousing", "Accommodation and Food Services", "Information", "Finance and Insurance", "Real Estate Rental and Leasing", "Leasing and Business services", "Scientific, and Technical Services", "Water Conservancy, Environment and Public Facilities Management", "Other Services (except Public Administration)", "Educational Services", "Health Care and Social Assistance", "Arts, Entertainment, and Recreation", "Public Administration", "Foreign Trade: Agriculture, Forestry, Fishing and Hunting", "Foreign Trade: Mining", "Foreign Trade: Manufacturing", "Foreign Trade: Utilities", "Foreign Trade: Construction", "Foreign Trade: Transportation and Warehousing", "Foreign Trade: Accommodation and Food Services", "Foreign Trade: Information", "Foreign Trade: Finance and Insurance", "Foreign Trade: Leasing and Business services", "Foreign Trade: Scientific, and Technical Services", "Foreign Trade: Water Conservancy, Environment and Public Facilities Management", "Foreign Trade: Other Services (except Public Administration)", "Foreign Trade: Educational Services", "Foreign Trade: Health Care and Social Assistance", "Foreign Trade: Arts, Entertainment, and Recreation", "Foreign Trade: Public Administration", "Compensation of Employees", "Taxes on Production", "Gross Operating Surplus", "Domestic Bond", "Foreign Bond" )), sector.names = c(row.names.eng <- c( 
"Agriculture, Forestry, Fishing and Hunting", "Mining", "Manufacturing", "Utilities", "Construction", "Wholesale and Retail Trade", "Transportation and Warehousing", "Accommodation and Food Services", "Information", "Finance and Insurance", "Real Estate Rental and Leasing", "Leasing and Business services", "Scientific, and Technical Services", "Water Conservancy, Environment and Public Facilities Management", "Other Services (except Public Administration)", "Educational Services", "Health Care and Social Assistance", "Arts, Entertainment, and Recreation", "Public Administration", "Foreign Trade: Agriculture, Forestry, Fishing and Hunting", "Foreign Trade: Mining", "Foreign Trade: Manufacturing", "Foreign Trade: Utilities", "Foreign Trade: Construction", "Foreign Trade: Transportation and Warehousing", "Foreign Trade: Accommodation and Food Services", "Foreign Trade: Information", "Foreign Trade: Finance and Insurance", "Foreign Trade: Leasing and Business services", "Foreign Trade: Scientific, and Technical Services", "Foreign Trade: Water Conservancy, Environment and Public Facilities Management", "Foreign Trade: Other Services (except Public Administration)", "Foreign Trade: Educational Services", "Foreign Trade: Health Care and Social Assistance", "Foreign Trade: Arts, Entertainment, and Recreation", "Foreign Trade: Public Administration", "Consumer", "Rest of the World" )) ) # end of list
/scratch/gouwar.j/cran-all/cranData/CGE/R/ChinaCGE2012.R
#' @export
F_Z <- function(A, p, S) {
  s <- rowSums(S)
  S_bar <- S
  m <- ncol(S)
  for (tk in 1:length(s)) {
    if (s[tk] != 0) S_bar[tk, ] <- S_bar[tk, ] / s[tk]
  }
  Z <- dg(1 / (t(p) %*% A)) %*% t(S_bar) %*% dg(p) %*% A
  tmp <- PF_eig(Z)
  z_structure <- tmp$vec
  zeta <- min(s / (A %*% z_structure))
  z <- zeta * z_structure
  q <- pmin(A %*% z / s, 1)
  list(z = z, q = q)
}
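# Descriptive note (interpretation inferred from the code): F_Z is the default
# exchange function used by sdm(). Given the demand structure matrix A, the
# price vector p and the supply matrix S (holdings of each of the n commodities
# by each of the m agents), it returns
#   z - the purchase (activity) levels of the m agents, and
#   q - the fraction of each commodity's total supply that is actually sold,
# with the structure of z taken from the Perron-Frobenius eigenvector of the
# exchange matrix Z and scaled so that no commodity is oversold.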
/scratch/gouwar.j/cran-all/cranData/CGE/R/F_Z.R
#' @export
Leontief_mA <- function(A.pre, p) {
  # computing Leontief demand structure matrix in a monetary economy
  A <- A.pre
  nonnegativeA <- A
  nonnegativeA[nonnegativeA < 0] <- 0
  Indx <- which(A < 0, arr.ind = TRUE)
  for (k in seq_len(nrow(Indx))) {
    A[Indx[k, 1], Indx[k, 2]] <- t(p) %*% nonnegativeA[, Indx[k, 2]] / (-A[Indx[k, 1], Indx[k, 2]])
  }
  A
}
/scratch/gouwar.j/cran-all/cranData/CGE/R/Leontief_mA.R
#' @export # MWG_exchange_quasiLinear_2_2 Example.MWG.15.B.2 <- function(p0 = c(1, 0.3)) { r <- 2^(8 / 9) - 2^(1 / 9) MWG_quasi_linear_demand_1 <- function(p, w) { # only for case 'MWG_exchange_quasiLinear_2_2' d <- rbind(0, 0) w <- w / p[1] p <- p / p[1] # normalize wealth and prices d[2] <- (1 / p[2])^(1 / 9) if (d[2] * p[2] > w) { d[2] <- w / p[2] d[1] <- 0 } else { d[1] <- w - d[2] * p[2] } d } MWG_quasi_linear_demand_2 <- function(p, w) { # only for case 'MWG_exchange_quasiLinear_2_2' d <- rbind(0, 0) w <- w / p[2] p <- p / p[2] d[1] <- (1 / p[1])^(1 / 9) if (d[1] * p[1] > w) { d[1] <- w / p[1] d[2] <- 0 } else { d[2] <- w - d[1] * p[1] } d } sdm( A = function(state) { result1 <- MWG_quasi_linear_demand_1(state$p, state$w[1]) result2 <- MWG_quasi_linear_demand_2(state$p, state$w[2]) cbind(result1, result2) }, B = diag(2), S0Exg = matrix(c( 2, r, r, 2 ), 2, 2, TRUE), # Exogenous supplies GRExg = 0, p0 = p0, tolCond = 1e-7 ) } #' @export # Varian, H. R.(1992) Microeconomic Analysis... P351-353; (MWG) Mas-Colell, A., Whinston, M. and Green, J. (1995) Microeconomic Theory... P542, 15.C.2. # case 'CD_3_3' # column 1: firm (say, wheat producer); # column 2: firm owner; # column 3: laborer; # row 1: wheat; # row 2: ownership of firm (or land, see Varian (1992, P353)); # row 3: labor (or leisure); Example.Varian.P352 <- function(agent.number = 3) { if (agent.number == 3) { sdm( A = function(state) { a <- 0.5 alpha <- rep(1, 3) Beta <- matrix(c( 0, a, a, 0.5, 0, 0, 0.5, 1 - a, 1 - a ), 3, 3, TRUE) CD_A(alpha, Beta, state$p) # Demand functions of agents constitute an input coefficient matrx. }, B = diag(3), S0Exg = matrix(c( NA, NA, NA, NA, 1, NA, NA, NA, 1 ), 3, 3, TRUE), # Exogenous supplies, GRExg = 0, tolCond = 1e-10 ) } else { sdm( A = function(state) { a <- 0.5 alpha <- rep(1, 2) Beta <- matrix(c( 0, a, 0.5, 0, 0.5, 1 - a ), 3, 2, TRUE) CD_A(alpha, Beta, state$p) }, B = matrix(c( 1, 0, 0, 1, 0, 1 ), 3, 2, TRUE), S0Exg = matrix(c( NA, NA, NA, 1, NA, 1 ), 3, 2, TRUE), GRExg = 0, tolCond = 1e-10 ) } } #' @export # MWG_exchange_CES_2_2, MWG, P541 Example.MWG.Exercise.15.B.6 <- function(p0 = c(1, 2)) { sdm( A = function(state) { alpha <- c(1, 1) Beta <- matrix(c( 1, (12 / 37)^3, (12 / 37)^3, 1 ), 2, 2, TRUE) CES_A(c(-2, -2), alpha, Beta, state$p) }, B = diag(2), S0Exg = diag(2), GRExg = 0, p0 = p0, priceAdjustmentVelocity = 0.3, tolCond = 1e-10 ) } #' @export # MWG_exchange_CD_2_2, MWG, 15.B.1, P519 Example.MWG.15.B.1 <- function(a = 0.1, S0Exg = matrix(c(1, 2, 2, 1), 2, 2, TRUE)) { sdm( A = function(state) { alpha <- c(1, 1) Beta <- matrix(c( a, a, 1 - a, 1 - a ), 2, 2, TRUE) CD_A(alpha, Beta, state$p) }, B = diag(2), S0Exg = S0Exg, GRExg = 0 ) } #' @export # MWG_exchange_Leontief_Leontief(CD)_2_2, MWG, P541, 15.B.9 Example.MWG.Exercise.15.B.9 <- function(S0Exg = matrix(c( 30, 0, 0, 20 ), 2, 2, TRUE)) { sdm( A = function(state) { result1 <- c(1, 1) result2 <- c(1, state$z[2]) cbind(result1, result2) }, B = diag(2), S0Exg = S0Exg, GRExg = 0, z0 = c(1, 1) ) } #' @export # labor_CD_3_4, Varian (1992), P357, 18.2. 
# column 1: wheat producer; # column 2: iron producer; # column 3: laborer 1; # column 4: laborer 2; # row 1: wheat; # row 2: iron; # row 3: labor; Example.Varian.Exercise.18.2 <- function() { sdm( A = function(state) { alpha <- c(2, 3, 1, 1) Beta <- matrix(c( 0, 0, 0.4, 0.5, 0, 0, 0.6, 0.5, 1, 1, 0, 0 ), 3, 4, TRUE) CD_A(alpha, Beta, state$p) }, B = matrix(c( 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1 ), 3, 4, TRUE), S0Exg = { S0Exg <- matrix(NA, 3, 4) S0Exg[3, 3] <- 10 S0Exg[3, 4] <- 10 S0Exg }, GRExg = 0 ) }
/scratch/gouwar.j/cran-all/cranData/CGE/R/MWG_Varian_examples.R
#' @export
# Compute the PF eigenvalue and eigenvector.
PF_eig <- function(M) {
  ev <- eigen(M)
  tmp <- Re(ev$values)
  indx <- which(tmp == max(tmp))
  if (length(indx) != 1) {
    print(M)
    print(ev$values)
    print(indx)
    stop("Li:PF_eig, none or multiple PF eig value")
  }
  PFVector <- ev$vectors[, indx]
  PFVector <- PFVector / sum(PFVector)
  list(val = abs(ev$values[indx]), vec = abs(PFVector))
}
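# A minimal usage sketch with an illustrative matrix:
#   PF_eig(matrix(c(0, 1,
#                   1, 0), 2, 2))
# returns val = 1 and vec = c(0.5, 0.5): the dominant (Perron-Frobenius)
# eigenvalue of a nonnegative matrix together with its eigenvector,
# normalized so that its entries sum to one.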
/scratch/gouwar.j/cran-all/cranData/CGE/R/PF_eig.R
#' @export
dg <- function(x) {
  if (length(x) == 1) {
    return(x)
  }
  if (is.vector(x) || nrow(x) == 1 || ncol(x) == 1) {
    return(diag(c(x)))
  } else {
    return(diag(x))
  }
}
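# Behaviour in brief: dg() is a small wrapper around diag() used throughout
# this package. For a vector (or a one-row/one-column matrix) it builds the
# corresponding diagonal matrix, for a scalar it returns the scalar unchanged,
# and for any other matrix it extracts the diagonal, as base diag() does, e.g.
#   dg(c(1, 2, 3))   # a 3 x 3 diagonal matrix
#   dg(5)            # 5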
/scratch/gouwar.j/cran-all/cranData/CGE/R/dg.R
#' @export iep <- function(A.iep = NULL, A = NULL, B.iep = NULL, B = NULL, SExg.iep, InitialEndowments, nPeriods.iep, ...) { ge.next <- function(._ge, ._A, ._B, ._SExg.iep, ...) { result <- sdm( A = ._A, B = ._B, S0Exg = { if (is.function(._B)) { S0Exg <- ._B(list(p = ._ge$p)) %*% diag(._ge$z) } else { S0Exg <- ._B %*% diag(._ge$z) } S0Exg <- ifelse(!is.na(._SExg.iep), ._SExg.iep, S0Exg) }, p0 = ._ge$p, z0 = ._ge$z, ... ) } ge <- sdm( A = { if (is.null(A.iep)) A else A.iep(list(time = 1)) }, B = { if (is.null(B.iep)) B else B.iep(list(time = 1)) # Actually B will be ignored here. }, S0Exg = InitialEndowments, ... ) ge.list <- list(ge) for (time.iep in 2:nPeriods.iep) { state.iep <- list( time = time.iep, p = ge.list[[time.iep - 1]]$p, z = ge.list[[time.iep - 1]]$z ) ge <- ge.next( ge.list[[time.iep - 1]], if (is.null(A.iep)) A else A.iep(state.iep), if (is.null(B.iep)) B else B.iep(list(time = time.iep - 1)), if (is.function(SExg.iep)) SExg.iep(state.iep) else SExg.iep, ... ) ge.list[[time.iep]] <- ge } return(ge.list) }
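# Descriptive note (interpretation inferred from the code): iep() computes an
# instantaneous equilibrium path. Period 1 is solved by sdm() with the supplied
# InitialEndowments; in every later period the exogenous supplies are taken
# from the previous equilibrium's output (B %*% diag(z)), overridden by
# SExg.iep wherever that argument is not NA, and the previous prices and
# activity levels are used as starting values. The result is a list with one
# general equilibrium per period (nPeriods.iep in total).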
/scratch/gouwar.j/cran-all/cranData/CGE/R/iep.R
#' @import grDevices #' @import graphics #' @import utils #' @export sdm <- function(A, B = diag(nrow(A)), n = nrow(B), m = ncol(B), S0Exg = matrix(NA, n, m), p0 = matrix(1, nrow = n, ncol = 1), z0 = matrix(100, nrow = m, ncol = 1), GRExg = NA, moneyOwnerIndex = NULL, moneyIndex = NULL, pExg = NULL, tolCond = 1e-5, maxIteration = 200, numberOfPeriods = 300, depreciationCoef = 0.8, thresholdForPriceAdjustment = 0.99, priceAdjustmentMethod = "variable", priceAdjustmentVelocity = 0.15, trace = TRUE, ts = FALSE, policy = NULL, exchangeFunction = F_Z) { ##### definition of economic transition function xNext xNext <- function(xt) { p_t <- xt$p S_t <- xt$S q_t <- xt$q z_t <- xt$z e_t <- xt$e if (all(is.na(e_t))) { e_tp1 <- NA } p_tp1 <- p_t switch( priceAdjustmentMethod, "fixed" = { for (ka in 1:n) { if (q_t[ka] <= thresholdForPriceAdjustment) { p_tp1[ka] <- p_t[ka] * (1 - priceAdjustmentVelocity) } else { p_tp1[ka] <- p_t[ka] } } p_tp1 <- p_tp1 / sum(p_tp1) # normalize }, "variable" = { p_tp1 <- p_t * (1 - priceAdjustmentVelocity * (1 - q_t)) if (any(!is.na(pExg))) { tmpIndex <- which(!is.na(pExg)) tmpIndex <- tmpIndex[1] p_tp1 <- p_tp1 / p_tp1[tmpIndex] * pExg[tmpIndex] p_tp1[!is.na(pExg)] <- pExg[!is.na(pExg)] } else { p_tp1 <- p_tp1 / sum(p_tp1) # normalize } }, "monetary" = { # monetary economy p_tp1 <- p_t * (1 - priceAdjustmentVelocity * (1 - q_t)) e_tp1 <- e_t * (1 - priceAdjustmentVelocity * (1 - q_t[moneyIndex])) p_tp1 <- p_tp1 / e_tp1[1] e_tp1 <- e_tp1 / e_tp1[1] if (!is.null(pExg) && length(pExg) != 0) { p_tp1[!is.na(pExg)] <- pExg[!is.na(pExg)] } }, stop("LI: Wrong priceAdjustmentMethod!,xnext") ) # switch if (is.numeric(B)) { B_t <- B } else { B_t <- B(list(p = p_t, z = z_t, t = time - 1)) } S_tp1 <- sweep(B_t, 2, z_t, "*") + sweep(S_t, 1, depreciationCoef * (1 - q_t), "*") if (all(is.na(S0Exg))) { } # Set exogenous supply else { S_tp1[!is.na(S0Exg)] <- S_t[!is.na(S0Exg)] * (1 + GRExg) if (any(!is.na(e_t))) { for (i in 1:length(e_t)) { S_tp1[moneyIndex[i], moneyOwnerIndex[i]] <- S_tp1[moneyIndex[i], moneyOwnerIndex[i]] / e_t[i] * e_tp1[i] } } } if (!is.null(policy)) { tmp <- policy( time = time, state = list(p = p_tp1, S = S_tp1), state.history = list( p = t(p), S = S, q = t(q), z = t(z), e = t(e) ) ) # 20181204 p_tp1 <- tmp$p S_tp1 <- tmp$S if (!is.null(tmp$current.policy.data)) { policy.data <<- rbind(policy.data, tmp$current.policy.data) } } # 20181205 if (is.numeric(A)) { A_tp1 <- A } else { A_tp1 <- A(list( p = p_tp1, z = z_t, w = t(p_tp1) %*% S_tp1, t = time, e = e_tp1 )) } # 20140614,add e tmp <- exchangeFunction(A_tp1, p_tp1, S_tp1) q_tp1 <- tmp$q z_tp1 <- tmp$z if (any(z_tp1 < 0)) { if (any(z_tp1[z_tp1 < 0] > -0.01)) { z_tp1[z_tp1 < 0] <- 0 warning("LI: negative_z,z_tp1<0") } else { message(z_tp1) stop("Li: negative_z") } } list( p = p_tp1, S = S_tp1, q = q_tp1, z = z_tp1, e = e_tp1 ) } # xNext ##### the end of definition of economic function xNext # beginning --------------------------------------------------------------- substitutionMethod <- "finalValue" # the substitution method for iterations. priceAdjustmentVelocityCoefficient <- 0.95 # the changing coefficient of the price adjustment velocity. 
result <- c() if (is.na(GRExg) && !all(is.na(S0Exg))) GRExg <- 0 if (trace) { message(paste("tolCond: ", tolCond)) } p <- matrix(0, n, numberOfPeriods) S <- array(0, dim = c(n, m, numberOfPeriods)) q <- matrix(0, n, numberOfPeriods) z <- matrix(0, m, numberOfPeriods) if (length(moneyIndex) > 1) { e <- matrix(0, length(moneyIndex), numberOfPeriods) } else { e <- matrix(0, 1, numberOfPeriods) } if (!is.null(policy)) policy.data <- data.frame() if (all(is.na(S0Exg))) { S0 <- matrix(0, n, m) } else { S0 <- S0Exg S0[is.na(S0)] <- 0 } firstExgSupplyIndex <- which(!is.na(c(S0Exg)))[1] # Here we may get NA, not empty! if (!is.null(moneyIndex) && !all(is.na(moneyIndex))) { priceAdjustmentMethod <- "monetary" # for monetary economy e0 <- matrix(1, length(moneyIndex), 1) } else { e0 <- NA } time <- 1 xtp1 <- xNext(list( p = p0, S = S0, q = matrix(1, n, 1), z = z0, e = e0 )) p[, 1] <- xtp1$p S[, , 1] <- xtp1$S q[, 1] <- xtp1$q z[, 1] <- xtp1$z e[, 1] <- xtp1$e toleranceRec <- matrix(1, maxIteration, 1) for (k.iteration in 1:maxIteration) { for (t in 2:numberOfPeriods) { time <- time + 1 xt <- c() xt$p <- p[, t - 1] xt$S <- S[, , t - 1] dim(xt$S) <- c(n, m) xt$q <- q[, t - 1] xt$z <- z[, t - 1] xt$e <- e[, t - 1] xtp1 <- xNext(xt) p[, t] <- xtp1$p S[, , t] <- xtp1$S q[, t] <- xtp1$q z[, t] <- xtp1$z e[, t] <- xtp1$e } # for (t in 2:numberOfPeriods) ##### the end of an iteration if (ts) { result$ts.p <- t(p) result$ts.z <- t(z) result$ts.S <- S result$ts.q <- t(q) result$ts.e <- t(e) } tmp1 <- z[, ncol(z)] / max(z[, ncol(z)]) tmp2 <- z[, ncol(z) - 1] / max(z[, ncol(z) - 1]) toleranceZ <- max(abs(tmp1 - tmp2)) tmpU <- apply(q[, (ncol(q) - 20):ncol(q)], 1, min) tmpU <- tmpU[p[, ncol(p)] > tolCond] toleranceU <- max(1 - tmpU) tolerance <- max(c(toleranceU, toleranceZ)) toleranceRec[k.iteration] <- tolerance if ((maxIteration > 1 && tolerance > 0.99) || (k.iteration >= 5 && (toleranceRec[k.iteration] / toleranceRec[k.iteration - 1] > 0.9)) ) { # converge slowly if (!is.na(GRExg) && GRExg == 0) { substitutionMethod <- "meanValue" } else { substitutionMethod <- "pMeanValue" } message(paste("Iteration ", k.iteration, ", substitutionMethod: ", substitutionMethod)) if (k.iteration > 10 && (toleranceRec[k.iteration] / toleranceRec[k.iteration - 1] > 0.95)) { priceAdjustmentVelocity <- priceAdjustmentVelocity * priceAdjustmentVelocityCoefficient } } if (maxIteration > 1 && !is.na(GRExg) && GRExg == 0 && toleranceU < tolCond && toleranceZ >= tolCond) { substitutionMethod <- "zMeanValue" message(paste("substitutionMethod: ", substitutionMethod)) } S0 <- S[, , dim(S)[3]] dim(S0) <- c(n, m) switch( substitutionMethod, "pMeanValue" = { p0 <- apply(p, 1, mean) # message(paste("p0:",p0)) }, "zMeanValue" = { z0 <- apply(z, 1, mean) }, "meanValue" = { p0 <- apply(p, 1, mean) z0 <- apply(z, 1, mean) # message(paste("p0:",p0)) # message(paste("z0:",z0)) }, "finalValue" = { p0 <- p[, ncol(p)] z0 <- z[, ncol(z)] }, stop("Li: wrong substitutionMethod!") ) if (!is.na(firstExgSupplyIndex)) { # There are exogenous supplies. 
z0 <- z0 / S0[firstExgSupplyIndex] * S0Exg[firstExgSupplyIndex] S0 <- S0 / S0[firstExgSupplyIndex] * S0Exg[firstExgSupplyIndex] } else { S0 <- S0 / max(z0) z0 <- z0 / max(z0) } if (trace) { message(paste("Iteration number ", k.iteration, ": tolerance coefficient ", tolerance)) } if (tolerance < tolCond) { break } if (k.iteration < maxIteration) { xtp1 <- xNext(list( p = p0, S = S0, q = matrix(1, n, 1), z = z0, e = t(tail(t(e), 1)) )) p[, 1] <- xtp1$p S[, , 1] <- xtp1$S q[, 1] <- xtp1$q z[, 1] <- xtp1$z e[, 1] <- xtp1$e } } # for (k.iteration in 1:maxIteration) # result ------------------------------------------------------------------ result$tolerance <- tolerance result$p <- p0 result$z <- z0 result$S <- S0 if (any(!is.na(e0))) { result$e <- e[, ncol(e)] } if (all(is.na(S0Exg))) { result$growthRate <- max(z[, ncol(z)]) / max(z[, ncol(z) - 1]) - 1 } if (is.numeric(A)) { result$A <- A } else { tmpS <- S0Exg tmpS[is.na(tmpS)] <- 0 result$A <- A(list( p = result$p, z = result$z, w = result$p %*% tmpS, t = 1, e = result$e )) # 20140614,add e } if (is.function(B)) { result$B <- B(list(p = result$p, z = result$z, t = 1)) } # 20190214 if (!is.null(policy) && length(policy.data) != 0) { result$policy.data <- policy.data } return(result) }
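# A minimal usage sketch (values are illustrative; it mirrors the two-sector
# Cobb-Douglas corn economy used in the examples of this package):
#   ge <- sdm(
#     A = function(state) {
#       CD_A(c(1, 1), matrix(c(0.5, 0.4,
#                              0.5, 0.6), 2, 2, TRUE), state$p)
#     },
#     B = diag(2),
#     S0Exg = matrix(c(NA, NA,
#                      NA, 100), 2, 2, TRUE),
#     GRExg = 0
#   )
#   ge$p   # equilibrium relative prices
#   ge$z   # equilibrium activity levels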
/scratch/gouwar.j/cran-all/cranData/CGE/R/sdm.R
# LI Wu ([email protected]). Shanghai University. Mathematical Economics. #' @export # Cobb-Douglas pure production Example2.2 <- function() { sdm( A = function(state) { alpha <- rbind(5, 3, 1) Beta <- matrix(c( 0.6, 0.4, 0.2, 0.1, 0.4, 0.7, 0.3, 0.2, 0.1 ), 3, 3, TRUE) CD_A(alpha, Beta, state$p) }, B = diag(3) ) } #' @export # von Neumann economy Example2.3 <- function() { sdm( A = matrix(c( 0.8, 0.5, 0.06, 2, 2, 0.4 ), 2, 3, T), B = matrix(c( 1, 1, 0, 0, 0, 1 ), 2, 3, T) ) } #' @export # Setion 3.1.2, Leontief two-sector corn economy Example.Section.3.1.2.corn <- function() { sdm( A = matrix(c( 0.5, 1, 1, 0 ), 2, 2, TRUE), B = diag(2), S0Exg = matrix(c( NA, NA, NA, 100 ), 2, 2, TRUE), GRExg = 0 ) } #' @export # two-sector corn economy with non-homothetic utility function Example3.1 <- function() { GRExg <- 0.2 rho <- 1 / (1 + GRExg) sdm( GRExg = GRExg, A = function(state) { with(state, { matrix(c( 0.5, (-1 / 2 * (p[2] - (p[2]^2 + 4 * p[1] * p[2] * rho)^(1 / 2)) / p[1])^2, 1, -1 / 2 * (p[2] - (p[2]^2 + 4 * p[1] * p[2] * rho)^(1 / 2)) / p[1] ), 2, 2, T) }) }, B = diag(2), S0Exg = matrix(c( NA, NA, NA, 100 ), 2, 2, TRUE) ) } #' @export # Cobb-Douglas two-sector corn economy Example3.2 <- function() { sdm( A = function(state) { alpha <- rbind(1, 1) Beta <- matrix(c( 0.5, 0.4, 0.5, 0.6 ), 2, 2, TRUE) CD_A(alpha, Beta, state$p) }, B = diag(2), S0Exg = matrix(c( NA, NA, NA, 100 ), 2, 2, TRUE), GRExg = 0 ) } #' @export # Lontief three-sector economy with one primary factor Example3.4 <- function() { sdm( A = matrix(c( 0, 0.4, 1, 0.5, 0, 0, 0.3, 0.4, 0 ), 3, 3, T), B = diag(3), S0Exg = { tmp <- matrix(NA, 3, 3) tmp[3, 3] <- 100 tmp }, GRExg = 0 ) } #' @export # Cobb-Douglas three-sector economy with one primary factor Example3.8 <- function() { sdm( A = function(state) { alpha <- rbind(5, 3, 1) Beta <- matrix(c( 0.6, 0.4, 0.2, 0.1, 0.4, 0.7, 0.3, 0.2, 0.1 ), 3, 3, TRUE) CD_A(alpha, Beta, state$p) }, B = diag(3), S0Exg = { tmp <- matrix(NA, 3, 3) tmp[3, 3] <- 100 # Iron is both product and primary factor. 
# (ii) tmp[2,3]<-100 # (iii) tmp[2,3]<- -1 tmp }, GRExg = 0 ) } #' @export # Cobb-Douglas three-sector economy with two primary factors Example3.9 <- function() { sdm( A = function(state) { alpha <- rbind(5, 3, 1) Beta <- matrix(c( 0.6, 0.4, 0.2, 0.1, 0.4, 0.7, 0.3, 0.2, 0.1 ), 3, 3, TRUE) CD_A(alpha, Beta, state$p) }, B = diag(3), S0Exg = { tmp <- matrix(NA, 3, 3) tmp[2, 2] <- 60 tmp[3, 3] <- 100 tmp }, GRExg = 0 ) } #' @export # Leontief corn economy with three primary factors Example3.10 <- function() { sdm( A = matrix(c( 0, 0, 1, 1, 1, 0.5, 1, 0, 0, 0, 0.5, 0, 0, 0, 0, 0, 1, 0, 0, 0 ), 4, 5, TRUE), B = matrix(c( 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1 ), 4, 5, TRUE), S0Exg = { tmp <- matrix(NA, 4, 5) tmp[2, 3] <- 30 tmp[3, 4] <- 20 tmp[4, 5] <- 20 tmp }, GRExg = 0 ) } #' @export # decreasing returns to scale Example3.12 <- function() { sdm( A = function(state) { with(state, { matrix(c( 0, 0, (w[3] / 10000 - 0.4 * p[3]) / (3 * p[1]), 0, 0, (w[3] / 10000 - 0.4 * p[3]) / (3 * p[2]), (p[4] / p[3])^0.5, 0.5 * (p[5] / p[3])^0.5, 0.4 + (w[3] / 10000 - 0.4 * p[3]) / (3 * p[3]), (p[4] / p[3])^(-0.5), 0, 0, 0, 0.5 * (p[5] / p[3])^(-0.5), 0 ), 5, 3, T) }) }, B = matrix(c( 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1 / 10000, 0, 0, 1 / 10000 ), 5, 3, TRUE), S0Exg = { tmp <- matrix(NA, 5, 3) tmp[3, 3] <- 10000 tmp[4, 3] <- 1 tmp[5, 3] <- 1 tmp }, GRExg = 0 ) } #' @export # regular economy and pure exchange economy Example3.14 <- function() { sdm( A = function(state) { alpha <- rbind(1, 1) Beta <- matrix(c( 0.5, 0.75, 0.5, 0.25 ), 2, 2, TRUE) CD_A(alpha, Beta, state$p) }, B = diag(2), S0Exg = matrix(c( 60, 0, 0, 100 ), 2, 2, T), GRExg = 0 ) } #' @export # non-sufficient supply of the primary factor Example4.2 <- function() { sdm( A = function(state) { sigma <- rbind(-1, -1, -1) alpha <- rbind(1, 1, 1) Beta <- matrix(c( 0, 1, 1, 1, 0, 0, 1, 0, 0 ), 3, 3, TRUE) CES_A(sigma, alpha, Beta, state$p) }, B = diag(3), S0Exg = matrix(c( NA, NA, NA, NA, 100, NA, NA, NA, 100 ), 3, 3, T), GRExg = 0 ) } #' @export # increasing returns to scale Example4.8 <- function() { sdm( A = function(state) { alpha <- rbind(1, 1) Beta <- matrix(c( 0.5, 1, 0.5, 0 ), 2, 2, TRUE) CD_A(alpha, Beta, state$p) %*% diag(c(state$z[1]^(-1 / 4), 1)) }, B = diag(2), S0Exg = matrix(c( NA, NA, NA, 100 ), 2, 2, TRUE), GRExg = 0 ) } #' @export # price signal Example4.9 <- function() { sdm( A = function(state) { alpha <- rbind(1, 1) Beta <- matrix(c( 0.5, 0.4, 0.5, 0.6 ), 2, 2, TRUE) CD_A(alpha, Beta, state$p) %*% diag(c(state$z[1]^(-1 / 4), 1)) }, B = diag(2), S0Exg = matrix(c( NA, NA, NA, 100 ), 2, 2, TRUE), GRExg = 0 ) } #' @export # tax Example4.10 <- function() { sdm( A = function(state) { alpha <- rbind(5, 3, 1) Beta <- matrix(c( 0.6, 0.4, 0.2, 0.1, 0.4, 0.7, 0.3, 0.2, 0.1, 0, 0, 0 ), 4, 3, TRUE) tau <- 0.1 Tax <- matrix(c( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, tau / (1 + tau), 0 ), 4, 3, TRUE) CD_A(alpha, Beta, state$p) + state$p[2] * Tax / state$p[4] }, B = matrix(c( 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0 ), 4, 3, TRUE), S0Exg = { tmp <- matrix(NA, 4, 3) tmp[3, 3] <- 100 tmp }, GRExg = 0 ) } #' @export Example4.11.1 <- function() { sdm( A = function(state) { tau <- 1 result <- matrix(NA, 3, 3) result[1:2, 1] <- CD_A(1, rbind(0.5, 0.5), state$p[1:2]) # result[3,1]<-state$p[1]*tau/(1+tau)/state$p[3] result[3, 1] <- tau * (state$p[1] * result[1, 1] + state$p[2] * result[2, 1]) / state$p[3] result[1:2, 2] <- CD_A(1, rbind(0.5, 0.5), state$p[1:2]) result[3, 2] <- 0 result[, 3] <- c(1, 0, 0) result }, B = matrix(c( 1, 0, 0, 0, 1, 1, 0, 0, 1 ), 
3, 3, TRUE), S0Exg = matrix(c( NA, NA, NA, NA, 100, 100, NA, NA, 100 ), 3, 3, T), GRExg = 0 ) } #' @export Example4.11.2 <- function() { sdm( A = function(state) { tau <- 1 result <- matrix(NA, 3, 3) result[1:2, 1] <- CD_A(1, rbind(0.5, 0.5), state$p[1:2]) result[3, 1] <- 0 result[1:2, 2] <- CD_A(1, rbind(0.5, 0.5), state$p[1:2]) result[3, 2] <- (state$p[1] * result[1, 1] + state$p[2] * result[2, 1]) * tau / state$p[3] result[, 3] <- c(1, 0, 0) result }, B = matrix(c( 1, 0, 0, 0, 1, 1, 0, 0, 1 ), 3, 3, TRUE), S0Exg = matrix(c( NA, NA, NA, NA, 100, 100, NA, NA, 100 ), 3, 3, T), GRExg = 0 ) } #' @export Example4.12 <- function() { sdm( A = function(state) { tau <- 1 result <- matrix(NA, 3, 3) result[1:2, 1] <- CD_A(1, rbind(0.5, 0.5), rbind(state$p[1], state$p[2] * (1 + tau))) result[3, 1] <- state$p[2] * result[2, 1] * tau / state$p[3] result[1:2, 2] <- CD_A(1, rbind(0.5, 0.5), state$p[1:2]) result[3, 2] <- 0 result[, 3] <- c(1, 0, 0) result }, B = matrix(c( 1, 0, 0, 0, 1, 0, 0, 0, 100 ), 3, 3, TRUE), S0Exg = matrix(c( NA, NA, NA, NA, 200, NA, NA, NA, 100 ), 3, 3, T), GRExg = 0 ) } #' @export # divident Example4.13 <- function() { sdm( A = function(state) { r <- 0.25 result <- matrix(NA, 3, 3) result[1:2, 1] <- CD_A(1, rbind(0.5, 0.5), state$p[1:2]) result[3, 1] <- r * (state$p[1] * result[1, 1] + state$p[2] * result[2, 1]) / state$p[3] result[, 2] <- c(1, 0, 0) result[, 3] <- c(1, 0, 0) result }, B = diag(3), S0Exg = matrix(c( NA, NA, NA, NA, 100, NA, NA, NA, 100 ), 3, 3, T), GRExg = 0 ) } #' @export # over-investment Example4.15 <- function() { sdm( A = function(state) { result <- matrix(NA, 2, 2) result[, 1] <- CD_A(1, rbind(0.5, 0.5), state$p) result[, 2] <- c(1, 0) result }, B = diag(2), S0Exg = matrix(c( NA, NA, 75, 25 ), 2, 2, T), GRExg = 0 ) } #' @export # technology monopoly Example4.16 <- function() { sdm( A = function(state) { alpha <- rbind(1, 1.5) Beta <- matrix(c( 0.5, 0.5, 0.5, 0.5 ), 2, 2, TRUE) result <- matrix(0, 3, 4) result[1:2, 1:2] <- CD_A(alpha, Beta, state$p[1:2]) result[3, 2] <- 1 result[1:3, 3] <- result[1:3, 4] <- c(1, 0, 0) result }, B = matrix(c( 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 ), 3, 4, TRUE), S0Exg = matrix(c( NA, NA, NA, NA, NA, NA, 100, NA, NA, NA, NA, 99.99 ), 3, 4, TRUE), GRExg = 0 ) } #' @export # fixed assets Example5.1 <- function() { sdm( A = matrix(c( 0.5, 0.9, 0.5, 0.5, 0.6, 0, 0, 0, 0, 0, 0.6, 0, 0, 0, 0, 0.6 ), 4, 4, TRUE), B = matrix(c( 1, 0, 1, 1, 0, 1, 0, 0, 0.6, 0, 0, 0, 0, 0, 0.6, 0 ), 4, 4, TRUE), priceAdjustmentVelocity = 0.05 ) } #' @export # fixed assets Example5.2 <- function() { sdm( A = matrix(c( 0.5, 0.9, 0.6, 0 ), 2, 2, TRUE), B = matrix(c( 1, 0, 0.4, 1 ), 2, 2, TRUE) ) } #' @export # fixed assets Example5.3.1 <- function() { sdm( A = matrix(c( 0.6, 0.4, 1, 0.1, 0.4, 0, 0.3, 0.2, 0 ), 3, 3, TRUE), B = diag(3), S0Exg = { tmp <- matrix(NA, 3, 3) tmp[3, 3] <- 100 tmp }, GRExg = 0 ) } #' @export # fixed assets Example5.3.2 <- function() { GRExg <- 0 v <- 1 / (2 + GRExg) sdm( A = matrix(c( 0.6, 0.4, 1, 0.1, 0.4, 0, 0.3, 0.2, 0 ), 3, 3, TRUE), GRExg = GRExg, B = matrix(c( 1, 0, 0, 0.1 * (1 - v), 1 + 0.4 * (1 - v), 0, 0, 0, 1 ), 3, 3, T), S0Exg = { tmp <- matrix(NA, 3, 3) tmp[3, 3] <- 100 tmp } ) } #' @export # fixed assets Example5.4 <- function() { GRExg <- 0.1 v <- 1 / (2 + GRExg) sdm( A = matrix(c( 0.6, 0.4, 1, 0, 0, 0, 0, 1 / (1 + GRExg), 0.3, 0.2, 0, 0, 0.1, 0.4, 0, 0 ), 4, 4, T), B = matrix(c( 1, 0, 0, 0, 0, 1, 0, (1 - v) / (1 + GRExg), 0, 0, 1, 0, 0, 0, 0, 1 ), 4, 4, TRUE), S0Exg = { tmp <- matrix(NA, 4, 4) tmp[3, 3] <- 100 tmp }, 
GRExg = GRExg, priceAdjustmentVelocity = 0.05 ) } #' @export # fixed assets Example5.5 <- function() { GRExg <- 0.1 rdm <- 0.1 rho <- 1 / (1 + GRExg) sdm( A = function(state) { result <- matrix(NA, 6, 5) result[1:5, ] <- matrix(c( 0.6, 0.4, 1, 0, 0, 0, 0, 0, rho, 0, 0.3, 0.2, 0, 0, 0, 0.1, 0.4, 0, 0, 0, 0, 0, 0, 0, rho ), 5, 5, T) result[6, 1:2] <- rbind(state$p[1:5]) %*% result[1:5, 1:2] * rdm / state$p[6] result[6, 3] <- 0 result[6, 4] <- (state$p[1:5] %*% result[1:5, 4] - state$p[4] * result[2, 4]) * rdm / state$p[6] result[6, 5] <- (state$p[1:5] %*% result[1:5, 5] - state$p[4] * result[5, 5]) * rdm / state$p[6] result }, B = matrix(c( 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, rho, 0, 0, 0, 1, 0, 0 ), 6, 5, TRUE), S0Exg = { tmp <- matrix(NA, 6, 5) tmp[3, 3] <- 100 tmp[6, 3] <- 100 tmp }, GRExg = GRExg, priceAdjustmentVelocity = 0.05 ) } #' @export # fixed assets Example5.6 <- function() { sdm( A = matrix(c( 0.5, 0.9, 1, 0.5, 0.5, 0.6, 0, 0, 0, 0, 0.2, 0.1, 0, 0.4, 0.8, 0, 0, 0, 0.6, 0, 0, 0, 0, 0, 0.6 ), 5, 5, TRUE), B = matrix(c( 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0.6, 0, 0, 0, 0, 0, 0, 0, 0.6, 0.6 ), 5, 5, TRUE), S0Exg = { tmp <- matrix(NA, 5, 5) tmp[3, 3] <- 100 tmp }, GRExg = 0, priceAdjustmentVelocity = 0.05 ) } #' @export # pollution Example5.10 <- function() { sdm( A = function(state) { result <- matrix(NA, 3, 3) result[, 1] <- c(0.5, 0.5, 0.1) result[, 2] <- c(0.1, 0, 0.1) result[, 3] <- CD_A(1, rbind(0.5, 0.5, 0), state$p) result }, B = diag(3), S0Exg = { tmp <- matrix(NA, 3, 3) tmp[2, 3] <- 30 tmp[3, 3] <- 3 tmp }, GRExg = 0 ) } #' @export Example5.11.1 <- function() { sdm( A = function(state) { result <- matrix(NA, 3, 2) result[, 1] <- CD_A(1, rbind(0, 0.5, 0.5), state$p) result[, 2] <- CD_A(1, rbind(0.5, 0.5, 0), state$p) result }, B = matrix(c( 1, 0, 0, 10, 0, 1 ), 3, 2, TRUE), S0Exg = matrix(c( NA, NA, NA, 30, NA, 3 ), 3, 2, TRUE), GRExg = 0 ) } #' @export Example5.11.2 <- function() { sdm( A = function(state) { result <- matrix(NA, 4, 2) result[, 1] <- CD_A(1, rbind(0, 0.5, 0.5, 0), state$p) result[, 2] <- CD_A(1, rbind(0.5, 0, 0, 0.5), state$p) result }, B = matrix(c( 1, 0, 0, 4, 0, 1, 0, 6 ), 4, 2, TRUE), S0Exg = matrix(c( NA, NA, NA, 12, NA, 3, NA, 18 ), 4, 2, TRUE), GRExg = 0 ) } #' @export # two-country economy Example6.2.1 <- function() { # see also Example6.8 # column 1: wheat producer of country 1; # column 2: iron producer of country 1; # column 3: laborer of country 1; # column 4: wheat producer of country 2; # column 5: iron producer of country 2; # column 6: laborer of country 2; # row 1: wheat (of country 1 and 2); # row 2: iron (of country 1 and 2); # row 3: labor of country 1; # row 4: labor of country 2; sdm( A = matrix(c( 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0.1, 0.4, 0, 0, 0, 0, 0, 0, 0, 0.5, 0.5, 0 ), 4, 6, TRUE), B = matrix(c( 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1 ), 4, 6, TRUE), S0Exg = matrix(c( NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 100, NA, NA, NA, NA, NA, NA, NA, NA, 100 ), 4, 6, TRUE), GRExg = 0 ) } #' @export Example6.2.2 <- function() { sdm( A = matrix(c( 0, 0, 1, 0, 0, 1, 0.5, 0.5, 0 ), 3, 3, TRUE), B = diag(3), S0Exg = matrix(c( NA, NA, NA, NA, NA, NA, NA, NA, 100 ), 3, 3, TRUE), GRExg = 0 ) } #' @export Example6.3 <- function() { sdm( A = matrix(c( 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0.1, 0.4, 0, 0, 0, 0, 0, 0, 0, 0.8, 0.2, 0 ), 4, 6, TRUE), B = matrix(c( 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1 ), 4, 6, TRUE), S0Exg = { tmp <- 
matrix(NA, 4, 6, TRUE) tmp[3, 3] <- 100 tmp[4, 6] <- 100 tmp }, GRExg = 0 ) } #' @export Example6.4 <- function() { sdm( A = matrix(c( 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0.1, 0.4, 0, 0, 0, 0, 0, 0, 0, 0.4, 0.1, 0 ), 4, 6, TRUE), B = matrix(c( 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1 ), 4, 6, TRUE), S0Exg = { tmp <- matrix(NA, 4, 6, TRUE) tmp[3, 3] <- 100 tmp[4, 6] <- 100 tmp }, GRExg = 0, p0 = rbind(1, 1, 1, 1) ) } #' @export Example6.5 <- function() { sdm( A = matrix(c( 0, 0, 1, 0, 0, 1, 0, 0, 0.25, 0, 0, 1, 0.1, 0.4, 0, 0, 0, 0, 0, 0, 0, 0.8, 0.2, 0 ), 4, 6, TRUE), B = matrix(c( 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1 ), 4, 6, TRUE), S0Exg = { tmp <- matrix(NA, 4, 6, TRUE) tmp[3, 3] <- 100 tmp[4, 6] <- 100 tmp }, GRExg = 0, p0 = rbind(1, 1, 1, 1), priceAdjustmentVelocity = 0.05 ) } #' @export Example6.6.1 <- function() { sdm( A = function(state) { result <- matrix(NA, 6, 8) result[, 1] <- CD_A(1, rbind(0, 0, 0.5, 0.5, 0, 0), state$p) result[, 2] <- c(0, 0, 0, 0.5, 0, 0) result[, 3] <- c(1, 1, 0, 0, 0, 0) result[, 4] <- c(1, 1, 0, 0, 0, 0) result[, 5] <- CD_A(1, rbind(0, 0, 0, 0, 0.5, 0.5), state$p) result[, 6] <- c(0, 0, 0, 0, 0, 0.5) result[, 7] <- c(1, 0.5, 0, 0, 0, 0) result[, 8] <- c(1, 0.5, 0, 0, 0, 0) result }, B = matrix(c( 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1 ), 6, 8, TRUE), S0Exg = { tmp <- matrix(NA, 6, 8, TRUE) tmp[3, 3] <- 100 tmp[4, 4] <- 100 tmp[5, 7] <- 100 tmp[6, 8] <- 100 tmp }, GRExg = 0 ) } #' @export # country 1 Example6.6.2 <- function() { sdm( A = function(state) { result <- matrix(NA, 4, 4) result[, 1] <- CD_A(1, rbind(0, 0, 0.5, 0.5), state$p) result[, 2] <- c(0, 0, 0, 0.5) result[, 3] <- c(1, 1, 0, 0) result[, 4] <- c(1, 1, 0, 0) result }, B = diag(4), S0Exg = { tmp <- matrix(NA, 4, 4, TRUE) tmp[3, 3] <- 100 tmp[4, 4] <- 100 tmp }, GRExg = 0 ) } #' @export # country 2 Example6.6.3 <- function() { sdm( A = function(state) { result <- matrix(NA, 4, 4) result[, 1] <- CD_A(1, rbind(0, 0, 0.5, 0.5), state$p) result[, 2] <- c(0, 0, 0, 0.5) result[, 3] <- c(1, 0.5, 0, 0) result[, 4] <- c(1, 0.5, 0, 0) result }, B = diag(4), S0Exg = { tmp <- matrix(NA, 4, 4, TRUE) tmp[3, 3] <- 100 tmp[4, 4] <- 100 tmp }, GRExg = 0 ) } #' @export Example6.7 <- function() { sdm( A = matrix(c( 0, 0, 1, 0, 0, 1, 0.5, 0.5, 0, 0.5, 0.5, 0, 0.4, 0.2, 0, 0, 0, 0, 0, 0, 0, 0.2, 0.08, 0 ), 4, 6, TRUE), B = matrix(c( 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1 ), 4, 6, TRUE), S0Exg = { tmp <- matrix(NA, 4, 6, TRUE) tmp[3, 3] <- 100 tmp[4, 6] <- 100 # 1e-6 tmp }, GRExg = 0, priceAdjustmentVelocity = 0.05 ) } #' @export Example6.9 <- function() { sdm( A = function(state) { taxRate <- 0.05 matrix(c( 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0.1, 0.4, 0, 0, 0, 0, 0, 0, 0, 0, 0.5, 0.5, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, state$p[1] * taxRate / state$p[6] ), 6, 7, TRUE) }, B = matrix(c( 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0 ), 6, 7, TRUE), S0Exg = { tmp <- matrix(NA, 6, 7, TRUE) tmp[3, 3] <- 100 tmp[4, 6] <- 100 tmp }, GRExg = 0, priceAdjustmentVelocity = 0.05 ) } #' @export Example6.10 <- function() { sdm( A = function(state) { b <- 2 / 3 sigma <- rbind(-1, -1, -1, -1) alpha <- rbind(1, 1, 1, 1) Beta <- matrix(c( 0, 1, 0, 1, b, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0 ), 4, 4, TRUE) CES_A(sigma, alpha, Beta, state$p) }, B = 
diag(4), S0Exg = { tmp <- matrix(NA, 4, 4, T) tmp[2, 2] <- 100 tmp[4, 4] <- 100 tmp }, GRExg = 0 ) } #' @export Example6.11 <- function() { sdm( A = matrix(c( 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0.1, 0.4, 0, 0, 0, 0, 0, 0, 0, 0.5, 0.5, 0 ), 4, 6, TRUE), B = matrix(c( 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1 ), 4, 6, TRUE), S0Exg = { tmp <- matrix(NA, 4, 6, TRUE) tmp[3, 3] <- 1000 tmp[4, 6] <- 100 tmp }, GRExg = 0 ) } #' @export Example6.13 <- function() { sdm( A = function(state) { alpha <- rep(1, 6) Beta <- matrix(c( 0, 0, 1, 0, 0, 1, 0.5, 0.5, 0, 0.5, 0.5, 0, 0.5, 0.5, 0, 0, 0, 0, 0, 0, 0, 0.5, 0.5, 0 ), 4, 6, TRUE) tmpA <- CD_A(alpha, Beta, state$p) tmp.z <- ifelse(state$z < 1e-10, 1e-10, state$z) tmpA %*% dg(c(tmp.z[1:2]^-0.25, 1, tmp.z[4:5]^-0.25, 1)) }, B = matrix(c( 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1 ), 4, 6, TRUE), S0Exg = matrix(c( NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 100, NA, NA, NA, NA, NA, NA, NA, NA, 100 ), 4, 6, TRUE), GRExg = 0, maxIteration = 1, z0 = c(200, 100, 100, 100, 200, 100), # (ii) # z0=c(100,200,100,200,100,100), #(iii) priceAdjustmentVelocity = 0.05, ts = TRUE, policy = function(time, state, state.history) { state$S <- ifelse(state$S > 0 & state$S < 1e-10, 1e-10, state$S) state } ) } #' @export Example7.1 <- function() { sdm( A = function(state) { alpha <- rbind(1, 1, 1) Beta <- matrix(c( 0.64, 0.4, 0.4, 0.16, 0.4, 0.4, 0.2, 0.2, 0.2 ), 3, 3, TRUE) CD_A(alpha, Beta, state$p) }, B = diag(3), S0Exg = { tmp <- matrix(NA, 3, 3) tmp[1, 1] <- 100 tmp[2, 2] <- 100 tmp[3, 3] <- 100 tmp }, GRExg = 0 ) } #' @export Example7.2 <- function() { # column 1: wheat producer; # column 2: laborer; # column 3: money owner; # row 1: wheat; # row 2: labor; # row 3: money; sdm( A = function(state) { alpha <- rbind(1, 1, 1) Beta <- matrix(c( 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, -1, -1, -1 ), 3, 3, TRUE) CD_mA(alpha, Beta, state$p) }, B = diag(3), S0Exg = { tmp <- matrix(NA, 3, 3) tmp[2, 2] <- 100 tmp[3, 3] <- 100 tmp }, GRExg = 0, moneyIndex = 3, moneyOwnerIndex = 3, pExg = rbind(NA, NA, 0.25) # (i) # pExg=rbind(NA, NA, 1e-6) #(ii) ) } #' @export Example7.3 <- function() { # column 1: wheat producer; # column 2: laborer; # column 3: money owner; # row 1: wheat; # row 2: labor; # row 3: money; sdm( A = function(state) { tmpA <- matrix(c( 0.5, 1, 1, 0.1, 0, 0, -1, -1, -1 ), 3, 3, TRUE) Leontief_mA(tmpA, state$p) }, B = diag(3), S0Exg = { tmp <- matrix(NA, 3, 3) tmp[2, 2] <- 100 tmp[3, 3] <- 100 tmp }, GRExg = 0, moneyIndex = 3, moneyOwnerIndex = 3, pExg = rbind(NA, NA, 0.25) ) } #' @export Example7.4 <- function() { sdm( moneyIndex = 3, moneyOwnerIndex = 3, A = function(state) { alpha <- c(1, 1, 1) Beta <- matrix(c( 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, -1, -1, -1 ), 3, 3, TRUE) CD_mA(alpha, Beta, state$p) }, B = diag(3), S0Exg = { tmp <- matrix(NA, 3, 3) tmp[2, 2] <- 100 tmp[3, 3] <- 100 tmp }, GRExg = 0.1, pExg = rbind(NA, NA, 0.25) ) } #' @export Example7.5.1 <- function() { r <- rs <- 0.25 sdm( A = function(state) { alpha <- rbind(1, 1, 1, 1) Beta <- matrix(c( 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, -1, -1, -1, -1 ), 3, 4, TRUE) result <- matrix(0, 4, 4) result[1:3, ] <- CD_mA(alpha, Beta, state$p[1:3]) result[4, 1] <- result[3, 1] * (1 + r) * rs / state$p[4] result }, B = diag(4), S0Exg = { tmp <- matrix(NA, 4, 4) tmp[2, 2] <- tmp[3, 3] <- tmp[4, 4] <- 100 tmp }, GRExg = 0.1, # (i) 0 (ii) 0.1 moneyIndex = 3, moneyOwnerIndex = 3, pExg = rbind(NA, NA, r, NA) ) } #' @export Example7.5.2 <- function() { r <- rs <- 
0.1423 sdm( A = function(state) { alpha <- rbind(1, 1, 1) Beta <- matrix(c( 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, -1, -1, -1 ), 3, 3, TRUE) result <- matrix(0, 4, 3) result[1:3, ] <- CD_mA(alpha, Beta, state$p[1:3]) result[4, 1] <- result[3, 1] * (1 + r) * rs / state$p[4] result }, B = matrix(c( 1, 0, 0, 0, 1, 1, 0, 0, 10, 0, 0, 10 ), 4, 3, TRUE), S0Exg = matrix(c( NA, NA, NA, NA, 99, 1, NA, NA, 100, NA, NA, 100 ), 4, 3, TRUE), GRExg = 0, moneyIndex = 3, moneyOwnerIndex = 3, pExg = rbind(NA, NA, r, NA) ) } #' @export # foreign exchange rate Example7.6 <- function() { # column 1: wheat producer of country 1; # column 2: laborer of country 1; # column 3: money owner of country 1; # column 4: iron producer of country 2; # column 5: laborer of country 2; # column 6: money owner of country 2; # row 1: wheat (of country 1); # row 2: labor of country 1; # row 3: money of country 1; # row 4: iron (of country 2); # row 5: labor of country 2; # row 6: money of country 2; sdm( A = function(state) { alpha <- matrix(1, 6, 1) Beta <- matrix(c( 0, 1, 1, 0, 1, 1, 0.5, 0, 0, 0, 0, 0, -1, -1, -1, 0, 0, 0, 0.5, 0, 0, 0.5, 0, 0, 0, 0, 0, 0.5, 0, 0, 0, 0, 0, -1, -1, -1 ), 6, 6, TRUE) CD_mA(alpha, Beta, state$p) }, # A=function(state){CES_mA(-1*ones(6,1),alpha,Beta,state$p)} B = diag(6), S0Exg = { tmp <- matrix(NA, 6, 6) tmp[2, 2] <- 100 tmp[3, 3] <- 600 tmp[5, 5] <- 100 tmp[6, 6] <- 100 tmp }, GRExg = 0, moneyOwnerIndex = c(3, 6), moneyIndex = c(3, 6), pExg = c(NA, NA, 0.1, NA, NA, 0.1) ) } #' @export # foreign exchange rate Example7.7 <- function() { sdm( A = function(state) { result <- matrix(NA, 8, 12) result[, 1] <- result[, 2] <- CES_mA(-1, 10, rbind(0, 1, 1, -1, 0, 0, 0, 0), state$p) result[, 3] <- result[, 4] <- Leontief_mA(rbind(1, 0, 0, -1, 0, 0, 0, 0), state$p) result[, 5] <- result[, 6] <- CES_mA(-1, 10, rbind(0, 1, 0, 0, 1, -1, 0, 0), state$p) result[, 7] <- result[, 8] <- Leontief_mA(rbind(1, 0, 0, 0, 0, -1, 0, 0), state$p) result[, 9] <- result[, 10] <- CES_mA(-1, 10, rbind(0, 1, 0, 0, 0, 0, 1, -1), state$p) result[, 11] <- result[, 12] <- Leontief_mA(rbind(1, 0, 0, 0, 0, 0, 0, -1), state$p) result }, B = { B <- matrix(0, 8, 12) B[1, 1] <- B[2, 2] <- B[3, 3] <- B[4, 4] <- B[1, 5] <- B[2, 6] <- B[5, 7] <- B[6, 8] <- B[1, 9] <- B[2, 10] <- B[7, 11] <- B[8, 12] <- 1 B }, S0Exg = { S0Exg <- matrix(NA, 8, 12) S0Exg[3, 3] <- S0Exg[4, 4] <- S0Exg[5, 7] <- S0Exg[6, 8] <- S0Exg[7, 11] <- S0Exg[8, 12] <- 100 S0Exg }, GRExg = 0, moneyIndex = rbind(4, 6, 8), moneyOwnerIndex = rbind(4, 8, 12), # p0=rbind(10,0.08749,0.4031,0.01,0.2448,0.4,0.1611,0.8); pExg = rbind(NA, NA, NA, 0.01, NA, 0.4, NA, 0.8) ) } #' @export # commodity money Example7.8 <- function() { dv <- 0.2 # (i) rd<-1e-6; rr<-1; tau<-1e-6 #7.8.1 # (ii) rd<-0; rr<-1;tau<-1; #7.8.2 # (iii) rd<-0.1; rr<-0.5; tau<-0 #7.9 rd <- 0.1 rr <- 0.5 tau <- 1e-6 sdm( A = function(state) { p1 <- state$p[1] / state$p[2] p2 <- 1 p3 <- state$p[3] / state$p[2] r <- state$p[4] / state$p[2] ps <- state$p[5] / state$p[2] pt <- state$p[6] / state$p[2] matrix(c( 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, p3, p3, p1, dv, rd * (1 + tau) * (1 + r) * p3 / ps, rd * (1 + tau) * (1 + r) * p3 / ps, 0, rd * (1 + tau) * (p2 + r * dv) / ps, 0, tau * (1 + r) * p3 / pt, 0, 0 ), 6, 4, T) }, B = matrix(c( 1, 0, 0, 0, 0, 1, 0, 1 - dv, 0, 0, 1, 0, 0, 0, 0, 1 / rr, 0, 0, 1, 0, 0, 0, 1, 0 ), 6, 4, TRUE), S0Exg = { S0Exg <- matrix(NA, 6, 4) S0Exg[3, 3] <- S0Exg[5, 3] <- 100 S0Exg[6, 3] <- 100 S0Exg }, GRExg = 0, priceAdjustmentVelocity = 0.008, p0 = rbind(1, 1, 1, 0.2, 1, 1), tolCond = 1e-5 ) } #' @export 
# positive growth Example7.9X <- function() { GRExg <- 0.01 dv <- 0.2 # !!! money.dv <- 1 - (1 - dv) / (1 + GRExg) # (i) rd<-0; rr<-1;tau<-0; #7.8.1 # (ii) rd<-0; rr<-1;tau<-1; #7.8.2 # (iii) rd<-0.1; rr<-0.5; tau<-0 #7.9 rd <- 0.1 rr <- 0.5 mm <- 1 / rr sdm( A = function(state) { p1 <- state$p[1] / state$p[2] p2 <- 1 p3 <- state$p[3] / state$p[2] r <- state$p[4] / state$p[2] ps <- state$p[5] / state$p[2] alpha <- rbind(1, 1, 1) Beta <- matrix(c( 0.5, 0.5, 0.5, 0, 0, 0, 0.5, 0.5, 0.5, -1, -1, -1 ), 4, 3, TRUE) result <- matrix(0, 5, 4) result[1:4, 1:3] <- CD_mA(alpha, Beta, c(p1, p2, p3, r)) result[1:4, 4] <- c(0, 1, 0, money.dv) result[5, 1:4] <- c( rd * (1 + r) * p3 / ps, rd * (1 + r) * p3 / ps, 0, rd * (1 + r * money.dv) / ps ) result }, B = matrix(c( 1, 0, 0, 0, 0, 1, 0, 1 - dv, 0, 0, 1, 0, 0, 0, 0, mm, 0, 0, 1, 0 ), 5, 4, TRUE), S0Exg = { S0Exg <- matrix(NA, 5, 4) S0Exg[3, 3] <- S0Exg[5, 3] <- 100 S0Exg }, GRExg = GRExg, priceAdjustmentVelocity = 0.1, p0 = rbind(1, 1, 1, 0.2, 1), tolCond = 1e-5 ) } #' @export Example7.10 <- function() { GRExg <- 0 rd <- 0 rr <- 0.5 rfm <- 0.05 sdm( A = function(state) { matrix(c( 0, 1, 1, 0, 0, 0, 0, rr, 1, 0, 0, 0, state$p[3], state$p[1], state$p[1], 0 ), 4, 4, T) }, B = matrix(c( 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 ), 4, 4, TRUE), S0Exg = { S0Exg <- matrix(NA, 4, 4) S0Exg[3, 3] <- 100 S0Exg[2, 2] <- 100 S0Exg }, GRExg = GRExg, moneyIndex = 2, moneyOwnerIndex = 2, pExg = rbind(NA, rfm, NA, NA), priceAdjustmentVelocity = 0.01 ) } #' @export Example7.10.2 <- function() { rd <- 0 rr <- 0.5 rfm <- 0.05 mm <- 1 / rr sdm( A = function(state) { matrix(c( 0, 1, 1, state$p[3] / mm, state$p[1] / mm, state$p[1] / mm, 1, 0, 0 ), 3, 3, T) }, B = matrix(c( 1, 0, 0, 0, 1, 0, 0, 0, 1 ), 3, 3, TRUE), S0Exg = { S0Exg <- matrix(NA, 3, 3) S0Exg[3, 3] <- 100 S0Exg[2, 2] <- 100 S0Exg }, GRExg = 0, moneyIndex = 2, moneyOwnerIndex = 2, pExg = rbind(NA, rfm, NA), priceAdjustmentVelocity = 0.01 ) } #' @export # Bond Example7.11 <- function() { r <- 0.1 labor.input <- 0 sdm( A = function(state) { matrix(c( 0, 1, 1, 1, # wheat 1, 0, 0, 0, # labor 0, 0, (1 + r) * state$p[1] / state$p[3], 0, state$p[2], state$p[1], state$p[1], state$p[1] ), 4, 4, T) }, B = matrix(c( 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 100 ), 4, 4, TRUE), S0Exg = { S0Exg <- matrix(NA, 4, 4) S0Exg[2, 2] <- S0Exg[2, 3] <- S0Exg[4, 4] <- 100 S0Exg[3, 2] <- 100 S0Exg }, GRExg = 0, moneyIndex = 4, moneyOwnerIndex = 4, pExg = rbind(NA, NA, NA, r), priceAdjustmentVelocity = 0.01 ) } #' @export # exchange rate and international credit Example7.12 <- function() { sdm( A = function(state) { alpha <- matrix(1, 6, 1) Beta <- matrix(c( 0, 1, 1, 0, 1, 0, 0.5, 0, 0, 0, 0, 0, -1, -1, -1, 0, 0, 0, 0.5, 0, 0, 0.5, 0, 0, 0, 0, 0, 0.5, 0, 0, 0, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 1 ), 7, 6, TRUE) CD_mA(alpha, Beta, state$p) }, B = { B <- diag(6) B <- rbind(B, c(0, 0, 1, 0, 0, 0)) B }, S0Exg = { S0Exg <- matrix(NA, 7, 6) S0Exg[2, 2] <- 100 S0Exg[3, 3] <- 600 S0Exg[5, 5] <- 100 S0Exg[6, 6] <- 100 S0Exg[7, 3] <- 1 S0Exg }, GRExg = 0, moneyOwnerIndex = rbind(3, 6), moneyIndex = rbind(3, 6), pExg = rbind(NA, NA, 0.1, NA, NA, 0.1) # !!!!!!!! 
) } #' @export # bank Example7.13 <- function() { r <- 0.1 rr <- 0.2 # (i) 0 (ii) 0.2 labor.input <- 0 sdm( A = function(state) { matrix(c( 0, 1, 1, 1, 0, # wheat 1, 0, 0, 0, 0, # labor 0, 0, 0, 0, 1, # deposit 0, 0, (1 + r) * state$p[1] / state$p[4], 0, 0, # credit # money state$p[2], state$p[1], state$p[1], state$p[1], 0, # reserve 0, 0, 0, 0, rr * state$p[3] / state$p[6] ), 6, 5, T) }, B = matrix(c( 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 100, 0, 0, 0, 0, 1, 0 ), 6, 5, TRUE), S0Exg = { S0Exg <- matrix(NA, 6, 5) S0Exg[2, 2] <- S0Exg[2, 3] <- S0Exg[5, 4] <- 100 S0Exg[3, 2] <- 100 S0Exg[6, 4] <- 1 S0Exg }, GRExg = 0, moneyIndex = 5, moneyOwnerIndex = 4, pExg = rbind(NA, NA, NA, NA, r, NA), priceAdjustmentVelocity = 0.05 ) } #' @export # shadow price Example7.14 <- function() { sdm( A = function(state) { alpha <- rbind(1, 1) Beta <- matrix(c( 0.5, 0.5, 0.5, 0.5, -1, -1 ), 3, 2, TRUE) CD_mA(alpha, Beta, state$p) }, B = matrix(c( 1, 0, 0, 100, 0, 100 ), 3, 2, T), S0Exg = { S0Exg <- matrix(NA, 3, 2) S0Exg[2, 2] <- 100 S0Exg[3, 2] <- 100 # S0Exg[1,2]<- 10 S0Exg }, GRExg = 0, pExg = rbind(NA, NA, 0.25), moneyIndex = 3, moneyOwnerIndex = 2 ) } #' @export # shadow price and international trade Example7.15 <- function() { sdm( A = function(state) { alpha <- rbind(1, 1, 1, 1) Beta <- matrix(c( 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, -1, -1, 0, 0, 0, 0, -1, -1 ), 4, 4, TRUE) CD_mA(alpha, Beta, state$p) }, B = matrix(c( 1, 0, 1, 0, 0, 100, 0, 100, 0, 100, 0, 0, 0, 0, 0, 100 ), 4, 4, T), S0Exg = matrix(c( NA, NA, NA, NA, NA, 100, NA, 100, NA, 100, NA, NA, NA, NA, NA, 100 ), 4, 4, T), GRExg = 0, moneyIndex = rbind(3, 4), moneyOwnerIndex = rbind(2, 4), pExg = rbind(NA, NA, 0.25, 0.1) ) } #' @export # equilibrium coffee problem Example8.1 <- function() { sdm( A = matrix(c( 0.05, 0.05, 0.1, 0.1, 0, 0.1, 0, 0.15, 0.05 ), 3, 3, TRUE), B = matrix(0, 3, 3), S0Exg = diag(3), GRExg = 0 ) } #' @export Example8.2 <- function() { S0Exg <- diag(c(100, 60, 100)) sdm( A = function(state) { alpha <- c(5, 3, 1) Beta <- matrix(c( 0.6, 0.4, 0.2, 0.1, 0.4, 0.7, 0.3, 0.2, 0.1 ), 3, 3, TRUE) CD_A(alpha, Beta, state$p) }, B = S0Exg, S0Exg = S0Exg, GRExg = 0 ) } #' @export Example8.7 <- function() { ge <- sdm( A = matrix(c( 0.05, 0.05, 0.1, 0.1, 0, 0.1, 0, 0.15, 0.05 ), 3, 3, TRUE), B = matrix(0, 3, 3), S0Exg = diag(3), GRExg = 0, p0 = rbind(1, 1, 1), numberOfPeriods = 300, maxIteration = 1, ts = TRUE ) } #' @export Example8.8 <- function() { ge <- sdm( A = function(state) { tmpA <- matrix(c( 0.05, 0.05, 0.1, 0.1, 0.1, 0, 0.1, 0.1, 0, 0.15, 0.05, 0.1, -1, -1, -1, -1 ), 4, 4, TRUE) Leontief_mA(tmpA, state$p) }, B = matrix(0, 4, 4), S0Exg = diag(4), GRExg = 0, moneyOwnerIndex = 4, moneyIndex = 4, depreciationCoef = 0, maxIteration = 1, numberOfPeriods = 800, p0 = rbind(0.5, 0.5, 0.5, 0.25), pExg = rbind(NaN, NaN, NaN, 0.25), ts = TRUE ) } #' @export Example8.9 <- function() { ge <- sdm( A = function(state) { tmpA <- matrix(c( 0.05, 0.05, 0.1, 0.1, 0.1, 0.1, 0, 0.1, 0.1, 0.1, 0, 0.15, 0.05, 0.1, 0.1, -1, -1, 0, -1, 0, 0, 0, -1, 0, -1 ), 5, 5, TRUE) Leontief_mA(tmpA, state$p) }, B = matrix(0, 5, 5), S0Exg = { S0Exg <- diag(5) S0Exg[4, 4] <- 3 S0Exg }, GRExg = 0, moneyOwnerIndex = c(4, 5), moneyIndex = c(4, 5), depreciationCoef = 0, maxIteration = 1, numberOfPeriods = 800, p0 = rbind(0.5, 0.5, 0.5, 0.25, 0.25), pExg = rbind(NaN, NaN, NaN, 0.25, 0.25), ts = TRUE ) } #' @export Example9.3 <- function() { ge <- sdm( A = matrix(c( 56 / 115, 6, 12 / 575, 2 / 5 ), 2, 2, TRUE), B = diag(2), maxIteration = 1, 
numberOfPeriods = 100, p0 = rbind(1 / 15, 1), z0 = rbind(575, 20), thresholdForPriceAdjustment = 0.99, priceAdjustmentMethod = "fixed", priceAdjustmentVelocity = 0.02, ts = TRUE ) } #' @export Example9.4 <- function() { sdm( A = matrix(c( 56 / 115, 6, 12 / 575, 2 / 5 ), 2, 2, TRUE), B = diag(2), S0Exg = matrix(c( NA, NA, NA, 190 ), 2, 2, T), GRExg = 0, maxIteration = 1, numberOfPeriods = 1000, p0 = rbind(12 / 295, 1), z0 = rbind(3400, 90), thresholdForPriceAdjustment = 0.99, priceAdjustmentMethod = "variable", priceAdjustmentVelocity = 0.2, ts = TRUE ) } #' @export Example9.5 <- function() { sdm( A = function(state) { alpha <- rbind(1, 1, 1) Beta <- matrix(c( 0, 1, 1, 0.5, 0, 0, 0.5, 0, 0 ), 3, 3, TRUE) CD_A(alpha, Beta, state$p) }, B = diag(3), S0Exg = matrix(c( NA, NA, NA, NA, 100, NA, NA, NA, 100 ), 3, 3, T), GRExg = 0, pExg = rbind(1, NA, 0.625), # (i) # pExg=rbind(1, 0.25, 0.25)#(ii) maxIteration = 1, numberOfPeriods = 200, depreciationCoef = 0, ts = TRUE ) } #' @export Example9.6 <- function() { sdm( A = function(state) { alpha <- rbind(1.2, 1) Beta <- matrix(c( 0.5, 1, 0.5, 0 ), 2, 2, TRUE) CD_A(alpha, Beta, state$p) }, B = diag(2), S0Exg = matrix(c( NA, NA, NA, 100 ), 2, 2, T), GRExg = 0, numberOfPeriods = 100, z0 = rbind(50, 50), p0 = rbind(1, 0.25), depreciationCoef = 1, # depreciationCoef=0.5 ts = TRUE ) } #' @export Example9.7 <- function() { sdm( A = matrix(c( 0, 0, 1, 0.4, 0, 0, 0.4, 1, 0 ), 3, 3, TRUE), B = matrix(c( 1, 0, 0, 0.36, 1, 0, 0, 0, 1 ), 3, 3, TRUE), S0Exg = { S0Exg <- matrix(NA, 3, 3, TRUE) S0Exg[3, 3] <- 100 S0Exg }, GRExg = 0, numberOfPeriods = 800, maxIteration = 1, priceAdjustmentVelocity = 0.05, ts = TRUE ) } #' @export Example9.10 <- function(policy = NULL, pExg = rbind(NA, NA, 0.25), p0 = rbind(0.625, 0.375, 0.25), priceAdjustmentVelocity = 0.3, ts = TRUE) { sdm( A = function(state) { tmpA <- matrix(c( 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, -1, -1, -1 ), 3, 3, TRUE) #-1 denotes the demand for money Leontief_mA(tmpA, state$p) }, B = diag(3), S0Exg = { S0Exg <- matrix(NA, 3, 3) S0Exg[2, 2] <- 100 S0Exg[3, 3] <- 100 S0Exg }, GRExg = 0, moneyIndex = 3, moneyOwnerIndex = 3, pExg = pExg, p0 = p0, z0 = rbind(95, 100, 100), thresholdForPriceAdjustment = 0.99, priceAdjustmentVelocity = priceAdjustmentVelocity, numberOfPeriods = 1000, maxIteration = 1, trace = FALSE, ts = ts, policy = policy ) } #' @export Example9.10.policy.interest.rate <- function(time, state, state.history) { if (time >= 600) { upsilon <- state.history$z[time - 1, 1] / mean(state.history$z[(time - 50):(time - 1), 1]) state$p[3] <- max(0.25 + 0.5 * log(upsilon), 0) } state } #' @export Example9.10.policy.money.supply <- function(time, state, state.history) { if (time >= 600) { upsilon <- state.history$z[time - 1, 1] / mean(state.history$z[(time - 50):(time - 1), 1]) state$S[3, 3] <- state.history$S[3, 3, time - 1] * (1 - 0.5 * log(upsilon)) } state } #' @export Example9.10.policy.tax <- function(time, state, state.history) { if (time >= 600) { upsilon <- state.history$z[time - 1, 1] / mean(state.history$z[(time - 50):(time - 1), 1]) tau <- 0 if (upsilon > 1) { tau <- min((upsilon - 1) / 2, 0.2) state$S[1, 1] <- state$S[1, 1] * (1 - tau) } state$current.policy.data <- data.frame(time = time, tau.Example9.10 = tau) } state } #' @export Example9.10.policy.deflation <- function(time, state, state.history) { if (time >= 600) { upsilon <- state.history$z[time - 1, 1] / mean(state.history$z[(time - 50):(time - 1), 1]) zeta.mu <- ifelse(upsilon > 1, 0.5, 0) state$S[3, 3] <- state.history$S[3, 3, time - 1] * (1 - 
zeta.mu * log(upsilon)) } state } #' @export Example9.10.policy.quantitative.easing <- function(time, state, state.history) { if (time >= 600) { upsilon <- state.history$z[time - 1, 1] / mean(state.history$z[(time - 50):(time - 1), 1]) zeta.mu <- ifelse(upsilon > 1, 0, 0.5) state$S[3, 3] <- state.history$S[3, 3, time - 1] * (1 - zeta.mu * log(upsilon)) } state } #' @export Example9.10.policy.deficit.fiscal <- function(time, state, state.history) { if (time >= 400) { current.deficit <- 0 if (state.history$q[time - 1, 1] < 0.95) { state$S[1, 1] <- state$S[1, 1] * 0.96 current.deficit <- state$S[1, 1] * 0.04 * state$p[1] state$S[3, 3] <- state$S[3, 3] + current.deficit } state$current.policy.data <- data.frame(time = time, deficit.Example9.10 = current.deficit) } state }
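# Illustrative sketch (not part of the original file): each ExampleX.Y function
# above simply wraps a call to sdm() with a particular model specification, so
# running one returns the computed equilibrium object. Guarded with `if (FALSE)`
# so it never executes at package load; str() is used so no assumption is made
# about the exact fields that sdm() returns.
if (FALSE) {
  ge <- Example3.9()    # Cobb-Douglas three-sector economy with two primary factors
  str(ge)               # inspect the returned equilibrium object
  # Example9.10 forwards its `policy` argument to sdm(), so the policy rules
  # defined above can be plugged in directly:
  ge2 <- Example9.10(policy = Example9.10.policy.interest.rate)
  str(ge2)
}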
/scratch/gouwar.j/cran-all/cranData/CGE/R/sdm_examples.R
#' CGGP: A package for running sparse grid computer experiments
#'
#' The CGGP package implements the method presented in Plumlee et al. (2019).
#'
#' @section CGGP functions:
#' The CGGP functions: CGGPcreate, CGGPfit, CGGPappend, and CGGPpred
#'
#' @docType package
#' @aliases CGGP-package
#' @name CGGP
NULL
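# Illustrative sketch (not part of the package source): a minimal workflow using
# the four core functions named above. The test function, sizes, and prediction
# points are arbitrary assumptions for demonstration only; the block is guarded
# with `if (FALSE)` so nothing runs at package load.
if (FALSE) {
  SG <- CGGPcreate(d = 3, batchsize = 100)               # initial sparse grid design
  y <- apply(SG$design, 1, function(x) x[1] + x[2]^2)    # evaluate the simulator
  SG <- CGGPfit(SG, Y = y)                               # fit correlation parameters
  xp <- matrix(runif(30), ncol = 3)                      # assumed prediction points in [0,1]^3
  pred <- CGGPpred(SG, xp)                               # predict at new points
  SG <- CGGPappend(SG, batchsize = 20, selectionmethod = "MAP")  # adaptively add points
}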
/scratch/gouwar.j/cran-all/cranData/CGGP/R/CGGP.R
#' Print CGGP object
#'
#' Default print as a list is bad since there are a lot of elements.
#'
#' @param x CGGP object
#' @param ... Passed to print
#'
#' @return String to be printed
#' @export
#'
#' @examples
#' SG = CGGPcreate(3,21)
#' print(SG)
#' f <- function(x) {x[1]+exp(x[2]) + log(x[3]+4)}
#' y <- apply(SG$design, 1, f)
#' SG <- CGGPfit(SG, y)
#' print(SG)
print.CGGP <- function(x, ...) {
  s <- paste0(
    c(
      "CGGP object\n",
      " d = ", x$d, '\n',
      " output dimensions = ", if (is.matrix(x$Y)) ncol(x$Y) else {1}, '\n',
      " CorrFunc = ", x$CorrName, '\n',
      " number of design points = ",
      if (is.null(x$design) || length(x$design)==0) {"0"} else {nrow(x$design)}, '\n',
      " number of unevaluated design points = ",
      if (is.null(x$design_unevaluated)) 0 else nrow(x$design_unevaluated), '\n',
      if (is.null(x$Xs)) {""} else {
        paste0(" number of supplemental points = ", nrow(x$Xs), '\n')
      },
      " Available functions:\n",
      " - CGGPfit(CGGP, Y) to update parameters with new data\n",
      " - CGGPpred(CGGP, xp) to predict at new points\n",
      " - CGGPappend(CGGP, batchsize) to add new design points\n",
      " - CGGPplot<name>(CGGP) to visualize CGGP model\n"
    )
  )
  cat(s, sep="")
}

#' S3 predict method for CGGP
#'
#' Passes to CGGPpred
#'
#' @param object CGGP object
#' @param ... Other arguments passed to `CGGPpred`
#'
#' @rdname CGGPpred
#' @export
predict.CGGP <- function(object, xp, ...) {
  CGGPpred(CGGP=object, xp=xp, ...)
}

#' S3 plot method for CGGP
#'
#' There are a few different plot functions for CGGP objects:
#' `CGGPplotblocks`, `CGGPplotblockselection`,
#' `CGGPplotcorr`, `CGGPplotheat`, `CGGPplothist`,
#' `CGGPvalplot`,
#' `CGGPplotslice`, and `CGGPplotvariogram`.
#' Currently `CGGPplotblocks` is the default plot function.
#'
#' @param x CGGP object
#' @param y Don't use
#' @param ... Passed to CGGPplotblocks
#'
#' @return Either makes plot or returns plot object
#' @export
#'
#' @examples
#' SG = CGGPcreate(3,100)
#' plot(SG)
plot.CGGP <- function(x, y, ...) {
  CGGPplotblocks(x, ...)
}
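# Illustrative sketch (assumption: not in the original file): the S3 generics
# above dispatch to CGGPpred and CGGPplotblocks, so a fitted CGGP object works
# with the usual predict()/plot() interface. Guarded from execution; the test
# function and prediction points are made up for demonstration.
if (FALSE) {
  SG <- CGGPcreate(3, 21)
  y <- apply(SG$design, 1, function(x) x[1] + exp(x[2]) + log(x[3] + 4))
  SG <- CGGPfit(SG, y)
  xp <- matrix(runif(3 * 10), ncol = 3)   # assumed points in [0,1]^3
  p <- predict(SG, xp)                    # same as CGGPpred(SG, xp)
  plot(SG)                                # same as CGGPplotblocks(SG)
}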
/scratch/gouwar.j/cran-all/cranData/CGGP/R/CGGP_S3_methods.R
#' Calculate MSE over single dimension #' #' Calculated using grid of integration points. #' Can be calculated exactly, but not much reason in 1D. #' #' @param xl Vector of points in 1D #' @param theta Correlation parameters #' @param CorrMat Function that gives correlation matrix for vectors of 1D points. #' #' @return MSE value #' @export #' #' @examples #' CGGP_internal_calcMSE(xl=c(0,.5,.9), theta=c(1,2,3), #' CorrMat=CGGP_internal_CorrMatCauchySQT) CGGP_internal_calcMSE <- function(xl, theta, CorrMat) { S = CorrMat(xl, xl, theta) xp = seq(-10^(-4),1+10^(-4),l=401) Cp = CorrMat(xp,xl,theta) n = length(xl) cholS = chol(S) CiCp = backsolve(cholS,backsolve(cholS,t(Cp), transpose = TRUE)) MSE_MAPal = mean(1 - rowSums(t(CiCp)*Cp)) MSE_MAPal } #' Calculate MSE over blocks #' #' Delta of adding block is product over i=1..d of IMSE(i,j-1) - IMSE(i,j) #' #' @param valsinds Block levels to calculate MSEs for #' @param MSE_MAP Matrix of MSE values #' #' @return All MSE values #' @export #' #' @examples #' SG <- CGGPcreate(d=3, batchsize=100) #' y <- apply(SG$design, 1, function(x){x[1]+x[2]^2}) #' SG <- CGGPfit(SG, Y=y) #' MSE_MAP <- outer(1:SG$d, 1:8, #' Vectorize(function(dimlcv, lcv1) { #' CGGP_internal_calcMSE(SG$xb[1:SG$sizest[dimlcv]], #' theta=SG$thetaMAP[(dimlcv-1)*SG$numpara+1:SG$numpara], #' CorrMat=SG$CorrMat) #' })) #' CGGP_internal_calcMSEde(SG$po[1:SG$poCOUNT, ], MSE_MAP) CGGP_internal_calcMSEde <- function(valsinds, MSE_MAP) { maxparam <- -Inf # Was set to -10 and ruined it. if(is.matrix(valsinds)){ MSE_de = rep(0, dim(valsinds)[1]) for (levellcv2 in 1:dim(valsinds)[1]) { MSE_de[levellcv2] = 0 for (levellcv in 1:dim(valsinds)[2]) { if (valsinds[levellcv2, levellcv] > 1.5) { MSE_de[levellcv2] = MSE_de[levellcv2] + max(log(-MSE_MAP[levellcv, valsinds[levellcv2, levellcv]] + MSE_MAP[levellcv, valsinds[levellcv2, levellcv] - 1]),maxparam) } else { # This is when no ancestor block, 1 comes from when there is no data. # 1 is correlation times integrated value over range. # This depends on correlation function. MSE_de[levellcv2] = MSE_de[levellcv2] + max(log(-MSE_MAP[levellcv, valsinds[levellcv2, levellcv]] + 1),maxparam) } } } } else { MSE_de = 0 for (levellcv in 1:length(valsinds)) { if (valsinds[levellcv] > 1.5) { MSE_de = MSE_de + max(log(-MSE_MAP[levellcv, valsinds[levellcv]] + MSE_MAP[levellcv, valsinds[levellcv] -1]),maxparam) } else { MSE_de = MSE_de + max(log(-MSE_MAP[levellcv, valsinds[levellcv]] + 1),maxparam) } } } MSE_de = exp(MSE_de) return(MSE_de) } #' Add points to CGGP #' #' Add `batchsize` points to `SG` using `theta`. #' #' @param CGGP Sparse grid object #' @param batchsize Number of points to add #' @param selectionmethod How points will be selected: one of `UCB`, `TS`, #' `MAP`, `Oldest`, `Random`, or `Lowest`. #' `UCB` uses Upper Confidence Bound estimates for the parameters. #' `TS` uses Thompson sampling, a random sample from the posterior. #' `MAP` uses maximum a posteriori parameter estimates. #' `Oldest` adds the block that has been available the longest. #' `Random` adds a random block. #' `Lowest` adds the block with the lowest sum of index levels. #' `UCB` and `TS` are based on bandit algorithms and account for uncertainty #' in the parameter estimates, but are the slowest. #' `MAP` is fast but doesn't account for parameter uncertainty. #' The other three are naive methods that are not adaptive and won't #' perform well. #' @importFrom stats quantile sd var #' #' @return SG with new points added. 
#' @export #' @family CGGP core functions #' #' @examples #' SG <- CGGPcreate(d=3, batchsize=100) #' y <- apply(SG$design, 1, function(x){x[1]+x[2]^2}) #' SG <- CGGPfit(SG, Y=y) #' SG <- CGGPappend(CGGP=SG, batchsize=20, selectionmethod="MAP") CGGPappend <- function(CGGP,batchsize, selectionmethod = "MAP"){ # ===== Check inputs ===== if (!(selectionmethod %in% c("UCB", "TS", "MAP", "Oldest", "Random", "Lowest"))) { stop("selectionmethod in CGGPappend must be one of UCB, TS, MAP, Oldest, Random, or Lowest") } if (!is.null(CGGP$design_unevaluated)) { stop("Can't append if CGGP has unevaluated design points.") } # Track how many design points there currently are in $design n_before <- if (is.null(CGGP[["design"]]) || length(CGGP$design)==0) { 0 } else { nrow(CGGP$design) } max_polevels = apply(CGGP$po[1:CGGP$poCOUNT, ,drop=FALSE], 2, max) separateoutputparameterdimensions <- is.matrix(CGGP$thetaMAP) # nopd is numberofoutputparameterdimensions nopd <- if (separateoutputparameterdimensions) { if (length(CGGP$y)>0) {ncol(CGGP$y)} else {ncol(CGGP$ys)} } else { 1 } # ==============================. # ==== Calculate IMSE ==== # ==============================. # Calculate integrated mean squared error (IMSE) values for the given method if(selectionmethod=="MAP"){ # Set up blank array to store MSE values MSE_MAP = array(0, dim=c(CGGP$d, CGGP$maxlevel,nopd)) # Loop over dimensions and design refinements for (opdlcv in 1:nopd) { thetaMAP.thisloop <- if (nopd==1) CGGP$thetaMAP else CGGP$thetaMAP[, opdlcv] for (dimlcv in 1:CGGP$d) { for (levellcv in 1:max_polevels[dimlcv]) { # Calculate some sort of MSE from above, not sure what it's doing MSE_MAP[dimlcv, levellcv, opdlcv] = max(0, abs( CGGP_internal_calcMSE( CGGP$xb[1:CGGP$sizest[levellcv]], thetaMAP.thisloop[(dimlcv-1)*CGGP$numpara+1:CGGP$numpara], CGGP$CorrMat ) ) ) if (levellcv > 1.5) { # If past 1st level, it is as good as one below MSE_MAP[dimlcv, levellcv, opdlcv] = min(MSE_MAP[dimlcv, levellcv, opdlcv], MSE_MAP[dimlcv, levellcv - 1, opdlcv]) } } } } # Integrated MSE IMES_MAP = rep(0, CGGP$ML) # For all possible blocks, calculate MSE_MAP, need to apply it over nopd IMES_MAP_beforemean = apply(MSE_MAP, 3, function(x) { CGGP_internal_calcMSEde( CGGP$po[1:CGGP$poCOUNT, , drop=F], x) }) if (CGGP$poCOUNT==1) { IMES_MAP_beforemean <- matrix(IMES_MAP_beforemean, nrow=1) } if (!is.matrix(IMES_MAP_beforemean)) {stop("Need a matrix here 0923859")} # Need as.matrix in case of single value # i.e. 
when only supp data and only po is initial point # If multiple output but single opd, need to take mean sigma2MAP.thisloop <- if (nopd==1) { mean(CGGP$sigma2MAP) } else { CGGP$sigma2MAP } IMES_MAP[1:CGGP$poCOUNT] = rowMeans( sweep(IMES_MAP_beforemean, 2, sigma2MAP.thisloop, "*") ) # Clean up to avoid silly errors rm(opdlcv, thetaMAP.thisloop, sigma2MAP.thisloop) } else if (selectionmethod %in% c("UCB", "TS")) { # selectionmethod is UCB or TS MSE_PostSamples = array(0, c(CGGP$d, CGGP$maxlevel,CGGP$numPostSamples, nopd)) # Dimensions can be considered independently # Loop over dimensions and design refinements for (opdlcv in 1:nopd) { # Loop over output parameter dimensions thetaPostSamples.thisloop <- if (nopd==1) { CGGP$thetaPostSamples } else { CGGP$thetaPostSamples[ , , opdlcv] } for (dimlcv in 1:CGGP$d) { # Loop over each input dimension for (levellcv in 1:max_polevels[dimlcv]) { for(samplelcv in 1:CGGP$numPostSamples){ # Calculate some sort of MSE from above, not sure what it's doing MSE_PostSamples[dimlcv, levellcv,samplelcv, opdlcv] = max(0, abs( CGGP_internal_calcMSE( CGGP$xb[1:CGGP$sizest[levellcv]], thetaPostSamples.thisloop[(dimlcv-1)*CGGP$numpara + 1:CGGP$numpara, samplelcv], CGGP$CorrMat) ) ) if (levellcv > 1.5) { # If past first level, it is as good as one below it MSE_PostSamples[dimlcv, levellcv,samplelcv, opdlcv] = min(MSE_PostSamples[dimlcv, levellcv,samplelcv, opdlcv], MSE_PostSamples[dimlcv, levellcv - 1,samplelcv, opdlcv]) } } } } } rm(opdlcv, dimlcv, levellcv, samplelcv) # Avoid dumb mistakes IMES_PostSamples = matrix(0, CGGP$ML,CGGP$numPostSamples) # Calculate sigma2 for all samples if needed if (is.null(CGGP$sigma2_samples)) { CGGP$sigma2_samples <- CGGP_internal_calc_sigma2_samples(CGGP) } sigma2.allsamples.alloutputs <- CGGP$sigma2_samples for(samplelcv in 1:CGGP$numPostSamples){ if (nopd == 1) { # Will be a matrix # Multiply by sigma2. If multiple output dimensions with # shared parameters, take mean. # Needed because each thetasample will have a different sigma2. sigma2.thistime <- mean(sigma2.allsamples.alloutputs[samplelcv,]) IMES_PostSamples[1:CGGP$poCOUNT,samplelcv] = sigma2.thistime * CGGP_internal_calcMSEde(CGGP$po[1:CGGP$poCOUNT,], MSE_PostSamples[,,samplelcv,]) rm(sigma2.thistime) # Avoid mistakes } else { # Is a 3d array, need to use an apply and then apply again with mean IMES_PostSamples_beforemean <- apply(MSE_PostSamples[,,samplelcv,], 3, function(x){ CGGP_internal_calcMSEde(CGGP$po[1:CGGP$poCOUNT,,drop=F], x) }) if (!is.matrix(IMES_PostSamples_beforemean)) { # Happens when CGGP$poCOUNT is 1, when only initial block avail if (CGGP$poCOUNT!=1) {stop("Something is wrong here #279287522")} IMES_PostSamples_beforemean <- matrix(IMES_PostSamples_beforemean, nrow=1) } # Need sigma2 for this theta sample, already calculated in sigma2.allsamples.alloutputs IMES_PostSamples[1:CGGP$poCOUNT,samplelcv] <- apply(IMES_PostSamples_beforemean, 1, function(x) { # Weight by sigma2 samples mean(sigma2.allsamples.alloutputs[samplelcv,] * x) }) } }; rm(samplelcv) # Get UCB IMES using 90% upper conf bound IMES_UCB = numeric(CGGP$ML) IMES_UCB[1:CGGP$poCOUNT] = apply(IMES_PostSamples[1:CGGP$poCOUNT,, drop=F],1,quantile, probs=0.9) } else { # Can be Oldest or Random or Lowest } # =============================. # ==== Append points ==== # =============================. 
# Append points to design until limit until reaching max_design_points max_design_points = CGGP$ss + batchsize while (max_design_points > CGGP$ss + min(CGGP$pogsize[1:CGGP$poCOUNT]) - .5) { if(selectionmethod=="MAP"){ IMES = IMES_MAP } else if(selectionmethod=="UCB"){ IMES = IMES_UCB } else if(selectionmethod=="TS"){ IMES = IMES_PostSamples[,sample(1:CGGP$numPostSamples,1)] } else if(selectionmethod=="Oldest"){ IMES = seq.int(from=CGGP$poCOUNT, to=1, by=-1) # Multiply by size so it gets undone below IMES <- IMES * CGGP$pogsize[1:CGGP$poCOUNT] } else if(selectionmethod=="Random"){ IMES = rep(1,CGGP$poCOUNT) # Multiply by size so it gets undone below IMES <- IMES * CGGP$pogsize[1:CGGP$poCOUNT] } else if(selectionmethod=="Lowest"){ IMES = rowSums(CGGP$po[1:CGGP$poCOUNT,]) # Make the lowest the highest value IMES <- max(IMES) + 1 - IMES # Multiply by size so it gets undone below IMES <- IMES * CGGP$pogsize[1:CGGP$poCOUNT] } else { stop("Selection method not acceptable") } CGGP$uoCOUNT = CGGP$uoCOUNT + 1 #increment used count # Find which blocks are still valid for selecting stillpossible <- which(CGGP$pogsize[1:CGGP$poCOUNT] < (max_design_points - CGGP$ss + 0.5)) # Pick block with max IMES per point in the block metric <- IMES[1:CGGP$poCOUNT] / CGGP$pogsize[1:CGGP$poCOUNT] # Find the best one that still fits M_comp = max(metric[stillpossible]) # Find which ones are close to M_comp and pick randomly among them possibleO = stillpossible[metric[stillpossible] >= 0.99*M_comp] # If more than one is possible and near the best, randomly pick among them. if(length(possibleO)>1.5){ pstar = sample(possibleO,1) } else{ pstar = possibleO } l0 = CGGP$po[pstar, ] # Selected block # Need to make sure there is still an open row in uo to set with new values if (CGGP$uoCOUNT > nrow(CGGP$uo)) { CGGP <- CGGP_internal_addrows(CGGP) } CGGP$uo[CGGP$uoCOUNT,] = l0 # Save selected block CGGP$ss = CGGP$ss + CGGP$pogsize[pstar] # Update selected size # ================================. # ==== Update ancestors ==== # ================================. # Protect against initial block which has no ancestors if (CGGP$pilaCOUNT[pstar] > 0) { # Protect for initial block new_an = CGGP$pila[pstar, 1:CGGP$pilaCOUNT[pstar]] total_an = new_an for (anlcv in 1:length(total_an)) { # Loop over ancestors if (total_an[anlcv] > 1.5) { # If there's more than 1, do this total_an = unique( c(total_an, CGGP$uala[total_an[anlcv], 1:CGGP$ualaCOUNT[total_an[anlcv]]]) ) } } CGGP$ualaCOUNT[CGGP$uoCOUNT] = length(total_an) CGGP$uala[CGGP$uoCOUNT, 1:length(total_an)] = total_an # Loop over all ancestors, update weight for (anlcv in 1:length(total_an)) { lo = CGGP$uo[total_an[anlcv],] if (max(abs(lo - l0)) < 1.5) { CGGP$w[total_an[anlcv]] = CGGP$w[total_an[anlcv]] + (-1)^abs(round(sum(l0-lo))) } } } CGGP$w[CGGP$uoCOUNT] = CGGP$w[CGGP$uoCOUNT] + 1 # Update data. Remove selected item, move rest up. # First get correct indices to change. 
Protect when selecting initial point new_indices <- if (CGGP$poCOUNT>1) {1:(CGGP$poCOUNT - 1)} else {numeric(0)} old_indices <- setdiff(seq.int(1, CGGP$poCOUNT, 1), pstar) # Then change the data CGGP$po[new_indices,] = CGGP$po[old_indices,] CGGP$pila[new_indices,] = CGGP$pila[old_indices,] CGGP$pilaCOUNT[new_indices] = CGGP$pilaCOUNT[old_indices] CGGP$pogsize[new_indices] = CGGP$pogsize[old_indices] if(selectionmethod=="MAP"){ IMES_MAP[new_indices] = IMES_MAP[old_indices] } if(selectionmethod=="UCB"){ IMES_UCB[new_indices] = IMES_UCB[old_indices] } if(selectionmethod=="TS"){ IMES_PostSamples[new_indices,] = IMES_PostSamples[old_indices,] } # And reduce number of available blocks by one. CGGP$poCOUNT = CGGP$poCOUNT - 1 # ==========================================. # ==== Update new possible blocks ==== # ==========================================. # Loop over possible descendents of selected block, add them if possible for (dimlcv in 1:CGGP$d) { lp = l0 lp[dimlcv] = lp[dimlcv] + 1 if (max(lp) <= CGGP$maxlevel && CGGP$poCOUNT < 4 * CGGP$ML) { kvals = which(lp > 1.5) # Dimensions above base level canuse = 1 ap = rep(0, CGGP$d) nap = 0 for (activedimlcv in 1:length(kvals)) { lpp = lp lpp[kvals[activedimlcv]] = lpp[kvals[activedimlcv]] - 1 ismem = rep(1, CGGP$uoCOUNT) for (dimdimlcv in 1:CGGP$d) { ismem = ismem * (CGGP$uo[1:CGGP$uoCOUNT, dimdimlcv] == lpp[dimdimlcv]) } if (max(ismem) > 0.5) { ap[activedimlcv] = which(ismem > 0.5) nap = nap + 1 } else{ canuse = 0 } } if (canuse > 0.5) { # If it can be used, add to possible blocks CGGP$poCOUNT = CGGP$poCOUNT + 1 CGGP$po[CGGP$poCOUNT,] = lp CGGP$pogsize[CGGP$poCOUNT] = prod(CGGP$sizes[lp]) CGGP$pila[CGGP$poCOUNT, 1:nap] = ap[1:nap] CGGP$pilaCOUNT[CGGP$poCOUNT] = nap max_polevels_old = max_polevels max_polevels = apply(CGGP$po[1:CGGP$poCOUNT, ,drop=F], 2, max) if(selectionmethod=="MAP"){ for (opdlcv in 1:nopd) { # Loop over output parameter dimensions thetaMAP.thisloop <- if (nopd==1) CGGP$thetaMAP else CGGP$thetaMAP[, opdlcv] for (dimlcv in 1:CGGP$d) { if((max_polevels_old[dimlcv]+0.5)<max_polevels[dimlcv]){ levellcv = max_polevels[dimlcv] MSE_MAP[dimlcv, levellcv, opdlcv] = max(0, abs(CGGP_internal_calcMSE(CGGP$xb[1:CGGP$sizest[levellcv]], thetaMAP.thisloop[(dimlcv-1)*CGGP$numpara+1:CGGP$numpara], CGGP$CorrMat))) if (levellcv > 1.5) { # If past first level, it is as good as one below it. Why isn't this a result of calculation? MSE_MAP[dimlcv, levellcv, opdlcv] = min(MSE_MAP[dimlcv, levellcv, opdlcv], MSE_MAP[dimlcv, levellcv - 1, opdlcv]) } } } } # Clean up rm(thetaMAP.thisloop, opdlcv) } else if (selectionmethod %in% c("UCB", "TS")){ # selection method is UCB or TS for (opdlcv in 1:nopd) { thetaPostSamples.thisloop <- if (nopd==1) CGGP$thetaPostSamples else CGGP$thetaPostSamples[, , opdlcv] for (dimlcv_2 in 1:CGGP$d) { # dimlcv is already used for which descendent to add if((max_polevels_old[dimlcv_2]+0.5)<max_polevels[dimlcv_2]){ levellcv = max_polevels[dimlcv_2] for(samplelcv in 1:CGGP$numPostSamples){ # Calculate some sort of MSE from above, not sure what it's doing MSE_PostSamples[dimlcv_2, levellcv, samplelcv, opdlcv] = max(0, abs(CGGP_internal_calcMSE( CGGP$xb[1:CGGP$sizest[levellcv]], thetaPostSamples.thisloop[(dimlcv_2-1)*CGGP$numpara+1:CGGP$numpara, samplelcv], CGGP$CorrMat))) if (levellcv > 1.5) { # If past first level, it is as good as one below it. Why isn't this a result of calculation? 
MSE_PostSamples[dimlcv_2, levellcv, samplelcv, opdlcv] = min(MSE_PostSamples[dimlcv_2, levellcv,samplelcv, opdlcv], MSE_PostSamples[dimlcv_2, levellcv - 1,samplelcv, opdlcv]) } }; rm(samplelcv) } }; rm(dimlcv_2) } # Clean up rm(thetaPostSamples.thisloop, opdlcv) } else { # Can be Oldest or Random or Lowest } if(selectionmethod=="MAP"){ # IMES_MAP[CGGP$poCOUNT] = CGGP_internal_calcMSEde(as.vector(CGGP$po[CGGP$poCOUNT, ]), MSE_MAP) # Need to apply first IMES_MAP_beforemeannewpoint <- apply(MSE_MAP, 3, function(x) {CGGP_internal_calcMSEde(as.vector(CGGP$po[CGGP$poCOUNT, ]), x)}) # Take weighted mean over dimensions IMES_MAP[CGGP$poCOUNT] <- mean(CGGP$sigma2MAP * IMES_MAP_beforemeannewpoint) } else if (selectionmethod=="UCB" || selectionmethod=="TS"){ for(samplelcv in 1:CGGP$numPostSamples){ if (nopd == 1) { # is a matrix # Each sample has different sigma2, so use. If multiple output # parameter dimensions, take mean over sigma2. sigma2.thistime <- mean(sigma2.allsamples.alloutputs[samplelcv,]) IMES_PostSamples[CGGP$poCOUNT,samplelcv] = sigma2.thistime * CGGP_internal_calcMSEde(as.vector(CGGP$po[CGGP$poCOUNT, ]), MSE_PostSamples[,,samplelcv,]) rm(sigma2.thistime) } else { # is an array, need to apply IMES_PostSamples_beforemeannewpoint = apply(MSE_PostSamples[,,samplelcv,], 3, # 3rd dim since samplelcv removes 3rd function(x) { CGGP_internal_calcMSEde(as.vector(CGGP$po[CGGP$poCOUNT, ]), x) } ) IMES_PostSamples[CGGP$poCOUNT,samplelcv] <- mean(sigma2.allsamples.alloutputs[samplelcv,] * IMES_PostSamples_beforemeannewpoint) } }; rm(samplelcv) IMES_UCB[CGGP$poCOUNT] = quantile(IMES_PostSamples[CGGP$poCOUNT,],probs=0.9) } else if (selectionmethod %in% c("Oldest", "Random", "Lowest")) { # nothing needed } else {stop("Not possible #9235058")} } } } } # Get design and other attributes updated CGGP <- CGGP_internal_getdesignfromCGGP(CGGP) # Check if none were added, return warning/error if (n_before == nrow(CGGP$design)) { warning("No points could be added. You may need a larger batch size.") } else { # Save design_unevaluated to make it easy to know which ones to add CGGP$design_unevaluated <- CGGP$design[(n_before+1):nrow(CGGP$design),] } return(CGGP) }
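# Illustrative sketch (not part of the original file): comparing selection
# methods in CGGPappend. "MAP" is fast; "UCB" and "TS" account for parameter
# uncertainty at extra cost, as described in the documentation above. The test
# function is arbitrary and the block is guarded from execution.
if (FALSE) {
  SG <- CGGPcreate(d = 3, batchsize = 100)
  y <- apply(SG$design, 1, function(x) x[1] + x[2]^2)
  SG <- CGGPfit(SG, Y = y)
  SG_map <- CGGPappend(SG, batchsize = 20, selectionmethod = "MAP")
  SG_ucb <- CGGPappend(SG, batchsize = 20, selectionmethod = "UCB")
  # Newly added, not-yet-evaluated points are stored in $design_unevaluated
  nrow(SG_map$design_unevaluated)
}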
/scratch/gouwar.j/cran-all/cranData/CGGP/R/CGGP_append_fs.R
#' CGGP_internal_calc_cholS_lS_sigma2_pw #' #' Quickly calculate cholS, lS, sigma2, and pw. To be used within #' neglogpost. #' #' @param CGGP CGGP object #' @param y Measured output values #' @param theta Correlation parameters #' #' @noRd #' #' @return List with cholS, lS, sigma2, pw # @export #' # @examples CGGP_internal_calc_cholS_lS_sigma2_pw <- function(CGGP,y,theta) { #We need to return pw, sigma2 and cholS and lS Q = max(CGGP$uo[1:CGGP$uoCOUNT,]) # Max value of all blocks cholS = list(matrix(1,1,1),Q*CGGP$d) # To store choleskys lS = matrix(0, nrow = max(CGGP$uo[1:CGGP$uoCOUNT,]), ncol = CGGP$d) # Save log determinant of matrices # Loop over each dimension for (dimlcv in 1:CGGP$d) { # Loop over each possible needed correlation matrix for (levellcv in 1:max(CGGP$uo[1:CGGP$uoCOUNT,dimlcv])) { Xbrn = CGGP$xb[1:CGGP$sizest[levellcv]] Xbrn = Xbrn[order(Xbrn)] Sstuff = CGGP$CorrMat(Xbrn, Xbrn , theta[(dimlcv-1)*CGGP$numpara+1:CGGP$numpara],return_dCdtheta = FALSE) S = Sstuff # When theta is large (> about 5), the matrix is essentially all 1's, can't be inverted solvetry <- try({ cS = chol(S) cholS[[(dimlcv-1)*Q+levellcv]]= as.matrix(cS+t(cS)-diag(diag(cS))) #store the symmetric version for C code }) if (inherits(solvetry, "try-error")) {return(Inf)} lS[levellcv, dimlcv] = 2*sum(log(diag(cS))) } } if(!is.matrix(y)){ sigma2 = 0 # Predictive weight for each measured point pw = rep(0, length(y)) # Predictive weight for each measured point # Loop over blocks selected gg = (1:CGGP$d-1)*Q for (blocklcv in 1:CGGP$uoCOUNT) { if(abs(CGGP$w[blocklcv])>0.5){ IS = CGGP$dit[blocklcv, 1:CGGP$gridsizet[blocklcv]]; B0 = y[IS] B = CGGP$w[blocklcv]*B0 VVV1 = unlist(cholS[gg+CGGP$uo[blocklcv,]]) VVV2 = CGGP$gridsizest[blocklcv,] rcpp_kronDBS(VVV1, B, VVV2) pw[IS] = pw[IS]+B sigma2 = sigma2 + sum(B0*B) } } sigma2=sigma2/length(y) return(list(sigma2=sigma2,pw=pw,cholS=cholS, lS=lS)) }else{ numout = dim(y)[2] sigma2 = rep(0,numout) # Predictive weight for each measured point numout = dim(y)[2] pw = matrix(0,nrow=dim(y)[1],ncol=numout) # Predictive weight for each measured point # Loop over blocks selected gg = (1:CGGP$d-1)*Q for (blocklcv in 1:CGGP$uoCOUNT) { if(abs(CGGP$w[blocklcv])>0.5){ IS = CGGP$dit[blocklcv, 1:CGGP$gridsizet[blocklcv]]; VVV1 = unlist(cholS[gg+CGGP$uo[blocklcv,]]); VVV2 = CGGP$gridsizest[blocklcv,]; for(outdimlcv in 1:numout){ B0 = y[IS,outdimlcv] B = CGGP$w[blocklcv]*B0 rcpp_kronDBS(VVV1, B, VVV2) pw[IS,outdimlcv] = pw[IS,outdimlcv]+B sigma2[outdimlcv] = sigma2[outdimlcv] + (t(B0)%*%B) } } } sigma2=sigma2/dim(y)[1] return(list(sigma2=sigma2,pw=pw,cholS=cholS,lS=lS)) } } #' CGGP_internal_calc_cholS_lS_dsigma2_pw_dMatdtheta #' #' Quickly calculate cholS, lS, sigma2, dsigma2, dMatdtheta, #' and pw. To be used within gneglogpost. 
#' #' @param CGGP CGGP object #' @param y Measured output values #' @param theta Correlation parameters #' #' @noRd #' #' @return List with cholS, lS, sigma2, dsigma2, dMatdtheta, and pw # @export #' # @examples CGGP_internal_calc_cholS_lS_dsigma2_pw_dMatdtheta <- function(CGGP,y, theta) { #We need to return pw, sigma2, dsigma2, cholS, dMatdtheta and lS Q = max(CGGP$uo[1:CGGP$uoCOUNT,]) # Max level of all blocks cholS = list(matrix(1,1,1),Q*CGGP$d) # To store choleskys dMatdtheta = list(matrix(1,1,1),Q*CGGP$d) lS = matrix(0, nrow = max(CGGP$uo[1:CGGP$uoCOUNT,]), ncol = CGGP$d) # Save log determinant of matrices dlS = matrix(0, nrow = max(CGGP$uo[1:CGGP$uoCOUNT,]), ncol = CGGP$numpara*CGGP$d) for (dimlcv in 1:CGGP$d) { for (levellcv in 1:max(CGGP$uo[1:CGGP$uoCOUNT,dimlcv])) { Xbrn = CGGP$xb[1:CGGP$sizest[levellcv]] Xbrn = Xbrn[order(Xbrn)] nv = length(Xbrn); Sstuff = CGGP$CorrMat(Xbrn, Xbrn , theta[(dimlcv-1)*CGGP$numpara+1:CGGP$numpara],return_dCdtheta = TRUE) S = Sstuff$C cS = chol(S) cholS[[(dimlcv-1)*Q+levellcv]] = as.matrix(cS+t(cS)-diag(diag(cS)))#store the symmetric version for C code dMatdtheta[[(dimlcv-1)*Q+levellcv]] = -backsolve(cS,backsolve(cS,Sstuff$dCdtheta, transpose = TRUE)) for(paralcv in 1:CGGP$numpara){ dMatdtheta[[(dimlcv-1)*Q+levellcv]][1:nv,nv*(paralcv-1)+1:nv] = t(dMatdtheta[[(dimlcv-1)*Q+levellcv]][1:nv,nv*(paralcv-1)+1:nv]) } lS[levellcv, dimlcv] = 2*sum(log(diag(cS))) for(paralcv in 1:CGGP$numpara){ if(nv > 1.5){ dlS[levellcv, CGGP$numpara*(dimlcv-1)+paralcv] = -sum(diag(dMatdtheta[[(dimlcv-1)*Q+levellcv]][1:nv,nv*(paralcv-1)+1:nv])) } else { dlS[levellcv, CGGP$numpara*(dimlcv-1)+paralcv] = -dMatdtheta[[(dimlcv-1)*Q+levellcv]][1:nv,nv*(paralcv-1)+1:nv] } } } } if(is.matrix(y)){ numout = dim(y)[2] sigma2 = rep(0,numout) # Predictive weight for each measured point dsigma2 = matrix(0,nrow=CGGP$numpara*CGGP$d,ncol=numout) # Predictive weight for each measured point pw = matrix(0,nrow=dim(y)[1],ncol=numout) # Predictive weight for each measured point gg = (1:CGGP$d-1)*Q for (blocklcv in 1:CGGP$uoCOUNT) { if(abs(CGGP$w[blocklcv])>0.5){ IS = CGGP$dit[blocklcv, 1:CGGP$gridsizet[blocklcv]]; VVV1=unlist(cholS[gg+CGGP$uo[blocklcv,]]) VVV2=unlist(dMatdtheta[gg+CGGP$uo[blocklcv,]]) VVV3=CGGP$gridsizest[blocklcv,] for(outdimlcv in 1:numout){ B0 = y[IS,outdimlcv] B = CGGP$w[blocklcv]*B0 dB = rcpp_gkronDBS(VVV1,VVV2,B,VVV3) pw[IS,outdimlcv] = pw[IS,outdimlcv]+B dsigma2[,outdimlcv] = dsigma2[,outdimlcv] + as.vector(dB%*%B0) sigma2[outdimlcv] = sigma2[outdimlcv] + sum(B0*B) } } } out <- list(sigma2=sigma2/dim(y)[1],dsigma2=dsigma2/dim(y)[1],lS=lS,dlS=dlS,pw=pw,cholS=cholS,dMatdtheta=dMatdtheta) }else{ sigma2 = 0 # Predictive weight for each measured point dsigma2 = rep(0,nrow=CGGP$d) # Predictive weight for each measured point gg = (1:CGGP$d-1)*Q pw = rep(0, length(y)) # Predictive weight for each measured point for (blocklcv in 1:CGGP$uoCOUNT) { if(abs(CGGP$w[blocklcv])>0.5){ IS = CGGP$dit[blocklcv, 1:CGGP$gridsizet[blocklcv]]; B0 = y[IS] B = CGGP$w[blocklcv]*B0 dB = rcpp_gkronDBS(unlist(cholS[gg+CGGP$uo[blocklcv,]]),unlist(dMatdtheta[gg+CGGP$uo[blocklcv,]]), B, CGGP$gridsizest[blocklcv,]) pw[IS] = pw[IS]+B dsigma2 = dsigma2 +t(B0)%*%t(dB) sigma2 = sigma2 + t(B0)%*%B } } out <- list(sigma2=sigma2/length(y),dsigma2=dsigma2/length(y),lS=lS,dlS=dlS,pw=pw,cholS=cholS,dMatdtheta=dMatdtheta) } out } #' CGGP_internal_calc_dvalo #' #' Quickly calculate valo and dvalo. To be used within gneglogpost. 
#' #' @param CGGP CGGP object #' @param revc Input from a previous calculation #' @param y Measured output values #' @param cholS Cholesky factorizations #' @param dMatdtheta Input from a previous calculation #' #' @noRd #' #' @return List with valo and dvalo # @export #' # @examples CGGP_internal_calc_dvalo <- function(CGGP,revc,y,cholS,dMatdtheta) { Q = max(CGGP$uo[1:CGGP$uoCOUNT,]) # Max level of all blocks if(is.matrix(y)){ numout = dim(y)[2] valo = rep(0,numout) # Predictive weight for each measured point dvalo = matrix(0,nrow=CGGP$numpara*CGGP$d,ncol=numout) # Predictive weight for each measured point gg = (1:CGGP$d-1)*Q for (blocklcv in 1:CGGP$uoCOUNT) { if(abs(CGGP$w[blocklcv])>0.5){ IS = CGGP$dit[blocklcv, 1:CGGP$gridsizet[blocklcv]]; VVV1=unlist(cholS[gg+CGGP$uo[blocklcv,]]) VVV2=unlist(dMatdtheta[gg+CGGP$uo[blocklcv,]]) VVV3=CGGP$gridsizest[blocklcv,] for(outdimlcv in 1:numout){ B2 = y[IS,outdimlcv] B0 = revc[IS,outdimlcv] B = (CGGP$w[blocklcv])*B0#/dim(y)[1] dB = rcpp_gkronDBS(VVV1,VVV2,B,VVV3) dvalo[,outdimlcv] = dvalo[,outdimlcv] +t(B2)%*%t(dB) valo[outdimlcv] = valo[outdimlcv] + sum(B2*B)+ t(B2)%*%B } } } out <- list(valo=valo,dvalo=dvalo) }else{ valo= 0 # Predictive weight for each measured point dvalo = rep(0,nrow=CGGP$d) # Predictive weight for each measured point gg = (1:CGGP$d-1)*Q for (blocklcv in 1:CGGP$uoCOUNT) { if(abs(CGGP$w[blocklcv])>0.5){ IS = CGGP$dit[blocklcv, 1:CGGP$gridsizet[blocklcv]]; B0 = revc[IS] B2 = y[IS] B = (CGGP$w[blocklcv])*B0#/length(y) dB = rcpp_gkronDBS(unlist(cholS[gg+CGGP$uo[blocklcv,]]),unlist(dMatdtheta[gg+CGGP$uo[blocklcv,]]), B, CGGP$gridsizest[blocklcv,]) dvalo = dvalo + as.vector(dB%*%B2) valo = valo + sum(B2*B) } } out <- list(valo=valo,dvalo=dvalo) } out }
/scratch/gouwar.j/cran-all/cranData/CGGP/R/CGGP_calc_lS_cholS_valo_etc.R
#' Calculate quantities for supplemental data #' #' @param CGGP CGGP object #' @param thetaMAP theta to calculate for #' @param ys.thisloop Supplemental ys (after transformation) #' @param y.thisloop y (after transformation) #' @param pw pw for y #' @param sigma2MAP sigma2MAP if only using y, will be adjusted for ys #' @param only_sigma2MAP Should only sigma2MAP be returned? #' #' @return List with objects ## @export #' @noRd CGGP_internal_calc_supp_pw_sigma2_Sti <- function(CGGP, thetaMAP, ys.thisloop, y.thisloop, pw, sigma2MAP, only_sigma2MAP=TRUE) { if (missing(pw) || missing(sigma2MAP)) { if (missing(y.thisloop)) {stop("If not giving in pw and sigma2MAP, must give in y.thisloop")} likstuff <- CGGP_internal_calc_cholS_lS_sigma2_pw(CGGP=CGGP, y.thisloop, theta=thetaMAP) pw <- likstuff$pw sigma2MAP <- likstuff$sigma2 } Cs = matrix(1,dim(CGGP$Xs)[1],CGGP$ss) for (dimlcv in 1:CGGP$d) { # Loop over dimensions V = CGGP$CorrMat(CGGP$Xs[,dimlcv], CGGP$xb, thetaMAP[(dimlcv-1)*CGGP$numpara+1:CGGP$numpara]) Cs = Cs*V[,CGGP$designindex[,dimlcv]] } Sigma_t = matrix(1,dim(CGGP$Xs)[1],dim(CGGP$Xs)[1]) for (dimlcv in 1:CGGP$d) { # Loop over dimensions V = CGGP$CorrMat(CGGP$Xs[,dimlcv], CGGP$Xs[,dimlcv], thetaMAP[(dimlcv-1)*CGGP$numpara+1:CGGP$numpara]) Sigma_t = Sigma_t*V } MSE_s = list(matrix(0,dim(CGGP$Xs)[1],dim(CGGP$Xs)[1]), (CGGP$d+1)*(CGGP$maxlevel+1)) for (dimlcv in 1:CGGP$d) { for (levellcv in 1:max(CGGP$uo[1:CGGP$uoCOUNT,dimlcv])) { MSE_s[[(dimlcv)*CGGP$maxlevel+levellcv]] = (-CGGP_internal_postvarmatcalc(CGGP$Xs[,dimlcv],CGGP$Xs[,dimlcv], CGGP$xb[1:CGGP$sizest[levellcv]], thetaMAP[(dimlcv-1)*CGGP$numpara + 1:CGGP$numpara], CorrMat=CGGP$CorrMat)) } } for (blocklcv in 1:CGGP$uoCOUNT) { ME_s = matrix(1,nrow=dim(CGGP$Xs)[1],ncol=dim(CGGP$Xs)[1]) for (dimlcv in 1:CGGP$d) { levelnow = CGGP$uo[blocklcv,dimlcv] ME_s = ME_s*MSE_s[[(dimlcv)*CGGP$maxlevel+levelnow]] } Sigma_t = Sigma_t-CGGP$w[blocklcv]*(ME_s) } yhats = Cs%*%pw Sti_resid = solve(Sigma_t,ys.thisloop-yhats) sigma2MAP = (sigma2MAP*dim(CGGP$design)[1] + colSums((ys.thisloop-yhats)*Sti_resid)) / ( dim(CGGP$design)[1]+dim(CGGP$Xs)[1]) out <- list(sigma2MAP=sigma2MAP) if (!only_sigma2MAP) { out$Sti = solve(Sigma_t) pw_adj_y = t(Cs)%*%Sti_resid pw_adj <- CGGP_internal_calcpw(CGGP=CGGP, y=pw_adj_y, theta=thetaMAP) out$pw_uppadj = pw-pw_adj out$supppw = Sti_resid } out } #' Calculate quantities for supplemental data only (no grid data) #' #' @param CGGP CGGP object #' @param thetaMAP theta to calculate for #' @param ys.thisloop Supplemental ys (after transformation) #' @param only_sigma2MAP Should only sigma2MAP be returned? 
#' #' @return List with objects ## @export #' @noRd CGGP_internal_calc_supp_only_supppw_sigma2_Sti <- function(CGGP, thetaMAP, ys.thisloop, only_sigma2MAP=TRUE) { Sigma_t = matrix(1,dim(CGGP$Xs)[1],dim(CGGP$Xs)[1]) for (dimlcv in 1:CGGP$d) { # Loop over dimensions V = CGGP$CorrMat(CGGP$Xs[,dimlcv], CGGP$Xs[,dimlcv], thetaMAP[(dimlcv-1)*CGGP$numpara+1:CGGP$numpara]) Sigma_t = Sigma_t*V } Sti_chol <- chol(Sigma_t + diag(CGGP$nugget, nrow(Sigma_t), ncol(Sigma_t))) # Use backsolve for stability # supppw <- Sti %*% ys.thisloop # supppw is same as Sti_resid in other files supppw <- backsolve(Sti_chol, backsolve(Sti_chol, ys.thisloop, transpose = T)) if (is.matrix(supppw) && ncol(supppw)==1) {supppw <- as.vector(supppw)} # sigma2MAP <- (t(ys.thisloop) %*% supppw) / nrow(CGGP$Xs) # if (is.matrix(sigma2MAP)) {sigma2MAP <- diag(sigma2MAP)} sigma2MAP = colSums(as.matrix(ys.thisloop)*as.matrix(supppw))/nrow(CGGP$Xs) out <- list() out$sigma2MAP <- sigma2MAP if (!only_sigma2MAP) { Sti <- chol2inv(Sti_chol) out$Sti <- Sti out$supppw <- supppw out$Sti_chol <- Sti_chol } out }
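# Minimal sketch (assumes a small supplemental-only fit built interactively;
# internal helper accessed via ::: for illustration). With no grid data the
# quantities reduce to the usual GP identities supppw = Sigma^{-1} ys and
# sigma2 = ys' Sigma^{-1} ys / n, which can be checked with a dense solve.
if (FALSE) {
  d <- 2; n <- 12
  Xs <- matrix(runif(d * n), n, d)
  Ys <- apply(Xs, 1, function(x) sin(2 * pi * x[1]) + x[2])  # toy output
  cg <- CGGPcreate(d, batchsize = 0, Xs = Xs, Ys = Ys)
  out <- CGGP:::CGGP_internal_calc_supp_only_supppw_sigma2_Sti(
    cg, thetaMAP = cg$thetaMAP, ys.thisloop = cg$ys, only_sigma2MAP = FALSE)
  Sig <- matrix(1, n, n)  # rebuild the supplemental covariance by hand
  for (k in 1:d) {
    Sig <- Sig * cg$CorrMat(Xs[, k], Xs[, k],
                            cg$thetaMAP[(k - 1) * cg$numpara + 1:cg$numpara])
  }
  max(abs(out$supppw - solve(Sig + diag(cg$nugget, n), cg$ys)))  # ~ 0
  out$sigma2MAP - sum(cg$ys * out$supppw) / n                    # ~ 0
}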
/scratch/gouwar.j/cran-all/cranData/CGGP/R/CGGP_calc_supp_pw_sigma2_Sti.R
#' Cauchy correlation function #' #' Calculate correlation matrix for two sets of points in one dimension. #' Note that this is not the correlation between two vectors. #' #' @param x1 Vector of coordinates from same dimension #' @param x2 Vector of coordinates from same dimension # ' @param LS Log of parameter that controls lengthscale # ' @param FD Logit of 0.5*parameter that controls the fractal dimension # ' @param HE Log of parameter that controls the hurst effect #' @param theta Correlation parameters: #' \itemize{ #' \item LS Log of parameter that controls lengthscale #' \item FD Logit of 0.5*parameter that controls the fractal dimension #' \item HE Log of parameter that controls the hurst effect #' } #' @param return_dCdtheta Should dCdtheta be returned? #' @param return_numpara Should it just return the number of parameters? #' @param returnlogs Should log of correlation be returned? #' #' @return Matrix of correlation values between x1 and x2 #' @export #' @family correlation functions #' #' @examples #' CGGP_internal_CorrMatCauchy(c(0,.2,.4),c(.1,.3,.5), theta=c(-1,.9,.1)) CGGP_internal_CorrMatCauchy <- function(x1, x2, theta, return_dCdtheta=FALSE, return_numpara=FALSE, returnlogs=FALSE) { if(return_numpara){ return(3) }else{ if (length(theta) != 3) {stop("CorrMatCauchy theta should be length 3")} diffmat =abs(outer(x1,x2,'-')); expLS = exp(3*(theta[1])) expHE = exp(3*(theta[2])) h = diffmat/expLS alpha = 2*exp(3*theta[3]+2)/(1+exp(3*theta[3]+2)) halpha = h^alpha pow = -expHE/alpha if (!returnlogs) { C = (1+halpha)^pow } else { C = pow * log(1+halpha) } if(return_dCdtheta){ if (!returnlogs) { dCdtheta = cbind(3*expHE*((1+halpha)^(pow-1))*(halpha), 3*C*pow*log(1+halpha), (C*(expHE*log(1+halpha)/alpha^2 - expHE*halpha*log(h)/alpha/(1+halpha))) * 6*exp(3*theta[3]+2)/(1+exp(3*theta[3]+2))^2) } else { dCdtheta = cbind(3*expHE*halpha/(1+halpha), 3*pow*log(1+halpha), ((expHE*log(1+halpha)/alpha^2 - expHE*halpha*log(h)/alpha/(1+halpha))) * 6*exp(3*theta[3]+2)/(1+exp(3*theta[3]+2))^2) } dCdtheta[is.na(dCdtheta)] = 0 out <- list(C=C,dCdtheta=dCdtheta) return(out) }else{ return(C) } } } #' CauchySQT correlation function #' #' Calculate correlation matrix for two sets of points in one dimension. #' Note that this is not the correlation between two vectors. 
#' #' @inheritParams CGGP_internal_CorrMatCauchy #' #' @return Matrix of correlation values between x1 and x2 #' @export #' @family correlation functions #' #' @examples #' CGGP_internal_CorrMatCauchySQT(c(0,.2,.4),c(.1,.3,.5), theta=c(-.1,.3,-.7)) CGGP_internal_CorrMatCauchySQT <- function(x1, x2,theta, return_dCdtheta = FALSE, return_numpara=FALSE, returnlogs=FALSE) { if(return_numpara){ return(3); } else{ if (length(theta) != 3) {stop("CorrMatCauchySQT theta should be length 3")} expTILT = exp((theta[3])) expLS = exp(3*(theta[1])) x1t = (x1+10^(-2))^expTILT x2t = (x2+10^(-2))^expTILT x1ts = x1t/expLS x2ts = x2t/expLS diffmat =abs(outer(x1ts,x2ts,'-')); expHE = exp(3*(theta[2])) h = diffmat alpha = 2*exp(5)/(1+exp(5)) halpha = h^alpha pow = -expHE/alpha if (!returnlogs) { C = (1+halpha)^pow } else { C = pow * log(1+halpha) } if(return_dCdtheta){ Q = ((1+halpha)^(pow-1)) gt1 = x1t*log(x1+10^(-2)) gt2 = x2t*log(x2+10^(-2)) lh =outer(gt1,gt2,'-') hnabs = outer(x1ts,x2ts,'-') LO = alpha*expTILT*(pow/expLS)*(abs(h)^(alpha-1)*lh*sign(hnabs)) if (!returnlogs) { dCdtheta = cbind(3*expHE*((1+halpha)^(pow-1))*(halpha),3*C*pow*log(1+halpha),LO*Q) } else { dCdtheta = cbind(3*expHE*halpha/(1+halpha), 3*pow*log(1+halpha), LO/(1+halpha)) } out <- list(C=C,dCdtheta=dCdtheta) return(out) }else{ return(C) } } } #' CauchySQ correlation function #' #' Calculate correlation matrix for two sets of points in one dimension #' Note that this is not the correlation between two vectors. #' #' @inheritParams CGGP_internal_CorrMatCauchy #' #' @return Matrix of correlation values between x1 and x2 #' @export #' @family correlation functions #' #' @examples #' CGGP_internal_CorrMatCauchySQ(c(0,.2,.4),c(.1,.3,.5), theta=c(-.7,-.5)) CGGP_internal_CorrMatCauchySQ <- function(x1, x2,theta, return_dCdtheta = FALSE, return_numpara =FALSE,returnlogs = FALSE) { if(return_numpara){ return(2); }else{ if (length(theta) != 2) {stop("CorrMatCauchySQ theta should be length 2")} diffmat =abs(outer(x1,x2,'-')); expLS = exp(3*theta[1]) expHE = exp(3*theta[2]) h = diffmat/expLS alpha = 2*exp(0+6)/(1+exp(0+6)) halpha = h^alpha pow = -expHE/alpha if(!returnlogs){ C = (1+halpha)^pow }else{ C = pow*log(1+halpha) } if(return_dCdtheta){ if(!returnlogs){ dCdtheta = cbind(3*expHE*((1+halpha)^(pow-1))*(halpha),3*C*pow*log(1+halpha)) }else{ dCdtheta = cbind(3*expHE*halpha/(1+halpha),3*C) } dCdtheta[is.na(dCdtheta)] = 0 out <- list(C=C,dCdtheta=dCdtheta) return(out) }else{ return(C) } } } #' Gaussian correlation function #' #' Calculate correlation matrix for two sets of points in one dimension #' Note that this is not the correlation between two vectors. #' #' WE HIGHLY ADVISE NOT USING THIS CORRELATION FUNCTION. #' Try Power Exponential, CauchySQT, Cauchy, or Matern 3/2 instead. 
#' #' @inheritParams CGGP_internal_CorrMatCauchy #' #' @return Matrix of correlation values between x1 and x2 #' @export #' @family correlation functions #' #' @examples #' CGGP_internal_CorrMatGaussian(c(0,.2,.4),c(.1,.3,.5), theta=c(-.7)) CGGP_internal_CorrMatGaussian <- function(x1, x2,theta, return_dCdtheta = FALSE, return_numpara=FALSE, returnlogs=FALSE) { if(return_numpara){ return(1); }else{ if (length(theta) != 1) {stop("CorrMatGaussian theta should be length 1")} diffmat =abs(outer(x1,x2,'-')) diffmat2 <- diffmat^2 expLS = exp(3*theta[1]) h = diffmat2/expLS # Gaussian corr is awful, always needs a nugget nug <- 1e-10 if (!returnlogs) { C = (1-nug)*exp(-h) + nug*(diffmat<10^(-4)) # C = exp(-h) } else { # C = -h C = (1-nug)*exp(-h) + nug*(diffmat<10^(-4)) C <- log(C) } if(return_dCdtheta){ if (!returnlogs) { dCdtheta <- 3*C*diffmat2 / expLS } else { dCdtheta <- 3*diffmat2 / expLS } dCdtheta[is.na(dCdtheta)] = 0 out <- list(C=C,dCdtheta=dCdtheta) return(out) }else{ return(C) } } } #' Matern 3/2 correlation function #' #' Calculate correlation matrix for two sets of points in one dimension. #' Note that this is not the correlation between two vectors. #' #' @inheritParams CGGP_internal_CorrMatCauchy #' #' @return Matrix of correlation values between x1 and x2 #' @export #' @family correlation functions #' #' @examples #' CGGP_internal_CorrMatMatern32(c(0,.2,.4),c(.1,.3,.5), theta=c(-.7)) CGGP_internal_CorrMatMatern32 <- function(x1, x2,theta, return_dCdtheta=FALSE, return_numpara=FALSE, returnlogs=FALSE) { if(return_numpara){ return(1); }else{ if (length(theta) != 1) {stop("CorrMatMatern32 theta should be length 1")} diffmat =abs(outer(x1,x2,'-')) expLS = exp(3*theta[1]) h = diffmat/expLS if (!returnlogs) { # C = (1-10^(-10))*(1+sqrt(3)*h)*exp(-sqrt(3)*h) + 10^(-10)*(diffmat<10^(-4)) C = (1+sqrt(3)*h)*exp(-sqrt(3)*h) } else { C <- log(1+sqrt(3)*h) - sqrt(3)*h } if(return_dCdtheta){ if (!returnlogs) { dCdtheta <- (sqrt(3)*diffmat*exp(-sqrt(3)*h) - sqrt(3)*C*diffmat) * (-3/expLS) } else { dCdtheta <- (sqrt(3)*diffmat/(1+sqrt(3)*h) - sqrt(3)*diffmat) * (-3/expLS) } dCdtheta[is.na(dCdtheta)] = 0 out <- list(C=C,dCdtheta=dCdtheta) return(out) }else{ return(C) } } } #' Matern 5/2 correlation function #' #' Calculate correlation matrix for two sets of points in one dimension. #' Note that this is not the correlation between two vectors. 
#' #' @inheritParams CGGP_internal_CorrMatCauchy #' #' @return Matrix of correlation values between x1 and x2 #' @export #' @family correlation functions #' #' @examples #' CGGP_internal_CorrMatMatern52(c(0,.2,.4),c(.1,.3,.5), theta=c(-.7)) CGGP_internal_CorrMatMatern52 <- function(x1, x2,theta, return_dCdtheta=FALSE, return_numpara=FALSE, returnlogs=FALSE) { if(return_numpara){ return(1); }else{ if (length(theta) != 1) {stop("CorrMatMatern52 theta should be length 1")} diffmat =abs(outer(x1,x2,'-')) expLS = exp(3*theta[1]) h = diffmat/expLS if (!returnlogs) { # C = (1-10^(-10))*(1+sqrt(5)*h+5/3*h^2)*exp(-sqrt(5)*h) + 10^(-10)*(diffmat<10^(-4)) C = (1+sqrt(5)*h+5/3*h^2)*exp(-sqrt(5)*h) } else { C = log(1+sqrt(5)*h+5/3*h^2) - sqrt(5)*h } if(return_dCdtheta){ if (!returnlogs) { dCdtheta <- ((sqrt(5)*diffmat+10/3*diffmat*h)*exp(-sqrt(5)*h) - sqrt(5)*C*diffmat) * (-3/expLS) } else { dCdtheta <- ((sqrt(5)*diffmat+10/3*diffmat*h)/(1+sqrt(5)*h+5/3*h^2) - sqrt(5)*diffmat) * (-3/expLS) } dCdtheta[is.na(dCdtheta)] = 0 out <- list(C=C,dCdtheta=dCdtheta) return(out) }else{ return(C) } } } #' Power exponential correlation function #' #' Calculate correlation matrix for two sets of points in one dimension. #' Note that this is not the correlation between two vectors. #' #' @inheritParams CGGP_internal_CorrMatCauchy #' #' @return Matrix of correlation values between x1 and x2 # @rdname CGGP_internal_CorrMatCauchy #' @export #' @family correlation functions #' #' @examples #' CGGP_internal_CorrMatPowerExp(c(0,.2,.4),c(.1,.3,.5), theta=c(-.7,.2)) CGGP_internal_CorrMatPowerExp <- function(x1, x2,theta, return_dCdtheta = FALSE, return_numpara=FALSE, returnlogs=FALSE) { if(return_numpara){ return(2); }else{ if (length(theta) != 2) {stop("CorrMatPowerExp theta should be length 2")} diffmat =abs(outer(x1,x2,'-')) tmax <- 3 expLS = exp(tmax*theta[1]) minpower <- 1 maxpower <- 1.95 alpha <- minpower + (theta[2]+1)/2 * (maxpower - minpower) h = diffmat/expLS if (!returnlogs) { # C = (1-nug)*exp(-(h)^alpha) + nug*(diffmat<10^(-4)) C = exp(-(h)^alpha) } else { C = -(h^alpha) } if(return_dCdtheta){ if (!returnlogs) { dCdtheta <- cbind(tmax*alpha*C*diffmat^alpha/expLS^alpha, -C*h^alpha*log(h)/2 * (maxpower - minpower)) } else { dCdtheta <- cbind(tmax*alpha*diffmat^alpha/expLS^alpha, -h^alpha*log(h)/2 * (maxpower - minpower)) } dCdtheta[is.na(dCdtheta)] = 0 out <- list(C=C,dCdtheta=dCdtheta) return(out) }else{ return(C) } } } #' Wendland0 (Triangle) correlation function #' #' Calculate correlation matrix for two sets of points in one dimension. #' Note that this is not the correlation between two vectors. 
#' #' @inheritParams CGGP_internal_CorrMatCauchy #' #' @return Matrix of correlation values between x1 and x2 # @rdname CGGP_internal_CorrMatCauchy #' @export #' @family correlation functions #' #' @examples #' CGGP_internal_CorrMatWendland0(c(0,.2,.4),c(.1,.3,.5), theta=-.7) CGGP_internal_CorrMatWendland0 <- function(x1, x2,theta, return_dCdtheta = FALSE, return_numpara=FALSE, returnlogs=FALSE) { if(return_numpara){ return(1) }else{ if (length(theta) != 1) {stop("CorrMatWendland0 theta should be length 1")} diffmat =abs(outer(x1,x2,'-')) tmax <- 3 expLS = exp(tmax*theta[1]) wherecov = which(diffmat<expLS) h = matrix(0,dim(diffmat)[1],dim(diffmat)[2]) h[wherecov] = 1-diffmat[wherecov] / expLS if (!returnlogs) { C = matrix(0,dim(h)[1],dim(h)[2]) C[wherecov] <- h[wherecov] } else { C = -Inf * matrix(1,dim(h)[1],dim(h)[2]) C[wherecov] = log(h[wherecov]) } if(return_dCdtheta){ if (!returnlogs) { dCdtheta = matrix(0,dim(diffmat)[1],dim(diffmat)[2]) dCdtheta[wherecov] <- tmax * (1-h[wherecov]) } else { dCdtheta = matrix(0,dim(diffmat)[1],dim(diffmat)[2]) dCdtheta[wherecov] <- tmax * ((1-h[wherecov])/h[wherecov]) } dCdtheta[is.na(dCdtheta)] = 0 out <- list(C=C,dCdtheta=dCdtheta) return(out) }else{ return(C) } } } #' Wendland1 1 correlation function #' #' Calculate correlation matrix for two sets of points in one dimension. #' Note that this is not the correlation between two vectors. #' #' @inheritParams CGGP_internal_CorrMatCauchy #' #' @return Matrix of correlation values between x1 and x2 # @rdname CGGP_internal_CorrMatCauchy #' @export #' @family correlation functions #' #' @examples #' CGGP_internal_CorrMatWendland1(c(0,.2,.4),c(.1,.3,.5), theta=-.7) CGGP_internal_CorrMatWendland1 <- function(x1, x2,theta, return_dCdtheta = FALSE, return_numpara=FALSE, returnlogs=FALSE) { if(return_numpara){ return(1) }else{ if (length(theta) != 1) {stop("CorrMatWendland1 theta should be length 1")} diffmat =abs(outer(x1,x2,'-')) tmax <- 3 expLS = exp(tmax*theta[1]) wherecov = which(diffmat<expLS) h = matrix(0,dim(diffmat)[1],dim(diffmat)[2]) h[wherecov] = 1-diffmat[wherecov] / expLS if (!returnlogs) { C = matrix(0,dim(h)[1],dim(h)[2]) C[wherecov] <- h[wherecov]^3 * (4 - 3*h[wherecov]) } else { C = -Inf * matrix(1,dim(h)[1],dim(h)[2]) C[wherecov] = 3* log(h[wherecov]) + log(4 - 3*h[wherecov]) } if(return_dCdtheta){ h2 = 1-h if (!returnlogs) { # dCdtheta <- ifelse(1-h > 0, # # tmax*expLS * (3*(1-h2)^2*(h2/expLS)*(3*h+1) + (1-h2)^3*(-3*h2/expLS)), # tmax*expLS * (3*(1-h2)^2*(h2/expLS)*(4-3*h) + (1-h2)^3*(-3*1/expLS)), # 0) dCdtheta = matrix(0,dim(diffmat)[1],dim(diffmat)[2]) # dCdtheta[wherecov] <- 12*tmax*h[wherecov]^2*(1-h[wherecov]) dCdtheta[wherecov] <- 12*tmax*h[wherecov]^2*(1-h[wherecov]) * diffmat[wherecov] / expLS } else { dCdtheta = matrix(0,dim(diffmat)[1],dim(diffmat)[2]) # dCdtheta[wherecov] <- 12*tmax*(1-h[wherecov])/(h[wherecov] * (4 - 3*h[wherecov])) # dCdtheta[wherecov] <- 12*tmax*h[wherecov]^2*(1-h[wherecov]) * diffmat[wherecov] / expLS / C[wherecov] dCdtheta[wherecov] <- tmax * 3 * (1/h[wherecov] - 1/(4-3*h[wherecov])) * diffmat[wherecov] / expLS } dCdtheta[is.na(dCdtheta)] = 0 out <- list(C=C,dCdtheta=dCdtheta) return(out) }else{ return(C) } } } #' Wendland2 2 correlation function #' #' Calculate correlation matrix for two sets of points in one dimension. #' Note that this is not the correlation between two vectors. 
#' #' @inheritParams CGGP_internal_CorrMatCauchy #' #' @return Matrix of correlation values between x1 and x2 # @rdname CGGP_internal_CorrMatCauchy #' @export #' @family correlation functions #' #' @examples #' CGGP_internal_CorrMatWendland2(c(0,.2,.4),c(.1,.3,.5), theta=-.7) CGGP_internal_CorrMatWendland2 <- function(x1, x2,theta, return_dCdtheta = FALSE, return_numpara=FALSE, returnlogs=FALSE) { if(return_numpara){ return(1) }else{ if (length(theta) != 1) {stop("CorrMatWendland2 theta should be length 1")} diffmat =abs(outer(x1,x2,'-')) tmax <- 3 expLS = exp(tmax*theta[1]) wherecov = which(diffmat<expLS) h = matrix(0,dim(diffmat)[1],dim(diffmat)[2]) h[wherecov] = 1-diffmat[wherecov] / expLS if (!returnlogs) { C = matrix(0,dim(h)[1],dim(h)[2]) C[wherecov] <- h[wherecov]^5 * (8*h[wherecov]^2- 21*h[wherecov] + 14) } else { C = -Inf * matrix(1,dim(h)[1],dim(h)[2]) C[wherecov] = 5* log(h[wherecov]) + log(8*h[wherecov]^2- 21*h[wherecov] + 14) } if(return_dCdtheta){ if (!returnlogs) { dCdtheta = matrix(0,dim(h)[1],dim(h)[2]) dCdtheta[wherecov] <- tmax*14*h[wherecov]^4*(1-h[wherecov])^2*(5-4*h[wherecov]) } else { dCdtheta = matrix(0,dim(h)[1],dim(h)[2]) dCdtheta[wherecov] <- tmax*14*(1-h[wherecov])^2*(5-4*h[wherecov])/(h[wherecov]*(8*h[wherecov]^2- 21*h[wherecov] + 14)) } dCdtheta[is.na(dCdtheta)] = 0 out <- list(C=C,dCdtheta=dCdtheta) return(out) }else{ return(C) } } }
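# Quick sketch (interactive only; toy inputs): every CGGP_internal_CorrMat*
# function shares the same interface, so the analytic dCdtheta can be spot
# checked against a central finite difference in theta.
if (FALSE) {
  x1 <- c(0, .2, .4); x2 <- c(.1, .3, .5); th <- -0.3; eps <- 1e-6
  out <- CGGP_internal_CorrMatMatern32(x1, x2, theta = th, return_dCdtheta = TRUE)
  Cp <- CGGP_internal_CorrMatMatern32(x1, x2, theta = th + eps)
  Cm <- CGGP_internal_CorrMatMatern32(x1, x2, theta = th - eps)
  max(abs(out$dCdtheta - (Cp - Cm) / (2 * eps)))  # should be near zero
}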
/scratch/gouwar.j/cran-all/cranData/CGGP/R/CGGP_corr_fs.R
#' Create sparse grid GP #' #' @param d Input dimension # @param xmin Min x values, vector. Must be rep(0,d). # @param xmax Max x values, vector. Must be rep(1,d). #' @param batchsize Number added to design each batch # @param nugget Nugget term added to diagonal of correlation matrix, #' for now only on predictions #' @param corr Name of correlation function to use. Must be one of "CauchySQT", #' "CauchySQ", "Cauchy", "Gaussian", "PowerExp", "Matern32", "Matern52". #' @param grid_sizes Size of grid refinements. #' @param Xs Supplemental X data #' @param Ys Supplemental Y data #' @param supp_args Arguments used to fit if Xs and Ys are given #' @param HandlingSuppData How should supplementary data be handled? #' * Correct: full likelihood with grid and supplemental data #' * Only: only use supplemental data #' * Ignore: ignore supplemental data #' #' @importFrom stats rbeta #' #' @return CGGP #' @export #' @family CGGP core functions #' #' @examples #' CGGPcreate(d=8,200) CGGPcreate <- function(d, batchsize, corr="PowerExponential", grid_sizes=c(1,2,4,4,8,12,20,28,32), Xs=NULL, Ys=NULL, HandlingSuppData="Correct", supp_args=list() ) { if (d < 2) {stop("d must be at least 2")} # ==================================. # ==== Create CGGP object ==== # ==================================. # This is list representing our GP object CGGP <- list() class(CGGP) <- c("CGGP", "list") # Give it class CGGP CGGP$d <- d CGGP$numPostSamples <- 100 CGGP$HandlingSuppData <- HandlingSuppData CGGP <- CGGP_internal_set_corr(CGGP, corr) CGGP$nugget <- 0 # Partial matching is very bad! Keep these as length 0 instead of NULL, # otherwise CGGP$Y can return CGGP$Ys CGGP$Y <- numeric(0) CGGP$y <- numeric(0) # ====================================================. # ==== If supplemental data is given, fit it here ==== # ====================================================. if (!is.null(Xs) && !is.null(Ys)) { if (!is.null(supp_args) && length(supp_args) > 0 && is.null(names(supp_args))) { stop("Give names for supp_args") } supp_args$CGGP <- CGGP supp_args$Xs <- Xs supp_args$Ys <- Ys CGGP <- do.call(CGGP_internal_fitwithonlysupp, supp_args) } # ===========================================. # ==== Start setting up CGGP stuff ==== # ===========================================. # Levels are blocks. Level is like eta from paper. CGGP$ML = min(choose(CGGP$d + 6, CGGP$d), 10000) #max levels # Track evaluated blocks, aka used levels CGGP$uo = matrix(0, nrow = CGGP$ML, ncol = CGGP$d) # blocks that have been selected CGGP$uoCOUNT = 0 # number of selected blocks # Track the blocks that are allowed to be evaluated CGGP$po = matrix(0, nrow = 4 * CGGP$ML, ncol = CGGP$d) #proposed levels tracker # Only option at first is initial block (1,1,...,1) CGGP$po[1, ] <- rep(1, CGGP$d) CGGP$poCOUNT <- 1 # Ancestors are blocks one level down in any dimension. 
CGGP$maxgridsize = 400 CGGP$pila = matrix(0, nrow = CGGP$ML, ncol =CGGP$maxgridsize ) #proposed immediate level ancestors CGGP$pala = matrix(0, nrow = CGGP$ML, ncol =CGGP$maxgridsize ) #proposedal all level ancestors CGGP$uala = matrix(0, nrow = CGGP$ML, ncol =CGGP$maxgridsize ) #used all level ancestors CGGP$pilaCOUNT = rep(0, CGGP$ML) #count of number of pila CGGP$palaCOUNT = rep(0, CGGP$ML) #count of number of pala CGGP$ualaCOUNT = rep(0, CGGP$ML) #count of number of uala # Initial block (1,1,...,1) has no ancestors CGGP$pilaCOUNT[1] <- 0 CGGP$pila[1, 1] <- 0 # CGGP$sizes = c(1,2,4,4,8,12,32) # Num of points added to 1D design as you go further in any dimension CGGP$sizes <- grid_sizes CGGP$maxlevel = length(CGGP$sizes) # Proposed grid size CGGP$pogsize = rep(0, 4 * CGGP$ML) CGGP$pogsize[1:CGGP$poCOUNT] = apply(matrix(CGGP$sizes[CGGP$po[1:CGGP$poCOUNT, ]], CGGP$poCOUNT, CGGP$d), 1, prod) # Selected sample size CGGP$ss = 0 CGGP$w = rep(0, CGGP$ML) #keep track of + and - for prediction CGGP$uoCOUNT = 0 ###1 # Number of used levels # =========================. # ==== Add Blocks ==== # =========================. # sample has unexpected behavior, e.g., sample(34,1), see help file for sample resample <- function(x, ...) x[sample.int(length(x), ...)] # While number selected + min sample size <= batch size, i.e., # still have enough spots for a block, keep adding blocks while (batchsize > (CGGP$ss + min(CGGP$pogsize[1:CGGP$poCOUNT]) - 0.5)) { CGGP$uoCOUNT = CGGP$uoCOUNT + 1 #increment used count if (CGGP$uoCOUNT < 1.5) { # Nothing picked yet, so take base block (1,1,...,1) pstar <- 1 } else if (CGGP$uoCOUNT < (CGGP$d + 1.5)) { # Next d iterations pick the (2,1,1,1,1),(1,2,1,1,1) blocks b/c we need # info on each dimension before going adaptive pstar = 1 } else{ # Next d iterations randomly pick from boxes w/ min # of pts criteriahere = rowSums(CGGP$po[1:CGGP$poCOUNT,]) A1 = (CGGP$pogsize[1:CGGP$poCOUNT] < min(batchsize - CGGP$ss + 0.5,CGGP$maxgridsize)) MCN = min(criteriahere[A1]) A2 = criteriahere <= 0.5 + MCN pstar <- resample(which(A1 & A2), 1) } l0 = CGGP$po[pstar, ] # Selected block e.g. (2,1,1,2) # Need to make sure there is still an open row in uo to set with new values if (CGGP$uoCOUNT > nrow(CGGP$uo)) { CGGP <- CGGP_internal_addrows(CGGP) } CGGP$uo[CGGP$uoCOUNT, ] = l0 # Store new block CGGP$ss = CGGP$ss + CGGP$pogsize[pstar] # Update selected sample size # Ancestors of block just selected # Need to give possibility for initial block, has no ancestors, and 1:0 is bad new_an = CGGP$pila[pstar, if (CGGP$pilaCOUNT[pstar]>.5) {1:CGGP$pilaCOUNT[pstar]} else {numeric(0)}] total_an = new_an # Loop over ancestors of block just selected if (length(total_an) > .5) { # Initial block has no total_an , need this to avoid 1:0 for (anlcv in 1:length(total_an)) { # If more than one ancestor, update with unique ones. 
if (total_an[anlcv] > 1.5) { total_an = unique(c(total_an, CGGP$uala[total_an[anlcv], 1:CGGP$ualaCOUNT[total_an[anlcv]]])) } } # Update storage of ancestors CGGP$ualaCOUNT[CGGP$uoCOUNT] = length(total_an) CGGP$uala[CGGP$uoCOUNT, 1:length(total_an)] = total_an } # Loop over ancestors, update coefficient if (length(total_an) > .5) { # Initial block has no total_an , need this to avoid 1:0 for (anlcv in 1:length(total_an)) { lo = CGGP$uo[total_an[anlcv], ] if (max(abs(lo - l0)) < 1.5) { CGGP$w[total_an[anlcv]] = CGGP$w[total_an[anlcv]] + (-1) ^ abs(round(sum(l0-lo))) } } } CGGP$w[CGGP$uoCOUNT] = CGGP$w[CGGP$uoCOUNT] + 1 CGGP$po[pstar,] <- 0 # Clear just used row if (CGGP$poCOUNT > 1.5) { # Move up other options if there are others left po_rows_to_move <- (1:CGGP$poCOUNT)[-pstar] # Moving all po rows except selected CGGP$po[1:(CGGP$poCOUNT - 1), ] = CGGP$po[po_rows_to_move, ] CGGP$pila[1:(CGGP$poCOUNT - 1), ] = CGGP$pila[po_rows_to_move, ] CGGP$pilaCOUNT[1:(CGGP$poCOUNT - 1)] = CGGP$pilaCOUNT[po_rows_to_move] CGGP$pogsize[1:(CGGP$poCOUNT - 1)] = CGGP$pogsize[po_rows_to_move] } # One less option now CGGP$poCOUNT = CGGP$poCOUNT - 1 # Loop over dimensions to add new possible blocks for (dimlcv in 1:CGGP$d) { # The block e.g. (1,2,1,1,3) just selected lp = l0 # Increase single dimension by 1, will see if it is possible lp[dimlcv] = lp[dimlcv] + 1 # Check if within bounds if (max(lp) <= CGGP$maxlevel && CGGP$poCOUNT < 4*CGGP$ML) { # Dimensions which are past first design level kvals = which(lp > 1.5) canuse = 1 # Can this block be used? Will be set to 0 below if not. ap = rep(0, CGGP$d) # Ancestors nap = 0 # Number ancestors # Loop over dims at 2+ for (activedimlcv in 1:length(kvals)) { lpp = lp # The block selected with 1 dim incremented lpp[kvals[activedimlcv]] = lpp[kvals[activedimlcv]] - 1 ismem = rep(1, CGGP$uoCOUNT) # Boolean # Loop over dimensions for (dimdimlcv in 1:CGGP$d) { # Set to 0 or 1 if all points already selected have same value ismem = ismem * (CGGP$uo[1:CGGP$uoCOUNT, dimdimlcv] == lpp[dimdimlcv]) } # If any are still 1, if (max(ismem) > 0.5) { ap[activedimlcv] = which(ismem > 0.5) nap = nap + 1 # Count number that are >=1 } else{ # All are 0, so can't use canuse = 0 } } # If it can be used, add to possible blocks if (canuse > 0.5) { CGGP$poCOUNT = CGGP$poCOUNT + 1 CGGP$po[CGGP$poCOUNT, ] = lp CGGP$pogsize[CGGP$poCOUNT] = prod(CGGP$sizes[lp]) CGGP$pila[CGGP$poCOUNT, 1:nap] = ap[1:nap] CGGP$pilaCOUNT[CGGP$poCOUNT] = nap } } } } # Create points for design # These are distances from the center 0.5. xb = rep( c( 3 / 8, # 1/8, 7/8 1 / 2, # 0, 1 1 / 4, # 1/4, 3/4 1 / 8, # 3/8, 5/8 15 / 32, # etc 7 / 16, 3 / 16, 5 / 16, 7 / 32, 11 / 32, 3 / 32, 13 / 32, 9 / 32, 5 / 32, 1 / 32, 1 / 16, seq(31,1,-4)/64, seq(29,1,-4)/64, seq(63,1,-4)/128, seq(61,1,-4)/128 ), "each" = 2 ) CGGP$xb = 0.5 + c(0, xb * rep(c(-1, 1), length(xb) / 2)) CGGP$xindex = 1:length(xb) # Why not length(CGGP$xb), which is one longer than xb? # After this xb is # [1] 0.50000 0.12500 0.87500 0.25000 0.75000 0.37500 0.62500 0.28125 # 0.71875 0.31250 0.68750 0.00000 1.00000 0.18750 0.81250 # [16] 0.06250 0.93750 0.43750 0.56250 0.40625 0.59375 0.09375 0.90625 # 0.21875 0.78125 0.34375 0.65625 0.46875 0.53125 0.15625 # [31] 0.84375 0.03125 0.96875 CGGP$sizest = cumsum(CGGP$sizes) # Total # of points in 1D design along axis # ======================================. # ==== Get design and return object ==== # ======================================. # This is all to create design from uo. # If only supp data is given, don't run it. 
if (CGGP$uoCOUNT > 0) { # Get design from uo and other data CGGP <- CGGP_internal_getdesignfromCGGP(CGGP) CGGP$design_unevaluated <- CGGP$design } return(CGGP) }
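# End-to-end sketch (interactive only; toy test function): CGGPcreate builds
# the design, the user evaluates CGGP$design, and CGGPfit (defined elsewhere
# in this package) estimates the correlation parameters.
if (FALSE) {
  cg <- CGGPcreate(d = 3, batchsize = 60)
  Y <- apply(cg$design, 1, function(x) x[1] + x[2]^2 + cos(2 * pi * x[3]))
  cg <- CGGPfit(cg, Y = Y)        # full Y vector
  # cg <- CGGPfit(cg, Ynew = Y)   # equivalent here: only new rows supplied
  cg$thetaMAP
}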
/scratch/gouwar.j/cran-all/cranData/CGGP/R/CGGP_create_fs.R
#' Calculate predictive weights for CGGP #' #' Predictive weights are Sigma^(-1)*y in standard GP. #' This calculation is much faster since we don't need to #' solve the full system of equations. #' #' @param CGGP CGGP object #' @param y Measured values for CGGP$design #' @param theta Correlation parameters #' @param return_lS Should lS be returned? #' #' @return Vector with predictive weights #' @export #' #' @examples #' cggp <- CGGPcreate(d=3, batchsize=100) #' y <- apply(cggp$design, 1, function(x){x[1]+x[2]^2+rnorm(1,0,.01)}) #' CGGP_internal_calcpw(CGGP=cggp, y=y, theta=cggp$thetaMAP) CGGP_internal_calcpw <- function(CGGP, y, theta, return_lS=FALSE) { Q = max(CGGP$uo[1:CGGP$uoCOUNT,]) # Max value of all blocks # Now going to store choleskys instead of inverses for stability #CiS = list(matrix(1,1,1),Q*CGGP$d) # A list of matrices, Q for each dimension cholS = list(matrix(1,1,1),Q*CGGP$d) # To store choleskys lS = matrix(0, nrow = max(CGGP$uo[1:CGGP$uoCOUNT,]), ncol = CGGP$d) # Save log determinant of matrices # Loop over each dimension for (dimlcv in 1:CGGP$d) { # Loop over each possible needed correlation matrix for (levellcv in 1:max(CGGP$uo[1:CGGP$uoCOUNT,dimlcv])) { Xbrn = CGGP$xb[1:CGGP$sizest[levellcv]] Xbrn = Xbrn[order(Xbrn)] Sstuff = CGGP$CorrMat(Xbrn, Xbrn , theta[(dimlcv-1)*CGGP$numpara+1:CGGP$numpara],return_dCdtheta = FALSE) S = Sstuff # When theta is large (> about 5), the matrix is essentially all 1's, can't be inverted solvetry <- try({ cS = chol(S) cholS[[(dimlcv-1)*Q+levellcv]]= cS+t(cS)-diag(diag(cS)) #store the symmetric version for C code }, silent = TRUE) if (inherits(solvetry, "try-error")) {return(Inf)} lS[levellcv, dimlcv] = 2*sum(log(diag(cS))) } } if(!is.matrix(y)){ pw = rep(0, length(y)) # Predictive weight for each measured point # Loop over blocks selected gg = (1:CGGP$d-1)*Q for (blocklcv in 1:CGGP$uoCOUNT) { if(abs(CGGP$w[blocklcv])>0.5){ IS = CGGP$dit[blocklcv, 1:CGGP$gridsizet[blocklcv]]; B = y[IS] rcpp_kronDBS(unlist(cholS[gg+CGGP$uo[blocklcv,]]), B, CGGP$gridsizest[blocklcv,]) pw[IS] = pw[IS]+CGGP$w[blocklcv] * B } } if (return_lS) { return(list(pw=pw, lS=lS)) }else{ return(pw) } }else{ numout = dim(y)[2] pw = matrix(0,nrow=dim(y)[1],ncol=numout) # Predictive weight for each measured point # Loop over blocks selected gg = (1:CGGP$d-1)*Q for (blocklcv in 1:CGGP$uoCOUNT) { if(abs(CGGP$w[blocklcv])>0.5){ IS = CGGP$dit[blocklcv, 1:CGGP$gridsizet[blocklcv]]; VVV1 = unlist(cholS[gg+CGGP$uo[blocklcv,]]); VVV2 = CGGP$gridsizest[blocklcv,]; for(outdimlcv in 1:numout){ B = y[IS,outdimlcv] rcpp_kronDBS(VVV1, B, VVV2) pw[IS,outdimlcv] = pw[IS,outdimlcv]+CGGP$w[blocklcv] * B } } } if (return_lS) { return(list(pw=pw, lS=lS)) }else{ return(pw) } } } #' Calculate derivative of pw #' #' @inheritParams CGGP_internal_calcpw #' @param return_lS Should lS and dlS be returned? 
#' #' @return derivative matrix of pw with respect to logtheta #' @export #' @import Rcpp #' #' @examples #' cggp <- CGGPcreate(d=3, batchsize=100) #' y <- apply(cggp$design, 1, function(x){x[1]+x[2]^2+rnorm(1,0,.01)}) #' CGGP_internal_calcpwanddpw(CGGP=cggp, y=y, theta=cggp$thetaMAP) CGGP_internal_calcpwanddpw <- function(CGGP, y, theta, return_lS=FALSE) { Q = max(CGGP$uo[1:CGGP$uoCOUNT,]) # Max level of all blocks cholS = list(matrix(1,1,1),Q*CGGP$d) # To store choleskys dMatdtheta = list(matrix(1,1,1),Q*CGGP$d) if(return_lS){ lS = matrix(0, nrow = max(CGGP$uo[1:CGGP$uoCOUNT,]), ncol = CGGP$d) # Save log determinant of matrices dlS = matrix(0, nrow = max(CGGP$uo[1:CGGP$uoCOUNT,]), ncol = CGGP$numpara*CGGP$d) } # Loop over each dimension for (dimlcv in 1:CGGP$d) { # Loop over depth of each dim for (levellcv in 1:max(CGGP$uo[1:CGGP$uoCOUNT,dimlcv])) { Xbrn = CGGP$xb[1:CGGP$sizest[levellcv]] Xbrn = Xbrn[order(Xbrn)] nv = length(Xbrn); Sstuff = CGGP$CorrMat(Xbrn, Xbrn , theta[(dimlcv-1)*CGGP$numpara+1:CGGP$numpara],return_dCdtheta = TRUE) S = Sstuff$C cS = chol(S) cholS[[(dimlcv-1)*Q+levellcv]] = cS+t(cS)-diag(diag(cS)) #store the symmetric version for C code dMatdtheta[[(dimlcv-1)*Q+levellcv]] = -backsolve(cS,backsolve(cS,Sstuff$dCdtheta, transpose = TRUE)) for(paralcv in 1:CGGP$numpara){ dMatdtheta[[(dimlcv-1)*Q+levellcv]][1:nv,nv*(paralcv-1)+1:nv] = t(dMatdtheta[[(dimlcv-1)*Q+levellcv]][1:nv,nv*(paralcv-1)+1:nv]) } if(return_lS){ lS[levellcv, dimlcv] = 2*sum(log(diag(cS))) for(paralcv in 1:CGGP$numpara){ if(nv > 1.5){ dlS[levellcv, CGGP$numpara*(dimlcv-1)+paralcv] = -sum(diag(dMatdtheta[[(dimlcv-1)*Q+levellcv]][1:nv,nv*(paralcv-1)+1:nv])) } else { dlS[levellcv, CGGP$numpara*(dimlcv-1)+paralcv] = -dMatdtheta[[(dimlcv-1)*Q+levellcv]][1:nv,nv*(paralcv-1)+1:nv] } } } } } pw = rep(0, length(y)) # predictive weights dpw = matrix(0, nrow = CGGP$numpara*CGGP$d, ncol = length(y)) # derivative of predictive weights gg = (1:CGGP$d-1)*Q for (blocklcv in 1:CGGP$uoCOUNT) { if(abs(CGGP$w[blocklcv])>0.5){ IS = CGGP$dit[blocklcv, 1:CGGP$gridsizet[blocklcv]]; B = CGGP$w[blocklcv]*y[IS] dB = rcpp_gkronDBS(unlist(cholS[gg+CGGP$uo[blocklcv,]]),unlist(dMatdtheta[gg+CGGP$uo[blocklcv,]]), B, CGGP$gridsizest[blocklcv,]) dpw[,IS] = dpw[,IS] +dB pw[IS] = pw[IS] + B } } dpw =t(dpw) out <- list(pw=pw, dpw=dpw) if (return_lS) { out$lS <- lS out$dlS <- dlS } out } CGGP_internal_calcsigma2 <- function(CGGP, y, theta, return_lS=FALSE) { Q = max(CGGP$uo[1:CGGP$uoCOUNT,]) # Max level of all blocks cholS = list(matrix(1,1,1),Q*CGGP$d) # To store choleskys if(return_lS){ lS = matrix(0, nrow = max(CGGP$uo[1:CGGP$uoCOUNT,]), ncol = CGGP$d) # Save log determinant of matrices } # Loop over each dimension for (dimlcv in 1:CGGP$d) { # Loop over depth of each dim for (levellcv in 1:max(CGGP$uo[1:CGGP$uoCOUNT,dimlcv])) { Xbrn = CGGP$xb[1:CGGP$sizest[levellcv]] Xbrn = Xbrn[order(Xbrn)] nv = length(Xbrn); Sstuff = CGGP$CorrMat(Xbrn, Xbrn , theta[(dimlcv-1)*CGGP$numpara+1:CGGP$numpara],return_dCdtheta = FALSE) S = Sstuff # cS = chol(S) cS = try(chol(S)) if (inherits(cS, "try-error")) { stop("Cholesky error in CGGP_internal_calcsigma2") } cholS[[(dimlcv-1)*Q+levellcv]] = cS+t(cS)-diag(diag(cS)) #store the symmetric version for C code if(return_lS){ lS[levellcv, dimlcv] = 2*sum(log(diag(cS))) } } } if(is.matrix(y)){ numout = dim(y)[2] sigma2 = rep(0,numout) # Predictive weight for each measured point gg = (1:CGGP$d-1)*Q for (blocklcv in 1:CGGP$uoCOUNT) { if(abs(CGGP$w[blocklcv])>0.5){ IS = CGGP$dit[blocklcv, 1:CGGP$gridsizet[blocklcv]]; 
VVV1=unlist(cholS[gg+CGGP$uo[blocklcv,]]) VVV3=CGGP$gridsizest[blocklcv,] for(outdimlcv in 1:numout){ B0 = y[IS,outdimlcv] B = (CGGP$w[blocklcv]/dim(y)[1])*B0 rcpp_kronDBS(VVV1,B,VVV3) sigma2[outdimlcv] = sigma2[outdimlcv] + t(B0)%*%B } } } out <- list(sigma2=sigma2) if (return_lS) { out$lS <- lS } }else{ sigma2 = 0 # Predictive weight for each measured point dsigma2 = rep(0,nrow=CGGP$d) # Predictive weight for each measured point gg = (1:CGGP$d-1)*Q for (blocklcv in 1:CGGP$uoCOUNT) { if(abs(CGGP$w[blocklcv])>0.5){ IS = CGGP$dit[blocklcv, 1:CGGP$gridsizet[blocklcv]]; B0 = y[IS] B = (CGGP$w[blocklcv]/length(y))*B0 rcpp_kronDBS(unlist(cholS[gg+CGGP$uo[blocklcv,]]),B, CGGP$gridsizest[blocklcv,]) sigma2 = sigma2 + t(B0)%*%B if (any(is.na(sigma2))) {warning("sigma2 is NA in CGGP_internal_calcsigma2")} } } out <- list(sigma2=sigma2) if (return_lS) { out$lS <- lS } } return(out) } CGGP_internal_calcsigma2anddsigma2 <- function(CGGP, y, theta, return_lS=FALSE) { Q = max(CGGP$uo[1:CGGP$uoCOUNT,]) # Max level of all blocks cholS = list(matrix(1,1,1),Q*CGGP$d) # To store choleskys dMatdtheta = list(matrix(1,1,1),Q*CGGP$d) if(return_lS){ lS = matrix(0, nrow = max(CGGP$uo[1:CGGP$uoCOUNT,]), ncol = CGGP$d) # Save log determinant of matrices dlS = matrix(0, nrow = max(CGGP$uo[1:CGGP$uoCOUNT,]), ncol = CGGP$numpara*CGGP$d) } # Loop over each dimension for (dimlcv in 1:CGGP$d) { # Loop over depth of each dim for (levellcv in 1:max(CGGP$uo[1:CGGP$uoCOUNT,dimlcv])) { Xbrn = CGGP$xb[1:CGGP$sizest[levellcv]] Xbrn = Xbrn[order(Xbrn)] nv = length(Xbrn); Sstuff = CGGP$CorrMat(Xbrn, Xbrn , theta[(dimlcv-1)*CGGP$numpara+1:CGGP$numpara],return_dCdtheta = TRUE) S = Sstuff$C cS = chol(S) cholS[[(dimlcv-1)*Q+levellcv]] = cS+t(cS)-diag(diag(cS)) #store the symmetric version for C code dMatdtheta[[(dimlcv-1)*Q+levellcv]] = -backsolve(cS,backsolve(cS,Sstuff$dCdtheta, transpose = TRUE)) for(paralcv in 1:CGGP$numpara){ dMatdtheta[[(dimlcv-1)*Q+levellcv]][1:nv,nv*(paralcv-1)+1:nv] = t(dMatdtheta[[(dimlcv-1)*Q+levellcv]][1:nv,nv*(paralcv-1)+1:nv]) } if(return_lS){ lS[levellcv, dimlcv] = 2*sum(log(diag(cS))) for(paralcv in 1:CGGP$numpara){ if(nv > 1.5){ dlS[levellcv, CGGP$numpara*(dimlcv-1)+paralcv] = -sum(diag(dMatdtheta[[(dimlcv-1)*Q+levellcv]][1:nv,nv*(paralcv-1)+1:nv])) } else { dlS[levellcv, CGGP$numpara*(dimlcv-1)+paralcv] = -dMatdtheta[[(dimlcv-1)*Q+levellcv]][1:nv,nv*(paralcv-1)+1:nv] } } } } } if(is.matrix(y)){ numout = dim(y)[2] sigma2 = rep(0,numout) # Predictive weight for each measured point dsigma2 = matrix(0,nrow=CGGP$numpara*CGGP$d,ncol=numout) # Predictive weight for each measured point gg = (1:CGGP$d-1)*Q for (blocklcv in 1:CGGP$uoCOUNT) { if(abs(CGGP$w[blocklcv])>0.5){ IS = CGGP$dit[blocklcv, 1:CGGP$gridsizet[blocklcv]]; VVV1=unlist(cholS[gg+CGGP$uo[blocklcv,]]) VVV2=unlist(dMatdtheta[gg+CGGP$uo[blocklcv,]]) VVV3=CGGP$gridsizest[blocklcv,] for(outdimlcv in 1:numout){ B0 = y[IS,outdimlcv] B = (CGGP$w[blocklcv]/dim(y)[1])*B0 dB = rcpp_gkronDBS(VVV1,VVV2,B,VVV3) dsigma2[,outdimlcv] = dsigma2[,outdimlcv] + as.vector(dB%*%B0) sigma2[outdimlcv] = sigma2[outdimlcv] + sum(B0*B) } } } out <- list(sigma2=sigma2, dsigma2=dsigma2) if (return_lS) { out$lS <- lS out$dlS <- dlS } }else{ sigma2 = 0 # Predictive weight for each measured point dsigma2 = rep(0,nrow=CGGP$d) # Predictive weight for each measured point gg = (1:CGGP$d-1)*Q for (blocklcv in 1:CGGP$uoCOUNT) { if(abs(CGGP$w[blocklcv])>0.5){ IS = CGGP$dit[blocklcv, 1:CGGP$gridsizet[blocklcv]]; B0 = y[IS] B = (CGGP$w[blocklcv]/length(y))*B0 dB = 
rcpp_gkronDBS(unlist(cholS[gg+CGGP$uo[blocklcv,]]),unlist(dMatdtheta[gg+CGGP$uo[blocklcv,]]), B, CGGP$gridsizest[blocklcv,]) dsigma2 = dsigma2 +t(B0)%*%t(dB) sigma2 = sigma2 + t(B0)%*%B } } out <- list(sigma2=sigma2, dsigma2=dsigma2) if (return_lS) { out$lS <- lS out$dlS <- dlS } } out }
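# Consistency sketch (interactive only; toy output): the sigma2 value from the
# gradient-carrying helper should match the plain sigma2 calculation, since
# both sum the same block-weighted quadratic forms. Internal functions, so :::.
if (FALSE) {
  cg <- CGGPcreate(d = 3, batchsize = 50)
  y <- apply(cg$design, 1, function(x) x[1] * x[2] + x[3])  # toy output
  y <- y - mean(y)
  th <- rep(-0.3, cg$d * cg$numpara)
  s2_plain <- CGGP:::CGGP_internal_calcsigma2(cg, y, th)$sigma2
  s2_grad <- CGGP:::CGGP_internal_calcsigma2anddsigma2(cg, y, th)$sigma2
  c(s2_plain, s2_grad)  # expected to agree
}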
/scratch/gouwar.j/cran-all/cranData/CGGP/R/CGGP_fastcalcassist_fs.R
#' Update CGGP model given data #' #' This function will update the GP parameters for a CGGP design. #' #' @param CGGP Sparse grid objects #' @param Y Output values calculated at CGGP$design #' @param Xs Supplemental X matrix #' @param Ys Supplemental Y values #' @param theta0 Initial theta #' @param Ynew Values of `CGGP$design_unevaluated` #' @param separateoutputparameterdimensions If multiple output dimensions, #' should separate parameters be fit to each dimension? #' @param set_thetaMAP_to Value for thetaMAP to be set to #' @param HandlingSuppData How should supplementary data be handled? #' * Correct: full likelihood with grid and supplemental data #' * Only: only use supplemental data #' * Ignore: ignore supplemental data #' @param corr Will update correlation function, if left missing it will be #' same as last time. #' #' @importFrom stats optim rnorm runif nlminb #' #' @return Updated CGGP object fit to data given #' @export #' @family CGGP core functions #' #' @examples #' cg <- CGGPcreate(d=3, batchsize=100) #' y <- apply(cg$design, 1, function(x){x[1]+x[2]^2}) #' cg <- CGGPfit(CGGP=cg, Y=y) CGGPfit <- function(CGGP, Y, Xs=NULL,Ys=NULL, theta0 = pmax(pmin(CGGP$thetaMAP,0.8),-0.8), #gotta pull away from edges to get not corner solution HandlingSuppData=CGGP$HandlingSuppData, separateoutputparameterdimensions=is.matrix(CGGP$thetaMAP), set_thetaMAP_to, corr, Ynew) { # ========================================. # ==== Check inputs, get Y from Ynew ==== # ========================================. # If different correlation function is given, update it if (!missing(corr)) { message("Changing correlation function") CGGP <- CGGP_internal_set_corr(CGGP, corr) } # If Y or Ynew is matrix with 1 column, convert it to vector to avoid issues if (!missing(Y) && is.matrix(Y) && ncol(Y)==1) { Y <- c(Y) } if (!missing(Ynew) && is.matrix(Ynew) && ncol(Ynew)==1) { Ynew <- c(Ynew) } # If Ynew is given, it is only the points that were added last iteration. # Append it to previous Y if (!missing(Ynew)) { if (!missing(Y)) {stop("Don't give both Y and Ynew, only one")} if (is.null(CGGP$Y) || length(CGGP$Y)==0) { if (is.matrix(Ynew) && nrow(Ynew) != nrow(CGGP$design_unevaluated)) { stop("nrow(Ynew) doesn't match") } if (!is.matrix(Ynew) && length(Ynew) != nrow(CGGP$design_unevaluated)) { stop("length(Ynew) doesn't match") } Y <- Ynew } else if (is.matrix(CGGP$Y)) { if (!is.matrix(Ynew)) {stop("Ynew should be a matrix")} if (nrow(Ynew) != nrow(CGGP$design_unevaluated)) { stop("Ynew is wrong size") } Y <- rbind(CGGP$Y, Ynew) } else { # is numeric vector if (length(Ynew) != nrow(CGGP$design_unevaluated)) { stop("Ynew is wrong size") } Y <- c(CGGP$Y, Ynew) } } if ((is.matrix(Y) && nrow(Y) == nrow(CGGP$design)) || (length(Y) == nrow(CGGP$design))) { CGGP$design_unevaluated <- NULL } else { stop("CGGP$design and Y have different length") } # ====================================================================. # Do the pre-processing ==== # For cleanness: Y is always the user input, y is after transformation # ====================================================================. 
CGGP$Y = Y if (any(is.na(Y))) { message(paste0(sum(is.na(Y)), "/",length(Y)," Y values are NA, will impute them")) } if(is.null(Xs)){ # No supplemental data CGGP$supplemented = FALSE if(!is.matrix(Y)){ CGGP$mu = mean(Y[!is.na(Y)]) y = Y-CGGP$mu }else{ # Y is matrix, PCA no longer an option CGGP$mu = colMeans(Y) for(oplcv in 1:dim(Y)[2]){ CGGP$mu[oplcv] = mean(Y[!is.na(Y[,oplcv]),oplcv]) } y <- sweep(Y, 2, CGGP$mu) # Need to set CGGP$M somewhere so that it doesn't use transformation } CGGP$y = y } else{ # Has supp data, used for prediction but not for fitting params # stop("Not working for supp") CGGP$supplemented = TRUE CGGP$Xs = Xs CGGP$Ys = Ys if(!is.matrix(Y)){ CGGP$mu = mean(Ys[!is.na(Ys)]) y = Y-CGGP$mu ys = Ys-CGGP$mu } else{ # PCA no longer an option CGGP$mu = colMeans(Ys) # Could use Y, or colMeans(rbind(Y, Ys)), for(oplcv in 1:dim(Ys)[2]){ CGGP$mu[oplcv] = mean(Ys[!is.na(Ys[,oplcv]),oplcv]) } # or make sure Ys is big enough for this y <- sweep(Y, 2, CGGP$mu) ys <- sweep(Ys, 2, CGGP$mu) } CGGP$y = y CGGP$ys = ys } # nopd is numberofoutputparameterdimensions nopd <- if (separateoutputparameterdimensions && is.matrix(y)) { ncol(y) } else { 1 } # Fix theta0 if (nopd > 1) { if (is.vector(theta0)) { theta0 <- matrix(theta0, nrow=length(theta0), ncol=nopd, byrow=F) } } # Can get an error for theta0 if number of PCA dimensions has changed if (is.matrix(theta0) && (ncol(theta0) != nopd)) { stop(paste("theta0 should have", nopd, "columns")) } # =======================================================. # Fit parameters for each output parameter dimension ==== # =======================================================. for (opdlcv in 1:nopd) { # output parameter dimension y.thisloop <- if (nopd==1) {y} else {y[,opdlcv]} # All of y or single column if (!is.null(Ys)) {ys.thisloop <- if (nopd==1) {ys} else {ys[,opdlcv]}} else {ys.thisloop <- NULL} theta0.thisloop <- if (nopd==1) {theta0} else {theta0[,opdlcv]} if(any(is.na(y.thisloop))){ if (!missing(set_thetaMAP_to) && !is.null(set_thetaMAP_to)) { opt.out <- list(par = if (nopd>1) {set_thetaMAP_to[,opdlcv]} else {set_thetaMAP_to}) y.thisloop=CGGP_internal_imputesomegrid(CGGP,y.thisloop,opt.out) repeattimes = 1 }else{ y.orig = y.thisloop y.thisloop=CGGP_internal_imputesomegrid(CGGP,y.thisloop,theta0.thisloop) repeattimes = 10 } }else{ repeattimes = 1 } for(imputelcv in 1:repeattimes){ if(imputelcv > 1.5){ if(imputelcv < 2.5){ y.thisloop=CGGP_internal_imputesomegrid(CGGP,y.orig,thetaMAP) }else{ ystart = y.thisloop y.thisloop=CGGP_internal_imputesomegrid(CGGP,y.orig,thetaMAP,ystart=ystart) if(max(abs(y.thisloop-ystart))<10^(-10)*max(abs(ystart))){ break } } } # Find MAP theta if (!missing(set_thetaMAP_to) && !is.null(set_thetaMAP_to)) { opt.out <- list(par = if (nopd>1) {set_thetaMAP_to[,opdlcv]} else {set_thetaMAP_to}) } else if (is.null(CGGP$Xs)){ # No supp data, just optimize opt.out = nlminb( theta0.thisloop, objective = CGGP_internal_neglogpost, gradient = CGGP_internal_gneglogpost, y = y.thisloop, CGGP = CGGP, ys = ys.thisloop, Xs = Xs, HandlingSuppData=HandlingSuppData, control = list(rel.tol = 1e-4,iter.max = 500) ) } else { # W/ supp data, optimize on grid first, then with both # Only grid data b/c it's fast opt.out = nlminb( theta0.thisloop, objective = CGGP_internal_neglogpost, gradient = CGGP_internal_gneglogpost, y = y.thisloop, CGGP = CGGP, HandlingSuppData="Ignore", # Never supp data here, so set to Ignore # regardless of user setting lower=rep(-.9, CGGP$d), upper=rep( .9, CGGP$d), control = list(rel.tol = 1e-2,iter.max = 500) ) 
neglogpost_par <- CGGP_internal_neglogpost(theta=opt.out$par, CGGP=CGGP, y=y.thisloop, ys=ys.thisloop, Xs=Xs, HandlingSuppData=HandlingSuppData ) if (is.infinite(neglogpost_par)) { theta0_2 <- rep(0, CGGP$d) } else { theta0_2 <- opt.out$par } # Then use best point as initial point with supp data opt.out = nlminb( theta0_2, objective = CGGP_internal_neglogpost, gradient = CGGP_internal_gneglogpost, y = y.thisloop, ys = ys.thisloop, Xs = Xs, CGGP = CGGP, HandlingSuppData = HandlingSuppData, control = list(rel.tol = 1e-4,iter.max = 500) ) } # End supp data opt thetaMAP <- opt.out$par sigma2MAP <- CGGP_internal_calcsigma2anddsigma2(CGGP=CGGP, y=y.thisloop, theta=thetaMAP, return_lS=FALSE)$sigma2 if(imputelcv > 1.5){ if(all(abs(sigma2MAP0-sigma2MAP)<0.025*sigma2MAP)){ break } sigma2MAP0 = sigma2MAP }else{ # On first time through sigma2MAP0 = sigma2MAP } } # ===================================. # Update parameters and samples ==== # ===================================. # Set new theta # If one value, it gives it as matrix. Convert it to scalar if (length(sigma2MAP) == 1) {sigma2MAP <- sigma2MAP[1,1]} lik_stuff <- CGGP_internal_calc_cholS_lS_sigma2_pw(CGGP=CGGP, y.thisloop, theta=thetaMAP) cholSs = lik_stuff$cholS pw <- lik_stuff$pw totnumpara = length(thetaMAP) # H is the Hessian at thetaMAP with reverse transformation H = matrix(0,nrow=totnumpara,ncol=totnumpara) # Transform so instead of -1 to 1 it is -Inf to Inf. Mostly in -5 to 5. PSTn= log((1+thetaMAP)/(1-thetaMAP)) # Reverse transformation thetav=(exp(PSTn)-1)/(exp(PSTn)+1) # Grad of reverse transformation function grad0 = CGGP_internal_gneglogpost(thetav,CGGP,y.thisloop, Xs=Xs, ys=ys.thisloop, HandlingSuppData=HandlingSuppData) * (2*(exp(PSTn))/(exp(PSTn)+1)^2) dimensions_that_need_fixing <- c() for(ipara in 1:totnumpara){ rsad = rep(0,totnumpara) rsad[ipara] =10^(-3) PSTn= log((1+thetaMAP)/(1-thetaMAP)) + rsad thetav=(exp(PSTn)-1)/(exp(PSTn)+1) PSTn2= log((1+thetaMAP)/(1-thetaMAP)) - rsad thetav2=(exp(PSTn2)-1)/(exp(PSTn2)+1) # There can be issues if gneglogpost can't be calculated at +/- epsilon, # happens when theta is at the edge of allowing matrix to be Cholesky # decomposed. Check here for that, use one side approx if only one grad # can be calculated. If both fail, no clue what to do. g_plus <- (CGGP_internal_gneglogpost(thetav,CGGP,y.thisloop, Xs=Xs, ys=ys.thisloop, HandlingSuppData=HandlingSuppData) * (2*(exp(PSTn))/(exp(PSTn)+1)^2)-grad0 )*10^(3)/2 g_minus <- (CGGP_internal_gneglogpost(thetav2,CGGP,y.thisloop, Xs=Xs, ys=ys.thisloop, HandlingSuppData=HandlingSuppData) * (2*(exp(PSTn))/(exp(PSTn)+1)^2)-grad0 )*10^(3)/2 if (all(is.finite(g_plus)) && all(is.finite(g_minus))) { H[ipara,] <- g_plus - g_minus } else { dimensions_that_need_fixing <- c(dimensions_that_need_fixing, ipara) # message(c("At least one was not finite, ", g_plus, g_minus)) if (all(is.finite(g_plus))) { H[ipara,] <- 2 * g_plus } else if (all(is.finite(g_minus))) { H[ipara,] <- -2 * g_minus } else { # stop("Having to set one to NaN, will probably break stuff") H[ipara,] <- NaN } } } # For any dimensions that gave issues, set them to unit vector here # Shouldn't affect eigen stuff for other dimensions? 
for (ipara in dimensions_that_need_fixing) { message(paste(c("Had to fix dimensions ", dimensions_that_need_fixing, " in CGGP_fit"))) H[ipara,] <- H[,ipara] <- 0 H[ipara, ipara] <- 1 } Hmat = H/2+t(H)/2 A = eigen(Hmat) cHa = (A$vectors)%*%diag(pmin(sqrt(pmax(1/(A$values),10^(-16))),4))%*%t(A$vectors) # Get posterior samples using Laplace approximation PST= log((1+thetaMAP)/(1-thetaMAP)) + cHa%*%matrix(rnorm(CGGP$numPostSamples*length(thetaMAP),0,1), nrow=length(thetaMAP)) thetaPostSamples = (exp(PST)-1)/(exp(PST)+1) # Now if there were any bad dimensions, we need to set all those # thetaPostSamples for that dimension to be the MAP for (ipara in dimensions_that_need_fixing) { message(paste(c("Changed thetaPostSamples for dims ", dimensions_that_need_fixing))) thetaPostSamples[ipara,] <- thetaMAP[ipara] } if(CGGP$supplemented){ # Cs = matrix(1,dim(CGGP$Xs)[1],CGGP$ss) # for (dimlcv in 1:CGGP$d) { # Loop over dimensions # V = CGGP$CorrMat(CGGP$Xs[,dimlcv], CGGP$xb, # thetaMAP[(dimlcv-1)*CGGP$numpara+1:CGGP$numpara]) # Cs = Cs*V[,CGGP$designindex[,dimlcv]] # } # # Sigma_t = matrix(1,dim(CGGP$Xs)[1],dim(CGGP$Xs)[1]) # for (dimlcv in 1:CGGP$d) { # Loop over dimensions # V = CGGP$CorrMat(CGGP$Xs[,dimlcv], CGGP$Xs[,dimlcv], # thetaMAP[(dimlcv-1)*CGGP$numpara+1:CGGP$numpara]) # Sigma_t = Sigma_t*V # } # # MSE_s = list(matrix(0,dim(CGGP$Xs)[1],dim(CGGP$Xs)[1]), # (CGGP$d+1)*(CGGP$maxlevel+1)) # for (dimlcv in 1:CGGP$d) { # for (levellcv in 1:max(CGGP$uo[1:CGGP$uoCOUNT,dimlcv])) { # MSE_s[[(dimlcv)*CGGP$maxlevel+levellcv]] = # (-CGGP_internal_postvarmatcalc(CGGP$Xs[,dimlcv],CGGP$Xs[,dimlcv], # CGGP$xb[1:CGGP$sizest[levellcv]], # thetaMAP[(dimlcv-1)*CGGP$numpara + # 1:CGGP$numpara], # CorrMat=CGGP$CorrMat)) # } # } # # for (blocklcv in 1:CGGP$uoCOUNT) { # ME_s = matrix(1,nrow=dim(Xs)[1],ncol=dim(Xs)[1]) # for (dimlcv in 1:CGGP$d) { # levelnow = CGGP$uo[blocklcv,dimlcv] # ME_s = ME_s*MSE_s[[(dimlcv)*CGGP$maxlevel+levelnow]] # } # Sigma_t = Sigma_t-CGGP$w[blocklcv]*(ME_s) # } # yhats = Cs%*%pw # # # Sti_resid = solve(Sigma_t,ys.thisloop-yhats) # Sti = solve(Sigma_t) # sigma2MAP = (sigma2MAP*dim(CGGP$design)[1] + # colSums((ys.thisloop-yhats)*Sti_resid)) / ( # dim(CGGP$design)[1]+dim(Xs)[1]) # # pw_adj_y = t(Cs)%*%Sti_resid # pw_adj <- CGGP_internal_calcpw(CGGP=CGGP, y=pw_adj_y, theta=thetaMAP) # # pw_uppadj = pw-pw_adj # supppw = Sti_resid supp_values <- CGGP_internal_calc_supp_pw_sigma2_Sti( CGGP, thetaMAP=thetaMAP, ys.thisloop=ys.thisloop, pw=pw, sigma2MAP=sigma2MAP, only_sigma2MAP=FALSE) supppw <- supp_values$supppw sigma2MAP <- supp_values$sigma2MAP Sti <- supp_values$Sti pw_uppadj<- supp_values$pw_uppadj } # Add all new variables to CGGP that are needed if (nopd==1) { # Only 1 output parameter dim, so just set them CGGP$thetaMAP <- thetaMAP CGGP$sigma2MAP <- sigma2MAP CGGP$pw <- pw CGGP$thetaPostSamples <- thetaPostSamples CGGP$cholSs = cholSs if (CGGP$supplemented) { CGGP$pw_uppadj <- pw_uppadj CGGP$supppw <- supppw CGGP$Sti = Sti CGGP$sigma2MAP <- sigma2MAP } } else { # More than 1 opd, so need to set as columns of matrix if (opdlcv==1) { # First time, initialize matrix/array for all CGGP$thetaMAP <- matrix(NaN, length(thetaMAP), nopd) if (length(sigma2MAP) != 1) { stop("Error: sigma2map should be a 1x1 matrix.") } CGGP$sigma2MAP <- numeric(nopd) CGGP$pw <- matrix(NaN, length(pw), nopd) # thetaPostSamples is matrix, so this is 3dim array below CGGP$thetaPostSamples <- array(data = NaN, dim=c(dim(thetaPostSamples), nopd)) CGGP$cholSs <- vector("list", nopd) } CGGP$thetaMAP[,opdlcv] <- thetaMAP 
CGGP$sigma2MAP[opdlcv] <- sigma2MAP CGGP$pw[,opdlcv] <- pw CGGP$thetaPostSamples[,,opdlcv] <- thetaPostSamples # CGGP$cholSs[,,opdlcv] <- cholSs CGGP$cholSs[[opdlcv]] <- cholSs if (CGGP$supplemented) { if (opdlcv==1) { # First time initialize all CGGP$pw_uppadj <- matrix(NaN, nrow(pw_uppadj), nopd) CGGP$supppw <- matrix(NaN, nrow(supppw), nopd) # Sti is matrix, so this is 3 dim array CGGP$Sti = array(NaN, dim=c(dim(Sti), nopd)) } CGGP$pw_uppadj[,opdlcv] <- pw_uppadj CGGP$supppw[,opdlcv] <- supppw CGGP$Sti[,,opdlcv] = Sti } } } # Clear old sigma2_samples. They will be recalculated when needed. CGGP$sigma2_samples <- NULL return(CGGP) } #' Calculate posterior variance #' #' @param x1 Points at which to calculate MSE #' @param x2 Levels along dimension, vector??? #' @param xo No clue what this is #' @param theta Correlation parameters #' @param CorrMat Function that gives correlation matrix #' for vector of 1D points. #' @param returndPVMC Should dPVMC be returned? #' @param returndiagonly Should only the diagonal be returned? #' #' @return Variance posterior # @export #' @noRd #' #' @examples #' CGGP_internal_postvarmatcalc(c(.4,.52), c(0,.25,.5,.75,1), #' xo=c(.11), theta=c(.1,.2,.3), #' CorrMat=CGGP_internal_CorrMatCauchySQT) CGGP_internal_postvarmatcalc <- function(x1, x2, xo, theta, CorrMat, returndPVMC = FALSE, returndiagonly=FALSE) { if(!returndiagonly && !returndPVMC){ S = CorrMat(xo, xo, theta) n = length(xo) cholS = chol(S) C1o = CorrMat(x1, xo, theta) CoinvC1o = backsolve(cholS,backsolve(cholS,t(C1o), transpose = TRUE)) C2o = CorrMat(x2, xo, theta) Sigma_mat = - t(CoinvC1o)%*%t(C2o) return(Sigma_mat) } else { stop("Full postvarmatcalc function was removed #25082") # Only the chunk above was ever used in our code, # the full version where the other options can be used # was moved to scratch/scratch_postvarmatcalc_fullversion.R } } #' Calculate sigma2 for all theta samples #' #' @param CGGP CGGP object #' #' @return All sigma2 samples ## @export #' @noRd CGGP_internal_calc_sigma2_samples <- function(CGGP) { nopd <- if (is.matrix(CGGP$thetaPostSamples)) {1} else {dim(CGGP$thetaPostSamples)[3]} if (is.null(CGGP[["y"]]) || length(CGGP$y)==0) { # Only supp data if (nopd == 1 && length(CGGP$sigma2MAP)==1) { # 1 opd and 1 od # Single output dimension as.matrix( apply(CGGP$thetaPostSamples, 2, function(th) { CGGP_internal_calc_supp_only_supppw_sigma2_Sti( CGGP=CGGP,thetaMAP=th,ys.thisloop=CGGP$ys, only_sigma2MAP=TRUE )$sigma2 } ) ) } else if (nopd == 1) { # 1 opd but 2+ od # MV output but shared parameters, so sigma2 is vector t( apply(CGGP$thetaPostSamples, 2, function(th) { CGGP_internal_calc_supp_only_supppw_sigma2_Sti( CGGP=CGGP,thetaMAP=th,ys.thisloop=CGGP$ys, only_sigma2MAP=TRUE )$sigma2 } ) ) } else { # 2+ opd, so must be 2+ od # MV output with separate parameters, so need to loop over # both samples and output dimension outer(1:CGGP$numPostSamples, 1:nopd, Vectorize( function(samplenum, outputdim) { CGGP_internal_calc_supp_only_supppw_sigma2_Sti( CGGP=CGGP,thetaMAP=CGGP$thetaPostSamples[,samplenum,outputdim], ys.thisloop=CGGP$ys[,outputdim], only_sigma2MAP=TRUE )$sigma2 } ) ) } } else if (!CGGP$supplemented) { if (nopd == 1 && length(CGGP$sigma2MAP)==1) { # 1 opd and 1 od # Single output dimension as.matrix( apply(CGGP$thetaPostSamples, 2, function(th) { CGGP_internal_calcsigma2(CGGP, CGGP$y, th )$sigma2 } ) ) } else if (nopd == 1) { # 1 opd but 2+ od # MV output but shared parameters, so sigma2 is vector t( apply(CGGP$thetaPostSamples, 2, function(th) { 
CGGP_internal_calcsigma2(CGGP, CGGP$y, th )$sigma2 } ) ) } else { # 2+ opd, so must be 2+ od # MV output with separate parameters, so need to loop over # both samples and output dimension outer(1:CGGP$numPostSamples, 1:nopd, Vectorize(function(samplenum, outputdim) { CGGP_internal_calcsigma2( CGGP, CGGP$y[,outputdim], CGGP$thetaPostSamples[,samplenum,outputdim] )$sigma2 }) ) } } else { # There is supplementary data if (nopd == 1 && length(CGGP$sigma2MAP)==1) { # 1 opd and 1 od # Single output dimension as.matrix( apply(CGGP$thetaPostSamples, 2, function(th) { CGGP_internal_calc_supp_pw_sigma2_Sti(CGGP=CGGP, y.thisloop=CGGP$y, thetaMAP=th, ys.thisloop=CGGP$ys, only_sigma2MAP=TRUE )$sigma2 } ) ) } else if (nopd == 1) { # 1 opd but 2+ od # MV output but shared parameters, so sigma2 is vector t( apply(CGGP$thetaPostSamples, 2, function(th) { CGGP_internal_calc_supp_pw_sigma2_Sti(CGGP, y.thisloop=CGGP$y, thetaMAP=th, ys.thisloop=CGGP$ys, only_sigma2MAP=TRUE )$sigma2 } ) ) } else { # 2+ opd, so must be 2+ od # MV output with separate parameters, so need to loop over # both samples and output dimension outer(1:CGGP$numPostSamples, 1:nopd, Vectorize(function(samplenum, outputdim) { CGGP_internal_calc_supp_pw_sigma2_Sti( CGGP=CGGP, y.thisloop=CGGP$y[,outputdim], thetaMAP=CGGP$thetaPostSamples[,samplenum,outputdim], ys.thisloop=CGGP$ys[,outputdim], only_sigma2MAP=TRUE )$sigma2 }) ) } } }
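# Sketch (interactive only; toy fit): posterior sigma2 draws are recomputed
# lazily from the stored theta samples by the internal helper above.
if (FALSE) {
  cg <- CGGPcreate(d = 3, batchsize = 60)
  Y <- apply(cg$design, 1, function(x) x[1] + x[2]^2)
  cg <- CGGPfit(cg, Y = Y)
  s2 <- CGGP:::CGGP_internal_calc_sigma2_samples(cg)
  dim(s2)  # one row per posterior theta sample
}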
/scratch/gouwar.j/cran-all/cranData/CGGP/R/CGGP_fit_fs.R
#' Fit CGGP to only supplemental data
#'
#' If only supplemental data is given, and there is no grid data,
#' then this function is used.
#' It basically does the standard GP calculations since there is
#' nothing from the grid.
#'
#' @param CGGP CGGP object
#' @param Xs X supp matrix
#' @param Ys Y supp data
#' @param separateoutputparameterdimensions Should output dimensions be fit separately?
#' @param theta0 Initial theta0 for optimization
#' @param numPostSamples How many posterior samples should be calculated?
#' @param set_thetaMAP_to Value for thetaMAP to be set to
#'
#' @return CGGP object
## @export
#' @noRd
#' @examples
#' d <- 3
#' n <- 30
#' Xs <- matrix(runif(d*n), n, d)
#' Ys <- apply(Xs, 1, function(x){x[1]/(x[3]+.2)+exp(x[3])*cos(x[2]^2)})
#' cg <- CGGPcreate(d, Xs=Xs, Ys=Ys, batchsize=0)
CGGP_internal_fitwithonlysupp <- function(CGGP, Xs, Ys,
                                          separateoutputparameterdimensions=TRUE,
                                          theta0=rep(0,CGGP$numpara*CGGP$d),
                                          set_thetaMAP_to,
                                          numPostSamples=NULL
) {
  # ==========================.
  # ====    Set Values    ====
  # ==========================.
  CGGP$supplemented = TRUE
  CGGP$Xs = Xs
  CGGP$Ys = Ys
  if (!is.null(numPostSamples)) {
    CGGP$numPostSamples <- numPostSamples
  }
  rm(numPostSamples)

  if(!is.matrix(Ys)){ # 1D output
    CGGP$mu = mean(Ys)
    # y = Y-CGGP$mu
    ys = Ys-CGGP$mu
  } else{ # Multiple outputs
    CGGP$mu = colMeans(Ys)
    # Could use Y, or colMeans(rbind(Y, Ys)), or make sure Ys is big enough for this
    # y <- sweep(Y, 2, CGGP$mu)
    ys <- sweep(Ys, 2, CGGP$mu)
  }
  # CGGP$y = y
  CGGP$ys = ys

  # nopd is numberofoutputparameterdimensions
  nopd <- if (separateoutputparameterdimensions && is.matrix(ys)) {
    ncol(ys)
  } else {
    1
  }

  # Fix theta0
  if (nopd > 1) {
    if (is.vector(theta0)) {
      theta0 <- matrix(theta0, nrow=length(theta0), ncol=nopd, byrow=F)
    }
  }
  # Can get an error for theta0 if number of PCA dimensions has changed
  if (is.matrix(theta0) && (ncol(theta0) != nopd)) {
    if (ncol(theta0) > nopd) {
      theta0 <- theta0[,1:nopd]
    } else {
      theta0 <- cbind(theta0, matrix(0,nrow(theta0), nopd-ncol(theta0)))
    }
  }

  # =================================================================.
  # ==== Optimize parameters for each output parameter dimension ====
  # =================================================================.
  for (opdlcv in 1:nopd) { # output parameter dimension
    ys.thisloop <- if (nopd==1) {ys} else {ys[,opdlcv]}
    theta0.thisloop <- if (nopd==1) {theta0} else {theta0[,opdlcv]}

    if (!missing(set_thetaMAP_to) && !is.null(set_thetaMAP_to)) {
      opt.out <- list(par = if (nopd>1) {set_thetaMAP_to[,opdlcv]} else {set_thetaMAP_to})
    } else {
      # Sometimes it starts in a bad spot
      # Catch it with try, and start it at zero.
      # If that fails, we don't know what to do.
      neglogpost_par <- CGGP_internal_neglogpost(theta = theta0.thisloop, CGGP=CGGP,
                                                 y=NULL, Xs=Xs, ys=ys.thisloop)
      if (is.infinite(neglogpost_par)) {
        # Restart at zero for the full correlation parameter vector
        theta0_touse <- rep(0, length(theta0.thisloop))
      } else {
        theta0_touse <- theta0.thisloop
      }
      opt.out <- nlminb(
        theta0_touse,
        objective = CGGP_internal_neglogpost,
        gradient = CGGP_internal_gneglogpost,
        y = NULL,
        HandlingSuppData="Only",
        Xs=Xs,
        ys=ys.thisloop,
        CGGP = CGGP,
        control = list(rel.tol = 1e-8, iter.max = 500)
      )
    }

    # Set new theta
    thetaMAP <- opt.out$par

    # =====================================.
    # ====    Get posterior samples    ====
    # =====================================.
    totnumpara = length(thetaMAP)

    # H is the Hessian at thetaMAP with reverse transformation
    H = matrix(0,nrow=totnumpara,ncol=totnumpara)
    # Transform so instead of -1 to 1 it is -Inf to Inf. Mostly in -5 to 5 though.
    PSTn = log((1+thetaMAP)/(1-thetaMAP))
    # Reverse transformation
    thetav = (exp(PSTn)-1)/(exp(PSTn)+1)
    # Grad of reverse transformation function
    grad0 = CGGP_internal_gneglogpost(thetav,CGGP,y=NULL, Xs=Xs, ys=ys.thisloop,
                                      HandlingSuppData="Only") *
      (2*(exp(PSTn))/(exp(PSTn)+1)^2)
    for(c in 1:totnumpara){
      rsad = rep(0,totnumpara)
      rsad[c] = 10^(-3)
      PSTn = log((1+thetaMAP)/(1-thetaMAP)) + rsad
      thetav = (exp(PSTn)-1)/(exp(PSTn)+1)
      H[c,] = (CGGP_internal_gneglogpost(thetav,CGGP,y=NULL, Xs=Xs, ys=ys.thisloop,
                                         HandlingSuppData="Only") *
                 (2*(exp(PSTn))/(exp(PSTn)+1)^2) - grad0)*10^(3)
    }
    Hmat = H/2+t(H)/2
    A = eigen(Hmat)
    cHa = (A$vectors)%*%diag(abs(A$values)^(-1/2))%*%t(A$vectors)
    # Get posterior samples using Laplace approx
    PST = log((1+thetaMAP)/(1-thetaMAP)) +
      cHa%*%matrix(rnorm(CGGP$numPostSamples*length(thetaMAP),0,1),
                   nrow=length(thetaMAP))
    thetaPostSamples = (exp(PST)-1)/(exp(PST)+1)

    # =================================.
    # ====  Calculate supp stuff   ====
    # =================================.
    # Sigma_t = matrix(1,dim(CGGP$Xs)[1],dim(CGGP$Xs)[1])
    # for (dimlcv in 1:CGGP$d) { # Loop over dimensions
    #   V = CGGP$CorrMat(CGGP$Xs[,dimlcv], CGGP$Xs[,dimlcv], thetaMAP[(dimlcv-1)*CGGP$numpara+1:CGGP$numpara])
    #   Sigma_t = Sigma_t*V
    # }
    #
    # Sti_chol <- chol(Sigma_t + diag(CGGP$nugget, nrow(Sigma_t), ncol(Sigma_t)))
    # Sti <- chol2inv(Sti_chol)
    #
    # # Use backsolve for stability
    # # supppw <- Sti %*% ys.thisloop
    # # supppw is same as Sti_resid in other files
    # supppw <- backsolve(Sti_chol, backsolve(Sti_chol, ys.thisloop, transpose = T))
    # if (is.matrix(supppw) && ncol(supppw)==1) {supppw <- as.vector(supppw)}
    #
    # # sigma2MAP <- (t(ys.thisloop) %*% supppw) / nrow(Xs)
    # # if (is.matrix(sigma2MAP)) {sigma2MAP <- diag(sigma2MAP)}
    # sigma2MAP = colSums(as.matrix(ys.thisloop)*as.matrix(supppw))/nrow(Xs)
    supp_quantities <- CGGP_internal_calc_supp_only_supppw_sigma2_Sti(
      CGGP=CGGP, thetaMAP=thetaMAP, ys.thisloop=ys.thisloop,
      only_sigma2MAP=FALSE
    )
    supppw <- supp_quantities$supppw
    sigma2MAP <- supp_quantities$sigma2MAP
    Sti <- supp_quantities$Sti

    # Add all new variables to CGGP that are needed
    if (nopd==1) { # Only 1 output parameter dim, so just set them
      CGGP$thetaMAP <- thetaMAP
      CGGP$sigma2MAP <- sigma2MAP
      # CGGP$pw <- pw
      CGGP$thetaPostSamples <- thetaPostSamples
      if (CGGP$supplemented) {
        # CGGP$pw_uppadj <- pw_uppadj
        CGGP$supppw <- supppw
        CGGP$Sti = Sti
        CGGP$sigma2MAP <- sigma2MAP
      }
    } else { # More than 1 opd, so need to set as columns of matrix
      if (opdlcv==1) { # First time, initialize matrix/array for all
        CGGP$thetaMAP <- matrix(NaN, length(thetaMAP), nopd)
        if (length(sigma2MAP) != 1) {
          stop("ERROR HERE, sigma2map can be matrix??? It is always a 1x1 matrix from what I've seen before.")
        }
        CGGP$sigma2MAP <- numeric(nopd)
        # CGGP$pw <- matrix(NaN, length(pw), nopd)
        # thetaPostSamples is matrix, so this is 3dim array below
        CGGP$thetaPostSamples <- array(data = NaN, dim=c(dim(thetaPostSamples), nopd))
      }
      CGGP$thetaMAP[,opdlcv] <- thetaMAP
      CGGP$sigma2MAP[opdlcv] <- sigma2MAP
      # CGGP$pw[,opdlcv] <- pw
      CGGP$thetaPostSamples[,,opdlcv] <- thetaPostSamples
      if (CGGP$supplemented) {
        if (opdlcv==1) { # First time initialize all
          # CGGP$pw_uppadj <- matrix(NaN, nrow(pw_uppadj), nopd)
          CGGP$supppw <- matrix(NaN, length(supppw), nopd) # Could be nrow supppw if 1 col matrix
          CGGP$Sti = array(NaN, dim=c(dim(Sti), nopd)) # Sti is matrix, so this is 3 dim array
        }
        # CGGP$pw_uppadj[,opdlcv] <- pw_uppadj
        CGGP$supppw[,opdlcv] <- supppw
        CGGP$Sti[,,opdlcv] = Sti
      }
    }
  }

  # Get sigma2 samples
  CGGP$sigma2_samples <- CGGP_internal_calc_sigma2_samples(CGGP)

  CGGP
}
/scratch/gouwar.j/cran-all/cranData/CGGP/R/CGGP_fitwithonlysupp.R
#' Gradient of negative log likelihood posterior
#'
#' @param theta Log of correlation parameters
#' @param CGGP CGGP object
#' @param y CGGP$design measured values
#' @param return_lik If yes, it returns a list with lik and glik
#' @param ... Forces you to name remaining arguments
#' @param Xs Supplementary input data
#' @param ys Supplementary output data
#' @param HandlingSuppData How should supplementary data be handled?
#' * Correct: full likelihood with grid and supplemental data
#' * Only: only use supplemental data
#' * Ignore: ignore supplemental data
#'
#' @return Vector for gradient of likelihood w.r.t. x (theta)
#' @export
#'
#' @examples
#' cg <- CGGPcreate(d=3, batchsize=20)
#' Y <- apply(cg$design, 1, function(x){x[1]+x[2]^2})
#' cg <- CGGPfit(cg, Y)
#' CGGP_internal_gneglogpost(cg$thetaMAP, CGGP=cg, y=cg$y)
CGGP_internal_gneglogpost <- function(theta, CGGP, y, ..., return_lik=FALSE,
                                      ys=NULL, Xs=NULL,
                                      HandlingSuppData = "Correct") {
  # ================================
  # Check that inputs are acceptable
  # ================================
  if (!(HandlingSuppData %in% c("Correct", "Only", "Ignore"))) {
    stop(paste("HandlingSuppData in CGGP_internal_gneglogpost must be one of",
               "Correct, Only, Ignore"))
  }

  if(!is.null(Xs) && (is.null(y) || length(y)==0)){
    HandlingSuppData = "Only"
  }else if(is.null(Xs) && !(is.null(y) || length(y)==0)){
    HandlingSuppData = "Ignore"
  }else if(is.null(Xs) && (is.null(y) || length(y)==0)){
    stop(paste("You have given no y or ys to CGGP_internal_gneglogpost"))
  }

  # =======================
  # Calculate gneglogpost
  # =======================
  if(HandlingSuppData == "Only" || HandlingSuppData == "Correct"){
    Sigma_t = matrix(0,dim(Xs)[1],dim(Xs)[1])
    dSigma_to = list(Sigma_t,CGGP$d)
    RTR = list(Sigma_t,CGGP$d)
    for (dimlcv in 1:CGGP$d) { # Loop over dimensions
      V = CGGP$CorrMat(Xs[,dimlcv], Xs[,dimlcv],
                       theta[(dimlcv-1)*CGGP$numpara+1:CGGP$numpara],
                       return_dCdtheta=TRUE, returnlogs=TRUE)
      dSigma_to[[dimlcv]] = V$dCdtheta
      Sigma_t = Sigma_t+V$C
    }
    Sigma_t = exp(Sigma_t)

    Cmat1 = matrix(rep(Sigma_t, CGGP$numpara), nrow = nrow(Sigma_t), byrow = FALSE)
    for (dimlcv in 1:CGGP$d) { # Loop over dimensions
      dSigma_to[[dimlcv]] = Cmat1*dSigma_to[[dimlcv]]
    }

    Sigma_t = (1-CGGP$nugget)*Sigma_t+diag(dim(Sigma_t)[1])*CGGP$nugget
    for (dimlcv in 1:CGGP$d) {
      dSigma_to[[dimlcv]] = (1-CGGP$nugget)*dSigma_to[[dimlcv]]
    }
  }

  if(HandlingSuppData == "Only"){
    try.chol <- try({Sigma_chol = chol(Sigma_t)}, silent = TRUE)
    if (inherits(try.chol, "try-error")) {
      stop("chol error in gneglogpost #1, this can happen when neglogpost is Inf")
      # This came up a lot when running nlminb on the initial point.
      # If the initial neglogpost is Inf, it will call gneglogpost
      # and get the error here. To avoid this we have to make sure the
      # initial points of nlminb are always finite values.
      # This one wasn't the problem, it was usually the other which
      # happens when there is supp data.
      # return(rep(NA, length(theta)))
    }; rm(try.chol)

    tempvec1 = backsolve(Sigma_chol,backsolve(Sigma_chol,ys,transpose = TRUE))
    sigma2_hat_supp = colSums(as.matrix(ys*tempvec1))/dim(Xs)[1]
    if(is.matrix(ys)){
      dsigma2_hat_supp = matrix(0,CGGP$d*CGGP$numpara,ncol=dim(ys)[2])
    }else{
      dsigma2_hat_supp = rep(0,CGGP$d*CGGP$numpara)
    }
    dlDet_supp = rep(0,CGGP$d*CGGP$numpara)

    for (dimlcv in 1:CGGP$d) {
      for(paralcv in 1:CGGP$numpara){
        dSigma_supp = as.matrix((dSigma_to[[dimlcv]])[
          , ((paralcv-1)*dim(Sigma_chol)[2]+1):(paralcv*dim(Sigma_chol)[2])])
        tempvec2 = dSigma_supp%*%tempvec1
        if(is.matrix(dsigma2_hat_supp)){
          if(dim(dsigma2_hat_supp)[1]>1.5){
            dsigma2_hat_supp[(dimlcv-1)*CGGP$numpara+paralcv,] =
              -colSums(as.matrix(tempvec1*tempvec2))/dim(Xs)[1]
          }else{
            dsigma2_hat_supp[,(dimlcv-1)*CGGP$numpara+paralcv] =
              -colSums(as.matrix(tempvec1*tempvec2))/dim(Xs)[1]
          }
        }else{
          dsigma2_hat_supp[(dimlcv-1)*CGGP$numpara+paralcv] =
            -colSums(as.matrix(tempvec1*tempvec2))/dim(Xs)[1]
        }
        dlDet_supp[(dimlcv-1)*CGGP$numpara+paralcv] =
          sum(diag(backsolve(Sigma_chol,backsolve(Sigma_chol,dSigma_supp,
                                                  transpose = TRUE))))
      }
    }
    lDet_supp = 2*sum(log(diag(Sigma_chol)))
  }

  if(HandlingSuppData == "Ignore"){
    sigma2anddsigma2 <- CGGP_internal_calcsigma2anddsigma2(CGGP=CGGP, y=y,
                                                           theta=theta,
                                                           return_lS=TRUE)
    lS <- sigma2anddsigma2$lS
    dlS <- sigma2anddsigma2$dlS

    sigma2_hat_grid = sigma2anddsigma2$sigma2
    dsigma2_hat_grid = sigma2anddsigma2$dsigma2

    lDet_grid = 0
    dlDet_grid = rep(0, CGGP$numpara*CGGP$d)

    for (blocklcv in 1:CGGP$uoCOUNT) {
      nv = CGGP$gridsize[blocklcv]/CGGP$gridsizes[blocklcv,]
      uonow = CGGP$uo[blocklcv,]
      for (dimlcv in which(uonow>1.5)) {
        if (return_lik) {
          lDet_grid = lDet_grid + (lS[uonow[dimlcv], dimlcv] -
                                     lS[uonow[dimlcv] - 1, dimlcv])*nv[dimlcv]
        }
        IS = (dimlcv-1)*CGGP$numpara+1:CGGP$numpara
        dlDet_grid[IS] = dlDet_grid[IS] + (dlS[uonow[dimlcv], IS] -
                                             dlS[uonow[dimlcv]-1, IS])*nv[dimlcv]
      }
    }
  }

  if(HandlingSuppData == "Correct"){
    Cs = matrix(0,dim(Xs)[1],CGGP$ss)
    dCs = list(matrix(0,dim(Xs)[1],CGGP$numpara*CGGP$ss),dim(Xs)[2])
    GGGG = list(matrix(1,dim(Xs)[1],length(CGGP$xb)),CGGP$d)
    dGGGG1 = list(matrix(1,dim(Xs)[1],length(CGGP$xb)),CGGP$d)
    for (dimlcv in 1:CGGP$d) { # Loop over dimensions
      Xbn = CGGP$xb[1:CGGP$sizest[max(CGGP$uo[,dimlcv])]]
      V = CGGP$CorrMat(Xs[,dimlcv], Xbn,
                       theta[(dimlcv-1)*CGGP$numpara+1:CGGP$numpara],
                       return_dCdtheta=TRUE, returnlogs=TRUE)
      A = round(as.vector(outer(CGGP$designindex[,dimlcv],
                                length(Xbn)*(0:(CGGP$numpara-1)),
                                FUN=function(x,y) x+y)))
      GGGG[[dimlcv]] = exp(V$C)
      dGGGG1[[dimlcv]] = V$dCdtheta
      dCs[[dimlcv]] = V$dCdtheta[,A]
      Cs = Cs+V$C[,CGGP$designindex[,dimlcv]]
    }
    Cs = exp(Cs)

    Cmat1 = matrix(rep(Cs, CGGP$numpara), nrow = nrow(Cs), byrow = FALSE)
    dCs = lapply(dCs, FUN = function(x) Cmat1*x)
    for (dimlcv in 1:CGGP$d) { # Loop over dimensions
      Cmat2 = matrix(rep(GGGG[[dimlcv]], CGGP$numpara),
                     nrow = nrow(GGGG[[dimlcv]]), byrow = FALSE)
      dGGGG1[[dimlcv]] = Cmat2*(dGGGG1[[dimlcv]])
    }

    lik_stuff <- CGGP_internal_calc_cholS_lS_dsigma2_pw_dMatdtheta(
      CGGP=CGGP, y=y, theta=theta)
    cholS = lik_stuff$cholS
    dSV = lik_stuff$dMatdtheta
    lS <- lik_stuff$lS
    dlS <- lik_stuff$dlS
    sigma2_hat_grid = lik_stuff$sigma2
    dsigma2_hat_grid = lik_stuff$dsigma2
    pw = lik_stuff$pw
    yhats = Cs%*%pw

    lDet_grid = 0
    dlDet_grid = rep(0, CGGP$numpara*CGGP$d)

    for (blocklcv in 1:CGGP$uoCOUNT) {
      nv = CGGP$gridsize[blocklcv]/CGGP$gridsizes[blocklcv,]
      uonow = CGGP$uo[blocklcv,]
      for (dimlcv in which(uonow>1.5)) {
        if (return_lik) {
          lDet_grid = lDet_grid + (lS[uonow[dimlcv], dimlcv] -
                                     lS[uonow[dimlcv] - 1, dimlcv])*nv[dimlcv]
        }
        IS = (dimlcv-1)*CGGP$numpara+1:CGGP$numpara
        dlDet_grid[IS] = dlDet_grid[IS] + (dlS[uonow[dimlcv], IS] -
                                             dlS[uonow[dimlcv]-1, IS])*nv[dimlcv]
      }
    }
  }

  if(HandlingSuppData == "Correct"){
    MSE_s = matrix(NaN,nrow=dim(Xs)[1]*dim(Xs)[1],
                   ncol=(CGGP$d)*(CGGP$maxlevel))
    dMSE_s = matrix(NaN,nrow=dim(Xs)[1]*dim(Xs)[1],
                    ncol=CGGP$numpara*CGGP$d*CGGP$maxlevel)
    Q = max(CGGP$uo[1:CGGP$uoCOUNT,])
    for (dimlcv in 1:CGGP$d) {
      gg = (dimlcv-1)*Q
      TT1 = GGGG[[dimlcv]]
      TT2 = dGGGG1[[dimlcv]]
      for (levellcv in 1:max(CGGP$uo[1:CGGP$uoCOUNT,dimlcv])) {
        INDSN = 1:CGGP$sizest[levellcv]
        INDSN = INDSN[sort(CGGP$xb[1:CGGP$sizest[levellcv]],
                           index.return = TRUE)$ix]
        REEALL = CGGP_internal_postvarmatcalc_fromGMat(TT1, TT2,
                                                       cholS[[gg+levellcv]],
                                                       dSV[[gg+levellcv]],
                                                       INDSN,
                                                       CGGP$numpara,
                                                       returndG = TRUE,
                                                       returnderiratio = TRUE)
        MSE_s[,(dimlcv-1)*CGGP$maxlevel+levellcv] = as.vector(REEALL$Sigma_mat)
        for(paralcv in 1:CGGP$numpara){
          dMSE_s[, CGGP$numpara*(dimlcv-1)*CGGP$maxlevel +
                   (paralcv-1)*CGGP$maxlevel+levellcv] =
            as.vector(REEALL$dSigma_mat[,((dim(Xs)[1]*(paralcv-1)+1):(dim(Xs)[1]*paralcv))])
        }
      }
    }

    dsigma2_hat_part1 = 0*dsigma2_hat_grid
    dsigma2_hat_part2 = 0*dsigma2_hat_grid
    dsigma2_hat_part3 = 0*dsigma2_hat_grid
    dlDet_supp = 0*dlDet_grid

    Sigma_t2 = as.vector(Sigma_t)
    dSigma_to2 = matrix(0,length(Sigma_t2),CGGP$numpara*CGGP$d)
    for (dimlcv in 1:CGGP$d) {
      for(paralcv in 1:CGGP$numpara){
        dSigma_to2[,CGGP$numpara*(dimlcv-1)+paralcv] =
          as.vector(t((dSigma_to[[dimlcv]])[,((dim(Xs)[1]*(paralcv-1)+1):(dim(Xs)[1]*paralcv))]))
      }
    }
    rcpp_fastmatclcranddclcr(CGGP$uo[1:CGGP$uoCOUNT,], CGGP$w[1:CGGP$uoCOUNT],
                             MSE_s, dMSE_s, Sigma_t2, dSigma_to2,
                             CGGP$maxlevel, CGGP$numpara)
    for (dimlcv in 1:CGGP$d) {
      dSigma_to[[dimlcv]] = matrix(dSigma_to2[,CGGP$numpara*(dimlcv-1) + (1:CGGP$numpara)],
                                   nrow=dim(Xs)[1], byrow = FALSE)
    }
    Sigma_t = matrix(Sigma_t2, nrow=dim(Xs)[1], byrow = FALSE)

    Sigma_t = (1-CGGP$nugget)*Sigma_t+diag(dim(Sigma_t)[1])*CGGP$nugget
    for (dimlcv in 1:CGGP$d) {
      dSigma_to[[dimlcv]] = (1-CGGP$nugget)*dSigma_to[[dimlcv]]
    }

    try.chol <- try({Sigma_chol = chol(Sigma_t)}, silent = TRUE)
    if (inherits(try.chol, "try-error")) {
      # stop(paste("chol error in gneglogpost #2, this can happen when",
      #            " neglogpost is Inf, theta is ", theta, collapse=' '))
      warning(paste(c("chol error in gneglogpost #2, this can happen when",
                      " neglogpost is Inf, theta is ", theta)))
      return(NaN * theta)
      # This came up a lot when running nlminb on the initial point.
      # If the initial neglogpost is Inf, it will call gneglogpost
      # and get the error here. To avoid this we have to make sure the
      # initial points of nlminb are always finite values.
      # return(rep(NA, length(theta)))
    }; rm(try.chol)

    tempvec1 = backsolve(Sigma_chol,backsolve(Sigma_chol, ys-yhats,
                                              transpose = TRUE))
    for (dimlcv in 1:CGGP$d) {
      for(paralcv in 1:CGGP$numpara){
        dCpn = as.matrix((dCs[[dimlcv]])[,((paralcv-1)*dim(Cs)[2]+1):(paralcv*dim(Cs)[2])])
        if(is.matrix(dsigma2_hat_part2)){
          if(dim(dsigma2_hat_part2)[1]>1.5){
            dsigma2_hat_part2[(dimlcv-1)*CGGP$numpara+paralcv,] =
              -2*colSums((tempvec1)*(dCpn%*%pw))
          }else{
            dsigma2_hat_part2[,(dimlcv-1)*CGGP$numpara+paralcv] =
              -2*colSums((tempvec1)*(dCpn%*%pw))
          }
        }else{
          dsigma2_hat_part2[(dimlcv-1)*CGGP$numpara+paralcv] =
            -2*colSums((tempvec1)*(dCpn%*%pw))
        }

        dSigma_now = as.matrix((dSigma_to[[dimlcv]])[,((paralcv-1)*dim(Sigma_chol)[2]+1):(paralcv*dim(Sigma_chol)[2])])
        tempvec2 = dSigma_now%*%tempvec1
        if(is.matrix(dsigma2_hat_part2)){
          if(dim(dsigma2_hat_part2)[1]>1.5){
            dsigma2_hat_part1[(dimlcv-1)*CGGP$numpara+paralcv,] =
              -colSums(tempvec1*tempvec2)
          }else{
            dsigma2_hat_part1[,(dimlcv-1)*CGGP$numpara+paralcv] =
              -colSums(tempvec1*tempvec2)
          }
        }else{
          dsigma2_hat_part1[(dimlcv-1)*CGGP$numpara+paralcv] =
            -colSums(tempvec1*tempvec2)
        }
        dlDet_supp[(dimlcv-1)*CGGP$numpara+paralcv] =
          sum(diag(backsolve(Sigma_chol,backsolve(Sigma_chol,dSigma_now,
                                                  transpose = TRUE))))
      }
    }

    if(is.vector(y)){
      temp4 = as.vector(t(Cs)%*%tempvec1)
    }else{
      temp4 = t(Cs)%*%tempvec1
    }

    dsigma2_hat_part3 = -2*(CGGP_internal_calc_dvalo(CGGP,y,temp4,cholS,dSV)$dvalo)

    lDet_supp = 2*sum(log(diag(Sigma_chol)))
    sigma2_hat_supp = colSums((ys-yhats)*tempvec1)/dim(Xs)[1]
    dsigma2_hat_supp = (dsigma2_hat_part1+dsigma2_hat_part2 +
                          dsigma2_hat_part3)/dim(Xs)[1]
  }

  if(HandlingSuppData == "Ignore"){
    sigma2_hat = sigma2_hat_grid
    dsigma2_hat = dsigma2_hat_grid
    dlDet = dlDet_grid
    lDet = lDet_grid

    if(!is.matrix(y)){
      nsamples = length(y)
    }else{
      nsamples = dim(y)[1]
      ndim = dim(y)[2]
    }
  }

  if(HandlingSuppData == "Only"){
    sigma2_hat = sigma2_hat_supp
    dsigma2_hat = dsigma2_hat_supp
    dlDet = dlDet_supp
    lDet = lDet_supp

    if(!is.matrix(ys)){
      nsamples = length(ys)
    }else{
      nsamples = dim(ys)[1]
      ndim = dim(ys)[2]
    }
  }

  if(HandlingSuppData == "Correct"){
    sigma2_hat = sigma2_hat_grid * dim(CGGP$design)[1]/(dim(Xs)[1]+dim(CGGP$design)[1]) +
      sigma2_hat_supp*dim(Xs)[1]/(dim(Xs)[1]+dim(CGGP$design)[1])
    dsigma2_hat = dsigma2_hat_grid * dim(CGGP$design)[1]/(dim(Xs)[1]+dim(CGGP$design)[1]) +
      dsigma2_hat_supp*dim(Xs)[1]/(dim(Xs)[1]+dim(CGGP$design)[1])
    dlDet = dlDet_grid+dlDet_supp
    lDet = lDet_grid+lDet_supp

    if(!is.matrix(y)){
      nsamples = length(y)+length(ys)
    }else{
      nsamples = dim(y)[1]+dim(ys)[1]
      ndim = dim(y)[2]
    }
  }

  neglogpost = 0.1*sum((log(1-theta)-log(theta+1))^2) # start out with prior
  gneglogpost = -0.2*(log(1-theta)-log(theta+1))*((1/(1-theta))+1/(1+theta))

  neglogpost = neglogpost+0.1*sum((log(1-theta)-log(theta+1))^4) # start out with prior
  gneglogpost = gneglogpost-0.1*4*((log(1-theta)-log(theta+1))^3)*((1/(1-theta))+1/(1+theta))

  output_is_1D <- if (!is.null(y)) {!is.matrix(y)} else {!is.matrix(ys)}
  if(output_is_1D){
    neglogpost = neglogpost + 1/2*(nsamples*log(sigma2_hat[1])+lDet)
    gneglogpost = gneglogpost + 1/2*(dlDet+nsamples*dsigma2_hat/sigma2_hat[1])
  }else{
    neglogpost = neglogpost + 1/2*(nsamples*mean(log(c(sigma2_hat)))+lDet)
    gneglogpost = gneglogpost + 1/2*dlDet
    for(i in 1:ndim){
      gneglogpost = gneglogpost + 1/2*1/ndim*nsamples*dsigma2_hat[,i]/sigma2_hat[i]
    }
  }

  if(return_lik){
    return(list(neglogpost=neglogpost, gneglogpost=gneglogpost))
  } else {
    return(gneglogpost)
  }
}
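
# --- Illustrative sketch (not part of the package API) -----------------------
# A finite-difference check of CGGP_internal_gneglogpost against
# CGGP_internal_neglogpost, assuming neglogpost accepts the same
# (theta, CGGP, y) arguments it is called with elsewhere in this package.
# Wrapped in `if (FALSE)` so it is parsed but never executed on load.
if (FALSE) {
  cg <- CGGPcreate(d=3, batchsize=20)
  Y <- apply(cg$design, 1, function(x) {x[1] + x[2]^2})
  cg <- CGGPfit(cg, Y)
  th <- cg$thetaMAP
  g_analytic <- CGGP_internal_gneglogpost(th, CGGP=cg, y=cg$y)
  eps <- 1e-6
  g_numeric <- sapply(seq_along(th), function(i) {
    thp <- th; thp[i] <- thp[i] + eps
    thm <- th; thm[i] <- thm[i] - eps
    (CGGP_internal_neglogpost(thp, CGGP=cg, y=cg$y) -
       CGGP_internal_neglogpost(thm, CGGP=cg, y=cg$y)) / (2*eps)
  })
  # The analytic and numeric gradients should agree to several digits.
  max(abs(g_analytic - g_numeric))
}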
/scratch/gouwar.j/cran-all/cranData/CGGP/R/CGGP_gneglogpost.R
#' Impute missing values in grid data
#'
#' Fills in NA entries of the grid output y (one output column at a time)
#' using the correlation structure implied by theta, so the rest of the
#' fitting code can proceed as if all grid points had been evaluated.
#'
#' @param CGGP CGGP object
#' @param y Output values on the grid design, possibly with NA entries
#' @param theta Correlation parameters
#' @param ... Forces you to name remaining arguments
#' @param ystart Optional starting values for the imputation (warm start)
#'
#' @return y with missing entries imputed
#' @noRd
CGGP_internal_imputesomegrid <- function(CGGP, y, theta, ..., ystart = NULL) {
  Q = max(CGGP$uo[1:CGGP$uoCOUNT,]) # Max value of all blocks

  if(!is.matrix(y)){
    numoutputs = 1
    yimputed <- y
  }else{
    numoutputs = dim(y)[2]
    yimputed <- y
  }

  for(oplcv in 1:numoutputs){
    if(!is.matrix(y)){
      y.thisloop = y
    }else{
      y.thisloop = as.vector(y[,oplcv])
    }
    if(any(is.na(y.thisloop))){
      Is = sort(which(is.na(y.thisloop)))

      if(!is.matrix(y)){
        thetaMAP.thisloop = theta
        cholS = list(matrix(1,1,1),Q*CGGP$d) # To store choleskys
        # Loop over each dimension
        for (dimlcv in 1:CGGP$d) {
          # Loop over each possible needed correlation matrix
          for (levellcv in 1:max(CGGP$uo[1:CGGP$uoCOUNT,dimlcv])) {
            Xbrn = CGGP$xb[1:CGGP$sizest[levellcv]]
            Xbrn = Xbrn[order(Xbrn)]
            Sstuff = CGGP$CorrMat(Xbrn, Xbrn,
                                  thetaMAP.thisloop[(dimlcv-1)*CGGP$numpara+1:CGGP$numpara],
                                  return_dCdtheta = FALSE)
            S = Sstuff
            # When theta is large (> about 5), the matrix is essentially all 1's, can't be inverted
            solvetry <- try({
              cS = chol(S)
              cholS[[(dimlcv-1)*Q+levellcv]] = cS+t(cS)-diag(diag(cS)) # store the symmetric version for C code
            }, silent = TRUE)
          }
        }
        cholS.thisloop = cholS
      }else{
        if(is.matrix(theta)){
          thetaMAP.thisloop = theta[,oplcv]
          cholS = list(matrix(1,1,1),Q*CGGP$d) # To store choleskys
          # Loop over each dimension
          for (dimlcv in 1:CGGP$d) {
            # Loop over each possible needed correlation matrix
            for (levellcv in 1:max(CGGP$uo[1:CGGP$uoCOUNT,dimlcv])) {
              Xbrn = CGGP$xb[1:CGGP$sizest[levellcv]]
              Xbrn = Xbrn[order(Xbrn)]
              Sstuff = CGGP$CorrMat(Xbrn, Xbrn,
                                    thetaMAP.thisloop[(dimlcv-1)*CGGP$numpara+1:CGGP$numpara],
                                    return_dCdtheta = FALSE)
              S = Sstuff
              # When theta is large (> about 5), the matrix is essentially all 1's, can't be inverted
              solvetry <- try({
                cS = chol(S)
                cholS[[(dimlcv-1)*Q+levellcv]] = cS+t(cS)-diag(diag(cS)) # store the symmetric version for C code
              }, silent = TRUE)
            }
          }
          cholS.thisloop = cholS
        }else{
          thetaMAP.thisloop = theta
          if(oplcv < 1.5){
            cholS = list(matrix(1,1,1),Q*CGGP$d) # To store choleskys
            # Loop over each dimension
            for (dimlcv in 1:CGGP$d) {
              # Loop over each possible needed correlation matrix
              for (levellcv in 1:max(CGGP$uo[1:CGGP$uoCOUNT,dimlcv])) {
                Xbrn = CGGP$xb[1:CGGP$sizest[levellcv]]
                Xbrn = Xbrn[order(Xbrn)]
                Sstuff = CGGP$CorrMat(Xbrn, Xbrn,
                                      thetaMAP.thisloop[(dimlcv-1)*CGGP$numpara+1:CGGP$numpara],
                                      return_dCdtheta = FALSE)
                S = Sstuff
                # When theta is large (> about 5), the matrix is essentially all 1's, can't be inverted
                solvetry <- try({
                  cS = chol(S)
                  cholS[[(dimlcv-1)*Q+levellcv]] = cS+t(cS)-diag(diag(cS)) # store the symmetric version for C code
                }, silent = TRUE)
              }
            }
            cholS.thisloop = cholS
          }
        }
      }

      xp = as.matrix(CGGP$design[Is,])
      if(dim(xp)[2]<CGGP$d){
        xp = t(xp)
      }
      n2pred = dim(xp)[1]

      warmstart = 1
      if(is.null(ystart)){
        warmstart = 0
      }else{
        if(!is.matrix(y)){
          if(is.matrix(ystart)){
            warmstart = 0
          }else{
            if(length(ystart)!=length(y)){
              warmstart = 0
            }else{
              warmstart = 1
              yn0 = ystart
              yhat0 = ystart[Is]
              w = rep(1,n2pred)
            }
          }
        }else{
          if(!is.matrix(ystart)){
            warmstart = 0
          }else{
            if(dim(ystart)[1] != dim(y)[1] || dim(ystart)[2] != dim(y)[2]){
              warmstart = 0
            }else{
              warmstart = 1
              yn0 = ystart[,oplcv]
              yhat0 = ystart[Is,oplcv]
              w = rep(1,n2pred)
            }
          }
        }
      }

      if(warmstart<0.5){
        brokenblocks = unique(CGGP$blockassign[Is])
        possblocks = 1:max(CGGP$uoCOUNT,20)
        for (lcv in 1:n2pred){
          Bs = CGGP$blockassign[Is[lcv]]
          possblocks = unique(c(possblocks,CGGP$uala[Bs,1:sum(CGGP$uo[Bs,]>1.5)]))
        }
        possblocks = sort(possblocks)
        keepthisone = rep(1,length(possblocks))

        for (lcv in 1:length(possblocks)){
          if (any(Is %in% CGGP$dit[possblocks[lcv], 1:CGGP$gridsizet[possblocks[lcv]]])){
            keepthisone[lcv] = 0;
          }
        }
        possblocks = possblocks[which(keepthisone>0.5)]

        # Cp is sigma(x_0) in paper, correlation vector between design points and xp
        Cp = matrix(0,n2pred,CGGP$ss)
        GGGG = list(matrix(1,n2pred,length(CGGP$xb)),CGGP$d)
        for (dimlcv in 1:CGGP$d) { # Loop over dimensions
          V = CGGP$CorrMat(xp[,dimlcv],
                           CGGP$xb[1:CGGP$sizest[max(CGGP$uo[,dimlcv])]],
                           thetaMAP.thisloop[(dimlcv-1)*CGGP$numpara+1:CGGP$numpara],
                           returnlogs=TRUE)
          GGGG[[dimlcv]] = exp(V)
          Cp = Cp+V[,CGGP$designindex[,dimlcv]]
        }
        Cp = exp(Cp)

        ME_t = matrix(1,n2pred,1)
        MSE_v = list(matrix(0,n2pred,2),(CGGP$d+1)*(CGGP$maxlevel+1))
        Q = max(CGGP$uo[1:CGGP$uoCOUNT,])
        for (dimlcv in 1:CGGP$d) {
          for (levellcv in 1:max(CGGP$uo[1:CGGP$uoCOUNT,dimlcv])) {
            gg = (dimlcv-1)*Q
            INDSN = 1:CGGP$sizest[levellcv]
            INDSN = INDSN[sort(CGGP$xb[1:CGGP$sizest[levellcv]],
                               index.return = TRUE)$ix]
            MSE_v[[(dimlcv)*CGGP$maxlevel+levellcv]] =
              CGGP_internal_postvarmatcalc_fromGMat(GGGG[[dimlcv]],
                                                    c(),
                                                    as.matrix(cholS.thisloop[[gg+levellcv]]),
                                                    c(),
                                                    INDSN,
                                                    CGGP$numpara,
                                                    returndiag=TRUE)
          }
        }

        ME_t = matrix(1,n2pred,length(possblocks))
        for (blocklcv in 1:length(possblocks)) {
          ME_s = matrix(1,n2pred,1)
          for (dimlcv in 1:CGGP$d) {
            levelnow = CGGP$uo[possblocks[blocklcv],dimlcv]
            ME_s = ME_s*as.matrix(MSE_v[[(dimlcv)*CGGP$maxlevel+levelnow]])
          }
          ME_t[,blocklcv] = as.vector(ME_s)
        }
        Jstar = possblocks[apply(ME_t, 1, which.max)]
        w = rep(0,n2pred)
        w = 1-apply(ME_t, 1, max)

        # find x0
        yn0 = y.thisloop
        yhat0 = rep(0,n2pred)
        gg = (1:CGGP$d-1)*Q
        for(lcv in 1:length(Jstar)){
          Q = max(CGGP$uo[1:CGGP$uoCOUNT,]) # Max value of all blocks
          IS = CGGP$dit[Jstar[lcv], 1:CGGP$gridsizet[Jstar[lcv]]];
          B = y.thisloop[IS]
          rcpp_kronDBS(unlist(cholS.thisloop[gg+CGGP$uo[Jstar[lcv],]]),
                       B,
                       CGGP$gridsizest[Jstar[lcv],])
          yhat0[lcv] = Cp[lcv,IS]%*%B
          yn0[Is[lcv]] = yhat0[lcv]
        }
      }

      gg = (1:CGGP$d-1)*Q
      # find g at x0
      pwforg = rep(0,length(y.thisloop))
      for (blocklcv in 1:CGGP$uoCOUNT) {
        IS = CGGP$dit[blocklcv, 1:CGGP$gridsizet[blocklcv]]
        if(any(Is %in% IS)){
          B = yn0[IS]
          rcpp_kronDBS(unlist(cholS.thisloop[gg+CGGP$uo[blocklcv,]]),
                       B,
                       CGGP$gridsizest[blocklcv,])
          pwforg[IS] = pwforg[IS]+CGGP$w[blocklcv] * B
        }
      }
      pw0 = pwforg
      pwforg = pwforg[Is]
      dir0 = pwforg

      wgst1 = rep(0,length(y.thisloop))
      wgst2 = rep(0,length(y.thisloop))
      wgst1[Is] = w*dir0
      dotheseblocks = rep(0,CGGP$uoCOUNT)
      for (blocklcv in 1:CGGP$uoCOUNT) {
        IS = CGGP$dit[blocklcv, 1:CGGP$gridsizet[blocklcv]]
        if(any(Is %in% IS)){
          B = wgst1[IS]
          rcpp_kronDBS(unlist(cholS.thisloop[gg+CGGP$uo[blocklcv,]]),
                       B,
                       CGGP$gridsizest[blocklcv,])
          wgst2[IS] = wgst2[IS]+CGGP$w[blocklcv] * B
        }
      }
      lambdas = pmax(pmin(sum(wgst2*yn0)/sum(wgst1[Is]*wgst2[Is]),1.1),-0.1)
      yhat1 = yhat0-lambdas*w*dir0;
      yn1 = yn0
      yn1[Is] = yhat1

      pwforg = rep(0,length(y.thisloop))
      for (blocklcv in 1:CGGP$uoCOUNT) {
        IS = CGGP$dit[blocklcv, 1:CGGP$gridsizet[blocklcv]]
        if(any(Is %in% IS)){
          B = yn1[IS]
          rcpp_kronDBS(unlist(cholS.thisloop[gg+CGGP$uo[blocklcv,]]),
                       B,
                       CGGP$gridsizest[blocklcv,])
          pwforg[IS] = pwforg[IS]+CGGP$w[blocklcv] * B
        }
      }
      pw1 = pwforg
      pwforg = pwforg[Is]
      dir1 = pwforg

      M = 20
      s = matrix(0,length(dir1),M)
      ny = matrix(0,length(dir1),M)
      dirsave = matrix(0,length(dir1),M)
      xsave = matrix(0,length(dir1),M)
      rho = rep(0,M)
      gamma = rep(0,M)
      # x0 = yhat0, x1 = yhat1
      # g(x0) = dir0, x1 = dir1
      lcv = 1
      s[,lcv] = yhat1-yhat0
      ny[,lcv] = dir1-dir0
      xsave[,lcv] = yhat1
      dirsave[,lcv] = dir1
      L = rep(0,400)
      for(lcv in 1:400){
        q = dir1
        if(lcv > 1.5){
          for(k in 2:min(lcv,M)){ # q(min(lcv,M),1,by=-1)
            rho[k] = 1/sum(s[,k]*ny[,k])
            gamma[k] = rho[k]*sum(s[,k]*q)
            q = q-gamma[k]*ny[,k]
          }
        }
        r = q*mean(s[,1]*ny[,1])/mean(ny[,1]*ny[,1])
        if(lcv > 1.5){
          for(k in min(lcv,M):2){ # q(min(lcv,M),1,by=-1)
            beta = rho[k]*sum(ny[,k]*r)
            r = r+(gamma[k]-beta)*s[,k]
          }
        }
        dir1u = -r

        wgst1 = rep(0,length(y.thisloop))
        wgst2 = rep(0,length(y.thisloop))
        wgst1[Is] = dir1u
        dotheseblocks = rep(0,CGGP$uoCOUNT)
        for (blocklcv in 1:CGGP$uoCOUNT) {
          IS = CGGP$dit[blocklcv, 1:CGGP$gridsizet[blocklcv]]
          if(any(Is %in% IS)){
            B = wgst1[IS]
            rcpp_kronDBS(unlist(cholS.thisloop[gg+CGGP$uo[blocklcv,]]),
                         B,
                         CGGP$gridsizest[blocklcv,])
            wgst2[IS] = wgst2[IS]+CGGP$w[blocklcv] * B
          }
        }
        lambdas = -sum(wgst2*yn1)/sum(wgst1[Is]*wgst2[Is])

        for(k in (min(lcv,M-1)):1){
          s[,k+1] = s[,k]
          ny[,k+1] = ny[,k]
          xsave[,k+1] = xsave[,k]
          dirsave[,k+1] = dirsave[,k]
        }

        yhat2 = yhat1+lambdas*dir1u
        yn2 = yn1
        yn2[Is] = yhat2

        pwforg = rep(0,length(y.thisloop))
        for (blocklcv in 1:CGGP$uoCOUNT) {
          IS = CGGP$dit[blocklcv, 1:CGGP$gridsizet[blocklcv]]
          if(any(Is %in% IS)){
            B = yn2[IS]
            rcpp_kronDBS(unlist(cholS.thisloop[gg+CGGP$uo[blocklcv,]]),
                         B,
                         CGGP$gridsizest[blocklcv,])
            pwforg[IS] = pwforg[IS]+CGGP$w[blocklcv] * B
          }
        }
        pw2 = pwforg
        pwforg = pwforg[Is]
        dir2 = pwforg

        s[,1] = yhat2-xsave[,2]
        ny[,1] = dir2-dirsave[,2]
        xsave[,1] = yhat2
        dirsave[,1] = dir2

        if(any(is.na(yn2))){
          break
        }
        dir1 = dir2
        yhat1 = yhat2
        yn1 = yn2
        pw1 = pw2
        L[lcv] = sum(pw2*yn2)
        if(lcv > (M+1)){
          if(max(abs(L[lcv:(lcv-3)]-L[(lcv-1):(lcv-4)])) < 10^(-3)*L[lcv]){
            break
          }
        }
        if(lcv > 10 && lcv <= M+1){
          if(max(abs(L[lcv:(lcv-3)]-L[(lcv-1):(lcv-4)])) < 10^(-3)*L[lcv]){
            break
          }
        }
      }

      if(!is.matrix(y)){
        yimputed <- yn1
      }else{
        yimputed[,oplcv] <- yn1
      }
    }
  }
  return(yimputed)
}
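
# --- Illustrative sketch (not part of the package API) -----------------------
# Imputing a couple of missing grid responses before fitting. The indices,
# the zero starting theta, and the assumption that a freshly created CGGP
# object carries the block bookkeeping this function needs are all
# illustrative; wrapped in `if (FALSE)` so it is parsed but never executed.
if (FALSE) {
  cg <- CGGPcreate(d=3, batchsize=40)
  Y <- apply(cg$design, 1, function(x) {x[1] + x[2]^2})
  Ymiss <- Y
  Ymiss[c(3, 11)] <- NA   # pretend two grid runs failed
  Yfilled <- CGGP_internal_imputesomegrid(cg, Ymiss,
                                          theta = rep(0, cg$numpara*cg$d))
  cbind(true = Y[c(3, 11)], imputed = Yfilled[c(3, 11)])
}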
/scratch/gouwar.j/cran-all/cranData/CGGP/R/CGGP_impute_fs.R