## Partition additive genetic values
res <- AlphaPart(x=AlphaPart.ped, colPath="country", colBV=c("bv1", "bv2"))

## Summarize population by generation (=trend)
ret <- summary(res, by="gen")

## Plot the partitions
p <- plot(ret, ylab=c("BV for trait 1", "BV for trait 2"), xlab="Generation")

## Save the plots
tmp <- savePlot(x = p, filename="test", type="png")

## Remove the files
unlink(tmp)
/scratch/gouwar.j/cran-all/cranData/AlphaPart/inst/examples/examples_savePlot.plotsummaryAlphaPart.R
## --- Partition additive genetic values by loc ---

res <- AlphaPart(x=AlphaPart.ped, colPath="country", colBV=c("bv1", "bv2"))

## Summarize whole population
ret <- summary(res)

## Summarize population by generation (=trend)
ret <- summary(res, by="gen")

## Summarize population by generation (=trend) but only for domestic location
ret <- summary(res, by="gen", subset=res[[1]]$country == "domestic")

## --- Partition additive genetic values by loc and gender ---

AlphaPart.ped$country.gender <- with(AlphaPart.ped,
                                     paste(country, gender, sep="-"))
res <- AlphaPart(x=AlphaPart.ped, colPath="country.gender",
                 colBV=c("bv1", "bv2"))

## Summarize population by generation (=trend)
ret <- summary(res, by="gen")

## Summarize population by generation (=trend) but only for domestic location
ret <- summary(res, by="gen", subset=res[[1]]$country == "domestic")
/scratch/gouwar.j/cran-all/cranData/AlphaPart/inst/examples/examples_summary.AlphaPart.R
## Partition additive genetic values
res <- AlphaPart(x=AlphaPart.ped, colPath="country", colBV=c("bv1", "bv2"))

## Write summary on the disk and collect saved file names
fileName <- file.path(tempdir(), "AlphaPart")
ret <- write.csv(x=res, file=fileName)
print(ret)
file.show(ret[1])

## Clean up
files <- dir(path=tempdir(), pattern="AlphaPart*")
unlink(x=files)
/scratch/gouwar.j/cran-all/cranData/AlphaPart/inst/examples/examples_write.csv.R
--- title: "Partitioning genetic trends in mean and variance" author: "Gregor Gorjanc, Jana Obsteter, Thiago de Paula Oliveira" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{PartitioningVariance} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE, fig.height = 6, fig.width = 6) ``` In breeding programmes, the observed genetic change is a sum of the contributions of different groups of individuals. Here we show how to partition the genetic mean and variance of breeding values using AlphaPart. In addition to the contribution of paths to changes in genetic mean, breeding programmes should also consider analysing changes in genetic variance to understand the drivers of genetic change in a population fully. Managing the change in genetic mean and variance in breeding programmes is essential to ensure long-term genetic gain. # Loading packages ```{r, message=FALSE} #======================================================================= # Packges #======================================================================= #devtools::install_github("AlphaGenes/AlphaPart") library(AlphaPart) library(dplyr) library(ggplot2) library(ggridges) ``` # Loading datafile ```{r} #======================================================================= # Reading and organizing Scenario 1 #======================================================================= data <- readRDS("./../inst/extdata/AlphaPartCattleSim.rds") %>% dplyr::mutate(across(generation:mother, as.numeric)) %>% dplyr::rename(status = type) %>% dplyr::mutate(across(c("sex", "status"), as.factor)) %>% dplyr::mutate(path = interaction(sex,status, sep = ":")) %>% arrange(generation, ind) %>% select(ind, father, mother, sex, status, path, generation, tbv, pheno) %>% dplyr::mutate(generation = generation - 20) %>% droplevels() # Data head head(data) %>% knitr::kable(digits = 2) # Data size dim(data) ``` * ```ind``` - individual * ```father``` and ```mother``` - individual's parents * ```sex``` - individual sex * ```status``` - if the individual is or not selected * ```path``` - the path variable used to partition the additive genetic mean * ```tbv``` - true breeding value * ```pheno``` - phenotypic value # Partitioning trends in genetic mean and variance We use the `AlphaPart` function to partition the true breeding values `(tbv)` in the `data` by the animal sex and status variable combination into females (F) and males (M) non-selected (N) and males selected (S) contributions: ```{r} part <- AlphaPart(data, colId = "ind", colFid = "father", colMid = "mother", colBV = "tbv", colPath = "path") head(part$tbv) %>% knitr::kable(digits = 2) ``` We use the generic `summary.AlphaPart` function to summarize an `AlphaPart` object by generation, con*sering: * the function **mean** ```{r} # Trends in the additve genetic mean partMean <- summary(part, by = "generation", FUN = mean) head(partMean$tbv) %>% knitr::kable(digits = 2) ``` * the function **variance** ```{r} # Trends in the additive genetic variance partVar <- summary(part, by = "generation", FUN = var, cov = TRUE) head(partVar$tbv) %>% knitr::kable(digits = 2) ``` ## Example of plots to analyse the results Distribution of breeding value partitions by sex and selection status (selected males (M(S)), non-selected males (M(N)), and females (F)) over generations. 
```{r, fig.height = 10, fig.width=9} part$tbv %>% ggplot(aes(y = as.factor(generation), `tbv_F:Non-Selected`)) + geom_density_ridges( aes(fill = "F - Non-Selected", linetype = "F - Non-Selected"), alpha = .4, point_alpha = 1, rel_min_height = 0.01 ) + geom_density_ridges( aes(y = as.factor(generation), x= `tbv_M:Non-Selected`, fill = "M - Non-Selected", linetype = "M - Non-Selected"), alpha = .4, point_alpha = 1, rel_min_height = 0.01 ) + geom_density_ridges( aes(y = as.factor(generation), x= `tbv_M:Selected`, fill = "M - Selected", linetype = "M - Selected"), alpha = .4, point_alpha = 1, rel_min_height = 0.01 ) + geom_density_ridges( aes(y = as.factor(generation), x= `tbv`, fill = "Sum", linetype = "Sum"), alpha = .4, point_alpha = 1, rel_min_height = 0.01 ) + ylab("Generation") + xlab("Density plot of breeding value partitions") + labs(fill = "Path:", linetype = "Path:") + theme_bw(base_size = 20) + theme( legend.position = "top" ) ``` Partitions of genetic mean and variance by sex and selection status (selected males (M(S)), non-selected males (M(N)), and females (F)) using true breeding values: ```{r} partMean$tbv %>% ggplot(aes(y = Sum, x = generation, colour = "Sum"), size = 0.1) + scale_linetype_manual( values = c("solid", "longdash", "dashed", "dotted"))+ geom_line() + geom_line(aes(y = `F:Non-Selected`, x = generation, colour = "F"), alpha = 0.8) + geom_line(aes(y = `M:Selected`, x = generation, colour = "M(S)"), alpha = 0.8) + geom_line(aes(y = `M:Non-Selected`, x = generation, colour = "M(N)"), alpha = 0.8) + geom_vline(xintercept = 0, linetype = 2, alpha = 0.3) + ylab("Genetic Mean") + xlab("Generation") + labs(colour = "Path:") + theme_bw(base_size = 18) + theme(legend.position = "top") ``` ```{r} partVar$tbv %>% ggplot(aes(y = Sum, x = generation, colour = "Sum")) + geom_line() + geom_line(aes(y = `F:Non-Selected`, x = generation, colour = "F"), alpha = 0.8) + geom_line(aes(y = `F:Non-SelectedM:Selected`, x = generation, colour = "F:M(S)"), size =0.5, alpha =0.8) + geom_line(aes(y = `F:Non-SelectedM:Non-Selected`, x = generation, colour = "F:M(N)"), size =0.5, alpha =0.6) + geom_line(aes(y = `M:Non-SelectedM:Selected`, x = generation, colour = "M(N):M(S)"), size =0.5, alpha =0.6) + geom_line(aes(y = `M:Selected`, x = generation, colour = "M(S)"), alpha = 0.8) + geom_line(aes(y = `M:Non-Selected`, x = generation, colour = "M(N)"), size =0.5, alpha =0.8) + geom_vline(xintercept = 0, linetype = 2, alpha = 0.3) + ylab("Genetic Variance") + xlab("Generation") + labs(colour = "Path: ") + theme_bw(base_size = 18) + theme( legend.position = "top" ) ```
/scratch/gouwar.j/cran-all/cranData/AlphaPart/vignettes/alphapart-variance.Rmd
--- title: "AlphaPart - R implementation of the method for partitioning genetic trends" description: > Learn how to get started with the basics of AlphaPart. author: "Gregor Gorjanc, Jana Obsteter, Thiago de Paula Oliveira" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{AlphaPart} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ## AlphaPart AlphaPart is an R package available from CRAN repository at https://CRAN.R-project.org/package=AlphaPart. It consists of the main function AlphaPart for partitioning breeding values and auxiliary functions for manipulating data and summarizing, visualizing, and saving results. The package includes an example dataset AlphaPart.ped, which includes a four-generation pedigree and information about the generation, country, gender, and breeding values. Below we describe and demonstrate the functions with the dataset. We install and load the package with: install.packages(pkg = “AlphaPart”) library(package = "AlphaPart") We use the `AlphaPart` function to partition breeding values `(bv1)` in the `AlphaPart.ped` by the country variable into domestic and import contributions: data(AlphaPart.ped) part <- AlphaPart(x = AlphaPart.ped, colPath = “country”, colBV = “bv1”) The partitioning function `AlphaPart` requires a data frame holding pedigree with animal/sire/dam or animal/sire/maternal-grandsire, a time-ordering variable such as year of birth, partition variable (path), and breeding values. Following the method described in García-Cortés et al. (2008), we recurse the pedigree from the oldest to the youngest individuals, for each individual calculate parent average and Mendelian sampling terms for any number of traits and assign terms to paths. We partition multiple traits by specifying a vector of variables, say colBV = c(“bv1”, “bv2”). The multiple trait option can also serve to partition samples from a posterior distribution to quantify uncertainty (Sorensen et al., 1994; Škorput et al., 2015). To speed-up calculations we use C++ and trait-vectorised partitioning. The function can also directly partition and summarize path contributions “on-the-fly”, which is a useful computational speed-up for huge pedigrees. The output object of the function is either `AlphaPart` or `summaryAlphaPart` class. We use the generic `summary.AlphaPart` function to summarize an `AlphaPart` object by a grouping variable, say generation `(gen)`: sumPartByGen <- summary(part, by = “gen”) print(sumPartByGen) The `summary` function summarizes breeding values and their path partitions by levels of grouping variable. By default, we summarize with a mean, but the user can specify any R function via the FUN argument. The summary function can also summarize only a subset of the object via the subset argument. We use the generic `plot.summaryAlphaPart` function to plot summarized partitions: plot(sumPartByGen) We provide a number of utility functions that ease partitioning analysis. With the `pedFixBirthYear` function we impute missing or fix erroneous years of birth. With the `pedSetBase` function we set the base population by specifying founders and removing older pedigree records. With the `AlphaPartSubset` function we keep partitions for specified paths in the `AlphaPart` or `summaryAlphaPart` objects. With the `AlphaPartSum` function we sum the partitions of several paths in a `summaryAlphaPart` object. 
The `AlphaPartSubset` and `AlphaPartSum` functions simplify the presentation of partitioning analysis.
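For example, a minimal sketch of both functions (the argument names `paths` and `map` are our reading of the package manual; see `?AlphaPartSubset` and `?AlphaPartSum` for the authoritative interface):

```r
## Keep only the domestic partition for presentation
sumDomestic <- AlphaPartSubset(x = sumPartByGen, paths = "domestic")

## Sum the domestic and import partitions into a new "all" path
sumAll <- AlphaPartSum(x = sumPartByGen,
                       map = list(c("all", "domestic", "import")))
```

For intuition, the recursion of García-Cortés et al. (2008) sketched above can be written in plain R as follows. This is an illustrative single-trait version, not the package's C++ implementation; it assumes `ped` is sorted from oldest to youngest, with ids equal to row numbers and 0 denoting unknown parents:

```r
partitionBV <- function(ped, a, path) {
  path <- as.character(path)
  paths <- unique(path)
  # part[i, p] holds the contribution of path p to individual i's breeding value
  part <- matrix(0, nrow = length(a), ncol = length(paths),
                 dimnames = list(NULL, paths))
  for (i in seq_along(a)) {
    s <- ped$father[i]
    d <- ped$mother[i]
    # Parent-average term: propagate the parents' existing partitions
    pa <- numeric(length(paths))
    if (s > 0) pa <- pa + part[s, ] / 2
    if (d > 0) pa <- pa + part[d, ] / 2
    # Mendelian sampling term: credited to the individual's own path
    w <- a[i] - (if (s > 0) a[s] / 2 else 0) - (if (d > 0) a[d] / 2 else 0)
    part[i, ] <- pa
    part[i, path[i]] <- part[i, path[i]] + w
  }
  part
}
```

By construction, `rowSums(partitionBV(ped, a, path))` recovers the original breeding values `a`, so the path contributions form an exact decomposition.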
/scratch/gouwar.j/cran-all/cranData/AlphaPart/vignettes/alphapart-vignette.Rmd
#' @useDynLib AlphaSimR, .registration = TRUE
#' @import Rcpp
#' @importFrom methods new validObject is .hasSlot
#' @importFrom methods show classLabel
#' @importFrom stats aggregate rnorm qnorm var
#' @importFrom stats coef dnorm lm pnorm qgamma na.omit
#' @importFrom stats model.matrix rbinom runif cov2cor
#' @importFrom utils combn read.table write.table packageDescription
#' @importFrom R6 R6Class
#' @importFrom Rdpack reprompt
#' @description
#' The successor to the 'AlphaSim' software for breeding program
#' simulation [Faux et al. (2016) <doi:10.3835/plantgenome2016.02.0013>].
#' Used for stochastic simulations of breeding programs to the level of DNA
#' sequence for every individual. Contained is a wide range of functions for
#' modeling common tasks in a breeding program, such as selection and crossing.
#' These functions allow for constructing simulations of highly complex plant and
#' animal breeding programs via scripting in the R software environment. Such
#' simulations can be used to evaluate overall breeding program performance and
#' conduct research into breeding program design, such as implementation of
#' genomic selection. Included is the 'Markovian Coalescent Simulator' ('MaCS')
#' for fast simulation of biallelic sequences according to a population
#' demographic history [Chen et al. (2009) <doi:10.1101/gr.083634.108>].
#'
#' Please see the introductory vignette for instructions for using this package.
#' The vignette can be viewed using the following command:
#' \code{vignette("intro",package="AlphaSimR")}
#' @keywords internal
"_PACKAGE"
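
# A minimal usage sketch of the workflow described above (illustrative only,
# not part of the package source; it uses standard exported functions -- see
# the introductory vignette for a full walk-through):
#   library(AlphaSimR)
#   founderPop = quickHaplo(nInd=100, nChr=10, segSites=100)
#   SP = SimParam$new(founderPop)
#   SP$addTraitA(nQtlPerChr=10)
#   SP$setVarE(h2=0.5)
#   pop = newPop(founderPop, simParam=SP)
#   for(gen in 1:5){ # five generations of phenotypic selection
#     pop = selectCross(pop=pop, nInd=20, nCrosses=100, simParam=SP)
#   }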
/scratch/gouwar.j/cran-all/cranData/AlphaSimR/R/AlphaSimR.R
#HybridPop----
#' @title Hybrid population
#'
#' @description
#' A lightweight version of \code{\link{Pop-class}} for hybrid lines.
#' Memory is saved by not storing genotypic data.
#'
#' @param x a 'HybridPop'
#' @param i index of individuals
#' @param ... additional 'HybridPop' objects
#'
#' @slot nInd number of individuals
#' @slot id an individual's identifier
#' @slot mother the identifier of the individual's mother
#' @slot father the identifier of the individual's father
#' @slot nTraits number of traits
#' @slot gv matrix of genetic values. When using GxE traits,
#' gv reflects gv when p=0.5. Dimensions are nInd by nTraits.
#' @slot pheno matrix of phenotypic values. Dimensions are
#' nInd by nTraits.
#' @slot gxe list containing GxE slopes for GxE traits
#'
#' @export
setClass("HybridPop",
         slots=c(nInd="integer",
                 id="character",
                 mother="character",
                 father="character",
                 nTraits="integer",
                 gv="matrix",
                 pheno="matrix",
                 gxe="list"))

setValidity("HybridPop",function(object){
  errors = character()
  if(any(grepl(" ",object@id,fixed=TRUE))){
    errors = c(errors,"id can not contain spaces")
  }
  if(any(grepl(" ",object@mother,fixed=TRUE))){
    errors = c(errors,"mother can not contain spaces")
  }
  if(any(grepl(" ",object@father,fixed=TRUE))){
    errors = c(errors,"father can not contain spaces")
  }
  if(object@nInd!=length(object@id)){
    errors = c(errors,"nInd!=length(id)")
  }
  if(object@nInd!=length(object@mother)){
    errors = c(errors,"nInd!=length(mother)")
  }
  if(object@nInd!=length(object@father)){
    errors = c(errors,"nInd!=length(father)")
  }
  if(object@nInd!=nrow(object@gv)){
    errors = c(errors,"nInd!=nrow(gv)")
  }
  if(object@nInd!=nrow(object@pheno)){
    errors = c(errors,"nInd!=nrow(pheno)")
  }
  if(object@nTraits!=ncol(object@gv)){
    errors = c(errors,"nTraits!=ncol(gv)")
  }
  if(object@nTraits!=ncol(object@pheno)){
    errors = c(errors,"nTraits!=ncol(pheno)")
  }
  if(object@nTraits!=length(object@gxe)){
    errors = c(errors,"nTraits!=length(gxe)")
  }
  if(length(errors)==0){
    return(TRUE)
  }else{
    return(errors)
  }
})

#' @describeIn HybridPop Extract HybridPop using index or id
setMethod("[",
          signature(x = "HybridPop"),
          function(x, i){
            if(is.character(i)){
              i = x@id%in%i
            }
            x@id = x@id[i]
            x@mother = x@mother[i]
            x@father = x@father[i]
            x@gv = x@gv[i,,drop=FALSE]
            x@pheno = x@pheno[i,,drop=FALSE]
            x@nInd = length(x@id)
            if(x@nTraits>=1){
              for(trait in 1:x@nTraits){
                if(!is.null(x@gxe[[trait]])){
                  x@gxe[[trait]] = x@gxe[[trait]][i]
                }
              }
            }
            return(x)
          }
)

#' @describeIn HybridPop Combine multiple HybridPops
setMethod("c",
          signature(x = "HybridPop"),
          function (x, ...){
            for(y in list(...)){
              stopifnot(class(y)=="HybridPop")
              x@nInd = x@nInd+y@nInd
              x@id = c(x@id,y@id)
              x@mother = c(x@mother,y@mother)
              x@father = c(x@father,y@father)
              x@gv = rbind(x@gv,y@gv)
              x@pheno = rbind(x@pheno,y@pheno)
              if(x@nTraits>=1){
                for(trait in 1:x@nTraits){
                  if(!is.null(x@gxe[[trait]])){
                    x@gxe[[trait]] = c(x@gxe[[trait]],y@gxe[[trait]])
                  }
                }
              }
            }
            return(x)
          }
)

#' @describeIn HybridPop Test if object is of a HybridPop class
#' @export
isHybridPop = function(x) {
  ret = is(x, class2 = "HybridPop")
  return(ret)
}
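
# Illustrative sketch (an assumption about typical usage, not package code):
# a HybridPop is normally produced by hybridCross() when asked to return the
# lightweight class, e.g.:
#   founderPop = quickHaplo(nInd=20, nChr=1, segSites=10)
#   SP = SimParam$new(founderPop)
#   SP$addTraitA(10)
#   pop = newPop(founderPop, simParam=SP)
#   hyb = hybridCross(pop[1:10], pop[11:20],
#                     returnHybridPop=TRUE, simParam=SP)
#   isHybridPop(hyb)  # TRUE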
/scratch/gouwar.j/cran-all/cranData/AlphaSimR/R/Class-HybridPop.R
# The LociMap superclass contains SNP/QTL locations
# Trait classes add QTL effects

#LociMap----
#' @title Loci metadata
#'
#' @description used for both SNPs and QTLs
#'
#' @slot nLoci total number of loci
#' @slot lociPerChr number of loci per chromosome
#' @slot lociLoc physical position of loci
#' @slot name optional name for LociMap object
#'
#' @export
setClass("LociMap",
         slots=c(nLoci="integer",
                 lociPerChr="integer",
                 lociLoc="integer",
                 name="character"),
         prototype=list(name=NA_character_))

setValidity("LociMap",function(object){
  errors = character()
  if(object@nLoci!=sum(object@lociPerChr)){
    errors = c(errors,"nLoci!=sum(lociPerChr)")
  }
  if(object@nLoci!=length(object@lociLoc)){
    errors = c(errors,"nLoci!=length(lociLoc)")
  }
  if(length(errors)==0){
    return(TRUE)
  }else{
    return(errors)
  }
})

# Test if object is of LociMap class
isLociMap = function(x) {
  ret = is(x, class2 = "LociMap")
  return(ret)
}

#TraitA----
#' @title Additive trait
#'
#' @description Extends \code{\link{LociMap-class}}
#' to model additive traits
#'
#' @slot addEff additive effects
#' @slot intercept adjustment factor for gv
#'
#' @export
setClass("TraitA",
         slots=c(addEff="numeric",
                 intercept="numeric"),
         contains="LociMap")

setValidity("TraitA",function(object){
  errors = character()
  if(object@nLoci!=length(object@addEff)){
    errors = c(errors,"nLoci!=length(addEff)")
  }
  if(length(errors)==0){
    return(TRUE)
  }else{
    return(errors)
  }
})

# Test if object is of a TraitA class
isTraitA = function(x) {
  ret = is(x, class2 = "TraitA")
  return(ret)
}

#TraitA2----
#' @title Sex specific additive trait
#'
#' @description Extends \code{\link{TraitA-class}}
#' to model separate additive effects for parent of
#' origin. Used exclusively for genomic selection.
#'
#' @slot addEffMale additive effects
#'
#' @export
setClass("TraitA2",
         slots=c(addEffMale="numeric"),
         contains="TraitA")

setValidity("TraitA2",function(object){
  errors = character()
  if(object@nLoci!=length(object@addEffMale)){
    errors = c(errors,"nLoci!=length(addEffMale)")
  }
  if(length(errors)==0){
    return(TRUE)
  }else{
    return(errors)
  }
})

# Test if object is of a TraitA2 class
isTraitA2 = function(x) {
  ret = is(x, class2 = "TraitA2")
  return(ret)
}

#TraitAE----
#' @title Additive and epistatic trait
#'
#' @description Extends \code{\link{TraitA-class}}
#' to add epistasis
#'
#' @slot epiEff epistatic effects
#'
#' @export
setClass("TraitAE",
         slots=c(epiEff="matrix"),
         contains="TraitA")

setValidity("TraitAE",function(object){
  errors = character()
  if(object@nLoci!=(2*nrow(object@epiEff))){
    errors = c(errors,"nLoci!=2*nrow(epiEff)")
  }
  if(ncol(object@epiEff)!=3){
    errors = c(errors,"ncol(epiEff)!=3")
  }
  if(length(errors)==0){
    return(TRUE)
  }else{
    return(errors)
  }
})

# Test if object is of a TraitAE class
isTraitAE = function(x) {
  ret = is(x, class2 = "TraitAE")
  return(ret)
}

#TraitAD----
#' @title Additive and dominance trait
#'
#' @description Extends \code{\link{TraitA-class}}
#' to add dominance
#'
#' @slot domEff dominance effects
#'
#' @export
setClass("TraitAD",
         slots=c(domEff="numeric"),
         contains="TraitA")

setValidity("TraitAD",function(object){
  errors = character()
  if(object@nLoci!=length(object@domEff)){
    errors = c(errors,"nLoci!=length(domEff)")
  }
  if(length(errors)==0){
    return(TRUE)
  }else{
    return(errors)
  }
})

# Test if object is of a TraitAD class
isTraitAD = function(x) {
  ret = is(x, class2 = "TraitAD")
  return(ret)
}

#TraitA2D----
#' @title Sex specific additive and dominance trait
#'
#' @description Extends \code{\link{TraitA2-class}}
#' to add dominance
#'
#' @slot domEff dominance effects
#'
#' @export
setClass("TraitA2D",
         slots=c(domEff="numeric"),
         contains="TraitA2")

setValidity("TraitA2D",function(object){
  errors = character()
  if(object@nLoci!=length(object@domEff)){
    errors = c(errors,"nLoci!=length(domEff)")
  }
  if(length(errors)==0){
    return(TRUE)
  }else{
    return(errors)
  }
})

# Test if object is of a TraitA2D class
isTraitA2D = function(x) {
  ret = is(x, class2 = "TraitA2D")
  return(ret)
}

#TraitADE----
#' @title Additive, dominance, and epistatic trait
#'
#' @description Extends \code{\link{TraitAD-class}}
#' to add epistasis
#'
#' @slot epiEff epistatic effects
#'
#' @export
setClass("TraitADE",
         slots=c(epiEff="matrix"),
         contains="TraitAD")

setValidity("TraitADE",function(object){
  errors = character()
  if(object@nLoci!=(2*nrow(object@epiEff))){
    errors = c(errors,"nLoci!=2*nrow(epiEff)")
  }
  if(ncol(object@epiEff)!=3){
    errors = c(errors,"ncol(epiEff)!=3")
  }
  if(length(errors)==0){
    return(TRUE)
  }else{
    return(errors)
  }
})

# Test if object is of a TraitADE class
isTraitADE = function(x) {
  ret = is(x, class2 = "TraitADE")
  return(ret)
}

#TraitAG----
#' @title Additive and GxE trait
#'
#' @description Extends \code{\link{TraitA-class}}
#' to add GxE effects
#'
#' @slot gxeEff GxE effects
#' @slot gxeInt GxE intercept
#' @slot envVar Environmental variance
#'
#' @export
setClass("TraitAG",
         slots=c(gxeEff="numeric",
                 gxeInt="numeric",
                 envVar="numeric"),
         contains="TraitA")

setValidity("TraitAG",function(object){
  errors = character()
  if(object@nLoci!=length(object@gxeEff)){
    errors = c(errors,"nLoci!=length(gxeEff)")
  }
  if(length(errors)==0){
    return(TRUE)
  }else{
    return(errors)
  }
})

# Test if object is of a TraitAG class
isTraitAG = function(x) {
  ret = is(x, class2 = "TraitAG")
  return(ret)
}

#TraitAEG----
#' @title Additive, epistasis and GxE trait
#'
#' @description Extends \code{\link{TraitAE-class}}
#' to add GxE effects
#'
#' @slot gxeEff GxE effects
#' @slot gxeInt GxE intercept
#' @slot envVar Environmental variance
#'
#' @export
setClass("TraitAEG",
         slots=c(gxeEff="numeric",
                 gxeInt="numeric",
                 envVar="numeric"),
         contains="TraitAE")

setValidity("TraitAEG",function(object){
  errors = character()
  if(object@nLoci!=length(object@gxeEff)){
    errors = c(errors,"nLoci!=length(gxeEff)")
  }
  if(length(errors)==0){
    return(TRUE)
  }else{
    return(errors)
  }
})

# Test if object is of a TraitAEG class
isTraitAEG = function(x) {
  ret = is(x, class2 = "TraitAEG")
  return(ret)
}

#TraitADG----
#' @title Additive, dominance and GxE trait
#'
#' @description Extends \code{\link{TraitAD-class}}
#' to add GxE effects
#'
#' @slot gxeEff GxE effects
#' @slot gxeInt GxE intercept
#' @slot envVar Environmental variance
#'
#' @export
setClass("TraitADG",
         slots=c(gxeEff="numeric",
                 gxeInt="numeric",
                 envVar="numeric"),
         contains="TraitAD")

setValidity("TraitADG",function(object){
  errors = character()
  if(object@nLoci!=length(object@gxeEff)){
    errors = c(errors,"nLoci!=length(gxeEff)")
  }
  if(length(errors)==0){
    return(TRUE)
  }else{
    return(errors)
  }
})

# Test if object is of a TraitADG class
isTraitADG = function(x) {
  ret = is(x, class2 = "TraitADG")
  return(ret)
}

#TraitADEG----
#' @title Additive, dominance, epistasis, and GxE trait
#'
#' @description Extends \code{\link{TraitADE-class}}
#' to add GxE effects
#'
#' @slot gxeEff GxE effects
#' @slot gxeInt GxE intercept
#' @slot envVar Environmental variance
#'
#' @export
setClass("TraitADEG",
         slots=c(gxeEff="numeric",
                 gxeInt="numeric",
                 envVar="numeric"),
         contains="TraitADE")

setValidity("TraitADEG",function(object){
  errors = character()
  if(object@nLoci!=length(object@gxeEff)){
    errors = c(errors,"nLoci!=length(gxeEff)")
  }
  if(length(errors)==0){
    return(TRUE)
  }else{
    return(errors)
  }
})

# Test if object is of a TraitADEG class
isTraitADEG = function(x) {
  ret = is(x, class2 = "TraitADEG")
  return(ret)
}
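
# Illustrative sketch (an assumption, not package code): trait objects are
# normally built internally by SimParam$addTraitA() and related methods, but
# a valid TraitA can be constructed by hand and checked, e.g.:
#   trait = new("TraitA",
#               nLoci=3L, lociPerChr=c(2L,1L), lociLoc=c(1L,5L,2L),
#               addEff=c(0.2,-0.1,0.4), intercept=0, name="Trait1")
#   validObject(trait)  # TRUE: nLoci matches lociPerChr, lociLoc, and addEff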
/scratch/gouwar.j/cran-all/cranData/AlphaSimR/R/Class-LociMap.R
# RawPop ------------------------------------------------------------------

#' @title Raw Population
#'
#' @description
#' The raw population class contains only genotype data.
#'
#' @param object a 'RawPop' object
#' @param x a 'RawPop' object
#' @param i index of individuals
#' @param ... additional 'RawPop' objects
#'
#' @slot nInd number of individuals
#' @slot nChr number of chromosomes
#' @slot ploidy level of ploidy
#' @slot nLoci number of loci per chromosome
#' @slot geno list of nChr length containing chromosome genotypes.
#' Each element is a three dimensional array of raw values.
#' The array dimensions are nLoci by ploidy by nInd.
#'
#' @export
setClass("RawPop",
         slots=c(nInd="integer",
                 nChr="integer",
                 ploidy="integer",
                 nLoci="integer",
                 geno="list"))

setValidity("RawPop",function(object){
  errors = character()
  if(object@nChr!=length(object@geno)){
    errors = c(errors,"nChr!=length(geno)")
  }
  if(object@nChr!=length(object@nLoci)){
    errors = c(errors,"nChr!=length(nLoci)")
  }
  for(i in 1:object@nChr){
    DIM1 = object@nLoci[i]%/%8L + (object@nLoci[i]%%8L > 0L)
    if(DIM1!=dim(object@geno[[i]])[1]){
      errors = c(errors,
                 paste0("nLoci[",i,"]!=dim(geno[[",i,"]][1]"))
    }
    if(object@ploidy!=dim(object@geno[[i]])[2]){
      errors = c(errors,
                 paste0("ploidy!=dim(geno[[",i,"]][2]"))
    }
    if(object@nInd!=dim(object@geno[[i]])[3]){
      errors = c(errors,
                 paste0("nInd!=dim(geno[[",i,"]][3]"))
    }
  }
  if(length(errors)==0){
    return(TRUE)
  }else{
    return(errors)
  }
})

#' @describeIn RawPop Extract RawPop by index
setMethod("[",
          signature(x = "RawPop"),
          function(x, i){
            if(any(abs(i)>x@nInd)){
              stop("Trying to select invalid individuals")
            }
            for(chr in 1:x@nChr){
              x@geno[[chr]] = x@geno[[chr]][,,i,drop=FALSE]
            }
            x@nInd = dim(x@geno[[1]])[3]
            return(x)
          }
)

#' @describeIn RawPop Combine multiple RawPops
setMethod("c",
          signature(x = "RawPop"),
          function (x, ...){
            for(y in list(...)){
              if(is(y,"NULL")){
                # Do nothing
              }else{
                stopifnot(class(y)=="RawPop",
                          x@nChr==y@nChr,
                          x@ploidy==y@ploidy,
                          x@nLoci==y@nLoci)
                x@nInd = x@nInd+y@nInd
                x@geno = mergeGeno(x@geno,y@geno)
              }
            }
            return(x)
          }
)

#' @describeIn RawPop Show population summary
setMethod("show",
          signature(object = "RawPop"),
          function (object){
            cat("An object of class", classLabel(class(object)), "\n")
            cat("Ploidy:", object@ploidy,"\n")
            cat("Individuals:", object@nInd,"\n")
            cat("Chromosomes:", object@nChr,"\n")
            cat("Loci:", sum(object@nLoci),"\n")
            invisible()
          }
)

#' @describeIn RawPop Test if object is of a RawPop class
#' @export
isRawPop = function(x) {
  ret = is(x, class2 = "RawPop")
  return(ret)
}

# MapPop ------------------------------------------------------------------

#' @title Raw population with genetic map
#'
#' @description
#' Extends \code{\link{RawPop-class}} to add a genetic map.
#' This is the first object created in a simulation. It is used
#' for creating initial populations and setting traits in the
#' \code{\link{SimParam}}.
#'
#' @param x a 'MapPop' object
#' @param i index of individuals
#' @param ... additional 'MapPop' objects
#'
#' @slot genMap list of chromosome genetic maps
#' @slot centromere vector of centromere positions
#' @slot inbred indicates whether the individuals are fully inbred
#'
#' @export
setClass("MapPop",
         slots=c(genMap="list",
                 centromere="numeric",
                 inbred="logical"),
         contains="RawPop")

setValidity("MapPop",function(object){
  errors = character()
  if(object@nChr!=length(object@genMap)){
    errors = c(errors,"nChr!=length(genMap)")
  }
  for(i in 1:object@nChr){
    if(object@nLoci[i]!=length(object@genMap[[i]])){
      errors = c(errors,
                 paste0("nLoci[",i,"]!=length(genMap[[",i,"]]"))
    }
  }
  if(length(errors)==0){
    return(TRUE)
  }else{
    return(errors)
  }
})

#' @describeIn MapPop Extract MapPop by index
setMethod("[",
          signature(x = "MapPop"),
          function(x, i){
            if(any(abs(i)>x@nInd)){
              stop("Trying to select invalid individuals")
            }
            for(chr in 1:x@nChr){
              x@geno[[chr]] = x@geno[[chr]][,,i,drop=FALSE]
            }
            x@nInd = dim(x@geno[[1]])[3]
            return(x)
          }
)

#' @describeIn MapPop Combine multiple MapPops
setMethod("c",
          signature(x = "MapPop"),
          function (x, ...){
            for(y in list(...)){
              if(is(y,"NULL")){
                # Do nothing
              }else{
                stopifnot(class(y)=="MapPop",
                          x@nChr==y@nChr,
                          x@ploidy==y@ploidy,
                          x@nLoci==y@nLoci,
                          all.equal(x@genMap, y@genMap))
                x@nInd = x@nInd+y@nInd
                x@geno = mergeGeno(x@geno,y@geno)
                x@inbred = x@inbred & y@inbred
              }
            }
            return(x)
          }
)

#' @describeIn MapPop Test if object is of a MapPop class
#' @export
isMapPop = function(x) {
  ret = is(x, class2 = "MapPop")
  return(ret)
}

# NamedMapPop ------------------------------------------------------------------

#' @title Raw population with genetic map and id
#'
#' @description
#' Extends \code{\link{MapPop-class}} to add id, mother and father.
#'
#' @param x a 'NamedMapPop' object
#' @param i index of individuals
#' @param ... additional 'NamedMapPop' objects
#'
#' @slot id an individual's identifier
#' @slot mother the identifier of the individual's mother
#' @slot father the identifier of the individual's father
#'
#' @export
setClass("NamedMapPop",
         slots=c(id="character",
                 mother="character",
                 father="character"),
         contains="MapPop")

setValidity("NamedMapPop",function(object){
  errors = character()
  if(any(grepl(" ",object@id,fixed=TRUE))){
    errors = c(errors,"id can not contain spaces")
  }
  if(any(grepl(" ",object@mother,fixed=TRUE))){
    errors = c(errors,"mother can not contain spaces")
  }
  if(any(grepl(" ",object@father,fixed=TRUE))){
    errors = c(errors,"father can not contain spaces")
  }
  if(object@nInd!=length(object@id)){
    errors = c(errors,"nInd!=length(id)")
  }
  if(object@nInd!=length(object@mother)){
    errors = c(errors,"nInd!=length(mother)")
  }
  if(object@nInd!=length(object@father)){
    errors = c(errors,"nInd!=length(father)")
  }
  if(length(errors)==0){
    return(TRUE)
  }else{
    return(errors)
  }
})

#' @describeIn NamedMapPop Extract NamedMapPop by index
setMethod("[",
          signature(x = "NamedMapPop"),
          function(x, i){
            if(any(abs(i)>x@nInd)){
              stop("Trying to select invalid individuals")
            }
            for(chr in 1:x@nChr){
              x@geno[[chr]] = x@geno[[chr]][,,i,drop=FALSE]
            }
            x@nInd = dim(x@geno[[1]])[3]
            x@id = x@id[i]
            x@mother = x@mother[i]
            x@father = x@father[i]
            return(x)
          }
)

#' @describeIn NamedMapPop Combine multiple NamedMapPops
setMethod("c",
          signature(x = "NamedMapPop"),
          function (x, ...){
            for(y in list(...)){
              if(is(y,"NULL")){
                # Do nothing
              }else{
                stopifnot(is(y,"NamedMapPop"),
                          x@nChr==y@nChr,
                          x@ploidy==y@ploidy,
                          x@nLoci==y@nLoci,
                          all.equal(x@genMap, y@genMap))
                x@nInd = x@nInd+y@nInd
                x@id = c(x@id, y@id)
                x@mother = c(x@mother, y@mother)
                x@father = c(x@father, y@father)
                x@geno = mergeGeno(x@geno,y@geno)
                x@inbred = x@inbred & y@inbred
              }
            }
            return(x)
          }
)

#' @title Combine MapPop chromosomes
#'
#' @description
#' Merges the chromosomes of multiple \code{\link{MapPop-class}} or
#' \code{\link{NamedMapPop-class}} objects.
#' Each MapPop must have the same number of chromosomes.
#'
#' @param ... \code{\link{MapPop-class}} or \code{\link{NamedMapPop-class}}
#' objects to be combined
#'
#' @return Returns an object of \code{\link{MapPop-class}}
#'
#' @examples
#' pop1 = quickHaplo(nInd=10, nChr=1, segSites=10)
#' pop2 = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' combinedPop = cChr(pop1, pop2)
#'
#' @export
cChr = function(...){
  for(y in list(...)){
    if(is(y,"NULL")){
      #Do nothing
    }else{
      stopifnot(is(y,"MapPop"))
      if(!exists("x",inherits=FALSE)){
        x = y
      }else{
        stopifnot(x@nInd==y@nInd,
                  x@ploidy==y@ploidy)
        x@nChr = x@nChr+y@nChr
        x@geno = c(x@geno,y@geno)
        x@genMap = c(x@genMap,y@genMap)
        x@centromere = c(x@centromere,y@centromere)
        x@nLoci = c(x@nLoci,y@nLoci)
        x@inbred = x@inbred & y@inbred
      }
    }
  }
  return(x)
}

#' @describeIn NamedMapPop Test if object is a NamedMapPop class
#' @export
isNamedMapPop = function(x) {
  ret = is(x, class2 = "NamedMapPop")
  return(ret)
}

# Pop ---------------------------------------------------------------------

#' @title Population
#'
#' @description
#' Extends \code{\link{RawPop-class}} to add sex, genetic values,
#' phenotypes, and pedigrees.
#'
#' @param object a 'Pop' object
#' @param x a 'Pop' object
#' @param i index of individuals
#' @param ... additional 'Pop' objects
#'
#' @slot id an individual's identifier
#' @slot iid an individual's internal identifier
#' @slot mother the identifier of the individual's mother
#' @slot father the identifier of the individual's father
#' @slot sex sex of individuals: "M" for males, "F" for females,
#' and "H" for hermaphrodites
#' @slot nTraits number of traits
#' @slot gv matrix of genetic values. When using GxE traits,
#' gv reflects gv when p=0.5. Dimensions are nInd by nTraits.
#' @slot pheno matrix of phenotypic values. Dimensions are
#' nInd by nTraits.
#' @slot ebv matrix of estimated breeding values. Dimensions
#' are nInd rows and a variable number of columns.
#' @slot gxe list containing GxE slopes for GxE traits
#' @slot fixEff a fixed effect relating to the phenotype.
#' Used by genomic selection models but otherwise ignored.
#' @slot misc a list whose elements correspond to individuals in the
#' population. This list is normally empty and exists solely as an
#' open slot available for users to store extra information about
#' individuals.
#' @slot miscPop a list of any length containing optional meta data for the
#' population. This list is empty unless information is supplied by the user.
#' Note that the list is emptied every time the population is subsetted.
#'
#' @export
setClass("Pop",
         slots=c(id="character",
                 iid="integer",
                 mother="character",
                 father="character",
                 sex="character",
                 nTraits="integer",
                 gv="matrix",
                 pheno="matrix",
                 ebv="matrix",
                 gxe="list",
                 fixEff="integer",
                 misc="list",
                 miscPop="list"),
         contains="RawPop")

setValidity("Pop",function(object){
  errors = character()
  if(any(grepl(" ",object@id,fixed=TRUE))){
    errors = c(errors,"id can not contain spaces")
  }
  if(any(grepl(" ",object@mother,fixed=TRUE))){
    errors = c(errors,"mother can not contain spaces")
  }
  if(any(grepl(" ",object@father,fixed=TRUE))){
    errors = c(errors,"father can not contain spaces")
  }
  if(object@nInd!=length(object@sex)){
    errors = c(errors,"nInd!=length(sex)")
  }
  if(object@nInd!=length(object@id)){
    errors = c(errors,"nInd!=length(id)")
  }
  if(object@nInd!=length(object@iid)){
    errors = c(errors,"nInd!=length(iid)")
  }
  if(object@nInd!=length(object@mother)){
    errors = c(errors,"nInd!=length(mother)")
  }
  if(object@nInd!=length(object@father)){
    errors = c(errors,"nInd!=length(father)")
  }
  if(object@nInd!=nrow(object@gv)){
    errors = c(errors,"nInd!=nrow(gv)")
  }
  if(object@nInd!=nrow(object@pheno)){
    errors = c(errors,"nInd!=nrow(pheno)")
  }
  if(object@nInd!=nrow(object@ebv)){
    errors = c(errors,"nInd!=nrow(ebv)")
  }
  if(!is.numeric(object@gv)){
    errors = c(errors,"!is.numeric(gv)")
  }
  if(!is.numeric(object@pheno)){
    errors = c(errors,"!is.numeric(pheno)")
  }
  if(!is.numeric(object@ebv)){
    errors = c(errors,"!is.numeric(ebv)")
  }
  if(object@nTraits!=ncol(object@gv)){
    errors = c(errors,"nTraits!=ncol(gv)")
  }
  if(object@nTraits!=ncol(object@pheno)){
    errors = c(errors,"nTraits!=ncol(pheno)")
  }
  if(object@nTraits!=length(object@gxe)){
    errors = c(errors,"nTraits!=length(gxe)")
  }
  if(object@nInd!=length(object@fixEff)){
    errors = c(errors,"nInd!=length(fixEff)")
  }
  if(object@nInd!=length(object@misc)){
    errors = c(errors,"nInd!=length(misc)")
  }
  if(length(errors)==0){
    return(TRUE)
  }else{
    return(errors)
  }
})

#' @describeIn Pop Extract Pop by index or id
setMethod("[",
          signature(x = "Pop"),
          function(x, i){
            if(is.character(i)){
              i = match(i, x@id)
              if(any(is.na(i))){
                stop("Trying to select invalid individuals")
              }
              if(any(is.null(i))){
                stop("Not valid ids")
              }
            }else{
              if(any(abs(i)>x@nInd)){
                stop("Trying to select invalid individuals")
              }
            }
            x@id = x@id[i]
            x@iid = x@iid[i]
            x@mother = x@mother[i]
            x@father = x@father[i]
            x@fixEff = x@fixEff[i]
            x@misc = x@misc[i]
            x@gv = x@gv[i,,drop=FALSE]
            x@pheno = x@pheno[i,,drop=FALSE]
            x@ebv = x@ebv[i,,drop=FALSE]
            x@sex = x@sex[i]
            x@nInd = length(x@sex)
            if(x@nTraits>=1){
              for(trait in 1:x@nTraits){
                if(!is.null(x@gxe[[trait]])){
                  x@gxe[[trait]] = x@gxe[[trait]][i]
                }
              }
            }
            for(chr in 1:x@nChr){
              x@geno[[chr]] = x@geno[[chr]][,,i,drop=FALSE]
            }
            x@miscPop = list()
            return(x)
          }
)

#' @describeIn Pop Combine multiple Pops
setMethod("c",
          signature(x = "Pop"),
          function (x, ...){
            # Uses mergePops for increased speed
            x = mergePops(c(list(x),list(...)))
            return(x)
          }
)

#' @describeIn Pop Show population summary
setMethod("show",
          signature(object = "Pop"),
          function (object){
            cat("An object of class", classLabel(class(object)), "\n")
            cat("Ploidy:", object@ploidy,"\n")
            cat("Individuals:", object@nInd,"\n")
            cat("Chromosomes:", object@nChr,"\n")
            cat("Loci:", sum(object@nLoci),"\n")
            cat("Traits:", object@nTraits,"\n")
            invisible()
          }
)

#' @title Create new population
#'
#' @description
#' Creates an initial \code{\link{Pop-class}} from an object of
#' \code{\link{MapPop-class}} or \code{\link{NamedMapPop-class}}.
#' The function is intended for use with output from functions such
#' as \code{\link{runMacs}}, \code{\link{newMapPop}}, or
#' \code{\link{quickHaplo}}.
#'
#' @param rawPop an object of \code{\link{MapPop-class}} or
#' \code{\link{NamedMapPop-class}}
#' @param simParam an object of \code{\link{SimParam}}
#' @param ... additional arguments used internally
#'
#' @return Returns an object of \code{\link{Pop-class}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=2, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' SP$addTraitA(10)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#' isPop(pop)
#'
#' @export
newPop = function(rawPop,simParam=NULL,...){
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }
  return(.newPop(rawPop=rawPop,simParam=simParam,...))
}

#' @title Create new population (internal)
#'
#' @description
#' Creates a new \code{\link{Pop-class}} from an object
#' of the Pop superclass.
#'
#' @param rawPop an object of the pop superclass
#' @param id optional id for new individuals
#' @param mother optional id for mothers
#' @param father optional id for fathers
#' @param iMother optional internal id for mothers
#' @param iFather optional internal id for fathers
#' @param isDH optional indicator for DH/inbred individuals
#' @param femaleParentPop optional population of female parents
#' @param maleParentPop optional population of male parents
#' @param hist optional recombination history
#' @param simParam an object of \code{\link{SimParam}}
#' @param ... additional arguments passed to the finalizePop
#' function in simParam
#'
#' @return Returns an object of \code{\link{Pop-class}}
.newPop = function(rawPop, id=NULL, mother=NULL, father=NULL,
                   iMother=NULL, iFather=NULL, isDH=NULL,
                   femaleParentPop=NULL, maleParentPop=NULL,
                   hist=NULL, simParam=NULL,...){
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }
  stopifnot(sapply(simParam$genMap,length)==rawPop@nLoci)
  lastId = simParam$lastId
  iid = (1:rawPop@nInd) + lastId
  lastId = max(iid)
  if(is.null(id)){
    if(is(rawPop, "NamedMapPop")){
      id = rawPop@id
    }else{
      id = as.character(iid)
    }
  }
  if(is.null(iMother)){
    iMother = rep(0L, rawPop@nInd)
  }
  if(is.null(iFather)){
    iFather = rep(0L, rawPop@nInd)
  }
  if(is.null(isDH)){
    if(is(rawPop, "MapPop")){
      isDH = rawPop@inbred
    }else{
      isDH = FALSE
    }
  }
  if(is.null(mother)){
    if(is(rawPop, "NamedMapPop")){
      mother = rawPop@mother
    }else{
      mother = rep("0", rawPop@nInd)
    }
  }
  if(is.null(father)){
    if(is(rawPop, "NamedMapPop")){
      father = rawPop@father
    }else{
      father = rep("0", rawPop@nInd)
    }
  }
  stopifnot(length(id)==length(mother),
            length(id)==length(father))
  if(simParam$sexes=="no"){
    sex = rep("H", rawPop@nInd)
  }else if(simParam$sexes=="yes_rand"){
    sex = sample(c("M","F"), rawPop@nInd, replace=TRUE)
  }else if(simParam$sexes=="yes_sys"){
    sex = rep_len(c("M","F"), rawPop@nInd)
  }else{
    stop(paste("no rules for sex type", simParam$sexes))
  }
  gxe = vector("list", simParam$nTraits)
  gv = matrix(NA_real_,nrow=rawPop@nInd,
              ncol=simParam$nTraits)
  colnames(gv) = rep(NA_character_, simParam$nTraits)
  pheno = gv
  if(simParam$nTraits>=1){
    for(i in 1:simParam$nTraits){
      tmp = getGv(simParam$traits[[i]], rawPop, simParam$nThreads)
      gv[,i] = tmp[[1]]
      colnames(gv)[i] = simParam$traits[[i]]@name
      if(length(tmp)>1){
        gxe[[i]] = tmp[[2]]
      }
    }
  }
  output = new("Pop",
               nInd=rawPop@nInd,
               nChr=rawPop@nChr,
               ploidy=rawPop@ploidy,
               nLoci=rawPop@nLoci,
               sex=sex,
               geno=rawPop@geno,
               id=id,
               iid=iid,
               mother=mother,
               father=father,
               fixEff=rep(1L,rawPop@nInd),
               nTraits=simParam$nTraits,
               gv=gv,
               gxe=gxe,
               pheno=pheno,
               ebv=matrix(NA_real_,
                          nrow=rawPop@nInd,
                          ncol=0),
               misc=vector("list",rawPop@nInd),
               miscPop=list())
  if(simParam$nTraits>=1){
    output = setPheno(output, varE=NULL, reps=1, fixEff=1L,
                      p=NULL, onlyPheno=FALSE, simParam=simParam)
  }
  output = simParam$finalizePop(output,...)
  if(simParam$isTrackPed){
    if(simParam$isTrackRec){
      simParam$addToRec(lastId,id,iMother,iFather,isDH,hist,output@ploidy)
    }else{
      simParam$addToPed(lastId,id,iMother,iFather,isDH)
    }
  }else{
    simParam$updateLastId(lastId)
  }
  return(output)
}

#' @title Reset population
#'
#' @description
#' Recalculates a population's genetic values and
#' resets phenotypes and EBVs.
#'
#' @param pop an object of \code{\link{Pop-class}}
#' @param simParam an object of \code{\link{SimParam}}
#'
#' @return an object of \code{\link{Pop-class}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=2, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' SP$addTraitA(10)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' #Rescale to set mean to 1
#' SP$rescaleTraits(mean=1)
#' pop = resetPop(pop, simParam=SP)
#'
#' @export
resetPop = function(pop,simParam=NULL){
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }
  pop@nTraits = simParam$nTraits
  # Extract names to add back at the end
  traitNames = colnames(pop@gv)
  # Create empty slots for traits
  pop@pheno = matrix(NA_real_,
                     nrow=pop@nInd,
                     ncol=simParam$nTraits)
  pop@ebv = matrix(NA_real_,
                   nrow=pop@nInd,
                   ncol=0)
  pop@gxe = vector("list",simParam$nTraits)
  pop@gv = matrix(NA_real_,nrow=pop@nInd,
                  ncol=simParam$nTraits)
  pop@fixEff = rep(1L,pop@nInd)
  # Calculate genetic values
  if(simParam$nTraits>=1){
    for(i in 1:simParam$nTraits){
      tmp = getGv(simParam$traits[[i]],pop,simParam$nThreads)
      pop@gv[,i] = tmp[[1]]
      if(length(tmp)>1){
        pop@gxe[[i]] = tmp[[2]]
      }
    }
  }
  # Add back trait names
  colnames(pop@pheno) = colnames(pop@gv) = traitNames
  return(pop)
}

#' @title Test if object is of a Population class
#'
#' @description Utility function to test if object is of a Population class
#'
#' @param x \code{\link{Pop-class}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=2, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' SP$addTraitA(10)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#' isPop(pop)
#' isPop(SP)
#'
#' @export
isPop = function(x) {
  ret = is(x, class2 = "Pop")
  return(ret)
}

#' @title Creates an empty population
#'
#' @description
#' Creates an empty \code{\link{Pop-class}} object with user
#' defined ploidy and other parameters taken from simParam.
#'
#' @param ploidy the ploidy of the population
#' @param simParam an object of \code{\link{SimParam}}
#'
#' @return Returns an object of \code{\link{Pop-class}} with
#' zero individuals
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=2, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' SP$addTraitA(10)
#'
#' #Create empty population
#' pop = newEmptyPop(simParam=SP)
#' isPop(pop)
#'
#' @export
newEmptyPop = function(ploidy=2L, simParam=NULL){
  if(is.null(simParam)){
    simParam = get("SP", envir=.GlobalEnv)
  }
  # Create 0 x nTrait matrix with trait names
  # For pheno and gv slots
  traitMat = matrix(NA_real_,
                    nrow = 0L,
                    ncol = simParam$nTraits)
  traitNames = character(simParam$nTraits)
  if(simParam$nTraits > 0L){
    # Get trait names
    for(i in 1:simParam$nTraits){
      traitNames[i] = simParam$traits[[i]]@name
    }
  }
  colnames(traitMat) = traitNames
  # Create empty geno list
  nLoci = unname(sapply(simParam$genMap, length))
  geno = vector("list", simParam$nChr)
  for(i in 1:simParam$nChr){
    DIM1 = nLoci[i]%/%8L + (nLoci[i]%%8L > 0L)
    geno[[i]] = array(as.raw(0), dim=c(DIM1, ploidy, 0))
  }
  output = new("Pop",
               nInd = 0L,
               nChr = simParam$nChr,
               ploidy = as.integer(ploidy),
               nLoci = nLoci,
               sex = character(),
               geno = geno,
               id = character(),
               iid = integer(),
               mother = character(),
               father = character(),
               fixEff = integer(),
               nTraits = simParam$nTraits,
               gv = traitMat,
               gxe = vector("list", simParam$nTraits),
               pheno = traitMat,
               ebv = matrix(NA_real_, nrow=0L, ncol=0L),
               misc = list(),
               miscPop = list())
  return(output)
}

# MultiPop ------------------------------------------------------------------

#' @title Multi-Population
#'
#' @description
#' The mega-population represents a population of populations.
#' It is designed to behave like a list of populations.
#'
#' @param x a 'MultiPop' object
#' @param i index of populations or mega-populations
#' @param ... additional 'MultiPop' or 'Pop' objects
#'
#' @slot pops list of \code{\link{Pop-class}} and/or
#' \code{MultiPop-class}
#'
#' @export
setClass("MultiPop",
         slots=c(pops="list"))

setValidity("MultiPop",function(object){
  errors = character()
  # Check that all populations are valid
  for(i in 1:length(object@pops)){
    if(!validObject(object@pops[[i]]) &
       (is(object@pops[[i]], "Pop") |
        is(object@pops[[i]],"MultiPop"))){
      errors = c(errors,paste("object",i,"is not a valid pop"))
    }
  }
  if(length(errors)==0){
    return(TRUE)
  }else{
    return(errors)
  }
})

#' @describeIn MultiPop Extract MultiPop by index
setMethod("[",
          signature(x = "MultiPop"),
          function(x, i){
            x@pops = x@pops[i]
            return(x)
          }
)

#' @describeIn MultiPop Extract Pop by index
setMethod("[[",
          signature(x = "MultiPop"),
          function (x, i){
            return(x@pops[[i]])
          }
)

#' @describeIn MultiPop Combine multiple MultiPops
setMethod("c",
          signature(x = "MultiPop"),
          function (x, ...){
            for(y in list(...)){
              if(is(y,"NULL")){
                # Do nothing
              }else{
                if(is(y,"Pop")){
                  x@pops = c(x@pops, y)
                }else{
                  stopifnot(is(y,"MultiPop"))
                  x@pops = c(x@pops, y@pops)
                }
              }
            }
            return(x)
          }
)

#' @title Create new Multi Population
#'
#' @description
#' Creates a new \code{\link{MultiPop-class}} from one or more
#' \code{\link{Pop-class}} and/or \code{\link{MultiPop-class}}
#' objects.
#'
#' @param ... one or more \code{\link{Pop-class}} and/or
#' \code{\link{MultiPop-class}} objects.
#'
#' @return Returns an object of \code{\link{MultiPop-class}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=2, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' SP$addTraitA(10)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#' megaPop = newMultiPop(pop=pop)
#' isMultiPop(megaPop)
#'
#' @export
newMultiPop = function(...){
  input = list(...)
  class = sapply(input, "class")
  stopifnot(all(class=="Pop" | class=="MultiPop"))
  output = new("MultiPop", pops=input)
  return(output)
}

#' @describeIn MultiPop Test if object is of a MultiPop class
#' @export
isMultiPop = function(x) {
  ret = is(x, class2 = "MultiPop")
  return(ret)
}
/scratch/gouwar.j/cran-all/cranData/AlphaSimR/R/Class-Pop.R
#RRsol----
#' @title RR-BLUP Solution
#'
#' @description Contains output from AlphaSimR's genomic
#' selection functions.
#'
#' @slot gv Trait(s) for estimating genetic values
#' @slot bv Trait(s) for estimating breeding values
#' @slot female Trait(s) for estimating GCA in the female pool
#' @slot male Trait(s) for estimating GCA in the male pool
#' @slot Vu Estimated marker variance(s)
#' @slot Ve Estimated error variance
#'
#' @export
setClass("RRsol",
         slots=c(gv="list",
                 bv="list",
                 female="list",
                 male="list",
                 Vu="matrix",
                 Ve="matrix"))

# Test if object is of a RRsol class
isRRsol = function(x) {
  ret = is(x, class2 = "RRsol")
  return(ret)
}
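
# Illustrative sketch (an assumption about typical usage, not package code):
# an RRsol object is returned by AlphaSimR's genomic selection functions such
# as RRBLUP(), which fits marker effects to phenotypes on a SNP chip, e.g.:
#   founderPop = quickHaplo(nInd=100, nChr=1, segSites=100)
#   SP = SimParam$new(founderPop)
#   SP$addTraitA(10)
#   SP$addSnpChip(50)
#   SP$setVarE(h2=0.5)
#   pop = newPop(founderPop, simParam=SP)
#   ans = RRBLUP(pop, simParam=SP)
#   isRRsol(ans)  # TRUE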
/scratch/gouwar.j/cran-all/cranData/AlphaSimR/R/Class-RRsol.R
#' @title Simulation parameters #' #' @description #' Container for global simulation parameters. Saving this object #' as SP will allow it to be accessed by function defaults. #' #' @export SimParam = R6Class( "SimParam", public = list( #### Public ---- #' @field nThreads number of threads used on platforms with OpenMP support nThreads = "integer", #' @field snpChips list of SNP chips snpChips = "list", #' @field invalidQtl list of segregating sites that aren't valid QTL invalidQtl = "list", #' @field invalidSnp list of segregating sites that aren't valid SNP invalidSnp = "list", #' @field founderPop founder population used for variance scaling founderPop = "MapPop", #' @field finalizePop function applied to newly created populations. #' Currently does nothing and should only be changed by expert users. finalizePop = "function", #' @field allowEmptyPop if true, population arguments with nInd=0 will #' return an empty population with a warning instead of an error. allowEmptyPop = "logical", #' @description Starts the process of building a new simulation #' by creating a new SimParam object and assigning a founder #' population to the class. It is recommended that you save the #' object with the name "SP", because subsequent functions will #' check your global environment for an object of this name if #' their simParam arguments are NULL. This allows you to call #' these functions without explicitly supplying a simParam #' argument with every call. #' #' @param founderPop an object of \code{\link{MapPop-class}} #' #' @examples #' #Create founder haplotypes #' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10) #' #' #Set simulation parameters #' SP = SimParam$new(founderPop) initialize = function(founderPop){ stopifnot(is(founderPop, "MapPop")) # Public items self$nThreads = getNumThreads() self$v = 2.6 # Kosambi self$p = 0 # Single pathway gamma model self$quadProb = 0 # No quadrivalent pairing self$snpChips = list() self$invalidQtl = rep(list(integer()), founderPop@nChr) # All eligible self$invalidSnp = rep(list(integer()), founderPop@nChr) # All eligible self$founderPop = founderPop self$finalizePop = function(pop, ...){return(pop)} self$allowEmptyPop = FALSE # Empty populations trigger an error # Private items private$.restrSites = TRUE private$.traits = list() private$.segSites = founderPop@nLoci private$.sexes = "no" private$.femaleMap = lapply(founderPop@genMap, function(x) x-x[1]) # Set position 1 to 0 private$.maleMap = NULL private$.sepMap = FALSE private$.femaleCentromere = founderPop@centromere private$.maleCentromere = NULL private$.lastId = 0L private$.isTrackPed = FALSE private$.pedigree = matrix(NA_integer_,nrow=0,ncol=3) private$.isTrackRec = FALSE private$.recHist = list() private$.varA = numeric() private$.varG = numeric() private$.varE = numeric() private$.version = packageDescription("AlphaSimR")$Version private$.lastHaplo = 0L private$.hasHap = logical() private$.hap = list() private$.isFounder = logical() invisible(self) }, #' @description Sets pedigree tracking for the simulation. #' By default pedigree tracking is turned off. When turned on, #' the pedigree of all individuals created will be tracked, #' except those created by \code{\link{hybridCross}}. Turning #' off pedigree tracking will turn off recombination tracking #' if it is turned on. #' #' @param isTrackPed should pedigree tracking be on. #' @param force should the check for a running simulation be #' ignored. Only set to TRUE if you know what you are doing. 
#' #' @examples #' #Create founder haplotypes #' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10) #' #' #Set simulation parameters #' SP = SimParam$new(founderPop) #' \dontshow{SP$nThreads = 1L} #' SP$setTrackPed(TRUE) setTrackPed = function(isTrackPed, force=FALSE){ stopifnot(is.logical(isTrackPed)) if(!force){ private$.isRunning() } private$.isTrackPed = isTrackPed if(!isTrackPed){ private$.isTrackRec = FALSE } invisible(self) }, #' @description Sets recombination tracking for the simulation. #' By default recombination tracking is turned off. When turned #' on recombination tracking will also turn on pedigree tracking. #' Recombination tracking keeps records of all individuals created, #' except those created by \code{\link{hybridCross}}, because their #' pedigree is not tracked. #' #' @param isTrackRec should recombination tracking be on. #' @param force should the check for a running simulation be #' ignored. Only set to TRUE if you know what you are doing. #' #' @examples #' #Create founder haplotypes #' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10) #' #' #Set simulation parameters #' SP = SimParam$new(founderPop) #' \dontshow{SP$nThreads = 1L} #' SP$setTrackRec(TRUE) setTrackRec = function(isTrackRec, force=FALSE){ stopifnot(is.logical(isTrackRec)) if(!force){ private$.isRunning() } private$.isTrackRec = isTrackRec if(isTrackRec){ private$.isTrackPed = TRUE } invisible(self) }, #' @description Resets the internal lastId, the pedigree #' and recombination tracking (if in use) to the #' supplied lastId. Be careful using this function because #' it may introduce a bug if you use individuals from #' the deleted portion of the pedigree. #' #' @param lastId last ID to include in pedigree #' #' @examples #' #Create founder haplotypes #' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10) #' #' #Set simulation parameters #' SP = SimParam$new(founderPop) #' \dontshow{SP$nThreads = 1L} #' #' #Create population #' pop = newPop(founderPop, simParam=SP) #' pop@id # 1:10 #' #' #Create another population after reseting pedigree #' SP$resetPed() #' pop2 = newPop(founderPop, simParam=SP) #' pop2@id # 1:10 resetPed =function(lastId=0L){ private$.lastId = lastId private$.pedigree = private$.pedigree[0:lastId,,drop=FALSE] if(private$.isTrackRec){ private$.recHist = private$.recHist[0:lastId] } invisible(self) }, #' @description Sets restrictions on which segregating sites #' can serve as a SNP and/or QTL. #' #' @param minQtlPerChr the minimum number of segregating sites for #' QTLs. Can be a single value or a vector values for each chromosome. #' @param minSnpPerChr the minimum number of segregating sites for SNPs. #' Can be a single value or a vector values for each chromosome. #' @param excludeQtl an optional vector of segregating site names to #' exclude from consideration as a viable QTL. #' @param excludeSnp an optional vector of segregating site names to #' exclude from consideration as a viable SNP. #' @param overlap should SNP and QTL sites be allowed to overlap. #' @param minSnpFreq minimum allowable frequency for SNP loci. #' No minimum SNP frequency is used if value is NULL. 
#' #' @examples #' #Create founder haplotypes #' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10) #' #' #Set simulation parameters #' SP = SimParam$new(founderPop) #' \dontshow{SP$nThreads = 1L} #' SP$restrSegSites(minQtlPerChr=5, minSnpPerChr=5) restrSegSites = function(minQtlPerChr=NULL, minSnpPerChr=NULL, excludeQtl=NULL, excludeSnp=NULL, overlap=FALSE, minSnpFreq=NULL){ # Handle any named QTL exclusions if(!is.null(excludeQtl)){ matchList = private$.findNamedLoci(excludeQtl) # Make exclusions restr = self$invalidQtl for(i in 1:self$nChr){ restr[[i]] = sort(union(restr[[i]], matchList[[i]])) } self$invalidQtl = restr } # Handle any named SNP exclusions if(!is.null(excludeSnp)){ # Check if the SNP list matches the QTL list # If so, save time by using the previously determined exclusions findMatch = TRUE if(!is.null(excludeQtl)){ if(all(excludeSnp==excludeQtl)){ findMatch = FALSE } } if(findMatch){ matchList = private$.findNamedLoci(excludeSnp) } # Make exclusions restr = self$invalidSnp for(i in 1:self$nChr){ restr[[i]] = sort(union(restr[[i]], matchList[[i]])) } self$invalidSnp = restr } if(overlap){ # Not setting any restrictions if overlap is allow # Existing restrictions will be left in place private$.restrSites = FALSE invisible(self) }else{ # Check validity of inputs if(!is.null(minSnpPerChr)){ if(length(minSnpPerChr)==1){ minSnpPerChr = rep(minSnpPerChr,self$nChr) }else{ stopifnot(length(minSnpPerChr)==self$nChr) } } if(!is.null(minQtlPerChr)){ if(length(minQtlPerChr)==1){ minQtlPerChr = rep(minQtlPerChr,self$nChr) }else{ stopifnot(length(minQtlPerChr)==self$nChr) } } # Restrict SNPs and then QTL # SNPs are done first due to potentially fewer viable loci private$.restrSites = TRUE if(!is.null(minSnpPerChr)){ invisible(private$.pickLoci(minSnpPerChr, FALSE, minSnpFreq)) } if(!is.null(minQtlPerChr)){ invisible(private$.pickLoci(minQtlPerChr)) } invisible(self) } }, #' @description Changes how sexes are determined in the simulation. #' The default sexes is "no", indicating all individuals are hermaphrodites. #' To add sexes to the simulation, run this function with "yes_sys" or #' "yes_rand". The value "yes_sys" will systematically assign #' sexes to newly created individuals as first male and then female. #' Populations with an odd number of individuals will have one more male than #' female. The value "yes_rand" will randomly assign a sex to each #' individual. #' #' @param sexes acceptable value are "no", "yes_sys", or #' "yes_rand" #' @param force should the check for a running simulation be #' ignored. Only set to TRUE if you know what you are doing. #' #' @examples #' #Create founder haplotypes #' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10) #' #' #Set simulation parameters #' SP = SimParam$new(founderPop) #' \dontshow{SP$nThreads = 1L} #' SP$setSexes("yes_sys") setSexes = function(sexes, force=FALSE){ if(!force){ private$.isRunning() } sexes = tolower(sexes) if(sexes=="no"){ private$.sexes="no" }else if(sexes=="yes_sys"){ private$.sexes="yes_sys" }else if(sexes=="yes_rand"){ private$.sexes="yes_rand" }else{ stop(paste0("sexes=",sexes," is not a valid option")) } invisible(self) }, #' @description #' Allows for the manual setting of founder haplotypes. This functionality #' is not fully documented, because it is still experimental. 
#'
#' @param hapMap a list of founder haplotypes
setFounderHap = function(hapMap){
  private$.hap = hapMap
  private$.hasHap = rep(TRUE, length(hapMap))
  private$.isFounder = rep(FALSE, length(hapMap))
  invisible(self)
},

#' @description
#' Randomly assigns eligible SNPs to a SNP chip
#'
#' @param nSnpPerChr number of SNPs per chromosome.
#' Can be a single value or nChr values.
#' @param minSnpFreq minimum allowable frequency for SNP loci.
#' If NULL, no minimum frequency is used.
#' @param refPop reference population for calculating SNP
#' frequency. If NULL, the founder population is used.
#' @param name optional name for chip
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addSnpChip(10)
addSnpChip = function(nSnpPerChr, minSnpFreq=NULL, refPop=NULL, name=NULL){
  if(length(nSnpPerChr)==1){
    nSnpPerChr = rep(nSnpPerChr,self$nChr)
  }
  snpChip = private$.pickLoci(nSnpPerChr, FALSE, minSnpFreq, refPop)
  if(is.null(name)){
    snpChip@name = paste0("Chip",self$nSnpChips + 1L)
  }else{
    snpChip@name = name
  }
  self$snpChips[[self$nSnpChips + 1L]] = snpChip
  invisible(self)
},

#' @description
#' Assigns SNPs to a SNP chip by supplying marker names. This function does
#' not check against excluded SNPs and will not add the SNPs to the list of
#' excluded QTL for the purpose of avoiding overlap between SNPs and QTL.
#' Excluding these SNPs from being used as QTL can be accomplished using
#' the excludeQtl argument in SimParam's restrSegSites function.
#'
#' @param markers a vector of names for the markers
#' @param name optional name for chip
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' SP$addSnpChipByName(c("1_1","1_3"))
addSnpChipByName = function(markers, name=NULL){
  genMap = private$.femaleMap

  # Check that the markers are present on the map
  genMapMarkerNames = unlist(lapply(genMap, names))
  stopifnot(all(markers%in%genMapMarkerNames))

  # Create lociPerChr and lociLoc
  lociPerChr = integer(length(genMap))
  lociLoc = vector("list", length(genMap))

  # Loop through chromosomes
  for(i in 1:length(genMap)){
    # Initialize lociLoc
    lociLoc[[i]] = integer()

    # Find matches if they exist
    take = match(names(genMap[[i]]), markers)
    lociPerChr[i] = length(na.omit(take))
    if(lociPerChr[i]>0L){
      lociLoc[[i]] = which(!is.na(take))
    }
  }
  lociLoc = unlist(lociLoc)

  snpChip = new("LociMap",
                nLoci=sum(lociPerChr),
                lociPerChr=lociPerChr,
                lociLoc=lociLoc)
  if(is.null(name)){
    snpChip@name = paste0("Chip",self$nSnpChips + 1L)
  }else{
    snpChip@name = name
  }
  self$snpChips[[self$nSnpChips + 1L]] = snpChip
  invisible(self)
},

#' @description
#' Randomly selects SNPs and then assigns them to chips according to the
#' structure matrix
#'
#' @param nSnpPerChr number of SNPs per chromosome.
#' Can be a single value or nChr values.
#' @param structure a matrix. Rows are SNP chips, columns are SNPs.
#' If a value is true, that SNP is on that chip.
#' @param force should the check for a running simulation be
#' ignored. Only set to TRUE if you know what you are doing.
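#'
#' @examples
#' # A hedged sketch, not run: two chips where the second chip uses a
#' # subset of the first chip's SNPs. The logical 'structure' matrix
#' # (chips in rows, SNPs in columns) is illustrative only.
#' \dontrun{
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#' SP = SimParam$new(founderPop)
#' structure = rbind(rep(TRUE, 10),
#'                   c(rep(TRUE, 5), rep(FALSE, 5)))
#' SP$addStructuredSnpChip(10, structure)
#' }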
addStructuredSnpChip = function(nSnpPerChr,structure,force=FALSE){ if(!force){ private$.isRunning() } if(length(nSnpPerChr)==1){ nSnpPerChr = rep(nSnpPerChr,self$nChr) } stopifnot(length(nSnpPerChr)==self$nChr) stopifnot(sapply(self$potSnp,length)>=nSnpPerChr) stopifnot(dim(structure)[2]==sum(nSnpPerChr)) lociLoc = lapply(1:self$nChr,function(x){ sort(sample(self$potSnp[[x]],nSnpPerChr[x])) }) lociLoc = do.call("c",lociLoc) for (i in 1:nrow(structure)){ snps = lociLoc[structure[i,]] start = 1 numChr = numeric(length(nSnpPerChr)) for (j in 1:length(nSnpPerChr)){ end = start + nSnpPerChr[j] - 1 numChr[j] = sum(structure[i,start:end]) start = end + 1 } snpChip = new("LociMap", nLoci = length(snps), lociPerChr = as.integer(numChr), lociLoc = as.integer(snps)) self$snpChips[[self$nSnpChips+1L]] = snpChip } invisible(self) }, ### Traits (public) ---- #' @description #' Randomly assigns eligible QTLs for one or more additive traits. #' If simulating more than one trait, all traits will be pleiotropic #' with correlated additive effects. #' #' @param nQtlPerChr number of QTLs per chromosome. Can be a single value or nChr values. #' @param mean a vector of desired mean genetic values for one or more traits #' @param var a vector of desired genetic variances for one or more traits #' @param corA a matrix of correlations between additive effects #' @param gamma should a gamma distribution be used instead of normal #' @param shape the shape parameter for the gamma distribution #' (the rate/scale parameter of the gamma distribution is accounted #' for via the desired level of genetic variance, the var argument) #' @param force should the check for a running simulation be #' ignored. Only set to TRUE if you know what you are doing. #' @param name optional name for trait(s) #' #' @examples #' #Create founder haplotypes #' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10) #' #' #Set simulation parameters #' SP = SimParam$new(founderPop) #' \dontshow{SP$nThreads = 1L} #' SP$addTraitA(10) addTraitA = function(nQtlPerChr,mean=0,var=1,corA=NULL, gamma=FALSE,shape=1,force=FALSE,name=NULL){ if(!force){ private$.isRunning() } if(length(nQtlPerChr)==1){ nQtlPerChr = rep(nQtlPerChr,self$nChr) } nTraits = length(mean) if(length(gamma)==1) gamma = rep(gamma,nTraits) if(length(shape)==1) shape = rep(shape,nTraits) if(is.null(corA)) corA=diag(nTraits) if(is.null(name)){ name = paste0("Trait",1:nTraits+self$nTraits) } stopifnot(length(mean)==length(var), isSymmetric(corA), length(mean)==nrow(corA), length(mean)==length(name)) qtlLoci = private$.pickLoci(nQtlPerChr) addEff = sampAddEff(qtlLoci=qtlLoci,nTraits=nTraits, corr=corA,gamma=gamma,shape=shape) for(i in 1:nTraits){ trait = new("TraitA", qtlLoci, addEff=addEff[,i], intercept=0, name=name[i]) tmp = calcGenParam(trait, self$founderPop, self$nThreads) scale = sqrt(var[i])/sqrt(popVar(tmp$bv)[1]) trait@addEff = trait@addEff*scale trait@intercept = mean[i]-mean(tmp$gv*scale) private$.addTrait(trait,var[i],var[i]) } invisible(self) }, #' @description #' Randomly assigns eligible QTLs for one or more traits with dominance. #' If simulating more than one trait, all traits will be pleiotropic #' with correlated effects. #' #' @param nQtlPerChr number of QTLs per chromosome. Can be a single value or nChr values. 
#' @param mean a vector of desired mean genetic values for one or more traits #' @param var a vector of desired genetic variances for one or more traits #' @param meanDD mean dominance degree #' @param varDD variance of dominance degree #' @param corA a matrix of correlations between additive effects #' @param corDD a matrix of correlations between dominance degrees #' @param useVarA tune according to additive genetic variance if true. If #' FALSE, tuning is performed according to total genetic variance. #' @param gamma should a gamma distribution be used instead of normal #' @param shape the shape parameter for the gamma distribution #' (the rate/scale parameter of the gamma distribution is accounted #' for via the desired level of genetic variance, the var argument) #' @param force should the check for a running simulation be #' ignored. Only set to TRUE if you know what you are doing. #' @param name optional name for trait(s) #' #' @examples #' #Create founder haplotypes #' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10) #' #' #Set simulation parameters #' SP = SimParam$new(founderPop) #' \dontshow{SP$nThreads = 1L} #' SP$addTraitAD(10, meanDD=0.5) addTraitAD = function(nQtlPerChr,mean=0,var=1,meanDD=0, varDD=0,corA=NULL,corDD=NULL,useVarA=TRUE, gamma=FALSE,shape=1,force=FALSE,name=NULL){ if(!force){ private$.isRunning() } if(length(nQtlPerChr)==1){ nQtlPerChr = rep(nQtlPerChr,self$nChr) } nTraits = length(mean) if(length(meanDD)==1) meanDD = rep(meanDD,nTraits) if(length(varDD)==1) varDD = rep(varDD,nTraits) if(length(gamma)==1) gamma = rep(gamma,nTraits) if(length(shape)==1) shape = rep(shape,nTraits) if(is.null(corA)) corA=diag(nTraits) if(is.null(corDD)) corDD=diag(nTraits) if(is.null(name)){ name = paste0("Trait",1:nTraits+self$nTraits) } stopifnot(length(mean)==length(var), isSymmetric(corA), isSymmetric(corDD), length(mean)==nrow(corA), length(mean)==length(name)) qtlLoci = private$.pickLoci(nQtlPerChr) addEff = sampAddEff(qtlLoci=qtlLoci,nTraits=nTraits, corr=corA,gamma=gamma,shape=shape) domEff = sampDomEff(qtlLoci=qtlLoci,nTraits=nTraits,addEff=addEff, corDD=corDD,meanDD=meanDD,varDD=varDD) for(i in 1:nTraits){ trait = new("TraitAD", qtlLoci, addEff=addEff[,i], domEff=domEff[,i], intercept=0, name=name[i]) tmp = calcGenParam(trait, self$founderPop, self$nThreads) if(useVarA){ scale = sqrt(var[i])/sqrt(popVar(tmp$bv)[1]) }else{ scale = sqrt(var[i])/sqrt(popVar(tmp$gv)[1]) } trait@addEff = trait@addEff*scale trait@domEff = trait@domEff*scale trait@intercept = mean[i]-mean(tmp$gv*scale) if(useVarA){ private$.addTrait(trait,var[i],popVar(tmp$gv*scale)[1]) }else{ private$.addTrait(trait,popVar(tmp$bv*scale)[1],var[i]) } } invisible(self) }, #' @description #' An alternative method for adding a trait with additive and dominance effects #' to an AlphaSimR simulation. The function attempts to create a trait matching #' user defined values for number of QTL, inbreeding depression, additive genetic #' variance and dominance genetic variance. #' #' @param nQtlPerChr number of QTLs per chromosome. #' Can be a single value or nChr values. #' @param mean desired mean of the trait #' @param varA desired additive variance #' @param varD desired dominance variance #' @param inbrDepr desired inbreeding depression, see details #' @param limMeanDD limits for meanDD, see details #' @param limVarDD limits for varDD, see details #' @param silent should summary details be printed to the console #' @param force should the check for a running simulation be #' ignored. 
#' Only set to TRUE if you know what you are doing.
#' @param name optional name for trait
#'
#' @details
#' This function will always add a trait to 'SimParam', unless an error occurs
#' with picking QTLs. The resulting trait will always have the desired mean and
#' additive genetic variance. However, it may not have the desired values for
#' inbreeding depression and dominance variance. Thus, it is strongly recommended
#' to check the output printed to the console to determine how close the trait's
#' parameters came to these desired values.
#'
#' The mean and additive genetic variance will always be achieved exactly. The
#' function attempts to achieve the desired dominance variance and inbreeding
#' depression while staying within the user-supplied constraints for the
#' acceptable range of dominance degree mean and variance. If the desired values
#' are not being achieved, the acceptable ranges may need to be increased and/or
#' the number of QTL may need to be increased. There are no limits to setting the
#' range for dominance degree mean and variance, but care should be taken with
#' regard to the biological feasibility of the limits that are supplied. The
#' default limits were somewhat arbitrarily set, so I make no claim about how
#' reasonable these limits are for routine use.
#'
#' Inbreeding depression in this function is defined as the difference in mean
#' genetic value between a population with the same allele frequency as the
#' reference population (the population used to initialize SimParam) in
#' Hardy-Weinberg equilibrium and a population with the same allele
#' frequency that is fully inbred. This is equivalent to the amount the mean of
#' a population increases when going from an inbreeding coefficient of 1 (fully
#' inbred) to an inbreeding coefficient of 0 (Hardy-Weinberg
#' equilibrium). Note that the sign of the value should (usually) be positive.
#' This corresponds to a detrimental effect of inbreeding when higher values of
#' the trait are considered biologically beneficial.
#'
#' Summary information on this trait is printed to the console when silent=FALSE.
#' The summary information reports the inbreeding depression and dominance
#' variance for the population as well as the dominance degree mean and variance
#' applied to the trait.
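#'
#' For example, with inbrDepr=0.2 the reported inbreeding depression should
#' be close to 0.2: the mean genetic value of a fully inbred population (F=1)
#' is expected to lie 0.2 units below that of the same gene pool in
#' Hardy-Weinberg equilibrium (F=0).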
#' #' @examples #' #Create founder haplotypes #' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10) #' #' #Set simulation parameters #' SP = SimParam$new(founderPop) #' \dontshow{SP$nThreads = 1L} #' SP$altAddTraitAD(nQtlPerChr=10, mean=0, varA=1, varD=0.05, inbrDepr=0.2) altAddTraitAD = function(nQtlPerChr,mean=0,varA=1,varD=0,inbrDepr=0, limMeanDD=c(0,1.5),limVarDD=c(0,0.5), silent=FALSE,force=FALSE,name=NULL){ if(!force){ private$.isRunning() } if(length(nQtlPerChr)==1){ nQtlPerChr = rep(nQtlPerChr,self$nChr) } if(is.null(name)){ name = paste0("Trait",self$nTraits+1) } # Pick QTL qtlLoci = private$.pickLoci(nQtlPerChr) # Create list of arguments for optimization argsList = argAltAD(LociMap = qtlLoci, Pop = self$founderPop, mean = mean, varA = varA, varD = varD, inbrDepr = inbrDepr, nThreads = self$nThreads) # Run optim to optimize meanDD and varDD optOut = optim(par = c(mean(limMeanDD), mean(sqrt(limVarDD))), fn = objAltAD, gr = NULL, method = "L-BFGS-B", lower = c(limMeanDD[1], sqrt(limVarDD[1])), upper = c(limMeanDD[2], sqrt(limVarDD[2])), args = argsList) # Finalize creation of trait output = finAltAD(input = optOut$par, args = argsList) trait = new("TraitAD", qtlLoci, addEff=c(output$a), domEff=c(output$d), intercept=c(output$intercept), name=name) private$.addTrait(trait,varA,output$varG) # Report trait details if(!silent){ cat("A new trait called", name, "was added. \n") cat(" varD =", output$varD, "\n") cat(" inbrDepr =", output$inbrDepr, "\n") cat(" meanDD =", output$meanDD, "\n") cat(" varDD =", output$varDD, "\n") } invisible(self) }, #' @description #' Randomly assigns eligible QTLs for one or more additive GxE traits. #' If simulating more than one trait, all traits will be pleiotropic #' with correlated effects. #' #' @param nQtlPerChr number of QTLs per chromosome. Can be a single value or nChr values. #' @param mean a vector of desired mean genetic values for one or more traits #' @param var a vector of desired genetic variances for one or more traits #' @param varGxE a vector of total genotype-by-environment variances for the traits #' @param varEnv a vector of environmental variances for one or more traits #' @param corA a matrix of correlations between additive effects #' @param corGxE a matrix of correlations between GxE effects #' @param gamma should a gamma distribution be used instead of normal #' @param shape the shape parameter for the gamma distribution #' (the rate/scale parameter of the gamma distribution is accounted #' for via the desired level of genetic variance, the var argument) #' @param force should the check for a running simulation be #' ignored. Only set to TRUE if you know what you are doing. 
#' @param name optional name for trait(s) #' #' @examples #' #Create founder haplotypes #' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10) #' #' #Set simulation parameters #' SP = SimParam$new(founderPop) #' \dontshow{SP$nThreads = 1L} #' SP$addTraitAG(10, varGxE=2) addTraitAG = function(nQtlPerChr,mean=0,var=1,varGxE=1e-6,varEnv=0, corA=NULL,corGxE=NULL,gamma=FALSE,shape=1, force=FALSE,name=NULL){ if(!force){ private$.isRunning() } if(length(nQtlPerChr)==1){ nQtlPerChr = rep(nQtlPerChr,self$nChr) } nTraits = length(mean) if(length(gamma)==1) gamma = rep(gamma,nTraits) if(length(shape)==1) shape = rep(shape,nTraits) if(length(varEnv)==1) varEnv = rep(varEnv,nTraits) if(is.null(corA)) corA=diag(nTraits) if(is.null(corGxE)) corGxE=diag(nTraits) if(is.null(name)){ name = paste0("Trait",1:nTraits+self$nTraits) } stopifnot(length(mean)==length(var), isSymmetric(corA), isSymmetric(corGxE), length(mean)==nrow(corA), length(mean)==nrow(corGxE), length(mean)==length(varGxE), length(mean)==length(varEnv), length(mean)==length(name)) qtlLoci = private$.pickLoci(nQtlPerChr) addEff = sampAddEff(qtlLoci=qtlLoci,nTraits=nTraits, corr=corA,gamma=gamma,shape=shape) gxeEff = sampAddEff(qtlLoci=qtlLoci,nTraits=nTraits, corr=corGxE,gamma=FALSE,shape=NULL) for(i in 1:nTraits){ trait = new("TraitA", qtlLoci, addEff=addEff[,i], intercept=0, name=name[i]) tmp = calcGenParam(trait, self$founderPop, self$nThreads) scale = sqrt(var[i])/sqrt(popVar(tmp$bv)[1]) trait@addEff = trait@addEff*scale trait@intercept = mean[i]-mean(tmp$gv*scale) # GxE component traitG = new("TraitA", qtlLoci, addEff=gxeEff[,i], intercept=0) tmpG = calcGenParam(traitG, self$founderPop, self$nThreads) if(varEnv[i]==0){ scaleG = sqrt(varGxE[i])/sqrt(popVar(tmpG$gv)[1]) trait = new("TraitAG", trait, gxeEff = gxeEff[,i]*scaleG, gxeInt = 0-mean(tmpG$gv*scaleG), envVar = 1) }else{ scaleG = sqrt(varGxE[i]/varEnv[i])/sqrt(popVar(tmpG$gv)[1]) trait = new("TraitAG", trait, gxeEff = gxeEff[,i]*scaleG, gxeInt = 1-mean(tmpG$gv*scaleG), envVar = varEnv[i]) } private$.addTrait(trait,var[i],var[i]) } invisible(self) }, #' @description #' Randomly assigns eligible QTLs for a trait with dominance and GxE. #' #' @param nQtlPerChr number of QTLs per chromosome. Can be a single #' value or nChr values. #' @param mean a vector of desired mean genetic values for one or more traits #' @param var a vector of desired genetic variances for one or more traits #' @param varGxE a vector of total genotype-by-environment variances for the traits #' @param varEnv a vector of environmental variances for one or more traits #' @param meanDD mean dominance degree #' @param varDD variance of dominance degree #' @param corA a matrix of correlations between additive effects #' @param corDD a matrix of correlations between dominance degrees #' @param corGxE a matrix of correlations between GxE effects #' @param useVarA tune according to additive genetic variance if true #' @param gamma should a gamma distribution be used instead of normal #' @param shape the shape parameter for the gamma distribution #' (the rate/scale parameter of the gamma distribution is accounted #' for via the desired level of genetic variance, the var argument) #' @param force should the check for a running simulation be #' ignored. Only set to TRUE if you know what you are doing. 
#' @param name optional name for trait(s) #' #' @examples #' #Create founder haplotypes #' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10) #' #' #Set simulation parameters #' SP = SimParam$new(founderPop) #' \dontshow{SP$nThreads = 1L} #' SP$addTraitADG(10, meanDD=0.5, varGxE=2) addTraitADG = function(nQtlPerChr,mean=0,var=1,varEnv=0, varGxE=1e-6,meanDD=0,varDD=0,corA=NULL, corDD=NULL,corGxE=NULL,useVarA=TRUE,gamma=FALSE, shape=1,force=FALSE,name=NULL){ if(!force){ private$.isRunning() } if(length(nQtlPerChr)==1){ nQtlPerChr = rep(nQtlPerChr,self$nChr) } nTraits = length(mean) if(length(meanDD)==1) meanDD = rep(meanDD,nTraits) if(length(varDD)==1) varDD = rep(varDD,nTraits) if(length(varEnv)==1) varEnv = rep(varEnv,nTraits) if(length(gamma)==1) gamma = rep(gamma,nTraits) if(length(shape)==1) shape = rep(shape,nTraits) if(is.null(corA)) corA=diag(nTraits) if(is.null(corDD)) corDD=diag(nTraits) if(is.null(corGxE)) corGxE=diag(nTraits) if(is.null(name)){ name = paste0("Trait",1:nTraits+self$nTraits) } stopifnot(length(mean)==length(var), isSymmetric(corA), isSymmetric(corDD), isSymmetric(corGxE), nrow(corA)==nTraits, nrow(corGxE)==nTraits, nrow(corDD)==nTraits, length(varGxE)==nTraits, length(varEnv)==nTraits, length(mean)==length(name)) qtlLoci = private$.pickLoci(nQtlPerChr) addEff = sampAddEff(qtlLoci=qtlLoci,nTraits=nTraits, corr=corA,gamma=gamma,shape=shape) domEff = sampDomEff(qtlLoci=qtlLoci,nTraits=nTraits,addEff=addEff, corDD=corDD,meanDD=meanDD,varDD=varDD) gxeEff = sampAddEff(qtlLoci=qtlLoci,nTraits=nTraits, corr=corGxE,gamma=FALSE,shape=NULL) for(i in 1:nTraits){ trait = new("TraitAD", qtlLoci, addEff=addEff[,i], domEff=domEff[,i], intercept=0, name=name[i]) tmp = calcGenParam(trait, self$founderPop, self$nThreads) if(useVarA){ scale = sqrt(var[i])/sqrt(popVar(tmp$bv)[1]) }else{ scale = sqrt(var[i])/sqrt(popVar(tmp$gv)[1]) } trait@addEff = trait@addEff*scale trait@domEff = trait@domEff*scale trait@intercept = mean[i]-mean(tmp$gv*scale) # GxE component traitG = new("TraitA", qtlLoci, addEff=gxeEff[,i], intercept=0) tmpG = calcGenParam(traitG, self$founderPop, self$nThreads) if(varEnv[i]==0){ scaleG = sqrt(varGxE[i])/sqrt(popVar(tmpG$gv)[1]) trait = new("TraitADG", trait, gxeEff = gxeEff[,i]*scaleG, gxeInt = 0-mean(tmpG$gv*scaleG), envVar = 1) }else{ scaleG = sqrt(varGxE[i]/varEnv[i])/sqrt(popVar(tmpG$gv)[1]) trait = new("TraitADG", trait, gxeEff = gxeEff[,i]*scaleG, gxeInt = 1-mean(tmpG$gv*scaleG), envVar = varEnv[i]) } if(useVarA){ private$.addTrait(trait,var[i],popVar(tmp$gv*scale)[1]) }else{ private$.addTrait(trait,popVar(tmp$bv*scale)[1],var[i]) } } invisible(self) }, #' @description #' Randomly assigns eligible QTLs for one or more additive and epistasis #' traits. If simulating more than one trait, all traits will be pleiotropic #' with correlated additive effects. #' #' @param nQtlPerChr number of QTLs per chromosome. Can be a single value or nChr values. #' @param mean a vector of desired mean genetic values for one or more traits #' @param var a vector of desired genetic variances for one or more traits #' @param relAA the relative value of additive-by-additive variance compared #' to additive variance in a diploid organism with allele frequency 0.5 #' @param corA a matrix of correlations between additive effects #' @param corAA a matrix of correlations between additive-by-additive effects #' @param useVarA tune according to additive genetic variance if true. If #' FALSE, tuning is performed according to total genetic variance. 
#' @param gamma should a gamma distribution be used instead of normal
#' @param shape the shape parameter for the gamma distribution
#' (the rate/scale parameter of the gamma distribution is accounted
#' for via the desired level of genetic variance, the var argument)
#' @param force should the check for a running simulation be
#' ignored. Only set to TRUE if you know what you are doing.
#' @param name optional name for trait(s)
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitAE(10, relAA=0.1)
addTraitAE = function(nQtlPerChr,mean=0,var=1,relAA=0,corA=NULL,
                      corAA=NULL,useVarA=TRUE,gamma=FALSE,shape=1,force=FALSE,
                      name=NULL){
  if(!force){
    private$.isRunning()
  }
  if(length(nQtlPerChr)==1){
    nQtlPerChr = rep(nQtlPerChr,self$nChr)
  }
  nTraits = length(mean)
  relAA = relAA*4
  if(length(gamma)==1) gamma = rep(gamma,nTraits)
  if(length(shape)==1) shape = rep(shape,nTraits)
  if(length(relAA)==1) relAA = rep(relAA,nTraits)
  if(is.null(corA)) corA=diag(nTraits)
  if(is.null(corAA)) corAA=diag(nTraits)
  if(is.null(name)){
    name = paste0("Trait",1:nTraits+self$nTraits)
  }
  stopifnot(length(mean)==length(var),
            isSymmetric(corA),
            isSymmetric(corAA),
            length(relAA)==length(mean),
            length(mean)==nrow(corA),
            length(mean)==nrow(corAA),
            (sum(nQtlPerChr)%%2L)==0L,
            length(mean)==length(name))
  qtlLoci = private$.pickLoci(nQtlPerChr)
  addEff = sampAddEff(qtlLoci=qtlLoci,nTraits=nTraits,
                      corr=corA,gamma=gamma,shape=shape)
  epiEff = sampEpiEff(qtlLoci=qtlLoci,nTraits=nTraits,
                      corr=corAA,gamma=gamma,shape=shape,
                      relVar=relAA)
  E = matrix(sample.int(sum(nQtlPerChr),sum(nQtlPerChr)),ncol=2)
  for(i in 1:nTraits){
    trait = new("TraitAE",
                qtlLoci,
                addEff=addEff[,i],
                epiEff=cbind(E,epiEff[,i]),
                intercept=0,
                name=name[i])
    tmp = calcGenParam(trait, self$founderPop, self$nThreads)
    if(useVarA){
      scale = sqrt(var[i])/sqrt(popVar(tmp$bv)[1])
    }else{
      scale = sqrt(var[i])/sqrt(popVar(tmp$gv)[1])
    }
    trait@addEff = trait@addEff*scale
    trait@epiEff[,3] = trait@epiEff[,3]*scale
    trait@intercept = mean[i]-mean(tmp$gv*scale)
    if(useVarA){
      private$.addTrait(trait,var[i],popVar(tmp$gv*scale)[1])
    }else{
      private$.addTrait(trait,popVar(tmp$bv*scale)[1],var[i])
    }
  }
  invisible(self)
},

#' @description
#' Randomly assigns eligible QTLs for one or more traits with dominance and
#' epistasis. If simulating more than one trait, all traits will be pleiotropic
#' with correlated effects.
#'
#' @param nQtlPerChr number of QTLs per chromosome. Can be a single value or nChr values.
#' @param mean a vector of desired mean genetic values for one or more traits
#' @param var a vector of desired genetic variances for one or more traits
#' @param meanDD mean dominance degree
#' @param varDD variance of dominance degree
#' @param relAA the relative value of additive-by-additive variance compared
#' to additive variance in a diploid organism with allele frequency 0.5
#' @param corA a matrix of correlations between additive effects
#' @param corDD a matrix of correlations between dominance degrees
#' @param corAA a matrix of correlations between additive-by-additive effects
#' @param useVarA tune according to additive genetic variance if true. If
#' FALSE, tuning is performed according to total genetic variance.
#' @param gamma should a gamma distribution be used instead of normal
#' @param shape the shape parameter for the gamma distribution
#' (the rate/scale parameter of the gamma distribution is accounted
#' for via the desired level of genetic variance, the var argument)
#' @param force should the check for a running simulation be
#' ignored. Only set to TRUE if you know what you are doing.
#' @param name optional name for trait(s)
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitADE(10)
addTraitADE = function(nQtlPerChr,mean=0,var=1,meanDD=0,
                       varDD=0,relAA=0,corA=NULL,corDD=NULL,corAA=NULL,
                       useVarA=TRUE,gamma=FALSE,shape=1,force=FALSE,
                       name=NULL){
  if(!force){
    private$.isRunning()
  }
  if(length(nQtlPerChr)==1){
    nQtlPerChr = rep(nQtlPerChr,self$nChr)
  }
  nTraits = length(mean)
  relAA = relAA*4
  if(length(meanDD)==1) meanDD = rep(meanDD,nTraits)
  if(length(varDD)==1) varDD = rep(varDD,nTraits)
  if(length(gamma)==1) gamma = rep(gamma,nTraits)
  if(length(shape)==1) shape = rep(shape,nTraits)
  if(length(relAA)==1) relAA = rep(relAA,nTraits)
  if(is.null(corA)) corA=diag(nTraits)
  if(is.null(corDD)) corDD=diag(nTraits)
  if(is.null(corAA)) corAA=diag(nTraits)
  if(is.null(name)){
    name = paste0("Trait",1:nTraits+self$nTraits)
  }
  stopifnot(length(mean)==length(var),
            isSymmetric(corA),
            isSymmetric(corDD),
            isSymmetric(corAA),
            length(mean)==nrow(corA),
            length(mean)==nrow(corAA),
            length(mean)==nrow(corDD),
            length(relAA)==length(mean),
            (sum(nQtlPerChr)%%2L)==0L,
            length(mean)==length(name))
  qtlLoci = private$.pickLoci(nQtlPerChr)
  addEff = sampAddEff(qtlLoci=qtlLoci,nTraits=nTraits,
                      corr=corA,gamma=gamma,shape=shape)
  domEff = sampDomEff(qtlLoci=qtlLoci,nTraits=nTraits,addEff=addEff,
                      corDD=corDD,meanDD=meanDD,varDD=varDD)
  epiEff = sampEpiEff(qtlLoci=qtlLoci,nTraits=nTraits,
                      corr=corAA,gamma=gamma,shape=shape,
                      relVar=relAA)
  E = matrix(sample.int(sum(nQtlPerChr),sum(nQtlPerChr)),ncol=2)
  for(i in 1:nTraits){
    trait = new("TraitADE",
                qtlLoci,
                addEff=addEff[,i],
                domEff=domEff[,i],
                epiEff=cbind(E,epiEff[,i]),
                intercept=0,
                name=name[i])
    tmp = calcGenParam(trait, self$founderPop, self$nThreads)
    if(useVarA){
      scale = sqrt(var[i])/sqrt(popVar(tmp$bv)[1])
    }else{
      scale = sqrt(var[i])/sqrt(popVar(tmp$gv)[1])
    }
    trait@addEff = trait@addEff*scale
    trait@domEff = trait@domEff*scale
    trait@epiEff[,3] = trait@epiEff[,3]*scale
    trait@intercept = mean[i]-mean(tmp$gv*scale)
    if(useVarA){
      private$.addTrait(trait,var[i],popVar(tmp$gv*scale)[1])
    }else{
      private$.addTrait(trait,popVar(tmp$bv*scale)[1],var[i])
    }
  }
  invisible(self)
},

#' @description
#' Randomly assigns eligible QTLs for one or more additive and epistasis
#' GxE traits. If simulating more than one trait, all traits will be pleiotropic
#' with correlated effects.
#'
#' @param nQtlPerChr number of QTLs per chromosome. Can be a single value or nChr values.
#' @param mean a vector of desired mean genetic values for one or more traits
#' @param var a vector of desired genetic variances for one or more traits
#' @param relAA the relative value of additive-by-additive variance compared
#' to additive variance in a diploid organism with allele frequency 0.5
#' @param varGxE a vector of total genotype-by-environment variances for the traits
#' @param varEnv a vector of environmental variances for one or more traits
#' @param corA a matrix of correlations between additive effects
#' @param corAA a matrix of correlations between additive-by-additive effects
#' @param corGxE a matrix of correlations between GxE effects
#' @param useVarA tune according to additive genetic variance if true. If
#' FALSE, tuning is performed according to total genetic variance.
#' @param gamma should a gamma distribution be used instead of normal
#' @param shape the shape parameter for the gamma distribution
#' (the rate/scale parameter of the gamma distribution is accounted
#' for via the desired level of genetic variance, the var argument)
#' @param force should the check for a running simulation be
#' ignored. Only set to TRUE if you know what you are doing.
#' @param name optional name for trait(s)
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitAEG(10, varGxE=2)
addTraitAEG = function(nQtlPerChr,mean=0,var=1,relAA=0,varGxE=1e-6,varEnv=0,
                       corA=NULL,corAA=NULL,corGxE=NULL,useVarA=TRUE,gamma=FALSE,
                       shape=1,force=FALSE,name=NULL){
  if(!force){
    private$.isRunning()
  }
  if(length(nQtlPerChr)==1){
    nQtlPerChr = rep(nQtlPerChr,self$nChr)
  }
  nTraits = length(mean)
  relAA = relAA*4
  if(length(gamma)==1) gamma = rep(gamma,nTraits)
  if(length(shape)==1) shape = rep(shape,nTraits)
  if(length(relAA)==1) relAA = rep(relAA,nTraits)
  if(length(varEnv)==1) varEnv = rep(varEnv,nTraits)
  if(is.null(corA)) corA=diag(nTraits)
  if(is.null(corAA)) corAA=diag(nTraits)
  if(is.null(corGxE)) corGxE=diag(nTraits)
  if(is.null(name)){
    name = paste0("Trait",1:nTraits+self$nTraits)
  }
  stopifnot(length(mean)==length(var),
            length(relAA)==length(mean),
            isSymmetric(corA),
            isSymmetric(corGxE),
            isSymmetric(corAA),
            length(mean)==nrow(corA),
            length(mean)==nrow(corAA),
            length(mean)==nrow(corGxE),
            length(mean)==length(varGxE),
            length(mean)==length(varEnv),
            (sum(nQtlPerChr)%%2L)==0L,
            length(mean)==length(name))
  qtlLoci = private$.pickLoci(nQtlPerChr)
  addEff = sampAddEff(qtlLoci=qtlLoci,nTraits=nTraits,
                      corr=corA,gamma=gamma,shape=shape)
  epiEff = sampEpiEff(qtlLoci=qtlLoci,nTraits=nTraits,
                      corr=corAA,gamma=gamma,shape=shape,
                      relVar=relAA)
  E = matrix(sample.int(sum(nQtlPerChr),sum(nQtlPerChr)),ncol=2)
  gxeEff = sampAddEff(qtlLoci=qtlLoci,nTraits=nTraits,
                      corr=corGxE,gamma=FALSE,shape=NULL)
  for(i in 1:nTraits){
    trait = new("TraitAE",
                qtlLoci,
                addEff=addEff[,i],
                epiEff=cbind(E,epiEff[,i]),
                intercept=0,
                name=name[i])
    tmp = calcGenParam(trait, self$founderPop, self$nThreads)
    if(useVarA){
      scale = sqrt(var[i])/sqrt(popVar(tmp$bv)[1])
    }else{
      scale = sqrt(var[i])/sqrt(popVar(tmp$gv)[1])
    }
    trait@addEff = trait@addEff*scale
    trait@epiEff[,3] = trait@epiEff[,3]*scale
    trait@intercept = mean[i]-mean(tmp$gv*scale)

    # GxE component
    traitG = new("TraitA",
                 qtlLoci,
                 addEff=gxeEff[,i],
                 intercept=0)
    tmpG = calcGenParam(traitG, self$founderPop, self$nThreads)
    if(varEnv[i]==0){
      scaleG = sqrt(varGxE[i])/sqrt(popVar(tmpG$gv)[1])
      trait = new("TraitAEG",
                  trait,
                  gxeEff = gxeEff[,i]*scaleG,
gxeInt = 0-mean(tmpG$gv*scaleG), envVar = 1) }else{ scaleG = sqrt(varGxE[i]/varEnv[i])/sqrt(popVar(tmpG$gv)[1]) trait = new("TraitAEG", trait, gxeEff = gxeEff[,i]*scaleG, gxeInt = 1-mean(tmpG$gv*scaleG), envVar = varEnv[i]) } if(useVarA){ private$.addTrait(trait,var[i],popVar(tmp$gv*scale)[1]) }else{ private$.addTrait(trait,popVar(tmp$bv*scale)[1],var[i]) } } invisible(self) }, #' @description #' Randomly assigns eligible QTLs for a trait with dominance, #' epistasis and GxE. #' #' @param nQtlPerChr number of QTLs per chromosome. Can be a single #' value or nChr values. #' @param mean a vector of desired mean genetic values for one or more traits #' @param var a vector of desired genetic variances for one or more traits #' @param varGxE a vector of total genotype-by-environment variances for the traits #' @param varEnv a vector of environmental variances for one or more traits #' @param meanDD mean dominance degree #' @param varDD variance of dominance degree #' @param relAA the relative value of additive-by-additive variance compared #' to additive variance in a diploid organism with allele frequency 0.5 #' @param corA a matrix of correlations between additive effects #' @param corDD a matrix of correlations between dominance degrees #' @param corAA a matrix of correlations between additive-by-additive effects #' @param corGxE a matrix of correlations between GxE effects #' @param useVarA tune according to additive genetic variance if true #' @param gamma should a gamma distribution be used instead of normal #' @param shape the shape parameter for the gamma distribution #' (the rate/scale parameter of the gamma distribution is accounted #' for via the desired level of genetic variance, the var argument) #' @param force should the check for a running simulation be #' ignored. Only set to TRUE if you know what you are doing. 
#' @param name optional name for trait(s)
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitADEG(10, meanDD=0.5, varGxE=2)
addTraitADEG = function(nQtlPerChr,mean=0,var=1,varEnv=0,
                        varGxE=1e-6,meanDD=0,varDD=0,relAA=0,corA=NULL,
                        corDD=NULL,corAA=NULL,corGxE=NULL,useVarA=TRUE,
                        gamma=FALSE,shape=1,force=FALSE,name=NULL){
  if(!force){
    private$.isRunning()
  }
  if(length(nQtlPerChr)==1){
    nQtlPerChr = rep(nQtlPerChr,self$nChr)
  }
  nTraits = length(mean)
  relAA = relAA*4
  if(length(meanDD)==1) meanDD = rep(meanDD,nTraits)
  if(length(varDD)==1) varDD = rep(varDD,nTraits)
  if(length(relAA)==1) relAA = rep(relAA,nTraits)
  if(length(varEnv)==1) varEnv = rep(varEnv,nTraits)
  if(length(gamma)==1) gamma = rep(gamma,nTraits)
  if(length(shape)==1) shape = rep(shape,nTraits)
  if(is.null(corA)) corA=diag(nTraits)
  if(is.null(corDD)) corDD=diag(nTraits)
  if(is.null(corGxE)) corGxE=diag(nTraits)
  if(is.null(corAA)) corAA=diag(nTraits)
  if(is.null(name)){
    name = paste0("Trait",1:nTraits+self$nTraits)
  }
  stopifnot(length(mean)==length(var),
            isSymmetric(corA),
            isSymmetric(corDD),
            isSymmetric(corGxE),
            isSymmetric(corAA),
            nrow(corA)==nTraits,
            nrow(corGxE)==nTraits,
            nrow(corDD)==nTraits,
            nrow(corAA)==nTraits,
            length(varGxE)==nTraits,
            length(varEnv)==nTraits,
            length(relAA)==length(mean),
            (sum(nQtlPerChr)%%2L)==0L,
            length(mean)==length(name))
  qtlLoci = private$.pickLoci(nQtlPerChr)
  addEff = sampAddEff(qtlLoci=qtlLoci,nTraits=nTraits,
                      corr=corA,gamma=gamma,shape=shape)
  domEff = sampDomEff(qtlLoci=qtlLoci,nTraits=nTraits,addEff=addEff,
                      corDD=corDD,meanDD=meanDD,varDD=varDD)
  epiEff = sampEpiEff(qtlLoci=qtlLoci,nTraits=nTraits,
                      corr=corAA,gamma=gamma,shape=shape,
                      relVar=relAA)
  E = matrix(sample.int(sum(nQtlPerChr),sum(nQtlPerChr)),ncol=2)
  gxeEff = sampAddEff(qtlLoci=qtlLoci,nTraits=nTraits,
                      corr=corGxE,gamma=FALSE,shape=NULL)
  for(i in 1:nTraits){
    trait = new("TraitADE",
                qtlLoci,
                addEff=addEff[,i],
                domEff=domEff[,i],
                epiEff=cbind(E,epiEff[,i]),
                intercept=0,
                name=name[i])
    tmp = calcGenParam(trait, self$founderPop, self$nThreads)
    if(useVarA){
      scale = sqrt(var[i])/sqrt(popVar(tmp$bv)[1])
    }else{
      scale = sqrt(var[i])/sqrt(popVar(tmp$gv)[1])
    }
    trait@addEff = trait@addEff*scale
    trait@domEff = trait@domEff*scale
    trait@epiEff[,3] = trait@epiEff[,3]*scale
    trait@intercept = mean[i]-mean(tmp$gv*scale)

    # GxE component
    traitG = new("TraitA",
                 qtlLoci,
                 addEff=gxeEff[,i],
                 intercept=0)
    tmpG = calcGenParam(traitG, self$founderPop, self$nThreads)
    if(varEnv[i]==0){
      scaleG = sqrt(varGxE[i])/sqrt(popVar(tmpG$gv)[1])
      trait = new("TraitADEG",
                  trait,
                  gxeEff = gxeEff[,i]*scaleG,
                  gxeInt = 0-mean(tmpG$gv*scaleG),
                  envVar = 1)
    }else{
      scaleG = sqrt(varGxE[i]/varEnv[i])/sqrt(popVar(tmpG$gv)[1])
      trait = new("TraitADEG",
                  trait,
                  gxeEff = gxeEff[,i]*scaleG,
                  gxeInt = 1-mean(tmpG$gv*scaleG),
                  envVar = varEnv[i])
    }
    if(useVarA){
      private$.addTrait(trait,var[i],popVar(tmp$gv*scale)[1])
    }else{
      private$.addTrait(trait,popVar(tmp$bv*scale)[1],var[i])
    }
  }
  invisible(self)
},

#' @description
#' Manually add a new trait to the simulation. The trait must
#' be formatted as a \code{\link{LociMap-class}}. If the
#' trait is not already formatted, consider using importTrait.
#'
#' @param lociMap a new object descended from
#' \code{\link{LociMap-class}}
#' @param varE default error variance for phenotype, optional
#' @param force should the check for a running simulation be
#' ignored. Only set to TRUE if you know what you are doing.
manAddTrait = function(lociMap,varE=NA_real_,force=FALSE){
  if(!force){
    private$.isRunning()
  }
  stopifnot(is(lociMap,"LociMap"))
  tmp = calcGenParam(lociMap, self$founderPop, self$nThreads)
  varA = popVar(tmp$bv)[1]
  varG = popVar(tmp$gv)[1]
  private$.addTrait(lociMap,varA,varG,varE)
  invisible(self)
},

#' @description
#' Manually add new trait(s) to the simulation. Unlike the
#' manAddTrait function, this function does not require
#' formatting the trait as a \code{\link{LociMap-class}}.
#' The formatting is performed automatically for the user,
#' with more user-friendly data.frames or matrices taken as
#' inputs. This function only works for A and AD trait types.
#'
#' @param markerNames a vector of names for the QTL
#' @param addEff a matrix of additive effects (nLoci x nTraits).
#' Alternatively, a vector of length nLoci can be supplied for
#' a single trait.
#' @param domEff optional dominance effects for each locus
#' @param intercept optional intercepts for each trait
#' @param name optional name(s) for the trait(s)
#' @param varE default error variance for phenotype, optional
#' @param force should the check for a running simulation be
#' ignored. Only set to TRUE if you know what you are doing.
importTrait = function(markerNames, addEff, domEff=NULL,
                       intercept=NULL, name=NULL, varE=NULL,
                       force=FALSE){
  if(!force){
    private$.isRunning()
  }

  # Check addEff and domEff inputs
  addEff = as.matrix(addEff)
  stopifnot(length(markerNames)==nrow(addEff))
  nTraits = ncol(addEff)
  if(is.null(domEff)){
    useDom = FALSE
  }else{
    useDom = TRUE
    domEff = as.matrix(domEff)
    stopifnot(nrow(addEff)==nrow(domEff),
              ncol(addEff)==ncol(domEff))
  }

  # Prepare the intercept
  if(is.null(intercept)){
    intercept = rep(0, nTraits)
  }else{
    intercept = as.numeric(intercept)
    stopifnot(length(intercept)==nTraits)
  }

  # Prepare varE
  if(!is.null(varE)){
    varE = as.numeric(varE)
    stopifnot(length(varE)==nTraits)
  }else{
    varE = rep(NA_real_, nTraits)
  }

  # Prepare trait names
  if(is.null(name)){
    name = paste0("Trait",1:nTraits+self$nTraits)
  }else{
    stopifnot(length(name)==nTraits)
  }

  # Extract genetic map and check if marker names are on the map
  genMapMarkerNames = unlist(lapply(private$.femaleMap, names))
  stopifnot(all(markerNames%in%genMapMarkerNames))

  # Create trait variables
  lociPerChr = integer(self$nChr)
  lociLoc = vector("list", self$nChr)
  addEffList = domEffList = vector("list", nTraits)
  for(i in 1:nTraits){
    addEffList[[i]] = domEffList[[i]] = vector("list", self$nChr)
  }

  # Loop through chromosomes
  for(i in 1:self$nChr){
    # Working on trait 1

    # Initialize variables
    addEffList[[1]][[i]] = domEffList[[1]][[i]] = numeric()
    lociLoc[[i]] = integer()

    # Find matches if they exist
    take = match(names(private$.femaleMap[[i]]), markerNames)
    lociPerChr[i] = length(na.omit(take))
    if(lociPerChr[i]>0L){
      lociLoc[[i]] = which(!is.na(take))
      addEffList[[1]][[i]] = addEff[na.omit(take),1]
      if(useDom){
        domEffList[[1]][[i]] = domEff[na.omit(take),1]
      }
    }

    # Work on additional traits?
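    # For traits beyond the first, reuse the marker matches ('take')
    # found for this chromosome to copy each trait's additive (and,
    # if supplied, dominance) effects into the per-chromosome lists.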
    if(nTraits>1){
      for(j in 2:nTraits){
        addEffList[[j]][[i]] = domEffList[[j]][[i]] = numeric()
        if(lociPerChr[i]>0L){
          addEffList[[j]][[i]] = addEff[na.omit(take),j]
          if(useDom){
            domEffList[[j]][[i]] = domEff[na.omit(take),j]
          }
        }
      }
    }
  }
  lociLoc = unlist(lociLoc)
  nLoci = sum(lociPerChr)

  # Create Trait(s)
  for(i in 1:nTraits){
    addEff = unlist(addEffList[[i]])
    if(useDom){
      domEff = unlist(domEffList[[i]])
      trait = new("TraitAD",
                  addEff=addEff,
                  domEff=domEff,
                  intercept=intercept[i],
                  nLoci=nLoci,
                  lociPerChr=lociPerChr,
                  lociLoc=lociLoc,
                  name=name[i])
    }else{
      trait = new("TraitA",
                  addEff=addEff,
                  intercept=intercept[i],
                  nLoci=nLoci,
                  lociPerChr=lociPerChr,
                  lociLoc=lociLoc,
                  name=name[i])
    }

    # Add trait to simParam
    self$manAddTrait(lociMap=trait, varE=varE[i], force=force)
  }

  invisible(self)
},

#' @description
#' Switch a trait in the simulation.
#'
#' @param traitPos an integer indicating which trait to switch
#' @param lociMap a new object descended from
#' \code{\link{LociMap-class}}
#' @param varE default error variance for phenotype, optional
#' @param force should the check for a running simulation be
#' ignored. Only set to TRUE if you know what you are doing.
switchTrait = function(traitPos,lociMap,varE=NA_real_,force=FALSE){
  if(!force){
    private$.isRunning()
  }
  stopifnot(is(lociMap,"LociMap"),
            traitPos<=self$nTraits,
            traitPos>0)
  tmp = calcGenParam(lociMap, self$founderPop, self$nThreads)
  private$.traits[[traitPos]] = lociMap
  private$.varA[traitPos] = popVar(tmp$bv)[1]
  private$.varG[traitPos] = popVar(tmp$gv)[1]
  if(is.matrix(private$.varE)){
    private$.varE[traitPos,] = 0
    private$.varE[,traitPos] = 0
    private$.varE[traitPos,traitPos] = varE
  }else{
    private$.varE[traitPos] = varE
  }
  invisible(self)
},

#' @description
#' Remove a trait from the simulation
#'
#' @param traits an integer vector indicating which traits to remove
#' @param force should the check for a running simulation be
#' ignored. Only set to TRUE if you know what you are doing.
removeTrait = function(traits,force=FALSE){
  if(!force){
    private$.isRunning()
  }
  stopifnot(max(traits)<=self$nTraits,
            min(traits)>0)
  private$.traits = private$.traits[-traits]
  private$.varA = private$.varA[-traits]
  private$.varG = private$.varG[-traits]
  if(is.matrix(private$.varE)){
    private$.varE = private$.varE[-traits,-traits]
  }else{
    private$.varE = private$.varE[-traits]
  }
  invisible(self)
},

#' @description Defines default values for the error
#' variances used in \code{\link{setPheno}}. These defaults
#' will be used to automatically generate phenotypes when new
#' populations are created. See the details section of \code{\link{setPheno}}
#' for more information about each argument and how it
#' should be used.
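#'
#' For narrow-sense heritability the default error variance is derived as
#' varE = varA/h2 - varG, so e.g. with varA = varG = 1 and h2 = 0.5 the
#' default error variance is 1/0.5 - 1 = 1. For broad-sense heritability
#' it is varE = varG/H2 - varG.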
#'
#' @param h2 a vector of desired narrow-sense heritabilities
#' @param H2 a vector of desired broad-sense heritabilities
#' @param varE a vector or matrix of error variances
#' @param corE an optional matrix of error correlations
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitA(10)
#' SP$setVarE(h2=0.5)
setVarE = function(h2=NULL, H2=NULL, varE=NULL, corE=NULL){
  # Check validity of corE, if supplied
  if(!is.null(corE)){
    stopifnot(isSymmetric(corE),
              nrow(corE)==self$nTraits)
  }

  # Set error variances (.varE)
  if(!is.null(h2)){
    stopifnot(length(h2)==self$nTraits,
              all(private$.varG>0),
              all(private$.varA>0))
    varE = numeric(self$nTraits)
    for(i in 1:length(h2)){
      tmp = private$.varA[i]/h2[i]-private$.varG[i]
      if(tmp<0){
        stop(paste0("h2=",h2[i]," is not possible for trait ",i))
      }
      varE[i] = tmp
    }
    private$.varE = varE
  }else if(!is.null(H2)){
    stopifnot(length(H2)==self$nTraits)
    varE = numeric(self$nTraits)
    for(i in 1:length(H2)){
      tmp = private$.varG[i]/H2[i]-private$.varG[i]
      varE[i] = tmp
    }
    private$.varE = varE
  }else if(!is.null(varE)){
    if(is.matrix(varE)){
      stopifnot(nrow(varE)==self$nTraits,
                ncol(varE)==self$nTraits)
    }else{
      stopifnot(length(varE)==self$nTraits)
    }
    private$.varE = varE
  }else{
    private$.varE = rep(NA_real_, self$nTraits)
  }

  # Set error correlations
  if(!is.null(corE)){
    if(is.matrix(private$.varE)){
      varE = diag(private$.varE)
    }else{
      varE = private$.varE
    }
    varE = diag(sqrt(varE),
                nrow=self$nTraits,
                ncol=self$nTraits)
    varE = varE%*%corE%*%varE
    private$.varE = varE
  }

  invisible(self)
},

#' @description Defines a correlation structure for default
#' error variances. You must call \code{setVarE} first to define
#' the default error variances.
#'
#' @param corE a correlation matrix for the error variances
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitA(10, mean=c(0,0), var=c(1,1), corA=diag(2))
#' SP$setVarE(varE=c(1,1))
#' E = 0.5*diag(2)+0.5 #Positively correlated error
#' SP$setCorE(E)
setCorE = function(corE){
  warning("This function has been deprecated. Use simParam$setVarE instead.")
  stopifnot(isSymmetric(corE),
            nrow(corE)==self$nTraits,
            length(private$.varE)==self$nTraits)
  if(is.matrix(private$.varE)){
    varE = diag(private$.varE)
  }else{
    varE = private$.varE
  }
  varE = diag(sqrt(varE),
              nrow=self$nTraits,
              ncol=self$nTraits)
  varE = varE%*%corE%*%varE
  private$.varE = varE
  invisible(self)
},

#' @description
#' Linearly scales all traits to achieve desired
#' values of means and variances in the founder population.
#'
#' @param mean a vector of new trait means
#' @param var a vector of new trait variances
#' @param varEnv a vector of new environmental variances
#' @param varGxE a vector of new GxE variances
#' @param useVarA tune according to additive genetic variance if true
#'
#' @note
#' By default the founder population is the population used to
#' initialize the SimParam object. This population can be changed by
#' replacing the population in the founderPop slot. You must run
#' \code{\link{resetPop}} on any existing populations to obtain the
#' new trait values.
#' #' @examples #' #Create founder haplotypes #' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10) #' #' #Set simulation parameters #' SP = SimParam$new(founderPop) #' SP$addTraitA(10) #' #' #Create population #' pop = newPop(founderPop, simParam=SP) #' meanG(pop) #' #' #Change mean to 1 #' SP$rescaleTraits(mean=1) #' \dontshow{SP$nThreads = 1L} #' #Run resetPop for change to take effect #' pop = resetPop(pop, simParam=SP) #' meanG(pop) rescaleTraits = function(mean=0,var=1,varEnv=0, varGxE=1e-6,useVarA=TRUE){ stopifnot(length(mean)==self$nTraits, length(var)==self$nTraits, length(varEnv)==self$nTraits, length(varGxE)==self$nTraits) for(i in 1:self$nTraits){ trait = private$.traits[[i]] trait@intercept = 0 tmp = calcGenParam(trait, self$founderPop, self$nThreads) if(useVarA){ scale = sqrt(var[i])/sqrt(popVar(tmp$bv)[1]) }else{ scale = sqrt(var[i])/sqrt(popVar(tmp$gv)[1]) } trait@addEff = trait@addEff*scale if(.hasSlot(trait,"domEff")){ trait@domEff = trait@domEff*scale } if(.hasSlot(trait,"epiEff")){ trait@epiEff[,3] = trait@epiEff[,3]*scale } trait@intercept = mean[i]-mean(tmp$gv*scale) if(.hasSlot(trait,"gxeEff")){ traitG = new("TraitA", nLoci = trait@nLoci, lociPerChr = trait@lociPerChr, lociLoc = trait@lociLoc, addEff = trait@gxeEff, intercept = 0) tmpG = calcGenParam(traitG, self$founderPop, self$nThreads) if(varEnv[i]==0){ scaleG = sqrt(varGxE[i])/sqrt(popVar(tmpG$gv)[1]) trait@gxeEff = trait@gxeEff*scaleG trait@gxeInt = 0-mean(tmpG$gv*scaleG) trait@envVar = 1 }else{ scaleG = sqrt(varGxE[i]/varEnv[i])/sqrt(popVar(tmpG$gv)[1]) trait@gxeEff = trait@gxeEff*scaleG trait@gxeInt = 1-mean(tmpG$gv*scaleG) trait@envVar = varEnv[i] } } private$.varA[i] = popVar(tmp$bv*scale)[1] private$.varG[i] = popVar(tmp$gv*scale)[1] private$.traits[[i]] = trait } invisible(self) }, #### Genetic map (public) ---- #' @field v the crossover interference parameter for a gamma model of #' recombination. A value of 1 indicates no crossover interference #' (e.g. Haldane mapping function). A value of 2.6 approximates the #' degree of crossover interference implied by the Kosambi mapping #' function. (default is 2.6) v = "numeric", #' @field p the proportion of crossovers coming from a non-interfering #' pathway. (default is 0) p = "numeric", #' @field quadProb the probability of quadrivalent pairing in an #' autopolyploid. (default is 0) quadProb = "numeric", #' @description Set the relative recombination rates between males #' and females. This allows for sex-specific recombination rates, #' under the assumption of equivalent recombination landscapes. #' #' @param femaleRatio relative ratio of recombination in females compared to #' males. A value of 2 indicate twice as much recombination in females. The #' value must be greater than 0. (default is 1) #' #' @examples #' #Create founder haplotypes #' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10) #' #' #Set simulation parameters #' SP = SimParam$new(founderPop) #' \dontshow{SP$nThreads = 1L} #' SP$setRecombRatio(2) #Twice as much recombination in females setRecombRatio = function(femaleRatio){ stopifnot(femaleRatio>0) genMap = self$genMap private$.sepMap = TRUE feSc = 2/(1/femaleRatio+1) maSc = 2/(femaleRatio+1) private$.femaleMap = lapply(genMap, function(x){ feSc*x }) private$.femaleCentromere = feSc*private$.femaleCentromere private$.maleMap = lapply(genMap, function(x){ maSc*x }) private$.maleCentromere = maSc*private$.maleCentromere invisible(self) }, #' @description #' Replaces existing genetic map. 
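#' Positions are shifted so that each chromosome starts at 0, and the
#' number of sites per chromosome must match the existing map. A hedged
#' sketch, deriving a new map from the current one ('newMap' is an
#' illustrative object, not part of the API):
#' # newMap = lapply(SP$genMap, function(x) x/2) # e.g. halve map lengths
#' # SP$switchGenMap(newMap)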
#' #' @param genMap a list of length nChr containing #' numeric vectors for the position of each segregating #' site on a chromosome. #' @param centromere a numeric vector of centromere #' positions. If NULL, the centromere are assumed to #' be metacentric. switchGenMap = function(genMap, centromere=NULL){ genMap = lapply(genMap, function(x) x-x[1]) # Set position 1 to 0 if(is.null(centromere)){ centromere=sapply(genMap,max)/2 } stopifnot(length(genMap)==self$nChr, centromere<=sapply(genMap,max)) tmp = do.call("c",lapply(genMap,length)) stopifnot(all(tmp==private$.segSites)) private$.sepMap = FALSE private$.femaleMap = genMap private$.maleMap = NULL private$.femaleCentromere = centromere private$.maleCentromere = NULL invisible(self) }, #' @description #' Replaces existing female genetic map. #' #' @param genMap a list of length nChr containing #' numeric vectors for the position of each segregating #' site on a chromosome. #' @param centromere a numeric vector of centromere #' positions. If NULL, the centromere are assumed to #' be metacentric. switchFemaleMap = function(genMap, centromere=NULL){ genMap = lapply(genMap, function(x) x-x[1]) # Set position 1 to 0 if(is.null(centromere)){ centromere=sapply(genMap,max)/2 } stopifnot(length(genMap)==self$nChr, centromere<=sapply(genMap,max)) tmp = do.call("c",lapply(genMap,length)) stopifnot(all(tmp==private$.segSites)) if(private$.sepMap){ private$.femaleMap = genMap private$.femaleCentromere = centromere }else{ private$.sepMap = TRUE private$.maleMap = private$.femaleMap private$.femaleMap = genMap private$.maleCentromere = private$.femaleCentromere private$.femaleCentromere = centromere } invisible(self) }, #' @description #' Replaces existing male genetic map. #' #' @param genMap a list of length nChr containing #' numeric vectors for the position of each segregating #' site on a chromosome. #' @param centromere a numeric vector of centromere #' positions. If NULL, the centromere are assumed to #' be metacentric. switchMaleMap = function(genMap, centromere=NULL){ genMap = lapply(genMap, function(x) x-x[1]) # Set position 1 to 0 if(is.null(centromere)){ centromere=sapply(genMap,max)/2 } stopifnot(length(genMap)==self$nChr, centromere<=sapply(genMap,max)) tmp = do.call("c",lapply(genMap,length)) stopifnot(all(tmp==private$.segSites)) private$.sepMap = TRUE private$.maleMap = genMap private$.maleCentromere = centromere invisible(self) }, #### Internal (public) ---- #' @description For internal use only. 
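#' Appends new individuals to the pedigree and records their recombination
#' history, assigning new founder haplotypes when no history is supplied.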
#' #' @param lastId ID of last individual #' @param id the name of each individual #' @param mother vector of mother iids #' @param father vector of father iids #' @param isDH indicator for DH lines #' @param hist new recombination history #' @param ploidy ploidy level addToRec = function(lastId,id,mother,father,isDH, hist,ploidy){ nNewInd = lastId-private$.lastId stopifnot(nNewInd>0) if(length(isDH)==1) isDH = rep(isDH,nNewInd) mother = as.integer(mother) father = as.integer(father) isDH = as.integer(isDH) stopifnot(length(mother)==nNewInd, length(father)==nNewInd, length(isDH)==nNewInd) tmp = cbind(mother,father,isDH) rownames(tmp) = id if(is.null(hist)){ newRecHist = vector("list",nNewInd) tmpLastHaplo = private$.lastHaplo if(all(isDH==1L)){ for(i in 1:nNewInd){ tmpLastHaplo = tmpLastHaplo + 1L newRecHist[[i]] = rep(tmpLastHaplo, ploidy) } }else{ for(i in 1:nNewInd){ newRecHist[[i]] = (tmpLastHaplo+1L):(tmpLastHaplo+ploidy) tmpLastHaplo = tmpLastHaplo + ploidy } } private$.hasHap = c(private$.hasHap, rep(FALSE, nNewInd)) private$.isFounder = c(private$.isFounder, rep(TRUE, nNewInd)) private$.recHist = c(private$.recHist, newRecHist) private$.lastHaplo = tmpLastHaplo }else{ # Add hist to recombination history private$.hasHap = c(private$.hasHap, rep(FALSE, nNewInd)) private$.isFounder = c(private$.isFounder, rep(FALSE, nNewInd)) private$.recHist = c(private$.recHist, hist) } private$.pedigree = rbind(private$.pedigree, tmp) private$.lastId = lastId invisible(self) }, #' @description For internal use only. #' #' @param iid internal ID ibdHaplo = function(iid){ if(all(private$.hasHap[iid])){ # Return relevant haplotypes return(private$.hap[as.character(iid)]) } ## Fill in missing haplotypes # Determine unique iid for needed individuals without hap data uid = list() i = 1L uid[[i]] = unique(iid) while(any(uid[[i]]!=0L)){ i = i+1L uid[[i]] = unique(c(private$.pedigree[uid[[i-1]],1:2])) } uid = unique(unlist(uid)) uid = sort(uid)[-1] # First one is always zero uid = uid[!private$.hasHap[uid]] # Split uid by founder and non-founder fuid = uid[private$.isFounder[uid]] nfuid = uid[!private$.isFounder[uid]] # Set hap for founders if(length(fuid)>0){ nChr = length(private$.femaleMap) newHap = getFounderIbd(founder=private$.recHist[fuid], nChr=nChr) names(newHap) = as.character(fuid) private$.hap = c(private$.hap, newHap) private$.hasHap[fuid] = TRUE } # Set hap for non-founders if(length(nfuid)>0){ for(id in nfuid){ mother = as.character(private$.pedigree[id,1]) father = as.character(private$.pedigree[id,2]) private$.hap[[as.character(id)]] = getNonFounderIbd(recHist=private$.recHist[[id ]], mother=private$.hap[[mother]], father=private$.hap[[father]]) private$.hasHap[id] = TRUE } } # Return relevant haplotypes return(private$.hap[as.character(iid)]) }, #' @description For internal use only. #' #' @param lastId last ID assigned updateLastId = function(lastId){ lastId = as.integer(lastId) stopifnot(lastId>=private$.lastId) private$.lastId = lastId invisible(self) }, #' @description For internal use only. 
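#' Appends new individuals to the pedigree without recording recombination
#' history.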
    #'
    #' @param lastId ID of last individual
    #' @param id the name of each individual
    #' @param mother vector of mother iids
    #' @param father vector of father iids
    #' @param isDH indicator for DH lines
    addToPed = function(lastId,id,mother,father,isDH){
      nNewInd = lastId-private$.lastId
      stopifnot(nNewInd>0)
      if(length(isDH)==1) isDH = rep(isDH,nNewInd)
      mother = as.integer(mother)
      father = as.integer(father)
      isDH = as.integer(isDH)
      stopifnot(length(mother)==nNewInd,
                length(father)==nNewInd,
                length(isDH)==nNewInd)
      tmp = cbind(mother,father,isDH)
      rownames(tmp) = id
      private$.pedigree = rbind(private$.pedigree,tmp)
      private$.lastId = lastId
      invisible(self)
    }
  ),
  private = list(
    #### Private ----
    .restrSites="logical",
    .traits="list",
    .segSites="integer",
    .sexes="character",
    .femaleMap="list",
    .maleMap="list",
    .sepMap="logical",
    .femaleCentromere="numeric",
    .maleCentromere="numeric",
    .lastId="integer",
    .isTrackPed="logical",
    .pedigree="matrix",
    .isTrackRec="logical",
    .recHist="list",
    .varA="numeric",
    .varG="numeric",
    .varE="numeric",
    .version="character",
    .lastHaplo="integer",
    .hasHap="logical",
    .hap="list",
    .isFounder="logical",

    # Determines whether or not a simulation has started, using lastId as an indicator
    .isRunning = function(){
      if(private$.lastId==0L){
        invisible(self)
      }else{
        stop("lastId doesn't equal 0, you must run resetPed to proceed")
      }
    },

    # Adds a trait to simulation and ensures all fields are propagated
    .addTrait = function(lociMap,varA=NA_real_,varG=NA_real_,varE=NA_real_){
      stopifnot(is.numeric(varA),is.numeric(varG),is.numeric(varE),
                length(varA)==1,length(varG)==1,length(varE)==1)
      private$.traits[[self$nTraits + 1L]] = lociMap
      private$.varA = c(private$.varA,varA)
      private$.varG = c(private$.varG,varG)
      private$.varE = c(private$.varE,varE)
      invisible(self)
    },

    # Samples eligible loci for traits or SNP chips and ensures that they
    # are added to the exclusion list when applicable
    .pickLoci = function(nSitesPerChr, QTL=TRUE, minFreq=NULL, refPop=NULL){
      stopifnot(length(nSitesPerChr)==self$nChr)
      # Get invalid sites
      if(QTL){
        restr = self$invalidQtl
      }else{
        restr = self$invalidSnp
      }
      # Identify potential sites
      pot = vector('list', self$nChr)
      for(i in 1:self$nChr){
        pot[[i]] = setdiff(1:private$.segSites[i], restr[[i]])
      }
      # Filter for minimum frequency
      if(!is.null(minFreq)){
        if(is.null(refPop)){
          refPop = self$founderPop
        }
        for(chr in 1:self$nChr){
          q = calcChrFreq(refPop@geno[[chr]])
          q = 0.5-abs(q-0.5) # Convert to minor allele frequency
          tmp = which(q>=minFreq)
          pot[[chr]] = tmp[tmp%in%pot[[chr]]]
        }
      }
      stopifnot(sapply(pot,length)>=nSitesPerChr)
      # Sample sites
      lociLoc = lapply(1:self$nChr,function(x){
        if(nSitesPerChr[x]==0){
          return(NULL)
        }else{
          if(length(pot[[x]])==1){
            tmp = pot[[x]]
          }else{
            tmp = sort(sample(pot[[x]],nSitesPerChr[x]))
          }
          # Add site restrictions
          if(private$.restrSites){
            if(QTL){
              self$invalidSnp[[x]] = sort(union(tmp, self$invalidSnp[[x]]))
            }else{
              self$invalidQtl[[x]] = sort(union(tmp, self$invalidQtl[[x]]))
            }
          }
          return(tmp)
        }
      })
      # Create and return a LociMap
      lociLoc = do.call("c",lociLoc)
      loci = new("LociMap",
                 nLoci=as.integer(sum(nSitesPerChr)),
                 lociPerChr=as.integer(nSitesPerChr),
                 lociLoc=as.integer(lociLoc))
      return(loci)
    },

    # Returns physical positions of named loci in a list format.
    # Input order is not preserved. This function is intended as
    # a helper for restrSegSites
    .findNamedLoci = function(lociNames){
      # Loci names
      id = unlist(unname(lapply(private$.femaleMap, names)))
      take = match(lociNames, id)
      if(any(is.na(take))){
        stop("One or more loci are not on the genetic map. Beware of case sensitivity.")
      }
      # Find positions using an interval search strategy on the cumulative sum
      take = unique(take)
      posList = rep(list(integer()), self$nChr)
      cumSumSegSite = cumsum(private$.segSites)
      for(i in take){
        # Identify chromosome
        chr = findInterval(i, cumSumSegSite, left.open = TRUE) + 1L
        # Identify position
        if(chr>1L){
          pos = i - cumSumSegSite[chr-1L]
        }else{
          pos = i
        }
        # Add site to list
        posList[[chr]] = c(posList[[chr]], pos)
      }
      return(posList)
    }
  ),
  active = list(
    #### Active ----
    #' @field traitNames vector of trait names
    traitNames=function(value){
      if(missing(value)){
        traitNames = sapply(private$.traits, function(x){
          x@name
        })
        return(traitNames)
      }else{
        value = as.character(value)
        if(length(value)!=self$nTraits){
          stop("length of traitNames vector must equal ",self$nTraits)
        }
        for(i in 1:self$nTraits){
          private$.traits[[i]]@name = value[i]
        }
      }
    },
    #' @field snpChipNames vector of chip names
    snpChipNames=function(value){
      if(missing(value)){
        snpChipNames = sapply(self$snpChips, function(x){
          x@name
        })
        return(snpChipNames)
      }else{
        value = as.character(value)
        if(length(value)!=self$nSnpChips){
          stop("length of snpChipNames vector must equal ",self$nSnpChips)
        }
        for(i in 1:self$nSnpChips){
          self$snpChips[[i]]@name = value[i]
        }
      }
    },
    #' @field traits list of traits
    traits=function(value){
      if(missing(value)){
        private$.traits
      }else{
        stop("`$traits` is read only, see manAddTrait",call.=FALSE)
      }
    },
    #' @field nChr number of chromosomes
    nChr=function(value){
      if(missing(value)){
        length(private$.segSites)
      }else{
        stop("`$nChr` is read only",call.=FALSE)
      }
    },
    #' @field nTraits number of traits
    nTraits=function(value){
      if(missing(value)){
        length(private$.traits)
      }else{
        stop("`$nTraits` is read only",call.=FALSE)
      }
    },
    #' @field nSnpChips number of SNP chips
    nSnpChips=function(value){
      if(missing(value)){
        length(self$snpChips)
      }else{
        stop("`$nSnpChips` is read only",call.=FALSE)
      }
    },
    #' @field segSites segregating sites per chromosome
    segSites=function(value){
      if(missing(value)){
        private$.segSites
      }else{
        stop("`$segSites` is read only",call.=FALSE)
      }
    },
    #' @field sexes sexes used for mating
    sexes=function(value){
      if(missing(value)){
        private$.sexes
      }else{
        stop("`$sexes` is read only",call.=FALSE)
      }
    },
    #' @field sepMap are there separate genetic maps for
    #' males and females
    sepMap=function(value){
      if(missing(value)){
        private$.sepMap
      }else{
        stop("`$sepMap` is read only",call.=FALSE)
      }
    },
    #' @field genMap "matrix" of chromosome genetic maps
    genMap=function(value){
      if(missing(value)){
        if(private$.sepMap){
          genMap = vector("list",self$nChr)
          for(i in 1:self$nChr){
            genMap[[i]] = (private$.femaleMap[[i]]+
                             private$.maleMap[[i]])/2
          }
          as.matrix(genMap)
        }else{
          private$.femaleMap
        }
      }else{
        stop("`$genMap` is read only",call.=FALSE)
      }
    },
    #' @field femaleMap "matrix" of chromosome genetic maps for
    #' females
    femaleMap=function(value){
      if(missing(value)){
        private$.femaleMap
      }else{
        stop("`$femaleMap` is read only",call.=FALSE)
      }
    },
    #' @field maleMap "matrix" of chromosome genetic maps for
    #' males
    maleMap=function(value){
      if(missing(value)){
        if(private$.sepMap){
          private$.maleMap
        }else{
          private$.femaleMap
        }
      }else{
        stop("`$maleMap` is read only",call.=FALSE)
      }
    },
    #' @field centromere position of centromeres on the genetic map
    centromere=function(value){
      if(missing(value)){
        if(private$.sepMap){
          (private$.femaleCentromere+private$.maleCentromere)/2
        }else{
          private$.femaleCentromere
        }
      }else{
        stop("`$centromere` is read only",call.=FALSE)
      }
    },
    #' @field femaleCentromere position of centromeres on female
    #' genetic map
femaleCentromere=function(value){ if(missing(value)){ private$.femaleCentromere }else{ stop("`$femaleCentromere` is read only",call.=FALSE) } }, #' @field maleCentromere position of centromeres on male #' genetic map maleCentromere=function(value){ if(missing(value)){ if(private$.sepMap){ private$.maleCentromere }else{ private$.femaleCentromere } }else{ stop("`$maleCentromere` is read only",call.=FALSE) } }, #' @field lastId last ID number assigned lastId=function(value){ if(missing(value)){ private$.lastId }else{ stop("`$lastId` is read only",call.=FALSE) } }, #' @field isTrackPed is pedigree being tracked isTrackPed=function(value){ if(missing(value)){ private$.isTrackPed }else{ stop("`$isTrackPed` is read only",call.=FALSE) } }, #' @field pedigree pedigree matrix for all individuals pedigree=function(value){ if(missing(value)){ private$.pedigree }else{ stop("`$pedigree` is read only",call.=FALSE) } }, #' @field isTrackRec is recombination being tracked isTrackRec=function(value){ if(missing(value)){ private$.isTrackRec }else{ stop("`$isTrackRec` is read only",call.=FALSE) } }, #' @field recHist list of historic recombination events recHist=function(value){ if(missing(value)){ private$.recHist }else{ stop("`$recHist` is read only",call.=FALSE) } }, #' @field haplotypes list of computed IBD haplotypes haplotypes=function(value){ if(missing(value)){ private$.hap }else{ stop("`$haplotypes` is read only",call.=FALSE) } }, #' @field varA additive genetic variance in founderPop varA=function(value){ if(missing(value)){ private$.varA }else{ stop("`$varA` is read only",call.=FALSE) } }, #' @field varG total genetic variance in founderPop varG=function(value){ if(missing(value)){ private$.varG }else{ stop("`$varG` is read only",call.=FALSE) } }, #' @field varE default error variance varE=function(value){ if(missing(value)){ private$.varE }else{ stop("`$varE` is read only",call.=FALSE) } }, #' @field version the version of AlphaSimR used to generate this object version=function(value){ if(missing(value)){ private$.version }else{ stop("`$version` is read only",call.=FALSE) } } ) ) #### External helpers ---- sampAddEff = function(qtlLoci,nTraits,corr,gamma,shape){ addEff = matrix(rnorm(qtlLoci@nLoci*nTraits), ncol=nTraits)%*%transMat(corr) if(any(gamma)){ for(i in which(gamma)){ x = (pnorm(addEff[,i])-0.5)*2 addEff[,i] = sign(x)*qgamma(abs(x),shape=shape) } } return(addEff) } sampDomEff = function(qtlLoci,nTraits,addEff,corDD, meanDD,varDD){ domEff = matrix(rnorm(qtlLoci@nLoci*nTraits), ncol=nTraits)%*%transMat(corDD) domEff = sweep(domEff,2,sqrt(varDD),"*") domEff = sweep(domEff,2,meanDD,"+") domEff = abs(addEff)*domEff return(domEff) } sampEpiEff = function(qtlLoci,nTraits,corr,gamma,shape,relVar){ epiEff = matrix(rnorm(qtlLoci@nLoci*nTraits/2), ncol=nTraits)%*%transMat(corr) if(any(gamma)){ for(i in which(gamma)){ x = (pnorm(epiEff[,i])-0.5)*2 epiEff[,i] = sign(x)*qgamma(abs(x),shape=shape) } } epiEff = sweep(epiEff,2,sqrt(relVar),"*") return(epiEff) } # Test if object is of SimParam class isSimParam = function(x) { ret = is(x, class2 = "SimParam") return(ret) }
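# Illustrative sketch, kept inert with `if (FALSE)`: how the read-only active
# bindings defined above behave from the user's side. Assumes only the exported
# API already used in this package's roxygen examples (quickHaplo, SimParam$new).
if(FALSE){
  founderPop = quickHaplo(nInd=10, nChr=2, segSites=10)
  SP = SimParam$new(founderPop)
  SP$addTraitA(5)

  SP$nChr        # 2, derived from length(private$.segSites)
  SP$nTraits     # 1, derived from length(private$.traits)
  isSimParam(SP) # TRUE

  # Assignment to a read-only active binding raises an informative error
  try(SP$nChr <- 5) # Error: `$nChr` is read only
}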
# File: AlphaSimR/R/Class-SimParam.R
convertTraitsToNames = function(traits, simParam){
  if(is.character(traits)){
    # Suspect trait is a name; validate it against the known trait names
    take = match(traits, simParam$traitNames)
    if(any(is.na(take))){
      stop("'",paste(traits[is.na(take)],collapse="', '"),
           "' did not match any trait names")
    }
  }else if(is.function(traits)){
    traits = "Custom Function"
  }else{
    traits = simParam$traitNames[traits]
  }
  return(traits)
}

#' @title Fast RR-BLUP
#'
#' @description
#' Solves an RR-BLUP model for genomic predictions given known variance
#' components. This implementation is meant as a fast and low memory
#' alternative to \code{\link{RRBLUP}} or \code{\link{RRBLUP2}}. Unlike
#' those functions, fastRRBLUP does not fit fixed effects (other
#' than the intercept) or account for unequal replication.
#'
#' @param pop a \code{\link{Pop-class}} to serve as the training population
#' @param traits an integer indicating the trait to model, a trait name,
#' or a function of the traits returning a single value. Only univariate models
#' are supported.
#' @param use train model using phenotypes "pheno", genetic values "gv",
#' estimated breeding values "ebv", breeding values "bv", or randomly "rand"
#' @param snpChip an integer indicating which SNP chip genotype
#' to use
#' @param useQtl should QTL genotypes be used instead of a SNP chip.
#' If TRUE, snpChip specifies which trait's QTL to use, and thus these
#' QTL may not match the QTL underlying the phenotype supplied in traits.
#' @param maxIter maximum number of iterations.
#' @param Vu marker effect variance. If value is NULL, a
#' reasonable value is chosen automatically.
#' @param Ve error variance. If value is NULL, a
#' reasonable value is chosen automatically.
#' @param simParam an object of \code{\link{SimParam}}
#' @param ... additional arguments if using a function for
#' traits
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=20)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitA(10)
#' SP$setVarE(h2=0.5)
#' SP$addSnpChip(10)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' #Run GS model and set EBV
#' ans = fastRRBLUP(pop, simParam=SP)
#' pop = setEBV(pop, ans, simParam=SP)
#'
#' #Evaluate accuracy
#' cor(gv(pop), ebv(pop))
#'
#' @export
fastRRBLUP = function(pop, traits=1, use="pheno", snpChip=1,
                      useQtl=FALSE, maxIter=1000, Vu=NULL, Ve=NULL,
                      simParam=NULL, ...){
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }
  y = getResponse(pop=pop,trait=traits,use=use,
                  simParam=simParam,...)
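  # Resolve `traits` (an index, a name, or a function) to a display name;
  # the name is only used to label the estimated traits in the RRsol output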
traits = convertTraitsToNames(traits, simParam) #fixEff = as.integer(factor(pop@fixEff)) if(useQtl){ nLoci = simParam$traits[[snpChip]]@nLoci lociPerChr = simParam$traits[[snpChip]]@lociPerChr lociLoc = simParam$traits[[snpChip]]@lociLoc }else{ nLoci = simParam$snpChips[[snpChip]]@nLoci lociPerChr = simParam$snpChips[[snpChip]]@lociPerChr lociLoc = simParam$snpChips[[snpChip]]@lociLoc } # Sort out Vu and Ve if(is.function(traits)){ if(is.null(Vu)){ Vu = var(y)/nLoci } if(is.null(Ve)){ Ve = var(y)/2 } }else{ stopifnot(length(traits)==1) if(is.null(Vu)){ Vu = 2*simParam$varA[traits]/nLoci if(is.na(Vu)){ Vu = var(y)/nLoci } } if(is.null(Ve)){ Ve = simParam$varE[traits] if(is.na(Ve)){ Ve = var(y)/2 } } } #Fit model ans = callFastRRBLUP(y,pop@geno,lociPerChr, lociLoc,Vu,Ve,maxIter, simParam$nThreads) bv = new("TraitA", nLoci=nLoci, lociPerChr=lociPerChr, lociLoc=lociLoc, addEff=c(ans$alpha), intercept=c(ans$beta), name=paste0("est_BV_",traits)) gv = new("TraitA", nLoci=nLoci, lociPerChr=lociPerChr, lociLoc=lociLoc, addEff=c(ans$alpha), intercept=c(ans$mu), name=paste0("est_GV_",traits)) output = new("RRsol", bv = list(bv), gv = list(gv), female = as.list(NULL), male = as.list(NULL), Vu = as.matrix(Vu), Ve = as.matrix(Ve)) return(output) } #' @title RR-BLUP Model #' #' @description #' Fits an RR-BLUP model for genomic predictions. #' #' @param pop a \code{\link{Pop-class}} to serve as the training population #' @param traits an integer indicating the trait or traits to model, a vector of trait names, #' or a function of the traits returning a single value. #' @param use train model using phenotypes "pheno", genetic values "gv", #' estimated breeding values "ebv", breeding values "bv", or randomly "rand" #' @param snpChip an integer indicating which SNP chip genotype #' to use #' @param useQtl should QTL genotypes be used instead of a SNP chip. #' If TRUE, snpChip specifies which trait's QTL to use, and thus these #' QTL may not match the QTL underlying the phenotype supplied in traits. #' @param maxIter maximum number of iterations. Only used #' when number of traits is greater than 1. #' @param simParam an object of \code{\link{SimParam}} #' @param ... additional arguments if using a function for #' traits #' #' @examples #' #Create founder haplotypes #' founderPop = quickHaplo(nInd=10, nChr=1, segSites=20) #' #' #Set simulation parameters #' SP = SimParam$new(founderPop) #' \dontshow{SP$nThreads = 1L} #' SP$addTraitA(10) #' SP$setVarE(h2=0.5) #' SP$addSnpChip(10) #' #' #Create population #' pop = newPop(founderPop, simParam=SP) #' #' #Run GS model and set EBV #' ans = RRBLUP(pop, simParam=SP) #' pop = setEBV(pop, ans, simParam=SP) #' #' #Evaluate accuracy #' cor(gv(pop), ebv(pop)) #' #' @export RRBLUP = function(pop, traits=1, use="pheno", snpChip=1, useQtl=FALSE, maxIter=1000L, simParam=NULL, ...){ if(is.null(simParam)){ simParam = get("SP",envir=.GlobalEnv) } y = getResponse(pop=pop,trait=traits,use=use, simParam=simParam,...) 
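  # Resolve trait indices/functions to names for labeling per-trait outputs;
  # `fixEff` below recodes pop@fixEff as consecutive integers for the solver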
  traits = convertTraitsToNames(traits, simParam)
  fixEff = as.integer(factor(pop@fixEff))
  if(useQtl){
    nLoci = simParam$traits[[snpChip]]@nLoci
    lociPerChr = simParam$traits[[snpChip]]@lociPerChr
    lociLoc = simParam$traits[[snpChip]]@lociLoc
  }else{
    nLoci = simParam$snpChips[[snpChip]]@nLoci
    lociPerChr = simParam$snpChips[[snpChip]]@lociPerChr
    lociLoc = simParam$snpChips[[snpChip]]@lociLoc
  }
  #Fit model
  if(ncol(y)>1){
    ans = callRRBLUP_MV(y, fixEff, pop@geno,
                        lociPerChr, lociLoc, maxIter,
                        simParam$nThreads)
  }else{
    ans = callRRBLUP(y, fixEff, pop@geno,
                     lociPerChr, lociLoc,
                     simParam$nThreads)
  }
  markerEff=ans$u
  bv = gv = vector("list",ncol(y))
  for(i in 1:ncol(y)){
    bv[[i]] = new("TraitA",
                  nLoci=nLoci,
                  lociPerChr=lociPerChr,
                  lociLoc=lociLoc,
                  addEff=ans$alpha[,i],
                  intercept=ans$beta[i],
                  name=paste0("est_BV_",traits[i]))
    gv[[i]] = new("TraitA",
                  nLoci=nLoci,
                  lociPerChr=lociPerChr,
                  lociLoc=lociLoc,
                  addEff=ans$alpha[,i],
                  intercept=ans$mu[i],
                  name=paste0("est_GV_",traits[i]))
  }
  output = new("RRsol",
               bv = bv,
               gv = gv,
               female = as.list(NULL),
               male = as.list(NULL),
               Vu = as.matrix(ans$Vu),
               Ve = as.matrix(ans$Ve))
  return(output)
}

#' @title RR-BLUP Model 2
#'
#' @description
#' Fits an RR-BLUP model for genomic predictions. This implementation is
#' meant for situations where \code{\link{RRBLUP}} is too slow. Note that
#' RRBLUP2 is only faster in certain situations, see details below. Most
#' users should use \code{\link{RRBLUP}}.
#'
#' @param pop a \code{\link{Pop-class}} to serve as the training population
#' @param traits an integer indicating the trait to model, a trait name, or a
#' function of the traits returning a single value. Unlike \code{\link{RRBLUP}},
#' only univariate models are supported.
#' @param use train model using phenotypes "pheno", genetic values "gv",
#' estimated breeding values "ebv", breeding values "bv", or randomly "rand"
#' @param snpChip an integer indicating which SNP chip genotype
#' to use
#' @param useQtl should QTL genotypes be used instead of a SNP chip.
#' If TRUE, snpChip specifies which trait's QTL to use, and thus these
#' QTL may not match the QTL underlying the phenotype supplied in traits.
#' @param maxIter maximum number of iterations.
#' @param Vu marker effect variance. If value is NULL, a
#' reasonable starting point is chosen automatically.
#' @param Ve error variance. If value is NULL, a
#' reasonable starting point is chosen automatically.
#' @param useEM use EM to solve variance components. If false,
#' the initial values are considered true.
#' @param tol tolerance for EM algorithm convergence
#' @param simParam an object of \code{\link{SimParam}}
#' @param ... additional arguments if using a function for
#' traits
#'
#' @details
#' The RRBLUP2 function works best when the number of markers is not
#' too large. This is because it solves the RR-BLUP problem by setting
#' up and solving Henderson's mixed model equations. Solving these equations
#' involves a square matrix with dimensions equal to the number of fixed
#' effects plus the number of random effects (markers). Whereas the \code{\link{RRBLUP}}
#' function solves the RR-BLUP problem using the EMMA approach. This approach involves
#' a square matrix with dimensions equal to the number of phenotypic records. This means
#' that the RRBLUP2 function uses less memory than RRBLUP when the number of markers
#' is approximately equal to or smaller than the number of phenotypic records.
#'
#' The RRBLUP2 function is not recommended for cases where the variance components are
#' unknown. This is because it uses the EM algorithm to solve for unknown variance components,
#' which is generally considerably slower than the EMMA approach of \code{\link{RRBLUP}}.
#' The number of iterations for the EM algorithm is set by maxIter. The default value
#' is typically too small for convergence. When the algorithm fails to converge a
#' warning is displayed, but results are given for the last iteration. These results may
#' be "good enough". However, we make no claim to this effect, because we cannot generalize
#' to all possible use cases.
#'
#' The RRBLUP2 function can quickly solve the mixed model equations without estimating variance
#' components. The variance components are set by defining Vu and Ve. Estimation of components
#' is suppressed by setting useEM to false. This may be useful if the model is being retrained
#' multiple times during the simulation. You could run the \code{\link{RRBLUP}} function the first
#' time the model is trained, and then use the variance components from this output for all
#' future runs with the RRBLUP2 function. Again, we can make no claim to the general robustness
#' of this approach.
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=20)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitA(10)
#' SP$setVarE(h2=0.5)
#' SP$addSnpChip(10)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' #Run GS model and set EBV
#' ans = RRBLUP2(pop, simParam=SP)
#' pop = setEBV(pop, ans, simParam=SP)
#'
#' #Evaluate accuracy
#' cor(gv(pop), ebv(pop))
#'
#' @export
RRBLUP2 = function(pop, traits=1, use="pheno", snpChip=1,
                   useQtl=FALSE, maxIter=10, Vu=NULL, Ve=NULL,
                   useEM=TRUE, tol=1e-6, simParam=NULL, ...){
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }
  y = getResponse(pop=pop,trait=traits,use=use,
                  simParam=simParam,...)
  traits = convertTraitsToNames(traits, simParam)
  fixEff = as.integer(factor(pop@fixEff))
  if(useQtl){
    nLoci = simParam$traits[[snpChip]]@nLoci
    lociPerChr = simParam$traits[[snpChip]]@lociPerChr
    lociLoc = simParam$traits[[snpChip]]@lociLoc
  }else{
    nLoci = simParam$snpChips[[snpChip]]@nLoci
    lociPerChr = simParam$snpChips[[snpChip]]@lociPerChr
    lociLoc = simParam$snpChips[[snpChip]]@lociLoc
  }
  # Sort out Vu and Ve
  if(is.function(traits)){
    if(is.null(Vu)){
      Vu = var(y)/nLoci
    }
    if(is.null(Ve)){
      Ve = var(y)/2
    }
  }else{
    stopifnot(length(traits)==1)
    if(is.null(Vu)){
      Vu = 2*simParam$varA[traits]/nLoci
      if(is.na(Vu)){
        Vu = var(y)/nLoci
      }
    }
    if(is.null(Ve)){
      Ve = simParam$varE[traits]
      if(is.na(Ve)){
        Ve = var(y)/2
      }
    }
  }
  #Fit model
  ans = callRRBLUP2(y, fixEff, pop@geno,
                    lociPerChr, lociLoc, Vu, Ve, tol,
                    maxIter, useEM, simParam$nThreads)
  bv = new("TraitA",
           nLoci=nLoci,
           lociPerChr=lociPerChr,
           lociLoc=lociLoc,
           addEff=c(ans$alpha),
           intercept=c(ans$beta),
           name=paste0("est_BV_",traits))
  gv = new("TraitA",
           nLoci=nLoci,
           lociPerChr=lociPerChr,
           lociLoc=lociLoc,
           addEff=c(ans$alpha),
           intercept=c(ans$mu),
           name=paste0("est_GV_",traits))
  output = new("RRsol",
               bv = list(bv),
               gv = list(gv),
               female = as.list(NULL),
               male = as.list(NULL),
               Vu = as.matrix(ans$Vu),
               Ve = as.matrix(ans$Ve))
  return(output)
}

#' @title RR-BLUP Model with Dominance
#'
#' @description
#' Fits an RR-BLUP model for genomic predictions that includes
#' dominance effects.
#'
#' @param pop a \code{\link{Pop-class}} to serve as the training population
#' @param traits an integer indicating the trait to model, a trait name, or a
#' function of the traits returning a single value.
#' @param use train model using phenotypes "pheno", genetic values "gv", #' estimated breeding values "ebv", breeding values "bv", or randomly "rand" #' @param snpChip an integer indicating which SNP chip genotype #' to use #' @param useQtl should QTL genotypes be used instead of a SNP chip. #' If TRUE, snpChip specifies which trait's QTL to use, and thus these #' QTL may not match the QTL underlying the phenotype supplied in traits. #' @param maxIter maximum number of iterations. Only used #' when number of traits is greater than 1. #' @param simParam an object of \code{\link{SimParam}} #' @param ... additional arguments if using a function for #' traits #' #' @examples #' #Create founder haplotypes #' founderPop = quickHaplo(nInd=10, nChr=1, segSites=20) #' #' #Set simulation parameters #' SP = SimParam$new(founderPop) #' \dontshow{SP$nThreads = 1L} #' SP$addTraitAD(10, meanDD=0.5) #' SP$setVarE(h2=0.5) #' SP$addSnpChip(10) #' #' #Create population #' pop = newPop(founderPop, simParam=SP) #' #' #Run GS model and set EBV #' ans = RRBLUP_D(pop, simParam=SP) #' pop = setEBV(pop, ans, simParam=SP) #' #' #Evaluate accuracy #' cor(gv(pop), ebv(pop)) #' #' @export RRBLUP_D = function(pop, traits=1, use="pheno", snpChip=1, useQtl=FALSE, maxIter=40L, simParam=NULL, ...){ if(is.null(simParam)){ simParam = get("SP",envir=.GlobalEnv) } y = getResponse(pop=pop,trait=traits,use=use, simParam=simParam,...) traits = convertTraitsToNames(traits, simParam) fixEff = as.integer(factor(pop@fixEff)) if(useQtl){ nLoci = simParam$traits[[snpChip]]@nLoci lociPerChr = simParam$traits[[snpChip]]@lociPerChr lociLoc = simParam$traits[[snpChip]]@lociLoc }else{ nLoci = simParam$snpChips[[snpChip]]@nLoci lociPerChr = simParam$snpChips[[snpChip]]@lociPerChr lociLoc = simParam$snpChips[[snpChip]]@lociLoc } #Fit model stopifnot(ncol(y)==1) ans = callRRBLUP_D(y, fixEff, pop@geno, lociPerChr, lociLoc, maxIter, simParam$nThreads) bv = new("TraitA", nLoci=nLoci, lociPerChr=lociPerChr, lociLoc=lociLoc, addEff=c(ans$alpha), intercept=c(ans$beta), name=paste0("est_BV_",traits)) gv = new("TraitAD", nLoci=nLoci, lociPerChr=lociPerChr, lociLoc=lociLoc, addEff=c(ans$a), domEff=c(ans$d), intercept=c(ans$mu), name=paste0("est_GV_",traits)) output = new("RRsol", bv = list(bv), gv = list(gv), female = as.list(NULL), male = as.list(NULL), Vu = as.matrix(ans$Vu), Ve = as.matrix(ans$Ve)) return(output) } #' @title RR-BLUP with Dominance Model 2 #' #' @description #' Fits an RR-BLUP model for genomic predictions that includes #' dominance effects. This implementation is meant for situations where #' \code{\link{RRBLUP_D}} is too slow. Note that RRBLUP_D2 #' is only faster in certain situations. Most users should use #' \code{\link{RRBLUP_D}}. #' #' @param pop a \code{\link{Pop-class}} to serve as the training population #' @param traits an integer indicating the trait to model, a trait name, or a #' function of the traits returning a single value. #' @param use train model using phenotypes "pheno", genetic values "gv", #' estimated breeding values "ebv", breeding values "bv", or randomly "rand" #' @param snpChip an integer indicating which SNP chip genotype #' to use #' @param useQtl should QTL genotypes be used instead of a SNP chip. #' If TRUE, snpChip specifies which trait's QTL to use, and thus these #' QTL may not match the QTL underlying the phenotype supplied in traits. #' @param maxIter maximum number of iterations. Only used #' when number of traits is greater than 1. #' @param Va marker effect variance for additive effects. 
If value is NULL,
#' a reasonable starting point is chosen automatically.
#' @param Vd marker effect variance for dominance effects. If value is NULL,
#' a reasonable starting point is chosen automatically.
#' @param Ve error variance. If value is NULL, a
#' reasonable starting point is chosen automatically.
#' @param useEM use EM to solve variance components. If false,
#' the initial values are considered true.
#' @param tol tolerance for EM algorithm convergence
#' @param simParam an object of \code{\link{SimParam}}
#' @param ... additional arguments if using a function for
#' traits
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=20)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitAD(10, meanDD=0.5)
#' SP$setVarE(h2=0.5)
#' SP$addSnpChip(10)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' #Run GS model and set EBV
#' ans = RRBLUP_D2(pop, simParam=SP)
#' pop = setEBV(pop, ans, simParam=SP)
#'
#' #Evaluate accuracy
#' cor(gv(pop), ebv(pop))
#'
#' @export
RRBLUP_D2 = function(pop, traits=1, use="pheno", snpChip=1,
                     useQtl=FALSE, maxIter=10, Va=NULL, Vd=NULL,
                     Ve=NULL, useEM=TRUE, tol=1e-6, simParam=NULL, ...){
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }
  y = getResponse(pop=pop,trait=traits,use=use,
                  simParam=simParam,...)
  traits = convertTraitsToNames(traits, simParam)
  fixEff = as.integer(factor(pop@fixEff))
  if(useQtl){
    nLoci = simParam$traits[[snpChip]]@nLoci
    lociPerChr = simParam$traits[[snpChip]]@lociPerChr
    lociLoc = simParam$traits[[snpChip]]@lociLoc
  }else{
    nLoci = simParam$snpChips[[snpChip]]@nLoci
    lociPerChr = simParam$snpChips[[snpChip]]@lociPerChr
    lociLoc = simParam$snpChips[[snpChip]]@lociLoc
  }
  # Sort out Va, Vd and Ve
  if(is.function(traits)){
    if(is.null(Va)){
      Va = var(y)/nLoci
    }
    if(is.null(Vd)){
      Vd = var(y)/nLoci
    }
    if(is.null(Ve)){
      Ve = var(y)/2
    }
  }else{
    stopifnot(length(traits)==1)
    if(is.null(Va)){
      Va = 2*simParam$varA[traits]/nLoci
      if(is.na(Va)){
        Va = var(y)/nLoci
      }
    }
    if(is.null(Vd)){
      Vd = 2*simParam$varA[traits]/nLoci
      if(is.na(Vd)){
        Vd = var(y)/nLoci
      }
    }
    if(is.null(Ve)){
      Ve = simParam$varE[traits]
      if(is.na(Ve)){
        Ve = var(y)/2
      }
    }
  }
  #Fit model
  stopifnot(ncol(y)==1)
  ans = callRRBLUP_D2(y, fixEff, pop@geno,
                      lociPerChr, lociLoc, maxIter,
                      Va, Vd, Ve, tol, useEM,
                      simParam$nThreads)
  bv = new("TraitA",
           nLoci=nLoci,
           lociPerChr=lociPerChr,
           lociLoc=lociLoc,
           addEff=c(ans$alpha),
           intercept=c(ans$beta),
           name=paste0("est_BV_",traits))
  gv = new("TraitAD",
           nLoci=nLoci,
           lociPerChr=lociPerChr,
           lociLoc=lociLoc,
           addEff=c(ans$a),
           domEff=c(ans$d),
           intercept=c(ans$mu),
           name=paste0("est_GV_",traits))
  output = new("RRsol",
               bv = list(bv),
               gv = list(gv),
               female = as.list(NULL),
               male = as.list(NULL),
               Vu = as.matrix(ans$Vu),
               Ve = as.matrix(ans$Ve))
  return(output)
}

#' @title RR-BLUP GCA Model
#'
#' @description
#' Fits an RR-BLUP model that estimates separate marker effects for
#' females and males. Useful for predicting GCA of parents
#' in single cross hybrids. Can also predict performance of specific
#' single cross hybrids.
#'
#' @param pop a \code{\link{Pop-class}} to serve as the training population
#' @param traits an integer indicating the trait to model, a trait name, or a
#' function of the traits returning a single value.
#' @param use train model using phenotypes "pheno", genetic values "gv",
#' estimated breeding values "ebv", breeding values "bv", or randomly "rand"
#' @param snpChip an integer indicating which SNP chip genotype
#' to use
#' @param useQtl should QTL genotypes be used instead of a SNP chip.
#' If TRUE, snpChip specifies which trait's QTL to use, and thus these
#' QTL may not match the QTL underlying the phenotype supplied in traits.
#' @param maxIter maximum number of iterations for convergence.
#' @param simParam an object of \code{\link{SimParam}}
#' @param ... additional arguments if using a function for
#' traits
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=20)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitA(10)
#' SP$setVarE(h2=0.5)
#' SP$addSnpChip(10)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' #Run GS model and set EBV
#' ans = RRBLUP_GCA(pop, simParam=SP)
#' pop = setEBV(pop, ans, simParam=SP)
#'
#' #Evaluate accuracy
#' cor(gv(pop), ebv(pop))
#'
#' @export
RRBLUP_GCA = function(pop, traits=1, use="pheno", snpChip=1,
                      useQtl=FALSE, maxIter=40L, simParam=NULL, ...){
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }
  y = getResponse(pop=pop,trait=traits,use=use,
                  simParam=simParam,...)
  traits = convertTraitsToNames(traits, simParam)
  fixEff = as.integer(factor(pop@fixEff))
  if(useQtl){
    nLoci = simParam$traits[[snpChip]]@nLoci
    lociPerChr = simParam$traits[[snpChip]]@lociPerChr
    lociLoc = simParam$traits[[snpChip]]@lociLoc
  }else{
    nLoci = simParam$snpChips[[snpChip]]@nLoci
    lociPerChr = simParam$snpChips[[snpChip]]@lociPerChr
    lociLoc = simParam$snpChips[[snpChip]]@lociLoc
  }
  #Fit model
  stopifnot(ncol(y)==1)
  ans = callRRBLUP_GCA(y, fixEff, pop@geno,
                       lociPerChr, lociLoc, maxIter,
                       simParam$nThreads)
  gv = new("TraitA2",
           nLoci=nLoci,
           lociPerChr=lociPerChr,
           lociLoc=lociLoc,
           addEff=c(ans$alpha1),
           addEffMale=c(ans$alpha2),
           intercept=c(ans$mu),
           name=paste0("est_GV_",traits))
  female = new("TraitA",
               nLoci=nLoci,
               lociPerChr=lociPerChr,
               lociLoc=lociLoc,
               addEff=c(ans$alpha1),
               intercept=c(ans$beta1),
               name=paste0("est_female_",traits))
  male = new("TraitA",
             nLoci=nLoci,
             lociPerChr=lociPerChr,
             lociLoc=lociLoc,
             addEff=c(ans$alpha2),
             intercept=c(ans$beta2),
             name=paste0("est_male_",traits))
  output = new("RRsol",
               gv = list(gv),
               bv = as.list(NULL),
               female = list(female),
               male = list(male),
               Vu = as.matrix(ans$Vu),
               Ve = as.matrix(ans$Ve))
  return(output)
}

#' @title RR-BLUP GCA Model 2
#'
#' @description
#' Fits an RR-BLUP model that estimates separate marker effects for
#' females and males. This implementation is meant for situations where
#' \code{\link{RRBLUP_GCA}} is too slow. Note that RRBLUP_GCA2
#' is only faster in certain situations. Most users should use
#' \code{\link{RRBLUP_GCA}}.
#'
#' @param pop a \code{\link{Pop-class}} to serve as the training population
#' @param traits an integer indicating the trait to model, a trait name, or a
#' function of the traits returning a single value.
#' @param use train model using phenotypes "pheno", genetic values "gv",
#' estimated breeding values "ebv", breeding values "bv", or randomly "rand"
#' @param snpChip an integer indicating which SNP chip genotype
#' to use
#' @param useQtl should QTL genotypes be used instead of a SNP chip.
#' If TRUE, snpChip specifies which trait's QTL to use, and thus these
#' QTL may not match the QTL underlying the phenotype supplied in traits.
#' @param maxIter maximum number of iterations for convergence.
#' @param VuF marker effect variance for females. If value is NULL, a
#' reasonable starting point is chosen automatically.
#' @param VuM marker effect variance for males. If value is NULL, a
#' reasonable starting point is chosen automatically.
#' @param Ve error variance. If value is NULL, a
#' reasonable starting point is chosen automatically.
#' @param useEM use EM to solve variance components. If false,
#' the initial values are considered true.
#' @param tol tolerance for EM algorithm convergence
#' @param simParam an object of \code{\link{SimParam}}
#' @param ... additional arguments if using a function for
#' traits
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=20)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitA(10)
#' SP$setVarE(h2=0.5)
#' SP$addSnpChip(10)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' #Run GS model and set EBV
#' ans = RRBLUP_GCA2(pop, simParam=SP)
#' pop = setEBV(pop, ans, simParam=SP)
#'
#' #Evaluate accuracy
#' cor(gv(pop), ebv(pop))
#'
#' @export
RRBLUP_GCA2 = function(pop, traits=1, use="pheno", snpChip=1,
                       useQtl=FALSE, maxIter=10, VuF=NULL, VuM=NULL,
                       Ve=NULL, useEM=TRUE, tol=1e-6, simParam=NULL, ...){
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }
  y = getResponse(pop=pop,trait=traits,use=use,
                  simParam=simParam,...)
  traits = convertTraitsToNames(traits, simParam)
  fixEff = as.integer(factor(pop@fixEff))
  if(useQtl){
    nLoci = simParam$traits[[snpChip]]@nLoci
    lociPerChr = simParam$traits[[snpChip]]@lociPerChr
    lociLoc = simParam$traits[[snpChip]]@lociLoc
  }else{
    nLoci = simParam$snpChips[[snpChip]]@nLoci
    lociPerChr = simParam$snpChips[[snpChip]]@lociPerChr
    lociLoc = simParam$snpChips[[snpChip]]@lociLoc
  }
  # Sort out VuF, VuM and Ve
  if(is.function(traits)){
    if(is.null(VuF)){
      VuF = var(y)/nLoci
    }
    if(is.null(VuM)){
      VuM = var(y)/nLoci
    }
    if(is.null(Ve)){
      Ve = var(y)/2
    }
  }else{
    stopifnot(length(traits)==1)
    if(is.null(VuF)){
      VuF = 2*simParam$varA[traits]/nLoci
      if(is.na(VuF)){
        VuF = var(y)/nLoci
      }
    }
    if(is.null(VuM)){
      VuM = 2*simParam$varA[traits]/nLoci
      if(is.na(VuM)){
        VuM = var(y)/nLoci
      }
    }
    if(is.null(Ve)){
      Ve = simParam$varE[traits]
      if(is.na(Ve)){
        Ve = var(y)/2
      }
    }
  }
  #Fit model
  stopifnot(ncol(y)==1)
  ans = callRRBLUP_GCA2(y, fixEff, pop@geno,
                        lociPerChr, lociLoc, maxIter,
                        VuF, VuM, Ve, tol, useEM,
                        simParam$nThreads)
  gv = new("TraitA2",
           nLoci=nLoci,
           lociPerChr=lociPerChr,
           lociLoc=lociLoc,
           addEff=c(ans$alpha1),
           addEffMale=c(ans$alpha2),
           intercept=c(ans$mu),
           name=paste0("est_GV_",traits))
  female = new("TraitA",
               nLoci=nLoci,
               lociPerChr=lociPerChr,
               lociLoc=lociLoc,
               addEff=c(ans$alpha1),
               intercept=c(ans$beta1),
               name=paste0("est_female_",traits))
  male = new("TraitA",
             nLoci=nLoci,
             lociPerChr=lociPerChr,
             lociLoc=lociLoc,
             addEff=c(ans$alpha2),
             intercept=c(ans$beta2),
             name=paste0("est_male_",traits))
  output = new("RRsol",
               gv = list(gv),
               bv = as.list(NULL),
               female = list(female),
               male = list(male),
               Vu = as.matrix(ans$Vu),
               Ve = as.matrix(ans$Ve))
  return(output)
}

#' @title RR-BLUP SCA Model
#'
#' @description
#' An extension of \code{\link{RRBLUP_GCA}} that adds dominance effects.
#' Note that we have not seen any consistent benefit of this model over
#' \code{\link{RRBLUP_GCA}}.
#'
#' @param pop a \code{\link{Pop-class}} to serve as the training population
#' @param traits an integer indicating the trait to model, a trait name, or a
#' function of the traits returning a single value.
#' @param use train model using phenotypes "pheno", genetic values "gv",
#' estimated breeding values "ebv", breeding values "bv", or randomly "rand"
#' @param snpChip an integer indicating which SNP chip genotype
#' to use
#' @param useQtl should QTL genotypes be used instead of a SNP chip.
#' If TRUE, snpChip specifies which trait's QTL to use, and thus these
#' QTL may not match the QTL underlying the phenotype supplied in traits.
#' @param maxIter maximum number of iterations for convergence.
#' @param simParam an object of \code{\link{SimParam}}
#' @param ... additional arguments if using a function for
#' traits
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=2, nChr=1, segSites=20)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitA(10)
#' SP$setVarE(h2=0.5)
#' SP$addSnpChip(10)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' #Run GS model and set EBV
#' ans = RRBLUP_SCA(pop, simParam=SP)
#' pop = setEBV(pop, ans, simParam=SP)
#'
#' #Evaluate accuracy
#' cor(gv(pop), ebv(pop))
#'
#' @export
RRBLUP_SCA = function(pop, traits=1, use="pheno", snpChip=1,
                      useQtl=FALSE, maxIter=40L, simParam=NULL, ...){
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }
  y = getResponse(pop=pop,trait=traits,use=use,
                  simParam=simParam,...)
  traits = convertTraitsToNames(traits, simParam)
  fixEff = as.integer(factor(pop@fixEff))
  if(useQtl){
    nLoci = simParam$traits[[snpChip]]@nLoci
    lociPerChr = simParam$traits[[snpChip]]@lociPerChr
    lociLoc = simParam$traits[[snpChip]]@lociLoc
  }else{
    nLoci = simParam$snpChips[[snpChip]]@nLoci
    lociPerChr = simParam$snpChips[[snpChip]]@lociPerChr
    lociLoc = simParam$snpChips[[snpChip]]@lociLoc
  }
  #Fit model
  stopifnot(ncol(y)==1)
  ans = callRRBLUP_SCA(y, fixEff, pop@geno,
                       lociPerChr, lociLoc, maxIter,
                       simParam$nThreads)
  gv = new("TraitA2D",
           nLoci=nLoci,
           lociPerChr=lociPerChr,
           lociLoc=lociLoc,
           addEff=c(ans$a1),
           addEffMale=c(ans$a2),
           domEff=c(ans$d),
           intercept=c(ans$mu),
           name=paste0("est_GV_",traits))
  female = new("TraitA",
               nLoci=nLoci,
               lociPerChr=lociPerChr,
               lociLoc=lociLoc,
               addEff=c(ans$alpha1),
               intercept=c(ans$beta1),
               name=paste0("est_female_",traits))
  male = new("TraitA",
             nLoci=nLoci,
             lociPerChr=lociPerChr,
             lociLoc=lociLoc,
             addEff=c(ans$alpha2),
             intercept=c(ans$beta2),
             name=paste0("est_male_",traits))
  output = new("RRsol",
               gv = list(gv),
               bv = as.list(NULL),
               female = list(female),
               male = list(male),
               Vu = as.matrix(ans$Vu),
               Ve = as.matrix(ans$Ve))
  return(output)
}

#' @title RR-BLUP SCA Model 2
#'
#' @description
#' Fits an RR-BLUP model that estimates separate additive effects for
#' females and males and a dominance effect. This implementation is meant
#' for situations where \code{\link{RRBLUP_SCA}} is too slow. Note that
#' RRBLUP_SCA2 is only faster in certain situations. Most users should use
#' \code{\link{RRBLUP_SCA}}.
#'
#' @param pop a \code{\link{Pop-class}} to serve as the training population
#' @param traits an integer indicating the trait to model, a trait name, or a
#' function of the traits returning a single value.
#' @param use train model using phenotypes "pheno", genetic values "gv", #' estimated breeding values "ebv", breeding values "bv", or randomly "rand" #' @param snpChip an integer indicating which SNP chip genotype #' to use #' @param useQtl should QTL genotypes be used instead of a SNP chip. #' If TRUE, snpChip specifies which trait's QTL to use, and thus these #' QTL may not match the QTL underlying the phenotype supplied in traits. #' @param maxIter maximum number of iterations for convergence. #' @param VuF marker effect variance for females. If value is NULL, a #' reasonable starting point is chosen automatically. #' @param VuM marker effect variance for males. If value is NULL, a #' reasonable starting point is chosen automatically. #' @param VuD marker effect variance for dominance. If value is NULL, a #' reasonable starting point is chosen automatically. #' @param Ve error variance. If value is NULL, a #' reasonable starting point is chosen automatically. #' @param useEM use EM to solve variance components. If false, #' the initial values are considered true. #' @param tol tolerance for EM algorithm convergence #' @param simParam an object of \code{\link{SimParam}} #' @param ... additional arguments if using a function for #' traits #' #' @examples #' #Create founder haplotypes #' founderPop = quickHaplo(nInd=10, nChr=1, segSites=20) #' #' #Set simulation parameters #' SP = SimParam$new(founderPop) #' \dontshow{SP$nThreads = 1L} #' SP$addTraitA(10) #' SP$setVarE(h2=0.5) #' SP$addSnpChip(10) #' #' #Create population #' pop = newPop(founderPop, simParam=SP) #' #' #Run GS model and set EBV #' ans = RRBLUP_SCA2(pop, simParam=SP) #' pop = setEBV(pop, ans, simParam=SP) #' #' #Evaluate accuracy #' cor(gv(pop), ebv(pop)) #' #' @export RRBLUP_SCA2 = function(pop, traits=1, use="pheno", snpChip=1, useQtl=FALSE, maxIter=10, VuF=NULL, VuM=NULL, VuD=NULL, Ve=NULL, useEM=TRUE, tol=1e-6, simParam=NULL, ...){ if(is.null(simParam)){ simParam = get("SP",envir=.GlobalEnv) } y = getResponse(pop=pop,trait=traits,use=use, simParam=simParam,...) 
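  # As in the other RR-BLUP wrappers: resolve trait names for output labels,
  # then fall back to heuristic variance priors below when VuF/VuM/VuD/Ve are NULL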
  traits = convertTraitsToNames(traits, simParam)
  fixEff = as.integer(factor(pop@fixEff))
  if(useQtl){
    nLoci = simParam$traits[[snpChip]]@nLoci
    lociPerChr = simParam$traits[[snpChip]]@lociPerChr
    lociLoc = simParam$traits[[snpChip]]@lociLoc
  }else{
    nLoci = simParam$snpChips[[snpChip]]@nLoci
    lociPerChr = simParam$snpChips[[snpChip]]@lociPerChr
    lociLoc = simParam$snpChips[[snpChip]]@lociLoc
  }
  # Sort out VuF, VuM, VuD and Ve
  if(is.function(traits)){
    if(is.null(VuF)){
      VuF = var(y)/nLoci
    }
    if(is.null(VuM)){
      VuM = var(y)/nLoci
    }
    if(is.null(VuD)){
      VuD = var(y)/nLoci/2
    }
    if(is.null(Ve)){
      Ve = var(y)/2
    }
  }else{
    stopifnot(length(traits)==1)
    if(is.null(VuF)){
      VuF = 2*simParam$varA[traits]/nLoci
      if(is.na(VuF)){
        VuF = var(y)/nLoci
      }
    }
    if(is.null(VuM)){
      VuM = 2*simParam$varA[traits]/nLoci
      if(is.na(VuM)){
        VuM = var(y)/nLoci
      }
    }
    if(is.null(VuD)){
      VuD = simParam$varA[traits]/nLoci
      if(is.na(VuD)){
        VuD = var(y)/nLoci/2
      }
    }
    if(is.null(Ve)){
      Ve = simParam$varE[traits]
      if(is.na(Ve)){
        Ve = var(y)/2
      }
    }
  }
  #Fit model
  stopifnot(ncol(y)==1)
  ans = callRRBLUP_SCA2(y, fixEff, pop@geno,
                        lociPerChr, lociLoc, maxIter,
                        VuF, VuM, VuD, Ve, tol, useEM,
                        simParam$nThreads)
  gv = new("TraitA2D",
           nLoci=nLoci,
           lociPerChr=lociPerChr,
           lociLoc=lociLoc,
           addEff=c(ans$a1),
           addEffMale=c(ans$a2),
           domEff=c(ans$d),
           intercept=c(ans$mu),
           name=paste0("est_GV_",traits))
  female = new("TraitA",
               nLoci=nLoci,
               lociPerChr=lociPerChr,
               lociLoc=lociLoc,
               addEff=c(ans$alpha1),
               intercept=c(ans$beta1),
               name=paste0("est_female_",traits))
  male = new("TraitA",
             nLoci=nLoci,
             lociPerChr=lociPerChr,
             lociLoc=lociLoc,
             addEff=c(ans$alpha2),
             intercept=c(ans$beta2),
             name=paste0("est_male_",traits))
  output = new("RRsol",
               gv = list(gv),
               bv = as.list(NULL),
               female = list(female),
               male = list(male),
               Vu = as.matrix(ans$Vu),
               Ve = as.matrix(ans$Ve))
  return(output)
}

#' @title Set EBV
#'
#' @description
#' Adds genomic estimated values to a population's EBV
#' slot using output from genomic selection functions.
#' The genomic estimated values can be either estimated
#' breeding values, estimated genetic values, or
#' estimated general combining values.
#'
#' @param pop an object of \code{\link{Pop-class}}
#' @param solution an object of \code{\link{RRsol-class}}
#' @param value the genomic value to be estimated. Can be
#' either "gv", "bv", "female", or "male".
#' @param targetPop an optional target population that can
#' be used when value is "bv", "female", or "male". When
#' supplied, the allele frequency in the targetPop is used
#' to set these values.
#' @param append should estimated values be appended to
#' existing data in the EBV slot. If TRUE, a new column is
#' added. If FALSE, existing data is replaced with the
#' new estimates.
#' @param simParam an object of \code{\link{SimParam}} #' #' #' @return Returns an object of \code{\link{Pop-class}} #' #' @examples #' #Create founder haplotypes #' founderPop = quickHaplo(nInd=10, nChr=1, segSites=20) #' #' #Set simulation parameters #' SP = SimParam$new(founderPop) #' \dontshow{SP$nThreads = 1L} #' SP$addTraitA(10) #' SP$setVarE(h2=0.5) #' SP$addSnpChip(10) #' #' #Create population #' pop = newPop(founderPop, simParam=SP) #' #' #Run GS model and set EBV #' ans = RRBLUP(pop, simParam=SP) #' pop = setEBV(pop, ans, simParam=SP) #' #' #Evaluate accuracy #' cor(gv(pop), ebv(pop)) #' #' @export setEBV = function(pop, solution, value="gv", targetPop=NULL, append=FALSE, simParam=NULL){ if(is.null(simParam)){ simParam = get("SP",envir=.GlobalEnv) } nTraits = length(solution@gv) ebv = matrix(NA_real_, nrow=pop@nInd, ncol=nTraits) # Placeholder names colnames(ebv) = as.character(1:nTraits) value = tolower(value) if(value=="gv"){ for(i in 1:nTraits){ tmp = getGv(solution@gv[[i]],pop,simParam$nThreads) ebv[,i] = tmp[[1]] colnames(ebv)[i] = solution@gv[[i]]@name } }else if(value=="bv"){ if(is.null(targetPop)){ if(length(solution@bv)==0){ stop("This genomic selection model does not produce breeding value estimates.") } for(i in 1:nTraits){ tmp = getGv(solution@bv[[i]],pop,simParam$nThreads) ebv[,i] = tmp[[1]] colnames(ebv)[i] = solution@bv[[i]]@name } }else{ for(i in 1:nTraits){ trait = solution@gv[[i]] if(.hasSlot(trait,"addEffMale")){ stop("This genomic selection model does not produce breeding value estimates. Try value='male' or value='female' instead.") } p = calcGenoFreq(targetPop@geno, trait@lociPerChr, trait@lociLoc, simParam$nThreads) p = c(p) q = 1-p a = trait@addEff if(.hasSlot(trait,"domEff")){ d = trait@domEff }else{ d = rep(0, length(a)) } alpha = a+d*(q-p) intercept = -sum((p-q)*alpha) trait = new("TraitA", nLoci=trait@nLoci, lociPerChr=trait@lociPerChr, lociLoc=trait@lociLoc, addEff=alpha, intercept=intercept) tmp = getGv(trait, pop, simParam$nThreads) ebv[,i] = tmp[[1]] # changing original name from "est_GV_..." to "est_BV_..." tmp = solution@gv[[i]]@name tmp = strsplit(tmp, "_")[[1]] tmp[2] = "BV" colnames(ebv)[i] = paste(tmp,collapse="_") } } }else if(value=="female"){ if(is.null(targetPop)){ if(length(solution@female)==0){ stop("This genomic selection model does not produce GCA estimates for females.") } for(i in 1:nTraits){ tmp = getGv(solution@female[[i]],pop,simParam$nThreads) ebv[,i] = tmp[[1]] colnames(ebv)[i] = solution@female[[i]]@name } }else{ for(i in 1:nTraits){ trait = solution@gv[[i]] p = calcGenoFreq(targetPop@geno, trait@lociPerChr, trait@lociLoc, simParam$nThreads) p = c(p) q = 1-p a = trait@addEff if(.hasSlot(trait,"domEff")){ d = trait@domEff }else{ d = rep(0, length(a)) } alpha = (a+d*(q-p))/2 intercept = -sum((p-q)*alpha) trait = new("TraitA", nLoci=trait@nLoci, lociPerChr=trait@lociPerChr, lociLoc=trait@lociLoc, addEff=alpha, intercept=intercept) tmp = getGv(trait, pop, simParam$nThreads) ebv[,i] = tmp[[1]] # changing original name from "est_GV_..." to "est_female_..." 
        tmp = solution@gv[[i]]@name
        tmp = strsplit(tmp, "_")[[1]]
        tmp[2] = "female"
        colnames(ebv)[i] = paste(tmp,collapse="_")
      }
    }
  }else if(value=="male"){
    if(is.null(targetPop)){
      if(length(solution@male)==0){
        stop("This genomic selection model does not produce GCA estimates for males.")
      }
      for(i in 1:nTraits){
        tmp = getGv(solution@male[[i]],pop,simParam$nThreads)
        ebv[,i] = tmp[[1]]
        colnames(ebv)[i] = solution@male[[i]]@name
      }
    }else{
      for(i in 1:nTraits){
        trait = solution@gv[[i]]
        p = calcGenoFreq(targetPop@geno,
                         trait@lociPerChr,
                         trait@lociLoc,
                         simParam$nThreads)
        p = c(p)
        q = 1-p
        if(.hasSlot(trait,"addEffMale")){
          a = trait@addEffMale
        }else{
          a = trait@addEff
        }
        if(.hasSlot(trait,"domEff")){
          d = trait@domEff
        }else{
          d = rep(0, length(a))
        }
        alpha = (a+d*(q-p))/2
        intercept = -sum((p-q)*alpha)
        trait = new("TraitA",
                    nLoci=trait@nLoci,
                    lociPerChr=trait@lociPerChr,
                    lociLoc=trait@lociLoc,
                    addEff=alpha,
                    intercept=intercept)
        tmp = getGv(trait, pop, simParam$nThreads)
        ebv[,i] = tmp[[1]]
        # changing original name from "est_GV_..." to "est_male_..."
        tmp = solution@gv[[i]]@name
        tmp = strsplit(tmp, "_")[[1]]
        tmp[2] = "male"
        colnames(ebv)[i] = paste(tmp,collapse="_")
      }
    }
  }else{
    stop(paste0("value=",value," is not a valid option"))
  }
  if(append){
    pop@ebv = cbind(pop@ebv,ebv)
  }else{
    pop@ebv = ebv
  }
  return(pop)
}

#' @title RRBLUP Memory Usage
#'
#' @description
#' Estimates the amount of RAM needed to run \code{\link{RRBLUP}}
#' and its related functions for a given training population size.
#' Note that this function may underestimate total usage.
#'
#' @param nInd the number of individuals in the training population
#' @param nMarker the number of markers per individual
#' @param model either "REG", "GCA", or "SCA" for \code{\link{RRBLUP}},
#' \code{\link{RRBLUP_GCA}} and \code{\link{RRBLUP_SCA}} respectively.
#'
#' @return Returns an estimate for the required gigabytes of RAM
#'
#' @examples
#' RRBLUPMemUse(nInd=1000, nMarker=5000)
#'
#' @export
RRBLUPMemUse = function(nInd,nMarker,model="REG"){
  y = nInd
  X = nInd #times fixed effects, assuming 1 here
  M = nInd*nMarker
  u = nMarker
  if(toupper(model)=="REG"){
    S = nInd*nInd
    eigval = nInd
    eigvec = nInd*nInd
    eta = nInd
    Hinv = nInd*nInd
  }else if(toupper(model)=="GCA"){
    M = M*2
    V = nInd*nInd*3
    W = W0 = WQX = nInd*nInd
    WX = ee = nInd
    u = u*2
  }else if(toupper(model)=="SCA"){
    M = M*3
    V = nInd*nInd*4
    W = W0 = WQX = nInd*nInd
    WX = ee = nInd
    u = u*3
  }else{
    stop(paste0("model=",toupper(model)," not recognized"))
  }
  objects = ls()
  objects = objects[objects!="model"]
  bytes = sapply(objects,function(x) get(x))
  bytes = 8*sum(bytes)
  # Using base 2 calculation
  #return(bytes/((2^10)^3)) #GB
  # Using base 10 calculation; more conservative and not strictly exact,
  # but it helps account for unmeasured sources of memory usage
  return(bytes/10^9) #GB
}
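# Illustrative sketch, kept inert with `if (FALSE)`: using setEBV() with
# value="bv" and a target population, as implemented above. Allele frequencies
# from targetPop are used to convert additive and dominance effects into
# average effects, alpha = a + d*(q - p). Assumes only the exported API already
# used in this file's roxygen examples.
if(FALSE){
  founderPop = quickHaplo(nInd=20, nChr=1, segSites=20)
  SP = SimParam$new(founderPop)
  SP$addTraitAD(10, meanDD=0.5)
  SP$setVarE(h2=0.5)
  SP$addSnpChip(10)
  pop = newPop(founderPop, simParam=SP)

  ans = RRBLUP_D(pop, simParam=SP)

  # EBVs expressed as breeding values relative to pop's own allele frequencies
  pop = setEBV(pop, ans, value="bv", targetPop=pop, simParam=SP)
  cor(gv(pop), ebv(pop))
}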
# File: AlphaSimR/R/GS.R
# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 #' @title Solve RR-BLUP #' #' @description #' Solves a univariate mixed model of form \eqn{y=X\beta+Mu+e} #' #' @param y a matrix with n rows and 1 column #' @param X a matrix with n rows and x columns #' @param M a matrix with n rows and m columns #' #' @export solveRRBLUP <- function(y, X, M) { .Call(`_AlphaSimR_solveRRBLUP`, y, X, M) } #' @title Solve Multivariate RR-BLUP #' #' @description #' Solves a multivariate mixed model of form \eqn{Y=X\beta+Mu+e} #' #' @param Y a matrix with n rows and q columns #' @param X a matrix with n rows and x columns #' @param M a matrix with n rows and m columns #' @param tol tolerance for convergence #' @param maxIter maximum number of iteration #' #' @export solveRRBLUPMV <- function(Y, X, M, maxIter = 1000L, tol = 1e-6) { .Call(`_AlphaSimR_solveRRBLUPMV`, Y, X, M, maxIter, tol) } #' @title Solve Multikernel RR-BLUP #' #' @description #' Solves a univariate mixed model with multiple random effects. #' #' @param y a matrix with n rows and 1 column #' @param X a matrix with n rows and x columns #' @param Mlist a list of M matrices #' @param maxIter maximum number of iteration #' #' @export solveRRBLUPMK <- function(y, X, Mlist, maxIter = 40L) { .Call(`_AlphaSimR_solveRRBLUPMK`, y, X, Mlist, maxIter) } #' @title Solve RR-BLUP with EM #' #' @description #' Solves a univariate mixed model of form \eqn{y=X\beta+Mu+e} using #' the Expectation-Maximization algorithm. #' #' @param Y a matrix with n rows and 1 column #' @param X a matrix with n rows and x columns #' @param M a matrix with n rows and m columns #' @param Vu initial guess for variance of marker effects #' @param Ve initial guess for error variance #' @param tol tolerance for declaring convergence #' @param maxIter maximum iteration for attempting convergence #' @param useEM should EM algorithm be used. If false, no estimation of #' variance components is performed. The initial values are treated as true. #' #' @export solveRRBLUP_EM <- function(Y, X, M, Vu, Ve, tol, maxIter, useEM) { .Call(`_AlphaSimR_solveRRBLUP_EM`, Y, X, M, Vu, Ve, tol, maxIter, useEM) } #' @title Solve RR-BLUP with EM and 2 random effects #' #' @description #' Solves a univariate mixed model of form \eqn{y=X\beta+M_1u_1+M_2u_2+e} using #' the Expectation-Maximization algorithm. #' #' @param Y a matrix with n rows and 1 column #' @param X a matrix with n rows and x columns #' @param M1 a matrix with n rows and m1 columns #' @param M2 a matrix with n rows and m2 columns #' @param Vu1 initial guess for variance of the first marker effects #' @param Vu2 initial guess for variance of the second marker effects #' @param Ve initial guess for error variance #' @param tol tolerance for declaring convergence #' @param maxIter maximum iteration for attempting convergence #' @param useEM should EM algorithm be used. If false, no estimation of #' variance components is performed. The initial values are treated as true. #' #' @export solveRRBLUP_EM2 <- function(Y, X, M1, M2, Vu1, Vu2, Ve, tol, maxIter, useEM) { .Call(`_AlphaSimR_solveRRBLUP_EM2`, Y, X, M1, M2, Vu1, Vu2, Ve, tol, maxIter, useEM) } #' @title Solve RR-BLUP with EM and 3 random effects #' #' @description #' Solves a univariate mixed model of form \eqn{y=X\beta+M_1u_1+M_2u_2+M_3u_3+e} using #' the Expectation-Maximization algorithm. 
#'
#' @param Y a matrix with n rows and 1 column
#' @param X a matrix with n rows and x columns
#' @param M1 a matrix with n rows and m1 columns
#' @param M2 a matrix with n rows and m2 columns
#' @param M3 a matrix with n rows and m3 columns
#' @param Vu1 initial guess for variance of the first marker effects
#' @param Vu2 initial guess for variance of the second marker effects
#' @param Vu3 initial guess for variance of the third marker effects
#' @param Ve initial guess for error variance
#' @param tol tolerance for declaring convergence
#' @param maxIter maximum number of iterations for attempting convergence
#' @param useEM should the EM algorithm be used. If FALSE, no estimation of
#' variance components is performed. The initial values are treated as true.
#'
#' @export
solveRRBLUP_EM3 <- function(Y, X, M1, M2, M3, Vu1, Vu2, Vu3, Ve, tol, maxIter, useEM) {
    .Call(`_AlphaSimR_solveRRBLUP_EM3`, Y, X, M1, M2, M3, Vu1, Vu2, Vu3, Ve, tol, maxIter, useEM)
}

callFastRRBLUP <- function(y, geno, lociPerChr, lociLoc, Vu, Ve, maxIter, nThreads) {
    .Call(`_AlphaSimR_callFastRRBLUP`, y, geno, lociPerChr, lociLoc, Vu, Ve, maxIter, nThreads)
}

callRRBLUP <- function(y, x, geno, lociPerChr, lociLoc, nThreads) {
    .Call(`_AlphaSimR_callRRBLUP`, y, x, geno, lociPerChr, lociLoc, nThreads)
}

callRRBLUP2 <- function(y, x, geno, lociPerChr, lociLoc, Vu, Ve, tol, maxIter, useEM, nThreads) {
    .Call(`_AlphaSimR_callRRBLUP2`, y, x, geno, lociPerChr, lociLoc, Vu, Ve, tol, maxIter, useEM, nThreads)
}

callRRBLUP_D <- function(y, x, geno, lociPerChr, lociLoc, maxIter, nThreads) {
    .Call(`_AlphaSimR_callRRBLUP_D`, y, x, geno, lociPerChr, lociLoc, maxIter, nThreads)
}

callRRBLUP_D2 <- function(y, x, geno, lociPerChr, lociLoc, maxIter, Va, Vd, Ve, tol, useEM, nThreads) {
    .Call(`_AlphaSimR_callRRBLUP_D2`, y, x, geno, lociPerChr, lociLoc, maxIter, Va, Vd, Ve, tol, useEM, nThreads)
}

callRRBLUP_MV <- function(Y, x, geno, lociPerChr, lociLoc, maxIter, nThreads) {
    .Call(`_AlphaSimR_callRRBLUP_MV`, Y, x, geno, lociPerChr, lociLoc, maxIter, nThreads)
}

callRRBLUP_GCA <- function(y, x, geno, lociPerChr, lociLoc, maxIter, nThreads) {
    .Call(`_AlphaSimR_callRRBLUP_GCA`, y, x, geno, lociPerChr, lociLoc, maxIter, nThreads)
}

callRRBLUP_GCA2 <- function(y, x, geno, lociPerChr, lociLoc, maxIter, Vu1, Vu2, Ve, tol, useEM, nThreads) {
    .Call(`_AlphaSimR_callRRBLUP_GCA2`, y, x, geno, lociPerChr, lociLoc, maxIter, Vu1, Vu2, Ve, tol, useEM, nThreads)
}

callRRBLUP_SCA <- function(y, x, geno, lociPerChr, lociLoc, maxIter, nThreads) {
    .Call(`_AlphaSimR_callRRBLUP_SCA`, y, x, geno, lociPerChr, lociLoc, maxIter, nThreads)
}

callRRBLUP_SCA2 <- function(y, x, geno, lociPerChr, lociLoc, maxIter, Vu1, Vu2, Vu3, Ve, tol, useEM, nThreads) {
    .Call(`_AlphaSimR_callRRBLUP_SCA2`, y, x, geno, lociPerChr, lociLoc, maxIter, Vu1, Vu2, Vu3, Ve, tol, useEM, nThreads)
}

#' @title Solve Univariate Model
#'
#' @description
#' Solves a univariate mixed model of form \eqn{y=X\beta+Zu+e}
#'
#' @param y a matrix with n rows and 1 column
#' @param X a matrix with n rows and x columns
#' @param Z a matrix with n rows and m columns
#' @param K a matrix with m rows and m columns
#'
#' @export
solveUVM <- function(y, X, Z, K) {
    .Call(`_AlphaSimR_solveUVM`, y, X, Z, K)
}

#' @title Solve Multivariate Model
#'
#' @description
#' Solves a multivariate mixed model of form \eqn{Y=X\beta+Zu+e}
#'
#' @param Y a matrix with n rows and q columns
#' @param X a matrix with n rows and x columns
#' @param Z a matrix with n rows and m columns
#' @param K a matrix with m rows and m columns
#' @param tol tolerance for convergence
#' @param maxIter maximum number of iterations
#'
#' @export
solveMVM <- function(Y, X, Z, K, tol = 1e-6, maxIter = 1000L) {
    .Call(`_AlphaSimR_solveMVM`, Y, X, Z, K, tol, maxIter)
}

#' @title Solve Multikernel Model
#'
#' @description
#' Solves a univariate mixed model with multiple random effects.
#'
#' @param y a matrix with n rows and 1 column
#' @param X a matrix with n rows and x columns
#' @param Zlist a list of Z matrices
#' @param Klist a list of K matrices
#' @param maxIter maximum number of iterations
#' @param tol tolerance for convergence
#'
#' @export
solveMKM <- function(y, X, Zlist, Klist, maxIter = 40L, tol = 1e-4) {
    .Call(`_AlphaSimR_solveMKM`, y, X, Zlist, Klist, maxIter, tol)
}

writeASGenotypes <- function(g, locations, allLocations, snpchips, names, missing, fname) {
    invisible(.Call(`_AlphaSimR_writeASGenotypes`, g, locations, allLocations, snpchips, names, missing, fname))
}

writeASHaplotypes <- function(g, locations, allLocations, snpchips, names, missing, fname) {
    invisible(.Call(`_AlphaSimR_writeASHaplotypes`, g, locations, allLocations, snpchips, names, missing, fname))
}

argAltAD <- function(LociMap, Pop, mean, varA, varD, inbrDepr, nThreads) {
    .Call(`_AlphaSimR_argAltAD`, LociMap, Pop, mean, varA, varD, inbrDepr, nThreads)
}

objAltAD <- function(input, args) {
    .Call(`_AlphaSimR_objAltAD`, input, args)
}

finAltAD <- function(input, args) {
    .Call(`_AlphaSimR_finAltAD`, input, args)
}

calcGenParam <- function(trait, pop, nThreads) {
    .Call(`_AlphaSimR_calcGenParam`, trait, pop, nThreads)
}

getGeno <- function(geno, lociPerChr, lociLoc, nThreads) {
    .Call(`_AlphaSimR_getGeno`, geno, lociPerChr, lociLoc, nThreads)
}

getMaternalGeno <- function(geno, lociPerChr, lociLoc, nThreads) {
    .Call(`_AlphaSimR_getMaternalGeno`, geno, lociPerChr, lociLoc, nThreads)
}

getPaternalGeno <- function(geno, lociPerChr, lociLoc, nThreads) {
    .Call(`_AlphaSimR_getPaternalGeno`, geno, lociPerChr, lociLoc, nThreads)
}

getHaplo <- function(geno, lociPerChr, lociLoc, nThreads) {
    .Call(`_AlphaSimR_getHaplo`, geno, lociPerChr, lociLoc, nThreads)
}

getOneHaplo <- function(geno, lociPerChr, lociLoc, haplo, nThreads) {
    .Call(`_AlphaSimR_getOneHaplo`, geno, lociPerChr, lociLoc, haplo, nThreads)
}

setHaplo <- function(geno, haplo, lociPerChr, lociLoc, nThreads) {
    .Call(`_AlphaSimR_setHaplo`, geno, haplo, lociPerChr, lociLoc, nThreads)
}

writeGeno <- function(geno, lociPerChr, lociLoc, filePath, nThreads) {
    invisible(.Call(`_AlphaSimR_writeGeno`, geno, lociPerChr, lociLoc, filePath, nThreads))
}

writeOneHaplo <- function(geno, lociPerChr, lociLoc, haplo, filePath, nThreads) {
    invisible(.Call(`_AlphaSimR_writeOneHaplo`, geno, lociPerChr, lociLoc, haplo, filePath, nThreads))
}

calcGenoFreq <- function(geno, lociPerChr, lociLoc, nThreads) {
    .Call(`_AlphaSimR_calcGenoFreq`, geno, lociPerChr, lociLoc, nThreads)
}

calcChrFreq <- function(geno) {
    .Call(`_AlphaSimR_calcChrFreq`, geno)
}

getGv <- function(trait, pop, nThreads) {
    .Call(`_AlphaSimR_getGv`, trait, pop, nThreads)
}

getHybridGv <- function(trait, females, femaleParents, males, maleParents, nThreads) {
    .Call(`_AlphaSimR_getHybridGv`, trait, females, femaleParents, males, maleParents, nThreads)
}

getNonFounderIbd <- function(recHist, mother, father) {
    .Call(`_AlphaSimR_getNonFounderIbd`, recHist, mother, father)
}

getFounderIbd <- function(founder, nChr) {
    .Call(`_AlphaSimR_getFounderIbd`, founder, nChr)
}

createIbdMat <- function(ibd, chr, nLoci, ploidy, nThreads) {
    .Call(`_AlphaSimR_createIbdMat`, ibd, chr, nLoci, ploidy, nThreads)
}

cross <- function(motherGeno, mother, fatherGeno, father, femaleMap, maleMap, trackRec, motherPloidy, fatherPloidy, v, p, motherCentromere, fatherCentromere, quadProb, nThreads) {
    .Call(`_AlphaSimR_cross`, motherGeno, mother, fatherGeno, father, femaleMap, maleMap, trackRec, motherPloidy, fatherPloidy, v, p, motherCentromere, fatherCentromere, quadProb, nThreads)
}

createDH2 <- function(geno, nDH, genMap, v, p, trackRec, nThreads) {
    .Call(`_AlphaSimR_createDH2`, geno, nDH, genMap, v, p, trackRec, nThreads)
}

createReducedGenome <- function(geno, nProgeny, genMap, v, p, trackRec, ploidy, centromere, quadProb, nThreads) {
    .Call(`_AlphaSimR_createReducedGenome`, geno, nProgeny, genMap, v, p, trackRec, ploidy, centromere, quadProb, nThreads)
}

#' @title Population variance
#'
#' @description
#' Calculates the population variance matrix as
#' opposed to the sample variance matrix calculated
#' by \code{\link{var}}, i.e. it divides by n instead
#' of n-1.
#'
#' @param X an n by m matrix
#'
#' @return an m by m variance-covariance matrix
#'
#' @export
popVar <- function(X) {
    .Call(`_AlphaSimR_popVar`, X)
}

mergeGeno <- function(x, y) {
    .Call(`_AlphaSimR_mergeGeno`, x, y)
}

mergeMultGeno <- function(popList, nInd, nBin, ploidy) {
    .Call(`_AlphaSimR_mergeMultGeno`, popList, nInd, nBin, ploidy)
}

mergeMultIntMat <- function(X, nRow, nCol) {
    .Call(`_AlphaSimR_mergeMultIntMat`, X, nRow, nCol)
}

sampleInt <- function(n, N) {
    .Call(`_AlphaSimR_sampleInt`, n, N)
}

sampAllComb <- function(nLevel1, nLevel2, n) {
    .Call(`_AlphaSimR_sampAllComb`, nLevel1, nLevel2, n)
}

sampHalfDialComb <- function(nLevel, n) {
    .Call(`_AlphaSimR_sampHalfDialComb`, nLevel, n)
}

calcCoef <- function(X, Y) {
    .Call(`_AlphaSimR_calcCoef`, X, Y)
}

#' @title Number of available threads
#'
#' @description
#' Gets the number of available threads by calling the OpenMP function
#' \code{omp_get_max_threads()}
#'
#' @return integer
#'
#' @examples
#' getNumThreads()
#'
#' @export
getNumThreads <- function() {
    .Call(`_AlphaSimR_getNumThreads`)
}

packHaplo <- function(haplo, ploidy, inbred) {
    .Call(`_AlphaSimR_packHaplo`, haplo, ploidy, inbred)
}

MaCS <- function(args, maxSites, inbred, ploidy, nThreads, seed) {
    .Call(`_AlphaSimR_MaCS`, args, maxSites, inbred, ploidy, nThreads, seed)
}
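# Illustrative sketch (not part of the package source): popVar() returns the
# population variance-covariance matrix, i.e. the sample variance from var()
# rescaled by (n-1)/n. Wrapped in if(FALSE) so it never executes when the file
# is sourced; assumes the compiled AlphaSimR routines are loaded.
if(FALSE){
  X = matrix(rnorm(50), nrow=10, ncol=5)
  all.equal(popVar(X), var(X)*(nrow(X)-1)/nrow(X)) # expected TRUE
}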
#' @title Make designed crosses
#'
#' @description
#' Makes crosses within a population using a user supplied
#' crossing plan.
#'
#' @param pop an object of \code{\link{Pop-class}}
#' @param crossPlan a matrix with two columns representing
#' female and male parents. Either integers for the positions in
#' the population or character strings for the IDs.
#' @param nProgeny number of progeny per cross
#' @param simParam an object of \code{\link{SimParam}}
#'
#' @return Returns an object of \code{\link{Pop-class}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' #Cross individual 1 with individual 10
#' crossPlan = matrix(c(1,10), nrow=1, ncol=2)
#' pop2 = makeCross(pop, crossPlan, simParam=SP)
#'
#' @export
makeCross = function(pop,crossPlan,nProgeny=1,
                     simParam=NULL){
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }
  if(pop@ploidy%%2L != 0L){
    stop("You cannot cross individuals with odd ploidy levels")
  }
  if(is.character(crossPlan)){ #Match by ID
    crossPlan = cbind(match(crossPlan[,1],pop@id),
                      match(crossPlan[,2],pop@id))
    if(any(is.na(crossPlan))){
      stop("Failed to match supplied IDs")
    }
  }
  if((max(crossPlan)>nInd(pop)) | (min(crossPlan)<1L)){
    stop("Invalid crossPlan")
  }
  if(nProgeny>1){
    crossPlan = cbind(rep(crossPlan[,1],each=nProgeny),
                      rep(crossPlan[,2],each=nProgeny))
  }
  tmp = cross(pop@geno,
              crossPlan[,1],
              pop@geno,
              crossPlan[,2],
              simParam$femaleMap,
              simParam$maleMap,
              simParam$isTrackRec,
              pop@ploidy,
              pop@ploidy,
              simParam$v,
              simParam$p,
              simParam$femaleCentromere,
              simParam$maleCentromere,
              simParam$quadProb,
              simParam$nThreads)
  dim(tmp$geno) = NULL # Account for matrix bug in RcppArmadillo
  rPop = new("RawPop",
             nInd=nrow(crossPlan),
             nChr=pop@nChr,
             ploidy=pop@ploidy,
             nLoci=pop@nLoci,
             geno=tmp$geno)
  if(simParam$isTrackRec){
    hist = tmp$recHist
  }else{
    hist = NULL
  }
  return(.newPop(rawPop=rPop,
                 mother=pop@id[crossPlan[,1]],
                 father=pop@id[crossPlan[,2]],
                 iMother=pop@iid[crossPlan[,1]],
                 iFather=pop@iid[crossPlan[,2]],
                 femaleParentPop=pop,
                 maleParentPop=pop,
                 hist=hist,
                 simParam=simParam))
}

#' @title Make random crosses
#'
#' @description
#' A wrapper for \code{\link{makeCross}} that randomly
#' selects parental combinations from all possible combinations.
#'
#' @param pop an object of \code{\link{Pop-class}}
#' @param nCrosses total number of crosses to make
#' @param nProgeny number of progeny per cross
#' @param balance if using sexes, this option will balance the number
#' of progeny per parent
#' @param parents an optional vector of indices for allowable parents
#' @param ignoreSexes should sexes be ignored
#' @param simParam an object of \code{\link{SimParam}}
#'
#' @return Returns an object of \code{\link{Pop-class}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' #Make 10 crosses
#' pop2 = randCross(pop, 10, simParam=SP)
#'
#' @export
randCross = function(pop,nCrosses,nProgeny=1,
                     balance=TRUE,parents=NULL,
                     ignoreSexes=FALSE,
                     simParam=NULL){
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }
  if(is.null(parents)){
    parents = 1:pop@nInd
  }else{
    parents = as.integer(parents)
  }
  n = length(parents)
  if(n<=1){
    stop("The population must contain more than 1 individual")
  }
  if(simParam$sexes=="no" | ignoreSexes){
    crossPlan = sampHalfDialComb(n, nCrosses)
    crossPlan[,1] = parents[crossPlan[,1]]
    crossPlan[,2] = parents[crossPlan[,2]]
  }else{
    female = which(pop@sex=="F" & (1:pop@nInd)%in%parents)
    nFemale = length(female)
    if(nFemale==0){
      stop("population doesn't contain any females")
    }
    male = which(pop@sex=="M" & (1:pop@nInd)%in%parents)
    nMale = length(male)
    if(nMale==0){
      stop("population doesn't contain any males")
    }
    if(balance){
      female = female[sample.int(nFemale, nFemale)]
      female = rep(female, length.out=nCrosses)
      tmp = male[sample.int(nMale, nMale)]
      n = nCrosses%/%nMale + 1
      male = NULL
      for(i in 1:n){
        take = nMale - (i:(nMale+i-1))%%nMale
        male = c(male, tmp[take])
      }
      male = male[1:nCrosses]
      crossPlan = cbind(female,male)
    }else{
      crossPlan = sampAllComb(nFemale,
                              nMale,
                              nCrosses)
      crossPlan[,1] = female[crossPlan[,1]]
      crossPlan[,2] = male[crossPlan[,2]]
    }
  }
  return(makeCross(pop=pop,crossPlan=crossPlan,nProgeny=nProgeny,simParam=simParam))
}

#' @title Select and randomly cross
#'
#' @description
#' This is a wrapper that combines the functionalities of
#' \code{\link{randCross}} and \code{\link{selectInd}}. The
#' purpose of this wrapper is to combine both selection and
#' crossing in one function call that minimizes the number
#' of intermediate populations created. This reduces RAM usage
#' and simplifies code writing. Note that this wrapper does not
#' provide the full functionality of either function.
#'
#' @param pop an object of \code{\link{Pop-class}}
#' @param nInd the number of individuals to select. These individuals
#' are selected without regard to sex and it supersedes values
#' for nFemale and nMale. Thus, if the simulation uses sexes, it is
#' likely better to leave this value as NULL and use nFemale and nMale
#' instead.
#' @param nFemale the number of females to select. This value is ignored
#' if nInd is set.
#' @param nMale the number of males to select. This value is ignored
#' if nInd is set.
#' @param nCrosses total number of crosses to make
#' @param nProgeny number of progeny per cross
#' @param trait the trait for selection. Either a number indicating
#' a single trait or a function returning a vector of length nInd.
#' @param use select on genetic values "gv", estimated
#' breeding values "ebv", breeding values "bv", phenotypes "pheno",
#' or randomly "rand"
#' @param selectTop selects highest values if true.
#' Selects lowest values if false.
#' @param simParam an object of \code{\link{SimParam}}
#' @param ... additional arguments if using a function for
#' trait
#' @param balance if using sexes, this option will balance the number
#' of progeny per parent. This argument occurs after ..., so the argument
#' name must be matched exactly.
#'
#' @return Returns an object of \code{\link{Pop-class}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitA(10)
#' SP$setVarE(h2=0.5)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' #Select 4 individuals and make 8 crosses
#' pop2 = selectCross(pop, nInd=4, nCrosses=8, simParam=SP)
#'
#' @export
selectCross = function(pop,nInd=NULL,nFemale=NULL,nMale=NULL,nCrosses,
                       nProgeny=1,trait=1,use="pheno",selectTop=TRUE,
                       simParam=NULL,...,balance=TRUE){
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }
  if(!is.null(nInd)){
    parents = selectInd(pop=pop,nInd=nInd,trait=trait,use=use,
                        sex="B",selectTop=selectTop,
                        returnPop=FALSE,simParam=simParam,...)
  }else{
    if(simParam$sexes=="no")
      stop("You must specify nInd when simParam$sexes is `no`")
    if(is.null(nFemale))
      stop("You must specify nFemale if nInd is NULL")
    if(is.null(nMale))
      stop("You must specify nMale if nInd is NULL")
    females = selectInd(pop=pop,nInd=nFemale,trait=trait,use=use,
                        sex="F",selectTop=selectTop,
                        returnPop=FALSE,simParam=simParam,...)
    males = selectInd(pop=pop,nInd=nMale,trait=trait,use=use,
                      sex="M",selectTop=selectTop,
                      returnPop=FALSE,simParam=simParam,...)
    parents = c(females,males)
  }
  return(randCross(pop=pop,nCrosses=nCrosses,nProgeny=nProgeny,
                   balance=balance,parents=parents,
                   ignoreSexes=FALSE,simParam=simParam))
}

#' @title Make designed crosses
#'
#' @description
#' Makes crosses between two populations using a user supplied
#' crossing plan.
#'
#' @param females an object of \code{\link{Pop-class}} for female parents.
#' @param males an object of \code{\link{Pop-class}} for male parents.
#' @param crossPlan a matrix with two columns representing
#' female and male parents. Either integers for the positions in
#' the populations or character strings for the IDs.
#' @param nProgeny number of progeny per cross
#' @param simParam an object of \code{\link{SimParam}}
#'
#' @return Returns an object of \code{\link{Pop-class}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' #Cross individual 1 with individual 10
#' crossPlan = matrix(c(1,10), nrow=1, ncol=2)
#' pop2 = makeCross2(pop, pop, crossPlan, simParam=SP)
#'
#' @export
makeCross2 = function(females,males,crossPlan,nProgeny=1,simParam=NULL){
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }
  if((females@ploidy%%2L != 0L) |
     (males@ploidy%%2L != 0L)){
    stop("You cannot cross individuals with odd ploidy levels")
  }
  if(is.character(crossPlan)){ #Match by ID
    crossPlan = cbind(match(crossPlan[,1],females@id),
                      match(crossPlan[,2],males@id))
    if(any(is.na(crossPlan))){
      stop("Failed to match supplied IDs")
    }
  }
  if((max(crossPlan[,1])>nInd(females)) |
     (max(crossPlan[,2])>nInd(males)) |
     (min(crossPlan)<1L)){
    stop("Invalid crossPlan")
  }
  if(nProgeny>1){
    crossPlan = cbind(rep(crossPlan[,1],each=nProgeny),
                      rep(crossPlan[,2],each=nProgeny))
  }
  tmp=cross(females@geno,
            crossPlan[,1],
            males@geno,
            crossPlan[,2],
            simParam$femaleMap,
            simParam$maleMap,
            simParam$isTrackRec,
            females@ploidy,
            males@ploidy,
            simParam$v,
            simParam$p,
            simParam$femaleCentromere,
            simParam$maleCentromere,
            simParam$quadProb,
            simParam$nThreads)
  dim(tmp$geno) = NULL # Account for matrix bug in RcppArmadillo
  rPop = new("RawPop",
             nInd=nrow(crossPlan),
             nChr=females@nChr,
             ploidy=as.integer((females@ploidy+males@ploidy)/2),
             nLoci=females@nLoci,
             geno=tmp$geno)
  if(simParam$isTrackRec){
    hist = tmp$recHist
  }else{
    hist = NULL
  }
  return(.newPop(rawPop=rPop,
                 mother=females@id[crossPlan[,1]],
                 father=males@id[crossPlan[,2]],
                 iMother=females@iid[crossPlan[,1]],
                 iFather=males@iid[crossPlan[,2]],
                 femaleParentPop=females,
                 maleParentPop=males,
                 hist=hist,
                 simParam=simParam))
}

#' @title Make random crosses
#'
#' @description
#' A wrapper for \code{\link{makeCross2}} that randomly
#' selects parental combinations from all possible combinations between
#' two populations.
#'
#' @param females an object of \code{\link{Pop-class}} for female parents.
#' @param males an object of \code{\link{Pop-class}} for male parents.
#' @param nCrosses total number of crosses to make #' @param nProgeny number of progeny per cross #' @param balance this option will balance the number #' of progeny per parent #' @param femaleParents an optional vector of indices for allowable #' female parents #' @param maleParents an optional vector of indices for allowable #' male parents #' @param ignoreSexes should sex be ignored #' @param simParam an object of \code{\link{SimParam}} #' #' @return Returns an object of \code{\link{Pop-class}} #' #' @examples #' #Create founder haplotypes #' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10) #' #' #Set simulation parameters #' SP = SimParam$new(founderPop) #' \dontshow{SP$nThreads = 1L} #' #' #Create population #' pop = newPop(founderPop, simParam=SP) #' #' #Make 10 crosses #' pop2 = randCross2(pop, pop, 10, simParam=SP) #' #' @export randCross2 = function(females,males,nCrosses,nProgeny=1, balance=TRUE,femaleParents=NULL, maleParents=NULL,ignoreSexes=FALSE, simParam=NULL){ if(is.null(simParam)){ simParam = get("SP",envir=.GlobalEnv) } #Set allowable parents if(is.null(femaleParents)){ femaleParents = 1:females@nInd }else{ femaleParents = as.integer(femaleParents) } if(is.null(maleParents)){ maleParents = 1:males@nInd }else{ maleParents = as.integer(maleParents) } if(simParam$sexes=="no" | ignoreSexes){ female = femaleParents male = maleParents }else{ female = which(females@sex=="F" & (1:females@nInd)%in%femaleParents) if(length(female)==0){ stop("population doesn't contain any females") } male = which(males@sex=="M" & (1:males@nInd)%in%maleParents) if(length(male)==0){ stop("population doesn't contain any males") } } nMale = length(male) nFemale = length(female) if(balance){ female = female[sample.int(nFemale, nFemale)] female = rep(female, length.out=nCrosses) tmp = male[sample.int(nMale, nMale)] n = nCrosses%/%nMale + 1 male = NULL for(i in 1:n){ take = nMale - (i:(nMale+i-1))%%nMale male = c(male, tmp[take]) } male = male[1:nCrosses] crossPlan = cbind(female,male) }else{ crossPlan = sampAllComb(nFemale, nMale, nCrosses) crossPlan[,1] = female[crossPlan[,1]] crossPlan[,2] = male[crossPlan[,2]] } return(makeCross2(females=females,males=males, crossPlan=crossPlan,nProgeny=nProgeny, simParam=simParam)) } #' @title Self individuals #' #' @description #' Creates selfed progeny from each individual in a #' population. Only works when sexes is "no". #' #' @param pop an object of \code{\link{Pop-class}} #' @param nProgeny total number of selfed progeny per individual #' @param parents an optional vector of indices for allowable parents #' @param keepParents should previous parents be used for mother and #' father. 
#' @param simParam an object of \code{\link{SimParam}} #' #' @return Returns an object of \code{\link{Pop-class}} #' #' @examples #' #Create founder haplotypes #' founderPop = quickHaplo(nInd=2, nChr=1, segSites=10) #' #' #Set simulation parameters #' SP = SimParam$new(founderPop) #' \dontshow{SP$nThreads = 1L} #' #' #Create population #' pop = newPop(founderPop, simParam=SP) #' #' #Self pollinate each individual #' pop2 = self(pop, simParam=SP) #' #' @export self = function(pop,nProgeny=1,parents=NULL,keepParents=TRUE, simParam=NULL){ if(is.null(simParam)){ simParam = get("SP",envir=.GlobalEnv) } if(is(pop,"MultiPop")){ stopifnot(is.null(parents)) pop@pops = lapply(pop@pops, self, nProgeny=nProgeny, parents=NULL, keepParents=keepParents, simParam=simParam) return(pop) } if(is.null(parents)){ parents = 1:pop@nInd }else{ parents = as.integer(parents) } if(pop@ploidy%%2L != 0L){ stop("You can not self aneuploids") } crossPlan = rep(parents,each=nProgeny) crossPlan = cbind(crossPlan,crossPlan) tmp = cross(pop@geno, crossPlan[,1], pop@geno, crossPlan[,2], simParam$femaleMap, simParam$maleMap, simParam$isTrackRec, pop@ploidy, pop@ploidy, simParam$v, simParam$p, simParam$femaleCentromere, simParam$maleCentromere, simParam$quadProb, simParam$nThreads) dim(tmp$geno) = NULL # Account for matrix bug in RcppArmadillo rPop = new("RawPop", nInd=nrow(crossPlan), nChr=pop@nChr, ploidy=pop@ploidy, nLoci=pop@nLoci, geno=tmp$geno) if(simParam$isTrackRec){ hist = tmp$recHist }else{ hist = NULL } if(keepParents){ return(.newPop(rawPop=rPop, mother=rep(pop@mother,each=nProgeny), father=rep(pop@father,each=nProgeny), iMother=rep(pop@iid,each=nProgeny), iFather=rep(pop@iid,each=nProgeny), femaleParentPop=pop, maleParentPop=pop, hist=hist, simParam=simParam)) }else{ return(.newPop(rawPop=rPop, mother=rep(pop@id,each=nProgeny), father=rep(pop@id,each=nProgeny), iMother=rep(pop@iid,each=nProgeny), iFather=rep(pop@iid,each=nProgeny), femaleParentPop=pop, maleParentPop=pop, hist=hist, simParam=simParam)) } } #' @title Generates DH lines #' #' @description Creates DH lines from each individual in a population. #' Only works with diploid individuals. For polyploids, use #' \code{\link{reduceGenome}} and \code{\link{doubleGenome}}. #' #' @param pop an object of 'Pop' superclass #' @param nDH total number of DH lines per individual #' @param useFemale should female recombination rates be used. #' @param keepParents should previous parents be used for mother and #' father. 
#' @param simParam an object of 'SimParam' class
#'
#' @return Returns an object of \code{\link{Pop-class}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=2, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' #Create 1 DH for each individual
#' pop2 = makeDH(pop, simParam=SP)
#'
#' @export
makeDH = function(pop,nDH=1,useFemale=TRUE,keepParents=TRUE,
                  simParam=NULL){
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }
  if(is(pop,"MultiPop")){
    pop@pops = lapply(pop@pops, makeDH, nDH=nDH,
                      useFemale=useFemale, keepParents=keepParents,
                      simParam=simParam)
    return(pop)
  }
  if(pop@ploidy!=2){
    stop("Only works with diploids")
  }
  if(useFemale){
    tmp = createDH2(pop@geno,nDH,
                    simParam$femaleMap,
                    simParam$v,
                    simParam$p,
                    simParam$isTrackRec,
                    simParam$nThreads)
  }else{
    tmp = createDH2(pop@geno,nDH,
                    simParam$maleMap,
                    simParam$v,
                    simParam$p,
                    simParam$isTrackRec,
                    simParam$nThreads)
  }
  dim(tmp$geno) = NULL # Account for matrix bug in RcppArmadillo
  rPop = new("RawPop",
             nInd=as.integer(pop@nInd*nDH),
             nChr=pop@nChr,
             ploidy=pop@ploidy,
             nLoci=pop@nLoci,
             geno=tmp$geno)
  if(simParam$isTrackRec){
    hist = tmp$recHist
  }else{
    hist = NULL
  }
  if(keepParents){
    return(.newPop(rawPop=rPop,
                   mother=rep(pop@mother, each=nDH),
                   father=rep(pop@father, each=nDH),
                   isDH=TRUE,
                   iMother=rep(pop@iid, each=nDH),
                   iFather=rep(pop@iid, each=nDH),
                   femaleParentPop=pop,
                   maleParentPop=pop,
                   hist=hist,
                   simParam=simParam))
  }else{
    return(.newPop(rawPop=rPop,
                   mother=rep(pop@id, each=nDH),
                   father=rep(pop@id, each=nDH),
                   isDH=TRUE,
                   iMother=rep(pop@iid, each=nDH),
                   iFather=rep(pop@iid, each=nDH),
                   femaleParentPop=pop,
                   maleParentPop=pop,
                   hist=hist,
                   simParam=simParam))
  }
}

# Sort Pedigree
#
# id, id of individual
# mother, name of individual's mother
# father, name of individual's father
# maxCycle, number of loops for attempting to sort the pedigree
sortPed = function(id, mother, father, maxCycle=100){
  nInd = length(id)
  output = data.frame(gen=integer(nInd),
                      id=as.character(id),
                      mother=match(mother, id),
                      father=match(father, id),
                      motherID=as.character(mother),
                      fatherID=as.character(father))
  unsorted = rep(TRUE, nInd)
  for(gen in 1:maxCycle){
    for(i in which(unsorted)){
      if(is.na(output$mother[i]) & is.na(output$father[i])){
        # Is a founder
        output$gen[i] = 1
        unsorted[i] = FALSE
      }else if(is.na(output$mother[i])){
        # Mother is a founder
        if(!unsorted[output$father[i]]){
          output$gen[i] = output$gen[output$father[i]] + 1
          unsorted[i] = FALSE
        }
      }else if(is.na(output$father[i])){
        # Father is a founder
        if(!unsorted[output$mother[i]]){
          output$gen[i] = output$gen[output$mother[i]] + 1
          unsorted[i] = FALSE
        }
      }else{
        # Both parents are in the pedigree
        if(!unsorted[output$mother[i]] & !unsorted[output$father[i]]){
          output$gen[i] = pmax(output$gen[output$mother[i]],
                               output$gen[output$father[i]]) + 1
          unsorted[i] = FALSE
        }
      }
    }
  }
  if(any(unsorted)){
    stop("Failed to sort pedigree, may contain loops or require a higher maxCycle")
  }
  return(output)
}

#' @title Pedigree cross
#'
#' @description
#' Creates a \code{\link{Pop-class}} from a generic
#' pedigree and a set of founder individuals.
#'
#' @param founderPop a \code{\link{Pop-class}}
#' @param id a vector of unique identifiers for individuals
#' in the pedigree. The values of these IDs are separate from
#' the IDs in the founderPop if matchID=FALSE.
#' @param mother a vector of identifiers for the mothers
#' of individuals in the pedigree. Must match one of the
#' elements in the id vector or they will be treated as unknown.
#' @param father a vector of identifiers for the fathers
#' of individuals in the pedigree. Must match one of the
#' elements in the id vector or they will be treated as unknown.
#' @param matchID indicates if the IDs in founderPop should be
#' matched to the id argument. See details.
#' @param maxCycle the maximum number of loops to make over the pedigree
#' to sort it.
#' @param DH an optional vector indicating if an individual
#' should be made a doubled haploid.
#' @param nSelf an optional vector indicating how many generations an
#' individual should be selfed.
#' @param useFemale If creating DH lines, should female recombination
#' rates be used. This parameter has no effect if recombRatio=1.
#' @param simParam an object of 'SimParam' class
#'
#' @details
#' The way in which the user supplied pedigree is used depends on
#' the value of matchID. If matchID is TRUE, the IDs in the user
#' supplied pedigree are matched against the IDs in founderPop. If matchID
#' is FALSE, founder individuals in the user supplied pedigree are
#' randomly sampled from founderPop.
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=2, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' #Pedigree for a biparental cross with 7 generations of selfing
#' id = 1:10
#' mother = c(0,0,1,3:9)
#' father = c(0,0,2,3:9)
#' pop2 = pedigreeCross(pop, id, mother, father, simParam=SP)
#'
#' @export
pedigreeCross = function(founderPop, id, mother, father, matchID=FALSE,
                         maxCycle=100, DH=NULL, nSelf=NULL, useFemale=TRUE,
                         simParam=NULL){
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }
  if(simParam$sexes!="no"){
    stop("pedigreeCross currently only works with sex='no'")
  }

  # Coerce input data
  id = as.character(id)
  mother = as.character(mother)
  father = as.character(father)
  if(is.null(DH)){
    DH = logical(length(id))
  }else{
    DH = as.logical(DH)
  }
  if(is.null(nSelf)){
    nSelf = rep(0, length(id))
  }

  # Check input data
  stopifnot(!any(duplicated(id)),
            length(id)==length(mother),
            length(id)==length(father),
            length(id)==length(DH),
            length(id)==length(nSelf))

  # Sort pedigree (identifies potential problems)
  ped = sortPed(id=id, mother=mother, father=father,
                maxCycle=maxCycle)

  # Create list for new population
  output = vector("list", length=length(id))

  # Order and assign founders
  isFounder = is.na(ped$father) & is.na(ped$mother)
  motherIsFounder = is.na(ped$mother) & !is.na(ped$father)
  fatherIsFounder = is.na(ped$father) & !is.na(ped$mother)
  founderNames = c(unique(id[isFounder]),
                   unique(mother[motherIsFounder]),
                   unique(father[fatherIsFounder]))
  nFounder = length(founderNames)
  if(matchID){
    # Check that all founders are present
    founderPresent = founderNames%in%founderPop@id
    if(!all(founderPresent)){
      stop(paste("The following founders are missing:",
                 founderNames[!founderPresent]))
    }
  }else{
    # Check that there are enough founders
    if(nFounder>founderPop@nInd){
      stop(paste("Pedigree requires",nFounder,"founders, but only",
                 founderPop@nInd,"were supplied"))
    }
    # Randomly assign individuals as founders
    founderPop = founderPop[sample.int(founderPop@nInd,nFounder)]
    # isFounder
    n1 = 1
    n2 = sum(isFounder)
    founderPop@id[n1:n2] = id[isFounder]
    founderPop@mother[n1:n2] = mother[isFounder]
    founderPop@father[n1:n2] = father[isFounder]
    # motherIsFounder
    n = sum(motherIsFounder)
    if(n>=1){
      n1 = n2 + 1
      n2 = n2 + n
      founderPop@id[n1:n2] = mother[motherIsFounder]
      founderPop@mother[n1:n2] = rep("0", n2-n1+1)
      founderPop@father[n1:n2] = rep("0", n2-n1+1)
    }
    # fatherIsFounder
    n = sum(fatherIsFounder)
    if(n>=1){
      n1 = n2 + 1
      n2 = n2 + n
      founderPop@id[n1:n2] = father[fatherIsFounder]
      founderPop@mother[n1:n2] = rep("0", n2-n1+1)
      founderPop@father[n1:n2] = rep("0", n2-n1+1)
    }
  }

  # Create individuals
  crossPlan = matrix(c(1,1),ncol=2)
  for(gen in 1:max(ped$gen)){
    for(i in which(ped$gen==gen)){
      if(isFounder[i]){
        # Copy over founder individual
        output[[i]] = founderPop[id[i]]
      }else{
        if(motherIsFounder[i]){
          # Cross founder to newly created individual
          output[[i]] = makeCross2(founderPop[id[i]],
                                   output[[ped$father[i]]],
                                   crossPlan=crossPlan,
                                   simParam=simParam)
        }else if(fatherIsFounder[i]){
          # Cross newly created individual to founder
          output[[i]] = makeCross2(output[[ped$mother[i]]],
                                   founderPop[id[i]],
                                   crossPlan=crossPlan,
                                   simParam=simParam)
        }else{
          # Cross two newly created individuals
          output[[i]] = makeCross2(output[[ped$mother[i]]],
                                   output[[ped$father[i]]],
                                   crossPlan=crossPlan,
                                   simParam=simParam)
        }
      }
      # Self?
      if(nSelf[i]>0){
        for(j in 1:nSelf[i]){
          output[[i]] = self(output[[i]], simParam=simParam)
        }
      }
      # Make the individual a DH?
      if(DH[i]){
        output[[i]] = makeDH(output[[i]],
                             useFemale=useFemale,
                             simParam=simParam)
      }
    }
  }

  # Collapse list to a population
  output = mergePops(output)

  # Copy over names
  output@id = id
  output@mother = mother
  output@father = father

  return(output)
}
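# Illustrative sketch (not part of the package source): the internal sortPed()
# helper assigns generation numbers so that parents always precede their
# offspring; parent IDs not found in `id` (e.g. "0") come back as NA and mark
# founders. Wrapped in if(FALSE) so it never executes when the file is sourced.
if(FALSE){
  ped = sortPed(id     = c("A","B","C","D"),
                mother = c("0","0","A","C"),
                father = c("0","0","B","B"))
  ped$gen # founders A and B get 1, C gets 2, and D gets 3
}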
#' @title New MapPop #' #' @description #' Creates a new \code{\link{MapPop-class}} from user supplied #' genetic maps and haplotypes. #' #' @param genMap a list of genetic maps #' @param haplotypes a list of matrices or data.frames that #' can be coerced to matrices. See details. #' @param inbred are individuals fully inbred #' @param ploidy ploidy level of the organism #' #' @details #' Each item of genMap must be a vector of ordered genetic lengths in #' Morgans. The first value must be zero. The length of the vector #' determines the number of segregating sites on the chromosome. #' #' Each item of haplotypes must be coercible to a matrix. The columns #' of this matrix correspond to segregating sites. The number of rows #' must match the number of individuals times the ploidy if using #' inbred=FALSE. If using inbred=TRUE, the number of rows must equal #' the number of individuals. The haplotypes can be stored as numeric, #' integer or raw. The underlying C++ function will use raw. #' #' @return an object of \code{\link{MapPop-class}} #' #' @examples #' # Create genetic map for two chromosomes, each 1 Morgan long #' # Each chromosome contains 11 equally spaced segregating sites #' genMap = list(seq(0,1,length.out=11), #' seq(0,1,length.out=11)) #' #' # Create haplotypes for 10 outbred individuals #' chr1 = sample(x=0:1,size=20*11,replace=TRUE) #' chr1 = matrix(chr1,nrow=20,ncol=11) #' chr2 = sample(x=0:1,size=20*11,replace=TRUE) #' chr2 = matrix(chr2,nrow=20,ncol=11) #' haplotypes = list(chr1,chr2) #' #' founderPop = newMapPop(genMap=genMap, haplotypes=haplotypes) #' #' @export newMapPop = function(genMap, haplotypes, inbred=FALSE, ploidy=2L){ stopifnot(length(genMap)==length(haplotypes)) nRow = lapply(haplotypes,nrow) nRow = unlist(nRow) ploidy = as.integer(ploidy) if(length(nRow)>1L){ if(any(nRow[1]!=nRow)){ stop("Number of rows must be equal in haplotypes") } nRow = nRow[1] } if(inbred){ nInd = nRow }else{ if(nRow%%ploidy != 0L){ stop("Number of haplotypes must be divisible by ploidy") } nInd = nRow/ploidy } nCol = lapply(haplotypes,ncol) nCol = unlist(nCol) segSites = lapply(genMap,length) segSites = unlist(segSites) if(!all(nCol == segSites)){ stop("Number of segregating sites in haplotypes and genMap don't match") } output = vector("list",length(genMap)) for(chr in 1:length(genMap)){ geno = packHaplo(as.matrix(haplotypes[[chr]]), ploidy=ploidy,inbred=inbred) output[[chr]] = new("MapPop", nInd=as.integer(nInd), nChr=1L, ploidy=ploidy, nLoci=as.integer(segSites[chr]), geno=list(geno), genMap=genMap[chr], centromere=max(genMap[[chr]])/2, inbred=inbred) if(is.null(names(output[[chr]]@genMap))){ names(output[[chr]]@genMap) = as.character(chr) } if(is.null(names(output[[chr]]@genMap[[1]]))){ names(output[[chr]]@genMap[[1]]) = paste(chr,1:segSites[chr],sep="_") } } output = do.call("cChr",output) return(output) } #' @title Create founder haplotypes using MaCS #' #' @description Uses the MaCS software to produce founder haplotypes #' \insertCite{MaCS}{AlphaSimR}. #' #' @param nInd number of individuals to simulate #' @param nChr number of chromosomes to simulate #' @param segSites number of segregating sites to keep per chromosome. A #' value of NULL results in all sites being retained. #' @param inbred should founder individuals be inbred #' @param species species history to simulate. See details. #' @param split an optional historic population split in terms of generations ago. #' @param ploidy ploidy level of organism #' @param manualCommand user provided MaCS options. 
#' For advanced users only.
#' @param manualGenLen user provided genetic length. This must be supplied if using
#' manualCommand. If not using manualCommand, this value will replace the predefined
#' genetic length for the species. However, this genetic length is only used by
#' AlphaSimR and is not passed to MaCS, so MaCS still uses the predefined genetic length.
#' For advanced users only.
#' @param nThreads if OpenMP is available, this will allow for simulating chromosomes in parallel.
#' If the value is NULL, the number of threads is automatically detected.
#'
#' @details
#' There are currently four species histories available: GENERIC, CATTLE, WHEAT, and MAIZE.
#'
#' The GENERIC history is meant to be a reasonable all-purpose choice. It runs quickly and
#' models a population with an effective population size that has gone through several historic
#' bottlenecks. This species history is used as the default arguments in the \code{\link{runMacs2}}
#' function, so the user should examine this function for the details of how the species is modeled.
#'
#' The CATTLE history is based off of real genome sequence data \insertCite{cattle}{AlphaSimR}.
#'
#' The WHEAT \insertCite{gaynor_2017}{AlphaSimR} and MAIZE \insertCite{hickey_2014}{AlphaSimR}
#' histories have been included due to their use in previous simulations. However, it should
#' be noted that neither faithfully simulates its respective species. This is apparent by
#' the low number of segregating sites simulated by each history relative to their real-world
#' analogs. Adjusting these histories to better represent their real-world analogs would result
#' in a drastic increase to runtime.
#'
#' @return an object of \code{\link{MapPop-class}}
#'
#' @references
#' \insertAllCited{}
#'
#' @examples
#' # Creates a population of 10 outbred individuals
#' # Their genome consists of 1 chromosome and 100 segregating sites
#' \dontrun{
#' founderPop = runMacs(nInd=10,nChr=1,segSites=100)
#' }
#' @export
runMacs = function(nInd,nChr=1,
                   segSites=NULL,
                   inbred=FALSE,
                   species="GENERIC",
                   split=NULL,
                   ploidy=2L,
                   manualCommand=NULL,
                   manualGenLen=NULL,
                   nThreads=NULL){
  if(is.null(nThreads)){
    nThreads = getNumThreads()
  }
  nInd = as.integer(nInd)
  nChr = as.integer(nChr)
  ploidy = as.integer(ploidy)
  if(nChr<nThreads){
    nThreads = nChr
  }

  # Note that the seed doesn't really control the random number seed.
# This is partially because MaCS is called within an OpenMP loop, but # it is still a problem even with nThreads = 1 seed = sapply(1:nChr,function(x){as.character(sample.int(1e8,1))}) if(is.null(segSites)){ segSites = rep(0L,nChr) }else if(length(segSites)==1L){ segSites = rep(as.integer(segSites),nChr) } popSize = ifelse(inbred,nInd,ploidy*nInd) if(!is.null(manualCommand)){ if(is.null(manualGenLen)) stop("You must define manualGenLen") command = paste0(popSize," ",manualCommand," -s ") genLen = manualGenLen }else{ species = toupper(species) if(species=="GENERIC"){ #GENERIC---- genLen = 1.0 Ne = 100 speciesParams = "1E8 -t 1E-5 -r 4E-6" speciesHist = "-eN 0.25 5.0 -eN 2.50 15.0 -eN 25.00 60.0 -eN 250.00 120.0 -eN 2500.00 1000.0" }else if(species=="CATTLE"){ #CATTLE---- genLen = 1.0 Ne = 90 speciesParams = "1E8 -t 9E-6 -r 3.6E-6" speciesHist = "-eN 0.011 1.33 -eN 0.019 2.78 -eN 0.036 3.89 -eN 0.053 11.11 -eN 0.069 16.67 -eN 0.431 22.22 -eN 1.264 27.78 -eN 1.819 38.89 -eN 4.875 77.78 -eN 6.542 111.11 -eN 9.319 188.89 -eN 92.097 688.89 -eN 2592.097 688.89" }else if(species=="WHEAT"){ #WHEAT---- genLen = 1.43 Ne = 50 speciesParams = "8E8 -t 4E-7 -r 3.6E-7" speciesHist = "-eN 0.03 1 -eN 0.05 2 -eN 0.10 4 -eN 0.15 6 -eN 0.20 8 -eN 0.25 10 -eN 0.30 12 -eN 0.35 14 -eN 0.40 16 -eN 0.45 18 -eN 0.50 20 -eN 1.00 40 -eN 2.00 60 -eN 3.00 80 -eN 4.00 100 -eN 5.00 120 -eN 10.00 140 -eN 20.00 160 -eN 30.00 180 -eN 40.00 200 -eN 50.00 240 -eN 100.00 320 -eN 200.00 400 -eN 300.00 480 -eN 400.00 560 -eN 500.00 640" }else if(species=="MAIZE"){ #MAIZE---- genLen = 2.0 Ne = 100 speciesParams = "2E8 -t 5E-6 -r 4E-6" speciesHist = "-eN 0.03 1 -eN 0.05 2 -eN 0.10 4 -eN 0.15 6 -eN 0.20 8 -eN 0.25 10 -eN 0.30 12 -eN 0.35 14 -eN 0.40 16 -eN 0.45 18 -eN 0.50 20 -eN 2.00 40 -eN 3.00 60 -eN 4.00 80 -eN 5.00 100" }else{ stop(paste("No rules for species",species)) } # Removed due to very long run time # #EUROPEAN # genLen = 1.3 # Ne = 512000 # speciesParams = "1.3E8 -t 0.0483328 -r 0.02054849" # speciesHist = "-G 1.0195 -eG 0.0001000977 1.0031 -eN 0.0004492188 0.002015625 -eN 0.000449707 0.003634766" if(is.null(split)){ splitI = "" splitJ = "" }else{ stopifnot(popSize%%2==0) splitI = paste(" -I 2",popSize%/%2,popSize%/%2) splitJ = paste(" -ej",split/(4*Ne)+0.000001,"2 1") } command = paste0(popSize," ",speciesParams,splitI," ",speciesHist,splitJ," -s ") } if(!is.null(manualGenLen)){ genLen = manualGenLen } if(length(genLen)==1){ genLen = rep(genLen, nChr) } # Run MaCS macsOut = MaCS(command, segSites, inbred, ploidy, nThreads, seed) dim(macsOut$geno) = NULL # Account for matrix bug in RcppArmadillo # Check if desired number of loci were obtained nLoci = sapply(macsOut$genMap,length) isLimited = segSites>0 if(any(nLoci[isLimited] != segSites[isLimited])){ stop("MaCS did not return enough segSites, use segSites=NULL to return all sites generated by MaCS") } genMap = vector("list",nChr) for(i in 1:nChr){ genMap[[i]] = genLen[i]*c(macsOut$genMap[[i]]-macsOut$genMap[[i]][1]) names(genMap[[i]]) = paste(i,1:length(genMap[[i]]),sep="_") } names(genMap) = as.character(1:nChr) output = new("MapPop", nInd=nInd, nChr=nChr, ploidy=ploidy, nLoci=nLoci, geno=macsOut$geno, genMap=genMap, centromere=sapply(genMap,max)/2, inbred=inbred) return(output) } #' @title Alternative wrapper for MaCS #' #' @description #' A wrapper function for \code{\link{runMacs}}. This wrapper is designed #' to provide a more intuitive interface for writing custom commands #' in MaCS \insertCite{MaCS}{AlphaSimR}. 
#' It effectively automates the creation
#' of an appropriate line for the manualCommand argument in \code{\link{runMacs}}
#' using user supplied variables, but only allows for a subset of the functionality
#' offered by this argument. The default arguments of this function were chosen to match
#' species="GENERIC" in \code{\link{runMacs}}.
#'
#' @param nInd number of individuals to simulate
#' @param nChr number of chromosomes to simulate
#' @param segSites number of segregating sites to keep per chromosome
#' @param Ne effective population size
#' @param bp base pair length of chromosome
#' @param genLen genetic length of chromosome in Morgans
#' @param mutRate per base pair mutation rate
#' @param histNe effective population size in previous
#' generations
#' @param histGen number of generations ago for effective
#' population sizes given in histNe
#' @param inbred should founder individuals be inbred
#' @param split an optional historic population split in terms of generations ago
#' @param ploidy ploidy level of organism
#' @param returnCommand should the command passed to manualCommand in
#' \code{\link{runMacs}} be returned. If TRUE, MaCS will not be called and
#' the command is returned instead.
#' @param nThreads if OpenMP is available, this will allow for simulating chromosomes in parallel.
#' If the value is NULL, the number of threads is automatically detected.
#'
#' @return an object of \code{\link{MapPop-class}} or if
#' returnCommand is true a string giving the MaCS command passed to
#' the manualCommand argument of \code{\link{runMacs}}.
#'
#' @references
#' \insertAllCited{}
#'
#' @examples
#' # Creates a population of 10 outbred individuals
#' # Their genome consists of 1 chromosome and 100 segregating sites
#' # The command is equivalent to using species="GENERIC" in runMacs
#' \dontrun{
#' founderPop = runMacs2(nInd=10,nChr=1,segSites=100)
#' }
#' @export
runMacs2 = function(nInd,nChr=1,segSites=NULL,Ne=100,
                    bp=1e8,genLen=1,mutRate=2.5e-8,
                    histNe=c(500,1500,6000,12000,100000),
                    histGen=c(100,1000,10000,100000,1000000),
                    inbred=FALSE,split=NULL,ploidy=2L,returnCommand=FALSE,
                    nThreads=NULL){
  stopifnot(length(histNe)==length(histGen))

  # Adjust Ne according to ploidy level
  Ne = Ne*(ploidy/2L)
  if(!is.null(histNe)){
    histNe = histNe*(ploidy/2L)
  }

  speciesParams = paste(bp,"-t",4*Ne*mutRate,
                        "-r",4*Ne*genLen/bp)
  speciesHist = ""
  if(length(histNe)>0){
    histNe = histNe/Ne
    histGen = histGen/(4*Ne)
    for(i in 1:length(histNe)){
      speciesHist = paste(speciesHist,"-eN",
                          histGen[i],histNe[i])
    }
  }
  if(is.null(split)){
    command = paste(speciesParams,speciesHist)
  }else{
    popSize = ifelse(inbred,nInd,ploidy*nInd)
    command = paste(speciesParams,
                    paste("-I 2",popSize%/%2,popSize%/%2),
                    speciesHist,
                    paste("-ej",split/(4*Ne)+0.000001,"2 1"))
  }
  if(returnCommand){
    return(command)
  }
  return(runMacs(nInd=nInd,nChr=nChr,segSites=segSites,
                 inbred=inbred,species="TEST",split=NULL,
                 ploidy=ploidy,manualCommand=command,
                 manualGenLen=genLen,nThreads=nThreads))
}

#' @title Sample haplotypes from a MapPop
#'
#' @description
#' Creates a new \code{\link{MapPop-class}} from an existing
#' \code{\link{MapPop-class}} by randomly sampling haplotypes.
#'
#' @param mapPop the \code{\link{MapPop-class}} used to
#' sample haplotypes
#' @param nInd the number of individuals to create
#' @param inbred should new individuals be fully inbred
#' @param ploidy new ploidy level for organism. If NULL,
#' the ploidy level of the mapPop is used.
#' @param replace should haplotypes be sampled with replacement
#'
#' @return an object of \code{\link{MapPop-class}}
#'
#' @examples
#' founderPop = quickHaplo(nInd=2,nChr=1,segSites=11,inbred=TRUE)
#' founderPop = sampleHaplo(mapPop=founderPop,nInd=20)
#'
#' @export
sampleHaplo = function(mapPop,nInd,inbred=FALSE,ploidy=NULL,replace=TRUE){
  if(is.null(ploidy)) ploidy = mapPop@ploidy
  nHaplo = mapPop@nInd*mapPop@ploidy
  if(inbred){
    nSamp = nInd
  }else{
    nSamp = nInd*ploidy
  }
  nBin = mapPop@nLoci%/%8L + (mapPop@nLoci%%8L > 0L)
  if(!replace) stopifnot(nHaplo>=nSamp)
  output = vector("list",mapPop@nChr)
  for(chr in 1:mapPop@nChr){
    haplo = sample.int(nHaplo,nSamp,replace=replace)
    geno = array(data=as.raw(0),
                 dim=c(nBin[chr],
                       ploidy,nInd))
    outHap = 1L
    outInd = 1L
    for(i in 1:length(haplo)){
      inHap = (haplo[i]-1L)%%mapPop@ploidy + 1L
      inInd = (haplo[i]-1L)%/%mapPop@ploidy + 1L
      if(inbred){
        for(outHap in 1:ploidy){
          geno[,outHap,outInd] = mapPop@geno[[chr]][,inHap,inInd]
        }
        outInd = outInd+1L
      }else{
        geno[,outHap,outInd] = mapPop@geno[[chr]][,inHap,inInd]
        outHap = outHap%%ploidy+1L
        if(outHap==1L){
          outInd = outInd+1L
        }
      }
    }
    output[[chr]] = new("MapPop",
                        nInd=as.integer(nInd),
                        nChr=1L,
                        ploidy=as.integer(ploidy),
                        nLoci=mapPop@nLoci[chr],
                        geno=list(geno),
                        genMap=mapPop@genMap[chr],
                        centromere=mapPop@centromere[chr],
                        inbred=inbred)
  }
  output = do.call("cChr",output)
  return(output)
}

#' @title Quick founder haplotype simulation
#'
#' @description Rapidly simulates founder haplotypes by randomly
#' sampling 0s and 1s. This is equivalent to having all loci with
#' allele frequency 0.5 and being in linkage equilibrium.
#'
#' @param nInd number of individuals to simulate
#' @param nChr number of chromosomes to simulate
#' @param segSites number of segregating sites per chromosome
#' @param genLen genetic length of chromosomes
#' @param ploidy ploidy level of organism
#' @param inbred should founder individuals be inbred
#'
#' @return an object of \code{\link{MapPop-class}}
#'
#' @examples
#' # Creates a population of 10 outbred individuals
#' # Their genome consists of 1 chromosome and 100 segregating sites
#' founderPop = quickHaplo(nInd=10,nChr=1,segSites=100)
#'
#' @export
quickHaplo = function(nInd,nChr,segSites,genLen=1,ploidy=2L,inbred=FALSE){
  ploidy = as.integer(ploidy)
  nInd = as.integer(nInd)
  nChr = as.integer(nChr)
  segSites = as.integer(segSites)
  if(length(segSites)==1) segSites = rep(segSites,nChr)
  if(length(genLen)==1) genLen = rep(genLen,nChr)
  nBins = segSites%/%8L + (segSites%%8L > 0L)
  centromere = genLen/2
  genMap = vector("list",nChr)
  geno = vector("list",nChr)
  for(i in 1:nChr){
    genMap[[i]] = seq(0,genLen[i],length.out=segSites[i])
    names(genMap[[i]]) = paste(i, 1:segSites[i], sep="_")
    geno[[i]] = array(sample(as.raw(0:255),
                             nInd*ploidy*nBins[i],
                             replace=TRUE),
                      dim = c(nBins[i],ploidy,nInd))
    if(inbred){
      if(ploidy>1){
        for(j in 2:ploidy){
          geno[[i]][,j,] = geno[[i]][,1,]
        }
      }
    }
  }
  names(genMap) = as.character(1:nChr)
  return(new("MapPop",
             nInd=nInd,
             nChr=nChr,
             ploidy=ploidy,
             nLoci=segSites,
             geno=geno,
             genMap=genMap,
             centromere=centromere,
             inbred=inbred))
}

#' @title Add segregating site to MapPop
#'
#' @description This function allows for adding a new
#' segregating site with user supplied genotypes to a MapPop.
#' The position of the site is set using a genetic map position.
#'
#' @param mapPop an object of \code{\link{MapPop-class}}
#' @param siteName name to give the segregating site
#' @param chr which chromosome to add the site to
#' @param mapPos genetic map position of the site in Morgans
#' @param haplo haplotypes for the site
#'
#' @return an object of \code{\link{MapPop-class}}
#'
#' @examples
#' # Creates a population of 10 outbred individuals
#' # Their genome consists of 1 chromosome and 2 segregating sites
#' founderPop = quickHaplo(nInd=10,nChr=1,segSites=2)
#'
#' # Add a locus at the 0.5 Morgan map position
#' haplo = matrix(sample(x=0:1, size=20, replace=TRUE), ncol=1)
#'
#' founderPop2 = addSegSite(founderPop, siteName="x", chr=1, mapPos=0.5, haplo=haplo)
#'
#' pullSegSiteHaplo(founderPop2)
#'
#' @export
addSegSite = function(mapPop, siteName, chr, mapPos, haplo){
  # Check validity of input data
  stopifnot(is(mapPop, "MapPop"))
  stopifnot(length(haplo)==(mapPop@nInd*mapPop@ploidy))

  # Check that name isn't already present
  allSiteNames = unlist(unname(lapply(mapPop@genMap, names)))
  if(siteName%in%allSiteNames){
    stop("The siteName '",siteName, "' is already in use.")
  }

  # Coerce haplo to a raw matrix
  haplo = matrix(as.raw(haplo), ncol=1)

  # Convert chr to a number, if needed
  if(is.character(chr)){
    chr = match(chr, names(mapPop@genMap))
  }

  # Extract temporary map and geno for target chromosome
  chrMap = mapPop@genMap[[chr]]
  markerNames = names(chrMap)
  chrGeno = mapPop@geno[chr] # Leaving in list format for getHaplo

  # Extract haplotype matrix from the bit array
  haploMat = getHaplo(chrGeno,
                      mapPop@nLoci[chr],
                      1:mapPop@nLoci[chr],
                      getNumThreads())

  # Find position of insertion
  pos = findInterval(x=mapPos, vec=chrMap)

  # Insert site
  if(pos==length(chrMap)){
    # Inserting site at the end
    chrMap = c(chrMap, mapPos)
    names(chrMap) = c(markerNames, siteName)
    haploMat = cbind(haploMat, haplo)
  }else if(pos==0){
    # Inserting site at the beginning
    chrMap = c(mapPos, chrMap)
    names(chrMap) = c(siteName, markerNames)
    chrMap = chrMap-chrMap[1]
    haploMat = cbind(haplo, haploMat)
  }else{
    # Inserting site in the middle
    chrMap = c(chrMap[1:pos], mapPos, chrMap[(pos+1L):length(chrMap)])
    names(chrMap) = c(markerNames[1:pos], siteName,
                      markerNames[(pos+1L):length(markerNames)])
    haploMat = cbind(haploMat[,1:pos,drop=FALSE], haplo,
                     haploMat[,(pos+1L):ncol(haploMat),drop=FALSE])
  }

  # Convert haplotype matrix back to a bit array
  chrGeno = packHaplo(haploMat, ploidy=mapPop@ploidy, inbred=FALSE)

  # Write back the updated map and geno
  mapPop@genMap[[chr]] = chrMap
  mapPop@geno[[chr]] = chrGeno

  # Add one to locus count
  mapPop@nLoci[chr] = mapPop@nLoci[chr] + 1L

  # Check if an inbred population has become outbred
  if(mapPop@inbred){
    M = pullMarkerGeno(mapPop, markers=siteName)
    isHomo0 = M==0L
    isHomo1 = M==mapPop@ploidy
    isHet = !(isHomo0 | isHomo1)
    if(any(isHet)){
      mapPop@inbred = FALSE
    }
  }

  return(mapPop)
}
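# Illustrative sketch (not part of the package source): if a site is inserted
# ahead of the first mapped position, addSegSite() shifts the chromosome map
# so it still starts at zero. Wrapped in if(FALSE) so it never executes when
# the file is sourced; assumes AlphaSimR is loaded.
if(FALSE){
  founderPop = quickHaplo(nInd=4, nChr=1, segSites=2, genLen=1)
  haplo = matrix(sample(0:1, 8, replace=TRUE), ncol=1) # nInd*ploidy = 8 rows
  founderPop2 = addSegSite(founderPop, siteName="y", chr=1,
                           mapPos=-0.1, haplo=haplo)
  founderPop2@genMap[[1]] # map starts at 0; original sites shifted by 0.1
}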
#' @title Hybrid crossing
#'
#' @description
#' A convenience function for hybrid plant breeding simulations. Allows for
#' easy specification of a test cross scheme and/or creation of an object
#' of \code{\link{HybridPop-class}}. Note that the \code{\link{HybridPop-class}}
#' should only be used if the parents were created using the \code{\link{makeDH}}
#' function or \code{\link{newPop}} using inbred founders. The id for
#' new individuals is [mother_id]_[father_id]
#'
#' @param females female population, an object of \code{\link{Pop-class}}
#' @param males male population, an object of \code{\link{Pop-class}}
#' @param crossPlan either "testcross" for all possible combinations
#' or a matrix with two columns for designed crosses
#' @param returnHybridPop should results be returned as
#' \code{\link{HybridPop-class}}. If false returns results as
#' \code{\link{Pop-class}}. Population must be fully inbred if TRUE.
#' @param simParam an object of \code{\link{SimParam}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=2, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' #Make crosses for full diallel
#' pop2 = hybridCross(pop, pop, simParam=SP)
#'
#' @export
hybridCross = function(females, males,
                       crossPlan="testcross",
                       returnHybridPop=FALSE,
                       simParam=NULL){
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }
  if((females@ploidy%%2L != 0L) |
     (males@ploidy%%2L != 0L)){
    stop("You cannot cross individuals with odd ploidy levels")
  }

  #crossPlan for test cross
  if(length(crossPlan)==1){
    if(crossPlan=="testcross"){
      crossPlan = cbind(rep(1:females@nInd,each=males@nInd),
                        rep(1:males@nInd,females@nInd))
    }else{
      stop(paste0("crossPlan=",crossPlan," is not a valid option"))
    }
  }

  #Set id
  femaleParents = females@id[crossPlan[,1]]
  maleParents = males@id[crossPlan[,2]]
  id = paste(femaleParents, maleParents, sep="_")

  #Return Pop-class
  if(!returnHybridPop){
    return(makeCross2(females=females,
                      males=males,
                      crossPlan=crossPlan,
                      simParam=simParam))
  }

  #Return HybridPop-class
  gv = matrix(NA_real_,
              nrow=length(id),
              ncol=simParam$nTraits)
  gxe = vector("list",simParam$nTraits)
  i = 0L
  for(trait in simParam$traits){
    i = i+1L
    tmp = getHybridGv(trait=trait,
                      females=females,
                      femaleParents=crossPlan[,1],
                      males=males,
                      maleParents=crossPlan[,2],
                      nThreads=simParam$nThreads)
    gv[,i] = tmp[[1]]
    if(length(tmp)==2){
      gxe[[i]] = tmp[[2]]
    }
  }
  if(simParam$nTraits>0){
    pheno = addError(gv, simParam$varE, reps=rep(1, simParam$nTraits))
  }else{
    pheno = gv
  }
  output = new("HybridPop",
               nInd=length(id),
               id=id,
               mother=femaleParents,
               father=maleParents,
               nTraits=simParam$nTraits,
               gv=gv,
               pheno=pheno,
               gxe=gxe)
  return(output)
}

#' @title Calculate GCA
#'
#' @description
#' Calculate general combining ability of test crosses. Intended for
#' output from hybridCross using the "testcross" option, but will work
#' for any population.
#'
#' @param pop an object of \code{\link{Pop-class}} or
#' \code{\link{HybridPop-class}}
#' @param use tabulate either genetic values "gv", estimated
#' breeding values "ebv", or phenotypes "pheno"
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10, inbred=TRUE)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitA(10)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' #Make crosses for full diallel
#' pop2 = hybridCross(pop, pop, simParam=SP)
#' GCA = calcGCA(pop2, use="gv")
#'
#' @export
calcGCA = function(pop,use="pheno"){
  if(use=="pheno"){
    y = pop@pheno
  }else if(use=="gv"){
    y = pop@gv
  }else if(use=="ebv"){
    y = pop@ebv
  }else{
    stop(paste0("use=",use," is not a valid option"))
  }
  if(ncol(y)==0){
    stop(paste("No values for",use))
  }
  female = factor(pop@mother,
                  levels=unique(pop@mother))
  male = factor(pop@father,
                levels=unique(pop@father))
  #Check for balance
  if(nlevels(female)==1 | nlevels(male)==1){
    balanced = TRUE
  }else{
    tmp = table(female,male)
    if(all(tmp==tmp[1])){
      balanced = TRUE
    }else{
      balanced = FALSE
    }
  }
  sca = paste(as.character(female),as.character(male),sep="_")
  sca = factor(sca,levels=unique(sca))
  # Female GCA
  if(nlevels(female)==1){
    GCAf = matrix(colMeans(y),nrow=1)
  }else{
    if(nlevels(male)==1){
      GCAf = y
    }else{
      if(balanced){
        #Calculate simple means
        tmp = aggregate(y~female,FUN=mean)
        GCAf = unname(as.matrix(tmp[,-1,drop=F]))
      }else{
        #Calculate population marginal means
        X = model.matrix(~female+male-1,contrasts=list(male="contr.sum"))
        GCAf = calcCoef(X,y)[1:nlevels(female),,drop=FALSE]
      }
    }
  }
  GCAf = data.frame(levels(female),GCAf,
                    stringsAsFactors=FALSE)
  names(GCAf) = c("id",paste0("Trait",1:pop@nTraits))
  # Male GCA
  if(nlevels(male)==1){
    GCAm = matrix(colMeans(y),nrow=1)
  }else{
    if(nlevels(female)==1){
      GCAm = y
    }else{
      if(balanced){
        #Calculate simple means
        tmp = aggregate(y~male,FUN=mean)
        GCAm = unname(as.matrix(tmp[,-1,drop=F]))
      }else{
        #Calculate population marginal means
        X = model.matrix(~male+female-1,contrasts=list(female="contr.sum"))
        GCAm = calcCoef(X,y)[1:nlevels(male),,drop=FALSE]
      }
    }
  }
  GCAm = data.frame(levels(male),GCAm,
                    stringsAsFactors=FALSE)
  names(GCAm) = c("id",paste0("Trait",1:pop@nTraits))
  # SCA
  if(nlevels(sca)==pop@nInd){
    SCA = y
  }else{
    #Calculate simple means
    tmp = aggregate(y~sca,FUN=mean)
    SCA = unname(as.matrix(tmp[,-1,drop=F]))
  }
  SCA = data.frame(levels(sca),SCA,
                   stringsAsFactors=FALSE)
  names(SCA) = c("id",paste0("Trait",1:pop@nTraits))
  return(list(GCAf=GCAf, GCAm=GCAm, SCA=SCA))
}

#' @title Set GCA as phenotype
#'
#' @description
#' Calculates general combining ability from a set of testers and
#' returns these values as phenotypes for a population.
#'
#' @param pop an object of \code{\link{Pop-class}}
#' @param testers an object of \code{\link{Pop-class}}
#' @param use true genetic value (\code{gv}) or phenotypes (\code{pheno}, default)
#' @param h2 a vector of desired narrow-sense heritabilities for
#' each trait. See details in \code{\link{setPheno}}.
#' @param H2 a vector of desired broad-sense heritabilities for
#' each trait. See details in \code{\link{setPheno}}.
#' @param varE error (co)variances for traits.
#' See details in \code{\link{setPheno}}.
#' @param corE an optional matrix for correlations between errors.
#' See details in \code{\link{setPheno}}.
#' @param reps number of replications for phenotype.
#' See details in \code{\link{setPheno}}.
#' @param fixEff fixed effect to assign to the population. Used
#' by genomic selection models only.
#' @param p the p-value for the environmental covariate
#' used by GxE traits. If NULL, a value is
#' sampled at random.
#' @param inbred are both pop and testers fully inbred. They are only
#' fully inbred if created by \code{\link{newPop}} using inbred founders
#' or by the \code{\link{makeDH}} function
#' @param onlyPheno should only the phenotype be returned, see return
#' @param simParam an object of \code{\link{SimParam}}
#'
#' @return Returns an object of \code{\link{Pop-class}} or
#' a matrix if onlyPheno=TRUE
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10, inbred=TRUE)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitA(10)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' #Set phenotype to average performance over testcrosses
#' pop2 = setPhenoGCA(pop, pop, use="gv", inbred=TRUE, simParam=SP)
#'
#' @export
setPhenoGCA = function(pop, testers, use="pheno", h2=NULL, H2=NULL,
                       varE=NULL, corE=NULL, reps=1, fixEff=1L, p=NULL,
                       inbred=FALSE, onlyPheno=FALSE, simParam=NULL){
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }
  if(is(pop,"MultiPop")){
    stopifnot(class(testers)=="Pop",
              !onlyPheno)
    pop@pops = lapply(pop@pops, setPhenoGCA, testers=testers, use=use,
                      h2=h2, H2=H2, varE=varE, corE=corE, reps=reps,
                      fixEff=fixEff, p=p, inbred=inbred, onlyPheno=FALSE,
                      simParam=simParam)
    return(pop)
  }
  if(any(duplicated(pop@id))){
    stop("This function does not work with duplicate IDs")
  }
  stopifnot(class(pop)=="Pop",class(testers)=="Pop")
  use = tolower(use)
  #Make hybrids
  tmp = hybridCross(females=pop, males=testers,
                    crossPlan="testcross",
                    returnHybridPop=inbred,
                    simParam=simParam)
  #Get response
  if(use=="pheno"){
    y = setPheno(tmp, h2=h2, H2=H2, varE=varE, corE=corE, p=p,
                 reps=reps, onlyPheno=TRUE, simParam=simParam)
  }else if(use=="gv"){
    y = tmp@gv
  }else{
    stop(paste0("use=",use," is not a valid option"))
  }
  if(ncol(y)==0){
    stop(paste("No values for",use))
  }
  female = factor(tmp@mother,levels=unique(tmp@mother))
  if(nlevels(female)==1){
    GCAf = matrix(colMeans(y),nrow=1)
  }else{
    if(testers@nInd==1){
      GCAf = y
    }else{
      #Calculate simple means
      tmp = aggregate(y~female,FUN=mean)
      GCAf = unname(as.matrix(tmp[,-1,drop=F]))
    }
  }
  if(onlyPheno){
    return(GCAf)
  }
  pop@pheno = GCAf
  pop@fixEff = rep(as.integer(fixEff),pop@nInd)
  return(pop)
}

#' @title Set progeny test as phenotype
#'
#' @description
#' Models a progeny test of individuals in 'pop'. Returns 'pop' with a phenotype
#' representing the average performance of their progeny. The phenotype is generated
#' by mating individuals in 'pop' to randomly chosen individuals in testPop a
#' number of times equal to 'nMatePerInd'.
#'
#' @param pop an object of \code{\link{Pop-class}}
#' @param testPop an object of \code{\link{Pop-class}}
#' @param nMatePerInd number of times an individual in 'pop' is mated to an
#' individual in testPop
#' @param use true genetic value (\code{gv}) or phenotypes (\code{pheno}, default)
#' @param h2 a vector of desired narrow-sense heritabilities for
#' each trait. See details in \code{\link{setPheno}}.
#' @param H2 a vector of desired broad-sense heritabilities for
#' each trait. See details in \code{\link{setPheno}}.
#' @param varE error (co)variances for traits.
#' See details in \code{\link{setPheno}}.
#' @param corE an optional matrix for correlations between errors.
#' See details in \code{\link{setPheno}}.
#' @param reps number of replications for phenotype.
#' See details in \code{\link{setPheno}}.
#' @param fixEff fixed effect to assign to the population. Used
#' by genomic selection models only.
#' @param p the p-value for the environmental covariate
#' used by GxE traits. If NULL, a value is
#' sampled at random.
#' @param onlyPheno should only the phenotype be returned, see return
#' @param simParam an object of \code{\link{SimParam}}
#'
#' @details
#' The reps parameter is for convenient representation of replicated data.
#' It was intended for representation of replicated yield trials in plant
#' breeding programs. In this case, varE is set to the plot error and
#' reps is set to the number of plots per entry. The resulting phenotype
#' would reflect the mean of all replications.
#'
#' @return Returns an object of \code{\link{Pop-class}} or
#' a matrix if onlyPheno=TRUE
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10, inbred=TRUE)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitA(10)
#'
#' #Create two populations of 5 individuals
#' pop1 = newPop(founderPop[1:5], simParam=SP)
#' pop2 = newPop(founderPop[6:10], simParam=SP)
#'
#' #Set phenotype according to a progeny test
#' pop3 = setPhenoProgTest(pop1, pop2, use="gv", simParam=SP)
#'
#' @export
setPhenoProgTest = function(pop, testPop, nMatePerInd=1L, use="pheno",
                            h2=NULL, H2=NULL, varE=NULL, corE=NULL,
                            reps=1, fixEff=1L, p=NULL, onlyPheno=FALSE,
                            simParam=NULL){
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }
  if(is(pop,"MultiPop")){
    stopifnot(class(testPop)=="Pop",
              !onlyPheno)
    pop@pops = lapply(pop@pops, setPhenoProgTest, testPop=testPop,
                      nMatePerInd=nMatePerInd, use=use, h2=h2, H2=H2,
                      varE=varE, corE=corE, reps=reps, fixEff=fixEff,
                      p=p, onlyPheno=FALSE, simParam=simParam)
    return(pop)
  }
  if(any(duplicated(pop@id))){
    stop("This function does not work with duplicate IDs")
  }
  stopifnot(class(pop)=="Pop",class(testPop)=="Pop")
  use = tolower(use)
  #Make hybrids
  tmp = randCross2(females=pop, males=testPop,
                   nCrosses=nInd(pop)*nMatePerInd,
                   balance=TRUE, simParam=simParam)
  #Get response
  if(use=="pheno"){
    y = setPheno(tmp, h2=h2, H2=H2, varE=varE, corE=corE,
                 reps=reps, p=p, onlyPheno=TRUE, simParam=simParam)
  }else if(use=="gv"){
    y = tmp@gv
  }else{
    stop(paste0("use=",use," is not a valid option"))
  }
  if(ncol(y)==0){
    stop(paste("No values for",use))
  }
  female = factor(tmp@mother, levels=pop@id)
  #Calculate simple means
  tmp = aggregate(y~female, FUN=mean)
  GCAf = unname(as.matrix(tmp[,-1,drop=F]))
  if(onlyPheno){
    return(GCAf)
  }
  pop@pheno = GCAf
  pop@fixEff = rep(as.integer(fixEff),pop@nInd)
  return(pop)
}
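# Illustrative sketch (not part of the package source): the @details above
# note that replicated records are returned as entry means, so the effective
# error variance of a phenotype is varE/reps. Wrapped in if(FALSE) so it never
# executes when the file is sourced; assumes AlphaSimR is loaded.
if(FALSE){
  founderPop = quickHaplo(nInd=1000, nChr=1, segSites=100)
  SP = SimParam$new(founderPop)
  SP$addTraitA(10)
  pop = newPop(founderPop, simParam=SP)
  p1 = setPheno(pop, varE=4, reps=1, simParam=SP)
  p4 = setPheno(pop, varE=4, reps=4, simParam=SP)
  var(p1@pheno - p1@gv) # approx. 4
  var(p4@pheno - p4@gv) # approx. 1 = varE/reps
}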
# ---- end of AlphaSimR/R/hybrids.R ----
#' @title Import genetic map #' #' @description #' Formats a genetic map stored in a data.frame to #' AlphaSimR's internal format. Map positions must be #' in Morgans. #' #' @param genMap genetic map as a data.frame. The first #' three columns must be: marker name, chromosome, and #' map position (Morgans). Marker name and chromosome are #' coerced using as.character. #' #' @return a list of named vectors #' #' @examples #' genMap = data.frame(markerName=letters[1:5], #' chromosome=c(1,1,1,2,2), #' position=c(0,0.5,1,0.15,0.4)) #' #' asrMap = importGenMap(genMap=genMap) #' #' str(asrMap) #' #' @export importGenMap = function(genMap){ # Convert data type markerName = as.character(genMap[,1]) chromosome = as.character(genMap[,2]) position = as.numeric(genMap[,3]) # Create list for map uniqueChr = unique(chromosome) genMap = vector("list", length=length(uniqueChr)) names(genMap) = uniqueChr # Iterate through chromosomes for(i in 1:length(uniqueChr)){ take = (chromosome==uniqueChr[i]) tmpPos = position[take] tmpName = markerName[take] # Order and name take = order(tmpPos, decreasing=FALSE) tmpPos = tmpPos[take] names(tmpPos) = tmpName[take] genMap[[uniqueChr[i] ]] = tmpPos - tmpPos[1] } return(genMap) } #' @title Import inbred, diploid genotypes #' #' @description #' Formats the genotypes from inbred, diploid lines #' to an AlphaSimR population that can be used to #' initialize a simulation. An attempt is made to #' automatically detect 0,1,2 or -1,0,1 genotype coding. #' Heterozygotes or probabilistic genotypes are allowed, #' but will be coerced to the nearest homozygote. Pedigree #' information is optional and when provided will be #' passed to the population for easier identification #' in the simulation. #' #' @param geno a matrix of genotypes #' @param genMap genetic map as a data.frame. The first #' three columns must be: marker name, chromosome, and #' map position (Morgans). Marker name and chromosome are #' coerced using as.character. See \link{importGenMap} #' @param ped an optional pedigree for the supplied #' genotypes. See details. #' #' @details #' The optional pedigree can be a data.frame, matrix or a vector. #' If the object is a data.frame or matrix, the first three #' columns must include information in the following order: id, #' mother, and father. All values are coerced using #' as.character. If the object is a vector, it is assumed to only #' include the id. In this case, the mother and father will be set #' to "0" for all individuals. 
#'
#' @return a \code{\link{MapPop-class}} if ped is NULL,
#' otherwise a \code{\link{NamedMapPop-class}}
#'
#' @examples
#' geno = rbind(c(2,2,0,2,0),
#'              c(0,2,2,0,0))
#' colnames(geno) = letters[1:5]
#'
#' genMap = data.frame(markerName=letters[1:5],
#'                     chromosome=c(1,1,1,2,2),
#'                     position=c(0,0.5,1,0.15,0.4))
#'
#' ped = data.frame(id=c("a","b"),
#'                  mother=c(0,0),
#'                  father=c(0,0))
#'
#' founderPop = importInbredGeno(geno=geno,
#'                               genMap=genMap,
#'                               ped=ped)
#'
#' @export
importInbredGeno = function(geno, genMap, ped=NULL){

  # Extract pedigree, if supplied
  if(!is.null(ped)){
    if(is.vector(ped)){
      id = as.character(ped)
      stopifnot(length(id)==nrow(geno),
                !any(duplicated(id)))
      mother = father = rep("0", length(id))
    }else{
      id = as.character(ped[,1])
      stopifnot(length(id)==nrow(geno),
                !any(duplicated(id)))
      mother = as.character(ped[,2])
      father = as.character(ped[,3])
    }
  }

  genMap = importGenMap(genMap)

  # Get marker names
  if(is.data.frame(geno)){
    geno = as.matrix(geno)
  }
  markerName = colnames(geno)

  # Check marker coding and convert to haplotypes
  if(is.raw(geno)){
    geno[geno==as.raw(1)] = as.raw(0) # For consistency with round
    geno[geno==as.raw(2)] = as.raw(1)
  }else{
    minGeno = min(geno)
    maxGeno = max(geno)
    stopifnot(minGeno >= (-1-1e-8) )
    if(minGeno < (0-1e-8) ){
      # Suspect -1,0,1 coding
      stopifnot(maxGeno <= (1+1e-8) )

      # Converting to 0,1 haplotypes with hard thresholds
      geno = matrix(as.raw( round( (geno+1)/2 ) ),
                    ncol=ncol(geno))
    }else{
      # Suspect 0,1,2 coding
      stopifnot(maxGeno <= (2+1e-8) )

      # Converting to 0,1 haplotypes with hard thresholds
      geno = matrix(as.raw( round( geno/2 ) ),
                    ncol=ncol(geno))
    }
  }

  # Create haplotype list
  haplotypes = vector("list", length=length(genMap))

  # Order haplotypes by chromosome
  for(i in 1:length(genMap)){
    mapMarkers = names(genMap[[i]])
    take = match(mapMarkers, markerName)
    if(any(is.na(take))){
      # Keep only map markers present in the genotype matrix
      genMap[[i]] = genMap[[i]][!is.na(take)]
      stopifnot(length(genMap[[i]]) >= 1L)
      genMap[[i]] = genMap[[i]] - genMap[[i]][1]
      take = na.omit(take)
    }
    haplotypes[[i]] = geno[,take]
  }

  founderPop = newMapPop(genMap=genMap, haplotypes=haplotypes, inbred=TRUE)

  if(!is.null(ped)){
    founderPop = new("NamedMapPop", id=id, mother=mother,
                     father=father, founderPop)
  }

  return(founderPop)
}

#' @title Import haplotypes
#'
#' @description
#' Formats haplotypes in a matrix format to an
#' AlphaSimR population that can be used to
#' initialize a simulation. This function serves
#' as a wrapper for \code{\link{newMapPop}} that
#' utilizes a more user friendly input format.
#'
#' @param haplo a matrix of haplotypes
#' @param genMap genetic map as a data.frame. The first
#' three columns must be: marker name, chromosome, and
#' map position (Morgans). Marker name and chromosome are
#' coerced using as.character. See \code{\link{importGenMap}}
#' @param ploidy ploidy level of the organism
#' @param ped an optional pedigree for the supplied
#' genotypes. See details.
#'
#' @details
#' The optional pedigree can be a data.frame, matrix or a vector.
#' If the object is a data.frame or matrix, the first three
#' columns must include information in the following order: id,
#' mother, and father. All values are coerced using
#' as.character. If the object is a vector, it is assumed to only
#' include the id. In this case, the mother and father will be set
#' to "0" for all individuals.
#'
#' @return a \code{\link{MapPop-class}} if ped is NULL,
#' otherwise a \code{\link{NamedMapPop-class}}
#'
#' @examples
#' haplo = rbind(c(1,1,0,1,0),
#'               c(1,1,0,1,0),
#'               c(0,1,1,0,0),
#'               c(0,1,1,0,0))
#' colnames(haplo) = letters[1:5]
#'
#' genMap = data.frame(markerName=letters[1:5],
#'                     chromosome=c(1,1,1,2,2),
#'                     position=c(0,0.5,1,0.15,0.4))
#'
#' ped = data.frame(id=c("a","b"),
#'                  mother=c(0,0),
#'                  father=c(0,0))
#'
#' founderPop = importHaplo(haplo=haplo,
#'                          genMap=genMap,
#'                          ploidy=2L,
#'                          ped=ped)
#'
#' @export
importHaplo = function(haplo, genMap, ploidy=2L, ped=NULL){

  # Extract pedigree, if supplied
  if(!is.null(ped)){
    if(is.vector(ped)){
      id = as.character(ped)
      stopifnot(length(id)==(nrow(haplo)/ploidy),
                !any(duplicated(id)))
      mother = father = rep("0", length(id))
    }else{
      id = as.character(ped[,1])
      stopifnot(length(id)==(nrow(haplo)/ploidy),
                !any(duplicated(id)))
      mother = as.character(ped[,2])
      father = as.character(ped[,3])
    }
  }

  genMap = importGenMap(genMap)

  # Get marker names
  if(is.data.frame(haplo)){
    haplo = as.matrix(haplo)
  }
  markerName = colnames(haplo)

  # Convert haplotypes to raw
  haplo = matrix(as.raw(haplo), ncol=ncol(haplo))
  stopifnot(haplo==as.raw(0) | haplo==as.raw(1))

  # Create haplotype list
  haplotypes = vector("list", length=length(genMap))

  # Order haplotypes by chromosome
  for(i in 1:length(genMap)){
    mapMarkers = names(genMap[[i]])
    take = match(mapMarkers, markerName)
    if(any(is.na(take))){
      # Keep only map markers present in the haplotype matrix
      genMap[[i]] = genMap[[i]][!is.na(take)]
      stopifnot(length(genMap[[i]]) >= 1L)
      genMap[[i]] = genMap[[i]] - genMap[[i]][1]
      take = na.omit(take)
    }
    haplotypes[[i]] = haplo[,take]
  }

  founderPop = newMapPop(genMap=genMap, haplotypes=haplotypes,
                         ploidy=ploidy)

  if(!is.null(ped)){
    founderPop = new("NamedMapPop", id=id, mother=mother,
                     father=father, founderPop)
  }

  return(founderPop)
}
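# Usage sketch (not run): importInbredGeno() also accepts -1,0,1 coded
# genotypes; the automatic coding detection above rescales them to 0,1
# haplotypes. Marker names and map mirror the toy values in the examples
# above; wrapped in `if (FALSE)` so nothing executes on source.
if (FALSE) {
  geno = rbind(c( 1, 1, -1,  1, -1),
               c(-1, 1,  1, -1, -1))
  colnames(geno) = letters[1:5]
  genMap = data.frame(markerName=letters[1:5],
                      chromosome=c(1,1,1,2,2),
                      position=c(0,0.5,1,0.15,0.4))
  founderPop = importInbredGeno(geno=geno, genMap=genMap)
  SP = SimParam$new(founderPop)
  pop = newPop(founderPop, simParam=SP)
}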
# ---- end of AlphaSimR/R/importData.R ----
#' @title Merge list of populations
#'
#' @description Rapidly merges a list of populations into a
#' single population
#'
#' @param popList a list containing \code{\link{Pop-class}} elements
#' or a \code{\link{MultiPop-class}}
#'
#' @return Returns a \code{\link{Pop-class}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#'
#' #Create a list of populations and merge list
#' pop = newPop(founderPop, simParam=SP)
#' popList = list(pop, pop)
#' pop2 = mergePops(popList)
#'
#' @export
mergePops = function(popList){
  if(is(popList,"MultiPop")){
    # Use [[i]] so single elements, not length-1 sublists, are tested
    for(i in 1:length(popList@pops)){
      if(is(popList@pops[[i]],"MultiPop")){
        popList@pops[[i]] = mergePops(popList@pops[[i]])
      }
    }
    popList = popList@pops
  }
  classes = do.call("c", lapply(popList, function(x) class(x)))
  if(any(classes=="NULL")){
    remove = which(classes=="NULL")
    popList = popList[-remove]
    classes = classes[-remove]
  }
  stopifnot(all(classes=="Pop"))

  #nChr
  nChr = do.call("c", lapply(popList, function(x) x@nChr))
  stopifnot(all(nChr==nChr[1]))
  nChr = nChr[1]

  #ploidy
  ploidy = do.call("c", lapply(popList, function(x) x@ploidy))
  stopifnot(all(ploidy==ploidy[1]))
  ploidy = ploidy[1]

  #nLoci
  nLoci = do.call("c", lapply(popList, function(x){
    all(x@nLoci==popList[[1]]@nLoci)
  }))
  stopifnot(all(nLoci))
  nLoci = popList[[1]]@nLoci

  #id
  id = do.call("c", lapply(popList, function(x) x@id))

  #iid
  iid = do.call("c", lapply(popList, function(x) x@iid))

  #mother
  mother = do.call("c", lapply(popList, function(x) x@mother))

  #father
  father = do.call("c", lapply(popList, function(x) x@father))

  #fixEff
  fixEff = do.call("c", lapply(popList, function(x) x@fixEff))

  #misc
  misc = do.call("c", lapply(popList, function(x) x@misc))

  #sex
  sex = do.call("c", lapply(popList, function(x) x@sex))

  #nTraits
  nTraits = do.call("c", lapply(popList, function(x) x@nTraits))
  stopifnot(all(nTraits==nTraits[1]))
  nTraits = nTraits[1]

  #nInd
  nInd = do.call("c", lapply(popList, function(x) x@nInd))

  #gv
  gv = do.call("rbind", lapply(popList, function(x) x@gv))

  #pheno
  pheno = do.call("rbind", lapply(popList, function(x) x@pheno))

  #ebv
  ebv = do.call("c", lapply(popList, function(x) ncol(x@ebv)))
  if(all(ebv==ebv[1])){
    ebv = do.call("rbind", lapply(popList, function(x) x@ebv))
  }else{
    ebv = matrix(NA_real_, nrow=sum(nInd), ncol=0)
  }

  #gxe
  if(nTraits>=1){
    gxe = vector("list", length=nTraits)
    for(trait in 1:nTraits){
      if(!is.null(popList[[1]]@gxe[[trait]])){
        tmp = lapply(popList, function(x) x@gxe[[trait]])
        tmp = do.call("c", tmp)
        gxe[[trait]] = tmp
      }
    }
  }else{
    gxe = list()
  }

  #geno
  nBin = as.integer(nLoci%/%8L + (nLoci%%8L > 0L))
  geno = mergeMultGeno(popList, nInd=nInd, nBin=nBin, ploidy=ploidy)
  dim(geno) = NULL # Account for matrix bug in RcppArmadillo

  nInd = sum(nInd)
  return(new("Pop",
             nInd=nInd,
             nChr=nChr,
             ploidy=ploidy,
             nLoci=nLoci,
             sex=sex,
             geno=geno,
             id=id,
             iid=iid,
             mother=mother,
             father=father,
             fixEff=fixEff,
             nTraits=nTraits,
             gv=gv,
             gxe=gxe,
             pheno=pheno,
             ebv=ebv,
             misc=misc,
             miscPop=list()))
}
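# Usage sketch (not run): as coded above, mergePops() drops the ebv slot
# when the populations carry different numbers of EBV columns. A minimal
# demonstration of that behavior, wrapped in `if (FALSE)`:
if (FALSE) {
  founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
  SP = SimParam$new(founderPop)
  pop = newPop(founderPop, simParam=SP)
  popA = pop[1:5]
  popB = pop[6:10]
  popA@ebv = matrix(rnorm(5), ncol=1) # popB has no EBVs
  merged = mergePops(list(popA, popB))
  ncol(merged@ebv) # 0: mismatched EBV dimensions are discarded
}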
# ---- end of AlphaSimR/R/mergePops.R ----
# Converts a matrix to integer type
# Intended for genotype matrices of raw type
convToImat = function(X){
  return(matrix(as.integer(X), nrow=nrow(X), ncol=ncol(X)))
}

#' @rdname isFemale
#' @title Test if individuals of a population are female or male
#'
#' @description Test if individuals of a population are female or male
#'
#' @param x \code{\link{Pop-class}}
#'
#' @return logical
#'
#' @examples
#' founderGenomes <- quickHaplo(nInd = 3, nChr = 1, segSites = 100)
#' SP <- SimParam$new(founderGenomes)
#' SP$setSexes(sexes = "yes_sys")
#' pop <- newPop(founderGenomes)
#'
#' isFemale(pop)
#' isMale(pop)
#'
#' pop[isFemale(pop)]
#' pop[isFemale(pop)]@sex
#'
#' @export
isFemale <- function(x) {
  if (isPop(x)) {
    ret <- x@sex == "F"
  } else {
    stop("Argument x must be a Pop class object!")
  }
  return(ret)
}

#' @describeIn isFemale Test if individuals of a population are female or male
#' @export
isMale <- function(x) {
  if (isPop(x)) {
    ret <- x@sex == "M"
  } else {
    stop("Argument x must be a Pop class object!")
  }
  return(ret)
}

#' @rdname setMisc
#' @title Set miscellaneous information in a population
#'
#' @description Set miscellaneous information in a population
#'
#' @param x \code{\link{Pop-class}}
#' @param node character, name of the node to set within the \code{x@misc} slot
#' @param value value to be saved into \code{x@misc[[*]][[node]]}; length of
#'   \code{value} should be equal to \code{nInd(x)}; if its length is 1, then
#'   it is repeated using \code{rep} (see examples)
#'
#' @details A \code{NULL} in \code{value} is ignored
#'
#' @return \code{\link{Pop-class}}
#'
#' @export
setMisc <- function(x, node = NULL, value = NULL) {
  if (isPop(x)) {
    if (is.null(node)) {
      stop("Argument node must be provided!")
    }
    if (is.null(value)) {
      stop("Argument value must be provided!")
    }
    n <- nInd(x)
    if (length(value) == 1 && n > 1) {
      value <- rep(x = value, times = n)
    }
    if (length(value) != n) {
      stop("Argument value must be of length 1 or nInd(x)!")
    }
    for (ind in seq_len(n)) {
      if (!is.null(value[ind])) {
        x@misc[[ind]][node] <- value[ind]
      }
    }
  } else {
    stop("Argument x must be a Pop class object!")
  }
  return(x)
}

#' @rdname getMisc
#' @title Get miscellaneous information in a population
#'
#' @description Get miscellaneous information in a population
#'
#' @param x \code{\link{Pop-class}}
#' @param node character, name of the node to get from the \code{x@misc} slot;
#'   if \code{NULL} the whole \code{x@misc} slot is returned
#'
#' @return The \code{x@misc} slot or its nodes \code{x@misc[[*]][[node]]}
#'
#' @examples
#' founderGenomes <- quickHaplo(nInd = 3, nChr = 1, segSites = 100)
#' SP <- SimParam$new(founderGenomes)
#' \dontshow{SP$nThreads = 1L}
#' basePop <- newPop(founderGenomes)
#'
#' basePop <- setMisc(basePop, node = "info", value = 1)
#' basePop@misc
#' getMisc(x = basePop, node = "info")
#'
#' basePop <- setMisc(basePop, node = "info2", value = c("A", "B", "C"))
#' basePop@misc
#' getMisc(x = basePop, node = "info2")
#'
#' n <- nInd(basePop)
#' location <- vector(mode = "list", length = n)
#' for (ind in seq_len(n)) {
#'   location[[ind]] <- runif(n = 2, min = 0, max = 100)
#' }
#' location
#' basePop <- setMisc(basePop, node = "location", value = location)
#' basePop@misc
#' getMisc(x = basePop, node = "location")
#'
#' n <- nInd(basePop)
#' location <- vector(mode = "list", length = n)
#' for (ind in c(1, 3)) {
#'   location[[ind]] <- runif(n = 2, min = 0, max = 100)
#' }
#' location
#' basePop <- setMisc(basePop, node = "location", value = location)
#' basePop@misc
#' getMisc(x = basePop, node = "location")
"location") #' #' getMisc(x = basePop) #' #' @export getMisc <- function(x, node = NULL) { if (isPop(x)) { if (is.null(node)) { ret <- x@misc } else { nInd <- nInd(x) ret <- vector(mode = "list", length = nInd) for (ind in seq_len(nInd)) { if (!is.null(x@misc[[ind]][[node]])) { ret[ind] <- x@misc[[ind]][node] } } } } else { stop("Argument x must be a Pop class object!") } return(ret) } #' @title Get pedigree #' #' @description #' Returns the population's pedigree as stored in the #' id, mother and father slots. NULL is returned if the #' input population lacks the required. #' #' @param pop a population #' #' @examples #' # Create a founder population #' founderPop = quickHaplo(2,1,2) #' #' # Set simulation parameters #' SP = SimParam$new(founderPop) #' #' # Create a population #' pop = newPop(founderPop, simParam=SP) #' #' # Get the pedigree #' getPed(pop) #' #' # Returns NULL when a population lacks a pedigree #' getPed(founderPop) #' #' @export getPed = function(pop){ if(.hasSlot(pop, "id") & .hasSlot(pop, "mother") & .hasSlot(pop, "father")){ df = data.frame(id = pop@id, mother = pop@mother, father = pop@father) return(df) }else{ return(NULL) } } #' @title Selection intensity #' #' @description #' Calculates the standardized selection intensity #' #' @param p the proportion of individuals selected #' #' @examples #' selInt(0.1) #' #' @export selInt = function(p){ return(dnorm(qnorm(1-p))/p) } #' @title Calculate Smith-Hazel weights #' #' @description #' Calculates weights for Smith-Hazel index given economice weights #' and phenotypic and genotypic variance-covariance matrices. #' #' @param econWt vector of economic weights #' @param varG the genetic variance-covariance matrix #' @param varP the phenotypic variance-covariance matrix #' #' @return a vector of weight for calculating index values #' #' @examples #' G = 1.5*diag(2)-0.5 #' E = diag(2) #' P = G+E #' wt = c(1,1) #' smithHazel(wt, G, P) #' #' @export smithHazel = function(econWt,varG,varP){ return(solve(varP)%*%varG%*%econWt) } #' @title Selection index #' #' @description #' Calculates values of a selection index given trait values and #' weights. This function is intended to be used in combination with #' selection functions working on populations such as #' \code{\link{selectInd}}. #' #' @param Y a matrix of trait values #' @param b a vector of weights #' @param scale should Y be scaled and centered #' #' @examples #' #Create founder haplotypes #' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10) #' #' #Set simulation parameters #' SP = SimParam$new(founderPop) #' \dontshow{SP$nThreads = 1L} #' #Model two genetically correlated traits #' G = 1.5*diag(2)-0.5 #Genetic correlation matrix #' SP$addTraitA(10, mean=c(0,0), var=c(1,1), corA=G) #' SP$setVarE(h2=c(0.5,0.5)) #' #' #Create population #' pop = newPop(founderPop, simParam=SP) #' #' #Calculate Smith-Hazel weights #' econWt = c(1, 1) #' b = smithHazel(econWt, varG(pop), varP(pop)) #' #' #Selection 2 best individuals using Smith-Hazel index #' #selIndex is used as a trait #' pop2 = selectInd(pop, nInd=2, trait=selIndex, #' simParam=SP, b=b) #' #' @export selIndex = function(Y,b,scale=FALSE){ if(scale){ return(scale(Y)%*%b) } return(Y%*%b) } #' @title Edit genome #' #' @description #' Edits selected loci of selected individuals to a homozygous #' state for either the 1 or 0 allele. The gv slot is recalculated to #' reflect the any changes due to editing, but other slots remain the same. 
#'
#' @param pop an object of \code{\link{Pop-class}}
#' @param ind a vector of individuals to edit
#' @param chr a vector of chromosomes to edit.
#' Length must match length of segSites.
#' @param segSites a vector of segregating sites to edit. Length must
#' match length of chr.
#' @param allele either 0 or 1 for desired allele
#' @param simParam an object of \code{\link{SimParam}}
#'
#' @return Returns an object of \code{\link{Pop-class}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=2, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitA(10)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' #Change individual 1 to homozygous for the 1 allele
#' #at locus 1, chromosome 1
#' pop2 = editGenome(pop, ind=1, chr=1, segSites=1,
#'                   allele=1, simParam=SP)
#'
#' @export
editGenome = function(pop, ind, chr, segSites, allele, simParam = NULL) {
  if (is.null(simParam)) {
    simParam = get("SP", envir = .GlobalEnv)
  }
  ind = unique(as.integer(ind))
  stopifnot(all(ind %in% (1:pop@nInd)))
  chr = as.integer(chr)
  segSites = as.integer(segSites)
  stopifnot(length(chr) == length(segSites))
  allele = as.integer(allele)
  stopifnot(all(allele == 0L | allele == 1L))
  allele = as.raw(allele)
  if(length(allele) == 1L){
    allele = rep(allele, length(segSites))
  }
  stopifnot(length(allele) == length(segSites))
  for (selChr in unique(chr)) {
    sel = which(chr == selChr)
    for (i in sel) {
      BYTE = (segSites[i] - 1L)%/%8L + 1L
      BIT = (segSites[i] - 1L)%%8L + 1L
      for (selInd in ind) {
        for (j in 1:pop@ploidy) {
          TMP = pop@geno[[selChr]][BYTE, j, selInd]
          TMP = rawToBits(TMP)
          TMP[BIT] = allele[i]
          TMP = packBits(TMP)
          pop@geno[[selChr]][BYTE, j, selInd] = TMP
        }
      }
    }
  }
  PHENO = pop@pheno
  EBV = pop@ebv
  pop = resetPop(pop = pop, simParam = simParam)
  pop@pheno = PHENO
  pop@ebv = EBV
  return(pop)
}

#' @title Edit genome - the top QTL
#'
#' @description
#' Edits the top QTL (with the largest additive effect) to a homozygous
#' state for the allele increasing the trait value (or decreasing it when
#' \code{increase = FALSE}). Only non-fixed QTL are edited. The gv slot is
#' recalculated to reflect any changes due to editing, but other slots
#' remain the same.
#'
#' @param pop an object of \code{\link{Pop-class}}
#' @param ind a vector of individuals to edit
#' @param nQtl number of QTL to edit
#' @param trait which trait effects should guide selection of the top QTL
#' @param increase should the trait value be increased or decreased
#' @param simParam an object of \code{\link{SimParam}}
#'
#' @return Returns an object of \code{\link{Pop-class}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=2, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitA(10)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' #Change up to 10 loci for individual 1
#' pop2 = editGenomeTopQtl(pop, ind=1, nQtl=10, simParam=SP)
#'
#' @export
editGenomeTopQtl = function(pop, ind, nQtl, trait = 1, increase = TRUE,
                            simParam = NULL) {
  if (is.null(simParam)) {
    simParam = get("SP", envir = .GlobalEnv)
  }
  ind = unique(as.integer(ind))
  stopifnot(all(ind %in% (1:pop@nInd)))
  nQtl = as.integer(nQtl)
  stopifnot(nQtl > 0 & nQtl <= simParam$traits[[trait]]@nLoci)

  findTopQtl = function(pop, ind, nQtl, trait, increase, simParam) {
    # @title Find the top non-fixed QTL for use in editGenome()
    # @param pop an object of \code{\link{Pop-class}}
    # @param ind a vector of individuals to edit
    # @param nQtl number of QTL to edit
    # @param trait which trait effects should guide selection of the top QTL
    # @param increase should the trait value be increased or decreased
    # @param simParam an object of \code{\link{SimParam}}
    # @return a list of four vectors with the:
    #   first indicating which QTL (of all genome QTL) are the top,
    #   second indicating which segsite (of all segsites within a
    #     chromosome) are the top,
    #   third indicating chromosome of the QTL,
    #   fourth indicating which allele we want to fix (edit to)
    QtlGeno = pullQtlGeno(pop=pop[ind], trait=trait, simParam=simParam)
    QtlEff = simParam$traits[[trait]]@addEff
    ret = vector(mode = "list", length = 4)
    ret[[1]] = ret[[2]] = ret[[3]] = ret[[4]] = rep(NA, times = nQtl)
    QtlEffRank = order(abs(QtlEff), decreasing = TRUE)
    nQtlInd = 0
    Qtl = 0
    while (nQtlInd < nQtl) {
      Qtl = Qtl + 1
      if(Qtl > ncol(QtlGeno)){
        ret[[1]] = ret[[1]][1:nQtlInd]
        ret[[2]] = ret[[2]][1:nQtlInd]
        ret[[3]] = ret[[3]][1:nQtlInd]
        ret[[4]] = ret[[4]][1:nQtlInd]
        nQtl = nQtlInd
        break()
      }
      QtlGenoLoc = QtlGeno[QtlEffRank[Qtl]]
      if (QtlEff[QtlEffRank[Qtl]] > 0) {
        if (QtlGenoLoc < 2) {
          nQtlInd = nQtlInd + 1
          ret[[1]][nQtlInd] = QtlEffRank[Qtl]
          ret[[2]][nQtlInd] = simParam$traits[[trait]]@lociLoc[QtlEffRank[Qtl]]
          if (increase) {
            ret[[4]][nQtlInd] = 1
          } else {
            ret[[4]][nQtlInd] = 0
          }
        }
      } else {
        if (QtlGenoLoc > 0) {
          nQtlInd = nQtlInd + 1
          ret[[1]][nQtlInd] = QtlEffRank[Qtl]
          ret[[2]][nQtlInd] = simParam$traits[[trait]]@lociLoc[QtlEffRank[Qtl]]
          if (increase) {
            ret[[4]][nQtlInd] = 0
          } else {
            ret[[4]][nQtlInd] = 1
          }
        }
      }
    }
    # Locate QTL segsite to chromosomes
    tmp = cumsum(simParam$traits[[trait]]@lociPerChr)
    for (Qtl in 1:nQtl) {
      ret[[3]][Qtl] = which(ret[[1]][Qtl] <= tmp)[1]
    }
    ret
  }

  for (ind2 in ind) {
    targetQtl = findTopQtl(pop = pop, ind = ind2, nQtl = nQtl,
                           trait = trait, increase = increase,
                           simParam = simParam)
    pop = editGenome(pop = pop, ind = ind2, chr = targetQtl[[3]],
                     segSites = targetQtl[[2]], allele = targetQtl[[4]],
                     simParam = simParam)
  }
  return(pop)
}

#' @title Usefulness criterion
#'
#' @description Calculates the usefulness criterion
#'
#' @param pop an object of \code{\link{Pop-class}} or
#' \code{\link{HybridPop-class}}
#' @param trait the trait for selection. Either a number indicating
#' a single trait or a function returning a vector of length nInd.
#' @param use select on genetic values (\code{gv}, default), estimated
#' breeding values (\code{ebv}), breeding values (\code{bv}),
#' or phenotypes (\code{pheno})
#' @param p the proportion of individuals selected
#' @param selectTop selects highest values if true.
#' Selects lowest values if false.
#' @param simParam an object of \code{\link{SimParam}}
#' @param ... additional arguments if using a function for
#' trait
#'
#' @return Returns a numeric value
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=2, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitA(10)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' #Determine usefulness of population
#' usefulness(pop, simParam=SP)
#'
#' #Should be equivalent to GV of best individual
#' max(gv(pop))
#'
#' @export
usefulness = function(pop, trait=1, use="gv", p=0.1,
                      selectTop=TRUE, simParam=NULL, ...){
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }
  response = getResponse(pop=pop, trait=trait, use=use,
                         simParam=simParam, ...)
  response = sort(response, decreasing=selectTop)
  response = response[1:ceiling(p*length(response))]
  return(mean(response))
}

#' @title Linear transformation matrix
#'
#' @description
#' Creates an m by m linear transformation matrix that
#' can be applied to n by m uncorrelated deviates
#' sampled from a standard normal distribution to produce
#' correlated deviates with an arbitrary correlation
#' of R. If R is not positive semi-definite, the function
#' applies smoothing and returns a warning (see details).
#'
#' @param R a correlation matrix
#'
#' @details
#' An eigendecomposition is applied to the correlation
#' matrix and used to test if it is positive semi-definite.
#' If the matrix is not positive semi-definite, it is not a
#' valid correlation matrix. In this case, smoothing is
#' applied to the matrix (as described in the 'cor.smooth'
#' function of the 'psych' library) to obtain a valid
#' correlation matrix. The resulting deviates will thus not
#' exactly match the desired correlation, but will hopefully
#' be close if the input matrix wasn't too far removed from
#' a valid correlation matrix.
#' #' @examples #' # Create an 2x2 correlation matrix #' R = 0.5*diag(2) + 0.5 #' #' # Sample 1000 uncorrelated deviates from a #' # bivariate standard normal distribution #' X = matrix(rnorm(2*1000), ncol=2) #' #' # Compute the transformation matrix #' T = transMat(R) #' #' # Apply the transformation to the deviates #' Y = X%*%T #' #' # Measure the sample correlation #' cor(Y) #' #' @export transMat = function(R){ # Check if matrix is symmetric # Stop if it is not nameR = deparse(substitute(R)) if(!isSymmetric(R)){ stop(nameR, " is not a symmetric matrix") } # Check if matrix is positive semi-definite # Provide a warning if it is not eig = eigen(R, symmetric=TRUE) if(min(eig$values)<.Machine$double.eps){ warning("Matrix is not positive semi-definite, see ?transMat for details") # Performing correlation matrix smoothing eig$values[eig$values<.Machine$double.eps] = 100*.Machine$double.eps m = ncol(R) totVar = sum(eig$values) eig$values = eig$values * m/totVar newR = eig$vectors%*%diag(eig$values)%*%t(eig$vectors) newR = cov2cor(newR) eig = eigen(newR, symmetric=TRUE) } return( t(eig$vectors %*% (t(eig$vectors)*sqrt(pmax(eig$values, 0))) ) ) } #' @title Add Random Mutations #' #' @description #' Adds random mutations to individuals in a #' population. Note that any existing phenotypes #' or EBVs are kept. Thus, the user will need to run #' \code{\link{setPheno}} and/or \code{\link{setEBV}} #' to generate new phenotypes or EBVs that reflect #' changes introduced by the new mutations. #' #' @param pop an object of \code{\link{Pop-class}} #' @param mutRate rate of new mutations #' @param returnPos should the positions of mutations be returned #' @param simParam an object of \code{\link{SimParam}} #' #' @return an object of \code{\link{Pop-class}} if #' returnPos=FALSE or a list containing a #' \code{\link{Pop-class}} and a data.frame containing the #' postions of mutations if returnPos=TRUE #' #' @examples #' #Create founder haplotypes #' founderPop = quickHaplo(nInd=2, nChr=1, segSites=10) #' #' #Set simulation parameters #' SP = SimParam$new(founderPop) #' \dontshow{SP$nThreads = 1L} #' SP$addTraitA(10) #' #' #Create population #' pop = newPop(founderPop, simParam=SP) #' #' #Introduce mutations #' pop = mutate(pop, simParam=SP) #' #' @export mutate = function(pop, mutRate=2.5e-8, returnPos=FALSE, simParam=NULL){ # Mutation history variable IND=NULL; CHR=NULL; HAP=NULL; SITE=NULL # Number of haplotypes per chromosome nHap = pop@nInd*pop@ploidy # Number of total sites s = sum(pop@nLoci) # Number of mutations per haplotype nMut = rbinom(nHap, s, mutRate) if(any(nMut>0L)){ for(take in which(nMut>0L)){ # Determine haplotype and individual ind = (take-1L)%/%pop@ploidy + 1L hap = (take-1L)%%pop@ploidy + 1L # Sample mutation sites sites = sampleInt(nMut[take], s) + 1L # Resolve all mutations chr = 1L for(i in sites){ # Find chromosome repeat{ if(i > sum(pop@nLoci[1L:chr])){ chr = chr + 1L }else{ break } } # Find site if(chr>1L){ site = i - sum(pop@nLoci[1L:(chr-1L)]) }else{ site = i } # Create mutation BYTE = (site-1L)%/%8L + 1L BIT = (site-1L)%%8L + 1L TMP = pop@geno[[chr]][BYTE,hap,ind] TMP = rawToBits(TMP) TMP[BIT] = ifelse(TMP[BIT], as.raw(0L), as.raw(1L)) TMP = packBits(TMP) pop@geno[[chr]][BYTE,hap,ind] = TMP # Record results if(returnPos){ IND = c(IND, ind) CHR = c(CHR, chr) HAP = c(HAP, hap) SITE = c(SITE, site) } } } # Reset population PHENO = pop@pheno EBV = pop@ebv pop = resetPop(pop=pop, simParam=simParam) pop@pheno = PHENO pop@ebv = EBV } # Return results if(returnPos){ 
return(list(pop,data.frame(individual=IND,chromosome=CHR,haplotype=HAP,site=SITE))) }else{ return(pop) } } #' @title Lose individuals at random #' #' @description #' Samples individuals at random to remove from the population. #' The user supplies a probability for the individuals to be #' removed from the population. #' #' @param pop an object of \code{\link{Pop-class}} #' @param p the expected proportion of individuals that will #' be lost to attrition. #' #' @return an object of \code{\link{Pop-class}} #' #' @examples #' #Create founder haplotypes #' founderPop = quickHaplo(nInd=100, nChr=1, segSites=10) #' #' #Set simulation parameters #' SP = SimParam$new(founderPop) #' \dontshow{SP$nThreads = 1L} #' #' #Create population #' pop = newPop(founderPop, simParam=SP) #' #' #Lose an expected 5% of individuals #' pop = attrition(pop, p=0.05) #' #' @export attrition = function(pop, p){ take = as.logical(rbinom(pop@nInd, size=1, prob=1-p)) return(pop[take]) } # Sample deviates from a standard normal distribution # n is the number of deviates # u is a deviate from a uniform distribution [0,1] # Seed is generated from u rnormWithSeed = function(n, u){ glbEnv = globalenv() origSeed = glbEnv$.Random.seed on.exit({ if(is.null(origSeed)){ rm(list =".Random.seed", envir=glbEnv) }else{ assign(".Random.seed", value=origSeed, envir=glbEnv) } }) set.seed(as.integer((u-0.5)*2*2147483647)) rnorm(n) }
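# Usage sketch (not run): transMat() smooths an invalid (non positive
# semi-definite) matrix and warns, per its details section above. The
# matrix below is a deliberately inconsistent "correlation" matrix chosen
# for illustration; wrapped in `if (FALSE)` so nothing executes on source.
if (FALSE) {
  R = matrix(c( 1.0, 0.9, -0.9,
                0.9, 1.0,  0.9,
               -0.9, 0.9,  1.0), nrow=3)
  T = transMat(R) # warns and smooths R
  X = matrix(rnorm(3*10000), ncol=3)
  cor(X%*%T) # close to the smoothed matrix, not to R itself
}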
# ---- end of AlphaSimR/R/misc.R ----
#Adds random error to a matrix of genetic values
addError = function(gv, varE, reps){
  nTraits = ncol(gv)
  nInd = nrow(gv)
  if(is.matrix(varE)){
    stopifnot(isSymmetric(varE),
              ncol(varE)==nTraits)
    error = matrix(rnorm(nInd*nTraits),
                   ncol=nTraits)%*%transMat(varE)
  }else{
    stopifnot(length(varE)==nTraits)
    error = lapply(varE, function(x){
      if(is.na(x)){
        return(rep(NA_real_, nInd))
      }else{
        return(rnorm(nInd, sd=sqrt(x)))
      }
    })
    error = do.call("cbind", error)
  }
  error = error/sqrt(reps)
  pheno = gv + error
  return(pheno)
}

#See setPheno documentation
calcPheno = function(pop, varE, reps, p, traits, simParam){
  nTraits = length(traits)

  if(nTraits==0L){
    return(pop@pheno)
  }

  gv = pop@gv
  for(i in 1:nTraits){
    if(.hasSlot(simParam$traits[[traits[i]]], "envVar")){
      stdDev = sqrt(simParam$traits[[traits[i]]]@envVar)
      gv[,traits[i]] = gv[,traits[i]] +
        pop@gxe[[traits[i]]]*qnorm(p[i], sd=stdDev)
    }
  }
  gv = gv[,traits,drop=FALSE]

  # Calculate new phenotypes
  newPheno = addError(gv=gv, varE=varE, reps=reps)

  # Add to old phenotype
  pheno = pop@pheno
  pheno[,traits] = newPheno

  return(pheno)
}

#' @title Set phenotypes
#'
#' @description
#' Sets phenotypes for all traits by adding random error
#' from a multivariate normal distribution.
#'
#' @param pop an object of \code{\link{Pop-class}} or
#' \code{\link{HybridPop-class}}
#' @param h2 a vector of desired narrow-sense heritabilities for
#' each trait. See details.
#' @param H2 a vector of desired broad-sense heritabilities for
#' each trait. See details.
#' @param varE error (co)variances for traits. See details.
#' @param corE an optional matrix for correlations between errors.
#' See details.
#' @param reps number of replications for phenotype. See details.
#' @param fixEff fixed effect to assign to the population. Used
#' by genomic selection models only.
#' @param p the p-value for the environmental covariate
#' used by GxE traits. If NULL, a value is
#' sampled at random.
#' @param onlyPheno should only the phenotype be returned, see return
#' @param traits an integer vector indicating which traits to set. If NULL,
#' all traits will be set.
#' @param simParam an object of \code{\link{SimParam}}
#'
#' @details
#' There are three arguments for setting the error variance of a
#' phenotype: h2, H2, and varE. The user should only use one of these
#' arguments. If the user supplies values for more than one, only one
#' will be used according to the order in which they are listed above.
#'
#' The h2 argument allows the user to specify the error variance
#' according to narrow-sense heritability. This calculation uses the
#' additive genetic variance and total genetic variance in the founder
#' population. Thus, the heritability relates to the founder population
#' and not the current population.
#'
#' The H2 argument allows the user to specify the error variance
#' according to broad-sense heritability. This calculation uses the
#' total genetic variance in the founder population. Thus, the heritability
#' relates to the founder population and not the current population.
#'
#' The varE argument allows the user to specify the error variance
#' directly. The user may supply a vector describing the error variance
#' for each trait or supply a matrix that specifies the covariance of
#' the errors.
#'
#' The corE argument allows the user to specify correlations for the
#' error covariance matrix. These correlations are supplied in addition
#' to the h2, H2, or varE arguments. These correlations will be used to
#' construct a covariance matrix from a vector of variances.
If the user
#' supplied a covariance matrix to varE, these correlations will supersede
#' values provided in that matrix.
#'
#' The reps parameter is for convenient representation of replicated data.
#' It is intended to represent replicated yield trials in plant
#' breeding programs. In this case, varE is set to the plot error and
#' reps is set to the number of plots per entry. The resulting phenotype
#' represents the entry means.
#'
#' @return Returns an object of \code{\link{Pop-class}} or
#' \code{\link{HybridPop-class}} if onlyPheno=FALSE, if
#' onlyPheno=TRUE a matrix is returned
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitA(10)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' #Add phenotype with error variance of 1
#' pop = setPheno(pop, varE=1)
#'
#' @export
setPheno = function(pop, h2=NULL, H2=NULL, varE=NULL, corE=NULL,
                    reps=1, fixEff=1L, p=NULL, onlyPheno=FALSE,
                    traits=NULL, simParam=NULL){
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }

  # Determine which traits are selected
  if(is.null(traits)){
    if(simParam$nTraits>0L){
      traits = 1:simParam$nTraits
    }else{
      traits = integer()
    }
  }else{
    traits = as.integer(traits)
    stopifnot(all(traits>0L),
              all(!duplicated(traits)),
              max(traits)<=simParam$nTraits)
  }
  nTraits = length(traits)

  # Check for valid length of reps vector
  if(length(reps)==1){
    reps = rep(reps, nTraits)
  }else{
    stopifnot(length(reps)==nTraits)
  }

  # Set p-value for GxE traits
  if(is.null(p)){
    p = rep(runif(1), nTraits)
  }else if(length(p)==1){
    p = rep(p, nTraits)
  }else{
    stopifnot(length(p)==nTraits)
  }

  # Calculate varE if using h2 or H2
  if(!is.null(h2)){
    if(length(h2)==1){
      h2 = rep(h2, nTraits)
    }
    varA = simParam$varA[traits]
    varG = simParam$varG[traits]
    stopifnot(length(h2)==nTraits,
              all(varA>0),
              all(varG>0))
    varE = numeric(nTraits)
    for(i in 1:nTraits){
      tmp = varA[i]/h2[i]-varG[i]
      if(tmp<0){
        stop(paste0("h2=",h2[i]," is not possible for trait ",traits[i]))
      }
      varE[i] = tmp
    }
  }else if(!is.null(H2)){
    if(length(H2)==1){
      H2 = rep(H2, nTraits)
    }
    varG = simParam$varG[traits]
    stopifnot(length(H2)==nTraits)
    varE = numeric(nTraits)
    for(i in 1:nTraits){
      tmp = varG[i]/H2[i]-varG[i]
      varE[i] = tmp
    }
  }else if(!is.null(varE)){
    if(is.matrix(varE)){
      stopifnot(nTraits==nrow(varE),
                isSymmetric(varE))
    }else{
      stopifnot(length(varE)==nTraits)
    }
  }else{
    if(is.matrix(simParam$varE)){
      varE = simParam$varE[traits, traits]
    }else{
      varE = simParam$varE[traits]
    }
  }

  # Set error correlations
  if(!is.null(corE)){
    if(is.matrix(varE)){
      varE = diag(varE)
    }
    stopifnot(length(varE)==nrow(corE),
              isSymmetric(corE))
    varE = diag(sqrt(varE),
                nrow=nTraits,
                ncol=nTraits)
    varE = varE%*%corE%*%varE
  }

  # Use lapply if object is a MultiPop
  # Only passing varE after previous processing
  if(is(pop,"MultiPop")){
    stopifnot(!onlyPheno)
    pop@pops = lapply(pop@pops, setPheno, h2=NULL, H2=NULL,
                      varE=varE, corE=NULL, reps=reps, fixEff=fixEff,
                      p=p, traits=traits, simParam=simParam)
    return(pop)
  }

  # Create phenotypes
  pheno = calcPheno(pop=pop, varE=varE, reps=reps, p=p,
                    traits=traits, simParam=simParam)
  colnames(pheno) = colnames(pop@gv)
  if(onlyPheno){
    return(pheno)
  }
  pop@pheno = pheno
  if(is(pop,"Pop")){
    pop@fixEff = rep(as.integer(fixEff), pop@nInd)
  }
  return(pop)
}
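# Usage sketch (not run): the reps argument divides the error variance,
# mimicking entry means over replicated plots as described in the details
# above. Numbers are arbitrary assumptions; wrapped in `if (FALSE)` so
# nothing executes on source.
if (FALSE) {
  founderPop = quickHaplo(nInd=100, nChr=1, segSites=10)
  SP = SimParam$new(founderPop)
  SP$addTraitA(10)
  pop = newPop(founderPop, simParam=SP)
  p1 = setPheno(pop, varE=4, reps=1, onlyPheno=TRUE, simParam=SP)
  p4 = setPheno(pop, varE=4, reps=4, onlyPheno=TRUE, simParam=SP)
  var(p1 - gv(pop)) # roughly varE = 4
  var(p4 - gv(pop)) # roughly varE/reps = 1
}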
# ---- end of AlphaSimR/R/phenotypes.R ----
#' @title Create individuals with reduced ploidy #' #' @description Creates new individuals from gametes. This function #' was created to model the creation of diploid potatoes from #' tetraploid potatoes. It can be used on any population with an #' even ploidy level. The newly created individuals will have half #' the ploidy level of the originals. The reduction can occur with #' or without genetic recombination. #' #' @param pop an object of 'Pop' superclass #' @param nProgeny total number of progeny per individual #' @param useFemale should female recombination rates be used. #' @param keepParents should previous parents be used for mother and #' father. #' @param simRecomb should genetic recombination be modeled. #' @param simParam an object of 'SimParam' class #' #' @return Returns an object of \code{\link{Pop-class}} #' #' @examples #' #Create founder haplotypes #' founderPop = quickHaplo(nInd=2, nChr=1, segSites=10) #' #' #Set simulation parameters #' SP = SimParam$new(founderPop) #' \dontshow{SP$nThreads = 1L} #' #' #Create population #' pop = newPop(founderPop, simParam=SP) #' #' #Create individuals with reduced ploidy #' pop2 = reduceGenome(pop, simParam=SP) #' #' @export reduceGenome = function(pop,nProgeny=1,useFemale=TRUE,keepParents=TRUE, simRecomb=TRUE,simParam=NULL){ if(is.null(simParam)){ simParam = get("SP",envir=.GlobalEnv) } if(pop@ploidy%%2L){ stop("You cannot reduce odd ploidy levels") } if(simRecomb){ if(useFemale){ map = simParam$femaleMap }else{ map = simParam$maleMap } }else{ # Create dummy map with zero genetic distance map = vector("list",pop@nChr) for(i in 1:pop@nChr){ map[[i]] = rep(0,pop@nLoci[i]) } map = as.matrix(map) } tmp = createReducedGenome(pop@geno, nProgeny, map, simParam$v, simParam$p, simParam$isTrackRec, pop@ploidy, simParam$femaleCentromere, simParam$quadProb, simParam$nThreads) dim(tmp$geno) = NULL rPop = new("RawPop", nInd=as.integer(pop@nInd*nProgeny), nChr=pop@nChr, ploidy=as.integer(pop@ploidy/2), nLoci=pop@nLoci, geno=tmp$geno) if(simParam$isTrackRec){ hist = tmp$recHist }else{ hist = NULL } if(keepParents){ return(newPop(rawPop=rPop, mother=rep(pop@mother,each=nProgeny), father=rep(pop@father,each=nProgeny), simParam=simParam, iMother=rep(pop@iid,each=nProgeny), iFather=rep(pop@iid,each=nProgeny), femaleParentPop=pop, maleParentPop=pop, hist=hist )) }else{ return(newPop(rawPop=rPop, mother=rep(pop@id,each=nProgeny), father=rep(pop@id,each=nProgeny), simParam=simParam, iMother=rep(pop@iid,each=nProgeny), iFather=rep(pop@iid,each=nProgeny), femaleParentPop=pop, maleParentPop=pop, hist=hist )) } } #' @title Double the ploidy of individuals #' #' @description Creates new individuals with twice the ploidy. #' This function was created to model the formation of tetraploid #' potatoes from diploid potatoes. This function will work on any #' population. #' #' @param pop an object of 'Pop' superclass #' @param keepParents should previous parents be used for mother and #' father. 
#' @param simParam an object of 'SimParam' class
#'
#' @return Returns an object of \code{\link{Pop-class}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=2, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' #Create individuals with doubled ploidy
#' pop2 = doubleGenome(pop, simParam=SP)
#'
#' @export
doubleGenome = function(pop, keepParents=TRUE, simParam=NULL){
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }
  geno = pop@geno
  for(i in 1:pop@nChr){
    geno[[i]] = geno[[i]][,rep(1:pop@ploidy,each=2),,drop=FALSE]
  }
  rPop = new("RawPop",
             nInd=as.integer(pop@nInd),
             nChr=pop@nChr,
             ploidy=2L*pop@ploidy,
             nLoci=pop@nLoci,
             geno=geno)
  if(simParam$isTrackRec){
    # Match haplotypes according to original ploidy
    hist = vector("list", pop@ploidy)
    for(i in 1:pop@ploidy){
      hist[[i]] = cbind(i, 1L)
    }
    # Double ploidy
    hist = rep(hist, each=2)
    # Rep for chromosomes and individuals
    hist = rep(list(hist), pop@nChr)
    hist = rep(list(hist), pop@nInd)
  }else{
    hist = NULL
  }
  if(keepParents){
    return(newPop(rawPop=rPop,
                  mother=pop@mother,
                  father=pop@father,
                  simParam=simParam,
                  iMother=pop@iid,
                  iFather=pop@iid,
                  femaleParentPop=pop,
                  maleParentPop=pop,
                  hist=hist))
  }else{
    return(newPop(rawPop=rPop,
                  mother=pop@id,
                  father=pop@id,
                  simParam=simParam,
                  iMother=pop@iid,
                  iFather=pop@iid,
                  femaleParentPop=pop,
                  maleParentPop=pop,
                  hist=hist))
  }
}

#' @title Combine genomes of individuals
#'
#' @description
#' This function is designed to model the pairing of gametes. The male
#' and female individuals are treated as gametes, so the ploidy of newly
#' created individuals will be the sum of the ploidy levels of its parents.
#'
#' @param females an object of \code{\link{Pop-class}} for female parents.
#' @param males an object of \code{\link{Pop-class}} for male parents.
#' @param crossPlan a matrix with two columns representing
#' female and male parents. Either integers for the position in
#' population or character strings for the IDs.
#' @param simParam an object of \code{\link{SimParam}} #' #' @return Returns an object of \code{\link{Pop-class}} #' #' @examples #' #Create founder haplotypes #' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10) #' #' #Set simulation parameters #' SP = SimParam$new(founderPop) #' \dontshow{SP$nThreads = 1L} #' #' #Create population #' pop = newPop(founderPop, simParam=SP) #' #' #Cross individual 1 with individual 10 #' crossPlan = matrix(c(1,10), nrow=1, ncol=2) #' pop2 = mergeGenome(pop, pop, crossPlan, simParam=SP) #' #' @export mergeGenome = function(females,males,crossPlan,simParam=NULL){ if(is.null(simParam)){ simParam = get("SP",envir=.GlobalEnv) } if(is.character(crossPlan)){ #Match by ID crossPlan = cbind(match(crossPlan[,1],females@id), match(crossPlan[,2],males@id)) if(any(is.na(crossPlan))){ stop("Failed to match supplied IDs") } } if((max(crossPlan[,1])>nInd(females)) | (max(crossPlan[,2])>nInd(males)) | (min(crossPlan)<1L)){ stop("Invalid crossPlan") } # Merge genotype data geno = vector("list", females@nChr) for(i in 1:females@nChr){ geno[[i]] = array(as.raw(0), dim = c(dim(females@geno[[i]])[1], females@ploidy+males@ploidy, nrow(crossPlan))) for(j in 1:nrow(crossPlan)){ # Add female gametes geno[[i]][,1:females@ploidy,j] = females@geno[[i]][,,crossPlan[j,1]] # Add male gametes geno[[i]][,(females@ploidy+1):(females@ploidy+males@ploidy),j] = males@geno[[i]][,,crossPlan[j,2]] } } rPop = new("RawPop", nInd=as.integer(nrow(crossPlan)), nChr=females@nChr, ploidy=females@ploidy+males@ploidy, nLoci=females@nLoci, geno=geno) if(simParam$isTrackRec){ # Create history for haplotypes hist = vector("list", rPop@ploidy) # Add female contribution for(i in 1:females@ploidy){ hist[[i]] = cbind(i, 1L) } # Add male contribution for(i in 1:males@ploidy){ hist[[i+females@ploidy]] = cbind(i, 1L) } # Rep for chromosomes and individuals hist = rep(list(hist), rPop@nChr) hist = rep(list(hist), rPop@nInd) }else{ hist = NULL } return(newPop(rawPop=rPop, mother=females@id[crossPlan[,1]], father=males@id[crossPlan[,2]], simParam=simParam, iMother=females@iid[crossPlan[,1]], iFather=males@iid[crossPlan[,2]], femaleParentPop=females, maleParentPop=males, hist=hist )) }
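# Usage sketch (not run): reduceGenome() and mergeGenome() can be chained
# to model gamete formation and pairing, e.g. reducing tetraploids to
# diploid "gametes" and merging two of them back into a tetraploid. Sizes
# are arbitrary assumptions; wrapped in `if (FALSE)` so nothing executes
# on source.
if (FALSE) {
  founderPop = quickHaplo(nInd=4, nChr=1, segSites=10, ploidy=4)
  SP = SimParam$new(founderPop)
  pop = newPop(founderPop, simParam=SP)    # tetraploid
  gametes = reduceGenome(pop, simParam=SP) # diploid "gametes"
  crossPlan = matrix(c(1,2), nrow=1, ncol=2)
  tetra = mergeGenome(gametes, gametes, crossPlan, simParam=SP)
  tetra@ploidy # back to 4
}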
# ---- end of AlphaSimR/R/polyploids.R ----
# Internal function for calculating the mean EBV of populations
# Used by selectPop for the MultiPop-class
meanEBV = function(pop){
  colMeans(pop@ebv)
}

#' @title Mean genetic values
#'
#' @description Returns the mean genetic values for all traits
#'
#' @param pop an object of \code{\link{Pop-class}} or \code{\link{HybridPop-class}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' SP$addTraitA(10)
#' SP$setVarE(h2=0.5)
#' \dontshow{SP$nThreads = 1L}
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#' meanG(pop)
#'
#' @export
meanG = function(pop){
  colMeans(pop@gv)
}

#' @title Mean phenotypic values
#'
#' @description Returns the mean phenotypic values for all traits
#'
#' @param pop an object of \code{\link{Pop-class}} or \code{\link{HybridPop-class}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' SP$addTraitA(10)
#' SP$setVarE(h2=0.5)
#' \dontshow{SP$nThreads = 1L}
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#' meanP(pop)
#'
#' @export
meanP = function(pop){
  colMeans(pop@pheno)
}

#' @title Total genetic variance
#'
#' @description Returns total genetic variance for all traits
#'
#' @param pop an object of \code{\link{Pop-class}} or \code{\link{HybridPop-class}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' SP$addTraitA(10)
#' SP$setVarE(h2=0.5)
#' \dontshow{SP$nThreads = 1L}
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#' varG(pop)
#'
#' @export
varG = function(pop){
  G = popVar(pop@gv)
  rownames(G) = colnames(G) = colnames(pop@gv)
  return(G)
}

#' @title Phenotypic variance
#'
#' @description Returns phenotypic variance for all traits
#'
#' @param pop an object of \code{\link{Pop-class}} or \code{\link{HybridPop-class}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' SP$addTraitA(10)
#' SP$setVarE(h2=0.5)
#' \dontshow{SP$nThreads = 1L}
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#' varP(pop)
#'
#' @export
varP = function(pop){
  P = popVar(pop@pheno)
  rownames(P) = colnames(P) = colnames(pop@pheno)
  return(P)
}

#' @title Summarize genetic parameters
#'
#' @description
#' Calculates genetic and genic additive and dominance variances
#' for an object of \code{\link{Pop-class}}
#'
#' @param pop an object of \code{\link{Pop-class}}
#' @param simParam an object of \code{\link{SimParam}}
#'
#' @return
#' \describe{
#' \item{varA}{an nTrait by nTrait matrix of additive genetic variances}
#' \item{varD}{an nTrait by nTrait matrix of dominance genetic variances}
#' \item{varAA}{an nTrait by nTrait matrix of additive-by-additive genetic variances}
#' \item{varG}{an nTrait by nTrait matrix of total genetic variances}
#' \item{genicVarA}{an nTrait vector of additive genic variances}
#' \item{genicVarD}{an nTrait vector of dominance genic variances}
#' \item{genicVarAA}{an nTrait vector of additive-by-additive genic variances}
#' \item{genicVarG}{an nTrait vector of total genic variances}
#' \item{covA_HW}{an nTrait vector of additive covariances due to non-random mating}
#' \item{covD_HW}{an nTrait vector of dominance covariances due to non-random mating}
#' \item{covAA_HW}{an nTrait vector of additive-by-additive covariances due to non-random mating}
#' \item{covG_HW}{an nTrait vector of total genic covariances due to non-random mating}
#' \item{covA_L}{an nTrait vector of additive covariances due to linkage disequilibrium}
#' \item{covD_L}{an nTrait vector of dominance covariances due to linkage disequilibrium}
#' \item{covAA_L}{an nTrait vector of additive-by-additive covariances due to linkage disequilibrium}
#' \item{covAD_L}{an nTrait vector of additive by dominance covariances due to linkage disequilibrium}
#' \item{covAAA_L}{an nTrait vector of additive by additive-by-additive covariances due to linkage disequilibrium}
#' \item{covDAA_L}{an nTrait vector of dominance by additive-by-additive covariances due to linkage disequilibrium}
#' \item{covG_L}{an nTrait vector of total genic covariances due to linkage disequilibrium}
#' \item{mu}{an nTrait vector of trait means}
#' \item{mu_HW}{an nTrait vector of expected trait means under random mating}
#' \item{gv}{a matrix of genetic values with dimensions nInd by nTraits}
#' \item{bv}{a matrix of breeding values with dimensions nInd by nTraits}
#' \item{dd}{a matrix of dominance deviations with dimensions nInd by nTraits}
#' \item{aa}{a matrix of additive-by-additive epistatic deviations with dimensions nInd by nTraits}
#' \item{gv_mu}{an nTrait vector of intercepts}
#' \item{gv_a}{a matrix of additive genetic values with dimensions nInd by nTraits}
#' \item{gv_d}{a matrix of dominance genetic values with dimensions nInd by nTraits}
#' \item{gv_aa}{a matrix of additive-by-additive genetic values with dimensions nInd by nTraits}
#' }
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' SP$addTraitAD(10, meanDD=0.5)
#' SP$setVarE(h2=0.5)
#' \dontshow{SP$nThreads = 1L}
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#' ans = genParam(pop, simParam=SP)
#'
#' @export
genParam = function(pop, simParam=NULL){
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }
  stopifnot(class(pop)=="Pop")
  nInd = nInd(pop)
  nTraits = simParam$nTraits
  traitNames = simParam$traitNames

  # Blank nInd x nTrait matrices
  gv = matrix(NA_real_, nrow=nInd, ncol=nTraits)
  colnames(gv) = traitNames
  bv = dd = aa = gv_a = gv_d = gv_aa = gv

  # Blank nTrait vectors
  genicVarA = rep(NA_real_, nTraits)
  names(genicVarA) = traitNames
  genicVarD = genicVarAA = covA_HW = covD_HW = covAA_HW = covG_HW =
    mu = mu_HW = gv_mu = covAAA_L = covDAA_L = covAD_L = genicVarA

  #Loop through trait calculations
  for(i in 1:nTraits){
    trait = simParam$traits[[i]]
    tmp = calcGenParam(trait, pop, simParam$nThreads)
    genicVarA[i] = tmp$genicVarA2
    covA_HW[i] = tmp$genicVarA-tmp$genicVarA2
    gv[,i] = tmp$gv
    bv[,i] = tmp$bv
    mu[i] = tmp$mu
    mu_HW[i] = tmp$mu_HWE
    gv_a[,i] = tmp$gv_a
    gv_mu[i] = tmp$gv_mu
    if(.hasSlot(trait,"domEff")){
      genicVarD[i] = tmp$genicVarD2
      covD_HW[i] = tmp$genicVarD-tmp$genicVarD2
      dd[,i] = tmp$dd
      gv_d[,i] = tmp$gv_d
    }else{
      genicVarD[i] = 0
      covD_HW[i] = 0
      dd[,i] = rep(0, pop@nInd)
      gv_d[,i] = rep(0, pop@nInd)
    }
    if(.hasSlot(trait,"epiEff")){
      genicVarAA[i] = tmp$genicVarAA2
      covAA_HW[i] = tmp$genicVarAA-tmp$genicVarAA2
      aa[,i] = tmp$aa
      gv_aa[,i] = tmp$gv_aa
    }else{
      genicVarAA[i] = 0
      covAA_HW[i] = 0
      aa[,i] = rep(0, pop@nInd)
      gv_aa[,i] = rep(0, pop@nInd)
    }
    if(nInd==1){
      covAD_L[i] = 0
      covAAA_L[i] = 0
      covDAA_L[i] = 0
    } else {
      covAD_L[i] = popVar(cbind(bv[,i],dd[,i]))[1,2]
      covAAA_L[i] = popVar(cbind(bv[,i],aa[,i]))[1,2]
      covDAA_L[i] = popVar(cbind(dd[,i],aa[,i]))[1,2]
    }
  }
  varA = popVar(bv)
  rownames(varA) = colnames(varA) = traitNames
  varD = popVar(dd)
  rownames(varD) = colnames(varD) = traitNames
  varAA = popVar(aa)
  rownames(varAA) = colnames(varAA) = traitNames
  varG = popVar(gv)
  rownames(varG) = colnames(varG) = traitNames
  genicVarG = genicVarA + genicVarD + genicVarAA
  covG_HW = covA_HW + covD_HW + covAA_HW
  output = list(varA=varA,
                varD=varD,
                varAA=varAA,
                varG=varG,
                genicVarA=genicVarA,
                genicVarD=genicVarD,
                genicVarAA=genicVarAA,
                genicVarG=genicVarG,
                covA_HW=covA_HW,
                covD_HW=covD_HW,
                covAA_HW=covAA_HW,
                covG_HW=covG_HW,
                covA_L=diag(varA)-genicVarA-covA_HW,
                covD_L=diag(varD)-genicVarD-covD_HW,
                covAA_L=diag(varAA)-genicVarAA-covAA_HW,
                covAD_L=covAD_L,
                covAAA_L=covAAA_L,
                covDAA_L=covDAA_L,
                covG_L=diag(varG)-genicVarG-covG_HW,
                mu=mu,
                mu_HW=mu_HW,
                gv=gv,
                bv=bv,
                dd=dd,
                aa=aa,
                gv_mu=gv_mu,
                gv_a=gv_a,
                gv_d=gv_d,
                gv_aa=gv_aa)
  return(output)
}

#' @title Additive variance
#'
#' @description Returns additive variance for all traits
#'
#' @param pop an object of \code{\link{Pop-class}}
#' @param simParam an object of \code{\link{SimParam}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' SP$addTraitAD(10, meanDD=0.5)
#' SP$setVarE(h2=0.5)
#' \dontshow{SP$nThreads = 1L}
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#' varA(pop, simParam=SP)
#'
#' @export
varA = function(pop,simParam=NULL){
  genParam(pop,simParam=simParam)$varA
}

#' @title Dominance variance
#'
#' @description Returns dominance variance for all traits
#'
#' @param pop an object of \code{\link{Pop-class}}
#' @param simParam an object of \code{\link{SimParam}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' SP$addTraitAD(10, meanDD=0.5)
#' SP$setVarE(h2=0.5)
#' \dontshow{SP$nThreads = 1L}
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#' varD(pop, simParam=SP)
#'
#' @export
varD = function(pop,simParam=NULL){
  genParam(pop,simParam=simParam)$varD
}

#' @title Additive-by-additive epistatic variance
#'
#' @description Returns additive-by-additive epistatic
#' variance for all traits
#'
#' @param pop an object of \code{\link{Pop-class}}
#' @param simParam an object of \code{\link{SimParam}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' SP$addTraitAD(10, meanDD=0.5)
#' SP$setVarE(h2=0.5)
#' \dontshow{SP$nThreads = 1L}
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#' varAA(pop, simParam=SP)
#'
#' @export
varAA = function(pop,simParam=NULL){
  genParam(pop,simParam=simParam)$varAA
}

#' @title Breeding value
#'
#' @description Returns breeding values for all traits
#'
#' @param pop an object of \code{\link{Pop-class}}
#' @param simParam an object of \code{\link{SimParam}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' SP$addTraitAD(10, meanDD=0.5)
#' SP$setVarE(h2=0.5)
#' \dontshow{SP$nThreads = 1L}
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#' bv(pop, simParam=SP)
#'
#' @export
bv = function(pop,simParam=NULL){
  genParam(pop,simParam=simParam)$bv
}

#' @title Dominance deviations
#'
#' @description Returns dominance deviations for all traits
#'
#' @param pop an object of \code{\link{Pop-class}}
#' @param simParam an object of \code{\link{SimParam}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' SP$addTraitAD(10, meanDD=0.5)
#' SP$setVarE(h2=0.5)
#' \dontshow{SP$nThreads = 1L}
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#' dd(pop, simParam=SP)
#'
#' @export
dd = function(pop,simParam=NULL){
  genParam(pop,simParam=simParam)$dd
}

#' @title Additive-by-additive epistatic deviations
#'
#' @description Returns additive-by-additive epistatic
#' deviations for all traits
#'
#' @param pop an object of \code{\link{Pop-class}}
#' @param simParam an object of \code{\link{SimParam}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' SP$addTraitAD(10, meanDD=0.5)
#' SP$setVarE(h2=0.5)
#' \dontshow{SP$nThreads = 1L}
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#' aa(pop, simParam=SP)
#'
#' @export
aa = function(pop,simParam=NULL){
  genParam(pop,simParam=simParam)$aa
}

#' @title Additive genic variance
#'
#' @description Returns additive genic variance for all traits
#'
#' @param pop an object of \code{\link{Pop-class}}
#' @param simParam an object of \code{\link{SimParam}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' SP$addTraitAD(10, meanDD=0.5)
#' SP$setVarE(h2=0.5)
#' \dontshow{SP$nThreads = 1L}
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#' genicVarA(pop, simParam=SP)
#'
#' @export
genicVarA = function(pop,simParam=NULL){
  genParam(pop,simParam=simParam)$genicVarA
}

#' @title Dominance genic variance
#'
#' @description Returns dominance genic variance for all traits
#'
#' @param pop an object of \code{\link{Pop-class}}
#' @param simParam an object of \code{\link{SimParam}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' SP$addTraitAD(10, meanDD=0.5)
#' SP$setVarE(h2=0.5)
#' \dontshow{SP$nThreads = 1L}
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#' genicVarD(pop, simParam=SP)
#'
#' @export
genicVarD = function(pop,simParam=NULL){
  genParam(pop,simParam=simParam)$genicVarD
}

#' @title Additive-by-additive genic variance
#'
#' @description Returns additive-by-additive epistatic
#' genic variance for all traits
#'
#' @param pop an object of \code{\link{Pop-class}}
#' @param simParam an object of \code{\link{SimParam}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' SP$addTraitAD(10, meanDD=0.5)
#' SP$setVarE(h2=0.5)
#' \dontshow{SP$nThreads = 1L}
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#' genicVarAA(pop, simParam=SP)
#'
#' @export
genicVarAA = function(pop,simParam=NULL){
  genParam(pop,simParam=simParam)$genicVarAA
}

#' @title Total genic variance
#'
#' @description Returns total genic variance for all traits
#'
#' @param pop an object of \code{\link{Pop-class}}
#' @param simParam an object of \code{\link{SimParam}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' SP$addTraitAD(10, meanDD=0.5)
#' SP$setVarE(h2=0.5)
#' \dontshow{SP$nThreads = 1L}
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#' genicVarG(pop, simParam=SP)
#'
#' @export
genicVarG = function(pop,simParam=NULL){
  genParam(pop,simParam=simParam)$genicVarG
}

#' @title Genetic value
#'
#' @description A wrapper for accessing the gv slot
#'
#' @param pop a \code{\link{Pop-class}} or similar object
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' SP$addTraitAD(10, meanDD=0.5)
#' SP$setVarE(h2=0.5)
#' \dontshow{SP$nThreads = 1L}
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#' gv(pop)
#'
#' @export
gv = function(pop){
  pop@gv
}

#' @title Phenotype
#'
#' @description A wrapper for accessing the pheno slot
#'
#' @param pop a \code{\link{Pop-class}} or similar object
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' SP$addTraitAD(10, meanDD=0.5)
#' SP$setVarE(h2=0.5)
#' \dontshow{SP$nThreads = 1L}
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#' pheno(pop)
#'
#' @export
pheno = function(pop){
  pop@pheno
}

#' @title Estimated breeding value
#'
#' @description A wrapper for accessing the ebv slot
#'
#' @param pop a \code{\link{Pop-class}} or similar object
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitAD(10, meanDD=0.5)
#' SP$setVarE(h2=0.5)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#' pop@ebv = matrix(rnorm(pop@nInd), nrow=pop@nInd, ncol=1)
#' ebv(pop)
#'
#' @export
ebv = function(pop){
  pop@ebv
}

#' @title Number of individuals
#'
#' @description A wrapper for accessing the nInd slot
#'
#' @param pop a \code{\link{Pop-class}} or similar object
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitAD(10, meanDD=0.5)
#' SP$setVarE(h2=0.5)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#' nInd(pop)
#'
#' @export
nInd = function(pop){
  pop@nInd
}
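# ---------------------------------------------------------------------
# Illustrative sketch (not part of the package source): genParam() above
# defines genicVarG as genicVarA + genicVarD + genicVarAA, so the
# accessor functions in this file should satisfy the same identity. The
# commented check below assumes a SimParam object "SP" with an A+D
# trait, as in the roxygen examples above.
# if(FALSE){
#   founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#   SP = SimParam$new(founderPop)
#   SP$addTraitAD(10, meanDD=0.5)
#   pop = newPop(founderPop, simParam=SP)
#   total = genicVarA(pop, simParam=SP) +
#     genicVarD(pop, simParam=SP) +
#     genicVarAA(pop, simParam=SP)
#   all.equal(total, genicVarG(pop, simParam=SP)) # expected TRUE
# }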
# ==== end of file: AlphaSimR/R/popSummary.R ====
# Subsets loci by chromosome
# chr, chromosomes to keep; if NULL, all loci are returned unchanged
# inLociPerChr, number of loci per chromosome
# inLociLoc, position of loci on the chromosomes
selectLoci = function(chr, inLociPerChr, inLociLoc){
  if(is.null(chr)){
    return(list(lociPerChr=inLociPerChr, lociLoc=inLociLoc))
  }
  nChr = length(inLociPerChr)
  stopifnot(all(chr%in%(1:nChr)))
  outLociPerChr = numeric(nChr)
  outLociPerChr[chr] = inLociPerChr[chr]
  outLociLoc = numeric(sum(outLociPerChr))
  inStart = outStart = inEnd = outEnd = 0L
  for(i in 1:nChr){
    inStart = inStart + 1L
    inEnd = inEnd + inLociPerChr[i]
    if(outLociPerChr[i]>0){
      outStart = outStart + 1L
      outEnd = outEnd + inLociPerChr[i]
      outLociLoc[outStart:outEnd] = inLociLoc[inStart:inEnd]
      outStart = outEnd
    }
    inStart = inEnd
  }
  return(list(lociPerChr=outLociPerChr, lociLoc=outLociLoc))
}

# Retrieves marker names from genMap
# lociPerChr, number of loci per chromosome
# lociLoc, position of loci on the chromosomes
# genMap, genetic map with names
getLociNames = function(lociPerChr, lociLoc, genMap){
  lociNames = character(length(lociLoc))
  start = end = 0L
  for(chr in 1:length(lociPerChr)){
    if(lociPerChr[chr]>0){
      start = end + 1L
      end = end + lociPerChr[chr]
      take = lociLoc[start:end]
      lociNames[start:end] = names(genMap[[chr]])[take]
    }
  }
  return(lociNames)
}

# Finds loci on a genetic map and returns a list of positions
mapLoci = function(markers, genMap){
  # Check that the markers are present on the map
  genMapMarkerNames = unlist(lapply(genMap, names))
  stopifnot(all(markers%in%genMapMarkerNames))

  # Create lociPerChr and lociLoc
  lociPerChr = integer(length(genMap))
  lociLoc = vector("list", length(genMap))

  # Loop through chromosomes
  for(i in 1:length(genMap)){
    # Initialize lociLoc
    lociLoc[[i]] = integer()

    # Find matches if they exist
    take = match(names(genMap[[i]]), markers)
    lociPerChr[i] = length(na.omit(take))
    if(lociPerChr[i]>0L){
      lociLoc[[i]] = which(!is.na(take))
    }
  }
  lociLoc = unlist(lociLoc)

  return(list(lociPerChr=lociPerChr, lociLoc=lociLoc))
}

#' @title Get genetic map
#'
#' @description Retrieves the genetic map for all loci.
#'
#' @param object where to retrieve the genetic map.
#' Can be an object of \code{\link{SimParam}} or
#' \code{\link{MapPop-class}}. If NULL, the function will
#' look for a SimParam object called "SP" in your
#' global environment.
#' @param sex determines which sex specific map
#' is returned. Options are "A" for average map, "F"
#' for female map, and "M" for male map. All options are
#' equivalent if not using sex specific maps or when
#' pulling from a MapPop.
#'
#' @return Returns a data.frame with:
#' \describe{
#'   \item{id}{Unique identifier for locus}
#'   \item{chr}{Chromosome containing the locus}
#'   \item{pos}{Genetic map position}
#' }
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Get genetic map
#' getGenMap(founderPop)
#'
#' @export
getGenMap = function(object=NULL, sex="A"){
  if(is.null(object)){
    object = get("SP",envir=.GlobalEnv)
  }
  if(is(object, "SimParam")){
    # Extract the sex specific map
    if(sex=="A"){
      genMap = object$genMap
    }else if(sex=="F"){
      genMap = object$femaleMap
    }else if(sex=="M"){
      genMap = object$maleMap
    }else{
      stop(paste("sex =",sex,"is not a valid option"))
    }
  }else if(is(object, "MapPop")){
    # No sex specific maps
    genMap = object@genMap
  }else{
    stop("object is class ", class(object),
         ", which is not a valid source for the genetic map")
  }

  # Loci names
  id = unlist(unname(lapply(genMap, names)))

  # Chromosome names
  nLoci = sapply(genMap, length)
  chr = rep(names(genMap), nLoci)

  # Map positions
  pos = unname(do.call("c", genMap))

  # Output data.frame
  return(data.frame(id=id, chr=chr, pos=pos))
}

#' @title Get SNP genetic map
#'
#' @description Retrieves the genetic map for a
#' given SNP chip.
#'
#' @param snpChip an integer. Indicates which SNP
#' chip's map to retrieve.
#' @param sex determines which sex specific map
#' is returned. Options are "A" for average map, "F"
#' for female map, and "M" for male map. All options are
#' equivalent if not using sex specific maps.
#' @param simParam an object of \code{\link{SimParam}}
#'
#' @return Returns a data.frame with:
#' \describe{
#'   \item{id}{Unique identifier for the SNP}
#'   \item{chr}{Chromosome containing the SNP}
#'   \item{site}{Segregating site on the chromosome}
#'   \item{pos}{Genetic map position}
#' }
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addSnpChip(5)
#'
#' #Pull SNP map
#' getSnpMap(snpChip=1, simParam=SP)
#'
#' @export
getSnpMap = function(snpChip=1, sex="A", simParam=NULL){
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }

  #Extract genetic map and SNP positions
  if(sex=="A"){
    genMap = simParam$genMap
  }else if(sex=="F"){
    genMap = simParam$femaleMap
  }else if(sex=="M"){
    genMap = simParam$maleMap
  }else{
    stop(paste("sex =",sex,"is not a valid option"))
  }

  if(is.character(snpChip)){
    # Suspect snpChip is a name
    chipNames = simParam$snpChipNames
    take = match(snpChip, chipNames)
    if(is.na(take)){
      stop("'",snpChip,"' did not match any SNP chip names")
    }
    snpChip = take
  }
  snp = simParam$snpChips[[snpChip]] #SNP positions

  #Create a list of SNP positions on the genetic map
  #Each list element corresponds to a chromosome
  snpMap = lapply(1:simParam$nChr, function(x){
    if(snp@lociPerChr[x]==0){
      #No SNPs on chromosome
      return(NULL)
    }else{
      if(x==1){
        #First chromosome, start at position 1
        take = 1:snp@lociPerChr[x]
      }else{
        #All other chromosomes
        take = (sum(snp@lociPerChr[1:(x-1)])+1):sum(snp@lociPerChr[1:x])
      }
      return(genMap[[x]][snp@lociLoc[take]])
    }
  })

  #Create a data.frame with SNP positions on genetic map
  output = data.frame(id=getLociNames(snp@lociPerChr, snp@lociLoc, genMap),
                      chr=rep(names(genMap),snp@lociPerChr),
                      site=snp@lociLoc,
                      pos=do.call("c",snpMap))
  return(output)
}

#' @title Get QTL genetic map
#'
#' @description Retrieves the genetic map for the
#' QTL of a given trait.
#'
#' @param trait an integer. Indicates which trait's
#' QTL map to retrieve.
#' @param sex determines which sex specific map
#' is returned. Options are "A" for average map, "F"
#' for female map, and "M" for male map. All options are
#' equivalent if not using sex specific maps.
#' @param simParam an object of \code{\link{SimParam}}
#'
#' @return Returns a data.frame with:
#' \describe{
#'   \item{id}{Unique identifier for the QTL}
#'   \item{chr}{Chromosome containing the QTL}
#'   \item{site}{Segregating site on the chromosome}
#'   \item{pos}{Genetic map position}
#' }
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitA(5)
#'
#' #Pull QTL map
#' getQtlMap(trait=1, simParam=SP)
#'
#' @export
getQtlMap = function(trait=1, sex="A", simParam=NULL){
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }
  if(is.character(trait)){
    # Suspect trait is a name
    traitNames = simParam$traitNames
    take = match(trait, traitNames)
    if(is.na(take)){
      stop("'",trait,"' did not match any trait names")
    }
    trait = take
  }

  #Extract genetic map and SNP positions
  if(sex=="A"){
    genMap = simParam$genMap
  }else if(sex=="F"){
    genMap = simParam$femaleMap
  }else if(sex=="M"){
    genMap = simParam$maleMap
  }else{
    stop(paste("sex =",sex,"is not a valid option"))
  }
  qtl = simParam$traits[[trait]] #QTL positions

  #Create a list of QTL positions on the genetic map
  #Each list element corresponds to a chromosome
  qtlMap = lapply(1:simParam$nChr, function(x){
    if(qtl@lociPerChr[x]==0){
      #No QTL on chromosome
      return(NULL)
    }else{
      if(x==1){
        #First chromosome, start at position 1
        take = 1:qtl@lociPerChr[x]
      }else{
        #All other chromosomes
        take = (sum(qtl@lociPerChr[1:(x-1)])+1):sum(qtl@lociPerChr[1:x])
      }
      return(genMap[[x]][qtl@lociLoc[take]])
    }
  })

  #Create a data.frame with QTL positions on genetic map
  output = data.frame(id=getLociNames(qtl@lociPerChr, qtl@lociLoc, genMap),
                      chr=rep(names(genMap),qtl@lociPerChr),
                      site=qtl@lociLoc,
                      pos=do.call("c",qtlMap))
  return(output)
}

#' @title Pull SNP genotypes
#'
#' @description Retrieves SNP genotype data
#'
#' @param pop an object of \code{\link{Pop-class}}
#' @param snpChip an integer. Indicates which SNP
#' chip's genotypes to retrieve.
#' @param chr a vector of chromosomes to retrieve. If NULL,
#' all chromosomes are retrieved.
#' @param asRaw return in raw (byte) format
#' @param simParam an object of \code{\link{SimParam}}
#'
#' @return Returns a matrix of SNP genotypes.
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=15)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitA(10)
#' SP$addSnpChip(5)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#' pullSnpGeno(pop, simParam=SP)
#'
#' @export
pullSnpGeno = function(pop, snpChip=1, chr=NULL, asRaw=FALSE,
                       simParam=NULL){
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }
  if(is.character(snpChip)){
    # Suspect snpChip is a name
    chipNames = simParam$snpChipNames
    take = match(snpChip, chipNames)
    if(is.na(take)){
      stop("'",snpChip,"' did not match any SNP chip names")
    }
    snpChip = take
  }
  tmp = selectLoci(chr,
                   simParam$snpChips[[snpChip]]@lociPerChr,
                   simParam$snpChips[[snpChip]]@lociLoc)
  output = getGeno(pop@geno,tmp$lociPerChr,tmp$lociLoc,simParam$nThreads)
  if(!asRaw){
    output = convToImat(output)
  }
  if(is(pop,"Pop")){
    rownames(output) = pop@id
  }else{
    rownames(output) = as.character(1:pop@nInd)
  }
  colnames(output) = getLociNames(tmp$lociPerChr, tmp$lociLoc,
                                  simParam$genMap)
  return(output)
}

#' @title Pull QTL genotypes
#'
#' @description Retrieves QTL genotype data
#'
#' @param pop an object of \code{\link{Pop-class}}
#' @param trait an integer. Indicates which trait's
#' QTL genotypes to retrieve.
#' @param chr a vector of chromosomes to retrieve. If NULL,
#' all chromosomes are retrieved.
#' @param asRaw return in raw (byte) format
#' @param simParam an object of \code{\link{SimParam}}
#'
#' @return Returns a matrix of QTL genotypes.
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=15)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitA(10)
#' SP$addSnpChip(5)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#' pullQtlGeno(pop, simParam=SP)
#'
#' @export
pullQtlGeno = function(pop, trait=1, chr=NULL, asRaw=FALSE,
                       simParam=NULL){
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }
  if(is.character(trait)){
    # Suspect trait is a name
    traitNames = simParam$traitNames
    take = match(trait, traitNames)
    if(is.na(take)){
      stop("'",trait,"' did not match any trait names")
    }
    trait = take
  }
  tmp = selectLoci(chr,
                   simParam$traits[[trait]]@lociPerChr,
                   simParam$traits[[trait]]@lociLoc)
  output = getGeno(pop@geno,tmp$lociPerChr,tmp$lociLoc,simParam$nThreads)
  if(!asRaw){
    output = convToImat(output)
  }
  if(is(pop,"Pop")){
    rownames(output) = pop@id
  }else{
    rownames(output) = as.character(1:pop@nInd)
  }
  colnames(output) = getLociNames(tmp$lociPerChr, tmp$lociLoc,
                                  simParam$genMap)
  return(output)
}

#' @title Pull segregating site genotypes
#'
#' @description
#' Retrieves genotype data for all segregating sites
#'
#' @param pop an object of \code{\link{RawPop-class}} or
#' \code{\link{MapPop-class}}
#' @param chr a vector of chromosomes to retrieve. If NULL,
#' all chromosomes are retrieved.
#' @param asRaw return in raw (byte) format
#' @param simParam an object of \code{\link{SimParam}}, not
#' used if pop is \code{\link{MapPop-class}}
#'
#' @return Returns a matrix of genotypes
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=15)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' SP$addTraitA(10)
#' SP$addSnpChip(5)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#' pullSegSiteGeno(pop, simParam=SP)
#'
#' @export
pullSegSiteGeno = function(pop, chr=NULL, asRaw=FALSE,
                           simParam=NULL){
  if(is(pop,"MapPop")){
    allLoci = unlist(c(sapply(pop@nLoci, function(x) 1:x)))
    lociTot = pop@nLoci
    nThreads = getNumThreads()
    map = pop@genMap
  }else{
    if(is.null(simParam)){
      simParam = get("SP",envir=.GlobalEnv)
    }
    allLoci = unlist(c(sapply(simParam$segSites, function(x) 1:x)))
    lociTot = simParam$segSites
    nThreads = simParam$nThreads
    map = simParam$genMap
  }
  tmp = selectLoci(chr,lociTot,allLoci)
  output = getGeno(pop@geno,tmp$lociPerChr,tmp$lociLoc,nThreads)
  if(!asRaw){
    output = convToImat(output)
  }
  if(is(pop,"Pop") | is(pop,"NamedMapPop")){
    rownames(output) = pop@id
  }else{
    rownames(output) = as.character(1:pop@nInd)
  }
  colnames(output) = getLociNames(tmp$lociPerChr, tmp$lociLoc, map)
  return(output)
}

#' @title Pull SNP haplotypes
#'
#' @description Retrieves SNP haplotype data
#'
#' @param pop an object of \code{\link{Pop-class}}
#' @param snpChip an integer. Indicates which SNP
#' chip's haplotypes to retrieve.
#' @param haplo either "all" for all haplotypes or an integer
#' for a single set of haplotypes. Use a value of 1 for female
#' haplotypes and a value of 2 for male haplotypes in diploids.
#' @param chr a vector of chromosomes to retrieve. If NULL,
#' all chromosomes are retrieved.
#' @param asRaw return in raw (byte) format
#' @param simParam an object of \code{\link{SimParam}}
#'
#' @return Returns a matrix of SNP haplotypes.
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=15)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitA(10)
#' SP$addSnpChip(5)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#' pullSnpHaplo(pop, simParam=SP)
#'
#' @export
pullSnpHaplo = function(pop, snpChip=1, haplo="all", chr=NULL,
                        asRaw=FALSE, simParam=NULL){
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }
  if(is.character(snpChip)){
    # Suspect snpChip is a name
    chipNames = simParam$snpChipNames
    take = match(snpChip, chipNames)
    if(is.na(take)){
      stop("'",snpChip,"' did not match any SNP chip names")
    }
    snpChip = take
  }
  tmp = selectLoci(chr,
                   simParam$snpChips[[snpChip]]@lociPerChr,
                   simParam$snpChips[[snpChip]]@lociLoc)
  lociPerChr = tmp$lociPerChr
  lociLoc = tmp$lociLoc
  if(haplo=="all"){
    output = getHaplo(pop@geno,lociPerChr,lociLoc,simParam$nThreads)
    if(!asRaw){
      output = convToImat(output)
    }
    if(is(pop,"Pop")){
      rownames(output) = paste(rep(pop@id,each=pop@ploidy),
                               rep(1:pop@ploidy,pop@nInd),sep="_")
    }else{
      rownames(output) = paste(rep(1:pop@nInd,each=pop@ploidy),
                               rep(1:pop@ploidy,pop@nInd),sep="_")
    }
  }else{
    output = getOneHaplo(pop@geno,lociPerChr,lociLoc,
                         as.integer(haplo),simParam$nThreads)
    if(!asRaw){
      output = convToImat(output)
    }
    if(is(pop,"Pop")){
      rownames(output) = paste(pop@id,rep(haplo,pop@nInd),sep="_")
    }else{
      rownames(output) = paste(1:pop@nInd,rep(haplo,pop@nInd),sep="_")
    }
  }
  colnames(output) = getLociNames(tmp$lociPerChr, tmp$lociLoc,
                                  simParam$genMap)
  return(output)
}

#' @title Pull QTL haplotypes
#'
#' @description Retrieves QTL haplotype data
#'
#' @param pop an object of \code{\link{Pop-class}}
#' @param trait an integer. Indicates which trait's
#' QTL haplotypes to retrieve.
#' @param haplo either "all" for all haplotypes or an integer
#' for a single set of haplotypes. Use a value of 1 for female
#' haplotypes and a value of 2 for male haplotypes in diploids.
#' @param chr a vector of chromosomes to retrieve. If NULL,
#' all chromosomes are retrieved.
#' @param asRaw return in raw (byte) format
#' @param simParam an object of \code{\link{SimParam}}
#'
#' @return Returns a matrix of QTL haplotypes.
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=15)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitA(10)
#' SP$addSnpChip(5)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#' pullQtlHaplo(pop, simParam=SP)
#'
#' @export
pullQtlHaplo = function(pop, trait=1, haplo="all", chr=NULL,
                        asRaw=FALSE, simParam=NULL){
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }
  if(is.character(trait)){
    # Suspect trait is a name
    traitNames = simParam$traitNames
    take = match(trait, traitNames)
    if(is.na(take)){
      stop("'",trait,"' did not match any trait names")
    }
    trait = take
  }
  tmp = selectLoci(chr,
                   simParam$traits[[trait]]@lociPerChr,
                   simParam$traits[[trait]]@lociLoc)
  lociPerChr = tmp$lociPerChr
  lociLoc = tmp$lociLoc
  if(haplo=="all"){
    output = getHaplo(pop@geno,lociPerChr,lociLoc,simParam$nThreads)
    if(!asRaw){
      output = convToImat(output)
    }
    if(is(pop,"Pop")){
      rownames(output) = paste(rep(pop@id,each=pop@ploidy),
                               rep(1:pop@ploidy,pop@nInd),sep="_")
    }else{
      rownames(output) = paste(rep(1:pop@nInd,each=pop@ploidy),
                               rep(1:pop@ploidy,pop@nInd),sep="_")
    }
  }else{
    output = getOneHaplo(pop@geno,lociPerChr,lociLoc,
                         as.integer(haplo),simParam$nThreads)
    if(!asRaw){
      output = convToImat(output)
    }
    if(is(pop,"Pop")){
      rownames(output) = paste(pop@id,rep(haplo,pop@nInd),sep="_")
    }else{
      rownames(output) = paste(1:pop@nInd,rep(haplo,pop@nInd),sep="_")
    }
  }
  colnames(output) = getLociNames(tmp$lociPerChr, tmp$lociLoc,
                                  simParam$genMap)
  return(output)
}

#' @title Pull segregating site haplotypes
#'
#' @description
#' Retrieves haplotype data for all segregating sites
#'
#' @param pop an object of \code{\link{RawPop-class}} or
#' \code{\link{MapPop-class}}
#' @param haplo either "all" for all haplotypes or an integer
#' for a single set of haplotypes. Use a value of 1 for female
#' haplotypes and a value of 2 for male haplotypes in diploids.
#' @param chr a vector of chromosomes to retrieve. If NULL,
#' all chromosomes are retrieved.
#' @param asRaw return in raw (byte) format
#' @param simParam an object of \code{\link{SimParam}}, not
#' used if pop is \code{\link{MapPop-class}}
#'
#' @return Returns a matrix of haplotypes
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=15)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitA(10)
#' SP$addSnpChip(5)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#' pullSegSiteHaplo(pop, simParam=SP)
#'
#' @export
pullSegSiteHaplo = function(pop, haplo="all", chr=NULL,
                            asRaw=FALSE, simParam=NULL){
  if(is(pop,"MapPop")){
    allLoci = unlist(c(sapply(pop@nLoci, function(x) 1:x)))
    lociTot = pop@nLoci
    nThreads = getNumThreads()
    map = pop@genMap
  }else{
    if(is.null(simParam)){
      simParam = get("SP",envir=.GlobalEnv)
    }
    allLoci = unlist(c(sapply(simParam$segSites, function(x) 1:x)))
    lociTot = simParam$segSites
    nThreads = simParam$nThreads
    map = simParam$genMap
  }
  if(!is.null(chr)){
    tmp = selectLoci(chr,lociTot,allLoci)
    lociTot = tmp$lociPerChr
    allLoci = tmp$lociLoc
  }else{
    chr = 1:pop@nChr
  }
  if(haplo=="all"){
    output = getHaplo(pop@geno, lociTot, allLoci, nThreads)
    if(!asRaw){
      output = convToImat(output)
    }
    if(is(pop,"Pop") | is(pop,"NamedMapPop")){
      rownames(output) = paste(rep(pop@id,each=pop@ploidy),
                               rep(1:pop@ploidy,pop@nInd),sep="_")
    }else{
      rownames(output) = paste(rep(1:pop@nInd,each=pop@ploidy),
                               rep(1:pop@ploidy,pop@nInd),sep="_")
    }
  }else{
    output = getOneHaplo(pop@geno, lociTot, allLoci,
                         as.integer(haplo), nThreads)
    if(!asRaw){
      output = convToImat(output)
    }
    if(is(pop,"Pop") | is(pop,"NamedMapPop")){
      rownames(output) = paste(pop@id,rep(haplo,pop@nInd),sep="_")
    }else{
      rownames(output) = paste(1:pop@nInd,rep(haplo,pop@nInd),sep="_")
    }
  }
  colnames(output) = getLociNames(lociTot, allLoci, map)
  return(output)
}

#' @title Pull IBD haplotypes
#'
#' @description Retrieves IBD haplotype data
#'
#' @param pop an object of \code{\link{Pop-class}}
#' @param chr a vector of chromosomes to retrieve. If NULL,
#' all chromosomes are retrieved.
#' @param snpChip an integer indicating which SNP array loci
#' are to be retrieved. If NULL, all sites are retrieved.
#' @param simParam an object of \code{\link{SimParam}}
#'
#' @return Returns a matrix of IBD haplotypes.
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=15)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitA(10)
#' SP$addSnpChip(5)
#' SP$setTrackRec(TRUE)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#' pullIbdHaplo(pop, simParam=SP)
#'
#' @export
pullIbdHaplo = function(pop, chr=NULL, snpChip=NULL, simParam=NULL){
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }
  if(is.character(snpChip)){
    # Suspect snpChip is a name
    chipNames = simParam$snpChipNames
    take = match(snpChip, chipNames)
    if(is.na(take)){
      stop("'",snpChip,"' did not match any SNP chip names")
    }
    snpChip = take
  }
  if(!simParam$isTrackRec){
    stop("pullIbdHaplo can only be used with the trackRec option, see trackRec in ?SimParam")
  }
  if(is.null(chr)){
    chr = 1:pop@nChr
  }

  # Retrieve IBD data
  ibd = simParam$ibdHaplo(pop@iid)

  # Fill in output matrix
  output = createIbdMat(ibd=ibd, chr=chr, nLoci=pop@nLoci,
                        ploidy=pop@ploidy, nThreads=simParam$nThreads)
  rownames(output) = paste(rep(pop@id,each=pop@ploidy),
                           rep(1:pop@ploidy,pop@nInd),sep="_")
  colnames(output) = unlist(lapply(simParam$genMap[chr], names))

  if(!is.null(snpChip)){
    nLoci = pop@nLoci[chr]
    tmp = getSnpMap(snpChip=snpChip,simParam=simParam)
    tmp = tmp[tmp$chr%in%chr,]
    if(length(chr)>1){
      for(i in 2:length(chr)){
        j = chr[i]
        tmp[tmp$chr==j,"site"] = tmp[tmp$chr==j,"site"] + sum(nLoci[1:(i-1)])
      }
    }
    output = output[,tmp$site,drop=FALSE]
  }

  return(output)
}

#' @title Pull marker genotypes
#'
#' @description Retrieves genotype data for user
#' specified loci
#'
#' @param pop an object of \code{\link{RawPop-class}} or
#' \code{\link{MapPop-class}}
#' @param markers a character vector. Indicates the
#' names of the loci to be retrieved.
#' @param asRaw return in raw (byte) format
#' @param simParam an object of \code{\link{SimParam}}, not
#' used if pop is \code{\link{MapPop-class}}
#'
#' @return Returns a matrix of genotypes.
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=15)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitA(10)
#' SP$addSnpChip(5)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' #Pull genotype data for first two markers on chromosome one.
#' #Marker name is consistent with default naming in AlphaSimR.
#' pullMarkerGeno(pop, markers=c("1_1","1_2"), simParam=SP)
#'
#' @export
pullMarkerGeno = function(pop, markers, asRaw=FALSE, simParam=NULL){
  # Get genetic map and nThreads
  if(is(pop,"MapPop")){
    nThreads = getNumThreads()
    genMap = pop@genMap
  }else{
    if(is.null(simParam)){
      simParam = get("SP",envir=.GlobalEnv)
    }
    nThreads = simParam$nThreads
    genMap = simParam$genMap
  }

  # Map markers to genetic map
  lociMap = mapLoci(markers, genMap)

  # Get genotypes
  output = getGeno(pop@geno, lociMap$lociPerChr,
                   lociMap$lociLoc, nThreads)
  if(!asRaw){
    output = convToImat(output)
  }
  if(is(pop,"Pop") | is(pop,"NamedMapPop")){
    rownames(output) = pop@id
  }else{
    rownames(output) = as.character(1:pop@nInd)
  }
  colnames(output) = getLociNames(lociMap$lociPerChr,
                                  lociMap$lociLoc, genMap)
  output = output[,match(markers, colnames(output)),drop=FALSE]
  return(output)
}

#' @title Pull marker haplotypes
#'
#' @description Retrieves haplotype data for user
#' specified loci
#'
#' @param pop an object of \code{\link{RawPop-class}} or
#' \code{\link{MapPop-class}}
#' @param markers a character vector. Indicates the
#' names of the loci to be retrieved
#' @param haplo either "all" for all haplotypes or an integer
#' for a single set of haplotypes. Use a value of 1 for female
#' haplotypes and a value of 2 for male haplotypes in diploids.
#' @param asRaw return in raw (byte) format
#' @param simParam an object of \code{\link{SimParam}}, not
#' used if pop is \code{\link{MapPop-class}}
#'
#' @return Returns a matrix of haplotypes.
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=15)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitA(10)
#' SP$addSnpChip(5)
#' SP$setTrackRec(TRUE)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' #Pull haplotype data for first two markers on chromosome one.
#' #Marker name is consistent with default naming in AlphaSimR.
#' pullMarkerHaplo(pop, markers=c("1_1","1_2"), simParam=SP)
#'
#' @export
pullMarkerHaplo = function(pop, markers, haplo="all", asRaw=FALSE,
                           simParam=NULL){
  # Get genetic map and nThreads
  if(is(pop,"MapPop")){
    nThreads = getNumThreads()
    genMap = pop@genMap
  }else{
    if(is.null(simParam)){
      simParam = get("SP",envir=.GlobalEnv)
    }
    nThreads = simParam$nThreads
    genMap = simParam$genMap
  }

  # Map markers to genetic map
  lociMap = mapLoci(markers, genMap)

  if(haplo=="all"){
    output = getHaplo(pop@geno, lociMap$lociPerChr,
                      lociMap$lociLoc, nThreads)
    if(!asRaw){
      output = convToImat(output)
    }
    if(is(pop,"Pop")){
      rownames(output) = paste(rep(pop@id,each=pop@ploidy),
                               rep(1:pop@ploidy,pop@nInd),sep="_")
    }else{
      rownames(output) = paste(rep(1:pop@nInd,each=pop@ploidy),
                               rep(1:pop@ploidy,pop@nInd),sep="_")
    }
  }else{
    output = getOneHaplo(pop@geno, lociMap$lociPerChr,
                         lociMap$lociLoc, as.integer(haplo),
                         nThreads)
    if(!asRaw){
      output = convToImat(output)
    }
    if(is(pop,"Pop")){
      rownames(output) = paste(pop@id,rep(haplo,pop@nInd),sep="_")
    }else{
      rownames(output) = paste(1:pop@nInd,rep(haplo,pop@nInd),sep="_")
    }
  }
  colnames(output) = getLociNames(lociMap$lociPerChr,
                                  lociMap$lociLoc, genMap)
  output = output[,match(markers, colnames(output)),drop=FALSE]
  return(output)
}

#' @title Set marker haplotypes
#'
#' @description Manually sets the haplotypes in a population
#' for all individuals at one or more loci.
#'
#' @param pop an object of \code{\link{RawPop-class}} or
#' \code{\link{MapPop-class}}
#' @param haplo a matrix of haplotypes, see details
#' @param simParam an object of \code{\link{SimParam}}, not
#' used if pop is \code{\link{MapPop-class}}
#'
#' @details The format of the haplotype matrix should match
#' the format of the output from \code{\link{pullMarkerHaplo}}
#' with the option haplo="all". Thus, it is recommended that this
#' function is first used to extract the haplotypes and that any
#' desired changes be made to the output of pullMarkerHaplo before
#' passing the matrix to setMarkerHaplo. Any changes made to QTL
#' may potentially result in changes to an individual's genetic
#' value. These changes will be reflected in the gv and/or gxe slot.
#' All other slots will remain unchanged, so the ebv and pheno slots
#' will not reflect the new genotypes.
#'
#' @return an object of the same class as the "pop" input
#'
#' @examples
#' # Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=15)
#'
#' # Extract haplotypes for marker "1_1"
#' H = pullMarkerHaplo(founderPop, markers="1_1")
#'
#' # Set the first haplotype to 1
#' H[1,1] = 1L
#'
#' # Set marker haplotypes
#' founderPop = setMarkerHaplo(founderPop, haplo=H)
#'
#' @export
setMarkerHaplo = function(pop, haplo, simParam=NULL){
  # Check validity of rows
  stopifnot(nrow(haplo)==(pop@nInd*pop@ploidy))

  # Get genetic map
  if(is(pop,"MapPop")){
    genMap = pop@genMap
    nThreads = getNumThreads()
  }else{
    if(is.null(simParam)){
      simParam = get("SP",envir=.GlobalEnv)
    }
    genMap = simParam$genMap
    nThreads = simParam$nThreads
  }

  # Map markers to the genetic map
  markers = colnames(haplo)
  lociMap = mapLoci(markers, genMap)

  # Order haplotype data
  orderedMapNames = getLociNames(lociMap$lociPerChr,
                                 lociMap$lociLoc, genMap)
  haplo = haplo[,match(markers, orderedMapNames), drop=FALSE]

  # Set haplotypes
  geno = setHaplo(pop@geno, haplo, lociMap$lociPerChr,
                  lociMap$lociLoc, nThreads)
  dim(geno) = NULL # Account for matrix bug in RcppArmadillo
  pop@geno = geno

  if(is(pop, "Pop")){
    PHENO = pop@pheno
    EBV = pop@ebv
    pop = resetPop(pop=pop, simParam=simParam)
    pop@pheno = PHENO
    pop@ebv = EBV
  }
  return(pop)
}
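# ---------------------------------------------------------------------
# Illustrative sketch (not part of the package source): combining
# getSnpMap() and pullSnpGeno(), both defined above, to compute the
# allele frequency at each SNP. The setup follows the roxygen examples
# in this file.
# if(FALSE){
#   founderPop = quickHaplo(nInd=10, nChr=1, segSites=15)
#   SP = SimParam$new(founderPop)
#   SP$addSnpChip(5)
#   pop = newPop(founderPop, simParam=SP)
#   geno = pullSnpGeno(pop, simParam=SP) # nInd x nSnp matrix of 0/1/2
#   p = colMeans(geno)/2                 # alternate allele frequency
#   cbind(getSnpMap(snpChip=1, simParam=SP), freq=p)
# }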
# ==== end of file: AlphaSimR/R/pullGeno.R ====
# Returns a vector response from a population
# pop is an object of class Pop or HybridPop
# trait is a vector of traits or a function
# use is "rand", "gv", "ebv", "pheno", or "bv"
# "bv" doesn't work on class HybridPop
# simParam is an object of class SimParam, it is only called when use="bv"
# ... are additional arguments passed to trait when trait is a function
getResponse = function(pop,trait,use,simParam=NULL,...){
  use = tolower(use)
  if(use=="rand"){
    return(rnorm(pop@nInd))
  }
  if(is(trait,"function")){
    if(use=="gv"){
      response = trait(pop@gv,...)
    }else if(use=="ebv"){
      response = trait(pop@ebv,...)
    }else if(use=="pheno"){
      response = trait(pop@pheno,...)
    }else if(use=="bv"){
      if(is(pop,"HybridPop")){
        stop("Use='bv' is not a valid option for HybridPop")
      }
      response = genParam(pop,simParam=simParam)$bv
      response = trait(response,...)
    }else{
      stop(paste0("Use=",use," is not an option"))
    }
  }else{
    if(is.character(trait)){
      # Suspect trait is a name
      take = match(trait, simParam$traitNames)
      if(is.na(take)){
        stop("'",trait,"' did not match any trait names")
      }
      trait = take
    }
    if(use == "gv"){
      response = pop@gv[,trait,drop=FALSE]
    }else if(use=="ebv"){
      response = pop@ebv[,trait,drop=FALSE]
    }else if(use=="pheno"){
      response = pop@pheno[,trait,drop=FALSE]
    }else if(use=="bv"){
      if(is(pop,"HybridPop")){
        stop("Use='bv' is not a valid option for HybridPop")
      }
      response = genParam(pop,simParam=simParam)$bv[,trait,drop=FALSE]
    }else{
      stop(paste0("Use=",use," is not an option"))
    }
  }
  if(any(is.na(response))){
    stop("selection trait has missing values, phenotype may need to be set")
  }
  return(response)
}

# Converts candidates to a vector of positive numbers
# for the individuals that are candidates. This function
# handles indexing by id and negative value indexing
getCandidates = function(pop, candidates){
  if(is.character(candidates)){
    candidates = match(candidates, pop@id)
    if(any(is.na(candidates))){
      stop("Trying to select invalid individuals")
    }
    if(any(is.null(candidates))){
      stop("Not valid ids")
    }
  }else{
    if(any(abs(candidates)>pop@nInd)){
      stop("Trying to select invalid individuals")
    }
    candidates = (1:pop@nInd)[candidates]
  }
  return(candidates)
}

# Returns a vector of individuals in a population with the required sex
checkSexes = function(pop,sex,simParam,...){
  sex = toupper(sex)
  eligible = 1:pop@nInd
  if(simParam$sexes=="no"){
    return(eligible)
  }else{
    if(sex=="B"){
      # Check if the discontinued 'gender' argument is being used
      args = list(...)
      if(any(names(args)=="gender")){
        stop("The discontinued 'gender' argument appears to be in use. This argument was renamed as 'sex' in AlphaSimR version 0.13.0.")
      }
      return(eligible)
    }else{
      return(eligible[pop@sex%in%sex])
    }
  }
}

# Returns a vector of families
getFam = function(pop,famType){
  famType = toupper(famType)
  if(famType=="B"){
    return(paste(pop@mother,pop@father,sep="_"))
  }else if(famType=="F"){
    return(pop@mother)
  }else if(famType=="M"){
    return(pop@father)
  }else{
    stop(paste0("famType=",famType," is not a valid option"))
  }
}

#' @title Select individuals
#'
#' @description Selects a subset of nInd individuals from a
#' population.
#'
#' @param pop an object of \code{\link{Pop-class}},
#' \code{\link{HybridPop-class}} or \code{\link{MultiPop-class}}
#' @param nInd the number of individuals to select
#' @param trait the trait for selection. Either a number indicating
#' a single trait or a function returning a vector of length nInd.
#' The function must work on a vector or matrix of \code{use} values.
#' See the examples and \code{\link{selIndex}}.
#' @param use select on genetic values "gv", estimated
#' breeding values "ebv", breeding values "bv", phenotypes "pheno",
#' or randomly "rand"
#' @param sex which sex to select. Use "B" for both, "F" for
#' females and "M" for males. If the simulation is not using sexes,
#' the argument is ignored.
#' @param selectTop selects highest values if true.
#' Selects lowest values if false.
#' @param returnPop should results be returned as a
#' \code{\link{Pop-class}}. If FALSE, only the index of selected
#' individuals is returned.
#' @param candidates an optional vector of eligible selection candidates.
#' @param simParam an object of \code{\link{SimParam}}
#' @param ... additional arguments if using a function for
#' trait
#'
#' @return Returns an object of \code{\link{Pop-class}},
#' \code{\link{HybridPop-class}} or \code{\link{MultiPop-class}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitA(10)
#' SP$setVarE(h2=0.5)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' #Select top 5 (directional selection)
#' pop2 = selectInd(pop, 5, simParam=SP)
#' hist(pop@pheno); abline(v = pop@pheno, lwd = 2)
#' abline(v = pop2@pheno, col = "red", lwd = 2)
#'
#' #Select 5 most deviating from an optima (disruptive selection)
#' squaredDeviation = function(x, optima = 0) (x - optima)^2
#' pop3 = selectInd(pop, 5, simParam=SP, trait = squaredDeviation, selectTop = TRUE)
#' hist(pop@pheno); abline(v = pop@pheno, lwd = 2)
#' abline(v = pop3@pheno, col = "red", lwd = 2)
#'
#' #Select 5 least deviating from an optima (stabilising selection)
#' pop4 = selectInd(pop, 5, simParam=SP, trait = squaredDeviation, selectTop = FALSE)
#' hist(pop@pheno); abline(v = pop@pheno, lwd = 2)
#' abline(v = pop4@pheno, col = "red", lwd = 2)
#'
#' @export
selectInd = function(pop,nInd,trait=1,use="pheno",sex="B",
                     selectTop=TRUE,returnPop=TRUE,
                     candidates=NULL,simParam=NULL,...){
  stopifnot(nInd>=0)
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }
  if(is(pop,"MultiPop")){
    stopifnot(returnPop, is.null(candidates))
    pop@pops = lapply(pop@pops, selectInd, nInd=nInd, trait=trait,
                      use=use, sex=sex, selectTop=selectTop,
                      returnPop=TRUE, candidates=NULL,
                      simParam=simParam, ...)
    return(pop)
  }
  eligible = checkSexes(pop=pop,sex=sex,simParam=simParam,...)
  if(!is.null(candidates)){
    candidates = getCandidates(pop=pop,candidates=candidates)
    eligible = eligible[eligible%in%candidates]
  }
  if(length(eligible)<nInd){
    nInd = length(eligible)
    warning("Fewer suitable candidates than nInd, returning ",nInd," individuals")
  }
  response = getResponse(pop=pop,trait=trait,use=use,
                         simParam=simParam,...)
  if(is.matrix(response)){
    stopifnot(ncol(response)==1)
  }
  take = order(response,decreasing=selectTop)
  take = take[take%in%eligible]
  if(returnPop){
    return(pop[take[0:nInd]])
  }else{
    return(take[0:nInd])
  }
}

#' @title Select families
#'
#' @description Selects a subset of full-sib families from a
#' population.
#'
#' @param pop an object of \code{\link{Pop-class}},
#' \code{\link{HybridPop-class}} or \code{\link{MultiPop-class}}
#' @param nFam the number of families to select
#' @param trait the trait for selection. Either a number indicating
#' a single trait or a function returning a vector of length nInd.
#' The function must work on a vector or matrix of \code{use} values.
#' See the examples in \code{\link{selectInd}} and \code{\link{selIndex}}.
#' @param use select on genetic values "gv", estimated
#' breeding values "ebv", breeding values "bv", phenotypes "pheno",
#' or randomly "rand"
#' @param sex which sex to select. Use "B" for both, "F" for
#' females and "M" for males. If the simulation is not using sexes,
#' the argument is ignored.
#' @param famType which type of family to select. Use "B" for
#' full-sib families, "F" for half-sib families on female side and "M"
#' for half-sib families on the male side.
#' @param selectTop selects highest values if true.
#' Selects lowest values if false.
#' @param returnPop should results be returned as a
#' \code{\link{Pop-class}}. If FALSE, only the index of selected
#' individuals is returned.
#' @param candidates an optional vector of eligible selection candidates.
#' @param simParam an object of \code{\link{SimParam}}
#' @param ... additional arguments if using a function for
#' trait
#'
#' @return Returns an object of \code{\link{Pop-class}},
#' \code{\link{HybridPop-class}} or \code{\link{MultiPop-class}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitA(10)
#' SP$setVarE(h2=0.5)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' #Create 3 biparental families with 10 progeny
#' pop2 = randCross(pop, nCrosses=3, nProgeny=10, simParam=SP)
#'
#' #Select best 2 families
#' pop3 = selectFam(pop2, 2, simParam=SP)
#'
#' @export
selectFam = function(pop,nFam,trait=1,use="pheno",sex="B",
                     famType="B",selectTop=TRUE,returnPop=TRUE,
                     candidates=NULL,simParam=NULL,...){
  stopifnot(nFam>=0)
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }
  if(is(pop,"MultiPop")){
    stopifnot(returnPop, is.null(candidates))
    pop@pops = lapply(pop@pops, selectFam, nFam=nFam, trait=trait,
                      use=use, sex=sex, famType=famType,
                      selectTop=selectTop, returnPop=TRUE,
                      candidates=NULL, simParam=simParam, ...)
    return(pop)
  }
  eligible = checkSexes(pop=pop,sex=sex,simParam=simParam,...)
  if(!is.null(candidates)){
    candidates = getCandidates(pop=pop,candidates=candidates)
    eligible = eligible[eligible%in%candidates]
  }
  allFam = getFam(pop=pop,famType=famType)
  availFam = allFam[eligible]
  if(nFam>length(unique(availFam))){
    nFam = length(unique(availFam))
    warning("Fewer suitable families than nFam, returning ", nFam, " families")
  }
  response = getResponse(pop=pop,trait=trait,use=use,
                         simParam=simParam,...)
  if(is.matrix(response)){
    stopifnot(ncol(response)==1)
  }
  response = response[eligible]
  #Calculate family means
  famMeans = aggregate(response,list(families=availFam),mean)
  response = famMeans$x
  #Select families
  bestFam = order(response,decreasing=selectTop)[0:nFam]
  bestFam = famMeans$families[bestFam]
  take = which(allFam%in%bestFam)
  take = take[take%in%eligible]
  if(returnPop){
    return(pop[take])
  }else{
    return(take)
  }
}

#' @title Select individuals within families
#'
#' @description Selects a subset of nInd individuals from each
#' full-sib family within a population. Will return all individuals
#' from a full-sib family if it has fewer than or equal to nInd individuals.
#'
#' @param pop an object of \code{\link{Pop-class}},
#' \code{\link{HybridPop-class}} or \code{\link{MultiPop-class}}
#' @param nInd the number of individuals to select within a family
#' @param trait the trait for selection. Either a number indicating
#' a single trait or a function returning a vector of length nInd.
#' The function must work on a vector or matrix of \code{use} values.
#' See the examples in \code{\link{selectInd}} and \code{\link{selIndex}}.
#' @param use select on genetic values "gv", estimated
#' breeding values "ebv", breeding values "bv", phenotypes "pheno",
#' or randomly "rand"
#' @param sex which sex to select. Use "B" for both, "F" for
#' females and "M" for males. If the simulation is not using sexes,
#' the argument is ignored.
#' @param famType which type of family to select. Use "B" for
#' full-sib families, "F" for half-sib families on female side and "M"
#' for half-sib families on the male side.
#' @param selectTop selects highest values if true.
#' Selects lowest values if false.
#' @param returnPop should results be returned as a
#' \code{\link{Pop-class}}. If FALSE, only the index of selected
#' individuals is returned.
#' @param candidates an optional vector of eligible selection candidates.
#' @param simParam an object of \code{\link{SimParam}}
#' @param ... additional arguments if using a function for
#' trait
#'
#' @return Returns an object of \code{\link{Pop-class}},
#' \code{\link{HybridPop-class}} or \code{\link{MultiPop-class}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitA(10)
#' SP$setVarE(h2=0.5)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' #Create 3 biparental families with 10 progeny
#' pop2 = randCross(pop, nCrosses=3, nProgeny=10, simParam=SP)
#'
#' #Select best individual per family
#' pop3 = selectWithinFam(pop2, 1, simParam=SP)
#'
#' @export
selectWithinFam = function(pop,nInd,trait=1,use="pheno",sex="B",
                           famType="B",selectTop=TRUE,returnPop=TRUE,
                           candidates=NULL,simParam=NULL,...){
  stopifnot(nInd>=0)
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }
  if(is(pop,"MultiPop")){
    stopifnot(returnPop, is.null(candidates))
    pop@pops = lapply(pop@pops, selectWithinFam, nInd=nInd,
                      trait=trait, use=use, sex=sex, famType=famType,
                      selectTop=selectTop, returnPop=TRUE,
                      candidates=NULL, simParam=simParam, ...)
    return(pop)
  }
  eligible = checkSexes(pop=pop,sex=sex,simParam=simParam,...)
  if(!is.null(candidates)){
    candidates = getCandidates(pop=pop,candidates=candidates)
    eligible = eligible[eligible%in%candidates]
  }
  families = getFam(pop=pop,famType=famType)
  response = getResponse(pop=pop,trait=trait,use=use,
                         simParam=simParam,...)
  if(is.matrix(response)){
    stopifnot(ncol(response)==1)
  }
  warn = FALSE
  selInFam = function(selFam){
    index = which(families%in%selFam)
    y = response[index]
    index = index[order(y,decreasing=selectTop)]
    index = index[index%in%eligible]
    if(length(index)<nInd){
      warn <<- TRUE
      return(index)
    }else{
      return(index[0:nInd])
    }
  }
  take = unlist(sapply(unique(families),selInFam))
  if(warn){
    warning("One or more families are smaller than nInd")
  }
  if(returnPop){
    return(pop[take])
  }else{
    return(take)
  }
}

#' @title Select open pollinating plants
#'
#' @description
#' This function models selection in an open pollinating
#' plant population. It allows for varying the proportion of
#' selfing. The function also provides an option for modeling
#' selection as occurring before or after pollination.
#'
#' @param pop an object of \code{\link{Pop-class}}
#' or \code{\link{MultiPop-class}}
#' @param nInd the number of plants to select
#' @param nSeeds number of seeds per plant
#' @param probSelf proportion of seeds expected from selfing.
#' Values range from 0 to 1.
#' @param pollenControl are plants selected before pollination
#' @param trait the trait for selection. Either a number indicating
#' a single trait or a function returning a vector of length nInd.
#' The function must work on a vector or matrix of \code{use} values.
#' See the examples in \code{\link{selectInd}} and \code{\link{selIndex}}.
#' @param use select on genetic values "gv", estimated
#' breeding values "ebv", breeding values "bv", phenotypes "pheno",
#' or randomly "rand"
#' @param selectTop selects highest values if true.
#' Selects lowest values if false.
#' @param candidates an optional vector of eligible selection candidates.
#' @param simParam an object of \code{\link{SimParam}}
#' @param ... additional arguments if using a function for
#' trait
#'
#' @return Returns an object of \code{\link{Pop-class}}
#' or \code{\link{MultiPop-class}}
#'
#' @examples
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$addTraitA(10)
#' SP$setVarE(h2=0.5)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' #Create new population by selecting the best 3 plants
#' #Assuming 50% selfing in plants and 10 seeds per plant
#' pop2 = selectOP(pop, nInd=3, nSeeds=10, probSelf=0.5, simParam=SP)
#'
#' @export
selectOP = function(pop,nInd,nSeeds,probSelf=0,
                    pollenControl=FALSE,trait=1,
                    use="pheno",selectTop=TRUE,
                    candidates=NULL,simParam=NULL,...){
  stopifnot(nInd>=0)
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }
  if(is(pop,"MultiPop")){
    stopifnot(is.null(candidates))
    pop@pops = lapply(pop@pops, selectOP, nInd=nInd, nSeeds=nSeeds,
                      probSelf=probSelf, pollenControl=pollenControl,
                      trait=trait, use=use, selectTop=selectTop,
                      candidates=NULL, simParam=simParam, ...)
    return(pop)
  }
  female = selectInd(pop=pop,nInd=nInd,trait=trait,
                     use=use,sex="B",selectTop=selectTop,
                     returnPop=FALSE,candidates=candidates,
                     simParam=simParam,...)
  nSelf = rbinom(n=nInd,prob=probSelf,size=nSeeds)
  if(pollenControl){
    male = female
  }else{
    male = 1:pop@nInd
  }
  crossPlan = lapply(1:nInd,function(x){
    male = male[!male==female[x]]
    if(length(male)==1){
      #Account for "convenience" feature of sample when length = 1
      cbind(rep(female[x],nSeeds),
            c(rep(female[x],nSelf[x]),
              rep(male,nSeeds-nSelf[x])))
    }else{
      cbind(rep(female[x],nSeeds),
            c(rep(female[x],nSelf[x]),
              sample(male,nSeeds-nSelf[x],replace=TRUE)))
    }
  })
  crossPlan = mergeMultIntMat(crossPlan,rep(nSeeds,nInd),2L)
  return(makeCross(pop=pop,crossPlan=crossPlan,simParam=simParam))
}
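# ---------------------------------------------------------------------
# Illustrative sketch (not part of the package source): two-stage
# selection with the functions defined above, i.e. between-family
# selection with selectFam() followed by within-family selection with
# selectWithinFam(). The setup mirrors the roxygen examples in this
# file.
# if(FALSE){
#   founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#   SP = SimParam$new(founderPop)
#   SP$addTraitA(10)
#   SP$setVarE(h2=0.5)
#   pop = newPop(founderPop, simParam=SP)
#   pop2 = randCross(pop, nCrosses=4, nProgeny=10, simParam=SP)
#   best = selectFam(pop2, 2, simParam=SP)       # keep best 2 families
#   sel = selectWithinFam(best, 3, simParam=SP)  # best 3 per family
# }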
# ==== end of file: AlphaSimR/R/selection.R ====
#' @title Writes a Pop-class as PLINK files
#'
#' @description
#' Writes a Pop-class to PLINK PED and MAP files. The arguments
#' for this function were chosen for consistency with
#' \code{\link{RRBLUP2}}. The base pair coordinate will be the locus
#' position as stored in AlphaSimR and not an actual base pair
#' position. This is because AlphaSimR doesn't track base pair
#' positions, only relative positions for the loci used in the
#' simulation.
#'
#' @param pop an object of \code{\link{Pop-class}}
#' @param baseName basename for PED and MAP files.
#' @param traits an integer indicating the trait to write, a trait name, or a
#' function of the traits returning a single value.
#' @param use what to use for PLINK's phenotype field. Either phenotypes "pheno",
#' genetic values "gv", estimated breeding values "ebv", breeding values "bv",
#' or random values "rand".
#' @param snpChip an integer indicating which SNP chip genotype
#' to use
#' @param useQtl should QTL genotypes be used instead of a SNP chip.
#' If TRUE, snpChip specifies which trait's QTL to use, and thus these
#' QTL may not match the QTL underlying the phenotype supplied in traits.
#' @param simParam an object of \code{\link{SimParam}}
#' @param ... additional arguments if using a function for
#' traits
#'
#' @examples
#' \dontrun{
#' #Create founder haplotypes
#' founderPop = quickHaplo(nInd=10, nChr=1, segSites=15)
#'
#' #Set simulation parameters
#' SP = SimParam$new(founderPop)
#' \dontshow{SP$nThreads = 1L}
#' SP$setSexes(sex="yes_rand")
#' SP$addTraitA(nQtlPerChr=10)
#' SP$addSnpChip(nSnpPerChr=5)
#' SP$setVarE(h2=0.5)
#'
#' #Create population
#' pop = newPop(founderPop, simParam=SP)
#'
#' # Write out PLINK files
#' writePlink(pop, baseName="test", simParam=SP)
#' }
#' @export
writePlink = function(pop, baseName, traits=1, use="pheno",
                      snpChip=1, useQtl=FALSE, simParam=NULL, ...){
  if(pop@ploidy!=2L){
    stop("writePlink() only supports ploidy=2")
  }
  if(is.null(simParam)){
    simParam = get(x="SP", envir=.GlobalEnv)
  }

  # Pull "phenotype" data indicated by traits
  y = getResponse(pop=pop, trait=traits, use=use,
                  simParam=simParam, ...)

  # Pull QTL/SNP data indicated by snpChip and useQtl
  if(useQtl){
    H1 = pullQtlHaplo(pop=pop, trait=snpChip, haplo=1,
                      asRaw=TRUE, simParam=simParam)
    H2 = pullQtlHaplo(pop=pop, trait=snpChip, haplo=2,
                      asRaw=TRUE, simParam=simParam)
    map = getQtlMap(trait=snpChip, simParam=simParam)
  }else{
    H1 = pullSnpHaplo(pop=pop, snpChip=snpChip, haplo=1,
                      asRaw=TRUE, simParam=simParam)
    H2 = pullSnpHaplo(pop=pop, snpChip=snpChip, haplo=2,
                      asRaw=TRUE, simParam=simParam)
    map = getSnpMap(snpChip=snpChip, simParam=simParam)
  }

  ## Make .ped file

  # Format pop data for a .fam (first columns of .ped)

  # Format sex for PLINK
  sex = pop@sex
  sex[which(sex=="H")] = "0"
  sex[which(sex=="M")] = "1"
  sex[which(sex=="F")] = "2"

  # Determine within-family ID of father, "0" if not present
  father = pop@id[match(pop@father, pop@id)]
  father[is.na(father)] = "0"

  # Determine within-family ID of mother, "0" if not present
  mother = pop@id[match(pop@mother, pop@id)]
  mother[is.na(mother)] = "0"

  fam = rbind(rep("1", pop@nInd),  # Family ID
              pop@id,              # Within-family ID
              father,              # Within-family ID of father
              mother,              # Within-family ID of mother
              sex,                 # Sex
              as.character(c(y)))  # Phenotype

  # Weave together haplotype data for writing to a file with
  # the write function (requires a transposed matrix)
  H = unname(rbind(t(H1), t(H2)))
  H = H[c(matrix(1:nrow(H),nrow=2,byrow=TRUE)),]

  # Free up some memory
  rm(H1, H2)

  # Convert to characters for writing to file
  H = ifelse(H, "2", "1")

  # Append .fam
  H = rbind(fam,H)

  # Write .ped file
  write(H, file=paste0(baseName,".ped"), ncolumns=nrow(H))

  ## Make .map file
  map = rbind(map$chr,                   # Chromosome
              map$id,                    # Variant id
              as.character(map$pos*100), # Genetic map position (cM)
              as.character(map$site))    # Physical map position

  # Write .map file
  write(map, file=paste0(baseName,".map"), ncolumns=nrow(map))

  # Don't return anything
  return(invisible())
}
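# ---------------------------------------------------------------------
# Illustrative sketch (not part of the package source): a quick check
# of the files written by writePlink() above. "test" matches the
# baseName used in the roxygen example, and the .map column names
# follow the layout constructed in this file (chr, id, cM, site).
# if(FALSE){
#   map = read.table("test.map",
#                    col.names=c("chr","id","cM","site"))
#   ped = read.table("test.ped") # first 6 columns are the .fam fields
#   head(map)
# }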
# ==== end of file: AlphaSimR/R/writePlink.R ====
#' @title Write data records
#'
#' @description
#' Saves a population's phenotypic and marker data to a directory.
#'
#' @param pop an object of \code{\link{Pop-class}}
#' @param dir path to a directory for saving output
#' @param snpChip which SNP chip genotype to save. If useQtl=TRUE, this
#' value will indicate which trait's QTL genotype to save. A value of
#' 0 will skip writing a snpChip.
#' @param useQtl should QTL genotypes be written instead of SNP chip
#' genotypes.
#' @param includeHaplo should markers be separated by female and male
#' haplotypes.
#' @param append if TRUE, new records are added to any existing records.
#' If FALSE, any existing records are deleted before writing new records.
#' Note that this will delete all files in the 'dir' directory.
#' @param simParam an object of \code{\link{SimParam}}
#'
#' @export
writeRecords = function(pop,dir,snpChip=1,useQtl=FALSE,
                        includeHaplo=FALSE,append=TRUE,simParam=NULL){
  if(is.null(simParam)){
    simParam = get("SP",envir=.GlobalEnv)
  }
  snpChip = as.integer(snpChip)
  dir = normalizePath(dir, mustWork=TRUE)
  if(!append){
    #Delete any existing files
    tmp = list.files(dir,full.names=TRUE)
    if(length(tmp)>0){
      unlink(tmp,recursive=TRUE)
    }
  }
  if(snpChip==0){
    nMarkers = 0
    markerType = "NULL"
  }else{
    if(useQtl){
      nMarkers = simParam$traits[[snpChip]]@nLoci
      markerType = paste("QTL",snpChip,sep="_")
    }else{
      nMarkers = simParam$snpChips[[snpChip]]@nLoci
      markerType = paste("SNP",snpChip,sep="_")
    }
  }
  #Check that the marker set isn't being changed
  nMarkerPath = file.path(dir,"nMarkers.txt")
  if(file.exists(nMarkerPath)){
    nMarkersDir = scan(nMarkerPath,integer(),quiet=TRUE)
    stopifnot(nMarkersDir==nMarkers)
  }else{
    writeLines(as.character(nMarkers),nMarkerPath)
  }
  markerTypePath = file.path(dir,"markerType.txt")
  if(file.exists(markerTypePath)){
    markerTypeDir = scan(markerTypePath,character(),quiet=TRUE)
    stopifnot(markerTypeDir==markerType)
  }else{
    writeLines(markerType,markerTypePath)
  }
  #Write info.txt
  info = data.frame(id=pop@id,mother=pop@mother,father=pop@father,
                    fixEff=pop@fixEff,stringsAsFactors=FALSE)
  filePath = file.path(dir,"info.txt")
  if(file.exists(filePath)){
    write.table(info,filePath,append=TRUE,col.names=FALSE,
                row.names=FALSE,quote=FALSE)
  }else{
    write.table(info,filePath,row.names=FALSE,quote=FALSE)
  }
  #Write gv.txt
  write.table(pop@gv,file.path(dir,"gv.txt"),append=TRUE,
              col.names=FALSE,row.names=FALSE)
  #Write pheno.txt
  write.table(pop@pheno,file.path(dir,"pheno.txt"),append=TRUE,
              col.names=FALSE,row.names=FALSE)
  #Write genotype.txt, unless snpChip=0
  if(snpChip!=0){
    if(useQtl){
      writeGeno(pop@geno,simParam$traits[[snpChip]]@lociPerChr,
                simParam$traits[[snpChip]]@lociLoc,
                file.path(dir,"genotype.txt"),simParam$nThreads)
      if(includeHaplo){
        writeOneHaplo(pop@geno,simParam$traits[[snpChip]]@lociPerChr,
                      simParam$traits[[snpChip]]@lociLoc,1L,
                      file.path(dir,"haplotype1.txt"),simParam$nThreads)
        writeOneHaplo(pop@geno,simParam$traits[[snpChip]]@lociPerChr,
                      simParam$traits[[snpChip]]@lociLoc,2L,
                      file.path(dir,"haplotype2.txt"),simParam$nThreads)
      }
    }else{
      writeGeno(pop@geno,simParam$snpChips[[snpChip]]@lociPerChr,
                simParam$snpChips[[snpChip]]@lociLoc,
                file.path(dir,"genotype.txt"),simParam$nThreads)
      if(includeHaplo){
        writeOneHaplo(pop@geno,simParam$snpChips[[snpChip]]@lociPerChr,
                      simParam$snpChips[[snpChip]]@lociLoc,1L,
                      file.path(dir,"haplotype1.txt"),simParam$nThreads)
        writeOneHaplo(pop@geno,simParam$snpChips[[snpChip]]@lociPerChr,
                      simParam$snpChips[[snpChip]]@lociLoc,2L,
                      file.path(dir,"haplotype2.txt"),simParam$nThreads)
      }
    }
  }
}
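# ---------------------------------------------------------------------
# Illustrative usage sketch (writeRecords() has no roxygen example):
# writes phenotype and SNP genotype records to a temporary directory.
# The directory must already exist because of the mustWork=TRUE check
# above; the setup otherwise follows the examples used elsewhere in
# this package.
# if(FALSE){
#   founderPop = quickHaplo(nInd=10, nChr=1, segSites=15)
#   SP = SimParam$new(founderPop)
#   SP$addTraitA(10)
#   SP$addSnpChip(5)
#   SP$setVarE(h2=0.5)
#   pop = newPop(founderPop, simParam=SP)
#   recordDir = file.path(tempdir(), "records")
#   dir.create(recordDir)
#   writeRecords(pop, dir=recordDir, simParam=SP)
#   list.files(recordDir)
# }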
# ==== end of file: AlphaSimR/R/writeRecords.R ====
## ----eval=FALSE-----------------------------------------------------------
# founderPop = quickHaplo(nInd=1000, nChr=10, segSites=1000)

## ----eval=FALSE-----------------------------------------------------------
# SP = SimParam$new(founderPop)

## ----eval=FALSE-----------------------------------------------------------
# SP$addTraitA(nQtlPerChr=1000)

## ----eval=FALSE-----------------------------------------------------------
# SP$setSexes("yes_sys")

## ----eval=FALSE-----------------------------------------------------------
# pop = newPop(founderPop)

## ----eval=FALSE-----------------------------------------------------------
# genMean = meanG(pop)

## ----eval=FALSE-----------------------------------------------------------
# for(generation in 1:20){
#   pop = selectCross(pop=pop, nFemale=500, nMale=25, use="gv", nCrosses=1000)
#   genMean = c(genMean, meanG(pop))
# }

## ----eval=FALSE-----------------------------------------------------------
# plot(0:20, genMean, xlab="Generation", ylab="Mean Genetic Value", type="l")

## ----message=FALSE, warning=FALSE-------------------------------------------
library(AlphaSimR)

## ---------------------------------------------------------------------------
# Creating Founder Haplotypes
founderPop = quickHaplo(nInd=1000, nChr=10, segSites=1000)

# Setting Simulation Parameters
SP = SimParam$new(founderPop)

## ----include=FALSE-----------------------------------------------------------
SP$nThreads = 1L

## ---------------------------------------------------------------------------
SP$addTraitA(nQtlPerChr=1000)
SP$setSexes("yes_sys")

# Modeling the Breeding Program
pop = newPop(founderPop)

genMean = meanG(pop)
for(generation in 1:20){
  pop = selectCross(pop=pop, nFemale=500, nMale=25, use="gv", nCrosses=1000)
  genMean = c(genMean, meanG(pop))
}

# Examining the Results
plot(0:20, genMean, xlab="Generation", ylab="Mean Genetic Value", type="l")
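## ----eval=FALSE-----------------------------------------------------------
# # Illustrative extension (not generated from the original vignette):
# # the same loop can also track the genetic variance with varG(), an
# # AlphaSimR summary function; for the single trait here it returns a
# # 1x1 matrix, which c() flattens to a number.
# genVar = c(varG(pop))
# for(generation in 1:20){
#   pop = selectCross(pop=pop, nFemale=500, nMale=25, use="gv", nCrosses=1000)
#   genVar = c(genVar, varG(pop))
# }
# plot(0:20, genVar, xlab="Generation", ylab="Genetic Variance", type="l")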
AlphaSimR/inst/doc/intro.R
---
title: "Introduction to AlphaSimR"
author: "Chris Gaynor"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
bibliography: AlphaSimR.bib
vignette: >
  %\VignetteIndexEntry{Introduction}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

AlphaSimR is a package for performing stochastic simulations of plant and animal breeding programs. It is the successor to the AlphaSim software for breeding program simulation [@AlphaSim]. AlphaSimR combines the features of its predecessor with the R software environment to create a flexible and easy-to-use platform capable of simulating very complex plant and animal breeding programs.

There is no single way to construct a simulation in AlphaSimR. This is an intentional design aspect of AlphaSimR, because it frees users from the constraints of predefined simulation structures. However, most simulations follow a general structure consisting of four steps:

1. Creating Founder Haplotypes
2. Setting Simulation Parameters
3. Modeling the Breeding Program
4. Examining the Results

The easiest way to learn how to use AlphaSimR is to learn about these steps. The easiest way to learn about these steps is to look at an example, so this vignette will introduce AlphaSimR by working through a small example simulation. The example will begin with a description of the breeding program being simulated. This will be followed by sections for each of the above steps and conclude with the full code for the example simulation.

## Example Breeding Program

A simplified animal breeding program modeling 20 discrete generations of selection. Each generation consists of 1000 animals, of which 500 are male and 500 are female. In each generation, the best 25 males are selected on the basis of their genetic value for a single polygenic trait and mated to the females to produce 1000 replacement animals.

The quantitative trait under selection is modeled as being controlled by 10,000 QTL. These QTL are split equally across 10 chromosomes, so there are 1,000 QTL per chromosome. The mean genetic value of all individuals in a generation is recorded to construct a plot of the genetic gain per generation.

## Creating Founder Haplotypes

The first step in the simulation is creating a set of founder haplotypes. The founder haplotypes are used to form the genome and genotypes of animals in the first generation. The genotypes of their descendants are then derived from these haplotypes using simulated mating and genetic recombination. For this simulation, only a single line of code is needed to create the haplotypes, and it is given below.

```{r eval=FALSE}
founderPop = quickHaplo(nInd=1000, nChr=10, segSites=1000)
```

The code above uses the `quickHaplo` function to generate the initial haplotypes. The `quickHaplo` function generates haplotypes by randomly sampling 1s and 0s. This approach is equivalent to modeling a population in linkage and Hardy-Weinberg equilibrium with allele frequencies of 0.5. This approach is very rapid but does not generate realistic haplotypes. This makes the approach great for prototyping code, but ill-suited for some types of simulations.

The preferred choice for simulating realistic haplotypes is to use the `runMacs` function. The `runMacs` function is a wrapper for the MaCS software, a coalescent simulation program included within the distribution of AlphaSimR [@MaCS]. The MaCS software is used by AlphaSimR to simulate bi-allelic genome sequences according to a population demographic history.
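For illustration, a `runMacs` call matching this example's dimensions might look like the sketch below; the `species` value is an assumed choice shown purely for demonstration.

```{r eval=FALSE}
# Sketch: simulate founder haplotypes under a predefined demographic history
founderPop = runMacs(nInd=1000, nChr=10, segSites=1000, species="CATTLE")
```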
The `runMacs` function allows the user to specify one of several predefined population histories or supply their own population history. A list of currently available population histories can be found in the `runMacs` help document.

An alternative choice for providing realistic initial haplotypes is to import them with the `newMapPop` function. This function allows the user to import their own haplotypes that can be generated in another software package or taken directly from real marker data.

## Setting Simulation Parameters

The second step is setting global parameters for the simulation. This can be done with three lines of code. The first line initializes an object containing the simulation parameters. The object must be initialized with the founder haplotypes created in the previous step and the code for doing so is given below.

```{r eval=FALSE}
SP = SimParam$new(founderPop)
```

The output from this function is an object of class `SimParam` and it is saved with the name `SP`. The name `SP` should almost always be used, because many AlphaSimR functions use an argument called "simParam" with a default value of `NULL`. If you leave this value as `NULL`, those functions will search your global environment for an object called `SP` and use it as the function's argument. This means that if you use `SP`, you won't need to specify a value for the "simParam" argument.

The next line of code defines the quantitative trait used for selection. As mentioned in the previous section, this trait is controlled by 1000 QTL per chromosome. The rest of the function arguments are left as their defaults, which include a trait mean of zero and a variance of one unit.

```{r eval=FALSE}
SP$addTraitA(nQtlPerChr=1000)
```

The 'A' at the end of `SP$addTraitA` indicates that the trait's QTL only have additive effects. All traits in AlphaSimR will include additive effects. Traits may also include any combination of three additional types of effects: dominance ("D"), epistasis ("E"), and genotype-by-environment ("G"). A specific combination of trait effects is requested by using a function with the appropriate letter ending. For example, a trait with additive and epistasis effects can be requested using `SP$addTraitAE`. The following trait types are currently offered: "A", "AD", "AE", "AG", "ADE", "ADG", "AEG", and "ADEG".

The final line of code defines how sexes are determined in the simulation. Sex will be systematically assigned (i.e. male, female, male, ...). Systematic assignment is used to ensure that there are always equal numbers of males and females.

```{r eval=FALSE}
SP$setSexes("yes_sys")
```

## Modeling the Breeding Program

We are now ready to start modeling the breeding program. To begin, we need to generate the initial population of animals. This step will take the haplotypes in `founderPop` and the information in `SP` to create an object of `Pop-class`.

```{r eval=FALSE}
pop = newPop(founderPop)
```

A `Pop-class` object represents a population of individuals. A population is the most important unit in AlphaSimR, because most AlphaSimR functions use one or more populations as an argument. In this regard, AlphaSimR can be thought of as modeling discrete populations as its basic unit. This contrasts with its predecessor, which used discrete generations.

Populations are not a fixed unit in AlphaSimR. Many functions in AlphaSimR take a population as an argument, modify the population, and then return the modified population. Populations can also be used "directly".
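For instance, the short sketch below (with hypothetical populations `popA` and `popB`) shows the kind of direct use meant here:

```{r eval=FALSE}
subPop = popA[1:10]    # pull the first 10 individuals into a new population
merged = c(popA, popB) # merge two populations into one
```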
As the sketch above shows, you can pull individuals out to form new (sub-)populations using `[]` and merge populations together using `c()`. This functionality is particularly useful for performing tasks for which AlphaSimR lacks a built-in function. However, the example breeding program presented here is easily modeled using built-in functions.

Before continuing to model the breeding program, you should first think about the data you’ll need for examining the results in the next stage. This is because you must explicitly request that the relevant data be saved. AlphaSimR is designed this way for increased speed and reduced memory usage.

In this example a plot of the generation mean over time is desired. All that is needed to construct this plot is a vector containing the mean in each generation. To start this vector, the mean in the current generation is saved as “genMean”. In each subsequent generation, the mean of that generation will be added to “genMean”. Measuring the mean in the current generation is accomplished with the code below.

```{r eval=FALSE}
genMean = meanG(pop)
```

The final lines of code are for modeling 20 generations of selection and mating. AlphaSimR has a host of functions for modeling both selection and mating. In this example the `selectCross` function is used, because it efficiently combines both selection and mating in a single function call. The function itself actually uses two separate AlphaSimR functions, `selectInd` and `randCross`, for selection and mating, respectively.

To model multiple generations of selection, the function call is placed within a loop with a line of code for tracking the population mean. Using a loop makes code easier to read and avoids needless duplication. In this loop “pop” is overwritten in each generation. Doing this keeps memory usage low and keeps the code simple. However, if the user needed to retain older populations there are several alternative approaches that could be adopted. These approaches include giving each population a unique name, storing populations as elements in a list, or dynamically growing populations with `c()`. The code for the loop is given below.

```{r eval=FALSE}
for(generation in 1:20){
  pop = selectCross(pop=pop, nFemale=500, nMale=25, use="gv", nCrosses=1000)
  genMean = c(genMean, meanG(pop))
}
```

## Examining the Results

The last step of a simulation is examining the results. In this example there is only one result: a vector of population means, one per generation. To examine this result, the code below will produce a basic line plot.

```{r eval=FALSE}
plot(0:20, genMean, xlab="Generation", ylab="Mean Genetic Value", type="l")
```

## Full Code

```{r message=FALSE, warning=FALSE}
library(AlphaSimR)
```

```{r}
# Creating Founder Haplotypes
founderPop = quickHaplo(nInd=1000, nChr=10, segSites=1000)

# Setting Simulation Parameters
SP = SimParam$new(founderPop)
```

```{r include=FALSE}
SP$nThreads = 1L
```

```{r}
SP$addTraitA(nQtlPerChr=1000)
SP$setSexes("yes_sys")

# Modeling the Breeding Program
pop = newPop(founderPop)
genMean = meanG(pop)
for(generation in 1:20){
  pop = selectCross(pop=pop, nFemale=500, nMale=25, use="gv", nCrosses=1000)
  genMean = c(genMean, meanG(pop))
}

# Examining the Results
plot(0:20, genMean, xlab="Generation", ylab="Mean Genetic Value", type="l")
```

# References
AlphaSimR/inst/doc/intro.Rmd
---
title: "Traits in AlphaSimR"
author: "Chris Gaynor"
date: "`r Sys.Date()`"
output: rmarkdown::pdf_document
tables: true
bibliography: AlphaSimR.bib
vignette: >
  %\VignetteIndexEntry{Traits}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

This vignette describes AlphaSimR's biological model for traits. The biological model is responsible for converting an individual's genotype into a genetic value. The genetic value is used to create a phenotype. AlphaSimR's biological model is primarily based on classic models used in quantitative genetics. Thus, users who have taken an introductory quantitative genetics course should already be familiar with most elements of AlphaSimR's biological model. This vignette will assume the reader is such a person and thus focus on aspects of AlphaSimR's biological model that may not be obvious or don't follow classic models. The vignette will begin by describing a novel classification system used in AlphaSimR to name different types of traits. Then it will present the full biological model along with a detailed description for each component in the model.

Traits in AlphaSimR are classified according to the biological effects they model using the **ADEG** framework. Under this framework, each trait is assigned a name consisting of one or more letters. The letters come from the name **ADEG**, whose letters correspond to biological effects. The biological effects are: **A**dditive, **D**ominance, **E**pistatic and **G**enotype-by-environment. For example, a trait with only additive effects is called an **A** trait and a trait with both additive and dominance effects is called an **AD** trait. The following traits are modeled in AlphaSimR: **A**, **AD**, **AE**, **AG**, **ADE**, **ADG**, **AEG**, and **ADEG**.

The most complex trait in AlphaSimR is an **ADEG** trait, because it includes all biological effects. This trait is represented using the equation below.

\begin{equation}
GV(x, w) = \mu + A(x) + D(x) + E(x) + G(x, w)
\end{equation}

The left-hand side of equation (1) consists of the following terms: $GV$, which represents an individual’s genetic value; $x$, which represents a vector of QTL genotype dosages; and $w$, which represents an environmental covariate. The right-hand side of the equation consists of an intercept ($\mu$) and four functions. The intercept is used to obtain the user specified trait mean in a founder population. The functions model each of the biological effects. All other traits in the **ADEG** framework can be modeled with equation (1) by simply removing any functions for biological effects that those traits lack.

The discussion of the **ADEG** framework will continue below with an explanation for each function in equation (1) after first describing the concept of genotype dosage scaling. Genotype dosage scaling is discussed first, because it is applied within each of the aforementioned functions.

# Genotype Dosage Scaling

AlphaSimR defines an individual's raw genotype dosage as the number of copies of the “1” allele at a locus. Since all loci in AlphaSimR are biallelic with alleles "0" and "1", this definition for raw genotype dosage fully explains an individual's genotype. The number of copies of the "0" allele is just the individual's ploidy level minus its raw genotype dosage. This is usually a rather irrelevant detail, because most users will want to model diploid organisms, whose ploidy level is always two.
However, AlphaSimR can also be used to model a wide range of autopolyploid organisms and it even allows for mixing ploidy levels within a simulation. This means that the allowable range for raw genotype dosage is not a fixed value and that the software must account for this fact. AlphaSimR accounts for different levels of ploidy by scaling an organism's genotype dosage in accordance with its ploidy level.

The primary motivation for using scaled dosages is to unify user inputs. The use of scaled dosages can also make it easier to compare simulations with different levels of ploidy. However, the user must take care when making such comparisons, because the underlying assumptions used by this model may not be valid. The use of scaled genotype dosages implies that an individual's genetic value depends on the relative ratio of alleles and that it is independent of the organism’s ploidy level. For some traits in some organisms there is evidence to support this assumption or at least not reject it, but there is also plenty of evidence clearly rejecting this assumption in other traits [@Gallais]. Thus, the direct comparisons that are possible in AlphaSimR are not always biologically relevant. However, the vast majority of users won't be running a simulation that depends on this assumption, so the information presented below is solely for the purpose of understanding the functions presented in the following sections.

There are two types of scaled genotype dosages in AlphaSimR: additive and dominance. An explanation for both is given below. This is followed by a table providing an example of dosage scaling in both diploid and autotetraploid organisms.

The scaled additive genotype dosage ($x_A$) is shown in equation (2). This equation linearly scales relative dosage to set the values for opposing homozygotes to -1 and 1.

\begin{equation}
x_A = \big( x - \tfrac{ploidy}{2} \big) \big( \tfrac{2}{ploidy} \big)
\end{equation}

The scaled dominance genotype dosage ($x_D$) is shown in equation (3). This equation uses non-linear scaling to fit the value of the opposing homozygotes to 0 and the middlemost heterozygote to 1. The middlemost heterozygote is the genotype with an equal ratio of "0" and "1" alleles. For a diploid organism, the scaled dominance genotype dosage matches the classic parameterization for dominance. For autopolyploid organisms, the scaled dominance genotype dosage is consistent with digenic dominance (discussed later).

\begin{equation}
x_D = x \big( ploidy - x \big) \big( \tfrac{2}{ploidy} \big)^2
\end{equation}

Table 1 provides an example of raw and scaled genotype dosages for a diploid and an autotetraploid organism. The diploid and tetraploid columns represent the raw genotype dosages for the respective organisms. The additive and dominance columns represent the corresponding scaled genotype dosages. This table shows how the 0, 1 and 2 genotypes of a diploid organism are treated as being equivalent to the 0, 2 and 4 genotypes of an autotetraploid organism.

\begin{table}[h]
\centering
\caption{Raw and scaled genotype dosages.}
\begin{tabular}{@{}rrrr@{}}
\toprule
Diploid & Tetraploid & Additive & Dominance \\
\midrule
0 & 0 & $-1$ & $0$ \\
  & 1 & $-1/2$ & $3/4$ \\
1 & 2 & $0$ & $1$ \\
  & 3 & $1/2$ & $3/4$ \\
2 & 4 & $1$ & $0$ \\
\bottomrule
\end{tabular}
\end{table}

# Additive Effects

\begin{equation}
A(x) = \sum a x_A
\end{equation}

The function for additive effects is given above in equation (4).
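As a quick worked illustration of equation (4), consider a hypothetical single diploid QTL with additive effect $a = 0.5$ (the values are chosen only for demonstration). The raw dosages $x = 0, 1, 2$ scale via equation (2) to $x_A = -1, 0, 1$, so the additive values are:

\begin{equation*}
A(0) = -0.5, \qquad A(1) = 0, \qquad A(2) = 0.5
\end{equation*}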
In equation (4), the right-hand side is a summation over all QTL for the product of the additive effect ($a$) and the scaled additive dosage ($x_A$). This equation is equivalent to parameterizations in classic quantitative trait models. The only unique aspect of additive effects in AlphaSimR is the sampling of those effects.

Additive effects are sampled in two stages. The first stage involves sampling initial values and is similar to methods used by other stochastic simulation software programs. It is the second stage that is unique. This stage involves scaling the magnitude of the initial values to achieve a desired genetic variance, that being either total or additive genetic variance.

The first stage of sampling additive effects is setting initial values for the effects. Initial values are sampled from either a standard normal distribution or a gamma distribution. When using a gamma distribution, the user specifies the shape parameter and the scale parameter is set to 1. The deviates sampled from a gamma distribution are randomly assigned either a positive or negative sign. Random assignment of this sign results in an expected distribution that is symmetric with mean 0.

The second stage is scaling the magnitude of the effects to achieve a user specified genetic variance. The user specified genetic variance can be either total or additive. The scaling procedure involves not just the additive effects, but also dominance and epistatic effects if the trait includes those effects as well. The procedure works by first calculating the variance in the founder population using the initially sampled effects and then calculating a scaling constant that is applied to all effects to achieve the desired variance in the founder population. The scaling constant equals the square-root of the target genetic variance (total or additive) divided by the square-root of the initial genetic variance (total or additive). Note that the founder population is the population the user uses to initialize the simulation parameters object.

Scaling the magnitude of effects allows AlphaSimR to build simulations with trait values matching real-world counterparts. For example, a user simulating grain yield in a plant species is likely to have estimates for the means and variances in tons per hectare. The user can use those to create a simulation with the same values. The primary benefit of matching these values is to make interpretation of simulation results more intuitive. The significance of the results will be more apparent to the user because they can work in a scale that is familiar to them. It also allows the user to more easily identify potential flaws in the simulation when values move outside the range of biologically acceptable values.

# Dominance Effects

\begin{equation}
D(x) = \sum d x_D
\end{equation}

The function for dominance effects is given above in equation (5). The right-hand side of the equation is a summation over all QTL for the product of the dominance effect ($d$) and the scaled dominance dosage ($x_D$). This equation is equivalent to the parameterization of dominance in classic quantitative trait models for diploid organisms. As with the additive effects, the method for sampling dominance effects in AlphaSimR requires special attention.

Dominance effects are calculated in AlphaSimR using the concept of dominance degrees. The formula for this calculation is given below in equation (6).
The equation shows that the dominance effect ($d$) at a locus is the dominance degree ($\delta$) at that locus times the absolute value of its additive effect ($a$).

\begin{equation}
d = \delta \left| a \right|
\end{equation}

The rationale behind using dominance degrees is their intuitive biological interpretation in diploid organisms. For example, a dominance degree of 0 represents no dominance and an additive model. A dominance degree of 1 corresponds to complete dominance. Dominance degrees between 0 and 1 correspond to partial dominance, and values above 1 correspond to over-dominance.

Dominance effects, as with additive effects, are sampled in two stages. The first stage is sampling of initial values and the second stage is scaling the magnitude of those values. The sampling of initial effects involves two user-supplied parameters: the mean and variance of a normal distribution used to sample dominance degrees. The dominance degrees are then used in conjunction with the additive effects to calculate dominance effects. The scaling procedure is then performed as described above in the additive effects section to calculate the scaling constant. The scaling constant is then directly applied to the dominance effects. Note that this scaling changes the value of the dominance effect, but does not change the value of the dominance degree. This means that the specification of the dominance degree distribution is independent of the requested genetic variance.

Dominance effects become more complicated when discussing polyploid organisms. This is because polyploids have additional heterozygous genotypes. For example, an autotetraploid organism has two homozygous genotypes (0 and 4) and three heterozygous genotypes (1, 2 and 3). Each additional heterozygous genotype requires an additional dominance parameter to obtain a fully parameterized model. Several different parameterizations have been used for polyploids and a full discussion of these parameterizations is outside the scope of this document. Interested readers should refer to Gallais's textbook for more details [-@Gallais].

AlphaSimR does not add additional terms to its dominance model for polyploids, so it does not use a fully parameterized model. Instead, AlphaSimR uses a digenic dominance model for all ploidy levels due to its use of scaled dominance genotype dosages. This model was chosen because it provides consistency in user interface and internal coding regardless of ploidy level and it provides a reasonable approximation of partial dominance in highly polygenic traits.

An unfortunate side effect of the dominance model in polyploids is that the previously described interpretation of dominance degrees breaks down. Consider a single QTL with an additive effect of 1 and a dominance degree of 1. The genotypes and genetic values for this example, for both a diploid and an autotetraploid organism, are given below in Table 2. For the diploid organism, the heterozygous genotype is equivalent to the best homozygote, so a dominance degree of 1 indicates complete dominance. However, the autotetraploid organism has three heterozygous genotypes. The value of the middlemost heterozygous genotype is equivalent to the best homozygote, but it is not the heterozygous genotype with the highest value. The heterozygous genotype with the highest value is better than the best homozygote, so a dominance degree of 1 actually represents over-dominance in an autotetraploid.
\begin{table}[h]
\centering
\caption{Example genetic values ($a=1$, $d=1$).}
\begin{tabular}{@{}rrr@{}}
\toprule
Diploid Dosage & Tetraploid Dosage & Genetic Value \\
\midrule
0 & 0 & $-1$ \\
  & 1 & $1/4$ \\
1 & 2 & $1$ \\
  & 3 & $5/4$ \\
2 & 4 & $1$ \\
\bottomrule
\end{tabular}
\end{table}

# Epistatic Effects

\begin{equation}
E(x) = \sum e x_{A_1} x_{A_2}
\end{equation}

The function for epistatic effects is given above in equation (7). The summation on the right-hand side is over pairs of QTL, because AlphaSimR uses a simplified model of epistasis that restricts epistatic interactions to pairs of loci. Each QTL must be present in one and only one pair, so the number of pairs is equal to half the number of QTL. The remaining elements on the right-hand side of equation (7) are epistatic effects ($e$), the scaled additive dosage for the first locus in a pair ($x_{A_1}$) and the scaled additive dosage for the second locus in a pair ($x_{A_2}$).

The epistatic model in equation (7) is somewhat constrained. The model only allows epistasis between pairs of loci and it only models additive-by-additive epistasis. The motivation behind using this constrained model is to maintain computational tractability when using very large numbers of QTL. As with the dominance model, this model is considered a reasonable approximation for a more complicated reality.

The sampling of epistatic effects is similar to the sampling of additive effects with one additional user specified parameter for the relative ratio of the additive-by-additive epistatic variance. The relative ratio refers to the ratio between additive-by-additive epistatic variance and additive variance. This parameter sets the variance of epistatic effects. Specifically, the variance of the effects is set to a value expected to achieve the desired ratio in a random mating population whose QTL are in linkage equilibrium and have an allele frequency of 0.5. Note that most populations, including simulated populations, will not meet these assumptions, so the observed relative ratio between additive and additive-by-additive epistatic variance is unlikely to match the requested ratio. The observed additive-by-additive epistatic variance will typically be smaller, because its value is maximized at allele frequency 0.5. Note that unselected bi-parental populations derived from inbred parents are expected to have allele frequencies of 0.5 for segregating alleles. Thus, these populations are ideal for estimating a reasonable value for the relative ratio of additive-by-additive epistatic variance.

# Genotype-by-Environment Effects

\begin{equation}
G(x, w) = w b(x)
\end{equation}

\begin{equation}
b(x) = \mu_G + \sum g x_A
\end{equation}

The function for genotype-by-environment effects is given above in equations (8) and (9). The right-hand side of equation (8) contains two parts: an environmental covariate ($w$) and a genotype specific slope ($b(x)$). The formula for the genotype specific slope is shown in equation (9). The right-hand side of this equation includes an intercept value ($\mu_G$) and a summation over all QTL for the product of a genotype-by-environment effect ($g$) and the scaled additive dosage ($x_A$).

The following paragraphs explain how the above equations model genotype-by-environment interactions. Initially, the explanation will cover a case where a population has genotype-by-environment interaction variance, but no environmental variance.
An explanation for how the parameters of these equations are changed to model both genotype-by-environment interaction variance and environmental variance is given in the next section.

To begin, a description of how the variables in the above equations are sampled is needed. This is because it is much easier to understand how the formulas come together to model genotype-by-environment interactions when it is understood what the variables represent.

The environmental covariate ($w$) in equation (8) represents an environmental component of the genotype-by-environment interaction. The value of the environmental covariate is randomly sampled from a standard normal distribution. By definition, this means that the average value of the environmental covariate is zero and its variance is one. The average value of the environmental covariate is considered to be the target environment. Thus, the value for equation (8) in the target environment is always zero.

The genotype specific slope in equation (9) represents the genetic component of the genotype-by-environment interaction. The astute reader will notice that this equation is very similar to the equation for an additive trait. Indeed, the genotype specific slope is really just an additive trait. The term $\mu_G$ serves a similar role to $\mu$ in equation (1) and the summation on the right-hand side of equation (9) is similar to the function for additive effects in equation (4). Also like an additive trait, effects for the genotype specific slope are scaled to achieve a specific mean and variance in the founder population. In this case, the mean is set to zero and the variance is set to the user specified genotype-by-environment interaction variance.

To understand how the environmental covariate and the genotype specific slope model genotype-by-environment interactions it helps to review the properties of these variables. The environmental covariate is just a random variable with a mean of zero and a variance of one. The genotype specific slope is also a random variable that, with regard to the founder population, has a mean of zero and a variance equal to the genotype-by-environment interaction variance. This means that equation (8) is just the product of two random variables. These random variables are independent, so the formula for the variance of this product is given in the equation below.

\begin{equation}
Var(w b) = E[w]^2 Var(b) + Var(w) E[b]^2 + Var(w) Var(b)
\end{equation}

Equation (10) above gives the variance for the product of $w$ and $b$ in equation (8). The term $Var()$ indicates the variance of a variable and the term $E[]$ indicates the expectation (mean) of a variable. In the founder population, the expectations for $w$ and $b$ are zero, so the first two terms on the right-hand side of the equation drop out. Equation (10) reduces to the product of the two variances. The variance of $w$ is one and the variance of $b$ equals the genotype-by-environment interaction variance, so the variance in equation (8) equals the genotype-by-environment interaction variance and is equivalent to the variance in the genotype specific slopes.

It must be repeated that the above description is limited to the founder population. This is an important point, because the mean and variance for the genotype specific slope can and will be different in other populations since it is under genetic control.

## Adding Environmental Variance

The genotype-by-environment effects model described above is altered to model a founder population with a non-zero environmental variance.
All of the above equations remain relevant under the altered model and only the sampling distributions of the effects in the equations are changed. Specifically, the variance of the environmental covariate is set to the environmental variance ($\sigma_E^2$) and the genotype specific slope is set to a different mean and variance. The mean of the genotype specific slope is set to one and the variance is set to the genotype-by-environment interaction variance divided by the environmental variance ($\tfrac{\sigma_{GE}^2}{\sigma_E^2}$).

The logic behind these modifications becomes clear with an examination of equation (10). The right-hand side of equation (10) contains three terms. The first term equals zero, because the environmental covariate has a mean of zero (_i.e._ $E[w] = 0$). The second term equals the environmental variance, because the mean of the genotype specific slope equals one and the variance of the environmental covariate equals the environmental variance (_i.e._ $E[b] = 1$ and $Var(w) = \sigma_E^2$). Finally, the value of the last term equals the genotype-by-environment interaction variance (_i.e._ $Var(w) Var(b) = \sigma_E^2 \tfrac{\sigma_{GE}^2}{\sigma_E^2} = \sigma_{GE}^2$).

An examination of equation (10) also shows how both the environmental and genotype-by-environment interaction variances are population-specific parameters that are under genetic control. For example, a population with a higher average genotype specific slope will also have a higher value for the second term in equation (10). Such a population thus has a higher environmental variance even though the environment itself has not changed. Likewise, the amount of genotype-by-environment interaction variance depends on the variance for the genotype specific slope.

## Relationship to Finlay-Wilkinson Regression

AlphaSimR's model for genotype-by-environment effects is effectively a biological model for Finlay-Wilkinson regression. Finlay-Wilkinson regression is a classic technique for analyzing genotype-by-environment interactions [@FinlayWilkinson]. In its simplest form, an individual's genotype-by-environment interaction is reduced to an intercept and a slope. The slope in Finlay-Wilkinson regression is roughly equivalent to the genotype specific slope in equation (9). The intercept in Finlay-Wilkinson regression is roughly equivalent to the genetic value of an individual in the target environment.

Initially, the similarity between these two models was not intentional. The model used in AlphaSimR originates with the model used by Gaynor _et al._ [-@gaynor_2017]. This model treated each QTL's additive effect as a random variable whose value depended on the value of an environmental covariate. That model is roughly equivalent to AlphaSimR's model when the founder population has zero environmental variance. The difference between the models is that AlphaSimR scales effects to exactly achieve a desired variance, whereas the Gaynor _et al._ model randomly sampled effects so that the variance of the sampled effects equaled the desired variance in expectation. It was only realized later that a slight change to the model, as described above, could introduce a specified amount of environmental variance and that those modifications would result in a model with properties matching those measured by Finlay-Wilkinson regression.

## User Interaction

AlphaSimR users do not directly observe nor set the value of the environmental covariate. Instead, they indirectly set its value by providing a randomly sampled p-value for each environment.
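For example, with a hypothetical population object `pop`, a p-value could be sampled and supplied along these lines (treating `setPheno` and its `p` argument as the relevant phenotyping call is an illustrative assumption):

```{r eval=FALSE}
pVal = runif(1)                      # sample one environment's p-value
pop = setPheno(pop, h2=0.5, p=pVal)  # phenotype the population in that environment
```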
AlphaSimR then uses this p-value to calculate the appropriate value of $w$ for any level of environmental variance. Users should note that p-values follow a uniform distribution over the range of zero to one, so it is recommended that users randomly generate p-values using R's built-in `runif` function. A p-value of 0.5 corresponds to $w$ equaling zero and is equivalent to the target environment. If the user does not supply a p-value, AlphaSimR will sample one at random. Genetic values and genetic variances reported by AlphaSimR are always for the target environment. Since the target environment corresponds to $w$ equaling zero, equation (8) is effectively ignored in these calculations. Only the values for phenotypes and phenotypic variance ever reflect a contribution from genotype-by-environment interaction. # References
AlphaSimR/inst/doc/traits.Rmd
--- title: "Introduction to AlphaSimR" author: "Chris Gaynor" date: "`r Sys.Date()`" output: rmarkdown::html_vignette bibliography: AlphaSimR.bib vignette: > %\VignetteIndexEntry{Introduction} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- AlphaSimR is a package for performing stochastic simulations of plant and animal breeding programs. It is the successor to the AlphaSim software for breeding program simulation [@AlphaSim]. AlphaSimR combines the features of its predecessor with the R software environment to create a flexible and easy-to-use software environment capable of simulating very complex plant and animal breeding programs. There is no single way to construct a simulation in AlphaSimR. This is an intentional design aspect of AlphaSimR, because it frees users from the constraints of predefined simulation structures. However, most simulations follow a general structure consisting of four steps: 1. Creating Founder Haplotypes 2. Setting Simulation Parameters 3. Modeling the Breeding Program 4. Examining the Results The easiest way to learn how to use AlphaSimR is to learn about these steps. The easiest way to learn about these steps is to look at an example, so this vignette will introduce AlphaSimR by working through a small example simulation. The example will begin with a description of the breeding program being simulated. This will be followed by sections for each of the above steps and conclude with the full code for the example simulation. ## Example Breeding Program A simplified animal breeding program modeling 20 discrete generations of selection. Each generation consists of 1000 animals, of which 500 are male and 500 are female. In each generation, the best 25 males are selected on the basis of their genetic value for a single polygenic trait and mated to the females to produce 1000 replacement animals. The quantitative trait under selection is modeled as being controlled by 10,000 QTL. These QTL are equally split across 10 chromosome groups so that there are 1,000 QTL per chromosome. The mean genetic value of all individuals in a generation is recorded to construct a plot for the genetic gain per generation. ## Creating Founder Haplotypes The first step in the simulation is creating a set of founder haplotypes. The founder haplotypes are used to form the genome and genotypes of animals in the first generation. The genotypes of their descendants are then derived from these haplotypes using simulated mating and genetic recombination. For this simulation, only a single line of code is needed to create the haplotypes, and it is given below. ```{r eval=FALSE} founderPop = quickHaplo(nInd=1000, nChr=10, segSites=1000) ``` The code above uses the `quickHaplo` function to generate the initial haplotypes. The `quickHaplo` function generates haplotypes by randomly sampling 1s and 0s. This approach is equivalent to modeling a population in linkage and Hardy-Weinberg equilibrium with allele frequencies of 0.5. This approach is very rapid but does not generate realistic haplotypes. This makes the approach great for prototyping code, but ill-suited for some types of simulations. The preferred choice for simulating realistic haplotypes is to use the `runMacs` function. The `runMacs` function is a wrapper for the MaCS software, a coalescent simulation program included within the distribution of AlphaSimR [@MaCS]. The MaCS software is used by AlphaSimR to simulate bi-allelic genome sequences according to a population demographic history. 
The `runMacs` function allows the user to specify one of several predefined population histories or supply their own population history. A list of currently available population histories can be found in the `runMacs` help document. An alternative choice for providing realistic initial haplotypes is to import them with the `newMapPop` function. This function allows the user to import their own haplotypes that can be generated in another software package or taken directly from real marker data. ## Setting Simulation Parameters The second step is setting global parameters for the simulation. This can be done with three lines of code. The first line initializes an object containing the simulation parameters. The object must be initialized with the founder haplotypes created in the previous step and the code for doing so is given below. ```{r eval=FALSE} SP = SimParam$new(founderPop) ``` The output from this function is an object of class `SimParam` and it is saved with the name `SP`. The name `SP` should almost always be used, because many AlphaSimR functions use an argument called "simParam" with a default value of `NULL`. If you leave this value as `NULL`, those functions will search your global environment for an object called `SP` and use it as the function's argument. This means that if you use `SP`, you won't need to specify a value for the "simParam" argument. The next line of code defines the quantitative trait used for selection. As mentioned in the previous section, this trait is controlled by 1000 QTL per chromosome. The rest of the function arguments are left as their defaults, which include a trait mean of zero and a variance of one unit. ```{r eval=FALSE} SP$addTraitA(nQtlPerChr=1000) ``` The 'A' at the end of `SP$addTraitA` indicates that the trait's QTL only have additive effects. All traits in AlphaSimR will include additive effects. Traits may also include any combination of three additional types of effects: dominance ("D"), epistasis ("E"), and genotype-by-environment ("G"). A specific combination of trait effects is requested by using a function with the appropriate letter ending. For example, a trait with additive and epistasis effects can be requested using `SP$addTraitAE`. The following trait types are currently offered: "A", "AD", "AE", "AG", "ADE", "ADG", "AEG", and "ADEG". The final line of code defines how sexes are determined in the simulation. Sex will be systematically assigned (i.e. male, female, male, ...). Systematic assignment is used to ensure that there is always equal numbers of males and females. ```{r eval=FALSE} SP$setSexes("yes_sys") ``` ## Modeling the Breeding Program We are now ready to start modeling the breeding program. To begin, we need to generate the initial population of animals. This step will take the haplotypes in `founderPop` and the information in `SP` to create an object of `Pop-class`. ```{r eval=FALSE} pop = newPop(founderPop) ``` A `Pop-class` object represents a population of individuals. A population is the most important units in AlphaSimR, because most AlphaSimR functions use one or more populations as an argument. In this regard, AlphaSimR can be thought of as modeling discrete populations as its basic unit. This contrasts with its predecessor, which used discrete generations. Populations are not a fixed unit in AlphaSimR. Many functions in AlphaSimR take a population as an argument, modify the population, and then return the modified population. Populations can also be used "directly". 
For example, you can pull individuals out to form new (sub-)populations using `[]` and you can merge populations together using `c()`. This functionality is particularly useful for performing tasks in AlphaSimR that lacks a built-in function. However, the example breeding program presented here is easily modeled using built-in functions. Before continuing to model the breeding program, you should first think about the data you’ll need for examining the results in the next stage. This is because you must expressly request that the relevant data is saved. AlphaSimR is designed this way for increased speed and reduced memory usage. In this example a plot of the generation mean over time is desired. All that is needed to construct this plot is a vector containing the mean in each generation. To start this vector, the mean in the current generation is saved as “genMean”. In each subsequent generation, the mean of that generation will be added to “genMean”. Measuring the mean in the current generation is accomplished with the code below. ```{r eval=FALSE} genMean = meanG(pop) ``` The final lines of code are for modeling 20 generations of selection and mating. AlphaSimR has a host of functions for modeling both selection and mating. In this example the `selectCross` function is used, because it efficiently combines both selection and mating in a single function call. The function itself actually uses two separate function in AlphaSimR, `selectInd` and `randCross` for selection and mating, respectively. To model multiple generations of selection, the function call is placed within a loop with a line of code for tracking the population mean. Using a loop makes code easier to read and avoids needless duplication. In this loop “pop” is overwritten in each generation. Doing this keeps memory usage low and keeps the code simple. However, if the user needed to retain older populations there are several alternative approaches that could be adopted. These approaches include giving each population a unique name, storing populations as elements in a list, or dynamically growing populations with `c()`. The code for the loop is given below. ```{r eval=FALSE} for(generation in 1:20){ pop = selectCross(pop=pop, nFemale=500, nMale=25, use="gv", nCrosses=1000) genMean = c(genMean, meanG(pop)) } ``` ## Examining the Results The last step to a simulation is examining the results. In this example there is only one result: a vector of population means for each generation. To examine this result, the code below will produce a basic line plot. ```{r eval=FALSE} plot(0:20, genMean, xlab="Generation", ylab="Mean Genetic Value", type="l") ``` ## Full Code ```{r message=FALSE, warning=FALSE} library(AlphaSimR) ``` ```{r} # Creating Founder Haplotypes founderPop = quickHaplo(nInd=1000, nChr=10, segSites=1000) # Setting Simulation Parameters SP = SimParam$new(founderPop) ``` ```{r include=FALSE} SP$nThreads = 1L ``` ```{r} SP$addTraitA(nQtlPerChr=1000) SP$setSexes("yes_sys") # Modeling the Breeding Program pop = newPop(founderPop) genMean = meanG(pop) for(generation in 1:20){ pop = selectCross(pop=pop, nFemale=500, nMale=25, use="gv", nCrosses=1000) genMean = c(genMean, meanG(pop)) } # Examining the Results plot(0:20, genMean, xlab="Generation", ylab="Mean Genetic Value", type="l") ``` # References
/scratch/gouwar.j/cran-all/cranData/AlphaSimR/vignettes/intro.Rmd
--- title: "Traits in AlphaSimR" author: "Chris Gaynor" date: "`r Sys.Date()`" output: rmarkdown::pdf_document tables: true bibliography: AlphaSimR.bib vignette: > %\VignetteIndexEntry{Traits} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- This vignette describes AlphaSimR's biological model for traits. The biological model is responsible for converting an individual's genotype into a genetic value. The genetic value is used to create a phenotype. AlphaSimR's biological model is primarily based on classic models used in quantitative genetics. Thus, users that have taken an introductory quantitative genetics course should already be familiar with most elements of AlphaSimR's biological model. This vignette will assume the reader is such a person and thus focus on aspects of AlphaSimR's biological model that may not be obvious or don't follow classic models. The vignette will begin by describing a novel classification system used in AlphaSimR to name different types of traits. Then it will present the full biological model along with a detailed description for each component in the model. Traits in AlphaSimR are classified according to the biological effects they model using the **ADEG** framework. Under this framework, each trait is assigned a name consisting of one or more letters. The letters come from the name **ADEG**, whose letters correspond to biological effects. The biological effects are: **A**dditive, **D**ominance, **E**pistatic and **G**enotype-by-environment. For example, a trait with only additive effects is called an **A** trait and a trait with both additive and dominance effects is called an **AD** trait. The following traits are modeled in AlphaSimR: **A**, **AD**, **AE**, **AG**, **ADE**, **ADG**, **AEG**, and **ADEG**. The most complex trait in AlphaSimR is an **ADEG** trait, because it includes all biological effects. This trait is represented using the equation below. \begin{equation} GV(x, w) = \mu + A(x) + D(x) + E(x) + G(x, w) \end{equation} The left-hand side of equation (1) consists of the following terms: $GV$, which represents an individual’s genetic value; $x$, which represents a vector of QTL genotype dosages; and $w$, which represents an environmental covariate. The right-hand side of the equation consists of an intercept ($\mu$) and four functions. The intercept is used to obtain the user specified trait mean in a founder population. The functions model each of the biological effects. All other trait in the **ADEG** framework can be modeled with equation (1) by simply removing any functions for biological effects that those traits lack. The discussion of the **ADEG** framework will continue below with an explanation for each function in equation (1) after first describing the concept of genotype dosage scaling. Genotype dosage scaling is discussed first, because it is applied within each of the aforementioned functions. # Genotype Dosage Scaling AlphaSimR defines an individual's raw genotype dosage as the number of copies of the “1” allele at a locus. Since all loci in AlphaSimR are biallelic with alleles "0" and "1", this definition for raw genotype dosage fully explains an individual's genotype. The number of copies of the "0" allele is just the individual's ploidy level minus its raw genotype dosage. This is usually a rather irrelevant detail, because most user will want to model diploid organism whose ploidy level is always two. 
However, AlphaSimR can also be used to model a wide range of autopolyploid organisms and it even allows for mixing ploidy levels within a simulation. This means that the allowable range for raw genotype dosage is not a fixed value and that software must account for this fact. AlphaSimR accounts for different levels of ploidy by scaling an organism's genotype dosage in accordance with its ploidy level. The primary motivation for using scaled dosages is to unify user inputs. The use of scaled dosages can also make it easier to compare simulations with different levels of ploidy. However, the user must use care when making such comparisons, because the underlying assumptions used by this model may not be valid. The use of scaled genotype dosages implies that an individual's genetic value depends on the relative ratio of alleles and that it is independent of the organism’s ploidy level. For some traits in some organisms there is evidence to support this assumption or at least not reject it, but there is also plenty of evidence clearly rejecting this assumption in other traits [@Gallais]. Thus, the direct comparisons that are possible in AlphaSimR are not always biologically relevant. However, the vast majority of users won't be running a simulation that depends on this assumption, so the information presented below is solely for the purpose of understanding the functions presented in the following sections. There are two types of scaled genotype dosages in AlphaSimR: additive and dominance. An explanation for both is given below. This is followed by a table providing an example of dosage scaling in both diploid and autotetraploid organisms. The scaled additive genotype dosage ($x_A$) is shown in equation (2). This equation linearly scales relative dosage to set the values for opposing homozygotes to -1 and 1. \begin{equation} x_A = \big( x - \tfrac{ploidy}{2} \big) \big( \tfrac{2}{ploidy} \big) \end{equation} The scaled dominance genotype dosage ($x_D$) is shown in equation (3). This equation uses non-linear scaling to fit the value of the opposing homozygotes to 0 and middlemost heterozygote to 1. The middlemost heterozygote is the genotype with an equal ratio of "0" and "1" alleles. For a diploid organism, the scaled dominance genotype dosage matches the classic parameterization for dominance. For autopolyploid organisms, the scaled dominance genotype dosage is consistent with digenic dominance (discussed later). \begin{equation} x_D = x \big( ploidy - x \big) \big( \tfrac{2}{ploidy} \big)^2 \end{equation} Table 1 provides an example of raw and scaled genotype dosages for a diploid and an autotetraploid organism. The diploid and tetraploid columns represent the raw genotype dosages for the respective organisms. The additive and dominance columns represent the corresponding scaled genotype dosages. This table shows how the 0, 1 and 2 genotypes of a diploid organism are treated as being equivalent to the 0, 2 and 4 genotypes of an autotetraploid organism. \begin{table}[h] \centering \caption{Raw and scaled genotype dosages.} \begin{tabular}{@{}rrrr@{}} \toprule Diploid & Tetraploid & Additive & Dominance \\ \midrule 0 & 0 & $-1$ & $0$ \\ & 1 & $-1/2$ & $3/4$ \\ 1 & 2 & $0$ & $1$ \\ & 3 & $1/2$ & $3/4$ \\ 2 & 4 & $1$ & $0$ \\ \bottomrule \end{tabular} \end{table} # Additive Effects \begin{equation} A(x) = \sum a x_A \end{equation} The function for additive effects is given above in equation (4). 
The right-hand side is a summation over all QTL for the product of the additive effect ($a$) and the scaled additive dosage ($x_A$). This equation is equivalent to parameterizations in classic quantitative trait models. The only unique aspect of additive effects in AlphaSimR is the sampling of those effects. Additive effects are sampled in two stages. The first stage involves sampling initial values and is similar to methods used by other stochastic simulation software programs. It is the second stage that is unique. This stage involves scaling the magnitude of the initial values to achieve a desired genetic variance, that being either total or additive genetic variance. The first stage of sampling additive effects is setting initial values for the effects. Initial values are sampled from either a standard normal distribution or a gamma distribution. When using a gamma distribution, the user specifies the shape parameter and the scale parameter is set to 1. The deviates sampled from a gamma distribution are randomly assigned either a positive or negative sign. Random assignment of this sign results in an expected distribution that is symmetric distribution with mean 0. The second stage is scaling the magnitude of the effects to achieve a user specified genetic variance. The user specified genetic variance can be either total or additive. The scaling procedure involves not just the additive effect, but also dominance and epistatic effects if the trait includes those effects as well. The procedure works by first calculating the variance in the founder population using the initially sampled effects and then calculating a scaling constant that is applied to all effects to achieve the desired variance in the founder population. The scaling constant equals the square-root of the target genetic variance (total or additive) divided by the square-root of the initial genetic variance (total or additive). Note that the founder population is the population the user uses to initialize the simulation parameters object. Scaling the magnitude of effects allows AlphaSimR to build simulations with trait values matching real-world counterparts. For example, a user simulating grain yield in a plant species is likely to have estimates for the means and variances in tons per hectare. The user can use those to create a simulation with the same values. The primary benefit of matching these values is to make interpretation of simulation results more intuitive. The significance of the results will be more apparent to the user because they can work in a scale that is familiar to them. It also allows the user to more easily identify potential flaws in the simulation when values move outside the range of biologically acceptable values. # Dominance Effects \begin{equation} D(x) = \sum d x_D \end{equation} The function for dominance effects is given above in equation (5). The right-hand side of the equation is a summation over all QTL for the product of the dominance effect ($d$) and the scaled dominance dosage ($x_D$). This equation is equivalent to the parameterization of dominance in classic quantitative trait models for diploid organisms. As with the additive effects, the method for sampling dominance effects in AlphaSimR requires special attention. Dominance effects are calculated in AlphaSimR using the concept of dominance degrees. The formula for this calculation is given below in equation (6). 
The equation shows that the dominance effect ($d$) at a locus is the dominance degree ($\delta$) at that locus times the absolute value of its additive effect ($a$). \begin{equation} d = \delta \left| a \right| \end{equation} The rationale behind using dominance degrees is their intuitive biological interpretation in diploid organisms. For example, a dominance degree of 0 represents no dominance and an additive model. A dominance degree of 1 corresponds to complete dominance. Dominance degrees between 0 and 1 correspond to partial dominance, and values above 1 correspond to over-dominance. Dominance effects, as with additive effects, are sampled in two stages. The first stage is sampling of initial values and the second stage is scaling the magnitude of those values. The sampling of initial effects involves two user supplied parameters, the mean and variance for a normal distribution used to sample dominance degrees. The dominance degrees are then used in conjunction with the additive effects to calculate dominance effects. The scaling procedure is then performed as described above in the additive effects section to calculate the scaling constant. The scaling constant is then directly applied to the dominance effects. Note that this scaling changes the value of the dominance effect, but does not change the value of the dominance degree. This means that the specification of the dominance degree distribution is independent of the requested genetic variance. Dominance effects become more complicated when discussing polyploid organisms. This is because polyploids have additional heterozygous genotypes. For example, an autotetraploid organism has two homozygous genotypes (0 and 4) and three heterozygous genotypes (1, 2 and 3). Each additional heterozygous genotype requires an additional dominance parameter to obtain a fully parameterized model. Several different parameterizations have been used for polyploids and a full discussion of these parameterizations is outside the scope of this document. Interested readers should refer to Gallais's textbook for more details [-@Gallais]. AlphaSimR does not include additional term to its dominance model for polyploids, so it does not make use of a fully parameterized model. Instead, AlphaSimR uses a digenic dominance model for all ploidy levels due to its use of scaled dominance genotype dosages. This model was chosen because it provides consistency in user interface and internal coding regardless of ploidy level and it provides as a reasonable approximation for partial dominance in highly polygenic traits. An unfortunate side effect of the dominance model in polyploids is that the previously described interpretation of dominance degrees breaks. Consider a single QTL with an additive effect of 1 and a dominance degree of 1. The genotypes and genetic values for the example for both a diploid and an autotetraploid organism are given below in Table 2. For the diploid organism, the heterozygous genotype is equivalent to the best homozygote, so a dominance degree of 1 indicates complete dominance. However, the autotetraploid organism has three heterozygous genotypes. The value of the middlemost heterozygous genotype is equivalent to the best homozygote, but it is not the heterozygous genotype with the highest value. The heterozygous genotype with the highest value is better than the best homozygote, so a dominance degree of 1 actually represents over-dominance in an autotetraploid. 
# Epistatic Effects

\begin{equation}
E(x) = \sum e x_{A_1} x_{A_2}
\end{equation}

The function for epistatic effects is given above in equation (7). The summation on the right-hand side is over pairs of QTL, because AlphaSimR uses a simplified model of epistasis that restricts epistatic interactions to pairs of loci. Each QTL must be present in one and only one pair, so the number of pairs is equal to half the number of QTL. The remaining elements on the right-hand side of equation (7) are the epistatic effects ($e$), the scaled additive dosage for the first locus in a pair ($x_{A_1}$) and the scaled additive dosage for the second locus in a pair ($x_{A_2}$).

The epistatic model in equation (7) is somewhat constrained. The model only allows epistasis between pairs of loci and it only models additive-by-additive epistasis. The motivation behind using this constrained model is to maintain computational tractability when using very large numbers of QTL. As with the dominance model, this model is considered a reasonable approximation for a more complicated reality.

The sampling of epistatic effects is similar to the sampling of additive effects with one additional user specified parameter for the relative ratio of the additive-by-additive epistatic variance. The relative ratio refers to the ratio between additive-by-additive epistatic variance and additive variance. This parameter sets the variance of epistatic effects. Specifically, the variance of the effects is set to a value expected to achieve the desired ratio in a random mating population whose QTL are in linkage equilibrium and have an allele frequency of 0.5. Note that most populations, including simulated populations, will not meet these assumptions, so the observed relative ratio between additive and additive-by-additive epistatic variance is unlikely to match the requested ratio. The observed additive-by-additive epistatic variance will typically be smaller, because its value is maximized at an allele frequency of 0.5. Note that unselected bi-parental populations derived from inbred parents are expected to have allele frequencies of 0.5 for segregating alleles. Thus, these populations are ideal for estimating a reasonable value for the relative ratio of additive-by-additive epistatic variance.
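A sketch of requesting additive-by-additive epistasis; `relAA` below is the relative ratio just described, and 0.5 is an arbitrary example value:

```r
# Additive-plus-epistasis trait: QTL are paired and each pair receives an
# epistatic effect. relAA requests the ratio of additive-by-additive to
# additive variance expected under random mating, linkage equilibrium and
# allele frequency 0.5; the realized ratio in other populations will differ.
SP$addTraitAE(nQtlPerChr = 100, mean = 0, var = 1, relAA = 0.5)
```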
# Genotype-by-Environment Effects

\begin{equation}
G(x, w) = w b(x)
\end{equation}

\begin{equation}
b(x) = \mu_G + \sum g x_A
\end{equation}

The function for genotype-by-environment effects is given above in equations (8) and (9). The right-hand side of equation (8) contains two parts: an environmental covariate ($w$) and a genotype specific slope ($b(x)$). The formula for the genotype specific slope is shown in equation (9). The right-hand side of this equation includes an intercept value ($\mu_G$) and a summation over all QTL for the product of a genotype-by-environment effect ($g$) and the scaled additive dosage ($x_A$).

The following paragraphs explain how the above equations model genotype-by-environment interactions. Initially, the explanation will cover a case where a population has genotype-by-environment interaction variance, but no environmental variance. An explanation for how the parameters of these equations are changed to model both genotype-by-environment interaction variance and environmental variance is given in the next section.

To begin, a description of how the variables in the above equations are sampled is needed. This is because it is much easier to understand how the formulas come together to model genotype-by-environment interactions when it is understood what the variables represent.

The environmental covariate ($w$) in equation (8) represents an environmental component of the genotype-by-environment interaction. The value of the environmental covariate is randomly sampled from a standard normal distribution. By definition, this means that the average value of the environmental covariate is zero and its variance is one. The average value of the environmental covariate is considered to be the target environment. Thus, the value for equation (8) in the target environment is always zero.

The genotype specific slope in equation (9) represents the genetic component of the genotype-by-environment interaction. The astute reader will notice that this equation is very similar to the equation for an additive trait. Indeed, the genotype specific slope is really just an additive trait. The term $\mu_G$ serves a similar role to $\mu$ in equation (1) and the summation on the right-hand side of equation (9) is similar to the function for additive effects in equation (4). Also like an additive trait, effects for the genotype specific slope are scaled to achieve a specific mean and variance in the founder population. In this case, the mean is set to zero and the variance is set to the user specified genotype-by-environment interaction variance.

To understand how the environmental covariate and the genotype specific slope model genotype-by-environment interactions it helps to review the properties of these variables. The environmental covariate is just a random variable with a mean of zero and a variance of one. The genotype specific slope is also a random variable that, with regard to the founder population, has a mean of zero and a variance equal to the genotype-by-environment variance. This means that equation (8) is just the product of two random variables. These random variables are independent, so the formula for the variance of this product is given in the equation below.

\begin{equation}
Var(w b) = E[w]^2 Var(b) + Var(w) E[b]^2 + Var(w) Var(b)
\end{equation}

Equation (10) above gives the variance for the product of $w$ and $b$ in equation (8). The term $Var()$ indicates the variance of a variable and the term $E[]$ indicates the expectation (mean) of a variable. In the founder population, the expectations for $w$ and $b$ are zero, so the first two terms on the right-hand side of the equation drop out. Equation (10) reduces to the product of the two variances. The variance of $w$ is one and the variance of $b$ equals the genotype-by-environment interaction variance, so the variance in equation (8) equals the genotype-by-environment interaction variance and is equivalent to the variance in the genotype specific slopes.

It must be repeated that the above description is limited to the founder population. This is an important point, because the mean and variance of the genotype specific slope can and will be different in other populations since it is under genetic control.
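Equation (10) is easy to verify numerically for this zero-environmental-variance case; a quick base-R check, with an assumed genotype-by-environment interaction variance of 0.3:

```r
set.seed(42)
n <- 1e6
w <- rnorm(n)                 # environmental covariate: mean 0, variance 1
b <- rnorm(n, 0, sqrt(0.3))   # genotype specific slope: mean 0, variance 0.3
var(w * b)                    # ~0.3 = Var(w) * Var(b): the GxE variance
```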
## Adding Environmental Variance

The genotype-by-environment effects model described above is altered to model a founder population with a non-zero environmental variance. All of the above equations remain relevant under the altered model and only the sampling distributions of the effects in the equations are changed. Specifically, the variance of the environmental covariate is set to the environmental variance ($\sigma_E^2$) and the genotype specific slope is set to a different mean and variance. The mean of the genotype specific slope is set to one and the variance is set to the genotype-by-environment interaction variance divided by the environmental variance ($\tfrac{\sigma_{GE}^2}{\sigma_E^2}$).

The logic behind these modifications becomes clear with an examination of equation (10). The right-hand side of equation (10) contains three terms. The first term equals zero, because the environmental covariate has a mean of zero (_i.e._ $E[w] = 0$). The second term equals the environmental variance, because the mean for the genotype specific slope equals one and the variance of the environmental covariate equals the environmental variance (_i.e._ $E[b] = 1$ and $Var(w) = \sigma_E^2$). Finally, the value of the last term equals the genotype-by-environment interaction variance (_i.e._ $Var(w) Var(b) = \sigma_E^2 \tfrac{\sigma_{GE}^2}{\sigma_E^2} = \sigma_{GE}^2$).

An examination of equation (10) also shows how both the environmental and genotype-by-environment interaction variances are population-specific parameters that are under genetic control. For example, a population with a higher average genotype specific slope will also have a higher value for the second term in equation (10). This population will thus have a higher environmental variance even though the environment itself has not changed. Likewise, the amount of genotype-by-environment interaction variance depends on the variance of the genotype specific slope.

## Relationship to Finlay-Wilkinson Regression

AlphaSimR's model for genotype-by-environment effects is effectively a biological model for Finlay-Wilkinson regression. Finlay-Wilkinson regression is a classic technique for analyzing genotype-by-environment interactions [@FinlayWilkinson]. In its simplest form, an individual's genotype-by-environment interaction is reduced to an intercept and a slope. The slope in Finlay-Wilkinson regression is roughly equivalent to the genotype specific slope in equation (9). The intercept in Finlay-Wilkinson regression is roughly equivalent to the genetic value of an individual in the target environment.

Initially, the similarity between these two models was not intentional. The model used in AlphaSimR originates with the model used by Gaynor _et al._ [-@gaynor_2017]. This model treated each QTL's additive effect as a random variable whose value depended on the value of an environmental covariate. That model is roughly equivalent to AlphaSimR's model when the founder population has zero environmental variance. The difference between the models is that AlphaSimR scales effects to exactly achieve a desired variance and the Gaynor _et al._ model randomly sampled effects so that the expectation of the sampled effects equaled the desired variance. It was only realized later that a slight change to the model, as described above, could introduce a specified amount of environmental variance and that those modifications would result in a model with properties matching those measured by Finlay-Wilkinson regression.

## User Interaction

AlphaSimR users do not directly observe nor set the value of the environmental covariate. Instead, they indirectly set its value by providing a randomly sampled p-value for each environment.
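A sketch of the interface, with assumed example variances ($\sigma_{GE}^2 = 0.3$, $\sigma_E^2 = 0.2$):

```r
# GxE trait: varGxE is the genotype-by-environment interaction variance and
# varEnv the environmental variance, both defined in the founder population
SP$addTraitAG(nQtlPerChr = 100, mean = 0, var = 1,
              varGxE = 0.3, varEnv = 0.2)

pop <- newPop(founderPop)

# p = 0.5 corresponds to the target environment (w = 0); omitting p makes
# AlphaSimR sample an environment at random
pop <- setPheno(pop, h2 = 0.5, p = 0.5)
pop <- setPheno(pop, h2 = 0.5, p = runif(1))  # a randomly drawn environment
```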
AlphaSimR then uses this p-value to calculate the appropriate value of $w$ for any level of environmental variance. Users should note that p-values follow a uniform distribution over the range of zero to one, so it is recommended that users randomly generate p-values using R's built-in `runif` function. A p-value of 0.5 corresponds to $w$ equaling zero and is equivalent to the target environment. If the user does not supply a p-value, AlphaSimR will sample one at random.

Genetic values and genetic variances reported by AlphaSimR are always for the target environment. Since the target environment corresponds to $w$ equaling zero, equation (8) is effectively ignored in these calculations. Only the values for phenotypes and phenotypic variance ever reflect a contribution from genotype-by-environment interaction.

# References
/scratch/gouwar.j/cran-all/cranData/AlphaSimR/vignettes/traits.Rmd
AlteredPQR_RB <- function (modif_z_score_threshold = 3.5, fraction_of_samples_threshold = 0.10, modif = 1, filter_variable_in_ref_set = "NO", write_table = "NO", print_recomm = "NO", quant_data_all_local = quant_data_all, cols_with_reference_data_local = cols_with_reference_data) { #globalVariables(c("quant_data_all", "cols_with_reference_data")) all_columns = 1:ncol(quant_data_all_local) cols_with_other_data = all_columns[!(all_columns%in%cols_with_reference_data_local)] if (!(is.na(cols_with_reference_data_local[1]))) { quant_data_ref_set = quant_data_all_local[, cols_with_reference_data_local] quant_data_disease = quant_data_all_local[, cols_with_other_data] } #print ("Running") ### Quantitative values for proteins in the annotated and inferred protein complexes measured_proteins = row.names (quant_data_all_local) protA_ok <- int_pairs$ProtA %in% measured_proteins protB_ok <- int_pairs$ProtB %in% measured_proteins ok_elements = protA_ok & protB_ok int_pairs <- int_pairs[ok_elements,c(1,2)] protsA_ids = as.character (int_pairs$ProtA) protsB_ids = as.character (int_pairs$ProtB) values_protA = quant_data_ref_set[protsA_ids,] values_protB = quant_data_ref_set[protsB_ids,] ### We check which protein in the pair has higher values in the reference samples and this one becomes Prot1. av_A = apply (values_protA, 1, function(x) mean(x, na.rm = TRUE)) av_B = apply (values_protB, 1, function(x) mean(x, na.rm = TRUE)) diffAB = values_protA - values_protB diffBA = values_protB - values_protA pairsAB = paste(int_pairs[,1], int_pairs[,2], sep = "-") rownames (diffAB) = pairsAB pairsBA = paste(int_pairs[,2], int_pairs[,1], sep = "-") rownames (diffBA) = pairsBA diff_prot_pairs = diffAB positions_to_change = av_A < av_B rownames (diff_prot_pairs)[positions_to_change] = rownames (diffBA)[positions_to_change] diff_prot_pairs[positions_to_change,] = diffBA[positions_to_change,] ### For later on: check there are more than 3 samples with measurements for each pair measured_samples = apply (diff_prot_pairs, 1, function (x) length (x[!is.na(x)])) measured_samples = measured_samples [measured_samples > 3] ### Calculate Median and MAD for the reference set results_table = matrix (nrow=nrow(diff_prot_pairs),ncol=6) row.names (results_table) = rownames (diff_prot_pairs) results_table [,1] = apply (diff_prot_pairs, 1, function (x) median (x, na.rm = TRUE)) results_table [,2] = apply (diff_prot_pairs, 1, function (x) mad (x, constant = 1, na.rm = TRUE)) ### And also check variability in the reference set results_table [,3] = results_table [,1] - (2*1.4826*results_table [,2]) results_table [,4] = results_table [,1] + (2*1.4826*results_table [,2]) n_reference_samples = length (cols_with_reference_data_local) temp = diff_prot_pairs < results_table[,3] results_table [,5] = apply (temp, 1, function (x) sum(x, na.rm = T)) temp = diff_prot_pairs > results_table[,4] results_table [,6] = apply (temp, 1, function (x) sum(x, na.rm = T)) allowed_n_variable = n_reference_samples*0.2 ### It does not need to be 0.2 (i.e. 20%) var_pairs1 = row.names (results_table [ results_table [,5] >= allowed_n_variable, ]) var_pairs2 = row.names (results_table [ results_table [,6] >= allowed_n_variable, ]) var_pairs = unique(c(var_pairs1, var_pairs2)) ### For each pair, calculate the Modified z-score in each disease sample (i.e. its distance to the corresponding ref samples) protsA_ids = as.character (int_pairs$ProtA) protsB_ids = as.character (int_pairs$ProtB) values_protA = quant_data_disease[protsA_ids,] values_protB = quant_data_disease[protsB_ids,] diffAB_anal = values_protA - values_protB diffBA_anal = values_protB - values_protA modif_z_score = diffAB_anal modif_z_score[positions_to_change, ] = diffBA_anal[positions_to_change, ] rownames (modif_z_score) = rownames (diff_prot_pairs) modif_z_score = (0.6745*(modif_z_score - results_table [,1]))/results_table [,2] ### Based on the defined thresholds, identify all pairs that have a stronger signal score_threshold = modif_z_score_threshold thresh_neg = -score_threshold n_samples_threshold = fraction_of_samples_threshold * (length (cols_with_other_data)) signif_z_score_high = modif_z_score>score_threshold signif_z_score_low = modif_z_score<thresh_neg per.row.high = apply (signif_z_score_high, 1, function (x) sum(x, na.rm = TRUE)) per.row.low = apply (signif_z_score_low, 1, function (x) sum(x, na.rm = TRUE)) signif.row.high = per.row.high [per.row.high >= n_samples_threshold] signif.row.low = per.row.low [per.row.low >= n_samples_threshold] n_h = names (signif.row.high) n_l = names (signif.row.low) signif.pairs = unique(c(n_h, n_l)) ### Identify proteins that were driving the signal, i.e. that showed at least a small variability in expression in the significant disease samples proteins_values = matrix (nrow=nrow(quant_data_ref_set),ncol=4) row.names (proteins_values) = rownames (quant_data_ref_set) proteins_values [,1] = apply (quant_data_ref_set, 1, function (x) median (x, na.rm = TRUE)) proteins_values [,2] = apply (quant_data_ref_set, 1, function (x) mad (x, constant = 1, na.rm = TRUE)) ### When the overall variability in protein measurements is high 'modif' can be higher than 1 # default: modif = 1 proteins_values [,3] = proteins_values [,1] - (modif*1.4826*proteins_values [,2]) # LOWER_THRESH_PROT, it can also be: 2*1.4826*proteins_values [,2] proteins_values [,4] = proteins_values [,1] + (modif*1.4826*proteins_values [,2]) # UPPER_THRESH_PROT, it can also be: 2*1.4826*proteins_values [,2] ### Check individually all significant samples and protein pairs. ### For a protein to be considered as driving the signal, its quantity had to slightly change in at least half of the significant samples.
#print ("...") signif_indiv_prots = list() for (i in signif.pairs) { values_z_scores = as.numeric(modif_z_score[i,]) signif_prots <- unlist (strsplit(i, "-")) protA = signif_prots[1] protB = signif_prots[2] if (i %in% n_h) { signif_samples_high = which(values_z_scores > score_threshold) n_signif_samples = length (signif_samples_high) protA_values = quant_data_disease[protA, signif_samples_high] protB_values = quant_data_disease[protB, signif_samples_high] A_below_limit = protA_values [protA_values < proteins_values[protA,3]] A_above_limit = protA_values [protA_values > proteins_values[protA,4]] if ((length(A_below_limit) >= (n_signif_samples*0.5)) || (length(A_above_limit) >= (n_signif_samples*0.5))) { signif_indiv_prots[[protA]][i] = sum (values_z_scores[signif_samples_high], na.rm = T) } B_below_limit = protB_values [protB_values < proteins_values[protB,3]] B_above_limit = protB_values [protB_values > proteins_values[protB,4]] if ((length(B_below_limit) >= (n_signif_samples*0.5)) || (length (B_above_limit) >= (n_signif_samples*0.5))) { signif_indiv_prots[[protB]][i] = sum (values_z_scores[signif_samples_high], na.rm = T) } } if (i %in% n_l) { signif_samples_low = which(values_z_scores < thresh_neg) n_signif_samples = length (signif_samples_low) protA_values = quant_data_disease[protA, signif_samples_low] protB_values = quant_data_disease[protB, signif_samples_low] A_below_limit = protA_values [protA_values < proteins_values[protA,3]] A_above_limit = protA_values [protA_values > proteins_values[protA,4]] if ((length(A_below_limit) >= (n_signif_samples*0.5)) || (length(A_above_limit) >= (n_signif_samples*0.5))) { signif_indiv_prots[[protA]][i] = sum (abs(values_z_scores[signif_samples_low]), na.rm = T) } B_below_limit = protB_values [protB_values < proteins_values[protB,3]] B_above_limit = protB_values [protB_values > proteins_values[protB,4]] if ((length(B_below_limit) >= (n_signif_samples*0.5)) || (length (B_above_limit) >= (n_signif_samples*0.5))) { signif_indiv_prots[[protB]][i] = sum (abs(values_z_scores[signif_samples_low]), na.rm = T) } } } #print ("...") ### Find representative pairs. This step makes sure that in instances where one protein is strongly up-, down-regulated, not all of its pairs are reported. ### For each protein that contributed to the perturbation signal in the samples with significant z scores, find the pair with the strongest overall signal, i.e. maximum sum of significant z-scores. representative_pairs = data.frame() row = 1 for (prot in names(signif_indiv_prots)) { repres_pair = which.max (signif_indiv_prots[[prot]]) ### should somehow point to the most significant, i.e. 
representative protein pair name_repres_pair = names (repres_pair)[1] element = repres_pair[[1]] highest_score = signif_indiv_prots[[prot]][element] values_z_scores = as.numeric(modif_z_score[name_repres_pair,]) if ((name_repres_pair %in% n_h) && (name_repres_pair %in% n_l)) { signif_samples_h = which(values_z_scores > score_threshold) signif_samples_l = which(values_z_scores < thresh_neg) signif_samples = unique(c(signif_samples_h, signif_samples_l)) direction = "Both_directions" } else if (name_repres_pair %in% n_h) { signif_samples = which(values_z_scores > score_threshold) direction = "Increased_ratio" } else if (name_repres_pair %in% n_l) { signif_samples = which(values_z_scores < thresh_neg) direction = "Decreased_ratio" } cols_anal_data = colnames (quant_data_disease) signif_cls = cols_anal_data[signif_samples] indiv_prots <- unlist (strsplit(name_repres_pair, "-")) protA = indiv_prots[1] protB = indiv_prots[2] if (!(is.null(signif_indiv_prots[[protA]])) && !(is.null(signif_indiv_prots[[protB]]))) { signal_contribution = paste (protA, protB, sep = "&") } else if (!(is.null(signif_indiv_prots[[protA]]))) { signal_contribution = protA } else if (!(is.null(signif_indiv_prots[[protB]]))) { signal_contribution = protB } #sign_sampl = paste (signif_samples, collapse = ",") OR, to get samples names and not column numbers: sign_sampl = paste (signif_cls, collapse = ",") representative_pairs[row, 1] = name_repres_pair representative_pairs[row, 2] = highest_score representative_pairs[row, 3] = sign_sampl representative_pairs[row, 4] = direction representative_pairs[row, 5] = signal_contribution #representative_pairs[row, 6] = results_table[name_repres_pair,1] #representative_pairs[row, 7] = results_table[name_repres_pair,2] row = row + 1 } colnames (representative_pairs) = c("Protein_pair", "Score", "Significant_samples", "Change", "Signal_contribution") # , "Median_ref_set", "MAD_ref_set" representative_pairs = unique(representative_pairs) representative_pairs = representative_pairs[order(representative_pairs[,2], decreasing = T),] #representative_pairs = representative_pairs[representative_pairs[,6] != 0,] rows_to_stay = which (representative_pairs[,1] %in% names(measured_samples)) representative_pairs = representative_pairs[rows_to_stay,] if (filter_variable_in_ref_set == "YES") { ### Drop pairs flagged as variable in the reference set representative_pairs = representative_pairs [!(representative_pairs[,1] %in% var_pairs),] } if (write_table == "YES") { write.table(file = "Representative_pairs.txt", representative_pairs, quote = F, row.names = F, sep = "\t") } if (print_recomm == "YES") { #### ACCOUNT FOR ALL OBSERVED MODIF Z-SCORES, I.E.
STANDARDIZED DISTANCES FROM THE REF POPULATION all_values = as.vector(as.matrix (modif_z_score)) q1 = quantile (all_values, 0.999, na.rm = T) q2 = quantile (all_values, 0.99, na.rm = T) q3 = quantile (all_values, 0.95, na.rm = T) q4 = quantile (all_values, 0.001, na.rm = T) q5 = quantile (all_values, 0.01, na.rm = T) q6 = quantile (all_values, 0.05, na.rm = T) all_values_abs = as.vector(as.matrix (abs(modif_z_score))) q_abs = quantile (all_values_abs, 0.99, na.rm = T) q1 = round (q1, 2) q2 = round (q2, 2) q3 = round (q3, 2) q4 = round (q4, 2) q5 = round (q5, 2) q6 = round (q6, 2) message = paste ("Top 0.1, 1 and 5% upper and lower z-score values are:", q1, q2, q3, "and", q4, q5, q6, sep = " ") message = paste (message, ".", sep = "") print (message) q_abs = round (q_abs, 2) message_two = paste ("Top 1% of the absolute values for the modified z-scores is", q_abs, sep = " ") message_two = paste (message_two, ".", sep = "") print (message_two) plot (density(all_values, na.rm = T)) } return (representative_pairs) #return(list(modif_z_scores = modif_z_score, RepresentativePairs = representative_pairs)) }
/scratch/gouwar.j/cran-all/cranData/AlteredPQR/R/AlteredPQR_RB.R
CorShift <- function (samplesA = samplesGroupA, samplesB = samplesGroupB, shift_threshold = 0.6, writeTable = FALSE, min_cor_in_samples = 0.6, cor_signif = 0.01, quant_data_all_local = quant_data_all, int_pairs_local = int_pairs) { #globalVariables(c("quant_data_all", "samplesGroupA", "samplesGroupB", "int_pairs")) quant_samplesA = quant_data_all_local[, samplesA] quant_samplesB = quant_data_all_local[, samplesB] measured_proteins = row.names (quant_data_all_local) protA_ok <- int_pairs_local$ProtA %in% measured_proteins protB_ok <- int_pairs_local$ProtB %in% measured_proteins ok_elements = protA_ok & protB_ok int_pairs_ok <- int_pairs_local[ok_elements,c(1,2)] cor_table = as.data.frame(matrix(nrow = 0, ncol = 7)) n_pairs_ok = nrow (int_pairs_ok) #print ("Comparing individual correlations...") row_nu = 1 for (r in 1:n_pairs_ok) { prot1 = int_pairs_ok [r,1] prot2 = int_pairs_ok [r,2] pair_name = paste (prot1, prot2, sep = "-") nas_protA_samplesA = is.na (as.numeric(quant_samplesA[prot1, ])) nas_protB_samplesA = is.na (as.numeric(quant_samplesA[prot2, ])) nas_protA_samplesB = is.na (as.numeric(quant_samplesB[prot1, ])) nas_protB_samplesB = is.na (as.numeric(quant_samplesB[prot2, ])) notNA_samplesA = !(nas_protA_samplesA | nas_protB_samplesA) notNA_samplesB = !(nas_protA_samplesB | nas_protB_samplesB) measured_in_samplesA = length (notNA_samplesA [as.numeric(notNA_samplesA) > 0]) measured_in_samplesB = length (notNA_samplesB [as.numeric(notNA_samplesB) > 0]) if ((measured_in_samplesA > 3) & (measured_in_samplesB > 3)) { cor_A = cor (as.numeric(quant_samplesA[prot1, ]), as.numeric(quant_samplesA[prot2, ]), use = "pairwise.complete.obs") cor_B = cor (as.numeric(quant_samplesB[prot1, ]), as.numeric(quant_samplesB[prot2, ]), use = "pairwise.complete.obs") cor_A_p = cor.test (as.numeric(quant_samplesA[prot1, ]), as.numeric(quant_samplesA[prot2, ])) cor_B_p = cor.test (as.numeric(quant_samplesB[prot1, ]), as.numeric(quant_samplesB[prot2, ])) if (((cor_A > min_cor_in_samples) && (cor_A_p$p.value < cor_signif)) || ((cor_B > min_cor_in_samples) && (cor_B_p$p.value < cor_signif))) { #jao = paste (pair_name, cor_A, cor_B, cor_A_p$p.value, cor_B_p$p.value, sep = " ") #print (jao) cor_table[row_nu, 1] = round (cor_A, 2) cor_table[row_nu, 2] = round (cor_A_p$p.value, 4) cor_table[row_nu, 3] = round (cor_B, 2) cor_table[row_nu, 4] = round (cor_B_p$p.value, 4) cor_table[row_nu, 5] = measured_in_samplesA cor_table[row_nu, 6] = measured_in_samplesB row.names (cor_table)[row_nu] = pair_name row_nu = row_nu + 1 } } } diff_pears = cor_table[,3] - cor_table[,1] cor_table[,7] = round (diff_pears, 2) cor_table = cor_table[abs(cor_table[,7]) > shift_threshold,] cor_table = cor_table [order (cor_table[,3], decreasing = T), ] colnames(cor_table) <- c('Pearson_cor_samplesA', 'cor_p_value_A', 'Pearson_cor_samplesB', 'cor_p_value_B', 'NumberOfSamplesA', 'NumberOfSamplesB', 'Correlation_shift') if (writeTable) { write.table (cor_table, file = "protCorrelationShift.txt", col.names = TRUE, row.names = TRUE, sep = "\t", quote = FALSE) } return (cor_table) } #to test the code in Rstudio: #prot_quant = read.table ("cptac_no_duplic", header = T, sep = "\t") #pairs = read.table ("ints_prot_compl_db_vers_Aug18", header = F, stringsAsFactors = F, sep = "\t") #samplesGroupA = c (7, 9, 11, 19, 20, 24, 26, 28, 31, 32, 34, 37, 41, 44, 51, 52, 54, 57, 59, 66, 70, 72, 77) #samplesGroupB = c (2, 4, 12, 15, 18, 22, 25, 39, 40, 48, 58, 60, 63, 64, 68, 69, 76, 78, 80) #cor_results = CorShift() #mini tests #pairs = read.table ("ints_temp",
header = F, stringsAsFactors = F, sep = "\t") #prot1 = "O60879" #prot2 = "O75122"
/scratch/gouwar.j/cran-all/cranData/AlteredPQR/R/CorShift.R
globalVariables(c("int_pairs", "quant_data_all", "cols_with_reference_data", "samplesGroupA", "samplesGroupB", "int_pairs"))
/scratch/gouwar.j/cran-all/cranData/AlteredPQR/R/globals.R
### R code from vignette source 'AlteredPQR.Rnw' ################################################### ### code chunk number 1: AlteredPQR.Rnw:16-17 ################################################### library(AlteredPQR) ################################################### ### code chunk number 2: AlteredPQR.Rnw:22-24 ################################################### data("int_pairs", package = "AlteredPQR") data("quant_data_all", package = "AlteredPQR") ################################################### ### code chunk number 3: AlteredPQR.Rnw:34-35 ################################################### cols_with_reference_data = 1:23 ################################################### ### code chunk number 4: AlteredPQR.Rnw:40-41 ################################################### RepresentativePairs = AlteredPQR_RB() ################################################### ### code chunk number 5: AlteredPQR.Rnw:46-47 ################################################### head (RepresentativePairs) ################################################### ### code chunk number 6: AlteredPQR.Rnw:65-67 ################################################### samplesGroupA = 1:23 samplesGroupB = (1+23):(23+18) ################################################### ### code chunk number 7: AlteredPQR.Rnw:72-73 ################################################### cor_results = CorShift() ################################################### ### code chunk number 8: AlteredPQR.Rnw:78-79 ################################################### head (cor_results)
/scratch/gouwar.j/cran-all/cranData/AlteredPQR/inst/doc/AlteredPQR.R
## amcheck.r ## Function for checking for errors in coding ## of the data or input vectors ## ## 21/10/05 - now converts variable names to column numbers, stops if variable doesn't exist; returns codes and messages, doesn't stop execution ## 04/05/06 mb - moved parameter vs. obs check to prep, checks outname ## 10/07/06 mb - fixed handling of variance checks with no fully observed rows. ## 17/07/06 mb - stops if variable only has one observed value. ## 02/08/06 mb - fixed handling of character variables. ## 25/09/06 mb - fixed handling of errors in output writing. ## 13/12/06 mb - removed dropping of extra priors, added new priors ## 15/12/06 mb - fixed problem of nrow(priors)==5 ## 22/07/08 mb - good coding update: T->TRUE/F->FALSE ## 27/03/10 jh - added checks for splinetime amcheck <- function(x,m=5,p2s=1,frontend=FALSE,idvars=NULL,logs=NULL, ts=NULL,cs=NULL,means=NULL,sds=NULL, mins=NULL,maxs=NULL,conf=NULL,empri=NULL, tolerance=0.0001,polytime=NULL,splinetime=NULL,startvals=0,lags=NULL, leads=NULL,intercs=FALSE,archive=TRUE,sqrts=NULL, lgstc=NULL,noms=NULL,incheck=TRUE,ords=NULL,collect=FALSE, arglist=NULL, priors=NULL,bounds=NULL, max.resample=1000, overimp = NULL, emburn=NULL, boot.type=NULL) { #Checks for errors in list variables listcheck<-function(vars,optname) { if (identical(vars,NULL)) return(0) if (mode(vars) == "character") { if (any(is.na(match(vars,colnames(x))))) { mess<-paste("The following variables are referred to in the", optname,"argument, but are not columns in the data:", vars[is.na(match(vars,colnames(x)))]) return(list(1,mess)) } return(0) } if (any(vars>AMp,vars<0,vars%%1!=0)) { mess<-paste(optname," is out of the range of \n", "possible column numbers or is not an integer.") return(list(2,mess)) } return(0) } #Checks for errors in logical variables logiccheck<-function(opt,optname) { if (!identical(opt,NULL)) { if (length(opt) > 1) { mess<-paste("The",optname,"setting is longer than one logical.") return(list(1,mess)) } if (mode(opt) != "logical") { mess<-paste("The",optname,"setting is not a logical (TRUE/FALSE) value.") return(list(2,mess)) } } else { mess<-paste("The",optname,"setting cannot be NULL. Please change to TRUE/FALSE.") return(list(3,mess)) } return(0) } #Checks for errors in priors variables priorcheck<-function(opt,optname) { if (!identical(opt,NULL)) { if (!is.matrix(opt)) { mess<-paste("The", optname,"matrix is not a matrix.\n") return(list(1,mess)) } if (is.character(opt)) { mess<-paste("The", optname,"matrix is a character matrix.\n", "Please change it to a numeric matrix.") return(list(2,mess)) } if (any(dim(opt)!=dim(x))) { mess<-paste("The", optname,"matrices must have the same dimensions\n", "as the data.") return(list(3,mess)) } } return(0) } error.code <- 1 #Error Code: 3 #Arguments point to variables that do not exist.
if (inherits(try(get("x"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the data argument doesn't exist."))) if (inherits(try(get("m"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'm' argument doesn't exist."))) if (inherits(try(get("idvars"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'idvars' argument doesn't exist."))) if (inherits(try(get("means"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'means' argument doesn't exist."))) if (inherits(try(get("sds"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'sds' argument doesn't exist."))) if (inherits(try(get("mins"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'mins' argument doesn't exist."))) if (inherits(try(get("maxs"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'maxs' argument doesn't exist."))) if (inherits(try(get("conf"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'conf' argument doesn't exist."))) if (inherits(try(get("empri"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'empri' argument doesn't exist."))) if (inherits(try(get("ts"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'ts' argument doesn't exist."))) if (inherits(try(get("cs"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'cs' argument doesn't exist."))) if (inherits(try(get("tolerance"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'tolerance' argument doesn't exist."))) if (inherits(try(get("polytime"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'polytime' argument doesn't exist."))) if (inherits(try(get("splinetime"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'splinetime' argument doesn't exist."))) if (inherits(try(get("lags"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'lags' argument doesn't exist."))) if (inherits(try(get("leads"),silent=TRUE),"try-error") ) return(list(code=3,mess=paste("The setting for the 'leads' argument doesn't exist."))) if (inherits(try(get("logs"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'logs' argument doesn't exist."))) if (inherits(try(get("sqrts"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'sqrts' argument doesn't exist."))) if (inherits(try(get("lgstc"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'lgstc' argument doesn't exist."))) if (inherits(try(get("p2s"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'p2s' argument doesn't exist."))) if (inherits(try(get("frontend"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'frontend' argument doesn't exist."))) if (inherits(try(get("intercs"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'intercs' argument doesn't exist."))) if (inherits(try(get("noms"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'noms' argument doesn't exist."))) if (inherits(try(get("startvals"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'startvals' argument doesn't exist."))) if (inherits(try(get("ords"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting 
for the 'ords' argument doesn't exist."))) if (inherits(try(get("collect"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'collect' argument doesn't exist."))) AMn<-nrow(x) AMp<-ncol(x) subbedout<-c(idvars,cs,ts) if (is.null(idvars)) idcheck <- c(1:AMp) else idcheck <- -idvars ## Error Code: 4 ## Completely missing columns if (any(colSums(!is.na(x[,idcheck])) <= 1)) { all.miss <- colnames(x[,idcheck])[colSums(!is.na(x[,idcheck])) <= 1] if (is.null(all.miss)) { all.miss <- which(colSums(!is.na(x[,idcheck])) <= 1) } all.miss <- paste(all.miss, collapse = ", ") error.code<-4 error.mess<-paste("The data has a column that is completely missing or only has one,observation. Remove these columns:", all.miss) return(list(code=error.code,mess=error.mess)) } #Error codes: 5-6 #Errors in one of the list variables idout<-listcheck(idvars,"One of the 'idvars'") if (!identical(idout,0)) return(list(code=(idout[[1]]+4),mess=idout[[2]])) lagout<-listcheck(lags,"One of the 'lags'") if (!identical(lagout,0)) return(list(code=(lagout[[1]]+4),mess=lagout[[2]])) leadout<-listcheck(leads,"One of the 'leads'") if (!identical(leadout,0)) return(list(code=(leadout[[1]]+4),mess=leadout[[2]])) logout<-listcheck(logs,"One of the 'logs'") if (!identical(logout,0)) return(list(code=(logout[[1]]+4),mess=logout[[2]])) sqout<-listcheck(sqrts,"One of the 'sqrts'") if (!identical(sqout,0)) return(list(code=(sqout[[1]]+4),mess=sqout[[2]])) lgout<-listcheck(lgstc,"One of the 'lgstc'") if (!identical(lgout,0)) return(list(code=(lgout[[1]]+4),mess=lgout[[2]])) tsout<-listcheck(ts,"The 'ts' variable") if (!identical(tsout,0)) return(list(code=(tsout[[1]]+4),mess=tsout[[2]])) csout<-listcheck(cs,"The 'cs' variable") if (!identical(csout,0)) return(list(code=(csout[[1]]+4),mess=csout[[2]])) nomout<-listcheck(noms,"One of the 'noms'") if (!identical(nomout,0)) return(list(code=(nomout[[1]]+4),mess=nomout[[2]])) ordout<-listcheck(ords,"One of the 'ords'") if (!identical(ordout,0)) # THIS FORMERLY READ "NOMOUT" return(list(code=(ordout[[1]]+4),mess=ordout[[2]])) # priors errors if (!identical(priors,NULL)) { # Error code: 7 # priors isn't a matrix if (!is.matrix(priors)) { error.code <- 7 error.mess <- "The priors argument is not a matrix." return(list(code=error.code, mess=error.mess)) } # Error code: 8 # priors is not numeric if (!is.numeric(priors)) { error.code <- 7 error.mess <- paste("The priors matrix is non-numeric. It should\n", "only have numeric values.") return(list(code=error.code, mess=error.mess)) } # Error code: 47 # priors matrix has the wrong dimensions if (ncol(priors) != 4 & ncol(priors) != 5) { error.code <- 47 error.mess <- paste("The priors matrix has the wrong numberof columns.\n", "It should have either 4 or 5 columns.",) return(list(code=error.code, mess=error.mess)) } if (nrow(priors) > nrow(x)*ncol(x)) { error.code <- 47 error.mess <- "There are more priors than there are observations." return(list(code=error.code, mess=error.mess)) } # Error code: 48 # NAs in priors matrix if (any(is.na(priors))) { error.code <- 48 error.mess <- "There are missing values in the priors matrix." return(list(code=error.code, mess=error.mess)) } # Error code: 49 # multiple priors set if (any(duplicated(priors[,1:2]))) { error.code <- 49 error.mess <- "Multiple priors set on one observation or variable." 
return(list(code=error.code,mess=error.mess)) } prior.cols <- priors[,2] %in% c(1:ncol(x)) prior.rows <- priors[,1] %in% c(0:nrow(x)) ## Error code: 9 ## priors set for cells that aren't in the data if (sum(c(!prior.cols,!prior.rows)) != 0) { error.code <- 9 error.mess <- "There are priors set on cells that don't exist." return(list(code=error.code,mess=error.mess)) } ## Error code: 59 ## no priors on nominal variables if (any(priors[,2] %in% noms)) { error.code <- 59 error.mess <- "Cannot set priors on nominal variables. " return(list(code = error.code, mess = error.mess)) } ## Error code: 60 ## no priors on ID variables if (any(priors[,2] %in% idvars)) { error.code <- 60 error.mess <- "Cannot set priors on ID variables. " return(list(code = error.code, mess = error.mess)) } ## Error code: 12 ## confidences have to be in 0-1 if (ncol(priors) == 5) { if (any(priors[,5] <= 0) || any(priors[,5] >= 1)) { error.code<-12 error.mess<-paste("The priors confidences matrix has values that are less \n", "than or equal to 0 or greater than or equal to 1.") return(list(code=error.code,mess=error.mess)) } } } #Error code: 10 #Square roots with negative values if (!is.null(sqrts)) { if (sum(colSums(x[,sqrts, drop = FALSE] < 0, na.rm = T))) { neg.vals <- colnames(x[,sqrts, drop = FALSE])[colSums(x[,sqrts, drop = FALSE] < 0, na.rm = T) > 0] if (is.null(neg.vals)) neg.vals <- sqrts[colSums(x[,sqrts, drop = FALSE] < 0, na.rm = T) > 0] neg.vals <- paste(neg.vals, collapse = ", ") error.code<-10 error.mess<-paste("The square root transformation cannot be used on variables with negative values. See column(s):", neg.vals) return(list(code=error.code,mess=error.mess)) } } #warning message #logs with negative values if (!is.null(logs)) { triggered<-FALSE for(localindex in 1:length(logs)){ if(!triggered){ if (any(na.omit(x[,logs[localindex]]) < 0)) { warning(paste("The log transformation is being used on \n", "variables with negative values. The values \n", "will be shifted up by 1 plus the minimum value \n", "of that variable.")) triggered<-TRUE } } } } #Error code: 11 #0-1 Bounds on logistic transformations if (!identical(lgstc,NULL)) { lgstc.check <- colSums(x[,lgstc,drop=FALSE] <= 0 | x[,lgstc,drop=FALSE] >= 1, na.rm = TRUE) if (sum(lgstc.check)) { neg.vals <- colnames(x[,lgstc,drop=FALSE])[lgstc.check > 0] if (is.null(neg.vals)) neg.vals <- lgstc[lgstc.check > 0] neg.vals <- paste(neg.vals, collapse = ", ") error.code<-11 error.mess<-paste("The logistic transformation can only be used on values between 0 and 1.
See column(s):", neg.vals) return(list(code=error.code,mess=error.mess)) } } #Error code: 12 #Confidence Intervals for priors bounded to 0-1 # if (!identical(conf,NULL)) { # if (any(conf <= 0,conf>=1,na.rm=T)) { # error.code<-12 # error.mess<-paste("The priors confidences matrix has values that are less \n", # "than or equal to 0 or greater than or equal to 1.") # return(list(code=error.code,mess=error.mess)) # } # } #Error code: 13 #Can't set all variables to 'idvar' if (!identical(idvars,NULL)) { if ((AMp-1) <= length(idvars)) { error.code<-13 error.mess<-paste("You cannot set all variables (or all but one) as ID variables.") return(list(code=error.code,mess=error.mess)) } } ## Error code: 14 ## ts canonot equal cs if (!identical(ts,NULL) && !identical(cs,NULL)) { if (ts==cs) { error.code<-14 error.mess<-paste("Time series and cross-sectional variables cannot be the same.") return(list(code=error.code,mess=error.mess)) } } #Error code: 15 #TS is more than one integer if (!identical(ts,NULL)) { if (length(ts) > 1) { error.code<-15 error.mess<-paste("The time series variable option is longer than one integer.") return(list(code=error.code,mess=error.mess)) } } #Error code: 16 #CS is more than one integer if (!identical(cs,NULL)) { if (length(cs) > 1) { error.code<-16 error.mess<-paste("The cross section variable option is longer than one integer.") return(list(code=error.code,mess=error.mess)) } } ## if (!identical(casepri,NULL)) { ## #Error code: 17 ## #Case prior must be in a matrix ## if (!is.matrix(casepri)) { ## error.code<-17 ## error.mess<-paste("The case priors should be in a martix form.") ## return(list(code=error.code,mess=error.mess)) ## } ## #Error code: 18 ## #CS must be specified with case priors ## if (identical(cs,NULL)) { ## error.code<-18 ## error.mess<-paste("The cross-sectional variable must be set in order to use case priors.") ## return(list(code=error.code,mess=error.mess)) ## } ## #Error code: 19 ## #Case priors have the wrong dimensions ## if (sum(dim(casepri) == c(length(unique(data[,cs])),length(unique(data[,cs])))) != 2) { ## error.code<-19 ## error.mess<-paste("The case priors have the wrong dimensions. 
It should \n", ## "have rows and columns equal to the number of cases.") ## return(list(code=error.code,mess=error.mess)) ## } ## #Error code: 20 ## #Case prior values are out of bounds ## if (all(casepri != 0,casepri!=1,casepri!=2,casepri!=3)) { ## error.code<-20 ## error.mess<-paste("The case priors can only have values 0, 1, 2, or 3.") ## return(list(code=error.code,mess=error.mess)) ## } ## } #check polynomials if (!identical(polytime,NULL)) { #Error code: 21 #Polynomials of time are longer than one integer if (length(polytime) > 1) { error.code<-21 error.mess<-paste("The polynomials of time setting is greater than one integer.") return(list(code=error.code,mess=error.mess)) } if (!is.numeric(polytime)) { error.code<-22 error.mess<-paste("The setting for polytime is not a number.") return(list(code=error.code,mess=error.mess)) } if ((polytime %% 1) != 0) { error.code<-23 error.mess<-paste("The number of polynomial terms to include for time (polytime) must be an integer.") return(list(code=error.code,mess=error.mess)) } if (any(polytime > 3,polytime < 0)) { error.code<-24 error.mess<-paste("The number of polynomial terms to include must be between 1 and 3.") return(list(code=error.code,mess=error.mess)) } if (identical(ts,NULL)) { error.code<-25 error.mess<-paste("You have set polynomials of time without setting the time series variable.") return(list(code=error.code,mess=error.mess)) } if (all(!intercs,identical(polytime,0))) { warning(paste("You've set the polynomials of time to zero with no interaction with \n", "the cross-sectional variable. This has no effect on the imputation.")) } } if (!identical(splinetime,NULL)) { #Error code: 54 #Spline of time are longer than one integer if (length(polytime) > 1) { error.code<-54 error.mess<-paste("The spline of time setting is greater than one integer.") return(list(code=error.code,mess=error.mess)) } if (!is.numeric(splinetime)) { error.code<-55 error.mess<-paste("The setting for splinetime is not a number.") return(list(code=error.code,mess=error.mess)) } if ((splinetime %% 1) != 0) { error.code<-56 error.mess<-paste("The number of spline degrees of freedom to include for time (splinetime) must be an integer.") return(list(code=error.code,mess=error.mess)) } if (any(splinetime > 6,splinetime < 0)) { error.code<-57 error.mess<-paste("The number of spline degrees of freedom to include must be between 0 and 6.") return(list(code=error.code,mess=error.mess)) } if (identical(ts,NULL)) { error.code<-58 error.mess<-paste("You have set splines of time without setting the time series variable.") return(list(code=error.code,mess=error.mess)) } if (all(!intercs,identical(polytime,0))) { warning(paste("You've set the spline of time to zero with no interaction with \n", "the cross-sectional variable. 
This has no effect on the imputation.")) } } #checks for intercs if (identical(intercs,TRUE)) { if (identical(cs,NULL)) { error.code<-27 error.mess<-paste("You have indicated an interaction with the cross section \n", "without setting the cross section variable.") return(list(code=error.code,mess=error.mess)) } if (length(unique(x[,cs])) > (1/3)*(AMn)) { error.code<-28 error.mess<-paste("There are too many cross-sections in the data to use an \n", "interaction between polynomial of time and the cross-section.") return(list(code=error.code,mess=error.mess)) } if (sum(is.na(x[,cs])) > 0) { error.code <- 60 error.mess <- paste("There are missing values in the 'cs' variable.") return(list(code=error.code,mess=error.mess)) } } #Error codes: 29-31 #logical variable errors interout<-logiccheck(intercs,"cross section interaction") if (!identical(interout,0)) return(list(code=(28+interout[[1]]),mess=interout[[2]])) #p2sout<-logiccheck(p2s,"print to screen") #if (!identical(p2sout,0)) # return(list(code=(p2sout[[1]]+28),mess=p2sout[[2]])) frout<-logiccheck(frontend,"frontend") if (!identical(frout,0)) return(list(code=(frout[[1]]+28),mess=frout[[2]])) collout<-logiccheck(collect,"archive") if (!identical(collout,0)) return(list(code=(collout[[1]]+28),mess=collout[[2]])) #Error code: 32 #Transformations must be mutually exclusive if (length(unique(c(logs,sqrts,lgstc,noms,ords,idvars))) != length(c(logs,sqrts,lgstc,noms,ords,idvars))) { error.code<-32 error.mess<-paste("Transfomations must be mutually exclusive, so one \n", "variable can only be assigned one transformation. You have the \n", "same variable designated for two transformations.") return(list(code=error.code,mess=error.mess)) } #Error code: 33 #ts/cs variables can't be transformed if (any(unique(c(logs,sqrts,lgstc,noms,ords,idvars)) == ts,unique(c(logs,sqrts,lgstc,noms,ords,idvars)) == cs)) { error.code<-33 error.mess<-paste("The time series and cross sectional variables cannot be transformed.") return(list(code=error.code,mess=error.mess)) } #Error code: 35 #tolerance must be greater than zero if (tolerance <= 0) { error.code<-35 error.mess<-paste("The tolerance option must be greater than zero.") return(list(code=error.code,mess=error.mess)) } #check nominals if (!identical(noms,NULL)) { for (i in noms) { #Error code: 36 #too many levels on noms if (length(unique(na.omit(x[,i]))) > (1/3)*(AMn)) { bad.var <- colnames(x)[i] if (is.null(bad.var)) bad.var <- i error.code<-36 error.mess<-paste("The number of categories in the nominal variable \'",bad.var,"\' is greater than one-third of the observations.", sep = "") return(list(code=error.code,mess=error.mess)) } if (length(unique(na.omit(x[,i]))) > 10) warning("\n\nThe number of categories in one of the variables marked nominal has greater than 10 categories. Check nominal specification.\n\n") if (all(i==cs,intercs==TRUE)) { noms<-noms[noms!=i] warning("The cross sectional variable was set as a nominal variable. 
Its nominal status has been dropped.") } } } if (is.null(c(noms,ords,idvars,cs))) fact <- c(1:AMp) else fact <- -c(noms,ords,idvars,cs) if (is.null(c(cs,idvars))) idcheck <- c(1:AMp) else idcheck <- -c(cs,idvars) ##Error code: 37 ##factors out of the noms,ids,ords,cs if (is.data.frame(x)) { if (length(x[,fact])) { if (sum(sapply(x[,fact],is.factor))) { bad.var <- colnames(x[,fact])[sapply(x[,fact],is.factor)] if (is.null(bad.var)) bad.var <- setdiff(which(sapply(x,is.factor)), -fact) bad.var <- paste(bad.var, collapse = ", ") error.code<-37 error.mess<-paste("The following variable(s) are 'factors': ", bad.var, "You may have wanted to set this as a ID variable to remove it", "from the imputation model or as an ordinal or nominal", "variable to be imputed. Please set it as either and", "try again.", sep = "\n") return(list(code=error.code,mess=error.mess)) } if (sum(sapply(x[,fact],is.ordered))) { bad.var <- colnames(x[,fact])[sapply(x[,fact],is.ordered)] if (is.null(bad.var)) bad.var <- setdiff(which(sapply(x,is.ordered)), -fact) bad.var <- paste(bad.var, collapse = ", ") error.code<-37 error.mess<-paste("The following variable(s) are 'factors': ", bad.var, "You may have wanted to set this as a ID variable to remove it", "from the imputation model or as an ordinal or nominal", "variable to be imputed. Please set it as either and", "try again.", sep = "\n") return(list(code=error.code,mess=error.mess)) } if (sum(sapply(x[,fact],is.character))) { bad.var <- colnames(x[,fact])[sapply(x[,fact],is.character)] if (is.null(bad.var)) bad.var <- setdiff(which(sapply(x,is.character)), -fact) bad.var <- paste(bad.var, collapse = ", ") error.code<-38 error.mess<-paste("The following variable(s) are characters: ", paste("\t",bad.var), "You may have wanted to set this as a ID variable to remove it", "from the imputation model or as an ordinal or nominal", "variable to be imputed. Please set it as either and", "try again.", sep = "\n") return(list(code=error.code,mess=error.mess)) } } } else { if (!is.numeric(x)) { error.code <- 38 error.mess <- paste("The \'x\' matrix is not numeric.") return(list(code=error.code,mess=error.mess)) } } #Error code: 39 #No missing observation if (!any(is.na(x[,idcheck,drop=FALSE])) & is.null(overimp)) { error.code<-39 error.mess<-paste("Your data has no missing values. Make sure the code for \n", "missing data is set to the code for R, which is NA.") return(list(code=error.code,mess=error.mess)) } #Error code: 40 #lags require ts if (!is.null(lags)) { if (is.null(ts)) { error.code<-40 error.mess<-paste("You need to specify the time variable in order to create lags.") return(list(code=error.code,mess=error.mess)) } } #Error code: 41 #leads require ts if (!is.null(leads)) { if (is.null(ts)) { error.code<-41 error.mess<-paste("You need to specify the time variable in order to create leads.") return(list(code=error.code,mess=error.mess)) } } #Error code: 42 #Only 1 column of data if (AMp==1) { error.code<-42 error.mess<-paste("There is only 1 column of data. Cannot impute.") return(list(code=error.code,mess=error.mess)) } ## catch problems when the only other variable is an unused ## cross-section. if (!isTRUE(intercs) & ncol(x[,idcheck, drop = FALSE]) == 1) { error.code<-42 error.mess<-paste("There is only 1 column of data. 
Cannot impute.") return(list(code=error.code,mess=error.mess)) } ts.nulls <- is.null(polytime) & is.null(splinetime) ts.zeros <- (polytime == 0) & (splinetime == 0) if (!isTRUE(polytime > 0) & !isTRUE(splinetime > 0)) { if (!isTRUE(intercs) & !is.null(ts)) { if (ncol(x[,-c(ts,cs,idvars), drop = FALSE]) == 1) { error.code<-61 error.mess<-paste("There is only 1 column of data after removing the ts, cs and idvars. Cannot impute without adding polytime.") return(list(code=error.code,mess=error.mess)) } } } #Error code: 43 #Variable that doesn't vary ## note that this will allow the rare case that a user only has ## variation in a variable when all of the other variables are missing ## in addition to having no variation in the listwise deleted ## dataset. Our starting value function should be robust to this. num.nonmissing <- function(obj) length(unique(na.omit(obj))) if (is.data.frame(x)) { non.vary <- sapply(x[,idcheck, drop = FALSE], num.nonmissing) } else { non.vary <- apply(x[,idcheck, drop = FALSE], 2, num.nonmissing) } if (sum(non.vary == 1)) { non.names <- colnames(x[,idcheck])[non.vary == 1] if (is.null(non.names)) { hold <- rep(-1, ncol(x)) hold[-idcheck] <- non.vary non.names <- which(hold == 0) } non.names <- paste(non.names, collapse = ", ") error.code<-43 error.mess<-paste("You have a variable in your dataset that does not vary. Please remove this variable. Variables that do not vary: ", non.names) return(list(code=error.code,mess=error.mess)) } ## } else { ## if (nrow(na.omit(x)) > 1) { ## if (any(diag(var(x[,idcheck],na.rm=TRUE))==0)) { ## error.code<-43 ## error.mess<-paste("You have a variable in your dataset that does not vary. Please remove this variable.") ## return(list(code=error.code,mess=error.mess)) ## } ## } else { ## for (i in 1:ncol(x[,idcheck])) { ## if (var(x[,i],na.rm=TRUE) == 0) { ## error.code<-43 ## error.mess<-paste("You have a variable in your dataset that does not vary. Please remove this variable.") ## return(list(code=error.code,mess=error.mess)) ## } ## } ## } ## } #checks for ordinals if (!is.null(ords)) { for (i in ords) { #Error code: 44 # Ordinal variable with non-integers (factors work by design, and they're # harder to check if (!is.factor(x[,i])) { if (any(unique(na.omit(x[,i])) %% 1 != 0 )) { non.ints <- colnames(x)[i] if (is.null(non.ints)) non.ints <- i error.code<-44 error.mess<-paste("You have designated the variable \'",non.ints, "\' as ordinal when it has non-integer values.", sep = "") return(list(code=error.code,mess=error.mess)) } } } } ## #checks for outname ## if (write.out==TRUE) { ## if (!is.character(outname)) { ## outname<-"outdata" ## warning("The output filename (outname) was not a character. It has been set it ## its default 'outdata' in the working directory.") ## } ## #Error code: 45 ## #output file errors ## outtest<-try(write.csv("test",file=paste(outname,"1.csv",sep="")),silent=TRUE) ## if (inherits(outtest,"try-error")) { ## error.code<-45 ## error.mess<-paste("R cannot write to the outname you have specified. 
Please ## check","that the directory exists and that you have permission to write.",sep="\n") ## return(list(code=error.code,mess=error.mess)) ## } ## tmpdir<- strsplit(paste(outname,"1.csv",sep=""),.Platform$file.sep) ## am.dir <- tmpdir[[1]][1] ## if (length(tmpdir[[1]]) > 1) ## for (i in 2:(length(tmpdir[[1]]))) ## am.dir <- file.path(am.dir, tmpdir[[1]][i]) ## file.remove(am.dir) ## } # if (xor(!identical(means,NULL),!identical(sds,NULL))) { # means<-NULL # sds<-NULL # warning("Both the means and the SDs have to be set in order to use observational priors. The priors have been removed from the analysis.") # } # if (sum(!identical(mins,NULL),!identical(maxs,NULL),!identical(conf,NULL)) != 3 && # sum(!identical(mins,NULL),!identical(maxs,NULL),!identical(conf,NULL)) != 0) { # mins<-NULL # maxs<-NULL # conf<-NULL # warning("Not all of the range parameters were set for the observational priors. They have been removed.") # } #checks of m if (!is.numeric(m)) { m<-5 warning("The number of imputations ('m') was a non-numeric. The value was changed to the default.") } if ((m %% 1) != 0) { m<-5 warning("The number of imputation ('m') was not an integer. The value was changed to the default (5).") } if (m<=0) { m<-5 warning("The number of imputations ('m') must be greater than 0. The value was changed to the default (5).") } # checks for bounds if (!is.null(bounds)) { b.size <- is.matrix(bounds) && ncol(bounds)==3 && nrow(bounds) > 0 b.cols <- sum(bounds[,1] %in% c(1:AMp)) == nrow(bounds) maxint <- max.resample > 0 && (max.resample %% 1)==0 # Error 50: # wrong sized bounds matrix if (!b.size) { error.code<-50 error.mess<-paste("The bounds argument is a three-column matrix.") return(list(code=error.code,mess=error.mess)) } # Error 51: # nonexistant columns in bounds. if (!b.cols) { error.code<-51 error.mess<-paste("One of the bounds is on a non-existant column.") return(list(code=error.code,mess=error.mess)) } # Error 52: # max.resample needs to be positive integer. if (!maxint) { error.code<-52 error.mess<-paste("The max.resample argument needs to be a positive integer.") return(list(code=error.code,mess=error.mess)) } } if (!is.null(overimp)) { o.num <- is.numeric(overimp) o.size <- (is.matrix(overimp) & ncol(overimp) == 2) | length(overimp) == 2 o.cols <- all(unique(overimp[,2]) %in% 1:ncol(x)) o.rows <- all(unique(overimp[,1]) %in% 1:nrow(x)) ## Error 53: ## overimp not numeric if (!o.num | !o.size) { error.code <- 53 error.mess <- "The overimp matrix needs to be a two-column numeric matrix." return(list(code=error.code,mess=error.mess)) } ## Error 54: ## overimp out of range if (!o.rows | !o.cols) { error.code <- 54 error.mess <- "A row/column pair in overimp is outside the range of the data." return(list(code=error.code,mess=error.mess)) } } if (is.data.frame(x)) { is.posix <- function(x) inherits(x, c("POSIXt", "POSIXct", "POSIXlt")) posix.check <- sapply(x, is.posix) if (any(is.na(x[, posix.check]))) { stop("NA in POSIXt variable: remove or convert to numeric") } } if (!is.null(emburn)) { if (length(emburn) != 2) { stop("emburn must be length 2") } } if (!is.null(boot.type)) { if (!(boot.type %in% c("ordinary", "none"))) { stop("boot.type must be either 'ordinary' or 'none'") } } if (is.data.frame(x)) { if (sum(sapply(x, length) == 0)) { bad.var <- colnames(x)[sapply(x,length) == 0] if (is.null(bad.var)) bad.var <- which(sapply(x,length) == 0) bad.var <- paste(bad.var, collapse = ", ") error.code <- 53 error.mess<-paste("The variable(s)",bad.var,"have length 0 in the data frame. 
Try removing these variables or reimporting the data.") return(list(code=error.code,mess=error.mess)) } } if (nrow(na.omit(x[,idcheck,drop=FALSE])) > ncol(x[,idcheck,drop=FALSE])) { if (is.data.frame(x)) { lmcheck <- lm(I(rnorm(AMn))~ ., data = x[,idcheck, drop = FALSE]) } else { lmcheck <- lm(I(rnorm(AMn))~ ., data = as.data.frame(x[,idcheck, drop = FALSE])) } if (any(is.na(coef(lmcheck)))) { bad.var <- names(coef(lmcheck))[which(is.na(coef(lmcheck)))] if (length(bad.var) == 1) { warning(paste("The variable", bad.var, "is perfectly collinear with another variable in the data.\n")) } else { bad.var <- paste(bad.var, collapse = ", ") warning(paste("The variables (or variable with levels)", bad.var, "are perfectly collinear with another variable in the data.\n")) } } } return(list(m=m,priors=priors)) }
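## A minimal sketch (not part of the package) of how a caller might
## consume this check function's return value: on failure it returns
## list(code=..., mess=...), and on success list(m=..., priors=...).
## The data frame `df` and the exact argument list are hypothetical.
##
##   checked <- amcheck(x = df, m = 5, ...)
##   if (!is.null(checked$code)) {
##     stop("amcheck error ", checked$code, ": ", checked$mess)
##   } else {
##     m <- checked$m
##     priors <- checked$priors
##   }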
#' Interactive GUI for Amelia #' #' @name ameliagui #' #' @description #' Brings up the AmeliaView graphical interface, which allows users #' to load datasets, manage options and run Amelia from a traditional #' windowed environment. #' #' @usage AmeliaView() #' @keywords utilities main.close<-function() { qvalue<-tcltk::tkmessageBox(parent=getAmelia("gui"), message="Are you sure you want to exit Amelia?", icon="question", type="okcancel", default="cancel") if (tcltk::tclvalue(qvalue)=="ok") { tcltk::tkdestroy(getAmelia("gui")) } } setWorkingDir <- function() { newwd <- tcltk::tkchooseDirectory(parent = getAmelia("gui"), initialdir = getwd(), title = "Set output directory...", mustexist = TRUE) if (tcltk::tclvalue(newwd) != "") setwd(tcltk::tclvalue(newwd)) return(NULL) } loadStata <- function() { filetype <- c("{{Stata files} {.dta}} {{All files} *}") putAmelia("am.filename", tcltk::tclvalue(tcltk::tkgetOpenFile(parent=getAmelia("gui"), filetypes=filetype))) if (getAmelia("am.filename") == "") return(NULL) if (!is.null(getAmelia("amelia.data"))) { sure<-tcltk::tkmessageBox(parent=getAmelia("gui"), message="If you load another dataset, your current settings will be erased. Are you sure you want to load the new data?",icon="question",type="yesno") if (tcltk::tclvalue(sure) == "no") return(NULL) } putAmelia("amelia.data",try(read.dta(getAmelia("am.filename"),convert.factors=FALSE))) putAmelia("am.filetype", "Stata") if (inherits(getAmelia("amelia.data"), "try-error")) { tcltk::tkmessageBox(parent=getAmelia("gui"), message="Failure in loading the data. Try again.",icon="error",type="ok") putAmelia("amelia.data",NULL) return(NULL) } activateGUI() } loadSPSS <- function() { filetype <- c("{{SPSS} {.sav}} {{All files} *}") putAmelia("am.filename", tcltk::tclvalue(tcltk::tkgetOpenFile(parent=getAmelia("gui"), filetypes=filetype))) if (getAmelia("am.filename") == "") return(NULL) if (!is.null(getAmelia("amelia.data"))) { sure<-tcltk::tkmessageBox(parent=getAmelia("gui"), message="If you load another dataset, your current settings will be erased. Are you sure you want to load the new data?",icon="question",type="yesno") if (tcltk::tclvalue(sure) == "no") return(NULL) } putAmelia("amelia.data",try(read.spss(getAmelia("am.filename"),use.value.labels=FALSE,to.data.frame=TRUE))) putAmelia("am.filetype", "SPSS") if (inherits(getAmelia("amelia.data"), "try-error")) { tcltk::tkmessageBox(parent=getAmelia("gui"), message="Failure in loading the data. Try again.",icon="error",type="ok") putAmelia("amelia.data",NULL) return(NULL) } activateGUI() } loadSAS <- function() { filetype <- c("{{SAS Transport} {.xpt}} {{All files} *}") putAmelia("am.filename", tcltk::tclvalue(tcltk::tkgetOpenFile(parent=getAmelia("gui"), filetypes=filetype))) if (getAmelia("am.filename") == "") return(NULL) if (!is.null(getAmelia("amelia.data"))) { sure<-tcltk::tkmessageBox(parent=getAmelia("gui"), message="If you load another dataset, your current settings will be erased. Are you sure you want to load the new data?",icon="question",type="yesno") if (tcltk::tclvalue(sure) == "no") return(NULL) } putAmelia("amelia.data",try(read.xport(getAmelia("am.filename")))) putAmelia("am.filetype", "SAS") if (inherits(getAmelia("amelia.data"), "try-error")) { tcltk::tkmessageBox(parent=getAmelia("gui"), message="Failure in loading the data. 
Try again.",icon="error",type="ok") putAmelia("amelia.data",NULL) return(NULL) } activateGUI() } loadTAB <- function() { filetype <- c("{{Tab-delimited files} {.txt .tab .dat}} {{All files} *}") putAmelia("am.filename", tcltk::tclvalue(tcltk::tkgetOpenFile(parent=getAmelia("gui"), filetypes=filetype))) if (getAmelia("am.filename") == "") return(NULL) if (!is.null(getAmelia("amelia.data"))) { sure<-tcltk::tkmessageBox(parent=getAmelia("gui"), message="If you load another dataset, your current settings will be erased. Are you sure you want to load the new data?",icon="question",type="yesno") if (tcltk::tclvalue(sure) == "no") return(NULL) } putAmelia("amelia.data",try(read.table(getAmelia("am.filename"),header=TRUE))) putAmelia("am.filetype", "TAB") if (inherits(getAmelia("amelia.data"), "try-error")) { tcltk::tkmessageBox(parent=getAmelia("gui"), message="Failure in loading the data. Try again.",icon="error",type="ok") putAmelia("amelia.data",NULL) return(NULL) } activateGUI() } loadCSV <- function() { filetype <- c("{{Comma-delimited files} {.csv}} {{All files} *} ") putAmelia("am.filename", tcltk::tclvalue(tcltk::tkgetOpenFile(parent=getAmelia("gui"), filetypes=filetype))) if (getAmelia("am.filename") == "") return(NULL) if (!is.null(getAmelia("amelia.data"))) { sure<-tcltk::tkmessageBox(parent=getAmelia("gui"), message="If you load another dataset, your current settings will be erased. Are you sure you want to load the new data?",icon="question",type="yesno") if (tcltk::tclvalue(sure) == "no") return(NULL) } putAmelia("amelia.data",try(read.csv(getAmelia("am.filename"),header=TRUE))) putAmelia("am.filetype", "CSV") if (inherits(getAmelia("amelia.data"), "try-error")) { tcltk::tkmessageBox(parent=getAmelia("gui"), message="Failure in loading the data. Try again.",icon="error",type="ok") putAmelia("amelia.data",NULL) return(NULL) } activateGUI() } loadRData <- function() { onOK <- function() { putAmelia("amelia.data", eval(as.name(tcltk::tclvalue(tcltk::tkget(objectChooser))))) tcltk::tkdestroy(chooseObjectWindow) tcltk::tkfocus(getAmelia("gui")) tcltk::tkgrab.release(chooseObjectWindow) activateGUI() return() } onCancel <- function() { rm(list=getAmelia("amelia.data")) tcltk::tkdestroy(chooseObjectWindow) tcltk::tkfocus(getAmelia("gui")) tcltk::tkgrab.release(chooseObjectWindow) return() } filetype <- c("{{R Data files} {.RData .Rdata .Rda .rda}} {{All files} *} ") putAmelia("am.filename", tcltk::tclvalue(tcltk::tkgetOpenFile(parent=getAmelia("gui"), filetypes=filetype))) if (getAmelia("am.filename") == "") return(NULL) if (!is.null(getAmelia("amelia.data"))) { sure <- tcltk::tkmessageBox(parent = getAmelia("gui"), message = "If you load another dataset, your current settings will be erased. Are you sure you want to load the new data?", icon = "question", type = "yesno") if (tcltk::tclvalue(sure) == "no") return(NULL) } putAmelia("amelia.data",try(load(getAmelia("am.filename")))) putAmelia("am.filetype", "RData") if (inherits(getAmelia("amelia.data"), "try-error")) { tcltk::tkmessageBox(parent=getAmelia("gui"), message="Failure in loading the data. 
Try again.",icon="error",type="ok") putAmelia("amelia.data",NULL) return(NULL) } if (length(getAmelia("amelia.data")) == 1) { putAmelia("amelia.data", eval(as.name(getAmelia("amelia.data")))) } else { datasets <- sapply(getAmelia("amelia.data"), function(x) is.data.frame(eval(as.name(x)))) datasets <- getAmelia("amelia.data")[datasets] chooseObjectWindow <- tcltk::tktoplevel(parent=getAmelia("gui")) tcltk::tkwm.title(chooseObjectWindow, "Find Data Set") chooseFrame <- tcltk::ttkframe(chooseObjectWindow) objectChooser <- tcltk::ttkcombobox(chooseFrame, width = 20) tcltk::tkconfigure(objectChooser, values = datasets) tcltk::tkset(objectChooser, datasets[1]) objectOK <- tcltk::ttkbutton(chooseFrame, text = "OK", width = 10, command = onOK) objectCancel <- tcltk::ttkbutton(chooseFrame, text = "Cancel", width = 10, command = onCancel) tcltk::tkgrid(tcltk::ttklabel(chooseFrame, text = "Please select your dataset from the following objects:"), row = 0, column = 0, columnspan = 2, padx = 10, pady = 10) tcltk::tkgrid(objectChooser, row = 1, column = 0, columnspan = 2, padx = 10, pady = 10) tcltk::tkgrid(objectOK, row = 2, column = 0, padx = 10, pady = 10) tcltk::tkgrid(objectCancel, row = 2, column = 1, padx = 10, pady = 10) tcltk::tkgrid(chooseFrame, padx = 10, pady = 10) tcltk::tkgrab(chooseObjectWindow) tcltk::tkfocus(chooseObjectWindow) tcltk::tkwm.protocol(chooseObjectWindow, "WM_DELETE_WINDOW", onCancel) centerModalDialog(chooseObjectWindow, resize=FALSE) } return() } loadDemo <- function(name) { if (!is.null(getAmelia("amelia.data"))) { sure<-tcltk::tkmessageBox(parent=getAmelia("gui"), message="If you load another dataset, your current settings will be erased. Are you sure you want to load the new data?",icon="question",type="yesno") if (tcltk::tclvalue(sure) == "no") return(NULL) } data(list=name, package="Amelia", envir = ameliaEnv) putAmelia("amelia.data", eval(as.name(name))) putAmelia("am.filetype", "demo") putAmelia("am.filename", name) activateGUI() } drawMissMap <- function() { dev.new() missmap(getAmelia("amelia.data"), csvar = getAmelia("csvar"), tsvar = getAmelia("tsvar")) } activateGUI <- function(session = FALSE) { temp.list <- strsplit(getAmelia("am.filename"),"/")[[1]] if (getAmelia("am.filetype") != "demo") { temp.list <- strsplit(getAmelia("am.filename"),"/")[[1]] putAmelia("am.directory", paste(temp.list[-length(temp.list)],"",sep="/",collapse="")) setwd(getAmelia("am.directory")) } else { putAmelia("am.directory", getwd()) } filename <- temp.list[length(temp.list)] dotList <- strsplit(filename, "\\.")[[1]] if (length(dotList) > 1) dotList <- dotList[-length(dotList)] filestub <- paste(paste(dotList, collapse = "."), "-imp", sep="") putAmelia("varnames" , names(getAmelia("amelia.data"))) tcltk::tkgrid.remove(getAmelia("error.label")) tcltk::tkgrid.remove(getAmelia("allgood.label")) tcltk::tkgrid(getAmelia("noimps.label"), row = 2, column = 7, sticky ="e", padx = 10) ## Get rid of welcome frame if (as.logical(tcltk::tkwinfo("ismapped", getAmelia("gui.welcome")))) { tcltk::tkgrid.remove(getAmelia("gui.welcome")) tcltk::tkgrid(getAmelia("gui.skel"), row = 0, column = 0, sticky ="news") tcltk::tkgrid(getAmelia("statusbar"), sticky = "sew") } ## initialize values ## turn on various forms and buttons tcltk::tkconfigure(getAmelia("output.run"), state = "normal") #tcltk::tkconfigure(getAmelia("output.entry"), textvariable=getAmelia("outname")) #tcltk::tkconfigure(getAmelia("output.num"), textvariable=getAmelia("outnum")) tcltk::tkentryconfigure(getAmelia("main.menu.file"),"Edit 
Data...", state="normal") tcltk::tkentryconfigure(getAmelia("main.menu.options"),"Draw Missingness Map", state="normal") tcltk::tkentryconfigure(getAmelia("main.menu.file"),"Save Session...", state = "normal") tcltk::tkentryconfigure(getAmelia("main.menu.options"),"Output File Type...", state = "normal") tcltk::tkentryconfigure(getAmelia("main.menu.options"),"Output File Options...", state = "normal") tcltk::tkconfigure(getAmelia("missmapButton"), state = "normal") tcltk::tkconfigure(getAmelia("editDataButton"), state = "normal") tcltk::tkconfigure(getAmelia("plotHistButton"), state = "normal") tcltk::tkconfigure(getAmelia("showLogButton"), state = "disabled") fillMainTree() ## Mark factors as ID by default. classes <- sapply(getAmelia("amelia.data"), class) factorVars <- which(classes == "factor" | classes == "character") if (!session) { opt.holder <- vector("numeric",ncol(getAmelia("amelia.data"))) names(opt.holder) <- getAmelia("varnames") putAmelia("noms", opt.holder) putAmelia("ords", opt.holder) putAmelia("logs", opt.holder) putAmelia("sqrt", opt.holder) putAmelia("lgstc", opt.holder) putAmelia("idvar", opt.holder) putAmelia("lags", opt.holder) putAmelia("leads", opt.holder) boundsholder <- matrix(NA, nrow = ncol(getAmelia("amelia.data")), ncol = 3) boundsholder[,1] <- 1:ncol(getAmelia("amelia.data")) rownames(boundsholder) <- getAmelia("varnames") putAmelia("num.poly",tcltk::tclVar("0")) putAmelia("intercs",tcltk::tclVar("0")) putAmelia("priorsmat", NULL) putAmelia("boundsmat", boundsholder) putAmelia("max.resample", tcltk::tclVar("1000")) putAmelia("outname", tcltk::tclVar(filestub)) putAmelia("outnum", tcltk::tclVar("5")) putAmelia("empri", tcltk::tclVar("0")) putAmelia("tsvar", NULL) putAmelia("csvar", NULL) id.holder <- opt.holder id.holder[factorVars] <- 1 putAmelia("idvar", id.holder) for (i in factorVars) { tcltk::tkset(getAmelia("main.tree"), getAmelia("varnames")[i], "transform", "ID") } } else { for (i in factorVars) { if (all(getAmelia("idvar")[i]==0, getAmelia("csvar")!=getAmelia("varnames")[i],getAmelia("noms")[i]==0)) { tcltk::tcl(getAmelia("main.tree"), "item", getAmelia("varnames")[i], image = getAmelia("redFlagIcon")) } } } tcltk::tkentryconfigure(getAmelia("main.menu.options"), "Add Observations Priors...", state="normal") tcltk::tkentryconfigure(getAmelia("main.menu.options"), "Numerical Options", state="normal") ## add the filename and rows/cols to statusbar tcltk::tkconfigure(getAmelia("statusbar.lab1b"), text = getAmelia("am.filename"), foreground = "blue") tcltk::tkconfigure(getAmelia("statusbar.n"), text = paste(nrow(getAmelia("amelia.data"))), foreground = "blue") tcltk::tkconfigure(getAmelia("statusbar.k"), text = paste(ncol(getAmelia("amelia.data"))), foreground = "blue") } save.session <- function() { if (is.null(getAmelia("amelia.data"))) { tcltk::tkmessageBox(parent=getAmelia("gui"), message="You must load a dataset before you can save a session.", icon="error", type="ok") return(NULL) } file.select <- tcltk::tclvalue(tcltk::tkgetSaveFile(parent=getAmelia("gui"), filetypes="{{RData files} {.RData}} {{All files} *}")) putAmelia("session.flag", TRUE) sessionList <- c("am.directory","amelia.data", "am.filename", "am.filetype", "boundsmat", "csvar", "idvar", "lags", "leads", "lgstc", "logs", "noms", "num.poly", "ords", "outname.value", "outnum.value", "output.log", "outtype.value", "priorsmat", "runState", "seed.value", "session.flag", "splinestime.value", "sqrt", "tol.value", "tsvar", "empri.value", "intercs.value", "max.resample.value", "ameliaObject") 
putAmelia("empri.value", tcltk::tclvalue(getAmelia("empri"))) putAmelia("intercs.value", tcltk::tclvalue(getAmelia("intercs"))) putAmelia("max.resample.value", tcltk::tclvalue(getAmelia("max.resample"))) putAmelia("outname.value", tcltk::tclvalue(getAmelia("outname"))) putAmelia("outnum.value", tcltk::tclvalue(getAmelia("outnum"))) putAmelia("outtype.value", tcltk::tclvalue(getAmelia("outtype"))) putAmelia("seed.value", tcltk::tclvalue(getAmelia("seed"))) putAmelia("tol.value", tcltk::tclvalue(getAmelia("tol"))) putAmelia("splinestime.value", tcltk::tclvalue(getAmelia("splinestime"))) save(list = sessionList, envir=ameliaEnv, file = file.select) return(NULL) } load.session <- function() { ## diaglog to get RData file file.select <- tcltk::tclvalue(tcltk::tkgetOpenFile(parent=getAmelia("gui"), filetypes= "{{RData files} {.RData}} {{All files} *}")) if (nchar(file.select) <= 0) return(NULL) ## try loading the RData file and stop if it doesn't work tryloadsess <- try(load(file=file.select, envir=ameliaEnv), silent=TRUE) if (inherits(tryloadsess,"try-error")) { tcltk::tkmessageBox(parent=getAmelia("gui"),message="Error loading session. This is not a valid session file.",icon="error",type="ok") return(NULL) } ## make sure that the RData file loaded the right list if (!("session.flag" %in% ls(ameliaEnv)) | !getAmelia("session.flag")) { tcltk::tkmessageBox(parent=getAmelia("gui"), message="Not an Amelia session file. Try again.",icon="error",type="ok") return(NULL) } activateGUI(session = TRUE) nn <- ncol(getAmelia("amelia.data")) if (!is.null(getAmelia("tsvar"))) { tcltk::tcl(getAmelia("main.tree"), "item", getAmelia("tsvar"), image = getAmelia("clockIcon")) tcltk::tkentryconfigure(getAmelia("main.menu.options"),0, state="normal") for (i in 1:nn) { if (getAmelia("lags")[i] == 1) tcltk::tkset(getAmelia("main.tree"), getAmelia("varnames")[i], "lag", "X") if (getAmelia("leads")[i] == 1) tcltk::tkset(getAmelia("main.tree"), getAmelia("varnames")[i], "lead", "X") } } if (!is.null(getAmelia("csvar"))) { tcltk::tcl(getAmelia("main.tree"), "item", getAmelia("csvar"), image = getAmelia("userIcon")) tcltk::tkentryconfigure(getAmelia("main.menu.options"), 1, state="normal") tcltk::tkentryconfigure(getAmelia("main.menu.options"), 1, variable = getAmelia("intercs")) } for (i in 1:nn) { if (getAmelia("idvar")[i] == 1) tcltk::tkset(getAmelia("main.tree"), getAmelia("varnames")[i], "transform", "ID") if (getAmelia("ords")[i] == 1) tcltk::tkset(getAmelia("main.tree"), getAmelia("varnames")[i], "transform", "Ordinal") if (getAmelia("noms")[i] == 1) tcltk::tkset(getAmelia("main.tree"), getAmelia("varnames")[i], "transform", "Nominal") if (getAmelia("logs")[i] == 1) tcltk::tkset(getAmelia("main.tree"), getAmelia("varnames")[i], "transform", "Log") if (getAmelia("sqrt")[i] == 1) tcltk::tkset(getAmelia("main.tree"), getAmelia("varnames")[i], "transform", "Square Root") if (getAmelia("lgstc")[i] == 1) tcltk::tkset(getAmelia("main.tree"), getAmelia("varnames")[i], "transform", "Logistic") } for (i in 1:nn) { bdMin <- getAmelia("boundsmat")[i,2] bdMax <- getAmelia("boundsmat")[i,3] if (!is.na(bdMin)) { treeBounds <- paste("[",bdMin,", ", bdMax,"]", sep = "") } else { treeBounds <- "" } tcltk::tkset(getAmelia("main.tree"), getAmelia("varnames")[i], "bounds", treeBounds) } tcltk::tcl("set", getAmelia("seed"), getAmelia("seed.value")) tcltk::tcl("set", getAmelia("tol"), getAmelia("tol.value")) tcltk::tcl("set", getAmelia("empri"), getAmelia("empri.value")) tcltk::tcl("set", getAmelia("outname"), getAmelia("outname.value")) 
tcltk::tcl("set", getAmelia("outnum"), getAmelia("outnum.value")) tcltk::tcl("set", getAmelia("outtype"), getAmelia("outtype.value")) tcltk::tcl("set", getAmelia("intercs"), getAmelia("intercs.value")) tcltk::tcl("set", getAmelia("splinestime"), getAmelia("splinestime.value")) tcltk::tcl("set", getAmelia("max.resample"), getAmelia("max.resample.value")) tcltk::tkgrid.remove(getAmelia("noimps.label")) tcltk::tkgrid.remove(getAmelia("error.label")) tcltk::tkgrid.remove(getAmelia("allgood.label")) tcltk::tkgrid(getAmelia(paste(getAmelia("runState"),"label", sep = ".")), row = 2, column = 7, sticky ="e", padx = 10) if (getAmelia("runState") != "noimps") { tcltk::tkentryconfigure(getAmelia("main.menu.output"), "Output Log", state="normal") tcltk::tkconfigure(getAmelia("showLogButton"), state = "normal") } if (getAmelia("runState") == "allgood") { tcltk::tkentryconfigure(getAmelia("main.menu.output"), 0, state = "normal") tcltk::tkentryconfigure(getAmelia("main.menu.output"), 2, state = "normal") resave <- tcltk::tkmessageBox(parent = getAmelia("gui"), message = "Re-save imputed data sets to the working directory?", icon = "question", default = "yes", type = "yesno") if (tcltk::tclvalue(resave) == "yes") { amelia.save(getAmelia("ameliaObject"), tcltk::tclvalue(getAmelia("outname")), as.numeric(tcltk::tclvalue(getAmelia("outnum")))) } } return(NULL) } run.amelia <- function() { save.type <- as.numeric(tcltk::tclvalue(getAmelia("outtype"))) if (file.access(getwd(), mode = 2) == -1 & !(save.type %in% c(0,6))) { tcltk::tkmessageBox(parent = getAmelia("gui"), message = "The current working directory is not writable. Please select a different working directory or chose to not save the imputed data sets.", type ="ok") return(NULL) } ## Let's not allow people to overwrite their data. temp.list <- strsplit(getAmelia("am.filename"),"/")[[1]] filename <- temp.list[length(temp.list)] outfiles <- paste(tcltk::tclvalue(getAmelia("outname")), 1:as.numeric(tcltk::tclvalue(getAmelia("outnum"))), sep ="") save.type <- as.numeric(tcltk::tclvalue(getAmelia("outtype"))) exten <- switch(save.type, "csv","txt","dta","dta","RData") outfiles <- paste(outfiles, exten, sep = ".") outfiles <- paste(paste(temp.list[-length(temp.list)], collapse = "/"), outfiles, sep = "/") if (getAmelia("am.filename") %in% outfiles) { tcltk::tkmessageBox(parent = getAmelia("gui"), message = "Current settings would overwrite the original data. 
Please change the output file name.", icon = "error", type ="ok") return(NULL) } ts <- getAmelia("tsvar") cs <- getAmelia("csvar") nn <- ncol(getAmelia("amelia.data")) am.intercs <- as.logical(as.numeric(tcltk::tclvalue(getAmelia("intercs")))) sptime <- as.numeric(tcltk::tclvalue(getAmelia("splinestime"))) if (sptime == 0) if (am.intercs == FALSE) sptime <- NULL if (is.null(ts)) sptime <- NULL if (is.null(cs)) am.intercs <- FALSE id <- getAmelia("varnames")[getAmelia("idvar")==1] ord <- getAmelia("varnames")[getAmelia("ords")==1] nom <- getAmelia("varnames")[getAmelia("noms")==1] logs <- getAmelia("varnames")[getAmelia("logs")==1] sqrts <- getAmelia("varnames")[getAmelia("sqrt")==1] lgstc <- getAmelia("varnames")[getAmelia("lgstc")==1] amlags<- getAmelia("varnames")[getAmelia("lags")==1] amfut <- getAmelia("varnames")[getAmelia("leads")==1] if (length(id) == 0) id <- NULL if (length(ord) == 0) ord <- NULL if (length(nom) == 0) nom <- NULL if (length(logs) == 0) logs <- NULL if (length(sqrts)== 0) sqrts<- NULL if (length(lgstc)== 0) lgstc<- NULL if (length(amlags)==0) amlags <- NULL if (length(amfut)== 0) amfut<- NULL pmat <- getAmelia("priorsmat") colnames(pmat) <- NULL rownames(pmat) <- NULL bdmat <- getAmelia("boundsmat") colnames(bdmat) <- NULL rownames(bdmat) <- NULL bdmat <- bdmat[!is.na(bdmat[,2]) & !is.na(bdmat[,3]),,drop=FALSE] if (nrow(bdmat) == 0) bdmat <- NULL tol <- as.numeric(tcltk::tclvalue(getAmelia("tol"))) max.re <- as.numeric(tcltk::tclvalue(getAmelia("max.resample"))) num.imp <- as.numeric(tcltk::tclvalue(getAmelia("outnum"))) emp <- as.numeric(tcltk::tclvalue(getAmelia("empri"))) if (!is.na(as.numeric(tcltk::tclvalue(getAmelia("seed"))))) set.seed(as.numeric(tcltk::tclvalue(getAmelia("seed")))) tcltk::tkgrid.remove(getAmelia("noimps.label")) tcltk::tkgrid.remove(getAmelia("error.label")) tcltk::tkgrid.remove(getAmelia("allgood.label")) tcltk::tkgrid(getAmelia("runAmeliaProgress"), row = 2, column = 7, sticky ="e", padx = 10) amcall <- substitute(amelia(x = getAmelia("amelia.data"), m = num.imp, idvars = id, ts = ts, cs= cs, priors = pmat, lags = amlags, empri = emp, intercs = am.intercs, leads = amfut, splinetime = sptime, logs = logs, sqrts = sqrts, lgstc = lgstc, ords = ord, noms = nom, bounds = bdmat, max.resample = max.re, tolerance= tol)) putAmelia("output.log", c(getAmelia("output.log"), sub(" ","\n ",deparse(amcall, control=NULL, width.cutoff=60)),"\n\n")) putAmelia("wdForLastImputation", getwd()) ## run amelia! or at least try, and put the output in a list ## the name of the list will be the output name set by user output.connection <- textConnection(".Output", open="w", local = TRUE) sink(output.connection, type="output") putAmelia("ameliaObject", try(amelia.default(x = getAmelia("amelia.data"), m = as.numeric(tcltk::tclvalue(getAmelia("outnum"))), p2s = 1, idvars = id, ts = ts, cs = cs, priors = pmat, lags = amlags, empri = as.numeric(tcltk::tclvalue(getAmelia("empri"))), intercs = am.intercs, leads = amfut, splinetime = sptime, frontend = TRUE, logs = logs, sqrts = sqrts, lgstc = lgstc, ords = ord, noms = nom, bounds = bdmat, max.resample = as.numeric(tcltk::tclvalue(getAmelia("max.resample"))), tolerance= as.numeric(tcltk::tclvalue(getAmelia("tol")))), silent=TRUE)) sink(type = "output") putAmelia("output.log", c(getAmelia("output.log"), paste(textConnectionValue(output.connection), "\n"))) tcltk::tkgrid.remove(getAmelia("runAmeliaProgress")) tcltk::tkconfigure(getAmelia("runAmeliaProgress"), value = 0) ## check for errors in the process. 
if (inherits(getAmelia("ameliaObject"),"try-error")) { putAmelia("output.log", c(getAmelia("output.log"),"\nThere was an unexpected error in the execution of Amelia. \nDouble check all inputs for errors and take note of the error message:\n\n")) putAmelia("output.log", c(getAmelia("output.log"),paste(getAmelia("ameliaObject")))) #tcltk::tkconfigure(getAmelia("pass.fail.label"), foreground = "red") #tmp <- getAmelia("pass.fail") #tcltk::tclvalue(tmp) <- "Error! See log." show.output.log() tcltk::tkentryconfigure(getAmelia("main.menu.output"), 1, state = "normal") tcltk::tkconfigure(getAmelia("showLogButton"), state = "normal") tcltk::tkgrid(getAmelia("error.label"), row = 2, column = 7, sticky ="e", padx = 10) putAmelia("runState", "error") return(NULL) } if (all(getAmelia("ameliaObject")$code!=c(1,2))) { putAmelia("output.log", c(getAmelia("output.log"),"\n")) putAmelia("output.log", c(getAmelia("output.log"),paste("Amelia Error Code:", getAmelia("ameliaObject")[[1]],"\n", getAmelia("ameliaObject")[[2]]))) #tcltk::tkconfigure(getAmelia("pass.fail.label"), foreground = "red") #tmp <- getAmelia("pass.fail") #tcltk::tclvalue(tmp) <- "Error! See log." show.output.log() tcltk::tkentryconfigure(getAmelia("main.menu.output"), 1, state = "normal") tcltk::tkconfigure(getAmelia("showLogButton"), state = "normal") tcltk::tkgrid(getAmelia("error.label"), row = 2, column = 7, sticky ="e", padx = 10) putAmelia("runState", "error") } else { putAmelia("output.log", c(getAmelia("output.log"),"Amelia has run successfully.\n")) tcltk::tkentryconfigure(getAmelia("main.menu.output"), 0, state = "normal") tcltk::tkentryconfigure(getAmelia("main.menu.output"), 1, state = "normal") tcltk::tkentryconfigure(getAmelia("main.menu.output"), 2, state = "normal") tcltk::tkconfigure(getAmelia("showLogButton"), state = "normal") amelia.save(getAmelia("ameliaObject"), tcltk::tclvalue(getAmelia("outname")), as.numeric(tcltk::tclvalue(getAmelia("outnum")))) tcltk::tkgrid(getAmelia("allgood.label"), row = 2, column = 7, sticky ="e", padx = 10) putAmelia("runState", "allgood") } } amelia.save <- function(out,outname,m) { save.type <- as.numeric(tcltk::tclvalue(getAmelia("outtype"))) if (save.type == 1) { write.amelia(out, file.stem = outname, format = "csv", row.names = FALSE) } if (save.type == 2) { write.amelia(out, file.stem = outname, extension = "txt", format = "table", row.names = FALSE) } if (save.type == 3) { write.amelia(out, file.stem = outname, format = "dta", version = 6) } if (save.type == 4) { write.amelia(out, file.stem = outname, format = "dta", version = 7) } if (save.type == 5) { write.amelia(out, file.stem = outname, format = "dta", version = 8) } if (save.type == 6) { write.amelia(out, file.stem = outname, format = "dta", version = 10) } if (save.type == 7) { write.amelia(out, file.stem = outname, format = "dta", separate = FALSE, version = 10) } if (save.type == 8) { save(list = "ameliaObject", envir = ameliaEnv, file = paste(outname, ".RData", sep = "")) } } set.out<-function(...) 
{ putAmelia("output.select",as.numeric(tcltk::tkget(getAmelia("output.drop.box")))) } setTS <- function() { tsvartemp <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"),"selection")), " ")[[1]] if (length(tsvartemp) > 1) { tcltk::tkmessageBox(parent=getAmelia("gui"), message="Only one variable can be set as the times-series variable.",icon="error",type="ok") return(NULL) } if (!is.null(getAmelia("csvar"))) { if (getAmelia("csvar") == tsvartemp) { tcltk::tkmessageBox(parent=getAmelia("gui"), message="A variable cannot be both the time-series and cross-section index.",icon="error",type="ok") return(NULL) } } if (!(sapply(getAmelia("amelia.data"), class)[tsvartemp] %in% c("numeric","integer"))) { tcltk::tkmessageBox(parent=getAmelia("gui"), message="The time-series index must be numeric.",icon="error",type="ok") return(NULL) } children <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"),"children","")), " ")[[1]] for(i in setdiff(children, getAmelia("csvar"))) tcltk::tcl(getAmelia("main.tree"), "item", i , image="") tcltk::tcl(getAmelia("main.tree"), "item", tsvartemp, image = getAmelia("clockIcon")) putAmelia("tsvar", tsvartemp) tcltk::tkentryconfigure(getAmelia("main.menu.options"),0, state="normal") dropTrans() } unsetTS <- function() { tsvartemp <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"),"selection")), " ")[[1]] sure<-tcltk::tkmessageBox(parent=getAmelia("gui"), message="If you unset the time-series variable, you will lose any time-series settings such as lags, leads, or polynomials of time. Unset the time-series variable?",icon="question",type="yesno") if (tcltk::tclvalue(sure) == "no") return(NULL) tcltk::tcl(getAmelia("main.tree"), "item", tsvartemp, image = "") putAmelia("tsvar", NULL) tcltk::tkentryconfigure(getAmelia("main.menu.options"),0, state="disabled") putAmelia("lags",vector("numeric",ncol(getAmelia("amelia.data")))) putAmelia("leads",vector("numeric",ncol(getAmelia("amelia.data")))) children <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"),"children","")), " ")[[1]] for(i in children) { tcltk::tkset(getAmelia("main.tree"), i, "lag", "") tcltk::tkset(getAmelia("main.tree"), i, "lead", "") } } unsetCS <- function() { csvartemp <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"),"selection")), " ")[[1]] sure<-tcltk::tkmessageBox(parent=getAmelia("gui"), message="If you unset the cross-section variable, you will lose any cross-section settings. 
Unset the cross-section variable?",icon="question",type="yesno") if (tcltk::tclvalue(sure) == "no") return(NULL) tcltk::tcl(getAmelia("main.tree"), "item", csvartemp, image = "") putAmelia("csvar", NULL) tcltk::tkentryconfigure(getAmelia("main.menu.options"),0, state="normal") if (is.factor(getAmelia("amelia.data")[,csvartemp]) | is.character(getAmelia("amelia.data")[,csvartemp])) { tcltk::tcl(getAmelia("main.tree"), "item", csvartemp, image = getAmelia("redFlagIcon")) } } setCS <- function() { csvartemp <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"),"selection")), " ")[[1]] if (length(csvartemp) > 1) { tcltk::tkmessageBox(parent=getAmelia("gui"), message="Only one variable can be set as the cross-section variable.",icon="error",type="ok") return(NULL) } if (!is.null(getAmelia("tsvar"))) { if (getAmelia("tsvar") == csvartemp) { tcltk::tkmessageBox(parent=getAmelia("gui"), message="A variable cannot be both the time-series and cross-section index.",icon="error",type="ok") return(NULL) } } if (!is.null(getAmelia("csvar"))) { if (is.factor(getAmelia("amelia.data")[,getAmelia("csvar")]) | is.character(getAmelia("amelia.data")[,getAmelia("csvar")])) { tcltk::tcl(getAmelia("main.tree"), "item", getAmelia("csvar"), image = getAmelia("redFlagIcon")) } else { tcltk::tcl(getAmelia("main.tree"), "item", getAmelia("csvar"), image = "") } } dropTrans() tcltk::tcl(getAmelia("main.tree"), "item", csvartemp, image = getAmelia("userIcon")) putAmelia("csvar", csvartemp) tcltk::tkentryconfigure(getAmelia("main.menu.options"),1,state="normal") tcltk::tkentryconfigure(getAmelia("main.menu.options"), 1, variable = getAmelia("intercs")) } fillMainTree <- function() { children <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"),"children","")), " ")[[1]] tcltk::tkdelete(getAmelia("main.tree"), children) for (i in names(getAmelia("amelia.data"))) { if (is.factor(getAmelia("amelia.data")[,i]) | is.character(getAmelia("amelia.data")[,i])) { vals <- c("","","","","(factor)","...","...","...") vals <- c(vals,paste(sum(is.na(getAmelia("amelia.data")[,i])), nrow(getAmelia("amelia.data")), sep="/")) } else { vals <- c(min(getAmelia("amelia.data")[,i],na.rm=T), max(getAmelia("amelia.data")[,i],na.rm=T), mean(getAmelia("amelia.data")[,i],na.rm=T), sd(getAmelia("amelia.data")[,i],na.rm=T)) vals <- signif(vals, digits = 4) vals <- c("","","","", vals, paste(sum(is.na(getAmelia("amelia.data")[,i])), nrow(getAmelia("amelia.data")), sep="/")) } tcltk::tkinsert(getAmelia("main.tree"),"","end", id = i,tag="normal",text = i, values = vals) } bandTree() return() } #' Interactive GUI for Amelia #' #' Brings up the AmeliaView graphical interface, which allows users to load datasets, #' manage options and run Amelia from a traditional windowed environment. #' #' @details #' Requires the tcltk package. 
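#'
#' @examples
#' \dontrun{
#' ## launch the GUI (interactive use only)
#' AmeliaView()
#' }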
#'
AmeliaView<-function() {
  ##Preamble
  requireNamespace("tcltk") || stop("The package 'tcltk' is required")

  if (.Platform$OS.type != "windows") {
    tcltk::tcl("ttk::style", "theme", "use", "clam")
    tcltk::tkfont.configure("TkHeadingFont", weight="normal")
    tcltk::tkfont.configure("TkCaptionFont", weight="normal")
  }

  ## If the current working directory is not writable, move to a
  ## sensible default location: the HOME dir
  if (file.access(getwd(), mode = 2) == -1) {
    if (file.access(Sys.getenv("HOME"), mode = 0) == 0 &
        file.access(Sys.getenv("HOME"), mode = 2) == 0) {
      setwd(Sys.getenv("HOME"))
    }
  }

  tcltk::tclServiceMode(on=FALSE)
  putAmelia("outname", tcltk::tclVar("outdata"))
  putAmelia("outnum", tcltk::tclVar("5"))
  putAmelia("empri", tcltk::tclVar("0"))
  putAmelia("tol", tcltk::tclVar("0.0001"))
  putAmelia("amelia.data",NULL)
  putAmelia("am.filename",NULL)
  putAmelia("varnames", NULL)
  putAmelia("tsvar", NULL)
  putAmelia("csvar", NULL)
  putAmelia("varmin", NULL)
  putAmelia("varmax", NULL)
  putAmelia("runState", "noimps")
  putAmelia("session.flag", FALSE)
  putAmelia("intercs",tcltk::tclVar("0"))
  putAmelia("splinestime",tcltk::tclVar("0"))
  putAmelia("outtype", tcltk::tclVar("1"))
  putAmelia("max.resample", tcltk::tclVar("1000"))
  putAmelia("inname", tcltk::tclVar(""))
  putAmelia("seed", tcltk::tclVar(""))
  putAmelia("output.log", NULL)
  putAmelia("boundMin", tcltk::tclVar(""))
  putAmelia("boundMax", tcltk::tclVar(""))
  putAmelia("wdForLastImputation", getwd())

  output.types <- c("(no save)", "CSV", "Tab Delimited", "Stata 6",
                    "Stata 7", "Stata 8/9", "Stata 10+",
                    "Stata 10+ (stacked)", "RData")

  ampath <- find.package(package = "Amelia")[1]
  ameliaFile <- file.path(ampath, "gui/gallery19.gif")
  goFile <- file.path(ampath, "gui/action_go.gif")
  tableFile <- file.path(ampath, "gui/table.gif")
  rFile <- file.path(ampath, "gui/page-R.gif")
  dtaFile <- file.path(ampath, "gui/page_dta.gif")
  spssFile <- file.path(ampath, "gui/page_spss.gif")
  clockFile <- file.path(ampath, "gui/icon_clock.gif")
  userFile <- file.path(ampath, "gui/icon_user.gif")
  upFile <- file.path(ampath, "gui/arrow_up.gif")
  downFile <- file.path(ampath, "gui/arrow_down.gif")
  worldFile <- file.path(ampath, "gui/icon_world.gif")
  pageTextFile <- file.path(ampath, "gui/page_text.gif")
  pageEditFile <- file.path(ampath, "gui/page_edit.gif")
  histFile <- file.path(ampath, "gui/histogram.gif")
  saveFile <- file.path(ampath, "gui/action_save.gif")
  pageUpFile <- file.path(ampath, "gui/page_up.gif")
  redStopFile <- file.path(ampath, "gui/action_stop.gif")
  redFlagFile <- file.path(ampath, "gui/flag_red.gif")
  greenCheckFile <- file.path(ampath, "gui/icon_accept.gif")

  putAmelia("ameliaPic", tcltk::tkimage.create("photo", file=ameliaFile))
  putAmelia("action.go.icon", tcltk::tkimage.create("photo", file = goFile))
  putAmelia("tablePic", tcltk::tkimage.create("photo", file = tableFile))
  putAmelia("rPic", tcltk::tkimage.create("photo", file = rFile))
  putAmelia("dtaPic", tcltk::tkimage.create("photo", file = dtaFile))
  putAmelia("spssPic", tcltk::tkimage.create("photo", file = spssFile))
  putAmelia("clockIcon", tcltk::tkimage.create("photo", file = clockFile))
  putAmelia("userIcon", tcltk::tkimage.create("photo", file = userFile))
  putAmelia("worldIcon", tcltk::tkimage.create("photo", file = worldFile))
  putAmelia("upArrowIcon", tcltk::tkimage.create("photo", file = upFile))
  putAmelia("downArrowIcon", tcltk::tkimage.create("photo", file = downFile))
  putAmelia("histIcon", tcltk::tkimage.create("photo", file = histFile))
  putAmelia("saveIcon", tcltk::tkimage.create("photo", file = saveFile))
putAmelia("pageUpIcon", tcltk::tkimage.create("photo", file = pageUpFile)) putAmelia("redFlagIcon", tcltk::tkimage.create("photo", file = redFlagFile)) putAmelia("redStopIcon", tcltk::tkimage.create("photo", file = redStopFile)) putAmelia("greenCheckIcon", tcltk::tkimage.create("photo", file = greenCheckFile)) putAmelia("pageTextIcon", tcltk::tkimage.create("photo", file = pageTextFile)) putAmelia("pageEditIcon", tcltk::tkimage.create("photo", file = pageEditFile)) putAmelia("gui", tcltk::tktoplevel()) tcltk::tkwm.title(getAmelia("gui"), "AmeliaView") tcltk::tkwm.protocol(getAmelia("gui"),"WM_DELETE_WINDOW", function() main.close()) tcltk::tkwm.geometry(getAmelia("gui"), "800x500") ##Menu putAmelia("main.menu", tcltk::tkmenu(getAmelia("gui"))) putAmelia("main.menu.file", tcltk::tkmenu(getAmelia("main.menu"), tearoff=0)) putAmelia("main.menu.demo", tcltk::tkmenu(getAmelia("main.menu"), tearoff=0)) putAmelia("main.menu.import", tcltk::tkmenu(getAmelia("main.menu"), tearoff=0)) putAmelia("main.menu.options", tcltk::tkmenu(getAmelia("main.menu"), tearoff=0)) putAmelia("main.menu.splines", tcltk::tkmenu(getAmelia("main.menu"), tearoff=0)) putAmelia("main.menu.output", tcltk::tkmenu(getAmelia("main.menu"), tearoff=0)) putAmelia("main.menu.help", tcltk::tkmenu(getAmelia("main.menu"), tearoff=0)) putAmelia("main.menu.variables", tcltk::tkmenu(getAmelia("main.menu"), tearoff=0, postcommand = variableOptionsPost)) putAmelia("main.menu.trans", tcltk::tkmenu(getAmelia("main.menu"), tearoff=0)) putAmelia("main.menu.outfile", tcltk::tkmenu(getAmelia("main.menu"), tearoff=0)) tcltk::tkadd(getAmelia("main.menu.file"),"command",label="Load R Data File...",command=function()loadRData(), underline = 5) tcltk::tkadd(getAmelia("main.menu.import"),"command",label="Import comma-separated value data...", command=loadCSV, underline = 7) tcltk::tkadd(getAmelia("main.menu.import"),"command",label="Import tab-delimited data...", command=loadTAB, underline = 7) tcltk::tkadd(getAmelia("main.menu.import"),"command",label="Import Stata dta file...", command=loadStata, underline = 13) tcltk::tkadd(getAmelia("main.menu.import"),"command",label="Import SPSS data...", command=loadSPSS, underline = 7) tcltk::tkadd(getAmelia("main.menu.import"),"command",label="Import SAS Transport data...", command=loadSAS, underline = 8) tcltk::tkadd(getAmelia("main.menu.file"),"cascade",menu=getAmelia("main.menu.import"),label="Import Data", underline = 0) tcltk::tkadd(getAmelia("main.menu.demo"),"command",label="africa", command=function() loadDemo(name="africa"), underline = 0) tcltk::tkadd(getAmelia("main.menu.demo"),"command",label="freetrade", command=function() loadDemo(name="freetrade"), underline = 0) tcltk::tkadd(getAmelia("main.menu.file"),"cascade",menu=getAmelia("main.menu.demo"),label="Load Package Data", underline = 5) tcltk::tkadd(getAmelia("main.menu.file"),"command",command = setWorkingDir,label="Set Working Directory...", underline = 4) tcltk::tkadd(getAmelia("main.menu.file"),"command",label="Edit Data...", command=function(){putAmelia("amelia.data", edit(getAmelia("amelia.data")));updateTreeStats()},state="disabled", underline = 0) tcltk::tkadd(getAmelia("main.menu.file"),"separator") tcltk::tkadd(getAmelia("main.menu.file"),"command",label="Load Session...",command=function()load.session(), underline = 0) tcltk::tkadd(getAmelia("main.menu.file"),"command",label="Save Session...",command=function()save.session(), state="disabled", underline = 0) tcltk::tkadd(getAmelia("main.menu.file"),"separator") 
tcltk::tkadd(getAmelia("main.menu.file"),"command",label="Quit Amelia",command=function()main.close(), underline = 0) tcltk::tkadd(getAmelia("main.menu.variables"), "command", label = "Set as Time-Series Variable", command = setTS, state = "disabled", underline = 0) tcltk::tkadd(getAmelia("main.menu.variables"), "command", label = "Set as Cross-Section Variable", command = setCS, state = "disabled", underline = 7) tcltk::tkadd(getAmelia("main.menu.variables"), "command", label = "Unset as Time-Series Variable", command = unsetTS, state = "disabled", underline = 0) tcltk::tkadd(getAmelia("main.menu.variables"), "command", label = "Unset as Cross-Section Variable", command = unsetCS, state = "disabled", underline = 23) tcltk::tkadd(getAmelia("main.menu.variables"),"separator") tcltk::tkadd(getAmelia("main.menu.variables"), "command", label = "Add Lag", command = function() addLag(), state = "disabled", underline = 0) tcltk::tkadd(getAmelia("main.menu.variables"), "command", label = "Add Lead", command = function() addLead(), state = "disabled", underline = 4) tcltk::tkadd(getAmelia("main.menu.variables"), "command", label = "Remove Lag", command = function() dropLag(), state = "disabled", underline = 0) tcltk::tkadd(getAmelia("main.menu.variables"), "command", label = "Remove Lead", command = function() dropLead(), state = "disabled", underline = 1) tcltk::tkadd(getAmelia("main.menu.variables"),"separator") tcltk::tkadd(getAmelia("main.menu.variables"), "command", label = "Plot Histogram(s) of Selected", command = plotHist, state = "disabled", underline = 0) tcltk::tkadd(getAmelia("main.menu.trans"), "command", label = "Log", command = function(x) setTrans("logs"), underline = 0) tcltk::tkadd(getAmelia("main.menu.trans"), "command", label = "Square Root", command = function(x) setTrans("sqrt"), underline = 0) tcltk::tkadd(getAmelia("main.menu.trans"), "command", label = "Logistic", command = function(x) setTrans("lgstc"), underline = 1) tcltk::tkadd(getAmelia("main.menu.trans"), "command", label = "Nominal", command = function(x) setTrans("noms"), underline = 0) tcltk::tkadd(getAmelia("main.menu.trans"), "command", label = "Ordinal", command = function(x) setTrans("ords"), underline = 0) tcltk::tkadd(getAmelia("main.menu.trans"), "command", label = "ID Variable", command = function(x) setTrans("idvar"), underline = 0) tcltk::tkadd(getAmelia("main.menu.variables"), "cascade", label = "Add Transformation...", menu = getAmelia("main.menu.trans"), state = "disabled", underline = 4) tcltk::tkadd(getAmelia("main.menu.variables"), "command", label = "Remove Transformations", command = dropTrans, state = "disabled", underline = 2) tcltk::tkadd(getAmelia("main.menu.variables"),"separator") tcltk::tkadd(getAmelia("main.menu.variables"), "command", label = "Add or Edit Bounds", command = addBounds, state = "disabled", underline = 12) for (i in 0:10) tcltk::tkadd(getAmelia("main.menu.splines"), "radiobutton", variable = getAmelia("splinestime"), label = paste(i,"knots"), value = i, underline = 0) tcltk::tkadd(getAmelia("main.menu.options"), "cascade", label = "Splines of Time with...", menu = getAmelia("main.menu.splines"), state="disabled", underline = 0) tcltk::tkadd(getAmelia("main.menu.options"), "checkbutton", label = "Interact Spline With Cross-Section?", variable = getAmelia("intercs"), onvalue=1,offvalue=0, state="disabled", underline = 0) tcltk::tkadd(getAmelia("main.menu.options"),"separator") tcltk::tkadd(getAmelia("main.menu.options"),"command", label = "Add Observations Priors...", command 
= gui.pri.setup, state="disabled", underline = 17)
  tcltk::tkadd(getAmelia("main.menu.options"), "separator")
  tcltk::tkadd(getAmelia("main.menu.options"), "command", label = "Numerical Options", command = buildNumericalOptions, state = "disabled", underline = 0)
  tcltk::tkadd(getAmelia("main.menu.options"), "command", label = "Draw Missingness Map", command = drawMissMap, state="disabled", underline = 5)
  tcltk::tkadd(getAmelia("main.menu.options"), "command", label = "Output File Options...", command = buildOutputOptions, state = "disabled", underline = 0)
  for (i in 1:length(output.types)) {
    tcltk::tkadd(getAmelia("main.menu.outfile"), "radiobutton",
                 variable = getAmelia("outtype"), label = output.types[i],
                 value = i-1)
  }
  tcltk::tkadd(getAmelia("main.menu.options"), "cascade", label = "Output File Type...", menu = getAmelia("main.menu.outfile"), state = "disabled", underline = 7)
  tcltk::tkadd(getAmelia("main.menu.output"),"command", label = "Imputation Diagnostics...", command = gui.diag.setup, state="disabled", underline = 11)
  tcltk::tkadd(getAmelia("main.menu.output"),"command", label = "Output Log", command = show.output.log, state="disabled", underline = 0)
  tcltk::tkadd(getAmelia("main.menu.output"),"command", label = "Open Folder Containing Imputed Data", command = showImputedFiles, state="disabled", underline = 12)
  tcltk::tkadd(getAmelia("main.menu.help"),"command",label="Amelia Website",command= function()browseURL("http://gking.harvard.edu/amelia/"), underline = 7)
  tcltk::tkadd(getAmelia("main.menu.help"),"command",label="Documentation",command= function() browseURL("http://gking.harvard.edu/amelia/docs/"), underline = 0)
  tcltk::tkadd(getAmelia("main.menu.help"),"command",label="About...",command= function()buildAboutDialog(), underline = 0)
  tcltk::tkadd(getAmelia("main.menu"),"cascade",label="File", menu = getAmelia("main.menu.file"), underline = 0)
  tcltk::tkadd(getAmelia("main.menu"),"cascade",label="Variables", menu = getAmelia("main.menu.variables"), underline = 0)
  tcltk::tkadd(getAmelia("main.menu"),"cascade",label="Options", menu = getAmelia("main.menu.options"), underline = 0)
  tcltk::tkadd(getAmelia("main.menu"),"cascade",label="Output", menu = getAmelia("main.menu.output"), underline = 1)
  tcltk::tkadd(getAmelia("main.menu"),"cascade",label="Help", menu = getAmelia("main.menu.help"), underline = 0)
  tcltk::tkconfigure(getAmelia("gui"), menu = getAmelia("main.menu"))

  ## Welcome Screen
  putAmelia("gui.welcome", tcltk::ttkframe(getAmelia("gui")))
  ameliaPicLabel <- tcltk::ttklabel(getAmelia("gui.welcome"), relief = "groove", image = getAmelia("ameliaPic"))
  loadRButton <- tcltk::ttkbutton(getAmelia("gui.welcome"), text = "Load R Data", image = getAmelia("rPic"), compound = "top", command = loadRData)
  loadCSVButton <- tcltk::ttkbutton(getAmelia("gui.welcome"), text = "Import CSV", image = getAmelia("tablePic"), compound = "top", command = loadCSV)
  loadStataButton <- tcltk::ttkbutton(getAmelia("gui.welcome"), text = "Import STATA", image = getAmelia("dtaPic"), compound = "top", command = loadStata)
  loadSPSSButton <- tcltk::ttkbutton(getAmelia("gui.welcome"), text = "Import SPSS", image = getAmelia("spssPic"), compound = "top", command = loadSPSS)
  loadDemoButton <- tcltk::ttkbutton(getAmelia("gui.welcome"), text = "Load Demo", image = getAmelia("tablePic"), compound = "top", command = function () loadDemo(name = "africa"))
  tcltk::tkgrid(ameliaPicLabel, row = 0, column = 0, columnspan = 6, padx = 10, pady = 10)
  tcltk::tkgrid(tcltk::ttklabel(getAmelia("gui.welcome"), text=paste("Welcome
to AmeliaView ",packageDescription("Amelia", fields="Version"), "!", sep="")), row = 1, column = 0, columnspan = 6, padx = 10, pady = 10) tcltk::tkgrid(tcltk::ttklabel(getAmelia("gui.welcome"), text="Please load a dataset:"), row = 2, column = 0, columnspan = 6, padx = 10, pady = 10) tcltk::tkgrid(loadRButton, row = 3, column = 0, padx = 10, pady = 10) tcltk::tkgrid(loadCSVButton, row = 3, column = 1, padx = 10, pady = 10) tcltk::tkgrid(loadStataButton, row = 3, column = 2, padx = 10, pady = 10) tcltk::tkgrid(loadSPSSButton, row = 3, column = 3, padx = 10, pady = 10) tcltk::tkgrid(loadDemoButton, row = 3, column = 4, padx = 10, pady = 10) tcltk::tkgrid(getAmelia("gui.welcome"), row = 0, column = 0) ##Frame putAmelia("gui.skel", tcltk::ttkpanedwindow(getAmelia("gui"), orient = "vertical")) ############### ### Toolbar ### ############### toolbar <- tcltk::ttkframe(getAmelia("gui.skel")) putAmelia("loadSessionButton", tcltk::ttkbutton(toolbar, text = "Load Session", command = load.session, image = getAmelia("pageUpIcon"), compound = "top", style="Toolbutton")) putAmelia("saveSessionButton", tcltk::ttkbutton(toolbar, text = "Save Session", command = save.session, image = getAmelia("saveIcon"), compound = "top", style="Toolbutton")) putAmelia("plotHistButton", tcltk::ttkbutton(toolbar, text = "Plot Histogram", state = "disabled", command = plotHist, image = getAmelia("histIcon"), compound = "top", style="Toolbutton")) putAmelia("editDataButton", tcltk::ttkbutton(toolbar, text = "Edit Data", state = "disabled", command = function(){putAmelia("amelia.data", edit(getAmelia("amelia.data")));updateTreeStats()}, image = getAmelia("pageEditIcon"), compound = "top", style="Toolbutton")) putAmelia("missmapButton", tcltk::ttkbutton(toolbar, text = "Missingness Map", state = "disabled", command = drawMissMap, image = getAmelia("worldIcon"), compound = "top", style="Toolbutton")) putAmelia("output.run", tcltk::ttkbutton(toolbar,text="Impute!", state = "disabled", command = run.amelia, image = getAmelia("action.go.icon"), compound = "top", style="Toolbutton")) putAmelia("showLogButton", tcltk::ttkbutton(toolbar, text = "Output Log", state = "disabled", command = show.output.log, image = getAmelia("pageTextIcon"), compound = "top", style="Toolbutton")) tcltk::tkgrid(getAmelia("loadSessionButton"), row =0, column = 0, sticky = "ew") tcltk::tkgrid(getAmelia("saveSessionButton"), row =0, column = 1, sticky = "ew") tcltk::tkgrid(tcltk::ttkseparator(toolbar, orient = "vertical"), row = 0, column = 2, padx=5, pady=5, sticky="ns") tcltk::tkgrid(getAmelia("plotHistButton"), row = 0, column = 3, sticky = "ew") tcltk::tkgrid(getAmelia("editDataButton"), row = 0, column = 4, sticky = "ew") tcltk::tkgrid(getAmelia("missmapButton"), row = 0, column = 5, sticky="ew") tcltk::tkgrid(tcltk::ttkseparator(toolbar, orient = "vertical"), row = 0, column = 6, padx=5, pady=5, sticky="ns") tcltk::tkgrid(getAmelia("output.run"), row = 0 , column = 7, sticky = "ew") tcltk::tkgrid(getAmelia("showLogButton"), row = 0, column = 8, sticky = "ew") ########################## ### Variable Dashboard ### ########################## dashboard <- tcltk::ttkframe(getAmelia("gui.skel")) yscr <- tcltk::ttkscrollbar(dashboard, orient = "vertical", command=function(...)tcltk::tkyview(getAmelia("main.tree"),...)) xscr <- tcltk::ttkscrollbar(dashboard, orient = "horizontal", command=function(...)tcltk::tkxview(getAmelia("main.tree"),...)) sorts <- rep(FALSE, times = 10) names(sorts) <- c("#0","transform","lag", "lead","bounds", "min", "max", "mean", 
"sd", "miss") putAmelia("sortDirs", sorts) putAmelia("main.tree", tcltk::ttktreeview(dashboard, columns = "transform lag lead bounds min max mean sd miss", yscrollcommand=function(...)tcltk::tkset(yscr,...), xscrollcommand=function(...)tcltk::tkset(xscr,...), selectmode = "extended")) #putAmelia("sum.right.click",tcltk::tkmenu(getAmelia("main.tree"), tearoff = FALSE) ) #tcltk::tkadd(getAmelia("sum.right.click"), "command", label = "Plot Histogram of Selected", command = function() sum.plot()) #tcltk::tkbind(getAmelia("main.tree"), "<Button-3>", RightClick) #putAmelia("sum.right.dis",tcltk::tkmenu(getAmelia("main.tree"), tearoff = FALSE) ) #tcltk::tkadd(getAmelia("sum.right.dis"), "command", label = "Plot Histogram of Selected", state = "disabled") tcltk::tcl(getAmelia("main.tree"), "column", "#0", width = 70, minwidth = 80) tcltk::tcl(getAmelia("main.tree"), "column", 0, width = 78, minwidth = 78, anchor = "center") tcltk::tcl(getAmelia("main.tree"), "column", 1, width = 20, minwidth = 20, anchor = "center") tcltk::tcl(getAmelia("main.tree"), "column", 2, width = 20, minwidth = 20, anchor = "center") tcltk::tcl(getAmelia("main.tree"), "column", 3, width = 50, minwidth = 50, anchor = "e") tcltk::tcl(getAmelia("main.tree"), "column", 4, width = 50, minwidth = 50, anchor = "e") tcltk::tcl(getAmelia("main.tree"), "column", 5, width = 50, minwidth = 50, anchor = "e") tcltk::tcl(getAmelia("main.tree"), "column", 6, width = 50, minwidth = 50, anchor = "e") tcltk::tcl(getAmelia("main.tree"), "column", 7, width = 50, minwidth = 50, anchor = "e") tcltk::tcl(getAmelia("main.tree"), "column", 8, width = 50, minwidth = 50, anchor = "e") tcltk::tcl(getAmelia("main.tree"), "heading", "#0", text = "Variable", command = function() sortTreeBy("#0")) tcltk::tcl(getAmelia("main.tree"), "heading", 0, text = "Transformation", command = function() sortTreeBy("transform")) tcltk::tcl(getAmelia("main.tree"), "heading", 1, text = "Lag", command = function() sortTreeBy("lag")) tcltk::tcl(getAmelia("main.tree"), "heading", 2, text = "Lead", command = function() sortTreeBy("lead")) tcltk::tcl(getAmelia("main.tree"), "heading", 3, text = "Bounds", command = function() sortTreeBy("lower")) tcltk::tcl(getAmelia("main.tree"), "heading", 4, text = "Min", command = function() sortTreeBy("min")) tcltk::tcl(getAmelia("main.tree"), "heading", 5, text = "Max", command = function() sortTreeBy("max")) tcltk::tcl(getAmelia("main.tree"), "heading", 6, text = "Mean", command = function() sortTreeBy("mean")) tcltk::tcl(getAmelia("main.tree"), "heading", 7, text = "SD", command = function() sortTreeBy("sd")) tcltk::tcl(getAmelia("main.tree"), "heading", 8, text = "Missing", command = function() sortTreeBy("miss")) tcltk::tkbind(getAmelia("main.tree"), "<Button-3>", mainTreeRightClick) ## Windows 7 doesn't handle treeview selection correctly selectbg <- tcltk::tcl("ttk::style","configure",".","-selectbackground") selectfg <- tcltk::tcl("ttk::style","configure",".","-selectforeground") tcltk::tktag.configure(getAmelia("main.tree"),"normal", background="white") tcltk::tktag.configure(getAmelia("main.tree"),"selected", background=selectbg, foreground=selectfg) tcltk::tkbind(getAmelia("main.tree"),"<<TreeviewSelect>>",function() refreshSelection(getAmelia("main.tree"))) putAmelia("legendFrame", tcltk::ttkframe(dashboard)) tcltk::tkgrid(tcltk::ttklabel(getAmelia("legendFrame"), text="= Time-Series Variable", image = getAmelia("clockIcon"), compound = "left"), row = 0, column = 0, sticky="w", padx = 5) 
tcltk::tkgrid(tcltk::ttklabel(getAmelia("legendFrame"), text="= Cross-Section Variable", image = getAmelia("userIcon"), compound = "left"), row = 0, column = 1, sticky="w", padx = 5) tcltk::tkgrid(tcltk::ttklabel(getAmelia("legendFrame"), text="= Unhandled Factor Variable", image = getAmelia("redFlagIcon"), compound = "left"), row = 0, column = 2, sticky="w", padx = 5) tcltk::tkgrid(getAmelia("main.tree"), row=0,column=0, sticky="news") tcltk::tkgrid(yscr, row = 0, column = 1, sticky = "ns") tcltk::tkgrid(xscr, row = 1, column = 0, sticky = "ew") tcltk::tkgrid(getAmelia("legendFrame"), row = 2, column = 0, sticky = "ew") tcltk::tkgrid.rowconfigure(dashboard, 0, weight = 1) tcltk::tkgrid.columnconfigure(dashboard, 0, weight = 1) ##Output Frame ##output options, run button, diag ##output options ##grid the whole thing tcltk::tkadd(getAmelia("gui.skel"), toolbar) tcltk::tkadd(getAmelia("gui.skel"), dashboard) tcltk::tkgrid(toolbar, row = 0, column = 1, padx = 2, pady=2, sticky = "ew") tcltk::tkgrid(dashboard,row = 1, column = 1, sticky = "news", padx = 10, pady = 5) tcltk::tkgrid.rowconfigure(getAmelia("gui.skel"), 1, weight = 1) tcltk::tkgrid.columnconfigure(getAmelia("gui.skel"), 1, weight = 1) #tcltk::tkgrid(gui.skel,sticky="news") tcltk::tkgrid.rowconfigure(getAmelia("gui"), 0, weight = 1) tcltk::tkgrid.columnconfigure(getAmelia("gui"), 0, weight = 1) ##statusbar at the bottom. putAmelia("statusbar", tcltk::ttkframe(getAmelia("gui"), relief = "groove", borderwidth = 3)) statusbar.lab1a <- tcltk::ttklabel(getAmelia("statusbar"), text = "Data Loaded:", anchor = "w", padding = c(2,0)) putAmelia("statusbar.lab1b", tcltk::ttklabel(getAmelia("statusbar"), text = "Unspecified", relief = "sunken", anchor = "w", foreground = "red",padding = c(2,0), width = 35)) statusbar.nlab <- tcltk::ttklabel(getAmelia("statusbar"), text = "Obs:", anchor="e", padding = c(2,0)) putAmelia("statusbar.n", tcltk::ttklabel(getAmelia("statusbar"), text = "----", relief = "sunken", anchor = "w", foreground = "red",padding = c(2,0,0,0), width = 6)) statusbar.klab <- tcltk::ttklabel(getAmelia("statusbar"), text = "Vars:", anchor="e", padding = c(2,0)) putAmelia("statusbar.k", tcltk::ttklabel(getAmelia("statusbar"), text = "----", relief = "sunken", anchor = "w", foreground = "red", padding = c(2,0,0,0), width = 6)) putAmelia("runAmeliaProgress", tcltk::ttkprogressbar(getAmelia("statusbar"), value = 0, length = 200, mode = "determinate")) putAmelia("error.label", tcltk::ttkbutton(getAmelia("statusbar"), text = "Error! 
See Output Log.", image = getAmelia("redStopIcon"), compound = "left", style = "Toolbutton", command = show.output.log)) putAmelia("allgood.label", tcltk::ttkbutton(getAmelia("statusbar"), text = "Successful Imputation.", image = getAmelia("greenCheckIcon"), compound = "left", style = "Toolbutton", command = showImputedFiles)) putAmelia("noimps.label", tcltk::ttklabel(getAmelia("statusbar"), text = "No imputations run.", justify = "right")) tcltk::tkgrid(statusbar.lab1a,row = 2, column = 1, sticky="w") tcltk::tkgrid(getAmelia("statusbar.lab1b"),row = 2, column = 2, sticky="w") tcltk::tkgrid(statusbar.nlab,row = 2, column = 3, sticky="w") tcltk::tkgrid(getAmelia("statusbar.n"),row = 2, column = 4, sticky="w") tcltk::tkgrid(statusbar.klab,row = 2, column = 5, sticky="w") tcltk::tkgrid(getAmelia("statusbar.k"), row = 2, column = 6, sticky = "w") tcltk::tkgrid(getAmelia("noimps.label"), row = 2, column = 7, sticky ="e", padx = 10) tcltk::tkgrid.rowconfigure(getAmelia("statusbar"), 2, weight = 1) #tcltk::tkgrid(statusbar, sticky = "sew") bindTooltip(widget = "output.run", tip = "Run Amelia on your input dataset with the current settings.") # bindTooltip(widget = "output.diag", tip = "Post-imputation checks for problems in the imputation.") bindTooltip(widget = "runAmeliaProgress", tip = "Amelia is currently running and this shows its progress. On large datasets, Amelia may take quite some time.") # bindTooltip(widget = "output.drop.label", tip = "Set the file format for saving the imputed datasets, if you want to save them.") # bindTooltip(widget = "output.drop.box", tip = "Set the file format for saving the imputed datasets, if you want to save them.") bindTooltip(widget = "showLogButton", tip = "Show the output log for the Amelia run. From here, you can save the output. Look here if something went wrong.") bindTooltip(widget = "missmapButton", tip = "Show a map of the missingnes in the data.") bindTooltip(widget = "editDataButton", tip = "Edit individual cells of the data set.") bindTooltip(widget = "plotHistButton", tip = "Plot histogram(s) of the selected variable(s).") bindTooltip(widget = "loadSessionButton", tip = "Load a previously saved Amelia session. This will remove any current settings.") bindTooltip(widget = "saveSessionButton", tip = "Save the current Amelia session. This will save the data, settings, and any imputed data in the Amelia session.") bindTooltip(widget = "legendFrame", tip = "A legend for the icons used in the variable dashboard.") bindTooltip(widget = "noimps.label", tip = "No imputations have been run yet. To run Amelia, hit the 'Impute!' button in the toolbar.") bindTooltip(widget = "allgood.label", tip = "Amelia has run successfully! You can now run imputation diagnostics from the 'Output' menu above. If you chose to save the imputations to file, they should be saved in the working directory. Click here to open the containing folder..") bindTooltip(widget = "error.label", tip = "There was an error the last time you ran Amelia. 
Click here to open the output log to identify the problem and to see how to fix it.") ## these commands force R to wait for tcltk if (.Platform$OS.type == "windows") tcltk::tkwm.iconbitmap(getAmelia("gui"),file.path(find.package(package = "Amelia")[1], "gui/amelia.ico")) tcltk::tkraise(getAmelia("gui")) tcltk::tkwm.deiconify(getAmelia("gui")) tcltk::tkfocus(getAmelia("gui")) tcltk::tclServiceMode(on = TRUE) tcltk::tkwait.window(getAmelia("gui")) } buildNumericalOptions <- function() { onCancel <- function(){ tcltk::tcl("set", getAmelia("seed"), getAmelia("temp.seed")) tcltk::tcl("set", getAmelia("tol"), getAmelia("temp.tol")) tcltk::tkwm.withdraw(getAmelia("numericalWindow")) tcltk::tkgrab.release(getAmelia("numericalWindow")) tcltk::tkfocus(getAmelia("gui")) } putAmelia("temp.seed", tcltk::tclvalue(getAmelia("seed"))) putAmelia("temp.tol", tcltk::tclvalue(getAmelia("tol"))) if (exists("numericalWindow", envir = ameliaEnv)) { tcltk::tkwm.deiconify(getAmelia("numericalWindow")) tcltk::tkraise(getAmelia("numericalWindow")) return() } putAmelia("numericalWindow", tcltk::tktoplevel()) tcltk::tkwm.title(getAmelia("numericalWindow"), "Numerical Options") numericalBox <- tcltk::ttkframe(getAmelia("numericalWindow")) putAmelia("output.seedlab", tcltk::ttklabel(numericalBox, text="Seed:")) putAmelia("output.seed", tcltk::ttkentry(numericalBox, width="7", textvariable=getAmelia("seed"))) putAmelia("output.tollab", tcltk::ttklabel(numericalBox, text="Tolerance:")) putAmelia("output.tol", tcltk::ttkentry(numericalBox, width="7", textvariable=getAmelia("tol"))) putAmelia("empri.ent", tcltk::ttkentry(numericalBox, width=7,textvariable = getAmelia("empri"))) putAmelia("empri.label", tcltk::ttklabel(numericalBox,text="Ridge prior:")) putAmelia("maxre.ent", tcltk::ttkentry(numericalBox, width=7,textvariable = getAmelia("max.resample"))) putAmelia("maxre.label", tcltk::ttklabel(numericalBox,text="Maximum Resample for Bounds:")) buttonBox <- tcltk::ttkframe(numericalBox) okButton <- tcltk::ttkbutton(buttonBox, text = "OK", width = 10, command = function() {tcltk::tkwm.withdraw(getAmelia("numericalWindow"));tcltk::tkgrab.release(getAmelia("numericalWindow"));tcltk::tkfocus(getAmelia("gui"))}) cancelButton <- tcltk::ttkbutton(buttonBox, width = 10, text = "Cancel", command = onCancel) tcltk::tkgrid(getAmelia("output.seedlab"), row = 1, column = 1, sticky = "w", padx = 10, pady = 10) tcltk::tkgrid(getAmelia("output.seed"), row = 1, column = 2, sticky = "w", padx = 10, pady = 10) tcltk::tkgrid(getAmelia("output.tollab"), row = 2, column = 1, sticky = "w", padx = 10, pady = 10) tcltk::tkgrid(getAmelia("output.tol"), row = 2, column = 2, sticky = "w", padx = 10, pady = 10) tcltk::tkgrid(getAmelia("empri.label"), row = 3, column = 1, sticky = "w", padx = 10, pady = 10) tcltk::tkgrid(getAmelia("empri.ent"), row = 3, column = 2, sticky = "w", padx = 10, pady = 10) tcltk::tkgrid(getAmelia("maxre.label"), row = 4, column = 1, sticky = "w", padx = 10, pady = 10) tcltk::tkgrid(getAmelia("maxre.ent"), row = 4, column = 2, sticky = "w", padx = 10, pady = 10) tcltk::tkgrid(okButton, row = 0, column = 0, padx = 10, pady = 10) tcltk::tkgrid(cancelButton, row = 0, column = 1, padx = 10, pady = 10) tcltk::tkgrid(buttonBox, row = 5, column = 1, sticky = "e", columnspan = 2) tcltk::tkgrid(numericalBox, sticky = "news") tcltk::tkwm.protocol(getAmelia("numericalWindow"), "WM_DELETE_WINDOW", onCancel) centerModalDialog(getAmelia("numericalWindow"), resize=FALSE) bindTooltip(widget = "empri.ent", "Ridge prior that shrinks the 
covariances, which stabilizes estimation. Five percent of the number of observations is a useful default.")
bindTooltip(widget = "empri.label", "Ridge prior that shrinks the covariances, which stabilizes estimation. Five percent of the number of observations is a useful default.")
bindTooltip(widget = "output.seed", tip = "Set seed for random number generator. Useful if you need to replicate the exact same imputations.")
bindTooltip(widget = "output.seedlab", tip = "Set seed for random number generator. Useful if you need to replicate the exact same imputations.")
bindTooltip(widget = "output.tol", tip = "Set the tolerance for the Amelia run. This is the value used to determine when Amelia has converged. Higher values mean Amelia will converge more quickly, but this may lead to a poor approximation of the parameters.")
bindTooltip(widget = "output.tollab", tip = "Set the tolerance for the Amelia run. This is the value used to determine when Amelia has converged. Higher values mean Amelia will converge more quickly, but this may lead to a poor approximation of the parameters.")
bindTooltip(widget = "maxre.ent", tip = "Amelia fits bounds by rejecting any draws that do not fall within the bounds. This value sets the number of times Amelia should attempt to resample to fit the bounds before setting the imputation to the bound.")
bindTooltip(widget = "maxre.label", tip = "Amelia fits bounds by rejecting any draws that do not fall within the bounds. This value sets the number of times Amelia should attempt to resample to fit the bounds before setting the imputation to the bound.")
}

buildOutputOptions <- function() {
  onCancel <- function() {
    tcltk::tcl("set", getAmelia("outname"), getAmelia("temp.name"))
    tcltk::tcl("set", getAmelia("outnum"), getAmelia("temp.num"))
    tcltk::tkwm.withdraw(getAmelia("outputWindow"))
    tcltk::tkgrab.release(getAmelia("outputWindow"))
    tcltk::tkfocus(getAmelia("gui"))
  }
  putAmelia("temp.name", tcltk::tclvalue(getAmelia("outname")))
  putAmelia("temp.num", tcltk::tclvalue(getAmelia("outnum")))
  if (exists("outputWindow", envir = ameliaEnv)) {
    tcltk::tkwm.deiconify(getAmelia("outputWindow"))
    tcltk::tkraise(getAmelia("outputWindow"))
    return()
  }
  putAmelia("outputWindow", tcltk::tktoplevel())
  tcltk::tkwm.title(getAmelia("outputWindow"), "Output Options")
  outputBox <- tcltk::ttkframe(getAmelia("outputWindow"))
  putAmelia("output.label", tcltk::ttklabel(outputBox, text = "Name the Imputed Dataset:"))
  putAmelia("output.entry", tcltk::ttkentry(outputBox, width = "15", textvariable = getAmelia("outname")))
  putAmelia("output.numlab", tcltk::ttklabel(outputBox, text = "Number of Imputed Datasets:"))
  putAmelia("output.num", tcltk::ttkentry(outputBox, width = "7", textvariable = getAmelia("outnum")))
  buttonBox <- tcltk::ttkframe(outputBox)
  okButton <- tcltk::ttkbutton(buttonBox, text = "OK", width = 10, command = function() {tcltk::tkwm.withdraw(getAmelia("outputWindow")); tcltk::tkgrab.release(getAmelia("outputWindow")); tcltk::tkfocus(getAmelia("gui"))})
  cancelButton <- tcltk::ttkbutton(buttonBox, width = 10, text = "Cancel", command = onCancel)
  tcltk::tkgrid(getAmelia("output.label"), row = 1, column = 1, sticky = "w", padx = 10, pady = 10)
  tcltk::tkgrid(getAmelia("output.entry"), row = 1, column = 2, sticky = "w", padx = 10, pady = 10)
  tcltk::tkgrid(getAmelia("output.numlab"), row = 2, column = 1, sticky = "w", padx = 10, pady = 10)
  tcltk::tkgrid(getAmelia("output.num"), row = 2, column = 2, sticky = "w", padx = 10, pady = 10)
  tcltk::tkgrid(okButton, row = 0, column = 0, padx = 10, pady = 10)
tcltk::tkgrid(cancelButton, row = 0, column = 1, padx = 10, pady = 10) tcltk::tkgrid(buttonBox, row = 3, column = 1, sticky = "e", columnspan = 2) tcltk::tkgrid(outputBox, sticky = "news") tcltk::tkwm.protocol(getAmelia("outputWindow"), "WM_DELETE_WINDOW", onCancel) centerModalDialog(getAmelia("outputWindow"), resize=FALSE) bindTooltip(widget = "output.entry", tip = "The prefix for the saved imputed datasets. For most saving options they will be in the following format: \n\nmyprefix1.out\nmyprefix2.out\n...\n\nAnd so on, where \"out\" is the file extension.") bindTooltip(widget = "output.label", tip = "The prefix for the saved imputed datasets. For most saving options they will be in the following format: \n\nmyprefix1.out\nmyprefix2.out\n...\n\nAnd so on, where \"out\" is the file extension.") bindTooltip(widget = "output.num", tip = "Set the number of imputed datasets.\n\nIn many cases, around 5 is sufficient, but if the fraction of missingness is high, you may need more. Use the Summarize Data and Missingness Map above to get a sense for the amount of missingness in your data.") bindTooltip(widget = "output.numlab", tip = "Set the number of imputed datasets.\n\nIn many cases, around 5 is sufficient, but if the fraction of missingness is high, you may need more. Use the Summarize Data and Missingness Map above to get a sense for the amount of missingness in your data.") } buildAboutDialog <- function() { if (exists("aboutWindow", envir = ameliaEnv)) { tcltk::tkwm.deiconify(getAmelia("aboutWindow")) tcltk::tkraise(getAmelia("aboutWindow")) return() } putAmelia("aboutWindow", tcltk::tktoplevel(parent=getAmelia("gui"))) tcltk::tkwm.title(getAmelia("aboutWindow"), "About AmeliaView") aboutBox <- tcltk::ttkframe(getAmelia("aboutWindow"), height = 150, width = 200) #ameliaPic <- tcltk::tkimage.create("photo",file=ameliaFile) picLabel <- tcltk::ttklabel(aboutBox, image=getAmelia("ameliaPic"), relief="groove", borderwidth=2) tcltk::tkgrid(tcltk::ttkframe(aboutBox,width=100), row=0,column=1) tcltk::tkgrid(tcltk::ttkframe(aboutBox,height=150,width=0), row=0,column=0,rowspan=3) tcltk::tkgrid(picLabel, row = 1, column=1, pady = 20, padx = 20) tcltk::tkgrid(tcltk::ttklabel(aboutBox, text=paste("AmeliaView",packageDescription("Amelia", fields="Version")), justify="center"), row = 2, column = 1) tcltk::tkgrid(tcltk::ttklabel(aboutBox, text="James Honaker, Gary King, Matthew Blackwell", justify="center"), row = 3, column = 1, padx=20) tcltk::tkgrid(tcltk::ttklabel(aboutBox, text="\uA9 2006-2010", justify="center"), row = 4, column = 1, padx=20) buttonBox <- tcltk::ttkframe(aboutBox) closeButton <- tcltk::ttkbutton(buttonBox, text = "Close", command = function() {tcltk::tkwm.withdraw(getAmelia("aboutWindow"));tcltk::tkgrab.release(getAmelia("aboutWindow"));tcltk::tkfocus(getAmelia("gui"))}, width = 10) websiteButton <- tcltk::ttkbutton(buttonBox, text = "Website", command = function() browseURL("http://gking.harvard.edu/amelia/")) tcltk::tkgrid(websiteButton, row=0, column = 0, sticky="w", padx=10, pady=10) tcltk::tkgrid(closeButton, row=0, column = 0, sticky="e", padx=10, pady=10) tcltk::tkgrid.columnconfigure(buttonBox, 0, weight=1) tcltk::tkgrid(buttonBox, row=5, column = 1, sticky="ew") tcltk::tkgrid(aboutBox, sticky = "nsew") tcltk::tkwm.protocol(getAmelia("aboutWindow"), "WM_DELETE_WINDOW", function() {tcltk::tkwm.withdraw(getAmelia("aboutWindow"));tcltk::tkgrab.release(getAmelia("aboutWindow"));tcltk::tkfocus(getAmelia("gui"))}) centerModalDialog(getAmelia("aboutWindow"), resize=FALSE) } 
gui.pri.setup <- function() {
  cancelPriors <- function() {
    putAmelia("priorsmat", getAmelia("temp.priorsmat"))
  }
  onOK <- function() {
    nm <- c("dist", "range")[getAmeliaInd("addpri.note") + 1]
    varBox <- paste("add", nm, "var", sep = ".")
    caseBox <- paste("add", nm, "case", sep = ".")
    caseSelection <- as.numeric(tcltk::tcl(getAmelia(caseBox), "current"))
    varSelection <- as.numeric(tcltk::tcl(getAmelia(varBox), "current")) + 1
    thiscase <- tcltk::tclvalue(tcltk::tkget(getAmelia(caseBox)))
    thisvar <- tcltk::tclvalue(tcltk::tkget(getAmelia(varBox)))
    if (caseSelection == 0) {
      rowSelection <- 0
      colSelection <- which(anyMissing)[varSelection]
    } else {
      rowSelection <- missingCases[caseSelection]
      colSelection <- which(is.na(getAmelia("amelia.data")[rowSelection,]))[varSelection]
    }
    ## fork for range vs. dist
    if (nm == "range") {
      if (tcltk::tclvalue(getAmelia("priorMin")) == "") {
        tcltk::tkmessageBox(parent = getAmelia("priorsWindow"), title = "Error", message = "Please enter a minimum value.", type = "ok", icon = "error")
        return()
      }
      if (tcltk::tclvalue(getAmelia("priorMax")) == "") {
        tcltk::tkmessageBox(parent = getAmelia("priorsWindow"), title = "Error", message = "Please enter a maximum value.", type = "ok", icon = "error")
        return()
      }
      if (tcltk::tclvalue(getAmelia("priorConf")) == "") {
        tcltk::tkmessageBox(parent = getAmelia("priorsWindow"), title = "Error", message = "Please enter a confidence value.", type = "ok", icon = "error")
        return()
      }
      if (isTRUE(as.numeric(tcltk::tclvalue(getAmelia("priorConf"))) <= 0 | as.numeric(tcltk::tclvalue(getAmelia("priorConf"))) >= 1)) {
        tcltk::tkmessageBox(parent = getAmelia("priorsWindow"), title = "Error", message = "Confidence levels must be between 0 and 1.", type = "ok", icon = "error")
        return()
      }
      prMax <- as.numeric(tcltk::tclvalue(getAmelia("priorMax")))
      prMin <- as.numeric(tcltk::tclvalue(getAmelia("priorMin")))
      prCon <- as.numeric(tcltk::tclvalue(getAmelia("priorConf")))
      if (prMax <= prMin) {
        tcltk::tkmessageBox(title = "Error", message = "The max is less than the min.", type = "ok", icon = "error")
        return()
      }
      ## convert the [min, max] range with confidence prCon into an
      ## equivalent normal prior mean and standard deviation
      prMean <- prMin + ((prMax - prMin) / 2)
      prSD <- (prMax - prMin) / (2 * qnorm(1 - (1 - prCon) / 2))
    ## if dist prior
    } else {
      if (tcltk::tclvalue(getAmelia("priorMean")) == "") {
        tcltk::tkmessageBox(parent = getAmelia("priorsWindow"), title = "Error", message = "Please enter a mean value.", type = "ok", icon = "error")
        return()
      }
      if (tcltk::tclvalue(getAmelia("priorSD")) == "") {
        tcltk::tkmessageBox(parent = getAmelia("priorsWindow"), title = "Error", message = "Please enter a standard deviation.", type = "ok", icon = "error")
        return()
      }
      if (isTRUE(as.numeric(tcltk::tclvalue(getAmelia("priorSD"))) == 0)) {
        tcltk::tkmessageBox(parent = getAmelia("priorsWindow"), title = "Error", message = "Standard deviations must be greater than 0.", type = "ok", icon = "error")
        return()
      }
      prMean <- as.numeric(tcltk::tclvalue(getAmelia("priorMean")))
      prSD <- as.numeric(tcltk::tclvalue(getAmelia("priorSD")))
    }
    newPrior <- c(rowSelection, colSelection, prMean, prSD)
    if (!is.null(getAmelia("priorsmat"))) {
      matchPrior <- apply(getAmelia("priorsmat"), 1, function(x) all(x[1] == rowSelection, x[2] == colSelection))
    } else {
      matchPrior <- FALSE
    }
    if (any(matchPrior)) {
      mess <- "There is a prior associated with this case. Overwrite?"
over <- tcltk::tkmessageBox(parent=getAmelia("priorsWindow"), title="Overwrite Prior",message=mess, icon="question",type="yesno",default="no") if (tcltk::tclvalue(over)=="no") { return() } else { putAmelia("priorsmat",getAmelia("priorsmat")[-which(matchPrior),]) tcltk::tkdelete(getAmelia("priors.tree"), paste(rowSelection,colSelection,sep="-")) } } putAmelia("priorsmat",rbind(getAmelia("priorsmat"),newPrior)) ## need to change the treeview #updateTree() tcltk::tkinsert(getAmelia("priors.tree"),"","end", id = paste(rowSelection,colSelection,sep="-"), values = c(thisvar,prMean,prSD), text = thiscase,tag="normal") resetEntries() return() } validateNumeric <- function(x) { if (isTRUE(grep("(^-?[0-9]*\\.?[0-9]*$)",x)==1)) return(tcltk::tclVar("TRUE")) else return(tcltk::tclVar("FALSE")) } validateSD <- function(x) { if (isTRUE(grep("^[0-9]*\\.?[0-9]*$",x)==1)) return(tcltk::tclVar("TRUE")) else return(tcltk::tclVar("FALSE")) } validateConf <- function(x) { if (isTRUE(grep("^0*\\.[0-9]*$",x)==1)) return(tcltk::tclVar("TRUE")) else return(tcltk::tclVar("FALSE")) } setMissingVars <- function() { currentSelection <- as.numeric(tcltk::tcl(getAmelia("add.dist.case"), "current")) currentCase <- missingCases[currentSelection] if (currentSelection==0) missVars <- anyMissing else missVars <- is.na(getAmelia("amelia.data")[currentCase,]) missVarNames <- colnames(getAmelia("amelia.data"))[missVars] tcltk::tkconfigure(getAmelia("add.dist.var"),values = missVarNames) tcltk::tcl(getAmelia("add.dist.var"), "current", 0) } setMissingRangeVars <- function() { currentSelection <- as.numeric(tcltk::tcl(getAmelia("add.range.case"), "current")) currentCase <- missingCases[currentSelection] if (currentSelection==0) missVars <- anyMissing else missVars <- is.na(getAmelia("amelia.data")[currentCase,]) missVarNames <- colnames(getAmelia("amelia.data"))[missVars] tcltk::tkconfigure(getAmelia("add.range.var"),values = missVarNames) tcltk::tcl(getAmelia("add.range.var"), "current", 0) } resetEntries <- function() { tcltk::tcl("set", getAmelia("priorMin"),"") tcltk::tcl("set", getAmelia("priorMax"),"") tcltk::tcl("set", getAmelia("priorMean"),"") tcltk::tcl("set", getAmelia("priorSD"),"") tcltk::tcl("set", getAmelia("priorConf"),"") return() } updateTree <- function() { allrows <- paste(tcltk::tcl(getAmelia("priors.tree"),"children","")) tcltk::tkdelete(getAmelia("priors.tree"), allrows) if (is.null(getAmelia("priorsmat"))) { return() } varnames <- names(getAmelia("amelia.data")) cases <- paste(rownames(getAmelia("amelia.data")), ") ", getAmelia("amelia.data")[,getAmelia("csvar")]," ", getAmelia("amelia.data")[,getAmelia("tsvar")], sep="") cases <- c("(whole variable)", cases) for (i in 1:nrow(getAmelia("priorsmat"))) { thiscase <- cases[getAmelia("priorsmat")[i,1]+1] thisvar <- varnames[getAmelia("priorsmat")[i,2]] tcltk::tkinsert(getAmelia("priors.tree"),"","end", id = paste(getAmelia("priorsmat")[i,1],getAmelia("priorsmat")[i,2],sep="-"), values = c(thisvar,getAmelia("priorsmat")[i,c(3,4)]), text = thiscase,tag="normal") } return() } dropPriors <- function() { sel.pri <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("priors.tree"), "selection")), " ")[[1]] pri.mat.rows <- c() for (i in 1:length(sel.pri)) { pri.mat.rows <- c(pri.mat.rows, tcltk::tclvalue(tcltk::tkindex(getAmelia("priors.tree"),sel.pri[i]))) } pri.mat.rows <- as.numeric(pri.mat.rows) + 1 putAmelia("priorsmat", getAmelia("priorsmat")[-pri.mat.rows,, drop = FALSE]) tcltk::tkdelete(getAmelia("priors.tree"),paste(tcltk::tcl(getAmelia("priors.tree"), 
"selection"))) if (nrow(getAmelia("priorsmat")) == 0) putAmelia("priorsmat", NULL) return(NULL) } RightClick <- function(x, y) { # x and y are the mouse coordinates rootx <- as.integer(tcltk::tkwinfo("rootx", getAmelia("priors.tree"))) # tcltk::tkwinfo() return several infos rooty <- as.integer(tcltk::tkwinfo("rooty", getAmelia("priors.tree"))) xTxt <- as.integer(x) + rootx yTxt <- as.integer(y) + rooty # Create a Tcl command in a character string and run it tcltk::.Tcl(paste("tk_popup", tcltk::.Tcl.args(getAmelia("pri.right.click"), xTxt, yTxt))) } putAmelia("temp.priorsmat", getAmelia("priorsmat")) if (exists("priorsWindow", envir=ameliaEnv)) { updateTree() resetEntries() tcltk::tkwm.deiconify(getAmelia("priorsWindow")) tcltk::tkraise(getAmelia("priorsWindow")) tcltk::tkgrab(getAmelia("priorsWindow")) return() } putAmelia("priorsWindow", tcltk::tktoplevel()) tcltk::tkwm.title(getAmelia("priorsWindow"),"Observational Priors") priorsBox <- tcltk::ttkframe(getAmelia("priorsWindow")) prior.frame <- tcltk::ttkpanedwindow(priorsBox, orient = "horizontal") prior.disp <- tcltk::ttklabelframe(prior.frame, text = "Observational priors ", height = 200, width = 200) prior.add <- tcltk::ttklabelframe(prior.frame, text = "Add priors", height = 200, width = 200) putAmelia("prior.add.but", tcltk::ttkbutton(prior.add, text = "Add", command = function() onOK())) yscr <- tcltk::ttkscrollbar(prior.disp, orient = "vertical", command=function(...)tcltk::tkyview(getAmelia("priors.tree"),...)) xscr <- tcltk::ttkscrollbar(prior.disp, orient = "horizontal", command=function(...)tcltk::tkxview(getAmelia("priors.tree"),...)) putAmelia("priors.tree", tcltk::ttktreeview(prior.disp, columns = "Variable Mean SD", yscrollcommand=function(...)tcltk::tkset(yscr,...), xscrollcommand=function(...)tcltk::tkset(xscr,...))) putAmelia("pri.right.click",tcltk::tkmenu(getAmelia("priors.tree"), tearoff = FALSE) ) tcltk::tkadd(getAmelia("pri.right.click"), "command", label = "Remove selected priors", command = function() dropPriors()) tcltk::tkbind(getAmelia("priors.tree"), "<Button-3>", RightClick) tcltk::tcl(getAmelia("priors.tree"), "column", "#0", width = 120) tcltk::tcl(getAmelia("priors.tree"), "column", 0, width = 80, anchor = "center") tcltk::tcl(getAmelia("priors.tree"), "column", 1, width = 40, anchor = "center") tcltk::tcl(getAmelia("priors.tree"), "column", 2, width = 40, anchor = "center") tcltk::tcl(getAmelia("priors.tree"), "heading", "#0", text = "Case") tcltk::tcl(getAmelia("priors.tree"), "heading", 0, text = "Variable") tcltk::tcl(getAmelia("priors.tree"), "heading", 1, text = "Mean") tcltk::tcl(getAmelia("priors.tree"), "heading", 2, text = "SD") ## Windows 7 doesn't handle treeview selection correctly if (.Platform$OS.type == "windows") { tcltk::tktag.configure(getAmelia("priors.tree"),"normal", background="white") tcltk::tktag.configure(getAmelia("priors.tree"),"selected", background="SystemHighlight") tcltk::tkbind(getAmelia("priors.tree"),"<<TreeviewSelect>>",function() refreshSelection(getAmelia("priors.tree"))) } putAmelia("addpri.note", tcltk::ttknotebook(prior.add)) add.dist.frame <- tcltk::ttkframe(getAmelia("addpri.note")) add.range.frame <- tcltk::ttkframe(getAmelia("addpri.note")) missingCases <- which(!complete.cases(getAmelia("amelia.data"))) anyMissing <- apply(getAmelia("amelia.data"), 2, function(x) any(is.na(x))) cases1 <- paste(rownames(getAmelia("amelia.data"))[missingCases], ") ", getAmelia("amelia.data")[missingCases, getAmelia("csvar")]," ", getAmelia("amelia.data")[missingCases, 
getAmelia("tsvar")], sep="") cases <- c("(whole variable)",cases1) if (!is.null(getAmelia("priorsmat"))) updateTree() vars <- getAmelia("varnames")[anyMissing] ## Distribution prior note putAmelia("add.dist.case",tcltk::ttkcombobox(add.dist.frame, values=cases, state="readonly", width=15)) putAmelia("add.dist.var",tcltk::ttkcombobox(add.dist.frame, values=vars, state="readonly", width=15)) tcltk::tkbind(getAmelia("add.dist.case"), "<<ComboboxSelected>>", function(...) setMissingVars()) tcltk::tkgrid(tcltk::ttklabel(add.dist.frame, text="Case:"), column=1, row=1, sticky = "e") tcltk::tkgrid(tcltk::ttklabel(add.dist.frame, text="Variable:"), column=1, row=2, sticky = "e") tcltk::tcl(getAmelia("add.dist.case"), "current", 0) tcltk::tcl(getAmelia("add.dist.var"), "current", 0) tcltk::tkconfigure(getAmelia("add.dist.var"), postcommand=function(...) setMissingVars()) tcltk::tkgrid(getAmelia("add.dist.case"), column=2, row=1, pady=3) tcltk::tkgrid(getAmelia("add.dist.var"), column=2, row=2, pady=3) putAmelia("priorMean", tcltk::tclVar()) putAmelia("priorSD", tcltk::tclVar()) tcltk::tkgrid(tcltk::ttkframe(add.dist.frame, width = 150, height = 0), column = 1, row = 0) putAmelia("meanBox", tcltk::ttkentry(add.dist.frame, textvar=getAmelia("priorMean"), validate="key", validatecommand = function(P) validateNumeric(P))) putAmelia("sdBox", tcltk::ttkentry(add.dist.frame, textvar=getAmelia("priorSD"), validate="key", validatecommand = function(P) validateSD(P))) tcltk::tkgrid(tcltk::ttklabel(add.dist.frame, text="Mean:"), column=1, row=3, sticky = "e") tcltk::tkgrid(tcltk::ttklabel(add.dist.frame, text="Standard Deviation:"), column=1, row=4, sticky = "e") tcltk::tkgrid(getAmelia("meanBox"), column=2, row=3, pady=5, padx=5) tcltk::tkgrid(getAmelia("sdBox"), column=2, row=4, pady=5, padx=5) ## Range prior note putAmelia("add.range.case",tcltk::ttkcombobox(add.range.frame, values=cases, state="readonly", width=15)) putAmelia("add.range.var",tcltk::ttkcombobox(add.range.frame, values=vars, state="readonly", width=15)) tcltk::tkbind(getAmelia("add.range.case"), "<<ComboboxSelected>>", function(...) setMissingRangeVars()) tcltk::tkgrid(tcltk::ttklabel(add.range.frame, text="Case:"), column=1, row=1, sticky = "e") tcltk::tkgrid(tcltk::ttklabel(add.range.frame, text="Variable:"), column=1, row=2, sticky = "e") tcltk::tcl(getAmelia("add.range.case"), "current", 0) tcltk::tcl(getAmelia("add.range.var"), "current", 0) tcltk::tkconfigure(getAmelia("add.range.var"), postcommand=function(...) 
setMissingRangeVars()) tcltk::tkgrid(getAmelia("add.range.case"), column=2, row=1, pady=3) tcltk::tkgrid(getAmelia("add.range.var"), column=2, row=2, pady=3) tcltk::tkgrid(tcltk::ttkframe(add.range.frame, width = 150, height = 0), column = 1, row = 0) putAmelia("priorMax", tcltk::tclVar()) putAmelia("priorMin", tcltk::tclVar()) putAmelia("priorConf", tcltk::tclVar()) putAmelia("minBox", tcltk::ttkentry(add.range.frame, textvar=getAmelia("priorMin"), validate="key", validatecommand = function(P) validateNumeric(P))) putAmelia("maxBox", tcltk::ttkentry(add.range.frame, textvar=getAmelia("priorMax"), validate="key", validatecommand = function(P) validateNumeric(P))) putAmelia("confBox", tcltk::ttkentry(add.range.frame, textvar=getAmelia("priorConf"), validate="key", validatecommand = function(P) validateNumeric(P))) tcltk::tkgrid(tcltk::ttklabel(add.range.frame, text="Minimum:"), column=1, row=3, sticky = "e") tcltk::tkgrid(tcltk::ttklabel(add.range.frame, text="Maximum:"), column=1, row=4, sticky = "e") tcltk::tkgrid(tcltk::ttklabel(add.range.frame, text="Confidence:"), column=1, row=5, sticky = "e") #tcltk::tkgrid(tkframe(add.range.frame, width = 20, height = 0), column = 1, row = 6) tcltk::tkgrid(getAmelia("minBox"), column=2, row=3, pady=5, padx=5) tcltk::tkgrid(getAmelia("maxBox"), column=2, row=4, pady=5, padx=5) tcltk::tkgrid(getAmelia("confBox"), column=2, row=5, pady=5, padx=5) tcltk::tkadd(getAmelia("addpri.note"), add.dist.frame, text = "Add Distribution Prior") tcltk::tkadd(getAmelia("addpri.note"), add.range.frame, text = "Add Range Prior") tcltk::tkgrid(getAmelia("addpri.note"), row = 1, sticky = "nsew") tcltk::tkgrid(getAmelia("prior.add.but"), sticky = "se", padx = 10, pady = 10) but.frame <- tcltk::ttkframe(priorsBox) putAmelia("pri.ok", tcltk::ttkbutton(but.frame, text = "OK", command = function(){tcltk::tkwm.withdraw(getAmelia("priorsWindow"));tcltk::tkgrab.release(getAmelia("priorsWindow"));tcltk::tkfocus(getAmelia("gui"))}, width = 10)) putAmelia("pri.can", tcltk::ttkbutton(but.frame, text = "Cancel", width = 10, command = function() {cancelPriors();tcltk::tkwm.withdraw(getAmelia("priorsWindow"));tcltk::tkgrab.release(getAmelia("priorsWindow"));tcltk::tkfocus(getAmelia("gui"))})) tcltk::tkgrid(getAmelia("priors.tree"), row = 1, column = 1, sticky = "nsew") tcltk::tkgrid(yscr, row = 1, column = 2, sticky = "nsew") tcltk::tkgrid(xscr, row = 2, column = 1, sticky = "nsew") tcltk::tkgrid.rowconfigure(prior.disp, 1, weight = 1) tcltk::tkgrid.columnconfigure(prior.disp, 1, weight = 1) tcltk::tkadd(prior.frame, prior.add) tcltk::tkadd(prior.frame, prior.disp) tcltk::tkgrid(prior.frame, row = 1, column = 0, columnspan = 2, padx = 10, pady = 10, sticky = "news") tcltk::tkgrid(getAmelia("pri.ok"), row = 0, column = 1, sticky = "ne", padx = 10, pady = 10) tcltk::tkgrid(getAmelia("pri.can"), row = 0, column = 2, sticky = "ne", padx = 10, pady = 10) tcltk::tkgrid(but.frame, row = 2, column = 1, sticky = "ne") tcltk::tkgrid.rowconfigure(priorsBox, 1, weight = 1) tcltk::tkgrid.columnconfigure(priorsBox, 0, weight = 1) tcltk::tkgrid.columnconfigure(priorsBox, 1, weight = 1) tcltk::tkgrid(priorsBox, row = 0, column = 0, sticky = "news") tcltk::tkgrid.rowconfigure(getAmelia("priorsWindow"), 0, weight = 1) tcltk::tkgrid.columnconfigure(getAmelia("priorsWindow"), 0, weight = 1) tcltk::tkwm.protocol(getAmelia("priorsWindow"), "WM_DELETE_WINDOW", function() {tcltk::tkwm.withdraw(getAmelia("priorsWindow"));tcltk::tkgrab.release(getAmelia("priorsWindow"));tcltk::tkfocus(getAmelia("gui"))}) 
centerModalDialog(getAmelia("priorsWindow"), resize = TRUE) bindTooltip(widget = "priors.tree", "Currently set observation-level priors for the data. You can remove these using the right-click menu.") bindTooltip(widget = "pri.ok", tip = "Save changes and close window.") bindTooltip(widget = "pri.can", tip = "Cancel any changes and close window.") bindTooltip(widget = "prior.add.but", tip = "Add the above prior for the selected observation and variable to the list of priors for this data set.") bindTooltip(widget = "meanBox", tip = "The mean of a normal prior distribution on the value of the selected missing cell.") bindTooltip(widget = "sdBox", tip = "The standard deviation of a normal prior distribution on the value of the selected missing cell.") bindTooltip(widget = "add.dist.case", tip = "Select the case name or row number of the case for the cell-level prior.") bindTooltip(widget = "add.dist.var", tip = "Select the variable name for the cell-level prior.") bindTooltip(widget = "confBox", tip = "A confidence level between 0 and 1 for the confidence bound on the distribution of the selected missing cell. These confidence bounds are converted into a normal distribution prior on the value.") bindTooltip(widget = "minBox", tip = "A lower confidence bound on the distribution of the selected missing cell. These confidence bounds are converted into a normal distribution prior on the value.") bindTooltip(widget = "maxBox", tip = "An upper confidence bound on the distribution of the selected missing cell. These confidence bounds are converted into a normal distribution prior on the value.") bindTooltip(widget = "add.range.case", tip = "Select the case name or row number of the case for the cell-level prior.") bindTooltip(widget = "add.range.var", tip = "Select the variable name for the cell-level prior.") } gui.diag.setup <- function() { if (exists("diagWindow", envir = ameliaEnv)) { tcltk::tkwm.deiconify(getAmelia("diagWindow")) tcltk::tkraise(getAmelia("diagWindow")) tcltk::tkfocus(getAmelia("diagWindow")) return() } putAmelia("diagWindow", tcltk::tktoplevel()) tcltk::tkwm.title(getAmelia("diagWindow"), "Diagnostics") diagBox <- tcltk::ttkframe(getAmelia("diagWindow")) gui.top<-tcltk::ttkpanedwindow(diagBox, orient = "vertical") var.diags <- tcltk::ttklabelframe(gui.top, text = "Individual Variable Plots", width = 100, height = 100) tscs.diags <- tcltk::ttklabelframe(gui.top, text = "Time-Series Cross-Sectional Plots", width = 100, height = 100) disp.diags <- tcltk::ttklabelframe(gui.top, text = "Overdispersion Plots", width = 100, height = 100) tcltk::tcl("set","indvar","") ## get variable names that are actually numeric variables <- getAmelia("varnames") variables <- variables[sapply(getAmelia("amelia.data"), is.numeric)] putAmelia("var.diags.combo", tcltk::ttkcombobox(var.diags,textvariable="indvar", values = variables, state = "readonly")) indvar.lab <- tcltk::ttklabel(var.diags, text = "Variable:") var.button.frame <- tcltk::ttkframe(var.diags) putAmelia("diag.but.compare",tcltk::ttkbutton(var.button.frame, text="Compare", command = function() compare.density(getAmelia("ameliaObject"), var=tcltk::tclvalue("indvar"),frontend=TRUE))) putAmelia("diag.overimp",tcltk::ttkbutton(var.button.frame,text="Overimpute",state="normal", command = function() overimpute(getAmelia("ameliaObject"), var=tcltk::tclvalue("indvar"),frontend=TRUE))) tcltk::tcl(getAmelia("var.diags.combo"), "current", 0) tcltk::tkgrid(indvar.lab, row = 0, column = 0, padx = 5) tcltk::tkgrid(getAmelia("var.diags.combo"), row = 
0, column = 1, padx = 10, pady = 10) tcltk::tkgrid(getAmelia("diag.but.compare"), row = 0, column = 0, padx = 10, pady = 10) tcltk::tkgrid(getAmelia("diag.overimp"), row = 0, column = 1, padx = 10, pady = 10) tcltk::tkgrid(var.button.frame, row =0, column = 2) tcltk::tkgrid(tcltk::ttkframe(var.diags, width = 50, height = 0), row = 1) ## tscs plots csvar <- getAmelia("ameliaObject")$arguments$cs tsvar <- getAmelia("ameliaObject")$arguments$ts ## can't do tscsplots for the ts or cs variable tscsvariables <- variables[variables != getAmelia("varnames")[csvar] & variables != getAmelia("varnames")[tsvar]] if (is.null(tsvar) | is.null(csvar)) { st <- "disabled" but.st <- st } else { st <- "readonly" but.st <- "normal" } if (!is.null(csvar)) { cases <- unique(getAmelia("amelia.data")[,csvar]) if (is.factor(getAmelia("amelia.data")[,csvar])) { cases <- levels(getAmelia("amelia.data")[,csvar])[cases] } } else { cases <- 1:nrow(getAmelia("amelia.data")) } tcltk::tcl("set", "casename","") tcltk::tcl("set", "tscsvarname", "") putAmelia("tscs.case.combo", tcltk::ttkcombobox(tscs.diags,textvariable="casename", values = cases, state = st)) putAmelia("tscs.var.combo", tcltk::ttkcombobox(tscs.diags,textvariable="tscsvarname", values = tscsvariables, state = st)) putAmelia("tscs.plot.but", tcltk::ttkbutton(tscs.diags, text = "TSCS Plot", state = but.st, command = function() tscsPlot(getAmelia("ameliaObject"), cs = tcltk::tclvalue("casename"), var = tcltk::tclvalue("tscsvarname"), frontend = TRUE))) if (st == "readonly") { tcltk::tcl(getAmelia("tscs.case.combo"), "current", 0) tcltk::tcl(getAmelia("tscs.var.combo"), "current", 0) } tcltk::tkgrid(tcltk::ttklabel(tscs.diags, text = "Case:"), row = 0, column = 0, sticky = "e", padx = 5) tcltk::tkgrid(getAmelia("tscs.case.combo"), row = 0, column = 1, padx = 10, pady = 10) tcltk::tkgrid(tcltk::ttklabel(tscs.diags, text = "Variable:"), row = 1, column = 0, sticky = "e", padx = 5) tcltk::tkgrid(getAmelia("tscs.var.combo"), row = 1, column = 1, padx = 10, pady = 10) tcltk::tkgrid(getAmelia("tscs.plot.but"), row = 1, column = 2, padx = 10, pady = 10, sticky = "se") tcltk::tkgrid(tcltk::ttkframe(tscs.diags, width = 50, height = 0), row = 2) dimvalue<-tcltk::tclVar("1") putAmelia("onedim", tcltk::ttkradiobutton(disp.diags, variable=dimvalue, value="1")) putAmelia("twodims", tcltk::ttkradiobutton(disp.diags, variable=dimvalue, value="2")) disp.imps.tcl<-tcltk::tclVar("5") putAmelia("disp.imps", tcltk::ttkentry(disp.diags,width="5",textvariable=disp.imps.tcl)) putAmelia("disp.but", tcltk::ttkbutton(disp.diags,text="Overdisperse",state="normal", command = function() disperse(m=as.numeric(tcltk::tclvalue(disp.imps.tcl)), dims=as.numeric(tcltk::tclvalue(dimvalue)),frontend=TRUE,output=getAmelia("ameliaObject")))) tcltk::tkgrid(tcltk::ttklabel(disp.diags,text="Number of dispersions:"),row=2,column=1, sticky="e") tcltk::tkgrid(tcltk::ttkframe(disp.diags, width = 50, height = 0), row = 5) tcltk::tkgrid(getAmelia("disp.imps"),column=2,row=2,sticky="nw", padx = 10, pady = 10) tcltk::tkgrid(tcltk::ttklabel(disp.diags,text="One Dimension:"),row=3,column=1, sticky = "e") tcltk::tkgrid(tcltk::ttklabel(disp.diags,text="Two Dimensions:"),row=4,column=1, sticky = "e") tcltk::tkgrid(getAmelia("onedim"),row=3,column=2,padx=10,pady=5) tcltk::tkgrid(getAmelia("twodims"),row=4,column=2,padx=10) tcltk::tkgrid(getAmelia("disp.but"),row=4,column=3,padx=15, pady=10,sticky="news") tcltk::tkadd(gui.top, var.diags) tcltk::tkadd(gui.top, tscs.diags) tcltk::tkadd(gui.top, disp.diags) 
tcltk::tkgrid(gui.top, row = 0, padx = 20, pady = 20) tcltk::tkgrid(diagBox, sticky = "news", row = 0, column = 0) tcltk::tkgrid.rowconfigure(getAmelia("diagWindow"), 0, weight = 1) tcltk::tkgrid.columnconfigure(getAmelia("diagWindow"), 0, weight = 1) tcltk::tkwm.protocol(getAmelia("diagWindow"), "WM_DELETE_WINDOW", function() {tcltk::tkwm.withdraw(getAmelia("diagWindow"));tcltk::tkgrab.release(getAmelia("diagWindow"));tcltk::tkfocus(getAmelia("gui"))}) centerModalDialog(getAmelia("diagWindow"), resize = FALSE) bindTooltip(widget = "var.diags.combo", tip = "Variable for either the density comparison plot or the overimputation plot.") bindTooltip(widget = "tscs.var.combo", tip = "Variable to use for the time-series cross-sectional plot.") bindTooltip(widget = "tscs.case.combo", tip = "Case to use for the time-series cross-sectional plot.") bindTooltip(widget = "diag.but.compare", tip = "Compare densities of the imputed values vs. observed values.") bindTooltip(widget = "diag.overimp", tip = "Overimpute and graph confidence intervals. ") bindTooltip(widget = "disp.but", tip = "Plot the convergence of the EM algorithm from overdispersed starting values.") bindTooltip(widget = "tscs.plot.but", tip = "Plot a time-series within one cross-section with imputation distributions in red.") bindTooltip(widget = "disp.imps", tip = "Number of different overdispersed starting values to use.") bindTooltip(widget = "onedim", tip = "Number of dimensions to visualize convergence.") bindTooltip(widget = "twodims", tip = "Number of dimensions to visualize convergence.") } ## the following functions have been imported from Rcmdr putAmelia <- function(x, value) { assign(x, value, envir = ameliaEnv) } getAmelia <- function(x, mode="any") get(x, envir = ameliaEnv, mode = mode, inherits = FALSE) getAmeliaInd <- function(x) { as.numeric(tcltk::tkindex(getAmelia(x), "current")) } ameliaTclSet <- function(name, value){ name <- ls(unclass(getAmelia(name))$env) tcltk::tcl("set", name, value) } save.log <- function() { file.select <- tcltk::tclvalue(tcltk::tkgetSaveFile(parent=getAmelia("gui"), filetypes="{{Text files} {*.txt}} {{All files} *}")) cat(getAmelia("output.log"), file = file.select) } show.output.log <- function() { RightClick <- function(x, y) { # x and y are the mouse coordinates rootx <- as.integer(tcltk::tkwinfo("rootx", getAmelia("log.viewer"))) # tcltk::tkwinfo() return several infos rooty <- as.integer(tcltk::tkwinfo("rooty", getAmelia("log.viewer"))) xTxt <- as.integer(x) + rootx yTxt <- as.integer(y) + rooty # Create a Tcl command in a character string and run it tcltk::.Tcl(paste("tk_popup", tcltk::.Tcl.args(getAmelia("log.right.click"), xTxt, yTxt))) } if (exists("log.top", envir = ameliaEnv)) { tcltk::tkconfigure(getAmelia("log.viewer"), state = "normal") tcltk::tkdelete(getAmelia("log.viewer"), "0.0", "end") tcltk::tkinsert(getAmelia("log.viewer"), "end", paste(getAmelia("output.log"), collapse = "")) tcltk::tkconfigure(getAmelia("log.viewer"), state = "disabled") tcltk::tkwm.deiconify(getAmelia("log.top")) tcltk::tkraise(getAmelia("log.top")) tcltk::tkfocus(getAmelia("log.top")) return() } putAmelia("log.top", tcltk::tktoplevel()) tcltk::tkwm.title(getAmelia("log.top"), "Output Log") scr <- tcltk::ttkscrollbar(getAmelia("log.top"), command=function(...)tcltk::tkyview(getAmelia("log.viewer"),...)) putAmelia("log.viewer", tcltk::tktext(getAmelia("log.top"), width = 80, height = 25, yscrollcommand=function(...)tcltk::tkset(scr,...))) tcltk::tkinsert(getAmelia("log.viewer"), "end", 
paste(getAmelia("output.log"), collapse = "")) tcltk::tkconfigure(getAmelia("log.viewer"), state = "disabled") main.menu <- tcltk::tkmenu(getAmelia("log.top")) main.menu.file <- tcltk::tkmenu(main.menu, tearoff=0) tcltk::tkadd(main.menu.file,"command",label="Save log file",command=function() save.log()) tcltk::tkadd(main.menu.file,"command",label="Close",command=function(){tcltk::tkwm.withdraw(getAmelia("log.top"));tcltk::tkgrab.release(getAmelia("log.top"));tcltk::tkfocus(getAmelia("gui"))}) tcltk::tkadd(main.menu,"cascade",label="File",menu=main.menu.file) tcltk::tkconfigure(getAmelia("log.top"),menu=main.menu) putAmelia("log.right.click",tcltk::tkmenu(getAmelia("log.viewer"), tearoff = FALSE) ) tcltk::tkadd(getAmelia("log.right.click"), "command", label = "Copy <Ctrl-V>", command = function() tcltk::tkevent.generate(getAmelia("log.viewer"),"<<Copy>>")) tcltk::tkbind(getAmelia("log.viewer"), "<Button-3>", RightClick) #tcltk::tkgrid(main.menu, row = 0, sticky = "ew") tcltk::tkgrid(getAmelia("log.viewer"), row = 0, column = 0, sticky = "news") tcltk::tkgrid(scr, row =0, column = 1, sticky = "ns") #tcltk::tkgrid.columnconfigure(log.top, 1, weight = 1) tcltk::tkgrid.columnconfigure(getAmelia("log.top"), 0, weight = 1) tcltk::tkgrid.rowconfigure(getAmelia("log.top"), 0, weight = 1) tcltk::tkwm.protocol(getAmelia("log.top"), "WM_DELETE_WINDOW", function() {tcltk::tkwm.withdraw(getAmelia("log.top"));tcltk::tkgrab.release(getAmelia("log.top"));tcltk::tkfocus(getAmelia("gui"))}) centerModalDialog(getAmelia("log.top"), resize=TRUE) } after <- function(ms, func) { tcltk::.Tcl(paste("after", ms, tcltk::.Tcl.callback(func))) } cancel.after <- function(id) { invisible(tcltk::.Tcl(paste("after","cancel", id))) } bindTooltip <- function(widget, tip) { after.name <- paste(widget, "after", sep = ".") tip.name <- paste(widget, "tip", sep = ".") # tcltk::tkbind(getAmelia(widget), "<Any-Enter>", showTooltip(widget, tip)) tcltk::tkbind(getAmelia(widget), "<Any-Enter>", function() putAmelia(after.name, after(400, showTooltip(widget, tip)))) tcltk::tkbind(getAmelia(widget), "<Any-Leave>", function() {killTooltip(widget) cancel.after(getAmelia(after.name))}) tcltk::tkbind(getAmelia(widget), "<Any-Button>", function() cancel.after(getAmelia(after.name))) tcltk::tkbind(getAmelia(widget), "<Any-KeyPress>", function() cancel.after(getAmelia(after.name))) } showTooltip <- function(widget, text) { function() { if (getAmelia(widget)$ID != tcltk::tclvalue(tcltk::tkwinfo("containing", tcltk::tkwinfo("pointerx","."), tcltk::tkwinfo("pointery",".")))) { return() } tip.name <- paste(widget, "tip", sep = ".") tiplabel.name <- paste(widget, "tiplabel",sep=".") if (exists(tip.name, envir = ameliaEnv)) { if (as.logical(tcltk::tkwinfo("exists",getAmelia(tip.name)))) { if (as.logical(tcltk::tkwinfo("ismapped",getAmelia(tip.name)))) { return() } } } scrh <- tcltk::tclvalue(tcltk::tkwinfo("screenheight", getAmelia(widget))) scrw <- tcltk::tclvalue(tcltk::tkwinfo("screenwidth", getAmelia(widget))) tcltk::tclServiceMode(on=FALSE) if (!exists(tip.name, envir = ameliaEnv)) { if (.Platform$OS.type == "windows") { borderColor <- "SystemWindowFrame" bgColor <- "SystemWindow" fgColor <- "SystemWindowText" } else { borderColor <- "black" bgColor <- "lightyellow" fgColor <- "black" } putAmelia(tip.name, tcltk::tktoplevel(getAmelia(widget), bd = 1, bg = borderColor, relief = "raised")) tcltk::tkwm.geometry(getAmelia(tip.name), paste("+",scrh,"+",scrw,sep="")) tcltk::tcl("wm","overrideredirect", getAmelia(tip.name), 1) putAmelia(tiplabel.name, 
tcltk::ttklabel(getAmelia(tip.name), background = bgColor, foreground = fgColor, text = text, justify = "left", wraplength=300)) tcltk::tkpack(getAmelia(tiplabel.name)) tcltk::tkbind(getAmelia(tip.name), "<Any-Enter>", function() tcltk::tkwm.withdraw(getAmelia(tip.name))) tcltk::tkbind(getAmelia(tip.name), "<Any-Leave>", function() tcltk::tkwm.withdraw(getAmelia(tip.name))) tcltk::tkbind(getAmelia(tip.name), "<Any-Button>", function() tcltk::tkwm.withdraw(getAmelia(tip.name))) } width <- as.numeric(tcltk::tclvalue(tcltk::tkwinfo("reqwidth", getAmelia(tiplabel.name)))) height <- as.numeric(tcltk::tclvalue(tcltk::tkwinfo("reqheight",getAmelia(tiplabel.name)))) posX <- as.numeric(tcltk::tclvalue(tcltk::tkwinfo("pointerx","."))) posY <- as.numeric(tcltk::tclvalue(tcltk::tkwinfo("pointery","."))) + 25 screen <- as.numeric(tcltk::tclvalue(tcltk::tkwinfo("screenwidth","."))) # a.) Ad-hockery: Set positionX so the entire tooltip widget will be displayed. if ((posX + width) > screen) { posX <- posX - ((posX + width) - screen) - 3 } tcltk::tclServiceMode(on = TRUE) tcltk::tkwm.geometry(getAmelia(tip.name), paste("+",posX,"+",posY,sep = "")) tcltk::tkwm.deiconify(getAmelia(tip.name)) tcltk::tkraise(getAmelia(tip.name)) } } killTooltip <- function(widget) { tip.name <- paste(widget,"tip", sep = ".") if (exists(tip.name, envir = ameliaEnv)) { tcltk::tkwm.withdraw(getAmelia(tip.name)) } } refreshSelection <- function(tree) { all <- strsplit(tcltk::tclvalue(tcltk::tcl(tree,"children","")), " ")[[1]] sel <- strsplit(tcltk::tclvalue(tcltk::tcl(tree, "selection")), " ")[[1]] bandTree() for (i in sel) { tcltk::tcl(tree, "item", i, tags = "selected") } return(NULL) } variableOptionStatus <- function(sel) { states <- rep("normal", 15) classes <- sapply(getAmelia("amelia.data"), class)[sel] if (length(sel) ==0) { states <- rep("disabled", 15) return(states) } if (length(sel) > 1) states[c(1:4,15)] <- "disabled" if (!is.null(getAmelia("tsvar"))) if (getAmelia("tsvar") %in% sel) states[c(1:2,5:9,12:13,15)] <- "disabled" else states[3] <- "disabled" if (!is.null(getAmelia("csvar"))) if (getAmelia("csvar") %in% sel) states[c(1:2,5:9,12:13,15)] <- "disabled" else states[4] <- "disabled" if (is.null(getAmelia("tsvar"))) states[c(3,6:9)] <- "disabled" if (is.null(getAmelia("csvar"))) states[4] <- "disabled" if ("factor" %in% classes | "character" %in% classes) states[c(11,15)] <- "disabled" if (is.null(getAmelia("amelia.data"))) states <- rep("disabled", 15) return(states) } variableOptionsPost <- function() { sel <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"), "selection")), " ")[[1]] states <- variableOptionStatus(sel) for (i in 0:14) { if (tcltk::tclvalue(tcltk::tktype(getAmelia("main.menu.variables"), i)) != "separator") tcltk::tkentryconfigure(getAmelia("main.menu.variables"),i, state = states[i+1]) } return(NULL) } mainTreeRightClick <- function(x, y) { # x and y are the mouse coordinates rootx <- as.integer(tcltk::tkwinfo("rootx", getAmelia("main.tree"))) # tcltk::tkwinfo() return several infos rooty <- as.integer(tcltk::tkwinfo("rooty", getAmelia("main.tree"))) xTxt <- as.integer(x) + rootx yTxt <- as.integer(y) + rooty # Create a Tcl command in a character string and run it sel <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"), "selection")), " ")[[1]] states <- variableOptionStatus(sel) main.tree.right.click <- tcltk::tkmenu(getAmelia("main.tree"), tearoff = FALSE) main.tree.trans <- tcltk::tkmenu(getAmelia("main.tree"), tearoff = FALSE) tcltk::tkadd(main.tree.right.click, 
"command", label = "Set as Time-Series Variable", command = setTS, state = states[1]) tcltk::tkadd(main.tree.right.click, "command", label = "Set as Cross-Section Variable", command = setCS, state = states[2]) tcltk::tkadd(main.tree.right.click, "command", label = "Unset as Time-Series Variable", command = unsetTS, state = states[3]) tcltk::tkadd(main.tree.right.click, "command", label = "Unset as Cross-Section Variable", command = unsetCS, state = states[4]) tcltk::tkadd(main.tree.right.click,"separator") tcltk::tkadd(main.tree.right.click, "command", label = "Add Lag", command = function() addLag(), state = states[6]) tcltk::tkadd(main.tree.right.click, "command", label = "Add Lead", command = function() addLead(), state = states[7]) tcltk::tkadd(main.tree.right.click, "command", label = "Remove Lag", command = function() dropLag(), state = states[8]) tcltk::tkadd(main.tree.right.click, "command", label = "Remove Lead", command = function() dropLead(), state = states[9]) tcltk::tkadd(main.tree.right.click,"separator") tcltk::tkadd(main.tree.right.click, "command", label = "Plot Histogram(s) of Selected", command = plotHist, state = states[10]) if (.Platform$OS.type == "windows") { tcltk::tkadd(main.tree.trans, "command", label = "Log", command = function(x) setTrans("logs")) tcltk::tkadd(main.tree.trans, "command", label = "Square Root", command = function(x) setTrans("sqrt")) tcltk::tkadd(main.tree.trans, "command", label = "Logistic", command = function(x) setTrans("lgstc")) tcltk::tkadd(main.tree.trans, "command", label = "Nominal", command = function(x) setTrans("noms")) tcltk::tkadd(main.tree.trans, "command", label = "Ordinal", command = function(x) setTrans("ords")) tcltk::tkadd(main.tree.trans, "command", label = "ID Variable", command = function(x) setTrans("idvar")) tcltk::tkadd(main.tree.right.click, "cascade", label = "Add Transformation...", menu = main.tree.trans, state = states[12]) } else { tcltk::tkadd(main.tree.right.click, "command", label = "Mark as Log", command = function(x) setTrans("logs"), state = states[12]) tcltk::tkadd(main.tree.right.click, "command", label = "Mark as Square Root", command = function(x) setTrans("sqrt"), state = states[12]) tcltk::tkadd(main.tree.right.click, "command", label = "Mark as Logistic", command = function(x) setTrans("lgstc"), state = states[12]) tcltk::tkadd(main.tree.right.click, "command", label = "Mark as Nominal", command = function(x) setTrans("noms"), state = states[12]) tcltk::tkadd(main.tree.right.click, "command", label = "Mark as Ordinal", command = function(x) setTrans("ords"), state = states[12]) tcltk::tkadd(main.tree.right.click, "command", label = "Mark as ID Variable", command = function(x) setTrans("idvar"), state = states[12]) } tcltk::tkadd(main.tree.right.click, "command", label = "Remove Transformations", command = dropTrans, state = states[13]) tcltk::tkadd(main.tree.right.click,"separator") tcltk::tkadd(main.tree.right.click, "command", label = "Add or Edit Bounds", command = addBounds, state = states[15]) tcltk::tkpopup(main.tree.right.click, xTxt, yTxt) } addLag <- function() { sel <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"), "selection")), " ")[[1]] tmp <- getAmelia("lags") tmp[sel] <- 1 putAmelia("lags", tmp) for (i in sel) tcltk::tkset(getAmelia("main.tree"), i, "lag", "X") return() } addLead <- function() { sel <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"), "selection")), " ")[[1]] tmp <- getAmelia("leads") tmp[sel] <- 1 putAmelia("leads", tmp) for (i in sel) 
tcltk::tkset(getAmelia("main.tree"), i, "lead", "X") return() } dropLag <- function() { sel <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"), "selection")), " ")[[1]] tmp <- getAmelia("lags") tmp[sel] <- 0 putAmelia("lags", tmp) for (i in sel) tcltk::tkset(getAmelia("main.tree"), i, "lag", "") return() } dropLead <- function() { sel <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"), "selection")), " ")[[1]] tmp <- getAmelia("leads") tmp[sel] <- 0 putAmelia("leads", tmp) for (i in sel) tcltk::tkset(getAmelia("main.tree"), i, "lead", "") return() } setTrans <- function(trans) { all.trans <- c(logs = "Log",sqrt = "Square Root", lgstc = "Logistic", noms = "Nominal", ords = "Ordinal", idvar = "ID") sel <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"), "selection")), " ")[[1]] tmp <- getAmelia(trans) tmp[sel] <- 1 putAmelia(trans, tmp) for (j in sel) { tcltk::tkset(getAmelia("main.tree"), j,"transform", all.trans[trans]) tcltk::tcl(getAmelia("main.tree"), "item", j, image = "") } return() } dropTrans <- function() { all.trans <- c("logs","sqrt","lgstc","noms","ords","idvar") sel <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"), "selection")), " ")[[1]] for (j in sel) tcltk::tkset(getAmelia("main.tree"), j,"transform", "") if (is.factor(getAmelia("amelia.data")[,j]) | is.character(getAmelia("amelia.data")[,j])) { tcltk::tcl(getAmelia("main.tree"), "item", j, image = getAmelia("redFlagIcon")) } for (i in all.trans) { tmp <- getAmelia(i) tmp[sel] <- 0 putAmelia(i, tmp) } } addBounds <- function() { onOK <- function(sel) { bdMax <- as.numeric(tcltk::tclvalue(getAmelia("boundMax"))) bdMin <- as.numeric(tcltk::tclvalue(getAmelia("boundMin"))) if (is.na(bdMax) & !is.na(bdMin)) { tcltk::tkmessageBox(parent=getAmelia("addBoundsWindow"), title="Error", message="Please enter a minimum and a maximum value or neither to clear the bounds.", type="ok",icon="error") return() } if (!is.na(bdMax) & is.na(bdMin)) { tcltk::tkmessageBox(parent=getAmelia("addBoundsWindow"), title="Error", message="Please enter a minimum and a maximum value or neither to clear the bounds.", type="ok",icon="error") return() } if (!is.na(bdMax) & !is.na(bdMin)) { if (bdMax <= bdMin) { tcltk::tkmessageBox(parent=getAmelia("addBoundsWindow"), title="Error", message="The maximum is less than the minimum.", type="ok",icon="error") return() } } tmpbmat <- getAmelia("boundsmat") tmpbmat[sel,2:3] <- c(bdMin, bdMax) putAmelia("boundsmat", tmpbmat) if (!is.na(bdMin)) { treeBounds <- paste("[",bdMin,", ", bdMax,"]", sep = "") } else { treeBounds <- "" } tcltk::tkset(getAmelia("main.tree"), sel, "bounds", treeBounds) tcltk::tkwm.withdraw(getAmelia("addBoundsWindow")) tcltk::tkgrab.release(getAmelia("addBoundsWindow")) tcltk::tkfocus(getAmelia("gui")) return() } validateNumeric <- function(x) { if (isTRUE(grep("(^-?[0-9]*\\.?[0-9]*$)",x)==1)) return(tcltk::tclVar("TRUE")) else return(tcltk::tclVar("FALSE")) } sel <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"), "selection")), " ")[[1]] if (sum(is.na(getAmelia("amelia.data")[,sel])) == 0) { tcltk::tkmessageBox(parent = getAmelia("gui"), message = "No missing data on the selected variable.", type = "ok") return() } currMin <- getAmelia("boundsmat")[sel,2] currMax <- getAmelia("boundsmat")[sel,3] putAmelia("boundMin", tcltk::tclVar(ifelse(is.na(currMin), "", currMin))) putAmelia("boundMax", tcltk::tclVar(ifelse(is.na(currMax), "", currMax))) if (exists("addBoundsWindow", envir = ameliaEnv)) { tcltk::tkconfigure(getAmelia("maxBox"), 
textvar = getAmelia("boundMax")) tcltk::tkconfigure(getAmelia("minBox"), textvar = getAmelia("boundMin")) tcltk::tkconfigure(getAmelia("bd.ok"), command = function() onOK(sel)) tcltk::tkwm.deiconify(getAmelia("addBoundsWindow")) tcltk::tkraise(getAmelia("addBoundsWindow")) return() } putAmelia("addBoundsWindow", tcltk::tktoplevel()) tcltk::tkwm.title(getAmelia("addBoundsWindow"), "Add or Edit Bounds") bounds.add <- tcltk::ttkframe(getAmelia("addBoundsWindow")) putAmelia("minBox", tcltk::ttkentry(bounds.add, textvar=getAmelia("boundMin"), validate="key", validatecommand = function(P) validateNumeric(P))) putAmelia("maxBox", tcltk::ttkentry(bounds.add, textvar=getAmelia("boundMax"), validate="key", validatecommand = function(P) validateNumeric(P))) tcltk::tkgrid(tcltk::ttklabel(bounds.add, text="Minimum:"), column=1, row=2, sticky = "e", padx = 10, pady = 10) tcltk::tkgrid(tcltk::ttklabel(bounds.add, text="Maximum:"), column=1, row=3, sticky = "e", padx = 10, pady = 10) tcltk::tkgrid(getAmelia("minBox"), column=2, row=2, pady=5, padx=5) tcltk::tkgrid(getAmelia("maxBox"), column=2, row=3, pady=5, padx=5) but.frame <- tcltk::ttkframe(bounds.add) putAmelia("bd.ok", tcltk::ttkbutton(but.frame, text = "OK", command = function() onOK(sel))) putAmelia("bd.can", tcltk::ttkbutton(but.frame, text = "Cancel", width = 10, command = function() {tcltk::tkwm.withdraw(getAmelia("addBoundsWindow"));tcltk::tkgrab.release(getAmelia("addBoundsWindow"));tcltk::tkfocus(getAmelia("gui"))})) tcltk::tkgrid(getAmelia("bd.ok"), row = 0, column = 1, sticky = "ne", padx = 10, pady = 10) tcltk::tkgrid(getAmelia("bd.can"), row = 0, column = 2, sticky = "ne", padx = 10, pady = 10) tcltk::tkgrid(but.frame, row = 4, column = 1, columnspan = 2, sticky = "ne") tcltk::tkgrid(bounds.add, sticky = "news") tcltk::tkwm.protocol(getAmelia("addBoundsWindow"), "WM_DELETE_WINDOW", function() {tcltk::tkwm.withdraw(getAmelia("addBoundsWindow"));tcltk::tkgrab.release(getAmelia("addBoundsWindow"));tcltk::tkfocus(getAmelia("gui"))}) centerModalDialog(getAmelia("addBoundsWindow"), resize=FALSE) } plotHist <- function() { sel <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"), "selection")), " ")[[1]] if (length(sel)==0) { tcltk::tkmessageBox(parent = getAmelia("gui"), type = "ok", message = "No variable selected.") return(NULL) } sel <- sel[which(sapply(getAmelia("amelia.data")[sel], is.numeric))] if (length(sel)==0) { tcltk::tkmessageBox(parent = getAmelia("gui"), type = "ok", message = "Cannot plot non-numeric variables.") return(NULL) } dev.new() mfrow <- set.mfrow(nvars = length(sel)) on.exit(par(NULL)) layout <- par(mfrow = mfrow) j <- 0 for (i in sel) { j <- j + 1 if (j > 9) { j <- 1 dev.new() layout <- par(mfrow = mfrow) } hist(getAmelia("amelia.data")[,i], main = paste("Histogram of",i), ylab = "Frequnecy", xlab ="", col="grey", border = "white") } invisible() } sortTreeBy <- function(col) { coldata <- c() children <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"), "children","")), " ")[[1]] if (col == "#0") { coldata <- children } else { for (i in children) { coldata <- c(coldata, tcltk::tclvalue(tcltk::tkset(getAmelia("main.tree"), i, col))) } } dirs <- getAmelia("sortDirs") sortDir <- dirs[col] if (col %in% c("mean", "sd", "min", "max")) { coldata[coldata == "..."] <- "-Inf" coldata[coldata == "(factor)"] <- "-Inf" sortOrder <- order(as.numeric(coldata), decreasing = sortDir) } else if (col == "miss") { coldata <- matrix(unlist(strsplit(coldata,"/")), nrow=2)[1,] sortOrder <- order(as.numeric(coldata), 
decreasing = sortDir)
  } else {
    sortOrder <- order(coldata, decreasing = sortDir)
  }
  sorted <- children[sortOrder]
  for (i in 1:length(children)) {
    tcltk::tkmove(getAmelia("main.tree"), sorted[i], "", i - 1)
  }
  drawArrow(col, sortDir)
  refreshSelection(getAmelia("main.tree"))
  dirs[col] <- !sortDir
  putAmelia("sortDirs", dirs)
}

drawArrow <- function(col, down) {
  treecols <- names(getAmelia("sortDirs"))
  for (i in treecols) {
    tcltk::tcl(getAmelia("main.tree"), "heading", i, image = "")
  }
  if (down) {
    tcltk::tcl(getAmelia("main.tree"), "heading", col, image = getAmelia("upArrowIcon"))
  } else {
    tcltk::tcl(getAmelia("main.tree"), "heading", col, image = getAmelia("downArrowIcon"))
  }
  return(NULL)
}

bandTree <- function() {
  children <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"), "children", "")), " ")[[1]]
  j <- 0
  tcltk::tktag.configure(getAmelia("main.tree"), "white", background = "white")
  tcltk::tktag.configure(getAmelia("main.tree"), "gray", background = "gray92")
  for (i in children) {
    j <- j + 1
    if ((j %% 2) == 0) {
      tcltk::tcl(getAmelia("main.tree"), "item", i, tag = "white")
    } else {
      tcltk::tcl(getAmelia("main.tree"), "item", i, tag = "gray")
    }
  }
}

updateTreeStats <- function() {
  children <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"), "children", "")), " ")[[1]]
  for (i in names(getAmelia("amelia.data"))) {
    if (is.factor(getAmelia("amelia.data")[,i]) | is.character(getAmelia("amelia.data")[,i])) {
      vals <- c("(factor)", "...", "...", "...")
      vals <- c(vals, paste(sum(is.na(getAmelia("amelia.data")[,i])), nrow(getAmelia("amelia.data")), sep = "/"))
    } else {
      vals <- c(min(getAmelia("amelia.data")[,i], na.rm = TRUE), max(getAmelia("amelia.data")[,i], na.rm = TRUE), mean(getAmelia("amelia.data")[,i], na.rm = TRUE), sd(getAmelia("amelia.data")[,i], na.rm = TRUE))
      vals <- signif(vals, digits = 4)
      vals <- c(vals, paste(sum(is.na(getAmelia("amelia.data")[,i])), nrow(getAmelia("amelia.data")), sep = "/"))
    }
    tcltk::tkset(getAmelia("main.tree"), i, "min", vals[1])
    tcltk::tkset(getAmelia("main.tree"), i, "max", vals[2])
    tcltk::tkset(getAmelia("main.tree"), i, "mean", vals[3])
    tcltk::tkset(getAmelia("main.tree"), i, "sd", vals[4])
    tcltk::tkset(getAmelia("main.tree"), i, "miss", vals[5])
  }
}

centerModalDialog <- function(window, resize = TRUE) {
  xpos <- as.numeric(tcltk::tkwinfo("rootx", getAmelia("gui")))
  ypos <- as.numeric(tcltk::tkwinfo("rooty", getAmelia("gui")))
  rwidth <- as.numeric(tcltk::tkwinfo("width", getAmelia("gui")))
  rheight <- as.numeric(tcltk::tkwinfo("height", getAmelia("gui")))
  width <- as.numeric(tcltk::tkwinfo("reqwidth", window))
  height <- as.numeric(tcltk::tkwinfo("reqheight", window))
  newxpos <- xpos + .5 * rwidth - .5 * width
  newypos <- ypos + .5 * rheight - .5 * height
  if (.Platform$OS.type == "windows")
    tcltk::tkwm.geometry(window, paste("+", round(newxpos), "+", round(newypos), sep = ""))
  tcltk::tkfocus(window)
  tcltk::tkgrab.set(window)
  if (!resize) {
    tcltk::tkwm.resizable(window, 0, 0)
  }
  tcltk::tkwm.transient(window, getAmelia("gui"))
  tcltk::tcl("update", "idletasks")
}

showImputedFiles <- function() {
  if (Sys.info()['sysname'] %in% c("Windows", "Darwin"))
    system(paste("open", shQuote(getAmelia("wdForLastImputation"))))
  else system(paste("xdg-open", shQuote(getAmelia("wdForLastImputation"))))
  return(NULL)
}

## Here is (finally) a decent solution to the tcl/tk issues with
## global variables. Here we create a new environment, whose parent is
## the Amelia namespace. We then make sure that all of the GUI
## functions use that as their enclosure. This means that any of these
## functions can use values in the ameliaEnv.
This eliminates the need ## for any "getAmelia" calls, but we still have to be careful since ## assigning values in these functions is local and doesn't ## automatically add the value to ameliaEnv. So, for assigning, ## 'putAmelia' probably still makes sense. We could use ## assign("foo", "bar", envir = parent.frame()) ## but putAmelia is probably more clear. getAmelia() is probably still ## a little more safe to use because it will throw an error if ## something is missing, whereas relying on lexical scoping will try ## to use something with the same name in the search path. ameliaEnv <- new.env() environment(main.close) <- ameliaEnv environment(setWorkingDir) <- ameliaEnv environment(loadStata) <- ameliaEnv environment(loadSPSS) <- ameliaEnv environment(loadSAS) <- ameliaEnv environment(loadTAB) <- ameliaEnv environment(loadCSV) <- ameliaEnv environment(loadRData) <- ameliaEnv environment(loadDemo) <- ameliaEnv environment(drawMissMap) <- ameliaEnv environment(activateGUI) <- ameliaEnv environment(save.session) <- ameliaEnv environment(load.session) <- ameliaEnv environment(run.amelia) <- ameliaEnv environment(amelia.save) <- ameliaEnv environment(set.out) <- ameliaEnv environment(setTS) <- ameliaEnv environment(unsetTS) <- ameliaEnv environment(setCS) <- ameliaEnv environment(unsetCS) <- ameliaEnv environment(fillMainTree) <- ameliaEnv environment(AmeliaView) <- ameliaEnv environment(buildNumericalOptions) <- ameliaEnv environment(buildOutputOptions) <- ameliaEnv environment(buildAboutDialog) <- ameliaEnv environment(gui.pri.setup) <- ameliaEnv environment(gui.diag.setup) <- ameliaEnv environment(save.log) <- ameliaEnv environment(show.output.log) <- ameliaEnv environment(bindTooltip) <- ameliaEnv environment(showTooltip) <- ameliaEnv environment(killTooltip) <- ameliaEnv environment(refreshSelection) <- ameliaEnv environment(variableOptionStatus) <- ameliaEnv environment(variableOptionsPost) <- ameliaEnv environment(mainTreeRightClick) <- ameliaEnv environment(addLag) <- ameliaEnv environment(addLead) <- ameliaEnv environment(dropLag) <- ameliaEnv environment(dropLead) <- ameliaEnv environment(setTrans) <- ameliaEnv environment(dropTrans) <- ameliaEnv environment(addBounds) <- ameliaEnv environment(plotHist) <- ameliaEnv environment(sortTreeBy) <- ameliaEnv environment(drawArrow) <- ameliaEnv environment(bandTree) <- ameliaEnv environment(updateTreeStats) <- ameliaEnv environment(centerModalDialog) <- ameliaEnv environment(showImputedFiles) <- ameliaEnv
## end of file: R/ameliagui.r
est.matrix <- function(x, name) {
  vals <- lapply(x, function(z) z[[name]])
  out <- do.call(cbind, vals)
  out
}

##' Combine results from statistical models run on multiply imputed
##' data sets using the so-called Rubin rules.
##'
##' @title Combine results from analyses on imputed data sets
##' @param x List of output from statistical models estimated on
##' different imputed data sets, as outputted by \code{with(a.out,
##' expr)} where \code{a.out} is the output of a call to \code{amelia}.
##' @param conf.int Logical indicating if confidence intervals should
##' be computed for each quantity of interest (default is \code{FALSE}).
##' @param conf.level The confidence level to use for the confidence
##' interval if \code{conf.int = TRUE}. Defaults to 0.95, which
##' corresponds to a 95 percent confidence interval.
##' @return Returns a \code{tibble} that contains:
##' \describe{
##' \item{term}{Name of the coefficient or parameter.}
##' \item{estimate}{Estimate of the parameter, averaging across imputations.}
##' \item{std.error}{Standard error of the estimate, accounting for
##' imputation uncertainty.}
##' \item{statistic}{Value of the t-statistic for the estimated
##' parameter.}
##' \item{p.value}{p-value associated with the test of a null
##' hypothesis that the true coefficient is zero. Uses the
##' t-distribution with imputation-adjusted degrees of freedom.}
##' \item{df}{Imputation-adjusted degrees of freedom for each
##' parameter.}
##' \item{r}{Relative increase in variance due to nonresponse.}
##' \item{miss.info}{Estimated fraction of missing information.}
##' \item{conf.low}{Lower bound of the estimated confidence interval.
##' Only present if \code{conf.int = TRUE}.}
##' \item{conf.high}{Upper bound of the estimated confidence interval.
##' Only present if \code{conf.int = TRUE}.}
##' }
##' @author Matt Blackwell
##'
##' @examples
##' data(africa)
##' a.out <- amelia(x = africa, cs = "country", ts = "year", logs =
##' "gdp_pc")
##'
##' imp.mods <- with(a.out, lm(gdp_pc ~ infl + trade))
##'
##' mi.combine(imp.mods, conf.int = TRUE)
##'
##' @export
mi.combine <- function(x, conf.int = FALSE, conf.level = 0.95) {
  if (requireNamespace("broom", quietly = TRUE)) {
    tidiers <- grep("^tidy\\.", ls(getNamespace("broom")), value = TRUE)
    tidiers <- gsub("tidy\\.", "", tidiers)
  } else {
    rlang::abort("{broom} package required for mi.combine")
  }
  if (!(class(x[[1L]]) %in% tidiers)) {
    rlang::abort("analysis model does not have tidy() method.")
  }
  mi_tidy <- lapply(x, function(x) broom::tidy(x))
  m <- length(mi_tidy)
  out <- mi_tidy[[1L]]

  ests <- est.matrix(mi_tidy, "estimate")
  ses <- est.matrix(mi_tidy, "std.error")
  wi.var <- rowMeans(ses ^ 2)
  out$estimate <- rowMeans(ests)
  diffs <- sweep(ests, 1, rowMeans(ests))
  bw.var <- rowSums(diffs ^ 2) / (m - 1)
  out$std.error <- sqrt(wi.var + bw.var * (1 + 1 / m))
  r <- ((1 + 1 / m) * bw.var) / wi.var
  df <- (m - 1) * (1 + 1 / r) ^ 2
  miss.info <- (r + 2 / (df + 3)) / (r + 1)
  out$statistic <- out$estimate / out$std.error
  out$p.value <- 2 * stats::pt(abs(out$statistic), df = df,
                               lower.tail = FALSE)
  out$df <- df
  out$r <- r
  out$miss.info <- miss.info
  if (conf.int) {
    t.c <- stats::qt(1 - (1 - conf.level) / 2, df = df)
    out$conf.low <- out$estimate - t.c * out$std.error
    out$conf.high <- out$estimate + t.c * out$std.error
  }
  out
}
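## A small hand-check (hypothetical numbers, not part of the package) of
## the Rubin-rules quantities that mi.combine() computes for a single
## parameter, following the same formulas as above:
ests <- c(1.10, 0.92, 1.05)  # point estimates from m = 3 imputations
ses <- c(0.20, 0.22, 0.21)   # their standard errors
m <- length(ests)
wi.var <- mean(ses^2)                           # within-imputation variance
bw.var <- sum((ests - mean(ests))^2) / (m - 1)  # between-imputation variance
pooled.se <- sqrt(wi.var + bw.var * (1 + 1 / m))
r <- ((1 + 1 / m) * bw.var) / wi.var            # relative increase in variance
df <- (m - 1) * (1 + 1 / r)^2                   # imputation-adjusted df
c(estimate = mean(ests), std.error = pooled.se, df = df)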
## end of file: R/combine.R
## diag.r
## amelia diagnostic functions
##
## 05/05/06 mb - added amelia.arg compatibility
## 07/05/06 jh - compare: changed how variable names are found, changed
##               titles/labels, set x-axis values in matplot, colours for
##               no imputations
##               overimpute: added new m-name in output,
## 09/05/06 mb - overimpute: added frontend check for overimpute.
## 15/05/06 jh - overimpute: stacking of original data, and various
##               graphics adjustments
## 01/06/06 mb - added "gethull" and "disperse" for overdispersion
##               diagnostic
## 19/07/06 mb - moved handling of arglists to prep.
## 01/12/06 mb - can't compare non-numerics, only use the relevant
##               columns when building compare
## 13/12/06 mb - changed for new priors.
## 26/03/07 jh - overimpute: excluded polynomials of time from
##               missingness count, reordered plotting of ci's (smallest
##               last), allow variable name as var argument
## 28/03/07 jh - disperse: changed tolerance and empri handling.
## 03/04/07 jh - disperse: changed 1d plot settings, number of colors,
##               minor edits to "patt" construction.
## 10/04/07 jh - created sigalert function to view disperse principal
##               components.
## 22/07/08 mb - good coding update: T->TRUE/F->FALSE
## 10/02/09 mb - compare: added lwd, col, main, lab, etc for user
##               control, added scale so that users can control scaling,
##               uses amelia class
##               overimpute: uses amelia class, added lwd, col, main,
##               lab, etc for user
##               disperse: now uses amelia class
## 02/21/12 jh - added mi.meld to combine multiply imputed quantities of
##               interest and se's.
## 10/30/12 jh - tscsPlot: expanded to allow to cycle through sets of
##               cross sectional units efficiently.

#' Compare observed versus imputed densities
#'
#' Plots smoothed density plots of observed and imputed values from output
#' from the \code{amelia} function.
#'
#' @param output output from the function \code{amelia}.
#' @param var column number or variable name of the variable to plot.
#' @param col a vector of length 2 containing the color to plot the (1)
#' imputed density and (2) the observed density.
#' @param scaled a logical indicating if the two densities should be
#' scaled to reflect the difference in number of units in each.
#' @param lwd the line width of the density plots.
#' @param main main title of the plot. The default is to title the plot
#' using the variable name.
#' @param xlab the label for the x-axis. The default is the name of the
#' variable.
#' @param ylab the label for the y-axis. The default is "Relative Density."
#' @param legend a logical value indicating if a legend should be
#' plotted.
#' @param frontend a logical value used internally for the Amelia GUI.
#' @param ... further graphical parameters for the plot.
#'
#' @details This function first plots a density plot of the observed units for the
#' variable \code{var} in \code{col[2]}. Then the function plots a density plot of the mean
#' or modal imputations for the missing units in \code{col[1]}. If a
#' variable is marked "ordinal" or "nominal" with the \code{ords} or
#' \code{noms} options in \code{amelia}, then the modal imputation will
#' be used. If \code{legend} is \code{TRUE}, then a legend is plotted as well.
#'
#' @references
#' Abayomi, K. and Gelman, A. and Levy, M. 2008 "Diagnostics for
#' Multivariate Imputations," \emph{Applied Statistics}. 57,3: 273--291.
#'
#' @examples
#' data(africa)
#' a.out <- amelia(x = africa, cs = "country", ts = "year", logs = "gdp_pc")
#' compare.density(a.out, var = "gdp_pc")
#'
#' @seealso For more information on how densities are computed,
#' \code{\link{density}}; Other imputation diagnostics are
#' \code{\link{overimpute}}, \code{\link{disperse}}, and
#' \code{\link{tscsPlot}}.
#' compare.density <- function(output, var, col = c("indianred", "dodgerblue"), scaled = FALSE, lwd = 1, main, xlab, ylab, legend = TRUE, frontend = FALSE, ...) { if (!("amelia" %in% class(output))) stop("The 'output' is not Amelia output.") ##data <- getOriginalData(output) data <- remove.imputations(output) ## Checks on if the variable makes sense to plot. if (inherits(var, "character")) if (!(var %in% names(data))) stop("The variable name (var) doesn't correspond to a column in the data.") else var <- match(var, names(data)) if (any(var > ncol(data), var < 0, (var %% 1) != 0)) stop("The 'var' option points to a non-existant column.") if (var %in% output$arguments$idvar) stop("the variable selected was marked as an idvar") ## We need to clean the data to make sure that ## we're not going to run into NAs mcount <- sum(!is.na(output$imputations)) imputed <- (1:output$m)[!is.na(output$imputations)] ## create an empty vector to sum across varimp <- matrix(NA, nrow(data), mcount) for (i in 1:mcount) { if (is.data.frame(data)) { varimp[,i] <- output$imputations[[imputed[i]]][[var]] } else { varimp[,i] <- output$imputations[[imputed[i]]][,var] } } if (var %in% c(output$arguments$noms, output$arguments$ords)) { leg.text <- "Modal Imputations" varimp <- apply(varimp, 1, function(x) as.numeric(names(which.max(table(x))))) } else { leg.text <- "Mean Imputations" varimp <- rowMeans(varimp) } if (frontend) { dev.new() } if (is.data.frame(data)) { vars <- data[[var]] } else { vars <- data[,var] } if (scaled) ratio <- sum(is.na(vars))/sum(!is.na(vars)) else ratio <- 1 varnames <- dimnames(data)[[2]] # This will work for both data.frames AND matricies. vname <- varnames[var] # This will work for both data.frames AND matricies. if (sum(is.na(vars)) > 1) { oiDetect <- (sum(output$missMatrix[,var]) + sum(!is.na(vars))) > length(vars) if (missing(main)) { if (oiDetect) { main <- paste("Observed and Overimputed values of", vname) } else { main <- paste("Observed and Imputed values of", vname) } } if (missing(xlab)) { xlab <- paste(vname," -- Fraction Missing:", round(mean(is.na(vars)), digits = 3)) } if (missing(ylab)) { ylab <- "Relative Density" } xmiss <- density(varimp[output$missMatrix[, var]], na.rm = TRUE) xobs <- density(vars[!is.na(vars)], na.rm = TRUE) compplot <- matplot(x = cbind(xmiss$x, xobs$x), y = cbind(ratio * xmiss$y, xobs$y), xlab = xlab, ylab = ylab, type = "l", lwd = lwd, lty = 1, main = main, col = col, ...) if (legend) { legend("topright", legend = c(leg.text, "Observed Values"), col = col, lty = c(1,1), bg = 'gray90', lwd = lwd) } } else { if (missing(main)) { main <- paste("Observed values of",vname) } if (missing(xlab)) { xlab <- vname } if (missing(ylab)) { ylab <- "Relative Density" } compplot <- plot(density(varimp, na.rm = TRUE), col = col[2], main = main,...) if (sum(is.na(vars)) == 1) { abline(v = varimp[output$missMatrix[, var]], col = col[1]) } if (legend) { legend("topright", legend = c("Mean Imputations","Observed Values"), col = col, lty = c(1,1), bg = 'gray90') } } invisible() } #' Overimputation diagnostic plot #' #' Treats each observed value as missing and imputes from the imputation #' model from \code{amelia} output. #' #' @param output output from the function \code{amelia}. #' @param var column number or variable name of the variable to #' overimpute. #' @param draws the number of draws per imputed dataset to generate #' overimputations. Total number of simulations will \code{m * #' draws} where \code{m} is the number of imputations. 
#' @param subset an optional vector specifying a subset of observations
#' to be used in the overimputation.
#' @param legend a logical value indicating if a legend should be
#' plotted.
#' @param xlab the label for the x-axis. The default is "Observed Values."
#' @param ylab the label for the y-axis. The default is "Imputed Values."
#' @param main main title of the plot. The default is to smartly title the plot
#' using the variable name.
#' @param frontend a logical value used internally for the Amelia GUI.
#' @param ... further graphical parameters for the plot.
#'
#' @details
#' This function temporarily treats each observed value in
#' \code{var} as missing and imputes that value based on the imputation
#' model of \code{output}. The dots are the mean imputation and the
#' vertical lines are the 90 percent confidence intervals for
#' imputations of each observed value. The diagonal line is the \eqn{y=x}
#' line. If all of the imputations were perfect, then our points would
#' all fall on the line. A good imputation model would have about 90\% of
#' the confidence intervals containing the truth; that is, about 90\% of
#' the vertical lines should cross the diagonal.
#'
#' The color of the vertical lines displays the fraction of missing
#' observations in the pattern of missingness for that
#' observation. The legend codes this information. Obviously, the
#' imputations will be much tighter if there are more observed covariates
#' to use to impute that observation.
#'
#' The \code{subset} argument evaluates in the environment of the
#' data. That is, it can but is not required to refer to variables in the
#' data frame as if it were attached.
#'
#' @return A list that contains (1) the row in the original data
#' (\code{row}), (2) the observed value of that observation
#' (\code{orig}), (3) the mean of the overimputations
#' (\code{mean.overimputed}), (4) the lower bound of the 90\%
#' confidence interval of the overimputations
#' (\code{lower.overimputed}), (5) the upper bound of the 90\%
#' confidence interval of the overimputations
#' (\code{upper.overimputed}), (6) the fraction of the variables
#' that were missing for that observation in the original data
#' (\code{prcntmiss}), and (7) a matrix of the raw overimputations,
#' with observations in rows and the different draws in columns (\code{overimps}).
#'
#' @seealso Other imputation diagnostics are
#' \code{\link{compare.density}}, \code{\link{disperse}}, and
#' \code{\link{tscsPlot}}.
overimpute <- function(output, var, draws = 20, subset,
                       legend = TRUE, xlab, ylab, main,
                       frontend = FALSE, ...)
{ if (!("amelia" %in% class(output))) stop("The 'output' is not Amelia output.") data <- getOriginalData(output) ## via the subset.data.frame function if (missing(subset)) { r <- TRUE } else { e <- substitute(subset) r <- eval(e, data, parent.frame()) if (!is.logical(r)) { stop("'subset' must evaluate to logical") } r <- r & !is.na(r) if (sum(r) == 0) { stop("no observations in the subset") } } data <- data[r,] origAMr1 <- is.na(data) ## Allow character names as arguments for "var" with data.frames if(is.character(var)){ if(!is.data.frame(data)){ stop("var must be identified by column number as dataset is not a data frame.") } else { nomnames <- colnames(output$imputations[[1]])[output$arguments$noms] if (var %in% nomnames) { stop("Cannot overimpute variables set to be nominal") } varpos <- match(var, colnames(data)) if(is.na(varpos)){ stop("The name provided for var argument does not exist in the dataset provided.") } else { var <- varpos } } } ## The argument list for an amelia output is now ## at "output$arguments" prepped <- amelia.prep(x = data, arglist = output$arguments, incheck = FALSE) stacked.var <- match(var, prepped$subset.index[prepped$p.order]) subset.var <- match(var, prepped$subset.index) if (!is.null(prepped$blanks)) fully.missing <- origAMr1[-prepped$blanks, var][prepped$n.order] else fully.missing <- origAMr1[, var][prepped$n.order] if (is.na(stacked.var)) { if (frontend) tcltk::tkmessageBox(message="The variable you selected doesn't exist in the Amelia output becuase it wasn't imputed.",icon="error",type="ok") stop("var doesn't exist in the amelia output. It either didn't get imputed or is out of the range of columns.") } means <- c() lowers <- c() uppers <- c() pcnts <- c() color <- c() AMr1 <- is.na(prepped$x) ## if (sum(!AMr1[,stacked.var]) == 0){ ## if (frontend) { ## tkmessageBox(parent = getAmelia("gui"), ## message="The variable needs to have at least one fully observed cell.",icon="error",type="ok") ## } ## stop("function needs at least one fully observed cell in 'var'.") ## } AMr1[,stacked.var] <- TRUE AMp <- ncol(prepped$x) imphold <- matrix(NA, nrow = nrow(prepped$x), ncol = output$m * draws) for (i in 1:nrow(prepped$x)) { if (fully.missing[i]) { next() } x <- prepped$x[i,,drop=FALSE] x[1, stacked.var] <- NA o <- !is.na(x) miss <- !o x[is.na(x)] <- 0 oo <- 1 * o mm <- 1 * miss #o<-!AMr1[i,] #o[stacked.var]<-FALSE pcntmiss <- (sum(miss))/(length(miss)-sum(prepped$index==0)) # Does not include time polynomials (index==0) in the denominator ## These are always fully observed by construction, but auxiliary. ## Leaves constructed lags and ## leads, and nominal variables ## in count, however. conf <- c() for (k in 1:output$m) { ## The theta matrix is now stored in an array with ## dimensions c(vars+1,vars+1,m), so this grabs ## the kth theta matrix. 
thetareal <- output$theta[,,k] xx <- matrix(x, draws, AMp, byrow = TRUE) rr <- matrix(AMr1[i,], draws, AMp, byrow = TRUE) xc <- .Call("ameliaImpute", xx, rr, oo, mm, c(1, nrow(xx) + 1), thetareal, NULL, NULL, NULL, PACKAGE = "Amelia") conf <- c(conf, xc[, stacked.var]) } scaled.conf <- (conf * prepped$scaled.sd[subset.var]) + prepped$scaled.mu[subset.var] varlog <- match(var, prepped$logs) if (!is.na(varlog)) { scaled.conf <- untransform(as.matrix(scaled.conf), logs = 1, xmin = prepped$xmin[varlog], sqrts = NULL, lgstc = NULL) } if (!is.na(match(var,prepped$sqrts))) { scaled.conf <- untransform(as.matrix(scaled.conf), logs = NULL, xmin = NULL, sqrts = 1, lgstc = NULL) } if (!is.na(match(var,prepped$lgstc))) { scaled.conf <- untransform(as.matrix(scaled.conf), logs = NULL, xmin = NULL, sqrts = NULL, lgstc = 1) } ##colors are based on rainbow roygbiv l->r is higher missingness \ blue <- rgb(0,0,1, alpha = 0.75) green <- rgb(0,.75,0, alpha = 0.75) orange <- rgb(1, 0.65,0, alpha = 0.75) tomato <- rgb(1, 0.39, 0.28, alpha = 0.75) red <- rgb(0.75, 0, 0, alpha = 0.75) spectrum <- c(blue, green, orange, tomato, red) if (pcntmiss < .20) color <- c(color, spectrum[1]) else if (pcntmiss >= .20 && pcntmiss < .40) color <- c(color, spectrum[2]) else if (pcntmiss >= .40 && pcntmiss < .60) color <- c(color, spectrum[3]) else if (pcntmiss >= .60 && pcntmiss < .80) color <- c(color, spectrum[4]) else if (pcntmiss >= .80) color <- c(color, spectrum[5]) imphold[i,] <- scaled.conf means <- c(means, mean(scaled.conf)) lowers <- c(lowers, sort(scaled.conf)[round(output$m * draws * 0.05)]) uppers <- c(uppers, sort(scaled.conf)[round(output$m * draws * 0.95)]) pcnts <- c(pcnts, pcntmiss) } #AMr1<-is.na(prepped$x[,stacked.var]) #partial.n.order<-prepped$n.order[!origAMr1] if (is.data.frame(data)) { xplot <- data[[var]] } else { xplot <- data[,var] } if (is.null(prepped$blanks)) { xplot <- xplot[prepped$n.order][!fully.missing] } else { xplot <- xplot[-prepped$blanks][prepped$n.order][!fully.missing] } addedroom <- (max(uppers) - min(lowers)) * 0.1 if (!hasArg(log)) { this.ylim <- range(c(lowers - addedroom, uppers)) legpos <- "bottomright" } else { this.ylim <- range(c(lowers[lowers > 0], uppers + addedroom)) legpos <- "topright" } if (missing(xlab)) { xlab <- "Observed Values" } if (missing(ylab)) { ylab <- "Imputed Values" } if (missing(main)) { main <- paste("Observed versus Imputed Values of",colnames(data)[var]) } if (frontend) { dev.new() } ci.order <- order(uppers - lowers, decreasing = TRUE) # Allows smallest CI's to be printed last, and thus not buried in the plot. overplot <- plot(xplot[ci.order], means[ci.order], xlab = xlab, ylab = ylab, ylim = this.ylim, type = 'p', main = main, col = color[ci.order], pch = 19,...) 
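    ## add the vertical confidence bars in the same widest-first order
    ## (ci.order) as the points, so the narrowest intervals are drawn
    ## last and remain visible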
segments(xplot[ci.order], lowers[ci.order], xplot[ci.order], uppers[ci.order], col = color[ci.order]) if (legend) { legend(legpos, legend = c(" 0-.2",".2-.4",".4-.6",".6-.8",".8-1"), col = spectrum, lty = c(1,1), horiz = TRUE, bty = "n") } abline(0,1) out <- list(row = prepped$n.order[!fully.missing], orig = xplot, mean.overimputed = means, lower.overimputed = lowers, upper.overimputed = uppers, prcntmiss = pcnts, overimps = imphold[!is.na(imphold[,1]),]) invisible(out) } gethull <- function(st,tol,rots) { stvec <- st for (i in 1:length(st)) { addedvec <- rep(0,length(st)) addedvec[i] <- tol * 100 newvec <- cbind(st + addedvec, st - addedvec) stvec <- cbind(stvec, newvec) } reduced.hull <- t(rots) %*% stvec return(reduced.hull) } #' Overdispersed starting values diagnostic for multiple imputation #' #' A visual diagnostic of EM convergence from multiple overdispersed #' starting values for an output from \code{amelia}. #' #' @param output output from the function \code{amelia}. #' @param m the number of EM chains to run from overdispersed starting values. #' @param dims the number of principle components of the parameters to #' display and assess convergence on (up to 2). #' @param p2s an integer that controls printing to screen. 0 (default) #' indicates no printing, 1 indicates normal screen output and 2 #' indicates diagnostic output. #' @param frontend a logical value used internally for the Amelia GUI. #' @param xlim limits of the plot in the horizontal dimension. #' @param ylim limits of the plot in vertical dimension. #' @param ... further graphical parameters for the plot. #' #' @details This function tracks the convergence of \code{m} EM chains which start #' from various overdispersed starting values. This plot should give some #' indication of the sensitivity of the EM algorithm to the choice of #' starting values in the imputation model in \code{output}. If all of #' the lines converge to the same point, then we can be confident that #' starting values are not affecting the EM algorithm. #' #' As the parameter space of the imputation model is of a #' high-dimension, this plot tracks how the first (and second if #' \code{dims} is 2) principle component(s) change over the iterations of #' the EM algorithm. Thus, the plot is a lower dimensional summary of the #' convergence and is subject to all the drawbacks inherent in said #' summaries. #' #' For \code{dims==1}, the function plots a horizontal line at the #' position where the first EM chain converges. Thus, we are checking #' that the other chains converge close to that horizontal line. For #' \code{dims==2}, the function draws a convex hull around the point of #' convergence for the first EM chain. The hull is scaled to be within #' the tolerance of the EM algorithm. Thus, we should check that the #' other chains end up in this hull. #' #' @seealso Other imputation diagnostics are #' \code{\link{compare.density}}, \code{\link{disperse}}, and #' \code{\link{tscsPlot}} disperse <- function(output, m = 5, dims = 1, p2s = 0, frontend = FALSE, ..., xlim = NULL, ylim = NULL) { if (!("amelia" %in% class(output))) stop("The 'output' is not Amelia output.") ## The original data is the imputed data with the ## imputations marked to NA. 
These two lines do that data <- getOriginalData(output) if (frontend) { requireNamespace("tcltk") putAmelia("output.log", c(getAmelia("output.log"), "==== Overdispersion Output ====\n")) } # prep the data and arguments prepped<-amelia.prep(x=data, arglist=output$arguments) if (p2s) cat("-- Imputation", "1", "--") if (frontend) { putAmelia("output.log", c(getAmelia("output.log"), paste("-- Imputation","1","--\n"))) } flush.console() # run EM, but return it with the theta at each iteration thetanew <- emarch(prepped$x, p2s = p2s, thetaold = NULL, tolerance = prepped$tolerance, startvals = 0, priors = prepped$priors, empri = prepped$empri, frontend = frontend, allthetas = TRUE, collect = FALSE) #change 4 # thetanew is a matrix whose columns are vectorized upper triangles of theta # matrices for each iteration. thus, there are k(k+1)/2 rows. impdata <- thetanew$thetanew # we'll put the theta of the last iteration into a new starting theta startsmat <- matrix(0, ncol(prepped$x) + 1, ncol(prepped$x) + 1) startsmat[upper.tri(startsmat, TRUE)] <- c(-1, impdata[, ncol(impdata)]) startsmat <- t(startsmat) startsmat[upper.tri(startsmat, TRUE)] <- c(-1, impdata[, ncol(impdata)]) iters <- nrow(thetanew$iter.hist) + 1 for (i in 2:m) { if (p2s) cat("-- Imputation", i, "--\n") if (frontend) { putAmelia("output.log", c(getAmelia("output.log"), paste("-- Imputation",i,"--\n"))) } # get a noisy sample of data from the that starting value (which is the # Amelia answer) and use that to estimate a new starting theta (mus/vcov) newstarts <- rmvnorm(round(2.5 * ncol(prepped$x)), startsmat[1,2:ncol(startsmat)], startsmat[2:nrow(startsmat),2:nrow(startsmat)]) startcov <- var(newstarts) startmus <- colMeans(newstarts) newstartsmat <- matrix(-1, ncol(prepped$x) + 1, ncol(prepped$x) + 1) newstartsmat[2:nrow(startsmat),2:nrow(startsmat)] <- startcov newstartsmat[1,2:nrow(startsmat)] <- startmus newstartsmat[2:nrow(startsmat),1] <- startmus # grab the iteration history of the thetas thetanew <- emarch(prepped$x, p2s = p2s, thetaold = newstartsmat, tolerance = prepped$tolerance, startvals = 0, priors = prepped$priors, empri = prepped$empri, frontend = frontend, allthetas = TRUE, collect = FALSE) # change 5 impdata <- cbind(impdata, thetanew$thetanew) iters <- c(iters, nrow(thetanew$iter.hist) + 1) } if (dims == 1) comps <- c(1) else comps <- c(1,2) # reduce the dimenionality from k(k+1)/2 to 1 or 2 via principle components rotations <- prcomp(t(impdata))$rotation[, comps] reduced.imps <- t(rotations) %*% impdata cols <- rainbow(m) # plot the imputations if (frontend) { dev.new() } if (dims == 1) { addedroom <- (max(reduced.imps) - min(reduced.imps)) * 0.1 x <- seq(iters[1]) if (is.null(xlim)) xlim <- c(0, max(iters)) if (is.null(ylim)) ylim <- range(c(reduced.imps - addedroom, reduced.imps)) y <- reduced.imps[1, 1:iters[1]] patt <- seq(1, length(x) - 1) plot(x, y, col = 1, main = "Overdispersed Start Values", xlab = "Number of Iterations", ylab = "Largest Principle Component", xlim = xlim, ylim = ylim, type = "n") segments(x[patt], y[patt], x[patt + 1], y[patt + 1], col = cols[1]) for (i in 2:length(iters)) { x <- seq(iters[i]) y <- reduced.imps[1, (sum(iters[1:(i-1)])+1):(sum(iters[1:i]))] patt <- seq(1, length(x)-1) segments(x[patt], y[patt], x[patt+1], y[patt+1], col=cols[i]) #points(x,y,col=i) } abline(h = reduced.imps[iters[1]], lwd = 2) legend("bottomright", legend = c("Convergence of original starting values"), lwd = 2, bty = "n") } else { xrange <- c((min(reduced.imps[1,])), (max(reduced.imps[1,]))) yrange <- 
c((min(reduced.imps[2,])), (max(reduced.imps[2,]))) if (is.null(xlim)) xlim <- xrange if (is.null(ylim)) ylim <- yrange plot(reduced.imps[1,1:iters[1]], reduced.imps[2,1:iters[1]], type = "n", main = "Overdispersed Starting Values", xlab = "First Principle Component", ylab = "Second Principle Component", col=cols[1], xlim = xlim, ylim = ylim) for (i in 2:length(iters)) { x <- reduced.imps[1, (sum(iters[1:(i-1)])+1):(sum(iters[1:i]))] y <- reduced.imps[2, (sum(iters[1:(i-1)])+1):(sum(iters[1:i]))] patt <- c() xdiffs <- diff(x) ydiffs <- diff(y) veclength <- sqrt(xdiffs^2+ydiffs^2) for (j in 1:length(xdiffs)) if (veclength[j] > xinch(1/500)) patt <- c(patt,j) if (!is.null(patt)) arrows(x[patt], y[patt], x[patt + 1], y[patt + 1], length = .1, col = cols[i]) patt <- seq(1, length(x) - 1) segments(x[patt], y[patt], x[patt+1], y[patt+1], col = cols[i]) } x <- reduced.imps[1,1:iters[1]] y <- reduced.imps[2,1:iters[1]] xdiffs <- diff(x) ydiffs <- diff(y) veclength <- sqrt(xdiffs^2+ydiffs^2) inchlength <- sqrt(sum(xyinch(1/500)^2)) patt <- c() for (j in 1:length(xdiffs)) if (veclength[j] > inchlength) patt <- c(patt,j) #if (!is.null(patt)) # arrows(x[patt],y[patt],x[patt+1],y[patt+1],length=.15,col=1,lwd=5) patt <- seq(1, length(x) -1) segments(x[patt], y[patt], x[patt + 1], y[patt + 1], col = cols[1], lwd = 1) dists <- gethull(st = impdata[ ,iters[1]], tol = prepped$tolerance, rots = rotations) convexhull <- chull(t(dists)) convexhull <- c(convexhull, convexhull[1]) lines(t(dists)[convexhull,], col = "orange", pch = 19, lwd = 2) abline(h = 0, lty = 2) abline(v = 0, lty = 2) } #if (frontend) # tkdestroy(getAmelia("tcl.window")) out <- list(impdat = impdata, p.order = prepped$p.order, index = prepped$index, iters = iters, rotations = rotations, dims = dims) invisible(out) } sigalert <- function(data, disperse.list, output, notorious = 5){ k <- length(disperse.list$p.order) + 1 # Construct Variable Names for all variables constructed in Imputation Model. # This uses the "index" which details all the variables included in the imputation model. # The index is in the unstacked variable order. # Possibly, if this is useful elsewhere, this might be moved to "prep.r". varnm <- NULL lag.count <- 0 lead.count <- 0 poly.count <- 0 unknown.count <- 0 for (i in 1:(k-1)) { if (identical(disperse.list$index[i], -0.5)) { lag.count <- lag.count + 1 varnm <- c(varnm, paste("lag", lag.count)) } else if (identical(disperse.list$index[i], 0.5)) { lead.count <- lead.count + 1 varnm <- c(varnm, paste("lead", lead.count)) } else if (identical(disperse.list$index[i],0)) { poly.count <- poly.count + 1 varnm <- c(varnm, paste("polytime", poly.count)) } else if(disperse.list$index[i] >= 1) { varnm <- c(varnm, names(data[disperse.list$index[i]])) # Check what this does with matricies? } else { unknown.count <- unknown.count + 1 varnm <- c(varnm, paste("unknown", unknown.count)) } } # WARNING: Currently assumes rotations is a vector. If dim=2, rotations is a matrix. # if(!identical(disperse.list$dims,1)){ # disperse.list$rotations<-disperse.list$rotations[1,] # } # This is a flag vector that identifies the largest values in the first principal component. largest.rotations <- disperse.list$rotations * 0 largest.rotations[order(abs(disperse.list$rotations),decreasing = TRUE)[1:notorious]] <- 1 # This is a matrix of the size of theta, which has a 1 in the positions of the largest # contributions to the first principal component. 
# (largest corresponding elements of disperse.list$rotations) map <- matrix(0, k, k) map[upper.tri(map, TRUE)] <- c(0, largest.rotations) map <- t(map) map[upper.tri(map, TRUE)] <- c(0, largest.rotations) map[c(1, disperse.list$p.order + 1), c(1, disperse.list$p.order + 1)] <- map # Rearrange to unstacked variable positions print(abs(map)) gtz<-function(a) return(sum(a) > 0) row.keep <- apply(map, 1, gtz) col.keep <- apply(map, 2, gtz) # This is the submatrix of rotations, reshaped as a theta matrix, with the largest elements. prcomp.matrix <- matrix(0,k,k) prcomp.matrix[upper.tri(prcomp.matrix, TRUE)] <- c(0, disperse.list$rotations) prcomp.matrix <- t(prcomp.matrix) prcomp.matrix[upper.tri(prcomp.matrix, TRUE)] <- c(0, disperse.list$rotations) prcomp.matrix[c(1,disperse.list$p.order+1),c(1,disperse.list$p.order+1)] <- prcomp.matrix # Rearrange to unstacked variable positions # This is the submatrix that we want to represent portal <- prcomp.matrix[row.keep,col.keep] portalsize <- ncol(portal) portal.row.names <- varnm[row.keep] # In symmetric matricies, these are the same. portal.col.names <- varnm[col.keep] # In symmetric matricies, these are the same. # This is a matrix that gives the relative rank of every element. col.map <- matrix(0, portalsize, portalsize) col.portal <- rank(abs(portal[upper.tri(portal, TRUE)])) col.map[upper.tri(col.map, TRUE)] <- col.portal col.map <- t(col.map) col.map[upper.tri(col.map, TRUE)] <- col.portal # This creates a continuous color palette of the correct size. n.unique <- sum(upper.tri(matrix(1, portalsize, portalsize), TRUE)) Lab.palette <- colorRampPalette(c("white", "yellow", "red"), space = "Lab") my.palette <- Lab.palette(n.unique) # Plot the submatrix to be represented. plot.new() plot.window(xlim = c(-2, portalsize + 1), ylim = c(1, portalsize + 3)) for(i in 1:portalsize){ text(x = 1, y = portalsize - i + 1 + 0.5, pos = 2, labels = portal.row.names[i]) # Row variable names for(j in 1:portalsize){ rect(xleft = j, ybottom = portalsize - i + 1, xright = j + 1, ytop = portalsize - i + 2, density = NULL, angle = 45, col = my.palette[col.map[i, j]], border = NULL, lty = par("lty"), lwd = par("lwd")) text(x = j + 0.5, y = portalsize - i + 1 + 0.5, labels = as.character(round(portal[i,j]*100)/100) ) # SHOULD FIND BETTER SIG FIGS HACK } } for(j in 1:portalsize){ text(x = j + 0.6, y = portalsize + 1.1, pos = 2, labels = portal.col.names[j], srt = 270) # Column variable names. } return(NULL) } #' Plot observed and imputed time-series for a single cross-section #' #' Plots a time series for a given variable in a given cross-section and #' provides confidence intervals for the imputed values. #' #' @param output output from the function \code{amelia}. #' @param var the column number or variable name of the variable to plot. #' @param cs the name (or level) of the cross-sectional unit to plot. #' Maybe a vector of names which will panel a window of plots #' @param draws the number of imputations on which to base the confidence #' intervals. #' @param conf the confidence level of the confidence intervals to plot #' for the imputated values. #' @param misscol the color of the imputed values and their confidence #' intervals. #' @param obscol the color of the points for observed units. #' @param xlab x axis label #' @param ylab y axis label #' @param main overall plot title #' @param pch point shapes for the plot. #' @param ylim y limits (y1, y2) of the plot. #' @param xlim x limits (x1, x2) of the plot. 
#' @param frontend a logical value for use with the \code{AmeliaView} GUI.
#' @param plotall a logical value that provides a shortcut for plotting all
#' unique values of the level. A shortcut for the \code{cs} argument, a
#' TRUE value overwrites any \code{cs} argument.
#' @param nr the number of rows of plots to use when plotting multiple
#' cross-sectional units. The default value will try to minimize this
#' value to create a roughly square representation, up to a value of
#' four. If all plots do not fit on the window, a new window will be
#' started.
#' @param nc the number of columns of plots to use. See \code{nr}.
#' @param pdfstub a stub string used to write pdf copies of each window
#' created by the plot. The default is not to write pdf output, but any
#' string value will turn on pdf output to the local working directory.
#' If the stub is \code{mystub}, then plots will be saved as
#' \code{mystub1.pdf}, \code{mystub2.pdf}, etc.
#' @param ... further graphical parameters for the plot.
#'
#' @details
#' The \code{cs} argument should be a value from the variable set to the
#' \code{cs} argument in the \code{amelia} function for this output. This
#' function will not work if the \code{ts} and \code{cs} arguments were
#' not set in the \code{amelia} function. If an observation has been
#' overimputed, \code{tscsPlot} will plot both an observed and an imputed
#' value.
tscsPlot <- function(output, var, cs, draws = 100, conf = .90,
                     misscol = "red", obscol = "black", xlab, ylab, main,
                     pch, ylim, xlim, frontend = FALSE, plotall = FALSE,
                     nr, nc, pdfstub, ...) {
  if (missing(var))
    stop("I don't know which variable (var) to plot")
  if (missing(cs) && !plotall)
    stop("case name (cs) is not specified")
  if (is.null(output$arguments$ts) || is.null(output$arguments$cs))
    stop("both 'ts' and 'cs' need to be set in the amelia output")
  if (!("amelia" %in% class(output)))
    stop("the 'output' is not Amelia output")

  data <- getOriginalData(output)

  # Allow character names as arguments for "var" with data.frames
  if (is.character(var)) {
    if (!is.data.frame(data)) {
      stop("'var' must be identified by column number as dataset is not a data frame")
    } else {
      varpos <- match(var, colnames(data))
      if (is.na(varpos)) {
        stop("the name provided for 'var' argument does not exist in the dataset provided")
      } else {
        var <- varpos
      }
    }
  }

  csvarname <- output$arguments$cs
  tsvarname <- output$arguments$ts
  if (is.data.frame(data)) {
    csvar <- data[[csvarname]]
    tsvar <- data[[tsvarname]]
  } else {
    csvar <- data[, output$arguments$cs]
    tsvar <- data[, output$arguments$ts]
  }

  if (is.factor(csvar)) {
    units <- levels(csvar)
  } else {
    units <- unique(csvar)
  }
  if (plotall) {
    cs <- units
  } else {
    if (!(all(cs %in% units)))
      stop("some cross-section unit requested for the plot is not in the data")
  }

  # Picks a number of rows and columns if not user defined.
Maxs out at 4-by-4, unless user defined if (missing(nr)) { nr <- min(4, ceiling(sqrt(length(cs)))) } if (missing(nc)) { nc <- min(4, ceiling(length(cs)/nr)) } if (length(cs)>1) { oldmfcol <- par()$mfcol par(mfcol = c(nr, nc)) } prepped <- amelia.prep(x = data, arglist = output$arguments) if (!is.null(prepped$blanks)) { data <- data[-prepped$blanks,] unit.rows <- which(csvar %in% cs) miss <- output$missMatrix[-prepped$blanks,][unit.rows, var] == 1 } else { unit.rows <- which(csvar %in% cs) miss <- output$missMatrix[unit.rows, var] == 1 } time <- tsvar[unit.rows] # These are the time values for rows appearing in some future plot imps.cs <- csvar[unit.rows] # These are the cs units for rows appearing in some future plot cross.sec <- prepped$x[!is.na(match(prepped$n.order, unit.rows)),] stacked.var <- match(var, prepped$subset.index[prepped$p.order]) subset.var <- match(var, prepped$subset.index) imps <- array(NA, dim = c(nrow(cross.sec), draws)) drawsperimp <- draws/output$m if (sum(miss) > 0) { for (i in 1:draws) { currtheta <- output$theta[,,ceiling(i/drawsperimp)] imps[,i] <- amelia.impute(x = cross.sec, thetareal = currtheta, bounds = prepped$bounds, priors = prepped$priors, max.resample = output$arguments$max.resample)[,stacked.var] } imps <- imps*prepped$scaled.sd[subset.var] + prepped$scaled.mu[subset.var] if (var %in% output$arguments$logs) { imps <- exp(imps) + prepped$xmin[which(var == output$arguments$logs)] } if (var %in% output$arguments$sqrt) { imps <- imps^2 } if (var %in% output$arguments$lgstc) { imps <- exp(imps)/(1 + exp(imps)) } outoforder <- match(prepped$n.order, unit.rows)[!is.na(match(prepped$n.order, unit.rows))] imps <- imps[order(outoforder),] } if (missing(pch)) pch <- 19 if (missing(xlab)) xlab <- "time" if (missing(ylab)) ylab <- names(data)[var] if (frontend) { dev.new() } if (!missing(main)) { main <- rep(main, length.out = length(cs)) } count <- 0 for(i in 1:length(cs)){ current.rows <- which(csvar == cs[i]) current.time <- tsvar[current.rows] flag <- imps.cs == cs[i] current.miss <- miss[flag] if (sum(current.miss) > 0) { current.imps <- imps[flag,] current.means <- rowMeans(current.imps) current.uppers <- apply(current.imps, 1, quantile, probs = (conf + (1 - conf)/2)) # THIS IS LIKELY SLOW current.lowers <- apply(current.imps, 1, quantile, probs = (1-conf)/2) # THIS IS LIKELY SLOW } else { current.means <- data[[var]][current.rows] current.uppers <- current.lowers <- current.means } cols <- ifelse(current.miss, misscol, obscol) current.main <- ifelse(missing(main), as.character(cs[i]), main[i]) # Allow title to be rolling if not defined if (missing(xlim)) { # Allow axes to vary by unit, if not defined current.xlim <- range(current.time) } else { current.xlim <- xlim } if (missing(ylim)) { current.ylim <- range(current.uppers,current.lowers,current.means) } else { current.ylim <- ylim } plot(x = current.time, y = current.means, col = cols, pch = pch, ylim = current.ylim, xlim = current.xlim, ylab = ylab, xlab = xlab, main = current.main, ...) segments(x0 = current.time, x1 = current.time, y0 = current.lowers, y1 = current.uppers, col = cols, ...) 
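    ## an overimputed cell is one that is both observed in the original
    ## data and flagged as missing; when detected, also overlay the
    ## observed values as points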
oiDetect <- (sum(output$missMatrix[current.rows,var]) + sum(!is.na(data[current.rows, var]))) > length(current.rows) if (oiDetect) { points(x = current.time, y = data[current.rows, var], pch = pch, col = obscol) } # print page if window full if ((!missing(pdfstub)) & (i %% (nr*nc) ==0)) { count <- count + 1 dev.copy2pdf(file = paste(pdfstub, count, ".pdf", sep="")) } } if (!missing(pdfstub)) { if ((i %% (nr*nc)) != 0) { # print last page if not complete count <- count + 1 dev.copy2pdf(file = paste(pdfstub, count, ".pdf", sep="")) } par(mfcol = oldmfcol) # return to previous windowing } # although always now fills by col even if previously by row invisible(imps) } #' Combine Multiple Results From Multiply Imputed Datasets #' #' Combine sets of estimates (and their standard errors) generated from #' different multiply imputed datasets into one set of results. #' #' @param q A matrix or data frame of (k) quantities of interest (eg. #' coefficients, parameters, means) from (m) multiply imputed datasets. #' Default is to assume the matrix is m-by-k (see \code{byrow}), thus each #' row represents a set of results from one dataset, and each column #' represents the different values of a particular quantity of interest #' across the imputed datasets. #' @param se A matrix or data frame of standard errors that correspond to each of the #' elements of the quantities of interest in \code{q}. Should be the same #' dimensions as \code{q}. #' @param byrow logical. If \code{TRUE}, \code{q} and \code{se} are treated as #' though each row represents the set of results from one dataset #' (thus m-by-k). If \code{FALSE}, each column represents results from one #' dataset (thus k-by-m). #' #' @details Uses Rubin's rules for combining a set of results from multiply imputed #' datasets to reflect the average result, with standard errors that both average #' uncertainty across models and account for disagreement in the estimated values #' across the models. #' #' @return #' \item{q.mi}{Average value of each quantity of interest across the m models} #' \item{se.mi}{Standard errors of each quantity of interest} #' #' @references #' Rubin, D. (1987). \emph{Multiple Imputation for Nonresponse in Surveys}. #' New York: Wiley. #' #' Honaker, J., King, G., Honaker, J. Joseph, A. Scheve K. (2001). Analyzing #' Incomplete Political Science Data: An Alternative Algorithm for Multiple #' Imputation \emph{American Political Science Review}, \bold{95(1)}, 49--69. (p53) #' mi.meld<-function(q, se, byrow = TRUE) { if (!byrow) { q <- t(q) se <- t(se) } if (is.data.frame(q)) { q <- as.matrix(q) } if (is.data.frame(se)) { se <- as.matrix(se) } am.m <- nrow(q) ones <- matrix(1, nrow = 1, ncol = am.m) imp.q <- (ones %*% q)/am.m # Slightly faster than "apply(b,2,mean)" ave.se2 <- (ones %*% (se^2))/am.m # Similarly, faster than "apply(se^2,2,mean)" diff <- q - matrix(1, nrow = am.m, ncol = 1) %*% imp.q sq2 <- (ones %*% (diff^2))/(am.m - 1) imp.se <- sqrt(ave.se2 + sq2 * (1 + 1/am.m)) return(list(q.mi = imp.q, se.mi = imp.se)) }
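## A short usage sketch for mi.meld() (not part of the package). The
## data and imputation call mirror the package examples above; the
## analysis model is illustrative and any estimator that returns
## coefficients and standard errors would work:
data(africa)
a.out <- amelia(x = africa, cs = "country", ts = "year", logs = "gdp_pc")
b.out <- NULL
se.out <- NULL
for (i in 1:a.out$m) {
  ols.out <- lm(civlib ~ trade + gdp_pc, data = a.out$imputations[[i]])
  b.out <- rbind(b.out, coef(ols.out))                   # m-by-k estimates
  se.out <- rbind(se.out, coef(summary(ols.out))[, 2])   # m-by-k std. errors
}
mi.meld(q = b.out, se = se.out)  # pooled estimates and standard errors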
## end of file: R/diag.r
## Code for bootstrapped Amelia ported to R ## 17/09/05 jh - Added "subset" routine for idvars and completely missing observations ## 22/09/05 jh - Changed stack function to optionally fix column positions, changed bootx to reject some bootstraps, changed emarch to work when no data missing ## 23/09/05 mb - Added "amcheck" function to change data and check for errors, "impdata" now in format given to amelia. ## 24/09/05 jh - Modified "amcheck," added polynomials of time, added ability to impute "logicals" from data frames ## 25/09/05 jh - Finalized plumbing for observational priors ## 26/09/05 mb - Added "frontend" argument and screen box to amelia and emarch functions ## 27/09/05 jh - Added observational and empirical priors ## 28/09/05 mb - Fixed "frontend" to update the GUI after each print. ## 30/09/05 mb - "amcheck" expanded, priors passed as individual matrices ## 07/10/05 mb - Added passing of lags and multiple levels of polynomials; expanded "amcheck" to cover these ## 08/10/05 jh - Enabled variable degree of polynomials of time, enabled interaction with cross-section ## 14/10/05 mb - Put "amcheck" into its own file ## 21/10/05 mb - Changed "stack" to "amstack" (and "unstack"); added log transformations in "amtransform"; adding "archive" option that saves a list of the settings ## 21/10/05 mb - Added a soft-crash that will print and output the error number and message. ## 24/10/05 mb - Added "sqrt" option for square root transformations, "lgstc" for logistic transformations ## 27/10/05 mb - Enabled lags and leads ## 9//11/05 mb - Enabled nominals; added "incheck" to allow skipping amcheck; moved dataframe->matrix conversion to framemat function. ## 15/12/05 mb - new (fixed) impute function; ## 21/02/06 mb - added positive definite check to "startvals", now defaults to identity if not pd. ## 22/02/06 mb - penrose inverse function added in sweep; soft-crashes on a non invertible covariance matrix at the end of EM ## 23/02/06 mb - empri increases if EM hits a non-monotonic section; added 'startvals' option; added iteration history to archive; ## 21/03/06 mb - added "ords" option and added ordinal support in "unsubset"; fixed a bug in nominals that wouldn't fill in imputations; ## 22/03/06 mb - character/factors can be specified and returned correctly as ordinals; ## 08/04/06 jh - revised functions to handle large datasets, merged with parallel emb.r version ## 10/04/06 mb - added "nametonumber" function that converts column names to numbers in all of the list options ## 28/04/06 mb - extracted transformation functions to prep.r ## 29/04/06 jh - changed screen output for "p2s", ivector and icap in "indxs", revised "diff" convergence monitor to upper triangular ## 01/05/06 mb - removed "rbind" calls in emfred, impute. ## 01/06/06 mb - added "allthetas" option to emarch for overdispersion diagnostic ## 15/06/06 jh - merged with priors version changing all EM and impute procs, modified how lists are generated in indxs("icap") and amelia("impdata"). ## 27/06/06 mb - added arglist argument to load in output from amelia or the gui. ## 13/07/06 mb - moved gc() calls out of emfred into emarch ## 02/08/06 mb - removed data.matrix() call when calling unsubset (moved to prep), fixed impfill for char. ## 29/08/06 jh - changed tolerance defaults ## 20/09/06 mb - new option (temp?) keep.data that will trash datasets from memory ## 01/10/06 mb - added additional info to p2s=2. 
## 27/11/06 mb - new priors format ## 15/01/07 jh/mb - final version changes, degrees of freedom messages,autoprior option, modified comments, rearranged core arguments ## 10/05/07 mb - changed 'impute' to 'amelia.impute' ## 04/07/07 jh - added "emburn" option to modify convergence criteria ## 04/06/08 mb - changed the writing to GUI ('if (frontend)' calls) to remove globals ## 17/07/08 mb - fixed frontend error bug (dumping output to screen ## 22/07/08 mb - good coding update: T->TRUE/F->FALSE ## 27/03/10 jh - small changes to arguments of functions to deal with "splinetime" option in same fashion as "polytime" ## Draw from a multivariate normal distribution ## n: number of draws ## mu: vector of means ## vcv: variance-covariance matrix rmvnorm <- function(n,mu,vcv){ return(matrix(rnorm(n*length(mu)),n,length(mu)) %*% (chol(vcv)) + (matrix(1,n,1) %*% mu ) ) } ## Returns the data matrix without the rows with missing values ## (Same return as na.omit, without any attributes) ## x: data matrix ## can't send it a vector right now packr<-function(x) { r<-is.na(x) sumr<-rowSums(r) x2<-x[sumr==0, , drop=FALSE] return(x2) } ## Create dataset bootstrapped from original dataset ## Rejects Bootstraps where an entire variable becomes missing ## x: data (matrix) ## priors: matrix of priors about means for observations bootx<-function(x,priors=NULL, boot.type="np"){ flag <- TRUE AMn <- nrow(x) if (!is.null(boot.type)) { if (boot.type == "none") { return(list(x=x,priors=priors)) } } while (flag){ order<-trunc(runif(nrow(x), min=1, max=nrow(x)+1)) xboot<-x[order,] if (!identical(priors,NULL)){ sigPriors <- matrix(NA,nrow(x),ncol(x)) muPriors <- matrix(NA,nrow(x),ncol(x)) muPriors[priors[,1:2]] <- priors[,3] sigPriors[priors[,1:2]] <- priors[,4] muPriors <- muPriors[order,] sigPriors <- sigPriors[order,] prior.ind <- which(!is.na(muPriors), arr.ind = TRUE) priors <- cbind(prior.ind, muPriors[prior.ind], sigPriors[prior.ind]) # priors[,1]<-match(priors[,1],order) #priors <- priors[!is.na(priors[,1]),,drop=FALSE] } flag<-any(colSums(is.na(xboot))==AMn & !((1:ncol(xboot)) %in% priors[,2])) } return(list(x=xboot,priors=priors)) } ## Put imputations into the original data format ## Converts integer values back to factors or characters impfill <- function(x.orig, x.imp, noms, ords, priors, overimp) { if (!is.null(priors)) { is.na(x.orig)[priors[,c(1,2)]] <- TRUE } if (!is.null(overimp)) { is.na(x.orig)[overimp] <- TRUE } AMr1.orig <- is.na(x.orig) orig.fact <- sapply(x.orig, is.factor) orig.char <- sapply(x.orig, is.character) x.imp <- as.data.frame(x.imp[, 1:ncol(x.orig)]) for (i in 1:ncol(x.orig)) { if (is.logical(x.orig[[i]]) & sum(!is.na(x.orig[[i]])) > 0) { x.imp[,i] <- as.logical(x.imp[,i]>0.5) } ## imputations will be numeric and tibbles if (is.integer(x.orig[[i]]) & sum(is.na(x.orig[[i]])) > 0) { x.orig[, i] <- as.numeric(x.orig[[i]]) } } possibleFactors <- unique(c(noms, ords)) if (!is.null(possibleFactors)) { if (ncol(x.orig) > length(possibleFactors)) { num.cells <- which(is.na(x.orig) & !(col(x.orig) %in% possibleFactors), arr.ind = TRUE) for (j in seq_len(nrow(num.cells))) { j_row <- num.cells[j, 1] j_col <- num.cells[j, 2] x.orig[j_row, j_col] <- x.imp[j_row, j_col] } } for (i in possibleFactors) { if (orig.fact[i]) x.orig[is.na(x.orig[, i]), i] <- levels(x.orig[[i]])[x.imp[is.na(x.orig[[i]]), i]] else if (orig.char[i]) x.orig[, i] <- levels(factor(x.orig[[i]]))[x.imp[, i]] else x.orig[is.na(x.orig[[i]]), i] <- x.imp[is.na(x.orig[[i]]), i] } } else { x.orig[AMr1.orig] <- x.imp[AMr1.orig] } 
new.char <- sapply(x.orig, is.character) char.match <- orig.char != new.char if (sum(char.match) != 0) for (i in seq_along(char.match)) if (char.match[i]) x.orig[, i] <- as.numeric(x.orig[[i]]) return(x.orig) } ## Create Starting Values for EM Chain startval<-function(x,startvals=0,priors=NULL){ AMp<-ncol(x) if (!is.null(priors)) { ## fill in prior means x[(priors[,2]-1)*nrow(x)+priors[,1]] <- priors[,3] } if (ncol(as.matrix(startvals)) == AMp+1 && nrow(as.matrix(startvals)) == AMp+1) #checks for correct size of start value matrix if (startvals[1,1]==-1) #checks for the -1 necessary for sweep return(startvals) thetast<-matrix(0,nrow=AMp+1,ncol=AMp+1) # Create matrix of zeros thetast[row(thetast)==col(thetast)] <- 1 # Create Identity matrix thetast[1,1]<-(-1) if (startvals==0){ # Defaults to Identity if too few rows fully observed cmpr<-packr(x) if (nrow(cmpr)>AMp){ means<-colMeans(cmpr) if (all(eigen(cov(cmpr))$values > 10*.Machine$double.eps)) { #Checks for positive definiteness (positive eigenvalues) thetast[2:(AMp+1),2:(AMp+1)]<-cov(cmpr) #.Machine$double.eps instead of 0 to account for rounding. thetast[2:(AMp+1),1]<-means thetast[1,2:(AMp+1)]<-means } } } return(thetast) } ## Create certain indicies. Only needs to be called once, not every pattern. ## o,m,icap come from omiindxs ## ivector is i from indexm indxs<-function(x){ AMn<-nrow(x) AMr1<-is.na(x) # True if missing. AMr2<-unique(AMr1) o<- !AMr2 # (or o<-AMr2==1) Maybe == is not robust to small fluctuations m<- AMr2 # so put in check procedure (m<-) ## The following can be replaced by fortran .dll, although this has only moderate time savings ## ivector<-1 for(i in 2:AMn){ ischange<- !identical(AMr1[i,],AMr1[i-1,]) if(ischange){ ivector<-c(ivector,i) } } ivector<-c(ivector,AMn+1) ##################################################### ## ivector<-.Fortran("indxs",1*AMr1,as.integer(AMn),as.integer(ncol(x)),as.integer(nrow(AMr2)+1),ivector=integer(nrow(AMr2)+1))$ivector icap<-vector(mode="list",nrow(AMr2)) # This is a useful index, although no longer currently used for (i in 2:length(ivector)){ icap[[i]]<-seq(ivector[i-1],ivector[i]-1) } return(list(AMr1=AMr1,AMr2=AMr2,o=o,m=m,icap=icap,ivector=ivector)) } ## EM chain architecture calls emarch<-function(x,p2s=TRUE,thetaold=NULL,startvals=0,tolerance=0.0001,priors=NULL,empri=NULL,frontend=FALSE,collect=FALSE,allthetas=FALSE,autopri=0.05,emburn=c(0,0)){ if (p2s == 2) { cat("setting up EM chain indicies\n") flush.console() } iter.hist<-matrix(0,nrow=1,ncol=3) if (sum(complete.cases(x)) < nrow(x)){ # Check for fully observed data if (identical(thetaold,NULL)) thetaold<-startval(x,startvals=startvals,priors=priors) indx<-indxs(x) # This needs x.NA if (!identical(priors,NULL)){ priors[,4]<-1/priors[,4] # change sd to 1/var priors[,3]<-priors[,3]*priors[,4] # get the precision-weighted # mus priors <- priors[order(priors[,1],priors[,2]),,drop = FALSE] } x[is.na(x)]<-0 # Change x.NA to x.0s AM1stln<-sum(indx$m[1,])==0 & nrow(indx$m) > 1 count<-0 diff<- 1+tolerance AMr1 <- 1 * indx$AMr1 oo <- 1 * indx$o mm <- 1 * indx$m if (is.null(empri)) { empri <- 0 } theta <- .Call("emcore", x, AMr1, oo, mm, indx$ivector, thetaold, tolerance, emburn, p2s, empri,autopri, allthetas, priors, PACKAGE="Amelia") } else { if (p2s) cat("\n","No missing data in bootstrapped sample: EM chain unnecessary") pp1<-ncol(x)+1 # p (the number of variables) plus one means<-colMeans(x) thetanew<-matrix(0,pp1,pp1) thetanew[1,1]<-(-1) thetanew[2:pp1,1]<-means thetanew[1,2:pp1]<-means thetanew[2:pp1,2:pp1]<-cov(x) # Need to 
consider Priors in these cases, iter.hist<-NA # Although not # currently necessary. return(list(thetanew=thetanew,iter.hist=iter.hist)) } return(list(thetanew=theta$theta,iter.hist=theta$iter.hist)) } ## Draw imputations for missing values from a given theta matrix amelia.impute<-function(x,thetareal,priors=NULL,bounds=NULL,max.resample=NULL){ indx<-indxs(x) # This needs x.NA if (!identical(priors,NULL)){ priors[,4]<-1/priors[,4] priors[,3]<-priors[,3]*priors[,4] priors <- priors[order(priors[,1],priors[,2]),,drop = FALSE] } x[is.na(x)]<-0 # Change x.NA to x.0s AM1stln<-sum(indx$m[1,])==0 & nrow(indx$m) > 1 # Create sundry simple indicators i<-indx$ivector iii<-indx$icap AMp<-ncol(x) AMn<-nrow(x) AMr1 <- 1 * indx$AMr1 oo <- 1 * indx$o mm <- 1 * indx$m if (is.null(bounds)) max.resample <- NULL imp <- .Call("ameliaImpute", x, AMr1, oo, mm, indx$ivector, thetareal, priors, bounds, max.resample, PACKAGE="Amelia") return(imp) } #' Combine multiple runs of Amelia #' #' Combines multiple runs of \code{amelia} with the same #' arguments and data into one \code{amelia} object. #' #' @param ... one or more objects of class \code{amelia} with the same #' arguments and created from the same data. #' #' @details \code{ameliabind} will combine multiple runs of \code{amelia} into one #' object so that you can utilize diagnostics and modelling on all the #' imputations together. This function is useful for combining multiple #' runs of \code{amelia} run on parallel machines. #' #' Note that \code{ameliabind} only checks that they arguments and the #' missingness matrix are identical. Thus, it could be fooled by two #' datasets that are identical up to a transformation of one variable. #' #' @return An object of class \code{amelia}. #' #' @seealso \code{\link{amelia}} #' #' @examples #' data(africa) #' a1.out <- amelia(x = africa, cs = "country", ts = "year", logs = "gdp_pc") #' a2.out <- amelia(x = africa, cs = "country", ts = "year", logs = "gdp_pc") #' all.out <- ameliabind(a1.out, a2.out) #' summary(all.out) #' plot(all.out) #' ameliabind <- function(...) { args <- list(...) if (any(!sapply(args, is, "amelia"))) stop("All arguments must be amelia output.") if (length(args) > 1) { ## test that data is the same. we'll just compare the missMatrices. ## this will allow datasets with the same size and missingness ## matrix to be combined unintentionally, but this seems unlikely. 
datacheck <- lapply(args, function(x) isTRUE(identical(x$missMatrix,args[[1]]$missMatrix))) if (any(!unlist(datacheck))) stop("Non-compatible datasets.") ## test that all the arguments are the same check <- lapply(args, function(x) isTRUE(identical(x$arguments, args[[1]]$arguments))) if (any(!unlist(check))) stop("Non-compatible amelia arguments") check <- lapply(args, function(x) isTRUE(identical(x$transform.calls, args[[1]]$transform.calls))) if (any(!unlist(check))) stop("Non-compatible transformations on imputed datasets") imps <- unlist(lapply(args, function(x) return(x$m))) newm <- sum(imps) impindex <- c(0,cumsum(imps)) k <- nrow(args[[1]]$mu) out <- list(imputations = list(), m = integer(0), missMatrix = matrix(NA,0,0), overvalues = args[[1]]$overvalues, theta = array(NA, dim = c(k+1,k+1,newm) ), mu = matrix(NA, nrow = k, ncol = newm), covMatrices = array(NA, dim = c(k,k,newm)), code = integer(0), message = character(0), iterHist = list(), arguments = list(), orig.vars = args[[1]]$orig.vars) out$m <- newm out$missMatrix <- args[[1]]$missMatrix out$arguments <- args[[1]]$arguments out$transform.calls <- args[[1]]$transform.calls out$transform.vars <- args[[1]]$transform.vars ## since code==1 is good and code==2 means we have an NA, ## then our new output should inherit a 2 if there are any out$code <- max(unlist(lapply(args,function(x) return(x$code)))) if (out$code > 2) stop("Amelia output contains error.") if (out$code==2) out$message <- "One or more of the imputations resulted in a covariance matrix that was not invertible." else out$message <- "Normal EM convergence" for (i in 1:length(args)) { currimps <- (impindex[i]+1):impindex[i+1] out$mu[,currimps] <- args[[i]]$mu out$theta[,,currimps] <- args[[i]]$theta out$covMatrices[,,currimps] <- args[[i]]$covMatrices out$imputations <- c(out$imputations, args[[i]]$imputations) out$iterHist <- c(out$iterHist, args[[i]]$iterHist) } names(out$imputations) <- paste("imp",1:length(out$imputations),sep="") #or: names(out$imputations) <- paste("imp",1:impindex[i+1],sep="") class(out) <- "amelia" class(out$imputations) <- c("mi","list") } else { out <- args[[1]] if (out$code > 2) stop("Amelia output contains error.") } return(out) } getOriginalData <- function(obj) { data <- obj$imputations[[1]] is.na(data) <- obj$missMatrix data <- data[, obj$orig.vars] oi <- obj$arguments$overimp if (!is.null(oi)) { for (i in 1:nrow(oi)) { data[oi[i,1], oi[i,2]] <- obj$overvalues[i] } } return(data) } remove.imputations <- function(obj) { data <- obj$imputations[[1]] is.na(data) <- obj$missMatrix oi <- obj$arguments$overimp if (!is.null(oi)) { for (i in 1:nrow(oi)) { data[oi[i,1], oi[i,2]] <- obj$overvalues[i] } } return(data) } ## amelia - multiple imputation. core function ## #' AMELIA: Multiple Imputation of Incomplete Multivariate Data #' #' Runs the bootstrap EM algorithm on incomplete data and creates #' imputed datasets. #' #' @author James Honaker #' @author Gary King #' @author Matt Blackwell #' #' #' @param x either a matrix, data.frame, a object of class #' "amelia", or an object of class "molist". The first two will call the #' default S3 method. The third a convenient way to perform more imputations #' with the same parameters. The fourth will impute based on the settings from #' \code{moPrep} and any additional arguments. #' @param m the number of imputed datasets to create. #' @param p2s an integer value taking either 0 for no screen output, #' 1 for normal screen printing of iteration numbers, and 2 for detailed #' screen output. 
See "Details" for specifics on output when p2s=2. #' @param frontend a logical value used internally for the GUI. #' @param idvars a vector of column numbers or column names that indicates #' identification variables. These will be dropped from the analysis but #' copied into the imputed datasets. #' @param ts column number or variable name indicating the variable identifying time #' in time series data. #' @param cs column number or variable name indicating the cross section variable. #' @param polytime integer between 0 and 3 indicating what #' power of polynomial should be included in the imputation model #' to account for the effects of time. A setting of 0 would #' indicate constant levels, 1 would indicate linear time #' effects, 2 would indicate squared effects, and 3 would #' indicate cubic time effects. #' @param splinetime interger value of 0 or greater to control cubic #' smoothing splines of time. Values between 0 and 3 create a simple #' polynomial of time (identical to the polytime argument). Values \code{k} greater #' than 3 create a spline with an additional \code{k-3} #' knotpoints. #' @param intercs a logical variable indicating if the #' time effects of \code{polytime} should vary across the #' cross-section. #' @param lags a vector of numbers or names indicating columns in the data #' that should have their lags included in the imputation model. #' @param leads a vector of numbers or names indicating columns in the data #' that should have their leads (future values) included in the imputation #' model. #' @param startvals starting values, 0 for the parameter matrix from #' listwise deletion, 1 for an identity matrix. #' @param tolerance the convergence threshold for the EM algorithm. #' @param logs a vector of column numbers or column names that refer #' to variables that require log-linear transformation. #' @param sqrts a vector of numbers or names indicating columns in the data #' that should be transformed by a sqaure root function. Data in this #' column cannot be less than zero. #' @param lgstc a vector of numbers or names indicating columns in the data #' that should be transformed by a logistic function for proportional data. #' Data in this column must be between 0 and 1. #' @param noms a vector of numbers or names indicating columns in the data #' that are nominal variables. #' @param ords a vector of numbers or names indicating columns in the #' data that should be treated as ordinal variables. #' @param incheck a logical indicating whether or not the inputs to the #' function should be checked before running \code{amelia}. This should #' only be set to \code{FALSE} if you are extremely confident that your #' settings are non-problematic and you are trying to save computational #' time. #' @param collect a logical value indicating whether or #' not the garbage collection frequency should be increased during the #' imputation model. Only set this to \code{TRUE} if you are experiencing memory #' issues as it can significantly slow down the imputation #' process #' @param arglist an object of class "ameliaArgs" from a previous run of #' Amelia. Including this object will use the arguments from that run. #' @param empri number indicating level of the empirical (or ridge) prior. #' This prior shrinks the covariances of the data, but keeps the means #' and variances the same for problems of high missingness, small N's or #' large correlations among the variables. 
#' Should be kept small, perhaps 0.5 to 1 percent of the rows of the data;
#'   a reasonable upper bound is around 10 percent of the rows of the data.
#' @param priors a four or five column matrix containing the priors for
#'   either individual missing observations or variable-wide missing
#'   values. See "Details" for more information.
#' @param autopri allows the EM chain to increase the empirical prior if
#'   the path strays into a nonpositive definite covariance matrix, up to a
#'   maximum empirical prior of the value of this argument times \code{n},
#'   the number of observations. Must be between 0 and 1, and at zero this
#'   feature is turned off.
#' @param emburn a numeric vector of length 2, where \code{emburn[1]} is
#'   the minimum EM chain length and \code{emburn[2]} is the maximum EM
#'   chain length. These are ignored if they are less than 1.
#' @param bounds a three column matrix to hold logical bounds on the
#'   imputations. Each row of the matrix should be of the form
#'   \code{c(column.number, lower.bound, upper.bound)}. See "Details"
#'   below.
#' @param max.resample an integer that specifies how many times Amelia
#'   should redraw the imputed values when trying to meet the logical
#'   constraints of \code{bounds}. After this value, imputed values are set
#'   to the bounds.
#' @param overimp a two-column matrix describing which cells are to be
#'   overimputed. Each row of the matrix should be a \code{c(row, column)}
#'   pair. Each of these cells will be treated as missing and replaced with
#'   draws from the imputation model.
#' @param boot.type choice of bootstrap, currently restricted to either
#'   \code{"ordinary"} for the usual non-parametric bootstrap and
#'   \code{"none"} for no bootstrap.
#' @param parallel the type of parallel operation to be used (if any). If
#'   missing, the default is taken from the option \code{"amelia.parallel"}
#'   (and if that is not set, \code{"no"}).
#' @param ncpus integer: the number of processes to be used in parallel
#'   operation: typically one would choose the number of available CPUs.
#' @param cl an optional \pkg{parallel} or \pkg{snow} cluster for use if
#'   \code{parallel = "snow"}. If not supplied, a cluster on the local
#'   machine is created for the duration of the \code{amelia} call.
#' @param ... further arguments to be passed.
#'
#' @details
#' Multiple imputation is a method for analyzing incomplete multivariate
#' data. This function will take an incomplete dataset in either data frame
#' or matrix form and return \code{m} imputed datasets with no missing
#' values. The algorithm first creates a bootstrapped version of the
#' original data, estimates the sufficient statistics (with priors if
#' specified) by EM on this bootstrapped sample, and then imputes the
#' missing values of the original data using the estimated sufficient
#' statistics. It repeats this process \code{m} times to produce the
#' \code{m} complete datasets where the observed values are the same and
#' the unobserved values are drawn from their posterior distributions.
#'
#' The function will start a "fresh" run of the algorithm if \code{x} is
#' either an incomplete matrix or data.frame. In this method, all of the
#' options will be user-defined or set to their default. If \code{x} is the
#' output of a previous Amelia run (that is, an object of class "amelia"),
#' then Amelia will run with the options used in that previous run. This is
#' a convenient way to run more imputations of the same model.
#'
#' You can provide Amelia with informational priors about the missing
#' observations in your data. To specify priors, pass a four or five
#' column matrix to the \code{priors} argument with each row specifying a
#' different prior as follows:
#'
#' \code{ one.prior <- c(row, column, mean, standard deviation)}
#'
#' or,
#'
#' \code{ one.prior <- c(row, column, minimum, maximum, confidence)}.
#'
#' The first and second columns of the priors matrix should hold the row
#' and column number of the prior being set. The remaining columns should
#' hold either the mean and standard deviation of the prior, or a minimum,
#' maximum, and confidence level for the prior. You must specify your
#' priors all as distributions or all as confidence ranges. Note that
#' ranges are converted to distributions, so setting a confidence of 1
#' will generate an error.
#'
#' Setting a prior for the missing values of an entire variable is done in
#' the same manner as above, but entering a \code{0} for the row instead
#' of the row number. If priors are set for both the entire variable and
#' an individual observation, the individual prior takes precedence.
#'
#' In addition to priors, Amelia allows for logical bounds on variables.
#' The \code{bounds} argument should be a matrix with 3 columns, with each
#' row referring to a logical bound on a variable. The first column should
#' be the column number of the variable to be bounded, the second column
#' should be the lower bound for that variable, and the third column
#' should be the upper bound for that variable. As Amelia enacts these
#' bounds by resampling, particularly poor bounds will end up resampling
#' forever. Amelia will stop resampling after \code{max.resample} attempts
#' and simply set the imputation to the relevant bound.
#'
#' If each imputation is taking a long time to converge, you can increase
#' the empirical prior, \code{empri}. This value has the effect of
#' smoothing out the likelihood surface so that the EM algorithm can more
#' easily find the maximum. It should be kept as low as possible and only
#' used if needed.
#'
#' Amelia assumes the data are distributed multivariate normal. There are
#' a number of variables that can break this assumption. Usually, though,
#' a transformation can make any variable roughly continuous and
#' unbounded. We have included a number of commonly needed transformations
#' for data. Note that the data will not be transformed in the output
#' datasets and the transformation is simply useful for climbing the
#' likelihood.
#'
#' Amelia can run its imputations in parallel using the methods of the
#' \pkg{parallel} package. The \code{parallel} argument names the parallel
#' backend that Amelia should use. Users on Windows systems must use the
#' \code{"snow"} option and users on Unix-like systems should use
#' \code{"multicore"}. The \code{multicore} backend sets itself up
#' automatically, but the \code{snow} backend requires more setup. You can
#' pass a predefined cluster from the \code{parallel::makePSOCKcluster}
#' function to the \code{cl} argument. Without this cluster, Amelia will
#' attempt to create a reasonable default cluster and stop it once
#' computation is complete. When using the parallel backend, users can set
#' the number of CPUs to use with the \code{ncpus} argument. The defaults
#' for these two arguments can be set with the options
#' \code{"amelia.parallel"} and \code{"amelia.ncpus"}.
#'
#' Please refer to the Amelia manual for more information on the function
#' or the options.
#'
#' @return An instance of S3 class "amelia" with the following objects:
#' \item{imputations}{a list of length \code{m} with an imputed dataset in
#'   each entry. The class (matrix or data.frame) of these entries will
#'   match \code{x}.}
#' \item{m}{an integer indicating the number of imputations run.}
#' \item{missMatrix}{a matrix identical in size to the original dataset
#'   with 1 indicating a missing observation and a 0 indicating an observed
#'   observation.}
#' \item{theta}{An array with dimensions \eqn{(p+1)} by \eqn{(p+1)} by
#'   \eqn{m} (where \eqn{p} is the number of variables in the imputation
#'   model) holding the converged parameters for each of the \code{m} EM
#'   chains.}
#' \item{mu}{A \eqn{p} by \eqn{m} matrix of the posterior modes for the
#'   complete-data means in each of the EM chains.}
#' \item{covMatrices}{An array with dimensions \eqn{(p)} by \eqn{(p)} by
#'   \eqn{m} where the first two dimensions hold the posterior modes of the
#'   covariance matrix of the complete data for each of the EM chains.}
#' \item{code}{an integer indicating the exit code of the Amelia run.}
#' \item{message}{an exit message for the Amelia run.}
#' \item{iterHist}{a list of iteration histories for each EM chain. See
#'   documentation for details.}
#' \item{arguments}{an instance of the class "ameliaArgs" which holds the
#'   arguments used in the Amelia run.}
#' \item{overvalues}{a vector of values removed for overimputation. Used to
#'   reformulate the original data from the imputations.}
#'
#' Note that the \code{theta}, \code{mu} and \code{covMatrices} objects
#' refer to the data as seen by the EM algorithm and are thus centered,
#' scaled, stacked, transformed and rearranged. See the manual for details
#' and how to access this information.
#'
#' @references
#' Honaker, J., King, G., Blackwell, M. (2011).
#' Amelia II: A Program for Missing Data.
#' \emph{Journal of Statistical Software}, \bold{45(7)}, 1--47.
#' \doi{10.18637/jss.v045.i07}
#'
#' @seealso For imputation diagnostics, \code{\link{missmap}},
#'   \code{\link{compare.density}}, \code{\link{overimpute}} and
#'   \code{\link{disperse}}. For time series plots,
#'   \code{\link{tscsPlot}}. Also: \code{\link{plot.amelia}},
#'   \code{\link{write.amelia}}, and \code{\link{ameliabind}}
#'
#' @examples
#' data(africa)
#' a.out <- amelia(x = africa, cs = "country", ts = "year", logs = "gdp_pc")
#' summary(a.out)
#' plot(a.out)
#'
#' @keywords models
amelia <- function(x, ...) {
  UseMethod("amelia", x)
}

#' @describeIn amelia Run additional imputations for Amelia output
amelia.amelia <- function(x, m = 5, p2s = 1, frontend = FALSE, ...) {

  ## Reconstruct the original data: the imputed data with the imputed
  ## cells set back to NA (and any overimputed cells restored), keeping
  ## only the variables recorded in the missingness matrix.
  data <- getOriginalData(x)

  out <- amelia.default(x = data, m = m, arglist = x$arguments, p2s = p2s,
                        frontend = frontend, incheck = FALSE)
  num.tcalls <- length(x$transform.calls)
  if (num.tcalls > 0) {
    for (i in 1:num.tcalls) {
      tcall <- x$transform.calls[[i]]
      tcall[[2]] <- as.name("out")
      out <- eval(tcall)
    }
    out$transform.calls <- x$transform.calls
  }
  ret <- ameliabind(x, out)
  return(ret)
}

#' @describeIn amelia Perform multiple overimputation from moPrep
amelia.molist <- function(x, ...)
{ m <- match.call(expand.dots=TRUE) m$x <- x$data m$priors <- x$priors m$overimp <- x$overimp m[[1]] <- quote(Amelia::amelia.default) ret <- eval(m, parent.frame()) return(ret) } #' @describeIn amelia Run core Amelia algorithm amelia.default <- function(x, m = 5, p2s = 1, frontend = FALSE, idvars = NULL, ts = NULL, cs = NULL, polytime = NULL, splinetime = NULL, intercs = FALSE, lags = NULL, leads = NULL, startvals = 0, tolerance = 0.0001, logs = NULL, sqrts = NULL, lgstc = NULL, noms = NULL, ords = NULL, incheck = TRUE, collect = FALSE, arglist = NULL, empri = NULL, priors = NULL, autopri = 0.05, emburn = c(0,0), bounds = NULL, max.resample = 100, overimp = NULL, boot.type = "ordinary", parallel = c("no", "multicore", "snow"), ncpus = getOption("amelia.ncpus", 1L), cl = NULL, ...) { ## parellel infrastructure modeled off of 'boot' package if (missing(parallel)) parallel <- getOption("amelia.parallel", "no") parallel <- match.arg(parallel) have_mc <- have_snow <- FALSE if (parallel != "no" && ncpus > 1L) { if (parallel == "multicore") have_mc <- .Platform$OS.type != "windows" else if (parallel == "snow") have_snow <- TRUE if (!have_mc && !have_snow) ncpus <- 1L if (p2s == 2) { cat("\nUsing '", parallel, "' parallel backend with", ncpus, "cores.") } } if (p2s == 2) { cat("\namelia starting\n") flush.console() } am.call <- match.call(expand.dots = TRUE) archv <- am.call prepped<-amelia.prep(x = x, m = m, idvars = idvars, empri = empri, ts = ts, cs = cs, tolerance = tolerance, polytime = polytime, splinetime = splinetime, lags = lags, leads = leads, logs = logs, sqrts = sqrts, lgstc = lgstc, p2s = p2s, frontend = frontend, intercs = intercs, noms = noms, startvals = startvals, ords = ords, incheck = incheck, collect = collect, arglist = arglist, priors = priors, autopri = autopri, bounds = bounds, max.resample = max.resample, overimp = overimp, emburn = emburn, boot.type = boot.type) if (prepped$code != 1) { cat("Amelia Error Code: ", prepped$code, "\n", prepped$message, "\n") return(invisible(list(code = prepped$code, message = prepped$message))) } do.amelia <- function(X, ...) 
{ if (p2s == 2) { cat("running bootstrap\n") } k <- ncol(prepped$x) if (!is.null(colnames(x))) { ovars <- colnames(x) } else { ovars <- 1:k } code <- 1 impdata <- list(imputations = list(), m = 1, missMatrix = prepped$missMatrix, overvalues = prepped$overvalues, theta = array(NA, dim = c(k+1,k+1,1) ), mu = matrix(NA, nrow = k, ncol = 1), covMatrices = array(NA, dim = c(k,k,1)), code = integer(0), message = character(0), iterHist = list(), arguments = list(), orig.vars = ovars) class(impdata) <- "amelia" class(impdata$imputations) <- c("mi","list") x.boot<-bootx(prepped$x,prepped$priors, boot.type) # Don't reorder columns thetanew will not align with d.stacked$x x.stacked<-amstack(x.boot$x,colorder=FALSE,x.boot$priors) if (p2s) cat("-- Imputation", X, "--\n") thetanew <- emarch(x.stacked$x, p2s = p2s, thetaold = NULL, tolerance = tolerance, startvals = startvals, priors = x.stacked$priors, empri = empri, frontend = frontend, collect = collect, autopri = prepped$autopri, emburn = emburn) ##thetanew <- .Call("emarch", PACKAGE = "Amelia") ## update the amelia ouptut impdata$iterHist[[1]] <- thetanew$iter.hist impdata$theta[,,1] <- thetanew$thetanew impdata$mu[,1] <- thetanew$thetanew[-1,1] impdata$covMatrices[,,1] <- thetanew$thetanew[-1,-1] dimnames(impdata$covMatrices)[[1]] <- prepped$theta.names dimnames(impdata$covMatrices)[[2]] <- prepped$theta.names dimnames(impdata$mu)[[1]] <- prepped$theta.names evs <- eigen(thetanew$thetanew[-1, -1, drop = FALSE], only.values=TRUE, symmetric=TRUE) if (any(evs$values < .Machine$double.eps)) { impdata$imputations[[1]] <- NA impdata$code <- 2 impdata$arguments <- prepped$archv class(impdata$arguments) <- c("ameliaArgs", "list") cat("\n\nThe resulting variance matrix was not invertible.", " Please check your data for highly collinear variables.\n\n") return(impdata) } ximp <- amelia.impute(prepped$x, thetanew$thetanew, priors = prepped$priors, bounds = prepped$bounds, max.resample) ximp <- amunstack(ximp, n.order = prepped$n.order, p.order = prepped$p.order) ximp <- unscale(ximp, mu = prepped$scaled.mu, sd = prepped$scaled.sd) ximp <- unsubset(x.orig = prepped$trans.x, x.imp = ximp, blanks = prepped$blanks, idvars = prepped$idvars, ts = prepped$ts, cs = prepped$cs, polytime = polytime, splinetime = splinetime, intercs = intercs, noms = prepped$noms, index = prepped$index, ords = prepped$ords) ximp <- untransform(ximp, logs = prepped$logs, xmin = prepped$xmin, sqrts = prepped$sqrts, lgstc = prepped$lgstc) if (p2s==2) { cat("\n saving and cleaning\n") } ## here we deal with the imputed matrix. ## first, we put the data into the output list and name it impdata$imputations[[1]] <- impfill(x.orig = x, x.imp = ximp, noms = prepped$noms, ords = prepped$ords, priors = priors, overimp = overimp) if (p2s) cat("\n") if (frontend) { requireNamespace("tcltk") tcltk::tcl(getAmelia("runAmeliaProgress"), "step",(100/m -1)) } impdata$code <- code impdata$arguments <- prepped$archv names(impdata$imputations) <- paste("imp", X, sep = "") class(impdata$arguments) <- c("ameliaArgs", "list") return(impdata) } ## parallel infrastructure from the 'boot' package impdata <- if (ncpus > 1L && (have_mc || have_snow)) { if (have_mc) { parallel::mclapply(seq_len(m), do.amelia, mc.cores = ncpus) } else if (have_snow) { list(...) 
# evaluate any promises if (is.null(cl)) { cl <- parallel::makePSOCKcluster(rep("localhost", ncpus)) if(RNGkind()[1L] == "L'Ecuyer-CMRG") parallel::clusterSetRNGStream(cl) res <- parallel::parLapply(cl, seq_len(m), do.amelia) parallel::stopCluster(cl) res } else parallel::parLapply(cl, seq_len(m), do.amelia) } } else lapply(seq_len(m), do.amelia) if (all(sapply(impdata, is, class="amelia"))) { if (!all(sapply(impdata, function(x) is.na(x$imputations)))) { impdata <- do.call(ameliabind, impdata) if (impdata$code == 2) { impdata$message <- paste("One or more of the imputations resulted in a", "covariance matrix that was not invertible.") } else { impdata$message <- paste("Normal EM convergence.") } } else { impdata <- do.call(ameliabind, impdata) impdata$code <- 2 impdata$message <- paste("All of the imputations resulted in a covariance", "matrix that is not invertible.") } } return(impdata) }
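## ---------------------------------------------------------------------
## Illustrative sketch (not part of the package): constructing the
## `priors` and `bounds` matrices in the formats documented above and
## passing them to amelia(). The choice of the "trade" column from the
## bundled `africa` data and the particular prior/bound values are
## assumptions made purely for illustration.
## ---------------------------------------------------------------------
library(Amelia)
data(africa)
j <- which(names(africa) == "trade")   # column index of the target variable

## variable-wide prior: row = 0 applies the prior to every missing cell
## in column j; the format is c(row, column, mean, sd)
pr <- matrix(c(0, j,
               mean(africa$trade, na.rm = TRUE),
               sd(africa$trade, na.rm = TRUE)),
             nrow = 1)

## logical bound: the format is c(column, lower, upper); imputations of
## column j are resampled into [0, observed maximum]
bds <- matrix(c(j, 0, max(africa$trade, na.rm = TRUE)), nrow = 1)

a.out <- amelia(africa, m = 5, ts = "year", cs = "country",
                priors = pr, bounds = bds, max.resample = 100)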
## ---- end of R/emb.r ----
#' Missingness Map
#'
#' Plots a missingness map showing where missingness occurs in
#' the dataset passed to \code{amelia}.
#'
#' @param obj an object of class "amelia"; typically output from the
#'   function \code{amelia}, a matrix or a dataframe.
#' @param vars a vector of column numbers or column names of the data
#'   to include in the plot. The default is to plot all variables.
#' @param legend should a legend be drawn? (\code{TRUE} or \code{FALSE})
#' @param col a vector of length two where the first element specifies
#'   the color for missing cells and the second element specifies
#'   the color for observed cells.
#' @param main main title of the plot. Defaults to "Missingness Map".
#' @param y.cex expansion for the unit names on the y-axis.
#' @param x.cex expansion for the variable names on the x-axis.
#' @param y.labels a vector of row labels to print on the y-axis.
#' @param y.at a vector of the same length as \code{y.labels} with row
#'   numbers associated with the labels.
#' @param csvar column number or name of the variable corresponding to
#'   the unit indicator. Only used when \code{obj} is not of class
#'   \code{amelia}.
#' @param tsvar column number or name of the variable corresponding to
#'   the time indicator. Only used when \code{obj} is not of class
#'   \code{amelia}.
#' @param rank.order a logical value. If \code{TRUE}, the default, then
#'   the order of the variables along the x-axis is sorted by the percent
#'   missing (from highest to lowest). If \code{FALSE}, it is simply the
#'   order of the variables in the data.
#' @param margins a vector of length two that specifies the bottom and
#'   left margins of the plot. Useful for when variable names or row
#'   names are long.
#' @param gap.xaxis value to pass to the \code{gap.axis} argument of the
#'   \code{axis} function that plots the x-axis. See \code{\link{axis}}
#'   for more details. Ignored on R versions less than 4.0.0.
#' @param x.las value of the \code{las} argument to pass to the
#'   \code{\link{axis}} function creating the x-axis.
#' @param ... further graphical arguments.
#'
#' @details \code{missmap} draws a map of the missingness in a dataset
#'   using the \code{image} function. The columns are reordered to put the
#'   most missing variable farthest to the left. The rows are reordered to
#'   a unit-period order if the \code{ts} and \code{cs} arguments were
#'   passed to \code{amelia}. If not, the rows are not reordered.
#'
#' The \code{y.labels} and \code{y.at} commands can be used to associate
#' labels with rows in the data to identify them in the plot. The y-axis
#' is internally inverted so that the first row of the data is associated
#' with the top-most row of the missingness map. The values of \code{y.at}
#' should refer to the rows of the data, not to any point on the plotting
#' region.
#'
#' @seealso \code{\link{compare.density}}, \code{\link{overimpute}},
#'   \code{\link{tscsPlot}}, \code{\link{image}}, \code{\link{heatmap}}
missmap <- function(obj, vars, legend = TRUE, col, main,
                    y.cex = 0.8, x.cex = 0.8, y.labels, y.at, csvar = NULL,
                    tsvar = NULL, rank.order = TRUE, margins = c(5, 5),
                    gap.xaxis = 1, x.las = 2, ...)
{ if (inherits(obj, "amelia")) { vnames <- colnames(obj$imputations[[1]]) n <- nrow(obj$missMatrix) p <- ncol(obj$missMatrix) percent.missing <- colMeans(obj$missMatrix) pmiss.all <- mean(obj$missMatrix) r1 <- obj$missMatrix } else { vnames <- colnames(obj) n <- nrow(obj) p <- ncol(obj) percent.missing <- colMeans(is.na(obj)) pmiss.all <- mean(is.na(obj)) r1 <- 1 * is.na(obj) } if (missing(col)) col <- c("#eff3ff", "#2171b5") if (!missing(vars)) { if (is.character(vars)) { vars <- match(vars, vnames) if (any(is.na(vars))) { stop("vars not found in the data") } } if (any(!(vars %in% 1:p))) { stop("vars outside range of the data") } p <- length(vars) r1 <- r1[, vars] percent.missing <- percent.missing[vars] pmiss.all <- mean(r1) } if (!missing(y.labels) && (missing(y.at) && (length(y.labels) != n))) { stop("y.at must accompany y.labels if there is less than onefor each row") } if (is.null(csvar)) csvar <- obj$arguments$cs if (is.null(tsvar)) tsvar <- obj$arguments$ts if (missing(y.labels)) { if (!is.null(csvar)) { if (inherits(obj, "amelia")) { cs <- obj$imputations[[1]][, csvar] } else { cs <- obj[, csvar] } y.labels <- cs if (is.factor(y.labels)) y.labels <- levels(y.labels)[unclass(y.labels)] cs.names <- y.labels if (!is.numeric(cs)) cs <- as.numeric(as.factor(cs)) if (!is.null(tsvar)) { if (inherits(obj, "amelia")) { ts <- as.numeric(obj$imputations[[1]][, tsvar]) } else { ts <- as.numeric(obj[, tsvar]) } unit.period <- order(cs, ts) } else { unit.period <- 1:n } y.labels <- y.labels[unit.period] r1 <- r1[unit.period, ] brks <- c(TRUE,rep(FALSE, times = (n-1))) for (i in 2:n) { brks[i] <- (cs[unit.period][i] != cs[unit.period][i - 1]) } y.at <- which(brks) y.labels <- y.labels[brks] } else { y.labels <- row.names(obj$imputations[[1]]) y.at <- seq(1, n, by = 15) y.labels <- y.labels[y.at] } } else { if (missing(y.at)) y.at <- n:1 } missrank <- rev(order(percent.missing)) if (rank.order) { chess <- t(!r1[n:1, missrank]) vnames <- vnames[missrank] } else { chess <- t(!r1[n:1, ]) } y.at <- (n:1)[y.at] if (missing(main)) main <- "Missingness Map" par(mar = c(margins, 2, 1) + 0.1) ## here we fork for data/tscs type plots. users cant set this yet. 
type <- "data" if (legend) { graphics::layout(matrix(c(1, 2), nrow = 1), widths = c(0.75, 0.25)) par(mar = c(margins, 2, 0) + 0.1, mgp = c(3, 0.25, 0)) } if (type == "data") { col.fix <- col if (sum(!chess) == 0) { col.fix <- col[2] } image(x = 1:(p), y = 1:n, z = chess, axes = FALSE, col = col.fix, xlab = "", ylab = "", main = main) if (getRversion() >= "4.0.0") { axis(1, lwd = 0, labels = vnames, las = x.las, at = 1:p, cex.axis = x.cex, gap.axis = gap.xaxis) } else { axis(1, lwd = 0, labels = vnames, las = x.las, at = 1:p, cex.axis = x.cex) } axis(2, lwd = 0, labels = y.labels, las = 1, at = y.at, cex.axis = y.cex) if (legend) { pm.lab <- paste("Missing (", round(100 * pmiss.all), "%)", sep = "") po.lab <- paste("Observed (", 100 - round(100 * pmiss.all), "%)", sep = "") par(mar = c(0, 0, 0, 0.3)) plot(0, 0, type = "n", axes = FALSE, ann = FALSE) legend("left", col = col, bty = "n", xjust = 0, border = "grey", legend = c(pm.lab, po.lab), fill = col, horiz = FALSE) } } else { tscsdata <- data.frame(cs.names, ts, rowMeans(r1)) tscsdata <- reshape(tscsdata, idvar = "cs.names", timevar = "ts", direction = "wide") rownames(tscsdata) <- tscsdata[, 1] colnames(tscsdata) <- unique(ts) tscsdata <- as.matrix(tscsdata[, -1]) cols <- rev(heat.colors(5)) image(z = t(tscsdata), axes = FALSE, col = cols, main = main, ylab = "", xlab = "") at.seq <- seq(from = 0, to = 1, length = ncol(tscsdata)) axis(1, labels = unique(ts), at = at.seq, tck = 0, lwd = 0, las = 2) axis(2, labels = rownames(tscsdata), at = at.seq, tck = 0, lwd = 0, las = 1, cex.axis = .8) if (legend) { leg.names <- c("0-0.2", "0.2-0.4", "0.4-0.6", "0.6-0.8", "0.8-1") legend(x = 0.95, y = 1.01, col = cols, bty = "n", xjust = 1, legend = leg.names, fill = cols, horiz = TRUE) } } invisible(NULL) }
## ---- end of R/missmap.R ----
#' Prepare Multiple Overimputation Settings
#'
#' A function to generate priors for multiple overimputation of
#' a variable measured with error.
#'
#' @param x either a matrix, data.frame, or an object of class "molist"
#'   from a previous \code{moPrep} call. The first two derive the priors
#'   from the data given, and the third will derive the priors from the
#'   first \code{moPrep} call and add them to the already defined priors.
#' @param formula a formula describing the nature of the measurement
#'   error for the variable. See "Details."
#' @param subset an optional vector specifying a subset of observations
#'   which possess measurement error.
#' @param error.proportion an optional vector specifying the fraction of
#'   the observed variance that is due to measurement error.
#' @param gold.standard a logical value indicating if values with no
#'   measurement error should be used to estimate the measurement error
#'   variance.
#' @param error.sd an optional vector specifying the standard error of
#'   the measurement error.
#'
#' @return An instance of the S3 class "molist" with the following
#'   objects:
#' \itemize{
#'   \item priors a four-column matrix of the multiple overimputation
#'     priors associated with the data. Each row of the matrix is
#'     \code{c(row, column, prior.mean, prior.sd)}
#'   \item overimp a two-column matrix of cells to be overimputed. Each
#'     row of the matrix is of the form \code{c(row, column)}, which
#'     indicates the row and column of the cell to be overimputed.
#'   \item data the object name of the matrix or data.frame to which
#'     priors refer.
#' }
#'
#' Note that \code{priors} and \code{overimp} might contain results from
#' multiple calls to \code{moPrep}, not just the most recent.
#'
#' @details
#' This function generates priors for multiple overimputation of data
#' measured with error. With the \code{formula} argument, you can specify
#' which variable has the error, what the mean of the latent data is, and
#' if there are any other proxy measures of the mismeasured variable. The
#' general syntax for the formula is: \code{errvar ~ mean | proxy}, where
#' \code{errvar} is the mismeasured variable, \code{mean} is a formula
#' for the mean of the latent variable (usually just \code{errvar}
#' itself), and \code{proxy} is another mismeasurement of the same latent
#' variable. The proxies are used to estimate the variance of the
#' measurement error.
#'
#' \code{subset} and \code{gold.standard} refer to the rows of the data
#' which are and are not measured with error. Gold-standard rows are used
#' to estimate the variance of the measurement error.
#' \code{error.proportion} is used to estimate the variance of the
#' measurement error by estimating the variance of the mismeasurement and
#' taking the proportion assumed to be due to error. \code{error.sd} sets
#' the standard error of the measurement error directly.
#' #'@seealso #' \code{\link{amelia}} #' #' @examples #' data(africa) #' m.out <- moPrep(africa, trade ~ trade, error.proportion = 0.1) #' a.out <- amelia(m.out, ts = "year", cs = "country") #' plot(a.out) #' m.out <- moPrep(africa, trade ~ trade, error.sd = 1) #' a.out <- amelia(m.out, ts = "year", cs = "country") #' moPrep <- function(x, formula, subset, error.proportion, gold.standard = !missing(subset), error.sd) { UseMethod("moPrep",x) } #' @describeIn moPrep Alter existing moPrep output moPrep.molist <- function(x, formula, subset, error.proportion, gold.standard = FALSE, error.sd) { m <- match.call() m$x <- x$data m[[1]] <- as.name("moPrep") res <- eval(m, sys.frame(sys.parent())) x$priors <- rbind(x$priors, res$priors) x$overimp <- rbind(x$overimp, res$overimp) return(x) } #' @describeIn moPrep Default call to moPrep moPrep.default <- function(x, formula, subset, error.proportion, gold.standard=!missing(subset), error.sd) { if (!missing(error.proportion) && !(length(error.proportion) %in% c(1,nrow(x)))) { stop("The error.proportion arugment must be of length 1 or the number of rows of the data.") } if (!missing(error.sd) && !(length(error.sd) %in% c(1,nrow(x)))) { stop("The error.sd arugment must be of length 1 or the number of rows of the data.") } if (!missing(error.proportion) & !missing(error.sd)) { stop("error.proportion and error.sd cannot be set at the same time.") } ## parse the formula target.name <- formula[[2]] pars <- formula[[3]] vnames <- all.vars(formula, unique = FALSE) if ("|" %in% all.names(formula)) { proxyname <- vnames[length(vnames)] meanpos <- length(vnames)-1 } else { meanpos <- length(vnames) } if (!exists("proxyname") && missing(error.proportion) && !gold.standard && missing(error.sd)) { stop("Need to specify a proxy, an error proportion, an error variance, or gold-standard data.") } proxysplit <- strsplit(deparse(formula), "\\|")[[1]] form <- formula(paste(proxysplit, collapse = "+")) m <- match.call() m[[1]] <- as.name("model.frame") m$formula <- form m$error.proportion <- NULL m$error.sd <- NULL m$gold.standard <- NULL m$data <- m$x m$x <- NULL mf <- eval(m, sys.frame(sys.parent())) if (nrow(mf) == 0L) stop("0 cases to overimpute, check subset argument") if (!missing(error.proportion)) { if (length(error.proportion) == nrow(x)) { if (!missing(subset)) { error.proportion <- error.proportion[eval(substitute(subset,x))] } gs <- mf[error.proportion == 0, , drop = FALSE] mf <- mf[error.proportion != 0, , drop = FALSE] } } else if (!missing(error.sd)) { if (length(error.sd) == nrow(x)) { if (!missing(subset)) { error.sd <- error.sd[eval(substitute(subset,x))] } gs <- mf[error.sd == 0, , drop = FALSE] mf <- mf[error.sd != 0, , drop = FALSE] } } else { gs <- mf[0,] } if (ncol(mf) < meanpos) meanpos <- ncol(mf) prior.mean <- mf[,meanpos] var.mm <- var(mf[,1], na.rm=TRUE) if (!missing(error.proportion)) { prior.var <- var.mm*error.proportion } if (!missing(error.sd)) { prior.var <- error.sd^2 } if (exists("proxyname")) { prior.var <- var.mm - cov(mf[,1],mf[,proxyname], use="complete.obs") } if (gold.standard && !is.null(m$subset)) { m$subset <- NULL mf.full <- eval(m, sys.frame(sys.parent())) gs2 <- mf.full[which(!(rownames(mf.full) %in% rownames(mf))), , drop = FALSE] gs <- rbind(gs, gs2) var.gs <- var(gs[,1],na.rm=TRUE) prior.var <- var.mm - var.gs } col <- match(names(mf)[1], names(x)) rows <- as.integer(rownames(mf)) out <- list() out$priors <- cbind(rows,col,prior.mean, prior.var) out$overimp <- cbind(rows, col) if (sum(out$priors[,4] <= 0) > 0) { out$priors 
<- out$priors[out$priors[, 4] > 0, ]
    warning("Some observations estimated with negative measurement error variance. Set to gold standard.")
  }
  out$priors[, 4] <- sqrt(out$priors[, 4])
  out$data <- substitute(x)
  class(out) <- c("molist", "list")
  return(out)
}
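## ---------------------------------------------------------------------
## Sketch of the `errvar ~ mean | proxy` syntax documented above.
## Treating `infl` as a proxy for a mismeasured `trade` in the bundled
## `africa` data is purely an illustrative assumption made to show the
## syntax; the proxy is used to estimate the measurement-error variance.
## ---------------------------------------------------------------------
library(Amelia)
data(africa)
m.out <- moPrep(africa, trade ~ trade | infl)
a.out <- amelia(m.out, ts = "year", cs = "country")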
## ---- end of R/mo.R ----
#' Summary plots for Amelia objects #' #' Plots diagnostic plots for the output from the #' \code{amelia} function. #' #' @param x an object of class "amelia"; typically output from the #' function \code{amelia}. #' @param which.vars a vector indicating the variables to plot. The #' default is to plot all of the numeric variables that were actually #' imputed. #' @param compare plot the density comparisons for each variable (True or False) #' @param overimpute plot the overimputation for each variable (True or False) #' @param ask prompt user before changing pages of a plot (True or False) #' @param ... further graphical arguments. #' plot.amelia <- function(x, which.vars, compare = TRUE, overimpute = FALSE, ask = TRUE, ...) { imputedVars <- colSums(x$missMatrix) > 0 ## if it's a matrix, it's already numeric if (is.data.frame(x$imputations[[1]])) { numericVars <- sapply(x$imputations[[1]],"is.numeric") } else { numericVars <- rep(TRUE, length(imputedVars)) } ## Choose the correct variables to plot. Only numerics. ## And, if they didn't pick, only show the imputed variables. if (missing(which.vars)) { which.vars <- which(imputedVars & numericVars) } else { ## trim user-choosen variables that are not numeric which.vars <- which.vars[numericVars[which.vars]] } mfrow <- set.mfrow(nvars = length(which.vars), overimpute) on.exit(par(NULL)) layout <- par(mfrow = mfrow) for (i in seq(along=which.vars)) { if (compare) compare.density(output=x, var=which.vars[i], legend=FALSE,...) if (overimpute) overimpute(output=x, var=which.vars[i]) if (i==1) devAskNewPage(ask) } devAskNewPage(ask=FALSE) invisible() } ## ## set.mfrow() - gets the proper number of frames for plotting the ## output of the "amelia" class. ## ## INPUTS: nvars - number of variables being plotted ## overimpute - are we plotting overimputes? ## ## OUTPUT: mfrow - vector of length 2 with the (rows,cols) of the ## plotting window ## ## NOTICE: idea taken from the "coda" package ## ## set.mfrow <- function(nvars = 1, overimpute = FALSE) { if (overimpute) { ## If we are overimputing as well, we need ## two plots per variable mfrow <- switch(min(nvars, 13), c(2,1), ## 2 plot : 1x2 c(2,2), ## 4 plots: 2x2 c(3,2), ## 6 plots: 3x2 c(4,2), ## 8 plots: 4x2 c(3,2), ## 10 plots: 3x2 c(3,2), ## 12 plots: 3x2 c(4,2), ## 14 plots: 4x2 c(4,2), ## 16 plots: 4x2 c(4,2), ## 18 plots: 4x2 c(3,2), ## 20 plots: 3x2 c(3,2), ## 22 plots: 3x2 c(3,2), ## 24 plots: 3x2 c(4,2)) ## 26 plots: 4x2 } else { mfrow <- switch(min(nvars, 13), c(1,1), ## 1 plot : 1x1 c(2,1), ## 2 plots: 2x1 c(2,2), ## 3 plots: 2x2 c(2,2), ## 4 plots: 2x2 c(3,2), ## 5 plots: 3x2 c(3,2), ## 6 plots: 3x2 c(3,3), ## 7 plots: 3x3 c(3,3), ## 8 plots: 3x3 c(3,3), ## 9 plots: 3x3 c(3,2), ## 10 plots: 3x2 c(3,2), ## 11 plots: 3x2 c(3,2), ## 12 plots: 3x2 c(3,3)) ## 13 plots: 3x3 } return(mfrow) }
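## ---------------------------------------------------------------------
## Usage sketch for the plot method above; the variable choices are
## illustrative for the bundled `africa` data. With overimpute = TRUE,
## each variable gets two panels (a density comparison and an
## overimputation plot), which is why set.mfrow() doubles the frames.
## ---------------------------------------------------------------------
library(Amelia)
data(africa)
a.out <- amelia(africa, ts = "year", cs = "country", logs = "gdp_pc")
plot(a.out, which.vars = c("gdp_pc", "trade"),
     overimpute = TRUE, ask = FALSE)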
## ---- end of R/plot.amelia.R ----
## ## prep.r ## ## Various routines for transforming the original data to the imputation model, ## and reverting back to the format of the original data ## 28/04/06 mb functions extracted from emb.r to prep.r ## 29/04/06 jh revised unsubset for memory issues ## 04/05/06 mb moved parameter vs. observation check to the end of prep. ## 18/05/06 mb 'ords' unsubset fixed to have range of original values ## 15/06/06 jh revised "generatepriors" ## 26/06/06 mb fixed archive to work with session loading. ## 27/06/06 mb amelia.prep accepts, checks, and processes 'arglist' ## 27/07/06 mb amsubset changes from dataframe to matrix after subsetting to ## avoid eating it on strings. ## 02/08/06 mb frame.to.matrix now converts chars to either factors (cs,noms) ## or junk (idvars). added to subset/unsubset (ignore last update). ## 02/08/06 mb fixed bug where polytime=0 would make for odd behaviour/crashing ## 11/09/06 mb fixed bug in unsubset: 'index' too long to subset w/o 'which' ## 18/10/06 mb incorporated confidence levels into generating priors ## 20/10/06 mb new format for priors ## 13/12/06 mb indiv. obs priors get priority in generatepriors ## 28/03/07 jh added empri to prepped$archv, modified construction of timevars ## 10/05/07 mb logs now adds 1 instead of "epsilon" to avoid strange imputations. ## fixed blanks problems when no priors specified. ## 11/05/07 mb added "combine.output" to combine multiple amelia outputs ## 15/08/07 jh modified construction of timevars ## 14/09/07 mb added 'bounds' support ## 22/07/08 mb - good coding update: T->TRUE/F->FALSE ## 27/03/10 jh added spline basis functions, changed "polynomials" matrix to instance of "timebasis" nametonumber <- function(x, ts, cs, idvars, noms, ords, logs, sqrts, lgstc, lags, leads) { listconvert <- function(opt) { junk.seq <- 1:ncol(x) junk.names <- dimnames(x)[[2]] for (i in 1:length(opt)) { mat <- opt[i] == junk.names if (sum(mat) == 0) return(NA) opt[i] <- junk.seq[mat] } return(as.numeric(opt)) } code<-0 mess<-paste("One of the variable names in the options list does not match a variable name in the data.") if (inherits(ts, "character")) ts <- listconvert(ts) if (inherits(cs, "character")) cs <- listconvert(cs) if (inherits(idvars, "character")) idvars <- listconvert(idvars) if (inherits(noms, "character")) noms <- listconvert(noms) if (inherits(ords, "character")) ords <- listconvert(ords) if (inherits(logs, "character")) logs <- listconvert(logs) if (inherits(sqrts, "character")) sqrts <- listconvert(sqrts) if (inherits(lgstc, "character")) lgstc <- listconvert(lgstc) if (inherits(lags, "character")) lags <- listconvert(lags) if (inherits(leads, "character")) leads <- listconvert(leads) output <- list(code = code, ts = ts, cs = cs, idvars = idvars, noms = noms, ords = ords, logs = logs, sqrts = sqrts, lgstc = lgstc, lags = lags, leads = leads, mess = mess) if (any(is.na(output))) output$code <- 1 return(output) } ## convert.priors - convert 4/5-column priors to matrix of priors ## priors: 4/5 column priors matrix ## nrow: rows of the data matrix ## ncol: cols of the data matrix ## ## output: a list of either 2 (in the 4 col case) or 3 (in the 5 col ## case) of prior matrices. 
#convert.priors <- fucntion(priors, nrow, ncol) { # if (!is.matrix(priors)) # stop("argument 'priors' is not a matrix") # if (ncol(priors) != 4 || ncol(priors) != 5) # stop("priors matrix has the wrong number of columns") # if (ncol(priors) == 4) { # #generate output priors matrix, the size of the data # out.means <- matrix(NA, nrow = nrow, ncol = ncol) # out.sds <- matrix(NA, nrow = nrow, ncol = ncol) # # fill in the the matrices # for (i in 1:nrow(priors)) { # out.means[priors[i,1], priors[i,2]] <- priors[i,3] # out.sds[priors[i,1], priors[i,2]] <- priors[i,4] # } # return(list(means = out.means, sds = out.sds)) # } # if (ncol(priors) == 5) { # out.mins <- matrix(NA, nrow = nrow, ncol = ncol) # out.maxs <- matrix(NA, nrow = nrow, ncol = ncol) # out.conf <- matrix(NA, nrow = nrow, ncol = ncol) # for (i in 1:nrow(priors)) { # out.mins[priors[i,1], priors[i,2]] <- priors[i,3] # out.maxs[priors[i,1], priors[i,2]] <- priors[i,4] # out.conf[priors[i,1], priors[i,2]] <- priors[i,5] # } # return(list(mins = out.mins, maxs = out.maxs, conf = out.conf)) # } #} ## amtransform - Transform variables to assume normality ## x: data matrix ## logs: variable list of log-linear transformations ## sqrts: variable list of square root transformations ## lgstc: variable list of logistic transformations ## xmin: vector of column minimums amtransform<-function(x,logs,sqrts,lgstc) { logs<-unique(logs) sqrts<-unique(sqrts) lgstc<-unique(lgstc) xmin<-c() if (!is.null(logs)) { for (i in 1:length(logs)) { j<-logs[i] xmin<-c(xmin,min(c(0,min(x[,j],na.rm=TRUE)))) #we need mins to avoid creating NAs x[,j]<-log(x[,j]-xmin[i]+1) #by taking a log of a negative number } } if (!is.null(sqrts)) for (i in sqrts) x[,i]<-sqrt(x[,i]) if (!is.null(lgstc)) for (i in lgstc) x[,i]<-log(x[,i]/(1-x[,i])) return(list(x=x,xmin=xmin)) } ## untransform - Convert imputed variables to original scale ## x.imp: imputed data matrix ## logs: variable list of log-linear transformations ## xmins: vector of column minimums ## sqrts: variable list of square root transformations ## lgstc: variable list of logistic transformations untransform<-function(x.imp,logs,xmin,sqrts,lgstc) { logs<-unique(logs) sqrts<-unique(sqrts) lgstc<-unique(lgstc) if (!is.null(logs)) { for (i in 1:length(logs)) { j<-logs[[i]] x.imp[,j]<-exp(x.imp[,j])+xmin[[i]] } } if (!is.null(sqrts)) for (i in sqrts) x.imp[,i]<-(x.imp[,i])^2 if (!is.null(lgstc)) for (i in lgstc) x.imp[,i]<-exp(x.imp[,i])/(1 + exp(x.imp[,i])) return(x.imp) } frame.to.matrix<-function(x,idvars) { char.vars<-which(sapply(x,class)=="character") if (length(char.vars) > 0) for (i in char.vars) if (is.na(match(i,idvars))) x[,i]<-as.factor(x[,i]) #changes cs/noms char. vars to factors else x[,i]<-1 #junks id char vars. 
return(data.matrix(x)) #return it as matrix } ## Remove rows and columns from dataset that do not belong amsubset<-function(x,idvars,p2s,ts,cs,priors=NULL, polytime=NULL,splinetime=NULL,intercs=FALSE,lags=NULL, leads=NULL,noms=NULL,bounds=NULL, overimp = NULL) { lags <- unique(lags) leads <- unique(leads) noms <- unique(noms) idvars <- unique(idvars) index <- c(1:ncol(x)) theta.names <- colnames(x) if (!is.null(idvars)) { index <- index[-idvars] theta.names <- theta.names[-idvars] } if (is.data.frame(x)) x <- frame.to.matrix(x,idvars) overvalues <- NULL ## Set overimp cells to missing if (!is.null(overimp)) { whole.vars <- overimp[overimp[,1] == 0, 2] whole.vars <- as.matrix(expand.grid(1:nrow(x), whole.vars)) overimp <- overimp[overimp[,1] != 0,] overimp <- rbind(overimp, whole.vars) if (!is.matrix(overimp)) overimp <- t(as.matrix(overimp)) overvalues <- x[overimp] is.na(x) <- overimp } AMmiss <- is.na(x) if (!is.null(lags)) { if (!identical(cs,NULL)) { tsarg<-list(x[,cs],x[,ts]) } else { tsarg<-list(x[,ts]) } tssort<-do.call("order",tsarg) x.sort<-x[tssort,] for (i in lags) { lagged<-c(NA,x.sort[1:(nrow(x)-1),i]) if (!identical(cs,NULL)) { for (i in 2:nrow(x.sort)) if (x.sort[i,cs]!=x.sort[i-1,cs]) is.na(lagged)<-i } x.sort<-cbind(x.sort,lagged) x<-cbind(x,1) index<-c(index,-.5) #-.5=lags theta.names <- c(theta.names, paste("lag",colnames(x)[i],sep=".")) } x[tssort,]<-x.sort } if (!is.null(leads)){ if (!identical(cs,NULL)) { tsarg<-list(x[,cs],x[,ts]) } else { tsarg<-list(x[,ts]) } tssort<-do.call("order",tsarg) x.sort<-x[tssort,] for (i in leads) { led<-x.sort[2:nrow(x),i] led<-c(led,NA) if (!identical(cs,NULL)) { for (i in 1:(nrow(x.sort)-1)) if (x.sort[i,cs]!=x.sort[i+1,cs]) is.na(led)<-i } x.sort<-cbind(x.sort,led) x<-cbind(x,1) index<-c(index,.5) #.5=leads theta.names <- c(theta.names, paste("lead",colnames(x)[i],sep=".")) } x[tssort,]<-x.sort } #puts timeseries and crosssection into the id variable to avoid singularity if (!is.null(ts)) { theta.names <- theta.names[index != ts] index<-index[index!=ts] idvars<-c(idvars,ts) } if (!is.null(cs)) { theta.names <- theta.names[index != cs] index<-index[index!=cs] idvars<-c(idvars,cs) } #nominals if (!is.null(noms)) { for (i in noms) { values<-unique(na.omit(x[,i])) newx<-matrix(0,nrow=nrow(x),ncol=length(values)-1) theta.names <- theta.names[index != i] index<-index[index!=i] for (j in 2:length(values)) { newx[,j-1]<-ifelse(x[,i] == values[j],1,0) index<-c(index,-i) theta.names <- c(theta.names, paste("noms",colnames(x)[i],j,sep=".")) } x<-cbind(x,newx) idvars<-c(idvars,i) } } ## REVISION TODAY BEGINS HERE #basis functions for time if (!identical(polytime,NULL) | !identical(splinetime,NULL) ){ if (!identical(splinetime,NULL)){ time<-x[,ts] knot<-rep(0,5) if(splinetime>3){ knot[1:(splinetime-1)]<-seq(from=min(time),to=max(time),length=(splinetime-1)) # The end points of this sequence are not being used } timebasis<-cbind(1,time,time^2,time^3,pmax(time-knot[2],0)^3,pmax(time-knot[3],0)^3,pmax(time-knot[4],0)^3) timebasis<-timebasis[,1:(splinetime+1),drop=FALSE] } if (!identical(polytime,NULL)){ time<-x[,ts] timebasis<-cbind(1,time,time^2,time^3) timebasis<-timebasis[,1:(polytime+1) ,drop=FALSE] } cstypes<-unique(x[,cs]) timevars<-matrix(0,nrow(x),1) if (intercs){ for (i in cstypes){ dummy<-as.numeric(x[,cs]==i) timevars<-cbind(timevars,dummy*timebasis) } timevars<-timevars[,c(-1,-2), drop = FALSE] } else { timevars<-cbind(timevars,timebasis) timevars<-timevars[,-c(1,2), drop = FALSE] # first column is a holding variable, second is to have 
fixed effects identified } ## ENDS TODAY x<-cbind(x,timevars) if (ncol(timevars)) { for (i in 1:ncol(as.matrix(timevars))) { index<-c(index,0) #0 - timevars theta.names <- c(theta.names, paste("time",i,sep=".")) } } } else { if (intercs) { cstypes <- unique(x[,cs]) timevars <- matrix(0, nrow(x), 1) for (i in cstypes) { dummy <- as.numeric(x[,cs] == i) timevars <- cbind(timevars, dummy) } timevars <- timevars[,-c(1,2)] x<-cbind(x,timevars) if (ncol(timevars)) { for (i in 1:ncol(as.matrix(timevars))) { index<-c(index,0) #0 - timevars theta.names <- c(theta.names, paste("time",i,sep=".")) } } } } if (!identical(idvars,NULL)) x<-x[,-idvars, drop = FALSE] if (p2s == 2) { cat("Variables used: ", theta.names,"\n") } AMr1 <- is.na(x) blanks <- which(rowSums(AMr1)==ncol(x)) if (length(blanks)) { x <- x[-blanks, ] if (!is.null(priors)) { priors <- priors[!(priors[,1] %in% blanks),] if (length(blanks) == 1) { row.adjust <- 1 * (priors[, 1, drop = FALSE] > blanks) } else { row.adjust <- colSums(sapply(priors[, 1, drop = FALSE],">",blanks)) } priors[,1] <- priors[,1,drop=FALSE] - row.adjust } if (p2s) cat("Warning: There are observations in the data that are completely missing.","\n", " These observations will remain unimputed in the final datasets.","\n") } else { blanks<-NULL } priors[,2] <- match(priors[,2], index) bounds[,1] <- match(bounds[,1], index) if (is.null(dim(x))) { x <- matrix(x, ncol = 1) } return(list(x=x,index=index,idvars=idvars,blanks=blanks,priors=priors,bounds=bounds,theta.names=theta.names,missMatrix=AMmiss,overvalues=overvalues)) } ## Replace rows and columns removed in "amsubset" ## Create integer values for nominals and ordinals ## ## x.orig: the original data-matrix. transformed, but not subsetted, ## scaled or centered, thus all variables are as they are in the ## user-submitted data. ## x.imp: the imputed data. has been unscaled, uncentered, but its ## it still has excess variables (polynomials of time, nominal ## categories, etc) and ordinal variables still have non-integer ## values. ## index: denotes what each column of x.imp is. ## a positive integer (i): ith column of x.orig. ## 0: basis function (polynomial/spline) of time ## .5: leads ## -.5: lags ## a negative integer (-i): a dummy used for the nominal var in ## the ith column of x.orig unsubset <- function(x.orig, x.imp, blanks, idvars, ts, cs, polytime, splinetime, intercs, noms, index, ords) { ## create if (is.data.frame(x.orig)) { oldidvars <- idvars[-match(c(cs, noms), idvars)] x.orig <- frame.to.matrix(x.orig, oldidvars) } AMr1.orig <- is.na(x.orig) ## since we're going to use the blanks in noms/ords ## we need these changed here. 
if (identical(blanks, NULL)) {blanks <- -(1:nrow(x.orig))} if (identical(idvars, NULL)) {idvars <- -(1:ncol(x.orig))} ## noms are idvars, so we'll fill them in manually ## (mb 2 Apr 09 -- fixed handling of "blanks") if (!is.null(noms)) { for (i in noms) { y <- runif(nrow(x.imp)) dums <- x.imp[, which(index == -i)] p <- dums * (dums > 0) * (dums < 1) + ((dums - 1) >= 0) psub <- rowSums(as.matrix(p)) psub <- (psub <= 1) + (psub) * (psub > 1) p <- p / psub pzero <- 1 - rowSums(as.matrix(p)) p <- cbind(pzero, p) pk <- ncol(p) utri.mat <- matrix(0, nrow = pk, ncol = pk) utri.mat <- utri.mat + upper.tri(utri.mat, diag = TRUE) cump <- p %*% utri.mat cump.shift <- cbind(matrix(0, nrow(cump), 1), cump[, 1:(ncol(cump) - 1)]) yy <- (y < cump) * (y > cump.shift) renom <- (yy %*% unique(na.omit(x.orig[, i]))) x.orig[-blanks, i] <- renom } } ## here we force the ords into integer values ## (mb 2 Apr 09 -- fixed handling of "blanks") if (!is.null(ords)) { ords <- unique(ords) # find where the ordinals are in the impords <- match(ords,index) x <- x.imp[, impords] * AMr1.orig[-blanks, ords] ############ revision ##################### minmaxords <- matrix(0, length(ords), 2) for(jj in 1:length(ords)) { tempords <- x.orig[AMr1.orig[, ords[jj]] == 0 , ords[jj]] minmaxords[jj,1] <- min(tempords) minmaxords[jj,2] <- max(tempords) } minord <- minmaxords[,1] maxord <- minmaxords[,2] ############ replaces ##################### # minord <- apply(ifelse(AMr1.orig[,ords]==1,NA,x.orig[,ords]),2,min,na.rm=T) # maxord <- apply(ifelse(AMr1.orig[,ords]==1,NA,x.orig[,ords]),2,max,na.rm=T) ordrange <- maxord - minord p <- t((t(x) - minord) / ordrange) * AMr1.orig[-blanks, ords] p <- p * (p > 0) * (p < 1) + ((p - 1) >= 0) newimp <- matrix(0, nrow(x.imp), length(ords)) for (k in 1:length(ords)) { reordnl <- rbinom(nrow(x.imp), ordrange[k], p[, k]) newimp[, k] <- reordnl + minord[k] * AMr1.orig[-blanks, ords[k]] } ############# revision ############################# ## replace the imputations with the ordinal values for(jj in 1:length(ords)){ x.imp[, impords[jj]] <- round(x.imp[, impords[jj]]) x.imp[AMr1.orig[-blanks, ords[jj]] == 1, impords[jj]] <- newimp[AMr1.orig[-blanks, ords[jj]] == 1, jj] } # MAYBE CAN REMOVE LOOP ############# replaces ############################# # x.orig[,ords] <- ifelse(AMr1.orig[,ords]==1,0,x.orig[,ords]) + newimp } ## now we'll fill the imputations back into the original. if (!identical(c(blanks, idvars), c(NULL, NULL))) { x.orig[-blanks, -idvars] <- x.imp[, 1:ncol(x.orig[, -idvars, drop = FALSE])] } else { x.orig <- x.imp[, 1:ncol(x.orig)] } return(x.orig) } ## Rescale Dataset scalecenter<-function(x,priors=NULL,bounds=NULL){ AMn<-nrow(x) ones<-matrix(1,AMn,1) meanx<-colMeans(x,na.rm=TRUE) stdvx<-apply(x,2,sd,na.rm=TRUE) no.obs <- colSums(!is.na(x)) == 0 if (!is.null(priors)) { meanx[no.obs] <- 0#unlist(tapply(priors[,3],priors[,2],mean))[order(unique(priors[,2]))] stdvx[no.obs] <- 1#unlist(tapply(priors[,3],priors[,2],sd))[order(unique(priors[,2]))] } x.ztrans<-(x-(ones %*% meanx))/(ones %*% stdvx) if (!is.null(priors)){ priors[,3]<-(priors[,3]-meanx[priors[,2]])/stdvx[priors[,2]] priors[,4]<- (priors[,4]/stdvx[priors[,2]])^2 #change to variances. 
} if (!is.null(bounds)) { bounds[,2] <- (bounds[,2]-meanx[bounds[,1]])/stdvx[bounds[,1]] bounds[,3] <- (bounds[,3]-meanx[bounds[,1]])/stdvx[bounds[,1]] } return(list(x=x.ztrans,mu=meanx,sd=stdvx,priors=priors,bounds=bounds)) } unscale<-function(x,mu,sd){ AMn<-nrow(x) ones<-matrix(1,AMn,1) x.unscale<-(x * (ones %*% sd)) + (ones %*% mu) return(x.unscale) } ## Stack dataset and return vectors for sorting ## NOTE: THIS ORDERS TIES IN A SLIGHTLY DIFFERENT WAY THAN "stack.g" IN GAUSS AMELIA amstack<-function(x,colorder=TRUE,priors=NULL,bounds=NULL){ AMp<-ncol(x) AMr1<-is.na(x) if (colorder){ #Rearrange Columns p.order <- order(colSums(AMr1)) AMr1<-AMr1[,p.order, drop = FALSE] } else { p.order<-1:ncol(x) } n.order <- do.call("order", as.data.frame(AMr1[,AMp:1])) #Rearrange Rows AMr1<- AMr1[n.order,, drop = FALSE] # p.order has already been rearranged x<- x[n.order,p.order, drop = FALSE] # rearrange rows and columns of dataset if (!identical(priors,NULL)){ priors[,1]<-match(priors[,1],n.order) priors[,2]<-match(priors[,2],p.order) } if (!identical(bounds,NULL)) bounds[,1]<-match(bounds[,1],p.order) return(list(x=x,n.order=n.order,p.order=p.order,priors=priors,bounds=bounds)) } ## Rearrange dataset to original ordering of rows and columns amunstack<-function(x,n.order,p.order){ x.unstacked<-matrix(0,nrow=nrow(x),ncol=ncol(x)) x.unstacked[n.order,p.order]<-x return(x.unstacked) } # This function is in miserable shape. Need to clean up how lack of priors are dealt with. generatepriors<-function(AMr1,empri=NULL,priors=NULL){ if (!identical(priors,NULL)) { if (ncol(priors) == 5){ new.priors<-matrix(NA, nrow = nrow(priors), ncol = 4) new.priors[,1:2]<-priors[,1:2] new.priors[,3]<-priors[,3] + ((priors[,4] - priors[,3])/2) new.priors[,4]<-(priors[,4]-priors[,3])/(2*qnorm(1-(1-priors[,5])/2)) #NOTE: FIX THIS: Currently ignores CONF- ASSUMES CI95 } else { new.priors <-priors } zeros <- which(new.priors[,1]==0) if (length(zeros) > 0) { varPriors <- new.priors[zeros,2] missCells <- which(AMr1[,varPriors,drop=FALSE], arr.ind=TRUE) addedPriors <- matrix(NA, nrow=nrow(missCells), ncol=4) addedPriors[,1] <- missCells[,1] addedPriors[,2] <- varPriors[missCells[,2]] addedPriors[,-c(1,2)] <- new.priors[zeros[missCells[,2]],-c(1,2)] new.priors <- new.priors[-zeros,,drop=FALSE] # find any matches in the rows/cols and remove from addedPriors # since we've removed other dups, addedPriors will have the only # dups new.priors <- rbind(new.priors,addedPriors) new.priors <- new.priors[!duplicated(new.priors[,1:2]),] } return(new.priors) } } #' Combine Multiple Amelia Output Lists #' #' This function combines output lists from multiple runs of #' Amelia, where each run used the same arguments. The result is one #' list, formatted as if Amelia had been run once. #' #' @param ... a list of Amelia output lists from runs of Amelia with the #' same arguments except the number of imputations. #' #' @details This function is useful for combining the output from Amelia #' runs that occurred at different times or in different sessions of #' R. It assumes that the arguments given to the runs of Amelia are the #' same except for \code{m}, the number of imputations, and it uses the #' arguments from the first output list as the arguments for the combined #' output list. #' #' #' @keywords utilities combine.output <- function(...) 
{ cl <- match.call() cool <- unlist(lapply(cl, function(x) is.null(eval(x,parent.frame())$amelia.args))) if (max(cool[-1])==1) stop("One of the arguments is not an Amelia output list.") # we need the total number of imputations, so we'll # grab it from each argument (each ameliaoutput) # NOTE: the 'lapply' subset will be NULL for things in the call # that aren't amelia.output. 'unlist' then ignores those NULLs. ms <- unlist(lapply(cl,function(x) eval(x, parent.frame())$amelia.args$m)) m <- sum(ms) new.out <- vector("list", 2*m+1) names(new.out)[[2*m+1]] <- "amelia.args" new.out[[2*m+1]] <- eval(cl[[2]])$amelia.args new.out$amelia.args$m <- m count <- 1 for (i in 1:length(ms)) { for (j in 1:ms[i]) { new.out[[count]] <- eval(cl[[1+i]])[[j]] new.out[[m+count]] <- eval(cl[[1+i]])[[ms[i]+j]] new.out$amelia.args[[count+19]] <- eval(cl[[1+i]])$amelia.args[[j+19]] names(new.out)[count] <- paste("m", count, sep="") names(new.out)[m+count] <- paste("theta", count, sep="") names(new.out$amelia.args)[count+19] <- paste("iter.hist", count, sep="") count <- count + 1 } } return(new.out) } amelia.prep <- function(x,m=5,p2s=1,frontend=FALSE,idvars=NULL,logs=NULL, ts=NULL,cs=NULL,empri=NULL, tolerance=0.0001,polytime=NULL,splinetime=NULL,startvals=0,lags=NULL, leads=NULL,intercs=FALSE,sqrts=NULL, lgstc=NULL,noms=NULL,incheck=TRUE,ords=NULL,collect=FALSE, arglist=NULL, priors=NULL,var=NULL,autopri=0.05,bounds=NULL, max.resample=NULL, overimp = NULL, emburn=NULL, boot.type=NULL) { code <- 1 ## If there is an ameliaArgs passed, then we should use ## those. if (!identical(arglist,NULL)) { if (!("ameliaArgs" %in% class(arglist))) { error.code <- 46 error.mess <- paste("The argument list you provided is invalid.") return(list(code=error.code, message=error.mess)) } idvars <- arglist$idvars empri <- arglist$empri ts <- arglist$ts cs <- arglist$cs tolerance <- arglist$tolerance polytime <- arglist$polytime splinetime<- arglist$splinetime lags <- arglist$lags leads <- arglist$leads logs <- arglist$logs sqrts <- arglist$sqrts lgstc <- arglist$lgstc intercs <- arglist$intercs noms <- arglist$noms startvals <- arglist$startvals ords <- arglist$ords priors <- arglist$priors autopri <- arglist$autopri empri <- arglist$empri #change 1 bounds <- arglist$bounds overimp <- arglist$overimp emburn <- arglist$emburn boot.type <- arglist$boot.type max.resample <- arglist$max.resample } # If data frame is a tibble, code will break because of assumptions about # [, i, drop = TRUE]. 
Rather than change existing code, convert
  # `x` to a data.frame
  if (is.data.frame(x)) x <- as.data.frame(x)

  numopts <- nametonumber(x = x, ts = ts, cs = cs, idvars = idvars,
                          noms = noms, ords = ords, logs = logs,
                          sqrts = sqrts, lgstc = lgstc, lags = lags,
                          leads = leads)
  if (numopts$code == 1) {
    return(list(code = 44, message = numopts$mess))
  }

  if (incheck) {
    checklist <- amcheck(x = x, m = m, idvars = numopts$idvars,
                         priors = priors, empri = empri, ts = numopts$ts,
                         cs = numopts$cs, tolerance = tolerance,
                         polytime = polytime, splinetime = splinetime,
                         lags = numopts$lags, leads = numopts$leads,
                         logs = numopts$logs, sqrts = numopts$sqrts,
                         lgstc = numopts$lgstc, p2s = p2s,
                         frontend = frontend, intercs = intercs,
                         noms = numopts$noms, startvals = startvals,
                         ords = numopts$ords, collect = collect,
                         bounds = bounds, max.resample = max.resample,
                         overimp = overimp, emburn = emburn,
                         boot.type = boot.type)
    #check.call <- match.call()
    #check.call[[1]] <- as.name("amcheck")
    #checklist <- eval(check.call, parent.frame())

    if (!is.null(checklist$code)) {
      return(list(code = checklist$code, message = checklist$mess))
    }
    m <- checklist$m
    priors <- checklist$priors
  }

  priors <- generatepriors(AMr1 = is.na(x), empri = empri, priors = priors)

  archv <- match.call(expand.dots = TRUE)
  archv[[1]] <- NULL

  archv <- list(idvars = numopts$idvars, logs = numopts$logs,
                ts = numopts$ts, cs = numopts$cs, empri = empri,
                tolerance = tolerance, polytime = polytime,
                splinetime = splinetime, lags = numopts$lags,
                leads = numopts$leads, intercs = intercs,
                sqrts = numopts$sqrts, lgstc = numopts$lgstc,
                noms = numopts$noms, ords = numopts$ords, priors = priors,
                autopri = autopri, bounds = bounds,
                max.resample = max.resample, startvals = startvals,
                overimp = overimp, emburn = emburn,
                boot.type = boot.type) #change 2

  if (p2s == 2) {
    cat("beginning prep functions\n")
    flush.console()
  }

  d.trans <- amtransform(x, logs = numopts$logs, sqrts = numopts$sqrts,
                         lgstc = numopts$lgstc)
  d.subset <- amsubset(d.trans$x, idvars = numopts$idvars, p2s = p2s,
                       ts = numopts$ts, cs = numopts$cs,
                       polytime = polytime, splinetime = splinetime,
                       intercs = intercs, noms = numopts$noms,
                       priors = priors, bounds = bounds,
                       lags = numopts$lags, leads = numopts$leads,
                       overimp = overimp)
  d.scaled <- scalecenter(d.subset$x, priors = d.subset$priors,
                          bounds = d.subset$bounds)
  d.stacked <- amstack(d.scaled$x, colorder = TRUE,
                       priors = d.scaled$priors, bounds = d.scaled$bounds)

  if (incheck) {
    realAMp <- ncol(d.stacked$x)
    realAMn <- nrow(d.stacked$x)

    #Error code: 34-35
    #Too few observations to estimate parameters
    if (!identical(empri, NULL)) {
      if (realAMp * 2 > realAMn + empri) {
        error.code <- 34
        error.mess <- paste("The number of observations is too low to estimate the number of \n",
                            "parameters. You can either remove some variables, reduce \n",
                            "the order of the time polynomial, or increase the empirical prior.")
        return(list(code = error.code, message = error.mess))
      }
      if (realAMp * 4 > realAMn + empri) {
        warning("You have a small number of observations, relative to the number of variables in the imputation model. Consider removing some variables, or reducing the order of time polynomials to reduce the number of parameters.")
      }
    } else {
      if (realAMp * 2 > realAMn) {
        error.code <- 34
        error.mess <- paste("The number of observations is too low to estimate the number of \n",
                            "parameters. You can either remove some variables, reduce \n",
                            "the order of the time polynomial, or increase the empirical prior.")
        return(list(code = error.code, message = error.mess))
      }
      if (realAMp * 4 > realAMn) {
        warning("You have a small number of observations, relative to the number of variables in the imputation model.
Consider removing some variables, or reducing the order of time polynomials to reduce the number of parameters.") } } } return(list( x = d.stacked$x, code = code, priors = d.stacked$priors, n.order = d.stacked$n.order, p.order = d.stacked$p.order, scaled.mu = d.scaled$mu, scaled.sd = d.scaled$sd, trans.x = d.trans$x, blanks = d.subset$blanks, idvars = d.subset$idvars, ts = numopts$ts, cs = numopts$cs, noms = numopts$noms, index = d.subset$index, ords = numopts$ords, m = m, logs = numopts$logs, archv = archv, xmin = d.trans$xmin, sqrts = numopts$sqrts, lgstc = numopts$lgstc, # outname = outname, subset.index = d.subset$index, autopri = autopri, bounds = d.stacked$bounds, theta.names = d.subset$theta.names, missMatrix = d.subset$missMatrix, overvalues = d.subset$overvalues, empri = empri, #change 3a tolerance = tolerance)) #change 3b }
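## A hedged usage sketch (not run) of the standardization round trip
## implemented by scalecenter() and unscale() above; `toy` is a
## hypothetical numeric matrix:
##   toy <- matrix(rnorm(20), nrow = 5, ncol = 4)
##   s <- scalecenter(toy)
##   all.equal(unscale(s$x, mu = s$mu, sd = s$sd), toy)  # TRUE up to rounding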
/scratch/gouwar.j/cran-all/cranData/Amelia/R/prep.r
## ## print.amelia() - print method for the "amelia" class ## ## INPUT: object - an object of class "amelia" which is output ## from the amelia() function ## ## OUTPUT: Prints some information about the imputations. ## ## mb 02/02/09 ## print.amelia <- function(x, ...) { m <- length(x$imputations) cat(paste("\nAmelia output with ",m," imputed datasets.\n", sep="")) cat(paste("Return code: ", x$code,"\n"), sep="") cat(paste("Message: ", x$message, "\n"), sep="") cat("\nChain Lengths:\n") cat("--------------\n") for (i in 1:m) { cat(paste("Imputation ",i,": ", nrow(x$iterHist[[i]]),"\n", sep="")) } cat("\n") invisible(x) }
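## A hedged usage sketch (not run), using the bundled africa data:
##   data(africa)
##   a.out <- amelia(africa, cs = "country", ts = "year")
##   a.out  # auto-printing dispatches to print.amelia()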
/scratch/gouwar.j/cran-all/cranData/Amelia/R/print.amelia.R
#' Summary of an Amelia object
#'
#' Returns summary information from the Amelia run along with
#' missingness information.
#'
#' @param object an object of class \code{amelia}. Typically, an output
#'   from the function \code{amelia}.
#' @param ... further arguments.
#'
#' @seealso \code{\link{amelia}}, \code{\link{plot.amelia}}
summary.amelia <- function(object, ...) {

  percent.missing <- colMeans(object$missMatrix)
  n.patterns <- nrow(unique(object$missMatrix))

  rows.imputed <- nrow(na.omit(object$imputations[[1]]))
  rows.lwd <- sum(rowSums(object$missMatrix) == 0)

  print.amelia(object)
  cat("Rows after Listwise Deletion: ", rows.lwd, "\n")
  cat("Rows after Imputation: ", rows.imputed, "\n")
  cat("Patterns of missingness in the data: ", n.patterns, "\n\n")

  cat("Fraction Missing for original variables: \n")
  cat("-----------------------------------------\n\n")
  tb <- data.frame(cbind(percent.missing))
  rownames(tb) <- colnames(object$missMatrix)
  colnames(tb) <- "Fraction Missing"
  print(tb)
  cat("\n")

  if (!is.null(object$transform.calls)) {
    cat("Post-imputation transformed variables: \n")
    cat("-----------------------------------------\n\n")
    tnames <- unlist(lapply(object$transform.calls,
                            function(x) names(x)[-c(1, 2)]))
    texprs <- unlist(lapply(object$transform.calls,
                            function(x) as.character(x[-c(1, 2)])))
    tb2 <- data.frame(cbind(texprs))
    rownames(tb2) <- paste(tnames, "=")
    colnames(tb2) <- "Transformations"
    print(tb2)
  }
}
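## A hedged usage sketch (not run), following the africa example used
## elsewhere in the package:
##   data(africa)
##   a.out <- amelia(africa, cs = "country", ts = "year", logs = "gdp_pc")
##   summary(a.out)  # missingness fractions plus any transform() calls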
/scratch/gouwar.j/cran-all/cranData/Amelia/R/summary.amelia.R
summary.mi <- function(object, ...) { m <- length(object) nv <- length(object[[1]]) nr <- nrow(object[[1]]) cat("[", m, "imputations,", nv, "variables,", nr, "rows]\n\n") summary(do.call(rbind, object)) }
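## A hedged usage sketch (not run); this assumes the `imputations`
## element of amelia() output carries class "mi" so that summary()
## dispatches here:
##   data(africa)
##   a.out <- amelia(africa, cs = "country", ts = "year")
##   summary(a.out$imputations)  # stacks the m data sets and summarizes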
/scratch/gouwar.j/cran-all/cranData/Amelia/R/summary.mi.R
#' Transform imputed datasets from Amelia objects
#'
#' Updates the imputed datasets from an \code{amelia} output
#' with the specified transformations.
#'
#' @param _data an object of class "amelia"; typically output from the
#'   function \code{amelia}.
#' @param ... further arguments of the form \code{tag = value}.
#'
#' @details
#' The \code{\dots} arguments to \code{transform.amelia} are
#' expressions of the form \code{tag = value}, where \code{tag} is the
#' variable that is being updated or created and \code{value} is an
#' expression that is a function of the variables in the imputed
#' datasets. For instance, if you wanted to create an interaction of two
#' imputed variables, you could have one argument be \code{intervar =
#' var1 * var2}. This would either update the current variable
#' \code{intervar} in the imputed data or append a new variable called
#' \code{intervar} to the imputed datasets.
#'
#' @return
#' An object of class \code{amelia} with its \code{imputations} and
#' \code{missMatrix} values updated according to the transformations. In
#' addition, each of the calls to \code{transform.amelia} are stored in
#' the \code{transform.calls} element of the output.
#'
#' @seealso \code{\link{transform}}
transform.amelia <- function(`_data`, ...) {
  tcall <- match.call(expand.dots = TRUE)
  if (is.null(`_data`$transform.calls)) {
    `_data`$transform.calls <- list(tcall)
  } else {
    `_data`$transform.calls <- c(`_data`$transform.calls, tcall)
  }

  tcall[[1]] <- as.name("transform")
  names(tcall)[2] <- ""
  m <- length(`_data`$imputations)
  orig.data <- remove.imputations(`_data`)

  tcall[[2]] <- orig.data
  new.miss.matrix <- as.matrix(is.na(eval.parent(tcall)))

  for (i in 1:m) {
    tcall[[2]] <- `_data`$imputations[[i]]
    `_data`$imputations[[i]] <- eval.parent(tcall)
  }
  `_data`$missMatrix <- new.miss.matrix
  return(`_data`)
}
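## A hedged usage sketch (not run): add a logged variable to every
## imputed data set (variable names from the bundled africa data):
##   data(africa)
##   a.out <- amelia(africa, cs = "country", ts = "year")
##   a.out <- transform(a.out, lgdp = log(gdp_pc))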
/scratch/gouwar.j/cran-all/cranData/Amelia/R/transform.amelia.R
##' Evaluate an R expression in the environments constructed from the
##' imputed data sets of a call to the \code{amelia} function.
##'
##'
##' @title Execute commands within each imputed data set
##' @param data imputation output from the \code{amelia} function.
##' @param expr expression to evaluate in each imputed data set in
##'   \code{data}.
##' @param ... arguments to be passed to (future) methods.
##' @return a list the same length as \code{data$imputations} that
##'   contains the output of the expression as evaluated in each imputed
##'   data set of \code{data}.
##' @author Matt Blackwell
##'
##' @examples
##' data(africa)
##' a.out <- amelia(x = africa, cs = "country", ts = "year", logs =
##' "gdp_pc")
##'
##' imp.mods <- with(a.out, lm(gdp_pc ~ infl + trade))
##'
##' mi.combine(imp.mods, conf.int = TRUE)
##'
##' @export
with.amelia <- function(data, expr, ...) {
  expr <- rlang::enquo(expr)

  out <- vector("list", length(data$imputations))
  for (j in seq_along(data$imputations)) {
    out[[j]] <- rlang::eval_tidy(expr, data$imputations[[j]])
  }
  class(out) <- "amest"
  out
}
/scratch/gouwar.j/cran-all/cranData/Amelia/R/with.R
#' Write Amelia imputations to file
#'
#' Writes the imputed datasets to file from a run of \code{amelia}
#'
#' @param obj an object of class "amelia"; typically output from the
#'   function \code{amelia}
#' @param separate logical variable. If \code{TRUE} (default), the
#'   imputed datasets will be written to separate files, whose names come
#'   from the \code{file.stem} and \code{extension} arguments. If \code{FALSE},
#'   the imputations are stacked and written as a single file.
#' @param file.stem the leading part of the filename to save the output
#'   to. The imputation number and \code{extension} will be added to
#'   complete the filename. This can include a directory path.
#' @param extension the extension of the filename. This is simply what
#'   follows \code{file.stem} and the imputation number.
#' @param format one of the following output formats: \code{csv},
#'   \code{dta} or \code{table}. See details.
#' @param impvar the name of the imputation number variable written to the
#'   stacked dataset when \code{separate} is \code{FALSE}.
#' @param orig.data logical variable indicating whether the original,
#'   unimputed dataset should be included in the stacked dataset when
#'   \code{separate} is \code{FALSE}.
#' @param \dots further arguments for the \code{write} functions.
#'
#' @details
#' \code{write.amelia} writes the imputed datasets to a file or a set of files
#' using one of the following functions: \code{write.csv},
#' \code{write.dta}, or \code{write.table}. You can pass arguments to
#' these functions from \code{write.amelia}.
#'
#' When \code{separate} is \code{TRUE}, each imputed dataset is written
#' to its own file. If you were to set \code{file.stem} to
#' \code{"outdata"} and the \code{extension} to \code{".csv"}, then the
#' resulting filenames of the written files will be
#' \preformatted{
#' outdata1.csv
#' outdata2.csv
#' outdata3.csv
#' ...
#' }
#' and so on.
#'
#' When \code{separate} is \code{FALSE}, the function adds a variable
#' called \code{impvar} to each dataset which indicates the imputed
#' dataset to which the row belongs. Then, each of the datasets are
#' stacked together to create one dataset. If \code{orig.data} is \code{TRUE},
#' then the original, unimputed dataset is included at the top of the
#' stack, with its imputation number set to 0.
#'
#' @seealso \code{\link{write.csv}}, \code{\link{write.table}}, \code{\link{write.dta}}
write.amelia <- function(obj, separate = TRUE, file.stem,
                         extension = NULL, format = "csv",
                         impvar = "imp", orig.data = TRUE, ...)
{
  if (!(format %in% c("csv", "table", "dta"))) {
    stop("The writing format is not supported")
  }

  ## smart defaults for the extensions
  if (missing(extension)) {
    if (format == "dta") extension <- ".dta"
    if (format == "csv") extension <- ".csv"
  }

  m <- length(obj$imputations)
  Call <- match.call(expand.dots = TRUE)
  Call[[1]] <- as.name(paste("write", format, sep = "."))

  ## these arguments should not be passed to write.format
  Call$obj <- NULL
  Call$file.stem <- NULL
  Call$extension <- NULL
  Call$format <- NULL
  Call$separate <- NULL
  Call$orig.data <- NULL
  Call$impvar <- NULL

  if (separate) {
    for (i in 1:m) {
      if (format == "dta")
        Call$dataframe <- obj$imputations[[i]]
      else
        Call$x <- obj$imputations[[i]]
      Call$file <- paste(file.stem, i, extension, sep = "")
      eval.parent(Call)
    }
  } else {
    if (orig.data) {
      odata <- obj$imputations[[1]]
      is.na(odata) <- obj$missMatrix
      odata[, impvar] <- 0
    }
    obj$imputations[[1]][, impvar] <- 1
    if (orig.data) {
      obj$imputations[[1]] <- rbind(odata, obj$imputations[[1]])
    }
    if (format == "dta") {
      Call$dataframe <- obj$imputations[[1]]
    } else {
      Call$x <- obj$imputations[[1]]
    }
    for (i in 2:m) {
      obj$imputations[[i]][, impvar] <- i
      if (format == "dta") {
        Call$dataframe <- rbind(Call$dataframe, obj$imputations[[i]])
      } else {
        Call$x <- rbind(Call$x, obj$imputations[[i]])
      }
    }
    Call$file <- paste(file.stem, extension, sep = "")
    eval.parent(Call)
  }
  invisible()
}
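## A hedged usage sketch (not run): write each imputed data set to its
## own CSV file (outdata1.csv, outdata2.csv, ...), then one stacked file:
##   data(africa)
##   a.out <- amelia(africa, cs = "country", ts = "year")
##   write.amelia(a.out, file.stem = "outdata", format = "csv")
##   write.amelia(a.out, separate = FALSE, file.stem = "outstacked")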
/scratch/gouwar.j/cran-all/cranData/Amelia/R/write.amelia.R
.onAttach <- function(...) { mylib <- dirname(system.file(package = "Amelia")) ver <- packageVersion("Amelia") builddate <- packageDescription("Amelia")$Date curryear <- format(Sys.time(), "%Y") mess <- c("## ", "## Amelia II: Multiple Imputation", paste("## (Version ",ver,", built: ", builddate,")", sep=""), paste("## Copyright (C) 2005-",curryear, " James Honaker, Gary King and Matthew Blackwell",sep=""), paste("## Refer to http://gking.harvard.edu/amelia/", "for more information"), "## ") mess <- paste(mess, collapse = "\n") packageStartupMessage(mess) }
/scratch/gouwar.j/cran-all/cranData/Amelia/R/zzz.R
## ----setup, echo = FALSE, include = FALSE------------------------------------- knitr::opts_chunk$set(fig.width = 5, fig.height = 4, fig.align = "center") options(digits = 4, show.signif.stars = FALSE) set.seed(12345) ## ----load_av, eval = FALSE---------------------------------------------------- # library(Amelia) # AmeliaView()
/scratch/gouwar.j/cran-all/cranData/Amelia/inst/doc/ameliaview.R
---
title: "AmeliaView GUI Guide"
date: "`r Sys.Date()`"
link-citations: yes
bibliography: amelia.bib
output:
  rmarkdown::html_vignette:
    keep_md: true
vignette: >
  %\VignetteIndexEntry{AmeliaView GUI Guide}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r setup, echo = FALSE, include = FALSE}
knitr::opts_chunk$set(fig.width = 5, fig.height = 4, fig.align = "center")
options(digits = 4, show.signif.stars = FALSE)
set.seed(12345)
```

Below is a guide to the AmeliaView menus with references back to the
user's guide. The same principles from the user's guide apply to
AmeliaView. The only difference is how you interact with the program.
Whether you use the GUI or the command line versions, the same
underlying code is being called, and so you can read the command
line-oriented discussion above even if you intend to use the GUI.

## Loading AmeliaView

The easiest way to load AmeliaView is to open an R session and type the
following two commands:

```{r load_av, eval = FALSE}
library(Amelia)
AmeliaView()
```

This will bring up the AmeliaView window on any platform.

![AmeliaView welcome screen](assets/splash.png)

## Loading data into AmeliaView

AmeliaView loads with a welcome screen that has buttons which can load
data in many of the common formats. Each of these will bring up a window
for choosing your dataset. Note that these buttons are only a subset of
the possible ways to load data in AmeliaView. Under the File menu (shown
below), you will find more options, including the datasets included in
the package (`africa` and `freetrade`). You will also find import
commands for Comma-Separated Values (.CSV), Tab-Delimited Text (.TXT),
Stata v.5-10 (.DTA), SPSS (.DAT), and SAS Transport (.XPORT). Note that
when using a CSV file, Amelia assumes that your file has a header (that
is, a row at the top of the data indicating the variable names).

![AmeliaView File and import menu.](assets/import.png)

You can also load data from an RData file. If the RData file contains
more than one `data.frame`, a pop-up window will ask you to find the
dataset you would like to load.

In the file menu, you can also change the underlying working directory.
This is where AmeliaView will look for data by default and where it
will save imputed datasets.

## Variable Dashboard

![Main variable dashboard in AmeliaView](assets/main.png)

Once a dataset is loaded, AmeliaView will show the variable dashboard.
In this mode, you will see a table of variables, with the current
options for each of them shown, along with a few summary statistics.
You can reorder this table by any of these columns by clicking on the
column headings. This might be helpful to, say, order the variables by
mean or amount of missingness.

![Variable options via right-click menu on the variable
dashboard](assets/context-menu.png)

You can set options for individual variables by the right-click context
menu or through the "Variables" menu. For instance, clicking "Set as
Time-Series Variable" will set the currently selected variable in the
dashboard as the time-series variable. Certain options are disabled
until other options are enabled. For instance, you cannot add a lagged
variable to the imputation until you have set the time-series variable.
Note that any `factor` in the data is marked as an ID variable by
default, since a `factor` cannot be included in the imputation without
being set as an ID variable, a nominal variable, or the cross-section
variable.
If there is a `factor` that fails to meet one of these conditions, a
red flag will appear next to the variable name.

1. **Set as Time-Series Variable** - Sets the currently selected
   variable to the time-series variable. Disabled when more than one
   variable is selected. Once this is set, you can add lags and leads
   and add splines of time. The time-series variable will have a clock
   icon next to it.
2. **Set as Cross-Section Variable** - Sets the currently selected
   variable to the cross-section variable. Disabled when more than one
   variable is selected. Once this is set, you can interact the splines
   of time with the cross-section. The cross-section variable will have
   a person icon next to it.
3. **Unset as Time-Series Variable** - Removes the time-series status
   of the variable. This will remove any lags, leads, or splines of
   time.
4. **Unset as Cross-Section Variable** - Removes the cross-section
   status of the variable. This will remove any intersection of the
   splines of time and the cross-section.
5. **Add Lag/Lead** - Adds versions of the selected variables either
   lagged back ("lag") or forward ("lead").
6. **Remove Lag/Lead** - Removes any lags or leads on the selected
   variables.
7. **Plot Histogram of Selected** - Plots a histogram of the selected
   variables. This command will attempt to put all of the histograms on
   one page, but if more than nine histograms are requested, they will
   appear on multiple pages.
8. **Add Transformation...** - Adds a transformation setting for the
   selected variables. Note that each variable can only have one
   transformation and the time-series and cross-section variables
   cannot be transformed.
9. **Remove Transformation** - Removes any transformation for the
   selected variables.
10. **Add or Edit Bounds** - Opens a dialog box to set logical bounds
    for the selected variable.

## Amelia Options

![Options menu](assets/options.png)

The "Variable" menu and the variable dashboard are the place to set
variable-level options, but global options are set in the "Options"
menu. For more information on these options, see
`vignette("using-amelia")`.

1. **Splines of Time with...** - This option, if activated, will have
   Amelia use flexible trends of time with the specified number of
   knots in the imputation. The higher the number of knots the greater
   the variation in the trend structure, yet it will take more degrees
   of freedom to estimate.
2. **Interact with Cross-Section?** - Include an interaction of the
   cross-section with the time trends. This interaction is a way of
   allowing the trend of time to vary across cases as well. Using a
   0-level spline of time and interacting with the cross section is the
   equivalent of using fixed effects.
3. **Add Observational Priors...** - Brings a dialog window to set
   prior beliefs about ranges for individual missing observations.
4. **Numerical Options** - Brings a dialog window to set the tolerance
   of the EM algorithm, the seed of the random number generator, the
   ridge prior for numerical stability, and the maximum number of
   redraws for the logical bounds.
5. **Draw Missingness Map** - Draws a missingness map.
6. **Output File Options** - Brings a dialog to set the prefix of the
   imputed data file names and the number of imputations. If you set
   the prefix to `mydata`, your output files will be `mydata1.csv,
   mydata2.csv...` etc.
7. **Output File Type** - Sets the format of imputed data. If you would
   like to not save any output data sets (if you wanted, for instance,
   to simply look at diagnostics), set this option to "(no save)."
Currently, you can save the output data as: Comma Separated Values
   (.CSV), Tab Delimited Text (.TXT), Stata (.DTA), R save object
   (.RData), or to hold it in R memory. This last option will only work
   if you have called AmeliaView from an R session and want to return
   to the R command line to work with the output. Its name in the R
   workspace will be the file prefix. The stacked version of the Stata
   output will work with their built-in `mi` tools.

### Numerical options

![Numerical options menu](assets/numopts.png)

1. **Seed** - Sets the seed for the random number generator used by
   Amelia. Useful if you need to have the same output twice.
1. **Tolerance** - Adjust the level of tolerance that Amelia uses to
   check convergence of the EM algorithm. In very large datasets, if
   your imputation chains run a long time without converging,
   increasing the tolerance will allow a lower threshold to judge
   convergence and end chains after fewer iterations.
1. **Empirical Prior** - A prior that adds observations to your data in
   order to shrink the covariances. A useful place to start is around
   0.5% of the total number of observations in the dataset.
1. **Maximum Resample for Bounds** - Amelia fits logical bounds by
   rejecting any draws that do not fall within the bounds. This value
   sets the number of times Amelia should attempt to resample to fit
   the bounds before setting the imputation to the bound.

### Add Distributional Prior

![Detail for Add Distributional Prior dialog](assets/distpri.png)

1. **Current Priors** - A table of current priors in distributional
   form, with the variable and case name. You can remove priors by
   selecting them and using the right-click context menu.
1. **Case** - Select the case name or number you wish to set the prior
   about. You can also choose to make the prior for the entire
   variable, which will set the prior for any missing cell in that
   variable. The case names are generated from the row name of the
   observation, the value of the cross-section variable of the
   observation and the value of the time series variable of the
   observation.
1. **Variable** - The variable associated with the prior you would like
   to specify. The list provided only shows the missing variables for
   the currently selected observation.
1. **Mean** - The mean value of the prior. The textbox will not accept
   letters or out of place punctuation.
1. **Standard Deviation** - The standard deviation of the prior. The
   textbox will only accept positive non-zero values.

### Add Range Prior

![Detail for Add Range Prior dialog](assets/rangepri.png)

1. **Case** - Select the case name or number you wish to set the prior
   about. You can also choose to make the prior for the entire
   variable, which will set the prior for any missing cell in that
   variable. The case names are generated from the row name of the
   observation, the value of the cross-section variable of the
   observation and the value of the time series variable of the
   observation.
1. **Variable** - The variable associated with the prior you would like
   to specify. The list provided only shows the missing variables for
   the currently selected observation.
1. **Minimum** - The minimum value of the prior. The textbox will not
   accept letters or out of place punctuation.
1. **Maximum** - The maximum value of the prior. The textbox will not
   accept letters or out of place punctuation.
1. **Confidence** - The confidence level of the prior. This should be
   between 0 and 1, non-inclusive. This value represents how certain
   your priors are.
This value cannot be 1, even if you are absolutely certain of a given
   range. This is used to convert the range into an appropriate
   distributional prior.

## Imputing and checking diagnostics

![Output log showing Amelia output for a successful
imputation.](assets/output-log.png)

Once you have set all the relevant options, you can impute your data by
clicking the "Impute!" button in the toolbar. In the bottom right
corner of the window, you will see a progress bar that indicates the
progress of the imputations. For large datasets this could take some
time. Once the imputations are complete, you should see a "Successful
Imputation!" message appear where the progress bar was. You can click
on this message to open the folder containing the imputed datasets.

If there was an error during the imputation, the output log will pop-up
and give you the error message along with some information about how to
fix the problem. Once you have fixed the problem, simply click
"Impute!" again. Even if there was no error, you may want to view the
output log to see how Amelia ran. To do so, simply click the "Show
Output Log" button. The log also shows the call to the `amelia()`
function in R. You can use this code snippet to run the same imputation
from the R command line. You will have to replace the `x` argument in
the `amelia()` call with the name of your dataset in the R session.

### Diagnostics Dialog

![Detail for the Diagnostics dialog](assets/diag.png)

Upon the successful completion of an imputation, the diagnostics menu
will become available. Here you can use all of the diagnostics available
at the command-line.

1. **Compare Plots** - This will display the relative densities of the
   observed (black) and imputed (red) data. The density of the imputed
   values are the average imputations across all of the imputed
   datasets.
1. **Overimpute** - This will run Amelia on the full data with one cell
   of the chosen variable artificially set to missing and then check
   the result of that imputation against the truth. The resulting plot
   will plot average imputations against true values along with 90%
   confidence intervals. These are plotted over a $y=x$ line for visual
   inspection of the imputation model.
1. **Number of overdispersions** - When running the overdispersion
   diagnostic, you need to run the imputation algorithm from several
   overdispersed starting points in order to get a clear idea of how
   the chains are converging. Enter the number of imputations here.
1. **Number of dimensions** - The overdispersion diagnostic must reduce
   the dimensionality of the paths of the imputation algorithm to
   either one or two dimensions due to graphical constraints.
1. **Overdisperse** - Run overdispersion diagnostic to visually inspect
   the convergence of the Amelia algorithm from multiple start values
   that are drawn randomly.

## Sessions

It is often useful to save a session of AmeliaView to save time if you
have to impute the same data again. Using the **Save Session** button
will do just that, saving all of the current settings (including the
original and any imputed data) to an RData file. You can then reload
your session, on the same computer or any other, simply by clicking the
**Load Session** button and finding the relevant RData file. All of the
settings will be restored, including any completed imputations. Thus,
if you save the session after imputing, you can always load up those
imputations and view their diagnostics using the sessions feature of
AmeliaView.
/scratch/gouwar.j/cran-all/cranData/Amelia/inst/doc/ameliaview.Rmd
## ----setup, echo = FALSE, include = FALSE------------------------------------- knitr::opts_chunk$set(fig.width = 5, fig.height = 4, fig.align = "center") options(digits = 4, show.signif.stars = FALSE) set.seed(12345) ## ----amelia, results = "hide"------------------------------------------------- library(Amelia) data(freetrade) a.out <- amelia(freetrade, m = 5, ts = "year", cs = "country", p2s = 0) ## ----plot_amelia-------------------------------------------------------------- plot(a.out, which.vars = 3:6) ## ----compare_density---------------------------------------------------------- compare.density(a.out, var = "signed") ## ----------------------------------------------------------------------------- overimpute(a.out, var = "tariff") ## ----overimp_bad, echo = FALSE, results = "hide"------------------------------ dd <- Amelia:::rmvnorm(50, mu = c(0.5,0.5), vcv = matrix(c(0.25^2,.06, .06,0.25^2),2,2)) ddmiss <- sample(1:50, replace = FALSE, size = 10) is.na(dd) <- ddmiss aa.out <- amelia(dd, m = 5) overimpute(aa.out, var = 2, main = "Observed versus Imputed Values") ## ----displd------------------------------------------------------------------- disperse(a.out, dims = 1, m = 5) disperse(a.out, dims = 2, m = 5) ## ----tsplot1------------------------------------------------------------------ a.out.time <- amelia(freetrade, ts = "year", cs = "country", polytime = 1, intercs = TRUE, p2s = 0) tscsPlot(a.out.time, cs = "Malaysia", main = "Malaysia (with time settings)", var = "tariff", ylim = c(-10, 60)) ## ----mmap1-------------------------------------------------------------------- missmap(a.out)
/scratch/gouwar.j/cran-all/cranData/Amelia/inst/doc/diagnostics.R
---
title: "Multiple Imputation Diagnostics"
date: "`r Sys.Date()`"
link-citations: yes
bibliography: amelia.bib
output:
  rmarkdown::html_vignette:
    keep_md: true
vignette: >
  %\VignetteIndexEntry{Multiple Imputation Diagnostics}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r setup, echo = FALSE, include = FALSE}
knitr::opts_chunk$set(fig.width = 5, fig.height = 4, fig.align = "center")
options(digits = 4, show.signif.stars = FALSE)
set.seed(12345)
```

Amelia currently provides a number of diagnostic tools to inspect the
imputations that are created. To illustrate these, we use the
`freetrade` data from the package:

```{r amelia, results = "hide"}
library(Amelia)
data(freetrade)
a.out <- amelia(freetrade, m = 5, ts = "year", cs = "country", p2s = 0)
```

## Comparing Densities

One check on the plausibility of the imputation model is to compare the
distribution of imputed values to the distribution of observed values.
Obviously we cannot expect, *a priori*, that these distributions will be
identical as the missing values may differ systematically from the
observed values--this is a fundamental reason to impute to begin with!
Imputations with strange distributions or those that are far from the
observed data may indicate that the imputation model needs at least some
investigation and possibly some improvement.

The `plot.amelia()` method works on output from `amelia()` and, by
default, shows for each variable a plot of the relative frequencies of
the observed data with an overlay of the relative frequency of the
imputed values.

```{r plot_amelia}
plot(a.out, which.vars = 3:6)
```

where the argument `which.vars` indicates which of the variables to plot
(in this case, we are taking the 3rd through the 6th variables).

The imputed curve (in red) plots the density of the *mean* imputation
over the $m$ datasets. That is, for each cell that is missing in the
variable, the diagnostic will find the mean of that cell across each of
the $m$ datasets and use that value for the density plot. The black
distributions are those of the observed data. When variables are
completely observed, their densities are plotted in blue. These graphs
will allow you to inspect how the density of imputations compares to the
density of observed data. Some discussion of these graphs can be found
in @AbaGelLev08. Minimally, these graphs can be used to check that the
mean imputation falls within known bounds, when such bounds exist in
certain variables or settings.

We can also use the function `compare.density()` directly to make these
plots for an individual variable:

```{r compare_density}
compare.density(a.out, var = "signed")
```

## Overimpute {#sec_overimpute}

*Overimputing* is a technique we have developed to judge the fit of the
imputation model. Because of the nature of the missing data mechanism,
it is impossible to tell whether the mean prediction of the imputation
model is close to the unobserved value that is trying to be recovered.
By definition this missing data does not exist to create this
comparison, and if it existed we would no longer need the imputations or
care about their accuracy. However, a natural question the applied
researcher will often ask is how accurate are these imputed values?

Overimputing involves sequentially treating each of the *observed*
values as if they had actually been missing. For each observed value in
turn we then generate several hundred imputed values of that observed
value, *as if it had been missing*.
While $m=5$ imputations are sufficient for most analysis models, this
large number of imputations allows us to construct a confidence interval
of what the imputed value would have been, had any of the observed data
been missing. We can then graphically inspect whether our observed data
tends to fall within the region where it would have been imputed had it
been missing.

For example, we can run the overimputation diagnostic on our data by
running

```{r}
overimpute(a.out, var = "tariff")
```

Our overimputation diagnostic runs this procedure through all of the
observed values for a user selected variable. We can graph the estimates
of each observation against the true values of the observation. On this
graph, a $y=x$ line indicates the line of perfect agreement; that is, if
the imputation model was a perfect predictor of the true value, all the
imputations would fall on this line. For each observation,
`overimpute()` also plots 90% confidence intervals that allow the user
to visually inspect the behavior of the imputation model. By checking
how many of the confidence intervals cover the $y=x$ line, we can tell
how often the imputation model can confidently predict the true value of
the observation.

Occasionally, the overimputation can display unintuitive results. For
example, different observations may have different numbers of observed
covariates. If covariates that are useful to the prediction are
themselves missing, then the confidence interval for this observation
will be much larger. In the extreme, there may be observations where the
observed value we are trying to overimpute is *the only* observed value
in that observation, and thus there is nothing left to impute that
observation with when we pretend that it is missing, other than the mean
and variance of that variable. In these cases, we should correctly
expect the confidence interval to be very large.

An example of this graph is shown here:

```{r overimp_bad, echo = FALSE, results = "hide"}
dd <- Amelia:::rmvnorm(50, mu = c(0.5,0.5),
                       vcv = matrix(c(0.25^2,.06, .06,0.25^2),2,2))
ddmiss <- sample(1:50, replace = FALSE, size = 10)
is.na(dd) <- ddmiss
aa.out <- amelia(dd, m = 5)
overimpute(aa.out, var = 2, main = "Observed versus Imputed Values")
```

In this simulated bivariate dataset, one variable is overimputed and the
results displayed. The second variable is either observed, in which case
the confidence intervals are very small and the imputations (yellow) are
very accurate, or the second variable is missing in which case this
variable is being imputed simply from the mean and variance parameters,
and the imputations (red) have a very large and encompassing spread. The
circles represent the mean of all the imputations for that value.

As the amount of missing information in a particular pattern of
missingness increases, we expect the width of the confidence interval to
increase. The color of the confidence interval reflects the percent of
covariates observed in that pattern of missingness, as reflected in the
legend at the bottom.

## Overdispersed Starting Values {#sec_overdisperse}

If the data given to `amelia()` has a poorly behaved likelihood, the EM
algorithm can have problems finding a global maximum of the likelihood
surface and starting values can begin to affect imputations. Because the
EM algorithm is deterministic, the point in the parameter space where
you start it can impact where it ends, though this is irrelevant when
the likelihood has only one mode.
However, if the starting values of an EM chain are close to a local
maximum, the algorithm may find this maximum, unaware that there is a
global maximum farther away. To make sure that our imputations do not
depend on our starting values, a good test is to run the EM algorithm
from multiple, dispersed starting values and check their convergence. In
a well behaved likelihood, we will see all of these chains converging to
the same value, and reasonably conclude that this is the likely global
maximum. On the other hand, we might see our EM chain converging to
multiple locations. The algorithm may also wander around portions of the
parameter space that are not fully identified, such as a ridge of equal
likelihood, as would happen for example, if the same variable were
accidentally included in the imputation model twice.

Amelia includes a diagnostic to run the EM chain from multiple starting
values that are overdispersed from the estimated maximum. The
overdispersion diagnostic will display a graph of the paths of each
chain. Since these chains move through spaces that are in an extremely
high number of dimensions and cannot be displayed graphically, the
diagnostic reduces the dimensionality of the EM paths by showing the
paths relative to the largest principal components of the final mode(s)
that are reached. Users can choose between graphing the movement over
the two largest principal components, or more simply the largest
dimension with time (iteration number) on the $x$-axis. The number of EM
chains can also be adjusted. Once the diagnostic draws the graph, the
user can visually inspect the results to check that all chains converge
to the same point.

For our original model, this is a simple call to `disperse()`:

```{r displd}
disperse(a.out, dims = 1, m = 5)
disperse(a.out, dims = 2, m = 5)
```

where `m` designates the number of places to start EM chains from and
`dims` are the number of dimensions of the principal components to show.

In one dimension, the diagnostic plots movement of the chain on the
$y$-axis and time, in the form of the iteration number, on the $x$-axis.
The first plot shows a well behaved likelihood, as the starting values
all converge to the same point. The black horizontal line is the point
where `amelia()` converges when it uses the default method for choosing
the starting values. The diagnostic takes the end point of this chain as
the possible maximum and disperses the starting values away from it to
see if the chain will ever finish at another mode.

## Time-series Plots {#sec_tscsplots}

As discussed above, information about time trends and fixed effects can
help produce better imputations. One way to check the plausibility of
our imputation model is to see how it predicts missing values in a time
series. If the imputations for the Malaysian tariff rate were
drastically higher in 1990 than the observed years of 1989 or 1991, we
might worry that there is a problem in our imputation model. Checking
these time series is easy to do with `tscsPlot()`. Simply choose the
variable (with the `var` argument) and the cross-section (with the `cs`
argument) to plot the observed time-series along with distributions of
the imputed values for each missing time period.
For instance, we can get the plot of the `tariff` variable for Malaysia
with the following commands:

```{r tsplot1}
a.out.time <- amelia(freetrade, ts = "year", cs = "country", polytime = 1,
                     intercs = TRUE, p2s = 0)
tscsPlot(a.out.time, cs = "Malaysia",
         main = "Malaysia (with time settings)",
         var = "tariff", ylim = c(-10, 60))
```

Here, the black points are the observed tariff rates for Malaysia from
1980 to 2000. The red points are the mean imputation for each of the
missing values, along with their 95% confidence bands. We draw these
bands by imputing each of the missing values 100 times to get the
imputation distribution for that observation. In this plot, we can see
that the imputed 1990 tariff rate is quite in line with the values
around it. Notice also that values toward the beginning and end of the
time series have slightly higher imputation variance. This occurs
because the fit of the polynomials of time in the imputation model have
higher variance at the beginning and end of the time series. This is
intuitive because these points have fewer neighbors from which to draw
predictive power.

A word of caution is in order. As with comparing the histograms of
imputed and observed values, there could be reasons that the missing
values are systematically different than the observed time series. For
instance, if there had been a major financial crisis in Malaysia in 1990
which caused the government to close off trade, then we would expect
that the missing tariff rates should be quite different than the
observed time series. If we have this information in our imputation
model, we might expect to see out-of-line imputations in these
time-series plots. If, on the other hand, we did not have this
information, we might see "good" time-series plots that fail to point
out this violation of the MAR assumption. Our imputation model would
produce poor estimates of the missing values since it would be unaware
that both the missingness and the true unobserved tariff rate depend on
another variable. Hence, `tscsPlot()` is useful for finding obvious
problems in the imputation model and comparing the efficiency of various
imputation models, but it cannot speak to the untestable assumption of
MAR.

## Missingness maps {#sec_missmaps}

One useful tool for exploring the missingness in a dataset is a
*missingness map*. This is a map that visualizes the dataset as a grid
and colors the grid by missingness status. The columns of the grid are
the variables and the rows are the observations, as in any spreadsheet
program. This tool allows for a quick summary of the patterns of
missingness in the data. We can simply call the `missmap()` function on
our output from `amelia()`:

```{r mmap1}
missmap(a.out)
```

The `missmap()` function arranges the columns so that the variables are
in decreasing order of missingness from left to right. If the `cs`
argument was set in the `amelia` function, the labels for the rows will
indicate where each of the cross-sections begins. In this missingness
map, it is clear that the tariff rate is the variable most missing in
the data and it tends to be missing in blocks of a few observations.
Gross international reserves (`intresmi`) and financial openness
(`fiveop`), on the other hand, are missing mostly at the end of each
cross-section. This suggests *missingness by merging*, when variables
with different temporal coverages are merged to make one dataset.
Sometimes this kind of missingness is an artifact of the date at which the data was merged and researchers can resolve it by finding updated versions of the relevant variables. The missingness map is an important tool for understanding the patterns of missingness in the data and can often indicate potential ways to improve the imputation model or data collection process. ## References
/scratch/gouwar.j/cran-all/cranData/Amelia/inst/doc/diagnostics.Rmd
## ----loadpkg, echo = FALSE, include = FALSE----------------------------------- knitr::opts_chunk$set(fig.width = 5, fig.height = 4, fig.align = "center")
/scratch/gouwar.j/cran-all/cranData/Amelia/inst/doc/intro-mi.R
---
title: "Introduction to Multiple Imputation"
date: "`r Sys.Date()`"
link-citations: yes
bibliography: amelia.bib
output:
  rmarkdown::html_vignette:
    keep_md: true
vignette: >
  %\VignetteIndexEntry{Introduction to Multiple Imputation}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r loadpkg, echo = FALSE, include = FALSE}
knitr::opts_chunk$set(fig.width = 5, fig.height = 4, fig.align = "center")
```

## Introduction {#sec:intro}

Missing data is a ubiquitous problem in social science data. Respondents
do not answer every question, countries do not collect statistics every
year, archives are incomplete, subjects drop out of panels. Most
statistical analysis methods, however, assume the absence of missing
data, and are only able to include observations for which every variable
is measured. Amelia allows users to impute ("fill in" or rectangularize)
incomplete data sets so that analyses which require complete
observations can appropriately use all the information present in a
dataset with missingness, and avoid the biases, inefficiencies, and
incorrect uncertainty estimates that can result from dropping all
partially observed observations from the analysis.

Amelia performs *multiple imputation*, a general-purpose approach to
data with missing values. Multiple imputation has been shown to reduce
bias and increase efficiency compared to listwise deletion. Furthermore,
ad-hoc methods of imputation, such as mean imputation, can lead to
serious biases in variances and covariances. Unfortunately, creating
multiple imputations can be a burdensome process due to the technical
nature of the algorithms involved. Amelia provides users with a simple
way to create and implement an imputation model, generate imputed
datasets, and check its fit using diagnostics.

The Amelia program goes several significant steps beyond the
capabilities of the first version of Amelia [@HonJosKin98]. For one, the
bootstrap-based EMB algorithm included in Amelia can impute many more
variables, with many more observations, in much less time. The great
simplicity and power of the EMB algorithm made it possible to write
Amelia so that it virtually never crashes --- which to our knowledge
makes it unique among all existing multiple imputation software --- and
is much faster than the alternatives too. Amelia also has features to
make valid and much more accurate imputations for cross-sectional,
time-series, and time-series-cross-section data, and allows the
incorporation of observation and data-matrix-cell level prior
information. In addition to all of this, Amelia provides many diagnostic
functions that help users check the validity of their imputation model.
This software implements the ideas developed in @HonKin10.

## What Amelia Does {#sec:what}

Multiple imputation involves imputing $m$ values for each missing cell
in your data matrix and creating $m$ "completed" data sets. Across these
completed data sets, the observed values are the same, but the missing
values are filled in with a distribution of imputations that reflect the
uncertainty about the missing data. After imputation with Amelia's EMB
algorithm, you can apply whatever statistical method you would have used
if there had been no missing values to each of the $m$ data sets, and
use a simple procedure, described below, to combine the
results[^combine]. Under normal circumstances, you only need to impute
once and can then analyze the $m$ imputed data sets as many times and
for as many purposes as you wish.
The advantage of Amelia is that it combines the comparative speed and
ease-of-use of our algorithm with the power of multiple imputation, to
let you focus on your substantive research questions rather than
spending time developing complex application-specific models for
nonresponse in each new data set. Unless the rate of missingness is very
high, $m = 5$ (the program default) is probably adequate.

[^combine]: You can combine the results automatically by doing your data
analyses within [Zelig for R](https://zeligproject.org), or within
[Clarify for Stata](https://gking.harvard.edu/clarify).

### Assumptions

The imputation model in Amelia assumes that the complete data (that is,
both observed and unobserved) are multivariate normal. If we denote the
$(n \times k)$ dataset as $D$ (with observed part $D^{obs}$ and
unobserved part $D^{mis}$), then this assumption is

\begin{equation}
D \sim \mathcal{N}_k(\mu, \Sigma),
\end{equation}

which states that $D$ has a multivariate normal distribution with mean
vector $\mu$ and covariance matrix $\Sigma$. The multivariate normal
distribution is often a crude approximation to the true distribution of
the data, yet there is evidence that this model works as well as other,
more complicated models even in the face of categorical or mixed data
[see @Schafer97, @SchOls98]. Furthermore, transformations of many types
of variables can often make this normality assumption more plausible
(see `vignette("using-amelia")` for more information on how to implement
this in Amelia).

The essential problem of imputation is that we only observe $D^{obs}$,
not the entirety of $D$. In order to gain traction, we need to make the
usual assumption in multiple imputation that the data are *missing at
random* (MAR). This assumption means that the pattern of missingness
only depends on the observed data $D^{obs}$, not the unobserved data
$D^{mis}$. Let $M$ be the missingness matrix, with cells $m_{ij} = 1$
if $d_{ij} \in D^{mis}$ and $m_{ij} = 0$ otherwise. Put simply, $M$ is a
matrix that indicates whether or not a cell is missing in the data. With
this, we can define the MAR assumption as

\[
p(M|D) = p(M|D^{obs}).
\]

Note that MAR includes the case when missing values are created randomly
by, say, coin flips, but it also includes many more sophisticated
missingness models. When missingness is not dependent on the data at
all, we say that the data are *missing completely at random* (MCAR).
Amelia requires both the multivariate normality and the MAR assumption
(or the simpler special case of MCAR). Note that the MAR assumption can
be made more plausible by including additional variables in the
imputation dataset $D$ beyond just those eventually envisioned to be
used in the analysis model.

### Algorithm

In multiple imputation, we are concerned with the complete-data
parameters, $\theta = (\mu, \Sigma)$. When writing down a model of the
data, it is clear that our observed data is actually $D^{obs}$ and $M$,
the missingness matrix. Thus, the likelihood of our observed data is
$p(D^{obs}, M|\theta)$. Using the MAR assumption^[There is an additional
assumption hidden here that $M$ does not depend on the complete-data
parameters.], we can break this up,

\begin{align}
  p(D^{obs},M|\theta) = p(M|D^{obs})p(D^{obs}|\theta).
\end{align}

As we only care about inference on the complete data parameters, we can
write the likelihood as

\begin{align}
  L(\theta|D^{obs}) &\propto p(D^{obs}|\theta),
\end{align}

which we can rewrite using the law of iterated expectations as

\begin{align}
  p(D^{obs}|\theta) &= \int p(D|\theta) dD^{mis}.
\end{align}

With this likelihood and a flat prior on $\theta$, we can see that the
posterior is

\begin{equation}
 p(\theta | D^{obs}) \propto p(D^{obs}|\theta) = \int p(D|\theta) dD^{mis}.
\end{equation}

The main computational difficulty in the analysis of incomplete data is
taking draws from this posterior. The EM algorithm [@DemLaiRub77] is a
simple computational approach to finding the mode of the posterior. Our
EMB algorithm combines the classic EM algorithm with a bootstrap
approach to take draws from this posterior. For each draw, we bootstrap
the data to simulate estimation uncertainty and then run the EM
algorithm to find the mode of the posterior for the bootstrapped data,
which gives us fundamental uncertainty too [see @HonKin10 for details of
the EMB algorithm].

Once we have draws of the posterior of the complete-data parameters, we
make imputations by drawing values of $D^{mis}$ from its distribution
conditional on $D^{obs}$ and the draws of $\theta$, which is a linear
regression with parameters that can be calculated directly from
$\theta$.

### Analysis

In order to combine the results across $m$ data sets, first decide on
the quantity of interest to compute, such as a univariate mean,
regression coefficient, predicted probability, or first difference.
Then, the easiest way is to draw $1/m$ simulations of $q$ from each of
the $m$ data sets, combine them into one set of $m$ simulations, and
then to use the standard simulation-based methods of interpretation
common for single data sets [@KinTomWit00].

Alternatively, you can combine directly and use as the multiple
imputation estimate of this parameter, $\bar{q}$, the average of the $m$
separate estimates, $q_j$ $(j=1,\dots,m)$:

\begin{equation}
\bar{q}=\frac{1}{m}\sum^{m}_{j=1}q_j.
\end{equation}

The variance of the point estimate is the average of the estimated
variances from *within* each completed data set, plus the sample
variance in the point estimates *across* the data sets (multiplied by a
factor that corrects for the bias because $m<\infty$). Let $SE(q_j)^2$
denote the estimated variance (squared standard error) of $q_j$ from the
data set $j$, and $S^{2}_{q}=\sum^{m}_{j=1}(q_j-\bar{q})^2/(m-1)$ be the
sample variance across the $m$ point estimates. The standard error of
the multiple imputation point estimate is the square root of

\begin{equation}
SE(q)^2=\frac{1}{m}\sum^{m}_{j=1}SE(q_j)^2+S^2_q(1+1/m).
\end{equation}
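These combining rules are simple enough to code directly. Below is a
minimal R sketch, not the package's internal implementation (see
`mi.combine()` for the real tool); the vectors `q` and `se` are
hypothetical inputs holding the $m$ point estimates and their standard
errors:

```{r combine_sketch}
## combine m point estimates `q` and standard errors `se` using the
## rules above (hypothetical inputs)
mi.combine.sketch <- function(q, se) {
  m <- length(q)
  q.bar <- mean(q)              # multiple imputation point estimate
  within <- mean(se^2)          # average within-imputation variance
  between <- var(q)             # S^2_q, variance across point estimates
  se.bar <- sqrt(within + between * (1 + 1 / m))
  c(estimate = q.bar, std.error = se.bar)
}
```

## References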
/scratch/gouwar.j/cran-all/cranData/Amelia/inst/doc/intro-mi.Rmd
---
title: "Using Amelia"
date: "`r Sys.Date()`"
link-citations: yes
bibliography: amelia.bib
output:
  rmarkdown::html_vignette:
    keep_md: true
vignette: >
  %\VignetteIndexEntry{Using Amelia}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r setup, echo = FALSE, include = FALSE}
knitr::opts_chunk$set(fig.width = 5, fig.height = 4, fig.align = "center")
options(digits = 4, show.signif.stars = FALSE)
set.seed(12345)
```

## Data

We now demonstrate how to use Amelia using data from @MilKub05 which studies the effect of democracy on trade policy. For the purposes of this user's guide, we will use a subset restricted to nine developing countries in Asia from 1980 to 1999[^freetrade]. This dataset includes 10 variables:

| Variable   | Description                                          |
|:-----------|:-----------------------------------------------------|
| `year`     | year                                                 |
| `country`  | country                                              |
| `tariff`   | average tariff rates                                 |
| `polity`   | Polity IV Score[^polity]                             |
| `pop`      | total population                                     |
| `gdp.pc`   | gross domestic product per capita                    |
| `intresmi` | gross international reserves                         |
| `signed`   | dummy variable if signed an IMF agreement that year  |
| `fiveop`   | measure of financial openness                        |
| `usheg`    | measure of US hegemony[^hegemony]                    |

These variables correspond to the variables used in the analysis model of @MilKub05 in Table 2.

[^freetrade]: We have artificially added some missingness to these data for presentational purposes. You can access the original data at [https://scholar.princeton.edu/hvmilner/data](https://scholar.princeton.edu/hvmilner/data).

[^polity]: The Polity score is a number between -10 and 10 indicating how democratic a country is. A fully autocratic country would be a -10 while a fully democratic country would be a 10.

[^hegemony]: This measure of US hegemony is the US imports and exports as a percent of the world total imports and exports.

We first load Amelia and the data:

```{r load_data, results = "hide"}
library(Amelia)
data(freetrade)
```

We can check the summary statistics of the data to see that there is missingness on many of the variables:

```{r summarize_data}
summary(freetrade)
```

In the presence of missing data, most statistical packages use *listwise deletion*, which removes any row that contains a missing value from the analysis. Using the base model of @MilKub05 Table 2, we run a simple linear model in R, which uses listwise deletion:

```{r mk_lm}
summary(lm(tariff ~ polity + pop + gdp.pc + year + country, data = freetrade))
```

Note that 60 of the 171 original observations are deleted due to missingness. These observations, however, are partially observed, and contain valuable information about the relationships between those variables which are present in the partially completed observations. Multiple imputation will help us retrieve that information and make better, more efficient, inferences.

## Multiple Imputation

When performing multiple imputation, the first step is to identify the variables to include in the imputation model. It is crucial to include at least as much information as will be used in the analysis model. That is, any variable that will be in the analysis model should also be in the imputation model. This includes any transformations or interactions of variables that will appear in the analysis model. In fact, it is often useful to add more information to the imputation model than will be present when the analysis is run.
Since imputation is predictive, any variables that would increase predictive power should be included in the model, even if including them in the analysis model would produce bias in estimating a causal effect (such as for post-treatment variables) or collinearity would preclude determining which variable had a relationship with the dependent variable (such as including multiple alternate measures of GDP). In our case, we include all the variables in `freetrade` in the imputation model, even though our analysis model focuses on `polity`, `pop` and `gdp.pc`. We're not incorporating time or spatial data yet, but we do below.

To create multiple imputations in Amelia, we can simply run

```{r amelia}
a.out <- amelia(freetrade, m = 5, ts = "year", cs = "country")
a.out
```

Note that our example dataset is deliberately small both in variables and in cross-sectional elements. Typical datasets may often take hundreds or possibly a couple thousand steps of the EM algorithm. Long chains should remind the analyst to consider whether transformations of the variables would more closely fit the multivariate normal assumptions of the model (correct but omitted transformations will shorten the number of steps and improve the fit of the imputations), but do not necessarily denote problems with the imputation model.

The output gives some information about how the algorithm ran. Each of the imputed datasets is now in the list `a.out$imputations`. Thus, we could plot a histogram of the `tariff` variable from the 3rd imputation,

```{r}
hist(a.out$imputations[[3]]$tariff, col = "grey", border = "white")
```

### Saving imputed datasets {#sec_saving}

If you need to save your imputed datasets, one direct method is to save the output list from `amelia`,

```{r save, eval = FALSE}
save(a.out, file = "imputations.RData")
```

As in the previous example, the $i$-th imputed dataset can be retrieved from this list as `a.out$imputations[[i]]`.

In addition, you can save each of the imputed datasets to its own file using the `write.amelia()` command,

```{r write_amelia, eval = FALSE}
write.amelia(obj = a.out, file.stem = "outdata")
```

This will create one comma-separated value file for each imputed dataset in the following manner:

    outdata1.csv
    outdata2.csv
    outdata3.csv
    outdata4.csv
    outdata5.csv

The `write.amelia` function can also save files in tab-delimited and Stata (`.dta`) file formats. For instance, to save Stata files, simply change the `format` argument to `"dta"`,

```{r write_dta, eval = FALSE}
write.amelia(obj = a.out, file.stem = "outdata", format = "dta")
```

Additionally, `write.amelia()` can create a "stacked" version of the imputed dataset which stacks each imputed dataset on top of one another. This can be done by setting the `separate` argument to `FALSE`. The resulting matrix is of size $(N \cdot m) \times p$ if the original dataset is excluded (`orig.data = FALSE`) and of size $(N \cdot (m+1)) \times p$ if it is included (`orig.data = TRUE`). The stacked dataset will include a variable (set with `impvar`) that indicates to which imputed dataset the observation belongs.

## Combining multiple calls to `amelia()`

The EMB algorithm is what computer scientists call *embarrassingly parallel*, meaning that it is simple to separate each imputation into parallel processes. With Amelia it is simple to run subsets of the imputations on different machines and then combine them after the imputation for use in the analysis model. This allows for a huge increase in the speed of the algorithm.
Output lists from different Amelia runs can be combined into a new list. For instance, suppose that we wanted to add another ten imputed datasets to our earlier call to `amelia()`. First, run the function to get these additional imputations,

```{r more_amelia}
a.out.more <- amelia(freetrade, m = 10, ts = "year", cs = "country", p2s = 0)
a.out.more
```

then combine this output with our original output using the `ameliabind()` function,

```{r ameliabind}
a.out.more <- ameliabind(a.out, a.out.more)
a.out.more
```

This function binds the two outputs into the same output so that you can pass the combined imputations easily to analysis models and diagnostics. Note that `a.out.more` now has a total of 15 imputations.

A simple way to execute a parallel processing scheme with Amelia would be to run `amelia()` with `m` set to 1 on $m$ different machines or processors, save each output using the `save()` function, load them all into the same R session using the `load()` command and then combine them using `ameliabind()`. In order to do this, however, make sure to give each of the outputs a different name so that they do not overwrite each other when loading into the same R session. Also, some parallel environments will dump all generated files into a common directory, where they may overwrite each other. If it is convenient in a parallel environment to run a large number of `amelia()` calls from a single piece of code, one useful way to avoid overwriting is to create the `file.stem` with a random suffix. For example:

```{r rand_stem, eval = FALSE}
b <- round(runif(1, min = 1111, max = 9999))
random.name <- paste("am", b, sep = "")
amelia <- write.amelia(obj = a.out, file.stem = random.name)
```

### Screen output

Screen output can be adjusted with the "print to screen" argument, `p2s`. At a value of 0, no screen printing will occur. This may be useful in large jobs or simulations where a very large number of imputation models may be required. The default value of 1 lists each bootstrap, and displays the number of iterations required to reach convergence in that bootstrapped dataset. The value of 2 gives more thorough screen output, including, at each iteration, the number of parameters that have significantly changed since the last iteration. This may be useful when the EM chain length is very long, as it can provide an intuition for how many parameters still need to converge in the EM chain, and a sense of the time remaining. However, it is worth noting that the last several parameters can often take a significant fraction of the total number of iterations to converge. Setting `p2s` to 2 will also generate information on how the EM algorithm is behaving, such as a `!` when the current estimated complete data covariance matrix is not invertible and a `*` when the likelihood has not monotonically increased in that step. Having many of these two symbols in the screen output is an indication of a problematic imputation model. Problems of non-invertible matrices often mean that the current guess for the covariance matrix is singular. This is a sign that there may be two highly correlated variables in the model. One way to resolve this is to use a ridge prior (see \@ref(sec_prior)).

An example of the output when `p2s` is 2 would be

```{r p2s}
a.out.p2s <- amelia(freetrade, m = 1, ts = "year", cs = "country", p2s = 2)
```

## Parallel Imputation {#sec:parallel}

Each imputation in the above EMB algorithm is completely independent of any other imputation, a property called embarrassingly parallel. This type of approach can take advantage of the multiple-core infrastructure of modern CPUs. Each core in a multi-core processor can execute independent operations in parallel. Amelia can utilize this parallel processing internally via the `parallel` and the `ncpus` arguments. The `parallel` argument sets the parallel processing backend, either with `"multicore"` or `"snow"` (or `"no"` for no parallel processing). The `"multicore"` backend is not available on Windows systems, but tends to be quicker at parallel processing. On a Windows system, the `"snow"` backend provides parallel processing through a cluster of worker processes across the CPUs. You can set the default for this argument using the `"amelia.parallel"` option. This allows you to run Amelia in parallel as the default for an entire R session without setting arguments in the `amelia()` call.

For each of the parallel backends, Amelia requires a number of CPUs to use in parallel. This can be set using the `ncpus` argument. It can be higher than the number of physical cores in the system if hyperthreading or other technologies are available. You can use the `parallel::detectCores()` function to determine how many cores are available on your machine. The default for this argument can be set using the `"amelia.ncpus"` option.

On Unix-alike systems (such as macOS and Linux distributions), the `"multicore"` backend automatically sets up and stops the parallel workers by forking the process. On Windows, the `"snow"` backend requires more attention. Amelia will attempt to create a parallel cluster of worker processes (since Windows systems cannot fork a process) and will stop this cluster after the imputations are complete. Alternatively, Amelia also has a `cl` argument, which accepts a predefined cluster made using `parallel::makePSOCKcluster()`. For more information about parallel processing in R, see the documentation for the `parallel` package that ships along with R or the CRAN Task View on [Parallel Computing with R](https://cran.r-project.org/view=HighPerformanceComputing).
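Running the imputations in parallel, then, only requires setting these two arguments. A minimal sketch (not run here, since the `"multicore"` backend is unavailable on Windows, and `ncpus = 2` is just an illustrative value):

```{r parallel_sketch, eval = FALSE}
a.out.par <- amelia(freetrade, m = 5, ts = "year", cs = "country",
                    parallel = "multicore", ncpus = 2)
```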
## Improving Imputations via Transformations {#sec:trans}

Social science data commonly include variables that fail to fit a multivariate normal distribution. Indeed, numerous models have been introduced specifically to deal with the problems they present. As it turns out, much evidence in the literature [discussed in @KinHonJos01] indicates that the multivariate normal model used in Amelia usually works well for the imputation stage even when discrete or non-normal variables are included and when the analysis stage involves limited dependent variable models. Nevertheless, Amelia includes some limited capacity to deal directly with ordinal and nominal variables and to modify variables that require other transformations. In general, nominal and log-transformed variables should be declared to Amelia, whereas ordinal (including dichotomous) variables often need not be, as described below. (For harder cases, see @Schafer97 for specialized MCMC-based imputation models for discrete variables.)

Although these transformations are performed internally on these variables to better fit the data to the multivariate normal assumptions of the imputation model, all the imputations that are created will be returned in the original untransformed form of the data.
If the user has already performed transformations on their data (such as by taking a log or square root prior to feeding the data to `amelia()`) these do not need to be declared, as that would result in the transformation occurring *doubly* in the imputation model. The fully imputed data sets that are returned will always be in the form of the original data that is passed to the `amelia()` routine.

### Ordinal {#sec:ord}

In much statistical research, researchers treat independent ordinal (including dichotomous) variables as if they were really continuous. If the analysis model to be employed is of this type, then nothing extra is required of the imputation model. Users are advised to allow Amelia to impute non-integer values for any missing data, and to use these non-integer values in their analysis. Sometimes this makes sense, and sometimes this defies intuition. One particular imputation of 2.35 for a missing value on a seven-point scale carries the intuition that the respondent is between a 2 and a 3 and most probably would have responded 2 had the data been observed. This is easier to accept than an imputation of 0.79 for a dichotomous variable where a zero represents a male and a one represents a female respondent. However, in both cases the non-integer imputations carry more information about the underlying distribution than would be carried if we were to force the imputations to be integers. Thus whenever the analysis model permits, missing ordinal observations should be allowed to take on continuously valued imputations.

In the `freetrade` data, one such ordinal variable is `polity` which ranges from -10 (full autocracy) to 10 (full democracy). If we tabulate this variable from one of the imputed datasets,

```{r polity_tab}
table(a.out$imputations[[3]]$polity)
```

we can see that there is one imputation between -4 and -3 and one imputation between 6 and 7. Again, the interpretation of these values is rather straightforward even if they are not strictly in the coding of the original Polity data.

Often, however, analysis models require some variables to be strictly ordinal, as for example, when the dependent variable will be modeled in a logistic or Poisson regression. Imputations for variables set as ordinal are created by taking the continuously valued imputation and using an appropriately scaled version of this as the probability of success in a binomial distribution. The draw from this binomial distribution is then translated back into one of the ordinal categories. For our data we can simply add `polity` to the `ords` argument:

```{r polity_ord}
a.out1 <- amelia(freetrade, m = 5, ts = "year", cs = "country",
                 ords = "polity", p2s = 0)
table(a.out1$imputations[[3]]$polity)
```

Now, we can see that all of the imputations fall into one of the original polity categories.

### Nominal {#sec:nom}

Nominal variables[^binary] must be treated quite differently than ordinal variables. Any multinomial variables in the data set (such as religion coded 1 for Catholic, 2 for Jewish, and 3 for Protestant) must be specified to Amelia. In our `freetrade` dataset, we have `signed` which is 1 if a country signed an IMF agreement in that year and 0 if it did not.
Of course, our first imputation did not limit the imputations to these two categories

```{r binary_tab}
table(a.out1$imputations[[3]]$signed)
```

In order to fix this for a $p$-category multinomial variable, Amelia will determine $p$ (as long as your data contain at least one value in each category), and substitute $p-1$ binary variables to specify each possible category. These new $p-1$ variables will be treated like the other variables in the multivariate normal imputation method chosen, and receive continuous imputations. These continuously valued imputations will then be appropriately scaled into probabilities for each of the $p$ possible categories, and one of these categories will be drawn, whereupon the original $p$-category multinomial variable will be reconstructed and returned to the user. Thus all imputations will be appropriately multinomial.

[^binary]: Dichotomous (two-category) variables are a special case of nominal variables. For these variables, the nominal and ordinal methods of transformation in Amelia agree.

For our data we can simply add `signed` to the `noms` argument:

```{r noms}
a.out2 <- amelia(freetrade, m = 5, ts = "year", cs = "country",
                 noms = "signed", p2s = 0)
table(a.out2$imputations[[3]]$signed)
```

Note that Amelia can only fit imputations into categories that exist in the original data. Thus, if there was a third category of signed, say 2, that corresponded to a different kind of IMF agreement, but it never occurred in the original data, Amelia could not match imputations to it.

Since Amelia properly treats a $p$-category multinomial variable as $p-1$ variables, one should understand the number of parameters that are quickly accumulating if many multinomial variables are being used. If the square of the number of real and constructed variables is large relative to the number of observations, it is useful to use a ridge prior as in section \@ref(sec_prior).

### Natural log {#sec:log}

If one of your variables is heavily skewed or has outliers that may alter the imputation in an unwanted way, you can use a natural logarithm transformation of that variable in order to normalize its distribution. This transformed distribution helps Amelia to avoid imputing values that depend too heavily on outlying data points. Log transformations are common in expenditure and economic variables where we have strong beliefs that the marginal relationship between two variables decreases as we move across the range.

For instance, we can show the `tariff` variable clearly has positive (or right) skew while its natural log transformation has a roughly normal distribution.

```{r tarrif_hist}
hist(freetrade$tariff, col = "grey", border = "white")
hist(log(freetrade$tariff), col = "grey", border = "white")
```

### Square root {#sec:sqrt}

Event count data is often heavily skewed and has nonlinear relationships with other variables. One common transformation to tailor the linear model to count data is to take the square roots of the counts. This is a transformation that can be set as an option in Amelia.

### Logistic {#sec:lgstc}

Proportional data is sharply bounded between 0 and 1. A logistic transformation is one possible option in Amelia to make the distribution symmetric and relatively unbounded.
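Each of these transformations is declared by passing a vector of column names to the corresponding argument of `amelia()`. A sketch of the syntax (not run; `freetrade` contains no count or proportion variables, so `mydata` and the variable names here are purely illustrative):

```{r trans_sketch, eval = FALSE}
a.out.tr <- amelia(mydata, logs = "expenditure", sqrts = "event.count",
                   lgstc = "proportion")
```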
## Identification Variables {#sec:idvars}

Datasets often contain identification variables, such as country names, respondent numbers, or other identification numbers, codes or abbreviations. Sometimes these are text and sometimes these are numeric. Often it is not appropriate to include these variables in the imputation model, but it is useful to have them remain in the imputed datasets. (However, there are models that would include the ID variables in the imputation model, such as a fixed-effects model for data with repeated observations of the same countries.) Identification variables which are not to be included in the imputation model can be identified with the argument `idvars`. These variables will not be used in the imputation model, but will be kept in the imputed datasets.

If `year` and `country` contained no information except labels, we could omit them from the imputation:

```{r idvars}
amelia(freetrade, idvars = c("year", "country"))
```

Note that Amelia will return with an error if your dataset contains a factor or character variable that is not marked as a nominal or identification variable. Thus, if we were to omit the factor `country` from the `cs` or `idvars` arguments, we would receive an error:

```{r idvars_error}
a.out2 <- amelia(freetrade, idvars = c("year"))
```

In order to conserve memory, it is wise to remove unnecessary variables from a data set before loading it into Amelia. The only variables you should include in your data when running Amelia are variables you will use in the analysis stage and those variables that will help in the imputation model. While it may be tempting to simply mark unneeded variables as IDs, it only serves to waste memory and slow down the imputation procedure.

## Time Series, or Time Series Cross Sectional Data {#sec:tscs}

Many variables that are recorded over time within a cross-sectional unit are observed to vary smoothly over time. In such cases, knowing the observed values of observations close in time to any missing value may enormously aid the imputation of that value. However, the exact pattern may vary over time within any cross-section. There may be periods of growth, stability, or decline; in each of which the observed values would be used in a different fashion to impute missing values. Also, these patterns may vary enormously across different cross-sections, or may exist in some and not others.

Amelia can build a general model of patterns within variables across time by creating a sequence of polynomials of the time index. If, for example, tariffs vary smoothly over time, then we make the modeling assumption that there exists some polynomial that describes the tariff rate in cross-sectional unit $i$ at time $t$ as:
\[
\textrm{tariff}_{ti} = \beta_0 + \beta_1 t + \beta_2 t^2 + \beta_3 t^3 \ldots
\]
And thus if we include enough higher-order terms of time then the pattern between observed values of the tariff rate can be estimated. Amelia will create polynomials of time up to the user-defined $k$-th order ($k \leq 3$).

We can implement this with the `ts` and `polytime` arguments. If we thought that a second-order polynomial would help predict the missing values, we could run

```{r polytime, results = "hide"}
a.out2 <- amelia(freetrade, ts = "year", cs = "country", polytime = 2)
```

With this input, Amelia will add covariates to the model that correspond to time and its polynomials. These covariates will help better predict the missing values. If cross-sectional units are specified these polynomials can be interacted with the cross-section unit to allow the patterns over time to vary between cross-sectional units. Unless you strongly believe all units have the same patterns over time in all variables (including the same constant term), this is a reasonable setting.
When $k$ is set to 0, this interaction simply results in a model of *fixed effects* where every unit has a uniquely estimated constant term. Amelia does not smooth the observed data, and only uses this functional form, or one you choose, with all the other variables in the analysis and the uncertainty of the prediction, to impute the missing values.

In order to impute with trends specific to each cross-sectional unit, we can set `intercs` to `TRUE`:

```{r intercs, results = "hide"}
a.out.time <- amelia(freetrade, ts = "year", cs = "country", polytime = 1,
                     intercs = TRUE, p2s = 2)
```

Note that attempting to use `polytime` without the `ts` argument, or `intercs` without the `cs` argument will result in an error.

Using the `tscsPlot()` function (discussed below), we can see that we have a much better prediction about the missing values when incorporating time than when we omit it:

```{r tcomp1}
tscsPlot(a.out, cs = "Malaysia", main = "Malaysia (no time settings)",
         var = "tariff", ylim = c(-10, 60))
tscsPlot(a.out.time, cs = "Malaysia", main = "Malaysia (with time settings)",
         var = "tariff", ylim = c(-10, 60))
```

### Lags and leads {#sec:lags}

An alternative way of handling time-series information is to include lags and leads of certain variables into the imputation model. *Lags* are variables that take the value of another variable in the previous time period while *leads* take the value of another variable in the next time period. Many analysis models use lagged variables to deal with issues of endogeneity, thus using leads may seem strange. It is important to remember, however, that imputation models are predictive, not causal. Thus, since both past and future values of a variable are likely correlated with the present value, both lags and leads should improve the model.

If we wanted to include lags and leads of tariffs, for instance, we would simply pass this to the `lags` and `leads` arguments:

```{r lags_leads}
a.out2 <- amelia(freetrade, ts = "year", cs = "country",
                 lags = "tariff", leads = "tariff")
```

## Including Prior Information

Amelia has a number of methods of setting priors within the imputation model. Two of these are commonly used and discussed below: ridge priors and observational priors.

### Ridge priors for high missingness, small samples, or large correlations {#sec_prior}

When the data to be analyzed contain a high degree of missingness or very strong correlations among the variables, or when the number of observations is only slightly greater than the number of parameters $p(p+3)/2$ (where $p$ is the number of variables), results from your analysis model will be more dependent on the choice of imputation model. This suggests more testing in these cases of alternative specifications under Amelia. This can happen when polynomials of time, interacted with the cross section, are included in the imputation model. For example, in our data, if we used a polynomial of degree 2 with unit-specific trends and there are 9 countries, it would add $3 \times 9 - 1 = 26$ more variables to the imputation model (dropping one of the fixed effects for identification). When these are added, the EM algorithm can become unstable. You can detect this by inspecting the screen output under `p2s = 2` or by observing that the number of iterations per imputation is very divergent.

In these circumstances, we recommend adding a ridge prior which will help with numerical stability by shrinking the covariances among the variables toward zero without changing the means or variances.
This can be done by including the `empri` argument. Including this prior as a positive number is roughly equivalent to adding `empri` artificial observations to the data set with the same means and variances as the existing data but with zero covariances. Thus, increasing the `empri` setting results in more shrinkage of the covariances, putting more a priori structure on the estimation problem: like many Bayesian methods, it reduces variance in return for an increase in bias that one hopes does not overwhelm the advantages in efficiency. In general, we suggest keeping the value of this prior relatively small and increasing it only when necessary. A recommendation of 0.5 to 1 percent of the number of observations, $n$, is a reasonable starting value, and often useful in large datasets to add some numerical stability. For example, in a dataset of two thousand observations, this would translate to a prior value of 10 or 20 respectively. A prior of up to 5 percent is moderate in most applications and 10 percent is a reasonable upper bound.

For our data, it is easy to code up a 1 percent ridge prior:

```{r empri}
a.out.time2 <- amelia(freetrade, ts = "year", cs = "country", polytime = 1,
                      intercs = TRUE, p2s = 0, empri = .01 * nrow(freetrade))
a.out.time2
```

### Observation-level priors {#sec:obspri}

Researchers often have additional prior information about missing data values based on previous research, academic consensus, or personal experience. Amelia can incorporate this information to produce vastly improved imputations. The Amelia algorithm allows users to include informative Bayesian priors about individual missing data cells instead of the more general model parameters, many of which have little direct meaning.

The incorporation of priors follows basic Bayesian analysis where the imputation turns out to be a weighted average of the model-based imputation and the prior mean, where the weights are functions of the relative strength of the data and prior: when the model predicts very well, the imputation will down-weight the prior, and vice versa [@HonKin10].

The priors about individual observations should describe the analyst's belief about the distribution of the missing data cell. This can either take the form of a mean and a standard deviation or a confidence interval. For instance, we might know that 1986 tariff rates in Thailand were around 40%, but we have some uncertainty as to the exact value. Our prior belief about the distribution of the missing data cell, then, centers on 40 with a standard deviation that reflects the amount of uncertainty we have about our prior belief.

To input priors you must build a priors matrix with either four or five columns. Each row of the matrix represents a prior on either one observation or one variable. In any row, the entry in the first column is the row of the observation and the entry in the second column is the column of the observation. In the four-column priors matrix the third and fourth columns are the mean and standard deviation of the prior distribution of the missing value.

For instance, suppose that we had some expert prior information about tariff rates in Thailand. We know from the data that Thailand is missing tariff rates in many years,

```{r thailand}
freetrade[freetrade$country == "Thailand", c("year", "country", "tariff")]
```

Suppose that we had expert information that tariff rates were roughly 40% in Thailand between 1986 and 1988 with about a 6% margin of error. This corresponds to a standard deviation of about 3.
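Assuming the 6% margin of error describes a 95% interval, this conversion is easy to check:

```{r moe_sd}
6 / qnorm(0.975)  # roughly 3.06, i.e., a standard deviation of about 3
```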
In order to include this information, we must form the priors matrix:

```{r build_prior}
pr <- matrix(
  c(158, 159, 160,
    3, 3, 3,
    40, 40, 40,
    3, 3, 3),
  nrow = 3, ncol = 4
)
pr
```

The first column of this matrix corresponds to the row numbers of Thailand in these three years, the second column refers to the column number of `tariff` in the data and the last two columns refer to the actual prior. Once we have this matrix, we can pass it to `amelia()`,

```{r amelia_prior}
a.out.pr <- amelia(freetrade, ts = "year", cs = "country", priors = pr)
```

In the five-column matrix, the last three columns describe a confidence range of the data. The columns are a lower bound, an upper bound, and a confidence level between 0 and 1, exclusive. Whichever format you choose, it must be consistent across the entire matrix. We could get roughly the same prior as above by utilizing this method. Our margin of error implies that we would want imputations between 34 and 46, so our matrix would be

```{r build_prior2}
pr.2 <- matrix(
  c(158, 159, 160,
    3, 3, 3,
    34, 34, 34,
    46, 46, 46,
    0.95, 0.95, 0.95),
  nrow = 3, ncol = 5
)
pr.2
```

These priors indicate that we are 95% confident that these missing values are in the range 34 to 46.

If a prior has the value 0 in the first column, this prior will be applied to all missing values in this variable, except for explicitly set priors. Thus, we could set a prior for the entire `tariff` variable of 20, but still keep the above specific priors with the following code:

```{r build_prior3}
pr.3 <- matrix(
  c(158, 159, 160, 0,
    3, 3, 3, 3,
    40, 40, 40, 20,
    3, 3, 3, 5),
  nrow = 4, ncol = 4
)
pr.3
```

### Logical bounds

In some cases, variables in the social sciences have known logical bounds. Proportions must be between 0 and 1 and duration data must be greater than 0, for instance. Many of these logical bounds can be handled by using the correct transformation for that type of variable (see \@ref(sec:trans) for more details on the transformations handled by Amelia). In the occasional case that imputations must satisfy certain logical bounds not handled by these transformations, Amelia can take draws from a truncated normal distribution in order to achieve imputations that satisfy the bounds.

Note, however, that this procedure imposes extremely strong restrictions on the imputations and can lead to lower variances than the imputation model implies. The mean value across all the imputed values of a missing cell is the best guess from the imputation model of that missing value. The variance of the distribution across imputed datasets correctly reflects the uncertainty in that imputation. It is often the mean imputed value that should conform to any known bounds, even if individual imputations are drawn beyond those bounds. The mean imputed value can be checked with the diagnostics presented in the next section. In general, building a more predictive imputation model will lead to better imputations than imposing bounds.

Amelia implements these bounds by rejection sampling. When drawing the imputations from their posterior, we repeatedly resample until we have a draw that satisfies all of the logical constraints. You can set an upper limit on the number of times to resample with the `max.resample` argument. Thus, if after `max.resample` draws, the imputations are still outside the bounds, Amelia will set the imputation at the edge of the bounds. Thus, if the bounds were 0 and 100 and all of the draws were negative, Amelia would simply impute 0.
As an extreme example, suppose that we know, for certain, that tariff rates had to fall between 30 and 40. This, obviously, is not true, but we can generate imputations from this model. In order to specify these bounds, we need to generate a matrix of bounds to pass to the `bounds` argument. This matrix will have 3 columns: the first is the column for the bounded variable, the second is the lower bound and the third is the upper bound. Thus, to implement our bound on tariff rates (the 3rd column of the dataset), we would create the matrix,

```{r build_bounds}
bds <- matrix(c(3, 30, 40), nrow = 1, ncol = 3)
bds
```

which we can pass to the `bounds` argument to `amelia()`:

```{r amelia_bounds}
a.out.bds <- amelia(freetrade, ts = "year", cs = "country", bounds = bds,
                    max.resample = 1000)
```

The difference in results between the bounded and unbounded models is not obvious from the output, but inspection of the imputed tariff rates for Malaysia shows that there has been a drastic restriction of the imputations to the desired range:

```{r bounds_plot}
tscsPlot(a.out, cs = "Malaysia", main = "No logical bounds", var = "tariff",
         ylim = c(-10, 60))
tscsPlot(a.out.bds, cs = "Malaysia", main = "Bounded between 30 and 40",
         var = "tariff", ylim = c(-10, 60))
```

Again, analysts should be extremely cautious when using these bounds as they can seriously affect the inferences from the imputation model, as shown in this example. Even when logical bounds exist, we recommend simply imputing variables normally, as the violation of the logical bounds represents part of the true uncertainty of imputation.

## Post-imputation Transformations {#sec_postimptrans}

In many cases, it is useful to create transformations of the imputed variables for use in further analysis. For instance, one may want to create an interaction between two variables or perform a log-transformation on the imputed data. To do this, Amelia includes a `transform()` function for `amelia()` output that adds or overwrites variables in each of the imputed datasets. For instance, if we wanted to create a log-transformation of the `gdp.pc` variable, we could use the following command:

```{r amelia_transform}
a.out <- transform(a.out, lgdp = log(gdp.pc))
head(a.out$imputations[[1]][, c("country", "year", "gdp.pc", "lgdp")])
```

To create an interaction between two variables, we could simply use:

```{r interaction}
a.out <- transform(a.out, pol_gdp = polity * gdp.pc)
```

Each transformation is recorded and the `summary()` command prints out each transformation that has been performed:

```{r sum_trans}
summary(a.out)
```

Note the updated output is almost exactly the same as the fresh `amelia()` output. You can pass the transformed output back to `amelia()` and it will add imputations and update these imputations with the transformations you have performed.

## Analysis Models {#sec_analysis}

Imputation is most often a data processing step as opposed to a final model in and of itself. To this end, it is easy to pass output from `amelia()` to other functions. The easiest and most integrated way to run an analysis model is to use the `with()` and `mi.combine()` functions. For example, in @MilKub05, the dependent variable was tariff rates. We can replicate Table 5.1 from their analysis with the original data simply by running

```{r lm_lwd}
orig.model <- lm(tariff ~ polity + pop + gdp.pc + year + country,
                 data = freetrade)
orig.model
```

Running the same model with imputed data is almost identical.
We can run the `lm` within each imputed data set by using the `with()` function:

```{r lm_imp}
imp.models <- with(
  a.out,
  lm(tariff ~ polity + pop + gdp.pc + year + country)
)
imp.models[1:2]
```

The result here is simply a list of output of `lm()` applied to each imputed data set. We can combine the imputed estimates using the rules described in @KinHonJos01 and @Schafer97 with the `mi.combine()` function:

```{r mi_combine}
out <- mi.combine(imp.models, conf.int = TRUE)
out
```

The combination of the results depends on the [broom](https://broom.tidymodels.org) package and results can be combined if a `tidy()` method exists for the estimation function passed to `with()`. Other packages such as [Zelig](https://zeligproject.org) can also combine imputed data sets across a number of statistical models. Furthermore, users can easily export their imputations using the `write.amelia()` function as described in \@ref(sec_saving) and use statistical packages other than R for the analysis model.

In addition to the resources available in R, users can draw on Stata to implement their analysis models. As of version 11, Stata has built-in handling of multiply imputed datasets. In order to utilize this functionality, simply export the "stacked" imputations using the `write.amelia()` function:

```{r write_dta_stacked, eval = FALSE}
write.amelia(a.out, separate = FALSE, file.stem = "outdata", format = "dta")
```

Once this stacked dataset is open in Stata, you must tell Stata that it is an imputed dataset using the `mi import flong` command:

```{stata eval = FALSE}
mi import flong, m(imp) id(year country) imp(tariff-usheg)
```

The command takes a few options: `m` designates the imputation variable (set with `impvar` in `write.amelia()`), `id` sets the identifying variables, and `imp` sets the variables that were imputed (or included in the imputation). The `tariff-usheg` indicates that Stata should treat the range of variables between `tariff` and `usheg` as imputed. Once we have set the dataset as imputed, we can use the built-in `mi` commands to analyze the data:

```{stata eval = FALSE}
mi estimate: reg tariff polity pop gdp_pc
```

```
Multiple-imputation estimates                     Imputations     =          5
Linear regression                                 Number of obs   =        171
                                                  Average RVI     =     1.4114
                                                  Complete DF     =        167
DF adjustment:   Small sample                     DF:     min     =      10.36
                                                          avg     =      18.81
                                                          max     =      37.62
Model F test:       Equal FMI                     F(   2,   10.4) =      15.50
Within VCE type:          OLS                     Prob > F        =     0.0008

------------------------------------------------------------------------------
      tariff |      Coef.   Std. Err.      t    P>|t|     [95% Conf. Interval]
-------------+----------------------------------------------------------------
      polity |  -.2058115   .3911049    -0.53   0.610    -1.072968    .6613452
         pop |   3.21e-08   8.72e-09     3.68   0.004     1.27e-08    5.14e-08
      gdp_pc |  -.0027561    .000644    -4.28   0.000    -.0040602   -.0014519
       _cons |   32.70461   2.660091    12.29   0.000     27.08917    38.32005
------------------------------------------------------------------------------
```

## The `amelia` class {#sec_out}

The output from the `amelia()` function is an instance of the S3 class `amelia`. Instances of the `amelia` class contain much more than simply the imputed datasets. The `mu` object of the class contains the posterior draws of the means of the complete data. The `covMatrices` contains the posterior draws of the covariance matrices of the complete data. Note that these correspond to the variables as they are sent to the EM algorithm. Namely, they refer to the variables after being transformed, centered and scaled.
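A quick, illustrative way to inspect these stored draws (the exact dimensions depend on your data and the number of imputations):

```{r inspect_draws}
dim(a.out$mu)           # posterior draws of the means, one draw per imputation
dim(a.out$covMatrices)  # posterior draws of the covariance matrices
```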
The `iterHist` object is a list of `m` 3-column matrices. Each row of the matrices corresponds to an iteration of the EM algorithm. The first column indicates how many parameters had yet to converge at that iteration. The second column indicates if the EM algorithm made a step that decreased the number of converged parameters. The third column indicates whether the covariance matrix at this iteration was singular. Clearly, the last two columns are meant to indicate when the EM algorithm enters a problematic part of the parameter space.

## References
/scratch/gouwar.j/cran-all/cranData/Amelia/inst/doc/using-amelia.Rmd
library(Amelia)
data(africa)

a.out <- amelia(x = africa, cs = "country", ts = "year", logs = "gdp_pc")
a.out2 <- transform(a.out, lgdppc = log(gdp_pc), newinfl = infl * 100)
a.out2 <- transform(a.out2, newclib = civlib * 100, newtrade = trade / 100)
summary(a.out2)
summary(transform(a.out, lgdppc = log(gdp_pc), newinfl = infl * 100))

a.out3 <- amelia(a.out2)
a.out4 <- amelia(a.out)

africa <- transform(africa, ivar = gdp_pc * trade)
a.out <- amelia(x = africa, cs = "country", ts = "year", logs = "gdp_pc")
a.out2 <- transform(a.out, ivar = gdp_pc * trade, lgdppc = log(gdp_pc))
summary(a.out2)
a.out3 <- amelia(a.out2)

compare.density(a.out2, "lgdppc")
/scratch/gouwar.j/cran-all/cranData/Amelia/inst/test/transform.R
---
title: "AmeliaView GUI Guide"
date: "`r Sys.Date()`"
link-citations: yes
bibliography: amelia.bib
output:
  rmarkdown::html_vignette:
    keep_md: true
vignette: >
  %\VignetteIndexEntry{AmeliaView GUI Guide}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r setup, echo = FALSE, include = FALSE}
knitr::opts_chunk$set(fig.width = 5, fig.height = 4, fig.align = "center")
options(digits = 4, show.signif.stars = FALSE)
set.seed(12345)
```

Below is a guide to the AmeliaView menus with references back to the user's guide. The same principles from the user's guide apply to AmeliaView. The only difference is how you interact with the program. Whether you use the GUI or the command line versions, the same underlying code is being called, and so you can read the command line-oriented discussion above even if you intend to use the GUI.

## Loading AmeliaView

The easiest way to load AmeliaView is to open an R session and type the following two commands:

```{r load_av, eval = FALSE}
library(Amelia)
AmeliaView()
```

This will bring up the AmeliaView window on any platform.

![AmeliaView welcome screen](assets/splash.png)

## Loading data into AmeliaView

AmeliaView loads with a welcome screen that has buttons which can load data in many of the common formats. Each of these will bring up a window for choosing your dataset. Note that these buttons are only a subset of the possible ways to load data in AmeliaView. Under the File menu (shown below), you will find more options, including the datasets included in the package (`africa` and `freetrade`). You will also find import commands for Comma-Separated Values (.CSV), Tab-Delimited Text (.TXT), Stata v.5-10 (.DTA), SPSS (.DAT), and SAS Transport (.XPORT). Note that when using a CSV file, Amelia assumes that your file has a header (that is, a row at the top of the data indicating the variable names).

![AmeliaView File and import menu.](assets/import.png)

You can also load data from an RData file. If the RData file contains more than one `data.frame`, a pop-up window will ask you to find the dataset you would like to load. In the file menu, you can also change the underlying working directory. This is where AmeliaView will look for data by default and where it will save imputed datasets.

## Variable Dashboard

![Main variable dashboard in AmeliaView](assets/main.png)

Once a dataset is loaded, AmeliaView will show the variable dashboard. In this mode, you will see a table of variables, with the current options for each of them shown, along with a few summary statistics. You can reorder this table by any of these columns by clicking on the column headings. This might be helpful to, say, order the variables by mean or amount of missingness.

![Variable options via right-click menu on the variable dashboard](assets/context-menu.png)

You can set options for individual variables by the right-click context menu or through the "Variables" menu. For instance, clicking "Set as Time-Series Variable" will set the currently selected variable in the dashboard as the time-series variable. Certain options are disabled until other options are enabled. For instance, you cannot add a lagged variable to the imputation until you have set the time-series variable. Note that any `factor` in the data is marked as an ID variable by default, since a `factor` cannot be included in the imputation without being set as an ID variable, a nominal variable, or the cross-section variable.
If there is a `factor` that fails to meet one of these conditions, a red flag will appear next to the variable name.

1. **Set as Time-Series Variable** - Sets the currently selected variable to the time-series variable. Disabled when more than one variable is selected. Once this is set, you can add lags and leads and add splines of time. The time-series variable will have a clock icon next to it.
2. **Set as Cross-Section Variable** - Sets the currently selected variable to the cross-section variable. Disabled when more than one variable is selected. Once this is set, you can interact the splines of time with the cross-section. The cross-section variable will have a person icon next to it.
3. **Unset as Time-Series Variable** - Removes the time-series status of the variable. This will remove any lags, leads, or splines of time.
4. **Unset as Cross-Section Variable** - Removes the cross-section status of the variable. This will remove any intersection of the splines of time and the cross-section.
5. **Add Lag/Lead** - Adds versions of the selected variables either lagged back ("lag") or forward ("lead").
6. **Remove Lag/Lead** - Removes any lags or leads on the selected variables.
7. **Plot Histogram of Selected** - Plots a histogram of the selected variables. This command will attempt to put all of the histograms on one page, but if more than nine histograms are requested, they will appear on multiple pages.
8. **Add Transformation...** - Adds a transformation setting for the selected variables. Note that each variable can only have one transformation and the time-series and cross-section variables cannot be transformed.
9. **Remove Transformation** - Removes any transformation for the selected variables.
10. **Add or Edit Bounds** - Opens a dialog box to set logical bounds for the selected variable.

## Amelia Options

![Options menu](assets/options.png)

The "Variable" menu and the variable dashboard are the place to set variable-level options, but global options are set in the "Options" menu. For more information on these options, see `vignette("using-amelia")`.

1. **Splines of Time with...** - This option, if activated, will have Amelia use flexible trends of time with the specified number of knots in the imputation. The higher the number of knots the greater the variation in the trend structure, yet it will take more degrees of freedom to estimate.
2. **Interact with Cross-Section?** - Include an interaction of the cross-section with the time trends. This interaction is a way of allowing the trend of time to vary across cases as well. Using a 0-level spline of time and interacting with the cross section is the equivalent of using fixed effects.
3. **Add Observational Priors...** - Brings a dialog window to set prior beliefs about ranges for individual missing observations.
4. **Numerical Options** - Brings a dialog window to set the tolerance of the EM algorithm, the seed of the random number generator, the ridge prior for numerical stability, and the maximum number of redraws for the logical bounds.
5. **Draw Missingness Map** - Draws a missingness map.
6. **Output File Options** - Brings a dialog to set the stub of the prefix of the imputed data files and the number of imputations. If you set the prefix to `mydata`, your output files will be `mydata1.csv, mydata2.csv...` etc.
7. **Output File Type** - Sets the format of imputed data. If you would like to not save any output data sets (if you wanted, for instance, to simply look at diagnostics), set this option to "(no save)."
Currently, you can save the output data as: Comma Separated Values (.CSV), Tab Delimited Text (.TXT), Stata (.DTA), R save object (.RData), or to hold it in R memory. This last option will only work if you have called AmeliaView from an R session and want to return to the R command line to work with the output. Its name in the R workspace will be the file prefix. The stacked version of the Stata output will work with their built-in `mi` tools.

### Numerical options

![Numerical options menu](assets/numopts.png)

1. **Seed** - Sets the seed for the random number generator used by Amelia. Useful if you need to have the same output twice.
1. **Tolerance** - Adjusts the level of tolerance that Amelia uses to check convergence of the EM algorithm. In very large datasets, if your imputation chains run a long time without converging, increasing the tolerance will allow a lower threshold to judge convergence and end chains after fewer iterations.
1. **Empirical Prior** - A prior that adds observations to your data in order to shrink the covariances. A useful place to start is around 0.5% of the total number of observations in the dataset.
1. **Maximum Resample for Bounds** - Amelia fits logical bounds by rejecting any draws that do not fall within the bounds. This value sets the number of times Amelia should attempt to resample to fit the bounds before setting the imputation to the bound.

### Add Distributional Prior

![Detail for Add Distributional Prior dialog](assets/distpri.png)

1. **Current Priors** - A table of current priors in distributional form, with the variable and case name. You can remove priors by selecting them and using the right-click context menu.
1. **Case** - Select the case name or number you wish to set the prior about. You can also choose to make the prior for the entire variable, which will set the prior for any missing cell in that variable. The case names are generated from the row name of the observation, the value of the cross-section variable of the observation and the value of the time series variable of the observation.
1. **Variable** - The variable associated with the prior you would like to specify. The list provided only shows the missing variables for the currently selected observation.
1. **Mean** - The mean value of the prior. The textbox will not accept letters or out of place punctuation.
1. **Standard Deviation** - The standard deviation of the prior. The textbox will only accept positive non-zero values.

### Add Range Prior

![Detail for Add Range Prior dialog](assets/rangepri.png)

1. **Case** - Select the case name or number you wish to set the prior about. You can also choose to make the prior for the entire variable, which will set the prior for any missing cell in that variable. The case names are generated from the row name of the observation, the value of the cross-section variable of the observation and the value of the time series variable of the observation.
1. **Variable** - The variable associated with the prior you would like to specify. The list provided only shows the missing variables for the currently selected observation.
1. **Minimum** - The minimum value of the prior. The textbox will not accept letters or out of place punctuation.
1. **Maximum** - The maximum value of the prior. The textbox will not accept letters or out of place punctuation.
1. **Confidence** - The confidence level of the prior. This should be between 0 and 1, non-inclusive. This value represents how certain your priors are.
This value cannot be 1, even if you are absolutely certain of a given range, because it is used to convert the range into an appropriate distributional prior.

## Imputing and checking diagnostics

![Output log showing Amelia output for a successful imputation.](assets/output-log.png)

Once you have set all the relevant options, you can impute your data by clicking the "Impute!" button in the toolbar. In the bottom right corner of the window, you will see a progress bar that indicates the progress of the imputations. For large datasets this could take some time. Once the imputations are complete, you should see a "Successful Imputation!" message appear where the progress bar was. You can click on this message to open the folder containing the imputed datasets.

If there was an error during the imputation, the output log will pop up and give you the error message along with some information about how to fix the problem. Once you have fixed the problem, simply click "Impute!" again. Even if there was no error, you may want to view the output log to see how Amelia ran. To do so, simply click the "Show Output Log" button. The log also shows the call to the `amelia()` function in R. You can use this code snippet to run the same imputation from the R command line. You will have to replace the `x` argument in the `amelia()` call with the name of your dataset in the R session.

### Diagnostics Dialog

![Detail for the Diagnostics dialog](assets/diag.png)

Upon the successful completion of an imputation, the diagnostics menu will become available. Here you can use all of the diagnostics available at the command line.

1. **Compare Plots** - This will display the relative densities of the imputed (red) and observed (black) data. The density of the imputed values is the average imputation across all of the imputed datasets.
1. **Overimpute** - This will run Amelia on the full data with one cell of the chosen variable artificially set to missing and then check the result of that imputation against the truth. The resulting plot will show average imputations against true values along with 90% confidence intervals. These are plotted over a $y=x$ line for visual inspection of the imputation model.
1. **Number of overdispersions** - When running the overdispersion diagnostic, you need to run the imputation algorithm from several overdispersed starting points in order to get a clear idea of how the chains are converging. Enter the number of imputations here.
1. **Number of dimensions** - The overdispersion diagnostic must reduce the dimensionality of the paths of the imputation algorithm to either one or two dimensions due to graphical constraints.
1. **Overdisperse** - Run the overdispersion diagnostic to visually inspect the convergence of the Amelia algorithm from multiple start values that are drawn randomly.

## Sessions

It is often useful to save a session of AmeliaView to save time if you need to impute the same data again. Using the **Save Session** button will do just that, saving all of the current settings (including the original and any imputed data) to an RData file. You can then reload your session, on the same computer or any other, simply by clicking the **Load Session** button and finding the relevant RData file. All of the settings will be restored, including any completed imputations. Thus, if you save the session after imputing, you can always load up those imputations and view their diagnostics using the sessions feature of AmeliaView.
---
title: "Multiple Imputation Diagnostics"
date: "`r Sys.Date()`"
link-citations: yes
bibliography: amelia.bib
output:
  rmarkdown::html_vignette:
    keep_md: true
vignette: >
  %\VignetteIndexEntry{Multiple Imputation Diagnostics}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r setup, echo = FALSE, include = FALSE}
knitr::opts_chunk$set(fig.width = 5, fig.height = 4, fig.align = "center")
options(digits = 4, show.signif.stars = FALSE)
set.seed(12345)
```

Amelia currently provides a number of diagnostic tools to inspect the imputations that are created. To illustrate these, we use the `freetrade` data from the package:

```{r amelia, results = "hide"}
library(Amelia)
data(freetrade)
a.out <- amelia(freetrade, m = 5, ts = "year", cs = "country", p2s = 0)
```

## Comparing Densities

One check on the plausibility of the imputation model is to compare the distribution of imputed values to the distribution of observed values. Obviously we cannot expect, *a priori*, that these distributions will be identical, as the missing values may differ systematically from the observed values--this is the fundamental reason to impute to begin with! Imputations with strange distributions or those that are far from the observed data may indicate that the imputation model needs at least some investigation and possibly some improvement.

The `plot.amelia()` method works on output from `amelia()` and, by default, shows for each variable a plot of the relative frequencies of the observed data with an overlay of the relative frequency of the imputed values.

```{r plot_amelia}
plot(a.out, which.vars = 3:6)
```

where the argument `which.vars` indicates which of the variables to plot (in this case, we are taking the 3rd through the 6th variables). The imputed curve (in red) plots the density of the *mean* imputation over the $m$ datasets. That is, for each cell that is missing in the variable, the diagnostic will find the mean of that cell across each of the $m$ datasets and use that value for the density plot. The black distributions are those of the observed data. When variables are completely observed, their densities are plotted in blue. These graphs will allow you to inspect how the density of imputations compares to the density of observed data. Some discussion of these graphs can be found in @AbaGelLev08. Minimally, these graphs can be used to check that the mean imputation falls within known bounds, when such bounds exist in certain variables or settings.

We can also use the function `compare.density()` directly to make these plots for an individual variable:

```{r compare_density}
compare.density(a.out, var = "signed")
```

## Overimpute {#sec_overimpute}

*Overimputing* is a technique we have developed to judge the fit of the imputation model. Because of the nature of the missing data mechanism, it is impossible to tell whether the mean prediction of the imputation model is close to the unobserved value that is trying to be recovered. By definition this missing data does not exist to create this comparison, and if it existed we would no longer need the imputations or care about their accuracy. However, a natural question the applied researcher will often ask is how accurate are these imputed values?

Overimputing involves sequentially treating each of the *observed* values as if they had actually been missing. For each observed value in turn we then generate several hundred imputed values of that observed value, *as if it had been missing*.
While $m=5$ imputations are sufficient for most analysis models, this large number of imputations allows us to construct a confidence interval of what the imputed value would have been, had any of the observed data been missing. We can then graphically inspect whether our observed data tend to fall within the region where they would have been imputed had they been missing. For example, we can run the overimputation diagnostic on our data by running

```{r}
overimpute(a.out, var = "tariff")
```

Our overimputation diagnostic runs this procedure through all of the observed values for a user-selected variable. We can graph the estimates of each observation against the true values of the observation. On this graph, a $y=x$ line indicates the line of perfect agreement; that is, if the imputation model was a perfect predictor of the true value, all the imputations would fall on this line. For each observation, `overimpute()` also plots 90% confidence intervals that allow the user to visually inspect the behavior of the imputation model. By checking how many of the confidence intervals cover the $y=x$ line, we can tell how often the imputation model can confidently predict the true value of the observation.

Occasionally, the overimputation can display unintuitive results. For example, different observations may have different numbers of observed covariates. If covariates that are useful to the prediction are themselves missing, then the confidence interval for this observation will be much larger. In the extreme, there may be observations where the observed value we are trying to overimpute is *the only* observed value in that observation, and thus there is nothing left to impute that observation with when we pretend that it is missing, other than the mean and variance of that variable. In these cases, we should correctly expect the confidence interval to be very large. An example of this graph is shown here:

```{r overimp_bad, echo = FALSE, results = "hide"}
dd <- Amelia:::rmvnorm(50, mu = c(0.5, 0.5),
                       vcv = matrix(c(0.25^2, .06, .06, 0.25^2), 2, 2))
ddmiss <- sample(1:50, replace = FALSE, size = 10)
is.na(dd) <- ddmiss
aa.out <- amelia(dd, m = 5)
overimpute(aa.out, var = 2, main = "Observed versus Imputed Values")
```

In this simulated bivariate dataset, one variable is overimputed and the results displayed. The second variable is either observed, in which case the confidence intervals are very small and the imputations (yellow) are very accurate, or the second variable is missing, in which case this variable is being imputed simply from the mean and variance parameters, and the imputations (red) have a very large and encompassing spread. The circles represent the mean of all the imputations for that value. As the amount of missing information in a particular pattern of missingness increases, we expect the width of the confidence interval to increase. The color of the confidence interval reflects the percent of covariates observed in that pattern of missingness, as reflected in the legend at the bottom.

## Overdispersed Starting Values {#sec_overdisperse}

If the data given to `amelia()` has a poorly behaved likelihood, the EM algorithm can have problems finding a global maximum of the likelihood surface and starting values can begin to affect the imputations. Because the EM algorithm is deterministic, the point in the parameter space where you start it can impact where it ends, though this is irrelevant when the likelihood has only one mode.
However, if the starting values of an EM chain are close to a local maximum, the algorithm may find this maximum, unaware that there is a global maximum farther away. To make sure that our imputations do not depend on our starting values, a good test is to run the EM algorithm from multiple, dispersed starting values and check their convergence. In a well-behaved likelihood, we will see all of these chains converging to the same value, and reasonably conclude that this is the likely global maximum. On the other hand, we might see our EM chain converging to multiple locations. The algorithm may also wander around portions of the parameter space that are not fully identified, such as a ridge of equal likelihood, as would happen, for example, if the same variable were accidentally included in the imputation model twice.

Amelia includes a diagnostic to run the EM chain from multiple starting values that are overdispersed from the estimated maximum. The overdispersion diagnostic will display a graph of the paths of each chain. Since these chains move through spaces that are in an extremely high number of dimensions and cannot be graphically displayed, the diagnostic reduces the dimensionality of the EM paths by showing the paths relative to the largest principal components of the final mode(s) that are reached. Users can choose between graphing the movement over the two largest principal components, or more simply the largest dimension with time (iteration number) on the $x$-axis. The number of EM chains can also be adjusted. Once the diagnostic draws the graph, the user can visually inspect the results to check that all chains converge to the same point.

For our original model, this is a simple call to `disperse()`:

```{r displd}
disperse(a.out, dims = 1, m = 5)
disperse(a.out, dims = 2, m = 5)
```

where `m` designates the number of places to start EM chains from and `dims` is the number of dimensions of the principal components to show. In one dimension, the diagnostic plots movement of the chain on the $y$-axis and time, in the form of the iteration number, on the $x$-axis. The first plot shows a well-behaved likelihood, as the starting values all converge to the same point. The black horizontal line is the point where `amelia()` converges when it uses the default method for choosing the starting values. The diagnostic takes the end point of this chain as the possible maximum and disperses the starting values away from it to see if the chain will ever finish at another mode.

## Time-series Plots {#sec_tscsplots}

As discussed above, information about time trends and fixed effects can help produce better imputations. One way to check the plausibility of our imputation model is to see how it predicts missing values in a time series. If the imputations for the Malaysian tariff rate were drastically higher in 1990 than the observed years of 1989 or 1991, we might worry that there is a problem in our imputation model. Checking these time series is easy to do with `tscsPlot()`. Simply choose the variable (with the `var` argument) and the cross-section (with the `cs` argument) to plot the observed time series along with distributions of the imputed values for each missing time period.
For instance, we can get the plot of the `tariff` variable for Malaysia with the following commands:

```{r tsplot1}
a.out.time <- amelia(freetrade, ts = "year", cs = "country", polytime = 1,
                     intercs = TRUE, p2s = 0)
tscsPlot(a.out.time, cs = "Malaysia",
         main = "Malaysia (with time settings)",
         var = "tariff", ylim = c(-10, 60))
```

Here, the black points are observed tariff rates for Malaysia from 1980 to 2000. The red points are the mean imputation for each of the missing values, along with their 95% confidence bands. We draw these bands by imputing each of the missing values 100 times to get the imputation distribution for that observation. In this plot, we can see that the imputed 1990 tariff rate is quite in line with the values around it. Notice also that values toward the beginning and end of the time series have slightly higher imputation variance. This occurs because the fit of the polynomials of time in the imputation model has higher variance at the beginning and end of the time series. This is intuitive because these points have fewer neighbors from which to draw predictive power.

A word of caution is in order. As with comparing the histograms of imputed and observed values, there could be reasons that the missing values are systematically different than the observed time series. For instance, if there had been a major financial crisis in Malaysia in 1990 which caused the government to close off trade, then we would expect that the missing tariff rates would be quite different than the observed time series. If we have this information in our imputation model, we might expect to see out-of-line imputations in these time-series plots. If, on the other hand, we did not have this information, we might see "good" time-series plots that fail to point out this violation of the MAR assumption. Our imputation model would produce poor estimates of the missing values since it would be unaware that both the missingness and the true unobserved tariff rate depend on another variable. Hence, `tscsPlot()` is useful for finding obvious problems in the imputation model and comparing the efficiency of various imputation models, but it cannot speak to the untestable assumption of MAR.

## Missingness maps {#sec_missmaps}

One useful tool for exploring the missingness in a dataset is a *missingness map*. This is a map that visualizes the dataset as a grid and colors the grid by missingness status. The columns of the grid are the variables and the rows are the observations, as in any spreadsheet program. This tool allows for a quick summary of the patterns of missingness in the data. We can simply call the `missmap()` function on our output from `amelia()`:

```{r mmap1}
missmap(a.out)
```

The `missmap()` function arranges the columns so that the variables are in decreasing order of missingness from left to right. If the `cs` argument was set in the `amelia` function, the labels for the rows will indicate where each of the cross-sections begin.

In this missingness map, it is clear that the tariff rate is the variable most missing in the data and it tends to be missing in blocks of a few observations. Gross international reserves (`intresmi`) and financial openness (`fiveop`), on the other hand, are missing mostly at the end of each cross-section. This suggests *missingness by merging*, when variables with different temporal coverages are merged to make one dataset.
Sometimes this kind of missingness is an artifact of the date at which the data were merged, and researchers can resolve it by finding updated versions of the relevant variables. The missingness map is an important tool for understanding the patterns of missingness in the data and can often indicate potential ways to improve the imputation model or the data collection process.
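Note that you do not need to run `amelia()` before drawing a missingness map: `missmap()` also accepts a plain matrix or data frame, which is useful when first deciding on an imputation model. A minimal sketch on the raw data:

```{r mmap_raw}
## Missingness map of the raw data, before any imputation
missmap(freetrade)
```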
## References
---
title: "Introduction to Multiple Imputation"
date: "`r Sys.Date()`"
link-citations: yes
bibliography: amelia.bib
output:
  rmarkdown::html_vignette:
    keep_md: true
vignette: >
  %\VignetteIndexEntry{Introduction to Multiple Imputation}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r loadpkg, echo = FALSE, include = FALSE}
knitr::opts_chunk$set(fig.width = 5, fig.height = 4, fig.align = "center")
```

## Introduction {#sec:intro}

Missing data is a ubiquitous problem in social science data. Respondents do not answer every question, countries do not collect statistics every year, archives are incomplete, subjects drop out of panels. Most statistical analysis methods, however, assume the absence of missing data, and are only able to include observations for which every variable is measured. Amelia allows users to impute ("fill in" or rectangularize) incomplete data sets so that analyses which require complete observations can appropriately use all the information present in a dataset with missingness, and avoid the biases, inefficiencies, and incorrect uncertainty estimates that can result from dropping all partially observed observations from the analysis.

Amelia performs *multiple imputation*, a general-purpose approach to data with missing values. Multiple imputation has been shown to reduce bias and increase efficiency compared to listwise deletion. Furthermore, ad hoc methods of imputation, such as mean imputation, can lead to serious biases in variances and covariances. Unfortunately, creating multiple imputations can be a burdensome process due to the technical nature of the algorithms involved. Amelia provides users with a simple way to create and implement an imputation model, generate imputed datasets, and check its fit using diagnostics.

The Amelia program goes several significant steps beyond the capabilities of the first version of Amelia [@HonJosKin98]. For one, the bootstrap-based EMB algorithm included in Amelia can impute many more variables, with many more observations, in much less time. The great simplicity and power of the EMB algorithm made it possible to write Amelia so that it virtually never crashes --- which to our knowledge makes it unique among all existing multiple imputation software --- and is much faster than the alternatives too. Amelia also has features to make valid and much more accurate imputations for cross-sectional, time-series, and time-series-cross-section data, and allows the incorporation of observation and data-matrix-cell level prior information. In addition to all of this, Amelia provides many diagnostic functions that help users check the validity of their imputation model. This software implements the ideas developed in @HonKin10.

## What Amelia Does {#sec:what}

Multiple imputation involves imputing $m$ values for each missing cell in your data matrix and creating $m$ "completed" data sets. Across these completed data sets, the observed values are the same, but the missing values are filled in with a distribution of imputations that reflect the uncertainty about the missing data. After imputation with Amelia's EMB algorithm, you can apply whatever statistical method you would have used if there had been no missing values to each of the $m$ data sets, and use a simple procedure, described below, to combine the results[^combine]. Under normal circumstances, you only need to impute once and can then analyze the $m$ imputed data sets as many times and for as many purposes as you wish.
The advantage of Amelia is that it combines the comparative speed and ease of use of our algorithm with the power of multiple imputation, to let you focus on your substantive research questions rather than spending time developing complex application-specific models for nonresponse in each new data set. Unless the rate of missingness is very high, $m = 5$ (the program default) is probably adequate.

[^combine]: You can combine the results automatically by doing your data analyses within [Zelig for R](https://zeligproject.org), or within [Clarify for Stata](https://gking.harvard.edu/clarify).

### Assumptions

The imputation model in Amelia assumes that the complete data (that is, both observed and unobserved) are multivariate normal. If we denote the $(n \times k)$ dataset as $D$ (with observed part $D^{obs}$ and unobserved part $D^{mis}$), then this assumption is
\begin{equation}
D \sim \mathcal{N}_k(\mu, \Sigma),
\end{equation}
which states that $D$ has a multivariate normal distribution with mean vector $\mu$ and covariance matrix $\Sigma$. The multivariate normal distribution is often a crude approximation to the true distribution of the data, yet there is evidence that this model works as well as other, more complicated models even in the face of categorical or mixed data [see @Schafer97; @SchOls98]. Furthermore, transformations of many types of variables can often make this normality assumption more plausible (see \@ref(sec:trans) for more information on how to implement this in Amelia).

The essential problem of imputation is that we only observe $D^{obs}$, not the entirety of $D$. In order to gain traction, we need to make the usual assumption in multiple imputation that the data are *missing at random* (MAR). This assumption means that the pattern of missingness only depends on the observed data $D^{obs}$, not the unobserved data $D^{mis}$. Let $M$ be the missingness matrix, with cells $m_{ij} = 1$ if $d_{ij} \in D^{mis}$ and $m_{ij} = 0$ otherwise. Put simply, $M$ is a matrix that indicates whether or not a cell is missing in the data. With this, we can define the MAR assumption as
\[
p(M|D) = p(M|D^{obs}).
\]
Note that MAR includes the case when missing values are created randomly by, say, coin flips, but it also includes many more sophisticated missingness models. When missingness is not dependent on the data at all, we say that the data are *missing completely at random* (MCAR). Amelia requires both the multivariate normality and the MAR assumption (or the simpler special case of MCAR). Note that the MAR assumption can be made more plausible by including additional variables in the imputation dataset $D$ beyond just those eventually envisioned to be used in the analysis model.

### Algorithm

In multiple imputation, we are concerned with the complete-data parameters, $\theta = (\mu, \Sigma)$. When writing down a model of the data, it is clear that our observed data are actually $D^{obs}$ and $M$, the missingness matrix. Thus, the likelihood of our observed data is $p(D^{obs}, M|\theta)$. Using the MAR assumption[^mardep], we can break this up,
\begin{align}
p(D^{obs},M|\theta) = p(M|D^{obs})p(D^{obs}|\theta).
\end{align}

[^mardep]: There is an additional assumption hidden here that $M$ does not depend on the complete-data parameters.
As we only care about inference on the complete-data parameters, we can write the likelihood as
\begin{align}
L(\theta|D^{obs}) &\propto p(D^{obs}|\theta),
\end{align}
which we can rewrite using the law of iterated expectations as
\begin{align}
p(D^{obs}|\theta) &= \int p(D|\theta) dD^{mis}.
\end{align}
With this likelihood and a flat prior on $\theta$, we can see that the posterior is
\begin{equation}
p(\theta | D^{obs}) \propto p(D^{obs}|\theta) = \int p(D|\theta) dD^{mis}.
\end{equation}

The main computational difficulty in the analysis of incomplete data is taking draws from this posterior. The EM algorithm [@DemLaiRub77] is a simple computational approach to finding the mode of the posterior. Our EMB algorithm combines the classic EM algorithm with a bootstrap approach to take draws from this posterior. For each draw, we bootstrap the data to simulate estimation uncertainty and then run the EM algorithm to find the mode of the posterior for the bootstrapped data, which gives us fundamental uncertainty too [see @HonKin10 for details of the EMB algorithm]. Once we have draws of the posterior of the complete-data parameters, we make imputations by drawing values of $D^{mis}$ from its distribution conditional on $D^{obs}$ and the draws of $\theta$, which is a linear regression with parameters that can be calculated directly from $\theta$.

### Analysis

In order to combine the results across $m$ data sets, first decide on the quantity of interest to compute, such as a univariate mean, regression coefficient, predicted probability, or first difference. Then, the easiest way is to draw $1/m$ simulations of $q$ from each of the $m$ data sets, combine them into one set of $m$ simulations, and then to use the standard simulation-based methods of interpretation common for single data sets [@KinTomWit00].

Alternatively, you can combine directly and use as the multiple imputation estimate of this parameter, $\bar{q}$, the average of the $m$ separate estimates, $q_j$ $(j = 1, \dots, m)$:
\begin{equation}
\bar{q} = \frac{1}{m}\sum^{m}_{j=1} q_j.
\end{equation}
The variance of the point estimate is the average of the estimated variances from *within* each completed data set, plus the sample variance in the point estimates *across* the data sets (multiplied by a factor that corrects for the bias because $m < \infty$). Let $SE(q_j)^2$ denote the estimated variance (squared standard error) of $q_j$ from data set $j$, and $S^{2}_{q} = \sum^{m}_{j=1}(q_j - \bar{q})^2 / (m-1)$ be the sample variance across the $m$ point estimates. The standard error of the multiple imputation point estimate is the square root of
\begin{equation}
SE(q)^2 = \frac{1}{m}\sum^{m}_{j=1} SE(q_j)^2 + S^2_q (1 + 1/m).
\end{equation}
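To make these combining rules concrete, here is a minimal base-R sketch; the `ests` and `ses` vectors are hypothetical point estimates and standard errors of one quantity of interest from $m = 5$ imputed data sets:

```{r combine_sketch}
## Hypothetical estimates and standard errors from m = 5 imputed data sets
ests <- c(0.21, 0.24, 0.19, 0.23, 0.22)
ses  <- c(0.05, 0.05, 0.06, 0.05, 0.05)
m <- length(ests)

q.bar   <- mean(ests)    # multiple imputation point estimate
within  <- mean(ses^2)   # average within-imputation variance
between <- var(ests)     # S^2_q, sample variance across the m estimates
se.qbar <- sqrt(within + between * (1 + 1 / m))

c(estimate = q.bar, std.error = se.qbar)
```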
## References
---
title: "Using Amelia"
date: "`r Sys.Date()`"
link-citations: yes
bibliography: amelia.bib
output:
  rmarkdown::html_vignette:
    keep_md: true
vignette: >
  %\VignetteIndexEntry{Using Amelia}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r setup, echo = FALSE, include = FALSE}
knitr::opts_chunk$set(fig.width = 5, fig.height = 4, fig.align = "center")
options(digits = 4, show.signif.stars = FALSE)
set.seed(12345)
```

## Data

We now demonstrate how to use Amelia using data from @MilKub05 which studies the effect of democracy on trade policy. For the purposes of this user's guide, we will use a subset restricted to nine developing countries in Asia from 1980 to 1999[^freetrade]. This dataset includes the following 10 variables:

| Variable   | Description                                          |
|:-----------|:-----------------------------------------------------|
| `year`     | year                                                 |
| `country`  | country                                              |
| `tariff`   | average tariff rates                                 |
| `polity`   | Polity IV Score[^polity]                             |
| `pop`      | total population                                     |
| `gdp.pc`   | gross domestic product per capita                    |
| `intresmi` | gross international reserves                         |
| `signed`   | dummy variable if signed an IMF agreement that year  |
| `fiveop`   | measure of financial openness                        |
| `usheg`    | measure of US hegemony[^hegemony]                    |

These variables correspond to the variables used in the analysis model of @MilKub05 in table 2.

[^freetrade]: We have artificially added some missingness to these data for presentational purposes. You can access the original data at [https://scholar.princeton.edu/hvmilner/data](https://scholar.princeton.edu/hvmilner/data).

[^polity]: The Polity score is a number between -10 and 10 indicating how democratic a country is. A fully autocratic country would be a -10 while a fully democratic country would be a 10.

[^hegemony]: This measure of US hegemony is the US imports and exports as a percent of the world total imports and exports.

We first load Amelia and the data:

```{r load_data, results = "hide"}
library(Amelia)
data(freetrade)
```

We can check the summary statistics of the data to see that there is missingness on many of the variables:

```{r summarize_data}
summary(freetrade)
```

In the presence of missing data, most statistical packages use *listwise deletion*, which removes any row that contains a missing value from the analysis. Using the base model of @MilKub05 Table 2, we run a simple linear model in R, which uses listwise deletion:

```{r mk_lm}
summary(lm(tariff ~ polity + pop + gdp.pc + year + country,
           data = freetrade))
```

Note that 60 of the 171 original observations are deleted due to missingness. These observations, however, are partially observed, and contain valuable information about the relationships between those variables which are present in the partially completed observations. Multiple imputation will help us retrieve that information and make better, more efficient, inferences.

## Multiple Imputation

When performing multiple imputation, the first step is to identify the variables to include in the imputation model. It is crucial to include at least as much information as will be used in the analysis model. That is, any variable that will be in the analysis model should also be in the imputation model. This includes any transformations or interactions of variables that will appear in the analysis model. In fact, it is often useful to add more information to the imputation model than will be present when the analysis is run.
Since imputation is predictive, any variables that would increase predictive power should be included in the model, even if including them in the analysis model would produce bias in estimating a causal effect (such as for post-treatment variables) or collinearity would preclude determining which variable had a relationship with the dependent variable (such as including multiple alternate measures of GDP). In our case, we include all the variables in `freetrade` in the imputation model, even though our analysis model focuses on `polity`, `pop` and `gdp.pc`. We are not incorporating time or spatial data yet, but we do so below.

To create multiple imputations in Amelia, we can simply run

```{r amelia}
a.out <- amelia(freetrade, m = 5, ts = "year", cs = "country")
a.out
```

Note that our example dataset is deliberately small both in variables and in cross-sectional elements. Typical datasets may often have hundreds or possibly a couple thousand steps to the EM algorithm. Long chains should remind the analyst to consider whether transformations of the variables would more closely fit the multivariate normal assumptions of the model (correct but omitted transformations will shorten the number of steps and improve the fit of the imputations), but they do not necessarily denote problems with the imputation model.

The output gives some information about how the algorithm ran. Each of the imputed datasets is now in the list `a.out$imputations`. Thus, we could plot a histogram of the `tariff` variable from the 3rd imputation,

```{r}
hist(a.out$imputations[[3]]$tariff, col = "grey", border = "white")
```

### Saving imputed datasets

If you need to save your imputed datasets, one direct method is to save the output list from `amelia`,

```{r save, eval = FALSE}
save(a.out, file = "imputations.RData")
```

As in the previous example, the i-th imputed dataset can be retrieved from this list as `a.out$imputations[[i]]`.

In addition, you can save each of the imputed datasets to its own file using the `write.amelia()` command,

```{r write_amelia, eval = FALSE}
write.amelia(obj = a.out, file.stem = "outdata")
```

This will create one comma-separated value file for each imputed dataset in the following manner:

    outdata1.csv
    outdata2.csv
    outdata3.csv
    outdata4.csv
    outdata5.csv

The `write.amelia()` function can also save files in tab-delimited and Stata (`.dta`) file formats. For instance, to save Stata files, simply change the `format` argument to `"dta"`,

```{r write_dta, eval = FALSE}
write.amelia(obj = a.out, file.stem = "outdata", format = "dta")
```

Additionally, `write.amelia()` can create a "stacked" version of the imputed dataset which stacks each imputed dataset on top of one another. This can be done by setting the `separate` argument to `FALSE`. The resulting matrix is of size $(N \cdot m) \times p$ if the original dataset is excluded (`orig.data = FALSE`) and of size $(N \cdot (m+1)) \times p$ if it is included (`orig.data = TRUE`). The stacked dataset will include a variable (set with `impvar`) that indicates to which imputed dataset the observation belongs.
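For example, a minimal sketch of writing a single stacked file (the file stem here is illustrative):

```{r stacked_write, eval = FALSE}
## One stacked CSV containing all m imputed datasets, without the
## original data
write.amelia(obj = a.out, file.stem = "outdata_stacked",
             separate = FALSE, orig.data = FALSE)
```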
## Combining multiple calls to `amelia()`

The EMB algorithm is what computer scientists call *embarrassingly parallel*, meaning that it is simple to separate each imputation into parallel processes. With Amelia it is simple to run subsets of the imputations on different machines and then combine them after the imputation for use in the analysis model. This allows for a huge increase in the speed of the algorithm.

Output lists from different Amelia runs can be combined together into a new list. For instance, suppose that we wanted to add another ten imputed datasets to our earlier call to `amelia()`. First, run the function to get these additional imputations,

```{r more_amelia}
a.out.more <- amelia(freetrade, m = 10, ts = "year", cs = "country", p2s = 0)
a.out.more
```

then combine this output with our original output using the `ameliabind()` function,

```{r ameliabind}
a.out.more <- ameliabind(a.out, a.out.more)
a.out.more
```

This function binds the two outputs into the same output so that you can pass the combined imputations easily to analysis models and diagnostics. Note that `a.out.more` now has a total of 15 imputations.

A simple way to execute a parallel processing scheme with Amelia would be to run `amelia()` with `m` set to 1 on $m$ different machines or processors, save each output using the `save()` function, load them all in the same R session using the `load()` command, and then combine them using `ameliabind()`. In order to do this, however, make sure to give each of the outputs a different name so that they do not overwrite each other when loading into the same R session. Also, some parallel environments will dump all generated files into a common directory, where they may overwrite each other. If it is convenient in a parallel environment to run a large number of `amelia()` calls from a single piece of code, one useful way to avoid overwriting is to create the `file.stem` with a random suffix. For example:

```{r rand_stem, eval = FALSE}
b <- round(runif(1, min = 1111, max = 9999))
random.name <- paste("am", b, sep = "")
write.amelia(obj = a.out, file.stem = random.name)
```

### Screen output

Screen output can be adjusted with the "print to screen" argument, `p2s`. At a value of 0, no screen printing will occur. This may be useful in large jobs or simulations where a very large number of imputation models may be required. The default value of 1 lists each bootstrap and displays the number of iterations required to reach convergence in that bootstrapped dataset. A value of 2 gives more thorough screen output, including, at each iteration, the number of parameters that have significantly changed since the last iteration. This may be useful when the EM chain length is very long, as it can provide an intuition for how many parameters still need to converge in the EM chain, and a sense of the time remaining. However, it is worth noting that the last several parameters can often take a significant fraction of the total number of iterations to converge.

Setting `p2s` to 2 will also generate information on how the EM algorithm is behaving, such as a `!` when the current estimated complete-data covariance matrix is not invertible and a `*` when the likelihood has not monotonically increased in that step. Having many of these two symbols in the screen output is an indication of a problematic imputation model. Problems of non-invertible matrices often mean that the current guess for the covariance matrix is singular. This is a sign that there may be two highly correlated variables in the model. One way to resolve this is to use a ridge prior (see \@ref(sec_prior)).

An example of the output when `p2s` is 2 would be

```{r p2s}
a.out.p2s <- amelia(freetrade, m = 1, ts = "year", cs = "country", p2s = 2)
```

## Parallel Imputation {#sec:parallel}

Each imputation in the above EMB algorithm is completely independent of any other imputation, a property called *embarrassingly parallel*.
This type of approach can take advantage of the multiple-core infrastructure of modern CPUs. Each core in a multi-core processor can execute independent operations in parallel. Amelia can utilize this parallel processing internally via the `parallel` and the `ncpus` arguments. The `parallel` argument sets the parallel processing backend, either with `"multicore"` or `"snow"` (or `"no"` for no parallel processing). The `"multicore"` backend is not available on Windows systems, but tends to be quicker at parallel processing. On a Windows system, the `"snow"` backend provides parallel processing through a cluster of worker processes across the CPUs. You can set the default for this argument using the `"amelia.parallel"` option. This allows you to run Amelia in parallel as the default for an entire R session without setting arguments in the `amelia()` call.

For each of the parallel backends, Amelia requires a number of CPUs to use in parallel. This can be set using the `ncpus` argument. It can be higher than the number of physical cores in the system if hyperthreading or other technologies are available. You can use the `parallel::detectCores()` function to determine how many cores are available on your machine. The default for this argument can be set using the `"amelia.ncpus"` option.

On Unix-alike systems (such as macOS and Linux distributions), the `"multicore"` backend automatically sets up and stops the parallel workers by forking the process. On Windows, the `"snow"` backend requires more attention. Amelia will attempt to create a parallel cluster of worker processes (since Windows systems cannot fork a process) and will stop this cluster after the imputations are complete. Alternatively, Amelia also has a `cl` argument, which accepts a predefined cluster made using `parallel::makePSOCKcluster()`. For more information about parallel processing in R, see the documentation for the `parallel` package that ships along with R or the CRAN Task View on [Parallel Computing with R](https://cran.r-project.org/view=HighPerformanceComputing).
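Putting these options together, a minimal sketch of a parallel run; the backend and CPU count here are illustrative and should be matched to your system:

```{r parallel_amelia, eval = FALSE}
## Run the five imputations across two worker processes using the
## "snow" backend, which works on all platforms
a.out.par <- amelia(freetrade, m = 5, ts = "year", cs = "country",
                    parallel = "snow", ncpus = 2, p2s = 0)
```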
## Improving Imputations via Transformations {#sec:trans}

Social science data commonly include variables that fail to fit a multivariate normal distribution. Indeed, numerous models have been introduced specifically to deal with the problems they present. As it turns out, much evidence in the literature [discussed in @KinHonJos01] indicates that the multivariate normal model used in Amelia usually works well for the imputation stage even when discrete or non-normal variables are included and when the analysis stage involves these limited dependent variable models. Nevertheless, Amelia includes some limited capacity to deal directly with ordinal and nominal variables and to modify variables that require other transformations. In general, nominal and log-transformed variables should be declared to Amelia, whereas ordinal (including dichotomous) variables often need not be, as described below. (For harder cases, see @Schafer97 for specialized MCMC-based imputation models for discrete variables.) Although these transformations are applied internally to better fit the data to the multivariate normal assumptions of the imputation model, all the imputations that are created will be returned in the original untransformed form of the data.

If the user has already performed transformations on their data (such as by taking a log or square root prior to feeding the data to `amelia()`) these do not need to be declared, as that would result in the transformation occurring *doubly* in the imputation model. The fully imputed data sets that are returned will always be in the form of the original data that is passed to the `amelia()` routine.

### Ordinal {#sec:ord}

In much statistical research, researchers treat independent ordinal (including dichotomous) variables as if they were really continuous. If the analysis model to be employed is of this type, then nothing extra is required of the imputation model. Users are advised to allow Amelia to impute non-integer values for any missing data, and to use these non-integer values in their analysis. Sometimes this makes sense, and sometimes this defies intuition. One particular imputation of 2.35 for a missing value on a seven-point scale carries the intuition that the respondent is between a 2 and a 3 and most probably would have responded 2 had the data been observed. This is easier to accept than an imputation of 0.79 for a dichotomous variable where a zero represents a male and a one represents a female respondent. However, in both cases the non-integer imputations carry more information about the underlying distribution than would be carried if we were to force the imputations to be integers. Thus whenever the analysis model permits, missing ordinal observations should be allowed to take on continuously valued imputations.

In the `freetrade` data, one such ordinal variable is `polity`, which ranges from -10 (full autocracy) to 10 (full democracy). If we tabulate this variable from one of the imputed datasets,

```{r polity_tab}
table(a.out$imputations[[3]]$polity)
```

we can see that there is one imputation between -4 and -3 and one imputation between 6 and 7. Again, the interpretation of these values is rather straightforward even if they are not strictly in the coding of the original Polity data.

Often, however, analysis models require some variables to be strictly ordinal, as, for example, when the dependent variable will be modeled in a logistic or Poisson regression. Imputations for variables set as ordinal are created by taking the continuously valued imputation and using an appropriately scaled version of this as the probability of success in a binomial distribution. The draw from this binomial distribution is then translated back into one of the ordinal categories. For our data we can simply add `polity` to the `ords` argument:

```{r polity_ord}
a.out1 <- amelia(freetrade, m = 5, ts = "year", cs = "country",
                 ords = "polity", p2s = 0)
table(a.out1$imputations[[3]]$polity)
```

Now, we can see that all of the imputations fall into one of the original polity categories.

### Nominal {#sec:nom}

Nominal variables[^binary] must be treated quite differently than ordinal variables. Any multinomial variables in the data set (such as religion coded 1 for Catholic, 2 for Jewish, and 3 for Protestant) must be specified to Amelia. In our `freetrade` dataset, we have `signed`, which is 1 if a country signed an IMF agreement in that year and 0 if it did not.
Of course, our first imputation did not limit the imputations to these two categories:

```{r binary_tab}
table(a.out1$imputations[[3]]$signed)
```

In order to fix this for a $p$-category multinomial variable, Amelia will determine $p$ (as long as your data contain at least one value in each category), and substitute $p-1$ binary variables to specify each possible category. These new $p-1$ variables will be treated as the other variables in the multivariate normal imputation method chosen, and receive continuous imputations. These continuously valued imputations will then be appropriately scaled into probabilities for each of the $p$ possible categories, and one of these categories will be drawn, whereupon the original $p$-category multinomial variable will be reconstructed and returned to the user. Thus all imputations will be appropriately multinomial.

[^binary]: Dichotomous (two-category) variables are a special case of nominal variables. For these variables, the nominal and ordinal methods of transformation in Amelia agree.

For our data we can simply add `signed` to the `noms` argument:

```{r noms}
a.out2 <- amelia(freetrade, m = 5, ts = "year", cs = "country",
                 noms = "signed", p2s = 0)
table(a.out2$imputations[[3]]$signed)
```

Note that Amelia can only fit imputations into categories that exist in the original data. Thus, if there was a third category of `signed`, say 2, that corresponded to a different kind of IMF agreement, but it never occurred in the original data, Amelia could not match imputations to it.

Since Amelia properly treats a $p$-category multinomial variable as $p-1$ variables, one should understand the number of parameters that are quickly accumulating if many multinomial variables are being used. If the square of the number of real and constructed variables is large relative to the number of observations, it is useful to use a ridge prior as in section \@ref(sec_prior).

### Natural log {#sec:log}

If one of your variables is heavily skewed or has outliers that may alter the imputation in an unwanted way, you can use a natural logarithm transformation of that variable in order to normalize its distribution. This transformed distribution helps Amelia to avoid imputing values that depend too heavily on outlying data points. Log transformations are common in expenditure and economic variables where we have strong beliefs that the marginal relationship between two variables decreases as we move across the range. For instance, we can show that the `tariff` variable clearly has positive (or right) skew while its natural log transformation has a roughly normal distribution.

```{r tarrif_hist}
hist(freetrade$tariff, col = "grey", border = "white")
hist(log(freetrade$tariff), col = "grey", border = "white")
```

### Square root {#sec:sqrt}

Event count data is often heavily skewed and has nonlinear relationships with other variables. One common transformation to tailor the linear model to count data is to take the square roots of the counts. This is a transformation that can be set as an option in Amelia.

### Logistic {#sec:lgstc}

Proportional data is sharply bounded between 0 and 1. A logistic transformation is one possible option in Amelia to make the distribution symmetric and relatively unbounded.
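Each of these transformations is requested by passing the relevant variable names to the corresponding argument of `amelia()`: `logs`, `sqrts`, or `lgstc`. For example, a minimal sketch that imputes with a log-transformed tariff rate:

```{r amelia_logs}
## Declare the log transformation; imputations are returned on the
## original scale of tariff
a.out.log <- amelia(freetrade, m = 5, ts = "year", cs = "country",
                    logs = "tariff", p2s = 0)
```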
## Identification Variables {#sec:idvars}

Datasets often contain identification variables, such as country names, respondent numbers, or other identification numbers, codes, or abbreviations. Sometimes these are text and sometimes these are numeric. Often it is not appropriate to include these variables in the imputation model, but it is useful to have them remain in the imputed datasets (however, there are models that would include the ID variables in the imputation model, such as fixed effects models for data with repeated observations of the same countries). Identification variables which are not to be included in the imputation model can be identified with the argument `idvars`. These variables will not be used in the imputation model, but will be kept in the imputed datasets.

If the `year` and `country` variables contained no information except labels, we could omit them from the imputation:

```{r idvars}
amelia(freetrade, idvars = c("year", "country"))
```

Note that Amelia will return with an error if your dataset contains a factor or character variable that is not marked as a nominal or identification variable. Thus, if we were to omit the factor `country` from the `cs` or `idvars` arguments, we would receive an error:

```{r idvars_error}
a.out2 <- amelia(freetrade, idvars = c("year"))
```

In order to conserve memory, it is wise to remove unnecessary variables from a data set before loading it into Amelia. The only variables you should include in your data when running Amelia are variables you will use in the analysis stage and those variables that will help in the imputation model. While it may be tempting to simply mark unneeded variables as IDs, it only serves to waste memory and slow down the imputation procedure.

## Time Series, or Time Series Cross Sectional Data {#sec:tscs}

Many variables that are recorded over time within a cross-sectional unit are observed to vary smoothly over time. In such cases, knowing the observed values of observations close in time to any missing value may enormously aid the imputation of that value. However, the exact pattern may vary over time within any cross-section. There may be periods of growth, stability, or decline, in each of which the observed values would be used in a different fashion to impute missing values. Also, these patterns may vary enormously across different cross-sections, or may exist in some and not others.

Amelia can build a general model of patterns within variables across time by creating a sequence of polynomials of the time index. If, for example, tariffs vary smoothly over time, then we make the modeling assumption that there exists some polynomial that describes the economy in cross-sectional unit $i$ at time $t$ as:
\[
\textrm{tariff}_{ti} = \beta_0 + \beta_1 t + \beta_2 t^2 + \beta_3 t^3 + \ldots
\]
Thus, if we include enough higher-order terms of time, the pattern between observed values of the tariff rate can be estimated. Amelia will create polynomials of time up to the user-defined $k$-th order ($k \leq 3$). We can implement this with the `ts` and `polytime` arguments. If we thought that a second-order polynomial would help the prediction, we could run

```{r polytime, results = "hide"}
a.out2 <- amelia(freetrade, ts = "year", cs = "country", polytime = 2)
```

With this input, Amelia will add covariates to the model that correspond to time and its polynomials. These covariates will help better predict the missing values. If cross-sectional units are specified, these polynomials can be interacted with the cross-section unit to allow the patterns over time to vary between cross-sectional units. Unless you strongly believe all units have the same patterns over time in all variables (including the same constant term), this is a reasonable setting.
When $k$ is set to 0, this interaction simply results in a model of *fixed effects* where every unit has a uniquely estimated constant term. Amelia does not smooth the observed data, and only uses this functional form, or one you choose, with all the other variables in the analysis and the uncertainty of the prediction, to impute the missing values.

In order to impute with trends specific to each cross-sectional unit, we can set `intercs` to `TRUE`:

```{r intercs, results = "hide"}
a.out.time <- amelia(freetrade, ts = "year", cs = "country", polytime = 1,
                     intercs = TRUE, p2s = 2)
```

Note that attempting to use `polytime` without the `ts` argument, or `intercs` without the `cs` argument, will result in an error.

Using the `tscsPlot()` function (discussed below), we can see that we have a much better prediction about the missing values when incorporating time than when we omit it:

```{r tcomp1}
tscsPlot(a.out, cs = "Malaysia",
         main = "Malaysia (no time settings)",
         var = "tariff", ylim = c(-10, 60))
tscsPlot(a.out.time, cs = "Malaysia",
         main = "Malaysia (with time settings)",
         var = "tariff", ylim = c(-10, 60))
```

### Lags and leads {#sec:lags}

An alternative way of handling time-series information is to include lags and leads of certain variables in the imputation model. *Lags* are variables that take the value of another variable in the previous time period, while *leads* take the value of another variable in the next time period. Many analysis models use lagged variables to deal with issues of endogeneity, so using leads may seem strange. It is important to remember, however, that imputation models are predictive, not causal. Thus, since both past and future values of a variable are likely correlated with the present value, both lags and leads should improve the model. If we wanted to include lags and leads of tariffs, for instance, we would simply pass this to the `lags` and `leads` arguments:

```{r lags_leads}
a.out2 <- amelia(freetrade, ts = "year", cs = "country", lags = "tariff",
                 leads = "tariff")
```

## Including Prior Information

Amelia has a number of methods of setting priors within the imputation model. Two of these are commonly used and discussed below: ridge priors and observational priors.

### Ridge priors for high missingness, small samples, or large correlations {#sec_prior}

When the data to be analyzed contain a high degree of missingness or very strong correlations among the variables, or when the number of observations is only slightly greater than the number of parameters $p(p+3)/2$ (where $p$ is the number of variables), results from your analysis model will be more dependent on the choice of imputation model. This suggests more testing of alternative specifications under Amelia in these cases. This can happen when the polynomials of time interacted with the cross-section are included in the imputation model. For example, in our data, if we used a polynomial of degree 2 with unit-specific trends and there are 9 countries, it would add $3 \times 9 - 1 = 26$ more variables to the imputation model (dropping one of the fixed effects for identification). When these are added, the EM algorithm can become unstable. You can detect this by inspecting the screen output under `p2s = 2` or by observing that the number of iterations per imputation is very divergent.

In these circumstances, we recommend adding a ridge prior, which will help with numerical stability by shrinking the covariances among the variables toward zero without changing the means or variances.
This can be done by including the `empri` argument. Including this prior as a positive number is roughly equivalent to adding `empri` artificial observations to the data set with the same means and variances as the existing data but with zero covariances. Thus, increasing the `empri` setting results in more shrinkage of the covariances, putting more a priori structure on the estimation problem: like many Bayesian methods, it reduces variance in return for an increase in bias that one hopes does not overwhelm the advantages in efficiency. In general, we suggest keeping the value of this prior relatively small and increasing it only when necessary. A recommendation of 0.5 to 1 percent of the number of observations, $n$, is a reasonable starting value, and often useful in large datasets to add some numerical stability. For example, in a dataset of two thousand observations, this would translate to a prior value of 10 or 20 respectively. A prior of up to 5 percent is moderate in most applications and 10 percent is a reasonable upper bound.

For our data, it is easy to code up a 1 percent ridge prior:

```{r empri}
a.out.time2 <- amelia(freetrade, ts = "year", cs = "country", polytime = 1,
                      intercs = TRUE, p2s = 0,
                      empri = .01 * nrow(freetrade))
a.out.time2
```

### Observation-level priors {#sec:obspri}

Researchers often have additional prior information about missing data values based on previous research, academic consensus, or personal experience. Amelia can incorporate this information to produce vastly improved imputations. The Amelia algorithm allows users to include informative Bayesian priors about individual missing data cells instead of the more general model parameters, many of which have little direct meaning.

The incorporation of priors follows basic Bayesian analysis where the imputation turns out to be a weighted average of the model-based imputation and the prior mean, where the weights are functions of the relative strength of the data and prior: when the model predicts very well, the imputation will down-weight the prior, and vice versa [@HonKin10].

The priors about individual observations should describe the analyst's belief about the distribution of the missing data cell. This can either take the form of a mean and a standard deviation or a confidence interval. For instance, we might know that the 1986 tariff rates in Thailand were around 40%, but we have some uncertainty as to the exact value. Our prior belief about the distribution of the missing data cell, then, centers on 40 with a standard deviation that reflects the amount of uncertainty we have about our prior belief.

To input priors you must build a priors matrix with either four or five columns. Each row of the matrix represents a prior on either one observation or one variable. In any row, the entry in the first column is the row of the observation and the entry in the second column is the column of the observation. In the four-column priors matrix, the third and fourth columns are the mean and standard deviation of the prior distribution of the missing value.

For instance, suppose that we had some expert prior information about tariff rates in Thailand. We know from the data that Thailand is missing tariff rates in many years,

```{r thailand}
freetrade[freetrade$country == "Thailand", c("year", "country", "tariff")]
```

Suppose that we had expert information that tariff rates were roughly 40% in Thailand between 1986 and 1988 with about a 6% margin of error. This corresponds to a standard deviation of about 3.
In order to include this information, we must form the priors matrix:

```{r build_prior}
pr <- matrix(
  c(158, 159, 160, 3, 3, 3, 40, 40, 40, 3, 3, 3),
  nrow = 3, ncol = 4
)
pr
```

The first column of this matrix corresponds to the row numbers of Thailand in these three years, the second column refers to the column number of `tariff` in the data and the last two columns refer to the actual prior. Once we have this matrix, we can pass it to `amelia()`,

```{r amelia_prior}
a.out.pr <- amelia(freetrade, ts = "year", cs = "country", priors = pr)
```

In the five column matrix, the last three columns describe a confidence range of the data. The columns are a lower bound, an upper bound, and a confidence level between 0 and 1, exclusive. Whichever format you choose, it must be consistent across the entire matrix. We could get roughly the same prior as above by utilizing this method. Our margin of error implies that we would want imputations between 34 and 46, so our matrix would be

```{r build_prior2}
pr.2 <- matrix(
  c(158, 159, 160, 3, 3, 3, 34, 34, 34, 46, 46, 46, 0.95, 0.95, 0.95),
  nrow = 3, ncol = 5
)
pr.2
```

These priors indicate that we are 95% confident that these missing values are in the range 34 to 46.

If a prior has the value 0 in the first column, this prior will be applied to all missing values in this variable, except for explicitly set priors. Thus, we could set a prior for the entire `tariff` variable of 20, but still keep the above specific priors with the following code:

```{r build_prior3}
pr.3 <- matrix(
  c(158, 159, 160, 0,
    3, 3, 3, 3,
    40, 40, 40, 20,
    3, 3, 3, 5),
  nrow = 4, ncol = 4)
pr.3
```

### Logical bounds

In some cases, variables in the social sciences have known logical bounds. Proportions must be between 0 and 1 and duration data must be greater than 0, for instance. Many of these logical bounds can be handled by using the correct transformation for that type of variable (see \@ref(sec:trans) for more details on the transformations handled by Amelia). In the occasional case that imputations must satisfy certain logical bounds not handled by these transformations, Amelia can take draws from a truncated normal distribution in order to achieve imputations that satisfy the bounds.

Note, however, that this procedure imposes extremely strong restrictions on the imputations and can lead to lower variances than the imputation model implies. The mean value across all the imputed values of a missing cell is the best guess from the imputation model of that missing value. The variance of the distribution across imputed datasets correctly reflects the uncertainty in that imputation. It is often the mean imputed value that should conform to any known bounds, even if individual imputations are drawn beyond those bounds. The mean imputed value can be checked with the diagnostics presented in the next section. In general, building a more predictive imputation model will lead to better imputations than imposing bounds.

Amelia implements these bounds by rejection sampling. When drawing the imputations from their posterior, we repeatedly resample until we have a draw that satisfies all of the logical constraints. You can set an upper limit on the number of times to resample with the `max.resample` argument. If, after `max.resample` draws, the imputations are still outside the bounds, Amelia will set the imputation at the edge of the bounds. For example, if the bounds were 0 and 100 and all of the draws were negative, Amelia would simply impute 0.
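To make the resampling procedure concrete, here is a minimal conceptual sketch in plain R. This is only an illustration of the logic described above, not Amelia's internal code; `rdraw` stands in for a hypothetical function that draws one value from the imputation posterior:

```{r rejection_sketch}
draw_bounded <- function(rdraw, lower, upper, max.resample = 100) {
  for (i in seq_len(max.resample)) {
    x <- rdraw()                            # draw from the posterior
    if (x >= lower && x <= upper) return(x) # accept a draw inside the bounds
  }
  if (x < lower) lower else upper           # otherwise clamp to the nearest bound
}
```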
As an extreme example, suppose that we know, for certain, that tariff rates had to fall between 30 and 40. This, obviously, is not true, but we can generate imputations from this model. In order to specify these bounds, we need to generate a matrix of bounds to pass to the `bounds` argument. This matrix will have 3 columns: the first is the column for the bounded variable, the second is the lower bound and the third is the upper bound. Thus, to implement our bound on tariff rates (the 3rd column of the dataset), we would create the matrix,

```{r build_bounds}
bds <- matrix(c(3, 30, 40), nrow = 1, ncol = 3)
bds
```

which we can pass to the `bounds` argument to `amelia()`:

```{r amelia_bounds}
a.out.bds <- amelia(freetrade, ts = "year", cs = "country", bounds = bds,
                    max.resample = 1000)
```

The difference in results between the bounded and unbounded models is not obvious from the output, but inspection of the imputed tariff rates for Malaysia shows that there has been a drastic restriction of the imputations to the desired range:

```{r bounds_plot}
tscsPlot(a.out, cs = "Malaysia", main = "No logical bounds", var = "tariff",
         ylim = c(-10, 60))
tscsPlot(a.out.bds, cs = "Malaysia", main = "Bounded between 30 and 40",
         var = "tariff", ylim = c(-10, 60))
```

Again, analysts should be extremely cautious when using these bounds as they can seriously affect the inferences from the imputation model, as shown in this example. Even when logical bounds exist, we recommend simply imputing variables normally, as the violation of the logical bounds represents part of the true uncertainty of imputation.

## Post-imputation Transformations {#sec_postimptrans}

In many cases, it is useful to create transformations of the imputed variables for use in further analysis. For instance, one may want to create an interaction between two variables or perform a log-transformation on the imputed data. To do this, Amelia includes a `transform()` function for `amelia()` output that adds or overwrites variables in each of the imputed datasets. For instance, if we wanted to create a log-transformation of the `gdp.pc` variable, we could use the following command:

```{r amelia_transform}
a.out <- transform(a.out, lgdp = log(gdp.pc))
head(a.out$imputations[[1]][,c("country", "year","gdp.pc", "lgdp")])
```

To create an interaction between two variables, we could simply use:

```{r interaction}
a.out <- transform(a.out, pol_gdp = polity * gdp.pc)
```

Each transformation is recorded and the `summary()` command prints out each transformation that has been performed:

```{r sum_trans}
summary(a.out)
```

Note that the updated output is almost exactly the same as the fresh `amelia()` output. You can pass the transformed output back to `amelia()` and it will add imputations and update these imputations with the transformations you have performed.

## Analysis Models {#sec_analysis}

Imputation is most often a data processing step as opposed to a final model in and of itself. To this end, it is easy to pass output from `amelia()` to other functions. The easiest and most integrated way to run an analysis model is to use the `with()` and `mi.combine()` functions. For example, in @MilKub05, the dependent variable was tariff rates. We can replicate table 5.1 from their analysis with the original data simply by running

```{r lm_lwd}
orig.model <- lm(tariff ~ polity + pop + gdp.pc + year + country, data = freetrade)
orig.model
```

Running the same model with imputed data is almost identical.
We can run the `lm` within each imputed data set by using the `with()` function:

```{r lm_imp}
imp.models <- with(
  a.out,
  lm(tariff ~ polity + pop + gdp.pc + year + country)
)
imp.models[1:2]
```

The result here is simply a list of output of `lm()` applied to each imputed data set. We can combine the imputed estimates using the rules described in @KinHonJos01 and @Schafer97 with the `mi.combine()` function:

```{r mi_combine}
out <- mi.combine(imp.models, conf.int = TRUE)
out
```

The combination of the results depends on the [broom](https://broom.tidymodels.org) package and results can be combined if a `tidy()` method exists for the estimation function passed to `with()`. Other packages such as [Zelig](https://zeligproject.org) can also combine imputed data sets across a number of statistical models. Furthermore, users can easily export their imputations using the `write.amelia()` function as described in \@ref(sec_saving) and use statistical packages other than R for the analysis model.

In addition to the resources available in R, users can draw on Stata to implement their analysis models. As of version 11, Stata has built-in handling of multiply imputed datasets. In order to utilize this functionality, simply export the "stacked" imputations using the `write.amelia()` function:

```{r write_dta_stacked, eval = FALSE}
write.amelia(a.out, separate = FALSE, file.stem = "outdata", format = "dta")
```

Once this stacked dataset is open in Stata, you must tell Stata that it is an imputed dataset using the `mi import flong` command:

```{stata eval = FALSE}
mi import flong, m(imp) id(year country) imp(tariff-usheg)
```

The command takes a few options: `m` designates the imputation variable (set with `impvar` in `write.amelia()`), `id` sets the identifying variables, and `imp` sets the variables that were imputed (or included in the imputation). The `tariff-usheg` indicates that Stata should treat the range of variables between `tariff` and `usheg` as imputed. Once we have set the dataset as imputed, we can use the built-in `mi` commands to analyze the data:

```{stata eval = FALSE}
mi estimate: reg tariff polity pop gdp_pc
```

```
Multiple-imputation estimates     Imputations     =          5
Linear regression                 Number of obs   =        171
                                  Average RVI     =     1.4114
                                  Complete DF     =        167
DF adjustment:   Small sample     DF:     min     =      10.36
                                          avg     =      18.81
                                          max     =      37.62
Model F test:       Equal FMI     F(   2,   10.4) =      15.50
Within VCE type:          OLS     Prob > F        =     0.0008

------------------------------------------------------------------------------
      tariff |      Coef.   Std. Err.      t    P>|t|     [95% Conf. Interval]
-------------+----------------------------------------------------------------
      polity |  -.2058115   .3911049    -0.53   0.610    -1.072968    .6613452
         pop |   3.21e-08   8.72e-09     3.68   0.004     1.27e-08    5.14e-08
      gdp_pc |  -.0027561    .000644    -4.28   0.000    -.0040602   -.0014519
       _cons |   32.70461   2.660091    12.29   0.000     27.08917    38.32005
------------------------------------------------------------------------------
```

## The `amelia` class {#sec_out}

The output from the `amelia()` function is an instance of the S3 class `amelia`. Instances of the `amelia` class contain much more than simply the imputed datasets. The `mu` object of the class contains the posterior draws of the means of the complete data. The `covMatrices` object contains the posterior draws of the covariance matrices of the complete data. Note that these correspond to the variables as they are sent to the EM algorithm. Namely, they refer to the variables after being transformed, centered and scaled.
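For instance, we can inspect these components directly (a sketch; the exact dimensions depend on the call to `amelia()`, so the chunk is not evaluated here):

```{r class_peek, eval = FALSE}
dim(a.out$mu)          # posterior draws of the means
dim(a.out$covMatrices) # posterior draws of the covariance matrices
```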
The `iterHist` object is a list of `m` 3-column matrices. Each row of the matrices corresponds to an iteration of the EM algorithm. The first column indicates how many parameters had yet to converge at that iteration. The second column indicates if the EM algorithm made a step that decreased the number of converged parameters. The third column indicates whether the covariance matrix at this iteration was singular. Clearly, the last two columns are meant to indicate when the EM algorithm enters a problematic part of the parameter space. ## References
/scratch/gouwar.j/cran-all/cranData/Amelia/vignettes/using-amelia.Rmd
#' Raw Ames Housing Data
#'
#' Summon the data described by De Cock (2011) where 82 fields were recorded for 2,930 properties in Ames IA.
#'
#' @name ames_raw
#' @aliases ames_raw
#' @docType data
#' @return \item{ames_raw}{a tibble}
#' @details
#'
#' From the data documentation reference, the columns include:
#'
#' * `Order`: Observation number
#' * `PID`: Parcel identification number - can be used with city web site for parcel review.
#' * `MS SubClass`: Identifies the type of dwelling involved in the sale.
#' * `MS Zoning`: Identifies the general zoning classification of the sale.
#' * `Lot Frontage`: Linear feet of street connected to property
#' * `Lot Area`: Lot size in square feet
#' * `Street`: Type of road access to property
#' * `Alley`: Type of alley access to property
#' * `Lot Shape`: General shape of property
#' * `Land Contour`: Flatness of the property
#' * `Utilities`: Type of utilities available
#' * `Lot Config`: Lot configuration
#' * `Land Slope`: Slope of property
#' * `Neighborhood`: Physical locations within Ames city limits (map available)
#' * `Condition 1`: Proximity to various conditions
#' * `Condition 2`: Proximity to various conditions (if more than one is present)
#' * `Bldg Type`: Type of dwelling
#' * `House Style`: Style of dwelling
#' * `Overall Qual`: Rates the overall material and finish of the house
#' * `Overall Cond`: Rates the overall condition of the house
#' * `Year Built`: Original construction date
#' * `Year Remod/Add`: Remodel date (same as construction date if no remodeling or additions)
#' * `Roof Style`: Type of roof
#' * `Roof Matl`: Roof material
#' * `Exterior 1`: Exterior covering on house
#' * `Exterior 2`: Exterior covering on house (if more than one material)
#' * `Mas Vnr Type`: Masonry veneer type
#' * `Mas Vnr Area`: Masonry veneer area in square feet
#' * `Exter Qual`: Evaluates the quality of the material on the exterior
#' * `Exter Cond`: Evaluates the present condition of the material on the exterior
#' * `Foundation`: Type of foundation
#' * `Bsmt Qual`: Evaluates the height of the basement
#' * `Bsmt Cond`: Evaluates the general condition of the basement
#' * `Bsmt Exposure`: Refers to walkout or garden level walls
#' * `BsmtFin Type 1`: Rating of basement finished area
#' * `BsmtFin SF 1`: Type 1 finished square feet
#' * `BsmtFinType 2`: Rating of basement finished area (if multiple types)
#' * `BsmtFin SF 2`: Type 2 finished square feet
#' * `Bsmt Unf SF`: Unfinished square feet of basement area
#' * `Total Bsmt SF`: Total square feet of basement area
#' * `Heating`: Type of heating
#' * `HeatingQC`: Heating quality and condition
#' * `Central Air`: Central air conditioning
#' * `Electrical`: Electrical system
#' * `1st Flr SF`: First Floor square feet
#' * `2nd Flr SF`: Second floor square feet
#' * `Low Qual Fin SF`: Low quality finished square feet (all floors)
#' * `Gr Liv Area`: Above grade (ground) living area square feet
#' * `Bsmt Full Bath`: Basement full bathrooms
#' * `Bsmt Half Bath`: Basement half bathrooms
#' * `Full Bath`: Full bathrooms above grade
#' * `Half Bath`: Half baths above grade
#' * `Bedroom`: Bedrooms above grade (does NOT include basement bedrooms)
#' * `Kitchen`: Kitchens above grade
#' * `KitchenQual`: Kitchen quality
#' * `TotRmsAbvGrd`: Total rooms above grade (does not include bathrooms)
#' * `Functional`: Home functionality (Assume typical unless deductions are warranted)
#' * `Fireplaces`: Number of fireplaces
#' * `FireplaceQu`: Fireplace quality
#' * `Garage Type`: Garage location
#'
* `Garage Yr Blt`: Year garage was built
#' * `Garage Finish`: Interior finish of the garage
#' * `Garage Cars`: Size of garage in car capacity
#' * `Garage Area`: Size of garage in square feet
#' * `Garage Qual`: Garage quality
#' * `Garage Cond`: Garage condition
#' * `Paved Drive`: Paved driveway
#' * `Wood Deck SF`: Wood deck area in square feet
#' * `Open Porch SF`: Open porch area in square feet
#' * `Enclosed Porch`: Enclosed porch area in square feet
#' * `3-Ssn Porch`: Three season porch area in square feet
#' * `Screen Porch`: Screen porch area in square feet
#' * `Pool Area`: Pool area in square feet
#' * `Pool QC`: Pool quality
#' * `Fence`: Fence quality
#' * `Misc Feature`: Miscellaneous feature not covered in other categories
#' * `Misc Val`: $Value of miscellaneous feature
#' * `Mo Sold`: Month Sold
#' * `Yr Sold`: Year Sold
#' * `Sale Type`: Type of sale
#' * `Sale Condition`: Condition of sale
#'
#' @source De Cock, D. (2011). "Ames, Iowa: Alternative to the Boston Housing Data as an End of Semester Regression Project," \emph{Journal of Statistics Education}, Volume 19, Number 3.
#'
#' \url{https://ww2.amstat.org/publications/jse/v19n3/decock/DataDocumentation.txt}
#'
#' \url{http://ww2.amstat.org/publications/jse/v19n3/decock.pdf}
#'
#' @keywords datasets
NULL

#' @rdname ames_raw
#' @name ames_geo
#' @aliases ames_geo
NULL

#' @rdname ames_raw
#' @name ames_new
#' @aliases ames_new
NULL

#' @rdname ames_raw
#' @name hood_levels
#' @aliases hood_levels
NULL

#' Ames Public Schools
#'
#' Locations of local schools and outline of elementary school districts.
#' Elementary school district boundaries are returned as an `sf` object in CRS 4326.
#'
#' @name ames_schools
#' @aliases ames_schools_geo
#' @docType data
#' @return \item{ames_schools_geo}{a tibble}
#' @details
#'
#' The data set includes the school name and the geocodes.
#' @keywords datasets
NULL

#' @rdname ames_schools
#' @name ames_school_districts_sf
#' @aliases ames_school_districts_sf
NULL
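## A minimal usage sketch (kept as comments so it does not run at package
## load; it only restates facts from the documentation above):
# library(AmesHousing)
# dim(ames_raw)                  # 2930 properties by 82 fields
# head(ames_raw[["Lot Area"]])   # raw column names contain spaces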
/scratch/gouwar.j/cran-all/cranData/AmesHousing/R/ames_raw.R
#' Create a Processed Version of the Ames Housing Data
#'
#' @details
#' For the processed version, the exact details can be found in
#' the code of `make_ames` but a summary of the differences between
#' these data sets and `ames_raw` is:
#'
#' * All factors are _unordered_.
#' * `PID` and `Order` are removed.
#' * Spaces and special characters in column names were changed
#' to snake case. To be consistent, `SalePrice` was changed to
#' `Sale_Price`.
#' * Many factor levels were changed to be more understandable
#' (e.g. `Split_or_Multilevel` instead of `080`)
#' * Many missing values were reset. For example, if the variable
#' `Bsmt_Qual` was missing, this implies that there is no basement
#' on the property. Instead of a missing value, the value of
#' `Bsmt_Qual` was changed to `No_Basement`. Similarly, numeric
#' data pertaining to basements were set to zero where appropriate
#' such as variables `Bsmt_Full_Bath` and `Total_Bsmt_SF`.
#' * `Garage_Yr_Blt` contained a large amount of missing data and was removed.
#' * Approximate longitude and latitude are included for the
#' properties. Also, note that there are 6 properties with
#' identical geotags. These are units within the same building.
#' For some properties, updated versions of the PID identifiers
#' were found and are replaced with new values.
#'
#' `make_ordinal_ames` is the same as `make_ames` but many factor
#' variables were changed to class `ordered` (see below).
#'
#' The documentation for [ames_raw()] contains descriptions of
#' the columns although, as noted above, the column names in
#' [ames_raw()] are slightly different from the processed
#' versions.
#'
#' `make_ames_new()` creates a data set of new properties. These were populated
#' using fewer data sources than the original and lack a number of the condition
#' and quality fields. Both properties were unsold at the time of this writing.
#' @return A tibble with the data.
#' @examples
#' ames <- make_ames()
#' nrow(ames)
#' summary(ames$Sale_Price)
#'
#' ames_ord <- make_ordinal_ames()
#' ord_vars <- vapply(ames_ord, is.ordered, logical(1))
#' names(ord_vars)[ord_vars]
#' @export
#' @importFrom dplyr add_rownames vars contains
#' @importFrom dplyr funs rename_at rename mutate recode_factor
#' @importFrom dplyr recode filter select inner_join
#
make_ames <- function() {
  process_ames(AmesHousing::ames_raw)
}

#' @export
#' @rdname make_ames
make_ames_new <- function() {
  process_ames(AmesHousing::ames_new)
}

process_ames <- function(dat) {
  out <- dat %>%
    # Rename variables with spaces or that begin with numbers.
    # SalePrice would be inconsistently named so change that too.
dplyr::rename_with( ~ gsub(' ', '_', .), dplyr::contains(' '), ) %>% dplyr::rename( Sale_Price = SalePrice, Three_season_porch = `3Ssn_Porch`, Year_Remod_Add = `Year_Remod/Add`, First_Flr_SF = `1st_Flr_SF`, Second_Flr_SF = `2nd_Flr_SF`, Year_Sold = Yr_Sold ) %>% # Remove leading zeros dplyr::mutate( MS_SubClass = as.character(as.integer(MS_SubClass)) ) %>% # Make more meaningful factor levels for some variables dplyr::mutate( MS_SubClass = dplyr::recode_factor( factor(MS_SubClass), '20' = 'One_Story_1946_and_Newer_All_Styles', '30' = 'One_Story_1945_and_Older', '40' = 'One_Story_with_Finished_Attic_All_Ages', '45' = 'One_and_Half_Story_Unfinished_All_Ages', '50' = 'One_and_Half_Story_Finished_All_Ages', '60' = 'Two_Story_1946_and_Newer', '70' = 'Two_Story_1945_and_Older', '75' = 'Two_and_Half_Story_All_Ages', '80' = 'Split_or_Multilevel', '85' = 'Split_Foyer', '90' = 'Duplex_All_Styles_and_Ages', '120' = 'One_Story_PUD_1946_and_Newer', '150' = 'One_and_Half_Story_PUD_All_Ages', '160' = 'Two_Story_PUD_1946_and_Newer', '180' = 'PUD_Multilevel_Split_Level_Foyer', '190' = 'Two_Family_conversion_All_Styles_and_Ages' ) ) %>% dplyr::mutate( MS_Zoning = dplyr::recode_factor( factor(MS_Zoning), 'A' = 'Agriculture', 'C' = 'Commercial', 'FV' = 'Floating_Village_Residential', 'I' = 'Industrial', 'RH' = 'Residential_High_Density', 'RL' = 'Residential_Low_Density', 'RP' = 'Residential_Low_Density_Park', 'RM' = 'Residential_Medium_Density', 'A (agr)' = 'A_agr', 'C (all)' = 'C_all', 'I (all)' = 'I_all' ) ) %>% dplyr::mutate( Lot_Shape = dplyr::recode_factor( factor(Lot_Shape), 'Reg' = 'Regular', 'IR1' = 'Slightly_Irregular', 'IR2' = 'Moderately_Irregular', 'IR3' = 'Irregular' ) ) %>% dplyr::mutate(Bldg_Type = dplyr::recode_factor(factor(Bldg_Type), '1Fam' = 'OneFam', '2fmCon' = 'TwoFmCon')) %>% # Change some factor levels so that they make valid R variable names dplyr::mutate( House_Style = gsub("^1.5", "One_and_Half_", House_Style), House_Style = gsub("^1", "One_", House_Style), House_Style = gsub("^2.5", "Two_and_Half_", House_Style), House_Style = gsub("^2", "Two_", House_Style), House_Style = factor(House_Style) ) %>% # Some characteristics that houses lack (e.g. garage, pool) are # coded as missing instead of "No_pool" or "No_Garage". Change these # and also cases where the number of missing (e.g. 
garage size)
    dplyr::mutate(
      Bsmt_Exposure = ifelse(is.na(Bsmt_Exposure), "No_Basement", Bsmt_Exposure),
      Bsmt_Exposure = factor(Bsmt_Exposure),
      BsmtFin_Type_1 = ifelse(is.na(BsmtFin_Type_1), "No_Basement", BsmtFin_Type_1),
      BsmtFin_Type_1 = factor(BsmtFin_Type_1),
      BsmtFin_SF_1 = ifelse(is.na(BsmtFin_SF_1), 0, BsmtFin_SF_1),
      BsmtFin_Type_2 = ifelse(is.na(BsmtFin_Type_2), "No_Basement", BsmtFin_Type_2),
      BsmtFin_Type_2 = factor(BsmtFin_Type_2),
      BsmtFin_SF_2 = ifelse(is.na(BsmtFin_SF_2), 0, BsmtFin_SF_2),
      Bsmt_Unf_SF = ifelse(is.na(Bsmt_Unf_SF), 0, Bsmt_Unf_SF),
      Total_Bsmt_SF = ifelse(is.na(Total_Bsmt_SF), 0, Total_Bsmt_SF),
      Bsmt_Full_Bath = ifelse(is.na(Bsmt_Full_Bath), 0, Bsmt_Full_Bath),
      Bsmt_Half_Bath = ifelse(is.na(Bsmt_Half_Bath), 0, Bsmt_Half_Bath),
      Electrical = ifelse(is.na(Electrical), "Unknown", Electrical)
    ) %>%
    dplyr::mutate(Garage_Type = dplyr::recode(Garage_Type, '2Types' = 'More_Than_Two_Types')) %>%
    dplyr::mutate(
      Garage_Type = ifelse(is.na(Garage_Type), "No_Garage", Garage_Type),
      Garage_Finish = ifelse(is.na(Garage_Finish), "No_Garage", Garage_Finish),
      Garage_Cars = ifelse(is.na(Garage_Cars), 0, Garage_Cars),
      Garage_Area = ifelse(is.na(Garage_Area), 0, Garage_Area),
      Bsmt_Full_Bath = ifelse(is.na(Bsmt_Full_Bath), 0, Bsmt_Full_Bath),
      Bsmt_Half_Bath = ifelse(is.na(Bsmt_Half_Bath), 0, Bsmt_Half_Bath),
      Misc_Feature = ifelse(is.na(Misc_Feature), "None", Misc_Feature),
      Mas_Vnr_Type = ifelse(is.na(Mas_Vnr_Type), "None", Mas_Vnr_Type),
      Mas_Vnr_Area = ifelse(is.na(Mas_Vnr_Area), 0, Mas_Vnr_Area),
      Lot_Frontage = ifelse(is.na(Lot_Frontage), 0, Lot_Frontage)
    ) %>%
    mutate(
      Overall_Qual = dplyr::recode(
        Overall_Qual,
        `10` = "Very_Excellent",
        `9` = "Excellent",
        `8` = "Very_Good",
        `7` = "Good",
        `6` = "Above_Average",
        `5` = "Average",
        `4` = "Below_Average",
        `3` = "Fair",
        `2` = "Poor",
        `1` = "Very_Poor"
      )
    ) %>%
    mutate(
      Overall_Cond = dplyr::recode(
        Overall_Cond,
        `10` = "Very_Excellent",
        `9` = "Excellent",
        `8` = "Very_Good",
        `7` = "Good",
        `6` = "Above_Average",
        `5` = "Average",
        `4` = "Below_Average",
        `3` = "Fair",
        `2` = "Poor",
        `1` = "Very_Poor"
      )
    ) %>%
    mutate(
      Exter_Qual = dplyr::recode(
        Exter_Qual,
        "Ex" = "Excellent", "Gd" = "Good", "TA" = "Typical", "Fa" = "Fair", "Po" = "Poor"
      )
    ) %>%
    mutate(
      Exter_Cond = dplyr::recode(
        Exter_Cond,
        "Ex" = "Excellent", "Gd" = "Good", "TA" = "Typical", "Fa" = "Fair", "Po" = "Poor"
      )
    ) %>%
    mutate(
      Bsmt_Qual = dplyr::recode(
        Bsmt_Qual,
        "Ex" = "Excellent", "Gd" = "Good", "TA" = "Typical", "Fa" = "Fair", "Po" = "Poor",
        .missing = "No_Basement"
      )
    ) %>%
    mutate(
      Bsmt_Cond = dplyr::recode(
        Bsmt_Cond,
        "Ex" = "Excellent", "Gd" = "Good", "TA" = "Typical", "Fa" = "Fair", "Po" = "Poor",
        .missing = "No_Basement"
      )
    ) %>%
    mutate(
      Heating_QC = dplyr::recode(
        Heating_QC,
        "Ex" = "Excellent", "Gd" = "Good", "TA" = "Typical", "Fa" = "Fair", "Po" = "Poor"
      )
    ) %>%
    mutate(
      Kitchen_Qual = dplyr::recode(
        Kitchen_Qual,
        "Ex" = "Excellent", "Gd" = "Good", "TA" = "Typical", "Fa" = "Fair", "Po" = "Poor"
      )
    ) %>%
    mutate(
      Fireplace_Qu = dplyr::recode(
        Fireplace_Qu,
        "Ex" = "Excellent", "Gd" = "Good", "TA" = "Typical", "Fa" = "Fair", "Po" = "Poor",
        .missing = "No_Fireplace"
      )
    ) %>%
    mutate(
      Garage_Qual = dplyr::recode(
        Garage_Qual,
        "Ex" = "Excellent", "Gd" = "Good", "TA" = "Typical", "Fa" = "Fair", "Po" = "Poor",
        .missing = "No_Garage"
      )
    ) %>%
    mutate(
      Garage_Cond = dplyr::recode(
        Garage_Cond,
        "Ex" = "Excellent", "Gd" = "Good", "TA" = "Typical", "Fa" = "Fair", "Po" = "Poor",
        .missing = "No_Garage"
      )
    ) %>%
    mutate(
      Pool_QC = dplyr::recode(
        Pool_QC,
        "Ex" = "Excellent", "Gd" = "Good", "TA" =
"Typical", "Fa" = "Fair", "Po" = "Poor", .missing = "No_Pool" ) ) %>% mutate( Neighborhood = dplyr::recode( Neighborhood, "Blmngtn" = "Bloomington_Heights", "Bluestem" = "Bluestem", "BrDale" = "Briardale", "BrkSide" = "Brookside", "ClearCr" = "Clear_Creek", "CollgCr" = "College_Creek", "Crawfor" = "Crawford", "Edwards" = "Edwards", "Gilbert" = "Gilbert", "Greens" = "Greens", "GrnHill" = "Green_Hills", "IDOTRR" = "Iowa_DOT_and_Rail_Road", "Landmrk" = "Landmark", "MeadowV" = "Meadow_Village", "Mitchel" = "Mitchell", "NAmes" = "North_Ames", "NoRidge" = "Northridge", "NPkVill" = "Northpark_Villa", "NridgHt" = "Northridge_Heights", "NWAmes" = "Northwest_Ames", "OldTown" = "Old_Town", "SWISU" = "South_and_West_of_Iowa_State_University", "Sawyer" = "Sawyer", "SawyerW" = "Sawyer_West", "Somerst" = "Somerset", "StoneBr" = "Stone_Brook", "Timber" = "Timberland", "Veenker" = "Veenker", "Hayden Lake" = "Hayden_Lake" ) ) %>% mutate( Alley = dplyr::recode( Alley, "Grvl" = "Gravel", "Pave" = "Paved", .missing = "No_Alley_Access" ) ) %>% mutate( Paved_Drive = dplyr::recode( Paved_Drive, "Y" = "Paved", "P" = "Partial_Pavement", "N" = "Dirt_Gravel" ) ) %>% mutate( Fence = dplyr::recode( Fence, "GdPrv" = "Good_Privacy", "MnPrv" = "Minimum_Privacy", "GdWo" = "Good_Wood", "MnWw" = "Minimum_Wood_Wire", .missing = "No_Fence" ) ) %>% # Convert everything else to factors dplyr::mutate( Alley = factor(Alley), Bsmt_Qual = factor(Bsmt_Qual), Bsmt_Cond = factor(Bsmt_Cond), Central_Air = factor(Central_Air), Condition_1 = factor(Condition_1), Condition_2 = factor(Condition_2), Electrical = factor(Electrical), Exter_Cond = factor(Exter_Cond), Exter_Qual = factor(Exter_Qual), Exterior_1st = factor(Exterior_1st), Exterior_2nd = factor(Exterior_2nd), Fence = factor(Fence), Fireplace_Qu = factor(Fireplace_Qu), Foundation = factor(Foundation), Functional = factor(Functional), Garage_Cond = factor(Garage_Cond), Garage_Finish = factor(Garage_Finish), Garage_Qual = factor(Garage_Qual), Garage_Type = factor(Garage_Type), Heating = factor(Heating), Heating_QC = factor(Heating_QC), Kitchen_Qual = factor(Kitchen_Qual), Land_Contour = factor(Land_Contour), Land_Slope = factor(Land_Slope), Lot_Config = factor(Lot_Config), Mas_Vnr_Type = factor(Mas_Vnr_Type), Misc_Feature = factor(Misc_Feature), Paved_Drive = factor(Paved_Drive), Pool_QC = factor(Pool_QC), Roof_Matl = factor(Roof_Matl), Roof_Style = factor(Roof_Style), Sale_Condition = factor(Sale_Condition), Sale_Type = factor(Sale_Type), Street = factor(Street), Utilities = factor(Utilities), Overall_Qual = factor(Overall_Qual, levels = rev(ten_point)), Overall_Cond = factor(Overall_Cond, levels = rev(ten_point)) ) %>% # see issue #2, updated PIDs for some properties mutate( PID = ifelse(PID == "0904351040", "0904351045,", PID), PID = ifelse(PID == "0535300120", "0535300125,", PID), PID = ifelse(PID == "0902401130", "0902401135,", PID), PID = ifelse(PID == "0906226090", "0906226090,", PID), PID = ifelse(PID == "0908154040", "0908154045,", PID), PID = ifelse(PID == "0909129100", "0909129105,", PID), PID = ifelse(PID == "0914465040", "0914465043,", PID), PID = ifelse(PID == "0902103150", "0902103145,", PID), PID = ifelse(PID == "0902401120", "0902401125,", PID), PID = ifelse(PID == "0916253320", "0916256880,", PID), PID = ifelse(PID == "0916477060", "0916477065,", PID), PID = ifelse(PID == "0916325040", "0916325045,", PID) ) %>% dplyr::inner_join(AmesHousing::ames_geo, by = "PID") %>% # Garage_Yr_Blt is removed due to a fair amount of missing data dplyr::select(-Order,-PID, 
-Garage_Yr_Blt) out <- out %>% dplyr::mutate( Neighborhood = factor(Neighborhood, levels = AmesHousing::hood_levels) ) out } ten_point <- c( "Very_Excellent", "Excellent", "Very_Good", "Good", "Above_Average", "Average", "Below_Average", "Fair", "Poor", "Very_Poor" ) five_point <- c( "Excellent", "Good", "Typical", "Fair", "Poor" ) #' @rdname make_ames #' @export make_ordinal_ames <- function() { get_no <- function(x) grep("^No", levels(x), value = TRUE) out <- make_ames() out$Lot_Shape <- ordered( as.character(out$Lot_Shape), levels = c("Irregular", "Moderately_Irregular", "Slightly_Irregular", "Regular") ) out$Land_Contour <- ordered( as.character(out$Land_Contour), levels = c("Low", "HLS", "Bnk", "Lvl") ) out$Utilities <- ordered( as.character(out$Utilities), levels = c("ELO", "NoSeWa", "NoSewr", "AllPub") ) out$Land_Slope <- ordered( as.character(out$Land_Slope), levels = c("Sev", "Mod", "Gtl") ) out$Overall_Qual <- ordered( as.character(out$Overall_Qual), levels = rev(ten_point) ) out$Overall_Cond <- ordered( as.character(out$Overall_Cond), levels = rev(ten_point) ) out$Exter_Qual <- ordered( as.character(out$Exter_Qual), levels = rev(five_point) ) out$Exter_Cond <- ordered( as.character(out$Exter_Cond), levels = rev(five_point) ) out$Bsmt_Qual <- ordered( as.character(out$Bsmt_Qual), levels = c(get_no(out$Bsmt_Qual), rev(five_point)) ) out$Bsmt_Cond <- ordered( as.character(out$Bsmt_Cond), levels = c(get_no(out$Bsmt_Cond), rev(five_point)) ) out$Bsmt_Exposure <- ordered( as.character(out$Bsmt_Exposure), levels = c( "No_Basement", "No", "Mn", "Av", "Gd" ) ) out$BsmtFin_Type_1 <- ordered( as.character(out$BsmtFin_Type_1), levels = c( "No_Basement", "Unf", "LwQ", "Rec", "BLQ", "ALQ", "GLQ" ) ) out$BsmtFin_Type_2 <- ordered( as.character(out$BsmtFin_Type_2), levels = c( "No_Basement", "Unf", "LwQ", "Rec", "BLQ", "ALQ", "GLQ" ) ) out$Heating_QC <- ordered( as.character(out$Heating_QC), levels = rev(five_point) ) out$Electrical <- ordered( as.character(out$Electrical), levels = c("Mix", "FuseP", "FuseF", "FuseA", "SBrkr") ) out$Kitchen_Qual <- ordered( as.character(out$Kitchen_Qual), levels = rev(five_point) ) out$Functional <- ordered( as.character(out$Functional), levels = c( "Sal", "Sev", "Maj2", "Maj1", "Mod", "Min2", "Min1", "Typ" ) ) out$Fireplace_Qu <- ordered( as.character(out$Fireplace_Qu), levels = c(get_no(out$Fireplace_Qu), rev(five_point)) ) out$Garage_Finish <- ordered( as.character(out$Garage_Finish), levels = c(get_no(out$Garage_Finish), "Unf", "RFn", "Fin") ) out$Garage_Qual <- ordered( as.character(out$Garage_Qual), levels = c(get_no(out$Garage_Qual), rev(five_point)) ) out$Garage_Cond <- ordered( as.character(out$Garage_Cond), levels = c(get_no(out$Garage_Cond), rev(five_point)) ) out$Paved_Drive <- ordered( as.character(out$Paved_Drive), levels = c("Dirt_Gravel", "Partial_Pavement", "Paved") ) out$Pool_QC <- ordered( as.character(out$Pool_QC), levels = c(get_no(out$Pool_QC), rev(five_point)) ) out$Fence <- ordered( as.character(out$Fence), levels = c("No_Fence", "Minimum_Wood_Wire", "Good_Wood", "Minimum_Privacy", "Good_Privacy") ) out } ames_vars <- c('.', 'SalePrice', '3Ssn_Porch', 'Year_Remod/Add', '1st_Flr_SF', '2nd_Flr_SF', 'MS_SubClass', 'MS_Zoning', 'Alley', 'Lot_Shape', 'Bldg_Type', 'House_Style', 'Bsmt_Qual', 'Bsmt_Cond', 'Bsmt_Exposure', 'BsmtFin_Type_1', 'BsmtFin_SF_1', 'BsmtFin_Type_2', 'BsmtFin_SF_2', 'Bsmt_Unf_SF', 'Total_Bsmt_SF', 'Bsmt_Full_Bath', 'Bsmt_Half_Bath', 'Fireplace_Qu', 'Garage_Type', 'Garage_Finish', 'Garage_Qual', 'Garage_Cond', 
'Garage_Cars', 'Garage_Area', 'Pool_QC', 'Fence', 'Misc_Feature', 'Mas_Vnr_Type', 'Mas_Vnr_Area', 'Lot_Frontage', 'Central_Air', 'Condition_1', 'Condition_2', 'Electrical', 'Exter_Cond', 'Exter_Qual', 'Exterior_1st', 'Exterior_2nd', 'Foundation', 'Functional', 'Heating', 'Heating_QC', 'Kitchen_Qual', 'Land_Contour', 'Land_Slope', 'Lot_Config', 'Neighborhood', 'Yr_Sold', 'Overall_Cond', 'Overall_Qual', 'Paved_Drive', 'Roof_Matl', 'Roof_Style', 'Sale_Condition', 'Sale_Type', 'Street', 'Utilities', 'Order', 'PID', 'Garage_Yr_Blt') #' @importFrom utils globalVariables utils::globalVariables(ames_vars)
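## Quick check of the ordinal version (a sketch, kept as comments so it
## does not run at package load):
# ames_ord <- make_ordinal_ames()
# levels(ames_ord$Overall_Qual)  # ordered from "Very_Poor" up to "Very_Excellent"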
/scratch/gouwar.j/cran-all/cranData/AmesHousing/R/make_ames.R
#' @importFrom magrittr %>% #' @export magrittr::`%>%`
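## Usage sketch (as comments): the re-export above lets users pipe without
## attaching magrittr explicitly, e.g.
# make_ames() %>% head()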
/scratch/gouwar.j/cran-all/cranData/AmesHousing/R/misc.R
#' The Amiga File Format Handler package
#'
#' The Amiga File Format Handler package (AmigaFFH) is designed to interpret file formats that were native
#' to Commodore Amiga machines.
#'
#' In combination with the adfExplorer package this package can be used to interpret older file formats that
#' were native to the Commodore Amiga. The focus of this package will be on the older system (Amiga OS <= 3.0).
#' This will allow you to analyse and interpret these files in the scripted environment of R.
#'
#' Note that all functions and methods in this package are implemented as scripted source code and may not run
#' very fast.
#' @section Supported File Formats:
#' This package supports a number of file formats. The ProTracker module file format requires sophisticated
#' interpretation and a dedicated package ([ProTrackR][ProTrackR::ProTrackR-package]) has been developed for
#' that purpose.
#'
#' The following formats are supported by this package (to some extent):
#'
#' * [Amiga Basic][AmigaFFH::AmigaBasic] binary encoded scripts and
#' [Amiga Basic shapes][AmigaFFH::AmigaBasicShape] which
#' were used by such scripts to display specific graphics.
#' * Bitmap Font (.font). Originally fonts were stored in separate files on
#' the Amiga. An overarching '.font' file contained generic information,
#' among other things the specific pixel heights that were available for a font.
#' The actual font bitmap images were stored in separate files. There
#' was a file available for each individual font height. For more details
#' see [AmigaBitmapFont()] and [AmigaBitmapFontSet()].
#' * Interchange File Format (IFF). This file format is actually a container for a wide variety of
#' data flavours, of which the following are supported:
#' * 8SVX (8-bit sampled voices (i.e., audio)). There are no major restrictions in this package's
#' implementation.
#' * ANIM (animations). Not all display modes are supported as per ILBM. Furthermore, the vertical byterun
#' encoding for the animation frames is the only encoding currently supported.
#' * ILBM (InterLeaved BitMap images). Specific display modes (such as 'extra halfbrite') can in
#' some cases be decoded, but encoding for these modes may not (yet) be supported.
#' * For more details see [IFFChunk()], [interpretIFFChunk()], [read.iff()] and
#' [write.iff()].
#' * Hardware sprites. This format follows the hardware structure for displaying sprites on the screen.
#' It is usually not used
#' as a file format as such, but it can be found embedded in some files (for instance the mouse pointer
#' is embedded as a
#' hardware sprite in the 'system-configuration' file). For more details see [hardwareSprite()].
#' * System-configuration. A file that was stored in the 'devs' directory of a system disk.
#' As the file name suggests, it holds many of the system's configurations. See [SysConfig]
#' for more details.
#' * Workbench icons (.info). Icons (i.e., graphical representation of files and directories
#' on the Amiga) were stored as separate files with the extension '.info'. See
#' [AmigaIcon()] for more details.
#'
#' In future versions of this package more file types may be added to this list.
#' @section In Addition...:
#' Several helper functions are also exported by this package. These give you access
#' to older compression techniques, such as run length encoding ([packBitmap()])
#' and delta Fibonacci compression ([deltaFibonacciCompress()]), but also to other
#' techniques that will help in converting modern files into classic file formats and
#' vice versa, such as the function to [dither()] full colour images to a limited
#' Such as for instance the function to [dither()] full colour images to a limited #' colour palette. #' @references #' Documentation on several Amiga File types: #' <http://amigadev.elowar.com/read/ADCD_2.1/Devices_Manual_guide/> #' @keywords internal "_PACKAGE" NULL
/scratch/gouwar.j/cran-all/cranData/AmigaFFH/R/AmigaFFH-package.r
.amigaIntToRaw <- function(x, bits = 8, signed = F) { x <- round(x) if (!signed && any(x < 0)) stop("negative values not allowed for unsigned values.") val.range <- c(0, 2^bits - 1) if (signed) val.range <- c(-(2^bits)/2,(2^bits)/2 - 1) if (any(x < val.range[1]) || any(x > val.range[2])) { warning("One or more values are out of the specified bit-range. They will be clipped...") x[x < val.range[1]] <- val.range[1] x[x > val.range[2]] <- val.range[2] } if (signed) x[x < 0] <- (2^bits) + x[x < 0] ## used later on to reorder bits for the little-endian bytes idx <- sort(rep(((1:(bits/8)) - 1)*8, 8), T) + rep(1:8, bits/8) result <- unlist(lapply(x, function(y) { bitlist <- NULL while (y > 0) { bitlist <- c(bitlist, y %% 2) y <- floor(y/2) } bitlist <- c(bitlist, numeric(bits - length(bitlist))) res <- packBits(as.logical(bitlist)[idx], "raw") return(res) })) return(result) } .bitmapToRaw <- function(x, invert.bytes = T, invert.longs = T) { # 'x' should be anything that is accepted by packBits if (!all("logical" %in% c(typeof(invert.bytes), typeof(invert.longs)))) stop ("Both 'invert.bytes' and 'invert.longs' should be a logical value.") if (length(invert.bytes) != 1 || length(invert.longs) != 1) stop("Both 'invert.bytes' and 'invert.longs' should have a length of 1.") true.len <- length(x) ## pad with zeros x <- c(x, raw(32 - (true.len %% 32))) len <- length(x)/8 if (invert.bytes) { ord <- 1 + sort(rep((0:(len - 1))*8, 8)) + (7:0) } else { ord <- 1:(8*len) } if (invert.longs) { l2 <- ceiling(8*len/32) ord2 <- 1 + sort(rep((0:(l2 - 1))*32, 32)) + (31:0) ord2 <- ord2[1:(8*len)] x <- x[ord2] } ## order results and trim length to correspond with input x <- packBits(x[ord])[1:ceiling(true.len/8)] return(x) } .rawToAmigaInt <- function(x, bits = 8, signed = F) { # Convert raw values into Amiga integers (BYTE (8 bit signed), UBYTE (8 bit unsigned), # WORD (16 bit signed), UWORD (16 bit unsigned), LONG (32 bit signed), ULONG (32 bit unsigned)) if ((bits %% 8) != 0 || bits < 8) stop("Bits should be positive, it should also be a multitude of 8 (or 8 itself).") # pad x with zeros when it does not consist of a multitude of specified bits x <- c(x, raw(length(x) %% (bits/8))) i.start <- 1:floor(length(x)/(bits/8)) i.stop <- i.start*(bits/8) i.start <- (i.start - 1)*(bits/8) + 1 result <- mapply(function(start, stop) { y <- x[start:stop] result <- as.numeric(unlist(lapply(y, function(z) rev(rawToBits(z))))) result <- sum(2^(which(rev(result) == as.raw(0x01)) - 1)) return(result) }, start = i.start, stop = i.stop) if (signed) { result[result >= (2^bits)/2] <- result[result >= (2^bits)/2] - (2^bits) return(result) } else { return(result) } } .rawToBitmap <- function(x, invert.bytes = F, invert.longs = T) { if (typeof(x) != "raw") stop("Argument 'x' should be a vector of raw data.") if (!all("logical" %in% c(typeof(invert.bytes), typeof(invert.longs)))) stop ("Both 'invert.bytes' and 'invert.longs' should be a logical value.") if (length(invert.bytes) != 1 || length(invert.longs) != 1) stop("Both 'invert.bytes' and 'invert.longs' should have a length of 1.") ## pad data with zeros and trim at the end true.len <- length(x) x <- c(x, raw(4 - (true.len %% 4))) len <- length(x) if (invert.longs) { l2 <- ceiling(len/4) ord2 <- 1 + sort(rep((0:(l2 - 1))*4, 4)) + (3:0) ord2 <- ord2[1:len] x <- x[ord2] } if (invert.bytes) { ord <- 1 + sort(rep((0:(len - 1))*8, 8)) + (7:0) } else { ord <- 1:(8*len) } ## trim the result to correspond with the input length (data might get lost!) rawToBits(x)[ord][1:(true.len*8)] }
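## Round-trip sketch for the integer helpers above (kept as comments, since
## these functions are internal and unexported):
# .amigaIntToRaw(-1, bits = 16, signed = TRUE)                    # ff ff
# .rawToAmigaInt(as.raw(c(0xff, 0xff)), bits = 16, signed = TRUE) # -1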
/scratch/gouwar.j/cran-all/cranData/AmigaFFH/R/adfExplorer.clone.r
#' The S3 AmigaBasic class
#'
#' A class that represents the content of Amiga Basic files.
#'
#' Amiga Basic is a [BASIC](https://en.wikipedia.org/wiki/BASIC)-style programming language that was shipped
#' with early Commodore Amiga machines. It requires an interpreter to run an Amiga Basic script. The AmigaFFH
#' package does not interpret Amiga Basic scripts. It does allow for encoding and decoding scripts in the binary
#' format in which they were originally stored on the Amiga. Amiga Basic scripts were stored as encoded binaries instead
#' of ASCII text files in order to save (at the time precious) memory and disk space.
#'
#' Amiga Basic binary files start with a file header (as an identifier) and are followed by each line of the script
#' as binary data. The `AmigaBasic`-class object stores each line of the script as a `vector` of `raw` data in a
#' separate `list` item. Use [as.character()] and [as.AmigaBasic()] to switch between
#' `character` data and `AmigaBasic`-class objects.
#'
#' @note Although there is ample reference material on the Amiga BASIC language, there is no documentation
#' available on the script file storage format. The implementation in the AmigaFFH package is all the result of
#' painstaking reverse engineering on my part. Consequently, the Amiga Basic file encoders and decoders implemented
#' here may not be infallible.
#' @docType class
#' @name AmigaBasic
#' @rdname AmigaBasic
#' @family AmigaBasic.operations
#' @author Pepijn de Vries
#' @examples
#' \dontrun{
#' ## This creates an AmigaBasic-class object:
#' bas <- as.AmigaBasic("PRINT \"hello world!\"")
#'
#' ## This will decode the object as plain text:
#' as.character(bas)
#' }
#' @references <https://en.wikipedia.org/wiki/AmigaBASIC>
NULL

#' The S3 AmigaBasicShape class
#'
#' A class that represents the file format used by Amiga Basic to store bitmap graphics: blitter objects and sprites.
#'
#' Amiga Basic used a specific format to store bitmap images that could be displayed using Basic code. Both
#' sprites and blitter objects can be stored and used. This class is used to represent such files.
#' #' @docType class #' @name AmigaBasicShape #' @rdname AmigaBasicShape #' @family AmigaBasicShape.operations #' @author Pepijn de Vries #' @examples #' \dontrun{ #' ball <- read.AmigaBasicShape(system.file("ball.shp", package = "AmigaFFH")) #' r_logo <- read.AmigaBasicShape(system.file("r_logo.shp", package = "AmigaFFH")) #' #' plot(ball) #' plot(r_logo) #' } NULL .amigabasicshape.flags <- c("fVSprite", "collisionPlaneIncluded", "imageShadowIncluded", "saveBack", "overlay", "saveBob", sprintf("reserved%02i", 1:10)) .amigabasic_commands <- read.table(text = "code1,code2,command 80,00,ABS 81,00,ASC 82,00,ATN 83,00,CALL 84,00,CDBL 85,00,CHR$ 86,00,CINT 87,00,CLOSE 88,00,COMMON 89,00,COS 8a,00,CVD 8b,00,CVI 8c,00,CVS 8d,00,DATA 8e,00,ELSE 8f,00,EOF 90,00,EXP 91,00,FIELD 92,00,FIX 93,00,FN 94,00,FOR 95,00,GET 96,00,GOSUB 97,00,GOTO 98,00,IF 99,00,INKEY$ 9a,00,INPUT 9b,00,INT 9c,00,LEFT$ 9d,00,LEN 9e,00,LET 9f,00,LINE a1,00,LOC a2,00,LOF a3,00,LOG a4,00,LSET a5,00,MID$ a6,00,MKD$ a7,00,MKI$ a8,00,MKS$ a9,00,NEXT aa,00,ON ab,00,OPEN ac,00,PRINT ad,00,PUT ae,00,READ af,00,REM af,e8,' b0,00,RETURN b1,00,RIGHT$ b2,00,RND b3,00,RSET b4,00,SGN b5,00,SIN b6,00,SPACE$ b7,00,SQR b8,00,STR$ b9,00,STRING$ ba,00,TAN bc,00,VAL bd,00,WEND be,ec,WHILE bf,00,WRITE c0,00,ELSEIF c1,00,CLNG c2,00,CVL c3,00,MKL$ c4,00,AREA e3,00,STATIC e4,00,USING e5,00,TO e6,00,THEN e7,00,NOT e9,00,> ea,00,= eb,00,< ec,00,+ ed,00,- ee,00,* ef,00,/ f0,00,^ f1,00,AND f2,00,OR f3,00,XOR f4,00,EQV f5,00,IMP f6,00,MOD f7,00,\\ f8,81,CHAIN f8,82,CLEAR f8,83,CLS f8,84,CONT f8,85,CSNG f8,86,DATE$ f8,87,DEFINT f8,88,DEFSNG f8,89,DEFDBL f8,8a,DEFSTR f8,8b,DEF f8,8c,DELETE f8,8d,DIM f8,8f,END f8,90,ERASE f8,91,ERL f8,92,ERROR f8,93,ERR f8,94,FILES f8,95,FRE f8,96,HEX$ f8,97,INSTR f8,98,KILL f8,9a,LLIST f8,9b,LOAD f8,9c,LPOS f8,9d,LPRINT f8,9e,MERGE f8,9f,NAME f8,a0,NEW f8,a1,OCT$ f8,a2,OPTION f8,a3,PEEK f8,a4,POKE f8,a5,POS f8,a6,RANDOMIZE f8,a8,RESTORE f8,a9,RESUME f8,aa,RUN f8,ab,SAVE f8,ad,STOP f8,ae,SWAP f8,af,SYSTEM f8,b0,TIME$ f8,b1,TRON f8,b2,TROFF f8,b3,VARPTR f8,b4,WIDTH f8,b5,BEEP f8,b6,CIRCLE f8,b8,MOUSE f8,b9,POINT f8,ba,PRESET f8,bb,PSET f8,bc,RESET f8,bd,TIMER f8,be,SUB f8,bf,EXIT f8,c0,SOUND f8,c2,MENU f8,c3,WINDOW f8,c5,LOCATE f8,c6,CSRLIN f8,c7,LBOUND f8,c8,UBOUND f8,c9,SHARED f8,ca,UCASE$ f8,cb,SCROLL f8,cc,LIBRARY f8,d2,PAINT f8,d3,SCREEN f8,d4,DECLARE f8,d5,FUNCTION f8,d6,DEFLNG f8,d7,SADD f8,d8,AREAFILL f8,d9,COLOR f8,da,PATTERN f8,db,PALETTE f8,dc,SLEEP f8,dd,CHDIR f8,de,STRIG f8,df,STICK f9,f4,OFF f9,f5,BREAK f9,f6,WAIT f9,f8,TAB f9,f9,STEP f9,fa,SPC f9,fb,OUTPUT f9,fc,BASE f9,fd,AS f9,fe,APPEND f9,ff,ALL fa,80,WAVE fa,81,POKEW fa,82,POKEL fa,83,PEEKW fa,84,PEEKL fa,85,SAY fa,86,TRANSLATE$ fa,87,OBJECT.SHAPE fa,88,OBJECT.PRIORITY fa,89,OBJECT.X fa,8a,OBJECT.Y fa,8b,OBJECT.VX fa,8c,OBJECT.VY fa,8d,OBJECT.AX fa,8e,OBJECT.AY fa,8f,OBJECT.CLIP fa,90,OBJECT.PLANES fa,91,OBJECT.HIT fa,92,OBJECT.ON fa,93,OBJECT.OFF fa,94,OBJECT.START fa,95,OBJECT.STOP fa,96,OBJECT.CLOSE fa,97,COLLISION fb,ff,PTAB", header = T, sep = ",", quote = "", as.is = T) .amigabasic_commands$code1 <- as.raw(paste0("0x", .amigabasic_commands$code1)) .amigabasic_commands$code2 <- as.raw(paste0("0x", .amigabasic_commands$code2)) .valid_code <- function(x) { apply( .amigabasic_commands[,c("code1", "code2")], 1, function(y) { y <- as.raw(paste0("0x", y)) (length(x) > 0 && y[[1]] == x[[1]]) && (y[[2]] == 0x00 || (length(x) > 1 && x[[2]] == y[[2]])) }) } #' Coerce raw data into an AmigaBasic class object #' #' [AmigaBasic()] objects are comprehensive 
representations of binary-encoded Amiga Basic scripts.
#' Use this function to convert raw content from encoded Amiga Basic scripts to an [AmigaBasic()]
#' object.
#'
#' This function will convert raw data as stored in Amiga Basic files into its corresponding S3
#' [AmigaBasic()]-class object.
#'
#' @rdname rawToAmigaBasic
#' @name rawToAmigaBasic
#' @param x A `vector` of `raw` data that is to be converted
#' into an [AmigaBasic()] class object.
#' @param ... Currently ignored.
#' @returns An [AmigaBasic()] class object based on `x`.
#' @examples
#' \dontrun{
#' ## First create an AmigaBasic object:
#' bas <- as.AmigaBasic("PRINT \"Hello world!\"")
#'
#' ## Make it raw:
#' bas.raw <- as.raw(bas)
#'
#' ## Now convert it back to an AmigaBasic object:
#' bas <- rawToAmigaBasic(bas.raw)
#' }
#' @family AmigaBasic.operations
#' @family raw.operations
#' @author Pepijn de Vries
#' @export
rawToAmigaBasic <- function(x, ...) {
  cursor <- 3
  result <- list()
  attr(result, "basic_header") <- x[1:2] ## Seems to be an identifier for basic scripts
  codelines <- T
  while (cursor < length(x)) {
    prev   <- cursor
    cursor <- cursor + .rawToAmigaInt(x[cursor], 8, F)
    if (cursor == prev) {
      ## encountered a terminator. From here on no more code lines
      codelines <- F
      cursor <- cursor + 1
      if (x[cursor] == raw(1)) {
        cursor <- cursor + 1
      }
      if ((cursor %% 2) == 1) {
        ## This padding byte only occurs to align files to word (2-byte) size. It does not seem to have any other function.
        if (x[cursor] != raw(1)) warning("Non-zero padding data encountered")
        cursor <- cursor + 1
      }
    } else {
      r <- x[(prev + 1):(cursor - ifelse(codelines, 1, 0))]
      if (codelines) {
        result[[length(result) + 1]] <- r
      } else {
        attr(result, "basic_names") <- c(attr(result, "basic_names"), rawToChar(r))
        cursor <- cursor + 1
      }
    }
  }
  class(result) <- "AmigaBasic"
  return(result)
}

#' @rdname as.raw
#' @name as.raw.AmigaBasic
#' @export
as.raw.AmigaBasic <- function(x, ...) {
  if (!inherits(x, "AmigaBasic")) stop("x should be of class AmigaBasic.")
  nms <- attr(x, "basic_names")
  basic_header <- attr(x, "basic_header")
  x <- unclass(x)
  lngths <- 1 + unlist(lapply(x, length))
  if (any(lngths > 255)) stop(sprintf("Lines %s are too long to encode", paste(which(lngths > 255), collapse = ", ")))
  x <- lapply(1:length(x), function(i) c(as.raw(lngths[i]), x[i]))
  c(basic_header, ## file header
    unlist(x),    ## code lines
    raw(2 + ((length(unlist(x)) + 1) %% 2)), ## terminator and padding
    if(length(nms) > 0) {
      ## append variable/label/etc. names
      unlist(lapply(1:length(nms), function(i) c(as.raw(nchar(nms[i])), charToRaw(nms[i]))))
    } else {
      raw()
    })
}

#' Read Amiga Basic files
#'
#' Read an [AmigaBasic()] script from its binary format.
#'
#' Normally Amiga Basic code is stored encoded in a binary format
#' ([rawToAmigaBasic()]).
#' This function reads the binary data from a file (which can be
#' stored on a virtual disk ([`amigaDisk()`][adfExplorer::amigaDisk-class]))
#' and converts it into an [AmigaBasic()] class object.
#' @rdname read.AmigaBasic
#' @name read.AmigaBasic
#' @param file A `character` string of the filename of the Amiga Basic file to be read.
#' @param disk A virtual Commodore Amiga disk from which the `file` should be
#' read. This should be an [`amigaDisk()`][adfExplorer::amigaDisk-class] object. Using
#' this argument requires the adfExplorer package.
#' When set to `NULL`, this argument is ignored.
#' @param ... Currently ignored.
#' @returns Returns an [AmigaBasic()] class object read from the `file`.
#' @examples #' \dontrun{ #' ## First create an AmigaBasic file #' write.AmigaBasic(as.AmigaBasic("PRINT \"Hello world\""), #' file.path(tempdir(), "helloworld.bas")) #' #' ## Now let's read the same file: #' bas <- read.AmigaBasic(file.path(tempdir(), "helloworld.bas")) #' } #' #' ## There's also a demo file included with the package #' demo.bas <- read.AmigaBasic(system.file("demo.bas", package = "AmigaFFH")) #' demo.bas #' @family AmigaBasic.operations #' @family io.operations #' @author Pepijn de Vries #' @export read.AmigaBasic <- function(file, disk = NULL, ...) { dat <- .read.generic(file, disk) rawToAmigaBasic(dat, ...) } #' Write an AmigaBasic object to a file #' #' Write an [AmigaBasic()] class object to a file in its binary format. #' #' This function encodes the Amiga Basic code in its binary format #' (using [as.raw()]) and writes it to a file. The file #' can also be stored onto a virtual Amiga disk #' ([`amigaDisk()`][adfExplorer::amigaDisk-class]). #' #' @rdname write.AmigaBasic #' @name write.AmigaBasic #' @param x The [AmigaBasic()] class object that needs to be #' stored. #' @param file A `character` string specifying the file location #' to which `x` (an [AmigaBasic()] object) needs to be written. #' @param disk A virtual Commodore Amiga disk to which the `file` should be #' written. This should be an [`amigaDisk()`][adfExplorer::amigaDisk-class] object. Using #' this argument requires the adfExplorer package. #' When set to `NULL`, this argument is ignored. #' @returns Invisibly returns the result of the call of `close` to the #' file connection. Or, when `disk` is specified, a copy of #' `disk` is returned to which the file(s) is/are written. #' @examples #' \dontrun{ #' ## First create an AmigaBasic object: #' bas <- as.AmigaBasic("PRINT \"hello world!\"") #' #' ## write to tempdir: #' write.AmigaBasic(bas, file.path(tempdir(), "helloworld.bas")) #' } #' @family AmigaBasic.operations #' @family io.operations #' @author Pepijn de Vries #' @export write.AmigaBasic <- function(x, file, disk = NULL) { if (!inherits(x, "AmigaBasic")) stop("x should be of class AmigaBasic.") .write.generic(x, file, disk) } #' Coerce an AmigaBasic class object to its character representation #' #' Coerce an [AmigaBasic()]-class object to its character representation #' #' Amiga Basic files are encoded in a binary format and are also stored as such #' in [AmigaBasic()]-class objects. Use this function to convert #' these objects into legible `character` data. #' #' @rdname as.character #' @name as.character #' @param x An [AmigaBasic()] class object that needs to be #' coerced to its `character` representation. #' @param ... Currently ignored. #' @returns A `vector` of `character` strings, where #' each element of the `vector` is a `character` representation #' of a line of Amiga Basic code stored in `x`. #' @examples #' \dontrun{ #' ## First create an Amiga Basic object: #' bas <- as.AmigaBasic("PRINT \"Hello world!\"") #' #' ## now convert the object back into text: #' bas.txt <- as.character(bas) #' } #' @family AmigaBasic.operations #' @author Pepijn de Vries #' @export as.character.AmigaBasic <- function(x, ...) 
{ if (!inherits(x, "AmigaBasic")) stop("x should be of class AmigaBasic.") nms <- attr(x, "basic_names") class(x) <- NULL x <- lapply(x, function(ln) { cmdln <- strrep(" ", .rawToAmigaInt(ln[1], 8, F)) ln <- ln[-1] while (length(ln) > 2) { ln1 <- ln[1] ln <- ln[-1] if (cmdln == "" && length(ln) > 1) { ## Check if line starts with numeric label if (((utils::tail(ln, 1) & as.raw(0x80)) != 0x00) || !(any(.valid_code(c(ln1, ln[1]))) || ln1 %in% as.raw(1:3)) && (length(ln) < 3 || (any(.valid_code(ln[2:3])) || ln[2] %in% as.raw(1:3)))) { ln[length(ln)] <- xor(utils::tail(ln, 1), as.raw(0x80)) cmdln <- paste0(cmdln, as.character(readBin(c(ln1, ln[1]), "integer", 2, 2, F, "big"))) ln1 <- ln[2] ln <- ln[-1:-2] if (length(ln) > 2) cmdln <- paste0(cmdln, " ") } } #TODO 'THEN' and 'ELSEIF' seems to be followed by redundant binaries m1 <- which(.amigabasic_commands$code1 %in% ln1) if ((length(m1) > 1 && length(ln) > 0) || (length(m1) == 1 && .amigabasic_commands$code2[m1] != raw(1))) { if (ln1 == as.raw(0xaf) && ln[1] != as.raw(0xe8)) { ## if command is "REM" m1 <- which(.amigabasic_commands$code1 %in% ln1 & .amigabasic_commands$code2 %in% raw(1)) } else { m1 <- which(.amigabasic_commands$code1 %in% ln1 & .amigabasic_commands$code2 %in% ln[1]) ln <- ln[-1] } } if (length(m1) == 0) { if (ln1 %in% as.raw(0x01:0x03)) { ## 0x01: variable, function or static sub names. Or an option name, such as 'P' in {SAVE "helloworld.bas",P} ## 0x02: label definition (using colon) ## 0x03: label reference if (ln1 == as.raw(0x03)) { if (ln[1] != raw(1)) warning(sprintf("Encountered non-zero padding byte (%02x).", as.numeric(ln[1]))) ln <- ln[-1] } idx <- .rawToAmigaInt(ln[1:2], 16, F) + 1 cmdln <- paste0(cmdln, nms[idx]) ln <- ln[-1:-2] } else if(ln1 == as.raw(0x08)) { ## TODO This code appears to be redundant and follows after THEN or ELSEIF statements ln <- ln[-1:-3] ## byte1 says nothing about the length of the data. byte1 is mostly 2, but can have different values. the amount of data is always fixed. 
        ## There seems to be a correlation with the line number in which this occurs.
      } else if (ln1 %in% as.raw(0x11:0x1a)) {
        cmdln <- paste0(cmdln, as.numeric(ln1) - 0x11)
      } else if (ln1 == as.raw(0x0b)) {
        ## octal number
        cmdln <- sprintf("%s&O%o", cmdln, readBin(ln[1:2], "integer", size = 2, endian = "big", signed = F))
        ln <- ln[-1:-2]
      } else if (ln1 == as.raw(0x0c)) {
        ## hexadecimal short signed integer
        cmdln <- sprintf("%s&H%X", cmdln, readBin(ln[1:2], "integer", size = 2, endian = "big", signed = F))
        ln <- ln[-1:-2]
      } else if (ln1 == as.raw(0x0e)) {
        ## longish unsigned integer
        cmdln <- paste0(cmdln, readBin(c(raw(1), ln[1:3]), "integer", size = 4, endian = "big"))
        ln <- ln[-1:-3]
      } else if (ln1 == as.raw(0x0f)) {
        cmdln <- paste0(cmdln, as.numeric(ln[1]))
        ln <- ln[-1]
      } else if (ln1 == as.raw(0x1c)) {
        ## short signed integer
        cmdln <- paste0(cmdln, readBin(ln[1:2], "integer", size = 2, endian = "big", signed = T))
        ln <- ln[-1:-2]
      } else if (ln1 == as.raw(0x1d)) {
        ## single precision float
        num <- readBin(ln[1:4], "numeric", size = 4, endian = "big")
        numopt1 <- gsub("0[.]", ".", toupper(format(num, digits = 7, scientific = F)))
        if (nchar(numopt1) > 8) {
          num <- toupper(format(num, digits = 7, scientific = T))
        } else num <- numopt1
        if (!grepl("[.]|E", num)) num <- paste0(num, "!")
        cmdln <- paste0(cmdln, num)
        ln <- ln[-1:-4]
      } else if (ln1 == as.raw(0x1e)) {
        ## long signed integer
        cmdln <- paste0(cmdln, readBin(ln[1:4], "integer", size = 4, endian = "big"), "&")
        ln <- ln[-1:-4]
      } else if (ln1 == as.raw(0x1f)) {
        ## double precision float
        num <- readBin(ln[1:8], "numeric", size = 8, endian = "big")
        numopt1 <- gsub("0[.]", ".", toupper(format(num, digits = 16, scientific = F)))
        if (nchar(numopt1) > 17) {
          num <- toupper(format(num, digits = 16, scientific = T))
        } else num <- numopt1
        num <- gsub("E", "D", num)
        if (!grepl("D", num)) num <- paste0(num, "#")
        cmdln <- paste0(cmdln, num)
        ln <- ln[-1:-8]
      } else if (ln1 == as.raw(0x22)) {
        quo <- ln1
        ln1 <- raw(1)
        ## If command is double quote, keep reading text
        ## until the end of the line or until another
        ## double quote is encountered
        while (length(ln) > 0 && ln1 != as.raw(0x22)) {
          ln1 <- ln[1]
          ln <- ln[-1]
          quo <- c(quo, ln1)
        }
        quo <- quo[quo != raw(1)] ## remove any possible null characters in case a closing quote is missing
        cmdln <- paste0(cmdln, rawToChar(quo))
      } else {
        try({
          ## These are probably white space, brackets, &, %, #, etc.
          cmdln <- paste0(cmdln, rawToChar(ln1))
        })
      }
    } else {
      cmd <- .amigabasic_commands$command[m1]
      if (cmd %in% c("ELSE", "REM", "'")) {
        ## remove preceding ":" if it is there
        if (nchar(cmdln) > 0) {
          if (substr(cmdln, nchar(cmdln), nchar(cmdln)) == ":") cmdln <- substr(cmdln, 0, nchar(cmdln) - 1)
        }
        if (cmd %in% c("REM", "'")) {
          ## all remaining data on the line should be treated as text when the command is REM or '
          cmd <- paste0(cmd, rawToChar(ln))
          ln <- raw(0)
        }
      }
      cmdln <- paste0(cmdln, cmd)
    }
  }
  return(cmdln)
  })
  x <- unlist(x)
  return(x)
}

#' List Amiga Basic reserved words.
#'
#' Obtain a list of reserved Amiga Basic words. These words are not
#' allowed as names of variables or labels in Amiga Basic.
#'
#' This function will return a full list of reserved Amiga Basic
#' words. This list does not serve as a manual for Basic (for
#' that purpose consult external resources). This list is meant to
#' be consulted when choosing label names in Amiga Basic code. These
#' reserved words are not allowed as names.
#'
#' @rdname AmigaBasic.reserved
#' @name AmigaBasic.reserved
#' @returns Returns a `vector` of `character` strings of
#' reserved Amiga Basic words.
#' @examples
#' AmigaBasic.reserved()
#' @family AmigaBasic.operations
#' @author Pepijn de Vries
#' @export
AmigaBasic.reserved <- function() {
  sort(.amigabasic_commands$command)
}

#' Coerce raw or character data to an AmigaBasic class object
#'
#' Coerce raw or character data to an [AmigaBasic()] S3 class object
#'
#' Convert text to an [AmigaBasic()] S3 class object. The text should
#' consist of valid Amiga BASIC syntax. This function does not perform a
#' full check of the syntax, but will break on some fundamental syntax malformations.
#'
#' @rdname as.AmigaBasic
#' @name as.AmigaBasic
#' @param x `x` should be a `vector` of `raw` data or
#' `character` strings. When `x` is `raw` data, it
#' is interpreted as if it were from an Amiga Basic binary encoded file.
#'
#' When `x` is a `vector` of `character` strings,
#' each element of the vector should represent one line of Basic code.
#' Each line should not contain line breaks or other special characters,
#' as this will result in errors. The text should represent valid
#' Amiga Basic syntax. The syntax is only checked to a limited extent as
#' this package does not implement an interpreter for the code.
#' @param ... Currently ignored.
#' @returns Returns an [AmigaBasic()] class object based on `x`.
#' @examples
#' \dontrun{
#' ## An AmigaBasic object can be created from text.
#' ## Note that each line of code is a separate element
#' ## in the vector:
#' bas <- as.AmigaBasic(c(
#'   "CLS ' Clear the screen",
#'   "PRINT \"Hello world!\" ' Print a message on the screen"
#' ))
#'
#' ## Let's make it raw data:
#' bas.raw <- as.raw(bas)
#'
#' ## We can also use the raw data to create an Amiga Basic object:
#' ## Note that this is effectively the same as calling 'rawToAmigaBasic'
#' bas <- as.AmigaBasic(bas.raw)
#' }
#' @references <https://en.wikipedia.org/wiki/AmigaBASIC>
#' @family AmigaBasic.operations
#' @family raw.operations
#' @author Pepijn de Vries
#' @export
as.AmigaBasic <- function(x, ...) {
  if (inherits(x, "character")) {
    ## remember the number of leading spaces, then remove them
    leading.spaces <- unlist(lapply(gregexpr("^ +", x), function(y) attr(y, "match.length")))
    leading.spaces[leading.spaces < 0] <- 0
    leading.spaces <- as.raw(leading.spaces)
    x <- gsub("^ +", "", x)
    ## split the lines at special characters:
    x <- strsplit(x, "(?=[ !#$%\\^&*()\\-+=/?,<>:;\"'])", perl = T)
    nms <- NULL
    x <- lapply(x, function(y) {
      trailing_marker <- raw(1)
      result <- raw(0)
      while (length(y) > 0) {
        ## This cannot be moved outside the loop, as the extra split modifies the text.
## And that should not happen when text is between double quotes or follows a REM statement if (grepl("^[0-9.]", y[1], perl = T)) { exponent <- gregexpr("[d-e][-]?\\d", tolower(paste(y, collapse = "")), perl = T)[[1]] ## Find first non-numeric/period that is not !#%&: extrasplit <- gregexpr("[^\\d.!#%&]", y[1], perl = T)[[1]] ## except for when it is an exponent extrasplit <- extrasplit[extrasplit != exponent[1]] ## Find first occurrence of !#%& extrasplit <- c(extrasplit, 1 + gregexpr("[!#%&]", y[1], perl = T)[[1]]) ## second occurrence of period: extrasplit <- c(extrasplit, gregexpr("(?:.*?\\K[.]){2}", y[1], perl = T)[[1]]) extrasplit[extrasplit < 1] <- 1 + nchar(y[1]) extrasplit <- extrasplit[which(extrasplit == min(extrasplit))[1]] if (extrasplit > 1 && extrasplit <= nchar(y[1])) { extrasplit <- c(substr(y[1], 1, extrasplit - 1), " ", substr(y[1], extrasplit, nchar(y[1]))) } else extrasplit <- y[1] extrasplit[grepl("^[.]", extrasplit)] <- paste0("0", extrasplit[grepl("^[.]", extrasplit)]) y <- c(extrasplit, y[-1]) y <- y[nchar(y) > 0] } if (y[1] == "?") y <- c("PRINT", " ", y[-1]) cmd <- match(toupper(y[1]), .amigabasic_commands$command) if (is.na(cmd) && length(y) > 1 && y[2] == "$") { cmd <- match(toupper(paste0(y[1], y[2])), .amigabasic_commands$command) if (!is.na(cmd)) { y[1] <- paste0(y[1], y[2]) y <- y[-2] } } if (!is.na(cmd)) { cmd <- unlist(.amigabasic_commands[cmd, c("code1", "code2")], use.names = F) cmd <- cmd[cmd != raw(1)] if (toupper(y[1]) %in% c("'", "REM")) { cmd <- c(if(length(result) > 0) charToRaw(":"), cmd, charToRaw(paste(y[-1], collapse = ""))) y <- character() } result <- c(result, cmd) } else if (grepl("[ !#$%&()/,:;]", y[1])) { result <- c(result, charToRaw(y[1])) } else if (y[1] =="\"") { ## if it's between double quotes, it's a string repeat { result <- c(result, charToRaw(y[1])) y <- y[-1] if (length(y) == 0 || y[1] == "\"") break } if (length(y)> 0) result <- c(result, charToRaw(y[1])) } else if (suppressWarnings(!is.na(as.numeric(gsub("^O", "", gsub("^H", "0x", gsub("D", "E", toupper(y[1])))))))) { ## if it is a numeric if (any(y[y != " "][1] == "$")) stop("Fatal syntax error, numeric cannot be followed by '$'.") dot <- grepl("[.]", y[1]) tp <- NA if (grepl("D", toupper(y[[1]]))) tp <- "#" ## if The exponent is noted with a 'D', it is a double precision float if (grepl("E", toupper(y[[1]]))) tp <- "!" 
## if The exponent is noted with a 'E', it is a single precision float if (startsWith(toupper(y[1]), "H") && length(result) > 0 && utils::tail(result, 1) == as.raw(0x26)) tp <- "&H" ## a signed hexadecimal short int (two bytes) if (startsWith(toupper(y[1]), "O") || (is.na(tp) && length(result) > 0 && utils::tail(result, 1) == as.raw(0x26))) tp <- "&O" ## an octal number if (length(result) == 0) tp <- "numeric_label" if (length(result) > 2) { comm_check <- .amigabasic_commands$command %in% c("GOTO", "GOSUB", "BREAK", "COLLISION", "ERROR", "MENU", "MOUSE", "TIMER") comm_check <- .amigabasic_commands[comm_check, c("code1", "code2")] check <- any(apply(comm_check, 1, function(cc) { code <- as.raw(paste0("0x", cc)) code <- if (!any(code == 0x00)) c(as.raw(c(0xaa, 0x20)), code) else code[code != 0x00] # 0xaa20 == "on " code <- c(code, as.raw(0x20)) return(length(result) >= length(code) && identical(code, utils::tail(result, length(code)))) })) if (check) tp <- "numeric_ref" } if (grepl("E|D", toupper(y[[1]])) && (length(y) > 1) && y[2] %in% c("+", "-")) { y[1] <- paste0(y[1], y[2]) y <- y[-2] if (length(y) > 1 && grepl("^[0-9]", y[2])) { nonnum <- gregexpr("\\D", y[2], perl = T)[[1]][1] if (nonnum != -1) { y <- c(y[1], substr(y[2], 1, nonnum - 1), " ", substr(y[2], nonnum, nchar(y[2])), y[-1:-2]) } y[1] <- paste0(y[1], y[2]) y <- y[-2] } } num <- suppressWarnings(as.numeric(gsub("D", "E", toupper(as.character(y[1]))))) if (is.na(num) && !(tp %in% c("&H", "&O"))) stop(sprintf("Fatal error in number format: %s%s.", tp, y[1])) if (is.na(tp) && length(y) > 1 && y[2] %in% c("!", "#", "%", "&")) { if (length(y) > 2 && !grepl("^[ ()/,:;-]", y[3])) y <- c(y[1:2], " ", y[-1:-2]) tp <- y[2] y <- y[-1] } ## if the number type is not specified, guess... if (is.na(tp)) { if (round(num) == num && !dot) { ## it's either a short or a long integer tp <- ifelse(num <= 32767 && num >= -32767, "%", "&") } else { ## it's either a single or a double float fm <- format(num, scientific = T) fm <- strsplit(fm, "e")[[1]] ## TODO need to test this more extensively tp <- ifelse(nchar(fm[1]) > 8 || abs(as.numeric(fm[2])) > 38, "#", "!") } } if (tp == "%" && num < 255) { if (num < 10) { result <- c( result, as.raw(num + 0x11) ) } else { result <- c( result, as.raw(c(0x0f, num)) ) } } else if(tp %in% c("numeric_label", "numeric_ref")) { if (as.integer(y[1]) < 0) stop("Fatal syntax error: numeric labels cannot be negative.") if (as.integer(y[1]) > 0xfff9) { y <- c(substr(y[1], 1, 4), substr(y[1], 5, nchar(y[1])), y[-1]) } result <- c(result, if(tp == "numeric_ref") as.raw(c(0x0e, 0x00)) else raw(), writeBin(as.integer(y[1]), raw(), 2, "big")) z <- y[y != " "] valid_current <- any(.valid_code(result)) || result[[1]] %in% as.raw(1:3) valid_next <- length(z) > 1 && (toupper(z[2]) %in% .amigabasic_commands$command || !is.na(suppressWarnings(try(as.numeric(z[2])))) || (!is.null(nms) && toupper(z[2]) %in% toupper(nms)) || grepl("[ !#$%\\^&*()\\-+=/?,<>:;\"']", z[2], perl = T)) ## TODO This check isn't 100% similar to original amigabasic. 
          ## It will produce a different outcome on as.AmigaBasic("1 PRINT \"ja\":GOTO 9"), but it will still work.
          if (length(result) == 2 && (!(valid_current && (length(y) == 1 || valid_next)) || valid_current)) trailing_marker <- as.raw(0x80)
          if (length(y) > 2 && y[2] == " ") y <- y[-2]
        } else {
          if (tp %in% c("&H", "&O")) result <- utils::head(result, -1) ## remove the '&' previously written to the result
          result <- c(
            result,
            as.raw(c(0x0b, 0x0c, 0x1c, 0x1d, 0x1e, 0x1f))[match(tp, c("&O", "&H", "%", "!", "&", "#"))],
            writeBin(
              ifelse(tp %in% c("%", "&"),
                     as.integer(num),
                     ifelse(tp == "&H", {
                       ## two byte hexadecimal
                       num <- as.integer(gsub("^H", "0x", y[1]))
                       as.integer(ifelse(num > 0x8000, num - 0x10000, num))
                     },
                     ifelse(tp == "&O", {
                       ## two byte octal
                       y[1] <- gsub("^O", "", toupper(y[1]))
                       num <- strsplit(y[1], "[^0-7]")[[1]]
                       y_insert <- substr(y[1], nchar(num) + 1, nchar(y[1]))
                       if (y_insert != "") y <- c(y[1], " ", y_insert, utils::tail(y, -1))
                       num <- as.numeric(strsplit(num, "")[[1]])
                       num <- as.integer(sum(num * (8 ^ (rev(seq_along(num) - 1)))))
                       as.integer(ifelse(num > 0x8000, num - 0x10000, num))
                     },
                     num))),
              raw(),
              size = ifelse(tp %in% c("&H", "&O", "%"), 2, ifelse(tp == "#", 8, 4)),
              endian = "big")
          )
        }
      } else {
        ## If it's none of the above, it must be a name/label
        ## If a name starts with FN, it should be a function defined with "DEF FN"
        if (grepl("^FN", toupper(y[1]))) {
          result <- c(result, as.raw(0x93))
          y[1] <- substr(y[1], 3, nchar(y[1]))
        }
        if (any(as.logical(check.names.AmigaBasic(y[1]))))
          stop(sprintf("Fatal syntax error in Basic code at: '%s'", paste(y, collapse = "")))
        ## Check if the name was already used before, otherwise
        ## append it to the vector of names.
        nm <- match(toupper(y[1]), toupper(nms)) ## the first definition will determine the case of the characters in the name
        cd <- 1 ## variable or static sub label
        ## if the name is directly followed by a colon (and nothing else), it is a label definition
        if (length(y) == 2 && y[2] == ":") {
          cd <- 2 ## label definition
        } else {
          goto.gosub <- c(
            which(result %in% as.raw(c(0x96, 0x97))),         # GOSUB and GOTO
            grepRaw(as.raw(c(0xf8, 0xa8)), result, fixed = T)  # RESTORE
          )
          if (length(goto.gosub) > 0) {
            goto.gosub <- max(goto.gosub)
            ## assume the label is meant to go with the GOSUB, GOTO or RESTORE command when the line is not split by ":" in between
            if (goto.gosub == length(result) || all(result[(goto.gosub + 1):length(result)] != as.raw(0x3a))) cd <- 3
          }
        }
        if (is.na(nm)) {
          nms <<- c(nms, y[1])
          nm <- length(nms)
        }
        result <- c(
          result,
          as.raw(cd),
          if (cd == 3) raw(1) else raw(),
          writeBin(as.integer(nm - 1), raw(), size = 2, endian = "big")
        )
      }
      y <- y[-1]
    }
    attributes(result)$trailing_marker <- trailing_marker
    return (result)
    })
    x <- lapply(seq_along(x), function(i) {
      padding <- raw(2)
      if (!is.null(attr(x[[i]], "trailing_marker"))) {
        padding[2] <- attr(x[[i]], "trailing_marker")
        attr(x[[i]], "trailing_marker") <- NULL
      }
      c(leading.spaces[[i]], x[[i]], padding)
    })
    x <- lapply(x, function(y) {
      if (length(y) > 2 && identical(y[1:3], as.raw(c(0x00, 0xaf, 0xe8)))) y <- c(y[1], as.raw(0x3a), y[-1])
      y
    })
    ## TODO protected basic files have header 0xf4, 0xc2.
    ## I suspect that this is followed by a 5 byte encryption key, followed by encrypted data.
    attr(x, "basic_header") <- as.raw(c(0xf5, 0x00))
    attr(x, "basic_names") <- nms
    class(x) <- "AmigaBasic"
    return (x)
  } else if (inherits(x, "raw")) {
    return (rawToAmigaBasic(x, ...))
  } else {
    stop("Cannot convert 'x' to S3 AmigaBasic class object.")
  }
}

#' Extract or replace lines of Amiga Basic code
#'
#' Extract or replace lines of Amiga Basic code
#'
#' Extract or replace specific lines in an [AmigaBasic()]-class object.
#'
#' @rdname ExtractBasic
#' @name [.AmigaBasic
#' @param x An `AmigaBasic` class object from which specific lines
#' need to be extracted or replaced.
#' @param i In case of `[[`: an integer index, representing the line-number of basic code to be selected.
#' In case of `[`: a `vector` of `numeric` indices. This index
#' is used to select specific lines. Negative values will deselect lines.
#' @param value A `vector` of `character` strings or an
#' [AmigaBasic()] class object that is used to replace
#' the selected indices `i`. `value` should represent the
#' same number of lines of code as the selected number of lines.
#' @returns The extraction method returns an [AmigaBasic()] object based on the lines selected with `i`.
#' The replacement method returns an [AmigaBasic()] object with the selected lines replaced with `value`.
#' @examples
#' \dontrun{
#' ## First generate a few lines of Basic code:
#' bas <- as.AmigaBasic(c(
#'   "LET a = 1",
#'   "a = a + 1",
#'   "PRINT \"a now equals\";a",
#'   "INPUT \"clear screen (y/n)? \", b$",
#'   "IF UCASE$(b$) = \"Y\" THEN CLS"
#' ))
#'
#' ## Select only lines 4 and 5:
#' bas[4:5]
#'
#' ## use negative indices to deselect specific lines.
#' ## deselect line 2:
#' bas[-2]
#'
#' ## replace line 2
#' bas[2] <- "a = a + 2"
#'
#' ## You can also use AmigaBasic class object as replacement
#' bas[2] <- as.AmigaBasic("a = a + 3")
#'
#' ## single lines can also be selected with '[['
#' bas[[2]]
#' }
#' @family AmigaBasic.operations
#' @author Pepijn de Vries
#' @export
`[.AmigaBasic` <- function(x, i) {
  vctrs::vec_restore(NextMethod(), x)
}

#' @rdname ExtractBasic
#' @name [<-.AmigaBasic
#' @export
`[<-.AmigaBasic` <- function(x, i, value) {
  if (!inherits(x, "AmigaBasic")) stop("'x' should be of class AmigaBasic.")
  x <- as.character(x)
  x[i] <- as.character(value)
  return(as.AmigaBasic(x))
}

#' @rdname ExtractBasic
#' @name `[[.AmigaBasic`
#' @export
`[[.AmigaBasic` <- function(x, i) {
  vctrs::vec_restore(list(NextMethod()), x)
}

#' @rdname ExtractBasic
#' @name `[[<-.AmigaBasic`
#' @export
`[[<-.AmigaBasic` <- function(x, i, value) {
  if (!inherits(x, "AmigaBasic")) stop("x should be of class AmigaBasic.")
  x <- as.character(x)
  x[[i]] <- as.character(value)
  return(as.AmigaBasic(x))
}

#' @export
print.AmigaBasic <- function(x, ...) {
  if (!inherits(x, "AmigaBasic")) stop("x should be of class AmigaBasic.")
  cat(paste(as.character(x), collapse = "\n"))
}

#' Extract or replace variable and label names from Amiga Basic scripts
#'
#' In the binary Amiga Basic files, names for labels and variables
#' in the code are stored at the end of the file. In the encoded code
#' there is only a pointer to the index of the name in that list. Use
#' this function to list, select or replace names included in the code.
#'
#' Make sure that variable and label names are valid for the basic script (see [check.names.AmigaBasic]).
#'
#' @rdname names.AmigaBasic
#' @name names.AmigaBasic
#' @param x An [AmigaBasic()]-class object for which to obtain or change variable and/or label names.
#' @param value A (`vector` of) `character` string of desired replacement variable/label names.
#' @returns A `vector` of `character` strings with label and variable names in the basic script.
#' In case of the replacement method an [AmigaBasic()]-class object with replaced names is returned.
#' @examples
#' ## Let's create some Basic code with labels and variables:
#' bas <- as.AmigaBasic(c(
#'   "REM - This will loop forever...",
#'   "my.label:",
#'   " my.variable% = 0",
#'   " WHILE my.variable% < 10",
#'   " my.variable% = my.variable% + 1",
#'   " WEND",
#'   " GOTO my.label"
#' ))
#'
#' ## list the names in the script above:
#' names(bas)
#'
#' ## change the first name:
#' names(bas)[1] <- "better.label"
#' @family AmigaBasic.operations
#' @author Pepijn de Vries
#' @export
names.AmigaBasic <- function(x) {
  if (!inherits(x, "AmigaBasic")) stop("x should be of class AmigaBasic.")
  result <- attr(x, "basic_names")
  if (length(result) == 0) character() else result
}

#' @rdname names.AmigaBasic
#' @name names<-.AmigaBasic
#' @export
`names<-.AmigaBasic` <- function(x, value) {
  if (!inherits(x, "AmigaBasic")) stop("x should be of class AmigaBasic.")
  if (!is.character(value) || length(value) != length(attr(x, "basic_names")))
    stop("Replacement should be a vector of characters of the same length.")
  if (any(duplicated(toupper(value)))) stop("All names should be unique.")
  if (any(nchar(value) > 255 | nchar(value) < 1)) stop("All names should be one or more and less than 255 characters in length.")
  if (any(toupper(value) %in% .amigabasic_commands$command)) stop("Names cannot be reserved AmigaBasic words.")
  if (any(grepl("[^a-zA-Z0-9.]", value, perl = TRUE))) stop("Names should consist of alphanumerics or periods.")
  if (any(grepl("[0-9]", substr(value, 1, 1), perl = T))) stop("Names should not start with numeric characters.")
  if (any(grepl("[ !#$%\\^&*()\\-+=/?,<>:;\"']", value, perl = T))) stop("Names should not contain special characters.")
  attr(x, "basic_names") <- value
  x
}

#' Check Amiga Basic label/variable names for validity
#'
#' Check Amiga Basic label/variable names for validity
#'
#' Names for variables and labels should adhere to the following rules in Amiga Basic:
#'
#'  * Length of the names should be in the range of 1 up to 255 characters
#'  * Names cannot be [AmigaBasic.reserved()] words
#'  * Names should only contain alphanumeric characters or periods and
#'    should not contain special characters (i.e., reserved for type definition,
#'    such as dollar- or percentage sign)
#'  * Names should not start with a numeric character
#'
#' This function tests names against each of these criteria.
#'
#' @rdname check.names.AmigaBasic
#' @name check.names.AmigaBasic
#' @param x A `vector` of `character` strings that need to be checked
#' @param ... Currently ignored.
#' @returns A `data.frame` with `logical` values with the same number of rows as the length of `x`.
#' Columns in the data.frame correspond with the criteria listed in the details.
#' `TRUE` flags a violation of the corresponding criterion (i.e., an invalid name).
#' @examples
#' \dontrun{
#' ## These are valid names in Amiga Basic:
#' check.names.AmigaBasic(c("Foo", "Bar"))
#'
#' ## Reserved words and repeated names are not allowed:
#'
#' check.names.AmigaBasic(c("Print", "Foo", "Foo"))
#' }
#' @family AmigaBasic.operations
#' @author Pepijn de Vries
#' @export
check.names.AmigaBasic <- function(x, ...)
{ nm <- if (inherits(x, "AmigaBasic")) names(x) else as.character(x) result <- data.frame( duplicated = duplicated(toupper(nm)), length = nchar(nm) > 255 | nchar(nm) < 1, reserved = toupper(nm) %in% .amigabasic_commands$command, characters = grepl("[^a-zA-Z0-9.]", nm, perl = TRUE), start = grepl("^[0-9.]", nm, perl = TRUE) ) row.names(result) <- nm result } #' @rdname c #' @name c #' @export c.AmigaBasic <- function(...) { bas.codes <- list(...) if (!all(unlist(lapply(bas.codes, inherits, what = "AmigaBasic")))) stop ("All arguments should be of type AmigaBasic.") ## It's not fastest, but it is safest to convert all codes to text ## than back to AmigaBasic. bas.codes <- as.AmigaBasic(unlist(lapply(bas.codes, as.character))) } .basic.shape.header <- data.frame( byte = c(4, 4, 4, 4, 4, -2, 2, -2), signed = c(F, F, F, F, F, F, F, F), par.names = c("colorset", "dataset", "depth", "width", "height", "flags", "planePick", "planeOnOff"), stringsAsFactors = F ) #' Coerce raw data into an AmigaBasicShape class object #' #' Coerce raw data into an [AmigaBasicShape()]-class object #' #' [AmigaBasicShape()] objects are comprehensive representations of blitter #' and sprite graphics that can be used in [AmigaBasic()] scripts. Use this function #' to convert `raw` content to an [AmigaBasicShape()] object. #' #' @rdname rawToAmigaBasicShape #' @name rawToAmigaBasicShape #' @param x A `vector` of `raw` data that is to be converted #' into an [AmigaBasicShape()] class object. #' @param palette A `vector` of `character` strings, where each element represents a colour in the palette. #' This palette will be used to display the graphics (note that the raw format does not store the palette, but this #' S3 class does). When this argument is omitted a grey scale palette will be generated. #' @returns returns an [AmigaBasicShape()]-class object. #' @examples #' \dontrun{ #' filename <- system.file("ball.shp", package = "AmigaFFH") #' #' ## read as binary: #' con <- file(filename, "rb") #' ball.raw <- readBin(con, "raw", file.size(filename)) #' close(con) #' #' ## convert raw data into something useful: #' ball <- rawToAmigaBasicShape(ball.raw) #' #' ## A shortcut would be to call read.AmigaBasicShape #' ball2 <- read.AmigaBasicShape(filename) #' } #' @family AmigaBasicShapes.operations #' @family raw.operations #' @author Pepijn de Vries #' @export rawToAmigaBasicShape <- function(x, palette) { ## colorset and dataset seem to be ignored in the basic object editor. ## They seem not required for interpretation of the file if (missing(palette)) palette <- NULL result <- with(.basic.shape.header, .read.amigaData(x, byte, signed, par.names)) if (is.null(palette)) { palette <- grDevices::grey(seq(0, 1, length.out = 2^result$depth)) } else { if (length(palette) < 2^result$depth) { palette <- rep_len(palette, 2^result$depth) warning(sprintf("Palette contains too few colours. Provided palette is repeated to length.out %i.", 2^result$depth)) } else if (length(palette) > 2^result$depth) { palette <- palette[1:(2^result$depth)] warning(sprintf("Palette contains too many colours. 
Provided palette is truncated to length.out %i.", 2^result$depth)) } } x <- x[-1:-sum(abs(.basic.shape.header$byte))] result$bitmap <- with(result, bitmapToRaster(x, width, height, depth, palette, interleaved = F)) attributes(result$bitmap)$palette <- palette sz_alt <- 2*ceiling(result$width/16)*result$height sz <- sz_alt*result$depth x <- x[-1:-sz] result$flags <- rev(as.logical(.rawToBitmap(c(raw(2), result$flags), T, F)))[1:16] result$planeOnOff <- rev(as.logical(.rawToBitmap(c(raw(2), result$planeOnOff), T, F)))[1:16] names(result$flags) <- .amigabasicshape.flags if (result$flags["fVSprite"] && result$depth != 2) warning("Unexpected bitmap depth for sprite mode.") if (result$flag["imageShadowIncluded"]) { result$shadow <- with(result, bitmapToRaster(x, width, height, 1, interleaved = F, palette = c("black", "white"))) x <- x[-1:-sz_alt] } if (result$flag["collisionPlaneIncluded"]) { result$collision <- with(result, bitmapToRaster(x, width, height, 1, interleaved = F, palette = c("black", "white"))) x <- x[-1:-sz_alt] } if (result$flags["fVSprite"]) { result$sprite_palette <- amigaRawToColour(x[1:6], "12 bit", "2") x <- x[-1:-6] } if (length(x) > 0) warning("Unexpected and unused trailing data!") class(result) <- "AmigaBasicShape" result } #' Read Amiga Basic Shape files #' #' Read Amiga Basic Shape files #' #' AmigaBasic used the term 'shapes' for graphics (sprites and blitter objects) which it could display. #' These graphics were stored in a specific binary format, which can be read with this function. See #' [AmigaBasicShape()] for more details. The file can also be read from a virtual Amiga disk #' ([`amigaDisk()`][adfExplorer::amigaDisk-class]). #' @rdname read.AmigaBasicShape #' @name read.AmigaBasicShape #' @param file A `character` string of the filename of the Amiga Basic Shape file to be read. #' @param disk A virtual Commodore Amiga disk from which the `file` should be #' read. This should be an [`amigaDisk()`][adfExplorer::amigaDisk-class] object. Using #' this argument requires the adfExplorer package. #' When set to `NULL`, this argument is ignored. #' @param ... Arguments passed to [rawToAmigaBasicShape()]. #' @returns Returns an [AmigaBasicShape()] class object read from the `file`. #' @examples #' \dontrun{ #' filename <- system.file("ball.shp", package = "AmigaFFH") #' ball <- read.AmigaBasicShape(filename) #' ## This is a sprite: #' ball$flags[["fVSprite"]] #' #' filename <- system.file("r_logo.shp", package = "AmigaFFH") #' ## The palette is not stored with an Amiga Basic Shape, so let's provide one: #' r_logo <- read.AmigaBasicShape(filename, #' palette = c("#FFFFFF", "#2266BB", "#3366BB", "#4477AA", #' "#778899", "#999999", "#AAAAAA", "#BBBBBB")) #' ## This is a blitter object: #' r_logo$flags[["fVSprite"]] #' #' ## Just for fun, plot it: #' plot(r_logo) #' } #' @family AmigaBasicShape.operations #' @family io.operations #' @author Pepijn de Vries #' @export read.AmigaBasicShape <- function(file, disk = NULL, ...) { dat <- .read.generic(file, disk) rawToAmigaBasicShape(dat, ...) } #' Write an AmigaBasicShape object to a file #' #' Write an [AmigaBasicShape()] class object to a file in its binary format. #' #' This function coerces the Amiga Basic Shape into its binary format #' (using [AmigaFFH::as.raw()]) and writes it to a file. The file #' can also be stored onto a virtual Amiga disk #' ([`amigaDisk()`][adfExplorer::amigaDisk-class]). 
#' #' @rdname write.AmigaBasicShape #' @name write.AmigaBasicShape #' @param x The [AmigaBasicShape()] class object that needs to be #' stored. #' @param file A `character` string specifying the file location #' to which `x` (an [AmigaBasicShape()] object) needs to be written. #' @param disk A virtual Commodore Amiga disk to which the `file` should be #' written. This should be an [`amigaDisk()`][adfExplorer::amigaDisk-class] object. Using #' this argument requires the adfExplorer package. #' When set to `NULL`, this argument is ignored. #' @returns Invisibly returns the result of the call of `close` to the #' file connection. Or, when `disk` is specified, a copy of #' `disk` is returned to which the file(s) is/are written. #' @examples #' \dontrun{ #' filename <- system.file("ball.shp", package = "AmigaFFH") #' ball <- read.AmigaBasicShape(filename) #' write.AmigaBasicShape(ball, file.path(tempdir(), "ball.shp")) #' } #' @family AmigaBasicShape.operations #' @family io.operations #' @author Pepijn de Vries #' @export write.AmigaBasicShape <- function(x, file, disk = NULL) { if (!inherits(x, "AmigaBasicShape")) stop("x should be of class AmigaBasicShape.") .write.generic(x, file, disk) } #' @rdname as.raw #' @name as.raw.AmigaBasicShape #' @export as.raw.AmigaBasicShape <- function(x, ...) { if (!inherits(x, "AmigaBasicShape")) stop("x should be of class AmigaBasicShape.") sprite <- x$flags[["fVSprite"]] shadow <- x$flags[["imageShadowIncluded"]] collision <- x$flags[["collisionPlaneIncluded"]] x$flags <- .bitmapToRaw(c(x$flags, rep(F, 16)), T, T)[3:4] x$planeOnOff <- .bitmapToRaw(c(x$planeOnOff, rep(F, 16)), T, T)[3:4] result <- with(.basic.shape.header, .write.amigaData(x[par.names], byte, signed, par.names)) pal <- attributes(x$bitmap)$palette result <- c(result, .bitmapToRaw(rasterToBitmap( x$bitmap, depth = x$depth, interleaved = F, indexing = function(x, length.out) index.colours(x, length.out, palette = pal)), T, F) ) if (shadow) { if (is.null(x$shadow)) stop("Expected shadow layer, but found nothing") else { result <- c(result, .bitmapToRaw(rasterToBitmap( x$shadow, depth = 1, interleaved = F, indexing = function(x, length.out) index.colours(x, length.out, palette = c("black", "white"))), T, F) ) } } if (collision) { if (is.null(x$collision)) stop("Expected shadow layer, but found nothing") else { result <- c(result, .bitmapToRaw(rasterToBitmap( x$collision, depth = 1, interleaved = F, indexing = function(x, length.out) index.colours(x, length.out, palette = c("black", "white"))), T, F) ) } } if (sprite) { if (is.null(x$sprite_palette)) stop("Expected sprite palette, but found nothing") else { result <- c(result, colourToAmigaRaw(x$sprite_palette, "12 bit", "2")) } } return(result) } #' @export print.AmigaBasicShape <- function(x, ...) { print(sprintf("A %i x %i %s with %i colours to be used in Amiga Basic.", x$width, x$height, c("blitter object", "sprite")[as.numeric(x$flags[["fVSprite"]]) + 1], 2^x$depth), ...) } #' @rdname plot #' @name plot.AmigaBasicShape #' @export plot.AmigaBasicShape <- function(x, y, ...) { if (!inherits(x, "AmigaBasicShape")) stop("x should be of class AmigaBasicShape.") if (missing(y)) y <- "bitmap" plot(as.raster(x, selected = y), ...) } #' @rdname as.raster #' @name as.raster.AmigaBasicShape #' @export as.raster.AmigaBasicShape <- function(x, selected = c("bitmap", "shadow", "collision"), ...) 
{ if (!inherits(x, "AmigaBasicShape")) stop("x should be of class AmigaBasicShape.") bm <- x[[match.arg(selected, c("bitmap", "shadow", "collision"))]] if (is.null(bm)) stop(sprintf("No %s layer available in this object!", selected)) bm } #' Convert a grDevices raster object into an AmigaBasicShape class object. #' #' Convert a [`raster()`][grDevices::as.raster] object into an [AmigaBasicShape()] class object. #' #' This method can be used to turn any graphics into an [AmigaBasicShape()] class object. In order to do #' so, the colours of the input image (a [`raster()`][grDevices::as.raster] object) will be quantized to a #' limited palette. This palette can be forced as an argument to this function. Otherwise, it will be based on #' the input image. #' #' @rdname rasterToAmigaBasicShape #' @name rasterToAmigaBasicShape #' @param x A [`raster()`][grDevices::as.raster] class object to convert into a [AmigaBasicShape()] class obejct. #' @param type A `character` string indicating what type of graphic needs to be created: "`blitter object`" (default) or "`sprite`". #' @param palette A `vector` of `character` strings, where each element represents a colour. This palette is used to quantize the #' colours that occur in the `raster` `x`. #' @param shadow An optional layer that could be stored with the graphics. This layer could be used for specific #' shadow effects when blitting the graphics to the screen. It needs to be a [`raster()`][grDevices::as.raster] #' object consisting of the colours black (bit unset) and white (bit set). The raster needs to have the same dimensions #' as `x`. This layer will be omitted when this argument is omitted (or set to `NULL`). #' @param collision An optional layer that could be stored with the graphics. This layer could be used for collision #' detection between graphical objects. It needs to be a [`raster()`][grDevices::as.raster] #' object consisting of the colours black (bit unset) and white (bit set). The raster needs to have the same dimensions #' as `x`. This layer will be omitted when this argument is omitted (or set to `NULL`). #' @param ... Arguments passed onto [index.colours()]. Can be used, for instance, to achieve specific dithering effects. #' @returns Returns an [AmigaBasicShape()] class object based on `x`. #' @examples #' \dontrun{ #' ## get a raster image: #' ilbm <- as.raster(read.iff(system.file("ilbm8lores.iff", package = "AmigaFFH"))) #' #' ## convert to an Amiga Basic blitter object: #' bob <- rasterToAmigaBasicShape(ilbm, "blitter object") #' } #' @family AmigaBasicShape.operations #' @family raster.operations #' @author Pepijn de Vries #' @export rasterToAmigaBasicShape <- function(x, type = c("blitter object", "sprite"), palette, shadow, collision, ...) 
{ if (missing(palette)) { if (is.null(attributes(x)$palette)) { palette <- table(x) palette <- palette[order(-palette)] palette <- names(palette) } else { palette <- attributes(x)$palette } } if (missing(shadow)) shadow <- NULL if (missing(collision)) collision <- NULL depth <- ceiling(log2(length(palette))) ## if not all pixel colours are in palette, the bitmap needs to be quantized if (!all((x %in% palette))) { x <- apply( index.colours(x, length.out = 2^depth, palette = palette, ...), 2, function(y) palette[y]) x <- as.raster(x) attributes(x)$palette <- palette } type <- match.arg(type, c("blitter object", "sprite")) if (type == "sprite" && (length(palette) != 4 || attributes(x)$dim[[2]] != 16)) stop("AmigaBasicShape sprites have to be 16 pixels wide and consist of 4 colours!") result <- sapply(.basic.shape.header$par.names, function(x) NULL) result$colorset <- 0 result$dataset <- 0 result$depth <- depth result$width <- attributes(x)$dim[[2]] result$height <- attributes(x)$dim[[1]] result$flags <- rep(F, 16) names(result$flags) <- .amigabasicshape.flags result$flags[c("saveBack", "overlay")] <- T result$flags["fVSprite"] <- type == "sprite" result$flags["imageShadowIncluded"] <- !is.null(shadow) result$flags["collisionPlaneIncluded"] <- !is.null(collision) result$planePick <- 2^depth - 1 result$planeOnOff <- rep(F, 16) result$bitmap <- x attributes(result$bitmap)$palette <- palette specialLayer <- function(z, w) { z <- as.raster(z) if (!identical(dim(z), dim(result$bitmap))) stop("Shadow layer should have the same dimensions as the bitmap") z <- index.colours(z, 2) cols <- grDevices::rgb2hsv(grDevices::col2rgb(attributes(z)$palette)) l <- (2 - cols["s",])*cols["v",]/2 attributes(z)$palette <- c("black", "white")[order(l)] z <- as.raster(apply(z, 2, function(i) attributes(z)$palette[i])) attributes(z)$palette <- c("black", "white") z } if (!is.null(shadow)) result$shadow <- specialLayer(shadow, "shadow") if (!is.null(collision)) result$collision <- specialLayer(collision, "collision") if (type == "sprite") result$sprite_palette <- palette[-1] ## background colour is not stored for sprite, hence -1 class(result) <- "AmigaBasicShape" result } #' The S3 AmigaBasicBMAP class #' #' A class that represents the content of Amiga Basic BMAP files. #' #' The Amiga operating system made use of library files to execute specific (repetitive/routine) tasks. Amiga Basic #' was also able to call such routines from library files. In order to do so, it required a 'bmap' file for each #' library. This file contains a map of the library where it specifies: the name of routine; the `Library Vector Offset' #' (explained below); and used CPU registers (explained below). #' #' The `Library Vector Offset' is an offset to the base address of a library in memory. This offsets indicates where #' a specific executable routine starts. The CPU registers are used to (temporary) store (pointers to) input data #' used by the routine. The BMAP file thus lists which CPU registers are used by specified routines. #' #' @docType class #' @name AmigaBasicBMAP #' @rdname AmigaBasicBMAP #' @family AmigaBasic.operations #' @author Pepijn de Vries #' @references <https://en.wikipedia.org/wiki/AmigaOS#Libraries_and_devices> NULL #' Read and write Amiga Basic BMAP files #' #' Read and write [AmigaBasicBMAP()] binary file format. #' #' An [Amiga Basic BMAP][AmigaFFH::AmigaBasicBMAP] file maps the offset of routines in Amiga libraries and can be #' read as a comprehensive list and written back to a binary file using these functions. 
#' @rdname AmigaBasicBMAP-io #' @name read.AmigaBasicBMAP #' @param x A [AmigaBasicBMAP()] class object that needs to be #' stored. #' @param file A `character` string of the filename of the Amiga Basic BMAP file to be read or written. #' @param disk A virtual Commodore Amiga disk from which the `file` should be #' read or written to. This should be an [`amigaDisk()`][adfExplorer::amigaDisk-class] object. Using #' this argument requires the adfExplorer package. #' When set to `NULL`, this argument is ignored. #' @returns Returns an [AmigaBasicBMAP()] class object read from the `file` in case of #' `read.AmigaBasicBMAP`. Otherwise, invisibly returns the result of the call of `close` to the #' file connection. Or, when `disk` is specified, a copy of #' `disk` is returned to which the file is written. #' @examples #' \dontrun{ #' ## A small fragment of the dos.library BMAP would look like this: #' dos.bmap <- as.AmigaBasicBMAP(list( #' xOpen = list( #' libraryVectorOffset = -30, #' registers = as.raw(2:3) #' ), #' xClose = list( #' libraryVectorOffset = -36, #' registers = as.raw(2) #' ), #' xRead = list( #' libraryVectorOffset = -42, #' registers = as.raw(2:4) #' ) #' )) #' #' ## This will write the BMAP to a file: #' write.AmigaBasicBMAP(dos.bmap, file.path(tempdir(), "dos.bmap")) #' #' ## This will read the same file: #' dos.bmap.copy <- read.AmigaBasicBMAP(file.path(tempdir(), "dos.bmap")) #' } #' @family AmigaBasic.operations #' @family io.operations #' @author Pepijn de Vries #' @export read.AmigaBasicBMAP <- function(file, disk = NULL) { dat <- .read.generic(file, disk) rawToAmigaBasicBMAP(dat) } #' @rdname AmigaBasicBMAP-io #' @name write.AmigaBasicBMAP #' @export write.AmigaBasicBMAP <- function(x, file, disk = NULL) { .write.generic(x, file, disk) } #' @rdname as.raw #' @name as.raw.AmigaBasicBMAP #' @export as.raw.AmigaBasicBMAP <- function(x) { .validate_AmigaBasicBMAP(x) unlist(lapply(seq_along(x), function(i) { c(charToRaw(names(x)[i]), raw(1), .amigaIntToRaw(x[[i]]$libraryVectorOffset, 16, T), x[[i]]$registers, raw(1)) })) } #' @export print.AmigaBasicBMAP <- function(x, ...) { print(sprintf("An AmigaBasicBMAP with %i references.", length(x)), ...) } #' Coerce raw data into an AmigaBasicBMAP class object #' #' Coerce raw data into an [AmigaBasicBMAP()] class object #' #' An [Amiga Basic BMAP][AmigaFFH::AmigaBasicBMAP] file maps the offset of routines in Amiga libraries. This #' function converts the raw format in which it would be stored as a file into a comprehensive S3 class object. #' #' @rdname rawToAmigaBasicBMAP #' @name rawToAmigaBasicBMAP #' @param x A `vector` of `raw` data that is to be converted #' into an [AmigaBasicBMAP()] class object. #' @param ... Currently ignored. #' @returns An [AmigaBasicBMAP()] class object based on `x`. #' @examples #' \dontrun{ #' ## A small fragment of the dos.library BMAP would look like this: #' dos.bmap <- as.AmigaBasicBMAP(list( #' xOpen = list( #' libraryVectorOffset = -30, #' registers = as.raw(2:3) #' ), #' xClose = list( #' libraryVectorOffset = -36, #' registers = as.raw(2) #' ), #' xRead = list( #' libraryVectorOffset = -42, #' registers = as.raw(2:4) #' ) #' )) #' #' ## The raw representation would be #' dos.bmap.raw <- as.raw(dos.bmap) #' #' ## And the reverse #' rawToAmigaBasicBMAP(dos.bmap.raw) #' } #' @family AmigaBasic.operations #' @family raw.operations #' @author Pepijn de Vries #' @export rawToAmigaBasicBMAP <- function(x, ...) 
{ x <- c(raw(1), x) terminator <- c(which(x == 0x00), length(x) + 1) nm <- utils::head(unlist(lapply( mapply(seq, from = terminator[seq.int(1, length(terminator), 2)] + 1, to = terminator[seq.int(2, length(terminator), 2)] - 1), function(y) rawToChar(x[y]))), -1) lvo <- utils::head(unlist(lapply( mapply(seq, from = terminator[seq.int(2, length(terminator), 2)] + 1, to = terminator[seq.int(2, length(terminator), 2)] + 2, SIMPLIFY = F), function(y) .rawToAmigaInt(as.raw(x[y]), 16, T))), -1) registers <- lapply( mapply(seq, from = utils::head(terminator[seq.int(2, length(terminator), 2)] + 3, -1), to = utils::head(terminator[seq.int(2, length(terminator), 2) + 1] -1, -1), SIMPLIFY = F), function(y) as.raw(x[y])) no_reg <- diff(terminator) < 4 no_reg <- which(no_reg[c(F, T)]) registers[no_reg] <- lapply(seq_along(no_reg), function(i) raw()) result <- lapply(seq_along(nm), function(i) { list(libraryVectorOffset = lvo[[i]], registers = registers[[i]]) }) names(result) <- nm class(result) <- "AmigaBasicBMAP" .validate_AmigaBasicBMAP(result) return (result) } #' @export as.list.AmigaBasicBMAP <- function(x, ...) { unclass(x) } #' Coerce raw or named list to an AmigaBasicBMAP class object #' #' Coerce `raw` or named `list` to an [AmigaBasicBMAP()] class object #' #' An [Amiga Basic BMAP][AmigaFFH::AmigaBasicBMAP] file maps the offset of routines in Amiga libraries. This #' function converts the raw format in which it would be stored as a file into a comprehensive S3 class object. It #' can also convert a named list into an S3 class object. See `Arguments' and `Examples' sections on how to format #' this list. #' #' @rdname as.AmigaBasicBMAP #' @name as.AmigaBasicBMAP #' @param x When `x` is a `vector` of `raw` data, it needs to be structured as it would be #' when stored in a binary file (see [read.AmigaBasicBMAP()]). `x` can also be a named `list`, #' where the name of each element corresponds with a routine in the library. Each element should than consist #' of a `list` with 2 elements: The first should be named `libraryVectorOffset' and should hold the `numeric` #' offset of the routine in the library (see details). The second element should be named `registers' and should #' contain a `vector` of `raw` values refering to CPU registers used by the routine (see details). #' @returns Returns a [AmigaBasicBMAP()] based on `x` #' @examples #' \dontrun{ #' ## For the dos.library, the start of the bmap list would look like: #' dos.list <- list( #' xOpen = list( #' libraryVectorOffset = -30, #' registers = as.raw(2:3) #' ), #' xClose = list( #' libraryVectorOffset = -36, #' registers = as.raw(2) #' ), #' xRead = list( #' libraryVectorOffset = -42, #' registers = as.raw(2:4) #' ) #' ) #' #' ## Note that the list above is incomplete, the dos.library holds more routines than shown here. #' ## This merely serves as an example. 
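#'
#' ## Each entry pairs a negative 'libraryVectorOffset' with the raw codes of
#' ## the CPU registers holding (pointers to) the routine's input data;
#' ## an entry can be inspected like so:
#' str(dos.list$xOpen)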
#' ## This list can be converted to an S3 class as follows:
#' dos.bmap <- as.AmigaBasicBMAP(dos.list)
#' }
#' @family AmigaBasic.operations
#' @author Pepijn de Vries
#' @export
as.AmigaBasicBMAP <- function(x) {
  if (typeof(x) == "raw") return(rawToAmigaBasicBMAP(x))
  if (typeof(x) != "list") stop("No method available for converting this object into AmigaBasicBMAP")
  x <- as.list(x)
  class(x) <- "AmigaBasicBMAP"
  .validate_AmigaBasicBMAP(x)
  x
}

.validate_AmigaBasicBMAP <- function(x) {
  if (!inherits(x, "AmigaBasicBMAP")) stop("AmigaBasicBMAP should inherit AmigaBasicBMAP class")
  if (typeof(x) != "list") stop("AmigaBasicBMAP should be of type list")
  if (any(apply(check.names.AmigaBasic(names(x)), 1, any)))
    stop("AmigaBasicBMAP routine names should not be Amiga Basic reserved words!")
  registers_ok <- unlist(lapply(x, function(y) {
    y$libraryVectorOffset >= -32768 && y$libraryVectorOffset < 0 &&
      (length(y$registers) == 0 ||
         (all(y$registers %in% as.raw(1:15)) && !any(duplicated(y$registers))))
  }))
  if (!all(registers_ok))
    stop("Register numbers should be unique raw values ranging from 1 to 15, and library vector offsets should be in the range of -32768 to -1")
  return (T)
}
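
## A minimal round-trip sketch (kept inside `if (FALSE)` so it does not run when
## this file is sourced). It only uses functions defined above; the routine name
## and values are illustrative, not a complete library map:
if (FALSE) {
  demo.bmap <- as.AmigaBasicBMAP(list(
    xOpen = list(libraryVectorOffset = -30, registers = as.raw(2:3))
  ))
  demo.raw <- as.raw(demo.bmap)  ## serialise to the binary BMAP format
  rawToAmigaBasicBMAP(demo.raw)  ## ...and parse it back into an S3 object
}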
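## Similarly, a hedged sketch of the shape functions defined above (again wrapped
## in `if (FALSE)`; 'ball.shp' ships with this package, as used in the examples):
if (FALSE) {
  ball <- read.AmigaBasicShape(system.file("ball.shp", package = "AmigaFFH"))
  plot(ball)  ## renders the 'bitmap' layer by default
  ## a raster can be turned back into a blitter object:
  bob <- rasterToAmigaBasicShape(as.raster(ball), "blitter object")
}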
#' The S3 AmigaBitmapFont and AmigaBitmapFontSet classes
#'
#' A comprehensive representation of monochromous Amiga bitmap fonts.
#'
#' Nowadays, fonts are represented by vector graphics on computer systems.
#' On the original Commodore Amiga, the screen resolution, system memory
#' and CPU speed were limited. On those systems, it was more efficient
#' to use bitmap images to represent the glyphs in fonts. The
#' `AmigaBitmapFontSet` and `AmigaBitmapFont` classes can be used
#' to represent Amiga bitmap fonts.
#'
#' The Commodore Amiga had a directory named 'FONTS' located in the
#' root, where (bitmap) fonts were stored. Font sets were stored
#' under the font name with a *.font extension. Files with the *.font
#' extension did not contain the bitmap images of the font. Rather
#' the *.font file contained information on which font heights (in
#' pixels) are available, in addition to some other meta-information.
#'
#' The bitmap images were stored in separate files for each individual
#' height. The `AmigaBitmapFontSet` is an S3 class that forms
#' a comprehensive format (named `list`) to represent the *.font
#' files. The `AmigaBitmapFont` is an S3 class that forms a comprehensive
#' format (named `list`) representing each font bitmap and its glyph
#' information. The `AmigaBitmapFontSet` objects will hold one or more
#' `AmigaBitmapFont` objects.
#'
#' The `AmigaBitmapFont` and `AmigaBitmapFontSet` objects are
#' essentially named `list`s. Their structure and most important
#' elements are described below. Although it is possible to replace
#' elements manually, it is only advisable when you know what you
#' are doing as it may break the validity of the font.
#'
#' @section AmigaBitmapFontSet:
#'  * `fch_FileID`: A `factor` with levels 'FontContents', 'TFontContents' and
#'    'ScalableOutline'. It specifies the type of font.
#'    Currently only the first level is supported.
#'  * `fch_NumEntries`: number of font heights available for this font. It should
#'    match the length of `FontContents`. Do not change
#'    this value manually.
#'  * `FontContents`: This is a `list` with bitmap entries for each specific font
#'    height (in pixels). The name of each element in this list is
#'    'pt' followed by the height. Each element in this list holds
#'    the elements:
#'      * Miscellaneous: Miscellaneous information from the \*.font file
#'      * `fc_FileName`: This element represents the filename of the
#'        nested font bitmap images. Note that it should be a valid
#'        Commodore Amiga filename. It is best to modify this name
#'        using [fontName()]. Note that this field could cause
#'        problems as Commodore Amiga filenames can contain characters
#'        that most modern platforms would not allow (such as the
#'        question mark).
#'      * `BitmapFont`: This element is of type `AmigaBitmapFont` and is structured
#'        as described in the following section. The information in this
#'        element is no longer part of the *.font file.
#'
#' @section AmigaBitmapFont:
#' Information represented by an `AmigaBitmapFont` is not stored
#' in *.font files. Rather it is stored in sub-directories of the font
#' in separate files. It has the following structure:
#'  * Miscellaneous: Elements with information on the font
#'    properties and style, and also relative file pointers.
#'  * `glyph.info`: A `data.frame` containing glyph info with information
#'    for specific glyphs on each row. Each row matches with a specific
#'    ASCII code, ranging from `tf_LoChar` up to `tf_HiChar`. There is an additional
#'    row that contains information for the default glyph that is
#'    out of the range of the `tf_LoChar` and `tf_HiChar`. The `data.frame`
#'    thus has `2 + tf_HiChar - tf_LoChar` rows. This
#'    table is used to extract and plot a glyph from the
#'    `bitmap` image correctly.
#'  * `bitmap`: A monochromous bitmap image of all the font's glyphs in a
#'    single line. It is a simple `raster` object
#'    (see [grDevices::as.raster()]) with an additional
#'    attribute 'palette', which lists the two colours in the image. In
#'    this palette, the first colour is the background colour and the
#'    second colour is interpreted as the foreground colour.
#'
#' @section Useful functions:
#' For importing and exporting the following functions are useful:
#' [read.AmigaBitmapFont()], [read.AmigaBitmapFontSet()],
#' [write.AmigaBitmapFont()] and [write.AmigaBitmapFontSet()].
#'
#' The following generic functions are implemented for these objects:
#' [AmigaFFH::plot()], `print`,
#' [AmigaFFH::as.raster()] and [AmigaFFH::as.raw()].
#'
#' Use [AmigaFFH::c()] to combine one or more
#' `AmigaBitmapFont` objects into an `AmigaBitmapFontSet`.
#'
#' @docType class
#' @aliases AmigaBitmapFontSet
#' @name AmigaBitmapFont
#' @rdname AmigaBitmapFont
#' @family AmigaBitmapFont.operations
#' @family raster.operations
#' @author Pepijn de Vries
#' @references
#' <http://amigadev.elowar.com/read/ADCD_2.1/Libraries_Manual_guide/node03E0.html>
#' <http://amigadev.elowar.com/read/ADCD_2.1/Libraries_Manual_guide/node03DE.html>
#' <http://amigadev.elowar.com/read/ADCD_2.1/Libraries_Manual_guide/node05BA.html>
#' @examples
#' \dontrun{
#' ## 'font_example' is an example of the AmigaBitmapFontSet object:
#' data(font_example)
#'
#' ## An AmigaBitmapFont object can also be extracted from this object:
#' font_example_9 <- getAmigaBitmapFont(font_example, 9)
#'
#' ## the objects can be printed, plotted, converted to raw data or a raster:
#' print(font_example)
#' plot(font_example)
#' font_example_raw <- as.raw(font_example)
#' font_example_raster <- as.raster(font_example)
#'
#' ## You can also format text using the font:
#' formatted_raster <- as.raster(font_example, text = "Foo bar", style = "bold")
#' plot(font_example, text = "Foo bar", style = "underlined", interpolate = F)
#' }
NULL

.print_to_raster <- function(text, font, style = NULL, palette = NULL) {
  if (inherits(font, "AmigaBitmapFontSet")) {
    h <- availableFontSizes(font)
    font <- getAmigaBitmapFont(font, h[length(h)])
  }
  if (!inherits(font, "AmigaBitmapFont")) stop("'font' should be of class AmigaBitmapFont or AmigaBitmapFontSet.")
  if (!is.null(style)) {
    style <- match.arg(style, c("bold", "italic", "underlined"), T)
    ## You can't apply styles to a font that is already styled:
    style <- style[!font$tf_Style[toupper(style)]]
  }
  if (length(text) > 1) {
    warning("'text' has multiple elements, only using the first.")
    text <- text[1]
  }
  # split text along lines:
  text <- strsplit(text, "\n")[[1]]
  pal <- attr(font$bitmap, "palette")
  bm <- as.matrix(font$bitmap)
  bm <- apply(bm, 2, function(y) y == pal[[2]])
  if (!is.null(palette)) pal <- palette
  result <- lapply(text, function(y) {
    ## get ascii codes:
    y <- utf8ToInt(enc2utf8(y))
    ## the final glyph is the default glyph when it is out of range
    y[y < font$tf_LoChar] <- font$tf_HiChar + 1
    y[y > font$tf_HiChar] <- font$tf_HiChar + 1
    y <- 1 + y - font$tf_LoChar
    positions <- apply(
      font$glyph.info[y,][,names(font$glyph.info) %in% c("glyphWidth", "charSpace"), drop = F],
      1,
      max
    )
    if (!is.null(font$glyph.info$charKern)) positions <-
positions + c(font$glyph.info$charKern[y][-1], 0) positions <- cumsum(c(0, positions))[1:length(positions)] positions <- 1 + positions - min(positions) h <- font$tf_YSize glyphs <- mapply(function(loc, w, sp, kern) { result <- matrix(F, h + 2, max(c(sp, w)) + ifelse("bold" %in% style, font$tf_BoldSmear, 0) + ifelse(kern > 0, kern, 0) + ifelse("italic" %in% style, ceiling((h + 1)/2), 0)) if (w > 0) { for (i in 0:ifelse("bold" %in% style, font$tf_BoldSmear, 0)) { result[1:h, i + (1:w)] <- result[1:h, i + (1:w)] | bm[,(loc + 1):(loc + w)] } if ("italic" %in% style) { for (j in 1:(h - ifelse(h > 15, 2, 1))) { ## The displacement seems to shift for h > 15 displacement <- floor((h - j + ifelse(h > 15, 0, 1))/2) result[j,] <- c(rep(F, displacement), utils::head(result[j,], -displacement)) } } } if (ncol(result) > 0) { if ("underlined" %in% style) { result[font$tf_Baseline + 2,] <- result[font$tf_Baseline + 2,] | (c(!result[font$tf_Baseline + 2,][-1], T) & c(T, !result[font$tf_Baseline + 2,][-ncol(result)])) } } result }, loc = font$glyph.info$glyphLocation[y], w = font$glyph.info$glyphWidth[y], sp = if(is.null(font$glyph.info$charSpace)) rep(0, length(y)) else font$glyph.info$charSpace[y], kern = if(is.null(font$glyph.info$charKern)) rep(0, length(y)) else c(font$glyph.info$charKern[y][-1], 0), SIMPLIFY = F) widths <- unlist(lapply(glyphs, ncol)) result <- matrix(F, nrow = h + 2, ncol = max(positions + widths)) lapply(1:length(glyphs), function(i) { result[,positions[i]:(positions[i] + widths[i] - 1)] <<- result[,positions[i]:(positions[i] + widths[i] - 1)] | glyphs[[i]] }) ## make background transparent if no palette is specified if (is.null(palette)) pal[1] <<- grDevices::adjustcolor(pal[1], alpha.f = 0) result <- apply(result, 2, function(y) pal[1 + as.numeric(y)]) return(result) }) result2 <- matrix(pal[1], sum(unlist(lapply(result, nrow))), sum(unlist(lapply(result, ncol)))) lapply(1:length(result), function(i) { result2[(i - 1)*(font$tf_YSize + 2) + 1:(font$tf_YSize + 2),][,1:ncol(result[[i]])] <<- result[[i]] NULL }) result2 <- as.raster(result2) return(result2) } .amiga.node <- data.frame( byte = c(4, 4, 1, 1, 4), signed = c(F, F, F, T, F), par.names = c("ln_Succ", "ln_Pred", "ln_Type", "ln_Pri", "ln_Name"), stringsAsFactors = F ) .amiga.node.types <- c("NT_UNKNOWN", "NT_TASK", "NT_INTERRUPT", "NT_DEVICE", "NT_MSGPORT", "NT_MESSAGE", "NT_FREEMSG", "NT_REPLYMSG", "NT_RESOURCE", "NT_LIBRARY", "NT_MEMORY", "NT_SOFTINT", "NT_FONT", "NT_PROCESS", "NT_SEMAPHORE", "NT_SIGNALSEM", "NT_BOOTNODE", "NT_KICKMEM", "NT_GRAPHICS", "NT_DEATHMESSAGE", "NT_USER", "NT_EXTENDED") .amiga.font.sets <- data.frame( byte = c(2, 2), signed = c(F, F), par.names = c("fch_FileID", "fch_NumEntries"), stringsAsFactors = F ) .amiga.font.types <- c("FontContents", "TFontContents", "ScalableOutline") .read.amiga.node <- function(dat) { ## Currently only works for simple nodes as included in font files result <- with(.amiga.node, .read.amigaData(dat, byte, signed, par.names)) if (result$ln_Type < 22 || result$ln_Type >= 254) { result$ln_Type <- result$ln_Type + 1 if (result$ln_Type > 254) result$ln_Type <- result$ln_Type - 234 result$ln_Type <- .match.factor(result, "ln_Type", 1:length(.amiga.node.types), .amiga.node.types) } return(result) } .amiga.node.to.raw <- function(node) { node <- node[.amiga.node$par.names] node$ln_Type <- match(as.character(node$ln_Type), .amiga.node.types) if (node$ln_Type >= 22) node$ln_Type <- node$ln_Type + 234 node$ln_Type <- node$ln_Type - 1 node <- with(.amiga.node, .write.amigaData(node, 
byte, signed, par.names)) node } .amiga.font.header <- data.frame( byte = c(-36, -14, 2, 2, 4, -32, -14, 4, 2, 2, -1, -1, 2, 2, 2, 2, 1, 1, 4, 2, 4, 4, 4), signed = c( F, F, F, F, F, F, F, F, F, F, F, F, F, F, F, F, F, F, F, F, F, F, F), par.names = c("leadingHunks", "node.disklink", "dfh_FileID", "dfh_Revision", "dfh_Segment", "fontName", "node.message", "mn_ReplyPort", "mn_Length", "tf_YSize", "tf_Style", "tf_Flags", "tf_XSize", "tf_Baseline", "tf_BoldSmear", "tf_Accessors", "tf_LoChar", "tf_HiChar", "tf_CharData", "tf_Modulo", "tf_CharLoc", "tf_CharSpace", "tf_CharKern"), stringsAsFactors = F ) #' Read AmigaBitmapFontSet from *.font file #' #' Reads [AmigaBitmapFontSet()] from *.font file including #' all nested bitmap images for all font heights. #' #' The *.font file only holds meta-information. The bitmap images for #' each font height are stored in separate files, which are listed #' in the *.font file. The function reads the *.font file, including #' all nested bitmap files and returns it as a #' [AmigaBitmapFontSet()]. #' #' It can also read *.font files #' from [adfExplorer::amigaDisk-class()] objects, #' but that requires the adfExplorer package to be installed. #' @rdname read.AmigaBitmapFontSet #' @name read.AmigaBitmapFontSet #' @param file A `character` string of the filename of the *.font file to be read. #' @param disk A virtual Commodore Amiga disk from which the `file` should be #' read. This should be an [`amigaDisk()`][adfExplorer::amigaDisk-class] object. Using #' this argument requires the adfExplorer package. #' When set to `NULL`, this argument is ignored. #' @param ... Currently ignored. #' @returns Returns an [AmigaBitmapFontSet()] object read from the specified file. #' @examples #' \dontrun{ #' data(font_example) #' #' ## in order to read, we first need to write a file" #' write.AmigaBitmapFontSet(font_example, tempdir()) #' #' ## The font is written as 'AmigaFFH.font' as that name #' ## is embedded in the AmigaBitmapFontSet object 'font_example'. #' ## We can read it as follows: #' font.read <- read.AmigaBitmapFontSet(file.path(tempdir(), "AmigaFFH.font")) #' #' ## similarly, the file can also be written and read from and to #' ## a virtual amiga disk. The following codes requires the 'adfExplorer' #' ## package: #' adf <- adfExplorer::blank.amigaDOSDisk("font.disk") #' adf <- adfExplorer::dir.create.adf(adf, "FONTS") #' adf <- write.AmigaBitmapFontSet(font_example, "DF0:FONTS", adf) #' font.read <- read.AmigaBitmapFontSet("DF0:FONTS/AmigaFFH.font", adf) #' } #' @family AmigaBitmapFont.operations #' @family io.operations #' @author Pepijn de Vries #' @export read.AmigaBitmapFontSet <- function(file, disk = NULL, ...) { dat <- .read.generic(file, disk) rawToAmigaBitmapFontSet(dat, file, disk) } #' Coerce raw data into an AmigaBitmapFontSet class object #' #' [AmigaBitmapFontSet()] objects are comprehensive representations of binary Amiga #' font files (*.font). Use this function to convert `raw` data from #' such a file to an [AmigaBitmapFontSet] object. #' #' This function converts `raw` data as stored in *.font #' files. The function also needs the file location, in order #' to load the nested bitmap images for each font height. #' This function is effectively the inverse of [AmigaFFH::as.raw()]. #' #' @rdname rawToAmigaBitmapFontSet #' @name rawToAmigaBitmapFontSet #' @param x A `vector` of `raw` data that needs to be #' converted into an [AmigaBitmapFontSet()]. 
#' @param file The `raw` version of the [AmigaBitmapFontSet()] #' does not contain the nested font bitmap images. In order to correctly #' construct an [AmigaBitmapFontSet()] the file location of the #' original *.font file is required in order to read and include the #' font bitmap image information. `file` should thus be a `character` #' string specifying the file location of the *.font file. #' @param disk A virtual Commodore Amiga disk from which the `file` should be #' read. This should be an [`amigaDisk()`][adfExplorer::amigaDisk-class] object. Using #' this argument requires the adfExplorer package. #' When set to `NULL`, this argument is ignored. #' @returns Returns an [AmigaBitmapFontSet()] object. #' @examples #' \dontrun{ #' data(font_example) #' #' ## First create raw font set data. Note that this raw data #' ## does not include the nested font bitmap images. #' fontset.raw <- as.raw(font_example) #' #' ## Therefore it is necesary to have the entire font stored as files: #' write.AmigaBitmapFontSet(font_example, tempdir()) #' #' font.restored <- rawToAmigaBitmapFontSet(fontset.raw, file.path(tempdir(), "AmigaFFH.font")) #' } #' @family AmigaBitmapFont.operations #' @family raw.operations #' @author Pepijn de Vries #' @export rawToAmigaBitmapFontSet <- function(x, file, disk = NULL) { result <- with(.amiga.font.sets, .read.amigaData(x[1:4], byte, signed, par.names)) result$fch_FileID <- .match.factor(result, "fch_FileID", c(0x0f00, 0x0f02, 0x0f03), .amiga.font.types) if (result$fch_FileID != "FontContents") stop(sprintf("%s font type is not (yet) supported.", as.character(result$fch_FileID))) result[["FontContents"]] <- lapply(result$fch_NumEntries:1, function(i) { if (result$fch_FileID == "FontContents") { offset <- 260*(i - 1) result <- list( fc_FileName = .rawToCharNull(x[-1:-4][offset + 1:256]), fc_YSize = .rawToAmigaInt(x[-1:-4][offset + 257:258], 16, F), fc_Style = x[-1:-4][offset + 259], fc_Flags = x[-1:-4][offset + 260] ) result$fc_Style <- as.logical(.rawToBitmap(result$fc_Style, F, F)) names(result$fc_Style) <- c("UNDERLINED", "BOLD", "ITALIC", "EXTENDED", "RESERVED1", "RESERVED2", "COLORFONT", "TAGGED") result$fc_Flags <- as.logical(.rawToBitmap(result$fc_Flags, F, F)) names(result$fc_Flags) <- c("ROMFONT", "DISKFONT", "REVPATH", "TALLDOT", "WIDEDOT", "PROPORTIONAL", "DESIGNED", "REMOVED") if (is.null(disk)) { result[["BitmapFont"]] <- ## replace the file amiga file separator with the platform dependent file separator, ## and read font from file: read.AmigaBitmapFont(file.path(dirname(file), gsub("[/]", .Platform$file.sep, result$fc_FileName))) } else { result[["BitmapFont"]] <- read.AmigaBitmapFont(paste(c(utils::head(strsplit(file, "/", T)[[1]], -1), result$fc_FileName), collapse = "/"), disk) } if (any(result$fc_Style != result$BitmapFont$tf_Style)) warning(sprintf("Styles defined in main font (*.font) and bitmap file (%s) do not match.", result$fc_FileName)) if (any(result$fc_Flags != result$BitmapFont$tf_Flags)) warning(sprintf("Flags defined in main font (*.font) and bitmap file (%s) do not match.", result$fc_FileName)) result } else { stop("This bitmap font type is not (yet) supported") } }) pt.size <- as.numeric(unlist(lapply(strsplit(unlist(lapply(result$FontContents, function(x) x$fc_FileName)), "/"), function(x) x[[2]]))) result$FontContents <- result$FontContents[order(pt.size)] names(result$FontContents) <- paste0("pt", sort(pt.size)) class(result) <- "AmigaBitmapFontSet" return(result) } #' Read an AmigaBitmapFont class object from a file #' #' Amiga 
#' Amiga Font Bitmaps of distinct font heights are stored in separate
#' files, which in combination form a font collection or set. This
#' function can be used to read a specific bitmap from a set and returns
#' it as an [AmigaBitmapFont()] class object.
#'
#' Individual font bitmaps are stored in a font's subdirectory where
#' the file name is usually equal to the font height in pixels. This
#' function will read such a font bitmap file and return it as an
#' [AmigaBitmapFont()] class object. It can also read such
#' files from [adfExplorer::amigaDisk-class()] objects,
#' but that requires the adfExplorer package to be installed.
#'
#' @rdname read.AmigaBitmapFont
#' @name read.AmigaBitmapFont
#' @param file The file name of a font subset is usually simply a numeric number
#' indicating the font height in pixels. Use `file` as a `character`
#' string representing that file location.
#' @param disk A virtual Commodore Amiga disk from which the `file` should be
#' read. This should be an [`amigaDisk()`][adfExplorer::amigaDisk-class] object. Using
#' this argument requires the adfExplorer package.
#' When set to `NULL`, this argument is ignored.
#' @param ... Arguments passed on to [rawToAmigaBitmapFont()].
#' @returns Returns an [AmigaBitmapFont()] object read from the specified file.
#' @examples
#' \dontrun{
#' data(font_example)
#' 
#' ## Let's store the example font first:
#' write.AmigaBitmapFontSet(font_example, tempdir())
#' 
#' ## Now read a specific subset from the font files:
#' font.sub <- read.AmigaBitmapFont(file.path(tempdir(), "AmigaFFH", "9"))
#' 
#' ## The same can be done with a virtual Amiga disk. The following
#' ## examples require the 'adfExplorer' package.
#' font.disk <- adfExplorer::blank.amigaDOSDisk("font.disk")
#' font.disk <- adfExplorer::dir.create.adf(font.disk, "FONTS")
#' font.disk <- write.AmigaBitmapFontSet(font_example, "DF0:FONTS", font.disk)
#' font.sub <- read.AmigaBitmapFont("DF0:FONTS/AmigaFFH/9", font.disk)
#' }
#' @family AmigaBitmapFont.operations
#' @family io.operations
#' @author Pepijn de Vries
#' @export
read.AmigaBitmapFont <- function(file, disk = NULL, ...) {
  dat <- .read.generic(file, disk)
  rawToAmigaBitmapFont(dat, file, ...)
}

#' Write an AmigaBitmapFont(set) file
#'
#' Functions to write [AmigaBitmapFont()] and [AmigaBitmapFontSet()]
#' class objects to files.
#'
#' [AmigaBitmapFontSet()] class objects are written to a *.font
#' file. The filename used for this purpose is obtained from the object
#' itself using [fontName()]. In addition, a subdirectory is
#' created automatically (when it doesn't already exist)
#' to which all the separate bitmap images for each font height are written
#' as individual files.
#'
#' [AmigaBitmapFont()] class objects can also be written to a
#' file. In order to use it on a Commodore Amiga or emulator, it is better
#' to embed the font bitmap in a font set (using [AmigaFFH::c()])
#' and write the set to corresponding files.
#' @rdname write.AmigaBitmapFont
#' @name write.AmigaBitmapFont
#' @param x Respectively an [AmigaBitmapFont()] or a
#' [AmigaBitmapFontSet()] object depending on which of the
#' write-functions is called. This is the object that will be written
#' to the specified file.
#' @param file A `character` string specifying the file location
#' to which `x` (an [AmigaBitmapFont()] object) needs to be written.
#' It is common practice on the Amiga to use the font height in pixels as
#' file name.
#' @param path A `character` string specifying the path where
#' `x` (an [AmigaBitmapFontSet()] object) needs to be stored.
#' The filename for the font set will be extracted from `x` using
#' [fontName()] followed by the *.font extension. A subdirectory
#' will be created with the same name (without the extension) if it doesn't
#' already exist. In this subdirectory all the nested [AmigaBitmapFont()]
#' objects are stored.
#' @param disk A virtual Commodore Amiga disk to which the `file` should be
#' written. This should be an [`amigaDisk()`][adfExplorer::amigaDisk-class] object. Using
#' this argument requires the adfExplorer package.
#' When set to `NULL`, this argument is ignored.
#' @returns Invisibly returns the result of the call of `close` to the
#' file connection. Or, when `disk` is specified, a copy of
#' `disk` is returned to which the file(s) is/are written.
#' @examples
#' \dontrun{
#' ## obtain a bitmap font set:
#' data(font_example)
#' 
#' ## write the font set to its files. The file name
#' ## is extracted from the font object, so you only have
#' ## to provide the path:
#' write.AmigaBitmapFontSet(font_example, tempdir())
#' 
#' ## extract a font bitmap:
#' font <- getAmigaBitmapFont(font_example, 9)
#' 
#' ## and write it to the temp dir:
#' write.AmigaBitmapFont(font, file.path(tempdir(), "9"))
#' 
#' ## The following examples require the 'adfExplorer' package:
#' font.disk <- adfExplorer::blank.amigaDOSDisk("font.disk")
#' font.disk <- adfExplorer::dir.create.adf(font.disk, "FONTS")
#' font.disk <- write.AmigaBitmapFontSet(font_example, "DF0:FONTS", font.disk)
#' }
#' @family AmigaBitmapFont.operations
#' @family io.operations
#' @author Pepijn de Vries
#' @export
write.AmigaBitmapFont <- function(x, file, disk = NULL) {
  if (!inherits(x, "AmigaBitmapFont")) stop("x should be of class AmigaBitmapFont.")
  .write.generic(x, file, disk)
}

#' @rdname write.AmigaBitmapFont
#' @name write.AmigaBitmapFontSet
#' @export
write.AmigaBitmapFontSet <- function(x, path = getwd(), disk = NULL) {
  if (!inherits(x, "AmigaBitmapFontSet")) stop("x should be of class AmigaBitmapFontSet.")
  filenames <- unlist(lapply(x$FontContents, function(y) y$fc_FileName))
  filenames <- do.call(rbind, strsplit(filenames, "/"))
  if (ncol(filenames) != 2) stop("Unexpected file structure.")
  if (length(unique(filenames[,1])) != 1) stop("Not a single base name for the font.")
  fn <- sprintf("%s.font", filenames[1, 1])
  if (is.null(disk)) {
    if (path != "") fn <- file.path(path, fn)
  } else {
    if (path != "") fn <- paste(path, fn,
                                sep = ifelse(substr(path, nchar(path), nchar(path)) == "/", "", "/"))
  }
  disk <- .write.generic(x, fn, disk)
  dr <- filenames[1, 1]
  if (is.null(disk)) {
    if (path != "") dr <- file.path(path, dr)
    if (!dir.exists(dr)) dir.create(dr)
    result <- lapply(1:nrow(filenames), function(y) {
      fn <- file.path(filenames[y, 1], filenames[y, 2])
      if (path != "") fn <- file.path(path, fn)
      write.AmigaBitmapFont(x$FontContents[[y]]$BitmapFont, fn)
    })
    return(invisible(result[[length(result)]]))
  } else {
    if (path != "") dr <- paste(path, dr,
                                sep = ifelse(substr(path, nchar(path), nchar(path)) == "/", "", "/"))
    if (!adfExplorer::adf.file.exists(disk, dr)) ## better to use 'dir.exists', this needs to be implemented in adfExplorer
      disk <- adfExplorer::dir.create.adf(disk, dr)
    lapply(1:nrow(filenames), function(y) {
      fn <- paste(filenames[y, 1], filenames[y, 2], sep = "/")
      if (path != "") fn <- paste(path, fn,
                                  sep = ifelse(substr(path, nchar(path), nchar(path)) == "/", "", "/"))
      disk <<-
        write.AmigaBitmapFont(x$FontContents[[y]]$BitmapFont, fn, disk = disk)
    })
    return(disk)
  }
}

#' Coerce raw data into an AmigaBitmapFont class object
#'
#' [AmigaBitmapFont()] objects are comprehensive representations of binary Amiga
#' font subset files. The file name is usually simply a numeric number
#' indicating the font height in pixels. Use this function to convert
#' `raw` content from such a file to an [AmigaBitmapFont()] object.
#'
#' This function converts `raw` data as stored in font bitmap
#' files. These files are stored in subdirectories with the font's
#' name and usually have the font height in pixels as file name.
#' This function is effectively the inverse of [AmigaFFH::as.raw()].
#'
#' @rdname rawToAmigaBitmapFont
#' @name rawToAmigaBitmapFont
#' @param x A `vector` of `raw` data that needs to be converted
#' into an [AmigaBitmapFont()] object.
#' @param ... Currently ignored.
#' @returns Returns an [AmigaBitmapFont()] object.
#' @examples
#' \dontrun{
#' ## first create raw data that can be converted into a AmigaBitmapFont
#' data(font_example)
#' font.raw <- as.raw(getAmigaBitmapFont(font_example, 9))
#' 
#' ## Convert it back into an AmigaBitmapFont object:
#' font <- rawToAmigaBitmapFont(font.raw)
#' }
#' @family AmigaBitmapFont.operations
#' @family raw.operations
#' @author Pepijn de Vries
#' @export
rawToAmigaBitmapFont <- function(x, ...) {
  result <- with(.amiga.font.header, .read.amigaData(x, byte, signed, par.names))
  index.trailing.hunks <- (1 + 4*(.rawToAmigaInt(result$leadingHunks[21:24], 32, F) + 8))
  ## Check the leading hunks:
  idx.check <- c(3, 4, 12, 27, 28, 33, 35, 36)
  if (any(result$leadingHunks[idx.check] !=
          as.raw(c(0x03, 0xf3, 0x01, 0x03, 0xe9, 0x70, 0x4e, 0x75))) ||
      any(result$leadingHunks[c(-21:-24, -29:-32, -34, -idx.check)] != raw(1)) ||
      !(result$leadingHunks[34] %in% as.raw(c(0x00, 0x64, 0xff)))) {
    warning("Unexpected file header. This file may not be a font bitmap.")
  }
  ## remove leading hunks after checks. They are no longer needed...
  result$leadingHunks <- NULL
  
  if (index.trailing.hunks > length(x)) stop("Unexpected end of file")
  ## remove trailing hunks from data
  trailing.hunks <- x[index.trailing.hunks:length(x)]
  x <- x[1:(index.trailing.hunks - 1)]
  result$dfh_FileID <- .match.factor(result, "dfh_FileID", 0xf80, "DFH_ID") ## Disk Font Header
  result$tf_Style <- as.logical(.rawToBitmap(result$tf_Style, F, F))
  names(result$tf_Style) <- c("UNDERLINED", "BOLD", "ITALIC", "EXTENDED",
                              "RESERVED1", "RESERVED2", "COLORFONT", "TAGGED")
  result$tf_Flags <- as.logical(.rawToBitmap(result$tf_Flags, F, F))
  names(result$tf_Flags) <- c("ROMFONT", "DISKFONT", "REVPATH", "TALLDOT",
                              "WIDEDOT", "PROPORTIONAL", "DESIGNED", "REMOVED")
  result$node.disklink <- .read.amiga.node(result$node.disklink)
  result$node.message <- .read.amiga.node(result$node.message)
  result$fontName <- .rawToCharNull(result$fontName)
  ## mn_ReplyPort points to FontExtension.
  ## Current implementation ignores these extensions.
  n.glyphs <- 2 + result$tf_HiChar - result$tf_LoChar # +1 for difference in index base; another +1 for the default character
  glyph.info <- .rawToAmigaInt(x[result$tf_CharLoc + 32 + (1:(4*n.glyphs))], 16, F)
  glyph.info <- as.data.frame(matrix(glyph.info, ncol = 2, byrow = T))
  names(glyph.info) <- c("glyphLocation", "glyphWidth")
  if (result$tf_CharSpace != 0) {
    glyph.info$charSpace <- .rawToAmigaInt(x[result$tf_CharSpace + 32 + (1:(2*n.glyphs))], 16, T)
  }
  if (result$tf_CharKern != 0) {
    glyph.info$charKern <- .rawToAmigaInt(x[result$tf_CharKern + 32 + (1:(2*n.glyphs))], 16, T)
  }
  result[["glyph.info"]] <- glyph.info
  
  ## trailing.hunks
  trailing.hunks <- .rawToAmigaInt(trailing.hunks, 32, F)
  hunk.dat <- c(2, 7, 19, 21,
                if(result$tf_CharSpace != 0) 22,
                if(result$tf_CharKern != 0) 23)
  hunk.dat <- cumsum(abs(.amiga.font.header$byte[-1]))[hunk.dat - 1]
  if (any(trailing.hunks[c(1, length(trailing.hunks))] != c(1004, 1010)) ||
      trailing.hunks[2] != (length(trailing.hunks) - 5) ||
      any(trailing.hunks[c(3, length(trailing.hunks) - 1)] != 0) ||
      !all(trailing.hunks[4:(length(trailing.hunks) - 2)] %in% hunk.dat))
    warning("Unexpected trailing file hunks.")
  
  font.bitmap.data <- x[result$tf_CharData + 32 + (1:(result$tf_Modulo*result$tf_YSize))]
  result[["bitmap"]] <- bitmapToRaster(font.bitmap.data,
                                       w = result$tf_Modulo*8,
                                       h = result$tf_YSize,
                                       depth = 1,
                                       palette = c("white", "black"))
  attr(result[["bitmap"]], "palette") <- c("white", "black")
  class(result) <- "AmigaBitmapFont"
  return(result)
}

#' @rdname plot
#' @name plot
#' @export
plot.AmigaBitmapFont <- function(x, y, ...) {
  if (!inherits(x, "AmigaBitmapFont")) stop("x should be of class AmigaBitmapFont.")
  args <- list(...)
  raster.args <- list(x = x)
  for (elm in c("text", "palette", "style")) {
    raster.args[[elm]] <- args[[elm]]
    args[[elm]] <- NULL
  }
  args$x <- do.call(as.raster, raster.args)
  if (is.null(args$asp)) {
    args$asp <- 1
    if (x$tf_Flags["TALLDOT"]) args$asp <- args$asp*2
    if (x$tf_Flags["WIDEDOT"]) args$asp <- args$asp/2
  }
  do.call(plot, args)
}

#' @rdname plot
#' @name plot
#' @export
plot.AmigaBitmapFontSet <- function(x, y, ...) {
  if (!inherits(x, "AmigaBitmapFontSet")) stop("x should be of class AmigaBitmapFontSet.")
  if (missing(y)) {
    args <- list(...)
    raster.args <- list(x = x)
    for (elm in c("text", "palette", "style")) {
      raster.args[[elm]] <- args[[elm]]
      args[[elm]] <- NULL
    }
    args$x <- do.call(as.raster, raster.args)
    if (is.null(args$asp)) {
      dot <- do.call(rbind, lapply(x$FontContents, function(z) z$fc_Flags[c("TALLDOT", "WIDEDOT")]))
      dot <- as.data.frame(table(as.data.frame(dot)))
      dot <- dot[which(dot$Freq == max(dot$Freq))[[1]],]
      args$asp <- 1
      if (dot$TALLDOT == "TRUE") args$asp <- args$asp*2
      if (dot$WIDEDOT == "TRUE") args$asp <- args$asp/2
    }
    do.call(plot, args)
  } else {
    plot(getAmigaBitmapFont(x, y), ...)
  }
}

#' @export
print.AmigaBitmapFont <- function(x, ...) {
  if (!inherits(x, "AmigaBitmapFont")) stop("x should be of class AmigaBitmapFont.")
  cat(sprintf(" y-size %i, %s", x$tf_YSize,
              paste(tolower(c(names(x$tf_Flags)[x$tf_Flags],
                              names(x$tf_Style)[x$tf_Style])), collapse = ", ")))
  cat("\n")
  invisible(NULL)
}
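
## An illustrative sanity check, wrapped in `if (FALSE)` so it never runs
## when this file is sourced; it assumes the 'font_example' data set that
## ships with this package. The 'glyph.info' table parsed by
## rawToAmigaBitmapFont() holds one row per character from tf_LoChar
## through tf_HiChar, plus one row for the default glyph:
if (FALSE) {
  data("font_example")
  fnt <- getAmigaBitmapFont(font_example, 9)
  nrow(fnt$glyph.info) == 2 + fnt$tf_HiChar - fnt$tf_LoChar # TRUE
}
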
#' @export
print.AmigaBitmapFontSet <- function(x, ...) {
  if (!inherits(x, "AmigaBitmapFontSet")) stop("x should be of class AmigaBitmapFontSet.")
  cat(fontName(x))
  cat("\n")
  lapply(x$FontContents, function(y) print(y$BitmapFont))
  invisible(NULL)
}

#' @rdname as.raw
#' @name as.raw
#' @export
as.raw.AmigaBitmapFont <- function(x, ...) {
  withCallingHandlers({ ## TODO remove handlers when replace functions are implemented
    ## initial checks. Throw errors when checks are unsuccessful
    if (!inherits(x, "AmigaBitmapFont")) stop("x should be of class AmigaBitmapFont.")
    max.loc <- max(x$glyph.info$glyphLocation)
    if (max.loc + max(x$glyph.info$glyphWidth[x$glyph.info$glyphLocation == max.loc]) >
        dim(x$bitmap)[2]) {
      stop("Glyph information exceeds bitmap dimensions!")
    }
    rm(max.loc)
    if (x$tf_YSize != dim(x$bitmap)[1]) stop("tf_YSize does not match the bitmap height.")
    if (x$dfh_FileID != "DFH_ID") stop("Unexpected file ID...")
    if ((x$tf_HiChar - x$tf_LoChar + 2) != nrow(x$glyph.info))
      stop("Glyph information does not match the number of characters...")
    if (x$tf_Modulo*8 != dim(x$bitmap)[2]) stop("tf_Modulo*8 does not equal the bitmap width.")
    
    x$leadingHunks <- raw(36)
    x$leadingHunks[c(3:4, 12, 27:28, 33:36)] <-
      as.raw(c(0x03, 0xF3, 0x01, 0x03, 0xE9, 0x70, 0x64, 0x4E, 0x75))
    
    header <- x[.amiga.font.header$par.names]
    header$node.disklink <- .amiga.node.to.raw(header$node.disklink)
    header$node.message  <- .amiga.node.to.raw(header$node.message)
    header$dfh_FileID    <- 0xF80
    header$fontName      <- charToRaw(header$fontName)[1:32]
    header$tf_Style      <- .bitmapToRaw(header$tf_Style, F, F)
    header$tf_Flags      <- .bitmapToRaw(header$tf_Flags, F, F)
    header$tf_CharLoc    <- 110 # This is where the first data always starts
    addToPointer <- 2*prod(dim(x$glyph.info[,c("glyphLocation", "glyphWidth"), drop = F]))
    if (is.null(x$glyph.info$charSpace)) {
      header$tf_CharSpace <- 0
    } else {
      header$tf_CharSpace <- header$tf_CharLoc + addToPointer
      addToPointer <- 2*prod(dim(x$glyph.info[,"charSpace", drop = F]))
    }
    if (is.null(x$glyph.info$charKern)) {
      header$tf_CharKern <- 0
    } else {
      header$tf_CharKern <- max(c(header$tf_CharLoc, header$tf_CharSpace)) + addToPointer
      addToPointer <- 2*prod(dim(x$glyph.info[,"charKern", drop = F]))
    }
    header$tf_CharData <- max(with(header, c(tf_CharLoc, tf_CharSpace, tf_CharKern))) + addToPointer
    ## Create HUNK_RELOC32 trailing relocator hunk. It points to relative
    ## addresses that contain relative pointers and should be relocated when
    ## loaded into memory. These are the nodes, tf_CharData, tf_CharLoc,
    ## tf_CharSpace and tf_CharKern (the latter two are optional, and should
    ## only be included when not equal to 0).
    trailing.hunks <- c(2, 7, 19, 21,
                        if(header$tf_CharSpace != 0) 22,
                        if(header$tf_CharKern != 0) 23)
    trailing.hunks <- cumsum(abs(.amiga.font.header$byte[-1]))[trailing.hunks - 1]
    trailing.hunks <- c(1004,                   # HUNK_RELOC32 id
                        length(trailing.hunks), # number of addresses to relocate
                        0,                      # hunk id number
                        trailing.hunks,         # addresses
                        0,                      # terminator (no more data follows)
                        1010)                   # stop loading HUNKS
    trailing.hunks <- .amigaIntToRaw(trailing.hunks, 32, F)
    header <- with(.amiga.font.header, .write.amigaData(header, byte, signed, par.names))
    font.data <- .amigaIntToRaw(unlist(c(t(x$glyph.info[,c("glyphLocation", "glyphWidth")])),
                                       use.names = F), 16, F)
    if (!is.null(x$glyph.info$charSpace))
      font.data <- c(font.data, .amigaIntToRaw(x$glyph.info$charSpace, 16, T))
    if (!is.null(x$glyph.info$charKern))
      font.data <- c(font.data, .amigaIntToRaw(x$glyph.info$charKern, 16, T))
    palette <- attr(x$bitmap, "palette")
    bm <- apply(as.matrix(x$bitmap), 1, function(y) c(F, T)[match(y, palette)])
    bm <- .bitmapToRaw(bm, invert.bytes = T, invert.longs = F)
    font.data <- c(header, font.data, bm)
    ## Add padding bytes to align the data along 32 bit.
    font.data <- font.data[1:(4*ceiling(length(font.data)/4))]
    ## specify in the leading hunks where the trailing hunks start:
    font.data[c(21:24, 29:32)] <- .amigaIntToRaw(ceiling((length(font.data) - 32)/4), 32, F)
    return(c(font.data, trailing.hunks))
  },
  warning = function(w) {
    if (startsWith(conditionMessage(w), "Replacement operator for AmigaBitmapFont"))
      invokeRestart("muffleWarning")
  })
}

#' @rdname as.raw
#' @name as.raw
#' @export
as.raw.AmigaBitmapFontSet <- function(x, ...) {
  withCallingHandlers({ ## TODO remove handlers when replace functions are implemented
    if (!inherits(x, "AmigaBitmapFontSet")) stop("x should be of class AmigaBitmapFontSet.")
    if (x$fch_FileID != "FontContents")
      stop("Sorry, currently only 'FontContents' font sets are supported.")
    ## put list in correct order...
    x <- x[c("fch_FileID", "fch_NumEntries", "FontContents")]
    .as.raw.FontContents <- function(y) {
      y <- y[c("fc_FileName", "fc_YSize", "fc_Style", "fc_Flags")]
      y$fc_FileName <- charToRaw(y$fc_FileName)[1:256]
      y$fc_YSize    <- .amigaIntToRaw(y$fc_YSize, 16, F)
      y$fc_Style    <- .bitmapToRaw(y$fc_Style, F, F)
      y$fc_Flags    <- .bitmapToRaw(y$fc_Flags, F, F)
      unlist(y, use.names = F)
    }
    x$FontContents <- unlist(lapply(x$FontContents, .as.raw.FontContents), use.names = F)
    x$fch_FileID <- c(0x0f00, 0x0f02, 0x0f03)[match(as.character(x$fch_FileID), .amiga.font.types)]
    x$fch_FileID <- .amigaIntToRaw(x$fch_FileID, 16, F)
    x$fch_NumEntries <- .amigaIntToRaw(x$fch_NumEntries, 16, F)
    return(unlist(x, use.names = F))
  },
  warning = function(w) {
    if (startsWith(conditionMessage(w), "Replacement operator for AmigaBitmapFont"))
      invokeRestart("muffleWarning")
  })
}

#' @param text Text (a `character` string) to be formatted
#' with `x` (when `x` is an [AmigaBitmapFont()]
#' or an [AmigaBitmapFontSet()]).
#' @param style Argument is only valid when `x` is an [AmigaBitmapFont()]
#' or an [AmigaBitmapFontSet()]. No styling is applied
#' when missing or `NULL`. One or more of the following styles
#' can be used: '`bold`', '`italic`' or '`underlined`'.
#' @param palette Argument is only valid when `x` is an [AmigaBitmapFont()]
#' or an [AmigaBitmapFontSet()].
#' Should be a `vector` of
#' two colours. The first element is used as background colour, the
#' second as foreground. When missing, transparent white and black are used.
#' @family raster.operations
#' @rdname as.raster
#' @name as.raster
#' @export
as.raster.AmigaBitmapFont <- function(x, text, style, palette, ...) {
  if (!inherits(x, "AmigaBitmapFont")) stop("x should be of class AmigaBitmapFont.")
  if (missing(text)) return(x$bitmap)
  if (missing(style)) style <- NULL
  if (missing(palette)) palette <- NULL
  .print_to_raster(text, x, style, palette)
}

#' @family raster.operations
#' @rdname as.raster
#' @name as.raster
#' @export
as.raster.AmigaBitmapFontSet <- function(x, text, style, palette, ...) {
  if (!inherits(x, "AmigaBitmapFontSet")) stop("x should be of class AmigaBitmapFontSet.")
  if (missing(text)) {
    dims <- do.call(rbind, lapply(x$FontContents, function(y) dim(y$BitmapFont$bitmap)))
    pals <- do.call(rbind, lapply(x$FontContents, function(y) attr(y$BitmapFont$bitmap, "palette")))
    if (!all(apply(pals, 2, function(y) all(y == y[1]))))
      stop("No consistent palettes used for bitmaps in the font set")
    result <- matrix(pals[1,1], sum(dims[,1]), max(dims[,2]))
    y.offset <- c(0, cumsum(dims[,1]))
    lapply(1:length(x$FontContents), function(y) {
      ys <- x$FontContents[[y]]$BitmapFont$tf_YSize
      xs <- ncol(x$FontContents[[y]]$BitmapFont$bitmap)
      result[y.offset[y] + (1:ys), 1:xs] <<- as.matrix(x$FontContents[[y]]$BitmapFont$bitmap)
    })
    result <- grDevices::as.raster(result)
    return(result)
  }
  if (missing(style)) style <- NULL
  if (missing(palette)) palette <- NULL
  .print_to_raster(text, x, style, palette)
}

#' Combine multiple AmigaFFH objects
#'
#' Use this function to correctly combine one or more [AmigaBitmapFont()]
#' class objects into a single [AmigaBitmapFontSet()] class
#' object, or to combine multiple [AmigaBasic()] class objects.
#'
#' In case `...` are one or more [AmigaBasic()] class objects:
#'
#' [AmigaBasic()] class objects are combined into a single
#' [AmigaBasic()] class object in the same order as they
#' are given as argument to this function. For this purpose the lines of
#' Amiga Basic code are simply concatenated.
#'
#' In case `...` are one or more [AmigaBitmapFont()] class objects:
#'
#' [AmigaBitmapFontSet()] class objects can hold multiple
#' [AmigaBitmapFont()] class objects. Use this method to
#' combine font bitmaps into such a font set. Make sure each bitmap
#' represents a unique font height (in pixels). When heights are duplicated
#' an error will be thrown.
#'
#' You can also specify a `name` for the font, which will be embedded
#' in the object. As this name will also be used as a file name when
#' writing the font to a file, make sure that it is a valid filename.
#'
#' @rdname c
#' @name c
#' @param ... Either [AmigaBasic()] or [AmigaBitmapFont()]
#' class objects. In case of [AmigaBitmapFont()] objects:
#' Each [AmigaBitmapFont()] object should have a
#' unique Y-size.
#' @param name This argument is only valid when `...` are one or more
#' [AmigaBitmapFont()] class objects.
#'
#' A `character` string specifying the name that needs to be
#' applied to the font set. When unspecified, the default name 'font' is
#' used. Note that this name will also be used as a file name when writing
#' the font to a file. So make sure the name is also a valid file name. This
#' will not be checked for you and may thus result in errors.
#' @returns Returns an [AmigaBitmapFontSet()] in which the
#' [AmigaBitmapFont()] objects are combined.
#' Or when [AmigaBasic()]
#' objects are combined, an [AmigaBasic()] object is returned
#' in which the lines of Amiga Basic code are combined.
#' @examples
#' \dontrun{
#' data(font_example)
#' 
#' ## first get some AmigaBitmapFont objects:
#' font8 <- getAmigaBitmapFont(font_example, 8)
#' font9 <- getAmigaBitmapFont(font_example, 9)
#' 
#' ## now bind these bitmaps again in a single set
#' font.set <- c(font8, font9, name = "my_font_name")
#' 
#' ## Amiga Basic codes can also be combined:
#' bas1 <- as.AmigaBasic("LET a = 1")
#' bas2 <- as.AmigaBasic("PRINT a")
#' bas  <- c(bas1, bas2)
#' }
#' @family AmigaBitmapFont.operations
#' @author Pepijn de Vries
#' @export
c.AmigaBitmapFont <- function(..., name = "font") {
  fonts <- list(...)
  lapply(fonts, function(f) {
    if (!inherits(f, "AmigaBitmapFont"))
      stop("All arguments in '...' should be of class AmigaBitmapFont.")
  })
  sz <- unlist(lapply(fonts, function(x) x$tf_YSize))
  if (any(duplicated(sz))) stop("The font Y-sizes are not unique.")
  fonts <- fonts[order(sz)]
  result <- list(
    fch_FileID = factor(.amiga.font.types[1], .amiga.font.types),
    fch_NumEntries = length(fonts),
    FontContents = lapply(fonts, function(x) {
      list(
        fc_FileName = paste(name, x$tf_YSize, sep = "/"),
        fc_YSize    = x$tf_YSize,
        fc_Style    = x$tf_Style,
        fc_Flags    = x$tf_Flags,
        BitmapFont  = x
      )
    })
  )
  names(result$FontContents) <- sprintf("pt%i", sort(sz))
  class(result) <- "AmigaBitmapFontSet"
  result
}

#' Extract or replace a font name
#'
#' Extract or replace a font name from an [AmigaBitmapFontSet()]
#' object.
#'
#' The name of a font is embedded at multiple locations of an [AmigaBitmapFontSet()]
#' object. This function can be used to extract or replace the font name
#' correctly. This is also the name that will be used when writing the
#' font to a file with [write.AmigaBitmapFontSet()].
#' @rdname fontName
#' @name fontName
#' @param x An [AmigaBitmapFontSet()] for which the font name
#' needs to be changed.
#' @param value A `character` string specifying the name you
#' wish to use for the font.
#' @returns Returns the font name. In case of the replace function, a copy
#' of `x` is returned with the name replaced by '`value`'.
#' @examples
#' \dontrun{
#' data(font_example)
#' 
#' ## show the name of the example font:
#' fontName(font_example)
#' 
#' ## This is how you change the name into "foo"
#' fontName(font_example) <- "foo"
#' 
#' ## see it worked:
#' fontName(font_example)
#' }
#' @family AmigaBitmapFont.operations
#' @author Pepijn de Vries
#' @export
fontName <- function(x) {
  if (!inherits(x, "AmigaBitmapFontSet")) stop("x should be of class AmigaBitmapFontSet")
  filenames <- unlist(lapply(x$FontContents, function(y) y$fc_FileName))
  filenames <- do.call(rbind, strsplit(filenames, "/"))
  if (ncol(filenames) != 2) stop("Unexpected file structure.")
  if (length(unique(filenames[,1])) != 1) stop("Not a single base name for the font.")
  as.character(filenames[1,1])
}

#' @rdname fontName
#' @name fontName<-
#' @export
`fontName<-` <- function(x, value) {
  withCallingHandlers({ ## TODO remove handlers when replace functions are implemented
    lapply(1:length(x$FontContents), function(i) {
      x$FontContents[[i]]$fc_FileName <<-
        gsub(".+?([/])", paste0(value, "/"), x$FontContents[[i]]$fc_FileName)
    })
    return(x)
  },
  warning = function(w) {
    if (startsWith(conditionMessage(w), "Replacement operator for AmigaBitmapFont"))
      invokeRestart("muffleWarning")
  })
}
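
## An illustrative sketch, wrapped in `if (FALSE)` so it never runs when
## this file is sourced: renaming a font set with the replacement function
## above also updates the nested 'fc_FileName' entries ("name/height")
## that determine the file names used by write.AmigaBitmapFontSet().
if (FALSE) {
  data("font_example")
  fontName(font_example) <- "foo"
  unlist(lapply(font_example$FontContents, function(y) y$fc_FileName)) # "foo/8" "foo/9"
}
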
#' Extract a specific AmigaBitmapFont from an AmigaBitmapFontSet
#'
#' Extract a specific [AmigaBitmapFont()] from an
#' [AmigaBitmapFontSet()].
#'
#' An [AmigaBitmapFontSet()] object can hold one or more
#' bitmaps for specific font sizes (heights). Use this function to
#' obtain such a specific [AmigaBitmapFont()].
#' @rdname getAmigaBitmapFont
#' @name getAmigaBitmapFont
#' @param x An [AmigaBitmapFontSet()] object, from which the
#' specific [AmigaBitmapFont()] object needs to be extracted.
#' @param size A single `numeric` value specifying the desired font
#' size in pixels. Use [availableFontSizes()] to get available
#' sizes.
#' @returns Returns an [AmigaBitmapFont()] of the requested size.
#' An error is thrown when the requested size is not available.
#' @examples
#' \dontrun{
#' data(font_example)
#' 
#' ## get the font object for the first available size:
#' font <- getAmigaBitmapFont(font_example,
#'                            availableFontSizes(font_example)[1])
#' }
#' @family AmigaBitmapFont.operations
#' @author Pepijn de Vries
#' @export
getAmigaBitmapFont <- function(x, size) {
  if (!inherits(x, "AmigaBitmapFontSet")) stop("x should be of class AmigaBitmapFontSet.")
  if (length(size) > 1) {
    warning("Multiple sizes specified, using only the first value.")
    size <- size[1]
  }
  if (!(size %in% availableFontSizes(x)))
    stop(paste0("Requested font size (", size, ") not available."))
  return(x$FontContents[[paste0("pt", size)]]$BitmapFont)
}

#' Get available font sizes from an AmigaBitmapFontSet
#'
#' Get available font sizes (height) from an [AmigaBitmapFontSet()] in pixels.
#'
#' An [AmigaBitmapFontSet()] can hold bitmaps of multiple font
#' sizes. Use this function to obtain the available sizes from such a set.
#' @rdname availableFontSizes
#' @name availableFontSizes
#' @param x An [AmigaBitmapFontSet()] for which the available
#' font sizes (height) in number of pixels need to be obtained.
#' @returns Returns a `vector` of `numeric` values specifying
#' the available font sizes (height in pixels) for `x`.
#' @examples
#' \dontrun{
#' data(font_example)
#' 
#' ## The example font holds two font sizes (8 and 9):
#' availableFontSizes(font_example)
#' }
#' @family AmigaBitmapFont.operations
#' @author Pepijn de Vries
#' @export
availableFontSizes <- function(x) {
  if (!inherits(x, "AmigaBitmapFontSet")) stop("x should be of class AmigaBitmapFontSet.")
  as.numeric(unlist(lapply(x$FontContents, function(y) y$fc_YSize)))
}

#' Convert a raster image into an AmigaBitmapFont
#'
#' Convert a two-coloured [grDevices::as.raster()] image into
#' an [AmigaBitmapFont()] class object.
#'
#' Create an [AmigaBitmapFont()] class object by providing
#' a two-coloured raster image and specifying which characters are
#' depicted by the image.
#' @rdname rasterToAmigaBitmapFont
#' @name rasterToAmigaBitmapFont
#' @param x A `raster` (see grDevices package) object composed of
#' two colours only. Make sure that all glyphs (graphical representations
#' of characters) are next to each other on a single line. The height
#' of this raster (in pixels) is taken automatically as font height.
#' @param glyphs Specify which glyphs are included in the image
#' `x` from left to right. It can be specified in one of the
#' following ways:
#'
#' A single `character` string, where the length of the string
#' (`nchar`) equals the number of displayed glyphs in `x`.
#'
#' A `vector` of `numeric` ASCII codes. The length of
#' the `vector` should equal the number of displayed glyphs
#' in `x`.
#'
#' A `list` of either `character` strings or `vector`s of
#' `numeric`s. The length of the `list` should equal the
#' number of displayed glyphs in `x`.
#' Each element can represent
#' multiple characters, meaning that the nth element of the list
#' uses the nth glyph shown in `x` to represent all the characters
#' included in that element.
#'
#' Note that Amiga bitmap fonts represent ASCII characters and may
#' not include all special characters or symbols.
#' @param baseline The baseline of the font, specified in number of
#' pixels from the top (`numeric`). Should be a whole number
#' between 0 and the font height (height of `x`) minus 1.
#' @param default_glyph A single `character` or ASCII code
#' (`numeric`) that should be used by default. This means
#' that all characters that are not specified by `glyphs` will
#' be represented by this `default_glyph`. `default_glyph` should
#' be included in `glyphs`.
#' @param glyph_width A `numeric` `vector` with the same number
#' of elements or characters as used for `glyphs`. It specifies
#' the width in pixels for each glyph reserved in the raster image `x`.
#' They should be whole numbers greater than or equal to 0.
#' @param glyph_space A `numeric` `vector` with the same number
#' of elements or characters as used for `glyphs`. It specifies
#' the width in pixels for each glyph that should be used when formatting
#' text. Note that these values can be smaller or larger than the values
#' specified for `glyph_width`.
#' They should be whole numbers greater than or equal to 0.
#' @param glyph_kern Note that Amiga bitmap fonts do not use the formal
#' typographic definition of kerning. Here, kerning is
#' used as the number of pixels the cursor should be moved forward or
#' backward after typesetting a character. It should be a
#' `numeric` `vector` with the same number of elements or
#' characters as used for `glyphs`. It can hold both positive
#' and negative values.
#' @param palette A `vector` of two colours. Both colours should
#' be in `x`. The first colour is used as background colour,
#' the second as foreground colour.
#'
#' When missing, it will be checked whether `x` has a palette
#' as attribute, and uses that. If that attribute is also missing,
#' the palette will be guessed from `x`, where the most
#' frequently occurring colour is assumed to be the background
#' colour.
#' @param ... Currently ignored.
#' @returns Returns a [AmigaBitmapFont()] class object based on `x`.
#' @examples
#' \dontrun{
#' data("font_example")
#' 
#' ## make a raster that we can use to create a bitmap font
#' font9.rast <- as.raster(getAmigaBitmapFont(font_example, 9))
#' 
#' ## note the glyphs and the order in which they are included in
#' ## the raster image:
#' plot(font9.rast)
#' 
#' ## let's build a simple font, using only the first few glyphs
#' ## in the raster:
#' font9 <- rasterToAmigaBitmapFont(
#'   ## 'x' needs the raster image:
#'   x = font9.rast,
#'   
#'   ## 'glyphs' are the graphical representation of the characters
#'   ## that we will include in our font. We will only use the
#'   ## first 7 characters in the raster image:
#'   glyphs = " !\"#$%&",
#'   
#'   ## We will use the '&' glyph to represent all characters that
#'   ## are not specified in the font:
#'   default_glyph = "&",
#'   
#'   ## The raster image is 9 pixels tall, as will be the font.
#'   ## Let's use 7 as the base (it needs to be less than the height)
#'   baseline = 7,
#'   
#'   ## Let's define the width in pixels for each of the 7
#'   ## characters.
#'   ## This is their width in the raster image:
#'   glyph_width = c(0, 1, 3, 6, 5, 5, 5),
#'   
#'   ## Let's define the space the character should take in pixels
#'   ## when it is used to format text:
#'   glyph_space = c(4, 2, 4, 7, 6, 6, 6),
#'   
#'   ## the raster uses white as background colour and black as
#'   ## foreground:
#'   palette = c("white", "black")
#' )
#' 
#' ## note that for all characters that are not specified,
#' ## the default glyph ('&') is used:
#' plot(font9, text = "!@#$%ABCD")
#' 
#' ## Let's take a subset from the font's bitmap (raster):
#' font9abc.rast <- font9.rast[,263:282]
#' 
#' ## as you can see this bitmap only contains the lowercase
#' ## characters 'a', 'b', 'c', 'd' and 'e':
#' plot(font9abc.rast)
#' 
#' font9.abc <- rasterToAmigaBitmapFont(
#'   x = font9abc.rast,
#'   ## Each glyph in the image can be represented by a single
#'   ## element in a list. By specifying multiple characters in
#'   ## each element, you can recycle a glyph to represent different
#'   ## characters. So in this case, the glyph 'a' is used for
#'   ## all the accented variants of the character 'a'.
#'   glyphs = list("a\xE0\xE1\xE2\xE3\xE4\xE5",
#'                 "b",
#'                 "c\xA2\xE7",
#'                 "d",
#'                 "e\xE8\xE9\xEA\xEB"),
#'   default_glyph = "c", ## 'c' is used as default glyph for all other characters
#'   baseline = 7,
#'   glyph_width = c(4, 4, 4, 4, 4),
#'   glyph_space = c(5, 5, 5, 5, 5),
#'   palette = c("white", "black")
#' )
#' 
#' ## see what happens when you format text using the font we just created:
#' plot(font9.abc, text = "a\xE0\xE1\xE2\xE3\xE4\xE5bc\xA2\xE7de\xE8\xE9\xEA\xEB, foo bar")
#' }
#' @family AmigaBitmapFont.operations
#' @family raster.operations
#' @author Pepijn de Vries
#' @export
rasterToAmigaBitmapFont <- function(x, glyphs, default_glyph, baseline,
                                    glyph_width, glyph_space, glyph_kern, palette, ...)
{
  glyph_width <- round(glyph_width)
  if (any(glyph_width < 0 | glyph_width > 65535)) stop("'glyph_width' out of range (0, 65535).")
  if (baseline < 0 || baseline > (nrow(x) - 1) || baseline != round(baseline))
    stop("'baseline' should be a whole number between 0 and tf_YSize - 1.")
  if (is.character(default_glyph)) default_glyph <- utf8ToInt(enc2utf8(default_glyph))
  if (length(default_glyph) != 1) stop("'default_glyph' should have a length of 1.")
  if (is.list(glyphs)) {
    test.default <- F
    glyphs <- lapply(1:length(glyphs), function(i) {
      if (is.character(glyphs[[i]])) glyphs[[i]] <- utf8ToInt(enc2utf8(glyphs[[i]]))
      if (default_glyph %in% glyphs[[i]]) {
        default_glyph <<- i
        test.default <<- T
      }
      data.frame(
        idx    = i,
        glyphs = glyphs[[i]]
      )
    })
    glyphs <- do.call(rbind, glyphs)
    if (!test.default) stop("'default_glyph' should be in 'glyphs'.")
  }
  if (is.character(glyphs)) glyphs <- utf8ToInt(enc2utf8(glyphs))
  if (is.numeric(glyphs)) {
    default_glyph <- which(glyphs == default_glyph)
    if (length(default_glyph) == 0) stop("'default_glyph' should be in 'glyphs'.")
    glyphs <- data.frame(idx = 1:length(glyphs), glyphs)
  }
  if (any(duplicated(glyphs$glyphs)))
    stop("Can't handle duplicated characters or ascii codes in 'glyphs'.")
  char_lo <- min(glyphs$glyphs)
  char_hi <- max(glyphs$glyphs)
  if (char_hi == glyphs$glyphs[default_glyph]) char_hi <- char_hi - 1
  if (!inherits(x, "raster")) stop("'x' should be of class raster.")
  if (char_lo < 0 || char_lo > 255 || char_hi < 0 || char_hi > 255)
    stop("ASCII codes for 'glyphs' are out of range (0-255).")
  if (sum(glyph_width) > dim(x)[2]) stop("Sum of char width is wider than the provided raster image.")
  if (baseline > (dim(x)[1] - 1)) stop("'baseline' should not be greater than the height of 'x' minus 1.")
  if (missing(palette)) {
    if (is.null(attr(x, "palette"))) {
      ## If a palette is missing, take a guess based on the raster;
      ## assume that the most frequent colour is the background colour
      palette <- table(x)
      palette <- names(palette)[order(-palette)]
      if (length(palette) != 2) stop("'x' does not contain 2 unique values/colours.")
    } else {
      palette <- attr(x, "palette")
    }
  }
  if (any(!(unique(x) %in% palette)) || length(palette) != 2)
    stop("'palette' doesn't specify two colours, or 'x' contains different colours.")
  attr(x, "palette") <- palette
  font.result <- as.list(rep(0, length(.amiga.font.header$par.names)))
  names(font.result) <- .amiga.font.header$par.names
  font.result$leadingHunks <- NULL
  font.result$node.disklink <- font.result$node.message <- list(
    ln_Succ = 0,
    ln_Pred = 0,
    ln_Type = factor("NT_FONT", .amiga.node.types),
    ln_Pri  = 0,
    ln_Name = 26
  )
  font.result$fontName     <- ""
  font.result$dfh_FileID   <- factor("DFH_ID", "DFH_ID")
  font.result$dfh_Revision <- 1
  font.result$tf_LoChar    <- char_lo
  font.result$tf_HiChar    <- char_hi
  font.result$tf_Modulo    <- ceiling(dim(x)[2]/8)
  if (dim(x)[2] != 8*font.result$tf_Modulo) {
    x <- cbind(as.matrix(x), matrix(palette[1], dim(x)[1], 8*font.result$tf_Modulo - dim(x)[2]))
    x <- as.raster(x)
    attr(x, "palette") <- palette
  }
  font.result$tf_YSize     <- dim(x)[1]
  font.result$tf_Baseline  <- baseline
  font.result$tf_XSize     <- stats::median(glyph_width)
  font.result$tf_BoldSmear <- 1
  font.result$tf_Style     <- rep(F, 8)
  font.result$tf_Flags     <- c(F, T, F, F, F, T, T, F)
  names(font.result$tf_Style) <- c("UNDERLINED", "BOLD", "ITALIC", "EXTENDED",
                                   "RESERVED1", "RESERVED2", "COLORFONT", "TAGGED")
  names(font.result$tf_Flags) <- c("ROMFONT", "DISKFONT", "REVPATH", "TALLDOT",
                                   "WIDEDOT", "PROPORTIONAL", "DESIGNED", "REMOVED")
  glyphs <- merge(glyphs, data.frame(glyphs = char_lo:(char_hi + 1)),
                  all.x = T, all.y = T)
  glyphs$idx[is.na(glyphs$idx)] <- default_glyph
  loc <- cumsum(c(0, glyph_width))
  font.result$glyph.info <- data.frame(
    glyphLocation = loc[glyphs$idx],
    glyphWidth    = glyph_width[glyphs$idx]
  )
  offs <- 0
  font.result$tf_CharLoc <- 110
  if (!missing(glyph_space)) {
    glyph_space <- round(glyph_space)
    if (any(glyph_space < 0 | glyph_space > 65535)) stop("'glyph_space' out of range (0, 65535).")
    font.result$glyph.info$charSpace <- glyph_space[glyphs$idx]
    font.result$tf_CharSpace <- 110 + 2*2*nrow(font.result$glyph.info)
    offs <- offs + 1
  }
  if (!missing(glyph_kern)) {
    glyph_kern <- round(glyph_kern)
    if (any(glyph_kern < -32768 | glyph_kern > 32767)) stop("'glyph_kern' out of range (-32768, 32767).")
    font.result$glyph.info$charKern <- glyph_kern[glyphs$idx]
    font.result$tf_CharKern <- 110 + (2 + offs)*2*nrow(font.result$glyph.info)
    offs <- offs + 1
  }
  font.result$tf_CharData <- 110 + (2 + offs)*2*nrow(font.result$glyph.info)
  font.result$bitmap <- x
  class(font.result) <- "AmigaBitmapFont"
  return(font.result)
}

#' @export
`$<-.AmigaBitmapFont` <- function(x, i, value) {
  x[[i]] <- value
  x
}

#' @export
`[[<-.AmigaBitmapFont` <- function(x, i, value) {
  cl <- class(x)
  class(x) <- NULL
  x[[i]] <- value
  class(x) <- cl
  ## TODO update this replacement function and remove warning
  warning(paste0("Replacement operator for AmigaBitmapFont objects ",
                 "will be modified in future versions of this package. ",
                 "Note that not all replacement operations may be ",
                 "allowed in future versions of this package."))
  x
}

#' @export
`$<-.AmigaBitmapFontSet` <- function(x, i, value) {
  x[[i]] <- value
  x
}

#' @export
`[[<-.AmigaBitmapFontSet` <- function(x, i, value) {
  cl <- class(x)
  class(x) <- NULL
  x[[i]] <- value
  class(x) <- cl
  ## TODO update this replacement function and remove warning
  warning(paste0("Replacement operator for AmigaBitmapFontSet objects ",
                 "will be modified in future versions of this package. ",
                 "Note that not all replacement operations may be ",
                 "allowed in future versions of this package."))
  x
}
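
## An illustrative round-trip sketch, wrapped in `if (FALSE)` so it never
## runs when this file is sourced; it assumes the 'font_example' data set
## that ships with this package. Converting an AmigaBitmapFont to raw data
## and back should reproduce the glyph bitmap:
if (FALSE) {
  data("font_example")
  fnt  <- getAmigaBitmapFont(font_example, 9)
  fnt2 <- rawToAmigaBitmapFont(as.raw(fnt))
  all(as.matrix(fnt$bitmap) == as.matrix(fnt2$bitmap)) # TRUE
}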
/scratch/gouwar.j/cran-all/cranData/AmigaFFH/R/bitmapfont.r
#' A table of display modes on the Amiga and corresponding `raw` codes
#'
#' A table of display modes on the Amiga and corresponding `raw` codes
#' representing these modes.
#'
#' This table contains most display modes that were available on the Amiga.
#' It also contains `raw` codes that were used to represent these modes.
#' The table also contains the hardware monitors that could display the specific
#' modes, and the minimal chip set that was required for the display mode.
#' This data is used to interpret [IFFChunk()] objects of type
#' 'CAMG'. It is also used to interpret ILBM images and to create IFF files from
#' raster images.
#'
#' @docType data
#' @name amiga_display_modes
#' @format {
#' A `data.frame` with 4 columns:
#' * The column named 'DISPLAY_MODE': a `factor` reflecting
#' the display mode
#' * The column named 'DISPLAY_MODE_ID': A `list` containing a `vector`
#' of 4 `raw` values as used by the Amiga to reflect specific display modes.
#' These raw values are usually also stored with bitmap images in the Interchange
#' File Format in a [IFFChunk()] called 'CAMG'.
#' * The column named 'MONITOR_ID': A `character` string identifying
#' the monitor that could display the specific mode.
#' * The column named 'CHIPSET': a `factor` identifying the minimal
#' chip set that was required to display the specific mode. OCS is the original
#' chip set; ECS is the Enhanced Chip Set. AGA is the Advanced Graphics Architecture
#' chip set (in some countries known as just Advanced Architecture). AGA could
#' also display OCS and ECS modes, ECS could also display OCS modes, OCS could only
#' display OCS modes.}
#' @references <https://wiki.amigaos.net/wiki/Display_Database#ModeID_Identifiers>
#' @references <http://amigadev.elowar.com/read/ADCD_2.1/AmigaMail_Vol2_guide/node00FD.html>
#' @examples
#' data("amiga_display_modes")
NULL

#' A list of special display modes
#'
#' A list of special display modes on the Amiga and corresponding `raw` keys.
#'
#' This table shows specific special display modes and to which Amiga monitors they
#' relate. The `raw` codes can be used to interpret specific display modes
#' as listed in [amiga_display_modes()]. This information is used to
#' interpret [IFFChunk()] objects of type 'CAMG'. It is also used to
#' interpret ILBM images and to create IFF files from raster images.
#'
#' @docType data
#' @name amiga_display_keys
#' @format {
#' A `data.frame` with 2 columns:
#' * The column named 'mode': a `factor` reflecting a display mode, monitor or bitwise mask
#' * The column named 'code': vector of 4 `raw` values as used by the Amiga to reflect specific display modes}
#' @references <https://wiki.amigaos.net/wiki/Display_Database#ModeID_Identifiers>
#' @references <http://amigadev.elowar.com/read/ADCD_2.1/AmigaMail_Vol2_guide/node00FD.html>
#' @examples
#' data("amiga_display_keys")
NULL

#' A list of Amiga monitors
#'
#' This table lists Amiga monitors and corresponding `raw` codes that represent
#' these monitors.
#'
#' This table contains monitors that were compatible with the Amiga.
#' It also contains `raw` codes that were used to represent them.
#' This data is used to interpret [IFFChunk()] objects of type
#' 'CAMG'. It is also used to interpret ILBM images and to create IFF files from
#' raster images.
#' #' @docType data #' @name amiga_monitors #' @format { #' A `data.frame` with 2 columns: #' * The column named 'MONITOR_ID': a `factor` representing an Amiga monitor #' * The column named 'CODE': A `list` containing a `vector` of 4 #' `raw` values as used by the Amiga to represent a specific monitor.} #' @references <https://wiki.amigaos.net/wiki/Display_Database#ModeID_Identifiers> #' @examples #' data("amiga_monitors") NULL #' An example file of a bitmap image stored in the Interchange File Format #' #' This file is provided to demonstrate the structure of an Interchange File #' Format and is used in several examples throughout this package. #' #' The Interchange File Format stores information compartmentally in separate #' containers called 'chunks'. This file demonstrates how a bitmap image #' is stored in this format. In addition to the raw bitmap data, the file #' also contains meta-information on the bitmap dimensions, its colour palette and #' the display mode that should be used on an Amiga. See also #' [interpretIFFChunk()], [IFFChunk-class()] #' and the example for [bitmapToRaster()]. #' #' @docType data #' @name ilbm8lores.iff #' @format { #' See [IFFChunk-class()] and references for more information #' about the Interchange File Format.} #' @examples #' \dontrun{ #' filename <- system.file("ilbm8lores.iff", package = "AmigaFFH") #' example.iff <- read.iff(filename) #' #' ## show the structure of the IFF file: #' print(example.iff) #' } #' @references <https://en.wikipedia.org/wiki/Interchange_File_Format> #' @references <https://wiki.amigaos.net/wiki/A_Quick_Introduction_to_IFF> NULL #' 'demo.bas', 'r_logo.shp' and 'ball.shp' as example files for AmigaBasic and AmigaBasicShape objects #' #' 'demo.bas', 'r_logo.shp' and 'ball.shp' as example files for [AmigaBasic()] and #' [AmigaBasicShape()] objects #' #' The 'r_logo.shp' and 'ball.shp' files are formatted such that they can be read with #' [read.AmigaBasicShape()]. They serve as an example of the [AmigaBasicShape()] class, where #' the first represents a blitter object, and the latter a sprite. #' #' The 'demo.bas' file is an example of a binary encoded [Amiga Basic][AmigaFFH::AmigaBasic] script. It can be read with #' [read.AmigaBasic()]. The script demonstrates how the shape files could be used in Amiga Basic. #' #' @docType data #' @aliases demo.bas r_logo.shp ball.shp #' @name AmigaBasic-files #' @rdname AmigaBasic-files #' @format { #' See [AmigaBasic()] and [AmigaBasicShape()] for more information #' about the format.} #' @examples #' \dontrun{ #' read.AmigaBasic(system.file("demo.bas", package = "AmigaFFH")) #' read.AmigaBasicShape(system.file("ball.shp", package = "AmigaFFH")) #' read.AmigaBasicShape(system.file("r_logo.shp", package = "AmigaFFH")) #' } NULL #' Commonly used palettes on the Commodore Amiga #' #' `amiga_palettes` is a named list, where each element represents a commonly #' used palette on the Commodore Amiga. #' #' Some files that contain bitmap images with an indexed palette did not store the #' palette in the same file. Amiga Workbench icons ([AmigaIcon()]) for instance #' only store the index values of the palette, but not the palette itself. #' `amiga_palettes` therefore provides some commonly used palettes on the Amiga, #' such that these files can be interpreted. #' #' @docType data #' @name amiga_palettes #' @format { #' A named list with the following elements: #' * `wb.os1`: A `vector` of 4 colours that were used as the default #' palette of the Workbench on Amiga OS 1.x. 
#' * `wb.os2`: A `vector` of 8 colours. The first 4 colours are the default
#' colours of a standard Workbench on Amiga OS 2.x. The latter 4 are additional
#' colours used by the Workbench expansion MagicWB.
#' * `spr.os1`: A `vector` of 3 colours that were used by default
#' for a mouse pointer sprite on Amiga OS 1.x.
#' * `spr.os2`: A `vector` of 3 colours that were used by default
#' for a mouse pointer sprite on Amiga OS 2.x.}
#' @examples
#' data("amiga_palettes")
NULL

#' An example object for the AmigaBitmapFontSet class
#'
#' An example object for the [AmigaBitmapFontSet()] class used in
#' examples throughout this package. It also contains nested
#' [AmigaBitmapFont()] class objects, which can be obtained by
#' using `getAmigaBitmapFont(font_example, 9)`.
#'
#' The `font_example` contains a font that was designed as an example
#' for this package. It holds bitmap glyphs for characters that are 8 and
#' 9 pixels tall.
#'
#' @docType data
#' @name font_example
#' @format {
#' `font_example` is an [AmigaBitmapFontSet()]
#' object. For details see the object class documentation.}
#' @family AmigaBitmapFont.operations
#' @examples
#' data("font_example")
NULL
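
## An illustrative sketch, wrapped in `if (FALSE)` so it never runs when
## this file is sourced: the palettes documented above can be converted to
## their 2-byte Amiga representation with colourToAmigaRaw(), defined
## elsewhere in this package:
if (FALSE) {
  data("amiga_palettes")
  colourToAmigaRaw(amiga_palettes$wb.os1)
}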
/scratch/gouwar.j/cran-all/cranData/AmigaFFH/R/data.r
#' @rdname dither
#' @name dither
#' @export
dither <- function(x, method, ...) {
  ## I made this an S3 generic such that I could implement a dither function
  ## in the future for audio waves (for downsampling audio)...
  UseMethod("dither", x)
}

#' Convert colours to Amiga compatible raw data or vice versa
#'
#' Convert colours to Amiga compatible raw data or vice versa, such that
#' it can be used in graphical objects from the Commodore Amiga.
#'
#' On the original Commodore Amiga chipset, graphics used indexed palettes of
#' 12 bit colours. Colours are specified by their RGB (Red, Green and Blue)
#' values, each component requiring 4 bits (with corresponding values ranging
#' from 0 up to 15). Data structures on the Amiga were WORD (2 bytes) aligned.
#' Colours are therefore typically stored in either 2 bytes (skipping the
#' first four bits) or 3 bytes (one byte for each value).
#'
#' These functions can be used to convert R colours into the closest matching
#' Amiga colour in a `raw` format, or vice versa. Note that later Amiga
#' models with the advanced (graphics) architecture (known as AA or AGA) allowed
#' for 24 bit colours.
#'
#' @param x In the case `amigaRawToColour` is called, `x` should
#' be a `vector` of `raw` data. The length of this vector should
#' be a multiple of 2 (when `n.bytes = "2"`) or 3 (when
#' `n.bytes = "3"`). When `colourToAmigaRaw` is called, `x`
#' should be a `vector` of `character` strings representing colours.
#' @param colour.depth A `character` string: `"12 bit"` (default) or
#' `"24 bit"`. The first should be used in most cases, as old Amigas
#' have a 12 bit colour depth.
#' @param n.bytes A `character` string: `"2"` or `"3"`. The
#' number of bytes that is used or should be used to store each colour.
#' @returns In the case `amigaRawToColour` is called, a (vector of)
#' colour `character` string(s) is returned. When `colourToAmigaRaw`
#' is called, `raw` representing the colour(s) specified in `x` is
#' returned.
#'
#' @rdname colourToAmigaRaw
#' @name colourToAmigaRaw
#' @examples
#' ## Let's create some Amiga palettes:
#' colourToAmigaRaw(c("red", "navy blue", "brown", "#34AC5A"))
#' 
#' ## let's do the reverse.
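#' ## For instance, 12 bit colours use 4 bits per RGB component;
#' ## pure red (R = 15, G = 0, B = 0) is stored in two bytes as
#' ## 0x0f 0x00:
#' amigaRawToColour(as.raw(c(0x0f, 0x00)))
#'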
#' ## this is white:
#' amigaRawToColour(as.raw(c(0x0f, 0xff)))
#' 
#' ## this is white specified in 3 bytes:
#' amigaRawToColour(as.raw(c(0xf0, 0xf0, 0xf0)), n.bytes = "3")
#' 
#' ## lower nybbles are ignored, you will get a warning when they are not zero:
#' amigaRawToColour(as.raw(c(0xf0, 0xf0, 0x0f)), n.bytes = "3")
#' @family raw.operations
#' @author Pepijn de Vries
#' @export
colourToAmigaRaw <- function(x, colour.depth = c("12 bit", "24 bit"), n.bytes = c("2", "3")) {
  colour.depth <- match.arg(colour.depth, c("12 bit", "24 bit"))
  n.bytes <- match.arg(n.bytes, c("2", "3"))
  if (colour.depth == "24 bit" && n.bytes == "2")
    stop("3 bytes are required to store 24 bit colours!")
  col <- grDevices::col2rgb(x)
  if (colour.depth == "12 bit") {
    col <- floor((col + 4)/16.5)
  }
  if (colour.depth == "24 bit") col <- col/16
  if (n.bytes == "3") {
    as.vector(apply(col, 2, function(y) .amigaIntToRaw(16*y, 8, F)))
  } else {
    as.vector(apply(col, 2, function(y) as.raw(c(y[1], y[2]*16 + y[3]))))
  }
}

#' @rdname colourToAmigaRaw
#' @name amigaRawToColour
#' @export
amigaRawToColour <- function(x, colour.depth = c("12 bit", "24 bit"), n.bytes = c("2", "3")) {
  ## x = raw
  colour.depth <- match.arg(colour.depth, c("12 bit", "24 bit"))
  n.bytes <- match.arg(n.bytes, c("2", "3"))
  if (n.bytes == "2" && (length(x) %% 2) != 0)
    stop("x should be a vector of even length.")
  if (n.bytes == "3" && (length(x) %% 3) != 0)
    stop("x should be a vector with a multiple length of 3.")
  if (colour.depth == "24 bit" && n.bytes == "2")
    stop("3 bytes are required to store 24 bit colours!")
  hi <- .hiNybble(x)
  lo <- .loNybble(x)
  if (colour.depth == "24 bit" && n.bytes == "3") {
    sq <- seq(1, to = length(x), by = 3)
    x <- .rawToAmigaInt(x, 8, F)
    return(grDevices::rgb(x[sq]/255, x[sq + 1]/255, x[sq + 2]/255))
  } else if (colour.depth == "12 bit" && n.bytes == "3") {
    sq <- seq(1, to = length(x), by = 3)
    hi <- .hiNybble(x)
    if (any(lo != 0)) warning("The low nybble is not zero for all colours.")
    return(grDevices::rgb(hi[sq]/15, hi[sq + 1]/15, hi[sq + 2]/15))
  } else {
    x <- as.vector(rbind(hi, lo))
    sq <- seq(1, to = length(x), by = 4)
    if (any(x[sq] != 0)) warning("The unused first nybble is not zero for all colours.")
    x <- x[-sq]
    sq <- seq(1, to = length(x), by = 3)
    return(grDevices::rgb(x[sq]/15, x[sq + 1]/15, x[sq + 2]/15))
  }
}

#' A routine to (un)pack bitmap data
#'
#' A very simplistic lossless routine to (un)pack repetitive bitmap data. Often
#' used in InterLeaved BitMap (ILBM) images in IFF containers ([IFFChunk()]).
#'
#' InterLeaved BitMap (ILBM) images on the Amiga often use a packing algorithm
#' referred to as \sQuote{ByteRun1}. This routine was introduced first on
#' the Macintosh where it was called PackBits. It is a form of run-length encoding
#' and is very simple:
#' when a specific byte is repeated in a bitmap, it is replaced by
#' a (signed negative) byte telling how many times the following byte
#' should be repeated. When a series of bytes are not repetitive, it
#' is preceded by a (signed positive) byte telling how long the non
#' repetitive part is.
#'
#' Not very complicated, but for most images some bytes can be shaved
#' off the file. This was very useful when everything had to be stored
#' on 880 kilobyte floppy disks with little CPU time to spare. Note
#' that the file size can also increase for (noisy) images.
#'
#' This packing routine will pack the entire bitmap (`x`)
#' at once. The IFF file format requires packing of bitmap data per
#' scanline.
#' This is done automatically by the [rasterToIFF()]
#' function, which calls this packing routine per scanline.
#'
#' @param x `raw` data, usually representing a (packed) bitmap.
#' @returns Returns packed or unpacked `raw` data, depending on
#' whether `packBitmap` or `unPackBitmap` was called.
#'
#' @rdname packBitmap
#' @name packBitmap
#' @examples
#' ## generate some random raw data:
#' dat.rnd <- as.raw(sample.int(10, 100, TRUE))
#' 
#' ## try to pack it:
#' pack.rnd <- packBitmap(dat.rnd)
#' 
#' ## due to the random nature of the source data
#' ## the data could not be packed efficiently.
#' ## The length of the packed data is close to
#' ## the length of the original data:
#' length(pack.rnd) - length(dat.rnd)
#' 
#' ## Now generate similar data but sort it
#' ## to generate more repetitive data:
#' dat.srt  <- as.raw(sort(sample.int(10, 100, TRUE)))
#' pack.srt <- packBitmap(dat.srt)
#' 
#' ## This time the packing routine is more successful:
#' length(pack.srt) - length(dat.srt)
#' 
#' ## The original data can always be obtained
#' ## from the packed data:
#' all(dat.rnd == unPackBitmap(pack.rnd))
#' all(dat.srt == unPackBitmap(pack.srt))
#' @references <http://amigadev.elowar.com/read/ADCD_2.1/Devices_Manual_guide/node01C0.html>
#' @references <https://en.wikipedia.org/wiki/PackBits>
#' @family raw.operations
#' @author Pepijn de Vries
#' @export
packBitmap <- function(x) {
  if (typeof(x) != "raw") stop("Argument 'x' should be raw data")
  n <- length(x)
  y <- x[-1L] != x[-length(x)]
  i <- c(which(y | is.na(y)), n)
  l <- diff(c(0L, i))
  while (any(l > 128)) {
    i <- sort(c(i, i[l > 128] - l[l > 128] + 128))
    l <- diff(c(0L, i))
  }
  ## Skip double repeats, as there is a large overhead from the packing byte:
  sel <- l > 1 & l < 4
  i <- c(i, rep(i[sel], l[sel] - 1))
  l <- c(l, rep(1, length(i) - length(l)))
  l[l > 1 & l < 4] <- 1
  l <- l[order(i)]
  i <- i[order(i)]
  while (any(duplicated(i))) {
    i[duplicated(i, fromLast = T)] <- i[duplicated(i, fromLast = T)] - 1
  }
  ## End skipping double repeats
  one.series.start <- which(diff(c(F, l == 1, F)) == 1)
  one.series.end   <- which(diff(c(F, l == 1, F)) == -1) - 1
  if (length(one.series.start) != length(one.series.end))
    stop("Unexpected error in packing the bitmap. Please report this error to the package author.")
  one.series <- mapply(function(start, end) {
    list(x[i[start[[1]]]:i[end[[1]]]])
  }, start = one.series.start, end = one.series.end)
  one.series <- lapply(one.series, function(y) {
    yl <- length(y)
    result <- NULL
    while (yl > 128) {
      result <- c(result, .amigaIntToRaw(127, 8, T), y[1:128])
      yl <- yl - 128
      y  <- y[-1:-128]
    }
    return(c(result, .amigaIntToRaw(yl - 1, 8, T), y))
  })
  result <- rep(list(raw(0)), length(l))
  result[one.series.start] <- one.series
  more.series <- mapply(function(y, dat, rep) {
    list(c(.amigaIntToRaw(-rep + 1, 8, T), dat))
  }, dat = x[i[l > 1]], rep = l[l > 1])
  result[l > 1] <- more.series
  result <- unlist(result)
  return(result)
}

#' @rdname packBitmap
#' @name unPackBitmap
#' @export
unPackBitmap <- function(x) {
  if (typeof(x) != "raw") stop("Argument 'x' should be raw data")
  ## Very simple unpacking routine for bitmap images.
  ## TODO: this routine is very slow due to the while loop;
  ## see if it can be implemented more efficiently.
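  ## Worked example of the ByteRun1 scheme decoded by the loop below
  ## (illustrative): the packed sequence  fd 01 02 05 06 07  decodes to
  ## 01 01 01 01 05 06 07. The signed control byte -3 (0xfd) repeats the
  ## following byte 4 times, and the control byte 2 copies the following
  ## 3 bytes literally.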
See if this routine can be implemented more efficiently result <- raw(0) offset <- 0 while (offset < length(x)) { n <- .rawToAmigaInt(x[offset + 1], 8, T) if (n == -128) { offset <- offset + 1 } else if (n < 0) { result <- c(result, rep(x[offset + 2], -n + 1)) offset <- offset + 2 } else { result <- c(result, x[offset + 2:(n + 2)]) offset <- offset + 2 + n } } return(result) } #' Convert an Amiga bitmap image into a raster #' #' Amiga images are usually stored as bitmap images with indexed colours. This #' function converts raw Amiga bitmap data into raster data #' ([grDevices::as.raster()]). #' #' Bitmap images stored as raw data, representing palette index colours, can #' be converted into raster data ([grDevices::as.raster()]). The latter #' data can easily be plotted in R. It is usually not necessary to call this function #' directly, as there are several more convenient wrappers for this function. Those #' wrappers can convert specific file formats (such as IFF ILBM and Hardware Sprites, #' see [AmigaFFH::as.raster()]) into raster objects. This function is #' provided for completeness sake (or for when you want to search for images in an #' amiga memory dump). #' #' @param x a `vector` of `raw` values, representing bitmap data. #' @param w Width in pixels of the bitmap image. Can be any positive value. However, #' bitmap data is `word' aligned on the amiga. This means that the width of the stored #' bitmap data is a multiple of 16 pixels. The image is cropped to the width specified here. #' @param h Height in pixels of the bitmap image. #' @param depth The colour depth of the bitmap image (i.e., the number of bit planes). #' The image will be composed of `2^depth` indexed colours. #' @param palette A `vector` of `2^depth` colours, to be used for the indexed #' colours of the bitmap image. By default, a grayscale palette is used. #' When explicitly set to `NULL`, this function returns a matrix with palette index #' values. #' @param interleaved A `logical` value, indicating whether the bitmap is interleaved. #' An interleaved bitmap image stores each consecutive bitmap layer per horizontal scanline. #' @returns Returns a raster object ([as.raster()]) as specified in #' the [grDevices()] package. Unless, `palette` is set to `NULL`, #' in which case a `matrix` with `numeric` palette index values is returned. #' #' @rdname bitmapToRaster #' @name bitmapToRaster #' @examples #' \dontrun{ #' ## first load an example image: #' example.iff <- read.iff(system.file("ilbm8lores.iff", package = "AmigaFFH")) #' #' ## get the raw bitmap data, which is nested in the InterLeaved BitMap (ILBM) #' ## IFF chunk as the BODY: #' bitmap.data <- interpretIFFChunk(getIFFChunk(example.iff, c("ILBM", "BODY"))) #' #' ## In order to translate the bitmap data into a raster object we need #' ## to know the image dimensions (width, height and colour depth). This #' ## information can be obtained from the bitmap header (BMHD): #' #' bitmap.header <- interpretIFFChunk(getIFFChunk(example.iff, c("ILBM", "BMHD"))) #' #' ## First the bitmap data needs to be unpacked as it was stored in a compresssed #' ## form in the IFF file (see bitmap.header$Compression): #' #' bitmap.data <- unPackBitmap(bitmap.data) #' #' ## It would also be nice to use the correct colour palette. 
This can be obtained #' ## from the CMAP chunk in the IFF file: #' #' bitmap.palette <- interpretIFFChunk(getIFFChunk(example.iff, c("ILBM", "CMAP"))) #' #' example.raster <- bitmapToRaster(bitmap.data, #' bitmap.header$w, #' bitmap.header$h, #' bitmap.header$nPlanes, #' bitmap.palette) #' #' ## We now have a raster object that can be plotted: #' #' plot(example.raster, interpolate = FALSE) #' } #' @family raster.operations #' @author Pepijn de Vries #' @export bitmapToRaster <- function(x, w, h, depth, palette = grDevices::gray(seq(0, 1, length.out = 2^depth)), interleaved = T) { if (!is.raw(x)) stop("x should be a vector of raw values.") w <- round(w) h <- round(h) if (w < 1 || h < 1) stop("Width and heigth should both be at least 1 pixel.") if (depth != round(depth) || depth < 1) stop("Depth should be a whole positive number.") if (!is.null(palette) && any(!.is.colour(palette))) stop("Palette should be composed of colours only.") if (!is.null(palette) && length(palette) != (2^depth)) stop("Palette should have a length of 2^depth.") if (length(interleaved) > 1) warning("More than 1 interleave value is given, only the first element of the vector is used.") interleaved <- as.logical(interleaved[[1]]) ## invert bytes and longs is opposite to the defaults in adfExplorer. ## Does the user need to be able to change these values for bitmap images? x <- .rawToBitmap(x, invert.bytes = T, invert.longs = F) if (interleaved) { x <- array(x, c(16*ceiling(w/16), depth, h)) x <- apply(x, c(1, 3), function(y) { sum(2^(0:(length(y) - 1)) * as.numeric(y)) }) } else { x <- array(x, c(16*ceiling(w/16), h, depth)) x <- apply(x, c(1, 2), function(y) { sum(2^(0:(length(y) - 1)) * as.numeric(y)) }) } if (is.null(palette)) { x <- matrix(x, ncol = h, byrow = F) x <- t(x)[, 1:w, drop = F] return(x) } else { x <- matrix(palette[x + 1], ncol = h, byrow = F) x <- t(x)[, 1:w, drop = F] return(grDevices::as.raster(x)) } } #' Convert a grDevices `raster` object into binary bitmap data #' #' Converts an image represented by a grDevices `raster` object into binary #' (Amiga) bitmap data. #' #' Images represented by grDevices `raster` objects are virtually true colour (24 bit #' colour depth) and an alpha layer (transparency). On the early Amiga's the chipset #' (in combination with memory restrictions) only allowed images with indexed #' palettes. The colour depth was 12 bit with the original chipset and the number #' of colours allowed in a palette also depended on the chipset. This function #' will allow you to convert a `raster` object into binary bitmap data with #' an indexed palette. This means that the image is converted in a lossy way #' (information will be lost). So don't expect the result to have the same quality as #' the original image. #' #' With the `depth` argument, the raster can also be converted #' to special mode bitmap images. One of these modes is the #' \sQuote{hold and modify} (HAM). In this mode two of the bitplanes #' are reserved as modifier switches. If the this switch equals #' zero, the remainder of the bitplanes are used as an index for #' colours in a fixed palette. If the switch equals 1, 2 or 3, the #' red, green or blue component of the previous is modified, using the #' number in the remainder of the bitplanes. So it holds the previous #' colour but modifies one of the colour components (hence the term #' \sQuote{hold and modify}.) Here only the HAM6 and #' the HAM8 mode are implemented. HAM6 uses 6 bitplanes and a 12 bit #' colour depth, HAM8 uses 8 bitplanes and a 24 bit colour depth. 
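#'
#' As a brief illustration of the scheme described above (a sketch of the
#' encoding, not of the exact hardware implementation): in HAM6 a 6 bit
#' pixel value consists of 2 modifier bits and 4 data bits. When the
#' modifier bits are zero, the 4 data bits index one of 16 palette
#' colours; otherwise, the 4 data bits replace a single colour component
#' (red, green or blue, depending on the modifier bits) of the pixel
#' directly to the left.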
#' #' The HAM mode was a special video modes supported by Amiga hardware. #' Normal mode bitmap images with a 6 bit depth would allow for a #' palette of 64 (2^6) colours, HAM6 can display 4096 colours with #' the same bit depth. #' #' In addition to HAM6 and HAM8, sliced HAM (or SHAM) was another #' HAM variant. Using the coprocessor on the Amiga, it was possible #' to change the palette at specific scanlines, increasing the number #' of available colours even further. The SHAM mode is currently not #' supported by this package. #' @param x A raster object created with [grDevices::as.raster()] which #' needs to be converted into bitmap data. It is also possible to let `x` be #' a matrix of `character`s, representing colours. #' @param depth The colour depth of the bitmap image. The image will be composed #' of `2^depth` indexed colours. #' #' `depth` can also be a `character` string "HAM6" or "HAM8" #' representing special Amiga display modes (see details). #' @param interleaved A `logical` value, indicating whether the bitmap needs to be #' interleaved. An interleaved bitmap image stores each consecutive bitmap layer per #' horizontal scanline. #' @param indexing A function that accepts two arguments: `x` (a grDevices #' `raster` object); `length.out`, a numeric value indicating the #' desired size of the palette (i.e., the number of colours). It should return #' a matrix with numeric palette indices (ranging from 1 up to the number of #' colours in the palette). The result should have an attribute named `palette' that #' contains the colours that correspond with the index numbers. The result should #' also carry an attribute with the name `transparent', with a single numeric value #' representing which colour in the palette should be treated as transparent (or #' `NA` when no transparency is required). By default the #' function [index.colours()] is used. You are free to provide #' a customised version of this function (see examples). #' @returns The bitmap is returned as a `vector` of `logical` values. #' The `logical` values reflect the bits for each bitplane. The palette used #' for the bitmap is returned as attribute to the `vector`. There will also be #' an attribute called `transparent'. This will hold a numeric index corresponding #' with the colour in the palette that will be treated as transparent. It will be #' `NA` when transparency is not used. #' #' @rdname rasterToBitmap #' @name rasterToBitmap #' @examples #' \dontrun{ #' ## first: Let's make a raster out of the 'volcano' data, which we can use in the example: #' volcano.raster <- as.raster(t(matrix(terrain.colors(1 + diff(range(volcano)))[volcano - #' min(volcano) + 1], nrow(volcano)))) #' #' ## convert the raster into binary (logical) bitmap data: #' volcano.bm <- rasterToBitmap(volcano.raster) #' #' ## The palette for the indexed colours of the generated bitmap is returned as #' ## attribute. There is no transparency is the image: #' attributes(volcano.bm) #' #' ## We can also include a custom function for colour quantisation. 
Let's include #' ## some dithering: #' volcano.dither <- rasterToBitmap(volcano.raster, #' indexing = function(x, length.out) { #' index.colours(x, length.out, #' dither = "floyd-steinberg") #' }) #' #' ## You can also use a custom indexing function to force a specified palette, #' ## in this case black and white: #' volcano.bw <- rasterToBitmap(volcano.raster, #' indexing = function(x, length.out) { #' index.colours(x, length.out, #' palette = c("black", "white"), #' dither = "floyd-steinberg") #' }) #' #' ## Make a bitmap using a special display mode (HAM6): #' volcano.HAM <- rasterToBitmap(volcano.raster, "HAM6") #' } #' @family raster.operations #' @author Pepijn de Vries #' @export rasterToBitmap <- function(x, depth = 3, interleaved = T, indexing = index.colours) { special.mode <- "none" if (depth %in% c("HAM6", "HAM8")) { special.mode <- depth depth <- ifelse(depth == "HAM6", 6, 8) } depth <- round(depth[[1]]) if (depth < 1) stop("Bitmap depth should be at least 1.") interleaved <- interleaved[[1]] if (!is.logical(interleaved)) stop("Interleaved should be a logical value.") if (!inherits(indexing, "function")) stop("'indexing' should be a function") if (!all(c("x", "length.out") %in% names(formals(indexing)))) stop("Function 'indexing' should require arguments 'x' and 'length.out'.") x <- as.matrix(x) x <- indexing(x = x, length.out = ifelse(special.mode %in% c("HAM6", "HAM8"), special.mode, 2^depth)) palette <- attributes(x)$palette transparent <- attributes(x)$transparent x <- .indexToBitmap(x, depth, interleaved) attributes(x) <- list(palette = palette, transparent = transparent) return (x) } #' Quantisation of colours and indexing a grDevices raster image #' #' Converts an image represented by a grDevices `raster` object into a #' matrix containing numeric indices of a quantised colour palette. #' #' Determines the optimal limited palette by clustering colours in an image #' with [stats::kmeans()]. The result of the optimisation routine #' will depend on the randomly chosen cluster centres by this algorithm. This #' means that the result may slightly differ for each call to this function. If #' you want reproducible results, you may want to reset the random seed #' ([set.seed()]) before each call to this function. #' #' @param x A raster object ([grDevices::as.raster()]), or a `matrix` #' containing `character` strings representing colours. `x` can also #' be a `list` of such matrices or rasters. All elements of this list should #' have identical dimensions. An overall palette will be generated for elements in the #' list. #' @param length.out A `numeric` value indicating the number of desired #' colours in the indexed palette. #' #' It can also be a `character` string indicating which special #' Amiga display mode should be used when indexing colours. #' \sQuote{`HAM6`} and \sQuote{`HAM8`} are supported. #' See [rasterToBitmap()] for more details on these #' special modes. #' @param palette A vector of no more than `length.out` colours, to be used #' for the bitmap image. When missing or set to `NULL`, a palette will be #' generated based on the provided colours in raster `x`. In that case, #' [stats::kmeans()] is used on the hue, saturation, brightness and #' alpha values of the colours in `x` for clustering the colours. The cluster #' centres will be used as palette colours. #' @param background On the Amiga, indexed images could not be semi-transparent. #' Only a single colour could be designated as being fully transparent. 
The #' ``background`' argument should contain a background colour with which #' semi-transparent colours should be mixed, before colour quantisation. It is #' white by default. #' @param dither Dither the output image using the algorithm specified here. #' See the usage section for possible options. By default no dithering ("`none`") #' is applied. See [dither()] for more details. #' @param colour.depth A `character` string indicating the colour depth to be used. #' Can be either "`12 bit`" (default, standard on an Amiga with original chipset), #' or "`24 bit`". #' #' This argument is overruled when `length.out` is set to \dQuote{`HAM6`} #' or \dQuote{`HAM8`}. In that case the colour depth linked to that special mode #' is used (12 bit for HAM6, 24 bit for HAM8). #' @param ... Arguments that are passed onto [stats::kmeans()] (see #' `palette` argument). #' @returns Returns a `matrix` with the same dimensions as `x` containing #' `numeric` index values. The corresponding palette is returned as attribute, #' as well as the index value for the fully transparent colour in the palette. #' When `x` is a `list` a `list` of matrices is returned. #' #' @rdname index.colours #' @name index.colours #' @examples #' \dontrun{ #' ## first: Let's make a raster out of the 'volcano' data, which we can use in the example: #' volcano.raster <- as.raster(t(matrix(terrain.colors(1 + diff(range(volcano)))[volcano - #' min(volcano) + 1], nrow(volcano)))) #' #' ## This will create an image of the original raster using an indexed palette: #' volcano.index <- index.colours(volcano.raster) #' #' ## The index values can be converted back into colours, using the palette: #' volcano.index <- as.raster(apply(volcano.index, 2, #' function(x) attributes(volcano.index)$palette[x])) #' #' ## Create an indexed image using dithering #' volcano.dith <- index.colours(volcano.raster, dither = "floyd-steinberg") #' volcano.dith <- as.raster(apply(volcano.dith, 2, #' function(x) attributes(volcano.dith)$palette[x])) #' #' ## plot the images side by side for comparison #' par(mfcol = c(1, 3)) #' plot(volcano.raster, interpolate = F) #' plot(volcano.index, interpolate = F) #' plot(volcano.dith, interpolate = F) #' } #' @family colour.quantisation.operations #' @family raster.operations #' @author Pepijn de Vries #' @export index.colours <- function(x, length.out = 8, palette = NULL, background = "#FFFFFF", dither = c("none", "floyd-steinberg", "JJN", "stucki", "atkinson", "burkse", "sierra", "two-row-sierra", "sierra-lite"), colour.depth = c("12 bit", "24 bit"), ...) 
{ special.mode <- "none" x.is.list <- is.list(x) list.length <- 1 if (x.is.list) list.length <- length(x) if (x.is.list) x <- lapply(x, as.matrix) else x <- as.matrix(x) if (!all(.is.colour(c(unlist(x))))) stop("x should be a matrix of colours or a grDevices raster object.") if (length.out %in% c("HAM6", "HAM8")) { special.mode <- length.out length.out <- ifelse(length.out == "HAM6", 16, 64) ## overrule the colour.depth argument when HAM6 or HAM8 colour.depth <- ifelse(special.mode == "HAM6", "12 bit", "24 bit") } else { length.out <- round(length.out[[1]]) if (length.out < 2) stop("length.out should be 2 or more.") } if (!is.null(palette) && !all(.is.colour(palette))) stop("palette should consist of colours.") if (!is.null(palette) && length(palette) < 2) stop("palette should consist of at least 2 colours.") background <- background[[1]] if (!.is.colour(background)) stop("background is not a valid colour.") colour.depth <- match.arg(colour.depth) if (colour.depth != "12 bit" && special.mode == "HAM6") stop("HAM6 required 12 bit colour depth") if (colour.depth != "24 bit" && special.mode == "HAM8") stop("HAM8 required 24 bit colour depth") dither <- match.arg(dither) background <- grDevices::col2rgb(background) if (x.is.list) { c.dim <- do.call(rbind, lapply(x, dim)) if (any(!apply(c.dim, 2, function(y) all(y == y[[1]])))) stop("The dimensions of all elements in x should be equal") c.dim <- c.dim[1,] x <- unlist(x) } else { c.dim <- dim(x) } col.vals <- grDevices::col2rgb(x, T) if (special.mode %in% c("HAM6", "HAM8")) col.vals.rgb <- col.vals alpha <- col.vals[4,] col.vals <- col.vals[-4,] col.vals <- (col.vals*rbind(alpha, alpha, alpha) + rep(background, ncol(col.vals))*(255 - rbind(alpha, alpha, alpha)))/255 col.vals <- grDevices::rgb2hsv(col.vals) col.vals[,alpha == 0] <- grDevices::rgb2hsv(background) alpha[alpha > 0] <- 255 x <- apply(rbind(col.vals, alpha/255), 2, function(y) grDevices::hsv(y[1], y[2], y[3], y[4])) x <- array(x, c(c.dim, list.length)) x <- lapply(1:list.length, function(y) as.raster(x[,,y])) col.vals <- rbind(col.vals, 1 - as.numeric(alpha == 0)) current.unique.length <- length(unique(c(unlist(x)))) current.total.length <- length(unlist(x)) result <- NULL transparent <- NA if (is.null(palette)) { if (current.total.length <= length.out || current.unique.length < length.out) { palette <- rep("#000000", length.out) palette[1:current.unique.length] <- unique(c(unlist(x))) transparent <- which(substr(palette, 8, 9) == "00")[1] result <- lapply(x, function(y) apply(y, 2, match, table = palette)) } else { if (special.mode %in% c("HAM6", "HAM8")) { col.diff <- array(col.vals.rgb[-4,], c(3, c.dim, list.length)) col.diff <- c(apply(col.diff, 4, function(z) { z <- (z[,,-1] - z[,,-dim(z)[3]])^2 z <- apply(z, c(2, 3), function(z2) { z2[which(z2 == max(z2))[[1]]] <- 0 prod(1 + z2)/(256*256) }) z <- cbind(rep(0, nrow(z)), z) z })) ## include information on where the image changes a lot in R, G and B value col.vals <- rbind(col.vals, col.diff) palette <- stats::kmeans(as.matrix(t(col.vals)), length.out, ...) } else { palette <- stats::kmeans(as.matrix(t(col.vals)), length.out, ...) 
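        ## kmeans returns a cluster assignment per pixel; below these
        ## assignments are reshaped into matrices matching the image
        ## dimensions, and the cluster centres become the palette: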
result <- palette$cluster result <- array(palette$cluster, c(c.dim, list.length)) result <- lapply(1:list.length, function(y) result[,,y]) } transparent <- which(palette$centers[,4] == 0)[1] palette <- apply(palette$centers, 1, function(x) grDevices::hsv(x[1], x[2], x[3], x[4])) } # sort colours such that the most frequently occuring colours are listed first freqs <- table(factor(unlist(result), as.character(1:length.out))) ord <- order(-freqs) rnk <- rank(-freqs, ties.method = "first") palette <- as.vector(palette[ord]) transparent <- as.vector(rnk[transparent]) if (!is.null(result)) { result <- lapply(result, function(y) as.vector(rnk)[y]) result <- lapply(result, matrix, nrow = c.dim) } } else { palette <- grDevices::col2rgb(palette, T) transparent <- which(palette[4,] == 0)[1] palette[4,palette[4,] > 0] <- 255 palette <- grDevices::rgb(palette[1,], palette[2,], palette[3,], palette[4,], maxColorValue = 255) result <- lapply(x, function(y) apply(y, 2, match, table = palette)) } if (dither != "none" || special.mode %in% c("HAM6", "HAM8")) { ## dithering should also be called in case of HAM modes if (x.is.list) { result <- lapply(x, function(y) dither(y, method = dither, palette = palette, mode = special.mode)) } else { result <- dither(x[[1]], method = dither, palette = palette, mode = special.mode) } } else if (!x.is.list) { result <- result[[1]] } palette <- suppressWarnings(amigaRawToColour(colourToAmigaRaw(palette, "24 bit", "3"), colour.depth, "3")) attributes(result)[["palette"]] <- as.vector(palette) attributes(result)[["transparent"]] <- transparent return(result) } #' Image dithering #' #' Dither is an intentional form of noise applied to an image to avoid colour #' banding when reducing the amount of colours in that image. This function #' applies dithering to a grDevices `raster` image. #' #' The approaches implemented here all use error diffusion to achieve dithering. #' Each pixel is scanned (from top to bottom, from left to right), where the actual #' colour is sampled and compared with the closest matching colour in the palette. #' The error (the differences between the actual and used colour) is distributed over #' the surrounding pixels. The only difference between the methods implemented here #' is the way the error is distributed. The algorithm itself is identical. For more #' details consult the listed references. #' #' Which method results in the best quality image will depend on the original image #' and the palette colours used for dithering, but is also a matter of taste. Note #' that the dithering algorithm is relatively slow and is provided in this package #' for your convenience. As it is not in the main scope of this package you should #' use dedicated software for faster/better results. #' @param x Original image data that needs to be dithered. Should be a raster object #' ([grDevices::as.raster()]), or a matrix of `character` string #' representing colours. #' @param method A `character` string indicating which dithering method should #' be applied. See usage section for all possible options (Note that the "JJN" is #' the Jarvis, Judice, and Ninke algorithm). Default is "`none`", meaning that #' no dithering is applied. #' @param palette A palette to which the image should be dithered. It should be a #' `vector` of `character` strings representing colours. #' @param mode A `character` string indicating whether a special #' Amiga display mode should be used when dithering. By default #' \sQuote{`none`} is used (no special mode). 
In addition, #' \sQuote{`HAM6`} and \sQuote{`HAM8`} are supported. #' See [rasterToBitmap()] for more details. #' @param ... Currently ignored. #' @returns Returns a `matrix` with the same dimensions as `x` containing #' `numeric` index values. The corresponding palette is returned as attribute, #' as well as the index value for the fully transparent colour in the palette. #' #' @rdname dither #' @name dither #' @aliases dither.raster #' @examples #' \dontrun{ #' ## first: Let's make a raster out of the 'volcano' data, which we can use in the example: #' volcano.raster <- as.raster(t(matrix(terrain.colors(1 + diff(range(volcano)))[volcano - #' min(volcano) + 1], nrow(volcano)))) #' #' ## let's dither the image, using a predefined two colour palette: #' volcano.dither <- dither(volcano.raster, #' method = "floyd-steinberg", #' palette = c("yellow", "green")) #' #' ## Convert the indices back into a raster object, such that we can plot it: #' volcano.dither <- as.raster(apply(volcano.dither, 2, function(x) c("yellow", "green")[x])) #' par(mfcol = c(1, 2)) #' plot(volcano.raster, interpolate = F) #' plot(volcano.dither, interpolate = F) #' #' ## results will get better when a better matching colour palette is used. #' ## for that purpose use the function 'index.colours'. #' } #' @references R.W. Floyd, L. Steinberg, *An adaptive algorithm for spatial grey scale*. Proceedings of the Society of Information Display 17, 75-77 (1976). #' @references J. F. Jarvis, C. N. Judice, and W. H. Ninke, *A survey of techniques for the display of continuous tone pictures on bilevel displays*. Computer Graphics and Image Processing, 5:1:13-40 (1976). #' @references <https://en.wikipedia.org/wiki/Floyd-Steinberg_dithering> #' @references <https://tannerhelland.com/4660/dithering-eleven-algorithms-source-code/> #' @family colour.quantisation.operations #' @family raster.operations #' @author Pepijn de Vries #' @export dither.raster <- function(x, method = c("none", "floyd-steinberg", "JJN", "stucki", "atkinson", "burkse", "sierra", "two-row-sierra", "sierra-lite"), palette, mode = c("none", "HAM6", "HAM8"), ...) 
{ mode <- match.arg(mode) if (!all(.is.colour(c(x)))) stop("x should be a matrix of colours or a grDevices raster object.") if (!is.null(palette) && !all(.is.colour(palette))) stop("palette should consist of colours.") if (!is.null(palette) && length(palette) < 2) stop("palette should consist of at least 2 colours.") x <- matrix(x, nrow = dim(x)) method <- match.arg(method) c.dim <- dim(x) ## create an array with width, height, r, g, b and alpha as separate dimensions x <- grDevices::col2rgb(x, T) x <- lapply(split(x, row(x)), matrix, nrow = c.dim) x <- array(c(x[[1]], x[[2]], x[[3]], x[[4]]), dim = c(rev(c.dim), 4)) pal.rgb <- col2rgb(palette, T) result <- matrix(rep(NA, prod(c.dim)), nrow = c.dim) if (method == "floyd-steinberg") { e2 <- matrix(c(0, 3, -16, 5, 7, 1), nrow = c(2, 3))/16 ir2 <- 0:1 jr2 <- -1:1 } else if (method == "JJN") { e2 <- matrix(c(0, 3, 1, 0, 5, 3, -48, 7, 5, 7, 5, 3, 5, 3, 1), nrow = c(3, 5))/48 ir2 <- 0:2 jr2 <- -2:2 } else if (method == "stucki") { e2 <- matrix(c(0, 2, 1, 0, 4, 2, -42, 8, 4, 8, 4, 2, 4, 2, 1), nrow = c(3, 5))/42 ir2 <- 0:2 jr2 <- -2:2 } else if (method == "atkinson") { e2 <- matrix(c(0, 1, 0, -8, 1, 1, 1, 1, 0, 1, 0, 0), nrow = c(3, 4))/8 ir2 <- 0:2 jr2 <- -1:2 } else if (method == "burkse") { e2 <- matrix(c(0, 2, 0, 4, -32, 8, 8, 4, 4, 2), nrow = c(2, 5))/32 ir2 <- 0:1 jr2 <- -2:2 } else if (method == "sierra") { e2 <- matrix(c(0, 2, 0, 0, 4, 2, -32, 5, 3, 5, 4, 2, 3, 2, 0), nrow = c(3, 5))/32 ir2 <- 0:2 jr2 <- -2:2 } else if (method == "two-row-sierra") { e2 <- matrix(c(0, 1, 0, 2, -16, 3, 4, 2, 3, 1), nrow = c(2, 5))/16 ir2 <- 0:1 jr2 <- -2:2 } else if (method == "sierra-lite") { e2 <- matrix(c(0, 1, -4, 1, 2, 0), nrow = c(2, 3))/4 ir2 <- 0:1 jr2 <- -1:1 } if (method == "none" & !(mode %in% c("HAM6", "HAM8"))) { result <- apply(x, 2, function(a) { res <- apply(a, 1, function(b) { dst <- sqrt(colSums((pal.rgb - b)^2)) which(dst == min(dst))[[1]] }) res }) result <- t(result) } else { color_multi <- ifelse(mode == "HAM8", 255/63, 17) for(j in 1:dim(x)[2]) { if (mode %in% c("HAM6", "HAM8")) prev <- c(grDevices::col2rgb(palette[1])) for(i in 1:dim(x)[1]) { ## find the closest matching colour in the palette compared to the ## current pixel. This is the colour where the Euclidean distance ## in RGBA space is smallest compared to the actual colour: if (mode %in% c("HAM6", "HAM8")) { dst <- apply(pal.rgb, 2, function(z) { dst <- abs(x[i, j, ] - z) sqrt(sum(dst^2)) }) dst.diff <- abs(x[i, j, 1:3] - prev) control.flag = which(dst.diff == max(dst.diff))[[1]] dst.diff[control.flag] <- 0 dst.diff <- sqrt(sum(dst.diff^2)) if (all(dst.diff < dst)) { idx <- round(x[i,j,][control.flag]/ifelse(mode == "HAM6", 17, (255/63))) prev[control.flag] <- color_multi*idx control.flag <- c(2, 3, 1)[control.flag] } else { control.flag <- 0 ## Possible improvement for future versions: ## When multiple colours in the palette match best with the current ## pixel, now the first matching colour is selected. ## it is better to also look ahead to see if the pixel to the ## right matches best with this colour in the palette. idx <- which(dst == min(dst))[[1]] - 1 prev <- c(grDevices::col2rgb(palette[[idx + 1]])) } result[j, i] <- idx + bitwShiftL(control.flag, ifelse(mode == "HAM6", 4, 6)) } else { dst <- sqrt(colSums((pal.rgb - x[i, j, ])^2)) result[j, i] <- which(dst == min(dst))[[1]] } if (method != "none" && !(j == dim(x)[[2]] && i == dim(x)[[1]])) { if (mode %in% c("HAM6", "HAM8")) { ## seems to create a slight horizontal stripes artifact in HAM modes. 
## See if this can be avoided P <- c(prev, 255) } else { P <- pal.rgb[,result[j, i]] } ## calculate the error (difference) between the actual colour and the colour ## from the palette: e <- x[i, j, ] - P ## get the proper row and column indices for the error distribution matrix. ## This is necessary when we are close to the edge of the image: sel.i <- i + ir2 ir <- ir2[sel.i %in% 1:dim(x)[1]] sel.j <- j + jr2 jr <- jr2[sel.j %in% 1:dim(x)[2]] ## Distribute the error (e) over the surrounding pixels using the error ## distribution matrix (e2) for the selected method: repl <- x[i + ir, j + jr, ] repl <- repl + (e2 %o% e)[ir - min(ir2) + 1, jr -min(jr2) + 1,] ## Put some constrains on the error: repl[repl < 0] <- 0 repl[repl > 255] <- 255 x[i + ir, j + jr, ] <- repl } } } if (mode %in% c("HAM6", "HAM8")) result <- result + 1 } return(result) } #' @rdname dither #' @name dither #' @aliases dither.matrix #' @export dither.matrix <- function(x, method = c("none", "floyd-steinberg", "JJN", "stucki", "atkinson", "burkse", "sierra", "two-row-sierra", "sierra-lite"), palette, mode = c("none", "HAM6", "HAM8"), ...) { dither.raster(grDevices::as.raster(x), method, palette, mode, ...) } #' (De)compress 8-bit continuous signals. #' #' Use a lossy delta-Fibonacci (de)compression to continuous 8-bit signals. #' This algorithm was used to compress 8-bit audio wave data on the Amiga. #' #' This form of compression is lossy, meaning that information and quality will get lost. #' 8-bit audio is normally stored as an 8-bit signed value representing the amplitude #' at specific time intervals. The delta-Fibonacci compression instead stores the #' difference between two time intervals (delta) as a 4-bit index. This index in turn #' represents a value from the Fibonacci series (hence the algorithm name). The compression #' stores small delta values accurately, but large delta values less accurately. #' As each sample is stored as a 4-bit value instead of an 8-bit value, the amount of #' data is reduced with almost 50\% (the exact compression ratio is (4 + n)/(2n)). #' #' The algorithm was first described by Steve Hayes and was used in 8SVX audio stored in #' the Interchange File Format (IFF). The quality loss is considerable (especially #' when the audio contained many large deltas) and was even in #' the time it was developed (1985) not used much. The function is provided here for #' the sake of completeness. The implementation here only compresses 8-bit data, as #' for 16-bit data the quality loss will be more considerable. #' @param x A `vector` of `raw` data that needs to be (de)compressed. #' @param ... Currently ignored. #' @returns Returns a `vector` of the resulting (de)compressed `raw` data. 
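#' When compressing, the result will hold roughly `2 + n/2` bytes for `n`
#' bytes of input (a padding byte, the initial value and one nybble per
#' sample); e.g., 1000 input bytes should yield 502 bytes of output.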
#' @rdname deltaFibonacciCompress #' @name deltaFibonacciCompress #' @examples #' \dontrun{ #' ## Let's get an audio wave from the ProTrackR package, which we #' ## can use in this example: #' buzz <- ProTrackR::PTSample(ProTrackR::mod.intro, 1) #' #' ## Let's convert it into raw data, such that we can compress it: #' buzz.raw <- adfExplorer::amigaIntToRaw(ProTrackR::waveform(buzz) - 128, 8, T) #' #' ## Let's compress it: #' buzz.compress <- deltaFibonacciCompress(buzz.raw) #' #' ## Look the new data uses less memory: #' length(buzz.compress)/length(buzz.raw) #' #' ## The compression was lossy, which we can examine by decompressing the #' ## sample again: #' buzz.decompress <- deltaFibonacciDecompress(buzz.compress) #' #' ## And turn the raw data into numeric data: #' buzz.decompress <- adfExplorer::rawToAmigaInt(buzz.decompress, 8, T) #' #' ## Plot the original wave in black, the decompressed wave in blue #' ## and the error in red (difference between the original and decompressed #' ## wave). The error is actually very small here. #' plot(ProTrackR::waveform(buzz) - 128, type = "l") #' lines(buzz.decompress, col = "blue") #' buzz.error <- ProTrackR::waveform(buzz) - 128 - buzz.decompress #' lines(buzz.error, col = "red") #' #' ## this can also be visualised by plotting the orignal wave data against #' ## the decompressed data (and observe a very good correlation): #' plot(ProTrackR::waveform(buzz) - 128, buzz.decompress) #' #' ## Let's do the same with a sample of a snare drum, which has larger #' ## delta values: #' snare.drum <- ProTrackR::PTSample(ProTrackR::mod.intro, 2) #' #' ## Let's convert it into raw data, such that we can compress it: #' snare.raw <- adfExplorer::amigaIntToRaw(ProTrackR::waveform(snare.drum) - 128, 8, T) #' #' ## Let's compress it: #' snare.compress <- deltaFibonacciCompress(snare.raw) #' #' ## Decompress the sample: #' snare.decompress <- deltaFibonacciDecompress(snare.compress) #' #' ## And turn the raw data into numeric data: #' snare.decompress <- adfExplorer::rawToAmigaInt(snare.decompress, 8, T) #' #' ## Now if we make the same comparison as before, we note that the #' ## error in the decompressed wave is much larger than in the previous #' ## case (red line): #' plot(ProTrackR::waveform(snare.drum) - 128, type = "l") #' lines(snare.decompress, col = "blue") #' snare.error <- ProTrackR::waveform(snare.drum) - 128 - snare.decompress #' lines(snare.error, col = "red") #' #' ## this can also be visualised by plotting the orignal wave data against #' ## the decompressed data (and observe a nice but not perfect correlation): #' plot(ProTrackR::waveform(snare.drum) - 128, snare.decompress) #' } #' @references <https://en.wikipedia.org/wiki/Delta_encoding> #' @references <http://amigadev.elowar.com/read/ADCD_2.1/Devices_Manual_guide/node02D6.html> #' @author Pepijn de Vries #' @export deltaFibonacciCompress <- function(x, ...) { ## Steve Hayes' Fibonacci Delta sound compression technique ## algorithm results in slightly different compression than ## achieved with Audiomaster IV. 
But the total error is smaller ## in this implementation result <- c(raw(1), x[1]) x <- .rawToAmigaInt(x, 8, T) fibonacci <- rev(c(-34,-21,-13,-8,-5,-3,-2,-1,0,1,2,3,5,8,13,21)) fib.deltas <- rep(NA, length(x)) new.wave <- fib.deltas new.wave[1] <- x[1] for (i in 1:length(x)) { target.value <- x[i] + 128 achieved.value <- (c(x[1], new.wave)[i] + fibonacci + 128) %% 256 value.dif <- target.value - achieved.value fib.deltas[i] <- 16 - (which(abs(value.dif) == min(abs(value.dif)))[[1]]) new.wave[i] <- c(x[1], new.wave)[i] + fibonacci[16 - fib.deltas[i]] } fib.even <- as.raw(fib.deltas) fib.odd <- fib.even[seq(1, length(fib.even), by = 2)] if (length(fib.even) == 1) fib.even <- as.raw(8) else fib.even <- fib.even[seq(2, length(fib.even), by = 2)] if (length(fib.odd) < length(fib.even)) fib.odd <- c(fib.odd, as.raw(8)) if (length(fib.odd) > length(fib.even)) fib.even <- c(fib.even, as.raw(8)) result <- c(result, .amigaIntToRaw(.rawToAmigaInt(fib.odd)*0x10) | fib.even) return(result) } #' @rdname deltaFibonacciCompress #' @name deltaFibonacciDecompress #' @export deltaFibonacciDecompress <- function(x, ...) { ## from http://amigadev.elowar.com/read/ADCD_2.1/Devices_Manual_guide/node02D6.html ## Unpack Fibonacci-delta encoded data from n byte source buffer into ## 2*(n-2) byte dest buffer. Source buffer has a pad byte, an 8-bit ## initial value, followed by n-2 bytes comprising 2*(n-2) 4-bit ## encoded samples. ## second byte indicates the base value: base.val <- x[2] ## first byte is a padding byte; second is already stored; skip them: x <- x[-1:-2] fibonacci <- c(-34,-21,-13,-8,-5,-3,-2,-1,0,1,2,3,5,8,13,21) result <- c(rbind(.hiNybble(x), .loNybble(x))) result <- fibonacci[result + 1] result <- .rawToAmigaInt(base.val, 8, T) + cumsum(result) result <- ((result + 128) %% 256) - 128 return(.amigaIntToRaw(result, 8, T)) } .is.colour <- function(x) { unlist(lapply(x, function(y) { res <- try(col2rgb(y), silent = TRUE) return(!inherits(res, "try-error")) })) } .inverseViewPort <- function(display.mode, monitor) { adm <- AmigaFFH::amiga_display_modes camg <- adm$DISPLAY_MODE_ID[adm$DISPLAY_MODE == display.mode][[1]] | AmigaFFH::amiga_monitors$CODE[AmigaFFH::amiga_monitors$MONITOR_ID == monitor][[1]] new("IFFChunk", chunk.type = "CAMG", chunk.data = list(camg)) } .amigaViewPortModes <- function(x) { MONITOR_ID_MASK <- as.raw(c(0xff, 0xff, 0x10, 0x00)) UPPER_MASK <- as.raw(c(0xff, 0xff, 0x00, 0x00)) EXTENDED_MODE <- as.raw(c(0x00, 0x00, 0x10, 0x00)) SPRITES <- as.raw(c(0x00, 0x00, 0x40, 0x00)) GENLOCK_AUDIO <- as.raw(c(0x00, 0x00, 0x01, 0x00)) GENLOCK_VIDEO <- as.raw(c(0x00, 0x00, 0x00, 0x02)) VP_HIDE <- as.raw(c(0x00, 0x00, 0x20, 0x00)) # Knock bad bits out of old-style CAMGs modes before checking # availability. (some ILBM CAMG's have these bits set in old 1.3 modes, # and should not) If not an extended monitor ID, or if marked as # extended but missing upper 16 bits, screen out inappropriate bits now. ## see: http://amigadev.elowar.com/read/ADCD_2.1/AmigaMail_Vol2_guide/node00FD.html if (!any(as.logical(x & MONITOR_ID_MASK)) || (any(as.logical(x & EXTENDED_MODE)) && !any(as.logical(x & UPPER_MASK)))) { if (any(as.logical(x & (EXTENDED_MODE|SPRITES|GENLOCK_AUDIO|GENLOCK_VIDEO|VP_HIDE)))) { warning("CAMG / display mode contains old style bad bits, I will knock them out...") x <- x & !(EXTENDED_MODE|SPRITES|GENLOCK_AUDIO|GENLOCK_VIDEO|VP_HIDE) } } # Check for bogus CAMG like some brushes have, with junk in # upper word and extended bit NOT set not set in lower word. 
if (any(as.logical(x & UPPER_MASK)) && !(any(as.logical(x & EXTENDED_MODE)))) { warning("CAMG / display mode contains bogus bits, I will use the simplest display mode possible...") x <- as.raw(c(0x00, 0x00, 0x00, 0x00)) } monitors <- AmigaFFH::amiga_monitors display_modes <- AmigaFFH::amiga_display_modes display_modes <- AmigaFFH::amiga_display_modes mon <- unlist(lapply(monitors$CODE, function(y) all(y == (x & MONITOR_ID_MASK)))) mon <- monitors$MONITOR_ID[mon] if (length(mon) > 0 && mon %in% c("STANDARD", "DEFAULT_MONITOR_ID", "NTSC_MONITOR_ID", "PAL_MONITOR_ID")) { x <- x & !MONITOR_ID_MASK } disp <- unlist(lapply(display_modes$DISPLAY_MODE_ID, function(y) all(y == x))) disp <- display_modes$DISPLAY_MODE[disp] if (length(mon) > 0 && length(disp) == 0 && mon %in% c("EURO36_MONITOR_ID", "SUPER72_MONITOR_ID")) { x <- x & !MONITOR_ID_MASK disp <- unlist(lapply(display_modes$DISPLAY_MODE_ID, function(y) all(y == x))) disp <- display_modes$DISPLAY_MODE[disp] } return(list(monitor = mon, display.mode = disp)) } .display.properties <- function(display.mode, monitor) { attribs <- list( is.lace = grepl("LACE", display.mode), is.super = grepl("SUPER", display.mode), is.hires = grepl("HIRES", display.mode), is.HAM = grepl("HAM", display.mode), is.extralores = grepl("EXTRALORES", display.mode), is.noflicker = grepl("FF", display.mode), is.scan.doubled = grepl("DBL", display.mode), is.productivity = grepl("PRODUCT", display.mode), is.halfbright = grepl("HB|HALFBRITE", display.mode) ) aspect.x <- ifelse(monitor == "A2024_MONITOR_ID", 14, ifelse(monitor == "SUPER72_MONITOR_ID", 34, 44)) width <- ifelse(monitor == "A2024_MONITOR_ID", 1024, ifelse(monitor == "SUPER72_MONITOR_ID", 200, 320)) if (length(attribs$is.extralores) > 0 && attribs$is.extralores) { width <- width/2 aspect.x <- aspect.x*2 } if (length(attribs$is.hires) > 0 && attribs$is.hires) { width <- width*2 aspect.x <- aspect.x/2 } if (length(attribs$is.super) > 0 && attribs$is.super) { width <- width*4 aspect.x <- aspect.x/4 } if (length(attribs$is.productivity) > 0 && attribs$is.productivity) { width <- width*2 aspect.x <- aspect.x/2 } height <- ifelse(monitor == "A2024_MONITOR_ID", 800, ifelse(monitor == "SUPER72_MONITOR_ID", 300, ifelse(monitor == "VGA_MONITOR_ID", 480, ifelse(monitor == "EURO72_MONITOR_ID", 400, ifelse(grepl("PAL", monitor), 256, 200))))) aspect.y <- ifelse(monitor == "A2024_MONITOR_ID", 11, ifelse(monitor == "SUPER72_MONITOR_ID", 40, ifelse(monitor == "VGA_MONITOR_ID", 22, ifelse(monitor == "EURO72_MONITOR_ID", 22, ifelse(grepl("NTSC", monitor), 52, 44))))) if (length(attribs$is.lace) > 0 && attribs$is.lace) { height <- height*2 aspect.y <- aspect.y/2 } if (length(attribs$is.scan.doubled) > 0 && attribs$is.scan.doubled) { height <- height*2 aspect.y <- aspect.y/2 } if (length(attribs$is.no.flicker) > 0 && attribs$is.no.flicker) { height <- height/2 aspect.y <- aspect.y*2 } attribs[["screenwidth"]] <- width attribs[["screenheight"]] <- height attribs[["aspect.x"]] <- aspect.x attribs[["aspect.y"]] <- aspect.y return(attribs) } .indexToBitmap <- function(x, depth, interleaved) { ## x should be a matrix of palette indices x <- cbind(x, matrix(1, ncol = -ncol(x)%%16, nrow = nrow(x))) x.dim <- dim(x) x <- .rawToBitmap(.amigaIntToRaw(c(x) - 1, 32, F), T, F) sq <- c(outer(31:(32 - depth), seq(1, length(x), by = 32), "+")) x <- as.logical(x[sq]) rm(sq) # dimensions are bitplane, height, width x <- array(x, dim = c(depth, x.dim)) if (interleaved == T) { ## rearrange dimensions to height, bitplane, width (non-interleaved.) 
x <- c(aperm(x, c(3, 1, 2))) } else { ## rearrange dimensions to bitplane, height, width (interleaved.) x <- c(aperm(x, c(3, 2, 1))) } } #' Get an Amiga timeval struct value from raw data #' #' Some Amiga applications use a timeval struct (see references) to represent a #' time span in seconds. This function coerces raw data to such a numeric time span. #' #' Timeval is a structure (struct) as specified in device/timer.h on the Amiga (see #' references). It represents a timespan in seconds. This function retrieves the #' numeric value from `raw` data. Amongst others, the timeval struct was used #' in the system-configuration file (see [SysConfig]) to specify key repeat speed, #' key repeat delay and mouse double click speed. Use `as.raw` for the inverse #' of this function and get the original raw data. #' @rdname timeval #' @name timeval #' @param x a `vector` of `raw` data that need to be converted into #' Amiga timeval structs. #' @returns Returns a `numeric` `vector` of a timespan in seconds. It is #' represented as an S3 AmigaTimeVal class. #' @examples #' ## First four raw values represent seconds, the latter four microseconds: #' temp <- timeval(as.raw(c(0, 0, 0, 1, 0, 0, 0, 1))) #' print(temp) #' #' ## You can use 'as.raw' to get the original raw data again: #' as.raw(temp) #' @author Pepijn de Vries #' @references <http://amigadev.elowar.com/read/ADCD_2.1/Includes_and_Autodocs_2._guide/node0053.html> #' @export timeval <- function(x) { ## get timeval struct from raw data if ((length(x) %% 8) != 0) stop("The length of x should be a multiple of 8.") if (typeof(x) != "raw") stop("x should be of type 'raw'.") x <- matrix(.rawToAmigaInt(x, 32, F), ncol = 2, byrow = T) result <- apply(x, 1, function(y) y[[1]] + y[[2]]/1e6) class(result) <- "AmigaTimeVal" return(result) } #' @name as.raw #' @rdname as.raw #' @export as.raw.AmigaTimeVal <- function(x, ...) { ## convert a timval (time interval in seconds) to raw timeval struct if (!inherits(x, "AmigaTimeVal")) stop("x should be of S3 class AmigaTimeVal.") secs <- floor(x) micros <- round((x - secs)*1e6) secs[secs >= 2^32] <- (2^32) - 1 micros[micros >= 2^32] <- (2^32) - 1 .amigaIntToRaw(c(rbind(secs, micros)), 32, F) } #' @export print.AmigaTimeVal <- function(x, ...) { invisible(lapply(x, function(y) cat(sprintf("%f [s] Amiga timeval struct\n", y, ...)))) } .read.amigaData <- function(dat, n.bytes, signed, par.names) { ## read numeric and raw data from amiga raw input ## dat = vector of raw data ## n.bytes = vector of lengths of bytes to be read from input data. Negative values are negated and indicate that raw data should be read as is ## signed = vector of logicals. 
Indicate whether the values read from data are signed (T) or unsigned (F) ## par.names = parameter names for the data read from the input data n.bytes <- round(n.bytes) offset <- 0 result <- mapply(function(n.b, sgnd) { res <- NULL if (n.b > 0) { res <- .rawToAmigaInt(dat[offset + (1:n.b)], n.b*8, sgnd) } else if (n.b < 0){ n.b <- -n.b res <- dat[offset + (1:n.b)] } offset <<- offset + n.b return(res) }, n.b = n.bytes, sgnd = signed, SIMPLIFY = F) names(result) <- par.names result } .write.amigaData <- function(lst, n.bytes, signed, par.names) { ## inverse function for .read.amigaData ## first make sure list is in correct order: lst <- lst[par.names] if (any(n.bytes > 0)) { lst[n.bytes > 0] <- lapply(1:sum(n.bytes > 0), function(y) { .amigaIntToRaw(lst[n.bytes > 0][[y]], 8*n.bytes[n.bytes > 0][y], signed[n.bytes > 0][y]) }) } lst <- unlist(lst) names(lst) <- NULL return(lst) } .match.factor <- function(lst, element.name, vals, levs) { result <- match(lst[[element.name]], vals) if (is.na(result)) stop(sprintf("Unknown %s.", element.name)) result <- factor(levs[result], levs) return(result) } .match.factor.inv <- function(lst, element.name, vals, levs) { result <- vals[which(levs %in% lst[[element.name]])] if (length(result) == 0) stop(sprintf("Unknown level for %s.", element.name)) if (length(result) > 1) stop(sprintf("Only a single value for %s is allowed.", element.name)) return(result) } .bitwOrAll <- function(x) { while (length(x) > 1) { x <- c(bitwOr(x[1], x[2]), x[-1:-2]) } return(x) } .match.multi.factor <- function(lst, element.name, vals, levs) { result <- levs[bitwAnd(lst[[element.name]], vals) == vals] result <- factor(result, levs) } .match.multi.factor.inv <- function(lst, element.name, vals, levs) { result <- vals[which(levs %in% lst[[element.name]])] while (length(result) > 1) { result <- c(bitwOr(result[1], result[2]), result[-1:-2]) } return(result) } .loNybble <- function(raw_dat) ## function that gets the value [0,16] of the 4 low bits of a raw byte { if (!inherits(raw_dat, "raw")) stop ("Only raw data is accepted as input") return(as.integer(raw_dat)%%16) } .hiNybble <- function(raw_dat) ## function that gets the value [0,16] of the 4 high bits of a raw byte { if (!inherits(raw_dat, "raw")) stop ("Only raw data is accepted as input") return(as.integer(as.integer(raw_dat)/16)) } .rawToCharNull <- function(raw_dat) { result <- "" if (length(raw_dat) < 3) try(result <- (rawToChar(raw_dat)), silent = T) else { result <- raw_dat runlength <- rle(result)$lengths if (length(runlength) > 2) { rel_range <- (runlength[1] + 1):(length(result) - runlength[length(runlength)]) if (result[[1]] != raw(1)) rel_range <- 1:rel_range[length(rel_range)] if (result[[length(result)]] != raw(1)) rel_range <- rel_range[1]:length(result) result[rel_range][result[rel_range] == as.raw(0x00)] <- as.raw(0x20) result <- result[result != raw(1)] } try(result <- rawToChar(result), silent = T) if (inherits(result, "raw")) result <- "" } return(result) } .read.generic <- function(file, disk = NULL) { ## If the file size can be determined from 'file', that size ## will be read. Other wise, the file will be read in 5 kB chunks. 
  size <- 5*1024
  if (!is.null(disk)) {
    if ("adfExplorer" %in% rownames(utils::installed.packages())) {
      dat <- adfExplorer::get.adf.file(disk, file)
      size <- length(dat)
      file <- rawConnection(dat, "rb")
    } else {
      stop("When specifying 'disk', the 'adfExplorer' package needs to be installed.")
    }
  }
  if (inherits(file, "character")) {
    size <- file.size(file)
    file <- file(file, "rb")
  }
  if (inherits(file, "connection")) {
    con_info <- summary(file)
    if (con_info$`can read` != "yes" || con_info$text != "binary") stop("file is not a connection from which binary data can be read...")
  }
  result <- NULL
  repeat {
    l1 <- length(result)
    result <- c(result, readBin(file, "raw", size))
    l2 <- length(result)
    if ((l2 - l1) < size) break
  }
  close(file)
  return(result)
}

.write.generic <- function(x, file, disk = NULL, ...) {
  raw.dat <- as.raw(x, ...)
  if (is.null(disk)) {
    if (inherits(file, "character")) con <- file(file, "wb")
    if (inherits(file, "connection")) {
      con_info <- summary(file)
      if (con_info$`can write` != "yes" || con_info$text != "binary") stop("file is not a connection to which binary data can be written...")
      con <- file
    }
    writeBin(raw.dat, con, endian = "big")
    if (inherits(file, "character")) return(close(con))
  } else {
    if ("adfExplorer" %in% rownames(utils::installed.packages())) {
      return(adfExplorer::put.adf.file(disk, raw.dat, file))
    } else {
      stop("When specifying 'disk', the 'adfExplorer' package needs to be installed.")
    }
  }
}
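
## A minimal sketch of the ByteRun1 round trip implemented above, kept as
## a comment so it is not executed when the package is built. Note that
## this implementation only emits a repeat header for runs of four or more
## identical bytes (shorter runs are stored as literals, see the `Skip
## double repeats' comment in packBitmap):
##
##   dat    <- as.raw(c(0x41, 0x41, 0x41, 0x41, 0x41, 0x42))
##   packed <- packBitmap(dat)   ## should yield fc 41 00 42
##   stopifnot(identical(unPackBitmap(packed), dat))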
/scratch/gouwar.j/cran-all/cranData/AmigaFFH/R/generic_support.r
## Desired Y position + vertical-offset of the display window = 25 + 44 = 69
## Desired X position + horizontal-offset of display window ~= 94 + 64 = 158

.validity.HWSprite <- function(object) {
  if (length(object@VStart) != 1) stop("VStart should have a length of 1")
  if (length(object@HStart) != 1) stop("HStart should have a length of 1")
  if (length(object@VStop) != 1) stop("VStop should have a length of 1")
  if (length(object@control.bits) != 8) stop("control.bits should have a length of 8")
  if (length(object@end.of.data) != 4) stop("end.of.data should have a length of 4")
  if (length(object@colours) != 3) stop("colours should have a length of 3")
  if (object@VStop < object@VStart) stop("VStop should be equal to or greater than VStart.")
  if (object@HStart < 0) stop("HStart can't be negative")
  if (object@VStart < 0) stop("VStart can't be negative")
  if (object@VStop < 0) stop("VStop can't be negative")
  if (!all(.is.colour(object@colours))) stop("colours should represent colours")
  if (!all(object@end.of.data == raw(4))) warning("Extended sprites are currently not supported")
  if (length(object@bitmap) != (object@VStop - object@VStart)*4) stop("bitmap should have a length of (VStop-VStart)*4")
  return(T)
}

#' The hardwareSprite class
#'
#' An S4 class object that represents graphical objects known as
#' hardware sprites on the Commodore Amiga.
#'
#' Amiga hardware supported sprites: graphical objects that
#' could be moved around the display independently from each other.
#' Basic sprites were 16 pixels wide and any number of pixels high and
#' were composed of four colours, of which one is transparent.
#'
#' More complex sprites could be formed by linking separate sprites
#' together. That way, sprites could become wider, or be composed of
#' more colours. Such extended sprites are currently not supported
#' by this package.
#'
#' A well known example of a hardware sprite on the Commodore Amiga is
#' the mouse pointer.
#'
#' This object simply holds the basic information belonging to a
#' hardware sprite. Use [as.raster()] to convert it to
#' a raster which is a more useful graphical element in R.
#'
#' @slot VStart The vertical starting position of a sprite.
#' @slot HStart The horizontal starting position of a sprite.
#' @slot VStop The vertical stopping position of a sprite. The
#' height of a sprite is given by `VStop` - `VStart`.
#' @slot control.bits 8 `logical` values used for extending
#' sprites. The values are stored in this object but extending
#' sprites is not (yet) supported.
#' @slot bitmap Interleaved bitmap data containing information on
#' the pixel colour numbers of the sprite.
#' @slot colours A vector of the 3 colours used for the sprite.
#' @slot end.of.data Sprite data can be followed by another sprite. It is terminated
#' with two WORDS equalling zero (`raw(4)`). Repeated sprite data is currently not
#' supported.
#' @references <http://amigadev.elowar.com/read/ADCD_2.1/Hardware_Manual_guide/node00AE.html> #' @name hardwareSprite-class #' @rdname hardwareSprite-class #' @aliases hardwareSprite #' @examples #' ## This generates a sprite of a single line (16x1 pixels) with an empty bitmap: #' new("hardwareSprite") #' #' ## This generates a sprite of a single line (16x1 pixels) where #' ## the bitmap contains some coloured pixels: #' new("hardwareSprite", bitmap = as.raw(c(0x01,0x02,0x03,0x04))) #' #' ## This generates a sprite of 16x16 pixels: #' new("hardwareSprite", #' VStop = 16, #' bitmap = as.raw(sample.int(255, 64, replace = TRUE))) #' @exportClass hardwareSprite #' @author Pepijn de Vries setClass("hardwareSprite", representation(VStart = "numeric", HStart = "numeric", VStop = "numeric", control.bits = "logical", bitmap = "raw", end.of.data = "raw", colours = "character"), prototype(VStart = 0, HStart = 0, VStop = 1, control.bits = rep(F, 8), bitmap = raw(4), end.of.data = raw(4), colours = c("#000000", "#888888", "#FFFFFF")), validity = .validity.HWSprite) setGeneric("rawToHWSprite", function(x, col) standardGeneric("rawToHWSprite")) #' Convert raw data into an Amiga hardware sprite #' #' Convert `raw` data structured conform a Commodore Amiga hardware #' sprite (see references) into a [hardwareSprite()] object. #' #' Information to set up a hardware sprite is stored as `raw` data #' on Commodore Amigas. This method can be used to convert this data #' into a [hardwareSprite()] object. This object can in turn #' be converted with [as.raster()] such that it can be plotted in R. #' #' @docType methods #' @rdname rawToHWSprite #' @name rawToHWSprite #' @aliases rawToHWSprite,raw,missing-method #' @param x `raw` data structured as an Amiga hardware sprite #' (see references). #' @param col A `vector` of colours (`character`) to be used #' for the hardware sprite. Specify the three visible colours for the #' sprite. When missing some default colours (grayscale) will be used. #' The colours have to be provided separately as they are usually not stored #' together with the hardware sprite data. 
#' @returns Returns a [hardwareSprite()] object based on the provided raw data
#' @references <http://amigadev.elowar.com/read/ADCD_2.1/Hardware_Manual_guide/node00B9.html>
#' @examples
#' ## Let's generate a 16x16 sprite with a random bitmap:
#' dat <- as.raw(c(0x00, 0x00, 0x10, 0x00,
#'                 sample.int(255, 64, replace = TRUE),
#'                 0x00, 0x00, 0x00, 0x00))
#' ## make it a hardware sprite object:
#' spr <- rawToHWSprite(dat)
#' ## and plot it:
#' plot(spr, interpolate = FALSE)
#'
#' ## with some imagination we can make
#' ## a more structured image:
#' dat <- as.raw(c(0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0xff, 0xf8,
#'                 0x7f, 0x80, 0x80, 0x70, 0x7f, 0x00, 0xbe, 0xe0,
#'                 0x7e, 0x00, 0x85, 0xc0, 0x7d, 0x80, 0x82, 0x40,
#'                 0x6b, 0xc0, 0x95, 0xa0, 0x57, 0xe0, 0xa8, 0xd0,
#'                 0x2f, 0xf0, 0xd1, 0x68, 0x4f, 0xf8, 0xb0, 0x34,
#'                 0x07, 0xfc, 0xf8, 0x5a, 0x03, 0xfe, 0xe4, 0x0d,
#'                 0x01, 0xfc, 0xc2, 0x12, 0x00, 0xf8, 0x81, 0x04,
#'                 0x00, 0x70, 0x00, 0x88, 0x00, 0x20, 0x00, 0x50,
#'                 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00))
#' spr <- rawToHWSprite(dat, c("#EE4444", "#000000", "#EEEECC"))
#' plot(spr, interpolate = FALSE)
#' @family raw.operations
#' @family HWSprite.operations
#' @author Pepijn de Vries
#' @export
setMethod("rawToHWSprite", c("raw", "missing"), function(x, col) {
  result <- methods::new("hardwareSprite")
  result@HStart <- .rawToAmigaInt(x[1], 8, F)
  result@VStart <- .rawToAmigaInt(x[2], 8, F)
  result@VStop <- .rawToAmigaInt(x[3], 8, F)
  if (result@VStop == 0) result@VStop <- 16 ## This appears to be the case for the mouse pointer. Check if this is always the case
  result@control.bits <- as.logical(.rawToBitmap(x[4], invert.longs = F))
  vlen <- result@VStop - result@VStart
  result@bitmap <- x[4 + 1:(vlen*4)]
  offset <- vlen*4 + 4
  eod <- x[offset + 1:4]
  result@end.of.data <- eod
  while (!all(eod == raw(4))) {
    result@end.of.data <- c(result@end.of.data, eod)
    offset <- offset + 4
    eod <- x[offset + 1:4]
  }
  return(result)
})

#' @rdname rawToHWSprite
#' @aliases rawToHWSprite,raw,character-method
#' @export
setMethod("rawToHWSprite", c("raw", "character"), function(x, col) {
  result <- rawToHWSprite(x)
  result@colours <- col
  return(result)
})

#' @rdname as.raster
#' @name as.raster
#' @aliases as.raster,hardwareSprite-method
#' @export
as.raster.hardwareSprite <- function(x, background = "#AAAAAA", ...) {
  ## Make sure that background is a valid color
  background <- grDevices::adjustcolor(background)
  cols <- c(background, x@colours)
  return(bitmapToRaster(x@bitmap, 16, length(x@bitmap)*8/(2*16), 2, cols)) # assume 2 bitplanes
}

#' @rdname plot
#' @name plot
#' @export
plot.hardwareSprite <- function(x, y, ...) {
  graphics::plot(as.raster(x), ...)
}

#' @rdname as.raw
#' @name as.raw
#' @aliases as.raw,hardwareSprite-method
#' @export
setMethod("as.raw", "hardwareSprite", function(x) {
  result <- c(
    .amigaIntToRaw(c(x@HStart, x@VStart, x@VStop), 8, F),
    .bitmapToRaw(x@control.bits, invert.longs = F, invert.bytes = F),
    x@bitmap,
    x@end.of.data
  )
  return(result)
})

#' @export
print.hardwareSprite <- function(x, ...) {
  cat(sprintf("A %i row high hardware sprite", x@VStop - x@VStart))
}

setMethod("show", "hardwareSprite", function(object){
  print(object)
})

#' Convert a raster object into a hardwareSprite object
#'
#' Convert a grDevices raster object into an Amiga hardwareSprite class object.
#'
#' A [grDevices()] raster image can be converted into a
#' [hardwareSprite()] class object with this function. For this purpose
#' any true-colour image will be converted to an indexed palette with 4 colours.
#' The Amiga hardware sprite will reserve one of the colours as transparent. This function
#' will use fully transparent colours in the original image (i.e., the alpha level equals 0)
#' for this purpose. Or when the image has no fully transparent colours, it will use the
#' most frequently occurring colour (at least when the default `indexing` function
#' is used).
#'
#' @rdname rasterToHWSprite
#' @name rasterToHWSprite
#' @param x A [grDevices()] raster object ([grDevices::as.raster()])
#' that needs to be converted into a [hardwareSprite()] class object.
#' Note that a [hardwareSprite()] has a maximum width of 16 pixels.
#' When `x` is wider, it will be cropped.
#' @param indexing A function that accepts two arguments: `x` (a grDevices
#' `raster` object); `length.out`, a numeric value indicating the
#' desired size of the palette (i.e., the number of colours). It should return
#' a matrix with numeric palette indices (ranging from 1 up to the number of
#' colours in the palette). The result should have an attribute named `palette' that
#' contains the colours that correspond with the index numbers. The result should
#' also carry an attribute with the name `transparent', with a single numeric value
#' representing which colour in the palette should be treated as transparent (or
#' `NA` when no transparency is required). By default the
#' function [index.colours()] is used.
#' @returns Returns a [hardwareSprite()] class object based on `x`
#' @examples
#' \dontrun{
#' ## first create a raster object that can be used as input
#' ## (making sure that the background is transparent):
#' rst <- as.raster(simpleSysConfig()$PointerMatrix, "#AAAAAA00")
#'
#' ## now turn it into a hardware sprite:
#' spr <- rasterToHWSprite(rst)
#'
#' ## and plot it as a check:
#' plot(spr)
#' }
#' @family raster.operations
#' @family HWSprite.operations
#' @author Pepijn de Vries
#' @export
rasterToHWSprite <- function(x, indexing = index.colours) {
  if (!inherits(x, "raster")) stop("x should be of class raster")
  if (!inherits(indexing, "function")) stop("'indexing' should be a function")
  if (!all(c("x", "length.out") %in% names(formals(indexing)))) stop("Function 'indexing' should require arguments 'x' and 'length.out'.")
  if (dim(x)[2] > 16) {
    warning("Raster is more than 16 pixels wide. It will be cropped.")
    x <- x[,1:16]
  }
  pal <- NULL
  bm <- rasterToBitmap(x, 2,
                       indexing = function(x, length.out) {
                         result <- indexing(x, length.out)
                         pal <<- attributes(result)[["palette"]]
                         trans <- attributes(result)[["transparent"]]
                         ## make sure that the transparent colour is the first colour in the palette:
                         if (!is.na(trans) && trans != 1) {
                           result[result == 1] <- -1
                           result[result == trans] <- 1
                           result[result == -1] <- trans
                           pal[c(1, trans)] <<- pal[c(trans, 1)]
                           trans <- 1
                         }
                         attributes(result)[["palette"]] <- pal
                         attributes(result)[["transparent"]] <- trans
                         result
                       })
  bm <- .bitmapToRaw(bm, T, F)
  result <- new("hardwareSprite",
                VStop = dim(x)[1],
                bitmap = bm,
                colours = pal[-1])
  result
}

#' @export
dim.hardwareSprite <- function(x) {
  result <- x@VStop - x@VStart
  result[result == 0] <- 16
  c(result, 16)
}
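## A minimal round-trip sketch (not part of the package API; it assumes a
## 16x16 sprite with a random bitmap, as in the class examples above). The
## serialisation with `as.raw` is symmetric with `rawToHWSprite`:
##
##   spr      <- new("hardwareSprite", VStop = 16,
##                   bitmap = as.raw(sample.int(255, 64, replace = TRUE)))
##   spr.raw  <- as.raw(spr)                        # raw hardware representation
##   spr.back <- rawToHWSprite(spr.raw, spr@colours)
##   identical(as.raw(spr.back), spr.raw)           # expected to be TRUE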
.validity.IFFChunk <- function(object) {
  if (length(object@chunk.type) != 1) stop("Slot 'chunk.type' should be of length 1")
  if (nchar(object@chunk.type) != 4) stop("Slot 'chunk.type' should consist of 4 characters.")
  data.types <- unlist(lapply(object@chunk.data, class))
  if (length(object@chunk.data) == 0) stop("Chunk should have at least one element in its data list.")
  if (!(all(data.types == "IFFChunk") || all(data.types == "raw"))) stop("Chunk data should be a list of IFFChunk objects or a list with a single element with raw data.")
  if (all(data.types == "raw") && length(object@chunk.data) > 1) stop("Chunk data can only hold one element of raw data.")
  ## note that the chunk.size can be odd, but in that case a padding byte should be read from
  ## an iff file...
  if (all(data.types == "raw") && length(object@chunk.data[[1]]) > (2^32 - 1)) stop("Chunk data cannot be larger than 4,294,967,295 bytes")
  if (object@chunk.type %in% c("FORM", "LIST", "CAT ", "PROP") && all(data.types == "raw")) stop("IFF containers should contain IFF chunks, not raw data.")
  ## TODO note that the validity of all the IFFChunk objects in the data list are not checked
  ## TODO Doing that will make the object fool-proof, but also a lot slower...
  return(T)
}

#' A class structure to represent IFF files
#'
#' An S4 class structure to represent data stored in the Interchange File
#' Format (IFF).
#'
#' The Interchange File Format (IFF) was introduced in 1985 by Electronic Arts.
#' This format stores files in standardised modular objects, called `chunks'.
#' At the start of each chunk it is specified what type of data can be expected
#' and what the size of this data is. This was a very forward thinking way of
#' storing data; similar structures are still used in modern file formats (such
#' as PNG images and XML files).
#'
#' Although the IFF format is still in use, and new standardised chunk types can
#' still be registered, this package will focus on the older chunk types that
#' were primarily used on the Commodore Amiga (OS <= 3.0). IFF files could
#' contain any kind of information. It could contain bitmap images, but also
#' audio clips or (formatted) texts.
#'
#' The `IFFChunk` class is designed such that it theoretically can hold
#' any type of IFF data. This package will mostly focus on the early IFF file types
#' (i.e., IFF chunks as originally registered by Electronic Arts). IFF files are
#' read by this package in a non-lossy way ([read.iff()]), such that all
#' information is preserved (even if it is of an unknown type, as long as the chunk
#' identifier is 4 characters long).
#'
#' This means that the object needs to be interpreted in order to make sense out of
#' it ([interpretIFFChunk()]). This interpretation returns simplified
#' interpretations of class `IFF.ANY` when it is supported (see
#' [IFFChunk-method()] for supported chunk types). Note that in the
#' interpretation process (meta-)information may get lost. Converting
#' `IFF.ANY` objects back into [IFFChunk()] objects (if possible)
#' could therefore result in an object that is different from the one stored in the
#' original file and could even destroy the correct interpretation of IFF objects.
#' IFF files should thus be handled with care.
#'
#' @slot chunk.type A four `character` long code reflecting the type of
#' information represented by this chunk.
#' @slot chunk.data A `list` that holds either one or more valid
#' `IFFChunk`s or a single `vector` of `raw` data.
#' This data can only be interpreted in context of the specified type or in some cases
#' information from other `IFFChunk`s.
#' @references <https://wiki.amigaos.net/wiki/IFF_Standard>
#' @references <https://wiki.amigaos.net/wiki/IFF_FORM_and_Chunk_Registry>
#' @references <https://en.wikipedia.org/wiki/Interchange_File_Format>
#' @name IFFChunk-class
#' @rdname IFFChunk-class
#' @examples
#' \dontrun{
#' ## load an IFF file
#' example.iff <- read.iff(system.file("ilbm8lores.iff", package = "AmigaFFH"))
#'
#' ## 'example.iff' is of class IFFChunk:
#' class(example.iff)
#'
#' ## let's plot it:
#' plot(example.iff)
#'
#' ## The default constructor will create an empty FORM:
#' new("IFFChunk")
#'
#' ## The constructor can also be used to create simple chunks:
#' new("IFFChunk",
#'     chunk.type = "TEXT",
#'     chunk.data = list(charToRaw("A simple chunk")))
#' }
#' @family iff.operations
#' @exportClass IFFChunk
#' @author Pepijn de Vries
setClass("IFFChunk",
         representation(
           chunk.type = "character",
           chunk.data = "list" ## of either a single element of type raw, or a list of iffchunks
         ),
         prototype(
           chunk.type = "FORM",
           chunk.data = list(raw(0))
         ),
         validity = .validity.IFFChunk)

#' Read Interchange File Format (IFF)
#'
#' Read the Interchange File Format (IFF) as an [IFFChunk()] object.
#'
#' Information is stored as `chunks' in IFF files (see [IFFChunk()]).
#' Each chunk should at least contain a label of the type of chunk and the data
#' for that chunk. This function reads all chunks from a valid IFF file, including
#' all nested chunks, and stores them in an [IFFChunk()] object. IFF
#' files can hold any kind of data (e.g. images or audio); this read function
#' does not interpret the file. Use [interpretIFFChunk()] for that
#' purpose.
#'
#' @rdname read.iff
#' @name read.iff
#' @param file A filename of an IFF file to be read, or a connection from which
#' binary data can be read.
#' @param disk A virtual Commodore Amiga disk from which the `file` should be
#' read. This should be an [`amigaDisk()`][adfExplorer::amigaDisk-class] object. Using
#' this argument requires the adfExplorer package.
#' When set to `NULL`, this argument is ignored.
#' @returns Returns an [IFFChunk()] object read from the specified file.
#' @examples
#' \dontrun{
#' ## let's read a bitmap image stored in IFF as provided with this package:
#' filename <- system.file("ilbm8lores.iff", package = "AmigaFFH")
#' example.iff <- read.iff(filename)
#'
#' ## And plot it:
#' plot(example.iff)
#' }
#' @family io.operations
#' @family iff.operations
#' @author Pepijn de Vries
#' @export
read.iff <- function(file, disk = NULL) {
  result <- .read.generic(file, disk)
  result <- rawToIFFChunk(result)
  if (result@chunk.type != "FORM") stop("FORM is currently the only supported IFF container. LIST, CAT and others are not...")
  return(result)
}

#' Convert AmigaFFH objects into raw data
#'
#' Convert AmigaFFH objects into raw data, as they would be stored in the Commodore
#' Amiga's memory or files.
#'
#' Objects originating from this package can in some cases be converted into
#' raw data, as they would be stored on an original Amiga. See the usage section
#' for the currently supported objects.
#'
#' Not all information from `x` may be included in the `raw`
#' data that is returned, so handle with care.
#'
#' As this package grows, additional objects can be converted with this method.
#'
#' @docType methods
#' @rdname as.raw
#' @name as.raw
#' @aliases as.raw,IFFChunk-method
#' @param x An AmigaFFH object that needs to be converted into raw data.
#' See usage section for all supported objects.
#' @returns Returns a `vector` of `raw` data based on `x`.
#' @examples
#' \dontrun{
#' ## read an IFF file as an IFFChunk object:
#' example.iff <- read.iff(system.file("ilbm8lores.iff", package = "AmigaFFH"))
#'
#' ## This will recreate the exact raw data as it was read from the file:
#' example.raw <- as.raw(example.iff)
#' }
#' @family raw.operations
#' @author Pepijn de Vries
#' @export
setMethod("as.raw", "IFFChunk", function(x) {
  get.data <- function(y, parent.is.container = F) {
    result <- charToRaw(y@chunk.type)
    if (inherits(y@chunk.data[[1]], "raw")) {
      ## only store chunk size if parent is not a container (i.e., FORM, LIST or CAT)
      if (!parent.is.container) result <- c(result, .amigaIntToRaw(length(y@chunk.data[[1]]), 32, F))
      result <- c(result, y@chunk.data[[1]])
      ## Chunks should always be WORD aligned (pad with zeros if
      ## it is not):
      if ((length(y@chunk.data[[1]]) %% 2) != 0) result <- c(result, raw(1))
      return(result)
    } else if (inherits(y@chunk.data[[1]], "IFFChunk")) {
      container <- y@chunk.type %in% c("FORM", "LIST", "CAT ", "PROP")
      dat <- unlist(lapply(y@chunk.data, get.data, parent.is.container = container))
      ## only store chunk size if parent is not a container (i.e., FORM, LIST, CAT or PROP)
      if (!parent.is.container) result <- c(result, .amigaIntToRaw(length(dat), 32, F))
      result <- c(result, dat)
    } else {
      stop("IFFChunk contains invalid data")
    }
  }
  return(get.data(x, F))
})

#' @rdname as.raw
#' @method as.raw IFF.ANY
#' @param ... Arguments passed on to [IFFChunk-method()] when `x` is
#' of class `IFF.ANY`.
#' @export
as.raw.IFF.ANY <- function(x, ...) {
  as.raw(IFFChunk(x, ...))
}

#' Write Interchange File Format (IFF)
#'
#' Write an [IFFChunk()] object conform the Interchange File Format (IFF).
#'
#' Writes an [IFFChunk()] object (including all nested chunks) to the
#' specified file. Only the structure of the object needs to be valid, however,
#' a correctly structured file does not necessarily result in an interpretable file
#' (see examples).
#'
#' @rdname write.iff
#' @name write.iff
#' @param x An [IFFChunk()] object that needs to be written to a file.
#' @param file A filename for the IFF file to which the [IFFChunk()] needs
#' to be saved, or a connection to which the data should be written.
#' @param disk A virtual Commodore Amiga disk to which the `file` should be
#' written. This should be an [`amigaDisk()`][adfExplorer::amigaDisk-class] object. Using
#' this argument requires the adfExplorer package.
#' When set to `NULL`, this argument is ignored.
#' @returns Returns either `NULL` or an `integer` status invisibly as passed
#' by the [`close()`][base::connections] statement used to close the file connection.
#' When `disk` is specified, a copy of `disk` is returned
#' to which the file is written.
#'
#' @references <https://en.wikipedia.org/wiki/Interchange_File_Format>
#' @examples
#' \dontrun{
#' ## read an IFF file as an IFFChunk object:
#' example.iff <- read.iff(system.file("ilbm8lores.iff", package = "AmigaFFH"))
#'
#' ## This will write the IFF file (in this case a bitmap image)
#' ## to the temp directory:
#' write.iff(example.iff, file.path(tempdir(), "image.iff"))
#'
#' }
#' @family io.operations
#' @family iff.operations
#' @author Pepijn de Vries
#' @export
write.iff <- function(x, file, disk = NULL) {
  if (!inherits(x, "IFFChunk")) stop("x should be of class IFFChunk.")
  .write.generic(x, file, disk)
}

setGeneric("getIFFChunk", function(x, chunk.path, chunk.number) standardGeneric("getIFFChunk"))

#' Get a specific IFFChunk nested inside other IFFChunks
#'
#' [IFFChunk()]s can be nested in a tree-like structure. Use this method
#' to get a specific chunk with a specific label.
#'
#' `IFFChunk` objects have 4 `character` identifiers, indicating what type
#' of chunk you are dealing with. These chunks can be nested inside of each other.
#' Use this method to extract specific chunks by referring to their respective
#' identifiers. The identifiers are shown when calling `print` on an
#' [IFFChunk()]. If a specified path doesn't exist, this method throws a
#' `subscript out of range' error.
#'
#' @docType methods
#' @rdname getIFFChunk
#' @name getIFFChunk
#' @aliases getIFFChunk,IFFChunk,character,integer-method
#' @param x An [IFFChunk()] object from which the nested
#' [IFFChunk()] should be extracted and returned.
#' @param chunk.path A `vector` of 4 `character` long strings
#' of IFF chunk labels, specifying the path of the target IFF chunk.
#' For example: `c("ILBM", "BODY")` means, get the "BODY" chunk
#' from inside the "ILBM" chunk.
#' @param chunk.number A `vector` of the same length as `chunk.path`,
#' with `integer` index numbers. Sometimes a chunk can contain a list of
#' chunks with the same label. With this argument you can specify which element
#' should be returned. By default (when missing), the first element is always
#' returned.
#' @returns Returns an [IFFChunk()] object nested inside `x` at the
#' specified path. Or in case of the replace method the original chunk `x` is
#' returned with the target chunk replaced by `value`.
#' @examples
#' \dontrun{
#' ## load an IFF file
#' example.iff <- read.iff(system.file("ilbm8lores.iff", package = "AmigaFFH"))
#'
#' ## Get the BMHD (bitmap header) from the ILBM (interleaved bitmap) chunk:
#' bmhd <- getIFFChunk(example.iff, c("ILBM", "BMHD"))
#'
#' ## This is essentially doing the same thing, but we now explicitly
#' ## tell the method to get the first element for each specified label:
#' bmhd <- getIFFChunk(example.iff, c("ILBM", "BMHD"), c(1L, 1L))
#'
#' ## Let's modify the bitmap header and replace it in the parent IFF chunk.
#' bmhd.itpt <- interpretIFFChunk(bmhd)
#'
#' ## Let's disable the masking, the bitmap will no longer be transparent:
#' bmhd.itpt$Masking <- "mskNone"
#' bmhd <- IFFChunk(bmhd.itpt)
#'
#' ## Now replace the header from the original iff with the modified header:
#' getIFFChunk(example.iff, c("ILBM", "BMHD")) <- bmhd
#' }
#' @family iff.operations
#' @author Pepijn de Vries
#' @export
setMethod("getIFFChunk", c("IFFChunk", "character", "integer"), function(x, chunk.path, chunk.number) {
  chunk <- x
  id <- unlist(lapply(1:length(chunk.path), function(y){
    id <- which(unlist(lapply(chunk@chunk.data, function(z) z@chunk.type)) == chunk.path[[y]])
    chunk <<- chunk@chunk.data[[id[[chunk.number[[y]]]]]]
    return(id)
  }))
  return(chunk)
})

#' @rdname getIFFChunk
#' @name getIFFChunk
#' @aliases getIFFChunk,IFFChunk,character,missing-method
#' @export
setMethod("getIFFChunk", c("IFFChunk", "character", "missing"), function(x, chunk.path, chunk.number) {
  getIFFChunk(x, chunk.path, rep(1L, length(chunk.path)))
})

setGeneric("getIFFChunk<-", function(x, chunk.path, chunk.number, value) standardGeneric("getIFFChunk<-"))

#' @rdname getIFFChunk
#' @name getIFFChunk<-
#' @aliases getIFFChunk<-,IFFChunk,character,missing,IFFChunk-method
#' @param value An [IFFChunk()] with which the target chunk should be
#' replaced. Make sure that `value` is of the same `chunk.type` as the last
#' chunk specified in the `chunk.path`.
#' @export
setReplaceMethod("getIFFChunk", c("IFFChunk", "character", "missing", "IFFChunk"), function(x, chunk.path, chunk.number = NULL, value) {
  getIFFChunk(x, chunk.path, rep(1L, length(chunk.path))) <- value
  return(x)
})

#' @rdname getIFFChunk
#' @name getIFFChunk<-
#' @aliases getIFFChunk<-,IFFChunk,character,integer,IFFChunk-method
#' @export
setReplaceMethod("getIFFChunk", c("IFFChunk", "character", "integer", "IFFChunk"), function(x, chunk.path, chunk.number = NULL, value) {
  if (value@chunk.type != chunk.path[[length(chunk.path)]]) stop("'value' should be of the same IFF type as the last type in the 'chunk.path'")
  iff.list <- as.list(x)
  val.list <- as.list(value)
  eval(parse(text = paste0("iff.list",
                           paste0(sprintf("[[\"%s_%i\"]]", chunk.path, chunk.number), collapse = ""),
                           "<-val.list")))
  ## code below doesn't seem to work correctly:
  list.to.chunk <- function(x, nam) {
    if (typeof(x) == "raw") {
      return(new("IFFChunk", chunk.type = substr(nam, 1, 4), chunk.data = list(x)))
    } else if (typeof(x) == "list") {
      result <- lapply(seq_along(x), function(y) {
        list.to.chunk(x[[y]], names(x)[y])
      })
      return(new("IFFChunk", chunk.type = substr(nam, 1, 4), chunk.data = result))
    }
  }
  return(list.to.chunk(iff.list, x@chunk.type))
})

setGeneric("interpretIFFChunk", function(x, ...) standardGeneric("interpretIFFChunk"))

#' Interpret an IFFChunk object
#'
#' [IFFChunk()]s represent the structure of the Interchange File Format well,
#' but the information is stored as `raw` data. This method tries to interpret and
#' translate the information into a more comprehensible format.
#'
#' Interchange File Format chunks can hold any kind of information (images, audio,
#' (formatted) text, etc.). This method will try to convert this information into
#' something useful. Information may get lost in the translation, so be careful when
#' converting back to an [IFFChunk-class()] object using
#' [IFFChunk-method()].
#'
#' An error is thrown when the [IFFChunk()] object is currently not
#' interpretable by this package. See [IFFChunk-method()] for an overview
#' of currently supported IFF chunks.
#' This list may increase while this package matures.
#'
#' @docType methods
#' @rdname interpretIFFChunk
#' @name interpretIFFChunk
#' @aliases interpretIFFChunk,IFFChunk-method
#' @param x An [IFFChunk()] object which needs to be interpreted.
#' @param ... Currently ignored.
#' @returns If `x` is interpretable by this package an S3 class object of
#' `IFF.ANY` is returned. The content of the returned object will depend
#' on the type of [IFFChunk()] provided for `x`. The result can
#' for instance be a `raster` image ([grDevices::as.raster()]),
#' a list of audio [tuneR::Wave()]s, a `character` string or a named
#' `list`.
#' @examples
#' \dontrun{
#' ## load an IFF file
#' example.iff <- read.iff(system.file("ilbm8lores.iff", package = "AmigaFFH"))
#'
#' ## in this case, the file is a FORM container with a bitmap image, and a
#' ## list with a raster object is returned when interpreted:
#' example.itpt <- interpretIFFChunk(example.iff)
#' class(example.itpt)
#' typeof(example.itpt)
#' class(example.itpt[[1]])
#'
#' ## Let's extract the bitmap header from the main chunk:
#' bmhd <- getIFFChunk(example.iff, c("ILBM", "BMHD"))
#'
#' ## When interpreted, a named list is returned with (meta-)information
#' ## on the bitmap image:
#' bmhd.itpt <- interpretIFFChunk(bmhd)
#' class(bmhd.itpt)
#' typeof(bmhd.itpt)
#' print(bmhd.itpt)
#' }
#' @family iff.operations
#' @author Pepijn de Vries
#' @export
setMethod("interpretIFFChunk", "IFFChunk", function(x, ...) {
  type <- x@chunk.type
  if (!inherits(x@chunk.data[[1]], "raw")) sub.types <- unlist(lapply(x@chunk.data, function(x) x@chunk.type))
  dat <- x@chunk.data[[1]]
  if (type == "FORM") {
    ## FORM can hold multiple objects, return the interpretation of these objects
    ## as a list:
    result <- lapply(x@chunk.data, interpretIFFChunk, ...)
    class(result) <- c("IFF.FORM", "IFF.ANY")
    return(result)
  } else if (type == "ILBM") {
    ## ILBM = interleaved bitmap; return as raster
    ## If subtype contains ANHD and not a BMHD, it should also contain a DLTA chunk rather than a BODY
    ## note that DLTA chunks are actually not interleaved, in contrast to what the parent ILBM chunk
    ## suggests.
    if ("ANHD" %in% sub.types && !("BODY" %in% sub.types)) {
      anhd <- interpretIFFChunk(x@chunk.data[sub.types %in% "ANHD"][[1]], ...)
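      ## DLTA chunks store the difference with a previous frame rather than a
      ## full interleaved bitmap. For the byte vertical decompression applied
      ## below, the bitmap width is first padded to the next multiple of 16
      ## pixels; the result is cropped back to the true width afterwards.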
      dlta <- x@chunk.data[sub.types %in% "DLTA"][[1]]@chunk.data[[1]]
      wbm <- with(anhd, w + (-w %% 16))
      if (anhd$operation == "ByteVerticalCompression") {
        result <- .byteVerticalDecompression(dlta, wbm, anhd$h, anhd$interleave, anhd$flags[2], list(...)$hidden)
        cl <- class(result)
        result <- result[,0:anhd$w]
        class(result) <- cl
        return(result)
      } else {
        stop("Sorry this animation format is not (yet) supported by this package.")
      }
    }
    if ("hidden" %in% names(list(...))) {
      result <- grDevices::as.raster(x, palette = NULL)
    } else {
      result <- grDevices::as.raster(x)
    }
    class(result) <- c("IFF.ILBM", "IFF.ANY", class(result))
    return(result)
  } else if (type == "CMAP") {
    ## COLOUR MAP
    ## when all low bits are 0 assume 12 bit, otherwise assume 24 bit
    colour.depth <- ifelse(all(ProTrackR::loNybble(x@chunk.data[[1]]) == 0), "12 bit", "24 bit")
    result <- amigaRawToColour(dat, colour.depth, "3")
    class(result) <- c("IFF.CMAP", "IFF.ANY")
    return(result)
  } else if(type == "BMHD") {
    ## BITMAP HEADER
    result <- list(
      w = .rawToAmigaInt(dat[1:2], 16, F),
      h = .rawToAmigaInt(dat[3:4], 16, F),
      x = .rawToAmigaInt(dat[5:6], 16, T),
      y = .rawToAmigaInt(dat[7:8], 16, T),
      nPlanes = .rawToAmigaInt(dat[9], 8, F),
      Masking = .rawToAmigaInt(dat[10], 8, F),
      Compression = .rawToAmigaInt(dat[11], 8, F),
      pad = dat[12],
      transparentColour = .rawToAmigaInt(dat[13:14], 16, F),
      xAspect = .rawToAmigaInt(dat[15], 8, F),
      yAspect = .rawToAmigaInt(dat[16], 8, F),
      pageWidth = .rawToAmigaInt(dat[17:18], 16, T),
      pageHeight = .rawToAmigaInt(dat[19:20], 16, T)
    )
    if (result$Masking > 3) result$Masking <- 4
    result$Masking <- c("mskNone", "mskHasMask", "mskHasTransparentColour", "mskLasso", "mskUnknown")[result$Masking + 1]
    if (result$Compression > 1) result$Compression <- 2
    result$Compression <- c("cmpNone", "cmpByteRun1", "cmpUnknown")[result$Compression + 1]
    class(result) <- c("IFF.BMHD", "IFF.ANY")
    return(result)
  } else if (type == "CAMG") {
    ## Amiga Viewport Mode
    result <- .amigaViewPortModes(dat)
    class(result) <- c("IFF.CAMG", "IFF.ANY")
    return(result)
  } else if (type == "CRNG") {
    ## DPaint colour range (used for colour cycling)
    result <- list(
      padding = dat[1:2],
      rate = .rawToAmigaInt(dat[3:4], 16, F)*60/(2^14), ## steps per second
      flags = .rawToAmigaInt(dat[5:6], 16, F),
      low = .rawToAmigaInt(dat[7], 8, F),
      high = .rawToAmigaInt(dat[8], 8, F)
    )
    result$flags[result$flags > 2] <- 2
    result$flags <- c("RNG_OFF", "RNG_ACTIVE", "RNG_REVERSE", "RNG_UNKNOWN")[result$flags + 1]
    class(result) <- c("IFF.CRNG", "IFF.ANY")
    return(result)
  } else if (type == "ANIM") {
    result <- list()
    dpan <- NULL
    suppressWarnings(try(dpan <- interpretIFFChunk(getIFFChunk(x, c("FORM", "ILBM", "DPAN"))), T))
    interleave <- interpretIFFChunk(getIFFChunk(x, c("FORM", "ILBM", "ANHD"), c(2L, 1L, 1L)))$interleave
    interleave[interleave == 0] <- 2
    if (length(x@chunk.data) < (2 + interleave)) stop("Animation contains insufficient information for at least 2 frames.")
    for (i in 1:length(x@chunk.data)) {
      ## Assuming that each sub-form contains a single frame (hence [[1]]):
      ## if first frame contains DLTA, it should be the difference from blank screen
      ## in that case result == list(), and is handled by .byteVerticalDecompression
      result[[i]] <- interpretIFFChunk(x@chunk.data[[i]], hidden = result, ...)[[1]]
    }
    for (k in 1:interleave) {
      if (!all(result[[length(result) + k - interleave]] == result[[k]])) stop("Could not interpret animation DLTA chunks correctly")
    }
    result <- result[1:(length(result) - interleave)]
    palettes <- lapply(result, function(z) attributes(z)$palette)
    asps <- lapply(result, function(z) attributes(z)$asp)
    modes <- lapply(result, function(z) attributes(z)$mode)
    for (i in 1:length(palettes)) {
      if (is.null(palettes[[i]])) palettes[[i]] <- palettes[[i - 1]]
      if (is.null(asps[[i]])) asps[[i]] <- asps[[i - 1]]
      if (i > 1 && !is.null(modes[[1]]) && is.null(modes[[i]])) modes[[i]] <- modes[[i - 1]]
      if (!is.null(modes[[i]]) && modes[[i]] %in% c("HAM6", "HAM8")) {
        result[[i]] <- .indexToHAMraster(result[[i]],
                                         ifelse(modes[[i]] == "HAM8", 8, 6),
                                         palettes[[i]], 0) ## assume transparent colour is zero
      } else {
        result[[i]] <- grDevices::as.raster(apply(result[[i]], 2, function(z) palettes[[i]][z + 1]))
      }
      class(result[[i]]) <- c("IFF.ILBM", "IFF.ANY", class(result[[i]]))
      attributes(result[[i]])[["asp"]] <- asps[[i]]
    }
    if (!is.null(dpan) && dpan$nframes != length(result)) warning("Number of frames does not match the number specified in the DPAN chunk.")
    class(result) <- c("IFF.ANIM", "IFF.ANY")
    return(result)
  } else if (type == "ANHD") {
    result <- list(
      operation = .rawToAmigaInt(dat[1], 8, F),
      mask = as.logical(.rawToBitmap(dat[2], F, F)),
      w = .rawToAmigaInt(dat[3:4], 16, F),
      h = .rawToAmigaInt(dat[5:6], 16, F),
      x = .rawToAmigaInt(dat[7:8], 16, T),
      y = .rawToAmigaInt(dat[9:10], 16, T),
      abstime = .rawToAmigaInt(dat[11:14], 32, T),
      reltime = .rawToAmigaInt(dat[15:18], 32, T),
      interleave = .rawToAmigaInt(dat[19], 8, F),
      pad0 = dat[20],
      flags = as.logical(.rawToBitmap(dat[21:24], F, T)),
      pad1 = dat[25:40]
    )
    if (result$operation > 7) result$operation <- "UnknownMode" else
      result$operation <- c("standard", "XOR", "LongDeltaMode", "ShortDeltaMode",
                            "GeneralDeltamode", "ByteVerticalCompression", "StereoOp5",
                            "ShortLongVerticalDeltaMode")[1 + result$operation]
    class(result) <- c("IFF.ANHD", "IFF.ANY")
    return(result)
  } else if (type == "DLTA") {
    ## without context (its parent ANIM chunk or neighbouring ANHD chunk),
    ## DLTA chunks can't be interpreted. Just return the raw data
    class(dat) <- c("IFF.DLTA", "IFF.ANY")
    return(dat)
  } else if (type == "DPAN") {
    ## DPaint Animation chunk, is only used to determine number of animation frames
    result <- list(
      version = .rawToAmigaInt(dat[1:2], 16, F),
      nframes = .rawToAmigaInt(dat[3:4], 16, F),
      flags = as.logical(.rawToBitmap(dat[5:8], F, T))
    )
    class(result) <- c("IFF.DPAN", "IFF.ANY")
    return(result)
  } else if (type == "VHDR") {
    result <- as.list(.rawToAmigaInt(dat[1:(3*4)], 32, F))
    result <- c(result,
                .rawToAmigaInt(dat[13:14], 16, F),
                .rawToAmigaInt(dat[15:16], 8, F),
                .rawToAmigaInt(dat[17:20], 32, F))
    names(result) <- c("oneShotHiSamples", "repeatHiSamples", "samplesPerHiCycle",
                       "samplesPerSec", "ctOctave", "sCompression", "volume")
    result$sCompression[result$sCompression > 2] <- 2
    result$sCompression <- c("sCmpNone", "sCmpFibDelta", "sCmpUnknown")[result$sCompression + 1]
    class(result) <- c("IFF.VHDR", "IFF.ANY")
    return(result)
  } else if (type == "CHAN") {
    result <- which(c(2, 4, 6) %in% .rawToAmigaInt(dat, 32, F))
    if (length(result) == 0) result <- list(channel = "UNKNOWN") else
      result <- list(channel = c("LEFT", "RIGHT", "STEREO")[result[[1]]])
    class(result) <- c("IFF.CHAN", "IFF.ANY")
    return(result)
  } else if (type == "8SVX") {
    body <- getIFFChunk(x, "BODY")@chunk.data[[1]]
    if ("CHAN" %in% sub.types) {
      chan <- interpretIFFChunk(getIFFChunk(x, "CHAN"), ...)
    } else {
      chan <- interpretIFFChunk(IFFChunk("CHAN"), ...)
    }
    if ("VHDR" %in% sub.types) {
      vhdr <- interpretIFFChunk(getIFFChunk(x, "VHDR"), ...)
    } else {
      warning("Voice header is missing, going to make some assumptions here...")
      vhdr <- interpretIFFChunk(IFFChunk("VHDR"), ...)
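      ## With no VHDR present, assume a single octave, uncompressed one-shot
      ## sample spanning the entire BODY (halved when the audio is stereo):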
      vhdr$oneShotHiSamples <- length(body)
      if (chan == "STEREO") vhdr$oneShotHiSamples <- floor(vhdr$oneShotHiSamples/2)
    }
    if (vhdr$sCompression == "sCmpUnknown") warning("An unknown form of compression is applied to the wave. Trying to continue anyway.")
    if (vhdr$sCompression == "sCmpFibDelta") body <- deltaFibonacciDecompress(body)
    samp.offset <- 0
    wav <- lapply(1:ifelse(chan == "STEREO", 2, 1), function(z){
      lapply(1:vhdr$ctOctave, function(y) {
        l <- ((vhdr$oneShotHiSamples + vhdr$repeatHiSamples)*(2^(y - 1)))
        result <- body[samp.offset + (1:l)]
        samp.offset <<- samp.offset + l
        .rawToAmigaInt(result, 8, T) + 128
      })
    })
    ## wav will be a list of one or more Wave objects.
    ## We have to let wav be a list; we can't downgrade the S4 class
    ## to an S3 IFF.ANY class, otherwise the Wave-methods are
    ## no longer applicable.
    wav <- lapply(1:vhdr$ctOctave, function(y) {
      ## Wave objects are always specified for the left channel when audio is mono,
      ## even though Amiga IFF is specified for the right channel only.
      right <- numeric(0)
      if (chan == "STEREO") right <- wav[[2]][[y]]
      Wave(left = wav[[1]][[y]], right = right, bit = 8, pcm = T, samp.rate = vhdr$samplesPerSec)
    })
    class(wav) <- c("IFF.8SVX", "IFF.ANY", class(wav))
    return(wav)
  } else if (type == "BODY") {
    ## without context, the body cannot be converted to anything useful
    ## return as unprocessed raw data
    class(dat) <- c("IFF.BODY", "IFF.ANY")
    return(dat)
  } else if (type %in% c("ANNO", "AUTH", "CHRS", "NAME", "TEXT", "(c) ")) {
    ## These are simply just ASCII texts, return them as such...
    result <- .rawToCharNull(dat)
    type[type == "(c) "] <- "copyright"
    class(result) <- c(paste0("IFF.", type), "IFF.ANY")
    return(result)
  } else {
    stop("Can't handle this chunk type (yet).")
  }
})

#' Coerce to and create IFFChunk objects
#'
#' Convert `IFF.ANY` objects (created with [interpretIFFChunk()]) into
#' [IFFChunk()] objects. A basic [IFFChunk()] can also be
#' created with this method by providing the chunk type name.
#'
#' IFF data is stored in an [IFFChunk-class()] object when read from an
#' IFF file ([read.iff()]). These objects reflect the file structure
#' well, but the data is stored as `raw` information. IFF files can contain
#' a wide variety of information types, ranging from bitmap images to audio
#' clips. The raw information stored in [IFFChunk()] objects can
#' be interpreted into more meaningful representations that can be handled in
#' R. This is achieved with the [interpretIFFChunk()] method, which
#' returns `IFF.ANY` objects.
#'
#' These `IFF.ANY` objects are a less strict representation of the
#' IFF chunk, but are easier to handle in R. The interpretation method is lossy
#' and may not preserve all information in the `IFF.ANY` object.
#' The [IFFChunk-method()] can coerce `IFF.ANY` back
#' to the more strictly defined [IFFChunk-class()] objects.
#' Be careful with conversions between [IFFChunk-class()] and
#' `IFF.ANY` objects and vice versa, as information may get lost.
#'
#' More detailed information about IFF chunks can be found in the IFF chunk registry
#' (see references).
#'
#' * `IFF.FORM` represents a FORM chunk, which is a container that can hold any kind of chunk.
#' When interpreted, it is represented as a `list`, where each element is an interpreted chunk
#' nested inside the FORM.
#' * `IFF.BODY` represents the actual data in an IFF file. However, without context
#' this chunk cannot be interpreted and is therefore interpreted as a vector of `raw` data.
#' * `IFF.ANIM` represents an animation (ANIM) chunk.
#' When interpreted, it will return a `list` where each
#' element is an animation frame represented as an `IFF.ILBM` object. Each animation frame should be
#' nested inside an ILBM chunk nested inside a FORM chunk, nested inside an ANIM chunk.
#' * `IFF.ANHD` represents an ANimation HeaDer (ANHD) chunk. When interpreted,
#' it returns a named `list` containing the
#' following information:
#'     * `operation` is a `character` string indicating how the bitmap
#'       data for the animation frame is encoded. Can be one of the following:
#'       "`standard`", "`XOR`", "`LongDeltaMode`",
#'       "`ShortDeltaMode`", "`GeneralDeltamode`",
#'       "`ByteVerticalCompression`", "`StereoOp5`", or
#'       "`ShortLongVerticalDeltaMode`". Currently, only the
#'       ByteVerticalCompression is implemented in this package.
#'     * `mask` is a `vector` of 8 `logical` values. It is currently
#'       ignored.
#'     * `w` and `h` are positive `numeric` values, specifying
#'       the width and height of the frame (should be identical for all frames).
#'     * `x` and `y` are `numeric` values, specifying the plotting
#'       position for the frame.
#'     * `abstime` is a positive `numeric` value - currently unused - used for
#'       timing the frame relative to the time the first frame was displayed. In
#'       jiffies (1/60 sec).
#'     * `reltime` is a positive `numeric` value for timing the frame
#'       relative to the time the previous frame was displayed. In jiffies (1/60 sec).
#'     * `interleave` is currently unused. It should be set to 0.
#'     * `pad0` is a padding byte (`raw`) for future use.
#'     * `flags` is a `vector` of 32 `logical` values. They contain
#'       information on how the bitmap data is stored.
#'     * `pad1` are 16 padding bytes (`raw`) for future use.
#' * `IFF.DPAN` represents a DPaint ANimation (DPAN) chunk. Some software will
#' require this chunk to correctly derive the total number of frames in the animation.
#' When interpreted, it will return a named `list` with the following elements:
#'     * `version` a `numeric` version number.
#'     * `nframes` a positive `numeric` value, indicating the number
#'       of frames in the animation.
#'     * `flags` a `vector` of 32 `logical` values. Ignored in
#'       this package as it was intended for future implementations.
#' * `IFF.DLTA` represents a delta mode data chunk (DLTA). The first animation
#' frame is stored as a normal InterLeaved BitMap (ILBM) image as described below.
#' The following frames only store differences in bitmap data compared to the
#' previous frames but are not interleaved. They are thus incorrectly embedded in
#' an ILBM chunk (but this is kept so for backward compatibility). When interpreted,
#' a `grDevices` raster object is returned only showing the differences. It
#' is not very meaningful to interpret these chunks on their own, but rather the
#' entire parent ANIM chunk.
#' * `IFF.ILBM` represents InterLeaved BitMap (ILBM) chunks. It is interpreted here as a
#' raster image (see [grDevices::as.raster()]). ILBM chunks are usually nested inside
#' a FORM container.
#' * `IFF.BMHD` represents the header chunk of a bitmap (BMHD), and should always be present
#' (nested inside) an ILBM chunk. It is interpreted as a named list containing the following elements:
#'     * `w` and `h` are positive `numeric` values specifying
#'       the bitmap width and height in pixels. Note that the width
#'       can be any positive whole number, whereas the bitmap data always
#'       has a width divisible by 16.
#'     * `x` and `y` are `numeric` values specifying the plotting
#'       position relative to the top left position of the screen.
#'       Although required in the bitmap header, it is ignored in the
#'       interpretation of bitmap images.
#'     * `nPlanes` is a positive value indicating the number of
#'       bitplanes in the image. The number of colours in an image
#'       can be calculated as `2^nPlanes`.
#'     * `Masking` indicates whether there are bitplanes that should
#'       be masked (i.e. are treated as transparent). It is a `character`
#'       string equalling any of the following: "`mskNone`",
#'       "`mskHasMask`", "`mskHasTransparentColour`",
#'       "`mskLasso`" or "`mskUnknown`". Only the first (no transparency)
#'       and third (one of the colours should be treated as transparent)
#'       ids are currently interpreted correctly. The others are ignored.
#'       "`mskUnknown`" means that an undocumented mask is applied
#'       to the image.
#'     * `Compression` indicates whether the bitmap data is
#'       compressed. It is a `character` string that can equal any
#'       of the following: "`cmpNone`", "`cmpByteRun1`" or
#'       "`cmpUnknown`". The latter means an undocumented form of
#'       compression is applied and is currently ignored. In most cases
#'       bitmap data is compressed with the `cmpByteRun1` algorithm
#'       ([packBitmap()]). In some cases, bitmap data is not
#'       compressed (`cmpNone`).
#'     * `pad` is a `raw` byte that is only used to
#'       align data. It is ignored in the interpretation.
#'     * `transparentColour` is a `numeric` value that indicates
#'       which colour number in the palette should be treated as fully
#'       transparent (when `Masking` equals
#'       "`mskHasTransparentColour`").
#'     * `xAspect` and `yAspect` are positive `numeric`
#'       values that indicate the aspect ratio of
#'       the pixels in the image. Amiga screen modes allowed for some
#'       extreme pixel aspect ratios. These values are used to stretch
#'       the image to their intended display mode.
#'     * `pageWidth` and `pageHeight` are positive
#'       `numeric` values indicating the size of the screen in which
#'       the image should be displayed. They are ignored in the
#'       interpretation of the image.
#' * `IFF.CMAP` represents the colour map (CMAP) or palette of a bitmap image. Although common,
#' the chunk is optional and can be omitted from the parent ILBM chunk. It is interpreted as a
#' vector of colours (i.e., a `character` string formatted as '#RRGGBB' or named colours such as
#' 'blue').
#' * `IFF.CAMG` represents a chunk with information with respect
#' to the display mode in which the bitmap image should be displayed.
#' This information can be used to determine the correct pixel aspect
#' ratio, or is sometimes required to correctly interpret the bitmap
#' information. The `IFF.CAMG` chunk is interpreted as a named list
#' containing the following elements:
#'     * `monitor`: a `factor` indicating the hardware monitor
#'       on which the image was created and should be displayed (see
#'       [amiga_monitors()]).
#'     * `display.mode`: a `factor` indicating the display
#'       mode in which the image should be displayed (see
#'       [amiga_display_modes()]).
#' * `IFF.CRNG` is an optional chunk nested in an ILBM chunk.
#' It represents a `colour range' and is used to cycle through
#' colours in the bitmap's palette in order to achieve
#' animation effects. It is interpreted as a named list with the
#' following elements. This chunk is currently not used with
#' the interpretation of ILBM images.
#'     * `padding` are two `raw` padding bytes and are
#'       ignored when interpreted.
#'     * `rate` is a `numeric` value specifying the rate
#'       at which the colours are cycled. The rate is in steps per
#'       second.
#'     * `flags` is a flag that indicates how colours should
#'       be cycled. It is a `character` string that can equal
#'       any of the following: "`RNG_OFF`", "`RNG_ACTIVE`",
#'       "`RNG_REVERSE`" or "`RNG_UNKNOWN`". When equal to the
#'       first, colours are not cycled. When equal to the second, colours
#'       are cycled. When equal to the third, colours are cycled in
#'       reverse direction. When equal to the latter, an undocumented
#'       form of cycling is applied.
#'     * `low` and `high` are `numeric` indices of
#'       colours between which should be cycled. Only colours from
#'       index `low` up to index `high` are affected.
#' * `IFF.8SVX` represents 8-bit sampled voice chunks (8SVX). The original
#' Amiga supported 8-bit audio which could be stored using the IFF. 8SVX chunks
#' can contain separate audio samples for each octave. 8SVX chunks are usually
#' stored inside a FORM container. Its body chunk contains 8-bit PCM wave data that
#' could be compressed. When the 8SVX chunk is
#' interpreted with this package, a `list` is returned where each element
#' represents an octave given as a [tuneR::Wave()] object. Possible
#' chunks nested in 8SVX chunks and currently supported by this package are
#' as follows.
#' * `IFF.VHDR` represents voice header chunks (VHDR). It contains (meta-)information about
#' the audio stored in the body of the parent 8SVX chunk. When interpreted, a named `list` is
#' returned with the following elements:
#'     * `oneShotHiSamples` is a `numeric` value indicating how many samples there are in the
#'       audio wave of the first octave in the file, that should not be looped (repeated).
#'     * `repeatHiSamples` is a `numeric` value indicating how many samples there are in the
#'       audio wave of the first octave in the file, that should be looped (repeated).
#'     * `samplesPerHiCycle` is a `numeric` value specifying the
#'       number of samples per repeat cycle in the first octave, or 0 when unknown.
#'       The number of `repeatHiSamples` should be an exact multiple of
#'       `samplesPerHiCycle`.
#'     * `samplesPerSec` is a `numeric` value specifying the data
#'       sampling rate.
#'     * `ctOctave` a positive whole `numeric` value indicating how many octaves are included.
#'       In 8SVX files the audio wave is resampled for each octave. The wave data in the body starts with
#'       the audio sample in the highest octave (least number of samples). The data is then followed by
#'       each subsequent octave, where the number of samples increases by a factor of 2 for each octave.
#'     * `sCompression` is a `character` string indicating whether and how the wave data in the body
#'       is compressed. It can have one of the following values: "`sCmpNone`" (no compression),
#'       "`sCmpFibDelta`" ([deltaFibonacciCompress()]ion is applied), "`sCmpUnknown`" (an
#'       undocumented and unknown form of compression is applied).
#'     * `volume` is a numeric value between `0` (minimum) and `0x10000` (maximum) playback volume.
#' * `IFF.CHAN` represents the channel chunk (CHAN). When interpreted it returns a named list
#' with 1 named element:
#' "`channel`". Its value can be one of the following `character` strings "`LEFT`", "`RIGHT`" or
#' "`STEREO`". This indicates for how many (one or two) audio channels data is available in the
#' body of the parent
#' 8SVX chunk. It also indicates to which channels the audio should be played back.
#' * `IFF.ANNO`, `IFF.AUTH`, `IFF.CHRS`, `IFF.NAME`, `IFF.TEXT` and `IFF.copyright`
#' are all unformatted text chunks that can be included optionally in any of the chunk types.
#' Respectively, they #' represent an annotation, the author's name, a generic character string, the name of the work, #' generic unformatted text, #' and copyright text. They are interpreted as a `character` string. #' @param x An S3 class `IFF.ANY` object that needs to be coerced into an #' [IFFChunk-class()] object. `IFF.ANY` objects are created with the #' [interpretIFFChunk()] method. `x` can also be a `character` string #' of a IFF chunk type (e.g., "`FORM`" or "`BMHD`"). In that case an #' [IFFChunk()] object of that type is created with some basic content. #' @param ... Arguments passed onto methods underlying the interpretation of the #' specific IFF chunks. Allowed arguments depend on the specific type of IFF chunk that #' `x` represents. #' @returns Returns an [IFFChunk-class()] representation of `x`. #' @examples #' \dontrun{ #' ## load an IFF file #' example.iff <- read.iff(system.file("ilbm8lores.iff", package = "AmigaFFH")) #' #' ## interpret the IFF file (in some cases information #' ## will get lost in this step): #' example.itpt <- interpretIFFChunk(example.iff) #' #' ## now coerce back to a formal IFFChunk class object. #' ## Only information in the interpreted object is used #' ## The coerced object may therefore depart from the #' ## original read from the file. #' example.coerce <- IFFChunk(example.itpt) #' #' ## and indeed the objects are not identical, as shown below. #' ## In this case the difference is not disastrous, the order #' ## of the colours in the palette have shifted. But be careful #' ## with switching between formal IFFChunk objects and #' ## interpreted IFF.ANY objects. #' identical(example.iff, example.coerce) #' #' ## It is also possible to create simple IFFChunk objects #' ## by providing the desired chunk type identifier as a #' ## character string. #' #' ## This creates a basic bitmap header: #' bmhd <- IFFChunk("BMHD") #' #' ## This creates a basic colour palette: #' cmap <- IFFChunk("CMAP") #' } #' @references <https://wiki.amigaos.net/wiki/IFF_FORM_and_Chunk_Registry> #' @name IFFChunk-method #' @rdname IFFChunk #' @export IFFChunk <- function (x, ...) { UseMethod("IFFChunk", x) } #' @rdname IFFChunk #' @method IFFChunk character #' @export IFFChunk.character <- function(x, ...) 
{ if (length(x) > 1) warning("x has more than 1 element, only the first will be used...") x <- x[[1]] if (x == "FORM") { return(new("IFFChunk")) } else if (x == "BODY") { return(new("IFFChunk", chunk.type = "BODY")) } else if (x == "CMAP") { return(new("IFFChunk", chunk.type = "CMAP", chunk.data = list(colourToAmigaRaw(grDevices::gray(round(seq(0, 15, length.out = 16))/15), "12 bit", "3")))) } else if (x == "BMHD") { BMHD <- list( w = 16, h = 16, x = 0, y = 0, nPlanes = 4, Masking = "mskNone", Compression = "cmpByteRun1", pad = raw(1), transparentColour = 0, xAspect = 44, yAspect = 44, pageWidth = 320, pageHeight = 200 ) class(BMHD) <- "IFF.BMHD" return(IFFChunk(BMHD)) } else if(x == "CAMG") { return(.inverseViewPort("LORES_KEY", "DEFAULT_MONITOR_ID")) } else if (x == "CRNG") { dat <- list( padding = raw(2), rate = 0, flag = "RNG_OFF", low = 0, high = 0 ) class(dat) <- "IFF.CRNG" return(IFFChunk(dat)) } else if (x == "ILBM") { bm.dep <- 1 bmhd <- IFFChunk("BMHD") bmhd.int <- interpretIFFChunk(bmhd) bmhd.int$nPlanes <- 1 bmhd <- IFFChunk(bmhd.int) pal <- c("black", "white") class(pal) <- "IFF.CMAP" pal <- IFFChunk(pal) body <- new("IFFChunk", chunk.type = "BODY", chunk.data = list( as.raw(c(0x01, 0x03, 0xc0, 0x01, 0x0c, 0x30, 0x01, 0x10, 0x08, 0x01, 0x20, 0x04, 0x01, 0x40, 0x02, 0x01, 0x44, 0x22, 0x01, 0x8e, 0x71, 0x01, 0x84, 0x21, 0x01, 0x80, 0x01, 0x01, 0x8e, 0x71, 0x01, 0x44, 0x22, 0x01, 0x44, 0x22, 0x01, 0x23, 0xc4, 0x01, 0x10, 0x08, 0x01, 0x0c, 0x30, 0x01, 0x03, 0xc0)) )) return(new("IFFChunk", chunk.type = "ILBM", chunk.data = list( bmhd, pal, IFFChunk("CAMG"), body ) )) } else if (x == "ANIM") { anhd <- lapply(1:4, function(y) { anhdy <- interpretIFFChunk(IFFChunk("ANHD")) anhdy$abstime <- y*2 IFFChunk(anhdy) }) frame1 <- IFFChunk("ILBM")@chunk.data frame1 <- c(frame1[1:(length(frame1) - 1)], anhd[[1]], frame1[length(frame1)]) frame1 <- new("IFFChunk", chunk.type = "ILBM", chunk.data = frame1) dlta2 <- new("IFFChunk", chunk.type = "DLTA", chunk.data = list(unPackBitmap(as.raw(c(0x03, 0x00, 0x00, 0x00, 0x40, 0xc4, 0x00, 0x06, 0x03, 0x05, 0x83, 0x02, 0x11, 0x61, 0x08))))) dlta3 <- new("IFFChunk", chunk.type = "DLTA", chunk.data = list(unPackBitmap(as.raw(c(0xc1, 0x00))))) result <- new("IFFChunk", chunk.type = "ANIM", chunk.data = list( new("IFFChunk", chunk.type = "FORM", chunk.data = list(frame1)), new("IFFChunk", chunk.type = "FORM", chunk.data = list( new("IFFChunk", chunk.type = "ILBM", chunk.data = list(anhd[[2]], dlta2)) )), new("IFFChunk", chunk.type = "FORM", chunk.data = list( new("IFFChunk", chunk.type = "ILBM", chunk.data = list(anhd[[3]], dlta3)) )), new("IFFChunk", chunk.type = "FORM", chunk.data = list( new("IFFChunk", chunk.type = "ILBM", chunk.data = list(anhd[[4]], dlta3)) )) )) return(result) } else if (x == "ANHD") { result <- list( operation = "ByteVerticalCompression", mask = rep(F, 8), w = 16, h = 16, x = 0, y = 0, abstime = 2, reltime = 2, interleave = 0, pad0 = raw(1), flags = rep(F, 32), pad1 = raw(16) ) class(result) <- "IFF.ANHD" return(IFFChunk(result)) } else if (x == "DPAN") { result <- list( version = 4, nframes = 2, flags = rep(F, 32) ) class(result) <- "IFF.DPAN" return(IFFChunk(result)) } else if (x == "CHAN") { chan <- list(channel = "LEFT") class(chan) <- "IFF.CHAN" return(IFFChunk(chan)) } else if(x == "VHDR") { vhdr <- list( oneShotHiSamples = 0, repeatHiSamples = 0, samplesPerHiCycle = 0, samplesPerSec = 16574.28, ctOctave = 1, sCompression = "sCmpNone", volume = 0x10000L ) class(vhdr) <- "IFF.VHDR" return(IFFChunk(vhdr)) } else if(x == "8SVX") { 
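    ## The default 8SVX chunk below holds a looped 8-bit sine wave (62 samples
    ## per cycle, 10 cycles) at a 16574.28 Hz sample rate: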
    return(WaveToIFF(Wave(round(128*sin((0:(10*62))*2*pi/62) - 0.5) + 128,
                          bit = 8, samp.rate = 16574.28), loop.start = 0))
  } else if (x %in% c("ANNO", "AUTH", "CHRS", "NAME", "TEXT", "(c) ")) {
    return(new("IFFChunk", chunk.type = x, chunk.data = list(raw(0))))
  }
  stop("Cannot generate this chunk type...")
}

#' @rdname IFFChunk
#' @method IFFChunk IFF.FORM
#' @export
IFFChunk.IFF.FORM <- function(x, ...) {
  new("IFFChunk", chunk.type = "FORM", chunk.data = lapply(x, IFFChunk, ...))
}

#' @rdname IFFChunk
#' @export
IFFChunk.IFF.BODY <- function(x, ...) {
  x <- as.raw(x, ...)
  return(new("IFFChunk", chunk.type = "BODY", chunk.data = list(x)))
}

#' @rdname IFFChunk
#' @export
IFFChunk.IFF.ANNO <- function(x, ...) {
  return(.text.chunk(x, "ANNO", ...))
}

#' @rdname IFFChunk
#' @export
IFFChunk.IFF.AUTH <- function(x, ...) {
  return(.text.chunk(x, "AUTH", ...))
}

#' @rdname IFFChunk
#' @export
IFFChunk.IFF.CHRS <- function(x, ...) {
  return(.text.chunk(x, "CHRS", ...))
}

#' @rdname IFFChunk
#' @export
IFFChunk.IFF.NAME <- function(x, ...) {
  return(.text.chunk(x, "NAME", ...))
}

#' @rdname IFFChunk
#' @export
IFFChunk.IFF.TEXT <- function(x, ...) {
  return(.text.chunk(x, "TEXT", ...))
}

#' @rdname IFFChunk
#' @export
IFFChunk.IFF.copyright <- function(x, ...) {
  return(.text.chunk(x, "(c) ", ...))
}

#' @rdname plot
#' @name plot
#' @export
plot.IFFChunk <- function(x, y, ...) {
  if (missing(y)) y <- NULL
  graphics::plot(interpretIFFChunk(x), ...)
}

#' @rdname plot
#' @name plot
#' @export
plot.IFF.FORM <- function(x, y, ...) {
  invisible(lapply(x, graphics::plot, ...))
}

#' @export
print.IFFChunk <- function(x, ...) {
  ctypes <- function(z) {
    result <- paste0("- ", z@chunk.type)
    temp <- unlist(lapply(z@chunk.data, function(a) {
      if (inherits(a, "IFFChunk")) return(ctypes(a)) else return(NULL)
    }))
    if (length(temp) > 0) result <- c(result, paste0(" ", temp))
    return(result)
  }
  typ <- paste0(ctypes(x), collapse = "\n")
  cat(typ)
}

as.list.IFFChunk <- function(x, ...) {
  slotList <- function(x) {
    if (inherits(x@chunk.data[[1]], "raw")) {
      return(x@chunk.data[[1]])
    } else {
      result <- lapply(x@chunk.data, slotList)
      names(result) <- unlist(lapply(x@chunk.data, function(x) x@chunk.type))
      id.num <- stats::ave(seq_along(names(result)), names(result), FUN = seq_along)
      names(result) <- sprintf("%s_%i", names(result), id.num)
      return(result)
    }
  }
  return(slotList(x))
}

.rawToIFFChunk <- function(x, skip.size = F) {
  offset <- 0
  chunks <- list()
  while (offset < length(x)) {
    chunk.type <- rawToChar(x[offset + 1:4])
    if (skip.size) {
      offset <- offset + 4
      return(list(methods::new("IFFChunk",
                               chunk.type = chunk.type,
                               chunk.data = .rawToIFFChunk(x[(offset + 1):length(x)]))))
    }
    chunk.size <- .rawToAmigaInt(x[offset + 5:8], 32, F)
    offset <- offset + 8
    chunk.data <- x[offset + 1:chunk.size]
    offset <- offset + chunk.size
    ## Data should be word aligned:
    if ((offset %% 2) == 1) offset <- offset + 1
    if (chunk.type %in% "FORM") {
      ## contains nested chunks; the container directly following 'FORM' will not specify the chunk length (hence the skip)
      chunk.data <- .rawToIFFChunk(chunk.data, skip.size = T)
    } else if (chunk.type %in% c("ILBM", "8SVX", "ANIM")) {
      chunk.data <- .rawToIFFChunk(chunk.data, skip.size = F)
    }
    if (inherits(chunk.data, "raw")) chunk.data <- list(chunk.data)
    chunks[[length(chunks) + 1]] <- methods::new("IFFChunk",
                                                 chunk.type = chunk.type,
                                                 chunk.data = chunk.data)
  }
  return(chunks)
}

.text.chunk <- function(x, type, ...)
{ return(new("IFFChunk", chunk.type = type, chunk.data = list(charToRaw(x)))) } setGeneric("rawToIFFChunk", function(x) standardGeneric("rawToIFFChunk")) #' Coerce raw data to an IFFChunk class object #' #' Coerce raw data, as it would be stored in the Interchange File Format (IFF), and #' convert it into an [IFFChunk()] class object. #' #' This method should work for all IFF chunk types that are implemented in this package #' (see [IFFChunk-method()] for details). For non-implemented chunks this method #' may work properly as long as the chunks are nested inside a FORM type container chunk. #' This method is provided for your convenience, but it is recommended to import IFFChunk #' methods using the [read.iff()] function. Use [AmigaFFH::as.raw()] #' to achieve the inverse of this method. #' #' @docType methods #' @rdname rawToIFFChunk #' @name rawToIFFChunk #' @aliases rawToIFFChunk,raw-method #' @param x A vector of raw data that needs to be converted into a [IFFChunk()] #' class object. #' @returns Returns an [IFFChunk()] class object based on `x`. #' @examples #' \dontrun{ #' ## Get an IFFChunk object: #' example.iff <- read.iff(system.file("ilbm8lores.iff", package = "AmigaFFH")) #' #' ## Coerce it to raw data: #' example.raw <- as.raw(example.iff) #' #' ## Coerce raw data to IFF chunk: #' example.iff.new <- rawToIFFChunk(example.raw) #' #' ## These conversions were non-destructive: #' identical(example.iff, example.iff.new) #' } #' @family iff.operations #' @family raw.operations #' @author Pepijn de Vries #' @export setMethod("rawToIFFChunk", "raw", function(x) { result <- .rawToIFFChunk(x) if (length(result) == 1) result <- result[[1]] return(result) })
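## Reading and writing IFF data is designed to be non-destructive. A minimal
## round-trip sketch (not part of the package API; it uses the basic ILBM
## chunk produced by the `IFFChunk` character constructor above):
##
##   ilbm <- IFFChunk("ILBM")
##   form <- new("IFFChunk", chunk.type = "FORM", chunk.data = list(ilbm))
##   identical(as.raw(rawToIFFChunk(as.raw(form))), as.raw(form))  # expected TRUE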
#' @rdname IFFChunk #' @export IFFChunk.IFF.CHAN <- function(x, ...) { x <- c(2, 4, 6)[c("LEFT", "RIGHT", "STEREO") == x$channel[[1]]] x <- list(.amigaIntToRaw(x, 32, F)) return(new("IFFChunk", chunk.type = "CHAN", chunk.data = x)) } #' @rdname IFFChunk #' @export IFFChunk.IFF.VHDR <- function(x, ...) { compr <- x$sCompression compr <- which(c("sCmpNone", "sCmpFibDelta") == x$sCompression) - 1 ## if the compression type is unknown, set to 0 if (length(compr) == 0) compr <- 0 result <- c( .amigaIntToRaw(c(x$oneShotHiSamples, x$repeatHiSamples, x$samplesPerHiCycle), 32, F), .amigaIntToRaw(x$samplesPerSec, 16, F), .amigaIntToRaw(c(x$ctOctave, compr), 8, F), .amigaIntToRaw(x$volume, 32, F) ) return(new("IFFChunk", chunk.type = "VHDR", chunk.data = list(result))) } #' @rdname IFFChunk #' @export IFFChunk.IFF.8SVX <- function(x, ...) { ## check lengths of the waveforms in the list: testlen <- unlist(lapply(x, length))/length(x[[1]]) if (!all(testlen == 2^((1:length(x)) - 1))) { warning("Each subsequent waveform in the list should have twice the length of the previous waveform. Continuing with the first element only.") x <- x[1] } if (length(unique(unlist(lapply(x, function(y) y@stereo)))) != 1) { warning("x contains both mono and stereo samples, can't combine these. Continuing with the first element of x only.") x <- x[1] } if (!all(unlist(lapply(x, function(y) y@bit)) == 8)) { stop("All waves in x should be 8-bit.") } if (!all(unlist(lapply(x, function(y) y@pcm)))) { stop("All waves in x should be pcm formatted.") } wav <- .amigaIntToRaw(c(do.call(c, lapply(x, function(y) y@left)) - 128, do.call(c, lapply(x, function(y) y@right)) - 128), 8, T) if ((length(wav) %% 2) != 0) wav <- c(wav, raw(1)) vhdr <- list( oneShotHiSamples = length(x[[1]]), repeatHiSamples = 0, samplesPerHiCycle = 0, samplesPerSec = x[[1]]@samp.rate, ctOctave = length(x), sCompression = "sCmpNone", volume = 0x10000L ) class(vhdr) <- "IFF.VHDR" vhdr <- IFFChunk(vhdr) chan <- list(channel = ifelse(x[[1]]@stereo, "STEREO", "LEFT")) class(chan) <- "IFF.CHAN" chan <- IFFChunk(chan) return(new("IFFChunk", chunk.type = "8SVX", chunk.data = list( vhdr, chan, new("IFFChunk", chunk.type = "BODY", chunk.data = list(wav)) ))) } #' @rdname plot #' @name plot #' @export plot.IFF.8SVX <- function(x, y, ...) { invisible(lapply(x, tuneR::plot, ...)) } #' Playing Amiga audio data #' #' A wrapper for [tuneR()]-package's [tuneR::play()] routine. Allowing it to play #' Amiga audio (for instance stored in an 8SVX Interchange File Format). #' #' A wrapper for [tuneR()]-package's [tuneR::play()] routine. It will try to play #' audio using an external audio player. When 8SVX audio is played, each octave is played separately. #' When a FORM container contains multiple 8SVX samples, they are also played successively. #' #' Note that a separate package is developed to interpret and play ProTracker modules and samples #' ([`ProTrackR()`][ProTrackR::ProTrackR-package]). #' @rdname play #' @name play #' @aliases play,ANY-method #' @param object An [IFFChunk-class()] object that needs to be played. The [IFFChunk()] #' should be of type FORM, containing an 8SVX chunk, or an 8SVX itself. `object` can also be of class #' `IFF.FORM` or `IFF.8SVX`. See [tuneR::play()] for other objects that can be played. #' @param player Path to the external audio player. See [tuneR::play()] for more details. #' @param ... Arguments passed onto the tuneR [play()] routine. 
#' @return Returns a list of data returned by tuneR's [tuneR::play()], for which the output
#' is undocumented.
#' @examples
#' \dontrun{
#' ## First get an audio sample from the ProTrackR package
#' snare.samp <- ProTrackR::PTSample(ProTrackR::mod.intro, 2)
#'
#' ## Coerce it into an IFFChunk object:
#' snare.iff <- WaveToIFF(snare.samp)
#'
#' ## Play the 8SVX sample:
#' play(snare.iff)
#' }
#' @author Pepijn de Vries
#' @export
setMethod("play", "ANY", function(object, player = NULL, ...) {
  if (inherits(object, "IFF.FORM")) {
    invisible(lapply(object, function(x) {
      lapply(x, function(y) {
        tuneR::play(y, ...)
      })
    }))
  } else if (inherits(object, "IFF.8SVX")) {
    invisible(lapply(object, function(x) {
      tuneR::play(x, ...)
    }))
  } else {
    stop(sprintf("Sorry, can't play %s object", class(object)))
  }
})

#' @rdname play
#' @name play
#' @aliases play,IFFChunk-method
#' @export
setMethod("play", "IFFChunk", function(object, player = NULL, ...) {
  play(interpretIFFChunk(object), player, ...)
})

#' Convert WaveMC objects into an Interchange File Format object
#'
#' Convert [tuneR::WaveMC()] objects (or objects that can be coerced to
#' `WaveMC` objects) into an [IFFChunk-class()] object which
#' can be stored as a valid Interchange File Format ([write.iff()]).
#'
#' [tuneR::WaveMC()] objects can be read from contemporary file containers
#' with [tuneR::readWave()] or [tuneR::readMP3()]. With this
#' function such objects can be converted into an [IFFChunk-class()] object
#' which can be stored conforming to the Interchange File Format ([write.iff()]).
#'
#' When `x` is not a pcm formatted 8-bit sample, `x` will first be
#' normalised and scaled to a pcm-formatted 8-bit sample using
#' [tuneR::normalize()]. If you don't like the result you need to convert
#' the sample to 8-bit pcm yourself before calling this function.
#'
#' @rdname WaveToIFF
#' @name WaveToIFF
#' @param x A [tuneR::WaveMC()] object that needs to be converted into an [IFFChunk()] object. `x`
#' can also be any other class object that can be coerced into a [tuneR::WaveMC()] object. [tuneR::Wave()]
#' and [`PTSample()`][ProTrackR::PTSample-class] objects are therefore also allowed.
#' @param loop.start If the sample should be looped from a specific position to the
#' end of the sample, this argument specifies the starting position in samples (with
#' a base of 0) for looping. `loop.start` therefore should be a whole non-negative
#' number. When set to `NA` or negative values, the sample will not be looped.
#' @param octaves A whole positive `numeric` value indicating the number of octaves
#' that should be stored in the resulting IFF chunk. The original wave will be resampled
#' for each value larger than 1. Each subsequent octave will contain precisely twice
#' as many samples as the previous octave.
#' @param compress A `character` string indicating whether compression should be applied to the waveform. "`sCmpNone`"
#' (default) applies no compression, "`sCmpFibDelta`" applies the lossy [deltaFibonacciCompress()]ion.
#' @param ... Currently ignored.
#' @return Returns an [IFFChunk-class()] object with a FORM container that
#' contains an 8SVX waveform based on `x`.
#' @examples
#' \dontrun{
#' ## First get an audio sample from the ProTrackR package
#' snare.samp <- ProTrackR::PTSample(ProTrackR::mod.intro, 2)
#'
#' ## The sample can easily be converted into an IFFChunk:
#' snare.iff <- WaveToIFF(snare.samp)
#'
#' ## You could also first convert the sample into a Wave object:
#' snare.wav <- as(snare.samp, "Wave")
#'
#' ## And then convert into an IFFChunk. The result is the same:
#' snare.iff <- WaveToIFF(snare.wav)
#'
#' ## You could also use a sine wave as input (although you will get some warnings).
#' ## This will work because the vector of numeric data can be coerced to
#' ## a WaveMC object
#' sine.iff <- WaveToIFF(sin((0:2000)/20))
#' }
#' @family iff.operations
#' @references <https://en.wikipedia.org/wiki/8SVX>
#' @author Pepijn de Vries
#' @export
WaveToIFF <- function(x, loop.start = NA, octaves = 1, compress = c("sCmpNone", "sCmpFibDelta"), ...) {
  octaves <- round(octaves[[1]])
  loop.start <- round(loop.start[[1]])
  x <- methods::as(x, "WaveMC")
  compress <- match.arg(compress)
  if (x@bit != 8 || !x@pcm) {
    warning(sprintf("Original %i-bit wave is normalized to an 8-bit wave.", x@bit))
    ## Note tuneR's implementation lets 8-bit audio range from 0-254 instead of 0-255
    ## writeWave from the same package uses the range of 0-255. Maybe contact package
    ## maintainer to check whether this discrepancy is intentional
    x <- tuneR::normalize(x, "8", pcm = T)
  }
  if (is.null(colnames(x@.Data))) colnames(x@.Data) <- MCnames$name[1:ncol(x@.Data)]
  x <- tuneR::WaveMC(data = cbind(FL = rowMeans(x@.Data[,grepl("L", colnames(x@.Data)), drop = F]),
                                  FR = rowMeans(x@.Data[,grepl("R", colnames(x@.Data)), drop = F])),
                     bit = x@bit, samp.rate = x@samp.rate, pcm = x@pcm)
  if (any(is.nan(x@.Data[,"FL"]))) x@.Data <- x@.Data[,colnames(x@.Data) != "FL", drop = F]
  if (any(is.nan(x@.Data[,"FR"]))) x@.Data <- x@.Data[,colnames(x@.Data) != "FR", drop = F]
  if (octaves > 1) {
    temp <- lapply(2:octaves, function(y) {
      tuneR::WaveMC(data = apply(x@.Data, 2, function(z) {
        ProTrackR::resample(z, 1, y)
      }), bit = x@bit, samp.rate = x@samp.rate, pcm = x@pcm)
    })
  }
  x <- list(x)
  if (octaves > 1) {
    x <- c(x, temp)
    rm(temp)
  }
  oneshot <- length(x[[1]]@.Data[,1])
  repeatsamp <- 0
  if (!is.na(loop.start) && loop.start >= 0) {
    if (loop.start >= oneshot) stop("'loop.start' cannot be equal or larger than the sample length.")
    repeatsamp <- oneshot - loop.start
    oneshot <- loop.start
  }
  vhdr <- list(
    oneShotHiSamples  = oneshot,
    repeatHiSamples   = repeatsamp,
    samplesPerHiCycle = 0,
    samplesPerSec     = x[[1]]@samp.rate,
    ctOctave          = octaves,
    sCompression      = compress,
    volume            = 0x10000L
  )
  class(vhdr) <- "IFF.VHDR"
  vhdr <- IFFChunk(vhdr)
  chan <- list(channel = ifelse(all(c("FL", "FR") %in% colnames(x[[1]]@.Data)), "STEREO",
                                ifelse("FL" %in% colnames(x[[1]]@.Data), "LEFT", "RIGHT")))
  class(chan) <- "IFF.CHAN"
  if (chan$channel == "STEREO" && octaves > 1)
    warning(paste0("Multiple octaves in stereo may not be handled correctly by Amiga software.\n",
                   "Convert your sample to mono first or only use 1 octave to avoid problems."))
  chan <- IFFChunk(chan)
  wav <- unlist(lapply(1:ncol(x[[1]]@.Data), function(z) {
    unlist(lapply(x, function(y) .amigaIntToRaw(y@.Data[,z] - 128, 8, T)))
  }))
  if (compress == "sCmpFibDelta") {
    wav <- deltaFibonacciCompress(wav)
  }
  ## Pad with zero if the body has an odd length:
  if ((length(wav) %% 2) != 0) wav <- c(wav, raw(1))
wav <- list(wav) wav <- list(vhdr, chan, new("IFFChunk", chunk.type = "BODY", chunk.data = wav)) wav <- list(new("IFFChunk", chunk.type = "8SVX", chunk.data = wav)) return(new("IFFChunk", chunk.type = "FORM", chunk.data = wav)) }
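## A brief illustration of WaveToIFF() (wrapped in `if (FALSE)` so it is not
## executed when the package loads). The sine wave below is a made-up
## stand-in for real audio data:
if (FALSE) {
  ## A one second 8-bit mono sine wave (440 Hz) sampled at 8kHz:
  wav <- tuneR::Wave(left = round(127 + 127*sin(2*pi*440*(0:7999)/8000)),
                     samp.rate = 8000, bit = 8, pcm = TRUE)

  ## Store three octaves and loop the sample from position 4000 onwards:
  chunk <- WaveToIFF(wav, loop.start = 4000, octaves = 3)

  ## Optionally apply the lossy Fibonacci-delta compression instead:
  chunk.compressed <- WaveToIFF(wav, compress = "sCmpFibDelta")

  ## The result can be written to disk as a valid IFF file:
  write.iff(chunk, file.path(tempdir(), "sine.iff"))
}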
#' @rdname IFFChunk #' @method IFFChunk IFF.ILBM #' @export IFFChunk.IFF.ILBM <- function(x, ...) { if (inherits(x, "matrix")) stop(paste("This IFF chunk interpretation is probably based on", "an ANIM DLTA chunk. It can't be converted back into an IFFChunk object.")) rasterToIFF(x, ...)@chunk.data[[1]] } #' @rdname IFFChunk #' @method IFFChunk IFF.CMAP #' @export IFFChunk.IFF.CMAP <- function(x, ...) { result <- colourToAmigaRaw(x, n.bytes = "3", ...) return(new("IFFChunk", chunk.type = "CMAP", chunk.data = list(result))) } #' @rdname IFFChunk #' @export IFFChunk.IFF.BMHD <- function(x, ...) { compr <- which(c("cmpNone", "cmpByteRun1") == x$Compression) - 1 ## if the compression type is unknown, set to 0 if (length(compr) == 0) compr <- 0 mask <- which(c("mskNone", "mskHasMask", "mskHasTransparentColour", "mskLasso") == x$Masking) - 1 ## if the masking type is unknown, set to 0 if (length(mask) == 0) mask <- 0 result <- c(.amigaIntToRaw(c(x$w, x$h), 16, F), .amigaIntToRaw(c(x$x, x$y), 16, T), .amigaIntToRaw(c(x$nPlanes, mask, compr), 8, F), as.raw(x$pad)[[1]], .amigaIntToRaw(x$transparentColour, 16, F), .amigaIntToRaw(c(x$xAspect, x$yAspect), 8, F), .amigaIntToRaw(c(x$pageWidth, x$pageHeight), 16, F)) return(new("IFFChunk", chunk.type = "BMHD", chunk.data = list(result))) } #' @rdname IFFChunk #' @export IFFChunk.IFF.CAMG <- function(x, ...) { return(.inverseViewPort(x$display.mode, x$monitor)) } #' @rdname IFFChunk #' @export IFFChunk.IFF.CRNG <- function(x, ...) { flag <- which(c("RNG_OFF", "RNG_ACTIVE", "RNG_REVERSE") == x$flags) - 1 ## if the flag is unknown, set to 0 if (length(flag) == 0) flag <- 0 dat <- c( x$padding, .amigaIntToRaw(round(x$rate*(2^14)/60), 16, F), .amigaIntToRaw(flag, 16, F), .amigaIntToRaw(c(x$low, x$high), 8, F) ) result <- new("IFFChunk", chunk.type = "CRNG", chunk.data = list(dat)) return(result) } #' @rdname IFFChunk #' @export IFFChunk.IFF.ANIM <- function(x, ...) { rasterToIFF(x, ...)@chunk.data[[1]] } #' @rdname IFFChunk #' @export IFFChunk.IFF.ANHD <- function(x, ...) { oper <- which(x$operation == c("standard", "XOR", "LongDeltaMode", "ShortDeltaMode", "GeneralDeltamode", "ByteVerticalCompression", "StereoOp5", "ShortLongVerticalDeltaMode")) - 1 ## if the flag is unknown, set to 0 if (length(oper) == 0) oper <- 0 result <- c(.amigaIntToRaw(oper, 8, F), .bitmapToRaw(x$mask, F, F), .amigaIntToRaw(c(x$w, x$h), 16, F), .amigaIntToRaw(c(x$x, x$y), 16, T), .amigaIntToRaw(c(x$abstime, x$reltime), 32, T), .amigaIntToRaw(x$interleave, 8, F), x$pad0, .bitmapToRaw(x$flags, T, T), x$pad1 ) result <- new("IFFChunk", chunk.type = "ANHD", chunk.data = list(result)) return(result) } #' @rdname IFFChunk #' @export IFFChunk.IFF.DLTA <- function(x, ...) { return(new("IFFChunk", chunk.type = "DLTA", chunk.data = list(x))) } #' @rdname IFFChunk #' @export IFFChunk.IFF.DPAN <- function(x, ...) { result <- c(.amigaIntToRaw(c(x$version, x$nframes), 16, F), .bitmapToRaw(x$flags, T, T) ) result <- new("IFFChunk", chunk.type = "DPAN", chunk.data = list(result)) return(result) } #' Convert AmigaFFH objects into grDevices raster images #' #' Convert AmigaFFH objects that contain bitmap images into grDevices raster #' images. #' #' Images on the Amiga were stored as bitmap images with indexed colour #' palettes. This was mainly due to hardware and memory limitations. #' Bitmap images could also be embedded in several file types. This method #' can be used to convert AmigaFFH objects read from such files into #' grDevices raster images ([grDevices::as.raster()]). 
#'
#' @rdname as.raster
#' @name as.raster
#' @param x Object that needs to be converted into a `grDevices` raster. It
#' can be an [IFFChunk()] containing an interleaved bitmap image
#' (ILBM) or animation (ANIM), a [hardwareSprite()], an
#' [AmigaBitmapFont()] object or an [AmigaBitmapFontSet()] object.
#' @param background Use the argument `background` to
#' specify a background colour in case `x` is a [hardwareSprite()].
#' @param selected When `x` is an object of class [AmigaIcon()], `selected` can be
#' used to select a specific state. When set to `TRUE`, the raster of the [AmigaIcon()]
#' will be based on the `selected' state of the icon. Otherwise it will be based on the
#' deselected state (default).
#'
#' When `x` is an [AmigaBasicShape()] class object, `selected` can be used to select a
#' specific layer of the shape to plot, which can be one of `"bitmap"` (default), `"shadow"` or `"collision"`.
#' @param ... Currently ignored.
#' @returns Returns a `grDevices` raster image ([grDevices::as.raster()])
#' based on `x`. If `x` is an animation ([IFFChunk()] of type ANIM),
#' a `list` of raster objects is returned.
#' @examples
#' \dontrun{
#' ## load an IFF file
#' example.iff <- read.iff(system.file("ilbm8lores.iff", package = "AmigaFFH"))
#'
#' ## The file contains an interleaved bitmap image that can be
#' ## converted into a raster:
#' example.raster <- as.raster(example.iff)
#'
#' ## the raster can be plotted:
#' plot(example.raster)
#'
#' ## note that the IFFChunk can also be plotted directly:
#' plot(example.iff)
#'
#' ## Hardware sprites can also be converted into raster images.
#' ## Let's generate a 16x16 sprite with a random bitmap:
#' spr <- new("hardwareSprite",
#'            VStop = 16,
#'            bitmap = as.raw(sample.int(255, 64, replace = TRUE)))
#'
#' ## now convert it into a raster image.
#' ## as the background colour is not specified for hardware
#' ## sprite, we can optionally provide it here.
#' spr.raster <- as.raster(spr, background = "green")
#'
#' ## AmigaBasicShape objects can also be converted into rasters:
#' ball <- read.AmigaBasicShape(system.file("ball.shp", package = "AmigaFFH"))
#' ball.rst <- as.raster(ball)
#' }
#' @family iff.operations
#' @family raster.operations
#' @author Pepijn de Vries
#' @export
as.raster.IFFChunk <- function(x, ...) {
  if (x@chunk.type == "FORM") {
    result <- lapply(x@chunk.data, function(y) {
      if (y@chunk.type == "ILBM") return(as.raster(y))
      if (y@chunk.type == "ANIM") return(interpretIFFChunk(y))
    })
    if (length(result) == 1) result <- result[[1]]
    return(result)
  } else if (x@chunk.type == "ILBM") {
    sub.chunks <- unlist(lapply(x@chunk.data, function(y) y@chunk.type))
    if (!("BMHD" %in% sub.chunks)) stop("No bitmap header present. Can't interpret bitmap.")
    if (!("BODY" %in% sub.chunks)) stop("No BODY chunk present. Can't convert the bitmap into a raster.")
    bm.header <- interpretIFFChunk(getIFFChunk(x, "BMHD"))
    if (!("CAMG" %in% sub.chunks)) {
      bm.vp.mode <- NULL
      warning("No Amiga viewport available, interpretation possibly incorrect.")
    } else {
      bm.vp.mode <- interpretIFFChunk(getIFFChunk(x, "CAMG"))
      bm.vp.mode <- .display.properties(bm.vp.mode$display.mode, bm.vp.mode$monitor)
    }
    ## default palette in case 'CMAP' chunk is missing:
    bm.palette <- grDevices::gray(round(seq(0, 15, length.out = 2^bm.header$nPlanes))/15)
    ## Colours are interpreted as 12 bit when all low bits are 0, otherwise as 24 bit
    try(bm.palette <- interpretIFFChunk(getIFFChunk(x, "CMAP")), silent = T)
    ## extend the palette when the extra halfbright mode is used:
    if (!is.null(bm.vp.mode) && bm.vp.mode$is.halfbright) {
      bm.palette <- c(bm.palette, substr(grDevices::adjustcolor(bm.palette, 1, 0.5, 0.5, 0.5), 1, 7))
    }
    if (bm.header$Compression == "cmpByteRun1") {
      bm <- unPackBitmap(interpretIFFChunk(getIFFChunk(x, "BODY")))
    } else if (bm.header$Compression == "cmpNone") {
      bm <- interpretIFFChunk(getIFFChunk(x, "BODY"))
    } else {
      stop("Bitmap data is compressed with unsupported algorithm.")
    }
    np <- bm.header$nPlanes
    if (length(bm.palette) < (2^np)) {
      if (bm.vp.mode$is.HAM) np <- ifelse(np == 8, 6, 5)
      bm.palette <- c(bm.palette, rep("#000000", (2^np) - length(bm.palette)))
    }
    rm(np)
    if (bm.header$Masking == "mskHasTransparentColour") {
      transparent <- bm.header$transparentColour + 1
      bm.palette[transparent] <- grDevices::adjustcolor(bm.palette[transparent], alpha.f = 0)
    }
    attr.palette <- NULL
    if ("palette" %in% names(list(...)) && is.null(list(...)$palette)) {
      attr.palette <- bm.palette
      bm.palette <- NULL
    }
    if (bm.vp.mode$is.HAM) {
      result <- bitmapToRaster(bm, bm.header$w, bm.header$h, bm.header$nPlanes, NULL)
      ## It is assumed that the image is HAM8 in case there are 8 bitplanes,
      ## HAM6 in all other cases...
      ## if bm.palette is null, we are dealing with an animation
      ## and we need to return the indices rather than the colours
      if (!is.null(bm.palette)) {
        result <- .indexToHAMraster(result, bm.header$nPlanes, bm.palette, bm.header$transparentColour)
      }
    } else {
      result <- bitmapToRaster(bm, bm.header$w, bm.header$h, bm.header$nPlanes, bm.palette)
    }
    if (bm.vp.mode$is.HAM) {
      attributes(result)[["mode"]] <- ifelse(bm.header$nPlanes == 8, "HAM8", "HAM6")
    }
    attributes(result)[["asp"]] <- bm.vp.mode$aspect.y/bm.vp.mode$aspect.x
    if (!is.null(attr.palette)) attributes(result)[["palette"]] <- attr.palette
    return(result)
  } else if (x@chunk.type == "ANIM") {
    return(interpretIFFChunk(x))
  } else {
    stop(sprintf("IFF chunk of type %s cannot be converted into a raster.", x@chunk.type))
  }
}

#' @rdname plot
#' @name plot
#' @export
plot.IFF.ILBM <- function(x, y, ...) {
  ## For ANIM frames, a matrix of palette indices is returned
  ## turn that into a raster object with a grayscale palette
  if (inherits(x, "matrix")) {
    ## palette indices start at 0, so the palette needs max + 1 entries:
    pal <- grDevices::gray(seq(0, 1, length.out = max(round(abs(x))) + 1))
    asp <- attributes(x)$asp
    x <- as.raster(apply(x, 2, function(z) pal[round(abs(z)) + 1]))
    attributes(x)$asp <- asp
  }
  class(x) <- "raster"
  if ("asp" %in% names(list(...))) graphics::plot(x, y, ...) else graphics::plot(x, y, asp = attributes(x)$asp, ...)
}

#' @rdname plot
#' @name plot
#' @export
plot.IFF.ANIM <- function(x, y, ...)
{
  invisible(lapply(x, plot.IFF.ILBM, ...))
}

#' Convert a grDevices raster image into an IFF formatted bitmap image
#'
#' Convert grDevices raster images ([grDevices::as.raster()])
#' into a formal [IFFChunk()] object, as an interleaved bitmap (ILBM)
#' image.
#'
#' Convert any modern image into an interleaved bitmap (image) conforming
#' to Interchange File Format (IFF) specifications. If your original image
#' is in true colour (i.e., a 24 bit colour depth) it will be converted
#' into a bitmap image with an indexed palette.
#'
#' @rdname rasterToIFF
#' @name rasterToIFF
#' @param x A raster object created with [grDevices::as.raster()] which
#' needs to be converted into an IFF formatted bitmap image. It is also possible to let `x` be
#' a matrix of `character`s, representing colours.
#' @param display.mode Specify the Amiga display mode that should be used.
#' See [amiga_display_modes()] for all possible options.
#' "`LORES_KEY`" is used by default, this is the lowest resolution
#' possible on the Amiga.
#' @param monitor The Amiga monitor on which the image needs to be displayed.
#' See [amiga_monitors()] for more details and possible options.
#' By default "`DEFAULT_MONITOR_ID`" is used.
#' @param anim.options Currently ignored. This argument will potentially be implemented
#' in future versions of this package. Currently, animations are always encoded
#' with the "ByteVerticalCompression" in this package (when `x` is a list of
#' `raster` objects).
#' @param ... Arguments passed on to [rasterToBitmap()].
#' @returns Returns an [IFFChunk()] object holding an Interleaved
#' Bitmap (ILBM) image based on `x`.
#' @examples
#' \dontrun{
#' ## first: Let's make a raster out of the 'volcano' data, which we can use in the example:
#' volcano.raster <- as.raster(t(matrix(terrain.colors(1 + diff(range(volcano)))[volcano -
#'   min(volcano) + 1], nrow(volcano))))
#'
#' ## Turning the raster into an IFFChunk object is easy:
#' volcano.iff <- rasterToIFF(volcano.raster)
#'
#' ## This object can be saved as an IFF file using write.iff
#'
#' ## in special modes HAM6 and HAM8 higher quality images
#' ## can be obtained. See 'rasterToBitmap' for more info on the
#' ## special HAM modes.
#' volcano.ham <- rasterToIFF(volcano.raster, "HAM_KEY", depth = "HAM8")
#'
#' ## The result can be further improved by applying dithering
#' volcano.ham.dither <- rasterToIFF(volcano.raster, "HAM_KEY", depth = "HAM8",
#'   indexing = function(x, length.out) {
#'     index.colours(x, length.out, dither = "JJN", iter.max = 20)
#'   })
#' }
#' @family iff.operations
#' @family raster.operations
#' @author Pepijn de Vries
#' @export
rasterToIFF <- function(x,
                        display.mode = as.character(AmigaFFH::amiga_display_modes$DISPLAY_MODE),
                        monitor = as.character(AmigaFFH::amiga_monitors$MONITOR_ID),
                        anim.options, ...) {
  display.mode <- match.arg(display.mode)
  monitor <- match.arg(monitor)
  pars <- list(...)
  if (is.null(pars$depth)) pars$depth <- 3
  if (is.null(pars$colour.depth)) pars$colour.depth <- "12 bit"
  if (grepl("EHB|EXTRAHALFBRITE", display.mode)) stop("Sorry, the 'extra halfbrite' mode is currently not implemented")
  special.mode <- "none"
  if (pars$depth %in% c("HAM6", "HAM8")) {
    if (!grepl("HAM", display.mode)) warning("Display mode should be a HAM mode, when 'depth' is set to 'HAM6' or 'HAM8'. Display mode is corrected to 'HAM_KEY'.")
    display.mode <- "HAM_KEY"
  }
  if (grepl("HAM", display.mode)) {
    if (pars$depth %in% c("HAM6", "HAM8")) {
      special.mode <- pars$depth
      pars$colour.depth <- ifelse(special.mode == "HAM6", "12 bit", "24 bit")
      pars$depth <- ifelse(special.mode == "HAM6", 6, 8)
    } else {
      special.mode <- ifelse(pars$colour.depth == "24 bit", "HAM8", "HAM6")
    }
  }
  if (is.list(x)) {
    if (length(x) < 2) stop("When x is a list of rasters, it will be converted to an anim. x should have a length of at least 2.")
    if (any(unlist(lapply(x, function(y) !inherits(y, c("raster", "matrix")) || !all(.is.colour(y))))))
      stop("All elements of x should be a grDevices raster or a matrix of colours")
    if ("indexing" %in% names(list(...))) {
      x <- list(...)$indexing(x = x, length.out = ifelse(special.mode %in% c("HAM6", "HAM8"), special.mode, 2^pars$depth))
    } else {
      x <- index.colours(x, length.out = ifelse(special.mode %in% c("HAM6", "HAM8"), special.mode, 2^pars$depth))
    }
    pal <- attributes(x)$palette
    trans <- attributes(x)$transparent
    anhd <- lapply(1:(length(x) + 2), function(z) {
      anhdz <- list(
        operation  = "ByteVerticalCompression",
        mask       = rep(F, 8),
        w          = dim(x[[1]])[[2]],
        h          = dim(x[[1]])[[1]],
        x          = 0,
        y          = 0,
        abstime    = z*2,
        reltime    = 2,
        interleave = 0,
        pad0       = raw(1),
        flags      = rep(F, 32),
        pad1       = raw(16)
      )
      class(anhdz) <- "IFF.ANHD"
      anhdz <- IFFChunk(anhdz)
      anhdz
    })
    frame1 <- .indexToBitmap(x[[1]], pars$depth, T)
    frame1 <- .bitmapToILBM(frame1, dim(x[[1]]), display.mode, monitor, pars$depth, pars$colour.depth, pal, trans)
    ## DPaint needs this chunk to set the correct number of frames:
    dpan <- list(version = 4, nframes = length(x), flags = rep(F, 32))
    class(dpan) <- "IFF.DPAN"
    frame1@chunk.data <- c(frame1@chunk.data[1], anhd[[1]], frame1@chunk.data[2], IFFChunk(dpan), frame1@chunk.data[3:4])
    frame1 <- new("IFFChunk", chunk.type = "FORM", chunk.data = list(frame1))
    ## .byteVerticalCompression returns a list of DLTA chunks, with the first frame missing (NULL)
    x <- .byteVerticalCompression(x, pars$depth)
    x[-1] <- lapply(2:length(x), function(y) {
      res <- new("IFFChunk", chunk.type = "ILBM", chunk.data = list(anhd[[y]], x[[y]]))
      res <- new("IFFChunk", chunk.type = "FORM", chunk.data = list(res))
      return(res)
    })
    x[[1]] <- frame1
    x <- new("IFFChunk", chunk.type = "ANIM", chunk.data = x)
    x <- new("IFFChunk", chunk.type = "FORM", chunk.data = list(x))
    return(x)
  }
  if (!inherits(x, c("raster", "matrix")) || !all(.is.colour(x))) stop("x should be a raster object or a matrix of colours.")
  if ("depth" %in% names(list(...))) {
    bm <- rasterToBitmap(x, ...)
  } else {
    bm <- rasterToBitmap(x, depth = ifelse(special.mode %in% c("HAM6", "HAM8"), special.mode, pars$depth), ...)
} pal <- attributes(bm)$palette transparent <- attributes(bm)$transparent ## Create an ILBM chunk based on the bitmap data ilbm <- .bitmapToILBM(bm, dim(x), display.mode, monitor, pars$depth, pars$colour.depth, pal, transparent) ## Create a FORM chunk, encapsulating the bitmap info form <- new("IFFChunk", chunk.type = "FORM", chunk.data = list(ilbm)) return(form) } ## decompression for anim dlta frames ## dlta = raw data from dlta chunk .byteVerticalDecompression <- function(dlta, w, h, interleave, use.xor, previous = NULL) { interleave[interleave == 0] <- 2 pointers <- .rawToAmigaInt(dlta[1:(16*4)], 32, F)[1:8] if (all(pointers == 0)) { if (is.null(previous) || length(previous) == 0) { result <- matrix(0, h, w) class(result) <- c("IFF.ILBM", "IFF.ANY", class(result)) return(result) } else { prev <- length(previous) - interleave + 1 prev[prev < 1] <- 1 return(previous[[prev]]) } } bitmap.layers <- which(pointers > 1) bitmap.layers <- 1:bitmap.layers[bitmap.layers == max(bitmap.layers)] bitmap.layers <- pointers[bitmap.layers] ## loop the bitmap depth dimensions: result <- lapply(1:length(bitmap.layers), function(y) { offs <- bitmap.layers[[y]] prev <- NULL if (!is.null(previous)) { prev <- length(previous) - (interleave - 1) if (prev < 1) prev <- 1 prev <- previous[[prev]] prev <- apply(prev, 2, function(z) { as.logical(floor(z/(2^(y - 1))) %% 2) }) prev <- cbind(prev, matrix(F, h, w - ncol(prev))) } ## if the pointer is zero for a bitmap layer, there is no change. Just return previous frame if (offs == 0) { if (is.null(prev)) { return (matrix(0, h, w)) } else return(prev) } ## loop columns: layer <- NULL for (i in 1:(w/8)) { op.count <- .rawToAmigaInt(dlta[1 + offs], 8, F) offs <- offs + 1 row.result <- matrix(F, 0, 8) if (op.count > 0) { for (j in 1:op.count) { op <- .rawToAmigaInt(dlta[1 + offs], 8, F) offs <- offs + 1 if (op == 0) { ## RUN operation; followed by number of repetitions and the repeating byte rep.count <- .rawToAmigaInt(dlta[1 + offs], 8, F) rep.dat <- matrix(rep( as.logical(.rawToBitmap(dlta[2 + offs], T, F)), rep.count ), ncol = 8, byrow = T) if (use.xor) rep.dat <- xor(prev[nrow(row.result) + 1:nrow(rep.dat), i*8 + (-7:0)], rep.dat) row.result <- rbind(row.result, rep.dat) offs <- offs + 2 } else if (op < 0x80) { ## SKIP operation; move the cursor op bytes forward if (is.null(previous) || (is.list(previous) && length(previous) == 0)) { row.result <- rbind(row.result, matrix(F, nrow = op, ncol = 8)) } else { nroff <- nrow(row.result) + 1 if (length(nroff) == 0) nroff <- 1 row.result <- rbind(row.result, prev[nroff + 0:(op - 1), i*8 + (-7:0)]) } } else { ## DUMP operation; use op bytes literally op.cor <- op - 0x80 temp <- as.logical(.rawToBitmap(dlta[(1 + offs):(offs + op.cor)], T, F)) temp <- matrix(temp, ncol = 8, byrow = T) if (use.xor) temp <- xor(prev[nrow(row.result) + 1:nrow(temp), i*8 + (-7:0)], temp) row.result <- rbind(row.result, temp) offs <- offs + op.cor } } } if (nrow(row.result) == 0 || op.count == 0) { if (is.null(previous)) { row.result <- matrix(F, ncol = 8, nrow = h) } else { row.result <- prev[1:h, i*8 + (-7:0)] } } if (is.null(dim(row.result)) || !all(dim(row.result) == c(h, 8))) stop("Could not decode the bitmap correctly. 
If the bitmap was encoded with the AmigaFFH package, please contact the package author to get this fixed.")
      layer <- cbind(layer, row.result)
    }
    layer <- apply(layer, 2, function(z) (2^(y - 1))*as.numeric(z))
    return(layer)
  })
  result <- Reduce("+", result)
  class(result) <- c("IFF.ILBM", "IFF.ANY", class(result))
  return(result)
}

## vertical delta compression
## x should be a list of matrices with palette indices
## length of x should be > 1 (not checked here)
## the function returns a list of DLTA chunks, with the first element empty (NULL).
.byteVerticalCompression <- function(x, depth) {
  x <- c(x, x[1:2])
  h <- dim(x[[1]])[[1]]
  w <- dim(x[[1]])[[2]]
  x <- lapply(x, function(y) cbind(y, matrix(0, nrow = h, ncol = (-w %% 16))))
  wbm <- w + (-w %% 16)
  ## first frame should be ILBM with BMHD and BODY (constructed later and skipped here)
  result <- vector("list", length(x))
  for (i in 2:length(x)) {
    prev.id <- i - 2
    if (prev.id < 1) prev.id <- 1
    ## loop bitmap layers
    pointers <- NULL
    no.change <- rep(F, 16)
    for (j in 1:depth) {
      ## loop columns
      dep.data <- NULL
      for (k in 1:(wbm/8)) {
        previous.column <- floor((x[[prev.id]][,k*8 + (-7:0)] - 1)/(2^(j - 1))) %% 2
        curr.column <- floor((x[[i]][,k*8 + (-7:0)] - 1)/(2^(j - 1))) %% 2
        row.similarity <- apply(previous.column == curr.column, 1, all)
        ## when there are only short fragments the same, classify them as different
        ## this is to avoid the large overhead of the operation bytes
        run.lengths <- rle(row.similarity)$lengths
        run.lengths <- rep(run.lengths, run.lengths)
        row.similarity[row.similarity & run.lengths < 3] <- F
        ## If the entire row is similar to the previous frame, add 0 as op.count to the result
        cursor <- 1
        op.count <- 0
        op.data <- NULL
        if (!all(row.similarity)) {
          while (cursor <= h) {
            if (row.similarity[[cursor]]) {
              ## when elements at the cursor are the same compared to the previous frame, the SKIP operation is in place
              skip <- which(diff(c(row.similarity[cursor:h], F)) != 0)[[1]]
              repskip <- floor(skip/127)
              ## note that the op.count needs to precede each column of data
              ## it will be added after all ops of the column have been processed
              op.data <- c(op.data, .amigaIntToRaw(c(rep(127, repskip), skip %% 127), 8, F))
              op.count <- op.count + repskip + 1
              cursor <- cursor + skip
            } else {
              dup <- c(T, duplicated(curr.column[cursor:h,, drop = F], fromLast = F)[-1]) & !row.similarity[cursor:h]
              dup.run.length <- rle(dup)$lengths
              dup.run1 <- dup.run.length[[1]]
              dup.run1[dup.run1 > 255] <- 255
              dup.run.length <- c(1, dup.run.length[1] - 1, dup.run.length[-1])
              dup.run.length <- rep(dup.run.length, dup.run.length)
              if (dup.run1 > 3 && length(dup) > 3 && dup[[2]]) {
                ## When elements at the cursor are not the same as the previous frame, and they are repetitive, the RUN operation is in place
                op.data <- c(op.data, .amigaIntToRaw(c(0, dup.run1), 8, F),
                             .bitmapToRaw(as.logical(t(curr.column[cursor,, drop = F])), T, F))
                op.count <- op.count + 1
                cursor <- cursor + dup.run1
              } else {
                ## When elements at the cursor are not the same as the previous frame, and they are not repetitive, the DUMP operation is in place
                skip <- which(diff(c(row.similarity[cursor:h] | c(dup.run.length[-1] > 2 & dup[-1], F), T)) != 0)[[1]]
                skip[skip > 127] <- 127
                dat <- .bitmapToRaw(as.logical(t(curr.column[cursor:(cursor + skip - 1),, drop = F])), T, F)
                op.data <- c(op.data, .amigaIntToRaw(skip + 0x80, 8, F), dat)
                cursor <- cursor + skip
                op.count <- op.count + 1
              }
            }
          }
        }
        ## After each column, bind the result to the previous column.
        ## Each column should start with the count of the total number of
        ## operations in the column, followed by the encoded data.
        dep.data <- c(dep.data, .amigaIntToRaw(op.count, 8, F), op.data)
      }
      if (all(dep.data == raw(1))) {
        no.change[[j]] <- T
        dep.data <- NULL
      }
      pointers <- c(pointers, length(dep.data))
      if (!is.null(dep.data)) result[[i]] <- c(result[[i]], dep.data)
    }
    pointers <- cumsum(pointers)
    ptrs <- rep(0, 16)
    ptrs[1:length(pointers)] <- 64 + c(0, pointers[0:(length(pointers) - 1)])
    ptrs[no.change] <- 0
    ## combine the op-data from all bitmap layers, starting with pointers to the start of the data for each layer
    ## if there was no change to a bitmap layer, there is no data. The pointer is set to 0 in that case.
    result[[i]] <- c(
      .amigaIntToRaw(ptrs, 32, F),
      result[[i]]
    )
    ## DLTA should be preceded by an ANHD chunk, this is not added by this function
    result[[i]] <- new("IFFChunk", chunk.type = "DLTA", chunk.data = list(result[[i]]))
  }
  return(result)
}

.bitmapToILBM <- function(bm, dim.x, display.mode, monitor, depth, colour.depth, pal, transparent) {
  bm <- .bitmapToRaw(bm, T, F)
  bm <- matrix(bm, nrow = 2*ceiling(dim.x[[2]]/16), byrow = F)
  bm <- c(unlist(apply(bm, 2, packBitmap)))
  ## Create a BODY chunk based on the bitmap data
  body <- new("IFFChunk", chunk.type = "BODY", chunk.data = list(bm))
  ## Create a CAMG chunk, specifying the displaymode
  camg <- .inverseViewPort(display.mode, monitor)
  disp <- .amigaViewPortModes(camg@chunk.data[[1]])
  disp.prop <- .display.properties(disp$display.mode, disp$monitor)
  ## Create BMHD chunk, with bitmap header information
  BMHD <- list(
    w                 = dim.x[2],
    h                 = dim.x[1],
    x                 = 0,
    y                 = 0,
    nPlanes           = depth,
    Masking           = ifelse(!is.null(transparent) && !is.na(transparent), "mskHasTransparentColour", "mskNone"),
    Compression       = "cmpByteRun1",
    pad               = raw(1),
    transparentColour = ifelse(!is.null(transparent) && !is.na(transparent), transparent - 1, 0),
    xAspect           = disp.prop$aspect.x,
    yAspect           = disp.prop$aspect.y,
    pageWidth         = disp.prop$screenwidth,
    pageHeight        = disp.prop$screenheight
  )
  class(BMHD) <- "IFF.BMHD"
  hdr <- IFFChunk(BMHD)
  ## Create a CMAP chunk with palette information
  class(pal) <- "IFF.CMAP"
  cmap <- IFFChunk(pal, colour.depth = colour.depth)
  # cmap <- new("IFFChunk", chunk.type = "CMAP", chunk.data = list(colourToAmigaRaw(pal, pars$colour.depth, "3")))
  ## Create an ILBM chunk, which holds all required bitmap information
  ilbm <- new("IFFChunk", chunk.type = "ILBM", chunk.data = list(
    hdr,
    cmap,
    camg,
    body
  ))
  return(ilbm)
}

.indexToHAMraster <- function(x, depth, palette, transparentColour) {
  control.mask <- bitwShiftL(3, depth - 2)
  max_color <- ifelse(depth == 8, 255, 15)
  color_divisor <- ifelse(depth == 8, 1, 17)
  color_multi <- ifelse(depth == 8, 255/63, 1)
  x <- apply(x, 1, function(y) {
    control.flags <- 3*bitwAnd(y, control.mask)/control.mask
    y.shift <- (y - control.mask*control.flags/3)
    z <- rep(NA, length(y))
    z[control.flags == 0] <- palette[y.shift[control.flags == 0] + 1]
    for (i in 1:length(y)) {
      if (is.na(z[i])) {
        z0 <- z[i - 1]
        if (length(z0) == 0) z0 <- palette[transparentColour + 1]
        cl <- grDevices::col2rgb(z0)
        z[i] <- grDevices::rgb(
          ifelse(control.flags[i] == 2, color_multi*y.shift[i], cl["red",]/color_divisor),
          ifelse(control.flags[i] == 3, color_multi*y.shift[i], cl["green",]/color_divisor),
          ifelse(control.flags[i] == 1, color_multi*y.shift[i], cl["blue",]/color_divisor),
          maxColorValue = max_color
        )
      }
    }
    z
  })
  as.raster(t(x))
}
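## The compression and decompression routines above implement the ANIM
## `ByteVerticalCompression' (ANHD operation 5): each bitplane is encoded per
## 8-pixel wide column as a series of SKIP (op < 0x80: keep rows from the
## reference frame), DUMP (op >= 0x80: op - 0x80 literal bytes follow) and RUN
## (op == 0: a repeat count plus one repeating byte) operations, relative to
## the frame two steps back (an interleave of 2). A minimal sketch of the
## encoder (wrapped in `if (FALSE)` so it is not executed at package load);
## the two 16x16 frames of 1-based palette indices are made up:
if (FALSE) {
  frame1 <- matrix(1, 16, 16)
  frame2 <- frame1
  frame2[5:8, 5:8] <- 2  ## a small change between the frames

  ## Compress the frames; by design the first list element remains empty,
  ## as the first frame is stored as a plain ILBM BODY rather than a DLTA:
  dlta <- .byteVerticalCompression(list(frame1, frame2), depth = 1)
  dlta[[2]]  ## the DLTA chunk encoding frame 2
}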
#' @importFrom graphics plot #' @importFrom grDevices adjustcolor as.raster col2rgb hsv gray rgb rgb2hsv #' @importFrom methods as new #' @importFrom stats ave kmeans #' @importFrom tuneR play MCnames Wave WaveMC NULL
#' The S3 SysConfig class
#'
#' A comprehensive representation of an Amiga system-configuration file.
#'
#' The system-configuration file is a binary file stored in the `devs' folder
#' of the root of a bootable Amiga DOS device, containing system preferences.
#' It was used in Amiga OS 1.x. Although it could be used in later versions, it was
#' gradually phased out and some settings may not be usable in the later versions.
#' See references below for more details.
#'
#' Definitions of the system-configuration file have been revised at some points.
#' Revisions are minor and usually targeted at backward compatibility. Here
#' revision V38.2 (released on 16 September 1992) is implemented, which is the
#' latest documented version.
#'
#' The system-configuration file contains settings for the serial and parallel
#' port and the printer. It also contains some settings for the `workbench'
#' which was the Amiga equivalent of what is now mostly known as the computer's desktop.
#' Colours for the workbench and the shape of the mouse pointer are also stored
#' in this file. Settings for the mouse and basic screen resolution are also part
#' of the file.
#'
#' The SysConfig object is a comprehensive representation of the binary
#' system-configuration file. It is a `list` where the elements have identical
#' names as listed in the documents provided in the references. The names are usually
#' self-explanatory, but the referred documents can also be
#' consulted to obtain more detailed information with respect to each of
#' these elements. The mouse pointer is included as a [hardwareSprite()] object
#' in the list. The pointer image can be replaced by a different [hardwareSprite()],
#' but make sure it has a height of 16 pixels.
#'
#' It is possible to change the values of the list, but not all values may be valid.
#' Note that they will not be fully checked for validity. Invalid values may result in errors
#' when writing to a binary file using [write.SysConfig()], or may simply not
#' work properly on an Amiga or in an emulator.
#'
#' Use [simpleSysConfig()] for creating a simple SysConfig object which can
#' be modified. Use [read.SysConfig()] to read, and [write.SysConfig()]
#' to write system-configuration files. With [rawToSysConfig()] and
#' [AmigaFFH::as.raw()] SysConfig can be coerced back and forth from and to
#' its raw (binary) form.
#'
#' @docType class
#' @name SysConfig
#' @rdname SysConfig
#' @family SysConfig.operations
#' @author Pepijn de Vries
#' @references
#' <https://wiki.amigaos.net/wiki/Preferences#Preferences_in_1.3_and_Older_Versions_of_the_OS>
#' <http://amigadev.elowar.com/read/ADCD_2.1/Includes_and_Autodocs_2._guide/node00D5.html>
#' <http://amigadev.elowar.com/read/ADCD_2.1/Includes_and_Autodocs_3._guide/node063B.html>
NULL

#' @rdname plot
#' @name plot
#' @export
plot.SysConfig <- function(x, y, ...) {
  plot(as.raster(x$PointerMatrix, background = x$WBColours[[1]]), ...)
}

#' @export
print.SysConfig <- function(x, ...) {
  cat(sprintf("Amiga system-configuration\nFontHeight:\t%i\nPrinter:\t%s %s\nInterlaced:\t%s",
              x$FontHeight,
              tolower(strsplit(as.character(x$PrinterPort), "_")[[1]][[1]]),
              x$PrinterFilename,
              as.character(x$LaceWB == "LACE")), ...)
}

#' @rdname as.raw
#' @name as.raw
#' @export
as.raw.SysConfig <- function(x, ...)
{ class(x) <- NULL x$KeyRptSpeed <- as.raw(x$KeyRptSpeed) x$KeyRptDelay <- as.raw(x$KeyRptDelay) x$DoubleClick <- as.raw(x$DoubleClick) x$PointerMatrix <- as.raw(x$PointerMatrix) x$PointerMatrix[3] <- raw(1) ## set VStop to 0 x$WBColours <- colourToAmigaRaw(x$WBColours, colour.depth = "12 bit", n.bytes = "2") x$spriteColours <- colourToAmigaRaw(x$spriteColours, colour.depth = "12 bit", n.bytes = "2") x$PrinterFilename <- charToRaw(x$PrinterFilename)[1:30] x$PrtDevName <- charToRaw(x$PrtDevName)[1:16] x[names(.SysConfigFactors)] <- lapply(names(.SysConfigFactors), function(y) { .match.factor.inv(x, y, .SysConfigFactors[[y]]$vals, .SysConfigFactors[[y]]$levs) }) x$PrintFlags <- .match.multi.factor.inv(x, "PrintFlags", .SysConfigMultiFactors[["PrintFlags"]]$vals, .SysConfigMultiFactors[["PrintFlags"]]$levs) x$SerRWBits <- .bitmapToRaw(x$SerRWBits, F, F) x$SerParShk <- .amigaIntToRaw( 16*.match.factor.inv(x$SerParShk, "SerialParity", 0:4, c("SPARITY_NONE", "SPARITY_EVEN", "SPARITY_ODD", "SPARITY_MARK", "SPARITY_SPACE")) + .match.factor.inv(x$SerParShk, "HandshakeMode", 0:2, c("SHSHAKE_XON", "SHSHAKE_RTS", "SHSHAKE_NONE")), 8, F) x$SerStopBuf <- .amigaIntToRaw( 16*(x$SerStopBuf$N.StopBits - 1) + .match.factor.inv(x$SerStopBuf, "BufSize", 0:5, c("SBUF_512", "SBUF_1024", "SBUF_2048", "SBUF_4096", "SBUF_8000", "SBUF_16000")), 8, F) x <- .write.amigaData(x, .SysConfigData$byte, .SysConfigData$signed, .SysConfigData$par.names) return(x) } .SysConfigMultiFactors <- list( PrintFlags= data.frame( vals = c(0x0001, 0x0002, 0x0004, 0x0008, 0x0000, 0x0010, 0x0020, 0x0040, 0x0080, 0x0100, 0x0000, 0x0200, 0x0400, 0x0800, 0x1000), levs = c("CORRECT_RED", "CORRECT_GREEN", "CORRECT_BLUE", "CENTER_IMAGE", "IGNORE_DIMENSIONS", "BOUNDED_DIMENSIONS", "ABSOLUTE_DIMENSIONS", "PIXEL_DIMENSIONS", "MULTIPLY_DIMENSIONS", "INTEGER_SCALING", "ORDERED_DITHERING", "HALFTONE_DITHERING", "FLOYD_DITHERING", "ANTI_ALIAS", "GREY_SCALE2"), stringsAsFactors = F ) ) .SysConfigFactors <- list( PrinterPort = data.frame(vals = c(0x00, 0x01), levs = c("PARALLEL_PRINTER", "SERIAL_PRINTER"), stringsAsFactors = F), BaudRate = data.frame(vals = 0:7, levs = c("BAUD_110", "BAUD_300", "BAUD_1200", "BAUD_2400", "BAUD_4800", "BAUD_9600", "BAUD_19200", "BAUD_MIDI"), stringsAsFactors = F), PaperType = data.frame(vals = c(0x00, 0x80), levs = c("FANFOLD", "SINGLE"), stringsAsFactors = F), PrintPitch = data.frame(vals = c(0x000, 0x400, 0x800), levs = c("PICA", "ELITE", "FINE"), stringsAsFactors = F), PrintQuality = data.frame(vals = c(0x000, 0x100), levs = c("DRAFT", "LETTER")), PrintSpacing = data.frame(vals = c(0x000, 0x200), levs = c("SIX_LPI", "EIGHT_LPI"), stringsAsFactors = F), PrintImage = data.frame(vals = c(0x00, 0x01), levs = c("IMAGE_POSITIVE", "IMAGE_NEGATIVE"), stringsAsFactors = F), PrintAspect = data.frame(vals = c(0x00, 0x01), levs = c("ASPECT_HORIZ", "ASPECT_VERT"), stringsAsFactors = F), PrintShade = data.frame(vals = c(0x00, 0x01, 0x02), levs = c("SHADE_BW", "SHADE_GREYSCALE", "SHADE_COLOR"), stringsAsFactors = F), PaperSize = data.frame(vals = (0:13)*16, levs = c("US_LETTER", "US_LEGAL", "N_TRACTOR", "W_TRACTOR", "CUSTOM", paste0("EURO_A", 0:8)), stringsAsFactors = F), PrinterType = data.frame(vals = 0:12, levs = c("CUSTOM_NAME", "ALPHA_P_101", "BROTHER_15XL", "CBM_MPS1000", "DIAB_630", "DIAB_ADV_D25", "DIAB_C_150", "EPSON", "EPSON_JX_80", "OKIMATE_20", "QUME_LP_20", "HP_LASERJET", "HP_LASERJET_PLUS"), stringsAsFactors = F), LaceWB = data.frame(vals = 0:1, levs = c("NO_LACE", "LACE"), stringsAsFactors = F) ) .SysConfigData <- 
data.frame( byte = c(1, 1, 2, -8, -8, -8, -72, 1, 1, -6, 2, -8, 1, 1, 2, 2, -2, 2, -30, rep(2, 12), rep(-1, 3), 1, -12, -16, 1, 1, 1, 1, 2, 2, 2, 1, 1, 2, 2, 1, 1), signed = c(T, F, F, F, F, F, F, T, T, F, F, F, T, T, T, T, F, T, F, rep(F, 8), T, F, F, F, rep( F, 3), F, F, F, T, T, F, F, F, F, F, F, F, F, F, F, F), par.names = c("FontHeight", "PrinterPort", "BaudRate", "KeyRptSpeed", "KeyRptDelay", "DoubleClick", "PointerMatrix", "XOffset", "YOffset", "spriteColours", "PointerTicks", "WBColours", "ViewXOffset", "ViewYOffset", "ViewInitX", "ViewInitY", "EnableCLI", "PrinterType", "PrinterFilename", "PrintPitch", "PrintQuality", "PrintSpacing", "PrintLeftMargin", "PrintRightMargin", "PrintImage", "PrintAspect", "PrintShade", "PrintThreshold", "PaperSize", "PaperLength", "PaperType", "SerRWBits", "SerStopBuf", "SerParShk", "LaceWB", "Pad", "PrtDevName", "DefaultPrtUnit", "DefaultSerUnit", "RowSizeChange", "ColumnSizeChange", "PrintFlags", "PrintMaxWidth", "PrintMaxHeight", "PrintDensity", "PrintXOffset", "wb_Width", "wb_Height", "wb_Depth", "ext_size"), stringsAsFactors = F ) #' Read an Amiga system-configuration file #' #' Read a binary Amiga system-configuration file and return as [SysConfig] object. #' #' Amiga OS 1.x stored system preferences in a binary system-configuration file. This #' function returns the file in a comprehensive format (a [SysConfig] object). #' #' @rdname read.SysConfig #' @name read.SysConfig #' @param file The file name of a system-configuration file to be read. #' Can also be a connection that allows reading binary data. #' @param disk A virtual Commodore Amiga disk from which the `file` should be #' read. This should be an [`amigaDisk()`][adfExplorer::amigaDisk-class] object. Using #' this argument requires the adfExplorer package. #' When set to `NULL`, this argument is ignored. #' @return Returns an S3 [SysConfig] class object based on the file that is read. #' @examples #' \dontrun{ #' ## Put a simple SysConfig object into the tempdir: #' write.SysConfig(simpleSysConfig(), file.path(tempdir(), "system-configuration")) #' #' ## Now read the same file: #' sc <- read.SysConfig(file.path(tempdir(), "system-configuration")) #' #' ## and plot it #' plot(sc) #' } #' @family SysConfig.operations #' @family io.operations #' @author Pepijn de Vries #' @export read.SysConfig <- function(file, disk = NULL) { dat <- .read.generic(file, disk) rawToSysConfig(dat) } #' Write an Amiga system-configuration file #' #' Write a [SysConfig] class object to an Amiga binary system-configuration file. #' #' Amiga OS 1.x stored system preferences in a binary system-configuration file. This #' function writes a [SysConfig] class object as such a binary file. This file #' can be used on an Amiga or in an emulator. #' #' @rdname write.SysConfig #' @name write.SysConfig #' @param x An S3 [SysConfig] class object. #' @param file A file name to which the binary file should be written. #' @param disk A virtual Commodore Amiga disk to which the `file` should be #' written. This should be an [`amigaDisk()`][adfExplorer::amigaDisk-class] object. Using #' this argument requires the adfExplorer package. #' When set to `NULL`, this argument is ignored. #' @return Returns `NULL` or an `integer` status passed on by the #' [close()] function, that is used to close the file connection. #' It is returned invisibly. Or, when `disk` is specified, a copy of #' `disk` is returned to which the file is written. 
#' #' @examples #' \dontrun{ #' ## First generate a simple SysConfig object to write to a file: #' sc <- simpleSysConfig() #' #' ## And write to the tempdir: #' write.SysConfig(sc, file.path(tempdir(), "system-configuration")) #' } #' @family SysConfig.operations #' @family io.operations #' @author Pepijn de Vries #' @export write.SysConfig <- function(x, file, disk = NULL) { if (!inherits(x, "SysConfig")) stop("x should be of class SysConfig.") .write.generic(x, file, disk) } #' Coerce raw data into a SysConfig class object #' #' [SysConfig] objects are comprehensive representations of binary Amiga #' system-configuration files. Use this function to convert `raw` data from #' such a file to a [SysConfig] object. #' #' The Amiga used the system-configuration file to store certain system preferences #' in a binary file. With this function such `raw` data can be converted into #' a more comprehensive [SysConfig] object. Use [AmigaFFH::as.raw()] #' to achieve the inverse. #' #' @rdname rawToSysConfig #' @name rawToSysConfig #' @param x A vector of `raw` data that needs to be converted into an S3 #' [SysConfig] class object. It should have a length of at least 232. Although #' system-configurations can be extended, such extended files are not supported here. #' @return Returns a [SysConfig] class object based on `x`. #' @examples #' \dontrun{ #' ## get the system-configuration from the adfExplorer example disk: #' sc <- adfExplorer::get.adf.file(adfExplorer::adf.example, "devs/system-configuration") #' #' ## This will get you the raw data from the file: #' typeof(sc) #' #' ## Convert the raw data to a more comprehensive named list (and S3 SysConfig class): #' sc <- rawToSysConfig(sc) #' } #' @family SysConfig.operations #' @family raw.operations #' @author Pepijn de Vries #' @export rawToSysConfig <- function(x) { ## assuming pref. version 38.2 (slightly different from 37.4) see urls system.configuration <- .read.amigaData(x, .SysConfigData$byte, .SysConfigData$signed, .SysConfigData$par.names) ## EnableCLI is obsolete system.configuration$KeyRptSpeed <- timeval(system.configuration$KeyRptSpeed) system.configuration$KeyRptDelay <- timeval(system.configuration$KeyRptDelay) system.configuration$DoubleClick <- timeval(system.configuration$DoubleClick) ## When converting back to raw, we need to adjust the third byte (VSTOP). 
  ## It is set to 16 in the call below:
  system.configuration$PointerMatrix <- rawToHWSprite(system.configuration$PointerMatrix)
  system.configuration$WBColours <- amigaRawToColour(system.configuration$WBColours, colour.depth = "12 bit", n.bytes = "2")
  system.configuration$spriteColours <- amigaRawToColour(system.configuration$spriteColours, colour.depth = "12 bit", n.bytes = "2")
  system.configuration$PointerMatrix@colours <- system.configuration$spriteColours
  system.configuration$PrinterFilename <- .rawToCharNull(system.configuration$PrinterFilename)
  system.configuration$PrtDevName <- .rawToCharNull(system.configuration$PrtDevName)
  system.configuration[names(.SysConfigFactors)] <- lapply(names(.SysConfigFactors), function(y) {
    .match.factor(system.configuration, y, .SysConfigFactors[[y]]$vals, .SysConfigFactors[[y]]$levs)
  })
  system.configuration$PrintFlags <- .match.multi.factor(system.configuration, "PrintFlags",
                                                         .SysConfigMultiFactors[["PrintFlags"]]$vals,
                                                         .SysConfigMultiFactors[["PrintFlags"]]$levs)
  if (sum(grepl("DIMENSIONS", system.configuration$PrintFlags)) > 1)
    system.configuration$PrintFlags <- system.configuration$PrintFlags[!grepl("IGNORE_DIMENSIONS", system.configuration$PrintFlags)]
  if (sum(grepl("DITHERING", system.configuration$PrintFlags)) > 1)
    system.configuration$PrintFlags <- system.configuration$PrintFlags[!grepl("ORDERED_DITHERING", system.configuration$PrintFlags)]
  system.configuration$SerRWBits <- as.logical(.rawToBitmap(system.configuration$SerRWBits, F, F))
  names(system.configuration$SerRWBits) <- c(t(outer(c("write.bit", "read.bit"), 0:3, paste0)))
  system.configuration$SerParShk <- list(
    SerialParity = .match.factor(list(SerialParity = ProTrackR::hiNybble(system.configuration$SerParShk)),
                                 "SerialParity", 0:4,
                                 c("SPARITY_NONE", "SPARITY_EVEN", "SPARITY_ODD", "SPARITY_MARK", "SPARITY_SPACE")),
    HandshakeMode = .match.factor(list(HandshakeMode = ProTrackR::loNybble(system.configuration$SerParShk)),
                                  "HandshakeMode", 0:2,
                                  c("SHSHAKE_XON", "SHSHAKE_RTS", "SHSHAKE_NONE"))
  )
  system.configuration$SerStopBuf <- list(
    N.StopBits = ProTrackR::hiNybble(system.configuration$SerStopBuf) + 1L,
    BufSize = .match.factor(list(BufSize = ProTrackR::loNybble(system.configuration$SerStopBuf)),
                            "BufSize", 0:5,
                            c("SBUF_512", "SBUF_1024", "SBUF_2048", "SBUF_4096", "SBUF_8000", "SBUF_16000"))
  )
  class(system.configuration) <- "SysConfig"
  return(system.configuration)
}

#' Function to generate a simple Amiga system-configuration representation
#'
#' [SysConfig] objects are comprehensive representations of binary Amiga
#' system-configuration files. Use this function to create a simple [SysConfig] object.
#'
#' The Amiga used the system-configuration file to store certain system preferences
#' in a binary file. In the AmigaFFH package such files can be represented by the more
#' comprehensive [SysConfig] class object. Use this function to create such an object
#' with basic settings (which can be modified).
#'
#' @rdname simpleSysConfig
#' @name simpleSysConfig
#' @param options A named `list` with elements of the target
#' [SysConfig()] object that need to be modified.
#' @return Returns a comprehensive representation of a system-configuration file in the
#' form of a [SysConfig] class object.
#' @examples
#' \dontrun{
#' ## Create a simple system-configuration (S3 SysConfigClass)
#' sc <- simpleSysConfig()
#'
#' ## And modify it as you wish.
#' ## in this case change the setting for the printer
#' ## from the parallel port to the serial port:
#' sc$PrinterPort <- factor("SERIAL_PRINTER", levels(sc$PrinterPort))
#'
#' ## It is also possible to provide modifications to the configuration
#' ## via the 'options' argument:
#' sc <- simpleSysConfig(options = list(FontHeight = 9))
#' }
#' @family SysConfig.operations
#' @author Pepijn de Vries
#' @export
simpleSysConfig <- function(options) {
  result <- paste0("789ce3606060650083c301608a53fd0090646460f0538088373430341c3c",
                   "e078e0b142f2836b02e51fce70d4ff38c152ffe70053fdbf038c60fac701",
                   "96fa0f0738ea1f1c1000d320fe9f06260686ffff4126fcffc8e7c2c0c0f7",
                   "8681916b150303ff7fb66ea06023830e482e3d352fb5283399012f6065f0",
                   "86b2d81914189cf02b46058c501a004f782dc6")
  pos <- 1:(nchar(result)/2)
  pos <- pos*2 - 1
  result <- as.raw(as.numeric(paste0("0x", c(sapply(result, substring, first = pos, last = pos + 1)))))
  result <- memDecompress(result, "gzip")
  result <- rawToSysConfig(result)
  if (!missing(options)) {
    for (opt in names(options)) {
      result[[opt]] <- options[[opt]]
    }
  }
  return(result)
}

#' @export
`$<-.SysConfig` <- function(x, i, value) {
  x[[i]] <- value
  x
}

#' @export
`[[<-.SysConfig` <- function(x, i, value) {
  if (!inherits(i, "character")) stop("Refer to elements by name, not by index number, when replacing them.")
  # make sure that x is in the correct order:
  cl <- class(x)
  x <- x[.SysConfigData$par.names]
  if (!(i %in% .SysConfigData$par.names)) stop(sprintf("Element \"%s\" is not part of SysConfig and cannot be assigned.", i))
  class(x) <- NULL
  if (i %in% c("WBColours", "spriteColours") && !all(.is.colour(value))) stop(sprintf("Can only assign colours to %s.", i))
  if (i == "WBColours" && length(value) != 4) stop("WBColours needs a vector of 4 colours.")
  if (i == "spriteColours" && length(value) != 3) stop("spriteColours needs a vector of 3 colours.")
  if (i == "Pad") {
    value <- as.raw(value)
    if (length(value) != 12) stop("'Pad' should be a vector of 12 raw values.")
  }
  if (i == "SerRWBits") {
    value <- as.logical(value)
    ## SerRWBits holds a single byte, i.e., 8 bit flags:
    if (length(value) != 8) stop("'SerRWBits' should be a vector of 8 logical values.")
    names(value) <- c(paste0("write.bit", 0:3), paste0("read.bit", 0:3))
  }
  if (i == "SerStopBuf") {
    if (typeof(value) == "list" && all(names(value) == c("N.StopBits", "BufSize"))) {
      value$N.StopBits <- as.numeric(value$N.StopBits)
      if (value$N.StopBits < 0 || value$N.StopBits > 15) stop("value is out of range.")
      bfs <- c("SBUF_512", "SBUF_1024", "SBUF_2048", "SBUF_4096", "SBUF_8000", "SBUF_16000")
      if (is.numeric(value$BufSize)) value$BufSize <- bfs[match(value$BufSize, 0:5)]
      if (is.factor(value$BufSize)) value$BufSize <- as.character(value$BufSize)
      value$BufSize <- factor(value$BufSize[1], bfs)
      if (is.na(value$BufSize)) stop("Illegal value for SerStopBuf.")
    } else {
      stop("SerStopBuf should be a list with elements N.StopBits and BufSize")
    }
  }
  if (i == "SerParShk") {
    if (typeof(value) == "list" && all(names(value) == c("SerialParity", "HandshakeMode"))) {
      sp <- c("SPARITY_NONE", "SPARITY_EVEN", "SPARITY_ODD", "SPARITY_MARK", "SPARITY_SPACE")
      if (is.numeric(value$SerialParity)) value$SerialParity <- sp[match(value$SerialParity, 0:4)]
      if (is.factor(value$SerialParity)) value$SerialParity <- as.character(value$SerialParity)
      value$SerialParity <- factor(value$SerialParity[1], sp)
      hs <- c("SHSHAKE_XON", "SHSHAKE_RTS", "SHSHAKE_NONE")
      if (is.numeric(value$HandshakeMode)) value$HandshakeMode <- hs[match(value$HandshakeMode, 0:2)]
      if (is.factor(value$HandshakeMode)) value$HandshakeMode <- as.character(value$HandshakeMode)
      value$HandshakeMode <- factor(value$HandshakeMode[1], hs)
      if (is.na(value$HandshakeMode) || is.na(value$SerialParity)) stop("Illegal value for SerParShk")
    } else {
      stop("SerParShk should be a list with elements SerialParity and HandshakeMode")
    }
  }
  if (i == "EnableCLI") {
    value <- as.raw(value)
    if (length(value) != 2) stop("'EnableCLI' should be a vector of 2 raw values.")
  }
  if (i %in% c("PrtDevName", "PrinterFilename")) value <- as.character(value)
  if (i %in% c("KeyRptSpeed", "KeyRptDelay", "DoubleClick")) {
    if (is.numeric(value)) {
      if (value < 0) stop(sprintf("Negative numbers are not allowed for %s", i))
      class(value) <- "AmigaTimeVal"
    }
    if (!inherits(value, "AmigaTimeVal")) stop("Value cannot be cast to 'timeval' class object.")
  }
  if (i == "PointerMatrix") {
    if (inherits(value, "raster")) value <- rasterToHWSprite(value)
    if (!inherits(value, "hardwareSprite")) stop("PointerMatrix element and its replacement should be an S4 class hardwareSprite object.")
    if (!all(dim(value) == 16)) stop("The pointer sprite should be 16 pixels wide and 16 pixels high.")
    x[["spriteColours"]] <- value@colours
  }
  fct <- .SysConfigFactors[[i]]
  if (!is.null(fct)) {
    if (is.factor(value)) {
      if (!all(levels(value) == fct$levs)) stop(sprintf("Illegal levels for factor %s.", i))
      value <- value[[1]]
    }
    if (is.numeric(value)) {
      if (!(value[[1]] %in% fct$vals)) stop(sprintf("Illegal value for %s.", i))
      value <- factor(fct$levs[fct$vals == value[[1]]], fct$levs)
    }
    if (is.character(value)) {
      if (!(value[[1]] %in% fct$levs)) stop(sprintf("Illegal level for factor %s.", i))
      value <- factor(value[[1]], fct$levs)
    }
  } else {
    fct <- .SysConfigMultiFactors[[i]]
    if (!is.null(fct)) {
      if (is.factor(value)) value <- as.character(value)
      if (is.character(value)) value <- match(value, .SysConfigMultiFactors[[i]]$levs)
      if (is.numeric(value)) {
        if (any(is.na(value))) stop(sprintf("Illegal value for %s.", i))
        value <- .bitwOrAll(value)
        if (value < 0) stop(sprintf("Illegal value for %s.", i))
        temp <- eval(parse(text = sprintf("list(%s = %i)", i, value)))
        value <- .match.multi.factor(temp, i,
                                     .SysConfigMultiFactors[[i]]$vals,
                                     .SysConfigMultiFactors[[i]]$levs)
      }
    } else {
      bt <- .SysConfigData$byte[.SysConfigData$par.names == i]
      sn <- .SysConfigData$signed[.SysConfigData$par.names == i]
      if (bt > 0) {
        value <- value[[1]]
        rn <- c(0, 2^(bt*8) - 1)
        if (sn) rn <- rn - ceiling(rn[2]/2)
        if (!sn && value < 0) stop(sprintf("Negative values are not allowed for %s", i))
        if (value < rn[1] || value > rn[2]) stop("Value is out of range.")
      }
    }
  }
  x[[i]] <- value
  class(x) <- cl
  return(x)
}
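## A short illustration of the replacement methods defined above (wrapped in
## `if (FALSE)` so it is not executed when the package loads):
if (FALSE) {
  sc <- simpleSysConfig()

  ## Factor-backed elements accept a level name, a factor, or the underlying
  ## numeric code; all of these are normalised to a factor:
  sc$PrinterPort <- "SERIAL_PRINTER"
  sc$BaudRate    <- "BAUD_9600"

  ## Illegal values are rejected:
  ## sc$PrinterPort <- "USB"   ## would throw "Illegal level for factor..."

  ## Numeric timing values are cast to the 'AmigaTimeVal' class:
  sc$DoubleClick <- 0.5
}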
#' The S3 AmigaIcon class
#'
#' A comprehensive representation of an Amiga Workbench icon file.
#'
#' Files, directories and other similar objects were depicted as icons on the
#' Amiga Workbench (the Amiga's equivalent of what is now mostly known as the computer's
#' desktop). Icons were actually separate files with the exact same name as the
#' file or directory they represent, except for an additional `.info' extension.
#'
#' In addition to being a graphical representation of files or directories, icon
#' files also contained additional information about the file. It could for instance
#' indicate which tool would be required to open the file.
#'
#' The classic Amiga Workbench icon file has a rather complex structure as it is
#' basically a dump of how it is stored in memory. As a result it contains many
#' memory pointers that are really not necessary to store in a file.
#'
#' The S3 AmigaIcon class is used to represent these complex files as a named
#' `list`. The elements in that `list` have mostly identical
#' names as listed in the first document referenced below. The names are usually
#' self-explanatory, but the referred documents can also be
#' consulted to obtain more detailed information with respect to each of
#' these elements. As pointed out earlier, not all elements will have a meaningful
#' use.
#'
#' It is possible to change the values of the list, but not all values may be valid.
#' Note that they will not be fully checked for validity. Invalid values may result in errors
#' when writing to a binary file using [write.AmigaIcon()], or may simply not
#' work properly on an Amiga or in an emulator.
#'
#' The original `.info' file could be extended with NewIcon or with OS3.5
#' [IFFChunk()] data, that allowed for icons with larger colour depths.
#' These extensions are currently not implemented.
#'
#' Use [simpleAmigaIcon()] for creating a simple `AmigaIcon` object which can
#' be modified. Use [read.AmigaIcon()] to read, and [write.AmigaIcon()]
#' to write workbench icon files (*.info). With [rawToAmigaIcon()] and
#' [AmigaFFH::as.raw()] `AmigaIcon` can be coerced back and forth from
#' and to its raw (binary) form.
#' @docType class
#' @name AmigaIcon
#' @rdname AmigaIcon
#' @family AmigaIcon.operations
#' @author Pepijn de Vries
#' @references
#' <http://www.evillabs.net/index.php/Amiga_Icon_Formats>
#' <http://fileformats.archiveteam.org/wiki/Amiga_Workbench_icon>
#' <http://amigadev.elowar.com/read/ADCD_2.1/Libraries_Manual_guide/node0241.html>
#' <http://amigadev.elowar.com/read/ADCD_2.1/Includes_and_Autodocs_3._guide/node05D6.html>
NULL

.icon.data.head <- data.frame(
  byte      = c(2, 2, -44, 1, 1, 4, 4, 4, 4, 4, 4, 4),
  signed    = c(F, F,   F, F, T, F, F, T, T, F, F, T),
  par.names = c("ic_Magic", "ic_Version", "ic_Gadget", "ic_Type", "ic_Pad",
                "ic_DefaultTool", "ic_ToolTypes", "ic_CurrentX", "ic_CurrentY",
                "ic_DrawerData", "ic_ToolWindow", "ic_StackSize"),
  stringsAsFactors = F
)

.icon.gadget.data <- data.frame(
  byte      = c(4, 2, 2, 2, 2, -2, 2, 2, 4, 4, 4, 4, 4, 2, 4),
  signed    = c(F, T, T, T, T,  F, F, F, F, F, F, T, F, F, F),
  par.names = c("ga_Next", "ga_LeftEdge", "ga_TopEdge", "ga_Width", "ga_Height",
                "ga_Flags", "ga_Activation", "ga_GadgetType", "ga_GadgetRender",
                "ga_SelectRender", "ga_GadgetText", "ga_MutualExclude",
                "ga_SpecialInfo", "ga_GadgetID", "ga_UserData"),
  stringsAsFactors = F
)

.icon.drawer.data <- data.frame(
  byte      = c(-48, 4, 4),
  signed    = c(  F, T, T),
  par.names = c("NewWindow", "dd_CurrentX", "dd_CurrentY"),
  stringsAsFactors = F
)

.icon.new.window.data <- data.frame(
  byte      = c(2, 2, 2, 2, 1, 1, -4, -4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2),
  signed    = c(T, T, T, T, F, F,  F,  F, F, F, F, F, F, T, T, F, F, F),
  par.names = c("nw_LeftEdge", "nw_TopEdge", "nw_Width", "nw_Height",
                "nw_DetailPen", "nw_BlockPen", "nw_IDCMPFlags", "nw_Flags",
                "nw_FirstGadget", "nw_CheckMark", "nw_Title", "nw_Screen",
                "nw_BitMap", "nw_MinWidth", "nw_MinHeight", "nw_MaxWidth",
                "nw_MaxHeight", "nw_Type"),
  stringsAsFactors = F
)

.icon.image.data <- data.frame(
  byte      = c(2, 2, 2, 2, 2, 4, 1, 1, 4),
  signed    = c(T, T, T, T, T, F, F, F, F),
  par.names = c("im_LeftEdge", "im_TopEdge", "im_Width", "im_Height", "im_Depth",
                "im_Data", "im_PlanePick", "im_PlaneOnOff", "im_Next"),
  stringsAsFactors = F
)

#' Create simple AmigaIcon objects
#'
#' Graphical representations of files and directories (icons) are stored as
#' separate files (with the .info extension) on the Amiga. This function creates
#' [AmigaIcon()] class objects representing such files.
#'
#' This function creates basic [AmigaIcon()] objects which
#' can be modified afterwards. It uses simple generic images to represent
#' different types of files or directories.
#'
#' @rdname simpleAmigaIcon
#' @name simpleAmigaIcon
#' @param version A `character` string indicating the Amiga OS version
#' with which the icon should be compatible. "`OS2.x`" indicates \>=OS2.0
#' and "`OS1.x`" indicates <OS2.0.
#' @param type A `character` string indicating the type of object (file, disk, directory, etc.)
#' the icon should represent. See the `Usage' section for all possible options.
#' @param two.images A single `logical` value, indicating whether
#' the selected icon is depicted as a second image (in which case the
#' icon contains two images). The default value is `TRUE`.
#' @param back.fill A single `logical` value, indicating whether
#' the selected image of the icon should use the `back fill' mode. If set
#' to `FALSE` (default), `complement' mode is used. Note that
#' back fill is not compatible with icons that hold two images. In
#' `complement' mode, the image colours are inverted when selected.
#' In `back fill' mode, the exterior (first) colour is not inverted.
#' @param ... Reserved for additional arguments.
Currently ignored. #' @returns Returns a simple S3 object of class [AmigaIcon()]. #' @examples #' \dontrun{ #' ## Create an AmigaIcon object using the default arguments: #' icon <- simpleAmigaIcon() #' } #' @family AmigaIcon.operations #' @family raw.operations #' @author Pepijn de Vries #' @export simpleAmigaIcon <- function(version = c("OS1.x", "OS2.x"), type = c("WBDISK", "WBDRAWER", "WBTOOL", "WBPROJECT", "WBGARBAGE", "WBDEVICE", "WBKICK", "WBAPPICON"), two.images = TRUE, back.fill = FALSE, ...) { two.images <- as.logical(two.images[[1]]) back.fill <- as.logical(back.fill[[1]]) if (back.fill && two.images) { warning("Backfill is not possible when a second image is used for the selected icon. Backfill is set to FALSE.") back.fill <- F } version <- match.arg(version) type <- match.arg(type) result <- as.list(c(0xE310, 1, rep(0, nrow(.icon.data.head) - 2))) names(result) <- .icon.data.head$par.names result$ic_Type <- factor(type, c("WBDISK", "WBDRAWER", "WBTOOL", "WBPROJECT", "WBGARBAGE", "WBDEVICE", "WBKICK", "WBAPPICON")) result$ic_Gadget <- as.list(rep(0, nrow(.icon.gadget.data))) names(result$ic_Gadget) <- .icon.gadget.data$par.names result$ic_Gadget$ga_UserData <- factor(version, c("OS1.x", "OS2.x")) result$ic_Gadget$ga_GadgetRender <- 1 result$ic_Gadget$ga_SelectRender <- as.numeric(two.images) result$ic_Gadget$ga_Flags <- c(back.fill, two.images, T, rep(F, 13)) names(result$ic_Gadget$ga_Flags) <- c("BACKFILL_MODE", "TWO_IMAGE_MODE", "IMAGE_MODE", paste0("UNDEFINED", 1:13)) if (type %in% c("WBDISK", "WBDRAWER", "WBGARBAGE")) { result$ic_DrawerData <- 1 result$drawer <- list( NewWindow = NULL, dd_CurrentX = 0, dd_CurrentY = 0 ) result$drawer$NewWindow <- as.list(c(0, 0, 400, 100, 255, 255, 0, 0, 0, 0, 0, 0, 0, 90, 65, 640, 200, 1)) names(result$drawer$NewWindow) <- .icon.new.window.data$par.names result$drawer$NewWindow$nw_IDCMPFlags <- rep(F, 32) result$drawer$NewWindow$nw_Flags <- rep(F, 32) } else { result[["drawer"]] <- list(NewWindow = list()) } make_img <- function(type, idx) { img <- as.list(rep(0, nrow(.icon.image.data))) names(img) <- .icon.image.data$par.names img$im_Bitmap <- icon.images[[paste0("project", idx)]] if (type == "WBDISK") { img$im_Bitmap <- icon.images[[paste0("disk", idx)]] } else if (type == "WBDRAWER") { img$im_Bitmap <- icon.images[[paste0("drawer", idx)]] } else if (type == "WBTOOL") { img$im_Bitmap <- icon.images[[paste0("tool", idx)]] } else if (type == "WBGARBAGE") { img$im_Bitmap <- icon.images[[paste0("garbage", idx)]] } ## use 4 colour palette for default icons attributes(img$im_Bitmap)$palette <- attributes(img$im_Bitmap)$palette[1:4] img$im_Depth <- log2(length(attributes(img$im_Bitmap)$palette)) img$im_Width <- dim(img$im_Bitmap)[2] img$im_Height <- dim(img$im_Bitmap)[1] img$im_Data <- 1 attributes(img$im_Bitmap)$bitmap.size <- sum(abs(.icon.image.data$byte)) + 16*ceiling(img$im_Width/16)*img$im_Height*img$im_Depth/8 attribs <- attributes(img$im_Bitmap) if (version == "OS1.x") { img$im_Bitmap[img$im_Bitmap == AmigaFFH::amiga_palettes$wb.os2[4]] <- AmigaFFH::amiga_palettes$wb.os1[[4]] img$im_Bitmap[img$im_Bitmap == AmigaFFH::amiga_palettes$wb.os2[2]] <- AmigaFFH::amiga_palettes$wb.os1[[3]] img$im_Bitmap[img$im_Bitmap == AmigaFFH::amiga_palettes$wb.os2[3]] <- AmigaFFH::amiga_palettes$wb.os1[[2]] img$im_Bitmap[img$im_Bitmap == AmigaFFH::amiga_palettes$wb.os2[1]] <- AmigaFFH::amiga_palettes$wb.os1[[1]] attribs$palette <- AmigaFFH::amiga_palettes$wb.os1 } attributes(img$im_Bitmap) <- attribs img$im_PlanePick <- 
length(attributes(img$im_Bitmap)$palette) - 1
    img
  }
  result$firstImage <- make_img(type, 1)
  if (two.images) result$secondImage <- make_img(type, 2)
  result$ic_Gadget$ga_Width  <- result$firstImage$im_Width
  result$ic_Gadget$ga_Height <- result$firstImage$im_Height
  if (two.images) {
    if (result$ic_Gadget$ga_Width < result$secondImage$im_Width)
      result$ic_Gadget$ga_Width <- result$secondImage$im_Width
    if (result$ic_Gadget$ga_Height < result$secondImage$im_Height)
      result$ic_Gadget$ga_Height <- result$secondImage$im_Height
  }
  result$defaultTool <- ""
  result$toolTypes   <- ""
  result$toolWindow  <- ""
  result$dd_Flags <- factor(NULL, c("DDFLAGS_SHOWDEFAULT", "DDFLAGS_SHOWICONS", "DDFLAGS_SHOWALL"))
  result$dd_ViewModes <- factor(NULL, c("DDVM_BYDEFAULT", "DDVM_BYICON", "DDVM_BYNAME",
                                        "DDVM_BYDATE", "DDVM_BYSIZE", "DDVM_BYTYPE"))
  class(result) <- "AmigaIcon"
  return(result)
}

#' Coerce raw data into an AmigaIcon class object
#'
#' [AmigaIcon()] objects are comprehensive representations of binary Amiga
#' Workbench icon files (*.info). Use this function to convert `raw` data from
#' such a file to an [AmigaIcon()] object.
#'
#' Icon files (*.info) were used as graphical representations of files and
#' directories on the Commodore Amiga. This function will convert the raw data from such files
#' into a more comprehensive named `list` (see [AmigaIcon()]). Use
#' [AmigaFFH::as.raw()] to achieve the inverse.
#'
#' @rdname rawToAmigaIcon
#' @name rawToAmigaIcon
#' @param x A vector of `raw` data that needs to be converted into an S3
#' [AmigaIcon()] class object.
#' @param palette Provide a palette (`vector` of colours) for the icon bitmap image.
#' When set to `NULL` (default) the standard Amiga Workbench palette will be used.
#' @returns Returns an [AmigaIcon()] class object based on `x`.
#' @examples
#' \dontrun{
#' ## generate a simple AmigaIcon object:
#' icon <- simpleAmigaIcon()
#'
#' ## convert it into raw data:
#' icon.raw <- as.raw(icon)
#'
#' ## convert the raw data back into an icon:
#' icon.restored <- rawToAmigaIcon(icon.raw)
#' }
#' @family AmigaIcon.operations
#' @family raw.operations
#' @author Pepijn de Vries
#' @export
rawToAmigaIcon <- function(x, palette = NULL) {
  if (!all(is.raw(x))) stop("x should be a vector of raw data.")
  if (!is.null(palette) && (!all(.is.colour(palette)) || length(palette) < 4))
    stop("The palette should consist of at least 4 colours.")
  if (any(duplicated(palette)))
    warning("Cannot convert this icon correctly back to raw data due to duplicated colours in the palette.")
  result <- with(.icon.data.head, .read.amigaData(x, byte, signed, par.names))
  if (result$ic_Magic != 0xe310) stop("This is not Amiga icon data")
  result$ic_Type <- .match.factor(result, "ic_Type", 1:8,
                                  c("WBDISK", "WBDRAWER", "WBTOOL", "WBPROJECT",
                                    "WBGARBAGE", "WBDEVICE", "WBKICK", "WBAPPICON"))
  result$ic_Gadget <- with(.icon.gadget.data, .read.amigaData(result$ic_Gadget, byte, signed, par.names))
  result$ic_Gadget$ga_Flags <- rev(as.logical(.rawToBitmap(result$ic_Gadget$ga_Flags, T, F)))
  names(result$ic_Gadget$ga_Flags) <- c("BACKFILL_MODE", "TWO_IMAGE_MODE", "IMAGE_MODE",
                                        paste0("UNDEFINED", 1:13))
  result$ic_Gadget$ga_UserData <- .match.factor(result$ic_Gadget, "ga_UserData", 0:1, c("OS1.x", "OS2.x"))
  ## get remainder of x:
  x <- x[-1:-sum(abs(.icon.data.head$byte))]
  result$drawer <- list(NewWindow = list())
  if (result$ic_DrawerData != 0) {
    result$drawer <- with(.icon.drawer.data, .read.amigaData(x, byte, signed, par.names))
    result$drawer$NewWindow <- with(.icon.new.window.data,
                                    .read.amigaData(result$drawer$NewWindow, byte, signed, par.names))
    result$drawer$NewWindow$nw_IDCMPFlags <- as.logical(.rawToBitmap(
      result$drawer$NewWindow$nw_IDCMPFlags, invert.bytes = T, T
    ))
    result$drawer$NewWindow$nw_Flags <- as.logical(.rawToBitmap(
      result$drawer$NewWindow$nw_Flags, invert.bytes = T, T
    ))
    x <- x[-1:-sum(abs(.icon.drawer.data$byte))]
  }
  if (is.null(palette)) {
    palette <- AmigaFFH::amiga_palettes[["wb.os2"]]
    if (result$ic_Gadget$ga_UserData == "OS1.x") {
      palette <- AmigaFFH::amiga_palettes[["wb.os1"]]
    }
  }
  .get.icon.image <- function(y, p = palette) {
    img <- with(.icon.image.data, .read.amigaData(y, byte, signed, par.names))
    y <- y[-1:-sum(abs(.icon.image.data$byte))]
    w <- 16*ceiling(img$im_Width/16)
    h <- img$im_Height
    bm <- bitmapToRaster(y[1:((w*h*img$im_Depth)/8)], img$im_Width, h, img$im_Depth,
                         p[1:(2^img$im_Depth)], interleaved = F)
    attributes(bm) <- c(list(palette = p,
                             bitmap.size = sum(abs(.icon.image.data$byte)) + w*h*img$im_Depth/8
    ), attributes(bm))
    img$im_Bitmap <- bm
    return(img)
  }
  result$firstImage <- .get.icon.image(x)
  x <- x[-1:-attributes(result$firstImage$im_Bitmap)[["bitmap.size"]]]
  result$secondImage <- list()
  if (result$ic_Gadget$ga_SelectRender != 0) {
    result$secondImage <- .get.icon.image(x)
    x <- x[-1:-attributes(result$secondImage$im_Bitmap)[["bitmap.size"]]]
  }
  result$defaultTool <- ""
  if (result$ic_DefaultTool != 0) {
    t.len <- .rawToAmigaInt(x[1:4], 32, F)
    result$defaultTool <- .rawToCharNull(x[5:(4 + t.len)])
    x <- x[-1:-(4 + t.len)]
  }
  result$toolTypes <- ""
  if (result$ic_ToolTypes != 0) {
    entries <- .rawToAmigaInt(x[1:4], 32, F)
    result$toolTypes <- NULL
    x <- x[-1:-4]
    for (i in 1:entries) {
      t.len <- .rawToAmigaInt(x[1:4], 32, F)
      result$toolTypes <- c(result$toolTypes, .rawToCharNull(x[5:(4 + t.len)]))
      x <- x[-1:-(4 + t.len)]
    }
  }
  result$toolWindow <- ""
  if (result$ic_ToolWindow != 0) {
    t.len <- .rawToAmigaInt(x[1:4], 32, F)
    result$toolWindow <- .rawToCharNull(x[5:(4 + t.len)])
    x <- x[-1:-(4 + t.len)]
  }
  result$dd_Flags <- factor(NULL, c("DDFLAGS_SHOWDEFAULT", "DDFLAGS_SHOWICONS", "DDFLAGS_SHOWALL"))
  result$dd_ViewModes <- factor(NULL, c("DDVM_BYDEFAULT", "DDVM_BYICON", "DDVM_BYNAME",
                                        "DDVM_BYDATE", "DDVM_BYSIZE", "DDVM_BYTYPE"))
  if (result$ic_DrawerData != 0 && result$ic_Gadget$ga_UserData == "OS2.x") {
    result$dd_Flags <- .rawToAmigaInt(x[1:4], 32, F)
    result$dd_Flags <- .match.factor(result, "dd_Flags", 0:2,
                                     c("DDFLAGS_SHOWDEFAULT", "DDFLAGS_SHOWICONS", "DDFLAGS_SHOWALL"))
    result$dd_ViewModes <- .rawToAmigaInt(x[5:6], 16, F)
    result$dd_ViewModes <- .match.factor(result, "dd_ViewModes", 0:5,
                                         c("DDVM_BYDEFAULT", "DDVM_BYICON", "DDVM_BYNAME",
                                           "DDVM_BYDATE", "DDVM_BYSIZE", "DDVM_BYTYPE"))
    x <- x[-1:-6]
  }
  class(result) <- "AmigaIcon"
  return(result)
}

#' Plot AmigaFFH objects
#'
#' Plot AmigaFFH objects using `base` plotting routines.
#'
#' A plotting routine is implemented for most AmigaFFH objects. See the usage section
#' for all supported objects.
#' @rdname plot
#' @name plot
#' @param x An AmigaFFH object to be plotted. See usage section for supported object
#' classes. If `x` is an [AmigaBitmapFont()] or [AmigaBitmapFontSet()]
#' class object, it will plot the full bitmap that is used to extract the font glyphs.
#' @param y When `x` is an [AmigaIcon()] class object, `y` can be used as
#' an index. In that case, when `y=1` the first icon image is shown. When `y=2`
#' the selected icon image is shown.
#'
#' When `x` is an [AmigaBitmapFontSet()] class
#' object, `y` can be used to plot the bitmap of a specific font height (`y`).
#'
#' When `x` is an [AmigaBasicShape()] class object, `y` can be used to select a
#' specific layer of the shape to plot, which can be one of `"bitmap"`, `"shadow"` or `"collision"`.
#' @param asp A `numeric` value indicating the aspect ratio for the plot. For
#' many AmigaFFH objects, the aspect ratio will be based on the Amiga display mode when known.
#' For [AmigaIcon()] objects a default aspect ratio of `2` is used (tall
#' pixels).
#'
#' When `x` is an [AmigaBitmapFont()] or [AmigaBitmapFontSet()] object,
#' an aspect ratio of 1 is used by default. When the `TALLDOT` flag
#' is set for that font, the aspect ratio is multiplied by 2. When the
#' `WIDEDOT` flag is set, it will be divided by 2.
#'
#' A custom aspect ratio can also be used and will override the ratios specified above.
#' @param ... Parameters passed onto the generic `graphics` plotting routine.
#'
#' When `x` is an [AmigaBitmapFont()] or an [AmigaBitmapFontSet()]
#' object, '`...`' can also be used for arguments that need to be
#' passed onto the [AmigaFFH::as.raster()] function.
#' @returns Returns `NULL` silently.
#' @examples
#' \dontrun{
#' ## load an IFF file
#' example.iff <- read.iff(system.file("ilbm8lores.iff", package = "AmigaFFH"))
#'
#' ## and plot it:
#' plot(example.iff)
#'
#' ## AmigaIcons can also be plotted:
#' plot(simpleAmigaIcon())
#'
#' ## As can the cursor from a SysConfig object:
#' plot(simpleSysConfig())
#'
#' ## As can Amiga fonts:
#' data(font_example)
#' plot(font_example)
#' plot(font_example, text = "foo bar", style = "underlined", interpolate = F)
#'
#' ## As can AmigaBasicShapes:
#' ball <- read.AmigaBasicShape(system.file("ball.shp", package = "AmigaFFH"))
#' plot(ball)
#' }
#' @author Pepijn de Vries
#' @export
plot.AmigaIcon <- function(x, y, asp = 2, ...)
{
  if (missing(y)) y <- 1
  ## The aspect ratio is not derived from a display mode here; the default of
  ## 2 approximates the Amiga's tall pixels and may be a bit off...
  plot(as.raster(x, selected = y), asp = asp, ...)
}

#' @family raster.operations
#' @rdname as.raster
#' @name as.raster
#' @export
as.raster.AmigaIcon <- function(x, selected = F, ...) {
  y <- selected[[1]]
  if (is.logical(selected)) y <- as.numeric(selected[[1]]) + 1
  img <- x$firstImage$im_Bitmap
  if (x$ic_Gadget$ga_Flags[["TWO_IMAGE_MODE"]] && y == 2) img <- x$secondImage$im_Bitmap
  pal <- attributes(img)[["palette"]]
  img <- apply(img, 2, function(y) match(y, pal))
  if (x$ic_Gadget$ga_Flags[["BACKFILL_MODE"]]) {
    ## set all pixels at the edge that are equal to the background colour to NA
    img[1,][img[1,] == 1] <- NA
    img[,1][img[,1] == 1] <- NA
    img[nrow(img),][img[nrow(img),] == 1] <- NA
    img[,ncol(img)][img[,ncol(img)] == 1] <- NA
    ## Then flood fill the NA values to the remainder of the icon
    center.sel <- as.matrix(expand.grid(2:(nrow(img) - 1), 2:(ncol(img) - 1)))
    center.left   <- center.sel
    center.right  <- center.sel
    center.top    <- center.sel
    center.bottom <- center.sel
    center.left[,1]   <- center.left[,1] - 1
    center.right[,1]  <- center.right[,1] + 1
    center.top[,2]    <- center.top[,2] - 1
    center.bottom[,2] <- center.bottom[,2] + 1
    img.center <- img[center.sel]
    while (T) {
      img.center[img.center == 1 & is.na(img[center.left])]   <- NA
      img.center[img.center == 1 & is.na(img[center.right])]  <- NA
      img.center[img.center == 1 & is.na(img[center.top])]    <- NA
      img.center[img.center == 1 & is.na(img[center.bottom])] <- NA
      if (all(is.na(img.center) == is.na(img[center.sel]))) break
      img[center.sel] <- img.center
    }
  }
  if (!x$ic_Gadget$ga_Flags[["TWO_IMAGE_MODE"]] &&
      !x$ic_Gadget$ga_Flags[["BACKFILL_MODE"]] && y == 2)
    img <- 1 + 2^x$firstImage$im_Depth - img
  img <- apply(img, 2, function(y) pal[y])
  img <- grDevices::as.raster(img)
  attributes(img)$palette <- pal
  return(img)
}

#' @export
print.AmigaIcon <- function(x, ...) {
  print(sprintf("A %s type Amiga Icon with %s in %s mode.",
                substring(tolower(x$ic_Type), 3),
                ifelse(x$ic_Gadget$ga_Flags["TWO_IMAGE_MODE"], "two images", "one image"),
                ifelse(x$ic_Gadget$ga_Flags["BACKFILL_MODE"], "back fill", "complement")), ...)
}

#' @rdname as.raw
#' @name as.raw
#' @export
as.raw.AmigaIcon <- function(x, ...)
{
  withCallingHandlers({
    ## TODO remove calling handlers once the replace functions are fully implemented
    x$ic_Gadget$ga_Flags <- .bitmapToRaw(rev(x$ic_Gadget$ga_Flags), T, F)
    x$ic_Gadget$ga_UserData <- .match.factor.inv(x$ic_Gadget, "ga_UserData", 0:1, c("OS1.x", "OS2.x"))
    sec.img <- x$ic_Gadget$ga_SelectRender != 0
    x$ic_Gadget <- .write.amigaData(x$ic_Gadget, .icon.gadget.data$byte,
                                    .icon.gadget.data$signed, .icon.gadget.data$par.names)
    x$ic_Type <- .match.factor.inv(x, "ic_Type", 1:8,
                                   c("WBDISK", "WBDRAWER", "WBTOOL", "WBPROJECT",
                                     "WBGARBAGE", "WBDEVICE", "WBKICK", "WBAPPICON"))
    if (x$ic_DrawerData != 0) {
      x$drawer$NewWindow$nw_IDCMPFlags <- .bitmapToRaw(x$drawer$NewWindow$nw_IDCMPFlags, F, T)
      x$drawer$NewWindow$nw_Flags <- .bitmapToRaw(x$drawer$NewWindow$nw_Flags, F, T)
      x$drawer$NewWindow <- with(.icon.new.window.data,
                                 .write.amigaData(x$drawer$NewWindow, byte, signed, par.names))
      x$drawer <- with(.icon.drawer.data, .write.amigaData(x$drawer, byte, signed, par.names))
    } else {
      x$drawer <- NULL
    }
    iconImgToRaw <- function(y) {
      pal <- attributes(y$im_Bitmap)[["palette"]][1:(2^y$im_Depth)]
      list(
        bmhead = .write.amigaData(y, .icon.image.data$byte,
                                  .icon.image.data$signed, .icon.image.data$par.names),
        bm = .bitmapToRaw(rasterToBitmap(
          y$im_Bitmap, depth = y$im_Depth, interleaved = F,
          indexing = function(x, length.out) index.colours(x, length.out, palette = pal)),
          T, F)
      )
    }
    x$firstImage <- iconImgToRaw(x$firstImage)
    if (sec.img) {
      x$secondImage <- iconImgToRaw(x$secondImage)
    } else {
      x$secondImage <- NULL
    }
    if (x$defaultTool != "") {
      x$defaultTool <- c(
        .amigaIntToRaw(nchar(x$defaultTool) + 1, 32, F),
        charToRaw(x$defaultTool),
        raw(1))
    } else {
      x$defaultTool <- NULL
    }
    if (length(x$toolTypes) == 1 && x$toolTypes == "") {
      x$toolTypes <- NULL
    } else {
      x$toolTypes <- c(
        .amigaIntToRaw(length(x$toolTypes), 32, F),
        unlist(lapply(x$toolTypes, function(y){
          nc <- nchar(y)
          if (nc == 0) yc <- raw(0) else yc <- charToRaw(y)
          c(.amigaIntToRaw(nc, 32, F), yc, raw(1))
        }))
      )
    }
    if (x$toolWindow != "") {
      x$toolWindow <- c(
        .amigaIntToRaw(nchar(x$toolWindow) + 1, 32, F),
        charToRaw(x$toolWindow),
        raw(1))
    } else {
      x$toolWindow <- NULL
    }
    x[.icon.data.head$par.names] <- lapply(1:nrow(.icon.data.head), function(y) {
      .write.amigaData(x[.icon.data.head$par.names[y]],
                       .icon.data.head$byte[y],
                       .icon.data.head$signed[y],
                       .icon.data.head$par.names[y])
    })
    if (length(x$dd_Flags) == 1) {
      x$dd_Flags <- .match.factor.inv(x, "dd_Flags", 0:2,
                                      c("DDFLAGS_SHOWDEFAULT", "DDFLAGS_SHOWICONS", "DDFLAGS_SHOWALL"))
      x$dd_Flags <- .amigaIntToRaw(x$dd_Flags, 32, F)
    } else {
      x$dd_Flags <- NULL
    }
    if (length(x$dd_ViewModes) == 1) {
      x$dd_ViewModes <- .match.factor.inv(x, "dd_ViewModes", 0:5,
                                          c("DDVM_BYDEFAULT", "DDVM_BYICON", "DDVM_BYNAME",
                                            "DDVM_BYDATE", "DDVM_BYSIZE", "DDVM_BYTYPE"))
      x$dd_ViewModes <- .amigaIntToRaw(x$dd_ViewModes, 16, F)
    } else {
      x$dd_ViewModes <- NULL
    }
    x <- unlist(x)
    names(x) <- NULL
    return(x)
  }, warning=function(w) {
    if (startsWith(conditionMessage(w), "Replacement operator for AmigaIcon"))
      invokeRestart("muffleWarning")
  })
}

#' Write an Amiga Workbench icon (info) file
#'
#' Graphical representations of files and directories (icons) are stored as
#' separate files (with the .info extension) on the Amiga. This function writes
#' [AmigaIcon()] class objects to such files.
#'
#' The [AmigaIcon()] S3 object provides a comprehensive format
#' for Amiga icons, which are used as a graphical representation of files
#' and directories on the Amiga. The [AmigaIcon()] object is a named
#' list containing all information of an icon. Use this function to
#' write this object to a file which can be used on the Commodore Amiga
#' or emulator.
#'
#' @rdname write.AmigaIcon
#' @name write.AmigaIcon
#' @param x An [AmigaIcon()] class object.
#' @param file A `character` string representing the file name to which the
#' icon data should be written.
#' @param disk A virtual Commodore Amiga disk to which the `file` should be
#' written. This should be an [`amigaDisk()`][adfExplorer::amigaDisk-class] object. Using
#' this argument requires the adfExplorer package.
#' When set to `NULL`, this argument is ignored.
#' @returns Returns `NULL` or an `integer` status passed on by the
#' [close()] function, that is used to close the file connection.
#' It is returned invisibly. Or, when `disk` is specified, a copy of
#' `disk` is returned to which the file is written.
#'
#' @examples
#' \dontrun{
#' ## create a simple AmigaIcon:
#' icon <- simpleAmigaIcon()
#'
#' ## write the icon to the temp dir:
#' write.AmigaIcon(icon, file.path(tempdir(), "icon.info"))
#' }
#' @family AmigaIcon.operations
#' @family io.operations
#' @author Pepijn de Vries
#' @export
write.AmigaIcon <- function(x, file, disk = NULL) {
  if (!inherits(x, "AmigaIcon")) stop("x should be of S3 class AmigaIcon.")
  .write.generic(x, file, disk)
}

#' Read an Amiga Workbench icon (info) file
#'
#' Graphical representations of files and directories (icons) are stored as
#' separate files (with the .info extension) on the Amiga. This function reads such files
#' and imports them as [AmigaIcon()] class objects.
#'
#' The [AmigaIcon()] S3 object provides a comprehensive format
#' for Amiga icons, which are used as a graphical representation of files
#' and directories on the Amiga. The [AmigaIcon()] object is a named
#' list containing all information of an icon. Use this function to
#' read an Amiga icon (with the .info extension) from a file and convert
#' it into an [AmigaIcon()] object.
#'
#' @rdname read.AmigaIcon
#' @name read.AmigaIcon
#' @param file A `character` string representing the file name from which the
#' icon data should be read.
#' @param disk A virtual Commodore Amiga disk from which the `file` should be
#' read. This should be an [`amigaDisk()`][adfExplorer::amigaDisk-class] object. Using
#' this argument requires the adfExplorer package.
#' When set to `NULL`, this argument is ignored.
#' @param ... Arguments passed on to [rawToAmigaIcon()].
#' @returns Returns an [AmigaIcon()] class object as read from the `file`.
#' @examples
#' \dontrun{
#' ## create a simple AmigaIcon:
#' icon <- simpleAmigaIcon()
#'
#' ## write the icon to the temp dir:
#' write.AmigaIcon(icon, file.path(tempdir(), "icon.info"))
#'
#' ## read the same file:
#' icon2 <- read.AmigaIcon(file.path(tempdir(), "icon.info"))
#' }
#' @family AmigaIcon.operations
#' @family io.operations
#' @author Pepijn de Vries
#' @export
read.AmigaIcon <- function(file, disk = NULL, ...) {
  dat <- .read.generic(file, disk)
  rawToAmigaIcon(dat, ...)
}

#' @export
`$<-.AmigaIcon` <- function(x, i, value) {
  x[[i]] <- value
  x
}

#' @export
`[[<-.AmigaIcon` <- function(x, i, value) {
  cl <- class(x)
  class(x) <- NULL
  x[[i]] <- value
  class(x) <- cl
  ## TODO update this replacement function and remove warning
  warning(paste0("Replacement operator for AmigaIcon objects ",
                 "will be modified in future versions of this package. ",
                 "Note that not all replacement operations may be ",
                 "allowed in future versions of this package."))
  x
}
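## Illustrative round trip through the functions defined above (a sketch,
## guarded with `if (FALSE)` so it does not run when the file is sourced;
## the argument values are assumptions for demonstration only):
if (FALSE) {
  icon <- simpleAmigaIcon(version = "OS2.x", type = "WBTOOL")
  icon.raw <- as.raw(icon)               # serialise to the binary file layout
  icon2 <- rawToAmigaIcon(icon.raw)      # ...and parse it back
  write.AmigaIcon(icon2, file.path(tempdir(), "tool.info"))
  icon3 <- read.AmigaIcon(file.path(tempdir(), "tool.info"))
}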
ammonia <- function(total_ammonia, temperature, ph, type_of_temperature) {
  ## Convert temperature to Kelvin (K) if it is Celsius (C) or Fahrenheit (F)
  if (type_of_temperature == "C"){
    temperature <- temperature + 273
  }
  if (type_of_temperature == "F"){
    temperature <- 5 * (temperature - 32) / 9 + 273
  }
  ## Calculate pka with the equation provided by Emerson et al. (1975)
  pka <- 0.09018 + 2729.92/temperature
  ## Calculate difference between pka and pH
  pka_ph <- pka-ph
  ## Calculate 10 to the power of the difference calculated
  ten_pka_ph <- 10^pka_ph
  ## Calculate ammonia concentration
  nh3 <- 1/(ten_pka_ph+1)
  ## Transform to percentage
  perc_nh3 <- nh3*100
  ## Calculate the concentration of un-ionized ammonia in the total ammonia aqueous solution
  nh3_mgL <- total_ammonia*perc_nh3/100
  list(pka = pka,
       pka_ph = pka_ph,
       ten_pka_ph = ten_pka_ph,
       nh3 = nh3,
       # perc_nh3 = perc_nh3,
       nh3_mgL = nh3_mgL)
}
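## Illustrative call (a sketch; the input values are assumptions):
## 2 mg/L of total ammonia at 25 degrees Celsius and pH 8.
if (FALSE) {
  res <- ammonia(total_ammonia = 2, temperature = 25, ph = 8,
                 type_of_temperature = "C")
  res$nh3_mgL  # concentration of un-ionized ammonia (mg/L)
}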
#----------------------------------------------------------------------------------------
#' Arcsine-Log-logistic (ASLL) Cumulative Distribution Function.
#----------------------------------------------------------------------------------------
#' @param beta : shape parameter
#' @param alpha : scale parameter
#' @param t : positive argument
#' @return the value of the ASLL Cumulative Distribution Function.
#' @references Tung, Y. L., Ahmad, Z., & Mahmoudi, E. (2021). The Arcsine-X Family of Distributions with Applications to Financial Sciences. Comput. Syst. Sci. Eng., 39(3), 351-363.
#' @export
#'
#' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]}
#'
#' @examples
#' t=runif(10,min=0,max=1)
#' pASLL(t=t, alpha=0.7, beta=0.5)
#'
pASLL<-function(t,alpha,beta){
  cdf0<-(2/pi)*(asin((t^(beta))/(alpha^(beta)+t^(beta))))
  return(cdf0)
}
#----------------------------------------------------------------------------------------
#' Arcsine-Log-logistic (ASLL) Survival Function.
#----------------------------------------------------------------------------------------
#' @param beta : shape parameter
#' @param alpha : scale parameter
#' @param t : positive argument
#' @return the value of the ASLL Survival Function.
#' @references Tung, Y. L., Ahmad, Z., & Mahmoudi, E. (2021). The Arcsine-X Family of Distributions with Applications to Financial Sciences. Comput. Syst. Sci. Eng., 39(3), 351-363.
#' @export
#'
#' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]}
#'
#' @examples
#' t=runif(10,min=0,max=1)
#' sASLL(t=t, alpha=0.7, beta=0.5)
#'
sASLL<-function(t,alpha,beta){
  cdf0<-(2/pi)*(asin((t^(beta))/(alpha^(beta)+t^(beta))))
  val<-1-cdf0
  return(val)
}
#----------------------------------------------------------------------------------------
#' Arcsine-Log-logistic (ASLL) Hazard Rate Function.
#----------------------------------------------------------------------------------------
#' @param beta : shape parameter
#' @param alpha : scale parameter
#' @param t : positive argument
#' @param log :log scale (TRUE or FALSE)
#' @return the value of the ASLL Hazard Rate Function.
#' @export
#'
#' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]}
#'
#' @examples
#' t=runif(10,min=0,max=1)
#' rASLL(t=t, alpha=0.7, beta=0.5,log=FALSE)
#'
rASLL<-function(t,alpha,beta,log=FALSE){
  cdf0<-(2/pi)*(asin((t^(beta))/(alpha^(beta)+t^(beta))))
  cdf0 <- ifelse(cdf0==1,0.9999999,cdf0)
  pdf0<-((2/pi)*(((beta/alpha)*(t/alpha)^(beta-1))/((1+(t/alpha)^(beta))^2))/sqrt(1-((t^(beta))/(alpha^(beta)+t^(beta)))^2))
  log.h<-log(pdf0)-log(1-cdf0)
  ifelse(log, return(log.h), return(exp(log.h)))
}
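## Illustrative check of the ASLL functions above (a sketch; parameter
## values are assumptions): the CDF and the survival function are
## complementary, and the hazard follows from the density and survival.
if (FALSE) {
  t <- seq(0.1, 2, by = 0.1)
  all.equal(pASLL(t, alpha = 0.7, beta = 0.5) + sASLL(t, alpha = 0.7, beta = 0.5),
            rep(1, length(t)))
  rASLL(t, alpha = 0.7, beta = 0.5, log = TRUE)  # log-hazard values
}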
#----------------------------------------------------------------------------------------
#' Sine-Log-logistic (SLL) Survivor Function.
#----------------------------------------------------------------------------------------
#' @param beta : shape parameter
#' @param alpha : scale parameter
#' @param t : positive argument
#' @return the value of the SLL Survivor function
#' @references Souza, L., Junior, W., De Brito, C., Chesneau, C., Ferreira, T., & Soares, L. (2019). On the Sin-G class of distributions: theory, model and application. Journal of Mathematical Modeling, 7(3), 357-379.
#' @export
#'
#' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]}
#'
#' @examples
#' t=runif(10,min=0,max=1)
#' sSLL(t=t, alpha=0.7, beta=0.5)
sSLL<-function(t,alpha,beta){
  cdf0<-sin((pi/2)*((t^(beta))/(alpha^(beta)+t^(beta))))
  val<-1-cdf0
  return(val)
}
#----------------------------------------------------------------------------------------
#' Sine-Log-logistic (SLL) Cumulative Distribution Function.
#----------------------------------------------------------------------------------------
#' @param beta : shape parameter
#' @param alpha : scale parameter
#' @param t : positive argument
#' @return the value of the SLL Cumulative Distribution function
#' @references Souza, L., Junior, W., De Brito, C., Chesneau, C., Ferreira, T., & Soares, L. (2019). On the Sin-G class of distributions: theory, model and application. Journal of Mathematical Modeling, 7(3), 357-379.
#' @export
#'
#' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]}
#'
#' @examples
#' t=runif(10,min=0,max=1)
#' pSLL(t=t, alpha=0.7, beta=0.5)
#'
pSLL<-function(t,alpha,beta){
  cdf0<-sin((pi/2)*((t^(beta))/(alpha^(beta)+t^(beta))))
  val<-cdf0
  return(val)
}
#----------------------------------------------------------------------------------------
#' Sine-Log-logistic (SLL) Hazard Function.
#----------------------------------------------------------------------------------------
#' @param beta : shape parameter
#' @param alpha : scale parameter
#' @param t : positive argument
#' @param log :log scale (TRUE or FALSE)
#' @return the value of the SLL hazard function
#' @references Souza, L. (2015). New trigonometric classes of probabilistic distributions. Thesis, Universidade Federal Rural de Pernambuco, Brazil.
#' @export
#'
#' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]}
#'
#' @examples
#' t=runif(10,min=0,max=1)
#' rSLL(t=t, alpha=0.7, beta=0.5,log=FALSE)
#'
rSLL<-function(t,alpha,beta,log=FALSE){
  cdf0<-sin((pi/2)*((t^(beta))/(alpha^(beta)+t^(beta))))
  cdf0 <- ifelse(cdf0==1,0.9999999,cdf0)
  pdf0<-((pi/2))*((beta/alpha)*(t/alpha)^(beta-1)/((1+(t/alpha)^(beta))^2))*cos((pi/2)*((t^(beta))/(alpha^(beta)+t^(beta))))
  log.h<-log(pdf0)-log(1-cdf0)
  ifelse(log, return(log.h), return(exp(log.h)))
}
#----------------------------------------------------------------------------------------
#' Cosine-Log-logistic (CLL) Survivor Function.
#----------------------------------------------------------------------------------------
#' @param beta : shape parameter
#' @param alpha : scale parameter
#' @param t : positive argument
#' @return the value of the CLL Survivor function
#' @references Mahmood, Z., M Jawa, T., Sayed-Ahmed, N., Khalil, E. M., Muse, A. H., & Tolba, A. H. (2022). An Extended Cosine Generalized Family of Distributions for Reliability Modeling: Characteristics and Applications with Simulation Study. Mathematical Problems in Engineering, 2022.
#' @export
#'
#' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]}
#'
#' @examples
#' t=runif(10,min=0,max=1)
#' sCLL(t=t, alpha=0.7, beta=0.5)
sCLL<-function(t,alpha,beta){
  cdf0<-1-cos((pi/2)*((t^(beta))/(alpha^(beta)+t^(beta))))
  val<-1-cdf0
  return(val)
}
#----------------------------------------------------------------------------------------
#' Cosine-Log-logistic (CLL) Cumulative Distribution Function.
#---------------------------------------------------------------------------------------- #' @param beta : shape parameter #' @param alpha : scale parameter #' @param t : positive argument #' @return the value of the CLL Cumulative Distribution function #' @references Souza, L., Junior, W. R. D. O., de Brito, C. C. R., Ferreira, T. A., & Soares, L. G. (2019). General properties for the Cos-G class of distributions with applications. Eurasian Bulletin of Mathematics (ISSN: 2687-5632), 63-79. #' @export #' #' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]} #' #' @examples #' t=runif(10,min=0,max=1) #' pCLL(t=t, alpha=0.7, beta=0.5) #' pCLL<-function(t,alpha,beta){ cdf0<-1-cos((pi/2)*((t^(beta))/(alpha^(beta)+t^(beta)))) val<-cdf0 return(val) } #---------------------------------------------------------------------------------------- #' Cosine-Log-logistic (CLL) Hazard Function. #---------------------------------------------------------------------------------------- #' @param beta : shape parameter #' @param alpha : scale parameter #' @param t : positive argument #' @param log :log scale (TRUE or FALSE) #' @return the value of the CLL hazard function #' @references Souza, L., Junior, W. R. D. O., de Brito, C. C. R., Ferreira, T. A., & Soares, L. G. (2019). General properties for the Cos-G class of distributions with applications. Eurasian Bulletin of Mathematics (ISSN: 2687-5632), 63-79. #' @export #' #' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]} #' #' @examples #' t=runif(10,min=0,max=1) #' rCLL(t=t, alpha=0.7, beta=0.5,log=FALSE) #' rCLL<-function(t,alpha,beta,log=FALSE){ cdf0<-1-cos((pi/2)*((t^(beta))/(alpha^(beta)+t^(beta)))) cdf0 <- ifelse(cdf0==1,0.9999999,cdf0) pdf0<-((pi/2))*((beta/alpha)*(t/alpha)^(beta-1)/((1+(t/alpha)^(beta))^2))*sin((pi/2)*((t^(beta))/(alpha^(beta)+t^(beta)))) log.h<-log(pdf0)-log(1-cdf0) ifelse(log, return(log.h), return(exp(log.h))) } #---------------------------------------------------------------------------------------- #' Tangent-Log-logistic (TLL) Survivor Function. #---------------------------------------------------------------------------------------- #' @param beta : shape parameter #' @param alpha : scale parameter #' @param t : positive argument #' @return the value of the TLL Survivor function #' @export #' #' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]} #' #' @examples #' t=runif(10,min=0,max=1) #' sTLL(t=t, alpha=0.7, beta=0.5) sTLL<-function(t,alpha,beta){ cdf0<-tan((pi/4)*((t^(beta))/(alpha^(beta)+t^(beta)))) val<-1-cdf0 return(val) } #---------------------------------------------------------------------------------------- #' Tangent-Log-logistic (TLL) Cumulative Distribution Function. 
#---------------------------------------------------------------------------------------- #' @param beta : shape parameter #' @param alpha : scale parameter #' @param t : positive argument #' @return the value of the TLL Cumulative Distribution function #' @export #' #' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]} #' #' @examples #' t=runif(10,min=0,max=1) #' pTLL(t=t, alpha=0.7, beta=0.5) #' pTLL<-function(t,alpha,beta){ cdf0<-tan((pi/4)*((t^(beta))/(alpha^(beta)+t^(beta)))) val<-cdf0 return(val) } #---------------------------------------------------------------------------------------- #' Tangent-Log-logistic (TLL) Hazard Function. #---------------------------------------------------------------------------------------- #' @param beta : shape parameter #' @param alpha : scale parameter #' @param t : positive argument #' @param log :log scale (TRUE or FALSE) #' @return the value of the TLL hazard function #' @references Muse, A. H., Tolba, A. H., Fayad, E., Abu Ali, O. A., Nagy, M., & Yusuf, M. (2021). Modelling the COVID-19 mortality rate with a new versatile modification of the log-logistic distribution. Computational Intelligence and Neuroscience, 2021. #' @export #' #' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]} #' #' @examples #' t=runif(10,min=0,max=1) #' rTLL(t=t, alpha=0.7, beta=0.5,log=FALSE) #' rTLL<-function(t,alpha,beta,log=FALSE){ cdf0<-tan((pi/4)*((t^(beta))/(alpha^(beta)+t^(beta)))) cdf0 <- ifelse(cdf0==1,0.9999999,cdf0) pdf0<-((pi/4))*((beta/alpha)*(t/alpha)^(beta-1)/((1+(t/alpha)^(beta))^2))*sec((pi/4)*((t^(beta))/(alpha^(beta)+t^(beta))))^2 log.h<-log(pdf0)-log(1-cdf0) ifelse(log, return(log.h), return(exp(log.h))) } #---------------------------------------------------------------------------------------- #' Secant-log-logistic (SCLL) Survivor Function. #---------------------------------------------------------------------------------------- #' @param beta : shape parameter #' @param alpha : scale parameter #' @param t : positive argument #' @return the value of the SCLL Survivor function #' @export #' #' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]} #' #' @examples #' t=runif(10,min=0,max=1) #' sSCLL(t=t, alpha=0.7, beta=0.5) sSCLL<-function(t,alpha,beta){ cdf0<-sec((pi/3)*((t^(beta))/(alpha^(beta)+t^(beta))))-1 val<-1-cdf0 return(val) } #---------------------------------------------------------------------------------------- #' Secant-log-logistic (SCLL) Cumulative Distribution Function. #---------------------------------------------------------------------------------------- #' @param beta : shape parameter #' @param alpha : scale parameter #' @param t : positive argument #' @return the value of the SCLL Cumulative Distribution function #' @references Souza, L., de Oliveira, W. R., de Brito, C. C. R., Chesneau, C., Fernandes, R., & Ferreira, T. A. (2022). Sec-G class of distributions: Properties and applications. Symmetry, 14(2), 299. #' @export #' #' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]} #' #' @examples #' t=runif(10,min=0,max=1) #' pSCLL(t=t, alpha=0.7, beta=0.5) #' pSCLL<-function(t,alpha,beta){ cdf0<-sec((pi/3)*((t^(beta))/(alpha^(beta)+t^(beta))))-1 val<-cdf0 return(val) } #---------------------------------------------------------------------------------------- #' Secant-log-logistic (SCLL) Hazard Function. 
#---------------------------------------------------------------------------------------- #' @param beta : shape parameter #' @param alpha : scale parameter #' @param t : positive argument #' @param log :log scale (TRUE or FALSE) #' @return the value of the SCLL hazard function #' @references Souza, L., de Oliveira, W. R., de Brito, C. C. R., Chesneau, C., Fernandes, R., & Ferreira, T. A. (2022). Sec-G class of distributions: Properties and applications. Symmetry, 14(2), 299. #' @references Tung, Y. L., Ahmad, Z., & Mahmoudi, E. (2021). The Arcsine-X Family of Distributions with Applications to Financial Sciences. Comput. Syst. Sci. Eng., 39(3), 351-363. #' @export #' #' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]} #' #' @examples #' t=runif(10,min=0,max=1) #' rSCLL(t=t, alpha=0.7, beta=0.5,log=FALSE) #' rSCLL<-function(t,alpha,beta,log=FALSE){ cdf0<-sec((pi/3)*((t^(beta))/(alpha^(beta)+t^(beta))))-1 cdf0 <- ifelse(cdf0==1,0.9999999,cdf0) pdf0<-(pi/3)*((beta/alpha)*(t/alpha)^(beta-1)/((1+(t/alpha)^(beta))^2))*tan((pi/3)*((t^(beta))/(alpha^(beta)+t^(beta))))*sec((pi/3)*((t^(beta))/(alpha^(beta)+t^(beta)))) log.h<-log(pdf0)-log(1-cdf0) ifelse(log, return(log.h), return(exp(log.h))) } #---------------------------------------------------------------------------------------- #' Arctangent-Log-logistic (ATLL) Survivor Function. #---------------------------------------------------------------------------------------- #' @param beta : shape parameter #' @param alpha : scale parameter #' @param t : positive argument #' @return the value of the ATLL Survivor function #' @references Alkhairy, I., Nagy, M., Muse, A. H., & Hussam, E. (2021). The Arctan-X family of distributions: Properties, simulation, and applications to actuarial sciences. Complexity, 2021. #' @export #' #' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]} #' #' @examples #' t=runif(10,min=0,max=1) #' sATLL(t=t, alpha=0.7, beta=0.5) sATLL<-function(t,alpha,beta){ cdf0<-(4/pi)*(atan((t^(beta))/(alpha^(beta)+t^(beta)))) val<-1-cdf0 return(val) } #---------------------------------------------------------------------------------------- #' Arctangent-Log-logistic (ATLL) Cumulative Distribution Function. #---------------------------------------------------------------------------------------- #' @param beta : shape parameter #' @param alpha : scale parameter #' @param t : positive argument #' @return the value of the ATLL Cumulative Distribution function #' @references Alkhairy, I., Nagy, M., Muse, A. H., & Hussam, E. (2021). The Arctan-X family of distributions: Properties, simulation, and applications to actuarial sciences. Complexity, 2021. #' @export #' #' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]} #' #' @examples #' t=runif(10,min=0,max=1) #' pATLL(t=t, alpha=0.7, beta=0.5) #' pATLL<-function(t,alpha,beta){ cdf0<-(4/pi)*(atan((t^(beta))/(alpha^(beta)+t^(beta)))) val<-cdf0 return(val) } #---------------------------------------------------------------------------------------- #' Arctangent-Log-logistic (ATLL) Hazard Function. 
#----------------------------------------------------------------------------------------
#' @param beta : shape parameter
#' @param alpha : scale parameter
#' @param t : positive argument
#' @param log :log scale (TRUE or FALSE)
#' @return the value of the ATLL hazard function
#' @export
#'
#' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]}
#'
#' @examples
#' t=runif(10,min=0,max=1)
#' rATLL(t=t, alpha=0.7, beta=0.5,log=FALSE)
#'
rATLL<-function(t,alpha,beta,log=FALSE){
  cdf0<-(4/pi)*(atan((t^(beta))/(alpha^(beta)+t^(beta))))
  cdf0 <- ifelse(cdf0==1,0.9999999,cdf0)
  pdf0<-((4/pi)*(((beta/alpha)*(t/alpha)^(beta-1))/((1+(t/alpha)^(beta))^2))/(1+((t^(beta))/(alpha^(beta)+t^(beta)))^2))
  log.h<-log(pdf0)-log(1-cdf0)
  ifelse(log, return(log.h), return(exp(log.h)))
}
#--------------------------------------------------------------------------------------------------------------------------
#' New Generalized Log-logistic (NGLL) hazard function.
#--------------------------------------------------------------------------------------------------------------------------
#' @param kappa : scale parameter
#' @param alpha : shape parameter
#' @param eta : shape parameter
#' @param zeta : shape parameter
#' @param t : positive argument
#' @param log :log scale (TRUE or FALSE)
#' @return the value of the NGLL hazard function
#' @export
#'
#' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]}
#'
#' @examples
#' t=runif(10,min=0,max=1)
#' rNGLL(t=t, kappa=0.5, alpha=0.35, eta=0.7, zeta=1.4, log=FALSE)
#'
rNGLL<- function(t,kappa,alpha,eta,zeta, log=FALSE){
  pdf0 <- ((alpha*kappa)*((t*kappa)^(alpha-1)))/(1+zeta*((t*eta)^alpha))^(((kappa^alpha)/(zeta*(eta^alpha)))+1)
  cdf0 <- (1-((1+zeta*((t*eta)^alpha))^(-((kappa^alpha)/(zeta*(eta^alpha))))))
  cdf0 <- ifelse(cdf0==1,0.9999999,cdf0)
  log.h <- log(pdf0) - log(1-cdf0)
  ifelse(log, return(log.h), return(exp(log.h)))
}
#--------------------------------------------------------------------------------------------------------------------------
#' New Generalized Log-logistic (NGLL) cumulative distribution function.
#--------------------------------------------------------------------------------------------------------------------------
#' @param kappa : scale parameter
#' @param alpha : shape parameter
#' @param eta : shape parameter
#' @param zeta : shape parameter
#' @param t : positive argument
#' @return the value of the NGLL cumulative distribution function
#' @references Hassan Muse, A. A new generalized log-logistic distribution with increasing, decreasing, unimodal and bathtub-shaped hazard rates: properties and applications, in Proceedings of the Symmetry 2021 - The 3rd International Conference on Symmetry, 8–13 August 2021, MDPI: Basel, Switzerland, doi:10.3390/Symmetry2021-10765.
#' @export
#'
#' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]}
#'
#' @examples
#' t=runif(10,min=0,max=1)
#' pNGLL(t=t, kappa=0.5, alpha=0.35, eta=0.7, zeta=1.4)
#'
pNGLL <- function(t,kappa,alpha,eta,zeta){
  cdf0 <- (1-((1+zeta*((t*eta)^alpha))^(-((kappa^alpha)/(zeta*(eta^alpha))))))
  ## return the cumulative distribution function, consistent with the
  ## documentation above and with the other pXXX functions in this file
  return(cdf0)
}
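## Illustrative check of the NGLL functions above (a sketch; parameter
## values are assumptions): the CDF stays within [0, 1] and the hazard
## is strictly positive.
if (FALSE) {
  t <- seq(0.1, 2, by = 0.1)
  range(pNGLL(t, kappa = 0.5, alpha = 0.35, eta = 0.7, zeta = 1.4))
  range(rNGLL(t, kappa = 0.5, alpha = 0.35, eta = 0.7, zeta = 1.4))
}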
#--------------------------------------------------------------------------------------------------------------------------
#' New Generalized Log-logistic (NGLL) survivor function.
#--------------------------------------------------------------------------------------------------------------------------
#' @param kappa : scale parameter
#' @param alpha : shape parameter
#' @param eta : shape parameter
#' @param zeta : shape parameter
#' @param t : positive argument
#' @return the value of the NGLL survivor function
#' @references Hassan Muse, A. A new generalized log-logistic distribution with increasing, decreasing, unimodal and bathtub-shaped hazard rates: properties and applications, in Proceedings of the Symmetry 2021 - The 3rd International Conference on Symmetry, 8–13 August 2021, MDPI: Basel, Switzerland, doi:10.3390/Symmetry2021-10765.
#' @export
#'
#' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]}
#'
#' @examples
#' t=runif(10,min=0,max=1)
#' SNGLL(t=t, kappa=0.5, alpha=0.35, eta=0.7, zeta=1.4)
#'
SNGLL <- function(t,kappa,alpha,eta,zeta){
  cdf0 <- (1-((1+zeta*((t*eta)^alpha))^(-((kappa^alpha)/(zeta*(eta^alpha))))))
  return(1-cdf0)
}
#--------------------------------------------------------------------------------------------------------------------------
#' Generalized Log-logistic (GLL) hazard function.
#--------------------------------------------------------------------------------------------------------------------------
#' @param kappa : scale parameter
#' @param alpha : shape parameter
#' @param eta : shape parameter
#' @param t : positive argument
#' @param log :log scale (TRUE or FALSE)
#' @return the value of the GLL hazard function
#' @references Muse, A. H., Mwalili, S., Ngesa, O., Alshanbari, H. M., Khosa, S. K., & Hussam, E. (2022). Bayesian and frequentist approach for the generalized log-logistic accelerated failure time model with applications to larynx-cancer patients. Alexandria Engineering Journal, 61(10), 7953-7978.
#' @export
#'
#' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]}
#'
#' @examples
#' t=runif(10,min=0,max=1)
#' rGLL(t=t, kappa=0.5, alpha=0.35, eta=0.7, log=FALSE)
#'
rGLL<- function(t, kappa,alpha,eta, log = FALSE){
  val<-log(kappa)+log(alpha)+(alpha-1)*log(kappa*t)-log(1+(eta*t)^alpha)
  if(log) return(val) else return(exp(val))
}
#--------------------------------------------------------------------------------------------------------------------------
#' Generalized Log-logistic (GLL) cumulative distribution function.
#--------------------------------------------------------------------------------------------------------------------------
#' @param kappa : scale parameter
#' @param alpha : shape parameter
#' @param eta : shape parameter
#' @param t : positive argument
#' @return the value of the GLL cumulative distribution function
#' @references Muse, A. H., Mwalili, S., Ngesa, O., Almalki, S. J., & Abd-Elmougod, G. A. (2021). Bayesian and classical inference for the generalized log-logistic distribution with applications to survival data. Computational intelligence and neuroscience, 2021.
#' @export
#'
#' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]}
#'
#' @examples
#' t=runif(10,min=0,max=1)
#' pGLL(t=t, kappa=0.5, alpha=0.35, eta=0.9)
#'
pGLL <- function(t, kappa,alpha, eta){
  val <- (1-((1+((t*eta)^alpha))^(-((kappa^alpha)/(eta^alpha)))))
  return(val)
}
#--------------------------------------------------------------------------------------------------------------------------
#' Generalized Log-logistic (GLL) survivor function.
#-------------------------------------------------------------------------------------------------------------------------- #' @param kappa : scale parameter #' @param alpha : shape parameter #' @param eta : shape parameter #' @param t : positive argument #' @return the value of the GLL survivor function #' @references Muse, A. H., Mwalili, S., Ngesa, O., Alshanbari, H. M., Khosa, S. K., & Hussam, E. (2022). Bayesian and frequentist approach for the generalized log-logistic accelerated failure time model with applications to larynx-cancer patients. Alexandria Engineering Journal, 61(10), 7953-7978. #' @export #' #' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]} #' #' @examples #' t=runif(10,min=0,max=1) #' sGLL(t=t, kappa=0.5, alpha=0.35, eta=0.9) #' sGLL <- function(t, kappa,alpha, eta){ sf0 <- (1+((t*eta)^alpha))^(-((kappa^alpha)/(eta^alpha))) val<-sf0 return(val) } #---------------------------------------------------------------------------------------- #' Modified Kumaraswamy Weibull (MKW) Hazard Function. #---------------------------------------------------------------------------------------- #' @param alpha : inverse scale parameter #' @param kappa : shape parameter #' @param eta : shape parameter #' @param t : positive argument #' @param log :log scale (TRUE or FALSE) #' @return the value of the MKW hazard function #' @references Khosa, S. K. (2019). Parametric Proportional Hazard Models with Applications in Survival analysis (Doctoral dissertation, University of Saskatchewan). #' @export #' #' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]} #' #' @examples #' t=runif(10,min=0,max=1) #' rMKW(t=t, alpha=0.35, kappa=0.7, eta=1.4, log=FALSE) #' rMKW <- function(t,alpha,kappa,eta,log=FALSE){ log.h <- (log(kappa)+log(eta)+log(alpha)+((eta-1)*log(1-exp(-t^kappa)))+((kappa-1)*log(t)+log(exp(-t^kappa))))-(log(1-(1-exp(-t^kappa))^eta)) ifelse(log, return(log.h), return(exp(log.h))) } #---------------------------------------------------------------------------------------- #' Modified Kumaraswamy Weibull (MKW) Cumulative Distribution Function. #---------------------------------------------------------------------------------------- #' @param alpha : Inverse scale parameter #' @param kappa : shape parameter #' @param eta : shape parameter #' @param t : positive argument #' @return the value of the MKW cumulative distribution function #' @export #' #' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]} #' #' @examples #' t=runif(10,min=0,max=1) #' pMKW(t=t,alpha=0.35, kappa=0.7, eta=1.4) #' pMKW<- function(t,alpha,kappa,eta){ cdf0 <-1- (1-(1-exp(-t^kappa))^eta)^alpha return(cdf0) } #---------------------------------------------------------------------------------------- #' Modified Kumaraswamy Weibull (MKW) Survivor Function. 
#---------------------------------------------------------------------------------------- #' @param alpha : Inverse scale parameter #' @param kappa : shape parameter #' @param eta : shape parameter #' @param t : positive argument #' @return the value of the MKW survivor function #' @export #' #' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]} #' #' @examples #' t=runif(10,min=0,max=1) #' sMKW(t=t,alpha=0.35, kappa=0.7, eta=1.4) #' sMKW<- function(t,alpha,kappa,eta){ sf <- (1-(1-exp(-t^kappa))^eta)^alpha return(sf) } #---------------------------------------------------------------------------------------- #' Exponentiated Weibull (EW) Cumulative Distribution Function. #---------------------------------------------------------------------------------------- #' @param lambda : scale parameter #' @param kappa : shape parameter #' @param alpha : shape parameter #' @param t : positive argument #' @param log.p :log scale (TRUE or FALSE) #' @return the value of the EW cumulative distribution function #' @export #' #' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]} #' #' @examples #' t=runif(10,min=0,max=1) #' pEW(t=t, lambda=0.65,kappa=0.45, alpha=0.25, log.p=FALSE) #' pEW<- function(t,lambda,kappa,alpha,log.p=FALSE){ log.cdf <- alpha*pweibull(t,scale=lambda,shape=kappa,log.p=TRUE) ifelse(log.p, return(log.cdf), return(exp(log.cdf))) } #---------------------------------------------------------------------------------------- #' Exponentiated Weibull (EW) Hazard Function. #---------------------------------------------------------------------------------------- #' @param lambda : scale parameter #' @param kappa : shape parameter #' @param alpha : shape parameter #' @param t : positive argument #' @param log :log scale (TRUE or FALSE) #' @return the value of the EW hazard function #' @references Khan, S. A. (2018). Exponentiated Weibull regression for time-to-event data. Lifetime data analysis, 24(2), 328-354. #' @export #' #' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]} #' #' @examples #' t=runif(10,min=0,max=1) #' rEW(t=t, lambda=0.9, kappa=0.5, alpha=0.75, log=FALSE) #' rEW <- function(t,lambda,kappa,alpha,log=FALSE){ log.pdf <- log(alpha) + (alpha-1)*pweibull(t,scale=lambda,shape=kappa,log.p=TRUE) + dweibull(t,scale=lambda,shape=kappa,log=TRUE) cdf <- exp(alpha*pweibull(t,scale=lambda,shape=kappa,log.p=TRUE) ) log.h <- log.pdf - log(1-cdf) ifelse(log, return(log.h), return(exp(log.h))) } #---------------------------------------------------------------------------------------- #' Exponentiated Weibull (EW) Survivor Function. #---------------------------------------------------------------------------------------- #' @param lambda : scale parameter #' @param kappa : shape parameter #' @param alpha : shape parameter #' @param t : positive argument #' @return the value of the EW survivor function #' @references Rubio, F. J., Remontet, L., Jewell, N. P., & Belot, A. (2019). On a general structure for hazard-based regression models: an application to population-based cancer research. Statistical methods in medical research, 28(8), 2404-2417. 
#' @export #' #' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]} #' #' @examples #' t=runif(10,min=0,max=1) #' sEW(t=t, lambda=0.9, kappa=0.5, alpha=0.75) #' sEW<- function(t,lambda,kappa,alpha){ cdf <- exp(alpha*pweibull(t,scale=lambda,shape=kappa,log.p=TRUE) ) return(1-cdf) } #-------------------------------------------------------------------------------------------------------------------------- #' Modified Log-logistic (MLL) hazard function. #-------------------------------------------------------------------------------------------------------------------------- #' @param kappa : scale parameter #' @param alpha : shape parameter #' @param eta : shape parameter #' @param t : positive argument #' @param log :log scale (TRUE or FALSE) #' @return the value of the MLL hazard function #' @export #' #' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]} #' #' @examples #' t=runif(10,min=0,max=1) #' rMLL(t=t, kappa=0.75, alpha=0.5, eta=0.9,log=FALSE) #' rMLL<- function(t,kappa,alpha,eta,log=FALSE){ log.h <- log(kappa*(kappa*t)^(alpha-1)*exp(eta*t)*(alpha+eta*t)/(1+((kappa*t)^alpha)*exp(eta*t))) ifelse(log, return(log.h), return(exp(log.h))) } #-------------------------------------------------------------------------------------------------------------------------- #' Modified Log-logistic (MLL) cumulative distribution function. #-------------------------------------------------------------------------------------------------------------------------- #' @param kappa : scale parameter #' @param alpha : shape parameter #' @param eta : shape parameter #' @param t : positive argument #' @return the value of the MLL cumulative distribution function #' @references Kayid, M. (2022). Applications of Bladder Cancer Data Using a Modified Log-Logistic Model. Applied Bionics and Biomechanics, 2022. #' @export #' #' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]} #' #' @examples #' t=runif(10,min=0,max=1) #' pMLL(t=t, kappa=0.75, alpha=0.5, eta=0.9) #' pMLL<- function(t,kappa,alpha,eta){ cdf0 <- 1- (1/(1+((kappa*t)^alpha)*exp(eta*t))) return(cdf0) } #-------------------------------------------------------------------------------------------------------------------------- #' Modified Log-logistic (MLL) survivor function. #-------------------------------------------------------------------------------------------------------------------------- #' @param kappa : scale parameter #' @param alpha : shape parameter #' @param eta : shape parameter #' @param t : positive argument #' @return the value of the MLL survivor function #' @references Kayid, M. (2022). Applications of Bladder Cancer Data Using a Modified Log-Logistic Model. Applied Bionics and Biomechanics, 2022. #' @export #' #' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]} #' #' @examples #' t=runif(10,min=0,max=1) #' sMLL(t=t, kappa=0.75, alpha=0.5, eta=0.9) #' sMLL<- function(t,kappa,alpha,eta){ sf <- 1/(1+((kappa*t)^alpha)*exp(eta*t)) return(sf) } #-------------------------------------------------------------------------------------------------------------------------- #' Power Generalised Weibull (PGW) hazard function. 
#-------------------------------------------------------------------------------------------------------------------------- #' @param kappa : scale parameter #' @param alpha : shape parameter #' @param eta : shape parameter #' @param t : positive argument #' @param log :log scale (TRUE or FALSE) #' @return the value of the PGW hazard function #' @export #' #' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]} #' #' @examples #' t=runif(10,min=0,max=1) #' rPGW(t=t, kappa=0.5, alpha=1.5, eta=0.6,log=FALSE) #' rPGW <- function(t, kappa,alpha, eta, log = FALSE){ val <- log(alpha) - log(eta) - alpha*log(kappa) + (alpha-1)*log(t) + (1/eta - 1)*log( 1 + (t/kappa)^alpha ) if(log) return(val) else return(exp(val)) } #-------------------------------------------------------------------------------------------------------------------------- #' Power Generalised Weibull (PGW) cumulative distribution function. #-------------------------------------------------------------------------------------------------------------------------- #' @param kappa : scale parameter #' @param alpha : shape parameter #' @param eta : shape parameter #' @param t : positive argument #' @return the value of the PGW cumulative distribution function #' @references Alvares, D., & Rubio, F. J. (2021). A tractable Bayesian joint model for longitudinal and survival data. Statistics in Medicine, 40(19), 4213-4229. #' @export #' #' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]} #' #' @examples #' t=runif(10,min=0,max=1) #' pPGW(t=t, kappa=0.5, alpha=1.5, eta=0.6) #' pPGW <- function(t, kappa, alpha, eta){ cdf0 <- 1-exp(1-( 1 + (t/kappa)^alpha )^(1/eta)) return(cdf0) } #-------------------------------------------------------------------------------------------------------------------------- #' Power Generalised Weibull (PGW) survivor function. #-------------------------------------------------------------------------------------------------------------------------- #' @param kappa : scale parameter #' @param alpha : shape parameter #' @param eta : shape parameter #' @param t : positive argument #' @return the value of the PGW survivor function #' @references Alvares, D., & Rubio, F. J. (2021). A tractable Bayesian joint model for longitudinal and survival data. Statistics in Medicine, 40(19), 4213-4229. #' @export #' #' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]} #' #' @examples #' t=runif(10,min=0,max=1) #' sPGW(t=t, kappa=0.5, alpha=1.5, eta=0.6) #' sPGW <- function(t, kappa, alpha, eta){ sf <- exp(1 - ( 1 + (t/kappa)^alpha)^(1/eta)) return(sf) } #---------------------------------------------------------------------------------------- #' Generalised Gamma (GG) Probability Density Function. 
#---------------------------------------------------------------------------------------- #' @param kappa : scale parameter #' @param alpha : shape parameter #' @param eta : shape parameter #' @param t : positive argument #' @param log :log scale (TRUE or FALSE) #' @return the value of the GG probability density function #' @export #' #' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]} #' #' @examples #' t=runif(10,min=0,max=1) #' pdGG(t=t, kappa=0.5, alpha=0.35, eta=0.9,log=FALSE) #' pdGG <- function(t, kappa, alpha, eta, log = FALSE){ val <- log(eta) - alpha*log(kappa) - lgamma(alpha/eta) + (alpha - 1)*log(t) - (t/kappa)^eta if(log) return(val) else return(exp(val)) } #---------------------------------------------------------------------------------------- #' Generalised Gamma (GG) Cumulative Distribution Function. #---------------------------------------------------------------------------------------- #' @param kappa : scale parameter #' @param alpha : shape parameter #' @param eta : shape parameter #' @param t : positive argument #' @param log.p :log scale (TRUE or FALSE) #' @return the value of the GG cumulative distribution function #' @export #' #' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]} #' #' @examples #' t=runif(10,min=0,max=1) #' pGG(t=t, kappa=0.5, alpha=0.35, eta=0.9,log.p=FALSE) #' pGG <- function(t, kappa, alpha, eta, log.p = FALSE){ val <- pgamma( t^eta, shape = alpha/eta, scale = kappa^eta, log.p = TRUE) if(log.p) return(val) else return(exp(val)) } #---------------------------------------------------------------------------------------- #' Generalised Gamma (GG) Survival Function. #---------------------------------------------------------------------------------------- #' @param kappa : scale parameter #' @param alpha : shape parameter #' @param eta : shape parameter #' @param t : positive argument #' @param log.p :log scale (TRUE or FALSE) #' @return the value of the GG survival function #' @export #' #' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]} #' #' @examples #' t=runif(10,min=0,max=1) #' sGG(t=t, kappa=0.5, alpha=0.35, eta=0.9,log.p=FALSE) #' sGG <- function(t, kappa, alpha, eta, log.p = FALSE){ val <- pgamma( t^eta, shape = alpha/eta, scale = kappa^eta, log.p = TRUE, lower.tail = FALSE) if(log.p) return(val) else return(exp(val)) } #---------------------------------------------------------------------------------------- #' Generalised Gamma (GG) Hazard Function. #---------------------------------------------------------------------------------------- #' @param kappa : scale parameter #' @param alpha : shape parameter #' @param eta : shape parameter #' @param t : positive argument #' @param log :log scale (TRUE or FALSE) #' @return the value of the GG hazard function #' @references Agarwal, S. K., & Kalla, S. L. (1996). A generalized gamma distribution and its application in reliabilty. Communications in Statistics-Theory and Methods, 25(1), 201-210. 
#' @export
#'
#' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]}
#'
#' @examples
#' t=runif(10,min=0,max=1)
#' rGG(t=t, kappa=0.5, alpha=0.35, eta=0.9,log=FALSE)
#'
rGG <- function(t, kappa, alpha, eta, log = FALSE){
  # hazard = density / survivor, computed on the log scale with the
  # pdGG() and sGG() functions defined above
  val <- pdGG(t, kappa, alpha, eta, log = TRUE) - sGG(t, kappa, alpha, eta, log.p = TRUE)
  if(log) return(val) else return(exp(val))
}
#----------------------------------------------------------------------------------------
#' Log-logistic (LL) Hazard Function.
#----------------------------------------------------------------------------------------
#' @param kappa : scale parameter
#' @param alpha : shape parameter
#' @param t : positive argument
#' @param log : log scale (TRUE or FALSE)
#' @return the value of the LL hazard function
#' @export
#'
#' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]}
#'
#' @examples
#' t=runif(10,min=0,max=1)
#' rLL(t=t, kappa=0.5, alpha=0.35,log=FALSE)
#'
rLL <- function(t,kappa,alpha, log = FALSE){
  # hazard = f(t)/S(t) of the log-logistic distribution
  pdf0 <- dllogis(t,shape=alpha,scale=kappa)
  cdf0 <- pllogis(t,shape=alpha,scale=kappa)
  val <- log(pdf0) - log(1-cdf0)
  if(log) return(val) else return(exp(val))
}
#----------------------------------------------------------------------------------------
#' Log-logistic (LL) Cumulative Distribution Function.
#----------------------------------------------------------------------------------------
#' @param kappa : scale parameter
#' @param alpha : shape parameter
#' @param t : positive argument
#' @return the value of the LL cumulative distribution function
#' @export
#'
#' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]}
#'
#' @examples
#' t=runif(10,min=0,max=1)
#' pLL(t=t, kappa=0.5, alpha=0.35)
#'
pLL <- function(t,kappa,alpha){
  cdf0 <- pllogis(t,shape=alpha,scale=kappa)
  return(cdf0)
}
#----------------------------------------------------------------------------------------
#' Log-logistic (LL) Survivor Function.
#----------------------------------------------------------------------------------------
#' @param kappa : scale parameter
#' @param alpha : shape parameter
#' @param t : positive argument
#' @return the value of the LL survivor function
#' @export
#'
#' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]}
#'
#' @examples
#' t=runif(10,min=0,max=1)
#' sLL(t=t, kappa=0.5, alpha=0.35)
#'
sLL <- function(t,kappa,alpha){
  cdf0 <- pllogis(t,shape=alpha,scale=kappa)
  return(1-cdf0)
}
#----------------------------------------------------------------------------------------
#' Weibull (W) Hazard Function.
#----------------------------------------------------------------------------------------
#' @param kappa : scale parameter
#' @param alpha : shape parameter
#' @param t : positive argument
#' @param log : log scale (TRUE or FALSE)
#' @return the value of the W hazard function
#' @export
#'
#' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]}
#'
#' @examples
#' t=runif(10,min=0,max=1)
#' rW(t=t, kappa=0.75, alpha=0.5,log=FALSE)
#'
rW <- function(t,kappa,alpha, log = FALSE){
  # hazard of the Weibull parameterisation with S(t) = exp(-kappa * t^alpha)
  val <- log(alpha*kappa*t^(alpha-1))
  if(log) return(val) else return(exp(val))
}
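#----------------------------------------------------------------------------------------
# Quick sanity check (not run), a sketch only: every hazard constructor in
# this file implements h(t) = f(t) / S(t). For example, for the GG
# functions defined above:
#   t0 <- seq(0.1, 2, by = 0.1)
#   max(abs(rGG(t0, 0.5, 0.35, 0.9) -
#           pdGG(t0, 0.5, 0.35, 0.9) / sGG(t0, 0.5, 0.35, 0.9)))   # ~ 0
#----------------------------------------------------------------------------------------
#' Weibull (W) Survivor Function.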
#----------------------------------------------------------------------------------------
#' @param kappa : scale parameter
#' @param alpha : shape parameter
#' @param t : positive argument
#' @return the value of the W survivor function
#' @export
#'
#' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]}
#'
#' @examples
#' t=runif(10,min=0,max=1)
#' sW(t=t, kappa=0.75, alpha=0.5)
#'
sW <- function(t,kappa,alpha){
  val <- exp(-kappa*t^alpha)
  return(val)
}
#----------------------------------------------------------------------------------------
#' Weibull (W) Cumulative Distribution Function.
#----------------------------------------------------------------------------------------
#' @param kappa : scale parameter
#' @param alpha : shape parameter
#' @param t : positive argument
#' @return the value of the W cumulative distribution function
#' @export
#'
#' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]}
#'
#' @examples
#' t=runif(10,min=0,max=1)
#' pW(t=t, kappa=0.75, alpha=0.5)
#'
pW <- function(t,kappa,alpha){
  # F(t) = 1 - S(t) = 1 - exp(-kappa * t^alpha)
  val <- 1 - exp(-kappa*t^alpha)
  return(val)
}
#----------------------------------------------------------------------------------------
#' Lognormal (LN) Hazard Function.
#----------------------------------------------------------------------------------------
#' @param kappa : meanlog parameter
#' @param alpha : sdlog parameter
#' @param t : positive argument
#' @param log : log scale (TRUE or FALSE)
#' @return the value of the LN hazard function
#' @export
#'
#' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]}
#'
#' @examples
#' t=runif(10,min=0,max=1)
#' rLN(t=t, kappa=0.5, alpha=0.75,log=FALSE)
#'
rLN <- function(t,kappa,alpha, log = FALSE){
  pdf0 <- dlnorm(t,meanlog=kappa,sdlog=alpha)
  cdf0 <- plnorm(t,meanlog=kappa,sdlog=alpha)
  val <- log(pdf0) - log(1-cdf0)
  if(log) return(val) else return(exp(val))
}
#----------------------------------------------------------------------------------------
#' Lognormal (LN) Survivor Function.
#----------------------------------------------------------------------------------------
#' @param kappa : meanlog parameter
#' @param alpha : sdlog parameter
#' @param t : positive argument
#' @return the value of the LN survivor function
#' @export
#'
#' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]}
#'
#' @examples
#' t=runif(10,min=0,max=1)
#' sLN(t=t, kappa=0.75, alpha=0.95)
#'
sLN <- function(t,kappa,alpha){
  cdf <- plnorm(t,meanlog=kappa,sdlog=alpha)
  return(1-cdf)
}
#----------------------------------------------------------------------------------------
#' Lognormal (LN) Cumulative Distribution Function.
#----------------------------------------------------------------------------------------
#' @param kappa : meanlog parameter
#' @param alpha : sdlog parameter
#' @param t : positive argument
#' @return the value of the LN cumulative distribution function
#' @export
#'
#' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]}
#'
#' @examples
#' t=runif(10,min=0,max=1)
#' pLN(t=t, kappa=0.75, alpha=0.95)
#'
pLN <- function(t,kappa,alpha){
  cdf <- plnorm(t,meanlog=kappa,sdlog=alpha)
  return(cdf)
}
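#----------------------------------------------------------------------------------------
# Quick sanity check (not run), a sketch only: with the parameterisation
# used here, sW(t) = exp(-kappa * t^alpha), so pW(t) + sW(t) must equal 1
# and rW(t) must match the negative derivative of log sW(t):
#   t0 <- seq(0.1, 2, by = 0.1); eps <- 1e-6
#   max(abs(pW(t0, 0.75, 0.5) + sW(t0, 0.75, 0.5) - 1))                       # ~ 0
#   h.num <- -(log(sW(t0 + eps, 0.75, 0.5)) - log(sW(t0, 0.75, 0.5))) / eps
#   max(abs(h.num - rW(t0, 0.75, 0.5)))                                       # ~ 0
#----------------------------------------------------------------------------------------
#' Gamma (G) Hazard Function.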
#---------------------------------------------------------------------------------------- #' @param shape : shape parameter #' @param scale : scale parameter #' @param t : positive argument #' @param log :log scale (TRUE or FALSE) #' @return the value of the G hazard function #' @export #' #' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]} #' #' @examples #' t=runif(10,min=0,max=1) #' rG(t=t, shape=0.5, scale=0.85,log=FALSE) #' rG <- function(t, shape, scale, log = FALSE){ lpdf0 <- dgamma(t, shape = shape, scale = scale, log = T) ls0 <- pgamma(t, shape = shape, scale = scale, lower.tail = FALSE, log.p = T) val <- lpdf0 - ls0 if(log) return(val) else return(exp(val)) } #---------------------------------------------------------------------------------------- #' Gamma (G) Cumulative Distribution Function. #---------------------------------------------------------------------------------------- #' @param shape : shape parameter #' @param scale : scale parameter #' @param t : positive argument #' @return the value of the G Cumulative Distribution function #' @export #' #' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]} #' #' @examples #' t=runif(10,min=0,max=1) #' pG(t=t, shape=0.85, scale=0.5) #' pG <- function(t, shape, scale){ p0 <- pgamma(t, shape = shape, scale = scale) return(p0) } #---------------------------------------------------------------------------------------- #' Gamma (G) Survivor Function. #---------------------------------------------------------------------------------------- #' @param shape : shape parameter #' @param scale : scale parameter #' @param t : positive argument #' @return the value of the G Survivor function #' @export #' #' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]} #' #' @examples #' t=runif(10,min=0,max=1) #' sG(t=t, shape=0.85, scale=0.5) #' sG <- function(t, shape, scale){ s0 <- 1-pgamma(t, shape = shape, scale = scale) return(s0) } ############################################################################################### ############################################################################################### ############################################################################################### #' General Odds (GO) Model. ############################################################################################### ############################################################################################### ############################################################################################### ######################################################################################################## #' @description A Tractable Parametric General Odds (GO) model's Log-likelihood, MLE and information criterion values. 
#' Baseline hazards: NGLL, GLL, MLL, PGW, GG, EW, MKW, LL, TLL, SLL, CLL, SCLL, ATLL, and ASLL
########################################################################################################
#' @param init : initial points for optimisation
#' @param zt : design matrix for time-dependent effects (q x n), q >= 1
#' @param z : design matrix for odds-level effects (p x n), p >= 1
#' @param status : vital status (1 - dead, 0 - alive)
#' @param n : the number of observations in the data set
#' @param times : survival times
#' @param basehaz : baseline hazard structure including baseline (new generalized log-logistic general odds "NGLLGO" model, generalized log-logistic general odds "GLLGO" model,
#' modified log-logistic general odds "MLLGO" model, exponentiated Weibull general odds "EWGO" model,
#' power generalized Weibull general odds "PGWGO" model, generalized gamma general odds "GGGO" model,
#' modified Kumaraswamy Weibull general odds "MKWGO" model, log-logistic general odds "LLGO" model,
#' tangent-log-logistic general odds "TLLGO" model, sine-log-logistic general odds "SLLGO" model,
#' cosine-log-logistic general odds "CLLGO" model, secant-log-logistic general odds "SCLLGO" model,
#' arcsine-log-logistic general odds "ASLLGO" model, arctangent-log-logistic general odds "ATLLGO" model,
#' Weibull general odds "WGO" model, gamma general odds "GGO" model, and log-normal general odds "LNGO" model.)
#' @param hessian : logical; if TRUE, a numerically differentiated Hessian matrix is returned and used for the standard errors.
#' @param conf.int : confidence level
#' @param method : an "optim" optimisation method: "BFGS" (default), "L-BFGS-B", "Nelder-Mead", "SANN", "CG", or "Brent"; pass "optim" to use "nlminb" instead.
#' @param maxit : the maximum number of iterations; defaults to 1000
#' @param log : log scale (TRUE or FALSE)
#' @return a list containing the maximum likelihood estimates (estimates), the information criteria (informationcriterions), the optimised log-likelihood with convergence details (loglikelihood), and the optimiser counts (counts)
#' @export
#'
#' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]}
#'
#' @examples
#'
#' #Example #1
#' data(alloauto)
#' time<-alloauto$time
#' delta<-alloauto$delta
#' z<-alloauto$type
#' MLEGO(init = c(1.0,0.50,0.50,0.5,0.5),times = time,status = delta,n=nrow(z),
#' basehaz = "PGWGO",z = z,zt=z,method = "BFGS",hessian=TRUE, conf.int=0.95,maxit = 1000,log=FALSE)
#'
#' #Example #2
#' data(bmt)
#' time<-bmt$Time
#' delta<-bmt$Status
#' z<-bmt$TRT
#' MLEGO(init = c(1.0,0.50,0.45,0.5),times = time,status = delta,n=nrow(z),
#' basehaz = "TLLGO",z = z,zt=z,method = "BFGS",hessian=TRUE, conf.int=0.95,maxit = 1000,
#' log=FALSE)
#'
#' #Example #3
#' data("gastric")
#' time<-gastric$time
#' delta<-gastric$status
#' z<-gastric$trt
#' MLEGO(init = c(1.0,1.0,0.50,0.5,0.5),times = time,status = delta,n=nrow(z),
#' basehaz = "GLLGO",z = z,zt=z,method = "BFGS",hessian=TRUE, conf.int=0.95,maxit = 1000,log=FALSE)
#'
MLEGO <- function(init, times, status, n, basehaz, z, zt, method = "BFGS", hessian = TRUE, conf.int = 0.95, maxit = 1000, log = FALSE){
  # Required variables
  times <- as.vector(times)
  status <- as.vector(as.logical(status))
  z <- as.matrix(z)
  zt <- as.matrix(zt)
  n <- nrow(z)
  conf.int <- 0.95
  hessian <- TRUE
  times.obs <- times[status]
  if(!is.null(z)) z.obs <- z[status,]
  if(!is.null(zt)) zt.obs <- zt[status,]
  p0 <- dim(z)[2]
  p1 <- dim(zt)[2]
  # NGLL - GO Model
  if(basehaz == "NGLLGO"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); ce0 <- exp(par[3]); de0 <- exp(par[4]); beta1 <- par[5:(4+p0)]; beta2 <- par[(5+p0):(4+p0+p1)];
      x.beta1 <-
as.vector(zt%*%beta1) x.beta2 <- as.vector(z%*%beta2) x.betadiff<- as.vector(z%*%(beta2-beta1)) exp.x.beta1 <- exp(x.beta1) exp.x.beta1.obs <- exp(x.beta1[status]) exp.x.beta2 <- exp(x.beta2) exp.x.beta2.obs <- exp(x.beta2[status]) exp.x.dif2 <- exp(x.betadiff) exp.x.dif2.obs<- exp(x.betadiff[status]) lhaz0 <- log((exp.x.beta2.obs*(rNGLL(times.obs*exp.x.beta1.obs,ae0,be0,ce0,de0, log=FALSE)))/((exp.x.dif2.obs*pNGLL(times.obs*exp.x.beta1.obs,ae0,be0,ce0,de0))+SNGLL(times.obs*exp.x.beta1.obs,ae0,be0,ce0,de0))) lsf0<-(1/(1+(exp.x.dif2*((pNGLL(times*exp.x.beta1,ae0,be0,ce0,de0)/SNGLL(times*exp.x.beta1,ae0,be0,ce0,de0)))))) val <- - sum(lhaz0) - sum(log(lsf0)) return(sum(val)) } } # GLL - GO Model if(basehaz == "GLLGO"){ log.lik <- function(par){ ae0 <- exp(par[1]); be0 <- exp(par[2]); ce0 <- exp(par[3]);beta1 <- par[4:(3+p0)];beta2 <- par[(4+p0):(3+p0+p1)]; x.beta1 <- as.vector(zt%*%beta1) x.beta2 <- as.vector(z%*%beta2) x.betadiff<- as.vector(z%*%(beta2-beta1)) exp.x.beta1 <- exp(x.beta1) exp.x.beta1.obs <- exp(x.beta1[status]) exp.x.beta2 <- exp(x.beta2) exp.x.beta2.obs <- exp(x.beta2[status]) exp.x.dif2 <- exp(x.betadiff) exp.x.dif2.obs<- exp(x.betadiff[status]) lhaz0 <- log((exp.x.beta2.obs*(rGLL(times.obs*exp.x.beta1.obs,ae0,be0,ce0, log=FALSE)))/((exp.x.dif2.obs*pGLL(times.obs*exp.x.beta1.obs,ae0,be0,ce0))+sGLL(times.obs*exp.x.beta1.obs,ae0,be0,ce0))) lsf0<-(1/(1+(exp.x.dif2*((pGLL(times*exp.x.beta1,ae0,be0,ce0)/sGLL(times*exp.x.beta1,ae0,be0,ce0)))))) val <- - sum(lhaz0) -sum(log(lsf0)) return(sum(val)) } } # MLL - GO Model if(basehaz == "MLLGO"){ log.lik <- function(par){ ae0 <- exp(par[1]); be0 <- exp(par[2]); ce0 <- exp(par[3]);beta1 <- par[4:(3+p0)];beta2 <- par[(4+p0):(3+p0+p1)]; x.beta1 <- as.vector(zt%*%beta1) x.beta2 <- as.vector(z%*%beta2) x.betadiff<- as.vector(z%*%(beta2-beta1)) exp.x.beta1 <- exp(x.beta1) exp.x.beta1.obs <- exp(x.beta1[status]) exp.x.beta2 <- exp(x.beta2) exp.x.beta2.obs <- exp(x.beta2[status]) exp.x.dif2 <- exp(x.betadiff) exp.x.dif2.obs<- exp(x.betadiff[status]) lhaz0 <- log((exp.x.beta2.obs*(rMLL(times.obs*exp.x.beta1.obs,ae0,be0,ce0, log=FALSE)))/((exp.x.dif2.obs*pMLL(times.obs*exp.x.beta1.obs,ae0,be0,ce0))+sMLL(times.obs*exp.x.beta1.obs,ae0,be0,ce0))) lsf0<-(1/(1+(exp.x.dif2*((pMLL(times*exp.x.beta1,ae0,be0,ce0)/sMLL(times*exp.x.beta1,ae0,be0,ce0)))))) val <- - sum(lhaz0) -sum(log(lsf0)) return(sum(val)) } } # GG - GO Model if(basehaz == "GGGO"){ log.lik <- function(par){ ae0 <- exp(par[1]); be0 <- exp(par[2]); ce0 <- exp(par[3]);beta1 <- par[4:(3+p0)];beta2 <- par[(4+p0):(3+p0+p1)]; x.beta1 <- as.vector(zt%*%beta1) x.beta2 <- as.vector(z%*%beta2) x.betadiff<- as.vector(z%*%(beta2-beta1)) exp.x.beta1 <- exp(x.beta1) exp.x.beta1.obs <- exp(x.beta1[status]) exp.x.beta2 <- exp(x.beta2) exp.x.beta2.obs <- exp(x.beta2[status]) exp.x.dif2 <- exp(x.betadiff) exp.x.dif2.obs<- exp(x.betadiff[status]) lhaz0 <- log((exp.x.beta2.obs*(rGG(times.obs*exp.x.beta1.obs,ae0,be0,ce0, log=FALSE)))/((exp.x.dif2.obs*pGG(times.obs*exp.x.beta1.obs,ae0,be0,ce0))+sGG(times.obs*exp.x.beta1.obs,ae0,be0,ce0))) lsf0<-(1/(1+(exp.x.dif2*((pGG(times*exp.x.beta1,ae0,be0,ce0)/sGG(times*exp.x.beta1,ae0,be0,ce0)))))) val <- - sum(lhaz0) -sum(log(lsf0)) return(sum(val)) } } # EW - GO Model if(basehaz == "EWGO"){ log.lik <- function(par){ ae0 <- exp(par[1]); be0 <- exp(par[2]); ce0 <- exp(par[3]);beta1 <- par[4:(3+p0)];beta2 <- par[(4+p0):(3+p0+p1)]; x.beta1 <- as.vector(zt%*%beta1) x.beta2 <- as.vector(z%*%beta2) x.betadiff<- as.vector(z%*%(beta2-beta1)) exp.x.beta1 <- exp(x.beta1) 
exp.x.beta1.obs <- exp(x.beta1[status]) exp.x.beta2 <- exp(x.beta2) exp.x.beta2.obs <- exp(x.beta2[status]) exp.x.dif2 <- exp(x.betadiff) exp.x.dif2.obs<- exp(x.betadiff[status]) lhaz0 <- log((exp.x.beta2.obs*(rEW(times.obs*exp.x.beta1.obs,ae0,be0,ce0, log=FALSE)))/((exp.x.dif2.obs*pEW(times.obs*exp.x.beta1.obs,ae0,be0,ce0))+sEW(times.obs*exp.x.beta1.obs,ae0,be0,ce0))) lsf0<-(1/(1+(exp.x.dif2*((pEW(times*exp.x.beta1,ae0,be0,ce0)/sEW(times*exp.x.beta1,ae0,be0,ce0)))))) val <- - sum(lhaz0) -sum(log(lsf0)) return(sum(val)) } } # PGW - GO Model if(basehaz == "PGWGO"){ log.lik <- function(par){ ae0 <- exp(par[1]); be0 <- exp(par[2]); ce0 <- exp(par[3]);beta1 <- par[4:(3+p0)];beta2 <- par[(4+p0):(3+p0+p1)]; x.beta1 <- as.vector(zt%*%beta1) x.beta2 <- as.vector(z%*%beta2) x.betadiff<- as.vector(z%*%(beta2-beta1)) exp.x.beta1 <- exp(x.beta1) exp.x.beta1.obs <- exp(x.beta1[status]) exp.x.beta2 <- exp(x.beta2) exp.x.beta2.obs <- exp(x.beta2[status]) exp.x.dif2 <- exp(x.betadiff) exp.x.dif2.obs<- exp(x.betadiff[status]) lhaz0 <- log((exp.x.beta2.obs*(rPGW(times.obs*exp.x.beta1.obs,ae0,be0,ce0, log=FALSE)))/((exp.x.dif2.obs*pPGW(times.obs*exp.x.beta1.obs,ae0,be0,ce0))+sPGW(times.obs*exp.x.beta1.obs,ae0,be0,ce0))) lsf0<-(1/(1+(exp.x.dif2*((pPGW(times*exp.x.beta1,ae0,be0,ce0)/sPGW(times*exp.x.beta1,ae0,be0,ce0)))))) val <- - sum(lhaz0) -sum(log(lsf0)) return(sum(val)) } } # MKW - GO Model if(basehaz == "MKWGO"){ log.lik <- function(par){ ae0 <- exp(par[1]); be0 <- exp(par[2]); ce0 <- exp(par[3]);beta1 <- par[4:(3+p0)];beta2 <- par[(4+p0):(3+p0+p1)]; x.beta1 <- as.vector(zt%*%beta1) x.beta2 <- as.vector(z%*%beta2) x.betadiff<- as.vector(z%*%(beta2-beta1)) exp.x.beta1 <- exp(x.beta1) exp.x.beta1.obs <- exp(x.beta1[status]) exp.x.beta2 <- exp(x.beta2) exp.x.beta2.obs <- exp(x.beta2[status]) exp.x.dif2 <- exp(x.betadiff) exp.x.dif2.obs<- exp(x.betadiff[status]) lhaz0 <- log((exp.x.beta2.obs*(rMKW(times.obs*exp.x.beta1.obs,ae0,be0,ce0, log=FALSE)))/((exp.x.dif2.obs*pMKW(times.obs*exp.x.beta1.obs,ae0,be0,ce0))+sMKW(times.obs*exp.x.beta1.obs,ae0,be0,ce0))) lsf0<-(1/(1+(exp.x.dif2*((pMKW(times*exp.x.beta1,ae0,be0,ce0)/sMKW(times*exp.x.beta1,ae0,be0,ce0)))))) val <- - sum(lhaz0) -sum(log(lsf0)) return(sum(val)) } } # TLL - GO Model if(basehaz == "TLLGO"){ log.lik <- function(par){ ae0 <- exp(par[1]); be0 <- exp(par[2]);beta1 <- par[3:(2+p0)];beta2 <- par[(3+p0):(2+p0+p1)]; x.beta1 <- as.vector(zt%*%beta1) x.beta2 <- as.vector(z%*%beta2) x.betadiff<- as.vector(z%*%(beta2-beta1)) exp.x.beta1 <- exp(x.beta1) exp.x.beta1.obs <- exp(x.beta1[status]) exp.x.beta2 <- exp(x.beta2) exp.x.beta2.obs <- exp(x.beta2[status]) exp.x.dif2 <- exp(x.betadiff) exp.x.dif2.obs<- exp(x.betadiff[status]) lhaz0 <- log((exp.x.beta2.obs*(rTLL(times.obs*exp.x.beta1.obs,ae0,be0, log=FALSE)))/((exp.x.dif2.obs*pTLL(times.obs*exp.x.beta1.obs,ae0,be0))+sTLL(times.obs*exp.x.beta1.obs,ae0,be0))) lsf0<-(1/(1+(exp.x.dif2*((pTLL(times*exp.x.beta1,ae0,be0)/sTLL(times*exp.x.beta1,ae0,be0)))))) val <- - sum(lhaz0) -sum(log(lsf0)) return(sum(val)) } } # CLL - GO Model if(basehaz == "CLLGO"){ log.lik <- function(par){ ae0 <- exp(par[1]); be0 <- exp(par[2]);beta1 <- par[3:(2+p0)];beta2 <- par[(3+p0):(2+p0+p1)]; x.beta1 <- as.vector(zt%*%beta1) x.beta2 <- as.vector(z%*%beta2) x.betadiff<- as.vector(z%*%(beta2-beta1)) exp.x.beta1 <- exp(x.beta1) exp.x.beta1.obs <- exp(x.beta1[status]) exp.x.beta2 <- exp(x.beta2) exp.x.beta2.obs <- exp(x.beta2[status]) exp.x.dif2 <- exp(x.betadiff) exp.x.dif2.obs<- exp(x.betadiff[status]) lhaz0 <- 
log((exp.x.beta2.obs*(rCLL(times.obs*exp.x.beta1.obs,ae0,be0, log=FALSE)))/((exp.x.dif2.obs*pCLL(times.obs*exp.x.beta1.obs,ae0,be0))+sCLL(times.obs*exp.x.beta1.obs,ae0,be0)))
      lsf0 <- (1/(1+(exp.x.dif2*((pCLL(times*exp.x.beta1,ae0,be0)/sCLL(times*exp.x.beta1,ae0,be0))))))
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # SLL - GO Model
  if(basehaz == "SLLGO"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta1 <- par[3:(2+p0)]; beta2 <- par[(3+p0):(2+p0+p1)];
      x.beta1 <- as.vector(zt%*%beta1)
      x.beta2 <- as.vector(z%*%beta2)
      x.betadiff <- as.vector(z%*%(beta2-beta1))
      exp.x.beta1 <- exp(x.beta1)
      exp.x.beta1.obs <- exp(x.beta1[status])
      exp.x.beta2 <- exp(x.beta2)
      exp.x.beta2.obs <- exp(x.beta2[status])
      exp.x.dif2 <- exp(x.betadiff)
      exp.x.dif2.obs <- exp(x.betadiff[status])
      lhaz0 <- log((exp.x.beta2.obs*(rSLL(times.obs*exp.x.beta1.obs,ae0,be0, log=FALSE)))/((exp.x.dif2.obs*pSLL(times.obs*exp.x.beta1.obs,ae0,be0))+sSLL(times.obs*exp.x.beta1.obs,ae0,be0)))
      lsf0 <- (1/(1+(exp.x.dif2*((pSLL(times*exp.x.beta1,ae0,be0)/sSLL(times*exp.x.beta1,ae0,be0))))))
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # SCLL - GO Model
  if(basehaz == "SCLLGO"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta1 <- par[3:(2+p0)]; beta2 <- par[(3+p0):(2+p0+p1)];
      x.beta1 <- as.vector(zt%*%beta1)
      x.beta2 <- as.vector(z%*%beta2)
      x.betadiff <- as.vector(z%*%(beta2-beta1))
      exp.x.beta1 <- exp(x.beta1)
      exp.x.beta1.obs <- exp(x.beta1[status])
      exp.x.beta2 <- exp(x.beta2)
      exp.x.beta2.obs <- exp(x.beta2[status])
      exp.x.dif2 <- exp(x.betadiff)
      exp.x.dif2.obs <- exp(x.betadiff[status])
      # the SCLL baseline uses its own pSCLL/sSCLL pair throughout
      lhaz0 <- log((exp.x.beta2.obs*(rSCLL(times.obs*exp.x.beta1.obs,ae0,be0, log=FALSE)))/((exp.x.dif2.obs*pSCLL(times.obs*exp.x.beta1.obs,ae0,be0))+sSCLL(times.obs*exp.x.beta1.obs,ae0,be0)))
      lsf0 <- (1/(1+(exp.x.dif2*((pSCLL(times*exp.x.beta1,ae0,be0)/sSCLL(times*exp.x.beta1,ae0,be0))))))
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # ATLL - GO Model
  if(basehaz == "ATLLGO"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta1 <- par[3:(2+p0)]; beta2 <- par[(3+p0):(2+p0+p1)];
      x.beta1 <- as.vector(zt%*%beta1)
      x.beta2 <- as.vector(z%*%beta2)
      x.betadiff <- as.vector(z%*%(beta2-beta1))
      exp.x.beta1 <- exp(x.beta1)
      exp.x.beta1.obs <- exp(x.beta1[status])
      exp.x.beta2 <- exp(x.beta2)
      exp.x.beta2.obs <- exp(x.beta2[status])
      exp.x.dif2 <- exp(x.betadiff)
      exp.x.dif2.obs <- exp(x.betadiff[status])
      lhaz0 <- log((exp.x.beta2.obs*(rATLL(times.obs*exp.x.beta1.obs,ae0,be0, log=FALSE)))/((exp.x.dif2.obs*pATLL(times.obs*exp.x.beta1.obs,ae0,be0))+sATLL(times.obs*exp.x.beta1.obs,ae0,be0)))
      lsf0 <- (1/(1+(exp.x.dif2*((pATLL(times*exp.x.beta1,ae0,be0)/sATLL(times*exp.x.beta1,ae0,be0))))))
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # ASLL - GO Model
  if(basehaz == "ASLLGO"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta1 <- par[3:(2+p0)]; beta2 <- par[(3+p0):(2+p0+p1)];
      x.beta1 <- as.vector(zt%*%beta1)
      x.beta2 <- as.vector(z%*%beta2)
      x.betadiff <- as.vector(z%*%(beta2-beta1))
      exp.x.beta1 <- exp(x.beta1)
      exp.x.beta1.obs <- exp(x.beta1[status])
      exp.x.beta2 <- exp(x.beta2)
      exp.x.beta2.obs <- exp(x.beta2[status])
      exp.x.dif2 <- exp(x.betadiff)
      exp.x.dif2.obs <- exp(x.betadiff[status])
      lhaz0 <- log((exp.x.beta2.obs*(rASLL(times.obs*exp.x.beta1.obs,ae0,be0, log=FALSE)))/((exp.x.dif2.obs*pASLL(times.obs*exp.x.beta1.obs,ae0,be0))+sASLL(times.obs*exp.x.beta1.obs,ae0,be0)))
      lsf0 <- (1/(1+(exp.x.dif2*((pASLL(times*exp.x.beta1,ae0,be0)/sASLL(times*exp.x.beta1,ae0,be0))))))
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
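  # Note: every GO branch above and below instantiates the same likelihood.
  # Writing the linear predictors as x.beta1 = zt'beta1 (time level) and
  # x.beta2 = z'beta2 (odds level), each branch computes
  #   h(t|z) = exp(x.beta2) * h0(t * e^x.beta1) /
  #            ( e^(x.beta2 - x.beta1) * F0(t * e^x.beta1) + S0(t * e^x.beta1) )
  #   S(t|z) = 1 / ( 1 + e^(x.beta2 - x.beta1) * F0(.) / S0(.) )
  # and the negative log-likelihood is -sum(status * log h) - sum(log S),
  # with (h0, F0, S0) supplied by the corresponding r*/p*/s* baseline trio.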
  # LL - GO Model
  if(basehaz == "LLGO"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta1 <- par[3:(2+p0)]; beta2 <- par[(3+p0):(2+p0+p1)];
      x.beta1 <- as.vector(zt%*%beta1)
      x.beta2 <- as.vector(z%*%beta2)
      x.betadiff <- as.vector(z%*%(beta2-beta1))
      exp.x.beta1 <- exp(x.beta1)
      exp.x.beta1.obs <- exp(x.beta1[status])
      exp.x.beta2 <- exp(x.beta2)
      exp.x.beta2.obs <- exp(x.beta2[status])
      exp.x.dif2 <- exp(x.betadiff)
      exp.x.dif2.obs <- exp(x.betadiff[status])
      lhaz0 <- log((exp.x.beta2.obs*(rLL(times.obs*exp.x.beta1.obs,ae0,be0, log=FALSE)))/((exp.x.dif2.obs*pLL(times.obs*exp.x.beta1.obs,ae0,be0))+sLL(times.obs*exp.x.beta1.obs,ae0,be0)))
      lsf0 <- (1/(1+(exp.x.dif2*((pLL(times*exp.x.beta1,ae0,be0)/sLL(times*exp.x.beta1,ae0,be0))))))
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # W - GO Model
  if(basehaz == "WGO"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta1 <- par[3:(2+p0)]; beta2 <- par[(3+p0):(2+p0+p1)];
      x.beta1 <- as.vector(zt%*%beta1)
      x.beta2 <- as.vector(z%*%beta2)
      x.betadiff <- as.vector(z%*%(beta2-beta1))
      exp.x.beta1 <- exp(x.beta1)
      exp.x.beta1.obs <- exp(x.beta1[status])
      exp.x.beta2 <- exp(x.beta2)
      exp.x.beta2.obs <- exp(x.beta2[status])
      exp.x.dif2 <- exp(x.betadiff)
      exp.x.dif2.obs <- exp(x.betadiff[status])
      lhaz0 <- log((exp.x.beta2.obs*(rW(times.obs*exp.x.beta1.obs,ae0,be0, log=FALSE)))/((exp.x.dif2.obs*pW(times.obs*exp.x.beta1.obs,ae0,be0))+sW(times.obs*exp.x.beta1.obs,ae0,be0)))
      lsf0 <- (1/(1+(exp.x.dif2*((pW(times*exp.x.beta1,ae0,be0)/sW(times*exp.x.beta1,ae0,be0))))))
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # G - GO Model
  if(basehaz == "GGO"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta1 <- par[3:(2+p0)]; beta2 <- par[(3+p0):(2+p0+p1)];
      x.beta1 <- as.vector(zt%*%beta1)
      x.beta2 <- as.vector(z%*%beta2)
      x.betadiff <- as.vector(z%*%(beta2-beta1))
      exp.x.beta1 <- exp(x.beta1)
      exp.x.beta1.obs <- exp(x.beta1[status])
      exp.x.beta2 <- exp(x.beta2)
      exp.x.beta2.obs <- exp(x.beta2[status])
      exp.x.dif2 <- exp(x.betadiff)
      exp.x.dif2.obs <- exp(x.betadiff[status])
      lhaz0 <- log((exp.x.beta2.obs*(rG(times.obs*exp.x.beta1.obs,ae0,be0, log=FALSE)))/((exp.x.dif2.obs*pG(times.obs*exp.x.beta1.obs,ae0,be0))+sG(times.obs*exp.x.beta1.obs,ae0,be0)))
      lsf0 <- (1/(1+(exp.x.dif2*((pG(times*exp.x.beta1,ae0,be0)/sG(times*exp.x.beta1,ae0,be0))))))
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # LN - GO Model
  if(basehaz == "LNGO"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta1 <- par[3:(2+p0)]; beta2 <- par[(3+p0):(2+p0+p1)];
      x.beta1 <- as.vector(zt%*%beta1)
      x.beta2 <- as.vector(z%*%beta2)
      x.betadiff <- as.vector(z%*%(beta2-beta1))
      exp.x.beta1 <- exp(x.beta1)
      exp.x.beta1.obs <- exp(x.beta1[status])
      exp.x.beta2 <- exp(x.beta2)
      exp.x.beta2.obs <- exp(x.beta2[status])
      exp.x.dif2 <- exp(x.betadiff)
      exp.x.dif2.obs <- exp(x.betadiff[status])
      lhaz0 <- log((exp.x.beta2.obs*(rLN(times.obs*exp.x.beta1.obs,ae0,be0, log=FALSE)))/((exp.x.dif2.obs*pLN(times.obs*exp.x.beta1.obs,ae0,be0))+sLN(times.obs*exp.x.beta1.obs,ae0,be0)))
      lsf0 <- (1/(1+(exp.x.dif2*((pLN(times*exp.x.beta1,ae0,be0)/sLN(times*exp.x.beta1,ae0,be0))))))
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  if(method != "optim") OPT <- optim(init, log.lik, control = list(maxit = maxit), method = method, hessian = TRUE)
  if(method == "optim"){
    OPT <- nlminb(init, log.lik, control = list(iter.max = maxit))
    # nlminb reports $objective/$evaluations and returns no Hessian;
    # align its output with optim's and differentiate numerically
    OPT$value <- OPT$objective
    OPT$counts <- OPT$evaluations
    OPT$hessian <- stats::optimHess(OPT$par, log.lik)
  }
  pars <- length(OPT$par)
  l <- OPT$value
  MLE <- OPT$par
  hessian <- OPT$hessian
  se <- sqrt(diag(solve(hessian)))
  SE <- se
  zval <- MLE/se   # Wald z-statistic: estimate divided by its standard error
  zvalue <- zval
  ci.lo <- MLE - SE*qnorm((1-conf.int)/2, lower.tail = FALSE)
  ci.up <- MLE + SE*qnorm((1-conf.int)/2, lower.tail = FALSE)
  p.value <- 2*stats::pnorm(-abs(zval))
  estimates <- data.frame(MLE, SE, zvalue, "p.value" = ifelse(p.value < 0.001, "<0.001", round(p.value, digits = 3)), lower.95 = ci.lo, upper.95 = ci.up)
  AIC <- 2*l + 2*pars
  CAIC <- AIC + (2*pars*(pars+1)/(n-pars-1))   # small-sample corrected AIC: AIC + 2k(k+1)/(n-k-1)
  HQIC <- 2*l + 2*log(log(n))*pars
  BCAIC <- 2*l + (pars*(log(n)+1))
  BIC <- (2*l) + (pars*(log(n)))
  informationcriterions <- cbind(AIC, CAIC, BCAIC, BIC, HQIC)
  value <- OPT$value
  convergence <- OPT$convergence
  counts <- OPT$counts
  message <- OPT$message
  loglikelihood <- cbind(value, convergence, message)
  counts <- cbind(counts)
  result <- list(estimates = estimates, informationcriterions = informationcriterions, loglikelihood = loglikelihood, counts = counts)
  return(result)
}
###############################################################################################
###############################################################################################
###############################################################################################
#' Accelerated Odds (AO) Model.
###############################################################################################
###############################################################################################
###############################################################################################
########################################################################################################
#' @description A Tractable Parametric Accelerated Odds (AO) model's maximum likelihood estimates, log-likelihood, and information criterion values.
#' Baseline hazards: NGLL, GLL, MLL, PGW, GG, EW, MKW, LL, TLL, SLL, CLL, SCLL, ATLL, and ASLL
########################################################################################################
#' @param init : initial parameters to maximise the likelihood function
#' @param z : design matrix for covariates (p x n), p >= 1
#' @param status : vital status (1 - dead, 0 - alive)
#' @param n : the number of observations in the data set
#' @param times : survival times
#' @param basehaz : baseline hazard structure including baseline (new generalized log-logistic accelerated odds "NGLLAO" model, generalized log-logistic accelerated odds "GLLAO" model,
#' modified log-logistic accelerated odds "MLLAO" model, exponentiated Weibull accelerated odds "EWAO" model,
#' power generalized Weibull accelerated odds "PGWAO" model, generalized gamma accelerated odds "GGAO" model,
#' modified Kumaraswamy Weibull accelerated odds "MKWAO" model, log-logistic accelerated odds "LLAO" model,
#' tangent-log-logistic accelerated odds "TLLAO" model, sine-log-logistic accelerated odds "SLLAO" model,
#' cosine-log-logistic accelerated odds "CLLAO" model, secant-log-logistic accelerated odds "SCLLAO" model,
#' arcsine-log-logistic accelerated odds "ASLLAO" model, arctangent-log-logistic accelerated odds "ATLLAO" model,
#' Weibull accelerated odds "WAO" model, gamma accelerated odds "GAO" model, and log-normal accelerated odds "LNAO" model.)
#' @param hessian : logical; if TRUE, a numerically differentiated Hessian matrix is returned and used for the standard errors.
#' @param conf.int : confidence level
#' @param method : an "optim" optimisation method: "BFGS" (default), "L-BFGS-B", "Nelder-Mead", "SANN", "CG", or "Brent"; pass "optim" to use "nlminb" instead.
#' @param maxit :The maximum number of iterations.
Defaults to 1000 #' @param log :log scale (TRUE or FALSE) #' @return a list containing the output of the optimisation (OPT) and the log-likelihood function (loglik) #' @export #' #' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]} #' #' @examples #' #'#Example #1 #'data(alloauto) #'time<-alloauto$time #'delta<-alloauto$delta #'z<-alloauto$type #'MLEAO(init = c(1.0,0.40,0.50,0.50),times = time,status = delta,n=nrow(z), #'basehaz = "GLLAO",z = z,method = "BFGS",hessian=TRUE, conf.int=0.95,maxit = 1000,log=FALSE) #' #'#Example #2 #'data(bmt) #'time<-bmt$Time #'delta<-bmt$Status #'z<-bmt$TRT #'MLEAO(init = c(1.0,1.0,0.5),times = time,status = delta,n=nrow(z), #'basehaz = "CLLAO",z = z,method = "BFGS",hessian=TRUE, conf.int=0.95,maxit = 1000, #'log=FALSE) #' #'#Example #3 #'data("gastric") #'time<-gastric$time #'delta<-gastric$status #'z<-gastric$trt #'MLEAO(init = c(1.0,1.0,0.5),times = time,status = delta,n=nrow(z), #'basehaz = "LNAO",z = z,method = "BFGS",hessian=TRUE, conf.int=0.95,maxit = 1000,log=FALSE) #' #'#Example #4 #'data("larynx") #'time<-larynx$time #'delta<-larynx$delta #'larynx$age<-as.numeric(scale(larynx$age)) #'larynx$diagyr<-as.numeric(scale(larynx$diagyr)) #'larynx$stage<-as.factor(larynx$stage) #'z<-model.matrix(~ stage+age+diagyr, data = larynx) #'MLEAO(init = c(1.0,1.0,0.5,0.5,0.5,0.5,0.5,0.5),times = time,status = delta,n=nrow(z), #'basehaz = "ASLLAO",z = z,method = "BFGS",hessian=TRUE, conf.int=0.95,maxit = 1000,log=FALSE) #' MLEAO <- function(init, times, status, n,basehaz, z, method = "BFGS",hessian=TRUE, conf.int=0.95,maxit = 1000, log=FALSE){ # Required variables times <- as.vector(times) status <- as.vector(as.logical(status)) z <- as.matrix(z) conf.int<-0.95 hessian<- TRUE n<-nrow(z) times.obs <- times[status] if(!is.null(z)) z.obs <- z[status,] p0 <- dim(z)[2] # NGLL - AO Model if(basehaz == "NGLLAO"){ log.lik <- function(par){ ae0 <- exp(par[1]); be0 <- exp(par[2]); ce0 <- exp(par[3]); de0<-exp(par[4]);beta <- par[5:(4+p0)]; x.beta <- as.vector(z%*%beta) exp.x.beta <- exp(x.beta) exp.x.beta.obs <- exp(x.beta[status]) lhaz0 <- log(((rNGLL(times.obs*exp.x.beta.obs,ae0,be0,ce0,de0, log=FALSE)))/((pNGLL(times.obs*exp.x.beta.obs,ae0,be0,ce0,de0)/exp.x.beta.obs)+SNGLL(times.obs*exp.x.beta.obs,ae0,be0,ce0,de0))) lsf0<-(1/(1+(1/exp.x.beta*((pNGLL(times*exp.x.beta,ae0,be0,ce0,de0)/SNGLL(times*exp.x.beta,ae0,be0,ce0,de0)))))) val <- - sum(lhaz0) - sum(log(lsf0)) return(sum(val)) } } # GLL - AO Model if(basehaz == "GLLAO"){ log.lik <- function(par){ ae0 <- exp(par[1]); be0 <- exp(par[2]); ce0 <- exp(par[3]); beta <- par[4:(3+p0)]; x.beta <- as.vector(z%*%beta) exp.x.beta <- exp(x.beta) exp.x.beta.obs <- exp(x.beta[status]) lhaz0 <- log(((rGLL(times.obs*exp.x.beta.obs,ae0,be0,ce0, log=FALSE)))/((pGLL(times.obs*exp.x.beta.obs,ae0,be0,ce0)/exp.x.beta.obs)+(sGLL(times.obs*exp.x.beta.obs,ae0,be0,ce0)))) lsf0<-(1/(1+(1/exp.x.beta*((pGLL(times*exp.x.beta,ae0,be0,ce0)/sGLL(times*exp.x.beta,ae0,be0,ce0)))))) val <- - sum(lhaz0) -sum(log(lsf0)) return(sum(val)) } } # MLL - AO Model if(basehaz == "MLLAO"){ log.lik <- function(par){ ae0 <- exp(par[1]); be0 <- exp(par[2]); ce0 <- exp(par[3]); beta <- par[4:(3+p0)]; x.beta <- as.vector(z%*%beta) exp.x.beta <- exp(x.beta) exp.x.beta.obs <- exp(x.beta[status]) lhaz0 <- log(((rMLL(times.obs*exp.x.beta.obs,ae0,be0,ce0, log=FALSE)))/((pMLL(times.obs*exp.x.beta.obs,ae0,be0,ce0)/exp.x.beta.obs)+(sMLL(times.obs*exp.x.beta.obs,ae0,be0,ce0)))) 
lsf0<-(1/(1+(1/exp.x.beta*((pMLL(times*exp.x.beta,ae0,be0,ce0)/sMLL(times*exp.x.beta,ae0,be0,ce0)))))) val <- - sum(lhaz0) -sum(log(lsf0)) return(sum(val)) } } # GG - AO Model if(basehaz == "GGAO"){ log.lik <- function(par){ ae0 <- exp(par[1]); be0 <- exp(par[2]); ce0 <- exp(par[3]); beta <- par[4:(3+p0)]; x.beta <- as.vector(z%*%beta) exp.x.beta <- exp(x.beta) exp.x.beta.obs <- exp(x.beta[status]) lhaz0 <- log(((rGG(times.obs*exp.x.beta.obs,ae0,be0,ce0, log=FALSE)))/((pGG(times.obs*exp.x.beta.obs,ae0,be0,ce0)/exp.x.beta.obs)+(sGG(times.obs*exp.x.beta.obs,ae0,be0,ce0)))) lsf0<-(1/(1+(1/exp.x.beta*((pGG(times*exp.x.beta,ae0,be0,ce0)/sGG(times*exp.x.beta,ae0,be0,ce0)))))) val <- - sum(lhaz0) -sum(log(lsf0)) return(sum(val)) } } # EW - AO Model if(basehaz == "EWAO"){ log.lik <- function(par){ ae0 <- par[1]; be0 <- par[2]; ce0 <- par[3]; beta <- par[4:(3+p0)]; x.beta <- as.vector(z%*%beta) exp.x.beta <- exp(x.beta) exp.x.beta.obs <- exp(x.beta[status]) lhaz0 <- log(((rEW(times.obs*exp.x.beta.obs,ae0,be0,ce0, log=FALSE)))/((pEW(times.obs*exp.x.beta.obs,ae0,be0,ce0)/exp.x.beta.obs)+(sEW(times.obs*exp.x.beta.obs,ae0,be0,ce0)))) lsf0<-(1/(1+(1/exp.x.beta*((pEW(times*exp.x.beta,ae0,be0,ce0)/sEW(times*exp.x.beta,ae0,be0,ce0)))))) val <- - sum(lhaz0) -sum(log(lsf0)) return(sum(val)) } } # PGW - AO Model if(basehaz == "PGWAO"){ log.lik <- function(par){ ae0 <- exp(par[1]); be0 <- exp(par[2]); ce0 <- exp(par[3]); beta <- par[4:(3+p0)]; x.beta <- as.vector(z%*%beta) exp.x.beta <- exp(x.beta) exp.x.beta.obs <- exp(x.beta[status]) lhaz0 <- log(((rPGW(times.obs*exp.x.beta.obs,ae0,be0,ce0, log=FALSE)))/((pPGW(times.obs*exp.x.beta.obs,ae0,be0,ce0)/exp.x.beta.obs)+(sPGW(times.obs*exp.x.beta.obs,ae0,be0,ce0)))) lsf0<-(1/(1+(1/exp.x.beta*((pPGW(times*exp.x.beta,ae0,be0,ce0)/sPGW(times*exp.x.beta,ae0,be0,ce0)))))) val <- - sum(lhaz0) -sum(log(lsf0)) return(sum(val)) } } # MKW - AO Model if(basehaz == "MKWAO"){ log.lik <- function(par){ ae0 <- exp(par[1]); be0 <- exp(par[2]); ce0 <- exp(par[3]); beta <- par[4:(3+p0)]; x.beta <- as.vector(z%*%beta) exp.x.beta <- exp(x.beta) exp.x.beta.obs <- exp(x.beta[status]) lhaz0 <- log(((rMKW(times.obs*exp.x.beta.obs,ae0,be0,ce0, log=FALSE)))/((pMKW(times.obs*exp.x.beta.obs,ae0,be0,ce0)/exp.x.beta.obs)+(sMKW(times.obs*exp.x.beta.obs,ae0,be0,ce0)))) lsf0<-(1/(1+(1/exp.x.beta*((pMKW(times*exp.x.beta,ae0,be0,ce0)/sMKW(times*exp.x.beta,ae0,be0,ce0)))))) val <- - sum(lhaz0) -sum(log(lsf0)) return(sum(val)) } } # TLL - AO Model if(basehaz == "TLLAO"){ log.lik <- function(par){ ae0 <- exp(par[1]); be0 <- exp(par[2]); beta <- par[3:(2+p0)]; x.beta <- as.vector(z%*%beta) exp.x.beta <- exp(x.beta) exp.x.beta.obs <- exp(x.beta[status]) lhaz0 <- log(((rTLL(times.obs*exp.x.beta.obs,ae0,be0, log=FALSE)))/((pTLL(times.obs*exp.x.beta.obs,ae0,be0)/exp.x.beta.obs)+(sTLL(times.obs*exp.x.beta.obs,ae0,be0)))) lsf0<-(1/(1+(1/exp.x.beta*((pTLL(times*exp.x.beta,ae0,be0)/sTLL(times*exp.x.beta,ae0,be0)))))) val <- - sum(lhaz0) -sum(log(lsf0)) return(sum(val)) } } # CLL - AO Model if(basehaz == "CLLAO"){ log.lik <- function(par){ ae0 <- exp(par[1]); be0 <- exp(par[2]); beta <- par[3:(2+p0)]; x.beta <- as.vector(z%*%beta) exp.x.beta <- exp(x.beta) exp.x.beta.obs <- exp(x.beta[status]) lhaz0 <- log(((rCLL(times.obs*exp.x.beta.obs,ae0,be0, log=FALSE)))/((pCLL(times.obs*exp.x.beta.obs,ae0,be0)/exp.x.beta.obs)+(sCLL(times.obs*exp.x.beta.obs,ae0,be0)))) lsf0<-(1/(1+(1/exp.x.beta*((pCLL(times*exp.x.beta,ae0,be0)/sCLL(times*exp.x.beta,ae0,be0)))))) val <- - sum(lhaz0) -sum(log(lsf0)) return(sum(val)) } } # SLL 
- AO Model
  if(basehaz == "SLLAO"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta <- par[3:(2+p0)];
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      lhaz0 <- log(((rSLL(times.obs*exp.x.beta.obs,ae0,be0, log=FALSE)))/((pSLL(times.obs*exp.x.beta.obs,ae0,be0)/exp.x.beta.obs)+(sSLL(times.obs*exp.x.beta.obs,ae0,be0))))
      lsf0 <- (1/(1+(1/exp.x.beta*((pSLL(times*exp.x.beta,ae0,be0)/sSLL(times*exp.x.beta,ae0,be0))))))
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # SCLL - AO Model
  if(basehaz == "SCLLAO"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta <- par[3:(2+p0)];
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      # the SCLL baseline uses its own pSCLL/sSCLL pair throughout
      lhaz0 <- log(((rSCLL(times.obs*exp.x.beta.obs,ae0,be0, log=FALSE)))/((pSCLL(times.obs*exp.x.beta.obs,ae0,be0)/exp.x.beta.obs)+(sSCLL(times.obs*exp.x.beta.obs,ae0,be0))))
      lsf0 <- (1/(1+(1/exp.x.beta*((pSCLL(times*exp.x.beta,ae0,be0)/sSCLL(times*exp.x.beta,ae0,be0))))))
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # ATLL - AO Model
  if(basehaz == "ATLLAO"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta <- par[3:(2+p0)];
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      lhaz0 <- log(((rATLL(times.obs*exp.x.beta.obs,ae0,be0, log=FALSE)))/((pATLL(times.obs*exp.x.beta.obs,ae0,be0)/exp.x.beta.obs)+(sATLL(times.obs*exp.x.beta.obs,ae0,be0))))
      lsf0 <- (1/(1+(1/exp.x.beta*((pATLL(times*exp.x.beta,ae0,be0)/sATLL(times*exp.x.beta,ae0,be0))))))
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # ASLL - AO Model
  if(basehaz == "ASLLAO"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta <- par[3:(2+p0)];
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      lhaz0 <- log(((rASLL(times.obs*exp.x.beta.obs,ae0,be0, log=FALSE)))/((pASLL(times.obs*exp.x.beta.obs,ae0,be0)/exp.x.beta.obs)+(sASLL(times.obs*exp.x.beta.obs,ae0,be0))))
      lsf0 <- (1/(1+(1/exp.x.beta*((pASLL(times*exp.x.beta,ae0,be0)/sASLL(times*exp.x.beta,ae0,be0))))))
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
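  # Note: the AO branches share one structure. With a single linear
  # predictor x.beta = z'beta, each branch computes
  #   h(t|z) = h0(t * e^x.beta) /
  #            ( F0(t * e^x.beta) / e^x.beta + S0(t * e^x.beta) )
  #   S(t|z) = 1 / ( 1 + (1/e^x.beta) * F0(.) / S0(.) )
  # so time is accelerated inside the baseline and the odds of surviving
  # are scaled by exp(-x.beta).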
  # LL - AO Model
  if(basehaz == "LLAO"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta <- par[3:(2+p0)];
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      lhaz0 <- log(((rLL(times.obs*exp.x.beta.obs,ae0,be0, log=FALSE)))/((pLL(times.obs*exp.x.beta.obs,ae0,be0)/exp.x.beta.obs)+(sLL(times.obs*exp.x.beta.obs,ae0,be0))))
      lsf0 <- (1/(1+(1/exp.x.beta*((pLL(times*exp.x.beta,ae0,be0)/sLL(times*exp.x.beta,ae0,be0))))))
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # LN - AO Model
  if(basehaz == "LNAO"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta <- par[3:(2+p0)];
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      lhaz0 <- log(((rLN(times.obs*exp.x.beta.obs,ae0,be0, log=FALSE)))/((pLN(times.obs*exp.x.beta.obs,ae0,be0)/exp.x.beta.obs)+(sLN(times.obs*exp.x.beta.obs,ae0,be0))))
      lsf0 <- (1/(1+(1/exp.x.beta*((pLN(times*exp.x.beta,ae0,be0)/sLN(times*exp.x.beta,ae0,be0))))))
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # W - AO Model
  if(basehaz == "WAO"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta <- par[3:(2+p0)];
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      lhaz0 <- log(((rW(times.obs*exp.x.beta.obs,ae0,be0, log=FALSE)))/((pW(times.obs*exp.x.beta.obs,ae0,be0)/exp.x.beta.obs)+(sW(times.obs*exp.x.beta.obs,ae0,be0))))
      lsf0 <- (1/(1+(1/exp.x.beta*((pW(times*exp.x.beta,ae0,be0)/sW(times*exp.x.beta,ae0,be0))))))
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # G - AO Model
  if(basehaz == "GAO"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta <- par[3:(2+p0)];
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      lhaz0 <- log(((rG(times.obs*exp.x.beta.obs,ae0,be0, log=FALSE)))/((pG(times.obs*exp.x.beta.obs,ae0,be0)/exp.x.beta.obs)+(sG(times.obs*exp.x.beta.obs,ae0,be0))))
      lsf0 <- (1/(1+(1/exp.x.beta*((pG(times*exp.x.beta,ae0,be0)/sG(times*exp.x.beta,ae0,be0))))))
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  if(method != "optim") OPT <- optim(init, log.lik, control = list(maxit = maxit), method = method, hessian = TRUE)
  if(method == "optim"){
    OPT <- nlminb(init, log.lik, control = list(iter.max = maxit))
    # nlminb reports $objective/$evaluations and returns no Hessian;
    # align its output with optim's and differentiate numerically
    OPT$value <- OPT$objective
    OPT$counts <- OPT$evaluations
    OPT$hessian <- stats::optimHess(OPT$par, log.lik)
  }
  pars <- length(OPT$par)
  l <- OPT$value
  MLE <- OPT$par
  hessian <- OPT$hessian
  se <- sqrt(diag(solve(hessian)))
  SE <- se
  zval <- MLE/se   # Wald z-statistic: estimate divided by its standard error
  zvalue <- zval
  ci.lo <- MLE - SE*qnorm((1-conf.int)/2, lower.tail = FALSE)
  ci.up <- MLE + SE*qnorm((1-conf.int)/2, lower.tail = FALSE)
  p.value <- 2*stats::pnorm(-abs(zval))
  estimates <- data.frame(MLE, SE, zvalue, "p.value" = ifelse(p.value < 0.001, "<0.001", round(p.value, digits = 3)), lower.95 = ci.lo, upper.95 = ci.up)
  AIC <- 2*l + 2*pars
  CAIC <- AIC + (2*pars*(pars+1)/(n-pars-1))   # small-sample corrected AIC: AIC + 2k(k+1)/(n-k-1)
  HQIC <- 2*l + 2*log(log(n))*pars
  BCAIC <- 2*l + (pars*(log(n)+1))
  BIC <- (2*l) + (pars*(log(n)))
  informationcriterions <- cbind(AIC, CAIC, BCAIC, BIC, HQIC)
  value <- OPT$value
  convergence <- OPT$convergence
  counts <- OPT$counts
  message <- OPT$message
  loglikelihood <- cbind(value, convergence, message)
  counts <- cbind(counts)
  result <- list(estimates = estimates, informationcriterions = informationcriterions, loglikelihood = loglikelihood, counts = counts)
  return(result)
}
###############################################################################################
###############################################################################################
###############################################################################################
#' Proportional Odds (PO) Model.
###############################################################################################
###############################################################################################
###############################################################################################
########################################################################################################
#' @description A Tractable Parametric Proportional Odds (PO) model's maximum likelihood estimates, log-likelihood, and information criterion values.
#' Baseline hazards: NGLL, GLL, MLL, PGW, GG, EW, MKW, LL, TLL, SLL, CLL, SCLL, ATLL, and ASLL
########################################################################################################
#' @param init : initial points for optimisation
#' @param z : design matrix for covariates (p x n), p >= 1
#' @param status : vital status (1 - dead, 0 - alive)
#' @param n : the number of observations in the data set
#' @param times : survival times
#' @param basehaz : baseline hazard structure including baseline (new generalized log-logistic proportional odds "NGLLPO" model, generalized log-logistic proportional odds "GLLPO" model, modified log-logistic proportional odds "MLLPO" model,
#' exponentiated Weibull proportional odds "EWPO" model, power generalized Weibull proportional odds "PGWPO" model, generalized gamma proportional odds "GGPO" model,
#' modified Kumaraswamy Weibull proportional odds "MKWPO" model, log-logistic proportional odds "LLPO" model,
#' tangent-log-logistic proportional odds "TLLPO" model, sine-log-logistic proportional odds "SLLPO" model, cosine-log-logistic proportional odds "CLLPO" model,
#' secant-log-logistic proportional odds "SCLLPO" model, arcsine-log-logistic proportional odds "ASLLPO" model, arctangent-log-logistic proportional odds "ATLLPO" model,
#' Weibull proportional odds "WPO" model, gamma proportional odds "GPO" model, and log-normal proportional odds "LNPO" model.)
#' @param hessian : logical; if TRUE, a numerically differentiated Hessian matrix is returned and used for the standard errors.
#' @param conf.int : confidence level
#' @param method : an "optim" optimisation method: "BFGS" (default), "L-BFGS-B", "Nelder-Mead", "SANN", "CG", or "Brent"; pass "optim" to use "nlminb" instead.
#' @param maxit : the maximum number of iterations; defaults to 1000
#' @param log : log scale (TRUE or FALSE)
#' @return a list containing the maximum likelihood estimates (estimates), the information criteria (informationcriterions), the optimised log-likelihood with convergence details (loglikelihood), and the optimiser counts (counts)
#' @export
#'
#' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]}
#'
#' @examples
#'
#' #Example #1
#' data(alloauto)
#' time<-alloauto$time
#' delta<-alloauto$delta
#' z<-alloauto$type
#' MLEPO(init = c(1.0,0.40,1.0,0.50),times = time,status = delta,n=nrow(z),
#' basehaz = "GLLPO",z = z,method = "BFGS",hessian=TRUE, conf.int=0.95,maxit = 1000,log=FALSE)
#'
#' #Example #2
#' data(bmt)
#' time<-bmt$Time
#' delta<-bmt$Status
#' z<-bmt$TRT
#' MLEPO(init = c(1.0,1.0,0.5),times = time,status = delta,n=nrow(z),
#' basehaz = "SLLPO",z = z,method = "BFGS",hessian=TRUE, conf.int=0.95,maxit = 1000,log=FALSE)
#'
#' #Example #3
#' data("gastric")
#' time<-gastric$time
#' delta<-gastric$status
#' z<-gastric$trt
#' MLEPO(init = c(1.0,0.50,1.0,0.75),times = time,status = delta,n=nrow(z),
#' basehaz = "PGWPO",z = z,method = "BFGS",hessian=TRUE, conf.int=0.95,maxit = 1000,
#' log=FALSE)
#'
#' #Example #4
#' data("larynx")
#' time<-larynx$time
#' delta<-larynx$delta
#' larynx$age<-as.numeric(scale(larynx$age))
#' larynx$diagyr<-as.numeric(scale(larynx$diagyr))
#' larynx$stage<-as.factor(larynx$stage)
#' z<-model.matrix(~ stage+age+diagyr, data = larynx)
#' MLEPO(init = c(1.0,1.0,0.5,0.5,0.5,0.5,0.5,0.5),times = time,status = delta,n=nrow(z),
#' basehaz = "ATLLPO",z = z,method = "BFGS",hessian=TRUE, conf.int=0.95,maxit = 1000,log=FALSE)
#'
MLEPO <- function(init, times, status, n, basehaz, z, method = "BFGS", hessian = TRUE, conf.int = 0.95, maxit = 1000, log = FALSE){
  # Required variables
  times <- as.vector(times)
  status <- as.vector(as.logical(status))
  z <- as.matrix(z)
conf.int<-0.95 hessian<- TRUE n<-nrow(z) times.obs <- times[status] if(!is.null(z)) z.obs <- z[status,] p0 <- dim(z)[2] # NGLL - PO Model if(basehaz == "NGLLPO"){ log.lik <- function(par){ ae0 <- exp(par[1]); be0 <- exp(par[2]); ce0 <- exp(par[3]); de0<-exp(par[4]);beta <- par[5:(4+p0)]; x.beta <- as.vector(z%*%beta) exp.x.beta <- exp(x.beta) exp.x.beta.obs <- exp(x.beta[status]) lhaz0 <- log((exp.x.beta.obs*(rNGLL(times.obs,ae0,be0,ce0,de0, log=FALSE)))/((exp.x.beta.obs*pNGLL(times.obs,ae0,be0,ce0,de0))+SNGLL(times.obs,ae0,be0,ce0,de0))) lsf0<-1/(1+(exp.x.beta*((pNGLL(times,ae0,be0,ce0,de0)/SNGLL(times,ae0,be0,ce0,de0))))) val <- - sum(lhaz0) - sum(log(lsf0)) return(sum(val)) } } # GLL - PO Model if(basehaz == "GLLPO"){ log.lik <- function(par){ ae0 <- exp(par[1]); be0 <- exp(par[2]); ce0 <- exp(par[3]); beta <- par[4:(3+p0)]; x.beta <- as.vector(z%*%beta) exp.x.beta <- exp(x.beta) exp.x.beta.obs <- exp(x.beta[status]) lhaz0 <- log((exp.x.beta.obs*(rGLL(times.obs,ae0,be0,ce0, log=FALSE)))/((exp.x.beta.obs*pGLL(times.obs,ae0,be0,ce0))+(sGLL(times.obs,ae0,be0,ce0)))) lsf0<-1/(1+(exp.x.beta*((pGLL(times,ae0,be0,ce0)/sGLL(times,ae0,be0,ce0))))) val <- - sum(lhaz0) -sum(log(lsf0)) return(sum(val)) } } # MLL - PO Model if(basehaz == "MLLPO"){ log.lik <- function(par){ ae0 <- exp(par[1]); be0 <- exp(par[2]); ce0 <- exp(par[3]); beta <- par[4:(3+p0)]; x.beta <- as.vector(z%*%beta) exp.x.beta <- exp(x.beta) exp.x.beta.obs <- exp(x.beta[status]) lhaz0 <- log((exp.x.beta.obs*(rMLL(times.obs,ae0,be0,ce0, log=FALSE)))/((exp.x.beta.obs*pMLL(times.obs,ae0,be0,ce0))+(sMLL(times.obs,ae0,be0,ce0)))) lsf0<-1/(1+(exp.x.beta*((pMLL(times,ae0,be0,ce0)/sMLL(times,ae0,be0,ce0))))) val <- - sum(lhaz0) -sum(log(lsf0)) return(sum(val)) } } # GG - PO Model if(basehaz == "GGPO"){ log.lik <- function(par){ ae0 <- exp(par[1]); be0 <- exp(par[2]); ce0 <- exp(par[3]); beta <- par[4:(3+p0)]; x.beta <- as.vector(z%*%beta) exp.x.beta <- exp(x.beta) exp.x.beta.obs <- exp(x.beta[status]) lhaz0 <- log((exp.x.beta.obs*(rGG(times.obs,ae0,be0,ce0, log=FALSE)))/((exp.x.beta.obs*pGG(times.obs,ae0,be0,ce0))+(sGG(times.obs,ae0,be0,ce0)))) lsf0<-1/(1+(exp.x.beta*((pGG(times,ae0,be0,ce0)/sGG(times,ae0,be0,ce0))))) val <- - sum(lhaz0) -sum(log(lsf0)) return(sum(val)) } } # EW - PO Model if(basehaz == "EWPO"){ log.lik <- function(par){ ae0 <- par[1]; be0 <- par[2]; ce0 <- par[3]; beta <- par[4:(3+p0)]; x.beta <- as.vector(z%*%beta) exp.x.beta <- exp(x.beta) exp.x.beta.obs <- exp(x.beta[status]) lhaz0 <- log((exp.x.beta.obs*(rEW(times.obs,ae0,be0,ce0, log=FALSE)))/((exp.x.beta.obs*pEW(times.obs,ae0,be0,ce0))+(sEW(times.obs,ae0,be0,ce0)))) lsf0<-1/(1+(exp.x.beta*((pEW(times,ae0,be0,ce0)/sEW(times,ae0,be0,ce0))))) val <- - sum(lhaz0) -sum(log(lsf0)) return(sum(val)) } } # PGW - PO Model if(basehaz == "PGWPO"){ log.lik <- function(par){ ae0 <- exp(par[1]); be0 <- exp(par[2]); ce0 <- exp(par[3]); beta <- par[4:(3+p0)]; x.beta <- as.vector(z%*%beta) exp.x.beta <- exp(x.beta) exp.x.beta.obs <- exp(x.beta[status]) lhaz0 <- log((exp.x.beta.obs*(rPGW(times.obs,ae0,be0,ce0, log=FALSE)))/((exp.x.beta.obs*pPGW(times.obs,ae0,be0,ce0))+(sPGW(times.obs,ae0,be0,ce0)))) lsf0<-1/(1+(exp.x.beta*((pPGW(times,ae0,be0,ce0)/sPGW(times,ae0,be0,ce0))))) val <- - sum(lhaz0) -sum(log(lsf0)) return(sum(val)) } } # MKW - PO Model if(basehaz == "MKWPO"){ log.lik <- function(par){ ae0 <- exp(par[1]); be0 <- exp(par[2]); ce0 <- exp(par[3]); beta <- par[4:(3+p0)]; x.beta <- as.vector(z%*%beta) exp.x.beta <- exp(x.beta) exp.x.beta.obs <- exp(x.beta[status]) lhaz0 <- 
      lhaz0 <- log((exp.x.beta.obs*(rMKW(times.obs,ae0,be0,ce0, log=FALSE)))/((exp.x.beta.obs*pMKW(times.obs,ae0,be0,ce0))+(sMKW(times.obs,ae0,be0,ce0))))
      lsf0 <- 1/(1+(exp.x.beta*((pMKW(times,ae0,be0,ce0)/sMKW(times,ae0,be0,ce0)))))
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # TLL - PO Model
  if(basehaz == "TLLPO"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta <- par[3:(2+p0)]
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      lhaz0 <- log((exp.x.beta.obs*(rTLL(times.obs,ae0,be0, log=FALSE)))/((exp.x.beta.obs*pTLL(times.obs,ae0,be0))+sTLL(times.obs,ae0,be0)))
      lsf0 <- 1/(1+(exp.x.beta*((pTLL(times,ae0,be0)/sTLL(times,ae0,be0)))))
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # CLL - PO Model
  if(basehaz == "CLLPO"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta <- par[3:(2+p0)]
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      lhaz0 <- log((exp.x.beta.obs*(rCLL(times.obs,ae0,be0, log=FALSE)))/((exp.x.beta.obs*pCLL(times.obs,ae0,be0))+sCLL(times.obs,ae0,be0)))
      lsf0 <- 1/(1+(exp.x.beta*((pCLL(times,ae0,be0)/sCLL(times,ae0,be0)))))
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # SLL - PO Model
  if(basehaz == "SLLPO"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta <- par[3:(2+p0)]
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      lhaz0 <- log((exp.x.beta.obs*(rSLL(times.obs,ae0,be0, log=FALSE)))/((exp.x.beta.obs*pSLL(times.obs,ae0,be0))+sSLL(times.obs,ae0,be0)))
      lsf0 <- 1/(1+(exp.x.beta*((pSLL(times,ae0,be0)/sSLL(times,ae0,be0)))))
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # SCLL - PO Model
  if(basehaz == "SCLLPO"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta <- par[3:(2+p0)]
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      lhaz0 <- log((exp.x.beta.obs*(rSCLL(times.obs,ae0,be0, log=FALSE)))/((exp.x.beta.obs*pSCLL(times.obs,ae0,be0))+sSCLL(times.obs,ae0,be0)))
      lsf0 <- 1/(1+(exp.x.beta*((pSCLL(times,ae0,be0)/sSCLL(times,ae0,be0)))))
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # ATLL - PO Model
  if(basehaz == "ATLLPO"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta <- par[3:(2+p0)]
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      lhaz0 <- log((exp.x.beta.obs*(rATLL(times.obs,ae0,be0, log=FALSE)))/((exp.x.beta.obs*pATLL(times.obs,ae0,be0))+sATLL(times.obs,ae0,be0)))
      lsf0 <- 1/(1+(exp.x.beta*((pATLL(times,ae0,be0)/sATLL(times,ae0,be0)))))
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # ASLL - PO Model
  if(basehaz == "ASLLPO"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta <- par[3:(2+p0)]
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      lhaz0 <- log((exp.x.beta.obs*(rASLL(times.obs,ae0,be0, log=FALSE)))/((exp.x.beta.obs*pASLL(times.obs,ae0,be0))+sASLL(times.obs,ae0,be0)))
      lsf0 <- 1/(1+(exp.x.beta*((pASLL(times,ae0,be0)/sASLL(times,ae0,be0)))))
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # LL - PO Model
  if(basehaz == "LLPO"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta <- par[3:(2+p0)]
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      lhaz0 <- log((exp.x.beta.obs*(rLL(times.obs,ae0,be0, log=FALSE)))/((exp.x.beta.obs*pLL(times.obs,ae0,be0))+sLL(times.obs,ae0,be0)))
      lsf0 <- 1/(1+(exp.x.beta*((pLL(times,ae0,be0)/sLL(times,ae0,be0)))))
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # W - PO Model
  if(basehaz == "WPO"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta <- par[3:(2+p0)]
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      lhaz0 <- log((exp.x.beta.obs*(rW(times.obs,ae0,be0, log=FALSE)))/((exp.x.beta.obs*pW(times.obs,ae0,be0))+sW(times.obs,ae0,be0)))
      lsf0 <- 1/(1+(exp.x.beta*((pW(times,ae0,be0)/sW(times,ae0,be0)))))
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # G - PO Model
  if(basehaz == "GPO"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta <- par[3:(2+p0)]
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      lhaz0 <- log((exp.x.beta.obs*(rG(times.obs,ae0,be0, log=FALSE)))/((exp.x.beta.obs*pG(times.obs,ae0,be0))+sG(times.obs,ae0,be0)))
      lsf0 <- 1/(1+(exp.x.beta*((pG(times,ae0,be0)/sG(times,ae0,be0)))))
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # LN - PO Model
  if(basehaz == "LNPO"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta <- par[3:(2+p0)]
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      # rLN (not rLL), so that the log-normal hazard matches the log-normal
      # distribution and survival functions used below
      lhaz0 <- log((exp.x.beta.obs*(rLN(times.obs,ae0,be0, log=FALSE)))/((exp.x.beta.obs*pLN(times.obs,ae0,be0))+sLN(times.obs,ae0,be0)))
      lsf0 <- 1/(1+(exp.x.beta*((pLN(times,ae0,be0)/sLN(times,ae0,be0)))))
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  if(method != "optim"){
    OPT <- optim(init, log.lik, control = list(maxit = maxit),
                 method = method, hessian = TRUE)
  } else {
    # nlminb() reports its minimum as $objective and returns neither a Hessian
    # nor optim()-style counts, so mirror the optim() output format here
    # (numerical Hessian via stats::optimHess)
    OPT <- nlminb(init, log.lik, control = list(iter.max = maxit))
    OPT$value <- OPT$objective
    OPT$hessian <- optimHess(OPT$par, log.lik)
    OPT$counts <- OPT$evaluations
  }
  pars <- length(OPT$par)
  l <- OPT$value
  MLE <- OPT$par
  hessian <- OPT$hessian
  SE <- sqrt(diag(solve(hessian)))
  zvalue <- MLE/SE                     # Wald z-statistics: estimate / standard error
  ci.lo <- MLE - SE*qnorm((1-conf.int)/2, lower.tail = FALSE)
  ci.up <- MLE + SE*qnorm((1-conf.int)/2, lower.tail = FALSE)
  p.value <- 2*stats::pnorm(-abs(zvalue))
  estimates <- data.frame(MLE, SE, zvalue,
                          "p.value" = ifelse(p.value < 0.001, "<0.001", round(p.value, digits = 3)),
                          lower.95 = ci.lo, upper.95 = ci.up)
  # Information criteria (l is the minimised negative log-likelihood)
  AIC <- 2*l + 2*pars
  CAIC <- AIC + (2*pars*(pars+1)/(n - pars - 1))   # corrected AIC (AICc)
  HQIC <- 2*l + 2*log(log(n))*pars
  BCAIC <- 2*l + (pars*(log(n)+1))
  BIC <- (2*l) + (pars*(log(n)))
  informationcriterions <- cbind(AIC, CAIC, BCAIC, BIC, HQIC)
  value <- OPT$value
  convergence <- OPT$convergence
  message <- OPT$message
  loglikelihood <- cbind(value, convergence, message)
  counts <- cbind(OPT$counts)
  result <- list(estimates = estimates,
                 informationcriterions = informationcriterions,
                 loglikelihood = loglikelihood,
                 counts = counts)
  return(result)
}
###############################################################################################
###############################################################################################
###############################################################################################
#' Accelerated Failure Time (AFT) Model.
###############################################################################################
###############################################################################################
###############################################################################################
########################################################################################################
#' @description Tractable parametric accelerated failure time (AFT) model's maximum likelihood estimation, log-likelihood, and information criterion.
#' Baseline hazards: NGLL, GLL, MLL, PGW, GG, EW, MKW, LL, TLL, SLL, CLL, SCLL, ATLL, and ASLL
########################################################################################################
#' @param init : initial points for optimisation
#' @param z : design matrix for covariates (p x n), p >= 1
#' @param status : vital status (1 - dead, 0 - alive)
#' @param n : the sample size (number of rows of the design matrix)
#' @param times : survival times
#' @param basehaz : baseline hazard structure, one of: new generalized log-logistic accelerated failure time "NGLLAFT" model, generalized log-logistic accelerated failure time "GLLAFT" model, modified log-logistic accelerated failure time "MLLAFT" model,
#' exponentiated Weibull accelerated failure time "EWAFT" model, power generalized Weibull accelerated failure time "PGWAFT" model, generalized gamma accelerated failure time "GGAFT" model,
#' modified Kumaraswamy Weibull accelerated failure time "MKWAFT" model, log-logistic accelerated failure time "LLAFT" model,
#' tangent-log-logistic accelerated failure time "TLLAFT" model, sine-log-logistic accelerated failure time "SLLAFT" model, cosine-log-logistic accelerated failure time "CLLAFT" model,
#' secant-log-logistic accelerated failure time "SCLLAFT" model, arcsine-log-logistic accelerated failure time "ASLLAFT" model, arctangent-log-logistic accelerated failure time "ATLLAFT" model,
#' Weibull accelerated failure time "WAFT" model, gamma accelerated failure time "GAFT" model, and log-normal accelerated failure time "LNAFT" model.
#' @param hessian : logical. Should a numerically differentiated Hessian matrix be returned? It is required for the standard errors.
#' @param conf.int : confidence level
#' @param method : optimisation routine. Either the name of an \code{optim} method ("BFGS" (default), "L-BFGS-B", "Nelder-Mead", "SANN", "CG", or "Brent"), or "optim" to use \code{nlminb} instead.
#' @param maxit : the maximum number of iterations. Defaults to 1000
#' @param log : log scale (TRUE or FALSE)
#' @return a list containing the parameter estimates with standard errors and confidence intervals (estimates), the information criteria (informationcriterions), the optimised log-likelihood with convergence details (loglikelihood), and the numbers of function evaluations (counts)
#' @export
#'
#' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau \email{[email protected]}
#'
#' @examples
#'
#' #Example #1
#' data(alloauto)
#' time<-alloauto$time
#' delta<-alloauto$delta
#' z<-alloauto$type
#' MLEAFT(init = c(1.0,0.20,0.05),times = time,status = delta,n=nrow(z),
#' basehaz = "WAFT",z = z,method = "BFGS",hessian=TRUE, conf.int=0.95,maxit = 1000,
#' log=FALSE)
#'
#' #Example #2
#' data(bmt)
#' time<-bmt$Time
#' delta<-bmt$Status
#' z<-bmt$TRT
#' MLEAFT(init = c(1.0,1.0,0.5),times = time,status = delta,n=nrow(z),
#' basehaz = "LNAFT",z = z,method = "BFGS",hessian=TRUE, conf.int=0.95,maxit = 1000,log=FALSE)
#'
#' #Example #3
#' data("gastric")
#' time<-gastric$time
#' delta<-gastric$status
#' z<-gastric$trt
#' MLEAFT(init = c(1.0,0.50,0.5),times = time,status = delta,n=nrow(z),
#' basehaz = "LLAFT",z = z,method = "BFGS",hessian=TRUE, conf.int=0.95,maxit = 1000,
#' log=FALSE)
#'
#' #Example #4
#' data("larynx")
#' time<-larynx$time
#' delta<-larynx$delta
#' larynx$age<-as.numeric(scale(larynx$age))
#' larynx$diagyr<-as.numeric(scale(larynx$diagyr))
#' larynx$stage<-as.factor(larynx$stage)
#' z<-model.matrix(~ stage+age+diagyr, data = larynx)
#' MLEAFT(init = c(1.0,0.5,0.5,0.5,0.5,0.5,0.5,0.5),times = time,status = delta,n=nrow(z),
#' basehaz = "LNAFT",z = z,method = "BFGS",hessian=TRUE, conf.int=0.95,maxit = 1000,
#' log=FALSE)
#'
MLEAFT <- function(init, times, status, n, basehaz, z, method = "BFGS",
                   hessian = TRUE, conf.int = 0.95, maxit = 1000, log = FALSE){
  # Required variables
  times <- as.vector(times)
  status <- as.vector(as.logical(status))
  z <- as.matrix(z)
  n <- nrow(z)           # the sample size is taken from the design matrix
  times.obs <- times[status]
  if(!is.null(z)) z.obs <- z[status,]
  p0 <- dim(z)[2]
  # NGLL - AFT Model
  if(basehaz == "NGLLAFT"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); ce0 <- exp(par[3]); de0 <- exp(par[4]); beta <- par[5:(4+p0)]
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      x.beta.obs <- x.beta[status]
      lhaz0 <- rNGLL(times.obs*exp.x.beta.obs,ae0,be0,ce0,de0, log=TRUE)+x.beta.obs
      lsf0 <- SNGLL(times*exp.x.beta,ae0,be0,ce0,de0)
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # GLL - AFT Model
  if(basehaz == "GLLAFT"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); ce0 <- exp(par[3]); beta <- par[4:(3+p0)]
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      x.beta.obs <- x.beta[status]
      lhaz0 <- rGLL(times.obs*exp.x.beta.obs,ae0,be0,ce0, log=TRUE)+x.beta.obs
      lsf0 <- sGLL(times*exp.x.beta,ae0,be0,ce0)
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # MLL - AFT Model
  if(basehaz == "MLLAFT"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); ce0 <- exp(par[3]); beta <- par[4:(3+p0)]
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      x.beta.obs <- x.beta[status]
      lhaz0 <- rMLL(times.obs*exp.x.beta.obs,ae0,be0,ce0, log=TRUE)+x.beta.obs
      lsf0 <- sMLL(times*exp.x.beta,ae0,be0,ce0)
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # GG - AFT Model
  if(basehaz == "GGAFT"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); ce0 <- exp(par[3]); beta <- par[4:(3+p0)]
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      x.beta.obs <- x.beta[status]
      lhaz0 <- rGG(times.obs*exp.x.beta.obs,ae0,be0,ce0, log=TRUE)+x.beta.obs
      lsf0 <- sGG(times*exp.x.beta,ae0,be0,ce0)
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # EW - AFT Model
  if(basehaz == "EWAFT"){
    log.lik <- function(par){
      ae0 <- par[1]; be0 <- par[2]; ce0 <- par[3]; beta <- par[4:(3+p0)]
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      x.beta.obs <- x.beta[status]
      lhaz0 <- rEW(times.obs*exp.x.beta.obs,ae0,be0,ce0, log=TRUE)+x.beta.obs
      lsf0 <- sEW(times*exp.x.beta,ae0,be0,ce0)
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # PGW - AFT Model
  if(basehaz == "PGWAFT"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); ce0 <- exp(par[3]); beta <- par[4:(3+p0)]
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      x.beta.obs <- x.beta[status]
      lhaz0 <- rPGW(times.obs*exp.x.beta.obs,ae0,be0,ce0, log=TRUE)+x.beta.obs
      lsf0 <- sPGW(times*exp.x.beta,ae0,be0,ce0)
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # MKW - AFT Model
  if(basehaz == "MKWAFT"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); ce0 <- exp(par[3]); beta <- par[4:(3+p0)]
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      x.beta.obs <- x.beta[status]
      lhaz0 <- rMKW(times.obs*exp.x.beta.obs,ae0,be0,ce0, log=TRUE)+x.beta.obs
      lsf0 <- sMKW(times*exp.x.beta,ae0,be0,ce0)
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # TLL - AFT Model
  if(basehaz == "TLLAFT"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta <- par[3:(2+p0)]
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      x.beta.obs <- x.beta[status]
      lhaz0 <- rTLL(times.obs*exp.x.beta.obs,ae0,be0, log=TRUE)+x.beta.obs
      lsf0 <- sTLL(times*exp.x.beta,ae0,be0)
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # CLL - AFT Model
  if(basehaz == "CLLAFT"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta <- par[3:(2+p0)]
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      x.beta.obs <- x.beta[status]
      lhaz0 <- rCLL(times.obs*exp.x.beta.obs,ae0,be0, log=TRUE)+x.beta.obs
      lsf0 <- sCLL(times*exp.x.beta,ae0,be0)
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # SLL - AFT Model
  if(basehaz == "SLLAFT"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta <- par[3:(2+p0)]
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      x.beta.obs <- x.beta[status]
      lhaz0 <- rSLL(times.obs*exp.x.beta.obs,ae0,be0, log=TRUE)+x.beta.obs
      lsf0 <- sSLL(times*exp.x.beta,ae0,be0)
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # SCLL - AFT Model
  if(basehaz == "SCLLAFT"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta <- par[3:(2+p0)]
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      x.beta.obs <- x.beta[status]
      lhaz0 <- rSCLL(times.obs*exp.x.beta.obs,ae0,be0, log=TRUE)+x.beta.obs
      lsf0 <- sSCLL(times*exp.x.beta,ae0,be0)
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # ATLL - AFT Model
  if(basehaz == "ATLLAFT"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta <- par[3:(2+p0)]
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      x.beta.obs <- x.beta[status]
      lhaz0 <- rATLL(times.obs*exp.x.beta.obs,ae0,be0, log=TRUE)+x.beta.obs
      lsf0 <- sATLL(times*exp.x.beta,ae0,be0)
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # ASLL - AFT Model
  if(basehaz == "ASLLAFT"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta <- par[3:(2+p0)]
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      x.beta.obs <- x.beta[status]
      lhaz0 <- rASLL(times.obs*exp.x.beta.obs,ae0,be0, log=TRUE)+x.beta.obs
      lsf0 <- sASLL(times*exp.x.beta,ae0,be0)
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # LL - AFT Model
  if(basehaz == "LLAFT"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta <- par[3:(2+p0)]
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      x.beta.obs <- x.beta[status]
      lhaz0 <- rLL(times.obs*exp.x.beta.obs,ae0,be0, log=TRUE)+x.beta.obs
      lsf0 <- sLL(times*exp.x.beta,ae0,be0)
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # Weibull - AFT Model
  if(basehaz == "WAFT"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta <- par[3:(2+p0)]
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      x.beta.obs <- x.beta[status]
      lhaz0 <- rW(times.obs*exp.x.beta.obs,ae0,be0, log=TRUE)+x.beta.obs
      lsf0 <- sW(times*exp.x.beta,ae0,be0)
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # Gamma - AFT Model
  if(basehaz == "GAFT"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta <- par[3:(2+p0)]
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      x.beta.obs <- x.beta[status]
      lhaz0 <- rG(times.obs*exp.x.beta.obs,ae0,be0, log=TRUE)+x.beta.obs
      lsf0 <- sG(times*exp.x.beta,ae0,be0)
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  # Lognormal - AFT Model
  if(basehaz == "LNAFT"){
    log.lik <- function(par){
      ae0 <- exp(par[1]); be0 <- exp(par[2]); beta <- par[3:(2+p0)]
      x.beta <- as.vector(z%*%beta)
      exp.x.beta <- exp(x.beta)
      exp.x.beta.obs <- exp(x.beta[status])
      x.beta.obs <- x.beta[status]
      lhaz0 <- rLN(times.obs*exp.x.beta.obs,ae0,be0, log=TRUE)+x.beta.obs
      lsf0 <- sLN(times*exp.x.beta,ae0,be0)
      val <- - sum(lhaz0) - sum(log(lsf0))
      return(sum(val))
    }
  }
  if(method != "optim"){
    OPT <- optim(init, log.lik, control = list(maxit = maxit),
                 method = method, hessian = TRUE)
  } else {
    # nlminb() reports its minimum as $objective and returns neither a Hessian
    # nor optim()-style counts, so mirror the optim() output format here
    # (numerical Hessian via stats::optimHess)
    OPT <- nlminb(init, log.lik, control = list(iter.max = maxit))
    OPT$value <- OPT$objective
    OPT$hessian <- optimHess(OPT$par, log.lik)
    OPT$counts <- OPT$evaluations
  }
  pars <- length(OPT$par)
  l <- OPT$value
  MLE <- OPT$par
  hessian <- OPT$hessian
  SE <- sqrt(diag(solve(hessian)))
  zvalue <- MLE/SE                     # Wald z-statistics: estimate / standard error
  ci.lo <- MLE - SE*qnorm((1-conf.int)/2, lower.tail = FALSE)
  ci.up <- MLE + SE*qnorm((1-conf.int)/2, lower.tail = FALSE)
  p.value <- 2*stats::pnorm(-abs(zvalue))
  estimates <- data.frame(MLE, SE, zvalue,
                          "p.value" = ifelse(p.value < 0.001, "<0.001", round(p.value, digits = 3)),
                          lower.95 = ci.lo, upper.95 = ci.up)
  # Information criteria (l is the minimised negative log-likelihood)
  AIC <- 2*l + 2*pars
  CAIC <- AIC + (2*pars*(pars+1)/(n - pars - 1))   # corrected AIC (AICc)
  HQIC <- 2*l + 2*log(log(n))*pars
  BCAIC <- 2*l + (pars*(log(n)+1))
  BIC <- (2*l) + (pars*(log(n)))
  informationcriterions <- cbind(AIC, CAIC, BCAIC, BIC, HQIC)
  value <- OPT$value
  convergence <- OPT$convergence
  message <- OPT$message
  loglikelihood <- cbind(value, convergence, message)
  counts <- cbind(OPT$counts)
  result <- list(estimates = estimates,
                 informationcriterions = informationcriterions,
                 loglikelihood = loglikelihood,
                 counts = counts)
  return(result)
}
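# A minimal usage sketch, kept as a comment so that it is not executed when the
# package is loaded. It relies only on objects defined in this package (MLEPO
# and the alloauto data) and illustrates how the returned information criteria
# can be used to compare two baseline odds models fitted to the same data; the
# initial values below are illustrative guesses, not recommendations.
# data(alloauto)
# fit.gll <- MLEPO(init = c(1.0,0.40,1.0,0.50), times = alloauto$time,
#                  status = alloauto$delta, n = length(alloauto$time),
#                  basehaz = "GLLPO", z = as.matrix(alloauto$type))
# fit.ll <- MLEPO(init = c(1.0,0.40,0.50), times = alloauto$time,
#                 status = alloauto$delta, n = length(alloauto$time),
#                 basehaz = "LLPO", z = as.matrix(alloauto$type))
# # the baseline with the smaller AIC/BIC provides the better fit
# rbind(GLLPO = fit.gll$informationcriterions, LLPO = fit.ll$informationcriterions)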
/scratch/gouwar.j/cran-all/cranData/AmoudSurv/R/Models.R
#' @import stats stats4 flexsurv AHSurv pracma
#' @importFrom stats pweibull dweibull plnorm dlnorm plogis dlogis alias
#' @importFrom stats nlminb glm.control optim qnorm pnorm optimHess optimize optimise
#' @importFrom stats4 mle
#' @importFrom flexsurv hgamma hlnorm hllogis dgompertz pgompertz
#' @importFrom pracma sec cot acot asec csc acsc hessian
NULL
/scratch/gouwar.j/cran-all/cranData/AmoudSurv/R/Packages.R
#' Leukemia data set
#'
#' @name alloauto
#' @docType data
#' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau, \email{[email protected]}
#' @keywords datasets
#' @description The alloauto data frame has 101 rows and 3 columns.
#' @format This data frame contains the following columns:
#' \itemize{
#'   \item time: Time to death or relapse, months
#'   \item type: Type of transplant (1=allogeneic, 2=autologous)
#'   \item delta: Leukemia-free survival indicator (0=alive without relapse, 1=dead
#'     or relapse)
#' }
#' @source {
#'   Klein and Moeschberger (1997) \emph{Survival Analysis Techniques for Censored
#'   and truncated data}, Springer.
#'   Kardaun Stat. Nederlandica 37 (1983), 103-126.
#' }
#' @examples {
#'   data(alloauto)
#'   str(alloauto)
#' }
NULL
/scratch/gouwar.j/cran-all/cranData/AmoudSurv/R/alloauto.R
#' Bone Marrow Transplant (bmt) data set
#'
#' @name bmt
#' @docType data
#' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau, \email{[email protected]}
#' @keywords datasets
#' @description Bone marrow transplant study, which is widely used in hazard-based regression models.
#' @format There were 46 patients in the allogeneic treatment group and 44 patients in the autologous treatment group:
#' \itemize{
#'   \item Time: time to event
#'   \item Status: censoring indicator, 0 for censored and 1 for uncensored
#'   \item TRT: 1 for autologous treatment group; 0 for allogeneic treatment group
#' }
#' @references Robertson, V. M., Dickson, L. G., Romond, E. H., & Ash, R. C. (1987). Positive antiglobulin tests due to intravenous immunoglobulin in patients who received bone marrow transplant. Transfusion, 27(1), 28-31.
NULL
/scratch/gouwar.j/cran-all/cranData/AmoudSurv/R/bmt.R
#' Gastric data set
#'
#' @name gastric
#' @docType data
#' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau, \email{[email protected]}
#' @keywords datasets
#' @description The gastric data frame has 90 rows and 3 variables. It is a data set from a clinical trial conducted by the Gastrointestinal Tumor Study Group (GTSG) in 1982. The data set refers to the survival times of patients with locally nonresectable gastric cancer. Patients were either treated with chemotherapy combined with radiation or chemotherapy alone.
#' @format This data frame contains the following columns:
#' \itemize{
#'   \item time: survival times in days
#'   \item trt: treatments (1=chemotherapy + radiation; 0=chemotherapy alone)
#'   \item status: failure indicator (1=failure, 0=otherwise)
#' }
#' @source {
#'   Gastrointestinal Tumor Study Group. (1982) A Comparison of Combination Chemotherapy and Combined Modality Therapy for Locally Advanced Gastric Carcinoma. Cancer 49:1771-7.
#' }
#' @examples {
#'   data(gastric)
#'   str(gastric); head(gastric)
#' }
NULL
/scratch/gouwar.j/cran-all/cranData/AmoudSurv/R/gastric.R
#' Larynx Cancer-Patients data set
#'
#' @name larynx
#' @docType data
#' @author Abdisalam Hassan Muse, Samuel Mwalili, Oscar Ngesa, Christophe Chesneau, \email{[email protected]}
#' @keywords datasets
#' @description Larynx cancer-patients data set, which is widely used in survival regression models.
#' @format The data frame contains 90 rows and 5 columns:
#' \itemize{
#'   \item time: time to event, in months
#'   \item delta: censoring indicator, 0 for alive and 1 for dead
#'   \item stage: stage of disease (1=stage 1, 2=stage 2, 3=stage 3, 4=stage 4)
#'   \item diagyr: year of diagnosis of larynx cancer
#'   \item age: age at diagnosis of larynx cancer
#' }
#' @references Klein and Moeschberger (1997) \emph{Survival Analysis Techniques for Censored
#'   and truncated data}, Springer.
#'   Kardaun Stat. Nederlandica 37 (1983), 103-126.
NULL
/scratch/gouwar.j/cran-all/cranData/AmoudSurv/R/larynx.R
#' Prediction of antimicrobial peptides
#'
#' Antimicrobial peptides (AMPs) are ancient and evolutionarily conserved
#' molecules widespread in all living organisms that participate in host
#' defence and/or microbial competition. Due to their positive charge,
#' hydrophobicity and amphipathicity, they preferentially disrupt
#' negatively-charged bacterial membranes. AMPs are considered an important
#' alternative to traditional antibiotics, especially in times when the latter
#' are drastically losing their effectiveness. Therefore, efficient computational
#' tools for AMP prediction are essential to identify the best AMP candidates without
#' undertaking expensive experimental studies. AmpGram is our novel tool for predicting
#' AMPs based on the stacked random forests and n-gram analysis, able to
#' successfully predict antimicrobial peptides in proteomes.
#'
#' AmpGram is available as R function (\code{\link{predict.ampgram_model}}) or
#' shiny GUI (\code{\link{AmpGram_gui}}).
#'
#' AmpGram requires the external package, AmpGramModel, which
#' contains models necessary to perform the prediction. The model
#' can be installed using \code{\link{install_AmpGramModel}}
#'
#' @name AmpGram-package
#' @aliases AmpGram-package AmpGram
#' @docType package
#' @importFrom utils menu
#' @author
#' Maintainer: Michal Burdukiewicz <michalburdukiewicz@@gmail.com>
#' @references Burdukiewicz M, Sidorczuk K, Rafacz D, Pietluch F, Chilimoniuk J,
#' Roediger S, Gagat P. (2020) \emph{AmpGram: a proteome screening tool for
#' prediction and design of antimicrobial peptides}. (submitted)
#' @keywords package
NULL
/scratch/gouwar.j/cran-all/cranData/AmpGram/R/AmpGram-package.R
#' AmpGram Graphical User Interface
#'
#' Launches a graphical user interface that predicts the presence of
#' antimicrobial peptides.
#'
#' @importFrom shiny runApp
#' @seealso \code{\link[shiny]{runApp}}
#' @return No return value, called for side effects.
#' @section Warning : Any ad-blocking software may cause malfunctions.
#' @export AmpGram_gui
AmpGram_gui <- function() {
  require_AmpGramModel()
  runApp(system.file("AmpGram", package = "AmpGram"))
}
/scratch/gouwar.j/cran-all/cranData/AmpGram/R/AmpGram_gui.R
#' @name AmpGram_predictions
#' @title Prediction of antimicrobial peptides
#' @description Predictions made with the AmpGram method.
#' @docType data
#' @format A list of objects of class \code{single_ampgram_pred}, as returned by
#' \code{\link{predict.ampgram_model}}: each element stores the analyzed sequence,
#' the predictions for its 10-mers, and the prediction for the whole peptide/protein.
#' @keywords datasets
NULL
/scratch/gouwar.j/cran-all/cranData/AmpGram/R/datasets.R
#' Get putative antimicrobial peptides
#'
#' Function gets sequences recognized as antimicrobial peptides and returns them as a data.frame.
#' @param x AmpGram predictions for a single protein
#' @return a data.frame with sequences recognized as antimicrobial peptides (AMPs).
#' It consists of two columns:
#' \describe{
#'   \item{putative_AMP}{amino acid sequence of a 10-mer (subsequence of an analyzed peptide)
#'   predicted as AMP.}
#'   \item{prob}{Probability with which a 10-mer is recognized as AMP.}}
#' @export
#' @examples
#' data(AmpGram_predictions)
#' get_AMPs(AmpGram_predictions[[2]])
get_AMPs <- function(x) {
  tenmer_start <- 1L:length(x[["all_mers_pred"]])
  only_AMP_start <- tenmer_start[x[["all_mers_pred"]] > 0.5]
  only_AMP_end <- only_AMP_start + 9

  # Guard against proteins without any positive 10-mer: the index sequence
  # 1L:length(only_AMP_start) would otherwise be 1:0 and yield spurious rows
  if(length(only_AMP_start) == 0)
    return(data.frame(putative_AMP = character(0), prob = numeric(0)))

  data.frame(putative_AMP = sapply(1L:length(only_AMP_start), function(ith_pos)
    paste0(x[["seq"]][only_AMP_start[ith_pos]:only_AMP_end[ith_pos]], collapse = "")
  ),
  prob = x[["all_mers_pred"]][x[["all_mers_pred"]] > 0.5])
}
/scratch/gouwar.j/cran-all/cranData/AmpGram/R/get_AMPs.R
#' Convert predictions to data.frame
#'
#' Return predictions as data.frame.
#' @param x results of prediction as produced by \code{\link{predict.ampgram_model}}
#' @return a data.frame with two columns and number of rows corresponding to the
#' number of peptides/proteins in the results of prediction. Columns contain following
#' information:
#' \describe{
#'   \item{seq_name}{Name of an analyzed sequence}
#'   \item{probability}{Probability that a protein/peptide possesses antimicrobial
#'   activity. It assumes values from 0 (non-AMP) to 1 (AMP).}}
#' Row names contain sequence name and decision if a peptide/protein is classified
#' as AMP (\code{TRUE}) or non-AMP (\code{FALSE}).
#' @export
#' @examples
#' data(AmpGram_predictions)
#' pred2df(AmpGram_predictions)
pred2df <- function(x) {
  data.frame(seq_name = names(x),
             probability = sapply(x, function(i) i[["single_prot_pred"]]))
}
/scratch/gouwar.j/cran-all/cranData/AmpGram/R/pred2df.R
#' Predict antimicrobial peptides
#'
#' Recognizes antimicrobial peptides using the AmpGram algorithm.
#' @param object \code{ampgram_model} object.
#' @param newdata \code{list} of sequences (for example as given by
#' \code{\link[biogram]{read_fasta}} or \code{\link{read_txt}}).
#' @param ... further arguments passed to or from other methods.
#' @return \code{list} of objects of class \code{single_ampgram_pred}. Each object
#' of this class contains the analyzed sequence, values of predictions for 10-mers and
#' the result of the prediction for the whole peptide/protein.
#' @export
#' @details AmpGram requires the external package, AmpGramModel, which
#' contains models necessary to perform the prediction. The model
#' can be installed using \code{\link{install_AmpGramModel}}.
#'
#' Predictions for each protein are stored in objects of class
#' \code{single_ampgram_pred}. It consists of three elements:
#' \describe{
#'   \item{seq}{Character vector of amino acid sequence of an analyzed peptide/protein}
#'   \item{all_mers_pred}{Numeric vector of predictions for each 10-mer (subsequence
#'   of 10 amino acids) of a sequence. Prediction value indicates probability that
#'   a 10-mer possesses antimicrobial activity and ranges from 0 (non-AMP) to 1
#'   (AMP).}
#'   \item{single_prot_pred}{Named numeric vector of a single prediction value for
#'   a whole peptide/protein. Its value corresponds to the probability that a
#'   peptide/protein exhibits antimicrobial activity. It assumes name \code{TRUE}
#'   if probability is equal to or greater than 0.5, i.e. peptide/protein is classified
#'   as antimicrobial (AMP), and \code{FALSE} if probability is less than 0.5,
#'   i.e. peptide/protein is classified as non-antimicrobial (non-AMP).}
#' }
#' @importFrom biogram binarize decode_ngrams
#' @importFrom pbapply pblapply
#' @importFrom ranger ranger
#' @importFrom stats predict
#' @importFrom stringi stri_count
predict.ampgram_model <- function(object, newdata, ...) {
  require_AmpGramModel()

  ngrams <- object[["imp_features"]]
  decoded_ngrams <- gsub(pattern = "_", replacement = ".",
                         x = decode_ngrams(ngrams), fixed = TRUE)

  all_preds <- pblapply(newdata, function(ith_seq) {
    # count occurrences of the informative n-grams in each 10-mer of the sequence
    ngram_count <- find_ngrams(seq = ith_seq, decoded_ngrams = decoded_ngrams)
    colnames(ngram_count) <- ngrams
    # first layer: random forest predicting AMP probability for each 10-mer
    all_mers_pred <- predict(object[["rf_mers"]], ngram_count)[["predictions"]][, 2]
    # second layer: random forest predicting AMP probability for the whole protein
    # from summary statistics of the 10-mer predictions
    single_prot_pred <- predict(object[["rf_peptides"]],
                                calculate_statistics(all_mers_pred))[["predictions"]][, 2]

    res <- list(seq = ith_seq,
                all_mers_pred = all_mers_pred,
                single_prot_pred = single_prot_pred)
    class(res) <- "single_ampgram_pred"
    res
  })

  if(is.null(names(all_preds)))
    names(all_preds) <- paste0("seq", 1L:length(all_preds))

  all_preds
}

# data(AmpGram_model)
# sample_seq <- list(seq1 = c("F", "E", "N", "C", "N", "I", "T", "M", "G", "N", "M", "V",
#                             "R", "H", "I", "R", "W", "Y", "R", "D", "R", "Q", "K", "G", "D",
#                             "Y", "W", "W", "Y", "T", "I", "K", "Y", "S", "M", "A", "M", "I",
#                             "A", "C", "N", "I", "N", "V", "T", "I", "N", "Q", "C", "V"),
#                    seq2 = c("Q", "Y", "T", "S", "I", "M", "F", "L", "T", "A", "G", "H",
#                             "L", "A", "P", "W", "D", "R", "W", "C", "R", "S", "L", "T", "T",
#                             "W", "F", "G", "A", "P", "S", "A", "T", "Y", "P", "F", "F", "W",
#                             "E", "P", "E", "D", "I", "I", "I", "K", "P", "N", "T", "A"))
# predict(AmpGram_model, sample_seq)
/scratch/gouwar.j/cran-all/cranData/AmpGram/R/predict.R
#' Read sequences from .txt file
#'
#' Read sequence data saved in text file.
#'
#' @param connection a \code{\link{connection}} to the text (.txt) file.
#' @keywords manip
#' @return a list of sequences.
#' @details The input file should contain one or more amino acid sequences separated by
#' empty line(s).
#' @importFrom biogram read_fasta
#' @export
#' @examples
#' (sequences <- read_txt(system.file("AmpGram/prots.txt", package = "AmpGram")))
read_txt <- function(connection) {
  require_AmpGramModel()
  content <- readLines(connection)
  #test for empty content
  if(content[1] != "" || length(content) > 1) {
    if (sum(grepl(">", content, fixed = TRUE)) == 0) {
      if (content[1] != "")
        content <- c("", content)
      #number of empty lines
      nel <- 0
      #content without too many empty lines
      content2 <- c()
      for (i in 1L:length(content)) {
        if(content[i] == "") {
          nel <- nel + 1
        } else {
          nel <- 0
        }
        if (nel <= 1)
          content2 <- c(content2, content[i])
      }
      content <- content2
      #drop trailing empty lines, so that each remaining empty line marks
      #the start of exactly one sequence
      content_end <- length(content)
      while(content[content_end] == "")
        content_end <- content_end - 1
      content <- content[1:content_end]
      prot_names <- sapply(1L:sum(content == ""), function(i)
        paste0(">sequence", i))
      content[content == ""] <- prot_names
    }
    read_fasta(textConnection(content))
  } else {
    warning("No text detected.")
    NULL
  }
}
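# A minimal sketch of the expected plain-text input, kept as a comment: the toy
# sequences below are arbitrary amino acid strings (not shipped with the
# package), separated by an empty line and read through a text connection.
# Note that read_txt() may prompt to install 'AmpGramModel' first.
# txt <- c("MKVLAAGICLLVALAAAQ", "", "FLPIIAKLLSGLL")
# read_txt(textConnection(paste(txt, collapse = "\n")))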
/scratch/gouwar.j/cran-all/cranData/AmpGram/R/read_txt.R
count_longest <- function(x) {
  # lengths of uninterrupted runs of predictions above 0.5
  splitted_x <- strsplit(x = paste0(as.numeric(x > 0.5), collapse = ""),
                         split = "0")[[1]]
  len <- unname(sapply(splitted_x, nchar))
  if (length(len[len > 0]) == 0) {
    0
  } else {
    len[len > 0]
  }
}

#' @importFrom stats median
calculate_statistics <- function(pred) {
  data.frame(fraction_true = mean(pred > 0.5),
             pred_mean = mean(pred),
             pred_median = median(pred),
             n_peptide = length(pred),
             n_pos = sum(pred > 0.5),
             pred_min = min(pred),
             pred_max = max(pred),
             longest_pos = max(count_longest(pred)),
             n_pos_10 = sum(count_longest(pred) >= 10),
             frac_0_0.2 = sum(pred <= 0.2)/length(pred),
             frac_0.2_0.4 = sum(pred > 0.2 & pred <= 0.4)/length(pred),
             frac_0.4_0.6 = sum(pred > 0.4 & pred <= 0.6)/length(pred),
             frac_0.6_0.8 = sum(pred > 0.6 & pred <= 0.8)/length(pred),
             frac_0.8_1 = sum(pred > 0.8 & pred <= 1)/length(pred))
}

find_ngrams <- function(seq, decoded_ngrams) {
  end_pos <- 10L:length(seq)
  start_pos <- end_pos - 9
  res <- binarize(do.call(rbind, lapply(1L:length(end_pos), function(ith_mer_id) {
    ten_mer <- paste0(seq[start_pos[ith_mer_id]:end_pos[ith_mer_id]], collapse = "")
    stri_count(ten_mer, regex = decoded_ngrams)
  })))
  res
}

#' Install AmpGramModel package containing model for AMP prediction
#'
#' Installs AmpGramModel package containing models necessary to perform prediction
#' of antimicrobial peptides. Due to the large size of our model and the file size
#' limit on CRAN, it needs to be stored in an external repository.
#' See the readme for more information or in case of installation problems.
#'
#' @export
#' @importFrom devtools install_github
install_AmpGramModel <- function() {
  install_github("michbur/AmpGramModel")
}

is_AmpGramModel_installed <- function() {
  check_AmpGramModel <- try(find.package("AmpGramModel"), silent = TRUE)
  !inherits(check_AmpGramModel, "try-error")
}

require_AmpGramModel <- function() {
  if(interactive()) {
    # isTRUE() guards against the option being unset outside .onAttach()
    if (!is_AmpGramModel_installed() && !isTRUE(getOption("AmpGram_suppress_prompt"))) {
      response <- menu(c("yes", "no", "no and don't ask me anymore"),
                       title = "To be able to use AmpGram properly, you should have installed 'AmpGramModel' package available via GitHub. Install?")
      switch(response,
             tryCatch(install_AmpGramModel(),
                      finally = if (!is_AmpGramModel_installed())
                        warning("There was an error during an attempt to install 'AmpGramModel' package.",
                                call. = FALSE)),
             message("You cannot access full functionality of this package without having installed 'AmpGramModel'. You can do it manually by calling 'devtools::install_github('michbur/AmpGramModel')'"),
             {options(AmpGram_suppress_prompt = TRUE)
               cat("Ok, but you cannot access full functionality of this package without having installed 'AmpGramModel'")},
             message("You cannot access full functionality of this package without having installed 'AmpGramModel'. You can do it manually by calling 'devtools::install_github('michbur/AmpGramModel')'")
      )
    }
  } else {
    message("To be able to use AmpGram properly, you should have installed 'AmpGramModel' with 'devtools::install_github('michbur/AmpGramModel')'.")
  }
}
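# A minimal sketch (comment only, using just the helpers defined above) of how
# the 10-mer predictions of a single protein are condensed into the one-row
# feature data.frame consumed by the peptide-level random forest; toy_pred is
# an arbitrary toy vector, not real output.
# toy_pred <- c(0.1, 0.8, 0.9, 0.4, 0.7)   # five 10-mer probabilities
# count_longest(toy_pred)                  # longest runs of values > 0.5: 2 and 1
# calculate_statistics(toy_pred)           # one-row data.frame of summary features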
/scratch/gouwar.j/cran-all/cranData/AmpGram/R/utils.R
.onAttach <- function(libname, pkgname) {
  options(AmpGram_suppress_prompt = FALSE)
  if (!is_AmpGramModel_installed())
    packageStartupMessage("To be able to use AmpGram properly, you should install 'AmpGramModel' package available via GitHub. You can do it by calling 'install_AmpGramModel()'")
}
/scratch/gouwar.j/cran-all/cranData/AmpGram/R/zzz.R
---
title: "AmpGram 1.0: analysis report"
output:
  html_document:
    theme: united
    self_contained: true
    toc: true
    toc_float: true
---

<style>
img {
  max-width: none;
  /* other options: max-width: 200%; max-width: 700px; max-width: 9in; max-width: 25cm; etc */
}
</style>

```{r, results='asis', echo=FALSE}
file_name <- ifelse(is.null(input[["seq_file"]][["name"]]),
                    "none",
                    input[["seq_file"]][["name"]])
cat("**Input file name**: ", file_name, "\n\n")
cat(paste0("**Date**: ", Sys.time()), "\n\n")
```

```{r, echo=FALSE, fig.width=10, results='asis'}
for (i in 1L:length(detailed_preds())) {
  cat("\n\n## ", names(detailed_preds())[[i]], "\n\n", sep = "")
  print(plot_single_protein(detailed_preds()[[i]]))
  print(knitr::kable(get_AMPs(selected_proteins()[[i]])))
}
```

## About

This analysis was provided by [**AmpGram**](https://github.com/michbur/AmpGram)

```{r, echo=FALSE}
pander(sessionInfo())
```
/scratch/gouwar.j/cran-all/cranData/AmpGram/inst/AmpGram/AmpGram-report.Rmd
library(shiny)
library(ggplot2)
library(AmpGram)
library(AmpGramModel)
library(ranger)
library(DT)
library(shinythemes)
library(rmarkdown)
library(pander)

source("shiny-server-utils.R")

data(AmpGram_model)

options(shiny.maxRequestSize = 10*1024^2)

shinyServer(function(input, output) {

  prediction <- reactive({
    if (!is.null(input[["seq_file"]]))
      input_sequences <- read_txt(input[["seq_file"]][["datapath"]])

    input[["use_area"]]
    isolate({
      if (!is.null(input[["text_area"]]))
        if(input[["text_area"]] != "")
          input_sequences <- read_txt(textConnection(input[["text_area"]]))
    })

    if(exists("input_sequences")) {
      if(length(input_sequences) > 50) {
        #dummy error, just to stop further processing
        stop("Too many sequences. Please use AmpGram locally.")
      } else {
        if(any(lengths(input_sequences) < 10)) {
          #dummy error, just to stop further processing
          stop("The minimum length of the sequence is 10 amino acids.")
        } else {
          predict_in_shiny(AmpGram_model, input_sequences)
        }
      }
    } else {
      NULL
    }
  })

  decision_table <- reactive({
    if(!is.null(prediction())) {
      pred2df(prediction())
    }
  })

  output[["decision_table"]] <- renderDataTable({
    df <- decision_table()
    colnames(df) <- c("Protein name", "AMP probability")
    my_DT(df) %>%
      formatRound(2, 4)
  })

  selected_proteins <- reactive({
    validate(
      need(input[["decision_table_rows_selected"]],
           "Select at least one row in the Results table")
    )
    prediction()[input[["decision_table_rows_selected"]]]
  })

  detailed_preds <- reactive({
    selected_pred_data <- selected_proteins()
    detailed_pred_list <- lapply(1L:length(selected_pred_data), function(ith_pred_id) {
      ith_pred <- selected_pred_data[[ith_pred_id]]
      data.frame(seq_name = names(selected_pred_data)[ith_pred_id],
                 start = 1L:length(ith_pred[["all_mers_pred"]]),
                 end = 1L:length(ith_pred[["all_mers_pred"]]) + 9,
                 pred = ith_pred[["all_mers_pred"]],
                 decision = factor(ith_pred[["all_mers_pred"]] > 0.5,
                                   levels = c("FALSE", "TRUE"),
                                   labels = c("No", "Yes")))
    })
    names(detailed_pred_list) <- names(selected_pred_data)
    detailed_pred_list
  })

  output[["detailed_preds"]] <- renderUI({
    detailed_preds_list <- lapply(1L:length(detailed_preds()), function(i) {
      list(plotOutput(paste0("detailed_plot", i)),
           dataTableOutput(paste0("detailed_table", i)))
    })

    c(list(downloadButton("download_long_graph", "Download long output (with graphics)")),
      do.call(tagList, unlist(detailed_preds_list, recursive = FALSE)))
  })

  for (i in 1L:50) {
    local({
      my_i <- i
      output[[paste0("detailed_plot", my_i)]] <- renderPlot({
        plot_single_protein(detailed_preds()[[my_i]])
      })
      output[[paste0("detailed_table", my_i)]] <-
        renderDataTable(AMP_DT(get_AMPs(selected_proteins()[[my_i]])))
    })
  }

  output[["detailed_tab"]] <- renderUI({
    uiOutput("detailed_preds")
  })

  output[["dynamic_ui"]] <- renderUI({
    if (!is.null(input[["seq_file"]]))
      input_sequences <- read_txt(input[["seq_file"]][["datapath"]])

    input[["use_area"]]
    isolate({
      if (!is.null(input[["text_area"]]))
        if(input[["text_area"]] != "")
          input_sequences <- read_txt(textConnection(input[["text_area"]]))
    })

    if(exists("input_sequences")) {
      fluidRow(
        tags$p(HTML("<h3><A HREF=\"javascript:history.go(0)\">Start a new query</A></h3>"))
      )
    } else {
      fluidRow(
        h4("Exemplary sequences"),
        pre(includeText("prots.txt"))
      )
    }
  })

  output[["dynamic_tabset"]] <- renderUI({
    if(is.null(prediction())) {
      tabPanel(title = "Sequence input",
               tags$textarea(id = "text_area", style = "width:90%",
                             placeholder = "Paste sequences (FASTA format required) here...",
                             rows = 22, cols = 60, ""),
               tags$p(""),
               actionButton("use_area", "Submit data from field above"),
               tags$p(""),
               fileInput('seq_file', 'Submit .fasta or .txt file:'))
    } else {
      tabsetPanel(
        tabPanel("Results",
                 tags$h4("Select at least one protein to produce detailed results."),
                 dataTableOutput("decision_table")
        ),
        tabPanel("Detailed results",
                 uiOutput("detailed_tab")
        )
      )
    }
  })

  file_name <- reactive({
    if(is.null(input[["seq_file"]][["name"]])) {
      part_name <- "AmpGram_results"
    } else {
      part_name <- strsplit(input[["seq_file"]][["name"]], ".", fixed = TRUE)[[1]][1]
    }
    part_name
  })

  output[["download_long"]] <- downloadHandler(
    filename = function() {
      paste0(file_name(), "_pred.txt")
    },
    content = function(file) {
      sink(file, type = "output")
      cat("Input file name: ",
          ifelse(is.null(input[["seq_file"]][["name"]]), "none", input[["seq_file"]][["name"]]),
          "\n\n")
      cat(paste0("Date: ", Sys.time()), "\n\n")
      for (i in 1L:length(prediction())) {
        cat("\n\n")
        summary(prediction()[[i]])
        cat("\n\n")
      }
      sink()
    }
  )

  output[["download_long_graph"]] <- downloadHandler(
    filename = function() {
      paste0(file_name(), "_pred.html")
    },
    content = function(file) {
      src <- normalizePath("AmpGram-report.Rmd")

      # temporarily switch to the temp dir, in case you do not have write
      # permission to the current working directory
      owd <- setwd(tempdir())
      on.exit(setwd(owd))
      file.copy(src, "AmpGram-report.Rmd")

      out <- render("AmpGram-report.Rmd", output_format = "html_document",
                    file, quiet = TRUE)
      file.rename(out, file)
    }
  )
})
/scratch/gouwar.j/cran-all/cranData/AmpGram/inst/AmpGram/server.R
options(DT.options = list(dom = "Brtip",
                          buttons = c("copy", "csv", "excel", "print"),
                          pageLength = 50
))

my_DT <- function(x, ...)
  datatable(x, ..., escape = FALSE, extensions = 'Buttons', filter = "top",
            rownames = FALSE, style = "bootstrap")

AMP_DT <- function(x, ...) {
  df <- x
  colnames(df) <- c("Putative AMP", "Probability")
  formatRound(my_DT(df, ...), 2, 4)
}

plot_single_protein <- function(single_prot) {
  p <- ggplot(single_prot, aes(x = start, xend = end, y = pred, yend = pred,
                               color = decision, linetype = decision)) +
    geom_segment() +
    geom_hline(yintercept = 0.5, color = "red") +
    ggtitle(single_prot[["seq_name"]][1]) +
    scale_x_continuous("Position") +
    scale_y_continuous("Probability of AMP", limits = c(0, 1)) +
    scale_color_manual("AMP", values = c(No = "#878787", Yes = "black")) +
    scale_linetype_manual("AMP", values = c(No = "dashed", Yes = "solid")) +
    theme_bw() +
    theme(plot.title = element_text(hjust = 0.5),
          legend.position = "bottom")

  # use wider axis breaks for long proteins
  if(max(single_prot[["end"]]) > 100)
    p <- p + scale_x_continuous("Position",
                                breaks = seq(0, max(single_prot[["end"]]), by = 20))
  p
}

predict_in_shiny <- function(object, newdata) {
  ngrams <- object[["imp_features"]]
  decoded_ngrams <- gsub(pattern = "_", replacement = ".",
                         x = biogram::decode_ngrams(ngrams), fixed = TRUE)

  prediction_percentage <- 0

  withProgress(message = "", value = 0, {
    all_preds <- lapply(1L:length(newdata), function(ith_seq_id) {
      ith_seq <- newdata[[ith_seq_id]]
      ngram_count <- AmpGram:::find_ngrams(seq = ith_seq, decoded_ngrams = decoded_ngrams)
      colnames(ngram_count) <- ngrams
      all_mers_pred <- predict(object[["rf_mers"]], ngram_count)[["predictions"]][, 2]
      single_prot_pred <- predict(object[["rf_peptides"]],
                                  AmpGram:::calculate_statistics(all_mers_pred))[["predictions"]][, 2]

      res <- list(seq = ith_seq,
                  all_mers_pred = all_mers_pred,
                  single_prot_pred = single_prot_pred)
      class(res) <- "single_ampgram_pred"

      prediction_percentage <<- prediction_percentage + 1/length(newdata)*100
      incProgress(1/length(newdata),
                  detail = paste0(round(prediction_percentage, 2), "% proteins analyzed"))
      res
    })
  }, style = "old")

  if(is.null(names(newdata))) {
    names(all_preds) <- paste0("seq", 1L:length(all_preds))
  } else {
    names(all_preds) <- names(newdata)
  }

  all_preds
}
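# A minimal sketch (comment only) of the input expected by plot_single_protein():
# a toy data.frame with the same columns as produced by detailed_preds() in
# server.R; the values below are made up for illustration.
# toy <- data.frame(seq_name = "seq1",
#                   start = 1:3, end = 10:12,
#                   pred = c(0.2, 0.7, 0.6),
#                   decision = factor(c("No", "Yes", "Yes"),
#                                     levels = c("No", "Yes")))
# plot_single_protein(toy)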
/scratch/gouwar.j/cran-all/cranData/AmpGram/inst/AmpGram/shiny-server-utils.R
library(shiny)
library(shinythemes)

shinyUI(fluidPage(
  tags$head(includeScript("ga.js"),
            tags$link(rel = "stylesheet", type = "text/css", href = "progress.css")),
  title = "AmpGram",
  theme = shinytheme("united"),
  headerPanel(""),
  sidebarLayout(
    sidebarPanel(style = "background-color: #e0e0e0;border-color: #E95420;border-width: .25rem",
                 includeMarkdown("readme.md"),
                 uiOutput("dynamic_ui")
    ),
    mainPanel(
      uiOutput("dynamic_tabset")
    )
  )))
/scratch/gouwar.j/cran-all/cranData/AmpGram/inst/AmpGram/ui.R